From 4d1fa655b1175db4b306b5364cbbff55e4176f53 Mon Sep 17 00:00:00 2001
From: Adrian Riobo
Date: Fri, 21 Mar 2025 09:38:15 +0100
Subject: [PATCH 01/11] fix(deps): update go major updates

---
 go.mod                                        |   72 +-
 go.sum                                        |  124 +-
 pkg/provider/util/instancetypes/aws.go        |    5 +-
 vendor/dario.cat/mergo/.gitignore             |    3 +
 vendor/dario.cat/mergo/README.md              |  102 +-
 vendor/dario.cat/mergo/map.go                 |    2 +-
 vendor/dario.cat/mergo/merge.go               |    2 +-
 .../v2/pkg/awsapi/selectorec2.go              |   12 -
 .../{v2 => v3}/LICENSE                        |    0
 .../{v2 => v3}/NOTICE                         |    0
 .../v3/pkg/awsapi/selectorec2.go              |   26 +
 .../pkg/bytequantity/bytequantity.go          |   51 +-
 .../{v2 => v3}/pkg/ec2pricing/ec2pricing.go   |   73 +-
 .../{v2 => v3}/pkg/ec2pricing/odpricing.go    |  122 +-
 .../{v2 => v3}/pkg/ec2pricing/spotpricing.go  |   84 +-
 .../pkg/instancetypes/instancetypes.go        |   96 +-
 .../{v2 => v3}/pkg/selector/aggregates.go     |   43 +-
 .../{v2 => v3}/pkg/selector/comparators.go    |   69 +-
 .../{v2 => v3}/pkg/selector/emr.go            |   25 +-
 .../pkg/selector/outputs/bubbletea.go         |   46 +-
 .../pkg/selector/outputs/outputs.go           |   39 +-
 .../pkg/selector/outputs/sortingView.go       |   62 +-
 .../pkg/selector/outputs/tableView.go         |  100 +-
 .../pkg/selector/outputs/verboseView.go       |   38 +-
 .../{v2 => v3}/pkg/selector/selector.go       |  235 +-
 .../{v2 => v3}/pkg/selector/services.go       |   37 +-
 .../{v2 => v3}/pkg/selector/types.go          |   75 +-
 .../{v2 => v3}/pkg/sorter/sorter.go           |   44 +-
 .../aws/aws-sdk-go-v2/aws/credential_cache.go |   11 +
 .../aws/aws-sdk-go-v2/aws/credentials.go      |   57 +
 .../aws-sdk-go-v2/aws/go_module_metadata.go   |    2 +-
 .../aws/middleware/user_agent.go              |   57 +
 .../aws/aws-sdk-go-v2/config/CHANGELOG.md     |    4 +
 .../config/go_module_metadata.go              |    2 +-
 .../config/resolve_credentials.go             |  102 +-
 .../aws-sdk-go-v2/credentials/CHANGELOG.md    |    4 +
 .../credentials/ec2rolecreds/provider.go      |   12 +
 .../credentials/endpointcreds/provider.go     |   14 +
 .../credentials/go_module_metadata.go         |    2 +-
 .../credentials/processcreds/provider.go      |   11 +
 .../ssocreds/sso_credentials_provider.go      |   12 +
 .../credentials/static_provider.go            |   10 +
 .../stscreds/assume_role_provider.go          |   12 +
 .../stscreds/web_identity_provider.go         |   12 +
 .../feature/ec2/imds/CHANGELOG.md             |    4 +
 .../feature/ec2/imds/go_module_metadata.go    |    2 +-
 .../internal/configsources/CHANGELOG.md       |    4 +
 .../configsources/go_module_metadata.go       |    2 +-
 .../internal/endpoints/v2/CHANGELOG.md        |    4 +
 .../endpoints/v2/go_module_metadata.go        |    2 +-
 .../internal/presigned-url/CHANGELOG.md       |    4 +
 .../presigned-url/go_module_metadata.go       |    2 +-
 .../aws-sdk-go-v2/service/sso/CHANGELOG.md    |    5 +
 .../aws-sdk-go-v2/service/sso/api_client.go   |   31 +
 .../service/sso/api_op_GetRoleCredentials.go  |    3 +
 .../service/sso/api_op_ListAccountRoles.go    |    3 +
 .../service/sso/api_op_ListAccounts.go        |    3 +
 .../service/sso/api_op_Logout.go              |    3 +
 .../service/sso/go_module_metadata.go         |    2 +-
 .../service/ssooidc/CHANGELOG.md              |    5 +
 .../service/ssooidc/api_client.go             |   31 +
 .../service/ssooidc/api_op_CreateToken.go     |    3 +
 .../ssooidc/api_op_CreateTokenWithIAM.go      |    3 +
 .../service/ssooidc/api_op_RegisterClient.go  |    3 +
 .../api_op_StartDeviceAuthorization.go        |    3 +
 .../service/ssooidc/go_module_metadata.go     |    2 +-
 .../aws-sdk-go-v2/service/sts/CHANGELOG.md    |    4 +
 .../aws-sdk-go-v2/service/sts/api_client.go   |   31 +
 .../service/sts/api_op_AssumeRole.go          |    3 +
 .../service/sts/api_op_AssumeRoleWithSAML.go  |    3 +
 .../sts/api_op_AssumeRoleWithWebIdentity.go   |    3 +
 .../service/sts/api_op_AssumeRoot.go          |    3 +
 .../sts/api_op_DecodeAuthorizationMessage.go  |    3 +
 .../service/sts/api_op_GetAccessKeyInfo.go    |    3 +
 .../service/sts/api_op_GetCallerIdentity.go   |    3 +
 .../service/sts/api_op_GetFederationToken.go  |    3 +
 .../service/sts/api_op_GetSessionToken.go     |    3 +
 .../service/sts/go_module_metadata.go         |    2 +-
 vendor/github.com/aws/aws-sdk-go/LICENSE.txt  |  202 -
 vendor/github.com/aws/aws-sdk-go/NOTICE.txt   |    3 -
 .../aws/aws-sdk-go/aws/awserr/error.go        |  164 -
 .../aws/aws-sdk-go/aws/awserr/types.go        |  221 -
 .../aws/aws-sdk-go/aws/endpoints/decode.go    |  193 -
 .../aws/aws-sdk-go/aws/endpoints/defaults.go  | 45748 ----------------
 .../aws/endpoints/dep_service_ids.go          |  141 -
 .../aws/aws-sdk-go/aws/endpoints/doc.go       |   65 -
 .../aws/aws-sdk-go/aws/endpoints/endpoints.go |  708 -
 .../aws/endpoints/legacy_regions.go           |   24 -
 .../aws/aws-sdk-go/aws/endpoints/v3model.go   |  594 -
 .../aws/endpoints/v3model_codegen.go          |  412 -
 vendor/github.com/aws/smithy-go/CHANGELOG.md  |   10 +
 .../aws/smithy-go/go_module_metadata.go       |    2 +-
 .../charmbracelet/bubbletea/.gitignore        |    1 +
 .../bubbletea/.golangci-soft.yml              |   10 +-
 .../charmbracelet/bubbletea/.golangci.yml     |    2 -
 .../charmbracelet/bubbletea/.goreleaser.yml   |    5 +-
 .../charmbracelet/bubbletea/CONTRIBUTING.md   |   13 -
 .../charmbracelet/bubbletea/README.md         |  197 +-
 .../bubbletea/inputreader_windows.go          |   16 +-
 .../charmbracelet/bubbletea/key_windows.go    |   21 +-
 .../charmbracelet/bubbletea/options.go        |   11 +-
 .../bubbletea/standard_renderer.go            |  241 +-
 .../github.com/charmbracelet/bubbletea/tea.go |  121 +-
 .../.golangci-soft.yml                        |   10 +-
 .../charmbracelet/colorprofile/.golangci.yml  |   28 +
 .../colorprofile/.goreleaser.yml              |    6 +
 .../charmbracelet/colorprofile/LICENSE        |   21 +
 .../charmbracelet/colorprofile/README.md      |  103 +
 .../charmbracelet/colorprofile/env.go         |  287 +
 .../charmbracelet/colorprofile/env_other.go   |    8 +
 .../charmbracelet/colorprofile/env_windows.go |   45 +
 .../charmbracelet/colorprofile/profile.go     |  399 +
 .../charmbracelet/colorprofile/writer.go      |  166 +
 .../charmbracelet/lipgloss/.gitignore         |    1 +
 .../charmbracelet/lipgloss/.golangci.yml      |   15 +-
 .../charmbracelet/lipgloss/.goreleaser.yml    |    4 +-
 .../charmbracelet/lipgloss/README.md          |  174 +-
 .../charmbracelet/lipgloss/Taskfile.yaml      |   19 +
 .../charmbracelet/lipgloss/align.go           |    6 +-
 .../charmbracelet/lipgloss/borders.go         |   75 +-
 .../charmbracelet/lipgloss/color.go           |    2 +-
 .../github.com/charmbracelet/lipgloss/get.go  |   22 +-
 .../charmbracelet/lipgloss/ranges.go          |   48 +
 .../github.com/charmbracelet/lipgloss/set.go  |   18 +-
 .../charmbracelet/lipgloss/style.go           |   11 +-
 .../charmbracelet/x/ansi/background.go        |  114 +-
 vendor/github.com/charmbracelet/x/ansi/c0.go  |    7 +
 .../charmbracelet/x/ansi/charset.go           |   55 +
 .../github.com/charmbracelet/x/ansi/color.go  |    2 +-
 vendor/github.com/charmbracelet/x/ansi/csi.go |  141 -
 .../github.com/charmbracelet/x/ansi/ctrl.go   |  122 +-
 .../github.com/charmbracelet/x/ansi/cursor.go |  442 +-
 vendor/github.com/charmbracelet/x/ansi/cwd.go |   26 +
 vendor/github.com/charmbracelet/x/ansi/dcs.go |  148 -
 .../github.com/charmbracelet/x/ansi/focus.go  |    9 +
 .../charmbracelet/x/ansi/graphics.go          |  199 +
 .../github.com/charmbracelet/x/ansi/iterm2.go |   18 +
 .../github.com/charmbracelet/x/ansi/keypad.go |   28 +
 .../github.com/charmbracelet/x/ansi/kitty.go  |    2 +-
 .../charmbracelet/x/ansi/kitty/decoder.go     |   85 +
 .../charmbracelet/x/ansi/kitty/encoder.go     |   64 +
 .../charmbracelet/x/ansi/kitty/graphics.go    |  414 +
 .../charmbracelet/x/ansi/kitty/options.go     |  367 +
 .../github.com/charmbracelet/x/ansi/method.go |  172 +
 .../github.com/charmbracelet/x/ansi/mode.go   |  642 +-
 .../github.com/charmbracelet/x/ansi/modes.go  |   71 +
 .../github.com/charmbracelet/x/ansi/mouse.go  |  172 +
 .../charmbracelet/x/ansi/notification.go      |   13 +
 vendor/github.com/charmbracelet/x/ansi/osc.go |   69 -
 .../github.com/charmbracelet/x/ansi/params.go |   45 -
 .../github.com/charmbracelet/x/ansi/parser.go |  383 +-
 .../charmbracelet/x/ansi/parser/const.go      |    4 +-
 .../charmbracelet/x/ansi/parser/seq.go        |   16 +-
 .../x/ansi/parser/transition_table.go         |    4 +-
 .../charmbracelet/x/ansi/parser_decode.go     |  311 +-
 .../charmbracelet/x/ansi/parser_handler.go    |   60 +
 .../charmbracelet/x/ansi/parser_sync.go       |   29 +
 .../github.com/charmbracelet/x/ansi/paste.go  |    7 +
 .../github.com/charmbracelet/x/ansi/reset.go  |   11 +
 .../github.com/charmbracelet/x/ansi/screen.go |  318 +-
 .../charmbracelet/x/ansi/sequence.go          |  199 -
 vendor/github.com/charmbracelet/x/ansi/sgr.go |   95 +
 .../github.com/charmbracelet/x/ansi/status.go |  144 +
 .../github.com/charmbracelet/x/ansi/style.go  |  522 +-
 .../charmbracelet/x/ansi/termcap.go           |   12 +-
 .../charmbracelet/x/ansi/truncate.go          |  183 +-
 .../github.com/charmbracelet/x/ansi/util.go   |   77 +
 .../github.com/charmbracelet/x/ansi/width.go  |   18 +
 .../github.com/charmbracelet/x/ansi/winop.go  |   53 +
 .../github.com/charmbracelet/x/ansi/wrap.go   |   84 +-
 .../github.com/charmbracelet/x/ansi/xterm.go  |   88 +
 .../charmbracelet/x/cellbuf/LICENSE           |   21 +
 .../charmbracelet/x/cellbuf/buffer.go         |  473 +
 .../charmbracelet/x/cellbuf/cell.go           |  503 +
 .../charmbracelet/x/cellbuf/errors.go         |    6 +
 .../charmbracelet/x/cellbuf/geom.go           |   21 +
 .../charmbracelet/x/cellbuf/hardscroll.go     |  272 +
 .../charmbracelet/x/cellbuf/hashmap.go        |  301 +
 .../charmbracelet/x/cellbuf/link.go           |   14 +
 .../charmbracelet/x/cellbuf/screen.go         | 1457 +
 .../charmbracelet/x/cellbuf/sequence.go       |  131 +
 .../charmbracelet/x/cellbuf/style.go          |   31 +
 .../charmbracelet/x/cellbuf/tabstop.go        |  137 +
 .../charmbracelet/x/cellbuf/utils.go          |   38 +
 .../charmbracelet/x/cellbuf/wrap.go           |  178 +
 .../charmbracelet/x/cellbuf/writer.go         |  339 +
 .../charmbracelet/x/term/term_windows.go      |    1 +
 .../github.com/imdario/mergo/.deepsource.toml |   12 -
 vendor/github.com/imdario/mergo/.gitignore    |   33 -
 vendor/github.com/imdario/mergo/.travis.yml   |   12 -
 .../imdario/mergo/CODE_OF_CONDUCT.md          |   46 -
 .../github.com/imdario/mergo/CONTRIBUTING.md  |  112 -
 vendor/github.com/imdario/mergo/LICENSE       |   28 -
 vendor/github.com/imdario/mergo/README.md     |  242 -
 vendor/github.com/imdario/mergo/SECURITY.md   |   14 -
 vendor/github.com/imdario/mergo/doc.go        |  143 -
 vendor/github.com/imdario/mergo/map.go        |  178 -
 vendor/github.com/imdario/mergo/merge.go      |  409 -
 vendor/github.com/imdario/mergo/mergo.go      |   81 -
 .../muesli/termenv/.golangci-soft.yml         |    6 +-
 .../github.com/muesli/termenv/.golangci.yml   |    1 -
 vendor/github.com/muesli/termenv/README.md    |    6 +-
 .../github.com/muesli/termenv/ansicolors.go   |    2 +-
 vendor/github.com/muesli/termenv/color.go     |   17 +-
 .../muesli/termenv/constants_zos.go           |    8 +
 vendor/github.com/muesli/termenv/output.go    |   34 +-
 vendor/github.com/muesli/termenv/profile.go   |   25 +-
 vendor/github.com/muesli/termenv/screen.go    |   90 +-
 vendor/github.com/muesli/termenv/style.go     |    4 +-
 .../muesli/termenv/templatehelper.go          |    2 +
 vendor/github.com/muesli/termenv/termenv.go   |   17 +-
 .../muesli/termenv/termenv_posix.go           |    4 +-
 .../github.com/muesli/termenv/termenv_unix.go |   36 +-
 .../muesli/termenv/termenv_windows.go         |    5 +-
 .../pelletier/go-toml/v2/.goreleaser.yaml     |    1 +
 .../github.com/pelletier/go-toml/v2/README.md |    2 +-
 .../pelletier/go-toml/v2/marshaler.go         |   24 +-
 .../pelletier/go-toml/v2/unmarshaler.go       |   45 +-
 vendor/github.com/spf13/cobra/.golangci.yml   |   21 +-
 vendor/github.com/spf13/cobra/README.md       |    5 +-
 vendor/github.com/spf13/cobra/active_help.go  |   15 +-
 vendor/github.com/spf13/cobra/args.go         |    4 +-
 .../spf13/cobra/bash_completions.go           |   25 +-
 .../spf13/cobra/bash_completionsV2.go         |  152 +-
 vendor/github.com/spf13/cobra/cobra.go        |   18 +-
 vendor/github.com/spf13/cobra/command.go      |  356 +-
 vendor/github.com/spf13/cobra/completions.go  |  186 +-
 vendor/github.com/spf13/cobra/flag_groups.go  |   34 +-
 .../spf13/cobra/powershell_completions.go     |   39 +-
 vendor/github.com/spf13/pflag/.editorconfig   |   12 +
 vendor/github.com/spf13/pflag/.golangci.yaml  |    4 +
 vendor/github.com/spf13/pflag/flag.go         |   29 +-
 vendor/github.com/spf13/pflag/ip.go           |    3 +
 vendor/github.com/spf13/pflag/ipnet_slice.go  |  147 +
 vendor/github.com/spf13/pflag/string_array.go |    4 -
 vendor/github.com/xo/terminfo/.gitignore      |    9 +
 vendor/github.com/xo/terminfo/LICENSE         |   21 +
 vendor/github.com/xo/terminfo/README.md       |  139 +
 vendor/github.com/xo/terminfo/caps.go         |   31 +
 vendor/github.com/xo/terminfo/capvals.go      | 1525 +
 vendor/github.com/xo/terminfo/color.go        |   88 +
 vendor/github.com/xo/terminfo/dec.go          |  245 +
 vendor/github.com/xo/terminfo/load.go         |   64 +
 vendor/github.com/xo/terminfo/param.go        |  405 +
 vendor/github.com/xo/terminfo/stack.go        |   48 +
 vendor/github.com/xo/terminfo/terminfo.go     |  479 +
 .../go-cty/cty/convert/conversion_dynamic.go  |   29 +-
 .../zclconf/go-cty/cty/primitive_type.go      |   16 +-
 vendor/github.com/zclconf/go-cty/cty/walk.go  |    2 +-
 vendor/golang.org/x/mod/LICENSE               |    4 +-
 vendor/golang.org/x/tools/LICENSE             |    4 +-
 .../x/tools/go/packages/external.go           |    2 +-
 .../x/tools/go/packages/packages.go           |   11 +-
 .../golang.org/x/tools/go/packages/visit.go   |    9 +
 .../x/tools/go/types/objectpath/objectpath.go |   40 +-
 .../x/tools/internal/aliases/aliases.go       |   10 +-
 .../x/tools/internal/aliases/aliases_go121.go |   16 +-
 .../x/tools/internal/aliases/aliases_go122.go |   41 +-
 .../x/tools/internal/gcimporter/iexport.go    |  249 +-
 .../x/tools/internal/gcimporter/iimport.go    |   18 +-
 .../tools/internal/gcimporter/ureader_yes.go  |   28 +-
 .../x/tools/internal/pkgbits/decoder.go       |   38 +-
 .../x/tools/internal/pkgbits/encoder.go       |   43 +-
 .../x/tools/internal/pkgbits/frames_go1.go    |   21 -
 .../x/tools/internal/pkgbits/frames_go17.go   |   28 -
 .../x/tools/internal/pkgbits/support.go       |    2 +-
 .../x/tools/internal/pkgbits/sync.go          |   23 +
 .../internal/pkgbits/syncmarker_string.go     |    7 +-
 .../x/tools/internal/pkgbits/version.go       |   85 +
 .../x/tools/internal/stdlib/manifest.go       |    2 +-
 .../tools/internal/typesinternal/errorcode.go |    8 +-
 .../x/tools/internal/versions/constraint.go   |   13 +
 .../internal/versions/constraint_go121.go     |   14 +
 vendor/modules.txt                            |   95 +-
 274 files changed, 16657 insertions(+), 52608 deletions(-)
 delete mode 100644 vendor/github.com/aws/amazon-ec2-instance-selector/v2/pkg/awsapi/selectorec2.go
 rename vendor/github.com/aws/amazon-ec2-instance-selector/{v2 => v3}/LICENSE (100%)
 rename vendor/github.com/aws/amazon-ec2-instance-selector/{v2 => v3}/NOTICE (100%)
 create mode 100644 vendor/github.com/aws/amazon-ec2-instance-selector/v3/pkg/awsapi/selectorec2.go
 rename vendor/github.com/aws/amazon-ec2-instance-selector/{v2 => v3}/pkg/bytequantity/bytequantity.go (78%)
 rename vendor/github.com/aws/amazon-ec2-instance-selector/{v2 => v3}/pkg/ec2pricing/ec2pricing.go (68%)
 rename vendor/github.com/aws/amazon-ec2-instance-selector/{v2 => v3}/pkg/ec2pricing/odpricing.go (72%)
 rename vendor/github.com/aws/amazon-ec2-instance-selector/{v2 => v3}/pkg/ec2pricing/spotpricing.go (78%)
 rename vendor/github.com/aws/amazon-ec2-instance-selector/{v2 => v3}/pkg/instancetypes/instancetypes.go (58%)
 rename vendor/github.com/aws/amazon-ec2-instance-selector/{v2 => v3}/pkg/selector/aggregates.go (79%)
 rename vendor/github.com/aws/amazon-ec2-instance-selector/{v2 => v3}/pkg/selector/comparators.go (86%)
 rename vendor/github.com/aws/amazon-ec2-instance-selector/{v2 => v3}/pkg/selector/emr.go (92%)
 rename vendor/github.com/aws/amazon-ec2-instance-selector/{v2 => v3}/pkg/selector/outputs/bubbletea.go (81%)
 rename vendor/github.com/aws/amazon-ec2-instance-selector/{v2 => v3}/pkg/selector/outputs/outputs.go (90%)
 rename vendor/github.com/aws/amazon-ec2-instance-selector/{v2 => v3}/pkg/selector/outputs/sortingView.go (83%)
 rename vendor/github.com/aws/amazon-ec2-instance-selector/{v2 => v3}/pkg/selector/outputs/tableView.go (89%)
 rename vendor/github.com/aws/amazon-ec2-instance-selector/{v2 => v3}/pkg/selector/outputs/verboseView.go (76%)
 rename vendor/github.com/aws/amazon-ec2-instance-selector/{v2 => v3}/pkg/selector/selector.go (75%)
 rename vendor/github.com/aws/amazon-ec2-instance-selector/{v2 => v3}/pkg/selector/services.go (73%)
 rename vendor/github.com/aws/amazon-ec2-instance-selector/{v2 => v3}/pkg/selector/types.go (86%)
 rename vendor/github.com/aws/amazon-ec2-instance-selector/{v2 => v3}/pkg/sorter/sorter.go (93%)
 delete mode 100644 vendor/github.com/aws/aws-sdk-go/LICENSE.txt
 delete mode 100644 vendor/github.com/aws/aws-sdk-go/NOTICE.txt
 delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go
 delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go
 delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go
 delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
 delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go
 delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go
 delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go
 delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/legacy_regions.go
 delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go
 delete mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go
 delete mode 100644 vendor/github.com/charmbracelet/bubbletea/CONTRIBUTING.md
 rename vendor/github.com/charmbracelet/{lipgloss => colorprofile}/.golangci-soft.yml (81%)
 create mode 100644 vendor/github.com/charmbracelet/colorprofile/.golangci.yml
 create mode 100644 vendor/github.com/charmbracelet/colorprofile/.goreleaser.yml
 create mode 100644 vendor/github.com/charmbracelet/colorprofile/LICENSE
 create mode 100644 vendor/github.com/charmbracelet/colorprofile/README.md
 create mode 100644 vendor/github.com/charmbracelet/colorprofile/env.go
 create mode 100644 vendor/github.com/charmbracelet/colorprofile/env_other.go
 create mode 100644 vendor/github.com/charmbracelet/colorprofile/env_windows.go
 create mode 100644 vendor/github.com/charmbracelet/colorprofile/profile.go
 create mode 100644 vendor/github.com/charmbracelet/colorprofile/writer.go
 create mode 100644 vendor/github.com/charmbracelet/lipgloss/Taskfile.yaml
 create mode 100644 vendor/github.com/charmbracelet/lipgloss/ranges.go
 create mode 100644 vendor/github.com/charmbracelet/x/ansi/charset.go
 delete mode 100644 vendor/github.com/charmbracelet/x/ansi/csi.go
 create mode 100644 vendor/github.com/charmbracelet/x/ansi/cwd.go
 delete mode 100644 vendor/github.com/charmbracelet/x/ansi/dcs.go
 create mode 100644 vendor/github.com/charmbracelet/x/ansi/focus.go
 create mode 100644 vendor/github.com/charmbracelet/x/ansi/graphics.go
 create mode 100644 vendor/github.com/charmbracelet/x/ansi/iterm2.go
 create mode 100644 vendor/github.com/charmbracelet/x/ansi/keypad.go
 create mode 100644 vendor/github.com/charmbracelet/x/ansi/kitty/decoder.go
 create mode 100644 vendor/github.com/charmbracelet/x/ansi/kitty/encoder.go
 create mode 100644 vendor/github.com/charmbracelet/x/ansi/kitty/graphics.go
 create mode 100644 vendor/github.com/charmbracelet/x/ansi/kitty/options.go
 create mode 100644 vendor/github.com/charmbracelet/x/ansi/method.go
 create mode 100644 vendor/github.com/charmbracelet/x/ansi/modes.go
 create mode 100644 vendor/github.com/charmbracelet/x/ansi/mouse.go
 create mode 100644 vendor/github.com/charmbracelet/x/ansi/notification.go
 delete mode 100644 vendor/github.com/charmbracelet/x/ansi/osc.go
 delete mode 100644 vendor/github.com/charmbracelet/x/ansi/params.go
 create mode 100644 vendor/github.com/charmbracelet/x/ansi/parser_handler.go
 create mode 100644 vendor/github.com/charmbracelet/x/ansi/parser_sync.go
 create mode 100644 vendor/github.com/charmbracelet/x/ansi/paste.go
 create mode 100644 vendor/github.com/charmbracelet/x/ansi/reset.go
 delete mode 100644 vendor/github.com/charmbracelet/x/ansi/sequence.go
 create mode 100644 vendor/github.com/charmbracelet/x/ansi/sgr.go
 create mode 100644 vendor/github.com/charmbracelet/x/ansi/status.go
 create mode 100644 vendor/github.com/charmbracelet/x/ansi/winop.go
 create mode 100644 vendor/github.com/charmbracelet/x/cellbuf/LICENSE
 create mode 100644 vendor/github.com/charmbracelet/x/cellbuf/buffer.go
 create mode 100644 vendor/github.com/charmbracelet/x/cellbuf/cell.go
 create mode 100644 vendor/github.com/charmbracelet/x/cellbuf/errors.go
 create mode 100644 vendor/github.com/charmbracelet/x/cellbuf/geom.go
 create mode 100644 vendor/github.com/charmbracelet/x/cellbuf/hardscroll.go
 create mode 100644 vendor/github.com/charmbracelet/x/cellbuf/hashmap.go
 create mode 100644 vendor/github.com/charmbracelet/x/cellbuf/link.go
 create mode 100644 vendor/github.com/charmbracelet/x/cellbuf/screen.go
 create mode 100644 vendor/github.com/charmbracelet/x/cellbuf/sequence.go
 create mode 100644 vendor/github.com/charmbracelet/x/cellbuf/style.go
 create mode 100644 vendor/github.com/charmbracelet/x/cellbuf/tabstop.go
 create mode 100644 vendor/github.com/charmbracelet/x/cellbuf/utils.go
 create mode 100644 vendor/github.com/charmbracelet/x/cellbuf/wrap.go
 create mode 100644 vendor/github.com/charmbracelet/x/cellbuf/writer.go
 delete mode 100644 vendor/github.com/imdario/mergo/.deepsource.toml
 delete mode 100644 vendor/github.com/imdario/mergo/.gitignore
 delete mode 100644 vendor/github.com/imdario/mergo/.travis.yml
 delete mode 100644 vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md
 delete mode 100644 vendor/github.com/imdario/mergo/CONTRIBUTING.md
 delete mode 100644 vendor/github.com/imdario/mergo/LICENSE
 delete mode 100644 vendor/github.com/imdario/mergo/README.md
 delete mode 100644 vendor/github.com/imdario/mergo/SECURITY.md
 delete mode 100644 vendor/github.com/imdario/mergo/doc.go
 delete mode 100644 vendor/github.com/imdario/mergo/map.go
 delete mode 100644 vendor/github.com/imdario/mergo/merge.go
 delete mode 100644 vendor/github.com/imdario/mergo/mergo.go
 create mode 100644 vendor/github.com/muesli/termenv/constants_zos.go
 create mode 100644 vendor/github.com/spf13/pflag/.editorconfig
 create mode 100644 vendor/github.com/spf13/pflag/.golangci.yaml
 create mode 100644 vendor/github.com/spf13/pflag/ipnet_slice.go
 create mode 100644 vendor/github.com/xo/terminfo/.gitignore
 create mode 100644 vendor/github.com/xo/terminfo/LICENSE
 create mode 100644 vendor/github.com/xo/terminfo/README.md
 create mode 100644 vendor/github.com/xo/terminfo/caps.go
 create mode 100644 vendor/github.com/xo/terminfo/capvals.go
 create mode 100644 vendor/github.com/xo/terminfo/color.go
 create mode 100644 vendor/github.com/xo/terminfo/dec.go
 create mode 100644 vendor/github.com/xo/terminfo/load.go
 create mode 100644 vendor/github.com/xo/terminfo/param.go
 create mode 100644 vendor/github.com/xo/terminfo/stack.go
 create mode 100644 vendor/github.com/xo/terminfo/terminfo.go
 delete mode 100644 vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go
 delete mode 100644 vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go
 create mode 100644 vendor/golang.org/x/tools/internal/pkgbits/version.go
 create mode 100644 vendor/golang.org/x/tools/internal/versions/constraint.go
 create mode 100644 vendor/golang.org/x/tools/internal/versions/constraint_go121.go

diff --git a/go.mod b/go.mod
index 32bbe1455..0dc4cdcfb 100644
--- a/go.mod
+++ b/go.mod
@@ -1,8 +1,8 @@
 module github.com/redhat-developer/mapt
 
-go 1.22.0
+go 1.23
 
-toolchain go1.23.6
+toolchain go1.23.7
 
 require (
 	github.com/coocood/freecache v1.2.4
@@ -10,8 +10,8 @@ require (
 	github.com/pulumi/pulumi-random/sdk/v4 v4.16.7
 	github.com/pulumi/pulumi/sdk/v3 v3.152.0
 	github.com/sirupsen/logrus v1.9.3
-	github.com/spf13/cobra v1.8.0
-	golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56
+	github.com/spf13/cobra v1.9.1
+	golang.org/x/exp v0.0.0-20240909161429-701f63a606c0
 )
 
 require (
@@ -19,9 +19,9 @@ require (
 	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.2
 	github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v6 v6.3.0
 	github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resourcegraph/armresourcegraph v0.9.0
-	github.com/aws/amazon-ec2-instance-selector/v2 v2.4.2-0.20231006140257-d989c5d76f0e
-	github.com/aws/aws-sdk-go-v2 v1.36.2
-	github.com/aws/aws-sdk-go-v2/config v1.29.7
+	github.com/aws/amazon-ec2-instance-selector/v3 v3.1.1
+	github.com/aws/aws-sdk-go-v2 v1.36.3
+	github.com/aws/aws-sdk-go-v2/config v1.29.8
 	github.com/aws/aws-sdk-go-v2/service/ec2 v1.203.1
 	github.com/aws/aws-sdk-go-v2/service/s3 v1.53.1
 	github.com/pulumi/pulumi-aws-native/sdk v1.25.0
@@ -36,60 +36,60 @@ require (
 	github.com/pulumi/pulumi-tls/sdk/v5 v5.0.9
 )
 
-require github.com/aws/aws-sdk-go v1.50.36 // indirect
-
 require (
 	github.com/BurntSushi/toml v1.4.0 // indirect
 	github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2 // indirect
 	github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.5 // indirect
 	github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.7 // indirect
 	github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.5 // indirect
-	github.com/aws/aws-sdk-go-v2/service/pricing v1.32.17 // indirect
 	github.com/blang/semver/v4 v4.0.0 // indirect
 	github.com/evertras/bubble-table v0.17.1 // indirect
-	github.com/hashicorp/hcl v1.0.0 // indirect
-	github.com/imdario/mergo v0.3.16 // indirect
+	github.com/hashicorp/hcl/v2 v2.23.0 // indirect
 	github.com/mitchellh/go-homedir v1.1.0 // indirect
 	github.com/oliveagle/jsonpath v0.0.0-20180606110733-2e52cf6e6852 // indirect
 	github.com/patrickmn/go-cache v2.1.0+incompatible // indirect
 	github.com/pulumi/pulumi-docker/sdk/v4 v4.5.8 // indirect
 	github.com/sahilm/fuzzy v0.1.1 // indirect
-	golang.org/x/mod v0.19.0 // indirect
-	golang.org/x/tools v0.23.0 // indirect
+	golang.org/x/mod v0.21.0 // indirect
+	golang.org/x/tools v0.25.0 // indirect
 )
 
 require (
-	dario.cat/mergo v1.0.0 // indirect
+	dario.cat/mergo v1.0.1 // indirect
 	github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
 	github.com/AzureAD/microsoft-authentication-library-for-go v1.4.0 // indirect
 	github.com/agext/levenshtein v1.2.3 // indirect
 	github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect
 	github.com/atotto/clipboard v0.1.4 // indirect
-	github.com/aws/aws-sdk-go-v2/credentials v1.17.60 // indirect
-	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.29 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.33 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.33 // indirect
+	github.com/aws/aws-sdk-go-v2/credentials v1.17.61 // indirect
+	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 // indirect
 	github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect
 	github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 // indirect
-	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.14 // indirect
-	github.com/aws/aws-sdk-go-v2/service/sso v1.24.16 // indirect
-	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.15 // indirect
-	github.com/aws/aws-sdk-go-v2/service/sts v1.33.15 // indirect
-	github.com/aws/smithy-go v1.22.2 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 // indirect
+	github.com/aws/aws-sdk-go-v2/service/pricing v1.32.17 // indirect
+	github.com/aws/aws-sdk-go-v2/service/sso v1.25.0 // indirect
+	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.29.0 // indirect
+	github.com/aws/aws-sdk-go-v2/service/sts v1.33.16 // indirect
+	github.com/aws/smithy-go v1.22.3 // indirect
 	github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect
 	github.com/cespare/xxhash/v2 v2.3.0 // indirect
 	github.com/charmbracelet/bubbles v0.20.0 // indirect
-	github.com/charmbracelet/bubbletea v1.1.0 // indirect
-	github.com/charmbracelet/lipgloss v0.13.0 // indirect
-	github.com/charmbracelet/x/ansi v0.2.3 // indirect
-	github.com/charmbracelet/x/term v0.2.0 // indirect
+	github.com/charmbracelet/bubbletea v1.3.3 // indirect
+	github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc // indirect
+	github.com/charmbracelet/lipgloss v1.1.0 // indirect
+	github.com/charmbracelet/x/ansi v0.8.0 // indirect
+	github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd // indirect
+	github.com/charmbracelet/x/term v0.2.1 // indirect
 	github.com/cloudflare/circl v1.6.0 // indirect
 	github.com/cyphar/filepath-securejoin v0.4.1 // indirect
 	github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f // indirect
+	github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect
 	github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/google/uuid v1.6.0 // indirect
-	github.com/hashicorp/hcl/v2 v2.23.0 // indirect
+	github.com/hashicorp/hcl v1.0.0 // indirect
 	github.com/iwdgo/sigintwindows v0.2.2 // indirect
 	github.com/jmespath/go-jmespath v0.4.0 // indirect
 	github.com/kylelemons/godebug v1.1.0 // indirect
@@ -102,8 +102,8 @@ require (
 	github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 // indirect
 	github.com/muesli/cancelreader v0.2.2 // indirect
 	github.com/muesli/reflow v0.3.0 // indirect
-	github.com/muesli/termenv v0.15.2 // indirect
-	github.com/pelletier/go-toml/v2 v2.2.2 // indirect
+	github.com/muesli/termenv v0.16.0 // indirect
+	github.com/pelletier/go-toml/v2 v2.2.3 // indirect
 	github.com/pgavlin/fx v0.1.6 // indirect
 	github.com/pjbgf/sha1cd v0.3.0 // indirect
 	github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
@@ -119,7 +119,8 @@ require (
 	github.com/spf13/afero v1.11.0 // indirect
 	github.com/spf13/cast v1.6.0 // indirect
 	github.com/subosito/gotenv v1.6.0 // indirect
-	github.com/zclconf/go-cty v1.14.1 // indirect
+	github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
+	github.com/zclconf/go-cty v1.15.0 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
 	golang.org/x/sync v0.11.0 // indirect
 	google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 // indirect
@@ -138,8 +139,7 @@ require (
 	github.com/djherbis/times v1.6.0 // indirect
 	github.com/emirpasic/gods v1.18.1 // indirect
 	github.com/fsnotify/fsnotify v1.8.0 // indirect
-	github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect
-	github.com/go-git/go-billy/v5 v5.6.1 // indirect
+	github.com/go-git/go-billy/v5 v5.6.2 // indirect
 	github.com/go-git/go-git/v5 v5.13.1 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/golang/glog v1.2.4 // indirect
@@ -154,14 +154,14 @@ require (
 	github.com/nxadm/tail v1.4.11 // indirect
 	github.com/opentracing/basictracer-go v1.1.0 // indirect
 	github.com/opentracing/opentracing-go v1.2.0 // indirect
-	github.com/pkg/errors v0.9.1
+	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pkg/term v1.1.0 // indirect
 	github.com/pulumi/pulumi-azure-native-sdk/managedidentity/v2 v2.53.0
 	github.com/rivo/uniseg v0.4.7 // indirect
 	github.com/rogpeppe/go-internal v1.12.0 // indirect
 	github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06 // indirect
 	github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect
-	github.com/spf13/pflag v1.0.5
+	github.com/spf13/pflag v1.0.6
 	github.com/spf13/viper v1.19.0
 	github.com/texttheater/golang-levenshtein v1.0.1 // indirect
 	github.com/uber/jaeger-client-go v2.30.0+incompatible // indirect
diff --git a/go.sum b/go.sum
index d9af50ba4..f2d7ffde9 100644
--- a/go.sum
+++ b/go.sum
@@ -1,5 +1,5 @@
-dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
-dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
+dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s=
+dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
 github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0 h1:g0EZJwz7xkXQiZAI5xi9f3WWFYBlX1CPTrR+NDToRkQ=
 github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0/go.mod h1:XCW7KnZet0Opnr7HccfUw1PLc4CjHqpcaxW8DHklNkQ=
 github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.2 h1:F0gBpfdPLGsw+nsgk6aqqkZS1jiixa5WwFe3fk/T3Ys=
@@ -41,24 +41,22 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPd
 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
 github.com/atotto/clipboard v0.1.4 h1:EH0zSVneZPSuFR11BlR9YppQTVDbh5+16AmcJi4g1z4=
 github.com/atotto/clipboard v0.1.4/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI=
-github.com/aws/amazon-ec2-instance-selector/v2 v2.4.2-0.20231006140257-d989c5d76f0e h1:WWOuG4hx+k8KjLO0tHGrYUwY95nKJMMU9A/IGeIEKnU=
-github.com/aws/amazon-ec2-instance-selector/v2 v2.4.2-0.20231006140257-d989c5d76f0e/go.mod h1:xN+IXvhDWtzz9rQrNVLK+wwXvNie/bc6gv9gSP1cxlg=
-github.com/aws/aws-sdk-go v1.50.36 h1:PjWXHwZPuTLMR1NIb8nEjLucZBMzmf84TLoLbD8BZqk=
-github.com/aws/aws-sdk-go v1.50.36/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
-github.com/aws/aws-sdk-go-v2 v1.36.2 h1:Ub6I4lq/71+tPb/atswvToaLGVMxKZvjYDVOWEExOcU=
-github.com/aws/aws-sdk-go-v2 v1.36.2/go.mod h1:LLXuLpgzEbD766Z5ECcRmi8AzSwfZItDtmABVkRLGzg=
+github.com/aws/amazon-ec2-instance-selector/v3 v3.1.1 h1:8pw8T4Ae8tiXbxTxXhVStJSUJpEv29+YOQaDxIv3EaI=
+github.com/aws/amazon-ec2-instance-selector/v3 v3.1.1/go.mod h1:RU/lVVsYHNN7Bwr2UmCw5z2aWPcNIHADY49bj082oYM=
+github.com/aws/aws-sdk-go-v2 v1.36.3 h1:mJoei2CxPutQVxaATCzDUjcZEjVRdpsiiXi2o38yqWM=
+github.com/aws/aws-sdk-go-v2 v1.36.3/go.mod h1:LLXuLpgzEbD766Z5ECcRmi8AzSwfZItDtmABVkRLGzg=
 github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2 h1:x6xsQXGSmW6frevwDA+vi/wqhp1ct18mVXYN08/93to=
 github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2/go.mod h1:lPprDr1e6cJdyYeGXnRaJoP4Md+cDBvi2eOj00BlGmg=
-github.com/aws/aws-sdk-go-v2/config v1.29.7 h1:71nqi6gUbAUiEQkypHQcNVSFJVUFANpSeUNShiwWX2M=
-github.com/aws/aws-sdk-go-v2/config v1.29.7/go.mod h1:yqJQ3nh2HWw/uxd56bicyvmDW4KSc+4wN6lL8pYjynU=
-github.com/aws/aws-sdk-go-v2/credentials v1.17.60 h1:1dq+ELaT5ogfmqtV1eocq8SpOK1NRsuUfmhQtD/XAh4=
-github.com/aws/aws-sdk-go-v2/credentials v1.17.60/go.mod h1:HDes+fn/xo9VeszXqjBVkxOo/aUy8Mc6QqKvZk32GlE=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.29 h1:JO8pydejFKmGcUNiiwt75dzLHRWthkwApIvPoyUtXEg=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.29/go.mod h1:adxZ9i9DRmB8zAT0pO0yGnsmu0geomp5a3uq5XpgOJ8=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.33 h1:knLyPMw3r3JsU8MFHWctE4/e2qWbPaxDYLlohPvnY8c=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.33/go.mod h1:EBp2HQ3f+XCB+5J+IoEbGhoV7CpJbnrsd4asNXmTL0A=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.33 h1:K0+Ne08zqti8J9jwENxZ5NoUyBnaFDTu3apwQJWrwwA=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.33/go.mod h1:K97stwwzaWzmqxO8yLGHhClbVW1tC6VT1pDLk1pGrq4=
+github.com/aws/aws-sdk-go-v2/config v1.29.8 h1:RpwAfYcV2lr/yRc4lWhUM9JRPQqKgKWmou3LV7UfWP4=
+github.com/aws/aws-sdk-go-v2/config v1.29.8/go.mod h1:t+G7Fq1OcO8cXTPPXzxQSnj/5Xzdc9jAAD3Xrn9/Mgo=
+github.com/aws/aws-sdk-go-v2/credentials v1.17.61 h1:Hd/uX6Wo2iUW1JWII+rmyCD7MMhOe7ALwQXN6sKDd1o=
+github.com/aws/aws-sdk-go-v2/credentials v1.17.61/go.mod h1:L7vaLkwHY1qgW0gG1zG0z/X0sQ5tpIY5iI13+j3qI80=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 h1:x793wxmUWVDhshP8WW2mlnXuFrO4cOd3HLBroh1paFw=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30/go.mod h1:Jpne2tDnYiFascUEs2AWHJL9Yp7A5ZVy3TNyxaAjD6M=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 h1:ZK5jHhnrioRkUNOc+hOgQKlUL5JeC3S6JgLxtQ+Rm0Q=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34/go.mod h1:p4VfIceZokChbA9FzMbRGz5OV+lekcVtHlPKEO0gSZY=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 h1:SZwFm17ZUNNg5Np0ioo/gq8Mn6u9w19Mri8DnJ15Jf0=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34/go.mod h1:dFZsC0BLo346mvKQLWmoJxT+Sjp+qcVR1tRVHQGOH9Q=
 github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo=
 github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo=
 github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.5 h1:81KE7vaZzrl7yHBYHVEzYB8sypz11NMOZ40YlWvPxsU=
@@ -73,22 +71,22 @@ github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 h1:eAh2A4b
 github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3/go.mod h1:0yKJC/kb8sAnmlYa6Zs3QVYqaC8ug2AbnNChv5Ox3uA=
 github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.7 h1:ZMeFZ5yk+Ek+jNr1+uwCd2tG89t6oTS5yVWpa6yy2es=
 github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.7/go.mod h1:mxV05U+4JiHqIpGqqYXOHLPKUC6bDXC44bsUhNjOEwY=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.14 h1:2scbY6//jy/s8+5vGrk7l1+UtHl0h9A4MjOO2k/TM2E=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.14/go.mod h1:bRpZPHZpSe5YRHmPfK3h1M7UBFCn2szHzyx0rw04zro=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 h1:dM9/92u2F1JbDaGooxTq18wmmFzbJRfXfVfy96/1CXM=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15/go.mod h1:SwFBy2vjtA0vZbjjaFtfN045boopadnoVPhu4Fv66vY=
 github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.5 h1:f9RyWNtS8oH7cZlbn+/JNPpjUk5+5fLd5lM9M0i49Ys=
 github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.5/go.mod h1:h5CoMZV2VF297/VLhRhO1WF+XYWOzXo+4HsObA4HjBQ=
 github.com/aws/aws-sdk-go-v2/service/pricing v1.32.17 h1:EtZFyL/uhaXlHjIwHW0KSJvppg+Ie1fzQ3wEXLEUj0I=
 github.com/aws/aws-sdk-go-v2/service/pricing v1.32.17/go.mod h1:l7bufyRvU+8mY0Z1BNWbWvjr59dlj9YrLKmeiz5CJ30=
 github.com/aws/aws-sdk-go-v2/service/s3 v1.53.1 h1:6cnno47Me9bRykw9AEv9zkXE+5or7jz8TsskTTccbgc=
 github.com/aws/aws-sdk-go-v2/service/s3 v1.53.1/go.mod h1:qmdkIIAC+GCLASF7R2whgNrJADz0QZPX+Seiw/i4S3o=
-github.com/aws/aws-sdk-go-v2/service/sso v1.24.16 h1:YV6xIKDJp6U7YB2bxfud9IENO1LRpGhe2Tv/OKtPrOQ=
-github.com/aws/aws-sdk-go-v2/service/sso v1.24.16/go.mod h1:DvbmMKgtpA6OihFJK13gHMZOZrCHttz8wPHGKXqU+3o=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.15 h1:kMyK3aKotq1aTBsj1eS8ERJLjqYRRRcsmP33ozlCvlk=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.15/go.mod h1:5uPZU7vSNzb8Y0dm75xTikinegPYK3uJmIHQZFq5Aqo=
-github.com/aws/aws-sdk-go-v2/service/sts v1.33.15 h1:ht1jVmeeo2anR7zDiYJLSnRYnO/9NILXXu42FP3rJg0=
-github.com/aws/aws-sdk-go-v2/service/sts v1.33.15/go.mod h1:xWZ5cOiFe3czngChE4LhCBqUxNwgfwndEF7XlYP/yD8=
-github.com/aws/smithy-go v1.22.2 h1:6D9hW43xKFrRx/tXXfAlIZc4JI+yQe6snnWcQyxSyLQ=
-github.com/aws/smithy-go v1.22.2/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg=
+github.com/aws/aws-sdk-go-v2/service/sso v1.25.0 h1:2U9sF8nKy7UgyEeLiZTRg6ShBS22z8UnYpV6aRFL0is=
+github.com/aws/aws-sdk-go-v2/service/sso v1.25.0/go.mod h1:qs4a9T5EMLl/Cajiw2TcbNt2UNo/Hqlyp+GiuG4CFDI=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.29.0 h1:wjAdc85cXdQR5uLx5FwWvGIHm4OPJhTyzUHU8craXtE=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.29.0/go.mod h1:MlYRNmYu/fGPoxBQVvBYr9nyr948aY/WLUvwBMBJubs=
+github.com/aws/aws-sdk-go-v2/service/sts v1.33.16 h1:BHEK2Q/7CMRMCb3nySi/w8UbIcPhKvYP5s1xf8/izn0=
+github.com/aws/aws-sdk-go-v2/service/sts v1.33.16/go.mod h1:cQnB8CUnxbMU82JvlqjKR2HBOm3fe9pWorWBza6MBJ4=
+github.com/aws/smithy-go v1.22.3 h1:Z//5NuZCSW6R4PhQ93hShNbyBbn8BWCmCVCt+Q8Io5k=
+github.com/aws/smithy-go v1.22.3/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI=
 github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k=
 github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8=
 github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
@@ -100,21 +98,25 @@ github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UF
 github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/charmbracelet/bubbles v0.20.0 h1:jSZu6qD8cRQ6k9OMfR1WlM+ruM8fkPWkHvQWD9LIutE=
 github.com/charmbracelet/bubbles v0.20.0/go.mod h1:39slydyswPy+uVOHZ5x/GjwVAFkCsV8IIVy+4MhzwwU=
-github.com/charmbracelet/bubbletea v1.1.0 h1:FjAl9eAL3HBCHenhz/ZPjkKdScmaS5SK69JAK2YJK9c= -github.com/charmbracelet/bubbletea v1.1.0/go.mod h1:9Ogk0HrdbHolIKHdjfFpyXJmiCzGwy+FesYkZr7hYU4= -github.com/charmbracelet/lipgloss v0.13.0 h1:4X3PPeoWEDCMvzDvGmTajSyYPcZM4+y8sCA/SsA3cjw= -github.com/charmbracelet/lipgloss v0.13.0/go.mod h1:nw4zy0SBX/F/eAO1cWdcvy6qnkDUxr8Lw7dvFrAIbbY= -github.com/charmbracelet/x/ansi v0.2.3 h1:VfFN0NUpcjBRd4DnKfRaIRo53KRgey/nhOoEqosGDEY= -github.com/charmbracelet/x/ansi v0.2.3/go.mod h1:dk73KoMTT5AX5BsX0KrqhsTqAnhZZoCBjs7dGWp4Ktw= -github.com/charmbracelet/x/term v0.2.0 h1:cNB9Ot9q8I711MyZ7myUR5HFWL/lc3OpU8jZ4hwm0x0= -github.com/charmbracelet/x/term v0.2.0/go.mod h1:GVxgxAbjUrmpvIINHIQnJJKpMlHiZ4cktEQCN6GWyF0= +github.com/charmbracelet/bubbletea v1.3.3 h1:WpU6fCY0J2vDWM3zfS3vIDi/ULq3SYphZhkAGGvmEUY= +github.com/charmbracelet/bubbletea v1.3.3/go.mod h1:dtcUCyCGEX3g9tosuYiut3MXgY/Jsv9nKVdibKKRRXo= +github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc h1:4pZI35227imm7yK2bGPcfpFEmuY1gc2YSTShr4iJBfs= +github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc/go.mod h1:X4/0JoqgTIPSFcRA/P6INZzIuyqdFY5rm8tb41s9okk= +github.com/charmbracelet/lipgloss v1.1.0 h1:vYXsiLHVkK7fp74RkV7b2kq9+zDLoEU4MZoFqR/noCY= +github.com/charmbracelet/lipgloss v1.1.0/go.mod h1:/6Q8FR2o+kj8rz4Dq0zQc3vYf7X+B0binUUBwA0aL30= +github.com/charmbracelet/x/ansi v0.8.0 h1:9GTq3xq9caJW8ZrBTe0LIe2fvfLR/bYXKTx2llXn7xE= +github.com/charmbracelet/x/ansi v0.8.0/go.mod h1:wdYl/ONOLHLIVmQaxbIYEC/cRKOQyjTkowiI4blgS9Q= +github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd h1:vy0GVL4jeHEwG5YOXDmi86oYw2yuYUGqz6a8sLwg0X8= +github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd/go.mod h1:xe0nKWGd3eJgtqZRaN9RjMtK7xUYchjzPr7q6kcvCCs= +github.com/charmbracelet/x/term v0.2.1 h1:AQeHeLZ1OqSXhrAWpYUtZyX1T3zVxfpZuEQMIQaGIAQ= +github.com/charmbracelet/x/term v0.2.1/go.mod h1:oQ4enTYFV7QN4m0i9mzHrViD7TQKvNEEkHUMCmsxdUg= github.com/cheggaaa/pb v1.0.29 h1:FckUN5ngEk2LpvuG0fw1GEFx6LtyY2pWI/Z2QgCnEYo= github.com/cheggaaa/pb v1.0.29/go.mod h1:W40334L7FMC5JKWldsTWbdGjLo0RxUKK73K+TuPxX30= github.com/cloudflare/circl v1.6.0 h1:cr5JKic4HI+LkINy2lg3W2jF8sHCVTBncJr5gIIq7qk= github.com/cloudflare/circl v1.6.0/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs= github.com/coocood/freecache v1.2.4 h1:UdR6Yz/X1HW4fZOuH0Z94KwG851GWOSknua5VUbb/5M= github.com/coocood/freecache v1.2.4/go.mod h1:RBUWa/Cy+OHdfTGFEhEuE1pMCMX51Ncizj7rthiQ3vk= -github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/cyphar/filepath-securejoin v0.4.1 h1:JyxxyPEaktOD+GAnqIqTf9A8tHyAG22rowi7HkoSU1s= github.com/cyphar/filepath-securejoin v0.4.1/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -145,8 +147,8 @@ github.com/gliderlabs/ssh v0.3.8 h1:a4YXD1V7xMF9g5nTkdfnja3Sxy1PVDCj1Zg4Wb8vY6c= github.com/gliderlabs/ssh v0.3.8/go.mod h1:xYoytBv1sV0aL3CavoDuJIQNURXkkfPA/wxQ1pL1fAU= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= -github.com/go-git/go-billy/v5 v5.6.1 h1:u+dcrgaguSSkbjzHwelEjc0Yj300NUevrrPphk/SoRA= -github.com/go-git/go-billy/v5 v5.6.1/go.mod 
h1:0AsLr1z2+Uksi4NlElmMblP5rPcDZNRCD8ujZCRR2BE= +github.com/go-git/go-billy/v5 v5.6.2 h1:6Q86EsPXMa7c3YZ3aLAQsMA0VlWmy43r6FHqa/UNbRM= +github.com/go-git/go-billy/v5 v5.6.2/go.mod h1:rcFC2rAsp/erv7CMz9GczHcuD0D32fWzH+MJAU+jaUU= github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4= github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII= github.com/go-git/go-git/v5 v5.13.1 h1:DAQ9APonnlvSWpvolXWIuV6Q6zXy2wHbN4cVlNR5Q+M= @@ -175,8 +177,6 @@ github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/hcl/v2 v2.23.0 h1:Fphj1/gCylPxHutVSEOf2fBOh1VE4AuLV7+kbJf3qos= github.com/hashicorp/hcl/v2 v2.23.0/go.mod h1:62ZYHrXgPoX8xBnzl8QzbWq4dyDsDtfCRgIq1rbJEvA= -github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= -github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/iwdgo/sigintwindows v0.2.2 h1:P6oWzpvV7MrEAmhUgs+zmarrWkyL77ycZz4v7+1gYAE= @@ -234,8 +234,8 @@ github.com/muesli/cancelreader v0.2.2 h1:3I4Kt4BQjOR54NavqnDogx/MIoWBFa0StPA8ELU github.com/muesli/cancelreader v0.2.2/go.mod h1:3XuTXfFS2VjM+HTLZY9Ak0l6eUKfijIfMUZ4EgX0QYo= github.com/muesli/reflow v0.3.0 h1:IFsN6K9NfGtjeggFP+68I4chLZV2yIKsXJFNZ+eWh6s= github.com/muesli/reflow v0.3.0/go.mod h1:pbwTDkVPibjO2kyvBQRBxTWEEGDGq0FlB1BIKtnHY/8= -github.com/muesli/termenv v0.15.2 h1:GohcuySI0QmI3wN8Ok9PtKGkgkFIk7y6Vpb5PvrY+Wo= -github.com/muesli/termenv v0.15.2/go.mod h1:Epx+iuz8sNs7mNKhxzH4fWXGNpZwUaJKRS1noLXviQ8= +github.com/muesli/termenv v0.16.0 h1:S5AlUN9dENB57rsbnkPyfdGuWIlkmzJjbFf0Tf5FWUc= +github.com/muesli/termenv v0.16.0/go.mod h1:ZRfOIKPFDYQoDFF4Olj7/QJbW60Ol/kL1pU3VfY/Cnk= github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY= github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc= github.com/oliveagle/jsonpath v0.0.0-20180606110733-2e52cf6e6852 h1:Yl0tPBa8QPjGmesFh1D0rDy+q1Twx6FyU7VWHi8wZbI= @@ -249,8 +249,8 @@ github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+ github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= -github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= -github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= +github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M= +github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc= github.com/pgavlin/fx v0.1.6 h1:r9jEg69DhNoCd3Xh0+5mIbdbS3PqWrVWujkY76MFRTU= github.com/pgavlin/fx v0.1.6/go.mod h1:KWZJ6fqBBSh8GxHYqwYCf3rYE7Gp2p0N8tJp8xv9u9M= github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4= @@ -319,6 +319,8 @@ github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6g github.com/sagikazarmark/slog-shim v0.1.0/go.mod 
h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= github.com/sahilm/fuzzy v0.1.1 h1:ceu5RHF8DGgoi+/dR5PsECjCDH1BE3Fnmpo7aVXOdRA= github.com/sahilm/fuzzy v0.1.1/go.mod h1:VFvziUEIMCrT6A6tw2RFIXPXXmzXbOsSHF0DOI8ZK9Y= +github.com/samber/lo v1.47.0 h1:z7RynLwP5nbyRscyvcD043DWYoOcYRv3mV8lBeqOCLc= +github.com/samber/lo v1.47.0/go.mod h1:RmDH9Ct32Qy3gduHQuKJ3gW1fMHAnE/fAzQuf6He5cU= github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 h1:lZUw3E0/J3roVtGQ+SCrUrg3ON6NgVqpn3+iol9aGu4= github.com/santhosh-tekuri/jsonschema/v5 v5.3.1/go.mod h1:uToXkOrWAZ6/Oc07xWQrPOhJotwFIyu2bBVN41fcDUY= github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8= @@ -334,15 +336,13 @@ github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= -github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= -github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= +github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= +github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= +github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI= github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= @@ -351,10 +351,6 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= @@ -367,10 +363,12 @@ github.com/uber/jaeger-lib v2.4.1+incompatible h1:td4jdvLcExb4cBISKIpHuGoVXh+dVK github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod 
h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= +github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no= +github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/zclconf/go-cty v1.14.1 h1:t9fyA35fwjjUMcmL5hLER+e/rEPqrbCK1/OSE4SI9KA= -github.com/zclconf/go-cty v1.14.1/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= +github.com/zclconf/go-cty v1.15.0 h1:tTCRWxsexYUmtt/wVxgDClUe+uQusuI443uL6e+5sXQ= +github.com/zclconf/go-cty v1.15.0/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= @@ -381,14 +379,14 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.33.0 h1:IOBPskki6Lysi0lo9qQvbxiQ+FvsCC/YWOecCHAixus= golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M= -golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= -golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8= -golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -436,8 +434,8 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg= -golang.org/x/tools v0.23.0/go.mod 
h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI= +golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE= +golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/pkg/provider/util/instancetypes/aws.go b/pkg/provider/util/instancetypes/aws.go index ce33f51df..d7ec5b96e 100644 --- a/pkg/provider/util/instancetypes/aws.go +++ b/pkg/provider/util/instancetypes/aws.go @@ -2,8 +2,9 @@ package instancetypes import ( "context" - "github.com/aws/amazon-ec2-instance-selector/v2/pkg/bytequantity" - "github.com/aws/amazon-ec2-instance-selector/v2/pkg/selector" + + "github.com/aws/amazon-ec2-instance-selector/v3/pkg/bytequantity" + "github.com/aws/amazon-ec2-instance-selector/v3/pkg/selector" "github.com/aws/aws-sdk-go-v2/config" ec2types "github.com/aws/aws-sdk-go-v2/service/ec2/types" ) diff --git a/vendor/dario.cat/mergo/.gitignore b/vendor/dario.cat/mergo/.gitignore index 529c3412b..45ad0f1ae 100644 --- a/vendor/dario.cat/mergo/.gitignore +++ b/vendor/dario.cat/mergo/.gitignore @@ -13,6 +13,9 @@ # Output of the go coverage tool, specifically when used with LiteIDE *.out +# Golang/Intellij +.idea + # Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 .glide/ diff --git a/vendor/dario.cat/mergo/README.md b/vendor/dario.cat/mergo/README.md index 7d0cf9f32..0b3c48889 100644 --- a/vendor/dario.cat/mergo/README.md +++ b/vendor/dario.cat/mergo/README.md @@ -44,13 +44,21 @@ Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the ## Status -It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, Microsoft, etc](https://github.com/imdario/mergo#mergo-in-the-wild). +Mergo is stable and frozen, ready for production. Check a short list of the projects using at large scale it [here](https://github.com/imdario/mergo#mergo-in-the-wild). + +No new features are accepted. They will be considered for a future v2 that improves the implementation and fixes bugs for corner cases. ### Important notes #### 1.0.0 -In [1.0.0](//github.com/imdario/mergo/releases/tag/1.0.0) Mergo moves to a vanity URL `dario.cat/mergo`. +In [1.0.0](//github.com/imdario/mergo/releases/tag/1.0.0) Mergo moves to a vanity URL `dario.cat/mergo`. No more v1 versions will be released. + +If the vanity URL is causing issues in your project due to a dependency pulling Mergo - it isn't a direct dependency in your project - it is recommended to use [replace](https://github.com/golang/go/wiki/Modules#when-should-i-use-the-replace-directive) to pin the version to the last one with the old import URL: + +``` +replace github.com/imdario/mergo => github.com/imdario/mergo v0.3.16 +``` #### 0.3.9 @@ -64,55 +72,24 @@ If you were using Mergo before April 6th, 2015, please check your project works If Mergo is useful to you, consider buying me a coffee, a beer, or making a monthly donation to allow me to keep building great free software. 
:heart_eyes: -Buy Me a Coffee at ko-fi.com Donate using Liberapay Become my sponsor ### Mergo in the wild -- [moby/moby](https://github.com/moby/moby) -- [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes) -- [vmware/dispatch](https://github.com/vmware/dispatch) -- [Shopify/themekit](https://github.com/Shopify/themekit) -- [imdario/zas](https://github.com/imdario/zas) -- [matcornic/hermes](https://github.com/matcornic/hermes) -- [OpenBazaar/openbazaar-go](https://github.com/OpenBazaar/openbazaar-go) -- [kataras/iris](https://github.com/kataras/iris) -- [michaelsauter/crane](https://github.com/michaelsauter/crane) -- [go-task/task](https://github.com/go-task/task) -- [sensu/uchiwa](https://github.com/sensu/uchiwa) -- [ory/hydra](https://github.com/ory/hydra) -- [sisatech/vcli](https://github.com/sisatech/vcli) -- [dairycart/dairycart](https://github.com/dairycart/dairycart) -- [projectcalico/felix](https://github.com/projectcalico/felix) -- [resin-os/balena](https://github.com/resin-os/balena) -- [go-kivik/kivik](https://github.com/go-kivik/kivik) -- [Telefonica/govice](https://github.com/Telefonica/govice) -- [supergiant/supergiant](supergiant/supergiant) -- [SergeyTsalkov/brooce](https://github.com/SergeyTsalkov/brooce) -- [soniah/dnsmadeeasy](https://github.com/soniah/dnsmadeeasy) -- [ohsu-comp-bio/funnel](https://github.com/ohsu-comp-bio/funnel) -- [EagerIO/Stout](https://github.com/EagerIO/Stout) -- [lynndylanhurley/defsynth-api](https://github.com/lynndylanhurley/defsynth-api) -- [russross/canvasassignments](https://github.com/russross/canvasassignments) -- [rdegges/cryptly-api](https://github.com/rdegges/cryptly-api) -- [casualjim/exeggutor](https://github.com/casualjim/exeggutor) -- [divshot/gitling](https://github.com/divshot/gitling) -- [RWJMurphy/gorl](https://github.com/RWJMurphy/gorl) -- [andrerocker/deploy42](https://github.com/andrerocker/deploy42) -- [elwinar/rambler](https://github.com/elwinar/rambler) -- [tmaiaroto/gopartman](https://github.com/tmaiaroto/gopartman) -- [jfbus/impressionist](https://github.com/jfbus/impressionist) -- [Jmeyering/zealot](https://github.com/Jmeyering/zealot) -- [godep-migrator/rigger-host](https://github.com/godep-migrator/rigger-host) -- [Dronevery/MultiwaySwitch-Go](https://github.com/Dronevery/MultiwaySwitch-Go) -- [thoas/picfit](https://github.com/thoas/picfit) -- [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server) -- [jnuthong/item_search](https://github.com/jnuthong/item_search) -- [bukalapak/snowboard](https://github.com/bukalapak/snowboard) -- [containerssh/containerssh](https://github.com/containerssh/containerssh) -- [goreleaser/goreleaser](https://github.com/goreleaser/goreleaser) -- [tjpnz/structbot](https://github.com/tjpnz/structbot) +Mergo is used by [thousands](https://deps.dev/go/dario.cat%2Fmergo/v1.0.0/dependents) [of](https://deps.dev/go/github.com%2Fimdario%2Fmergo/v0.3.16/dependents) [projects](https://deps.dev/go/github.com%2Fimdario%2Fmergo/v0.3.12), including: + +* [containerd/containerd](https://github.com/containerd/containerd) +* [datadog/datadog-agent](https://github.com/datadog/datadog-agent) +* [docker/cli/](https://github.com/docker/cli/) +* [goreleaser/goreleaser](https://github.com/goreleaser/goreleaser) +* [go-micro/go-micro](https://github.com/go-micro/go-micro) +* [grafana/loki](https://github.com/grafana/loki) +* [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes) +* [masterminds/sprig](github.com/Masterminds/sprig) +* 
[moby/moby](https://github.com/moby/moby) +* [slackhq/nebula](https://github.com/slackhq/nebula) +* [volcano-sh/volcano](https://github.com/volcano-sh/volcano) ## Install @@ -141,6 +118,40 @@ if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil { } ``` +If you need to override pointers, so the source pointer's value is assigned to the destination's pointer, you must use `WithoutDereference`: + +```go +package main + +import ( + "fmt" + + "dario.cat/mergo" +) + +type Foo struct { + A *string + B int64 +} + +func main() { + first := "first" + second := "second" + src := Foo{ + A: &first, + B: 2, + } + + dest := Foo{ + A: &second, + B: 1, + } + + mergo.Merge(&dest, src, mergo.WithOverride, mergo.WithoutDereference) + fmt.Println(*dest.A, dest.B) // prints "first 2" (and uses the fmt import, so the example compiles) +} +``` + Additionally, you can map a `map[string]interface{}` to a struct (and vice versa, from struct to map), following the same restrictions as in `Merge()`. Keys are capitalized to find each corresponding exported field. ```go diff --git a/vendor/dario.cat/mergo/map.go b/vendor/dario.cat/mergo/map.go index b50d5c2a4..759b4f74f 100644 --- a/vendor/dario.cat/mergo/map.go +++ b/vendor/dario.cat/mergo/map.go @@ -58,7 +58,7 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, conf } fieldName := field.Name fieldName = changeInitialCase(fieldName, unicode.ToLower) - if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v), !config.ShouldNotDereference) || overwrite) { + if _, ok := dstMap[fieldName]; !ok || (!isEmptyValue(reflect.ValueOf(src.Field(i).Interface()), !config.ShouldNotDereference) && overwrite) || config.overwriteWithEmptyValue { dstMap[fieldName] = src.Field(i).Interface() } } diff --git a/vendor/dario.cat/mergo/merge.go b/vendor/dario.cat/mergo/merge.go index 0ef9b2138..fd47c95b2 100644 --- a/vendor/dario.cat/mergo/merge.go +++ b/vendor/dario.cat/mergo/merge.go @@ -269,7 +269,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { return } - } else { + } else if src.Elem().Kind() != reflect.Struct { if overwriteWithEmptySrc || (overwrite && !src.IsNil()) || dst.IsNil() { dst.Set(src) } diff --git a/vendor/github.com/aws/amazon-ec2-instance-selector/v2/pkg/awsapi/selectorec2.go b/vendor/github.com/aws/amazon-ec2-instance-selector/v2/pkg/awsapi/selectorec2.go deleted file mode 100644 index 8523d2915..000000000 --- a/vendor/github.com/aws/amazon-ec2-instance-selector/v2/pkg/awsapi/selectorec2.go +++ /dev/null @@ -1,12 +0,0 @@ -package awsapi - -import ( - "context" - "github.com/aws/aws-sdk-go-v2/service/ec2" -) - -type SelectorInterface interface { - ec2.DescribeInstanceTypeOfferingsAPIClient - ec2.DescribeInstanceTypesAPIClient - DescribeAvailabilityZones(ctx context.Context, params *ec2.DescribeAvailabilityZonesInput, optFns ...func(*ec2.Options)) (*ec2.DescribeAvailabilityZonesOutput, error) -} diff --git a/vendor/github.com/aws/amazon-ec2-instance-selector/v2/LICENSE b/vendor/github.com/aws/amazon-ec2-instance-selector/v3/LICENSE similarity index 100% rename from vendor/github.com/aws/amazon-ec2-instance-selector/v2/LICENSE rename to vendor/github.com/aws/amazon-ec2-instance-selector/v3/LICENSE diff --git a/vendor/github.com/aws/amazon-ec2-instance-selector/v2/NOTICE b/vendor/github.com/aws/amazon-ec2-instance-selector/v3/NOTICE similarity index 100% rename from vendor/github.com/aws/amazon-ec2-instance-selector/v2/NOTICE rename to vendor/github.com/aws/amazon-ec2-instance-selector/v3/NOTICE diff --git
a/vendor/github.com/aws/amazon-ec2-instance-selector/v3/pkg/awsapi/selectorec2.go b/vendor/github.com/aws/amazon-ec2-instance-selector/v3/pkg/awsapi/selectorec2.go new file mode 100644 index 000000000..ea8d681b6 --- /dev/null +++ b/vendor/github.com/aws/amazon-ec2-instance-selector/v3/pkg/awsapi/selectorec2.go @@ -0,0 +1,24 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package awsapi + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/service/ec2" +) + +type SelectorInterface interface { + ec2.DescribeInstanceTypeOfferingsAPIClient + ec2.DescribeInstanceTypesAPIClient + DescribeAvailabilityZones(ctx context.Context, params *ec2.DescribeAvailabilityZonesInput, optFns ...func(*ec2.Options)) (*ec2.DescribeAvailabilityZonesOutput, error) +} diff --git a/vendor/github.com/aws/amazon-ec2-instance-selector/v2/pkg/bytequantity/bytequantity.go b/vendor/github.com/aws/amazon-ec2-instance-selector/v3/pkg/bytequantity/bytequantity.go similarity index 78% rename from vendor/github.com/aws/amazon-ec2-instance-selector/v2/pkg/bytequantity/bytequantity.go rename to vendor/github.com/aws/amazon-ec2-instance-selector/v3/pkg/bytequantity/bytequantity.go index b442a2cd6..b907ae12b 100644 --- a/vendor/github.com/aws/amazon-ec2-instance-selector/v2/pkg/bytequantity/bytequantity.go +++ b/vendor/github.com/aws/amazon-ec2-instance-selector/v3/pkg/bytequantity/bytequantity.go @@ -1,15 +1,14 @@ -// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. A copy of the -// License is located at +// http://www.apache.org/licenses/LICENSE-2.0 // -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package bytequantity @@ -22,7 +21,7 @@ import ( ) const ( - /// Examples: 1mb, 1 gb, 1.0tb, 1mib, 2g, 2.001 t + /// Examples: 1mb, 1 gb, 1.0tb, 1mib, 2g, 2.001 t.
byteQuantityRegex = `^([0-9]+\.?[0-9]{0,3})[ ]?(mi?b?|gi?b?|ti?b?)?$` mib = "MiB" gib = "GiB" @@ -33,7 +32,7 @@ const ( maxTiB = math.MaxUint64 / tbConvert ) -// ByteQuantity is a data type representing a byte quantity +// ByteQuantity is a data type representing a byte quantity. type ByteQuantity struct { Quantity uint64 } @@ -54,7 +53,7 @@ func ParseToByteQuantity(byteQuantityStr string) (ByteQuantity, error) { } quantity := uint64(0) switch strings.ToLower(string(unit[0])) { - //mib + // mib case "m": inputDecSplit := strings.Split(quantityStr, ".") if len(inputDecSplit) == 2 { @@ -72,9 +71,9 @@ func ParseToByteQuantity(byteQuantityStr string) (ByteQuantity, error) { if err != nil { return ByteQuantity{}, err } - //gib + // gib case "g": - quantityDec, err := strconv.ParseFloat(quantityStr, 10) + quantityDec, err := strconv.ParseFloat(quantityStr, 64) if err != nil { return ByteQuantity{}, err } @@ -82,9 +81,9 @@ func ParseToByteQuantity(byteQuantityStr string) (ByteQuantity, error) { return ByteQuantity{}, fmt.Errorf("error GiB value is too large") } quantity = uint64(quantityDec * gbConvert) - //tib + // tib case "t": - quantityDec, err := strconv.ParseFloat(quantityStr, 10) + quantityDec, err := strconv.ParseFloat(quantityStr, 64) if err != nil { return ByteQuantity{}, err } @@ -101,53 +100,53 @@ func ParseToByteQuantity(byteQuantityStr string) (ByteQuantity, error) { }, nil } -// FromTiB returns a byte quantity of the passed in tebibytes quantity +// FromTiB returns a byte quantity of the passed in tebibytes quantity. func FromTiB(tib uint64) ByteQuantity { return ByteQuantity{ Quantity: tib * tbConvert, } } -// FromGiB returns a byte quantity of the passed in gibibytes quantity +// FromGiB returns a byte quantity of the passed in gibibytes quantity. func FromGiB(gib uint64) ByteQuantity { return ByteQuantity{ Quantity: gib * gbConvert, } } -// FromMiB returns a byte quantity of the passed in mebibytes quantity +// FromMiB returns a byte quantity of the passed in mebibytes quantity. func FromMiB(mib uint64) ByteQuantity { return ByteQuantity{ Quantity: mib, } } -// StringMiB returns a byte quantity in a mebibytes string representation +// StringMiB returns a byte quantity in a mebibytes string representation. func (bq ByteQuantity) StringMiB() string { return fmt.Sprintf("%.0f %s", bq.MiB(), mib) } -// StringGiB returns a byte quantity in a gibibytes string representation +// StringGiB returns a byte quantity in a gibibytes string representation. func (bq ByteQuantity) StringGiB() string { return fmt.Sprintf("%.3f %s", bq.GiB(), gib) } -// StringTiB returns a byte quantity in a tebibytes string representation +// StringTiB returns a byte quantity in a tebibytes string representation. func (bq ByteQuantity) StringTiB() string { return fmt.Sprintf("%.3f %s", bq.TiB(), tib) } -// MiB returns a byte quantity in mebibytes +// MiB returns a byte quantity in mebibytes. func (bq ByteQuantity) MiB() float64 { return float64(bq.Quantity) } -// GiB returns a byte quantity in gibibytes +// GiB returns a byte quantity in gibibytes. func (bq ByteQuantity) GiB() float64 { return float64(bq.Quantity) * 1 / gbConvert } -// TiB returns a byte quantity in tebibytes +// TiB returns a byte quantity in tebibytes. 
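+// (Editorial note, not part of the upstream change: quantities appear to be stored internally in MiB; assuming the package's conversion constants are gbConvert = 1024 and tbConvert = 1024 * 1024, FromMiB(1048576).TiB() returns 1.0.)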
func (bq ByteQuantity) TiB() float64 { return float64(bq.Quantity) * 1 / tbConvert } diff --git a/vendor/github.com/aws/amazon-ec2-instance-selector/v2/pkg/ec2pricing/ec2pricing.go b/vendor/github.com/aws/amazon-ec2-instance-selector/v3/pkg/ec2pricing/ec2pricing.go similarity index 68% rename from vendor/github.com/aws/amazon-ec2-instance-selector/v2/pkg/ec2pricing/ec2pricing.go rename to vendor/github.com/aws/amazon-ec2-instance-selector/v3/pkg/ec2pricing/ec2pricing.go index 18e7f5fc1..ddef1af4b 100644 --- a/vendor/github.com/aws/amazon-ec2-instance-selector/v2/pkg/ec2pricing/ec2pricing.go +++ b/vendor/github.com/aws/amazon-ec2-instance-selector/v3/pkg/ec2pricing/ec2pricing.go @@ -1,20 +1,21 @@ -// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. A copy of the -// License is located at +// http://www.apache.org/licenses/LICENSE-2.0 // -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package ec2pricing import ( "context" + "fmt" + "log" "time" "github.com/aws/aws-sdk-go-v2/aws" @@ -29,17 +30,16 @@ const ( serviceCode = "AmazonEC2" ) -var ( - DefaultSpotDaysBack = 30 -) +var DefaultSpotDaysBack = 30 -// EC2Pricing is the public struct to interface with AWS pricing APIs +// EC2Pricing is the public struct to interface with AWS pricing APIs. type EC2Pricing struct { ODPricing *OnDemandPricing SpotPricing *SpotPricing + logger *log.Logger } -// EC2PricingIface is the EC2Pricing interface mainly used to mock out ec2pricing during testing +// EC2PricingIface is the EC2Pricing interface mainly used to mock out ec2pricing during testing. type EC2PricingIface interface { GetOnDemandInstanceTypeCost(ctx context.Context, instanceType ec2types.InstanceType) (float64, error) GetSpotInstanceTypeNDayAvgCost(ctx context.Context, instanceType ec2types.InstanceType, availabilityZones []string, days int) (float64, error) @@ -48,46 +48,57 @@ type EC2PricingIface interface { OnDemandCacheCount() int SpotCacheCount() int Save() error + SetLogger(*log.Logger) } // use us-east-1 since pricing only has endpoints in us-east-1 and ap-south-1 // TODO: In the future we may want to allow the client to select which endpoint is used through some mechanism -// but that would likely happen through overriding this entire function as its signature is fixed +// +// but that would likely happen through overriding this entire function as its signature is fixed func modifyPricingRegion(opt *pricing.Options) { opt.Region = "us-east-1" } -// New creates an instance of instance-selector EC2Pricing +// New creates an instance of instance-selector EC2Pricing. 
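+// (Editorial note: as the hunk below shows, New now simply delegates to NewWithCache with a zero TTL and an empty cache directory, which disables on-disk caching.)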
func New(ctx context.Context, cfg aws.Config) (*EC2Pricing, error) { - pricingClient := pricing.NewFromConfig(cfg, modifyPricingRegion) - ec2Client := ec2.NewFromConfig(cfg) - return &EC2Pricing{ - ODPricing: LoadODCacheOrNew(ctx, pricingClient, cfg.Region, 0, ""), - SpotPricing: LoadSpotCacheOrNew(ctx, ec2Client, cfg.Region, 0, "", DefaultSpotDaysBack), - }, nil + return NewWithCache(ctx, cfg, 0, "") } func NewWithCache(ctx context.Context, cfg aws.Config, ttl time.Duration, cacheDir string) (*EC2Pricing, error) { pricingClient := pricing.NewFromConfig(cfg, modifyPricingRegion) ec2Client := ec2.NewFromConfig(cfg) + odPricingCache, err := LoadODCacheOrNew(ctx, pricingClient, cfg.Region, ttl, cacheDir) + if err != nil { + return nil, fmt.Errorf("unable to initialize the OD pricing cache: %w", err) + } + spotPricingCache, err := LoadSpotCacheOrNew(ctx, ec2Client, cfg.Region, ttl, cacheDir, DefaultSpotDaysBack) + if err != nil { + return nil, fmt.Errorf("unable to initialize the spot pricing cache: %w", err) + } return &EC2Pricing{ - ODPricing: LoadODCacheOrNew(ctx, pricingClient, cfg.Region, ttl, cacheDir), - SpotPricing: LoadSpotCacheOrNew(ctx, ec2Client, cfg.Region, ttl, cacheDir, DefaultSpotDaysBack), + ODPricing: odPricingCache, + SpotPricing: spotPricingCache, }, nil } -// OnDemandCacheCount returns the number of items in the OD cache +func (p *EC2Pricing) SetLogger(logger *log.Logger) { + p.logger = logger + p.ODPricing.SetLogger(logger) + p.SpotPricing.SetLogger(logger) +} + +// OnDemandCacheCount returns the number of items in the OD cache. func (p *EC2Pricing) OnDemandCacheCount() int { return p.ODPricing.Count() } -// SpotCacheCount returns the number of items in the spot cache +// SpotCacheCount returns the number of items in the spot cache. func (p *EC2Pricing) SpotCacheCount() int { return p.SpotPricing.Count() } // GetSpotInstanceTypeNDayAvgCost retrieves the spot price history for a given AZ from the past N days and averages the price -// Passing an empty list for availabilityZones will retrieve avg cost for all AZs in the current AWSSession's region +// Passing an empty list for availabilityZones will retrieve avg cost for all AZs in the current AWSSession's region. func (p *EC2Pricing) GetSpotInstanceTypeNDayAvgCost(ctx context.Context, instanceType ec2types.InstanceType, availabilityZones []string, days int) (float64, error) { if len(availabilityZones) == 0 { return p.SpotPricing.Get(ctx, instanceType, "", days) @@ -108,17 +119,17 @@ func (p *EC2Pricing) GetSpotInstanceTypeNDayAvgCost(ctx context.Context, instanc return costs[0], nil } -// GetOnDemandInstanceTypeCost retrieves the on-demand hourly cost for the specified instance type +// GetOnDemandInstanceTypeCost retrieves the on-demand hourly cost for the specified instance type. func (p *EC2Pricing) GetOnDemandInstanceTypeCost(ctx context.Context, instanceType ec2types.InstanceType) (float64, error) { return p.ODPricing.Get(ctx, instanceType) } -// RefreshOnDemandCache makes a bulk request to the pricing api to retrieve all instance type pricing and stores them in a local cache +// RefreshOnDemandCache makes a bulk request to the pricing api to retrieve all instance type pricing and stores them in a local cache. 
func (p *EC2Pricing) RefreshOnDemandCache(ctx context.Context) error { return p.ODPricing.Refresh(ctx) } -// RefreshSpotCache makes a bulk request to the ec2 api to retrieve all spot instance type pricing and stores them in a local cache +// RefreshSpotCache makes a bulk request to the ec2 api to retrieve all spot instance type pricing and stores them in a local cache. func (p *EC2Pricing) RefreshSpotCache(ctx context.Context, days int) error { return p.SpotPricing.Refresh(ctx, days) } diff --git a/vendor/github.com/aws/amazon-ec2-instance-selector/v2/pkg/ec2pricing/odpricing.go b/vendor/github.com/aws/amazon-ec2-instance-selector/v3/pkg/ec2pricing/odpricing.go similarity index 72% rename from vendor/github.com/aws/amazon-ec2-instance-selector/v2/pkg/ec2pricing/odpricing.go rename to vendor/github.com/aws/amazon-ec2-instance-selector/v3/pkg/ec2pricing/odpricing.go index 19cde3c7c..eb234103b 100644 --- a/vendor/github.com/aws/amazon-ec2-instance-selector/v2/pkg/ec2pricing/odpricing.go +++ b/vendor/github.com/aws/amazon-ec2-instance-selector/v3/pkg/ec2pricing/odpricing.go @@ -1,15 +1,14 @@ -// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. A copy of the -// License is located at +// http://www.apache.org/licenses/LICENSE-2.0 // -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
package ec2pricing @@ -18,19 +17,17 @@ import ( "encoding/json" "errors" "fmt" - "io/ioutil" + "io" "log" "os" "path/filepath" "strconv" - "strings" "sync" "time" ec2types "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/aws/aws-sdk-go-v2/service/pricing" pricingtypes "github.com/aws/aws-sdk-go-v2/service/pricing/types" - "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/mitchellh/go-homedir" "github.com/patrickmn/go-cache" "go.uber.org/multierr" @@ -46,6 +43,7 @@ type OnDemandPricing struct { DirectoryPath string cache *cache.Cache pricingClient pricing.GetProductsAPIClient + logger *log.Logger sync.RWMutex } @@ -86,17 +84,10 @@ type PriceDimensionInfo struct { PricePerUnit map[string]string `json:"pricePerUnit"` } -func LoadODCacheOrNew(ctx context.Context, pricingClient pricing.GetProductsAPIClient, region string, fullRefreshTTL time.Duration, directoryPath string) *OnDemandPricing { +func LoadODCacheOrNew(ctx context.Context, pricingClient pricing.GetProductsAPIClient, region string, fullRefreshTTL time.Duration, directoryPath string) (*OnDemandPricing, error) { expandedDirPath, err := homedir.Expand(directoryPath) if err != nil { - log.Printf("Unable to load on-demand pricing cache directory %s: %v", expandedDirPath, err) - return &OnDemandPricing{ - Region: region, - FullRefreshTTL: 0, - DirectoryPath: directoryPath, - cache: cache.New(fullRefreshTTL, fullRefreshTTL), - pricingClient: pricingClient, - } + return nil, fmt.Errorf("unable to load on-demand pricing cache directory %s: %w", expandedDirPath, err) } odPricing := &OnDemandPricing{ Region: region, @@ -104,22 +95,25 @@ func LoadODCacheOrNew(ctx context.Context, pricingClient pricing.GetProductsAPIC DirectoryPath: expandedDirPath, pricingClient: pricingClient, cache: cache.New(fullRefreshTTL, fullRefreshTTL), + logger: log.New(io.Discard, "", 0), } if fullRefreshTTL <= 0 { - odPricing.Clear() - return odPricing + if err := odPricing.Clear(); err != nil { + return nil, fmt.Errorf("unable to clear od pricing cache due to ttl <= 0 %w", err) + } + return odPricing, nil } // Start the cache refresh job - go odCacheRefreshJob(ctx, odPricing) + go odPricing.odCacheRefreshJob(ctx) odCache, err := loadODCacheFrom(fullRefreshTTL, region, expandedDirPath) + if err != nil && !errors.Is(err, os.ErrNotExist) { + return nil, fmt.Errorf("an on-demand pricing cache file could not be loaded: %v", err) + } if err != nil { - if !errors.Is(err, os.ErrNotExist) { - log.Printf("An on-demand pricing cache file could not be loaded: %v", err) - } - return odPricing + odCache = cache.New(0, 0) } odPricing.cache = odCache - return odPricing + return odPricing, nil } func loadODCacheFrom(itemTTL time.Duration, region string, expandedDirPath string) (*cache.Cache, error) { @@ -140,18 +134,22 @@ func getODCacheFilePath(region string, directoryPath string) string { return filepath.Join(directoryPath, fmt.Sprintf("%s-%s", region, ODCacheFileName)) } -func odCacheRefreshJob(ctx context.Context, odPricing *OnDemandPricing) { - if odPricing.FullRefreshTTL <= 0 { +func (c *OnDemandPricing) odCacheRefreshJob(ctx context.Context) { + if c.FullRefreshTTL <= 0 { return } - refreshTicker := time.NewTicker(odPricing.FullRefreshTTL) + refreshTicker := time.NewTicker(c.FullRefreshTTL) for range refreshTicker.C { - if err := odPricing.Refresh(ctx); err != nil { - log.Println(err) + if err := c.Refresh(ctx); err != nil { + c.logger.Printf("Periodic OD Cache Refresh Error: %v", err) } } } +func (c *OnDemandPricing) SetLogger(logger *log.Logger) { + c.logger = 
logger +} + func (c *OnDemandPricing) Refresh(ctx context.Context) error { c.Lock() defer c.Unlock() @@ -182,7 +180,7 @@ func (c *OnDemandPricing) Get(ctx context.Context, instanceType ec2types.Instanc return costs[string(instanceType)], nil } -// Count of items in the cache +// Count of items in the cache. func (c *OnDemandPricing) Count() int { return c.cache.ItemCount() } @@ -195,23 +193,31 @@ func (c *OnDemandPricing) Save() error { if err != nil { return err } - if err := os.Mkdir(c.DirectoryPath, 0755); err != nil && !errors.Is(err, os.ErrExist) { + if err := os.Mkdir(c.DirectoryPath, 0o755); err != nil && !errors.Is(err, os.ErrExist) { return err } - return ioutil.WriteFile(getODCacheFilePath(c.Region, c.DirectoryPath), cacheBytes, 0644) + return os.WriteFile(getODCacheFilePath(c.Region, c.DirectoryPath), cacheBytes, 0600) } func (c *OnDemandPricing) Clear() error { c.Lock() defer c.Unlock() c.cache.Flush() - return os.Remove(getODCacheFilePath(c.Region, c.DirectoryPath)) + if err := os.Remove(getODCacheFilePath(c.Region, c.DirectoryPath)); err != nil && !os.IsNotExist(err) { + return err + } + return nil } // fetchOnDemandPricing makes a bulk request to the pricing api to retrieve all instance type pricing if the instanceType is the empty string // // or, if instanceType is specified, it can request a specific instance type pricing func (c *OnDemandPricing) fetchOnDemandPricing(ctx context.Context, instanceType ec2types.InstanceType) (map[string]float64, error) { + start := time.Now() + calls := 0 + defer func() { + c.logger.Printf("Took %s and %d calls to collect OD pricing", time.Since(start), calls) + }() odPricing := map[string]float64{} productInput := pricing.GetProductsInput{ ServiceCode: c.StringMe(serviceCode), @@ -222,9 +228,10 @@ func (c *OnDemandPricing) fetchOnDemandPricing(ctx context.Context, instanceType p := pricing.NewGetProductsPaginator(c.pricingClient, &productInput) for p.HasMorePages() { + calls++ pricingOutput, err := p.NextPage(ctx) if err != nil { - return nil, fmt.Errorf("failed to get a page, %w", err) + return nil, fmt.Errorf("failed to get next OD pricing page, %w", err) } for _, priceDoc := range pricingOutput.PriceList { @@ -240,8 +247,8 @@ func (c *OnDemandPricing) fetchOnDemandPricing(ctx context.Context, instanceType } // StringMe takes an interface and returns a pointer to a string value -// If the underlying interface kind is not string or *string then nil is returned -func (*OnDemandPricing) StringMe(i interface{}) *string { +// If the underlying interface kind is not string or *string then nil is returned. 
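+// (Editorial note: StringMe is used by getProductsInputFilters below to build pricing API filter values; conversion failures are now reported through c.logger instead of the global log package.)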
+func (c *OnDemandPricing) StringMe(i interface{}) *string { if i == nil { return nil } @@ -251,17 +258,16 @@ func (*OnDemandPricing) StringMe(i interface{}) *string { case string: return &v default: - log.Printf("%s cannot be converted to a string", i) + c.logger.Printf("%s cannot be converted to a string", i) return nil } } func (c *OnDemandPricing) getProductsInputFilters(instanceType ec2types.InstanceType) []pricingtypes.Filter { - regionDescription := c.getRegionForPricingAPI() filters := []pricingtypes.Filter{ {Type: pricingtypes.FilterTypeTermMatch, Field: c.StringMe("ServiceCode"), Value: c.StringMe(serviceCode)}, {Type: pricingtypes.FilterTypeTermMatch, Field: c.StringMe("operatingSystem"), Value: c.StringMe("linux")}, - {Type: pricingtypes.FilterTypeTermMatch, Field: c.StringMe("location"), Value: c.StringMe(regionDescription)}, + {Type: pricingtypes.FilterTypeTermMatch, Field: c.StringMe("regionCode"), Value: c.StringMe(c.Region)}, {Type: pricingtypes.FilterTypeTermMatch, Field: c.StringMe("capacitystatus"), Value: c.StringMe("used")}, {Type: pricingtypes.FilterTypeTermMatch, Field: c.StringMe("preInstalledSw"), Value: c.StringMe("NA")}, {Type: pricingtypes.FilterTypeTermMatch, Field: c.StringMe("tenancy"), Value: c.StringMe("shared")}, @@ -272,31 +278,7 @@ func (c *OnDemandPricing) getProductsInputFilters(instanceType ec2types.Instance return filters } -// getRegionForPricingAPI attempts to retrieve the region description based on the AWS session used to create -// the ec2pricing struct. It then uses the endpoints package in the aws sdk to retrieve the region description -// This is necessary because the pricing API uses the region description rather than a region ID -func (c *OnDemandPricing) getRegionForPricingAPI() string { - endpointResolver := endpoints.DefaultResolver() - partitions := endpointResolver.(endpoints.EnumPartitions).Partitions() - - // use us-east-1 as the default - regionDescription := "US East (N. Virginia)" - for _, partition := range partitions { - regions := partition.Regions() - if region, ok := regions[c.Region]; ok { - regionDescription = region.Description() - } - } - - // endpoints package returns European regions with the word "Europe," but the pricing API expects the word "EU." - // This formatting mismatch is only present with European regions. - // So replace "Europe" with "EU" if it exists in the regionDescription string. - regionDescription = strings.ReplaceAll(regionDescription, "Europe", "EU") - - return regionDescription -} - -// parseOndemandUnitPrice takes a priceList from the pricing API and parses its weirdness +// parseOndemandUnitPrice takes a priceList from the pricing API and parses its weirdness. func (c *OnDemandPricing) parseOndemandUnitPrice(priceList string) (string, float64, error) { var productPriceList PricingList err := json.Unmarshal([]byte(priceList), &productPriceList) diff --git a/vendor/github.com/aws/amazon-ec2-instance-selector/v2/pkg/ec2pricing/spotpricing.go b/vendor/github.com/aws/amazon-ec2-instance-selector/v3/pkg/ec2pricing/spotpricing.go similarity index 78% rename from vendor/github.com/aws/amazon-ec2-instance-selector/v2/pkg/ec2pricing/spotpricing.go rename to vendor/github.com/aws/amazon-ec2-instance-selector/v3/pkg/ec2pricing/spotpricing.go index cbf56e9fc..be3b1fda8 100644 --- a/vendor/github.com/aws/amazon-ec2-instance-selector/v2/pkg/ec2pricing/spotpricing.go +++ b/vendor/github.com/aws/amazon-ec2-instance-selector/v3/pkg/ec2pricing/spotpricing.go @@ -1,15 +1,14 @@ -// Copyright Amazon.com Inc. 
or its affiliates. All Rights Reserved. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. A copy of the -// License is located at +// http://www.apache.org/licenses/LICENSE-2.0 // -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package ec2pricing @@ -18,6 +17,7 @@ import ( "encoding/gob" "errors" "fmt" + "io" "log" "math" "os" @@ -44,6 +44,7 @@ type SpotPricing struct { DirectoryPath string cache *cache.Cache ec2Client ec2.DescribeSpotPriceHistoryAPIClient + logger *log.Logger sync.RWMutex } @@ -53,17 +54,10 @@ type spotPricingEntry struct { Zone string } -func LoadSpotCacheOrNew(ctx context.Context, ec2Client ec2.DescribeSpotPriceHistoryAPIClient, region string, fullRefreshTTL time.Duration, directoryPath string, days int) *SpotPricing { +func LoadSpotCacheOrNew(ctx context.Context, ec2Client ec2.DescribeSpotPriceHistoryAPIClient, region string, fullRefreshTTL time.Duration, directoryPath string, days int) (*SpotPricing, error) { expandedDirPath, err := homedir.Expand(directoryPath) if err != nil { - log.Printf("Unable to load spot pricing cache directory %s: %v", expandedDirPath, err) - return &SpotPricing{ - Region: region, - FullRefreshTTL: 0, - DirectoryPath: directoryPath, - cache: cache.New(fullRefreshTTL, fullRefreshTTL), - ec2Client: ec2Client, - } + return nil, fmt.Errorf("unable to load spot pricing cache directory %s: %w", expandedDirPath, err) } spotPricing := &SpotPricing{ Region: region, @@ -71,23 +65,26 @@ func LoadSpotCacheOrNew(ctx context.Context, ec2Client ec2.DescribeSpotPriceHist DirectoryPath: expandedDirPath, ec2Client: ec2Client, cache: cache.New(fullRefreshTTL, fullRefreshTTL), + logger: log.New(io.Discard, "", 0), } if fullRefreshTTL <= 0 { - spotPricing.Clear() - return spotPricing + if err := spotPricing.Clear(); err != nil { + return nil, err + } + return spotPricing, nil } gob.Register([]*spotPricingEntry{}) // Start the cache refresh job - go spotCacheRefreshJob(ctx, spotPricing, days) + go spotPricing.spotCacheRefreshJob(ctx, days) spotCache, err := loadSpotCacheFrom(fullRefreshTTL, region, expandedDirPath) + if err != nil && !os.IsNotExist(err) { + return nil, fmt.Errorf("a spot pricing cache file could not be loaded: %w", err) + } if err != nil { - if !errors.Is(err, os.ErrNotExist) { - log.Printf("A spot pricing cache file could not be loaded: %v", err) - } - return spotPricing + spotCache = cache.New(0, 0) } spotPricing.cache = spotCache - return spotPricing + return spotPricing, nil } func loadSpotCacheFrom(itemTTL time.Duration, region string, expandedDirPath string) (*cache.Cache, error) { @@ -109,18 +106,22 @@ func getSpotCacheFilePath(region string, directoryPath string) string { return 
filepath.Join(directoryPath, fmt.Sprintf("%s-%s", region, SpotCacheFileName)) } -func spotCacheRefreshJob(ctx context.Context, spotPricing *SpotPricing, days int) { - if spotPricing.FullRefreshTTL <= 0 { +func (c *SpotPricing) spotCacheRefreshJob(ctx context.Context, days int) { + if c.FullRefreshTTL <= 0 { return } - refreshTicker := time.NewTicker(spotPricing.FullRefreshTTL) + refreshTicker := time.NewTicker(c.FullRefreshTTL) for range refreshTicker.C { - if err := spotPricing.Refresh(ctx, days); err != nil { - log.Println(err) + if err := c.Refresh(ctx, days); err != nil { + c.logger.Printf("Periodic Spot Cache Refresh Error: %v", err) } } } +func (c *SpotPricing) SetLogger(logger *log.Logger) { + c.logger = logger +} + func (c *SpotPricing) Refresh(ctx context.Context, days int) error { c.Lock() defer c.Unlock() @@ -210,7 +211,7 @@ func (c *SpotPricing) filterOn(zone string, pricingEntries []*spotPricingEntry) return filtered } -// Count of items in the cache +// Count of items in the cache. func (c *SpotPricing) Count() int { return c.cache.ItemCount() } @@ -219,7 +220,7 @@ func (c *SpotPricing) Save() error { if c.FullRefreshTTL <= 0 || c.Count() == 0 { return nil } - if err := os.Mkdir(c.DirectoryPath, 0755); err != nil && !errors.Is(err, os.ErrExist) { + if err := os.Mkdir(c.DirectoryPath, 0o755); err != nil && !errors.Is(err, os.ErrExist) { return err } file, err := os.Create(getSpotCacheFilePath(c.Region, c.DirectoryPath)) @@ -235,12 +236,20 @@ func (c *SpotPricing) Clear() error { c.Lock() defer c.Unlock() c.cache.Flush() - return os.Remove(getSpotCacheFilePath(c.Region, c.DirectoryPath)) + if err := os.Remove(getSpotCacheFilePath(c.Region, c.DirectoryPath)); err != nil && !os.IsNotExist(err) { + return err + } + return nil } // fetchSpotPricingTimeSeries makes a bulk request to the ec2 api to retrieve all spot instance type pricing for the past n days -// If instanceType is empty, it will fetch for all instance types +// If instanceType is empty, it will fetch for all instance types. func (c *SpotPricing) fetchSpotPricingTimeSeries(ctx context.Context, instanceType ec2types.InstanceType, days int) (map[string][]*spotPricingEntry, error) { + start := time.Now() + calls := 0 + defer func() { + c.logger.Printf("Took %s and %d calls to collect Spot pricing", time.Since(start), calls) + }() spotTimeSeries := map[string][]*spotPricingEntry{} endTime := time.Now().UTC() startTime := endTime.Add(time.Hour * time.Duration(24*-1*days)) @@ -258,9 +267,10 @@ func (c *SpotPricing) fetchSpotPricingTimeSeries(ctx context.Context, instanceTy // Iterate through the Amazon S3 object pages. 
for p.HasMorePages() { + calls++ spotHistoryOutput, err := p.NextPage(ctx) if err != nil { - return nil, fmt.Errorf("failed to get a page, %w", err) + return nil, fmt.Errorf("failed to get a spot pricing page, %w", err) } for _, history := range spotHistoryOutput.SpotPriceHistory { diff --git a/vendor/github.com/aws/amazon-ec2-instance-selector/v2/pkg/instancetypes/instancetypes.go b/vendor/github.com/aws/amazon-ec2-instance-selector/v3/pkg/instancetypes/instancetypes.go similarity index 58% rename from vendor/github.com/aws/amazon-ec2-instance-selector/v2/pkg/instancetypes/instancetypes.go rename to vendor/github.com/aws/amazon-ec2-instance-selector/v3/pkg/instancetypes/instancetypes.go index f38512d37..4b2c4033d 100644 --- a/vendor/github.com/aws/amazon-ec2-instance-selector/v2/pkg/instancetypes/instancetypes.go +++ b/vendor/github.com/aws/amazon-ec2-instance-selector/v3/pkg/instancetypes/instancetypes.go @@ -1,15 +1,14 @@ -// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. A copy of the -// License is located at +// http://www.apache.org/licenses/LICENSE-2.0 // -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package instancetypes @@ -18,7 +17,7 @@ import ( "encoding/json" "errors" "fmt" - "io/ioutil" + "io" "log" "os" "path/filepath" @@ -30,11 +29,9 @@ import ( "github.com/patrickmn/go-cache" ) -var ( - CacheFileName = "ec2-instance-types.json" -) +var CacheFileName = "ec2-instance-types.json" -// Details hold all the information on an ec2 instance type +// Details hold all the information on an ec2 instance type. type Details struct { ec2types.InstanceTypeInfo OndemandPricePerHour *float64 @@ -48,46 +45,48 @@ type Provider struct { lastFullRefresh *time.Time ec2Client ec2.DescribeInstanceTypesAPIClient cache *cache.Cache + logger *log.Logger } -func NewProvider(directoryPath string, region string, ttl time.Duration, ec2Client ec2.DescribeInstanceTypesAPIClient) *Provider { - expandedDirPath, err := homedir.Expand(directoryPath) - if err != nil { - log.Printf("Unable to expand instance type cache directory %s: %v", directoryPath, err) - } +// NewProvider creates a new Instance Types provider used to fetch Instance Type information from EC2. 
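+// (Editorial note: this constructor performs no on-disk caching; FullRefreshTTL is fixed at zero. Use LoadFromOrNew below when a persistent cache directory and refresh TTL are needed.)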
+func NewProvider(region string, ec2Client ec2.DescribeInstanceTypesAPIClient) *Provider { return &Provider{ Region: region, - DirectoryPath: expandedDirPath, - FullRefreshTTL: ttl, + DirectoryPath: "", + FullRefreshTTL: 0, ec2Client: ec2Client, - cache: cache.New(ttl, ttl), + cache: cache.New(0, 0), + logger: log.New(io.Discard, "", 0), } } -func LoadFromOrNew(directoryPath string, region string, ttl time.Duration, ec2Client ec2.DescribeInstanceTypesAPIClient) *Provider { +// LoadFromOrNew creates a new Instance Types provider used to fetch Instance Type information from EC2, optionally backed by an on-disk cache. +func LoadFromOrNew(directoryPath string, region string, ttl time.Duration, ec2Client ec2.DescribeInstanceTypesAPIClient) (*Provider, error) { expandedDirPath, err := homedir.Expand(directoryPath) if err != nil { - log.Printf("Unable to load instance-type cache directory %s: %v", expandedDirPath, err) - return NewProvider(directoryPath, region, ttl, ec2Client) + return nil, fmt.Errorf("unable to load instance-type cache directory %s: %w", expandedDirPath, err) } if ttl <= 0 { - provider := NewProvider(directoryPath, region, ttl, ec2Client) - provider.Clear() - return provider + provider := NewProvider(region, ec2Client) + if err := provider.Clear(); err != nil { + return nil, err + } + return provider, nil } itCache, err := loadFrom(ttl, region, expandedDirPath) + if err != nil && !os.IsNotExist(err) { + return nil, fmt.Errorf("unable to load instance-type cache from %s: %w", expandedDirPath, err) + } if err != nil { - if !errors.Is(err, os.ErrNotExist) { - log.Printf("Unable to load instance-type cache from %s: %v", expandedDirPath, err) - } - return NewProvider(directoryPath, region, ttl, ec2Client) + itCache = cache.New(0, 0) } return &Provider{ Region: region, DirectoryPath: expandedDirPath, ec2Client: ec2Client, cache: itCache, - } + logger: log.New(io.Discard, "", 0), + }, nil } func loadFrom(ttl time.Duration, region string, expandedDirPath string) (*cache.Cache, error) { @@ -107,7 +106,17 @@ func getCacheFilePath(region string, expandedDirPath string) string { return filepath.Join(expandedDirPath, fmt.Sprintf("%s-%s", region, CacheFileName)) } +func (p *Provider) SetLogger(logger *log.Logger) { + p.logger = logger +} + func (p *Provider) Get(ctx context.Context, instanceTypes []ec2types.InstanceType) ([]*Details, error) { + p.logger.Printf("Getting instance types %v", instanceTypes) + start := time.Now() + calls := 0 + defer func() { + p.logger.Printf("Took %s and %d calls to collect Instance Types", time.Since(start), calls) + }() instanceTypeDetails := []*Details{} describeInstanceTypeOpts := &ec2.DescribeInstanceTypesInput{} if len(instanceTypes) != 0 { @@ -120,6 +129,10 @@ func (p *Provider) Get(ctx context.Context, instanceTypes []ec2types.InstanceTyp describeInstanceTypeOpts.InstanceTypes = append(describeInstanceTypeOpts.InstanceTypes, instanceType) } } + // if we were able to retrieve all from cache, return here, else continue to do a remote lookup + if len(describeInstanceTypeOpts.InstanceTypes) == 0 { + return instanceTypeDetails, nil + } } else if p.lastFullRefresh != nil && !p.isFullRefreshNeeded() { for _, item := range p.cache.Items() { instanceTypeDetails = append(instanceTypeDetails, item.Object.(*Details)) @@ -127,14 +140,14 @@ func (p *Provider) Get(ctx context.Context, instanceTypes []ec2types.InstanceTyp return instanceTypeDetails, nil } - s := ec2.NewDescribeInstanceTypesPaginator(p.ec2Client, &ec2.DescribeInstanceTypesInput{}) + s :=
ec2.NewDescribeInstanceTypesPaginator(p.ec2Client, describeInstanceTypeOpts) for s.HasMorePages() { + calls++ instanceTypeOutput, err := s.NextPage(ctx) if err != nil { - return nil, fmt.Errorf("failed to get a page, %w", err) + return nil, fmt.Errorf("failed to get next instance types page, %w", err) } - for _, instanceTypeInfo := range instanceTypeOutput.InstanceTypes { itDetails := &Details{InstanceTypeInfo: instanceTypeInfo} instanceTypeDetails = append(instanceTypeDetails, itDetails) @@ -164,15 +177,18 @@ func (p *Provider) Save() error { if err != nil { return err } - if err := os.Mkdir(p.DirectoryPath, 0755); err != nil && !errors.Is(err, os.ErrExist) { + if err := os.Mkdir(p.DirectoryPath, 0o755); err != nil && !errors.Is(err, os.ErrExist) { return err } - return ioutil.WriteFile(getCacheFilePath(p.Region, p.DirectoryPath), cacheBytes, 0644) + return os.WriteFile(getCacheFilePath(p.Region, p.DirectoryPath), cacheBytes, 0600) } func (p *Provider) Clear() error { p.cache.Flush() - return os.Remove(getCacheFilePath(p.Region, p.DirectoryPath)) + if err := os.Remove(getCacheFilePath(p.Region, p.DirectoryPath)); err != nil && !os.IsNotExist(err) { + return err + } + return nil } func (p *Provider) CacheCount() int { diff --git a/vendor/github.com/aws/amazon-ec2-instance-selector/v2/pkg/selector/aggregates.go b/vendor/github.com/aws/amazon-ec2-instance-selector/v3/pkg/selector/aggregates.go similarity index 79% rename from vendor/github.com/aws/amazon-ec2-instance-selector/v2/pkg/selector/aggregates.go rename to vendor/github.com/aws/amazon-ec2-instance-selector/v3/pkg/selector/aggregates.go index b5f3e613b..6d3fb740b 100644 --- a/vendor/github.com/aws/amazon-ec2-instance-selector/v2/pkg/selector/aggregates.go +++ b/vendor/github.com/aws/amazon-ec2-instance-selector/v3/pkg/selector/aggregates.go @@ -1,36 +1,51 @@ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package selector import ( "context" "fmt" - "github.com/aws/amazon-ec2-instance-selector/v2/pkg/bytequantity" + "regexp" + "github.com/aws/aws-sdk-go-v2/service/ec2" ec2types "github.com/aws/aws-sdk-go-v2/service/ec2/types" - "regexp" + + "github.com/aws/amazon-ec2-instance-selector/v3/pkg/bytequantity" ) const ( - // AggregateLowPercentile is the default lower percentile for resource ranges on similar instance type comparisons + // AggregateLowPercentile is the default lower percentile for resource ranges on similar instance type comparisons. AggregateLowPercentile = 0.9 - // AggregateHighPercentile is the default upper percentile for resource ranges on similar instance type comparisons + // AggregateHighPercentile is the default upper percentile for resource ranges on similar instance type comparisons.
AggregateHighPercentile = 1.2 ) -// FiltersTransform can be implemented to provide custom transforms +var baseAllowedInstanceTypesRE = regexp.MustCompile(`^[cmr][3-9][agi]?\..*$|^t[2-9][gi]?\..*$`) + +// FiltersTransform can be implemented to provide custom transforms. type FiltersTransform interface { Transform(context.Context, Filters) (Filters, error) } -// TransformFn is the func type definition for a FiltersTransform +// TransformFn is the func type definition for a FiltersTransform. type TransformFn func(context.Context, Filters) (Filters, error) // Transform implements FiltersTransform interface on TransformFn -// This allows any TransformFn to be passed into funcs accepting FiltersTransform interface +// This allows any TransformFn to be passed into funcs accepting FiltersTransform interface. func (fn TransformFn) Transform(ctx context.Context, filters Filters) (Filters, error) { return fn(ctx, filters) } -// TransformBaseInstanceType transforms lower level filters based on the instanceTypeBase specs +// TransformBaseInstanceType transforms lower level filters based on the instanceTypeBase specs. func (itf Selector) TransformBaseInstanceType(ctx context.Context, filters Filters) (Filters, error) { if filters.InstanceTypeBase == nil { return filters, nil @@ -82,7 +99,7 @@ func (itf Selector) TransformBaseInstanceType(ctx context.Context, filters Filte return filters, nil } -// TransformFlexible transforms lower level filters based on a set of opinions +// TransformFlexible transforms lower level filters based on a set of opinions. func (itf Selector) TransformFlexible(ctx context.Context, filters Filters) (Filters, error) { if filters.Flexible == nil { return filters, nil @@ -101,11 +118,7 @@ func (itf Selector) TransformFlexible(ctx context.Context, filters Filters) (Fil } if filters.AllowList == nil { - baseAllowedInstanceTypes, err := regexp.Compile("^[cmr][3-9][ag]?\\..*$|^a[1-9]\\..*$|^t[2-9]\\..*$") - if err != nil { - return filters, err - } - filters.AllowList = baseAllowedInstanceTypes + filters.AllowList = baseAllowedInstanceTypesRE } if filters.VCpusRange == nil && filters.MemoryRange == nil { @@ -116,7 +129,7 @@ func (itf Selector) TransformFlexible(ctx context.Context, filters Filters) (Fil return filters, nil } -// TransformForService transforms lower level filters based on the service +// TransformForService transforms lower level filters based on the service. func (itf Selector) TransformForService(ctx context.Context, filters Filters) (Filters, error) { return itf.ServiceRegistry.ExecuteTransforms(filters) } diff --git a/vendor/github.com/aws/amazon-ec2-instance-selector/v2/pkg/selector/comparators.go b/vendor/github.com/aws/amazon-ec2-instance-selector/v3/pkg/selector/comparators.go similarity index 86% rename from vendor/github.com/aws/amazon-ec2-instance-selector/v2/pkg/selector/comparators.go rename to vendor/github.com/aws/amazon-ec2-instance-selector/v3/pkg/selector/comparators.go index eb639ecf9..77cf8fc0b 100644 --- a/vendor/github.com/aws/amazon-ec2-instance-selector/v2/pkg/selector/comparators.go +++ b/vendor/github.com/aws/amazon-ec2-instance-selector/v3/pkg/selector/comparators.go @@ -1,20 +1,18 @@ -// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// Licensed under the Apache License, Version 2.0 (the "License"). 
You may -// not use this file except in compliance with the License. A copy of the -// License is located at +// http://www.apache.org/licenses/LICENSE-2.0 // -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package selector import ( - "log" "math" "reflect" "regexp" @@ -30,7 +28,10 @@ const ( required = "required" ) -var amdRegex = regexp.MustCompile("[a-zA-Z0-9]+a\\.[a-zA-Z0-9]") +var ( + networkPerfRE = regexp.MustCompile(`[0-9]+ Gigabit`) + generationRE = regexp.MustCompile(`[a-zA-Z]+([0-9]+)`) +) func isSupportedFromString(instanceTypeValue *string, target *string) bool { if target == nil { @@ -39,7 +40,7 @@ func isSupportedFromString(instanceTypeValue *string, target *string) bool { if instanceTypeValue == nil { return false } - return *instanceTypeValue == *target + return strings.EqualFold(*instanceTypeValue, *target) } func isSupportedFromStrings(instanceTypeValues []*string, target *string) bool { @@ -81,7 +82,7 @@ func isSupportedUsageClassType(instanceTypeValue []ec2types.UsageClassType, targ } for _, potentialType := range instanceTypeValue { - if potentialType == *target { + if strings.EqualFold(string(potentialType), string(*target)) { return true } } @@ -100,7 +101,7 @@ func isSupportedArchitectureType(instanceTypeValue []ec2types.ArchitectureType, } for _, potentialType := range instanceTypeValue { - if potentialType == *target { + if strings.EqualFold(string(potentialType), string(*target)) { return true } } @@ -118,7 +119,7 @@ func isSupportedVirtualizationType(instanceTypeValue []ec2types.VirtualizationTy return true } for _, potentialType := range instanceTypeValue { - if potentialType == *target { + if strings.EqualFold(string(potentialType), string(*target)) { return true } } @@ -132,7 +133,7 @@ func isSupportedInstanceTypeHypervisorType(instanceTypeValue ec2types.InstanceTy if reflect.ValueOf(*target).IsZero() { return true } - if instanceTypeValue == *target { + if strings.EqualFold(string(instanceTypeValue), string(*target)) { return true } return false @@ -149,7 +150,7 @@ func isSupportedRootDeviceType(instanceTypeValue []ec2types.RootDeviceType, targ return true } for _, potentialType := range instanceTypeValue { - if potentialType == *target { + if strings.EqualFold(string(potentialType), string(*target)) { return true } } @@ -163,7 +164,7 @@ func isMatchingCpuArchitecture(instanceTypeValue CPUManufacturer, target *CPUMan if reflect.ValueOf(*target).IsZero() { return true } - if instanceTypeValue == *target { + if strings.EqualFold(string(instanceTypeValue), string(*target)) { return true } return false @@ -302,12 +303,7 @@ func getNetworkPerformance(networkPerformance *string) *int { if networkPerformance == nil { return aws.Int(-1) } - re, err := regexp.Compile(`[0-9]+ Gigabit`) - if err != nil { - log.Printf("Unable to compile regexp to parse network performance: %s\n", *networkPerformance) - return nil - } - networkBandwidth := re.FindString(*networkPerformance) + 
networkBandwidth := networkPerfRE.FindString(*networkPerformance) if networkBandwidth == "" { return aws.Int(-1) } @@ -379,21 +375,24 @@ func getEBSOptimizedBaselineIOPS(ebsInfo *ec2types.EbsInfo) *int32 { return ebsInfo.EbsOptimizedInfo.BaselineIops } -func getCPUManufacturer(instanceTypeInfo *ec2types.InstanceTypeInfo) CPUManufacturer { - for _, it := range instanceTypeInfo.ProcessorInfo.SupportedArchitectures { - if it == ec2types.ArchitectureTypeArm64 { - return CPUManufacturerAWS - } +// getInstanceTypeGeneration returns the generation from an instance type name +// i.e. c7i.xlarge -> 7 +// if any error occurs, 0 will be returned. +func getInstanceTypeGeneration(instanceTypeName string) *int { + zero := 0 + matches := generationRE.FindStringSubmatch(instanceTypeName) + if len(matches) < 2 { + return &zero } - - if amdRegex.Match([]byte(instanceTypeInfo.InstanceType)) { - return CPUManufacturerAMD + gen, err := strconv.Atoi(matches[1]) + if err != nil { + return &zero } - return CPUManufacturerIntel + return &gen } // supportSyntaxToBool takes an instance spec field that uses ["unsupported", "supported", "required", or "default"] -// and transforms it to a *bool to use in filter execution +// and transforms it to a *bool to use in filter execution. func supportSyntaxToBool(instanceTypeSupport *string) *bool { if instanceTypeSupport == nil { return nil diff --git a/vendor/github.com/aws/amazon-ec2-instance-selector/v2/pkg/selector/emr.go b/vendor/github.com/aws/amazon-ec2-instance-selector/v3/pkg/selector/emr.go similarity index 92% rename from vendor/github.com/aws/amazon-ec2-instance-selector/v2/pkg/selector/emr.go rename to vendor/github.com/aws/amazon-ec2-instance-selector/v3/pkg/selector/emr.go index 212f16f61..3a04f751f 100644 --- a/vendor/github.com/aws/amazon-ec2-instance-selector/v2/pkg/selector/emr.go +++ b/vendor/github.com/aws/amazon-ec2-instance-selector/v3/pkg/selector/emr.go @@ -1,15 +1,14 @@ -// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. A copy of the -// License is located at +// http://www.apache.org/licenses/LICENSE-2.0 // -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package selector @@ -25,10 +24,10 @@ const ( fallbackVersion = "5.20.0" ) -// EMR is a Service type for a custom service filter transform +// EMR is a Service type for a custom service filter transform. type EMR struct{} -// Filters implements the Service interface contract for EMR +// Filters implements the Service interface contract for EMR. 
func (e EMR) Filters(version string) (Filters, error) { filters := Filters{} if version == "" { @@ -53,7 +52,7 @@ func (e EMR) Filters(version string) (Filters, error) { return filters, nil } -// getEMRInstanceTypes returns a list of instance types that emr supports +// getEMRInstanceTypes returns a list of instance types that emr supports. func (e EMR) getEMRInstanceTypes(version semver.Version) ([]string, error) { instanceTypes := []string{} diff --git a/vendor/github.com/aws/amazon-ec2-instance-selector/v2/pkg/selector/outputs/bubbletea.go b/vendor/github.com/aws/amazon-ec2-instance-selector/v3/pkg/selector/outputs/bubbletea.go similarity index 81% rename from vendor/github.com/aws/amazon-ec2-instance-selector/v2/pkg/selector/outputs/bubbletea.go rename to vendor/github.com/aws/amazon-ec2-instance-selector/v3/pkg/selector/outputs/bubbletea.go index 740768747..216cc8ee4 100644 --- a/vendor/github.com/aws/amazon-ec2-instance-selector/v2/pkg/selector/outputs/bubbletea.go +++ b/vendor/github.com/aws/amazon-ec2-instance-selector/v3/pkg/selector/outputs/bubbletea.go @@ -1,28 +1,28 @@ -// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. A copy of the -// License is located at +// http://www.apache.org/licenses/LICENSE-2.0 // -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package outputs import ( - "github.com/aws/amazon-ec2-instance-selector/v2/pkg/instancetypes" - "github.com/aws/amazon-ec2-instance-selector/v2/pkg/sorter" tea "github.com/charmbracelet/bubbletea" "github.com/charmbracelet/lipgloss" "github.com/muesli/termenv" + + "github.com/aws/amazon-ec2-instance-selector/v3/pkg/instancetypes" + "github.com/aws/amazon-ec2-instance-selector/v3/pkg/sorter" ) const ( - // can't get terminal dimensions on startup, so use this + // can't get terminal dimensions on startup, so use this. initialDimensionVal = 30 instanceTypeKey = "instance type" @@ -30,17 +30,15 @@ const ( ) const ( - // table states + // table states. stateTable = "table" stateVerbose = "verbose" stateSorting = "sorting" ) -var ( - controlsStyle = lipgloss.NewStyle().Faint(true) -) +var controlsStyle = lipgloss.NewStyle().Faint(true) -// BubbleTeaModel is used to hold the state of the bubble tea TUI +// BubbleTeaModel is used to hold the state of the bubble tea TUI. type BubbleTeaModel struct { // holds the output currentState of the model currentState string @@ -56,23 +54,23 @@ type BubbleTeaModel struct { } // NewBubbleTeaModel initializes a new bubble tea Model which represents -// a stylized table to display instance types +// a stylized table to display instance types. 
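+// (Editorial note: the model starts in stateTable; the verbose and sorting views are entered through Update on key input, and initVerboseModel no longer receives the instance type list.)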
func NewBubbleTeaModel(instanceTypes []*instancetypes.Details) BubbleTeaModel { return BubbleTeaModel{ currentState: stateTable, tableModel: *initTableModel(instanceTypes), - verboseModel: *initVerboseModel(instanceTypes), + verboseModel: *initVerboseModel(), sortingModel: *initSortingModel(instanceTypes), } } -// Init is used by bubble tea to initialize a bubble tea table +// Init is used by bubble tea to initialize a bubble tea table. func (m BubbleTeaModel) Init() tea.Cmd { return nil } // Update is used by bubble tea to update the state of the bubble -// tea model based on user input +// tea model based on user input. func (m BubbleTeaModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) { switch msg := msg.(type) { case tea.KeyMsg: @@ -163,7 +161,7 @@ func (m BubbleTeaModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) { case tea.WindowSizeMsg: // This is needed to handle a bug with bubble tea // where resizing causes misprints (https://github.com/Evertras/bubble-table/issues/121) - termenv.ClearScreen() + termenv.ClearScreen() //nolint:staticcheck // handle screen resizing m.tableModel = m.tableModel.resizeView(msg) @@ -185,7 +183,7 @@ func (m BubbleTeaModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) { return m, cmd } -// View is used by bubble tea to render the bubble tea model +// View is used by bubble tea to render the bubble tea model. func (m BubbleTeaModel) View() string { switch m.currentState { case stateTable: diff --git a/vendor/github.com/aws/amazon-ec2-instance-selector/v2/pkg/selector/outputs/outputs.go b/vendor/github.com/aws/amazon-ec2-instance-selector/v3/pkg/selector/outputs/outputs.go similarity index 90% rename from vendor/github.com/aws/amazon-ec2-instance-selector/v2/pkg/selector/outputs/outputs.go rename to vendor/github.com/aws/amazon-ec2-instance-selector/v3/pkg/selector/outputs/outputs.go index 319cf9fc8..a460f5f26 100644 --- a/vendor/github.com/aws/amazon-ec2-instance-selector/v2/pkg/selector/outputs/outputs.go +++ b/vendor/github.com/aws/amazon-ec2-instance-selector/v3/pkg/selector/outputs/outputs.go @@ -1,15 +1,14 @@ -// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. A copy of the -// License is located at +// http://www.apache.org/licenses/LICENSE-2.0 // -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. // Package outputs provides types for implementing instance type output functions as well as prebuilt output functions. 
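+//
+// A minimal sketch (assuming a populated details []*instancetypes.Details):
+//
+//	names := outputs.SimpleInstanceTypeOutput(details)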
package outputs @@ -24,13 +23,13 @@ import ( "strings" "text/tabwriter" - "github.com/aws/amazon-ec2-instance-selector/v2/pkg/instancetypes" + "github.com/aws/amazon-ec2-instance-selector/v3/pkg/instancetypes" ) const columnTag = "column" // wideColumnsData stores the data that should be displayed on each column -// of a wide output row +// of a wide output row. type wideColumnsData struct { instanceName string `column:"Instance Type"` vcpu int32 `column:"VCPUs"` @@ -45,10 +44,10 @@ type wideColumnsData struct { gpuMemory string `column:"GPU Mem (GiB)"` gpuInfo string `column:"GPU Info"` odPrice string `column:"On-Demand Price/Hr"` - spotPrice string `column:"Spot Price/Hr (30d avg)"` + spotPrice string `column:"Spot Price/Hr"` } -// SimpleInstanceTypeOutput is an OutputFn which outputs a slice of instance type names +// SimpleInstanceTypeOutput is an OutputFn which outputs a slice of instance type names. func SimpleInstanceTypeOutput(instanceTypeInfoSlice []*instancetypes.Details) []string { instanceTypeStrings := []string{} for _, instanceTypeInfo := range instanceTypeInfoSlice { @@ -57,7 +56,7 @@ func SimpleInstanceTypeOutput(instanceTypeInfoSlice []*instancetypes.Details) [] return instanceTypeStrings } -// VerboseInstanceTypeOutput is an OutputFn which outputs a slice of instance type names +// VerboseInstanceTypeOutput is an OutputFn which outputs a slice of instance type names. func VerboseInstanceTypeOutput(instanceTypeInfoSlice []*instancetypes.Details) []string { output, err := json.MarshalIndent(instanceTypeInfoSlice, "", " ") if err != nil { @@ -70,7 +69,7 @@ func VerboseInstanceTypeOutput(instanceTypeInfoSlice []*instancetypes.Details) [ return []string{string(output)} } -// TableOutputShort is an OutputFn which returns a CLI table for easy reading +// TableOutputShort is an OutputFn which returns a CLI table for easy reading. func TableOutputShort(instanceTypeInfoSlice []*instancetypes.Details) []string { if len(instanceTypeInfoSlice) == 0 { return nil @@ -106,7 +105,7 @@ func TableOutputShort(instanceTypeInfoSlice []*instancetypes.Details) []string { return []string{buf.String()} } -// TableOutputWide is an OutputFn which returns a detailed CLI table for easy reading +// TableOutputWide is an OutputFn which returns a detailed CLI table for easy reading. func TableOutputWide(instanceTypeInfoSlice []*instancetypes.Details) []string { if len(instanceTypeInfoSlice) == 0 { return nil @@ -157,7 +156,7 @@ func TableOutputWide(instanceTypeInfoSlice []*instancetypes.Details) []string { return []string{buf.String()} } -// OneLineOutput is an output function which prints the instance type names on a single line separated by commas +// OneLineOutput is an output function which prints the instance type names on a single line separated by commas. func OneLineOutput(instanceTypeInfoSlice []*instancetypes.Details) []string { instanceTypeNames := []string{} for _, instanceType := range instanceTypeInfoSlice { @@ -196,7 +195,7 @@ func reverse(s string) string { } // getWideColumnsData returns the column data necessary for a wide output for each of -// the given instance types +// the given instance types. func getWideColumnsData(instanceTypes []*instancetypes.Details) []*wideColumnsData { columnsData := []*wideColumnsData{} @@ -254,7 +253,7 @@ func getWideColumnsData(instanceTypes []*instancetypes.Details) []*wideColumnsDa } // getUnderlyingValue returns the underlying value of the given -// reflect.Value type +// reflect.Value type. 
func getUnderlyingValue(value reflect.Value) interface{} { var val interface{} diff --git a/vendor/github.com/aws/amazon-ec2-instance-selector/v2/pkg/selector/outputs/sortingView.go b/vendor/github.com/aws/amazon-ec2-instance-selector/v3/pkg/selector/outputs/sortingView.go similarity index 83% rename from vendor/github.com/aws/amazon-ec2-instance-selector/v2/pkg/selector/outputs/sortingView.go rename to vendor/github.com/aws/amazon-ec2-instance-selector/v3/pkg/selector/outputs/sortingView.go index 671b4d4e4..cca1a236d 100644 --- a/vendor/github.com/aws/amazon-ec2-instance-selector/v2/pkg/selector/outputs/sortingView.go +++ b/vendor/github.com/aws/amazon-ec2-instance-selector/v3/pkg/selector/outputs/sortingView.go @@ -1,15 +1,14 @@ -// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. A copy of the -// License is located at +// http://www.apache.org/licenses/LICENSE-2.0 // -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package outputs @@ -18,31 +17,32 @@ import ( "io" "strings" - "github.com/aws/amazon-ec2-instance-selector/v2/pkg/instancetypes" - "github.com/aws/amazon-ec2-instance-selector/v2/pkg/sorter" "github.com/charmbracelet/bubbles/key" "github.com/charmbracelet/bubbles/list" "github.com/charmbracelet/bubbles/textinput" tea "github.com/charmbracelet/bubbletea" "github.com/charmbracelet/lipgloss" + + "github.com/aws/amazon-ec2-instance-selector/v3/pkg/instancetypes" + "github.com/aws/amazon-ec2-instance-selector/v3/pkg/sorter" ) const ( - // formatting + // formatting. sortDirectionPadding = 2 sortingTitlePadding = 3 sortingFooterPadding = 2 - // controls + // controls. sortingListControls = "Controls: ↑/↓ - up/down • enter - select filter • tab - toggle direction • esc - return to table • q - quit" sortingTextControls = "Controls: ↑/↓ - up/down • tab - toggle direction • enter - enter json path" - // sort direction text + // sort direction text. ascendingText = "ASCENDING" descendingText = "DESCENDING" ) -// sortingModel holds the state for the sorting view +// sortingModel holds the state for the sorting view. type sortingModel struct { // list which holds the available shorting shorthands shorthandList list.Model @@ -55,32 +55,32 @@ type sortingModel struct { isDescending bool } -// format styles +// format styles. var ( - // list + // list. listTitleStyle = lipgloss.NewStyle().Bold(true).Underline(true) listItemStyle = lipgloss.NewStyle().PaddingLeft(4) selectedItemStyle = lipgloss.NewStyle().PaddingLeft(2).Foreground(lipgloss.Color("170")) - // text + // text. 
descendingStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("#0096FF")) ascendingStyle = lipgloss.NewStyle().Foreground(lipgloss.Color("#DAF7A6")) sortDirectionStyle = lipgloss.NewStyle().Bold(true).Underline(true).PaddingLeft(2) ) -// implement Item interface for list +// implement Item interface for list. type item string func (i item) FilterValue() string { return "" } func (i item) Title() string { return string(i) } func (i item) Description() string { return "" } -// implement ItemDelegate for list +// implement ItemDelegate for list. type itemDelegate struct{} -func (d itemDelegate) Height() int { return 1 } -func (d itemDelegate) Spacing() int { return 0 } -func (d itemDelegate) Update(msg tea.Msg, m *list.Model) tea.Cmd { return nil } +func (d itemDelegate) Height() int { return 1 } +func (d itemDelegate) Spacing() int { return 0 } +func (d itemDelegate) Update(_ tea.Msg, _ *list.Model) tea.Cmd { return nil } func (d itemDelegate) Render(w io.Writer, m list.Model, index int, listItem list.Item) { i, ok := listItem.(item) if !ok { @@ -99,11 +99,11 @@ func (d itemDelegate) Render(w io.Writer, m list.Model, index int, listItem list } } - fmt.Fprintf(w, fn(str)) + fmt.Fprint(w, fn(str)) } // initSortingModel initializes and returns a new tableModel based on the given -// instance type details +// instance type details. func initSortingModel(instanceTypes []*instancetypes.Details) *sortingModel { shorthandList := list.New(*createListItems(), itemDelegate{}, initialDimensionVal, initialDimensionVal) shorthandList.Title = "Select sorting filter:" @@ -126,7 +126,7 @@ func initSortingModel(instanceTypes []*instancetypes.Details) *sortingModel { } } -// createListKeyMap creates a KeyMap with the controls for the shorthand list +// createListKeyMap creates a KeyMap with the controls for the shorthand list. func createListKeyMap() list.KeyMap { return list.KeyMap{ CursorDown: key.NewBinding( @@ -138,7 +138,7 @@ func createListKeyMap() list.KeyMap { } } -// createListItems creates a list item for shorthand sorting flag +// createListItems creates a list item for shorthand sorting flag. func createListItems() *[]list.Item { shorthandFlags := []string{ sorter.GPUCountField, @@ -166,7 +166,7 @@ func createListItems() *[]list.Item { // resizeSortingView will change the dimensions of the sorting view // in order to accommodate the new window dimensions represented by -// the given tea.WindowSizeMsg +// the given tea.WindowSizeMsg. func (m sortingModel) resizeView(msg tea.WindowSizeMsg) sortingModel { shorthandList := &m.shorthandList shorthandList.SetWidth(msg.Width) @@ -189,7 +189,7 @@ func (m sortingModel) resizeView(msg tea.WindowSizeMsg) sortingModel { return m } -// update updates the state of the sortingModel +// update updates the state of the sortingModel. func (m sortingModel) update(msg tea.Msg) (sortingModel, tea.Cmd) { var cmd tea.Cmd var cmds []tea.Cmd @@ -227,7 +227,7 @@ func (m sortingModel) update(msg tea.Msg) (sortingModel, tea.Cmd) { return m, tea.Batch(cmds...) } -// view returns a string representing the sorting view +// view returns a string representing the sorting view. 
func (m sortingModel) view() string { outputStr := strings.Builder{} diff --git a/vendor/github.com/aws/amazon-ec2-instance-selector/v2/pkg/selector/outputs/tableView.go b/vendor/github.com/aws/amazon-ec2-instance-selector/v3/pkg/selector/outputs/tableView.go similarity index 89% rename from vendor/github.com/aws/amazon-ec2-instance-selector/v2/pkg/selector/outputs/tableView.go rename to vendor/github.com/aws/amazon-ec2-instance-selector/v3/pkg/selector/outputs/tableView.go index 9dc05bcee..015007bc9 100644 --- a/vendor/github.com/aws/amazon-ec2-instance-selector/v2/pkg/selector/outputs/tableView.go +++ b/vendor/github.com/aws/amazon-ec2-instance-selector/v3/pkg/selector/outputs/tableView.go @@ -1,15 +1,14 @@ -// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. A copy of the -// License is located at +// http://www.apache.org/licenses/LICENSE-2.0 // -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package outputs @@ -18,21 +17,22 @@ import ( "reflect" "strings" - "github.com/aws/amazon-ec2-instance-selector/v2/pkg/instancetypes" - "github.com/aws/amazon-ec2-instance-selector/v2/pkg/sorter" "github.com/charmbracelet/bubbles/key" "github.com/charmbracelet/bubbles/textinput" tea "github.com/charmbracelet/bubbletea" "github.com/charmbracelet/lipgloss" "github.com/evertras/bubble-table/table" + + "github.com/aws/amazon-ec2-instance-selector/v3/pkg/instancetypes" + "github.com/aws/amazon-ec2-instance-selector/v3/pkg/sorter" ) const ( - // table formatting + // table formatting. headerAndFooterPadding = 8 headerPadding = 2 - // controls + // controls. tableControls = "Controls: ↑/↓ - up/down • ←/→ - left/right • shift + ←/→ - pg up/down • e - expand • f - filter • t - trim toggle • space - select • s - sort • q - quit" ellipses = "..." @@ -60,30 +60,28 @@ type tableModel struct { canSelectRows bool } -var ( - customBorder = table.Border{ - Top: "─", - Left: "│", - Right: "│", - Bottom: "─", - - TopRight: "╮", - TopLeft: "╭", - BottomRight: "╯", - BottomLeft: "╰", - - TopJunction: "┬", - LeftJunction: "├", - RightJunction: "┤", - BottomJunction: "┴", - InnerJunction: "┼", - - InnerDivider: "│", - } -) +var customBorder = table.Border{ + Top: "─", + Left: "│", + Right: "│", + Bottom: "─", + + TopRight: "╮", + TopLeft: "╭", + BottomRight: "╯", + BottomLeft: "╰", + + TopJunction: "┬", + LeftJunction: "├", + RightJunction: "┤", + BottomJunction: "┴", + InnerJunction: "┼", + + InnerDivider: "│", +} // initTableModel initializes and returns a new tableModel based on the given -// instance type details +// instance type details. 
func initTableModel(instanceTypes []*instancetypes.Details) *tableModel { table := createTable(instanceTypes) @@ -97,7 +95,7 @@ func initTableModel(instanceTypes []*instancetypes.Details) *tableModel { } } -// createFilterTextInput creates and styles a text input for filtering +// createFilterTextInput creates and styles a text input for filtering. func createFilterTextInput() textinput.Model { filterTextInput := textinput.New() filterTextInput.Prompt = "Filter: " @@ -106,7 +104,7 @@ func createFilterTextInput() textinput.Model { return filterTextInput } -// createRows creates a row for each instance type in the passed in list +// createRows creates a row for each instance type in the passed in list. func createRows(columnsData []*wideColumnsData, instanceTypes []*instancetypes.Details) *[]table.Row { rows := []table.Row{} @@ -139,7 +137,7 @@ func createRows(columnsData []*wideColumnsData, instanceTypes []*instancetypes.D return &rows } -// maxColWidth finds the maximum width element in the given column +// maxColWidth finds the maximum width element in the given column. func maxColWidth(columnsData []*wideColumnsData, columnHeader string) int { // default max width is the width of the header itself with padding maxWidth := len(columnHeader) + headerPadding @@ -171,7 +169,7 @@ func maxColWidth(columnsData []*wideColumnsData, columnHeader string) int { } // createColumns creates columns based on the tags in the wideColumnsData -// struct +// struct. func createColumns(columnsData []*wideColumnsData) *[]table.Column { columns := []table.Column{} @@ -189,7 +187,7 @@ func createColumns(columnsData []*wideColumnsData) *[]table.Column { return &columns } -// createTableKeyMap creates a KeyMap with the controls for the table +// createTableKeyMap creates a KeyMap with the controls for the table. func createTableKeyMap() *table.KeyMap { keys := table.KeyMap{ RowDown: key.NewBinding( @@ -216,7 +214,7 @@ func createTableKeyMap() *table.KeyMap { } // createTable creates an intractable table which contains information about all of -// the given instance types +// the given instance types. func createTable(instanceTypes []*instancetypes.Details) table.Model { // calculate and fetch all column data from instance types columnsData := getWideColumnsData(instanceTypes) @@ -241,7 +239,7 @@ func createTable(instanceTypes []*instancetypes.Details) table.Model { } // resizeView will change the dimensions of the table in order to accommodate -// the new window dimensions represented by the given tea.WindowSizeMsg +// the new window dimensions represented by the given tea.WindowSizeMsg. func (m tableModel) resizeView(msg tea.WindowSizeMsg) tableModel { // handle width changes m.table = m.table.WithMaxTotalWidth(msg.Width) @@ -266,7 +264,7 @@ func (m tableModel) resizeView(msg tea.WindowSizeMsg) tableModel { return m } -// updateFooter updates the page and controls string in the table footer +// updateFooter updates the page and controls string in the table footer. func (m tableModel) updateFooter() tableModel { controlsStr := tableControls @@ -289,7 +287,7 @@ func (m tableModel) updateFooter() tableModel { return m } -// update updates the state of the tableModel +// update updates the state of the tableModel. 
func (m tableModel) update(msg tea.Msg) (tableModel, tea.Cmd) { switch msg := msg.(type) { case tea.KeyMsg: @@ -364,7 +362,7 @@ func (m tableModel) update(msg tea.Msg) (tableModel, tea.Cmd) { return m, cmd } -// view returns a string representing the table view +// view returns a string representing the table view. func (m tableModel) view() string { outputStr := strings.Builder{} @@ -379,7 +377,7 @@ func (m tableModel) view() string { return outputStr.String() } -// sortTable sorts the table based on the sorting direction and sorting filter +// sortTable sorts the table based on the sorting direction and sorting filter. func (m tableModel) sortTable(sortFilter string, sortDirection string) (tableModel, error) { instanceTypes, rowMap := m.getInstanceTypeFromRows() _ = rowMap @@ -408,7 +406,7 @@ func (m tableModel) sortTable(sortFilter string, sortDirection string) (tableMod } // getInstanceTypeFromRows goes through the rows of the table model and returns both a list of instance -// types and a mapping of instances to rows +// types and a mapping of instances to rows. func (m tableModel) getInstanceTypeFromRows() ([]*instancetypes.Details, map[string]table.Row) { instanceTypes := []*instancetypes.Details{} rowMap := make(map[string]table.Row) @@ -437,7 +435,7 @@ func (m tableModel) getInstanceTypeFromRows() ([]*instancetypes.Details, map[str return instanceTypes, rowMap } -// getUnfilteredRows gets the rows in the given table model without any filtering applied +// getUnfilteredRows gets the rows in the given table model without any filtering applied. func (m tableModel) getUnfilteredRows() []table.Row { m.table = m.table.Filtered(false) rows := m.table.GetVisibleRows() @@ -445,7 +443,7 @@ func (m tableModel) getUnfilteredRows() []table.Row { return rows } -// trim will trim the table to only the selected rows +// trim will trim the table to only the selected rows. func (m tableModel) trim() tableModel { // store current state of rows before trimming m.originalRows = m.getUnfilteredRows() @@ -461,7 +459,7 @@ func (m tableModel) trim() tableModel { return m } -// untrim will return the table to the original rows +// untrim will return the table to the original rows. func (m tableModel) untrim() tableModel { m.table = m.table.WithRows(m.originalRows) diff --git a/vendor/github.com/aws/amazon-ec2-instance-selector/v2/pkg/selector/outputs/verboseView.go b/vendor/github.com/aws/amazon-ec2-instance-selector/v3/pkg/selector/outputs/verboseView.go similarity index 76% rename from vendor/github.com/aws/amazon-ec2-instance-selector/v2/pkg/selector/outputs/verboseView.go rename to vendor/github.com/aws/amazon-ec2-instance-selector/v3/pkg/selector/outputs/verboseView.go index 0852f73da..a2481b149 100644 --- a/vendor/github.com/aws/amazon-ec2-instance-selector/v2/pkg/selector/outputs/verboseView.go +++ b/vendor/github.com/aws/amazon-ec2-instance-selector/v3/pkg/selector/outputs/verboseView.go @@ -1,15 +1,14 @@ -// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. A copy of the -// License is located at +// http://www.apache.org/licenses/LICENSE-2.0 // -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. 
This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package outputs @@ -18,7 +17,6 @@ import ( "math" "strings" - "github.com/aws/amazon-ec2-instance-selector/v2/pkg/instancetypes" ec2types "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/charmbracelet/bubbles/viewport" tea "github.com/charmbracelet/bubbletea" @@ -26,14 +24,14 @@ import ( ) const ( - // verbose view formatting + // verbose view formatting. outlinePadding = 8 - // controls + // controls. verboseControls = "Controls: ↑/↓ - up/down • esc - return to table • q - quit" ) -// verboseModel represents the current state of the verbose view +// verboseModel represents the current state of the verbose view. type verboseModel struct { // model for verbose output viewport viewport viewport.Model @@ -42,7 +40,7 @@ type verboseModel struct { focusedInstanceName ec2types.InstanceType } -// styling for viewport +// styling for viewport. var ( titleStyle = func() lipgloss.Style { b := lipgloss.RoundedBorder() @@ -53,13 +51,13 @@ var ( infoStyle = func() lipgloss.Style { b := lipgloss.RoundedBorder() b.Left = "┤" - return titleStyle.Copy().BorderStyle(b) + return titleStyle.BorderStyle(b) }() ) // initVerboseModel initializes and returns a new verboseModel based on the given -// instance type details -func initVerboseModel(instanceTypes []*instancetypes.Details) *verboseModel { +// instance type details. +func initVerboseModel() *verboseModel { viewportModel := viewport.New(initialDimensionVal, initialDimensionVal) viewportModel.MouseWheelEnabled = true @@ -69,7 +67,7 @@ func initVerboseModel(instanceTypes []*instancetypes.Details) *verboseModel { } // resizeView will change the dimensions of the verbose viewport in order to accommodate -// the new window dimensions represented by the given tea.WindowSizeMsg +// the new window dimensions represented by the given tea.WindowSizeMsg. func (m verboseModel) resizeView(msg tea.WindowSizeMsg) verboseModel { // handle width changes m.viewport.Width = msg.Width @@ -86,7 +84,7 @@ func (m verboseModel) resizeView(msg tea.WindowSizeMsg) verboseModel { return m } -// update updates the state of the verboseModel +// update updates the state of the verboseModel. func (m verboseModel) update(msg tea.Msg) (verboseModel, tea.Cmd) { var cmd tea.Cmd m.viewport, cmd = m.viewport.Update(msg) diff --git a/vendor/github.com/aws/amazon-ec2-instance-selector/v2/pkg/selector/selector.go b/vendor/github.com/aws/amazon-ec2-instance-selector/v3/pkg/selector/selector.go similarity index 75% rename from vendor/github.com/aws/amazon-ec2-instance-selector/v2/pkg/selector/selector.go rename to vendor/github.com/aws/amazon-ec2-instance-selector/v3/pkg/selector/selector.go index 897864c34..4057d2f05 100644 --- a/vendor/github.com/aws/amazon-ec2-instance-selector/v2/pkg/selector/selector.go +++ b/vendor/github.com/aws/amazon-ec2-instance-selector/v3/pkg/selector/selector.go @@ -1,15 +1,14 @@ -// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. 
+// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. A copy of the -// License is located at +// http://www.apache.org/licenses/LICENSE-2.0 // -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. // Package selector provides filtering logic for Amazon EC2 Instance Types based on declarative resource specfications. package selector @@ -17,6 +16,7 @@ package selector import ( "context" "fmt" + "io" "log" "reflect" "regexp" @@ -25,21 +25,20 @@ import ( "sync" "time" - "github.com/aws/amazon-ec2-instance-selector/v2/pkg/ec2pricing" - "github.com/aws/amazon-ec2-instance-selector/v2/pkg/instancetypes" - "github.com/aws/amazon-ec2-instance-selector/v2/pkg/selector/outputs" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/service/ec2" ec2types "github.com/aws/aws-sdk-go-v2/service/ec2/types" "go.uber.org/multierr" -) -var ( - // Version is overridden at compilation with the version based on the git tag - versionID = "dev" + "github.com/aws/amazon-ec2-instance-selector/v3/pkg/ec2pricing" + "github.com/aws/amazon-ec2-instance-selector/v3/pkg/instancetypes" + "github.com/aws/amazon-ec2-instance-selector/v3/pkg/selector/outputs" ) +// Version is overridden at compilation with the version based on the git tag +var versionID = "dev" + const ( locationFilterKey = "location" zoneIDLocationType = ec2types.LocationTypeAvailabilityZoneId @@ -47,7 +46,7 @@ const ( regionNameLocationType = ec2types.LocationTypeRegion sdkName = "instance-selector" - // Filter Keys + // Filter Keys. cpuArchitecture = "cpuArchitecture" cpuManufacturer = "cpuManufacturer" @@ -91,6 +90,7 @@ const ( freeTier = "freeTier" autoRecovery = "autoRecovery" dedicatedHosts = "dedicatedHosts" + generation = "generation" cpuArchitectureAMD64 = "amd64" @@ -99,26 +99,12 @@ const ( pricePerHour = "pricePerHour" ) -// New creates an instance of Selector provided an aws session +// New creates an instance of Selector provided an aws session. 
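+//
+// A minimal sketch (assuming cfg comes from aws-sdk-go-v2's config.LoadDefaultConfig):
+//
+//	sel, err := selector.New(ctx, cfg)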
func New(ctx context.Context, cfg aws.Config) (*Selector, error) {
-	serviceRegistry := NewRegistry()
-	serviceRegistry.RegisterAWSServices()
-	ec2Client := ec2.NewFromConfig(cfg, func(options *ec2.Options) {
-		options.APIOptions = append(options.APIOptions, middleware.AddUserAgentKeyValue(sdkName, versionID))
-	})
-	pricingClient, err := ec2pricing.New(ctx, cfg)
-	if err != nil {
-		return nil, err
-	}
-
-	return &Selector{
-		EC2:                   ec2Client,
-		EC2Pricing:            pricingClient,
-		InstanceTypesProvider: instancetypes.LoadFromOrNew("", cfg.Region, 0, ec2Client),
-		ServiceRegistry:       serviceRegistry,
-	}, nil
+	return NewWithCache(ctx, cfg, 0, "")
 }
 
+// NewWithCache creates an instance of Selector backed by an on-disk cache provided an aws session and cache configuration parameters.
 func NewWithCache(ctx context.Context, cfg aws.Config, ttl time.Duration, cacheDir string) (*Selector, error) {
 	serviceRegistry := NewRegistry()
 	serviceRegistry.RegisterAWSServices()
@@ -130,59 +116,66 @@ func NewWithCache(ctx context.Context, cfg aws.Config, ttl time.Duration, cacheD
 		return nil, err
 	}
 
+	instanceTypeProvider, err := instancetypes.LoadFromOrNew(cacheDir, cfg.Region, ttl, ec2Client)
+	if err != nil {
+		return nil, fmt.Errorf("unable to initialize instance type provider: %w", err)
+	}
+
 	return &Selector{
 		EC2:                   ec2Client,
 		EC2Pricing:            pricingClient,
-		InstanceTypesProvider: instancetypes.LoadFromOrNew(cacheDir, cfg.Region, ttl, ec2Client),
+		InstanceTypesProvider: instanceTypeProvider,
 		ServiceRegistry:       serviceRegistry,
+		Logger:                log.New(io.Discard, "", 0),
 	}, nil
 }
 
-func (itf Selector) Save() error {
-	return multierr.Append(itf.EC2Pricing.Save(), itf.InstanceTypesProvider.Save())
+// SetLogger can be called to emit more detailed logs about what the selector is doing,
+// including things like API timings.
+// If SetLogger is not called, no logs will be displayed.
+func (s *Selector) SetLogger(logger *log.Logger) {
+	s.Logger = logger
+	s.InstanceTypesProvider.SetLogger(logger)
+	s.EC2Pricing.SetLogger(logger)
+}
+
+// Save persists the selector cache data to disk if caching is configured.
+func (s Selector) Save() error {
+	return multierr.Append(s.EC2Pricing.Save(), s.InstanceTypesProvider.Save())
 }
 
 // Filter accepts a Filters struct which is used to select the available instance types
-// matching the criteria within Filters and returns a simple list of instance type strings
-//
-// Deprecated: This function will be replaced with GetFilteredInstanceTypes() and
-// OutputInstanceTypes() in the next major version.
-func (itf Selector) Filter(ctx context.Context, filters Filters) ([]string, error) {
+// matching the criteria within Filters and returns a simple list of instance type strings.
+func (s Selector) Filter(ctx context.Context, filters Filters) ([]string, error) {
 	outputFn := InstanceTypesOutputFn(outputs.SimpleInstanceTypeOutput)
-	output, _, err := itf.FilterWithOutput(ctx, filters, outputFn)
+	output, _, err := s.FilterWithOutput(ctx, filters, outputFn)
 	return output, err
 }
 
 // FilterVerbose accepts a Filters struct which is used to select the available instance types
-// matching the criteria within Filters and returns a list instanceTypeInfo
-//
-// Deprecated: This function will be replaced with GetFilteredInstanceTypes() in the next
-// major version.
-func (itf Selector) FilterVerbose(ctx context.Context, filters Filters) ([]*instancetypes.Details, error) {
-	instanceTypeInfoSlice, err := itf.rawFilter(ctx, filters)
+// matching the criteria within Filters and returns a list of instanceTypeInfo.
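+//
+// A minimal sketch (assuming an initialized Selector s and a context ctx):
+//
+//	details, err := s.FilterVerbose(ctx, Filters{MaxResults: aws.Int(10)})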
+func (s Selector) FilterVerbose(ctx context.Context, filters Filters) ([]*instancetypes.Details, error) { + instanceTypeInfoSlice, err := s.rawFilter(ctx, filters) if err != nil { return nil, err } - instanceTypeInfoSlice, _ = itf.truncateResults(filters.MaxResults, instanceTypeInfoSlice) + instanceTypeInfoSlice, _ = s.truncateResults(filters.MaxResults, instanceTypeInfoSlice) return instanceTypeInfoSlice, nil } // FilterWithOutput accepts a Filters struct which is used to select the available instance types -// matching the criteria within Filters and returns a list of strings based on the custom outputFn -// -// Deprecated: This function will be replaced with GetFilteredInstanceTypes() and -// OutputInstanceTypes() in the next major version. -func (itf Selector) FilterWithOutput(ctx context.Context, filters Filters, outputFn InstanceTypesOutput) ([]string, int, error) { - instanceTypeInfoSlice, err := itf.rawFilter(ctx, filters) +// matching the criteria within Filters and returns a list of strings based on the custom outputFn. +func (s Selector) FilterWithOutput(ctx context.Context, filters Filters, outputFn InstanceTypesOutput) ([]string, int, error) { + instanceTypeInfoSlice, err := s.rawFilter(ctx, filters) if err != nil { return nil, 0, err } - instanceTypeInfoSlice, numOfItemsTruncated := itf.truncateResults(filters.MaxResults, instanceTypeInfoSlice) + instanceTypeInfoSlice, numOfItemsTruncated := s.truncateResults(filters.MaxResults, instanceTypeInfoSlice) output := outputFn.Output(instanceTypeInfoSlice) return output, numOfItemsTruncated, nil } -func (itf Selector) truncateResults(maxResults *int, instanceTypeInfoSlice []*instancetypes.Details) ([]*instancetypes.Details, int) { +func (s Selector) truncateResults(maxResults *int, instanceTypeInfoSlice []*instancetypes.Details) ([]*instancetypes.Details, int) { if maxResults == nil { return instanceTypeInfoSlice, 0 } @@ -194,11 +187,11 @@ func (itf Selector) truncateResults(maxResults *int, instanceTypeInfoSlice []*in } // AggregateFilterTransform takes higher level filters which are used to affect multiple raw filters in an opinionated way. -func (itf Selector) AggregateFilterTransform(ctx context.Context, filters Filters) (Filters, error) { +func (s Selector) AggregateFilterTransform(ctx context.Context, filters Filters) (Filters, error) { transforms := []FiltersTransform{ - TransformFn(itf.TransformBaseInstanceType), - TransformFn(itf.TransformFlexible), - TransformFn(itf.TransformForService), + TransformFn(s.TransformBaseInstanceType), + TransformFn(s.TransformFlexible), + TransformFn(s.TransformForService), } var err error for _, transform := range transforms { @@ -211,9 +204,9 @@ func (itf Selector) AggregateFilterTransform(ctx context.Context, filters Filter } // rawFilter accepts a Filters struct which is used to select the available instance types -// matching the criteria within Filters and returns the detailed specs of matching instance types -func (itf Selector) rawFilter(ctx context.Context, filters Filters) ([]*instancetypes.Details, error) { - filters, err := itf.AggregateFilterTransform(ctx, filters) +// matching the criteria within Filters and returns the detailed specs of matching instance types. 
+func (s Selector) rawFilter(ctx context.Context, filters Filters) ([]*instancetypes.Details, error) { + filters, err := s.AggregateFilterTransform(ctx, filters) if err != nil { return nil, err } @@ -231,12 +224,12 @@ func (itf Selector) rawFilter(ctx context.Context, filters Filters) ([]*instance } else if filters.Region != nil { locations = []string{*filters.Region} } - locationInstanceOfferings, err := itf.RetrieveInstanceTypesSupportedInLocations(ctx, locations) + locationInstanceOfferings, err := s.RetrieveInstanceTypesSupportedInLocations(ctx, locations) if err != nil { return nil, err } - instanceTypeDetails, err := itf.InstanceTypesProvider.Get(ctx, nil) + instanceTypeDetails, err := s.InstanceTypesProvider.Get(ctx, nil) if err != nil { return nil, err } @@ -247,33 +240,35 @@ func (itf Selector) rawFilter(ctx context.Context, filters Filters) ([]*instance wg.Add(1) go func(instanceTypeInfo instancetypes.Details) { defer wg.Done() - it, err := itf.prepareFilter(ctx, filters, instanceTypeInfo, availabilityZones, locationInstanceOfferings) + it, err := s.prepareFilter(ctx, filters, instanceTypeInfo, availabilityZones, locationInstanceOfferings) if err != nil { - log.Println(err) + s.Logger.Printf("Unable to prepare filter for %s, %v", instanceTypeInfo.InstanceType, err) } if it != nil { instanceTypes <- it } }(*instanceTypeInfo) } - wg.Wait() - close(instanceTypes) + go func() { + wg.Wait() + close(instanceTypes) + }() for it := range instanceTypes { filteredInstanceTypes = append(filteredInstanceTypes, it) } return sortInstanceTypeInfo(filteredInstanceTypes), nil } -func (itf Selector) prepareFilter(ctx context.Context, filters Filters, instanceTypeInfo instancetypes.Details, availabilityZones []string, locationInstanceOfferings map[ec2types.InstanceType]string) (*instancetypes.Details, error) { +func (s Selector) prepareFilter(ctx context.Context, filters Filters, instanceTypeInfo instancetypes.Details, availabilityZones []string, locationInstanceOfferings map[ec2types.InstanceType]string) (*instancetypes.Details, error) { instanceTypeName := instanceTypeInfo.InstanceType isFpga := instanceTypeInfo.FpgaInfo != nil var instanceTypeHourlyPriceForFilter float64 // Price used to filter based on usage class var instanceTypeHourlyPriceOnDemand, instanceTypeHourlyPriceSpot *float64 // If prices are fetched, populate the fields irrespective of the price filters - if itf.EC2Pricing.OnDemandCacheCount() > 0 { - price, err := itf.EC2Pricing.GetOnDemandInstanceTypeCost(ctx, instanceTypeName) + if s.EC2Pricing.OnDemandCacheCount() > 0 { + price, err := s.EC2Pricing.GetOnDemandInstanceTypeCost(ctx, instanceTypeName) if err != nil { - log.Printf("Could not retrieve instantaneous hourly on-demand price for instance type %s - %s\n", instanceTypeName, err) + s.Logger.Printf("Could not retrieve instantaneous hourly on-demand price for instance type %s - %s\n", instanceTypeName, err) } else { instanceTypeHourlyPriceOnDemand = &price instanceTypeInfo.OndemandPricePerHour = instanceTypeHourlyPriceOnDemand @@ -287,10 +282,10 @@ func (itf Selector) prepareFilter(ctx context.Context, filters Filters, instance } } - if itf.EC2Pricing.SpotCacheCount() > 0 && isSpotUsageClass { - price, err := itf.EC2Pricing.GetSpotInstanceTypeNDayAvgCost(ctx, instanceTypeName, availabilityZones, 30) + if s.EC2Pricing.SpotCacheCount() > 0 && isSpotUsageClass { + price, err := s.EC2Pricing.GetSpotInstanceTypeNDayAvgCost(ctx, instanceTypeName, availabilityZones, 30) if err != nil { - log.Printf("Could not retrieve 30 day avg 
hourly spot price for instance type %s\n", instanceTypeName) + s.Logger.Printf("Could not retrieve 30 day avg hourly spot price for instance type %s\n", instanceTypeName) } else { instanceTypeHourlyPriceSpot = &price instanceTypeInfo.SpotPrice = instanceTypeHourlyPriceSpot @@ -308,11 +303,22 @@ func (itf Selector) prepareFilter(ctx context.Context, filters Filters, instance eneaSupport := string(instanceTypeInfo.NetworkInfo.EnaSupport) ebsOptimizedSupport := string(instanceTypeInfo.EbsInfo.EbsOptimizedSupport) + // If an empty slice is passed, treat the filter as nil + filterInstanceTypes := filters.InstanceTypes + if filterInstanceTypes != nil && len(*filterInstanceTypes) == 0 { + filterInstanceTypes = nil + } + + var cpuManufacturerFilter *string + if filters.CPUManufacturer != nil { + cpuManufacturerFilter = aws.String(string(*filters.CPUManufacturer)) + } + // filterToInstanceSpecMappingPairs is a map of filter name [key] to filter pair [value]. // A filter pair includes user input filter value and instance spec value retrieved from DescribeInstanceTypes filterToInstanceSpecMappingPairs := map[string]filterPair{ cpuArchitecture: {filters.CPUArchitecture, instanceTypeInfo.ProcessorInfo.SupportedArchitectures}, - cpuManufacturer: {filters.CPUManufacturer, getCPUManufacturer(&instanceTypeInfo.InstanceTypeInfo)}, + cpuManufacturer: {cpuManufacturerFilter, instanceTypeInfo.ProcessorInfo.Manufacturer}, usageClass: {filters.UsageClass, instanceTypeInfo.SupportedUsageClasses}, rootDeviceType: {filters.RootDeviceType, instanceTypeInfo.SupportedRootDeviceTypes}, hibernationSupported: {filters.HibernationSupported, instanceTypeInfo.HibernationSupported}, @@ -334,7 +340,7 @@ func (itf Selector) prepareFilter(ctx context.Context, filters Filters, instance networkPerformance: {filters.NetworkPerformance, getNetworkPerformance(instanceTypeInfo.NetworkInfo.NetworkPerformance)}, networkEncryption: {filters.NetworkEncryption, instanceTypeInfo.NetworkInfo.EncryptionInTransitSupported}, ipv6: {filters.IPv6, instanceTypeInfo.NetworkInfo.Ipv6Supported}, - instanceTypes: {filters.InstanceTypes, instanceTypeInfo.InstanceType}, + instanceTypes: {filterInstanceTypes, aws.String(string(instanceTypeInfo.InstanceType))}, virtualizationType: {filters.VirtualizationType, instanceTypeInfo.SupportedVirtualizationTypes}, pricePerHour: {filters.PricePerHour, &instanceTypeHourlyPriceForFilter}, instanceStorageRange: {filters.InstanceStorageRange, getInstanceStorage(instanceTypeInfo.InstanceStorageInfo)}, @@ -352,6 +358,7 @@ func (itf Selector) prepareFilter(ctx context.Context, filters Filters, instance inferenceAcceleratorManufacturer: {filters.InferenceAcceleratorManufacturer, getInferenceAcceleratorManufacturers(instanceTypeInfo.InferenceAcceleratorInfo)}, inferenceAcceleratorModel: {filters.InferenceAcceleratorModel, getInferenceAcceleratorModels(instanceTypeInfo.InferenceAcceleratorInfo)}, dedicatedHosts: {filters.DedicatedHosts, instanceTypeInfo.DedicatedHostsSupported}, + generation: {filters.Generation, getInstanceTypeGeneration(string(instanceTypeInfo.InstanceType))}, } if isInDenyList(filters.DenyList, instanceTypeName) || !isInAllowList(filters.AllowList, instanceTypeName) { @@ -363,7 +370,7 @@ func (itf Selector) prepareFilter(ctx context.Context, filters Filters, instance } var isInstanceSupported bool - isInstanceSupported, err := itf.executeFilters(ctx, filterToInstanceSpecMappingPairs, instanceTypeName) + isInstanceSupported, err := s.executeFilters(ctx, filterToInstanceSpecMappingPairs, instanceTypeName) 
if err != nil { return nil, err } @@ -373,7 +380,7 @@ func (itf Selector) prepareFilter(ctx context.Context, filters Filters, instance return &instanceTypeInfo, nil } -// sortInstanceTypeInfo will sort based on instance type info alpha-numerically +// sortInstanceTypeInfo will sort based on instance type info alpha-numerically. func sortInstanceTypeInfo(instanceTypeInfoSlice []*instancetypes.Details) []*instancetypes.Details { if len(instanceTypeInfoSlice) < 2 { return instanceTypeInfoSlice @@ -388,9 +395,9 @@ func sortInstanceTypeInfo(instanceTypeInfoSlice []*instancetypes.Details) []*ins // executeFilters accepts a mapping of filter name to filter pairs which are iterated through // to determine if the instance type matches the filter values. -func (itf Selector) executeFilters(ctx context.Context, filterToInstanceSpecMapping map[string]filterPair, instanceType ec2types.InstanceType) (bool, error) { +func (s Selector) executeFilters(ctx context.Context, filterToInstanceSpecMapping map[string]filterPair, instanceType ec2types.InstanceType) (bool, error) { verdict := make(chan bool, len(filterToInstanceSpecMapping)+1) - errs := make(chan error) + errs := make(chan error, len(filterToInstanceSpecMapping)) ctx, cancel := context.WithCancel(ctx) defer cancel() var wg sync.WaitGroup @@ -432,6 +439,8 @@ func (itf Selector) executeFilters(ctx context.Context, filterToInstanceSpecMapp } } +// exec executes a specific filterPair (user value & instance spec) with a specific instance type +// If the filterPair matches, true is returned. func exec(instanceType ec2types.InstanceType, filterName string, filter filterPair) (bool, error) { filterVal := filter.filterValue instanceSpec := filter.instanceSpec @@ -443,7 +452,7 @@ func exec(instanceType ec2types.InstanceType, filterName string, filter filterPa instanceSpecType := reflect.ValueOf(instanceSpec).Type() filterType := filterValReflection.Type() filterDetailsMsg := fmt.Sprintf("filter (%s: %s => %s) corresponding to instance spec (%s => %s) for instance type %s", filterName, filterVal, filterType, instanceSpec, instanceSpecType, instanceType) - invalidInstanceSpecTypeMsg := fmt.Sprintf("Unable to process for %s", filterDetailsMsg) + errInvalidInstanceSpec := fmt.Errorf("unable to process for %s", filterDetailsMsg) // Determine appropriate filter comparator by switching on filter type switch filter := filterVal.(type) { @@ -458,7 +467,7 @@ func exec(instanceType ec2types.InstanceType, filterName string, filter filterPa return false, nil } default: - return false, fmt.Errorf(invalidInstanceSpecTypeMsg) + return false, errInvalidInstanceSpec } case *bool: switch iSpec := instanceSpec.(type) { @@ -467,7 +476,7 @@ func exec(instanceType ec2types.InstanceType, filterName string, filter filterPa return false, nil } default: - return false, fmt.Errorf(invalidInstanceSpecTypeMsg) + return false, errInvalidInstanceSpec } case *IntRangeFilter: switch iSpec := instanceSpec.(type) { @@ -475,12 +484,20 @@ func exec(instanceType ec2types.InstanceType, filterName string, filter filterPa if !isSupportedWithRangeInt64(iSpec, filter) { return false, nil } + case *int32: + var iSpec64 *int64 + if iSpec != nil { + iSpec64 = aws.Int64(int64(*iSpec)) + } + if !isSupportedWithRangeInt64(iSpec64, filter) { + return false, nil + } case *int: if !isSupportedWithRangeInt(iSpec, filter) { return false, nil } default: - return false, fmt.Errorf(invalidInstanceSpecTypeMsg) + return false, errInvalidInstanceSpec } case *Int32RangeFilter: switch iSpec := instanceSpec.(type) { @@ 
-489,7 +506,7 @@ func exec(instanceType ec2types.InstanceType, filterName string, filter filterPa return false, nil } default: - return false, fmt.Errorf(invalidInstanceSpecTypeMsg) + return false, errInvalidInstanceSpec } case *Float64RangeFilter: switch iSpec := instanceSpec.(type) { @@ -498,7 +515,7 @@ func exec(instanceType ec2types.InstanceType, filterName string, filter filterPa return false, nil } default: - return false, fmt.Errorf(invalidInstanceSpecTypeMsg) + return false, errInvalidInstanceSpec } case *ByteQuantityRangeFilter: mibRange := Uint64RangeFilter{ @@ -528,7 +545,7 @@ func exec(instanceType ec2types.InstanceType, filterName string, filter filterPa return false, nil } default: - return false, fmt.Errorf(invalidInstanceSpecTypeMsg) + return false, errInvalidInstanceSpec } case *float64: switch iSpec := instanceSpec.(type) { @@ -537,7 +554,7 @@ func exec(instanceType ec2types.InstanceType, filterName string, filter filterPa return false, nil } default: - return false, fmt.Errorf(invalidInstanceSpecTypeMsg) + return false, errInvalidInstanceSpec } case *ec2types.ArchitectureType: switch iSpec := instanceSpec.(type) { @@ -546,7 +563,7 @@ func exec(instanceType ec2types.InstanceType, filterName string, filter filterPa return false, nil } default: - return false, fmt.Errorf(invalidInstanceSpecTypeMsg) + return false, errInvalidInstanceSpec } case *ec2types.UsageClassType: switch iSpec := instanceSpec.(type) { @@ -555,7 +572,7 @@ func exec(instanceType ec2types.InstanceType, filterName string, filter filterPa return false, nil } default: - return false, fmt.Errorf(invalidInstanceSpecTypeMsg) + return false, errInvalidInstanceSpec } case *CPUManufacturer: switch iSpec := instanceSpec.(type) { @@ -564,7 +581,7 @@ func exec(instanceType ec2types.InstanceType, filterName string, filter filterPa return false, nil } default: - return false, fmt.Errorf(invalidInstanceSpecTypeMsg) + return false, errInvalidInstanceSpec } case *ec2types.VirtualizationType: switch iSpec := instanceSpec.(type) { @@ -573,7 +590,7 @@ func exec(instanceType ec2types.InstanceType, filterName string, filter filterPa return false, nil } default: - return false, fmt.Errorf(invalidInstanceSpecTypeMsg) + return false, errInvalidInstanceSpec } case *ec2types.InstanceTypeHypervisor: switch iSpec := instanceSpec.(type) { @@ -582,7 +599,7 @@ func exec(instanceType ec2types.InstanceType, filterName string, filter filterPa return false, nil } default: - return false, fmt.Errorf(invalidInstanceSpecTypeMsg) + return false, errInvalidInstanceSpec } case *ec2types.RootDeviceType: switch iSpec := instanceSpec.(type) { @@ -591,7 +608,7 @@ func exec(instanceType ec2types.InstanceType, filterName string, filter filterPa return false, nil } default: - return false, fmt.Errorf(invalidInstanceSpecTypeMsg) + return false, errInvalidInstanceSpec } case *[]string: switch iSpec := instanceSpec.(type) { @@ -607,24 +624,24 @@ func exec(instanceType ec2types.InstanceType, filterName string, filter filterPa return false, nil } default: - return false, fmt.Errorf(invalidInstanceSpecTypeMsg) + return false, errInvalidInstanceSpec } default: - return false, fmt.Errorf("No filter handler found for %s", filterDetailsMsg) + return false, fmt.Errorf("no filter handler found for %s", filterDetailsMsg) } return true, nil } // RetrieveInstanceTypesSupportedInLocations returns a map of instance type -> AZ or Region for all instance types supported in the intersected locations passed in // The location can be a zone-id (ie. 
use1-az1), a zone-name (us-east-1a), or a region name (us-east-1). -// Note that zone names are not necessarily the same across accounts -func (itf Selector) RetrieveInstanceTypesSupportedInLocations(ctx context.Context, locations []string) (map[ec2types.InstanceType]string, error) { +// Note that zone names are not necessarily the same across accounts. +func (s Selector) RetrieveInstanceTypesSupportedInLocations(ctx context.Context, locations []string) (map[ec2types.InstanceType]string, error) { if len(locations) == 0 { return nil, nil } availableInstanceTypes := map[ec2types.InstanceType]int{} for _, location := range locations { - locationType, err := itf.getLocationType(ctx, location) + locationType, err := s.getLocationType(ctx, location) if err != nil { return nil, err } @@ -639,12 +656,12 @@ func (itf Selector) RetrieveInstanceTypesSupportedInLocations(ctx context.Contex }, } - p := ec2.NewDescribeInstanceTypeOfferingsPaginator(itf.EC2, instanceTypeOfferingsInput) + p := ec2.NewDescribeInstanceTypeOfferingsPaginator(s.EC2, instanceTypeOfferingsInput) for p.HasMorePages() { instanceTypeOfferings, err := p.NextPage(ctx) if err != nil { - return nil, fmt.Errorf("Encountered an error when describing instance type offerings: %w", err) + return nil, fmt.Errorf("encountered an error when describing instance type offerings: %w", err) } for _, instanceType := range instanceTypeOfferings.InstanceTypeOfferings { @@ -666,8 +683,8 @@ func (itf Selector) RetrieveInstanceTypesSupportedInLocations(ctx context.Contex return availableInstanceTypesAllLocations, nil } -func (itf Selector) getLocationType(ctx context.Context, location string) (ec2types.LocationType, error) { - azs, err := itf.EC2.DescribeAvailabilityZones(ctx, &ec2.DescribeAvailabilityZonesInput{}) +func (s Selector) getLocationType(ctx context.Context, location string) (ec2types.LocationType, error) { + azs, err := s.EC2.DescribeAvailabilityZones(ctx, &ec2.DescribeAvailabilityZonesInput{}) if err != nil { return "", err } @@ -680,7 +697,7 @@ func (itf Selector) getLocationType(ctx context.Context, location string) (ec2ty return zoneIDLocationType, nil } } - return "", fmt.Errorf("The location passed in (%s) is not a valid zone-id, zone-name, or region name", location) + return "", fmt.Errorf("the location passed in (%s) is not a valid zone-id, zone-name, or region name", location) } func isSupportedInLocation(instanceOfferings map[ec2types.InstanceType]string, instanceType ec2types.InstanceType) bool { diff --git a/vendor/github.com/aws/amazon-ec2-instance-selector/v2/pkg/selector/services.go b/vendor/github.com/aws/amazon-ec2-instance-selector/v3/pkg/selector/services.go similarity index 73% rename from vendor/github.com/aws/amazon-ec2-instance-selector/v2/pkg/selector/services.go rename to vendor/github.com/aws/amazon-ec2-instance-selector/v3/pkg/selector/services.go index b46495c0f..4a1cf0b5d 100644 --- a/vendor/github.com/aws/amazon-ec2-instance-selector/v2/pkg/selector/services.go +++ b/vendor/github.com/aws/amazon-ec2-instance-selector/v3/pkg/selector/services.go @@ -1,15 +1,14 @@ -// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. 
A copy of the
-// License is located at
+// http://www.apache.org/licenses/LICENSE-2.0
 //
-// http://aws.amazon.com/apache2.0/
-//
-// or in the "license" file accompanying this file. This file is distributed
-// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
-// express or implied. See the License for the specific language governing
-// permissions and limitations under the License.
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
 
 package selector
 
@@ -17,36 +16,36 @@ import (
 	"fmt"
 	"strings"
 
-	"github.com/imdario/mergo"
+	"dario.cat/mergo"
 )
 
-// Service is used to write custom service filter transforms
+// Service is used to write custom service filter transforms.
 type Service interface {
 	Filters(version string) (Filters, error)
 }
 
-// ServiceFiltersFn is the func type definition for the Service interface
+// ServiceFiltersFn is the func type definition for the Service interface.
 type ServiceFiltersFn func(version string) (Filters, error)
 
 // Filters implements the Service interface on ServiceFiltersFn
-// This allows any ServiceFiltersFn to be passed into funcs accepting the Service interface
+// This allows any ServiceFiltersFn to be passed into funcs accepting the Service interface.
 func (fn ServiceFiltersFn) Filters(version string) (Filters, error) {
 	return fn(version)
 }
 
-// ServiceRegistry is used to register service filter transforms
+// ServiceRegistry is used to register service filter transforms.
 type ServiceRegistry struct {
 	services map[string]*Service
 }
 
-// NewRegistry creates a new instance of a ServiceRegistry
+// NewRegistry creates a new instance of a ServiceRegistry.
 func NewRegistry() ServiceRegistry {
 	return ServiceRegistry{
 		services: make(map[string]*Service),
 	}
 }
 
-// Register takes a service name and Service implementation that will be executed on an ExecuteTransforms call
+// Register takes a service name and Service implementation that will be executed on an ExecuteTransforms call.
 func (sr *ServiceRegistry) Register(name string, service Service) {
 	if sr.services == nil {
 		sr.services = make(map[string]*Service)
@@ -57,13 +56,13 @@ func (sr *ServiceRegistry) Register(name string, service Service) {
 	sr.services[name] = &service
 }
 
-// RegisterAWSServices registers the built-in AWS service filter transforms
+// RegisterAWSServices registers the built-in AWS service filter transforms.
 func (sr *ServiceRegistry) RegisterAWSServices() {
 	sr.Register("emr", &EMR{})
 }
 
 // ExecuteTransforms will execute the ServiceRegistry's registered service filter transforms
-// Filters.Service will be parsed as <service-name>-<version> and passed to Service.Filters
+// Filters.Service will be parsed as <service-name>-<version> and passed to Service.Filters.
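+// For example (a sketch): a Filters.Service value of "emr-5.20.0" dispatches to the
+// registered EMR transform with version "5.20.0".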
func (sr *ServiceRegistry) ExecuteTransforms(filters Filters) (Filters, error) { if filters.Service == nil || *filters.Service == "" || *filters.Service == "eks" { return filters, nil diff --git a/vendor/github.com/aws/amazon-ec2-instance-selector/v2/pkg/selector/types.go b/vendor/github.com/aws/amazon-ec2-instance-selector/v3/pkg/selector/types.go similarity index 86% rename from vendor/github.com/aws/amazon-ec2-instance-selector/v2/pkg/selector/types.go rename to vendor/github.com/aws/amazon-ec2-instance-selector/v3/pkg/selector/types.go index 60395ebcb..9c76dd708 100644 --- a/vendor/github.com/aws/amazon-ec2-instance-selector/v2/pkg/selector/types.go +++ b/vendor/github.com/aws/amazon-ec2-instance-selector/v3/pkg/selector/types.go @@ -1,87 +1,89 @@ -// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. A copy of the -// License is located at +// http://www.apache.org/licenses/LICENSE-2.0 // -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package selector import ( "encoding/json" - "github.com/aws/amazon-ec2-instance-selector/v2/pkg/awsapi" - ec2types "github.com/aws/aws-sdk-go-v2/service/ec2/types" + "log" "regexp" - "github.com/aws/amazon-ec2-instance-selector/v2/pkg/bytequantity" - "github.com/aws/amazon-ec2-instance-selector/v2/pkg/ec2pricing" - "github.com/aws/amazon-ec2-instance-selector/v2/pkg/instancetypes" + ec2types "github.com/aws/aws-sdk-go-v2/service/ec2/types" + + "github.com/aws/amazon-ec2-instance-selector/v3/pkg/awsapi" + "github.com/aws/amazon-ec2-instance-selector/v3/pkg/bytequantity" + "github.com/aws/amazon-ec2-instance-selector/v3/pkg/ec2pricing" + "github.com/aws/amazon-ec2-instance-selector/v3/pkg/instancetypes" ) -// InstanceTypesOutput can be implemented to provide custom output to instance type results +// InstanceTypesOutput can be implemented to provide custom output to instance type results. type InstanceTypesOutput interface { Output([]*instancetypes.Details) []string } -// InstanceTypesOutputFn is the func type definition for InstanceTypesOuput +// InstanceTypesOutputFn is the func type definition for InstanceTypesOuput. type InstanceTypesOutputFn func([]*instancetypes.Details) []string // Output implements InstanceTypesOutput interface on InstanceTypesOutputFn -// This allows any InstanceTypesOutputFn to be passed into funcs accepting InstanceTypesOutput interface +// This allows any InstanceTypesOutputFn to be passed into funcs accepting InstanceTypesOutput interface. 
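The InstanceTypesOutputFn type defined just above is the same function-to-interface adapter pattern as ServiceFiltersFn. A rough sketch of plugging in a custom output, assuming instancetypes.Details promotes the EC2 InstanceType field (which the sorter's ".InstanceType" JSON path elsewhere in this patch suggests); nameOnlyOutput is an invented name:

package main

import (
	"fmt"

	"github.com/aws/amazon-ec2-instance-selector/v3/pkg/instancetypes"
	"github.com/aws/amazon-ec2-instance-selector/v3/pkg/selector"
)

// nameOnlyOutput emits one instance type name per row.
func nameOnlyOutput(details []*instancetypes.Details) []string {
	rows := make([]string, 0, len(details))
	for _, d := range details {
		rows = append(rows, string(d.InstanceType))
	}
	return rows
}

func main() {
	// The type conversion alone satisfies the interface.
	var out selector.InstanceTypesOutput = selector.InstanceTypesOutputFn(nameOnlyOutput)
	fmt.Println(out.Output(nil))
}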
func (fn InstanceTypesOutputFn) Output(instanceTypes []*instancetypes.Details) []string { return fn(instanceTypes) } -// Selector is used to filter instance type resource specs +// Selector is used to filter instance type resource specs. type Selector struct { EC2 awsapi.SelectorInterface EC2Pricing ec2pricing.EC2PricingIface InstanceTypesProvider *instancetypes.Provider ServiceRegistry ServiceRegistry + Logger *log.Logger } // IntRangeFilter holds an upper and lower bound int -// The lower and upper bound are used to range filter resource specs +// The lower and upper bound are used to range filter resource specs. type IntRangeFilter struct { UpperBound int LowerBound int } // Int32RangeFilter holds an upper and lower bound int -// The lower and upper bound are used to range filter resource specs +// The lower and upper bound are used to range filter resource specs. type Int32RangeFilter struct { UpperBound int32 LowerBound int32 } // Uint64RangeFilter holds an upper and lower bound uint64 -// The lower and upper bound are used to range filter resource specs +// The lower and upper bound are used to range filter resource specs. type Uint64RangeFilter struct { UpperBound uint64 LowerBound uint64 } // ByteQuantityRangeFilter holds an upper and lower bound byte quantity -// The lower and upper bound are used to range filter resource specs +// The lower and upper bound are used to range filter resource specs. type ByteQuantityRangeFilter struct { UpperBound bytequantity.ByteQuantity LowerBound bytequantity.ByteQuantity } // Float64RangeFilter holds an upper and lower bound float64 -// The lower and upper bound are used to range filter resource specs +// The lower and upper bound are used to range filter resource specs. type Float64RangeFilter struct { UpperBound float64 LowerBound float64 } -// filterPair holds a tuple of the passed in filter value and the instance resource spec value +// filterPair holds a tuple of the passed in filter value and the instance resource spec value. type filterPair struct { filterValue interface{} instanceSpec interface{} @@ -95,7 +97,7 @@ func getRegexpString(r *regexp.Regexp) *string { return &rStr } -// MarshalIndent is used to return a pretty-print json representation of a Filters struct +// MarshalIndent is used to return a pretty-print json representation of a Filters struct. func (f *Filters) MarshalIndent(prefix, indent string) ([]byte, error) { type Alias Filters return json.MarshalIndent(&struct { @@ -109,7 +111,7 @@ func (f *Filters) MarshalIndent(prefix, indent string) ([]byte, error) { }, prefix, indent) } -// Filters is used to group instance type resource attributes for filtering +// Filters is used to group instance type resource attributes for filtering. type Filters struct { // AvailabilityZones is the AWS Availability Zones where instances will be provisioned. // Instance type capacity can vary between availability zones. @@ -271,11 +273,18 @@ type Filters struct { // DedicatedHosts filters on instance types that support dedicated hosts tenancy DedicatedHosts *bool + + // Generation filters on the instance type generation + // i.e. c7i.xlarge is 7 + // NOTE that generation is only comparable per instance family + // For example, i3 and c5 are both 5th generation, but the Generation filter will + // only filter on the number in the instance type name. + Generation *IntRangeFilter } type CPUManufacturer string -// Enum values for CPUManufacturer +// Enum values for CPUManufacturer. 
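The MarshalIndent method above relies on the local-alias trick to avoid infinite recursion when pretty-printing. A self-contained sketch of the same pattern on a cut-down struct (the lowercase filters type here is illustrative, not the real one):

package main

import (
	"encoding/json"
	"fmt"
	"regexp"
)

type filters struct {
	AllowList *regexp.Regexp
}

func (f *filters) MarshalIndent(prefix, indent string) ([]byte, error) {
	// Alias drops the method set so the nested MarshalIndent call cannot
	// recurse; the anonymous wrapper swaps the regexp for its string form,
	// and the shallower AllowList field shadows the embedded one.
	type Alias filters
	var allow *string
	if f.AllowList != nil {
		s := f.AllowList.String()
		allow = &s
	}
	return json.MarshalIndent(&struct {
		AllowList *string
		*Alias
	}{
		AllowList: allow,
		Alias:     (*Alias)(f),
	}, prefix, indent)
}

func main() {
	f := filters{AllowList: regexp.MustCompile(`^m5\.`)}
	out, err := f.MarshalIndent("", "  ")
	fmt.Println(string(out), err)
}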
const ( CPUManufacturerAWS CPUManufacturer = "aws" CPUManufacturerAMD CPUManufacturer = "amd" @@ -287,18 +296,18 @@ const ( // ordering of this slice is not guaranteed to be stable across updates. func (CPUManufacturer) Values() []CPUManufacturer { return []CPUManufacturer{ - "aws", - "amd", - "intel", + CPUManufacturerAWS, + CPUManufacturerAMD, + CPUManufacturerIntel, } } -// ArchitectureTypeAMD64 is a legacy type we support for b/c that isn't in the API +// ArchitectureTypeAMD64 is a legacy type we support for b/c that isn't in the API. const ( ArchitectureTypeAMD64 ec2types.ArchitectureType = "amd64" ) -// ArchitectureTypeAMD64 is a legacy type we support for b/c that isn't in the API +// ArchitectureTypeAMD64 is a legacy type we support for b/c that isn't in the API. const ( VirtualizationTypePv ec2types.VirtualizationType = "pv" ) diff --git a/vendor/github.com/aws/amazon-ec2-instance-selector/v2/pkg/sorter/sorter.go b/vendor/github.com/aws/amazon-ec2-instance-selector/v3/pkg/sorter/sorter.go similarity index 93% rename from vendor/github.com/aws/amazon-ec2-instance-selector/v2/pkg/sorter/sorter.go rename to vendor/github.com/aws/amazon-ec2-instance-selector/v3/pkg/sorter/sorter.go index 311145b0d..1bb2fed28 100644 --- a/vendor/github.com/aws/amazon-ec2-instance-selector/v2/pkg/sorter/sorter.go +++ b/vendor/github.com/aws/amazon-ec2-instance-selector/v3/pkg/sorter/sorter.go @@ -1,15 +1,14 @@ -// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. A copy of the -// License is located at +// http://www.apache.org/licenses/LICENSE-2.0 // -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package sorter @@ -20,12 +19,13 @@ import ( "sort" "strings" - "github.com/aws/amazon-ec2-instance-selector/v2/pkg/instancetypes" "github.com/oliveagle/jsonpath" + + "github.com/aws/amazon-ec2-instance-selector/v3/pkg/instancetypes" ) const ( - // Sort direction + // Sort direction. SortAscending = "ascending" SortAsc = "asc" @@ -38,7 +38,7 @@ const ( GPUCountField = "gpus" InferenceAcceleratorsField = "inference-accelerators" - // shorthand flags + // shorthand flags. VCPUs = "vcpus" Memory = "memory" @@ -51,7 +51,7 @@ const ( EBSOptimizedBaselineThroughput = "ebs-optimized-baseline-throughput" EBSOptimizedBaselineIOPS = "ebs-optimized-baseline-iops" - // JSON field paths for shorthand flags + // JSON field paths for shorthand flags. instanceNamePath = ".InstanceType" vcpuPath = ".VCpuInfo.DefaultVCpus" @@ -67,14 +67,14 @@ const ( ) // sorterNode represents a sortable instance type which holds the value -// to sort by instance sort +// to sort by instance sort. 
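A standalone sketch of the reflect-based comparison that the sorter's isLess comparator (shown further below) builds on; only the integer case is reproduced here, and isLessSketch is an invented name:

package main

import (
	"fmt"
	"reflect"
)

// isLessSketch switches on the reflect.Kind of the first value and flips the
// result for descending order, in the same shape as the vendored isLess.
func isLessSketch(valI, valJ reflect.Value, isDescending bool) (bool, error) {
	switch valI.Kind() {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		less := valI.Int() < valJ.Int()
		if isDescending {
			less = !less
		}
		return less, nil
	default:
		return false, fmt.Errorf("unsortable kind: %v", valI.Kind())
	}
}

func main() {
	fmt.Println(isLessSketch(reflect.ValueOf(2), reflect.ValueOf(8), false)) // true <nil>
	fmt.Println(isLessSketch(reflect.ValueOf(2), reflect.ValueOf(8), true))  // false <nil>
}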
type sorterNode struct { instanceType *instancetypes.Details fieldValue reflect.Value } // sorter is used to sort instance types based on a sorting field -// and direction +// and direction. type sorter struct { sorters []*sorterNode sortField string @@ -170,7 +170,7 @@ func formatSortField(sortField string) string { } // newSorterNode creates a new sorterNode object which represents the given instance type -// and can be used in sorting of instance types based on the given sortField +// and can be used in sorting of instance types based on the given sortField. func newSorterNode(instanceType *instancetypes.Details, sortField string) (*sorterNode, error) { // some important fields (such as gpu count) can not be accessed directly in the instancetypes.Details // struct, so we have special hard-coded flags to handle such cases @@ -223,7 +223,7 @@ func newSorterNode(instanceType *instancetypes.Details, sortField string) (*sort } // sort the instance types in the Sorter based on the Sorter's sort field and -// direction +// direction. func (s *sorter) sort() error { if len(s.sorters) <= 1 { return nil @@ -247,7 +247,7 @@ func (s *sorter) sort() error { } // isLess determines whether the first value (valI) is less than the -// second value (valJ) or not +// second value (valJ) or not. func isLess(valI, valJ reflect.Value, isDescending bool) (bool, error) { switch valI.Kind() { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: @@ -334,7 +334,7 @@ func isLess(valI, valJ reflect.Value, isDescending bool) (bool, error) { } } -// instanceTypes returns the list of instance types held in the Sorter +// instanceTypes returns the list of instance types held in the Sorter. func (s *sorter) instanceTypes() []*instancetypes.Details { instanceTypes := []*instancetypes.Details{} @@ -347,7 +347,7 @@ func (s *sorter) instanceTypes() []*instancetypes.Details { // helper functions for special sorting fields -// getTotalGpusCount calculates the number of gpus in the given instance type +// getTotalGpusCount calculates the number of gpus in the given instance type. func getTotalGpusCount(instanceType *instancetypes.Details) *int32 { gpusInfo := instanceType.GpuInfo @@ -364,7 +364,7 @@ func getTotalGpusCount(instanceType *instancetypes.Details) *int32 { } // getTotalAcceleratorsCount calculates the total number of inference accelerators -// in the given instance type +// in the given instance type. func getTotalAcceleratorsCount(instanceType *instancetypes.Details) *int32 { acceleratorInfo := instanceType.InferenceAcceleratorInfo diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/credential_cache.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/credential_cache.go index 781ac0ae2..623890e8d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/credential_cache.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/credential_cache.go @@ -172,6 +172,17 @@ func (p *CredentialsCache) getCreds() (Credentials, bool) { return *c, true } +// ProviderSources returns a list of where the underlying credential provider +// has been sourced, if available. Returns empty if the provider doesn't implement +// the interface +func (p *CredentialsCache) ProviderSources() []CredentialSource { + asSource, ok := p.provider.(CredentialProviderSource) + if !ok { + return []CredentialSource{} + } + return asSource.ProviderSources() +} + // Invalidate will invalidate the cached credentials. The next call to Retrieve // will cause the provider's Retrieve method to be called. 
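A sketch of exercising the new ProviderSources plumbing on CredentialsCache shown above; the staticish provider is a stand-in, and the printed slice shows the enum's integer values:

package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
)

// staticish is a toy provider that also reports where its credentials came from.
type staticish struct{}

func (staticish) Retrieve(context.Context) (aws.Credentials, error) {
	return aws.Credentials{AccessKeyID: "AKID", SecretAccessKey: "SECRET"}, nil
}

func (staticish) ProviderSources() []aws.CredentialSource {
	return []aws.CredentialSource{aws.CredentialSourceCode}
}

func main() {
	cache := aws.NewCredentialsCache(staticish{})
	// Forwards to the wrapped provider because it implements CredentialProviderSource.
	fmt.Println(cache.ProviderSources())
}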
func (p *CredentialsCache) Invalidate() {
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go
index 98ba77056..4ad2ee440 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go
@@ -70,6 +70,56 @@ func (AnonymousCredentials) Retrieve(context.Context) (Credentials, error) {
 		fmt.Errorf("the AnonymousCredentials is not a valid credential provider, and cannot be used to sign AWS requests with")
 }
 
+// CredentialSource is the source of the credential provider.
+// A provider can have multiple credential sources: For example, a provider that reads a profile, calls ECS to
+// get credentials and then assumes a role using STS will have all these as part of its provider chain.
+type CredentialSource int
+
+const (
+	// CredentialSourceUndefined is the sentinel zero value
+	CredentialSourceUndefined CredentialSource = iota
+	// CredentialSourceCode credentials resolved from code, cli parameters, session object, or client instance
+	CredentialSourceCode
+	// CredentialSourceEnvVars credentials resolved from environment variables
+	CredentialSourceEnvVars
+	// CredentialSourceEnvVarsSTSWebIDToken credentials resolved from environment variables for assuming a role with STS using a web identity token
+	CredentialSourceEnvVarsSTSWebIDToken
+	// CredentialSourceSTSAssumeRole credentials resolved from STS using AssumeRole
+	CredentialSourceSTSAssumeRole
+	// CredentialSourceSTSAssumeRoleSaml credentials resolved from STS using assume role with SAML
+	CredentialSourceSTSAssumeRoleSaml
+	// CredentialSourceSTSAssumeRoleWebID credentials resolved from STS using assume role with web identity
+	CredentialSourceSTSAssumeRoleWebID
+	// CredentialSourceSTSFederationToken credentials resolved from STS using a federation token
+	CredentialSourceSTSFederationToken
+	// CredentialSourceSTSSessionToken credentials resolved from STS using a session token
+	CredentialSourceSTSSessionToken
+	// CredentialSourceProfile credentials resolved from a config file(s) profile with static credentials
+	CredentialSourceProfile
+	// CredentialSourceProfileSourceProfile credentials resolved from a source profile in a config file(s) profile
+	CredentialSourceProfileSourceProfile
+	// CredentialSourceProfileNamedProvider credentials resolved from a named provider in a config file(s) profile (like EcsContainer)
+	CredentialSourceProfileNamedProvider
+	// CredentialSourceProfileSTSWebIDToken credentials resolved from configuration for assuming a role with STS using web identity token in a config file(s) profile
+	CredentialSourceProfileSTSWebIDToken
+	// CredentialSourceProfileSSO credentials resolved from an SSO session in a config file(s) profile
+	CredentialSourceProfileSSO
+	// CredentialSourceSSO credentials resolved from an SSO session
+	CredentialSourceSSO
+	// CredentialSourceProfileSSOLegacy credentials resolved from an SSO session in a config file(s) profile using legacy format
+	CredentialSourceProfileSSOLegacy
+	// CredentialSourceSSOLegacy credentials resolved from an SSO session using legacy format
+	CredentialSourceSSOLegacy
+	// CredentialSourceProfileProcess credentials resolved from a process in a config file(s) profile
+	CredentialSourceProfileProcess
+	// CredentialSourceProcess credentials resolved from a process
+	CredentialSourceProcess
+	// CredentialSourceHTTP credentials resolved from an HTTP endpoint
+	CredentialSourceHTTP
+	// CredentialSourceIMDS credentials
resolved from the instance metadata service (IMDS)
+	CredentialSourceIMDS
+)
+
 // A Credentials is the AWS credentials value for individual credential fields.
 type Credentials struct {
 	// AWS Access key ID
@@ -125,6 +175,13 @@ type CredentialsProvider interface {
 	Retrieve(ctx context.Context) (Credentials, error)
 }
 
+// CredentialProviderSource allows any credential provider to track
+// all providers where a credential provider was sourced. For example, if the credentials came from a
+// call to a role specified in the profile, this method will give the whole breadcrumb trail
+type CredentialProviderSource interface {
+	ProviderSources() []CredentialSource
+}
+
 // CredentialsProviderFunc provides a helper wrapping a function value to
 // satisfy the CredentialsProvider interface.
 type CredentialsProviderFunc func(context.Context) (Credentials, error)
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go
index 8d6c8ef96..8e930fc6f 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go
@@ -3,4 +3,4 @@ package aws
 
 // goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.36.2"
+const goModuleVersion = "1.36.3"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go
index 95b6268f4..6ee3391be 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go
@@ -109,8 +109,57 @@ const (
 	UserAgentFeatureRequestChecksumWhenRequired   = "a"
 	UserAgentFeatureResponseChecksumWhenSupported = "b"
 	UserAgentFeatureResponseChecksumWhenRequired  = "c"
+
+	UserAgentFeatureDynamoDBUserAgent = "d" // not yet implemented
+
+	UserAgentFeatureCredentialsCode                 = "e"
+	UserAgentFeatureCredentialsJvmSystemProperties  = "f" // n/a (this is not a JVM sdk)
+	UserAgentFeatureCredentialsEnvVars              = "g"
+	UserAgentFeatureCredentialsEnvVarsStsWebIDToken = "h"
+	UserAgentFeatureCredentialsStsAssumeRole        = "i"
+	UserAgentFeatureCredentialsStsAssumeRoleSaml    = "j" // not yet implemented
+	UserAgentFeatureCredentialsStsAssumeRoleWebID   = "k"
+	UserAgentFeatureCredentialsStsFederationToken   = "l" // not yet implemented
+	UserAgentFeatureCredentialsStsSessionToken      = "m" // not yet implemented
+	UserAgentFeatureCredentialsProfile              = "n"
+	UserAgentFeatureCredentialsProfileSourceProfile = "o"
+	UserAgentFeatureCredentialsProfileNamedProvider = "p"
+	UserAgentFeatureCredentialsProfileStsWebIDToken = "q"
+	UserAgentFeatureCredentialsProfileSso           = "r"
+	UserAgentFeatureCredentialsSso                  = "s"
+	UserAgentFeatureCredentialsProfileSsoLegacy     = "t"
+	UserAgentFeatureCredentialsSsoLegacy            = "u"
+	UserAgentFeatureCredentialsProfileProcess       = "v"
+	UserAgentFeatureCredentialsProcess              = "w"
+	UserAgentFeatureCredentialsBoto2ConfigFile      = "x" // n/a (this is not boto/Python)
+	UserAgentFeatureCredentialsAwsSdkStore          = "y" // n/a (this is used by .NET based sdk)
+	UserAgentFeatureCredentialsHTTP                 = "z"
+	UserAgentFeatureCredentialsIMDS                 = "0"
+)
+
+var credentialSourceToFeature = map[aws.CredentialSource]UserAgentFeature{
+	aws.CredentialSourceCode:                 UserAgentFeatureCredentialsCode,
+	aws.CredentialSourceEnvVars:              UserAgentFeatureCredentialsEnvVars,
+	aws.CredentialSourceEnvVarsSTSWebIDToken: UserAgentFeatureCredentialsEnvVarsStsWebIDToken,
+	aws.CredentialSourceSTSAssumeRole:
UserAgentFeatureCredentialsStsAssumeRole, + aws.CredentialSourceSTSAssumeRoleSaml: UserAgentFeatureCredentialsStsAssumeRoleSaml, + aws.CredentialSourceSTSAssumeRoleWebID: UserAgentFeatureCredentialsStsAssumeRoleWebID, + aws.CredentialSourceSTSFederationToken: UserAgentFeatureCredentialsStsFederationToken, + aws.CredentialSourceSTSSessionToken: UserAgentFeatureCredentialsStsSessionToken, + aws.CredentialSourceProfile: UserAgentFeatureCredentialsProfile, + aws.CredentialSourceProfileSourceProfile: UserAgentFeatureCredentialsProfileSourceProfile, + aws.CredentialSourceProfileNamedProvider: UserAgentFeatureCredentialsProfileNamedProvider, + aws.CredentialSourceProfileSTSWebIDToken: UserAgentFeatureCredentialsProfileStsWebIDToken, + aws.CredentialSourceProfileSSO: UserAgentFeatureCredentialsProfileSso, + aws.CredentialSourceSSO: UserAgentFeatureCredentialsSso, + aws.CredentialSourceProfileSSOLegacy: UserAgentFeatureCredentialsProfileSsoLegacy, + aws.CredentialSourceSSOLegacy: UserAgentFeatureCredentialsSsoLegacy, + aws.CredentialSourceProfileProcess: UserAgentFeatureCredentialsProfileProcess, + aws.CredentialSourceProcess: UserAgentFeatureCredentialsProcess, + aws.CredentialSourceHTTP: UserAgentFeatureCredentialsHTTP, + aws.CredentialSourceIMDS: UserAgentFeatureCredentialsIMDS, +} + // RequestUserAgent is a build middleware that set the User-Agent for the request. type RequestUserAgent struct { sdkAgent, userAgent *smithyhttp.UserAgentBuilder @@ -263,6 +312,14 @@ func (u *RequestUserAgent) AddSDKAgentKeyValue(keyType SDKAgentKeyType, key, val u.userAgent.AddKeyValue(keyType.string(), strings.Map(rules, key)+"#"+strings.Map(rules, value)) } +// AddCredentialsSource adds the credential source as a feature on the User-Agent string +func (u *RequestUserAgent) AddCredentialsSource(source aws.CredentialSource) { + x, ok := credentialSourceToFeature[source] + if ok { + u.AddUserAgentFeature(x) + } +} + // ID the name of the middleware. 
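To make the single-character feature ids concrete: per the table above, a profile, source-profile, then STS AssumeRole chain contributes "n", "o" and "i". The sketch below assumes the SDK's business-metrics section of the User-Agent takes the "m/<ids>" form; the actual assembly lives in the user-agent builder, outside this hunk:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Feature ids for profile -> source profile -> STS AssumeRole.
	features := []string{"n", "o", "i"}
	// Assumed metrics-section format for illustration only.
	fmt.Println("User-Agent suffix: m/" + strings.Join(features, ","))
}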
func (u *RequestUserAgent) ID() string { return "UserAgent" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md index ac6843f1a..97f59c071 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md @@ -1,3 +1,7 @@ +# v1.29.8 (2025-02-27) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.29.7 (2025-02-18) * **Bug Fix**: Bump go version to 1.22 diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go index f04c3bdf6..da7e89e28 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go @@ -3,4 +3,4 @@ package config // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.29.7" +const goModuleVersion = "1.29.8" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_credentials.go b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_credentials.go index 7ae252e2e..b00259df0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_credentials.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_credentials.go @@ -112,13 +112,15 @@ func resolveCredentialChain(ctx context.Context, cfg *aws.Config, configs config switch { case sharedProfileSet: - err = resolveCredsFromProfile(ctx, cfg, envConfig, sharedConfig, other) + ctx, err = resolveCredsFromProfile(ctx, cfg, envConfig, sharedConfig, other) case envConfig.Credentials.HasKeys(): - cfg.Credentials = credentials.StaticCredentialsProvider{Value: envConfig.Credentials} + ctx = addCredentialSource(ctx, aws.CredentialSourceEnvVars) + cfg.Credentials = credentials.StaticCredentialsProvider{Value: envConfig.Credentials, Source: getCredentialSources(ctx)} case len(envConfig.WebIdentityTokenFilePath) > 0: + ctx = addCredentialSource(ctx, aws.CredentialSourceEnvVarsSTSWebIDToken) err = assumeWebIdentity(ctx, cfg, envConfig.WebIdentityTokenFilePath, envConfig.RoleARN, envConfig.RoleSessionName, configs) default: - err = resolveCredsFromProfile(ctx, cfg, envConfig, sharedConfig, other) + ctx, err = resolveCredsFromProfile(ctx, cfg, envConfig, sharedConfig, other) } if err != nil { return err @@ -133,53 +135,71 @@ func resolveCredentialChain(ctx context.Context, cfg *aws.Config, configs config return nil } -func resolveCredsFromProfile(ctx context.Context, cfg *aws.Config, envConfig *EnvConfig, sharedConfig *SharedConfig, configs configs) (err error) { - +func resolveCredsFromProfile(ctx context.Context, cfg *aws.Config, envConfig *EnvConfig, sharedConfig *SharedConfig, configs configs) (ctx2 context.Context, err error) { switch { case sharedConfig.Source != nil: + ctx = addCredentialSource(ctx, aws.CredentialSourceProfileSourceProfile) // Assume IAM role with credentials source from a different profile. - err = resolveCredsFromProfile(ctx, cfg, envConfig, sharedConfig.Source, configs) + ctx, err = resolveCredsFromProfile(ctx, cfg, envConfig, sharedConfig.Source, configs) case sharedConfig.Credentials.HasKeys(): // Static Credentials from Shared Config/Credentials file. 
+ ctx = addCredentialSource(ctx, aws.CredentialSourceProfile) cfg.Credentials = credentials.StaticCredentialsProvider{ - Value: sharedConfig.Credentials, + Value: sharedConfig.Credentials, + Source: getCredentialSources(ctx), } case len(sharedConfig.CredentialSource) != 0: - err = resolveCredsFromSource(ctx, cfg, envConfig, sharedConfig, configs) + ctx = addCredentialSource(ctx, aws.CredentialSourceProfileNamedProvider) + ctx, err = resolveCredsFromSource(ctx, cfg, envConfig, sharedConfig, configs) case len(sharedConfig.WebIdentityTokenFile) != 0: // Credentials from Assume Web Identity token require an IAM Role, and // that roll will be assumed. May be wrapped with another assume role // via SourceProfile. - return assumeWebIdentity(ctx, cfg, sharedConfig.WebIdentityTokenFile, sharedConfig.RoleARN, sharedConfig.RoleSessionName, configs) + ctx = addCredentialSource(ctx, aws.CredentialSourceProfileSTSWebIDToken) + return ctx, assumeWebIdentity(ctx, cfg, sharedConfig.WebIdentityTokenFile, sharedConfig.RoleARN, sharedConfig.RoleSessionName, configs) case sharedConfig.hasSSOConfiguration(): + if sharedConfig.hasLegacySSOConfiguration() { + ctx = addCredentialSource(ctx, aws.CredentialSourceProfileSSOLegacy) + ctx = addCredentialSource(ctx, aws.CredentialSourceSSOLegacy) + } else { + ctx = addCredentialSource(ctx, aws.CredentialSourceSSO) + } + if sharedConfig.SSOSession != nil { + ctx = addCredentialSource(ctx, aws.CredentialSourceProfileSSO) + } err = resolveSSOCredentials(ctx, cfg, sharedConfig, configs) case len(sharedConfig.CredentialProcess) != 0: // Get credentials from CredentialProcess + ctx = addCredentialSource(ctx, aws.CredentialSourceProfileProcess) + ctx = addCredentialSource(ctx, aws.CredentialSourceProcess) err = processCredentials(ctx, cfg, sharedConfig, configs) case len(envConfig.ContainerCredentialsRelativePath) != 0: + ctx = addCredentialSource(ctx, aws.CredentialSourceHTTP) err = resolveHTTPCredProvider(ctx, cfg, ecsContainerURI(envConfig.ContainerCredentialsRelativePath), envConfig.ContainerAuthorizationToken, configs) case len(envConfig.ContainerCredentialsEndpoint) != 0: + ctx = addCredentialSource(ctx, aws.CredentialSourceHTTP) err = resolveLocalHTTPCredProvider(ctx, cfg, envConfig.ContainerCredentialsEndpoint, envConfig.ContainerAuthorizationToken, configs) default: + ctx = addCredentialSource(ctx, aws.CredentialSourceIMDS) err = resolveEC2RoleCredentials(ctx, cfg, configs) } if err != nil { - return err + return ctx, err } if len(sharedConfig.RoleARN) > 0 { - return credsFromAssumeRole(ctx, cfg, sharedConfig, configs) + return ctx, credsFromAssumeRole(ctx, cfg, sharedConfig, configs) } - return nil + return ctx, nil } func resolveSSOCredentials(ctx context.Context, cfg *aws.Config, sharedConfig *SharedConfig, configs configs) error { @@ -198,6 +218,10 @@ func resolveSSOCredentials(ctx context.Context, cfg *aws.Config, sharedConfig *S cfgCopy := cfg.Copy() + options = append(options, func(o *ssocreds.Options) { + o.CredentialSources = getCredentialSources(ctx) + }) + if sharedConfig.SSOSession != nil { ssoTokenProviderOptionsFn, found, err := getSSOTokenProviderOptions(ctx, configs) if err != nil { @@ -242,6 +266,10 @@ func processCredentials(ctx context.Context, cfg *aws.Config, sharedConfig *Shar opts = append(opts, options) } + opts = append(opts, func(o *processcreds.Options) { + o.CredentialSources = getCredentialSources(ctx) + }) + cfg.Credentials = processcreds.NewProvider(sharedConfig.CredentialProcess, opts...) 
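The addCredentialSource/getCredentialSources helpers used throughout this hunk are defined at the bottom of resolve_credentials.go. A standalone mirror of the accumulation pattern, using only the standard library plus the new aws enum; sourceKey and addSource are invented names:

package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
)

// sourceKey is a private context key type, so no other package can collide.
type sourceKey struct{}

// addSource appends a credential source to the trail carried on the context,
// returning the derived context so callers can keep threading it along.
func addSource(ctx context.Context, s aws.CredentialSource) context.Context {
	existing, ok := ctx.Value(sourceKey{}).([]aws.CredentialSource)
	if !ok {
		existing = []aws.CredentialSource{s}
	} else {
		existing = append(existing, s)
	}
	return context.WithValue(ctx, sourceKey{}, existing)
}

func main() {
	ctx := addSource(context.Background(), aws.CredentialSourceProfile)
	ctx = addSource(ctx, aws.CredentialSourceProfileSourceProfile)
	ctx = addSource(ctx, aws.CredentialSourceSTSAssumeRole)
	fmt.Println(ctx.Value(sourceKey{})) // the three sources, printed as ints
}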
return nil @@ -323,6 +351,7 @@ func resolveHTTPCredProvider(ctx context.Context, cfg *aws.Config, url, authToke if cfg.Retryer != nil { options.Retryer = cfg.Retryer() } + options.CredentialSources = getCredentialSources(ctx) }, } @@ -346,28 +375,31 @@ func resolveHTTPCredProvider(ctx context.Context, cfg *aws.Config, url, authToke return nil } -func resolveCredsFromSource(ctx context.Context, cfg *aws.Config, envConfig *EnvConfig, sharedCfg *SharedConfig, configs configs) (err error) { +func resolveCredsFromSource(ctx context.Context, cfg *aws.Config, envConfig *EnvConfig, sharedCfg *SharedConfig, configs configs) (context.Context, error) { switch sharedCfg.CredentialSource { case credSourceEc2Metadata: - return resolveEC2RoleCredentials(ctx, cfg, configs) + ctx = addCredentialSource(ctx, aws.CredentialSourceIMDS) + return ctx, resolveEC2RoleCredentials(ctx, cfg, configs) case credSourceEnvironment: - cfg.Credentials = credentials.StaticCredentialsProvider{Value: envConfig.Credentials} + ctx = addCredentialSource(ctx, aws.CredentialSourceHTTP) + cfg.Credentials = credentials.StaticCredentialsProvider{Value: envConfig.Credentials, Source: getCredentialSources(ctx)} case credSourceECSContainer: + ctx = addCredentialSource(ctx, aws.CredentialSourceHTTP) if len(envConfig.ContainerCredentialsRelativePath) != 0 { - return resolveHTTPCredProvider(ctx, cfg, ecsContainerURI(envConfig.ContainerCredentialsRelativePath), envConfig.ContainerAuthorizationToken, configs) + return ctx, resolveHTTPCredProvider(ctx, cfg, ecsContainerURI(envConfig.ContainerCredentialsRelativePath), envConfig.ContainerAuthorizationToken, configs) } if len(envConfig.ContainerCredentialsEndpoint) != 0 { - return resolveLocalHTTPCredProvider(ctx, cfg, envConfig.ContainerCredentialsEndpoint, envConfig.ContainerAuthorizationToken, configs) + return ctx, resolveLocalHTTPCredProvider(ctx, cfg, envConfig.ContainerCredentialsEndpoint, envConfig.ContainerAuthorizationToken, configs) } - return fmt.Errorf("EcsContainer was specified as the credential_source, but neither 'AWS_CONTAINER_CREDENTIALS_RELATIVE_URI' or AWS_CONTAINER_CREDENTIALS_FULL_URI' was set") + return ctx, fmt.Errorf("EcsContainer was specified as the credential_source, but neither 'AWS_CONTAINER_CREDENTIALS_RELATIVE_URI' or AWS_CONTAINER_CREDENTIALS_FULL_URI' was set") default: - return fmt.Errorf("credential_source values must be EcsContainer, Ec2InstanceMetadata, or Environment") + return ctx, fmt.Errorf("credential_source values must be EcsContainer, Ec2InstanceMetadata, or Environment") } - return nil + return ctx, nil } func resolveEC2RoleCredentials(ctx context.Context, cfg *aws.Config, configs configs) error { @@ -386,6 +418,7 @@ func resolveEC2RoleCredentials(ctx context.Context, cfg *aws.Config, configs con if o.Client == nil { o.Client = imds.NewFromConfig(*cfg) } + o.CredentialSources = getCredentialSources(ctx) }) provider := ec2rolecreds.New(optFns...) 
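The same wiring resolveEC2RoleCredentials performs above, sketched against the public API with the context-derived trail spelled out literally; this assumes the CredentialSources field the patch adds to ec2rolecreds.Options a little further down:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds"
)

func main() {
	// Reporting-only: the field feeds the User-Agent metrics, not signing.
	p := ec2rolecreds.New(func(o *ec2rolecreds.Options) {
		o.CredentialSources = []aws.CredentialSource{aws.CredentialSourceIMDS}
	})
	fmt.Println(p.ProviderSources())
}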
@@ -394,7 +427,6 @@ func resolveEC2RoleCredentials(ctx context.Context, cfg *aws.Config, configs con if err != nil { return err } - return nil } @@ -473,6 +505,10 @@ func assumeWebIdentity(ctx context.Context, cfg *aws.Config, filepath string, ro RoleARN: roleARN, } + optFns = append(optFns, func(options *stscreds.WebIdentityRoleOptions) { + options.CredentialSources = getCredentialSources(ctx) + }) + for _, fn := range optFns { fn(&opts) } @@ -494,6 +530,8 @@ func assumeWebIdentity(ctx context.Context, cfg *aws.Config, filepath string, ro } func credsFromAssumeRole(ctx context.Context, cfg *aws.Config, sharedCfg *SharedConfig, configs configs) (err error) { + // resolve credentials early + credentialSources := getCredentialSources(ctx) optFns := []func(*stscreds.AssumeRoleOptions){ func(options *stscreds.AssumeRoleOptions) { options.RoleSessionName = sharedCfg.RoleSessionName @@ -511,6 +549,9 @@ func credsFromAssumeRole(ctx context.Context, cfg *aws.Config, sharedCfg *Shared if len(sharedCfg.MFASerial) != 0 { options.SerialNumber = aws.String(sharedCfg.MFASerial) } + + // add existing credential chain + options.CredentialSources = credentialSources }, } @@ -533,7 +574,6 @@ func credsFromAssumeRole(ctx context.Context, cfg *aws.Config, sharedCfg *Shared return AssumeRoleTokenProviderNotSetError{} } } - cfg.Credentials = stscreds.NewAssumeRoleProvider(sts.NewFromConfig(*cfg), sharedCfg.RoleARN, optFns...) return nil @@ -567,3 +607,21 @@ func wrapWithCredentialsCache( return aws.NewCredentialsCache(provider, optFns...), nil } + +// credentialSource stores the chain of providers that was used to create an instance of +// a credentials provider on the context +type credentialSource struct{} + +func addCredentialSource(ctx context.Context, source aws.CredentialSource) context.Context { + existing, ok := ctx.Value(credentialSource{}).([]aws.CredentialSource) + if !ok { + existing = []aws.CredentialSource{source} + } else { + existing = append(existing, source) + } + return context.WithValue(ctx, credentialSource{}, existing) +} + +func getCredentialSources(ctx context.Context) []aws.CredentialSource { + return ctx.Value(credentialSource{}).([]aws.CredentialSource) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md index 899fb31d2..8357cce2d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md @@ -1,3 +1,7 @@ +# v1.17.61 (2025-02-27) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.17.60 (2025-02-18) * **Bug Fix**: Bump go version to 1.22 diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/provider.go index 5c699f166..a95e6c8bd 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/provider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/provider.go @@ -47,6 +47,10 @@ type Options struct { // // If nil, the provider will default to the EC2 IMDS client. 
Client GetMetadataAPIClient + + // The chain of providers that was used to create this provider + // These values are for reporting purposes and are not meant to be set up directly + CredentialSources []aws.CredentialSource } // New returns an initialized Provider value configured to retrieve @@ -227,3 +231,11 @@ func requestCred(ctx context.Context, client GetMetadataAPIClient, credsName str return respCreds, nil } + +// ProviderSources returns the credential chain that was used to construct this provider +func (p *Provider) ProviderSources() []aws.CredentialSource { + if p.options.CredentialSources == nil { + return []aws.CredentialSource{aws.CredentialSourceIMDS} + } // If no source has been set, assume this is used directly which means just call to assume role + return p.options.CredentialSources +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/provider.go index 2386153a9..c8ac6d9ff 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/provider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/provider.go @@ -98,6 +98,10 @@ type Options struct { // // Will override AuthorizationToken if configured AuthorizationTokenProvider AuthTokenProvider + + // The chain of providers that was used to create this provider + // These values are for reporting purposes and are not meant to be set up directly + CredentialSources []aws.CredentialSource } // AuthTokenProvider defines an interface to dynamically load a value to be passed @@ -191,3 +195,13 @@ func (p *Provider) resolveAuthToken() (string, error) { return authToken, nil } + +var _ aws.CredentialProviderSource = (*Provider)(nil) + +// ProviderSources returns the credential chain that was used to construct this provider +func (p *Provider) ProviderSources() []aws.CredentialSource { + if p.options.CredentialSources == nil { + return []aws.CredentialSource{aws.CredentialSourceHTTP} + } + return p.options.CredentialSources +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go index 9467985e3..393107898 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go @@ -3,4 +3,4 @@ package credentials // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.17.60" +const goModuleVersion = "1.17.61" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/provider.go index 911fcc327..dfc6b2548 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/provider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/provider.go @@ -57,6 +57,9 @@ type Provider struct { type Options struct { // Timeout limits the time a process can run. 
Timeout time.Duration + // The chain of providers that was used to create this provider + // These values are for reporting purposes and are not meant to be set up directly + CredentialSources []aws.CredentialSource } // NewCommandBuilder provides the interface for specifying how command will be @@ -274,6 +277,14 @@ func (p *Provider) executeCredentialProcess(ctx context.Context) ([]byte, error) return out, nil } +// ProviderSources returns the credential chain that was used to construct this provider +func (p *Provider) ProviderSources() []aws.CredentialSource { + if p.options.CredentialSources == nil { + return []aws.CredentialSource{aws.CredentialSourceProcess} + } + return p.options.CredentialSources +} + func executeCommand(cmd *exec.Cmd, exec chan error) { // Start the command err := cmd.Start() diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_credentials_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_credentials_provider.go index 8c230be8e..3ed9cbb3e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_credentials_provider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_credentials_provider.go @@ -49,6 +49,10 @@ type Options struct { // Used by the SSOCredentialProvider if a token configuration // profile is used in the shared config SSOTokenProvider *SSOTokenProvider + + // The chain of providers that was used to create this provider. + // These values are for reporting purposes and are not meant to be set up directly + CredentialSources []aws.CredentialSource } // Provider is an AWS credential provider that retrieves temporary AWS @@ -133,6 +137,14 @@ func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) { }, nil } +// ProviderSources returns the credential chain that was used to construct this provider +func (p *Provider) ProviderSources() []aws.CredentialSource { + if p.options.CredentialSources == nil { + return []aws.CredentialSource{aws.CredentialSourceSSO} + } + return p.options.CredentialSources +} + // InvalidTokenError is the error type that is returned if loaded token has // expired or is otherwise invalid. To refresh the SSO session run AWS SSO // login with the corresponding profile. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/static_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/static_provider.go index d525cac09..a469abdb7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/static_provider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/static_provider.go @@ -22,6 +22,16 @@ func (*StaticCredentialsEmptyError) Error() string { // never expire. 
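A sketch of the nil-fallback convention these providers share, using the static provider shown next: when no chain was recorded, ProviderSources reports the provider's own identity (CredentialSourceCode for hardcoded credentials):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/credentials"
)

func main() {
	// No Source recorded, so ProviderSources falls back to CredentialSourceCode.
	p := credentials.NewStaticCredentialsProvider("AKID", "SECRET", "")
	fmt.Println(p.ProviderSources())
}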
type StaticCredentialsProvider struct { Value aws.Credentials + // These values are for reporting purposes and are not meant to be set up directly + Source []aws.CredentialSource +} + +// ProviderSources returns the credential chain that was used to construct this provider +func (s StaticCredentialsProvider) ProviderSources() []aws.CredentialSource { + if s.Source == nil { + return []aws.CredentialSource{aws.CredentialSourceCode} // If no source has been set, assume this is used directly which means hardcoded creds + } + return s.Source } // NewStaticCredentialsProvider return a StaticCredentialsProvider initialized with the AWS diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/assume_role_provider.go index 4c7f7993f..1ccf71e77 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/assume_role_provider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/assume_role_provider.go @@ -247,6 +247,10 @@ type AssumeRoleOptions struct { // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) // in the IAM User Guide. This parameter is optional. TransitiveTagKeys []string + + // The chain of providers that was used to create this provider + // These values are for reporting purposes and are not meant to be set up directly + CredentialSources []aws.CredentialSource } // NewAssumeRoleProvider constructs and returns a credentials provider that @@ -324,3 +328,11 @@ func (p *AssumeRoleProvider) Retrieve(ctx context.Context) (aws.Credentials, err AccountID: accountID, }, nil } + +// ProviderSources returns the credential chain that was used to construct this provider +func (p *AssumeRoleProvider) ProviderSources() []aws.CredentialSource { + if p.options.CredentialSources == nil { + return []aws.CredentialSource{aws.CredentialSourceSTSAssumeRole} + } // If no source has been set, assume this is used directly which means just call to assume role + return append(p.options.CredentialSources, aws.CredentialSourceSTSAssumeRole) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/web_identity_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/web_identity_provider.go index b4b719708..5f4286dda 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/web_identity_provider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/web_identity_provider.go @@ -64,6 +64,10 @@ type WebIdentityRoleOptions struct { // want to use as managed session policies. The policies must exist in the // same account as the role. 
PolicyARNs []types.PolicyDescriptorType + + // The chain of providers that was used to create this provider + // These values are for reporting purposes and are not meant to be set up directly + CredentialSources []aws.CredentialSource } // IdentityTokenRetriever is an interface for retrieving a JWT @@ -167,3 +171,11 @@ func getAccountID(u *types.AssumedRoleUser) string { } return parts[4] } + +// ProviderSources returns the credential chain that was used to construct this provider +func (p *WebIdentityRoleProvider) ProviderSources() []aws.CredentialSource { + if p.options.CredentialSources == nil { + return []aws.CredentialSource{aws.CredentialSourceSTSAssumeRoleWebID} + } + return p.options.CredentialSources +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md index 47e50bde7..1f69e820e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md @@ -1,3 +1,7 @@ +# v1.16.30 (2025-02-27) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.16.29 (2025-02-18) * **Bug Fix**: Bump go version to 1.22 diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go index 38d1aa949..dba9ef600 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go @@ -3,4 +3,4 @@ package imds // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.16.29" +const goModuleVersion = "1.16.30" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md index 937ddbf49..eae3e16af 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md @@ -1,3 +1,7 @@ +# v1.3.34 (2025-02-27) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.3.33 (2025-02-18) * **Bug Fix**: Bump go version to 1.22 diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go index 1aeb06029..eddabe634 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go @@ -3,4 +3,4 @@ package configsources // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.3.33" +const goModuleVersion = "1.3.34" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md index 2bf083e14..83e5bd28a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md @@ -1,3 +1,7 @@ +# v2.6.34 (2025-02-27) + +* **Dependency Update**: Updated to the latest SDK module versions + # v2.6.33 (2025-02-18) * **Bug Fix**: Bump go version to 1.22 diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go index f0509e332..735dba7ac 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go @@ -3,4 +3,4 @@ package endpoints // goModuleVersion is the tagged release for this module -const goModuleVersion = "2.6.33" +const goModuleVersion = "2.6.34" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md index cb83e7a6b..2b5ceb4b5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md @@ -1,3 +1,7 @@ +# v1.12.15 (2025-02-27) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.12.14 (2025-02-18) * **Bug Fix**: Bump go version to 1.22 diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go index f419fe0a6..a165a100f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go @@ -3,4 +3,4 @@ package presignedurl // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.12.14" +const goModuleVersion = "1.12.15" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md index 2b5b45cd1..b3d70bd79 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md @@ -1,3 +1,8 @@ +# v1.25.0 (2025-02-27) + +* **Feature**: Track credential providers via User-Agent Feature ids +* **Dependency Update**: Updated to the latest SDK module versions + # v1.24.16 (2025-02-18) * **Bug Fix**: Bump go version to 1.22 diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go index 0b244f142..9f10e65ad 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go @@ -761,6 +761,37 @@ func addUserAgentRetryMode(stack *middleware.Stack, options Options) error { return nil } +type setCredentialSourceMiddleware struct { + ua *awsmiddleware.RequestUserAgent + options Options +} + +func (m setCredentialSourceMiddleware) ID() string { return "SetCredentialSourceMiddleware" } + +func (m setCredentialSourceMiddleware) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + asProviderSource, ok := m.options.Credentials.(aws.CredentialProviderSource) + if !ok { + return next.HandleBuild(ctx, in) + } + providerSources := asProviderSource.ProviderSources() + for _, source := range providerSources { + m.ua.AddCredentialsSource(source) + } + return next.HandleBuild(ctx, in) +} + +func addCredentialSource(stack *middleware.Stack, options Options) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + mw := setCredentialSourceMiddleware{ua: ua, options: options} + return stack.Build.Insert(&mw, "UserAgent", middleware.Before) +} + func resolveTracerProvider(options *Options) { if options.TracerProvider == nil { options.TracerProvider = 
&tracing.NopTracerProvider{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go index a65602023..b8031eeea 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go @@ -123,6 +123,9 @@ func (c *Client) addOperationGetRoleCredentialsMiddlewares(stack *middleware.Sta if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetRoleCredentialsValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go index 315526ef1..4294e4d3c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go @@ -128,6 +128,9 @@ func (c *Client) addOperationListAccountRolesMiddlewares(stack *middleware.Stack if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpListAccountRolesValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go index d867b78a6..1db72a995 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go @@ -127,6 +127,9 @@ func (c *Client) addOperationListAccountsMiddlewares(stack *middleware.Stack, op if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpListAccountsValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go index 434b43085..2ca66ca50 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go @@ -122,6 +122,9 @@ func (c *Client) addOperationLogoutMiddlewares(stack *middleware.Stack, options if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpLogoutValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go index 007dfdc66..429aa0cdb 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go @@ -3,4 +3,4 @@ package sso // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.24.16" +const goModuleVersion = "1.25.0" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md index ac92dae97..c040c3ed9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md @@ -1,3 +1,8 @@ +# v1.29.0 (2025-02-27) + +* **Feature**: Track credential providers via User-Agent Feature ids +* **Dependency Update**: Updated to the latest SDK module versions + # v1.28.15 (2025-02-18) * **Bug Fix**: Bump go version to 1.22 diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go index 9b7f4acc8..57440b1fa 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go @@ -761,6 +761,37 @@ func addUserAgentRetryMode(stack *middleware.Stack, options Options) error { return nil } +type setCredentialSourceMiddleware struct { + ua *awsmiddleware.RequestUserAgent + options Options +} + +func (m setCredentialSourceMiddleware) ID() string { return "SetCredentialSourceMiddleware" } + +func (m setCredentialSourceMiddleware) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + asProviderSource, ok := m.options.Credentials.(aws.CredentialProviderSource) + if !ok { + return next.HandleBuild(ctx, in) + } + providerSources := asProviderSource.ProviderSources() + for _, source := range providerSources { + m.ua.AddCredentialsSource(source) + } + return next.HandleBuild(ctx, in) +} + +func addCredentialSource(stack *middleware.Stack, options Options) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + mw := setCredentialSourceMiddleware{ua: ua, options: options} + return stack.Build.Insert(&mw, "UserAgent", middleware.Before) +} + func resolveTracerProvider(options *Options) { if options.TracerProvider == nil { options.TracerProvider = &tracing.NopTracerProvider{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go index 2ab352447..493878338 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go @@ -194,6 +194,9 @@ func (c *Client) addOperationCreateTokenMiddlewares(stack *middleware.Stack, opt if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreateTokenValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateTokenWithIAM.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateTokenWithIAM.go index e5253ce88..2a3ce35b3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateTokenWithIAM.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateTokenWithIAM.go @@ -226,6 +226,9 @@ func (c *Client) addOperationCreateTokenWithIAMMiddlewares(stack *middleware.Sta if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreateTokenWithIAMValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go index 2022270db..1e2d3828f 100644 --- 
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go
index 2022270db..1e2d3828f 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go
@@ -164,6 +164,9 @@ func (c *Client) addOperationRegisterClientMiddlewares(stack *middleware.Stack,
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpRegisterClientValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go
index 203ca5e67..de0108f1f 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go
@@ -146,6 +146,9 @@ func (c *Client) addOperationStartDeviceAuthorizationMiddlewares(stack *middlewa
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpStartDeviceAuthorizationValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go
index 572dc2567..5467b2f28 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go
@@ -3,4 +3,4 @@
 package ssooidc
 
 // goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.28.15"
+const goModuleVersion = "1.29.0"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md
index 156004c49..557e03bd0 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md
@@ -1,3 +1,7 @@
+# v1.33.16 (2025-02-27)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
 # v1.33.15 (2025-02-18)
 
 * **Bug Fix**: Bump go version to 1.22
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go
index 25787325f..fca363d2f 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go
@@ -765,6 +765,37 @@ func addUserAgentRetryMode(stack *middleware.Stack, options Options) error {
 	return nil
 }
 
+type setCredentialSourceMiddleware struct {
+	ua      *awsmiddleware.RequestUserAgent
+	options Options
+}
+
+func (m setCredentialSourceMiddleware) ID() string { return "SetCredentialSourceMiddleware" }
+
+func (m setCredentialSourceMiddleware) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) (
+	out middleware.BuildOutput, metadata middleware.Metadata, err error,
+) {
+	asProviderSource, ok := m.options.Credentials.(aws.CredentialProviderSource)
+	if !ok {
+		return next.HandleBuild(ctx, in)
+	}
+	providerSources := asProviderSource.ProviderSources()
+	for _, source := range providerSources {
+		m.ua.AddCredentialsSource(source)
+	}
+	return next.HandleBuild(ctx, in)
+}
+
+func addCredentialSource(stack *middleware.Stack, options Options) error {
+	ua, err := getOrAddRequestUserAgent(stack)
+	if err != nil {
+		return err
+	}
+
+	mw := setCredentialSourceMiddleware{ua: ua, options: options}
+	return stack.Build.Insert(&mw, "UserAgent", middleware.Before)
+}
+
 func resolveTracerProvider(options *Options) {
 	if options.TracerProvider == nil {
 		options.TracerProvider = &tracing.NopTracerProvider{}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go
index d05632774..524e36eb6 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go
@@ -478,6 +478,9 @@ func (c *Client) addOperationAssumeRoleMiddlewares(stack *middleware.Stack, opti
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpAssumeRoleValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go
index d0e117ac9..400f809e3 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go
@@ -410,6 +410,9 @@ func (c *Client) addOperationAssumeRoleWithSAMLMiddlewares(stack *middleware.Sta
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpAssumeRoleWithSAMLValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go
index 0ae4bc173..e5708cbd1 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go
@@ -430,6 +430,9 @@ func (c *Client) addOperationAssumeRoleWithWebIdentityMiddlewares(stack *middlew
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpAssumeRoleWithWebIdentityValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoot.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoot.go
index cd976e573..a0f7a4671 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoot.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoot.go
@@ -175,6 +175,9 @@ func (c *Client) addOperationAssumeRootMiddlewares(stack *middleware.Stack, opti
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpAssumeRootValidationMiddleware(stack); err != nil {
 		return err
 	}
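With the middleware registered on every STS operation, any client built from shared config reports its credential sources automatically; callers need no code changes. A quick usage sketch using only stock aws-sdk-go-v2 calls (region and profile configuration omitted for brevity):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

func main() {
	ctx := context.Background()

	// LoadDefaultConfig wires up the default credentials provider chain;
	// providers that implement aws.CredentialProviderSource are now reflected
	// in the User-Agent of every request this client sends.
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}

	client := sts.NewFromConfig(cfg)
	out, err := client.GetCallerIdentity(ctx, &sts.GetCallerIdentityInput{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(*out.Arn)
}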
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go
index a56840e1b..9e7cb17d3 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go
@@ -147,6 +147,9 @@ func (c *Client) addOperationDecodeAuthorizationMessageMiddlewares(stack *middle
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpDecodeAuthorizationMessageValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go
index c80b0550b..28c05f13b 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go
@@ -138,6 +138,9 @@ func (c *Client) addOperationGetAccessKeyInfoMiddlewares(stack *middleware.Stack
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpGetAccessKeyInfoValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go
index 49304bdaf..de137b7dc 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go
@@ -129,6 +129,9 @@ func (c *Client) addOperationGetCallerIdentityMiddlewares(stack *middleware.Stac
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetCallerIdentity(options.Region), middleware.Before); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go
index e2ecc792a..67c041b30 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go
@@ -351,6 +351,9 @@ func (c *Client) addOperationGetFederationTokenMiddlewares(stack *middleware.Sta
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpGetFederationTokenValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go
index fdc451117..903d151ce 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go
@@ -200,6 +200,9 @@ func (c *Client) addOperationGetSessionTokenMiddlewares(stack *middleware.Stack,
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetSessionToken(options.Region), middleware.Before); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go
index 12500e7a1..32af427ca 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go
@@ -3,4 +3,4 @@
 package sts
 
 // goModuleVersion
is the tagged release for this module -const goModuleVersion = "1.33.15" +const goModuleVersion = "1.33.16" diff --git a/vendor/github.com/aws/aws-sdk-go/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go/LICENSE.txt deleted file mode 100644 index d64569567..000000000 --- a/vendor/github.com/aws/aws-sdk-go/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. 
We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go/NOTICE.txt b/vendor/github.com/aws/aws-sdk-go/NOTICE.txt deleted file mode 100644 index 899129ecc..000000000 --- a/vendor/github.com/aws/aws-sdk-go/NOTICE.txt +++ /dev/null @@ -1,3 +0,0 @@ -AWS SDK for Go -Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. -Copyright 2014-2015 Stripe, Inc. diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go deleted file mode 100644 index 99849c0e1..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go +++ /dev/null @@ -1,164 +0,0 @@ -// Package awserr represents API error interface accessors for the SDK. -package awserr - -// An Error wraps lower level errors with code, message and an original error. -// The underlying concrete error type may also satisfy other interfaces which -// can be to used to obtain more specific information about the error. -// -// Calling Error() or String() will always include the full information about -// an error based on its underlying type. -// -// Example: -// -// output, err := s3manage.Upload(svc, input, opts) -// if err != nil { -// if awsErr, ok := err.(awserr.Error); ok { -// // Get error details -// log.Println("Error:", awsErr.Code(), awsErr.Message()) -// -// // Prints out full error message, including original error if there was one. -// log.Println("Error:", awsErr.Error()) -// -// // Get original error -// if origErr := awsErr.OrigErr(); origErr != nil { -// // operate on original error. -// } -// } else { -// fmt.Println(err.Error()) -// } -// } -// -type Error interface { - // Satisfy the generic error interface. - error - - // Returns the short phrase depicting the classification of the error. - Code() string - - // Returns the error details message. - Message() string - - // Returns the original error if one was set. Nil is returned if not set. - OrigErr() error -} - -// BatchError is a batch of errors which also wraps lower level errors with -// code, message, and original errors. Calling Error() will include all errors -// that occurred in the batch. -// -// Deprecated: Replaced with BatchedErrors. Only defined for backwards -// compatibility. -type BatchError interface { - // Satisfy the generic error interface. - error - - // Returns the short phrase depicting the classification of the error. - Code() string - - // Returns the error details message. - Message() string - - // Returns the original error if one was set. Nil is returned if not set. - OrigErrs() []error -} - -// BatchedErrors is a batch of errors which also wraps lower level errors with -// code, message, and original errors. Calling Error() will include all errors -// that occurred in the batch. 
-// -// Replaces BatchError -type BatchedErrors interface { - // Satisfy the base Error interface. - Error - - // Returns the original error if one was set. Nil is returned if not set. - OrigErrs() []error -} - -// New returns an Error object described by the code, message, and origErr. -// -// If origErr satisfies the Error interface it will not be wrapped within a new -// Error object and will instead be returned. -func New(code, message string, origErr error) Error { - var errs []error - if origErr != nil { - errs = append(errs, origErr) - } - return newBaseError(code, message, errs) -} - -// NewBatchError returns an BatchedErrors with a collection of errors as an -// array of errors. -func NewBatchError(code, message string, errs []error) BatchedErrors { - return newBaseError(code, message, errs) -} - -// A RequestFailure is an interface to extract request failure information from -// an Error such as the request ID of the failed request returned by a service. -// RequestFailures may not always have a requestID value if the request failed -// prior to reaching the service such as a connection error. -// -// Example: -// -// output, err := s3manage.Upload(svc, input, opts) -// if err != nil { -// if reqerr, ok := err.(RequestFailure); ok { -// log.Println("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID()) -// } else { -// log.Println("Error:", err.Error()) -// } -// } -// -// Combined with awserr.Error: -// -// output, err := s3manage.Upload(svc, input, opts) -// if err != nil { -// if awsErr, ok := err.(awserr.Error); ok { -// // Generic AWS Error with Code, Message, and original error (if any) -// fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) -// -// if reqErr, ok := err.(awserr.RequestFailure); ok { -// // A service error occurred -// fmt.Println(reqErr.StatusCode(), reqErr.RequestID()) -// } -// } else { -// fmt.Println(err.Error()) -// } -// } -// -type RequestFailure interface { - Error - - // The status code of the HTTP response. - StatusCode() int - - // The request ID returned by the service for a request failure. This will - // be empty if no request ID is available such as the request failed due - // to a connection error. - RequestID() string -} - -// NewRequestFailure returns a wrapped error with additional information for -// request status code, and service requestID. -// -// Should be used to wrap all request which involve service requests. Even if -// the request failed without a service response, but had an HTTP status code -// that may be meaningful. -func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure { - return newRequestError(err, statusCode, reqID) -} - -// UnmarshalError provides the interface for the SDK failing to unmarshal data. -type UnmarshalError interface { - awsError - Bytes() []byte -} - -// NewUnmarshalError returns an initialized UnmarshalError error wrapper adding -// the bytes that fail to unmarshal to the error. -func NewUnmarshalError(err error, msg string, bytes []byte) UnmarshalError { - return &unmarshalError{ - awsError: New("UnmarshalError", msg, err), - bytes: bytes, - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go deleted file mode 100644 index 9cf7eaf40..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go +++ /dev/null @@ -1,221 +0,0 @@ -package awserr - -import ( - "encoding/hex" - "fmt" -) - -// SprintError returns a string of the formatted error code. 
-// -// Both extra and origErr are optional. If they are included their lines -// will be added, but if they are not included their lines will be ignored. -func SprintError(code, message, extra string, origErr error) string { - msg := fmt.Sprintf("%s: %s", code, message) - if extra != "" { - msg = fmt.Sprintf("%s\n\t%s", msg, extra) - } - if origErr != nil { - msg = fmt.Sprintf("%s\ncaused by: %s", msg, origErr.Error()) - } - return msg -} - -// A baseError wraps the code and message which defines an error. It also -// can be used to wrap an original error object. -// -// Should be used as the root for errors satisfying the awserr.Error. Also -// for any error which does not fit into a specific error wrapper type. -type baseError struct { - // Classification of error - code string - - // Detailed information about error - message string - - // Optional original error this error is based off of. Allows building - // chained errors. - errs []error -} - -// newBaseError returns an error object for the code, message, and errors. -// -// code is a short no whitespace phrase depicting the classification of -// the error that is being created. -// -// message is the free flow string containing detailed information about the -// error. -// -// origErrs is the error objects which will be nested under the new errors to -// be returned. -func newBaseError(code, message string, origErrs []error) *baseError { - b := &baseError{ - code: code, - message: message, - errs: origErrs, - } - - return b -} - -// Error returns the string representation of the error. -// -// See ErrorWithExtra for formatting. -// -// Satisfies the error interface. -func (b baseError) Error() string { - size := len(b.errs) - if size > 0 { - return SprintError(b.code, b.message, "", errorList(b.errs)) - } - - return SprintError(b.code, b.message, "", nil) -} - -// String returns the string representation of the error. -// Alias for Error to satisfy the stringer interface. -func (b baseError) String() string { - return b.Error() -} - -// Code returns the short phrase depicting the classification of the error. -func (b baseError) Code() string { - return b.code -} - -// Message returns the error details message. -func (b baseError) Message() string { - return b.message -} - -// OrigErr returns the original error if one was set. Nil is returned if no -// error was set. This only returns the first element in the list. If the full -// list is needed, use BatchedErrors. -func (b baseError) OrigErr() error { - switch len(b.errs) { - case 0: - return nil - case 1: - return b.errs[0] - default: - if err, ok := b.errs[0].(Error); ok { - return NewBatchError(err.Code(), err.Message(), b.errs[1:]) - } - return NewBatchError("BatchedErrors", - "multiple errors occurred", b.errs) - } -} - -// OrigErrs returns the original errors if one was set. An empty slice is -// returned if no error was set. -func (b baseError) OrigErrs() []error { - return b.errs -} - -// So that the Error interface type can be included as an anonymous field -// in the requestError struct and not conflict with the error.Error() method. -type awsError Error - -// A requestError wraps a request or service error. -// -// Composed of baseError for code, message, and original error. -type requestError struct { - awsError - statusCode int - requestID string - bytes []byte -} - -// newRequestError returns a wrapped error with additional information for -// request status code, and service requestID. -// -// Should be used to wrap all request which involve service requests. 
Even if -// the request failed without a service response, but had an HTTP status code -// that may be meaningful. -// -// Also wraps original errors via the baseError. -func newRequestError(err Error, statusCode int, requestID string) *requestError { - return &requestError{ - awsError: err, - statusCode: statusCode, - requestID: requestID, - } -} - -// Error returns the string representation of the error. -// Satisfies the error interface. -func (r requestError) Error() string { - extra := fmt.Sprintf("status code: %d, request id: %s", - r.statusCode, r.requestID) - return SprintError(r.Code(), r.Message(), extra, r.OrigErr()) -} - -// String returns the string representation of the error. -// Alias for Error to satisfy the stringer interface. -func (r requestError) String() string { - return r.Error() -} - -// StatusCode returns the wrapped status code for the error -func (r requestError) StatusCode() int { - return r.statusCode -} - -// RequestID returns the wrapped requestID -func (r requestError) RequestID() string { - return r.requestID -} - -// OrigErrs returns the original errors if one was set. An empty slice is -// returned if no error was set. -func (r requestError) OrigErrs() []error { - if b, ok := r.awsError.(BatchedErrors); ok { - return b.OrigErrs() - } - return []error{r.OrigErr()} -} - -type unmarshalError struct { - awsError - bytes []byte -} - -// Error returns the string representation of the error. -// Satisfies the error interface. -func (e unmarshalError) Error() string { - extra := hex.Dump(e.bytes) - return SprintError(e.Code(), e.Message(), extra, e.OrigErr()) -} - -// String returns the string representation of the error. -// Alias for Error to satisfy the stringer interface. -func (e unmarshalError) String() string { - return e.Error() -} - -// Bytes returns the bytes that failed to unmarshal. -func (e unmarshalError) Bytes() []byte { - return e.bytes -} - -// An error list that satisfies the golang interface -type errorList []error - -// Error returns the string representation of the error. -// -// Satisfies the error interface. -func (e errorList) Error() string { - msg := "" - // How do we want to handle the array size being zero - if size := len(e); size > 0 { - for i := 0; i < size; i++ { - msg += e[i].Error() - // We check the next index to see if it is within the slice. - // If it is, then we append a newline. We do this, because unit tests - // could be broken with the additional '\n' - if i+1 < size { - msg += "\n" - } - } - } - return msg -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go deleted file mode 100644 index cad3b9a48..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go +++ /dev/null @@ -1,193 +0,0 @@ -package endpoints - -import ( - "encoding/json" - "fmt" - "io" - - "github.com/aws/aws-sdk-go/aws/awserr" -) - -type modelDefinition map[string]json.RawMessage - -// A DecodeModelOptions are the options for how the endpoints model definition -// are decoded. -type DecodeModelOptions struct { - SkipCustomizations bool -} - -// Set combines all of the option functions together. -func (d *DecodeModelOptions) Set(optFns ...func(*DecodeModelOptions)) { - for _, fn := range optFns { - fn(d) - } -} - -// DecodeModel unmarshals a Regions and Endpoint model definition file into -// a endpoint Resolver. If the file format is not supported, or an error occurs -// when unmarshaling the model an error will be returned. 
-// -// Casting the return value of this func to a EnumPartitions will -// allow you to get a list of the partitions in the order the endpoints -// will be resolved in. -// -// resolver, err := endpoints.DecodeModel(reader) -// -// partitions := resolver.(endpoints.EnumPartitions).Partitions() -// for _, p := range partitions { -// // ... inspect partitions -// } -func DecodeModel(r io.Reader, optFns ...func(*DecodeModelOptions)) (Resolver, error) { - var opts DecodeModelOptions - opts.Set(optFns...) - - // Get the version of the partition file to determine what - // unmarshaling model to use. - modelDef := modelDefinition{} - if err := json.NewDecoder(r).Decode(&modelDef); err != nil { - return nil, newDecodeModelError("failed to decode endpoints model", err) - } - - var version string - if b, ok := modelDef["version"]; ok { - version = string(b) - } else { - return nil, newDecodeModelError("endpoints version not found in model", nil) - } - - if version == "3" { - return decodeV3Endpoints(modelDef, opts) - } - - return nil, newDecodeModelError( - fmt.Sprintf("endpoints version %s, not supported", version), nil) -} - -func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (Resolver, error) { - b, ok := modelDef["partitions"] - if !ok { - return nil, newDecodeModelError("endpoints model missing partitions", nil) - } - - ps := partitions{} - if err := json.Unmarshal(b, &ps); err != nil { - return nil, newDecodeModelError("failed to decode endpoints model", err) - } - - if opts.SkipCustomizations { - return ps, nil - } - - // Customization - for i := 0; i < len(ps); i++ { - p := &ps[i] - custRegionalS3(p) - custRmIotDataService(p) - custFixAppAutoscalingChina(p) - custFixAppAutoscalingUsGov(p) - } - - return ps, nil -} - -func custRegionalS3(p *partition) { - if p.ID != "aws" { - return - } - - service, ok := p.Services["s3"] - if !ok { - return - } - - const awsGlobal = "aws-global" - const usEast1 = "us-east-1" - - // If global endpoint already exists no customization needed. 
- if _, ok := service.Endpoints[endpointKey{Region: awsGlobal}]; ok { - return - } - - service.PartitionEndpoint = awsGlobal - if _, ok := service.Endpoints[endpointKey{Region: usEast1}]; !ok { - service.Endpoints[endpointKey{Region: usEast1}] = endpoint{} - } - service.Endpoints[endpointKey{Region: awsGlobal}] = endpoint{ - Hostname: "s3.amazonaws.com", - CredentialScope: credentialScope{ - Region: usEast1, - }, - } - - p.Services["s3"] = service -} - -func custRmIotDataService(p *partition) { - delete(p.Services, "data.iot") -} - -func custFixAppAutoscalingChina(p *partition) { - if p.ID != "aws-cn" { - return - } - - const serviceName = "application-autoscaling" - s, ok := p.Services[serviceName] - if !ok { - return - } - - const expectHostname = `autoscaling.{region}.amazonaws.com` - serviceDefault := s.Defaults[defaultKey{}] - if e, a := expectHostname, serviceDefault.Hostname; e != a { - fmt.Printf("custFixAppAutoscalingChina: ignoring customization, expected %s, got %s\n", e, a) - return - } - serviceDefault.Hostname = expectHostname + ".cn" - s.Defaults[defaultKey{}] = serviceDefault - p.Services[serviceName] = s -} - -func custFixAppAutoscalingUsGov(p *partition) { - if p.ID != "aws-us-gov" { - return - } - - const serviceName = "application-autoscaling" - s, ok := p.Services[serviceName] - if !ok { - return - } - - serviceDefault := s.Defaults[defaultKey{}] - if a := serviceDefault.CredentialScope.Service; a != "" { - fmt.Printf("custFixAppAutoscalingUsGov: ignoring customization, expected empty credential scope service, got %s\n", a) - return - } - - if a := serviceDefault.Hostname; a != "" { - fmt.Printf("custFixAppAutoscalingUsGov: ignoring customization, expected empty hostname, got %s\n", a) - return - } - - serviceDefault.CredentialScope.Service = "application-autoscaling" - serviceDefault.Hostname = "autoscaling.{region}.amazonaws.com" - - if s.Defaults == nil { - s.Defaults = make(endpointDefaults) - } - - s.Defaults[defaultKey{}] = serviceDefault - - p.Services[serviceName] = s -} - -type decodeModelError struct { - awsError -} - -func newDecodeModelError(msg string, err error) decodeModelError { - return decodeModelError{ - awsError: awserr.New("DecodeEndpointsModelError", msg, err), - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go deleted file mode 100644 index ad0d14b86..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ /dev/null @@ -1,45748 +0,0 @@ -// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT. - -package endpoints - -import ( - "regexp" -) - -// Partition identifiers -const ( - AwsPartitionID = "aws" // AWS Standard partition. - AwsCnPartitionID = "aws-cn" // AWS China partition. - AwsUsGovPartitionID = "aws-us-gov" // AWS GovCloud (US) partition. - AwsIsoPartitionID = "aws-iso" // AWS ISO (US) partition. - AwsIsoBPartitionID = "aws-iso-b" // AWS ISOB (US) partition. - AwsIsoEPartitionID = "aws-iso-e" // AWS ISOE (Europe) partition. - AwsIsoFPartitionID = "aws-iso-f" // AWS ISOF partition. -) - -// AWS Standard partition's regions. -const ( - AfSouth1RegionID = "af-south-1" // Africa (Cape Town). - ApEast1RegionID = "ap-east-1" // Asia Pacific (Hong Kong). - ApNortheast1RegionID = "ap-northeast-1" // Asia Pacific (Tokyo). - ApNortheast2RegionID = "ap-northeast-2" // Asia Pacific (Seoul). - ApNortheast3RegionID = "ap-northeast-3" // Asia Pacific (Osaka). - ApSouth1RegionID = "ap-south-1" // Asia Pacific (Mumbai). 
- ApSouth2RegionID = "ap-south-2" // Asia Pacific (Hyderabad). - ApSoutheast1RegionID = "ap-southeast-1" // Asia Pacific (Singapore). - ApSoutheast2RegionID = "ap-southeast-2" // Asia Pacific (Sydney). - ApSoutheast3RegionID = "ap-southeast-3" // Asia Pacific (Jakarta). - ApSoutheast4RegionID = "ap-southeast-4" // Asia Pacific (Melbourne). - CaCentral1RegionID = "ca-central-1" // Canada (Central). - CaWest1RegionID = "ca-west-1" // Canada West (Calgary). - EuCentral1RegionID = "eu-central-1" // Europe (Frankfurt). - EuCentral2RegionID = "eu-central-2" // Europe (Zurich). - EuNorth1RegionID = "eu-north-1" // Europe (Stockholm). - EuSouth1RegionID = "eu-south-1" // Europe (Milan). - EuSouth2RegionID = "eu-south-2" // Europe (Spain). - EuWest1RegionID = "eu-west-1" // Europe (Ireland). - EuWest2RegionID = "eu-west-2" // Europe (London). - EuWest3RegionID = "eu-west-3" // Europe (Paris). - IlCentral1RegionID = "il-central-1" // Israel (Tel Aviv). - MeCentral1RegionID = "me-central-1" // Middle East (UAE). - MeSouth1RegionID = "me-south-1" // Middle East (Bahrain). - SaEast1RegionID = "sa-east-1" // South America (Sao Paulo). - UsEast1RegionID = "us-east-1" // US East (N. Virginia). - UsEast2RegionID = "us-east-2" // US East (Ohio). - UsWest1RegionID = "us-west-1" // US West (N. California). - UsWest2RegionID = "us-west-2" // US West (Oregon). -) - -// AWS China partition's regions. -const ( - CnNorth1RegionID = "cn-north-1" // China (Beijing). - CnNorthwest1RegionID = "cn-northwest-1" // China (Ningxia). -) - -// AWS GovCloud (US) partition's regions. -const ( - UsGovEast1RegionID = "us-gov-east-1" // AWS GovCloud (US-East). - UsGovWest1RegionID = "us-gov-west-1" // AWS GovCloud (US-West). -) - -// AWS ISO (US) partition's regions. -const ( - UsIsoEast1RegionID = "us-iso-east-1" // US ISO East. - UsIsoWest1RegionID = "us-iso-west-1" // US ISO WEST. -) - -// AWS ISOB (US) partition's regions. -const ( - UsIsobEast1RegionID = "us-isob-east-1" // US ISOB East (Ohio). -) - -// AWS ISOE (Europe) partition's regions. -const () - -// AWS ISOF partition's regions. -const () - -// DefaultResolver returns an Endpoint resolver that will be able -// to resolve endpoints for: AWS Standard, AWS China, AWS GovCloud (US), AWS ISO (US), AWS ISOB (US), AWS ISOE (Europe), and AWS ISOF. -// -// Use DefaultPartitions() to get the list of the default partitions. -func DefaultResolver() Resolver { - return defaultPartitions -} - -// DefaultPartitions returns a list of the partitions the SDK is bundled -// with. The available partitions are: AWS Standard, AWS China, AWS GovCloud (US), AWS ISO (US), AWS ISOB (US), AWS ISOE (Europe), and AWS ISOF. -// -// partitions := endpoints.DefaultPartitions -// for _, p := range partitions { -// // ... inspect partitions -// } -func DefaultPartitions() []Partition { - return defaultPartitions.Partitions() -} - -var defaultPartitions = partitions{ - awsPartition, - awscnPartition, - awsusgovPartition, - awsisoPartition, - awsisobPartition, - awsisoePartition, - awsisofPartition, -} - -// AwsPartition returns the Resolver for AWS Standard. 
-func AwsPartition() Partition { - return awsPartition.Partition() -} - -var awsPartition = partition{ - ID: "aws", - Name: "AWS Standard", - DNSSuffix: "amazonaws.com", - RegionRegex: regionRegex{ - Regexp: func() *regexp.Regexp { - reg, _ := regexp.Compile("^(us|eu|ap|sa|ca|me|af|il)\\-\\w+\\-\\d+$") - return reg - }(), - }, - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Hostname: "{service}.{region}.{dnsSuffix}", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - defaultKey{ - Variant: dualStackVariant, - }: endpoint{ - Hostname: "{service}.{region}.{dnsSuffix}", - DNSSuffix: "api.aws", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "{service}-fips.{region}.{dnsSuffix}", - DNSSuffix: "amazonaws.com", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - defaultKey{ - Variant: fipsVariant | dualStackVariant, - }: endpoint{ - Hostname: "{service}-fips.{region}.{dnsSuffix}", - DNSSuffix: "api.aws", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - Regions: regions{ - "af-south-1": region{ - Description: "Africa (Cape Town)", - }, - "ap-east-1": region{ - Description: "Asia Pacific (Hong Kong)", - }, - "ap-northeast-1": region{ - Description: "Asia Pacific (Tokyo)", - }, - "ap-northeast-2": region{ - Description: "Asia Pacific (Seoul)", - }, - "ap-northeast-3": region{ - Description: "Asia Pacific (Osaka)", - }, - "ap-south-1": region{ - Description: "Asia Pacific (Mumbai)", - }, - "ap-south-2": region{ - Description: "Asia Pacific (Hyderabad)", - }, - "ap-southeast-1": region{ - Description: "Asia Pacific (Singapore)", - }, - "ap-southeast-2": region{ - Description: "Asia Pacific (Sydney)", - }, - "ap-southeast-3": region{ - Description: "Asia Pacific (Jakarta)", - }, - "ap-southeast-4": region{ - Description: "Asia Pacific (Melbourne)", - }, - "ca-central-1": region{ - Description: "Canada (Central)", - }, - "ca-west-1": region{ - Description: "Canada West (Calgary)", - }, - "eu-central-1": region{ - Description: "Europe (Frankfurt)", - }, - "eu-central-2": region{ - Description: "Europe (Zurich)", - }, - "eu-north-1": region{ - Description: "Europe (Stockholm)", - }, - "eu-south-1": region{ - Description: "Europe (Milan)", - }, - "eu-south-2": region{ - Description: "Europe (Spain)", - }, - "eu-west-1": region{ - Description: "Europe (Ireland)", - }, - "eu-west-2": region{ - Description: "Europe (London)", - }, - "eu-west-3": region{ - Description: "Europe (Paris)", - }, - "il-central-1": region{ - Description: "Israel (Tel Aviv)", - }, - "me-central-1": region{ - Description: "Middle East (UAE)", - }, - "me-south-1": region{ - Description: "Middle East (Bahrain)", - }, - "sa-east-1": region{ - Description: "South America (Sao Paulo)", - }, - "us-east-1": region{ - Description: "US East (N. Virginia)", - }, - "us-east-2": region{ - Description: "US East (Ohio)", - }, - "us-west-1": region{ - Description: "US West (N. 
California)", - }, - "us-west-2": region{ - Description: "US West (Oregon)", - }, - }, - Services: services{ - "a4b": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - }, - }, - "access-analyzer": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "access-analyzer-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "ca-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "access-analyzer-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "access-analyzer-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "access-analyzer-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "access-analyzer-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "access-analyzer-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "access-analyzer-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "access-analyzer-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "access-analyzer-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: 
"us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "access-analyzer-fips.us-west-2.amazonaws.com", - }, - }, - }, - "account": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-global", - }: endpoint{ - Hostname: "account.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "acm": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "acm-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "ca-central-1-fips", - }: endpoint{ - Hostname: "acm-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "ca-west-1", - }: endpoint{}, - endpointKey{ - Region: "ca-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "acm-fips.ca-west-1.amazonaws.com", - }, - endpointKey{ - Region: "ca-west-1-fips", - }: endpoint{ - Hostname: "acm-fips.ca-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "acm-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "acm-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "acm-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2-fips", - }: endpoint{ - Hostname: "acm-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: 
"acm-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1-fips", - }: endpoint{ - Hostname: "acm-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "acm-fips.us-west-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: "acm-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "acm-pca": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "acm-pca-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "ca-west-1", - }: endpoint{}, - endpointKey{ - Region: "ca-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "acm-pca-fips.ca-west-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "acm-pca-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ca-west-1", - }: endpoint{ - Hostname: "acm-pca-fips.ca-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "acm-pca-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "acm-pca-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "acm-pca-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "acm-pca-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "il-central-1", - }: 
endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "acm-pca-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "acm-pca-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "acm-pca-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "acm-pca-fips.us-west-2.amazonaws.com", - }, - }, - }, - "agreement-marketplace": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - }, - }, - "airflow": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "amplify": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "amplifybackend": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - 
endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "amplifyuibuilder": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "aoss": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "api.detective": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api.detective-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "ca-central-1-fips", - }: endpoint{ - Hostname: "api.detective-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - 
endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api.detective-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "api.detective-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api.detective-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2-fips", - }: endpoint{ - Hostname: "api.detective-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api.detective-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1-fips", - }: endpoint{ - Hostname: "api.detective-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api.detective-fips.us-west-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: "api.detective-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "api.ecr": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "ecr-fips.{region}.{dnsSuffix}", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{ - Hostname: "api.ecr.af-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "af-south-1", - }, - }, - endpointKey{ - Region: "ap-east-1", - }: endpoint{ - Hostname: "api.ecr.ap-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-east-1", - }, - }, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{ - Hostname: "api.ecr.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{ - Hostname: "api.ecr.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{ - Hostname: "api.ecr.ap-northeast-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-3", - }, - }, - endpointKey{ - Region: "ap-south-1", - }: endpoint{ - Hostname: "api.ecr.ap-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - }, - endpointKey{ - Region: "ap-south-2", - }: endpoint{ - Hostname: "api.ecr.ap-south-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-2", - }, - }, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{ - Hostname: "api.ecr.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{ - 
Hostname: "api.ecr.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{ - Hostname: "api.ecr.ap-southeast-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-3", - }, - }, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{ - Hostname: "api.ecr.ap-southeast-4.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-4", - }, - }, - endpointKey{ - Region: "ca-central-1", - }: endpoint{ - Hostname: "api.ecr.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - endpointKey{ - Region: "ca-west-1", - }: endpoint{ - Hostname: "api.ecr.ca-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-west-1", - }, - }, - endpointKey{ - Region: "dkr-us-east-1", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "dkr-us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ecr-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "dkr-us-east-2", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "dkr-us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ecr-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "dkr-us-west-1", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "dkr-us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ecr-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "dkr-us-west-2", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "dkr-us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ecr-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{ - Hostname: "api.ecr.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - endpointKey{ - Region: "eu-central-2", - }: endpoint{ - Hostname: "api.ecr.eu-central-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-2", - }, - }, - endpointKey{ - Region: "eu-north-1", - }: endpoint{ - Hostname: "api.ecr.eu-north-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-north-1", - }, - }, - endpointKey{ - Region: "eu-south-1", - }: endpoint{ - Hostname: "api.ecr.eu-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-south-1", - }, - }, - endpointKey{ - Region: "eu-south-2", - }: endpoint{ - Hostname: "api.ecr.eu-south-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-south-2", - }, - }, - endpointKey{ - Region: "eu-west-1", - }: endpoint{ - Hostname: "api.ecr.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - endpointKey{ - Region: "eu-west-2", - }: endpoint{ - Hostname: "api.ecr.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - }, - 
endpointKey{ - Region: "eu-west-3", - }: endpoint{ - Hostname: "api.ecr.eu-west-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-3", - }, - }, - endpointKey{ - Region: "fips-dkr-us-east-1", - }: endpoint{ - Hostname: "ecr-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-dkr-us-east-2", - }: endpoint{ - Hostname: "ecr-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-dkr-us-west-1", - }: endpoint{ - Hostname: "ecr-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-dkr-us-west-2", - }: endpoint{ - Hostname: "ecr-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "ecr-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "ecr-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "ecr-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "ecr-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "il-central-1", - }: endpoint{ - Hostname: "api.ecr.il-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "il-central-1", - }, - }, - endpointKey{ - Region: "me-central-1", - }: endpoint{ - Hostname: "api.ecr.me-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "me-central-1", - }, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{ - Hostname: "api.ecr.me-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "me-south-1", - }, - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{ - Hostname: "api.ecr.sa-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "sa-east-1", - }, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{ - Hostname: "api.ecr.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ecr-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{ - Hostname: "api.ecr.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ecr-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{ - Hostname: "api.ecr.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ecr-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: 
"us-west-1", - }, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{ - Hostname: "api.ecr.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ecr-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "api.ecr-public": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-east-1", - }: endpoint{ - Hostname: "api.ecr-public.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{ - Hostname: "api.ecr-public.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "api.elastic-inference": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{ - Hostname: "api.elastic-inference.ap-northeast-1.amazonaws.com", - }, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{ - Hostname: "api.elastic-inference.ap-northeast-2.amazonaws.com", - }, - endpointKey{ - Region: "eu-west-1", - }: endpoint{ - Hostname: "api.elastic-inference.eu-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{ - Hostname: "api.elastic-inference.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{ - Hostname: "api.elastic-inference.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{ - Hostname: "api.elastic-inference.us-west-2.amazonaws.com", - }, - }, - }, - "api.fleethub.iot": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api.fleethub.iot-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "api.fleethub.iot-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "api.fleethub.iot-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "api.fleethub.iot-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "api.fleethub.iot-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api.fleethub.iot-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - 
Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api.fleethub.iot-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api.fleethub.iot-fips.us-west-2.amazonaws.com", - }, - }, - }, - "api.iotdeviceadvisor": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{ - Hostname: "api.iotdeviceadvisor.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - endpointKey{ - Region: "eu-west-1", - }: endpoint{ - Hostname: "api.iotdeviceadvisor.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{ - Hostname: "api.iotdeviceadvisor.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{ - Hostname: "api.iotdeviceadvisor.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "api.iotwireless": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{ - Hostname: "api.iotwireless.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{ - Hostname: "api.iotwireless.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{ - Hostname: "api.iotwireless.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - endpointKey{ - Region: "eu-west-1", - }: endpoint{ - Hostname: "api.iotwireless.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{ - Hostname: "api.iotwireless.sa-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "sa-east-1", - }, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{ - Hostname: "api.iotwireless.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{ - Hostname: "api.iotwireless.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "api.mediatailor": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - 
"api.pricing": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - CredentialScope: credentialScope{ - Service: "pricing", - }, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - }, - }, - "api.sagemaker": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "api-fips.sagemaker.{region}.{dnsSuffix}", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api-fips.sagemaker.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "api-fips.sagemaker.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api-fips.sagemaker.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2-fips", - }: endpoint{ - Hostname: "api-fips.sagemaker.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api-fips.sagemaker.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1-fips", - }: endpoint{ - Hostname: "api-fips.sagemaker.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api-fips.sagemaker.us-west-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: 
"api-fips.sagemaker.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "api.tunneling.iot": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "api.tunneling.iot-fips.{region}.{dnsSuffix}", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api.tunneling.iot-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "api.tunneling.iot-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "api.tunneling.iot-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "api.tunneling.iot-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "api.tunneling.iot-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "api.tunneling.iot-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api.tunneling.iot-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api.tunneling.iot-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api.tunneling.iot-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api.tunneling.iot-fips.us-west-2.amazonaws.com", - }, - }, - }, - "apigateway": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", 
- }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "apigateway-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "ca-west-1", - }: endpoint{}, - endpointKey{ - Region: "ca-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "apigateway-fips.ca-west-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "apigateway-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ca-west-1", - }: endpoint{ - Hostname: "apigateway-fips.ca-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "apigateway-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "apigateway-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "apigateway-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "apigateway-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "apigateway-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "apigateway-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "apigateway-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "apigateway-fips.us-west-2.amazonaws.com", - }, - }, - }, - "app-integrations": 
service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "appconfig": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "appconfigdata": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - 
endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "appflow": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "appflow-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "appflow-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "appflow-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "appflow-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "appflow-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "appflow-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "appflow-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "appflow-fips.us-west-2.amazonaws.com", - }, - }, - }, - "application-autoscaling": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - 
Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "applicationinsights": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "appmesh": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "af-south-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "appmesh.af-south-1.api.aws", - }, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "appmesh.ap-east-1.api.aws", - }, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "appmesh.ap-northeast-1.api.aws", - }, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "appmesh.ap-northeast-2.api.aws", - }, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, 
- endpointKey{ - Region: "ap-northeast-3", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "appmesh.ap-northeast-3.api.aws", - }, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "appmesh.ap-south-1.api.aws", - }, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "appmesh.ap-southeast-1.api.aws", - }, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "appmesh.ap-southeast-2.api.aws", - }, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "appmesh.ap-southeast-3.api.aws", - }, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "appmesh.ca-central-1.api.aws", - }, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "appmesh-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant | dualStackVariant, - }: endpoint{ - Hostname: "appmesh-fips.ca-central-1.api.aws", - }, - endpointKey{ - Region: "ca-central-1-fips", - }: endpoint{ - Hostname: "appmesh-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "appmesh.eu-central-1.api.aws", - }, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "appmesh.eu-north-1.api.aws", - }, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "appmesh.eu-south-1.api.aws", - }, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "appmesh.eu-west-1.api.aws", - }, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "appmesh.eu-west-2.api.aws", - }, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "appmesh.eu-west-3.api.aws", - }, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "il-central-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "appmesh.il-central-1.api.aws", - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "appmesh.me-south-1.api.aws", - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "appmesh.sa-east-1.api.aws", - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "appmesh.us-east-1.api.aws", - }, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - 
Hostname: "appmesh-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant | dualStackVariant, - }: endpoint{ - Hostname: "appmesh-fips.us-east-1.api.aws", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "appmesh-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "appmesh.us-east-2.api.aws", - }, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "appmesh-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant | dualStackVariant, - }: endpoint{ - Hostname: "appmesh-fips.us-east-2.api.aws", - }, - endpointKey{ - Region: "us-east-2-fips", - }: endpoint{ - Hostname: "appmesh-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "appmesh.us-west-1.api.aws", - }, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "appmesh-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant | dualStackVariant, - }: endpoint{ - Hostname: "appmesh-fips.us-west-1.api.aws", - }, - endpointKey{ - Region: "us-west-1-fips", - }: endpoint{ - Hostname: "appmesh-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "appmesh.us-west-2.api.aws", - }, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "appmesh-fips.us-west-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant | dualStackVariant, - }: endpoint{ - Hostname: "appmesh-fips.us-west-2.api.aws", - }, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: "appmesh-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "apprunner": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "apprunner-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "apprunner-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "apprunner-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - 
endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "apprunner-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "apprunner-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "apprunner-fips.us-west-2.amazonaws.com", - }, - }, - }, - "appstream2": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - CredentialScope: credentialScope{ - Service: "appstream", - }, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "fips", - }: endpoint{ - Hostname: "appstream2-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "appstream2-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "appstream2-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "appstream2-fips.us-west-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: "appstream2-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "appsync": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: 
"eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "aps": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "arc-zonal-shift": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "athena": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "af-south-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "athena.af-south-1.api.aws", - }, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - 
Variant: dualStackVariant, - }: endpoint{ - Hostname: "athena.ap-east-1.api.aws", - }, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "athena.ap-northeast-1.api.aws", - }, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "athena.ap-northeast-2.api.aws", - }, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "athena.ap-northeast-3.api.aws", - }, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "athena.ap-south-1.api.aws", - }, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "athena.ap-south-2.api.aws", - }, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "athena.ap-southeast-1.api.aws", - }, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "athena.ap-southeast-2.api.aws", - }, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "athena.ap-southeast-3.api.aws", - }, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "athena.ap-southeast-4.api.aws", - }, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "athena.ca-central-1.api.aws", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "athena.eu-central-1.api.aws", - }, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "athena.eu-central-2.api.aws", - }, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "athena.eu-north-1.api.aws", - }, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "athena.eu-south-1.api.aws", - }, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "athena.eu-south-2.api.aws", - }, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "athena.eu-west-1.api.aws", - }, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "athena.eu-west-2.api.aws", - }, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "athena.eu-west-3.api.aws", - }, - endpointKey{ - Region: "fips-us-east-1", - 
}: endpoint{ - Hostname: "athena-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "athena-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "athena-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "athena-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "il-central-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "athena.il-central-1.api.aws", - }, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "athena.me-central-1.api.aws", - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "athena.me-south-1.api.aws", - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "athena.sa-east-1.api.aws", - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "athena.us-east-1.api.aws", - }, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "athena-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant | dualStackVariant, - }: endpoint{ - Hostname: "athena-fips.us-east-1.api.aws", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "athena.us-east-2.api.aws", - }, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "athena-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant | dualStackVariant, - }: endpoint{ - Hostname: "athena-fips.us-east-2.api.aws", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "athena.us-west-1.api.aws", - }, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "athena-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant | dualStackVariant, - }: endpoint{ - Hostname: "athena-fips.us-west-1.api.aws", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "athena.us-west-2.api.aws", - }, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "athena-fips.us-west-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant | dualStackVariant, - }: endpoint{ - Hostname: "athena-fips.us-west-2.api.aws", - }, - }, - }, - "auditmanager": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - 
endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "auditmanager-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "auditmanager-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "auditmanager-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2-fips", - }: endpoint{ - Hostname: "auditmanager-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "auditmanager-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1-fips", - }: endpoint{ - Hostname: "auditmanager-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "auditmanager-fips.us-west-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: "auditmanager-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "autoscaling": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "autoscaling-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "ca-west-1", - }: endpoint{}, - endpointKey{ - Region: "ca-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "autoscaling-fips.ca-west-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: 
endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "autoscaling-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ca-west-1", - }: endpoint{ - Hostname: "autoscaling-fips.ca-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "autoscaling-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "autoscaling-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "autoscaling-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "autoscaling-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "autoscaling-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "autoscaling-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "autoscaling-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "autoscaling-fips.us-west-2.amazonaws.com", - }, - }, - }, - "autoscaling-plans": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - 
Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "backup": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "backup-gateway": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "backupstorage": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ 
- Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "batch": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "fips.batch.{region}.{dnsSuffix}", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "fips.batch.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "fips.batch.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "fips.batch.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "fips.batch.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - 
Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fips.batch.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fips.batch.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fips.batch.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fips.batch.us-west-2.amazonaws.com", - }, - }, - }, - "bedrock": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "bedrock-ap-northeast-1", - }: endpoint{ - Hostname: "bedrock.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - endpointKey{ - Region: "bedrock-ap-southeast-1", - }: endpoint{ - Hostname: "bedrock.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - endpointKey{ - Region: "bedrock-eu-central-1", - }: endpoint{ - Hostname: "bedrock.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - endpointKey{ - Region: "bedrock-fips-us-east-1", - }: endpoint{ - Hostname: "bedrock-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "bedrock-fips-us-west-2", - }: endpoint{ - Hostname: "bedrock-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - endpointKey{ - Region: "bedrock-runtime-ap-northeast-1", - }: endpoint{ - Hostname: "bedrock-runtime.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - endpointKey{ - Region: "bedrock-runtime-ap-southeast-1", - }: endpoint{ - Hostname: "bedrock-runtime.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - endpointKey{ - Region: "bedrock-runtime-eu-central-1", - }: endpoint{ - Hostname: "bedrock-runtime.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - endpointKey{ - Region: "bedrock-runtime-fips-us-east-1", - }: endpoint{ - Hostname: "bedrock-runtime-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "bedrock-runtime-fips-us-west-2", - }: endpoint{ - Hostname: "bedrock-runtime-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - endpointKey{ - Region: "bedrock-runtime-us-east-1", - }: endpoint{ - Hostname: "bedrock-runtime.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "bedrock-runtime-us-west-2", - }: endpoint{ - Hostname: "bedrock-runtime.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - endpointKey{ - Region: "bedrock-us-east-1", - }: endpoint{ - Hostname: "bedrock.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - 
}, - endpointKey{ - Region: "bedrock-us-west-2", - }: endpoint{ - Hostname: "bedrock.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "billingconductor": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-global", - }: endpoint{ - Hostname: "billingconductor.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "braket": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "budgets": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-global", - }: endpoint{ - Hostname: "budgets.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "cases": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{}, - }, - }, - "cassandra": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "cassandra-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "cassandra-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cassandra-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: 
endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cassandra-fips.us-west-2.amazonaws.com", - }, - }, - }, - "catalog.marketplace": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - }, - }, - "ce": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-global", - }: endpoint{ - Hostname: "ce.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "chime": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-global", - }: endpoint{ - Hostname: "chime.us-east-1.amazonaws.com", - Protocols: []string{"https"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "cleanrooms": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "cloud9": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "cloudcontrolapi": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: 
"ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cloudcontrolapi-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "ca-west-1", - }: endpoint{}, - endpointKey{ - Region: "ca-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cloudcontrolapi-fips.ca-west-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "cloudcontrolapi-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ca-west-1", - }: endpoint{ - Hostname: "cloudcontrolapi-fips.ca-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "cloudcontrolapi-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "cloudcontrolapi-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "cloudcontrolapi-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "cloudcontrolapi-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cloudcontrolapi-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cloudcontrolapi-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cloudcontrolapi-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cloudcontrolapi-fips.us-west-2.amazonaws.com", - }, - }, - }, - "clouddirectory": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - 
endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "cloudformation": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cloudformation-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "cloudformation-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cloudformation-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2-fips", - }: endpoint{ - Hostname: "cloudformation-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cloudformation-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1-fips", - }: endpoint{ - Hostname: "cloudformation-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cloudformation-fips.us-west-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: "cloudformation-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - 
Deprecated: boxedTrue, - }, - }, - }, - "cloudfront": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-global", - }: endpoint{ - Hostname: "cloudfront.amazonaws.com", - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "cloudhsm": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - }, - }, - "cloudhsmv2": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - CredentialScope: credentialScope{ - Service: "cloudhsm", - }, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "cloudsearch": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "cloudtrail": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: 
endpoint{}, - endpointKey{ - Region: "ca-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "cloudtrail-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "cloudtrail-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "cloudtrail-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "cloudtrail-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cloudtrail-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cloudtrail-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cloudtrail-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cloudtrail-fips.us-west-2.amazonaws.com", - }, - }, - }, - "cloudtrail-data": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, 
- endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "codeartifact": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "codebuild": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "codebuild-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "codebuild-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "codebuild-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2-fips", - }: endpoint{ - Hostname: "codebuild-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "codebuild-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1-fips", - }: endpoint{ - Hostname: "codebuild-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: 
"us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "codebuild-fips.us-west-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: "codebuild-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "codecatalyst": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-global", - }: endpoint{ - Hostname: "codecatalyst.global.api.aws", - }, - }, - }, - "codecommit": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "codecommit-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "ca-central-1-fips", - }: endpoint{ - Hostname: "codecommit-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips", - }: endpoint{ - Hostname: "codecommit-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "codecommit-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "codecommit-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "codecommit-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2-fips", - }: endpoint{ - Hostname: "codecommit-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "codecommit-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1-fips", - }: 
endpoint{ - Hostname: "codecommit-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "codecommit-fips.us-west-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: "codecommit-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "codedeploy": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "codedeploy-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "codedeploy-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "codedeploy-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2-fips", - }: endpoint{ - Hostname: "codedeploy-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "codedeploy-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1-fips", - }: endpoint{ - Hostname: "codedeploy-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "codedeploy-fips.us-west-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: 
"codedeploy-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "codeguru-reviewer": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "codepipeline": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "codepipeline-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "codepipeline-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "codepipeline-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "codepipeline-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "codepipeline-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "codepipeline-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ 
- Hostname: "codepipeline-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "codepipeline-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "codepipeline-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "codepipeline-fips.us-west-2.amazonaws.com", - }, - }, - }, - "codestar": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "codestar-connections": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "codestar-notifications": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - 
Region: "us-west-2", - }: endpoint{}, - }, - }, - "cognito-identity": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "cognito-identity-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "cognito-identity-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "cognito-identity-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "cognito-identity-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cognito-identity-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cognito-identity-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cognito-identity-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cognito-identity-fips.us-west-2.amazonaws.com", - }, - }, - }, - "cognito-idp": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, 
- endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "cognito-idp-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "cognito-idp-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "cognito-idp-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "cognito-idp-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cognito-idp-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cognito-idp-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cognito-idp-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "cognito-idp-fips.us-west-2.amazonaws.com", - }, - }, - }, - "cognito-sync": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "comprehend": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "comprehend-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: 
"fips-us-east-2", - }: endpoint{ - Hostname: "comprehend-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "comprehend-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "comprehend-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "comprehend-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "comprehend-fips.us-west-2.amazonaws.com", - }, - }, - }, - "comprehendmedical": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "comprehendmedical-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "comprehendmedical-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "comprehendmedical-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "comprehendmedical-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "comprehendmedical-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "comprehendmedical-fips.us-west-2.amazonaws.com", - }, - }, - }, - "compute-optimizer": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{ - Hostname: "compute-optimizer.af-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "af-south-1", - }, - }, - endpointKey{ - Region: "ap-east-1", - }: endpoint{ - Hostname: "compute-optimizer.ap-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-east-1", - }, - }, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{ - Hostname: "compute-optimizer.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{ - Hostname: "compute-optimizer.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{ - Hostname: "compute-optimizer.ap-northeast-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-3", - }, - }, - endpointKey{ - Region: 
"ap-south-1", - }: endpoint{ - Hostname: "compute-optimizer.ap-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - }, - endpointKey{ - Region: "ap-south-2", - }: endpoint{ - Hostname: "compute-optimizer.ap-south-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-2", - }, - }, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{ - Hostname: "compute-optimizer.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{ - Hostname: "compute-optimizer.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{ - Hostname: "compute-optimizer.ap-southeast-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-3", - }, - }, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{ - Hostname: "compute-optimizer.ap-southeast-4.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-4", - }, - }, - endpointKey{ - Region: "ca-central-1", - }: endpoint{ - Hostname: "compute-optimizer.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{ - Hostname: "compute-optimizer.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - endpointKey{ - Region: "eu-central-2", - }: endpoint{ - Hostname: "compute-optimizer.eu-central-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-2", - }, - }, - endpointKey{ - Region: "eu-north-1", - }: endpoint{ - Hostname: "compute-optimizer.eu-north-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-north-1", - }, - }, - endpointKey{ - Region: "eu-south-1", - }: endpoint{ - Hostname: "compute-optimizer.eu-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-south-1", - }, - }, - endpointKey{ - Region: "eu-south-2", - }: endpoint{ - Hostname: "compute-optimizer.eu-south-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-south-2", - }, - }, - endpointKey{ - Region: "eu-west-1", - }: endpoint{ - Hostname: "compute-optimizer.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - endpointKey{ - Region: "eu-west-2", - }: endpoint{ - Hostname: "compute-optimizer.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - }, - endpointKey{ - Region: "eu-west-3", - }: endpoint{ - Hostname: "compute-optimizer.eu-west-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-3", - }, - }, - endpointKey{ - Region: "il-central-1", - }: endpoint{ - Hostname: "compute-optimizer.il-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "il-central-1", - }, - }, - endpointKey{ - Region: "me-central-1", - }: endpoint{ - Hostname: "compute-optimizer.me-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "me-central-1", - }, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{ - Hostname: "compute-optimizer.me-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "me-south-1", - }, - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{ - Hostname: "compute-optimizer.sa-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "sa-east-1", - }, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{ - 
Hostname: "compute-optimizer.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{ - Hostname: "compute-optimizer.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{ - Hostname: "compute-optimizer.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{ - Hostname: "compute-optimizer.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "config": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "config-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "config-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "config-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "config-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "config-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "config-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "config-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - 
Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "config-fips.us-west-2.amazonaws.com", - }, - }, - }, - "connect": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "connect-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "connect-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "connect-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "connect-fips.us-west-2.amazonaws.com", - }, - }, - }, - "connect-campaigns": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "connect-campaigns-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "connect-campaigns-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "connect-campaigns-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "connect-campaigns-fips.us-west-2.amazonaws.com", - }, - }, - }, - "contact-lens": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "controltower": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: 
endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "controltower-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "ca-central-1-fips", - }: endpoint{ - Hostname: "controltower-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "controltower-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "controltower-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "controltower-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2-fips", - }: endpoint{ - Hostname: "controltower-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "controltower-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1-fips", - }: endpoint{ - Hostname: "controltower-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "controltower-fips.us-west-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: "controltower-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "cost-optimization-hub": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-east-1", - }: endpoint{ - Hostname: "cost-optimization-hub.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "cur": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - }, - }, - "data-ats.iot": service{ - 
Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - CredentialScope: credentialScope{ - Service: "iotdata", - }, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "data.iot-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "data.iot-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Service: "iotdata", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "data.iot-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Service: "iotdata", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "data.iot-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Service: "iotdata", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "data.iot-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Service: "iotdata", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "data.iot-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Service: "iotdata", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "data.iot-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "data.iot-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "data.iot-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "data.iot-fips.us-west-2.amazonaws.com", - }, - }, - }, - "data.jobs.iot": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: 
"data.jobs.iot-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "data.jobs.iot-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "data.jobs.iot-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "data.jobs.iot-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "data.jobs.iot-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "data.jobs.iot-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "data.jobs.iot-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "data.jobs.iot-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "data.jobs.iot-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "data.jobs.iot-fips.us-west-2.amazonaws.com", - }, - }, - }, - "data.mediastore": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "databrew": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: 
endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "databrew-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "databrew-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "databrew-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "databrew-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "databrew-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "databrew-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "databrew-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "databrew-fips.us-west-2.amazonaws.com", - }, - }, - }, - "dataexchange": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "datapipeline": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "datasync": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - 
}: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "datasync-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "ca-west-1", - }: endpoint{}, - endpointKey{ - Region: "ca-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "datasync-fips.ca-west-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "datasync-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ca-west-1", - }: endpoint{ - Hostname: "datasync-fips.ca-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "datasync-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "datasync-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "datasync-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "datasync-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "datasync-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "datasync-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "datasync-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "datasync-fips.us-west-2.amazonaws.com", - }, - }, - }, - "datazone": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - DNSSuffix: "api.aws", - }, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "{service}-fips.{region}.{dnsSuffix}", - DNSSuffix: "api.aws", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{ - Hostname: "datazone.af-south-1.api.aws", - }, - endpointKey{ - Region: "ap-east-1", - }: endpoint{ - Hostname: 
"datazone.ap-east-1.api.aws", - }, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{ - Hostname: "datazone.ap-northeast-1.api.aws", - }, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{ - Hostname: "datazone.ap-northeast-2.api.aws", - }, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{ - Hostname: "datazone.ap-northeast-3.api.aws", - }, - endpointKey{ - Region: "ap-south-1", - }: endpoint{ - Hostname: "datazone.ap-south-1.api.aws", - }, - endpointKey{ - Region: "ap-south-2", - }: endpoint{ - Hostname: "datazone.ap-south-2.api.aws", - }, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{ - Hostname: "datazone.ap-southeast-1.api.aws", - }, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{ - Hostname: "datazone.ap-southeast-2.api.aws", - }, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{ - Hostname: "datazone.ap-southeast-3.api.aws", - }, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{ - Hostname: "datazone.ap-southeast-4.api.aws", - }, - endpointKey{ - Region: "ca-central-1", - }: endpoint{ - Hostname: "datazone.ca-central-1.api.aws", - }, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "datazone-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "ca-west-1", - }: endpoint{ - Hostname: "datazone.ca-west-1.api.aws", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{ - Hostname: "datazone.eu-central-1.api.aws", - }, - endpointKey{ - Region: "eu-central-2", - }: endpoint{ - Hostname: "datazone.eu-central-2.api.aws", - }, - endpointKey{ - Region: "eu-north-1", - }: endpoint{ - Hostname: "datazone.eu-north-1.api.aws", - }, - endpointKey{ - Region: "eu-south-1", - }: endpoint{ - Hostname: "datazone.eu-south-1.api.aws", - }, - endpointKey{ - Region: "eu-south-2", - }: endpoint{ - Hostname: "datazone.eu-south-2.api.aws", - }, - endpointKey{ - Region: "eu-west-1", - }: endpoint{ - Hostname: "datazone.eu-west-1.api.aws", - }, - endpointKey{ - Region: "eu-west-2", - }: endpoint{ - Hostname: "datazone.eu-west-2.api.aws", - }, - endpointKey{ - Region: "eu-west-3", - }: endpoint{ - Hostname: "datazone.eu-west-3.api.aws", - }, - endpointKey{ - Region: "il-central-1", - }: endpoint{ - Hostname: "datazone.il-central-1.api.aws", - }, - endpointKey{ - Region: "me-central-1", - }: endpoint{ - Hostname: "datazone.me-central-1.api.aws", - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{ - Hostname: "datazone.me-south-1.api.aws", - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{ - Hostname: "datazone.sa-east-1.api.aws", - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{ - Hostname: "datazone.us-east-1.api.aws", - }, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "datazone-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{ - Hostname: "datazone.us-east-2.api.aws", - }, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "datazone-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{ - Hostname: "datazone.us-west-1.api.aws", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{ - Hostname: "datazone.us-west-2.api.aws", - }, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "datazone-fips.us-west-2.amazonaws.com", - }, - }, - }, - "dax": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - 
}: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "devicefarm": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "devops-guru": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "devops-guru-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "devops-guru-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "devops-guru-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "devops-guru-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "devops-guru-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "devops-guru-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "devops-guru-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "devops-guru-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "devops-guru-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: 
"devops-guru-fips.us-west-2.amazonaws.com", - }, - }, - }, - "directconnect": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "directconnect-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "directconnect-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "directconnect-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "directconnect-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "directconnect-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "directconnect-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "directconnect-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "directconnect-fips.us-west-2.amazonaws.com", - }, - }, - }, - "discovery": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: 
"us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "dlm": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "dms": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-west-1", - }: endpoint{}, - endpointKey{ - Region: "dms", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "dms", - Variant: fipsVariant, - }: endpoint{ - Hostname: "dms-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "dms-fips", - }: endpoint{ - Hostname: "dms-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - 
endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "dms-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "dms-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "dms-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2-fips", - }: endpoint{ - Hostname: "dms-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "dms-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1-fips", - }: endpoint{ - Hostname: "dms-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "dms-fips.us-west-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: "dms-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "docdb": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{ - Hostname: "rds.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{ - Hostname: "rds.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, - endpointKey{ - Region: "ap-south-1", - }: endpoint{ - Hostname: "rds.ap-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - }, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{ - Hostname: "rds.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{ - Hostname: "rds.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - endpointKey{ - Region: "ca-central-1", - }: endpoint{ - Hostname: "rds.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{ - Hostname: "rds.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - endpointKey{ - Region: "eu-west-1", - }: endpoint{ - Hostname: "rds.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - endpointKey{ - Region: "eu-west-2", - }: endpoint{ - Hostname: "rds.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - }, - endpointKey{ - Region: "eu-west-3", - }: endpoint{ - Hostname: "rds.eu-west-3.amazonaws.com", - 
CredentialScope: credentialScope{ - Region: "eu-west-3", - }, - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{ - Hostname: "rds.sa-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "sa-east-1", - }, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{ - Hostname: "rds.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{ - Hostname: "rds.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{ - Hostname: "rds.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "drs": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "drs-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "drs-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "drs-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "drs-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "drs-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "drs-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "drs-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: 
"us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "drs-fips.us-west-2.amazonaws.com", - }, - }, - }, - "ds": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ds-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "ca-west-1", - }: endpoint{}, - endpointKey{ - Region: "ca-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ds-fips.ca-west-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "ds-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ca-west-1", - }: endpoint{ - Hostname: "ds-fips.ca-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "ds-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "ds-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "ds-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "ds-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ds-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ds-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ 
- Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ds-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ds-fips.us-west-2.amazonaws.com", - }, - }, - }, - "dynamodb": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "dynamodb-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "ca-central-1-fips", - }: endpoint{ - Hostname: "dynamodb-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "ca-west-1", - }: endpoint{}, - endpointKey{ - Region: "ca-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "dynamodb-fips.ca-west-1.amazonaws.com", - }, - endpointKey{ - Region: "ca-west-1-fips", - }: endpoint{ - Hostname: "dynamodb-fips.ca-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "local", - }: endpoint{ - Hostname: "localhost:8000", - Protocols: []string{"http"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "dynamodb-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "dynamodb-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "dynamodb-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2-fips", - }: endpoint{ - Hostname: "dynamodb-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: 
boxedTrue, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "dynamodb-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1-fips", - }: endpoint{ - Hostname: "dynamodb-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "dynamodb-fips.us-west-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: "dynamodb-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "ebs": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ebs-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "ca-west-1", - }: endpoint{}, - endpointKey{ - Region: "ca-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ebs-fips.ca-west-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "ebs-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ca-west-1", - }: endpoint{ - Hostname: "ebs-fips.ca-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "ebs-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "ebs-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "ebs-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "ebs-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - 
endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ebs-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ebs-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ebs-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ebs-fips.us-west-2.amazonaws.com", - }, - }, - }, - "ec2": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "ec2.ap-south-1.api.aws", - }, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ec2-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "ca-west-1", - }: endpoint{}, - endpointKey{ - Region: "ca-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ec2-fips.ca-west-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "ec2.eu-west-1.api.aws", - }, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "ec2-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ca-west-1", - }: endpoint{ - Hostname: "ec2-fips.ca-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "ec2-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: 
"ec2-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "ec2-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "ec2-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "ec2.sa-east-1.api.aws", - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "ec2.us-east-1.api.aws", - }, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ec2-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "ec2.us-east-2.api.aws", - }, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ec2-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ec2-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "ec2.us-west-2.api.aws", - }, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ec2-fips.us-west-2.amazonaws.com", - }, - }, - }, - "ecs": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "ecs-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: 
"ecs-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "ecs-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "ecs-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ecs-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ecs-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ecs-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ecs-fips.us-west-2.amazonaws.com", - }, - }, - }, - "edge.sagemaker": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "eks": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - }, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "fips.eks.{region}.{dnsSuffix}", - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "fips.eks.us-east-1.amazonaws.com", - CredentialScope: 
credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "fips.eks.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "fips.eks.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "fips.eks.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fips.eks.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fips.eks.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fips.eks.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fips.eks.us-west-2.amazonaws.com", - }, - }, - }, - "eks-auth": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - DNSSuffix: "api.aws", - }, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "{service}-fips.{region}.{dnsSuffix}", - DNSSuffix: "api.aws", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{ - Hostname: "eks-auth.af-south-1.api.aws", - }, - endpointKey{ - Region: "ap-east-1", - }: endpoint{ - Hostname: "eks-auth.ap-east-1.api.aws", - }, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{ - Hostname: "eks-auth.ap-northeast-1.api.aws", - }, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{ - Hostname: "eks-auth.ap-northeast-2.api.aws", - }, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{ - Hostname: "eks-auth.ap-northeast-3.api.aws", - }, - endpointKey{ - Region: "ap-south-1", - }: endpoint{ - Hostname: "eks-auth.ap-south-1.api.aws", - }, - endpointKey{ - Region: "ap-south-2", - }: endpoint{ - Hostname: "eks-auth.ap-south-2.api.aws", - }, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{ - Hostname: "eks-auth.ap-southeast-1.api.aws", - }, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{ - Hostname: "eks-auth.ap-southeast-2.api.aws", - }, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{ - Hostname: "eks-auth.ap-southeast-3.api.aws", - }, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{ - Hostname: "eks-auth.ap-southeast-4.api.aws", - }, - endpointKey{ - Region: "ca-central-1", - }: endpoint{ - Hostname: "eks-auth.ca-central-1.api.aws", - }, - endpointKey{ - Region: "ca-west-1", - }: endpoint{ - Hostname: "eks-auth.ca-west-1.api.aws", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{ - Hostname: "eks-auth.eu-central-1.api.aws", - }, - endpointKey{ - Region: "eu-central-2", - }: endpoint{ - Hostname: "eks-auth.eu-central-2.api.aws", - }, - endpointKey{ - Region: 
"eu-north-1", - }: endpoint{ - Hostname: "eks-auth.eu-north-1.api.aws", - }, - endpointKey{ - Region: "eu-south-1", - }: endpoint{ - Hostname: "eks-auth.eu-south-1.api.aws", - }, - endpointKey{ - Region: "eu-south-2", - }: endpoint{ - Hostname: "eks-auth.eu-south-2.api.aws", - }, - endpointKey{ - Region: "eu-west-1", - }: endpoint{ - Hostname: "eks-auth.eu-west-1.api.aws", - }, - endpointKey{ - Region: "eu-west-2", - }: endpoint{ - Hostname: "eks-auth.eu-west-2.api.aws", - }, - endpointKey{ - Region: "eu-west-3", - }: endpoint{ - Hostname: "eks-auth.eu-west-3.api.aws", - }, - endpointKey{ - Region: "il-central-1", - }: endpoint{ - Hostname: "eks-auth.il-central-1.api.aws", - }, - endpointKey{ - Region: "me-central-1", - }: endpoint{ - Hostname: "eks-auth.me-central-1.api.aws", - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{ - Hostname: "eks-auth.me-south-1.api.aws", - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{ - Hostname: "eks-auth.sa-east-1.api.aws", - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{ - Hostname: "eks-auth.us-east-1.api.aws", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{ - Hostname: "eks-auth.us-east-2.api.aws", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{ - Hostname: "eks-auth.us-west-1.api.aws", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{ - Hostname: "eks-auth.us-west-2.api.aws", - }, - }, - }, - "elasticache": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips", - }: endpoint{ - Hostname: "elasticache-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticache-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "elasticache-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - 
endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticache-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2-fips", - }: endpoint{ - Hostname: "elasticache-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticache-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1-fips", - }: endpoint{ - Hostname: "elasticache-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticache-fips.us-west-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: "elasticache-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "elasticbeanstalk": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "elasticbeanstalk-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "elasticbeanstalk-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "elasticbeanstalk-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "elasticbeanstalk-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticbeanstalk-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticbeanstalk-fips.us-east-2.amazonaws.com", - }, 
- endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticbeanstalk-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticbeanstalk-fips.us-west-2.amazonaws.com", - }, - }, - }, - "elasticfilesystem": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "af-south-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.af-south-1.amazonaws.com", - }, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.ap-east-1.amazonaws.com", - }, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.ap-northeast-1.amazonaws.com", - }, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.ap-northeast-2.amazonaws.com", - }, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.ap-northeast-3.amazonaws.com", - }, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.ap-south-1.amazonaws.com", - }, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.ap-south-2.amazonaws.com", - }, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.ap-southeast-1.amazonaws.com", - }, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.ap-southeast-2.amazonaws.com", - }, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.ap-southeast-3.amazonaws.com", - }, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.ap-southeast-4.amazonaws.com", - }, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "ca-west-1", - }: endpoint{}, - endpointKey{ - Region: "ca-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.ca-west-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.eu-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: 
"elasticfilesystem-fips.eu-central-2.amazonaws.com", - }, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.eu-north-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.eu-south-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.eu-south-2.amazonaws.com", - }, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.eu-west-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.eu-west-2.amazonaws.com", - }, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.eu-west-3.amazonaws.com", - }, - endpointKey{ - Region: "fips-af-south-1", - }: endpoint{ - Hostname: "elasticfilesystem-fips.af-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "af-south-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-east-1", - }: endpoint{ - Hostname: "elasticfilesystem-fips.ap-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-northeast-1", - }: endpoint{ - Hostname: "elasticfilesystem-fips.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-northeast-2", - }: endpoint{ - Hostname: "elasticfilesystem-fips.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-northeast-3", - }: endpoint{ - Hostname: "elasticfilesystem-fips.ap-northeast-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-3", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-south-1", - }: endpoint{ - Hostname: "elasticfilesystem-fips.ap-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-south-2", - }: endpoint{ - Hostname: "elasticfilesystem-fips.ap-south-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-southeast-1", - }: endpoint{ - Hostname: "elasticfilesystem-fips.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-southeast-2", - }: endpoint{ - Hostname: "elasticfilesystem-fips.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-southeast-3", - }: endpoint{ - Hostname: "elasticfilesystem-fips.ap-southeast-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-3", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-southeast-4", - }: 
endpoint{ - Hostname: "elasticfilesystem-fips.ap-southeast-4.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-4", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "elasticfilesystem-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ca-west-1", - }: endpoint{ - Hostname: "elasticfilesystem-fips.ca-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-central-1", - }: endpoint{ - Hostname: "elasticfilesystem-fips.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-central-2", - }: endpoint{ - Hostname: "elasticfilesystem-fips.eu-central-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-north-1", - }: endpoint{ - Hostname: "elasticfilesystem-fips.eu-north-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-north-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-south-1", - }: endpoint{ - Hostname: "elasticfilesystem-fips.eu-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-south-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-south-2", - }: endpoint{ - Hostname: "elasticfilesystem-fips.eu-south-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-south-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-west-1", - }: endpoint{ - Hostname: "elasticfilesystem-fips.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-west-2", - }: endpoint{ - Hostname: "elasticfilesystem-fips.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-west-3", - }: endpoint{ - Hostname: "elasticfilesystem-fips.eu-west-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-3", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-il-central-1", - }: endpoint{ - Hostname: "elasticfilesystem-fips.il-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "il-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-me-central-1", - }: endpoint{ - Hostname: "elasticfilesystem-fips.me-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "me-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-me-south-1", - }: endpoint{ - Hostname: "elasticfilesystem-fips.me-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "me-south-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-sa-east-1", - }: endpoint{ - Hostname: "elasticfilesystem-fips.sa-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "sa-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "elasticfilesystem-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: 
"elasticfilesystem-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "elasticfilesystem-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "elasticfilesystem-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "il-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.il-central-1.amazonaws.com", - }, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.me-central-1.amazonaws.com", - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.me-south-1.amazonaws.com", - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.sa-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.us-west-2.amazonaws.com", - }, - }, - }, - "elasticloadbalancing": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: 
endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "elasticloadbalancing-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "elasticloadbalancing-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "elasticloadbalancing-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "elasticloadbalancing-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticloadbalancing-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticloadbalancing-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticloadbalancing-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticloadbalancing-fips.us-west-2.amazonaws.com", - }, - }, - }, - "elasticmapreduce": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - SSLCommonName: "{region}.{service}.{dnsSuffix}", - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticmapreduce-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "ca-west-1", - }: endpoint{}, - endpointKey{ - Region: "ca-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticmapreduce-fips.ca-west-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{ - SSLCommonName: "{service}.{region}.{dnsSuffix}", - }, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: 
"eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "elasticmapreduce-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ca-west-1", - }: endpoint{ - Hostname: "elasticmapreduce-fips.ca-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "elasticmapreduce-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "elasticmapreduce-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "elasticmapreduce-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "elasticmapreduce-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{ - SSLCommonName: "{service}.{region}.{dnsSuffix}", - }, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticmapreduce-fips.us-east-1.amazonaws.com", - SSLCommonName: "{service}.{region}.{dnsSuffix}", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "elasticmapreduce.us-east-2.api.aws", - }, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticmapreduce-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticmapreduce-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticmapreduce-fips.us-west-2.amazonaws.com", - }, - }, - }, - "elastictranscoder": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "email": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - 
}: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "email-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "email-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "email-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "email-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "email-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "email-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "email-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "email-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "email-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "email-fips.us-west-2.amazonaws.com", - }, - }, - }, - "emr-containers": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "emr-containers-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: 
"eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "emr-containers-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "emr-containers-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "emr-containers-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "emr-containers-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "emr-containers-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "emr-containers-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "emr-containers-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "emr-containers-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "emr-containers-fips.us-west-2.amazonaws.com", - }, - }, - }, - "emr-serverless": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "emr-serverless-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: 
"emr-serverless-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "emr-serverless-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "emr-serverless-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "emr-serverless-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "emr-serverless-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "emr-serverless-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "emr-serverless-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "emr-serverless-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "emr-serverless-fips.us-west-2.amazonaws.com", - }, - }, - }, - "entitlement.marketplace": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - CredentialScope: credentialScope{ - Service: "aws-marketplace", - }, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - }, - }, - "es": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "af-south-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "aos.af-south-1.api.aws", - }, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "aos.ap-east-1.api.aws", - }, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "aos.ap-northeast-1.api.aws", - }, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "aos.ap-northeast-2.api.aws", - }, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "aos.ap-northeast-3.api.aws", - }, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "aos.ap-south-1.api.aws", - }, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - Variant: dualStackVariant, - }: endpoint{ - Hostname: 
"aos.ap-south-2.api.aws", - }, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "aos.ap-southeast-1.api.aws", - }, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "aos.ap-southeast-2.api.aws", - }, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "aos.ap-southeast-3.api.aws", - }, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "aos.ap-southeast-4.api.aws", - }, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "aos.ca-central-1.api.aws", - }, - endpointKey{ - Region: "ca-west-1", - }: endpoint{}, - endpointKey{ - Region: "ca-west-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "aos.ca-west-1.api.aws", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "aos.eu-central-1.api.aws", - }, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "aos.eu-central-2.api.aws", - }, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "aos.eu-north-1.api.aws", - }, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "aos.eu-south-1.api.aws", - }, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "aos.eu-south-2.api.aws", - }, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "aos.eu-west-1.api.aws", - }, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "aos.eu-west-2.api.aws", - }, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "aos.eu-west-3.api.aws", - }, - endpointKey{ - Region: "fips", - }: endpoint{ - Hostname: "es-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "il-central-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "aos.il-central-1.api.aws", - }, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "aos.me-central-1.api.aws", - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "aos.me-south-1.api.aws", - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "aos.sa-east-1.api.aws", 
- }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "aos.us-east-1.api.aws", - }, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "es-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "es-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "aos.us-east-2.api.aws", - }, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "es-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2-fips", - }: endpoint{ - Hostname: "es-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "aos.us-west-1.api.aws", - }, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "es-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1-fips", - }: endpoint{ - Hostname: "es-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "aos.us-west-2.api.aws", - }, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "es-fips.us-west-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: "es-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "events": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "events-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: 
endpoint{ - Hostname: "events-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "events-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "events-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "events-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "events-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "events-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "events-fips.us-west-2.amazonaws.com", - }, - }, - }, - "evidently": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{ - Hostname: "evidently.ap-northeast-1.amazonaws.com", - }, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{ - Hostname: "evidently.ap-southeast-1.amazonaws.com", - }, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{ - Hostname: "evidently.ap-southeast-2.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{ - Hostname: "evidently.eu-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-north-1", - }: endpoint{ - Hostname: "evidently.eu-north-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-west-1", - }: endpoint{ - Hostname: "evidently.eu-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{ - Hostname: "evidently.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{ - Hostname: "evidently.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{ - Hostname: "evidently.us-west-2.amazonaws.com", - }, - }, - }, - "finspace": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "finspace-api": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - 
Region: "us-west-2", - }: endpoint{}, - }, - }, - "firehose": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "firehose-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "firehose-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "firehose-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "firehose-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "firehose-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "firehose-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "firehose-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "firehose-fips.us-west-2.amazonaws.com", - }, - }, - }, - "fms": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "af-south-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fms-fips.af-south-1.amazonaws.com", - }, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - 
Variant: fipsVariant, - }: endpoint{ - Hostname: "fms-fips.ap-east-1.amazonaws.com", - }, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fms-fips.ap-northeast-1.amazonaws.com", - }, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fms-fips.ap-northeast-2.amazonaws.com", - }, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fms-fips.ap-south-1.amazonaws.com", - }, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fms-fips.ap-southeast-1.amazonaws.com", - }, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fms-fips.ap-southeast-2.amazonaws.com", - }, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fms-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fms-fips.eu-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fms-fips.eu-south-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fms-fips.eu-west-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fms-fips.eu-west-2.amazonaws.com", - }, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fms-fips.eu-west-3.amazonaws.com", - }, - endpointKey{ - Region: "fips-af-south-1", - }: endpoint{ - Hostname: "fms-fips.af-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "af-south-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-east-1", - }: endpoint{ - Hostname: "fms-fips.ap-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-northeast-1", - }: endpoint{ - Hostname: "fms-fips.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-northeast-2", - }: endpoint{ - Hostname: "fms-fips.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-south-1", - }: endpoint{ - Hostname: 
"fms-fips.ap-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-southeast-1", - }: endpoint{ - Hostname: "fms-fips.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-southeast-2", - }: endpoint{ - Hostname: "fms-fips.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "fms-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-central-1", - }: endpoint{ - Hostname: "fms-fips.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-south-1", - }: endpoint{ - Hostname: "fms-fips.eu-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-south-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-west-1", - }: endpoint{ - Hostname: "fms-fips.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-west-2", - }: endpoint{ - Hostname: "fms-fips.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-west-3", - }: endpoint{ - Hostname: "fms-fips.eu-west-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-3", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-me-south-1", - }: endpoint{ - Hostname: "fms-fips.me-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "me-south-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-sa-east-1", - }: endpoint{ - Hostname: "fms-fips.sa-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "sa-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "fms-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "fms-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "fms-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "fms-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fms-fips.me-south-1.amazonaws.com", - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fms-fips.sa-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: 
"us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fms-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fms-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fms-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fms-fips.us-west-2.amazonaws.com", - }, - }, - }, - "forecast": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "forecast-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "forecast-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "forecast-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "forecast-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "forecast-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "forecast-fips.us-west-2.amazonaws.com", - }, - }, - }, - "forecastquery": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "forecastquery-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "forecastquery-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "forecastquery-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: 
endpoint{ - Hostname: "forecastquery-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "forecastquery-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "forecastquery-fips.us-west-2.amazonaws.com", - }, - }, - }, - "frauddetector": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "fsx": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fsx-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "fsx-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-prod-ca-central-1", - }: endpoint{ - Hostname: "fsx-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-prod-us-east-1", - }: endpoint{ - Hostname: "fsx-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-prod-us-east-2", - }: endpoint{ - Hostname: "fsx-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-prod-us-west-1", - }: endpoint{ - Hostname: "fsx-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-prod-us-west-2", - }: endpoint{ - Hostname: "fsx-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: 
"fsx-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "fsx-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "fsx-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "fsx-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "prod-ca-central-1", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "prod-ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fsx-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "prod-us-east-1", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "prod-us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fsx-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "prod-us-east-2", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "prod-us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fsx-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "prod-us-west-1", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "prod-us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fsx-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "prod-us-west-2", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "prod-us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fsx-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fsx-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fsx-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fsx-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: 
"fsx-fips.us-west-2.amazonaws.com", - }, - }, - }, - "gamelift": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "geo": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "glacier": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "glacier-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "glacier-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - 
Region: "fips-us-east-1", - }: endpoint{ - Hostname: "glacier-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "glacier-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "glacier-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "glacier-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "glacier-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "glacier-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "glacier-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "glacier-fips.us-west-2.amazonaws.com", - }, - }, - }, - "glue": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "glue-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "glue-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "glue-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - 
Region: "fips-us-west-2", - }: endpoint{ - Hostname: "glue-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "glue-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "glue-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "glue-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "glue-fips.us-west-2.amazonaws.com", - }, - }, - }, - "grafana": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{ - Hostname: "grafana.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{ - Hostname: "grafana.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{ - Hostname: "grafana.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{ - Hostname: "grafana.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{ - Hostname: "grafana.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - endpointKey{ - Region: "eu-west-1", - }: endpoint{ - Hostname: "grafana.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - endpointKey{ - Region: "eu-west-2", - }: endpoint{ - Hostname: "grafana.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{ - Hostname: "grafana.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{ - Hostname: "grafana.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{ - Hostname: "grafana.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "greengrass": service{ - IsRegionalized: boxedTrue, - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: 
"ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "greengrass-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "greengrass-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "greengrass-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "greengrass-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "greengrass-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "greengrass-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "greengrass-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "greengrass-fips.us-west-2.amazonaws.com", - }, - }, - }, - "groundstation": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "groundstation-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "groundstation-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "groundstation-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "groundstation-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "groundstation-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "groundstation-fips.us-west-2.amazonaws.com", - }, - }, - }, - "guardduty": service{ - IsRegionalized: 
boxedTrue, - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "guardduty-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "guardduty-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "guardduty-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2-fips", - }: endpoint{ - Hostname: "guardduty-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "guardduty-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1-fips", - }: endpoint{ - Hostname: "guardduty-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "guardduty-fips.us-west-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: "guardduty-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "health": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - SSLCommonName: "health.us-east-1.amazonaws.com", - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-global", - }: endpoint{ - Hostname: "global.health.amazonaws.com", - CredentialScope: 
credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "health-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "health-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "healthlake": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "honeycode": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "iam": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-global", - }: endpoint{ - Hostname: "iam.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "aws-global", - Variant: fipsVariant, - }: endpoint{ - Hostname: "iam-fips.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "aws-global-fips", - }: endpoint{ - Hostname: "iam-fips.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "iam", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "iam", - Variant: fipsVariant, - }: endpoint{ - Hostname: "iam-fips.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "iam-fips", - }: endpoint{ - Hostname: "iam-fips.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "identity-chime": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "identity-chime-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "identity-chime-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "identitystore": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: 
endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "importexport": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-global", - }: endpoint{ - Hostname: "importexport.amazonaws.com", - SignatureVersions: []string{"v2", "v4"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - Service: "IngestionService", - }, - }, - }, - }, - "ingest.timestream": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "ingest-fips-us-east-1", - }: endpoint{ - Hostname: "ingest.timestream-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "ingest-fips-us-east-2", - }: endpoint{ - Hostname: "ingest.timestream-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "ingest-fips-us-west-2", - }: endpoint{ - Hostname: "ingest.timestream-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "ingest-us-east-1", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "ingest-us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ingest.timestream-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "ingest-us-east-2", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "ingest-us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ingest.timestream-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "ingest-us-west-2", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "ingest-us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ingest.timestream-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - 
"inspector": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "inspector-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "inspector-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "inspector-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "inspector-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "inspector-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "inspector-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "inspector-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "inspector-fips.us-west-2.amazonaws.com", - }, - }, - }, - "inspector2": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "inspector2-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "inspector2-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, 
- }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "inspector2-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "inspector2-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "inspector2-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "inspector2-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "inspector2-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "inspector2-fips.us-west-2.amazonaws.com", - }, - }, - }, - "internetmonitor": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - DNSSuffix: "api.aws", - }, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "{service}-fips.{region}.{dnsSuffix}", - DNSSuffix: "api.aws", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{ - Hostname: "internetmonitor.af-south-1.api.aws", - }, - endpointKey{ - Region: "ap-east-1", - }: endpoint{ - Hostname: "internetmonitor.ap-east-1.api.aws", - }, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{ - Hostname: "internetmonitor.ap-northeast-1.api.aws", - }, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{ - Hostname: "internetmonitor.ap-northeast-2.api.aws", - }, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{ - Hostname: "internetmonitor.ap-northeast-3.api.aws", - }, - endpointKey{ - Region: "ap-south-1", - }: endpoint{ - Hostname: "internetmonitor.ap-south-1.api.aws", - }, - endpointKey{ - Region: "ap-south-2", - }: endpoint{ - Hostname: "internetmonitor.ap-south-2.api.aws", - }, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{ - Hostname: "internetmonitor.ap-southeast-1.api.aws", - }, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{ - Hostname: "internetmonitor.ap-southeast-2.api.aws", - }, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{ - Hostname: "internetmonitor.ap-southeast-3.api.aws", - }, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{ - Hostname: "internetmonitor.ap-southeast-4.api.aws", - }, - endpointKey{ - Region: "ca-central-1", - }: endpoint{ - Hostname: "internetmonitor.ca-central-1.api.aws", - }, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "internetmonitor-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "ca-west-1", - }: endpoint{ - Hostname: "internetmonitor.ca-west-1.api.aws", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{ - Hostname: "internetmonitor.eu-central-1.api.aws", - }, - endpointKey{ - Region: "eu-central-2", - }: endpoint{ - Hostname: "internetmonitor.eu-central-2.api.aws", - }, - endpointKey{ - Region: "eu-north-1", - }: endpoint{ - Hostname: "internetmonitor.eu-north-1.api.aws", - }, - 
endpointKey{ - Region: "eu-south-1", - }: endpoint{ - Hostname: "internetmonitor.eu-south-1.api.aws", - }, - endpointKey{ - Region: "eu-south-2", - }: endpoint{ - Hostname: "internetmonitor.eu-south-2.api.aws", - }, - endpointKey{ - Region: "eu-west-1", - }: endpoint{ - Hostname: "internetmonitor.eu-west-1.api.aws", - }, - endpointKey{ - Region: "eu-west-2", - }: endpoint{ - Hostname: "internetmonitor.eu-west-2.api.aws", - }, - endpointKey{ - Region: "eu-west-3", - }: endpoint{ - Hostname: "internetmonitor.eu-west-3.api.aws", - }, - endpointKey{ - Region: "il-central-1", - }: endpoint{ - Hostname: "internetmonitor.il-central-1.api.aws", - }, - endpointKey{ - Region: "me-central-1", - }: endpoint{ - Hostname: "internetmonitor.me-central-1.api.aws", - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{ - Hostname: "internetmonitor.me-south-1.api.aws", - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{ - Hostname: "internetmonitor.sa-east-1.api.aws", - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{ - Hostname: "internetmonitor.us-east-1.api.aws", - }, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "internetmonitor-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{ - Hostname: "internetmonitor.us-east-2.api.aws", - }, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "internetmonitor-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{ - Hostname: "internetmonitor.us-west-1.api.aws", - }, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "internetmonitor-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{ - Hostname: "internetmonitor.us-west-2.api.aws", - }, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "internetmonitor-fips.us-west-2.amazonaws.com", - }, - }, - }, - "iot": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "iot-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "iot-fips.ca-central-1.amazonaws.com", - - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "iot-fips.us-east-1.amazonaws.com", - - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "iot-fips.us-east-2.amazonaws.com", - - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "iot-fips.us-west-1.amazonaws.com", - - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "iot-fips.us-west-2.amazonaws.com", - - Deprecated: boxedTrue, - }, 
- endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "iot-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "iot-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "iot-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "iot-fips.us-west-2.amazonaws.com", - }, - }, - }, - "iotanalytics": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "iotevents": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "iotevents-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "iotevents-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "iotevents-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "iotevents-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "iotevents-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "iotevents-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "iotevents-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: 
"iotevents-fips.us-west-2.amazonaws.com", - }, - }, - }, - "ioteventsdata": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{ - Hostname: "data.iotevents.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{ - Hostname: "data.iotevents.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, - endpointKey{ - Region: "ap-south-1", - }: endpoint{ - Hostname: "data.iotevents.ap-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - }, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{ - Hostname: "data.iotevents.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{ - Hostname: "data.iotevents.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - endpointKey{ - Region: "ca-central-1", - }: endpoint{ - Hostname: "data.iotevents.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "data.iotevents-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{ - Hostname: "data.iotevents.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - endpointKey{ - Region: "eu-west-1", - }: endpoint{ - Hostname: "data.iotevents.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - endpointKey{ - Region: "eu-west-2", - }: endpoint{ - Hostname: "data.iotevents.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - }, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "data.iotevents-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "data.iotevents-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "data.iotevents-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "data.iotevents-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{ - Hostname: "data.iotevents.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "data.iotevents-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{ - Hostname: "data.iotevents.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "data.iotevents-fips.us-east-2.amazonaws.com", - 
CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{ - Hostname: "data.iotevents.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "data.iotevents-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "iotfleetwise": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - }, - }, - "iotroborunner": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - }, - }, - "iotsecuredtunneling": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "api.tunneling.iot-fips.{region}.{dnsSuffix}", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api.tunneling.iot-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "api.tunneling.iot-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "api.tunneling.iot-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "api.tunneling.iot-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "api.tunneling.iot-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "api.tunneling.iot-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api.tunneling.iot-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api.tunneling.iot-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - 
endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api.tunneling.iot-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "api.tunneling.iot-fips.us-west-2.amazonaws.com", - }, - }, - }, - "iotsitewise": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "iotsitewise-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "iotsitewise-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "iotsitewise-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "iotsitewise-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "iotsitewise-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "iotsitewise-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "iotsitewise-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "iotsitewise-fips.us-west-2.amazonaws.com", - }, - }, - }, - "iotthingsgraph": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - CredentialScope: credentialScope{ - Service: "iotthingsgraph", - }, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "iottwinmaker": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "api-ap-northeast-1", - }: endpoint{ - Hostname: "api.iottwinmaker.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - 
endpointKey{ - Region: "api-ap-northeast-2", - }: endpoint{ - Hostname: "api.iottwinmaker.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, - endpointKey{ - Region: "api-ap-south-1", - }: endpoint{ - Hostname: "api.iottwinmaker.ap-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - }, - endpointKey{ - Region: "api-ap-southeast-1", - }: endpoint{ - Hostname: "api.iottwinmaker.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - endpointKey{ - Region: "api-ap-southeast-2", - }: endpoint{ - Hostname: "api.iottwinmaker.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - endpointKey{ - Region: "api-eu-central-1", - }: endpoint{ - Hostname: "api.iottwinmaker.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - endpointKey{ - Region: "api-eu-west-1", - }: endpoint{ - Hostname: "api.iottwinmaker.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - endpointKey{ - Region: "api-us-east-1", - }: endpoint{ - Hostname: "api.iottwinmaker.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "api-us-west-2", - }: endpoint{ - Hostname: "api.iottwinmaker.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - endpointKey{ - Region: "data-ap-northeast-1", - }: endpoint{ - Hostname: "data.iottwinmaker.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - endpointKey{ - Region: "data-ap-northeast-2", - }: endpoint{ - Hostname: "data.iottwinmaker.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, - endpointKey{ - Region: "data-ap-south-1", - }: endpoint{ - Hostname: "data.iottwinmaker.ap-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - }, - endpointKey{ - Region: "data-ap-southeast-1", - }: endpoint{ - Hostname: "data.iottwinmaker.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - endpointKey{ - Region: "data-ap-southeast-2", - }: endpoint{ - Hostname: "data.iottwinmaker.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - endpointKey{ - Region: "data-eu-central-1", - }: endpoint{ - Hostname: "data.iottwinmaker.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - endpointKey{ - Region: "data-eu-west-1", - }: endpoint{ - Hostname: "data.iottwinmaker.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - endpointKey{ - Region: "data-us-east-1", - }: endpoint{ - Hostname: "data.iottwinmaker.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "data-us-west-2", - }: endpoint{ - Hostname: "data.iottwinmaker.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "fips-api-us-east-1", - }: endpoint{ - Hostname: "api.iottwinmaker-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - 
endpointKey{ - Region: "fips-api-us-west-2", - }: endpoint{ - Hostname: "api.iottwinmaker-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - endpointKey{ - Region: "fips-data-us-east-1", - }: endpoint{ - Hostname: "data.iottwinmaker-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "fips-data-us-west-2", - }: endpoint{ - Hostname: "data.iottwinmaker-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "iottwinmaker-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "iottwinmaker-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "iottwinmaker-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "iottwinmaker-fips.us-west-2.amazonaws.com", - }, - }, - }, - "iotwireless": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{ - Hostname: "api.iotwireless.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{ - Hostname: "api.iotwireless.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - endpointKey{ - Region: "eu-west-1", - }: endpoint{ - Hostname: "api.iotwireless.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{ - Hostname: "api.iotwireless.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{ - Hostname: "api.iotwireless.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "ivs": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "ivschat": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "ivsrealtime": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: 
"eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "kafka": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kafka-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "kafka-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "kafka-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "kafka-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "kafka-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "kafka-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kafka-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kafka-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kafka-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kafka-fips.us-west-2.amazonaws.com", - }, - }, - }, - 
"kafkaconnect": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "kendra": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "kendra-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "kendra-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "kendra-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kendra-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kendra-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kendra-fips.us-west-2.amazonaws.com", - }, - }, - }, - "kendra-ranking": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - DNSSuffix: "api.aws", - }, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "{service}-fips.{region}.{dnsSuffix}", - DNSSuffix: "api.aws", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{ - Hostname: "kendra-ranking.af-south-1.api.aws", - }, - endpointKey{ - Region: "ap-east-1", - }: endpoint{ - Hostname: "kendra-ranking.ap-east-1.api.aws", - }, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{ - Hostname: "kendra-ranking.ap-northeast-1.api.aws", - }, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{ - Hostname: "kendra-ranking.ap-northeast-2.api.aws", - }, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{ - Hostname: "kendra-ranking.ap-northeast-3.api.aws", - }, - endpointKey{ - Region: "ap-south-1", - }: endpoint{ - Hostname: 
"kendra-ranking.ap-south-1.api.aws", - }, - endpointKey{ - Region: "ap-south-2", - }: endpoint{ - Hostname: "kendra-ranking.ap-south-2.api.aws", - }, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{ - Hostname: "kendra-ranking.ap-southeast-1.api.aws", - }, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{ - Hostname: "kendra-ranking.ap-southeast-2.api.aws", - }, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{ - Hostname: "kendra-ranking.ap-southeast-3.api.aws", - }, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{ - Hostname: "kendra-ranking.ap-southeast-4.api.aws", - }, - endpointKey{ - Region: "ca-central-1", - }: endpoint{ - Hostname: "kendra-ranking.ca-central-1.api.aws", - }, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kendra-ranking-fips.ca-central-1.api.aws", - }, - endpointKey{ - Region: "ca-west-1", - }: endpoint{ - Hostname: "kendra-ranking.ca-west-1.api.aws", - }, - endpointKey{ - Region: "eu-central-2", - }: endpoint{ - Hostname: "kendra-ranking.eu-central-2.api.aws", - }, - endpointKey{ - Region: "eu-north-1", - }: endpoint{ - Hostname: "kendra-ranking.eu-north-1.api.aws", - }, - endpointKey{ - Region: "eu-south-1", - }: endpoint{ - Hostname: "kendra-ranking.eu-south-1.api.aws", - }, - endpointKey{ - Region: "eu-south-2", - }: endpoint{ - Hostname: "kendra-ranking.eu-south-2.api.aws", - }, - endpointKey{ - Region: "eu-west-1", - }: endpoint{ - Hostname: "kendra-ranking.eu-west-1.api.aws", - }, - endpointKey{ - Region: "eu-west-3", - }: endpoint{ - Hostname: "kendra-ranking.eu-west-3.api.aws", - }, - endpointKey{ - Region: "il-central-1", - }: endpoint{ - Hostname: "kendra-ranking.il-central-1.api.aws", - }, - endpointKey{ - Region: "me-central-1", - }: endpoint{ - Hostname: "kendra-ranking.me-central-1.api.aws", - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{ - Hostname: "kendra-ranking.me-south-1.api.aws", - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{ - Hostname: "kendra-ranking.sa-east-1.api.aws", - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{ - Hostname: "kendra-ranking.us-east-1.api.aws", - }, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kendra-ranking-fips.us-east-1.api.aws", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{ - Hostname: "kendra-ranking.us-east-2.api.aws", - }, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kendra-ranking-fips.us-east-2.api.aws", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{ - Hostname: "kendra-ranking.us-west-1.api.aws", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{ - Hostname: "kendra-ranking.us-west-2.api.aws", - }, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kendra-ranking-fips.us-west-2.api.aws", - }, - }, - }, - "kinesis": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - 
Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "kinesis-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "kinesis-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "kinesis-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "kinesis-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kinesis-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kinesis-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kinesis-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kinesis-fips.us-west-2.amazonaws.com", - }, - }, - }, - "kinesisanalytics": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", 
- }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "kinesisvideo": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "kms": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ProdFips", - }: endpoint{ - Hostname: "kms-fips.eu-central-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "af-south-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kms-fips.af-south-1.amazonaws.com", - }, - endpointKey{ - Region: "af-south-1-fips", - }: endpoint{ - Hostname: "kms-fips.af-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "af-south-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kms-fips.ap-east-1.amazonaws.com", - }, - endpointKey{ - Region: "ap-east-1-fips", - }: endpoint{ - Hostname: "kms-fips.ap-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kms-fips.ap-northeast-1.amazonaws.com", - }, - endpointKey{ - Region: "ap-northeast-1-fips", - }: endpoint{ - Hostname: "kms-fips.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kms-fips.ap-northeast-2.amazonaws.com", - }, - endpointKey{ - Region: "ap-northeast-2-fips", - }: endpoint{ - Hostname: "kms-fips.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kms-fips.ap-northeast-3.amazonaws.com", 
-			},
-			endpointKey{
-				Region: "ap-northeast-3-fips",
-			}: endpoint{
-				Hostname: "kms-fips.ap-northeast-3.amazonaws.com",
-				CredentialScope: credentialScope{
-					Region: "ap-northeast-3",
-				},
-				Deprecated: boxedTrue,
-			},
-			endpointKey{
-				Region: "ap-south-1",
-			}: endpoint{},
-			endpointKey{
-				Region:  "ap-south-1",
-				Variant: fipsVariant,
-			}: endpoint{
-				Hostname: "kms-fips.ap-south-1.amazonaws.com",
-			},
-			endpointKey{
-				Region: "ap-south-1-fips",
-			}: endpoint{
-				Hostname: "kms-fips.ap-south-1.amazonaws.com",
-				CredentialScope: credentialScope{
-					Region: "ap-south-1",
-				},
-				Deprecated: boxedTrue,
-			},
[... remainder of this deleted hunk trimmed: several thousand more "-" lines of the same generated endpoint tables — regional, fipsVariant, and dualStackVariant endpointKey/endpoint entries, plus deprecated "*-fips" region aliases with credentialScope — for the services kms, lakeformation, lambda, license-manager, license-manager-linux-subscriptions, license-manager-user-subscriptions, lightsail, logs, lookoutequipment, lookoutmetrics, lookoutvision, m2, machinelearning, macie2, managedblockchain, managedblockchain-query, marketplacecommerceanalytics, media-pipelines-chime, mediaconnect, mediaconvert, medialive, mediapackage, mediapackage-vod, mediapackagev2, mediastore, meetings-chime, memory-db, messaging-chime, metering.marketplace, metrics.sagemaker, mgh, mgn, migrationhub-orchestrator, migrationhub-strategy, mobileanalytics, models-v2-lex, models.lex, monitoring, and mq ...]
"me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "mq-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "mq-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "mq-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "mq-fips.us-west-2.amazonaws.com", - }, - }, - }, - "mturk-requester": service{ - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "sandbox", - }: endpoint{ - Hostname: "mturk-requester-sandbox.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - }, - }, - "neptune": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-east-1", - }: endpoint{ - Hostname: "rds.ap-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-east-1", - }, - }, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{ - Hostname: "rds.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{ - Hostname: "rds.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, - endpointKey{ - Region: "ap-south-1", - }: endpoint{ - Hostname: "rds.ap-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - }, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{ - Hostname: "rds.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{ - Hostname: "rds.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - endpointKey{ - Region: "ca-central-1", - }: endpoint{ - Hostname: "rds.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{ - Hostname: "rds.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - endpointKey{ - Region: "eu-north-1", - }: endpoint{ - Hostname: "rds.eu-north-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-north-1", - }, - }, - endpointKey{ - Region: "eu-west-1", - }: endpoint{ - Hostname: "rds.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - endpointKey{ - Region: "eu-west-2", - }: endpoint{ - Hostname: "rds.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - }, - endpointKey{ - Region: "eu-west-3", - }: endpoint{ - Hostname: "rds.eu-west-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-3", - }, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{ - Hostname: "rds.me-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "me-south-1", - }, - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{ - Hostname: "rds.sa-east-1.amazonaws.com", - 
CredentialScope: credentialScope{ - Region: "sa-east-1", - }, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{ - Hostname: "rds.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{ - Hostname: "rds.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{ - Hostname: "rds.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{ - Hostname: "rds.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "network-firewall": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "network-firewall-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "network-firewall-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "network-firewall-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "network-firewall-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "network-firewall-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "network-firewall-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: 
"network-firewall-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "network-firewall-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "network-firewall-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "network-firewall-fips.us-west-2.amazonaws.com", - }, - }, - }, - "networkmanager": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-global", - }: endpoint{ - Hostname: "networkmanager.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - endpointKey{ - Region: "aws-global", - Variant: fipsVariant, - }: endpoint{ - Hostname: "networkmanager-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - endpointKey{ - Region: "fips-aws-global", - }: endpoint{ - Hostname: "networkmanager-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "nimble": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "oam": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: 
endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "oidc": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{ - Hostname: "oidc.af-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "af-south-1", - }, - }, - endpointKey{ - Region: "ap-east-1", - }: endpoint{ - Hostname: "oidc.ap-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-east-1", - }, - }, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{ - Hostname: "oidc.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{ - Hostname: "oidc.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{ - Hostname: "oidc.ap-northeast-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-3", - }, - }, - endpointKey{ - Region: "ap-south-1", - }: endpoint{ - Hostname: "oidc.ap-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - }, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{ - Hostname: "oidc.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{ - Hostname: "oidc.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{ - Hostname: "oidc.ap-southeast-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-3", - }, - }, - endpointKey{ - Region: "ca-central-1", - }: endpoint{ - Hostname: "oidc.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{ - Hostname: "oidc.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - endpointKey{ - Region: "eu-central-2", - }: endpoint{ - Hostname: "oidc.eu-central-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-2", - }, - }, - endpointKey{ - Region: "eu-north-1", - }: endpoint{ - Hostname: "oidc.eu-north-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-north-1", - }, - }, - endpointKey{ - Region: "eu-south-1", - }: endpoint{ - Hostname: "oidc.eu-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-south-1", - }, - }, - endpointKey{ - Region: "eu-west-1", - }: endpoint{ - Hostname: "oidc.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - endpointKey{ - Region: "eu-west-2", - }: endpoint{ - Hostname: "oidc.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - }, - endpointKey{ - Region: "eu-west-3", - }: endpoint{ - Hostname: "oidc.eu-west-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-3", - }, - }, - endpointKey{ - Region: "il-central-1", - }: endpoint{ - Hostname: "oidc.il-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "il-central-1", - }, - }, - endpointKey{ - Region: "me-central-1", - }: endpoint{ - Hostname: "oidc.me-central-1.amazonaws.com", - CredentialScope: 
credentialScope{ - Region: "me-central-1", - }, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{ - Hostname: "oidc.me-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "me-south-1", - }, - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{ - Hostname: "oidc.sa-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "sa-east-1", - }, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{ - Hostname: "oidc.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{ - Hostname: "oidc.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{ - Hostname: "oidc.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{ - Hostname: "oidc.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "omics": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{ - Hostname: "omics.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{ - Hostname: "omics.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - endpointKey{ - Region: "eu-west-1", - }: endpoint{ - Hostname: "omics.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - endpointKey{ - Region: "eu-west-2", - }: endpoint{ - Hostname: "omics.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "omics-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "omics-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "il-central-1", - }: endpoint{ - Hostname: "omics.il-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "il-central-1", - }, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{ - Hostname: "omics.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "omics-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{ - Hostname: "omics.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "omics-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "opsworks": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: 
"eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "opsworks-cm": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "organizations": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-global", - }: endpoint{ - Hostname: "organizations.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "aws-global", - Variant: fipsVariant, - }: endpoint{ - Hostname: "organizations-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "fips-aws-global", - }: endpoint{ - Hostname: "organizations-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "osis": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "outposts": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "outposts-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ 
- Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "outposts-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "outposts-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "outposts-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "outposts-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "outposts-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "outposts-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "outposts-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "outposts-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "outposts-fips.us-west-2.amazonaws.com", - }, - }, - }, - "participant.connect": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "participant.connect-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "participant.connect-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "participant.connect-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - 
Variant: fipsVariant, - }: endpoint{ - Hostname: "participant.connect-fips.us-west-2.amazonaws.com", - }, - }, - }, - "personalize": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "pi": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "pinpoint": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - CredentialScope: credentialScope{ - Service: "mobiletargeting", - }, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{ - Hostname: "pinpoint.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "pinpoint-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: 
endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "pinpoint-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "pinpoint-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "pinpoint-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "pinpoint-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{ - Hostname: "pinpoint.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "pinpoint-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{ - Hostname: "pinpoint.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "pinpoint-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{ - Hostname: "pinpoint.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "pinpoint-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "pipes": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "polly": service{ - Endpoints: serviceEndpoints{ - 
endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "polly-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "polly-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "polly-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "polly-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "polly-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "polly-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "polly-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "polly-fips.us-west-2.amazonaws.com", - }, - }, - }, - "portal.sso": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{ - Hostname: "portal.sso.af-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "af-south-1", - }, - }, - endpointKey{ - Region: "ap-east-1", - }: endpoint{ - Hostname: "portal.sso.ap-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-east-1", - }, - }, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{ - Hostname: "portal.sso.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{ - Hostname: "portal.sso.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{ - Hostname: "portal.sso.ap-northeast-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-3", - }, - }, - endpointKey{ - Region: "ap-south-1", - }: endpoint{ - Hostname: "portal.sso.ap-south-1.amazonaws.com", - 
CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - }, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{ - Hostname: "portal.sso.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{ - Hostname: "portal.sso.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{ - Hostname: "portal.sso.ap-southeast-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-3", - }, - }, - endpointKey{ - Region: "ca-central-1", - }: endpoint{ - Hostname: "portal.sso.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{ - Hostname: "portal.sso.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - endpointKey{ - Region: "eu-central-2", - }: endpoint{ - Hostname: "portal.sso.eu-central-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-2", - }, - }, - endpointKey{ - Region: "eu-north-1", - }: endpoint{ - Hostname: "portal.sso.eu-north-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-north-1", - }, - }, - endpointKey{ - Region: "eu-south-1", - }: endpoint{ - Hostname: "portal.sso.eu-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-south-1", - }, - }, - endpointKey{ - Region: "eu-west-1", - }: endpoint{ - Hostname: "portal.sso.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - endpointKey{ - Region: "eu-west-2", - }: endpoint{ - Hostname: "portal.sso.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - }, - endpointKey{ - Region: "eu-west-3", - }: endpoint{ - Hostname: "portal.sso.eu-west-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-3", - }, - }, - endpointKey{ - Region: "il-central-1", - }: endpoint{ - Hostname: "portal.sso.il-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "il-central-1", - }, - }, - endpointKey{ - Region: "me-central-1", - }: endpoint{ - Hostname: "portal.sso.me-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "me-central-1", - }, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{ - Hostname: "portal.sso.me-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "me-south-1", - }, - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{ - Hostname: "portal.sso.sa-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "sa-east-1", - }, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{ - Hostname: "portal.sso.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{ - Hostname: "portal.sso.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{ - Hostname: "portal.sso.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{ - Hostname: "portal.sso.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "private-networks": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-east-1", - }: 
endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "profile": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "profile-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "profile-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "profile-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "profile-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "profile-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "profile-fips.us-west-2.amazonaws.com", - }, - }, - }, - "projects.iot1click": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "proton": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "qbusiness": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - DNSSuffix: "api.aws", - }, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "{service}-fips.{region}.{dnsSuffix}", - DNSSuffix: "api.aws", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{ - Hostname: "qbusiness.af-south-1.api.aws", - }, - endpointKey{ - Region: "ap-east-1", - }: endpoint{ - Hostname: "qbusiness.ap-east-1.api.aws", - }, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{ - Hostname: "qbusiness.ap-northeast-1.api.aws", - }, 
- endpointKey{ - Region: "ap-northeast-2", - }: endpoint{ - Hostname: "qbusiness.ap-northeast-2.api.aws", - }, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{ - Hostname: "qbusiness.ap-northeast-3.api.aws", - }, - endpointKey{ - Region: "ap-south-1", - }: endpoint{ - Hostname: "qbusiness.ap-south-1.api.aws", - }, - endpointKey{ - Region: "ap-south-2", - }: endpoint{ - Hostname: "qbusiness.ap-south-2.api.aws", - }, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{ - Hostname: "qbusiness.ap-southeast-1.api.aws", - }, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{ - Hostname: "qbusiness.ap-southeast-2.api.aws", - }, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{ - Hostname: "qbusiness.ap-southeast-3.api.aws", - }, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{ - Hostname: "qbusiness.ap-southeast-4.api.aws", - }, - endpointKey{ - Region: "ca-central-1", - }: endpoint{ - Hostname: "qbusiness.ca-central-1.api.aws", - }, - endpointKey{ - Region: "ca-west-1", - }: endpoint{ - Hostname: "qbusiness.ca-west-1.api.aws", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{ - Hostname: "qbusiness.eu-central-1.api.aws", - }, - endpointKey{ - Region: "eu-central-2", - }: endpoint{ - Hostname: "qbusiness.eu-central-2.api.aws", - }, - endpointKey{ - Region: "eu-north-1", - }: endpoint{ - Hostname: "qbusiness.eu-north-1.api.aws", - }, - endpointKey{ - Region: "eu-south-1", - }: endpoint{ - Hostname: "qbusiness.eu-south-1.api.aws", - }, - endpointKey{ - Region: "eu-south-2", - }: endpoint{ - Hostname: "qbusiness.eu-south-2.api.aws", - }, - endpointKey{ - Region: "eu-west-1", - }: endpoint{ - Hostname: "qbusiness.eu-west-1.api.aws", - }, - endpointKey{ - Region: "eu-west-2", - }: endpoint{ - Hostname: "qbusiness.eu-west-2.api.aws", - }, - endpointKey{ - Region: "eu-west-3", - }: endpoint{ - Hostname: "qbusiness.eu-west-3.api.aws", - }, - endpointKey{ - Region: "il-central-1", - }: endpoint{ - Hostname: "qbusiness.il-central-1.api.aws", - }, - endpointKey{ - Region: "me-central-1", - }: endpoint{ - Hostname: "qbusiness.me-central-1.api.aws", - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{ - Hostname: "qbusiness.me-south-1.api.aws", - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{ - Hostname: "qbusiness.sa-east-1.api.aws", - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{ - Hostname: "qbusiness.us-east-1.api.aws", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{ - Hostname: "qbusiness.us-east-2.api.aws", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{ - Hostname: "qbusiness.us-west-1.api.aws", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{ - Hostname: "qbusiness.us-west-2.api.aws", - }, - }, - }, - "qldb": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "qldb-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "qldb-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: 
"ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "qldb-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "qldb-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "qldb-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "qldb-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "qldb-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "qldb-fips.us-west-2.amazonaws.com", - }, - }, - }, - "quicksight": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "ram": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ram-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "ca-west-1", - }: endpoint{}, - endpointKey{ - Region: "ca-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ram-fips.ca-west-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - 
endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "ram-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ca-west-1", - }: endpoint{ - Hostname: "ram-fips.ca-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "ram-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "ram-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "ram-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "ram-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ram-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ram-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ram-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ram-fips.us-west-2.amazonaws.com", - }, - }, - }, - "rbin": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rbin-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "ca-west-1", - }: endpoint{}, - endpointKey{ - Region: "ca-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rbin-fips.ca-west-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ 
- Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "rbin-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ca-west-1", - }: endpoint{ - Hostname: "rbin-fips.ca-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "rbin-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "rbin-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "rbin-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "rbin-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rbin-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rbin-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rbin-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rbin-fips.us-west-2.amazonaws.com", - }, - }, - }, - "rds": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rds-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "ca-central-1-fips", - }: endpoint{ - Hostname: "rds-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - 
endpointKey{ - Region: "ca-west-1", - }: endpoint{}, - endpointKey{ - Region: "ca-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rds-fips.ca-west-1.amazonaws.com", - }, - endpointKey{ - Region: "ca-west-1-fips", - }: endpoint{ - Hostname: "rds-fips.ca-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "rds-fips.ca-central-1", - }: endpoint{ - Hostname: "rds-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rds-fips.ca-west-1", - }: endpoint{ - Hostname: "rds-fips.ca-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rds-fips.us-east-1", - }: endpoint{ - Hostname: "rds-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rds-fips.us-east-2", - }: endpoint{ - Hostname: "rds-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rds-fips.us-west-1", - }: endpoint{ - Hostname: "rds-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rds-fips.us-west-2", - }: endpoint{ - Hostname: "rds-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rds.ca-central-1", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rds.ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rds-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rds.ca-west-1", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "ca-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rds.ca-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rds-fips.ca-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rds.us-east-1", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rds.us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rds-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rds.us-east-2", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: 
"rds.us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rds-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rds.us-west-1", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rds.us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rds-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rds.us-west-2", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rds.us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rds-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{ - SSLCommonName: "{service}.{dnsSuffix}", - }, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rds-fips.us-east-1.amazonaws.com", - SSLCommonName: "{service}.{dnsSuffix}", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "rds-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rds-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2-fips", - }: endpoint{ - Hostname: "rds-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rds-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1-fips", - }: endpoint{ - Hostname: "rds-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rds-fips.us-west-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: "rds-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "rds-data": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "rds-data-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: 
"rds-data-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "rds-data-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "rds-data-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rds-data-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rds-data-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rds-data-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rds-data-fips.us-west-2.amazonaws.com", - }, - }, - }, - "redshift": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "redshift-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "ca-west-1", - }: endpoint{}, - endpointKey{ - Region: "ca-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "redshift-fips.ca-west-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "redshift-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ca-west-1", - }: endpoint{ - Hostname: "redshift-fips.ca-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "redshift-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: 
"redshift-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "redshift-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "redshift-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "redshift-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "redshift-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "redshift-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "redshift-fips.us-west-2.amazonaws.com", - }, - }, - }, - "redshift-serverless": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "redshift-serverless-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "redshift-serverless-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "redshift-serverless-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "redshift-serverless-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "redshift-serverless-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "redshift-serverless-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - 
Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "redshift-serverless-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "redshift-serverless-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "redshift-serverless-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "redshift-serverless-fips.us-west-2.amazonaws.com", - }, - }, - }, - "rekognition": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rekognition-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "ca-central-1-fips", - }: endpoint{ - Hostname: "rekognition-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "rekognition-fips.ca-central-1", - }: endpoint{ - Hostname: "rekognition-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rekognition-fips.us-east-1", - }: endpoint{ - Hostname: "rekognition-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rekognition-fips.us-east-2", - }: endpoint{ - Hostname: "rekognition-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rekognition-fips.us-west-1", - }: endpoint{ - Hostname: "rekognition-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rekognition-fips.us-west-2", - }: endpoint{ - Hostname: "rekognition-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rekognition.ca-central-1", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rekognition.ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rekognition-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rekognition.us-east-1", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rekognition.us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: 
"rekognition-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rekognition.us-east-2", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rekognition.us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rekognition-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rekognition.us-west-1", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rekognition.us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rekognition-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rekognition.us-west-2", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rekognition.us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rekognition-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rekognition-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "rekognition-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rekognition-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2-fips", - }: endpoint{ - Hostname: "rekognition-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rekognition-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1-fips", - }: endpoint{ - Hostname: "rekognition-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rekognition-fips.us-west-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: "rekognition-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "resiliencehub": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: 
"eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "resource-explorer-2": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "resource-groups": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "resource-groups-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "resource-groups-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: 
"resource-groups-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "resource-groups-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "resource-groups-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "resource-groups-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "resource-groups-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "resource-groups-fips.us-west-2.amazonaws.com", - }, - }, - }, - "robomaker": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "rolesanywhere": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "rolesanywhere-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "rolesanywhere-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: 
"fips-us-west-1", - }: endpoint{ - Hostname: "rolesanywhere-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "rolesanywhere-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rolesanywhere-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rolesanywhere-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rolesanywhere-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rolesanywhere-fips.us-west-2.amazonaws.com", - }, - }, - }, - "route53": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-global", - }: endpoint{ - Hostname: "route53.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "aws-global", - Variant: fipsVariant, - }: endpoint{ - Hostname: "route53-fips.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "fips-aws-global", - }: endpoint{ - Hostname: "route53-fips.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "route53-recovery-control-config": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-global", - }: endpoint{ - Hostname: "route53-recovery-control-config.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "route53domains": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - }, - }, - "route53resolver": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: 
"eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "rum": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "runtime-v2-lex": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "runtime.lex": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - CredentialScope: credentialScope{ - Service: "lex", - }, - }, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "runtime-fips.lex.{region}.{dnsSuffix}", - CredentialScope: credentialScope{ - Service: "lex", - }, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "runtime-fips.lex.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "runtime-fips.lex.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "runtime-fips.lex.us-west-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: "runtime-fips.lex.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: 
"us-west-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "runtime.sagemaker": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "runtime-fips.sagemaker.{region}.{dnsSuffix}", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "runtime-fips.sagemaker.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "runtime-fips.sagemaker.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "runtime-fips.sagemaker.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2-fips", - }: endpoint{ - Hostname: "runtime-fips.sagemaker.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "runtime-fips.sagemaker.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1-fips", - }: endpoint{ - Hostname: "runtime-fips.sagemaker.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "runtime-fips.sagemaker.us-west-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: "runtime-fips.sagemaker.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "s3": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedTrue, - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: 
[]string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, - }, - defaultKey{ - Variant: dualStackVariant, - }: endpoint{ - Hostname: "{service}.dualstack.{region}.{dnsSuffix}", - DNSSuffix: "amazonaws.com", - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, - }, - defaultKey{ - Variant: fipsVariant | dualStackVariant, - }: endpoint{ - Hostname: "{service}-fips.dualstack.{region}.{dnsSuffix}", - DNSSuffix: "amazonaws.com", - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "af-south-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3.dualstack.af-south-1.amazonaws.com", - }, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3.dualstack.ap-east-1.amazonaws.com", - }, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{ - Hostname: "s3.ap-northeast-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpointKey{ - Region: "ap-northeast-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3.dualstack.ap-northeast-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3.dualstack.ap-northeast-2.amazonaws.com", - }, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3.dualstack.ap-northeast-3.amazonaws.com", - }, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3.dualstack.ap-south-1.amazonaws.com", - }, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3.dualstack.ap-south-2.amazonaws.com", - }, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{ - Hostname: "s3.ap-southeast-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpointKey{ - Region: "ap-southeast-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3.dualstack.ap-southeast-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{ - Hostname: "s3.ap-southeast-2.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpointKey{ - Region: "ap-southeast-2", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3.dualstack.ap-southeast-2.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3.dualstack.ap-southeast-3.amazonaws.com", - }, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3.dualstack.ap-southeast-4.amazonaws.com", - }, - endpointKey{ - Region: "aws-global", - }: endpoint{ - Hostname: "s3.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - 
Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3.dualstack.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "s3-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant | dualStackVariant, - }: endpoint{ - Hostname: "s3-fips.dualstack.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "ca-west-1", - }: endpoint{}, - endpointKey{ - Region: "ca-west-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3.dualstack.ca-west-1.amazonaws.com", - }, - endpointKey{ - Region: "ca-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "s3-fips.ca-west-1.amazonaws.com", - }, - endpointKey{ - Region: "ca-west-1", - Variant: fipsVariant | dualStackVariant, - }: endpoint{ - Hostname: "s3-fips.dualstack.ca-west-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3.dualstack.eu-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3.dualstack.eu-central-2.amazonaws.com", - }, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3.dualstack.eu-north-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3.dualstack.eu-south-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3.dualstack.eu-south-2.amazonaws.com", - }, - endpointKey{ - Region: "eu-west-1", - }: endpoint{ - Hostname: "s3.eu-west-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpointKey{ - Region: "eu-west-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3.dualstack.eu-west-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3.dualstack.eu-west-2.amazonaws.com", - }, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3.dualstack.eu-west-3.amazonaws.com", - }, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "s3-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ca-west-1", - }: endpoint{ - Hostname: "s3-fips.ca-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "s3-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "s3-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "s3-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: 
"us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "s3-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "il-central-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3.dualstack.il-central-1.amazonaws.com", - }, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3.dualstack.me-central-1.amazonaws.com", - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3.dualstack.me-south-1.amazonaws.com", - }, - endpointKey{ - Region: "s3-external-1", - }: endpoint{ - Hostname: "s3-external-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{ - Hostname: "s3.sa-east-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpointKey{ - Region: "sa-east-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3.dualstack.sa-east-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{ - Hostname: "s3.us-east-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpointKey{ - Region: "us-east-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3.dualstack.us-east-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "s3-fips.us-east-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant | dualStackVariant, - }: endpoint{ - Hostname: "s3-fips.dualstack.us-east-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3.dualstack.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "s3-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant | dualStackVariant, - }: endpoint{ - Hostname: "s3-fips.dualstack.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{ - Hostname: "s3.us-west-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpointKey{ - Region: "us-west-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3.dualstack.us-west-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "s3-fips.us-west-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant | dualStackVariant, - }: endpoint{ - Hostname: "s3-fips.dualstack.us-west-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{ - Hostname: "s3.us-west-2.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpointKey{ - Region: "us-west-2", - Variant: dualStackVariant, - }: endpoint{ - Hostname: 
"s3.dualstack.us-west-2.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "s3-fips.us-west-2.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant | dualStackVariant, - }: endpoint{ - Hostname: "s3-fips.dualstack.us-west-2.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - }, - }, - "s3-control": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - SignatureVersions: []string{"s3v4"}, - }, - defaultKey{ - Variant: dualStackVariant, - }: endpoint{ - Hostname: "{service}.dualstack.{region}.{dnsSuffix}", - DNSSuffix: "amazonaws.com", - Protocols: []string{"https"}, - SignatureVersions: []string{"s3v4"}, - }, - defaultKey{ - Variant: fipsVariant | dualStackVariant, - }: endpoint{ - Hostname: "{service}-fips.dualstack.{region}.{dnsSuffix}", - DNSSuffix: "amazonaws.com", - Protocols: []string{"https"}, - SignatureVersions: []string{"s3v4"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{ - Hostname: "s3-control.ap-northeast-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - endpointKey{ - Region: "ap-northeast-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3-control.dualstack.ap-northeast-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{ - Hostname: "s3-control.ap-northeast-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, - endpointKey{ - Region: "ap-northeast-2", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3-control.dualstack.ap-northeast-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{ - Hostname: "s3-control.ap-northeast-3.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "ap-northeast-3", - }, - }, - endpointKey{ - Region: "ap-northeast-3", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3-control.dualstack.ap-northeast-3.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "ap-northeast-3", - }, - }, - endpointKey{ - Region: "ap-south-1", - }: endpoint{ - Hostname: "s3-control.ap-south-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - }, - endpointKey{ - Region: "ap-south-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3-control.dualstack.ap-south-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - }, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{ - Hostname: "s3-control.ap-southeast-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - endpointKey{ - Region: "ap-southeast-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3-control.dualstack.ap-southeast-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, 
- }, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{ - Hostname: "s3-control.ap-southeast-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - endpointKey{ - Region: "ap-southeast-2", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3-control.dualstack.ap-southeast-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - endpointKey{ - Region: "ca-central-1", - }: endpoint{ - Hostname: "s3-control.ca-central-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - endpointKey{ - Region: "ca-central-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3-control.dualstack.ca-central-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "s3-control-fips.ca-central-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant | dualStackVariant, - }: endpoint{ - Hostname: "s3-control-fips.dualstack.ca-central-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - endpointKey{ - Region: "ca-central-1-fips", - }: endpoint{ - Hostname: "s3-control-fips.ca-central-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{ - Hostname: "s3-control.eu-central-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - endpointKey{ - Region: "eu-central-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3-control.dualstack.eu-central-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - endpointKey{ - Region: "eu-north-1", - }: endpoint{ - Hostname: "s3-control.eu-north-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "eu-north-1", - }, - }, - endpointKey{ - Region: "eu-north-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3-control.dualstack.eu-north-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "eu-north-1", - }, - }, - endpointKey{ - Region: "eu-west-1", - }: endpoint{ - Hostname: "s3-control.eu-west-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - endpointKey{ - Region: "eu-west-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3-control.dualstack.eu-west-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - endpointKey{ - Region: "eu-west-2", - }: endpoint{ - Hostname: "s3-control.eu-west-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - }, - endpointKey{ - Region: "eu-west-2", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3-control.dualstack.eu-west-2.amazonaws.com", - SignatureVersions: 
[]string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - }, - endpointKey{ - Region: "eu-west-3", - }: endpoint{ - Hostname: "s3-control.eu-west-3.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "eu-west-3", - }, - }, - endpointKey{ - Region: "eu-west-3", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3-control.dualstack.eu-west-3.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "eu-west-3", - }, - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{ - Hostname: "s3-control.sa-east-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "sa-east-1", - }, - }, - endpointKey{ - Region: "sa-east-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3-control.dualstack.sa-east-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "sa-east-1", - }, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{ - Hostname: "s3-control.us-east-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "us-east-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3-control.dualstack.us-east-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "s3-control-fips.us-east-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant | dualStackVariant, - }: endpoint{ - Hostname: "s3-control-fips.dualstack.us-east-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "s3-control-fips.us-east-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{ - Hostname: "s3-control.us-east-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - endpointKey{ - Region: "us-east-2", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3-control.dualstack.us-east-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "s3-control-fips.us-east-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant | dualStackVariant, - }: endpoint{ - Hostname: "s3-control-fips.dualstack.us-east-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - endpointKey{ - Region: "us-east-2-fips", - }: endpoint{ - Hostname: "s3-control-fips.us-east-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{ - Hostname: "s3-control.us-west-1.amazonaws.com", - 
SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - endpointKey{ - Region: "us-west-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3-control.dualstack.us-west-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "s3-control-fips.us-west-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant | dualStackVariant, - }: endpoint{ - Hostname: "s3-control-fips.dualstack.us-west-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - endpointKey{ - Region: "us-west-1-fips", - }: endpoint{ - Hostname: "s3-control-fips.us-west-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{ - Hostname: "s3-control.us-west-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - endpointKey{ - Region: "us-west-2", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3-control.dualstack.us-west-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "s3-control-fips.us-west-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant | dualStackVariant, - }: endpoint{ - Hostname: "s3-control-fips.dualstack.us-west-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: "s3-control-fips.us-west-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "s3-outposts": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "af-south-1", - Variant: dualStackVariant, - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - Variant: dualStackVariant, - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - Variant: dualStackVariant, - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - Variant: dualStackVariant, - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - Variant: dualStackVariant, - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - Variant: dualStackVariant, - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - Variant: dualStackVariant, - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - Variant: dualStackVariant, - }: 
endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - Variant: dualStackVariant, - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: dualStackVariant, - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant | dualStackVariant, - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - Variant: dualStackVariant, - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - Variant: dualStackVariant, - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - Variant: dualStackVariant, - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - Variant: dualStackVariant, - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - Variant: dualStackVariant, - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - Variant: dualStackVariant, - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "il-central-1", - Variant: dualStackVariant, - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - Variant: dualStackVariant, - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - Variant: dualStackVariant, - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: dualStackVariant, - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant | dualStackVariant, - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: dualStackVariant, - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant | dualStackVariant, - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: dualStackVariant, - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant | dualStackVariant, - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: dualStackVariant, - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant | dualStackVariant, - }: endpoint{}, - }, - }, - "sagemaker-geospatial": service{ - Endpoints: serviceEndpoints{ - 
endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "savingsplans": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-global", - }: endpoint{ - Hostname: "savingsplans.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "scheduler": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "schemas": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "sdb": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", 
"https"}, - SignatureVersions: []string{"v2"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{ - Hostname: "sdb.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "secretsmanager": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "af-south-1", - Variant: dualStackVariant, - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - Variant: dualStackVariant, - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - Variant: dualStackVariant, - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - Variant: dualStackVariant, - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - Variant: dualStackVariant, - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - Variant: dualStackVariant, - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - Variant: dualStackVariant, - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - Variant: dualStackVariant, - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - Variant: dualStackVariant, - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - Variant: dualStackVariant, - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - Variant: dualStackVariant, - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: dualStackVariant, - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant | dualStackVariant, - }: endpoint{}, - endpointKey{ - Region: "ca-central-1-fips", - }: endpoint{ - - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "ca-west-1", - }: endpoint{}, - endpointKey{ - Region: "ca-west-1", - Variant: dualStackVariant, - }: endpoint{}, - endpointKey{ - Region: "ca-west-1", - Variant: fipsVariant, - }: endpoint{}, - endpointKey{ - Region: "ca-west-1", - Variant: fipsVariant | dualStackVariant, - }: endpoint{}, - endpointKey{ - Region: "ca-west-1-fips", - }: endpoint{ - - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - Variant: dualStackVariant, - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - Variant: dualStackVariant, - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - Variant: dualStackVariant, - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - 
}: endpoint{}, - endpointKey{ - Region: "eu-south-1", - Variant: dualStackVariant, - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - Variant: dualStackVariant, - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - Variant: dualStackVariant, - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - Variant: dualStackVariant, - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - Variant: dualStackVariant, - }: endpoint{}, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "il-central-1", - Variant: dualStackVariant, - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - Variant: dualStackVariant, - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - Variant: dualStackVariant, - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - Variant: dualStackVariant, - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: dualStackVariant, - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant | dualStackVariant, - }: endpoint{}, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: dualStackVariant, - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant | dualStackVariant, - }: endpoint{}, - endpointKey{ - Region: "us-east-2-fips", - }: endpoint{ - - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: dualStackVariant, - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant | dualStackVariant, - }: endpoint{}, - endpointKey{ - Region: "us-west-1-fips", - }: endpoint{ - - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: dualStackVariant, - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant | dualStackVariant, - }: endpoint{}, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - - Deprecated: boxedTrue, - }, - }, - }, - "securityhub": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: 
endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "securityhub-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "securityhub-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "securityhub-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "securityhub-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "securityhub-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "securityhub-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "securityhub-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "securityhub-fips.us-west-2.amazonaws.com", - }, - }, - }, - "securitylake": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "securitylake-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "securitylake-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, 
- Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "securitylake-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "securitylake-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "securitylake-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "securitylake-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "securitylake-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "securitylake-fips.us-west-2.amazonaws.com", - }, - }, - }, - "serverlessrepo": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-east-1", - }: endpoint{ - Protocols: []string{"https"}, - }, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{ - Protocols: []string{"https"}, - }, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{ - Protocols: []string{"https"}, - }, - endpointKey{ - Region: "ap-south-1", - }: endpoint{ - Protocols: []string{"https"}, - }, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{ - Protocols: []string{"https"}, - }, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{ - Protocols: []string{"https"}, - }, - endpointKey{ - Region: "ca-central-1", - }: endpoint{ - Protocols: []string{"https"}, - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{ - Protocols: []string{"https"}, - }, - endpointKey{ - Region: "eu-north-1", - }: endpoint{ - Protocols: []string{"https"}, - }, - endpointKey{ - Region: "eu-west-1", - }: endpoint{ - Protocols: []string{"https"}, - }, - endpointKey{ - Region: "eu-west-2", - }: endpoint{ - Protocols: []string{"https"}, - }, - endpointKey{ - Region: "eu-west-3", - }: endpoint{ - Protocols: []string{"https"}, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{ - Protocols: []string{"https"}, - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{ - Protocols: []string{"https"}, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{ - Protocols: []string{"https"}, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{ - Protocols: []string{"https"}, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{ - Protocols: []string{"https"}, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{ - Protocols: []string{"https"}, - }, - }, - }, - "servicecatalog": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - 
}: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "servicecatalog-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "servicecatalog-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "servicecatalog-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2-fips", - }: endpoint{ - Hostname: "servicecatalog-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "servicecatalog-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1-fips", - }: endpoint{ - Hostname: "servicecatalog-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "servicecatalog-fips.us-west-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: "servicecatalog-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "servicecatalog-appregistry": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "servicecatalog-appregistry-fips.ca-central-1.amazonaws.com", 
- }, - endpointKey{ - Region: "ca-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "servicecatalog-appregistry-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "servicecatalog-appregistry-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "servicecatalog-appregistry-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "servicecatalog-appregistry-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "servicecatalog-appregistry-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "servicecatalog-appregistry-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "servicecatalog-appregistry-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "servicecatalog-appregistry-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "servicecatalog-appregistry-fips.us-west-2.amazonaws.com", - }, - }, - }, - "servicediscovery": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "af-south-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "servicediscovery.af-south-1.api.aws", - }, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "servicediscovery.ap-east-1.api.aws", - }, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "servicediscovery.ap-northeast-1.api.aws", - }, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - Variant: dualStackVariant, - }: endpoint{ - Hostname: 
"servicediscovery.ap-northeast-2.api.aws", - }, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "servicediscovery.ap-northeast-3.api.aws", - }, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "servicediscovery.ap-south-1.api.aws", - }, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "servicediscovery.ap-south-2.api.aws", - }, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "servicediscovery.ap-southeast-1.api.aws", - }, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "servicediscovery.ap-southeast-2.api.aws", - }, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "servicediscovery.ap-southeast-3.api.aws", - }, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "servicediscovery.ap-southeast-4.api.aws", - }, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "servicediscovery.ca-central-1.api.aws", - }, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "servicediscovery-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant | dualStackVariant, - }: endpoint{ - Hostname: "servicediscovery-fips.ca-central-1.api.aws", - }, - endpointKey{ - Region: "ca-central-1-fips", - }: endpoint{ - Hostname: "servicediscovery-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "servicediscovery.eu-central-1.api.aws", - }, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "servicediscovery.eu-central-2.api.aws", - }, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "servicediscovery.eu-north-1.api.aws", - }, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "servicediscovery.eu-south-1.api.aws", - }, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "servicediscovery.eu-south-2.api.aws", - }, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "servicediscovery.eu-west-1.api.aws", - }, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - Variant: dualStackVariant, - }: endpoint{ - Hostname: 
"servicediscovery.eu-west-2.api.aws", - }, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "servicediscovery.eu-west-3.api.aws", - }, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "il-central-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "servicediscovery.il-central-1.api.aws", - }, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "servicediscovery.me-central-1.api.aws", - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "servicediscovery.me-south-1.api.aws", - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "servicediscovery.sa-east-1.api.aws", - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "servicediscovery.us-east-1.api.aws", - }, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "servicediscovery-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant | dualStackVariant, - }: endpoint{ - Hostname: "servicediscovery-fips.us-east-1.api.aws", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "servicediscovery-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "servicediscovery.us-east-2.api.aws", - }, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "servicediscovery-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant | dualStackVariant, - }: endpoint{ - Hostname: "servicediscovery-fips.us-east-2.api.aws", - }, - endpointKey{ - Region: "us-east-2-fips", - }: endpoint{ - Hostname: "servicediscovery-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "servicediscovery.us-west-1.api.aws", - }, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "servicediscovery-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant | dualStackVariant, - }: endpoint{ - Hostname: "servicediscovery-fips.us-west-1.api.aws", - }, - endpointKey{ - Region: "us-west-1-fips", - }: endpoint{ - Hostname: "servicediscovery-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "servicediscovery.us-west-2.api.aws", - }, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "servicediscovery-fips.us-west-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant | dualStackVariant, - 
}: endpoint{ - Hostname: "servicediscovery-fips.us-west-2.api.aws", - }, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: "servicediscovery-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "servicequotas": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "session.qldb": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "session.qldb-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "session.qldb-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "session.qldb-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "session.qldb-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - 
Variant: fipsVariant, - }: endpoint{ - Hostname: "session.qldb-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "session.qldb-fips.us-west-2.amazonaws.com", - }, - }, - }, - "shield": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - SSLCommonName: "shield.us-east-1.amazonaws.com", - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-global", - }: endpoint{ - Hostname: "shield.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "aws-global", - Variant: fipsVariant, - }: endpoint{ - Hostname: "shield-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "fips-aws-global", - }: endpoint{ - Hostname: "shield-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "signer": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "signer-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "signer-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "signer-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "signer-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-verification-us-east-1", - }: endpoint{ - Hostname: "verification.signer-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "fips-verification-us-east-2", - }: endpoint{ - Hostname: "verification.signer-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - endpointKey{ - Region: "fips-verification-us-west-1", - }: endpoint{ - Hostname: "verification.signer-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - endpointKey{ - Region: "fips-verification-us-west-2", - }: endpoint{ - Hostname: 
"verification.signer-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "signer-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "signer-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "signer-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "signer-fips.us-west-2.amazonaws.com", - }, - endpointKey{ - Region: "verification-af-south-1", - }: endpoint{ - Hostname: "verification.signer.af-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "af-south-1", - }, - }, - endpointKey{ - Region: "verification-ap-east-1", - }: endpoint{ - Hostname: "verification.signer.ap-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-east-1", - }, - }, - endpointKey{ - Region: "verification-ap-northeast-1", - }: endpoint{ - Hostname: "verification.signer.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - endpointKey{ - Region: "verification-ap-northeast-2", - }: endpoint{ - Hostname: "verification.signer.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, - endpointKey{ - Region: "verification-ap-south-1", - }: endpoint{ - Hostname: "verification.signer.ap-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - }, - endpointKey{ - Region: "verification-ap-southeast-1", - }: endpoint{ - Hostname: "verification.signer.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - endpointKey{ - Region: "verification-ap-southeast-2", - }: endpoint{ - Hostname: "verification.signer.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - endpointKey{ - Region: "verification-ca-central-1", - }: endpoint{ - Hostname: "verification.signer.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - endpointKey{ - Region: "verification-eu-central-1", - }: endpoint{ - Hostname: "verification.signer.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - endpointKey{ - Region: "verification-eu-north-1", - }: endpoint{ - Hostname: "verification.signer.eu-north-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-north-1", - }, - }, - endpointKey{ - Region: "verification-eu-south-1", - }: endpoint{ - Hostname: "verification.signer.eu-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-south-1", - }, - }, - endpointKey{ - Region: "verification-eu-west-1", - }: endpoint{ - Hostname: "verification.signer.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - endpointKey{ - Region: "verification-eu-west-2", - }: endpoint{ - Hostname: "verification.signer.eu-west-2.amazonaws.com", - CredentialScope: 
[hunk condensed: this span of the patch is one unbroken run of deletion lines from the code-generated AWS SDK for Go v1 endpoint table (vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go), which this change removes in full; the original one-removal-per-line diff formatting was flattened in extraction. The deleted serviceEndpoints blocks cover, in order: the remaining per-region "verification-*" endpoints of the signer service, then simspaceweaver, sms, sms-voice, snowball, sns, sqs, ssm, ssm-contacts, ssm-incidents, ssm-sap, sso, states, storagegateway, streams.dynamodb, sts, support, supportapp, swf, synthetics, tagging, textract, thinclient, tnb, transcribe, transcribestreaming, transfer, translate, verifiedpermissions, and the start of voice-chime, whose block continues below. Each deleted block enumerates the service's regional endpointKey entries, fipsVariant hostnames, deprecated fips-* / *-fips aliases with their CredentialScope, and, where present, service Defaults (protocols, SSLCommonName, FIPS hostname templates such as "fips.transcribe.{region}.{dnsSuffix}"), special entries like the streams.dynamodb "local" endpoint (localhost:8000), or a PartitionEndpoint such as the aws-global entries for sts and support.]
- endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "voice-chime-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-1-fips", - }: endpoint{ - Hostname: "voice-chime-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "voice-chime-fips.us-west-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2-fips", - }: endpoint{ - Hostname: "voice-chime-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - }, - }, - "voiceid": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "voiceid-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "voiceid-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "voiceid-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "voiceid-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "voiceid-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "voiceid-fips.us-west-2.amazonaws.com", - }, - }, - }, - "vpc-lattice": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "waf": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "aws", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-fips.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - 
Deprecated: boxedTrue, - }, - endpointKey{ - Region: "aws-fips", - }: endpoint{ - Hostname: "waf-fips.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "aws-global", - }: endpoint{ - Hostname: "waf.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "aws-global", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-fips.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "aws-global-fips", - }: endpoint{ - Hostname: "waf-fips.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "waf-regional": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{ - Hostname: "waf-regional.af-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "af-south-1", - }, - }, - endpointKey{ - Region: "af-south-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.af-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "af-south-1", - }, - }, - endpointKey{ - Region: "ap-east-1", - }: endpoint{ - Hostname: "waf-regional.ap-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-east-1", - }, - }, - endpointKey{ - Region: "ap-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.ap-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-east-1", - }, - }, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{ - Hostname: "waf-regional.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - endpointKey{ - Region: "ap-northeast-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{ - Hostname: "waf-regional.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, - endpointKey{ - Region: "ap-northeast-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{ - Hostname: "waf-regional.ap-northeast-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-3", - }, - }, - endpointKey{ - Region: "ap-northeast-3", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.ap-northeast-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-3", - }, - }, - endpointKey{ - Region: "ap-south-1", - }: endpoint{ - Hostname: "waf-regional.ap-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - }, - endpointKey{ - Region: "ap-south-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.ap-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - }, - endpointKey{ - Region: "ap-south-2", - }: endpoint{ - Hostname: "waf-regional.ap-south-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-2", - }, - }, - endpointKey{ - Region: "ap-south-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.ap-south-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-2", - }, 
- }, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{ - Hostname: "waf-regional.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - endpointKey{ - Region: "ap-southeast-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{ - Hostname: "waf-regional.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - endpointKey{ - Region: "ap-southeast-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{ - Hostname: "waf-regional.ap-southeast-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-3", - }, - }, - endpointKey{ - Region: "ap-southeast-3", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.ap-southeast-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-3", - }, - }, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{ - Hostname: "waf-regional.ap-southeast-4.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-4", - }, - }, - endpointKey{ - Region: "ap-southeast-4", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.ap-southeast-4.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-4", - }, - }, - endpointKey{ - Region: "ca-central-1", - }: endpoint{ - Hostname: "waf-regional.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{ - Hostname: "waf-regional.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - endpointKey{ - Region: "eu-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - endpointKey{ - Region: "eu-central-2", - }: endpoint{ - Hostname: "waf-regional.eu-central-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-2", - }, - }, - endpointKey{ - Region: "eu-central-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.eu-central-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-2", - }, - }, - endpointKey{ - Region: "eu-north-1", - }: endpoint{ - Hostname: "waf-regional.eu-north-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-north-1", - }, - }, - endpointKey{ - Region: "eu-north-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.eu-north-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-north-1", - }, - }, - endpointKey{ - Region: "eu-south-1", - }: endpoint{ - Hostname: "waf-regional.eu-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-south-1", - }, - }, - endpointKey{ - Region: "eu-south-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.eu-south-1.amazonaws.com", - CredentialScope: credentialScope{ - 
Region: "eu-south-1", - }, - }, - endpointKey{ - Region: "eu-south-2", - }: endpoint{ - Hostname: "waf-regional.eu-south-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-south-2", - }, - }, - endpointKey{ - Region: "eu-south-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.eu-south-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-south-2", - }, - }, - endpointKey{ - Region: "eu-west-1", - }: endpoint{ - Hostname: "waf-regional.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - endpointKey{ - Region: "eu-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - endpointKey{ - Region: "eu-west-2", - }: endpoint{ - Hostname: "waf-regional.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - }, - endpointKey{ - Region: "eu-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - }, - endpointKey{ - Region: "eu-west-3", - }: endpoint{ - Hostname: "waf-regional.eu-west-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-3", - }, - }, - endpointKey{ - Region: "eu-west-3", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.eu-west-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-3", - }, - }, - endpointKey{ - Region: "fips-af-south-1", - }: endpoint{ - Hostname: "waf-regional-fips.af-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "af-south-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-east-1", - }: endpoint{ - Hostname: "waf-regional-fips.ap-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-northeast-1", - }: endpoint{ - Hostname: "waf-regional-fips.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-northeast-2", - }: endpoint{ - Hostname: "waf-regional-fips.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-northeast-3", - }: endpoint{ - Hostname: "waf-regional-fips.ap-northeast-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-3", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-south-1", - }: endpoint{ - Hostname: "waf-regional-fips.ap-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-south-2", - }: endpoint{ - Hostname: "waf-regional-fips.ap-south-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-southeast-1", - }: endpoint{ - Hostname: "waf-regional-fips.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-southeast-2", - }: endpoint{ - Hostname: "waf-regional-fips.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: 
"fips-ap-southeast-3", - }: endpoint{ - Hostname: "waf-regional-fips.ap-southeast-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-3", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-southeast-4", - }: endpoint{ - Hostname: "waf-regional-fips.ap-southeast-4.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-4", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "waf-regional-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-central-1", - }: endpoint{ - Hostname: "waf-regional-fips.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-central-2", - }: endpoint{ - Hostname: "waf-regional-fips.eu-central-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-north-1", - }: endpoint{ - Hostname: "waf-regional-fips.eu-north-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-north-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-south-1", - }: endpoint{ - Hostname: "waf-regional-fips.eu-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-south-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-south-2", - }: endpoint{ - Hostname: "waf-regional-fips.eu-south-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-south-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-west-1", - }: endpoint{ - Hostname: "waf-regional-fips.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-west-2", - }: endpoint{ - Hostname: "waf-regional-fips.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-west-3", - }: endpoint{ - Hostname: "waf-regional-fips.eu-west-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-3", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-il-central-1", - }: endpoint{ - Hostname: "waf-regional-fips.il-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "il-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-me-central-1", - }: endpoint{ - Hostname: "waf-regional-fips.me-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "me-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-me-south-1", - }: endpoint{ - Hostname: "waf-regional-fips.me-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "me-south-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-sa-east-1", - }: endpoint{ - Hostname: "waf-regional-fips.sa-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "sa-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "waf-regional-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "waf-regional-fips.us-east-2.amazonaws.com", - CredentialScope: 
credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "waf-regional-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "waf-regional-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "il-central-1", - }: endpoint{ - Hostname: "waf-regional.il-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "il-central-1", - }, - }, - endpointKey{ - Region: "il-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.il-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "il-central-1", - }, - }, - endpointKey{ - Region: "me-central-1", - }: endpoint{ - Hostname: "waf-regional.me-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "me-central-1", - }, - }, - endpointKey{ - Region: "me-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.me-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "me-central-1", - }, - }, - endpointKey{ - Region: "me-south-1", - }: endpoint{ - Hostname: "waf-regional.me-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "me-south-1", - }, - }, - endpointKey{ - Region: "me-south-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.me-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "me-south-1", - }, - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{ - Hostname: "waf-regional.sa-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "sa-east-1", - }, - }, - endpointKey{ - Region: "sa-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.sa-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "sa-east-1", - }, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{ - Hostname: "waf-regional.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{ - Hostname: "waf-regional.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{ - Hostname: "waf-regional.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{ - Hostname: "waf-regional.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - 
}, - }, - "wafv2": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{ - Hostname: "wafv2.af-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "af-south-1", - }, - }, - endpointKey{ - Region: "af-south-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "wafv2-fips.af-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "af-south-1", - }, - }, - endpointKey{ - Region: "ap-east-1", - }: endpoint{ - Hostname: "wafv2.ap-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-east-1", - }, - }, - endpointKey{ - Region: "ap-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "wafv2-fips.ap-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-east-1", - }, - }, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{ - Hostname: "wafv2.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - endpointKey{ - Region: "ap-northeast-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "wafv2-fips.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - }, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{ - Hostname: "wafv2.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, - endpointKey{ - Region: "ap-northeast-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "wafv2-fips.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{ - Hostname: "wafv2.ap-northeast-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-3", - }, - }, - endpointKey{ - Region: "ap-northeast-3", - Variant: fipsVariant, - }: endpoint{ - Hostname: "wafv2-fips.ap-northeast-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-3", - }, - }, - endpointKey{ - Region: "ap-south-1", - }: endpoint{ - Hostname: "wafv2.ap-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - }, - endpointKey{ - Region: "ap-south-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "wafv2-fips.ap-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - }, - endpointKey{ - Region: "ap-south-2", - }: endpoint{ - Hostname: "wafv2.ap-south-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-2", - }, - }, - endpointKey{ - Region: "ap-south-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "wafv2-fips.ap-south-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-2", - }, - }, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{ - Hostname: "wafv2.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - endpointKey{ - Region: "ap-southeast-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "wafv2-fips.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - }, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{ - Hostname: "wafv2.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - endpointKey{ - Region: "ap-southeast-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "wafv2-fips.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - }, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{ - 
Hostname: "wafv2.ap-southeast-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-3", - }, - }, - endpointKey{ - Region: "ap-southeast-3", - Variant: fipsVariant, - }: endpoint{ - Hostname: "wafv2-fips.ap-southeast-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-3", - }, - }, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{ - Hostname: "wafv2.ap-southeast-4.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-4", - }, - }, - endpointKey{ - Region: "ap-southeast-4", - Variant: fipsVariant, - }: endpoint{ - Hostname: "wafv2-fips.ap-southeast-4.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-4", - }, - }, - endpointKey{ - Region: "ca-central-1", - }: endpoint{ - Hostname: "wafv2.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "wafv2-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{ - Hostname: "wafv2.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - endpointKey{ - Region: "eu-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "wafv2-fips.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - }, - endpointKey{ - Region: "eu-central-2", - }: endpoint{ - Hostname: "wafv2.eu-central-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-2", - }, - }, - endpointKey{ - Region: "eu-central-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "wafv2-fips.eu-central-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-2", - }, - }, - endpointKey{ - Region: "eu-north-1", - }: endpoint{ - Hostname: "wafv2.eu-north-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-north-1", - }, - }, - endpointKey{ - Region: "eu-north-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "wafv2-fips.eu-north-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-north-1", - }, - }, - endpointKey{ - Region: "eu-south-1", - }: endpoint{ - Hostname: "wafv2.eu-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-south-1", - }, - }, - endpointKey{ - Region: "eu-south-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "wafv2-fips.eu-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-south-1", - }, - }, - endpointKey{ - Region: "eu-south-2", - }: endpoint{ - Hostname: "wafv2.eu-south-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-south-2", - }, - }, - endpointKey{ - Region: "eu-south-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "wafv2-fips.eu-south-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-south-2", - }, - }, - endpointKey{ - Region: "eu-west-1", - }: endpoint{ - Hostname: "wafv2.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - endpointKey{ - Region: "eu-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "wafv2-fips.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - endpointKey{ - Region: "eu-west-2", - }: endpoint{ - Hostname: "wafv2.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - }, - endpointKey{ - Region: "eu-west-2", - Variant: 
fipsVariant, - }: endpoint{ - Hostname: "wafv2-fips.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - }, - endpointKey{ - Region: "eu-west-3", - }: endpoint{ - Hostname: "wafv2.eu-west-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-3", - }, - }, - endpointKey{ - Region: "eu-west-3", - Variant: fipsVariant, - }: endpoint{ - Hostname: "wafv2-fips.eu-west-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-3", - }, - }, - endpointKey{ - Region: "fips-af-south-1", - }: endpoint{ - Hostname: "wafv2-fips.af-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "af-south-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-east-1", - }: endpoint{ - Hostname: "wafv2-fips.ap-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-northeast-1", - }: endpoint{ - Hostname: "wafv2-fips.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-northeast-2", - }: endpoint{ - Hostname: "wafv2-fips.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-northeast-3", - }: endpoint{ - Hostname: "wafv2-fips.ap-northeast-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-3", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-south-1", - }: endpoint{ - Hostname: "wafv2-fips.ap-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-south-2", - }: endpoint{ - Hostname: "wafv2-fips.ap-south-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-southeast-1", - }: endpoint{ - Hostname: "wafv2-fips.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-southeast-2", - }: endpoint{ - Hostname: "wafv2-fips.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-southeast-3", - }: endpoint{ - Hostname: "wafv2-fips.ap-southeast-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-3", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ap-southeast-4", - }: endpoint{ - Hostname: "wafv2-fips.ap-southeast-4.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-4", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "wafv2-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-central-1", - }: endpoint{ - Hostname: "wafv2-fips.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-central-2", - }: endpoint{ - Hostname: "wafv2-fips.eu-central-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-north-1", - }: endpoint{ - Hostname: 
"wafv2-fips.eu-north-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-north-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-south-1", - }: endpoint{ - Hostname: "wafv2-fips.eu-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-south-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-south-2", - }: endpoint{ - Hostname: "wafv2-fips.eu-south-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-south-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-west-1", - }: endpoint{ - Hostname: "wafv2-fips.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-west-2", - }: endpoint{ - Hostname: "wafv2-fips.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-eu-west-3", - }: endpoint{ - Hostname: "wafv2-fips.eu-west-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-3", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-il-central-1", - }: endpoint{ - Hostname: "wafv2-fips.il-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "il-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-me-central-1", - }: endpoint{ - Hostname: "wafv2-fips.me-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "me-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-me-south-1", - }: endpoint{ - Hostname: "wafv2-fips.me-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "me-south-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-sa-east-1", - }: endpoint{ - Hostname: "wafv2-fips.sa-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "sa-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "wafv2-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "wafv2-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "wafv2-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "wafv2-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "il-central-1", - }: endpoint{ - Hostname: "wafv2.il-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "il-central-1", - }, - }, - endpointKey{ - Region: "il-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "wafv2-fips.il-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "il-central-1", - }, - }, - endpointKey{ - Region: "me-central-1", - }: endpoint{ - Hostname: "wafv2.me-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "me-central-1", - }, - }, - endpointKey{ - Region: "me-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "wafv2-fips.me-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "me-central-1", - }, - }, - 
endpointKey{ - Region: "me-south-1", - }: endpoint{ - Hostname: "wafv2.me-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "me-south-1", - }, - }, - endpointKey{ - Region: "me-south-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "wafv2-fips.me-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "me-south-1", - }, - }, - endpointKey{ - Region: "sa-east-1", - }: endpoint{ - Hostname: "wafv2.sa-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "sa-east-1", - }, - }, - endpointKey{ - Region: "sa-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "wafv2-fips.sa-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "sa-east-1", - }, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{ - Hostname: "wafv2.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "wafv2-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{ - Hostname: "wafv2.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "wafv2-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{ - Hostname: "wafv2.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "wafv2-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{ - Hostname: "wafv2.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "wafv2-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - }, - }, - }, - "wellarchitected": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "wisdom": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: 
"ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "ui-ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ui-ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ui-ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ui-ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ui-ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ui-eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "ui-eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "ui-us-east-1", - }: endpoint{}, - endpointKey{ - Region: "ui-us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{}, - }, - }, - "workdocs": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "workdocs-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "workdocs-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "workdocs-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "workdocs-fips.us-west-2.amazonaws.com", - }, - }, - }, - "workmail": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "workspaces": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "workspaces-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: 
"workspaces-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "workspaces-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "workspaces-fips.us-west-2.amazonaws.com", - }, - }, - }, - "workspaces-web": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "xray": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "xray-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "xray-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "xray-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "xray-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", 
- }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "xray-fips.us-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "xray-fips.us-east-2.amazonaws.com", - }, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "xray-fips.us-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "xray-fips.us-west-2.amazonaws.com", - }, - }, - }, - }, -} - -// AwsCnPartition returns the Resolver for AWS China. -func AwsCnPartition() Partition { - return awscnPartition.Partition() -} - -var awscnPartition = partition{ - ID: "aws-cn", - Name: "AWS China", - DNSSuffix: "amazonaws.com.cn", - RegionRegex: regionRegex{ - Regexp: func() *regexp.Regexp { - reg, _ := regexp.Compile("^cn\\-\\w+\\-\\d+$") - return reg - }(), - }, - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Hostname: "{service}.{region}.{dnsSuffix}", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - defaultKey{ - Variant: dualStackVariant, - }: endpoint{ - Hostname: "{service}.{region}.{dnsSuffix}", - DNSSuffix: "api.amazonwebservices.com.cn", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "{service}-fips.{region}.{dnsSuffix}", - DNSSuffix: "amazonaws.com.cn", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - defaultKey{ - Variant: fipsVariant | dualStackVariant, - }: endpoint{ - Hostname: "{service}-fips.{region}.{dnsSuffix}", - DNSSuffix: "api.amazonwebservices.com.cn", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - Regions: regions{ - "cn-north-1": region{ - Description: "China (Beijing)", - }, - "cn-northwest-1": region{ - Description: "China (Ningxia)", - }, - }, - Services: services{ - "access-analyzer": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "account": service{ - PartitionEndpoint: "aws-cn-global", - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-cn-global", - }: endpoint{ - Hostname: "account.cn-northwest-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - }, - }, - }, - "acm": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "airflow": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, - "api.ecr": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{ - Hostname: "api.ecr.cn-north-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-north-1", - }, - }, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{ - Hostname: "api.ecr.cn-northwest-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - }, - }, - }, - "api.pricing": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - 
[elided: generated vendor diff — the remainder of the aws-cn partition's service endpoint table in vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go. The deleted lines enumerate endpoint entries for dozens of services (picking up mid-entry in a pricing-scoped service, then api.sagemaker, api.tunneling.iot, apigateway, appconfig, … through workspaces and xray), keyed on cn-north-1 and cn-northwest-1, with dualStackVariant and fipsVariant hostnames where defined, plus aws-cn-global partition endpoints for budgets, ce, cloudfront, health, iam, organizations, route53, and support.]
-	},
-}
-
-// AwsUsGovPartition returns the Resolver for AWS GovCloud (US).
-func AwsUsGovPartition() Partition {
-	return awsusgovPartition.Partition()
-}
-
-var awsusgovPartition = partition{
-	ID:          "aws-us-gov",
-	Name:        "AWS GovCloud (US)",
-	DNSSuffix:   "amazonaws.com",
-	RegionRegex: regionRegex{
-		Regexp: func() *regexp.Regexp {
-			reg, _ := regexp.Compile("^us\\-gov\\-\\w+\\-\\d+$")
-			return reg
-		}(),
-	},
-	Defaults: endpointDefaults{
-		defaultKey{}: endpoint{
-			Hostname:          "{service}.{region}.{dnsSuffix}",
-			Protocols:         []string{"https"},
-			SignatureVersions: []string{"v4"},
-		},
-		defaultKey{
-			Variant: dualStackVariant,
-		}: endpoint{
-			Hostname:          "{service}.{region}.{dnsSuffix}",
-			DNSSuffix:         "api.aws",
-			Protocols:         []string{"https"},
-			SignatureVersions: []string{"v4"},
-		},
-		defaultKey{
-			Variant: fipsVariant,
-		}: endpoint{
-			Hostname:          "{service}-fips.{region}.{dnsSuffix}",
-			DNSSuffix:         "amazonaws.com",
-			Protocols:         []string{"https"},
-			SignatureVersions: []string{"v4"},
-		},
-		defaultKey{
-			Variant: fipsVariant | dualStackVariant,
-		}: endpoint{
-			Hostname:          "{service}-fips.{region}.{dnsSuffix}",
-			DNSSuffix:         "api.aws",
-			Protocols:         []string{"https"},
-			SignatureVersions: []string{"v4"},
-		},
-	},
-	Regions: regions{
-		"us-gov-east-1": region{
-			Description: "AWS GovCloud (US-East)",
-		},
-		"us-gov-west-1": region{
-			Description: "AWS GovCloud (US-West)",
-		},
-	},
-	Services: services{
[elided: generated vendor diff — the aws-us-gov service endpoint tables (access-analyzer, acm, acm-pca, api.detective, api.ecr, api.sagemaker, api.tunneling.iot, apigateway, appconfig, appconfigdata, application-autoscaling, applicationinsights, appstream2, arc-zonal-shift, athena, autoscaling, autoscaling-plans, backup, backup-gateway, backupstorage, batch, bedrock, cassandra, cloudcontrolapi, clouddirectory, cloudformation, cloudhsm, cloudhsmv2, cloudtrail, codebuild, codecommit, codedeploy, codepipeline, codestar-connections, cognito-identity, cognito-idp, comprehend, comprehendmedical, compute-optimizer, …), keyed on us-gov-east-1 and us-gov-west-1 with fipsVariant and dualStackVariant hostnames and deprecated *-fips region aliases.]
"compute-optimizer-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "config": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "config.{region}.{dnsSuffix}", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "config.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "config.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "config.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "config.us-gov-west-1.amazonaws.com", - }, - }, - }, - "connect": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "connect.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "connect.us-gov-west-1.amazonaws.com", - }, - }, - }, - "controltower": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - }, - }, - "data-ats.iot": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - CredentialScope: credentialScope{ - Service: "iotdata", - }, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "data.iot-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Service: "iotdata", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "data.iot-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Service: "iotdata", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "data.iot-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "data.iot-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "data.jobs.iot": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "data.jobs.iot-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "data.jobs.iot-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: 
"data.jobs.iot-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "data.jobs.iot-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "databrew": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "databrew.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "databrew.us-gov-west-1.amazonaws.com", - }, - }, - }, - "datasync": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "datasync-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "datasync-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "datasync-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "datasync-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "datazone": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - DNSSuffix: "api.aws", - }, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "{service}-fips.{region}.{dnsSuffix}", - DNSSuffix: "api.aws", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Hostname: "datazone.us-gov-east-1.api.aws", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "datazone.us-gov-west-1.api.aws", - }, - }, - }, - "directconnect": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Hostname: "directconnect.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "directconnect.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "dlm": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "dlm.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-east-1-fips", - }: endpoint{ - Hostname: "dlm.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "dlm.us-gov-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1-fips", - }: endpoint{ - Hostname: "dlm.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "dms": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: 
fipsVariant, - }: endpoint{ - Hostname: "dms.{region}.{dnsSuffix}", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "dms", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "dms", - Variant: fipsVariant, - }: endpoint{ - Hostname: "dms.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "dms-fips", - }: endpoint{ - Hostname: "dms.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "dms.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-east-1-fips", - }: endpoint{ - Hostname: "dms.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "dms.us-gov-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1-fips", - }: endpoint{ - Hostname: "dms.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "docdb": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "rds.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "drs": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "drs-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "drs-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "drs-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "drs-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "ds": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "ds-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "ds-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ds-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ds-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "dynamodb": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - 
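[For reviewers: the table being deleted follows one lookup model throughout: each service maps a (region, variant) endpointKey to an endpoint record, with per-variant default hostname templates as a fallback. Below is a minimal, self-contained sketch of that model; the types and the resolve() helper are illustrative stand-ins under that assumption, not the SDK's actual unexported API.]

package main

import (
	"fmt"
	"strings"
)

// variant is a bit set, mirroring the table's fipsVariant | dualStackVariant keys.
type variant int

const (
	fipsVariant      variant = 1 << iota // FIPS endpoint requested
	dualStackVariant                     // dual-stack endpoint requested
)

type endpointKey struct {
	Region  string
	Variant variant
}

type endpoint struct {
	Hostname string // may carry {service}/{region}/{dnsSuffix} placeholders
}

type service struct {
	Defaults  map[variant]endpoint     // per-variant hostname templates
	Endpoints map[endpointKey]endpoint // concrete per-region entries
}

// resolve mirrors the fallback order the table implies: an exact
// (region, variant) entry first, then the variant's default template,
// then the conventional service.region.dnsSuffix hostname.
func (s service) resolve(name, region, dnsSuffix string, v variant) (string, bool) {
	if ep, ok := s.Endpoints[endpointKey{Region: region, Variant: v}]; ok && ep.Hostname != "" {
		return ep.Hostname, true
	}
	if def, ok := s.Defaults[v]; ok {
		r := strings.NewReplacer("{service}", name, "{region}", region, "{dnsSuffix}", dnsSuffix)
		return r.Replace(def.Hostname), true
	}
	if _, ok := s.Endpoints[endpointKey{Region: region}]; ok {
		return fmt.Sprintf("%s.%s.%s", name, region, dnsSuffix), true
	}
	return "", false
}

func main() {
	// dynamodb in aws-us-gov, condensed from the deleted entries nearby.
	dynamodb := service{
		Defaults: map[variant]endpoint{
			fipsVariant: {Hostname: "dynamodb.{region}.{dnsSuffix}"},
		},
		Endpoints: map[endpointKey]endpoint{
			{Region: "us-gov-west-1"}:                       {},
			{Region: "us-gov-west-1", Variant: fipsVariant}: {Hostname: "dynamodb.us-gov-west-1.amazonaws.com"},
		},
	}
	host, _ := dynamodb.resolve("dynamodb", "us-gov-west-1", "amazonaws.com", fipsVariant)
	fmt.Println(host) // dynamodb.us-gov-west-1.amazonaws.com
}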
[... the hunk continues in the same pattern for dynamodb, ebs, ec2 (including dualStackVariant entries on "api.aws"), ecs, eks, eks-auth, elasticache, elasticbeanstalk, elasticfilesystem, elasticloadbalancing, elasticmapreduce, email, emr-containers, emr-serverless, es, events, firehose, fms, fsx, geo, glacier, glue, greengrass, guardduty, health, iam (partition-global: PartitionEndpoint "aws-us-gov-global", IsRegionalized boxedFalse), identitystore, ingest.timestream, inspector, inspector2, internetmonitor, iot, iotevents, ioteventsdata, iotsecuredtunneling, iotsitewise, iottwinmaker, kafka, kendra, kendra-ranking, kinesis, kinesisanalytics, kms (with the deprecated "ProdFips" pseudo-region), lakeformation, lambda, license-manager, license-manager-linux-subscriptions, logs, m2, managedblockchain, mediaconvert, meetings-chime, metering.marketplace, metrics.sagemaker, mgn, models.lex, monitoring, and mq ...]
}, - }, - "neptune": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Hostname: "rds.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "rds.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "network-firewall": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "network-firewall-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "network-firewall-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "network-firewall-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "network-firewall-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "networkmanager": service{ - PartitionEndpoint: "aws-us-gov-global", - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-us-gov-global", - }: endpoint{ - Hostname: "networkmanager.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - endpointKey{ - Region: "aws-us-gov-global", - Variant: fipsVariant, - }: endpoint{ - Hostname: "networkmanager.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - endpointKey{ - Region: "fips-aws-us-gov-global", - }: endpoint{ - Hostname: "networkmanager.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "oidc": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Hostname: "oidc.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "oidc.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "organizations": service{ - PartitionEndpoint: "aws-us-gov-global", - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-us-gov-global", - }: endpoint{ - Hostname: "organizations.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - endpointKey{ - Region: "aws-us-gov-global", - Variant: fipsVariant, - }: endpoint{ - Hostname: "organizations.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - endpointKey{ - Region: "fips-aws-us-gov-global", - }: endpoint{ - Hostname: "organizations.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "outposts": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "outposts.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", 
- }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "outposts.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "outposts.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "outposts.us-gov-west-1.amazonaws.com", - }, - }, - }, - "participant.connect": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "participant.connect.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "participant.connect.us-gov-west-1.amazonaws.com", - }, - }, - }, - "pi": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - }, - }, - "pinpoint": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - CredentialScope: credentialScope{ - Service: "mobiletargeting", - }, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "pinpoint-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "pinpoint.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "pinpoint-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "polly": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "polly-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "polly-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "portal.sso": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Hostname: "portal.sso.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "portal.sso.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "qbusiness": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - DNSSuffix: "api.aws", - }, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "{service}-fips.{region}.{dnsSuffix}", - DNSSuffix: "api.aws", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Hostname: "qbusiness.us-gov-east-1.api.aws", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "qbusiness.us-gov-west-1.api.aws", - }, - }, - 
}, - "quicksight": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "api", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - }, - }, - "ram": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Hostname: "ram.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ram.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-east-1-fips", - }: endpoint{ - Hostname: "ram.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "ram.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ram.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1-fips", - }: endpoint{ - Hostname: "ram.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "rbin": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "rbin-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "rbin-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rbin-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rbin-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "rds": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "rds.{region}.{dnsSuffix}", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "rds.us-gov-east-1", - }: endpoint{ - Hostname: "rds.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rds.us-gov-west-1", - }: endpoint{ - Hostname: "rds.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rds.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-east-1-fips", - }: endpoint{ - Hostname: "rds.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rds.us-gov-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1-fips", - }: 
endpoint{ - Hostname: "rds.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "redshift": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Hostname: "redshift.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "redshift.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "rekognition": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "rekognition-fips.us-gov-west-1", - }: endpoint{ - Hostname: "rekognition-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rekognition.us-gov-west-1", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rekognition.us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rekognition-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rekognition-fips.us-gov-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1-fips", - }: endpoint{ - Hostname: "rekognition-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "resiliencehub": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "resiliencehub-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "resiliencehub-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "resiliencehub-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "resiliencehub-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "resource-groups": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "resource-groups.{region}.{dnsSuffix}", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "resource-groups.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "resource-groups.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "resource-groups.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - 
Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "resource-groups.us-gov-west-1.amazonaws.com", - }, - }, - }, - "robomaker": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - }, - }, - "rolesanywhere": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "rolesanywhere-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "rolesanywhere-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rolesanywhere-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rolesanywhere-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "route53": service{ - PartitionEndpoint: "aws-us-gov-global", - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-us-gov-global", - }: endpoint{ - Hostname: "route53.us-gov.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - endpointKey{ - Region: "aws-us-gov-global", - Variant: fipsVariant, - }: endpoint{ - Hostname: "route53.us-gov.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - endpointKey{ - Region: "fips-aws-us-gov-global", - }: endpoint{ - Hostname: "route53.us-gov.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "route53resolver": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "route53resolver.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-east-1-fips", - }: endpoint{ - Hostname: "route53resolver.us-gov-east-1.amazonaws.com", - - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "route53resolver.us-gov-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1-fips", - }: endpoint{ - Hostname: "route53resolver.us-gov-west-1.amazonaws.com", - - Deprecated: boxedTrue, - }, - }, - }, - "runtime.lex": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - CredentialScope: credentialScope{ - Service: "lex", - }, - }, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "runtime-fips.lex.{region}.{dnsSuffix}", - CredentialScope: credentialScope{ - Service: "lex", - }, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "runtime-fips.lex.us-gov-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1-fips", - }: endpoint{ - Hostname: "runtime-fips.lex.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - 
"runtime.sagemaker": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "runtime.sagemaker.{region}.{dnsSuffix}", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "runtime.sagemaker.us-gov-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1-fips", - }: endpoint{ - Hostname: "runtime.sagemaker.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "s3": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - SignatureVersions: []string{"s3", "s3v4"}, - }, - defaultKey{ - Variant: dualStackVariant, - }: endpoint{ - Hostname: "{service}.dualstack.{region}.{dnsSuffix}", - DNSSuffix: "amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - defaultKey{ - Variant: fipsVariant | dualStackVariant, - }: endpoint{ - Hostname: "{service}-fips.dualstack.{region}.{dnsSuffix}", - DNSSuffix: "amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "s3-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "s3-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Hostname: "s3.us-gov-east-1.amazonaws.com", - Protocols: []string{"http", "https"}, - }, - endpointKey{ - Region: "us-gov-east-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3.dualstack.us-gov-east-1.amazonaws.com", - Protocols: []string{"http", "https"}, - }, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "s3-fips.us-gov-east-1.amazonaws.com", - Protocols: []string{"http", "https"}, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "s3.us-gov-west-1.amazonaws.com", - Protocols: []string{"http", "https"}, - }, - endpointKey{ - Region: "us-gov-west-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3.dualstack.us-gov-west-1.amazonaws.com", - Protocols: []string{"http", "https"}, - }, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "s3-fips.us-gov-west-1.amazonaws.com", - Protocols: []string{"http", "https"}, - }, - }, - }, - "s3-control": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - SignatureVersions: []string{"s3v4"}, - }, - defaultKey{ - Variant: dualStackVariant, - }: endpoint{ - Hostname: "{service}.dualstack.{region}.{dnsSuffix}", - DNSSuffix: "amazonaws.com", - Protocols: []string{"https"}, - SignatureVersions: []string{"s3v4"}, - }, - defaultKey{ - Variant: fipsVariant | dualStackVariant, - }: endpoint{ - Hostname: "{service}-fips.dualstack.{region}.{dnsSuffix}", - DNSSuffix: "amazonaws.com", - Protocols: []string{"https"}, - SignatureVersions: []string{"s3v4"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Hostname: "s3-control.us-gov-east-1.amazonaws.com", - SignatureVersions: 
[]string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-east-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3-control.dualstack.us-gov-east-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "s3-control-fips.us-gov-east-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant | dualStackVariant, - }: endpoint{ - Hostname: "s3-control-fips.dualstack.us-gov-east-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-east-1-fips", - }: endpoint{ - Hostname: "s3-control-fips.us-gov-east-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "s3-control.us-gov-west-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "s3-control.dualstack.us-gov-west-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "s3-control-fips.us-gov-west-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant | dualStackVariant, - }: endpoint{ - Hostname: "s3-control-fips.dualstack.us-gov-west-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1-fips", - }: endpoint{ - Hostname: "s3-control-fips.us-gov-west-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "s3-outposts": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: dualStackVariant, - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant | dualStackVariant, - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: dualStackVariant, - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant | dualStackVariant, - }: endpoint{}, - }, - }, - "secretsmanager": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: dualStackVariant, - }: 
endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant | dualStackVariant, - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1-fips", - }: endpoint{ - - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: dualStackVariant, - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant | dualStackVariant, - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1-fips", - }: endpoint{ - - Deprecated: boxedTrue, - }, - }, - }, - "securityhub": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "securityhub-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "securityhub-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "securityhub-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "securityhub-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "serverlessrepo": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Protocols: []string{"https"}, - }, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "serverlessrepo.us-gov-east-1.amazonaws.com", - Protocols: []string{"https"}, - }, - endpointKey{ - Region: "us-gov-east-1-fips", - }: endpoint{ - Hostname: "serverlessrepo.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Protocols: []string{"https"}, - }, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "serverlessrepo.us-gov-west-1.amazonaws.com", - Protocols: []string{"https"}, - }, - endpointKey{ - Region: "us-gov-west-1-fips", - }: endpoint{ - Hostname: "serverlessrepo.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "servicecatalog": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "servicecatalog-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-east-1-fips", - }: endpoint{ - Hostname: "servicecatalog-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "servicecatalog-fips.us-gov-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1-fips", - }: endpoint{ - 
Hostname: "servicecatalog-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "servicecatalog-appregistry": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "servicecatalog-appregistry.{region}.{dnsSuffix}", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - }, - }, - "servicediscovery": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "servicediscovery", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "servicediscovery", - Variant: fipsVariant, - }: endpoint{ - Hostname: "servicediscovery-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "servicediscovery-fips", - }: endpoint{ - Hostname: "servicediscovery-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "servicediscovery.us-gov-east-1.api.aws", - }, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "servicediscovery-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant | dualStackVariant, - }: endpoint{ - Hostname: "servicediscovery-fips.us-gov-east-1.api.aws", - }, - endpointKey{ - Region: "us-gov-east-1-fips", - }: endpoint{ - Hostname: "servicediscovery-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: dualStackVariant, - }: endpoint{ - Hostname: "servicediscovery.us-gov-west-1.api.aws", - }, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "servicediscovery-fips.us-gov-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant | dualStackVariant, - }: endpoint{ - Hostname: "servicediscovery-fips.us-gov-west-1.api.aws", - }, - endpointKey{ - Region: "us-gov-west-1-fips", - }: endpoint{ - Hostname: "servicediscovery-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "servicequotas": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "servicequotas.{region}.{dnsSuffix}", - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "servicequotas.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "servicequotas.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - 
Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "servicequotas.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "servicequotas.us-gov-west-1.amazonaws.com", - }, - }, - }, - "simspaceweaver": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "simspaceweaver.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "simspaceweaver.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "simspaceweaver.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "simspaceweaver.us-gov-west-1.amazonaws.com", - }, - }, - }, - "sms": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "sms-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "sms-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "sms-voice": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "sms-voice-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "sms-voice-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "sms-voice-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "sms-voice-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "snowball": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "snowball-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "snowball-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "snowball-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "snowball-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "sns": service{ - 
Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "sns.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "sns.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "sns.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Protocols: []string{"https"}, - }, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "sns.us-gov-west-1.amazonaws.com", - Protocols: []string{"https"}, - }, - }, - }, - "sqs": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "sqs.{region}.{dnsSuffix}", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Hostname: "sqs.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "sqs.us-gov-west-1.amazonaws.com", - SSLCommonName: "{region}.queue.{dnsSuffix}", - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "ssm": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "ssm.{region}.{dnsSuffix}", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "ssm.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "ssm.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ssm.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ssm.us-gov-west-1.amazonaws.com", - }, - }, - }, - "sso": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Hostname: "sso.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "sso.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-east-1-fips", - }: endpoint{ - Hostname: "sso.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "sso.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "sso.us-gov-west-1.amazonaws.com", - CredentialScope: 
credentialScope{ - Region: "us-gov-west-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1-fips", - }: endpoint{ - Hostname: "sso.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "states": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "states-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "states.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "states-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "states.us-gov-west-1.amazonaws.com", - }, - }, - }, - "storagegateway": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips", - }: endpoint{ - Hostname: "storagegateway-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "storagegateway-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-east-1-fips", - }: endpoint{ - Hostname: "storagegateway-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "storagegateway-fips.us-gov-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1-fips", - }: endpoint{ - Hostname: "storagegateway-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "streams.dynamodb": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - CredentialScope: credentialScope{ - Service: "dynamodb", - }, - }, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "streams.dynamodb.{region}.{dnsSuffix}", - CredentialScope: credentialScope{ - Service: "dynamodb", - }, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "streams.dynamodb.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-east-1-fips", - }: endpoint{ - Hostname: "streams.dynamodb.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "streams.dynamodb.us-gov-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1-fips", - }: endpoint{ - Hostname: "streams.dynamodb.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "sts": service{ - Defaults: 
endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "sts.{region}.{dnsSuffix}", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "sts.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-east-1-fips", - }: endpoint{ - Hostname: "sts.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "sts.us-gov-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1-fips", - }: endpoint{ - Hostname: "sts.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "support": service{ - PartitionEndpoint: "aws-us-gov-global", - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-us-gov-global", - }: endpoint{ - Hostname: "support.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "support.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "support.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "swf": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Hostname: "swf.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "swf.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-east-1-fips", - }: endpoint{ - Hostname: "swf.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "swf.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "swf.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1-fips", - }: endpoint{ - Hostname: "swf.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "synthetics": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "synthetics-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "synthetics-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: 
"us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "synthetics-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "synthetics-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "tagging": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - }, - }, - "textract": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "textract-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "textract-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "textract-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "textract-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "transcribe": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "fips.transcribe.{region}.{dnsSuffix}", - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "fips.transcribe.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "fips.transcribe.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fips.transcribe.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "fips.transcribe.us-gov-west-1.amazonaws.com", - }, - }, - }, - "transcribestreaming": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - }, - }, - "transfer": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "transfer-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "transfer-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: 
"transfer-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "transfer-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "translate": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "translate-fips.us-gov-west-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1-fips", - }: endpoint{ - Hostname: "translate-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "waf-regional": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "waf-regional-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "waf-regional-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Hostname: "waf-regional.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "waf-regional.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "waf-regional-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "wafv2": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "wafv2-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "wafv2-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Hostname: "wafv2.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "wafv2-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "wafv2.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "wafv2-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - }, - }, - "wellarchitected": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - 
endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - }, - }, - "workspaces": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "workspaces-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "workspaces-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "workspaces-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "workspaces-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - "xray": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-gov-east-1", - }: endpoint{ - Hostname: "xray-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-gov-west-1", - }: endpoint{ - Hostname: "xray-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "xray-fips.us-gov-east-1.amazonaws.com", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "xray-fips.us-gov-west-1.amazonaws.com", - }, - }, - }, - }, -} - -// AwsIsoPartition returns the Resolver for AWS ISO (US). 
-func AwsIsoPartition() Partition { - return awsisoPartition.Partition() -} - -var awsisoPartition = partition{ - ID: "aws-iso", - Name: "AWS ISO (US)", - DNSSuffix: "c2s.ic.gov", - RegionRegex: regionRegex{ - Regexp: func() *regexp.Regexp { - reg, _ := regexp.Compile("^us\\-iso\\-\\w+\\-\\d+$") - return reg - }(), - }, - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Hostname: "{service}.{region}.{dnsSuffix}", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "{service}-fips.{region}.{dnsSuffix}", - DNSSuffix: "c2s.ic.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - Regions: regions{ - "us-iso-east-1": region{ - Description: "US ISO East", - }, - "us-iso-west-1": region{ - Description: "US ISO WEST", - }, - }, - Services: services{ - "api.ecr": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{ - Hostname: "api.ecr.us-iso-east-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - }, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{ - Hostname: "api.ecr.us-iso-west-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-west-1", - }, - }, - }, - }, - "api.pricing": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - CredentialScope: credentialScope{ - Service: "pricing", - }, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - }, - }, - "api.sagemaker": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - }, - }, - "apigateway": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - }, - }, - "appconfig": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "appconfigdata": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "application-autoscaling": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "arc-zonal-shift": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "athena": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - }, - }, - "autoscaling": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{ - Protocols: []string{"http", "https"}, - }, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "cloudcontrolapi": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "cloudformation": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "cloudtrail": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - 
}: endpoint{}, - }, - }, - "codedeploy": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "comprehend": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - }, - }, - "config": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "datapipeline": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - }, - }, - "datasync": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-iso-east-1", - }: endpoint{ - Hostname: "datasync-fips.us-iso-east-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-iso-west-1", - }: endpoint{ - Hostname: "datasync-fips.us-iso-west-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "datasync-fips.us-iso-east-1.c2s.ic.gov", - }, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "datasync-fips.us-iso-west-1.c2s.ic.gov", - }, - }, - }, - "directconnect": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "dlm": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "dms": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "dms.{region}.{dnsSuffix}", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "dms", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "dms", - Variant: fipsVariant, - }: endpoint{ - Hostname: "dms.us-iso-east-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "dms-fips", - }: endpoint{ - Hostname: "dms.us-iso-east-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "dms.us-iso-east-1.c2s.ic.gov", - }, - endpointKey{ - Region: "us-iso-east-1-fips", - }: endpoint{ - Hostname: "dms.us-iso-east-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "dms.us-iso-west-1.c2s.ic.gov", - }, - endpointKey{ - Region: "us-iso-west-1-fips", - }: endpoint{ - Hostname: "dms.us-iso-west-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - 
"ds": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "dynamodb": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{ - Protocols: []string{"http", "https"}, - }, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "ebs": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "ec2": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "ecs": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "eks": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "elasticache": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "elasticfilesystem": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-iso-east-1", - }: endpoint{ - Hostname: "elasticfilesystem-fips.us-iso-east-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-iso-west-1", - }: endpoint{ - Hostname: "elasticfilesystem-fips.us-iso-west-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.us-iso-east-1.c2s.ic.gov", - }, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.us-iso-west-1.c2s.ic.gov", - }, - }, - }, - "elasticloadbalancing": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{ - Protocols: []string{"http", "https"}, - }, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "elasticmapreduce": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-iso-east-1", - }: endpoint{ - Hostname: "elasticmapreduce.us-iso-east-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-iso-west-1", - }: endpoint{ - Hostname: "elasticmapreduce.us-iso-west-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{ - Protocols: []string{"https"}, - }, - endpointKey{ - Region: "us-iso-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticmapreduce.us-iso-east-1.c2s.ic.gov", - Protocols: []string{"https"}, - }, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticmapreduce.us-iso-west-1.c2s.ic.gov", - }, - }, - }, - "es": 
service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "events": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "firehose": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "glacier": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{ - Protocols: []string{"http", "https"}, - }, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "glue": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - }, - }, - "guardduty": service{ - IsRegionalized: boxedTrue, - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - }, - }, - "health": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - }, - }, - "iam": service{ - PartitionEndpoint: "aws-iso-global", - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-iso-global", - }: endpoint{ - Hostname: "iam.us-iso-east-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - }, - }, - }, - "kinesis": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "kms": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ProdFips", - }: endpoint{ - Hostname: "kms-fips.us-iso-east-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kms-fips.us-iso-east-1.c2s.ic.gov", - }, - endpointKey{ - Region: "us-iso-east-1-fips", - }: endpoint{ - Hostname: "kms-fips.us-iso-east-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kms-fips.us-iso-west-1.c2s.ic.gov", - }, - endpointKey{ - Region: "us-iso-west-1-fips", - }: endpoint{ - Hostname: "kms-fips.us-iso-west-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "lambda": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "license-manager": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "logs": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "medialive": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - }, - }, - "mediapackage": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: 
"us-iso-east-1", - }: endpoint{}, - }, - }, - "metrics.sagemaker": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - }, - }, - "monitoring": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "outposts": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - }, - }, - "ram": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-iso-east-1", - }: endpoint{ - Hostname: "ram-fips.us-iso-east-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-iso-west-1", - }: endpoint{ - Hostname: "ram-fips.us-iso-west-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ram-fips.us-iso-east-1.c2s.ic.gov", - }, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ram-fips.us-iso-west-1.c2s.ic.gov", - }, - }, - }, - "rbin": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-iso-east-1", - }: endpoint{ - Hostname: "rbin-fips.us-iso-east-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-iso-west-1", - }: endpoint{ - Hostname: "rbin-fips.us-iso-west-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rbin-fips.us-iso-east-1.c2s.ic.gov", - }, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rbin-fips.us-iso-west-1.c2s.ic.gov", - }, - }, - }, - "rds": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "rds-fips.us-iso-east-1", - }: endpoint{ - Hostname: "rds-fips.us-iso-east-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rds-fips.us-iso-west-1", - }: endpoint{ - Hostname: "rds-fips.us-iso-west-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rds.us-iso-east-1", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rds.us-iso-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rds-fips.us-iso-east-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rds.us-iso-west-1", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-iso-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rds.us-iso-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rds-fips.us-iso-west-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - 
Region: "us-iso-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rds-fips.us-iso-east-1.c2s.ic.gov", - }, - endpointKey{ - Region: "us-iso-east-1-fips", - }: endpoint{ - Hostname: "rds-fips.us-iso-east-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rds-fips.us-iso-west-1.c2s.ic.gov", - }, - endpointKey{ - Region: "us-iso-west-1-fips", - }: endpoint{ - Hostname: "rds-fips.us-iso-west-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-west-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "redshift": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-iso-east-1", - }: endpoint{ - Hostname: "redshift-fips.us-iso-east-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-iso-west-1", - }: endpoint{ - Hostname: "redshift-fips.us-iso-west-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "redshift-fips.us-iso-east-1.c2s.ic.gov", - }, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "redshift-fips.us-iso-west-1.c2s.ic.gov", - }, - }, - }, - "resource-groups": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "route53": service{ - PartitionEndpoint: "aws-iso-global", - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-iso-global", - }: endpoint{ - Hostname: "route53.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - }, - }, - }, - "route53resolver": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "runtime.sagemaker": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - }, - }, - "s3": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - SignatureVersions: []string{"s3v4"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-iso-east-1", - }: endpoint{ - Hostname: "s3-fips.us-iso-east-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-iso-west-1", - }: endpoint{ - Hostname: "s3-fips.us-iso-west-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{ - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, - }, - endpointKey{ - Region: "us-iso-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "s3-fips.us-iso-east-1.c2s.ic.gov", - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, - }, - endpointKey{ - Region: "us-iso-east-1", - Variant: fipsVariant | dualStackVariant, - }: endpoint{ - Hostname: "s3-fips.dualstack.us-iso-east-1.c2s.ic.gov", - Protocols: []string{"http", 
"https"}, - SignatureVersions: []string{"s3v4"}, - }, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "s3-fips.us-iso-west-1.c2s.ic.gov", - }, - endpointKey{ - Region: "us-iso-west-1", - Variant: fipsVariant | dualStackVariant, - }: endpoint{ - Hostname: "s3-fips.dualstack.us-iso-west-1.c2s.ic.gov", - }, - }, - }, - "secretsmanager": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "snowball": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - }, - }, - "sns": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{ - Protocols: []string{"http", "https"}, - }, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "sqs": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{ - Protocols: []string{"http", "https"}, - }, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "ssm": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "states": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "streams.dynamodb": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - CredentialScope: credentialScope{ - Service: "dynamodb", - }, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "sts": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "support": service{ - PartitionEndpoint: "aws-iso-global", - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-iso-global", - }: endpoint{ - Hostname: "support.us-iso-east-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - }, - }, - }, - "swf": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "synthetics": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "tagging": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - "transcribe": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - }, - }, - "transcribestreaming": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - }, - }, - "translate": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - }, - }, - "workspaces": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - 
endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - }, - }, - }, -} - -// AwsIsoBPartition returns the Resolver for AWS ISOB (US). -func AwsIsoBPartition() Partition { - return awsisobPartition.Partition() -} - -var awsisobPartition = partition{ - ID: "aws-iso-b", - Name: "AWS ISOB (US)", - DNSSuffix: "sc2s.sgov.gov", - RegionRegex: regionRegex{ - Regexp: func() *regexp.Regexp { - reg, _ := regexp.Compile("^us\\-isob\\-\\w+\\-\\d+$") - return reg - }(), - }, - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Hostname: "{service}.{region}.{dnsSuffix}", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "{service}-fips.{region}.{dnsSuffix}", - DNSSuffix: "sc2s.sgov.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - Regions: regions{ - "us-isob-east-1": region{ - Description: "US ISOB East (Ohio)", - }, - }, - Services: services{ - "api.ecr": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{ - Hostname: "api.ecr.us-isob-east-1.sc2s.sgov.gov", - CredentialScope: credentialScope{ - Region: "us-isob-east-1", - }, - }, - }, - }, - "api.pricing": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - CredentialScope: credentialScope{ - Service: "pricing", - }, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "api.sagemaker": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "appconfig": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "appconfigdata": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "application-autoscaling": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "arc-zonal-shift": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "autoscaling": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "cloudcontrolapi": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "cloudformation": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "cloudtrail": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "codedeploy": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "config": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "directconnect": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "dlm": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "dms": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{}, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "dms.{region}.{dnsSuffix}", - }, - }, - Endpoints: 
serviceEndpoints{ - endpointKey{ - Region: "dms", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-isob-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "dms", - Variant: fipsVariant, - }: endpoint{ - Hostname: "dms.us-isob-east-1.sc2s.sgov.gov", - CredentialScope: credentialScope{ - Region: "us-isob-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "dms-fips", - }: endpoint{ - Hostname: "dms.us-isob-east-1.sc2s.sgov.gov", - CredentialScope: credentialScope{ - Region: "us-isob-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-isob-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "dms.us-isob-east-1.sc2s.sgov.gov", - }, - endpointKey{ - Region: "us-isob-east-1-fips", - }: endpoint{ - Hostname: "dms.us-isob-east-1.sc2s.sgov.gov", - CredentialScope: credentialScope{ - Region: "us-isob-east-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "ds": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "dynamodb": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "ebs": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "ec2": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "ecs": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "eks": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "elasticache": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "elasticfilesystem": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-isob-east-1", - }: endpoint{ - Hostname: "elasticfilesystem-fips.us-isob-east-1.sc2s.sgov.gov", - CredentialScope: credentialScope{ - Region: "us-isob-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-isob-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticfilesystem-fips.us-isob-east-1.sc2s.sgov.gov", - }, - }, - }, - "elasticloadbalancing": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{ - Protocols: []string{"https"}, - }, - }, - }, - "elasticmapreduce": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-isob-east-1", - }: endpoint{ - Hostname: "elasticmapreduce.us-isob-east-1.sc2s.sgov.gov", - CredentialScope: credentialScope{ - Region: "us-isob-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-isob-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "elasticmapreduce.us-isob-east-1.sc2s.sgov.gov", - }, - }, - }, - "es": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "events": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: 
"us-isob-east-1", - }: endpoint{}, - }, - }, - "glacier": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "health": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "iam": service{ - PartitionEndpoint: "aws-iso-b-global", - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-iso-b-global", - }: endpoint{ - Hostname: "iam.us-isob-east-1.sc2s.sgov.gov", - CredentialScope: credentialScope{ - Region: "us-isob-east-1", - }, - }, - }, - }, - "kinesis": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "kms": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ProdFips", - }: endpoint{ - Hostname: "kms-fips.us-isob-east-1.sc2s.sgov.gov", - CredentialScope: credentialScope{ - Region: "us-isob-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-isob-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "kms-fips.us-isob-east-1.sc2s.sgov.gov", - }, - endpointKey{ - Region: "us-isob-east-1-fips", - }: endpoint{ - Hostname: "kms-fips.us-isob-east-1.sc2s.sgov.gov", - CredentialScope: credentialScope{ - Region: "us-isob-east-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "lambda": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "license-manager": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "logs": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "metering.marketplace": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - CredentialScope: credentialScope{ - Service: "aws-marketplace", - }, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "metrics.sagemaker": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "monitoring": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "outposts": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "ram": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-isob-east-1", - }: endpoint{ - Hostname: "ram-fips.us-isob-east-1.sc2s.sgov.gov", - CredentialScope: credentialScope{ - Region: "us-isob-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-isob-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ram-fips.us-isob-east-1.sc2s.sgov.gov", - }, - }, - }, - "rbin": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-isob-east-1", - }: endpoint{ - Hostname: "rbin-fips.us-isob-east-1.sc2s.sgov.gov", - CredentialScope: credentialScope{ - Region: "us-isob-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-isob-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rbin-fips.us-isob-east-1.sc2s.sgov.gov", - }, - }, - }, - "rds": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "rds-fips.us-isob-east-1", - }: endpoint{ - Hostname: "rds-fips.us-isob-east-1.sc2s.sgov.gov", - 
CredentialScope: credentialScope{ - Region: "us-isob-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rds.us-isob-east-1", - }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-isob-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rds.us-isob-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rds-fips.us-isob-east-1.sc2s.sgov.gov", - CredentialScope: credentialScope{ - Region: "us-isob-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-isob-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rds-fips.us-isob-east-1.sc2s.sgov.gov", - }, - endpointKey{ - Region: "us-isob-east-1-fips", - }: endpoint{ - Hostname: "rds-fips.us-isob-east-1.sc2s.sgov.gov", - CredentialScope: credentialScope{ - Region: "us-isob-east-1", - }, - Deprecated: boxedTrue, - }, - }, - }, - "redshift": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-isob-east-1", - }: endpoint{ - Hostname: "redshift-fips.us-isob-east-1.sc2s.sgov.gov", - CredentialScope: credentialScope{ - Region: "us-isob-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-isob-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "redshift-fips.us-isob-east-1.sc2s.sgov.gov", - }, - }, - }, - "resource-groups": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "route53": service{ - PartitionEndpoint: "aws-iso-b-global", - IsRegionalized: boxedFalse, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-iso-b-global", - }: endpoint{ - Hostname: "route53.sc2s.sgov.gov", - CredentialScope: credentialScope{ - Region: "us-isob-east-1", - }, - }, - }, - }, - "route53resolver": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "runtime.sagemaker": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "s3": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-isob-east-1", - }: endpoint{ - Hostname: "s3-fips.us-isob-east-1.sc2s.sgov.gov", - CredentialScope: credentialScope{ - Region: "us-isob-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-isob-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "s3-fips.us-isob-east-1.sc2s.sgov.gov", - }, - endpointKey{ - Region: "us-isob-east-1", - Variant: fipsVariant | dualStackVariant, - }: endpoint{ - Hostname: "s3-fips.dualstack.us-isob-east-1.sc2s.sgov.gov", - }, - }, - }, - "secretsmanager": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "snowball": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "sns": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "sqs": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - SSLCommonName: "{region}.queue.{dnsSuffix}", - Protocols: []string{"http", "https"}, - }, - }, - 
Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "ssm": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "states": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "streams.dynamodb": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Service: "dynamodb", - }, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "sts": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "support": service{ - PartitionEndpoint: "aws-iso-b-global", - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "aws-iso-b-global", - }: endpoint{ - Hostname: "support.us-isob-east-1.sc2s.sgov.gov", - CredentialScope: credentialScope{ - Region: "us-isob-east-1", - }, - }, - }, - }, - "swf": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "synthetics": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "tagging": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - "workspaces": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - }, - }, - }, -} - -// AwsIsoEPartition returns the Resolver for AWS ISOE (Europe). -func AwsIsoEPartition() Partition { - return awsisoePartition.Partition() -} - -var awsisoePartition = partition{ - ID: "aws-iso-e", - Name: "AWS ISOE (Europe)", - DNSSuffix: "cloud.adc-e.uk", - RegionRegex: regionRegex{ - Regexp: func() *regexp.Regexp { - reg, _ := regexp.Compile("^eu\\-isoe\\-\\w+\\-\\d+$") - return reg - }(), - }, - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Hostname: "{service}.{region}.{dnsSuffix}", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "{service}-fips.{region}.{dnsSuffix}", - DNSSuffix: "cloud.adc-e.uk", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - }, - Regions: regions{}, - Services: services{}, -} - -// AwsIsoFPartition returns the Resolver for AWS ISOF. 
-func AwsIsoFPartition() Partition {
-	return awsisofPartition.Partition()
-}
-
-var awsisofPartition = partition{
-	ID:        "aws-iso-f",
-	Name:      "AWS ISOF",
-	DNSSuffix: "csp.hci.ic.gov",
-	RegionRegex: regionRegex{
-		Regexp: func() *regexp.Regexp {
-			reg, _ := regexp.Compile("^us\\-isof\\-\\w+\\-\\d+$")
-			return reg
-		}(),
-	},
-	Defaults: endpointDefaults{
-		defaultKey{}: endpoint{
-			Hostname:          "{service}.{region}.{dnsSuffix}",
-			Protocols:         []string{"https"},
-			SignatureVersions: []string{"v4"},
-		},
-		defaultKey{
-			Variant: fipsVariant,
-		}: endpoint{
-			Hostname:          "{service}-fips.{region}.{dnsSuffix}",
-			DNSSuffix:         "csp.hci.ic.gov",
-			Protocols:         []string{"https"},
-			SignatureVersions: []string{"v4"},
-		},
-	},
-	Regions:  regions{},
-	Services: services{},
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go
deleted file mode 100644
index ca8fc828e..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go
+++ /dev/null
@@ -1,141 +0,0 @@
-package endpoints
-
-// Service identifiers
-//
-// Deprecated: Use client package's EndpointsID value instead of these
-// ServiceIDs. These IDs are not maintained, and are out of date.
-const (
-	A4bServiceID = "a4b" // A4b.
-	AcmServiceID = "acm" // Acm.
-	AcmPcaServiceID = "acm-pca" // AcmPca.
-	ApiMediatailorServiceID = "api.mediatailor" // ApiMediatailor.
-	ApiPricingServiceID = "api.pricing" // ApiPricing.
-	ApiSagemakerServiceID = "api.sagemaker" // ApiSagemaker.
-	ApigatewayServiceID = "apigateway" // Apigateway.
-	ApplicationAutoscalingServiceID = "application-autoscaling" // ApplicationAutoscaling.
-	Appstream2ServiceID = "appstream2" // Appstream2.
-	AppsyncServiceID = "appsync" // Appsync.
-	AthenaServiceID = "athena" // Athena.
-	AutoscalingServiceID = "autoscaling" // Autoscaling.
-	AutoscalingPlansServiceID = "autoscaling-plans" // AutoscalingPlans.
-	BatchServiceID = "batch" // Batch.
-	BudgetsServiceID = "budgets" // Budgets.
-	CeServiceID = "ce" // Ce.
-	ChimeServiceID = "chime" // Chime.
-	Cloud9ServiceID = "cloud9" // Cloud9.
-	ClouddirectoryServiceID = "clouddirectory" // Clouddirectory.
-	CloudformationServiceID = "cloudformation" // Cloudformation.
-	CloudfrontServiceID = "cloudfront" // Cloudfront.
-	CloudhsmServiceID = "cloudhsm" // Cloudhsm.
-	Cloudhsmv2ServiceID = "cloudhsmv2" // Cloudhsmv2.
-	CloudsearchServiceID = "cloudsearch" // Cloudsearch.
-	CloudtrailServiceID = "cloudtrail" // Cloudtrail.
-	CodebuildServiceID = "codebuild" // Codebuild.
-	CodecommitServiceID = "codecommit" // Codecommit.
-	CodedeployServiceID = "codedeploy" // Codedeploy.
-	CodepipelineServiceID = "codepipeline" // Codepipeline.
-	CodestarServiceID = "codestar" // Codestar.
-	CognitoIdentityServiceID = "cognito-identity" // CognitoIdentity.
-	CognitoIdpServiceID = "cognito-idp" // CognitoIdp.
-	CognitoSyncServiceID = "cognito-sync" // CognitoSync.
-	ComprehendServiceID = "comprehend" // Comprehend.
-	ConfigServiceID = "config" // Config.
-	CurServiceID = "cur" // Cur.
-	DatapipelineServiceID = "datapipeline" // Datapipeline.
-	DaxServiceID = "dax" // Dax.
-	DevicefarmServiceID = "devicefarm" // Devicefarm.
-	DirectconnectServiceID = "directconnect" // Directconnect.
-	DiscoveryServiceID = "discovery" // Discovery.
-	DmsServiceID = "dms" // Dms.
-	DsServiceID = "ds" // Ds.
-	DynamodbServiceID = "dynamodb" // Dynamodb.
-	Ec2ServiceID = "ec2" // Ec2.
-	Ec2metadataServiceID = "ec2metadata" // Ec2metadata.
-	EcrServiceID = "ecr" // Ecr.
-	EcsServiceID = "ecs" // Ecs.
-	ElasticacheServiceID = "elasticache" // Elasticache.
-	ElasticbeanstalkServiceID = "elasticbeanstalk" // Elasticbeanstalk.
-	ElasticfilesystemServiceID = "elasticfilesystem" // Elasticfilesystem.
-	ElasticloadbalancingServiceID = "elasticloadbalancing" // Elasticloadbalancing.
-	ElasticmapreduceServiceID = "elasticmapreduce" // Elasticmapreduce.
-	ElastictranscoderServiceID = "elastictranscoder" // Elastictranscoder.
-	EmailServiceID = "email" // Email.
-	EntitlementMarketplaceServiceID = "entitlement.marketplace" // EntitlementMarketplace.
-	EsServiceID = "es" // Es.
-	EventsServiceID = "events" // Events.
-	FirehoseServiceID = "firehose" // Firehose.
-	FmsServiceID = "fms" // Fms.
-	GameliftServiceID = "gamelift" // Gamelift.
-	GlacierServiceID = "glacier" // Glacier.
-	GlueServiceID = "glue" // Glue.
-	GreengrassServiceID = "greengrass" // Greengrass.
-	GuarddutyServiceID = "guardduty" // Guardduty.
-	HealthServiceID = "health" // Health.
-	IamServiceID = "iam" // Iam.
-	ImportexportServiceID = "importexport" // Importexport.
-	InspectorServiceID = "inspector" // Inspector.
-	IotServiceID = "iot" // Iot.
-	IotanalyticsServiceID = "iotanalytics" // Iotanalytics.
-	KinesisServiceID = "kinesis" // Kinesis.
-	KinesisanalyticsServiceID = "kinesisanalytics" // Kinesisanalytics.
-	KinesisvideoServiceID = "kinesisvideo" // Kinesisvideo.
-	KmsServiceID = "kms" // Kms.
-	LambdaServiceID = "lambda" // Lambda.
-	LightsailServiceID = "lightsail" // Lightsail.
-	LogsServiceID = "logs" // Logs.
-	MachinelearningServiceID = "machinelearning" // Machinelearning.
-	MarketplacecommerceanalyticsServiceID = "marketplacecommerceanalytics" // Marketplacecommerceanalytics.
-	MediaconvertServiceID = "mediaconvert" // Mediaconvert.
-	MedialiveServiceID = "medialive" // Medialive.
-	MediapackageServiceID = "mediapackage" // Mediapackage.
-	MediastoreServiceID = "mediastore" // Mediastore.
-	MeteringMarketplaceServiceID = "metering.marketplace" // MeteringMarketplace.
-	MghServiceID = "mgh" // Mgh.
-	MobileanalyticsServiceID = "mobileanalytics" // Mobileanalytics.
-	ModelsLexServiceID = "models.lex" // ModelsLex.
-	MonitoringServiceID = "monitoring" // Monitoring.
-	MturkRequesterServiceID = "mturk-requester" // MturkRequester.
-	NeptuneServiceID = "neptune" // Neptune.
-	OpsworksServiceID = "opsworks" // Opsworks.
-	OpsworksCmServiceID = "opsworks-cm" // OpsworksCm.
-	OrganizationsServiceID = "organizations" // Organizations.
-	PinpointServiceID = "pinpoint" // Pinpoint.
-	PollyServiceID = "polly" // Polly.
-	RdsServiceID = "rds" // Rds.
-	RedshiftServiceID = "redshift" // Redshift.
-	RekognitionServiceID = "rekognition" // Rekognition.
-	ResourceGroupsServiceID = "resource-groups" // ResourceGroups.
-	Route53ServiceID = "route53" // Route53.
-	Route53domainsServiceID = "route53domains" // Route53domains.
-	RuntimeLexServiceID = "runtime.lex" // RuntimeLex.
-	RuntimeSagemakerServiceID = "runtime.sagemaker" // RuntimeSagemaker.
-	S3ServiceID = "s3" // S3.
-	S3ControlServiceID = "s3-control" // S3Control.
-	SagemakerServiceID = "api.sagemaker" // Sagemaker.
-	SdbServiceID = "sdb" // Sdb.
-	SecretsmanagerServiceID = "secretsmanager" // Secretsmanager.
-	ServerlessrepoServiceID = "serverlessrepo" // Serverlessrepo.
-	ServicecatalogServiceID = "servicecatalog" // Servicecatalog.
-	ServicediscoveryServiceID = "servicediscovery" // Servicediscovery.
-	ShieldServiceID = "shield" // Shield.
-	SmsServiceID = "sms" // Sms.
-	SnowballServiceID = "snowball" // Snowball.
-	SnsServiceID = "sns" // Sns.
-	SqsServiceID = "sqs" // Sqs.
-	SsmServiceID = "ssm" // Ssm.
-	StatesServiceID = "states" // States.
-	StoragegatewayServiceID = "storagegateway" // Storagegateway.
-	StreamsDynamodbServiceID = "streams.dynamodb" // StreamsDynamodb.
-	StsServiceID = "sts" // Sts.
-	SupportServiceID = "support" // Support.
-	SwfServiceID = "swf" // Swf.
-	TaggingServiceID = "tagging" // Tagging.
-	TransferServiceID = "transfer" // Transfer.
-	TranslateServiceID = "translate" // Translate.
-	WafServiceID = "waf" // Waf.
-	WafRegionalServiceID = "waf-regional" // WafRegional.
-	WorkdocsServiceID = "workdocs" // Workdocs.
-	WorkmailServiceID = "workmail" // Workmail.
-	WorkspacesServiceID = "workspaces" // Workspaces.
-	XrayServiceID = "xray" // Xray.
-)
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go
deleted file mode 100644
index 66dec6beb..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// Package endpoints provides the types and functionality for defining regions
-// and endpoints, as well as querying those definitions.
-//
-// The SDK's Regions and Endpoints metadata is code generated into the endpoints
-// package, and is accessible via the DefaultResolver function. This function
-// returns an endpoint Resolver that will search the metadata and build an
-// associated endpoint if one is found. The default resolver will search all
-// partitions known by the SDK, e.g. AWS Standard (aws), AWS China (aws-cn),
-// and AWS GovCloud (US) (aws-us-gov).
-//
-// # Enumerating Regions and Endpoint Metadata
-//
-// Casting the Resolver returned by DefaultResolver to an EnumPartitions
-// interface will allow you to get access to the list of underlying Partitions
-// with the Partitions method. This is helpful if you want to limit the SDK's
-// endpoint resolving to a single partition, or enumerate regions, services,
-// and endpoints in the partition.
-//
-//	resolver := endpoints.DefaultResolver()
-//	partitions := resolver.(endpoints.EnumPartitions).Partitions()
-//
-//	for _, p := range partitions {
-//		fmt.Println("Regions for", p.ID())
-//		for id := range p.Regions() {
-//			fmt.Println("*", id)
-//		}
-//
-//		fmt.Println("Services for", p.ID())
-//		for id := range p.Services() {
-//			fmt.Println("*", id)
-//		}
-//	}
-//
-// # Using Custom Endpoints
-//
-// The endpoints package also gives you the ability to use your own logic for
-// how endpoints are resolved. This is a great way to define a custom endpoint
-// for select services, without passing that logic down through your code.
-//
-// If a type implements the Resolver interface it can be used to resolve
-// endpoints. To use this with the SDK's Session and Config, set the value
-// of the type to the EndpointResolver field of aws.Config when initializing
-// the session, or service client.
-//
-// In addition, ResolverFunc is a wrapper for a func matching the signature
-// of Resolver.EndpointFor, converting it to a type that satisfies the
-// Resolver interface.
-//
-//	myCustomResolver := func(service, region string, optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) {
-//		if service == endpoints.S3ServiceID {
-//			return endpoints.ResolvedEndpoint{
-//				URL:           "s3.custom.endpoint.com",
-//				SigningRegion: "custom-signing-region",
-//			}, nil
-//		}
-//
-//		return endpoints.DefaultResolver().EndpointFor(service, region, optFns...)
-//	}
-//
-//	sess := session.Must(session.NewSession(&aws.Config{
-//		Region:           aws.String("us-west-2"),
-//		EndpointResolver: endpoints.ResolverFunc(myCustomResolver),
-//	}))
-package endpoints
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go
deleted file mode 100644
index a686a48fa..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go
+++ /dev/null
@@ -1,708 +0,0 @@
-package endpoints
-
-import (
-	"fmt"
-	"regexp"
-	"strings"
-
-	"github.com/aws/aws-sdk-go/aws/awserr"
-)
-
-// A Logger is a minimalistic interface for the SDK to log messages to.
-type Logger interface {
-	Log(...interface{})
-}
-
-// DualStackEndpointState is a constant to describe the dual-stack endpoint
-// resolution behavior.
-type DualStackEndpointState uint
-
-const (
-	// DualStackEndpointStateUnset is the default value behavior for dual-stack
-	// endpoint resolution.
-	DualStackEndpointStateUnset DualStackEndpointState = iota
-
-	// DualStackEndpointStateEnabled enables dual-stack endpoint resolution for endpoints.
-	DualStackEndpointStateEnabled
-
-	// DualStackEndpointStateDisabled disables dual-stack endpoint resolution for endpoints.
-	DualStackEndpointStateDisabled
-)
-
-// FIPSEndpointState is a constant to describe the FIPS endpoint resolution behavior.
-type FIPSEndpointState uint
-
-const (
-	// FIPSEndpointStateUnset is the default value behavior for FIPS endpoint resolution.
-	FIPSEndpointStateUnset FIPSEndpointState = iota
-
-	// FIPSEndpointStateEnabled enables FIPS endpoint resolution for service endpoints.
-	FIPSEndpointStateEnabled
-
-	// FIPSEndpointStateDisabled disables FIPS endpoint resolution for endpoints.
-	FIPSEndpointStateDisabled
-)
-
-// Options provide the configuration needed to direct how the
-// endpoints will be resolved.
-type Options struct {
-	// DisableSSL forces the endpoint to be resolved as HTTP
-	// instead of HTTPS if the service supports it.
-	DisableSSL bool
-
-	// Sets the resolver to resolve the endpoint as a dualstack endpoint
-	// for the service. If dualstack support for a service is not known and
-	// StrictMatching is not enabled a dualstack endpoint for the service will
-	// be returned. This endpoint may not be valid. If StrictMatching is
-	// enabled only services that are known to support dualstack will return
-	// dualstack endpoints.
-	//
-	// Deprecated: This option will continue to function for S3 and S3 Control for backwards compatibility.
-	// UseDualStackEndpoint should be used to enable usage of a service's dual-stack endpoint for all service clients
-	// moving forward. For S3 and S3 Control, when UseDualStackEndpoint is set to a non-zero value it takes higher
-	// precedence than this option.
-	UseDualStack bool
-
-	// Sets the resolver to resolve a dual-stack endpoint for the service.
-	UseDualStackEndpoint DualStackEndpointState
-
-	// UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint.
-	UseFIPSEndpoint FIPSEndpointState
-
-	// Enables strict matching of services and regions resolved endpoints.
-	// If the partition doesn't enumerate the exact service and region an
-	// error will be returned. This option will prevent returning endpoints
-	// that look valid, but may not resolve to any real endpoint.
-	StrictMatching bool
-
-	// Enables resolving a service endpoint based on the region provided if the
-	// service does not exist. The service endpoint ID will be used as the
-	// service domain name prefix.
By default the endpoint resolver requires the service - // to be known when resolving endpoints. - // - // If resolving an endpoint on the partition list the provided region will - // be used to determine which partition's domain name pattern to the service - // endpoint ID with. If both the service and region are unknown and resolving - // the endpoint on partition list an UnknownEndpointError error will be returned. - // - // If resolving and endpoint on a partition specific resolver that partition's - // domain name pattern will be used with the service endpoint ID. If both - // region and service do not exist when resolving an endpoint on a specific - // partition the partition's domain pattern will be used to combine the - // endpoint and region together. - // - // This option is ignored if StrictMatching is enabled. - ResolveUnknownService bool - - // Specifies the EC2 Instance Metadata Service default endpoint selection mode (IPv4 or IPv6) - EC2MetadataEndpointMode EC2IMDSEndpointModeState - - // STS Regional Endpoint flag helps with resolving the STS endpoint - STSRegionalEndpoint STSRegionalEndpoint - - // S3 Regional Endpoint flag helps with resolving the S3 endpoint - S3UsEast1RegionalEndpoint S3UsEast1RegionalEndpoint - - // ResolvedRegion is the resolved region string. If provided (non-zero length) it takes priority - // over the region name passed to the ResolveEndpoint call. - ResolvedRegion string - - // Logger is the logger that will be used to log messages. - Logger Logger - - // Determines whether logging of deprecated endpoints usage is enabled. - LogDeprecated bool -} - -func (o Options) getEndpointVariant(service string) (v endpointVariant) { - const s3 = "s3" - const s3Control = "s3-control" - - if (o.UseDualStackEndpoint == DualStackEndpointStateEnabled) || - ((service == s3 || service == s3Control) && (o.UseDualStackEndpoint == DualStackEndpointStateUnset && o.UseDualStack)) { - v |= dualStackVariant - } - if o.UseFIPSEndpoint == FIPSEndpointStateEnabled { - v |= fipsVariant - } - return v -} - -// EC2IMDSEndpointModeState is an enum configuration variable describing the client endpoint mode. -type EC2IMDSEndpointModeState uint - -// Enumeration values for EC2IMDSEndpointModeState -const ( - EC2IMDSEndpointModeStateUnset EC2IMDSEndpointModeState = iota - EC2IMDSEndpointModeStateIPv4 - EC2IMDSEndpointModeStateIPv6 -) - -// SetFromString sets the EC2IMDSEndpointModeState based on the provided string value. Unknown values will default to EC2IMDSEndpointModeStateUnset -func (e *EC2IMDSEndpointModeState) SetFromString(v string) error { - v = strings.TrimSpace(v) - - switch { - case len(v) == 0: - *e = EC2IMDSEndpointModeStateUnset - case strings.EqualFold(v, "IPv6"): - *e = EC2IMDSEndpointModeStateIPv6 - case strings.EqualFold(v, "IPv4"): - *e = EC2IMDSEndpointModeStateIPv4 - default: - return fmt.Errorf("unknown EC2 IMDS endpoint mode, must be either IPv6 or IPv4") - } - return nil -} - -// STSRegionalEndpoint is an enum for the states of the STS Regional Endpoint -// options. -type STSRegionalEndpoint int - -func (e STSRegionalEndpoint) String() string { - switch e { - case LegacySTSEndpoint: - return "legacy" - case RegionalSTSEndpoint: - return "regional" - case UnsetSTSEndpoint: - return "" - default: - return "unknown" - } -} - -const ( - - // UnsetSTSEndpoint represents that STS Regional Endpoint flag is not specified. 
- UnsetSTSEndpoint STSRegionalEndpoint = iota - - // LegacySTSEndpoint represents when STS Regional Endpoint flag is specified - // to use legacy endpoints. - LegacySTSEndpoint - - // RegionalSTSEndpoint represents when STS Regional Endpoint flag is specified - // to use regional endpoints. - RegionalSTSEndpoint -) - -// GetSTSRegionalEndpoint function returns the STSRegionalEndpointFlag based -// on the input string provided in env config or shared config by the user. -// -// `legacy`, `regional` are the only case-insensitive valid strings for -// resolving the STS regional Endpoint flag. -func GetSTSRegionalEndpoint(s string) (STSRegionalEndpoint, error) { - switch { - case strings.EqualFold(s, "legacy"): - return LegacySTSEndpoint, nil - case strings.EqualFold(s, "regional"): - return RegionalSTSEndpoint, nil - default: - return UnsetSTSEndpoint, fmt.Errorf("unable to resolve the value of STSRegionalEndpoint for %v", s) - } -} - -// S3UsEast1RegionalEndpoint is an enum for the states of the S3 us-east-1 -// Regional Endpoint options. -type S3UsEast1RegionalEndpoint int - -func (e S3UsEast1RegionalEndpoint) String() string { - switch e { - case LegacyS3UsEast1Endpoint: - return "legacy" - case RegionalS3UsEast1Endpoint: - return "regional" - case UnsetS3UsEast1Endpoint: - return "" - default: - return "unknown" - } -} - -const ( - - // UnsetS3UsEast1Endpoint represents that S3 Regional Endpoint flag is not - // specified. - UnsetS3UsEast1Endpoint S3UsEast1RegionalEndpoint = iota - - // LegacyS3UsEast1Endpoint represents when S3 Regional Endpoint flag is - // specified to use legacy endpoints. - LegacyS3UsEast1Endpoint - - // RegionalS3UsEast1Endpoint represents when S3 Regional Endpoint flag is - // specified to use regional endpoints. - RegionalS3UsEast1Endpoint -) - -// GetS3UsEast1RegionalEndpoint function returns the S3UsEast1RegionalEndpointFlag based -// on the input string provided in env config or shared config by the user. -// -// `legacy`, `regional` are the only case-insensitive valid strings for -// resolving the S3 regional Endpoint flag. -func GetS3UsEast1RegionalEndpoint(s string) (S3UsEast1RegionalEndpoint, error) { - switch { - case strings.EqualFold(s, "legacy"): - return LegacyS3UsEast1Endpoint, nil - case strings.EqualFold(s, "regional"): - return RegionalS3UsEast1Endpoint, nil - default: - return UnsetS3UsEast1Endpoint, - fmt.Errorf("unable to resolve the value of S3UsEast1RegionalEndpoint for %v", s) - } -} - -// Set combines all of the option functions together. -func (o *Options) Set(optFns ...func(*Options)) { - for _, fn := range optFns { - fn(o) - } -} - -// DisableSSLOption sets the DisableSSL options. Can be used as a functional -// option when resolving endpoints. -func DisableSSLOption(o *Options) { - o.DisableSSL = true -} - -// UseDualStackOption sets the UseDualStack option. Can be used as a functional -// option when resolving endpoints. -// -// Deprecated: UseDualStackEndpointOption should be used to enable usage of a service's dual-stack endpoint. -// When DualStackEndpointState is set to a non-zero value it takes higher precedence then this option. -func UseDualStackOption(o *Options) { - o.UseDualStack = true -} - -// UseDualStackEndpointOption sets the UseDualStackEndpoint option to enabled. Can be used as a functional -// option when resolving endpoints. -func UseDualStackEndpointOption(o *Options) { - o.UseDualStackEndpoint = DualStackEndpointStateEnabled -} - -// UseFIPSEndpointOption sets the UseFIPSEndpoint option to enabled. 
Can be used as a functional -// option when resolving endpoints. -func UseFIPSEndpointOption(o *Options) { - o.UseFIPSEndpoint = FIPSEndpointStateEnabled -} - -// StrictMatchingOption sets the StrictMatching option. Can be used as a functional -// option when resolving endpoints. -func StrictMatchingOption(o *Options) { - o.StrictMatching = true -} - -// ResolveUnknownServiceOption sets the ResolveUnknownService option. Can be used -// as a functional option when resolving endpoints. -func ResolveUnknownServiceOption(o *Options) { - o.ResolveUnknownService = true -} - -// STSRegionalEndpointOption enables the STS endpoint resolver behavior to resolve -// STS endpoint to their regional endpoint, instead of the global endpoint. -func STSRegionalEndpointOption(o *Options) { - o.STSRegionalEndpoint = RegionalSTSEndpoint -} - -// A Resolver provides the interface for functionality to resolve endpoints. -// The build in Partition and DefaultResolver return value satisfy this interface. -type Resolver interface { - EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) -} - -// ResolverFunc is a helper utility that wraps a function so it satisfies the -// Resolver interface. This is useful when you want to add additional endpoint -// resolving logic, or stub out specific endpoints with custom values. -type ResolverFunc func(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) - -// EndpointFor wraps the ResolverFunc function to satisfy the Resolver interface. -func (fn ResolverFunc) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) { - return fn(service, region, opts...) -} - -var schemeRE = regexp.MustCompile("^([^:]+)://") - -// AddScheme adds the HTTP or HTTPS schemes to a endpoint URL if there is no -// scheme. If disableSSL is true HTTP will set HTTP instead of the default HTTPS. -// -// If disableSSL is set, it will only set the URL's scheme if the URL does not -// contain a scheme. -func AddScheme(endpoint string, disableSSL bool) string { - if !schemeRE.MatchString(endpoint) { - scheme := "https" - if disableSSL { - scheme = "http" - } - endpoint = fmt.Sprintf("%s://%s", scheme, endpoint) - } - - return endpoint -} - -// EnumPartitions a provides a way to retrieve the underlying partitions that -// make up the SDK's default Resolver, or any resolver decoded from a model -// file. -// -// Use this interface with DefaultResolver and DecodeModels to get the list of -// Partitions. -type EnumPartitions interface { - Partitions() []Partition -} - -// RegionsForService returns a map of regions for the partition and service. -// If either the partition or service does not exist false will be returned -// as the second parameter. -// -// This example shows how to get the regions for DynamoDB in the AWS partition. -// -// rs, exists := endpoints.RegionsForService(endpoints.DefaultPartitions(), endpoints.AwsPartitionID, endpoints.DynamodbServiceID) -// -// This is equivalent to using the partition directly. 
-// -// rs := endpoints.AwsPartition().Services()[endpoints.DynamodbServiceID].Regions() -func RegionsForService(ps []Partition, partitionID, serviceID string) (map[string]Region, bool) { - for _, p := range ps { - if p.ID() != partitionID { - continue - } - if _, ok := p.p.Services[serviceID]; !(ok || serviceID == Ec2metadataServiceID) { - break - } - - s := Service{ - id: serviceID, - p: p.p, - } - return s.Regions(), true - } - - return map[string]Region{}, false -} - -// PartitionForRegion returns the first partition which includes the region -// passed in. This includes both known regions and regions which match -// a pattern supported by the partition which may include regions that are -// not explicitly known by the partition. Use the Regions method of the -// returned Partition if explicit support is needed. -func PartitionForRegion(ps []Partition, regionID string) (Partition, bool) { - for _, p := range ps { - if _, ok := p.p.Regions[regionID]; ok || p.p.RegionRegex.MatchString(regionID) { - return p, true - } - } - - return Partition{}, false -} - -// A Partition provides the ability to enumerate the partition's regions -// and services. -type Partition struct { - id, dnsSuffix string - p *partition -} - -// DNSSuffix returns the base domain name of the partition. -func (p Partition) DNSSuffix() string { return p.dnsSuffix } - -// ID returns the identifier of the partition. -func (p Partition) ID() string { return p.id } - -// EndpointFor attempts to resolve the endpoint based on service and region. -// See Options for information on configuring how the endpoint is resolved. -// -// If the service cannot be found in the metadata the UnknownServiceError -// error will be returned. This validation will occur regardless if -// StrictMatching is enabled. To enable resolving unknown services set the -// "ResolveUnknownService" option to true. When StrictMatching is disabled -// this option allows the partition resolver to resolve a endpoint based on -// the service endpoint ID provided. -// -// When resolving endpoints you can choose to enable StrictMatching. This will -// require the provided service and region to be known by the partition. -// If the endpoint cannot be strictly resolved an error will be returned. This -// mode is useful to ensure the endpoint resolved is valid. Without -// StrictMatching enabled the endpoint returned may look valid but may not work. -// StrictMatching requires the SDK to be updated if you want to take advantage -// of new regions and services expansions. -// -// Errors that can be returned. -// - UnknownServiceError -// - UnknownEndpointError -func (p Partition) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) { - return p.p.EndpointFor(service, region, opts...) -} - -// Regions returns a map of Regions indexed by their ID. This is useful for -// enumerating over the regions in a partition. -func (p Partition) Regions() map[string]Region { - rs := make(map[string]Region, len(p.p.Regions)) - for id, r := range p.p.Regions { - rs[id] = Region{ - id: id, - desc: r.Description, - p: p.p, - } - } - - return rs -} - -// Services returns a map of Service indexed by their ID. This is useful for -// enumerating over the services in a partition. 
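For context on the API being deleted above, here is a minimal, illustrative sketch (not part of this patch) of resolving an endpoint with strict matching, using the vendored v1 `endpoints` package as it existed before this change:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	// StrictMatchingOption requires both the service and region to be
	// explicitly modeled; otherwise an error is returned instead of a
	// synthesized endpoint that may not actually exist.
	resolved, err := endpoints.DefaultResolver().EndpointFor(
		"ec2", "eu-west-1", endpoints.StrictMatchingOption,
	)
	if err != nil {
		fmt.Println("resolve failed:", err)
		return
	}
	fmt.Println(resolved.URL, resolved.SigningRegion)
}
```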
-func (p Partition) Services() map[string]Service { - ss := make(map[string]Service, len(p.p.Services)) - - for id := range p.p.Services { - ss[id] = Service{ - id: id, - p: p.p, - } - } - - // Since we have removed the customization that injected this into the model - // we still need to pretend that this is a modeled service. - if _, ok := ss[Ec2metadataServiceID]; !ok { - ss[Ec2metadataServiceID] = Service{ - id: Ec2metadataServiceID, - p: p.p, - } - } - - return ss -} - -// A Region provides information about a region, and ability to resolve an -// endpoint from the context of a region, given a service. -type Region struct { - id, desc string - p *partition -} - -// ID returns the region's identifier. -func (r Region) ID() string { return r.id } - -// Description returns the region's description. The region description -// is free text, it can be empty, and it may change between SDK releases. -func (r Region) Description() string { return r.desc } - -// ResolveEndpoint resolves an endpoint from the context of the region given -// a service. See Partition.EndpointFor for usage and errors that can be returned. -func (r Region) ResolveEndpoint(service string, opts ...func(*Options)) (ResolvedEndpoint, error) { - return r.p.EndpointFor(service, r.id, opts...) -} - -// Services returns a list of all services that are known to be in this region. -func (r Region) Services() map[string]Service { - ss := map[string]Service{} - for id, s := range r.p.Services { - if _, ok := s.Endpoints[endpointKey{Region: r.id}]; ok { - ss[id] = Service{ - id: id, - p: r.p, - } - } - } - - return ss -} - -// A Service provides information about a service, and ability to resolve an -// endpoint from the context of a service, given a region. -type Service struct { - id string - p *partition -} - -// ID returns the identifier for the service. -func (s Service) ID() string { return s.id } - -// ResolveEndpoint resolves an endpoint from the context of a service given -// a region. See Partition.EndpointFor for usage and errors that can be returned. -func (s Service) ResolveEndpoint(region string, opts ...func(*Options)) (ResolvedEndpoint, error) { - return s.p.EndpointFor(s.id, region, opts...) -} - -// Regions returns a map of Regions that the service is present in. -// -// A region is the AWS region the service exists in. Whereas a Endpoint is -// an URL that can be resolved to a instance of a service. -func (s Service) Regions() map[string]Region { - rs := map[string]Region{} - - service, ok := s.p.Services[s.id] - - // Since ec2metadata customization has been removed we need to check - // if it was defined in non-standard endpoints.json file. If it's not - // then we can return the empty map as there is no regional-endpoints for IMDS. - // Otherwise, we iterate need to iterate the non-standard model. - if s.id == Ec2metadataServiceID && !ok { - return rs - } - - for id := range service.Endpoints { - if id.Variant != 0 { - continue - } - if r, ok := s.p.Regions[id.Region]; ok { - rs[id.Region] = Region{ - id: id.Region, - desc: r.Description, - p: s.p, - } - } - } - - return rs -} - -// Endpoints returns a map of Endpoints indexed by their ID for all known -// endpoints for a service. -// -// A region is the AWS region the service exists in. Whereas a Endpoint is -// an URL that can be resolved to a instance of a service. 
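The Region and Service accessors shown above composed into region-scoped resolution. A small sketch under the same pre-removal API (the region and service chosen here are just examples):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	// Resolve S3's endpoint from the context of a single region.
	p := endpoints.AwsPartition()
	if r, ok := p.Regions()["us-west-2"]; ok {
		if re, err := r.ResolveEndpoint("s3"); err == nil {
			fmt.Println(re.URL) // e.g. https://s3.us-west-2.amazonaws.com
		}
	}
}
```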
-func (s Service) Endpoints() map[string]Endpoint { - es := make(map[string]Endpoint, len(s.p.Services[s.id].Endpoints)) - for id := range s.p.Services[s.id].Endpoints { - if id.Variant != 0 { - continue - } - es[id.Region] = Endpoint{ - id: id.Region, - serviceID: s.id, - p: s.p, - } - } - - return es -} - -// A Endpoint provides information about endpoints, and provides the ability -// to resolve that endpoint for the service, and the region the endpoint -// represents. -type Endpoint struct { - id string - serviceID string - p *partition -} - -// ID returns the identifier for an endpoint. -func (e Endpoint) ID() string { return e.id } - -// ServiceID returns the identifier the endpoint belongs to. -func (e Endpoint) ServiceID() string { return e.serviceID } - -// ResolveEndpoint resolves an endpoint from the context of a service and -// region the endpoint represents. See Partition.EndpointFor for usage and -// errors that can be returned. -func (e Endpoint) ResolveEndpoint(opts ...func(*Options)) (ResolvedEndpoint, error) { - return e.p.EndpointFor(e.serviceID, e.id, opts...) -} - -// A ResolvedEndpoint is an endpoint that has been resolved based on a partition -// service, and region. -type ResolvedEndpoint struct { - // The endpoint URL - URL string - - // The endpoint partition - PartitionID string - - // The region that should be used for signing requests. - SigningRegion string - - // The service name that should be used for signing requests. - SigningName string - - // States that the signing name for this endpoint was derived from metadata - // passed in, but was not explicitly modeled. - SigningNameDerived bool - - // The signing method that should be used for signing requests. - SigningMethod string -} - -// So that the Error interface type can be included as an anonymous field -// in the requestError struct and not conflict with the error.Error() method. -type awsError awserr.Error - -// A EndpointNotFoundError is returned when in StrictMatching mode, and the -// endpoint for the service and region cannot be found in any of the partitions. -type EndpointNotFoundError struct { - awsError - Partition string - Service string - Region string -} - -// A UnknownServiceError is returned when the service does not resolve to an -// endpoint. Includes a list of all known services for the partition. Returned -// when a partition does not support the service. -type UnknownServiceError struct { - awsError - Partition string - Service string - Known []string -} - -// NewUnknownServiceError builds and returns UnknownServiceError. -func NewUnknownServiceError(p, s string, known []string) UnknownServiceError { - return UnknownServiceError{ - awsError: awserr.New("UnknownServiceError", - "could not resolve endpoint for unknown service", nil), - Partition: p, - Service: s, - Known: known, - } -} - -// String returns the string representation of the error. -func (e UnknownServiceError) Error() string { - extra := fmt.Sprintf("partition: %q, service: %q", - e.Partition, e.Service) - if len(e.Known) > 0 { - extra += fmt.Sprintf(", known: %v", e.Known) - } - return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr()) -} - -// String returns the string representation of the error. -func (e UnknownServiceError) String() string { - return e.Error() -} - -// A UnknownEndpointError is returned when in StrictMatching mode and the -// service is valid, but the region does not resolve to an endpoint. Includes -// a list of all known endpoints for the service. 
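Callers typically told these two error cases apart with a type switch. An illustrative sketch against the same removed API (the fake region name is made up):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	_, err := endpoints.DefaultResolver().EndpointFor(
		"dynamodb", "mars-east-1", endpoints.StrictMatchingOption,
	)
	switch e := err.(type) {
	case endpoints.UnknownServiceError:
		fmt.Println("unknown service in partition:", e.Partition)
	case endpoints.UnknownEndpointError:
		fmt.Println("no endpoint for region; known regions:", e.Known)
	default:
		fmt.Println(err)
	}
}
```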
-type UnknownEndpointError struct { - awsError - Partition string - Service string - Region string - Known []string -} - -// NewUnknownEndpointError builds and returns UnknownEndpointError. -func NewUnknownEndpointError(p, s, r string, known []string) UnknownEndpointError { - return UnknownEndpointError{ - awsError: awserr.New("UnknownEndpointError", - "could not resolve endpoint", nil), - Partition: p, - Service: s, - Region: r, - Known: known, - } -} - -// String returns the string representation of the error. -func (e UnknownEndpointError) Error() string { - extra := fmt.Sprintf("partition: %q, service: %q, region: %q", - e.Partition, e.Service, e.Region) - if len(e.Known) > 0 { - extra += fmt.Sprintf(", known: %v", e.Known) - } - return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr()) -} - -// String returns the string representation of the error. -func (e UnknownEndpointError) String() string { - return e.Error() -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/legacy_regions.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/legacy_regions.go deleted file mode 100644 index df75e899a..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/legacy_regions.go +++ /dev/null @@ -1,24 +0,0 @@ -package endpoints - -var legacyGlobalRegions = map[string]map[string]struct{}{ - "sts": { - "ap-northeast-1": {}, - "ap-south-1": {}, - "ap-southeast-1": {}, - "ap-southeast-2": {}, - "ca-central-1": {}, - "eu-central-1": {}, - "eu-north-1": {}, - "eu-west-1": {}, - "eu-west-2": {}, - "eu-west-3": {}, - "sa-east-1": {}, - "us-east-1": {}, - "us-east-2": {}, - "us-west-1": {}, - "us-west-2": {}, - }, - "s3": { - "us-east-1": {}, - }, -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go deleted file mode 100644 index 89f6627dc..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go +++ /dev/null @@ -1,594 +0,0 @@ -package endpoints - -import ( - "encoding/json" - "fmt" - "regexp" - "strconv" - "strings" -) - -const ( - ec2MetadataEndpointIPv6 = "http://[fd00:ec2::254]/latest" - ec2MetadataEndpointIPv4 = "http://169.254.169.254/latest" -) - -const dnsSuffixTemplateKey = "{dnsSuffix}" - -// defaultKey is a compound map key of a variant and other values. -type defaultKey struct { - Variant endpointVariant - ServiceVariant serviceVariant -} - -// endpointKey is a compound map key of a region and associated variant value. -type endpointKey struct { - Region string - Variant endpointVariant -} - -// endpointVariant is a bit field to describe the endpoints attributes. -type endpointVariant uint64 - -// serviceVariant is a bit field to describe the service endpoint attributes. -type serviceVariant uint64 - -const ( - // fipsVariant indicates that the endpoint is FIPS capable. - fipsVariant endpointVariant = 1 << (64 - 1 - iota) - - // dualStackVariant indicates that the endpoint is DualStack capable. - dualStackVariant -) - -var regionValidationRegex = regexp.MustCompile(`^[[:alnum:]]([[:alnum:]\-]*[[:alnum:]])?$`) - -type partitions []partition - -func (ps partitions) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) { - var opt Options - opt.Set(opts...) - - if len(opt.ResolvedRegion) > 0 { - region = opt.ResolvedRegion - } - - for i := 0; i < len(ps); i++ { - if !ps[i].canResolveEndpoint(service, region, opt) { - continue - } - - return ps[i].EndpointFor(service, region, opts...) 
- } - - // If loose matching fallback to first partition format to use - // when resolving the endpoint. - if !opt.StrictMatching && len(ps) > 0 { - return ps[0].EndpointFor(service, region, opts...) - } - - return ResolvedEndpoint{}, NewUnknownEndpointError("all partitions", service, region, []string{}) -} - -// Partitions satisfies the EnumPartitions interface and returns a list -// of Partitions representing each partition represented in the SDK's -// endpoints model. -func (ps partitions) Partitions() []Partition { - parts := make([]Partition, 0, len(ps)) - for i := 0; i < len(ps); i++ { - parts = append(parts, ps[i].Partition()) - } - - return parts -} - -type endpointWithVariants struct { - endpoint - Variants []endpointWithTags `json:"variants"` -} - -type endpointWithTags struct { - endpoint - Tags []string `json:"tags"` -} - -type endpointDefaults map[defaultKey]endpoint - -func (p *endpointDefaults) UnmarshalJSON(data []byte) error { - if *p == nil { - *p = make(endpointDefaults) - } - - var e endpointWithVariants - if err := json.Unmarshal(data, &e); err != nil { - return err - } - - (*p)[defaultKey{Variant: 0}] = e.endpoint - - e.Hostname = "" - e.DNSSuffix = "" - - for _, variant := range e.Variants { - endpointVariant, unknown := parseVariantTags(variant.Tags) - if unknown { - continue - } - - var ve endpoint - ve.mergeIn(e.endpoint) - ve.mergeIn(variant.endpoint) - - (*p)[defaultKey{Variant: endpointVariant}] = ve - } - - return nil -} - -func parseVariantTags(tags []string) (ev endpointVariant, unknown bool) { - if len(tags) == 0 { - unknown = true - return - } - - for _, tag := range tags { - switch { - case strings.EqualFold("fips", tag): - ev |= fipsVariant - case strings.EqualFold("dualstack", tag): - ev |= dualStackVariant - default: - unknown = true - } - } - return ev, unknown -} - -type partition struct { - ID string `json:"partition"` - Name string `json:"partitionName"` - DNSSuffix string `json:"dnsSuffix"` - RegionRegex regionRegex `json:"regionRegex"` - Defaults endpointDefaults `json:"defaults"` - Regions regions `json:"regions"` - Services services `json:"services"` -} - -func (p partition) Partition() Partition { - return Partition{ - dnsSuffix: p.DNSSuffix, - id: p.ID, - p: &p, - } -} - -func (p partition) canResolveEndpoint(service, region string, options Options) bool { - s, hasService := p.Services[service] - _, hasEndpoint := s.Endpoints[endpointKey{ - Region: region, - Variant: options.getEndpointVariant(service), - }] - - if hasEndpoint && hasService { - return true - } - - if options.StrictMatching { - return false - } - - return p.RegionRegex.MatchString(region) -} - -func allowLegacyEmptyRegion(service string) bool { - legacy := map[string]struct{}{ - "budgets": {}, - "ce": {}, - "chime": {}, - "cloudfront": {}, - "ec2metadata": {}, - "iam": {}, - "importexport": {}, - "organizations": {}, - "route53": {}, - "sts": {}, - "support": {}, - "waf": {}, - } - - _, allowed := legacy[service] - return allowed -} - -func (p partition) EndpointFor(service, region string, opts ...func(*Options)) (resolved ResolvedEndpoint, err error) { - var opt Options - opt.Set(opts...) 
- - if len(opt.ResolvedRegion) > 0 { - region = opt.ResolvedRegion - } - - s, hasService := p.Services[service] - - if service == Ec2metadataServiceID && !hasService { - endpoint := getEC2MetadataEndpoint(p.ID, service, opt.EC2MetadataEndpointMode) - return endpoint, nil - } - - if len(service) == 0 || !(hasService || opt.ResolveUnknownService) { - // Only return error if the resolver will not fallback to creating - // endpoint based on service endpoint ID passed in. - return resolved, NewUnknownServiceError(p.ID, service, serviceList(p.Services)) - } - - if len(region) == 0 && allowLegacyEmptyRegion(service) && len(s.PartitionEndpoint) != 0 { - region = s.PartitionEndpoint - } - - if r, ok := isLegacyGlobalRegion(service, region, opt); ok { - region = r - } - - variant := opt.getEndpointVariant(service) - - endpoints := s.Endpoints - - serviceDefaults, hasServiceDefault := s.Defaults[defaultKey{Variant: variant}] - // If we searched for a variant which may have no explicit service defaults, - // then we need to inherit the standard service defaults except the hostname and dnsSuffix - if variant != 0 && !hasServiceDefault { - serviceDefaults = s.Defaults[defaultKey{}] - serviceDefaults.Hostname = "" - serviceDefaults.DNSSuffix = "" - } - - partitionDefaults, hasPartitionDefault := p.Defaults[defaultKey{Variant: variant}] - - var dnsSuffix string - if len(serviceDefaults.DNSSuffix) > 0 { - dnsSuffix = serviceDefaults.DNSSuffix - } else if variant == 0 { - // For legacy reasons the partition dnsSuffix is not in the defaults, so if we looked for - // a non-variant endpoint then we need to set the dnsSuffix. - dnsSuffix = p.DNSSuffix - } - - noDefaults := !hasServiceDefault && !hasPartitionDefault - - e, hasEndpoint := s.endpointForRegion(region, endpoints, variant) - if len(region) == 0 || (!hasEndpoint && (opt.StrictMatching || noDefaults)) { - return resolved, NewUnknownEndpointError(p.ID, service, region, endpointList(endpoints, variant)) - } - - defs := []endpoint{partitionDefaults, serviceDefaults} - - return e.resolve(service, p.ID, region, dnsSuffixTemplateKey, dnsSuffix, defs, opt) -} - -func getEC2MetadataEndpoint(partitionID, service string, mode EC2IMDSEndpointModeState) ResolvedEndpoint { - switch mode { - case EC2IMDSEndpointModeStateIPv6: - return ResolvedEndpoint{ - URL: ec2MetadataEndpointIPv6, - PartitionID: partitionID, - SigningRegion: "aws-global", - SigningName: service, - SigningNameDerived: true, - SigningMethod: "v4", - } - case EC2IMDSEndpointModeStateIPv4: - fallthrough - default: - return ResolvedEndpoint{ - URL: ec2MetadataEndpointIPv4, - PartitionID: partitionID, - SigningRegion: "aws-global", - SigningName: service, - SigningNameDerived: true, - SigningMethod: "v4", - } - } -} - -func isLegacyGlobalRegion(service string, region string, opt Options) (string, bool) { - if opt.getEndpointVariant(service) != 0 { - return "", false - } - - const ( - sts = "sts" - s3 = "s3" - awsGlobal = "aws-global" - ) - - switch { - case service == sts && opt.STSRegionalEndpoint == RegionalSTSEndpoint: - return region, false - case service == s3 && opt.S3UsEast1RegionalEndpoint == RegionalS3UsEast1Endpoint: - return region, false - default: - if _, ok := legacyGlobalRegions[service][region]; ok { - return awsGlobal, true - } - } - - return region, false -} - -func serviceList(ss services) []string { - list := make([]string, 0, len(ss)) - for k := range ss { - list = append(list, k) - } - return list -} -func endpointList(es serviceEndpoints, variant endpointVariant) []string { - 
list := make([]string, 0, len(es)) - for k := range es { - if k.Variant != variant { - continue - } - list = append(list, k.Region) - } - return list -} - -type regionRegex struct { - *regexp.Regexp -} - -func (rr *regionRegex) UnmarshalJSON(b []byte) (err error) { - // Strip leading and trailing quotes - regex, err := strconv.Unquote(string(b)) - if err != nil { - return fmt.Errorf("unable to strip quotes from regex, %v", err) - } - - rr.Regexp, err = regexp.Compile(regex) - if err != nil { - return fmt.Errorf("unable to unmarshal region regex, %v", err) - } - return nil -} - -type regions map[string]region - -type region struct { - Description string `json:"description"` -} - -type services map[string]service - -type service struct { - PartitionEndpoint string `json:"partitionEndpoint"` - IsRegionalized boxedBool `json:"isRegionalized,omitempty"` - Defaults endpointDefaults `json:"defaults"` - Endpoints serviceEndpoints `json:"endpoints"` -} - -func (s *service) endpointForRegion(region string, endpoints serviceEndpoints, variant endpointVariant) (endpoint, bool) { - if e, ok := endpoints[endpointKey{Region: region, Variant: variant}]; ok { - return e, true - } - - if s.IsRegionalized == boxedFalse { - return endpoints[endpointKey{Region: s.PartitionEndpoint, Variant: variant}], region == s.PartitionEndpoint - } - - // Unable to find any matching endpoint, return - // blank that will be used for generic endpoint creation. - return endpoint{}, false -} - -type serviceEndpoints map[endpointKey]endpoint - -func (s *serviceEndpoints) UnmarshalJSON(data []byte) error { - if *s == nil { - *s = make(serviceEndpoints) - } - - var regionToEndpoint map[string]endpointWithVariants - - if err := json.Unmarshal(data, ®ionToEndpoint); err != nil { - return err - } - - for region, e := range regionToEndpoint { - (*s)[endpointKey{Region: region}] = e.endpoint - - e.Hostname = "" - e.DNSSuffix = "" - - for _, variant := range e.Variants { - endpointVariant, unknown := parseVariantTags(variant.Tags) - if unknown { - continue - } - - var ve endpoint - ve.mergeIn(e.endpoint) - ve.mergeIn(variant.endpoint) - - (*s)[endpointKey{Region: region, Variant: endpointVariant}] = ve - } - } - - return nil -} - -type endpoint struct { - Hostname string `json:"hostname"` - Protocols []string `json:"protocols"` - CredentialScope credentialScope `json:"credentialScope"` - - DNSSuffix string `json:"dnsSuffix"` - - // Signature Version not used - SignatureVersions []string `json:"signatureVersions"` - - // SSLCommonName not used. - SSLCommonName string `json:"sslCommonName"` - - Deprecated boxedBool `json:"deprecated"` -} - -// isZero returns whether the endpoint structure is an empty (zero) value. 
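The variant plumbing above reduces to a bit-field keyed map lookup. A self-contained sketch of the same pattern, using hypothetical local types and hostnames since the SDK's equivalents (`endpointVariant`, `endpointKey`) are unexported:

```go
package main

import "fmt"

// variant mirrors the endpointVariant bit field above.
type variant uint64

const (
	fips variant = 1 << (64 - 1 - iota) // highest bit
	dualStack                           // next bit down
)

// key mirrors endpointKey: a region plus the variant bits in effect.
type key struct {
	region string
	v      variant
}

func main() {
	// Hypothetical hostnames, for illustration only.
	eps := map[key]string{
		{region: "us-east-1"}:                      "svc.us-east-1.example.com",
		{region: "us-east-1", v: fips}:             "svc-fips.us-east-1.example.com",
		{region: "us-east-1", v: fips | dualStack}: "svc-fips.us-east-1.api.example.com",
	}
	// A FIPS + dual-stack request selects the most specific variant entry.
	fmt.Println(eps[key{region: "us-east-1", v: fips | dualStack}])
}
```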
-func (e endpoint) isZero() bool { - switch { - case len(e.Hostname) != 0: - return false - case len(e.Protocols) != 0: - return false - case e.CredentialScope != (credentialScope{}): - return false - case len(e.SignatureVersions) != 0: - return false - case len(e.SSLCommonName) != 0: - return false - } - return true -} - -const ( - defaultProtocol = "https" - defaultSigner = "v4" -) - -var ( - protocolPriority = []string{"https", "http"} - signerPriority = []string{"v4", "v2"} -) - -func getByPriority(s []string, p []string, def string) string { - if len(s) == 0 { - return def - } - - for i := 0; i < len(p); i++ { - for j := 0; j < len(s); j++ { - if s[j] == p[i] { - return s[j] - } - } - } - - return s[0] -} - -func (e endpoint) resolve(service, partitionID, region, dnsSuffixTemplateVariable, dnsSuffix string, defs []endpoint, opts Options) (ResolvedEndpoint, error) { - var merged endpoint - for _, def := range defs { - merged.mergeIn(def) - } - merged.mergeIn(e) - e = merged - - signingRegion := e.CredentialScope.Region - if len(signingRegion) == 0 { - signingRegion = region - } - - signingName := e.CredentialScope.Service - var signingNameDerived bool - if len(signingName) == 0 { - signingName = service - signingNameDerived = true - } - - hostname := e.Hostname - - if !validateInputRegion(region) { - return ResolvedEndpoint{}, fmt.Errorf("invalid region identifier format provided") - } - - if len(merged.DNSSuffix) > 0 { - dnsSuffix = merged.DNSSuffix - } - - u := strings.Replace(hostname, "{service}", service, 1) - u = strings.Replace(u, "{region}", region, 1) - u = strings.Replace(u, dnsSuffixTemplateVariable, dnsSuffix, 1) - - scheme := getEndpointScheme(e.Protocols, opts.DisableSSL) - u = fmt.Sprintf("%s://%s", scheme, u) - - if e.Deprecated == boxedTrue && opts.LogDeprecated && opts.Logger != nil { - opts.Logger.Log(fmt.Sprintf("endpoint identifier %q, url %q marked as deprecated", region, u)) - } - - return ResolvedEndpoint{ - URL: u, - PartitionID: partitionID, - SigningRegion: signingRegion, - SigningName: signingName, - SigningNameDerived: signingNameDerived, - SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner), - }, nil -} - -func getEndpointScheme(protocols []string, disableSSL bool) string { - if disableSSL { - return "http" - } - - return getByPriority(protocols, protocolPriority, defaultProtocol) -} - -func (e *endpoint) mergeIn(other endpoint) { - if len(other.Hostname) > 0 { - e.Hostname = other.Hostname - } - if len(other.Protocols) > 0 { - e.Protocols = other.Protocols - } - if len(other.SignatureVersions) > 0 { - e.SignatureVersions = other.SignatureVersions - } - if len(other.CredentialScope.Region) > 0 { - e.CredentialScope.Region = other.CredentialScope.Region - } - if len(other.CredentialScope.Service) > 0 { - e.CredentialScope.Service = other.CredentialScope.Service - } - if len(other.SSLCommonName) > 0 { - e.SSLCommonName = other.SSLCommonName - } - if len(other.DNSSuffix) > 0 { - e.DNSSuffix = other.DNSSuffix - } - if other.Deprecated != boxedBoolUnset { - e.Deprecated = other.Deprecated - } -} - -type credentialScope struct { - Region string `json:"region"` - Service string `json:"service"` -} - -type boxedBool int - -func (b *boxedBool) UnmarshalJSON(buf []byte) error { - v, err := strconv.ParseBool(string(buf)) - if err != nil { - return err - } - - if v { - *b = boxedTrue - } else { - *b = boxedFalse - } - - return nil -} - -const ( - boxedBoolUnset boxedBool = iota - boxedFalse - boxedTrue -) - -func validateInputRegion(region 
string) bool { - return regionValidationRegex.MatchString(region) -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go deleted file mode 100644 index 84922bca8..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go +++ /dev/null @@ -1,412 +0,0 @@ -//go:build codegen -// +build codegen - -package endpoints - -import ( - "fmt" - "io" - "reflect" - "strings" - "text/template" - "unicode" -) - -// A CodeGenOptions are the options for code generating the endpoints into -// Go code from the endpoints model definition. -type CodeGenOptions struct { - // Options for how the model will be decoded. - DecodeModelOptions DecodeModelOptions - - // Disables code generation of the service endpoint prefix IDs defined in - // the model. - DisableGenerateServiceIDs bool -} - -// Set combines all of the option functions together -func (d *CodeGenOptions) Set(optFns ...func(*CodeGenOptions)) { - for _, fn := range optFns { - fn(d) - } -} - -// CodeGenModel given a endpoints model file will decode it and attempt to -// generate Go code from the model definition. Error will be returned if -// the code is unable to be generated, or decoded. -func CodeGenModel(modelFile io.Reader, outFile io.Writer, optFns ...func(*CodeGenOptions)) error { - var opts CodeGenOptions - opts.Set(optFns...) - - resolver, err := DecodeModel(modelFile, func(d *DecodeModelOptions) { - *d = opts.DecodeModelOptions - }) - if err != nil { - return err - } - - v := struct { - Resolver - CodeGenOptions - }{ - Resolver: resolver, - CodeGenOptions: opts, - } - - tmpl := template.Must(template.New("tmpl").Funcs(funcMap).Parse(v3Tmpl)) - if err := tmpl.ExecuteTemplate(outFile, "defaults", v); err != nil { - return fmt.Errorf("failed to execute template, %v", err) - } - - return nil -} - -func toSymbol(v string) string { - out := []rune{} - for _, c := range strings.Title(v) { - if !(unicode.IsNumber(c) || unicode.IsLetter(c)) { - continue - } - - out = append(out, c) - } - - return string(out) -} - -func quoteString(v string) string { - return fmt.Sprintf("%q", v) -} - -func regionConstName(p, r string) string { - return toSymbol(p) + toSymbol(r) -} - -func partitionGetter(id string) string { - return fmt.Sprintf("%sPartition", toSymbol(id)) -} - -func partitionVarName(id string) string { - return fmt.Sprintf("%sPartition", strings.ToLower(toSymbol(id))) -} - -func listPartitionNames(ps partitions) string { - names := []string{} - switch len(ps) { - case 1: - return ps[0].Name - case 2: - return fmt.Sprintf("%s and %s", ps[0].Name, ps[1].Name) - default: - for i, p := range ps { - if i == len(ps)-1 { - names = append(names, "and "+p.Name) - } else { - names = append(names, p.Name) - } - } - return strings.Join(names, ", ") - } -} - -func boxedBoolIfSet(msg string, v boxedBool) string { - switch v { - case boxedTrue: - return fmt.Sprintf(msg, "boxedTrue") - case boxedFalse: - return fmt.Sprintf(msg, "boxedFalse") - default: - return "" - } -} - -func stringIfSet(msg, v string) string { - if len(v) == 0 { - return "" - } - - return fmt.Sprintf(msg, v) -} - -func stringSliceIfSet(msg string, vs []string) string { - if len(vs) == 0 { - return "" - } - - names := []string{} - for _, v := range vs { - names = append(names, `"`+v+`"`) - } - - return fmt.Sprintf(msg, strings.Join(names, ",")) -} - -func endpointIsSet(v endpoint) bool { - return !reflect.DeepEqual(v, endpoint{}) -} - -func serviceSet(ps partitions) 
map[string]struct{} { - set := map[string]struct{}{} - for _, p := range ps { - for id := range p.Services { - set[id] = struct{}{} - } - } - - return set -} - -func endpointVariantSetter(variant endpointVariant) (string, error) { - if variant == 0 { - return "0", nil - } - - if variant > (fipsVariant | dualStackVariant) { - return "", fmt.Errorf("unknown endpoint variant") - } - - var symbols []string - if variant&fipsVariant != 0 { - symbols = append(symbols, "fipsVariant") - } - if variant&dualStackVariant != 0 { - symbols = append(symbols, "dualStackVariant") - } - v := strings.Join(symbols, "|") - - return v, nil -} - -func endpointKeySetter(e endpointKey) (string, error) { - var sb strings.Builder - sb.WriteString("endpointKey{\n") - sb.WriteString(fmt.Sprintf("Region: %q,\n", e.Region)) - if e.Variant != 0 { - variantSetter, err := endpointVariantSetter(e.Variant) - if err != nil { - return "", err - } - sb.WriteString(fmt.Sprintf("Variant: %s,\n", variantSetter)) - } - sb.WriteString("}") - return sb.String(), nil -} - -func defaultKeySetter(e defaultKey) (string, error) { - var sb strings.Builder - sb.WriteString("defaultKey{\n") - if e.Variant != 0 { - variantSetter, err := endpointVariantSetter(e.Variant) - if err != nil { - return "", err - } - sb.WriteString(fmt.Sprintf("Variant: %s,\n", variantSetter)) - } - sb.WriteString("}") - return sb.String(), nil -} - -var funcMap = template.FuncMap{ - "ToSymbol": toSymbol, - "QuoteString": quoteString, - "RegionConst": regionConstName, - "PartitionGetter": partitionGetter, - "PartitionVarName": partitionVarName, - "ListPartitionNames": listPartitionNames, - "BoxedBoolIfSet": boxedBoolIfSet, - "StringIfSet": stringIfSet, - "StringSliceIfSet": stringSliceIfSet, - "EndpointIsSet": endpointIsSet, - "ServicesSet": serviceSet, - "EndpointVariantSetter": endpointVariantSetter, - "EndpointKeySetter": endpointKeySetter, - "DefaultKeySetter": defaultKeySetter, -} - -const v3Tmpl = ` -{{ define "defaults" -}} -// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT. - -package endpoints - -import ( - "regexp" -) - - {{ template "partition consts" $.Resolver }} - - {{ range $_, $partition := $.Resolver }} - {{ template "partition region consts" $partition }} - {{ end }} - - {{ if not $.DisableGenerateServiceIDs -}} - {{ template "service consts" $.Resolver }} - {{- end }} - - {{ template "endpoint resolvers" $.Resolver }} -{{- end }} - -{{ define "partition consts" }} - // Partition identifiers - const ( - {{ range $_, $p := . -}} - {{ ToSymbol $p.ID }}PartitionID = {{ QuoteString $p.ID }} // {{ $p.Name }} partition. - {{ end -}} - ) -{{- end }} - -{{ define "partition region consts" }} - // {{ .Name }} partition's regions. - const ( - {{ range $id, $region := .Regions -}} - {{ ToSymbol $id }}RegionID = {{ QuoteString $id }} // {{ $region.Description }}. - {{ end -}} - ) -{{- end }} - -{{ define "service consts" }} - // Service identifiers - const ( - {{ $serviceSet := ServicesSet . -}} - {{ range $id, $_ := $serviceSet -}} - {{ ToSymbol $id }}ServiceID = {{ QuoteString $id }} // {{ ToSymbol $id }}. - {{ end -}} - ) -{{- end }} - -{{ define "endpoint resolvers" }} - // DefaultResolver returns an Endpoint resolver that will be able - // to resolve endpoints for: {{ ListPartitionNames . }}. - // - // Use DefaultPartitions() to get the list of the default partitions. - func DefaultResolver() Resolver { - return defaultPartitions - } - - // DefaultPartitions returns a list of the partitions the SDK is bundled - // with. 
The available partitions are: {{ ListPartitionNames . }}. - // - // partitions := endpoints.DefaultPartitions - // for _, p := range partitions { - // // ... inspect partitions - // } - func DefaultPartitions() []Partition { - return defaultPartitions.Partitions() - } - - var defaultPartitions = partitions{ - {{ range $_, $partition := . -}} - {{ PartitionVarName $partition.ID }}, - {{ end }} - } - - {{ range $_, $partition := . -}} - {{ $name := PartitionGetter $partition.ID -}} - // {{ $name }} returns the Resolver for {{ $partition.Name }}. - func {{ $name }}() Partition { - return {{ PartitionVarName $partition.ID }}.Partition() - } - var {{ PartitionVarName $partition.ID }} = {{ template "gocode Partition" $partition }} - {{ end }} -{{ end }} - -{{ define "default partitions" }} - func DefaultPartitions() []Partition { - return []partition{ - {{ range $_, $partition := . -}} - // {{ ToSymbol $partition.ID}}Partition(), - {{ end }} - } - } -{{ end }} - -{{ define "gocode Partition" -}} -partition{ - {{ StringIfSet "ID: %q,\n" .ID -}} - {{ StringIfSet "Name: %q,\n" .Name -}} - {{ StringIfSet "DNSSuffix: %q,\n" .DNSSuffix -}} - RegionRegex: {{ template "gocode RegionRegex" .RegionRegex }}, - {{ if (gt (len .Defaults) 0) -}} - Defaults: {{ template "gocode Defaults" .Defaults -}}, - {{ end -}} - Regions: {{ template "gocode Regions" .Regions }}, - Services: {{ template "gocode Services" .Services }}, -} -{{- end }} - -{{ define "gocode RegionRegex" -}} -regionRegex{ - Regexp: func() *regexp.Regexp{ - reg, _ := regexp.Compile({{ QuoteString .Regexp.String }}) - return reg - }(), -} -{{- end }} - -{{ define "gocode Regions" -}} -regions{ - {{ range $id, $region := . -}} - "{{ $id }}": {{ template "gocode Region" $region }}, - {{ end -}} -} -{{- end }} - -{{ define "gocode Region" -}} -region{ - {{ StringIfSet "Description: %q,\n" .Description -}} -} -{{- end }} - -{{ define "gocode Services" -}} -services{ - {{ range $id, $service := . -}} - "{{ $id }}": {{ template "gocode Service" $service }}, - {{ end }} -} -{{- end }} - -{{ define "gocode Service" -}} -service{ - {{ StringIfSet "PartitionEndpoint: %q,\n" .PartitionEndpoint -}} - {{ BoxedBoolIfSet "IsRegionalized: %s,\n" .IsRegionalized -}} - {{ if (gt (len .Defaults) 0) -}} - Defaults: {{ template "gocode Defaults" .Defaults -}}, - {{ end -}} - {{ if .Endpoints -}} - Endpoints: {{ template "gocode Endpoints" .Endpoints }}, - {{- end }} -} -{{- end }} - -{{ define "gocode Defaults" -}} -endpointDefaults{ - {{ range $id, $endpoint := . -}} - {{ DefaultKeySetter $id }}: {{ template "gocode Endpoint" $endpoint }}, - {{ end }} -} -{{- end }} - -{{ define "gocode Endpoints" -}} -serviceEndpoints{ - {{ range $id, $endpoint := . 
-}} - {{ EndpointKeySetter $id }}: {{ template "gocode Endpoint" $endpoint }}, - {{ end }} -} -{{- end }} - -{{ define "gocode Endpoint" -}} -endpoint{ - {{ StringIfSet "Hostname: %q,\n" .Hostname -}} - {{ StringIfSet "DNSSuffix: %q,\n" .DNSSuffix -}} - {{ StringIfSet "SSLCommonName: %q,\n" .SSLCommonName -}} - {{ StringSliceIfSet "Protocols: []string{%s},\n" .Protocols -}} - {{ StringSliceIfSet "SignatureVersions: []string{%s},\n" .SignatureVersions -}} - {{ if or .CredentialScope.Region .CredentialScope.Service -}} - CredentialScope: credentialScope{ - {{ StringIfSet "Region: %q,\n" .CredentialScope.Region -}} - {{ StringIfSet "Service: %q,\n" .CredentialScope.Service -}} - }, - {{- end }} - {{ BoxedBoolIfSet "Deprecated: %s,\n" .Deprecated -}} -} -{{- end }} -` diff --git a/vendor/github.com/aws/smithy-go/CHANGELOG.md b/vendor/github.com/aws/smithy-go/CHANGELOG.md index de39171cf..4df632dce 100644 --- a/vendor/github.com/aws/smithy-go/CHANGELOG.md +++ b/vendor/github.com/aws/smithy-go/CHANGELOG.md @@ -1,3 +1,13 @@ +# Release (2025-02-17) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/smithy-go`: v1.22.3 + * **Bug Fix**: Fix HTTP metrics data race. + * **Bug Fix**: Replace usages of deprecated ioutil package. + # Release (2025-01-21) ## General Highlights diff --git a/vendor/github.com/aws/smithy-go/go_module_metadata.go b/vendor/github.com/aws/smithy-go/go_module_metadata.go index a51ceca4c..d12d95891 100644 --- a/vendor/github.com/aws/smithy-go/go_module_metadata.go +++ b/vendor/github.com/aws/smithy-go/go_module_metadata.go @@ -3,4 +3,4 @@ package smithy // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.22.2" +const goModuleVersion = "1.22.3" diff --git a/vendor/github.com/charmbracelet/bubbletea/.gitignore b/vendor/github.com/charmbracelet/bubbletea/.gitignore index 9cc52352e..abd7c0612 100644 --- a/vendor/github.com/charmbracelet/bubbletea/.gitignore +++ b/vendor/github.com/charmbracelet/bubbletea/.gitignore @@ -20,3 +20,4 @@ tutorials/basics/basics tutorials/commands/commands .idea coverage.txt +dist/ diff --git a/vendor/github.com/charmbracelet/bubbletea/.golangci-soft.yml b/vendor/github.com/charmbracelet/bubbletea/.golangci-soft.yml index 1b6824bb2..d325d4fcc 100644 --- a/vendor/github.com/charmbracelet/bubbletea/.golangci-soft.yml +++ b/vendor/github.com/charmbracelet/bubbletea/.golangci-soft.yml @@ -1,5 +1,6 @@ run: tests: false + issues-exit-code: 0 issues: include: @@ -14,16 +15,13 @@ issues: linters: enable: - # - dupl - exhaustive - # - exhaustivestruct - goconst - godot - godox - - gomnd + - mnd - gomoddirectives - goprintffuncname - # - lll - misspell - nakedret - nestif @@ -34,13 +32,9 @@ linters: # disable default linters, they are already enabled in .golangci.yml disable: - - deadcode - errcheck - gosimple - govet - ineffassign - staticcheck - - structcheck - - typecheck - unused - - varcheck diff --git a/vendor/github.com/charmbracelet/bubbletea/.golangci.yml b/vendor/github.com/charmbracelet/bubbletea/.golangci.yml index 3affce914..d6789e014 100644 --- a/vendor/github.com/charmbracelet/bubbletea/.golangci.yml +++ b/vendor/github.com/charmbracelet/bubbletea/.golangci.yml @@ -15,12 +15,10 @@ issues: linters: enable: - bodyclose - - exportloopref - gofumpt - goimports - gosec - nilerr - - predeclared - revive - rowserrcheck - sqlclosecheck diff --git a/vendor/github.com/charmbracelet/bubbletea/.goreleaser.yml 
b/vendor/github.com/charmbracelet/bubbletea/.goreleaser.yml index 40d9f298d..3353d0202 100644 --- a/vendor/github.com/charmbracelet/bubbletea/.goreleaser.yml +++ b/vendor/github.com/charmbracelet/bubbletea/.goreleaser.yml @@ -1,6 +1,5 @@ +# yaml-language-server: $schema=https://goreleaser.com/static/schema-pro.json +version: 2 includes: - from_url: url: charmbracelet/meta/main/goreleaser-lib.yaml - -# yaml-language-server: $schema=https://goreleaser.com/static/schema-pro.json - diff --git a/vendor/github.com/charmbracelet/bubbletea/CONTRIBUTING.md b/vendor/github.com/charmbracelet/bubbletea/CONTRIBUTING.md deleted file mode 100644 index 19ee18c76..000000000 --- a/vendor/github.com/charmbracelet/bubbletea/CONTRIBUTING.md +++ /dev/null @@ -1,13 +0,0 @@ -# Contributing - -Pull requests are welcome for any changes. - -Consider opening an issue for larger changes to get feedback on the idea from the team. - -If your change touches parts of the Bubble Tea renderer or internals, make sure -that all the examples in the `examples/` folder continue to run correctly. - -For commit messages, please use conventional commits[^1] to make it easier to -generate release notes. - -[^1]: https://www.conventionalcommits.org/en/v1.0.0 diff --git a/vendor/github.com/charmbracelet/bubbletea/README.md b/vendor/github.com/charmbracelet/bubbletea/README.md index 9a8d93e2a..58dd1828e 100644 --- a/vendor/github.com/charmbracelet/bubbletea/README.md +++ b/vendor/github.com/charmbracelet/bubbletea/README.md @@ -3,8 +3,8 @@

[header and badge images: Bubble Tea Title Treatment; Latest Release, GoDoc, Build Status, phorm.ai]

@@ -17,9 +17,8 @@ complex terminal applications, either inline, full-window, or a mix of both.

Bubble Tea is in use in production and includes a number of features and -performance optimizations we’ve added along the way. Among those is a standard -framerate-based renderer, a renderer for high-performance scrollable -regions which works alongside the main renderer, and mouse support. +performance optimizations we’ve added along the way. Among those is +a framerate-based renderer, mouse support, focus reporting and more. To get started, see the tutorial below, the [examples][examples], the [docs][docs], the [video tutorials][youtube] and some common [resources](#libraries-we-use-with-bubble-tea). @@ -35,7 +34,7 @@ Be sure to check out [Bubbles][bubbles], a library of common UI components for B Text Input Example from Bubbles

-*** +--- ## Tutorial @@ -49,7 +48,7 @@ By the way, the non-annotated source code for this program is available [on GitHub][tut-source]. [elm]: https://guide.elm-lang.org/architecture/ -[tut-source]:https://github.com/charmbracelet/bubbletea/tree/master/tutorials/basics +[tut-source]: https://github.com/charmbracelet/bubbletea/tree/main/tutorials/basics ### Enough! Let's get to it. @@ -61,6 +60,10 @@ import will be the Bubble Tea library, which we'll call `tea` for short. ```go package main +// These imports will be used later on the tutorial. If you save the file +// now, Go might complain they are unused, but that's fine. +// You may also need to run `go mod tidy` to download bubbletea and its +// dependencies. import ( "fmt" "os" @@ -72,9 +75,9 @@ import ( Bubble Tea programs are comprised of a **model** that describes the application state and three simple methods on that model: -* **Init**, a function that returns an initial command for the application to run. -* **Update**, a function that handles incoming events and updates the model accordingly. -* **View**, a function that renders the UI based on the data in the model. +- **Init**, a function that returns an initial command for the application to run. +- **Update**, a function that handles incoming events and updates the model accordingly. +- **View**, a function that renders the UI based on the data in the model. ### The Model @@ -254,8 +257,8 @@ look at the [Command Tutorial][cmd]. It's pretty simple. There are also several [Bubble Tea examples][examples] available and, of course, there are [Go Docs][docs]. -[cmd]: http://github.com/charmbracelet/bubbletea/tree/master/tutorials/commands/ -[examples]: http://github.com/charmbracelet/bubbletea/tree/master/examples +[cmd]: https://github.com/charmbracelet/bubbletea/tree/main/tutorials/commands/ +[examples]: https://github.com/charmbracelet/bubbletea/tree/main/examples [docs]: https://pkg.go.dev/github.com/charmbracelet/bubbletea?tab=doc ## Debugging @@ -275,11 +278,11 @@ $ dlv connect 127.0.0.1:43000 ``` If you do not explicitly supply the `--listen` flag, the port used will vary -per run, so passing this in makes the debugger easier to use from a script -or your IDE of choice. +per run, so passing this in makes the debugger easier to use from a script +or your IDE of choice. -Additionally, we pass in `--api-version=2` because delve defaults to version 1 -for backwards compatibility reasons. However, delve recommends using version 2 +Additionally, we pass in `--api-version=2` because delve defaults to version 1 +for backwards compatibility reasons. However, delve recommends using version 2 for all new development and some clients may no longer work with version 1. For more information, see the [Delve documentation](https://github.com/go-delve/delve/tree/master/Documentation/api). @@ -305,127 +308,68 @@ your program in another window. 
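To tie the tutorial excerpt above together, here is the smallest runnable shape of the Model/Init/Update/View architecture it describes — an illustrative sketch, not code from this patch; the counter behavior is invented for the example:

```go
package main

import (
	"fmt"
	"os"

	tea "github.com/charmbracelet/bubbletea"
)

type model int // application state: a simple counter

// Init returns no initial command.
func (m model) Init() tea.Cmd { return nil }

// Update handles incoming events and updates the model accordingly.
func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
	switch msg := msg.(type) {
	case tea.KeyMsg:
		switch msg.String() {
		case "up":
			return m + 1, nil
		case "q", "ctrl+c":
			return m, tea.Quit
		}
	}
	return m, nil
}

// View renders the UI based on the data in the model.
func (m model) View() string {
	return fmt.Sprintf("count: %d  (up to increment, q to quit)\n", m)
}

func main() {
	if _, err := tea.NewProgram(model(0)).Run(); err != nil {
		fmt.Println("error:", err)
		os.Exit(1)
	}
}
```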
## Libraries we use with Bubble Tea -* [Bubbles][bubbles]: Common Bubble Tea components such as text inputs, viewports, spinners and so on -* [Lip Gloss][lipgloss]: Style, format and layout tools for terminal applications -* [Harmonica][harmonica]: A spring animation library for smooth, natural motion -* [BubbleZone][bubblezone]: Easy mouse event tracking for Bubble Tea components -* [ntcharts][ntcharts]: A terminal charting library built for Bubble Tea and [Lip Gloss][lipgloss] -* [Termenv][termenv]: Advanced ANSI styling for terminal applications -* [Reflow][reflow]: Advanced ANSI-aware methods for working with text +- [Bubbles][bubbles]: Common Bubble Tea components such as text inputs, viewports, spinners and so on +- [Lip Gloss][lipgloss]: Style, format and layout tools for terminal applications +- [Harmonica][harmonica]: A spring animation library for smooth, natural motion +- [BubbleZone][bubblezone]: Easy mouse event tracking for Bubble Tea components +- [ntcharts][ntcharts]: A terminal charting library built for Bubble Tea and [Lip Gloss][lipgloss] [bubbles]: https://github.com/charmbracelet/bubbles [lipgloss]: https://github.com/charmbracelet/lipgloss [harmonica]: https://github.com/charmbracelet/harmonica [bubblezone]: https://github.com/lrstanley/bubblezone [ntcharts]: https://github.com/NimbleMarkets/ntcharts -[termenv]: https://github.com/muesli/termenv -[reflow]: https://github.com/muesli/reflow ## Bubble Tea in the Wild -For some Bubble Tea programs in production, see: - -* [ASCII Movie](https://github.com/gabe565/ascii-movie): a Star Wars ASCII art movie player -* [AT CLI](https://github.com/daskycodes/at_cli): execute AT Commands via serial port connections -* [Aztify](https://github.com/Azure/aztfy): bring Microsoft Azure resources under Terraform -* [brows](https://github.com/rubysolo/brows): a GitHub release browser -* [Canard](https://github.com/mrusme/canard): an RSS client -* [charm](https://github.com/charmbracelet/charm): the official Charm user account manager -* [chatgpt-cli](https://github.com/j178/chatgpt): a CLI for ChatGPT -* [chatgpt-tui](https://github.com/tearingItUp786/chatgpt-tui): a TUI for ChatGPT with SQLite sessions -* [ChatGPTUI](https://github.com/dwisiswant0/chatgptui): a TUI for ChatGPT -* [chezmoi](https://github.com/twpayne/chezmoi): securely manage your dotfiles across multiple machines -* [chip-8](https://github.com/braheezy/chip-8): a CHIP-8 interpreter -* [chtop](https://github.com/chhetripradeep/chtop): monitor your ClickHouse node without leaving the terminal -* [circumflex](https://github.com/bensadeh/circumflex): read Hacker News in the terminal -* [clidle](https://github.com/ajeetdsouza/clidle): a Wordle clone -* [cLive](https://github.com/koki-develop/clive): automate terminal operations and view them live in a browser -* [container-canary](https://github.com/NVIDIA/container-canary): a container validator -* [countdown](https://github.com/aldernero/countdown): a multi-event countdown timer -* [CRT](https://github.com/BigJk/crt): a simple terminal emulator for running Bubble Tea in a dedicated window, with optional shaders -* [cueitup](https://github.com/dhth/cueitup): inspect messages in an AWS SQS queue in a simple and deliberate manner -* [Daytona](https://github.com/daytonaio/daytona): an development environment manager -* [dns53](https://github.com/purpleclay/dns53): dynamic DNS with Amazon Route53; expose your EC2 quickly, securely and privately -* [eks-node-viewer](https://github.com/awslabs/eks-node-viewer): a tool for 
visualizing dynamic node usage within an EKS cluster -* [End Of Eden](https://github.com/BigJk/end_of_eden): a "Slay the Spire"-like, roguelike deck-builder game -* [enola](https://github.com/sherlock-project/enola): find social media accounts by username across social networks -* [flapioca](https://github.com/kbrgl/flapioca): Flappy Bird on the CLI! -* [fm](https://github.com/knipferrc/fm): a terminal-based file manager -* [fork-cleaner](https://github.com/caarlos0/fork-cleaner): clean up old and inactive forks in your GitHub account -* [fractals-cli](https://github.com/MicheleFiladelfia/fractals-cli): a multiplatform terminal fractal explorer -* [fztea](https://github.com/jon4hz/fztea): a Flipper Zero TUI -* [gama](https://github.com/termkit/gama): manage GitHub Actions from the terminal -* [gambit](https://github.com/maaslalani/gambit): chess in the terminal -* [gembro](https://git.sr.ht/~rafael/gembro): a mouse-driven Gemini browser -* [gh-b](https://github.com/joaom00/gh-b): a GitHub CLI extension for managing branches -* [gh-dash](https://www.github.com/dlvhdr/gh-dash): a GitHub CLI extension for PRs and issues -* [gitflow-toolkit](https://github.com/mritd/gitflow-toolkit): a GitFlow submission tool -* [Glow](https://github.com/charmbracelet/glow): a markdown reader, browser, and online markdown stash -* [go-sweep](https://github.com/maxpaulus43/go-sweep): Minesweeper in the terminal -* [gocovsh](https://github.com/orlangure/gocovsh): explore Go coverage reports from the CLI -* [got](https://github.com/fedeztk/got): a simple translator and text-to-speech app built on simplytranslate's APIs -* [gum](https://github.com/charmbracelet/gum): interactivity and styling for shells and shell scripts -* [hiSHtory](https://github.com/ddworken/hishtory): your shell history in context: synced, and queryable -* [httpit](https://github.com/gonetx/httpit): a rapid http(s) benchmark tool -* [Huh?](https://github.com/charmbracelet/huh): an interactive prompt and form toolkit -* [IDNT](https://github.com/r-darwish/idnt): a batch software uninstaller -* [json-log-viewer](https://github.com/hedhyw/json-log-viewer): an interactive JSON log viewer -* [kboard](https://github.com/CamiloGarciaLaRotta/kboard): a typing game -* [kplay](https://github.com/dhth/kplay): inspect messages in a Kafka topic -* [laboon](https://github.com/arisnacg/laboon): a Docker-desktop-style container manager -* [mc](https://github.com/minio/mc): the official [MinIO](https://min.io) client -* [mergestat](https://github.com/mergestat/mergestat): run SQL queries on git repositories -* [meteor](https://github.com/stefanlogue/meteor): a highly customizable conventional commit message tool -* [mods](https://github.com/charmbracelet/mods): AI on the CLI, built for pipelines -* [nachrichten](https://github.com/zMoooooritz/nachrichten): access up-to-date news in German provided by the [Tagesschau](https://www.tagesschau.de/) -* [Neon Modem Overdrive](https://github.com/mrusme/neonmodem): a BBS-style TUI client for Discourse, Lemmy, Lobste.rs and Hacker News -* [nom](https://github.com/guyfedwards/nom): an RSS reader and manager -* [Noted](https://github.com/torbratsberg/noted): a note viewer and manager -* [outtasync](https://github.com/dhth/outtasync): identify CloudFormation stacks that are out of sync with their template files -* [pathos](https://github.com/chip/pathos): a PATH environment variable editor -* [Plandex](https://github.com/plandex-ai/plandex): a terminal-based AI coding engine for complex tasks -* 
[portal](https://github.com/ZinoKader/portal): secure transfers between computers -* [prs](https://github.com/dhth/prs): stay up to date with your PRs -* [puffin](https://github.com/siddhantac/puffin): a TUI for hledger to manage your finances -* [pug](https://github.com/leg100/pug): terraform task manager -* [punchout](https://github.com/dhth/punchout): takes the suck out of logging time on JIRA -* [redis-viewer](https://github.com/SaltFishPr/redis-viewer): a Redis database browser -* [redis_tui](https://github.com/mat2cc/redis_tui): a Redis database browser -* [schemas](https://github.com/dhth/schemas): lets you inspect postgres schemas in the terminal -* [scrabbler](https://github.com/wI2L/scrabbler): an automatic draw tool for your duplicate Scrabble games -* [sku](https://github.com/fedeztk/sku): Sudoku on the CLI -* [Slides](https://github.com/maaslalani/slides): a markdown-based presentation tool -* [SlurmCommander](https://github.com/CLIP-HPC/SlurmCommander): a Slurm workload manager -* [Soft Serve](https://github.com/charmbracelet/soft-serve): a command-line-first Git server that runs a TUI over SSH -* [solitaire-tui](https://github.com/brianstrauch/solitaire-tui): Klondike Solitaire for the terminal -* [StormForge Optimize Controller](https://github.com/thestormforge/optimize-controller): a tool for experimenting with application configurations in Kubernetes -* [Storydb](https://github.com/grrlopes/storydb): an improved bash/zsh-style ctrl+r command history finder -* [STTG](https://github.com/wille1101/sttg): a teletext client for SVT, Sweden’s national public television station -* [sttr](https://github.com/abhimanyu003/sttr): a general-purpose text transformer -* [superfile](https://github.com/MHNightCat/superfile) a fancy, modern terminal-based file manager -* [tasktimer](https://github.com/caarlos0/tasktimer): a dead-simple task timer -* [termdbms](https://github.com/mathaou/termdbms): a keyboard and mouse driven database browser -* [tgpt](https://github.com/aandrew-me/tgpt): conversational AI for the CLI; no API keys necessary -* [ticker](https://github.com/achannarasappa/ticker): a terminal stock viewer and stock position tracker -* [trainer](https://github.com/rusinikita/trainer): a Go concurrency coding interview simulator with learning materials -* [tran](https://github.com/abdfnx/tran): securely transfer stuff between computers (based on [portal](https://github.com/ZinoKader/portal)) -* [Trufflehog](https://github.com/trufflesecurity/trufflehog): find leaked credentials -* [Typer](https://github.com/maaslalani/typer): a typing test -* [typioca](https://github.com/bloznelis/typioca): a typing test -* [tz](https://github.com/oz/tz): a scheduling aid for people in multiple time zones -* [ugm](https://github.com/ariasmn/ugm): a unix user and group browser -* [walk](https://github.com/antonmedv/walk): a terminal navigator -* [wander](https://github.com/robinovitch61/wander): a HashiCorp Nomad terminal client -* [WG Commander](https://github.com/AndrianBdn/wg-cmd): a TUI for a simple WireGuard VPN setup -* [wishlist](https://github.com/charmbracelet/wishlist): an SSH directory +There are over [10,000 applications](https://github.com/charmbracelet/bubbletea/network/dependents) built with Bubble Tea! Here are a handful of ’em. 
+ +### Staff favourites + +- [chezmoi](https://github.com/twpayne/chezmoi): securely manage your dotfiles across multiple machines +- [circumflex](https://github.com/bensadeh/circumflex): read Hacker News in the terminal +- [gh-dash](https://www.github.com/dlvhdr/gh-dash): a GitHub CLI extension for PRs and issues +- [Tetrigo](https://github.com/Broderick-Westrope/tetrigo): Tetris in the terminal +- [Signls](https://github.com/emprcl/signls): a generative midi sequencer designed for composition and live performance +- [Superfile](https://github.com/yorukot/superfile): a super file manager + +### In Industry + +- Microsoft Azure – [Aztify](https://github.com/Azure/aztfy): bring Microsoft Azure resources under Terraform +- Daytona – [Daytona](https://github.com/daytonaio/daytona): open source dev environment manager +- Cockroach Labs – [CockroachDB](https://github.com/cockroachdb/cockroach): a cloud-native, high-availability distributed SQL database +- Truffle Security Co. – [Trufflehog](https://github.com/trufflesecurity/trufflehog): find leaked credentials +- NVIDIA – [container-canary](https://github.com/NVIDIA/container-canary): a container validator +- AWS – [eks-node-viewer](https://github.com/awslabs/eks-node-viewer): a tool for visualizing dynamic node usage within an EKS cluster +- MinIO – [mc](https://github.com/minio/mc): the official [MinIO](https://min.io) client +- Ubuntu – [Authd](https://github.com/ubuntu/authd): an authentication daemon for cloud-based identity providers + +### Charm stuff + +- [Glow](https://github.com/charmbracelet/glow): a markdown reader, browser, and online markdown stash +- [Huh?](https://github.com/charmbracelet/huh): an interactive prompt and form toolkit +- [Mods](https://github.com/charmbracelet/mods): AI on the CLI, built for pipelines +- [Wishlist](https://github.com/charmbracelet/wishlist): an SSH directory (and bastion!) + +### There’s so much more where that came from + +For more applications built with Bubble Tea see [Charm & Friends][community]. +Is there something cool you made with Bubble Tea you want to share? [PRs][community] are +welcome! + +## Contributing + +See [contributing][contribute]. + +[contribute]: https://github.com/charmbracelet/bubbletea/contribute ## Feedback -We'd love to hear your thoughts on this project. Feel free to drop us a note! +We’d love to hear your thoughts on this project. Feel free to drop us a note! -* [Twitter](https://twitter.com/charmcli) -* [The Fediverse](https://mastodon.social/@charmcli) -* [Discord](https://charm.sh/chat) +- [Twitter](https://twitter.com/charmcli) +- [The Fediverse](https://mastodon.social/@charmcli) +- [Discord](https://charm.sh/chat) ## Acknowledgments @@ -437,12 +381,13 @@ of days past. [elm]: https://guide.elm-lang.org/architecture/ [gotea]: https://github.com/tj/go-tea [zb]: https://de.wikipedia.org/wiki/Zeichenorientierte_Benutzerschnittstelle +[community]: https://github.com/charm-and-friends/charm-in-the-wild ## License -[MIT](https://github.com/charmbracelet/bubbletea/raw/master/LICENSE) +[MIT](https://github.com/charmbracelet/bubbletea/raw/main/LICENSE) -*** +--- Part of [Charm](https://charm.sh). 
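As a taste of how the companion libraries listed above under "Libraries we use with Bubble Tea" are typically combined with Bubble Tea, here is a small Lip Gloss sketch. The calls follow the Lip Gloss README and are illustrative, not part of this patch:

```go
package main

import (
	"fmt"

	"github.com/charmbracelet/lipgloss"
)

func main() {
	// Build a reusable style: bold pink text with two cells of
	// horizontal padding.
	style := lipgloss.NewStyle().
		Bold(true).
		Foreground(lipgloss.Color("205")).
		Padding(0, 2)

	// In a Bubble Tea program this string would typically be returned
	// from a model's View method.
	fmt.Println(style.Render("Styled with Lip Gloss"))
}
```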
diff --git a/vendor/github.com/charmbracelet/bubbletea/inputreader_windows.go b/vendor/github.com/charmbracelet/bubbletea/inputreader_windows.go index 449df4790..dd5277cf0 100644 --- a/vendor/github.com/charmbracelet/bubbletea/inputreader_windows.go +++ b/vendor/github.com/charmbracelet/bubbletea/inputreader_windows.go @@ -57,7 +57,7 @@ func newInputReader(r io.Reader) (cancelreader.CancelReader, error) { func (r *conInputReader) Cancel() bool { r.setCanceled() - return windows.CancelIo(r.conin) == nil + return windows.CancelIoEx(r.conin, nil) == nil || windows.CancelIo(r.conin) == nil } // Close implements cancelreader.CancelReader. @@ -73,8 +73,11 @@ func (r *conInputReader) Close() error { } // Read implements cancelreader.CancelReader. -func (*conInputReader) Read(_ []byte) (n int, err error) { - return 0, nil +func (r *conInputReader) Read(_ []byte) (n int, err error) { + if r.isCanceled() { + err = cancelreader.ErrCanceled + } + return } func prepareConsole(input windows.Handle, modes ...uint32) (originalMode uint32, err error) { @@ -105,3 +108,10 @@ func (c *cancelMixin) setCanceled() { c.unsafeCanceled = true } + +func (c *cancelMixin) isCanceled() bool { + c.lock.Lock() + defer c.lock.Unlock() + + return c.unsafeCanceled +} diff --git a/vendor/github.com/charmbracelet/bubbletea/key_windows.go b/vendor/github.com/charmbracelet/bubbletea/key_windows.go index b693efd65..d59ff1c46 100644 --- a/vendor/github.com/charmbracelet/bubbletea/key_windows.go +++ b/vendor/github.com/charmbracelet/bubbletea/key_windows.go @@ -10,23 +10,26 @@ import ( "github.com/erikgeiser/coninput" localereader "github.com/mattn/go-localereader" - "golang.org/x/sys/windows" + "github.com/muesli/cancelreader" ) func readInputs(ctx context.Context, msgs chan<- Msg, input io.Reader) error { if coninReader, ok := input.(*conInputReader); ok { - return readConInputs(ctx, msgs, coninReader.conin) + return readConInputs(ctx, msgs, coninReader) } return readAnsiInputs(ctx, msgs, localereader.NewReader(input)) } -func readConInputs(ctx context.Context, msgsch chan<- Msg, con windows.Handle) error { +func readConInputs(ctx context.Context, msgsch chan<- Msg, con *conInputReader) error { var ps coninput.ButtonState // keep track of previous mouse state var ws coninput.WindowBufferSizeEventRecord // keep track of the last window size event for { - events, err := coninput.ReadNConsoleInputs(con, 16) + events, err := coninput.ReadNConsoleInputs(con.conin, 16) if err != nil { + if con.isCanceled() { + return cancelreader.ErrCanceled + } return fmt.Errorf("read coninput events: %w", err) } @@ -274,7 +277,11 @@ func keyType(e coninput.KeyEventRecord) KeyType { case coninput.VK_DELETE: return KeyDelete default: - if e.ControlKeyState&(coninput.LEFT_CTRL_PRESSED|coninput.RIGHT_CTRL_PRESSED) == 0 { + switch { + case e.ControlKeyState.Contains(coninput.LEFT_CTRL_PRESSED) && e.ControlKeyState.Contains(coninput.RIGHT_ALT_PRESSED): + // AltGr is pressed, then it's a rune. 
+		fallthrough
+	case !e.ControlKeyState.Contains(coninput.LEFT_CTRL_PRESSED) && !e.ControlKeyState.Contains(coninput.RIGHT_CTRL_PRESSED):
 		return KeyRunes
 	}

@@ -334,7 +341,7 @@ func keyType(e coninput.KeyEventRecord) KeyType {
 	case '\x1a':
 		return KeyCtrlZ
 	case '\x1b':
-		return KeyCtrlCloseBracket
+		return KeyCtrlOpenBracket // KeyEscape
 	case '\x1c':
 		return KeyCtrlBackslash
 	case '\x1f':
@@ -344,6 +351,8 @@ func keyType(e coninput.KeyEventRecord) KeyType {
 	switch code {
 	case coninput.VK_OEM_4:
 		return KeyCtrlOpenBracket
+	case coninput.VK_OEM_6:
+		return KeyCtrlCloseBracket
 	}

 	return KeyRunes
diff --git a/vendor/github.com/charmbracelet/bubbletea/options.go b/vendor/github.com/charmbracelet/bubbletea/options.go
index 4a56fc811..c509353b1 100644
--- a/vendor/github.com/charmbracelet/bubbletea/options.go
+++ b/vendor/github.com/charmbracelet/bubbletea/options.go
@@ -185,6 +185,9 @@ func WithoutRenderer() ProgramOption {
 //
 // This feature is provisional, and may be changed or removed in a future version
 // of this package.
+//
+// Deprecated: this incurs a noticeable performance hit. A future release will
+// optimize ANSI automatically without the performance penalty.
 func WithANSICompressor() ProgramOption {
 	return func(p *Program) {
 		p.startupOptions |= withANSICompressor
@@ -235,9 +238,13 @@ func WithFPS(fps int) ProgramOption {
 	}
 }

-// WithReportFocus enables reporting when the terminal gains and lost focus.
+// WithReportFocus enables reporting when the terminal gains and loses
+// focus. When this is enabled, [FocusMsg] and [BlurMsg] messages will be sent
+// to your Update method.
 //
-// You can then check for FocusMsg and BlurMsg in your model's Update method.
+// Note that while most terminals and multiplexers support focus reporting,
+// some do not. Also note that tmux needs to be configured to report focus
+// events.
 func WithReportFocus() ProgramOption {
 	return func(p *Program) {
 		p.startupOptions |= withReportFocus
diff --git a/vendor/github.com/charmbracelet/bubbletea/standard_renderer.go b/vendor/github.com/charmbracelet/bubbletea/standard_renderer.go
index 59448e1d4..45e8c82d2 100644
--- a/vendor/github.com/charmbracelet/bubbletea/standard_renderer.go
+++ b/vendor/github.com/charmbracelet/bubbletea/standard_renderer.go
@@ -34,7 +34,9 @@ type standardRenderer struct {
 	ticker *time.Ticker
 	done chan struct{}
 	lastRender string
+	lastRenderedLines []string
 	linesRendered int
+	altLinesRendered int
 	useANSICompressor bool

 	once sync.Once
@@ -161,13 +163,20 @@ func (r *standardRenderer) flush() {
 	defer r.mtx.Unlock()

 	if r.buf.Len() == 0 || r.buf.String() == r.lastRender {
-		// Nothing to do
+		// Nothing to do.
 		return
 	}

-	// Output buffer
+	// Output buffer.
 	buf := &bytes.Buffer{}

+	// Move to the beginning of the section that we rendered.
+	if r.altScreenActive {
+		buf.WriteString(ansi.CursorHomePosition)
+	} else if r.linesRendered > 1 {
+		buf.WriteString(ansi.CursorUp(r.linesRendered - 1))
+	}
+
 	newLines := strings.Split(r.buf.String(), "\n")

 	// If we know the output's height, we can use it to determine how many
@@ -178,97 +187,87 @@ func (r *standardRenderer) flush() {
 		newLines = newLines[len(newLines)-r.height:]
 	}

-	numLinesThisFlush := len(newLines)
 	oldLines := strings.Split(r.lastRender, "\n")
-	skipLines := make(map[int]struct{})
 	flushQueuedMessages := len(r.queuedMessageLines) > 0 && !r.altScreenActive

-	// Clear any lines we painted in the last render.
- if r.linesRendered > 0 { - for i := r.linesRendered - 1; i > 0; i-- { - // if we are clearing queued messages, we want to clear all lines, since - // printing messages allows for native terminal word-wrap, we - // don't have control over the queued lines - if flushQueuedMessages { - buf.WriteString(ansi.EraseEntireLine) - } else if (len(newLines) <= len(oldLines)) && (len(newLines) > i && len(oldLines) > i) && (newLines[i] == oldLines[i]) { - // If the number of lines we want to render hasn't increased and - // new line is the same as the old line we can skip rendering for - // this line as a performance optimization. - skipLines[i] = struct{}{} - } else if _, exists := r.ignoreLines[i]; !exists { - buf.WriteString(ansi.EraseEntireLine) - } - - buf.WriteString(ansi.CursorUp1) - } - - if _, exists := r.ignoreLines[0]; !exists { - // We need to return to the start of the line here to properly - // erase it. Going back the entire width of the terminal will - // usually be farther than we need to go, but terminal emulators - // will stop the cursor at the start of the line as a rule. - // - // We use this sequence in particular because it's part of the ANSI - // standard (whereas others are proprietary to, say, VT100/VT52). - // If cursor previous line (ESC[ + + F) were better supported - // we could use that above to eliminate this step. - buf.WriteString(ansi.CursorLeft(r.width)) - buf.WriteString(ansi.EraseEntireLine) - } - } - - // Merge the set of lines we're skipping as a rendering optimization with - // the set of lines we've explicitly asked the renderer to ignore. - for k, v := range r.ignoreLines { - skipLines[k] = v - } - if flushQueuedMessages { - // Dump the lines we've queued up for printing + // Dump the lines we've queued up for printing. for _, line := range r.queuedMessageLines { + if ansi.StringWidth(line) < r.width { + // We only erase the rest of the line when the line is shorter than + // the width of the terminal. When the cursor reaches the end of + // the line, any escape sequences that follow will only affect the + // last cell of the line. + + // Removing previously rendered content at the end of line. + line = line + ansi.EraseLineRight + } + _, _ = buf.WriteString(line) _, _ = buf.WriteString("\r\n") } - // clear the queued message lines + // Clear the queued message lines. r.queuedMessageLines = []string{} } - // Paint new lines + // Paint new lines. for i := 0; i < len(newLines); i++ { - if _, skip := skipLines[i]; skip { + canSkip := !flushQueuedMessages && // Queuing messages triggers repaint -> we don't have access to previous frame content. + len(r.lastRenderedLines) > i && r.lastRenderedLines[i] == newLines[i] // Previously rendered line is the same. + + if _, ignore := r.ignoreLines[i]; ignore || canSkip { // Unless this is the last line, move the cursor down. if i < len(newLines)-1 { - buf.WriteString(ansi.CursorDown1) - } - } else { - if i == 0 && r.lastRender == "" { - // On first render, reset the cursor to the start of the line - // before writing anything. - buf.WriteByte('\r') + buf.WriteByte('\n') } + continue + } - line := newLines[i] - - // Truncate lines wider than the width of the window to avoid - // wrapping, which will mess up rendering. If we don't have the - // width of the window this will be ignored. - // - // Note that on Windows we only get the width of the window on - // program initialization, so after a resize this won't perform - // correctly (signal SIGWINCH is not supported on Windows). 
-			if r.width > 0 {
-				line = ansi.Truncate(line, r.width, "")
-			}

+		if i == 0 && r.lastRender == "" {
+			// On first render, reset the cursor to the start of the line
+			// before writing anything.
+			buf.WriteByte('\r')
+		}

-			_, _ = buf.WriteString(line)
+		line := newLines[i]
+
+		// Truncate lines wider than the width of the window to avoid
+		// wrapping, which will mess up rendering. If we don't have the
+		// width of the window this will be ignored.
+		//
+		// Note that on Windows we only get the width of the window on
+		// program initialization, so after a resize this won't perform
+		// correctly (signal SIGWINCH is not supported on Windows).
+		if r.width > 0 {
+			line = ansi.Truncate(line, r.width, "")
+		}

-			if i < len(newLines)-1 {
-				_, _ = buf.WriteString("\r\n")
-			}
+		if ansi.StringWidth(line) < r.width {
+			// We only erase the rest of the line when the line is shorter than
+			// the width of the terminal. When the cursor reaches the end of
+			// the line, any escape sequences that follow will only affect the
+			// last cell of the line.
+
+			// Removing previously rendered content at the end of the line.
+			line = line + ansi.EraseLineRight
+		}
+
+		_, _ = buf.WriteString(line)
+
+		if i < len(newLines)-1 {
+			_, _ = buf.WriteString("\r\n")
+		}
 	}
-	r.linesRendered = numLinesThisFlush
+
+	// Clear leftover content from the last render.
+	if r.lastLinesRendered() > len(newLines) {
+		buf.WriteString(ansi.EraseScreenBelow)
+	}
+
+	if r.altScreenActive {
+		r.altLinesRendered = len(newLines)
+	} else {
+		r.linesRendered = len(newLines)
+	}

 	// Make sure the cursor is at the start of the last line to keep rendering
 	// behavior consistent.
@@ -276,16 +275,29 @@
 		// This case fixes a bug in macOS terminal. In other terminals the
 		// other case seems to do the job regardless of whether or not we're
 		// using the full terminal window.
-		buf.WriteString(ansi.MoveCursor(r.linesRendered, 0))
+		buf.WriteString(ansi.CursorPosition(0, len(newLines)))
 	} else {
-		buf.WriteString(ansi.CursorLeft(r.width))
+		buf.WriteString(ansi.CursorBackward(r.width))
 	}

 	_, _ = r.out.Write(buf.Bytes())
 	r.lastRender = r.buf.String()
+
+	// Save previously rendered lines for comparison in the next render. If we
+	// don't do this, we can't skip rendering lines that haven't changed.
+	// See https://github.com/charmbracelet/bubbletea/pull/1233
+	r.lastRenderedLines = newLines
 	r.buf.Reset()
 }

+// lastLinesRendered returns the number of lines rendered in the most recent
+// render.
+func (r *standardRenderer) lastLinesRendered() int {
+	if r.altScreenActive {
+		return r.altLinesRendered
+	}
+	return r.linesRendered
+}
+
 // write writes to the internal buffer. The buffer will be outputted via the
 // ticker which calls flush().
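The `lastRenderedLines` bookkeeping introduced above is what drives the skip optimization: a row is repainted only when it differs from the same row in the previous frame. A stripped-down sketch of that comparison, with illustrative names rather than the renderer's actual fields:

```go
package main

import "fmt"

// dirtyRows reports which rows of next differ from prev and therefore need
// repainting; every other row can be skipped by the renderer.
func dirtyRows(prev, next []string) []int {
	var dirty []int
	for i, line := range next {
		if i >= len(prev) || prev[i] != line {
			dirty = append(dirty, i)
		}
	}
	return dirty
}

func main() {
	prev := []string{"header", "count: 1", "footer"}
	next := []string{"header", "count: 2", "footer"}
	fmt.Println(dirtyRows(prev, next)) // [1]: only the middle row changed
}
```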
func (r *standardRenderer) write(s string) { @@ -306,14 +318,15 @@ func (r *standardRenderer) write(s string) { func (r *standardRenderer) repaint() { r.lastRender = "" + r.lastRenderedLines = nil } func (r *standardRenderer) clearScreen() { r.mtx.Lock() defer r.mtx.Unlock() - r.execute(ansi.EraseEntireDisplay) - r.execute(ansi.MoveCursorOrigin) + r.execute(ansi.EraseEntireScreen) + r.execute(ansi.CursorHomePosition) r.repaint() } @@ -334,7 +347,7 @@ func (r *standardRenderer) enterAltScreen() { } r.altScreenActive = true - r.execute(ansi.EnableAltScreenBuffer) + r.execute(ansi.SetAltScreenSaveCursorMode) // Ensure that the terminal is cleared, even when it doesn't support // alt screen (or alt screen support is disabled, like GNU screen by @@ -342,8 +355,8 @@ func (r *standardRenderer) enterAltScreen() { // // Note: we can't use r.clearScreen() here because the mutex is already // locked. - r.execute(ansi.EraseEntireDisplay) - r.execute(ansi.MoveCursorOrigin) + r.execute(ansi.EraseEntireScreen) + r.execute(ansi.CursorHomePosition) // cmd.exe and other terminals keep separate cursor states for the AltScreen // and the main buffer. We have to explicitly reset the cursor visibility @@ -354,6 +367,9 @@ func (r *standardRenderer) enterAltScreen() { r.execute(ansi.ShowCursor) } + // Entering the alt screen resets the lines rendered count. + r.altLinesRendered = 0 + r.repaint() } @@ -366,7 +382,7 @@ func (r *standardRenderer) exitAltScreen() { } r.altScreenActive = false - r.execute(ansi.DisableAltScreenBuffer) + r.execute(ansi.ResetAltScreenSaveCursorMode) // cmd.exe and other terminals keep separate cursor states for the AltScreen // and the main buffer. We have to explicitly reset the cursor visibility @@ -400,49 +416,49 @@ func (r *standardRenderer) enableMouseCellMotion() { r.mtx.Lock() defer r.mtx.Unlock() - r.execute(ansi.EnableMouseCellMotion) + r.execute(ansi.SetButtonEventMouseMode) } func (r *standardRenderer) disableMouseCellMotion() { r.mtx.Lock() defer r.mtx.Unlock() - r.execute(ansi.DisableMouseCellMotion) + r.execute(ansi.ResetButtonEventMouseMode) } func (r *standardRenderer) enableMouseAllMotion() { r.mtx.Lock() defer r.mtx.Unlock() - r.execute(ansi.EnableMouseAllMotion) + r.execute(ansi.SetAnyEventMouseMode) } func (r *standardRenderer) disableMouseAllMotion() { r.mtx.Lock() defer r.mtx.Unlock() - r.execute(ansi.DisableMouseAllMotion) + r.execute(ansi.ResetAnyEventMouseMode) } func (r *standardRenderer) enableMouseSGRMode() { r.mtx.Lock() defer r.mtx.Unlock() - r.execute(ansi.EnableMouseSgrExt) + r.execute(ansi.SetSgrExtMouseMode) } func (r *standardRenderer) disableMouseSGRMode() { r.mtx.Lock() defer r.mtx.Unlock() - r.execute(ansi.DisableMouseSgrExt) + r.execute(ansi.ResetSgrExtMouseMode) } func (r *standardRenderer) enableBracketedPaste() { r.mtx.Lock() defer r.mtx.Unlock() - r.execute(ansi.EnableBracketedPaste) + r.execute(ansi.SetBracketedPasteMode) r.bpActive = true } @@ -450,7 +466,7 @@ func (r *standardRenderer) disableBracketedPaste() { r.mtx.Lock() defer r.mtx.Unlock() - r.execute(ansi.DisableBracketedPaste) + r.execute(ansi.ResetBracketedPasteMode) r.bpActive = false } @@ -465,7 +481,7 @@ func (r *standardRenderer) enableReportFocus() { r.mtx.Lock() defer r.mtx.Unlock() - r.execute(ansi.EnableReportFocus) + r.execute(ansi.SetFocusEventMode) r.reportingFocus = true } @@ -473,7 +489,7 @@ func (r *standardRenderer) disableReportFocus() { r.mtx.Lock() defer r.mtx.Unlock() - r.execute(ansi.DisableReportFocus) + r.execute(ansi.ResetFocusEventMode) r.reportingFocus = false 
} @@ -494,7 +510,7 @@ func (r *standardRenderer) setWindowTitle(title string) { func (r *standardRenderer) setIgnoredLines(from int, to int) { // Lock if we're going to be clearing some lines since we don't want // anything jacking our cursor. - if r.linesRendered > 0 { + if r.lastLinesRendered() > 0 { r.mtx.Lock() defer r.mtx.Unlock() } @@ -507,16 +523,17 @@ func (r *standardRenderer) setIgnoredLines(from int, to int) { } // Erase ignored lines - if r.linesRendered > 0 { + lastLinesRendered := r.lastLinesRendered() + if lastLinesRendered > 0 { buf := &bytes.Buffer{} - for i := r.linesRendered - 1; i >= 0; i-- { + for i := lastLinesRendered - 1; i >= 0; i-- { if _, exists := r.ignoreLines[i]; exists { buf.WriteString(ansi.EraseEntireLine) } - buf.WriteString(ansi.CursorUp1) + buf.WriteString(ansi.CUU1) } - buf.WriteString(ansi.MoveCursor(r.linesRendered, 0)) // put cursor back + buf.WriteString(ansi.CursorPosition(0, lastLinesRendered)) // put cursor back _, _ = r.out.Write(buf.Bytes()) } } @@ -546,20 +563,23 @@ func (r *standardRenderer) clearIgnoredLines() { // use in high-performance rendering, such as a pager that could potentially // be rendering very complicated ansi. In cases where the content is simpler // standard Bubble Tea rendering should suffice. +// +// Deprecated: This option is deprecated and will be removed in a future +// version of this package. func (r *standardRenderer) insertTop(lines []string, topBoundary, bottomBoundary int) { r.mtx.Lock() defer r.mtx.Unlock() buf := &bytes.Buffer{} - buf.WriteString(ansi.SetScrollingRegion(topBoundary, bottomBoundary)) - buf.WriteString(ansi.MoveCursor(topBoundary, 0)) + buf.WriteString(ansi.SetTopBottomMargins(topBoundary, bottomBoundary)) + buf.WriteString(ansi.CursorPosition(0, topBoundary)) buf.WriteString(ansi.InsertLine(len(lines))) _, _ = buf.WriteString(strings.Join(lines, "\r\n")) - buf.WriteString(ansi.SetScrollingRegion(0, r.height)) + buf.WriteString(ansi.SetTopBottomMargins(0, r.height)) // Move cursor back to where the main rendering routine expects it to be - buf.WriteString(ansi.MoveCursor(r.linesRendered, 0)) + buf.WriteString(ansi.CursorPosition(0, r.lastLinesRendered())) _, _ = r.out.Write(buf.Bytes()) } @@ -573,19 +593,22 @@ func (r *standardRenderer) insertTop(lines []string, topBoundary, bottomBoundary // See note in insertTop() for caveats, how this function only makes sense for // full-window applications, and how it differs from the normal way we do // rendering in Bubble Tea. +// +// Deprecated: This option is deprecated and will be removed in a future +// version of this package. func (r *standardRenderer) insertBottom(lines []string, topBoundary, bottomBoundary int) { r.mtx.Lock() defer r.mtx.Unlock() buf := &bytes.Buffer{} - buf.WriteString(ansi.SetScrollingRegion(topBoundary, bottomBoundary)) - buf.WriteString(ansi.MoveCursor(bottomBoundary, 0)) + buf.WriteString(ansi.SetTopBottomMargins(topBoundary, bottomBoundary)) + buf.WriteString(ansi.CursorPosition(0, bottomBoundary)) _, _ = buf.WriteString("\r\n" + strings.Join(lines, "\r\n")) - buf.WriteString(ansi.SetScrollingRegion(0, r.height)) + buf.WriteString(ansi.SetTopBottomMargins(0, r.height)) // Move cursor back to where the main rendering routine expects it to be - buf.WriteString(ansi.MoveCursor(r.linesRendered, 0)) + buf.WriteString(ansi.CursorPosition(0, r.lastLinesRendered())) _, _ = r.out.Write(buf.Bytes()) } @@ -657,6 +680,8 @@ type syncScrollAreaMsg struct { // should also be called on resize (WindowSizeMsg). 
 //
 // For high-performance, scroll-based rendering only.
+//
+// Deprecated: This option will be removed in a future version of this package.
 func SyncScrollArea(lines []string, topBoundary int, bottomBoundary int) Cmd {
 	return func() Msg {
 		return syncScrollAreaMsg{
@@ -673,6 +698,8 @@ type clearScrollAreaMsg struct{}
 // those lines to the main rendering routine.
 //
 // For high-performance, scroll-based rendering only.
+//
+// Deprecated: This option will be removed in a future version of this package.
 func ClearScrollArea() Msg {
 	return clearScrollAreaMsg{}
 }
@@ -688,6 +715,8 @@ type scrollUpMsg struct {
 // from view.
 //
 // For high-performance, scroll-based rendering only.
+//
+// Deprecated: This option will be removed in a future version of this package.
 func ScrollUp(newLines []string, topBoundary, bottomBoundary int) Cmd {
 	return func() Msg {
 		return scrollUpMsg{
@@ -709,6 +738,8 @@ type scrollDownMsg struct {
 // disappear from view.
 //
 // For high-performance, scroll-based rendering only.
+//
+// Deprecated: This option will be removed in a future version of this package.
 func ScrollDown(newLines []string, topBoundary, bottomBoundary int) Cmd {
 	return func() Msg {
 		return scrollDownMsg{
diff --git a/vendor/github.com/charmbracelet/bubbletea/tea.go b/vendor/github.com/charmbracelet/bubbletea/tea.go
index 5ed970762..743a866e9 100644
--- a/vendor/github.com/charmbracelet/bubbletea/tea.go
+++ b/vendor/github.com/charmbracelet/bubbletea/tea.go
@@ -26,9 +26,13 @@ import (
 	"golang.org/x/sync/errgroup"
 )

-// ErrProgramKilled is returned by [Program.Run] when the program got killed.
+// ErrProgramKilled is returned by [Program.Run] when the program gets killed.
 var ErrProgramKilled = errors.New("program was killed")

+// ErrInterrupted is returned by [Program.Run] when the program gets a SIGINT
+// signal, or when it receives an [InterruptMsg].
+var ErrInterrupted = errors.New("program was interrupted")
+
 // Msg contain data from the result of a IO operation. Msgs trigger the update
 // function and, henceforth, the UI.
 type Msg interface{}
@@ -128,6 +132,10 @@ func (h channelHandlers) shutdown() {
 type Program struct {
 	initialModel Model

+	// handlers is a list of channels that need to be waited on before the
+	// program can exit.
+	handlers channelHandlers
+
 	// Configuration options that will set as the program is initializing,
 	// treated as bits. These options can be set via various ProgramOptions.
 	startupOptions startupOptions
@@ -182,8 +190,8 @@ func Quit() Msg {
 	return QuitMsg{}
 }

-// QuitMsg signals that the program should quit. You can send a QuitMsg with
-// Quit.
+// QuitMsg signals that the program should quit. You can send a [QuitMsg] with
+// [Quit].
 type QuitMsg struct{}

 // Suspend is a special command that tells the Bubble Tea program to suspend.
@@ -195,13 +203,28 @@ func Suspend() Msg {
 // This usually happens when ctrl+z is pressed on common programs, but since
 // bubbletea puts the terminal in raw mode, we need to handle it in a
 // per-program basis.
-// You can send this message with Suspend.
+//
+// You can send this message with [Suspend()].
 type SuspendMsg struct{}

 // ResumeMsg can be listen to to do something once a program is resumed back
 // from a suspend state.
 type ResumeMsg struct{}

+// InterruptMsg signals that the program should interrupt.
+// This usually happens when ctrl+c is pressed on common programs, but since
+// bubbletea puts the terminal in raw mode, we need to handle it in a
+// per-program basis.
+//
+// You can send this message with [Interrupt()].
+type InterruptMsg struct{} + +// Interrupt is a special command that tells the Bubble Tea program to +// interrupt. +func Interrupt() Msg { + return InterruptMsg{} +} + // NewProgram creates a new Program. func NewProgram(model Model, opts ...ProgramOption) *Program { p := &Program{ @@ -259,9 +282,14 @@ func (p *Program) handleSignals() chan struct{} { case <-p.ctx.Done(): return - case <-sig: + case s := <-sig: if atomic.LoadUint32(&p.ignoreSignals) == 0 { - p.msgs <- QuitMsg{} + switch s { + case syscall.SIGINT: + p.msgs <- InterruptMsg{} + default: + p.msgs <- QuitMsg{} + } return } } @@ -312,6 +340,11 @@ func (p *Program) handleCommands(cmds chan Cmd) chan struct{} { // possible to cancel them so we'll have to leak the goroutine // until Cmd returns. go func() { + // Recover from panics. + if !p.startupOptions.has(withoutCatchPanics) { + defer p.recoverFromPanic() + } + msg := cmd() // this can be long. p.Send(msg) }() @@ -353,6 +386,9 @@ func (p *Program) eventLoop(model Model, cmds chan Cmd) (Model, error) { case QuitMsg: return model, nil + case InterruptMsg: + return model, ErrInterrupted + case SuspendMsg: if suspendSupported { p.suspend() @@ -460,7 +496,7 @@ func (p *Program) eventLoop(model Model, cmds chan Cmd) (Model, error) { // terminated by either [Program.Quit], [Program.Kill], or its signal handler. // Returns the final model. func (p *Program) Run() (Model, error) { - handlers := channelHandlers{} + p.handlers = channelHandlers{} cmds := make(chan Cmd) p.errs = make(chan error) p.finished = make(chan struct{}, 1) @@ -507,19 +543,12 @@ func (p *Program) Run() (Model, error) { // Handle signals. if !p.startupOptions.has(withoutSignalHandler) { - handlers.add(p.handleSignals()) + p.handlers.add(p.handleSignals()) } // Recover from panics. if !p.startupOptions.has(withoutCatchPanics) { - defer func() { - if r := recover(); r != nil { - p.shutdown(true) - fmt.Printf("Caught panic:\n\n%s\n\nRestoring terminal...\n\n", r) - debug.PrintStack() - return - } - }() + defer p.recoverFromPanic() } // If no renderer is set use the standard one. @@ -561,7 +590,7 @@ func (p *Program) Run() (Model, error) { model := p.initialModel if initCmd := model.Init(); initCmd != nil { ch := make(chan struct{}) - handlers.add(ch) + p.handlers.add(ch) go func() { defer close(ch) @@ -584,36 +613,22 @@ func (p *Program) Run() (Model, error) { } // Handle resize events. - handlers.add(p.handleResize()) + p.handlers.add(p.handleResize()) // Process commands. - handlers.add(p.handleCommands(cmds)) + p.handlers.add(p.handleCommands(cmds)) // Run event loop, handle updates and draw. model, err := p.eventLoop(model, cmds) - killed := p.ctx.Err() != nil - if killed { + killed := p.ctx.Err() != nil || err != nil + if killed && err == nil { err = fmt.Errorf("%w: %s", ErrProgramKilled, p.ctx.Err()) - } else { + } + if err == nil { // Ensure we rendered the final state of the model. p.renderer.write(model.View()) } - // Tear down. - p.cancel() - - // Check if the cancel reader has been setup before waiting and closing. - if p.cancelReader != nil { - // Wait for input loop to finish. - if p.cancelReader.Cancel() { - p.waitForReadLoop() - } - _ = p.cancelReader.Close() - } - - // Wait for all handlers to finish. - handlers.shutdown() - // Restore terminal state. p.shutdown(killed) @@ -668,7 +683,7 @@ func (p *Program) Quit() { // The final render that you would normally see when quitting will be skipped. // [program.Run] returns a [ErrProgramKilled] error. 
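With `InterruptMsg`, `Interrupt`, and `ErrInterrupted` in place, a program can treat ctrl+c as an interrupt rather than a plain quit. A sketch of the caller side; the trivial model and the exit status are our own choices, not part of this diff:

```go
package main

import (
	"errors"
	"fmt"
	"os"

	tea "github.com/charmbracelet/bubbletea"
)

type model struct{}

func (model) Init() tea.Cmd { return nil }

func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
	if k, ok := msg.(tea.KeyMsg); ok && k.String() == "ctrl+c" {
		// In raw mode ctrl+c arrives as a key press, not a SIGINT, so the
		// model opts in: tea.Interrupt emits an InterruptMsg, which makes
		// Run return ErrInterrupted.
		return m, tea.Interrupt
	}
	return m, nil
}

func (model) View() string { return "press ctrl+c to interrupt\n" }

func main() {
	if _, err := tea.NewProgram(model{}).Run(); err != nil {
		if errors.Is(err, tea.ErrInterrupted) {
			os.Exit(130) // conventional SIGINT exit status; our choice
		}
		fmt.Fprintln(os.Stderr, "error:", err)
		os.Exit(1)
	}
}
```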
func (p *Program) Kill() { - p.cancel() + p.shutdown(true) } // Wait waits/blocks until the underlying Program finished shutting down. @@ -679,6 +694,22 @@ func (p *Program) Wait() { // shutdown performs operations to free up resources and restore the terminal // to its original state. func (p *Program) shutdown(kill bool) { + p.cancel() + + // Wait for all handlers to finish. + p.handlers.shutdown() + + // Check if the cancel reader has been setup before waiting and closing. + if p.cancelReader != nil { + // Wait for input loop to finish. + if p.cancelReader.Cancel() { + if !kill { + p.waitForReadLoop() + } + } + _ = p.cancelReader.Close() + } + if p.renderer != nil { if kill { p.renderer.kill() @@ -688,7 +719,19 @@ func (p *Program) shutdown(kill bool) { } _ = p.restoreTerminalState() - p.finished <- struct{}{} + if !kill { + p.finished <- struct{}{} + } +} + +// recoverFromPanic recovers from a panic, prints the stack trace, and restores +// the terminal to a usable state. +func (p *Program) recoverFromPanic() { + if r := recover(); r != nil { + p.shutdown(true) + fmt.Printf("Caught panic:\n\n%s\n\nRestoring terminal...\n\n", r) + debug.PrintStack() + } } // ReleaseTerminal restores the original terminal state and cancels the input diff --git a/vendor/github.com/charmbracelet/lipgloss/.golangci-soft.yml b/vendor/github.com/charmbracelet/colorprofile/.golangci-soft.yml similarity index 81% rename from vendor/github.com/charmbracelet/lipgloss/.golangci-soft.yml rename to vendor/github.com/charmbracelet/colorprofile/.golangci-soft.yml index 1b6824bb2..d325d4fcc 100644 --- a/vendor/github.com/charmbracelet/lipgloss/.golangci-soft.yml +++ b/vendor/github.com/charmbracelet/colorprofile/.golangci-soft.yml @@ -1,5 +1,6 @@ run: tests: false + issues-exit-code: 0 issues: include: @@ -14,16 +15,13 @@ issues: linters: enable: - # - dupl - exhaustive - # - exhaustivestruct - goconst - godot - godox - - gomnd + - mnd - gomoddirectives - goprintffuncname - # - lll - misspell - nakedret - nestif @@ -34,13 +32,9 @@ linters: # disable default linters, they are already enabled in .golangci.yml disable: - - deadcode - errcheck - gosimple - govet - ineffassign - staticcheck - - structcheck - - typecheck - unused - - varcheck diff --git a/vendor/github.com/charmbracelet/colorprofile/.golangci.yml b/vendor/github.com/charmbracelet/colorprofile/.golangci.yml new file mode 100644 index 000000000..d6789e014 --- /dev/null +++ b/vendor/github.com/charmbracelet/colorprofile/.golangci.yml @@ -0,0 +1,28 @@ +run: + tests: false + +issues: + include: + - EXC0001 + - EXC0005 + - EXC0011 + - EXC0012 + - EXC0013 + + max-issues-per-linter: 0 + max-same-issues: 0 + +linters: + enable: + - bodyclose + - gofumpt + - goimports + - gosec + - nilerr + - revive + - rowserrcheck + - sqlclosecheck + - tparallel + - unconvert + - unparam + - whitespace diff --git a/vendor/github.com/charmbracelet/colorprofile/.goreleaser.yml b/vendor/github.com/charmbracelet/colorprofile/.goreleaser.yml new file mode 100644 index 000000000..40d9f298d --- /dev/null +++ b/vendor/github.com/charmbracelet/colorprofile/.goreleaser.yml @@ -0,0 +1,6 @@ +includes: + - from_url: + url: charmbracelet/meta/main/goreleaser-lib.yaml + +# yaml-language-server: $schema=https://goreleaser.com/static/schema-pro.json + diff --git a/vendor/github.com/charmbracelet/colorprofile/LICENSE b/vendor/github.com/charmbracelet/colorprofile/LICENSE new file mode 100644 index 000000000..b7974b076 --- /dev/null +++ b/vendor/github.com/charmbracelet/colorprofile/LICENSE @@ -0,0 
+1,21 @@ +MIT License + +Copyright (c) 2020-2024 Charmbracelet, Inc + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/charmbracelet/colorprofile/README.md b/vendor/github.com/charmbracelet/colorprofile/README.md new file mode 100644 index 000000000..c72b2f4b5 --- /dev/null +++ b/vendor/github.com/charmbracelet/colorprofile/README.md @@ -0,0 +1,103 @@ +# Colorprofile + +

+ Latest Release + GoDoc + Build Status +

+
+A simple, powerful—and at times magical—package for detecting terminal color
+profiles and performing color (and CSI) degradation.
+
+## Detecting the terminal’s color profile
+
+Detecting the terminal’s color profile is easy.
+
+```go
+import "github.com/charmbracelet/colorprofile"
+
+// Detect the color profile. If you’re planning on writing to stderr you'd want
+// to use os.Stderr instead.
+p := colorprofile.Detect(os.Stdout, os.Environ())
+
+// Comment on the profile.
+fmt.Printf("You know, your colors are quite %s.", func() string {
+	switch p {
+	case colorprofile.TrueColor:
+		return "fancy"
+	case colorprofile.ANSI256:
+		return "1990s fancy"
+	case colorprofile.ANSI:
+		return "normcore"
+	case colorprofile.Ascii:
+		return "ancient"
+	case colorprofile.NoTTY:
+		return "naughty!"
+	}
+	return "...IDK" // this should never happen
+}())
+```
+
+## Downsampling colors
+
+When necessary, colors can be downsampled to a given profile, or manually
+downsampled to a specific profile.
+
+```go
+p := colorprofile.Detect(os.Stdout, os.Environ())
+c := color.RGBA{0x6b, 0x50, 0xff, 0xff} // #6b50ff
+
+// Downsample to the detected profile, when necessary.
+convertedColor := p.Convert(c)
+
+// Or manually convert to a given profile.
+ansi256Color := colorprofile.ANSI256.Convert(c)
+ansiColor := colorprofile.ANSI.Convert(c)
+noColor := colorprofile.Ascii.Convert(c)
+noANSI := colorprofile.NoTTY.Convert(c)
+```
+
+## Automatic downsampling with a Writer
+
+You can also magically downsample colors in ANSI output, when necessary. If
+output is not a TTY, ANSI will be dropped entirely.
+
+```go
+myFancyANSI := "\x1b[38;2;107;80;255mCute \x1b[1;3mpuppy!!\x1b[m"
+
+// Automatically downsample for the terminal at stdout.
+w := colorprofile.NewWriter(os.Stdout, os.Environ())
+fmt.Fprintf(w, myFancyANSI)
+
+// Downsample to 4-bit ANSI.
+w.Profile = colorprofile.ANSI
+fmt.Fprintf(w, myFancyANSI)
+
+// Ascii-fy, no colors.
+w.Profile = colorprofile.Ascii
+fmt.Fprintf(w, myFancyANSI)
+
+// Strip ANSI altogether.
+w.Profile = colorprofile.NoTTY
+fmt.Fprintf(w, myFancyANSI) // not as fancy
+```
+
+## Feedback
+
+We’d love to hear your thoughts on this project. Feel free to drop us a note!
+
+- [Twitter](https://twitter.com/charmcli)
+- [The Fediverse](https://mastodon.social/@charmcli)
+- [Discord](https://charm.sh/chat)
+
+## License
+
+[MIT](https://github.com/charmbracelet/bubbletea/raw/master/LICENSE)
+
+---
+
+Part of [Charm](https://charm.sh).
+
+The Charm logo
+
+Charm热爱开源 • Charm loves open source • نحنُ نحب المصادر المفتوحة
diff --git a/vendor/github.com/charmbracelet/colorprofile/env.go b/vendor/github.com/charmbracelet/colorprofile/env.go
new file mode 100644
index 000000000..8df3d8f7e
--- /dev/null
+++ b/vendor/github.com/charmbracelet/colorprofile/env.go
@@ -0,0 +1,287 @@
+package colorprofile
+
+import (
+	"bytes"
+	"io"
+	"os/exec"
+	"runtime"
+	"strconv"
+	"strings"
+
+	"github.com/charmbracelet/x/term"
+	"github.com/xo/terminfo"
+)
+
+// Detect returns the color profile based on the terminal output and
+// environment variables. This respects NO_COLOR, CLICOLOR, and CLICOLOR_FORCE
+// environment variables.
+//
+// The rules are as follows:
+// - TERM=dumb is always treated as NoTTY unless CLICOLOR_FORCE=1 is set.
+// - If COLORTERM=truecolor, and the profile is not NoTTY, it gets upgraded to TrueColor.
+// - Using any 256 color terminal (e.g. TERM=xterm-256color) will set the profile to ANSI256.
+// - Using any color terminal (e.g. TERM=xterm-color) will set the profile to ANSI.
+// - Using CLICOLOR=1 without TERM defined should be treated as ANSI if the
+// output is a terminal.
+// - NO_COLOR takes precedence over CLICOLOR/CLICOLOR_FORCE, and will disable
+// colors but not text decoration, i.e. bold, italic, faint, etc.
+//
+// See https://no-color.org/ and https://bixense.com/clicolors/ for more information.
+func Detect(output io.Writer, env []string) Profile {
+	out, ok := output.(term.File)
+	isatty := ok && term.IsTerminal(out.Fd())
+	environ := newEnviron(env)
+	term := environ.get("TERM")
+	isDumb := term == "dumb"
+	envp := colorProfile(isatty, environ)
+	if envp == TrueColor || envNoColor(environ) {
+		// We already know we have TrueColor, or NO_COLOR is set.
+		return envp
+	}
+
+	if isatty && !isDumb {
+		tip := Terminfo(term)
+		tmuxp := tmux(environ)
+
+		// Color profile is the maximum of env, terminfo, and tmux.
+		return max(envp, max(tip, tmuxp))
+	}
+
+	return envp
+}
+
+// Env returns the color profile based on the terminal environment variables.
+// This respects NO_COLOR, CLICOLOR, and CLICOLOR_FORCE environment variables.
+//
+// The rules are as follows:
+// - TERM=dumb is always treated as NoTTY unless CLICOLOR_FORCE=1 is set.
+// - If COLORTERM=truecolor, and the profile is not NoTTY, it gets upgraded to TrueColor.
+// - Using any 256 color terminal (e.g. TERM=xterm-256color) will set the profile to ANSI256.
+// - Using any color terminal (e.g. TERM=xterm-color) will set the profile to ANSI.
+// - Using CLICOLOR=1 without TERM defined should be treated as ANSI if the
+// output is a terminal.
+// - NO_COLOR takes precedence over CLICOLOR/CLICOLOR_FORCE, and will disable
+// colors but not text decoration, i.e. bold, italic, faint, etc.
+//
+// See https://no-color.org/ and https://bixense.com/clicolors/ for more information.
+func Env(env []string) (p Profile) {
+	return colorProfile(true, newEnviron(env))
+}
+
+func colorProfile(isatty bool, env environ) (p Profile) {
+	isDumb := env.get("TERM") == "dumb"
+	envp := envColorProfile(env)
+	if !isatty || isDumb {
+		// Check if the output is a terminal.
+		// Treat dumb terminals as NoTTY
+		p = NoTTY
+	} else {
+		p = envp
+	}
+
+	if envNoColor(env) && isatty {
+		if p > Ascii {
+			p = Ascii
+		}
+		return
+	}
+
+	if cliColorForced(env) {
+		if p < ANSI {
+			p = ANSI
+		}
+		if envp > p {
+			p = envp
+		}
+
+		return
+	}
+
+	if cliColor(env) {
+		if isatty && !isDumb && p < ANSI {
+			p = ANSI
+		}
+	}
+
+	return p
+}
+
+// envNoColor returns true if the environment variables explicitly disable color output
+// by setting NO_COLOR (https://no-color.org/).
+func envNoColor(env environ) bool {
+	noColor, _ := strconv.ParseBool(env.get("NO_COLOR"))
+	return noColor
+}
+
+func cliColor(env environ) bool {
+	cliColor, _ := strconv.ParseBool(env.get("CLICOLOR"))
+	return cliColor
+}
+
+func cliColorForced(env environ) bool {
+	cliColorForce, _ := strconv.ParseBool(env.get("CLICOLOR_FORCE"))
+	return cliColorForce
+}
+
+func colorTerm(env environ) bool {
+	colorTerm := strings.ToLower(env.get("COLORTERM"))
+	return colorTerm == "truecolor" || colorTerm == "24bit" ||
+		colorTerm == "yes" || colorTerm == "true"
+}
+
+// envColorProfile infers the color profile from the environment.
+func envColorProfile(env environ) (p Profile) {
+	term, ok := env.lookup("TERM")
+	if !ok || len(term) == 0 || term == "dumb" {
+		p = NoTTY
+		if runtime.GOOS == "windows" {
+			// Use Windows API to detect color profile. Windows Terminal and
+			// cmd.exe don't define $TERM.
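To make the precedence rules documented above concrete, here is how `Env` resolves a few explicit environments. A sketch only: the expected results assume a non-Windows platform, and `Env` always assumes the output is a terminal:

```go
package main

import (
	"fmt"

	"github.com/charmbracelet/colorprofile"
)

func main() {
	// A 256-color TERM alone yields ANSI256.
	fmt.Println(colorprofile.Env([]string{"TERM=xterm-256color"}))
	// COLORTERM=truecolor upgrades the profile to TrueColor.
	fmt.Println(colorprofile.Env([]string{"TERM=xterm-256color", "COLORTERM=truecolor"}))
	// NO_COLOR drops colors but keeps text decoration: Ascii.
	fmt.Println(colorprofile.Env([]string{"TERM=xterm-256color", "NO_COLOR=1"}))
	// CLICOLOR_FORCE=1 forces at least ANSI, even for TERM=dumb.
	fmt.Println(colorprofile.Env([]string{"TERM=dumb", "CLICOLOR_FORCE=1"}))
}
```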
+ if wcp, ok := windowsColorProfile(env); ok { + p = wcp + } + } + } else { + p = ANSI + } + + parts := strings.Split(term, "-") + switch parts[0] { + case "alacritty", + "contour", + "foot", + "ghostty", + "kitty", + "rio", + "st", + "wezterm": + return TrueColor + case "xterm": + if len(parts) > 1 { + switch parts[1] { + case "ghostty", "kitty": + // These terminals can be defined as xterm-TERMNAME + return TrueColor + } + } + case "tmux", "screen": + if p < ANSI256 { + p = ANSI256 + } + } + + if isCloudShell, _ := strconv.ParseBool(env.get("GOOGLE_CLOUD_SHELL")); isCloudShell { + return TrueColor + } + + // GNU Screen doesn't support TrueColor + // Tmux doesn't support $COLORTERM + if colorTerm(env) && !strings.HasPrefix(term, "screen") && !strings.HasPrefix(term, "tmux") { + return TrueColor + } + + if strings.HasSuffix(term, "256color") && p < ANSI256 { + p = ANSI256 + } + + return +} + +// Terminfo returns the color profile based on the terminal's terminfo +// database. This relies on the Tc and RGB capabilities to determine if the +// terminal supports TrueColor. +// If term is empty or "dumb", it returns NoTTY. +func Terminfo(term string) (p Profile) { + if len(term) == 0 || term == "dumb" { + return NoTTY + } + + p = ANSI + ti, err := terminfo.Load(term) + if err != nil { + return + } + + extbools := ti.ExtBoolCapsShort() + if _, ok := extbools["Tc"]; ok { + return TrueColor + } + + if _, ok := extbools["RGB"]; ok { + return TrueColor + } + + return +} + +// Tmux returns the color profile based on `tmux info` output. Tmux supports +// overriding the terminal's color capabilities, so this function will return +// the color profile based on the tmux configuration. +func Tmux(env []string) Profile { + return tmux(newEnviron(env)) +} + +// tmux returns the color profile based on the tmux environment variables. +func tmux(env environ) (p Profile) { + if tmux, ok := env.lookup("TMUX"); !ok || len(tmux) == 0 { + // Not in tmux + return NoTTY + } + + // Check if tmux has either Tc or RGB capabilities. Otherwise, return + // ANSI256. + p = ANSI256 + cmd := exec.Command("tmux", "info") + out, err := cmd.Output() + if err != nil { + return + } + + for _, line := range bytes.Split(out, []byte("\n")) { + if (bytes.Contains(line, []byte("Tc")) || bytes.Contains(line, []byte("RGB"))) && + bytes.Contains(line, []byte("true")) { + return TrueColor + } + } + + return +} + +// environ is a map of environment variables. +type environ map[string]string + +// newEnviron returns a new environment map from a slice of environment +// variables. +func newEnviron(environ []string) environ { + m := make(map[string]string, len(environ)) + for _, e := range environ { + parts := strings.SplitN(e, "=", 2) + var value string + if len(parts) == 2 { + value = parts[1] + } + m[parts[0]] = value + } + return m +} + +// lookup returns the value of an environment variable and a boolean indicating +// if it exists. +func (e environ) lookup(key string) (string, bool) { + v, ok := e[key] + return v, ok +} + +// get returns the value of an environment variable and empty string if it +// doesn't exist. 
+func (e environ) get(key string) string { + v, _ := e.lookup(key) + return v +} + +func max[T ~byte | ~int](a, b T) T { + if a > b { + return a + } + return b +} diff --git a/vendor/github.com/charmbracelet/colorprofile/env_other.go b/vendor/github.com/charmbracelet/colorprofile/env_other.go new file mode 100644 index 000000000..080994bc1 --- /dev/null +++ b/vendor/github.com/charmbracelet/colorprofile/env_other.go @@ -0,0 +1,8 @@ +//go:build !windows +// +build !windows + +package colorprofile + +func windowsColorProfile(map[string]string) (Profile, bool) { + return 0, false +} diff --git a/vendor/github.com/charmbracelet/colorprofile/env_windows.go b/vendor/github.com/charmbracelet/colorprofile/env_windows.go new file mode 100644 index 000000000..3b9c28f96 --- /dev/null +++ b/vendor/github.com/charmbracelet/colorprofile/env_windows.go @@ -0,0 +1,45 @@ +//go:build windows +// +build windows + +package colorprofile + +import ( + "strconv" + + "golang.org/x/sys/windows" +) + +func windowsColorProfile(env map[string]string) (Profile, bool) { + if env["ConEmuANSI"] == "ON" { + return TrueColor, true + } + + if len(env["WT_SESSION"]) > 0 { + // Windows Terminal supports TrueColor + return TrueColor, true + } + + major, _, build := windows.RtlGetNtVersionNumbers() + if build < 10586 || major < 10 { + // No ANSI support before WindowsNT 10 build 10586 + if len(env["ANSICON"]) > 0 { + ansiconVer := env["ANSICON_VER"] + cv, err := strconv.Atoi(ansiconVer) + if err != nil || cv < 181 { + // No 8 bit color support before ANSICON 1.81 + return ANSI, true + } + + return ANSI256, true + } + + return NoTTY, true + } + + if build < 14931 { + // No true color support before build 14931 + return ANSI256, true + } + + return TrueColor, true +} diff --git a/vendor/github.com/charmbracelet/colorprofile/profile.go b/vendor/github.com/charmbracelet/colorprofile/profile.go new file mode 100644 index 000000000..97e37ac36 --- /dev/null +++ b/vendor/github.com/charmbracelet/colorprofile/profile.go @@ -0,0 +1,399 @@ +package colorprofile + +import ( + "image/color" + "math" + + "github.com/charmbracelet/x/ansi" + "github.com/lucasb-eyer/go-colorful" +) + +// Profile is a color profile: NoTTY, Ascii, ANSI, ANSI256, or TrueColor. +type Profile byte + +const ( + // NoTTY, not a terminal profile. + NoTTY Profile = iota + // Ascii, uncolored profile. + Ascii //nolint:revive + // ANSI, 4-bit color profile. + ANSI + // ANSI256, 8-bit color profile. + ANSI256 + // TrueColor, 24-bit color profile. + TrueColor +) + +// String returns the string representation of a Profile. +func (p Profile) String() string { + switch p { + case TrueColor: + return "TrueColor" + case ANSI256: + return "ANSI256" + case ANSI: + return "ANSI" + case Ascii: + return "Ascii" + case NoTTY: + return "NoTTY" + } + return "Unknown" +} + +// Convert transforms a given Color to a Color supported within the Profile. 
+func (p Profile) Convert(c color.Color) color.Color { + if p <= Ascii { + return nil + } + + switch c := c.(type) { + case ansi.BasicColor: + return c + + case ansi.ExtendedColor: + if p == ANSI { + return ansi256ToANSIColor(c) + } + return c + + case ansi.TrueColor, color.Color: + h, ok := colorful.MakeColor(c) + if !ok { + return nil + } + if p != TrueColor { + ac := hexToANSI256Color(h) + if p == ANSI { + return ansi256ToANSIColor(ac) + } + return ac + } + return c + } + + return c +} + +func hexToANSI256Color(c colorful.Color) ansi.ExtendedColor { + v2ci := func(v float64) int { + if v < 48 { + return 0 + } + if v < 115 { + return 1 + } + return int((v - 35) / 40) + } + + // Calculate the nearest 0-based color index at 16..231 + r := v2ci(c.R * 255.0) // 0..5 each + g := v2ci(c.G * 255.0) + b := v2ci(c.B * 255.0) + ci := 36*r + 6*g + b /* 0..215 */ + + // Calculate the represented colors back from the index + i2cv := [6]int{0, 0x5f, 0x87, 0xaf, 0xd7, 0xff} + cr := i2cv[r] // r/g/b, 0..255 each + cg := i2cv[g] + cb := i2cv[b] + + // Calculate the nearest 0-based gray index at 232..255 + var grayIdx int + average := (cr + cg + cb) / 3 + if average > 238 { + grayIdx = 23 + } else { + grayIdx = (average - 3) / 10 // 0..23 + } + gv := 8 + 10*grayIdx // same value for r/g/b, 0..255 + + // Return the one which is nearer to the original input rgb value + c2 := colorful.Color{R: float64(cr) / 255.0, G: float64(cg) / 255.0, B: float64(cb) / 255.0} + g2 := colorful.Color{R: float64(gv) / 255.0, G: float64(gv) / 255.0, B: float64(gv) / 255.0} + colorDist := c.DistanceHSLuv(c2) + grayDist := c.DistanceHSLuv(g2) + + if colorDist <= grayDist { + return ansi.ExtendedColor(16 + ci) //nolint:gosec + } + return ansi.ExtendedColor(232 + grayIdx) //nolint:gosec +} + +func ansi256ToANSIColor(c ansi.ExtendedColor) ansi.BasicColor { + var r int + md := math.MaxFloat64 + + h, _ := colorful.Hex(ansiHex[c]) + for i := 0; i <= 15; i++ { + hb, _ := colorful.Hex(ansiHex[i]) + d := h.DistanceHSLuv(hb) + + if d < md { + md = d + r = i + } + } + + return ansi.BasicColor(r) //nolint:gosec +} + +// RGB values of ANSI colors (0-255). 
+var ansiHex = []string{ + "#000000", + "#800000", + "#008000", + "#808000", + "#000080", + "#800080", + "#008080", + "#c0c0c0", + "#808080", + "#ff0000", + "#00ff00", + "#ffff00", + "#0000ff", + "#ff00ff", + "#00ffff", + "#ffffff", + "#000000", + "#00005f", + "#000087", + "#0000af", + "#0000d7", + "#0000ff", + "#005f00", + "#005f5f", + "#005f87", + "#005faf", + "#005fd7", + "#005fff", + "#008700", + "#00875f", + "#008787", + "#0087af", + "#0087d7", + "#0087ff", + "#00af00", + "#00af5f", + "#00af87", + "#00afaf", + "#00afd7", + "#00afff", + "#00d700", + "#00d75f", + "#00d787", + "#00d7af", + "#00d7d7", + "#00d7ff", + "#00ff00", + "#00ff5f", + "#00ff87", + "#00ffaf", + "#00ffd7", + "#00ffff", + "#5f0000", + "#5f005f", + "#5f0087", + "#5f00af", + "#5f00d7", + "#5f00ff", + "#5f5f00", + "#5f5f5f", + "#5f5f87", + "#5f5faf", + "#5f5fd7", + "#5f5fff", + "#5f8700", + "#5f875f", + "#5f8787", + "#5f87af", + "#5f87d7", + "#5f87ff", + "#5faf00", + "#5faf5f", + "#5faf87", + "#5fafaf", + "#5fafd7", + "#5fafff", + "#5fd700", + "#5fd75f", + "#5fd787", + "#5fd7af", + "#5fd7d7", + "#5fd7ff", + "#5fff00", + "#5fff5f", + "#5fff87", + "#5fffaf", + "#5fffd7", + "#5fffff", + "#870000", + "#87005f", + "#870087", + "#8700af", + "#8700d7", + "#8700ff", + "#875f00", + "#875f5f", + "#875f87", + "#875faf", + "#875fd7", + "#875fff", + "#878700", + "#87875f", + "#878787", + "#8787af", + "#8787d7", + "#8787ff", + "#87af00", + "#87af5f", + "#87af87", + "#87afaf", + "#87afd7", + "#87afff", + "#87d700", + "#87d75f", + "#87d787", + "#87d7af", + "#87d7d7", + "#87d7ff", + "#87ff00", + "#87ff5f", + "#87ff87", + "#87ffaf", + "#87ffd7", + "#87ffff", + "#af0000", + "#af005f", + "#af0087", + "#af00af", + "#af00d7", + "#af00ff", + "#af5f00", + "#af5f5f", + "#af5f87", + "#af5faf", + "#af5fd7", + "#af5fff", + "#af8700", + "#af875f", + "#af8787", + "#af87af", + "#af87d7", + "#af87ff", + "#afaf00", + "#afaf5f", + "#afaf87", + "#afafaf", + "#afafd7", + "#afafff", + "#afd700", + "#afd75f", + "#afd787", + "#afd7af", + "#afd7d7", + "#afd7ff", + "#afff00", + "#afff5f", + "#afff87", + "#afffaf", + "#afffd7", + "#afffff", + "#d70000", + "#d7005f", + "#d70087", + "#d700af", + "#d700d7", + "#d700ff", + "#d75f00", + "#d75f5f", + "#d75f87", + "#d75faf", + "#d75fd7", + "#d75fff", + "#d78700", + "#d7875f", + "#d78787", + "#d787af", + "#d787d7", + "#d787ff", + "#d7af00", + "#d7af5f", + "#d7af87", + "#d7afaf", + "#d7afd7", + "#d7afff", + "#d7d700", + "#d7d75f", + "#d7d787", + "#d7d7af", + "#d7d7d7", + "#d7d7ff", + "#d7ff00", + "#d7ff5f", + "#d7ff87", + "#d7ffaf", + "#d7ffd7", + "#d7ffff", + "#ff0000", + "#ff005f", + "#ff0087", + "#ff00af", + "#ff00d7", + "#ff00ff", + "#ff5f00", + "#ff5f5f", + "#ff5f87", + "#ff5faf", + "#ff5fd7", + "#ff5fff", + "#ff8700", + "#ff875f", + "#ff8787", + "#ff87af", + "#ff87d7", + "#ff87ff", + "#ffaf00", + "#ffaf5f", + "#ffaf87", + "#ffafaf", + "#ffafd7", + "#ffafff", + "#ffd700", + "#ffd75f", + "#ffd787", + "#ffd7af", + "#ffd7d7", + "#ffd7ff", + "#ffff00", + "#ffff5f", + "#ffff87", + "#ffffaf", + "#ffffd7", + "#ffffff", + "#080808", + "#121212", + "#1c1c1c", + "#262626", + "#303030", + "#3a3a3a", + "#444444", + "#4e4e4e", + "#585858", + "#626262", + "#6c6c6c", + "#767676", + "#808080", + "#8a8a8a", + "#949494", + "#9e9e9e", + "#a8a8a8", + "#b2b2b2", + "#bcbcbc", + "#c6c6c6", + "#d0d0d0", + "#dadada", + "#e4e4e4", + "#eeeeee", +} diff --git a/vendor/github.com/charmbracelet/colorprofile/writer.go b/vendor/github.com/charmbracelet/colorprofile/writer.go new file mode 100644 index 000000000..d04b3b99d --- /dev/null +++ 
b/vendor/github.com/charmbracelet/colorprofile/writer.go @@ -0,0 +1,166 @@ +package colorprofile + +import ( + "bytes" + "image/color" + "io" + "strconv" + + "github.com/charmbracelet/x/ansi" +) + +// NewWriter creates a new color profile writer that downgrades color sequences +// based on the detected color profile. +// +// If environ is nil, it will use os.Environ() to get the environment variables. +// +// It queries the given writer to determine if it supports ANSI escape codes. +// If it does, along with the given environment variables, it will determine +// the appropriate color profile to use for color formatting. +// +// This respects the NO_COLOR, CLICOLOR, and CLICOLOR_FORCE environment variables. +func NewWriter(w io.Writer, environ []string) *Writer { + return &Writer{ + Forward: w, + Profile: Detect(w, environ), + } +} + +// Writer represents a color profile writer that writes ANSI sequences to the +// underlying writer. +type Writer struct { + Forward io.Writer + Profile Profile +} + +// Write writes the given text to the underlying writer. +func (w *Writer) Write(p []byte) (int, error) { + switch w.Profile { + case TrueColor: + return w.Forward.Write(p) + case NoTTY: + return io.WriteString(w.Forward, ansi.Strip(string(p))) + default: + return w.downsample(p) + } +} + +// downsample downgrades the given text to the appropriate color profile. +func (w *Writer) downsample(p []byte) (int, error) { + var buf bytes.Buffer + var state byte + + parser := ansi.GetParser() + defer ansi.PutParser(parser) + + for len(p) > 0 { + parser.Reset() + seq, _, read, newState := ansi.DecodeSequence(p, state, parser) + + switch { + case ansi.HasCsiPrefix(seq) && parser.Command() == 'm': + handleSgr(w, parser, &buf) + default: + // If we're not a style SGR sequence, just write the bytes. + if n, err := buf.Write(seq); err != nil { + return n, err + } + } + + p = p[read:] + state = newState + } + + return w.Forward.Write(buf.Bytes()) +} + +// WriteString writes the given text to the underlying writer. +func (w *Writer) WriteString(s string) (n int, err error) { + return w.Write([]byte(s)) +} + +func handleSgr(w *Writer, p *ansi.Parser, buf *bytes.Buffer) { + var style ansi.Style + params := p.Params() + for i := 0; i < len(params); i++ { + param := params[i] + + switch param := param.Param(0); param { + case 0: + // SGR default parameter is 0. We use an empty string to reduce the + // number of bytes written to the buffer. 
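+ // Terminals treat a missing SGR parameter as 0, so "\x1b[m" is
+ // equivalent to "\x1b[0m".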
+ style = append(style, "") + case 30, 31, 32, 33, 34, 35, 36, 37: // 8-bit foreground color + if w.Profile < ANSI { + continue + } + style = style.ForegroundColor( + w.Profile.Convert(ansi.BasicColor(param - 30))) //nolint:gosec + case 38: // 16 or 24-bit foreground color + var c color.Color + if n := ansi.ReadStyleColor(params[i:], &c); n > 0 { + i += n - 1 + } + if w.Profile < ANSI { + continue + } + style = style.ForegroundColor(w.Profile.Convert(c)) + case 39: // default foreground color + if w.Profile < ANSI { + continue + } + style = style.DefaultForegroundColor() + case 40, 41, 42, 43, 44, 45, 46, 47: // 8-bit background color + if w.Profile < ANSI { + continue + } + style = style.BackgroundColor( + w.Profile.Convert(ansi.BasicColor(param - 40))) //nolint:gosec + case 48: // 16 or 24-bit background color + var c color.Color + if n := ansi.ReadStyleColor(params[i:], &c); n > 0 { + i += n - 1 + } + if w.Profile < ANSI { + continue + } + style = style.BackgroundColor(w.Profile.Convert(c)) + case 49: // default background color + if w.Profile < ANSI { + continue + } + style = style.DefaultBackgroundColor() + case 58: // 16 or 24-bit underline color + var c color.Color + if n := ansi.ReadStyleColor(params[i:], &c); n > 0 { + i += n - 1 + } + if w.Profile < ANSI { + continue + } + style = style.UnderlineColor(w.Profile.Convert(c)) + case 59: // default underline color + if w.Profile < ANSI { + continue + } + style = style.DefaultUnderlineColor() + case 90, 91, 92, 93, 94, 95, 96, 97: // 8-bit bright foreground color + if w.Profile < ANSI { + continue + } + style = style.ForegroundColor( + w.Profile.Convert(ansi.BasicColor(param - 90 + 8))) //nolint:gosec + case 100, 101, 102, 103, 104, 105, 106, 107: // 8-bit bright background color + if w.Profile < ANSI { + continue + } + style = style.BackgroundColor( + w.Profile.Convert(ansi.BasicColor(param - 100 + 8))) //nolint:gosec + default: + // If this is not a color attribute, just append it to the style. 
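+ // Non-color attributes such as bold (1), italic (3), and
+ // underline (4) pass through unchanged.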
+ style = append(style, strconv.Itoa(param)) + } + } + + _, _ = buf.WriteString(style.String()) +} diff --git a/vendor/github.com/charmbracelet/lipgloss/.gitignore b/vendor/github.com/charmbracelet/lipgloss/.gitignore index 53e1c2b75..db482015d 100644 --- a/vendor/github.com/charmbracelet/lipgloss/.gitignore +++ b/vendor/github.com/charmbracelet/lipgloss/.gitignore @@ -1 +1,2 @@ ssh_example_ed25519* +dist/ diff --git a/vendor/github.com/charmbracelet/lipgloss/.golangci.yml b/vendor/github.com/charmbracelet/lipgloss/.golangci.yml index 3affce914..90c5c08bd 100644 --- a/vendor/github.com/charmbracelet/lipgloss/.golangci.yml +++ b/vendor/github.com/charmbracelet/lipgloss/.golangci.yml @@ -15,12 +15,22 @@ issues: linters: enable: - bodyclose - - exportloopref + - exhaustive + - goconst + - godot + - godox - gofumpt - goimports + - gomoddirectives + - goprintffuncname - gosec + - misspell + - nakedret + - nestif - nilerr - - predeclared + - noctx + - nolintlint + - prealloc - revive - rowserrcheck - sqlclosecheck @@ -28,3 +38,4 @@ linters: - unconvert - unparam - whitespace + - wrapcheck diff --git a/vendor/github.com/charmbracelet/lipgloss/.goreleaser.yml b/vendor/github.com/charmbracelet/lipgloss/.goreleaser.yml index c61970e07..3353d0202 100644 --- a/vendor/github.com/charmbracelet/lipgloss/.goreleaser.yml +++ b/vendor/github.com/charmbracelet/lipgloss/.goreleaser.yml @@ -1,5 +1,5 @@ +# yaml-language-server: $schema=https://goreleaser.com/static/schema-pro.json +version: 2 includes: - from_url: url: charmbracelet/meta/main/goreleaser-lib.yaml -# yaml-language-server: $schema=https://goreleaser.com/static/schema-pro.json - diff --git a/vendor/github.com/charmbracelet/lipgloss/README.md b/vendor/github.com/charmbracelet/lipgloss/README.md index 42661d6ca..cee2371ce 100644 --- a/vendor/github.com/charmbracelet/lipgloss/README.md +++ b/vendor/github.com/charmbracelet/lipgloss/README.md @@ -10,7 +10,7 @@ Style definitions for nice terminal layouts. Built with TUIs in mind. -![Lip Gloss example](https://stuff.charm.sh/lipgloss/lipgloss-example.png) +![Lip Gloss example](https://github.com/user-attachments/assets/7950b1c1-e0e3-427e-8e7d-6f7f6ad17ca7) Lip Gloss takes an expressive, declarative approach to terminal rendering. Users familiar with CSS will feel at home with Lip Gloss. @@ -402,7 +402,7 @@ block := lipgloss.Place(30, 80, lipgloss.Right, lipgloss.Bottom, fancyStyledPara You can also style the whitespace. For details, see [the docs][docs]. -### Rendering Tables +## Rendering Tables Lip Gloss ships with a table rendering sub-package. @@ -425,17 +425,28 @@ rows := [][]string{ Use the table package to style and render the table. ```go +var ( + purple = lipgloss.Color("99") + gray = lipgloss.Color("245") + lightGray = lipgloss.Color("241") + + headerStyle = lipgloss.NewStyle().Foreground(purple).Bold(true).Align(lipgloss.Center) + cellStyle = lipgloss.NewStyle().Padding(0, 1).Width(14) + oddRowStyle = cellStyle.Foreground(gray) + evenRowStyle = cellStyle.Foreground(lightGray) +) + t := table.New(). Border(lipgloss.NormalBorder()). - BorderStyle(lipgloss.NewStyle().Foreground(lipgloss.Color("99"))). + BorderStyle(lipgloss.NewStyle().Foreground(purple)). StyleFunc(func(row, col int) lipgloss.Style { switch { - case row == 0: - return HeaderStyle + case row == table.HeaderRow: + return headerStyle case row%2 == 0: - return EvenRowStyle + return evenRowStyle default: - return OddRowStyle + return oddRowStyle } }). Headers("LANGUAGE", "FORMAL", "INFORMAL"). 
@@ -453,6 +464,45 @@ fmt.Println(t) ![Table Example](https://github.com/charmbracelet/lipgloss/assets/42545625/6e4b70c4-f494-45da-a467-bdd27df30d5d) +> [!WARNING] +> Table `Rows` need to be declared before `Offset` otherwise it does nothing. + +### Table Borders + +There are helpers to generate tables in markdown or ASCII style: + +#### Markdown Table + +```go +table.New().Border(lipgloss.MarkdownBorder()).BorderTop(false).BorderBottom(false) +``` + +``` +| LANGUAGE | FORMAL | INFORMAL | +|----------|--------------|-----------| +| Chinese | Nǐn hǎo | Nǐ hǎo | +| French | Bonjour | Salut | +| Russian | Zdravstvuyte | Privet | +| Spanish | Hola | ¿Qué tal? | +``` + +#### ASCII Table + +```go +table.New().Border(lipgloss.ASCIIBorder()) +``` + +``` ++----------+--------------+-----------+ +| LANGUAGE | FORMAL | INFORMAL | ++----------+--------------+-----------+ +| Chinese | Nǐn hǎo | Nǐ hǎo | +| French | Bonjour | Salut | +| Russian | Zdravstvuyte | Privet | +| Spanish | Hola | ¿Qué tal? | ++----------+--------------+-----------+ +``` + For more on tables see [the docs](https://pkg.go.dev/github.com/charmbracelet/lipgloss?tab=doc) and [examples](https://github.com/charmbracelet/lipgloss/tree/master/examples/table). ## Rendering Lists @@ -483,15 +533,15 @@ Lists have the ability to nest. ```go l := list.New( - "A", list.New("Artichoke"), - "B", list.New("Baking Flour", "Bananas", "Barley", "Bean Sprouts"), - "C", list.New("Cashew Apple", "Cashews", "Coconut Milk", "Curry Paste", "Currywurst"), - "D", list.New("Dill", "Dragonfruit", "Dried Shrimp"), - "E", list.New("Eggs"), - "F", list.New("Fish Cake", "Furikake"), - "J", list.New("Jicama"), - "K", list.New("Kohlrabi"), - "L", list.New("Leeks", "Lentils", "Licorice Root"), + "A", list.New("Artichoke"), + "B", list.New("Baking Flour", "Bananas", "Barley", "Bean Sprouts"), + "C", list.New("Cashew Apple", "Cashews", "Coconut Milk", "Curry Paste", "Currywurst"), + "D", list.New("Dill", "Dragonfruit", "Dried Shrimp"), + "E", list.New("Eggs"), + "F", list.New("Fish Cake", "Furikake"), + "J", list.New("Jicama"), + "K", list.New("Kohlrabi"), + "L", list.New("Leeks", "Lentils", "Licorice Root"), ) ``` @@ -513,15 +563,15 @@ enumeratorStyle := lipgloss.NewStyle().Foreground(lipgloss.Color("99")).MarginRi itemStyle := lipgloss.NewStyle().Foreground(lipgloss.Color("212")).MarginRight(1) l := list.New( - "Glossier", - "Claire’s Boutique", - "Nyx", - "Mac", - "Milk", -). - Enumerator(list.Roman). - EnumeratorStyle(enumeratorStyle). - ItemStyle(itemStyle) + "Glossier", + "Claire’s Boutique", + "Nyx", + "Mac", + "Milk", + ). + Enumerator(list.Roman). + EnumeratorStyle(enumeratorStyle). + ItemStyle(itemStyle) ``` Print the list. @@ -574,7 +624,7 @@ Define a new tree. ```go t := tree.Root("."). - Child("A", "B", "C") + Child("A", "B", "C") ``` Print the tree. @@ -592,18 +642,20 @@ Trees have the ability to nest. ```go t := tree.Root("."). - Child("Item 1"). - Child( - tree.Root("Item 2"). - Child("Item 2.1"). - Child("Item 2.2"). - Child("Item 2.3"), - ). - Child( - tree.Root("Item 3"). - Child("Item 3.1"). - Child("Item 3.2"), - ) + Child("macOS"). + Child( + tree.New(). + Root("Linux"). + Child("NixOS"). + Child("Arch Linux (btw)"). + Child("Void Linux"), + ). + Child( + tree.New(). + Root("BSD"). + Child("FreeBSD"). + Child("OpenBSD"), + ) ``` Print the tree. @@ -613,34 +665,40 @@ fmt.Println(t) ```
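+
+With the default enumerator the output looks roughly like the following
+(exact glyphs and colors depend on your terminal):
+
+```
+.
+├── macOS
+├── Linux
+│   ├── NixOS
+│   ├── Arch Linux (btw)
+│   └── Void Linux
+└── BSD
+    ├── FreeBSD
+    └── OpenBSD
+```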

Tree Example (simple)

Trees can be customized via their enumeration function as well as using `lipgloss.Style`s. ```go -enumeratorStyle := lipgloss.NewStyle().Foreground(lipgloss.Color("99")).MarginRight(1) -itemStyle := lipgloss.NewStyle().Foreground(lipgloss.Color("212")).MarginRight(1) - -t := tree.Root("Makeup"). - Child( - "Glossier", - "Claire’s Boutique", - "Nyx", - "Mac", - "Milk", - ). - Enumerator(tree.RoundedEnumerator). - EnumeratorStyle(enumeratorStyle). - ItemStyle(itemStyle). - RootStyle(lipgloss.NewStyle().Foreground(lipgloss.Color("#04B575"))) +enumeratorStyle := lipgloss.NewStyle().Foreground(lipgloss.Color("63")).MarginRight(1) +rootStyle := lipgloss.NewStyle().Foreground(lipgloss.Color("35")) +itemStyle := lipgloss.NewStyle().Foreground(lipgloss.Color("212")) + +t := tree. + Root("⁜ Makeup"). + Child( + "Glossier", + "Fenty Beauty", + tree.New().Child( + "Gloss Bomb Universal Lip Luminizer", + "Hot Cheeks Velour Blushlighter", + ), + "Nyx", + "Mac", + "Milk", + ). + Enumerator(tree.RoundedEnumerator). + EnumeratorStyle(enumeratorStyle). + RootStyle(rootStyle). + ItemStyle(itemStyle) ``` Print the tree.

Tree Example (makeup)

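+With the rounded enumerator the branches are drawn with rounded corners,
+roughly:
+
+```
+⁜ Makeup
+├── Glossier
+├── Fenty Beauty
+│   ├── Gloss Bomb Universal Lip Luminizer
+│   ╰── Hot Cheeks Velour Blushlighter
+├── Nyx
+├── Mac
+╰── Milk
+```
+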
The predefined enumerators for trees are `DefaultEnumerator` and `RoundedEnumerator`. @@ -726,6 +784,12 @@ the stylesheet-based Markdown renderer. [glamour]: https://github.com/charmbracelet/glamour +## Contributing + +See [contributing][contribute]. + +[contribute]: https://github.com/charmbracelet/lipgloss/contribute + ## Feedback We’d love to hear your thoughts on this project. Feel free to drop us a note! diff --git a/vendor/github.com/charmbracelet/lipgloss/Taskfile.yaml b/vendor/github.com/charmbracelet/lipgloss/Taskfile.yaml new file mode 100644 index 000000000..0b4a7711a --- /dev/null +++ b/vendor/github.com/charmbracelet/lipgloss/Taskfile.yaml @@ -0,0 +1,19 @@ +# https://taskfile.dev + +version: '3' + +tasks: + lint: + desc: Run base linters + cmds: + - golangci-lint run + + test: + desc: Run tests + cmds: + - go test ./... {{.CLI_ARGS}} + + test:table: + desc: Run table tests + cmds: + - go test ./table {{.CLI_ARGS}} diff --git a/vendor/github.com/charmbracelet/lipgloss/align.go b/vendor/github.com/charmbracelet/lipgloss/align.go index 805d1e0d2..ce654b232 100644 --- a/vendor/github.com/charmbracelet/lipgloss/align.go +++ b/vendor/github.com/charmbracelet/lipgloss/align.go @@ -30,8 +30,8 @@ func alignTextHorizontal(str string, pos Position, width int, style *termenv.Sty l = s + l case Center: // Note: remainder goes on the right. - left := shortAmount / 2 //nolint:gomnd - right := left + shortAmount%2 //nolint:gomnd + left := shortAmount / 2 //nolint:mnd + right := left + shortAmount%2 //nolint:mnd leftSpaces := strings.Repeat(" ", left) rightSpaces := strings.Repeat(" ", right) @@ -69,7 +69,7 @@ func alignTextVertical(str string, pos Position, height int, _ *termenv.Style) s case Top: return str + strings.Repeat("\n", height-strHeight) case Center: - topPadding, bottomPadding := (height-strHeight)/2, (height-strHeight)/2 //nolint:gomnd + topPadding, bottomPadding := (height-strHeight)/2, (height-strHeight)/2 //nolint:mnd if strHeight+topPadding+bottomPadding > height { topPadding-- } else if strHeight+topPadding+bottomPadding < height { diff --git a/vendor/github.com/charmbracelet/lipgloss/borders.go b/vendor/github.com/charmbracelet/lipgloss/borders.go index deb6b35ac..b36f87438 100644 --- a/vendor/github.com/charmbracelet/lipgloss/borders.go +++ b/vendor/github.com/charmbracelet/lipgloss/borders.go @@ -100,14 +100,19 @@ var ( } blockBorder = Border{ - Top: "█", - Bottom: "█", - Left: "█", - Right: "█", - TopLeft: "█", - TopRight: "█", - BottomLeft: "█", - BottomRight: "█", + Top: "█", + Bottom: "█", + Left: "█", + Right: "█", + TopLeft: "█", + TopRight: "█", + BottomLeft: "█", + BottomRight: "█", + MiddleLeft: "█", + MiddleRight: "█", + Middle: "█", + MiddleTop: "█", + MiddleBottom: "█", } outerHalfBlockBorder = Border{ @@ -179,6 +184,38 @@ var ( MiddleTop: " ", MiddleBottom: " ", } + + markdownBorder = Border{ + Top: "-", + Bottom: "-", + Left: "|", + Right: "|", + TopLeft: "|", + TopRight: "|", + BottomLeft: "|", + BottomRight: "|", + MiddleLeft: "|", + MiddleRight: "|", + Middle: "|", + MiddleTop: "|", + MiddleBottom: "|", + } + + asciiBorder = Border{ + Top: "-", + Bottom: "-", + Left: "|", + Right: "|", + TopLeft: "+", + TopRight: "+", + BottomLeft: "+", + BottomRight: "+", + MiddleLeft: "+", + MiddleRight: "+", + Middle: "+", + MiddleTop: "+", + MiddleBottom: "+", + } ) // NormalBorder returns a standard-type border with a normal weight and 90 @@ -226,13 +263,23 @@ func HiddenBorder() Border { return hiddenBorder } +// MarkdownBorder return a table border in markdown 
style. +// +// Make sure to disable top and bottom border for the best result. This will +// ensure that the output is valid markdown. +// +// table.New().Border(lipgloss.MarkdownBorder()).BorderTop(false).BorderBottom(false) +func MarkdownBorder() Border { + return markdownBorder +} + +// ASCIIBorder returns a table border with ASCII characters. +func ASCIIBorder() Border { + return asciiBorder +} + func (s Style) applyBorder(str string) string { var ( - topSet = s.isSet(borderTopKey) - rightSet = s.isSet(borderRightKey) - bottomSet = s.isSet(borderBottomKey) - leftSet = s.isSet(borderLeftKey) - border = s.getBorderStyle() hasTop = s.getAsBool(borderTopKey, false) hasRight = s.getAsBool(borderRightKey, false) @@ -252,7 +299,7 @@ func (s Style) applyBorder(str string) string { // If a border is set and no sides have been specifically turned on or off // render borders on all sides. - if border != noBorder && !(topSet || rightSet || bottomSet || leftSet) { + if s.implicitBorders() { hasTop = true hasRight = true hasBottom = true diff --git a/vendor/github.com/charmbracelet/lipgloss/color.go b/vendor/github.com/charmbracelet/lipgloss/color.go index 5dfb3cfe4..6caf3a3d5 100644 --- a/vendor/github.com/charmbracelet/lipgloss/color.go +++ b/vendor/github.com/charmbracelet/lipgloss/color.go @@ -35,7 +35,7 @@ func (NoColor) color(*Renderer) termenv.Color { // // Deprecated. func (n NoColor) RGBA() (r, g, b, a uint32) { - return 0x0, 0x0, 0x0, 0xFFFF //nolint:gomnd + return 0x0, 0x0, 0x0, 0xFFFF //nolint:mnd } // Color specifies a color by hex or ANSI value. For example: diff --git a/vendor/github.com/charmbracelet/lipgloss/get.go b/vendor/github.com/charmbracelet/lipgloss/get.go index 9c2f06fe1..422b4ce95 100644 --- a/vendor/github.com/charmbracelet/lipgloss/get.go +++ b/vendor/github.com/charmbracelet/lipgloss/get.go @@ -300,7 +300,7 @@ func (s Style) GetBorderTopWidth() int { // runes of varying widths, the widest rune is returned. If no border exists on // the top edge, 0 is returned. func (s Style) GetBorderTopSize() int { - if !s.getAsBool(borderTopKey, false) { + if !s.getAsBool(borderTopKey, false) && !s.implicitBorders() { return 0 } return s.getBorderStyle().GetTopSize() @@ -310,7 +310,7 @@ func (s Style) GetBorderTopSize() int { // runes of varying widths, the widest rune is returned. If no border exists on // the left edge, 0 is returned. func (s Style) GetBorderLeftSize() int { - if !s.getAsBool(borderLeftKey, false) { + if !s.getAsBool(borderLeftKey, false) && !s.implicitBorders() { return 0 } return s.getBorderStyle().GetLeftSize() @@ -320,7 +320,7 @@ func (s Style) GetBorderLeftSize() int { // contain runes of varying widths, the widest rune is returned. If no border // exists on the left edge, 0 is returned. func (s Style) GetBorderBottomSize() int { - if !s.getAsBool(borderBottomKey, false) { + if !s.getAsBool(borderBottomKey, false) && !s.implicitBorders() { return 0 } return s.getBorderStyle().GetBottomSize() @@ -330,7 +330,7 @@ func (s Style) GetBorderBottomSize() int { // contain runes of varying widths, the widest rune is returned. If no border // exists on the right edge, 0 is returned. func (s Style) GetBorderRightSize() int { - if !s.getAsBool(borderRightKey, false) { + if !s.getAsBool(borderRightKey, false) && !s.implicitBorders() { return 0 } return s.getBorderStyle().GetRightSize() @@ -519,6 +519,20 @@ func (s Style) getBorderStyle() Border { return s.borderStyle } +// Returns whether or not the style has implicit borders. 
This happens when +// a border style has been set but no border sides have been explicitly turned +// on or off. +func (s Style) implicitBorders() bool { + var ( + borderStyle = s.getBorderStyle() + topSet = s.isSet(borderTopKey) + rightSet = s.isSet(borderRightKey) + bottomSet = s.isSet(borderBottomKey) + leftSet = s.isSet(borderLeftKey) + ) + return borderStyle != noBorder && !(topSet || rightSet || bottomSet || leftSet) +} + func (s Style) getAsTransform(propKey) func(string) string { if !s.isSet(transformKey) { return nil diff --git a/vendor/github.com/charmbracelet/lipgloss/ranges.go b/vendor/github.com/charmbracelet/lipgloss/ranges.go new file mode 100644 index 000000000..d17169987 --- /dev/null +++ b/vendor/github.com/charmbracelet/lipgloss/ranges.go @@ -0,0 +1,48 @@ +package lipgloss + +import ( + "strings" + + "github.com/charmbracelet/x/ansi" +) + +// StyleRanges allows to, given a string, style ranges of it differently. +// The function will take into account existing styles. +// Ranges should not overlap. +func StyleRanges(s string, ranges ...Range) string { + if len(ranges) == 0 { + return s + } + + var buf strings.Builder + lastIdx := 0 + stripped := ansi.Strip(s) + + // Use Truncate and TruncateLeft to style match.MatchedIndexes without + // losing the original option style: + for _, rng := range ranges { + // Add the text before this match + if rng.Start > lastIdx { + buf.WriteString(ansi.Cut(s, lastIdx, rng.Start)) + } + // Add the matched range with its highlight + buf.WriteString(rng.Style.Render(ansi.Cut(stripped, rng.Start, rng.End))) + lastIdx = rng.End + } + + // Add any remaining text after the last match + buf.WriteString(ansi.TruncateLeft(s, lastIdx, "")) + + return buf.String() +} + +// NewRange returns a range that can be used with [StyleRanges]. +func NewRange(start, end int, style Style) Range { + return Range{start, end, style} +} + +// Range to be used with [StyleRanges]. 
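+// Start and End are indexes into the string, and Style is applied to the
+// characters between them.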
+type Range struct { + Start, End int + Style Style +} diff --git a/vendor/github.com/charmbracelet/lipgloss/set.go b/vendor/github.com/charmbracelet/lipgloss/set.go index ed6e272c6..fde38faec 100644 --- a/vendor/github.com/charmbracelet/lipgloss/set.go +++ b/vendor/github.com/charmbracelet/lipgloss/set.go @@ -710,19 +710,19 @@ func whichSidesInt(i ...int) (top, right, bottom, left int, ok bool) { left = i[0] right = i[0] ok = true - case 2: //nolint:gomnd + case 2: //nolint:mnd top = i[0] bottom = i[0] left = i[1] right = i[1] ok = true - case 3: //nolint:gomnd + case 3: //nolint:mnd top = i[0] left = i[1] right = i[1] bottom = i[2] ok = true - case 4: //nolint:gomnd + case 4: //nolint:mnd top = i[0] right = i[1] bottom = i[2] @@ -743,19 +743,19 @@ func whichSidesBool(i ...bool) (top, right, bottom, left bool, ok bool) { left = i[0] right = i[0] ok = true - case 2: //nolint:gomnd + case 2: //nolint:mnd top = i[0] bottom = i[0] left = i[1] right = i[1] ok = true - case 3: //nolint:gomnd + case 3: //nolint:mnd top = i[0] left = i[1] right = i[1] bottom = i[2] ok = true - case 4: //nolint:gomnd + case 4: //nolint:mnd top = i[0] right = i[1] bottom = i[2] @@ -776,19 +776,19 @@ func whichSidesColor(i ...TerminalColor) (top, right, bottom, left TerminalColor left = i[0] right = i[0] ok = true - case 2: //nolint:gomnd + case 2: //nolint:mnd top = i[0] bottom = i[0] left = i[1] right = i[1] ok = true - case 3: //nolint:gomnd + case 3: //nolint:mnd top = i[0] left = i[1] right = i[1] bottom = i[2] ok = true - case 4: //nolint:gomnd + case 4: //nolint:mnd top = i[0] right = i[1] bottom = i[2] diff --git a/vendor/github.com/charmbracelet/lipgloss/style.go b/vendor/github.com/charmbracelet/lipgloss/style.go index 4343d3cd6..59fa3ab21 100644 --- a/vendor/github.com/charmbracelet/lipgloss/style.go +++ b/vendor/github.com/charmbracelet/lipgloss/style.go @@ -5,6 +5,7 @@ import ( "unicode" "github.com/charmbracelet/x/ansi" + "github.com/charmbracelet/x/cellbuf" "github.com/muesli/termenv" ) @@ -353,6 +354,8 @@ func (s Style) Render(strs ...string) string { // Potentially convert tabs to spaces str = s.maybeConvertTabs(str) + // carriage returns can cause strange behaviour when rendering. + str = strings.ReplaceAll(str, "\r\n", "\n") // Strip newlines in single line mode if inline { @@ -362,7 +365,7 @@ func (s Style) Render(strs ...string) string { // Word wrap if !inline && width > 0 { wrapAt := width - leftPadding - rightPadding - str = ansi.Wrap(str, wrapAt, "") + str = cellbuf.Wrap(str, wrapAt, "") } // Render core text @@ -429,7 +432,7 @@ func (s Style) Render(strs ...string) string { { numLines := strings.Count(str, "\n") - if !(numLines == 0 && width == 0) { + if numLines != 0 || width != 0 { var st *termenv.Style if colorWhitespace || styleWhitespace { st = &teWhitespace @@ -562,14 +565,14 @@ func pad(str string, n int, style *termenv.Style) string { return b.String() } -func max(a, b int) int { //nolint:unparam +func max(a, b int) int { //nolint:unparam,predeclared if a > b { return a } return b } -func min(a, b int) int { +func min(a, b int) int { //nolint:predeclared if a < b { return a } diff --git a/vendor/github.com/charmbracelet/x/ansi/background.go b/vendor/github.com/charmbracelet/x/ansi/background.go index f519af08f..2383cf09f 100644 --- a/vendor/github.com/charmbracelet/x/ansi/background.go +++ b/vendor/github.com/charmbracelet/x/ansi/background.go @@ -1,9 +1,73 @@ package ansi import ( + "fmt" "image/color" ) +// Colorizer is a [color.Color] interface that can be formatted as a string. 
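+// Implementations in this package format colors as hex strings and as
+// XParseColor rgb:/rgba: strings.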
+type Colorizer interface { + color.Color + fmt.Stringer +} + +// HexColorizer is a [color.Color] that can be formatted as a hex string. +type HexColorizer struct{ color.Color } + +var _ Colorizer = HexColorizer{} + +// String returns the color as a hex string. If the color is nil, an empty +// string is returned. +func (h HexColorizer) String() string { + if h.Color == nil { + return "" + } + r, g, b, _ := h.RGBA() + // Get the lower 8 bits + r &= 0xff + g &= 0xff + b &= 0xff + return fmt.Sprintf("#%02x%02x%02x", uint8(r), uint8(g), uint8(b)) //nolint:gosec +} + +// XRGBColorizer is a [color.Color] that can be formatted as an XParseColor +// rgb: string. +// +// See: https://linux.die.net/man/3/xparsecolor +type XRGBColorizer struct{ color.Color } + +var _ Colorizer = XRGBColorizer{} + +// String returns the color as an XParseColor rgb: string. If the color is nil, +// an empty string is returned. +func (x XRGBColorizer) String() string { + if x.Color == nil { + return "" + } + r, g, b, _ := x.RGBA() + // Get the lower 8 bits + return fmt.Sprintf("rgb:%04x/%04x/%04x", r, g, b) +} + +// XRGBAColorizer is a [color.Color] that can be formatted as an XParseColor +// rgba: string. +// +// See: https://linux.die.net/man/3/xparsecolor +type XRGBAColorizer struct{ color.Color } + +var _ Colorizer = XRGBAColorizer{} + +// String returns the color as an XParseColor rgba: string. If the color is nil, +// an empty string is returned. +func (x XRGBAColorizer) String() string { + if x.Color == nil { + return "" + } + r, g, b, a := x.RGBA() + // Get the lower 8 bits + return fmt.Sprintf("rgba:%04x/%04x/%04x/%04x", r, g, b, a) +} + // SetForegroundColor returns a sequence that sets the default terminal // foreground color. // @@ -14,7 +78,16 @@ import ( // // See: https://invisible-island.net/xterm/ctlseqs/ctlseqs.html#h3-Operating-System-Commands func SetForegroundColor(c color.Color) string { - return "\x1b]10;" + colorToHexString(c) + "\x07" + var s string + switch c := c.(type) { + case Colorizer: + s = c.String() + case fmt.Stringer: + s = c.String() + default: + s = HexColorizer{c}.String() + } + return "\x1b]10;" + s + "\x07" } // RequestForegroundColor is a sequence that requests the current default @@ -23,6 +96,12 @@ func SetForegroundColor(c color.Color) string { // See: https://invisible-island.net/xterm/ctlseqs/ctlseqs.html#h3-Operating-System-Commands const RequestForegroundColor = "\x1b]10;?\x07" +// ResetForegroundColor is a sequence that resets the default terminal +// foreground color. +// +// See: https://invisible-island.net/xterm/ctlseqs/ctlseqs.html#h3-Operating-System-Commands +const ResetForegroundColor = "\x1b]110\x07" + // SetBackgroundColor returns a sequence that sets the default terminal // background color. 
// @@ -33,7 +112,16 @@ const RequestForegroundColor = "\x1b]10;?\x07" // // See: https://invisible-island.net/xterm/ctlseqs/ctlseqs.html#h3-Operating-System-Commands func SetBackgroundColor(c color.Color) string { - return "\x1b]11;" + colorToHexString(c) + "\x07" + var s string + switch c := c.(type) { + case Colorizer: + s = c.String() + case fmt.Stringer: + s = c.String() + default: + s = HexColorizer{c}.String() + } + return "\x1b]11;" + s + "\x07" } // RequestBackgroundColor is a sequence that requests the current default @@ -42,6 +130,12 @@ func SetBackgroundColor(c color.Color) string { // See: https://invisible-island.net/xterm/ctlseqs/ctlseqs.html#h3-Operating-System-Commands const RequestBackgroundColor = "\x1b]11;?\x07" +// ResetBackgroundColor is a sequence that resets the default terminal +// background color. +// +// See: https://invisible-island.net/xterm/ctlseqs/ctlseqs.html#h3-Operating-System-Commands +const ResetBackgroundColor = "\x1b]111\x07" + // SetCursorColor returns a sequence that sets the terminal cursor color. // // OSC 12 ; color ST @@ -51,7 +145,16 @@ const RequestBackgroundColor = "\x1b]11;?\x07" // // See: https://invisible-island.net/xterm/ctlseqs/ctlseqs.html#h3-Operating-System-Commands func SetCursorColor(c color.Color) string { - return "\x1b]12;" + colorToHexString(c) + "\x07" + var s string + switch c := c.(type) { + case Colorizer: + s = c.String() + case fmt.Stringer: + s = c.String() + default: + s = HexColorizer{c}.String() + } + return "\x1b]12;" + s + "\x07" } // RequestCursorColor is a sequence that requests the current terminal cursor @@ -59,3 +162,8 @@ func SetCursorColor(c color.Color) string { // // See: https://invisible-island.net/xterm/ctlseqs/ctlseqs.html#h3-Operating-System-Commands const RequestCursorColor = "\x1b]12;?\x07" + +// ResetCursorColor is a sequence that resets the terminal cursor color. +// +// See: https://invisible-island.net/xterm/ctlseqs/ctlseqs.html#h3-Operating-System-Commands +const ResetCursorColor = "\x1b]112\x07" diff --git a/vendor/github.com/charmbracelet/x/ansi/c0.go b/vendor/github.com/charmbracelet/x/ansi/c0.go index 13e3c6c31..28ff7c2a3 100644 --- a/vendor/github.com/charmbracelet/x/ansi/c0.go +++ b/vendor/github.com/charmbracelet/x/ansi/c0.go @@ -69,4 +69,11 @@ const ( RS = 0x1E // US is the unit separator character (Caret: ^_). US = 0x1F + + // LS0 is the locking shift 0 character. + // This is an alias for [SI]. + LS0 = SI + // LS1 is the locking shift 1 character. + // This is an alias for [SO]. + LS1 = SO ) diff --git a/vendor/github.com/charmbracelet/x/ansi/charset.go b/vendor/github.com/charmbracelet/x/ansi/charset.go new file mode 100644 index 000000000..50fff51fc --- /dev/null +++ b/vendor/github.com/charmbracelet/x/ansi/charset.go @@ -0,0 +1,55 @@ +package ansi + +// SelectCharacterSet sets the G-set character designator to the specified +// character set. +// +// ESC Ps Pd +// +// Where Ps is the G-set character designator, and Pd is the identifier. +// For 94-character sets, the designator can be one of: +// - ( G0 +// - ) G1 +// - * G2 +// - + G3 +// +// For 96-character sets, the designator can be one of: +// - - G1 +// - . 
G2 +// - / G3 +// +// Some common 94-character sets are: +// - 0 DEC Special Drawing Set +// - A United Kingdom (UK) +// - B United States (USASCII) +// +// Examples: +// +// ESC ( B Select character set G0 = United States (USASCII) +// ESC ( 0 Select character set G0 = Special Character and Line Drawing Set +// ESC ) 0 Select character set G1 = Special Character and Line Drawing Set +// ESC * A Select character set G2 = United Kingdom (UK) +// +// See: https://vt100.net/docs/vt510-rm/SCS.html +func SelectCharacterSet(gset byte, charset byte) string { + return "\x1b" + string(gset) + string(charset) +} + +// SCS is an alias for SelectCharacterSet. +func SCS(gset byte, charset byte) string { + return SelectCharacterSet(gset, charset) +} + +// Locking Shift 1 Right (LS1R) shifts G1 into GR character set. +const LS1R = "\x1b~" + +// Locking Shift 2 (LS2) shifts G2 into GL character set. +const LS2 = "\x1bn" + +// Locking Shift 2 Right (LS2R) shifts G2 into GR character set. +const LS2R = "\x1b}" + +// Locking Shift 3 (LS3) shifts G3 into GL character set. +const LS3 = "\x1bo" + +// Locking Shift 3 Right (LS3R) shifts G3 into GR character set. +const LS3R = "\x1b|" diff --git a/vendor/github.com/charmbracelet/x/ansi/color.go b/vendor/github.com/charmbracelet/x/ansi/color.go index 2ff78bd7f..77f8a08d1 100644 --- a/vendor/github.com/charmbracelet/x/ansi/color.go +++ b/vendor/github.com/charmbracelet/x/ansi/color.go @@ -178,7 +178,7 @@ func ansiToRGB(ansi uint32) (uint32, uint32, uint32) { // // r, g, b := hexToRGB(0x0000FF) func hexToRGB(hex uint32) (uint32, uint32, uint32) { - return hex >> 16, hex >> 8 & 0xff, hex & 0xff + return hex >> 16 & 0xff, hex >> 8 & 0xff, hex & 0xff } // toRGBA converts an RGB 8-bit color values to 32-bit color values suitable diff --git a/vendor/github.com/charmbracelet/x/ansi/csi.go b/vendor/github.com/charmbracelet/x/ansi/csi.go deleted file mode 100644 index b7e5bd2da..000000000 --- a/vendor/github.com/charmbracelet/x/ansi/csi.go +++ /dev/null @@ -1,141 +0,0 @@ -package ansi - -import ( - "bytes" - "strconv" - - "github.com/charmbracelet/x/ansi/parser" -) - -// CsiSequence represents a control sequence introducer (CSI) sequence. -// -// The sequence starts with a CSI sequence, CSI (0x9B) in a 8-bit environment -// or ESC [ (0x1B 0x5B) in a 7-bit environment, followed by any number of -// parameters in the range of 0x30-0x3F, then by any number of intermediate -// byte in the range of 0x20-0x2F, then finally with a single final byte in the -// range of 0x20-0x7E. -// -// CSI P..P I..I F -// -// See ECMA-48 § 5.4. -type CsiSequence struct { - // Params contains the raw parameters of the sequence. - // This is a slice of integers, where each integer is a 32-bit integer - // containing the parameter value in the lower 31 bits and a flag in the - // most significant bit indicating whether there are more sub-parameters. - Params []int - - // Cmd contains the raw command of the sequence. - // The command is a 32-bit integer containing the CSI command byte in the - // lower 8 bits, the private marker in the next 8 bits, and the intermediate - // byte in the next 8 bits. - // - // CSI ? u - // - // Is represented as: - // - // 'u' | '?' << 8 - Cmd int -} - -var _ Sequence = CsiSequence{} - -// Marker returns the marker byte of the CSI sequence. -// This is always gonna be one of the following '<' '=' '>' '?' and in the -// range of 0x3C-0x3F. -// Zero is returned if the sequence does not have a marker. 
-func (s CsiSequence) Marker() int { - return parser.Marker(s.Cmd) -} - -// Intermediate returns the intermediate byte of the CSI sequence. -// An intermediate byte is in the range of 0x20-0x2F. This includes these -// characters from ' ', '!', '"', '#', '$', '%', '&', ”', '(', ')', '*', '+', -// ',', '-', '.', '/'. -// Zero is returned if the sequence does not have an intermediate byte. -func (s CsiSequence) Intermediate() int { - return parser.Intermediate(s.Cmd) -} - -// Command returns the command byte of the CSI sequence. -func (s CsiSequence) Command() int { - return parser.Command(s.Cmd) -} - -// Param returns the parameter at the given index. -// It returns -1 if the parameter does not exist. -func (s CsiSequence) Param(i int) int { - return parser.Param(s.Params, i) -} - -// HasMore returns true if the parameter has more sub-parameters. -func (s CsiSequence) HasMore(i int) bool { - return parser.HasMore(s.Params, i) -} - -// Subparams returns the sub-parameters of the given parameter. -// It returns nil if the parameter does not exist. -func (s CsiSequence) Subparams(i int) []int { - return parser.Subparams(s.Params, i) -} - -// Len returns the number of parameters in the sequence. -// This will return the number of parameters in the sequence, excluding any -// sub-parameters. -func (s CsiSequence) Len() int { - return parser.Len(s.Params) -} - -// Range iterates over the parameters of the sequence and calls the given -// function for each parameter. -// The function should return false to stop the iteration. -func (s CsiSequence) Range(fn func(i int, param int, hasMore bool) bool) { - parser.Range(s.Params, fn) -} - -// Clone returns a copy of the CSI sequence. -func (s CsiSequence) Clone() Sequence { - return CsiSequence{ - Params: append([]int(nil), s.Params...), - Cmd: s.Cmd, - } -} - -// String returns a string representation of the sequence. -// The string will always be in the 7-bit format i.e (ESC [ P..P I..I F). -func (s CsiSequence) String() string { - return s.buffer().String() -} - -// buffer returns a buffer containing the sequence. -func (s CsiSequence) buffer() *bytes.Buffer { - var b bytes.Buffer - b.WriteString("\x1b[") - if m := s.Marker(); m != 0 { - b.WriteByte(byte(m)) - } - s.Range(func(i, param int, hasMore bool) bool { - if param >= 0 { - b.WriteString(strconv.Itoa(param)) - } - if i < len(s.Params)-1 { - if hasMore { - b.WriteByte(':') - } else { - b.WriteByte(';') - } - } - return true - }) - if i := s.Intermediate(); i != 0 { - b.WriteByte(byte(i)) - } - b.WriteByte(byte(s.Command())) - return &b -} - -// Bytes returns the byte representation of the sequence. -// The bytes will always be in the 7-bit format i.e (ESC [ P..P I..I F). -func (s CsiSequence) Bytes() []byte { - return s.buffer().Bytes() -} diff --git a/vendor/github.com/charmbracelet/x/ansi/ctrl.go b/vendor/github.com/charmbracelet/x/ansi/ctrl.go index 21beb9cd1..8ca744cf2 100644 --- a/vendor/github.com/charmbracelet/x/ansi/ctrl.go +++ b/vendor/github.com/charmbracelet/x/ansi/ctrl.go @@ -1,12 +1,62 @@ package ansi +import ( + "strconv" + "strings" +) + +// RequestNameVersion (XTVERSION) is a control sequence that requests the +// terminal's name and version. It responds with a DSR sequence identifying the +// terminal. 
+// +// CSI > 0 q +// DCS > | text ST +// +// See https://invisible-island.net/xterm/ctlseqs/ctlseqs.html#h3-PC-Style-Function-Keys +const ( + RequestNameVersion = "\x1b[>q" + XTVERSION = RequestNameVersion +) + // RequestXTVersion is a control sequence that requests the terminal's XTVERSION. It responds with a DSR sequence identifying the version. // // CSI > Ps q // DCS > | text ST // // See https://invisible-island.net/xterm/ctlseqs/ctlseqs.html#h3-PC-Style-Function-Keys -const RequestXTVersion = "\x1b[>0q" +// +// Deprecated: use [RequestNameVersion] instead. +const RequestXTVersion = RequestNameVersion + +// PrimaryDeviceAttributes (DA1) is a control sequence that reports the +// terminal's primary device attributes. +// +// CSI c +// CSI 0 c +// CSI ? Ps ; ... c +// +// If no attributes are given, or if the attribute is 0, this function returns +// the request sequence. Otherwise, it returns the response sequence. +// +// See https://vt100.net/docs/vt510-rm/DA1.html +func PrimaryDeviceAttributes(attrs ...int) string { + if len(attrs) == 0 { + return RequestPrimaryDeviceAttributes + } else if len(attrs) == 1 && attrs[0] == 0 { + return "\x1b[0c" + } + + as := make([]string, len(attrs)) + for i, a := range attrs { + as[i] = strconv.Itoa(a) + } + return "\x1b[?" + strings.Join(as, ";") + "c" +} + +// DA1 is an alias for [PrimaryDeviceAttributes]. +func DA1(attrs ...int) string { + return PrimaryDeviceAttributes(attrs...) +} // RequestPrimaryDeviceAttributes is a control sequence that requests the // terminal's primary device attributes (DA1). @@ -15,3 +65,73 @@ const RequestXTVersion = "\x1b[>0q" // // See https://vt100.net/docs/vt510-rm/DA1.html const RequestPrimaryDeviceAttributes = "\x1b[c" + +// SecondaryDeviceAttributes (DA2) is a control sequence that reports the +// terminal's secondary device attributes. +// +// CSI > c +// CSI > 0 c +// CSI > Ps ; ... c +// +// See https://vt100.net/docs/vt510-rm/DA2.html +func SecondaryDeviceAttributes(attrs ...int) string { + if len(attrs) == 0 { + return RequestSecondaryDeviceAttributes + } + + as := make([]string, len(attrs)) + for i, a := range attrs { + as[i] = strconv.Itoa(a) + } + return "\x1b[>" + strings.Join(as, ";") + "c" +} + +// DA2 is an alias for [SecondaryDeviceAttributes]. +func DA2(attrs ...int) string { + return SecondaryDeviceAttributes(attrs...) +} + +// RequestSecondaryDeviceAttributes is a control sequence that requests the +// terminal's secondary device attributes (DA2). +// +// CSI > c +// +// See https://vt100.net/docs/vt510-rm/DA2.html +const RequestSecondaryDeviceAttributes = "\x1b[>c" + +// TertiaryDeviceAttributes (DA3) is a control sequence that reports the +// terminal's tertiary device attributes. +// +// CSI = c +// CSI = 0 c +// DCS ! | Text ST +// +// Where Text is the unit ID for the terminal. +// +// If no unit ID is given, or if the unit ID is 0, this function returns the +// request sequence. Otherwise, it returns the response sequence. +// +// See https://vt100.net/docs/vt510-rm/DA3.html +func TertiaryDeviceAttributes(unitID string) string { + switch unitID { + case "": + return RequestTertiaryDeviceAttributes + case "0": + return "\x1b[=0c" + } + + return "\x1bP!|" + unitID + "\x1b\\" +} + +// DA3 is an alias for [TertiaryDeviceAttributes]. +func DA3(unitID string) string { + return TertiaryDeviceAttributes(unitID) +} + +// RequestTertiaryDeviceAttributes is a control sequence that requests the +// terminal's tertiary device attributes (DA3). 
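+// The terminal replies with a DCS ! | Text ST sequence carrying its unit ID.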
+// +// CSI = c +// +// See https://vt100.net/docs/vt510-rm/DA3.html +const RequestTertiaryDeviceAttributes = "\x1b[=c" diff --git a/vendor/github.com/charmbracelet/x/ansi/cursor.go b/vendor/github.com/charmbracelet/x/ansi/cursor.go index ec1fc4d20..0c364d608 100644 --- a/vendor/github.com/charmbracelet/x/ansi/cursor.go +++ b/vendor/github.com/charmbracelet/x/ansi/cursor.go @@ -8,7 +8,10 @@ import "strconv" // ESC 7 // // See: https://vt100.net/docs/vt510-rm/DECSC.html -const SaveCursor = "\x1b7" +const ( + SaveCursor = "\x1b7" + DECSC = SaveCursor +) // RestoreCursor (DECRC) is an escape sequence that restores the cursor // position. @@ -16,10 +19,13 @@ const SaveCursor = "\x1b7" // ESC 8 // // See: https://vt100.net/docs/vt510-rm/DECRC.html -const RestoreCursor = "\x1b8" +const ( + RestoreCursor = "\x1b8" + DECRC = RestoreCursor +) -// RequestCursorPosition (CPR) is an escape sequence that requests the current -// cursor position. +// RequestCursorPosition is an escape sequence that requests the current cursor +// position. // // CSI 6 n // @@ -30,6 +36,8 @@ const RestoreCursor = "\x1b8" // // Where Pl is the line number and Pc is the column number. // See: https://vt100.net/docs/vt510-rm/CPR.html +// +// Deprecated: use [RequestCursorPositionReport] instead. const RequestCursorPosition = "\x1b[6n" // RequestExtendedCursorPosition (DECXCPR) is a sequence for requesting the @@ -45,6 +53,8 @@ const RequestCursorPosition = "\x1b[6n" // Where Pl is the line number, Pc is the column number, and Pp is the page // number. // See: https://vt100.net/docs/vt510-rm/DECXCPR.html +// +// Deprecated: use [RequestExtendedCursorPositionReport] instead. const RequestExtendedCursorPosition = "\x1b[?6n" // CursorUp (CUU) returns a sequence for moving the cursor up n cells. @@ -60,9 +70,19 @@ func CursorUp(n int) string { return "\x1b[" + s + "A" } +// CUU is an alias for [CursorUp]. +func CUU(n int) string { + return CursorUp(n) +} + +// CUU1 is a sequence for moving the cursor up one cell. +const CUU1 = "\x1b[A" + // CursorUp1 is a sequence for moving the cursor up one cell. // // This is equivalent to CursorUp(1). +// +// Deprecated: use [CUU1] instead. const CursorUp1 = "\x1b[A" // CursorDown (CUD) returns a sequence for moving the cursor down n cells. @@ -78,17 +98,27 @@ func CursorDown(n int) string { return "\x1b[" + s + "B" } +// CUD is an alias for [CursorDown]. +func CUD(n int) string { + return CursorDown(n) +} + +// CUD1 is a sequence for moving the cursor down one cell. +const CUD1 = "\x1b[B" + // CursorDown1 is a sequence for moving the cursor down one cell. // // This is equivalent to CursorDown(1). +// +// Deprecated: use [CUD1] instead. const CursorDown1 = "\x1b[B" -// CursorRight (CUF) returns a sequence for moving the cursor right n cells. +// CursorForward (CUF) returns a sequence for moving the cursor right n cells. // -// CSI n C +// # CSI n C // // See: https://vt100.net/docs/vt510-rm/CUF.html -func CursorRight(n int) string { +func CursorForward(n int) string { var s string if n > 1 { s = strconv.Itoa(n) @@ -96,17 +126,38 @@ func CursorRight(n int) string { return "\x1b[" + s + "C" } +// CUF is an alias for [CursorForward]. +func CUF(n int) string { + return CursorForward(n) +} + +// CUF1 is a sequence for moving the cursor right one cell. +const CUF1 = "\x1b[C" + +// CursorRight (CUF) returns a sequence for moving the cursor right n cells. +// +// CSI n C +// +// See: https://vt100.net/docs/vt510-rm/CUF.html +// +// Deprecated: use [CursorForward] instead. 
+func CursorRight(n int) string { + return CursorForward(n) +} + // CursorRight1 is a sequence for moving the cursor right one cell. // // This is equivalent to CursorRight(1). -const CursorRight1 = "\x1b[C" +// +// Deprecated: use [CUF1] instead. +const CursorRight1 = CUF1 -// CursorLeft (CUB) returns a sequence for moving the cursor left n cells. +// CursorBackward (CUB) returns a sequence for moving the cursor left n cells. // -// CSI n D +// # CSI n D // // See: https://vt100.net/docs/vt510-rm/CUB.html -func CursorLeft(n int) string { +func CursorBackward(n int) string { var s string if n > 1 { s = strconv.Itoa(n) @@ -114,10 +165,31 @@ func CursorLeft(n int) string { return "\x1b[" + s + "D" } +// CUB is an alias for [CursorBackward]. +func CUB(n int) string { + return CursorBackward(n) +} + +// CUB1 is a sequence for moving the cursor left one cell. +const CUB1 = "\x1b[D" + +// CursorLeft (CUB) returns a sequence for moving the cursor left n cells. +// +// CSI n D +// +// See: https://vt100.net/docs/vt510-rm/CUB.html +// +// Deprecated: use [CursorBackward] instead. +func CursorLeft(n int) string { + return CursorBackward(n) +} + // CursorLeft1 is a sequence for moving the cursor left one cell. // // This is equivalent to CursorLeft(1). -const CursorLeft1 = "\x1b[D" +// +// Deprecated: use [CUB1] instead. +const CursorLeft1 = CUB1 // CursorNextLine (CNL) returns a sequence for moving the cursor to the // beginning of the next line n times. @@ -133,6 +205,11 @@ func CursorNextLine(n int) string { return "\x1b[" + s + "E" } +// CNL is an alias for [CursorNextLine]. +func CNL(n int) string { + return CursorNextLine(n) +} + // CursorPreviousLine (CPL) returns a sequence for moving the cursor to the // beginning of the previous line n times. // @@ -147,25 +224,266 @@ func CursorPreviousLine(n int) string { return "\x1b[" + s + "F" } -// MoveCursor (CUP) returns a sequence for moving the cursor to the given row -// and column. +// CPL is an alias for [CursorPreviousLine]. +func CPL(n int) string { + return CursorPreviousLine(n) +} + +// CursorHorizontalAbsolute (CHA) returns a sequence for moving the cursor to +// the given column. +// +// Default is 1. +// +// CSI n G +// +// See: https://vt100.net/docs/vt510-rm/CHA.html +func CursorHorizontalAbsolute(col int) string { + var s string + if col > 0 { + s = strconv.Itoa(col) + } + return "\x1b[" + s + "G" +} + +// CHA is an alias for [CursorHorizontalAbsolute]. +func CHA(col int) string { + return CursorHorizontalAbsolute(col) +} + +// CursorPosition (CUP) returns a sequence for setting the cursor to the +// given row and column. +// +// Default is 1,1. // // CSI n ; m H // // See: https://vt100.net/docs/vt510-rm/CUP.html -func MoveCursor(row, col int) string { - if row < 0 { - row = 0 +func CursorPosition(col, row int) string { + if row <= 0 && col <= 0 { + return HomeCursorPosition + } + + var r, c string + if row > 0 { + r = strconv.Itoa(row) } - if col < 0 { - col = 0 + if col > 0 { + c = strconv.Itoa(col) } - return "\x1b[" + strconv.Itoa(row) + ";" + strconv.Itoa(col) + "H" + return "\x1b[" + r + ";" + c + "H" } +// CUP is an alias for [CursorPosition]. +func CUP(col, row int) string { + return CursorPosition(col, row) +} + +// CursorHomePosition is a sequence for moving the cursor to the upper left +// corner of the scrolling region. This is equivalent to `CursorPosition(1, 1)`. +const CursorHomePosition = "\x1b[H" + +// SetCursorPosition (CUP) returns a sequence for setting the cursor to the +// given row and column. 
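+// Note that the argument order is (col, row), not (row, col).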
+// +// CSI n ; m H +// +// See: https://vt100.net/docs/vt510-rm/CUP.html +// +// Deprecated: use [CursorPosition] instead. +func SetCursorPosition(col, row int) string { + if row <= 0 && col <= 0 { + return HomeCursorPosition + } + + var r, c string + if row > 0 { + r = strconv.Itoa(row) + } + if col > 0 { + c = strconv.Itoa(col) + } + return "\x1b[" + r + ";" + c + "H" +} + +// HomeCursorPosition is a sequence for moving the cursor to the upper left +// corner of the scrolling region. This is equivalent to `SetCursorPosition(1, 1)`. +// +// Deprecated: use [CursorHomePosition] instead. +const HomeCursorPosition = CursorHomePosition + +// MoveCursor (CUP) returns a sequence for setting the cursor to the +// given row and column. +// +// CSI n ; m H +// +// See: https://vt100.net/docs/vt510-rm/CUP.html +// +// Deprecated: use [CursorPosition] instead. +func MoveCursor(col, row int) string { + return SetCursorPosition(col, row) +} + +// CursorOrigin is a sequence for moving the cursor to the upper left corner of +// the display. This is equivalent to `SetCursorPosition(1, 1)`. +// +// Deprecated: use [CursorHomePosition] instead. +const CursorOrigin = "\x1b[1;1H" + // MoveCursorOrigin is a sequence for moving the cursor to the upper left -// corner of the screen. This is equivalent to MoveCursor(1, 1). -const MoveCursorOrigin = "\x1b[1;1H" +// corner of the display. This is equivalent to `SetCursorPosition(1, 1)`. +// +// Deprecated: use [CursorHomePosition] instead. +const MoveCursorOrigin = CursorOrigin + +// CursorHorizontalForwardTab (CHT) returns a sequence for moving the cursor to +// the next tab stop n times. +// +// Default is 1. +// +// CSI n I +// +// See: https://vt100.net/docs/vt510-rm/CHT.html +func CursorHorizontalForwardTab(n int) string { + var s string + if n > 1 { + s = strconv.Itoa(n) + } + return "\x1b[" + s + "I" +} + +// CHT is an alias for [CursorHorizontalForwardTab]. +func CHT(n int) string { + return CursorHorizontalForwardTab(n) +} + +// EraseCharacter (ECH) returns a sequence for erasing n characters and moving +// the cursor to the right. This doesn't affect other cell attributes. +// +// Default is 1. +// +// CSI n X +// +// See: https://vt100.net/docs/vt510-rm/ECH.html +func EraseCharacter(n int) string { + var s string + if n > 1 { + s = strconv.Itoa(n) + } + return "\x1b[" + s + "X" +} + +// ECH is an alias for [EraseCharacter]. +func ECH(n int) string { + return EraseCharacter(n) +} + +// CursorBackwardTab (CBT) returns a sequence for moving the cursor to the +// previous tab stop n times. +// +// Default is 1. +// +// CSI n Z +// +// See: https://vt100.net/docs/vt510-rm/CBT.html +func CursorBackwardTab(n int) string { + var s string + if n > 1 { + s = strconv.Itoa(n) + } + return "\x1b[" + s + "Z" +} + +// CBT is an alias for [CursorBackwardTab]. +func CBT(n int) string { + return CursorBackwardTab(n) +} + +// VerticalPositionAbsolute (VPA) returns a sequence for moving the cursor to +// the given row. +// +// Default is 1. +// +// CSI n d +// +// See: https://vt100.net/docs/vt510-rm/VPA.html +func VerticalPositionAbsolute(row int) string { + var s string + if row > 0 { + s = strconv.Itoa(row) + } + return "\x1b[" + s + "d" +} + +// VPA is an alias for [VerticalPositionAbsolute]. +func VPA(row int) string { + return VerticalPositionAbsolute(row) +} + +// VerticalPositionRelative (VPR) returns a sequence for moving the cursor down +// n rows relative to the current position. +// +// Default is 1. 
+// +// CSI n e +// +// See: https://vt100.net/docs/vt510-rm/VPR.html +func VerticalPositionRelative(n int) string { + var s string + if n > 1 { + s = strconv.Itoa(n) + } + return "\x1b[" + s + "e" +} + +// VPR is an alias for [VerticalPositionRelative]. +func VPR(n int) string { + return VerticalPositionRelative(n) +} + +// HorizontalVerticalPosition (HVP) returns a sequence for moving the cursor to +// the given row and column. +// +// Default is 1,1. +// +// CSI n ; m f +// +// This has the same effect as [CursorPosition]. +// +// See: https://vt100.net/docs/vt510-rm/HVP.html +func HorizontalVerticalPosition(col, row int) string { + var r, c string + if row > 0 { + r = strconv.Itoa(row) + } + if col > 0 { + c = strconv.Itoa(col) + } + return "\x1b[" + r + ";" + c + "f" +} + +// HVP is an alias for [HorizontalVerticalPosition]. +func HVP(col, row int) string { + return HorizontalVerticalPosition(col, row) +} + +// HorizontalVerticalHomePosition is a sequence for moving the cursor to the +// upper left corner of the scrolling region. This is equivalent to +// `HorizontalVerticalPosition(1, 1)`. +const HorizontalVerticalHomePosition = "\x1b[f" + +// SaveCurrentCursorPosition (SCOSC) is a sequence for saving the current cursor +// position for SCO console mode. +// +// CSI s +// +// This acts like [DECSC], except the page number where the cursor is located +// is not saved. +// +// See: https://vt100.net/docs/vt510-rm/SCOSC.html +const ( + SaveCurrentCursorPosition = "\x1b[s" + SCOSC = SaveCurrentCursorPosition +) // SaveCursorPosition (SCP or SCOSC) is a sequence for saving the cursor // position. @@ -176,8 +494,24 @@ const MoveCursorOrigin = "\x1b[1;1H" // not saved. // // See: https://vt100.net/docs/vt510-rm/SCOSC.html +// +// Deprecated: use [SaveCurrentCursorPosition] instead. const SaveCursorPosition = "\x1b[s" +// RestoreCurrentCursorPosition (SCORC) is a sequence for restoring the current +// cursor position for SCO console mode. +// +// CSI u +// +// This acts like [DECRC], except the page number where the cursor was saved is +// not restored. +// +// See: https://vt100.net/docs/vt510-rm/SCORC.html +const ( + RestoreCurrentCursorPosition = "\x1b[u" + SCORC = RestoreCurrentCursorPosition +) + // RestoreCursorPosition (RCP or SCORC) is a sequence for restoring the cursor // position. // @@ -187,10 +521,14 @@ const SaveCursorPosition = "\x1b[s" // cursor was saved. // // See: https://vt100.net/docs/vt510-rm/SCORC.html +// +// Deprecated: use [RestoreCurrentCursorPosition] instead. const RestoreCursorPosition = "\x1b[u" // SetCursorStyle (DECSCUSR) returns a sequence for changing the cursor style. // +// Default is 1. +// // CSI Ps SP q // // Where Ps is the cursor style: @@ -212,6 +550,11 @@ func SetCursorStyle(style int) string { return "\x1b[" + strconv.Itoa(style) + " q" } +// DECSCUSR is an alias for [SetCursorStyle]. +func DECSCUSR(style int) string { + return SetCursorStyle(style) +} + // SetPointerShape returns a sequence for changing the mouse pointer cursor // shape. Use "default" for the default pointer shape. // @@ -233,3 +576,58 @@ func SetCursorStyle(style int) string { func SetPointerShape(shape string) string { return "\x1b]22;" + shape + "\x07" } + +// ReverseIndex (RI) is an escape sequence for moving the cursor up one line in +// the same column. If the cursor is at the top margin, the screen scrolls +// down. +// +// This has the same effect as [RI]. 
+const ReverseIndex = "\x1bM" + +// HorizontalPositionAbsolute (HPA) returns a sequence for moving the cursor to +// the given column. This has the same effect as [CUP]. +// +// Default is 1. +// +// CSI n ` +// +// See: https://vt100.net/docs/vt510-rm/HPA.html +func HorizontalPositionAbsolute(col int) string { + var s string + if col > 0 { + s = strconv.Itoa(col) + } + return "\x1b[" + s + "`" +} + +// HPA is an alias for [HorizontalPositionAbsolute]. +func HPA(col int) string { + return HorizontalPositionAbsolute(col) +} + +// HorizontalPositionRelative (HPR) returns a sequence for moving the cursor +// right n columns relative to the current position. This has the same effect +// as [CUP]. +// +// Default is 1. +// +// CSI n a +// +// See: https://vt100.net/docs/vt510-rm/HPR.html +func HorizontalPositionRelative(n int) string { + var s string + if n > 0 { + s = strconv.Itoa(n) + } + return "\x1b[" + s + "a" +} + +// HPR is an alias for [HorizontalPositionRelative]. +func HPR(n int) string { + return HorizontalPositionRelative(n) +} + +// Index (IND) is an escape sequence for moving the cursor down one line in the +// same column. If the cursor is at the bottom margin, the screen scrolls up. +// This has the same effect as [IND]. +const Index = "\x1bD" diff --git a/vendor/github.com/charmbracelet/x/ansi/cwd.go b/vendor/github.com/charmbracelet/x/ansi/cwd.go new file mode 100644 index 000000000..b03ac1bb9 --- /dev/null +++ b/vendor/github.com/charmbracelet/x/ansi/cwd.go @@ -0,0 +1,26 @@ +package ansi + +import ( + "net/url" + "path" +) + +// NotifyWorkingDirectory returns a sequence that notifies the terminal +// of the current working directory. +// +// OSC 7 ; Pt BEL +// +// Where Pt is a URL in the format "file://[host]/[path]". +// Set host to "localhost" if this is a path on the local computer. +// +// See: https://wezfurlong.org/wezterm/shell-integration.html#osc-7-escape-sequence-to-set-the-working-directory +// See: https://iterm2.com/documentation-escape-codes.html#:~:text=RemoteHost%20and%20CurrentDir%3A-,OSC%207,-%3B%20%5BPs%5D%20ST +func NotifyWorkingDirectory(host string, paths ...string) string { + path := path.Join(paths...) + u := &url.URL{ + Scheme: "file", + Host: host, + Path: path, + } + return "\x1b]7;" + u.String() + "\x07" +} diff --git a/vendor/github.com/charmbracelet/x/ansi/dcs.go b/vendor/github.com/charmbracelet/x/ansi/dcs.go deleted file mode 100644 index 185f0b52b..000000000 --- a/vendor/github.com/charmbracelet/x/ansi/dcs.go +++ /dev/null @@ -1,148 +0,0 @@ -package ansi - -import ( - "bytes" - "strconv" - - "github.com/charmbracelet/x/ansi/parser" -) - -// DcsSequence represents a Device Control String (DCS) escape sequence. -// -// The DCS sequence is used to send device control strings to the terminal. The -// sequence starts with the C1 control code character DCS (0x9B) or ESC P in -// 7-bit environments, followed by parameter bytes, intermediate bytes, a -// command byte, followed by data bytes, and ends with the C1 control code -// character ST (0x9C) or ESC \ in 7-bit environments. -// -// This follows the parameter string format. -// See ECMA-48 § 5.4.1 -type DcsSequence struct { - // Params contains the raw parameters of the sequence. - // This is a slice of integers, where each integer is a 32-bit integer - // containing the parameter value in the lower 31 bits and a flag in the - // most significant bit indicating whether there are more sub-parameters. - Params []int - - // Data contains the string raw data of the sequence. 
- // This is the data between the final byte and the escape sequence terminator. - Data []byte - - // Cmd contains the raw command of the sequence. - // The command is a 32-bit integer containing the DCS command byte in the - // lower 8 bits, the private marker in the next 8 bits, and the intermediate - // byte in the next 8 bits. - // - // DCS > 0 ; 1 $ r ST - // - // Is represented as: - // - // 'r' | '>' << 8 | '$' << 16 - Cmd int -} - -var _ Sequence = DcsSequence{} - -// Marker returns the marker byte of the DCS sequence. -// This is always gonna be one of the following '<' '=' '>' '?' and in the -// range of 0x3C-0x3F. -// Zero is returned if the sequence does not have a marker. -func (s DcsSequence) Marker() int { - return parser.Marker(s.Cmd) -} - -// Intermediate returns the intermediate byte of the DCS sequence. -// An intermediate byte is in the range of 0x20-0x2F. This includes these -// characters from ' ', '!', '"', '#', '$', '%', '&', ”', '(', ')', '*', '+', -// ',', '-', '.', '/'. -// Zero is returned if the sequence does not have an intermediate byte. -func (s DcsSequence) Intermediate() int { - return parser.Intermediate(s.Cmd) -} - -// Command returns the command byte of the CSI sequence. -func (s DcsSequence) Command() int { - return parser.Command(s.Cmd) -} - -// Param returns the parameter at the given index. -// It returns -1 if the parameter does not exist. -func (s DcsSequence) Param(i int) int { - return parser.Param(s.Params, i) -} - -// HasMore returns true if the parameter has more sub-parameters. -func (s DcsSequence) HasMore(i int) bool { - return parser.HasMore(s.Params, i) -} - -// Subparams returns the sub-parameters of the given parameter. -// It returns nil if the parameter does not exist. -func (s DcsSequence) Subparams(i int) []int { - return parser.Subparams(s.Params, i) -} - -// Len returns the number of parameters in the sequence. -// This will return the number of parameters in the sequence, excluding any -// sub-parameters. -func (s DcsSequence) Len() int { - return parser.Len(s.Params) -} - -// Range iterates over the parameters of the sequence and calls the given -// function for each parameter. -// The function should return false to stop the iteration. -func (s DcsSequence) Range(fn func(i int, param int, hasMore bool) bool) { - parser.Range(s.Params, fn) -} - -// Clone returns a copy of the DCS sequence. -func (s DcsSequence) Clone() Sequence { - return DcsSequence{ - Params: append([]int(nil), s.Params...), - Data: append([]byte(nil), s.Data...), - Cmd: s.Cmd, - } -} - -// String returns a string representation of the sequence. -// The string will always be in the 7-bit format i.e (ESC P p..p i..i f ESC \). -func (s DcsSequence) String() string { - return s.buffer().String() -} - -// buffer returns a buffer containing the sequence. -func (s DcsSequence) buffer() *bytes.Buffer { - var b bytes.Buffer - b.WriteString("\x1bP") - if m := s.Marker(); m != 0 { - b.WriteByte(byte(m)) - } - s.Range(func(i, param int, hasMore bool) bool { - if param >= -1 { - b.WriteString(strconv.Itoa(param)) - } - if i < len(s.Params)-1 { - if hasMore { - b.WriteByte(':') - } else { - b.WriteByte(';') - } - } - return true - }) - if i := s.Intermediate(); i != 0 { - b.WriteByte(byte(i)) - } - b.WriteByte(byte(s.Command())) - b.Write(s.Data) - b.WriteByte(ESC) - b.WriteByte('\\') - return &b -} - -// Bytes returns the byte representation of the sequence. -// The bytes will always be in the 7-bit format i.e (ESC P p..p i..i F ESC \). 
-func (s DcsSequence) Bytes() []byte { - return s.buffer().Bytes() -} diff --git a/vendor/github.com/charmbracelet/x/ansi/focus.go b/vendor/github.com/charmbracelet/x/ansi/focus.go new file mode 100644 index 000000000..4e0207ceb --- /dev/null +++ b/vendor/github.com/charmbracelet/x/ansi/focus.go @@ -0,0 +1,9 @@ +package ansi + +// Focus is an escape sequence to notify the terminal that it has focus. +// This is used with [FocusEventMode]. +const Focus = "\x1b[I" + +// Blur is an escape sequence to notify the terminal that it has lost focus. +// This is used with [FocusEventMode]. +const Blur = "\x1b[O" diff --git a/vendor/github.com/charmbracelet/x/ansi/graphics.go b/vendor/github.com/charmbracelet/x/ansi/graphics.go new file mode 100644 index 000000000..604fef47c --- /dev/null +++ b/vendor/github.com/charmbracelet/x/ansi/graphics.go @@ -0,0 +1,199 @@ +package ansi + +import ( + "bytes" + "encoding/base64" + "errors" + "fmt" + "image" + "io" + "os" + "strings" + + "github.com/charmbracelet/x/ansi/kitty" +) + +// KittyGraphics returns a sequence that encodes the given image in the Kitty +// graphics protocol. +// +// APC G [comma separated options] ; [base64 encoded payload] ST +// +// See https://sw.kovidgoyal.net/kitty/graphics-protocol/ +func KittyGraphics(payload []byte, opts ...string) string { + var buf bytes.Buffer + buf.WriteString("\x1b_G") + buf.WriteString(strings.Join(opts, ",")) + if len(payload) > 0 { + buf.WriteString(";") + buf.Write(payload) + } + buf.WriteString("\x1b\\") + return buf.String() +} + +var ( + // KittyGraphicsTempDir is the directory where temporary files are stored. + // This is used in [WriteKittyGraphics] along with [os.CreateTemp]. + KittyGraphicsTempDir = "" + + // KittyGraphicsTempPattern is the pattern used to create temporary files. + // This is used in [WriteKittyGraphics] along with [os.CreateTemp]. + // The Kitty Graphics protocol requires the file path to contain the + // substring "tty-graphics-protocol". + KittyGraphicsTempPattern = "tty-graphics-protocol-*" +) + +// WriteKittyGraphics writes an image using the Kitty Graphics protocol with +// the given options to w. It chunks the written data if o.Chunk is true. +// +// You can omit m and use nil when rendering an image from a file. In this +// case, you must provide a file path in o.File and use o.Transmission = +// [kitty.File]. You can also use o.Transmission = [kitty.TempFile] to write +// the image to a temporary file. In that case, the file path is ignored, and +// the image is written to a temporary file that is automatically deleted by +// the terminal. 
+// +// See https://sw.kovidgoyal.net/kitty/graphics-protocol/ +func WriteKittyGraphics(w io.Writer, m image.Image, o *kitty.Options) error { + if o == nil { + o = &kitty.Options{} + } + + if o.Transmission == 0 && len(o.File) != 0 { + o.Transmission = kitty.File + } + + var data bytes.Buffer // the data to be encoded into base64 + e := &kitty.Encoder{ + Compress: o.Compression == kitty.Zlib, + Format: o.Format, + } + + switch o.Transmission { + case kitty.Direct: + if err := e.Encode(&data, m); err != nil { + return fmt.Errorf("failed to encode direct image: %w", err) + } + + case kitty.SharedMemory: + // TODO: Implement shared memory + return fmt.Errorf("shared memory transmission is not yet implemented") + + case kitty.File: + if len(o.File) == 0 { + return kitty.ErrMissingFile + } + + f, err := os.Open(o.File) + if err != nil { + return fmt.Errorf("failed to open file: %w", err) + } + + defer f.Close() //nolint:errcheck + + stat, err := f.Stat() + if err != nil { + return fmt.Errorf("failed to get file info: %w", err) + } + + mode := stat.Mode() + if !mode.IsRegular() { + return fmt.Errorf("file is not a regular file") + } + + // Write the file path to the buffer + if _, err := data.WriteString(f.Name()); err != nil { + return fmt.Errorf("failed to write file path to buffer: %w", err) + } + + case kitty.TempFile: + f, err := os.CreateTemp(KittyGraphicsTempDir, KittyGraphicsTempPattern) + if err != nil { + return fmt.Errorf("failed to create file: %w", err) + } + + defer f.Close() //nolint:errcheck + + if err := e.Encode(f, m); err != nil { + return fmt.Errorf("failed to encode image to file: %w", err) + } + + // Write the file path to the buffer + if _, err := data.WriteString(f.Name()); err != nil { + return fmt.Errorf("failed to write file path to buffer: %w", err) + } + } + + // Encode image to base64 + var payload bytes.Buffer // the base64 encoded image to be written to w + b64 := base64.NewEncoder(base64.StdEncoding, &payload) + if _, err := data.WriteTo(b64); err != nil { + return fmt.Errorf("failed to write base64 encoded image to payload: %w", err) + } + if err := b64.Close(); err != nil { + return err + } + + // If not chunking, write all at once + if !o.Chunk { + _, err := io.WriteString(w, KittyGraphics(payload.Bytes(), o.Options()...)) + return err + } + + // Write in chunks + var ( + err error + n int + ) + chunk := make([]byte, kitty.MaxChunkSize) + isFirstChunk := true + + for { + // Stop if we read less than the chunk size [kitty.MaxChunkSize]. 
+ n, err = io.ReadFull(&payload, chunk) + if errors.Is(err, io.ErrUnexpectedEOF) || errors.Is(err, io.EOF) { + break + } + if err != nil { + return fmt.Errorf("failed to read chunk: %w", err) + } + + opts := buildChunkOptions(o, isFirstChunk, false) + if _, err := io.WriteString(w, KittyGraphics(chunk[:n], opts...)); err != nil { + return err + } + + isFirstChunk = false + } + + // Write the last chunk + opts := buildChunkOptions(o, isFirstChunk, true) + _, err = io.WriteString(w, KittyGraphics(chunk[:n], opts...)) + return err +} + +// buildChunkOptions creates the options slice for a chunk +func buildChunkOptions(o *kitty.Options, isFirstChunk, isLastChunk bool) []string { + var opts []string + if isFirstChunk { + opts = o.Options() + } else { + // These options are allowed in subsequent chunks + if o.Quite > 0 { + opts = append(opts, fmt.Sprintf("q=%d", o.Quite)) + } + if o.Action == kitty.Frame { + opts = append(opts, "a=f") + } + } + + if !isFirstChunk || !isLastChunk { + // We don't need to encode the (m=) option when we only have one chunk. + if isLastChunk { + opts = append(opts, "m=0") + } else { + opts = append(opts, "m=1") + } + } + return opts +} diff --git a/vendor/github.com/charmbracelet/x/ansi/iterm2.go b/vendor/github.com/charmbracelet/x/ansi/iterm2.go new file mode 100644 index 000000000..0ecb336da --- /dev/null +++ b/vendor/github.com/charmbracelet/x/ansi/iterm2.go @@ -0,0 +1,18 @@ +package ansi + +import "fmt" + +// ITerm2 returns a sequence that uses the iTerm2 proprietary protocol. Use the +// iterm2 package for a more convenient API. +// +// OSC 1337 ; key = value ST +// +// Example: +// +// ITerm2(iterm2.File{...}) +// +// See https://iterm2.com/documentation-escape-codes.html +// See https://iterm2.com/documentation-images.html +func ITerm2(data any) string { + return "\x1b]1337;" + fmt.Sprint(data) + "\x07" +} diff --git a/vendor/github.com/charmbracelet/x/ansi/keypad.go b/vendor/github.com/charmbracelet/x/ansi/keypad.go new file mode 100644 index 000000000..9183c6a7e --- /dev/null +++ b/vendor/github.com/charmbracelet/x/ansi/keypad.go @@ -0,0 +1,28 @@ +package ansi + +// Keypad Application Mode (DECKPAM) is a mode that determines whether the +// keypad sends application sequences or ANSI sequences. +// +// This works like enabling [DECNKM]. +// Use [NumericKeypadMode] to set the numeric keypad mode. +// +// ESC = +// +// See: https://vt100.net/docs/vt510-rm/DECKPAM.html +const ( + KeypadApplicationMode = "\x1b=" + DECKPAM = KeypadApplicationMode +) + +// Keypad Numeric Mode (DECKPNM) is a mode that determines whether the keypad +// sends application sequences or ANSI sequences. +// +// This works the same as disabling [DECNKM]. +// +// ESC > +// +// See: https://vt100.net/docs/vt510-rm/DECKPNM.html +const ( + KeypadNumericMode = "\x1b>" + DECKPNM = KeypadNumericMode +) diff --git a/vendor/github.com/charmbracelet/x/ansi/kitty.go b/vendor/github.com/charmbracelet/x/ansi/kitty.go index c56d8d1c9..124ab839e 100644 --- a/vendor/github.com/charmbracelet/x/ansi/kitty.go +++ b/vendor/github.com/charmbracelet/x/ansi/kitty.go @@ -72,7 +72,7 @@ func PushKittyKeyboard(flags int) string { // Keyboard stack to disable the protocol. // // This is equivalent to PushKittyKeyboard(0). -const DisableKittyKeyboard = "\x1b[>0u" +const DisableKittyKeyboard = "\x1b[>u" // PopKittyKeyboard returns a sequence to pop n number of flags from the // terminal Kitty Keyboard stack. 
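
A minimal sketch, not part of the vendored code, showing how the kitty graphics
API added above fits together. The 2x2 test image and the option values are
illustrative assumptions; ansi.WriteKittyGraphics, kitty.Options, and the kitty
constants are the ones introduced by this patch.

package main

import (
	"image"
	"image/color"
	"os"

	"github.com/charmbracelet/x/ansi"
	"github.com/charmbracelet/x/ansi/kitty"
)

func main() {
	// A tiny 2x2 test image (illustrative assumption).
	m := image.NewRGBA(image.Rect(0, 0, 2, 2))
	m.Set(0, 0, color.RGBA{R: 0xff, A: 0xff})

	// Transmit the raw RGBA pixels directly and display them (a=T),
	// chunking the base64 payload as the protocol requires.
	o := &kitty.Options{
		Action:      kitty.TransmitAndPut,
		Format:      kitty.RGBA,
		ImageWidth:  2,
		ImageHeight: 2,
		Chunk:       true,
	}
	if err := ansi.WriteKittyGraphics(os.Stdout, m, o); err != nil {
		panic(err)
	}
}
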
diff --git a/vendor/github.com/charmbracelet/x/ansi/kitty/decoder.go b/vendor/github.com/charmbracelet/x/ansi/kitty/decoder.go new file mode 100644 index 000000000..fbd08441f --- /dev/null +++ b/vendor/github.com/charmbracelet/x/ansi/kitty/decoder.go @@ -0,0 +1,85 @@ +package kitty + +import ( + "compress/zlib" + "fmt" + "image" + "image/color" + "image/png" + "io" +) + +// Decoder is a decoder for the Kitty graphics protocol. It supports decoding +// images in the 24-bit [RGB], 32-bit [RGBA], and [PNG] formats. It can also +// decompress data using zlib. +// The default format is 32-bit [RGBA]. +type Decoder struct { + // Uses zlib decompression. + Decompress bool + + // Can be one of [RGB], [RGBA], or [PNG]. + Format int + + // Width of the image in pixels. This can be omitted if the image is [PNG] + // formatted. + Width int + + // Height of the image in pixels. This can be omitted if the image is [PNG] + // formatted. + Height int +} + +// Decode decodes the image data from r in the specified format. +func (d *Decoder) Decode(r io.Reader) (image.Image, error) { + if d.Decompress { + zr, err := zlib.NewReader(r) + if err != nil { + return nil, fmt.Errorf("failed to create zlib reader: %w", err) + } + + defer zr.Close() //nolint:errcheck + r = zr + } + + if d.Format == 0 { + d.Format = RGBA + } + + switch d.Format { + case RGBA, RGB: + return d.decodeRGBA(r, d.Format == RGBA) + + case PNG: + return png.Decode(r) + + default: + return nil, fmt.Errorf("unsupported format: %d", d.Format) + } +} + +// decodeRGBA decodes the image data in 32-bit RGBA or 24-bit RGB formats. +func (d *Decoder) decodeRGBA(r io.Reader, alpha bool) (image.Image, error) { + m := image.NewRGBA(image.Rect(0, 0, d.Width, d.Height)) + + var buf []byte + if alpha { + buf = make([]byte, 4) + } else { + buf = make([]byte, 3) + } + + for y := 0; y < d.Height; y++ { + for x := 0; x < d.Width; x++ { + if _, err := io.ReadFull(r, buf[:]); err != nil { + return nil, fmt.Errorf("failed to read pixel data: %w", err) + } + if alpha { + m.SetRGBA(x, y, color.RGBA{buf[0], buf[1], buf[2], buf[3]}) + } else { + m.SetRGBA(x, y, color.RGBA{buf[0], buf[1], buf[2], 0xff}) + } + } + } + + return m, nil +} diff --git a/vendor/github.com/charmbracelet/x/ansi/kitty/encoder.go b/vendor/github.com/charmbracelet/x/ansi/kitty/encoder.go new file mode 100644 index 000000000..f668b9e31 --- /dev/null +++ b/vendor/github.com/charmbracelet/x/ansi/kitty/encoder.go @@ -0,0 +1,64 @@ +package kitty + +import ( + "compress/zlib" + "fmt" + "image" + "image/png" + "io" +) + +// Encoder is an encoder for the Kitty graphics protocol. It supports encoding +// images in the 24-bit [RGB], 32-bit [RGBA], and [PNG] formats, and +// compressing the data using zlib. +// The default format is 32-bit [RGBA]. +type Encoder struct { + // Uses zlib compression. + Compress bool + + // Can be one of [RGBA], [RGB], or [PNG]. + Format int +} + +// Encode encodes the image data in the specified format and writes it to w. 
+func (e *Encoder) Encode(w io.Writer, m image.Image) error { + if m == nil { + return nil + } + + if e.Compress { + zw := zlib.NewWriter(w) + defer zw.Close() //nolint:errcheck + w = zw + } + + if e.Format == 0 { + e.Format = RGBA + } + + switch e.Format { + case RGBA, RGB: + bounds := m.Bounds() + for y := bounds.Min.Y; y < bounds.Max.Y; y++ { + for x := bounds.Min.X; x < bounds.Max.X; x++ { + r, g, b, a := m.At(x, y).RGBA() + switch e.Format { + case RGBA: + w.Write([]byte{byte(r >> 8), byte(g >> 8), byte(b >> 8), byte(a >> 8)}) //nolint:errcheck + case RGB: + w.Write([]byte{byte(r >> 8), byte(g >> 8), byte(b >> 8)}) //nolint:errcheck + } + } + } + + case PNG: + if err := png.Encode(w, m); err != nil { + return fmt.Errorf("failed to encode PNG: %w", err) + } + + default: + return fmt.Errorf("unsupported format: %d", e.Format) + } + + return nil +} diff --git a/vendor/github.com/charmbracelet/x/ansi/kitty/graphics.go b/vendor/github.com/charmbracelet/x/ansi/kitty/graphics.go new file mode 100644 index 000000000..490e7a8a3 --- /dev/null +++ b/vendor/github.com/charmbracelet/x/ansi/kitty/graphics.go @@ -0,0 +1,414 @@ +package kitty + +import "errors" + +// ErrMissingFile is returned when the file path is missing. +var ErrMissingFile = errors.New("missing file path") + +// MaxChunkSize is the maximum chunk size for the image data. +const MaxChunkSize = 1024 * 4 + +// Placeholder is a special Unicode character that can be used as a placeholder +// for an image. +const Placeholder = '\U0010EEEE' + +// Graphics image format. +const ( + // 32-bit RGBA format. + RGBA = 32 + + // 24-bit RGB format. + RGB = 24 + + // PNG format. + PNG = 100 +) + +// Compression types. +const ( + Zlib = 'z' +) + +// Transmission types. +const ( + // The data transmitted directly in the escape sequence. + Direct = 'd' + + // The data transmitted in a regular file. + File = 'f' + + // A temporary file is used and deleted after transmission. + TempFile = 't' + + // A shared memory object. + // For POSIX see https://pubs.opengroup.org/onlinepubs/9699919799/functions/shm_open.html + // For Windows see https://docs.microsoft.com/en-us/windows/win32/memory/creating-named-shared-memory + SharedMemory = 's' +) + +// Action types. +const ( + // Transmit image data. + Transmit = 't' + // TransmitAndPut transmit image data and display (put) it. + TransmitAndPut = 'T' + // Query terminal for image info. + Query = 'q' + // Put (display) previously transmitted image. + Put = 'p' + // Delete image. + Delete = 'd' + // Frame transmits data for animation frames. + Frame = 'f' + // Animate controls animation. + Animate = 'a' + // Compose composes animation frames. + Compose = 'c' +) + +// Delete types. +const ( + // Delete all placements visible on screen + DeleteAll = 'a' + // Delete all images with the specified id, specified using the i key. If + // you specify a p key for the placement id as well, then only the + // placement with the specified image id and placement id will be deleted. + DeleteID = 'i' + // Delete newest image with the specified number, specified using the I + // key. If you specify a p key for the placement id as well, then only the + // placement with the specified number and placement id will be deleted. + DeleteNumber = 'n' + // Delete all placements that intersect with the current cursor position. + DeleteCursor = 'c' + // Delete animation frames. 
+ DeleteFrames = 'f' + // Delete all placements that intersect a specific cell, the cell is + // specified using the x and y keys + DeleteCell = 'p' + // Delete all placements that intersect a specific cell having a specific + // z-index. The cell and z-index is specified using the x, y and z keys. + DeleteCellZ = 'q' + // Delete all images whose id is greater than or equal to the value of the x + // key and less than or equal to the value of the y. + DeleteRange = 'r' + // Delete all placements that intersect the specified column, specified using + // the x key. + DeleteColumn = 'x' + // Delete all placements that intersect the specified row, specified using + // the y key. + DeleteRow = 'y' + // Delete all placements that have the specified z-index, specified using the + // z key. + DeleteZ = 'z' +) + +// Diacritic returns the diacritic rune at the specified index. If the index is +// out of bounds, the first diacritic rune is returned. +func Diacritic(i int) rune { + if i < 0 || i >= len(diacritics) { + return diacritics[0] + } + return diacritics[i] +} + +// From https://sw.kovidgoyal.net/kitty/_downloads/f0a0de9ec8d9ff4456206db8e0814937/rowcolumn-diacritics.txt +// See https://sw.kovidgoyal.net/kitty/graphics-protocol/#unicode-placeholders for further explanation. +var diacritics = []rune{ + '\u0305', + '\u030D', + '\u030E', + '\u0310', + '\u0312', + '\u033D', + '\u033E', + '\u033F', + '\u0346', + '\u034A', + '\u034B', + '\u034C', + '\u0350', + '\u0351', + '\u0352', + '\u0357', + '\u035B', + '\u0363', + '\u0364', + '\u0365', + '\u0366', + '\u0367', + '\u0368', + '\u0369', + '\u036A', + '\u036B', + '\u036C', + '\u036D', + '\u036E', + '\u036F', + '\u0483', + '\u0484', + '\u0485', + '\u0486', + '\u0487', + '\u0592', + '\u0593', + '\u0594', + '\u0595', + '\u0597', + '\u0598', + '\u0599', + '\u059C', + '\u059D', + '\u059E', + '\u059F', + '\u05A0', + '\u05A1', + '\u05A8', + '\u05A9', + '\u05AB', + '\u05AC', + '\u05AF', + '\u05C4', + '\u0610', + '\u0611', + '\u0612', + '\u0613', + '\u0614', + '\u0615', + '\u0616', + '\u0617', + '\u0657', + '\u0658', + '\u0659', + '\u065A', + '\u065B', + '\u065D', + '\u065E', + '\u06D6', + '\u06D7', + '\u06D8', + '\u06D9', + '\u06DA', + '\u06DB', + '\u06DC', + '\u06DF', + '\u06E0', + '\u06E1', + '\u06E2', + '\u06E4', + '\u06E7', + '\u06E8', + '\u06EB', + '\u06EC', + '\u0730', + '\u0732', + '\u0733', + '\u0735', + '\u0736', + '\u073A', + '\u073D', + '\u073F', + '\u0740', + '\u0741', + '\u0743', + '\u0745', + '\u0747', + '\u0749', + '\u074A', + '\u07EB', + '\u07EC', + '\u07ED', + '\u07EE', + '\u07EF', + '\u07F0', + '\u07F1', + '\u07F3', + '\u0816', + '\u0817', + '\u0818', + '\u0819', + '\u081B', + '\u081C', + '\u081D', + '\u081E', + '\u081F', + '\u0820', + '\u0821', + '\u0822', + '\u0823', + '\u0825', + '\u0826', + '\u0827', + '\u0829', + '\u082A', + '\u082B', + '\u082C', + '\u082D', + '\u0951', + '\u0953', + '\u0954', + '\u0F82', + '\u0F83', + '\u0F86', + '\u0F87', + '\u135D', + '\u135E', + '\u135F', + '\u17DD', + '\u193A', + '\u1A17', + '\u1A75', + '\u1A76', + '\u1A77', + '\u1A78', + '\u1A79', + '\u1A7A', + '\u1A7B', + '\u1A7C', + '\u1B6B', + '\u1B6D', + '\u1B6E', + '\u1B6F', + '\u1B70', + '\u1B71', + '\u1B72', + '\u1B73', + '\u1CD0', + '\u1CD1', + '\u1CD2', + '\u1CDA', + '\u1CDB', + '\u1CE0', + '\u1DC0', + '\u1DC1', + '\u1DC3', + '\u1DC4', + '\u1DC5', + '\u1DC6', + '\u1DC7', + '\u1DC8', + '\u1DC9', + '\u1DCB', + '\u1DCC', + '\u1DD1', + '\u1DD2', + '\u1DD3', + '\u1DD4', + '\u1DD5', + '\u1DD6', + '\u1DD7', + '\u1DD8', + '\u1DD9', + '\u1DDA', + '\u1DDB', + 
'\u1DDC', + '\u1DDD', + '\u1DDE', + '\u1DDF', + '\u1DE0', + '\u1DE1', + '\u1DE2', + '\u1DE3', + '\u1DE4', + '\u1DE5', + '\u1DE6', + '\u1DFE', + '\u20D0', + '\u20D1', + '\u20D4', + '\u20D5', + '\u20D6', + '\u20D7', + '\u20DB', + '\u20DC', + '\u20E1', + '\u20E7', + '\u20E9', + '\u20F0', + '\u2CEF', + '\u2CF0', + '\u2CF1', + '\u2DE0', + '\u2DE1', + '\u2DE2', + '\u2DE3', + '\u2DE4', + '\u2DE5', + '\u2DE6', + '\u2DE7', + '\u2DE8', + '\u2DE9', + '\u2DEA', + '\u2DEB', + '\u2DEC', + '\u2DED', + '\u2DEE', + '\u2DEF', + '\u2DF0', + '\u2DF1', + '\u2DF2', + '\u2DF3', + '\u2DF4', + '\u2DF5', + '\u2DF6', + '\u2DF7', + '\u2DF8', + '\u2DF9', + '\u2DFA', + '\u2DFB', + '\u2DFC', + '\u2DFD', + '\u2DFE', + '\u2DFF', + '\uA66F', + '\uA67C', + '\uA67D', + '\uA6F0', + '\uA6F1', + '\uA8E0', + '\uA8E1', + '\uA8E2', + '\uA8E3', + '\uA8E4', + '\uA8E5', + '\uA8E6', + '\uA8E7', + '\uA8E8', + '\uA8E9', + '\uA8EA', + '\uA8EB', + '\uA8EC', + '\uA8ED', + '\uA8EE', + '\uA8EF', + '\uA8F0', + '\uA8F1', + '\uAAB0', + '\uAAB2', + '\uAAB3', + '\uAAB7', + '\uAAB8', + '\uAABE', + '\uAABF', + '\uAAC1', + '\uFE20', + '\uFE21', + '\uFE22', + '\uFE23', + '\uFE24', + '\uFE25', + '\uFE26', + '\U00010A0F', + '\U00010A38', + '\U0001D185', + '\U0001D186', + '\U0001D187', + '\U0001D188', + '\U0001D189', + '\U0001D1AA', + '\U0001D1AB', + '\U0001D1AC', + '\U0001D1AD', + '\U0001D242', + '\U0001D243', + '\U0001D244', +} diff --git a/vendor/github.com/charmbracelet/x/ansi/kitty/options.go b/vendor/github.com/charmbracelet/x/ansi/kitty/options.go new file mode 100644 index 000000000..a8d907bdd --- /dev/null +++ b/vendor/github.com/charmbracelet/x/ansi/kitty/options.go @@ -0,0 +1,367 @@ +package kitty + +import ( + "encoding" + "fmt" + "strconv" + "strings" +) + +var ( + _ encoding.TextMarshaler = Options{} + _ encoding.TextUnmarshaler = &Options{} +) + +// Options represents a Kitty Graphics Protocol options. +type Options struct { + // Common options. + + // Action (a=t) is the action to be performed on the image. Can be one of + // [Transmit], [TransmitDisplay], [Query], [Put], [Delete], [Frame], + // [Animate], [Compose]. + Action byte + + // Quite mode (q=0) is the quiet mode. Can be either zero, one, or two + // where zero is the default, 1 suppresses OK responses, and 2 suppresses + // both OK and error responses. + Quite byte + + // Transmission options. + + // ID (i=) is the image ID. The ID is a unique identifier for the image. + // Must be a positive integer up to [math.MaxUint32]. + ID int + + // PlacementID (p=) is the placement ID. The placement ID is a unique + // identifier for the placement of the image. Must be a positive integer up + // to [math.MaxUint32]. + PlacementID int + + // Number (I=0) is the number of images to be transmitted. + Number int + + // Format (f=32) is the image format. One of [RGBA], [RGB], [PNG]. + Format int + + // ImageWidth (s=0) is the transmitted image width. + ImageWidth int + + // ImageHeight (v=0) is the transmitted image height. + ImageHeight int + + // Compression (o=) is the image compression type. Can be [Zlib] or zero. + Compression byte + + // Transmission (t=d) is the image transmission type. Can be [Direct], [File], + // [TempFile], or[SharedMemory]. + Transmission byte + + // File is the file path to be used when the transmission type is [File]. + // If [Options.Transmission] is omitted i.e. zero and this is non-empty, + // the transmission type is set to [File]. + File string + + // Size (S=0) is the size to be read from the transmission medium. 
+	Size int
+
+	// Offset (O=0) is the offset byte to start reading from the transmission
+	// medium.
+	Offset int
+
+	// Chunk (m=) indicates whether the image is transmitted in chunks. Can be
+	// zero or one. When true, the image is transmitted in chunks. Each chunk
+	// must be a multiple of 4, and up to [MaxChunkSize] bytes. Each chunk must
+	// have the m=1 option except for the last chunk, which must have m=0.
+	Chunk bool
+
+	// Display options.
+
+	// X (x=0) is the pixel X coordinate of the image to start displaying.
+	X int
+
+	// Y (y=0) is the pixel Y coordinate of the image to start displaying.
+	Y int
+
+	// Z (z=0) is the Z coordinate of the image to display.
+	Z int
+
+	// Width (w=0) is the width of the image to display.
+	Width int
+
+	// Height (h=0) is the height of the image to display.
+	Height int
+
+	// OffsetX (X=0) is the OffsetX coordinate of the cursor cell to start
+	// displaying the image. OffsetX=0 is the leftmost cell. This must be
+	// smaller than the terminal cell width.
+	OffsetX int
+
+	// OffsetY (Y=0) is the OffsetY coordinate of the cursor cell to start
+	// displaying the image. OffsetY=0 is the topmost cell. This must be
+	// smaller than the terminal cell height.
+	OffsetY int
+
+	// Columns (c=0) is the number of columns to display the image. The image
+	// will be scaled to fit the number of columns.
+	Columns int
+
+	// Rows (r=0) is the number of rows to display the image. The image will be
+	// scaled to fit the number of rows.
+	Rows int
+
+	// VirtualPlacement (U=0) indicates whether to use virtual placement. This
+	// is used with Unicode [Placeholder] to display images.
+	VirtualPlacement bool
+
+	// DoNotMoveCursor (C=0) indicates whether the cursor should not move
+	// after displaying the image.
+	DoNotMoveCursor bool
+
+	// ParentID (P=0) is the parent image ID. The parent ID is the ID of the
+	// image that is the parent of the current image. This is used with Unicode
+	// [Placeholder] to display images relative to the parent image.
+	ParentID int
+
+	// ParentPlacementID (Q=0) is the parent placement ID. The parent placement
+	// ID is the ID of the placement of the parent image. This is used with
+	// Unicode [Placeholder] to display images relative to the parent image.
+	ParentPlacementID int
+
+	// Delete options.
+
+	// Delete (d=a) is the delete action. Can be one of [DeleteAll],
+	// [DeleteID], [DeleteNumber], [DeleteCursor], [DeleteFrames],
+	// [DeleteCell], [DeleteCellZ], [DeleteRange], [DeleteColumn], [DeleteRow],
+	// [DeleteZ].
+	Delete byte
+
+	// DeleteResources indicates whether to delete the resources associated
+	// with the image.
+	DeleteResources bool
+}
+
+// Options returns the options as a slice of key-value pairs.
+func (o *Options) Options() (opts []string) { + opts = []string{} + if o.Format == 0 { + o.Format = RGBA + } + + if o.Action == 0 { + o.Action = Transmit + } + + if o.Delete == 0 { + o.Delete = DeleteAll + } + + if o.Transmission == 0 { + if len(o.File) > 0 { + o.Transmission = File + } else { + o.Transmission = Direct + } + } + + if o.Format != RGBA { + opts = append(opts, fmt.Sprintf("f=%d", o.Format)) + } + + if o.Quite > 0 { + opts = append(opts, fmt.Sprintf("q=%d", o.Quite)) + } + + if o.ID > 0 { + opts = append(opts, fmt.Sprintf("i=%d", o.ID)) + } + + if o.PlacementID > 0 { + opts = append(opts, fmt.Sprintf("p=%d", o.PlacementID)) + } + + if o.Number > 0 { + opts = append(opts, fmt.Sprintf("I=%d", o.Number)) + } + + if o.ImageWidth > 0 { + opts = append(opts, fmt.Sprintf("s=%d", o.ImageWidth)) + } + + if o.ImageHeight > 0 { + opts = append(opts, fmt.Sprintf("v=%d", o.ImageHeight)) + } + + if o.Transmission != Direct { + opts = append(opts, fmt.Sprintf("t=%c", o.Transmission)) + } + + if o.Size > 0 { + opts = append(opts, fmt.Sprintf("S=%d", o.Size)) + } + + if o.Offset > 0 { + opts = append(opts, fmt.Sprintf("O=%d", o.Offset)) + } + + if o.Compression == Zlib { + opts = append(opts, fmt.Sprintf("o=%c", o.Compression)) + } + + if o.VirtualPlacement { + opts = append(opts, "U=1") + } + + if o.DoNotMoveCursor { + opts = append(opts, "C=1") + } + + if o.ParentID > 0 { + opts = append(opts, fmt.Sprintf("P=%d", o.ParentID)) + } + + if o.ParentPlacementID > 0 { + opts = append(opts, fmt.Sprintf("Q=%d", o.ParentPlacementID)) + } + + if o.X > 0 { + opts = append(opts, fmt.Sprintf("x=%d", o.X)) + } + + if o.Y > 0 { + opts = append(opts, fmt.Sprintf("y=%d", o.Y)) + } + + if o.Z > 0 { + opts = append(opts, fmt.Sprintf("z=%d", o.Z)) + } + + if o.Width > 0 { + opts = append(opts, fmt.Sprintf("w=%d", o.Width)) + } + + if o.Height > 0 { + opts = append(opts, fmt.Sprintf("h=%d", o.Height)) + } + + if o.OffsetX > 0 { + opts = append(opts, fmt.Sprintf("X=%d", o.OffsetX)) + } + + if o.OffsetY > 0 { + opts = append(opts, fmt.Sprintf("Y=%d", o.OffsetY)) + } + + if o.Columns > 0 { + opts = append(opts, fmt.Sprintf("c=%d", o.Columns)) + } + + if o.Rows > 0 { + opts = append(opts, fmt.Sprintf("r=%d", o.Rows)) + } + + if o.Delete != DeleteAll || o.DeleteResources { + da := o.Delete + if o.DeleteResources { + da = da - ' ' // to uppercase + } + + opts = append(opts, fmt.Sprintf("d=%c", da)) + } + + if o.Action != Transmit { + opts = append(opts, fmt.Sprintf("a=%c", o.Action)) + } + + return +} + +// String returns the string representation of the options. +func (o Options) String() string { + return strings.Join(o.Options(), ",") +} + +// MarshalText returns the string representation of the options. +func (o Options) MarshalText() ([]byte, error) { + return []byte(o.String()), nil +} + +// UnmarshalText parses the options from the given string. 
+func (o *Options) UnmarshalText(text []byte) error { + opts := strings.Split(string(text), ",") + for _, opt := range opts { + ps := strings.SplitN(opt, "=", 2) + if len(ps) != 2 || len(ps[1]) == 0 { + continue + } + + switch ps[0] { + case "a": + o.Action = ps[1][0] + case "o": + o.Compression = ps[1][0] + case "t": + o.Transmission = ps[1][0] + case "d": + d := ps[1][0] + if d >= 'A' && d <= 'Z' { + o.DeleteResources = true + d = d + ' ' // to lowercase + } + o.Delete = d + case "i", "q", "p", "I", "f", "s", "v", "S", "O", "m", "x", "y", "z", "w", "h", "X", "Y", "c", "r", "U", "P", "Q": + v, err := strconv.Atoi(ps[1]) + if err != nil { + continue + } + + switch ps[0] { + case "i": + o.ID = v + case "q": + o.Quite = byte(v) + case "p": + o.PlacementID = v + case "I": + o.Number = v + case "f": + o.Format = v + case "s": + o.ImageWidth = v + case "v": + o.ImageHeight = v + case "S": + o.Size = v + case "O": + o.Offset = v + case "m": + o.Chunk = v == 0 || v == 1 + case "x": + o.X = v + case "y": + o.Y = v + case "z": + o.Z = v + case "w": + o.Width = v + case "h": + o.Height = v + case "X": + o.OffsetX = v + case "Y": + o.OffsetY = v + case "c": + o.Columns = v + case "r": + o.Rows = v + case "U": + o.VirtualPlacement = v == 1 + case "P": + o.ParentID = v + case "Q": + o.ParentPlacementID = v + } + } + } + + return nil +} diff --git a/vendor/github.com/charmbracelet/x/ansi/method.go b/vendor/github.com/charmbracelet/x/ansi/method.go new file mode 100644 index 000000000..0218809c9 --- /dev/null +++ b/vendor/github.com/charmbracelet/x/ansi/method.go @@ -0,0 +1,172 @@ +package ansi + +// Method is a type that represents the how the renderer should calculate the +// display width of cells. +type Method uint8 + +// Display width modes. +const ( + WcWidth Method = iota + GraphemeWidth +) + +// StringWidth returns the width of a string in cells. This is the number of +// cells that the string will occupy when printed in a terminal. ANSI escape +// codes are ignored and wide characters (such as East Asians and emojis) are +// accounted for. +func (m Method) StringWidth(s string) int { + return stringWidth(m, s) +} + +// Truncate truncates a string to a given length, adding a tail to the end if +// the string is longer than the given length. This function is aware of ANSI +// escape codes and will not break them, and accounts for wide-characters (such +// as East-Asian characters and emojis). +func (m Method) Truncate(s string, length int, tail string) string { + return truncate(m, s, length, tail) +} + +// TruncateLeft truncates a string to a given length, adding a prefix to the +// beginning if the string is longer than the given length. This function is +// aware of ANSI escape codes and will not break them, and accounts for +// wide-characters (such as East-Asian characters and emojis). +func (m Method) TruncateLeft(s string, length int, prefix string) string { + return truncateLeft(m, s, length, prefix) +} + +// Cut the string, without adding any prefix or tail strings. This function is +// aware of ANSI escape codes and will not break them, and accounts for +// wide-characters (such as East-Asian characters and emojis). Note that the +// [left] parameter is inclusive, while [right] isn't. +func (m Method) Cut(s string, left, right int) string { + return cut(m, s, left, right) +} + +// Hardwrap wraps a string or a block of text to a given line length, breaking +// word boundaries. This will preserve ANSI escape codes and will account for +// wide-characters in the string. 
+// When preserveSpace is true, spaces at the beginning of a line will be
+// preserved.
+// This treats the text as a sequence of graphemes.
+func (m Method) Hardwrap(s string, length int, preserveSpace bool) string {
+	return hardwrap(m, s, length, preserveSpace)
+}
+
+// Wordwrap wraps a string or a block of text to a given line length, not
+// breaking word boundaries. This will preserve ANSI escape codes and will
+// account for wide-characters in the string.
+// The breakpoints string is a list of characters that are considered
+// breakpoints for word wrapping. A hyphen (-) is always considered a
+// breakpoint.
+//
+// Note: breakpoints must be a string of 1-cell wide rune characters.
+func (m Method) Wordwrap(s string, length int, breakpoints string) string {
+	return wordwrap(m, s, length, breakpoints)
+}
+
+// Wrap wraps a string or a block of text to a given line length, breaking word
+// boundaries if necessary. This will preserve ANSI escape codes and will
+// account for wide-characters in the string. The breakpoints string is a list
+// of characters that are considered breakpoints for word wrapping. A hyphen
+// (-) is always considered a breakpoint.
+//
+// Note: breakpoints must be a string of 1-cell wide rune characters.
+func (m Method) Wrap(s string, length int, breakpoints string) string {
+	return wrap(m, s, length, breakpoints)
+}
+
+// DecodeSequence decodes the first ANSI escape sequence or a printable
+// grapheme from the given data. It returns the sequence slice, the cell
+// width of the sequence, the number of bytes read, and the new state.
+//
+// The cell width will always be 0 for control and escape sequences, 1 for
+// ASCII printable characters, and the number of cells other Unicode characters
+// occupy. It uses the uniseg package to calculate the width of Unicode
+// graphemes and characters. This means it will always do grapheme clustering
+// (mode 2027).
+//
+// Passing a non-nil [*Parser] as the last argument will allow the decoder to
+// collect sequence parameters, data, and commands. The parser cmd will have
+// the packed command value that contains intermediate and prefix characters.
+// In the case of an OSC sequence, the cmd will be the OSC command number. Use
+// [Cmd] and [Param] types to unpack command intermediates and prefixes as well
+// as parameters.
+//
+// A zero [Cmd] means the CSI, DCS, or ESC sequence is invalid. Checking the
+// validity of other data sequences, OSC, DCS, etc., requires checking for
+// the returned sequence terminator bytes such as ST (ESC \\) and BEL.
+//
+// We store the command byte in [Cmd] in the least significant byte, the
+// prefix byte in the next byte, and the intermediate byte in the byte
+// above that. This is done to avoid using a struct to store the command
+// and its intermediates and prefixes. The command byte is always the least
+// significant byte i.e. [Cmd & 0xff]. Use the [Cmd] type to unpack the
+// command, intermediate, and prefix bytes. Note that we only collect the last
+// prefix character and intermediate byte.
+//
+// The [p.Params] slice will contain the parameters of the sequence. Any
+// sub-parameter will have the [parser.HasMoreFlag] set. Use the [Param] type
+// to unpack the parameters.
+//
+// Example:
+//
+//	var state byte // the initial state is always zero [NormalState]
+//	p := NewParser(32, 1024) // create a new parser with a 32 params buffer and 1024 data buffer (optional)
+//	input := []byte("\x1b[31mHello, World!\x1b[0m")
+//	for len(input) > 0 {
+//		seq, width, n, newState := DecodeSequence(input, state, p)
+//		log.Printf("seq: %q, width: %d", seq, width)
+//		state = newState
+//		input = input[n:]
+//	}
+func (m Method) DecodeSequence(data []byte, state byte, p *Parser) (seq []byte, width, n int, newState byte) {
+	return decodeSequence(m, data, state, p)
+}
+
+// DecodeSequenceInString decodes the first ANSI escape sequence or a printable
+// grapheme from the given data. It returns the sequence string, the cell
+// width of the sequence, the number of bytes read, and the new state.
+//
+// The cell width will always be 0 for control and escape sequences, 1 for
+// ASCII printable characters, and the number of cells other Unicode characters
+// occupy. It uses the uniseg package to calculate the width of Unicode
+// graphemes and characters. This means it will always do grapheme clustering
+// (mode 2027).
+//
+// Passing a non-nil [*Parser] as the last argument will allow the decoder to
+// collect sequence parameters, data, and commands. The parser cmd will have
+// the packed command value that contains intermediate and prefix characters.
+// In the case of an OSC sequence, the cmd will be the OSC command number. Use
+// [Cmd] and [Param] types to unpack command intermediates and prefixes as well
+// as parameters.
+//
+// A zero [Cmd] means the CSI, DCS, or ESC sequence is invalid. Checking the
+// validity of other data sequences, OSC, DCS, etc., requires checking for
+// the returned sequence terminator bytes such as ST (ESC \\) and BEL.
+//
+// We store the command byte in [Cmd] in the least significant byte, the
+// prefix byte in the next byte, and the intermediate byte in the byte
+// above that. This is done to avoid using a struct to store the command
+// and its intermediates and prefixes. The command byte is always the least
+// significant byte i.e. [Cmd & 0xff]. Use the [Cmd] type to unpack the
+// command, intermediate, and prefix bytes. Note that we only collect the last
+// prefix character and intermediate byte.
+//
+// The [p.Params] slice will contain the parameters of the sequence. Any
+// sub-parameter will have the [parser.HasMoreFlag] set. Use the [Param] type
+// to unpack the parameters.
+//
+// Example:
+//
+//	var state byte // the initial state is always zero [NormalState]
+//	p := NewParser(32, 1024) // create a new parser with a 32 params buffer and 1024 data buffer (optional)
+//	input := "\x1b[31mHello, World!\x1b[0m"
+//	for len(input) > 0 {
+//		seq, width, n, newState := DecodeSequenceInString(input, state, p)
+//		log.Printf("seq: %q, width: %d", seq, width)
+//		state = newState
+//		input = input[n:]
+//	}
+func (m Method) DecodeSequenceInString(data string, state byte, p *Parser) (seq string, width, n int, newState byte) {
+	return decodeSequence(m, data, state, p)
+}
diff --git a/vendor/github.com/charmbracelet/x/ansi/mode.go b/vendor/github.com/charmbracelet/x/ansi/mode.go
index 7d25ddc54..57f3f0a8e 100644
--- a/vendor/github.com/charmbracelet/x/ansi/mode.go
+++ b/vendor/github.com/charmbracelet/x/ansi/mode.go
@@ -1,109 +1,677 @@
 package ansi
 
-// This file define uses multiple sequences to set (SM), reset (RM), and request
-// (DECRQM) different ANSI and DEC modes.
+import (
+	"strconv"
+	"strings"
+)
+
+// ModeSetting represents a mode setting.
+type ModeSetting byte
+
+// ModeSetting constants.
+const (
+	ModeNotRecognized ModeSetting = iota
+	ModeSet
+	ModeReset
+	ModePermanentlySet
+	ModePermanentlyReset
+)
+
+// IsNotRecognized returns true if the mode is not recognized.
+func (m ModeSetting) IsNotRecognized() bool {
+	return m == ModeNotRecognized
+}
+
+// IsSet returns true if the mode is set or permanently set.
+func (m ModeSetting) IsSet() bool {
+	return m == ModeSet || m == ModePermanentlySet
+}
+
+// IsReset returns true if the mode is reset or permanently reset.
+func (m ModeSetting) IsReset() bool {
+	return m == ModeReset || m == ModePermanentlyReset
+}
+
+// IsPermanentlySet returns true if the mode is permanently set.
+func (m ModeSetting) IsPermanentlySet() bool {
+	return m == ModePermanentlySet
+}
+
+// IsPermanentlyReset returns true if the mode is permanently reset.
+func (m ModeSetting) IsPermanentlyReset() bool {
+	return m == ModePermanentlyReset
+}
+
+// Mode represents an interface for terminal modes.
+// Modes can be set, reset, and requested.
+type Mode interface {
+	Mode() int
+}
+
+// SetMode (SM) returns a sequence to set a mode.
+// The mode arguments are a list of modes to set.
+//
+// If one of the modes is a [DECMode], the function returns two escape
+// sequences.
+//
+// ANSI format:
+//
+//	CSI Pd ; ... ; Pd h
+//
+// DEC format:
+//
+//	CSI ? Pd ; ... ; Pd h
 //
 // See: https://vt100.net/docs/vt510-rm/SM.html
+func SetMode(modes ...Mode) string {
+	return setMode(false, modes...)
+}
+
+// SM is an alias for [SetMode].
+func SM(modes ...Mode) string {
+	return SetMode(modes...)
+}
+
+// ResetMode (RM) returns a sequence to reset a mode.
+// The mode arguments are a list of modes to reset.
+//
+// If one of the modes is a [DECMode], the function returns two escape
+// sequences.
+//
+// ANSI format:
+//
+//	CSI Pd ; ... ; Pd l
+//
+// DEC format:
+//
+//	CSI ? Pd ; ... ; Pd l
+//
 // See: https://vt100.net/docs/vt510-rm/RM.html
-// See: https://vt100.net/docs/vt510-rm/DECRQM.html
+func ResetMode(modes ...Mode) string {
+	return setMode(true, modes...)
+}
+
+// RM is an alias for [ResetMode].
+func RM(modes ...Mode) string {
+	return ResetMode(modes...)
+}
+
+func setMode(reset bool, modes ...Mode) (s string) {
+	if len(modes) == 0 {
+		return
+	}
+
+	cmd := "h"
+	if reset {
+		cmd = "l"
+	}
+
+	seq := "\x1b["
+	if len(modes) == 1 {
+		switch modes[0].(type) {
+		case DECMode:
+			seq += "?"
+		}
+		return seq + strconv.Itoa(modes[0].Mode()) + cmd
+	}
+
+	dec := make([]string, 0, len(modes)/2)
+	ansi := make([]string, 0, len(modes)/2)
+	for _, m := range modes {
+		switch m.(type) {
+		case DECMode:
+			dec = append(dec, strconv.Itoa(m.Mode()))
+		case ANSIMode:
+			ansi = append(ansi, strconv.Itoa(m.Mode()))
+		}
+	}
+
+	if len(ansi) > 0 {
+		s += seq + strings.Join(ansi, ";") + cmd
+	}
+	if len(dec) > 0 {
+		s += seq + "?" + strings.Join(dec, ";") + cmd
+	}
+	return
+}
+
+// RequestMode (DECRQM) returns a sequence to request a mode from the terminal.
+// The terminal responds with a report mode function [DECRPM].
 //
-// The terminal then responds to the request with a Report Mode function
-// (DECRPM) in the format:
+// ANSI format:
+//
+//	CSI Pa $ p
+//
+// DEC format:
+//
+//	CSI ? Pa $ p
+//
+// See: https://vt100.net/docs/vt510-rm/DECRQM.html
+func RequestMode(m Mode) string {
+	seq := "\x1b["
+	switch m.(type) {
+	case DECMode:
+		seq += "?"
+ } + return seq + strconv.Itoa(m.Mode()) + "$p" +} + +// DECRQM is an alias for [RequestMode]. +func DECRQM(m Mode) string { + return RequestMode(m) +} + +// ReportMode (DECRPM) returns a sequence that the terminal sends to the host +// in response to a mode request [DECRQM]. // // ANSI format: // -// CSI Pa ; Ps ; $ y +// CSI Pa ; Ps ; $ y // // DEC format: // -// CSI ? Pa ; Ps $ y +// CSI ? Pa ; Ps $ y // // Where Pa is the mode number, and Ps is the mode value. +// +// 0: Not recognized +// 1: Set +// 2: Reset +// 3: Permanent set +// 4: Permanent reset +// // See: https://vt100.net/docs/vt510-rm/DECRPM.html +func ReportMode(mode Mode, value ModeSetting) string { + if value > 4 { + value = 0 + } + switch mode.(type) { + case DECMode: + return "\x1b[?" + strconv.Itoa(mode.Mode()) + ";" + strconv.Itoa(int(value)) + "$y" + } + return "\x1b[" + strconv.Itoa(mode.Mode()) + ";" + strconv.Itoa(int(value)) + "$y" +} + +// DECRPM is an alias for [ReportMode]. +func DECRPM(mode Mode, value ModeSetting) string { + return ReportMode(mode, value) +} + +// ANSIMode represents an ANSI terminal mode. +type ANSIMode int //nolint:revive + +// Mode returns the ANSI mode as an integer. +func (m ANSIMode) Mode() int { + return int(m) +} + +// DECMode represents a private DEC terminal mode. +type DECMode int -// Application Cursor Keys (DECCKM) is a mode that determines whether the -// cursor keys send ANSI cursor sequences or application sequences. +// Mode returns the DEC mode as an integer. +func (m DECMode) Mode() int { + return int(m) +} + +// Keyboard Action Mode (KAM) is a mode that controls locking of the keyboard. +// When the keyboard is locked, it cannot send data to the terminal. +// +// See: https://vt100.net/docs/vt510-rm/KAM.html +const ( + KeyboardActionMode = ANSIMode(2) + KAM = KeyboardActionMode + + SetKeyboardActionMode = "\x1b[2h" + ResetKeyboardActionMode = "\x1b[2l" + RequestKeyboardActionMode = "\x1b[2$p" +) + +// Insert/Replace Mode (IRM) is a mode that determines whether characters are +// inserted or replaced when typed. +// +// When enabled, characters are inserted at the cursor position pushing the +// characters to the right. When disabled, characters replace the character at +// the cursor position. +// +// See: https://vt100.net/docs/vt510-rm/IRM.html +const ( + InsertReplaceMode = ANSIMode(4) + IRM = InsertReplaceMode + + SetInsertReplaceMode = "\x1b[4h" + ResetInsertReplaceMode = "\x1b[4l" + RequestInsertReplaceMode = "\x1b[4$p" +) + +// Send Receive Mode (SRM) or Local Echo Mode is a mode that determines whether +// the terminal echoes characters back to the host. When enabled, the terminal +// sends characters to the host as they are typed. +// +// See: https://vt100.net/docs/vt510-rm/SRM.html +const ( + SendReceiveMode = ANSIMode(12) + LocalEchoMode = SendReceiveMode + SRM = SendReceiveMode + + SetSendReceiveMode = "\x1b[12h" + ResetSendReceiveMode = "\x1b[12l" + RequestSendReceiveMode = "\x1b[12$p" + + SetLocalEchoMode = "\x1b[12h" + ResetLocalEchoMode = "\x1b[12l" + RequestLocalEchoMode = "\x1b[12$p" +) + +// Line Feed/New Line Mode (LNM) is a mode that determines whether the terminal +// interprets the line feed character as a new line. +// +// When enabled, the terminal interprets the line feed character as a new line. +// When disabled, the terminal interprets the line feed character as a line feed. +// +// A new line moves the cursor to the first position of the next line. 
+// A line feed moves the cursor down one line without changing the column +// scrolling the screen if necessary. +// +// See: https://vt100.net/docs/vt510-rm/LNM.html +const ( + LineFeedNewLineMode = ANSIMode(20) + LNM = LineFeedNewLineMode + + SetLineFeedNewLineMode = "\x1b[20h" + ResetLineFeedNewLineMode = "\x1b[20l" + RequestLineFeedNewLineMode = "\x1b[20$p" +) + +// Cursor Keys Mode (DECCKM) is a mode that determines whether the cursor keys +// send ANSI cursor sequences or application sequences. // // See: https://vt100.net/docs/vt510-rm/DECCKM.html +const ( + CursorKeysMode = DECMode(1) + DECCKM = CursorKeysMode + + SetCursorKeysMode = "\x1b[?1h" + ResetCursorKeysMode = "\x1b[?1l" + RequestCursorKeysMode = "\x1b[?1$p" +) + +// Deprecated: use [SetCursorKeysMode] and [ResetCursorKeysMode] instead. const ( EnableCursorKeys = "\x1b[?1h" DisableCursorKeys = "\x1b[?1l" - RequestCursorKeys = "\x1b[?1$p" +) + +// Origin Mode (DECOM) is a mode that determines whether the cursor moves to the +// home position or the margin position. +// +// See: https://vt100.net/docs/vt510-rm/DECOM.html +const ( + OriginMode = DECMode(6) + DECOM = OriginMode + + SetOriginMode = "\x1b[?6h" + ResetOriginMode = "\x1b[?6l" + RequestOriginMode = "\x1b[?6$p" +) + +// Auto Wrap Mode (DECAWM) is a mode that determines whether the cursor wraps +// to the next line when it reaches the right margin. +// +// See: https://vt100.net/docs/vt510-rm/DECAWM.html +const ( + AutoWrapMode = DECMode(7) + DECAWM = AutoWrapMode + + SetAutoWrapMode = "\x1b[?7h" + ResetAutoWrapMode = "\x1b[?7l" + RequestAutoWrapMode = "\x1b[?7$p" +) + +// X10 Mouse Mode is a mode that determines whether the mouse reports on button +// presses. +// +// The terminal responds with the following encoding: +// +// CSI M CbCxCy +// +// Where Cb is the button-1, where it can be 1, 2, or 3. +// Cx and Cy are the x and y coordinates of the mouse event. +// +// See: https://invisible-island.net/xterm/ctlseqs/ctlseqs.html#h2-Mouse-Tracking +const ( + X10MouseMode = DECMode(9) + + SetX10MouseMode = "\x1b[?9h" + ResetX10MouseMode = "\x1b[?9l" + RequestX10MouseMode = "\x1b[?9$p" ) // Text Cursor Enable Mode (DECTCEM) is a mode that shows/hides the cursor. // // See: https://vt100.net/docs/vt510-rm/DECTCEM.html const ( - ShowCursor = "\x1b[?25h" - HideCursor = "\x1b[?25l" + TextCursorEnableMode = DECMode(25) + DECTCEM = TextCursorEnableMode + + SetTextCursorEnableMode = "\x1b[?25h" + ResetTextCursorEnableMode = "\x1b[?25l" + RequestTextCursorEnableMode = "\x1b[?25$p" +) + +// These are aliases for [SetTextCursorEnableMode] and [ResetTextCursorEnableMode]. +const ( + ShowCursor = SetTextCursorEnableMode + HideCursor = ResetTextCursorEnableMode +) + +// Text Cursor Enable Mode (DECTCEM) is a mode that shows/hides the cursor. +// +// See: https://vt100.net/docs/vt510-rm/DECTCEM.html +// +// Deprecated: use [SetTextCursorEnableMode] and [ResetTextCursorEnableMode] instead. +const ( + CursorEnableMode = DECMode(25) RequestCursorVisibility = "\x1b[?25$p" ) +// Numeric Keypad Mode (DECNKM) is a mode that determines whether the keypad +// sends application sequences or numeric sequences. +// +// This works like [DECKPAM] and [DECKPNM], but uses different sequences. 
+// +// See: https://vt100.net/docs/vt510-rm/DECNKM.html +const ( + NumericKeypadMode = DECMode(66) + DECNKM = NumericKeypadMode + + SetNumericKeypadMode = "\x1b[?66h" + ResetNumericKeypadMode = "\x1b[?66l" + RequestNumericKeypadMode = "\x1b[?66$p" +) + +// Backarrow Key Mode (DECBKM) is a mode that determines whether the backspace +// key sends a backspace or delete character. Disabled by default. +// +// See: https://vt100.net/docs/vt510-rm/DECBKM.html +const ( + BackarrowKeyMode = DECMode(67) + DECBKM = BackarrowKeyMode + + SetBackarrowKeyMode = "\x1b[?67h" + ResetBackarrowKeyMode = "\x1b[?67l" + RequestBackarrowKeyMode = "\x1b[?67$p" +) + +// Left Right Margin Mode (DECLRMM) is a mode that determines whether the left +// and right margins can be set with [DECSLRM]. +// +// See: https://vt100.net/docs/vt510-rm/DECLRMM.html +const ( + LeftRightMarginMode = DECMode(69) + DECLRMM = LeftRightMarginMode + + SetLeftRightMarginMode = "\x1b[?69h" + ResetLeftRightMarginMode = "\x1b[?69l" + RequestLeftRightMarginMode = "\x1b[?69$p" +) + +// Normal Mouse Mode is a mode that determines whether the mouse reports on +// button presses and releases. It will also report modifier keys, wheel +// events, and extra buttons. +// +// It uses the same encoding as [X10MouseMode] with a few differences: +// +// See: https://invisible-island.net/xterm/ctlseqs/ctlseqs.html#h2-Mouse-Tracking +const ( + NormalMouseMode = DECMode(1000) + + SetNormalMouseMode = "\x1b[?1000h" + ResetNormalMouseMode = "\x1b[?1000l" + RequestNormalMouseMode = "\x1b[?1000$p" +) + // VT Mouse Tracking is a mode that determines whether the mouse reports on // button press and release. // // See: https://invisible-island.net/xterm/ctlseqs/ctlseqs.html#h2-Mouse-Tracking +// +// Deprecated: use [NormalMouseMode] instead. const ( + MouseMode = DECMode(1000) + EnableMouse = "\x1b[?1000h" DisableMouse = "\x1b[?1000l" RequestMouse = "\x1b[?1000$p" ) +// Highlight Mouse Tracking is a mode that determines whether the mouse reports +// on button presses, releases, and highlighted cells. +// +// It uses the same encoding as [NormalMouseMode] with a few differences: +// +// On highlight events, the terminal responds with the following encoding: +// +// CSI t CxCy +// CSI T CxCyCxCyCxCy +// +// Where the parameters are startx, starty, endx, endy, mousex, and mousey. +const ( + HighlightMouseMode = DECMode(1001) + + SetHighlightMouseMode = "\x1b[?1001h" + ResetHighlightMouseMode = "\x1b[?1001l" + RequestHighlightMouseMode = "\x1b[?1001$p" +) + // VT Hilite Mouse Tracking is a mode that determines whether the mouse reports on // button presses, releases, and highlighted cells. // // See: https://invisible-island.net/xterm/ctlseqs/ctlseqs.html#h2-Mouse-Tracking +// +// Deprecated: use [HighlightMouseMode] instead. const ( + MouseHiliteMode = DECMode(1001) + EnableMouseHilite = "\x1b[?1001h" DisableMouseHilite = "\x1b[?1001l" RequestMouseHilite = "\x1b[?1001$p" ) +// Button Event Mouse Tracking is essentially the same as [NormalMouseMode], +// but it also reports button-motion events when a button is pressed. +// +// See: https://invisible-island.net/xterm/ctlseqs/ctlseqs.html#h2-Mouse-Tracking +const ( + ButtonEventMouseMode = DECMode(1002) + + SetButtonEventMouseMode = "\x1b[?1002h" + ResetButtonEventMouseMode = "\x1b[?1002l" + RequestButtonEventMouseMode = "\x1b[?1002$p" +) + // Cell Motion Mouse Tracking is a mode that determines whether the mouse // reports on button press, release, and motion events. 
 //
 // See: https://invisible-island.net/xterm/ctlseqs/ctlseqs.html#h2-Mouse-Tracking
+//
+// Deprecated: use [ButtonEventMouseMode] instead.
 const (
+	MouseCellMotionMode = DECMode(1002)
+
 	EnableMouseCellMotion  = "\x1b[?1002h"
 	DisableMouseCellMotion = "\x1b[?1002l"
 	RequestMouseCellMotion = "\x1b[?1002$p"
 )
 
+// Any Event Mouse Tracking is the same as [ButtonEventMouseMode], except that
+// all motion events are reported even if no mouse buttons are pressed.
+//
+// See: https://invisible-island.net/xterm/ctlseqs/ctlseqs.html#h2-Mouse-Tracking
+const (
+	AnyEventMouseMode = DECMode(1003)
+
+	SetAnyEventMouseMode     = "\x1b[?1003h"
+	ResetAnyEventMouseMode   = "\x1b[?1003l"
+	RequestAnyEventMouseMode = "\x1b[?1003$p"
+)
+
 // All Mouse Tracking is a mode that determines whether the mouse reports on
 // button press, release, motion, and highlight events.
 //
 // See: https://invisible-island.net/xterm/ctlseqs/ctlseqs.html#h2-Mouse-Tracking
+//
+// Deprecated: use [AnyEventMouseMode] instead.
 const (
+	MouseAllMotionMode = DECMode(1003)
+
 	EnableMouseAllMotion  = "\x1b[?1003h"
 	DisableMouseAllMotion = "\x1b[?1003l"
 	RequestMouseAllMotion = "\x1b[?1003$p"
 )
 
-// Report Focus is a mode that makes the terminal report focus-in and focus-out events.
+// Focus Event Mode is a mode that determines whether the terminal reports focus
+// and blur events.
+//
+// The terminal sends the following encoding:
 //
-// See: https://invisible-island.net/xterm/ctlseqs/ctlseqs.html#h3-FocusIn_FocusOut
+//	CSI I // Focus In
+//	CSI O // Focus Out
+//
+// See: https://invisible-island.net/xterm/ctlseqs/ctlseqs.html#h2-Focus-Tracking
+const (
+	FocusEventMode = DECMode(1004)
+
+	SetFocusEventMode     = "\x1b[?1004h"
+	ResetFocusEventMode   = "\x1b[?1004l"
+	RequestFocusEventMode = "\x1b[?1004$p"
+)
+
+// Deprecated: use [SetFocusEventMode], [ResetFocusEventMode], and
+// [RequestFocusEventMode] instead.
 const (
+	ReportFocusMode = DECMode(1004)
+
 	EnableReportFocus  = "\x1b[?1004h"
 	DisableReportFocus = "\x1b[?1004l"
 	RequestReportFocus = "\x1b[?1004$p"
 )
 
-// SGR Mouse Extension is a mode that determines whether the mouse reports events
-// formatted with SGR parameters.
+// SGR Extended Mouse Mode is a mode that changes the mouse tracking encoding
+// to use SGR parameters.
+//
+// The terminal responds with the following encoding:
+//
+//	CSI < Cb ; Cx ; Cy M
+//
+// Where Cb is the same as [NormalMouseMode], and Cx and Cy are the x and y
+// coordinates of the mouse event.
 //
 // See: https://invisible-island.net/xterm/ctlseqs/ctlseqs.html#h2-Mouse-Tracking
 const (
+	SgrExtMouseMode = DECMode(1006)
+
+	SetSgrExtMouseMode     = "\x1b[?1006h"
+	ResetSgrExtMouseMode   = "\x1b[?1006l"
+	RequestSgrExtMouseMode = "\x1b[?1006$p"
+)
+
+// Deprecated: use [SgrExtMouseMode], [SetSgrExtMouseMode],
+// [ResetSgrExtMouseMode], and [RequestSgrExtMouseMode] instead.
+const (
+	MouseSgrExtMode    = DECMode(1006)
 	EnableMouseSgrExt  = "\x1b[?1006h"
 	DisableMouseSgrExt = "\x1b[?1006l"
 	RequestMouseSgrExt = "\x1b[?1006$p"
 )
 
+// UTF-8 Extended Mouse Mode is a mode that changes the mouse tracking encoding
+// to use UTF-8 parameters.
+//
+// See: https://invisible-island.net/xterm/ctlseqs/ctlseqs.html#h2-Mouse-Tracking
+const (
+	Utf8ExtMouseMode = DECMode(1005)
+
+	SetUtf8ExtMouseMode     = "\x1b[?1005h"
+	ResetUtf8ExtMouseMode   = "\x1b[?1005l"
+	RequestUtf8ExtMouseMode = "\x1b[?1005$p"
+)
+
+// URXVT Extended Mouse Mode is a mode that changes the mouse tracking encoding
+// to use an alternate encoding.
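+//
+// The terminal is commonly described as responding with the button and
+// coordinates as plain decimal parameters (an informational note; Cb keeps
+// the 32 offset of the legacy encoding):
+//
+//	CSI Cb ; Cx ; Cy M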
+// +// See: https://invisible-island.net/xterm/ctlseqs/ctlseqs.html#h2-Mouse-Tracking +const ( + UrxvtExtMouseMode = DECMode(1015) + + SetUrxvtExtMouseMode = "\x1b[?1015h" + ResetUrxvtExtMouseMode = "\x1b[?1015l" + RequestUrxvtExtMouseMode = "\x1b[?1015$p" +) + +// SGR Pixel Extended Mouse Mode is a mode that changes the mouse tracking +// encoding to use SGR parameters with pixel coordinates. +// +// This is similar to [SgrExtMouseMode], but also reports pixel coordinates. +// +// See: https://invisible-island.net/xterm/ctlseqs/ctlseqs.html#h2-Mouse-Tracking +const ( + SgrPixelExtMouseMode = DECMode(1016) + + SetSgrPixelExtMouseMode = "\x1b[?1016h" + ResetSgrPixelExtMouseMode = "\x1b[?1016l" + RequestSgrPixelExtMouseMode = "\x1b[?1016$p" +) + +// Alternate Screen Mode is a mode that determines whether the alternate screen +// buffer is active. When this mode is enabled, the alternate screen buffer is +// cleared. +// +// See: https://invisible-island.net/xterm/ctlseqs/ctlseqs.html#h2-The-Alternate-Screen-Buffer +const ( + AltScreenMode = DECMode(1047) + + SetAltScreenMode = "\x1b[?1047h" + ResetAltScreenMode = "\x1b[?1047l" + RequestAltScreenMode = "\x1b[?1047$p" +) + +// Save Cursor Mode is a mode that saves the cursor position. +// This is equivalent to [SaveCursor] and [RestoreCursor]. +// +// See: https://invisible-island.net/xterm/ctlseqs/ctlseqs.html#h2-The-Alternate-Screen-Buffer +const ( + SaveCursorMode = DECMode(1048) + + SetSaveCursorMode = "\x1b[?1048h" + ResetSaveCursorMode = "\x1b[?1048l" + RequestSaveCursorMode = "\x1b[?1048$p" +) + +// Alternate Screen Save Cursor Mode is a mode that saves the cursor position as in +// [SaveCursorMode], switches to the alternate screen buffer as in [AltScreenMode], +// and clears the screen on switch. +// +// See: https://invisible-island.net/xterm/ctlseqs/ctlseqs.html#h2-The-Alternate-Screen-Buffer +const ( + AltScreenSaveCursorMode = DECMode(1049) + + SetAltScreenSaveCursorMode = "\x1b[?1049h" + ResetAltScreenSaveCursorMode = "\x1b[?1049l" + RequestAltScreenSaveCursorMode = "\x1b[?1049$p" +) + // Alternate Screen Buffer is a mode that determines whether the alternate screen // buffer is active. // // See: https://invisible-island.net/xterm/ctlseqs/ctlseqs.html#h2-The-Alternate-Screen-Buffer +// +// Deprecated: use [AltScreenSaveCursorMode] instead. const ( + AltScreenBufferMode = DECMode(1049) + + SetAltScreenBufferMode = "\x1b[?1049h" + ResetAltScreenBufferMode = "\x1b[?1049l" + RequestAltScreenBufferMode = "\x1b[?1049$p" + EnableAltScreenBuffer = "\x1b[?1049h" DisableAltScreenBuffer = "\x1b[?1049l" RequestAltScreenBuffer = "\x1b[?1049$p" @@ -114,6 +682,16 @@ const ( // // See: https://cirw.in/blog/bracketed-paste // See: https://invisible-island.net/xterm/ctlseqs/ctlseqs.html#h2-Bracketed-Paste-Mode +const ( + BracketedPasteMode = DECMode(2004) + + SetBracketedPasteMode = "\x1b[?2004h" + ResetBracketedPasteMode = "\x1b[?2004l" + RequestBracketedPasteMode = "\x1b[?2004$p" +) + +// Deprecated: use [SetBracketedPasteMode], [ResetBracketedPasteMode], and +// [RequestBracketedPasteMode] instead. 
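+//
+// For example (an illustrative sketch using the names above):
+//
+//	_, _ = io.WriteString(os.Stdout, SetBracketedPasteMode)   // enable
+//	_, _ = io.WriteString(os.Stdout, ResetBracketedPasteMode) // disable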
 const (
 	EnableBracketedPaste  = "\x1b[?2004h"
 	DisableBracketedPaste = "\x1b[?2004l"
@@ -125,6 +703,18 @@ const (
 //
 // See: https://gist.github.com/christianparpart/d8a62cc1ab659194337d73e399004036
 const (
+	SynchronizedOutputMode = DECMode(2026)
+
+	SetSynchronizedOutputMode     = "\x1b[?2026h"
+	ResetSynchronizedOutputMode   = "\x1b[?2026l"
+	RequestSynchronizedOutputMode = "\x1b[?2026$p"
+)
+
+// Deprecated: use [SynchronizedOutputMode], [SetSynchronizedOutputMode],
+// [ResetSynchronizedOutputMode], and [RequestSynchronizedOutputMode] instead.
+const (
+	SyncdOutputMode = DECMode(2026)
+
 	EnableSyncdOutput  = "\x1b[?2026h"
 	DisableSyncdOutput = "\x1b[?2026l"
 	RequestSyncdOutput = "\x1b[?2026$p"
@@ -136,6 +726,16 @@ const (
 // emojis.
 //
 // See: https://github.com/contour-terminal/terminal-unicode-core
+const (
+	GraphemeClusteringMode = DECMode(2027)
+
+	SetGraphemeClusteringMode     = "\x1b[?2027h"
+	ResetGraphemeClusteringMode   = "\x1b[?2027l"
+	RequestGraphemeClusteringMode = "\x1b[?2027$p"
+)
+
+// Deprecated: use [SetGraphemeClusteringMode], [ResetGraphemeClusteringMode], and
+// [RequestGraphemeClusteringMode] instead.
 const (
 	EnableGraphemeClustering  = "\x1b[?2027h"
 	DisableGraphemeClustering = "\x1b[?2027l"
@@ -146,6 +746,16 @@ const (
 // Win32 console and Conpty.
 //
 // See: https://github.com/microsoft/terminal/blob/main/doc/specs/%234999%20-%20Improved%20keyboard%20handling%20in%20Conpty.md
+const (
+	Win32InputMode = DECMode(9001)
+
+	SetWin32InputMode     = "\x1b[?9001h"
+	ResetWin32InputMode   = "\x1b[?9001l"
+	RequestWin32InputMode = "\x1b[?9001$p"
+)
+
+// Deprecated: use [SetWin32InputMode], [ResetWin32InputMode], and
+// [RequestWin32InputMode] instead.
 const (
 	EnableWin32Input  = "\x1b[?9001h"
 	DisableWin32Input = "\x1b[?9001l"
diff --git a/vendor/github.com/charmbracelet/x/ansi/modes.go b/vendor/github.com/charmbracelet/x/ansi/modes.go
new file mode 100644
index 000000000..1bec5bc80
--- /dev/null
+++ b/vendor/github.com/charmbracelet/x/ansi/modes.go
@@ -0,0 +1,71 @@
+package ansi
+
+// Modes represents the terminal modes that can be set or reset. By default,
+// all modes are [ModeNotRecognized].
+type Modes map[Mode]ModeSetting
+
+// NewModes creates a new Modes map. By default, all modes are
+// [ModeNotRecognized].
+func NewModes() Modes {
+	return make(Modes)
+}
+
+// Get returns the setting of a terminal mode. If the mode is not set, it
+// returns [ModeNotRecognized].
+func (m Modes) Get(mode Mode) ModeSetting {
+	return m[mode]
+}
+
+// Delete deletes a terminal mode. This has the same effect as setting the mode
+// to [ModeNotRecognized].
+func (m Modes) Delete(mode Mode) {
+	delete(m, mode)
+}
+
+// Set sets a terminal mode to [ModeSet].
+func (m Modes) Set(modes ...Mode) {
+	for _, mode := range modes {
+		m[mode] = ModeSet
+	}
+}
+
+// PermanentlySet sets a terminal mode to [ModePermanentlySet].
+func (m Modes) PermanentlySet(modes ...Mode) {
+	for _, mode := range modes {
+		m[mode] = ModePermanentlySet
+	}
+}
+
+// Reset sets a terminal mode to [ModeReset].
+func (m Modes) Reset(modes ...Mode) {
+	for _, mode := range modes {
+		m[mode] = ModeReset
+	}
+}
+
+// PermanentlyReset sets a terminal mode to [ModePermanentlyReset].
+func (m Modes) PermanentlyReset(modes ...Mode) {
+	for _, mode := range modes {
+		m[mode] = ModePermanentlyReset
+	}
+}
+
+// IsSet returns true if the mode is set to [ModeSet] or [ModePermanentlySet].
+func (m Modes) IsSet(mode Mode) bool {
+	return m[mode].IsSet()
+}
+
+// IsPermanentlySet returns true if the mode is set to [ModePermanentlySet].
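+//
+// A short usage sketch for [Modes] (the mode choice is illustrative):
+//
+//	m := NewModes()
+//	m.Set(TextCursorEnableMode)
+//	_ = m.IsSet(TextCursorEnableMode)            // true
+//	_ = m.IsPermanentlySet(TextCursorEnableMode) // false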
+func (m Modes) IsPermanentlySet(mode Mode) bool {
+	return m[mode].IsPermanentlySet()
+}
+
+// IsReset returns true if the mode is set to [ModeReset] or [ModePermanentlyReset].
+func (m Modes) IsReset(mode Mode) bool {
+	return m[mode].IsReset()
+}
+
+// IsPermanentlyReset returns true if the mode is set to [ModePermanentlyReset].
+func (m Modes) IsPermanentlyReset(mode Mode) bool {
+	return m[mode].IsPermanentlyReset()
+}
diff --git a/vendor/github.com/charmbracelet/x/ansi/mouse.go b/vendor/github.com/charmbracelet/x/ansi/mouse.go
new file mode 100644
index 000000000..95b0127fd
--- /dev/null
+++ b/vendor/github.com/charmbracelet/x/ansi/mouse.go
@@ -0,0 +1,172 @@
+package ansi
+
+import (
+	"fmt"
+)
+
+// MouseButton represents the button that was pressed during a mouse message.
+type MouseButton byte
+
+// Mouse event buttons.
+//
+// This is based on X11 mouse button codes.
+//
+//	1 = left button
+//	2 = middle button (pressing the scroll wheel)
+//	3 = right button
+//	4 = turn scroll wheel up
+//	5 = turn scroll wheel down
+//	6 = push scroll wheel left
+//	7 = push scroll wheel right
+//	8 = 4th button (aka browser backward button)
+//	9 = 5th button (aka browser forward button)
+//	10
+//	11
+//
+// Other buttons are not supported.
+const (
+	MouseNone MouseButton = iota
+	MouseButton1
+	MouseButton2
+	MouseButton3
+	MouseButton4
+	MouseButton5
+	MouseButton6
+	MouseButton7
+	MouseButton8
+	MouseButton9
+	MouseButton10
+	MouseButton11
+
+	MouseLeft       = MouseButton1
+	MouseMiddle     = MouseButton2
+	MouseRight      = MouseButton3
+	MouseWheelUp    = MouseButton4
+	MouseWheelDown  = MouseButton5
+	MouseWheelLeft  = MouseButton6
+	MouseWheelRight = MouseButton7
+	MouseBackward   = MouseButton8
+	MouseForward    = MouseButton9
+	MouseRelease    = MouseNone
+)
+
+var mouseButtons = map[MouseButton]string{
+	MouseNone:       "none",
+	MouseLeft:       "left",
+	MouseMiddle:     "middle",
+	MouseRight:      "right",
+	MouseWheelUp:    "wheelup",
+	MouseWheelDown:  "wheeldown",
+	MouseWheelLeft:  "wheelleft",
+	MouseWheelRight: "wheelright",
+	MouseBackward:   "backward",
+	MouseForward:    "forward",
+	MouseButton10:   "button10",
+	MouseButton11:   "button11",
+}
+
+// String returns a string representation of the mouse button.
+func (b MouseButton) String() string {
+	return mouseButtons[b]
+}
+
+// EncodeMouseButton returns a byte representing a mouse button.
+// The returned byte is a bitmask built from the following values, starting at
+// the least significant bits:
+//
+//   - The first two bits are the button number:
+//     0 = left button, wheel up, or button no. 8 (aka backward)
+//     1 = middle button, wheel down, or button no. 9 (aka forward)
+//     2 = right button, wheel left, or button no. 10
+//     3 = release event, wheel right, or button no. 11
+//
+//   - The third bit indicates whether the shift key was pressed.
+//
+//   - The fourth bit indicates whether the alt key was pressed.
+//
+//   - The fifth bit indicates whether the control key was pressed.
+//
+//   - The sixth bit indicates motion events. Combined with button number 3, i.e.
+//     release event, it represents a drag event.
+//
+//   - The seventh bit indicates a wheel event.
+//
+//   - The eighth bit indicates additional buttons.
+//
+// If button is [MouseNone] and motion is false, this returns a release event.
+// If button is undefined, this function returns 0xff.
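+//
+// A minimal sketch of the resulting bit patterns, derived from the layout
+// described above:
+//
+//	_ = EncodeMouseButton(MouseLeft, false, false, false, false)   // 0b0000_0000
+//	_ = EncodeMouseButton(MouseLeft, true, false, false, false)    // 0b0010_0000 (drag)
+//	_ = EncodeMouseButton(MouseWheelUp, false, false, false, true) // 0b0101_0000 (wheel + ctrl)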
+func EncodeMouseButton(b MouseButton, motion, shift, alt, ctrl bool) (m byte) { + // mouse bit shifts + const ( + bitShift = 0b0000_0100 + bitAlt = 0b0000_1000 + bitCtrl = 0b0001_0000 + bitMotion = 0b0010_0000 + bitWheel = 0b0100_0000 + bitAdd = 0b1000_0000 // additional buttons 8-11 + + bitsMask = 0b0000_0011 + ) + + if b == MouseNone { + m = bitsMask + } else if b >= MouseLeft && b <= MouseRight { + m = byte(b - MouseLeft) + } else if b >= MouseWheelUp && b <= MouseWheelRight { + m = byte(b - MouseWheelUp) + m |= bitWheel + } else if b >= MouseBackward && b <= MouseButton11 { + m = byte(b - MouseBackward) + m |= bitAdd + } else { + m = 0xff // invalid button + } + + if shift { + m |= bitShift + } + if alt { + m |= bitAlt + } + if ctrl { + m |= bitCtrl + } + if motion { + m |= bitMotion + } + + return +} + +// x10Offset is the offset for X10 mouse events. +// See https://invisible-island.net/xterm/ctlseqs/ctlseqs.html#Mouse%20Tracking +const x10Offset = 32 + +// MouseX10 returns an escape sequence representing a mouse event in X10 mode. +// Note that this requires the terminal support X10 mouse modes. +// +// CSI M Cb Cx Cy +// +// See: https://invisible-island.net/xterm/ctlseqs/ctlseqs.html#Mouse%20Tracking +func MouseX10(b byte, x, y int) string { + return "\x1b[M" + string(b+x10Offset) + string(byte(x)+x10Offset+1) + string(byte(y)+x10Offset+1) +} + +// MouseSgr returns an escape sequence representing a mouse event in SGR mode. +// +// CSI < Cb ; Cx ; Cy M +// CSI < Cb ; Cx ; Cy m (release) +// +// See: https://invisible-island.net/xterm/ctlseqs/ctlseqs.html#Mouse%20Tracking +func MouseSgr(b byte, x, y int, release bool) string { + s := 'M' + if release { + s = 'm' + } + if x < 0 { + x = -x + } + if y < 0 { + y = -y + } + return fmt.Sprintf("\x1b[<%d;%d;%d%c", b, x+1, y+1, s) +} diff --git a/vendor/github.com/charmbracelet/x/ansi/notification.go b/vendor/github.com/charmbracelet/x/ansi/notification.go new file mode 100644 index 000000000..c712f3405 --- /dev/null +++ b/vendor/github.com/charmbracelet/x/ansi/notification.go @@ -0,0 +1,13 @@ +package ansi + +// Notify sends a desktop notification using iTerm's OSC 9. +// +// OSC 9 ; Mc ST +// OSC 9 ; Mc BEL +// +// Where Mc is the notification body. +// +// See: https://iterm2.com/documentation-escape-codes.html +func Notify(s string) string { + return "\x1b]9;" + s + "\x07" +} diff --git a/vendor/github.com/charmbracelet/x/ansi/osc.go b/vendor/github.com/charmbracelet/x/ansi/osc.go deleted file mode 100644 index 40b543c29..000000000 --- a/vendor/github.com/charmbracelet/x/ansi/osc.go +++ /dev/null @@ -1,69 +0,0 @@ -package ansi - -import ( - "bytes" - "strings" -) - -// OscSequence represents an OSC sequence. -// -// The sequence starts with a OSC sequence, OSC (0x9D) in a 8-bit environment -// or ESC ] (0x1B 0x5D) in a 7-bit environment, followed by positive integer identifier, -// then by arbitrary data terminated by a ST (0x9C) in a 8-bit environment, -// ESC \ (0x1B 0x5C) in a 7-bit environment, or BEL (0x07) for backwards compatibility. -// -// OSC Ps ; Pt ST -// OSC Ps ; Pt BEL -// -// See ECMA-48 § 5.7. -type OscSequence struct { - // Data contains the raw data of the sequence including the identifier - // command. - Data []byte - - // Cmd contains the raw command of the sequence. - Cmd int -} - -var _ Sequence = OscSequence{} - -// Command returns the command of the OSC sequence. -func (s OscSequence) Command() int { - return s.Cmd -} - -// Params returns the parameters of the OSC sequence split by ';'. 
-// The first element is the identifier command. -func (s OscSequence) Params() []string { - return strings.Split(string(s.Data), ";") -} - -// Clone returns a copy of the OSC sequence. -func (s OscSequence) Clone() Sequence { - return OscSequence{ - Data: append([]byte(nil), s.Data...), - Cmd: s.Cmd, - } -} - -// String returns the string representation of the OSC sequence. -// To be more compatible with different terminal, this will always return a -// 7-bit formatted sequence, terminated by BEL. -func (s OscSequence) String() string { - return s.buffer().String() -} - -// Bytes returns the byte representation of the OSC sequence. -// To be more compatible with different terminal, this will always return a -// 7-bit formatted sequence, terminated by BEL. -func (s OscSequence) Bytes() []byte { - return s.buffer().Bytes() -} - -func (s OscSequence) buffer() *bytes.Buffer { - var b bytes.Buffer - b.WriteString("\x1b]") - b.Write(s.Data) - b.WriteByte(BEL) - return &b -} diff --git a/vendor/github.com/charmbracelet/x/ansi/params.go b/vendor/github.com/charmbracelet/x/ansi/params.go deleted file mode 100644 index a1bb42498..000000000 --- a/vendor/github.com/charmbracelet/x/ansi/params.go +++ /dev/null @@ -1,45 +0,0 @@ -package ansi - -import ( - "bytes" -) - -// Params parses and returns a list of control sequence parameters. -// -// Parameters are positive integers separated by semicolons. Empty parameters -// default to zero. Parameters can have sub-parameters separated by colons. -// -// Any non-parameter bytes are ignored. This includes bytes that are not in the -// range of 0x30-0x3B. -// -// See ECMA-48 § 5.4.1. -func Params(p []byte) [][]uint { - if len(p) == 0 { - return [][]uint{} - } - - // Filter out non-parameter bytes i.e. non 0x30-0x3B. - p = bytes.TrimFunc(p, func(r rune) bool { - return r < 0x30 || r > 0x3B - }) - - parts := bytes.Split(p, []byte{';'}) - params := make([][]uint, len(parts)) - for i, part := range parts { - sparts := bytes.Split(part, []byte{':'}) - params[i] = make([]uint, len(sparts)) - for j, spart := range sparts { - params[i][j] = bytesToUint16(spart) - } - } - - return params -} - -func bytesToUint16(b []byte) uint { - var n uint - for _, c := range b { - n = n*10 + uint(c-'0') - } - return n -} diff --git a/vendor/github.com/charmbracelet/x/ansi/parser.go b/vendor/github.com/charmbracelet/x/ansi/parser.go index 63654c04f..882e1ed7a 100644 --- a/vendor/github.com/charmbracelet/x/ansi/parser.go +++ b/vendor/github.com/charmbracelet/x/ansi/parser.go @@ -7,9 +7,6 @@ import ( "github.com/charmbracelet/x/ansi/parser" ) -// ParserDispatcher is a function that dispatches a sequence. -type ParserDispatcher func(Sequence) - // Parser represents a DEC ANSI compatible sequence parser. // // It uses a state machine to parse ANSI escape sequences and control @@ -20,137 +17,194 @@ type ParserDispatcher func(Sequence) // //go:generate go run ./gen.go type Parser struct { - // Params contains the raw parameters of the sequence. + handler Handler + + // params contains the raw parameters of the sequence. // These parameters used when constructing CSI and DCS sequences. - Params []int + params []int - // Data contains the raw data of the sequence. + // data contains the raw data of the sequence. // These data used when constructing OSC, DCS, SOS, PM, and APC sequences. - Data []byte + data []byte - // DataLen keeps track of the length of the data buffer. - // If DataLen is -1, the data buffer is unlimited and will grow as needed. 
- // Otherwise, DataLen is limited by the size of the Data buffer. - DataLen int + // dataLen keeps track of the length of the data buffer. + // If dataLen is -1, the data buffer is unlimited and will grow as needed. + // Otherwise, dataLen is limited by the size of the data buffer. + dataLen int - // ParamsLen keeps track of the number of parameters. - // This is limited by the size of the Params buffer. + // paramsLen keeps track of the number of parameters. + // This is limited by the size of the params buffer. // // This is also used when collecting UTF-8 runes to keep track of the // number of rune bytes collected. - ParamsLen int + paramsLen int - // Cmd contains the raw command along with the private marker and + // cmd contains the raw command along with the private prefix and // intermediate bytes of the sequence. // The first lower byte contains the command byte, the next byte contains - // the private marker, and the next byte contains the intermediate byte. + // the private prefix, and the next byte contains the intermediate byte. // // This is also used when collecting UTF-8 runes treating it as a slice of // 4 bytes. - Cmd int + cmd int + + // state is the current state of the parser. + state byte +} + +// NewParser returns a new parser with the default settings. +// The [Parser] uses a default size of 32 for the parameters and 64KB for the +// data buffer. Use [Parser.SetParamsSize] and [Parser.SetDataSize] to set the +// size of the parameters and data buffer respectively. +func NewParser() *Parser { + p := new(Parser) + p.SetParamsSize(parser.MaxParamsSize) + p.SetDataSize(1024 * 64) // 64KB data buffer + return p +} - // State is the current state of the parser. - State byte +// SetParamsSize sets the size of the parameters buffer. +// This is used when constructing CSI and DCS sequences. +func (p *Parser) SetParamsSize(size int) { + p.params = make([]int, size) } -// NewParser returns a new parser with the given sizes allocated. -// If dataSize is zero, the underlying data buffer will be unlimited and will +// SetDataSize sets the size of the data buffer. +// This is used when constructing OSC, DCS, SOS, PM, and APC sequences. +// If size is less than or equal to 0, the data buffer is unlimited and will // grow as needed. -func NewParser(paramsSize, dataSize int) *Parser { - s := new(Parser) - if dataSize <= 0 { - dataSize = 0 - s.DataLen = -1 +func (p *Parser) SetDataSize(size int) { + if size <= 0 { + size = 0 + p.dataLen = -1 } - s.Params = make([]int, paramsSize) - s.Data = make([]byte, dataSize) - return s + p.data = make([]byte, size) +} + +// Params returns the list of parsed packed parameters. +func (p *Parser) Params() Params { + return unsafe.Slice((*Param)(unsafe.Pointer(&p.params[0])), p.paramsLen) +} + +// Param returns the parameter at the given index and falls back to the default +// value if the parameter is missing. If the index is out of bounds, it returns +// the default value and false. +func (p *Parser) Param(i, def int) (int, bool) { + if i < 0 || i >= p.paramsLen { + return def, false + } + return Param(p.params[i]).Param(def), true +} + +// Command returns the packed command of the last dispatched sequence. Use +// [Cmd] to unpack the command. +func (p *Parser) Command() int { + return p.cmd +} + +// Rune returns the last dispatched sequence as a rune. 
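+// For a single rune this pairs with [Parser.Advance] (an illustrative
+// sketch; the parser enters its UTF-8 state on the first byte):
+//
+//	p := NewParser()
+//	for _, b := range []byte("é") {
+//		p.Advance(b)
+//	}
+//	_ = p.Rune() // 'é'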
+func (p *Parser) Rune() rune {
+	rw := utf8ByteLen(byte(p.cmd & 0xff))
+	if rw == -1 {
+		return utf8.RuneError
+	}
+	r, _ := utf8.DecodeRune((*[utf8.UTFMax]byte)(unsafe.Pointer(&p.cmd))[:rw])
+	return r
+}
+
+// Control returns the last dispatched sequence as a control code.
+func (p *Parser) Control() byte {
+	return byte(p.cmd & 0xff)
+}
+
+// Data returns the raw data of the last dispatched sequence.
+func (p *Parser) Data() []byte {
+	return p.data[:p.dataLen]
 }
 
 // Reset resets the parser to its initial state.
 func (p *Parser) Reset() {
 	p.clear()
-	p.State = parser.GroundState
+	p.state = parser.GroundState
 }
 
 // clear clears the parser parameters and command.
 func (p *Parser) clear() {
-	if len(p.Params) > 0 {
-		p.Params[0] = parser.MissingParam
+	if len(p.params) > 0 {
+		p.params[0] = parser.MissingParam
 	}
-	p.ParamsLen = 0
-	p.Cmd = 0
+	p.paramsLen = 0
+	p.cmd = 0
 }
 
-// clearCmd clears the parser command, params len and data len.
-func (p *Parser) clearCmd() {
-	p.Cmd = 0
-	p.ParamsLen = 0
-	p.DataLen = 0
+// State returns the current state of the parser.
+func (p *Parser) State() parser.State {
+	return p.state
 }
 
 // StateName returns the name of the current state.
 func (p *Parser) StateName() string {
-	return parser.StateNames[p.State]
+	return parser.StateNames[p.state]
 }
 
-// Parse parses the given dispatcher and byte buffer.
-func (p *Parser) Parse(dispatcher ParserDispatcher, b []byte) {
+// Parse parses the given byte buffer.
+//
+// Deprecated: Loop over the buffer and call [Parser.Advance] instead.
+func (p *Parser) Parse(b []byte) {
 	for i := 0; i < len(b); i++ {
-		p.Advance(dispatcher, b[i], i < len(b)-1)
+		p.Advance(b[i])
 	}
 }
 
-// Advance advances the parser with the given dispatcher and byte.
-func (p *Parser) Advance(dispatcher ParserDispatcher, b byte, more bool) parser.Action {
-	switch p.State {
+// Advance advances the parser using the given byte. It returns the action
+// performed by the parser.
+func (p *Parser) Advance(b byte) parser.Action {
+	switch p.state {
 	case parser.Utf8State:
 		// We handle UTF-8 here.
-		return p.advanceUtf8(dispatcher, b)
+		return p.advanceUtf8(b)
 	default:
-		return p.advance(dispatcher, b, more)
+		return p.advance(b)
 	}
 }
 
 func (p *Parser) collectRune(b byte) {
-	if p.ParamsLen >= utf8.UTFMax {
+	if p.paramsLen >= utf8.UTFMax {
 		return
 	}
 
-	shift := p.ParamsLen * 8
-	p.Cmd &^= 0xff << shift
-	p.Cmd |= int(b) << shift
-	p.ParamsLen++
+	shift := p.paramsLen * 8
+	p.cmd &^= 0xff << shift
+	p.cmd |= int(b) << shift
+	p.paramsLen++
 }
 
-func (p *Parser) advanceUtf8(dispatcher ParserDispatcher, b byte) parser.Action {
+func (p *Parser) advanceUtf8(b byte) parser.Action {
 	// Collect UTF-8 rune bytes.
 	p.collectRune(b)
-	rw := utf8ByteLen(byte(p.Cmd & 0xff))
+	rw := utf8ByteLen(byte(p.cmd & 0xff))
 	if rw == -1 {
 		// We panic here because the first byte comes from the state machine,
 		// if this panics, it means there is a bug in the state machine!
panic("invalid rune") // unreachable } - if p.ParamsLen < rw { + if p.paramsLen < rw { return parser.CollectAction } // We have enough bytes to decode the rune using unsafe - r, _ := utf8.DecodeRune((*[utf8.UTFMax]byte)(unsafe.Pointer(&p.Cmd))[:rw]) - if dispatcher != nil { - dispatcher(Rune(r)) + if p.handler.Print != nil { + p.handler.Print(p.Rune()) } - p.State = parser.GroundState - p.ParamsLen = 0 + p.state = parser.GroundState + p.paramsLen = 0 return parser.PrintAction } -func (p *Parser) advance(d ParserDispatcher, b byte, more bool) parser.Action { - state, action := parser.Table.Transition(p.State, b) +func (p *Parser) advance(b byte) parser.Action { + state, action := parser.Table.Transition(p.state, b) // We need to clear the parser state if the state changes from EscapeState. // This is because when we enter the EscapeState, we don't get a chance to @@ -158,59 +212,53 @@ func (p *Parser) advance(d ParserDispatcher, b byte, more bool) parser.Action { // ST (\x1b\\ or \x9c), we dispatch the current sequence and transition to // EscapeState. However, the parser state is not cleared in this case and // we need to clear it here before dispatching the esc sequence. - if p.State != state { - if p.State == parser.EscapeState { - p.performAction(d, parser.ClearAction, state, b) + if p.state != state { + if p.state == parser.EscapeState { + p.performAction(parser.ClearAction, state, b) } if action == parser.PutAction && - p.State == parser.DcsEntryState && state == parser.DcsStringState { + p.state == parser.DcsEntryState && state == parser.DcsStringState { // XXX: This is a special case where we need to start collecting // non-string parameterized data i.e. doesn't follow the ECMA-48 § // 5.4.1 string parameters format. - p.performAction(d, parser.StartAction, state, 0) + p.performAction(parser.StartAction, state, 0) } } // Handle special cases switch { - case b == ESC && p.State == parser.EscapeState: + case b == ESC && p.state == parser.EscapeState: // Two ESCs in a row - p.performAction(d, parser.ExecuteAction, state, b) - if !more { - // Two ESCs at the end of the buffer - p.performAction(d, parser.ExecuteAction, state, b) - } - case b == ESC && !more: - // Last byte is an ESC - p.performAction(d, parser.ExecuteAction, state, b) - case p.State == parser.EscapeState && b == 'P' && !more: - // ESC P (DCS) at the end of the buffer - p.performAction(d, parser.DispatchAction, state, b) - case p.State == parser.EscapeState && b == 'X' && !more: - // ESC X (SOS) at the end of the buffer - p.performAction(d, parser.DispatchAction, state, b) - case p.State == parser.EscapeState && b == '[' && !more: - // ESC [ (CSI) at the end of the buffer - p.performAction(d, parser.DispatchAction, state, b) - case p.State == parser.EscapeState && b == ']' && !more: - // ESC ] (OSC) at the end of the buffer - p.performAction(d, parser.DispatchAction, state, b) - case p.State == parser.EscapeState && b == '^' && !more: - // ESC ^ (PM) at the end of the buffer - p.performAction(d, parser.DispatchAction, state, b) - case p.State == parser.EscapeState && b == '_' && !more: - // ESC _ (APC) at the end of the buffer - p.performAction(d, parser.DispatchAction, state, b) + p.performAction(parser.ExecuteAction, state, b) default: - p.performAction(d, action, state, b) + p.performAction(action, state, b) } - p.State = state + p.state = state return action } -func (p *Parser) performAction(dispatcher ParserDispatcher, action parser.Action, state parser.State, b byte) { +func (p *Parser) parseStringCmd() { + // Try 
to parse the command + datalen := len(p.data) + if p.dataLen >= 0 { + datalen = p.dataLen + } + for i := 0; i < datalen; i++ { + d := p.data[i] + if d < '0' || d > '9' { + break + } + if p.cmd == parser.MissingCommand { + p.cmd = 0 + } + p.cmd *= 10 + p.cmd += int(d - '0') + } +} + +func (p *Parser) performAction(action parser.Action, state parser.State, b byte) { switch action { case parser.IgnoreAction: break @@ -219,140 +267,139 @@ func (p *Parser) performAction(dispatcher ParserDispatcher, action parser.Action p.clear() case parser.PrintAction: - if dispatcher != nil { - dispatcher(Rune(b)) + p.cmd = int(b) + if p.handler.Print != nil { + p.handler.Print(rune(b)) } case parser.ExecuteAction: - if dispatcher != nil { - dispatcher(ControlCode(b)) + p.cmd = int(b) + if p.handler.Execute != nil { + p.handler.Execute(b) } - case parser.MarkerAction: - // Collect private marker - // we only store the last marker - p.Cmd &^= 0xff << parser.MarkerShift - p.Cmd |= int(b) << parser.MarkerShift + case parser.PrefixAction: + // Collect private prefix + // we only store the last prefix + p.cmd &^= 0xff << parser.PrefixShift + p.cmd |= int(b) << parser.PrefixShift case parser.CollectAction: if state == parser.Utf8State { // Reset the UTF-8 counter - p.ParamsLen = 0 + p.paramsLen = 0 p.collectRune(b) } else { // Collect intermediate bytes // we only store the last intermediate byte - p.Cmd &^= 0xff << parser.IntermedShift - p.Cmd |= int(b) << parser.IntermedShift + p.cmd &^= 0xff << parser.IntermedShift + p.cmd |= int(b) << parser.IntermedShift } case parser.ParamAction: // Collect parameters - if p.ParamsLen >= len(p.Params) { + if p.paramsLen >= len(p.params) { break } if b >= '0' && b <= '9' { - if p.Params[p.ParamsLen] == parser.MissingParam { - p.Params[p.ParamsLen] = 0 + if p.params[p.paramsLen] == parser.MissingParam { + p.params[p.paramsLen] = 0 } - p.Params[p.ParamsLen] *= 10 - p.Params[p.ParamsLen] += int(b - '0') + p.params[p.paramsLen] *= 10 + p.params[p.paramsLen] += int(b - '0') } if b == ':' { - p.Params[p.ParamsLen] |= parser.HasMoreFlag + p.params[p.paramsLen] |= parser.HasMoreFlag } if b == ';' || b == ':' { - p.ParamsLen++ - if p.ParamsLen < len(p.Params) { - p.Params[p.ParamsLen] = parser.MissingParam + p.paramsLen++ + if p.paramsLen < len(p.params) { + p.params[p.paramsLen] = parser.MissingParam } } case parser.StartAction: - if p.DataLen < 0 && p.Data != nil { - p.Data = p.Data[:0] + if p.dataLen < 0 && p.data != nil { + p.data = p.data[:0] } else { - p.DataLen = 0 + p.dataLen = 0 } - if p.State >= parser.DcsEntryState && p.State <= parser.DcsStringState { + if p.state >= parser.DcsEntryState && p.state <= parser.DcsStringState { // Collect the command byte for DCS - p.Cmd |= int(b) + p.cmd |= int(b) } else { - p.Cmd = parser.MissingCommand + p.cmd = parser.MissingCommand } case parser.PutAction: - switch p.State { + switch p.state { case parser.OscStringState: - if b == ';' && p.Cmd == parser.MissingCommand { - // Try to parse the command - datalen := len(p.Data) - if p.DataLen >= 0 { - datalen = p.DataLen - } - for i := 0; i < datalen; i++ { - d := p.Data[i] - if d < '0' || d > '9' { - break - } - if p.Cmd == parser.MissingCommand { - p.Cmd = 0 - } - p.Cmd *= 10 - p.Cmd += int(d - '0') - } + if b == ';' && p.cmd == parser.MissingCommand { + p.parseStringCmd() } } - if p.DataLen < 0 { - p.Data = append(p.Data, b) + if p.dataLen < 0 { + p.data = append(p.data, b) } else { - if p.DataLen < len(p.Data) { - p.Data[p.DataLen] = b - p.DataLen++ + if p.dataLen < len(p.data) { + 
p.data[p.dataLen] = b + p.dataLen++ } } case parser.DispatchAction: // Increment the last parameter - if p.ParamsLen > 0 && p.ParamsLen < len(p.Params)-1 || - p.ParamsLen == 0 && len(p.Params) > 0 && p.Params[0] != parser.MissingParam { - p.ParamsLen++ + if p.paramsLen > 0 && p.paramsLen < len(p.params)-1 || + p.paramsLen == 0 && len(p.params) > 0 && p.params[0] != parser.MissingParam { + p.paramsLen++ } - if dispatcher == nil { - break + if p.state == parser.OscStringState && p.cmd == parser.MissingCommand { + // Ensure we have a command for OSC + p.parseStringCmd() } - var seq Sequence - data := p.Data - if p.DataLen >= 0 { - data = data[:p.DataLen] + data := p.data + if p.dataLen >= 0 { + data = data[:p.dataLen] } - switch p.State { + switch p.state { case parser.CsiEntryState, parser.CsiParamState, parser.CsiIntermediateState: - p.Cmd |= int(b) - seq = CsiSequence{Cmd: p.Cmd, Params: p.Params[:p.ParamsLen]} + p.cmd |= int(b) + if p.handler.HandleCsi != nil { + p.handler.HandleCsi(Cmd(p.cmd), p.Params()) + } case parser.EscapeState, parser.EscapeIntermediateState: - p.Cmd |= int(b) - seq = EscSequence(p.Cmd) + p.cmd |= int(b) + if p.handler.HandleEsc != nil { + p.handler.HandleEsc(Cmd(p.cmd)) + } case parser.DcsEntryState, parser.DcsParamState, parser.DcsIntermediateState, parser.DcsStringState: - seq = DcsSequence{Cmd: p.Cmd, Params: p.Params[:p.ParamsLen], Data: data} + if p.handler.HandleDcs != nil { + p.handler.HandleDcs(Cmd(p.cmd), p.Params(), data) + } case parser.OscStringState: - seq = OscSequence{Cmd: p.Cmd, Data: data} + if p.handler.HandleOsc != nil { + p.handler.HandleOsc(p.cmd, data) + } case parser.SosStringState: - seq = SosSequence{Data: data} + if p.handler.HandleSos != nil { + p.handler.HandleSos(data) + } case parser.PmStringState: - seq = PmSequence{Data: data} + if p.handler.HandlePm != nil { + p.handler.HandlePm(data) + } case parser.ApcStringState: - seq = ApcSequence{Data: data} + if p.handler.HandleApc != nil { + p.handler.HandleApc(data) + } } - - dispatcher(seq) } } diff --git a/vendor/github.com/charmbracelet/x/ansi/parser/const.go b/vendor/github.com/charmbracelet/x/ansi/parser/const.go index 54b7383b4..d62dbf379 100644 --- a/vendor/github.com/charmbracelet/x/ansi/parser/const.go +++ b/vendor/github.com/charmbracelet/x/ansi/parser/const.go @@ -8,7 +8,7 @@ const ( NoneAction Action = iota ClearAction CollectAction - MarkerAction + PrefixAction DispatchAction ExecuteAction StartAction // Start of a data string @@ -24,7 +24,7 @@ var ActionNames = []string{ "NoneAction", "ClearAction", "CollectAction", - "MarkerAction", + "PrefixAction", "DispatchAction", "ExecuteAction", "StartAction", diff --git a/vendor/github.com/charmbracelet/x/ansi/parser/seq.go b/vendor/github.com/charmbracelet/x/ansi/parser/seq.go index c99f1632f..29f491d1c 100644 --- a/vendor/github.com/charmbracelet/x/ansi/parser/seq.go +++ b/vendor/github.com/charmbracelet/x/ansi/parser/seq.go @@ -4,9 +4,9 @@ import "math" // Shift and masks for sequence parameters and intermediates. const ( - MarkerShift = 8 + PrefixShift = 8 IntermedShift = 16 - CommandMask = 0xff + FinalMask = 0xff HasMoreFlag = math.MinInt32 ParamMask = ^HasMoreFlag MissingParam = ParamMask @@ -22,12 +22,12 @@ const ( DefaultParamValue = 0 ) -// Marker returns the marker byte of the sequence. +// Prefix returns the prefix byte of the sequence. // This is always gonna be one of the following '<' '=' '>' '?' and in the // range of 0x3C-0x3F. -// Zero is returned if the sequence does not have a marker. 
-func Marker(cmd int) int { - return (cmd >> MarkerShift) & CommandMask +// Zero is returned if the sequence does not have a prefix. +func Prefix(cmd int) int { + return (cmd >> PrefixShift) & FinalMask } // Intermediate returns the intermediate byte of the sequence. @@ -36,12 +36,12 @@ func Marker(cmd int) int { // ',', '-', '.', '/'. // Zero is returned if the sequence does not have an intermediate byte. func Intermediate(cmd int) int { - return (cmd >> IntermedShift) & CommandMask + return (cmd >> IntermedShift) & FinalMask } // Command returns the command byte of the CSI sequence. func Command(cmd int) int { - return cmd & CommandMask + return cmd & FinalMask } // Param returns the parameter at the given index. diff --git a/vendor/github.com/charmbracelet/x/ansi/parser/transition_table.go b/vendor/github.com/charmbracelet/x/ansi/parser/transition_table.go index 5d368ebf1..558a5eacc 100644 --- a/vendor/github.com/charmbracelet/x/ansi/parser/transition_table.go +++ b/vendor/github.com/charmbracelet/x/ansi/parser/transition_table.go @@ -178,7 +178,7 @@ func GenerateTransitionTable() TransitionTable { table.AddRange(0x20, 0x2F, DcsEntryState, CollectAction, DcsIntermediateState) // Dcs_entry -> Dcs_param table.AddRange(0x30, 0x3B, DcsEntryState, ParamAction, DcsParamState) - table.AddRange(0x3C, 0x3F, DcsEntryState, MarkerAction, DcsParamState) + table.AddRange(0x3C, 0x3F, DcsEntryState, PrefixAction, DcsParamState) // Dcs_entry -> Dcs_passthrough table.AddRange(0x08, 0x0D, DcsEntryState, PutAction, DcsStringState) // Follows ECMA-48 § 8.3.27 // XXX: allows passing ESC (not a ECMA-48 standard) this to allow for @@ -254,7 +254,7 @@ func GenerateTransitionTable() TransitionTable { table.AddRange(0x20, 0x2F, CsiEntryState, CollectAction, CsiIntermediateState) // Csi_entry -> Csi_param table.AddRange(0x30, 0x3B, CsiEntryState, ParamAction, CsiParamState) - table.AddRange(0x3C, 0x3F, CsiEntryState, MarkerAction, CsiParamState) + table.AddRange(0x3C, 0x3F, CsiEntryState, PrefixAction, CsiParamState) // Osc_string table.AddRange(0x00, 0x06, OscStringState, IgnoreAction, OscStringState) diff --git a/vendor/github.com/charmbracelet/x/ansi/parser_decode.go b/vendor/github.com/charmbracelet/x/ansi/parser_decode.go index 76688d0b8..3e5047394 100644 --- a/vendor/github.com/charmbracelet/x/ansi/parser_decode.go +++ b/vendor/github.com/charmbracelet/x/ansi/parser_decode.go @@ -1,11 +1,10 @@ package ansi import ( - "bytes" - "strings" "unicode/utf8" "github.com/charmbracelet/x/ansi/parser" + "github.com/mattn/go-runewidth" "github.com/rivo/uniseg" ) @@ -16,7 +15,7 @@ type State = byte // ANSI escape sequence states used by [DecodeSequence]. const ( NormalState State = iota - MarkerState + PrefixState ParamsState IntermedState EscapeState @@ -35,22 +34,22 @@ const ( // // Passing a non-nil [*Parser] as the last argument will allow the decoder to // collect sequence parameters, data, and commands. The parser cmd will have -// the packed command value that contains intermediate and marker characters. +// the packed command value that contains intermediate and prefix characters. // In the case of a OSC sequence, the cmd will be the OSC command number. Use -// [Cmd] and [Param] types to unpack command intermediates and markers as well +// [Cmd] and [Param] types to unpack command intermediates and prefixes as well // as parameters. // -// Zero [p.Cmd] means the CSI, DCS, or ESC sequence is invalid. Moreover, checking the +// Zero [Cmd] means the CSI, DCS, or ESC sequence is invalid. 
Moreover, checking the
 // validity of other data sequences, OSC, DCS, etc, will require checking for
 // the returned sequence terminator bytes such as ST (ESC \\) and BEL).
 //
-// We store the command byte in [p.Cmd] in the most significant byte, the
-// marker byte in the next byte, and the intermediate byte in the least
+// We store the command byte in [Cmd] in the least significant byte, the
+// prefix byte in the next byte, and the intermediate byte in the most
 // significant byte. This is done to avoid using a struct to store the command
-// and its intermediates and markers. The command byte is always the least
-// significant byte i.e. [p.Cmd & 0xff]. Use the [Cmd] type to unpack the
-// command, intermediate, and marker bytes. Note that we only collect the last
-// marker character and intermediate byte.
+// and its intermediates and prefixes. The command byte is always the least
+// significant byte i.e. [Cmd & 0xff]. Use the [Cmd] type to unpack the
+// command, intermediate, and prefix bytes. Note that we only collect the last
+// prefix character and intermediate byte.
 //
 // The [p.Params] slice will contain the parameters of the sequence. Any
 // sub-parameter will have the [parser.HasMoreFlag] set. Use the [Param] type
@@ -67,7 +66,63 @@ const (
 //		state = newState
 //		input = input[n:]
 //	}
+//
+// This function treats the text as a sequence of grapheme clusters.
 func DecodeSequence[T string | []byte](b T, state byte, p *Parser) (seq T, width int, n int, newState byte) {
+	return decodeSequence(GraphemeWidth, b, state, p)
+}
+
+// DecodeSequenceWc decodes the first ANSI escape sequence or a printable
+// grapheme from the given data. It returns the sequence slice, the cell width
+// of the sequence, the number of bytes read, and the new state.
+//
+// The cell width will always be 0 for control and escape sequences, 1 for
+// ASCII printable characters, and the number of cells other Unicode characters
+// occupy. It uses the uniseg package to calculate the width of Unicode
+// graphemes and characters. This means it will always do grapheme clustering
+// (mode 2027).
+//
+// Passing a non-nil [*Parser] as the last argument will allow the decoder to
+// collect sequence parameters, data, and commands. The parser cmd will have
+// the packed command value that contains intermediate and prefix characters.
+// In the case of an OSC sequence, the cmd will be the OSC command number. Use
+// [Cmd] and [Param] types to unpack command intermediates and prefixes as well
+// as parameters.
+//
+// Zero [Cmd] means the CSI, DCS, or ESC sequence is invalid. Moreover, checking the
+// validity of other data sequences, OSC, DCS, etc, will require checking for
+// the returned sequence terminator bytes such as ST (ESC \\) and BEL).
+//
+// We store the command byte in [Cmd] in the least significant byte, the
+// prefix byte in the next byte, and the intermediate byte in the most
+// significant byte. This is done to avoid using a struct to store the command
+// and its intermediates and prefixes. The command byte is always the least
+// significant byte i.e. [Cmd & 0xff]. Use the [Cmd] type to unpack the
+// command, intermediate, and prefix bytes. Note that we only collect the last
+// prefix character and intermediate byte.
+//
+// The [p.Params] slice will contain the parameters of the sequence. Any
+// sub-parameter will have the [parser.HasMoreFlag] set. Use the [Param] type
+// to unpack the parameters.
+//
+// Example:
+//
+//	var state byte // the initial state is always zero [NormalState]
+//	p := NewParser() // create a new parser (optional)
+//	input := []byte("\x1b[31mHello, World!\x1b[0m")
+//	for len(input) > 0 {
+//		seq, width, n, newState := DecodeSequenceWc(input, state, p)
+//		log.Printf("seq: %q, width: %d", seq, width)
+//		state = newState
+//		input = input[n:]
+//	}
+//
+// This function treats the text as a sequence of wide characters and runes.
+func DecodeSequenceWc[T string | []byte](b T, state byte, p *Parser) (seq T, width int, n int, newState byte) {
+	return decodeSequence(WcWidth, b, state, p)
+}
+
+func decodeSequence[T string | []byte](m Method, b T, state State, p *Parser) (seq T, width int, n int, newState byte) {
 	for i := 0; i < len(b); i++ {
 		c := b[i]
 
@@ -76,39 +131,39 @@ func DecodeSequence[T string | []byte](b T, state byte, p *Parser) (seq T, width
 		switch c {
 		case ESC:
 			if p != nil {
-				if len(p.Params) > 0 {
-					p.Params[0] = parser.MissingParam
+				if len(p.params) > 0 {
+					p.params[0] = parser.MissingParam
 				}
-				p.Cmd = 0
-				p.ParamsLen = 0
-				p.DataLen = 0
+				p.cmd = 0
+				p.paramsLen = 0
+				p.dataLen = 0
 			}
 			state = EscapeState
 			continue
 		case CSI, DCS:
 			if p != nil {
-				if len(p.Params) > 0 {
-					p.Params[0] = parser.MissingParam
+				if len(p.params) > 0 {
+					p.params[0] = parser.MissingParam
 				}
-				p.Cmd = 0
-				p.ParamsLen = 0
-				p.DataLen = 0
+				p.cmd = 0
+				p.paramsLen = 0
+				p.dataLen = 0
 			}
-			state = MarkerState
+			state = PrefixState
 			continue
 		case OSC, APC, SOS, PM:
 			if p != nil {
-				p.Cmd = parser.MissingCommand
-				p.DataLen = 0
+				p.cmd = parser.MissingCommand
+				p.dataLen = 0
 			}
 			state = StringState
 			continue
 		}
 
 		if p != nil {
-			p.DataLen = 0
-			p.ParamsLen = 0
-			p.Cmd = 0
+			p.dataLen = 0
+			p.paramsLen = 0
+			p.cmd = 0
 		}
 
 		if c > US && c < DEL {
 			// ASCII printable characters
@@ -122,18 +177,21 @@ func DecodeSequence[T string | []byte](b T, state byte, p *Parser) (seq T, width
 
 		if utf8.RuneStart(c) {
 			seq, _, width, _ = FirstGraphemeCluster(b, -1)
+			if m == WcWidth {
+				width = runewidth.StringWidth(string(seq))
+			}
 			i += len(seq)
 			return b[:i], width, i, NormalState
 		}
 
 		// Invalid UTF-8 sequence
 		return b[:i], 0, i, NormalState
-	case MarkerState:
+	case PrefixState:
 		if c >= '<' && c <= '?' {
 			if p != nil {
-				// We only collect the last marker character.
-				p.Cmd &^= 0xff << parser.MarkerShift
-				p.Cmd |= int(c) << parser.MarkerShift
+				// We only collect the last prefix character.
+ p.cmd &^= 0xff << parser.PrefixShift + p.cmd |= int(c) << parser.PrefixShift } break } @@ -143,27 +201,27 @@ func DecodeSequence[T string | []byte](b T, state byte, p *Parser) (seq T, width case ParamsState: if c >= '0' && c <= '9' { if p != nil { - if p.Params[p.ParamsLen] == parser.MissingParam { - p.Params[p.ParamsLen] = 0 + if p.params[p.paramsLen] == parser.MissingParam { + p.params[p.paramsLen] = 0 } - p.Params[p.ParamsLen] *= 10 - p.Params[p.ParamsLen] += int(c - '0') + p.params[p.paramsLen] *= 10 + p.params[p.paramsLen] += int(c - '0') } break } if c == ':' { if p != nil { - p.Params[p.ParamsLen] |= parser.HasMoreFlag + p.params[p.paramsLen] |= parser.HasMoreFlag } } if c == ';' || c == ':' { if p != nil { - p.ParamsLen++ - if p.ParamsLen < len(p.Params) { - p.Params[p.ParamsLen] = parser.MissingParam + p.paramsLen++ + if p.paramsLen < len(p.params) { + p.params[p.paramsLen] = parser.MissingParam } } break @@ -174,35 +232,36 @@ func DecodeSequence[T string | []byte](b T, state byte, p *Parser) (seq T, width case IntermedState: if c >= ' ' && c <= '/' { if p != nil { - p.Cmd &^= 0xff << parser.IntermedShift - p.Cmd |= int(c) << parser.IntermedShift + p.cmd &^= 0xff << parser.IntermedShift + p.cmd |= int(c) << parser.IntermedShift } break } - state = NormalState + if p != nil { + // Increment the last parameter + if p.paramsLen > 0 && p.paramsLen < len(p.params)-1 || + p.paramsLen == 0 && len(p.params) > 0 && p.params[0] != parser.MissingParam { + p.paramsLen++ + } + } + if c >= '@' && c <= '~' { if p != nil { - // Increment the last parameter - if p.ParamsLen > 0 && p.ParamsLen < len(p.Params)-1 || - p.ParamsLen == 0 && len(p.Params) > 0 && p.Params[0] != parser.MissingParam { - p.ParamsLen++ - } - - p.Cmd &^= 0xff - p.Cmd |= int(c) + p.cmd &^= 0xff + p.cmd |= int(c) } if HasDcsPrefix(b) { // Continue to collect DCS data if p != nil { - p.DataLen = 0 + p.dataLen = 0 } state = StringState continue } - return b[:i+1], 0, i + 1, state + return b[:i+1], 0, i + 1, NormalState } // Invalid CSI/DCS sequence @@ -211,18 +270,18 @@ func DecodeSequence[T string | []byte](b T, state byte, p *Parser) (seq T, width switch c { case '[', 'P': if p != nil { - if len(p.Params) > 0 { - p.Params[0] = parser.MissingParam + if len(p.params) > 0 { + p.params[0] = parser.MissingParam } - p.ParamsLen = 0 - p.Cmd = 0 + p.paramsLen = 0 + p.cmd = 0 } - state = MarkerState + state = PrefixState continue case ']', 'X', '^', '_': if p != nil { - p.Cmd = parser.MissingCommand - p.DataLen = 0 + p.cmd = parser.MissingCommand + p.dataLen = 0 } state = StringState continue @@ -230,14 +289,14 @@ func DecodeSequence[T string | []byte](b T, state byte, p *Parser) (seq T, width if c >= ' ' && c <= '/' { if p != nil { - p.Cmd &^= 0xff << parser.IntermedShift - p.Cmd |= int(c) << parser.IntermedShift + p.cmd &^= 0xff << parser.IntermedShift + p.cmd |= int(c) << parser.IntermedShift } continue } else if c >= '0' && c <= '~' { if p != nil { - p.Cmd &^= 0xff - p.Cmd |= int(c) + p.cmd &^= 0xff + p.cmd |= int(c) } return b[:i+1], 0, i + 1, NormalState } @@ -248,15 +307,31 @@ func DecodeSequence[T string | []byte](b T, state byte, p *Parser) (seq T, width switch c { case BEL: if HasOscPrefix(b) { + parseOscCmd(p) return b[:i+1], 0, i + 1, NormalState } case CAN, SUB: + if HasOscPrefix(b) { + // Ensure we parse the OSC command number + parseOscCmd(p) + } + // Cancel the sequence return b[:i], 0, i, NormalState case ST: + if HasOscPrefix(b) { + // Ensure we parse the OSC command number + parseOscCmd(p) + } + return b[:i+1], 0, i 
+ 1, NormalState case ESC: if HasStPrefix(b[i:]) { + if HasOscPrefix(b) { + // Ensure we parse the OSC command number + parseOscCmd(p) + } + // End of string 7-bit (ST) return b[:i+2], 0, i + 2, NormalState } @@ -265,23 +340,13 @@ func DecodeSequence[T string | []byte](b T, state byte, p *Parser) (seq T, width return b[:i], 0, i, NormalState } - if p != nil && p.DataLen < len(p.Data) { - p.Data[p.DataLen] = c - p.DataLen++ + if p != nil && p.dataLen < len(p.data) { + p.data[p.dataLen] = c + p.dataLen++ // Parse the OSC command number - if c == ';' && p.Cmd == parser.MissingCommand && HasOscPrefix(b) { - for j := 0; j < p.DataLen; j++ { - d := p.Data[j] - if d < '0' || d > '9' { - break - } - if p.Cmd == parser.MissingCommand { - p.Cmd = 0 - } - p.Cmd *= 10 - p.Cmd += int(d - '0') - } + if c == ';' && HasOscPrefix(b) { + parseOscCmd(p) } } } @@ -290,16 +355,21 @@ func DecodeSequence[T string | []byte](b T, state byte, p *Parser) (seq T, width return b, 0, len(b), state } -// Index returns the index of the first occurrence of the given byte slice in -// the data. It returns -1 if the byte slice is not found. -func Index[T string | []byte](data, b T) int { - switch data := any(data).(type) { - case string: - return strings.Index(data, string(b)) - case []byte: - return bytes.Index(data, []byte(b)) +func parseOscCmd(p *Parser) { + if p == nil || p.cmd != parser.MissingCommand { + return + } + for j := 0; j < p.dataLen; j++ { + d := p.data[j] + if d < '0' || d > '9' { + break + } + if p.cmd == parser.MissingCommand { + p.cmd = 0 + } + p.cmd *= 10 + p.cmd += int(d - '0') } - panic("unreachable") } // Equal returns true if the given byte slices are equal. @@ -380,30 +450,47 @@ func FirstGraphemeCluster[T string | []byte](b T, state int) (T, T, int, int) { } // Cmd represents a sequence command. This is used to pack/unpack a sequence -// command with its intermediate and marker characters. Those are commonly +// command with its intermediate and prefix characters. Those are commonly // found in CSI and DCS sequences. type Cmd int -// Marker returns the marker byte of the CSI sequence. +// Prefix returns the unpacked prefix byte of the CSI sequence. // This is always gonna be one of the following '<' '=' '>' '?' and in the // range of 0x3C-0x3F. -// Zero is returned if the sequence does not have a marker. -func (c Cmd) Marker() int { - return parser.Marker(int(c)) +// Zero is returned if the sequence does not have a prefix. +func (c Cmd) Prefix() byte { + return byte(parser.Prefix(int(c))) } -// Intermediate returns the intermediate byte of the CSI sequence. +// Intermediate returns the unpacked intermediate byte of the CSI sequence. // An intermediate byte is in the range of 0x20-0x2F. This includes these // characters from ' ', '!', '"', '#', '$', '%', '&', ”', '(', ')', '*', '+', // ',', '-', '.', '/'. // Zero is returned if the sequence does not have an intermediate byte. -func (c Cmd) Intermediate() int { - return parser.Intermediate(int(c)) +func (c Cmd) Intermediate() byte { + return byte(parser.Intermediate(int(c))) } -// Command returns the command byte of the CSI sequence. -func (c Cmd) Command() int { - return parser.Command(int(c)) +// Final returns the unpacked command byte of the CSI sequence. +func (c Cmd) Final() byte { + return byte(parser.Command(int(c))) +} + +// Command packs a command with the given prefix, intermediate, and final. A +// zero byte means the sequence does not have a prefix or intermediate. +// +// Prefixes are in the range of 0x3C-0x3F that is one of `<=>?`. 
+// +// Intermediates are in the range of 0x20-0x2F that is anything in +// `!"#$%&'()*+,-./`. +// +// Final bytes are in the range of 0x40-0x7E that is anything in the range +// `@A–Z[\]^_`a–z{|}~`. +func Command(prefix, inter, final byte) (c int) { + c = int(final) + c |= int(prefix) << parser.PrefixShift + c |= int(inter) << parser.IntermedShift + return } // Param represents a sequence parameter. Sequence parameters with @@ -411,13 +498,27 @@ func (c Cmd) Command() int { // the parameters from a CSI and DCS sequences. type Param int -// Param returns the parameter at the given index. -// It returns -1 if the parameter does not exist. -func (s Param) Param() int { - return int(s) & parser.ParamMask +// Param returns the unpacked parameter at the given index. +// It returns the default value if the parameter is missing. +func (s Param) Param(def int) int { + p := int(s) & parser.ParamMask + if p == parser.MissingParam { + return def + } + return p } -// HasMore returns true if the parameter has more sub-parameters. +// HasMore unpacks the HasMoreFlag from the parameter. func (s Param) HasMore() bool { - return int(s)&parser.HasMoreFlag != 0 + return s&parser.HasMoreFlag != 0 +} + +// Parameter packs an escape code parameter with the given parameter and +// whether this parameter has following sub-parameters. +func Parameter(p int, hasMore bool) (s int) { + s = p & parser.ParamMask + if hasMore { + s |= parser.HasMoreFlag + } + return } diff --git a/vendor/github.com/charmbracelet/x/ansi/parser_handler.go b/vendor/github.com/charmbracelet/x/ansi/parser_handler.go new file mode 100644 index 000000000..03f9ed4cb --- /dev/null +++ b/vendor/github.com/charmbracelet/x/ansi/parser_handler.go @@ -0,0 +1,60 @@ +package ansi + +import "unsafe" + +// Params represents a list of packed parameters. +type Params []Param + +// Param returns the parameter at the given index and if it is part of a +// sub-parameters. It falls back to the default value if the parameter is +// missing. If the index is out of bounds, it returns the default value and +// false. +func (p Params) Param(i, def int) (int, bool, bool) { + if i < 0 || i >= len(p) { + return def, false, false + } + return p[i].Param(def), p[i].HasMore(), true +} + +// ForEach iterates over the parameters and calls the given function for each +// parameter. If a parameter is part of a sub-parameter, it will be called with +// hasMore set to true. +// Use def to set a default value for missing parameters. +func (p Params) ForEach(def int, f func(i, param int, hasMore bool)) { + for i := range p { + f(i, p[i].Param(def), p[i].HasMore()) + } +} + +// ToParams converts a list of integers to a list of parameters. +func ToParams(params []int) Params { + return unsafe.Slice((*Param)(unsafe.Pointer(¶ms[0])), len(params)) +} + +// Handler handles actions performed by the parser. +// It is used to handle ANSI escape sequences, control characters, and runes. +type Handler struct { + // Print is called when a printable rune is encountered. + Print func(r rune) + // Execute is called when a control character is encountered. + Execute func(b byte) + // HandleCsi is called when a CSI sequence is encountered. + HandleCsi func(cmd Cmd, params Params) + // HandleEsc is called when an ESC sequence is encountered. + HandleEsc func(cmd Cmd) + // HandleDcs is called when a DCS sequence is encountered. + HandleDcs func(cmd Cmd, params Params, data []byte) + // HandleOsc is called when an OSC sequence is encountered. 
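+	// For OSC, cmd is the numeric command identifier (e.g. 8 for hyperlinks).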
+ HandleOsc func(cmd int, data []byte) + // HandlePm is called when a PM sequence is encountered. + HandlePm func(data []byte) + // HandleApc is called when an APC sequence is encountered. + HandleApc func(data []byte) + // HandleSos is called when a SOS sequence is encountered. + HandleSos func(data []byte) +} + +// SetHandler sets the handler for the parser. +func (p *Parser) SetHandler(h Handler) { + p.handler = h +} diff --git a/vendor/github.com/charmbracelet/x/ansi/parser_sync.go b/vendor/github.com/charmbracelet/x/ansi/parser_sync.go new file mode 100644 index 000000000..65d25a9a6 --- /dev/null +++ b/vendor/github.com/charmbracelet/x/ansi/parser_sync.go @@ -0,0 +1,29 @@ +package ansi + +import ( + "sync" + + "github.com/charmbracelet/x/ansi/parser" +) + +var parserPool = sync.Pool{ + New: func() any { + p := NewParser() + p.SetParamsSize(parser.MaxParamsSize) + p.SetDataSize(1024 * 1024 * 4) // 4MB of data buffer + return p + }, +} + +// GetParser returns a parser from a sync pool. +func GetParser() *Parser { + return parserPool.Get().(*Parser) +} + +// PutParser returns a parser to a sync pool. The parser is reset +// automatically. +func PutParser(p *Parser) { + p.Reset() + p.dataLen = 0 + parserPool.Put(p) +} diff --git a/vendor/github.com/charmbracelet/x/ansi/paste.go b/vendor/github.com/charmbracelet/x/ansi/paste.go new file mode 100644 index 000000000..2f9ea6f79 --- /dev/null +++ b/vendor/github.com/charmbracelet/x/ansi/paste.go @@ -0,0 +1,7 @@ +package ansi + +// BracketedPasteStart is the control sequence to enable bracketed paste mode. +const BracketedPasteStart = "\x1b[200~" + +// BracketedPasteEnd is the control sequence to disable bracketed paste mode. +const BracketedPasteEnd = "\x1b[201~" diff --git a/vendor/github.com/charmbracelet/x/ansi/reset.go b/vendor/github.com/charmbracelet/x/ansi/reset.go new file mode 100644 index 000000000..c1b89ea49 --- /dev/null +++ b/vendor/github.com/charmbracelet/x/ansi/reset.go @@ -0,0 +1,11 @@ +package ansi + +// ResetInitialState (RIS) resets the terminal to its initial state. +// +// ESC c +// +// See: https://vt100.net/docs/vt510-rm/RIS.html +const ( + ResetInitialState = "\x1bc" + RIS = ResetInitialState +) diff --git a/vendor/github.com/charmbracelet/x/ansi/screen.go b/vendor/github.com/charmbracelet/x/ansi/screen.go index 4909cf07c..c76e4f0d6 100644 --- a/vendor/github.com/charmbracelet/x/ansi/screen.go +++ b/vendor/github.com/charmbracelet/x/ansi/screen.go @@ -1,30 +1,44 @@ package ansi -import "strconv" +import ( + "strconv" + "strings" +) -// EraseDisplay (ED) clears the screen or parts of the screen. Possible values: +// EraseDisplay (ED) clears the display or parts of the display. A screen is +// the shown part of the terminal display excluding the scrollback buffer. +// Possible values: +// +// Default is 0. // // 0: Clear from cursor to end of screen. // 1: Clear from cursor to beginning of the screen. // 2: Clear entire screen (and moves cursor to upper left on DOS). -// 3: Clear entire screen and delete all lines saved in the scrollback buffer. +// 3: Clear entire display which delete all lines saved in the scrollback buffer (xterm). // // CSI J // // See: https://vt100.net/docs/vt510-rm/ED.html func EraseDisplay(n int) string { - if n < 0 { - n = 0 + var s string + if n > 0 { + s = strconv.Itoa(n) } - return "\x1b[" + strconv.Itoa(n) + "J" + return "\x1b[" + s + "J" +} + +// ED is an alias for [EraseDisplay]. +func ED(n int) string { + return EraseDisplay(n) } // EraseDisplay constants. 
// These are the possible values for the EraseDisplay function. const ( - EraseDisplayRight = "\x1b[0J" - EraseDisplayLeft = "\x1b[1J" - EraseEntireDisplay = "\x1b[2J" + EraseScreenBelow = "\x1b[J" + EraseScreenAbove = "\x1b[1J" + EraseEntireScreen = "\x1b[2J" + EraseEntireDisplay = "\x1b[3J" ) // EraseLine (EL) clears the current line or parts of the line. Possible values: @@ -39,16 +53,22 @@ const ( // // See: https://vt100.net/docs/vt510-rm/EL.html func EraseLine(n int) string { - if n < 0 { - n = 0 + var s string + if n > 0 { + s = strconv.Itoa(n) } - return "\x1b[" + strconv.Itoa(n) + "K" + return "\x1b[" + s + "K" +} + +// EL is an alias for [EraseLine]. +func EL(n int) string { + return EraseLine(n) } // EraseLine constants. // These are the possible values for the EraseLine function. const ( - EraseLineRight = "\x1b[0K" + EraseLineRight = "\x1b[K" EraseLineLeft = "\x1b[1K" EraseEntireLine = "\x1b[2K" ) @@ -56,7 +76,7 @@ const ( // ScrollUp (SU) scrolls the screen up n lines. New lines are added at the // bottom of the screen. // -// CSI S +// CSI Pn S // // See: https://vt100.net/docs/vt510-rm/SU.html func ScrollUp(n int) string { @@ -67,10 +87,20 @@ func ScrollUp(n int) string { return "\x1b[" + s + "S" } +// PanDown is an alias for [ScrollUp]. +func PanDown(n int) string { + return ScrollUp(n) +} + +// SU is an alias for [ScrollUp]. +func SU(n int) string { + return ScrollUp(n) +} + // ScrollDown (SD) scrolls the screen down n lines. New lines are added at the // top of the screen. // -// CSI T +// CSI Pn T // // See: https://vt100.net/docs/vt510-rm/SD.html func ScrollDown(n int) string { @@ -81,10 +111,20 @@ func ScrollDown(n int) string { return "\x1b[" + s + "T" } +// PanUp is an alias for [ScrollDown]. +func PanUp(n int) string { + return ScrollDown(n) +} + +// SD is an alias for [ScrollDown]. +func SD(n int) string { + return ScrollDown(n) +} + // InsertLine (IL) inserts n blank lines at the current cursor position. // Existing lines are moved down. // -// CSI L +// CSI Pn L // // See: https://vt100.net/docs/vt510-rm/IL.html func InsertLine(n int) string { @@ -95,10 +135,15 @@ func InsertLine(n int) string { return "\x1b[" + s + "L" } +// IL is an alias for [InsertLine]. +func IL(n int) string { + return InsertLine(n) +} + // DeleteLine (DL) deletes n lines at the current cursor position. Existing // lines are moved up. // -// CSI M +// CSI Pn M // // See: https://vt100.net/docs/vt510-rm/DL.html func DeleteLine(n int) string { @@ -109,12 +154,67 @@ func DeleteLine(n int) string { return "\x1b[" + s + "M" } +// DL is an alias for [DeleteLine]. +func DL(n int) string { + return DeleteLine(n) +} + +// SetTopBottomMargins (DECSTBM) sets the top and bottom margins for the scrolling +// region. The default is the entire screen. +// +// Default is 1 and the bottom of the screen. +// +// CSI Pt ; Pb r +// +// See: https://vt100.net/docs/vt510-rm/DECSTBM.html +func SetTopBottomMargins(top, bot int) string { + var t, b string + if top > 0 { + t = strconv.Itoa(top) + } + if bot > 0 { + b = strconv.Itoa(bot) + } + return "\x1b[" + t + ";" + b + "r" +} + +// DECSTBM is an alias for [SetTopBottomMargins]. +func DECSTBM(top, bot int) string { + return SetTopBottomMargins(top, bot) +} + +// SetLeftRightMargins (DECSLRM) sets the left and right margins for the scrolling +// region. +// +// Default is 1 and the right of the screen. 
+// +// CSI Pl ; Pr s +// +// See: https://vt100.net/docs/vt510-rm/DECSLRM.html +func SetLeftRightMargins(left, right int) string { + var l, r string + if left > 0 { + l = strconv.Itoa(left) + } + if right > 0 { + r = strconv.Itoa(right) + } + return "\x1b[" + l + ";" + r + "s" +} + +// DECSLRM is an alias for [SetLeftRightMargins]. +func DECSLRM(left, right int) string { + return SetLeftRightMargins(left, right) +} + // SetScrollingRegion (DECSTBM) sets the top and bottom margins for the scrolling // region. The default is the entire screen. // // CSI ; r // // See: https://vt100.net/docs/vt510-rm/DECSTBM.html +// +// Deprecated: use [SetTopBottomMargins] instead. func SetScrollingRegion(t, b int) string { if t < 0 { t = 0 @@ -124,3 +224,187 @@ func SetScrollingRegion(t, b int) string { } return "\x1b[" + strconv.Itoa(t) + ";" + strconv.Itoa(b) + "r" } + +// InsertCharacter (ICH) inserts n blank characters at the current cursor +// position. Existing characters move to the right. Characters moved past the +// right margin are lost. ICH has no effect outside the scrolling margins. +// +// Default is 1. +// +// CSI Pn @ +// +// See: https://vt100.net/docs/vt510-rm/ICH.html +func InsertCharacter(n int) string { + var s string + if n > 1 { + s = strconv.Itoa(n) + } + return "\x1b[" + s + "@" +} + +// ICH is an alias for [InsertCharacter]. +func ICH(n int) string { + return InsertCharacter(n) +} + +// DeleteCharacter (DCH) deletes n characters at the current cursor position. +// As the characters are deleted, the remaining characters move to the left and +// the cursor remains at the same position. +// +// Default is 1. +// +// CSI Pn P +// +// See: https://vt100.net/docs/vt510-rm/DCH.html +func DeleteCharacter(n int) string { + var s string + if n > 1 { + s = strconv.Itoa(n) + } + return "\x1b[" + s + "P" +} + +// DCH is an alias for [DeleteCharacter]. +func DCH(n int) string { + return DeleteCharacter(n) +} + +// SetTabEvery8Columns (DECST8C) sets the tab stops at every 8 columns. +// +// CSI ? 5 W +// +// See: https://vt100.net/docs/vt510-rm/DECST8C.html +const ( + SetTabEvery8Columns = "\x1b[?5W" + DECST8C = SetTabEvery8Columns +) + +// HorizontalTabSet (HTS) sets a horizontal tab stop at the current cursor +// column. +// +// This is equivalent to [HTS]. +// +// ESC H +// +// See: https://vt100.net/docs/vt510-rm/HTS.html +const HorizontalTabSet = "\x1bH" + +// TabClear (TBC) clears tab stops. +// +// Default is 0. +// +// Possible values: +// 0: Clear tab stop at the current column. (default) +// 3: Clear all tab stops. +// +// CSI Pn g +// +// See: https://vt100.net/docs/vt510-rm/TBC.html +func TabClear(n int) string { + var s string + if n > 0 { + s = strconv.Itoa(n) + } + return "\x1b[" + s + "g" +} + +// TBC is an alias for [TabClear]. +func TBC(n int) string { + return TabClear(n) +} + +// RequestPresentationStateReport (DECRQPSR) requests the terminal to send a +// report of the presentation state. This includes the cursor information [DECCIR], +// and tab stop [DECTABSR] reports. +// +// Default is 0. +// +// Possible values: +// 0: Error, request ignored. +// 1: Cursor information report [DECCIR]. +// 2: Tab stop report [DECTABSR]. +// +// CSI Ps $ w +// +// See: https://vt100.net/docs/vt510-rm/DECRQPSR.html +func RequestPresentationStateReport(n int) string { + var s string + if n > 0 { + s = strconv.Itoa(n) + } + return "\x1b[" + s + "$w" +} + +// DECRQPSR is an alias for [RequestPresentationStateReport]. 
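+//
+// For illustration, DECRQPSR(1) emits "\x1b[1$w" (CSI 1 $ w), requesting a
+// cursor information report.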
+func DECRQPSR(n int) string { + return RequestPresentationStateReport(n) +} + +// TabStopReport (DECTABSR) is the response to a tab stop report request. +// It reports the tab stops set in the terminal. +// +// The response is a list of tab stops separated by a slash (/) character. +// +// DCS 2 $ u D ... D ST +// +// Where D is a decimal number representing a tab stop. +// +// See: https://vt100.net/docs/vt510-rm/DECTABSR.html +func TabStopReport(stops ...int) string { + var s []string + for _, v := range stops { + s = append(s, strconv.Itoa(v)) + } + return "\x1bP2$u" + strings.Join(s, "/") + "\x1b\\" +} + +// DECTABSR is an alias for [TabStopReport]. +func DECTABSR(stops ...int) string { + return TabStopReport(stops...) +} + +// CursorInformationReport (DECCIR) is the response to a cursor information +// report request. It reports the cursor position, visual attributes, and +// character protection attributes. It also reports the status of origin mode +// [DECOM] and the current active character set. +// +// The response is a list of values separated by a semicolon (;) character. +// +// DCS 1 $ u D ... D ST +// +// Where D is a decimal number representing a value. +// +// See: https://vt100.net/docs/vt510-rm/DECCIR.html +func CursorInformationReport(values ...int) string { + var s []string + for _, v := range values { + s = append(s, strconv.Itoa(v)) + } + return "\x1bP1$u" + strings.Join(s, ";") + "\x1b\\" +} + +// DECCIR is an alias for [CursorInformationReport]. +func DECCIR(values ...int) string { + return CursorInformationReport(values...) +} + +// RepeatPreviousCharacter (REP) repeats the previous character n times. +// This is identical to typing the same character n times. +// +// Default is 1. +// +// CSI Pn b +// +// See: ECMA-48 § 8.3.103 +func RepeatPreviousCharacter(n int) string { + var s string + if n > 1 { + s = strconv.Itoa(n) + } + return "\x1b[" + s + "b" +} + +// REP is an alias for [RepeatPreviousCharacter]. +func REP(n int) string { + return RepeatPreviousCharacter(n) +} diff --git a/vendor/github.com/charmbracelet/x/ansi/sequence.go b/vendor/github.com/charmbracelet/x/ansi/sequence.go deleted file mode 100644 index f294a229a..000000000 --- a/vendor/github.com/charmbracelet/x/ansi/sequence.go +++ /dev/null @@ -1,199 +0,0 @@ -package ansi - -import ( - "bytes" - - "github.com/charmbracelet/x/ansi/parser" -) - -// Sequence represents an ANSI sequence. This can be a control sequence, escape -// sequence, a printable character, etc. -type Sequence interface { - // String returns the string representation of the sequence. - String() string - // Bytes returns the byte representation of the sequence. - Bytes() []byte - // Clone returns a copy of the sequence. - Clone() Sequence -} - -// Rune represents a printable character. -type Rune rune - -var _ Sequence = Rune(0) - -// Bytes implements Sequence. -func (r Rune) Bytes() []byte { - return []byte(string(r)) -} - -// String implements Sequence. -func (r Rune) String() string { - return string(r) -} - -// Clone implements Sequence. -func (r Rune) Clone() Sequence { - return r -} - -// ControlCode represents a control code character. This is a character that -// is not printable and is used to control the terminal. This would be a -// character in the C0 or C1 set in the range of 0x00-0x1F and 0x80-0x9F. -type ControlCode byte - -var _ Sequence = ControlCode(0) - -// Bytes implements Sequence. -func (c ControlCode) Bytes() []byte { - return []byte{byte(c)} -} - -// String implements Sequence. 
-func (c ControlCode) String() string { - return string(c) -} - -// Clone implements Sequence. -func (c ControlCode) Clone() Sequence { - return c -} - -// EscSequence represents an escape sequence. -type EscSequence int - -var _ Sequence = EscSequence(0) - -// buffer returns the buffer of the escape sequence. -func (e EscSequence) buffer() *bytes.Buffer { - var b bytes.Buffer - b.WriteByte('\x1b') - if i := parser.Intermediate(int(e)); i != 0 { - b.WriteByte(byte(i)) - } - b.WriteByte(byte(e.Command())) - return &b -} - -// Bytes implements Sequence. -func (e EscSequence) Bytes() []byte { - return e.buffer().Bytes() -} - -// String implements Sequence. -func (e EscSequence) String() string { - return e.buffer().String() -} - -// Clone implements Sequence. -func (e EscSequence) Clone() Sequence { - return e -} - -// Command returns the command byte of the escape sequence. -func (e EscSequence) Command() int { - return parser.Command(int(e)) -} - -// Intermediate returns the intermediate byte of the escape sequence. -func (e EscSequence) Intermediate() int { - return parser.Intermediate(int(e)) -} - -// SosSequence represents a SOS sequence. -type SosSequence struct { - // Data contains the raw data of the sequence. - Data []byte -} - -var _ Sequence = &SosSequence{} - -// Clone implements Sequence. -func (s SosSequence) Clone() Sequence { - return SosSequence{Data: append([]byte(nil), s.Data...)} -} - -// Bytes implements Sequence. -func (s SosSequence) Bytes() []byte { - return s.buffer().Bytes() -} - -// String implements Sequence. -func (s SosSequence) String() string { - return s.buffer().String() -} - -func (s SosSequence) buffer() *bytes.Buffer { - var b bytes.Buffer - b.WriteByte('\x1b') - b.WriteByte('X') - b.Write(s.Data) - b.WriteString("\x1b\\") - return &b -} - -// PmSequence represents a PM sequence. -type PmSequence struct { - // Data contains the raw data of the sequence. - Data []byte -} - -var _ Sequence = &PmSequence{} - -// Clone implements Sequence. -func (s PmSequence) Clone() Sequence { - return PmSequence{Data: append([]byte(nil), s.Data...)} -} - -// Bytes implements Sequence. -func (s PmSequence) Bytes() []byte { - return s.buffer().Bytes() -} - -// String implements Sequence. -func (s PmSequence) String() string { - return s.buffer().String() -} - -// buffer returns the buffer of the PM sequence. -func (s PmSequence) buffer() *bytes.Buffer { - var b bytes.Buffer - b.WriteByte('\x1b') - b.WriteByte('^') - b.Write(s.Data) - b.WriteString("\x1b\\") - return &b -} - -// ApcSequence represents an APC sequence. -type ApcSequence struct { - // Data contains the raw data of the sequence. - Data []byte -} - -var _ Sequence = &ApcSequence{} - -// Clone implements Sequence. -func (s ApcSequence) Clone() Sequence { - return ApcSequence{Data: append([]byte(nil), s.Data...)} -} - -// Bytes implements Sequence. -func (s ApcSequence) Bytes() []byte { - return s.buffer().Bytes() -} - -// String implements Sequence. -func (s ApcSequence) String() string { - return s.buffer().String() -} - -// buffer returns the buffer of the APC sequence. 
-func (s ApcSequence) buffer() *bytes.Buffer { - var b bytes.Buffer - b.WriteByte('\x1b') - b.WriteByte('_') - b.Write(s.Data) - b.WriteString("\x1b\\") - return &b -} diff --git a/vendor/github.com/charmbracelet/x/ansi/sgr.go b/vendor/github.com/charmbracelet/x/ansi/sgr.go new file mode 100644 index 000000000..1a18c98ef --- /dev/null +++ b/vendor/github.com/charmbracelet/x/ansi/sgr.go @@ -0,0 +1,95 @@ +package ansi + +import "strconv" + +// Select Graphic Rendition (SGR) is a command that sets display attributes. +// +// Default is 0. +// +// CSI Ps ; Ps ... m +// +// See: https://vt100.net/docs/vt510-rm/SGR.html +func SelectGraphicRendition(ps ...Attr) string { + if len(ps) == 0 { + return ResetStyle + } + + var s Style + for _, p := range ps { + attr, ok := attrStrings[p] + if ok { + s = append(s, attr) + } else { + if p < 0 { + p = 0 + } + s = append(s, strconv.Itoa(p)) + } + } + + return s.String() +} + +// SGR is an alias for [SelectGraphicRendition]. +func SGR(ps ...Attr) string { + return SelectGraphicRendition(ps...) +} + +var attrStrings = map[int]string{ + ResetAttr: "0", + BoldAttr: "1", + FaintAttr: "2", + ItalicAttr: "3", + UnderlineAttr: "4", + SlowBlinkAttr: "5", + RapidBlinkAttr: "6", + ReverseAttr: "7", + ConcealAttr: "8", + StrikethroughAttr: "9", + NoBoldAttr: "21", + NormalIntensityAttr: "22", + NoItalicAttr: "23", + NoUnderlineAttr: "24", + NoBlinkAttr: "25", + NoReverseAttr: "27", + NoConcealAttr: "28", + NoStrikethroughAttr: "29", + BlackForegroundColorAttr: "30", + RedForegroundColorAttr: "31", + GreenForegroundColorAttr: "32", + YellowForegroundColorAttr: "33", + BlueForegroundColorAttr: "34", + MagentaForegroundColorAttr: "35", + CyanForegroundColorAttr: "36", + WhiteForegroundColorAttr: "37", + ExtendedForegroundColorAttr: "38", + DefaultForegroundColorAttr: "39", + BlackBackgroundColorAttr: "40", + RedBackgroundColorAttr: "41", + GreenBackgroundColorAttr: "42", + YellowBackgroundColorAttr: "43", + BlueBackgroundColorAttr: "44", + MagentaBackgroundColorAttr: "45", + CyanBackgroundColorAttr: "46", + WhiteBackgroundColorAttr: "47", + ExtendedBackgroundColorAttr: "48", + DefaultBackgroundColorAttr: "49", + ExtendedUnderlineColorAttr: "58", + DefaultUnderlineColorAttr: "59", + BrightBlackForegroundColorAttr: "90", + BrightRedForegroundColorAttr: "91", + BrightGreenForegroundColorAttr: "92", + BrightYellowForegroundColorAttr: "93", + BrightBlueForegroundColorAttr: "94", + BrightMagentaForegroundColorAttr: "95", + BrightCyanForegroundColorAttr: "96", + BrightWhiteForegroundColorAttr: "97", + BrightBlackBackgroundColorAttr: "100", + BrightRedBackgroundColorAttr: "101", + BrightGreenBackgroundColorAttr: "102", + BrightYellowBackgroundColorAttr: "103", + BrightBlueBackgroundColorAttr: "104", + BrightMagentaBackgroundColorAttr: "105", + BrightCyanBackgroundColorAttr: "106", + BrightWhiteBackgroundColorAttr: "107", +} diff --git a/vendor/github.com/charmbracelet/x/ansi/status.go b/vendor/github.com/charmbracelet/x/ansi/status.go new file mode 100644 index 000000000..4337e189d --- /dev/null +++ b/vendor/github.com/charmbracelet/x/ansi/status.go @@ -0,0 +1,144 @@ +package ansi + +import ( + "strconv" + "strings" +) + +// StatusReport represents a terminal status report. +type StatusReport interface { + // StatusReport returns the status report identifier. + StatusReport() int +} + +// ANSIReport represents an ANSI terminal status report. +type ANSIStatusReport int //nolint:revive + +// Report returns the status report identifier. 
+func (s ANSIStatusReport) StatusReport() int { + return int(s) +} + +// DECStatusReport represents a DEC terminal status report. +type DECStatusReport int + +// Status returns the status report identifier. +func (s DECStatusReport) StatusReport() int { + return int(s) +} + +// DeviceStatusReport (DSR) is a control sequence that reports the terminal's +// status. +// The terminal responds with a DSR sequence. +// +// CSI Ps n +// CSI ? Ps n +// +// If one of the statuses is a [DECStatus], the sequence will use the DEC +// format. +// +// See also https://vt100.net/docs/vt510-rm/DSR.html +func DeviceStatusReport(statues ...StatusReport) string { + var dec bool + list := make([]string, len(statues)) + seq := "\x1b[" + for i, status := range statues { + list[i] = strconv.Itoa(status.StatusReport()) + switch status.(type) { + case DECStatusReport: + dec = true + } + } + if dec { + seq += "?" + } + return seq + strings.Join(list, ";") + "n" +} + +// DSR is an alias for [DeviceStatusReport]. +func DSR(status StatusReport) string { + return DeviceStatusReport(status) +} + +// RequestCursorPositionReport is an escape sequence that requests the current +// cursor position. +// +// CSI 6 n +// +// The terminal will report the cursor position as a CSI sequence in the +// following format: +// +// CSI Pl ; Pc R +// +// Where Pl is the line number and Pc is the column number. +// See: https://vt100.net/docs/vt510-rm/CPR.html +const RequestCursorPositionReport = "\x1b[6n" + +// RequestExtendedCursorPositionReport (DECXCPR) is a sequence for requesting +// the cursor position report including the current page number. +// +// CSI ? 6 n +// +// The terminal will report the cursor position as a CSI sequence in the +// following format: +// +// CSI ? Pl ; Pc ; Pp R +// +// Where Pl is the line number, Pc is the column number, and Pp is the page +// number. +// See: https://vt100.net/docs/vt510-rm/DECXCPR.html +const RequestExtendedCursorPositionReport = "\x1b[?6n" + +// CursorPositionReport (CPR) is a control sequence that reports the cursor's +// position. +// +// CSI Pl ; Pc R +// +// Where Pl is the line number and Pc is the column number. +// +// See also https://vt100.net/docs/vt510-rm/CPR.html +func CursorPositionReport(line, column int) string { + if line < 1 { + line = 1 + } + if column < 1 { + column = 1 + } + return "\x1b[" + strconv.Itoa(line) + ";" + strconv.Itoa(column) + "R" +} + +// CPR is an alias for [CursorPositionReport]. +func CPR(line, column int) string { + return CursorPositionReport(line, column) +} + +// ExtendedCursorPositionReport (DECXCPR) is a control sequence that reports the +// cursor's position along with the page number (optional). +// +// CSI ? Pl ; Pc R +// CSI ? Pl ; Pc ; Pv R +// +// Where Pl is the line number, Pc is the column number, and Pv is the page +// number. +// +// If the page number is zero or negative, the returned sequence won't include +// the page number. +// +// See also https://vt100.net/docs/vt510-rm/DECXCPR.html +func ExtendedCursorPositionReport(line, column, page int) string { + if line < 1 { + line = 1 + } + if column < 1 { + column = 1 + } + if page < 1 { + return "\x1b[?" + strconv.Itoa(line) + ";" + strconv.Itoa(column) + "R" + } + return "\x1b[?" + strconv.Itoa(line) + ";" + strconv.Itoa(column) + ";" + strconv.Itoa(page) + "R" +} + +// DECXCPR is an alias for [ExtendedCursorPositionReport]. 
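+//
+// For illustration, DECXCPR(10, 4, 0) yields "\x1b[?10;4R"; the page number
+// is omitted because it is not positive.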
+func DECXCPR(line, column, page int) string { + return ExtendedCursorPositionReport(line, column, page) +} diff --git a/vendor/github.com/charmbracelet/x/ansi/style.go b/vendor/github.com/charmbracelet/x/ansi/style.go index 96e3c532d..46ddcaa99 100644 --- a/vendor/github.com/charmbracelet/x/ansi/style.go +++ b/vendor/github.com/charmbracelet/x/ansi/style.go @@ -12,10 +12,10 @@ import ( const ResetStyle = "\x1b[m" // Attr is a SGR (Select Graphic Rendition) style attribute. -type Attr = string +type Attr = int // Style represents an ANSI SGR (Select Graphic Rendition) style. -type Style []Attr +type Style []string // String returns the ANSI SGR (Select Graphic Rendition) style sequence for // the given style. @@ -36,186 +36,357 @@ func (s Style) Styled(str string) string { // Reset appends the reset style attribute to the style. func (s Style) Reset() Style { - return append(s, ResetAttr) + return append(s, resetAttr) } // Bold appends the bold style attribute to the style. func (s Style) Bold() Style { - return append(s, BoldAttr) + return append(s, boldAttr) } // Faint appends the faint style attribute to the style. func (s Style) Faint() Style { - return append(s, FaintAttr) + return append(s, faintAttr) } // Italic appends the italic style attribute to the style. func (s Style) Italic() Style { - return append(s, ItalicAttr) + return append(s, italicAttr) } // Underline appends the underline style attribute to the style. func (s Style) Underline() Style { - return append(s, UnderlineAttr) + return append(s, underlineAttr) +} + +// UnderlineStyle appends the underline style attribute to the style. +func (s Style) UnderlineStyle(u UnderlineStyle) Style { + switch u { + case NoUnderlineStyle: + return s.NoUnderline() + case SingleUnderlineStyle: + return s.Underline() + case DoubleUnderlineStyle: + return append(s, doubleUnderlineStyle) + case CurlyUnderlineStyle: + return append(s, curlyUnderlineStyle) + case DottedUnderlineStyle: + return append(s, dottedUnderlineStyle) + case DashedUnderlineStyle: + return append(s, dashedUnderlineStyle) + } + return s } // DoubleUnderline appends the double underline style attribute to the style. +// This is a convenience method for UnderlineStyle(DoubleUnderlineStyle). func (s Style) DoubleUnderline() Style { - return append(s, DoubleUnderlineAttr) + return s.UnderlineStyle(DoubleUnderlineStyle) } // CurlyUnderline appends the curly underline style attribute to the style. +// This is a convenience method for UnderlineStyle(CurlyUnderlineStyle). func (s Style) CurlyUnderline() Style { - return append(s, CurlyUnderlineAttr) + return s.UnderlineStyle(CurlyUnderlineStyle) } // DottedUnderline appends the dotted underline style attribute to the style. +// This is a convenience method for UnderlineStyle(DottedUnderlineStyle). func (s Style) DottedUnderline() Style { - return append(s, DottedUnderlineAttr) + return s.UnderlineStyle(DottedUnderlineStyle) } // DashedUnderline appends the dashed underline style attribute to the style. +// This is a convenience method for UnderlineStyle(DashedUnderlineStyle). func (s Style) DashedUnderline() Style { - return append(s, DashedUnderlineAttr) + return s.UnderlineStyle(DashedUnderlineStyle) } // SlowBlink appends the slow blink style attribute to the style. func (s Style) SlowBlink() Style { - return append(s, SlowBlinkAttr) + return append(s, slowBlinkAttr) } // RapidBlink appends the rapid blink style attribute to the style. 
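// Attributes compose by appending, so an illustrative chain such as
// ansi.Style{}.Bold().RapidBlink().Styled("hi") wraps "hi" in blinking bold.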
func (s Style) RapidBlink() Style { - return append(s, RapidBlinkAttr) + return append(s, rapidBlinkAttr) } // Reverse appends the reverse style attribute to the style. func (s Style) Reverse() Style { - return append(s, ReverseAttr) + return append(s, reverseAttr) } // Conceal appends the conceal style attribute to the style. func (s Style) Conceal() Style { - return append(s, ConcealAttr) + return append(s, concealAttr) } // Strikethrough appends the strikethrough style attribute to the style. func (s Style) Strikethrough() Style { - return append(s, StrikethroughAttr) + return append(s, strikethroughAttr) } // NoBold appends the no bold style attribute to the style. func (s Style) NoBold() Style { - return append(s, NoBoldAttr) + return append(s, noBoldAttr) } // NormalIntensity appends the normal intensity style attribute to the style. func (s Style) NormalIntensity() Style { - return append(s, NormalIntensityAttr) + return append(s, normalIntensityAttr) } // NoItalic appends the no italic style attribute to the style. func (s Style) NoItalic() Style { - return append(s, NoItalicAttr) + return append(s, noItalicAttr) } // NoUnderline appends the no underline style attribute to the style. func (s Style) NoUnderline() Style { - return append(s, NoUnderlineAttr) + return append(s, noUnderlineAttr) } // NoBlink appends the no blink style attribute to the style. func (s Style) NoBlink() Style { - return append(s, NoBlinkAttr) + return append(s, noBlinkAttr) } // NoReverse appends the no reverse style attribute to the style. func (s Style) NoReverse() Style { - return append(s, NoReverseAttr) + return append(s, noReverseAttr) } // NoConceal appends the no conceal style attribute to the style. func (s Style) NoConceal() Style { - return append(s, NoConcealAttr) + return append(s, noConcealAttr) } // NoStrikethrough appends the no strikethrough style attribute to the style. func (s Style) NoStrikethrough() Style { - return append(s, NoStrikethroughAttr) + return append(s, noStrikethroughAttr) } // DefaultForegroundColor appends the default foreground color style attribute to the style. func (s Style) DefaultForegroundColor() Style { - return append(s, DefaultForegroundColorAttr) + return append(s, defaultForegroundColorAttr) } // DefaultBackgroundColor appends the default background color style attribute to the style. func (s Style) DefaultBackgroundColor() Style { - return append(s, DefaultBackgroundColorAttr) + return append(s, defaultBackgroundColorAttr) } // DefaultUnderlineColor appends the default underline color style attribute to the style. func (s Style) DefaultUnderlineColor() Style { - return append(s, DefaultUnderlineColorAttr) + return append(s, defaultUnderlineColorAttr) } // ForegroundColor appends the foreground color style attribute to the style. func (s Style) ForegroundColor(c Color) Style { - return append(s, ForegroundColorAttr(c)) + return append(s, foregroundColorString(c)) } // BackgroundColor appends the background color style attribute to the style. func (s Style) BackgroundColor(c Color) Style { - return append(s, BackgroundColorAttr(c)) + return append(s, backgroundColorString(c)) } // UnderlineColor appends the underline color style attribute to the style. func (s Style) UnderlineColor(c Color) Style { - return append(s, UnderlineColorAttr(c)) + return append(s, underlineColorString(c)) } +// UnderlineStyle represents an ANSI SGR (Select Graphic Rendition) underline +// style. 
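+// Underline styles beyond the single form are encoded with the colon
+// subparameter notation "4:n", as defined by the unexported constants below.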
+type UnderlineStyle = byte + +const ( + doubleUnderlineStyle = "4:2" + curlyUnderlineStyle = "4:3" + dottedUnderlineStyle = "4:4" + dashedUnderlineStyle = "4:5" +) + +const ( + // NoUnderlineStyle is the default underline style. + NoUnderlineStyle UnderlineStyle = iota + // SingleUnderlineStyle is a single underline style. + SingleUnderlineStyle + // DoubleUnderlineStyle is a double underline style. + DoubleUnderlineStyle + // CurlyUnderlineStyle is a curly underline style. + CurlyUnderlineStyle + // DottedUnderlineStyle is a dotted underline style. + DottedUnderlineStyle + // DashedUnderlineStyle is a dashed underline style. + DashedUnderlineStyle +) + // SGR (Select Graphic Rendition) style attributes. // See: https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_(Select_Graphic_Rendition)_parameters const ( - ResetAttr Attr = "0" - BoldAttr Attr = "1" - FaintAttr Attr = "2" - ItalicAttr Attr = "3" - UnderlineAttr Attr = "4" - DoubleUnderlineAttr Attr = "4:2" - CurlyUnderlineAttr Attr = "4:3" - DottedUnderlineAttr Attr = "4:4" - DashedUnderlineAttr Attr = "4:5" - SlowBlinkAttr Attr = "5" - RapidBlinkAttr Attr = "6" - ReverseAttr Attr = "7" - ConcealAttr Attr = "8" - StrikethroughAttr Attr = "9" - NoBoldAttr Attr = "21" // Some terminals treat this as double underline. - NormalIntensityAttr Attr = "22" - NoItalicAttr Attr = "23" - NoUnderlineAttr Attr = "24" - NoBlinkAttr Attr = "25" - NoReverseAttr Attr = "27" - NoConcealAttr Attr = "28" - NoStrikethroughAttr Attr = "29" - DefaultForegroundColorAttr Attr = "39" - DefaultBackgroundColorAttr Attr = "49" - DefaultUnderlineColorAttr Attr = "59" + ResetAttr Attr = 0 + BoldAttr Attr = 1 + FaintAttr Attr = 2 + ItalicAttr Attr = 3 + UnderlineAttr Attr = 4 + SlowBlinkAttr Attr = 5 + RapidBlinkAttr Attr = 6 + ReverseAttr Attr = 7 + ConcealAttr Attr = 8 + StrikethroughAttr Attr = 9 + NoBoldAttr Attr = 21 // Some terminals treat this as double underline. 
+ NormalIntensityAttr Attr = 22 + NoItalicAttr Attr = 23 + NoUnderlineAttr Attr = 24 + NoBlinkAttr Attr = 25 + NoReverseAttr Attr = 27 + NoConcealAttr Attr = 28 + NoStrikethroughAttr Attr = 29 + BlackForegroundColorAttr Attr = 30 + RedForegroundColorAttr Attr = 31 + GreenForegroundColorAttr Attr = 32 + YellowForegroundColorAttr Attr = 33 + BlueForegroundColorAttr Attr = 34 + MagentaForegroundColorAttr Attr = 35 + CyanForegroundColorAttr Attr = 36 + WhiteForegroundColorAttr Attr = 37 + ExtendedForegroundColorAttr Attr = 38 + DefaultForegroundColorAttr Attr = 39 + BlackBackgroundColorAttr Attr = 40 + RedBackgroundColorAttr Attr = 41 + GreenBackgroundColorAttr Attr = 42 + YellowBackgroundColorAttr Attr = 43 + BlueBackgroundColorAttr Attr = 44 + MagentaBackgroundColorAttr Attr = 45 + CyanBackgroundColorAttr Attr = 46 + WhiteBackgroundColorAttr Attr = 47 + ExtendedBackgroundColorAttr Attr = 48 + DefaultBackgroundColorAttr Attr = 49 + ExtendedUnderlineColorAttr Attr = 58 + DefaultUnderlineColorAttr Attr = 59 + BrightBlackForegroundColorAttr Attr = 90 + BrightRedForegroundColorAttr Attr = 91 + BrightGreenForegroundColorAttr Attr = 92 + BrightYellowForegroundColorAttr Attr = 93 + BrightBlueForegroundColorAttr Attr = 94 + BrightMagentaForegroundColorAttr Attr = 95 + BrightCyanForegroundColorAttr Attr = 96 + BrightWhiteForegroundColorAttr Attr = 97 + BrightBlackBackgroundColorAttr Attr = 100 + BrightRedBackgroundColorAttr Attr = 101 + BrightGreenBackgroundColorAttr Attr = 102 + BrightYellowBackgroundColorAttr Attr = 103 + BrightBlueBackgroundColorAttr Attr = 104 + BrightMagentaBackgroundColorAttr Attr = 105 + BrightCyanBackgroundColorAttr Attr = 106 + BrightWhiteBackgroundColorAttr Attr = 107 + + RGBColorIntroducerAttr Attr = 2 + ExtendedColorIntroducerAttr Attr = 5 ) -// ForegroundColorAttr returns the style SGR attribute for the given foreground -// color. 
+const ( + resetAttr = "0" + boldAttr = "1" + faintAttr = "2" + italicAttr = "3" + underlineAttr = "4" + slowBlinkAttr = "5" + rapidBlinkAttr = "6" + reverseAttr = "7" + concealAttr = "8" + strikethroughAttr = "9" + noBoldAttr = "21" + normalIntensityAttr = "22" + noItalicAttr = "23" + noUnderlineAttr = "24" + noBlinkAttr = "25" + noReverseAttr = "27" + noConcealAttr = "28" + noStrikethroughAttr = "29" + blackForegroundColorAttr = "30" + redForegroundColorAttr = "31" + greenForegroundColorAttr = "32" + yellowForegroundColorAttr = "33" + blueForegroundColorAttr = "34" + magentaForegroundColorAttr = "35" + cyanForegroundColorAttr = "36" + whiteForegroundColorAttr = "37" + extendedForegroundColorAttr = "38" + defaultForegroundColorAttr = "39" + blackBackgroundColorAttr = "40" + redBackgroundColorAttr = "41" + greenBackgroundColorAttr = "42" + yellowBackgroundColorAttr = "43" + blueBackgroundColorAttr = "44" + magentaBackgroundColorAttr = "45" + cyanBackgroundColorAttr = "46" + whiteBackgroundColorAttr = "47" + extendedBackgroundColorAttr = "48" + defaultBackgroundColorAttr = "49" + extendedUnderlineColorAttr = "58" + defaultUnderlineColorAttr = "59" + brightBlackForegroundColorAttr = "90" + brightRedForegroundColorAttr = "91" + brightGreenForegroundColorAttr = "92" + brightYellowForegroundColorAttr = "93" + brightBlueForegroundColorAttr = "94" + brightMagentaForegroundColorAttr = "95" + brightCyanForegroundColorAttr = "96" + brightWhiteForegroundColorAttr = "97" + brightBlackBackgroundColorAttr = "100" + brightRedBackgroundColorAttr = "101" + brightGreenBackgroundColorAttr = "102" + brightYellowBackgroundColorAttr = "103" + brightBlueBackgroundColorAttr = "104" + brightMagentaBackgroundColorAttr = "105" + brightCyanBackgroundColorAttr = "106" + brightWhiteBackgroundColorAttr = "107" +) + +// foregroundColorString returns the style SGR attribute for the given +// foreground color. 
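+// Basic colors map to SGR 30-37 and 90-97; extended colors use the
+// "38;5;n" (256-color) and "38;2;r;g;b" (truecolor) forms.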
// See: https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_(Select_Graphic_Rendition)_parameters -func ForegroundColorAttr(c Color) Attr { +func foregroundColorString(c Color) string { switch c := c.(type) { case BasicColor: // 3-bit or 4-bit ANSI foreground // "3" or "9" where n is the color number from 0 to 7 - if c < 8 { - return "3" + string('0'+c) - } else if c < 16 { - return "9" + string('0'+c-8) + switch c { + case Black: + return blackForegroundColorAttr + case Red: + return redForegroundColorAttr + case Green: + return greenForegroundColorAttr + case Yellow: + return yellowForegroundColorAttr + case Blue: + return blueForegroundColorAttr + case Magenta: + return magentaForegroundColorAttr + case Cyan: + return cyanForegroundColorAttr + case White: + return whiteForegroundColorAttr + case BrightBlack: + return brightBlackForegroundColorAttr + case BrightRed: + return brightRedForegroundColorAttr + case BrightGreen: + return brightGreenForegroundColorAttr + case BrightYellow: + return brightYellowForegroundColorAttr + case BrightBlue: + return brightBlueForegroundColorAttr + case BrightMagenta: + return brightMagentaForegroundColorAttr + case BrightCyan: + return brightCyanForegroundColorAttr + case BrightWhite: + return brightWhiteForegroundColorAttr } case ExtendedColor: // 256-color ANSI foreground @@ -230,21 +401,50 @@ func ForegroundColorAttr(c Color) Attr { strconv.FormatUint(uint64(shift(g)), 10) + ";" + strconv.FormatUint(uint64(shift(b)), 10) } - return DefaultForegroundColorAttr + return defaultForegroundColorAttr } -// BackgroundColorAttr returns the style SGR attribute for the given background -// color. +// backgroundColorString returns the style SGR attribute for the given +// background color. // See: https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_(Select_Graphic_Rendition)_parameters -func BackgroundColorAttr(c Color) Attr { +func backgroundColorString(c Color) string { switch c := c.(type) { case BasicColor: // 3-bit or 4-bit ANSI foreground // "4" or "10" where n is the color number from 0 to 7 - if c < 8 { - return "4" + string('0'+c) - } else { - return "10" + string('0'+c-8) + switch c { + case Black: + return blackBackgroundColorAttr + case Red: + return redBackgroundColorAttr + case Green: + return greenBackgroundColorAttr + case Yellow: + return yellowBackgroundColorAttr + case Blue: + return blueBackgroundColorAttr + case Magenta: + return magentaBackgroundColorAttr + case Cyan: + return cyanBackgroundColorAttr + case White: + return whiteBackgroundColorAttr + case BrightBlack: + return brightBlackBackgroundColorAttr + case BrightRed: + return brightRedBackgroundColorAttr + case BrightGreen: + return brightGreenBackgroundColorAttr + case BrightYellow: + return brightYellowBackgroundColorAttr + case BrightBlue: + return brightBlueBackgroundColorAttr + case BrightMagenta: + return brightMagentaBackgroundColorAttr + case BrightCyan: + return brightCyanBackgroundColorAttr + case BrightWhite: + return brightWhiteBackgroundColorAttr } case ExtendedColor: // 256-color ANSI foreground @@ -259,13 +459,13 @@ func BackgroundColorAttr(c Color) Attr { strconv.FormatUint(uint64(shift(g)), 10) + ";" + strconv.FormatUint(uint64(shift(b)), 10) } - return DefaultBackgroundColorAttr + return defaultBackgroundColorAttr } -// UnderlineColorAttr returns the style SGR attribute for the given underline +// underlineColorString returns the style SGR attribute for the given underline // color. 
// See: https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_(Select_Graphic_Rendition)_parameters -func UnderlineColorAttr(c Color) Attr { +func underlineColorString(c Color) string { switch c := c.(type) { // NOTE: we can't use 3-bit and 4-bit ANSI color codes with underline // color, use 256-color instead. @@ -285,12 +485,176 @@ func UnderlineColorAttr(c Color) Attr { strconv.FormatUint(uint64(shift(g)), 10) + ";" + strconv.FormatUint(uint64(shift(b)), 10) } - return DefaultUnderlineColorAttr -} + return defaultUnderlineColorAttr +} + +// ReadStyleColor decodes a color from a slice of parameters. It returns the +// number of parameters read and the color. This function is used to read SGR +// color parameters following the ITU T.416 standard. +// +// It supports reading the following color types: +// - 0: implementation defined +// - 1: transparent +// - 2: RGB direct color +// - 3: CMY direct color +// - 4: CMYK direct color +// - 5: indexed color +// - 6: RGBA direct color (WezTerm extension) +// +// The parameters can be separated by semicolons (;) or colons (:). Mixing +// separators is not allowed. +// +// The specs supports defining a color space id, a color tolerance value, and a +// tolerance color space id. However, these values have no effect on the +// returned color and will be ignored. +// +// This implementation includes a few modifications to the specs: +// 1. Support for legacy color values separated by semicolons (;) with respect to RGB, and indexed colors +// 2. Support ignoring and omitting the color space id (second parameter) with respect to RGB colors +// 3. Support ignoring and omitting the 6th parameter with respect to RGB and CMY colors +// 4. Support reading RGBA colors +func ReadStyleColor(params Params, co *color.Color) (n int) { + if len(params) < 2 { // Need at least SGR type and color type + return 0 + } + + // First parameter indicates one of 38, 48, or 58 (foreground, background, or underline) + s := params[0] + p := params[1] + colorType := p.Param(0) + n = 2 + + paramsfn := func() (p1, p2, p3, p4 int) { + // Where should we start reading the color? 
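+		// Each case below matches one accepted parameter layout, and n is
+		// advanced by the number of parameters consumed so callers can skip them.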
+ switch { + case s.HasMore() && p.HasMore() && len(params) > 8 && params[2].HasMore() && params[3].HasMore() && params[4].HasMore() && params[5].HasMore() && params[6].HasMore() && params[7].HasMore(): + // We have color space id, a 6th parameter, a tolerance value, and a tolerance color space + n += 7 + return params[3].Param(0), params[4].Param(0), params[5].Param(0), params[6].Param(0) + case s.HasMore() && p.HasMore() && len(params) > 7 && params[2].HasMore() && params[3].HasMore() && params[4].HasMore() && params[5].HasMore() && params[6].HasMore(): + // We have color space id, a 6th parameter, and a tolerance value + n += 6 + return params[3].Param(0), params[4].Param(0), params[5].Param(0), params[6].Param(0) + case s.HasMore() && p.HasMore() && len(params) > 6 && params[2].HasMore() && params[3].HasMore() && params[4].HasMore() && params[5].HasMore(): + // We have color space id and a 6th parameter + // 48 : 4 : : 1 : 2 : 3 :4 + n += 5 + return params[3].Param(0), params[4].Param(0), params[5].Param(0), params[6].Param(0) + case s.HasMore() && p.HasMore() && len(params) > 5 && params[2].HasMore() && params[3].HasMore() && params[4].HasMore() && !params[5].HasMore(): + // We have color space + // 48 : 3 : : 1 : 2 : 3 + n += 4 + return params[3].Param(0), params[4].Param(0), params[5].Param(0), -1 + case s.HasMore() && p.HasMore() && p.Param(0) == 2 && params[2].HasMore() && params[3].HasMore() && !params[4].HasMore(): + // We have color values separated by colons (:) + // 48 : 2 : 1 : 2 : 3 + fallthrough + case !s.HasMore() && !p.HasMore() && p.Param(0) == 2 && !params[2].HasMore() && !params[3].HasMore() && !params[4].HasMore(): + // Support legacy color values separated by semicolons (;) + // 48 ; 2 ; 1 ; 2 ; 3 + n += 3 + return params[2].Param(0), params[3].Param(0), params[4].Param(0), -1 + } + // Ambiguous SGR color + return -1, -1, -1, -1 + } + + switch colorType { + case 0: // implementation defined + return 2 + case 1: // transparent + *co = color.Transparent + return 2 + case 2: // RGB direct color + if len(params) < 5 { + return 0 + } + + r, g, b, _ := paramsfn() + if r == -1 || g == -1 || b == -1 { + return 0 + } + + *co = color.RGBA{ + R: uint8(r), //nolint:gosec + G: uint8(g), //nolint:gosec + B: uint8(b), //nolint:gosec + A: 0xff, + } + return + + case 3: // CMY direct color + if len(params) < 5 { + return 0 + } + + c, m, y, _ := paramsfn() + if c == -1 || m == -1 || y == -1 { + return 0 + } + + *co = color.CMYK{ + C: uint8(c), //nolint:gosec + M: uint8(m), //nolint:gosec + Y: uint8(y), //nolint:gosec + K: 0, + } + return + + case 4: // CMYK direct color + if len(params) < 6 { + return 0 + } + + c, m, y, k := paramsfn() + if c == -1 || m == -1 || y == -1 || k == -1 { + return 0 + } + + *co = color.CMYK{ + C: uint8(c), //nolint:gosec + M: uint8(m), //nolint:gosec + Y: uint8(y), //nolint:gosec + K: uint8(k), //nolint:gosec + } + return + + case 5: // indexed color + if len(params) < 3 { + return 0 + } + switch { + case s.HasMore() && p.HasMore() && !params[2].HasMore(): + // Colon separated indexed color + // 38 : 5 : 234 + case !s.HasMore() && !p.HasMore() && !params[2].HasMore(): + // Legacy semicolon indexed color + // 38 ; 5 ; 234 + default: + return 0 + } + *co = ExtendedColor(params[2].Param(0)) //nolint:gosec + return 3 + + case 6: // RGBA direct color + if len(params) < 6 { + return 0 + } + + r, g, b, a := paramsfn() + if r == -1 || g == -1 || b == -1 || a == -1 { + return 0 + } + + *co = color.RGBA{ + R: uint8(r), //nolint:gosec + G: uint8(g), //nolint:gosec + 
B: uint8(b), //nolint:gosec + A: uint8(a), //nolint:gosec + } + return -func shift(v uint32) uint32 { - if v > 0xff { - return v >> 8 + default: + return 0 } - return v } diff --git a/vendor/github.com/charmbracelet/x/ansi/termcap.go b/vendor/github.com/charmbracelet/x/ansi/termcap.go index 1dfc52a6e..3c5c7da92 100644 --- a/vendor/github.com/charmbracelet/x/ansi/termcap.go +++ b/vendor/github.com/charmbracelet/x/ansi/termcap.go @@ -14,7 +14,7 @@ import ( // // See: https://man7.org/linux/man-pages/man5/terminfo.5.html // See: https://invisible-island.net/xterm/ctlseqs/ctlseqs.html#h3-Operating-System-Commands -func RequestTermcap(caps ...string) string { +func XTGETTCAP(caps ...string) string { if len(caps) == 0 { return "" } @@ -29,3 +29,13 @@ func RequestTermcap(caps ...string) string { return s + "\x1b\\" } + +// RequestTermcap is an alias for [XTGETTCAP]. +func RequestTermcap(caps ...string) string { + return XTGETTCAP(caps...) +} + +// RequestTerminfo is an alias for [XTGETTCAP]. +func RequestTerminfo(caps ...string) string { + return XTGETTCAP(caps...) +} diff --git a/vendor/github.com/charmbracelet/x/ansi/truncate.go b/vendor/github.com/charmbracelet/x/ansi/truncate.go index db0782c81..1fa3efefe 100644 --- a/vendor/github.com/charmbracelet/x/ansi/truncate.go +++ b/vendor/github.com/charmbracelet/x/ansi/truncate.go @@ -4,14 +4,65 @@ import ( "bytes" "github.com/charmbracelet/x/ansi/parser" + "github.com/mattn/go-runewidth" "github.com/rivo/uniseg" ) -// Truncate truncates a string to a given length, adding a tail to the -// end if the string is longer than the given length. -// This function is aware of ANSI escape codes and will not break them, and -// accounts for wide-characters (such as East Asians and emojis). +// Cut the string, without adding any prefix or tail strings. This function is +// aware of ANSI escape codes and will not break them, and accounts for +// wide-characters (such as East-Asian characters and emojis). Note that the +// [left] parameter is inclusive, while [right] isn't. +// This treats the text as a sequence of graphemes. +func Cut(s string, left, right int) string { + return cut(GraphemeWidth, s, left, right) +} + +// CutWc the string, without adding any prefix or tail strings. This function is +// aware of ANSI escape codes and will not break them, and accounts for +// wide-characters (such as East-Asian characters and emojis). Note that the +// [left] parameter is inclusive, while [right] isn't. +// This treats the text as a sequence of wide characters and runes. +func CutWc(s string, left, right int) string { + return cut(WcWidth, s, left, right) +} + +func cut(m Method, s string, left, right int) string { + if right <= left { + return "" + } + + truncate := Truncate + truncateLeft := TruncateLeft + if m == WcWidth { + truncate = TruncateWc + truncateLeft = TruncateWc + } + + if left == 0 { + return truncate(s, right, "") + } + return truncateLeft(Truncate(s, right, ""), left, "") +} + +// Truncate truncates a string to a given length, adding a tail to the end if +// the string is longer than the given length. This function is aware of ANSI +// escape codes and will not break them, and accounts for wide-characters (such +// as East-Asian characters and emojis). +// This treats the text as a sequence of graphemes. func Truncate(s string, length int, tail string) string { + return truncate(GraphemeWidth, s, length, tail) +} + +// TruncateWc truncates a string to a given length, adding a tail to the end if +// the string is longer than the given length. 
This function is aware of ANSI +// escape codes and will not break them, and accounts for wide-characters (such +// as East-Asian characters and emojis). +// This treats the text as a sequence of wide characters and runes. +func TruncateWc(s string, length int, tail string) string { + return truncate(WcWidth, s, length, tail) +} + +func truncate(m Method, s string, length int, tail string) string { if sw := StringWidth(s); sw <= length { return s } @@ -33,6 +84,7 @@ func Truncate(s string, length int, tail string) string { // Here we iterate over the bytes of the string and collect printable // characters and runes. We also keep track of the width of the string // in cells. + // // Once we reach the given length, we start ignoring characters and only // collect ANSI escape codes until we reach the end of string. for i < len(b) { @@ -41,6 +93,9 @@ func Truncate(s string, length int, tail string) string { // This action happens when we transition to the Utf8State. var width int cluster, _, width, _ = uniseg.FirstGraphemeCluster(b[i:], -1) + if m == WcWidth { + width = runewidth.StringWidth(string(cluster)) + } // increment the index by the length of the cluster i += len(cluster) @@ -105,3 +160,123 @@ func Truncate(s string, length int, tail string) string { return buf.String() } + +// TruncateLeft truncates a string from the left side by removing n characters, +// adding a prefix to the beginning if the string is longer than n. +// This function is aware of ANSI escape codes and will not break them, and +// accounts for wide-characters (such as East-Asian characters and emojis). +// This treats the text as a sequence of graphemes. +func TruncateLeft(s string, n int, prefix string) string { + return truncateLeft(GraphemeWidth, s, n, prefix) +} + +// TruncateLeftWc truncates a string from the left side by removing n characters, +// adding a prefix to the beginning if the string is longer than n. +// This function is aware of ANSI escape codes and will not break them, and +// accounts for wide-characters (such as East-Asian characters and emojis). +// This treats the text as a sequence of wide characters and runes. +func TruncateLeftWc(s string, n int, prefix string) string { + return truncateLeft(WcWidth, s, n, prefix) +} + +func truncateLeft(m Method, s string, n int, prefix string) string { + if n <= 0 { + return s + } + + var cluster []byte + var buf bytes.Buffer + curWidth := 0 + ignoring := true + pstate := parser.GroundState + b := []byte(s) + i := 0 + + for i < len(b) { + if !ignoring { + buf.Write(b[i:]) + break + } + + state, action := parser.Table.Transition(pstate, b[i]) + if state == parser.Utf8State { + var width int + cluster, _, width, _ = uniseg.FirstGraphemeCluster(b[i:], -1) + if m == WcWidth { + width = runewidth.StringWidth(string(cluster)) + } + + i += len(cluster) + curWidth += width + + if curWidth > n && ignoring { + ignoring = false + buf.WriteString(prefix) + } + + if ignoring { + continue + } + + if curWidth > n { + buf.Write(cluster) + } + + pstate = parser.GroundState + continue + } + + switch action { + case parser.PrintAction: + curWidth++ + + if curWidth > n && ignoring { + ignoring = false + buf.WriteString(prefix) + } + + if ignoring { + i++ + continue + } + + fallthrough + default: + buf.WriteByte(b[i]) + i++ + } + + pstate = state + if curWidth > n && ignoring { + ignoring = false + buf.WriteString(prefix) + } + } + + return buf.String() +} + +// ByteToGraphemeRange takes start and stop byte positions and converts them to +// grapheme-aware char positions. 
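+// Positions advance by max(1, width) per grapheme, so zero-width clusters
+// still count as one character.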
+// You can use this with [Truncate], [TruncateLeft], and [Cut]. +func ByteToGraphemeRange(str string, byteStart, byteStop int) (charStart, charStop int) { + bytePos, charPos := 0, 0 + gr := uniseg.NewGraphemes(str) + for byteStart > bytePos { + if !gr.Next() { + break + } + bytePos += len(gr.Str()) + charPos += max(1, gr.Width()) + } + charStart = charPos + for byteStop > bytePos { + if !gr.Next() { + break + } + bytePos += len(gr.Str()) + charPos += max(1, gr.Width()) + } + charStop = charPos + return +} diff --git a/vendor/github.com/charmbracelet/x/ansi/util.go b/vendor/github.com/charmbracelet/x/ansi/util.go index 767093f92..301ef15ff 100644 --- a/vendor/github.com/charmbracelet/x/ansi/util.go +++ b/vendor/github.com/charmbracelet/x/ansi/util.go @@ -3,6 +3,10 @@ package ansi import ( "fmt" "image/color" + "strconv" + "strings" + + "github.com/lucasb-eyer/go-colorful" ) // colorToHexString returns a hex string representation of a color. @@ -27,3 +31,76 @@ func colorToHexString(c color.Color) string { func rgbToHex(r, g, b uint32) uint32 { return r<<16 + g<<8 + b } + +type shiftable interface { + ~uint | ~uint16 | ~uint32 | ~uint64 +} + +func shift[T shiftable](x T) T { + if x > 0xff { + x >>= 8 + } + return x +} + +// XParseColor is a helper function that parses a string into a color.Color. It +// provides a similar interface to the XParseColor function in Xlib. It +// supports the following formats: +// +// - #RGB +// - #RRGGBB +// - rgb:RRRR/GGGG/BBBB +// - rgba:RRRR/GGGG/BBBB/AAAA +// +// If the string is not a valid color, nil is returned. +// +// See: https://linux.die.net/man/3/xparsecolor +func XParseColor(s string) color.Color { + switch { + case strings.HasPrefix(s, "#"): + c, err := colorful.Hex(s) + if err != nil { + return nil + } + + return c + case strings.HasPrefix(s, "rgb:"): + parts := strings.Split(s[4:], "/") + if len(parts) != 3 { + return nil + } + + r, _ := strconv.ParseUint(parts[0], 16, 32) + g, _ := strconv.ParseUint(parts[1], 16, 32) + b, _ := strconv.ParseUint(parts[2], 16, 32) + + return color.RGBA{uint8(shift(r)), uint8(shift(g)), uint8(shift(b)), 255} //nolint:gosec + case strings.HasPrefix(s, "rgba:"): + parts := strings.Split(s[5:], "/") + if len(parts) != 4 { + return nil + } + + r, _ := strconv.ParseUint(parts[0], 16, 32) + g, _ := strconv.ParseUint(parts[1], 16, 32) + b, _ := strconv.ParseUint(parts[2], 16, 32) + a, _ := strconv.ParseUint(parts[3], 16, 32) + + return color.RGBA{uint8(shift(r)), uint8(shift(g)), uint8(shift(b)), uint8(shift(a))} //nolint:gosec + } + return nil +} + +type ordered interface { + ~int | ~int8 | ~int16 | ~int32 | ~int64 | + ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr | + ~float32 | ~float64 | + ~string +} + +func max[T ordered](a, b T) T { //nolint:predeclared + if a > b { + return a + } + return b +} diff --git a/vendor/github.com/charmbracelet/x/ansi/width.go b/vendor/github.com/charmbracelet/x/ansi/width.go index 80890e42c..d0487d350 100644 --- a/vendor/github.com/charmbracelet/x/ansi/width.go +++ b/vendor/github.com/charmbracelet/x/ansi/width.go @@ -4,6 +4,7 @@ import ( "bytes" "github.com/charmbracelet/x/ansi/parser" + "github.com/mattn/go-runewidth" "github.com/rivo/uniseg" ) @@ -62,7 +63,21 @@ func Strip(s string) string { // cells that the string will occupy when printed in a terminal. ANSI escape // codes are ignored and wide characters (such as East Asians and emojis) are // accounted for. +// This treats the text as a sequence of grapheme clusters. 
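+// For example, StringWidth("\x1b[31m世界\x1b[m") is 4: the SGR sequences are
+// skipped and each CJK rune occupies two cells.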
func StringWidth(s string) int { + return stringWidth(GraphemeWidth, s) +} + +// StringWidthWc returns the width of a string in cells. This is the number of +// cells that the string will occupy when printed in a terminal. ANSI escape +// codes are ignored and wide characters (such as East Asians and emojis) are +// accounted for. +// This treats the text as a sequence of wide characters and runes. +func StringWidthWc(s string) int { + return stringWidth(WcWidth, s) +} + +func stringWidth(m Method, s string) int { if s == "" { return 0 } @@ -78,6 +93,9 @@ func StringWidth(s string) int { if state == parser.Utf8State { var w int cluster, _, w, _ = uniseg.FirstGraphemeClusterInString(s[i:], -1) + if m == WcWidth { + w = runewidth.StringWidth(cluster) + } width += w i += len(cluster) - 1 pstate = parser.GroundState diff --git a/vendor/github.com/charmbracelet/x/ansi/winop.go b/vendor/github.com/charmbracelet/x/ansi/winop.go new file mode 100644 index 000000000..0238780d0 --- /dev/null +++ b/vendor/github.com/charmbracelet/x/ansi/winop.go @@ -0,0 +1,53 @@ +package ansi + +import ( + "strconv" + "strings" +) + +const ( + // ResizeWindowWinOp is a window operation that resizes the terminal + // window. + ResizeWindowWinOp = 4 + + // RequestWindowSizeWinOp is a window operation that requests a report of + // the size of the terminal window in pixels. The response is in the form: + // CSI 4 ; height ; width t + RequestWindowSizeWinOp = 14 + + // RequestCellSizeWinOp is a window operation that requests a report of + // the size of the terminal cell size in pixels. The response is in the form: + // CSI 6 ; height ; width t + RequestCellSizeWinOp = 16 +) + +// WindowOp (XTWINOPS) is a sequence that manipulates the terminal window. +// +// CSI Ps ; Ps ; Ps t +// +// Ps is a semicolon-separated list of parameters. +// See https://invisible-island.net/xterm/ctlseqs/ctlseqs.html#h4-Functions-using-CSI-_-ordered-by-the-final-character-lparen-s-rparen:CSI-Ps;Ps;Ps-t.1EB0 +func WindowOp(p int, ps ...int) string { + if p <= 0 { + return "" + } + + if len(ps) == 0 { + return "\x1b[" + strconv.Itoa(p) + "t" + } + + params := make([]string, 0, len(ps)+1) + params = append(params, strconv.Itoa(p)) + for _, p := range ps { + if p >= 0 { + params = append(params, strconv.Itoa(p)) + } + } + + return "\x1b[" + strings.Join(params, ";") + "t" +} + +// XTWINOPS is an alias for [WindowOp]. +func XTWINOPS(p int, ps ...int) string { + return WindowOp(p, ps...) +} diff --git a/vendor/github.com/charmbracelet/x/ansi/wrap.go b/vendor/github.com/charmbracelet/x/ansi/wrap.go index 2cab5cec1..6b9958008 100644 --- a/vendor/github.com/charmbracelet/x/ansi/wrap.go +++ b/vendor/github.com/charmbracelet/x/ansi/wrap.go @@ -6,6 +6,7 @@ import ( "unicode/utf8" "github.com/charmbracelet/x/ansi/parser" + "github.com/mattn/go-runewidth" "github.com/rivo/uniseg" ) @@ -17,7 +18,22 @@ const nbsp = 0xA0 // wide-characters in the string. // When preserveSpace is true, spaces at the beginning of a line will be // preserved. +// This treats the text as a sequence of graphemes. func Hardwrap(s string, limit int, preserveSpace bool) string { + return hardwrap(GraphemeWidth, s, limit, preserveSpace) +} + +// HardwrapWc wraps a string or a block of text to a given line length, breaking +// word boundaries. This will preserve ANSI escape codes and will account for +// wide-characters in the string. +// When preserveSpace is true, spaces at the beginning of a line will be +// preserved. 
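+// For illustration, HardwrapWc("foobar", 3, false) breaks mid-word and
+// returns "foo" and "bar" on separate lines.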
+// This treats the text as a sequence of wide characters and runes. +func HardwrapWc(s string, limit int, preserveSpace bool) string { + return hardwrap(WcWidth, s, limit, preserveSpace) +} + +func hardwrap(m Method, s string, limit int, preserveSpace bool) string { if limit < 1 { return s } @@ -42,6 +58,9 @@ func Hardwrap(s string, limit int, preserveSpace bool) string { if state == parser.Utf8State { var width int cluster, _, width, _ = uniseg.FirstGraphemeCluster(b[i:], -1) + if m == WcWidth { + width = runewidth.StringWidth(string(cluster)) + } i += len(cluster) if curWidth+width > limit { @@ -83,7 +102,9 @@ func Hardwrap(s string, limit int, preserveSpace bool) string { } buf.WriteByte(b[i]) - curWidth++ + if action == parser.PrintAction { + curWidth++ + } default: buf.WriteByte(b[i]) } @@ -106,7 +127,27 @@ func Hardwrap(s string, limit int, preserveSpace bool) string { // breakpoint. // // Note: breakpoints must be a string of 1-cell wide rune characters. +// +// This treats the text as a sequence of graphemes. func Wordwrap(s string, limit int, breakpoints string) string { + return wordwrap(GraphemeWidth, s, limit, breakpoints) +} + +// WordwrapWc wraps a string or a block of text to a given line length, not +// breaking word boundaries. This will preserve ANSI escape codes and will +// account for wide-characters in the string. +// The breakpoints string is a list of characters that are considered +// breakpoints for word wrapping. A hyphen (-) is always considered a +// breakpoint. +// +// Note: breakpoints must be a string of 1-cell wide rune characters. +// +// This treats the text as a sequence of wide characters and runes. +func WordwrapWc(s string, limit int, breakpoints string) string { + return wordwrap(WcWidth, s, limit, breakpoints) +} + +func wordwrap(m Method, s string, limit int, breakpoints string) string { if limit < 1 { return s } @@ -152,6 +193,9 @@ func Wordwrap(s string, limit int, breakpoints string) string { if state == parser.Utf8State { var width int cluster, _, width, _ = uniseg.FirstGraphemeCluster(b[i:], -1) + if m == WcWidth { + width = runewidth.StringWidth(string(cluster)) + } i += len(cluster) r, _ := utf8.DecodeRune(cluster) @@ -234,7 +278,26 @@ func Wordwrap(s string, limit int, breakpoints string) string { // (-) is always considered a breakpoint. // // Note: breakpoints must be a string of 1-cell wide rune characters. +// +// This treats the text as a sequence of graphemes. func Wrap(s string, limit int, breakpoints string) string { + return wrap(GraphemeWidth, s, limit, breakpoints) +} + +// WrapWc wraps a string or a block of text to a given line length, breaking word +// boundaries if necessary. This will preserve ANSI escape codes and will +// account for wide-characters in the string. The breakpoints string is a list +// of characters that are considered breakpoints for word wrapping. A hyphen +// (-) is always considered a breakpoint. +// +// Note: breakpoints must be a string of 1-cell wide rune characters. +// +// This treats the text as a sequence of wide characters and runes. 
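[Editor's note: an illustrative sketch, not part of this patch. The Grapheme/Wc function pairs added in width.go and wrap.go differ only in how cell widths are measured; assuming the vendored import path github.com/charmbracelet/x/ansi:]

```go
package main

import (
	"fmt"

	"github.com/charmbracelet/x/ansi"
)

func main() {
	s := "\x1b[1mHello, 世界!\x1b[0m"

	// Widths ignore the SGR escapes; each CJK rune occupies two cells.
	fmt.Println(ansi.StringWidth(s))   // grapheme-cluster measuring
	fmt.Println(ansi.StringWidthWc(s)) // wcwidth-style measuring

	// Hard-wrap to 6 cells, breaking mid-word; escape codes are preserved
	// across the inserted newlines.
	fmt.Println(ansi.Hardwrap(s, 6, true))
}
```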
+func WrapWc(s string, limit int, breakpoints string) string { + return wrap(WcWidth, s, limit, breakpoints) +} + +func wrap(m Method, s string, limit int, breakpoints string) string { if limit < 1 { return s } @@ -280,6 +343,9 @@ func Wrap(s string, limit int, breakpoints string) string { if state == parser.Utf8State { var width int cluster, _, width, _ = uniseg.FirstGraphemeCluster(b[i:], -1) + if m == WcWidth { + width = runewidth.StringWidth(string(cluster)) + } i += len(cluster) r, _ := utf8.DecodeRune(cluster) @@ -349,6 +415,9 @@ func Wrap(s string, limit int, breakpoints string) string { curWidth++ } default: + if curWidth == limit { + addNewline() + } word.WriteRune(r) wordLen++ @@ -373,14 +442,17 @@ func Wrap(s string, limit int, breakpoints string) string { i++ } - if word.Len() != 0 { - // Preserve ANSI wrapped spaces at the end of string + if wordLen == 0 { if curWidth+space.Len() > limit { - buf.WriteByte('\n') + curWidth = 0 + } else { + // preserve whitespaces + buf.Write(space.Bytes()) } - addSpace() + space.Reset() } - buf.Write(word.Bytes()) + + addWord() return buf.String() } diff --git a/vendor/github.com/charmbracelet/x/ansi/xterm.go b/vendor/github.com/charmbracelet/x/ansi/xterm.go index f87712a64..83fd4bdc4 100644 --- a/vendor/github.com/charmbracelet/x/ansi/xterm.go +++ b/vendor/github.com/charmbracelet/x/ansi/xterm.go @@ -2,6 +2,84 @@ package ansi import "strconv" +// KeyModifierOptions (XTMODKEYS) sets/resets xterm key modifier options. +// +// Default is 0. +// +// CSI > Pp m +// CSI > Pp ; Pv m +// +// If Pv is omitted, the resource is reset to its initial value. +// +// See: https://invisible-island.net/xterm/ctlseqs/ctlseqs.html#h3-Functions-using-CSI-_-ordered-by-the-final-character_s_ +func KeyModifierOptions(p int, vs ...int) string { + var pp, pv string + if p > 0 { + pp = strconv.Itoa(p) + } + + if len(vs) == 0 { + return "\x1b[>" + strconv.Itoa(p) + "m" + } + + v := vs[0] + if v > 0 { + pv = strconv.Itoa(v) + return "\x1b[>" + pp + ";" + pv + "m" + } + + return "\x1b[>" + pp + "m" +} + +// XTMODKEYS is an alias for [KeyModifierOptions]. +func XTMODKEYS(p int, vs ...int) string { + return KeyModifierOptions(p, vs...) +} + +// SetKeyModifierOptions sets xterm key modifier options. +// This is an alias for [KeyModifierOptions]. +func SetKeyModifierOptions(pp int, pv int) string { + return KeyModifierOptions(pp, pv) +} + +// ResetKeyModifierOptions resets xterm key modifier options. +// This is an alias for [KeyModifierOptions]. +func ResetKeyModifierOptions(pp int) string { + return KeyModifierOptions(pp) +} + +// QueryKeyModifierOptions (XTQMODKEYS) requests xterm key modifier options. +// +// Default is 0. +// +// CSI ? Pp m +// +// See: https://invisible-island.net/xterm/ctlseqs/ctlseqs.html#h3-Functions-using-CSI-_-ordered-by-the-final-character_s_ +func QueryKeyModifierOptions(pp int) string { + var p string + if pp > 0 { + p = strconv.Itoa(pp) + } + return "\x1b[?" + p + "m" +} + +// XTQMODKEYS is an alias for [QueryKeyModifierOptions]. +func XTQMODKEYS(pp int) string { + return QueryKeyModifierOptions(pp) +} + +// Modify Other Keys (modifyOtherKeys) is an xterm feature that allows the +// terminal to modify the behavior of certain keys to send different escape +// sequences when pressed. 
+// +// See: https://invisible-island.net/xterm/manpage/xterm.html#VT100-Widget-Resources:modifyOtherKeys +const ( + SetModifyOtherKeys1 = "\x1b[>4;1m" + SetModifyOtherKeys2 = "\x1b[>4;2m" + ResetModifyOtherKeys = "\x1b[>4m" + QueryModifyOtherKeys = "\x1b[?4m" +) + // ModifyOtherKeys returns a sequence that sets XTerm modifyOtherKeys mode. // The mode argument specifies the mode to set. // @@ -13,6 +91,8 @@ import "strconv" // // See: https://invisible-island.net/xterm/ctlseqs/ctlseqs.html#h3-Functions-using-CSI-_-ordered-by-the-final-character_s_ // See: https://invisible-island.net/xterm/manpage/xterm.html#VT100-Widget-Resources:modifyOtherKeys +// +// Deprecated: use [SetModifyOtherKeys1] or [SetModifyOtherKeys2] instead. func ModifyOtherKeys(mode int) string { return "\x1b[>4;" + strconv.Itoa(mode) + "m" } @@ -23,6 +103,8 @@ func ModifyOtherKeys(mode int) string { // // See: https://invisible-island.net/xterm/ctlseqs/ctlseqs.html#h3-Functions-using-CSI-_-ordered-by-the-final-character_s_ // See: https://invisible-island.net/xterm/manpage/xterm.html#VT100-Widget-Resources:modifyOtherKeys +// +// Deprecated: use [ResetModifyOtherKeys] instead. const DisableModifyOtherKeys = "\x1b[>4;0m" // EnableModifyOtherKeys1 enables the modifyOtherKeys mode 1. @@ -31,6 +113,8 @@ const DisableModifyOtherKeys = "\x1b[>4;0m" // // See: https://invisible-island.net/xterm/ctlseqs/ctlseqs.html#h3-Functions-using-CSI-_-ordered-by-the-final-character_s_ // See: https://invisible-island.net/xterm/manpage/xterm.html#VT100-Widget-Resources:modifyOtherKeys +// +// Deprecated: use [SetModifyOtherKeys1] instead. const EnableModifyOtherKeys1 = "\x1b[>4;1m" // EnableModifyOtherKeys2 enables the modifyOtherKeys mode 2. @@ -39,6 +123,8 @@ const EnableModifyOtherKeys1 = "\x1b[>4;1m" // // See: https://invisible-island.net/xterm/ctlseqs/ctlseqs.html#h3-Functions-using-CSI-_-ordered-by-the-final-character_s_ // See: https://invisible-island.net/xterm/manpage/xterm.html#VT100-Widget-Resources:modifyOtherKeys +// +// Deprecated: use [SetModifyOtherKeys2] instead. const EnableModifyOtherKeys2 = "\x1b[>4;2m" // RequestModifyOtherKeys requests the modifyOtherKeys mode. @@ -47,4 +133,6 @@ const EnableModifyOtherKeys2 = "\x1b[>4;2m" // // See: https://invisible-island.net/xterm/ctlseqs/ctlseqs.html#h3-Functions-using-CSI-_-ordered-by-the-final-character_s_ // See: https://invisible-island.net/xterm/manpage/xterm.html#VT100-Widget-Resources:modifyOtherKeys +// +// Deprecated: use [QueryModifyOtherKeys] instead. const RequestModifyOtherKeys = "\x1b[?4m" diff --git a/vendor/github.com/charmbracelet/x/cellbuf/LICENSE b/vendor/github.com/charmbracelet/x/cellbuf/LICENSE new file mode 100644 index 000000000..65a5654e2 --- /dev/null +++ b/vendor/github.com/charmbracelet/x/cellbuf/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023 Charmbracelet, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/charmbracelet/x/cellbuf/buffer.go b/vendor/github.com/charmbracelet/x/cellbuf/buffer.go
new file mode 100644
index 000000000..790d1f7c4
--- /dev/null
+++ b/vendor/github.com/charmbracelet/x/cellbuf/buffer.go
@@ -0,0 +1,473 @@
+package cellbuf
+
+import (
+	"strings"
+
+	"github.com/mattn/go-runewidth"
+	"github.com/rivo/uniseg"
+)
+
+// NewCell returns a new cell. This is a convenience function that initializes a
+// new cell with the given content. The cell's width is determined by the
+// content using [runewidth.RuneWidth].
+// This will only account for the first combined rune in the content. If the
+// content is empty, it will return an empty cell with a width of 0.
+func NewCell(r rune, comb ...rune) (c *Cell) {
+	c = new(Cell)
+	c.Rune = r
+	c.Width = runewidth.RuneWidth(r)
+	for _, r := range comb {
+		if runewidth.RuneWidth(r) > 0 {
+			break
+		}
+		c.Comb = append(c.Comb, r)
+	}
+	c.Comb = comb
+	c.Width = runewidth.StringWidth(string(append([]rune{r}, comb...)))
+	return
+}
+
+// NewCellString returns a new cell with the given string content. This is a
+// convenience function that initializes a new cell with the given content. The
+// cell's width is determined by the content using [runewidth.StringWidth].
+// This will only use the first combined rune in the string. If the string is
+// empty, it will return an empty cell with a width of 0.
+func NewCellString(s string) (c *Cell) {
+	c = new(Cell)
+	for i, r := range s {
+		if i == 0 {
+			c.Rune = r
+			// We only care about the first rune's width
+			c.Width = runewidth.RuneWidth(r)
+		} else {
+			if runewidth.RuneWidth(r) > 0 {
+				break
+			}
+			c.Comb = append(c.Comb, r)
+		}
+	}
+	return
+}
+
+// NewGraphemeCell returns a new cell. This is a convenience function that
+// initializes a new cell with the given content. The cell's width is determined
+// by the content using [uniseg.FirstGraphemeClusterInString].
+// This is used when the content is a grapheme cluster i.e. a sequence of runes
+// that form a single visual unit.
+// This will only return the first grapheme cluster in the string. If the
+// string is empty, it will return an empty cell with a width of 0.
+func NewGraphemeCell(s string) (c *Cell) {
+	g, _, w, _ := uniseg.FirstGraphemeClusterInString(s, -1)
+	return newGraphemeCell(g, w)
+}
+
+func newGraphemeCell(s string, w int) (c *Cell) {
+	c = new(Cell)
+	c.Width = w
+	for i, r := range s {
+		if i == 0 {
+			c.Rune = r
+		} else {
+			c.Comb = append(c.Comb, r)
+		}
+	}
+	return
+}
+
+// Line represents a line in the terminal.
+// A nil cell represents a blank cell, a cell with a space character and a
+// width of 1.
+// If a cell has no content and a width of 0, it is a placeholder for a wide
+// cell.
+type Line []*Cell
+
+// Width returns the width of the line.
+func (l Line) Width() int {
+	return len(l)
+}
+
+// Len returns the length of the line.
+func (l Line) Len() int {
+	return len(l)
+}
+
+// String returns the string representation of the line. Any trailing spaces
+// are removed.
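[Editor's note: an illustrative sketch, not part of this patch, showing the cell constructors above; assuming the vendored import path github.com/charmbracelet/x/cellbuf:]

```go
package main

import (
	"fmt"

	"github.com/charmbracelet/x/cellbuf"
)

func main() {
	// A plain ASCII rune occupies a single cell.
	a := cellbuf.NewCell('a')
	fmt.Println(a.String(), a.Width) // a 1

	// NewGraphemeCell keeps only the first grapheme cluster of its input;
	// the emoji below is one cluster that is two cells wide.
	g := cellbuf.NewGraphemeCell("👋 hello")
	fmt.Println(g.String(), g.Width) // 👋 2
}
```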
+func (l Line) String() (s string) { + for _, c := range l { + if c == nil { + s += " " + } else if c.Empty() { + continue + } else { + s += c.String() + } + } + s = strings.TrimRight(s, " ") + return +} + +// At returns the cell at the given x position. +// If the cell does not exist, it returns nil. +func (l Line) At(x int) *Cell { + if x < 0 || x >= len(l) { + return nil + } + + c := l[x] + if c == nil { + newCell := BlankCell + return &newCell + } + + return c +} + +// Set sets the cell at the given x position. If a wide cell is given, it will +// set the cell and the following cells to [EmptyCell]. It returns true if the +// cell was set. +func (l Line) Set(x int, c *Cell) bool { + return l.set(x, c, true) +} + +func (l Line) set(x int, c *Cell, clone bool) bool { + width := l.Width() + if x < 0 || x >= width { + return false + } + + // When a wide cell is partially overwritten, we need + // to fill the rest of the cell with space cells to + // avoid rendering issues. + prev := l.At(x) + if prev != nil && prev.Width > 1 { + // Writing to the first wide cell + for j := 0; j < prev.Width && x+j < l.Width(); j++ { + l[x+j] = prev.Clone().Blank() + } + } else if prev != nil && prev.Width == 0 { + // Writing to wide cell placeholders + for j := 1; j < maxCellWidth && x-j >= 0; j++ { + wide := l.At(x - j) + if wide != nil && wide.Width > 1 && j < wide.Width { + for k := 0; k < wide.Width; k++ { + l[x-j+k] = wide.Clone().Blank() + } + break + } + } + } + + if clone && c != nil { + // Clone the cell if not nil. + c = c.Clone() + } + + if c != nil && x+c.Width > width { + // If the cell is too wide, we write blanks with the same style. + for i := 0; i < c.Width && x+i < width; i++ { + l[x+i] = c.Clone().Blank() + } + } else { + l[x] = c + + // Mark wide cells with an empty cell zero width + // We set the wide cell down below + if c != nil && c.Width > 1 { + for j := 1; j < c.Width && x+j < l.Width(); j++ { + var wide Cell + l[x+j] = &wide + } + } + } + + return true +} + +// Buffer is a 2D grid of cells representing a screen or terminal. +type Buffer struct { + // Lines holds the lines of the buffer. + Lines []Line +} + +// NewBuffer creates a new buffer with the given width and height. +// This is a convenience function that initializes a new buffer and resizes it. +func NewBuffer(width int, height int) *Buffer { + b := new(Buffer) + b.Resize(width, height) + return b +} + +// String returns the string representation of the buffer. +func (b *Buffer) String() (s string) { + for i, l := range b.Lines { + s += l.String() + if i < len(b.Lines)-1 { + s += "\r\n" + } + } + return +} + +// Line returns a pointer to the line at the given y position. +// If the line does not exist, it returns nil. +func (b *Buffer) Line(y int) Line { + if y < 0 || y >= len(b.Lines) { + return nil + } + return b.Lines[y] +} + +// Cell implements Screen. +func (b *Buffer) Cell(x int, y int) *Cell { + if y < 0 || y >= len(b.Lines) { + return nil + } + return b.Lines[y].At(x) +} + +// maxCellWidth is the maximum width a terminal cell can get. +const maxCellWidth = 4 + +// SetCell sets the cell at the given x, y position. +func (b *Buffer) SetCell(x, y int, c *Cell) bool { + return b.setCell(x, y, c, true) +} + +// setCell sets the cell at the given x, y position. This will always clone and +// allocates a new cell if c is not nil. +func (b *Buffer) setCell(x, y int, c *Cell, clone bool) bool { + if y < 0 || y >= len(b.Lines) { + return false + } + return b.Lines[y].set(x, c, clone) +} + +// Height implements Screen. 
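[Editor's note: an illustrative sketch, not part of this patch. Buffer ties the pieces above together; a wide rune occupies one column and owns a zero-width placeholder cell after it, which Line.Set maintains:]

```go
package main

import (
	"fmt"

	"github.com/charmbracelet/x/cellbuf"
)

func main() {
	b := cellbuf.NewBuffer(10, 2)

	// Write "hi 世" at the top-left, advancing by each cell's width so the
	// wide rune lands on one column and the placeholder follows it.
	x := 0
	for _, r := range "hi 世" {
		c := cellbuf.NewCell(r)
		b.SetCell(x, 0, c)
		x += c.Width
	}

	fmt.Printf("%q\n", b.String()) // "hi 世\r\n"
}
```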
+func (b *Buffer) Height() int {
+	return len(b.Lines)
+}
+
+// Width implements Screen.
+func (b *Buffer) Width() int {
+	if len(b.Lines) == 0 {
+		return 0
+	}
+	return b.Lines[0].Width()
+}
+
+// Bounds returns the bounds of the buffer.
+func (b *Buffer) Bounds() Rectangle {
+	return Rect(0, 0, b.Width(), b.Height())
+}
+
+// Resize resizes the buffer to the given width and height.
+func (b *Buffer) Resize(width int, height int) {
+	if width == 0 || height == 0 {
+		b.Lines = nil
+		return
+	}
+
+	if width > b.Width() {
+		line := make(Line, width-b.Width())
+		for i := range b.Lines {
+			b.Lines[i] = append(b.Lines[i], line...)
+		}
+	} else if width < b.Width() {
+		for i := range b.Lines {
+			b.Lines[i] = b.Lines[i][:width]
+		}
+	}
+
+	if height > len(b.Lines) {
+		for i := len(b.Lines); i < height; i++ {
+			b.Lines = append(b.Lines, make(Line, width))
+		}
+	} else if height < len(b.Lines) {
+		b.Lines = b.Lines[:height]
+	}
+}
+
+// FillRect fills the given rectangle of the buffer with the given cell.
+func (b *Buffer) FillRect(c *Cell, rect Rectangle) {
+	cellWidth := 1
+	if c != nil && c.Width > 1 {
+		cellWidth = c.Width
+	}
+	for y := rect.Min.Y; y < rect.Max.Y; y++ {
+		for x := rect.Min.X; x < rect.Max.X; x += cellWidth {
+			b.setCell(x, y, c, false) //nolint:errcheck
+		}
+	}
+}
+
+// Fill fills the entire buffer with the given cell.
+func (b *Buffer) Fill(c *Cell) {
+	b.FillRect(c, b.Bounds())
+}
+
+// Clear clears the entire buffer with blank cells.
+func (b *Buffer) Clear() {
+	b.ClearRect(b.Bounds())
+}
+
+// ClearRect clears the buffer with blank cells within the specified
+// rectangle. Only cells within the rectangle's bounds are affected.
+func (b *Buffer) ClearRect(rect Rectangle) {
+	b.FillRect(nil, rect)
+}
+
+// InsertLine inserts n lines at the given line position, with the given
+// optional cell, across the entire buffer. This is a convenience method for
+// [Buffer.InsertLineRect] with the buffer's bounds. This follows terminal
+// [ansi.IL] behavior.
+func (b *Buffer) InsertLine(y, n int, c *Cell) {
+	b.InsertLineRect(y, n, c, b.Bounds())
+}
+
+// InsertLineRect inserts new lines at the given line position, with the
+// given optional cell, within the rectangle bounds. Only cells within the
+// rectangle's horizontal bounds are affected. Lines are pushed out of the
+// rectangle bounds and lost. This follows terminal [ansi.IL] behavior.
+func (b *Buffer) InsertLineRect(y, n int, c *Cell, rect Rectangle) {
+	if n <= 0 || y < rect.Min.Y || y >= rect.Max.Y || y >= b.Height() {
+		return
+	}
+
+	// Limit number of lines to insert to available space
+	if y+n > rect.Max.Y {
+		n = rect.Max.Y - y
+	}
+
+	// Move existing lines down within the bounds
+	for i := rect.Max.Y - 1; i >= y+n; i-- {
+		for x := rect.Min.X; x < rect.Max.X; x++ {
+			// We don't need to clone c here because we're just moving lines down.
+			b.setCell(x, i, b.Lines[i-n][x], false)
+		}
+	}
+
+	// Clear the newly inserted lines within bounds
+	for i := y; i < y+n; i++ {
+		for x := rect.Min.X; x < rect.Max.X; x++ {
+			b.setCell(x, i, c, true)
+		}
+	}
+}
+
+// DeleteLineRect deletes lines at the given line position, with the given
+// optional cell, within the rectangle bounds. Only cells within the
+// rectangle's bounds are affected. Lines are shifted up within the bounds and
+// new blank lines are created at the bottom.
This follows terminal [ansi.DL] +// behavior. +func (b *Buffer) DeleteLineRect(y, n int, c *Cell, rect Rectangle) { + if n <= 0 || y < rect.Min.Y || y >= rect.Max.Y || y >= b.Height() { + return + } + + // Limit deletion count to available space in scroll region + if n > rect.Max.Y-y { + n = rect.Max.Y - y + } + + // Shift cells up within the bounds + for dst := y; dst < rect.Max.Y-n; dst++ { + src := dst + n + for x := rect.Min.X; x < rect.Max.X; x++ { + // We don't need to clone c here because we're just moving cells up. + // b.lines[dst][x] = b.lines[src][x] + b.setCell(x, dst, b.Lines[src][x], false) + } + } + + // Fill the bottom n lines with blank cells + for i := rect.Max.Y - n; i < rect.Max.Y; i++ { + for x := rect.Min.X; x < rect.Max.X; x++ { + b.setCell(x, i, c, true) + } + } +} + +// DeleteLine deletes n lines at the given line position, with the given +// optional cell, within the specified rectangles. If no rectangles are +// specified, it deletes lines in the entire buffer. +func (b *Buffer) DeleteLine(y, n int, c *Cell) { + b.DeleteLineRect(y, n, c, b.Bounds()) +} + +// InsertCell inserts new cells at the given position, with the given optional +// cell, within the specified rectangles. If no rectangles are specified, it +// inserts cells in the entire buffer. This follows terminal [ansi.ICH] +// behavior. +func (b *Buffer) InsertCell(x, y, n int, c *Cell) { + b.InsertCellRect(x, y, n, c, b.Bounds()) +} + +// InsertCellRect inserts new cells at the given position, with the given +// optional cell, within the rectangle bounds. Only cells within the +// rectangle's bounds are affected, following terminal [ansi.ICH] behavior. +func (b *Buffer) InsertCellRect(x, y, n int, c *Cell, rect Rectangle) { + if n <= 0 || y < rect.Min.Y || y >= rect.Max.Y || y >= b.Height() || + x < rect.Min.X || x >= rect.Max.X || x >= b.Width() { + return + } + + // Limit number of cells to insert to available space + if x+n > rect.Max.X { + n = rect.Max.X - x + } + + // Move existing cells within rectangle bounds to the right + for i := rect.Max.X - 1; i >= x+n && i-n >= rect.Min.X; i-- { + // We don't need to clone c here because we're just moving cells to the + // right. + // b.lines[y][i] = b.lines[y][i-n] + b.setCell(i, y, b.Lines[y][i-n], false) + } + + // Clear the newly inserted cells within rectangle bounds + for i := x; i < x+n && i < rect.Max.X; i++ { + b.setCell(i, y, c, true) + } +} + +// DeleteCell deletes cells at the given position, with the given optional +// cell, within the specified rectangles. If no rectangles are specified, it +// deletes cells in the entire buffer. This follows terminal [ansi.DCH] +// behavior. +func (b *Buffer) DeleteCell(x, y, n int, c *Cell) { + b.DeleteCellRect(x, y, n, c, b.Bounds()) +} + +// DeleteCellRect deletes cells at the given position, with the given +// optional cell, within the rectangle bounds. Only cells within the +// rectangle's bounds are affected, following terminal [ansi.DCH] behavior. +func (b *Buffer) DeleteCellRect(x, y, n int, c *Cell, rect Rectangle) { + if n <= 0 || y < rect.Min.Y || y >= rect.Max.Y || y >= b.Height() || + x < rect.Min.X || x >= rect.Max.X || x >= b.Width() { + return + } + + // Calculate how many positions we can actually delete + remainingCells := rect.Max.X - x + if n > remainingCells { + n = remainingCells + } + + // Shift the remaining cells to the left + for i := x; i < rect.Max.X-n; i++ { + if i+n < rect.Max.X { + // We don't need to clone c here because we're just moving cells to + // the left. 
+ // b.lines[y][i] = b.lines[y][i+n] + b.setCell(i, y, b.Lines[y][i+n], false) + } + } + + // Fill the vacated positions with the given cell + for i := rect.Max.X - n; i < rect.Max.X; i++ { + b.setCell(i, y, c, true) + } +} diff --git a/vendor/github.com/charmbracelet/x/cellbuf/cell.go b/vendor/github.com/charmbracelet/x/cellbuf/cell.go new file mode 100644 index 000000000..991c919e7 --- /dev/null +++ b/vendor/github.com/charmbracelet/x/cellbuf/cell.go @@ -0,0 +1,503 @@ +package cellbuf + +import ( + "github.com/charmbracelet/x/ansi" +) + +var ( + // BlankCell is a cell with a single space, width of 1, and no style or link. + BlankCell = Cell{Rune: ' ', Width: 1} + + // EmptyCell is just an empty cell used for comparisons and as a placeholder + // for wide cells. + EmptyCell = Cell{} +) + +// Cell represents a single cell in the terminal screen. +type Cell struct { + // The style of the cell. Nil style means no style. Zero value prints a + // reset sequence. + Style Style + + // Link is the hyperlink of the cell. + Link Link + + // Comb is the combining runes of the cell. This is nil if the cell is a + // single rune or if it's a zero width cell that is part of a wider cell. + Comb []rune + + // Width is the mono-space width of the grapheme cluster. + Width int + + // Rune is the main rune of the cell. This is zero if the cell is part of a + // wider cell. + Rune rune +} + +// Append appends runes to the cell without changing the width. This is useful +// when we want to use the cell to store escape sequences or other runes that +// don't affect the width of the cell. +func (c *Cell) Append(r ...rune) { + for i, r := range r { + if i == 0 && c.Rune == 0 { + c.Rune = r + continue + } + c.Comb = append(c.Comb, r) + } +} + +// String returns the string content of the cell excluding any styles, links, +// and escape sequences. +func (c Cell) String() string { + if c.Rune == 0 { + return "" + } + if len(c.Comb) == 0 { + return string(c.Rune) + } + return string(append([]rune{c.Rune}, c.Comb...)) +} + +// Equal returns whether the cell is equal to the other cell. +func (c *Cell) Equal(o *Cell) bool { + return o != nil && + c.Width == o.Width && + c.Rune == o.Rune && + runesEqual(c.Comb, o.Comb) && + c.Style.Equal(&o.Style) && + c.Link.Equal(&o.Link) +} + +// Empty returns whether the cell is an empty cell. An empty cell is a cell +// with a width of 0, a rune of 0, and no combining runes. +func (c Cell) Empty() bool { + return c.Width == 0 && + c.Rune == 0 && + len(c.Comb) == 0 +} + +// Reset resets the cell to the default state zero value. +func (c *Cell) Reset() { + c.Rune = 0 + c.Comb = nil + c.Width = 0 + c.Style.Reset() + c.Link.Reset() +} + +// Clear returns whether the cell consists of only attributes that don't +// affect appearance of a space character. +func (c *Cell) Clear() bool { + return c.Rune == ' ' && len(c.Comb) == 0 && c.Width == 1 && c.Style.Clear() && c.Link.Empty() +} + +// Clone returns a copy of the cell. +func (c *Cell) Clone() (n *Cell) { + n = new(Cell) + *n = *c + return +} + +// Blank makes the cell a blank cell by setting the rune to a space, comb to +// nil, and the width to 1. +func (c *Cell) Blank() *Cell { + c.Rune = ' ' + c.Comb = nil + c.Width = 1 + return c +} + +// Link represents a hyperlink in the terminal screen. +type Link struct { + URL string + Params string +} + +// String returns a string representation of the hyperlink. +func (h Link) String() string { + return h.URL +} + +// Reset resets the hyperlink to the default state zero value. 
+func (h *Link) Reset() { + h.URL = "" + h.Params = "" +} + +// Equal returns whether the hyperlink is equal to the other hyperlink. +func (h *Link) Equal(o *Link) bool { + return o != nil && h.URL == o.URL && h.Params == o.Params +} + +// Empty returns whether the hyperlink is empty. +func (h Link) Empty() bool { + return h.URL == "" && h.Params == "" +} + +// AttrMask is a bitmask for text attributes that can change the look of text. +// These attributes can be combined to create different styles. +type AttrMask uint8 + +// These are the available text attributes that can be combined to create +// different styles. +const ( + BoldAttr AttrMask = 1 << iota + FaintAttr + ItalicAttr + SlowBlinkAttr + RapidBlinkAttr + ReverseAttr + ConcealAttr + StrikethroughAttr + + ResetAttr AttrMask = 0 +) + +// UnderlineStyle is the style of underline to use for text. +type UnderlineStyle = ansi.UnderlineStyle + +// These are the available underline styles. +const ( + NoUnderline = ansi.NoUnderlineStyle + SingleUnderline = ansi.SingleUnderlineStyle + DoubleUnderline = ansi.DoubleUnderlineStyle + CurlyUnderline = ansi.CurlyUnderlineStyle + DottedUnderline = ansi.DottedUnderlineStyle + DashedUnderline = ansi.DashedUnderlineStyle +) + +// Style represents the Style of a cell. +type Style struct { + Fg ansi.Color + Bg ansi.Color + Ul ansi.Color + Attrs AttrMask + UlStyle UnderlineStyle +} + +// Sequence returns the ANSI sequence that sets the style. +func (s Style) Sequence() string { + if s.Empty() { + return ansi.ResetStyle + } + + var b ansi.Style + + if s.Attrs != 0 { + if s.Attrs&BoldAttr != 0 { + b = b.Bold() + } + if s.Attrs&FaintAttr != 0 { + b = b.Faint() + } + if s.Attrs&ItalicAttr != 0 { + b = b.Italic() + } + if s.Attrs&SlowBlinkAttr != 0 { + b = b.SlowBlink() + } + if s.Attrs&RapidBlinkAttr != 0 { + b = b.RapidBlink() + } + if s.Attrs&ReverseAttr != 0 { + b = b.Reverse() + } + if s.Attrs&ConcealAttr != 0 { + b = b.Conceal() + } + if s.Attrs&StrikethroughAttr != 0 { + b = b.Strikethrough() + } + } + if s.UlStyle != NoUnderline { + switch s.UlStyle { + case SingleUnderline: + b = b.Underline() + case DoubleUnderline: + b = b.DoubleUnderline() + case CurlyUnderline: + b = b.CurlyUnderline() + case DottedUnderline: + b = b.DottedUnderline() + case DashedUnderline: + b = b.DashedUnderline() + } + } + if s.Fg != nil { + b = b.ForegroundColor(s.Fg) + } + if s.Bg != nil { + b = b.BackgroundColor(s.Bg) + } + if s.Ul != nil { + b = b.UnderlineColor(s.Ul) + } + + return b.String() +} + +// DiffSequence returns the ANSI sequence that sets the style as a diff from +// another style. 
+func (s Style) DiffSequence(o Style) string { + if o.Empty() { + return s.Sequence() + } + + var b ansi.Style + + if !colorEqual(s.Fg, o.Fg) { + b = b.ForegroundColor(s.Fg) + } + + if !colorEqual(s.Bg, o.Bg) { + b = b.BackgroundColor(s.Bg) + } + + if !colorEqual(s.Ul, o.Ul) { + b = b.UnderlineColor(s.Ul) + } + + var ( + noBlink bool + isNormal bool + ) + + if s.Attrs != o.Attrs { + if s.Attrs&BoldAttr != o.Attrs&BoldAttr { + if s.Attrs&BoldAttr != 0 { + b = b.Bold() + } else if !isNormal { + isNormal = true + b = b.NormalIntensity() + } + } + if s.Attrs&FaintAttr != o.Attrs&FaintAttr { + if s.Attrs&FaintAttr != 0 { + b = b.Faint() + } else if !isNormal { + b = b.NormalIntensity() + } + } + if s.Attrs&ItalicAttr != o.Attrs&ItalicAttr { + if s.Attrs&ItalicAttr != 0 { + b = b.Italic() + } else { + b = b.NoItalic() + } + } + if s.Attrs&SlowBlinkAttr != o.Attrs&SlowBlinkAttr { + if s.Attrs&SlowBlinkAttr != 0 { + b = b.SlowBlink() + } else if !noBlink { + noBlink = true + b = b.NoBlink() + } + } + if s.Attrs&RapidBlinkAttr != o.Attrs&RapidBlinkAttr { + if s.Attrs&RapidBlinkAttr != 0 { + b = b.RapidBlink() + } else if !noBlink { + b = b.NoBlink() + } + } + if s.Attrs&ReverseAttr != o.Attrs&ReverseAttr { + if s.Attrs&ReverseAttr != 0 { + b = b.Reverse() + } else { + b = b.NoReverse() + } + } + if s.Attrs&ConcealAttr != o.Attrs&ConcealAttr { + if s.Attrs&ConcealAttr != 0 { + b = b.Conceal() + } else { + b = b.NoConceal() + } + } + if s.Attrs&StrikethroughAttr != o.Attrs&StrikethroughAttr { + if s.Attrs&StrikethroughAttr != 0 { + b = b.Strikethrough() + } else { + b = b.NoStrikethrough() + } + } + } + + if s.UlStyle != o.UlStyle { + b = b.UnderlineStyle(s.UlStyle) + } + + return b.String() +} + +// Equal returns true if the style is equal to the other style. +func (s *Style) Equal(o *Style) bool { + return s.Attrs == o.Attrs && + s.UlStyle == o.UlStyle && + colorEqual(s.Fg, o.Fg) && + colorEqual(s.Bg, o.Bg) && + colorEqual(s.Ul, o.Ul) +} + +func colorEqual(c, o ansi.Color) bool { + if c == nil && o == nil { + return true + } + if c == nil || o == nil { + return false + } + cr, cg, cb, ca := c.RGBA() + or, og, ob, oa := o.RGBA() + return cr == or && cg == og && cb == ob && ca == oa +} + +// Bold sets the bold attribute. +func (s *Style) Bold(v bool) *Style { + if v { + s.Attrs |= BoldAttr + } else { + s.Attrs &^= BoldAttr + } + return s +} + +// Faint sets the faint attribute. +func (s *Style) Faint(v bool) *Style { + if v { + s.Attrs |= FaintAttr + } else { + s.Attrs &^= FaintAttr + } + return s +} + +// Italic sets the italic attribute. +func (s *Style) Italic(v bool) *Style { + if v { + s.Attrs |= ItalicAttr + } else { + s.Attrs &^= ItalicAttr + } + return s +} + +// SlowBlink sets the slow blink attribute. +func (s *Style) SlowBlink(v bool) *Style { + if v { + s.Attrs |= SlowBlinkAttr + } else { + s.Attrs &^= SlowBlinkAttr + } + return s +} + +// RapidBlink sets the rapid blink attribute. +func (s *Style) RapidBlink(v bool) *Style { + if v { + s.Attrs |= RapidBlinkAttr + } else { + s.Attrs &^= RapidBlinkAttr + } + return s +} + +// Reverse sets the reverse attribute. +func (s *Style) Reverse(v bool) *Style { + if v { + s.Attrs |= ReverseAttr + } else { + s.Attrs &^= ReverseAttr + } + return s +} + +// Conceal sets the conceal attribute. +func (s *Style) Conceal(v bool) *Style { + if v { + s.Attrs |= ConcealAttr + } else { + s.Attrs &^= ConcealAttr + } + return s +} + +// Strikethrough sets the strikethrough attribute. 
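[Editor's note: an illustrative sketch, not part of this patch. Sequence renders a style from a reset state while DiffSequence emits only the transitions from a previous style; this assumes x/ansi's basic color constants such as ansi.Red:]

```go
package main

import (
	"fmt"

	"github.com/charmbracelet/x/ansi"
	"github.com/charmbracelet/x/cellbuf"
)

func main() {
	var prev, next cellbuf.Style
	prev.Bold(true).Foreground(ansi.Red)
	next.Bold(true).Underline(true).Foreground(ansi.Red)

	// Everything needed to reach `next` from a reset terminal.
	fmt.Printf("%q\n", next.Sequence())

	// Only the delta from `prev`: bold and red carry over, so just the
	// underline is emitted.
	fmt.Printf("%q\n", next.DiffSequence(prev))
}
```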
+func (s *Style) Strikethrough(v bool) *Style {
+	if v {
+		s.Attrs |= StrikethroughAttr
+	} else {
+		s.Attrs &^= StrikethroughAttr
+	}
+	return s
+}
+
+// UnderlineStyle sets the underline style.
+func (s *Style) UnderlineStyle(style UnderlineStyle) *Style {
+	s.UlStyle = style
+	return s
+}
+
+// Underline sets the underline attribute.
+// This is syntactic sugar for [UnderlineStyle].
+func (s *Style) Underline(v bool) *Style {
+	if v {
+		return s.UnderlineStyle(SingleUnderline)
+	}
+	return s.UnderlineStyle(NoUnderline)
+}
+
+// Foreground sets the foreground color.
+func (s *Style) Foreground(c ansi.Color) *Style {
+	s.Fg = c
+	return s
+}
+
+// Background sets the background color.
+func (s *Style) Background(c ansi.Color) *Style {
+	s.Bg = c
+	return s
+}
+
+// UnderlineColor sets the underline color.
+func (s *Style) UnderlineColor(c ansi.Color) *Style {
+	s.Ul = c
+	return s
+}
+
+// Reset resets the style to default.
+func (s *Style) Reset() *Style {
+	s.Fg = nil
+	s.Bg = nil
+	s.Ul = nil
+	s.Attrs = ResetAttr
+	s.UlStyle = NoUnderline
+	return s
+}
+
+// Empty returns true if the style is empty.
+func (s *Style) Empty() bool {
+	return s.Fg == nil && s.Bg == nil && s.Ul == nil && s.Attrs == ResetAttr && s.UlStyle == NoUnderline
+}
+
+// Clear returns whether the style consists of only attributes that don't
+// affect appearance of a space character.
+func (s *Style) Clear() bool {
+	return s.UlStyle == NoUnderline &&
+		s.Attrs&^(BoldAttr|FaintAttr|ItalicAttr|SlowBlinkAttr|RapidBlinkAttr) == 0 &&
+		s.Fg == nil &&
+		s.Bg == nil &&
+		s.Ul == nil
+}
+
+func runesEqual(a, b []rune) bool {
+	if len(a) != len(b) {
+		return false
+	}
+	for i, r := range a {
+		if r != b[i] {
+			return false
+		}
+	}
+	return true
+}
diff --git a/vendor/github.com/charmbracelet/x/cellbuf/errors.go b/vendor/github.com/charmbracelet/x/cellbuf/errors.go
new file mode 100644
index 000000000..64258fe33
--- /dev/null
+++ b/vendor/github.com/charmbracelet/x/cellbuf/errors.go
@@ -0,0 +1,6 @@
+package cellbuf
+
+import "errors"
+
+// ErrOutOfBounds is returned when the given x, y position is out of bounds.
+var ErrOutOfBounds = errors.New("out of bounds")
diff --git a/vendor/github.com/charmbracelet/x/cellbuf/geom.go b/vendor/github.com/charmbracelet/x/cellbuf/geom.go
new file mode 100644
index 000000000..c12e6fb1d
--- /dev/null
+++ b/vendor/github.com/charmbracelet/x/cellbuf/geom.go
@@ -0,0 +1,21 @@
+package cellbuf
+
+import (
+	"image"
+)
+
+// Position represents an x, y position.
+type Position = image.Point
+
+// Pos is a shorthand for Position{X: x, Y: y}.
+func Pos(x, y int) Position {
+	return image.Pt(x, y)
+}
+
+// Rectangle represents a rectangle.
+type Rectangle = image.Rectangle
+
+// Rect is a shorthand for Rectangle.
+func Rect(x, y, w, h int) Rectangle {
+	return image.Rect(x, y, x+w, y+h)
+}
diff --git a/vendor/github.com/charmbracelet/x/cellbuf/hardscroll.go b/vendor/github.com/charmbracelet/x/cellbuf/hardscroll.go
new file mode 100644
index 000000000..402ac06a6
--- /dev/null
+++ b/vendor/github.com/charmbracelet/x/cellbuf/hardscroll.go
@@ -0,0 +1,272 @@
+package cellbuf
+
+import (
+	"strings"
+
+	"github.com/charmbracelet/x/ansi"
+)
+
+// scrollOptimize optimizes the screen to transform the old buffer into the new
+// buffer.
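[Editor's note: an illustrative sketch, not part of this patch. Unlike image.Rect, the Rect helper in geom.go above takes a width and height and returns the half-open image.Rectangle [Min, Max):]

```go
package main

import (
	"fmt"

	"github.com/charmbracelet/x/cellbuf"
)

func main() {
	// A 3-cell-wide, 2-cell-tall rectangle anchored at (1, 1).
	r := cellbuf.Rect(1, 1, 3, 2)
	fmt.Println(r.Min, r.Max, r.Dx(), r.Dy()) // (1,1) (4,3) 3 2
}
```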
+func (s *Screen) scrollOptimize() {
+	height := s.newbuf.Height()
+	if s.oldnum == nil || len(s.oldnum) < height {
+		s.oldnum = make([]int, height)
+	}
+
+	// Calculate the indices
+	s.updateHashmap()
+	if len(s.hashtab) < height {
+		return
+	}
+
+	// Pass 1 - from top to bottom scrolling up
+	for i := 0; i < height; {
+		for i < height && (s.oldnum[i] == newIndex || s.oldnum[i] <= i) {
+			i++
+		}
+		if i >= height {
+			break
+		}
+
+		shift := s.oldnum[i] - i // shift > 0
+		start := i
+
+		i++
+		for i < height && s.oldnum[i] != newIndex && s.oldnum[i]-i == shift {
+			i++
+		}
+		end := i - 1 + shift
+
+		if !s.scrolln(shift, start, end, height-1) {
+			continue
+		}
+	}
+
+	// Pass 2 - from bottom to top scrolling down
+	for i := height - 1; i >= 0; {
+		for i >= 0 && (s.oldnum[i] == newIndex || s.oldnum[i] >= i) {
+			i--
+		}
+		if i < 0 {
+			break
+		}
+
+		shift := s.oldnum[i] - i // shift < 0
+		end := i
+
+		i--
+		for i >= 0 && s.oldnum[i] != newIndex && s.oldnum[i]-i == shift {
+			i--
+		}
+
+		start := i + 1 - (-shift)
+		if !s.scrolln(shift, start, end, height-1) {
+			continue
+		}
+	}
+}
+
+// scrolln scrolls the screen by n lines: n > 0 scrolls up (forward), n < 0
+// scrolls down (backward).
+func (s *Screen) scrolln(n, top, bot, maxY int) (v bool) { //nolint:unparam
+	const (
+		nonDestScrollRegion = false
+		memoryBelow         = false
+	)
+
+	blank := s.clearBlank()
+	if n > 0 {
+		// Scroll up (forward)
+		v = s.scrollUp(n, top, bot, 0, maxY, blank)
+		if !v {
+			s.buf.WriteString(ansi.SetTopBottomMargins(top+1, bot+1))
+
+			// XXX: How should we handle this in inline mode when not using alternate screen?
+			s.cur.X, s.cur.Y = -1, -1
+			v = s.scrollUp(n, top, bot, top, bot, blank)
+			s.buf.WriteString(ansi.SetTopBottomMargins(1, maxY+1))
+			s.cur.X, s.cur.Y = -1, -1
+		}
+
+		if !v {
+			v = s.scrollIdl(n, top, bot-n+1, blank)
+		}
+
+		// Clear newly shifted-in lines.
+		if v &&
+			(nonDestScrollRegion || (memoryBelow && bot == maxY)) {
+			if bot == maxY {
+				s.move(0, bot-n+1)
+				s.clearToBottom(nil)
+			} else {
+				for i := 0; i < n; i++ {
+					s.move(0, bot-i)
+					s.clearToEnd(nil, false)
+				}
+			}
+		}
+	} else if n < 0 {
+		// Scroll down (backward)
+		v = s.scrollDown(-n, top, bot, 0, maxY, blank)
+		if !v {
+			s.buf.WriteString(ansi.SetTopBottomMargins(top+1, bot+1))
+
+			// XXX: How should we handle this in inline mode when not using alternate screen?
+			s.cur.X, s.cur.Y = -1, -1
+			v = s.scrollDown(-n, top, bot, top, bot, blank)
+			s.buf.WriteString(ansi.SetTopBottomMargins(1, maxY+1))
+			s.cur.X, s.cur.Y = -1, -1
+
+			if !v {
+				v = s.scrollIdl(-n, bot+n+1, top, blank)
+			}
+
+			// Clear newly shifted-in lines.
+			if v &&
+				(nonDestScrollRegion || (memoryBelow && top == 0)) {
+				for i := 0; i < -n; i++ {
+					s.move(0, top+i)
+					s.clearToEnd(nil, false)
+				}
+			}
+		}
+	}
+
+	if !v {
+		return
+	}
+
+	s.scrollBuffer(s.curbuf, n, top, bot, blank)
+
+	// shift hash values too, they can be reused
+	s.scrollOldhash(n, top, bot)
+
+	return true
+}
+
+// scrollBuffer scrolls the buffer by n lines.
+func (s *Screen) scrollBuffer(b *Buffer, n, top, bot int, blank *Cell) { + if top < 0 || bot < top || bot >= b.Height() { + // Nothing to scroll + return + } + + if n < 0 { + // shift n lines downwards + limit := top - n + for line := bot; line >= limit && line >= 0 && line >= top; line-- { + copy(b.Lines[line], b.Lines[line+n]) + } + for line := top; line < limit && line <= b.Height()-1 && line <= bot; line++ { + b.FillRect(blank, Rect(0, line, b.Width(), 1)) + } + } + + if n > 0 { + // shift n lines upwards + limit := bot - n + for line := top; line <= limit && line <= b.Height()-1 && line <= bot; line++ { + copy(b.Lines[line], b.Lines[line+n]) + } + for line := bot; line > limit && line >= 0 && line >= top; line-- { + b.FillRect(blank, Rect(0, line, b.Width(), 1)) + } + } + + s.touchLine(b.Width(), b.Height(), top, bot-top+1, true) +} + +// touchLine marks the line as touched. +func (s *Screen) touchLine(width, height, y, n int, changed bool) { + if n < 0 || y < 0 || y >= height { + return // Nothing to touch + } + + for i := y; i < y+n && i < height; i++ { + if changed { + s.touch[i] = lineData{firstCell: 0, lastCell: width - 1} + } else { + delete(s.touch, i) + } + } +} + +// scrollUp scrolls the screen up by n lines. +func (s *Screen) scrollUp(n, top, bot, minY, maxY int, blank *Cell) bool { + if n == 1 && top == minY && bot == maxY { + s.move(0, bot) + s.updatePen(blank) + s.buf.WriteByte('\n') + } else if n == 1 && bot == maxY { + s.move(0, top) + s.updatePen(blank) + s.buf.WriteString(ansi.DeleteLine(1)) + } else if top == minY && bot == maxY { + if s.xtermLike { + s.move(0, bot) + } else { + s.move(0, top) + } + s.updatePen(blank) + if s.xtermLike { + s.buf.WriteString(ansi.ScrollUp(n)) + } else { + s.buf.WriteString(strings.Repeat("\n", n)) + } + } else if bot == maxY { + s.move(0, top) + s.updatePen(blank) + s.buf.WriteString(ansi.DeleteLine(n)) + } else { + return false + } + return true +} + +// scrollDown scrolls the screen down by n lines. +func (s *Screen) scrollDown(n, top, bot, minY, maxY int, blank *Cell) bool { + if n == 1 && top == minY && bot == maxY { + s.move(0, top) + s.updatePen(blank) + s.buf.WriteString(ansi.ReverseIndex) + } else if n == 1 && bot == maxY { + s.move(0, top) + s.updatePen(blank) + s.buf.WriteString(ansi.InsertLine(1)) + } else if top == minY && bot == maxY { + s.move(0, top) + s.updatePen(blank) + if s.xtermLike { + s.buf.WriteString(ansi.ScrollDown(n)) + } else { + s.buf.WriteString(strings.Repeat(ansi.ReverseIndex, n)) + } + } else if bot == maxY { + s.move(0, top) + s.updatePen(blank) + s.buf.WriteString(ansi.InsertLine(n)) + } else { + return false + } + return true +} + +// scrollIdl scrolls the screen n lines by using [ansi.DL] at del and using +// [ansi.IL] at ins. +func (s *Screen) scrollIdl(n, del, ins int, blank *Cell) bool { + if n < 0 { + return false + } + + // Delete lines + s.move(0, del) + s.updatePen(blank) + s.buf.WriteString(ansi.DeleteLine(n)) + + // Insert lines + s.move(0, ins) + s.updatePen(blank) + s.buf.WriteString(ansi.InsertLine(n)) + + return true +} diff --git a/vendor/github.com/charmbracelet/x/cellbuf/hashmap.go b/vendor/github.com/charmbracelet/x/cellbuf/hashmap.go new file mode 100644 index 000000000..0d25b5492 --- /dev/null +++ b/vendor/github.com/charmbracelet/x/cellbuf/hashmap.go @@ -0,0 +1,301 @@ +package cellbuf + +import ( + "github.com/charmbracelet/x/ansi" +) + +// hash returns the hash value of a [Line]. 
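[Editor's note: an illustrative sketch, not part of this patch. The unexported hash above is a djb2-style multiplicative hash (h = 33*h + r, zero seed) over a line's primary runes, with nil cells treated as spaces. A standalone reimplementation of the same recurrence:]

```go
package main

import "fmt"

// lineHash mirrors cellbuf's recurrence: h += (h << 5) + r, i.e. h = 33*h + r.
func lineHash(runes []rune) (h uint64) {
	for _, r := range runes {
		h += (h << 5) + uint64(r)
	}
	return
}

func main() {
	fmt.Println(lineHash([]rune("hello, world")))
	fmt.Println(lineHash([]rune("hello, wurld"))) // one rune changed => different hash
}
```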
+func hash(l Line) (h uint64) {
+	for _, c := range l {
+		var r rune
+		if c == nil {
+			r = ansi.SP
+		} else {
+			r = c.Rune
+		}
+		h += (h << 5) + uint64(r)
+	}
+	return
+}
+
+// hashmap represents a single [Line] hash.
+type hashmap struct {
+	value              uint64
+	oldcount, newcount int
+	oldindex, newindex int
+}
+
+// The value used to indicate lines created by insertions and scrolls.
+const newIndex = -1
+
+// updateHashmap updates the hashmap with the new hash value.
+func (s *Screen) updateHashmap() {
+	height := s.newbuf.Height()
+	if len(s.oldhash) >= height && len(s.newhash) >= height {
+		// rehash changed lines
+		for i := 0; i < height; i++ {
+			_, ok := s.touch[i]
+			if ok {
+				s.oldhash[i] = hash(s.curbuf.Line(i))
+				s.newhash[i] = hash(s.newbuf.Line(i))
+			}
+		}
+	} else {
+		// rehash all
+		if len(s.oldhash) != height {
+			s.oldhash = make([]uint64, height)
+		}
+		if len(s.newhash) != height {
+			s.newhash = make([]uint64, height)
+		}
+		for i := 0; i < height; i++ {
+			s.oldhash[i] = hash(s.curbuf.Line(i))
+			s.newhash[i] = hash(s.newbuf.Line(i))
+		}
+	}
+
+	s.hashtab = make([]hashmap, height*2)
+	for i := 0; i < height; i++ {
+		hashval := s.oldhash[i]
+
+		// Find matching hash or empty slot
+		idx := 0
+		for idx < len(s.hashtab) && s.hashtab[idx].value != 0 {
+			if s.hashtab[idx].value == hashval {
+				break
+			}
+			idx++
+		}
+
+		s.hashtab[idx].value = hashval // in case this is a new hash
+		s.hashtab[idx].oldcount++
+		s.hashtab[idx].oldindex = i
+	}
+	for i := 0; i < height; i++ {
+		hashval := s.newhash[i]
+
+		// Find matching hash or empty slot
+		idx := 0
+		for idx < len(s.hashtab) && s.hashtab[idx].value != 0 {
+			if s.hashtab[idx].value == hashval {
+				break
+			}
+			idx++
+		}
+
+		s.hashtab[idx].value = hashval // in case this is a new hash
+		s.hashtab[idx].newcount++
+		s.hashtab[idx].newindex = i
+
+		s.oldnum[i] = newIndex // init old indices slice
+	}
+
+	// Mark line pairs corresponding to unique hash pairs.
+	for i := 0; i < len(s.hashtab) && s.hashtab[i].value != 0; i++ {
+		hsp := &s.hashtab[i]
+		if hsp.oldcount == 1 && hsp.newcount == 1 && hsp.oldindex != hsp.newindex {
+			s.oldnum[hsp.newindex] = hsp.oldindex
+		}
+	}
+
+	s.growHunks()
+
+	// Eliminate bad or impossible shifts. This includes removing those hunks
+	// which could not grow because of conflicts, as well as those which would
+	// be moved too far; such shifts are likely to destroy more than they carry.
+	for i := 0; i < height; {
+		var start, shift, size int
+		for i < height && s.oldnum[i] == newIndex {
+			i++
+		}
+		if i >= height {
+			break
+		}
+		start = i
+		shift = s.oldnum[i] - i
+		i++
+		for i < height && s.oldnum[i] != newIndex && s.oldnum[i]-i == shift {
+			i++
+		}
+		size = i - start
+		if size < 3 || size+min(size/8, 2) < abs(shift) {
+			for start < i {
+				s.oldnum[start] = newIndex
+				start++
+			}
+		}
+	}
+
+	// After clearing invalid hunks, try to grow the rest.
+	s.growHunks()
+}
+
+// scrollOldhash shifts the cached old hash values to match a scroll of the
+// buffer by n lines within the top/bot region.
+func (s *Screen) scrollOldhash(n, top, bot int) {
+	if len(s.oldhash) == 0 {
+		return
+	}
+
+	size := bot - top + 1 - abs(n)
+	if n > 0 {
+		// Move existing hashes up
+		copy(s.oldhash[top:], s.oldhash[top+n:top+n+size])
+		// Recalculate hashes for newly shifted-in lines
+		for i := bot; i > bot-n; i-- {
+			s.oldhash[i] = hash(s.curbuf.Line(i))
+		}
+	} else {
+		// Move existing hashes down
+		copy(s.oldhash[top-n:], s.oldhash[top:top+size])
+		// Recalculate hashes for newly shifted-in lines
+		for i := top; i < top-n; i++ {
+			s.oldhash[i] = hash(s.curbuf.Line(i))
+		}
+	}
+}
+
+func (s *Screen) growHunks() {
+	var (
+		backLimit    int // limits for cells to fill
+		backRefLimit int // limit for references
+		i            int
+		nextHunk     int
+	)
+
+	height := s.newbuf.Height()
+	for i < height && s.oldnum[i] == newIndex {
+		i++
+	}
+	for ; i < height; i = nextHunk {
+		var (
+			forwardLimit    int
+			forwardRefLimit int
+			end             int
+			start           = i
+			shift           = s.oldnum[i] - i
+		)
+
+		// get forward limit
+		i = start + 1
+		for i < height &&
+			s.oldnum[i] != newIndex &&
+			s.oldnum[i]-i == shift {
+			i++
+		}
+
+		end = i
+		for i < height && s.oldnum[i] == newIndex {
+			i++
+		}
+
+		nextHunk = i
+		forwardLimit = i
+		if i >= height || s.oldnum[i] >= i {
+			forwardRefLimit = i
+		} else {
+			forwardRefLimit = s.oldnum[i]
+		}
+
+		i = start - 1
+
+		// grow back
+		if shift < 0 {
+			backLimit = backRefLimit + (-shift)
+		}
+		for i >= backLimit {
+			if s.newhash[i] == s.oldhash[i+shift] ||
+				s.costEffective(i+shift, i, shift < 0) {
+				s.oldnum[i] = i + shift
+			} else {
+				break
+			}
+			i--
+		}
+
+		i = end
+		// grow forward
+		if shift > 0 {
+			forwardLimit = forwardRefLimit - shift
+		}
+		for i < forwardLimit {
+			if s.newhash[i] == s.oldhash[i+shift] ||
+				s.costEffective(i+shift, i, shift > 0) {
+				s.oldnum[i] = i + shift
+			} else {
+				break
+			}
+			i++
+		}
+
+		backLimit = i
+		backRefLimit = backLimit
+		if shift > 0 {
+			backRefLimit += shift
+		}
+	}
+}
+
+// costEffective returns true if the cost of moving line 'from' to line 'to' seems to be
+// cost effective. 'blank' indicates whether the line 'to' would become blank.
+func (s *Screen) costEffective(from, to int, blank bool) bool {
+	if from == to {
+		return false
+	}
+
+	newFrom := s.oldnum[from]
+	if newFrom == newIndex {
+		newFrom = from
+	}
+
+	// On the left side of >= is the cost before moving. On the right side --
+	// cost after moving.
+
+	// Calculate costs before moving.
+	var costBeforeMove int
+	if blank {
+		// Cost of updating blank line at destination.
+		costBeforeMove = s.updateCostBlank(s.newbuf.Line(to))
+	} else {
+		// Cost of updating existing line at destination.
+		costBeforeMove = s.updateCost(s.curbuf.Line(to), s.newbuf.Line(to))
+	}
+
+	// Add cost of updating source line
+	costBeforeMove += s.updateCost(s.curbuf.Line(newFrom), s.newbuf.Line(from))
+
+	// Calculate costs after moving.
+	var costAfterMove int
+	if newFrom == from {
+		// Source becomes blank after move
+		costAfterMove = s.updateCostBlank(s.newbuf.Line(from))
+	} else {
+		// Source gets updated from another line
+		costAfterMove = s.updateCost(s.curbuf.Line(newFrom), s.newbuf.Line(from))
+	}
+
+	// Add cost of moving source line to destination
+	costAfterMove += s.updateCost(s.curbuf.Line(from), s.newbuf.Line(to))
+
+	// Return true if moving is cost effective (costs less or equal)
+	return costBeforeMove >= costAfterMove
+}
+
+func (s *Screen) updateCost(from, to Line) (cost int) {
+	var fidx, tidx int
+	for i := s.newbuf.Width() - 1; i > 0; i, fidx, tidx = i-1, fidx+1, tidx+1 {
+		if !cellEqual(from.At(fidx), to.At(tidx)) {
+			cost++
+		}
+	}
+	return
+}
+
+func (s *Screen) updateCostBlank(to Line) (cost int) {
+	var tidx int
+	for i := s.newbuf.Width() - 1; i > 0; i, tidx = i-1, tidx+1 {
+		if !cellEqual(nil, to.At(tidx)) {
+			cost++
+		}
+	}
+	return
+}
diff --git a/vendor/github.com/charmbracelet/x/cellbuf/link.go b/vendor/github.com/charmbracelet/x/cellbuf/link.go
new file mode 100644
index 000000000..112f8e8ab
--- /dev/null
+++ b/vendor/github.com/charmbracelet/x/cellbuf/link.go
@@ -0,0 +1,14 @@
+package cellbuf
+
+import (
+	"github.com/charmbracelet/colorprofile"
+)
+
+// ConvertLink converts a hyperlink to respect the given color profile.
+func ConvertLink(h Link, p colorprofile.Profile) Link {
+	if p == colorprofile.NoTTY {
+		return Link{}
+	}
+
+	return h
+}
diff --git a/vendor/github.com/charmbracelet/x/cellbuf/screen.go b/vendor/github.com/charmbracelet/x/cellbuf/screen.go
new file mode 100644
index 000000000..963b9cac3
--- /dev/null
+++ b/vendor/github.com/charmbracelet/x/cellbuf/screen.go
@@ -0,0 +1,1457 @@
+package cellbuf
+
+import (
+	"bytes"
+	"errors"
+	"io"
+	"os"
+	"strings"
+	"sync"
+
+	"github.com/charmbracelet/colorprofile"
+	"github.com/charmbracelet/x/ansi"
+	"github.com/charmbracelet/x/term"
+)
+
+// ErrInvalidDimensions is returned when the dimensions of a window are invalid
+// for the operation.
+var ErrInvalidDimensions = errors.New("invalid dimensions")
+
+// notLocal returns whether the coordinates are not considered local movement
+// using the defined thresholds.
+// This takes the number of columns, and the coordinates of the current and
+// target positions.
+func notLocal(cols, fx, fy, tx, ty int) bool {
+	// The typical distance for a [ansi.CUP] sequence. Anything less than this
+	// is considered local movement.
+	const longDist = 8 - 1
+	return (tx > longDist) &&
+		(tx < cols-1-longDist) &&
+		(abs(ty-fy)+abs(tx-fx) > longDist)
+}
+
+// relativeCursorMove returns the relative cursor movement sequence using one or two
+// of the following sequences [ansi.CUU], [ansi.CUD], [ansi.CUF], [ansi.CUB],
+// [ansi.VPA], [ansi.HPA].
+// When overwrite is true, this will try to optimize the sequence by using the
+// screen cells values to move the cursor instead of using escape sequences.
+func relativeCursorMove(s *Screen, fx, fy, tx, ty int, overwrite, useTabs, useBackspace bool) string {
+	var seq strings.Builder
+
+	width, height := s.newbuf.Width(), s.newbuf.Height()
+	if ty != fy {
+		var yseq string
+		if s.xtermLike && !s.opts.RelativeCursor {
+			yseq = ansi.VerticalPositionAbsolute(ty + 1)
+		}
+
+		// OPTIM: Use [ansi.LF] and [ansi.ReverseIndex] as optimizations.
+ + if ty > fy { + n := ty - fy + if cud := ansi.CursorDown(n); yseq == "" || len(cud) < len(yseq) { + yseq = cud + } + shouldScroll := !s.opts.AltScreen && fy+n >= s.scrollHeight + if lf := strings.Repeat("\n", n); shouldScroll || (fy+n < height && len(lf) < len(yseq)) { + // TODO: Ensure we're not unintentionally scrolling the screen down. + yseq = lf + s.scrollHeight = max(s.scrollHeight, fy+n) + } + } else if ty < fy { + n := fy - ty + if cuu := ansi.CursorUp(n); yseq == "" || len(cuu) < len(yseq) { + yseq = cuu + } + if n == 1 && fy-1 > 0 { + // TODO: Ensure we're not unintentionally scrolling the screen up. + yseq = ansi.ReverseIndex + } + } + + seq.WriteString(yseq) + } + + if tx != fx { + var xseq string + if s.xtermLike && !s.opts.RelativeCursor { + xseq = ansi.HorizontalPositionAbsolute(tx + 1) + } + + if tx > fx { + n := tx - fx + if useTabs { + var tabs int + var col int + for col = fx; s.tabs.Next(col) <= tx; col = s.tabs.Next(col) { + tabs++ + if col == s.tabs.Next(col) || col >= width-1 { + break + } + } + + if tabs > 0 { + cht := ansi.CursorHorizontalForwardTab(tabs) + tab := strings.Repeat("\t", tabs) + if false && s.xtermLike && len(cht) < len(tab) { + // TODO: The linux console and some terminals such as + // Alacritty don't support [ansi.CHT]. Enable this when + // we have a way to detect this, or after 5 years when + // we're sure everyone has updated their terminals :P + seq.WriteString(cht) + } else { + seq.WriteString(tab) + } + + n = tx - col + fx = col + } + } + + if cuf := ansi.CursorForward(n); xseq == "" || len(cuf) < len(xseq) { + xseq = cuf + } + + // If we have no attribute and style changes, overwrite is cheaper. + var ovw string + if overwrite && ty >= 0 { + for i := 0; i < n; i++ { + cell := s.newbuf.Cell(fx+i, ty) + if cell != nil && cell.Width > 0 { + i += cell.Width - 1 + if !cell.Style.Equal(&s.cur.Style) || !cell.Link.Equal(&s.cur.Link) { + overwrite = false + break + } + } + } + } + + if overwrite && ty >= 0 { + for i := 0; i < n; i++ { + cell := s.newbuf.Cell(fx+i, ty) + if cell != nil && cell.Width > 0 { + ovw += cell.String() + i += cell.Width - 1 + } else { + ovw += " " + } + } + } + + if overwrite && len(ovw) < len(xseq) { + xseq = ovw + } + } else if tx < fx { + n := fx - tx + if useTabs && s.xtermLike { + // VT100 does not support backward tabs [ansi.CBT]. + + col := fx + + var cbt int // cursor backward tabs count + for s.tabs.Prev(col) >= tx { + col = s.tabs.Prev(col) + cbt++ + if col == s.tabs.Prev(col) || col <= 0 { + break + } + } + + if cbt > 0 { + seq.WriteString(ansi.CursorBackwardTab(cbt)) + n = col - tx + } + } + + if cub := ansi.CursorBackward(n); xseq == "" || len(cub) < len(xseq) { + xseq = cub + } + + if useBackspace && n < len(xseq) { + xseq = strings.Repeat("\b", n) + } + } + + seq.WriteString(xseq) + } + + return seq.String() +} + +// moveCursor moves and returns the cursor movement sequence to move the cursor +// to the specified position. +// When overwrite is true, this will try to optimize the sequence by using the +// screen cells values to move the cursor instead of using escape sequences. +func moveCursor(s *Screen, x, y int, overwrite bool) (seq string) { + fx, fy := s.cur.X, s.cur.Y + + if !s.opts.RelativeCursor { + // Method #0: Use [ansi.CUP] if the distance is long. + seq = ansi.CursorPosition(x+1, y+1) + if fx == -1 || fy == -1 || notLocal(s.newbuf.Width(), fx, fy, x, y) { + return + } + } + + // Optimize based on options. 
+ trials := 0 + if s.opts.HardTabs { + trials |= 2 // 0b10 in binary + } + if s.opts.Backspace { + trials |= 1 // 0b01 in binary + } + + // Try all possible combinations of hard tabs and backspace optimizations. + for i := 0; i <= trials; i++ { + // Skip combinations that are not enabled. + if i & ^trials != 0 { + continue + } + + useHardTabs := i&2 != 0 + useBackspace := i&1 != 0 + + // Method #1: Use local movement sequences. + nseq := relativeCursorMove(s, fx, fy, x, y, overwrite, useHardTabs, useBackspace) + if (i == 0 && len(seq) == 0) || len(nseq) < len(seq) { + seq = nseq + } + + // Method #2: Use [ansi.CR] and local movement sequences. + nseq = "\r" + relativeCursorMove(s, 0, fy, x, y, overwrite, useHardTabs, useBackspace) + if len(nseq) < len(seq) { + seq = nseq + } + + if !s.opts.RelativeCursor { + // Method #3: Use [ansi.CursorHomePosition] and local movement sequences. + nseq = ansi.CursorHomePosition + relativeCursorMove(s, 0, 0, x, y, overwrite, useHardTabs, useBackspace) + if len(nseq) < len(seq) { + seq = nseq + } + } + } + + return +} + +// moveCursor moves the cursor to the specified position. +func (s *Screen) moveCursor(x, y int, overwrite bool) { + if !s.opts.AltScreen && s.cur.X == -1 && s.cur.Y == -1 { + // First cursor movement in inline mode, move the cursor to the first + // column before moving to the target position. + s.buf.WriteByte('\r') //nolint:errcheck + s.cur.X, s.cur.Y = 0, 0 + } + s.buf.WriteString(moveCursor(s, x, y, overwrite)) //nolint:errcheck + s.cur.X, s.cur.Y = x, y +} + +func (s *Screen) move(x, y int) { + // XXX: Make sure we use the max height and width of the buffer in case + // we're in the middle of a resize operation. + width := max(s.newbuf.Width(), s.curbuf.Width()) + height := max(s.newbuf.Height(), s.curbuf.Height()) + + if width > 0 && x >= width { + // Handle autowrap + y += (x / width) + x %= width + } + + // XXX: Disable styles if there's any + // Some move operations such as [ansi.LF] can apply styles to the new + // cursor position, thus, we need to reset the styles before moving the + // cursor. + blank := s.clearBlank() + resetPen := y != s.cur.Y && !blank.Equal(&BlankCell) + if resetPen { + s.updatePen(nil) + } + + // Reset wrap around (phantom cursor) state + if s.atPhantom { + s.cur.X = 0 + s.buf.WriteByte('\r') //nolint:errcheck + s.atPhantom = false // reset phantom cell state + } + + // TODO: Investigate if we need to handle this case and/or if we need the + // following code. + // + // if width > 0 && s.cur.X >= width { + // l := (s.cur.X + 1) / width + // + // s.cur.Y += l + // if height > 0 && s.cur.Y >= height { + // l -= s.cur.Y - height - 1 + // } + // + // if l > 0 { + // s.cur.X = 0 + // s.buf.WriteString("\r" + strings.Repeat("\n", l)) //nolint:errcheck + // } + // } + + if height > 0 { + if s.cur.Y > height-1 { + s.cur.Y = height - 1 + } + if y > height-1 { + y = height - 1 + } + } + + if x == s.cur.X && y == s.cur.Y { + // We give up later because we need to run checks for the phantom cell + // and others before we can determine if we can give up. + return + } + + // We set the new cursor in [Screen.moveCursor]. + s.moveCursor(x, y, true) // Overwrite cells if possible +} + +// Cursor represents a terminal Cursor. +type Cursor struct { + Style + Link + Position +} + +// ScreenOptions are options for the screen. +type ScreenOptions struct { + // Term is the terminal type to use when writing to the screen. When empty, + // `$TERM` is used from [os.Getenv]. 
+ Term string
+ // Profile is the color profile to use when writing to the screen.
+ Profile colorprofile.Profile
+ // RelativeCursor is whether to use relative cursor movements. This is
+ // useful when alt-screen is not used or when using inline mode.
+ RelativeCursor bool
+ // AltScreen is whether to use the alternate screen buffer.
+ AltScreen bool
+ // ShowCursor is whether to show the cursor.
+ ShowCursor bool
+ // HardTabs is whether to use hard tabs to optimize cursor movements.
+ HardTabs bool
+ // Backspace is whether to use backspace characters to move the cursor.
+ Backspace bool
+}
+
+// lineData represents the metadata for a line.
+type lineData struct {
+ // first and last changed cell indices
+ firstCell, lastCell int
+ // old index used for scrolling
+ oldIndex int //nolint:unused
+}
+
+// Screen represents the terminal screen.
+type Screen struct {
+ w io.Writer
+ buf *bytes.Buffer // buffer for writing to the screen
+ curbuf *Buffer // the current buffer
+ newbuf *Buffer // the new buffer
+ tabs *TabStops
+ touch map[int]lineData
+ queueAbove []string // the queue of strings to write above the screen
+ oldhash, newhash []uint64 // the old and new hash values for each line
+ hashtab []hashmap // the hashmap table
+ oldnum []int // old indices from previous hash
+ cur, saved Cursor // the current and saved cursors
+ opts ScreenOptions
+ mu sync.Mutex
+ method ansi.Method
+ scrollHeight int // keeps track of how many lines we've scrolled down (inline mode)
+ altScreenMode bool // whether alternate screen mode is enabled
+ cursorHidden bool // whether the text cursor is currently hidden
+ clear bool // whether to force clear the screen
+ xtermLike bool // whether to use xterm-like optimizations; otherwise, only vt100 sequences are used
+ queuedText bool // whether we have non-zero width text queued up
+ atPhantom bool // whether the cursor is out of bounds and at a phantom cell
+}
+
+// SetMethod sets the method used to calculate the width of cells.
+func (s *Screen) SetMethod(method ansi.Method) {
+ s.method = method
+}
+
+// UseBackspaces sets whether to use backspace characters to move the cursor.
+func (s *Screen) UseBackspaces(v bool) {
+ s.opts.Backspace = v
+}
+
+// UseHardTabs sets whether to use hard tabs to optimize cursor movements.
+func (s *Screen) UseHardTabs(v bool) {
+ s.opts.HardTabs = v
+}
+
+// SetColorProfile sets the color profile to use when writing to the screen.
+func (s *Screen) SetColorProfile(p colorprofile.Profile) {
+ s.opts.Profile = p
+}
+
+// SetRelativeCursor sets whether to use relative cursor movements.
+func (s *Screen) SetRelativeCursor(v bool) {
+ s.opts.RelativeCursor = v
+}
+
+// EnterAltScreen enters the alternate screen buffer.
+func (s *Screen) EnterAltScreen() {
+ s.opts.AltScreen = true
+ s.clear = true
+ s.saved = s.cur
+}
+
+// ExitAltScreen exits the alternate screen buffer.
+func (s *Screen) ExitAltScreen() {
+ s.opts.AltScreen = false
+ s.clear = true
+ s.cur = s.saved
+}
+
+// ShowCursor shows the cursor.
+func (s *Screen) ShowCursor() {
+ s.opts.ShowCursor = true
+}
+
+// HideCursor hides the cursor.
+func (s *Screen) HideCursor() {
+ s.opts.ShowCursor = false
+}
+
+// Bounds implements Window.
+func (s *Screen) Bounds() Rectangle {
+ // Always return the new buffer bounds.
+ return s.newbuf.Bounds()
+}
+
+// Cell implements Window.
+func (s *Screen) Cell(x int, y int) *Cell {
+ return s.newbuf.Cell(x, y)
+}
+
+// Redraw forces a full redraw of the screen.
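+// It marks the screen as dirty so that the next [Screen.Render] performs a
+// full clear-and-repaint instead of an incremental diff.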
+func (s *Screen) Redraw() { + s.mu.Lock() + s.clear = true + s.mu.Unlock() +} + +// Clear clears the screen with blank cells. This is a convenience method for +// [Screen.Fill] with a nil cell. +func (s *Screen) Clear() bool { + return s.ClearRect(s.newbuf.Bounds()) +} + +// ClearRect clears the given rectangle with blank cells. This is a convenience +// method for [Screen.FillRect] with a nil cell. +func (s *Screen) ClearRect(r Rectangle) bool { + return s.FillRect(nil, r) +} + +// SetCell implements Window. +func (s *Screen) SetCell(x int, y int, cell *Cell) (v bool) { + s.mu.Lock() + defer s.mu.Unlock() + cellWidth := 1 + if cell != nil { + cellWidth = cell.Width + } + if prev := s.curbuf.Cell(x, y); !cellEqual(prev, cell) { + chg, ok := s.touch[y] + if !ok { + chg = lineData{firstCell: x, lastCell: x + cellWidth} + } else { + chg.firstCell = min(chg.firstCell, x) + chg.lastCell = max(chg.lastCell, x+cellWidth) + } + s.touch[y] = chg + } + + return s.newbuf.SetCell(x, y, cell) +} + +// Fill implements Window. +func (s *Screen) Fill(cell *Cell) bool { + return s.FillRect(cell, s.newbuf.Bounds()) +} + +// FillRect implements Window. +func (s *Screen) FillRect(cell *Cell, r Rectangle) bool { + s.mu.Lock() + defer s.mu.Unlock() + s.newbuf.FillRect(cell, r) + for i := r.Min.Y; i < r.Max.Y; i++ { + s.touch[i] = lineData{firstCell: r.Min.X, lastCell: r.Max.X} + } + return true +} + +// isXtermLike returns whether the terminal is xterm-like. This means that the +// terminal supports ECMA-48 and ANSI X3.64 escape sequences. +// TODO: Should this be a lookup table into each $TERM terminfo database? Like +// we could keep a map of ANSI escape sequence to terminfo capability name and +// check if the database supports the escape sequence. Instead of keeping a +// list of terminal names here. +func isXtermLike(termtype string) (v bool) { + parts := strings.Split(termtype, "-") + if len(parts) == 0 { + return + } + + switch parts[0] { + case + "alacritty", + "contour", + "foot", + "ghostty", + "kitty", + "linux", + "rio", + "screen", + "st", + "tmux", + "wezterm", + "xterm": + v = true + } + + return +} + +// NewScreen creates a new Screen. +func NewScreen(w io.Writer, width, height int, opts *ScreenOptions) (s *Screen) { + s = new(Screen) + s.w = w + if opts != nil { + s.opts = *opts + } + + if s.opts.Term == "" { + s.opts.Term = os.Getenv("TERM") + } + + if width <= 0 || height <= 0 { + if f, ok := w.(term.File); ok { + width, height, _ = term.GetSize(f.Fd()) + } + } + if width < 0 { + width = 0 + } + if height < 0 { + height = 0 + } + + s.buf = new(bytes.Buffer) + s.xtermLike = isXtermLike(s.opts.Term) + s.curbuf = NewBuffer(width, height) + s.newbuf = NewBuffer(width, height) + s.cur = Cursor{Position: Pos(-1, -1)} // start at -1 to force a move + s.saved = s.cur + s.reset() + + return +} + +// Width returns the width of the screen. +func (s *Screen) Width() int { + return s.newbuf.Width() +} + +// Height returns the height of the screen. +func (s *Screen) Height() int { + return s.newbuf.Height() +} + +// cellEqual returns whether the two cells are equal. A nil cell is considered +// a [BlankCell]. +func cellEqual(a, b *Cell) bool { + if a == b { + return true + } + if a == nil { + a = &BlankCell + } + if b == nil { + b = &BlankCell + } + return a.Equal(b) +} + +// putCell draws a cell at the current cursor position. 
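+// The bottom-right cell of the alternate screen takes a dedicated path
+// ([Screen.putCellLR]) that briefly disables auto-wrap so writing it does
+// not scroll the screen.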
+func (s *Screen) putCell(cell *Cell) {
+ width, height := s.newbuf.Width(), s.newbuf.Height()
+ if s.opts.AltScreen && s.cur.X == width-1 && s.cur.Y == height-1 {
+ s.putCellLR(cell)
+ } else {
+ s.putAttrCell(cell)
+ }
+}
+
+// wrapCursor wraps the cursor to the next line.
+//
+//nolint:unused
+func (s *Screen) wrapCursor() {
+ const autoRightMargin = true
+ if autoRightMargin {
+ // Assume we have auto wrap mode enabled.
+ s.cur.X = 0
+ s.cur.Y++
+ } else {
+ s.cur.X--
+ }
+}
+
+func (s *Screen) putAttrCell(cell *Cell) {
+ if cell != nil && cell.Empty() {
+ // XXX: Zero width cells are special and should not be written to the
+ // screen no matter what other attributes they have.
+ // Zero width cells are used for wide characters that are split into
+ // multiple cells.
+ return
+ }
+
+ if cell == nil {
+ cell = s.clearBlank()
+ }
+
+ // We're in the pending-wrap state (phantom cell); the incoming cell
+ // should wrap.
+ if s.atPhantom {
+ s.wrapCursor()
+ s.atPhantom = false
+ }
+
+ s.updatePen(cell)
+ s.buf.WriteRune(cell.Rune) //nolint:errcheck
+ for _, c := range cell.Comb {
+ s.buf.WriteRune(c) //nolint:errcheck
+ }
+
+ s.cur.X += cell.Width
+
+ if cell.Width > 0 {
+ s.queuedText = true
+ }
+
+ if s.cur.X >= s.newbuf.Width() {
+ s.atPhantom = true
+ }
+}
+
+// putCellLR draws a cell at the lower right corner of the screen.
+func (s *Screen) putCellLR(cell *Cell) {
+ // Optimize for the lower right corner cell.
+ curX := s.cur.X
+ if cell == nil || !cell.Empty() {
+ s.buf.WriteString(ansi.ResetAutoWrapMode) //nolint:errcheck
+ s.putAttrCell(cell)
+ // Writing to lower-right corner cell should not wrap.
+ s.atPhantom = false
+ s.cur.X = curX
+ s.buf.WriteString(ansi.SetAutoWrapMode) //nolint:errcheck
+ }
+}
+
+// updatePen updates the cursor pen styles.
+func (s *Screen) updatePen(cell *Cell) {
+ if cell == nil {
+ cell = &BlankCell
+ }
+
+ if s.opts.Profile != 0 {
+ // Downsample colors to the given color profile.
+ cell.Style = ConvertStyle(cell.Style, s.opts.Profile)
+ cell.Link = ConvertLink(cell.Link, s.opts.Profile)
+ }
+
+ if !cell.Style.Equal(&s.cur.Style) {
+ seq := cell.Style.DiffSequence(s.cur.Style)
+ if cell.Style.Empty() && len(seq) > len(ansi.ResetStyle) {
+ seq = ansi.ResetStyle
+ }
+ s.buf.WriteString(seq) //nolint:errcheck
+ s.cur.Style = cell.Style
+ }
+ if !cell.Link.Equal(&s.cur.Link) {
+ s.buf.WriteString(ansi.SetHyperlink(cell.Link.URL, cell.Link.Params)) //nolint:errcheck
+ s.cur.Link = cell.Link
+ }
+}
+
+// emitRange emits a range of cells to the buffer. It is equivalent to calling
+// [Screen.putCell] for each cell in the range. This is optimized to use
+// [ansi.ECH] and [ansi.REP].
+// Returns whether the cursor is at the end of the interval or somewhere in
+// the middle.
+func (s *Screen) emitRange(line Line, n int) (eoi bool) {
+ for n > 0 {
+ var count int
+ for n > 1 && !cellEqual(line.At(0), line.At(1)) {
+ s.putCell(line.At(0))
+ line = line[1:]
+ n--
+ }
+
+ cell0 := line[0]
+ if n == 1 {
+ s.putCell(cell0)
+ return false
+ }
+
+ count = 2
+ for count < n && cellEqual(line.At(count), cell0) {
+ count++
+ }
+
+ ech := ansi.EraseCharacter(count)
+ cup := ansi.CursorPosition(s.cur.X+count, s.cur.Y)
+ rep := ansi.RepeatPreviousCharacter(count)
+ if s.xtermLike && count > len(ech)+len(cup) && cell0 != nil && cell0.Clear() {
+ s.updatePen(cell0)
+ s.buf.WriteString(ech) //nolint:errcheck
+
+ // If this is the last cell, we don't need to move the cursor.
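+ // [ansi.ECH] erases cells in place without moving the cursor, so we
+ // only reposition when more cells in this range remain to be written.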
+ if count < n { + s.move(s.cur.X+count, s.cur.Y) + } else { + return true // cursor in the middle + } + } else if s.xtermLike && count > len(rep) && + (cell0 == nil || (len(cell0.Comb) == 0 && cell0.Rune < 256)) { + // We only support ASCII characters. Most terminals will handle + // non-ASCII characters correctly, but some might not, ahem xterm. + // + // NOTE: [ansi.REP] only repeats the last rune and won't work + // if the last cell contains multiple runes. + + wrapPossible := s.cur.X+count >= s.newbuf.Width() + repCount := count + if wrapPossible { + repCount-- + } + + s.updatePen(cell0) + s.putCell(cell0) + repCount-- // cell0 is a single width cell ASCII character + + s.buf.WriteString(ansi.RepeatPreviousCharacter(repCount)) //nolint:errcheck + s.cur.X += repCount + if wrapPossible { + s.putCell(cell0) + } + } else { + for i := 0; i < count; i++ { + s.putCell(line.At(i)) + } + } + + line = line[clamp(count, 0, len(line)):] + n -= count + } + + return +} + +// putRange puts a range of cells from the old line to the new line. +// Returns whether the cursor is at the end of interval or somewhere in the +// middle. +func (s *Screen) putRange(oldLine, newLine Line, y, start, end int) (eoi bool) { + inline := min(len(ansi.CursorPosition(start+1, y+1)), + min(len(ansi.HorizontalPositionAbsolute(start+1)), + len(ansi.CursorForward(start+1)))) + if (end - start + 1) > inline { + var j, same int + for j, same = start, 0; j <= end; j++ { + oldCell, newCell := oldLine.At(j), newLine.At(j) + if same == 0 && oldCell != nil && oldCell.Empty() { + continue + } + if cellEqual(oldCell, newCell) { + same++ + } else { + if same > end-start { + s.emitRange(newLine[start:], j-same-start) + s.move(j, y) + start = j + } + same = 0 + } + } + + i := s.emitRange(newLine[start:], j-same-start) + + // Always return 1 for the next [Screen.move] after a [Screen.putRange] if + // we found identical characters at end of interval. + if same == 0 { + return i + } + return true + } + + return s.emitRange(newLine[start:], end-start+1) +} + +// clearToEnd clears the screen from the current cursor position to the end of +// line. +func (s *Screen) clearToEnd(blank *Cell, force bool) { //nolint:unparam + if s.cur.Y >= 0 { + curline := s.curbuf.Line(s.cur.Y) + for j := s.cur.X; j < s.curbuf.Width(); j++ { + if j >= 0 { + c := curline.At(j) + if !cellEqual(c, blank) { + curline.Set(j, blank) + force = true + } + } + } + } + + if force { + s.updatePen(blank) + count := s.newbuf.Width() - s.cur.X + if s.el0Cost() <= count { + s.buf.WriteString(ansi.EraseLineRight) //nolint:errcheck + } else { + for i := 0; i < count; i++ { + s.putCell(blank) + } + } + } +} + +// clearBlank returns a blank cell based on the current cursor background color. +func (s *Screen) clearBlank() *Cell { + c := BlankCell + if !s.cur.Style.Empty() || !s.cur.Link.Empty() { + c.Style = s.cur.Style + c.Link = s.cur.Link + } + return &c +} + +// insertCells inserts the count cells pointed by the given line at the current +// cursor position. +func (s *Screen) insertCells(line Line, count int) { + if s.xtermLike { + // Use [ansi.ICH] as an optimization. + s.buf.WriteString(ansi.InsertCharacter(count)) //nolint:errcheck + } else { + // Otherwise, use [ansi.IRM] mode. + s.buf.WriteString(ansi.SetInsertReplaceMode) //nolint:errcheck + } + + for i := 0; count > 0; i++ { + s.putAttrCell(line[i]) + count-- + } + + if !s.xtermLike { + s.buf.WriteString(ansi.ResetInsertReplaceMode) //nolint:errcheck + } +} + +// el0Cost returns the cost of using [ansi.EL] 0 i.e. 
[ansi.EraseLineRight]. If +// this terminal supports background color erase, it can be cheaper to use +// [ansi.EL] 0 i.e. [ansi.EraseLineRight] to clear +// trailing spaces. +func (s *Screen) el0Cost() int { + if s.xtermLike { + return 0 + } + return len(ansi.EraseLineRight) +} + +// transformLine transforms the given line in the current window to the +// corresponding line in the new window. It uses [ansi.ICH] and [ansi.DCH] to +// insert or delete characters. +func (s *Screen) transformLine(y int) { + var firstCell, oLastCell, nLastCell int // first, old last, new last index + oldLine := s.curbuf.Line(y) + newLine := s.newbuf.Line(y) + + // Find the first changed cell in the line + var lineChanged bool + for i := 0; i < s.newbuf.Width(); i++ { + if !cellEqual(newLine.At(i), oldLine.At(i)) { + lineChanged = true + break + } + } + + const ceolStandoutGlitch = false + if ceolStandoutGlitch && lineChanged { + s.move(0, y) + s.clearToEnd(nil, false) + s.putRange(oldLine, newLine, y, 0, s.newbuf.Width()-1) + } else { + blank := newLine.At(0) + + // It might be cheaper to clear leading spaces with [ansi.EL] 1 i.e. + // [ansi.EraseLineLeft]. + if blank == nil || blank.Clear() { + var oFirstCell, nFirstCell int + for oFirstCell = 0; oFirstCell < s.curbuf.Width(); oFirstCell++ { + if !cellEqual(oldLine.At(oFirstCell), blank) { + break + } + } + for nFirstCell = 0; nFirstCell < s.newbuf.Width(); nFirstCell++ { + if !cellEqual(newLine.At(nFirstCell), blank) { + break + } + } + + if nFirstCell == oFirstCell { + firstCell = nFirstCell + + // Find the first differing cell + for firstCell < s.newbuf.Width() && + cellEqual(oldLine.At(firstCell), newLine.At(firstCell)) { + firstCell++ + } + } else if oFirstCell > nFirstCell { + firstCell = nFirstCell + } else if oFirstCell < nFirstCell { + firstCell = oFirstCell + el1Cost := len(ansi.EraseLineLeft) + if el1Cost < nFirstCell-oFirstCell { + if nFirstCell >= s.newbuf.Width() { + s.move(0, y) + s.updatePen(blank) + s.buf.WriteString(ansi.EraseLineRight) //nolint:errcheck + } else { + s.move(nFirstCell-1, y) + s.updatePen(blank) + s.buf.WriteString(ansi.EraseLineLeft) //nolint:errcheck + } + + for firstCell < nFirstCell { + oldLine.Set(firstCell, blank) + firstCell++ + } + } + } + } else { + // Find the first differing cell + for firstCell < s.newbuf.Width() && cellEqual(newLine.At(firstCell), oldLine.At(firstCell)) { + firstCell++ + } + } + + // If we didn't find one, we're done + if firstCell >= s.newbuf.Width() { + return + } + + blank = newLine.At(s.newbuf.Width() - 1) + if blank != nil && !blank.Clear() { + // Find the last differing cell + nLastCell = s.newbuf.Width() - 1 + for nLastCell > firstCell && cellEqual(newLine.At(nLastCell), oldLine.At(nLastCell)) { + nLastCell-- + } + + if nLastCell >= firstCell { + s.move(firstCell, y) + s.putRange(oldLine, newLine, y, firstCell, nLastCell) + if firstCell < len(oldLine) && firstCell < len(newLine) { + copy(oldLine[firstCell:], newLine[firstCell:]) + } else { + copy(oldLine, newLine) + } + } + + return + } + + // Find last non-blank cell in the old line. + oLastCell = s.curbuf.Width() - 1 + for oLastCell > firstCell && cellEqual(oldLine.At(oLastCell), blank) { + oLastCell-- + } + + // Find last non-blank cell in the new line. 
+ nLastCell = s.newbuf.Width() - 1 + for nLastCell > firstCell && cellEqual(newLine.At(nLastCell), blank) { + nLastCell-- + } + + if nLastCell == firstCell && s.el0Cost() < oLastCell-nLastCell { + s.move(firstCell, y) + if !cellEqual(newLine.At(firstCell), blank) { + s.putCell(newLine.At(firstCell)) + } + s.clearToEnd(blank, false) + } else if nLastCell != oLastCell && + !cellEqual(newLine.At(nLastCell), oldLine.At(oLastCell)) { + s.move(firstCell, y) + if oLastCell-nLastCell > s.el0Cost() { + if s.putRange(oldLine, newLine, y, firstCell, nLastCell) { + s.move(nLastCell+1, y) + } + s.clearToEnd(blank, false) + } else { + n := max(nLastCell, oLastCell) + s.putRange(oldLine, newLine, y, firstCell, n) + } + } else { + nLastNonBlank := nLastCell + oLastNonBlank := oLastCell + + // Find the last cells that really differ. + // Can be -1 if no cells differ. + for cellEqual(newLine.At(nLastCell), oldLine.At(oLastCell)) { + if !cellEqual(newLine.At(nLastCell-1), oldLine.At(oLastCell-1)) { + break + } + nLastCell-- + oLastCell-- + if nLastCell == -1 || oLastCell == -1 { + break + } + } + + n := min(oLastCell, nLastCell) + if n >= firstCell { + s.move(firstCell, y) + s.putRange(oldLine, newLine, y, firstCell, n) + } + + if oLastCell < nLastCell { + m := max(nLastNonBlank, oLastNonBlank) + if n != 0 { + for n > 0 { + wide := newLine.At(n + 1) + if wide == nil || !wide.Empty() { + break + } + n-- + oLastCell-- + } + } else if n >= firstCell && newLine.At(n) != nil && newLine.At(n).Width > 1 { + next := newLine.At(n + 1) + for next != nil && next.Empty() { + n++ + oLastCell++ + } + } + + s.move(n+1, y) + ichCost := 3 + nLastCell - oLastCell + if s.xtermLike && (nLastCell < nLastNonBlank || ichCost > (m-n)) { + s.putRange(oldLine, newLine, y, n+1, m) + } else { + s.insertCells(newLine[n+1:], nLastCell-oLastCell) + } + } else if oLastCell > nLastCell { + s.move(n+1, y) + dchCost := 3 + oLastCell - nLastCell + if dchCost > len(ansi.EraseLineRight)+nLastNonBlank-(n+1) { + if s.putRange(oldLine, newLine, y, n+1, nLastNonBlank) { + s.move(nLastNonBlank+1, y) + } + s.clearToEnd(blank, false) + } else { + s.updatePen(blank) + s.deleteCells(oLastCell - nLastCell) + } + } + } + } + + // Update the old line with the new line + if firstCell < len(oldLine) && firstCell < len(newLine) { + copy(oldLine[firstCell:], newLine[firstCell:]) + } else { + copy(oldLine, newLine) + } +} + +// deleteCells deletes the count cells at the current cursor position and moves +// the rest of the line to the left. This is equivalent to [ansi.DCH]. +func (s *Screen) deleteCells(count int) { + // [ansi.DCH] will shift in cells from the right margin so we need to + // ensure that they are the right style. + s.buf.WriteString(ansi.DeleteCharacter(count)) //nolint:errcheck +} + +// clearToBottom clears the screen from the current cursor position to the end +// of the screen. +func (s *Screen) clearToBottom(blank *Cell) { + row, col := s.cur.Y, s.cur.X + if row < 0 { + row = 0 + } + + s.updatePen(blank) + s.buf.WriteString(ansi.EraseScreenBelow) //nolint:errcheck + // Clear the rest of the current line + s.curbuf.ClearRect(Rect(col, row, s.curbuf.Width()-col, 1)) + // Clear everything below the current line + s.curbuf.ClearRect(Rect(0, row+1, s.curbuf.Width(), s.curbuf.Height()-row-1)) +} + +// clearBottom tests if clearing the end of the screen would satisfy part of +// the screen update. Scan backwards through lines in the screen checking if +// each is blank and one or more are changed. +// It returns the top line. 
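+// When one or more trailing lines are blank in the new buffer, they are
+// erased with a single [Screen.clearToBottom] call and top is lowered
+// accordingly.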
+func (s *Screen) clearBottom(total int) (top int) {
+ if total <= 0 {
+ return
+ }
+
+ top = total
+ last := s.newbuf.Width()
+ blank := s.clearBlank()
+ canClearWithBlank := blank == nil || blank.Clear()
+
+ if canClearWithBlank {
+ var row int
+ for row = total - 1; row >= 0; row-- {
+ oldLine := s.curbuf.Line(row)
+ newLine := s.newbuf.Line(row)
+
+ var col int
+ ok := true
+ for col = 0; ok && col < last; col++ {
+ ok = cellEqual(newLine.At(col), blank)
+ }
+ if !ok {
+ break
+ }
+
+ for col = 0; ok && col < last; col++ {
+ ok = len(oldLine) == last && cellEqual(oldLine.At(col), blank)
+ }
+ if !ok {
+ top = row
+ }
+ }
+
+ if top < total {
+ s.move(0, top-1) // top is 1-based
+ s.clearToBottom(blank)
+ if s.oldhash != nil && s.newhash != nil &&
+ row < len(s.oldhash) && row < len(s.newhash) {
+ for row := top; row < s.newbuf.Height(); row++ {
+ s.oldhash[row] = s.newhash[row]
+ }
+ }
+ }
+ }
+
+ return
+}
+
+// clearScreen clears the screen and puts the cursor at the home position.
+func (s *Screen) clearScreen(blank *Cell) {
+ s.updatePen(blank)
+ s.buf.WriteString(ansi.CursorHomePosition) //nolint:errcheck
+ s.buf.WriteString(ansi.EraseEntireScreen) //nolint:errcheck
+ s.cur.X, s.cur.Y = 0, 0
+ s.curbuf.Fill(blank)
+}
+
+// clearBelow clears everything below and including the row.
+func (s *Screen) clearBelow(blank *Cell, row int) {
+ s.move(0, row)
+ s.clearToBottom(blank)
+}
+
+// clearUpdate forces a screen redraw.
+func (s *Screen) clearUpdate() {
+ blank := s.clearBlank()
+ var nonEmpty int
+ if s.opts.AltScreen {
+ // XXX: We're using the maximum height of the two buffers to ensure
+ // we write newly added lines to the screen in [Screen.transformLine].
+ nonEmpty = max(s.curbuf.Height(), s.newbuf.Height())
+ s.clearScreen(blank)
+ } else {
+ nonEmpty = s.newbuf.Height()
+ s.clearBelow(blank, 0)
+ }
+ nonEmpty = s.clearBottom(nonEmpty)
+ for i := 0; i < nonEmpty; i++ {
+ s.transformLine(i)
+ }
+}
+
+// Flush flushes the buffer to the screen.
+func (s *Screen) Flush() (err error) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.flush()
+}
+
+func (s *Screen) flush() (err error) {
+ // Write the buffer
+ if s.buf.Len() > 0 {
+ _, err = s.w.Write(s.buf.Bytes()) //nolint:errcheck
+ if err == nil {
+ s.buf.Reset()
+ }
+ }
+
+ return
+}
+
+// Render renders changes of the screen to the internal buffer. Call
+// [Screen.Flush] to flush pending changes to the screen.
+func (s *Screen) Render() {
+ s.mu.Lock()
+ s.render()
+ s.mu.Unlock()
+}
+
+func (s *Screen) render() {
+ // Do we need to render anything?
+ if s.opts.AltScreen == s.altScreenMode &&
+ !s.opts.ShowCursor == s.cursorHidden &&
+ !s.clear &&
+ len(s.touch) == 0 &&
+ len(s.queueAbove) == 0 {
+ return
+ }
+
+ // TODO: Investigate whether this is necessary. Theoretically, terminals
+ // can add/remove tab stops and we should be able to handle that. We could
+ // use [ansi.DECTABSR] to read the tab stops, but that's not implemented in
+ // most terminals :/
+ // // Are we using hard tabs? If so, ensure tabs are using the
+ // // default interval using [ansi.DECST8C].
+ // if s.opts.HardTabs && !s.initTabs {
+ // s.buf.WriteString(ansi.SetTabEvery8Columns)
+ // s.initTabs = true
+ // }
+
+ // Do we need alt-screen mode?
+ if s.opts.AltScreen != s.altScreenMode {
+ if s.opts.AltScreen {
+ s.buf.WriteString(ansi.SetAltScreenSaveCursorMode)
+ } else {
+ s.buf.WriteString(ansi.ResetAltScreenSaveCursorMode)
+ }
+ s.altScreenMode = s.opts.AltScreen
+ }
+
+ // Do we need text cursor mode?
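+ // Note that the desired state is !ShowCursor; the double negation
+ // compares it against the currently applied cursorHidden flag.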
+ if !s.opts.ShowCursor != s.cursorHidden { + s.cursorHidden = !s.opts.ShowCursor + if s.cursorHidden { + s.buf.WriteString(ansi.HideCursor) + } + } + + // Do we have queued strings to write above the screen? + if len(s.queueAbove) > 0 { + // TODO: Use scrolling region if available. + // TODO: Use [Screen.Write] [io.Writer] interface. + + // We need to scroll the screen up by the number of lines in the queue. + // We can't use [ansi.SU] because we want the cursor to move down until + // it reaches the bottom of the screen. + s.move(0, s.newbuf.Height()-1) + s.buf.WriteString(strings.Repeat("\n", len(s.queueAbove))) + s.cur.Y += len(s.queueAbove) + // XXX: Now go to the top of the screen, insert new lines, and write + // the queued strings. It is important to use [Screen.moveCursor] + // instead of [Screen.move] because we don't want to perform any checks + // on the cursor position. + s.moveCursor(0, 0, false) + s.buf.WriteString(ansi.InsertLine(len(s.queueAbove))) + for _, line := range s.queueAbove { + s.buf.WriteString(line + "\r\n") + } + + // Clear the queue + s.queueAbove = s.queueAbove[:0] + } + + var nonEmpty int + + // XXX: In inline mode, after a screen resize, we need to clear the extra + // lines at the bottom of the screen. This is because in inline mode, we + // don't use the full screen height and the current buffer size might be + // larger than the new buffer size. + partialClear := !s.opts.AltScreen && s.cur.X != -1 && s.cur.Y != -1 && + s.curbuf.Width() == s.newbuf.Width() && + s.curbuf.Height() > 0 && + s.curbuf.Height() > s.newbuf.Height() + + if !s.clear && partialClear { + s.clearBelow(nil, s.newbuf.Height()-1) + } + + if s.clear { + s.clearUpdate() + s.clear = false + } else if len(s.touch) > 0 { + if s.opts.AltScreen { + // Optimize scrolling for the alternate screen buffer. + // TODO: Should we optimize for inline mode as well? If so, we need + // to know the actual cursor position to use [ansi.DECSTBM]. + s.scrollOptimize() + } + + var changedLines int + var i int + + if s.opts.AltScreen { + nonEmpty = min(s.curbuf.Height(), s.newbuf.Height()) + } else { + nonEmpty = s.newbuf.Height() + } + + nonEmpty = s.clearBottom(nonEmpty) + for i = 0; i < nonEmpty; i++ { + _, ok := s.touch[i] + if ok { + s.transformLine(i) + changedLines++ + } + } + } + + // Sync windows and screen + s.touch = make(map[int]lineData, s.newbuf.Height()) + + if s.curbuf.Width() != s.newbuf.Width() || s.curbuf.Height() != s.newbuf.Height() { + // Resize the old buffer to match the new buffer. + _, oldh := s.curbuf.Width(), s.curbuf.Height() + s.curbuf.Resize(s.newbuf.Width(), s.newbuf.Height()) + // Sync new lines to old lines + for i := oldh - 1; i < s.newbuf.Height(); i++ { + copy(s.curbuf.Line(i), s.newbuf.Line(i)) + } + } + + s.updatePen(nil) // nil indicates a blank cell with no styles + + // Do we have enough changes to justify toggling the cursor? + if s.buf.Len() > 1 && s.opts.ShowCursor && !s.cursorHidden && s.queuedText { + nb := new(bytes.Buffer) + nb.Grow(s.buf.Len() + len(ansi.HideCursor) + len(ansi.ShowCursor)) + nb.WriteString(ansi.HideCursor) + nb.Write(s.buf.Bytes()) + nb.WriteString(ansi.ShowCursor) + *s.buf = *nb + } + + s.queuedText = false +} + +// Close writes the final screen update and resets the screen. 
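+// It moves the cursor to the last line, leaves the alternate screen if
+// needed, restores cursor visibility, and flushes any buffered output before
+// resetting internal state.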
+func (s *Screen) Close() (err error) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ s.render()
+ s.updatePen(nil)
+ // Go to the bottom of the screen
+ s.move(0, s.newbuf.Height()-1)
+
+ if s.altScreenMode {
+ s.buf.WriteString(ansi.ResetAltScreenSaveCursorMode)
+ s.altScreenMode = false
+ }
+
+ if s.cursorHidden {
+ s.buf.WriteString(ansi.ShowCursor)
+ s.cursorHidden = false
+ }
+
+ // Write the buffer
+ err = s.flush()
+ if err != nil {
+ return
+ }
+
+ s.reset()
+ return
+}
+
+// reset resets the screen to its initial state.
+func (s *Screen) reset() {
+ s.scrollHeight = 0
+ s.cursorHidden = false
+ s.altScreenMode = false
+ s.touch = make(map[int]lineData, s.newbuf.Height())
+ if s.curbuf != nil {
+ s.curbuf.Clear()
+ }
+ if s.newbuf != nil {
+ s.newbuf.Clear()
+ }
+ s.buf.Reset()
+ s.tabs = DefaultTabStops(s.newbuf.Width())
+ s.oldhash, s.newhash = nil, nil
+
+ // We always disable HardTabs when termtype is "linux".
+ if strings.HasPrefix(s.opts.Term, "linux") {
+ s.opts.HardTabs = false
+ }
+}
+
+// Resize resizes the screen.
+func (s *Screen) Resize(width, height int) bool {
+ oldw := s.newbuf.Width()
+ oldh := s.newbuf.Height()
+
+ if s.opts.AltScreen || width != oldw {
+ // We only clear the whole screen if the width changes. Adding/removing
+ // rows is handled by the [Screen.render] and [Screen.transformLine]
+ // methods.
+ s.clear = true
+ }
+
+ // Clear new columns and lines
+ if width > oldw {
+ s.ClearRect(Rect(max(oldw-1, 0), 0, width-oldw, height))
+ } else if width < oldw {
+ s.ClearRect(Rect(max(width-1, 0), 0, oldw-width, height))
+ }
+
+ if height > oldh {
+ s.ClearRect(Rect(0, max(oldh-1, 0), width, height-oldh))
+ } else if height < oldh {
+ s.ClearRect(Rect(0, max(height-1, 0), width, oldh-height))
+ }
+
+ s.mu.Lock()
+ s.newbuf.Resize(width, height)
+ s.tabs.Resize(width)
+ s.oldhash, s.newhash = nil, nil
+ s.scrollHeight = 0 // reset scroll lines
+ s.mu.Unlock()
+
+ return true
+}
+
+// MoveTo moves the cursor to the given position.
+func (s *Screen) MoveTo(x, y int) {
+ s.mu.Lock()
+ s.move(x, y)
+ s.mu.Unlock()
+}
+
+// InsertAbove inserts a string above the screen. The inserted string is not
+// managed by the screen. This does nothing when alternate screen mode is
+// enabled.
+func (s *Screen) InsertAbove(str string) {
+ if s.opts.AltScreen {
+ return
+ }
+ s.mu.Lock()
+ for _, line := range strings.Split(str, "\n") {
+ s.queueAbove = append(s.queueAbove, s.method.Truncate(line, s.Width(), ""))
+ }
+ s.mu.Unlock()
+}
diff --git a/vendor/github.com/charmbracelet/x/cellbuf/sequence.go b/vendor/github.com/charmbracelet/x/cellbuf/sequence.go
new file mode 100644
index 000000000..613eefef8
--- /dev/null
+++ b/vendor/github.com/charmbracelet/x/cellbuf/sequence.go
@@ -0,0 +1,131 @@
+package cellbuf
+
+import (
+ "bytes"
+ "image/color"
+
+ "github.com/charmbracelet/x/ansi"
+)
+
+// ReadStyle reads Select Graphic Rendition (SGR) escape sequences from a
+// list of parameters.
+func ReadStyle(params ansi.Params, pen *Style) {
+ if len(params) == 0 {
+ pen.Reset()
+ return
+ }
+
+ for i := 0; i < len(params); i++ {
+ param, hasMore, _ := params.Param(i, 0)
+ switch param {
+ case 0: // Reset
+ pen.Reset()
+ case 1: // Bold
+ pen.Bold(true)
+ case 2: // Dim/Faint
+ pen.Faint(true)
+ case 3: // Italic
+ pen.Italic(true)
+ case 4: // Underline
+ nextParam, _, ok := params.Param(i+1, 0)
+ if hasMore && ok { // Only accept subparameters i.e.
separated by ":"
+ switch nextParam {
+ case 0, 1, 2, 3, 4, 5:
+ i++
+ switch nextParam {
+ case 0: // No Underline
+ pen.UnderlineStyle(NoUnderline)
+ case 1: // Single Underline
+ pen.UnderlineStyle(SingleUnderline)
+ case 2: // Double Underline
+ pen.UnderlineStyle(DoubleUnderline)
+ case 3: // Curly Underline
+ pen.UnderlineStyle(CurlyUnderline)
+ case 4: // Dotted Underline
+ pen.UnderlineStyle(DottedUnderline)
+ case 5: // Dashed Underline
+ pen.UnderlineStyle(DashedUnderline)
+ }
+ }
+ } else {
+ // Single Underline
+ pen.Underline(true)
+ }
+ case 5: // Slow Blink
+ pen.SlowBlink(true)
+ case 6: // Rapid Blink
+ pen.RapidBlink(true)
+ case 7: // Reverse
+ pen.Reverse(true)
+ case 8: // Conceal
+ pen.Conceal(true)
+ case 9: // Crossed-out/Strikethrough
+ pen.Strikethrough(true)
+ case 22: // Normal Intensity (not bold or faint)
+ pen.Bold(false).Faint(false)
+ case 23: // Not italic, not Fraktur
+ pen.Italic(false)
+ case 24: // Not underlined
+ pen.Underline(false)
+ case 25: // Blink off
+ pen.SlowBlink(false).RapidBlink(false)
+ case 27: // Positive (not reverse)
+ pen.Reverse(false)
+ case 28: // Reveal
+ pen.Conceal(false)
+ case 29: // Not crossed out
+ pen.Strikethrough(false)
+ case 30, 31, 32, 33, 34, 35, 36, 37: // Set foreground
+ pen.Foreground(ansi.Black + ansi.BasicColor(param-30)) //nolint:gosec
+ case 38: // Set foreground 256 or truecolor
+ var c color.Color
+ n := ReadStyleColor(params[i:], &c)
+ if n > 0 {
+ pen.Foreground(c)
+ i += n - 1
+ }
+ case 39: // Default foreground
+ pen.Foreground(nil)
+ case 40, 41, 42, 43, 44, 45, 46, 47: // Set background
+ pen.Background(ansi.Black + ansi.BasicColor(param-40)) //nolint:gosec
+ case 48: // Set background 256 or truecolor
+ var c color.Color
+ n := ReadStyleColor(params[i:], &c)
+ if n > 0 {
+ pen.Background(c)
+ i += n - 1
+ }
+ case 49: // Default background
+ pen.Background(nil)
+ case 58: // Set underline color
+ var c color.Color
+ n := ReadStyleColor(params[i:], &c)
+ if n > 0 {
+ pen.UnderlineColor(c)
+ i += n - 1
+ }
+ case 59: // Default underline color
+ pen.UnderlineColor(nil)
+ case 90, 91, 92, 93, 94, 95, 96, 97: // Set bright foreground
+ pen.Foreground(ansi.BrightBlack + ansi.BasicColor(param-90)) //nolint:gosec
+ case 100, 101, 102, 103, 104, 105, 106, 107: // Set bright background
+ pen.Background(ansi.BrightBlack + ansi.BasicColor(param-100)) //nolint:gosec
+ }
+ }
+}
+
+// ReadLink reads a hyperlink escape sequence from a data buffer.
+func ReadLink(p []byte, link *Link) {
+ params := bytes.Split(p, []byte{';'})
+ if len(params) != 3 {
+ return
+ }
+ link.Params = string(params[1])
+ link.URL = string(params[2])
+}
+
+// ReadStyleColor reads a color from a list of parameters.
+// See [ansi.ReadStyleColor] for more information.
+func ReadStyleColor(params ansi.Params, c *color.Color) int {
+ return ansi.ReadStyleColor(params, c)
+}
diff --git a/vendor/github.com/charmbracelet/x/cellbuf/style.go b/vendor/github.com/charmbracelet/x/cellbuf/style.go
new file mode 100644
index 000000000..82c4afb73
--- /dev/null
+++ b/vendor/github.com/charmbracelet/x/cellbuf/style.go
@@ -0,0 +1,31 @@
+package cellbuf
+
+import (
+ "github.com/charmbracelet/colorprofile"
+)
+
+// ConvertStyle converts a style to respect the given color profile.
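+// [colorprofile.TrueColor] styles pass through unchanged, [colorprofile.Ascii]
+// drops colors but keeps other attributes, [colorprofile.NoTTY] drops the
+// style entirely, and any other profile has its colors converted down to the
+// profile's palette.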
+func ConvertStyle(s Style, p colorprofile.Profile) Style { + switch p { + case colorprofile.TrueColor: + return s + case colorprofile.Ascii: + s.Fg = nil + s.Bg = nil + s.Ul = nil + case colorprofile.NoTTY: + return Style{} + } + + if s.Fg != nil { + s.Fg = p.Convert(s.Fg) + } + if s.Bg != nil { + s.Bg = p.Convert(s.Bg) + } + if s.Ul != nil { + s.Ul = p.Convert(s.Ul) + } + + return s +} diff --git a/vendor/github.com/charmbracelet/x/cellbuf/tabstop.go b/vendor/github.com/charmbracelet/x/cellbuf/tabstop.go new file mode 100644 index 000000000..24eec449a --- /dev/null +++ b/vendor/github.com/charmbracelet/x/cellbuf/tabstop.go @@ -0,0 +1,137 @@ +package cellbuf + +// DefaultTabInterval is the default tab interval. +const DefaultTabInterval = 8 + +// TabStops represents horizontal line tab stops. +type TabStops struct { + stops []int + interval int + width int +} + +// NewTabStops creates a new set of tab stops from a number of columns and an +// interval. +func NewTabStops(width, interval int) *TabStops { + ts := new(TabStops) + ts.interval = interval + ts.width = width + ts.stops = make([]int, (width+(interval-1))/interval) + ts.init(0, width) + return ts +} + +// DefaultTabStops creates a new set of tab stops with the default interval. +func DefaultTabStops(cols int) *TabStops { + return NewTabStops(cols, DefaultTabInterval) +} + +// Resize resizes the tab stops to the given width. +func (ts *TabStops) Resize(width int) { + if width == ts.width { + return + } + + if width < ts.width { + size := (width + (ts.interval - 1)) / ts.interval + ts.stops = ts.stops[:size] + } else { + size := (width - ts.width + (ts.interval - 1)) / ts.interval + ts.stops = append(ts.stops, make([]int, size)...) + } + + ts.init(ts.width, width) + ts.width = width +} + +// IsStop returns true if the given column is a tab stop. +func (ts TabStops) IsStop(col int) bool { + mask := ts.mask(col) + i := col >> 3 + if i < 0 || i >= len(ts.stops) { + return false + } + return ts.stops[i]&mask != 0 +} + +// Next returns the next tab stop after the given column. +func (ts TabStops) Next(col int) int { + return ts.Find(col, 1) +} + +// Prev returns the previous tab stop before the given column. +func (ts TabStops) Prev(col int) int { + return ts.Find(col, -1) +} + +// Find returns the prev/next tab stop before/after the given column and delta. +// If delta is positive, it returns the next tab stop after the given column. +// If delta is negative, it returns the previous tab stop before the given column. +// If delta is zero, it returns the given column. +func (ts TabStops) Find(col, delta int) int { + if delta == 0 { + return col + } + + var prev bool + count := delta + if count < 0 { + count = -count + prev = true + } + + for count > 0 { + if !prev { + if col >= ts.width-1 { + return col + } + + col++ + } else { + if col < 1 { + return col + } + + col-- + } + + if ts.IsStop(col) { + count-- + } + } + + return col +} + +// Set adds a tab stop at the given column. +func (ts *TabStops) Set(col int) { + mask := ts.mask(col) + ts.stops[col>>3] |= mask +} + +// Reset removes the tab stop at the given column. +func (ts *TabStops) Reset(col int) { + mask := ts.mask(col) + ts.stops[col>>3] &= ^mask +} + +// Clear removes all tab stops. +func (ts *TabStops) Clear() { + ts.stops = make([]int, len(ts.stops)) +} + +// mask returns the mask for the given column. +func (ts *TabStops) mask(col int) int { + return 1 << (col & (ts.interval - 1)) +} + +// init initializes the tab stops starting from col until width. 
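+// Columns at multiples of the interval become stops; all other columns in
+// the range are cleared.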
+func (ts *TabStops) init(col, width int) { + for x := col; x < width; x++ { + if x%ts.interval == 0 { + ts.Set(x) + } else { + ts.Reset(x) + } + } +} diff --git a/vendor/github.com/charmbracelet/x/cellbuf/utils.go b/vendor/github.com/charmbracelet/x/cellbuf/utils.go new file mode 100644 index 000000000..b0452fa9a --- /dev/null +++ b/vendor/github.com/charmbracelet/x/cellbuf/utils.go @@ -0,0 +1,38 @@ +package cellbuf + +import ( + "strings" +) + +// Height returns the height of a string. +func Height(s string) int { + return strings.Count(s, "\n") + 1 +} + +func min(a, b int) int { //nolint:predeclared + if a > b { + return b + } + return a +} + +func max(a, b int) int { //nolint:predeclared + if a > b { + return a + } + return b +} + +func clamp(v, low, high int) int { + if high < low { + low, high = high, low + } + return min(high, max(low, v)) +} + +func abs(a int) int { + if a < 0 { + return -a + } + return a +} diff --git a/vendor/github.com/charmbracelet/x/cellbuf/wrap.go b/vendor/github.com/charmbracelet/x/cellbuf/wrap.go new file mode 100644 index 000000000..59a2a3375 --- /dev/null +++ b/vendor/github.com/charmbracelet/x/cellbuf/wrap.go @@ -0,0 +1,178 @@ +package cellbuf + +import ( + "bytes" + "unicode" + "unicode/utf8" + + "github.com/charmbracelet/x/ansi" +) + +// Wrap returns a string that is wrapped to the specified limit applying any +// ANSI escape sequences in the string. It tries to wrap the string at word +// boundaries, but will break words if necessary. +// +// The breakpoints string is a list of characters that are considered +// breakpoints for word wrapping. A hyphen (-) is always considered a +// breakpoint. +// +// Note: breakpoints must be a string of 1-cell wide rune characters. +func Wrap(s string, limit int, breakpoints string) string { + if len(s) == 0 { + return "" + } + + if limit < 1 { + return s + } + + p := ansi.GetParser() + defer ansi.PutParser(p) + + var ( + buf bytes.Buffer + word bytes.Buffer + space bytes.Buffer + style, curStyle Style + link, curLink Link + curWidth int + wordLen int + ) + + addSpace := func() { + curWidth += space.Len() + buf.Write(space.Bytes()) + space.Reset() + } + + addWord := func() { + if word.Len() == 0 { + return + } + + curLink = link + curStyle = style + + addSpace() + curWidth += wordLen + buf.Write(word.Bytes()) + word.Reset() + wordLen = 0 + } + + addNewline := func() { + if !curStyle.Empty() { + buf.WriteString(ansi.ResetStyle) + } + if !curLink.Empty() { + buf.WriteString(ansi.ResetHyperlink()) + } + buf.WriteByte('\n') + if !curLink.Empty() { + buf.WriteString(ansi.SetHyperlink(curLink.URL, curLink.Params)) + } + if !curStyle.Empty() { + buf.WriteString(curStyle.Sequence()) + } + curWidth = 0 + space.Reset() + } + + var state byte + for len(s) > 0 { + seq, width, n, newState := ansi.DecodeSequence(s, state, p) + switch width { + case 0: + if ansi.Equal(seq, "\t") { + addWord() + space.WriteString(seq) + break + } else if ansi.Equal(seq, "\n") { + if wordLen == 0 { + if curWidth+space.Len() > limit { + curWidth = 0 + } else { + // preserve whitespaces + buf.Write(space.Bytes()) + } + space.Reset() + } + + addWord() + addNewline() + break + } else if ansi.HasCsiPrefix(seq) && p.Command() == 'm' { + // SGR style sequence [ansi.SGR] + ReadStyle(p.Params(), &style) + } else if ansi.HasOscPrefix(seq) && p.Command() == 8 { + // Hyperlink sequence [ansi.SetHyperlink] + ReadLink(p.Data(), &link) + } + + word.WriteString(seq) + default: + if len(seq) == 1 { + // ASCII + r, _ := utf8.DecodeRuneInString(seq) + if 
unicode.IsSpace(r) { + addWord() + space.WriteRune(r) + break + } else if r == '-' || runeContainsAny(r, breakpoints) { + addSpace() + if curWidth+wordLen+width <= limit { + addWord() + buf.WriteString(seq) + curWidth += width + break + } + } + } + + if wordLen+width > limit { + // Hardwrap the word if it's too long + addWord() + } + + word.WriteString(seq) + wordLen += width + + if curWidth+wordLen+space.Len() > limit { + addNewline() + } + } + + s = s[n:] + state = newState + } + + if wordLen == 0 { + if curWidth+space.Len() > limit { + curWidth = 0 + } else { + // preserve whitespaces + buf.Write(space.Bytes()) + } + space.Reset() + } + + addWord() + + if !curLink.Empty() { + buf.WriteString(ansi.ResetHyperlink()) + } + if !curStyle.Empty() { + buf.WriteString(ansi.ResetStyle) + } + + return buf.String() +} + +func runeContainsAny[T string | []rune](r rune, s T) bool { + for _, c := range []rune(s) { + if c == r { + return true + } + } + return false +} diff --git a/vendor/github.com/charmbracelet/x/cellbuf/writer.go b/vendor/github.com/charmbracelet/x/cellbuf/writer.go new file mode 100644 index 000000000..ae8b2a810 --- /dev/null +++ b/vendor/github.com/charmbracelet/x/cellbuf/writer.go @@ -0,0 +1,339 @@ +package cellbuf + +import ( + "bytes" + "fmt" + "strings" + + "github.com/charmbracelet/x/ansi" +) + +// CellBuffer is a cell buffer that represents a set of cells in a screen or a +// grid. +type CellBuffer interface { + // Cell returns the cell at the given position. + Cell(x, y int) *Cell + // SetCell sets the cell at the given position to the given cell. It + // returns whether the cell was set successfully. + SetCell(x, y int, c *Cell) bool + // Bounds returns the bounds of the cell buffer. + Bounds() Rectangle +} + +// FillRect fills the rectangle within the cell buffer with the given cell. +// This will not fill cells outside the bounds of the cell buffer. +func FillRect(s CellBuffer, c *Cell, rect Rectangle) { + for y := rect.Min.Y; y < rect.Max.Y; y++ { + for x := rect.Min.X; x < rect.Max.X; x++ { + s.SetCell(x, y, c) //nolint:errcheck + } + } +} + +// Fill fills the cell buffer with the given cell. +func Fill(s CellBuffer, c *Cell) { + FillRect(s, c, s.Bounds()) +} + +// ClearRect clears the rectangle within the cell buffer with blank cells. +func ClearRect(s CellBuffer, rect Rectangle) { + FillRect(s, nil, rect) +} + +// Clear clears the cell buffer with blank cells. +func Clear(s CellBuffer) { + Fill(s, nil) +} + +// SetContentRect clears the rectangle within the cell buffer with blank cells, +// and sets the given string as its content. If the height or width of the +// string exceeds the height or width of the cell buffer, it will be truncated. +func SetContentRect(s CellBuffer, str string, rect Rectangle) { + // Replace all "\n" with "\r\n" to ensure the cursor is reset to the start + // of the line. Make sure we don't replace "\r\n" with "\r\r\n". + str = strings.ReplaceAll(str, "\r\n", "\n") + str = strings.ReplaceAll(str, "\n", "\r\n") + ClearRect(s, rect) + printString(s, ansi.GraphemeWidth, rect.Min.X, rect.Min.Y, rect, str, true, "") +} + +// SetContent clears the cell buffer with blank cells, and sets the given string +// as its content. If the height or width of the string exceeds the height or +// width of the cell buffer, it will be truncated. +func SetContent(s CellBuffer, str string) { + SetContentRect(s, str, s.Bounds()) +} + +// Render returns a string representation of the grid with ANSI escape sequences. 
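+// Lines are joined with "\r\n" and trailing spaces are trimmed from each
+// rendered line (see [RenderLine]).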
+func Render(d CellBuffer) string { + var buf bytes.Buffer + height := d.Bounds().Dy() + for y := 0; y < height; y++ { + _, line := RenderLine(d, y) + buf.WriteString(line) + if y < height-1 { + buf.WriteString("\r\n") + } + } + return buf.String() +} + +// RenderLine returns a string representation of the yth line of the grid along +// with the width of the line. +func RenderLine(d CellBuffer, n int) (w int, line string) { + var pen Style + var link Link + var buf bytes.Buffer + var pendingLine string + var pendingWidth int // this ignores space cells until we hit a non-space cell + + writePending := func() { + // If there's no pending line, we don't need to do anything. + if len(pendingLine) == 0 { + return + } + buf.WriteString(pendingLine) + w += pendingWidth + pendingWidth = 0 + pendingLine = "" + } + + for x := 0; x < d.Bounds().Dx(); x++ { + if cell := d.Cell(x, n); cell != nil && cell.Width > 0 { + // Convert the cell's style and link to the given color profile. + cellStyle := cell.Style + cellLink := cell.Link + if cellStyle.Empty() && !pen.Empty() { + writePending() + buf.WriteString(ansi.ResetStyle) //nolint:errcheck + pen.Reset() + } + if !cellStyle.Equal(&pen) { + writePending() + seq := cellStyle.DiffSequence(pen) + buf.WriteString(seq) // nolint:errcheck + pen = cellStyle + } + + // Write the URL escape sequence + if cellLink != link && link.URL != "" { + writePending() + buf.WriteString(ansi.ResetHyperlink()) //nolint:errcheck + link.Reset() + } + if cellLink != link { + writePending() + buf.WriteString(ansi.SetHyperlink(cellLink.URL, cellLink.Params)) //nolint:errcheck + link = cellLink + } + + // We only write the cell content if it's not empty. If it is, we + // append it to the pending line and width to be evaluated later. + if cell.Equal(&BlankCell) { + pendingLine += cell.String() + pendingWidth += cell.Width + } else { + writePending() + buf.WriteString(cell.String()) + w += cell.Width + } + } + } + if link.URL != "" { + buf.WriteString(ansi.ResetHyperlink()) //nolint:errcheck + } + if !pen.Empty() { + buf.WriteString(ansi.ResetStyle) //nolint:errcheck + } + return w, strings.TrimRight(buf.String(), " ") // Trim trailing spaces +} + +// ScreenWriter represents a writer that writes to a [Screen] parsing ANSI +// escape sequences and Unicode characters and converting them into cells that +// can be written to a cell [Buffer]. +type ScreenWriter struct { + *Screen +} + +// NewScreenWriter creates a new ScreenWriter that writes to the given Screen. +// This is a convenience function for creating a ScreenWriter. +func NewScreenWriter(s *Screen) *ScreenWriter { + return &ScreenWriter{s} +} + +// Write writes the given bytes to the screen. +// This will recognize ANSI [ansi.SGR] style and [ansi.SetHyperlink] escape +// sequences. +func (s *ScreenWriter) Write(p []byte) (n int, err error) { + printString(s.Screen, s.method, + s.cur.X, s.cur.Y, s.Bounds(), + p, false, "") + return len(p), nil +} + +// SetContent clears the screen with blank cells, and sets the given string as +// its content. If the height or width of the string exceeds the height or +// width of the screen, it will be truncated. +// +// This will recognize ANSI [ansi.SGR] style and [ansi.SetHyperlink] escape sequences. +func (s *ScreenWriter) SetContent(str string) { + s.SetContentRect(str, s.Bounds()) +} + +// SetContentRect clears the rectangle within the screen with blank cells, and +// sets the given string as its content. 
If the height or width of the string +// exceeds the height or width of the screen, it will be truncated. +// +// This will recognize ANSI [ansi.SGR] style and [ansi.SetHyperlink] escape +// sequences. +func (s *ScreenWriter) SetContentRect(str string, rect Rectangle) { + // Replace all "\n" with "\r\n" to ensure the cursor is reset to the start + // of the line. Make sure we don't replace "\r\n" with "\r\r\n". + str = strings.ReplaceAll(str, "\r\n", "\n") + str = strings.ReplaceAll(str, "\n", "\r\n") + s.ClearRect(rect) + printString(s.Screen, s.method, + rect.Min.X, rect.Min.Y, rect, + str, true, "") +} + +// Print prints the string at the current cursor position. It will wrap the +// string to the width of the screen if it exceeds the width of the screen. +// This will recognize ANSI [ansi.SGR] style and [ansi.SetHyperlink] escape +// sequences. +func (s *ScreenWriter) Print(str string, v ...interface{}) { + if len(v) > 0 { + str = fmt.Sprintf(str, v...) + } + printString(s.Screen, s.method, + s.cur.X, s.cur.Y, s.Bounds(), + str, false, "") +} + +// PrintAt prints the string at the given position. It will wrap the string to +// the width of the screen if it exceeds the width of the screen. +// This will recognize ANSI [ansi.SGR] style and [ansi.SetHyperlink] escape +// sequences. +func (s *ScreenWriter) PrintAt(x, y int, str string, v ...interface{}) { + if len(v) > 0 { + str = fmt.Sprintf(str, v...) + } + printString(s.Screen, s.method, + x, y, s.Bounds(), + str, false, "") +} + +// PrintCrop prints the string at the current cursor position and truncates the +// text if it exceeds the width of the screen. Use tail to specify a string to +// append if the string is truncated. +// This will recognize ANSI [ansi.SGR] style and [ansi.SetHyperlink] escape +// sequences. +func (s *ScreenWriter) PrintCrop(str string, tail string) { + printString(s.Screen, s.method, + s.cur.X, s.cur.Y, s.Bounds(), + str, true, tail) +} + +// PrintCropAt prints the string at the given position and truncates the text +// if it exceeds the width of the screen. Use tail to specify a string to append +// if the string is truncated. +// This will recognize ANSI [ansi.SGR] style and [ansi.SetHyperlink] escape +// sequences. +func (s *ScreenWriter) PrintCropAt(x, y int, str string, tail string) { + printString(s.Screen, s.method, + x, y, s.Bounds(), + str, true, tail) +} + +// printString draws a string starting at the given position. +func printString[T []byte | string]( + s CellBuffer, + m ansi.Method, + x, y int, + bounds Rectangle, str T, + truncate bool, tail string, +) { + p := ansi.GetParser() + defer ansi.PutParser(p) + + var tailc Cell + if truncate && len(tail) > 0 { + if m == ansi.WcWidth { + tailc = *NewCellString(tail) + } else { + tailc = *NewGraphemeCell(tail) + } + } + + decoder := ansi.DecodeSequenceWc[T] + if m == ansi.GraphemeWidth { + decoder = ansi.DecodeSequence[T] + } + + var cell Cell + var style Style + var link Link + var state byte + for len(str) > 0 { + seq, width, n, newState := decoder(str, state, p) + + switch width { + case 1, 2, 3, 4: // wide cells can go up to 4 cells wide + cell.Width += width + cell.Append([]rune(string(seq))...) + + if !truncate && x+cell.Width > bounds.Max.X && y+1 < bounds.Max.Y { + // Wrap the string to the width of the window + x = bounds.Min.X + y++ + } + if Pos(x, y).In(bounds) { + if truncate && tailc.Width > 0 && x+cell.Width > bounds.Max.X-tailc.Width { + // Truncate the string and append the tail if any. 
+ cell := tailc + cell.Style = style + cell.Link = link + s.SetCell(x, y, &cell) + x += tailc.Width + } else { + // Print the cell to the screen + cell.Style = style + cell.Link = link + s.SetCell(x, y, &cell) //nolint:errcheck + x += width + } + } + + // String is too long for the line, truncate it. + // Make sure we reset the cell for the next iteration. + cell.Reset() + default: + // Valid sequences always have a non-zero Cmd. + // TODO: Handle cursor movement and other sequences + switch { + case ansi.HasCsiPrefix(seq) && p.Command() == 'm': + // SGR - Select Graphic Rendition + ReadStyle(p.Params(), &style) + case ansi.HasOscPrefix(seq) && p.Command() == 8: + // Hyperlinks + ReadLink(p.Data(), &link) + case ansi.Equal(seq, T("\n")): + y++ + case ansi.Equal(seq, T("\r")): + x = bounds.Min.X + default: + cell.Append([]rune(string(seq))...) + } + } + + // Advance the state and data + state = newState + str = str[n:] + } + + // Make sure to set the last cell if it's not empty. + if !cell.Empty() { + s.SetCell(x, y, &cell) //nolint:errcheck + cell.Reset() + } +} diff --git a/vendor/github.com/charmbracelet/x/term/term_windows.go b/vendor/github.com/charmbracelet/x/term/term_windows.go index 9e1c4740b..fe7afdec9 100644 --- a/vendor/github.com/charmbracelet/x/term/term_windows.go +++ b/vendor/github.com/charmbracelet/x/term/term_windows.go @@ -25,6 +25,7 @@ func makeRaw(fd uintptr) (*State, error) { return nil, err } raw := st &^ (windows.ENABLE_ECHO_INPUT | windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT | windows.ENABLE_PROCESSED_OUTPUT) + raw |= windows.ENABLE_VIRTUAL_TERMINAL_INPUT if err := windows.SetConsoleMode(windows.Handle(fd), raw); err != nil { return nil, err } diff --git a/vendor/github.com/imdario/mergo/.deepsource.toml b/vendor/github.com/imdario/mergo/.deepsource.toml deleted file mode 100644 index 8a0681af8..000000000 --- a/vendor/github.com/imdario/mergo/.deepsource.toml +++ /dev/null @@ -1,12 +0,0 @@ -version = 1 - -test_patterns = [ - "*_test.go" -] - -[[analyzers]] -name = "go" -enabled = true - - [analyzers.meta] - import_path = "github.com/imdario/mergo" \ No newline at end of file diff --git a/vendor/github.com/imdario/mergo/.gitignore b/vendor/github.com/imdario/mergo/.gitignore deleted file mode 100644 index 529c3412b..000000000 --- a/vendor/github.com/imdario/mergo/.gitignore +++ /dev/null @@ -1,33 +0,0 @@ -#### joe made this: http://goel.io/joe - -#### go #### -# Binaries for programs and plugins -*.exe -*.dll -*.so -*.dylib - -# Test binary, build with `go test -c` -*.test - -# Output of the go coverage tool, specifically when used with LiteIDE -*.out - -# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 -.glide/ - -#### vim #### -# Swap -[._]*.s[a-v][a-z] -[._]*.sw[a-p] -[._]s[a-v][a-z] -[._]sw[a-p] - -# Session -Session.vim - -# Temporary -.netrwhist -*~ -# Auto-generated tag files -tags diff --git a/vendor/github.com/imdario/mergo/.travis.yml b/vendor/github.com/imdario/mergo/.travis.yml deleted file mode 100644 index d324c43ba..000000000 --- a/vendor/github.com/imdario/mergo/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -language: go -arch: - - amd64 - - ppc64le -install: - - go get -t - - go get golang.org/x/tools/cmd/cover - - go get github.com/mattn/goveralls -script: - - go test -race -v ./... 
-after_script: - - $HOME/gopath/bin/goveralls -service=travis-ci -repotoken $COVERALLS_TOKEN diff --git a/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md b/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md deleted file mode 100644 index 469b44907..000000000 --- a/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,46 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at i@dario.im. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. 
- -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/imdario/mergo/CONTRIBUTING.md b/vendor/github.com/imdario/mergo/CONTRIBUTING.md deleted file mode 100644 index 0a1ff9f94..000000000 --- a/vendor/github.com/imdario/mergo/CONTRIBUTING.md +++ /dev/null @@ -1,112 +0,0 @@ - -# Contributing to mergo - -First off, thanks for taking the time to contribute! ❤️ - -All types of contributions are encouraged and valued. See the [Table of Contents](#table-of-contents) for different ways to help and details about how this project handles them. Please make sure to read the relevant section before making your contribution. It will make it a lot easier for us maintainers and smooth out the experience for all involved. The community looks forward to your contributions. 🎉 - -> And if you like the project, but just don't have time to contribute, that's fine. There are other easy ways to support the project and show your appreciation, which we would also be very happy about: -> - Star the project -> - Tweet about it -> - Refer this project in your project's readme -> - Mention the project at local meetups and tell your friends/colleagues - - -## Table of Contents - -- [Code of Conduct](#code-of-conduct) -- [I Have a Question](#i-have-a-question) -- [I Want To Contribute](#i-want-to-contribute) -- [Reporting Bugs](#reporting-bugs) -- [Suggesting Enhancements](#suggesting-enhancements) - -## Code of Conduct - -This project and everyone participating in it is governed by the -[mergo Code of Conduct](https://github.com/imdario/mergo/blob/master/CODE_OF_CONDUCT.md). -By participating, you are expected to uphold this code. Please report unacceptable behavior -to <>. - - -## I Have a Question - -> If you want to ask a question, we assume that you have read the available [Documentation](https://pkg.go.dev/github.com/imdario/mergo). - -Before you ask a question, it is best to search for existing [Issues](https://github.com/imdario/mergo/issues) that might help you. In case you have found a suitable issue and still need clarification, you can write your question in this issue. It is also advisable to search the internet for answers first. - -If you then still feel the need to ask a question and need clarification, we recommend the following: - -- Open an [Issue](https://github.com/imdario/mergo/issues/new). -- Provide as much context as you can about what you're running into. -- Provide project and platform versions (nodejs, npm, etc), depending on what seems relevant. - -We will then take care of the issue as soon as possible. - -## I Want To Contribute - -> ### Legal Notice -> When contributing to this project, you must agree that you have authored 100% of the content, that you have the necessary rights to the content and that the content you contribute may be provided under the project license. - -### Reporting Bugs - - -#### Before Submitting a Bug Report - -A good bug report shouldn't leave others needing to chase you up for more information. Therefore, we ask you to investigate carefully, collect information and describe the issue in detail in your report. Please complete the following steps in advance to help us fix any potential bug as fast as possible. - -- Make sure that you are using the latest version.
-- Determine if your bug is really a bug and not an error on your side e.g. using incompatible environment components/versions (Make sure that you have read the [documentation](). If you are looking for support, you might want to check [this section](#i-have-a-question)). -- To see if other users have experienced (and potentially already solved) the same issue you are having, check whether a bug report already exists for your bug or error in the [bug tracker](https://github.com/imdario/mergo/issues?q=label%3Abug). -- Also make sure to search the internet (including Stack Overflow) to see if users outside of the GitHub community have discussed the issue. -- Collect information about the bug: -- Stack trace (Traceback) -- OS, Platform and Version (Windows, Linux, macOS, x86, ARM) -- Version of the interpreter, compiler, SDK, runtime environment, package manager, depending on what seems relevant. -- Possibly your input and the output -- Can you reliably reproduce the issue? And can you also reproduce it with older versions? - - -#### How Do I Submit a Good Bug Report? - -> You must never report security related issues, vulnerabilities or bugs including sensitive information to the issue tracker, or elsewhere in public. Instead sensitive bugs must be sent by email to . - - -We use GitHub issues to track bugs and errors. If you run into an issue with the project: - -- Open an [Issue](https://github.com/imdario/mergo/issues/new). (Since we can't be sure at this point whether it is a bug or not, we ask you not to talk about a bug yet and not to label the issue.) -- Explain the behavior you would expect and the actual behavior. -- Please provide as much context as possible and describe the *reproduction steps* that someone else can follow to recreate the issue on their own. This usually includes your code. For good bug reports you should isolate the problem and create a reduced test case. -- Provide the information you collected in the previous section. - -Once it's filed: - -- The project team will label the issue accordingly. -- A team member will try to reproduce the issue with your provided steps. If there are no reproduction steps or no obvious way to reproduce the issue, the team will ask you for those steps and mark the issue as `needs-repro`. Bugs with the `needs-repro` tag will not be addressed until they are reproduced. -- If the team is able to reproduce the issue, it will be marked `needs-fix`, as well as possibly other tags (such as `critical`), and the issue will be left to be implemented by someone. - -### Suggesting Enhancements - -This section guides you through submitting an enhancement suggestion for mergo, **including completely new features and minor improvements to existing functionality**. Following these guidelines will help maintainers and the community to understand your suggestion and find related suggestions. - - -#### Before Submitting an Enhancement - -- Make sure that you are using the latest version. -- Read the [documentation]() carefully and find out if the functionality is already covered, maybe by an individual configuration. -- Perform a [search](https://github.com/imdario/mergo/issues) to see if the enhancement has already been suggested. If it has, add a comment to the existing issue instead of opening a new one. -- Find out whether your idea fits with the scope and aims of the project. It's up to you to make a strong case to convince the project's developers of the merits of this feature.
Keep in mind that we want features that will be useful to the majority of our users and not just a small subset. If you're just targeting a minority of users, consider writing an add-on/plugin library. - - -#### How Do I Submit a Good Enhancement Suggestion? - -Enhancement suggestions are tracked as [GitHub issues](https://github.com/imdario/mergo/issues). - -- Use a **clear and descriptive title** for the issue to identify the suggestion. -- Provide a **step-by-step description of the suggested enhancement** in as many details as possible. -- **Describe the current behavior** and **explain which behavior you expected to see instead** and why. At this point you can also tell which alternatives do not work for you. -- You may want to **include screenshots and animated GIFs** which help you demonstrate the steps or point out the part which the suggestion is related to. You can use [this tool](https://www.cockos.com/licecap/) to record GIFs on macOS and Windows, and [this tool](https://github.com/colinkeenan/silentcast) or [this tool](https://github.com/GNOME/byzanz) on Linux. -- **Explain why this enhancement would be useful** to most mergo users. You may also want to point out the other projects that solved it better and which could serve as inspiration. - - -## Attribution -This guide is based on the **contributing-gen**. [Make your own](https://github.com/bttger/contributing-gen)! diff --git a/vendor/github.com/imdario/mergo/LICENSE b/vendor/github.com/imdario/mergo/LICENSE deleted file mode 100644 index 686680298..000000000 --- a/vendor/github.com/imdario/mergo/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2013 Dario Castañé. All rights reserved. -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
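The mergo hunks in this patch remove the vendored github.com/imdario/mergo tree; its replacement under the new canonical path, dario.cat/mergo, is vendored earlier in the same patch. For callers the migration appears to be an import-path change only; below is a minimal sketch under that assumption (the Config type is hypothetical and exists only to illustrate the call site):

```go
package main

import (
	"fmt"

	// Previously: "github.com/imdario/mergo"
	// The same API now lives under the new canonical module path:
	"dario.cat/mergo"
)

// Config is a hypothetical struct used only to illustrate the call site.
type Config struct {
	Host string
	Port int
}

func main() {
	dst := Config{Host: "localhost"} // Host set, Port left at its zero value
	src := Config{Host: "example.com", Port: 8080}

	// Merge fills only zero-value fields in dst, so Host is preserved.
	if err := mergo.Merge(&dst, src); err != nil {
		fmt.Println("merge failed:", err)
		return
	}
	fmt.Println(dst) // {localhost 8080}
}
```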
diff --git a/vendor/github.com/imdario/mergo/README.md b/vendor/github.com/imdario/mergo/README.md deleted file mode 100644 index ffbbb62c7..000000000 --- a/vendor/github.com/imdario/mergo/README.md +++ /dev/null @@ -1,242 +0,0 @@ -# Mergo - -[![GitHub release][5]][6] -[![GoCard][7]][8] -[![Test status][1]][2] -[![OpenSSF Scorecard][21]][22] -[![OpenSSF Best Practices][19]][20] -[![Coverage status][9]][10] -[![Sourcegraph][11]][12] -[![FOSSA status][13]][14] - -[![GoDoc][3]][4] -[![Become my sponsor][15]][16] -[![Tidelift][17]][18] - -[1]: https://github.com/imdario/mergo/workflows/tests/badge.svg?branch=master -[2]: https://github.com/imdario/mergo/actions/workflows/tests.yml -[3]: https://godoc.org/github.com/imdario/mergo?status.svg -[4]: https://godoc.org/github.com/imdario/mergo -[5]: https://img.shields.io/github/release/imdario/mergo.svg -[6]: https://github.com/imdario/mergo/releases -[7]: https://goreportcard.com/badge/imdario/mergo -[8]: https://goreportcard.com/report/github.com/imdario/mergo -[9]: https://coveralls.io/repos/github/imdario/mergo/badge.svg?branch=master -[10]: https://coveralls.io/github/imdario/mergo?branch=master -[11]: https://sourcegraph.com/github.com/imdario/mergo/-/badge.svg -[12]: https://sourcegraph.com/github.com/imdario/mergo?badge -[13]: https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=shield -[14]: https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield -[15]: https://img.shields.io/github/sponsors/imdario -[16]: https://github.com/sponsors/imdario -[17]: https://tidelift.com/badges/package/go/github.com%2Fimdario%2Fmergo -[18]: https://tidelift.com/subscription/pkg/go-github.com-imdario-mergo -[19]: https://bestpractices.coreinfrastructure.org/projects/7177/badge -[20]: https://bestpractices.coreinfrastructure.org/projects/7177 -[21]: https://api.securityscorecards.dev/projects/github.com/imdario/mergo/badge -[22]: https://api.securityscorecards.dev/projects/github.com/imdario/mergo - -A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. - -Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will recursively merge any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection). - -Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region of Marche. - -## Status - -It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, Microsoft, etc](https://github.com/imdario/mergo#mergo-in-the-wild). - -### Important note - -Please keep in mind that a problematic PR broke [0.3.9](//github.com/imdario/mergo/releases/tag/0.3.9). I reverted it in [0.3.10](//github.com/imdario/mergo/releases/tag/0.3.10), and I consider it stable but not bug-free. Also, this version adds support for go modules. - -Keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2), Mergo changed `Merge()` and `Map()` signatures to support [transformers](#transformers). I added an optional/variadic argument so that it won't break the existing code. - -If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with ```go get -u github.com/imdario/mergo```.
I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0). - -### Donations - -If Mergo is useful to you, consider buying me a coffee, a beer, or making a monthly donation to allow me to keep building great free software. :heart_eyes: - -Buy Me a Coffee at ko-fi.com -Donate using Liberapay -Become my sponsor - -### Mergo in the wild - -- [moby/moby](https://github.com/moby/moby) -- [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes) -- [vmware/dispatch](https://github.com/vmware/dispatch) -- [Shopify/themekit](https://github.com/Shopify/themekit) -- [imdario/zas](https://github.com/imdario/zas) -- [matcornic/hermes](https://github.com/matcornic/hermes) -- [OpenBazaar/openbazaar-go](https://github.com/OpenBazaar/openbazaar-go) -- [kataras/iris](https://github.com/kataras/iris) -- [michaelsauter/crane](https://github.com/michaelsauter/crane) -- [go-task/task](https://github.com/go-task/task) -- [sensu/uchiwa](https://github.com/sensu/uchiwa) -- [ory/hydra](https://github.com/ory/hydra) -- [sisatech/vcli](https://github.com/sisatech/vcli) -- [dairycart/dairycart](https://github.com/dairycart/dairycart) -- [projectcalico/felix](https://github.com/projectcalico/felix) -- [resin-os/balena](https://github.com/resin-os/balena) -- [go-kivik/kivik](https://github.com/go-kivik/kivik) -- [Telefonica/govice](https://github.com/Telefonica/govice) -- [supergiant/supergiant](https://github.com/supergiant/supergiant) -- [SergeyTsalkov/brooce](https://github.com/SergeyTsalkov/brooce) -- [soniah/dnsmadeeasy](https://github.com/soniah/dnsmadeeasy) -- [ohsu-comp-bio/funnel](https://github.com/ohsu-comp-bio/funnel) -- [EagerIO/Stout](https://github.com/EagerIO/Stout) -- [lynndylanhurley/defsynth-api](https://github.com/lynndylanhurley/defsynth-api) -- [russross/canvasassignments](https://github.com/russross/canvasassignments) -- [rdegges/cryptly-api](https://github.com/rdegges/cryptly-api) -- [casualjim/exeggutor](https://github.com/casualjim/exeggutor) -- [divshot/gitling](https://github.com/divshot/gitling) -- [RWJMurphy/gorl](https://github.com/RWJMurphy/gorl) -- [andrerocker/deploy42](https://github.com/andrerocker/deploy42) -- [elwinar/rambler](https://github.com/elwinar/rambler) -- [tmaiaroto/gopartman](https://github.com/tmaiaroto/gopartman) -- [jfbus/impressionist](https://github.com/jfbus/impressionist) -- [Jmeyering/zealot](https://github.com/Jmeyering/zealot) -- [godep-migrator/rigger-host](https://github.com/godep-migrator/rigger-host) -- [Dronevery/MultiwaySwitch-Go](https://github.com/Dronevery/MultiwaySwitch-Go) -- [thoas/picfit](https://github.com/thoas/picfit) -- [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server) -- [jnuthong/item_search](https://github.com/jnuthong/item_search) -- [bukalapak/snowboard](https://github.com/bukalapak/snowboard) -- [containerssh/containerssh](https://github.com/containerssh/containerssh) -- [goreleaser/goreleaser](https://github.com/goreleaser/goreleaser) -- [tjpnz/structbot](https://github.com/tjpnz/structbot) - -## Install - - go get github.com/imdario/mergo - - // use in your .go code - import ( - "github.com/imdario/mergo" - ) - -## Usage - -You can only merge same-type structs with exported fields initialized as zero value of their type and same-type maps. Mergo won't merge unexported (private) fields but will recursively merge any exported one.
It won't merge empty struct values as [they are zero values](https://golang.org/ref/spec#The_zero_value) too. Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection). - -```go -if err := mergo.Merge(&dst, src); err != nil { - // ... -} -``` - -Also, you can merge overwriting values using the transformer `WithOverride`. - -```go -if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil { - // ... -} -``` - -Additionally, you can map a `map[string]interface{}` to a struct (and otherwise, from struct to map), following the same restrictions as in `Merge()`. Keys are capitalized to find each corresponding exported field. - -```go -if err := mergo.Map(&dst, srcMap); err != nil { - // ... -} -``` - -Warning: if you map a struct to a map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as `map[string]interface{}`. They will be just assigned as values. - -Here is a nice example: - -```go -package main - -import ( - "fmt" - "github.com/imdario/mergo" -) - -type Foo struct { - A string - B int64 -} - -func main() { - src := Foo{ - A: "one", - B: 2, - } - dest := Foo{ - A: "two", - } - mergo.Merge(&dest, src) - fmt.Println(dest) - // Will print - // {two 2} -} -``` - -Note: if tests are failing due to a missing package, please execute: - - go get gopkg.in/yaml.v3 - -### Transformers - -Transformers allow you to merge specific types differently than the default behavior. In other words, now you can customize how some types are merged. For example, `time.Time` is a struct; it doesn't have a zero value, but IsZero can return true because it has fields with zero value. How can we merge a non-zero `time.Time`? - -```go -package main - -import ( - "fmt" - "github.com/imdario/mergo" - "reflect" - "time" -) - -type timeTransformer struct { -} - -func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error { - if typ == reflect.TypeOf(time.Time{}) { - return func(dst, src reflect.Value) error { - if dst.CanSet() { - isZero := dst.MethodByName("IsZero") - result := isZero.Call([]reflect.Value{}) - if result[0].Bool() { - dst.Set(src) - } - } - return nil - } - } - return nil -} - -type Snapshot struct { - Time time.Time - // ... -} - -func main() { - src := Snapshot{time.Now()} - dest := Snapshot{} - mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{})) - fmt.Println(dest) - // Will print - // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 } -} -``` - -## Contact me - -If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): [@im_dario](https://twitter.com/im_dario) - -## About - -Written by [Dario Castañé](http://dario.im). - -## License - -[BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE).
- -[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_large) diff --git a/vendor/github.com/imdario/mergo/SECURITY.md b/vendor/github.com/imdario/mergo/SECURITY.md deleted file mode 100644 index a5de61f77..000000000 --- a/vendor/github.com/imdario/mergo/SECURITY.md +++ /dev/null @@ -1,14 +0,0 @@ -# Security Policy - -## Supported Versions - -| Version | Supported | | ------- | ------------------ | | 0.3.x | :white_check_mark: | | < 0.3 | :x: | - -## Security contact information - -To report a security vulnerability, please use the -[Tidelift security contact](https://tidelift.com/security). -Tidelift will coordinate the fix and disclosure. diff --git a/vendor/github.com/imdario/mergo/doc.go b/vendor/github.com/imdario/mergo/doc.go deleted file mode 100644 index fcd985f99..000000000 --- a/vendor/github.com/imdario/mergo/doc.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2013 Dario Castañé. All rights reserved. -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. - -Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will recursively merge any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection). - -Status - -It is ready for production use. It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc. - -Important note - -Please keep in mind that a problematic PR broke 0.3.9. We reverted it in 0.3.10. We consider 0.3.10 as stable but not bug-free. Also, this version adds support for go modules. - -Keep in mind that in 0.3.2, Mergo changed Merge() and Map() signatures to support transformers. We added an optional/variadic argument so that it won't break the existing code. - -If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with go get -u github.com/imdario/mergo. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0). - -Install - -Do your usual installation procedure: - - go get github.com/imdario/mergo - - // use in your .go code - import ( - "github.com/imdario/mergo" - ) - -Usage - -You can only merge same-type structs with exported fields initialized as zero value of their type and same-type maps. Mergo won't merge unexported (private) fields but will recursively merge any exported one. It won't merge empty struct values as they are zero values too. Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection). - - if err := mergo.Merge(&dst, src); err != nil { - // ... - } - -Also, you can merge overwriting values using the transformer WithOverride. - - if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil { - // ... - } - -Additionally, you can map a map[string]interface{} to a struct (and otherwise, from struct to map), following the same restrictions as in Merge(). Keys are capitalized to find each corresponding exported field. - - if err := mergo.Map(&dst, srcMap); err != nil { - // ...
- } - -Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as map[string]interface{}. They will be just assigned as values. - -Here is a nice example: - - package main - - import ( - "fmt" - "github.com/imdario/mergo" - ) - - type Foo struct { - A string - B int64 - } - - func main() { - src := Foo{ - A: "one", - B: 2, - } - dest := Foo{ - A: "two", - } - mergo.Merge(&dest, src) - fmt.Println(dest) - // Will print - // {two 2} - } - -Transformers - -Transformers allow to merge specific types differently than in the default behavior. In other words, now you can customize how some types are merged. For example, time.Time is a struct; it doesn't have zero value but IsZero can return true because it has fields with zero value. How can we merge a non-zero time.Time? - - package main - - import ( - "fmt" - "github.com/imdario/mergo" - "reflect" - "time" - ) - - type timeTransformer struct { - } - - func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error { - if typ == reflect.TypeOf(time.Time{}) { - return func(dst, src reflect.Value) error { - if dst.CanSet() { - isZero := dst.MethodByName("IsZero") - result := isZero.Call([]reflect.Value{}) - if result[0].Bool() { - dst.Set(src) - } - } - return nil - } - } - return nil - } - - type Snapshot struct { - Time time.Time - // ... - } - - func main() { - src := Snapshot{time.Now()} - dest := Snapshot{} - mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{})) - fmt.Println(dest) - // Will print - // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 } - } - -Contact me - -If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): https://twitter.com/im_dario - -About - -Written by Dario Castañé: https://da.rio.hn - -License - -BSD 3-Clause license, as Go language. - -*/ -package mergo diff --git a/vendor/github.com/imdario/mergo/map.go b/vendor/github.com/imdario/mergo/map.go deleted file mode 100644 index b50d5c2a4..000000000 --- a/vendor/github.com/imdario/mergo/map.go +++ /dev/null @@ -1,178 +0,0 @@ -// Copyright 2014 Dario Castañé. All rights reserved. -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Based on src/pkg/reflect/deepequal.go from official -// golang's stdlib. - -package mergo - -import ( - "fmt" - "reflect" - "unicode" - "unicode/utf8" -) - -func changeInitialCase(s string, mapper func(rune) rune) string { - if s == "" { - return s - } - r, n := utf8.DecodeRuneInString(s) - return string(mapper(r)) + s[n:] -} - -func isExported(field reflect.StructField) bool { - r, _ := utf8.DecodeRuneInString(field.Name) - return r >= 'A' && r <= 'Z' -} - -// Traverses recursively both values, assigning src's fields values to dst. -// The map argument tracks comparisons that have already been seen, which allows -// short circuiting on recursive types. -func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) { - overwrite := config.Overwrite - if dst.CanAddr() { - addr := dst.UnsafeAddr() - h := 17 * addr - seen := visited[h] - typ := dst.Type() - for p := seen; p != nil; p = p.next { - if p.ptr == addr && p.typ == typ { - return nil - } - } - // Remember, remember... 
- visited[h] = &visit{typ, seen, addr} - } - zeroValue := reflect.Value{} - switch dst.Kind() { - case reflect.Map: - dstMap := dst.Interface().(map[string]interface{}) - for i, n := 0, src.NumField(); i < n; i++ { - srcType := src.Type() - field := srcType.Field(i) - if !isExported(field) { - continue - } - fieldName := field.Name - fieldName = changeInitialCase(fieldName, unicode.ToLower) - if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v), !config.ShouldNotDereference) || overwrite) { - dstMap[fieldName] = src.Field(i).Interface() - } - } - case reflect.Ptr: - if dst.IsNil() { - v := reflect.New(dst.Type().Elem()) - dst.Set(v) - } - dst = dst.Elem() - fallthrough - case reflect.Struct: - srcMap := src.Interface().(map[string]interface{}) - for key := range srcMap { - config.overwriteWithEmptyValue = true - srcValue := srcMap[key] - fieldName := changeInitialCase(key, unicode.ToUpper) - dstElement := dst.FieldByName(fieldName) - if dstElement == zeroValue { - // We discard it because the field doesn't exist. - continue - } - srcElement := reflect.ValueOf(srcValue) - dstKind := dstElement.Kind() - srcKind := srcElement.Kind() - if srcKind == reflect.Ptr && dstKind != reflect.Ptr { - srcElement = srcElement.Elem() - srcKind = reflect.TypeOf(srcElement.Interface()).Kind() - } else if dstKind == reflect.Ptr { - // Can this work? I guess it can't. - if srcKind != reflect.Ptr && srcElement.CanAddr() { - srcPtr := srcElement.Addr() - srcElement = reflect.ValueOf(srcPtr) - srcKind = reflect.Ptr - } - } - - if !srcElement.IsValid() { - continue - } - if srcKind == dstKind { - if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { - return - } - } else if dstKind == reflect.Interface && dstElement.Kind() == reflect.Interface { - if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { - return - } - } else if srcKind == reflect.Map { - if err = deepMap(dstElement, srcElement, visited, depth+1, config); err != nil { - return - } - } else { - return fmt.Errorf("type mismatch on %s field: found %v, expected %v", fieldName, srcKind, dstKind) - } - } - } - return -} - -// Map sets fields' values in dst from src. -// src can be a map with string keys or a struct. dst must be the opposite: -// if src is a map, dst must be a valid pointer to struct. If src is a struct, -// dst must be map[string]interface{}. -// It won't merge unexported (private) fields and will do recursively -// any exported field. -// If dst is a map, keys will be src fields' names in lower camel case. -// Missing key in src that doesn't match a field in dst will be skipped. This -// doesn't apply if dst is a map. -// This is separated method from Merge because it is cleaner and it keeps sane -// semantics: merging equal types, mapping different (restricted) types. -func Map(dst, src interface{}, opts ...func(*Config)) error { - return _map(dst, src, opts...) -} - -// MapWithOverwrite will do the same as Map except that non-empty dst attributes will be overridden by -// non-empty src attribute values. -// Deprecated: Use Map(…) with WithOverride -func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { - return _map(dst, src, append(opts, WithOverride)...) 
-} - -func _map(dst, src interface{}, opts ...func(*Config)) error { - if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr { - return ErrNonPointerArgument - } - var ( - vDst, vSrc reflect.Value - err error - ) - config := &Config{} - - for _, opt := range opts { - opt(config) - } - - if vDst, vSrc, err = resolveValues(dst, src); err != nil { - return err - } - // To be friction-less, we redirect equal-type arguments - // to deepMerge. Only because arguments can be anything. - if vSrc.Kind() == vDst.Kind() { - return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config) - } - switch vSrc.Kind() { - case reflect.Struct: - if vDst.Kind() != reflect.Map { - return ErrExpectedMapAsDestination - } - case reflect.Map: - if vDst.Kind() != reflect.Struct { - return ErrExpectedStructAsDestination - } - default: - return ErrNotSupported - } - return deepMap(vDst, vSrc, make(map[uintptr]*visit), 0, config) -} diff --git a/vendor/github.com/imdario/mergo/merge.go b/vendor/github.com/imdario/mergo/merge.go deleted file mode 100644 index 0ef9b2138..000000000 --- a/vendor/github.com/imdario/mergo/merge.go +++ /dev/null @@ -1,409 +0,0 @@ -// Copyright 2013 Dario Castañé. All rights reserved. -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Based on src/pkg/reflect/deepequal.go from official -// golang's stdlib. - -package mergo - -import ( - "fmt" - "reflect" -) - -func hasMergeableFields(dst reflect.Value) (exported bool) { - for i, n := 0, dst.NumField(); i < n; i++ { - field := dst.Type().Field(i) - if field.Anonymous && dst.Field(i).Kind() == reflect.Struct { - exported = exported || hasMergeableFields(dst.Field(i)) - } else if isExportedComponent(&field) { - exported = exported || len(field.PkgPath) == 0 - } - } - return -} - -func isExportedComponent(field *reflect.StructField) bool { - pkgPath := field.PkgPath - if len(pkgPath) > 0 { - return false - } - c := field.Name[0] - if 'a' <= c && c <= 'z' || c == '_' { - return false - } - return true -} - -type Config struct { - Transformers Transformers - Overwrite bool - ShouldNotDereference bool - AppendSlice bool - TypeCheck bool - overwriteWithEmptyValue bool - overwriteSliceWithEmptyValue bool - sliceDeepCopy bool - debug bool -} - -type Transformers interface { - Transformer(reflect.Type) func(dst, src reflect.Value) error -} - -// Traverses recursively both values, assigning src's fields values to dst. -// The map argument tracks comparisons that have already been seen, which allows -// short circuiting on recursive types. -func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) { - overwrite := config.Overwrite - typeCheck := config.TypeCheck - overwriteWithEmptySrc := config.overwriteWithEmptyValue - overwriteSliceWithEmptySrc := config.overwriteSliceWithEmptyValue - sliceDeepCopy := config.sliceDeepCopy - - if !src.IsValid() { - return - } - if dst.CanAddr() { - addr := dst.UnsafeAddr() - h := 17 * addr - seen := visited[h] - typ := dst.Type() - for p := seen; p != nil; p = p.next { - if p.ptr == addr && p.typ == typ { - return nil - } - } - // Remember, remember... 
- visited[h] = &visit{typ, seen, addr} - } - - if config.Transformers != nil && !isReflectNil(dst) && dst.IsValid() { - if fn := config.Transformers.Transformer(dst.Type()); fn != nil { - err = fn(dst, src) - return - } - } - - switch dst.Kind() { - case reflect.Struct: - if hasMergeableFields(dst) { - for i, n := 0, dst.NumField(); i < n; i++ { - if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1, config); err != nil { - return - } - } - } else { - if dst.CanSet() && (isReflectNil(dst) || overwrite) && (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc) { - dst.Set(src) - } - } - case reflect.Map: - if dst.IsNil() && !src.IsNil() { - if dst.CanSet() { - dst.Set(reflect.MakeMap(dst.Type())) - } else { - dst = src - return - } - } - - if src.Kind() != reflect.Map { - if overwrite && dst.CanSet() { - dst.Set(src) - } - return - } - - for _, key := range src.MapKeys() { - srcElement := src.MapIndex(key) - if !srcElement.IsValid() { - continue - } - dstElement := dst.MapIndex(key) - switch srcElement.Kind() { - case reflect.Chan, reflect.Func, reflect.Map, reflect.Interface, reflect.Slice: - if srcElement.IsNil() { - if overwrite { - dst.SetMapIndex(key, srcElement) - } - continue - } - fallthrough - default: - if !srcElement.CanInterface() { - continue - } - switch reflect.TypeOf(srcElement.Interface()).Kind() { - case reflect.Struct: - fallthrough - case reflect.Ptr: - fallthrough - case reflect.Map: - srcMapElm := srcElement - dstMapElm := dstElement - if srcMapElm.CanInterface() { - srcMapElm = reflect.ValueOf(srcMapElm.Interface()) - if dstMapElm.IsValid() { - dstMapElm = reflect.ValueOf(dstMapElm.Interface()) - } - } - if err = deepMerge(dstMapElm, srcMapElm, visited, depth+1, config); err != nil { - return - } - case reflect.Slice: - srcSlice := reflect.ValueOf(srcElement.Interface()) - - var dstSlice reflect.Value - if !dstElement.IsValid() || dstElement.IsNil() { - dstSlice = reflect.MakeSlice(srcSlice.Type(), 0, srcSlice.Len()) - } else { - dstSlice = reflect.ValueOf(dstElement.Interface()) - } - - if (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) && !config.AppendSlice && !sliceDeepCopy { - if typeCheck && srcSlice.Type() != dstSlice.Type() { - return fmt.Errorf("cannot override two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type()) - } - dstSlice = srcSlice - } else if config.AppendSlice { - if srcSlice.Type() != dstSlice.Type() { - return fmt.Errorf("cannot append two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type()) - } - dstSlice = reflect.AppendSlice(dstSlice, srcSlice) - } else if sliceDeepCopy { - i := 0 - for ; i < srcSlice.Len() && i < dstSlice.Len(); i++ { - srcElement := srcSlice.Index(i) - dstElement := dstSlice.Index(i) - - if srcElement.CanInterface() { - srcElement = reflect.ValueOf(srcElement.Interface()) - } - if dstElement.CanInterface() { - dstElement = reflect.ValueOf(dstElement.Interface()) - } - - if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { - return - } - } - - } - dst.SetMapIndex(key, dstSlice) - } - } - - if dstElement.IsValid() && !isEmptyValue(dstElement, !config.ShouldNotDereference) { - if reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice { - continue - } - if reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map && reflect.TypeOf(dstElement.Interface()).Kind() == reflect.Map { - continue - } - } 
- - if srcElement.IsValid() && ((srcElement.Kind() != reflect.Ptr && overwrite) || !dstElement.IsValid() || isEmptyValue(dstElement, !config.ShouldNotDereference)) { - if dst.IsNil() { - dst.Set(reflect.MakeMap(dst.Type())) - } - dst.SetMapIndex(key, srcElement) - } - } - - // Ensure that all keys in dst are deleted if they are not in src. - if overwriteWithEmptySrc { - for _, key := range dst.MapKeys() { - srcElement := src.MapIndex(key) - if !srcElement.IsValid() { - dst.SetMapIndex(key, reflect.Value{}) - } - } - } - case reflect.Slice: - if !dst.CanSet() { - break - } - if (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) && !config.AppendSlice && !sliceDeepCopy { - dst.Set(src) - } else if config.AppendSlice { - if src.Type() != dst.Type() { - return fmt.Errorf("cannot append two slice with different type (%s, %s)", src.Type(), dst.Type()) - } - dst.Set(reflect.AppendSlice(dst, src)) - } else if sliceDeepCopy { - for i := 0; i < src.Len() && i < dst.Len(); i++ { - srcElement := src.Index(i) - dstElement := dst.Index(i) - if srcElement.CanInterface() { - srcElement = reflect.ValueOf(srcElement.Interface()) - } - if dstElement.CanInterface() { - dstElement = reflect.ValueOf(dstElement.Interface()) - } - - if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { - return - } - } - } - case reflect.Ptr: - fallthrough - case reflect.Interface: - if isReflectNil(src) { - if overwriteWithEmptySrc && dst.CanSet() && src.Type().AssignableTo(dst.Type()) { - dst.Set(src) - } - break - } - - if src.Kind() != reflect.Interface { - if dst.IsNil() || (src.Kind() != reflect.Ptr && overwrite) { - if dst.CanSet() && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) { - dst.Set(src) - } - } else if src.Kind() == reflect.Ptr { - if !config.ShouldNotDereference { - if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { - return - } - } else { - if overwriteWithEmptySrc || (overwrite && !src.IsNil()) || dst.IsNil() { - dst.Set(src) - } - } - } else if dst.Elem().Type() == src.Type() { - if err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil { - return - } - } else { - return ErrDifferentArgumentsTypes - } - break - } - - if dst.IsNil() || overwrite { - if dst.CanSet() && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) { - dst.Set(src) - } - break - } - - if dst.Elem().Kind() == src.Elem().Kind() { - if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { - return - } - break - } - default: - mustSet := (isEmptyValue(dst, !config.ShouldNotDereference) || overwrite) && (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc) - if mustSet { - if dst.CanSet() { - dst.Set(src) - } else { - dst = src - } - } - } - - return -} - -// Merge will fill any empty for value type attributes on the dst struct using corresponding -// src attributes if they themselves are not empty. dst and src must be valid same-type structs -// and dst must be a pointer to struct. -// It won't merge unexported (private) fields and will do recursively any exported field. -func Merge(dst, src interface{}, opts ...func(*Config)) error { - return merge(dst, src, opts...) -} - -// MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overridden by -// non-empty src attribute values. 
-// Deprecated: use Merge(…) with WithOverride -func MergeWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { - return merge(dst, src, append(opts, WithOverride)...) -} - -// WithTransformers adds transformers to merge, allowing to customize the merging of some types. -func WithTransformers(transformers Transformers) func(*Config) { - return func(config *Config) { - config.Transformers = transformers - } -} - -// WithOverride will make merge override non-empty dst attributes with non-empty src attributes values. -func WithOverride(config *Config) { - config.Overwrite = true -} - -// WithOverwriteWithEmptyValue will make merge override non empty dst attributes with empty src attributes values. -func WithOverwriteWithEmptyValue(config *Config) { - config.Overwrite = true - config.overwriteWithEmptyValue = true -} - -// WithOverrideEmptySlice will make merge override empty dst slice with empty src slice. -func WithOverrideEmptySlice(config *Config) { - config.overwriteSliceWithEmptyValue = true -} - -// WithoutDereference prevents dereferencing pointers when evaluating whether they are empty -// (i.e. a non-nil pointer is never considered empty). -func WithoutDereference(config *Config) { - config.ShouldNotDereference = true -} - -// WithAppendSlice will make merge append slices instead of overwriting it. -func WithAppendSlice(config *Config) { - config.AppendSlice = true -} - -// WithTypeCheck will make merge check types while overwriting it (must be used with WithOverride). -func WithTypeCheck(config *Config) { - config.TypeCheck = true -} - -// WithSliceDeepCopy will merge slice element one by one with Overwrite flag. -func WithSliceDeepCopy(config *Config) { - config.sliceDeepCopy = true - config.Overwrite = true -} - -func merge(dst, src interface{}, opts ...func(*Config)) error { - if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr { - return ErrNonPointerArgument - } - var ( - vDst, vSrc reflect.Value - err error - ) - - config := &Config{} - - for _, opt := range opts { - opt(config) - } - - if vDst, vSrc, err = resolveValues(dst, src); err != nil { - return err - } - if vDst.Type() != vSrc.Type() { - return ErrDifferentArgumentsTypes - } - return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config) -} - -// IsReflectNil is the reflect value provided nil -func isReflectNil(v reflect.Value) bool { - k := v.Kind() - switch k { - case reflect.Interface, reflect.Slice, reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr: - // Both interface and slice are nil if first word is 0. - // Both are always bigger than a word; assume flagIndir. - return v.IsNil() - default: - return false - } -} diff --git a/vendor/github.com/imdario/mergo/mergo.go b/vendor/github.com/imdario/mergo/mergo.go deleted file mode 100644 index 0a721e2d8..000000000 --- a/vendor/github.com/imdario/mergo/mergo.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2013 Dario Castañé. All rights reserved. -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Based on src/pkg/reflect/deepequal.go from official -// golang's stdlib. - -package mergo - -import ( - "errors" - "reflect" -) - -// Errors reported by Mergo when it finds invalid arguments. 
-var ( - ErrNilArguments = errors.New("src and dst must not be nil") - ErrDifferentArgumentsTypes = errors.New("src and dst must be of same type") - ErrNotSupported = errors.New("only structs, maps, and slices are supported") - ErrExpectedMapAsDestination = errors.New("dst was expected to be a map") - ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct") - ErrNonPointerArgument = errors.New("dst must be a pointer") -) - -// During deepMerge, must keep track of checks that are -// in progress. The comparison algorithm assumes that all -// checks in progress are true when it reencounters them. -// Visited are stored in a map indexed by 17 * a1 + a2; -type visit struct { - typ reflect.Type - next *visit - ptr uintptr -} - -// From src/pkg/encoding/json/encode.go. -func isEmptyValue(v reflect.Value, shouldDereference bool) bool { - switch v.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - if v.IsNil() { - return true - } - if shouldDereference { - return isEmptyValue(v.Elem(), shouldDereference) - } - return false - case reflect.Func: - return v.IsNil() - case reflect.Invalid: - return true - } - return false -} - -func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) { - if dst == nil || src == nil { - err = ErrNilArguments - return - } - vDst = reflect.ValueOf(dst).Elem() - if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map && vDst.Kind() != reflect.Slice { - err = ErrNotSupported - return - } - vSrc = reflect.ValueOf(src) - // We check if vSrc is a pointer to dereference it. 
- if vSrc.Kind() == reflect.Ptr { - vSrc = vSrc.Elem() - } - return -} diff --git a/vendor/github.com/muesli/termenv/.golangci-soft.yml b/vendor/github.com/muesli/termenv/.golangci-soft.yml index ef456e060..84e3d41de 100644 --- a/vendor/github.com/muesli/termenv/.golangci-soft.yml +++ b/vendor/github.com/muesli/termenv/.golangci-soft.yml @@ -20,10 +20,9 @@ linters: - goconst - godot - godox - - gomnd + - mnd - gomoddirectives - goprintffuncname - - ifshort # - lll - misspell - nakedret @@ -35,13 +34,10 @@ linters: # disable default linters, they are already enabled in .golangci.yml disable: - - deadcode - errcheck - gosimple - govet - ineffassign - staticcheck - - structcheck - typecheck - unused - - varcheck diff --git a/vendor/github.com/muesli/termenv/.golangci.yml b/vendor/github.com/muesli/termenv/.golangci.yml index a5a91d0d9..684d54bfa 100644 --- a/vendor/github.com/muesli/termenv/.golangci.yml +++ b/vendor/github.com/muesli/termenv/.golangci.yml @@ -15,7 +15,6 @@ issues: linters: enable: - bodyclose - - exportloopref - goimports - gosec - nilerr diff --git a/vendor/github.com/muesli/termenv/README.md b/vendor/github.com/muesli/termenv/README.md index 29dcf0176..fa7929d4e 100644 --- a/vendor/github.com/muesli/termenv/README.md +++ b/vendor/github.com/muesli/termenv/README.md @@ -307,7 +307,7 @@ termenv.DisableBracketedPaste() ### Color Support -- 24-bit (RGB): alacritty, foot, iTerm, kitty, Konsole, st, tmux, vte-based, wezterm, Windows Terminal +- 24-bit (RGB): alacritty, foot, iTerm, kitty, Konsole, st, tmux, vte-based, wezterm, Ghostty, Windows Terminal - 8-bit (256): rxvt, screen, xterm, Apple Terminal - 4-bit (16): Linux Console @@ -350,7 +350,7 @@ You can help improve this list! Check out [how to](ansi_compat.md) and open an i | Terminal | Copy to Clipboard (OSC52) | Hyperlinks (OSC8) | Notifications (OSC777) | | ---------------- | :-----------------------: | :---------------: | :--------------------: | -| alacritty | ✅ | ❌[^alacritty] | ❌ | +| alacritty | ✅ | ✅[^alacritty] | ❌ | | foot | ✅ | ✅ | ✅ | | kitty | ✅ | ✅ | ✅ | | Konsole | ❌[^konsole] | ✅ | ❌ | @@ -374,7 +374,7 @@ You can help improve this list! Check out [how to](ansi_compat.md) and open an i [^apple]: OSC52 works with a [workaround](https://github.com/roy2220/osc52pty). [^tmux]: OSC8 is not supported, for more info see [issue#911](https://github.com/tmux/tmux/issues/911). [^screen]: OSC8 is not supported, for more info see [bug#50952](https://savannah.gnu.org/bugs/index.php?50952). -[^alacritty]: OSC8 is not supported, for more info see [issue#922](https://github.com/alacritty/alacritty/issues/922). +[^alacritty]: OSC8 is supported since [v0.11.0](https://github.com/alacritty/alacritty/releases/tag/v0.11.0) diff --git a/vendor/github.com/muesli/termenv/ansicolors.go b/vendor/github.com/muesli/termenv/ansicolors.go index ee303e22a..1a301b0fe 100644 --- a/vendor/github.com/muesli/termenv/ansicolors.go +++ b/vendor/github.com/muesli/termenv/ansicolors.go @@ -1,6 +1,6 @@ package termenv -// ANSI color codes +// ANSI color codes. const ( ANSIBlack ANSIColor = iota ANSIRed diff --git a/vendor/github.com/muesli/termenv/color.go b/vendor/github.com/muesli/termenv/color.go index 1a216e93a..59e639b11 100644 --- a/vendor/github.com/muesli/termenv/color.go +++ b/vendor/github.com/muesli/termenv/color.go @@ -9,12 +9,10 @@ import ( "github.com/lucasb-eyer/go-colorful" ) -var ( - // ErrInvalidColor gets returned when a color is invalid. 
- ErrInvalidColor = errors.New("invalid color") -) +// ErrInvalidColor gets returned when a color is invalid. +var ErrInvalidColor = errors.New("invalid color") -// Foreground and Background sequence codes +// Foreground and Background sequence codes. const ( Foreground = "38" Background = "48" @@ -73,6 +71,8 @@ func (c NoColor) Sequence(_ bool) string { } // Sequence returns the ANSI Sequence for the color. +// +//nolint:mnd func (c ANSIColor) Sequence(bg bool) string { col := int(c) bgMod := func(c int) int { @@ -83,9 +83,9 @@ func (c ANSIColor) Sequence(bg bool) string { } if col < 8 { - return fmt.Sprintf("%d", bgMod(col)+30) + return fmt.Sprintf("%d", bgMod(col)+30) //nolint:mnd } - return fmt.Sprintf("%d", bgMod(col-8)+90) + return fmt.Sprintf("%d", bgMod(col-8)+90) //nolint:mnd } // Sequence returns the ANSI Sequence for the color. @@ -108,7 +108,7 @@ func (c RGBColor) Sequence(bg bool) string { if bg { prefix = Background } - return fmt.Sprintf("%s;2;%d;%d;%d", prefix, uint8(f.R*255), uint8(f.G*255), uint8(f.B*255)) + return fmt.Sprintf("%s;2;%d;%d;%d", prefix, uint8(f.R*255), uint8(f.G*255), uint8(f.B*255)) //nolint:mnd } func xTermColor(s string) (RGBColor, error) { @@ -158,6 +158,7 @@ func ansi256ToANSIColor(c ANSI256Color) ANSIColor { return ANSIColor(r) } +//nolint:mnd func hexToANSI256Color(c colorful.Color) ANSI256Color { v2ci := func(v float64) int { if v < 48 { diff --git a/vendor/github.com/muesli/termenv/constants_zos.go b/vendor/github.com/muesli/termenv/constants_zos.go new file mode 100644 index 000000000..4262f03b9 --- /dev/null +++ b/vendor/github.com/muesli/termenv/constants_zos.go @@ -0,0 +1,8 @@ +package termenv + +import "golang.org/x/sys/unix" + +const ( + tcgetattr = unix.TCGETS + tcsetattr = unix.TCSETS +) diff --git a/vendor/github.com/muesli/termenv/output.go b/vendor/github.com/muesli/termenv/output.go index e22d369c9..e4434de03 100644 --- a/vendor/github.com/muesli/termenv/output.go +++ b/vendor/github.com/muesli/termenv/output.go @@ -6,12 +6,12 @@ import ( "sync" ) -var ( - // output is the default global output. - output = NewOutput(os.Stdout) -) +// output is the default global output. +var output = NewOutput(os.Stdout) // File represents a file descriptor. +// +// Deprecated: Use *os.File instead. type File interface { io.ReadWriter Fd() uintptr @@ -23,7 +23,7 @@ type OutputOption = func(*Output) // Output is a terminal output. type Output struct { Profile - tty io.Writer + w io.Writer environ Environ assumeTTY bool @@ -61,10 +61,10 @@ func SetDefaultOutput(o *Output) { output = o } -// NewOutput returns a new Output for the given file descriptor. -func NewOutput(tty io.Writer, opts ...OutputOption) *Output { +// NewOutput returns a new Output for the given writer. +func NewOutput(w io.Writer, opts ...OutputOption) *Output { o := &Output{ - tty: tty, + w: w, environ: &osEnviron{}, Profile: -1, fgSync: &sync.Once{}, @@ -73,8 +73,8 @@ func NewOutput(tty io.Writer, opts ...OutputOption) *Output { bgColor: NoColor{}, } - if o.tty == nil { - o.tty = os.Stdout + if o.w == nil { + o.w = os.Stdout } for _, opt := range opts { opt(o) @@ -175,20 +175,28 @@ func (o *Output) BackgroundColor() Color { func (o *Output) HasDarkBackground() bool { c := ConvertToRGB(o.BackgroundColor()) _, _, l := c.Hsl() - return l < 0.5 + return l < 0.5 //nolint:mnd } // TTY returns the terminal's file descriptor. This may be nil if the output is // not a terminal. +// +// Deprecated: Use Writer() instead. 
func (o Output) TTY() File { - if f, ok := o.tty.(File); ok { + if f, ok := o.w.(File); ok { return f } return nil } +// Writer returns the underlying writer. This may be of type io.Writer, +// io.ReadWriter, or *os.File. +func (o Output) Writer() io.Writer { + return o.w +} + func (o Output) Write(p []byte) (int, error) { - return o.tty.Write(p) + return o.w.Write(p) //nolint:wrapcheck } // WriteString writes the given string to the output. diff --git a/vendor/github.com/muesli/termenv/profile.go b/vendor/github.com/muesli/termenv/profile.go index fa128e209..7d38f5fb0 100644 --- a/vendor/github.com/muesli/termenv/profile.go +++ b/vendor/github.com/muesli/termenv/profile.go @@ -12,16 +12,31 @@ import ( type Profile int const ( - // TrueColor, 24-bit color profile + // TrueColor, 24-bit color profile. TrueColor = Profile(iota) - // ANSI256, 8-bit color profile + // ANSI256, 8-bit color profile. ANSI256 - // ANSI, 4-bit color profile + // ANSI, 4-bit color profile. ANSI - // Ascii, uncolored profile + // Ascii, uncolored profile. Ascii //nolint:revive ) +// Name returns the profile name as a string. +func (p Profile) Name() string { + switch p { + case Ascii: + return "Ascii" + case ANSI: + return "ANSI" + case ANSI256: + return "ANSI256" + case TrueColor: + return "TrueColor" + } + return "Unknown" +} + // String returns a new Style. func (p Profile) String(s ...string) Style { return Style{ @@ -80,7 +95,7 @@ func (p Profile) Color(s string) Color { return nil } - if i < 16 { + if i < 16 { //nolint:mnd c = ANSIColor(i) } else { c = ANSI256Color(i) diff --git a/vendor/github.com/muesli/termenv/screen.go b/vendor/github.com/muesli/termenv/screen.go index a71181b61..75c11d011 100644 --- a/vendor/github.com/muesli/termenv/screen.go +++ b/vendor/github.com/muesli/termenv/screen.go @@ -71,234 +71,234 @@ const ( // Reset the terminal to its default style, removing any active styles. func (o Output) Reset() { - fmt.Fprint(o.tty, CSI+ResetSeq+"m") + fmt.Fprint(o.w, CSI+ResetSeq+"m") //nolint:errcheck } // SetForegroundColor sets the default foreground color. func (o Output) SetForegroundColor(color Color) { - fmt.Fprintf(o.tty, OSC+SetForegroundColorSeq, color) + fmt.Fprintf(o.w, OSC+SetForegroundColorSeq, color) //nolint:errcheck } // SetBackgroundColor sets the default background color. func (o Output) SetBackgroundColor(color Color) { - fmt.Fprintf(o.tty, OSC+SetBackgroundColorSeq, color) + fmt.Fprintf(o.w, OSC+SetBackgroundColorSeq, color) //nolint:errcheck } // SetCursorColor sets the cursor color. func (o Output) SetCursorColor(color Color) { - fmt.Fprintf(o.tty, OSC+SetCursorColorSeq, color) + fmt.Fprintf(o.w, OSC+SetCursorColorSeq, color) //nolint:errcheck } // RestoreScreen restores a previously saved screen state. func (o Output) RestoreScreen() { - fmt.Fprint(o.tty, CSI+RestoreScreenSeq) + fmt.Fprint(o.w, CSI+RestoreScreenSeq) //nolint:errcheck } // SaveScreen saves the screen state. func (o Output) SaveScreen() { - fmt.Fprint(o.tty, CSI+SaveScreenSeq) + fmt.Fprint(o.w, CSI+SaveScreenSeq) //nolint:errcheck } // AltScreen switches to the alternate screen buffer. The former view can be // restored with ExitAltScreen(). func (o Output) AltScreen() { - fmt.Fprint(o.tty, CSI+AltScreenSeq) + fmt.Fprint(o.w, CSI+AltScreenSeq) //nolint:errcheck } // ExitAltScreen exits the alternate screen buffer and returns to the former // terminal view. 
func (o Output) ExitAltScreen() { - fmt.Fprint(o.tty, CSI+ExitAltScreenSeq) + fmt.Fprint(o.w, CSI+ExitAltScreenSeq) //nolint:errcheck } // ClearScreen clears the visible portion of the terminal. func (o Output) ClearScreen() { - fmt.Fprintf(o.tty, CSI+EraseDisplaySeq, 2) + fmt.Fprintf(o.w, CSI+EraseDisplaySeq, 2) //nolint:errcheck,mnd o.MoveCursor(1, 1) } // MoveCursor moves the cursor to a given position. func (o Output) MoveCursor(row int, column int) { - fmt.Fprintf(o.tty, CSI+CursorPositionSeq, row, column) + fmt.Fprintf(o.w, CSI+CursorPositionSeq, row, column) //nolint:errcheck } // HideCursor hides the cursor. func (o Output) HideCursor() { - fmt.Fprint(o.tty, CSI+HideCursorSeq) + fmt.Fprint(o.w, CSI+HideCursorSeq) //nolint:errcheck } // ShowCursor shows the cursor. func (o Output) ShowCursor() { - fmt.Fprint(o.tty, CSI+ShowCursorSeq) + fmt.Fprint(o.w, CSI+ShowCursorSeq) //nolint:errcheck } // SaveCursorPosition saves the cursor position. func (o Output) SaveCursorPosition() { - fmt.Fprint(o.tty, CSI+SaveCursorPositionSeq) + fmt.Fprint(o.w, CSI+SaveCursorPositionSeq) //nolint:errcheck } // RestoreCursorPosition restores a saved cursor position. func (o Output) RestoreCursorPosition() { - fmt.Fprint(o.tty, CSI+RestoreCursorPositionSeq) + fmt.Fprint(o.w, CSI+RestoreCursorPositionSeq) //nolint:errcheck } // CursorUp moves the cursor up a given number of lines. func (o Output) CursorUp(n int) { - fmt.Fprintf(o.tty, CSI+CursorUpSeq, n) + fmt.Fprintf(o.w, CSI+CursorUpSeq, n) //nolint:errcheck } // CursorDown moves the cursor down a given number of lines. func (o Output) CursorDown(n int) { - fmt.Fprintf(o.tty, CSI+CursorDownSeq, n) + fmt.Fprintf(o.w, CSI+CursorDownSeq, n) //nolint:errcheck } // CursorForward moves the cursor up a given number of lines. func (o Output) CursorForward(n int) { - fmt.Fprintf(o.tty, CSI+CursorForwardSeq, n) + fmt.Fprintf(o.w, CSI+CursorForwardSeq, n) //nolint:errcheck } // CursorBack moves the cursor backwards a given number of cells. func (o Output) CursorBack(n int) { - fmt.Fprintf(o.tty, CSI+CursorBackSeq, n) + fmt.Fprintf(o.w, CSI+CursorBackSeq, n) //nolint:errcheck } // CursorNextLine moves the cursor down a given number of lines and places it at // the beginning of the line. func (o Output) CursorNextLine(n int) { - fmt.Fprintf(o.tty, CSI+CursorNextLineSeq, n) + fmt.Fprintf(o.w, CSI+CursorNextLineSeq, n) //nolint:errcheck } // CursorPrevLine moves the cursor up a given number of lines and places it at // the beginning of the line. func (o Output) CursorPrevLine(n int) { - fmt.Fprintf(o.tty, CSI+CursorPreviousLineSeq, n) + fmt.Fprintf(o.w, CSI+CursorPreviousLineSeq, n) //nolint:errcheck } // ClearLine clears the current line. func (o Output) ClearLine() { - fmt.Fprint(o.tty, CSI+EraseEntireLineSeq) + fmt.Fprint(o.w, CSI+EraseEntireLineSeq) //nolint:errcheck } // ClearLineLeft clears the line to the left of the cursor. func (o Output) ClearLineLeft() { - fmt.Fprint(o.tty, CSI+EraseLineLeftSeq) + fmt.Fprint(o.w, CSI+EraseLineLeftSeq) //nolint:errcheck } // ClearLineRight clears the line to the right of the cursor. func (o Output) ClearLineRight() { - fmt.Fprint(o.tty, CSI+EraseLineRightSeq) + fmt.Fprint(o.w, CSI+EraseLineRightSeq) //nolint:errcheck } // ClearLines clears a given number of lines. 
func (o Output) ClearLines(n int) { - clearLine := fmt.Sprintf(CSI+EraseLineSeq, 2) + clearLine := fmt.Sprintf(CSI+EraseLineSeq, 2) //nolint:mnd cursorUp := fmt.Sprintf(CSI+CursorUpSeq, 1) - fmt.Fprint(o.tty, clearLine+strings.Repeat(cursorUp+clearLine, n)) + fmt.Fprint(o.w, clearLine+strings.Repeat(cursorUp+clearLine, n)) //nolint:errcheck } // ChangeScrollingRegion sets the scrolling region of the terminal. func (o Output) ChangeScrollingRegion(top, bottom int) { - fmt.Fprintf(o.tty, CSI+ChangeScrollingRegionSeq, top, bottom) + fmt.Fprintf(o.w, CSI+ChangeScrollingRegionSeq, top, bottom) //nolint:errcheck } // InsertLines inserts the given number of lines at the top of the scrollable // region, pushing lines below down. func (o Output) InsertLines(n int) { - fmt.Fprintf(o.tty, CSI+InsertLineSeq, n) + fmt.Fprintf(o.w, CSI+InsertLineSeq, n) //nolint:errcheck } // DeleteLines deletes the given number of lines, pulling any lines in // the scrollable region below up. func (o Output) DeleteLines(n int) { - fmt.Fprintf(o.tty, CSI+DeleteLineSeq, n) + fmt.Fprintf(o.w, CSI+DeleteLineSeq, n) //nolint:errcheck } // EnableMousePress enables X10 mouse mode. Button press events are sent only. func (o Output) EnableMousePress() { - fmt.Fprint(o.tty, CSI+EnableMousePressSeq) + fmt.Fprint(o.w, CSI+EnableMousePressSeq) //nolint:errcheck } // DisableMousePress disables X10 mouse mode. func (o Output) DisableMousePress() { - fmt.Fprint(o.tty, CSI+DisableMousePressSeq) + fmt.Fprint(o.w, CSI+DisableMousePressSeq) //nolint:errcheck } // EnableMouse enables Mouse Tracking mode. func (o Output) EnableMouse() { - fmt.Fprint(o.tty, CSI+EnableMouseSeq) + fmt.Fprint(o.w, CSI+EnableMouseSeq) //nolint:errcheck } // DisableMouse disables Mouse Tracking mode. func (o Output) DisableMouse() { - fmt.Fprint(o.tty, CSI+DisableMouseSeq) + fmt.Fprint(o.w, CSI+DisableMouseSeq) //nolint:errcheck } // EnableMouseHilite enables Hilite Mouse Tracking mode. func (o Output) EnableMouseHilite() { - fmt.Fprint(o.tty, CSI+EnableMouseHiliteSeq) + fmt.Fprint(o.w, CSI+EnableMouseHiliteSeq) //nolint:errcheck } // DisableMouseHilite disables Hilite Mouse Tracking mode. func (o Output) DisableMouseHilite() { - fmt.Fprint(o.tty, CSI+DisableMouseHiliteSeq) + fmt.Fprint(o.w, CSI+DisableMouseHiliteSeq) //nolint:errcheck } // EnableMouseCellMotion enables Cell Motion Mouse Tracking mode. func (o Output) EnableMouseCellMotion() { - fmt.Fprint(o.tty, CSI+EnableMouseCellMotionSeq) + fmt.Fprint(o.w, CSI+EnableMouseCellMotionSeq) //nolint:errcheck } // DisableMouseCellMotion disables Cell Motion Mouse Tracking mode. func (o Output) DisableMouseCellMotion() { - fmt.Fprint(o.tty, CSI+DisableMouseCellMotionSeq) + fmt.Fprint(o.w, CSI+DisableMouseCellMotionSeq) //nolint:errcheck } // EnableMouseAllMotion enables All Motion Mouse mode. func (o Output) EnableMouseAllMotion() { - fmt.Fprint(o.tty, CSI+EnableMouseAllMotionSeq) + fmt.Fprint(o.w, CSI+EnableMouseAllMotionSeq) //nolint:errcheck } // DisableMouseAllMotion disables All Motion Mouse mode. func (o Output) DisableMouseAllMotion() { - fmt.Fprint(o.tty, CSI+DisableMouseAllMotionSeq) + fmt.Fprint(o.w, CSI+DisableMouseAllMotionSeq) //nolint:errcheck } // EnableMouseExtendedMotion enables Extended Mouse mode (SGR). This should be // enabled in conjunction with EnableMouseCellMotion, and EnableMouseAllMotion. 
func (o Output) EnableMouseExtendedMode() { - fmt.Fprint(o.tty, CSI+EnableMouseExtendedModeSeq) + fmt.Fprint(o.w, CSI+EnableMouseExtendedModeSeq) //nolint:errcheck } // DisableMouseExtendedMotion disables Extended Mouse mode (SGR). func (o Output) DisableMouseExtendedMode() { - fmt.Fprint(o.tty, CSI+DisableMouseExtendedModeSeq) + fmt.Fprint(o.w, CSI+DisableMouseExtendedModeSeq) //nolint:errcheck } // EnableMousePixelsMotion enables Pixel Motion Mouse mode (SGR-Pixels). This // should be enabled in conjunction with EnableMouseCellMotion, and // EnableMouseAllMotion. func (o Output) EnableMousePixelsMode() { - fmt.Fprint(o.tty, CSI+EnableMousePixelsModeSeq) + fmt.Fprint(o.w, CSI+EnableMousePixelsModeSeq) //nolint:errcheck } // DisableMousePixelsMotion disables Pixel Motion Mouse mode (SGR-Pixels). func (o Output) DisableMousePixelsMode() { - fmt.Fprint(o.tty, CSI+DisableMousePixelsModeSeq) + fmt.Fprint(o.w, CSI+DisableMousePixelsModeSeq) //nolint:errcheck } // SetWindowTitle sets the terminal window title. func (o Output) SetWindowTitle(title string) { - fmt.Fprintf(o.tty, OSC+SetWindowTitleSeq, title) + fmt.Fprintf(o.w, OSC+SetWindowTitleSeq, title) //nolint:errcheck } // EnableBracketedPaste enables bracketed paste. func (o Output) EnableBracketedPaste() { - fmt.Fprintf(o.tty, CSI+EnableBracketedPasteSeq) + fmt.Fprintf(o.w, CSI+EnableBracketedPasteSeq) //nolint:errcheck } // DisableBracketedPaste disables bracketed paste. func (o Output) DisableBracketedPaste() { - fmt.Fprintf(o.tty, CSI+DisableBracketedPasteSeq) + fmt.Fprintf(o.w, CSI+DisableBracketedPasteSeq) //nolint:errcheck } // Legacy functions. diff --git a/vendor/github.com/muesli/termenv/style.go b/vendor/github.com/muesli/termenv/style.go index 83b0b4d7a..dedc1f9fd 100644 --- a/vendor/github.com/muesli/termenv/style.go +++ b/vendor/github.com/muesli/termenv/style.go @@ -4,7 +4,7 @@ import ( "fmt" "strings" - "github.com/mattn/go-runewidth" + "github.com/rivo/uniseg" ) // Sequence definitions. @@ -122,5 +122,5 @@ func (t Style) CrossOut() Style { // Width returns the width required to print all runes in Style. func (t Style) Width() int { - return runewidth.StringWidth(t.string) + return uniseg.StringWidth(t.string) } diff --git a/vendor/github.com/muesli/termenv/templatehelper.go b/vendor/github.com/muesli/termenv/templatehelper.go index d75b0796c..4c7c80f5b 100644 --- a/vendor/github.com/muesli/termenv/templatehelper.go +++ b/vendor/github.com/muesli/termenv/templatehelper.go @@ -10,6 +10,8 @@ func (o Output) TemplateFuncs() template.FuncMap { } // TemplateFuncs contains a few useful template helpers. +// +//nolint:mnd func TemplateFuncs(p Profile) template.FuncMap { if p == Ascii { return noopTemplateFuncs diff --git a/vendor/github.com/muesli/termenv/termenv.go b/vendor/github.com/muesli/termenv/termenv.go index 4ceb27177..d702cd55e 100644 --- a/vendor/github.com/muesli/termenv/termenv.go +++ b/vendor/github.com/muesli/termenv/termenv.go @@ -2,6 +2,7 @@ package termenv import ( "errors" + "os" "github.com/mattn/go-isatty" ) @@ -12,15 +13,15 @@ var ( ) const ( - // Escape character + // Escape character. ESC = '\x1b' - // Bell + // Bell. BEL = '\a' - // Control Sequence Introducer + // Control Sequence Introducer. CSI = string(ESC) + "[" - // Operating System Command + // Operating System Command. OSC = string(ESC) + "]" - // String Terminator + // String Terminator. 
ST = string(ESC) + `\` ) @@ -31,11 +32,11 @@ func (o *Output) isTTY() bool { if len(o.environ.Getenv("CI")) > 0 { return false } - if o.TTY() == nil { - return false + if f, ok := o.Writer().(*os.File); ok { + return isatty.IsTerminal(f.Fd()) } - return isatty.IsTerminal(o.TTY().Fd()) + return false } // ColorProfile returns the supported color profile: diff --git a/vendor/github.com/muesli/termenv/termenv_posix.go b/vendor/github.com/muesli/termenv/termenv_posix.go index b2109b749..c971dd998 100644 --- a/vendor/github.com/muesli/termenv/termenv_posix.go +++ b/vendor/github.com/muesli/termenv/termenv_posix.go @@ -1,5 +1,5 @@ -//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd -// +build darwin dragonfly freebsd linux netbsd openbsd +//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd || zos +// +build darwin dragonfly freebsd linux netbsd openbsd zos package termenv diff --git a/vendor/github.com/muesli/termenv/termenv_unix.go b/vendor/github.com/muesli/termenv/termenv_unix.go index 24d519a5d..bef49ca3b 100644 --- a/vendor/github.com/muesli/termenv/termenv_unix.go +++ b/vendor/github.com/muesli/termenv/termenv_unix.go @@ -1,5 +1,5 @@ -//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris -// +build darwin dragonfly freebsd linux netbsd openbsd solaris +//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos +// +build darwin dragonfly freebsd linux netbsd openbsd solaris zos package termenv @@ -14,7 +14,7 @@ import ( ) const ( - // timeout for OSC queries + // timeout for OSC queries. OSCTimeout = 5 * time.Second ) @@ -50,9 +50,15 @@ func (o *Output) ColorProfile() Profile { } switch term { - case "xterm-kitty", "wezterm": + case + "alacritty", + "contour", + "rio", + "wezterm", + "xterm-ghostty", + "xterm-kitty": return TrueColor - case "linux": + case "linux", "xterm": return ANSI } @@ -69,6 +75,7 @@ func (o *Output) ColorProfile() Profile { return Ascii } +//nolint:mnd func (o Output) foregroundColor() Color { s, err := o.termStatusReport(10) if err == nil { @@ -91,6 +98,7 @@ func (o Output) foregroundColor() Color { return ANSIColor(7) } +//nolint:mnd func (o Output) backgroundColor() Color { s, err := o.termStatusReport(11) if err == nil { @@ -117,15 +125,15 @@ func (o *Output) waitForData(timeout time.Duration) error { fd := o.TTY().Fd() tv := unix.NsecToTimeval(int64(timeout)) var readfds unix.FdSet - readfds.Set(int(fd)) + readfds.Set(int(fd)) //nolint:gosec for { - n, err := unix.Select(int(fd)+1, &readfds, nil, nil, &tv) + n, err := unix.Select(int(fd)+1, &readfds, nil, nil, &tv) //nolint:gosec if err == unix.EINTR { continue } if err != nil { - return err + return err //nolint:wrapcheck } if n == 0 { return fmt.Errorf("timeout") @@ -147,7 +155,7 @@ func (o *Output) readNextByte() (byte, error) { var b [1]byte n, err := o.TTY().Read(b[:]) if err != nil { - return 0, err + return 0, err //nolint:wrapcheck } if n == 0 { @@ -215,7 +223,7 @@ func (o *Output) readNextResponse() (response string, isOSC bool, err error) { } // both responses have less than 25 bytes, so if we read more, that's an error - if len(response) > 25 { + if len(response) > 25 { //nolint:mnd break } } @@ -227,7 +235,7 @@ func (o Output) termStatusReport(sequence int) (string, error) { // screen/tmux can't support OSC, because they can be connected to multiple // terminals concurrently. 
term := o.environ.Getenv("TERM") - if strings.HasPrefix(term, "screen") || strings.HasPrefix(term, "tmux") { + if strings.HasPrefix(term, "screen") || strings.HasPrefix(term, "tmux") || strings.HasPrefix(term, "dumb") { return "", ErrStatusReport } @@ -237,7 +245,7 @@ func (o Output) termStatusReport(sequence int) (string, error) { } if !o.unsafe { - fd := int(tty.Fd()) + fd := int(tty.Fd()) //nolint:gosec // if in background, we can't control the terminal if !isForeground(fd) { return "", ErrStatusReport } @@ -258,10 +266,10 @@ func (o Output) termStatusReport(sequence int) (string, error) { } // first, send OSC query, which is ignored by terminals which do not support it - fmt.Fprintf(tty, OSC+"%d;?"+ST, sequence) + fmt.Fprintf(tty, OSC+"%d;?"+ST, sequence) //nolint:errcheck // then, query cursor position, should be supported by all terminals - fmt.Fprintf(tty, CSI+"6n") + fmt.Fprintf(tty, CSI+"6n") //nolint:errcheck // read the next response res, isOSC, err := o.readNextResponse() diff --git a/vendor/github.com/muesli/termenv/termenv_windows.go b/vendor/github.com/muesli/termenv/termenv_windows.go index 1d9c61871..f9b1def05 100644 --- a/vendor/github.com/muesli/termenv/termenv_windows.go +++ b/vendor/github.com/muesli/termenv/termenv_windows.go @@ -5,6 +5,7 @@ package termenv import ( "fmt" + "os" "strconv" "golang.org/x/sys/windows" @@ -103,8 +104,8 @@ func EnableVirtualTerminalProcessing(o *Output) (restoreFunc func() error, err e } // If o is not a tty, then there is nothing to do. - tty := o.TTY() - if tty == nil { + tty, ok := o.Writer().(*os.File) + if tty == nil || !ok { return } diff --git a/vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml b/vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml index 1d8b69e65..ec52857a3 100644 --- a/vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml +++ b/vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml @@ -1,3 +1,4 @@ +version: 2 before: hooks: - go mod tidy diff --git a/vendor/github.com/pelletier/go-toml/v2/README.md b/vendor/github.com/pelletier/go-toml/v2/README.md index d964b25fe..0755e5564 100644 --- a/vendor/github.com/pelletier/go-toml/v2/README.md +++ b/vendor/github.com/pelletier/go-toml/v2/README.md @@ -565,7 +565,7 @@ complete solutions exist out there. ## Versioning -Expect for parts explicitely marked otherwise, go-toml follows [Semantic +Except for parts explicitly marked otherwise, go-toml follows [Semantic Versioning](https://semver.org). The supported version of [TOML](https://github.com/toml-lang/toml) is indicated at the beginning of this document.
The last two major versions of Go are supported (see [Go Release diff --git a/vendor/github.com/pelletier/go-toml/v2/marshaler.go b/vendor/github.com/pelletier/go-toml/v2/marshaler.go index 7f4e20c12..161acd934 100644 --- a/vendor/github.com/pelletier/go-toml/v2/marshaler.go +++ b/vendor/github.com/pelletier/go-toml/v2/marshaler.go @@ -8,7 +8,7 @@ import ( "io" "math" "reflect" - "sort" + "slices" "strconv" "strings" "time" @@ -280,7 +280,7 @@ func (enc *Encoder) encode(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, e } hasTextMarshaler := v.Type().Implements(textMarshalerType) - if hasTextMarshaler || (v.CanAddr() && reflect.PtrTo(v.Type()).Implements(textMarshalerType)) { + if hasTextMarshaler || (v.CanAddr() && reflect.PointerTo(v.Type()).Implements(textMarshalerType)) { if !hasTextMarshaler { v = v.Addr() } @@ -631,6 +631,18 @@ func (enc *Encoder) keyToString(k reflect.Value) (string, error) { return "", fmt.Errorf("toml: error marshalling key %v from text: %w", k, err) } return string(keyB), nil + + case keyType.Kind() == reflect.Int || keyType.Kind() == reflect.Int8 || keyType.Kind() == reflect.Int16 || keyType.Kind() == reflect.Int32 || keyType.Kind() == reflect.Int64: + return strconv.FormatInt(k.Int(), 10), nil + + case keyType.Kind() == reflect.Uint || keyType.Kind() == reflect.Uint8 || keyType.Kind() == reflect.Uint16 || keyType.Kind() == reflect.Uint32 || keyType.Kind() == reflect.Uint64: + return strconv.FormatUint(k.Uint(), 10), nil + + case keyType.Kind() == reflect.Float32: + return strconv.FormatFloat(k.Float(), 'f', -1, 32), nil + + case keyType.Kind() == reflect.Float64: + return strconv.FormatFloat(k.Float(), 'f', -1, 64), nil } return "", fmt.Errorf("toml: type %s is not supported as a map key", keyType.Kind()) } @@ -668,8 +680,8 @@ func (enc *Encoder) encodeMap(b []byte, ctx encoderCtx, v reflect.Value) ([]byte } func sortEntriesByKey(e []entry) { - sort.Slice(e, func(i, j int) bool { - return e[i].Key < e[j].Key + slices.SortFunc(e, func(a, b entry) int { + return strings.Compare(a.Key, b.Key) }) } @@ -732,7 +744,7 @@ func walkStruct(ctx encoderCtx, t *table, v reflect.Value) { if fieldType.Anonymous { if fieldType.Type.Kind() == reflect.Struct { walkStruct(ctx, t, f) - } else if fieldType.Type.Kind() == reflect.Pointer && !f.IsNil() && f.Elem().Kind() == reflect.Struct { + } else if fieldType.Type.Kind() == reflect.Ptr && !f.IsNil() && f.Elem().Kind() == reflect.Struct { walkStruct(ctx, t, f.Elem()) } continue @@ -951,7 +963,7 @@ func willConvertToTable(ctx encoderCtx, v reflect.Value) bool { if !v.IsValid() { return false } - if v.Type() == timeType || v.Type().Implements(textMarshalerType) || (v.Kind() != reflect.Ptr && v.CanAddr() && reflect.PtrTo(v.Type()).Implements(textMarshalerType)) { + if v.Type() == timeType || v.Type().Implements(textMarshalerType) || (v.Kind() != reflect.Ptr && v.CanAddr() && reflect.PointerTo(v.Type()).Implements(textMarshalerType)) { return false } diff --git a/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go b/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go index 98231bae6..c3df8bee1 100644 --- a/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go +++ b/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go @@ -5,9 +5,9 @@ import ( "errors" "fmt" "io" - "io/ioutil" "math" "reflect" + "strconv" "strings" "sync/atomic" "time" @@ -21,10 +21,8 @@ import ( // // It is a shortcut for Decoder.Decode() with the default options. 
func Unmarshal(data []byte, v interface{}) error { - p := unstable.Parser{} - p.Reset(data) - d := decoder{p: &p} - + d := decoder{} + d.p.Reset(data) return d.FromParser(v) } @@ -117,27 +115,25 @@ func (d *Decoder) EnableUnmarshalerInterface() *Decoder { // Inline Table -> same as Table // Array of Tables -> same as Array and Table func (d *Decoder) Decode(v interface{}) error { - b, err := ioutil.ReadAll(d.r) + b, err := io.ReadAll(d.r) if err != nil { return fmt.Errorf("toml: %w", err) } - p := unstable.Parser{} - p.Reset(b) dec := decoder{ - p: &p, strict: strict{ Enabled: d.strict, }, unmarshalerInterface: d.unmarshalerInterface, } + dec.p.Reset(b) return dec.FromParser(v) } type decoder struct { // Which parser instance in use for this decoding session. - p *unstable.Parser + p unstable.Parser // Flag indicating that the current expression is stashed. // If set to true, calling nextExpr will not actually pull a new expression @@ -1078,12 +1074,39 @@ func (d *decoder) keyFromData(keyType reflect.Type, data []byte) (reflect.Value, } return mk, nil - case reflect.PtrTo(keyType).Implements(textUnmarshalerType): + case reflect.PointerTo(keyType).Implements(textUnmarshalerType): mk := reflect.New(keyType) if err := mk.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil { return reflect.Value{}, fmt.Errorf("toml: error unmarshalling key type %s from text: %w", stringType, err) } return mk.Elem(), nil + + case keyType.Kind() == reflect.Int || keyType.Kind() == reflect.Int8 || keyType.Kind() == reflect.Int16 || keyType.Kind() == reflect.Int32 || keyType.Kind() == reflect.Int64: + key, err := strconv.ParseInt(string(data), 10, 64) + if err != nil { + return reflect.Value{}, fmt.Errorf("toml: error parsing key of type %s from integer: %w", stringType, err) + } + return reflect.ValueOf(key).Convert(keyType), nil + case keyType.Kind() == reflect.Uint || keyType.Kind() == reflect.Uint8 || keyType.Kind() == reflect.Uint16 || keyType.Kind() == reflect.Uint32 || keyType.Kind() == reflect.Uint64: + key, err := strconv.ParseUint(string(data), 10, 64) + if err != nil { + return reflect.Value{}, fmt.Errorf("toml: error parsing key of type %s from unsigned integer: %w", stringType, err) + } + return reflect.ValueOf(key).Convert(keyType), nil + + case keyType.Kind() == reflect.Float32: + key, err := strconv.ParseFloat(string(data), 32) + if err != nil { + return reflect.Value{}, fmt.Errorf("toml: error parsing key of type %s from float: %w", stringType, err) + } + return reflect.ValueOf(float32(key)), nil + + case keyType.Kind() == reflect.Float64: + key, err := strconv.ParseFloat(string(data), 64) + if err != nil { + return reflect.Value{}, fmt.Errorf("toml: error parsing key of type %s from float: %w", stringType, err) + } + return reflect.ValueOf(float64(key)), nil } return reflect.Value{}, fmt.Errorf("toml: cannot convert map key of type %s to expected type %s", stringType, keyType) } diff --git a/vendor/github.com/spf13/cobra/.golangci.yml b/vendor/github.com/spf13/cobra/.golangci.yml index a618ec24d..2c8f4808c 100644 --- a/vendor/github.com/spf13/cobra/.golangci.yml +++ b/vendor/github.com/spf13/cobra/.golangci.yml @@ -26,33 +26,28 @@ linters: - errcheck #- exhaustive #- funlen - - gas #- gochecknoinits - goconst - #- gocritic + - gocritic #- gocyclo - #- gofmt + - gofmt - goimports - - golint #- gomnd #- goprintffuncname - #- gosec - #- gosimple + - gosec + - gosimple - govet - ineffassign - - interfacer #- lll - - maligned - - megacheck - #- misspell + - misspell #- nakedret #- 
noctx - #- nolintlint + - nolintlint #- rowserrcheck #- scopelint - #- staticcheck + - staticcheck #- structcheck ! deprecated since v1.49.0; replaced by 'unused' - #- stylecheck + - stylecheck #- typecheck - unconvert #- unparam diff --git a/vendor/github.com/spf13/cobra/README.md b/vendor/github.com/spf13/cobra/README.md index 6444f4b7f..71757151c 100644 --- a/vendor/github.com/spf13/cobra/README.md +++ b/vendor/github.com/spf13/cobra/README.md @@ -1,4 +1,5 @@ -![cobra logo](assets/CobraMain.png) + +![cobra logo](https://github.com/user-attachments/assets/cbc3adf8-0dff-46e9-a88d-5e2d971c169e) Cobra is a library for creating powerful modern CLI applications. @@ -105,7 +106,7 @@ go install github.com/spf13/cobra-cli@latest For complete details on using the Cobra-CLI generator, please read [The Cobra Generator README](https://github.com/spf13/cobra-cli/blob/main/README.md) -For complete details on using the Cobra library, please read the [The Cobra User Guide](site/content/user_guide.md). +For complete details on using the Cobra library, please read [The Cobra User Guide](site/content/user_guide.md). # License diff --git a/vendor/github.com/spf13/cobra/active_help.go b/vendor/github.com/spf13/cobra/active_help.go index 5f965e057..b3e2dadfe 100644 --- a/vendor/github.com/spf13/cobra/active_help.go +++ b/vendor/github.com/spf13/cobra/active_help.go @@ -17,21 +17,17 @@ package cobra import ( "fmt" "os" - "regexp" - "strings" ) const ( activeHelpMarker = "_activeHelp_ " // The below values should not be changed: programs will be using them explicitly // in their user documentation, and users will be using them explicitly. - activeHelpEnvVarSuffix = "_ACTIVE_HELP" - activeHelpGlobalEnvVar = "COBRA_ACTIVE_HELP" + activeHelpEnvVarSuffix = "ACTIVE_HELP" + activeHelpGlobalEnvVar = configEnvVarGlobalPrefix + "_" + activeHelpEnvVarSuffix activeHelpGlobalDisable = "0" ) -var activeHelpEnvVarPrefixSubstRegexp = regexp.MustCompile(`[^A-Z0-9_]`) - // AppendActiveHelp adds the specified string to the specified array to be used as ActiveHelp. // Such strings will be processed by the completion script and will be shown as ActiveHelp // to the user. @@ -39,7 +35,7 @@ var activeHelpEnvVarPrefixSubstRegexp = regexp.MustCompile(`[^A-Z0-9_]`) // This function can be called multiple times before and/or after completions are added to // the array. Each time this function is called with the same array, the new // ActiveHelp line will be shown below the previous ones when completion is triggered. -func AppendActiveHelp(compArray []string, activeHelpStr string) []string { +func AppendActiveHelp(compArray []Completion, activeHelpStr string) []Completion { return append(compArray, fmt.Sprintf("%s%s", activeHelpMarker, activeHelpStr)) } @@ -60,8 +56,5 @@ func GetActiveHelpConfig(cmd *Command) string { // variable. It has the format <PROGRAM>_ACTIVE_HELP where <PROGRAM> is the name of the // root command in upper case, with all non-ASCII-alphanumeric characters replaced by `_`. func activeHelpEnvVar(name string) string { - // This format should not be changed: users will be using it explicitly.
- activeHelpEnvVar := strings.ToUpper(fmt.Sprintf("%s%s", name, activeHelpEnvVarSuffix)) - activeHelpEnvVar = activeHelpEnvVarPrefixSubstRegexp.ReplaceAllString(activeHelpEnvVar, "_") - return activeHelpEnvVar + return configEnvVar(name, activeHelpEnvVarSuffix) } diff --git a/vendor/github.com/spf13/cobra/args.go b/vendor/github.com/spf13/cobra/args.go index e79ec33a8..ed1e70cea 100644 --- a/vendor/github.com/spf13/cobra/args.go +++ b/vendor/github.com/spf13/cobra/args.go @@ -52,9 +52,9 @@ func OnlyValidArgs(cmd *Command, args []string) error { if len(cmd.ValidArgs) > 0 { // Remove any description that may be included in ValidArgs. // A description is following a tab character. - var validArgs []string + validArgs := make([]string, 0, len(cmd.ValidArgs)) for _, v := range cmd.ValidArgs { - validArgs = append(validArgs, strings.Split(v, "\t")[0]) + validArgs = append(validArgs, strings.SplitN(v, "\t", 2)[0]) } for _, v := range args { if !stringInSlice(v, validArgs) { diff --git a/vendor/github.com/spf13/cobra/bash_completions.go b/vendor/github.com/spf13/cobra/bash_completions.go index 8a5315184..f4d198cbc 100644 --- a/vendor/github.com/spf13/cobra/bash_completions.go +++ b/vendor/github.com/spf13/cobra/bash_completions.go @@ -597,19 +597,16 @@ func writeRequiredFlag(buf io.StringWriter, cmd *Command) { if nonCompletableFlag(flag) { return } - for key := range flag.Annotations { - switch key { - case BashCompOneRequiredFlag: - format := " must_have_one_flag+=(\"--%s" - if flag.Value.Type() != "bool" { - format += "=" - } - format += cbn - WriteStringAndCheck(buf, fmt.Sprintf(format, flag.Name)) - - if len(flag.Shorthand) > 0 { - WriteStringAndCheck(buf, fmt.Sprintf(" must_have_one_flag+=(\"-%s"+cbn, flag.Shorthand)) - } + if _, ok := flag.Annotations[BashCompOneRequiredFlag]; ok { + format := " must_have_one_flag+=(\"--%s" + if flag.Value.Type() != "bool" { + format += "=" + } + format += cbn + WriteStringAndCheck(buf, fmt.Sprintf(format, flag.Name)) + + if len(flag.Shorthand) > 0 { + WriteStringAndCheck(buf, fmt.Sprintf(" must_have_one_flag+=(\"-%s"+cbn, flag.Shorthand)) } } }) @@ -621,7 +618,7 @@ func writeRequiredNouns(buf io.StringWriter, cmd *Command) { for _, value := range cmd.ValidArgs { // Remove any description that may be included following a tab character. // Descriptions are not supported by bash completion. - value = strings.Split(value, "\t")[0] + value = strings.SplitN(value, "\t", 2)[0] WriteStringAndCheck(buf, fmt.Sprintf(" must_have_one_noun+=(%q)\n", value)) } if cmd.ValidArgsFunction != nil { diff --git a/vendor/github.com/spf13/cobra/bash_completionsV2.go b/vendor/github.com/spf13/cobra/bash_completionsV2.go index 1cce5c329..d2397aa36 100644 --- a/vendor/github.com/spf13/cobra/bash_completionsV2.go +++ b/vendor/github.com/spf13/cobra/bash_completionsV2.go @@ -146,7 +146,7 @@ __%[1]s_process_completion_results() { if (((directive & shellCompDirectiveFilterFileExt) != 0)); then # File extension filtering - local fullFilter filter filteringCmd + local fullFilter="" filter filteringCmd # Do not use quotes around the $completions variable or else newline # characters will be kept. 
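Several of the cobra hunks above and below replace strings.Split with strings.SplitN when stripping completion descriptions, so the candidate is split only once, at the first tab. For context, the description format itself is just a TAB-delimited suffix on each candidate. A minimal sketch of a command that uses it, with a hypothetical command name and choices that are not part of this patch:

package main

import "github.com/spf13/cobra"

func main() {
	// Each ValidArgs entry may carry a description after a tab; completion
	// code keeps only the text before the first tab when matching, which is
	// what the SplitN(v, "\t", 2)[0] change above does.
	cmd := &cobra.Command{
		Use:       "deploy [environment]",
		Args:      cobra.OnlyValidArgs,
		ValidArgs: []string{"staging\tdeploy to staging", "prod\tdeploy to production"},
		Run:       func(cmd *cobra.Command, args []string) {},
	}
	_ = cmd.Execute()
}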
@@ -177,20 +177,71 @@ __%[1]s_process_completion_results() { __%[1]s_handle_special_char "$cur" = # Print the activeHelp statements before we finish + __%[1]s_handle_activeHelp +} + +__%[1]s_handle_activeHelp() { + # Print the activeHelp statements if ((${#activeHelp[*]} != 0)); then - printf "\n"; - printf "%%s\n" "${activeHelp[@]}" - printf "\n" - - # The prompt format is only available from bash 4.4. - # We test if it is available before using it. - if (x=${PS1@P}) 2> /dev/null; then - printf "%%s" "${PS1@P}${COMP_LINE[@]}" - else - # Can't print the prompt. Just print the - # text the user had typed, it is workable enough. - printf "%%s" "${COMP_LINE[@]}" + if [ -z $COMP_TYPE ]; then + # Bash v3 does not set the COMP_TYPE variable. + printf "\n"; + printf "%%s\n" "${activeHelp[@]}" + printf "\n" + __%[1]s_reprint_commandLine + return fi + + # Only print ActiveHelp on the second TAB press + if [ $COMP_TYPE -eq 63 ]; then + printf "\n" + printf "%%s\n" "${activeHelp[@]}" + + if ((${#COMPREPLY[*]} == 0)); then + # When there are no completion choices from the program, file completion + # may kick in if the program has not disabled it; in such a case, we want + # to know if any files will match what the user typed, so that we know if + # there will be completions presented, so that we know how to handle ActiveHelp. + # To find out, we actually trigger the file completion ourselves; + # the call to _filedir will fill COMPREPLY if files match. + if (((directive & shellCompDirectiveNoFileComp) == 0)); then + __%[1]s_debug "Listing files" + _filedir + fi + fi + + if ((${#COMPREPLY[*]} != 0)); then + # If there are completion choices to be shown, print a delimiter. + # Re-printing the command-line will automatically be done + # by the shell when it prints the completion choices. + printf -- "--" + else + # When there are no completion choices at all, we need + # to re-print the command-line since the shell will + # not be doing it itself. + __%[1]s_reprint_commandLine + fi + elif [ $COMP_TYPE -eq 37 ] || [ $COMP_TYPE -eq 42 ]; then + # For completion type: menu-complete/menu-complete-backward and insert-completions + # the completions are immediately inserted into the command-line, so we first + # print the activeHelp message and reprint the command-line since the shell won't. + printf "\n" + printf "%%s\n" "${activeHelp[@]}" + + __%[1]s_reprint_commandLine + fi + fi +} + +__%[1]s_reprint_commandLine() { + # The prompt format is only available from bash 4.4. + # We test if it is available before using it. + if (x=${PS1@P}) 2> /dev/null; then + printf "%%s" "${PS1@P}${COMP_LINE[@]}" + else + # Can't print the prompt. Just print the + # text the user had typed, it is workable enough. + printf "%%s" "${COMP_LINE[@]}" fi } @@ -201,6 +252,8 @@ __%[1]s_extract_activeHelp() { local endIndex=${#activeHelpMarker} while IFS='' read -r comp; do + [[ -z $comp ]] && continue + if [[ ${comp:0:endIndex} == $activeHelpMarker ]]; then comp=${comp:endIndex} __%[1]s_debug "ActiveHelp found: $comp" @@ -223,16 +276,21 @@ __%[1]s_handle_completion_types() { # If the user requested inserting one completion at a time, or all # completions at once on the command-line we must remove the descriptions. 
# https://github.com/spf13/cobra/issues/1508 - local tab=$'\t' comp - while IFS='' read -r comp; do - [[ -z $comp ]] && continue - # Strip any description - comp=${comp%%%%$tab*} - # Only consider the completions that match - if [[ $comp == "$cur"* ]]; then - COMPREPLY+=("$comp") - fi - done < <(printf "%%s\n" "${completions[@]}") + + # If there are no completions, we don't need to do anything + (( ${#completions[@]} == 0 )) && return 0 + + local tab=$'\t' + + # Strip any description and escape the completion to handle special characters + IFS=$'\n' read -ra completions -d '' < <(printf "%%q\n" "${completions[@]%%%%$tab*}") + + # Only consider the completions that match + IFS=$'\n' read -ra COMPREPLY -d '' < <(IFS=$'\n'; compgen -W "${completions[*]}" -- "${cur}") + + # compgen loses the escaping so we need to escape all completions again since they will + # all be inserted on the command-line. + IFS=$'\n' read -ra COMPREPLY -d '' < <(printf "%%q\n" "${COMPREPLY[@]}") ;; *) @@ -243,11 +301,25 @@ __%[1]s_handle_completion_types() { } __%[1]s_handle_standard_completion_case() { - local tab=$'\t' comp + local tab=$'\t' + + # If there are no completions, we don't need to do anything + (( ${#completions[@]} == 0 )) && return 0 # Short circuit to optimize if we don't have descriptions if [[ "${completions[*]}" != *$tab* ]]; then - IFS=$'\n' read -ra COMPREPLY -d '' < <(compgen -W "${completions[*]}" -- "$cur") + # First, escape the completions to handle special characters + IFS=$'\n' read -ra completions -d '' < <(printf "%%q\n" "${completions[@]}") + # Only consider the completions that match what the user typed + IFS=$'\n' read -ra COMPREPLY -d '' < <(IFS=$'\n'; compgen -W "${completions[*]}" -- "${cur}") + + # compgen loses the escaping so, if there is only a single completion, we need to + # escape it again because it will be inserted on the command-line. If there are multiple + # completions, we don't want to escape them because they will be printed in a list + # and we don't want to show escape characters in that list. + if (( ${#COMPREPLY[@]} == 1 )); then + COMPREPLY[0]=$(printf "%%q" "${COMPREPLY[0]}") + fi return 0 fi @@ -256,23 +328,39 @@ __%[1]s_handle_standard_completion_case() { # Look for the longest completion so that we can format things nicely while IFS='' read -r compline; do [[ -z $compline ]] && continue - # Strip any description before checking the length - comp=${compline%%%%$tab*} + + # Before checking if the completion matches what the user typed, + # we need to strip any description and escape the completion to handle special + # characters because those escape characters are part of what the user typed. + # Don't call "printf" in a sub-shell because it will be much slower + # since we are in a loop. + printf -v comp "%%q" "${compline%%%%$tab*}" &>/dev/null || comp=$(printf "%%q" "${compline%%%%$tab*}") + # Only consider the completions that match [[ $comp == "$cur"* ]] || continue + + # The completion matches. Add it to the list of full completions including + # its description. We don't escape the completion because it may get printed + # in a list if there are more than one and we don't want to show escape characters + # in that list. COMPREPLY+=("$compline") + + # Strip any description before checking the length, and again, don't escape + # the completion because this length is only used when printing the completions + # in a list and we don't want to show escape characters in that list.
+ comp=${compline%%%%$tab*} if ((${#comp}>longest)); then longest=${#comp} fi done < <(printf "%%s\n" "${completions[@]}") - # If there is a single completion left, remove the description text + # If there is a single completion left, remove the description text and escape any special characters if ((${#COMPREPLY[*]} == 1)); then __%[1]s_debug "COMPREPLY[0]: ${COMPREPLY[0]}" - comp="${COMPREPLY[0]%%%%$tab*}" - __%[1]s_debug "Removed description from single completion, which is now: ${comp}" - COMPREPLY[0]=$comp - else # Format the descriptions + COMPREPLY[0]=$(printf "%%q" "${COMPREPLY[0]%%%%$tab*}") + __%[1]s_debug "Removed description from single completion, which is now: ${COMPREPLY[0]}" + else + # Format the descriptions __%[1]s_format_comp_descriptions $longest fi } diff --git a/vendor/github.com/spf13/cobra/cobra.go b/vendor/github.com/spf13/cobra/cobra.go index a6b160ce5..d9cd2414e 100644 --- a/vendor/github.com/spf13/cobra/cobra.go +++ b/vendor/github.com/spf13/cobra/cobra.go @@ -176,12 +176,16 @@ func rpad(s string, padding int) string { return fmt.Sprintf(formattedString, s) } -// tmpl executes the given template text on data, writing the result to w. -func tmpl(w io.Writer, text string, data interface{}) error { - t := template.New("top") - t.Funcs(templateFuncs) - template.Must(t.Parse(text)) - return t.Execute(w, data) +func tmpl(text string) *tmplFunc { + return &tmplFunc{ + tmpl: text, + fn: func(w io.Writer, data interface{}) error { + t := template.New("top") + t.Funcs(templateFuncs) + template.Must(t.Parse(text)) + return t.Execute(w, data) + }, + } } // ld compares two strings and returns the levenshtein distance between them. @@ -193,8 +197,6 @@ func ld(s, t string, ignoreCase bool) int { d := make([][]int, len(s)+1) for i := range d { d[i] = make([]int, len(t)+1) - } - for i := range d { d[i][0] = i } for j := range d[0] { diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go index 2fbe6c131..dbb2c298b 100644 --- a/vendor/github.com/spf13/cobra/command.go +++ b/vendor/github.com/spf13/cobra/command.go @@ -33,6 +33,9 @@ import ( const ( FlagSetByCobraAnnotation = "cobra_annotation_flag_set_by_cobra" CommandDisplayNameAnnotation = "cobra_annotation_command_display_name" + + helpFlagName = "help" + helpCommandName = "help" ) // FParseErrWhitelist configures Flag parse errors to be ignored @@ -80,11 +83,11 @@ type Command struct { Example string // ValidArgs is list of all valid non-flag arguments that are accepted in shell completions - ValidArgs []string + ValidArgs []Completion // ValidArgsFunction is an optional function that provides valid non-flag arguments for shell completion. // It is a dynamic version of using ValidArgs. // Only one of ValidArgs and ValidArgsFunction can be used for a command. - ValidArgsFunction func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) + ValidArgsFunction CompletionFunc // Expected arguments Args PositionalArgs @@ -154,8 +157,10 @@ type Command struct { // pflags contains persistent flags. pflags *flag.FlagSet // lflags contains local flags. + // This field does not represent internal state, it's used as a cache to optimise LocalFlags function call lflags *flag.FlagSet // iflags contains inherited flags. + // This field does not represent internal state, it's used as a cache to optimise InheritedFlags function call iflags *flag.FlagSet // parentsPflags is all persistent flags of cmd's parents. 
parentsPflags *flag.FlagSet @@ -166,12 +171,12 @@ type Command struct { // usageFunc is usage func defined by user. usageFunc func(*Command) error // usageTemplate is usage template defined by user. - usageTemplate string + usageTemplate *tmplFunc // flagErrorFunc is func defined by user and it's called when the parsing of // flags returns an error. flagErrorFunc func(*Command, error) error // helpTemplate is help template defined by user. - helpTemplate string + helpTemplate *tmplFunc // helpFunc is help func defined by user. helpFunc func(*Command, []string) // helpCommand is command with usage 'help'. If it's not defined by user, @@ -184,7 +189,7 @@ type Command struct { completionCommandGroupID string // versionTemplate is the version template defined by user. - versionTemplate string + versionTemplate *tmplFunc // errPrefix is the error message prefix defined by user. errPrefix string @@ -279,6 +284,7 @@ func (c *Command) SetArgs(a []string) { // SetOutput sets the destination for usage and error messages. // If output is nil, os.Stderr is used. +// // Deprecated: Use SetOut and/or SetErr instead func (c *Command) SetOutput(output io.Writer) { c.outWriter = output @@ -310,7 +316,11 @@ func (c *Command) SetUsageFunc(f func(*Command) error) { // SetUsageTemplate sets usage template. Can be defined by Application. func (c *Command) SetUsageTemplate(s string) { - c.usageTemplate = s + if s == "" { + c.usageTemplate = nil + return + } + c.usageTemplate = tmpl(s) } // SetFlagErrorFunc sets a function to generate an error when flag parsing @@ -346,12 +356,20 @@ func (c *Command) SetCompletionCommandGroupID(groupID string) { // SetHelpTemplate sets help template to be used. Application can use it to set custom template. func (c *Command) SetHelpTemplate(s string) { - c.helpTemplate = s + if s == "" { + c.helpTemplate = nil + return + } + c.helpTemplate = tmpl(s) } // SetVersionTemplate sets version template to be used. Application can use it to set custom template. func (c *Command) SetVersionTemplate(s string) { - c.versionTemplate = s + if s == "" { + c.versionTemplate = nil + return + } + c.versionTemplate = tmpl(s) } // SetErrPrefix sets error message prefix to be used. Application can use it to set custom prefix. @@ -432,7 +450,8 @@ func (c *Command) UsageFunc() (f func(*Command) error) { } return func(c *Command) error { c.mergePersistentFlags() - err := tmpl(c.OutOrStderr(), c.UsageTemplate(), c) + fn := c.getUsageTemplateFunc() + err := fn(c.OutOrStderr(), c) if err != nil { c.PrintErrln(err) } @@ -440,6 +459,19 @@ func (c *Command) UsageFunc() (f func(*Command) error) { } } +// getUsageTemplateFunc returns the usage template function for the command +// going up the command tree if necessary. +func (c *Command) getUsageTemplateFunc() func(w io.Writer, data interface{}) error { + if c.usageTemplate != nil { + return c.usageTemplate.fn + } + + if c.HasParent() { + return c.parent.getUsageTemplateFunc() + } + return defaultUsageFunc +} + // Usage puts out the usage for the command. // Used when a user provides invalid input. // Can be defined by user by overriding UsageFunc. 
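The changes above route user-supplied usage/help/version templates through a small tmplFunc wrapper (declared near the end of command.go in this patch), so the built-in defaults can run as plain Go functions while template lookup still walks up the command tree. A minimal sketch of the user-visible behaviour, with hypothetical command names that are not part of this patch:

package main

import "github.com/spf13/cobra"

func main() {
	root := &cobra.Command{Use: "app"}
	sub := &cobra.Command{Use: "sub", Run: func(*cobra.Command, []string) {}}
	root.AddCommand(sub)

	// One template on the root is enough: getUsageTemplateFunc walks up the
	// parent chain, so "app sub --help" renders with the same template.
	root.SetUsageTemplate("Usage: {{.UseLine}}\n")

	// With this patch, passing an empty string resets to the built-in
	// default template instead of installing an empty one.
	// root.SetUsageTemplate("")

	_ = root.Execute()
}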
@@ -458,15 +490,30 @@ func (c *Command) HelpFunc() func(*Command, []string) { } return func(c *Command, a []string) { c.mergePersistentFlags() + fn := c.getHelpTemplateFunc() // The help should be sent to stdout // See https://github.com/spf13/cobra/issues/1002 - err := tmpl(c.OutOrStdout(), c.HelpTemplate(), c) + err := fn(c.OutOrStdout(), c) if err != nil { c.PrintErrln(err) } } } +// getHelpTemplateFunc returns the help template function for the command +// going up the command tree if necessary. +func (c *Command) getHelpTemplateFunc() func(w io.Writer, data interface{}) error { + if c.helpTemplate != nil { + return c.helpTemplate.fn + } + + if c.HasParent() { + return c.parent.getHelpTemplateFunc() + } + + return defaultHelpFunc +} + // Help puts out the help for the command. // Used when a user calls help [command]. // Can be defined by user by overriding HelpFunc. @@ -541,71 +588,55 @@ func (c *Command) NamePadding() int { } // UsageTemplate returns usage template for the command. +// This function is kept for backwards-compatibility reasons. func (c *Command) UsageTemplate() string { - if c.usageTemplate != "" { - return c.usageTemplate + if c.usageTemplate != nil { + return c.usageTemplate.tmpl } if c.HasParent() { return c.parent.UsageTemplate() } - return `Usage:{{if .Runnable}} - {{.UseLine}}{{end}}{{if .HasAvailableSubCommands}} - {{.CommandPath}} [command]{{end}}{{if gt (len .Aliases) 0}} - -Aliases: - {{.NameAndAliases}}{{end}}{{if .HasExample}} - -Examples: -{{.Example}}{{end}}{{if .HasAvailableSubCommands}}{{$cmds := .Commands}}{{if eq (len .Groups) 0}} - -Available Commands:{{range $cmds}}{{if (or .IsAvailableCommand (eq .Name "help"))}} - {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{else}}{{range $group := .Groups}} - -{{.Title}}{{range $cmds}}{{if (and (eq .GroupID $group.ID) (or .IsAvailableCommand (eq .Name "help")))}} - {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{if not .AllChildCommandsHaveGroup}} - -Additional Commands:{{range $cmds}}{{if (and (eq .GroupID "") (or .IsAvailableCommand (eq .Name "help")))}} - {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{end}}{{end}}{{if .HasAvailableLocalFlags}} - -Flags: -{{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasAvailableInheritedFlags}} - -Global Flags: -{{.InheritedFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasHelpSubCommands}} - -Additional help topics:{{range .Commands}}{{if .IsAdditionalHelpTopicCommand}} - {{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableSubCommands}} - -Use "{{.CommandPath}} [command] --help" for more information about a command.{{end}} -` + return defaultUsageTemplate } // HelpTemplate return help template for the command. +// This function is kept for backwards-compatibility reasons. func (c *Command) HelpTemplate() string { - if c.helpTemplate != "" { - return c.helpTemplate + if c.helpTemplate != nil { + return c.helpTemplate.tmpl } if c.HasParent() { return c.parent.HelpTemplate() } - return `{{with (or .Long .Short)}}{{. | trimTrailingWhitespaces}} - -{{end}}{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}` + return defaultHelpTemplate } // VersionTemplate return version template for the command. +// This function is kept for backwards-compatibility reasons. 
func (c *Command) VersionTemplate() string { - if c.versionTemplate != "" { - return c.versionTemplate + if c.versionTemplate != nil { + return c.versionTemplate.tmpl } if c.HasParent() { return c.parent.VersionTemplate() } - return `{{with .Name}}{{printf "%s " .}}{{end}}{{printf "version %s" .Version}} -` + return defaultVersionTemplate +} + +// getVersionTemplateFunc returns the version template function for the command +// going up the command tree if necessary. +func (c *Command) getVersionTemplateFunc() func(w io.Writer, data interface{}) error { + if c.versionTemplate != nil { + return c.versionTemplate.fn + } + + if c.HasParent() { + return c.parent.getVersionTemplateFunc() + } + return defaultVersionFunc } // ErrPrefix return error message prefix for the command @@ -706,7 +737,7 @@ Loop: // This is not a flag or a flag value. Check to see if it matches what we're looking for, and if so, // return the args, excluding the one at this position. if s == x { - ret := []string{} + ret := make([]string, 0, len(args)-1) ret = append(ret, args[:pos]...) ret = append(ret, args[pos+1:]...) return ret @@ -754,14 +785,14 @@ func (c *Command) findSuggestions(arg string) string { if c.SuggestionsMinimumDistance <= 0 { c.SuggestionsMinimumDistance = 2 } - suggestionsString := "" + var sb strings.Builder if suggestions := c.SuggestionsFor(arg); len(suggestions) > 0 { - suggestionsString += "\n\nDid you mean this?\n" + sb.WriteString("\n\nDid you mean this?\n") for _, s := range suggestions { - suggestionsString += fmt.Sprintf("\t%v\n", s) + _, _ = fmt.Fprintf(&sb, "\t%v\n", s) } } - return suggestionsString + return sb.String() } func (c *Command) findNext(next string) *Command { @@ -873,7 +904,7 @@ func (c *Command) ArgsLenAtDash() int { func (c *Command) execute(a []string) (err error) { if c == nil { - return fmt.Errorf("Called Execute() on a nil Command") + return fmt.Errorf("called Execute() on a nil Command") } if len(c.Deprecated) > 0 { @@ -892,7 +923,7 @@ func (c *Command) execute(a []string) (err error) { // If help is called, regardless of other flags, return we want help. // Also say we need help if the command isn't runnable. - helpVal, err := c.Flags().GetBool("help") + helpVal, err := c.Flags().GetBool(helpFlagName) if err != nil { // should be impossible to get here as we always declare a help // flag in InitDefaultHelpFlag() @@ -912,7 +943,8 @@ func (c *Command) execute(a []string) (err error) { return err } if versionVal { - err := tmpl(c.OutOrStdout(), c.VersionTemplate(), c) + fn := c.getVersionTemplateFunc() + err := fn(c.OutOrStdout(), c) if err != nil { c.Println(err) } @@ -1066,12 +1098,6 @@ func (c *Command) ExecuteC() (cmd *Command, err error) { // initialize help at the last point to allow for user overriding c.InitDefaultHelpCmd() - // initialize completion at the last point to allow for user overriding - c.InitDefaultCompletionCmd() - - // Now that all commands have been created, let's make sure all groups - // are properly created also - c.checkCommandGroups() args := c.args @@ -1080,9 +1106,16 @@ func (c *Command) ExecuteC() (cmd *Command, err error) { args = os.Args[1:] } - // initialize the hidden command to be used for shell completion + // initialize the __complete command to be used for shell completion c.initCompleteCmd(args) + // initialize the default completion command + c.InitDefaultCompletionCmd(args...) 
+ + // Now that all commands have been created, let's make sure all groups + // are properly created also + c.checkCommandGroups() + var flags []string if c.TraverseChildren { cmd, flags, err = c.Traverse(args) @@ -1185,15 +1218,16 @@ func (c *Command) checkCommandGroups() { // If c already has help flag, it will do nothing. func (c *Command) InitDefaultHelpFlag() { c.mergePersistentFlags() - if c.Flags().Lookup("help") == nil { + if c.Flags().Lookup(helpFlagName) == nil { usage := "help for " - if c.Name() == "" { + name := c.DisplayName() + if name == "" { usage += "this command" } else { - usage += c.Name() + usage += name } - c.Flags().BoolP("help", "h", false, usage) - _ = c.Flags().SetAnnotation("help", FlagSetByCobraAnnotation, []string{"true"}) + c.Flags().BoolP(helpFlagName, "h", false, usage) + _ = c.Flags().SetAnnotation(helpFlagName, FlagSetByCobraAnnotation, []string{"true"}) } } @@ -1212,7 +1246,7 @@ func (c *Command) InitDefaultVersionFlag() { if c.Name() == "" { usage += "this command" } else { - usage += c.Name() + usage += c.DisplayName() } if c.Flags().ShorthandLookup("v") == nil { c.Flags().BoolP("version", "v", false, usage) @@ -1236,9 +1270,9 @@ func (c *Command) InitDefaultHelpCmd() { Use: "help [command]", Short: "Help about any command", Long: `Help provides help for any command in the application. -Simply type ` + c.Name() + ` help [path to command] for full details.`, - ValidArgsFunction: func(c *Command, args []string, toComplete string) ([]string, ShellCompDirective) { - var completions []string +Simply type ` + c.DisplayName() + ` help [path to command] for full details.`, + ValidArgsFunction: func(c *Command, args []string, toComplete string) ([]Completion, ShellCompDirective) { + var completions []Completion cmd, _, e := c.Root().Find(args) if e != nil { return nil, ShellCompDirectiveNoFileComp } @@ -1250,7 +1284,7 @@ Simply type ` + c.Name() + ` help [path to command] for full details.`, for _, subCmd := range cmd.Commands() { if subCmd.IsAvailableCommand() || subCmd == cmd.helpCommand { if strings.HasPrefix(subCmd.Name(), toComplete) { - completions = append(completions, fmt.Sprintf("%s\t%s", subCmd.Name(), subCmd.Short)) + completions = append(completions, CompletionWithDesc(subCmd.Name(), subCmd.Short)) } } } @@ -1427,6 +1461,12 @@ func (c *Command) CommandPath() string { if c.HasParent() { return c.Parent().CommandPath() + " " + c.Name() } + return c.DisplayName() +} + +// DisplayName returns the name to display in help text. Returns command Name() +// if CommandDisplayNameAnnotation is not set. +func (c *Command) DisplayName() string { if displayName, ok := c.Annotations[CommandDisplayNameAnnotation]; ok { return displayName } @@ -1436,10 +1476,11 @@ func (c *Command) CommandPath() string { // UseLine puts out the full usage for a given command (including parents). func (c *Command) UseLine() string { var useline string + use := strings.Replace(c.Use, c.Name(), c.DisplayName(), 1) if c.HasParent() { - useline = c.parent.CommandPath() + " " + c.Use + useline = c.parent.CommandPath() + " " + use } else { - useline = c.Use + useline = use } if c.DisableFlagsInUseLine { return useline } @@ -1452,7 +1493,6 @@ func (c *Command) UseLine() string { // DebugFlags used to determine which flags have been assigned to which commands // and which persist.
-// nolint:goconst func (c *Command) DebugFlags() { c.Println("DebugFlags called on", c.Name()) var debugflags func(*Command) @@ -1642,7 +1682,7 @@ func (c *Command) GlobalNormalizationFunc() func(f *flag.FlagSet, name string) f // to this command (local and persistent declared here and by all parents). func (c *Command) Flags() *flag.FlagSet { if c.flags == nil { - c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.flags = flag.NewFlagSet(c.DisplayName(), flag.ContinueOnError) if c.flagErrorBuf == nil { c.flagErrorBuf = new(bytes.Buffer) } @@ -1653,10 +1693,11 @@ func (c *Command) Flags() *flag.FlagSet { } // LocalNonPersistentFlags are flags specific to this command which will NOT persist to subcommands. +// This function does not modify the flags of the current command, its purpose is to return the current state. func (c *Command) LocalNonPersistentFlags() *flag.FlagSet { persistentFlags := c.PersistentFlags() - out := flag.NewFlagSet(c.Name(), flag.ContinueOnError) + out := flag.NewFlagSet(c.DisplayName(), flag.ContinueOnError) c.LocalFlags().VisitAll(func(f *flag.Flag) { if persistentFlags.Lookup(f.Name) == nil { out.AddFlag(f) @@ -1666,11 +1707,12 @@ } // LocalFlags returns the local FlagSet specifically set in the current command. +// This function does not modify the flags of the current command, its purpose is to return the current state. func (c *Command) LocalFlags() *flag.FlagSet { c.mergePersistentFlags() if c.lflags == nil { - c.lflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.lflags = flag.NewFlagSet(c.DisplayName(), flag.ContinueOnError) if c.flagErrorBuf == nil { c.flagErrorBuf = new(bytes.Buffer) } @@ -1693,11 +1735,12 @@ } // InheritedFlags returns all flags which were inherited from parent commands. +// This function does not modify the flags of the current command, its purpose is to return the current state. func (c *Command) InheritedFlags() *flag.FlagSet { c.mergePersistentFlags() if c.iflags == nil { - c.iflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.iflags = flag.NewFlagSet(c.DisplayName(), flag.ContinueOnError) if c.flagErrorBuf == nil { c.flagErrorBuf = new(bytes.Buffer) } @@ -1718,6 +1761,7 @@ } // NonInheritedFlags returns all flags which were not inherited from parent commands. +// This function does not modify the flags of the current command, its purpose is to return the current state. func (c *Command) NonInheritedFlags() *flag.FlagSet { return c.LocalFlags() } @@ -1725,7 +1769,7 @@ // PersistentFlags returns the persistent FlagSet specifically set in the current command.
func (c *Command) PersistentFlags() *flag.FlagSet { if c.pflags == nil { - c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.pflags = flag.NewFlagSet(c.DisplayName(), flag.ContinueOnError) if c.flagErrorBuf == nil { c.flagErrorBuf = new(bytes.Buffer) } @@ -1738,9 +1782,9 @@ func (c *Command) PersistentFlags() *flag.FlagSet { func (c *Command) ResetFlags() { c.flagErrorBuf = new(bytes.Buffer) c.flagErrorBuf.Reset() - c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.flags = flag.NewFlagSet(c.DisplayName(), flag.ContinueOnError) c.flags.SetOutput(c.flagErrorBuf) - c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.pflags = flag.NewFlagSet(c.DisplayName(), flag.ContinueOnError) c.pflags.SetOutput(c.flagErrorBuf) c.lflags = nil @@ -1857,7 +1901,7 @@ func (c *Command) mergePersistentFlags() { // If c.parentsPflags == nil, it makes new. func (c *Command) updateParentsPflags() { if c.parentsPflags == nil { - c.parentsPflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.parentsPflags = flag.NewFlagSet(c.DisplayName(), flag.ContinueOnError) c.parentsPflags.SetOutput(c.flagErrorBuf) c.parentsPflags.SortFlags = false } @@ -1883,3 +1927,141 @@ func commandNameMatches(s string, t string) bool { return s == t } + +// tmplFunc holds a template and a function that will execute said template. +type tmplFunc struct { + tmpl string + fn func(io.Writer, interface{}) error +} + +var defaultUsageTemplate = `Usage:{{if .Runnable}} + {{.UseLine}}{{end}}{{if .HasAvailableSubCommands}} + {{.CommandPath}} [command]{{end}}{{if gt (len .Aliases) 0}} + +Aliases: + {{.NameAndAliases}}{{end}}{{if .HasExample}} + +Examples: +{{.Example}}{{end}}{{if .HasAvailableSubCommands}}{{$cmds := .Commands}}{{if eq (len .Groups) 0}} + +Available Commands:{{range $cmds}}{{if (or .IsAvailableCommand (eq .Name "help"))}} + {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{else}}{{range $group := .Groups}} + +{{.Title}}{{range $cmds}}{{if (and (eq .GroupID $group.ID) (or .IsAvailableCommand (eq .Name "help")))}} + {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{if not .AllChildCommandsHaveGroup}} + +Additional Commands:{{range $cmds}}{{if (and (eq .GroupID "") (or .IsAvailableCommand (eq .Name "help")))}} + {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{end}}{{end}}{{if .HasAvailableLocalFlags}} + +Flags: +{{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasAvailableInheritedFlags}} + +Global Flags: +{{.InheritedFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasHelpSubCommands}} + +Additional help topics:{{range .Commands}}{{if .IsAdditionalHelpTopicCommand}} + {{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableSubCommands}} + +Use "{{.CommandPath}} [command] --help" for more information about a command.{{end}} +` + +// defaultUsageFunc is equivalent to executing defaultUsageTemplate. The two should be changed in sync. 
+func defaultUsageFunc(w io.Writer, in interface{}) error { + c := in.(*Command) + fmt.Fprint(w, "Usage:") + if c.Runnable() { + fmt.Fprintf(w, "\n %s", c.UseLine()) + } + if c.HasAvailableSubCommands() { + fmt.Fprintf(w, "\n %s [command]", c.CommandPath()) + } + if len(c.Aliases) > 0 { + fmt.Fprintf(w, "\n\nAliases:\n") + fmt.Fprintf(w, " %s", c.NameAndAliases()) + } + if c.HasExample() { + fmt.Fprintf(w, "\n\nExamples:\n") + fmt.Fprintf(w, "%s", c.Example) + } + if c.HasAvailableSubCommands() { + cmds := c.Commands() + if len(c.Groups()) == 0 { + fmt.Fprintf(w, "\n\nAvailable Commands:") + for _, subcmd := range cmds { + if subcmd.IsAvailableCommand() || subcmd.Name() == helpCommandName { + fmt.Fprintf(w, "\n %s %s", rpad(subcmd.Name(), subcmd.NamePadding()), subcmd.Short) + } + } + } else { + for _, group := range c.Groups() { + fmt.Fprintf(w, "\n\n%s", group.Title) + for _, subcmd := range cmds { + if subcmd.GroupID == group.ID && (subcmd.IsAvailableCommand() || subcmd.Name() == helpCommandName) { + fmt.Fprintf(w, "\n %s %s", rpad(subcmd.Name(), subcmd.NamePadding()), subcmd.Short) + } + } + } + if !c.AllChildCommandsHaveGroup() { + fmt.Fprintf(w, "\n\nAdditional Commands:") + for _, subcmd := range cmds { + if subcmd.GroupID == "" && (subcmd.IsAvailableCommand() || subcmd.Name() == helpCommandName) { + fmt.Fprintf(w, "\n %s %s", rpad(subcmd.Name(), subcmd.NamePadding()), subcmd.Short) + } + } + } + } + } + if c.HasAvailableLocalFlags() { + fmt.Fprintf(w, "\n\nFlags:\n") + fmt.Fprint(w, trimRightSpace(c.LocalFlags().FlagUsages())) + } + if c.HasAvailableInheritedFlags() { + fmt.Fprintf(w, "\n\nGlobal Flags:\n") + fmt.Fprint(w, trimRightSpace(c.InheritedFlags().FlagUsages())) + } + if c.HasHelpSubCommands() { + fmt.Fprintf(w, "\n\nAdditional help topics:") + for _, subcmd := range c.Commands() { + if subcmd.IsAdditionalHelpTopicCommand() { + fmt.Fprintf(w, "\n %s %s", rpad(subcmd.CommandPath(), subcmd.CommandPathPadding()), subcmd.Short) + } + } + } + if c.HasAvailableSubCommands() { + fmt.Fprintf(w, "\n\nUse \"%s [command] --help\" for more information about a command.", c.CommandPath()) + } + fmt.Fprintln(w) + return nil +} + +var defaultHelpTemplate = `{{with (or .Long .Short)}}{{. | trimTrailingWhitespaces}} + +{{end}}{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}` + +// defaultHelpFunc is equivalent to executing defaultHelpTemplate. The two should be changed in sync. +func defaultHelpFunc(w io.Writer, in interface{}) error { + c := in.(*Command) + usage := c.Long + if usage == "" { + usage = c.Short + } + usage = trimRightSpace(usage) + if usage != "" { + fmt.Fprintln(w, usage) + fmt.Fprintln(w) + } + if c.Runnable() || c.HasSubCommands() { + fmt.Fprint(w, c.UsageString()) + } + return nil +} + +var defaultVersionTemplate = `{{with .DisplayName}}{{printf "%s " .}}{{end}}{{printf "version %s" .Version}} +` + +// defaultVersionFunc is equivalent to executing defaultVersionTemplate. The two should be changed in sync.
+func defaultVersionFunc(w io.Writer, in interface{}) error {
+	c := in.(*Command)
+	_, err := fmt.Fprintf(w, "%s version %s\n", c.DisplayName(), c.Version)
+	return err
+}
diff --git a/vendor/github.com/spf13/cobra/completions.go b/vendor/github.com/spf13/cobra/completions.go
index b60f6b200..a1752f763 100644
--- a/vendor/github.com/spf13/cobra/completions.go
+++ b/vendor/github.com/spf13/cobra/completions.go
@@ -17,6 +17,8 @@ package cobra
 import (
 	"fmt"
 	"os"
+	"regexp"
+	"strconv"
 	"strings"
 	"sync"
 
@@ -33,7 +35,7 @@ const (
 )
 
 // Global map of flag completion functions. Make sure to use flagCompletionMutex before you try to read and write from it.
-var flagCompletionFunctions = map[*pflag.Flag]func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective){}
+var flagCompletionFunctions = map[*pflag.Flag]CompletionFunc{}
 
 // lock for reading and writing from flagCompletionFunctions
 var flagCompletionMutex = &sync.RWMutex{}
 
@@ -115,22 +117,50 @@ type CompletionOptions struct {
 	HiddenDefaultCmd bool
 }
 
+// Completion is a string that can be used for completions.
+//
+// Two formats are supported:
+//   - the completion choice
+//   - the completion choice with a textual description (separated by a TAB).
+//
+// [CompletionWithDesc] can be used to create a completion string with a textual description.
+//
+// Note: Go type alias is used to provide a more descriptive name in the documentation, but any string can be used.
+type Completion = string
+
+// CompletionFunc is a function that provides completion results.
+type CompletionFunc = func(cmd *Command, args []string, toComplete string) ([]Completion, ShellCompDirective)
+
+// CompletionWithDesc returns a [Completion] with a description by using the TAB delimited format.
+func CompletionWithDesc(choice string, description string) Completion {
+	return choice + "\t" + description
+}
+
 // NoFileCompletions can be used to disable file completion for commands that should
 // not trigger file completions.
-func NoFileCompletions(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) {
+//
+// This method satisfies [CompletionFunc].
+// It can be used with [Command.RegisterFlagCompletionFunc] and for [Command.ValidArgsFunction].
+func NoFileCompletions(cmd *Command, args []string, toComplete string) ([]Completion, ShellCompDirective) {
 	return nil, ShellCompDirectiveNoFileComp
 }
 
 // FixedCompletions can be used to create a completion function which always
 // returns the same results.
-func FixedCompletions(choices []string, directive ShellCompDirective) func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) {
-	return func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) {
+//
+// This method returns a function that satisfies [CompletionFunc].
+// It can be used with [Command.RegisterFlagCompletionFunc] and for [Command.ValidArgsFunction].
+func FixedCompletions(choices []Completion, directive ShellCompDirective) CompletionFunc {
+	return func(cmd *Command, args []string, toComplete string) ([]Completion, ShellCompDirective) {
 		return choices, directive
 	}
 }
 
 // RegisterFlagCompletionFunc should be called to register a function to provide completion for a flag.
-func (c *Command) RegisterFlagCompletionFunc(flagName string, f func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective)) error {
+//
+// You can use pre-defined completion functions such as [FixedCompletions] or [NoFileCompletions],
+// or you can define your own.
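The hunk above replaces the long-hand completion function signatures with the `Completion` and `CompletionFunc` aliases. A minimal usage sketch of the new helpers (the `prog` command and its `--format` flag are hypothetical, invented for illustration):

```go
package main

import "github.com/spf13/cobra"

func main() {
	// Hypothetical root command, used only to demonstrate the new helpers.
	cmd := &cobra.Command{Use: "prog", Run: func(*cobra.Command, []string) {}}
	cmd.Flags().String("format", "text", "output format")

	// FixedCompletions returns a CompletionFunc with static choices;
	// CompletionWithDesc builds the TAB-delimited "choice<TAB>description" form.
	_ = cmd.RegisterFlagCompletionFunc("format", cobra.FixedCompletions(
		[]cobra.Completion{
			cobra.CompletionWithDesc("text", "plain text output"),
			cobra.CompletionWithDesc("json", "machine-readable output"),
		},
		cobra.ShellCompDirectiveNoFileComp,
	))

	_ = cmd.Execute()
}
```

Because `Completion` and `CompletionFunc` are declared as type aliases rather than new types, completion functions already written against `[]string` keep compiling unchanged.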
+func (c *Command) RegisterFlagCompletionFunc(flagName string, f CompletionFunc) error {
 	flag := c.Flag(flagName)
 	if flag == nil {
 		return fmt.Errorf("RegisterFlagCompletionFunc: flag '%s' does not exist", flagName)
@@ -146,7 +176,7 @@ func (c *Command) RegisterFlagCompletionFunc(flagName string, f func(cmd *Comman
 }
 
 // GetFlagCompletionFunc returns the completion function for the given flag of the command, if available.
-func (c *Command) GetFlagCompletionFunc(flagName string) (func(*Command, []string, string) ([]string, ShellCompDirective), bool) {
+func (c *Command) GetFlagCompletionFunc(flagName string) (CompletionFunc, bool) {
 	flag := c.Flag(flagName)
 	if flag == nil {
 		return nil, false
@@ -211,24 +241,29 @@ func (c *Command) initCompleteCmd(args []string) {
 			// 2- Even without completions, we need to print the directive
 		}
 
-		noDescriptions := (cmd.CalledAs() == ShellCompNoDescRequestCmd)
+		noDescriptions := cmd.CalledAs() == ShellCompNoDescRequestCmd
+		if !noDescriptions {
+			if doDescriptions, err := strconv.ParseBool(getEnvConfig(cmd, configEnvVarSuffixDescriptions)); err == nil {
+				noDescriptions = !doDescriptions
+			}
+		}
+		noActiveHelp := GetActiveHelpConfig(finalCmd) == activeHelpGlobalDisable
+		out := finalCmd.OutOrStdout()
 		for _, comp := range completions {
-			if GetActiveHelpConfig(finalCmd) == activeHelpGlobalDisable {
-				// Remove all activeHelp entries in this case
-				if strings.HasPrefix(comp, activeHelpMarker) {
-					continue
-				}
+			if noActiveHelp && strings.HasPrefix(comp, activeHelpMarker) {
+				// Remove all activeHelp entries if it's disabled.
+				continue
 			}
 			if noDescriptions {
 				// Remove any description that may be included following a tab character.
-				comp = strings.Split(comp, "\t")[0]
+				comp = strings.SplitN(comp, "\t", 2)[0]
 			}
 
 			// Make sure we only write the first line to the output.
 			// This is needed if a description contains a linebreak.
 			// Otherwise the shell scripts will interpret the other lines as new flags
 			// and could therefore provide a wrong completion.
-			comp = strings.Split(comp, "\n")[0]
+			comp = strings.SplitN(comp, "\n", 2)[0]
 
 			// Finally trim the completion. This is especially important to get rid
@@ -237,14 +272,14 @@ func (c *Command) initCompleteCmd(args []string) {
 			// although there is no description).
 			comp = strings.TrimSpace(comp)
 
-			// Print each possible completion to stdout for the completion script to consume.
-			fmt.Fprintln(finalCmd.OutOrStdout(), comp)
+			// Print each possible completion to the output for the completion script to consume.
+			fmt.Fprintln(out, comp)
 		}
 
 		// As the last printout, print the completion directive for the completion script to parse.
 		// The directive integer must be the last character following a single colon (:).
 		// The completion script expects :<directive>
-		fmt.Fprintf(finalCmd.OutOrStdout(), ":%d\n", directive)
+		fmt.Fprintf(out, ":%d\n", directive)
 
 		// Print some helpful info to stderr for the user to understand.
 		// Output from stderr must be ignored by the completion script.
@@ -263,7 +298,15 @@ func (c *Command) initCompleteCmd(args []string) {
 	}
 }
 
-func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDirective, error) {
+// SliceValue is a reduced version of [pflag.SliceValue]. It is used to detect
+// flags that accept multiple values and therefore can provide completion
+// multiple times.
+type SliceValue interface {
+	// GetSlice returns the flag value list as an array of strings.
+	GetSlice() []string
+}
+
+func (c *Command) getCompletions(args []string) (*Command, []Completion, ShellCompDirective, error) {
 	// The last argument, which is not completely typed by the user,
 	// should not be part of the list of arguments
 	toComplete := args[len(args)-1]
@@ -291,7 +334,7 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi
 	}
 	if err != nil {
 		// Unable to find the real command. E.g., <program> someInvalidCmd <TAB>
-		return c, []string{}, ShellCompDirectiveDefault, fmt.Errorf("Unable to find a command for arguments: %v", trimmedArgs)
+		return c, []Completion{}, ShellCompDirectiveDefault, fmt.Errorf("unable to find a command for arguments: %v", trimmedArgs)
 	}
 	finalCmd.ctx = c.ctx
@@ -321,7 +364,7 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi
 
 	// Parse the flags early so we can check if required flags are set
 	if err = finalCmd.ParseFlags(finalArgs); err != nil {
-		return finalCmd, []string{}, ShellCompDirectiveDefault, fmt.Errorf("Error while parsing flags from args %v: %s", finalArgs, err.Error())
+		return finalCmd, []Completion{}, ShellCompDirectiveDefault, fmt.Errorf("Error while parsing flags from args %v: %s", finalArgs, err.Error())
 	}
 
 	realArgCount := finalCmd.Flags().NArg()
@@ -333,14 +376,14 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi
 		if flagErr != nil {
 			// If error type is flagCompError and we don't want flagCompletion we should ignore the error
 			if _, ok := flagErr.(*flagCompError); !(ok && !flagCompletion) {
-				return finalCmd, []string{}, ShellCompDirectiveDefault, flagErr
+				return finalCmd, []Completion{}, ShellCompDirectiveDefault, flagErr
 			}
 		}
 
 		// Look for the --help or --version flags. If they are present,
 		// there should be no further completions.
 		if helpOrVersionFlagPresent(finalCmd) {
-			return finalCmd, []string{}, ShellCompDirectiveNoFileComp, nil
+			return finalCmd, []Completion{}, ShellCompDirectiveNoFileComp, nil
 		}
 
 		// We only remove the flags from the arguments if DisableFlagParsing is not set.
@@ -369,11 +412,11 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi
 				return finalCmd, subDir, ShellCompDirectiveFilterDirs, nil
 			}
 			// Directory completion
-			return finalCmd, []string{}, ShellCompDirectiveFilterDirs, nil
+			return finalCmd, []Completion{}, ShellCompDirectiveFilterDirs, nil
 		}
 	}
 
-	var completions []string
+	var completions []Completion
 	var directive ShellCompDirective
 
 	// Enforce flag groups before doing flag completions
@@ -392,10 +435,14 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi
 	// If we have not found any required flags, only then can we show regular flags
 	if len(completions) == 0 {
 		doCompleteFlags := func(flag *pflag.Flag) {
-			if !flag.Changed ||
+			_, acceptsMultiple := flag.Value.(SliceValue)
+			acceptsMultiple = acceptsMultiple ||
 				strings.Contains(flag.Value.Type(), "Slice") ||
-				strings.Contains(flag.Value.Type(), "Array") {
-				// If the flag is not already present, or if it can be specified multiple times (Array or Slice)
+				strings.Contains(flag.Value.Type(), "Array") ||
+				strings.HasPrefix(flag.Value.Type(), "stringTo")
+
+			if !flag.Changed || acceptsMultiple {
+				// If the flag is not already present, or if it can be specified multiple times (Array, Slice, or stringTo)
 				// we suggest it as a completion
 				completions = append(completions, getFlagNameCompletions(flag, toComplete)...)
} @@ -455,7 +502,7 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi for _, subCmd := range finalCmd.Commands() { if subCmd.IsAvailableCommand() || subCmd == finalCmd.helpCommand { if strings.HasPrefix(subCmd.Name(), toComplete) { - completions = append(completions, fmt.Sprintf("%s\t%s", subCmd.Name(), subCmd.Short)) + completions = append(completions, CompletionWithDesc(subCmd.Name(), subCmd.Short)) } directive = ShellCompDirectiveNoFileComp } @@ -500,7 +547,7 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi } // Find the completion function for the flag or command - var completionFn func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) + var completionFn CompletionFunc if flag != nil && flagCompletion { flagCompletionMutex.RLock() completionFn = flagCompletionFunctions[flag] @@ -511,7 +558,7 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi if completionFn != nil { // Go custom completion defined for this flag or command. // Call the registered completion function to get the completions. - var comps []string + var comps []Completion comps, directive = completionFn(finalCmd, finalArgs, toComplete) completions = append(completions, comps...) } @@ -524,23 +571,23 @@ func helpOrVersionFlagPresent(cmd *Command) bool { len(versionFlag.Annotations[FlagSetByCobraAnnotation]) > 0 && versionFlag.Changed { return true } - if helpFlag := cmd.Flags().Lookup("help"); helpFlag != nil && + if helpFlag := cmd.Flags().Lookup(helpFlagName); helpFlag != nil && len(helpFlag.Annotations[FlagSetByCobraAnnotation]) > 0 && helpFlag.Changed { return true } return false } -func getFlagNameCompletions(flag *pflag.Flag, toComplete string) []string { +func getFlagNameCompletions(flag *pflag.Flag, toComplete string) []Completion { if nonCompletableFlag(flag) { - return []string{} + return []Completion{} } - var completions []string + var completions []Completion flagName := "--" + flag.Name if strings.HasPrefix(flagName, toComplete) { // Flag without the = - completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage)) + completions = append(completions, CompletionWithDesc(flagName, flag.Usage)) // Why suggest both long forms: --flag and --flag= ? // This forces the user to *always* have to type either an = or a space after the flag name. 
@@ -552,20 +599,20 @@ func getFlagNameCompletions(flag *pflag.Flag, toComplete string) []string { // if len(flag.NoOptDefVal) == 0 { // // Flag requires a value, so it can be suffixed with = // flagName += "=" - // completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage)) + // completions = append(completions, CompletionWithDesc(flagName, flag.Usage)) // } } flagName = "-" + flag.Shorthand if len(flag.Shorthand) > 0 && strings.HasPrefix(flagName, toComplete) { - completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage)) + completions = append(completions, CompletionWithDesc(flagName, flag.Usage)) } return completions } -func completeRequireFlags(finalCmd *Command, toComplete string) []string { - var completions []string +func completeRequireFlags(finalCmd *Command, toComplete string) []Completion { + var completions []Completion doCompleteRequiredFlags := func(flag *pflag.Flag) { if _, present := flag.Annotations[BashCompOneRequiredFlag]; present { @@ -680,8 +727,8 @@ func checkIfFlagCompletion(finalCmd *Command, args []string, lastArg string) (*p // 1- the feature has been explicitly disabled by the program, // 2- c has no subcommands (to avoid creating one), // 3- c already has a 'completion' command provided by the program. -func (c *Command) InitDefaultCompletionCmd() { - if c.CompletionOptions.DisableDefaultCmd || !c.HasSubCommands() { +func (c *Command) InitDefaultCompletionCmd(args ...string) { + if c.CompletionOptions.DisableDefaultCmd { return } @@ -694,6 +741,16 @@ func (c *Command) InitDefaultCompletionCmd() { haveNoDescFlag := !c.CompletionOptions.DisableNoDescFlag && !c.CompletionOptions.DisableDescriptions + // Special case to know if there are sub-commands or not. + hasSubCommands := false + for _, cmd := range c.commands { + if cmd.Name() != ShellCompRequestCmd && cmd.Name() != helpCommandName { + // We found a real sub-command (not 'help' or '__complete') + hasSubCommands = true + break + } + } + completionCmd := &Command{ Use: compCmdName, Short: "Generate the autocompletion script for the specified shell", @@ -707,6 +764,22 @@ See each sub-command's help for details on how to use the generated script. } c.AddCommand(completionCmd) + if !hasSubCommands { + // If the 'completion' command will be the only sub-command, + // we only create it if it is actually being called. + // This avoids breaking programs that would suddenly find themselves with + // a subcommand, which would prevent them from accepting arguments. + // We also create the 'completion' command if the user is triggering + // shell completion for it (prog __complete completion '') + subCmd, cmdArgs, err := c.Find(args) + if err != nil || subCmd.Name() != compCmdName && + !(subCmd.Name() == ShellCompRequestCmd && len(cmdArgs) > 1 && cmdArgs[0] == compCmdName) { + // The completion command is not being called or being completed so we remove it. + c.RemoveCommand(completionCmd) + return + } + } + out := c.OutOrStdout() noDesc := c.CompletionOptions.DisableDescriptions shortDesc := "Generate the autocompletion script for %s" @@ -899,3 +972,34 @@ func CompError(msg string) { func CompErrorln(msg string) { CompError(fmt.Sprintf("%s\n", msg)) } + +// These values should not be changed: users will be using them explicitly. 
+const (
+	configEnvVarGlobalPrefix       = "COBRA"
+	configEnvVarSuffixDescriptions = "COMPLETION_DESCRIPTIONS"
+)
+
+var configEnvVarPrefixSubstRegexp = regexp.MustCompile(`[^A-Z0-9_]`)
+
+// configEnvVar returns the name of the program-specific configuration environment
+// variable. It has the format <PROGRAM>_<SUFFIX> where <PROGRAM> is the name of the
+// root command in upper case, with all non-ASCII-alphanumeric characters replaced by `_`.
+func configEnvVar(name, suffix string) string {
+	// This format should not be changed: users will be using it explicitly.
+	v := strings.ToUpper(fmt.Sprintf("%s_%s", name, suffix))
+	v = configEnvVarPrefixSubstRegexp.ReplaceAllString(v, "_")
+	return v
+}
+
+// getEnvConfig returns the value of the configuration environment variable
+// <PROGRAM>_<SUFFIX> where <PROGRAM> is the name of the root command in upper
+// case, with all non-ASCII-alphanumeric characters replaced by `_`.
+// If the value is empty or not set, the value of the environment variable
+// COBRA_<SUFFIX> is returned instead.
+func getEnvConfig(cmd *Command, suffix string) string {
+	v := os.Getenv(configEnvVar(cmd.Root().Name(), suffix))
+	if v == "" {
+		v = os.Getenv(configEnvVar(configEnvVarGlobalPrefix, suffix))
+	}
+	return v
+}
diff --git a/vendor/github.com/spf13/cobra/flag_groups.go b/vendor/github.com/spf13/cobra/flag_groups.go
index 0671ec5f2..560612fd3 100644
--- a/vendor/github.com/spf13/cobra/flag_groups.go
+++ b/vendor/github.com/spf13/cobra/flag_groups.go
@@ -23,9 +23,9 @@ import (
 )
 
 const (
-	requiredAsGroup   = "cobra_annotation_required_if_others_set"
-	oneRequired       = "cobra_annotation_one_required"
-	mutuallyExclusive = "cobra_annotation_mutually_exclusive"
+	requiredAsGroupAnnotation   = "cobra_annotation_required_if_others_set"
+	oneRequiredAnnotation       = "cobra_annotation_one_required"
+	mutuallyExclusiveAnnotation = "cobra_annotation_mutually_exclusive"
 )
 
 // MarkFlagsRequiredTogether marks the given flags with annotations so that Cobra errors
@@ -37,7 +37,7 @@ func (c *Command) MarkFlagsRequiredTogether(flagNames ...string) {
 		if f == nil {
 			panic(fmt.Sprintf("Failed to find flag %q and mark it as being required in a flag group", v))
 		}
-		if err := c.Flags().SetAnnotation(v, requiredAsGroup, append(f.Annotations[requiredAsGroup], strings.Join(flagNames, " "))); err != nil {
+		if err := c.Flags().SetAnnotation(v, requiredAsGroupAnnotation, append(f.Annotations[requiredAsGroupAnnotation], strings.Join(flagNames, " "))); err != nil {
 			// Only errs if the flag isn't found.
 			panic(err)
 		}
@@ -53,7 +53,7 @@ func (c *Command) MarkFlagsOneRequired(flagNames ...string) {
 		if f == nil {
 			panic(fmt.Sprintf("Failed to find flag %q and mark it as being in a one-required flag group", v))
 		}
-		if err := c.Flags().SetAnnotation(v, oneRequired, append(f.Annotations[oneRequired], strings.Join(flagNames, " "))); err != nil {
+		if err := c.Flags().SetAnnotation(v, oneRequiredAnnotation, append(f.Annotations[oneRequiredAnnotation], strings.Join(flagNames, " "))); err != nil {
 			// Only errs if the flag isn't found.
 			panic(err)
 		}
@@ -70,7 +70,7 @@ func (c *Command) MarkFlagsMutuallyExclusive(flagNames ...string) {
 			panic(fmt.Sprintf("Failed to find flag %q and mark it as being in a mutually exclusive flag group", v))
 		}
 		// Each time this is called is a single new entry; this allows it to be a member of multiple groups if needed.
- if err := c.Flags().SetAnnotation(v, mutuallyExclusive, append(f.Annotations[mutuallyExclusive], strings.Join(flagNames, " "))); err != nil { + if err := c.Flags().SetAnnotation(v, mutuallyExclusiveAnnotation, append(f.Annotations[mutuallyExclusiveAnnotation], strings.Join(flagNames, " "))); err != nil { panic(err) } } @@ -91,9 +91,9 @@ func (c *Command) ValidateFlagGroups() error { oneRequiredGroupStatus := map[string]map[string]bool{} mutuallyExclusiveGroupStatus := map[string]map[string]bool{} flags.VisitAll(func(pflag *flag.Flag) { - processFlagForGroupAnnotation(flags, pflag, requiredAsGroup, groupStatus) - processFlagForGroupAnnotation(flags, pflag, oneRequired, oneRequiredGroupStatus) - processFlagForGroupAnnotation(flags, pflag, mutuallyExclusive, mutuallyExclusiveGroupStatus) + processFlagForGroupAnnotation(flags, pflag, requiredAsGroupAnnotation, groupStatus) + processFlagForGroupAnnotation(flags, pflag, oneRequiredAnnotation, oneRequiredGroupStatus) + processFlagForGroupAnnotation(flags, pflag, mutuallyExclusiveAnnotation, mutuallyExclusiveGroupStatus) }) if err := validateRequiredFlagGroups(groupStatus); err != nil { @@ -130,7 +130,7 @@ func processFlagForGroupAnnotation(flags *flag.FlagSet, pflag *flag.Flag, annota continue } - groupStatus[group] = map[string]bool{} + groupStatus[group] = make(map[string]bool, len(flagnames)) for _, name := range flagnames { groupStatus[group][name] = false } @@ -232,9 +232,9 @@ func (c *Command) enforceFlagGroupsForCompletion() { oneRequiredGroupStatus := map[string]map[string]bool{} mutuallyExclusiveGroupStatus := map[string]map[string]bool{} c.Flags().VisitAll(func(pflag *flag.Flag) { - processFlagForGroupAnnotation(flags, pflag, requiredAsGroup, groupStatus) - processFlagForGroupAnnotation(flags, pflag, oneRequired, oneRequiredGroupStatus) - processFlagForGroupAnnotation(flags, pflag, mutuallyExclusive, mutuallyExclusiveGroupStatus) + processFlagForGroupAnnotation(flags, pflag, requiredAsGroupAnnotation, groupStatus) + processFlagForGroupAnnotation(flags, pflag, oneRequiredAnnotation, oneRequiredGroupStatus) + processFlagForGroupAnnotation(flags, pflag, mutuallyExclusiveAnnotation, mutuallyExclusiveGroupStatus) }) // If a flag that is part of a group is present, we make all the other flags @@ -253,17 +253,17 @@ func (c *Command) enforceFlagGroupsForCompletion() { // If none of the flags of a one-required group are present, we make all the flags // of that group required so that the shell completion suggests them automatically for flagList, flagnameAndStatus := range oneRequiredGroupStatus { - set := 0 + isSet := false - for _, isSet := range flagnameAndStatus { + for _, isSet = range flagnameAndStatus { if isSet { - set++ + break } } // None of the flags of the group are set, mark all flags in the group // as required - if set == 0 { + if !isSet { for _, fName := range strings.Split(flagList, " ") { _ = c.MarkFlagRequired(fName) } diff --git a/vendor/github.com/spf13/cobra/powershell_completions.go b/vendor/github.com/spf13/cobra/powershell_completions.go index 551951939..746dcb92e 100644 --- a/vendor/github.com/spf13/cobra/powershell_completions.go +++ b/vendor/github.com/spf13/cobra/powershell_completions.go @@ -28,8 +28,8 @@ import ( func genPowerShellComp(buf io.StringWriter, name string, includeDesc bool) { // Variables should not contain a '-' or ':' character nameForVar := name - nameForVar = strings.Replace(nameForVar, "-", "_", -1) - nameForVar = strings.Replace(nameForVar, ":", "_", -1) + nameForVar = 
strings.ReplaceAll(nameForVar, "-", "_") + nameForVar = strings.ReplaceAll(nameForVar, ":", "_") compCmd := ShellCompRequestCmd if !includeDesc { @@ -162,7 +162,10 @@ filter __%[1]s_escapeStringWithSpecialChars { if (-Not $Description) { $Description = " " } - @{Name="$Name";Description="$Description"} + New-Object -TypeName PSCustomObject -Property @{ + Name = "$Name" + Description = "$Description" + } } @@ -240,7 +243,12 @@ filter __%[1]s_escapeStringWithSpecialChars { __%[1]s_debug "Only one completion left" # insert space after value - [System.Management.Automation.CompletionResult]::new($($comp.Name | __%[1]s_escapeStringWithSpecialChars) + $Space, "$($comp.Name)", 'ParameterValue', "$($comp.Description)") + $CompletionText = $($comp.Name | __%[1]s_escapeStringWithSpecialChars) + $Space + if ($ExecutionContext.SessionState.LanguageMode -eq "FullLanguage"){ + [System.Management.Automation.CompletionResult]::new($CompletionText, "$($comp.Name)", 'ParameterValue', "$($comp.Description)") + } else { + $CompletionText + } } else { # Add the proper number of spaces to align the descriptions @@ -255,7 +263,12 @@ filter __%[1]s_escapeStringWithSpecialChars { $Description = " ($($comp.Description))" } - [System.Management.Automation.CompletionResult]::new("$($comp.Name)$Description", "$($comp.Name)$Description", 'ParameterValue', "$($comp.Description)") + $CompletionText = "$($comp.Name)$Description" + if ($ExecutionContext.SessionState.LanguageMode -eq "FullLanguage"){ + [System.Management.Automation.CompletionResult]::new($CompletionText, "$($comp.Name)$Description", 'ParameterValue', "$($comp.Description)") + } else { + $CompletionText + } } } @@ -264,7 +277,13 @@ filter __%[1]s_escapeStringWithSpecialChars { # insert space after value # MenuComplete will automatically show the ToolTip of # the highlighted value at the bottom of the suggestions. - [System.Management.Automation.CompletionResult]::new($($comp.Name | __%[1]s_escapeStringWithSpecialChars) + $Space, "$($comp.Name)", 'ParameterValue', "$($comp.Description)") + + $CompletionText = $($comp.Name | __%[1]s_escapeStringWithSpecialChars) + $Space + if ($ExecutionContext.SessionState.LanguageMode -eq "FullLanguage"){ + [System.Management.Automation.CompletionResult]::new($CompletionText, "$($comp.Name)", 'ParameterValue', "$($comp.Description)") + } else { + $CompletionText + } } # TabCompleteNext and in case we get something unknown @@ -272,7 +291,13 @@ filter __%[1]s_escapeStringWithSpecialChars { # Like MenuComplete but we don't want to add a space here because # the user need to press space anyway to get the completion. 
# Description will not be shown because that's not possible with TabCompleteNext - [System.Management.Automation.CompletionResult]::new($($comp.Name | __%[1]s_escapeStringWithSpecialChars), "$($comp.Name)", 'ParameterValue', "$($comp.Description)") + + $CompletionText = $($comp.Name | __%[1]s_escapeStringWithSpecialChars) + if ($ExecutionContext.SessionState.LanguageMode -eq "FullLanguage"){ + [System.Management.Automation.CompletionResult]::new($CompletionText, "$($comp.Name)", 'ParameterValue', "$($comp.Description)") + } else { + $CompletionText + } } } diff --git a/vendor/github.com/spf13/pflag/.editorconfig b/vendor/github.com/spf13/pflag/.editorconfig new file mode 100644 index 000000000..4492e9f9f --- /dev/null +++ b/vendor/github.com/spf13/pflag/.editorconfig @@ -0,0 +1,12 @@ +root = true + +[*] +charset = utf-8 +end_of_line = lf +indent_size = 4 +indent_style = space +insert_final_newline = true +trim_trailing_whitespace = true + +[*.go] +indent_style = tab diff --git a/vendor/github.com/spf13/pflag/.golangci.yaml b/vendor/github.com/spf13/pflag/.golangci.yaml new file mode 100644 index 000000000..b274f2484 --- /dev/null +++ b/vendor/github.com/spf13/pflag/.golangci.yaml @@ -0,0 +1,4 @@ +linters: + disable-all: true + enable: + - nolintlint diff --git a/vendor/github.com/spf13/pflag/flag.go b/vendor/github.com/spf13/pflag/flag.go index 24a5036e9..7c058de37 100644 --- a/vendor/github.com/spf13/pflag/flag.go +++ b/vendor/github.com/spf13/pflag/flag.go @@ -160,7 +160,7 @@ type FlagSet struct { args []string // arguments after flags argsLenAtDash int // len(args) when a '--' was located when parsing, or -1 if no -- errorHandling ErrorHandling - output io.Writer // nil means stderr; use out() accessor + output io.Writer // nil means stderr; use Output() accessor interspersed bool // allow interspersed option/non-option args normalizeNameFunc func(f *FlagSet, name string) NormalizedName @@ -255,13 +255,20 @@ func (f *FlagSet) normalizeFlagName(name string) NormalizedName { return n(f, name) } -func (f *FlagSet) out() io.Writer { +// Output returns the destination for usage and error messages. os.Stderr is returned if +// output was not set or was set to nil. +func (f *FlagSet) Output() io.Writer { if f.output == nil { return os.Stderr } return f.output } +// Name returns the name of the flag set. +func (f *FlagSet) Name() string { + return f.name +} + // SetOutput sets the destination for usage and error messages. // If output is nil, os.Stderr is used. func (f *FlagSet) SetOutput(output io.Writer) { @@ -358,7 +365,7 @@ func (f *FlagSet) ShorthandLookup(name string) *Flag { } if len(name) > 1 { msg := fmt.Sprintf("can not look up shorthand which is more than one ASCII character: %q", name) - fmt.Fprintf(f.out(), msg) + fmt.Fprintf(f.Output(), msg) panic(msg) } c := name[0] @@ -482,7 +489,7 @@ func (f *FlagSet) Set(name, value string) error { } if flag.Deprecated != "" { - fmt.Fprintf(f.out(), "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated) + fmt.Fprintf(f.Output(), "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated) } return nil } @@ -523,7 +530,7 @@ func Set(name, value string) error { // otherwise, the default values of all defined flags in the set. 
func (f *FlagSet) PrintDefaults() { usages := f.FlagUsages() - fmt.Fprint(f.out(), usages) + fmt.Fprint(f.Output(), usages) } // defaultIsZeroValue returns true if the default value for this flag represents @@ -758,7 +765,7 @@ func PrintDefaults() { // defaultUsage is the default function to print a usage message. func defaultUsage(f *FlagSet) { - fmt.Fprintf(f.out(), "Usage of %s:\n", f.name) + fmt.Fprintf(f.Output(), "Usage of %s:\n", f.name) f.PrintDefaults() } @@ -844,7 +851,7 @@ func (f *FlagSet) AddFlag(flag *Flag) { _, alreadyThere := f.formal[normalizedFlagName] if alreadyThere { msg := fmt.Sprintf("%s flag redefined: %s", f.name, flag.Name) - fmt.Fprintln(f.out(), msg) + fmt.Fprintln(f.Output(), msg) panic(msg) // Happens only if flags are declared with identical names } if f.formal == nil { @@ -860,7 +867,7 @@ func (f *FlagSet) AddFlag(flag *Flag) { } if len(flag.Shorthand) > 1 { msg := fmt.Sprintf("%q shorthand is more than one ASCII character", flag.Shorthand) - fmt.Fprintf(f.out(), msg) + fmt.Fprintf(f.Output(), msg) panic(msg) } if f.shorthands == nil { @@ -870,7 +877,7 @@ func (f *FlagSet) AddFlag(flag *Flag) { used, alreadyThere := f.shorthands[c] if alreadyThere { msg := fmt.Sprintf("unable to redefine %q shorthand in %q flagset: it's already used for %q flag", c, f.name, used.Name) - fmt.Fprintf(f.out(), msg) + fmt.Fprintf(f.Output(), msg) panic(msg) } f.shorthands[c] = flag @@ -909,7 +916,7 @@ func VarP(value Value, name, shorthand, usage string) { func (f *FlagSet) failf(format string, a ...interface{}) error { err := fmt.Errorf(format, a...) if f.errorHandling != ContinueOnError { - fmt.Fprintln(f.out(), err) + fmt.Fprintln(f.Output(), err) f.usage() } return err @@ -1060,7 +1067,7 @@ func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parse } if flag.ShorthandDeprecated != "" { - fmt.Fprintf(f.out(), "Flag shorthand -%s has been deprecated, %s\n", flag.Shorthand, flag.ShorthandDeprecated) + fmt.Fprintf(f.Output(), "Flag shorthand -%s has been deprecated, %s\n", flag.Shorthand, flag.ShorthandDeprecated) } err = fn(flag, value) diff --git a/vendor/github.com/spf13/pflag/ip.go b/vendor/github.com/spf13/pflag/ip.go index 3d414ba69..06b8bcb57 100644 --- a/vendor/github.com/spf13/pflag/ip.go +++ b/vendor/github.com/spf13/pflag/ip.go @@ -16,6 +16,9 @@ func newIPValue(val net.IP, p *net.IP) *ipValue { func (i *ipValue) String() string { return net.IP(*i).String() } func (i *ipValue) Set(s string) error { + if s == "" { + return nil + } ip := net.ParseIP(strings.TrimSpace(s)) if ip == nil { return fmt.Errorf("failed to parse IP: %q", s) diff --git a/vendor/github.com/spf13/pflag/ipnet_slice.go b/vendor/github.com/spf13/pflag/ipnet_slice.go new file mode 100644 index 000000000..6b541aa87 --- /dev/null +++ b/vendor/github.com/spf13/pflag/ipnet_slice.go @@ -0,0 +1,147 @@ +package pflag + +import ( + "fmt" + "io" + "net" + "strings" +) + +// -- ipNetSlice Value +type ipNetSliceValue struct { + value *[]net.IPNet + changed bool +} + +func newIPNetSliceValue(val []net.IPNet, p *[]net.IPNet) *ipNetSliceValue { + ipnsv := new(ipNetSliceValue) + ipnsv.value = p + *ipnsv.value = val + return ipnsv +} + +// Set converts, and assigns, the comma-separated IPNet argument string representation as the []net.IPNet value of this flag. +// If Set is called on a flag that already has a []net.IPNet assigned, the newly converted values will be appended. 
+func (s *ipNetSliceValue) Set(val string) error {
+
+	// remove all quote characters
+	rmQuote := strings.NewReplacer(`"`, "", `'`, "", "`", "")
+
+	// read flag arguments with CSV parser
+	ipNetStrSlice, err := readAsCSV(rmQuote.Replace(val))
+	if err != nil && err != io.EOF {
+		return err
+	}
+
+	// parse ip values into slice
+	out := make([]net.IPNet, 0, len(ipNetStrSlice))
+	for _, ipNetStr := range ipNetStrSlice {
+		_, n, err := net.ParseCIDR(strings.TrimSpace(ipNetStr))
+		if err != nil {
+			return fmt.Errorf("invalid string being converted to CIDR: %s", ipNetStr)
+		}
+		out = append(out, *n)
+	}
+
+	if !s.changed {
+		*s.value = out
+	} else {
+		*s.value = append(*s.value, out...)
+	}
+
+	s.changed = true
+
+	return nil
+}
+
+// Type returns a string that uniquely represents this flag's type.
+func (s *ipNetSliceValue) Type() string {
+	return "ipNetSlice"
+}
+
+// String defines a "native" format for this net.IPNet slice flag value.
+func (s *ipNetSliceValue) String() string {
+
+	ipNetStrSlice := make([]string, len(*s.value))
+	for i, n := range *s.value {
+		ipNetStrSlice[i] = n.String()
+	}
+
+	out, _ := writeAsCSV(ipNetStrSlice)
+	return "[" + out + "]"
+}
+
+func ipNetSliceConv(val string) (interface{}, error) {
+	val = strings.Trim(val, "[]")
+	// Empty string would cause a slice with one (empty) entry
+	if len(val) == 0 {
+		return []net.IPNet{}, nil
+	}
+	ss := strings.Split(val, ",")
+	out := make([]net.IPNet, len(ss))
+	for i, sval := range ss {
+		_, n, err := net.ParseCIDR(strings.TrimSpace(sval))
+		if err != nil {
+			return nil, fmt.Errorf("invalid string being converted to CIDR: %s", sval)
+		}
+		out[i] = *n
+	}
+	return out, nil
+}
+
+// GetIPNetSlice returns the []net.IPNet value of a flag with the given name
+func (f *FlagSet) GetIPNetSlice(name string) ([]net.IPNet, error) {
+	val, err := f.getFlagType(name, "ipNetSlice", ipNetSliceConv)
+	if err != nil {
+		return []net.IPNet{}, err
+	}
+	return val.([]net.IPNet), nil
+}
+
+// IPNetSliceVar defines an ipNetSlice flag with specified name, default value, and usage string.
+// The argument p points to a []net.IPNet variable in which to store the value of the flag.
+func (f *FlagSet) IPNetSliceVar(p *[]net.IPNet, name string, value []net.IPNet, usage string) {
+	f.VarP(newIPNetSliceValue(value, p), name, "", usage)
+}
+
+// IPNetSliceVarP is like IPNetSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPNetSliceVarP(p *[]net.IPNet, name, shorthand string, value []net.IPNet, usage string) {
+	f.VarP(newIPNetSliceValue(value, p), name, shorthand, usage)
+}
+
+// IPNetSliceVar defines a []net.IPNet flag with specified name, default value, and usage string.
+// The argument p points to a []net.IPNet variable in which to store the value of the flag.
+func IPNetSliceVar(p *[]net.IPNet, name string, value []net.IPNet, usage string) {
+	CommandLine.VarP(newIPNetSliceValue(value, p), name, "", usage)
+}
+
+// IPNetSliceVarP is like IPNetSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func IPNetSliceVarP(p *[]net.IPNet, name, shorthand string, value []net.IPNet, usage string) {
+	CommandLine.VarP(newIPNetSliceValue(value, p), name, shorthand, usage)
+}
+
+// IPNetSlice defines a []net.IPNet flag with specified name, default value, and usage string.
+// The return value is the address of a []net.IPNet variable that stores the value of that flag.
+func (f *FlagSet) IPNetSlice(name string, value []net.IPNet, usage string) *[]net.IPNet {
+	p := []net.IPNet{}
+	f.IPNetSliceVarP(&p, name, "", value, usage)
+	return &p
+}
+
+// IPNetSliceP is like IPNetSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPNetSliceP(name, shorthand string, value []net.IPNet, usage string) *[]net.IPNet {
+	p := []net.IPNet{}
+	f.IPNetSliceVarP(&p, name, shorthand, value, usage)
+	return &p
+}
+
+// IPNetSlice defines a []net.IPNet flag with specified name, default value, and usage string.
+// The return value is the address of a []net.IPNet variable that stores the value of the flag.
+func IPNetSlice(name string, value []net.IPNet, usage string) *[]net.IPNet {
+	return CommandLine.IPNetSliceP(name, "", value, usage)
+}
+
+// IPNetSliceP is like IPNetSlice, but accepts a shorthand letter that can be used after a single dash.
+func IPNetSliceP(name, shorthand string, value []net.IPNet, usage string) *[]net.IPNet {
+	return CommandLine.IPNetSliceP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/string_array.go b/vendor/github.com/spf13/pflag/string_array.go
index 4894af818..d1ff0a96b 100644
--- a/vendor/github.com/spf13/pflag/string_array.go
+++ b/vendor/github.com/spf13/pflag/string_array.go
@@ -31,11 +31,7 @@ func (s *stringArrayValue) Append(val string) error {
 func (s *stringArrayValue) Replace(val []string) error {
 	out := make([]string, len(val))
 	for i, d := range val {
-		var err error
 		out[i] = d
-		if err != nil {
-			return err
-		}
 	}
 	*s.value = out
 	return nil
diff --git a/vendor/github.com/xo/terminfo/.gitignore b/vendor/github.com/xo/terminfo/.gitignore
new file mode 100644
index 000000000..368e0c06c
--- /dev/null
+++ b/vendor/github.com/xo/terminfo/.gitignore
@@ -0,0 +1,9 @@
+/.cache/
+
+/cmd/infocmp/infocmp
+/cmd/infocmp/.out/
+
+/infocmp
+/.out/
+
+*.txt
diff --git a/vendor/github.com/xo/terminfo/LICENSE b/vendor/github.com/xo/terminfo/LICENSE
new file mode 100644
index 000000000..197dadb12
--- /dev/null
+++ b/vendor/github.com/xo/terminfo/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2016 Anmol Sethi
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
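The `IPNetSlice` family added above follows the same pattern as the other pflag slice types. A short sketch of how a program might consume it (the flag set and flag names are hypothetical); per the `Set` doc comment in this file, repeating the flag appends to the slice:

```go
package main

import (
	"fmt"
	"net"

	"github.com/spf13/pflag"
)

func main() {
	// Hypothetical flag set, for demonstration only.
	fs := pflag.NewFlagSet("prog", pflag.ContinueOnError)
	allowed := fs.IPNetSlice("allow", []net.IPNet{}, "CIDR ranges to allow")

	// One occurrence with comma-separated CIDRs, then a repeat that appends.
	if err := fs.Parse([]string{"--allow=10.0.0.0/8,192.168.0.0/16", "--allow=172.16.0.0/12"}); err != nil {
		panic(err)
	}
	for _, n := range *allowed {
		fmt.Println(n.String()) // 10.0.0.0/8, 192.168.0.0/16, 172.16.0.0/12
	}
}
```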
diff --git a/vendor/github.com/xo/terminfo/README.md b/vendor/github.com/xo/terminfo/README.md new file mode 100644 index 000000000..e5002d239 --- /dev/null +++ b/vendor/github.com/xo/terminfo/README.md @@ -0,0 +1,139 @@ +# About terminfo [![GoDoc][1]][2] + +Package `terminfo` provides a pure-Go implementation of reading information +from the terminfo database. + +`terminfo` is meant as a replacement for `ncurses` in simple Go programs. + +## Installing + +Install in the usual Go way: + +```sh +$ go get -u github.com/xo/terminfo +``` + +## Using + +Please see the [GoDoc API listing][2] for more information on using `terminfo`. + +```go +// _examples/simple/main.go +package main + +import ( + "bytes" + "fmt" + "log" + "os" + "os/signal" + "strings" + "sync" + "syscall" + + "github.com/xo/terminfo" +) + +func main() { + //r := rand.New(nil) + + // load terminfo + ti, err := terminfo.LoadFromEnv() + if err != nil { + log.Fatal(err) + } + + // cleanup + defer func() { + err := recover() + termreset(ti) + if err != nil { + log.Fatal(err) + } + }() + + terminit(ti) + termtitle(ti, "simple example!") + termputs(ti, 3, 3, "Ctrl-C to exit") + maxColors := termcolors(ti) + if maxColors > 256 { + maxColors = 256 + } + for i := 0; i < maxColors; i++ { + termputs(ti, 5+i/16, 5+i%16, ti.Colorf(i, 0, "█")) + } + + // wait for signal + sigs := make(chan os.Signal, 1) + signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM) + <-sigs +} + +// terminit initializes the special CA mode on the terminal, and makes the +// cursor invisible. +func terminit(ti *terminfo.Terminfo) { + buf := new(bytes.Buffer) + // set the cursor invisible + ti.Fprintf(buf, terminfo.CursorInvisible) + // enter special mode + ti.Fprintf(buf, terminfo.EnterCaMode) + // clear the screen + ti.Fprintf(buf, terminfo.ClearScreen) + os.Stdout.Write(buf.Bytes()) +} + +// termreset is the inverse of terminit. +func termreset(ti *terminfo.Terminfo) { + buf := new(bytes.Buffer) + ti.Fprintf(buf, terminfo.ExitCaMode) + ti.Fprintf(buf, terminfo.CursorNormal) + os.Stdout.Write(buf.Bytes()) +} + +// termputs puts a string at row, col, interpolating v. +func termputs(ti *terminfo.Terminfo, row, col int, s string, v ...interface{}) { + buf := new(bytes.Buffer) + ti.Fprintf(buf, terminfo.CursorAddress, row, col) + fmt.Fprintf(buf, s, v...) + os.Stdout.Write(buf.Bytes()) +} + +// sl is the status line terminfo. +var sl *terminfo.Terminfo + +// termtitle sets the window title. +func termtitle(ti *terminfo.Terminfo, s string) { + var once sync.Once + once.Do(func() { + if ti.Has(terminfo.HasStatusLine) { + return + } + // load the sl xterm if terminal is an xterm or has COLORTERM + if strings.Contains(strings.ToLower(os.Getenv("TERM")), "xterm") || os.Getenv("COLORTERM") == "truecolor" { + sl, _ = terminfo.Load("xterm+sl") + } + }) + if sl != nil { + ti = sl + } + if !ti.Has(terminfo.HasStatusLine) { + return + } + buf := new(bytes.Buffer) + ti.Fprintf(buf, terminfo.ToStatusLine) + fmt.Fprint(buf, s) + ti.Fprintf(buf, terminfo.FromStatusLine) + os.Stdout.Write(buf.Bytes()) +} + +// termcolors returns the maximum colors available for the terminal. 
+func termcolors(ti *terminfo.Terminfo) int { + if colors := ti.Num(terminfo.MaxColors); colors > 0 { + return colors + } + return int(terminfo.ColorLevelBasic) +} +``` + +[1]: https://godoc.org/github.com/xo/terminfo?status.svg +[2]: https://godoc.org/github.com/xo/terminfo diff --git a/vendor/github.com/xo/terminfo/caps.go b/vendor/github.com/xo/terminfo/caps.go new file mode 100644 index 000000000..e5e1d41f1 --- /dev/null +++ b/vendor/github.com/xo/terminfo/caps.go @@ -0,0 +1,31 @@ +package terminfo + +// BoolCapName returns the bool capability name. +func BoolCapName(i int) string { + return boolCapNames[2*i] +} + +// BoolCapNameShort returns the short bool capability name. +func BoolCapNameShort(i int) string { + return boolCapNames[2*i+1] +} + +// NumCapName returns the num capability name. +func NumCapName(i int) string { + return numCapNames[2*i] +} + +// NumCapNameShort returns the short num capability name. +func NumCapNameShort(i int) string { + return numCapNames[2*i+1] +} + +// StringCapName returns the string capability name. +func StringCapName(i int) string { + return stringCapNames[2*i] +} + +// StringCapNameShort returns the short string capability name. +func StringCapNameShort(i int) string { + return stringCapNames[2*i+1] +} diff --git a/vendor/github.com/xo/terminfo/capvals.go b/vendor/github.com/xo/terminfo/capvals.go new file mode 100644 index 000000000..0c2274e3a --- /dev/null +++ b/vendor/github.com/xo/terminfo/capvals.go @@ -0,0 +1,1525 @@ +package terminfo + +// Code generated by gen.go. DO NOT EDIT. +// Bool capabilities. +const ( + // The AutoLeftMargin [auto_left_margin, bw] bool capability indicates cub1 wraps from column 0 to last column. + AutoLeftMargin = iota + // The AutoRightMargin [auto_right_margin, am] bool capability indicates terminal has automatic margins. + AutoRightMargin + // The NoEscCtlc [no_esc_ctlc, xsb] bool capability indicates beehive (f1=escape, f2=ctrl C). + NoEscCtlc + // The CeolStandoutGlitch [ceol_standout_glitch, xhp] bool capability indicates standout not erased by overwriting (hp). + CeolStandoutGlitch + // The EatNewlineGlitch [eat_newline_glitch, xenl] bool capability indicates newline ignored after 80 cols (concept). + EatNewlineGlitch + // The EraseOverstrike [erase_overstrike, eo] bool capability indicates can erase overstrikes with a blank. + EraseOverstrike + // The GenericType [generic_type, gn] bool capability indicates generic line type. + GenericType + // The HardCopy [hard_copy, hc] bool capability indicates hardcopy terminal. + HardCopy + // The HasMetaKey [has_meta_key, km] bool capability indicates Has a meta key (i.e., sets 8th-bit). + HasMetaKey + // The HasStatusLine [has_status_line, hs] bool capability indicates has extra status line. + HasStatusLine + // The InsertNullGlitch [insert_null_glitch, in] bool capability indicates insert mode distinguishes nulls. + InsertNullGlitch + // The MemoryAbove [memory_above, da] bool capability indicates display may be retained above the screen. + MemoryAbove + // The MemoryBelow [memory_below, db] bool capability indicates display may be retained below the screen. + MemoryBelow + // The MoveInsertMode [move_insert_mode, mir] bool capability indicates safe to move while in insert mode. + MoveInsertMode + // The MoveStandoutMode [move_standout_mode, msgr] bool capability indicates safe to move while in standout mode. + MoveStandoutMode + // The OverStrike [over_strike, os] bool capability indicates terminal can overstrike. 
+ OverStrike + // The StatusLineEscOk [status_line_esc_ok, eslok] bool capability indicates escape can be used on the status line. + StatusLineEscOk + // The DestTabsMagicSmso [dest_tabs_magic_smso, xt] bool capability indicates tabs destructive, magic so char (t1061). + DestTabsMagicSmso + // The TildeGlitch [tilde_glitch, hz] bool capability indicates cannot print ~'s (Hazeltine). + TildeGlitch + // The TransparentUnderline [transparent_underline, ul] bool capability indicates underline character overstrikes. + TransparentUnderline + // The XonXoff [xon_xoff, xon] bool capability indicates terminal uses xon/xoff handshaking. + XonXoff + // The NeedsXonXoff [needs_xon_xoff, nxon] bool capability indicates padding will not work, xon/xoff required. + NeedsXonXoff + // The PrtrSilent [prtr_silent, mc5i] bool capability indicates printer will not echo on screen. + PrtrSilent + // The HardCursor [hard_cursor, chts] bool capability indicates cursor is hard to see. + HardCursor + // The NonRevRmcup [non_rev_rmcup, nrrmc] bool capability indicates smcup does not reverse rmcup. + NonRevRmcup + // The NoPadChar [no_pad_char, npc] bool capability indicates pad character does not exist. + NoPadChar + // The NonDestScrollRegion [non_dest_scroll_region, ndscr] bool capability indicates scrolling region is non-destructive. + NonDestScrollRegion + // The CanChange [can_change, ccc] bool capability indicates terminal can re-define existing colors. + CanChange + // The BackColorErase [back_color_erase, bce] bool capability indicates screen erased with background color. + BackColorErase + // The HueLightnessSaturation [hue_lightness_saturation, hls] bool capability indicates terminal uses only HLS color notation (Tektronix). + HueLightnessSaturation + // The ColAddrGlitch [col_addr_glitch, xhpa] bool capability indicates only positive motion for hpa/mhpa caps. + ColAddrGlitch + // The CrCancelsMicroMode [cr_cancels_micro_mode, crxm] bool capability indicates using cr turns off micro mode. + CrCancelsMicroMode + // The HasPrintWheel [has_print_wheel, daisy] bool capability indicates printer needs operator to change character set. + HasPrintWheel + // The RowAddrGlitch [row_addr_glitch, xvpa] bool capability indicates only positive motion for vpa/mvpa caps. + RowAddrGlitch + // The SemiAutoRightMargin [semi_auto_right_margin, sam] bool capability indicates printing in last column causes cr. + SemiAutoRightMargin + // The CpiChangesRes [cpi_changes_res, cpix] bool capability indicates changing character pitch changes resolution. + CpiChangesRes + // The LpiChangesRes [lpi_changes_res, lpix] bool capability indicates changing line pitch changes resolution. + LpiChangesRes + // The BackspacesWithBs [backspaces_with_bs, OTbs] bool capability indicates uses ^H to move left. + BackspacesWithBs + // The CrtNoScrolling [crt_no_scrolling, OTns] bool capability indicates crt cannot scroll. + CrtNoScrolling + // The NoCorrectlyWorkingCr [no_correctly_working_cr, OTnc] bool capability indicates no way to go to start of line. + NoCorrectlyWorkingCr + // The GnuHasMetaKey [gnu_has_meta_key, OTMT] bool capability indicates has meta key. + GnuHasMetaKey + // The LinefeedIsNewline [linefeed_is_newline, OTNL] bool capability indicates move down with \n. + LinefeedIsNewline + // The HasHardwareTabs [has_hardware_tabs, OTpt] bool capability indicates has 8-char tabs invoked with ^I. + HasHardwareTabs + // The ReturnDoesClrEol [return_does_clr_eol, OTxr] bool capability indicates return clears the line. 
+ ReturnDoesClrEol +) + +// Num capabilities. +const ( + // The Columns [columns, cols] num capability is number of columns in a line. + Columns = iota + // The InitTabs [init_tabs, it] num capability is tabs initially every # spaces. + InitTabs + // The Lines [lines, lines] num capability is number of lines on screen or page. + Lines + // The LinesOfMemory [lines_of_memory, lm] num capability is lines of memory if > line. 0 means varies. + LinesOfMemory + // The MagicCookieGlitch [magic_cookie_glitch, xmc] num capability is number of blank characters left by smso or rmso. + MagicCookieGlitch + // The PaddingBaudRate [padding_baud_rate, pb] num capability is lowest baud rate where padding needed. + PaddingBaudRate + // The VirtualTerminal [virtual_terminal, vt] num capability is virtual terminal number (CB/unix). + VirtualTerminal + // The WidthStatusLine [width_status_line, wsl] num capability is number of columns in status line. + WidthStatusLine + // The NumLabels [num_labels, nlab] num capability is number of labels on screen. + NumLabels + // The LabelHeight [label_height, lh] num capability is rows in each label. + LabelHeight + // The LabelWidth [label_width, lw] num capability is columns in each label. + LabelWidth + // The MaxAttributes [max_attributes, ma] num capability is maximum combined attributes terminal can handle. + MaxAttributes + // The MaximumWindows [maximum_windows, wnum] num capability is maximum number of definable windows. + MaximumWindows + // The MaxColors [max_colors, colors] num capability is maximum number of colors on screen. + MaxColors + // The MaxPairs [max_pairs, pairs] num capability is maximum number of color-pairs on the screen. + MaxPairs + // The NoColorVideo [no_color_video, ncv] num capability is video attributes that cannot be used with colors. + NoColorVideo + // The BufferCapacity [buffer_capacity, bufsz] num capability is numbers of bytes buffered before printing. + BufferCapacity + // The DotVertSpacing [dot_vert_spacing, spinv] num capability is spacing of pins vertically in pins per inch. + DotVertSpacing + // The DotHorzSpacing [dot_horz_spacing, spinh] num capability is spacing of dots horizontally in dots per inch. + DotHorzSpacing + // The MaxMicroAddress [max_micro_address, maddr] num capability is maximum value in micro_..._address. + MaxMicroAddress + // The MaxMicroJump [max_micro_jump, mjump] num capability is maximum value in parm_..._micro. + MaxMicroJump + // The MicroColSize [micro_col_size, mcs] num capability is character step size when in micro mode. + MicroColSize + // The MicroLineSize [micro_line_size, mls] num capability is line step size when in micro mode. + MicroLineSize + // The NumberOfPins [number_of_pins, npins] num capability is numbers of pins in print-head. + NumberOfPins + // The OutputResChar [output_res_char, orc] num capability is horizontal resolution in units per line. + OutputResChar + // The OutputResLine [output_res_line, orl] num capability is vertical resolution in units per line. + OutputResLine + // The OutputResHorzInch [output_res_horz_inch, orhi] num capability is horizontal resolution in units per inch. + OutputResHorzInch + // The OutputResVertInch [output_res_vert_inch, orvi] num capability is vertical resolution in units per inch. + OutputResVertInch + // The PrintRate [print_rate, cps] num capability is print rate in characters per second. + PrintRate + // The WideCharSize [wide_char_size, widcs] num capability is character step size when in double wide mode. 
+ WideCharSize + // The Buttons [buttons, btns] num capability is number of buttons on mouse. + Buttons + // The BitImageEntwining [bit_image_entwining, bitwin] num capability is number of passes for each bit-image row. + BitImageEntwining + // The BitImageType [bit_image_type, bitype] num capability is type of bit-image device. + BitImageType + // The MagicCookieGlitchUl [magic_cookie_glitch_ul, OTug] num capability is number of blanks left by ul. + MagicCookieGlitchUl + // The CarriageReturnDelay [carriage_return_delay, OTdC] num capability is pad needed for CR. + CarriageReturnDelay + // The NewLineDelay [new_line_delay, OTdN] num capability is pad needed for LF. + NewLineDelay + // The BackspaceDelay [backspace_delay, OTdB] num capability is padding required for ^H. + BackspaceDelay + // The HorizontalTabDelay [horizontal_tab_delay, OTdT] num capability is padding required for ^I. + HorizontalTabDelay + // The NumberOfFunctionKeys [number_of_function_keys, OTkn] num capability is count of function keys. + NumberOfFunctionKeys +) + +// String capabilities. +const ( + // The BackTab [back_tab, cbt] string capability is the back tab (P). + BackTab = iota + // The Bell [bell, bel] string capability is the audible signal (bell) (P). + Bell + // The CarriageReturn [carriage_return, cr] string capability is the carriage return (P*) (P*). + CarriageReturn + // The ChangeScrollRegion [change_scroll_region, csr] string capability is the change region to line #1 to line #2 (P). + ChangeScrollRegion + // The ClearAllTabs [clear_all_tabs, tbc] string capability is the clear all tab stops (P). + ClearAllTabs + // The ClearScreen [clear_screen, clear] string capability is the clear screen and home cursor (P*). + ClearScreen + // The ClrEol [clr_eol, el] string capability is the clear to end of line (P). + ClrEol + // The ClrEos [clr_eos, ed] string capability is the clear to end of screen (P*). + ClrEos + // The ColumnAddress [column_address, hpa] string capability is the horizontal position #1, absolute (P). + ColumnAddress + // The CommandCharacter [command_character, cmdch] string capability is the terminal settable cmd character in prototype !?. + CommandCharacter + // The CursorAddress [cursor_address, cup] string capability is the move to row #1 columns #2. + CursorAddress + // The CursorDown [cursor_down, cud1] string capability is the down one line. + CursorDown + // The CursorHome [cursor_home, home] string capability is the home cursor (if no cup). + CursorHome + // The CursorInvisible [cursor_invisible, civis] string capability is the make cursor invisible. + CursorInvisible + // The CursorLeft [cursor_left, cub1] string capability is the move left one space. + CursorLeft + // The CursorMemAddress [cursor_mem_address, mrcup] string capability is the memory relative cursor addressing, move to row #1 columns #2. + CursorMemAddress + // The CursorNormal [cursor_normal, cnorm] string capability is the make cursor appear normal (undo civis/cvvis). + CursorNormal + // The CursorRight [cursor_right, cuf1] string capability is the non-destructive space (move right one space). + CursorRight + // The CursorToLl [cursor_to_ll, ll] string capability is the last line, first column (if no cup). + CursorToLl + // The CursorUp [cursor_up, cuu1] string capability is the up one line. + CursorUp + // The CursorVisible [cursor_visible, cvvis] string capability is the make cursor very visible. + CursorVisible + // The DeleteCharacter [delete_character, dch1] string capability is the delete character (P*). 
+ DeleteCharacter + // The DeleteLine [delete_line, dl1] string capability is the delete line (P*). + DeleteLine + // The DisStatusLine [dis_status_line, dsl] string capability is the disable status line. + DisStatusLine + // The DownHalfLine [down_half_line, hd] string capability is the half a line down. + DownHalfLine + // The EnterAltCharsetMode [enter_alt_charset_mode, smacs] string capability is the start alternate character set (P). + EnterAltCharsetMode + // The EnterBlinkMode [enter_blink_mode, blink] string capability is the turn on blinking. + EnterBlinkMode + // The EnterBoldMode [enter_bold_mode, bold] string capability is the turn on bold (extra bright) mode. + EnterBoldMode + // The EnterCaMode [enter_ca_mode, smcup] string capability is the string to start programs using cup. + EnterCaMode + // The EnterDeleteMode [enter_delete_mode, smdc] string capability is the enter delete mode. + EnterDeleteMode + // The EnterDimMode [enter_dim_mode, dim] string capability is the turn on half-bright mode. + EnterDimMode + // The EnterInsertMode [enter_insert_mode, smir] string capability is the enter insert mode. + EnterInsertMode + // The EnterSecureMode [enter_secure_mode, invis] string capability is the turn on blank mode (characters invisible). + EnterSecureMode + // The EnterProtectedMode [enter_protected_mode, prot] string capability is the turn on protected mode. + EnterProtectedMode + // The EnterReverseMode [enter_reverse_mode, rev] string capability is the turn on reverse video mode. + EnterReverseMode + // The EnterStandoutMode [enter_standout_mode, smso] string capability is the begin standout mode. + EnterStandoutMode + // The EnterUnderlineMode [enter_underline_mode, smul] string capability is the begin underline mode. + EnterUnderlineMode + // The EraseChars [erase_chars, ech] string capability is the erase #1 characters (P). + EraseChars + // The ExitAltCharsetMode [exit_alt_charset_mode, rmacs] string capability is the end alternate character set (P). + ExitAltCharsetMode + // The ExitAttributeMode [exit_attribute_mode, sgr0] string capability is the turn off all attributes. + ExitAttributeMode + // The ExitCaMode [exit_ca_mode, rmcup] string capability is the strings to end programs using cup. + ExitCaMode + // The ExitDeleteMode [exit_delete_mode, rmdc] string capability is the end delete mode. + ExitDeleteMode + // The ExitInsertMode [exit_insert_mode, rmir] string capability is the exit insert mode. + ExitInsertMode + // The ExitStandoutMode [exit_standout_mode, rmso] string capability is the exit standout mode. + ExitStandoutMode + // The ExitUnderlineMode [exit_underline_mode, rmul] string capability is the exit underline mode. + ExitUnderlineMode + // The FlashScreen [flash_screen, flash] string capability is the visible bell (may not move cursor). + FlashScreen + // The FormFeed [form_feed, ff] string capability is the hardcopy terminal page eject (P*). + FormFeed + // The FromStatusLine [from_status_line, fsl] string capability is the return from status line. + FromStatusLine + // The Init1string [init_1string, is1] string capability is the initialization string. + Init1string + // The Init2string [init_2string, is2] string capability is the initialization string. + Init2string + // The Init3string [init_3string, is3] string capability is the initialization string. + Init3string + // The InitFile [init_file, if] string capability is the name of initialization file. 
+ InitFile + // The InsertCharacter [insert_character, ich1] string capability is the insert character (P). + InsertCharacter + // The InsertLine [insert_line, il1] string capability is the insert line (P*). + InsertLine + // The InsertPadding [insert_padding, ip] string capability is the insert padding after inserted character. + InsertPadding + // The KeyBackspace [key_backspace, kbs] string capability is the backspace key. + KeyBackspace + // The KeyCatab [key_catab, ktbc] string capability is the clear-all-tabs key. + KeyCatab + // The KeyClear [key_clear, kclr] string capability is the clear-screen or erase key. + KeyClear + // The KeyCtab [key_ctab, kctab] string capability is the clear-tab key. + KeyCtab + // The KeyDc [key_dc, kdch1] string capability is the delete-character key. + KeyDc + // The KeyDl [key_dl, kdl1] string capability is the delete-line key. + KeyDl + // The KeyDown [key_down, kcud1] string capability is the down-arrow key. + KeyDown + // The KeyEic [key_eic, krmir] string capability is the sent by rmir or smir in insert mode. + KeyEic + // The KeyEol [key_eol, kel] string capability is the clear-to-end-of-line key. + KeyEol + // The KeyEos [key_eos, ked] string capability is the clear-to-end-of-screen key. + KeyEos + // The KeyF0 [key_f0, kf0] string capability is the F0 function key. + KeyF0 + // The KeyF1 [key_f1, kf1] string capability is the F1 function key. + KeyF1 + // The KeyF10 [key_f10, kf10] string capability is the F10 function key. + KeyF10 + // The KeyF2 [key_f2, kf2] string capability is the F2 function key. + KeyF2 + // The KeyF3 [key_f3, kf3] string capability is the F3 function key. + KeyF3 + // The KeyF4 [key_f4, kf4] string capability is the F4 function key. + KeyF4 + // The KeyF5 [key_f5, kf5] string capability is the F5 function key. + KeyF5 + // The KeyF6 [key_f6, kf6] string capability is the F6 function key. + KeyF6 + // The KeyF7 [key_f7, kf7] string capability is the F7 function key. + KeyF7 + // The KeyF8 [key_f8, kf8] string capability is the F8 function key. + KeyF8 + // The KeyF9 [key_f9, kf9] string capability is the F9 function key. + KeyF9 + // The KeyHome [key_home, khome] string capability is the home key. + KeyHome + // The KeyIc [key_ic, kich1] string capability is the insert-character key. + KeyIc + // The KeyIl [key_il, kil1] string capability is the insert-line key. + KeyIl + // The KeyLeft [key_left, kcub1] string capability is the left-arrow key. + KeyLeft + // The KeyLl [key_ll, kll] string capability is the lower-left key (home down). + KeyLl + // The KeyNpage [key_npage, knp] string capability is the next-page key. + KeyNpage + // The KeyPpage [key_ppage, kpp] string capability is the previous-page key. + KeyPpage + // The KeyRight [key_right, kcuf1] string capability is the right-arrow key. + KeyRight + // The KeySf [key_sf, kind] string capability is the scroll-forward key. + KeySf + // The KeySr [key_sr, kri] string capability is the scroll-backward key. + KeySr + // The KeyStab [key_stab, khts] string capability is the set-tab key. + KeyStab + // The KeyUp [key_up, kcuu1] string capability is the up-arrow key. + KeyUp + // The KeypadLocal [keypad_local, rmkx] string capability is the leave 'keyboard_transmit' mode. + KeypadLocal + // The KeypadXmit [keypad_xmit, smkx] string capability is the enter 'keyboard_transmit' mode. + KeypadXmit + // The LabF0 [lab_f0, lf0] string capability is the label on function key f0 if not f0. 
+ LabF0 + // The LabF1 [lab_f1, lf1] string capability is the label on function key f1 if not f1. + LabF1 + // The LabF10 [lab_f10, lf10] string capability is the label on function key f10 if not f10. + LabF10 + // The LabF2 [lab_f2, lf2] string capability is the label on function key f2 if not f2. + LabF2 + // The LabF3 [lab_f3, lf3] string capability is the label on function key f3 if not f3. + LabF3 + // The LabF4 [lab_f4, lf4] string capability is the label on function key f4 if not f4. + LabF4 + // The LabF5 [lab_f5, lf5] string capability is the label on function key f5 if not f5. + LabF5 + // The LabF6 [lab_f6, lf6] string capability is the label on function key f6 if not f6. + LabF6 + // The LabF7 [lab_f7, lf7] string capability is the label on function key f7 if not f7. + LabF7 + // The LabF8 [lab_f8, lf8] string capability is the label on function key f8 if not f8. + LabF8 + // The LabF9 [lab_f9, lf9] string capability is the label on function key f9 if not f9. + LabF9 + // The MetaOff [meta_off, rmm] string capability is the turn off meta mode. + MetaOff + // The MetaOn [meta_on, smm] string capability is the turn on meta mode (8th-bit on). + MetaOn + // The Newline [newline, nel] string capability is the newline (behave like cr followed by lf). + Newline + // The PadChar [pad_char, pad] string capability is the padding char (instead of null). + PadChar + // The ParmDch [parm_dch, dch] string capability is the delete #1 characters (P*). + ParmDch + // The ParmDeleteLine [parm_delete_line, dl] string capability is the delete #1 lines (P*). + ParmDeleteLine + // The ParmDownCursor [parm_down_cursor, cud] string capability is the down #1 lines (P*). + ParmDownCursor + // The ParmIch [parm_ich, ich] string capability is the insert #1 characters (P*). + ParmIch + // The ParmIndex [parm_index, indn] string capability is the scroll forward #1 lines (P). + ParmIndex + // The ParmInsertLine [parm_insert_line, il] string capability is the insert #1 lines (P*). + ParmInsertLine + // The ParmLeftCursor [parm_left_cursor, cub] string capability is the move #1 characters to the left (P). + ParmLeftCursor + // The ParmRightCursor [parm_right_cursor, cuf] string capability is the move #1 characters to the right (P*). + ParmRightCursor + // The ParmRindex [parm_rindex, rin] string capability is the scroll back #1 lines (P). + ParmRindex + // The ParmUpCursor [parm_up_cursor, cuu] string capability is the up #1 lines (P*). + ParmUpCursor + // The PkeyKey [pkey_key, pfkey] string capability is the program function key #1 to type string #2. + PkeyKey + // The PkeyLocal [pkey_local, pfloc] string capability is the program function key #1 to execute string #2. + PkeyLocal + // The PkeyXmit [pkey_xmit, pfx] string capability is the program function key #1 to transmit string #2. + PkeyXmit + // The PrintScreen [print_screen, mc0] string capability is the print contents of screen. + PrintScreen + // The PrtrOff [prtr_off, mc4] string capability is the turn off printer. + PrtrOff + // The PrtrOn [prtr_on, mc5] string capability is the turn on printer. + PrtrOn + // The RepeatChar [repeat_char, rep] string capability is the repeat char #1 #2 times (P*). + RepeatChar + // The Reset1string [reset_1string, rs1] string capability is the reset string. + Reset1string + // The Reset2string [reset_2string, rs2] string capability is the reset string. + Reset2string + // The Reset3string [reset_3string, rs3] string capability is the reset string. 
+ Reset3string + // The ResetFile [reset_file, rf] string capability is the name of reset file. + ResetFile + // The RestoreCursor [restore_cursor, rc] string capability is the restore cursor to position of last save_cursor. + RestoreCursor + // The RowAddress [row_address, vpa] string capability is the vertical position #1 absolute (P). + RowAddress + // The SaveCursor [save_cursor, sc] string capability is the save current cursor position (P). + SaveCursor + // The ScrollForward [scroll_forward, ind] string capability is the scroll text up (P). + ScrollForward + // The ScrollReverse [scroll_reverse, ri] string capability is the scroll text down (P). + ScrollReverse + // The SetAttributes [set_attributes, sgr] string capability is the define video attributes #1-#9 (PG9). + SetAttributes + // The SetTab [set_tab, hts] string capability is the set a tab in every row, current columns. + SetTab + // The SetWindow [set_window, wind] string capability is the current window is lines #1-#2 cols #3-#4. + SetWindow + // The Tab [tab, ht] string capability is the tab to next 8-space hardware tab stop. + Tab + // The ToStatusLine [to_status_line, tsl] string capability is the move to status line, column #1. + ToStatusLine + // The UnderlineChar [underline_char, uc] string capability is the underline char and move past it. + UnderlineChar + // The UpHalfLine [up_half_line, hu] string capability is the half a line up. + UpHalfLine + // The InitProg [init_prog, iprog] string capability is the path name of program for initialization. + InitProg + // The KeyA1 [key_a1, ka1] string capability is the upper left of keypad. + KeyA1 + // The KeyA3 [key_a3, ka3] string capability is the upper right of keypad. + KeyA3 + // The KeyB2 [key_b2, kb2] string capability is the center of keypad. + KeyB2 + // The KeyC1 [key_c1, kc1] string capability is the lower left of keypad. + KeyC1 + // The KeyC3 [key_c3, kc3] string capability is the lower right of keypad. + KeyC3 + // The PrtrNon [prtr_non, mc5p] string capability is the turn on printer for #1 bytes. + PrtrNon + // The CharPadding [char_padding, rmp] string capability is the like ip but when in insert mode. + CharPadding + // The AcsChars [acs_chars, acsc] string capability is the graphics charset pairs, based on vt100. + AcsChars + // The PlabNorm [plab_norm, pln] string capability is the program label #1 to show string #2. + PlabNorm + // The KeyBtab [key_btab, kcbt] string capability is the back-tab key. + KeyBtab + // The EnterXonMode [enter_xon_mode, smxon] string capability is the turn on xon/xoff handshaking. + EnterXonMode + // The ExitXonMode [exit_xon_mode, rmxon] string capability is the turn off xon/xoff handshaking. + ExitXonMode + // The EnterAmMode [enter_am_mode, smam] string capability is the turn on automatic margins. + EnterAmMode + // The ExitAmMode [exit_am_mode, rmam] string capability is the turn off automatic margins. + ExitAmMode + // The XonCharacter [xon_character, xonc] string capability is the XON character. + XonCharacter + // The XoffCharacter [xoff_character, xoffc] string capability is the XOFF character. + XoffCharacter + // The EnaAcs [ena_acs, enacs] string capability is the enable alternate char set. + EnaAcs + // The LabelOn [label_on, smln] string capability is the turn on soft labels. + LabelOn + // The LabelOff [label_off, rmln] string capability is the turn off soft labels. + LabelOff + // The KeyBeg [key_beg, kbeg] string capability is the begin key. 
+ KeyBeg + // The KeyCancel [key_cancel, kcan] string capability is the cancel key. + KeyCancel + // The KeyClose [key_close, kclo] string capability is the close key. + KeyClose + // The KeyCommand [key_command, kcmd] string capability is the command key. + KeyCommand + // The KeyCopy [key_copy, kcpy] string capability is the copy key. + KeyCopy + // The KeyCreate [key_create, kcrt] string capability is the create key. + KeyCreate + // The KeyEnd [key_end, kend] string capability is the end key. + KeyEnd + // The KeyEnter [key_enter, kent] string capability is the enter/send key. + KeyEnter + // The KeyExit [key_exit, kext] string capability is the exit key. + KeyExit + // The KeyFind [key_find, kfnd] string capability is the find key. + KeyFind + // The KeyHelp [key_help, khlp] string capability is the help key. + KeyHelp + // The KeyMark [key_mark, kmrk] string capability is the mark key. + KeyMark + // The KeyMessage [key_message, kmsg] string capability is the message key. + KeyMessage + // The KeyMove [key_move, kmov] string capability is the move key. + KeyMove + // The KeyNext [key_next, knxt] string capability is the next key. + KeyNext + // The KeyOpen [key_open, kopn] string capability is the open key. + KeyOpen + // The KeyOptions [key_options, kopt] string capability is the options key. + KeyOptions + // The KeyPrevious [key_previous, kprv] string capability is the previous key. + KeyPrevious + // The KeyPrint [key_print, kprt] string capability is the print key. + KeyPrint + // The KeyRedo [key_redo, krdo] string capability is the redo key. + KeyRedo + // The KeyReference [key_reference, kref] string capability is the reference key. + KeyReference + // The KeyRefresh [key_refresh, krfr] string capability is the refresh key. + KeyRefresh + // The KeyReplace [key_replace, krpl] string capability is the replace key. + KeyReplace + // The KeyRestart [key_restart, krst] string capability is the restart key. + KeyRestart + // The KeyResume [key_resume, kres] string capability is the resume key. + KeyResume + // The KeySave [key_save, ksav] string capability is the save key. + KeySave + // The KeySuspend [key_suspend, kspd] string capability is the suspend key. + KeySuspend + // The KeyUndo [key_undo, kund] string capability is the undo key. + KeyUndo + // The KeySbeg [key_sbeg, kBEG] string capability is the shifted begin key. + KeySbeg + // The KeyScancel [key_scancel, kCAN] string capability is the shifted cancel key. + KeyScancel + // The KeyScommand [key_scommand, kCMD] string capability is the shifted command key. + KeyScommand + // The KeyScopy [key_scopy, kCPY] string capability is the shifted copy key. + KeyScopy + // The KeyScreate [key_screate, kCRT] string capability is the shifted create key. + KeyScreate + // The KeySdc [key_sdc, kDC] string capability is the shifted delete-character key. + KeySdc + // The KeySdl [key_sdl, kDL] string capability is the shifted delete-line key. + KeySdl + // The KeySelect [key_select, kslt] string capability is the select key. + KeySelect + // The KeySend [key_send, kEND] string capability is the shifted end key. + KeySend + // The KeySeol [key_seol, kEOL] string capability is the shifted clear-to-end-of-line key. + KeySeol + // The KeySexit [key_sexit, kEXT] string capability is the shifted exit key. + KeySexit + // The KeySfind [key_sfind, kFND] string capability is the shifted find key. + KeySfind + // The KeyShelp [key_shelp, kHLP] string capability is the shifted help key. 
+ KeyShelp + // The KeyShome [key_shome, kHOM] string capability is the shifted home key. + KeyShome + // The KeySic [key_sic, kIC] string capability is the shifted insert-character key. + KeySic + // The KeySleft [key_sleft, kLFT] string capability is the shifted left-arrow key. + KeySleft + // The KeySmessage [key_smessage, kMSG] string capability is the shifted message key. + KeySmessage + // The KeySmove [key_smove, kMOV] string capability is the shifted move key. + KeySmove + // The KeySnext [key_snext, kNXT] string capability is the shifted next key. + KeySnext + // The KeySoptions [key_soptions, kOPT] string capability is the shifted options key. + KeySoptions + // The KeySprevious [key_sprevious, kPRV] string capability is the shifted previous key. + KeySprevious + // The KeySprint [key_sprint, kPRT] string capability is the shifted print key. + KeySprint + // The KeySredo [key_sredo, kRDO] string capability is the shifted redo key. + KeySredo + // The KeySreplace [key_sreplace, kRPL] string capability is the shifted replace key. + KeySreplace + // The KeySright [key_sright, kRIT] string capability is the shifted right-arrow key. + KeySright + // The KeySrsume [key_srsume, kRES] string capability is the shifted resume key. + KeySrsume + // The KeySsave [key_ssave, kSAV] string capability is the shifted save key. + KeySsave + // The KeySsuspend [key_ssuspend, kSPD] string capability is the shifted suspend key. + KeySsuspend + // The KeySundo [key_sundo, kUND] string capability is the shifted undo key. + KeySundo + // The ReqForInput [req_for_input, rfi] string capability is the send next input char (for ptys). + ReqForInput + // The KeyF11 [key_f11, kf11] string capability is the F11 function key. + KeyF11 + // The KeyF12 [key_f12, kf12] string capability is the F12 function key. + KeyF12 + // The KeyF13 [key_f13, kf13] string capability is the F13 function key. + KeyF13 + // The KeyF14 [key_f14, kf14] string capability is the F14 function key. + KeyF14 + // The KeyF15 [key_f15, kf15] string capability is the F15 function key. + KeyF15 + // The KeyF16 [key_f16, kf16] string capability is the F16 function key. + KeyF16 + // The KeyF17 [key_f17, kf17] string capability is the F17 function key. + KeyF17 + // The KeyF18 [key_f18, kf18] string capability is the F18 function key. + KeyF18 + // The KeyF19 [key_f19, kf19] string capability is the F19 function key. + KeyF19 + // The KeyF20 [key_f20, kf20] string capability is the F20 function key. + KeyF20 + // The KeyF21 [key_f21, kf21] string capability is the F21 function key. + KeyF21 + // The KeyF22 [key_f22, kf22] string capability is the F22 function key. + KeyF22 + // The KeyF23 [key_f23, kf23] string capability is the F23 function key. + KeyF23 + // The KeyF24 [key_f24, kf24] string capability is the F24 function key. + KeyF24 + // The KeyF25 [key_f25, kf25] string capability is the F25 function key. + KeyF25 + // The KeyF26 [key_f26, kf26] string capability is the F26 function key. + KeyF26 + // The KeyF27 [key_f27, kf27] string capability is the F27 function key. + KeyF27 + // The KeyF28 [key_f28, kf28] string capability is the F28 function key. + KeyF28 + // The KeyF29 [key_f29, kf29] string capability is the F29 function key. + KeyF29 + // The KeyF30 [key_f30, kf30] string capability is the F30 function key. + KeyF30 + // The KeyF31 [key_f31, kf31] string capability is the F31 function key. + KeyF31 + // The KeyF32 [key_f32, kf32] string capability is the F32 function key. 
+ KeyF32 + // The KeyF33 [key_f33, kf33] string capability is the F33 function key. + KeyF33 + // The KeyF34 [key_f34, kf34] string capability is the F34 function key. + KeyF34 + // The KeyF35 [key_f35, kf35] string capability is the F35 function key. + KeyF35 + // The KeyF36 [key_f36, kf36] string capability is the F36 function key. + KeyF36 + // The KeyF37 [key_f37, kf37] string capability is the F37 function key. + KeyF37 + // The KeyF38 [key_f38, kf38] string capability is the F38 function key. + KeyF38 + // The KeyF39 [key_f39, kf39] string capability is the F39 function key. + KeyF39 + // The KeyF40 [key_f40, kf40] string capability is the F40 function key. + KeyF40 + // The KeyF41 [key_f41, kf41] string capability is the F41 function key. + KeyF41 + // The KeyF42 [key_f42, kf42] string capability is the F42 function key. + KeyF42 + // The KeyF43 [key_f43, kf43] string capability is the F43 function key. + KeyF43 + // The KeyF44 [key_f44, kf44] string capability is the F44 function key. + KeyF44 + // The KeyF45 [key_f45, kf45] string capability is the F45 function key. + KeyF45 + // The KeyF46 [key_f46, kf46] string capability is the F46 function key. + KeyF46 + // The KeyF47 [key_f47, kf47] string capability is the F47 function key. + KeyF47 + // The KeyF48 [key_f48, kf48] string capability is the F48 function key. + KeyF48 + // The KeyF49 [key_f49, kf49] string capability is the F49 function key. + KeyF49 + // The KeyF50 [key_f50, kf50] string capability is the F50 function key. + KeyF50 + // The KeyF51 [key_f51, kf51] string capability is the F51 function key. + KeyF51 + // The KeyF52 [key_f52, kf52] string capability is the F52 function key. + KeyF52 + // The KeyF53 [key_f53, kf53] string capability is the F53 function key. + KeyF53 + // The KeyF54 [key_f54, kf54] string capability is the F54 function key. + KeyF54 + // The KeyF55 [key_f55, kf55] string capability is the F55 function key. + KeyF55 + // The KeyF56 [key_f56, kf56] string capability is the F56 function key. + KeyF56 + // The KeyF57 [key_f57, kf57] string capability is the F57 function key. + KeyF57 + // The KeyF58 [key_f58, kf58] string capability is the F58 function key. + KeyF58 + // The KeyF59 [key_f59, kf59] string capability is the F59 function key. + KeyF59 + // The KeyF60 [key_f60, kf60] string capability is the F60 function key. + KeyF60 + // The KeyF61 [key_f61, kf61] string capability is the F61 function key. + KeyF61 + // The KeyF62 [key_f62, kf62] string capability is the F62 function key. + KeyF62 + // The KeyF63 [key_f63, kf63] string capability is the F63 function key. + KeyF63 + // The ClrBol [clr_bol, el1] string capability is the Clear to beginning of line. + ClrBol + // The ClearMargins [clear_margins, mgc] string capability is the clear right and left soft margins. + ClearMargins + // The SetLeftMargin [set_left_margin, smgl] string capability is the set left soft margin at current column. (ML is not in BSD termcap). + SetLeftMargin + // The SetRightMargin [set_right_margin, smgr] string capability is the set right soft margin at current column. + SetRightMargin + // The LabelFormat [label_format, fln] string capability is the label format. + LabelFormat + // The SetClock [set_clock, sclk] string capability is the set clock, #1 hrs #2 mins #3 secs. + SetClock + // The DisplayClock [display_clock, dclk] string capability is the display clock. + DisplayClock + // The RemoveClock [remove_clock, rmclk] string capability is the remove clock. 
+ RemoveClock + // The CreateWindow [create_window, cwin] string capability is the define a window #1 from #2,#3 to #4,#5. + CreateWindow + // The GotoWindow [goto_window, wingo] string capability is the go to window #1. + GotoWindow + // The Hangup [hangup, hup] string capability is the hang-up phone. + Hangup + // The DialPhone [dial_phone, dial] string capability is the dial number #1. + DialPhone + // The QuickDial [quick_dial, qdial] string capability is the dial number #1 without checking. + QuickDial + // The Tone [tone, tone] string capability is the select touch tone dialing. + Tone + // The Pulse [pulse, pulse] string capability is the select pulse dialing. + Pulse + // The FlashHook [flash_hook, hook] string capability is the flash switch hook. + FlashHook + // The FixedPause [fixed_pause, pause] string capability is the pause for 2-3 seconds. + FixedPause + // The WaitTone [wait_tone, wait] string capability is the wait for dial-tone. + WaitTone + // The User0 [user0, u0] string capability is the User string #0. + User0 + // The User1 [user1, u1] string capability is the User string #1. + User1 + // The User2 [user2, u2] string capability is the User string #2. + User2 + // The User3 [user3, u3] string capability is the User string #3. + User3 + // The User4 [user4, u4] string capability is the User string #4. + User4 + // The User5 [user5, u5] string capability is the User string #5. + User5 + // The User6 [user6, u6] string capability is the User string #6. + User6 + // The User7 [user7, u7] string capability is the User string #7. + User7 + // The User8 [user8, u8] string capability is the User string #8. + User8 + // The User9 [user9, u9] string capability is the User string #9. + User9 + // The OrigPair [orig_pair, op] string capability is the Set default pair to its original value. + OrigPair + // The OrigColors [orig_colors, oc] string capability is the Set all color pairs to the original ones. + OrigColors + // The InitializeColor [initialize_color, initc] string capability is the initialize color #1 to (#2,#3,#4). + InitializeColor + // The InitializePair [initialize_pair, initp] string capability is the Initialize color pair #1 to fg=(#2,#3,#4), bg=(#5,#6,#7). + InitializePair + // The SetColorPair [set_color_pair, scp] string capability is the Set current color pair to #1. + SetColorPair + // The SetForeground [set_foreground, setf] string capability is the Set foreground color #1. + SetForeground + // The SetBackground [set_background, setb] string capability is the Set background color #1. + SetBackground + // The ChangeCharPitch [change_char_pitch, cpi] string capability is the Change number of characters per inch to #1. + ChangeCharPitch + // The ChangeLinePitch [change_line_pitch, lpi] string capability is the Change number of lines per inch to #1. + ChangeLinePitch + // The ChangeResHorz [change_res_horz, chr] string capability is the Change horizontal resolution to #1. + ChangeResHorz + // The ChangeResVert [change_res_vert, cvr] string capability is the Change vertical resolution to #1. + ChangeResVert + // The DefineChar [define_char, defc] string capability is the Define a character #1, #2 dots wide, descender #3. + DefineChar + // The EnterDoublewideMode [enter_doublewide_mode, swidm] string capability is the Enter double-wide mode. + EnterDoublewideMode + // The EnterDraftQuality [enter_draft_quality, sdrfq] string capability is the Enter draft-quality mode. 
+ EnterDraftQuality + // The EnterItalicsMode [enter_italics_mode, sitm] string capability is the Enter italic mode. + EnterItalicsMode + // The EnterLeftwardMode [enter_leftward_mode, slm] string capability is the Start leftward carriage motion. + EnterLeftwardMode + // The EnterMicroMode [enter_micro_mode, smicm] string capability is the Start micro-motion mode. + EnterMicroMode + // The EnterNearLetterQuality [enter_near_letter_quality, snlq] string capability is the Enter NLQ mode. + EnterNearLetterQuality + // The EnterNormalQuality [enter_normal_quality, snrmq] string capability is the Enter normal-quality mode. + EnterNormalQuality + // The EnterShadowMode [enter_shadow_mode, sshm] string capability is the Enter shadow-print mode. + EnterShadowMode + // The EnterSubscriptMode [enter_subscript_mode, ssubm] string capability is the Enter subscript mode. + EnterSubscriptMode + // The EnterSuperscriptMode [enter_superscript_mode, ssupm] string capability is the Enter superscript mode. + EnterSuperscriptMode + // The EnterUpwardMode [enter_upward_mode, sum] string capability is the Start upward carriage motion. + EnterUpwardMode + // The ExitDoublewideMode [exit_doublewide_mode, rwidm] string capability is the End double-wide mode. + ExitDoublewideMode + // The ExitItalicsMode [exit_italics_mode, ritm] string capability is the End italic mode. + ExitItalicsMode + // The ExitLeftwardMode [exit_leftward_mode, rlm] string capability is the End left-motion mode. + ExitLeftwardMode + // The ExitMicroMode [exit_micro_mode, rmicm] string capability is the End micro-motion mode. + ExitMicroMode + // The ExitShadowMode [exit_shadow_mode, rshm] string capability is the End shadow-print mode. + ExitShadowMode + // The ExitSubscriptMode [exit_subscript_mode, rsubm] string capability is the End subscript mode. + ExitSubscriptMode + // The ExitSuperscriptMode [exit_superscript_mode, rsupm] string capability is the End superscript mode. + ExitSuperscriptMode + // The ExitUpwardMode [exit_upward_mode, rum] string capability is the End reverse character motion. + ExitUpwardMode + // The MicroColumnAddress [micro_column_address, mhpa] string capability is the Like column_address in micro mode. + MicroColumnAddress + // The MicroDown [micro_down, mcud1] string capability is the Like cursor_down in micro mode. + MicroDown + // The MicroLeft [micro_left, mcub1] string capability is the Like cursor_left in micro mode. + MicroLeft + // The MicroRight [micro_right, mcuf1] string capability is the Like cursor_right in micro mode. + MicroRight + // The MicroRowAddress [micro_row_address, mvpa] string capability is the Like row_address #1 in micro mode. + MicroRowAddress + // The MicroUp [micro_up, mcuu1] string capability is the Like cursor_up in micro mode. + MicroUp + // The OrderOfPins [order_of_pins, porder] string capability is the Match software bits to print-head pins. + OrderOfPins + // The ParmDownMicro [parm_down_micro, mcud] string capability is the Like parm_down_cursor in micro mode. + ParmDownMicro + // The ParmLeftMicro [parm_left_micro, mcub] string capability is the Like parm_left_cursor in micro mode. + ParmLeftMicro + // The ParmRightMicro [parm_right_micro, mcuf] string capability is the Like parm_right_cursor in micro mode. + ParmRightMicro + // The ParmUpMicro [parm_up_micro, mcuu] string capability is the Like parm_up_cursor in micro mode. + ParmUpMicro + // The SelectCharSet [select_char_set, scs] string capability is the Select character set, #1. 
+ SelectCharSet + // The SetBottomMargin [set_bottom_margin, smgb] string capability is the Set bottom margin at current line. + SetBottomMargin + // The SetBottomMarginParm [set_bottom_margin_parm, smgbp] string capability is the Set bottom margin at line #1 or (if smgtp is not given) #2 lines from bottom. + SetBottomMarginParm + // The SetLeftMarginParm [set_left_margin_parm, smglp] string capability is the Set left (right) margin at column #1. + SetLeftMarginParm + // The SetRightMarginParm [set_right_margin_parm, smgrp] string capability is the Set right margin at column #1. + SetRightMarginParm + // The SetTopMargin [set_top_margin, smgt] string capability is the Set top margin at current line. + SetTopMargin + // The SetTopMarginParm [set_top_margin_parm, smgtp] string capability is the Set top (bottom) margin at row #1. + SetTopMarginParm + // The StartBitImage [start_bit_image, sbim] string capability is the Start printing bit image graphics. + StartBitImage + // The StartCharSetDef [start_char_set_def, scsd] string capability is the Start character set definition #1, with #2 characters in the set. + StartCharSetDef + // The StopBitImage [stop_bit_image, rbim] string capability is the Stop printing bit image graphics. + StopBitImage + // The StopCharSetDef [stop_char_set_def, rcsd] string capability is the End definition of character set #1. + StopCharSetDef + // The SubscriptCharacters [subscript_characters, subcs] string capability is the List of subscriptable characters. + SubscriptCharacters + // The SuperscriptCharacters [superscript_characters, supcs] string capability is the List of superscriptable characters. + SuperscriptCharacters + // The TheseCauseCr [these_cause_cr, docr] string capability is the Printing any of these characters causes CR. + TheseCauseCr + // The ZeroMotion [zero_motion, zerom] string capability is the No motion for subsequent character. + ZeroMotion + // The CharSetNames [char_set_names, csnm] string capability is the Produce #1'th item from list of character set names. + CharSetNames + // The KeyMouse [key_mouse, kmous] string capability is the Mouse event has occurred. + KeyMouse + // The MouseInfo [mouse_info, minfo] string capability is the Mouse status information. + MouseInfo + // The ReqMousePos [req_mouse_pos, reqmp] string capability is the Request mouse position. + ReqMousePos + // The GetMouse [get_mouse, getm] string capability is the Curses should get button events, parameter #1 not documented. + GetMouse + // The SetAForeground [set_a_foreground, setaf] string capability is the Set foreground color to #1, using ANSI escape. + SetAForeground + // The SetABackground [set_a_background, setab] string capability is the Set background color to #1, using ANSI escape. + SetABackground + // The PkeyPlab [pkey_plab, pfxl] string capability is the Program function key #1 to type string #2 and show string #3. + PkeyPlab + // The DeviceType [device_type, devt] string capability is the Indicate language/codeset support. + DeviceType + // The CodeSetInit [code_set_init, csin] string capability is the Init sequence for multiple codesets. + CodeSetInit + // The Set0DesSeq [set0_des_seq, s0ds] string capability is the Shift to codeset 0 (EUC set 0, ASCII). + Set0DesSeq + // The Set1DesSeq [set1_des_seq, s1ds] string capability is the Shift to codeset 1. + Set1DesSeq + // The Set2DesSeq [set2_des_seq, s2ds] string capability is the Shift to codeset 2. + Set2DesSeq + // The Set3DesSeq [set3_des_seq, s3ds] string capability is the Shift to codeset 3. 
+ Set3DesSeq + // The SetLrMargin [set_lr_margin, smglr] string capability is the Set both left and right margins to #1, #2. (ML is not in BSD termcap). + SetLrMargin + // The SetTbMargin [set_tb_margin, smgtb] string capability is the Sets both top and bottom margins to #1, #2. + SetTbMargin + // The BitImageRepeat [bit_image_repeat, birep] string capability is the Repeat bit image cell #1 #2 times. + BitImageRepeat + // The BitImageNewline [bit_image_newline, binel] string capability is the Move to next row of the bit image. + BitImageNewline + // The BitImageCarriageReturn [bit_image_carriage_return, bicr] string capability is the Move to beginning of same row. + BitImageCarriageReturn + // The ColorNames [color_names, colornm] string capability is the Give name for color #1. + ColorNames + // The DefineBitImageRegion [define_bit_image_region, defbi] string capability is the Define rectangular bit image region. + DefineBitImageRegion + // The EndBitImageRegion [end_bit_image_region, endbi] string capability is the End a bit-image region. + EndBitImageRegion + // The SetColorBand [set_color_band, setcolor] string capability is the Change to ribbon color #1. + SetColorBand + // The SetPageLength [set_page_length, slines] string capability is the Set page length to #1 lines. + SetPageLength + // The DisplayPcChar [display_pc_char, dispc] string capability is the Display PC character #1. + DisplayPcChar + // The EnterPcCharsetMode [enter_pc_charset_mode, smpch] string capability is the Enter PC character display mode. + EnterPcCharsetMode + // The ExitPcCharsetMode [exit_pc_charset_mode, rmpch] string capability is the Exit PC character display mode. + ExitPcCharsetMode + // The EnterScancodeMode [enter_scancode_mode, smsc] string capability is the Enter PC scancode mode. + EnterScancodeMode + // The ExitScancodeMode [exit_scancode_mode, rmsc] string capability is the Exit PC scancode mode. + ExitScancodeMode + // The PcTermOptions [pc_term_options, pctrm] string capability is the PC terminal options. + PcTermOptions + // The ScancodeEscape [scancode_escape, scesc] string capability is the Escape for scancode emulation. + ScancodeEscape + // The AltScancodeEsc [alt_scancode_esc, scesa] string capability is the Alternate escape for scancode emulation. + AltScancodeEsc + // The EnterHorizontalHlMode [enter_horizontal_hl_mode, ehhlm] string capability is the Enter horizontal highlight mode. + EnterHorizontalHlMode + // The EnterLeftHlMode [enter_left_hl_mode, elhlm] string capability is the Enter left highlight mode. + EnterLeftHlMode + // The EnterLowHlMode [enter_low_hl_mode, elohlm] string capability is the Enter low highlight mode. + EnterLowHlMode + // The EnterRightHlMode [enter_right_hl_mode, erhlm] string capability is the Enter right highlight mode. + EnterRightHlMode + // The EnterTopHlMode [enter_top_hl_mode, ethlm] string capability is the Enter top highlight mode. + EnterTopHlMode + // The EnterVerticalHlMode [enter_vertical_hl_mode, evhlm] string capability is the Enter vertical highlight mode. + EnterVerticalHlMode + // The SetAAttributes [set_a_attributes, sgr1] string capability is the Define second set of video attributes #1-#6. + SetAAttributes + // The SetPglenInch [set_pglen_inch, slength] string capability is the Set page length to #1 hundredth of an inch (some implementations use sL for termcap). + SetPglenInch + // The TermcapInit2 [termcap_init2, OTi2] string capability is the secondary initialization string. 
+ TermcapInit2 + // The TermcapReset [termcap_reset, OTrs] string capability is the terminal reset string. + TermcapReset + // The LinefeedIfNotLf [linefeed_if_not_lf, OTnl] string capability is the use to move down. + LinefeedIfNotLf + // The BackspaceIfNotBs [backspace_if_not_bs, OTbc] string capability is the move left, if not ^H. + BackspaceIfNotBs + // The OtherNonFunctionKeys [other_non_function_keys, OTko] string capability is the list of self-mapped keycaps. + OtherNonFunctionKeys + // The ArrowKeyMap [arrow_key_map, OTma] string capability is the map motion-keys for vi version 2. + ArrowKeyMap + // The AcsUlcorner [acs_ulcorner, OTG2] string capability is the single upper left. + AcsUlcorner + // The AcsLlcorner [acs_llcorner, OTG3] string capability is the single lower left. + AcsLlcorner + // The AcsUrcorner [acs_urcorner, OTG1] string capability is the single upper right. + AcsUrcorner + // The AcsLrcorner [acs_lrcorner, OTG4] string capability is the single lower right. + AcsLrcorner + // The AcsLtee [acs_ltee, OTGR] string capability is the tee pointing right. + AcsLtee + // The AcsRtee [acs_rtee, OTGL] string capability is the tee pointing left. + AcsRtee + // The AcsBtee [acs_btee, OTGU] string capability is the tee pointing up. + AcsBtee + // The AcsTtee [acs_ttee, OTGD] string capability is the tee pointing down. + AcsTtee + // The AcsHline [acs_hline, OTGH] string capability is the single horizontal line. + AcsHline + // The AcsVline [acs_vline, OTGV] string capability is the single vertical line. + AcsVline + // The AcsPlus [acs_plus, OTGC] string capability is the single intersection. + AcsPlus + // The MemoryLock [memory_lock, meml] string capability is the lock memory above cursor. + MemoryLock + // The MemoryUnlock [memory_unlock, memu] string capability is the unlock memory. + MemoryUnlock + // The BoxChars1 [box_chars_1, box1] string capability is the box characters primary set. + BoxChars1 +) +const ( + // CapCountBool is the count of bool capabilities. + CapCountBool = ReturnDoesClrEol + 1 + // CapCountNum is the count of num capabilities. + CapCountNum = NumberOfFunctionKeys + 1 + // CapCountString is the count of string capabilities. + CapCountString = BoxChars1 + 1 +) + +// boolCapNames are the bool term cap names. 
+var boolCapNames = [...]string{ + "auto_left_margin", "bw", + "auto_right_margin", "am", + "no_esc_ctlc", "xsb", + "ceol_standout_glitch", "xhp", + "eat_newline_glitch", "xenl", + "erase_overstrike", "eo", + "generic_type", "gn", + "hard_copy", "hc", + "has_meta_key", "km", + "has_status_line", "hs", + "insert_null_glitch", "in", + "memory_above", "da", + "memory_below", "db", + "move_insert_mode", "mir", + "move_standout_mode", "msgr", + "over_strike", "os", + "status_line_esc_ok", "eslok", + "dest_tabs_magic_smso", "xt", + "tilde_glitch", "hz", + "transparent_underline", "ul", + "xon_xoff", "xon", + "needs_xon_xoff", "nxon", + "prtr_silent", "mc5i", + "hard_cursor", "chts", + "non_rev_rmcup", "nrrmc", + "no_pad_char", "npc", + "non_dest_scroll_region", "ndscr", + "can_change", "ccc", + "back_color_erase", "bce", + "hue_lightness_saturation", "hls", + "col_addr_glitch", "xhpa", + "cr_cancels_micro_mode", "crxm", + "has_print_wheel", "daisy", + "row_addr_glitch", "xvpa", + "semi_auto_right_margin", "sam", + "cpi_changes_res", "cpix", + "lpi_changes_res", "lpix", + "backspaces_with_bs", "OTbs", + "crt_no_scrolling", "OTns", + "no_correctly_working_cr", "OTnc", + "gnu_has_meta_key", "OTMT", + "linefeed_is_newline", "OTNL", + "has_hardware_tabs", "OTpt", + "return_does_clr_eol", "OTxr", +} + +// numCapNames are the num term cap names. +var numCapNames = [...]string{ + "columns", "cols", + "init_tabs", "it", + "lines", "lines", + "lines_of_memory", "lm", + "magic_cookie_glitch", "xmc", + "padding_baud_rate", "pb", + "virtual_terminal", "vt", + "width_status_line", "wsl", + "num_labels", "nlab", + "label_height", "lh", + "label_width", "lw", + "max_attributes", "ma", + "maximum_windows", "wnum", + "max_colors", "colors", + "max_pairs", "pairs", + "no_color_video", "ncv", + "buffer_capacity", "bufsz", + "dot_vert_spacing", "spinv", + "dot_horz_spacing", "spinh", + "max_micro_address", "maddr", + "max_micro_jump", "mjump", + "micro_col_size", "mcs", + "micro_line_size", "mls", + "number_of_pins", "npins", + "output_res_char", "orc", + "output_res_line", "orl", + "output_res_horz_inch", "orhi", + "output_res_vert_inch", "orvi", + "print_rate", "cps", + "wide_char_size", "widcs", + "buttons", "btns", + "bit_image_entwining", "bitwin", + "bit_image_type", "bitype", + "magic_cookie_glitch_ul", "OTug", + "carriage_return_delay", "OTdC", + "new_line_delay", "OTdN", + "backspace_delay", "OTdB", + "horizontal_tab_delay", "OTdT", + "number_of_function_keys", "OTkn", +} + +// stringCapNames are the string term cap names. 
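As an aside before the string-capability table below: each of these name tables is a flat array of (long name, short name) pairs indexed by the capability constants above, so capability c owns entries 2*c and 2*c+1. A minimal lookup sketch under that assumption (the helper is ours for illustration only, and it assumes the bool constants start at AutoLeftMargin, matching the leading "auto_left_margin", "bw" pair):

// boolCapName returns the long and short terminfo names for a bool
// capability, e.g. boolCapName(AutoLeftMargin) == ("auto_left_margin", "bw").
func boolCapName(c int) (long, short string) {
	return boolCapNames[2*c], boolCapNames[2*c+1]
}

The same paired layout holds for numCapNames above and for the stringCapNames table that follows.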
+var stringCapNames = [...]string{ + "back_tab", "cbt", + "bell", "bel", + "carriage_return", "cr", + "change_scroll_region", "csr", + "clear_all_tabs", "tbc", + "clear_screen", "clear", + "clr_eol", "el", + "clr_eos", "ed", + "column_address", "hpa", + "command_character", "cmdch", + "cursor_address", "cup", + "cursor_down", "cud1", + "cursor_home", "home", + "cursor_invisible", "civis", + "cursor_left", "cub1", + "cursor_mem_address", "mrcup", + "cursor_normal", "cnorm", + "cursor_right", "cuf1", + "cursor_to_ll", "ll", + "cursor_up", "cuu1", + "cursor_visible", "cvvis", + "delete_character", "dch1", + "delete_line", "dl1", + "dis_status_line", "dsl", + "down_half_line", "hd", + "enter_alt_charset_mode", "smacs", + "enter_blink_mode", "blink", + "enter_bold_mode", "bold", + "enter_ca_mode", "smcup", + "enter_delete_mode", "smdc", + "enter_dim_mode", "dim", + "enter_insert_mode", "smir", + "enter_secure_mode", "invis", + "enter_protected_mode", "prot", + "enter_reverse_mode", "rev", + "enter_standout_mode", "smso", + "enter_underline_mode", "smul", + "erase_chars", "ech", + "exit_alt_charset_mode", "rmacs", + "exit_attribute_mode", "sgr0", + "exit_ca_mode", "rmcup", + "exit_delete_mode", "rmdc", + "exit_insert_mode", "rmir", + "exit_standout_mode", "rmso", + "exit_underline_mode", "rmul", + "flash_screen", "flash", + "form_feed", "ff", + "from_status_line", "fsl", + "init_1string", "is1", + "init_2string", "is2", + "init_3string", "is3", + "init_file", "if", + "insert_character", "ich1", + "insert_line", "il1", + "insert_padding", "ip", + "key_backspace", "kbs", + "key_catab", "ktbc", + "key_clear", "kclr", + "key_ctab", "kctab", + "key_dc", "kdch1", + "key_dl", "kdl1", + "key_down", "kcud1", + "key_eic", "krmir", + "key_eol", "kel", + "key_eos", "ked", + "key_f0", "kf0", + "key_f1", "kf1", + "key_f10", "kf10", + "key_f2", "kf2", + "key_f3", "kf3", + "key_f4", "kf4", + "key_f5", "kf5", + "key_f6", "kf6", + "key_f7", "kf7", + "key_f8", "kf8", + "key_f9", "kf9", + "key_home", "khome", + "key_ic", "kich1", + "key_il", "kil1", + "key_left", "kcub1", + "key_ll", "kll", + "key_npage", "knp", + "key_ppage", "kpp", + "key_right", "kcuf1", + "key_sf", "kind", + "key_sr", "kri", + "key_stab", "khts", + "key_up", "kcuu1", + "keypad_local", "rmkx", + "keypad_xmit", "smkx", + "lab_f0", "lf0", + "lab_f1", "lf1", + "lab_f10", "lf10", + "lab_f2", "lf2", + "lab_f3", "lf3", + "lab_f4", "lf4", + "lab_f5", "lf5", + "lab_f6", "lf6", + "lab_f7", "lf7", + "lab_f8", "lf8", + "lab_f9", "lf9", + "meta_off", "rmm", + "meta_on", "smm", + "newline", "nel", + "pad_char", "pad", + "parm_dch", "dch", + "parm_delete_line", "dl", + "parm_down_cursor", "cud", + "parm_ich", "ich", + "parm_index", "indn", + "parm_insert_line", "il", + "parm_left_cursor", "cub", + "parm_right_cursor", "cuf", + "parm_rindex", "rin", + "parm_up_cursor", "cuu", + "pkey_key", "pfkey", + "pkey_local", "pfloc", + "pkey_xmit", "pfx", + "print_screen", "mc0", + "prtr_off", "mc4", + "prtr_on", "mc5", + "repeat_char", "rep", + "reset_1string", "rs1", + "reset_2string", "rs2", + "reset_3string", "rs3", + "reset_file", "rf", + "restore_cursor", "rc", + "row_address", "vpa", + "save_cursor", "sc", + "scroll_forward", "ind", + "scroll_reverse", "ri", + "set_attributes", "sgr", + "set_tab", "hts", + "set_window", "wind", + "tab", "ht", + "to_status_line", "tsl", + "underline_char", "uc", + "up_half_line", "hu", + "init_prog", "iprog", + "key_a1", "ka1", + "key_a3", "ka3", + "key_b2", "kb2", + "key_c1", "kc1", + "key_c3", "kc3", + "prtr_non", "mc5p", + 
"char_padding", "rmp", + "acs_chars", "acsc", + "plab_norm", "pln", + "key_btab", "kcbt", + "enter_xon_mode", "smxon", + "exit_xon_mode", "rmxon", + "enter_am_mode", "smam", + "exit_am_mode", "rmam", + "xon_character", "xonc", + "xoff_character", "xoffc", + "ena_acs", "enacs", + "label_on", "smln", + "label_off", "rmln", + "key_beg", "kbeg", + "key_cancel", "kcan", + "key_close", "kclo", + "key_command", "kcmd", + "key_copy", "kcpy", + "key_create", "kcrt", + "key_end", "kend", + "key_enter", "kent", + "key_exit", "kext", + "key_find", "kfnd", + "key_help", "khlp", + "key_mark", "kmrk", + "key_message", "kmsg", + "key_move", "kmov", + "key_next", "knxt", + "key_open", "kopn", + "key_options", "kopt", + "key_previous", "kprv", + "key_print", "kprt", + "key_redo", "krdo", + "key_reference", "kref", + "key_refresh", "krfr", + "key_replace", "krpl", + "key_restart", "krst", + "key_resume", "kres", + "key_save", "ksav", + "key_suspend", "kspd", + "key_undo", "kund", + "key_sbeg", "kBEG", + "key_scancel", "kCAN", + "key_scommand", "kCMD", + "key_scopy", "kCPY", + "key_screate", "kCRT", + "key_sdc", "kDC", + "key_sdl", "kDL", + "key_select", "kslt", + "key_send", "kEND", + "key_seol", "kEOL", + "key_sexit", "kEXT", + "key_sfind", "kFND", + "key_shelp", "kHLP", + "key_shome", "kHOM", + "key_sic", "kIC", + "key_sleft", "kLFT", + "key_smessage", "kMSG", + "key_smove", "kMOV", + "key_snext", "kNXT", + "key_soptions", "kOPT", + "key_sprevious", "kPRV", + "key_sprint", "kPRT", + "key_sredo", "kRDO", + "key_sreplace", "kRPL", + "key_sright", "kRIT", + "key_srsume", "kRES", + "key_ssave", "kSAV", + "key_ssuspend", "kSPD", + "key_sundo", "kUND", + "req_for_input", "rfi", + "key_f11", "kf11", + "key_f12", "kf12", + "key_f13", "kf13", + "key_f14", "kf14", + "key_f15", "kf15", + "key_f16", "kf16", + "key_f17", "kf17", + "key_f18", "kf18", + "key_f19", "kf19", + "key_f20", "kf20", + "key_f21", "kf21", + "key_f22", "kf22", + "key_f23", "kf23", + "key_f24", "kf24", + "key_f25", "kf25", + "key_f26", "kf26", + "key_f27", "kf27", + "key_f28", "kf28", + "key_f29", "kf29", + "key_f30", "kf30", + "key_f31", "kf31", + "key_f32", "kf32", + "key_f33", "kf33", + "key_f34", "kf34", + "key_f35", "kf35", + "key_f36", "kf36", + "key_f37", "kf37", + "key_f38", "kf38", + "key_f39", "kf39", + "key_f40", "kf40", + "key_f41", "kf41", + "key_f42", "kf42", + "key_f43", "kf43", + "key_f44", "kf44", + "key_f45", "kf45", + "key_f46", "kf46", + "key_f47", "kf47", + "key_f48", "kf48", + "key_f49", "kf49", + "key_f50", "kf50", + "key_f51", "kf51", + "key_f52", "kf52", + "key_f53", "kf53", + "key_f54", "kf54", + "key_f55", "kf55", + "key_f56", "kf56", + "key_f57", "kf57", + "key_f58", "kf58", + "key_f59", "kf59", + "key_f60", "kf60", + "key_f61", "kf61", + "key_f62", "kf62", + "key_f63", "kf63", + "clr_bol", "el1", + "clear_margins", "mgc", + "set_left_margin", "smgl", + "set_right_margin", "smgr", + "label_format", "fln", + "set_clock", "sclk", + "display_clock", "dclk", + "remove_clock", "rmclk", + "create_window", "cwin", + "goto_window", "wingo", + "hangup", "hup", + "dial_phone", "dial", + "quick_dial", "qdial", + "tone", "tone", + "pulse", "pulse", + "flash_hook", "hook", + "fixed_pause", "pause", + "wait_tone", "wait", + "user0", "u0", + "user1", "u1", + "user2", "u2", + "user3", "u3", + "user4", "u4", + "user5", "u5", + "user6", "u6", + "user7", "u7", + "user8", "u8", + "user9", "u9", + "orig_pair", "op", + "orig_colors", "oc", + "initialize_color", "initc", + "initialize_pair", "initp", + "set_color_pair", "scp", + 
"set_foreground", "setf", + "set_background", "setb", + "change_char_pitch", "cpi", + "change_line_pitch", "lpi", + "change_res_horz", "chr", + "change_res_vert", "cvr", + "define_char", "defc", + "enter_doublewide_mode", "swidm", + "enter_draft_quality", "sdrfq", + "enter_italics_mode", "sitm", + "enter_leftward_mode", "slm", + "enter_micro_mode", "smicm", + "enter_near_letter_quality", "snlq", + "enter_normal_quality", "snrmq", + "enter_shadow_mode", "sshm", + "enter_subscript_mode", "ssubm", + "enter_superscript_mode", "ssupm", + "enter_upward_mode", "sum", + "exit_doublewide_mode", "rwidm", + "exit_italics_mode", "ritm", + "exit_leftward_mode", "rlm", + "exit_micro_mode", "rmicm", + "exit_shadow_mode", "rshm", + "exit_subscript_mode", "rsubm", + "exit_superscript_mode", "rsupm", + "exit_upward_mode", "rum", + "micro_column_address", "mhpa", + "micro_down", "mcud1", + "micro_left", "mcub1", + "micro_right", "mcuf1", + "micro_row_address", "mvpa", + "micro_up", "mcuu1", + "order_of_pins", "porder", + "parm_down_micro", "mcud", + "parm_left_micro", "mcub", + "parm_right_micro", "mcuf", + "parm_up_micro", "mcuu", + "select_char_set", "scs", + "set_bottom_margin", "smgb", + "set_bottom_margin_parm", "smgbp", + "set_left_margin_parm", "smglp", + "set_right_margin_parm", "smgrp", + "set_top_margin", "smgt", + "set_top_margin_parm", "smgtp", + "start_bit_image", "sbim", + "start_char_set_def", "scsd", + "stop_bit_image", "rbim", + "stop_char_set_def", "rcsd", + "subscript_characters", "subcs", + "superscript_characters", "supcs", + "these_cause_cr", "docr", + "zero_motion", "zerom", + "char_set_names", "csnm", + "key_mouse", "kmous", + "mouse_info", "minfo", + "req_mouse_pos", "reqmp", + "get_mouse", "getm", + "set_a_foreground", "setaf", + "set_a_background", "setab", + "pkey_plab", "pfxl", + "device_type", "devt", + "code_set_init", "csin", + "set0_des_seq", "s0ds", + "set1_des_seq", "s1ds", + "set2_des_seq", "s2ds", + "set3_des_seq", "s3ds", + "set_lr_margin", "smglr", + "set_tb_margin", "smgtb", + "bit_image_repeat", "birep", + "bit_image_newline", "binel", + "bit_image_carriage_return", "bicr", + "color_names", "colornm", + "define_bit_image_region", "defbi", + "end_bit_image_region", "endbi", + "set_color_band", "setcolor", + "set_page_length", "slines", + "display_pc_char", "dispc", + "enter_pc_charset_mode", "smpch", + "exit_pc_charset_mode", "rmpch", + "enter_scancode_mode", "smsc", + "exit_scancode_mode", "rmsc", + "pc_term_options", "pctrm", + "scancode_escape", "scesc", + "alt_scancode_esc", "scesa", + "enter_horizontal_hl_mode", "ehhlm", + "enter_left_hl_mode", "elhlm", + "enter_low_hl_mode", "elohlm", + "enter_right_hl_mode", "erhlm", + "enter_top_hl_mode", "ethlm", + "enter_vertical_hl_mode", "evhlm", + "set_a_attributes", "sgr1", + "set_pglen_inch", "slength", + "termcap_init2", "OTi2", + "termcap_reset", "OTrs", + "linefeed_if_not_lf", "OTnl", + "backspace_if_not_bs", "OTbc", + "other_non_function_keys", "OTko", + "arrow_key_map", "OTma", + "acs_ulcorner", "OTG2", + "acs_llcorner", "OTG3", + "acs_urcorner", "OTG1", + "acs_lrcorner", "OTG4", + "acs_ltee", "OTGR", + "acs_rtee", "OTGL", + "acs_btee", "OTGU", + "acs_ttee", "OTGD", + "acs_hline", "OTGH", + "acs_vline", "OTGV", + "acs_plus", "OTGC", + "memory_lock", "meml", + "memory_unlock", "memu", + "box_chars_1", "box1", +} diff --git a/vendor/github.com/xo/terminfo/color.go b/vendor/github.com/xo/terminfo/color.go new file mode 100644 index 000000000..76c439fc9 --- /dev/null +++ b/vendor/github.com/xo/terminfo/color.go @@ -0,0 
+1,88 @@ +package terminfo + +import ( + "os" + "strconv" + "strings" +) + +// ColorLevel is the color level supported by a terminal. +type ColorLevel uint + +// ColorLevel values. +const ( + ColorLevelNone ColorLevel = iota + ColorLevelBasic + ColorLevelHundreds + ColorLevelMillions +) + +// String satisfies the Stringer interface. +func (c ColorLevel) String() string { + switch c { + case ColorLevelBasic: + return "basic" + case ColorLevelHundreds: + return "hundreds" + case ColorLevelMillions: + return "millions" + } + return "none" +} + +// ChromaFormatterName returns the github.com/alecthomas/chroma compatible +// formatter name for the color level. +func (c ColorLevel) ChromaFormatterName() string { + switch c { + case ColorLevelBasic: + return "terminal" + case ColorLevelHundreds: + return "terminal256" + case ColorLevelMillions: + return "terminal16m" + } + return "noop" +} + +// ColorLevelFromEnv returns the color level as determined by the COLORTERM, +// FORCE_COLOR, and TERM_PROGRAM environment variables, falling back to the +// max_colors capability of the terminfo entry named by TERM. +func ColorLevelFromEnv() (ColorLevel, error) { + // check for overriding environment variables + colorTerm, termProg, forceColor := os.Getenv("COLORTERM"), os.Getenv("TERM_PROGRAM"), os.Getenv("FORCE_COLOR") + switch { + case strings.Contains(colorTerm, "truecolor") || strings.Contains(colorTerm, "24bit") || termProg == "Hyper": + return ColorLevelMillions, nil + case colorTerm != "" || forceColor != "": + return ColorLevelBasic, nil + case termProg == "Apple_Terminal": + return ColorLevelHundreds, nil + case termProg == "iTerm.app": + ver := os.Getenv("TERM_PROGRAM_VERSION") + if ver == "" { + return ColorLevelHundreds, nil + } + i, err := strconv.Atoi(strings.Split(ver, ".")[0]) + if err != nil { + return ColorLevelNone, ErrInvalidTermProgramVersion + } + if i == 3 { + return ColorLevelMillions, nil + } + return ColorLevelHundreds, nil + } + // otherwise determine from TERM's max_colors capability + if term := os.Getenv("TERM"); term != "" { + ti, err := Load(term) + if err != nil { + return ColorLevelNone, err + } + v, ok := ti.Nums[MaxColors] + switch { + case !ok || v <= 16: + return ColorLevelNone, nil + case ok && v >= 256: + return ColorLevelHundreds, nil + } + } + return ColorLevelBasic, nil +} diff --git a/vendor/github.com/xo/terminfo/dec.go b/vendor/github.com/xo/terminfo/dec.go new file mode 100644 index 000000000..dacc88e38 --- /dev/null +++ b/vendor/github.com/xo/terminfo/dec.go @@ -0,0 +1,245 @@ +package terminfo + +import ( + "sort" +) + +const ( + // maxFileLength is the max file length. + maxFileLength = 4096 + // magic is the file magic for terminfo files. + magic = 0o432 + // magicExtended is the file magic for terminfo files with the extended + // number format. + magicExtended = 0o1036 +) + +// header fields. +const ( + fieldMagic = iota + fieldNameSize + fieldBoolCount + fieldNumCount + fieldStringCount + fieldTableSize +) + +// header extended fields. +const ( + fieldExtBoolCount = iota + fieldExtNumCount + fieldExtStringCount + fieldExtOffsetCount + fieldExtTableSize +) + +// hasInvalidCaps determines if the capabilities in h are invalid. +func hasInvalidCaps(h []int) bool { + return h[fieldBoolCount] > CapCountBool || + h[fieldNumCount] > CapCountNum || + h[fieldStringCount] > CapCountString +} + +// capLength returns the total length of the capabilities in bytes.
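A short usage sketch for the detection logic above, relying only on the API shown in color.go (ColorLevelFromEnv and ColorLevel.ChromaFormatterName); illustrative, not part of the vendored package:

package main

import (
	"fmt"

	"github.com/xo/terminfo"
)

func main() {
	// Resolve the color level from COLORTERM, FORCE_COLOR, TERM_PROGRAM,
	// or the terminfo entry named by TERM.
	level, err := terminfo.ColorLevelFromEnv()
	if err != nil {
		fmt.Println("detect:", err)
		return
	}
	// On a truecolor terminal this prints "millions terminal16m".
	fmt.Println(level, level.ChromaFormatterName())
}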
+func capLength(h []int) int { + return h[fieldNameSize] + + h[fieldBoolCount] + + (h[fieldNameSize]+h[fieldBoolCount])%2 + // account for word align + h[fieldNumCount]*2 + + h[fieldStringCount]*2 + + h[fieldTableSize] +} + +// hasInvalidExtOffset determines if the extended offset fields are invalid. +func hasInvalidExtOffset(h []int) bool { + return h[fieldExtBoolCount]+ + h[fieldExtNumCount]+ + h[fieldExtStringCount]*2 != h[fieldExtOffsetCount] +} + +// extCapLength returns the total length of extended capabilities in bytes. +func extCapLength(h []int, numWidth int) int { + return h[fieldExtBoolCount] + + h[fieldExtBoolCount]%2 + // account for word align + h[fieldExtNumCount]*(numWidth/8) + + h[fieldExtOffsetCount]*2 + + h[fieldExtTableSize] +} + +// findNull finds the position of null in buf. +func findNull(buf []byte, i int) int { + for ; i < len(buf); i++ { + if buf[i] == 0 { + return i + } + } + return -1 +} + +// readStrings decodes n strings from string data table buf using the indexes in idx. +func readStrings(idx []int, buf []byte, n int) (map[int][]byte, int, error) { + var last int + m := make(map[int][]byte) + for i := 0; i < n; i++ { + start := idx[i] + if start < 0 { + continue + } + if end := findNull(buf, start); end != -1 { + m[i], last = buf[start:end], end+1 + } else { + return nil, 0, ErrInvalidStringTable + } + } + return m, last, nil +} + +// decoder holds state info while decoding a terminfo file. +type decoder struct { + buf []byte + pos int + n int +} + +// readBytes reads the next n bytes of buf, incrementing pos by n. +func (d *decoder) readBytes(n int) ([]byte, error) { + if d.n < d.pos+n { + return nil, ErrUnexpectedFileEnd + } + n, d.pos = d.pos, d.pos+n + return d.buf[n:d.pos], nil +} + +// readInts reads n ints, each w bits wide. +func (d *decoder) readInts(n, w int) ([]int, error) { + w /= 8 + l := n * w + buf, err := d.readBytes(l) + if err != nil { + return nil, err + } + // align + d.pos += d.pos % 2 + z := make([]int, n) + for i, j := 0, 0; i < l; i, j = i+w, j+1 { + switch w { + case 1: + z[i] = int(buf[i]) + case 2: + z[j] = int(int16(buf[i+1])<<8 | int16(buf[i])) + case 4: + z[j] = int(buf[i+3])<<24 | int(buf[i+2])<<16 | int(buf[i+1])<<8 | int(buf[i]) + } + } + return z, nil +} + +// readBools reads the next n bools. +func (d *decoder) readBools(n int) (map[int]bool, map[int]bool, error) { + buf, err := d.readInts(n, 8) + if err != nil { + return nil, nil, err + } + // process + bools, boolsM := make(map[int]bool), make(map[int]bool) + for i, b := range buf { + bools[i] = b == 1 + if int8(b) == -2 { + boolsM[i] = true + } + } + return bools, boolsM, nil + +} + +// readNums reads the next n nums. +func (d *decoder) readNums(n, w int) (map[int]int, map[int]bool, error) { + buf, err := d.readInts(n, w) + if err != nil { + return nil, nil, err + } + // process + nums, numsM := make(map[int]int), make(map[int]bool) + for i := 0; i < n; i++ { + nums[i] = buf[i] + if buf[i] == -2 { + numsM[i] = true + } + } + return nums, numsM, nil +} + +// readStringTable reads the string data for n strings and the accompanying data +// table of length sz.
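The decoder above reads legacy numeric capabilities as little-endian signed 16-bit values (the w == 2 arm of readInts); negative values are sentinels, with -1 conventionally marking an absent capability and -2 routed into the canceled (*M) maps by readNums. A standalone sketch of that decoding step, for illustration only:

// decodeInt16LE mirrors the two-byte arm of readInts: terminfo's legacy
// format stores each number as a little-endian int16, so bytes 0xFF 0xFF
// decode to -1 (absent) and 0xFE 0xFF to -2 (canceled).
func decodeInt16LE(b []byte) int {
	return int(int16(b[1])<<8 | int16(b[0]))
}

For example, decodeInt16LE([]byte{0x00, 0x01}) yields 256.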
+func (d *decoder) readStringTable(n, sz int) ([][]byte, []int, error) {
+ buf, err := d.readInts(n, 16)
+ if err != nil {
+ return nil, nil, err
+ }
+ // read string data table
+ data, err := d.readBytes(sz)
+ if err != nil {
+ return nil, nil, err
+ }
+ // align
+ d.pos += d.pos % 2
+ // process
+ s := make([][]byte, n)
+ var m []int
+ for i := 0; i < n; i++ {
+ start := buf[i]
+ if start == -2 {
+ m = append(m, i)
+ } else if start >= 0 {
+ if end := findNull(data, start); end != -1 {
+ s[i] = data[start:end]
+ } else {
+ return nil, nil, ErrInvalidStringTable
+ }
+ }
+ }
+ return s, m, nil
+}
+
+// readStrings reads the next n strings and processes the string data table of
+// length sz.
+func (d *decoder) readStrings(n, sz int) (map[int][]byte, map[int]bool, error) {
+ s, m, err := d.readStringTable(n, sz)
+ if err != nil {
+ return nil, nil, err
+ }
+ strs := make(map[int][]byte)
+ for k, v := range s {
+ if k == AcsChars {
+ v = canonicalizeAscChars(v)
+ }
+ strs[k] = v
+ }
+ strsM := make(map[int]bool, len(m))
+ for _, k := range m {
+ strsM[k] = true
+ }
+ return strs, strsM, nil
+}
+
+// canonicalizeAscChars reorders chars so that they are unique and in sorted
+// order.
+//
+// see repair_ascc in ncurses-6.3/progs/dump_entry.c
+func canonicalizeAscChars(z []byte) []byte {
+ var c []byte
+ enc := make(map[byte]byte, len(z)/2)
+ for i := 0; i < len(z); i += 2 {
+ if _, ok := enc[z[i]]; !ok {
+ a, b := z[i], z[i+1]
+ // log.Printf(">>> a: %d %c, b: %d %c", a, a, b, b)
+ c, enc[a] = append(c, b), b
+ }
+ }
+ sort.Slice(c, func(i, j int) bool {
+ return c[i] < c[j]
+ })
+ r := make([]byte, 2*len(c))
+ for i := 0; i < len(c); i++ {
+ r[i*2], r[i*2+1] = c[i], enc[c[i]]
+ }
+ return r
+}
diff --git a/vendor/github.com/xo/terminfo/load.go b/vendor/github.com/xo/terminfo/load.go
new file mode 100644
index 000000000..d7cd266cb
--- /dev/null
+++ b/vendor/github.com/xo/terminfo/load.go
@@ -0,0 +1,64 @@
+package terminfo
+
+import (
+ "os"
+ "os/user"
+ "path"
+ "strings"
+ "sync"
+)
+
+// termCache is the terminfo cache.
+var termCache = struct {
+ db map[string]*Terminfo
+ sync.RWMutex
+}{
+ db: make(map[string]*Terminfo),
+}
+
+// Load follows the behavior described in terminfo(5) to find the correct
+// terminfo file using the name, reads the file and then returns a Terminfo
+// struct that describes the file.
+func Load(name string) (*Terminfo, error) {
+ if name == "" {
+ return nil, ErrEmptyTermName
+ }
+ termCache.RLock()
+ ti, ok := termCache.db[name]
+ termCache.RUnlock()
+ if ok {
+ return ti, nil
+ }
+ var checkDirs []string
+ // check $TERMINFO
+ if dir := os.Getenv("TERMINFO"); dir != "" {
+ checkDirs = append(checkDirs, dir)
+ }
+ // check $HOME/.terminfo
+ u, err := user.Current()
+ if err != nil {
+ return nil, err
+ }
+ checkDirs = append(checkDirs, path.Join(u.HomeDir, ".terminfo"))
+ // check $TERMINFO_DIRS
+ if dirs := os.Getenv("TERMINFO_DIRS"); dirs != "" {
+ checkDirs = append(checkDirs, strings.Split(dirs, ":")...)
+ }
+ // check fallback directories
+ checkDirs = append(checkDirs, "/etc/terminfo", "/lib/terminfo", "/usr/share/terminfo")
+ for _, dir := range checkDirs {
+ ti, err = Open(dir, name)
+ if err != nil && err != ErrFileNotFound && !os.IsNotExist(err) {
+ return nil, err
+ } else if err == nil {
+ return ti, nil
+ }
+ }
+ return nil, ErrDatabaseDirectoryNotFound
+}
+
+// LoadFromEnv loads the terminal info based on the name contained in
+// environment variable TERM.
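+// It is a convenience wrapper around Load(os.Getenv("TERM")).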
+func LoadFromEnv() (*Terminfo, error) {
+ return Load(os.Getenv("TERM"))
+}
diff --git a/vendor/github.com/xo/terminfo/param.go b/vendor/github.com/xo/terminfo/param.go
new file mode 100644
index 000000000..ed4cb86b6
--- /dev/null
+++ b/vendor/github.com/xo/terminfo/param.go
@@ -0,0 +1,405 @@
+package terminfo
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+// parametizer represents the scan state for a parameterized string.
+type parametizer struct {
+ // z is the string to parameterize.
+ z []byte
+ // pos is the current position in z.
+ pos int
+ // nest is the current nest level.
+ nest int
+ // s is the variable stack.
+ s stack
+ // skipElse keeps the state of skipping else.
+ skipElse bool
+ // buf is the result buffer.
+ buf *bytes.Buffer
+ // params are the parameters to interpolate.
+ params [9]interface{}
+ // vars are dynamic variables.
+ vars [26]interface{}
+}
+
+// staticVars are the static, global variables.
+var staticVars = struct {
+ vars [26]interface{}
+ sync.Mutex
+}{}
+
+var parametizerPool = sync.Pool{
+ New: func() interface{} {
+ p := new(parametizer)
+ p.buf = bytes.NewBuffer(make([]byte, 0, 45))
+ return p
+ },
+}
+
+// newParametizer returns a new initialized parametizer from the pool.
+func newParametizer(z []byte) *parametizer {
+ p := parametizerPool.Get().(*parametizer)
+ p.z = z
+ return p
+}
+
+// reset resets the parametizer.
+func (p *parametizer) reset() {
+ p.pos, p.nest = 0, 0
+ p.s.reset()
+ p.buf.Reset()
+ p.params, p.vars = [9]interface{}{}, [26]interface{}{}
+ parametizerPool.Put(p)
+}
+
+// stateFn represents the state of the scanner as a function that returns the
+// next state.
+type stateFn func() stateFn
+
+// exec executes the parametizer, interpolating the supplied parameters.
+func (p *parametizer) exec() string {
+ for state := p.scanTextFn; state != nil; {
+ state = state()
+ }
+ return p.buf.String()
+}
+
+// peek returns the next byte.
+func (p *parametizer) peek() (byte, error) {
+ if p.pos >= len(p.z) {
+ return 0, io.EOF
+ }
+ return p.z[p.pos], nil
+}
+
+// writeFrom writes the characters from ppos to pos to the buffer.
+func (p *parametizer) writeFrom(ppos int) {
+ if p.pos > ppos {
+ // append remaining characters.
+ p.buf.Write(p.z[ppos:p.pos])
+ }
+}
+
+func (p *parametizer) scanTextFn() stateFn {
+ ppos := p.pos
+ for {
+ ch, err := p.peek()
+ if err != nil {
+ p.writeFrom(ppos)
+ return nil
+ }
+ if ch == '%' {
+ p.writeFrom(ppos)
+ p.pos++
+ return p.scanCodeFn
+ }
+ p.pos++
+ }
+}
+
+func (p *parametizer) scanCodeFn() stateFn {
+ ch, err := p.peek()
+ if err != nil {
+ return nil
+ }
+ switch ch {
+ case '%':
+ p.buf.WriteByte('%')
+ case ':':
+ // this character is used to avoid interpreting "%-" and "%+" as operators.
+ // the next character is where the format really begins.
+ p.pos++ + _, err = p.peek() + if err != nil { + return nil + } + return p.scanFormatFn + case '#', ' ', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '.': + return p.scanFormatFn + case 'o': + p.buf.WriteString(strconv.FormatInt(int64(p.s.popInt()), 8)) + case 'd': + p.buf.WriteString(strconv.Itoa(p.s.popInt())) + case 'x': + p.buf.WriteString(strconv.FormatInt(int64(p.s.popInt()), 16)) + case 'X': + p.buf.WriteString(strings.ToUpper(strconv.FormatInt(int64(p.s.popInt()), 16))) + case 's': + p.buf.WriteString(p.s.popString()) + case 'c': + p.buf.WriteByte(p.s.popByte()) + case 'p': + p.pos++ + return p.pushParamFn + case 'P': + p.pos++ + return p.setDsVarFn + case 'g': + p.pos++ + return p.getDsVarFn + case '\'': + p.pos++ + ch, err = p.peek() + if err != nil { + return nil + } + p.s.push(ch) + // skip the '\'' + p.pos++ + case '{': + p.pos++ + return p.pushIntfn + case 'l': + p.s.push(len(p.s.popString())) + case '+': + bi, ai := p.s.popInt(), p.s.popInt() + p.s.push(ai + bi) + case '-': + bi, ai := p.s.popInt(), p.s.popInt() + p.s.push(ai - bi) + case '*': + bi, ai := p.s.popInt(), p.s.popInt() + p.s.push(ai * bi) + case '/': + bi, ai := p.s.popInt(), p.s.popInt() + if bi != 0 { + p.s.push(ai / bi) + } else { + p.s.push(0) + } + case 'm': + bi, ai := p.s.popInt(), p.s.popInt() + if bi != 0 { + p.s.push(ai % bi) + } else { + p.s.push(0) + } + case '&': + bi, ai := p.s.popInt(), p.s.popInt() + p.s.push(ai & bi) + case '|': + bi, ai := p.s.popInt(), p.s.popInt() + p.s.push(ai | bi) + case '^': + bi, ai := p.s.popInt(), p.s.popInt() + p.s.push(ai ^ bi) + case '=': + bi, ai := p.s.popInt(), p.s.popInt() + p.s.push(ai == bi) + case '>': + bi, ai := p.s.popInt(), p.s.popInt() + p.s.push(ai > bi) + case '<': + bi, ai := p.s.popInt(), p.s.popInt() + p.s.push(ai < bi) + case 'A': + bi, ai := p.s.popBool(), p.s.popBool() + p.s.push(ai && bi) + case 'O': + bi, ai := p.s.popBool(), p.s.popBool() + p.s.push(ai || bi) + case '!': + p.s.push(!p.s.popBool()) + case '~': + p.s.push(^p.s.popInt()) + case 'i': + for i := range p.params[:2] { + if n, ok := p.params[i].(int); ok { + p.params[i] = n + 1 + } + } + case '?', ';': + case 't': + return p.scanThenFn + case 'e': + p.skipElse = true + return p.skipTextFn + } + p.pos++ + return p.scanTextFn +} + +func (p *parametizer) scanFormatFn() stateFn { + // the character was already read, so no need to check the error. + ch, _ := p.peek() + // 6 should be the maximum length of a format string, for example "%:-9.9d". 
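+ // the format buffer is seeded with '%' and the current character; the
+ // remaining format characters are appended as they are read below.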
+ f := []byte{'%', ch, 0, 0, 0, 0}
+ var err error
+ for {
+ p.pos++
+ ch, err = p.peek()
+ if err != nil {
+ return nil
+ }
+ f = append(f, ch)
+ switch ch {
+ case 'o', 'd', 'x', 'X':
+ fmt.Fprintf(p.buf, string(f), p.s.popInt())
+ break
+ case 's':
+ fmt.Fprintf(p.buf, string(f), p.s.popString())
+ break
+ case 'c':
+ fmt.Fprintf(p.buf, string(f), p.s.popByte())
+ break
+ }
+ }
+ p.pos++
+ return p.scanTextFn
+}
+
+func (p *parametizer) pushParamFn() stateFn {
+ ch, err := p.peek()
+ if err != nil {
+ return nil
+ }
+ if ai := int(ch - '1'); ai >= 0 && ai < len(p.params) {
+ p.s.push(p.params[ai])
+ } else {
+ p.s.push(0)
+ }
+ // skip the parameter digit
+ p.pos++
+ return p.scanTextFn
+}
+
+func (p *parametizer) setDsVarFn() stateFn {
+ ch, err := p.peek()
+ if err != nil {
+ return nil
+ }
+ if ch >= 'A' && ch <= 'Z' {
+ staticVars.Lock()
+ staticVars.vars[int(ch-'A')] = p.s.pop()
+ staticVars.Unlock()
+ } else if ch >= 'a' && ch <= 'z' {
+ p.vars[int(ch-'a')] = p.s.pop()
+ }
+ p.pos++
+ return p.scanTextFn
+}
+
+func (p *parametizer) getDsVarFn() stateFn {
+ ch, err := p.peek()
+ if err != nil {
+ return nil
+ }
+ var a byte
+ if ch >= 'A' && ch <= 'Z' {
+ a = 'A'
+ } else if ch >= 'a' && ch <= 'z' {
+ a = 'a'
+ }
+ staticVars.Lock()
+ p.s.push(staticVars.vars[int(ch-a)])
+ staticVars.Unlock()
+ p.pos++
+ return p.scanTextFn
+}
+
+func (p *parametizer) pushIntfn() stateFn {
+ var ai int
+ for {
+ ch, err := p.peek()
+ if err != nil {
+ return nil
+ }
+ p.pos++
+ if ch < '0' || ch > '9' {
+ p.s.push(ai)
+ return p.scanTextFn
+ }
+ ai = (ai * 10) + int(ch-'0')
+ }
+}
+
+func (p *parametizer) scanThenFn() stateFn {
+ p.pos++
+ if p.s.popBool() {
+ return p.scanTextFn
+ }
+ p.skipElse = false
+ return p.skipTextFn
+}
+
+func (p *parametizer) skipTextFn() stateFn {
+ for {
+ ch, err := p.peek()
+ if err != nil {
+ return nil
+ }
+ p.pos++
+ if ch == '%' {
+ break
+ }
+ }
+ if p.skipElse {
+ return p.skipElseFn
+ }
+ return p.skipThenFn
+}
+
+func (p *parametizer) skipThenFn() stateFn {
+ ch, err := p.peek()
+ if err != nil {
+ return nil
+ }
+ p.pos++
+ switch ch {
+ case ';':
+ if p.nest == 0 {
+ return p.scanTextFn
+ }
+ p.nest--
+ case '?':
+ p.nest++
+ case 'e':
+ if p.nest == 0 {
+ return p.scanTextFn
+ }
+ }
+ return p.skipTextFn
+}
+
+func (p *parametizer) skipElseFn() stateFn {
+ ch, err := p.peek()
+ if err != nil {
+ return nil
+ }
+ p.pos++
+ switch ch {
+ case ';':
+ if p.nest == 0 {
+ return p.scanTextFn
+ }
+ p.nest--
+ case '?':
+ p.nest++
+ }
+ return p.skipTextFn
+}
+
+// Printf evaluates a parameterized terminfo value z, interpolating params.
+func Printf(z []byte, params ...interface{}) string {
+ p := newParametizer(z)
+ defer p.reset()
+ // make sure we always have 9 parameters -- makes it easier
+ // later to skip checks and it's faster
+ for i := 0; i < len(p.params) && i < len(params); i++ {
+ p.params[i] = params[i]
+ }
+ return p.exec()
+}
+
+// Fprintf evaluates a parameterized terminfo value z, interpolating params and
+// writing to w.
+func Fprintf(w io.Writer, z []byte, params ...interface{}) { + w.Write([]byte(Printf(z, params...))) +} diff --git a/vendor/github.com/xo/terminfo/stack.go b/vendor/github.com/xo/terminfo/stack.go new file mode 100644 index 000000000..a6de39503 --- /dev/null +++ b/vendor/github.com/xo/terminfo/stack.go @@ -0,0 +1,48 @@ +package terminfo + +type stack []interface{} + +func (s *stack) push(v interface{}) { + *s = append(*s, v) +} + +func (s *stack) pop() interface{} { + if len(*s) == 0 { + return nil + } + v := (*s)[len(*s)-1] + *s = (*s)[:len(*s)-1] + return v +} + +func (s *stack) popInt() int { + if i, ok := s.pop().(int); ok { + return i + } + return 0 +} + +func (s *stack) popBool() bool { + if b, ok := s.pop().(bool); ok { + return b + } + return false +} + +func (s *stack) popByte() byte { + if b, ok := s.pop().(byte); ok { + return b + } + return 0 +} + +func (s *stack) popString() string { + if a, ok := s.pop().(string); ok { + return a + } + return "" +} + +func (s *stack) reset() { + *s = (*s)[:0] +} diff --git a/vendor/github.com/xo/terminfo/terminfo.go b/vendor/github.com/xo/terminfo/terminfo.go new file mode 100644 index 000000000..69e3b6064 --- /dev/null +++ b/vendor/github.com/xo/terminfo/terminfo.go @@ -0,0 +1,479 @@ +// Package terminfo implements reading terminfo files in pure go. +package terminfo + +//go:generate go run gen.go + +import ( + "io" + "io/ioutil" + "path" + "strconv" + "strings" +) + +// Error is a terminfo error. +type Error string + +// Error satisfies the error interface. +func (err Error) Error() string { + return string(err) +} + +const ( + // ErrInvalidFileSize is the invalid file size error. + ErrInvalidFileSize Error = "invalid file size" + // ErrUnexpectedFileEnd is the unexpected file end error. + ErrUnexpectedFileEnd Error = "unexpected file end" + // ErrInvalidStringTable is the invalid string table error. + ErrInvalidStringTable Error = "invalid string table" + // ErrInvalidMagic is the invalid magic error. + ErrInvalidMagic Error = "invalid magic" + // ErrInvalidHeader is the invalid header error. + ErrInvalidHeader Error = "invalid header" + // ErrInvalidNames is the invalid names error. + ErrInvalidNames Error = "invalid names" + // ErrInvalidExtendedHeader is the invalid extended header error. + ErrInvalidExtendedHeader Error = "invalid extended header" + // ErrEmptyTermName is the empty term name error. + ErrEmptyTermName Error = "empty term name" + // ErrDatabaseDirectoryNotFound is the database directory not found error. + ErrDatabaseDirectoryNotFound Error = "database directory not found" + // ErrFileNotFound is the file not found error. + ErrFileNotFound Error = "file not found" + // ErrInvalidTermProgramVersion is the invalid TERM_PROGRAM_VERSION error. + ErrInvalidTermProgramVersion Error = "invalid TERM_PROGRAM_VERSION" +) + +// Terminfo describes a terminal's capabilities. +type Terminfo struct { + // File is the original source file. + File string + // Names are the provided cap names. + Names []string + // Bools are the bool capabilities. + Bools map[int]bool + // BoolsM are the missing bool capabilities. + BoolsM map[int]bool + // Nums are the num capabilities. + Nums map[int]int + // NumsM are the missing num capabilities. + NumsM map[int]bool + // Strings are the string capabilities. + Strings map[int][]byte + // StringsM are the missing string capabilities. + StringsM map[int]bool + // ExtBools are the extended bool capabilities. 
+ ExtBools map[int]bool
+ // ExtBoolNames maps extended bool capability indexes to their names.
+ ExtBoolNames map[int][]byte
+ // ExtNums are the extended num capabilities.
+ ExtNums map[int]int
+ // ExtNumNames maps extended num capability indexes to their names.
+ ExtNumNames map[int][]byte
+ // ExtStrings are the extended string capabilities.
+ ExtStrings map[int][]byte
+ // ExtStringNames maps extended string capability indexes to their names.
+ ExtStringNames map[int][]byte
+}
+
+// Decode decodes the terminfo data contained in buf.
+func Decode(buf []byte) (*Terminfo, error) {
+ var err error
+ // check max file length
+ if len(buf) >= maxFileLength {
+ return nil, ErrInvalidFileSize
+ }
+ d := &decoder{
+ buf: buf,
+ n: len(buf),
+ }
+ // read header
+ h, err := d.readInts(6, 16)
+ if err != nil {
+ return nil, err
+ }
+ var numWidth int
+ // check magic
+ switch {
+ case h[fieldMagic] == magic:
+ numWidth = 16
+ case h[fieldMagic] == magicExtended:
+ numWidth = 32
+ default:
+ return nil, ErrInvalidMagic
+ }
+ // check header
+ if hasInvalidCaps(h) {
+ return nil, ErrInvalidHeader
+ }
+ // check remaining length
+ if d.n-d.pos < capLength(h) {
+ return nil, ErrUnexpectedFileEnd
+ }
+ // read names
+ names, err := d.readBytes(h[fieldNameSize])
+ if err != nil {
+ return nil, err
+ }
+ // check that the name is properly terminated
+ i := findNull(names, 0)
+ if i == -1 {
+ return nil, ErrInvalidNames
+ }
+ names = names[:i]
+ // read bool caps
+ bools, boolsM, err := d.readBools(h[fieldBoolCount])
+ if err != nil {
+ return nil, err
+ }
+ // read num caps
+ nums, numsM, err := d.readNums(h[fieldNumCount], numWidth)
+ if err != nil {
+ return nil, err
+ }
+ // read string caps
+ strs, strsM, err := d.readStrings(h[fieldStringCount], h[fieldTableSize])
+ if err != nil {
+ return nil, err
+ }
+ ti := &Terminfo{
+ Names: strings.Split(string(names), "|"),
+ Bools: bools,
+ BoolsM: boolsM,
+ Nums: nums,
+ NumsM: numsM,
+ Strings: strs,
+ StringsM: strsM,
+ }
+ // at the end of the file, so there are no extended caps
+ if d.pos >= d.n {
+ return ti, nil
+ }
+ // decode extended header
+ eh, err := d.readInts(5, 16)
+ if err != nil {
+ return nil, err
+ }
+ // check extended offset field
+ if hasInvalidExtOffset(eh) {
+ return nil, ErrInvalidExtendedHeader
+ }
+ // check extended cap lengths
+ if d.n-d.pos != extCapLength(eh, numWidth) {
+ return nil, ErrInvalidExtendedHeader
+ }
+ // read extended bool caps
+ ti.ExtBools, _, err = d.readBools(eh[fieldExtBoolCount])
+ if err != nil {
+ return nil, err
+ }
+ // read extended num caps
+ ti.ExtNums, _, err = d.readNums(eh[fieldExtNumCount], numWidth)
+ if err != nil {
+ return nil, err
+ }
+ // read extended string data table indexes
+ extIndexes, err := d.readInts(eh[fieldExtOffsetCount], 16)
+ if err != nil {
+ return nil, err
+ }
+ // read string data table
+ extData, err := d.readBytes(eh[fieldExtTableSize])
+ if err != nil {
+ return nil, err
+ }
+ // precautionary check that we are exactly at the end of the file
+ if d.pos != d.n {
+ return nil, ErrUnexpectedFileEnd
+ }
+ var last int
+ // read extended string caps
+ ti.ExtStrings, last, err = readStrings(extIndexes, extData, eh[fieldExtStringCount])
+ if err != nil {
+ return nil, err
+ }
+ extIndexes, extData = extIndexes[eh[fieldExtStringCount]:], extData[last:]
+ // read extended bool names
+ ti.ExtBoolNames, _, err = readStrings(extIndexes, extData, eh[fieldExtBoolCount])
+ if err != nil {
+ return nil, err
+ }
+ extIndexes = extIndexes[eh[fieldExtBoolCount]:]
+ // read extended num names
+ ti.ExtNumNames,
_, err = readStrings(extIndexes, extData, eh[fieldExtNumCount]) + if err != nil { + return nil, err + } + extIndexes = extIndexes[eh[fieldExtNumCount]:] + // read extended string names + ti.ExtStringNames, _, err = readStrings(extIndexes, extData, eh[fieldExtStringCount]) + if err != nil { + return nil, err + } + // extIndexes = extIndexes[eh[fieldExtStringCount]:] + return ti, nil +} + +// Open reads the terminfo file name from the specified directory dir. +func Open(dir, name string) (*Terminfo, error) { + var err error + var buf []byte + var filename string + for _, f := range []string{ + path.Join(dir, name[0:1], name), + path.Join(dir, strconv.FormatUint(uint64(name[0]), 16), name), + } { + buf, err = ioutil.ReadFile(f) + if err == nil { + filename = f + break + } + } + if buf == nil { + return nil, ErrFileNotFound + } + // decode + ti, err := Decode(buf) + if err != nil { + return nil, err + } + // save original file name + ti.File = filename + // add to cache + termCache.Lock() + for _, n := range ti.Names { + termCache.db[n] = ti + } + termCache.Unlock() + return ti, nil +} + +// boolCaps returns all bool and extended capabilities using f to format the +// index key. +func (ti *Terminfo) boolCaps(f func(int) string, extended bool) map[string]bool { + m := make(map[string]bool, len(ti.Bools)+len(ti.ExtBools)) + if !extended { + for k, v := range ti.Bools { + m[f(k)] = v + } + } else { + for k, v := range ti.ExtBools { + m[string(ti.ExtBoolNames[k])] = v + } + } + return m +} + +// BoolCaps returns all bool capabilities. +func (ti *Terminfo) BoolCaps() map[string]bool { + return ti.boolCaps(BoolCapName, false) +} + +// BoolCapsShort returns all bool capabilities, using the short name as the +// index. +func (ti *Terminfo) BoolCapsShort() map[string]bool { + return ti.boolCaps(BoolCapNameShort, false) +} + +// ExtBoolCaps returns all extended bool capabilities. +func (ti *Terminfo) ExtBoolCaps() map[string]bool { + return ti.boolCaps(BoolCapName, true) +} + +// ExtBoolCapsShort returns all extended bool capabilities, using the short +// name as the index. +func (ti *Terminfo) ExtBoolCapsShort() map[string]bool { + return ti.boolCaps(BoolCapNameShort, true) +} + +// numCaps returns all num and extended capabilities using f to format the +// index key. +func (ti *Terminfo) numCaps(f func(int) string, extended bool) map[string]int { + m := make(map[string]int, len(ti.Nums)+len(ti.ExtNums)) + if !extended { + for k, v := range ti.Nums { + m[f(k)] = v + } + } else { + for k, v := range ti.ExtNums { + m[string(ti.ExtNumNames[k])] = v + } + } + return m +} + +// NumCaps returns all num capabilities. +func (ti *Terminfo) NumCaps() map[string]int { + return ti.numCaps(NumCapName, false) +} + +// NumCapsShort returns all num capabilities, using the short name as the +// index. +func (ti *Terminfo) NumCapsShort() map[string]int { + return ti.numCaps(NumCapNameShort, false) +} + +// ExtNumCaps returns all extended num capabilities. +func (ti *Terminfo) ExtNumCaps() map[string]int { + return ti.numCaps(NumCapName, true) +} + +// ExtNumCapsShort returns all extended num capabilities, using the short +// name as the index. +func (ti *Terminfo) ExtNumCapsShort() map[string]int { + return ti.numCaps(NumCapNameShort, true) +} + +// stringCaps returns all string and extended capabilities using f to format the +// index key. 
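+// When extended is true, the extended capability names are used as the keys
+// instead of f.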
+func (ti *Terminfo) stringCaps(f func(int) string, extended bool) map[string][]byte {
+ m := make(map[string][]byte, len(ti.Strings)+len(ti.ExtStrings))
+ if !extended {
+ for k, v := range ti.Strings {
+ m[f(k)] = v
+ }
+ } else {
+ for k, v := range ti.ExtStrings {
+ m[string(ti.ExtStringNames[k])] = v
+ }
+ }
+ return m
+}
+
+// StringCaps returns all string capabilities.
+func (ti *Terminfo) StringCaps() map[string][]byte {
+ return ti.stringCaps(StringCapName, false)
+}
+
+// StringCapsShort returns all string capabilities, using the short name as the
+// index.
+func (ti *Terminfo) StringCapsShort() map[string][]byte {
+ return ti.stringCaps(StringCapNameShort, false)
+}
+
+// ExtStringCaps returns all extended string capabilities.
+func (ti *Terminfo) ExtStringCaps() map[string][]byte {
+ return ti.stringCaps(StringCapName, true)
+}
+
+// ExtStringCapsShort returns all extended string capabilities, using the short
+// name as the index.
+func (ti *Terminfo) ExtStringCapsShort() map[string][]byte {
+ return ti.stringCaps(StringCapNameShort, true)
+}
+
+// Has reports whether the bool cap i is set.
+func (ti *Terminfo) Has(i int) bool {
+ return ti.Bools[i]
+}
+
+// Num returns the num cap i, or -1 if not present.
+func (ti *Terminfo) Num(i int) int {
+ n, ok := ti.Nums[i]
+ if !ok {
+ return -1
+ }
+ return n
+}
+
+// Printf formats the string cap i, interpolating parameters v.
+func (ti *Terminfo) Printf(i int, v ...interface{}) string {
+ return Printf(ti.Strings[i], v...)
+}
+
+// Fprintf prints the string cap i to writer w, interpolating parameters v.
+func (ti *Terminfo) Fprintf(w io.Writer, i int, v ...interface{}) {
+ Fprintf(w, ti.Strings[i], v...)
+}
+
+// Colorf takes a foreground and background color and returns a string that
+// sets them around str for this terminal.
+func (ti *Terminfo) Colorf(fg, bg int, str string) string {
+ maxColors := int(ti.Nums[MaxColors])
+ // map bright colors to lower versions if the color table only holds 8.
+ if maxColors == 8 {
+ if fg > 7 && fg < 16 {
+ fg -= 8
+ }
+ if bg > 7 && bg < 16 {
+ bg -= 8
+ }
+ }
+ var s string
+ if maxColors > fg && fg >= 0 {
+ s += ti.Printf(SetAForeground, fg)
+ }
+ if maxColors > bg && bg >= 0 {
+ s += ti.Printf(SetABackground, bg)
+ }
+ return s + str + ti.Printf(ExitAttributeMode)
+}
+
+// Goto returns a string suitable for addressing the cursor at the given
+// row and column. The origin 0, 0 is in the upper left corner of the screen.
+func (ti *Terminfo) Goto(row, col int) string {
+ return Printf(ti.Strings[CursorAddress], row, col)
+}
+
+// Puts emits the string to the writer, but expands inline padding indications
+// (of the form $<[delay]> where [delay] is msec) to a suitable number of
+// padding characters (usually null bytes) based upon the supplied baud. At
+// high baud rates, more padding characters will be inserted.
+/*func (ti *Terminfo) Puts(w io.Writer, s string, lines, baud int) (int, error) {
+ var err error
+ for {
+ start := strings.Index(s, "$<")
+ if start == -1 {
+ // most strings don't need padding, which is good news!
+ return io.WriteString(w, s)
+ }
+ end := strings.Index(s, ">")
+ if end == -1 {
+ // unterminated... just emit bytes unadulterated.
+ return io.WriteString(w, "$<"+s)
+ }
+ var c int
+ c, err = io.WriteString(w, s[:start])
+ if err != nil {
+ return n + c, err
+ }
+ n += c
+ s = s[start+2:]
+ val := s[:end]
+ s = s[end+1:]
+ var ms int
+ var dot, mandatory, asterisk bool
+ unit := 1000
+ for _, ch := range val {
+ switch {
+ case ch >= '0' && ch <= '9':
+ ms = (ms * 10) + int(ch-'0')
+ if dot {
+ unit *= 10
+ }
+ case ch == '.' && !dot:
+ dot = true
+ case ch == '*' && !asterisk:
+ ms *= lines
+ asterisk = true
+ case ch == '/':
+ mandatory = true
+ default:
+ break
+ }
+ }
+ z, pad := ((baud/8)/unit)*ms, ti.Strings[PadChar]
+ b := make([]byte, len(pad)*z)
+ for bp := copy(b, pad); bp < len(b); bp *= 2 {
+ copy(b[bp:], b[:bp])
+ }
+ if (!ti.Bools[XonXoff] && baud > int(ti.Nums[PaddingBaudRate])) || mandatory {
+ c, err = w.Write(b)
+ if err != nil {
+ return n + c, err
+ }
+ n += c
+ }
+ }
+ return n, nil
+}*/
diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/conversion_dynamic.go b/vendor/github.com/zclconf/go-cty/cty/convert/conversion_dynamic.go
index 3b554e01b..95f3925b5 100644
--- a/vendor/github.com/zclconf/go-cty/cty/convert/conversion_dynamic.go
+++ b/vendor/github.com/zclconf/go-cty/cty/convert/conversion_dynamic.go
@@ -40,6 +40,11 @@ func dynamicPassthrough(in cty.Value, path cty.Path) (cty.Value, error) {
 // perspective, and will panic if it finds that they are not. For example if
 // in is an object and out is a map, this function will still attempt to iterate
 // through both as if they were the same.
+// While the outermost in and out types may be compatible from a Convert
+// perspective, inner types may not match when converting between maps and
+// objects with optional attributes when the optional attributes don't match
+// the map element type. Therefore, in the case of a non-primitive type mismatch,
+// we have to assume conversion was possible and pass the out type through.
 func dynamicReplace(in, out cty.Type) cty.Type {
 if in == cty.DynamicPseudoType || in == cty.NilType {
 // Short circuit this case, there's no point worrying about this if in
@@ -56,11 +61,9 @@ func dynamicReplace(in, out cty.Type) cty.Type {
 // return it unchanged.
 return out
 case out.IsMapType():
- var elemType cty.Type
-
 // Maps are compatible with other maps or objects.
 if in.IsMapType() {
- elemType = dynamicReplace(in.ElementType(), out.ElementType())
+ return cty.Map(dynamicReplace(in.ElementType(), out.ElementType()))
 }
 
 if in.IsObjectType() {
@@ -69,10 +72,10 @@
 types = append(types, t)
 }
 unifiedType, _ := unify(types, true)
- elemType = dynamicReplace(unifiedType, out.ElementType())
+ return cty.Map(dynamicReplace(unifiedType, out.ElementType()))
 }
 
- return cty.Map(elemType)
+ return out
 case out.IsObjectType():
 // Objects are compatible with other objects and maps.
 outTypes := map[string]cty.Type{}
@@ -97,33 +100,29 @@
 return cty.Object(outTypes)
 
 case out.IsSetType():
- var elemType cty.Type
-
 // Sets are compatible with other sets, lists, tuples.
if in.IsSetType() || in.IsListType() { - elemType = dynamicReplace(in.ElementType(), out.ElementType()) + return cty.Set(dynamicReplace(in.ElementType(), out.ElementType())) } if in.IsTupleType() { unifiedType, _ := unify(in.TupleElementTypes(), true) - elemType = dynamicReplace(unifiedType, out.ElementType()) + return cty.Set(dynamicReplace(unifiedType, out.ElementType())) } - return cty.Set(elemType) + return out case out.IsListType(): - var elemType cty.Type - // Lists are compatible with other lists, sets, and tuples. if in.IsSetType() || in.IsListType() { - elemType = dynamicReplace(in.ElementType(), out.ElementType()) + return cty.List(dynamicReplace(in.ElementType(), out.ElementType())) } if in.IsTupleType() { unifiedType, _ := unify(in.TupleElementTypes(), true) - elemType = dynamicReplace(unifiedType, out.ElementType()) + return cty.List(dynamicReplace(unifiedType, out.ElementType())) } - return cty.List(elemType) + return out case out.IsTupleType(): // Tuples are only compatible with other tuples var types []cty.Type diff --git a/vendor/github.com/zclconf/go-cty/cty/primitive_type.go b/vendor/github.com/zclconf/go-cty/cty/primitive_type.go index 3ce2540bb..2beea652d 100644 --- a/vendor/github.com/zclconf/go-cty/cty/primitive_type.go +++ b/vendor/github.com/zclconf/go-cty/cty/primitive_type.go @@ -1,6 +1,8 @@ package cty -import "math/big" +import ( + "math/big" +) // primitiveType is the hidden implementation of the various primitive types // that are exposed as variables in this package. @@ -77,6 +79,18 @@ func rawNumberEqual(a, b *big.Float) bool { case a.Sign() != b.Sign(): return false default: + // First check if these are integers, and compare them directly. Floats + // need a more nuanced approach. + aInt, aAcc := a.Int(nil) + bInt, bAcc := b.Int(nil) + if aAcc != bAcc { + // only one is an exact integer value, so they can't be equal + return false + } + if aAcc == big.Exact { + return aInt.Cmp(bInt) == 0 + } + // This format and precision matches that used by cty/json.Marshal, // and thus achieves our definition of "two numbers are equal if // we'd use the same JSON serialization for both of them". diff --git a/vendor/github.com/zclconf/go-cty/cty/walk.go b/vendor/github.com/zclconf/go-cty/cty/walk.go index 87ba32e79..a18af04d6 100644 --- a/vendor/github.com/zclconf/go-cty/cty/walk.go +++ b/vendor/github.com/zclconf/go-cty/cty/walk.go @@ -213,7 +213,7 @@ func transform(path Path, val Value, t Transformer) (Value, error) { atys := ty.AttributeTypes() newAVs := make(map[string]Value) for name := range atys { - av := val.GetAttr(name) + av := rawVal.GetAttr(name) path := append(path, GetAttrStep{ Name: name, }) diff --git a/vendor/golang.org/x/mod/LICENSE b/vendor/golang.org/x/mod/LICENSE index 6a66aea5e..2a7cf70da 100644 --- a/vendor/golang.org/x/mod/LICENSE +++ b/vendor/golang.org/x/mod/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. 
nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/tools/LICENSE b/vendor/golang.org/x/tools/LICENSE index 6a66aea5e..2a7cf70da 100644 --- a/vendor/golang.org/x/tools/LICENSE +++ b/vendor/golang.org/x/tools/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/tools/go/packages/external.go b/vendor/golang.org/x/tools/go/packages/external.go index c2b4b711b..8f7afcb5d 100644 --- a/vendor/golang.org/x/tools/go/packages/external.go +++ b/vendor/golang.org/x/tools/go/packages/external.go @@ -82,7 +82,7 @@ type DriverResponse struct { type driver func(cfg *Config, patterns ...string) (*DriverResponse, error) // findExternalDriver returns the file path of a tool that supplies -// the build system package structure, or "" if not found." +// the build system package structure, or "" if not found. // If GOPACKAGESDRIVER is set in the environment findExternalTool returns its // value, otherwise it searches for a binary named gopackagesdriver on the PATH. func findExternalDriver(cfg *Config) driver { diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go index 34306ddd3..0b6bfaff8 100644 --- a/vendor/golang.org/x/tools/go/packages/packages.go +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -46,7 +46,6 @@ import ( // // Unfortunately there are a number of open bugs related to // interactions among the LoadMode bits: -// - https://github.com/golang/go/issues/48226 // - https://github.com/golang/go/issues/56633 // - https://github.com/golang/go/issues/56677 // - https://github.com/golang/go/issues/58726 @@ -76,7 +75,7 @@ const ( // NeedTypes adds Types, Fset, and IllTyped. NeedTypes - // NeedSyntax adds Syntax. + // NeedSyntax adds Syntax and Fset. NeedSyntax // NeedTypesInfo adds TypesInfo. @@ -961,12 +960,14 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) { } if ld.requestedMode&NeedTypes == 0 { ld.pkgs[i].Types = nil - ld.pkgs[i].Fset = nil ld.pkgs[i].IllTyped = false } if ld.requestedMode&NeedSyntax == 0 { ld.pkgs[i].Syntax = nil } + if ld.requestedMode&NeedTypes == 0 && ld.requestedMode&NeedSyntax == 0 { + ld.pkgs[i].Fset = nil + } if ld.requestedMode&NeedTypesInfo == 0 { ld.pkgs[i].TypesInfo = nil } @@ -1499,6 +1500,10 @@ func impliedLoadMode(loadMode LoadMode) LoadMode { // All these things require knowing the import graph. loadMode |= NeedImports } + if loadMode&NeedTypes != 0 { + // Types require the GoVersion from Module. 
+ loadMode |= NeedModule + } return loadMode } diff --git a/vendor/golang.org/x/tools/go/packages/visit.go b/vendor/golang.org/x/tools/go/packages/visit.go index a1dcc40b7..df14ffd94 100644 --- a/vendor/golang.org/x/tools/go/packages/visit.go +++ b/vendor/golang.org/x/tools/go/packages/visit.go @@ -49,11 +49,20 @@ func Visit(pkgs []*Package, pre func(*Package) bool, post func(*Package)) { // PrintErrors returns the number of errors printed. func PrintErrors(pkgs []*Package) int { var n int + errModules := make(map[*Module]bool) Visit(pkgs, nil, func(pkg *Package) { for _, err := range pkg.Errors { fmt.Fprintln(os.Stderr, err) n++ } + + // Print pkg.Module.Error once if present. + mod := pkg.Module + if mod != nil && mod.Error != nil && !errModules[mod] { + errModules[mod] = true + fmt.Fprintln(os.Stderr, mod.Error.Err) + n++ + } }) return n } diff --git a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go index d648c3d07..9ada17775 100644 --- a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go +++ b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go @@ -51,7 +51,7 @@ type Path string // // PO package->object Package.Scope.Lookup // OT object->type Object.Type -// TT type->type Type.{Elem,Key,{,{,Recv}Type}Params,Results,Underlying} [EKPRUTrC] +// TT type->type Type.{Elem,Key,{,{,Recv}Type}Params,Results,Underlying,Rhs} [EKPRUTrCa] // TO type->object Type.{At,Field,Method,Obj} [AFMO] // // All valid paths start with a package and end at an object @@ -63,7 +63,7 @@ type Path string // - The only PO operator is Package.Scope.Lookup, which requires an identifier. // - The only OT operator is Object.Type, // which we encode as '.' because dot cannot appear in an identifier. -// - The TT operators are encoded as [EKPRUTrC]; +// - The TT operators are encoded as [EKPRUTrCa]; // two of these ({,Recv}TypeParams) require an integer operand, // which is encoded as a string of decimal digits. 
// - The TO operators are encoded as [AFMO]; @@ -106,6 +106,7 @@ const ( opTypeParam = 'T' // .TypeParams.At(i) (Named, Signature) opRecvTypeParam = 'r' // .RecvTypeParams.At(i) (Signature) opConstraint = 'C' // .Constraint() (TypeParam) + opRhs = 'a' // .Rhs() (Alias) // type->object operators opAt = 'A' // .At(i) (Tuple) @@ -279,21 +280,26 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { path = append(path, opType) T := o.Type() + if alias, ok := T.(*aliases.Alias); ok { + if r := findTypeParam(obj, aliases.TypeParams(alias), path, opTypeParam, nil); r != nil { + return Path(r), nil + } + if r := find(obj, aliases.Rhs(alias), append(path, opRhs), nil); r != nil { + return Path(r), nil + } - if tname.IsAlias() { - // type alias + } else if tname.IsAlias() { + // legacy alias if r := find(obj, T, path, nil); r != nil { return Path(r), nil } - } else { - if named, _ := T.(*types.Named); named != nil { - if r := findTypeParam(obj, named.TypeParams(), path, opTypeParam, nil); r != nil { - // generic named type - return Path(r), nil - } - } + + } else if named, ok := T.(*types.Named); ok { // defined (named) type - if r := find(obj, T.Underlying(), append(path, opUnderlying), nil); r != nil { + if r := findTypeParam(obj, named.TypeParams(), path, opTypeParam, nil); r != nil { + return Path(r), nil + } + if r := find(obj, named.Underlying(), append(path, opUnderlying), nil); r != nil { return Path(r), nil } } @@ -657,6 +663,16 @@ func Object(pkg *types.Package, p Path) (types.Object, error) { } t = named.Underlying() + case opRhs: + if alias, ok := t.(*aliases.Alias); ok { + t = aliases.Rhs(alias) + } else if false && aliases.Enabled() { + // The Enabled check is too expensive, so for now we + // simply assume that aliases are not enabled. + // TODO(adonovan): replace with "if true {" when go1.24 is assured. + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want alias)", code, t, t) + } + case opTypeParam: hasTypeParams, ok := t.(hasTypeParams) // Named, Signature if !ok { diff --git a/vendor/golang.org/x/tools/internal/aliases/aliases.go b/vendor/golang.org/x/tools/internal/aliases/aliases.go index c24c2eee4..f7798e335 100644 --- a/vendor/golang.org/x/tools/internal/aliases/aliases.go +++ b/vendor/golang.org/x/tools/internal/aliases/aliases.go @@ -22,11 +22,17 @@ import ( // GODEBUG=gotypesalias=... by invoking the type checker. The Enabled // function is expensive and should be called once per task (e.g. // package import), not once per call to NewAlias. -func NewAlias(enabled bool, pos token.Pos, pkg *types.Package, name string, rhs types.Type) *types.TypeName { +// +// Precondition: enabled || len(tparams)==0. +// If materialized aliases are disabled, there must not be any type parameters. 
+func NewAlias(enabled bool, pos token.Pos, pkg *types.Package, name string, rhs types.Type, tparams []*types.TypeParam) *types.TypeName { if enabled { tname := types.NewTypeName(pos, pkg, name, nil) - newAlias(tname, rhs) + newAlias(tname, rhs, tparams) return tname } + if len(tparams) > 0 { + panic("cannot create an alias with type parameters when gotypesalias is not enabled") + } return types.NewTypeName(pos, pkg, name, rhs) } diff --git a/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go b/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go index c027b9f31..a775fcc4b 100644 --- a/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go +++ b/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go @@ -15,15 +15,21 @@ import ( // It will never be created by go/types. type Alias struct{} -func (*Alias) String() string { panic("unreachable") } -func (*Alias) Underlying() types.Type { panic("unreachable") } -func (*Alias) Obj() *types.TypeName { panic("unreachable") } -func Rhs(alias *Alias) types.Type { panic("unreachable") } +func (*Alias) String() string { panic("unreachable") } +func (*Alias) Underlying() types.Type { panic("unreachable") } +func (*Alias) Obj() *types.TypeName { panic("unreachable") } +func Rhs(alias *Alias) types.Type { panic("unreachable") } +func TypeParams(alias *Alias) *types.TypeParamList { panic("unreachable") } +func SetTypeParams(alias *Alias, tparams []*types.TypeParam) { panic("unreachable") } +func TypeArgs(alias *Alias) *types.TypeList { panic("unreachable") } +func Origin(alias *Alias) *Alias { panic("unreachable") } // Unalias returns the type t for go <=1.21. func Unalias(t types.Type) types.Type { return t } -func newAlias(name *types.TypeName, rhs types.Type) *Alias { panic("unreachable") } +func newAlias(name *types.TypeName, rhs types.Type, tparams []*types.TypeParam) *Alias { + panic("unreachable") +} // Enabled reports whether [NewAlias] should create [types.Alias] types. // diff --git a/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go b/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go index b32995484..31c159e42 100644 --- a/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go +++ b/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go @@ -28,16 +28,51 @@ func Rhs(alias *Alias) types.Type { return Unalias(alias) } +// TypeParams returns the type parameter list of the alias. +func TypeParams(alias *Alias) *types.TypeParamList { + if alias, ok := any(alias).(interface{ TypeParams() *types.TypeParamList }); ok { + return alias.TypeParams() // go1.23+ + } + return nil +} + +// SetTypeParams sets the type parameters of the alias type. +func SetTypeParams(alias *Alias, tparams []*types.TypeParam) { + if alias, ok := any(alias).(interface { + SetTypeParams(tparams []*types.TypeParam) + }); ok { + alias.SetTypeParams(tparams) // go1.23+ + } else if len(tparams) > 0 { + panic("cannot set type parameters of an Alias type in go1.22") + } +} + +// TypeArgs returns the type arguments used to instantiate the Alias type. +func TypeArgs(alias *Alias) *types.TypeList { + if alias, ok := any(alias).(interface{ TypeArgs() *types.TypeList }); ok { + return alias.TypeArgs() // go1.23+ + } + return nil // empty (go1.22) +} + +// Origin returns the generic Alias type of which alias is an instance. +// If alias is not an instance of a generic alias, Origin returns alias. 
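+// Before go1.23, generic alias types do not exist, so alias is always
+// returned unchanged there.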
+func Origin(alias *Alias) *Alias {
+ if alias, ok := any(alias).(interface{ Origin() *types.Alias }); ok {
+ return alias.Origin() // go1.23+
+ }
+ return alias // not an instance of a generic alias (go1.22)
+}
+
 // Unalias is a wrapper of types.Unalias.
 func Unalias(t types.Type) types.Type { return types.Unalias(t) }
 
 // newAlias is an internal wrapper around types.NewAlias.
 // Direct usage is discouraged at the moment.
 // Try to use NewAlias instead.
-func newAlias(tname *types.TypeName, rhs types.Type) *Alias {
+func newAlias(tname *types.TypeName, rhs types.Type, tparams []*types.TypeParam) *Alias {
 a := types.NewAlias(tname, rhs)
- // TODO(go.dev/issue/65455): Remove kludgy workaround to set a.actual as a side-effect.
- Unalias(a)
+ SetTypeParams(a, tparams)
 return a
 }
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go
index deeb67f31..5f283281a 100644
--- a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go
+++ b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go
@@ -2,9 +2,227 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// Indexed binary package export.
-// This file was derived from $GOROOT/src/cmd/compile/internal/gc/iexport.go;
-// see that file for specification of the format.
+// Indexed package export.
+//
+// The indexed export data format is an evolution of the previous
+// binary export data format. Its chief contribution is introducing an
+// index table, which allows efficient random access of individual
+// declarations and inline function bodies. In turn, this allows
+// avoiding unnecessary work for compilation units that import large
+// packages.
+//
+//
+// The top-level data format is structured as:
+//
+// Header struct {
+// Tag byte // 'i'
+// Version uvarint
+// StringSize uvarint
+// DataSize uvarint
+// }
+//
+// Strings [StringSize]byte
+// Data [DataSize]byte
+//
+// MainIndex []struct{
+// PkgPath stringOff
+// PkgName stringOff
+// PkgHeight uvarint
+//
+// Decls []struct{
+// Name stringOff
+// Offset declOff
+// }
+// }
+//
+// Fingerprint [8]byte
+//
+// uvarint means a uint64 written out using uvarint encoding.
+//
+// []T means a uvarint followed by that many T objects. In other
+// words:
+//
+// Len uvarint
+// Elems [Len]T
+//
+// stringOff means a uvarint that indicates an offset within the
+// Strings section. At that offset is another uvarint, followed by
+// that many bytes, which form the string value.
+//
+// declOff means a uvarint that indicates an offset within the Data
+// section where the associated declaration can be found.
+// +// +// There are five kinds of declarations, distinguished by their first +// byte: +// +// type Var struct { +// Tag byte // 'V' +// Pos Pos +// Type typeOff +// } +// +// type Func struct { +// Tag byte // 'F' or 'G' +// Pos Pos +// TypeParams []typeOff // only present if Tag == 'G' +// Signature Signature +// } +// +// type Const struct { +// Tag byte // 'C' +// Pos Pos +// Value Value +// } +// +// type Type struct { +// Tag byte // 'T' or 'U' +// Pos Pos +// TypeParams []typeOff // only present if Tag == 'U' +// Underlying typeOff +// +// Methods []struct{ // omitted if Underlying is an interface type +// Pos Pos +// Name stringOff +// Recv Param +// Signature Signature +// } +// } +// +// type Alias struct { +// Tag byte // 'A' or 'B' +// Pos Pos +// TypeParams []typeOff // only present if Tag == 'B' +// Type typeOff +// } +// +// // "Automatic" declaration of each typeparam +// type TypeParam struct { +// Tag byte // 'P' +// Pos Pos +// Implicit bool +// Constraint typeOff +// } +// +// typeOff means a uvarint that either indicates a predeclared type, +// or an offset into the Data section. If the uvarint is less than +// predeclReserved, then it indicates the index into the predeclared +// types list (see predeclared in bexport.go for order). Otherwise, +// subtracting predeclReserved yields the offset of a type descriptor. +// +// Value means a type, kind, and type-specific value. See +// (*exportWriter).value for details. +// +// +// There are twelve kinds of type descriptors, distinguished by an itag: +// +// type DefinedType struct { +// Tag itag // definedType +// Name stringOff +// PkgPath stringOff +// } +// +// type PointerType struct { +// Tag itag // pointerType +// Elem typeOff +// } +// +// type SliceType struct { +// Tag itag // sliceType +// Elem typeOff +// } +// +// type ArrayType struct { +// Tag itag // arrayType +// Len uint64 +// Elem typeOff +// } +// +// type ChanType struct { +// Tag itag // chanType +// Dir uint64 // 1 RecvOnly; 2 SendOnly; 3 SendRecv +// Elem typeOff +// } +// +// type MapType struct { +// Tag itag // mapType +// Key typeOff +// Elem typeOff +// } +// +// type FuncType struct { +// Tag itag // signatureType +// PkgPath stringOff +// Signature Signature +// } +// +// type StructType struct { +// Tag itag // structType +// PkgPath stringOff +// Fields []struct { +// Pos Pos +// Name stringOff +// Type typeOff +// Embedded bool +// Note stringOff +// } +// } +// +// type InterfaceType struct { +// Tag itag // interfaceType +// PkgPath stringOff +// Embeddeds []struct { +// Pos Pos +// Type typeOff +// } +// Methods []struct { +// Pos Pos +// Name stringOff +// Signature Signature +// } +// } +// +// // Reference to a type param declaration +// type TypeParamType struct { +// Tag itag // typeParamType +// Name stringOff +// PkgPath stringOff +// } +// +// // Instantiation of a generic type (like List[T2] or List[int]) +// type InstanceType struct { +// Tag itag // instanceType +// Pos pos +// TypeArgs []typeOff +// BaseType typeOff +// } +// +// type UnionType struct { +// Tag itag // interfaceType +// Terms []struct { +// tilde bool +// Type typeOff +// } +// } +// +// +// +// type Signature struct { +// Params []Param +// Results []Param +// Variadic bool // omitted if Results is empty +// } +// +// type Param struct { +// Pos Pos +// Name stringOff +// Type typOff +// } +// +// +// Pos encodes a file:line:column triple, incorporating a simple delta +// encoding scheme within a data object. See exportWriter.pos for +// details. 
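+//
+// For example, the header fields above can be read with plain uvarint
+// decoding along these lines (an illustrative sketch only, not part of this
+// package):
+//
+//	r := strings.NewReader(data)
+//	tag, _ := r.ReadByte() // 'i'
+//	version, _ := binary.ReadUvarint(r)
+//	stringSize, _ := binary.ReadUvarint(r)
+//	dataSize, _ := binary.ReadUvarint(r)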
package gcimporter @@ -523,9 +741,22 @@ func (p *iexporter) doDecl(obj types.Object) { } if obj.IsAlias() { - w.tag(aliasTag) + alias, materialized := t.(*aliases.Alias) // may fail when aliases are not enabled + + var tparams *types.TypeParamList + if materialized { + tparams = aliases.TypeParams(alias) + } + if tparams.Len() == 0 { + w.tag(aliasTag) + } else { + w.tag(genericAliasTag) + } w.pos(obj.Pos()) - if alias, ok := t.(*aliases.Alias); ok { + if tparams.Len() > 0 { + w.tparamList(obj.Name(), tparams, obj.Pkg()) + } + if materialized { // Preserve materialized aliases, // even of non-exported types. t = aliases.Rhs(alias) @@ -745,7 +976,13 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { } switch t := t.(type) { case *aliases.Alias: - // TODO(adonovan): support parameterized aliases, following *types.Named. + if targs := aliases.TypeArgs(t); targs.Len() > 0 { + w.startType(instanceType) + w.pos(t.Obj().Pos()) + w.typeList(targs, pkg) + w.typ(aliases.Origin(t), pkg) + return + } w.startType(aliasType) w.qualifiedType(t.Obj()) diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go index 136aa0365..ed2d56295 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. // Indexed package import. -// See cmd/compile/internal/gc/iexport.go for the export data format. +// See iexport.go for the export data format. // This file is a copy of $GOROOT/src/go/internal/gcimporter/iimport.go. @@ -562,14 +562,14 @@ func (r *importReader) obj(name string) { pos := r.pos() switch tag { - case aliasTag: + case aliasTag, genericAliasTag: + var tparams []*types.TypeParam + if tag == genericAliasTag { + tparams = r.tparamList() + } typ := r.typ() - // TODO(adonovan): support generic aliases: - // if tag == genericAliasTag { - // tparams := r.tparamList() - // alias.SetTypeParams(tparams) - // } - r.declare(aliases.NewAlias(r.p.aliases, pos, r.currPkg, name, typ)) + obj := aliases.NewAlias(r.p.aliases, pos, r.currPkg, name, typ, tparams) + r.declare(obj) case constTag: typ, val := r.value() @@ -862,7 +862,7 @@ func (r *importReader) string() string { return r.p.stringAt(r.uint64()) } func (r *importReader) doType(base *types.Named) (res types.Type) { k := r.kind() if debug { - r.p.trace("importing type %d (base: %s)", k, base) + r.p.trace("importing type %d (base: %v)", k, base) r.p.indent++ defer func() { r.p.indent-- diff --git a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go index 2c0770688..f0742f540 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go @@ -52,8 +52,7 @@ func (pr *pkgReader) later(fn func()) { // See cmd/compile/internal/noder.derivedInfo. type derivedInfo struct { - idx pkgbits.Index - needed bool + idx pkgbits.Index } // See cmd/compile/internal/noder.typeInfo. @@ -110,13 +109,17 @@ func readUnifiedPackage(fset *token.FileSet, ctxt *types.Context, imports map[st r := pr.newReader(pkgbits.RelocMeta, pkgbits.PublicRootIdx, pkgbits.SyncPublic) pkg := r.pkg() - r.Bool() // has init + if r.Version().Has(pkgbits.HasInit) { + r.Bool() + } for i, n := 0, r.Len(); i < n; i++ { // As if r.obj(), but avoiding the Scope.Lookup call, // to avoid eager loading of imports. 
r.Sync(pkgbits.SyncObject) - assert(!r.Bool()) + if r.Version().Has(pkgbits.DerivedFuncInstance) { + assert(!r.Bool()) + } r.p.objIdx(r.Reloc(pkgbits.RelocObj)) assert(r.Len() == 0) } @@ -165,7 +168,7 @@ type readerDict struct { // tparams is a slice of the constructed TypeParams for the element. tparams []*types.TypeParam - // devived is a slice of types derived from tparams, which may be + // derived is a slice of types derived from tparams, which may be // instantiated while reading the current element. derived []derivedInfo derivedTypes []types.Type // lazily instantiated from derived @@ -471,7 +474,9 @@ func (r *reader) param() *types.Var { func (r *reader) obj() (types.Object, []types.Type) { r.Sync(pkgbits.SyncObject) - assert(!r.Bool()) + if r.Version().Has(pkgbits.DerivedFuncInstance) { + assert(!r.Bool()) + } pkg, name := r.p.objIdx(r.Reloc(pkgbits.RelocObj)) obj := pkgScope(pkg).Lookup(name) @@ -525,8 +530,12 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) { case pkgbits.ObjAlias: pos := r.pos() + var tparams []*types.TypeParam + if r.Version().Has(pkgbits.AliasTypeParamNames) { + tparams = r.typeParamNames() + } typ := r.typ() - declare(aliases.NewAlias(r.p.aliases, pos, objPkg, objName, typ)) + declare(aliases.NewAlias(r.p.aliases, pos, objPkg, objName, typ, tparams)) case pkgbits.ObjConst: pos := r.pos() @@ -632,7 +641,10 @@ func (pr *pkgReader) objDictIdx(idx pkgbits.Index) *readerDict { dict.derived = make([]derivedInfo, r.Len()) dict.derivedTypes = make([]types.Type, len(dict.derived)) for i := range dict.derived { - dict.derived[i] = derivedInfo{r.Reloc(pkgbits.RelocType), r.Bool()} + dict.derived[i] = derivedInfo{idx: r.Reloc(pkgbits.RelocType)} + if r.Version().Has(pkgbits.DerivedInfoNeeded) { + assert(!r.Bool()) + } } pr.retireReader(r) diff --git a/vendor/golang.org/x/tools/internal/pkgbits/decoder.go b/vendor/golang.org/x/tools/internal/pkgbits/decoder.go index 2acd85851..f6cb37c5c 100644 --- a/vendor/golang.org/x/tools/internal/pkgbits/decoder.go +++ b/vendor/golang.org/x/tools/internal/pkgbits/decoder.go @@ -21,10 +21,7 @@ import ( // export data. type PkgDecoder struct { // version is the file format version. - version uint32 - - // aliases determines whether types.Aliases should be created - aliases bool + version Version // sync indicates whether the file uses sync markers. sync bool @@ -71,12 +68,9 @@ func (pr *PkgDecoder) SyncMarkers() bool { return pr.sync } // NewPkgDecoder returns a PkgDecoder initialized to read the Unified // IR export data from input. pkgPath is the package path for the // compilation unit that produced the export data. -// -// TODO(mdempsky): Remove pkgPath parameter; unneeded since CL 391014. 
func NewPkgDecoder(pkgPath, input string) PkgDecoder { pr := PkgDecoder{ pkgPath: pkgPath, - //aliases: aliases.Enabled(), } // TODO(mdempsky): Implement direct indexing of input string to @@ -84,14 +78,15 @@ func NewPkgDecoder(pkgPath, input string) PkgDecoder { r := strings.NewReader(input) - assert(binary.Read(r, binary.LittleEndian, &pr.version) == nil) + var ver uint32 + assert(binary.Read(r, binary.LittleEndian, &ver) == nil) + pr.version = Version(ver) - switch pr.version { - default: - panic(fmt.Errorf("unsupported version: %v", pr.version)) - case 0: - // no flags - case 1: + if pr.version >= numVersions { + panic(fmt.Errorf("cannot decode %q, export data version %d is greater than maximum supported version %d", pkgPath, pr.version, numVersions-1)) + } + + if pr.version.Has(Flags) { var flags uint32 assert(binary.Read(r, binary.LittleEndian, &flags) == nil) pr.sync = flags&flagSyncMarkers != 0 @@ -106,7 +101,9 @@ func NewPkgDecoder(pkgPath, input string) PkgDecoder { assert(err == nil) pr.elemData = input[pos:] - assert(len(pr.elemData)-8 == int(pr.elemEnds[len(pr.elemEnds)-1])) + + const fingerprintSize = 8 + assert(len(pr.elemData)-fingerprintSize == int(pr.elemEnds[len(pr.elemEnds)-1])) return pr } @@ -140,7 +137,7 @@ func (pr *PkgDecoder) AbsIdx(k RelocKind, idx Index) int { absIdx += int(pr.elemEndsEnds[k-1]) } if absIdx >= int(pr.elemEndsEnds[k]) { - errorf("%v:%v is out of bounds; %v", k, idx, pr.elemEndsEnds) + panicf("%v:%v is out of bounds; %v", k, idx, pr.elemEndsEnds) } return absIdx } @@ -197,9 +194,7 @@ func (pr *PkgDecoder) NewDecoderRaw(k RelocKind, idx Index) Decoder { Idx: idx, } - // TODO(mdempsky) r.data.Reset(...) after #44505 is resolved. - r.Data = *strings.NewReader(pr.DataIdx(k, idx)) - + r.Data.Reset(pr.DataIdx(k, idx)) r.Sync(SyncRelocs) r.Relocs = make([]RelocEnt, r.Len()) for i := range r.Relocs { @@ -248,7 +243,7 @@ type Decoder struct { func (r *Decoder) checkErr(err error) { if err != nil { - errorf("unexpected decoding error: %w", err) + panicf("unexpected decoding error: %w", err) } } @@ -519,3 +514,6 @@ func (pr *PkgDecoder) PeekObj(idx Index) (string, string, CodeObj) { return path, name, tag } + +// Version reports the version of the bitstream. +func (w *Decoder) Version() Version { return w.common.version } diff --git a/vendor/golang.org/x/tools/internal/pkgbits/encoder.go b/vendor/golang.org/x/tools/internal/pkgbits/encoder.go index 6482617a4..c17a12399 100644 --- a/vendor/golang.org/x/tools/internal/pkgbits/encoder.go +++ b/vendor/golang.org/x/tools/internal/pkgbits/encoder.go @@ -12,18 +12,15 @@ import ( "io" "math/big" "runtime" + "strings" ) -// currentVersion is the current version number. -// -// - v0: initial prototype -// -// - v1: adds the flags uint32 word -const currentVersion uint32 = 1 - // A PkgEncoder provides methods for encoding a package's Unified IR // export data. type PkgEncoder struct { + // version of the bitstream. + version Version + // elems holds the bitstream for previously encoded elements. elems [numRelocs][]string @@ -47,8 +44,9 @@ func (pw *PkgEncoder) SyncMarkers() bool { return pw.syncFrames >= 0 } // export data files, but can help diagnosing desync errors in // higher-level Unified IR reader/writer code. If syncFrames is // negative, then sync markers are omitted entirely. 
-func NewPkgEncoder(syncFrames int) PkgEncoder { +func NewPkgEncoder(version Version, syncFrames int) PkgEncoder { return PkgEncoder{ + version: version, stringsIdx: make(map[string]Index), syncFrames: syncFrames, } @@ -64,13 +62,15 @@ func (pw *PkgEncoder) DumpTo(out0 io.Writer) (fingerprint [8]byte) { assert(binary.Write(out, binary.LittleEndian, x) == nil) } - writeUint32(currentVersion) + writeUint32(uint32(pw.version)) - var flags uint32 - if pw.SyncMarkers() { - flags |= flagSyncMarkers + if pw.version.Has(Flags) { + var flags uint32 + if pw.SyncMarkers() { + flags |= flagSyncMarkers + } + writeUint32(flags) } - writeUint32(flags) // Write elemEndsEnds. var sum uint32 @@ -159,7 +159,7 @@ type Encoder struct { // Flush finalizes the element's bitstream and returns its Index. func (w *Encoder) Flush() Index { - var sb bytes.Buffer // TODO(mdempsky): strings.Builder after #44505 is resolved + var sb strings.Builder // Backup the data so we write the relocations at the front. var tmp bytes.Buffer @@ -189,7 +189,7 @@ func (w *Encoder) Flush() Index { func (w *Encoder) checkErr(err error) { if err != nil { - errorf("unexpected encoding error: %v", err) + panicf("unexpected encoding error: %v", err) } } @@ -320,8 +320,14 @@ func (w *Encoder) Code(c Code) { // section (if not already present), and then writing a relocation // into the element bitstream. func (w *Encoder) String(s string) { + w.StringRef(w.p.StringIdx(s)) +} + +// StringRef writes a reference to the given index, which must be a +// previously encoded string value. +func (w *Encoder) StringRef(idx Index) { w.Sync(SyncString) - w.Reloc(RelocString, w.p.StringIdx(s)) + w.Reloc(RelocString, idx) } // Strings encodes and writes a variable-length slice of strings into @@ -348,7 +354,7 @@ func (w *Encoder) Value(val constant.Value) { func (w *Encoder) scalar(val constant.Value) { switch v := constant.Val(val).(type) { default: - errorf("unhandled %v (%v)", val, val.Kind()) + panicf("unhandled %v (%v)", val, val.Kind()) case bool: w.Code(ValBool) w.Bool(v) @@ -381,3 +387,6 @@ func (w *Encoder) bigFloat(v *big.Float) { b := v.Append(nil, 'p', -1) w.String(string(b)) // TODO: More efficient encoding. } + +// Version reports the version of the bitstream. +func (w *Encoder) Version() Version { return w.p.version } diff --git a/vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go b/vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go deleted file mode 100644 index 5294f6a63..000000000 --- a/vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.7 -// +build !go1.7 - -// TODO(mdempsky): Remove after #44505 is resolved - -package pkgbits - -import "runtime" - -func walkFrames(pcs []uintptr, visit frameVisitor) { - for _, pc := range pcs { - fn := runtime.FuncForPC(pc) - file, line := fn.FileLine(pc) - - visit(file, line, fn.Name(), pc-fn.Entry()) - } -} diff --git a/vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go b/vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go deleted file mode 100644 index 2324ae7ad..000000000 --- a/vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build go1.7 -// +build go1.7 - -package pkgbits - -import "runtime" - -// walkFrames calls visit for each call frame represented by pcs. -// -// pcs should be a slice of PCs, as returned by runtime.Callers. -func walkFrames(pcs []uintptr, visit frameVisitor) { - if len(pcs) == 0 { - return - } - - frames := runtime.CallersFrames(pcs) - for { - frame, more := frames.Next() - visit(frame.File, frame.Line, frame.Function, frame.PC-frame.Entry) - if !more { - return - } - } -} diff --git a/vendor/golang.org/x/tools/internal/pkgbits/support.go b/vendor/golang.org/x/tools/internal/pkgbits/support.go index ad26d3b28..50534a295 100644 --- a/vendor/golang.org/x/tools/internal/pkgbits/support.go +++ b/vendor/golang.org/x/tools/internal/pkgbits/support.go @@ -12,6 +12,6 @@ func assert(b bool) { } } -func errorf(format string, args ...interface{}) { +func panicf(format string, args ...any) { panic(fmt.Errorf(format, args...)) } diff --git a/vendor/golang.org/x/tools/internal/pkgbits/sync.go b/vendor/golang.org/x/tools/internal/pkgbits/sync.go index 5bd51ef71..1520b73af 100644 --- a/vendor/golang.org/x/tools/internal/pkgbits/sync.go +++ b/vendor/golang.org/x/tools/internal/pkgbits/sync.go @@ -6,6 +6,7 @@ package pkgbits import ( "fmt" + "runtime" "strings" ) @@ -23,6 +24,24 @@ func fmtFrames(pcs ...uintptr) []string { type frameVisitor func(file string, line int, name string, offset uintptr) +// walkFrames calls visit for each call frame represented by pcs. +// +// pcs should be a slice of PCs, as returned by runtime.Callers. +func walkFrames(pcs []uintptr, visit frameVisitor) { + if len(pcs) == 0 { + return + } + + frames := runtime.CallersFrames(pcs) + for { + frame, more := frames.Next() + visit(frame.File, frame.Line, frame.Function, frame.PC-frame.Entry) + if !more { + return + } + } +} + // SyncMarker is an enum type that represents markers that may be // written to export data to ensure the reader and writer stay // synchronized. 
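The pkgbits changes above all serve one mechanism: the bitstream version is now explicit, encoders are constructed with a Version, and readers guard version-dependent data with Version().Has. A minimal sketch of that pattern, illustrative only (pkgbits is an internal x/tools package, and newEncoder and readLegacyFlag are hypothetical helpers, not part of this patch):

package pkgbitsdemo

import "golang.org/x/tools/internal/pkgbits"

// newEncoder pins the bitstream to a version at construction time;
// DumpTo later writes that version into the header, followed by the
// Flags word for V1-and-later streams.
func newEncoder() pkgbits.PkgEncoder {
	return pkgbits.NewPkgEncoder(pkgbits.V2, -1) // negative syncFrames: omit sync markers
}

// readLegacyFlag shows the reader-side guard: V0/V1 streams carry a
// legacy "derived func instance" bool that V2 dropped, so the read
// happens only when the field's introduced/removed window covers the
// stream's version.
func readLegacyFlag(r *pkgbits.Decoder) {
	r.Sync(pkgbits.SyncObject)
	if r.Version().Has(pkgbits.DerivedFuncInstance) {
		_ = r.Bool()
	}
}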
@@ -110,4 +129,8 @@ const ( SyncStmtsEnd SyncLabel SyncOptLabel + + SyncMultiExpr + SyncRType + SyncConvRTTI ) diff --git a/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go b/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go index 4a5b0ca5f..582ad56d3 100644 --- a/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go +++ b/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go @@ -74,11 +74,14 @@ func _() { _ = x[SyncStmtsEnd-64] _ = x[SyncLabel-65] _ = x[SyncOptLabel-66] + _ = x[SyncMultiExpr-67] + _ = x[SyncRType-68] + _ = x[SyncConvRTTI-69] } -const _SyncMarker_name = "EOFBoolInt64Uint64StringValueValRelocsRelocUseRelocPublicPosPosBaseObjectObject1PkgPkgDefMethodTypeTypeIdxTypeParamNamesSignatureParamsParamCodeObjSymLocalIdentSelectorPrivateFuncExtVarExtTypeExtPragmaExprListExprsExprExprTypeAssignOpFuncLitCompLitDeclFuncBodyOpenScopeCloseScopeCloseAnotherScopeDeclNamesDeclNameStmtsBlockStmtIfStmtForStmtSwitchStmtRangeStmtCaseClauseCommClauseSelectStmtDeclsLabeledStmtUseObjLocalAddLocalLinknameStmt1StmtsEndLabelOptLabel" +const _SyncMarker_name = "EOFBoolInt64Uint64StringValueValRelocsRelocUseRelocPublicPosPosBaseObjectObject1PkgPkgDefMethodTypeTypeIdxTypeParamNamesSignatureParamsParamCodeObjSymLocalIdentSelectorPrivateFuncExtVarExtTypeExtPragmaExprListExprsExprExprTypeAssignOpFuncLitCompLitDeclFuncBodyOpenScopeCloseScopeCloseAnotherScopeDeclNamesDeclNameStmtsBlockStmtIfStmtForStmtSwitchStmtRangeStmtCaseClauseCommClauseSelectStmtDeclsLabeledStmtUseObjLocalAddLocalLinknameStmt1StmtsEndLabelOptLabelMultiExprRTypeConvRTTI" -var _SyncMarker_index = [...]uint16{0, 3, 7, 12, 18, 24, 29, 32, 38, 43, 51, 57, 60, 67, 73, 80, 83, 89, 95, 99, 106, 120, 129, 135, 140, 147, 150, 160, 168, 175, 182, 188, 195, 201, 209, 214, 218, 226, 232, 234, 241, 248, 252, 260, 269, 279, 296, 305, 313, 318, 327, 333, 340, 350, 359, 369, 379, 389, 394, 405, 416, 424, 432, 437, 445, 450, 458} +var _SyncMarker_index = [...]uint16{0, 3, 7, 12, 18, 24, 29, 32, 38, 43, 51, 57, 60, 67, 73, 80, 83, 89, 95, 99, 106, 120, 129, 135, 140, 147, 150, 160, 168, 175, 182, 188, 195, 201, 209, 214, 218, 226, 232, 234, 241, 248, 252, 260, 269, 279, 296, 305, 313, 318, 327, 333, 340, 350, 359, 369, 379, 389, 394, 405, 416, 424, 432, 437, 445, 450, 458, 467, 472, 480} func (i SyncMarker) String() string { i -= 1 diff --git a/vendor/golang.org/x/tools/internal/pkgbits/version.go b/vendor/golang.org/x/tools/internal/pkgbits/version.go new file mode 100644 index 000000000..53af9df22 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/pkgbits/version.go @@ -0,0 +1,85 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +// Version indicates a version of a unified IR bitstream. +// Each Version indicates the addition, removal, or change of +// new data in the bitstream. +// +// These are serialized to disk and the interpretation remains fixed. +type Version uint32 + +const ( + // V0: initial prototype. + // + // All data that is not assigned a Field is in version V0 + // and has not been deprecated. + V0 Version = iota + + // V1: adds the Flags uint32 word + V1 + + // V2: removes unused legacy fields and supports type parameters for aliases. 
+ // - remove the legacy "has init" bool from the public root + // - remove obj's "derived func instance" bool + // - add a TypeParamNames field to ObjAlias + // - remove derived info "needed" bool + V2 + + numVersions = iota +) + +// Field denotes a unit of data in the serialized unified IR bitstream. +// It is conceptually a like field in a structure. +// +// We only really need Fields when the data may or may not be present +// in a stream based on the Version of the bitstream. +// +// Unlike much of pkgbits, Fields are not serialized and +// can change values as needed. +type Field int + +const ( + // Flags in a uint32 in the header of a bitstream + // that is used to indicate whether optional features are enabled. + Flags Field = iota + + // Deprecated: HasInit was a bool indicating whether a package + // has any init functions. + HasInit + + // Deprecated: DerivedFuncInstance was a bool indicating + // whether an object was a function instance. + DerivedFuncInstance + + // ObjAlias has a list of TypeParamNames. + AliasTypeParamNames + + // Deprecated: DerivedInfoNeeded was a bool indicating + // whether a type was a derived type. + DerivedInfoNeeded + + numFields = iota +) + +// introduced is the version a field was added. +var introduced = [numFields]Version{ + Flags: V1, + AliasTypeParamNames: V2, +} + +// removed is the version a field was removed in or 0 for fields +// that have not yet been deprecated. +// (So removed[f]-1 is the last version it is included in.) +var removed = [numFields]Version{ + HasInit: V2, + DerivedFuncInstance: V2, + DerivedInfoNeeded: V2, +} + +// Has reports whether field f is present in a bitstream at version v. +func (v Version) Has(f Field) bool { + return introduced[f] <= v && (v < removed[f] || removed[f] == V0) +} diff --git a/vendor/golang.org/x/tools/internal/stdlib/manifest.go b/vendor/golang.org/x/tools/internal/stdlib/manifest.go index a928acf29..cdaac9ab3 100644 --- a/vendor/golang.org/x/tools/internal/stdlib/manifest.go +++ b/vendor/golang.org/x/tools/internal/stdlib/manifest.go @@ -951,7 +951,7 @@ var PackageSymbols = map[string][]Symbol{ {"ParseSessionState", Func, 21}, {"QUICClient", Func, 21}, {"QUICConfig", Type, 21}, - {"QUICConfig.EnableStoreSessionEvent", Field, 23}, + {"QUICConfig.EnableSessionEvents", Field, 23}, {"QUICConfig.TLSConfig", Field, 21}, {"QUICConn", Type, 21}, {"QUICEncryptionLevel", Type, 21}, diff --git a/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go b/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go index 834e05381..131caab28 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go @@ -838,7 +838,7 @@ const ( // InvalidCap occurs when an argument to the cap built-in function is not of // supported type. // - // See https://golang.org/ref/spec#Lengthand_capacity for information on + // See https://golang.org/ref/spec#Length_and_capacity for information on // which underlying types are supported as arguments to cap and len. // // Example: @@ -859,7 +859,7 @@ const ( // InvalidCopy occurs when the arguments are not of slice type or do not // have compatible type. // - // See https://golang.org/ref/spec#Appendingand_copying_slices for more + // See https://golang.org/ref/spec#Appending_and_copying_slices for more // information on the type requirements for the copy built-in. 
// // Example: @@ -897,7 +897,7 @@ const ( // InvalidLen occurs when an argument to the len built-in function is not of // supported type. // - // See https://golang.org/ref/spec#Lengthand_capacity for information on + // See https://golang.org/ref/spec#Length_and_capacity for information on // which underlying types are supported as arguments to cap and len. // // Example: @@ -914,7 +914,7 @@ const ( // InvalidMake occurs when make is called with an unsupported type argument. // - // See https://golang.org/ref/spec#Makingslices_maps_and_channels for + // See https://golang.org/ref/spec#Making_slices_maps_and_channels for // information on the types that may be created using make. // // Example: diff --git a/vendor/golang.org/x/tools/internal/versions/constraint.go b/vendor/golang.org/x/tools/internal/versions/constraint.go new file mode 100644 index 000000000..179063d48 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/versions/constraint.go @@ -0,0 +1,13 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package versions + +import "go/build/constraint" + +// ConstraintGoVersion is constraint.GoVersion (if built with go1.21+). +// Otherwise nil. +// +// Deprecate once x/tools is after go1.21. +var ConstraintGoVersion func(x constraint.Expr) string diff --git a/vendor/golang.org/x/tools/internal/versions/constraint_go121.go b/vendor/golang.org/x/tools/internal/versions/constraint_go121.go new file mode 100644 index 000000000..38011407d --- /dev/null +++ b/vendor/golang.org/x/tools/internal/versions/constraint_go121.go @@ -0,0 +1,14 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build go1.21 +// +build go1.21 + +package versions + +import "go/build/constraint" + +func init() { + ConstraintGoVersion = constraint.GoVersion +} diff --git a/vendor/modules.txt b/vendor/modules.txt index b7ca6da39..85bb6aed1 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,4 +1,4 @@ -# dario.cat/mergo v1.0.0 +# dario.cat/mergo v1.0.1 ## explicit; go 1.13 dario.cat/mergo # github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0 @@ -113,20 +113,16 @@ github.com/apparentlymart/go-textseg/v15/textseg # github.com/atotto/clipboard v0.1.4 ## explicit github.com/atotto/clipboard -# github.com/aws/amazon-ec2-instance-selector/v2 v2.4.2-0.20231006140257-d989c5d76f0e -## explicit; go 1.20 -github.com/aws/amazon-ec2-instance-selector/v2/pkg/awsapi -github.com/aws/amazon-ec2-instance-selector/v2/pkg/bytequantity -github.com/aws/amazon-ec2-instance-selector/v2/pkg/ec2pricing -github.com/aws/amazon-ec2-instance-selector/v2/pkg/instancetypes -github.com/aws/amazon-ec2-instance-selector/v2/pkg/selector -github.com/aws/amazon-ec2-instance-selector/v2/pkg/selector/outputs -github.com/aws/amazon-ec2-instance-selector/v2/pkg/sorter -# github.com/aws/aws-sdk-go v1.50.36 -## explicit; go 1.19 -github.com/aws/aws-sdk-go/aws/awserr -github.com/aws/aws-sdk-go/aws/endpoints -# github.com/aws/aws-sdk-go-v2 v1.36.2 +# github.com/aws/amazon-ec2-instance-selector/v3 v3.1.1 +## explicit; go 1.23 +github.com/aws/amazon-ec2-instance-selector/v3/pkg/awsapi +github.com/aws/amazon-ec2-instance-selector/v3/pkg/bytequantity +github.com/aws/amazon-ec2-instance-selector/v3/pkg/ec2pricing +github.com/aws/amazon-ec2-instance-selector/v3/pkg/instancetypes +github.com/aws/amazon-ec2-instance-selector/v3/pkg/selector +github.com/aws/amazon-ec2-instance-selector/v3/pkg/selector/outputs +github.com/aws/amazon-ec2-instance-selector/v3/pkg/sorter +# github.com/aws/aws-sdk-go-v2 v1.36.3 ## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/aws github.com/aws/aws-sdk-go-v2/aws/arn @@ -158,10 +154,10 @@ github.com/aws/aws-sdk-go-v2/internal/timeconv ## explicit; go 1.20 github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi -# github.com/aws/aws-sdk-go-v2/config v1.29.7 +# github.com/aws/aws-sdk-go-v2/config v1.29.8 ## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/config -# github.com/aws/aws-sdk-go-v2/credentials v1.17.60 +# github.com/aws/aws-sdk-go-v2/credentials v1.17.61 ## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/credentials github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds @@ -170,14 +166,14 @@ github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client github.com/aws/aws-sdk-go-v2/credentials/processcreds github.com/aws/aws-sdk-go-v2/credentials/ssocreds github.com/aws/aws-sdk-go-v2/credentials/stscreds -# github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.29 +# github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 ## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/feature/ec2/imds github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config -# github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.33 +# github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 ## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/internal/configsources -# github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.33 +# github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 ## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 # github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 @@ -209,7 +205,7 @@ 
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding # github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.7 ## explicit; go 1.20 github.com/aws/aws-sdk-go-v2/service/internal/checksum -# github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.14 +# github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 ## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/service/internal/presigned-url # github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.5 @@ -229,23 +225,23 @@ github.com/aws/aws-sdk-go-v2/service/s3/internal/arn github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints github.com/aws/aws-sdk-go-v2/service/s3/types -# github.com/aws/aws-sdk-go-v2/service/sso v1.24.16 +# github.com/aws/aws-sdk-go-v2/service/sso v1.25.0 ## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/service/sso github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints github.com/aws/aws-sdk-go-v2/service/sso/types -# github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.15 +# github.com/aws/aws-sdk-go-v2/service/ssooidc v1.29.0 ## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/service/ssooidc github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints github.com/aws/aws-sdk-go-v2/service/ssooidc/types -# github.com/aws/aws-sdk-go-v2/service/sts v1.33.15 +# github.com/aws/aws-sdk-go-v2/service/sts v1.33.16 ## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/service/sts github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints github.com/aws/aws-sdk-go-v2/service/sts/types -# github.com/aws/smithy-go v1.22.2 -## explicit; go 1.21 +# github.com/aws/smithy-go v1.22.3 +## explicit; go 1.22 github.com/aws/smithy-go github.com/aws/smithy-go/auth github.com/aws/smithy-go/auth/bearer @@ -296,17 +292,24 @@ github.com/charmbracelet/bubbles/runeutil github.com/charmbracelet/bubbles/spinner github.com/charmbracelet/bubbles/textinput github.com/charmbracelet/bubbles/viewport -# github.com/charmbracelet/bubbletea v1.1.0 +# github.com/charmbracelet/bubbletea v1.3.3 ## explicit; go 1.18 github.com/charmbracelet/bubbletea -# github.com/charmbracelet/lipgloss v0.13.0 +# github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc +## explicit; go 1.18 +github.com/charmbracelet/colorprofile +# github.com/charmbracelet/lipgloss v1.1.0 ## explicit; go 1.18 github.com/charmbracelet/lipgloss -# github.com/charmbracelet/x/ansi v0.2.3 +# github.com/charmbracelet/x/ansi v0.8.0 ## explicit; go 1.18 github.com/charmbracelet/x/ansi +github.com/charmbracelet/x/ansi/kitty github.com/charmbracelet/x/ansi/parser -# github.com/charmbracelet/x/term v0.2.0 +# github.com/charmbracelet/x/cellbuf v0.0.13-0.20250311204145-2c3ea96c31dd +## explicit; go 1.18 +github.com/charmbracelet/x/cellbuf +# github.com/charmbracelet/x/term v0.2.1 ## explicit; go 1.18 github.com/charmbracelet/x/term # github.com/cheggaaa/pb v1.0.29 @@ -359,7 +362,7 @@ github.com/go-git/gcfg github.com/go-git/gcfg/scanner github.com/go-git/gcfg/token github.com/go-git/gcfg/types -# github.com/go-git/go-billy/v5 v5.6.1 +# github.com/go-git/go-billy/v5 v5.6.2 ## explicit; go 1.21 github.com/go-git/go-billy/v5 github.com/go-git/go-billy/v5/helper/chroot @@ -457,9 +460,6 @@ github.com/hashicorp/hcl/json/token # github.com/hashicorp/hcl/v2 v2.23.0 ## explicit; go 1.18 github.com/hashicorp/hcl/v2 -# github.com/imdario/mergo v0.3.16 -## explicit; go 1.13 -github.com/imdario/mergo # github.com/inconshreveable/mousetrap v1.1.0 ## explicit; go 1.18 
github.com/inconshreveable/mousetrap @@ -518,7 +518,7 @@ github.com/muesli/cancelreader github.com/muesli/reflow/ansi github.com/muesli/reflow/truncate github.com/muesli/reflow/wordwrap -# github.com/muesli/termenv v0.15.2 +# github.com/muesli/termenv v0.16.0 ## explicit; go 1.17 github.com/muesli/termenv # github.com/nxadm/tail v1.4.11 @@ -541,8 +541,8 @@ github.com/opentracing/opentracing-go/log # github.com/patrickmn/go-cache v2.1.0+incompatible ## explicit github.com/patrickmn/go-cache -# github.com/pelletier/go-toml/v2 v2.2.2 -## explicit; go 1.16 +# github.com/pelletier/go-toml/v2 v2.2.3 +## explicit; go 1.21.0 github.com/pelletier/go-toml/v2 github.com/pelletier/go-toml/v2/internal/characters github.com/pelletier/go-toml/v2/internal/danger @@ -753,10 +753,10 @@ github.com/spf13/afero/mem # github.com/spf13/cast v1.6.0 ## explicit; go 1.19 github.com/spf13/cast -# github.com/spf13/cobra v1.8.0 +# github.com/spf13/cobra v1.9.1 ## explicit; go 1.15 github.com/spf13/cobra -# github.com/spf13/pflag v1.0.5 +# github.com/spf13/pflag v1.0.6 ## explicit; go 1.12 github.com/spf13/pflag # github.com/spf13/viper v1.19.0 @@ -798,7 +798,10 @@ github.com/uber/jaeger-lib/metrics # github.com/xanzy/ssh-agent v0.3.3 ## explicit; go 1.16 github.com/xanzy/ssh-agent -# github.com/zclconf/go-cty v1.14.1 +# github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e +## explicit; go 1.19 +github.com/xo/terminfo +# github.com/zclconf/go-cty v1.15.0 ## explicit; go 1.18 github.com/zclconf/go-cty/cty github.com/zclconf/go-cty/cty/convert @@ -832,16 +835,16 @@ golang.org/x/crypto/ssh golang.org/x/crypto/ssh/agent golang.org/x/crypto/ssh/internal/bcrypt_pbkdf golang.org/x/crypto/ssh/knownhosts -# golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 -## explicit; go 1.20 +# golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 +## explicit; go 1.22.0 golang.org/x/exp/constraints golang.org/x/exp/maps golang.org/x/exp/slices golang.org/x/exp/slog golang.org/x/exp/slog/internal golang.org/x/exp/slog/internal/buffer -# golang.org/x/mod v0.19.0 -## explicit; go 1.18 +# golang.org/x/mod v0.21.0 +## explicit; go 1.22.0 golang.org/x/mod/semver # golang.org/x/net v0.35.0 ## explicit; go 1.18 @@ -881,8 +884,8 @@ golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm -# golang.org/x/tools v0.23.0 -## explicit; go 1.19 +# golang.org/x/tools v0.25.0 +## explicit; go 1.22.0 golang.org/x/tools/cmd/stringer golang.org/x/tools/go/gcexportdata golang.org/x/tools/go/packages From 9f131559c2ce2e33369e0a847dbdba6b433865b2 Mon Sep 17 00:00:00 2001 From: Adrian Riobo Date: Fri, 21 Mar 2025 09:49:43 +0100 Subject: [PATCH 02/11] chore: Use fedora 41 as builder for multistage build Due to the Go major updates, we need to build with Go 1.23, which is not available as a Go toolset on UBI9.
This commit uses Fedora 41, which provides Go 1.23, as the builder in the multistage Containerfile. Signed-off-by: Adrian Riobo --- oci/Containerfile | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/oci/Containerfile b/oci/Containerfile index 196bacc1a..0b0771358 100644 --- a/oci/Containerfile +++ b/oci/Containerfile @@ -1,5 +1,6 @@ -# go toolset 9.5-1733160835-1.22.7-1733160835 -FROM registry.access.redhat.com/ubi9/go-toolset:1.22.9-1739801907 as builder +# Moved to Fedora 41 and install Go tools until the UBI9 Go toolset supports Go 1.23 +FROM quay.io/fedora/fedora:41 as builder +RUN dnf install -y make go ARG TARGETARCH USER root WORKDIR /workspace From 4771bf46d1574c17c048d8b0063dcd411279c377 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 17 Mar 2025 11:44:00 +0000 Subject: [PATCH 03/11] chore(deps): update module golang.org/x/net to v0.36.0 [security] --- go.mod | 8 +- go.sum | 12 +- tools/go.mod | 26 +- tools/go.sum | 48 +-- .../OpenPeeDeeP/depguard/v2/README.md | 30 +- .../OpenPeeDeeP/depguard/v2/depguard.go | 6 +- .../OpenPeeDeeP/depguard/v2/settings.go | 14 +- .../github.com/golangci/dupl/.travis.yml | 5 - .../vendor/github.com/golangci/dupl/README.md | 63 --- .../golangci/dupl/{main.go => lib/lib.go} | 66 +--- .../github.com/golangci/dupl/printer/html.go | 14 +- .../golangci/dupl/printer/issuer.go | 56 +++ .../golangci/dupl/printer/plumbing.go | 44 +-- .../golangci/dupl/suffixtree/suffixtree.go | 2 +- .../github.com/golangci/dupl/syntax/syntax.go | 20 +- .../golangci-lint/pkg/commands/run.go | 4 + .../golangci-lint/pkg/config/config.go | 2 + .../golangci-lint/pkg/golinters/dupl/dupl.go | 4 +- .../github.com/securego/gosec/v2/action.yml | 2 +- .../golang.org/x/sync/errgroup/errgroup.go | 2 +- .../golang.org/x/sync/errgroup/go120.go | 13 - .../golang.org/x/sync/errgroup/pre_go120.go | 14 - .../x/tools/go/analysis/analysis.go | 8 +- .../go/analysis/passes/appends/appends.go | 2 +- .../go/analysis/passes/asmdecl/asmdecl.go | 6 +- .../go/analysis/passes/buildssa/buildssa.go | 2 +- .../go/analysis/passes/buildtag/buildtag.go | 2 +- .../go/analysis/passes/cgocall/cgocall.go | 2 +- .../go/analysis/passes/composite/composite.go | 2 +- .../go/analysis/passes/copylock/copylock.go | 4 +- .../go/analysis/passes/ctrlflow/ctrlflow.go | 2 +- .../go/analysis/passes/directive/directive.go | 2 +- .../passes/fieldalignment/fieldalignment.go | 2 +- .../go/analysis/passes/findcall/findcall.go | 2 +- .../passes/framepointer/framepointer.go | 2 +- .../passes/ifaceassert/ifaceassert.go | 2 +- .../go/analysis/passes/inspect/inspect.go | 2 +- .../passes/loopclosure/loopclosure.go | 2 +- .../analysis/passes/lostcancel/lostcancel.go | 2 +- .../go/analysis/passes/nilfunc/nilfunc.go | 2 +- .../go/analysis/passes/nilness/nilness.go | 4 +- .../go/analysis/passes/pkgfact/pkgfact.go | 2 +- .../tools/go/analysis/passes/printf/printf.go | 18 +- .../reflectvaluecompare.go | 2 +- .../tools/go/analysis/passes/shadow/shadow.go | 2 +- .../x/tools/go/analysis/passes/shift/shift.go | 2 +- .../analysis/passes/stdmethods/stdmethods.go | 2 +- .../analysis/passes/stringintconv/string.go | 2 +- .../go/analysis/passes/structtag/structtag.go | 2 +- .../testinggoroutine/testinggoroutine.go | 2 +- .../x/tools/go/analysis/passes/tests/tests.go | 2 +- .../passes/unreachable/unreachable.go | 2 +- .../go/analysis/passes/unsafeptr/unsafeptr.go | 2 +- .../passes/unusedresult/unusedresult.go | 6 +- .../x/tools/go/analysis/validate.go | 2 +-
.../x/tools/go/ast/astutil/rewrite.go | 2 +- .../golang.org/x/tools/go/ast/astutil/util.go | 2 + .../x/tools/go/buildutil/fakecontext.go | 4 +- .../golang.org/x/tools/go/buildutil/tags.go | 2 +- .../golang.org/x/tools/go/internal/cgo/cgo.go | 2 +- .../golang.org/x/tools/go/loader/loader.go | 2 +- .../x/tools/go/packages/packages.go | 12 +- .../golang.org/x/tools/go/ssa/builder.go | 26 +- .../vendor/golang.org/x/tools/go/ssa/emit.go | 2 +- .../vendor/golang.org/x/tools/go/ssa/lift.go | 19 +- .../vendor/golang.org/x/tools/go/ssa/mode.go | 2 +- .../vendor/golang.org/x/tools/go/ssa/print.go | 2 +- .../golang.org/x/tools/go/ssa/sanity.go | 6 +- .../golang.org/x/tools/go/ssa/source.go | 2 +- .../vendor/golang.org/x/tools/go/ssa/util.go | 6 +- .../golang.org/x/tools/go/ssa/wrappers.go | 6 +- .../x/tools/go/types/typeutil/map.go | 9 +- .../internal/analysisinternal/analysis.go | 60 ++- .../x/tools/internal/event/keys/keys.go | 6 +- .../x/tools/internal/event/label/label.go | 6 +- .../x/tools/internal/gcimporter/bimport.go | 2 +- .../x/tools/internal/gcimporter/iexport.go | 6 +- .../x/tools/internal/gcimporter/iimport.go | 2 +- .../tools/internal/gcimporter/ureader_yes.go | 2 +- .../x/tools/internal/gopathwalk/walk.go | 4 +- .../x/tools/internal/imports/fix.go | 4 +- .../internal/packagesinternal/packages.go | 2 +- .../x/tools/internal/stdlib/deps.go | 359 ++++++++++++++++++ .../x/tools/internal/stdlib/import.go | 89 +++++ .../x/tools/internal/stdlib/manifest.go | 20 +- .../x/tools/internal/stdlib/stdlib.go | 2 +- .../x/tools/internal/typeparams/normalize.go | 2 +- .../x/tools/internal/typesinternal/types.go | 6 +- tools/vendor/modules.txt | 36 +- vendor/go.uber.org/atomic/CHANGELOG.md | 10 + vendor/go.uber.org/atomic/bool.go | 2 +- vendor/go.uber.org/atomic/duration.go | 2 +- vendor/go.uber.org/atomic/error.go | 14 +- vendor/go.uber.org/atomic/float32.go | 2 +- vendor/go.uber.org/atomic/float64.go | 2 +- vendor/go.uber.org/atomic/int32.go | 2 +- vendor/go.uber.org/atomic/int64.go | 2 +- vendor/go.uber.org/atomic/pointer_go118.go | 41 +- .../atomic/pointer_go118_pre119.go | 60 +++ vendor/go.uber.org/atomic/string.go | 23 +- vendor/go.uber.org/atomic/string_ext.go | 15 +- vendor/go.uber.org/atomic/time.go | 2 +- vendor/go.uber.org/atomic/uint32.go | 2 +- vendor/go.uber.org/atomic/uint64.go | 2 +- vendor/go.uber.org/atomic/uintptr.go | 2 +- vendor/golang.org/x/crypto/ssh/handshake.go | 47 ++- vendor/golang.org/x/net/context/context.go | 112 +++++- vendor/golang.org/x/net/context/go17.go | 72 ---- vendor/golang.org/x/net/context/go19.go | 20 - vendor/golang.org/x/net/context/pre_go17.go | 300 --------------- vendor/golang.org/x/net/context/pre_go19.go | 109 ------ vendor/golang.org/x/net/http2/server.go | 121 ++---- vendor/golang.org/x/net/http2/transport.go | 52 ++- .../x/net/internal/httpcommon/headermap.go | 6 +- .../x/net/internal/httpcommon/request.go | 166 ++++++-- vendor/golang.org/x/net/proxy/per_host.go | 8 +- vendor/modules.txt | 10 +- 117 files changed, 1334 insertions(+), 1141 deletions(-) delete mode 100644 tools/vendor/github.com/golangci/dupl/.travis.yml delete mode 100644 tools/vendor/github.com/golangci/dupl/README.md rename tools/vendor/github.com/golangci/dupl/{main.go => lib/lib.go} (51%) create mode 100644 tools/vendor/github.com/golangci/dupl/printer/issuer.go delete mode 100644 tools/vendor/golang.org/x/sync/errgroup/go120.go delete mode 100644 tools/vendor/golang.org/x/sync/errgroup/pre_go120.go create mode 100644 tools/vendor/golang.org/x/tools/internal/stdlib/deps.go create 
mode 100644 tools/vendor/golang.org/x/tools/internal/stdlib/import.go create mode 100644 vendor/go.uber.org/atomic/pointer_go118_pre119.go delete mode 100644 vendor/golang.org/x/net/context/go17.go delete mode 100644 vendor/golang.org/x/net/context/go19.go delete mode 100644 vendor/golang.org/x/net/context/pre_go17.go delete mode 100644 vendor/golang.org/x/net/context/pre_go19.go diff --git a/go.mod b/go.mod index 0dc4cdcfb..c313e1bd7 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/redhat-developer/mapt -go 1.23 +go 1.23.0 toolchain go1.23.7 @@ -167,9 +167,9 @@ require ( github.com/uber/jaeger-client-go v2.30.0+incompatible // indirect github.com/uber/jaeger-lib v2.4.1+incompatible // indirect github.com/xanzy/ssh-agent v0.3.3 // indirect - go.uber.org/atomic v1.10.0 // indirect - golang.org/x/crypto v0.33.0 // indirect - golang.org/x/net v0.35.0 // indirect + go.uber.org/atomic v1.11.0 // indirect + golang.org/x/crypto v0.35.0 // indirect + golang.org/x/net v0.36.0 // indirect golang.org/x/sys v0.30.0 // indirect golang.org/x/term v0.29.0 // indirect golang.org/x/text v0.22.0 // indirect diff --git a/go.sum b/go.sum index f2d7ffde9..d8520cff4 100644 --- a/go.sum +++ b/go.sum @@ -369,16 +369,16 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/zclconf/go-cty v1.15.0 h1:tTCRWxsexYUmtt/wVxgDClUe+uQusuI443uL6e+5sXQ= github.com/zclconf/go-cty v1.15.0/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= -go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= -go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.33.0 h1:IOBPskki6Lysi0lo9qQvbxiQ+FvsCC/YWOecCHAixus= -golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M= +golang.org/x/crypto v0.35.0 h1:b15kiHdrGCHrP6LvwaQ3c03kgNhhiMgvlhxHQhmg2Xs= +golang.org/x/crypto v0.35.0/go.mod h1:dy7dXNW32cAb/6/PRuTNsix8T+vJAqvuIy5Bli/x0YQ= golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= @@ -393,8 +393,8 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= 
-golang.org/x/net v0.35.0 h1:T5GQRQb2y08kTAByq9L4/bz8cipCdA8FbRTXewonqY8= -golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk= +golang.org/x/net v0.36.0 h1:vWF2fRbw4qslQsQzgFqZff+BItCvGFQqKzKIzx1rmoA= +golang.org/x/net v0.36.0/go.mod h1:bFmbeoIPfrw4sMHNhb4J9f6+tPziuGjq7Jk/38fxi1I= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= diff --git a/tools/go.mod b/tools/go.mod index 16f3a7671..1b7ec0b07 100644 --- a/tools/go.mod +++ b/tools/go.mod @@ -2,9 +2,9 @@ module github.com/redhat-developer/mapt/tools go 1.23.0 -toolchain go1.23.6 +toolchain go1.23.7 -require github.com/golangci/golangci-lint v1.64.5 +require github.com/golangci/golangci-lint v1.64.8 require ( 4d63.com/gocheckcompilerdirectives v1.2.1 // indirect @@ -17,9 +17,9 @@ require ( github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c // indirect github.com/Crocmagnon/fatcontext v0.7.1 // indirect github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 // indirect - github.com/GaijinEntertainment/go-exhaustruct/v3 v3.3.0 // indirect - github.com/Masterminds/semver/v3 v3.3.0 // indirect - github.com/OpenPeeDeeP/depguard/v2 v2.2.0 // indirect + github.com/GaijinEntertainment/go-exhaustruct/v3 v3.3.1 // indirect + github.com/Masterminds/semver/v3 v3.3.1 // indirect + github.com/OpenPeeDeeP/depguard/v2 v2.2.1 // indirect github.com/alecthomas/go-check-sumtype v0.3.1 // indirect github.com/alexkohler/nakedret/v2 v2.0.5 // indirect github.com/alexkohler/prealloc v1.0.0 // indirect @@ -65,7 +65,7 @@ require ( github.com/gobwas/glob v0.2.3 // indirect github.com/gofrs/flock v0.12.1 // indirect github.com/golang/protobuf v1.5.4 // indirect - github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a // indirect + github.com/golangci/dupl v0.0.0-20250308024227-f665c8d69b32 // indirect github.com/golangci/go-printf-func-name v0.1.0 // indirect github.com/golangci/gofmt v0.0.0-20250106114630-d62b90e6713d // indirect github.com/golangci/misspell v0.6.0 // indirect @@ -140,7 +140,7 @@ require ( github.com/santhosh-tekuri/jsonschema/v6 v6.0.1 // indirect github.com/sashamelentyev/interfacebloat v1.1.0 // indirect github.com/sashamelentyev/usestdlibvars v1.28.0 // indirect - github.com/securego/gosec/v2 v2.22.1 // indirect + github.com/securego/gosec/v2 v2.22.2 // indirect github.com/sirupsen/logrus v1.9.3 // indirect github.com/sivchari/containedctx v1.0.3 // indirect github.com/sivchari/tenv v1.12.1 // indirect @@ -179,16 +179,16 @@ require ( go.uber.org/multierr v1.6.0 // indirect go.uber.org/zap v1.24.0 // indirect golang.org/x/exp/typeparams v0.0.0-20250210185358-939b2ce775ac // indirect - golang.org/x/mod v0.23.0 // indirect - golang.org/x/sync v0.11.0 // indirect - golang.org/x/sys v0.30.0 // indirect + golang.org/x/mod v0.24.0 // indirect + golang.org/x/sync v0.12.0 // indirect + golang.org/x/sys v0.31.0 // indirect golang.org/x/text v0.22.0 // indirect - golang.org/x/tools v0.30.0 // indirect - google.golang.org/protobuf v1.36.4 // indirect + golang.org/x/tools v0.31.0 // indirect + google.golang.org/protobuf v1.36.5 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - honnef.co/go/tools v0.6.0 // indirect + honnef.co/go/tools v0.6.1 // indirect 
mvdan.cc/gofumpt v0.7.0 // indirect mvdan.cc/unparam v0.0.0-20240528143540-8a5130ca722f // indirect ) diff --git a/tools/go.sum b/tools/go.sum index 93e98a6b1..3f1384126 100644 --- a/tools/go.sum +++ b/tools/go.sum @@ -53,12 +53,12 @@ github.com/Crocmagnon/fatcontext v0.7.1 h1:SC/VIbRRZQeQWj/TcQBS6JmrXcfA+BU4OGSVU github.com/Crocmagnon/fatcontext v0.7.1/go.mod h1:1wMvv3NXEBJucFGfwOJBxSVWcoIO6emV215SMkW9MFU= github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 h1:sHglBQTwgx+rWPdisA5ynNEsoARbiCBOyGcJM4/OzsM= github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs= -github.com/GaijinEntertainment/go-exhaustruct/v3 v3.3.0 h1:/fTUt5vmbkAcMBt4YQiuC23cV0kEsN1MVMNqeOW43cU= -github.com/GaijinEntertainment/go-exhaustruct/v3 v3.3.0/go.mod h1:ONJg5sxcbsdQQ4pOW8TGdTidT2TMAUy/2Xhr8mrYaao= -github.com/Masterminds/semver/v3 v3.3.0 h1:B8LGeaivUe71a5qox1ICM/JLl0NqZSW5CHyL+hmvYS0= -github.com/Masterminds/semver/v3 v3.3.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= -github.com/OpenPeeDeeP/depguard/v2 v2.2.0 h1:vDfG60vDtIuf0MEOhmLlLLSzqaRM8EMcgJPdp74zmpA= -github.com/OpenPeeDeeP/depguard/v2 v2.2.0/go.mod h1:CIzddKRvLBC4Au5aYP/i3nyaWQ+ClszLIuVocRiCYFQ= +github.com/GaijinEntertainment/go-exhaustruct/v3 v3.3.1 h1:Sz1JIXEcSfhz7fUi7xHnhpIE0thVASYjvosApmHuD2k= +github.com/GaijinEntertainment/go-exhaustruct/v3 v3.3.1/go.mod h1:n/LSCXNuIYqVfBlVXyHfMQkZDdp1/mmxfSjADd3z1Zg= +github.com/Masterminds/semver/v3 v3.3.1 h1:QtNSWtVZ3nBfk8mAOu/B6v7FMJ+NHTIgUPi7rj+4nv4= +github.com/Masterminds/semver/v3 v3.3.1/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= +github.com/OpenPeeDeeP/depguard/v2 v2.2.1 h1:vckeWVESWp6Qog7UZSARNqfu/cZqvki8zsuj3piCMx4= +github.com/OpenPeeDeeP/depguard/v2 v2.2.1/go.mod h1:q4DKzC4UcVaAvcfd41CZh0PWpGgzrVxUYBlgKNGquUo= github.com/alecthomas/assert/v2 v2.11.0 h1:2Q9r3ki8+JYXvGsDyBXwH3LcJ+WK5D0gc5E8vS6K3D0= github.com/alecthomas/assert/v2 v2.11.0/go.mod h1:Bze95FyfUr7x34QZrjL+XP+0qgp/zg8yS+TtBj1WA3k= github.com/alecthomas/go-check-sumtype v0.3.1 h1:u9aUvbGINJxLVXiFvHUlPEaD7VDULsrxJb4Aq31NLkU= @@ -229,14 +229,14 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a h1:w8hkcTqaFpzKqonE9uMCefW1WDie15eSP/4MssdenaM= -github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk= +github.com/golangci/dupl v0.0.0-20250308024227-f665c8d69b32 h1:WUvBfQL6EW/40l6OmeSBYQJNSif4O11+bmWEz+C7FYw= +github.com/golangci/dupl v0.0.0-20250308024227-f665c8d69b32/go.mod h1:NUw9Zr2Sy7+HxzdjIULge71wI6yEg1lWQr7Evcu8K0E= github.com/golangci/go-printf-func-name v0.1.0 h1:dVokQP+NMTO7jwO4bwsRwLWeudOVUPPyAKJuzv8pEJU= github.com/golangci/go-printf-func-name v0.1.0/go.mod h1:wqhWFH5mUdJQhweRnldEywnR5021wTdZSNgwYceV14s= github.com/golangci/gofmt v0.0.0-20250106114630-d62b90e6713d h1:viFft9sS/dxoYY0aiOTsLKO2aZQAPT4nlQCsimGcSGE= github.com/golangci/gofmt v0.0.0-20250106114630-d62b90e6713d/go.mod h1:ivJ9QDg0XucIkmwhzCDsqcnxxlDStoTl89jDMIoNxKY= -github.com/golangci/golangci-lint v1.64.5 h1:5omC86XFBKXZgCrVdUWU+WNHKd+CWCxNx717KXnzKZY= -github.com/golangci/golangci-lint v1.64.5/go.mod h1:WZnwq8TF0z61h3jLQ7Sk5trcP7b3kUFxLD6l1ivtdvU= 
+github.com/golangci/golangci-lint v1.64.8 h1:y5TdeVidMtBGG32zgSC7ZXTFNHrsJkDnpO4ItB3Am+I= +github.com/golangci/golangci-lint v1.64.8/go.mod h1:5cEsUQBSr6zi8XI8OjmcY2Xmliqc4iYL7YoPrL+zLJ4= github.com/golangci/misspell v0.6.0 h1:JCle2HUTNWirNlDIAUO44hUsKhOFqGPoC4LZxlaSXDs= github.com/golangci/misspell v0.6.0/go.mod h1:keMNyY6R9isGaSAu+4Q8NMBwMPkh15Gtc8UCVoDtAWo= github.com/golangci/plugin-module-register v0.1.1 h1:TCmesur25LnyJkpsVrupv1Cdzo+2f7zX0H6Jkw1Ol6c= @@ -484,8 +484,8 @@ github.com/sashamelentyev/interfacebloat v1.1.0 h1:xdRdJp0irL086OyW1H/RTZTr1h/tM github.com/sashamelentyev/interfacebloat v1.1.0/go.mod h1:+Y9yU5YdTkrNvoX0xHc84dxiN1iBi9+G8zZIhPVoNjQ= github.com/sashamelentyev/usestdlibvars v1.28.0 h1:jZnudE2zKCtYlGzLVreNp5pmCdOxXUzwsMDBkR21cyQ= github.com/sashamelentyev/usestdlibvars v1.28.0/go.mod h1:9nl0jgOfHKWNFS43Ojw0i7aRoS4j6EBye3YBhmAIRF8= -github.com/securego/gosec/v2 v2.22.1 h1:IcBt3TpI5Y9VN1YlwjSpM2cHu0i3Iw52QM+PQeg7jN8= -github.com/securego/gosec/v2 v2.22.1/go.mod h1:4bb95X4Jz7VSEPdVjC0hD7C/yR6kdeUBvCPOy9gDQ0g= +github.com/securego/gosec/v2 v2.22.2 h1:IXbuI7cJninj0nRpZSLCUlotsj8jGusohfONMrHoF6g= +github.com/securego/gosec/v2 v2.22.2/go.mod h1:UEBGA+dSKb+VqM6TdehR7lnQtIIMorYJ4/9CW1KVQBE= github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= @@ -652,8 +652,8 @@ golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/mod v0.23.0 h1:Zb7khfcRGKk+kqfxFaP5tZqCnDZMjC5VtUBs87Hr6QM= -golang.org/x/mod v0.23.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU= +golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -694,8 +694,8 @@ golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/net v0.16.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= -golang.org/x/net v0.35.0 h1:T5GQRQb2y08kTAByq9L4/bz8cipCdA8FbRTXewonqY8= -golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk= +golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c= +golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -717,8 +717,8 @@ golang.org/x/sync 
v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= -golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w= -golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= +golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -771,8 +771,8 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc= -golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= +golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= @@ -854,8 +854,8 @@ golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= -golang.org/x/tools v0.30.0 h1:BgcpHewrV5AUp2G9MebG4XPFI1E2W41zU1SaqVA9vJY= -golang.org/x/tools v0.30.0/go.mod h1:c347cR/OJfw5TI+GfX7RUPNMdDRRbjvYTS0jPyvsVtY= +golang.org/x/tools v0.31.0 h1:0EedkvKDbh+qistFTd0Bcwe/YLh4vHwWEkiI0toFIBU= +golang.org/x/tools v0.31.0/go.mod h1:naFTU+Cev749tSJRXJlna0T3WxKvb1kWEx15xA4SdmQ= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -963,8 +963,8 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.6.0 h1:TAODvD3knlq75WCp2nyGJtT4LeRV/o7NN9nYPeVJXf8= -honnef.co/go/tools v0.6.0/go.mod h1:3puzxxljPCe8RGJX7BIy1plGbxEOZni5mR2aXe3/uk4= +honnef.co/go/tools v0.6.1 h1:R094WgE8K4JirYjBaOpz/AvTyUu/3wbmAoskKN/pxTI= +honnef.co/go/tools v0.6.1/go.mod 
h1:3puzxxljPCe8RGJX7BIy1plGbxEOZni5mR2aXe3/uk4= mvdan.cc/gofumpt v0.7.0 h1:bg91ttqXmi9y2xawvkuMXyvAA/1ZGJqYAEGjXuP0JXU= mvdan.cc/gofumpt v0.7.0/go.mod h1:txVFJy/Sc/mvaycET54pV8SW8gWxTlUuGHVEcncmNUo= mvdan.cc/unparam v0.0.0-20240528143540-8a5130ca722f h1:lMpcwN6GxNbWtbpI1+xzFLSW8XzX0u72NttUGVFjO3U= diff --git a/tools/vendor/github.com/OpenPeeDeeP/depguard/v2/README.md b/tools/vendor/github.com/OpenPeeDeeP/depguard/v2/README.md index 2ccfa22c5..0bf603b2b 100644 --- a/tools/vendor/github.com/OpenPeeDeeP/depguard/v2/README.md +++ b/tools/vendor/github.com/OpenPeeDeeP/depguard/v2/README.md @@ -7,7 +7,7 @@ allow specific packages within a repository. ## Install ```bash -go install github.com/OpenPeeDeeP/depguard@latest +go install github.com/OpenPeeDeeP/depguard/cmd/depguard@latest ``` ## Config @@ -49,7 +49,7 @@ the linter's output. - `files` - list of file globs that will match this list of settings to compare against - `allow` - list of allowed packages - `deny` - map of packages that are not allowed where the value is a suggestion -= `listMode` - the mode to use for package matching +- `listMode` - the mode to use for package matching Files are matched using [Globs](https://github.com/gobwas/glob). If the files list is empty, then all files will match that list. Prefixing a file @@ -153,11 +153,29 @@ would be allowed. ```yaml Main: deny: - - github.com/OpenPeeDeeP/depguard$ + github.com/OpenPeeDeeP/depguard$: Please use v2 ``` -## Golangci-lint +## golangci-lint This linter was built with -[Golangci-lint](https://github.com/golangci/golangci-lint) in mind. It is compatible -and read their docs to see how to implement all their linters, including this one. +[golangci-lint](https://github.com/golangci/golangci-lint) in mind, read the [linters docs](https://golangci-lint.run/usage/linters/#depguard) to see how to configure all their linters, including this one. 
+ +The config is similar to the YAML depguard config documented above, however due to [golangci-lint limitation](https://github.com/golangci/golangci-lint/pull/4227) the `deny` value must be provided as a list, with `pkg` and `desc` keys (otherwise a [panic](https://github.com/OpenPeeDeeP/depguard/issues/74) may occur): + +```yaml +# golangci-lint config +linters-settings: + depguard: + rules: + prevent_unmaintained_packages: + list-mode: lax # allow unless explicitely denied + files: + - $all + - "!$test" + allow: + - $gostd + deny: + - pkg: io/ioutil + desc: "replaced by io and os packages since Go 1.16: https://tip.golang.org/doc/go1.16#ioutil" +``` diff --git a/tools/vendor/github.com/OpenPeeDeeP/depguard/v2/depguard.go b/tools/vendor/github.com/OpenPeeDeeP/depguard/v2/depguard.go index 2729091e8..af07b9bb6 100644 --- a/tools/vendor/github.com/OpenPeeDeeP/depguard/v2/depguard.go +++ b/tools/vendor/github.com/OpenPeeDeeP/depguard/v2/depguard.go @@ -47,12 +47,12 @@ func (ua *UncompiledAnalyzer) Compile() error { return nil } -func (settings LinterSettings) run(pass *analysis.Pass) (interface{}, error) { - s, err := settings.compile() +func (s LinterSettings) run(pass *analysis.Pass) (interface{}, error) { + settings, err := s.compile() if err != nil { return nil, err } - return s.run(pass) + return settings.run(pass) } func newAnalyzer(run func(*analysis.Pass) (interface{}, error)) *analysis.Analyzer { diff --git a/tools/vendor/github.com/OpenPeeDeeP/depguard/v2/settings.go b/tools/vendor/github.com/OpenPeeDeeP/depguard/v2/settings.go index 311cacc88..5bc74f8d0 100644 --- a/tools/vendor/github.com/OpenPeeDeeP/depguard/v2/settings.go +++ b/tools/vendor/github.com/OpenPeeDeeP/depguard/v2/settings.go @@ -202,9 +202,9 @@ func (l LinterSettings) compile() (linterSettings, error) { return li, nil } -func (ls linterSettings) whichLists(fileName string) []*list { +func (s linterSettings) whichLists(fileName string) []*list { var matches []*list - for _, l := range ls { + for _, l := range s { if l.fileMatch(fileName) { matches = append(matches, l) } @@ -236,5 +236,13 @@ func strInPrefixList(str string, prefixList []string) (bool, int) { if ioc[len(ioc)-1] == '$' { return str == ioc[:len(ioc)-1], idx } - return strings.HasPrefix(str, prefixList[idx]), idx + + // There is no sep chars in ioc so it is a GOROOT import that is being matched to the import (str) (see $gostd expander) + // AND the import contains a period which GOROOT cannot have. This eliminates the go.evil.me/pkg scenario + // BUT should still allow /os/exec and ./os/exec imports which are very uncommon + if !strings.ContainsAny(ioc, "./") && strings.ContainsRune(str, '.') { + return false, idx + } + + return strings.HasPrefix(str, ioc), idx } diff --git a/tools/vendor/github.com/golangci/dupl/.travis.yml b/tools/vendor/github.com/golangci/dupl/.travis.yml deleted file mode 100644 index 33de24c0f..000000000 --- a/tools/vendor/github.com/golangci/dupl/.travis.yml +++ /dev/null @@ -1,5 +0,0 @@ -language: go -go: - - 1.3 - - 1.8 - - 1.9 diff --git a/tools/vendor/github.com/golangci/dupl/README.md b/tools/vendor/github.com/golangci/dupl/README.md deleted file mode 100644 index f34901d7a..000000000 --- a/tools/vendor/github.com/golangci/dupl/README.md +++ /dev/null @@ -1,63 +0,0 @@ -# dupl [![Build Status](https://travis-ci.org/mibk/dupl.png)](https://travis-ci.org/mibk/dupl) - -**dupl** is a tool written in Go for finding code clones. So far it can find clones only -in the Go source files. The method uses suffix tree for serialized ASTs. 
It ignores values -of AST nodes. It just operates with their types (e.g. `if a == 13 {}` and `if x == 100 {}` are -considered the same provided it exceeds the minimal token sequence size). - -Due to the used method dupl can report so called "false positives" on the output. These are -the ones we do not consider clones (whether they are too small, or the values of the matched -tokens are completely different). - -## Installation - -```bash -go get -u github.com/golangci/dupl -``` - -## Usage - -``` -Usage of dupl: - dupl [flags] [paths] - -Paths: - If the given path is a file, dupl will use it regardless of - the file extension. If it is a directory it will recursively - search for *.go files in that directory. - - If no path is given dupl will recursively search for *.go - files in the current directory. - -Flags: - -files - read file names from stdin one at each line - -html - output the results as HTML, including duplicate code fragments - -plumbing - plumbing (easy-to-parse) output for consumption by scripts or tools - -t, -threshold size - minimum token sequence size as a clone (default 15) - -vendor - check files in vendor directory - -v, -verbose - explain what is being done - -Examples: - dupl -t 100 - Search clones in the current directory of size at least - 100 tokens. - dupl $(find app/ -name '*_test.go') - Search for clones in tests in the app directory. - find app/ -name '*_test.go' |dupl -files - The same as above. -``` - -## Example - -The reduced output of this command with the following parameters for the [Docker](https://www.docker.com) source code -looks like [this](http://htmlpreview.github.io/?https://github.com/golangci/dupl/blob/master/_output_example/docker.html). - -```bash -$ dupl -t 200 -html >docker.html -``` diff --git a/tools/vendor/github.com/golangci/dupl/main.go b/tools/vendor/github.com/golangci/dupl/lib/lib.go similarity index 51% rename from tools/vendor/github.com/golangci/dupl/main.go rename to tools/vendor/github.com/golangci/dupl/lib/lib.go index 3030a97ae..3000a8f38 100644 --- a/tools/vendor/github.com/golangci/dupl/main.go +++ b/tools/vendor/github.com/golangci/dupl/lib/lib.go @@ -1,11 +1,8 @@ -package dupl +// Package lib Golangci-lint: altered version of main.go +package lib import ( - "flag" - "fmt" - "io/ioutil" "os" - "path/filepath" "sort" "github.com/golangci/dupl/job" @@ -13,27 +10,6 @@ import ( "github.com/golangci/dupl/syntax" ) -const defaultThreshold = 15 - -var ( - paths = []string{"."} - vendor = flag.Bool("dupl.vendor", false, "") - verbose = flag.Bool("dupl.verbose", false, "") - files = flag.Bool("dupl.files", false, "") - - html = flag.Bool("dupl.html", false, "") - plumbing = flag.Bool("dupl.plumbing", false, "") -) - -const ( - vendorDirPrefix = "vendor" + string(filepath.Separator) - vendorDirInPath = string(filepath.Separator) + vendorDirPrefix -) - -func init() { - flag.BoolVar(verbose, "dupl.v", false, "alias for -verbose") -} - func Run(files []string, threshold int) ([]printer.Issue, error) { fchan := make(chan string, 1024) go func() { @@ -75,7 +51,7 @@ func makeIssues(duplChan <-chan syntax.Match) ([]printer.Issue, error) { } sort.Strings(keys) - p := printer.NewPlumbing(ioutil.ReadFile) + p := printer.NewIssuer(os.ReadFile) var issues []printer.Issue for _, k := range keys { @@ -110,39 +86,3 @@ func unique(group [][]*syntax.Node) [][]*syntax.Node { } return newGroup } - -func usage() { - fmt.Fprintln(os.Stderr, `Usage: dupl [flags] [paths] - -Paths: - If the given path is a file, dupl will use it regardless of - the file 
extension. If it is a directory, it will recursively - search for *.go files in that directory. - - If no path is given, dupl will recursively search for *.go - files in the current directory. - -Flags: - -files - read file names from stdin one at each line - -html - output the results as HTML, including duplicate code fragments - -plumbing - plumbing (easy-to-parse) output for consumption by scripts or tools - -t, -threshold size - minimum token sequence size as a clone (default 15) - -vendor - check files in vendor directory - -v, -verbose - explain what is being done - -Examples: - dupl -t 100 - Search clones in the current directory of size at least - 100 tokens. - dupl $(find app/ -name '*_test.go') - Search for clones in tests in the app directory. - find app/ -name '*_test.go' |dupl -files - The same as above.`) - os.Exit(2) -} diff --git a/tools/vendor/github.com/golangci/dupl/printer/html.go b/tools/vendor/github.com/golangci/dupl/printer/html.go index 5ad9e25c7..ac1474141 100644 --- a/tools/vendor/github.com/golangci/dupl/printer/html.go +++ b/tools/vendor/github.com/golangci/dupl/printer/html.go @@ -3,6 +3,7 @@ package printer import ( "bytes" "fmt" + "html" "io" "regexp" "sort" @@ -10,17 +11,17 @@ import ( "github.com/golangci/dupl/syntax" ) -type html struct { +type htmlprinter struct { iota int w io.Writer ReadFile } func NewHTML(w io.Writer, fread ReadFile) Printer { - return &html{w: w, ReadFile: fread} + return &htmlprinter{w: w, ReadFile: fread} } -func (p *html) PrintHeader() error { +func (p *htmlprinter) PrintHeader() error { _, err := fmt.Fprint(p.w, ` Duplicates @@ -35,7 +36,7 @@ func (p *html) PrintHeader() error { return err } -func (p *html) PrintClones(dups [][]*syntax.Node) error { +func (p *htmlprinter) PrintClones(dups [][]*syntax.Node) error { p.iota++ fmt.Fprintf(p.w, "
<h1>#%d found %d clones</h1>\n", p.iota, len(dups))
@@ -63,12 +64,13 @@ func (p *html) PrintClones(dups [][]*syntax.Node) error {
 	sort.Sort(byNameAndLine(clones))
 	for _, cl := range clones {
-		fmt.Fprintf(p.w, "<h2>%s:%d</h2>\n<pre>%s</pre>\n", cl.filename, cl.lineStart, cl.fragment)
+		fmt.Fprintf(p.w, "<h2>%s:%d</h2>\n<pre>%s</pre>\n", cl.filename, cl.lineStart,
+			html.EscapeString(string(cl.fragment)))
 	}
 	return nil
 }
 
-func (*html) PrintFooter() error { return nil }
+func (*htmlprinter) PrintFooter() error { return nil }
 
 func findLineBeg(file []byte, index int) int {
 	for i := index; i >= 0; i-- {
diff --git a/tools/vendor/github.com/golangci/dupl/printer/issuer.go b/tools/vendor/github.com/golangci/dupl/printer/issuer.go
new file mode 100644
index 000000000..9b79f5705
--- /dev/null
+++ b/tools/vendor/github.com/golangci/dupl/printer/issuer.go
@@ -0,0 +1,56 @@
+package printer
+
+// Golangci-lint: altered version of plumbing.go
+
+import (
+	"sort"
+
+	"github.com/golangci/dupl/syntax"
+)
+
+type Clone clone
+
+func (c Clone) Filename() string {
+	return c.filename
+}
+
+func (c Clone) LineStart() int {
+	return c.lineStart
+}
+
+func (c Clone) LineEnd() int {
+	return c.lineEnd
+}
+
+type Issue struct {
+	From, To Clone
+}
+
+type Issuer struct {
+	ReadFile
+}
+
+func NewIssuer(fread ReadFile) *Issuer {
+	return &Issuer{fread}
+}
+
+func (p *Issuer) MakeIssues(dups [][]*syntax.Node) ([]Issue, error) {
+	clones, err := prepareClonesInfo(p.ReadFile, dups)
+	if err != nil {
+		return nil, err
+	}
+
+	sort.Sort(byNameAndLine(clones))
+
+	var issues []Issue
+
+	for i, cl := range clones {
+		nextCl := clones[(i+1)%len(clones)]
+		issues = append(issues, Issue{
+			From: Clone(cl),
+			To:   Clone(nextCl),
+		})
+	}
+
+	return issues, nil
+}
diff --git a/tools/vendor/github.com/golangci/dupl/printer/plumbing.go b/tools/vendor/github.com/golangci/dupl/printer/plumbing.go
index cf39d01b7..b0577ddd5 100644
--- a/tools/vendor/github.com/golangci/dupl/printer/plumbing.go
+++ b/tools/vendor/github.com/golangci/dupl/printer/plumbing.go
@@ -1,50 +1,36 @@
 package printer
 
 import (
+	"fmt"
+	"io"
 	"sort"
 
 	"github.com/golangci/dupl/syntax"
 )
 
-type Clone clone
-
-func (c Clone) Filename() string {
-	return c.filename
-}
-
-func (c Clone) LineStart() int {
-	return c.lineStart
-}
-
-func (c Clone) LineEnd() int {
-	return c.lineEnd
-}
-
-type Issue struct {
-	From, To Clone
-}
-
-type Plumbing struct {
+type plumbing struct {
+	w io.Writer
 	ReadFile
 }
 
-func NewPlumbing(fread ReadFile) *Plumbing {
-	return &Plumbing{fread}
+func NewPlumbing(w io.Writer, fread ReadFile) Printer {
+	return &plumbing{w, fread}
 }
 
-func (p *Plumbing) MakeIssues(dups [][]*syntax.Node) ([]Issue, error) {
+func (p *plumbing) PrintHeader() error { return nil }
+
+func (p *plumbing) PrintClones(dups [][]*syntax.Node) error {
 	clones, err := prepareClonesInfo(p.ReadFile, dups)
 	if err != nil {
-		return nil, err
+		return err
 	}
 	sort.Sort(byNameAndLine(clones))
-	var issues []Issue
 	for i, cl := range clones {
 		nextCl := clones[(i+1)%len(clones)]
-		issues = append(issues, Issue{
-			From: Clone(cl),
-			To:   Clone(nextCl),
-		})
+		fmt.Fprintf(p.w, "%s:%d-%d: duplicate of %s:%d-%d\n", cl.filename, cl.lineStart, cl.lineEnd,
+			nextCl.filename, nextCl.lineStart, nextCl.lineEnd)
 	}
-	return issues, nil
+	return nil
 }
+
+func (p *plumbing) PrintFooter() error { return nil }
diff --git a/tools/vendor/github.com/golangci/dupl/suffixtree/suffixtree.go b/tools/vendor/github.com/golangci/dupl/suffixtree/suffixtree.go
index 738015025..871469e8d 100644
--- a/tools/vendor/github.com/golangci/dupl/suffixtree/suffixtree.go
+++ b/tools/vendor/github.com/golangci/dupl/suffixtree/suffixtree.go
@@ -41,7 +41,7 @@ func New() *STree {
 // Update refreshes the suffix tree with new data.
 func (t *STree) Update(data ...Token) {
 	t.data = append(t.data, data...)
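Reviewer note on the html.go hunk above: cl.fragment is raw source text, so writing it into the report unescaped would let a crafted snippet inject live markup into the generated HTML page. A minimal standalone sketch of what the new html.EscapeString call buys (not part of the patch):

```go
package main

import (
	"fmt"
	"html"
)

func main() {
	// A clone fragment is arbitrary source text; unescaped, this would
	// place a live <script> element into the report.
	fragment := `fmt.Println("<script>alert(1)</script>")`
	fmt.Printf("<pre>%s</pre>\n", html.EscapeString(fragment))
	// Prints: <pre>fmt.Println(&#34;&lt;script&gt;alert(1)&lt;/script&gt;&#34;)</pre>
}
```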
- for _ = range data { + for range data { t.update() t.s, t.start = t.canonize(t.s, t.start, t.end) t.end++ diff --git a/tools/vendor/github.com/golangci/dupl/syntax/syntax.go b/tools/vendor/github.com/golangci/dupl/syntax/syntax.go index e2c750afd..9b11d3119 100644 --- a/tools/vendor/github.com/golangci/dupl/syntax/syntax.go +++ b/tools/vendor/github.com/golangci/dupl/syntax/syntax.go @@ -6,6 +6,19 @@ import ( "github.com/golangci/dupl/suffixtree" ) +// To avoid "goroutine stack exceeds" with gigantic slices (Composite Literals). +// 10_000 => 0.89s +// 20_000 => 1.53s +// 30_000 => 2.57s +// 40_000 => 3.89s +// 50_000 => 5.58s +// 60_000 => 7.95s +// 70_000 => 10.15s +// 80_000 => 13.11s +// 90_000 => 16.62s +// 100_000 => 21.42s +const maxChildrenSerial = 10_000 + type Node struct { Type int Filename string @@ -40,7 +53,12 @@ func Serialize(n *Node) []*Node { func serial(n *Node, stream *[]*Node) int { *stream = append(*stream, n) var count int - for _, child := range n.Children { + for i, child := range n.Children { + // To avoid "goroutine stack exceeds" with gigantic slices (Composite Literals). + if i > maxChildrenSerial { + break + } + count += serial(child, stream) } n.Owns = count diff --git a/tools/vendor/github.com/golangci/golangci-lint/pkg/commands/run.go b/tools/vendor/github.com/golangci/golangci-lint/pkg/commands/run.go index 3aa467dae..57f3cdd99 100644 --- a/tools/vendor/github.com/golangci/golangci-lint/pkg/commands/run.go +++ b/tools/vendor/github.com/golangci/golangci-lint/pkg/commands/run.go @@ -186,6 +186,10 @@ func (c *runCommand) persistentPostRunE(_ *cobra.Command, _ []string) error { } func (c *runCommand) preRunE(_ *cobra.Command, args []string) error { + if c.cfg.GetConfigDir() != "" && c.cfg.Version != "" { + return errors.New("you are using a configuration file for golangci-lint v2 with golangci-lint v1: please use golangci-lint v2") + } + dbManager, err := lintersdb.NewManager(c.log.Child(logutils.DebugKeyLintersDB), c.cfg, lintersdb.NewLinterBuilder(), lintersdb.NewPluginModuleBuilder(c.log), lintersdb.NewPluginGoBuilder(c.log)) if err != nil { diff --git a/tools/vendor/github.com/golangci/golangci-lint/pkg/config/config.go b/tools/vendor/github.com/golangci/golangci-lint/pkg/config/config.go index 4ddd89617..ee7a62b7e 100644 --- a/tools/vendor/github.com/golangci/golangci-lint/pkg/config/config.go +++ b/tools/vendor/github.com/golangci/golangci-lint/pkg/config/config.go @@ -20,6 +20,8 @@ type Config struct { cfgDir string // Path to the directory containing golangci-lint config file. basePath string // Path the root directory related to [Run.RelativePathMode]. + Version string `mapstructure:"version"` // From v2, to be able to detect v2 config file. 
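The maxChildrenSerial cap added to dupl's syntax serializer above trades completeness for bounded work: past 10_000 children of a single node (think generated composite literals), serialization stops descending, keeping both runtime and recursion depth in check, per the timings in the comment. A self-contained sketch of the same pattern; Node, serial, and maxChildren here are simplified stand-ins for dupl's types, not the vendored code:

```go
package main

import "fmt"

// Node is a simplified stand-in for dupl's syntax.Node; the real type
// also carries token positions and an Owns count.
type Node struct {
	Children []*Node
}

// maxChildren mirrors the maxChildrenSerial cap from the hunk above.
const maxChildren = 10_000

func serial(n *Node, stream *[]*Node) {
	*stream = append(*stream, n)
	for i, child := range n.Children {
		if i > maxChildren { // truncate pathological fan-out
			break
		}
		serial(child, stream)
	}
}

func main() {
	root := &Node{}
	for i := 0; i < 50_000; i++ {
		root.Children = append(root.Children, &Node{})
	}
	var stream []*Node
	serial(root, &stream)
	fmt.Println(len(stream)) // 10002: root plus children 0 through 10000
}
```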
+ Run Run `mapstructure:"run"` Output Output `mapstructure:"output"` diff --git a/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/dupl/dupl.go b/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/dupl/dupl.go index d2bb3d8d8..6d1a9809c 100644 --- a/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/dupl/dupl.go +++ b/tools/vendor/github.com/golangci/golangci-lint/pkg/golinters/dupl/dupl.go @@ -5,7 +5,7 @@ import ( "go/token" "sync" - duplAPI "github.com/golangci/dupl" + duplAPI "github.com/golangci/dupl/lib" "golang.org/x/tools/go/analysis" "github.com/golangci/golangci-lint/pkg/config" @@ -45,7 +45,7 @@ func New(settings *config.DuplSettings) *goanalysis.Linter { return goanalysis.NewLinter( linterName, - "Tool for code clone detection", + "Detects duplicate fragments of code.", []*analysis.Analyzer{analyzer}, nil, ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { diff --git a/tools/vendor/github.com/securego/gosec/v2/action.yml b/tools/vendor/github.com/securego/gosec/v2/action.yml index 7aa2ea0da..edd7e7083 100644 --- a/tools/vendor/github.com/securego/gosec/v2/action.yml +++ b/tools/vendor/github.com/securego/gosec/v2/action.yml @@ -10,7 +10,7 @@ inputs: runs: using: 'docker' - image: 'docker://securego/gosec:2.22.0' + image: 'docker://securego/gosec:2.22.1' args: - ${{ inputs.args }} diff --git a/tools/vendor/golang.org/x/sync/errgroup/errgroup.go b/tools/vendor/golang.org/x/sync/errgroup/errgroup.go index b8322598a..a4ea5d14f 100644 --- a/tools/vendor/golang.org/x/sync/errgroup/errgroup.go +++ b/tools/vendor/golang.org/x/sync/errgroup/errgroup.go @@ -46,7 +46,7 @@ func (g *Group) done() { // returns a non-nil error or the first time Wait returns, whichever occurs // first. func WithContext(ctx context.Context) (*Group, context.Context) { - ctx, cancel := withCancelCause(ctx) + ctx, cancel := context.WithCancelCause(ctx) return &Group{cancel: cancel}, ctx } diff --git a/tools/vendor/golang.org/x/sync/errgroup/go120.go b/tools/vendor/golang.org/x/sync/errgroup/go120.go deleted file mode 100644 index f93c740b6..000000000 --- a/tools/vendor/golang.org/x/sync/errgroup/go120.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.20 - -package errgroup - -import "context" - -func withCancelCause(parent context.Context) (context.Context, func(error)) { - return context.WithCancelCause(parent) -} diff --git a/tools/vendor/golang.org/x/sync/errgroup/pre_go120.go b/tools/vendor/golang.org/x/sync/errgroup/pre_go120.go deleted file mode 100644 index 88ce33434..000000000 --- a/tools/vendor/golang.org/x/sync/errgroup/pre_go120.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
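With the go1.20 build-tag shims deleted here, errgroup.WithContext calls context.WithCancelCause directly, so the module now assumes Go 1.20 or later. For readers unfamiliar with that stdlib API, a minimal illustration of cancellation with a recorded cause:

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

func main() {
	// WithCancelCause (Go 1.20+) is like WithCancel, but the cancel
	// function records why the context was canceled.
	ctx, cancel := context.WithCancelCause(context.Background())
	cancel(errors.New("first worker failed"))

	<-ctx.Done()
	fmt.Println(ctx.Err())          // context canceled
	fmt.Println(context.Cause(ctx)) // first worker failed
}
```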
- -//go:build !go1.20 - -package errgroup - -import "context" - -func withCancelCause(parent context.Context) (context.Context, func(error)) { - ctx, cancel := context.WithCancel(parent) - return ctx, func(error) { cancel() } -} diff --git a/tools/vendor/golang.org/x/tools/go/analysis/analysis.go b/tools/vendor/golang.org/x/tools/go/analysis/analysis.go index 3a73084a5..a7df4d1fe 100644 --- a/tools/vendor/golang.org/x/tools/go/analysis/analysis.go +++ b/tools/vendor/golang.org/x/tools/go/analysis/analysis.go @@ -45,7 +45,7 @@ type Analyzer struct { // To pass analysis results between packages (and thus // potentially between address spaces), use Facts, which are // serializable. - Run func(*Pass) (interface{}, error) + Run func(*Pass) (any, error) // RunDespiteErrors allows the driver to invoke // the Run method of this analyzer even on a @@ -112,7 +112,7 @@ type Pass struct { // The map keys are the elements of Analysis.Required, // and the type of each corresponding value is the required // analysis's ResultType. - ResultOf map[*Analyzer]interface{} + ResultOf map[*Analyzer]any // ReadFile returns the contents of the named file. // @@ -186,7 +186,7 @@ type ObjectFact struct { // Reportf is a helper function that reports a Diagnostic using the // specified position and formatted error message. -func (pass *Pass) Reportf(pos token.Pos, format string, args ...interface{}) { +func (pass *Pass) Reportf(pos token.Pos, format string, args ...any) { msg := fmt.Sprintf(format, args...) pass.Report(Diagnostic{Pos: pos, Message: msg}) } @@ -201,7 +201,7 @@ type Range interface { // ReportRangef is a helper function that reports a Diagnostic using the // range provided. ast.Node values can be passed in as the range because // they satisfy the Range interface. -func (pass *Pass) ReportRangef(rng Range, format string, args ...interface{}) { +func (pass *Pass) ReportRangef(rng Range, format string, args ...any) { msg := fmt.Sprintf(format, args...) pass.Report(Diagnostic{Pos: rng.Pos(), End: rng.End(), Message: msg}) } diff --git a/tools/vendor/golang.org/x/tools/go/analysis/passes/appends/appends.go b/tools/vendor/golang.org/x/tools/go/analysis/passes/appends/appends.go index 6976f0d90..e554c3cc9 100644 --- a/tools/vendor/golang.org/x/tools/go/analysis/passes/appends/appends.go +++ b/tools/vendor/golang.org/x/tools/go/analysis/passes/appends/appends.go @@ -29,7 +29,7 @@ var Analyzer = &analysis.Analyzer{ Run: run, } -func run(pass *analysis.Pass) (interface{}, error) { +func run(pass *analysis.Pass) (any, error) { inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) nodeFilter := []ast.Node{ diff --git a/tools/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go b/tools/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go index a47ecbae7..436b03cb2 100644 --- a/tools/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go +++ b/tools/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go @@ -150,7 +150,7 @@ var ( abiSuff = re(`^(.+)<(ABI.+)>$`) ) -func run(pass *analysis.Pass) (interface{}, error) { +func run(pass *analysis.Pass) (any, error) { // No work if no assembly files. 
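Most of the x/tools churn through the following hunks is mechanical: interface{} becomes any. Since Go 1.18, any is a predeclared alias (type any = interface{}), so the rewrite changes no behavior; a quick demonstration:

```go
package main

import "fmt"

// Identical to func describe(v interface{}) string: any is an alias,
// not a distinct type, so the two spellings are interchangeable.
func describe(v any) string {
	return fmt.Sprintf("%[1]v (%[1]T)", v)
}

func main() {
	var x interface{} = 42
	var y any = x // assignable both ways: same type
	fmt.Println(describe(y)) // 42 (int)
}
```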
var sfiles []string for _, fname := range pass.OtherFiles { @@ -226,7 +226,7 @@ Files: for lineno, line := range lines { lineno++ - badf := func(format string, args ...interface{}) { + badf := func(format string, args ...any) { pass.Reportf(analysisutil.LineStart(tf, lineno), "[%s] %s: %s", arch, fnName, fmt.Sprintf(format, args...)) } @@ -646,7 +646,7 @@ func asmParseDecl(pass *analysis.Pass, decl *ast.FuncDecl) map[string]*asmFunc { } // asmCheckVar checks a single variable reference. -func asmCheckVar(badf func(string, ...interface{}), fn *asmFunc, line, expr string, off int, v *asmVar, archDef *asmArch) { +func asmCheckVar(badf func(string, ...any), fn *asmFunc, line, expr string, off int, v *asmVar, archDef *asmArch) { m := asmOpcode.FindStringSubmatch(line) if m == nil { if !strings.HasPrefix(strings.TrimSpace(line), "//") { diff --git a/tools/vendor/golang.org/x/tools/go/analysis/passes/buildssa/buildssa.go b/tools/vendor/golang.org/x/tools/go/analysis/passes/buildssa/buildssa.go index f077ea282..f49fea517 100644 --- a/tools/vendor/golang.org/x/tools/go/analysis/passes/buildssa/buildssa.go +++ b/tools/vendor/golang.org/x/tools/go/analysis/passes/buildssa/buildssa.go @@ -32,7 +32,7 @@ type SSA struct { SrcFuncs []*ssa.Function } -func run(pass *analysis.Pass) (interface{}, error) { +func run(pass *analysis.Pass) (any, error) { // We must create a new Program for each Package because the // analysis API provides no place to hang a Program shared by // all Packages. Consequently, SSA Packages and Functions do not diff --git a/tools/vendor/golang.org/x/tools/go/analysis/passes/buildtag/buildtag.go b/tools/vendor/golang.org/x/tools/go/analysis/passes/buildtag/buildtag.go index e7434e8fe..6c7a0df58 100644 --- a/tools/vendor/golang.org/x/tools/go/analysis/passes/buildtag/buildtag.go +++ b/tools/vendor/golang.org/x/tools/go/analysis/passes/buildtag/buildtag.go @@ -26,7 +26,7 @@ var Analyzer = &analysis.Analyzer{ Run: runBuildTag, } -func runBuildTag(pass *analysis.Pass) (interface{}, error) { +func runBuildTag(pass *analysis.Pass) (any, error) { for _, f := range pass.Files { checkGoFile(pass, f) } diff --git a/tools/vendor/golang.org/x/tools/go/analysis/passes/cgocall/cgocall.go b/tools/vendor/golang.org/x/tools/go/analysis/passes/cgocall/cgocall.go index 4f3bb035d..d9189b5b6 100644 --- a/tools/vendor/golang.org/x/tools/go/analysis/passes/cgocall/cgocall.go +++ b/tools/vendor/golang.org/x/tools/go/analysis/passes/cgocall/cgocall.go @@ -55,7 +55,7 @@ func run(pass *analysis.Pass) (any, error) { return nil, nil } -func checkCgo(fset *token.FileSet, f *ast.File, info *types.Info, reportf func(token.Pos, string, ...interface{})) { +func checkCgo(fset *token.FileSet, f *ast.File, info *types.Info, reportf func(token.Pos, string, ...any)) { ast.Inspect(f, func(n ast.Node) bool { call, ok := n.(*ast.CallExpr) if !ok { diff --git a/tools/vendor/golang.org/x/tools/go/analysis/passes/composite/composite.go b/tools/vendor/golang.org/x/tools/go/analysis/passes/composite/composite.go index f56c3e622..60c6afe49 100644 --- a/tools/vendor/golang.org/x/tools/go/analysis/passes/composite/composite.go +++ b/tools/vendor/golang.org/x/tools/go/analysis/passes/composite/composite.go @@ -51,7 +51,7 @@ func init() { // runUnkeyedLiteral checks if a composite literal is a struct literal with // unkeyed fields. 
-func run(pass *analysis.Pass) (interface{}, error) { +func run(pass *analysis.Pass) (any, error) { inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) nodeFilter := []ast.Node{ diff --git a/tools/vendor/golang.org/x/tools/go/analysis/passes/copylock/copylock.go b/tools/vendor/golang.org/x/tools/go/analysis/passes/copylock/copylock.go index a9f02ac62..49c14d498 100644 --- a/tools/vendor/golang.org/x/tools/go/analysis/passes/copylock/copylock.go +++ b/tools/vendor/golang.org/x/tools/go/analysis/passes/copylock/copylock.go @@ -36,7 +36,7 @@ var Analyzer = &analysis.Analyzer{ Run: run, } -func run(pass *analysis.Pass) (interface{}, error) { +func run(pass *analysis.Pass) (any, error) { inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) var goversion string // effective file version ("" => unknown) @@ -378,7 +378,7 @@ var lockerType *types.Interface // Construct a sync.Locker interface type. func init() { - nullary := types.NewSignature(nil, nil, nil, false) // func() + nullary := types.NewSignatureType(nil, nil, nil, nil, nil, false) // func() methods := []*types.Func{ types.NewFunc(token.NoPos, nil, "Lock", nullary), types.NewFunc(token.NoPos, nil, "Unlock", nullary), diff --git a/tools/vendor/golang.org/x/tools/go/analysis/passes/ctrlflow/ctrlflow.go b/tools/vendor/golang.org/x/tools/go/analysis/passes/ctrlflow/ctrlflow.go index d21adeee9..951aaed00 100644 --- a/tools/vendor/golang.org/x/tools/go/analysis/passes/ctrlflow/ctrlflow.go +++ b/tools/vendor/golang.org/x/tools/go/analysis/passes/ctrlflow/ctrlflow.go @@ -80,7 +80,7 @@ func (c *CFGs) FuncLit(lit *ast.FuncLit) *cfg.CFG { return c.funcLits[lit].cfg } -func run(pass *analysis.Pass) (interface{}, error) { +func run(pass *analysis.Pass) (any, error) { inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) // Because CFG construction consumes and produces noReturn diff --git a/tools/vendor/golang.org/x/tools/go/analysis/passes/directive/directive.go b/tools/vendor/golang.org/x/tools/go/analysis/passes/directive/directive.go index b20540238..bebec8914 100644 --- a/tools/vendor/golang.org/x/tools/go/analysis/passes/directive/directive.go +++ b/tools/vendor/golang.org/x/tools/go/analysis/passes/directive/directive.go @@ -40,7 +40,7 @@ var Analyzer = &analysis.Analyzer{ Run: runDirective, } -func runDirective(pass *analysis.Pass) (interface{}, error) { +func runDirective(pass *analysis.Pass) (any, error) { for _, f := range pass.Files { checkGoFile(pass, f) } diff --git a/tools/vendor/golang.org/x/tools/go/analysis/passes/fieldalignment/fieldalignment.go b/tools/vendor/golang.org/x/tools/go/analysis/passes/fieldalignment/fieldalignment.go index 93fa39140..e2ddc83b6 100644 --- a/tools/vendor/golang.org/x/tools/go/analysis/passes/fieldalignment/fieldalignment.go +++ b/tools/vendor/golang.org/x/tools/go/analysis/passes/fieldalignment/fieldalignment.go @@ -65,7 +65,7 @@ var Analyzer = &analysis.Analyzer{ Run: run, } -func run(pass *analysis.Pass) (interface{}, error) { +func run(pass *analysis.Pass) (any, error) { inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) nodeFilter := []ast.Node{ (*ast.StructType)(nil), diff --git a/tools/vendor/golang.org/x/tools/go/analysis/passes/findcall/findcall.go b/tools/vendor/golang.org/x/tools/go/analysis/passes/findcall/findcall.go index 2671573d1..9db4de1c2 100644 --- a/tools/vendor/golang.org/x/tools/go/analysis/passes/findcall/findcall.go +++ b/tools/vendor/golang.org/x/tools/go/analysis/passes/findcall/findcall.go @@ -38,7 +38,7 @@ func init() { 
Analyzer.Flags.StringVar(&name, "name", name, "name of the function to find") } -func run(pass *analysis.Pass) (interface{}, error) { +func run(pass *analysis.Pass) (any, error) { for _, f := range pass.Files { ast.Inspect(f, func(n ast.Node) bool { if call, ok := n.(*ast.CallExpr); ok { diff --git a/tools/vendor/golang.org/x/tools/go/analysis/passes/framepointer/framepointer.go b/tools/vendor/golang.org/x/tools/go/analysis/passes/framepointer/framepointer.go index 8012de99d..ba94fd68e 100644 --- a/tools/vendor/golang.org/x/tools/go/analysis/passes/framepointer/framepointer.go +++ b/tools/vendor/golang.org/x/tools/go/analysis/passes/framepointer/framepointer.go @@ -113,7 +113,7 @@ var arm64Branch = map[string]bool{ "RET": true, } -func run(pass *analysis.Pass) (interface{}, error) { +func run(pass *analysis.Pass) (any, error) { arch, ok := arches[build.Default.GOARCH] if !ok { return nil, nil diff --git a/tools/vendor/golang.org/x/tools/go/analysis/passes/ifaceassert/ifaceassert.go b/tools/vendor/golang.org/x/tools/go/analysis/passes/ifaceassert/ifaceassert.go index 5f07ed3ff..4022dbe7c 100644 --- a/tools/vendor/golang.org/x/tools/go/analysis/passes/ifaceassert/ifaceassert.go +++ b/tools/vendor/golang.org/x/tools/go/analysis/passes/ifaceassert/ifaceassert.go @@ -52,7 +52,7 @@ func assertableTo(free *typeparams.Free, v, t types.Type) *types.Func { return nil } -func run(pass *analysis.Pass) (interface{}, error) { +func run(pass *analysis.Pass) (any, error) { inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) nodeFilter := []ast.Node{ (*ast.TypeAssertExpr)(nil), diff --git a/tools/vendor/golang.org/x/tools/go/analysis/passes/inspect/inspect.go b/tools/vendor/golang.org/x/tools/go/analysis/passes/inspect/inspect.go index 3b121cb0c..ee1972f56 100644 --- a/tools/vendor/golang.org/x/tools/go/analysis/passes/inspect/inspect.go +++ b/tools/vendor/golang.org/x/tools/go/analysis/passes/inspect/inspect.go @@ -44,6 +44,6 @@ var Analyzer = &analysis.Analyzer{ ResultType: reflect.TypeOf(new(inspector.Inspector)), } -func run(pass *analysis.Pass) (interface{}, error) { +func run(pass *analysis.Pass) (any, error) { return inspector.New(pass.Files), nil } diff --git a/tools/vendor/golang.org/x/tools/go/analysis/passes/loopclosure/loopclosure.go b/tools/vendor/golang.org/x/tools/go/analysis/passes/loopclosure/loopclosure.go index d31812421..64df1b106 100644 --- a/tools/vendor/golang.org/x/tools/go/analysis/passes/loopclosure/loopclosure.go +++ b/tools/vendor/golang.org/x/tools/go/analysis/passes/loopclosure/loopclosure.go @@ -30,7 +30,7 @@ var Analyzer = &analysis.Analyzer{ Run: run, } -func run(pass *analysis.Pass) (interface{}, error) { +func run(pass *analysis.Pass) (any, error) { inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) nodeFilter := []ast.Node{ diff --git a/tools/vendor/golang.org/x/tools/go/analysis/passes/lostcancel/lostcancel.go b/tools/vendor/golang.org/x/tools/go/analysis/passes/lostcancel/lostcancel.go index f8a661aa5..a7fee1809 100644 --- a/tools/vendor/golang.org/x/tools/go/analysis/passes/lostcancel/lostcancel.go +++ b/tools/vendor/golang.org/x/tools/go/analysis/passes/lostcancel/lostcancel.go @@ -47,7 +47,7 @@ var contextPackage = "context" // containing the assignment, we assume that other uses exist. // // checkLostCancel analyzes a single named or literal function. -func run(pass *analysis.Pass) (interface{}, error) { +func run(pass *analysis.Pass) (any, error) { // Fast path: bypass check if file doesn't use context.WithCancel. 
if !analysisinternal.Imports(pass.Pkg, contextPackage) { return nil, nil diff --git a/tools/vendor/golang.org/x/tools/go/analysis/passes/nilfunc/nilfunc.go b/tools/vendor/golang.org/x/tools/go/analysis/passes/nilfunc/nilfunc.go index 778f7f1f8..3ac2dcd49 100644 --- a/tools/vendor/golang.org/x/tools/go/analysis/passes/nilfunc/nilfunc.go +++ b/tools/vendor/golang.org/x/tools/go/analysis/passes/nilfunc/nilfunc.go @@ -30,7 +30,7 @@ var Analyzer = &analysis.Analyzer{ Run: run, } -func run(pass *analysis.Pass) (interface{}, error) { +func run(pass *analysis.Pass) (any, error) { inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) nodeFilter := []ast.Node{ diff --git a/tools/vendor/golang.org/x/tools/go/analysis/passes/nilness/nilness.go b/tools/vendor/golang.org/x/tools/go/analysis/passes/nilness/nilness.go index faaf12a93..af61ae608 100644 --- a/tools/vendor/golang.org/x/tools/go/analysis/passes/nilness/nilness.go +++ b/tools/vendor/golang.org/x/tools/go/analysis/passes/nilness/nilness.go @@ -28,7 +28,7 @@ var Analyzer = &analysis.Analyzer{ Requires: []*analysis.Analyzer{buildssa.Analyzer}, } -func run(pass *analysis.Pass) (interface{}, error) { +func run(pass *analysis.Pass) (any, error) { ssainput := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA) for _, fn := range ssainput.SrcFuncs { runFunc(pass, fn) @@ -37,7 +37,7 @@ func run(pass *analysis.Pass) (interface{}, error) { } func runFunc(pass *analysis.Pass, fn *ssa.Function) { - reportf := func(category string, pos token.Pos, format string, args ...interface{}) { + reportf := func(category string, pos token.Pos, format string, args ...any) { // We ignore nil-checking ssa.Instructions // that don't correspond to syntax. if pos.IsValid() { diff --git a/tools/vendor/golang.org/x/tools/go/analysis/passes/pkgfact/pkgfact.go b/tools/vendor/golang.org/x/tools/go/analysis/passes/pkgfact/pkgfact.go index 077c87808..31748795d 100644 --- a/tools/vendor/golang.org/x/tools/go/analysis/passes/pkgfact/pkgfact.go +++ b/tools/vendor/golang.org/x/tools/go/analysis/passes/pkgfact/pkgfact.go @@ -53,7 +53,7 @@ type pairsFact []string func (f *pairsFact) AFact() {} func (f *pairsFact) String() string { return "pairs(" + strings.Join(*f, ", ") + ")" } -func run(pass *analysis.Pass) (interface{}, error) { +func run(pass *analysis.Pass) (any, error) { result := make(map[string]string) // At each import, print the fact from the imported diff --git a/tools/vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go b/tools/vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go index 81600a283..a28ed365d 100644 --- a/tools/vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go +++ b/tools/vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go @@ -924,9 +924,14 @@ func checkPrint(pass *analysis.Pass, call *ast.CallExpr, name string) { // The % in "abc 0.0%" couldn't be a formatting directive. s = strings.TrimSuffix(s, "%") if strings.Contains(s, "%") { - m := printFormatRE.FindStringSubmatch(s) - if m != nil { - pass.ReportRangef(call, "%s call has possible Printf formatting directive %s", name, m[0]) + for _, m := range printFormatRE.FindAllString(s, -1) { + // Allow %XX where XX are hex digits, + // as this is common in URLs. + if len(m) >= 3 && isHex(m[1]) && isHex(m[2]) { + continue + } + pass.ReportRangef(call, "%s call has possible Printf formatting directive %s", name, m) + break // report only the first one } } } @@ -992,3 +997,10 @@ func (ss stringSet) Set(flag string) error { // // Remove this after the 1.24 release. 
var suppressNonconstants bool + +// isHex reports whether b is a hex digit. +func isHex(b byte) bool { + return '0' <= b && b <= '9' || + 'A' <= b && b <= 'F' || + 'a' <= b && b <= 'f' +} diff --git a/tools/vendor/golang.org/x/tools/go/analysis/passes/reflectvaluecompare/reflectvaluecompare.go b/tools/vendor/golang.org/x/tools/go/analysis/passes/reflectvaluecompare/reflectvaluecompare.go index 72435b2fc..d0632dbda 100644 --- a/tools/vendor/golang.org/x/tools/go/analysis/passes/reflectvaluecompare/reflectvaluecompare.go +++ b/tools/vendor/golang.org/x/tools/go/analysis/passes/reflectvaluecompare/reflectvaluecompare.go @@ -28,7 +28,7 @@ var Analyzer = &analysis.Analyzer{ Run: run, } -func run(pass *analysis.Pass) (interface{}, error) { +func run(pass *analysis.Pass) (any, error) { inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) nodeFilter := []ast.Node{ diff --git a/tools/vendor/golang.org/x/tools/go/analysis/passes/shadow/shadow.go b/tools/vendor/golang.org/x/tools/go/analysis/passes/shadow/shadow.go index 30258c991..8f768bb76 100644 --- a/tools/vendor/golang.org/x/tools/go/analysis/passes/shadow/shadow.go +++ b/tools/vendor/golang.org/x/tools/go/analysis/passes/shadow/shadow.go @@ -36,7 +36,7 @@ func init() { Analyzer.Flags.BoolVar(&strict, "strict", strict, "whether to be strict about shadowing; can be noisy") } -func run(pass *analysis.Pass) (interface{}, error) { +func run(pass *analysis.Pass) (any, error) { inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) spans := make(map[types.Object]span) diff --git a/tools/vendor/golang.org/x/tools/go/analysis/passes/shift/shift.go b/tools/vendor/golang.org/x/tools/go/analysis/passes/shift/shift.go index 46b5f6d68..57987b3d2 100644 --- a/tools/vendor/golang.org/x/tools/go/analysis/passes/shift/shift.go +++ b/tools/vendor/golang.org/x/tools/go/analysis/passes/shift/shift.go @@ -34,7 +34,7 @@ var Analyzer = &analysis.Analyzer{ Run: run, } -func run(pass *analysis.Pass) (interface{}, error) { +func run(pass *analysis.Pass) (any, error) { inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) // Do a complete pass to compute dead nodes. 
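The printf change above stops flagging %XX escapes, which appear routinely in URLs passed to Print-style calls. A standalone sketch of the heuristic, reusing the isHex helper from the hunk; note the real checker matches candidates with printFormatRE rather than this simplified scan:

```go
package main

import "fmt"

// isHex is copied from the hunk above.
func isHex(b byte) bool {
	return '0' <= b && b <= '9' ||
		'A' <= b && b <= 'F' ||
		'a' <= b && b <= 'f'
}

func main() {
	// Before this fix, a checker could flag the %20 below as a stray
	// formatting directive in a non-formatting Print call.
	s := "https://example.com/a%20b and a real directive %s here"
	for i := 0; i+2 < len(s); i++ {
		if s[i] != '%' {
			continue
		}
		if isHex(s[i+1]) && isHex(s[i+2]) {
			fmt.Printf("%q looks like a URL escape; skip\n", s[i:i+3])
		} else {
			fmt.Printf("%q may be a formatting directive\n", s[i:i+3])
		}
	}
}
```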
diff --git a/tools/vendor/golang.org/x/tools/go/analysis/passes/stdmethods/stdmethods.go b/tools/vendor/golang.org/x/tools/go/analysis/passes/stdmethods/stdmethods.go index 28f51b1ec..a0bdf001a 100644 --- a/tools/vendor/golang.org/x/tools/go/analysis/passes/stdmethods/stdmethods.go +++ b/tools/vendor/golang.org/x/tools/go/analysis/passes/stdmethods/stdmethods.go @@ -66,7 +66,7 @@ var canonicalMethods = map[string]struct{ args, results []string }{ "WriteTo": {[]string{"=io.Writer"}, []string{"int64", "error"}}, // io.WriterTo } -func run(pass *analysis.Pass) (interface{}, error) { +func run(pass *analysis.Pass) (any, error) { inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) nodeFilter := []ast.Node{ diff --git a/tools/vendor/golang.org/x/tools/go/analysis/passes/stringintconv/string.go b/tools/vendor/golang.org/x/tools/go/analysis/passes/stringintconv/string.go index f56e6ecaa..a23721cd2 100644 --- a/tools/vendor/golang.org/x/tools/go/analysis/passes/stringintconv/string.go +++ b/tools/vendor/golang.org/x/tools/go/analysis/passes/stringintconv/string.go @@ -70,7 +70,7 @@ func typeName(t types.Type) string { return "" } -func run(pass *analysis.Pass) (interface{}, error) { +func run(pass *analysis.Pass) (any, error) { inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) nodeFilter := []ast.Node{ (*ast.File)(nil), diff --git a/tools/vendor/golang.org/x/tools/go/analysis/passes/structtag/structtag.go b/tools/vendor/golang.org/x/tools/go/analysis/passes/structtag/structtag.go index 4115ef769..d92650340 100644 --- a/tools/vendor/golang.org/x/tools/go/analysis/passes/structtag/structtag.go +++ b/tools/vendor/golang.org/x/tools/go/analysis/passes/structtag/structtag.go @@ -34,7 +34,7 @@ var Analyzer = &analysis.Analyzer{ Run: run, } -func run(pass *analysis.Pass) (interface{}, error) { +func run(pass *analysis.Pass) (any, error) { inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) nodeFilter := []ast.Node{ diff --git a/tools/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/testinggoroutine.go b/tools/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/testinggoroutine.go index fef5a6014..f49ac4eb1 100644 --- a/tools/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/testinggoroutine.go +++ b/tools/vendor/golang.org/x/tools/go/analysis/passes/testinggoroutine/testinggoroutine.go @@ -36,7 +36,7 @@ var Analyzer = &analysis.Analyzer{ Run: run, } -func run(pass *analysis.Pass) (interface{}, error) { +func run(pass *analysis.Pass) (any, error) { inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) if !analysisinternal.Imports(pass.Pkg, "testing") { diff --git a/tools/vendor/golang.org/x/tools/go/analysis/passes/tests/tests.go b/tools/vendor/golang.org/x/tools/go/analysis/passes/tests/tests.go index 285b34218..9f59006eb 100644 --- a/tools/vendor/golang.org/x/tools/go/analysis/passes/tests/tests.go +++ b/tools/vendor/golang.org/x/tools/go/analysis/passes/tests/tests.go @@ -47,7 +47,7 @@ var acceptedFuzzTypes = []types.Type{ types.NewSlice(types.Universe.Lookup("byte").Type()), } -func run(pass *analysis.Pass) (interface{}, error) { +func run(pass *analysis.Pass) (any, error) { for _, f := range pass.Files { if !strings.HasSuffix(pass.Fset.File(f.FileStart).Name(), "_test.go") { continue diff --git a/tools/vendor/golang.org/x/tools/go/analysis/passes/unreachable/unreachable.go b/tools/vendor/golang.org/x/tools/go/analysis/passes/unreachable/unreachable.go index b810db7ee..fcf5fbd90 100644 --- 
a/tools/vendor/golang.org/x/tools/go/analysis/passes/unreachable/unreachable.go +++ b/tools/vendor/golang.org/x/tools/go/analysis/passes/unreachable/unreachable.go @@ -30,7 +30,7 @@ var Analyzer = &analysis.Analyzer{ Run: run, } -func run(pass *analysis.Pass) (interface{}, error) { +func run(pass *analysis.Pass) (any, error) { inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) nodeFilter := []ast.Node{ diff --git a/tools/vendor/golang.org/x/tools/go/analysis/passes/unsafeptr/unsafeptr.go b/tools/vendor/golang.org/x/tools/go/analysis/passes/unsafeptr/unsafeptr.go index fb5b944fa..57c6da64f 100644 --- a/tools/vendor/golang.org/x/tools/go/analysis/passes/unsafeptr/unsafeptr.go +++ b/tools/vendor/golang.org/x/tools/go/analysis/passes/unsafeptr/unsafeptr.go @@ -30,7 +30,7 @@ var Analyzer = &analysis.Analyzer{ Run: run, } -func run(pass *analysis.Pass) (interface{}, error) { +func run(pass *analysis.Pass) (any, error) { inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) nodeFilter := []ast.Node{ diff --git a/tools/vendor/golang.org/x/tools/go/analysis/passes/unusedresult/unusedresult.go b/tools/vendor/golang.org/x/tools/go/analysis/passes/unusedresult/unusedresult.go index d7cc1e6ae..932f1347e 100644 --- a/tools/vendor/golang.org/x/tools/go/analysis/passes/unusedresult/unusedresult.go +++ b/tools/vendor/golang.org/x/tools/go/analysis/passes/unusedresult/unusedresult.go @@ -85,7 +85,7 @@ func init() { "comma-separated list of names of methods of type func() string whose results must be used") } -func run(pass *analysis.Pass) (interface{}, error) { +func run(pass *analysis.Pass) (any, error) { inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) // Split functions into (pkg, name) pairs to save allocation later. @@ -130,9 +130,7 @@ func run(pass *analysis.Pass) (interface{}, error) { } // func() string -var sigNoArgsStringResult = types.NewSignature(nil, nil, - types.NewTuple(types.NewParam(token.NoPos, nil, "", types.Typ[types.String])), - false) +var sigNoArgsStringResult = types.NewSignatureType(nil, nil, nil, nil, types.NewTuple(types.NewParam(token.NoPos, nil, "", types.Typ[types.String])), false) type stringSetFlag map[string]bool diff --git a/tools/vendor/golang.org/x/tools/go/analysis/validate.go b/tools/vendor/golang.org/x/tools/go/analysis/validate.go index 4f2c40456..145393921 100644 --- a/tools/vendor/golang.org/x/tools/go/analysis/validate.go +++ b/tools/vendor/golang.org/x/tools/go/analysis/validate.go @@ -63,7 +63,7 @@ func Validate(analyzers []*Analyzer) error { return fmt.Errorf("fact type %s registered by two analyzers: %v, %v", t, a, prev) } - if t.Kind() != reflect.Ptr { + if t.Kind() != reflect.Pointer { return fmt.Errorf("%s: fact type %s is not a pointer", a, t) } factTypes[t] = a diff --git a/tools/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go b/tools/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go index 58934f766..5c8dbbb7a 100644 --- a/tools/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go +++ b/tools/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go @@ -183,7 +183,7 @@ type application struct { func (a *application) apply(parent ast.Node, name string, iter *iterator, n ast.Node) { // convert typed nil into untyped nil - if v := reflect.ValueOf(n); v.Kind() == reflect.Ptr && v.IsNil() { + if v := reflect.ValueOf(n); v.Kind() == reflect.Pointer && v.IsNil() { n = nil } diff --git a/tools/vendor/golang.org/x/tools/go/ast/astutil/util.go b/tools/vendor/golang.org/x/tools/go/ast/astutil/util.go index 
ca71e3e10..c820b2084 100644 --- a/tools/vendor/golang.org/x/tools/go/ast/astutil/util.go +++ b/tools/vendor/golang.org/x/tools/go/ast/astutil/util.go @@ -8,4 +8,6 @@ import "go/ast" // Unparen returns e with any enclosing parentheses stripped. // Deprecated: use [ast.Unparen]. +// +//go:fix inline func Unparen(e ast.Expr) ast.Expr { return ast.Unparen(e) } diff --git a/tools/vendor/golang.org/x/tools/go/buildutil/fakecontext.go b/tools/vendor/golang.org/x/tools/go/buildutil/fakecontext.go index 763d18809..1f75141d5 100644 --- a/tools/vendor/golang.org/x/tools/go/buildutil/fakecontext.go +++ b/tools/vendor/golang.org/x/tools/go/buildutil/fakecontext.go @@ -95,7 +95,7 @@ func (s byName) Less(i, j int) bool { return s[i].Name() < s[j].Name() } type fakeFileInfo string func (fi fakeFileInfo) Name() string { return string(fi) } -func (fakeFileInfo) Sys() interface{} { return nil } +func (fakeFileInfo) Sys() any { return nil } func (fakeFileInfo) ModTime() time.Time { return time.Time{} } func (fakeFileInfo) IsDir() bool { return false } func (fakeFileInfo) Size() int64 { return 0 } @@ -104,7 +104,7 @@ func (fakeFileInfo) Mode() os.FileMode { return 0644 } type fakeDirInfo string func (fd fakeDirInfo) Name() string { return string(fd) } -func (fakeDirInfo) Sys() interface{} { return nil } +func (fakeDirInfo) Sys() any { return nil } func (fakeDirInfo) ModTime() time.Time { return time.Time{} } func (fakeDirInfo) IsDir() bool { return true } func (fakeDirInfo) Size() int64 { return 0 } diff --git a/tools/vendor/golang.org/x/tools/go/buildutil/tags.go b/tools/vendor/golang.org/x/tools/go/buildutil/tags.go index 32c8d1424..410c8e72d 100644 --- a/tools/vendor/golang.org/x/tools/go/buildutil/tags.go +++ b/tools/vendor/golang.org/x/tools/go/buildutil/tags.go @@ -51,7 +51,7 @@ func (v *TagsFlag) Set(s string) error { return nil } -func (v *TagsFlag) Get() interface{} { return *v } +func (v *TagsFlag) Get() any { return *v } func splitQuotedFields(s string) ([]string, error) { // See $GOROOT/src/cmd/internal/quoted/quoted.go (Split) diff --git a/tools/vendor/golang.org/x/tools/go/internal/cgo/cgo.go b/tools/vendor/golang.org/x/tools/go/internal/cgo/cgo.go index 697974bb9..735efeb53 100644 --- a/tools/vendor/golang.org/x/tools/go/internal/cgo/cgo.go +++ b/tools/vendor/golang.org/x/tools/go/internal/cgo/cgo.go @@ -203,7 +203,7 @@ func envList(key, def string) []string { // stringList's arguments should be a sequence of string or []string values. // stringList flattens them into a single []string. -func stringList(args ...interface{}) []string { +func stringList(args ...any) []string { var x []string for _, arg := range args { switch arg := arg.(type) { diff --git a/tools/vendor/golang.org/x/tools/go/loader/loader.go b/tools/vendor/golang.org/x/tools/go/loader/loader.go index 2d4865f66..d06f95ad7 100644 --- a/tools/vendor/golang.org/x/tools/go/loader/loader.go +++ b/tools/vendor/golang.org/x/tools/go/loader/loader.go @@ -215,7 +215,7 @@ func (conf *Config) fset() *token.FileSet { // src specifies the parser input as a string, []byte, or io.Reader, and // filename is its apparent name. If src is nil, the contents of // filename are read from the file system. -func (conf *Config) ParseFile(filename string, src interface{}) (*ast.File, error) { +func (conf *Config) ParseFile(filename string, src any) (*ast.File, error) { // TODO(adonovan): use conf.build() etc like parseFiles does. 
return parser.ParseFile(conf.fset(), filename, src, conf.ParserMode) } diff --git a/tools/vendor/golang.org/x/tools/go/packages/packages.go b/tools/vendor/golang.org/x/tools/go/packages/packages.go index c3a59b8eb..6665a04c1 100644 --- a/tools/vendor/golang.org/x/tools/go/packages/packages.go +++ b/tools/vendor/golang.org/x/tools/go/packages/packages.go @@ -141,6 +141,8 @@ const ( LoadAllSyntax = LoadSyntax | NeedDeps // Deprecated: NeedExportsFile is a historical misspelling of NeedExportFile. + // + //go:fix inline NeedExportsFile = NeedExportFile ) @@ -161,7 +163,7 @@ type Config struct { // If the user provides a logger, debug logging is enabled. // If the GOPACKAGESDEBUG environment variable is set to true, // but the logger is nil, default to log.Printf. - Logf func(format string, args ...interface{}) + Logf func(format string, args ...any) // Dir is the directory in which to run the build system's query tool // that provides information about the packages. @@ -564,13 +566,13 @@ type ModuleError struct { } func init() { - packagesinternal.GetDepsErrors = func(p interface{}) []*packagesinternal.PackageError { + packagesinternal.GetDepsErrors = func(p any) []*packagesinternal.PackageError { return p.(*Package).depsErrors } - packagesinternal.SetModFile = func(config interface{}, value string) { + packagesinternal.SetModFile = func(config any, value string) { config.(*Config).modFile = value } - packagesinternal.SetModFlag = func(config interface{}, value string) { + packagesinternal.SetModFlag = func(config any, value string) { config.(*Config).modFlag = value } packagesinternal.TypecheckCgo = int(typecheckCgo) @@ -739,7 +741,7 @@ func newLoader(cfg *Config) *loader { if debug { ld.Config.Logf = log.Printf } else { - ld.Config.Logf = func(format string, args ...interface{}) {} + ld.Config.Logf = func(format string, args ...any) {} } } if ld.Config.Mode == 0 { diff --git a/tools/vendor/golang.org/x/tools/go/ssa/builder.go b/tools/vendor/golang.org/x/tools/go/ssa/builder.go index 4cd71260b..84ccbc092 100644 --- a/tools/vendor/golang.org/x/tools/go/ssa/builder.go +++ b/tools/vendor/golang.org/x/tools/go/ssa/builder.go @@ -82,6 +82,8 @@ import ( "runtime" "sync" + "slices" + "golang.org/x/tools/internal/typeparams" "golang.org/x/tools/internal/versions" ) @@ -559,7 +561,7 @@ func (sb *storebuf) emit(fn *Function) { // literal that may reference parts of the LHS. func (b *builder) assign(fn *Function, loc lvalue, e ast.Expr, isZero bool, sb *storebuf) { // Can we initialize it in place? - if e, ok := unparen(e).(*ast.CompositeLit); ok { + if e, ok := ast.Unparen(e).(*ast.CompositeLit); ok { // A CompositeLit never evaluates to a pointer, // so if the type of the location is a pointer, // an &-operation is implied. @@ -614,7 +616,7 @@ func (b *builder) assign(fn *Function, loc lvalue, e ast.Expr, isZero bool, sb * // expr lowers a single-result expression e to SSA form, emitting code // to fn and returning the Value defined by the expression. func (b *builder) expr(fn *Function, e ast.Expr) Value { - e = unparen(e) + e = ast.Unparen(e) tv := fn.info.Types[e] @@ -704,7 +706,7 @@ func (b *builder) expr0(fn *Function, e ast.Expr, tv types.TypeAndValue) Value { return y } // Call to "intrinsic" built-ins, e.g. new, make, panic. 
- if id, ok := unparen(e.Fun).(*ast.Ident); ok { + if id, ok := ast.Unparen(e.Fun).(*ast.Ident); ok { if obj, ok := fn.info.Uses[id].(*types.Builtin); ok { if v := b.builtin(fn, obj, e.Args, fn.typ(tv.Type), e.Lparen); v != nil { return v @@ -721,7 +723,7 @@ func (b *builder) expr0(fn *Function, e ast.Expr, tv types.TypeAndValue) Value { switch e.Op { case token.AND: // &X --- potentially escaping. addr := b.addr(fn, e.X, true) - if _, ok := unparen(e.X).(*ast.StarExpr); ok { + if _, ok := ast.Unparen(e.X).(*ast.StarExpr); ok { // &*p must panic if p is nil (http://golang.org/s/go12nil). // For simplicity, we'll just (suboptimally) rely // on the side effects of a load. @@ -1002,7 +1004,7 @@ func (b *builder) setCallFunc(fn *Function, e *ast.CallExpr, c *CallCommon) { c.pos = e.Lparen // Is this a method call? - if selector, ok := unparen(e.Fun).(*ast.SelectorExpr); ok { + if selector, ok := ast.Unparen(e.Fun).(*ast.SelectorExpr); ok { sel := fn.selection(selector) if sel != nil && sel.kind == types.MethodVal { obj := sel.obj.(*types.Func) @@ -1372,7 +1374,7 @@ func (b *builder) compLit(fn *Function, addr Value, e *ast.CompositeLit, isZero // An &-operation may be implied: // map[*struct{}]bool{&struct{}{}: true} wantAddr := false - if _, ok := unparen(e.Key).(*ast.CompositeLit); ok { + if _, ok := ast.Unparen(e.Key).(*ast.CompositeLit); ok { wantAddr = isPointerCore(t.Key()) } @@ -1547,9 +1549,9 @@ func (b *builder) typeSwitchStmt(fn *Function, s *ast.TypeSwitchStmt, label *lbl var x Value switch ass := s.Assign.(type) { case *ast.ExprStmt: // x.(type) - x = b.expr(fn, unparen(ass.X).(*ast.TypeAssertExpr).X) + x = b.expr(fn, ast.Unparen(ass.X).(*ast.TypeAssertExpr).X) case *ast.AssignStmt: // y := x.(type) - x = b.expr(fn, unparen(ass.Rhs[0]).(*ast.TypeAssertExpr).X) + x = b.expr(fn, ast.Unparen(ass.Rhs[0]).(*ast.TypeAssertExpr).X) } done := fn.newBasicBlock("typeswitch.done") @@ -1667,7 +1669,7 @@ func (b *builder) selectStmt(fn *Function, s *ast.SelectStmt, label *lblock) { } case *ast.AssignStmt: // x := <-ch - recv := unparen(comm.Rhs[0]).(*ast.UnaryExpr) + recv := ast.Unparen(comm.Rhs[0]).(*ast.UnaryExpr) st = &SelectState{ Dir: types.RecvOnly, Chan: b.expr(fn, recv.X), @@ -1678,7 +1680,7 @@ func (b *builder) selectStmt(fn *Function, s *ast.SelectStmt, label *lblock) { } case *ast.ExprStmt: // <-ch - recv := unparen(comm.X).(*ast.UnaryExpr) + recv := ast.Unparen(comm.X).(*ast.UnaryExpr) st = &SelectState{ Dir: types.RecvOnly, Chan: b.expr(fn, recv.X), @@ -2021,8 +2023,8 @@ func (b *builder) forStmtGo122(fn *Function, s *ast.ForStmt, label *lblock) { // Remove instructions for phi, load, and store. // lift() will remove the unused i_next *Alloc. 
isDead := func(i Instruction) bool { return dead[i] } - loop.Instrs = removeInstrsIf(loop.Instrs, isDead) - post.Instrs = removeInstrsIf(post.Instrs, isDead) + loop.Instrs = slices.DeleteFunc(loop.Instrs, isDead) + post.Instrs = slices.DeleteFunc(post.Instrs, isDead) } } diff --git a/tools/vendor/golang.org/x/tools/go/ssa/emit.go b/tools/vendor/golang.org/x/tools/go/ssa/emit.go index a3d41ad95..bca79adc4 100644 --- a/tools/vendor/golang.org/x/tools/go/ssa/emit.go +++ b/tools/vendor/golang.org/x/tools/go/ssa/emit.go @@ -81,7 +81,7 @@ func emitDebugRef(f *Function, e ast.Expr, v Value, isAddr bool) { panic("nil") } var obj types.Object - e = unparen(e) + e = ast.Unparen(e) if id, ok := e.(*ast.Ident); ok { if isBlankIdent(id) { return diff --git a/tools/vendor/golang.org/x/tools/go/ssa/lift.go b/tools/vendor/golang.org/x/tools/go/ssa/lift.go index aada3dc32..6138ca82e 100644 --- a/tools/vendor/golang.org/x/tools/go/ssa/lift.go +++ b/tools/vendor/golang.org/x/tools/go/ssa/lift.go @@ -43,6 +43,7 @@ import ( "go/token" "math/big" "os" + "slices" "golang.org/x/tools/internal/typeparams" ) @@ -105,23 +106,7 @@ func buildDomFrontier(fn *Function) domFrontier { } func removeInstr(refs []Instruction, instr Instruction) []Instruction { - return removeInstrsIf(refs, func(i Instruction) bool { return i == instr }) -} - -func removeInstrsIf(refs []Instruction, p func(Instruction) bool) []Instruction { - // TODO(taking): replace with go1.22 slices.DeleteFunc. - i := 0 - for _, ref := range refs { - if p(ref) { - continue - } - refs[i] = ref - i++ - } - for j := i; j != len(refs); j++ { - refs[j] = nil // aid GC - } - return refs[:i] + return slices.DeleteFunc(refs, func(i Instruction) bool { return i == instr }) } // lift replaces local and new Allocs accessed only with diff --git a/tools/vendor/golang.org/x/tools/go/ssa/mode.go b/tools/vendor/golang.org/x/tools/go/ssa/mode.go index 8381639a5..61c91452c 100644 --- a/tools/vendor/golang.org/x/tools/go/ssa/mode.go +++ b/tools/vendor/golang.org/x/tools/go/ssa/mode.go @@ -108,4 +108,4 @@ func (m *BuilderMode) Set(s string) error { } // Get returns m. -func (m BuilderMode) Get() interface{} { return m } +func (m BuilderMode) Get() any { return m } diff --git a/tools/vendor/golang.org/x/tools/go/ssa/print.go b/tools/vendor/golang.org/x/tools/go/ssa/print.go index 432c4b05b..8b92d0846 100644 --- a/tools/vendor/golang.org/x/tools/go/ssa/print.go +++ b/tools/vendor/golang.org/x/tools/go/ssa/print.go @@ -387,7 +387,7 @@ func (s *MapUpdate) String() string { func (s *DebugRef) String() string { p := s.Parent().Prog.Fset.Position(s.Pos()) - var descr interface{} + var descr any if s.object != nil { descr = s.object // e.g. 
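Now that x/tools can rely on the slices package, the hand-rolled removeInstrsIf above is replaced by slices.DeleteFunc (Go 1.21), which filters in place and, since Go 1.22, also zeroes the discarded tail, matching the old loop's explicit nil-ing to aid the GC. A minimal usage example:

```go
package main

import (
	"fmt"
	"slices"
)

func main() {
	xs := []int{1, 2, 3, 4, 5, 6}
	// Remove every element the predicate matches, shifting the rest down.
	xs = slices.DeleteFunc(xs, func(x int) bool { return x%2 == 0 })
	fmt.Println(xs) // [1 3 5]
}
```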
"var x int" } else { diff --git a/tools/vendor/golang.org/x/tools/go/ssa/sanity.go b/tools/vendor/golang.org/x/tools/go/ssa/sanity.go index e35e4d793..97ef886e3 100644 --- a/tools/vendor/golang.org/x/tools/go/ssa/sanity.go +++ b/tools/vendor/golang.org/x/tools/go/ssa/sanity.go @@ -48,7 +48,7 @@ func mustSanityCheck(fn *Function, reporter io.Writer) { } } -func (s *sanity) diagnostic(prefix, format string, args ...interface{}) { +func (s *sanity) diagnostic(prefix, format string, args ...any) { fmt.Fprintf(s.reporter, "%s: function %s", prefix, s.fn) if s.block != nil { fmt.Fprintf(s.reporter, ", block %s", s.block) @@ -58,12 +58,12 @@ func (s *sanity) diagnostic(prefix, format string, args ...interface{}) { io.WriteString(s.reporter, "\n") } -func (s *sanity) errorf(format string, args ...interface{}) { +func (s *sanity) errorf(format string, args ...any) { s.insane = true s.diagnostic("Error", format, args...) } -func (s *sanity) warnf(format string, args ...interface{}) { +func (s *sanity) warnf(format string, args ...any) { s.diagnostic("Warning", format, args...) } diff --git a/tools/vendor/golang.org/x/tools/go/ssa/source.go b/tools/vendor/golang.org/x/tools/go/ssa/source.go index 055a6b1ef..d0cc1f486 100644 --- a/tools/vendor/golang.org/x/tools/go/ssa/source.go +++ b/tools/vendor/golang.org/x/tools/go/ssa/source.go @@ -153,7 +153,7 @@ func findNamedFunc(pkg *Package, pos token.Pos) *Function { // the ssa.Value.) func (f *Function) ValueForExpr(e ast.Expr) (value Value, isAddr bool) { if f.debugInfo() { // (opt) - e = unparen(e) + e = ast.Unparen(e) for _, b := range f.Blocks { for _, instr := range b.Instrs { if ref, ok := instr.(*DebugRef); ok { diff --git a/tools/vendor/golang.org/x/tools/go/ssa/util.go b/tools/vendor/golang.org/x/tools/go/ssa/util.go index 4a056cbe0..9a73984a6 100644 --- a/tools/vendor/golang.org/x/tools/go/ssa/util.go +++ b/tools/vendor/golang.org/x/tools/go/ssa/util.go @@ -35,8 +35,6 @@ func assert(p bool, msg string) { //// AST utilities -func unparen(e ast.Expr) ast.Expr { return ast.Unparen(e) } - // isBlankIdent returns true iff e is an Ident with name "_". // They have no associated types.Object, and thus no type. func isBlankIdent(e ast.Expr) bool { @@ -168,7 +166,7 @@ func declaredWithin(obj types.Object, fn *types.Func) bool { // returns a closure that prints the corresponding "end" message. // Call using 'defer logStack(...)()' to show builder stack on panic. // Don't forget trailing parens! -func logStack(format string, args ...interface{}) func() { +func logStack(format string, args ...any) func() { msg := fmt.Sprintf(format, args...) 
io.WriteString(os.Stderr, msg) io.WriteString(os.Stderr, "\n") @@ -195,7 +193,7 @@ func makeLen(T types.Type) *Builtin { lenParams := types.NewTuple(anonVar(T)) return &Builtin{ name: "len", - sig: types.NewSignature(nil, lenParams, lenResults, false), + sig: types.NewSignatureType(nil, nil, nil, lenParams, lenResults, false), } } diff --git a/tools/vendor/golang.org/x/tools/go/ssa/wrappers.go b/tools/vendor/golang.org/x/tools/go/ssa/wrappers.go index d09b4f250..aeb160eff 100644 --- a/tools/vendor/golang.org/x/tools/go/ssa/wrappers.go +++ b/tools/vendor/golang.org/x/tools/go/ssa/wrappers.go @@ -106,9 +106,7 @@ func (b *builder) buildWrapper(fn *Function) { var c Call c.Call.Value = &Builtin{ name: "ssa:wrapnilchk", - sig: types.NewSignature(nil, - types.NewTuple(anonVar(fn.method.recv), anonVar(tString), anonVar(tString)), - types.NewTuple(anonVar(fn.method.recv)), false), + sig: types.NewSignatureType(nil, nil, nil, types.NewTuple(anonVar(fn.method.recv), anonVar(tString), anonVar(tString)), types.NewTuple(anonVar(fn.method.recv)), false), } c.Call.Args = []Value{ v, @@ -262,7 +260,7 @@ func createThunk(prog *Program, sel *selection) *Function { } func changeRecv(s *types.Signature, recv *types.Var) *types.Signature { - return types.NewSignature(recv, s.Params(), s.Results(), s.Variadic()) + return types.NewSignatureType(recv, nil, nil, s.Params(), s.Results(), s.Variadic()) } // A local version of *types.Selection. diff --git a/tools/vendor/golang.org/x/tools/go/types/typeutil/map.go b/tools/vendor/golang.org/x/tools/go/types/typeutil/map.go index 43261147c..b6d542c64 100644 --- a/tools/vendor/golang.org/x/tools/go/types/typeutil/map.go +++ b/tools/vendor/golang.org/x/tools/go/types/typeutil/map.go @@ -389,8 +389,13 @@ func (hasher) hashTypeName(tname *types.TypeName) uint32 { // path, and whether or not it is a package-level typename. It // is rare for a package to define multiple local types with // the same name.) - hash := uintptr(unsafe.Pointer(tname)) - return uint32(hash ^ (hash >> 32)) + ptr := uintptr(unsafe.Pointer(tname)) + if unsafe.Sizeof(ptr) == 8 { + hash := uint64(ptr) + return uint32(hash ^ (hash >> 32)) + } else { + return uint32(ptr) + } } // shallowHash computes a hash of t without looking at any of its diff --git a/tools/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go b/tools/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go index abf708111..5eb7ac5a9 100644 --- a/tools/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go +++ b/tools/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go @@ -23,6 +23,8 @@ import ( "golang.org/x/tools/internal/typesinternal" ) +// Deprecated: this heuristic is ill-defined. +// TODO(adonovan): move to sole use in gopls/internal/cache. func TypeErrorEndPos(fset *token.FileSet, src []byte, start token.Pos) token.Pos { // Get the end position for the type error. file := fset.File(start) @@ -255,16 +257,16 @@ func AddImport(info *types.Info, file *ast.File, preferredName, pkgpath, member newName = fmt.Sprintf("%s%d", preferredName, i) } - // For now, keep it real simple: create a new import - // declaration before the first existing declaration (which - // must exist), including its comments, and let goimports tidy it up. + // Create a new import declaration either before the first existing + // declaration (which must exist), including its comments; or + // inside the declaration, if it is an import group. 
// // Use a renaming import whenever the preferred name is not // available, or the chosen name does not match the last // segment of its path. - newText := fmt.Sprintf("import %q\n\n", pkgpath) + newText := fmt.Sprintf("%q", pkgpath) if newName != preferredName || newName != pathpkg.Base(pkgpath) { - newText = fmt.Sprintf("import %s %q\n\n", newName, pkgpath) + newText = fmt.Sprintf("%s %q", newName, pkgpath) } decl0 := file.Decls[0] var before ast.Node = decl0 @@ -278,9 +280,17 @@ func AddImport(info *types.Info, file *ast.File, preferredName, pkgpath, member before = decl0.Doc } } + // If the first decl is an import group, add this new import at the end. + if gd, ok := before.(*ast.GenDecl); ok && gd.Tok == token.IMPORT && gd.Rparen.IsValid() { + pos = gd.Rparen + newText = "\t" + newText + "\n" + } else { + pos = before.Pos() + newText = "import " + newText + "\n\n" + } return newName, newName + ".", []analysis.TextEdit{{ - Pos: before.Pos(), - End: before.Pos(), + Pos: pos, + End: pos, NewText: []byte(newText), }} } @@ -409,18 +419,19 @@ func validateFix(fset *token.FileSet, fix *analysis.SuggestedFix) error { start := edit.Pos file := fset.File(start) if file == nil { - return fmt.Errorf("missing file info for pos (%v)", edit.Pos) + return fmt.Errorf("no token.File for TextEdit.Pos (%v)", edit.Pos) } if end := edit.End; end.IsValid() { if end < start { - return fmt.Errorf("pos (%v) > end (%v)", edit.Pos, edit.End) + return fmt.Errorf("TextEdit.Pos (%v) > TextEdit.End (%v)", edit.Pos, edit.End) } endFile := fset.File(end) if endFile == nil { - return fmt.Errorf("malformed end position %v", end) + return fmt.Errorf("no token.File for TextEdit.End (%v; File(start).FileEnd is %d)", end, file.Base()+file.Size()) } if endFile != file { - return fmt.Errorf("edit spans files %v and %v", file.Name(), endFile.Name()) + return fmt.Errorf("edit #%d spans files (%v and %v)", + i, file.Position(edit.Pos), endFile.Position(edit.End)) } } else { edit.End = start // update the SuggestedFix @@ -449,3 +460,30 @@ func validateFix(fset *token.FileSet, fix *analysis.SuggestedFix) error { return nil } + +// CanImport reports whether one package is allowed to import another. +// +// TODO(adonovan): allow customization of the accessibility relation +// (e.g. for Bazel). +func CanImport(from, to string) bool { + // TODO(adonovan): better segment hygiene. + if to == "internal" || strings.HasPrefix(to, "internal/") { + // Special case: only std packages may import internal/... + // We can't reliably know whether we're in std, so we + // use a heuristic on the first segment. + first, _, _ := strings.Cut(from, "/") + if strings.Contains(first, ".") { + return false // example.com/foo ∉ std + } + if first == "testdata" { + return false // testdata/foo ∉ std + } + } + if strings.HasSuffix(to, "/internal") { + return strings.HasPrefix(from, to[:len(to)-len("/internal")]) + } + if i := strings.LastIndex(to, "/internal/"); i >= 0 { + return strings.HasPrefix(from, to[:i]) + } + return true +} diff --git a/tools/vendor/golang.org/x/tools/internal/event/keys/keys.go b/tools/vendor/golang.org/x/tools/internal/event/keys/keys.go index a02206e30..4cfa51b61 100644 --- a/tools/vendor/golang.org/x/tools/internal/event/keys/keys.go +++ b/tools/vendor/golang.org/x/tools/internal/event/keys/keys.go @@ -32,7 +32,7 @@ func (k *Value) Format(w io.Writer, buf []byte, l label.Label) { } // Get can be used to get a label for the key from a label.Map. 
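The new CanImport helper above encodes Go's internal-package visibility rule: a package may import an internal package only from within the subtree rooted at the directory containing the internal/ element. A few probes against a verbatim copy of the heuristic, renamed canImport so the sketch stands alone:

```go
package main

import (
	"fmt"
	"strings"
)

// canImport is a copy of the CanImport heuristic from the hunk above.
func canImport(from, to string) bool {
	if to == "internal" || strings.HasPrefix(to, "internal/") {
		// Only std packages may import internal/...; a dot in the first
		// segment means the importer is outside the standard library.
		first, _, _ := strings.Cut(from, "/")
		if strings.Contains(first, ".") {
			return false
		}
		if first == "testdata" {
			return false
		}
	}
	if strings.HasSuffix(to, "/internal") {
		return strings.HasPrefix(from, to[:len(to)-len("/internal")])
	}
	if i := strings.LastIndex(to, "/internal/"); i >= 0 {
		return strings.HasPrefix(from, to[:i])
	}
	return true
}

func main() {
	fmt.Println(canImport("example.com/a/b", "example.com/a/internal/c")) // true: same subtree
	fmt.Println(canImport("example.com/z", "example.com/a/internal/c"))   // false: outside it
	fmt.Println(canImport("example.com/z", "internal/godebug"))           // false: not std
	fmt.Println(canImport("fmt", "internal/godebug"))                     // true: std heuristic
}
```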
-func (k *Value) Get(lm label.Map) interface{} { +func (k *Value) Get(lm label.Map) any { if t := lm.Find(k); t.Valid() { return k.From(t) } @@ -40,10 +40,10 @@ func (k *Value) Get(lm label.Map) interface{} { } // From can be used to get a value from a Label. -func (k *Value) From(t label.Label) interface{} { return t.UnpackValue() } +func (k *Value) From(t label.Label) any { return t.UnpackValue() } // Of creates a new Label with this key and the supplied value. -func (k *Value) Of(value interface{}) label.Label { return label.OfValue(k, value) } +func (k *Value) Of(value any) label.Label { return label.OfValue(k, value) } // Tag represents a key for tagging labels that have no value. // These are used when the existence of the label is the entire information it diff --git a/tools/vendor/golang.org/x/tools/internal/event/label/label.go b/tools/vendor/golang.org/x/tools/internal/event/label/label.go index 0f526e1f9..7c00ca2a6 100644 --- a/tools/vendor/golang.org/x/tools/internal/event/label/label.go +++ b/tools/vendor/golang.org/x/tools/internal/event/label/label.go @@ -32,7 +32,7 @@ type Key interface { type Label struct { key Key packed uint64 - untyped interface{} + untyped any } // Map is the interface to a collection of Labels indexed by key. @@ -76,13 +76,13 @@ type mapChain struct { // OfValue creates a new label from the key and value. // This method is for implementing new key types, label creation should // normally be done with the Of method of the key. -func OfValue(k Key, value interface{}) Label { return Label{key: k, untyped: value} } +func OfValue(k Key, value any) Label { return Label{key: k, untyped: value} } // UnpackValue assumes the label was built using LabelOfValue and returns the value // that was passed to that constructor. // This method is for implementing new key types, for type safety normal // access should be done with the From method of the key. -func (t Label) UnpackValue() interface{} { return t.untyped } +func (t Label) UnpackValue() any { return t.untyped } // Of64 creates a new label from a key and a uint64. This is often // used for non uint64 values that can be packed into a uint64. diff --git a/tools/vendor/golang.org/x/tools/internal/gcimporter/bimport.go b/tools/vendor/golang.org/x/tools/internal/gcimporter/bimport.go index d79a605ed..734c46198 100644 --- a/tools/vendor/golang.org/x/tools/internal/gcimporter/bimport.go +++ b/tools/vendor/golang.org/x/tools/internal/gcimporter/bimport.go @@ -14,7 +14,7 @@ import ( "sync" ) -func errorf(format string, args ...interface{}) { +func errorf(format string, args ...any) { panic(fmt.Sprintf(format, args...)) } diff --git a/tools/vendor/golang.org/x/tools/internal/gcimporter/iexport.go b/tools/vendor/golang.org/x/tools/internal/gcimporter/iexport.go index 7dfc31a37..253d6493c 100644 --- a/tools/vendor/golang.org/x/tools/internal/gcimporter/iexport.go +++ b/tools/vendor/golang.org/x/tools/internal/gcimporter/iexport.go @@ -310,7 +310,7 @@ func IImportShallow(fset *token.FileSet, getPackages GetPackagesFunc, data []byt } // ReportFunc is the type of a function used to report formatted bugs. -type ReportFunc = func(string, ...interface{}) +type ReportFunc = func(string, ...any) // Current bundled export format version. Increase with each format change. 
// 0: initial implementation @@ -597,7 +597,7 @@ type filePositions struct { needed []uint64 // unordered list of needed file offsets } -func (p *iexporter) trace(format string, args ...interface{}) { +func (p *iexporter) trace(format string, args ...any) { if !trace { // Call sites should also be guarded, but having this check here allows // easily enabling/disabling debug trace statements. @@ -1583,6 +1583,6 @@ func (e internalError) Error() string { return "gcimporter: " + string(e) } // "internalErrorf" as the former is used for bugs, whose cause is // internal inconsistency, whereas the latter is used for ordinary // situations like bad input, whose cause is external. -func internalErrorf(format string, args ...interface{}) error { +func internalErrorf(format string, args ...any) error { return internalError(fmt.Sprintf(format, args...)) } diff --git a/tools/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/tools/vendor/golang.org/x/tools/internal/gcimporter/iimport.go index 129439271..bc6c9741e 100644 --- a/tools/vendor/golang.org/x/tools/internal/gcimporter/iimport.go +++ b/tools/vendor/golang.org/x/tools/internal/gcimporter/iimport.go @@ -400,7 +400,7 @@ type iimporter struct { indent int // for tracing support } -func (p *iimporter) trace(format string, args ...interface{}) { +func (p *iimporter) trace(format string, args ...any) { if !trace { // Call sites should also be guarded, but having this check here allows // easily enabling/disabling debug trace statements. diff --git a/tools/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go b/tools/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go index 522287d18..37b4a39e9 100644 --- a/tools/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go +++ b/tools/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go @@ -574,7 +574,7 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) { recv := types.NewVar(fn.Pos(), fn.Pkg(), "", named) typesinternal.SetVarKind(recv, typesinternal.RecvVar) - methods[i] = types.NewFunc(fn.Pos(), fn.Pkg(), fn.Name(), types.NewSignature(recv, sig.Params(), sig.Results(), sig.Variadic())) + methods[i] = types.NewFunc(fn.Pos(), fn.Pkg(), fn.Name(), types.NewSignatureType(recv, nil, nil, sig.Params(), sig.Results(), sig.Variadic())) } embeds := make([]types.Type, iface.NumEmbeddeds()) diff --git a/tools/vendor/golang.org/x/tools/internal/gopathwalk/walk.go b/tools/vendor/golang.org/x/tools/internal/gopathwalk/walk.go index 836151551..984b79c2a 100644 --- a/tools/vendor/golang.org/x/tools/internal/gopathwalk/walk.go +++ b/tools/vendor/golang.org/x/tools/internal/gopathwalk/walk.go @@ -22,7 +22,7 @@ import ( // Options controls the behavior of a Walk call. type Options struct { // If Logf is non-nil, debug logging is enabled through this function. - Logf func(format string, args ...interface{}) + Logf func(format string, args ...any) // Search module caches. Also disables legacy goimports ignore rules. ModulesEnabled bool @@ -81,7 +81,7 @@ func WalkSkip(roots []Root, add func(root Root, dir string), skip func(root Root // walkDir creates a walker and starts fastwalk with this walker. 
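Several hunks above and below replace the deprecated types.NewSignature with types.NewSignatureType, which adds two slice parameters for receiver type parameters and function type parameters; both are nil for the non-generic signatures built here. A minimal sketch of the migration for a plain func(string) int, not taken from this patch:

package main

import (
	"fmt"
	"go/types"
)

func main() {
	params := types.NewTuple(types.NewVar(0, nil, "s", types.Typ[types.String]))
	results := types.NewTuple(types.NewVar(0, nil, "", types.Typ[types.Int]))

	// Deprecated form: types.NewSignature(nil, params, results, false).
	// The two extra nil arguments below are the receiver type parameters
	// and the function type parameters, unused for non-generic signatures.
	sig := types.NewSignatureType(nil, nil, nil, params, results, false)
	fmt.Println(sig) // func(s string) int
}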
func walkDir(root Root, add func(Root, string), skip func(root Root, dir string) bool, opts Options) { if opts.Logf == nil { - opts.Logf = func(format string, args ...interface{}) {} + opts.Logf = func(format string, args ...any) {} } if _, err := os.Stat(root.Path); os.IsNotExist(err) { opts.Logf("skipping nonexistent directory: %v", root.Path) diff --git a/tools/vendor/golang.org/x/tools/internal/imports/fix.go b/tools/vendor/golang.org/x/tools/internal/imports/fix.go index bf6b0aadd..737a9bfae 100644 --- a/tools/vendor/golang.org/x/tools/internal/imports/fix.go +++ b/tools/vendor/golang.org/x/tools/internal/imports/fix.go @@ -559,7 +559,7 @@ func fixImportsDefault(fset *token.FileSet, f *ast.File, filename string, env *P return err } apply(fset, f, fixes) - return err + return nil } // getFixes gets the import fixes that need to be made to f in order to fix the imports. @@ -1030,7 +1030,7 @@ func (e *ProcessEnv) GetResolver() (Resolver, error) { // // For gopls, we can optionally explicitly choose a resolver type, since we // already know the view type. - if len(e.Env["GOMOD"]) == 0 && len(e.Env["GOWORK"]) == 0 { + if e.Env["GOMOD"] == "" && (e.Env["GOWORK"] == "" || e.Env["GOWORK"] == "off") { e.resolver = newGopathResolver(e) e.logf("created gopath resolver") } else if r, err := newModuleResolver(e, e.ModCache); err != nil { diff --git a/tools/vendor/golang.org/x/tools/internal/packagesinternal/packages.go b/tools/vendor/golang.org/x/tools/internal/packagesinternal/packages.go index 784605914..25ebab663 100644 --- a/tools/vendor/golang.org/x/tools/internal/packagesinternal/packages.go +++ b/tools/vendor/golang.org/x/tools/internal/packagesinternal/packages.go @@ -17,4 +17,4 @@ var TypecheckCgo int var DepsErrors int // must be set as a LoadMode to call GetDepsErrors var SetModFlag = func(config any, value string) {} -var SetModFile = func(config interface{}, value string) {} +var SetModFile = func(config any, value string) {} diff --git a/tools/vendor/golang.org/x/tools/internal/stdlib/deps.go b/tools/vendor/golang.org/x/tools/internal/stdlib/deps.go new file mode 100644 index 000000000..7cca431cd --- /dev/null +++ b/tools/vendor/golang.org/x/tools/internal/stdlib/deps.go @@ -0,0 +1,359 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate.go. DO NOT EDIT. 
+ +package stdlib + +type pkginfo struct { + name string + deps string // list of indices of dependencies, as varint-encoded deltas +} + +var deps = [...]pkginfo{ + {"archive/tar", "\x03k\x03E5\x01\v\x01#\x01\x01\x02\x05\t\x02\x01\x02\x02\v"}, + {"archive/zip", "\x02\x04a\a\x16\x0205\x01+\x05\x01\x10\x03\x02\r\x04"}, + {"bufio", "\x03k}E\x13"}, + {"bytes", "n+R\x03\fG\x02\x02"}, + {"cmp", ""}, + {"compress/bzip2", "\x02\x02\xe7\x01B"}, + {"compress/flate", "\x02l\x03z\r\x024\x01\x03"}, + {"compress/gzip", "\x02\x04a\a\x03\x15eT"}, + {"compress/lzw", "\x02l\x03z"}, + {"compress/zlib", "\x02\x04a\a\x03\x13\x01f"}, + {"container/heap", "\xae\x02"}, + {"container/list", ""}, + {"container/ring", ""}, + {"context", "n\\h\x01\f"}, + {"crypto", "\x84\x01gD"}, + {"crypto/aes", "\x10\n\a\x8e\x02"}, + {"crypto/cipher", "\x03\x1e\x01\x01\x1d\x11\x1d,Q"}, + {"crypto/des", "\x10\x13\x1d.,\x95\x01\x03"}, + {"crypto/dsa", "@\x04*}\x0e"}, + {"crypto/ecdh", "\x03\v\f\x0e\x04\x14\x04\r\x1d}"}, + {"crypto/ecdsa", "\x0e\x05\x03\x04\x01\x0e\x16\x01\x04\f\x01\x1d}\x0e\x04K\x01"}, + {"crypto/ed25519", "\x0e\x1c\x16\n\a\x1d}D"}, + {"crypto/elliptic", "0>}\x0e9"}, + {"crypto/fips140", " \x05\x91\x01"}, + {"crypto/hkdf", "-\x12\x01.\x16"}, + {"crypto/hmac", "\x1a\x14\x11\x01\x113"}, + {"crypto/internal/boring", "\x0e\x02\rg"}, + {"crypto/internal/boring/bbig", "\x1a\xdf\x01L"}, + {"crypto/internal/boring/bcache", "\xb3\x02\x12"}, + {"crypto/internal/boring/sig", ""}, + {"crypto/internal/cryptotest", "\x03\r\n)\x0e\x1a\x06\x13\x12#\a\t\x11\x11\x11\x1b\x01\f\f\x05\n"}, + {"crypto/internal/entropy", "E"}, + {"crypto/internal/fips140", ">0}9\f\x15"}, + {"crypto/internal/fips140/aes", "\x03\x1d\x03\x02\x13\x04\x01\x01\x05+\x8c\x015"}, + {"crypto/internal/fips140/aes/gcm", " \x01\x02\x02\x02\x11\x04\x01\x06+\x8a\x01"}, + {"crypto/internal/fips140/alias", "\xc5\x02"}, + {"crypto/internal/fips140/bigmod", "%\x17\x01\x06+\x8c\x01"}, + {"crypto/internal/fips140/check", " \x0e\x06\b\x02\xad\x01Z"}, + {"crypto/internal/fips140/check/checktest", "%\xff\x01!"}, + {"crypto/internal/fips140/drbg", "\x03\x1c\x01\x01\x04\x13\x04\b\x01)}\x0f8"}, + {"crypto/internal/fips140/ecdh", "\x03\x1d\x05\x02\t\f2}\x0f8"}, + {"crypto/internal/fips140/ecdsa", "\x03\x1d\x04\x01\x02\a\x02\x068}G"}, + {"crypto/internal/fips140/ed25519", "\x03\x1d\x05\x02\x04\v8\xc1\x01\x03"}, + {"crypto/internal/fips140/edwards25519", "%\a\f\x042\x8c\x018"}, + {"crypto/internal/fips140/edwards25519/field", "%\x13\x042\x8c\x01"}, + {"crypto/internal/fips140/hkdf", "\x03\x1d\x05\t\x06:"}, + {"crypto/internal/fips140/hmac", "\x03\x1d\x14\x01\x018"}, + {"crypto/internal/fips140/mlkem", "\x03\x1d\x05\x02\x0e\x03\x042"}, + {"crypto/internal/fips140/nistec", "%\f\a\x042\x8c\x01*\x0e\x13"}, + {"crypto/internal/fips140/nistec/fiat", "%\x136\x8c\x01"}, + {"crypto/internal/fips140/pbkdf2", "\x03\x1d\x05\t\x06:"}, + {"crypto/internal/fips140/rsa", "\x03\x1d\x04\x01\x02\r\x01\x01\x026}G"}, + {"crypto/internal/fips140/sha256", "\x03\x1d\x1c\x01\x06+\x8c\x01"}, + {"crypto/internal/fips140/sha3", "\x03\x1d\x18\x04\x011\x8c\x01K"}, + {"crypto/internal/fips140/sha512", "\x03\x1d\x1c\x01\x06+\x8c\x01"}, + {"crypto/internal/fips140/ssh", " \x05"}, + {"crypto/internal/fips140/subtle", "#\x19\xbe\x01"}, + {"crypto/internal/fips140/tls12", "\x03\x1d\x05\t\x06\x028"}, + {"crypto/internal/fips140/tls13", "\x03\x1d\x05\b\a\b2"}, + {"crypto/internal/fips140deps", ""}, + {"crypto/internal/fips140deps/byteorder", "\x9a\x01"}, + {"crypto/internal/fips140deps/cpu", "\xae\x01\a"}, + 
{"crypto/internal/fips140deps/godebug", "\xb6\x01"}, + {"crypto/internal/fips140hash", "5\x1a5\xc1\x01"}, + {"crypto/internal/fips140only", "'\r\x01\x01N25"}, + {"crypto/internal/fips140test", ""}, + {"crypto/internal/hpke", "\x0e\x01\x01\x03\x1a\x1d$,`M"}, + {"crypto/internal/impl", "\xb0\x02"}, + {"crypto/internal/randutil", "\xeb\x01\x12"}, + {"crypto/internal/sysrand", "\xd7\x01@\x1b\x01\f\x06"}, + {"crypto/internal/sysrand/internal/seccomp", "n"}, + {"crypto/md5", "\x0e2.\x16\x16`"}, + {"crypto/mlkem", "/"}, + {"crypto/pbkdf2", "2\r\x01.\x16"}, + {"crypto/rand", "\x1a\x06\a\x19\x04\x01)}\x0eL"}, + {"crypto/rc4", "#\x1d.\xc1\x01"}, + {"crypto/rsa", "\x0e\f\x01\t\x0f\f\x01\x04\x06\a\x1d\x03\x1325\r\x01"}, + {"crypto/sha1", "\x0e\f&.\x16\x16\x14L"}, + {"crypto/sha256", "\x0e\f\x1aP"}, + {"crypto/sha3", "\x0e'O\xc1\x01"}, + {"crypto/sha512", "\x0e\f\x1cN"}, + {"crypto/subtle", "8\x98\x01T"}, + {"crypto/tls", "\x03\b\x02\x01\x01\x01\x01\x02\x01\x01\x01\x03\x01\a\x01\v\x02\n\x01\b\x05\x03\x01\x01\x01\x01\x02\x01\x02\x01\x18\x02\x03\x13\x16\x14\b5\x16\x16\r\t\x01\x01\x01\x02\x01\f\x06\x02\x01"}, + {"crypto/tls/internal/fips140tls", " \x93\x02"}, + {"crypto/x509", "\x03\v\x01\x01\x01\x01\x01\x01\x01\x011\x03\x02\x01\x01\x02\x05\x01\x0e\x06\x02\x02\x03E5\x03\t\x01\x01\x01\a\x10\x05\t\x05\v\x01\x02\r\x02\x01\x01\x02\x03\x01"}, + {"crypto/x509/internal/macos", "\x03k'\x8f\x01\v\x10\x06"}, + {"crypto/x509/pkix", "d\x06\a\x88\x01F"}, + {"database/sql", "\x03\nK\x16\x03z\f\x06\"\x05\t\x02\x03\x01\f\x02\x02\x02"}, + {"database/sql/driver", "\ra\x03\xae\x01\x10\x10"}, + {"debug/buildinfo", "\x03X\x02\x01\x01\b\a\x03`\x18\x02\x01+\x10\x1e"}, + {"debug/dwarf", "\x03d\a\x03z1\x12\x01\x01"}, + {"debug/elf", "\x03\x06Q\r\a\x03`\x19\x01,\x18\x01\x15"}, + {"debug/gosym", "\x03d\n\xbd\x01\x01\x01\x02"}, + {"debug/macho", "\x03\x06Q\r\n`\x1a,\x18\x01"}, + {"debug/pe", "\x03\x06Q\r\a\x03`\x1a,\x18\x01\x15"}, + {"debug/plan9obj", "g\a\x03`\x1a,"}, + {"embed", "n+:\x18\x01S"}, + {"embed/internal/embedtest", ""}, + {"encoding", ""}, + {"encoding/ascii85", "\xeb\x01D"}, + {"encoding/asn1", "\x03k\x03\x87\x01\x01&\x0e\x02\x01\x0f\x03\x01"}, + {"encoding/base32", "\xeb\x01B\x02"}, + {"encoding/base64", "\x9a\x01QB\x02"}, + {"encoding/binary", "n}\r'\x0e\x05"}, + {"encoding/csv", "\x02\x01k\x03zE\x11\x02"}, + {"encoding/gob", "\x02`\x05\a\x03`\x1a\f\x01\x02\x1d\b\x13\x01\x0e\x02"}, + {"encoding/hex", "n\x03zB\x03"}, + {"encoding/json", "\x03\x01^\x04\b\x03z\r'\x0e\x02\x01\x02\x0f\x01\x01\x02"}, + {"encoding/pem", "\x03c\b}B\x03"}, + {"encoding/xml", "\x02\x01_\f\x03z4\x05\v\x01\x02\x0f\x02"}, + {"errors", "\xca\x01{"}, + {"expvar", "kK9\t\n\x15\r\t\x02\x03\x01\x10"}, + {"flag", "b\f\x03z,\b\x05\t\x02\x01\x0f"}, + {"fmt", "nE8\r\x1f\b\x0e\x02\x03\x11"}, + {"go/ast", "\x03\x01m\x0f\x01j\x03)\b\x0e\x02\x01"}, + {"go/ast/internal/tests", ""}, + {"go/build", "\x02\x01k\x03\x01\x03\x02\a\x02\x01\x17\x1e\x04\x02\t\x14\x12\x01+\x01\x04\x01\a\t\x02\x01\x11\x02\x02"}, + {"go/build/constraint", "n\xc1\x01\x01\x11\x02"}, + {"go/constant", "q\x10w\x01\x015\x01\x02\x11"}, + {"go/doc", "\x04m\x01\x06\t=-1\x11\x02\x01\x11\x02"}, + {"go/doc/comment", "\x03n\xbc\x01\x01\x01\x01\x11\x02"}, + {"go/format", "\x03n\x01\f\x01\x02jE"}, + {"go/importer", "t\a\x01\x01\x04\x01i9"}, + {"go/internal/gccgoimporter", "\x02\x01X\x13\x03\x05\v\x01g\x02,\x01\x05\x12\x01\v\b"}, + {"go/internal/gcimporter", "\x02o\x10\x01/\x05\x0e',\x16\x03\x02"}, + {"go/internal/srcimporter", "q\x01\x02\n\x03\x01i,\x01\x05\x13\x02\x13"}, + {"go/parser", 
"\x03k\x03\x01\x03\v\x01j\x01+\x06\x13"}, + {"go/printer", "q\x01\x03\x03\tj\r\x1f\x16\x02\x01\x02\n\x05\x02"}, + {"go/scanner", "\x03n\x10j2\x11\x01\x12\x02"}, + {"go/token", "\x04m\xbc\x01\x02\x03\x01\x0e\x02"}, + {"go/types", "\x03\x01\x06d\x03\x01\x04\b\x03\x02\x15\x1e\x06+\x04\x03\n%\a\t\x01\x01\x01\x02\x01\x0e\x02\x02"}, + {"go/version", "\xbb\x01u"}, + {"hash", "\xeb\x01"}, + {"hash/adler32", "n\x16\x16"}, + {"hash/crc32", "n\x16\x16\x14\x84\x01\x01"}, + {"hash/crc64", "n\x16\x16\x98\x01"}, + {"hash/fnv", "n\x16\x16`"}, + {"hash/maphash", "\x95\x01\x05\x1b\x03@M"}, + {"html", "\xb0\x02\x02\x11"}, + {"html/template", "\x03h\x06\x19,5\x01\v \x05\x01\x02\x03\r\x01\x02\v\x01\x03\x02"}, + {"image", "\x02l\x1f^\x0f5\x03\x01"}, + {"image/color", ""}, + {"image/color/palette", "\x8d\x01"}, + {"image/draw", "\x8c\x01\x01\x04"}, + {"image/gif", "\x02\x01\x05f\x03\x1b\x01\x01\x01\vQ"}, + {"image/internal/imageutil", "\x8c\x01"}, + {"image/jpeg", "\x02l\x1e\x01\x04Z"}, + {"image/png", "\x02\a^\n\x13\x02\x06\x01^D"}, + {"index/suffixarray", "\x03d\a}\r*\v\x01"}, + {"internal/abi", "\xb5\x01\x90\x01"}, + {"internal/asan", "\xc5\x02"}, + {"internal/bisect", "\xa4\x02\x0e\x01"}, + {"internal/buildcfg", "qG_\x06\x02\x05\v\x01"}, + {"internal/bytealg", "\xae\x01\x97\x01"}, + {"internal/byteorder", ""}, + {"internal/cfg", ""}, + {"internal/chacha8rand", "\x9a\x01\x1b\x90\x01"}, + {"internal/copyright", ""}, + {"internal/coverage", ""}, + {"internal/coverage/calloc", ""}, + {"internal/coverage/cfile", "k\x06\x17\x16\x01\x02\x01\x01\x01\x01\x01\x01\x01$\x01\x1e,\x06\a\v\x01\x03\f\x06"}, + {"internal/coverage/cformat", "\x04m-\x04I\f6\x01\x02\f"}, + {"internal/coverage/cmerge", "q-Z"}, + {"internal/coverage/decodecounter", "g\n-\v\x02@,\x18\x16"}, + {"internal/coverage/decodemeta", "\x02e\n\x17\x16\v\x02@,"}, + {"internal/coverage/encodecounter", "\x02e\n-\f\x01\x02>\f \x16"}, + {"internal/coverage/encodemeta", "\x02\x01d\n\x13\x04\x16\r\x02>,."}, + {"internal/coverage/pods", "\x04m-y\x06\x05\v\x02\x01"}, + {"internal/coverage/rtcov", "\xc5\x02"}, + {"internal/coverage/slicereader", "g\nzZ"}, + {"internal/coverage/slicewriter", "qz"}, + {"internal/coverage/stringtab", "q8\x04>"}, + {"internal/coverage/test", ""}, + {"internal/coverage/uleb128", ""}, + {"internal/cpu", "\xc5\x02"}, + {"internal/dag", "\x04m\xbc\x01\x03"}, + {"internal/diff", "\x03n\xbd\x01\x02"}, + {"internal/exportdata", "\x02\x01k\x03\x03]\x1a,\x01\x05\x12\x01\x02"}, + {"internal/filepathlite", "n+:\x19A"}, + {"internal/fmtsort", "\x04\x9b\x02\x0e"}, + {"internal/fuzz", "\x03\nA\x19\x04\x03\x03\x01\f\x0355\r\x02\x1d\x01\x05\x02\x05\v\x01\x02\x01\x01\v\x04\x02"}, + {"internal/goarch", ""}, + {"internal/godebug", "\x97\x01 {\x01\x12"}, + {"internal/godebugs", ""}, + {"internal/goexperiment", ""}, + {"internal/goos", ""}, + {"internal/goroot", "\x97\x02\x01\x05\x13\x02"}, + {"internal/gover", "\x04"}, + {"internal/goversion", ""}, + {"internal/itoa", ""}, + {"internal/lazyregexp", "\x97\x02\v\x0e\x02"}, + {"internal/lazytemplate", "\xeb\x01,\x19\x02\v"}, + {"internal/msan", "\xc5\x02"}, + {"internal/nettrace", ""}, + {"internal/obscuretestdata", "f\x85\x01,"}, + {"internal/oserror", "n"}, + {"internal/pkgbits", "\x03K\x19\a\x03\x05\vj\x0e\x1e\r\v\x01"}, + {"internal/platform", ""}, + {"internal/poll", "nO\x1a\x149\x0e\x01\x01\v\x06"}, + {"internal/profile", "\x03\x04g\x03z7\f\x01\x01\x0f"}, + {"internal/profilerecord", ""}, + {"internal/race", "\x95\x01\xb0\x01"}, + {"internal/reflectlite", "\x95\x01 3\x01P\x0e\x13\x12"}, + {"unsafe", ""}, 
+ {"vendor/golang.org/x/crypto/chacha20", "\x10W\a\x8c\x01*&"}, + {"vendor/golang.org/x/crypto/chacha20poly1305", "\x10W\a\xd8\x01\x04\x01"}, + {"vendor/golang.org/x/crypto/cryptobyte", "d\n\x03\x88\x01& \n"}, + {"vendor/golang.org/x/crypto/cryptobyte/asn1", ""}, + {"vendor/golang.org/x/crypto/internal/alias", "\xc5\x02"}, + {"vendor/golang.org/x/crypto/internal/poly1305", "Q\x16\x93\x01"}, + {"vendor/golang.org/x/net/dns/dnsmessage", "n"}, + {"vendor/golang.org/x/net/http/httpguts", "\x81\x02\x14\x1b\x13\r"}, + {"vendor/golang.org/x/net/http/httpproxy", "n\x03\x90\x01\x15\x01\x19\x13\r"}, + {"vendor/golang.org/x/net/http2/hpack", "\x03k\x03zG"}, + {"vendor/golang.org/x/net/idna", "q\x87\x018\x13\x10\x02\x01"}, + {"vendor/golang.org/x/net/nettest", "\x03d\a\x03z\x11\x05\x16\x01\f\v\x01\x02\x02\x01\n"}, + {"vendor/golang.org/x/sys/cpu", "\x97\x02\r\v\x01\x15"}, + {"vendor/golang.org/x/text/secure/bidirule", "n\xd5\x01\x11\x01"}, + {"vendor/golang.org/x/text/transform", "\x03k}X"}, + {"vendor/golang.org/x/text/unicode/bidi", "\x03\bf~?\x15"}, + {"vendor/golang.org/x/text/unicode/norm", "g\nzG\x11\x11"}, + {"weak", "\x95\x01\x8f\x01!"}, +} diff --git a/tools/vendor/golang.org/x/tools/internal/stdlib/import.go b/tools/vendor/golang.org/x/tools/internal/stdlib/import.go new file mode 100644 index 000000000..f6909878a --- /dev/null +++ b/tools/vendor/golang.org/x/tools/internal/stdlib/import.go @@ -0,0 +1,89 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package stdlib + +// This file provides the API for the import graph of the standard library. +// +// Be aware that the compiler-generated code for every package +// implicitly depends on package "runtime" and a handful of others +// (see runtimePkgs in GOROOT/src/cmd/internal/objabi/pkgspecial.go). + +import ( + "encoding/binary" + "iter" + "slices" + "strings" +) + +// Imports returns the sequence of packages directly imported by the +// named standard packages, in name order. +// The imports of an unknown package are the empty set. +// +// The graph is built into the application and may differ from the +// graph in the Go source tree being analyzed by the application. +func Imports(pkgs ...string) iter.Seq[string] { + return func(yield func(string) bool) { + for _, pkg := range pkgs { + if i, ok := find(pkg); ok { + var depIndex uint64 + for data := []byte(deps[i].deps); len(data) > 0; { + delta, n := binary.Uvarint(data) + depIndex += delta + if !yield(deps[depIndex].name) { + return + } + data = data[n:] + } + } + } + } +} + +// Dependencies returns the set of all dependencies of the named +// standard packages, including the initial package, +// in a deterministic topological order. +// The dependencies of an unknown package are the empty set. +// +// The graph is built into the application and may differ from the +// graph in the Go source tree being analyzed by the application. 
+func Dependencies(pkgs ...string) iter.Seq[string] { + return func(yield func(string) bool) { + for _, pkg := range pkgs { + if i, ok := find(pkg); ok { + var seen [1 + len(deps)/8]byte // bit set of seen packages + var visit func(i int) bool + visit = func(i int) bool { + bit := byte(1) << (i % 8) + if seen[i/8]&bit == 0 { + seen[i/8] |= bit + var depIndex uint64 + for data := []byte(deps[i].deps); len(data) > 0; { + delta, n := binary.Uvarint(data) + depIndex += delta + if !visit(int(depIndex)) { + return false + } + data = data[n:] + } + if !yield(deps[i].name) { + return false + } + } + return true + } + if !visit(i) { + return + } + } + } + } +} + +// find returns the index of pkg in the deps table. +func find(pkg string) (int, bool) { + return slices.BinarySearchFunc(deps[:], pkg, func(p pkginfo, n string) int { + return strings.Compare(p.name, n) + }) +} diff --git a/tools/vendor/golang.org/x/tools/internal/stdlib/manifest.go b/tools/vendor/golang.org/x/tools/internal/stdlib/manifest.go index 9f0b871ff..00776a31b 100644 --- a/tools/vendor/golang.org/x/tools/internal/stdlib/manifest.go +++ b/tools/vendor/golang.org/x/tools/internal/stdlib/manifest.go @@ -1,4 +1,4 @@ -// Copyright 2024 The Go Authors. All rights reserved. +// Copyright 2025 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -2151,6 +2151,8 @@ var PackageSymbols = map[string][]Symbol{ {"(Type).String", Method, 0}, {"(Version).GoString", Method, 0}, {"(Version).String", Method, 0}, + {"(VersionIndex).Index", Method, 24}, + {"(VersionIndex).IsHidden", Method, 24}, {"ARM_MAGIC_TRAMP_NUMBER", Const, 0}, {"COMPRESS_HIOS", Const, 6}, {"COMPRESS_HIPROC", Const, 6}, @@ -3834,6 +3836,7 @@ var PackageSymbols = map[string][]Symbol{ {"SymType", Type, 0}, {"SymVis", Type, 0}, {"Symbol", Type, 0}, + {"Symbol.HasVersion", Field, 24}, {"Symbol.Info", Field, 0}, {"Symbol.Library", Field, 13}, {"Symbol.Name", Field, 0}, @@ -3843,18 +3846,12 @@ var PackageSymbols = map[string][]Symbol{ {"Symbol.Value", Field, 0}, {"Symbol.Version", Field, 13}, {"Symbol.VersionIndex", Field, 24}, - {"Symbol.VersionScope", Field, 24}, - {"SymbolVersionScope", Type, 24}, {"Type", Type, 0}, {"VER_FLG_BASE", Const, 24}, {"VER_FLG_INFO", Const, 24}, {"VER_FLG_WEAK", Const, 24}, {"Version", Type, 0}, - {"VersionScopeGlobal", Const, 24}, - {"VersionScopeHidden", Const, 24}, - {"VersionScopeLocal", Const, 24}, - {"VersionScopeNone", Const, 24}, - {"VersionScopeSpecific", Const, 24}, + {"VersionIndex", Type, 24}, }, "debug/gosym": { {"(*DecodingError).Error", Method, 0}, @@ -7122,6 +7119,7 @@ var PackageSymbols = map[string][]Symbol{ {"FormatFileInfo", Func, 21}, {"Glob", Func, 16}, {"GlobFS", Type, 16}, + {"Lstat", Func, 25}, {"ModeAppend", Const, 16}, {"ModeCharDevice", Const, 16}, {"ModeDevice", Const, 16}, @@ -7146,6 +7144,8 @@ var PackageSymbols = map[string][]Symbol{ {"ReadDirFile", Type, 16}, {"ReadFile", Func, 16}, {"ReadFileFS", Type, 16}, + {"ReadLink", Func, 25}, + {"ReadLinkFS", Type, 25}, {"SkipAll", Var, 20}, {"SkipDir", Var, 16}, {"Stat", Func, 16}, @@ -9149,6 +9149,8 @@ var PackageSymbols = map[string][]Symbol{ {"(*ProcessState).SysUsage", Method, 0}, {"(*ProcessState).SystemTime", Method, 0}, {"(*ProcessState).UserTime", Method, 0}, + {"(*Root).Chmod", Method, 25}, + {"(*Root).Chown", Method, 25}, {"(*Root).Close", Method, 24}, {"(*Root).Create", Method, 24}, {"(*Root).FS", Method, 24}, @@ -16757,9 +16759,11 @@ var PackageSymbols = map[string][]Symbol{ }, 
"testing/fstest": { {"(MapFS).Glob", Method, 16}, + {"(MapFS).Lstat", Method, 25}, {"(MapFS).Open", Method, 16}, {"(MapFS).ReadDir", Method, 16}, {"(MapFS).ReadFile", Method, 16}, + {"(MapFS).ReadLink", Method, 25}, {"(MapFS).Stat", Method, 16}, {"(MapFS).Sub", Method, 16}, {"MapFS", Type, 16}, diff --git a/tools/vendor/golang.org/x/tools/internal/stdlib/stdlib.go b/tools/vendor/golang.org/x/tools/internal/stdlib/stdlib.go index 98904017f..3d96d3bf6 100644 --- a/tools/vendor/golang.org/x/tools/internal/stdlib/stdlib.go +++ b/tools/vendor/golang.org/x/tools/internal/stdlib/stdlib.go @@ -6,7 +6,7 @@ // Package stdlib provides a table of all exported symbols in the // standard library, along with the version at which they first -// appeared. +// appeared. It also provides the import graph of std packages. package stdlib import ( diff --git a/tools/vendor/golang.org/x/tools/internal/typeparams/normalize.go b/tools/vendor/golang.org/x/tools/internal/typeparams/normalize.go index 93c80fdc9..f49802b8e 100644 --- a/tools/vendor/golang.org/x/tools/internal/typeparams/normalize.go +++ b/tools/vendor/golang.org/x/tools/internal/typeparams/normalize.go @@ -120,7 +120,7 @@ type termSet struct { terms termlist } -func indentf(depth int, format string, args ...interface{}) { +func indentf(depth int, format string, args ...any) { fmt.Fprintf(os.Stderr, strings.Repeat(".", depth)+format+"\n", args...) } diff --git a/tools/vendor/golang.org/x/tools/internal/typesinternal/types.go b/tools/vendor/golang.org/x/tools/internal/typesinternal/types.go index 345348796..edf0347ec 100644 --- a/tools/vendor/golang.org/x/tools/internal/typesinternal/types.go +++ b/tools/vendor/golang.org/x/tools/internal/typesinternal/types.go @@ -32,12 +32,14 @@ func SetUsesCgo(conf *types.Config) bool { return true } -// ReadGo116ErrorData extracts additional information from types.Error values +// ErrorCodeStartEnd extracts additional information from types.Error values // generated by Go version 1.16 and later: the error code, start position, and // end position. If all positions are valid, start <= err.Pos <= end. // // If the data could not be read, the final result parameter will be false. -func ReadGo116ErrorData(err types.Error) (code ErrorCode, start, end token.Pos, ok bool) { +// +// TODO(adonovan): eliminate start/end when proposal #71803 is accepted. +func ErrorCodeStartEnd(err types.Error) (code ErrorCode, start, end token.Pos, ok bool) { var data [3]int // By coincidence all of these fields are ints, which simplifies things. 
v := reflect.ValueOf(err) diff --git a/tools/vendor/modules.txt b/tools/vendor/modules.txt index b124be6ac..820cdbe0d 100644 --- a/tools/vendor/modules.txt +++ b/tools/vendor/modules.txt @@ -43,8 +43,8 @@ github.com/GaijinEntertainment/go-exhaustruct/v3/internal/structure # github.com/Masterminds/semver/v3 v3.3.0 ## explicit; go 1.21 github.com/Masterminds/semver/v3 -# github.com/OpenPeeDeeP/depguard/v2 v2.2.0 -## explicit; go 1.20 +# github.com/OpenPeeDeeP/depguard/v2 v2.2.1 +## explicit; go 1.23.0 github.com/OpenPeeDeeP/depguard/v2 github.com/OpenPeeDeeP/depguard/v2/internal/utils # github.com/alecthomas/go-check-sumtype v0.3.1 @@ -219,10 +219,10 @@ github.com/golang/protobuf/ptypes github.com/golang/protobuf/ptypes/any github.com/golang/protobuf/ptypes/duration github.com/golang/protobuf/ptypes/timestamp -# github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a -## explicit -github.com/golangci/dupl +# github.com/golangci/dupl v0.0.0-20250308024227-f665c8d69b32 +## explicit; go 1.22.0 github.com/golangci/dupl/job +github.com/golangci/dupl/lib github.com/golangci/dupl/printer github.com/golangci/dupl/suffixtree github.com/golangci/dupl/syntax @@ -233,7 +233,7 @@ github.com/golangci/go-printf-func-name/pkg/analyzer # github.com/golangci/gofmt v0.0.0-20250106114630-d62b90e6713d ## explicit; go 1.22.0 github.com/golangci/gofmt/gofmt -# github.com/golangci/golangci-lint v1.64.5 +# github.com/golangci/golangci-lint v1.64.8 ## explicit; go 1.23.0 github.com/golangci/golangci-lint/cmd/golangci-lint github.com/golangci/golangci-lint/internal/cache @@ -681,8 +681,8 @@ github.com/sashamelentyev/interfacebloat/pkg/analyzer ## explicit; go 1.20 github.com/sashamelentyev/usestdlibvars/pkg/analyzer github.com/sashamelentyev/usestdlibvars/pkg/analyzer/internal/mapping -# github.com/securego/gosec/v2 v2.22.1 -## explicit; go 1.22.7 +# github.com/securego/gosec/v2 v2.22.2 +## explicit; go 1.23.0 github.com/securego/gosec/v2 github.com/securego/gosec/v2/analyzers github.com/securego/gosec/v2/cwe @@ -836,18 +836,18 @@ go.uber.org/zap/zapcore # golang.org/x/exp/typeparams v0.0.0-20250210185358-939b2ce775ac ## explicit; go 1.18 golang.org/x/exp/typeparams -# golang.org/x/mod v0.23.0 -## explicit; go 1.22.0 +# golang.org/x/mod v0.24.0 +## explicit; go 1.23.0 golang.org/x/mod/internal/lazyregexp golang.org/x/mod/modfile golang.org/x/mod/module golang.org/x/mod/semver -# golang.org/x/sync v0.11.0 -## explicit; go 1.18 +# golang.org/x/sync v0.12.0 +## explicit; go 1.23.0 golang.org/x/sync/errgroup golang.org/x/sync/semaphore -# golang.org/x/sys v0.30.0 -## explicit; go 1.18 +# golang.org/x/sys v0.31.0 +## explicit; go 1.23.0 golang.org/x/sys/unix golang.org/x/sys/windows # golang.org/x/text v0.22.0 @@ -868,8 +868,8 @@ golang.org/x/text/runes golang.org/x/text/transform golang.org/x/text/unicode/norm golang.org/x/text/width -# golang.org/x/tools v0.30.0 -## explicit; go 1.22.0 +# golang.org/x/tools v0.31.0 +## explicit; go 1.23.0 golang.org/x/tools/go/analysis golang.org/x/tools/go/analysis/passes/appends golang.org/x/tools/go/analysis/passes/asmdecl @@ -996,7 +996,7 @@ gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 -# honnef.co/go/tools v0.6.0 +# honnef.co/go/tools v0.6.1 ## explicit; go 1.23 honnef.co/go/tools/analysis/callcheck honnef.co/go/tools/analysis/code diff --git a/vendor/go.uber.org/atomic/CHANGELOG.md b/vendor/go.uber.org/atomic/CHANGELOG.md index
5fe03f21b..6f87f33fa 100644 --- a/vendor/go.uber.org/atomic/CHANGELOG.md +++ b/vendor/go.uber.org/atomic/CHANGELOG.md @@ -4,6 +4,16 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## [1.11.0] - 2023-05-02 +### Fixed +- Fix initialization of `Value` wrappers. + +### Added +- Add `String` method to `atomic.Pointer[T]` type allowing users to safely print +underlying values of pointers. + +[1.11.0]: https://github.com/uber-go/atomic/compare/v1.10.0...v1.11.0 + ## [1.10.0] - 2022-08-11 ### Added - Add `atomic.Float32` type for atomic operations on `float32`. diff --git a/vendor/go.uber.org/atomic/bool.go b/vendor/go.uber.org/atomic/bool.go index dfa2085f4..f0a2ddd14 100644 --- a/vendor/go.uber.org/atomic/bool.go +++ b/vendor/go.uber.org/atomic/bool.go @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicwrapper. -// Copyright (c) 2020-2022 Uber Technologies, Inc. +// Copyright (c) 2020-2023 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/go.uber.org/atomic/duration.go b/vendor/go.uber.org/atomic/duration.go index 6f4157445..7c23868fc 100644 --- a/vendor/go.uber.org/atomic/duration.go +++ b/vendor/go.uber.org/atomic/duration.go @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicwrapper. -// Copyright (c) 2020-2022 Uber Technologies, Inc. +// Copyright (c) 2020-2023 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/go.uber.org/atomic/error.go b/vendor/go.uber.org/atomic/error.go index 27b23ea16..b7e3f1291 100644 --- a/vendor/go.uber.org/atomic/error.go +++ b/vendor/go.uber.org/atomic/error.go @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicwrapper. -// Copyright (c) 2020-2022 Uber Technologies, Inc. +// Copyright (c) 2020-2023 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -52,7 +52,17 @@ func (x *Error) Store(val error) { // CompareAndSwap is an atomic compare-and-swap for error values. func (x *Error) CompareAndSwap(old, new error) (swapped bool) { - return x.v.CompareAndSwap(packError(old), packError(new)) + if x.v.CompareAndSwap(packError(old), packError(new)) { + return true + } + + if old == _zeroError { + // If the old value is the empty value, then it's possible the + // underlying Value hasn't been set and is nil, so retry with nil. + return x.v.CompareAndSwap(nil, packError(new)) + } + + return false } // Swap atomically stores the given error and returns the old diff --git a/vendor/go.uber.org/atomic/float32.go b/vendor/go.uber.org/atomic/float32.go index 5d535a6d2..62c36334f 100644 --- a/vendor/go.uber.org/atomic/float32.go +++ b/vendor/go.uber.org/atomic/float32.go @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicwrapper. -// Copyright (c) 2020-2022 Uber Technologies, Inc. +// Copyright (c) 2020-2023 Uber Technologies, Inc. 
// // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/go.uber.org/atomic/float64.go b/vendor/go.uber.org/atomic/float64.go index 11d5189a5..5bc11caab 100644 --- a/vendor/go.uber.org/atomic/float64.go +++ b/vendor/go.uber.org/atomic/float64.go @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicwrapper. -// Copyright (c) 2020-2022 Uber Technologies, Inc. +// Copyright (c) 2020-2023 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/go.uber.org/atomic/int32.go b/vendor/go.uber.org/atomic/int32.go index b9a68f42c..5320eac10 100644 --- a/vendor/go.uber.org/atomic/int32.go +++ b/vendor/go.uber.org/atomic/int32.go @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicint. -// Copyright (c) 2020-2022 Uber Technologies, Inc. +// Copyright (c) 2020-2023 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/go.uber.org/atomic/int64.go b/vendor/go.uber.org/atomic/int64.go index 78d260976..460821d00 100644 --- a/vendor/go.uber.org/atomic/int64.go +++ b/vendor/go.uber.org/atomic/int64.go @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicint. -// Copyright (c) 2020-2022 Uber Technologies, Inc. +// Copyright (c) 2020-2023 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/go.uber.org/atomic/pointer_go118.go b/vendor/go.uber.org/atomic/pointer_go118.go index e0f47dba4..1fb6c03b2 100644 --- a/vendor/go.uber.org/atomic/pointer_go118.go +++ b/vendor/go.uber.org/atomic/pointer_go118.go @@ -18,43 +18,14 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. -//go:build go1.18 && !go1.19 -// +build go1.18,!go1.19 +//go:build go1.18 +// +build go1.18 package atomic -import "unsafe" +import "fmt" -type Pointer[T any] struct { - _ nocmp // disallow non-atomic comparison - p UnsafePointer -} - -// NewPointer creates a new Pointer. -func NewPointer[T any](v *T) *Pointer[T] { - var p Pointer[T] - if v != nil { - p.p.Store(unsafe.Pointer(v)) - } - return &p -} - -// Load atomically loads the wrapped value. -func (p *Pointer[T]) Load() *T { - return (*T)(p.p.Load()) -} - -// Store atomically stores the passed value. -func (p *Pointer[T]) Store(val *T) { - p.p.Store(unsafe.Pointer(val)) -} - -// Swap atomically swaps the wrapped pointer and returns the old value. -func (p *Pointer[T]) Swap(val *T) (old *T) { - return (*T)(p.p.Swap(unsafe.Pointer(val))) -} - -// CompareAndSwap is an atomic compare-and-swap. -func (p *Pointer[T]) CompareAndSwap(old, new *T) (swapped bool) { - return p.p.CompareAndSwap(unsafe.Pointer(old), unsafe.Pointer(new)) +// String returns a human readable representation of a Pointer's underlying value. 
+func (p *Pointer[T]) String() string { + return fmt.Sprint(p.Load()) } diff --git a/vendor/go.uber.org/atomic/pointer_go118_pre119.go b/vendor/go.uber.org/atomic/pointer_go118_pre119.go new file mode 100644 index 000000000..e0f47dba4 --- /dev/null +++ b/vendor/go.uber.org/atomic/pointer_go118_pre119.go @@ -0,0 +1,60 @@ +// Copyright (c) 2022 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +//go:build go1.18 && !go1.19 +// +build go1.18,!go1.19 + +package atomic + +import "unsafe" + +type Pointer[T any] struct { + _ nocmp // disallow non-atomic comparison + p UnsafePointer +} + +// NewPointer creates a new Pointer. +func NewPointer[T any](v *T) *Pointer[T] { + var p Pointer[T] + if v != nil { + p.p.Store(unsafe.Pointer(v)) + } + return &p +} + +// Load atomically loads the wrapped value. +func (p *Pointer[T]) Load() *T { + return (*T)(p.p.Load()) +} + +// Store atomically stores the passed value. +func (p *Pointer[T]) Store(val *T) { + p.p.Store(unsafe.Pointer(val)) +} + +// Swap atomically swaps the wrapped pointer and returns the old value. +func (p *Pointer[T]) Swap(val *T) (old *T) { + return (*T)(p.p.Swap(unsafe.Pointer(val))) +} + +// CompareAndSwap is an atomic compare-and-swap. +func (p *Pointer[T]) CompareAndSwap(old, new *T) (swapped bool) { + return p.p.CompareAndSwap(unsafe.Pointer(old), unsafe.Pointer(new)) +} diff --git a/vendor/go.uber.org/atomic/string.go b/vendor/go.uber.org/atomic/string.go index c4bea70f4..061466c5b 100644 --- a/vendor/go.uber.org/atomic/string.go +++ b/vendor/go.uber.org/atomic/string.go @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicwrapper. -// Copyright (c) 2020-2022 Uber Technologies, Inc. +// Copyright (c) 2020-2023 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -42,24 +42,31 @@ func NewString(val string) *String { // Load atomically loads the wrapped string. func (x *String) Load() string { - if v := x.v.Load(); v != nil { - return v.(string) - } - return _zeroString + return unpackString(x.v.Load()) } // Store atomically stores the passed string. func (x *String) Store(val string) { - x.v.Store(val) + x.v.Store(packString(val)) } // CompareAndSwap is an atomic compare-and-swap for string values. 
func (x *String) CompareAndSwap(old, new string) (swapped bool) { - return x.v.CompareAndSwap(old, new) + if x.v.CompareAndSwap(packString(old), packString(new)) { + return true + } + + if old == _zeroString { + // If the old value is the empty value, then it's possible the + // underlying Value hasn't been set and is nil, so retry with nil. + return x.v.CompareAndSwap(nil, packString(new)) + } + + return false } // Swap atomically stores the given string and returns the old // value. func (x *String) Swap(val string) (old string) { - return x.v.Swap(val).(string) + return unpackString(x.v.Swap(packString(val))) } diff --git a/vendor/go.uber.org/atomic/string_ext.go b/vendor/go.uber.org/atomic/string_ext.go index 1f63dfd5b..019109c86 100644 --- a/vendor/go.uber.org/atomic/string_ext.go +++ b/vendor/go.uber.org/atomic/string_ext.go @@ -1,4 +1,4 @@ -// Copyright (c) 2020-2022 Uber Technologies, Inc. +// Copyright (c) 2020-2023 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -20,7 +20,18 @@ package atomic -//go:generate bin/gen-atomicwrapper -name=String -type=string -wrapped=Value -compareandswap -swap -file=string.go +//go:generate bin/gen-atomicwrapper -name=String -type=string -wrapped Value -pack packString -unpack unpackString -compareandswap -swap -file=string.go + +func packString(s string) interface{} { + return s +} + +func unpackString(v interface{}) string { + if s, ok := v.(string); ok { + return s + } + return "" +} // String returns the wrapped value. func (s *String) String() string { diff --git a/vendor/go.uber.org/atomic/time.go b/vendor/go.uber.org/atomic/time.go index 1660feb14..cc2a230c0 100644 --- a/vendor/go.uber.org/atomic/time.go +++ b/vendor/go.uber.org/atomic/time.go @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicwrapper. -// Copyright (c) 2020-2022 Uber Technologies, Inc. +// Copyright (c) 2020-2023 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/go.uber.org/atomic/uint32.go b/vendor/go.uber.org/atomic/uint32.go index d6f04a96d..4adc294ac 100644 --- a/vendor/go.uber.org/atomic/uint32.go +++ b/vendor/go.uber.org/atomic/uint32.go @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicint. -// Copyright (c) 2020-2022 Uber Technologies, Inc. +// Copyright (c) 2020-2023 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/go.uber.org/atomic/uint64.go b/vendor/go.uber.org/atomic/uint64.go index 2574bdd5e..0e2eddb30 100644 --- a/vendor/go.uber.org/atomic/uint64.go +++ b/vendor/go.uber.org/atomic/uint64.go @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicint. -// Copyright (c) 2020-2022 Uber Technologies, Inc. +// Copyright (c) 2020-2023 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/go.uber.org/atomic/uintptr.go b/vendor/go.uber.org/atomic/uintptr.go index 81b275a7a..7d5b000d6 100644 --- a/vendor/go.uber.org/atomic/uintptr.go +++ b/vendor/go.uber.org/atomic/uintptr.go @@ -1,6 +1,6 @@ // @generated Code generated by gen-atomicint. 
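The CompareAndSwap changes to Error above and String here work around the same pitfall: a never-stored atomic Value holds nil, not the wrapper's zero value, so a CAS from the zero value must fall back to a CAS from nil. The standard library's sync/atomic.Value (Go 1.17+) exhibits the underlying behavior; a minimal sketch:

package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var v atomic.Value // never stored: the underlying value is nil, not ""

	// CAS from "" fails because the stored value is nil.
	fmt.Println(v.CompareAndSwap("", "hello")) // false

	// Retrying with nil as the old value succeeds; this is exactly
	// the fallback the patched wrappers add for their zero values.
	fmt.Println(v.CompareAndSwap(nil, "hello")) // true
}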
-// Copyright (c) 2020-2022 Uber Technologies, Inc. +// Copyright (c) 2020-2023 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/golang.org/x/crypto/ssh/handshake.go b/vendor/golang.org/x/crypto/ssh/handshake.go index fef687db0..c9202b05d 100644 --- a/vendor/golang.org/x/crypto/ssh/handshake.go +++ b/vendor/golang.org/x/crypto/ssh/handshake.go @@ -25,6 +25,11 @@ const debugHandshake = false // quickly. const chanSize = 16 +// maxPendingPackets sets the maximum number of packets to queue while waiting +// for KEX to complete. This limits the total pending data to maxPendingPackets +// * maxPacket bytes, which is ~16.8MB. +const maxPendingPackets = 64 + // keyingTransport is a packet based transport that supports key // changes. It need not be thread-safe. It should pass through // msgNewKeys in both directions. @@ -73,11 +78,19 @@ type handshakeTransport struct { incoming chan []byte readError error - mu sync.Mutex - writeError error - sentInitPacket []byte - sentInitMsg *kexInitMsg - pendingPackets [][]byte // Used when a key exchange is in progress. + mu sync.Mutex + // Condition for the above mutex. It is used to notify a completed key + // exchange or a write failure. Writes can wait for this condition while a + // key exchange is in progress. + writeCond *sync.Cond + writeError error + sentInitPacket []byte + sentInitMsg *kexInitMsg + // Used to queue writes when a key exchange is in progress. The length is + // limited by pendingPacketsSize. Once full, writes will block until the key + // exchange is completed or an error occurs. If not empty, it is emptied + // all at once when the key exchange is completed in kexLoop. + pendingPackets [][]byte writePacketsLeft uint32 writeBytesLeft int64 userAuthComplete bool // whether the user authentication phase is complete @@ -134,6 +147,7 @@ func newHandshakeTransport(conn keyingTransport, config *Config, clientVersion, config: config, } + t.writeCond = sync.NewCond(&t.mu) t.resetReadThresholds() t.resetWriteThresholds() @@ -260,6 +274,7 @@ func (t *handshakeTransport) recordWriteError(err error) { defer t.mu.Unlock() if t.writeError == nil && err != nil { t.writeError = err + t.writeCond.Broadcast() } } @@ -363,6 +378,8 @@ write: } } t.pendingPackets = t.pendingPackets[:0] + // Unblock writePacket if waiting for KEX. + t.writeCond.Broadcast() t.mu.Unlock() } @@ -577,11 +594,20 @@ func (t *handshakeTransport) writePacket(p []byte) error { } if t.sentInitMsg != nil { - // Copy the packet so the writer can reuse the buffer. - cp := make([]byte, len(p)) - copy(cp, p) - t.pendingPackets = append(t.pendingPackets, cp) - return nil + if len(t.pendingPackets) < maxPendingPackets { + // Copy the packet so the writer can reuse the buffer. + cp := make([]byte, len(p)) + copy(cp, p) + t.pendingPackets = append(t.pendingPackets, cp) + return nil + } + for t.sentInitMsg != nil { + // Block and wait for KEX to complete or an error. 
+ t.writeCond.Wait() + if t.writeError != nil { + return t.writeError + } + } } if t.writeBytesLeft > 0 { @@ -598,6 +624,7 @@ func (t *handshakeTransport) writePacket(p []byte) error { if err := t.pushPacket(p); err != nil { t.writeError = err + t.writeCond.Broadcast() } return nil diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go index cf66309c4..db1c95fab 100644 --- a/vendor/golang.org/x/net/context/context.go +++ b/vendor/golang.org/x/net/context/context.go @@ -3,29 +3,31 @@ // license that can be found in the LICENSE file. // Package context defines the Context type, which carries deadlines, -// cancelation signals, and other request-scoped values across API boundaries +// cancellation signals, and other request-scoped values across API boundaries // and between processes. // As of Go 1.7 this package is available in the standard library under the -// name context. https://golang.org/pkg/context. +// name [context], and migrating to it can be done automatically with [go fix]. // -// Incoming requests to a server should create a Context, and outgoing calls to -// servers should accept a Context. The chain of function calls between must -// propagate the Context, optionally replacing it with a modified copy created -// using WithDeadline, WithTimeout, WithCancel, or WithValue. +// Incoming requests to a server should create a [Context], and outgoing +// calls to servers should accept a Context. The chain of function +// calls between them must propagate the Context, optionally replacing +// it with a derived Context created using [WithCancel], [WithDeadline], +// [WithTimeout], or [WithValue]. // // Programs that use Contexts should follow these rules to keep interfaces // consistent across packages and enable static analysis tools to check context // propagation: // // Do not store Contexts inside a struct type; instead, pass a Context -// explicitly to each function that needs it. The Context should be the first +// explicitly to each function that needs it. This is discussed further in +// https://go.dev/blog/context-and-structs. The Context should be the first // parameter, typically named ctx: // // func DoSomething(ctx context.Context, arg Arg) error { // // ... use ctx ... // } // -// Do not pass a nil Context, even if a function permits it. Pass context.TODO +// Do not pass a nil [Context], even if a function permits it. Pass [context.TODO] // if you are unsure about which Context to use. // // Use context Values only for request-scoped data that transits processes and @@ -34,9 +36,30 @@ // The same Context may be passed to functions running in different goroutines; // Contexts are safe for simultaneous use by multiple goroutines. // -// See http://blog.golang.org/context for example code for a server that uses +// See https://go.dev/blog/context for example code for a server that uses // Contexts. -package context // import "golang.org/x/net/context" +// +// [go fix]: https://go.dev/cmd/go#hdr-Update_packages_to_use_new_APIs +package context + +import ( + "context" // standard library's context, as of Go 1.7 + "time" +) + +// A Context carries a deadline, a cancellation signal, and other values across +// API boundaries. +// +// Context's methods may be called by multiple goroutines simultaneously. +type Context = context.Context + +// Canceled is the error returned by [Context.Err] when the context is canceled +// for some reason other than its deadline passing. 
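The handshake change above caps pendingPackets at maxPendingPackets and adds writeCond so that writers block, rather than queueing unboundedly, while a key exchange is in progress; kexLoop broadcasts on completion or error to release them. The blocking pattern is ordinary sync.Cond usage; a minimal sketch of a bounded queue in the same shape, with hypothetical names rather than the patched ones:

package main

import (
	"fmt"
	"sync"
)

const maxPending = 64

type queue struct {
	mu      sync.Mutex
	cond    *sync.Cond
	pending [][]byte
	busy    bool // analogous to "key exchange in progress"
}

func newQueue() *queue {
	q := &queue{busy: true}
	q.cond = sync.NewCond(&q.mu)
	return q
}

// enqueue blocks once maxPending packets are queued, until done runs.
func (q *queue) enqueue(p []byte) {
	q.mu.Lock()
	defer q.mu.Unlock()
	for q.busy && len(q.pending) >= maxPending {
		q.cond.Wait()
	}
	if q.busy {
		q.pending = append(q.pending, p)
	}
}

// done flushes the queue and wakes every blocked writer,
// as kexLoop does when the key exchange completes.
func (q *queue) done() {
	q.mu.Lock()
	q.pending = q.pending[:0]
	q.busy = false
	q.cond.Broadcast()
	q.mu.Unlock()
}

func main() {
	q := newQueue()
	go q.done()
	q.enqueue([]byte("packet"))
	fmt.Println("write completed")
}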
+var Canceled = context.Canceled + +// DeadlineExceeded is the error returned by [Context.Err] when the context is canceled +// due to its deadline passing. +var DeadlineExceeded = context.DeadlineExceeded // Background returns a non-nil, empty Context. It is never canceled, has no // values, and has no deadline. It is typically used by the main function, @@ -49,8 +72,73 @@ func Background() Context { // TODO returns a non-nil, empty Context. Code should use context.TODO when // it's unclear which Context to use or it is not yet available (because the // surrounding function has not yet been extended to accept a Context -// parameter). TODO is recognized by static analysis tools that determine -// whether Contexts are propagated correctly in a program. +// parameter). func TODO() Context { return todo } + +var ( + background = context.Background() + todo = context.TODO() +) + +// A CancelFunc tells an operation to abandon its work. +// A CancelFunc does not wait for the work to stop. +// A CancelFunc may be called by multiple goroutines simultaneously. +// After the first call, subsequent calls to a CancelFunc do nothing. +type CancelFunc = context.CancelFunc + +// WithCancel returns a derived context that points to the parent context +// but has a new Done channel. The returned context's Done channel is closed +// when the returned cancel function is called or when the parent context's +// Done channel is closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this [Context] complete. +func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { + return context.WithCancel(parent) +} + +// WithDeadline returns a derived context that points to the parent context +// but has the deadline adjusted to be no later than d. If the parent's +// deadline is already earlier than d, WithDeadline(parent, d) is semantically +// equivalent to parent. The returned [Context.Done] channel is closed when +// the deadline expires, when the returned cancel function is called, +// or when the parent context's Done channel is closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this [Context] complete. +func WithDeadline(parent Context, d time.Time) (Context, CancelFunc) { + return context.WithDeadline(parent, d) +} + +// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this [Context] complete: +// +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } +func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { + return context.WithTimeout(parent, timeout) +} + +// WithValue returns a derived context that points to the parent Context. +// In the derived context, the value associated with key is val. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. 
+// +// The provided key must be comparable and should not be of type +// string or any other built-in type to avoid collisions between +// packages using context. Users of WithValue should define their own +// types for keys. To avoid allocating when assigning to an +// interface{}, context keys often have concrete type +// struct{}. Alternatively, exported context key variables' static +// type should be a pointer or interface. +func WithValue(parent Context, key, val interface{}) Context { + return context.WithValue(parent, key, val) +} diff --git a/vendor/golang.org/x/net/context/go17.go b/vendor/golang.org/x/net/context/go17.go deleted file mode 100644 index 0c1b86793..000000000 --- a/vendor/golang.org/x/net/context/go17.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.7 - -package context - -import ( - "context" // standard library's context, as of Go 1.7 - "time" -) - -var ( - todo = context.TODO() - background = context.Background() -) - -// Canceled is the error returned by Context.Err when the context is canceled. -var Canceled = context.Canceled - -// DeadlineExceeded is the error returned by Context.Err when the context's -// deadline passes. -var DeadlineExceeded = context.DeadlineExceeded - -// WithCancel returns a copy of parent with a new Done channel. The returned -// context's Done channel is closed when the returned cancel function is called -// or when the parent context's Done channel is closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { - ctx, f := context.WithCancel(parent) - return ctx, f -} - -// WithDeadline returns a copy of the parent context with the deadline adjusted -// to be no later than d. If the parent's deadline is already earlier than d, -// WithDeadline(parent, d) is semantically equivalent to parent. The returned -// context's Done channel is closed when the deadline expires, when the returned -// cancel function is called, or when the parent context's Done channel is -// closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { - ctx, f := context.WithDeadline(parent, deadline) - return ctx, f -} - -// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete: -// -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } -func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { - return WithDeadline(parent, time.Now().Add(timeout)) -} - -// WithValue returns a copy of parent in which the value associated with key is -// val. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. 
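The WithValue documentation above warns against built-in key types; the conventional pattern is an unexported named type (often an empty struct), so keys defined in different packages can never collide. A brief sketch:

package main

import (
	"context"
	"fmt"
)

// ctxKey is unexported, so no other package can construct a
// colliding key, even one with the same underlying value.
type ctxKey struct{}

func main() {
	ctx := context.WithValue(context.Background(), ctxKey{}, "request-42")
	fmt.Println(ctx.Value(ctxKey{})) // request-42
}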
-func WithValue(parent Context, key interface{}, val interface{}) Context { - return context.WithValue(parent, key, val) -} diff --git a/vendor/golang.org/x/net/context/go19.go b/vendor/golang.org/x/net/context/go19.go deleted file mode 100644 index e31e35a90..000000000 --- a/vendor/golang.org/x/net/context/go19.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.9 - -package context - -import "context" // standard library's context, as of Go 1.7 - -// A Context carries a deadline, a cancelation signal, and other values across -// API boundaries. -// -// Context's methods may be called by multiple goroutines simultaneously. -type Context = context.Context - -// A CancelFunc tells an operation to abandon its work. -// A CancelFunc does not wait for the work to stop. -// After the first call, subsequent calls to a CancelFunc do nothing. -type CancelFunc = context.CancelFunc diff --git a/vendor/golang.org/x/net/context/pre_go17.go b/vendor/golang.org/x/net/context/pre_go17.go deleted file mode 100644 index 065ff3dfa..000000000 --- a/vendor/golang.org/x/net/context/pre_go17.go +++ /dev/null @@ -1,300 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.7 - -package context - -import ( - "errors" - "fmt" - "sync" - "time" -) - -// An emptyCtx is never canceled, has no values, and has no deadline. It is not -// struct{}, since vars of this type must have distinct addresses. -type emptyCtx int - -func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { - return -} - -func (*emptyCtx) Done() <-chan struct{} { - return nil -} - -func (*emptyCtx) Err() error { - return nil -} - -func (*emptyCtx) Value(key interface{}) interface{} { - return nil -} - -func (e *emptyCtx) String() string { - switch e { - case background: - return "context.Background" - case todo: - return "context.TODO" - } - return "unknown empty Context" -} - -var ( - background = new(emptyCtx) - todo = new(emptyCtx) -) - -// Canceled is the error returned by Context.Err when the context is canceled. -var Canceled = errors.New("context canceled") - -// DeadlineExceeded is the error returned by Context.Err when the context's -// deadline passes. -var DeadlineExceeded = errors.New("context deadline exceeded") - -// WithCancel returns a copy of parent with a new Done channel. The returned -// context's Done channel is closed when the returned cancel function is called -// or when the parent context's Done channel is closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { - c := newCancelCtx(parent) - propagateCancel(parent, c) - return c, func() { c.cancel(true, Canceled) } -} - -// newCancelCtx returns an initialized cancelCtx. -func newCancelCtx(parent Context) *cancelCtx { - return &cancelCtx{ - Context: parent, - done: make(chan struct{}), - } -} - -// propagateCancel arranges for child to be canceled when parent is. 
-func propagateCancel(parent Context, child canceler) { - if parent.Done() == nil { - return // parent is never canceled - } - if p, ok := parentCancelCtx(parent); ok { - p.mu.Lock() - if p.err != nil { - // parent has already been canceled - child.cancel(false, p.err) - } else { - if p.children == nil { - p.children = make(map[canceler]bool) - } - p.children[child] = true - } - p.mu.Unlock() - } else { - go func() { - select { - case <-parent.Done(): - child.cancel(false, parent.Err()) - case <-child.Done(): - } - }() - } -} - -// parentCancelCtx follows a chain of parent references until it finds a -// *cancelCtx. This function understands how each of the concrete types in this -// package represents its parent. -func parentCancelCtx(parent Context) (*cancelCtx, bool) { - for { - switch c := parent.(type) { - case *cancelCtx: - return c, true - case *timerCtx: - return c.cancelCtx, true - case *valueCtx: - parent = c.Context - default: - return nil, false - } - } -} - -// removeChild removes a context from its parent. -func removeChild(parent Context, child canceler) { - p, ok := parentCancelCtx(parent) - if !ok { - return - } - p.mu.Lock() - if p.children != nil { - delete(p.children, child) - } - p.mu.Unlock() -} - -// A canceler is a context type that can be canceled directly. The -// implementations are *cancelCtx and *timerCtx. -type canceler interface { - cancel(removeFromParent bool, err error) - Done() <-chan struct{} -} - -// A cancelCtx can be canceled. When canceled, it also cancels any children -// that implement canceler. -type cancelCtx struct { - Context - - done chan struct{} // closed by the first cancel call. - - mu sync.Mutex - children map[canceler]bool // set to nil by the first cancel call - err error // set to non-nil by the first cancel call -} - -func (c *cancelCtx) Done() <-chan struct{} { - return c.done -} - -func (c *cancelCtx) Err() error { - c.mu.Lock() - defer c.mu.Unlock() - return c.err -} - -func (c *cancelCtx) String() string { - return fmt.Sprintf("%v.WithCancel", c.Context) -} - -// cancel closes c.done, cancels each of c's children, and, if -// removeFromParent is true, removes c from its parent's children. -func (c *cancelCtx) cancel(removeFromParent bool, err error) { - if err == nil { - panic("context: internal error: missing cancel error") - } - c.mu.Lock() - if c.err != nil { - c.mu.Unlock() - return // already canceled - } - c.err = err - close(c.done) - for child := range c.children { - // NOTE: acquiring the child's lock while holding parent's lock. - child.cancel(false, err) - } - c.children = nil - c.mu.Unlock() - - if removeFromParent { - removeChild(c.Context, c) - } -} - -// WithDeadline returns a copy of the parent context with the deadline adjusted -// to be no later than d. If the parent's deadline is already earlier than d, -// WithDeadline(parent, d) is semantically equivalent to parent. The returned -// context's Done channel is closed when the deadline expires, when the returned -// cancel function is called, or when the parent context's Done channel is -// closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { - if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { - // The current deadline is already sooner than the new one. 
- return WithCancel(parent) - } - c := &timerCtx{ - cancelCtx: newCancelCtx(parent), - deadline: deadline, - } - propagateCancel(parent, c) - d := deadline.Sub(time.Now()) - if d <= 0 { - c.cancel(true, DeadlineExceeded) // deadline has already passed - return c, func() { c.cancel(true, Canceled) } - } - c.mu.Lock() - defer c.mu.Unlock() - if c.err == nil { - c.timer = time.AfterFunc(d, func() { - c.cancel(true, DeadlineExceeded) - }) - } - return c, func() { c.cancel(true, Canceled) } -} - -// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to -// implement Done and Err. It implements cancel by stopping its timer then -// delegating to cancelCtx.cancel. -type timerCtx struct { - *cancelCtx - timer *time.Timer // Under cancelCtx.mu. - - deadline time.Time -} - -func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { - return c.deadline, true -} - -func (c *timerCtx) String() string { - return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) -} - -func (c *timerCtx) cancel(removeFromParent bool, err error) { - c.cancelCtx.cancel(false, err) - if removeFromParent { - // Remove this timerCtx from its parent cancelCtx's children. - removeChild(c.cancelCtx.Context, c) - } - c.mu.Lock() - if c.timer != nil { - c.timer.Stop() - c.timer = nil - } - c.mu.Unlock() -} - -// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete: -// -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } -func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { - return WithDeadline(parent, time.Now().Add(timeout)) -} - -// WithValue returns a copy of parent in which the value associated with key is -// val. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -func WithValue(parent Context, key interface{}, val interface{}) Context { - return &valueCtx{parent, key, val} -} - -// A valueCtx carries a key-value pair. It implements Value for that key and -// delegates all other calls to the embedded Context. -type valueCtx struct { - Context - key, val interface{} -} - -func (c *valueCtx) String() string { - return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val) -} - -func (c *valueCtx) Value(key interface{}) interface{} { - if c.key == key { - return c.val - } - return c.Context.Value(key) -} diff --git a/vendor/golang.org/x/net/context/pre_go19.go b/vendor/golang.org/x/net/context/pre_go19.go deleted file mode 100644 index ec5a63803..000000000 --- a/vendor/golang.org/x/net/context/pre_go19.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.9 - -package context - -import "time" - -// A Context carries a deadline, a cancelation signal, and other values across -// API boundaries. -// -// Context's methods may be called by multiple goroutines simultaneously. -type Context interface { - // Deadline returns the time when work done on behalf of this context - // should be canceled. 
Deadline returns ok==false when no deadline is - // set. Successive calls to Deadline return the same results. - Deadline() (deadline time.Time, ok bool) - - // Done returns a channel that's closed when work done on behalf of this - // context should be canceled. Done may return nil if this context can - // never be canceled. Successive calls to Done return the same value. - // - // WithCancel arranges for Done to be closed when cancel is called; - // WithDeadline arranges for Done to be closed when the deadline - // expires; WithTimeout arranges for Done to be closed when the timeout - // elapses. - // - // Done is provided for use in select statements: - // - // // Stream generates values with DoSomething and sends them to out - // // until DoSomething returns an error or ctx.Done is closed. - // func Stream(ctx context.Context, out chan<- Value) error { - // for { - // v, err := DoSomething(ctx) - // if err != nil { - // return err - // } - // select { - // case <-ctx.Done(): - // return ctx.Err() - // case out <- v: - // } - // } - // } - // - // See http://blog.golang.org/pipelines for more examples of how to use - // a Done channel for cancelation. - Done() <-chan struct{} - - // Err returns a non-nil error value after Done is closed. Err returns - // Canceled if the context was canceled or DeadlineExceeded if the - // context's deadline passed. No other values for Err are defined. - // After Done is closed, successive calls to Err return the same value. - Err() error - - // Value returns the value associated with this context for key, or nil - // if no value is associated with key. Successive calls to Value with - // the same key returns the same result. - // - // Use context values only for request-scoped data that transits - // processes and API boundaries, not for passing optional parameters to - // functions. - // - // A key identifies a specific value in a Context. Functions that wish - // to store values in Context typically allocate a key in a global - // variable then use that key as the argument to context.WithValue and - // Context.Value. A key can be any type that supports equality; - // packages should define keys as an unexported type to avoid - // collisions. - // - // Packages that define a Context key should provide type-safe accessors - // for the values stores using that key: - // - // // Package user defines a User type that's stored in Contexts. - // package user - // - // import "golang.org/x/net/context" - // - // // User is the type of value stored in the Contexts. - // type User struct {...} - // - // // key is an unexported type for keys defined in this package. - // // This prevents collisions with keys defined in other packages. - // type key int - // - // // userKey is the key for user.User values in Contexts. It is - // // unexported; clients use user.NewContext and user.FromContext - // // instead of using this key directly. - // var userKey key = 0 - // - // // NewContext returns a new Context that carries value u. - // func NewContext(ctx context.Context, u *User) context.Context { - // return context.WithValue(ctx, userKey, u) - // } - // - // // FromContext returns the User value stored in ctx, if any. - // func FromContext(ctx context.Context) (*User, bool) { - // u, ok := ctx.Value(userKey).(*User) - // return u, ok - // } - Value(key interface{}) interface{} -} - -// A CancelFunc tells an operation to abandon its work. -// A CancelFunc does not wait for the work to stop. -// After the first call, subsequent calls to a CancelFunc do nothing. 
-type CancelFunc func() diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index 7434b8784..b640deb0e 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -2233,25 +2233,25 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*responseWriter, *http.Request, error) { sc.serveG.check() - rp := requestParam{ - method: f.PseudoValue("method"), - scheme: f.PseudoValue("scheme"), - authority: f.PseudoValue("authority"), - path: f.PseudoValue("path"), - protocol: f.PseudoValue("protocol"), + rp := httpcommon.ServerRequestParam{ + Method: f.PseudoValue("method"), + Scheme: f.PseudoValue("scheme"), + Authority: f.PseudoValue("authority"), + Path: f.PseudoValue("path"), + Protocol: f.PseudoValue("protocol"), } // extended connect is disabled, so we should not see :protocol - if disableExtendedConnectProtocol && rp.protocol != "" { + if disableExtendedConnectProtocol && rp.Protocol != "" { return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol)) } - isConnect := rp.method == "CONNECT" + isConnect := rp.Method == "CONNECT" if isConnect { - if rp.protocol == "" && (rp.path != "" || rp.scheme != "" || rp.authority == "") { + if rp.Protocol == "" && (rp.Path != "" || rp.Scheme != "" || rp.Authority == "") { return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol)) } - } else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") { + } else if rp.Method == "" || rp.Path == "" || (rp.Scheme != "https" && rp.Scheme != "http") { // See 8.1.2.6 Malformed Requests and Responses: // // Malformed requests or responses that are detected @@ -2265,15 +2265,16 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res return nil, nil, sc.countError("bad_path_method", streamError(f.StreamID, ErrCodeProtocol)) } - rp.header = make(http.Header) + header := make(http.Header) + rp.Header = header for _, hf := range f.RegularFields() { - rp.header.Add(sc.canonicalHeader(hf.Name), hf.Value) + header.Add(sc.canonicalHeader(hf.Name), hf.Value) } - if rp.authority == "" { - rp.authority = rp.header.Get("Host") + if rp.Authority == "" { + rp.Authority = header.Get("Host") } - if rp.protocol != "" { - rp.header.Set(":protocol", rp.protocol) + if rp.Protocol != "" { + header.Set(":protocol", rp.Protocol) } rw, req, err := sc.newWriterAndRequestNoBody(st, rp) @@ -2282,7 +2283,7 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res } bodyOpen := !f.StreamEnded() if bodyOpen { - if vv, ok := rp.header["Content-Length"]; ok { + if vv, ok := rp.Header["Content-Length"]; ok { if cl, err := strconv.ParseUint(vv[0], 10, 63); err == nil { req.ContentLength = int64(cl) } else { @@ -2298,84 +2299,38 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res return rw, req, nil } -type requestParam struct { - method string - scheme, authority, path string - protocol string - header http.Header -} - -func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*responseWriter, *http.Request, error) { +func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp httpcommon.ServerRequestParam) (*responseWriter, *http.Request, error) { sc.serveG.check() var tlsState *tls.ConnectionState // nil if not scheme https - if rp.scheme == "https" { + if rp.Scheme == 
"https" { tlsState = sc.tlsState } - needsContinue := httpguts.HeaderValuesContainsToken(rp.header["Expect"], "100-continue") - if needsContinue { - rp.header.Del("Expect") - } - // Merge Cookie headers into one "; "-delimited value. - if cookies := rp.header["Cookie"]; len(cookies) > 1 { - rp.header.Set("Cookie", strings.Join(cookies, "; ")) - } - - // Setup Trailers - var trailer http.Header - for _, v := range rp.header["Trailer"] { - for _, key := range strings.Split(v, ",") { - key = http.CanonicalHeaderKey(textproto.TrimString(key)) - switch key { - case "Transfer-Encoding", "Trailer", "Content-Length": - // Bogus. (copy of http1 rules) - // Ignore. - default: - if trailer == nil { - trailer = make(http.Header) - } - trailer[key] = nil - } - } - } - delete(rp.header, "Trailer") - - var url_ *url.URL - var requestURI string - if rp.method == "CONNECT" && rp.protocol == "" { - url_ = &url.URL{Host: rp.authority} - requestURI = rp.authority // mimic HTTP/1 server behavior - } else { - var err error - url_, err = url.ParseRequestURI(rp.path) - if err != nil { - return nil, nil, sc.countError("bad_path", streamError(st.id, ErrCodeProtocol)) - } - requestURI = rp.path + res := httpcommon.NewServerRequest(rp) + if res.InvalidReason != "" { + return nil, nil, sc.countError(res.InvalidReason, streamError(st.id, ErrCodeProtocol)) } body := &requestBody{ conn: sc, stream: st, - needsContinue: needsContinue, + needsContinue: res.NeedsContinue, } - req := &http.Request{ - Method: rp.method, - URL: url_, + req := (&http.Request{ + Method: rp.Method, + URL: res.URL, RemoteAddr: sc.remoteAddrStr, - Header: rp.header, - RequestURI: requestURI, + Header: rp.Header, + RequestURI: res.RequestURI, Proto: "HTTP/2.0", ProtoMajor: 2, ProtoMinor: 0, TLS: tlsState, - Host: rp.authority, + Host: rp.Authority, Body: body, - Trailer: trailer, - } - req = req.WithContext(st.ctx) - + Trailer: res.Trailer, + }).WithContext(st.ctx) rw := sc.newResponseWriter(st, req) return rw, req, nil } @@ -3270,12 +3225,12 @@ func (sc *serverConn) startPush(msg *startPushRequest) { // we start in "half closed (remote)" for simplicity. // See further comments at the definition of stateHalfClosedRemote. promised := sc.newStream(promisedID, msg.parent.id, stateHalfClosedRemote) - rw, req, err := sc.newWriterAndRequestNoBody(promised, requestParam{ - method: msg.method, - scheme: msg.url.Scheme, - authority: msg.url.Host, - path: msg.url.RequestURI(), - header: cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE + rw, req, err := sc.newWriterAndRequestNoBody(promised, httpcommon.ServerRequestParam{ + Method: msg.method, + Scheme: msg.url.Scheme, + Authority: msg.url.Host, + Path: msg.url.RequestURI(), + Header: cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE }) if err != nil { // Should not happen, since we've already validated msg.url. diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index f2c166b61..f26356b9c 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -1286,6 +1286,19 @@ func (cc *ClientConn) responseHeaderTimeout() time.Duration { return 0 } +// actualContentLength returns a sanitized version of +// req.ContentLength, where 0 actually means zero (not unknown) and -1 +// means unknown. 
+func actualContentLength(req *http.Request) int64 { + if req.Body == nil || req.Body == http.NoBody { + return 0 + } + if req.ContentLength != 0 { + return req.ContentLength + } + return -1 +} + func (cc *ClientConn) decrStreamReservations() { cc.mu.Lock() defer cc.mu.Unlock() @@ -1310,7 +1323,7 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream)) reqCancel: req.Cancel, isHead: req.Method == "HEAD", reqBody: req.Body, - reqBodyContentLength: httpcommon.ActualContentLength(req), + reqBodyContentLength: actualContentLength(req), trace: httptrace.ContextClientTrace(ctx), peerClosed: make(chan struct{}), abort: make(chan struct{}), @@ -1318,7 +1331,7 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream)) donec: make(chan struct{}), } - cs.requestedGzip = httpcommon.IsRequestGzip(req, cc.t.disableCompression()) + cs.requestedGzip = httpcommon.IsRequestGzip(req.Method, req.Header, cc.t.disableCompression()) go cs.doRequest(req, streamf) @@ -1349,7 +1362,7 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream)) } res.Request = req res.TLS = cc.tlsState - if res.Body == noBody && httpcommon.ActualContentLength(req) == 0 { + if res.Body == noBody && actualContentLength(req) == 0 { // If there isn't a request or response body still being // written, then wait for the stream to be closed before // RoundTrip returns. @@ -1596,12 +1609,7 @@ func (cs *clientStream) encodeAndWriteHeaders(req *http.Request) error { // sent by writeRequestBody below, along with any Trailers, // again in form HEADERS{1}, CONTINUATION{0,}) cc.hbuf.Reset() - res, err := httpcommon.EncodeHeaders(httpcommon.EncodeHeadersParam{ - Request: req, - AddGzipHeader: cs.requestedGzip, - PeerMaxHeaderListSize: cc.peerMaxHeaderListSize, - DefaultUserAgent: defaultUserAgent, - }, func(name, value string) { + res, err := encodeRequestHeaders(req, cs.requestedGzip, cc.peerMaxHeaderListSize, func(name, value string) { cc.writeHeader(name, value) }) if err != nil { @@ -1617,6 +1625,22 @@ func (cs *clientStream) encodeAndWriteHeaders(req *http.Request) error { return err } +func encodeRequestHeaders(req *http.Request, addGzipHeader bool, peerMaxHeaderListSize uint64, headerf func(name, value string)) (httpcommon.EncodeHeadersResult, error) { + return httpcommon.EncodeHeaders(req.Context(), httpcommon.EncodeHeadersParam{ + Request: httpcommon.Request{ + Header: req.Header, + Trailer: req.Trailer, + URL: req.URL, + Host: req.Host, + Method: req.Method, + ActualContentLength: actualContentLength(req), + }, + AddGzipHeader: addGzipHeader, + PeerMaxHeaderListSize: peerMaxHeaderListSize, + DefaultUserAgent: defaultUserAgent, + }, headerf) +} + // cleanupWriteRequest performs post-request tasks. // // If err (the result of writeRequest) is non-nil and the stream is not closed, @@ -2186,6 +2210,13 @@ func (rl *clientConnReadLoop) cleanup() { } cc.cond.Broadcast() cc.mu.Unlock() + + if !cc.seenSettings { + // If we have a pending request that wants extended CONNECT, + // let it continue and fail with the connection error. 
+ cc.extendedConnectAllowed = true + close(cc.seenSettingsChan) + } } // countReadFrameError calls Transport.CountError with a string @@ -2278,9 +2309,6 @@ func (rl *clientConnReadLoop) run() error { if VerboseLogs { cc.vlogf("http2: Transport conn %p received error from processing frame %v: %v", cc, summarizeFrame(f), err) } - if !cc.seenSettings { - close(cc.seenSettingsChan) - } return err } } diff --git a/vendor/golang.org/x/net/internal/httpcommon/headermap.go b/vendor/golang.org/x/net/internal/httpcommon/headermap.go index ad3fbacd6..92483d8e4 100644 --- a/vendor/golang.org/x/net/internal/httpcommon/headermap.go +++ b/vendor/golang.org/x/net/internal/httpcommon/headermap.go @@ -5,7 +5,7 @@ package httpcommon import ( - "net/http" + "net/textproto" "sync" ) @@ -82,7 +82,7 @@ func buildCommonHeaderMaps() { commonLowerHeader = make(map[string]string, len(common)) commonCanonHeader = make(map[string]string, len(common)) for _, v := range common { - chk := http.CanonicalHeaderKey(v) + chk := textproto.CanonicalMIMEHeaderKey(v) commonLowerHeader[chk] = v commonCanonHeader[v] = chk } @@ -104,7 +104,7 @@ func CanonicalHeader(v string) string { if s, ok := commonCanonHeader[v]; ok { return s } - return http.CanonicalHeaderKey(v) + return textproto.CanonicalMIMEHeaderKey(v) } // CachedCanonicalHeader returns the canonical form of a well-known header name. diff --git a/vendor/golang.org/x/net/internal/httpcommon/request.go b/vendor/golang.org/x/net/internal/httpcommon/request.go index 343914773..4b7055317 100644 --- a/vendor/golang.org/x/net/internal/httpcommon/request.go +++ b/vendor/golang.org/x/net/internal/httpcommon/request.go @@ -5,10 +5,12 @@ package httpcommon import ( + "context" "errors" "fmt" - "net/http" "net/http/httptrace" + "net/textproto" + "net/url" "sort" "strconv" "strings" @@ -21,9 +23,21 @@ var ( ErrRequestHeaderListSize = errors.New("request header list larger than peer's advertised limit") ) +// Request is a subset of http.Request. +// It'd be simpler to pass an *http.Request, of course, but we can't depend on net/http +// without creating a dependency cycle. +type Request struct { + URL *url.URL + Method string + Host string + Header map[string][]string + Trailer map[string][]string + ActualContentLength int64 // 0 means 0, -1 means unknown +} + // EncodeHeadersParam is parameters to EncodeHeaders. type EncodeHeadersParam struct { - Request *http.Request + Request Request // AddGzipHeader indicates that an "accept-encoding: gzip" header should be // added to the request. @@ -47,11 +61,11 @@ type EncodeHeadersResult struct { // It validates a request and calls headerf with each pseudo-header and header // for the request. // The headerf function is called with the validated, canonicalized header name. -func EncodeHeaders(param EncodeHeadersParam, headerf func(name, value string)) (res EncodeHeadersResult, _ error) { +func EncodeHeaders(ctx context.Context, param EncodeHeadersParam, headerf func(name, value string)) (res EncodeHeadersResult, _ error) { req := param.Request // Check for invalid connection-level headers. - if err := checkConnHeaders(req); err != nil { + if err := checkConnHeaders(req.Header); err != nil { return res, err } @@ -73,7 +87,10 @@ func EncodeHeaders(param EncodeHeadersParam, headerf func(name, value string)) ( // isNormalConnect is true if this is a non-extended CONNECT request. 
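
A hedged sketch of the classification being set up here; the classifyConnect helper restates the branch below for illustration (a plain CONNECT carries no :protocol pseudo-header, an RFC 8441 extended CONNECT does, and :protocol on any other method is malformed):

package main

import "fmt"

// classifyConnect restates the method/:protocol branch, for illustration only.
func classifyConnect(method, protocol string) string {
	switch {
	case method == "CONNECT" && protocol == "":
		return "normal CONNECT"
	case protocol != "" && method != "CONNECT":
		return "error: invalid :protocol"
	case protocol != "":
		return "extended CONNECT (RFC 8441)"
	default:
		return "ordinary request"
	}
}

func main() {
	fmt.Println(classifyConnect("CONNECT", ""))          // normal CONNECT
	fmt.Println(classifyConnect("CONNECT", "websocket")) // extended CONNECT (RFC 8441)
	fmt.Println(classifyConnect("GET", "websocket"))     // error: invalid :protocol
}
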
isNormalConnect := false - protocol := req.Header.Get(":protocol") + var protocol string + if vv := req.Header[":protocol"]; len(vv) > 0 { + protocol = vv[0] + } if req.Method == "CONNECT" && protocol == "" { isNormalConnect = true } else if protocol != "" && req.Method != "CONNECT" { @@ -107,9 +124,7 @@ func EncodeHeaders(param EncodeHeadersParam, headerf func(name, value string)) ( return res, fmt.Errorf("invalid HTTP trailer %s", err) } - contentLength := ActualContentLength(req) - - trailers, err := commaSeparatedTrailers(req) + trailers, err := commaSeparatedTrailers(req.Trailer) if err != nil { return res, err } @@ -123,7 +138,7 @@ func EncodeHeaders(param EncodeHeadersParam, headerf func(name, value string)) ( f(":authority", host) m := req.Method if m == "" { - m = http.MethodGet + m = "GET" } f(":method", m) if !isNormalConnect { @@ -198,8 +213,8 @@ func EncodeHeaders(param EncodeHeadersParam, headerf func(name, value string)) ( f(k, v) } } - if shouldSendReqContentLength(req.Method, contentLength) { - f("content-length", strconv.FormatInt(contentLength, 10)) + if shouldSendReqContentLength(req.Method, req.ActualContentLength) { + f("content-length", strconv.FormatInt(req.ActualContentLength, 10)) } if param.AddGzipHeader { f("accept-encoding", "gzip") @@ -225,7 +240,7 @@ func EncodeHeaders(param EncodeHeadersParam, headerf func(name, value string)) ( } } - trace := httptrace.ContextClientTrace(req.Context()) + trace := httptrace.ContextClientTrace(ctx) // Header list size is ok. Write the headers. enumerateHeaders(func(name, value string) { @@ -243,19 +258,19 @@ func EncodeHeaders(param EncodeHeadersParam, headerf func(name, value string)) ( } }) - res.HasBody = contentLength != 0 + res.HasBody = req.ActualContentLength != 0 res.HasTrailers = trailers != "" return res, nil } // IsRequestGzip reports whether we should add an Accept-Encoding: gzip header // for a request. -func IsRequestGzip(req *http.Request, disableCompression bool) bool { +func IsRequestGzip(method string, header map[string][]string, disableCompression bool) bool { // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? if !disableCompression && - req.Header.Get("Accept-Encoding") == "" && - req.Header.Get("Range") == "" && - req.Method != "HEAD" { + len(header["Accept-Encoding"]) == 0 && + len(header["Range"]) == 0 && + method != "HEAD" { // Request gzip only, not deflate. Deflate is ambiguous and // not as universally supported anyway. // See: https://zlib.net/zlib_faq.html#faq39 @@ -280,22 +295,22 @@ func IsRequestGzip(req *http.Request, disableCompression bool) bool { // // Certain headers are special-cased as okay but not transmitted later. // For example, we allow "Transfer-Encoding: chunked", but drop the header when encoding. 
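
For instance, under the rules implemented in the function below (header values invented for illustration):

package main

import "fmt"

func main() {
	// Hand-annotated outcomes under the checks that follow.
	cases := []map[string][]string{
		{"Transfer-Encoding": {"chunked"}}, // accepted, then dropped when the headers are encoded
		{"Connection": {"keep-alive"}},     // accepted; "close" is tolerated the same way
		{"Connection": {"upgrade"}},        // rejected: invalid Connection request header
		{"Transfer-Encoding": {"gzip"}},    // rejected: invalid Transfer-Encoding request header
	}
	fmt.Println(len(cases), "example header sets annotated above")
}
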
-func checkConnHeaders(req *http.Request) error { - if v := req.Header.Get("Upgrade"); v != "" { - return fmt.Errorf("invalid Upgrade request header: %q", req.Header["Upgrade"]) +func checkConnHeaders(h map[string][]string) error { + if vv := h["Upgrade"]; len(vv) > 0 && (vv[0] != "" && vv[0] != "chunked") { + return fmt.Errorf("invalid Upgrade request header: %q", vv) } - if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") { + if vv := h["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") { return fmt.Errorf("invalid Transfer-Encoding request header: %q", vv) } - if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !asciiEqualFold(vv[0], "close") && !asciiEqualFold(vv[0], "keep-alive")) { + if vv := h["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !asciiEqualFold(vv[0], "close") && !asciiEqualFold(vv[0], "keep-alive")) { return fmt.Errorf("invalid Connection request header: %q", vv) } return nil } -func commaSeparatedTrailers(req *http.Request) (string, error) { - keys := make([]string, 0, len(req.Trailer)) - for k := range req.Trailer { +func commaSeparatedTrailers(trailer map[string][]string) (string, error) { + keys := make([]string, 0, len(trailer)) + for k := range trailer { k = CanonicalHeader(k) switch k { case "Transfer-Encoding", "Trailer", "Content-Length": @@ -310,19 +325,6 @@ func commaSeparatedTrailers(req *http.Request) (string, error) { return "", nil } -// ActualContentLength returns a sanitized version of -// req.ContentLength, where 0 actually means zero (not unknown) and -1 -// means unknown. -func ActualContentLength(req *http.Request) int64 { - if req.Body == nil || req.Body == http.NoBody { - return 0 - } - if req.ContentLength != 0 { - return req.ContentLength - } - return -1 -} - // validPseudoPath reports whether v is a valid :path pseudo-header // value. It must be either: // @@ -340,7 +342,7 @@ func validPseudoPath(v string) bool { return (len(v) > 0 && v[0] == '/') || v == "*" } -func validateHeaders(hdrs http.Header) string { +func validateHeaders(hdrs map[string][]string) string { for k, vv := range hdrs { if !httpguts.ValidHeaderFieldName(k) && k != ":protocol" { return fmt.Sprintf("name %q", k) @@ -377,3 +379,89 @@ func shouldSendReqContentLength(method string, contentLength int64) bool { return false } } + +// ServerRequestParam is parameters to NewServerRequest. +type ServerRequestParam struct { + Method string + Scheme, Authority, Path string + Protocol string + Header map[string][]string +} + +// ServerRequestResult is the result of NewServerRequest. +type ServerRequestResult struct { + // Various http.Request fields. + URL *url.URL + RequestURI string + Trailer map[string][]string + + NeedsContinue bool // client provided an "Expect: 100-continue" header + + // If the request should be rejected, this is a short string suitable for passing + // to the http2 package's CountError function. + // It might be a bit odd to return errors this way rather than returing an error, + // but this ensures we don't forget to include a CountError reason. + InvalidReason string +} + +func NewServerRequest(rp ServerRequestParam) ServerRequestResult { + needsContinue := httpguts.HeaderValuesContainsToken(rp.Header["Expect"], "100-continue") + if needsContinue { + delete(rp.Header, "Expect") + } + // Merge Cookie headers into one "; "-delimited value. 
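
A quick hedged illustration of that merge, with invented cookie values: HTTP/2 lets a client split cookies across several field lines, and they are rejoined into the single "; "-delimited form that HTTP/1-style handlers expect.

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Two cookie field lines, as they may arrive over HTTP/2.
	cookies := []string{"session=abc123", "theme=dark"}
	fmt.Println(strings.Join(cookies, "; ")) // session=abc123; theme=dark
}
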
+ if cookies := rp.Header["Cookie"]; len(cookies) > 1 { + rp.Header["Cookie"] = []string{strings.Join(cookies, "; ")} + } + + // Setup Trailers + var trailer map[string][]string + for _, v := range rp.Header["Trailer"] { + for _, key := range strings.Split(v, ",") { + key = textproto.CanonicalMIMEHeaderKey(textproto.TrimString(key)) + switch key { + case "Transfer-Encoding", "Trailer", "Content-Length": + // Bogus. (copy of http1 rules) + // Ignore. + default: + if trailer == nil { + trailer = make(map[string][]string) + } + trailer[key] = nil + } + } + } + delete(rp.Header, "Trailer") + + // "':authority' MUST NOT include the deprecated userinfo subcomponent + // for "http" or "https" schemed URIs." + // https://www.rfc-editor.org/rfc/rfc9113.html#section-8.3.1-2.3.8 + if strings.IndexByte(rp.Authority, '@') != -1 && (rp.Scheme == "http" || rp.Scheme == "https") { + return ServerRequestResult{ + InvalidReason: "userinfo_in_authority", + } + } + + var url_ *url.URL + var requestURI string + if rp.Method == "CONNECT" && rp.Protocol == "" { + url_ = &url.URL{Host: rp.Authority} + requestURI = rp.Authority // mimic HTTP/1 server behavior + } else { + var err error + url_, err = url.ParseRequestURI(rp.Path) + if err != nil { + return ServerRequestResult{ + InvalidReason: "bad_path", + } + } + requestURI = rp.Path + } + + return ServerRequestResult{ + URL: url_, + NeedsContinue: needsContinue, + RequestURI: requestURI, + Trailer: trailer, + } +} diff --git a/vendor/golang.org/x/net/proxy/per_host.go b/vendor/golang.org/x/net/proxy/per_host.go index d7d4b8b6e..32bdf435e 100644 --- a/vendor/golang.org/x/net/proxy/per_host.go +++ b/vendor/golang.org/x/net/proxy/per_host.go @@ -7,6 +7,7 @@ package proxy import ( "context" "net" + "net/netip" "strings" ) @@ -57,7 +58,8 @@ func (p *PerHost) DialContext(ctx context.Context, network, addr string) (c net. 
} func (p *PerHost) dialerForRequest(host string) Dialer { - if ip := net.ParseIP(host); ip != nil { + if nip, err := netip.ParseAddr(host); err == nil { + ip := net.IP(nip.AsSlice()) for _, net := range p.bypassNetworks { if net.Contains(ip) { return p.bypass @@ -108,8 +110,8 @@ func (p *PerHost) AddFromString(s string) { } continue } - if ip := net.ParseIP(host); ip != nil { - p.AddIP(ip) + if nip, err := netip.ParseAddr(host); err == nil { + p.AddIP(net.IP(nip.AsSlice())) continue } if strings.HasPrefix(host, "*.") { diff --git a/vendor/modules.txt b/vendor/modules.txt index 85bb6aed1..01300fd39 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -808,14 +808,14 @@ github.com/zclconf/go-cty/cty/convert github.com/zclconf/go-cty/cty/ctystrings github.com/zclconf/go-cty/cty/function github.com/zclconf/go-cty/cty/set -# go.uber.org/atomic v1.10.0 +# go.uber.org/atomic v1.11.0 ## explicit; go 1.18 go.uber.org/atomic # go.uber.org/multierr v1.11.0 ## explicit; go 1.19 go.uber.org/multierr -# golang.org/x/crypto v0.33.0 -## explicit; go 1.20 +# golang.org/x/crypto v0.35.0 +## explicit; go 1.23.0 golang.org/x/crypto/argon2 golang.org/x/crypto/blake2b golang.org/x/crypto/blowfish @@ -846,8 +846,8 @@ golang.org/x/exp/slog/internal/buffer # golang.org/x/mod v0.21.0 ## explicit; go 1.22.0 golang.org/x/mod/semver -# golang.org/x/net v0.35.0 -## explicit; go 1.18 +# golang.org/x/net v0.36.0 +## explicit; go 1.23.0 golang.org/x/net/context golang.org/x/net/http/httpguts golang.org/x/net/http2 From ab9d948885a60c61ff6465c985b9b5c661df0096 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Fri, 21 Mar 2025 09:05:20 +0000 Subject: [PATCH 04/11] fix(deps): update patch updates --- .github/workflows/build-oci.yaml | 4 + go.mod | 32 +- go.sum | 64 +- oci/Containerfile | 6 +- tools/go.mod | 8 +- tools/go.sum | 16 +- .../ckaznocha/intrange/.golangci.yml | 15 +- .../github.com/ckaznocha/intrange/go.work | 4 +- .../github.com/ckaznocha/intrange/intrange.go | 99 +- .../ghostiam/protogetter/flake.lock | 61 + .../github.com/ghostiam/protogetter/flake.nix | 69 + .../ghostiam/protogetter/processor.go | 39 +- .../ghostiam/protogetter/protogetter.go | 1 + tools/vendor/modules.txt | 14 +- .../azure-sdk-for-go/sdk/azcore/CHANGELOG.md | 7 + .../sdk/azcore/internal/shared/constants.go | 2 +- .../aws/aws-sdk-go-v2/config/CHANGELOG.md | 4 + .../config/go_module_metadata.go | 2 +- .../aws-sdk-go-v2/credentials/CHANGELOG.md | 4 + .../credentials/go_module_metadata.go | 2 +- .../aws-sdk-go-v2/service/sso/CHANGELOG.md | 4 + .../aws-sdk-go-v2/service/sso/generated.json | 1 + .../service/sso/go_module_metadata.go | 2 +- .../service/ssooidc/CHANGELOG.md | 4 + .../service/ssooidc/generated.json | 1 + .../service/ssooidc/go_module_metadata.go | 2 +- .../aws-sdk-go-v2/service/sts/CHANGELOG.md | 4 + .../aws-sdk-go-v2/service/sts/generated.json | 1 + .../service/sts/go_module_metadata.go | 2 +- .../bubbletea/inputreader_other.go | 2 +- .../bubbletea/inputreader_windows.go | 18 +- .../github.com/charmbracelet/bubbletea/tea.go | 29 +- .../github.com/charmbracelet/bubbletea/tty.go | 9 +- .../v2/go/awsx/internal/pulumiUtilities.go | 4 +- .../github.com/pulumi/pulumi/sdk/v3/.version | 2 +- .../sdk/v3/go/auto/optrefresh/optrefresh.go | 9 + .../sdk/v3/go/auto/optrename/optrename.go | 66 + .../pulumi/pulumi/sdk/v3/go/auto/stack.go | 65 + .../pulumi/sdk/v3/go/common/apitype/policy.go | 1 + .../sdk/v3/go/common/apitype/service.go | 12 +- .../pulumi/sdk/v3/go/common/apitype/stacks.go | 
16 +-
 .../pulumi/pulumi/sdk/v3/go/common/env/env.go |   10 +-
 .../sdk/v3/go/common/resource/config/crypt.go |   32 +-
 .../resource/plugin/langruntime_plugin.go     |   20 +-
 .../v3/go/common/resource/plugin/plugin.go    |   52 +-
 .../v3/go/common/resource/plugin/provider.go  |   42 +-
 .../common/resource/plugin/provider_plugin.go |   59 +-
 .../common/resource/plugin/provider_server.go |   46 +-
 .../sdk/v3/go/common/util/cmdutil/args.go     |    6 +-
 .../sdk/v3/go/common/util/cmdutil/exit.go     |  101 +-
 .../go/common/util/errutil/format_exiterr.go  |   34 +
 .../sdk/v3/go/common/workspace/plugins.go     |   22 +-
 .../sdk/v3/go/common/workspace/templates.go   |  116 +-
 .../pulumi/pulumi/sdk/v3/go/pulumi/context.go |   36 +-
 .../pulumi/pulumi/sdk/v3/go/pulumi/rpc.go     |   50 +-
 .../pulumi/pulumi/sdk/v3/go/pulumi/run.go     |    4 +
 .../pulumi/sdk/v3/proto/go/provider.pb.go     | 1315 +++++++++--------
 .../sdk/v3/proto/go/provider_grpc.pb.go       |   74 +-
 .../sdk/v3/python/toolchain/toolchain.go      |   19 +-
 .../pulumi/sdk/v3/python/toolchain/uv.go      |   13 +-
 vendor/golang.org/x/crypto/ssh/messages.go    |    2 +
 vendor/golang.org/x/crypto/ssh/tcpip.go       |    2 +-
 vendor/golang.org/x/sync/errgroup/errgroup.go |    2 +-
 vendor/golang.org/x/sync/errgroup/go120.go    |   13 -
 .../golang.org/x/sync/errgroup/pre_go120.go   |   14 -
 vendor/modules.txt                            |   46 +-
 66 files changed, 1825 insertions(+), 1012 deletions(-)
 create mode 100644 tools/vendor/github.com/ghostiam/protogetter/flake.lock
 create mode 100644 tools/vendor/github.com/ghostiam/protogetter/flake.nix
 create mode 100644 vendor/github.com/pulumi/pulumi/sdk/v3/go/auto/optrename/optrename.go
 create mode 100644 vendor/github.com/pulumi/pulumi/sdk/v3/go/common/util/errutil/format_exiterr.go
 delete mode 100644 vendor/golang.org/x/sync/errgroup/go120.go
 delete mode 100644 vendor/golang.org/x/sync/errgroup/pre_go120.go

diff --git a/.github/workflows/build-oci.yaml b/.github/workflows/build-oci.yaml
index dea51258f..a421d33ec 100644
--- a/.github/workflows/build-oci.yaml
+++ b/.github/workflows/build-oci.yaml
@@ -38,7 +38,7 @@ jobs:
           make oci-save
 
       - name: Upload mapt artifacts for PR
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
         with:
           name: mapt
           path: mapt*
diff --git a/go.mod b/go.mod
index c313e1bd7..e8f453354 100644
--- a/go.mod
+++ b/go.mod
@@ -8,25 +8,25 @@ require (
 	github.com/coocood/freecache v1.2.4
 	github.com/pulumi/pulumi-command/sdk v1.0.1
 	github.com/pulumi/pulumi-random/sdk/v4 v4.16.7
-	github.com/pulumi/pulumi/sdk/v3 v3.152.0
+	github.com/pulumi/pulumi/sdk/v3 v3.154.0
 	github.com/sirupsen/logrus v1.9.3
 	github.com/spf13/cobra v1.9.1
 	golang.org/x/exp v0.0.0-20240909161429-701f63a606c0
 )
 
 require (
-	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0
+	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.1
 	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.2
 	github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v6 v6.3.0
 	github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resourcegraph/armresourcegraph v0.9.0
 	github.com/aws/amazon-ec2-instance-selector/v3 v3.1.1
 	github.com/aws/aws-sdk-go-v2 v1.36.3
-	github.com/aws/aws-sdk-go-v2/config v1.29.8
+	github.com/aws/aws-sdk-go-v2/config v1.29.9
 	github.com/aws/aws-sdk-go-v2/service/ec2 v1.203.1
 	github.com/aws/aws-sdk-go-v2/service/s3 v1.53.1
 	github.com/pulumi/pulumi-aws-native/sdk v1.25.0
 	github.com/pulumi/pulumi-aws/sdk/v6 v6.69.0
-	github.com/pulumi/pulumi-awsx/sdk/v2 v2.21.0
+	github.com/pulumi/pulumi-awsx/sdk/v2 v2.21.1
github.com/pulumi/pulumi-azure-native-sdk/authorization/v2 v2.88.0 github.com/pulumi/pulumi-azure-native-sdk/compute/v2 v2.88.0 github.com/pulumi/pulumi-azure-native-sdk/containerservice/v2 v2.88.0 @@ -61,7 +61,7 @@ require ( github.com/agext/levenshtein v1.2.3 // indirect github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect github.com/atotto/clipboard v0.1.4 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.17.61 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.17.62 // indirect github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 // indirect github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 // indirect github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 // indirect @@ -69,14 +69,14 @@ require ( github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 // indirect github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 // indirect github.com/aws/aws-sdk-go-v2/service/pricing v1.32.17 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.25.0 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.29.0 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.33.16 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.25.1 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.29.1 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.33.17 // indirect github.com/aws/smithy-go v1.22.3 // indirect github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/charmbracelet/bubbles v0.20.0 // indirect - github.com/charmbracelet/bubbletea v1.3.3 // indirect + github.com/charmbracelet/bubbletea v1.3.4 // indirect github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc // indirect github.com/charmbracelet/lipgloss v1.1.0 // indirect github.com/charmbracelet/x/ansi v0.8.0 // indirect @@ -122,8 +122,8 @@ require ( github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect github.com/zclconf/go-cty v1.15.0 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/sync v0.11.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 // indirect + golang.org/x/sync v0.12.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) @@ -168,11 +168,11 @@ require ( github.com/uber/jaeger-lib v2.4.1+incompatible // indirect github.com/xanzy/ssh-agent v0.3.3 // indirect go.uber.org/atomic v1.11.0 // indirect - golang.org/x/crypto v0.35.0 // indirect - golang.org/x/net v0.36.0 // indirect - golang.org/x/sys v0.30.0 // indirect - golang.org/x/term v0.29.0 // indirect - golang.org/x/text v0.22.0 // indirect + golang.org/x/crypto v0.36.0 // indirect + golang.org/x/net v0.37.0 // indirect + golang.org/x/sys v0.31.0 // indirect + golang.org/x/term v0.30.0 // indirect + golang.org/x/text v0.23.0 // indirect google.golang.org/grpc v1.67.1 // indirect google.golang.org/protobuf v1.35.1 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect diff --git a/go.sum b/go.sum index d8520cff4..fd29c4156 100644 --- a/go.sum +++ b/go.sum @@ -1,7 +1,7 @@ dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s= dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0 h1:g0EZJwz7xkXQiZAI5xi9f3WWFYBlX1CPTrR+NDToRkQ= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0/go.mod 
h1:XCW7KnZet0Opnr7HccfUw1PLc4CjHqpcaxW8DHklNkQ= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.1 h1:DSDNVxqkoXJiko6x8a90zidoYqnYYa6c1MTzDKzKkTo= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.1/go.mod h1:zGqV2R4Cr/k8Uye5w+dgQ06WJtEcbQG/8J7BB6hnCr4= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.2 h1:F0gBpfdPLGsw+nsgk6aqqkZS1jiixa5WwFe3fk/T3Ys= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.2/go.mod h1:SqINnQ9lVVdRlyC8cd1lCI0SdX4n2paeABd2K8ggfnE= github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2 h1:yz1bePFlP5Vws5+8ez6T3HWXPmwOK7Yvq8QxDBD3SKY= @@ -47,10 +47,10 @@ github.com/aws/aws-sdk-go-v2 v1.36.3 h1:mJoei2CxPutQVxaATCzDUjcZEjVRdpsiiXi2o38y github.com/aws/aws-sdk-go-v2 v1.36.3/go.mod h1:LLXuLpgzEbD766Z5ECcRmi8AzSwfZItDtmABVkRLGzg= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2 h1:x6xsQXGSmW6frevwDA+vi/wqhp1ct18mVXYN08/93to= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2/go.mod h1:lPprDr1e6cJdyYeGXnRaJoP4Md+cDBvi2eOj00BlGmg= -github.com/aws/aws-sdk-go-v2/config v1.29.8 h1:RpwAfYcV2lr/yRc4lWhUM9JRPQqKgKWmou3LV7UfWP4= -github.com/aws/aws-sdk-go-v2/config v1.29.8/go.mod h1:t+G7Fq1OcO8cXTPPXzxQSnj/5Xzdc9jAAD3Xrn9/Mgo= -github.com/aws/aws-sdk-go-v2/credentials v1.17.61 h1:Hd/uX6Wo2iUW1JWII+rmyCD7MMhOe7ALwQXN6sKDd1o= -github.com/aws/aws-sdk-go-v2/credentials v1.17.61/go.mod h1:L7vaLkwHY1qgW0gG1zG0z/X0sQ5tpIY5iI13+j3qI80= +github.com/aws/aws-sdk-go-v2/config v1.29.9 h1:Kg+fAYNaJeGXp1vmjtidss8O2uXIsXwaRqsQJKXVr+0= +github.com/aws/aws-sdk-go-v2/config v1.29.9/go.mod h1:oU3jj2O53kgOU4TXq/yipt6ryiooYjlkqqVaZk7gY/U= +github.com/aws/aws-sdk-go-v2/credentials v1.17.62 h1:fvtQY3zFzYJ9CfixuAQ96IxDrBajbBWGqjNTCa79ocU= +github.com/aws/aws-sdk-go-v2/credentials v1.17.62/go.mod h1:ElETBxIQqcxej++Cs8GyPBbgMys5DgQPTwo7cUPDKt8= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 h1:x793wxmUWVDhshP8WW2mlnXuFrO4cOd3HLBroh1paFw= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30/go.mod h1:Jpne2tDnYiFascUEs2AWHJL9Yp7A5ZVy3TNyxaAjD6M= github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 h1:ZK5jHhnrioRkUNOc+hOgQKlUL5JeC3S6JgLxtQ+Rm0Q= @@ -79,12 +79,12 @@ github.com/aws/aws-sdk-go-v2/service/pricing v1.32.17 h1:EtZFyL/uhaXlHjIwHW0KSJv github.com/aws/aws-sdk-go-v2/service/pricing v1.32.17/go.mod h1:l7bufyRvU+8mY0Z1BNWbWvjr59dlj9YrLKmeiz5CJ30= github.com/aws/aws-sdk-go-v2/service/s3 v1.53.1 h1:6cnno47Me9bRykw9AEv9zkXE+5or7jz8TsskTTccbgc= github.com/aws/aws-sdk-go-v2/service/s3 v1.53.1/go.mod h1:qmdkIIAC+GCLASF7R2whgNrJADz0QZPX+Seiw/i4S3o= -github.com/aws/aws-sdk-go-v2/service/sso v1.25.0 h1:2U9sF8nKy7UgyEeLiZTRg6ShBS22z8UnYpV6aRFL0is= -github.com/aws/aws-sdk-go-v2/service/sso v1.25.0/go.mod h1:qs4a9T5EMLl/Cajiw2TcbNt2UNo/Hqlyp+GiuG4CFDI= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.29.0 h1:wjAdc85cXdQR5uLx5FwWvGIHm4OPJhTyzUHU8craXtE= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.29.0/go.mod h1:MlYRNmYu/fGPoxBQVvBYr9nyr948aY/WLUvwBMBJubs= -github.com/aws/aws-sdk-go-v2/service/sts v1.33.16 h1:BHEK2Q/7CMRMCb3nySi/w8UbIcPhKvYP5s1xf8/izn0= -github.com/aws/aws-sdk-go-v2/service/sts v1.33.16/go.mod h1:cQnB8CUnxbMU82JvlqjKR2HBOm3fe9pWorWBza6MBJ4= +github.com/aws/aws-sdk-go-v2/service/sso v1.25.1 h1:8JdC7Gr9NROg1Rusk25IcZeTO59zLxsKgE0gkh5O6h0= +github.com/aws/aws-sdk-go-v2/service/sso v1.25.1/go.mod h1:qs4a9T5EMLl/Cajiw2TcbNt2UNo/Hqlyp+GiuG4CFDI= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.29.1 h1:KwuLovgQPcdjNMfFt9OhUd9a2OwcOKhxfvF4glTzLuA= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.29.1/go.mod 
h1:MlYRNmYu/fGPoxBQVvBYr9nyr948aY/WLUvwBMBJubs= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.17 h1:PZV5W8yk4OtH1JAuhV2PXwwO9v5G5Aoj+eMCn4T+1Kc= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.17/go.mod h1:cQnB8CUnxbMU82JvlqjKR2HBOm3fe9pWorWBza6MBJ4= github.com/aws/smithy-go v1.22.3 h1:Z//5NuZCSW6R4PhQ93hShNbyBbn8BWCmCVCt+Q8Io5k= github.com/aws/smithy-go v1.22.3/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI= github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k= @@ -98,8 +98,8 @@ github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UF github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/charmbracelet/bubbles v0.20.0 h1:jSZu6qD8cRQ6k9OMfR1WlM+ruM8fkPWkHvQWD9LIutE= github.com/charmbracelet/bubbles v0.20.0/go.mod h1:39slydyswPy+uVOHZ5x/GjwVAFkCsV8IIVy+4MhzwwU= -github.com/charmbracelet/bubbletea v1.3.3 h1:WpU6fCY0J2vDWM3zfS3vIDi/ULq3SYphZhkAGGvmEUY= -github.com/charmbracelet/bubbletea v1.3.3/go.mod h1:dtcUCyCGEX3g9tosuYiut3MXgY/Jsv9nKVdibKKRRXo= +github.com/charmbracelet/bubbletea v1.3.4 h1:kCg7B+jSCFPLYRA52SDZjr51kG/fMUEoPoZrkaDHyoI= +github.com/charmbracelet/bubbletea v1.3.4/go.mod h1:dtcUCyCGEX3g9tosuYiut3MXgY/Jsv9nKVdibKKRRXo= github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc h1:4pZI35227imm7yK2bGPcfpFEmuY1gc2YSTShr4iJBfs= github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc/go.mod h1:X4/0JoqgTIPSFcRA/P6INZzIuyqdFY5rm8tb41s9okk= github.com/charmbracelet/lipgloss v1.1.0 h1:vYXsiLHVkK7fp74RkV7b2kq9+zDLoEU4MZoFqR/noCY= @@ -272,8 +272,8 @@ github.com/pulumi/pulumi-aws-native/sdk v1.25.0 h1:lboDYcrKaz7ggwbUWluybdhG1zNs8 github.com/pulumi/pulumi-aws-native/sdk v1.25.0/go.mod h1:Kuy19nXfmt4X9ySnWlKJwYrTc/PedPBaNfX6SCoX8gE= github.com/pulumi/pulumi-aws/sdk/v6 v6.69.0 h1:SO9xo9PIgufYORBPx82lSQN0CLg1CMab+rR6JJnGv/c= github.com/pulumi/pulumi-aws/sdk/v6 v6.69.0/go.mod h1:OaqbSRCkPxr8XpanQ7u3Xup9MgSnyqQFDxNhV2pXWzw= -github.com/pulumi/pulumi-awsx/sdk/v2 v2.21.0 h1:El2HV0GbOFFoQot0id6ZNjpjrF+HErIdreMG9WwJ7OA= -github.com/pulumi/pulumi-awsx/sdk/v2 v2.21.0/go.mod h1:ZpStejgr+RYJZwbFV6IpOxPb/wLSd86LUTC69UNQVVI= +github.com/pulumi/pulumi-awsx/sdk/v2 v2.21.1 h1:V0NaeP3Rx5WGysvNz6x13Dnd3visEgQubP/gzQ+X86Q= +github.com/pulumi/pulumi-awsx/sdk/v2 v2.21.1/go.mod h1:j8IUGf1uw47bXxHS5H98O3NZIXvVsb7bfv/zby1cVLE= github.com/pulumi/pulumi-azure-native-sdk/authorization/v2 v2.88.0 h1:xIb656wq0o/gOLel90Bj/3tRxwq+U+3RaktlNnHvTG4= github.com/pulumi/pulumi-azure-native-sdk/authorization/v2 v2.88.0/go.mod h1:DnvaZlI1YwSZhIKfPPsXq6Koy1XoOkgGwOorcZeIj0k= github.com/pulumi/pulumi-azure-native-sdk/compute/v2 v2.88.0 h1:TVO1fIAW7tu2sX7SEPfLH9e5DGH4cbbcG3qee58nEgk= @@ -300,8 +300,8 @@ github.com/pulumi/pulumi-random/sdk/v4 v4.16.7 h1:39rhOe/PTUGMYia8pR5T2wbxxMt2pw github.com/pulumi/pulumi-random/sdk/v4 v4.16.7/go.mod h1:cxxDhJzUPt/YElfvlWa15Q4NGF6XXS8kUs4OQsCxSBk= github.com/pulumi/pulumi-tls/sdk/v5 v5.0.9 h1:JH5TkGJDwlTqKdSJxd49DfrJOpVkRyGDvgKS9gwcV9U= github.com/pulumi/pulumi-tls/sdk/v5 v5.0.9/go.mod h1:3B+v+HC4bjAYf1mVxRVL/bDY20+VEMwfxRKgc3oeCrs= -github.com/pulumi/pulumi/sdk/v3 v3.152.0 h1:sPstmYhbf/ArkREjkPYRgFUf+iv1llskLY2/4aASdxc= -github.com/pulumi/pulumi/sdk/v3 v3.152.0/go.mod h1:+WC9aIDo8fMgd2g0jCHuZU2S/VYNLRAZ3QXt6YVgwaA= +github.com/pulumi/pulumi/sdk/v3 v3.154.0 h1:AK2XAIYwCVcCdvHuB87DqmnRLt7AlMRQobYb2VVycN8= +github.com/pulumi/pulumi/sdk/v3 v3.154.0/go.mod h1:+WC9aIDo8fMgd2g0jCHuZU2S/VYNLRAZ3QXt6YVgwaA= github.com/redis/go-redis/v9 v9.7.0 
h1:HhLSs+B6O021gwzl+locl0zEDnyNkxMtf/Z3NNBMa9E= github.com/redis/go-redis/v9 v9.7.0/go.mod h1:f6zhXITC7JUJIlPEiBOTXxJgPLdZcA93GewI7inzyWw= github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= @@ -377,8 +377,8 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.35.0 h1:b15kiHdrGCHrP6LvwaQ3c03kgNhhiMgvlhxHQhmg2Xs= -golang.org/x/crypto v0.35.0/go.mod h1:dy7dXNW32cAb/6/PRuTNsix8T+vJAqvuIy5Bli/x0YQ= +golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= +golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= @@ -393,13 +393,13 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.36.0 h1:vWF2fRbw4qslQsQzgFqZff+BItCvGFQqKzKIzx1rmoA= -golang.org/x/net v0.36.0/go.mod h1:bFmbeoIPfrw4sMHNhb4J9f6+tPziuGjq7Jk/38fxi1I= +golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c= +golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w= -golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= +golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -418,16 +418,16 @@ golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc= -golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.31.0 
h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= +golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.29.0 h1:L6pJp37ocefwRRtYPKSWOWzOtWSxVajvz2ldH/xi3iU= -golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s= +golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= +golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM= -golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY= +golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= +golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -440,8 +440,8 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 h1:e7S5W7MGGLaSu8j3YjdezkZ+m1/Nm0uRVRMEMGk26Xs= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= diff --git a/oci/Containerfile b/oci/Containerfile index 0b0771358..61865fc60 100644 --- a/oci/Containerfile +++ b/oci/Containerfile @@ -7,7 +7,7 @@ WORKDIR /workspace COPY . . 
# renovate: datasource=github-releases depName=pulumi/pulumi -ENV PULUMI_VERSION 3.152.0 +ENV PULUMI_VERSION 3.154.0 ENV PULUMI_BASE_URL="https://github.com/pulumi/pulumi/releases/download/v${PULUMI_VERSION}/pulumi-v${PULUMI_VERSION}" ENV PULUMI_URL="${PULUMI_BASE_URL}-linux-x64.tar.gz" @@ -18,7 +18,7 @@ RUN GOARCH=${TARGETARCH} make build \ && tar -xzvf pulumicli.tar.gz # ubi 9.5-1732804088 -FROM registry.access.redhat.com/ubi9/ubi:9.5-1739751568 +FROM registry.access.redhat.com/ubi9/ubi:9.5-1741850090@sha256:8d53b60617e53e4dbd6c0d485c569801f48dbeb40b48c8424a67e33a07320968 ARG TARGETARCH LABEL org.opencontainers.image.authors="Redhat Developer" @@ -35,7 +35,7 @@ ENV AWS_SDK_LOAD_CONFIG=1 \ # renovate: datasource=github-releases depName=pulumi/pulumi-aws ARG PULUMI_AWS_VERSION=v6.69.0 # renovate: datasource=github-releases depName=pulumi/pulumi-awsx -ARG PULUMI_AWSX_VERSION=v2.21.0 +ARG PULUMI_AWSX_VERSION=v2.21.1 # renovate: datasource=github-releases depName=pulumi/pulumi-azure-native ARG PULUMI_AZURE_NATIVE_VERSION=v2.88.0 # renovate: datasource=github-releases depName=pulumi/pulumi-command diff --git a/tools/go.mod b/tools/go.mod index 1b7ec0b07..f8451efd5 100644 --- a/tools/go.mod +++ b/tools/go.mod @@ -31,8 +31,8 @@ require ( github.com/bkielbasa/cyclop v1.2.3 // indirect github.com/blizzy78/varnamelen v0.8.0 // indirect github.com/bombsimon/wsl/v4 v4.5.0 // indirect - github.com/breml/bidichk v0.3.2 // indirect - github.com/breml/errchkjson v0.4.0 // indirect + github.com/breml/bidichk v0.3.3 // indirect + github.com/breml/errchkjson v0.4.1 // indirect github.com/butuzov/ireturn v0.3.1 // indirect github.com/butuzov/mirror v1.3.0 // indirect github.com/catenacyber/perfsprint v0.8.1 // indirect @@ -40,7 +40,7 @@ require ( github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/charithe/durationcheck v0.0.10 // indirect github.com/chavacava/garif v0.1.0 // indirect - github.com/ckaznocha/intrange v0.3.0 // indirect + github.com/ckaznocha/intrange v0.3.1 // indirect github.com/curioswitch/go-reassign v0.3.0 // indirect github.com/daixiang0/gci v0.13.5 // indirect github.com/davecgh/go-spew v1.1.1 // indirect @@ -51,7 +51,7 @@ require ( github.com/firefart/nonamedreturns v1.0.5 // indirect github.com/fsnotify/fsnotify v1.5.4 // indirect github.com/fzipp/gocyclo v0.6.0 // indirect - github.com/ghostiam/protogetter v0.3.9 // indirect + github.com/ghostiam/protogetter v0.3.12 // indirect github.com/go-critic/go-critic v0.12.0 // indirect github.com/go-toolsmith/astcast v1.1.0 // indirect github.com/go-toolsmith/astcopy v1.1.0 // indirect diff --git a/tools/go.sum b/tools/go.sum index 3f1384126..582ab5add 100644 --- a/tools/go.sum +++ b/tools/go.sum @@ -94,10 +94,10 @@ github.com/blizzy78/varnamelen v0.8.0 h1:oqSblyuQvFsW1hbBHh1zfwrKe3kcSj0rnXkKzsQ github.com/blizzy78/varnamelen v0.8.0/go.mod h1:V9TzQZ4fLJ1DSrjVDfl89H7aMnTvKkApdHeyESmyR7k= github.com/bombsimon/wsl/v4 v4.5.0 h1:iZRsEvDdyhd2La0FVi5k6tYehpOR/R7qIUjmKk7N74A= github.com/bombsimon/wsl/v4 v4.5.0/go.mod h1:NOQ3aLF4nD7N5YPXMruR6ZXDOAqLoM0GEpLwTdvmOSc= -github.com/breml/bidichk v0.3.2 h1:xV4flJ9V5xWTqxL+/PMFF6dtJPvZLPsyixAoPe8BGJs= -github.com/breml/bidichk v0.3.2/go.mod h1:VzFLBxuYtT23z5+iVkamXO386OB+/sVwZOpIj6zXGos= -github.com/breml/errchkjson v0.4.0 h1:gftf6uWZMtIa/Is3XJgibewBm2ksAQSY/kABDNFTAdk= -github.com/breml/errchkjson v0.4.0/go.mod h1:AuBOSTHyLSaaAFlWsRSuRBIroCh3eh7ZHh5YeelDIk8= +github.com/breml/bidichk v0.3.3 h1:WSM67ztRusf1sMoqH6/c4OBCUlRVTKq+CbSeo0R17sE= +github.com/breml/bidichk v0.3.3/go.mod 
h1:ISbsut8OnjB367j5NseXEGGgO/th206dVa427kR8YTE= +github.com/breml/errchkjson v0.4.1 h1:keFSS8D7A2T0haP9kzZTi7o26r7kE3vymjZNeNDRDwg= +github.com/breml/errchkjson v0.4.1/go.mod h1:a23OvR6Qvcl7DG/Z4o0el6BRAjKnaReoPQFciAl9U3s= github.com/butuzov/ireturn v0.3.1 h1:mFgbEI6m+9W8oP/oDdfA34dLisRFCj2G6o/yiI1yZrY= github.com/butuzov/ireturn v0.3.1/go.mod h1:ZfRp+E7eJLC0NQmk1Nrm1LOrn/gQlOykv+cVPdiXH5M= github.com/butuzov/mirror v1.3.0 h1:HdWCXzmwlQHdVhwvsfBb2Au0r3HyINry3bDWLYXiKoc= @@ -118,8 +118,8 @@ github.com/chavacava/garif v0.1.0/go.mod h1:XMyYCkEL58DF0oyW4qDjjnPWONs2HBqYKI+U github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/ckaznocha/intrange v0.3.0 h1:VqnxtK32pxgkhJgYQEeOArVidIPg+ahLP7WBOXZd5ZY= -github.com/ckaznocha/intrange v0.3.0/go.mod h1:+I/o2d2A1FBHgGELbGxzIcyd3/9l9DuwjM8FsbSS3Lo= +github.com/ckaznocha/intrange v0.3.1 h1:j1onQyXvHUsPWujDH6WIjhyH26gkRt/txNlV7LspvJs= +github.com/ckaznocha/intrange v0.3.1/go.mod h1:QVepyz1AkUoFQkpEqksSYpNpUo3c5W7nWh/s6SHIJJk= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= @@ -152,8 +152,8 @@ github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwV github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= github.com/fzipp/gocyclo v0.6.0 h1:lsblElZG7d3ALtGMx9fmxeTKZaLLpU8mET09yN4BBLo= github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA= -github.com/ghostiam/protogetter v0.3.9 h1:j+zlLLWzqLay22Cz/aYwTHKQ88GE2DQ6GkWSYFOI4lQ= -github.com/ghostiam/protogetter v0.3.9/go.mod h1:WZ0nw9pfzsgxuRsPOFQomgDVSWtDLJRfQJEhsGbmQMA= +github.com/ghostiam/protogetter v0.3.12 h1:xTPjH97iKph27vXRRKV0OCke5sAMoHPbVeVstdzmCLE= +github.com/ghostiam/protogetter v0.3.12/go.mod h1:WZ0nw9pfzsgxuRsPOFQomgDVSWtDLJRfQJEhsGbmQMA= github.com/go-critic/go-critic v0.12.0 h1:iLosHZuye812wnkEz1Xu3aBwn5ocCPfc9yqmFG9pa6w= github.com/go-critic/go-critic v0.12.0/go.mod h1:DpE0P6OVc6JzVYzmM5gq5jMU31zLr4am5mB/VfFK64w= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= diff --git a/tools/vendor/github.com/ckaznocha/intrange/.golangci.yml b/tools/vendor/github.com/ckaznocha/intrange/.golangci.yml index b240f85ce..74a80105c 100644 --- a/tools/vendor/github.com/ckaznocha/intrange/.golangci.yml +++ b/tools/vendor/github.com/ckaznocha/intrange/.golangci.yml @@ -15,6 +15,7 @@ linters-settings: local-prefixes: github.com/ckaznocha/intrange govet: enable: + - appends - asmdecl - assign - atomic @@ -22,10 +23,11 @@ linters-settings: - bools - buildtag - cgocall - - composite - - copylock - - copyloopvar + - composites + - copylocks - deepequalerrors + - defers + - directive - errorsas - fieldalignment - findcall @@ -37,18 +39,25 @@ linters-settings: - nilfunc - nilness - printf + - reflectvaluecompare - shadow - shift + - sigchanyzer + - slog - sortslice - stdmethods + - stdversion - stringintconv - structtag - testinggoroutine - tests + - timeformat - unmarshal - unreachable - unsafeptr - unusedresult + - unusedwrite + - waitgroup misspell: 
locale: US linters: diff --git a/tools/vendor/github.com/ckaznocha/intrange/go.work b/tools/vendor/github.com/ckaznocha/intrange/go.work index 3814c99f9..b7e127dcb 100644 --- a/tools/vendor/github.com/ckaznocha/intrange/go.work +++ b/tools/vendor/github.com/ckaznocha/intrange/go.work @@ -1,4 +1,6 @@ -go 1.22 +go 1.23.0 + +toolchain go1.23.4 use ( . diff --git a/tools/vendor/github.com/ckaznocha/intrange/intrange.go b/tools/vendor/github.com/ckaznocha/intrange/intrange.go index 229c847d5..79aca7973 100644 --- a/tools/vendor/github.com/ckaznocha/intrange/intrange.go +++ b/tools/vendor/github.com/ckaznocha/intrange/intrange.go @@ -265,6 +265,15 @@ func checkForStmt(pass *analysis.Pass, forStmt *ast.ForStmt) { replacement = fmt.Sprintf("range %s", rangeX) } + if isFunctionOrMethodCall(operand) { + pass.Report(analysis.Diagnostic{ + Pos: forStmt.Pos(), + Message: msg + "\nBecause the key is returned by a function or method, take care to consider side effects.", + }) + + return + } + pass.Report(analysis.Diagnostic{ Pos: forStmt.Pos(), Message: msg, @@ -315,12 +324,11 @@ func checkRangeStmt(pass *analysis.Pass, rangeStmt *ast.RangeStmt) { return } - fn, ok := x.Fun.(*ast.Ident) - if !ok { + if _, ok = x.Fun.(*ast.Ident); !ok { return } - if fn.Name != "len" || len(x.Args) != 1 { + if !isLen(x) { return } @@ -385,7 +393,7 @@ func checkRangeStmt(pass *analysis.Pass, rangeStmt *ast.RangeStmt) { func findNExpr(expr ast.Expr) ast.Expr { switch e := expr.(type) { case *ast.CallExpr: - if fun, ok := e.Fun.(*ast.Ident); ok && fun.Name == "len" && len(e.Args) == 1 { + if isLen(e) { return findNExpr(e.Args[0]) } @@ -439,6 +447,8 @@ func recursiveOperandToString( return recursiveOperandToString(e.X, false) + "[" + recursiveOperandToString(e.Index, false) + "]" case *ast.BinaryExpr: return recursiveOperandToString(e.X, false) + " " + e.Op.String() + " " + recursiveOperandToString(e.Y, false) + case *ast.StarExpr: + return "*" + recursiveOperandToString(e.X, false) default: return "" } @@ -526,19 +536,7 @@ func isNumberLit(exp ast.Expr) bool { case *ast.CallExpr: switch fun := lit.Fun.(type) { case *ast.Ident: - switch fun.Name { - case - "int", - "int8", - "int16", - "int32", - "int64", - "uint", - "uint8", - "uint16", - "uint32", - "uint64": - default: + if !isIntCast(fun) { return false } default: @@ -573,19 +571,7 @@ func compareNumberLit(exp ast.Expr, val int) bool { case *ast.CallExpr: switch fun := lit.Fun.(type) { case *ast.Ident: - switch fun.Name { - case - "int", - "int8", - "int16", - "int32", - "int64", - "uint", - "uint8", - "uint16", - "uint32", - "uint64": - default: + if !isIntCast(fun) { return false } default: @@ -623,5 +609,58 @@ func operandToString( return s } + if operandIdent, ok := operand.(*ast.Ident); ok { + if operandType := pass.TypesInfo.TypeOf(operandIdent); operandType != nil && + operandType == t { + return s + } + } + return t.String() + "(" + s + ")" } + +func isFunctionOrMethodCall(expr ast.Expr) bool { + e, ok := expr.(*ast.CallExpr) + if !ok { + return false + } + + fun, ok := e.Fun.(*ast.Ident) + if !ok { + return true + } + + if isLen(e) || isIntCast(fun) { + return false + } + + return true +} + +func isIntCast(ident *ast.Ident) bool { + switch ident.Name { + case + "int", + "int8", + "int16", + "int32", + "int64", + "uint", + "uint8", + "uint16", + "uint32", + "uint64": + return true + default: + return false + } +} + +func isLen(exp *ast.CallExpr) bool { + fun, ok := exp.Fun.(*ast.Ident) + if !ok { + return false + } + + return fun.Name == "len" && len(exp.Args) == 
1 +} diff --git a/tools/vendor/github.com/ghostiam/protogetter/flake.lock b/tools/vendor/github.com/ghostiam/protogetter/flake.lock new file mode 100644 index 000000000..5ed1735b1 --- /dev/null +++ b/tools/vendor/github.com/ghostiam/protogetter/flake.lock @@ -0,0 +1,61 @@ +{ + "nodes": { + "flake-utils": { + "inputs": { + "systems": "systems" + }, + "locked": { + "lastModified": 1731533236, + "narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=", + "owner": "numtide", + "repo": "flake-utils", + "rev": "11707dc2f618dd54ca8739b309ec4fc024de578b", + "type": "github" + }, + "original": { + "owner": "numtide", + "repo": "flake-utils", + "type": "github" + } + }, + "nixpkgs": { + "locked": { + "lastModified": 1742069588, + "narHash": "sha256-C7jVfohcGzdZRF6DO+ybyG/sqpo1h6bZi9T56sxLy+k=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "c80f6a7e10b39afcc1894e02ef785b1ad0b0d7e5", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "nixos-unstable", + "repo": "nixpkgs", + "type": "github" + } + }, + "root": { + "inputs": { + "flake-utils": "flake-utils", + "nixpkgs": "nixpkgs" + } + }, + "systems": { + "locked": { + "lastModified": 1681028828, + "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", + "owner": "nix-systems", + "repo": "default", + "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", + "type": "github" + }, + "original": { + "owner": "nix-systems", + "repo": "default", + "type": "github" + } + } + }, + "root": "root", + "version": 7 +} diff --git a/tools/vendor/github.com/ghostiam/protogetter/flake.nix b/tools/vendor/github.com/ghostiam/protogetter/flake.nix new file mode 100644 index 000000000..b6f01c4f5 --- /dev/null +++ b/tools/vendor/github.com/ghostiam/protogetter/flake.nix @@ -0,0 +1,69 @@ +{ + description = "A flake that builds a repo"; + + inputs = { + nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable"; + flake-utils.url = "github:numtide/flake-utils"; + }; + + outputs = + inputs@{ + nixpkgs, + flake-utils, + ... + }: + flake-utils.lib.eachDefaultSystem ( + system: + let + pkgs = import nixpkgs { inherit system; }; + buildInputs = with pkgs; [ + go + coreutils + curl + xmlstarlet + + # Protobuf + gRPC + protobuf + protoc-gen-go + protoc-gen-go-grpc + grpc + ]; + + defaultShellHook = '' + export SHELL="${pkgs.bashInteractive}/bin/bash" + + export FLAKE_ROOT=$(nix flake metadata | grep 'Resolved URL' | awk '{print $3}' | awk -F'://' '{print $2}') + export HISTFILE="$FLAKE_ROOT/.nix_bash_history" + + export GOROOT="${pkgs.go}/share/go" + ''; + in + { + # run: `nix develop` + devShells = { + default = pkgs.mkShell { + inherit buildInputs; + shellHook = defaultShellHook; + }; + + # Update IDEA paths. Use only if nix installed in whole system. + # run: `nix develop .#idea` + idea = pkgs.mkShell { + inherit buildInputs; + + shellHook = pkgs.lib.concatLines [ + defaultShellHook + '' + cd "$FLAKE_ROOT" + + echo "Replace GOPATH" + xmlstarlet ed -L -u '//project/component[@name="GOROOT"]/@url' -v 'file://${pkgs.go}/share/go' .idea/workspace.xml + + exit 0 + '' + ]; + }; + }; + } + ); +} diff --git a/tools/vendor/github.com/ghostiam/protogetter/processor.go b/tools/vendor/github.com/ghostiam/protogetter/processor.go index 44f346e85..f1c876a0a 100644 --- a/tools/vendor/github.com/ghostiam/protogetter/processor.go +++ b/tools/vendor/github.com/ghostiam/protogetter/processor.go @@ -33,12 +33,21 @@ func (c *processor) process(n ast.Node) (*Result, error) { switch x := n.(type) { case *ast.AssignStmt: // Skip any assignment to the field. 
- for _, s := range x.Lhs { + for i, s := range x.Lhs { c.filter.AddPos(s.Pos()) if se, ok := s.(*ast.StarExpr); ok { c.filter.AddPos(se.X.Pos()) } + + if len(x.Rhs) > i { + value := x.Rhs[i] + if se, ok := value.(*ast.SelectorExpr); ok { + if hasPointerKeyWithoutPointerGetter(c.info, s, se) { + c.filter.AddPos(se.Sel.Pos()) + } + } + } } case *ast.IncDecStmt: @@ -52,6 +61,13 @@ func (c *processor) process(n ast.Node) (*Result, error) { c.filter.AddPos(x.X.Pos()) } + case *ast.KeyValueExpr: + if se, ok := x.Value.(*ast.SelectorExpr); ok { + if hasPointerKeyWithoutPointerGetter(c.info, x.Key, se) { + c.filter.AddPos(se.Sel.Pos()) + } + } + case *ast.CallExpr: if !c.cfg.ReplaceFirstArgInAppend && len(x.Args) > 0 { if v, ok := x.Fun.(*ast.Ident); ok && v.Name == "append" { @@ -176,8 +192,11 @@ func (c *processor) processInner(expr ast.Expr) { c.processInner(x.X) c.write(".") + // Skip if the field is filtered. + isFiltered := c.filter.IsFiltered(x.Sel.Pos()) + // If getter exists, use it. - if methodIsExists(c.info, x.X, "Get"+x.Sel.Name) { + if methodIsExists(c.info, x.X, "Get"+x.Sel.Name) && !isFiltered { c.writeFrom(x.Sel.Name) c.writeTo("Get" + x.Sel.Name + "()") return @@ -218,7 +237,7 @@ func (c *processor) processInner(expr ast.Expr) { c.write("*") c.processInner(x.X) - case *ast.CompositeLit, *ast.TypeAssertExpr, *ast.ArrayType, *ast.FuncLit, *ast.SliceExpr: + case *ast.CompositeLit, *ast.TypeAssertExpr, *ast.ArrayType, *ast.FuncLit, *ast.SliceExpr, *ast.MapType: // Process the node as is. c.write(formatNode(x)) @@ -349,3 +368,17 @@ func getterResultHasPointer(info *types.Info, x ast.Expr, name string) (hasPoint return false, false } + +func hasPointerKeyWithoutPointerGetter(info *types.Info, key ast.Expr, value *ast.SelectorExpr) bool { + _, isPtr := info.TypeOf(key).(*types.Pointer) + if !isPtr { + return false + } + + getterHasPointer, ok := getterResultHasPointer(info, value.X, value.Sel.Name) + if !ok { + return false + } + + return !getterHasPointer +} diff --git a/tools/vendor/github.com/ghostiam/protogetter/protogetter.go b/tools/vendor/github.com/ghostiam/protogetter/protogetter.go index c1c42c75d..0dbcf31c9 100644 --- a/tools/vendor/github.com/ghostiam/protogetter/protogetter.go +++ b/tools/vendor/github.com/ghostiam/protogetter/protogetter.go @@ -96,6 +96,7 @@ func Run(pass *analysis.Pass, cfg *Config) error { (*ast.StarExpr)(nil), (*ast.IncDecStmt)(nil), (*ast.UnaryExpr)(nil), + (*ast.KeyValueExpr)(nil), } // Skip filtered files. 
diff --git a/tools/vendor/modules.txt b/tools/vendor/modules.txt index 820cdbe0d..1c0db20fd 100644 --- a/tools/vendor/modules.txt +++ b/tools/vendor/modules.txt @@ -81,11 +81,11 @@ github.com/blizzy78/varnamelen # github.com/bombsimon/wsl/v4 v4.5.0 ## explicit; go 1.22 github.com/bombsimon/wsl/v4 -# github.com/breml/bidichk v0.3.2 -## explicit; go 1.22.0 +# github.com/breml/bidichk v0.3.3 +## explicit; go 1.23.0 github.com/breml/bidichk/pkg/bidichk -# github.com/breml/errchkjson v0.4.0 -## explicit; go 1.22.0 +# github.com/breml/errchkjson v0.4.1 +## explicit; go 1.23.0 github.com/breml/errchkjson # github.com/butuzov/ireturn v0.3.1 ## explicit; go 1.18 @@ -119,8 +119,8 @@ github.com/charithe/durationcheck # github.com/chavacava/garif v0.1.0 ## explicit; go 1.16 github.com/chavacava/garif -# github.com/ckaznocha/intrange v0.3.0 -## explicit; go 1.22 +# github.com/ckaznocha/intrange v0.3.1 +## explicit; go 1.23.0 github.com/ckaznocha/intrange # github.com/curioswitch/go-reassign v0.3.0 ## explicit; go 1.21 @@ -161,7 +161,7 @@ github.com/fsnotify/fsnotify # github.com/fzipp/gocyclo v0.6.0 ## explicit; go 1.18 github.com/fzipp/gocyclo -# github.com/ghostiam/protogetter v0.3.9 +# github.com/ghostiam/protogetter v0.3.12 ## explicit; go 1.22.0 github.com/ghostiam/protogetter # github.com/go-critic/go-critic v0.12.0 diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md index cf422304e..bd9667d99 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md @@ -1,5 +1,12 @@ # Release History +## 1.17.1 (2025-03-20) + +### Other Changes + +* Upgraded to Go 1.23 +* Upgraded dependencies + ## 1.17.0 (2025-01-07) ### Features Added diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go index 44ab00d40..c8929e596 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go @@ -40,5 +40,5 @@ const ( Module = "azcore" // Version is the semantic version (see http://semver.org) of this module. 
- Version = "v1.17.0" + Version = "v1.17.1" ) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md index 97f59c071..9bcc21600 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md @@ -1,3 +1,7 @@ +# v1.29.9 (2025-03-04.2) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.29.8 (2025-02-27) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go index da7e89e28..51b0c57ce 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go @@ -3,4 +3,4 @@ package config // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.29.8" +const goModuleVersion = "1.29.9" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md index 8357cce2d..c5fe7ddca 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md @@ -1,3 +1,7 @@ +# v1.17.62 (2025-03-04.2) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.17.61 (2025-02-27) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go index 393107898..628b3d582 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go @@ -3,4 +3,4 @@ package credentials // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.17.61" +const goModuleVersion = "1.17.62" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md index b3d70bd79..957c0c940 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md @@ -1,3 +1,7 @@ +# v1.25.1 (2025-03-04.2) + +* **Bug Fix**: Add assurance test for operation order. 
+ # v1.25.0 (2025-02-27) * **Feature**: Track credential providers via User-Agent Feature ids diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json index abbb7ea18..1a88fe4df 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json @@ -25,6 +25,7 @@ "protocol_test.go", "serializers.go", "snapshot_test.go", + "sra_operation_order_test.go", "types/errors.go", "types/types.go", "validators.go" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go index 429aa0cdb..b4e199605 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go @@ -3,4 +3,4 @@ package sso // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.25.0" +const goModuleVersion = "1.25.1" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md index c040c3ed9..7f2d6f3ac 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md @@ -1,3 +1,7 @@ +# v1.29.1 (2025-03-04.2) + +* **Bug Fix**: Add assurance test for operation order. + # v1.29.0 (2025-02-27) * **Feature**: Track credential providers via User-Agent Feature ids diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json index 23d72cf0f..35f180975 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json @@ -25,6 +25,7 @@ "protocol_test.go", "serializers.go", "snapshot_test.go", + "sra_operation_order_test.go", "types/errors.go", "types/types.go", "validators.go" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go index 5467b2f28..1fad8e390 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go @@ -3,4 +3,4 @@ package ssooidc // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.29.0" +const goModuleVersion = "1.29.1" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md index 557e03bd0..79ff72d75 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md @@ -1,3 +1,7 @@ +# v1.33.17 (2025-03-04.2) + +* **Bug Fix**: Add assurance test for operation order. 
+ # v1.33.16 (2025-02-27) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json index 643c5c694..86bb3b79b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json @@ -32,6 +32,7 @@ "protocol_test.go", "serializers.go", "snapshot_test.go", + "sra_operation_order_test.go", "types/errors.go", "types/types.go", "validators.go" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go index 32af427ca..18e1e4735 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go @@ -3,4 +3,4 @@ package sts // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.33.16" +const goModuleVersion = "1.33.17" diff --git a/vendor/github.com/charmbracelet/bubbletea/inputreader_other.go b/vendor/github.com/charmbracelet/bubbletea/inputreader_other.go index 8e63a87dc..3426a177c 100644 --- a/vendor/github.com/charmbracelet/bubbletea/inputreader_other.go +++ b/vendor/github.com/charmbracelet/bubbletea/inputreader_other.go @@ -9,6 +9,6 @@ import ( "github.com/muesli/cancelreader" ) -func newInputReader(r io.Reader) (cancelreader.CancelReader, error) { +func newInputReader(r io.Reader, _ bool) (cancelreader.CancelReader, error) { return cancelreader.NewReader(r) } diff --git a/vendor/github.com/charmbracelet/bubbletea/inputreader_windows.go b/vendor/github.com/charmbracelet/bubbletea/inputreader_windows.go index dd5277cf0..9af89428d 100644 --- a/vendor/github.com/charmbracelet/bubbletea/inputreader_windows.go +++ b/vendor/github.com/charmbracelet/bubbletea/inputreader_windows.go @@ -25,7 +25,7 @@ type conInputReader struct { var _ cancelreader.CancelReader = &conInputReader{} -func newInputReader(r io.Reader) (cancelreader.CancelReader, error) { +func newInputReader(r io.Reader, enableMouse bool) (cancelreader.CancelReader, error) { fallback := func(io.Reader) (cancelreader.CancelReader, error) { return cancelreader.NewReader(r) } @@ -38,11 +38,21 @@ func newInputReader(r io.Reader) (cancelreader.CancelReader, error) { return fallback(r) } - originalMode, err := prepareConsole(conin, - windows.ENABLE_MOUSE_INPUT, + modes := []uint32{ windows.ENABLE_WINDOW_INPUT, windows.ENABLE_EXTENDED_FLAGS, - ) + } + + // Since we have options to enable mouse events, [WithMouseCellMotion], + // [WithMouseAllMotion], and [EnableMouseCellMotion], + // [EnableMouseAllMotion], and [DisableMouse], we need to check if the user + // has enabled mouse events and add the appropriate mode accordingly. + // Otherwise, mouse events will be enabled all the time. + if enableMouse { + modes = append(modes, windows.ENABLE_MOUSE_INPUT) + } + + originalMode, err := prepareConsole(conin, modes...) 
if err != nil { return nil, fmt.Errorf("failed to prepare console input: %w", err) } diff --git a/vendor/github.com/charmbracelet/bubbletea/tea.go b/vendor/github.com/charmbracelet/bubbletea/tea.go index 743a866e9..0bc915e5b 100644 --- a/vendor/github.com/charmbracelet/bubbletea/tea.go +++ b/vendor/github.com/charmbracelet/bubbletea/tea.go @@ -16,6 +16,7 @@ import ( "io" "os" "os/signal" + "runtime" "runtime/debug" "sync" "sync/atomic" @@ -183,6 +184,9 @@ type Program struct { // fps is the frames per second we should set on the renderer, if // applicable, fps int + + // mouseMode is true if the program should enable mouse mode on Windows. + mouseMode bool } // Quit is a special command that tells the Bubble Tea program to exit. @@ -413,9 +417,25 @@ func (p *Program) eventLoop(model Model, cmds chan Cmd) (Model, error) { // mouse mode (1006) is a no-op if the terminal doesn't support it. p.renderer.enableMouseSGRMode() + // XXX: This is used to enable mouse mode on Windows. We need + // to reinitialize the cancel reader to get the mouse events to + // work. + if runtime.GOOS == "windows" && !p.mouseMode { + p.mouseMode = true + p.initCancelReader(true) //nolint:errcheck + } + case disableMouseMsg: p.disableMouse() + // XXX: On Windows, mouse mode is enabled on the input reader + // level. We need to instruct the input reader to stop reading + // mouse events. + if runtime.GOOS == "windows" && p.mouseMode { + p.mouseMode = false + p.initCancelReader(true) //nolint:errcheck + } + case showCursorMsg: p.renderer.showCursor() @@ -579,6 +599,11 @@ func (p *Program) Run() (Model, error) { p.renderer.enableMouseAllMotion() p.renderer.enableMouseSGRMode() } + + // XXX: Should we enable mouse mode on Windows? + // This needs to happen before initializing the cancel and input reader. + p.mouseMode = p.startupOptions&withMouseCellMotion != 0 || p.startupOptions&withMouseAllMotion != 0 + if p.startupOptions&withReportFocus != 0 { p.renderer.enableReportFocus() } @@ -607,7 +632,7 @@ func (p *Program) Run() (Model, error) { // Subscribe to user input. if p.input != nil { - if err := p.initCancelReader(); err != nil { + if err := p.initCancelReader(false); err != nil { return model, err } } @@ -763,7 +788,7 @@ func (p *Program) RestoreTerminal() error { if err := p.initTerminal(); err != nil { return err } - if err := p.initCancelReader(); err != nil { + if err := p.initCancelReader(false); err != nil { return err } if p.altScreenWasActive { diff --git a/vendor/github.com/charmbracelet/bubbletea/tty.go b/vendor/github.com/charmbracelet/bubbletea/tty.go index 02507782c..9490facac 100644 --- a/vendor/github.com/charmbracelet/bubbletea/tty.go +++ b/vendor/github.com/charmbracelet/bubbletea/tty.go @@ -75,9 +75,14 @@ func (p *Program) restoreInput() error { } // initCancelReader (re)commences reading inputs. 
-func (p *Program) initCancelReader() error { +func (p *Program) initCancelReader(cancel bool) error { + if cancel && p.cancelReader != nil { + p.cancelReader.Cancel() + p.waitForReadLoop() + } + var err error - p.cancelReader, err = newInputReader(p.input) + p.cancelReader, err = newInputReader(p.input, p.mouseMode) if err != nil { return fmt.Errorf("error creating cancelreader: %w", err) } diff --git a/vendor/github.com/pulumi/pulumi-awsx/sdk/v2/go/awsx/internal/pulumiUtilities.go b/vendor/github.com/pulumi/pulumi-awsx/sdk/v2/go/awsx/internal/pulumiUtilities.go index 68df11139..99a591934 100644 --- a/vendor/github.com/pulumi/pulumi-awsx/sdk/v2/go/awsx/internal/pulumiUtilities.go +++ b/vendor/github.com/pulumi/pulumi-awsx/sdk/v2/go/awsx/internal/pulumiUtilities.go @@ -165,7 +165,7 @@ func callPlainInner( func PkgResourceDefaultOpts(opts []pulumi.ResourceOption) []pulumi.ResourceOption { defaults := []pulumi.ResourceOption{} - version := semver.MustParse("2.21.0") + version := semver.MustParse("2.21.1") if !version.Equals(semver.Version{}) { defaults = append(defaults, pulumi.Version(version.String())) } @@ -176,7 +176,7 @@ func PkgResourceDefaultOpts(opts []pulumi.ResourceOption) []pulumi.ResourceOptio func PkgInvokeDefaultOpts(opts []pulumi.InvokeOption) []pulumi.InvokeOption { defaults := []pulumi.InvokeOption{} - version := semver.MustParse("2.21.0") + version := semver.MustParse("2.21.1") if !version.Equals(semver.Version{}) { defaults = append(defaults, pulumi.Version(version.String())) } diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/.version b/vendor/github.com/pulumi/pulumi/sdk/v3/.version index 9c8371d40..4c6305b6e 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/.version +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/.version @@ -1 +1 @@ -3.152.0 +3.154.0 diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/auto/optrefresh/optrefresh.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/auto/optrefresh/optrefresh.go index e0834c318..e1113919b 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/auto/optrefresh/optrefresh.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/auto/optrefresh/optrefresh.go @@ -122,6 +122,13 @@ func SuppressOutputs() Option { }) } +// Display operation as a rich diff showing the overall change. +func Diff() Option { + return optionFunc(func(o *Options) { + o.Diff = true + }) +} + // ConfigFile specifies a file to use for configuration values rather than detecting the file name func ConfigFile(path string) Option { return optionFunc(func(opts *Options) { @@ -169,6 +176,8 @@ type Options struct { SuppressOutputs bool // Run using the configuration values in the specified file rather than detecting the file name ConfigFile string + // When set, display operation as a rich diff showing the overall change + Diff bool } type optionFunc func(*Options) diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/auto/optrename/optrename.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/auto/optrename/optrename.go new file mode 100644 index 000000000..3d8ea50bd --- /dev/null +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/auto/optrename/optrename.go @@ -0,0 +1,66 @@ +// Copyright 2016-2025, Pulumi Corporation. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package optrename contains functional options to be used with stack rename operations
+// github.com/pulumi/pulumi/sdk/v3/go/auto Stack.Rename(...optrename.Option)
+package optrename
+
+import "io"
+
+// StackName sets the new name of the stack.
+func StackName(message string) Option {
+	return optionFunc(func(opts *Options) {
+		opts.StackName = message
+	})
+}
+
+// ErrorProgressStreams allows specifying one or more io.Writers to redirect incremental rename stderr
+func ErrorProgressStreams(writers ...io.Writer) Option {
+	return optionFunc(func(opts *Options) {
+		opts.ErrorProgressStreams = writers
+	})
+}
+
+// ShowSecrets configures whether to show config secrets when they appear in the config.
+func ShowSecrets(show bool) Option {
+	return optionFunc(func(opts *Options) {
+		opts.ShowSecrets = &show
+	})
+}
+
+// Option is a parameter to be applied to a Stack.Rename() operation
+type Option interface {
+	ApplyOption(*Options)
+}
+
+// ---------------------------------- implementation details ----------------------------------
+
+// Options is an implementation detail
+type Options struct {
+	// The new name for the stack.
+	StackName string
+	// ProgressStreams allows specifying one or more io.Writers to redirect incremental rename stdout
+	ProgressStreams []io.Writer
+	// ErrorProgressStreams allows specifying one or more io.Writers to redirect incremental rename stderr
+	ErrorProgressStreams []io.Writer
+	// Show config secrets when they appear.
+	ShowSecrets *bool
+}
+
+type optionFunc func(*Options)
+
+// ApplyOption is an implementation detail
+func (o optionFunc) ApplyOption(opts *Options) {
+	o(opts)
+}
diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/auto/stack.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/auto/stack.go
index b21c4288f..a4728c146 100644
--- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/auto/stack.go
+++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/auto/stack.go
@@ -121,6 +121,7 @@ import (
 	"github.com/pulumi/pulumi/sdk/v3/go/auto/opthistory"
 	"github.com/pulumi/pulumi/sdk/v3/go/auto/optpreview"
 	"github.com/pulumi/pulumi/sdk/v3/go/auto/optrefresh"
+	"github.com/pulumi/pulumi/sdk/v3/go/auto/optrename"
 	"github.com/pulumi/pulumi/sdk/v3/go/auto/optup"
 	"github.com/pulumi/pulumi/sdk/v3/go/common/apitype"
 	"github.com/pulumi/pulumi/sdk/v3/go/common/constant"
@@ -772,6 +773,9 @@
 	if o.ConfigFile != "" {
 		args = append(args, "--config-file="+o.ConfigFile)
 	}
+	if o.Diff {
+		args = append(args, "--diff")
+	}
 
 	// Apply the remote args, if needed.
 	args = append(args, s.remoteArgs()...)
@@ -856,6 +860,60 @@ func (s *Stack) PreviewDestroy(ctx context.Context, opts ...optdestroy.Option) (
 	return res, nil
 }
 
+// Rename renames the current stack.
+func (s *Stack) Rename(ctx context.Context, opts ...optrename.Option) (RenameResult, error) { + var res RenameResult + + renameOpts := &optrename.Options{} + for _, o := range opts { + o.ApplyOption(renameOpts) + } + + args := renameOptsToCmd(renameOpts, s) + + stdout, stderr, code, err := s.runPulumiCmdSync( + ctx, + renameOpts.ProgressStreams, /* additionalOutputs */ + renameOpts.ErrorProgressStreams, /* additionalErrorOutputs */ + args..., + ) + if err != nil { + return res, newAutoError(fmt.Errorf("failed to rename stack: %w", err), stdout, stderr, code) + } + + historyOpts := []opthistory.Option{} + if showSecrets := renameOpts.ShowSecrets; showSecrets != nil { + historyOpts = append(historyOpts, opthistory.ShowSecrets(*showSecrets)) + } + // If it's a remote workspace, explicitly set ShowSecrets to false to prevent attempting to + // load the project file. + if s.isRemote() { + historyOpts = append(historyOpts, opthistory.ShowSecrets(false)) + } + history, err := s.History(ctx, 1 /*pageSize*/, 1 /*page*/, historyOpts...) + if err != nil { + return res, fmt.Errorf("failed to rename stack: %w", err) + } + + var summary UpdateSummary + if len(history) > 0 { + summary = history[0] + } + + res = RenameResult{ + Summary: summary, + StdOut: stdout, + StdErr: stderr, + } + + return res, nil +} + +func renameOptsToCmd(renameOpts *optrename.Options, s *Stack) []string { + args := slice.Prealloc[string](3) + return append(args, "stack", "rename", renameOpts.StackName) +} + // Destroy deletes all resources in a stack, leaving all history and configuration intact. func (s *Stack) Destroy(ctx context.Context, opts ...optdestroy.Option) (DestroyResult, error) { var res DestroyResult @@ -1301,6 +1359,13 @@ func (rr *RefreshResult) GetPermalink() (string, error) { return GetPermalink(rr.StdOut) } +// RenameResult is the output of a successful Stack.Rename operation +type RenameResult struct { + StdOut string + StdErr string + Summary UpdateSummary +} + // DestroyResult is the output of a successful Stack.Destroy operation type DestroyResult struct { StdOut string diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/apitype/policy.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/apitype/policy.go index 5d3f9380e..58289fbb1 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/apitype/policy.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/apitype/policy.go @@ -214,6 +214,7 @@ type PolicyGroupSummary struct { Name string `json:"name"` IsOrgDefault bool `json:"isOrgDefault"` NumStacks int `json:"numStacks"` + NumAccounts int `json:"numAccounts,omitempty"` NumEnabledPolicyPacks int `json:"numEnabledPolicyPacks"` } diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/apitype/service.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/apitype/service.go index 47f6e9520..3e31c364b 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/apitype/service.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/apitype/service.go @@ -31,8 +31,8 @@ const ( // via the PatchUpdateCheckpointDeltaRequest API to save on network bytes. DeltaCheckpointUploadsV2 APICapability = "delta-checkpoint-uploads-v2" - // Indicates that the service backend supports stack bulk encryption. - BulkEncrypt APICapability = "bulk-encrypt" + // Indicates that the service backend supports batch encryption. 
+ BatchEncrypt APICapability = "batch-encrypt" ) type DeltaCheckpointUploadsConfigV2 struct { @@ -61,8 +61,8 @@ type Capabilities struct { // If non-nil, indicates that delta checkpoint updates are supported. DeltaCheckpointUpdates *DeltaCheckpointUploadsConfigV2 - // Indicates whether the service supports bulk encryption. - BulkEncryption bool + // Indicates whether the service supports batch encryption. + BatchEncryption bool } // Parse decodes the CapabilitiesResponse into a Capabilities struct for ease of use. @@ -84,8 +84,8 @@ func (r CapabilitiesResponse) Parse() (Capabilities, error) { } parsed.DeltaCheckpointUpdates = &upcfg } - case BulkEncrypt: - parsed.BulkEncryption = true + case BatchEncrypt: + parsed.BatchEncryption = true default: continue } diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/apitype/stacks.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/apitype/stacks.go index d0e52aa4f..8a1f3486c 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/apitype/stacks.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/apitype/stacks.go @@ -78,14 +78,14 @@ type EncryptValueResponse struct { Ciphertext []byte `json:"ciphertext"` } -// BulkEncryptRequest defines the request body for encrypting multiple values. -type BulkEncryptRequest struct { +// BatchEncryptRequest defines the request body for encrypting multiple values. +type BatchEncryptRequest struct { // The values to encrypt. Plaintexts [][]byte `json:"plaintexts"` } -// BulkEncryptResponse defines the response body for multiple encrypted values. -type BulkEncryptResponse struct { +// BatchEncryptResponse defines the response body for multiple encrypted values. +type BatchEncryptResponse struct { // The encrypted values, in order of the plaintexts from the request. Ciphertexts [][]byte `json:"ciphertexts"` } @@ -108,14 +108,14 @@ type Log3rdPartyDecryptionEvent struct { CommandName string `json:"commandName,omitempty"` } -// BulkDecryptValueRequest defines the request body for bulk decrypting secret values. -type BulkDecryptValueRequest struct { +// BatchDecryptRequest defines the request body for bulk decrypting secret values. +type BatchDecryptRequest struct { Ciphertexts [][]byte `json:"ciphertexts"` } -// BulkDecryptValueResponse defines the response body for bulk decrypted secret values. The key in +// BatchDecryptResponse defines the response body for bulk decrypted secret values. The key in // the map is the base64 encoding of the ciphertext. -type BulkDecryptValueResponse struct { +type BatchDecryptResponse struct { Plaintexts map[string][]byte `json:"plaintexts"` } diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/env/env.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/env/env.go index dcd2a7d87..500385a91 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/env/env.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/env/env.go @@ -18,7 +18,9 @@ package env -import "github.com/pulumi/pulumi/sdk/v3/go/common/util/env" +import ( + "github.com/pulumi/pulumi/sdk/v3/go/common/util/env" +) // Re-export some types and functions from the env library. 
@@ -100,6 +102,12 @@ var SuppressCopilotLink = env.Bool("SUPPRESS_COPILOT_LINK", var FallbackToStateSecretsManager = env.Bool("FALLBACK_TO_STATE_SECRETS_MANAGER", "Use the snapshot secrets manager as a fallback when the stack configuration is missing or incomplete.") +var Parallel = env.Int("PARALLEL", + "Allow P resource operations to run in parallel at once (1 for no parallelism)") + +var AccessToken = env.String("ACCESS_TOKEN", + "The access token used to authenticate with the Pulumi Service.") + // List of overrides for Plugin Download URLs. The expected format is `regexp=URL`, and multiple pairs can // be specified separated by commas, e.g. `regexp1=URL1,regexp2=URL2` // diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/config/crypt.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/config/crypt.go index 28af746e7..f1eb8e937 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/config/crypt.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/config/crypt.go @@ -38,9 +38,9 @@ type Encrypter interface { type Decrypter interface { DecryptValue(ctx context.Context, ciphertext string) (string, error) - // BulkDecrypt supports bulk decryption of secrets. + // BatchDecrypt supports decryption of secrets in a single batch round-trip, where supported. // Returns a list of decrypted values in the same order as the input ciphertexts. - BulkDecrypt(ctx context.Context, ciphertexts []string) ([]string, error) + BatchDecrypt(ctx context.Context, ciphertexts []string) ([]string, error) } // Crypter can both encrypt and decrypt values. @@ -61,8 +61,8 @@ func (nopCrypter) DecryptValue(ctx context.Context, ciphertext string) (string, return ciphertext, nil } -func (nopCrypter) BulkDecrypt(ctx context.Context, ciphertexts []string) ([]string, error) { - return DefaultBulkDecrypt(ctx, NopDecrypter, ciphertexts) +func (nopCrypter) BatchDecrypt(ctx context.Context, ciphertexts []string) ([]string, error) { + return DefaultBatchDecrypt(ctx, NopDecrypter, ciphertexts) } func (nopCrypter) EncryptValue(ctx context.Context, plaintext string) (string, error) { @@ -89,8 +89,8 @@ func (b blindingCrypter) EncryptValue(ctx context.Context, plaintext string) (st return "[secret]", nil } -func (b blindingCrypter) BulkDecrypt(ctx context.Context, ciphertexts []string) ([]string, error) { - return DefaultBulkDecrypt(ctx, b, ciphertexts) +func (b blindingCrypter) BatchDecrypt(ctx context.Context, ciphertexts []string) ([]string, error) { + return DefaultBatchDecrypt(ctx, b, ciphertexts) } // NewPanicCrypter returns a new config crypter that will panic if used. 
@@ -108,7 +108,7 @@ func (p panicCrypter) DecryptValue(ctx context.Context, _ string) (string, error
 	panic("attempt to decrypt value")
 }
 
-func (p panicCrypter) BulkDecrypt(ctx context.Context, ciphertexts []string) ([]string, error) {
+func (p panicCrypter) BatchDecrypt(ctx context.Context, ciphertexts []string) ([]string, error) {
 	panic("attempt to bulk decrypt values")
 }
 
@@ -178,8 +178,8 @@ func (s symmetricCrypter) DecryptValue(ctx context.Context, value string) (strin
 	return string(msg), err
 }
 
-func (s symmetricCrypter) BulkDecrypt(ctx context.Context, ciphertexts []string) ([]string, error) {
-	return DefaultBulkDecrypt(ctx, s, ciphertexts)
+func (s symmetricCrypter) BatchDecrypt(ctx context.Context, ciphertexts []string) ([]string, error) {
+	return DefaultBatchDecrypt(ctx, s, ciphertexts)
 }
 
 // encryptAES256GCGM returns the ciphertext and the generated nonce
@@ -220,14 +220,14 @@ func (c prefixCrypter) EncryptValue(ctx context.Context, plaintext string) (stri
 	return c.prefix + plaintext, nil
 }
 
-func (c prefixCrypter) BulkDecrypt(ctx context.Context, ciphertexts []string) ([]string, error) {
-	return DefaultBulkDecrypt(ctx, c, ciphertexts)
+func (c prefixCrypter) BatchDecrypt(ctx context.Context, ciphertexts []string) ([]string, error) {
+	return DefaultBatchDecrypt(ctx, c, ciphertexts)
 }
 
-// DefaultBulkDecrypt decrypts a list of ciphertexts. Each ciphertext is decrypted individually. The returned
+// DefaultBatchDecrypt decrypts a list of ciphertexts. Each ciphertext is decrypted individually. The returned
 // map maps from ciphertext to plaintext. This should only be used by implementers of Decrypter to implement
-// their BulkDecrypt method in cases where they can't do more efficient than just individual decryptions.
-func DefaultBulkDecrypt(ctx context.Context,
+// their BatchDecrypt method in cases where they can't do anything more efficient than individual decryptions.
+func DefaultBatchDecrypt(ctx context.Context, decrypter Decrypter, ciphertexts []string, ) ([]string, error) { if len(ciphertexts) == 0 { @@ -262,6 +262,6 @@ func (c *base64Crypter) DecryptValue(ctx context.Context, s string) (string, err return string(b), nil } -func (c *base64Crypter) BulkDecrypt(ctx context.Context, ciphertexts []string) ([]string, error) { - return DefaultBulkDecrypt(ctx, c, ciphertexts) +func (c *base64Crypter) BatchDecrypt(ctx context.Context, ciphertexts []string) ([]string, error) { + return DefaultBatchDecrypt(ctx, c, ciphertexts) } diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/langruntime_plugin.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/langruntime_plugin.go index 07faab96d..90072b13f 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/langruntime_plugin.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/langruntime_plugin.go @@ -296,14 +296,20 @@ func (h *langhost) GetRequiredPackages(info ProgramInfo) ([]workspace.PackageDes } } + source := info.Name + if strings.HasPrefix(info.Server, "git://") { + source = strings.TrimPrefix(info.Server, "git://") + info.Server = "" + } + + pluginSpec, err := workspace.NewPluginSpec( + source, apitype.PluginKind(info.Kind), version, info.Server, info.Checksums) + if err != nil { + return nil, err + } + results = append(results, workspace.PackageDescriptor{ - PluginSpec: workspace.PluginSpec{ - Name: info.Name, - Kind: apitype.PluginKind(info.Kind), - Version: version, - PluginDownloadURL: info.Server, - Checksums: info.Checksums, - }, + PluginSpec: pluginSpec, Parameterization: parameterization, }) } diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/plugin.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/plugin.go index db9ba6691..401d23ffa 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/plugin.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/plugin.go @@ -37,6 +37,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/health/grpc_health_v1" "google.golang.org/grpc/status" "github.com/pulumi/pulumi/sdk/v3/go/common/apitype" @@ -554,14 +555,55 @@ func buildPluginArguments(opts pluginArgumentOptions) []string { return args } +func (p *plugin) healthCheck() bool { + if p.Conn == nil { + return false + } + + // Check that the plugin looks alive by calling gRPC's Health Check service. + // Most plugins don't actually implement this service, which is OK as we treat + // an unimplemented status as OK. + + ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond) + defer cancel() + + healthy := make(chan bool, 1) + go func() { + health := grpc_health_v1.NewHealthClient(p.Conn) + req := &grpc_health_v1.HealthCheckRequest{} + + resp, err := health.Check(ctx, req) + if err != nil { + // Treat this as healthy as most plugins don't implement gRPC's Health + // Check service. An unimplemented status is enough for us to know the + // plugin is alive. 
+ if status.Code(err) == codes.Unimplemented { + healthy <- true + return + } + + logging.V(9).Infof("healthCheck(): failed with: %v", err) + healthy <- false + return + } + + healthy <- resp.Status == grpc_health_v1.HealthCheckResponse_SERVING + }() + + select { + case result := <-healthy: + return result + case <-ctx.Done(): // hit deadline + return false + } +} + func (p *plugin) Close() error { - pluginCrashed := true + // Something has gone wrong with the plugin if it is not healthy and we have not yet + // shut it down. + pluginCrashed := !p.healthCheck() if p.Conn != nil { - // Something has gone wrong with the plugin if it is not ready to handle requests and we have not yet - // shut it down. - pluginCrashed = p.Conn.GetState() != connectivity.Ready - contract.IgnoreClose(p.Conn) } diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/provider.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/provider.go index c389c4561..1fe6e8870 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/provider.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/provider.go @@ -52,10 +52,29 @@ type ProviderHandshakeRequest struct { // that the engine has been asked to attach to an existing running provider instance via a host/port number), this // field will be empty. ProgramDirectory *string + + // If true the engine will send URN, Name, Type and ID to the provider as part of the configuration. + ConfigureWithUrn bool } // The type of responses sent as part of a Handshake call. -type ProviderHandshakeResponse struct{} +type ProviderHandshakeResponse struct { + // True if and only if the provider supports secrets. If true, the caller should pass secrets as strongly typed + // values to the provider. + AcceptSecrets bool + + // True if and only if the provider supports strongly typed resources. If true, the caller should pass resources as + // strongly typed values to the provider. + AcceptResources bool + + // True if and only if the provider supports output values as inputs. If true, the engine should pass output values + // to the provider where possible. + AcceptOutputs bool + + // True if the provider accepts and respects autonaming configuration that the engine provides on behalf of the + // user. + SupportsAutonamingConfiguration bool +} type ParameterizeParameters interface { isParameterizeParameters() @@ -114,6 +133,21 @@ type DiffConfigRequest struct { type DiffConfigResponse = DiffResult type ConfigureRequest struct { + // The URN of the provider being configured. N.B. This will be null if configure_with_urn was false in + // Handshake. + URN *resource.URN + // The name of the provider being configured. This must match the name specified by the `urn` field, and + // is passed so that providers do not have to implement URN parsing in order to extract the name of the + // provider. N.B. This will be null if configure_with_urn was false in Handshake. + Name *string + // The type of the provider being configured. This must match the type specified by the `urn` field, and + // is passed so that providers do not have to implement URN parsing in order to extract the type of the + // provider. N.B. This will be null if configure_with_urn was false in Handshake. + Type *tokens.Type + // The ID of the provider being configured. N.B. This will be null if configure_with_urn was false in + // Handshake. + ID *resource.ID + // A map of input properties for the provider. 
Inputs resource.PropertyMap } @@ -315,9 +349,9 @@ type Provider interface { // Handshake is the first call made by the engine to a provider. It is used to pass the engine's address to the // provider so that it may establish its own connections back, and to establish protocol configuration that will be - // used to communicate between the two parties. Providers that support Handshake implicitly support the set of - // feature flags previously handled by Configure prior to Handshake's introduction, such as secrets and resource - // references. + // used to communicate between the two parties. Providers that support Handshake should return a response consistent + // with those returned in response to Configure calls where there is overlap due to the use of Configure prior to + // Handshake's introduction. Handshake(context.Context, ProviderHandshakeRequest) (*ProviderHandshakeResponse, error) // Parameterize adds a sub-package to this provider instance. diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/provider_plugin.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/provider_plugin.go index b0ed2f91d..9f65ffbbb 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/provider_plugin.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/provider_plugin.go @@ -192,6 +192,7 @@ func NewProviderFromSubdir(host Host, ctx *Context, pkg tokens.Package, subdir s // If we're attaching then we don't know the root or program directory. RootDirectory: nil, ProgramDirectory: nil, + ConfigureWithUrn: true, } return handshake(ctx, bin, prefix, conn, req) } @@ -247,6 +248,7 @@ func NewProviderFromSubdir(host Host, ctx *Context, pkg tokens.Package, subdir s EngineAddress: host.ServerAddr(), RootDirectory: &dir, ProgramDirectory: &dir, + ConfigureWithUrn: true, } return handshake(ctx, bin, prefix, conn, req) } @@ -275,10 +277,11 @@ func NewProviderFromSubdir(host Host, ctx *Context, pkg tokens.Package, subdir s if handshakeRes != nil { p.protocol = &pluginProtocol{ - acceptSecrets: true, - acceptResources: true, - supportsPreview: true, - acceptOutputs: true, + acceptSecrets: handshakeRes.AcceptSecrets, + acceptResources: handshakeRes.AcceptResources, + supportsPreview: true, + acceptOutputs: handshakeRes.AcceptOutputs, + supportsAutonamingConfiguration: handshakeRes.SupportsAutonamingConfiguration, } } @@ -301,10 +304,11 @@ func handshake( req *ProviderHandshakeRequest, ) (*ProviderHandshakeResponse, error) { client := pulumirpc.NewResourceProviderClient(conn) - _, err := client.Handshake(ctx, &pulumirpc.ProviderHandshakeRequest{ + res, err := client.Handshake(ctx, &pulumirpc.ProviderHandshakeRequest{ EngineAddress: req.EngineAddress, RootDirectory: req.RootDirectory, ProgramDirectory: req.ProgramDirectory, + ConfigureWithUrn: req.ConfigureWithUrn, }) if err != nil { status, ok := status.FromError(err) @@ -316,7 +320,12 @@ func handshake( } logging.V(7).Infof("Handshake: success [%v]", bin) - return &ProviderHandshakeResponse{}, nil + return &ProviderHandshakeResponse{ + AcceptSecrets: res.GetAcceptSecrets(), + AcceptResources: res.GetAcceptResources(), + AcceptOutputs: res.GetAcceptOutputs(), + SupportsAutonamingConfiguration: res.GetSupportsAutonamingConfiguration(), + }, nil } func providerPluginDialOptions(ctx *Context, pkg tokens.Package, path string) []grpc.DialOption { @@ -355,6 +364,7 @@ func NewProviderFromPath(host Host, ctx *Context, path string) (Provider, error) EngineAddress: host.ServerAddr(), 
RootDirectory: &dir, ProgramDirectory: &dir, + ConfigureWithUrn: true, } return handshake(ctx, bin, prefix, conn, req) } @@ -379,10 +389,11 @@ func NewProviderFromPath(host Host, ctx *Context, path string) (Provider, error) if handshakeRes != nil { p.protocol = &pluginProtocol{ - acceptSecrets: true, - acceptResources: true, - supportsPreview: true, - acceptOutputs: true, + acceptSecrets: handshakeRes.AcceptSecrets, + acceptResources: handshakeRes.AcceptResources, + supportsPreview: true, + acceptOutputs: handshakeRes.AcceptOutputs, + supportsAutonamingConfiguration: handshakeRes.SupportsAutonamingConfiguration, } } @@ -448,16 +459,22 @@ func isDiffCheckConfigLogicallyUnimplemented(err *rpcerror.Error, providerType t } func (p *provider) Handshake(ctx context.Context, req ProviderHandshakeRequest) (*ProviderHandshakeResponse, error) { - _, err := p.clientRaw.Handshake(ctx, &pulumirpc.ProviderHandshakeRequest{ + res, err := p.clientRaw.Handshake(ctx, &pulumirpc.ProviderHandshakeRequest{ EngineAddress: req.EngineAddress, RootDirectory: req.RootDirectory, ProgramDirectory: req.ProgramDirectory, + ConfigureWithUrn: req.ConfigureWithUrn, }) if err != nil { return nil, err } - return &ProviderHandshakeResponse{}, nil + return &ProviderHandshakeResponse{ + AcceptSecrets: res.GetAcceptSecrets(), + AcceptResources: res.GetAcceptResources(), + AcceptOutputs: res.GetAcceptOutputs(), + SupportsAutonamingConfiguration: res.GetSupportsAutonamingConfiguration(), + }, nil } func (p *provider) Parameterize(ctx context.Context, request ParameterizeRequest) (ParameterizeResponse, error) { @@ -933,7 +950,25 @@ func (p *provider) Configure(ctx context.Context, req ConfigureRequest) (Configu // Spawn the configure to happen in parallel. This ensures that we remain responsive elsewhere that might // want to make forward progress, even as the configure call is happening. go func() { + var urn, typ, id *string + if req.URN != nil { + urnVal := string(*req.URN) + urn = &urnVal + } + if req.ID != nil { + idVal := string(*req.ID) + id = &idVal + } + if req.Type != nil { + typVal := string(*req.Type) + typ = &typVal + } + resp, err := p.clientRaw.Configure(p.requestContext(), &pulumirpc.ConfigureRequest{ + Urn: urn, + Name: req.Name, + Type: typ, + Id: id, AcceptSecrets: true, AcceptResources: true, SendsOldInputs: true, diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/provider_server.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/provider_server.go index 6e607f67d..4aeed6856 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/provider_server.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/provider_server.go @@ -135,16 +135,26 @@ func (p *providerServer) Handshake( ctx context.Context, req *pulumirpc.ProviderHandshakeRequest, ) (*pulumirpc.ProviderHandshakeResponse, error) { - _, err := p.provider.Handshake(ctx, ProviderHandshakeRequest{ + res, err := p.provider.Handshake(ctx, ProviderHandshakeRequest{ EngineAddress: req.EngineAddress, RootDirectory: req.RootDirectory, ProgramDirectory: req.ProgramDirectory, + ConfigureWithUrn: req.ConfigureWithUrn, }) if err != nil { return nil, err } - return &pulumirpc.ProviderHandshakeResponse{}, nil + return &pulumirpc.ProviderHandshakeResponse{ + // providerServer can shim support for all these features, so we always set them to true. Note that we do the same + // in Configure. 
+ AcceptSecrets: true, + AcceptResources: true, + AcceptOutputs: true, + + // For features we don't shim, we just pass through the response from the provider as expected. + SupportsAutonamingConfiguration: res.SupportsAutonamingConfiguration, + }, nil } func (p *providerServer) Parameterize( @@ -360,14 +370,42 @@ func (p *providerServer) Configure(ctx context.Context, } } - if _, err := p.provider.Configure(ctx, ConfigureRequest{inputs}); err != nil { + var urn *resource.URN + if req.Urn != nil { + urnVal := resource.URN(*req.Urn) + urn = &urnVal + } + var id *resource.ID + if req.Id != nil { + idVal := resource.ID(*req.Id) + id = &idVal + } + var typ *tokens.Type + if req.Type != nil { + typVal := tokens.Type(*req.Type) + typ = &typVal + } + + _, err := p.provider.Configure(ctx, ConfigureRequest{ + URN: urn, + Name: req.Name, + Type: typ, + ID: id, + Inputs: inputs, + }) + if err != nil { return nil, err } p.keepSecrets = req.GetAcceptSecrets() p.keepResources = req.GetAcceptResources() return &pulumirpc.ConfigureResponse{ - AcceptSecrets: true, SupportsPreview: true, AcceptResources: true, AcceptOutputs: true, + // providerServer can shim support for all these features, so we always set them to true. Note that we do the same + // in Handshake (though Handshake implies SupportsPreview, so we don't shim that there). + AcceptSecrets: true, + SupportsPreview: true, + AcceptResources: true, + AcceptOutputs: true, }, nil } diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/util/cmdutil/args.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/util/cmdutil/args.go index 5ed125d80..39a0d1be0 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/util/cmdutil/args.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/util/cmdutil/args.go @@ -15,10 +15,10 @@ package cmdutil import ( + "errors" "fmt" "github.com/hashicorp/go-multierror" - "github.com/pulumi/pulumi/sdk/v3/go/common/util/contract" "github.com/spf13/cobra" ) @@ -27,10 +27,8 @@ func ArgsFunc(argsValidator cobra.PositionalArgs) cobra.PositionalArgs { return func(cmd *cobra.Command, args []string) error { err := argsValidator(cmd, args) if err != nil { - contract.IgnoreError(cmd.Help()) - Exit(err) + return errors.Join(cmd.Help(), err) } - return nil } } diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/util/cmdutil/exit.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/util/cmdutil/exit.go index 125426017..209d74f68 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/util/cmdutil/exit.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/util/cmdutil/exit.go @@ -17,7 +17,6 @@ package cmdutil import ( "fmt" "os" - "strings" "github.com/hashicorp/go-multierror" "github.com/pkg/errors" @@ -61,68 +60,47 @@ func DetailedError(err error) string { return msg } -// runPostCommandHooks runs any post-hooks present on the given cobra.Command. This logic is copied directly from -// cobra itself; see https://github.com/spf13/cobra/blob/4dab30cb33e6633c33c787106bafbfbfdde7842d/command.go#L768-L785 -// for the original. 
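
For providers built directly on the generated pulumirpc types rather than on this plugin wrapper, the same identity arrives as optional proto fields, and the getters added further down in this patch make reading them safe. providerIdentity below is an illustrative helper, not SDK API; the nil check on Urn distinguishes an engine that never sent configure_with_urn from one sending empty strings.

    package myprovider

    import pulumirpc "github.com/pulumi/pulumi/sdk/v3/proto/go"

    // providerIdentity is illustrative only. ok is false when the engine did
    // not offer configure_with_urn during Handshake, in which case all four
    // identity fields are unset.
    func providerIdentity(req *pulumirpc.ConfigureRequest) (urn, name, typ, id string, ok bool) {
    	if req.Urn == nil {
    		return "", "", "", "", false
    	}
    	return req.GetUrn(), req.GetName(), req.GetType(), req.GetId(), true
    }
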
-func runPostCommandHooks(c *cobra.Command, args []string) error { - if c.PostRunE != nil { - if err := c.PostRunE(c, args); err != nil { - return err - } - } else if c.PostRun != nil { - c.PostRun(c, args) - } - for p := c; p != nil; p = p.Parent() { - if p.PersistentPostRunE != nil { - if err := p.PersistentPostRunE(c, args); err != nil { - return err - } - break - } else if p.PersistentPostRun != nil { - p.PersistentPostRun(c, args) - break +// RunFunc wraps an error-returning run func with standard Pulumi error handling. All +// Pulumi commands should wrap themselves in this to ensure consistent and appropriate +// error behavior. In particular, we want to avoid any calls to os.Exit in the middle of +// a callstack which might prohibit reaping of child processes, resources, etc. And we +// wish to avoid the default Cobra unhandled error behavior, because it is formatted +// incorrectly and needlessly prints usage. +// +// If run returns a BailError, we will not print an error message. +// +// Deprecated: Instead of using [RunFunc], you should call [DisplayErrorMessage] and then +// manually exit with `os.Exit(-1)` +func RunFunc(run func(cmd *cobra.Command, args []string) error) func(*cobra.Command, []string) { + return func(cmd *cobra.Command, args []string) { + err := run(cmd, args) + if err != nil { + DisplayErrorMessage(err) + os.Exit(-1) } } - return nil } -// RunFunc wraps an error-returning run func with standard Pulumi error handling. All Pulumi -// commands should wrap themselves in this or [RunResultFunc] to ensure consistent and appropriate -// error behavior. In particular, we want to avoid any calls to os.Exit in the middle of a -// callstack which might prohibit reaping of child processes, resources, etc. And we wish to avoid -// the default Cobra unhandled error behavior, because it is formatted incorrectly and needlessly -// prints usage. +// DisplayErrorMessage displays an error message to the user. // -// If run returns a BailError, we will not print an error message, but will still be a non-zero exit code. -func RunFunc(run func(cmd *cobra.Command, args []string) error) func(*cobra.Command, []string) { - return func(cmd *cobra.Command, args []string) { - if err := run(cmd, args); err != nil { - // Sadly, the fact that we hard-exit below means that it's up to us to replicate the Cobra post-run - // behavior here. - if postRunErr := runPostCommandHooks(cmd, args); postRunErr != nil { - err = result.MergeBails(err, postRunErr) - } - - // If we were asked to bail, that means we already printed out a message. We just need - // to quit at this point (with an error code so no one thinks we succeeded). Bailing - // always indicates a failure, just one we don't need to print a message for. - if result.IsBail(err) { - os.Exit(-1) - return - } - - // If there is a stack trace, and logging is enabled, append it. Otherwise, debug logging it. - var msg string - if logging.LogToStderr { - msg = DetailedError(err) - } else { - msg = errorMessage(err) - logging.V(3).Info(DetailedError(err)) - } +// DisplayErrorMessage respects [result.IsBail] and [logging.LogToStderr]. +func DisplayErrorMessage(err error) { + // If we were asked to bail, that means we already printed out a message. We just need + // to quit at this point (with an error code so no one thinks we succeeded). Bailing + // always indicates a failure, just one we don't need to print a message for. 
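
The deprecation note on RunFunc translates to the following call-site pattern; newUpCmd and runUp are illustrative stand-ins, not commands from this patch:

    package cmd

    import (
    	"os"

    	"github.com/pulumi/pulumi/sdk/v3/go/common/util/cmdutil"
    	"github.com/spf13/cobra"
    )

    // runUp stands in for the command's real implementation.
    func runUp(cmd *cobra.Command, args []string) error { return nil }

    func newUpCmd() *cobra.Command {
    	return &cobra.Command{
    		Use: "up",
    		Run: func(cmd *cobra.Command, args []string) {
    			if err := runUp(cmd, args); err != nil {
    				// Print the message (respecting bails and verbose
    				// logging), then exit at the top of the callstack.
    				cmdutil.DisplayErrorMessage(err)
    				os.Exit(-1)
    			}
    		},
    	}
    }
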
+	if err == nil || result.IsBail(err) {
+		return
+	}
-			ExitError(msg)
-		}
+	var msg string
+	if logging.LogToStderr {
+		msg = DetailedError(err)
+	} else {
+		msg = errorMessage(err)
+		logging.V(3).Info(DetailedError(err))
 	}
+
+	Diag().Errorf(diag.Message("", "%s"), msg)
 }
 
 // Exit exits with a given error.
@@ -132,15 +110,8 @@ func Exit(err error) {
 
 // ExitError issues an error and exits with a standard error exit code.
 func ExitError(msg string) {
-	// Escape percent sign before passing the message as a format string (e.g., msg could contain %PATH% on Windows).
-	format := strings.ReplaceAll(msg, "%", "%%")
-	exitErrorCodef(-1, format)
-}
-
-// exitErrorCodef formats the message with arguments, issues an error and exists with the given error exit code.
-func exitErrorCodef(code int, format string, args ...interface{}) {
-	Diag().Errorf(diag.Message("", format), args...)
-	os.Exit(code)
+	Diag().Errorf(diag.Message("", "%s"), msg)
+	os.Exit(-1)
 }
 
 // errorMessage returns a message, possibly cleaning up the text if appropriate.
diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/util/errutil/format_exiterr.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/util/errutil/format_exiterr.go
new file mode 100644
index 000000000..856cd63f4
--- /dev/null
+++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/util/errutil/format_exiterr.go
@@ -0,0 +1,34 @@
+// Copyright 2025, Pulumi Corporation.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package errutil
+
+import (
+	"errors"
+	"fmt"
+	"os/exec"
+	"strings"
+)
+
+// ErrorWithStderr returns an error that includes the stderr output if the error is an ExitError.
+func ErrorWithStderr(err error, message string) error {
+	var exitErr *exec.ExitError
+	if errors.As(err, &exitErr) {
+		stderr := strings.TrimSpace(string(exitErr.Stderr))
+		if len(stderr) > 0 {
+			return fmt.Errorf("%s: %w: %s", message, exitErr, stderr)
+		}
+	}
+	return fmt.Errorf("%s: %w", message, err)
+}
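
A typical use of the new errutil.ErrorWithStderr helper, sketched below with an arbitrary command; note that exec.Cmd.Output (unlike Run) is what populates ExitError.Stderr for the helper to unwrap.

    package pluginexec

    import (
    	"os/exec"

    	"github.com/pulumi/pulumi/sdk/v3/go/common/util/errutil"
    )

    // runPluginHelp shells out to an arbitrary binary; the name is illustrative.
    func runPluginHelp() error {
    	// Output captures stderr into the returned *exec.ExitError, which is
    	// what ErrorWithStderr inspects and appends to the message.
    	if _, err := exec.Command("pulumi-language-go", "--help").Output(); err != nil {
    		return errutil.ErrorWithStderr(err, "running language plugin")
    	}
    	return nil
    }
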
diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/workspace/plugins.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/workspace/plugins.go
index 2cdfddb7c..6905032ef 100644
--- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/workspace/plugins.go
+++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/workspace/plugins.go
@@ -1035,6 +1035,17 @@ func NewPluginSpec(
 			// Prefix the url with `git://`, so we can later recognize this as a git URL.
 			pluginDownloadURL = "git://" + u.String()
 			isGitPlugin = true
+			// If there is no version specified, we version the plugin ourselves. This way the user gets
+			// a consistent experience once the plugin is installed, and won't have any problems when the repo
+			// is updated. The version will then be added to the plugins SDK, and will be reused when the NewPluginSpec
+			// is used, so the user gets a consistent experience.
+			if versionStr == "" && version == nil {
+				var err error
+				version, err = gitutil.GetLatestTagOrHash(context.Background(), url)
+				if err != nil {
+					return PluginSpec{}, err
+				}
+			}
 		}
 	}
@@ -2293,6 +2304,11 @@ func getPluginInfoAndPath(
 		match = LegacySelectCompatiblePlugin(plugins, kind, name, version)
 	}
+	// If the plugin is located in a subdir, we need to fix up the path to include the subdir.
+	if subdir != "" && match != nil {
+		match.Path = filepath.Join(match.Path, subdir)
+	}
+
 	if match != nil {
 		matchPath := getPluginPath(match)
 		logging.V(6).Infof("GetPluginPath(%s, %s, %v): found in cache at %s", kind, name, version, matchPath)
@@ -2331,10 +2347,6 @@ func SelectPrereleasePlugin(
 ) *PluginInfo {
 	for _, cur := range plugins {
 		if cur.Kind == kind && cur.Name == name && cur.Version != nil && cur.Version.EQ(*version) {
-			// If the plugin is located in a subdir, we need to fix up the path to include the subdir.
-			if subdir != "" {
-				cur.Path = filepath.Join(cur.Path, subdir)
-			}
 			return &cur
 		}
 	}
@@ -2496,6 +2508,8 @@ func getCandidateExtensions() []string {
 	return []string{""}
 }
+var PluginNameRegexp = regexp.MustCompile(`^(?P<name>[a-zA-Z0-9-][a-zA-Z0-9-_.]*[a-zA-Z0-9])$`)
+
 // pluginRegexp matches plugin directory names: pulumi-KIND-NAME-VERSION.
 var pluginRegexp = regexp.MustCompile(
 	"^(?P<Kind>[a-z]+)-" + // KIND
diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/workspace/templates.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/workspace/templates.go
index 9048a54c0..a5ce2c94a 100644
--- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/workspace/templates.go
+++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/workspace/templates.go
@@ -288,27 +288,37 @@ func RetrieveTemplates(ctx context.Context, templateNamePathOrURL string, offlin
 		return RetrieveZIPTemplates(templateNamePathOrURL)
 	}
 	if IsTemplateURL(templateNamePathOrURL) {
-		return retrieveURLTemplates(ctx, templateNamePathOrURL, offline, templateKind)
+		return retrieveURLTemplates(ctx, templateNamePathOrURL, offline)
 	}
 	if isTemplateFileOrDirectory(templateNamePathOrURL) {
 		return retrieveFileTemplates(templateNamePathOrURL)
 	}
-	return retrievePulumiTemplates(ctx, templateNamePathOrURL, offline, templateKind)
+
+	// We now assume that templateNamePathOrURL is a template name that points to the
+	// global templates set.
+
+	pulumiTemplates, err := retrievePulumiTemplates(ctx, offline, templateKind)
+	if err != nil {
+		return TemplateRepository{}, err
+	}
+
+	if templateNamePathOrURL != "" {
+		return findSpecificTemplate(pulumiTemplates, templateNamePathOrURL)
+	}
+
+	return pulumiTemplates, nil
 }
 
 // retrieveURLTemplates retrieves the "template repository" at the specified URL.
 func retrieveURLTemplates(
-	ctx context.Context, rawurl string, offline bool, templateKind TemplateKind,
+	ctx context.Context, rawurl string, offline bool,
 ) (TemplateRepository, error) {
 	if offline {
 		return TemplateRepository{}, fmt.Errorf("cannot use %s offline", rawurl)
 	}
-	var err error
-
-	// Create a temp dir.
-	var temp string
-	if temp, err = os.MkdirTemp("", "pulumi-template-"); err != nil {
+	temp, err := os.MkdirTemp("", "pulumi-template-")
+	if err != nil {
 		return TemplateRepository{}, err
 	}
@@ -337,10 +347,8 @@ func retrieveFileTemplates(path string) (TemplateRepository, error) {
 
 // Instead of retrieving to a temporary directory, the Pulumi templates are managed from
 // ~/.pulumi/templates.
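
With PluginNameRegexp exported, callers can validate candidate plugin names up front. A small sketch; the sample names are arbitrary, and the (?P<name>…) capture group is the reconstruction shown above:

    package main

    import (
    	"fmt"

    	"github.com/pulumi/pulumi/sdk/v3/go/common/workspace"
    )

    func main() {
    	for _, name := range []string{"aws", "azure-native", "not a plugin!"} {
    		fmt.Printf("%-14s valid=%v\n", name, workspace.PluginNameRegexp.MatchString(name))
    	}
    }
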
func retrievePulumiTemplates( - ctx context.Context, templateName string, offline bool, templateKind TemplateKind, + ctx context.Context, offline bool, templateKind TemplateKind, ) (TemplateRepository, error) { - templateName = strings.ToLower(templateName) - // Cleanup the template directory. if err := cleanupLegacyTemplateDir(templateKind); err != nil { return TemplateRepository{}, err @@ -371,27 +379,29 @@ func retrievePulumiTemplates( } } - subDir := templateDir - if templateName != "" { - subDir = filepath.Join(subDir, templateName) - - // Provide a nicer error message when the template can't be found (dir doesn't exist). - _, err := os.Stat(subDir) - if err != nil { - if errors.Is(err, fs.ErrNotExist) { - return TemplateRepository{}, newTemplateNotFoundError(templateDir, templateName) - } - contract.IgnoreError(err) - } - } - return TemplateRepository{ Root: templateDir, - SubDirectory: subDir, + SubDirectory: templateDir, ShouldDelete: false, }, nil } +func findSpecificTemplate(repo TemplateRepository, name string) (TemplateRepository, error) { + subDir := filepath.Join(repo.Root, name) + + // Provide a nicer error message when the template can't be found (dir doesn't exist). + _, err := os.Stat(subDir) + if err != nil { + if errors.Is(err, fs.ErrNotExist) { + return TemplateRepository{}, newTemplateNotFoundError(repo.Root, name) + } + contract.IgnoreError(err) + } + + repo.SubDirectory = subDir + return repo, nil +} + // RetrieveGitFolder downloads the repo to path and returns the full path on disk. func RetrieveGitFolder(ctx context.Context, rawurl string, path string) (string, error) { url, urlPath, err := gitutil.ParseGitRepoURL(rawurl) @@ -418,19 +428,22 @@ func RetrieveGitFolder(ctx context.Context, rawurl string, path string) (string, refAttempts = []plumbing.ReferenceName{ref} } - var cloneErr error + var cloneErrs []error for _, ref := range refAttempts { // Attempt the clone. 
If it succeeds, break
-		cloneErr = gitutil.GitCloneOrPull(ctx, url, ref, path, true /*shallow*/)
-		if cloneErr == nil {
+			err := gitutil.GitCloneOrPull(ctx, url, ref, path, true /*shallow*/)
+			if err == nil {
 				break
 			}
+			logging.V(10).Infof("Failed to clone %s@%s: %v", url, ref, err)
+			cloneErrs = append(cloneErrs, fmt.Errorf("ref '%s': %w", ref, err))
 		}
-		if cloneErr != nil {
-			return "", fmt.Errorf("failed to clone ref '%s': %w", refAttempts[len(refAttempts)-1], cloneErr)
+		if len(cloneErrs) == len(refAttempts) {
+			return "", fmt.Errorf("failed to clone %s: %w", rawurl, errors.Join(cloneErrs...))
 		}
 	} else {
 		if cloneErr := gitutil.GitCloneAndCheckoutCommit(ctx, url, commit, path); cloneErr != nil {
+			logging.V(10).Infof("Failed to clone %s@%s: %v", url, commit, cloneErr)
 			return "", fmt.Errorf("failed to clone and checkout %s(%s): %w", url, commit, cloneErr)
 		}
 	}
@@ -440,9 +453,11 @@ func RetrieveGitFolder(ctx context.Context, rawurl string, path string) (string,
 	logging.V(10).Infof("Cloned %s at commit %s@%s to %s", url, ref, commit, fullPath)
 	info, err := os.Stat(fullPath)
 	if err != nil {
+		logging.V(10).Infof("Failed to stat %s after cloning %s: %v", fullPath, url, err)
 		return "", err
 	}
 	if !info.IsDir() {
+		logging.V(10).Infof("%s was not a directory after cloning %s", fullPath, url)
 		return "", fmt.Errorf("%s is not a directory", fullPath)
 	}
 
@@ -692,16 +707,35 @@ func newExistingFilesError(existing []string) error {
 	return errors.New(message)
 }
 
+type TemplateNotFoundError struct {
+	templateName string
+	suggestions  []string
+}
+
+func (err TemplateNotFoundError) Error() string {
+	message := fmt.Sprintf("template '%s' not found", err.templateName)
+	if len(err.suggestions) == 0 {
+		return message
+	}
+
+	// Build up the error message with suggestions.
+	message = message + "\n\nDid you mean this?\n"
+	for _, suggestion := range err.suggestions {
+		message = message + fmt.Sprintf("\t%s\n", suggestion)
+	}
+
+	return message
+}
+
 // newTemplateNotFoundError returns an error for when the template doesn't exist,
 // offering distance-based suggestions in the error message.
-func newTemplateNotFoundError(templateDir string, templateName string) error {
-	message := fmt.Sprintf("template '%s' not found", templateName)
-
+func newTemplateNotFoundError(templateDir string, templateName string) TemplateNotFoundError {
 	// Attempt to read the directory to offer suggestions.
 	entries, err := os.ReadDir(templateDir)
 	if err != nil {
-		contract.IgnoreError(err)
-		return errors.New(message)
+		// We failed to build suggestions, so just return the error we have.
+		return TemplateNotFoundError{
+			templateName: templateName,
+		}
 	}
 
 	// Get suggestions based on levenshtein distance.
@@ -715,15 +749,7 @@ func newTemplateNotFoundError(templateDir string, templateName string) error {
 		}
 	}
 
-	// Build-up error message with suggestions.
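
Because the not-found case is now a typed error returned by value, callers can branch on it with errors.As instead of matching message text. A sketch, assuming TemplateKindPulumiProject is the relevant kind constant:

    package templatepick

    import (
    	"context"
    	"errors"

    	"github.com/pulumi/pulumi/sdk/v3/go/common/workspace"
    )

    func pickTemplate(ctx context.Context, name string) (workspace.TemplateRepository, error) {
    	repo, err := workspace.RetrieveTemplates(ctx, name, false /*offline*/, workspace.TemplateKindPulumiProject)
    	if err != nil {
    		var notFound workspace.TemplateNotFoundError
    		if errors.As(err, &notFound) {
    			// Surface notFound's "Did you mean" suggestions, fall back
    			// to an interactive picker, and so on.
    		}
    		return workspace.TemplateRepository{}, err
    	}
    	return repo, nil
    }
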
- if len(suggestions) > 0 { - message = message + "\n\nDid you mean this?\n" - for _, suggestion := range suggestions { - message = message + fmt.Sprintf("\t%s\n", suggestion) - } - } - - return errors.New(message) + return TemplateNotFoundError{templateName, suggestions} } // transform returns a new string with ${PROJECT} and ${DESCRIPTION} replaced by diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/pulumi/context.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/pulumi/context.go index b28f437db..9ac347f33 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/pulumi/context.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/pulumi/context.go @@ -278,6 +278,9 @@ func (ctx *Context) Organization() string { // Project returns the current project name. func (ctx *Context) Project() string { return ctx.state.info.Project } +// RootDirectory returns the root of the current project, the location of the Pulumi.yaml. +func (ctx *Context) RootDirectory() string { return ctx.state.info.RootDirectory } + // Stack returns the current stack name being deployed into. func (ctx *Context) Stack() string { return ctx.state.info.Stack } @@ -421,7 +424,7 @@ func (ctx *Context) registerTransform(t ResourceTransform) (*pulumirpc.Callback, if umProperties == nil { umProperties = Map{} } - mProperties, _, err := marshalInput(umProperties, anyType, true) + mProperties, _, err := marshalInput(umProperties, anyType) if err != nil { return nil, fmt.Errorf("marshaling properties: %w", err) } @@ -596,7 +599,7 @@ func (ctx *Context) registerInvokeTransform(t InvokeTransform) (*pulumirpc.Callb if umArgs == nil { umArgs = Map{} } - mArgs, _, err := marshalInput(umArgs, anyType, true) + mArgs, _, err := marshalInput(umArgs, anyType) if err != nil { return nil, fmt.Errorf("marshaling properties: %w", err) } @@ -697,7 +700,9 @@ func (ctx *Context) invokePackageRaw( if args == nil { args = struct{}{} } - resolvedArgs, _, err := marshalInput(args, anyType, false) + resolvedArgs, _, err := marshalInputOptions(args, anyType, &marshalOptions{ + ErrorOnOutput: true, + }) if err != nil { return nil, fmt.Errorf("marshaling arguments: %w", err) } @@ -972,7 +977,12 @@ func (ctx *Context) CallPackage( } // Serialize all args, first by awaiting them, and then marshaling them to the requisite gRPC values. - resolvedArgs, argDeps, _, err := marshalInputs(args) + resolvedArgs, argDeps, _, err := marshalInputsOptions(args, &marshalOptions{ + // Exclude resource references from `argDependencies` when serializing inputs for call. + // This way, providers creating output instances based on `argDependencies` won't create + // outputs for properties that only contain resource references. + ExcludeResourceRefsFromDeps: ctx.state.keepResources, + }) if err != nil { return nil, fmt.Errorf("marshaling args: %w", err) } @@ -980,7 +990,12 @@ func (ctx *Context) CallPackage( // If we have a value for self, add it to the arguments. if self != nil { var deps []URN - resolvedSelf, selfDeps, err := marshalInput(self, reflect.TypeOf(self), true) + resolvedSelf, selfDeps, err := marshalInputOptions(self, reflect.TypeOf(self), &marshalOptions{ + // Exclude resource references from `argDependencies` when serializing inputs for call. + // This way, providers creating output instances based on `argDependencies` won't create + // outputs for properties that only contain resource references. 
+			ExcludeResourceRefsFromDeps: ctx.state.keepResources,
+		})
 		if err != nil {
 			return nil, fmt.Errorf("marshaling __self__: %w", err)
 		}
@@ -2186,7 +2201,14 @@ func (ctx *Context) prepareResourceInputs(res Resource, props Input, t string, o
 	}
 
 	// Serialize all properties, first by awaiting them, and then marshaling them to the requisite gRPC values.
-	resolvedProps, propertyDeps, rpcDeps, err := marshalInputs(props)
+	resolvedProps, propertyDeps, rpcDeps, err := marshalInputsOptions(props, &marshalOptions{
+		// When remote is true (and the monitor supports resource references),
+		// exclude resource references from `propertyDependencies`. This way,
+		// component providers creating outputs for component inputs based on
+		// `propertyDependencies` won't create outputs for properties that only
+		// contain resource references.
+		ExcludeResourceRefsFromDeps: remote && ctx.state.keepResources,
+	})
 	if err != nil {
 		return nil, fmt.Errorf("marshaling properties: %w", err)
 	}
@@ -2431,7 +2453,7 @@ func (ctx *Context) RegisterResourceOutputs(resource Resource, outs Map) error {
 			return
 		}
 
-		outsResolved, _, err := marshalInput(outs, anyType, true)
+		outsResolved, _, err := marshalInput(outs, anyType)
 		if err != nil {
 			return
 		}
diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/pulumi/rpc.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/pulumi/rpc.go
index 9275db8b6..3e2261148 100644
--- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/pulumi/rpc.go
+++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/pulumi/rpc.go
@@ -103,8 +103,25 @@ func expandDependencies(ctx context.Context, deps []Resource) (map[URN]Resource,
 	return set, nil
 }
 
+// marshalOptions controls the options for marshaling inputs.
+type marshalOptions struct {
+	// Set to true to error if any Outputs are present; otherwise Outputs will be awaited.
+	ErrorOnOutput bool
+
+	// Set to true to exclude resource references from the set of dependencies identified
+	// during marshaling. This is useful for remote components (i.e. MLCs) where we want
+	// propertyDependencies to be empty for a property that only contains resource
+	// references.
+	ExcludeResourceRefsFromDeps bool
+}
+
 // marshalInputs turns resource property inputs into a map suitable for marshaling.
 func marshalInputs(props Input) (resource.PropertyMap, map[string][]URN, []URN, error) {
+	return marshalInputsOptions(props, nil)
+}
+
+// marshalInputsOptions turns resource property inputs into a map suitable for marshaling.
+func marshalInputsOptions(props Input, opts *marshalOptions) (resource.PropertyMap, map[string][]URN, []URN, error) {
 	deps := map[URN]struct{}{}
 	pmap, pdeps := resource.PropertyMap{}, map[string][]URN{}
 
@@ -114,7 +131,7 @@ func marshalInputs(props Input) (resource.PropertyMap, map[string][]URN, []URN,
 	marshalProperty := func(pname string, pv interface{}, pt reflect.Type) error {
 		// Get the underlying value, possibly waiting for an output to arrive.
-		v, resourceDeps, err := marshalInput(pv, pt, true)
+		v, resourceDeps, err := marshalInputOptions(pv, pt, opts)
 		if err != nil {
 			return fmt.Errorf("awaiting input property %q: %w", pname, err)
 		}
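
The shape of this refactor is the nil-options pattern distilled below; marshalValue and marshalValueOptions are toy names, not SDK functions. A nil opts means "historical defaults", so every pre-existing caller keeps its behavior without a signature change.

    package marshaldemo

    import "fmt"

    // marshalOpts mirrors the role of marshalOptions above, in miniature.
    type marshalOpts struct {
    	errorOnOutput bool
    }

    // marshalValue keeps the legacy signature; forwarding nil opts preserves
    // the historical behavior, so existing callers compile and run unchanged.
    func marshalValue(v any) (string, error) {
    	return marshalValueOptions(v, nil)
    }

    func marshalValueOptions(v any, opts *marshalOpts) (string, error) {
    	// In the real code this check only fires for unresolved Outputs; the
    	// toy treats every value as already resolved.
    	if opts != nil && opts.errorOnOutput {
    		// ...reject unresolved values here...
    	}
    	return fmt.Sprint(v), nil
    }
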
@@ -208,14 +225,21 @@
 const rpcTokenUnknownValue = "04da6b54-80e4-46f7-96ec-b56ff0331ba9"
 
 const cannotAwaitFmt = "cannot marshal Output value of type %T; please use Apply to access the Output's value"
 
 // marshalInput marshals an input value, returning its raw serializable value along with any dependencies.
-func marshalInput(v interface{}, destType reflect.Type, await bool) (resource.PropertyValue, []Resource, error) {
-	return marshalInputImpl(v, destType, await, false /*skipInputCheck*/)
+func marshalInput(v interface{}, destType reflect.Type) (resource.PropertyValue, []Resource, error) {
+	return marshalInputOptions(v, destType, nil)
+}
+
+// marshalInputOptions marshals an input value, returning its raw serializable value along with any dependencies.
+func marshalInputOptions(
+	v interface{}, destType reflect.Type, opts *marshalOptions,
+) (resource.PropertyValue, []Resource, error) {
+	return marshalInputOptionsImpl(v, destType, opts, false /*skipInputCheck*/)
 }
 
 // marshalInputImpl marshals an input value, returning its raw serializable value along with any dependencies.
-func marshalInputImpl(v interface{},
+func marshalInputOptionsImpl(v interface{},
 	destType reflect.Type,
-	await,
+	opts *marshalOptions,
 	skipInputCheck bool,
 ) (resource.PropertyValue, []Resource, error) {
 	var deps []Resource
@@ -253,7 +277,7 @@ func marshalInputImpl(v interface{},
 
 	// If the input is an Output, await its value. The returned value is fully resolved.
 	if output, ok := input.(Output); ok {
-		if !await {
+		if opts != nil && opts.ErrorOnOutput {
 			return resource.PropertyValue{}, nil, fmt.Errorf(cannotAwaitFmt, output)
 		}
 
@@ -266,7 +290,7 @@ func marshalInputImpl(v interface{},
 		// Get the underlying value, if known.
 		var element resource.PropertyValue
 		if known {
-			element, _, err = marshalInputImpl(ov, destType, await, true /*skipInputCheck*/)
+			element, _, err = marshalInputOptionsImpl(ov, destType, opts, true /*skipInputCheck*/)
 			if err != nil {
 				return resource.PropertyValue{}, nil, err
 			}
@@ -338,7 +362,7 @@ func marshalInputImpl(v interface{},
 		if as := v.Assets(); as != nil {
 			assets = make(map[string]interface{})
 			for k, a := range as {
-				aa, _, err := marshalInput(a, anyType, await)
+				aa, _, err := marshalInputOptions(a, anyType, opts)
 				if err != nil {
 					return resource.PropertyValue{}, nil, err
 				}
@@ -351,7 +375,9 @@ func marshalInputImpl(v interface{},
 			URI: v.URI(),
 		}), deps, nil
 	case Resource:
-		deps = append(deps, v)
+		if opts == nil || !opts.ExcludeResourceRefsFromDeps {
+			deps = append(deps, v)
+		}
 
 		urn, known, secretURN, err := v.URN().awaitURN(context.Background())
 		if err != nil {
@@ -425,7 +451,7 @@ func marshalInputImpl(v interface{},
 		arr := make([]resource.PropertyValue, 0, rv.Len())
 		for i := 0; i < rv.Len(); i++ {
 			elem := rv.Index(i)
-			e, d, err := marshalInput(elem.Interface(), destElem, await)
+			e, d, err := marshalInputOptions(elem.Interface(), destElem, opts)
 			if err != nil {
 				return resource.PropertyValue{}, nil, err
 			}
@@ -451,7 +477,7 @@ func marshalInputImpl(v interface{},
 		obj := resource.PropertyMap{}
 		for _, key := range rv.MapKeys() {
 			value := rv.MapIndex(key)
-			mv, d, err := marshalInput(value.Interface(), destElem, await)
+			mv, d, err := marshalInputOptions(value.Interface(), destElem, opts)
 			if err != nil {
 				return resource.PropertyValue{}, nil, err
 			}
@@ -473,7 +499,7 @@ func marshalInputImpl(v interface{},
 			continue
 		}
 
-		fv, d, err := marshalInput(rv.Field(i).Interface(), destField.Type, await)
+		fv, d, err := marshalInputOptions(rv.Field(i).Interface(), destField.Type, opts)
 		if err != nil {
 			return resource.PropertyValue{}, nil, err
 		}
diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/pulumi/run.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/pulumi/run.go
index 355cf6d1f..d949d4f37 100644
--- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/pulumi/run.go
+++
b/vendor/github.com/pulumi/pulumi/sdk/v3/go/pulumi/run.go @@ -143,6 +143,7 @@ type RunFunc func(ctx *Context) error // RunInfo contains all the metadata about a run request. type RunInfo struct { Project string + RootDirectory string Stack string Config map[string]string ConfigSecretKeys []string @@ -181,6 +182,7 @@ func getEnvInfo() RunInfo { return RunInfo{ Organization: os.Getenv(EnvOrganization), Project: os.Getenv(EnvProject), + RootDirectory: os.Getenv(EnvPulumiRootDirectory), Stack: os.Getenv(EnvStack), Config: config, ConfigSecretKeys: configSecretKeys, @@ -197,6 +199,8 @@ const ( EnvOrganization = "PULUMI_ORGANIZATION" // EnvProject is the envvar used to read the current Pulumi project name. EnvProject = "PULUMI_PROJECT" + // EnvPulumiRootDirectory is the envvar used to read the current Pulumi project root, location of Pulumi.yaml. + EnvPulumiRootDirectory = "PULUMI_ROOT_DIRECTORY" // EnvStack is the envvar used to read the current Pulumi stack name. EnvStack = "PULUMI_STACK" // EnvConfig is the envvar used to read the current Pulumi configuration variables. diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/proto/go/provider.pb.go b/vendor/github.com/pulumi/pulumi/sdk/v3/proto/go/provider.pb.go index b0b00f983..5531e7f9c 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/proto/go/provider.pb.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/proto/go/provider.pb.go @@ -227,6 +227,8 @@ type ProviderHandshakeRequest struct { // in the case that the engine has been asked to attach to an existing running provider instance via a host/port // number), this field will be empty. ProgramDirectory *string `protobuf:"bytes,3,opt,name=program_directory,json=programDirectory,proto3,oneof" json:"program_directory,omitempty"` + // If true the engine will send URN, Name, Type, and ID to the provider as part of the configuration. + ConfigureWithUrn bool `protobuf:"varint,4,opt,name=configure_with_urn,json=configureWithUrn,proto3" json:"configure_with_urn,omitempty"` } func (x *ProviderHandshakeRequest) Reset() { @@ -282,11 +284,33 @@ func (x *ProviderHandshakeRequest) GetProgramDirectory() string { return "" } +func (x *ProviderHandshakeRequest) GetConfigureWithUrn() bool { + if x != nil { + return x.ConfigureWithUrn + } + return false +} + // `ProviderHandshakeResponse` is the type of responses sent by a [](pulumirpc.ResourceProvider.Handshake) call. type ProviderHandshakeResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + + // True if and only if the provider supports secrets. If true, the caller should pass secrets as strongly typed + // values to the provider. *Must* match the value returned in response to [](pulumirpc.ResourceProvider.Configure). + AcceptSecrets bool `protobuf:"varint,1,opt,name=accept_secrets,json=acceptSecrets,proto3" json:"accept_secrets,omitempty"` + // True if and only if the provider supports strongly typed resources. If true, the caller should pass resources as + // strongly typed values to the provider. *Must* match the value returned in response to + // [](pulumirpc.ResourceProvider.Configure). + AcceptResources bool `protobuf:"varint,2,opt,name=accept_resources,json=acceptResources,proto3" json:"accept_resources,omitempty"` + // True if and only if the provider supports output values as inputs. If true, the engine should pass output values + // to the provider where possible. *Must* match the value returned in response to + // [](pulumirpc.ResourceProvider.Configure). 
+ AcceptOutputs bool `protobuf:"varint,3,opt,name=accept_outputs,json=acceptOutputs,proto3" json:"accept_outputs,omitempty"` + // True if the provider accepts and respects autonaming configuration that the engine provides on behalf of the + // user. *Must* match the value returned in response to [](pulumirpc.ResourceProvider.Configure). + SupportsAutonamingConfiguration bool `protobuf:"varint,4,opt,name=supports_autonaming_configuration,json=supportsAutonamingConfiguration,proto3" json:"supports_autonaming_configuration,omitempty"` } func (x *ProviderHandshakeResponse) Reset() { @@ -321,6 +345,34 @@ func (*ProviderHandshakeResponse) Descriptor() ([]byte, []int) { return file_pulumi_provider_proto_rawDescGZIP(), []int{1} } +func (x *ProviderHandshakeResponse) GetAcceptSecrets() bool { + if x != nil { + return x.AcceptSecrets + } + return false +} + +func (x *ProviderHandshakeResponse) GetAcceptResources() bool { + if x != nil { + return x.AcceptResources + } + return false +} + +func (x *ProviderHandshakeResponse) GetAcceptOutputs() bool { + if x != nil { + return x.AcceptOutputs + } + return false +} + +func (x *ProviderHandshakeResponse) GetSupportsAutonamingConfiguration() bool { + if x != nil { + return x.SupportsAutonamingConfiguration + } + return false +} + // `ParameterizeRequest` is the type of requests sent as part of a [](pulumirpc.ResourceProvider.Parameterize) call. A // `ParameterizeRequest` may contain either: // @@ -645,6 +697,20 @@ type ConfigureRequest struct { // [](pulumirpc.ResourceProvider.Delete) calls. If true, the provider should expect these fields to be populated in // these calls. *Must* be true if the caller has previously called [](pulumirpc.ResourceProvider.Handshake). SendsOldInputsToDelete bool `protobuf:"varint,6,opt,name=sends_old_inputs_to_delete,json=sendsOldInputsToDelete,proto3" json:"sends_old_inputs_to_delete,omitempty"` + // The ID of the provider being configured. N.B. This will be null if configure_with_urn was false in + // Handshake. + Id *string `protobuf:"bytes,7,opt,name=id,proto3,oneof" json:"id,omitempty"` + // The URN of the provider being configured. N.B. This will be null if configure_with_urn was false in + // Handshake. + Urn *string `protobuf:"bytes,8,opt,name=urn,proto3,oneof" json:"urn,omitempty"` + // The name of the provider being configured. This must match the name specified by the `urn` field, and + // is passed so that providers do not have to implement URN parsing in order to extract the name of the + // provider. N.B. This will be null if configure_with_urn was false in Handshake. + Name *string `protobuf:"bytes,9,opt,name=name,proto3,oneof" json:"name,omitempty"` + // The type of the provider being configured. This must match the type specified by the `urn` field, and + // is passed so that providers do not have to implement URN parsing in order to extract the type of the + // provider. N.B. This will be null if configure_with_urn was false in Handshake. 
+ Type *string `protobuf:"bytes,10,opt,name=type,proto3,oneof" json:"type,omitempty"` } func (x *ConfigureRequest) Reset() { @@ -721,6 +787,34 @@ func (x *ConfigureRequest) GetSendsOldInputsToDelete() bool { return false } +func (x *ConfigureRequest) GetId() string { + if x != nil && x.Id != nil { + return *x.Id + } + return "" +} + +func (x *ConfigureRequest) GetUrn() string { + if x != nil && x.Urn != nil { + return *x.Urn + } + return "" +} + +func (x *ConfigureRequest) GetName() string { + if x != nil && x.Name != nil { + return *x.Name + } + return "" +} + +func (x *ConfigureRequest) GetType() string { + if x != nil && x.Type != nil { + return *x.Type + } + return "" +} + // `ConfigureResponse` is the type of responses sent by a [](pulumirpc.ResourceProvider.Configure) call. Its primary // purpose is to communicate features that the provider supports back to the caller. type ConfigureResponse struct { @@ -729,21 +823,24 @@ type ConfigureResponse struct { unknownFields protoimpl.UnknownFields // True if and only if the provider supports secrets. If true, the caller should pass secrets as strongly typed - // values to the provider. *Must* be true if the provider implements [](pulumirpc.ResourceProvider.Handshake). + // values to the provider. *Must* match the value returned in response to [](pulumirpc.ResourceProvider.Handshake) + // if the provider supports handshaking. AcceptSecrets bool `protobuf:"varint,1,opt,name=acceptSecrets,proto3" json:"acceptSecrets,omitempty"` // True if and only if the provider supports the `preview` field on [](pulumirpc.ResourceProvider.Create) and // [](pulumirpc.ResourceProvider.Update) calls. If true, the engine should invoke these calls with `preview` set to // `true` during previews. *Must* be true if the provider implements [](pulumirpc.ResourceProvider.Handshake). SupportsPreview bool `protobuf:"varint,2,opt,name=supportsPreview,proto3" json:"supportsPreview,omitempty"` // True if and only if the provider supports strongly typed resources. If true, the caller should pass resources as - // strongly typed values to the provider. *Must* be true if the provider implements - // [](pulumirpc.ResourceProvider.Handshake). + // strongly typed values to the provider. *Must* match the value returned in response to + // [](pulumirpc.ResourceProvider.Handshake) if the provider supports handshaking. AcceptResources bool `protobuf:"varint,3,opt,name=acceptResources,proto3" json:"acceptResources,omitempty"` // True if and only if the provider supports output values as inputs. If true, the engine should pass output values - // to the provider where possible. *Must* be true if the provider implements - // [](pulumirpc.ResourceProvider.Handshake). + // to the provider where possible. *Must* match the value returned in response to + // [](pulumirpc.ResourceProvider.Handshake) if the provider supports handshaking. AcceptOutputs bool `protobuf:"varint,4,opt,name=acceptOutputs,proto3" json:"acceptOutputs,omitempty"` - // True if the provider accepts and respects Autonaming configuration that the engine provides on behalf of user. + // True if the provider accepts and respects autonaming configuration that the engine provides on behalf of the + // user. *Must* match the value returned in response to [](pulumirpc.ResourceProvider.Handshake) if the provider + // supports handshaking. 
SupportsAutonamingConfiguration bool `protobuf:"varint,5,opt,name=supports_autonaming_configuration,json=supportsAutonamingConfiguration,proto3" json:"supports_autonaming_configuration,omitempty"` } @@ -2780,20 +2877,19 @@ func (x *ErrorResourceInitFailed) GetInputs() *structpb.Struct { return nil } -// GetMappingRequest allows providers to return ecosystem specific information to allow the provider to be -// converted from a source markup to Pulumi. It's expected that provider bridges that target a given ecosystem -// (e.g. Terraform, Kubernetes) would also publish a conversion plugin to convert markup from that ecosystem -// to Pulumi, using the bridged providers. +// `GetMappingRequest` is the type of requests sent as part of a [](pulumirpc.ResourceProvider.GetMapping) call. type GetMappingRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // the conversion key for the mapping being requested. + // The conversion key for the mapping being requested. This typically corresponds to the source language, such as + // `terraform` in the case of mapping Terraform names to Pulumi names. Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - // the optional provider key for the mapping being requested, if this is empty the provider should assume this - // request is from an old engine from before GetMappings and should return it's "primary" mapping. If this is set - // then the `provider` field in GetMappingResponse should be the same. + // An optional *source provider key* for the mapping being requested. If this is empty, the provider should assume + // that this request is from an old engine prior to the introduction of [](pulumirpc.ResourceProvider.GetMappings). + // In these cases the request should be answered with the "primary" mapping. If this field is set, the `provider` + // field in the corresponding [](pulumirpc.GetMappingResponse) should contain the same value. Provider string `protobuf:"bytes,2,opt,name=provider,proto3" json:"provider,omitempty"` } @@ -2843,16 +2939,17 @@ func (x *GetMappingRequest) GetProvider() string { return "" } -// GetMappingResponse returns convert plugin specific data for this provider. This will normally be human -// readable JSON, but the engine doesn't mandate any form. +// `GetMappingResponse` is the type of responses sent by a [](pulumirpc.ResourceProvider.GetMapping) call. The data +// within a `GetMappingResponse` will normally be human-readable JSON (e.g. an object mapping names from the source to +// Pulumi), but the engine doesn't mandate any specific format. type GetMappingResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // the provider key this is mapping for. For example the Pulumi provider "terraform-template" would return "template" for this. + // The *source provider key* that this mapping contains data for. Provider string `protobuf:"bytes,1,opt,name=provider,proto3" json:"provider,omitempty"` - // the conversion plugin specific data. + // Mapping data in a format specific to the conversion plugin/source language. Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` } @@ -2902,14 +2999,14 @@ func (x *GetMappingResponse) GetData() []byte { return nil } -// GetMappingsRequest allows providers to return ecosystem specific information without having to send back large data -// blobs for provider mappings that the engine doesn't then need. 
+// `GetMappingsRequest` is the type of requests sent as part of a [](pulumirpc.ResourceProvider.GetMappings) call. type GetMappingsRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // the conversion key for the mapping being requested. + // The conversion key for the mapping being requested. This typically corresponds to the source language, such as + // `terraform` in the case of mapping Terraform names to Pulumi names. Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` } @@ -2952,14 +3049,14 @@ func (x *GetMappingsRequest) GetKey() string { return "" } -// GetMappingsRequest returns a list of providers that this provider can provide mapping information for. +// `GetMappingsResponse` is the type of responses sent by a [](pulumirpc.ResourceProvider.GetMappings) call. type GetMappingsResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // the provider keys this provider can supply mappings for. For example the Pulumi provider "terraform-template" - // would return ["template"] for this. + // The set of *source provider keys* this provider can supply mappings for. For example the Pulumi provider + // `terraform-template` would return `["template"]` for this. Providers []string `protobuf:"bytes,1,rep,name=providers,proto3" json:"providers,omitempty"` } @@ -3526,7 +3623,7 @@ var file_pulumi_provider_proto_rawDesc = []byte{ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x22, 0xc8, 0x01, 0x0a, 0x18, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x48, + 0x74, 0x6f, 0x22, 0xf6, 0x01, 0x0a, 0x18, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x41, @@ -3536,600 +3633,623 @@ var file_pulumi_provider_proto_rawDesc = []byte{ 0x01, 0x01, 0x12, 0x30, 0x0a, 0x11, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x01, 0x52, 0x10, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, - 0x79, 0x88, 0x01, 0x01, 0x42, 0x11, 0x0a, 0x0f, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x5f, 0x64, 0x69, - 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x42, 0x14, 0x0a, 0x12, 0x5f, 0x70, 0x72, 0x6f, 0x67, - 0x72, 0x61, 0x6d, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x22, 0x1b, 0x0a, - 0x19, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, - 0x6b, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xad, 0x02, 0x0a, 0x13, 0x50, - 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x43, 0x0a, 0x04, 0x61, 0x72, 0x67, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x2d, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x50, 0x61, 0x72, - 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 
0x74, 0x65, 0x72, 0x73, 0x41, 0x72, 0x67, 0x73, 0x48, - 0x00, 0x52, 0x04, 0x61, 0x72, 0x67, 0x73, 0x12, 0x46, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, - 0x70, 0x63, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x69, 0x7a, 0x65, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, - 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x1a, - 0x24, 0x0a, 0x0e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x41, 0x72, 0x67, - 0x73, 0x12, 0x12, 0x0a, 0x04, 0x61, 0x72, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x04, 0x61, 0x72, 0x67, 0x73, 0x1a, 0x55, 0x0a, 0x0f, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, - 0x65, 0x72, 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, - 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0c, 0x0a, 0x0a, - 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x22, 0x44, 0x0a, 0x14, 0x50, 0x61, - 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x22, 0x84, 0x01, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, - 0x27, 0x0a, 0x0f, 0x73, 0x75, 0x62, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x5f, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x75, 0x62, 0x70, 0x61, 0x63, - 0x6b, 0x61, 0x67, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2d, 0x0a, 0x12, 0x73, 0x75, 0x62, 0x70, - 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x73, 0x75, 0x62, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, - 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x2b, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x53, 0x63, - 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, - 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x63, - 0x68, 0x65, 0x6d, 0x61, 0x22, 0xfd, 0x02, 0x0a, 0x10, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, - 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x48, 0x0a, 0x09, 0x76, 0x61, 0x72, - 0x69, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x70, - 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, - 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, - 0x6c, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x76, 0x61, 0x72, 0x69, 0x61, 0x62, - 0x6c, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x04, 0x61, 0x72, 0x67, 0x73, 
0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x04, 0x61, 0x72, 0x67, 0x73, - 0x12, 0x24, 0x0a, 0x0d, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, - 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x53, - 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x12, 0x28, 0x0a, 0x0f, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, - 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x0f, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, - 0x12, 0x28, 0x0a, 0x10, 0x73, 0x65, 0x6e, 0x64, 0x73, 0x5f, 0x6f, 0x6c, 0x64, 0x5f, 0x69, 0x6e, - 0x70, 0x75, 0x74, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x73, 0x65, 0x6e, 0x64, - 0x73, 0x4f, 0x6c, 0x64, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x1a, 0x73, 0x65, - 0x6e, 0x64, 0x73, 0x5f, 0x6f, 0x6c, 0x64, 0x5f, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x5f, 0x74, - 0x6f, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x16, - 0x73, 0x65, 0x6e, 0x64, 0x73, 0x4f, 0x6c, 0x64, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x54, 0x6f, - 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x1a, 0x3c, 0x0a, 0x0e, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, - 0x6c, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x3a, 0x02, 0x38, 0x01, 0x22, 0xff, 0x01, 0x0a, 0x11, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, - 0x72, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x0d, 0x61, 0x63, - 0x63, 0x65, 0x70, 0x74, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x79, 0x88, 0x01, 0x01, 0x12, 0x2c, 0x0a, 0x12, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, + 0x65, 0x5f, 0x77, 0x69, 0x74, 0x68, 0x5f, 0x75, 0x72, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x10, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x57, 0x69, 0x74, 0x68, 0x55, + 0x72, 0x6e, 0x42, 0x11, 0x0a, 0x0f, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x5f, 0x64, 0x69, 0x72, 0x65, + 0x63, 0x74, 0x6f, 0x72, 0x79, 0x42, 0x14, 0x0a, 0x12, 0x5f, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x61, + 0x6d, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x22, 0xe0, 0x01, 0x0a, 0x19, + 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x61, 0x63, 0x63, + 0x65, 0x70, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, - 0x12, 0x28, 0x0a, 0x0f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x50, 0x72, 0x65, 0x76, - 0x69, 0x65, 0x77, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x73, 0x75, 0x70, 0x70, 0x6f, - 0x72, 0x74, 0x73, 0x50, 0x72, 0x65, 0x76, 0x69, 0x65, 0x77, 0x12, 0x28, 0x0a, 0x0f, 0x61, 0x63, - 0x63, 0x65, 0x70, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, + 0x12, 0x29, 0x0a, 0x10, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x61, 0x63, 0x63, 
0x65, + 0x70, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x61, + 0x63, 0x63, 0x65, 0x70, 0x74, 0x5f, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x73, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x0d, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x4f, 0x75, 0x74, 0x70, 0x75, + 0x74, 0x73, 0x12, 0x4a, 0x0a, 0x21, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x5f, 0x61, + 0x75, 0x74, 0x6f, 0x6e, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1f, 0x73, + 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x41, 0x75, 0x74, 0x6f, 0x6e, 0x61, 0x6d, 0x69, 0x6e, + 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xad, + 0x02, 0x0a, 0x13, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x69, 0x7a, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x43, 0x0a, 0x04, 0x61, 0x72, 0x67, 0x73, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, + 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x41, + 0x72, 0x67, 0x73, 0x48, 0x00, 0x52, 0x04, 0x61, 0x72, 0x67, 0x73, 0x12, 0x46, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x70, 0x75, 0x6c, + 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, + 0x69, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, + 0x65, 0x74, 0x65, 0x72, 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x1a, 0x24, 0x0a, 0x0e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, + 0x73, 0x41, 0x72, 0x67, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x61, 0x72, 0x67, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x04, 0x61, 0x72, 0x67, 0x73, 0x1a, 0x55, 0x0a, 0x0f, 0x50, 0x61, 0x72, + 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x42, 0x0c, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x22, 0x44, + 0x0a, 0x14, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x69, 0x7a, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x84, 0x01, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x75, 0x62, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, + 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x75, + 0x62, 0x70, 0x61, 
0x63, 0x6b, 0x61, 0x67, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2d, 0x0a, 0x12, + 0x73, 0x75, 0x62, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x73, 0x75, 0x62, 0x70, 0x61, 0x63, + 0x6b, 0x61, 0x67, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x2b, 0x0a, 0x11, 0x47, + 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x16, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0xfc, 0x03, 0x0a, 0x10, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x48, 0x0a, + 0x09, 0x76, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x2a, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x56, 0x61, + 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x76, 0x61, + 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x04, 0x61, 0x72, 0x67, 0x73, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x04, + 0x61, 0x72, 0x67, 0x73, 0x12, 0x24, 0x0a, 0x0d, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x53, 0x65, + 0x63, 0x72, 0x65, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x61, 0x63, 0x63, + 0x65, 0x70, 0x74, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x12, 0x28, 0x0a, 0x0f, 0x61, 0x63, + 0x63, 0x65, 0x70, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x73, 0x12, 0x24, 0x0a, 0x0d, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x4f, 0x75, - 0x74, 0x70, 0x75, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x61, 0x63, 0x63, - 0x65, 0x70, 0x74, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x73, 0x12, 0x4a, 0x0a, 0x21, 0x73, 0x75, - 0x70, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x5f, 0x61, 0x75, 0x74, 0x6f, 0x6e, 0x61, 0x6d, 0x69, 0x6e, - 0x67, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x41, - 0x75, 0x74, 0x6f, 0x6e, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xb2, 0x01, 0x0a, 0x19, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x75, 0x72, 0x65, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, - 0x4b, 0x65, 0x79, 0x73, 0x12, 0x51, 0x0a, 0x0b, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x4b, - 0x65, 0x79, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x70, 0x75, 0x6c, 0x75, - 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x45, - 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x73, 0x2e, - 0x4d, 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x52, 0x0b, 0x6d, 0x69, 0x73, 0x73, - 0x69, 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x73, 0x1a, 0x42, 0x0a, 0x0a, 0x4d, 0x69, 0x73, 0x73, 0x69, - 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 
0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, - 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x8b, 0x01, 0x0a, 0x0d, - 0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, - 0x03, 0x74, 0x6f, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x74, 0x6f, 0x6b, 0x12, - 0x2b, 0x0a, 0x04, 0x61, 0x72, 0x67, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x04, 0x61, 0x72, 0x67, 0x73, 0x4a, 0x04, 0x08, 0x03, - 0x10, 0x07, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x52, 0x07, 0x76, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x52, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x52, 0x11, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x44, 0x6f, - 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x55, 0x52, 0x4c, 0x22, 0x76, 0x0a, 0x0e, 0x49, 0x6e, 0x76, - 0x6f, 0x6b, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x72, - 0x65, 0x74, 0x75, 0x72, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, - 0x72, 0x75, 0x63, 0x74, 0x52, 0x06, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x12, 0x33, 0x0a, 0x08, - 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, - 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, - 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x52, 0x08, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, - 0x73, 0x22, 0xae, 0x06, 0x0a, 0x0b, 0x43, 0x61, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x6f, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, - 0x74, 0x6f, 0x6b, 0x12, 0x2b, 0x0a, 0x04, 0x61, 0x72, 0x67, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x04, 0x61, 0x72, 0x67, 0x73, - 0x12, 0x55, 0x0a, 0x0f, 0x61, 0x72, 0x67, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, - 0x69, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x70, 0x75, 0x6c, 0x75, - 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x2e, 0x41, 0x72, 0x67, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x61, 0x72, 0x67, 0x44, 0x65, 0x70, 0x65, 0x6e, - 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x05, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, - 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x12, 0x2a, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x66, 
0x69, 0x67, 0x53, 0x65, 0x63, - 0x72, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x12, - 0x16, 0x0a, 0x06, 0x64, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x06, 0x64, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x72, 0x61, 0x6c, - 0x6c, 0x65, 0x6c, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x72, 0x61, 0x6c, - 0x6c, 0x65, 0x6c, 0x12, 0x28, 0x0a, 0x0f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x45, 0x6e, - 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6d, 0x6f, - 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x22, 0x0a, - 0x0c, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0e, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x32, 0x0a, 0x15, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x73, 0x5f, 0x6f, 0x75, 0x74, - 0x70, 0x75, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x13, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x73, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x73, 0x1a, 0x2a, 0x0a, 0x14, 0x41, 0x72, 0x67, 0x75, 0x6d, 0x65, 0x6e, - 0x74, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x12, 0x12, 0x0a, - 0x04, 0x75, 0x72, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x75, 0x72, 0x6e, - 0x73, 0x1a, 0x6f, 0x0a, 0x14, 0x41, 0x72, 0x67, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, - 0x63, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x41, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x70, 0x75, 0x6c, + 0x72, 0x63, 0x65, 0x73, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x65, 0x6e, 0x64, 0x73, 0x5f, 0x6f, 0x6c, + 0x64, 0x5f, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, + 0x73, 0x65, 0x6e, 0x64, 0x73, 0x4f, 0x6c, 0x64, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x12, 0x3a, + 0x0a, 0x1a, 0x73, 0x65, 0x6e, 0x64, 0x73, 0x5f, 0x6f, 0x6c, 0x64, 0x5f, 0x69, 0x6e, 0x70, 0x75, + 0x74, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x16, 0x73, 0x65, 0x6e, 0x64, 0x73, 0x4f, 0x6c, 0x64, 0x49, 0x6e, 0x70, 0x75, + 0x74, 0x73, 0x54, 0x6f, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x13, 0x0a, 0x02, 0x69, 0x64, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x02, 0x69, 0x64, 0x88, 0x01, 0x01, 0x12, + 0x15, 0x0a, 0x03, 0x75, 0x72, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x48, 0x01, 0x52, 0x03, + 0x75, 0x72, 0x6e, 0x88, 0x01, 0x01, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x09, + 0x20, 0x01, 0x28, 0x09, 0x48, 0x02, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x12, + 0x17, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x48, 0x03, 0x52, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x88, 0x01, 0x01, 0x1a, 0x3c, 0x0a, 0x0e, 0x56, 0x61, 0x72, 0x69, + 0x61, 0x62, 0x6c, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 
0x6c, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x05, 0x0a, 0x03, 0x5f, 0x69, 0x64, 0x42, 0x06, 0x0a, + 0x04, 0x5f, 0x75, 0x72, 0x6e, 0x42, 0x07, 0x0a, 0x05, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x42, 0x07, + 0x0a, 0x05, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x22, 0xff, 0x01, 0x0a, 0x11, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, + 0x0d, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x53, 0x65, 0x63, 0x72, + 0x65, 0x74, 0x73, 0x12, 0x28, 0x0a, 0x0f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x50, + 0x72, 0x65, 0x76, 0x69, 0x65, 0x77, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x73, 0x75, + 0x70, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x50, 0x72, 0x65, 0x76, 0x69, 0x65, 0x77, 0x12, 0x28, 0x0a, + 0x0f, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x24, 0x0a, 0x0d, 0x61, 0x63, 0x63, 0x65, 0x70, + 0x74, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, + 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x73, 0x12, 0x4a, 0x0a, + 0x21, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x5f, 0x61, 0x75, 0x74, 0x6f, 0x6e, 0x61, + 0x6d, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, + 0x74, 0x73, 0x41, 0x75, 0x74, 0x6f, 0x6e, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xb2, 0x01, 0x0a, 0x19, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x69, 0x73, 0x73, + 0x69, 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x51, 0x0a, 0x0b, 0x6d, 0x69, 0x73, 0x73, 0x69, + 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x70, + 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, + 0x72, 0x65, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x4b, 0x65, + 0x79, 0x73, 0x2e, 0x4d, 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x52, 0x0b, 0x6d, + 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x73, 0x1a, 0x42, 0x0a, 0x0a, 0x4d, 0x69, + 0x73, 0x73, 0x69, 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, + 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x8b, + 0x01, 0x0a, 0x0d, 0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x10, 0x0a, 0x03, 0x74, 0x6f, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x74, + 0x6f, 0x6b, 0x12, 0x2b, 0x0a, 0x04, 0x61, 0x72, 0x67, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x04, 0x61, 0x72, 0x67, 0x73, 0x4a, + 0x04, 0x08, 0x03, 0x10, 0x07, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x52, + 0x07, 0x76, 0x65, 
0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x52, 0x11, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x55, 0x52, 0x4c, 0x22, 0x76, 0x0a, 0x0e, + 0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, + 0x0a, 0x06, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x06, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x12, + 0x33, 0x0a, 0x08, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x17, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x68, + 0x65, 0x63, 0x6b, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x52, 0x08, 0x66, 0x61, 0x69, 0x6c, + 0x75, 0x72, 0x65, 0x73, 0x22, 0xae, 0x06, 0x0a, 0x0b, 0x43, 0x61, 0x6c, 0x6c, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x6f, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x74, 0x6f, 0x6b, 0x12, 0x2b, 0x0a, 0x04, 0x61, 0x72, 0x67, 0x73, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x04, 0x61, + 0x72, 0x67, 0x73, 0x12, 0x55, 0x0a, 0x0f, 0x61, 0x72, 0x67, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, + 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x70, + 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x2e, 0x41, 0x72, 0x67, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, + 0x63, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x61, 0x72, 0x67, 0x44, 0x65, + 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x2e, 0x41, 0x72, 0x67, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x44, 0x65, 0x70, 0x65, 0x6e, - 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, - 0x38, 0x01, 0x1a, 0x39, 0x0a, 0x0b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, - 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x4a, 0x04, 0x08, - 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x0d, 0x10, 0x0e, 0x4a, - 0x04, 0x08, 0x10, 0x10, 0x11, 0x4a, 0x04, 0x08, 0x0f, 0x10, 0x10, 0x52, 0x08, 0x70, 0x72, 0x6f, - 0x76, 0x69, 0x64, 0x65, 0x72, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x11, - 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x55, 0x52, - 0x4c, 0x52, 0x0f, 0x70, 0x6c, 0x75, 0x67, 
0x69, 0x6e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, - 0x6d, 0x73, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x22, 0xf2, 0x02, 0x0a, 0x0c, 0x43, 0x61, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x06, 0x72, 0x65, - 0x74, 0x75, 0x72, 0x6e, 0x12, 0x33, 0x0a, 0x08, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x73, - 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, - 0x70, 0x63, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x52, - 0x08, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x73, 0x12, 0x5f, 0x0a, 0x12, 0x72, 0x65, 0x74, - 0x75, 0x72, 0x6e, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, - 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, - 0x65, 0x74, 0x75, 0x72, 0x6e, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x12, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x44, 0x65, - 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x1a, 0x28, 0x0a, 0x12, 0x52, 0x65, - 0x74, 0x75, 0x72, 0x6e, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, + 0x73, 0x74, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2a, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x10, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4b, 0x65, + 0x79, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x18, 0x0a, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x06, 0x64, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, + 0x72, 0x61, 0x6c, 0x6c, 0x65, 0x6c, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, + 0x72, 0x61, 0x6c, 0x6c, 0x65, 0x6c, 0x12, 0x28, 0x0a, 0x0f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, + 0x72, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, + 0x12, 0x22, 0x0a, 0x0c, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x32, 0x0a, 0x15, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x73, 0x5f, + 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x11, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x13, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x73, 0x4f, 0x75, 0x74, 0x70, + 0x75, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x1a, 0x2a, 0x0a, 0x14, 0x41, 0x72, 0x67, 0x75, + 0x6d, 0x65, 0x6e, 0x74, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x72, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, - 0x75, 0x72, 0x6e, 0x73, 0x1a, 0x71, 0x0a, 0x17, 0x52, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x44, 0x65, - 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x45, 
0x6e, 0x74, 0x72, 0x79, 0x12, - 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, - 0x79, 0x12, 0x40, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x2a, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, - 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x74, 0x75, 0x72, 0x6e, - 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x52, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xd1, 0x03, 0x0a, 0x0c, 0x43, 0x68, 0x65, 0x63, - 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6e, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6e, 0x12, 0x2b, 0x0a, 0x04, 0x6f, 0x6c, - 0x64, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x75, 0x72, 0x6e, 0x73, 0x1a, 0x6f, 0x0a, 0x14, 0x41, 0x72, 0x67, 0x44, 0x65, 0x70, 0x65, 0x6e, + 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x41, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, + 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x41, 0x72, 0x67, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x44, 0x65, + 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x39, 0x0a, 0x0b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x0d, + 0x10, 0x0e, 0x4a, 0x04, 0x08, 0x10, 0x10, 0x11, 0x4a, 0x04, 0x08, 0x0f, 0x10, 0x10, 0x52, 0x08, + 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x52, 0x11, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, + 0x64, 0x55, 0x52, 0x4c, 0x52, 0x0f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x6f, 0x73, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xf2, 0x02, 0x0a, 0x0c, 0x43, 0x61, 0x6c, 0x6c, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, + 0x06, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x12, 0x33, 0x0a, 0x08, 0x66, 0x61, 0x69, 0x6c, 0x75, + 0x72, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x70, 0x75, 0x6c, 0x75, + 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x46, 0x61, 0x69, 0x6c, 0x75, + 0x72, 0x65, 0x52, 0x08, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x73, 0x12, 0x5f, 0x0a, 0x12, + 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, + 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x70, 0x75, 0x6c, 0x75, 
0x6d, + 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x2e, 0x52, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, + 0x63, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x12, 0x72, 0x65, 0x74, 0x75, 0x72, + 0x6e, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x1a, 0x28, 0x0a, + 0x12, 0x52, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, + 0x69, 0x65, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x72, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x04, 0x75, 0x72, 0x6e, 0x73, 0x1a, 0x71, 0x0a, 0x17, 0x52, 0x65, 0x74, 0x75, 0x72, + 0x6e, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x6b, 0x65, 0x79, 0x12, 0x40, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, + 0x43, 0x61, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x74, + 0x75, 0x72, 0x6e, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xd1, 0x03, 0x0a, 0x0c, 0x43, + 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x75, + 0x72, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6e, 0x12, 0x2b, 0x0a, + 0x04, 0x6f, 0x6c, 0x64, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, + 0x72, 0x75, 0x63, 0x74, 0x52, 0x04, 0x6f, 0x6c, 0x64, 0x73, 0x12, 0x2b, 0x0a, 0x04, 0x6e, 0x65, + 0x77, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, - 0x74, 0x52, 0x04, 0x6f, 0x6c, 0x64, 0x73, 0x12, 0x2b, 0x0a, 0x04, 0x6e, 0x65, 0x77, 0x73, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x04, - 0x6e, 0x65, 0x77, 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x72, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x53, 0x65, - 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x72, 0x61, 0x6e, 0x64, 0x6f, 0x6d, - 0x53, 0x65, 0x65, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, - 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x49, 0x0a, 0x0a, - 0x61, 0x75, 0x74, 0x6f, 0x6e, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x29, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x68, 0x65, - 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x6e, 0x61, - 0x6d, 0x69, 0x6e, 0x67, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0a, 0x61, 0x75, 0x74, - 0x6f, 0x6e, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x1a, 0xab, 0x01, 0x0a, 0x11, 0x41, 0x75, 0x74, 0x6f, - 0x6e, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x23, 0x0a, - 0x0d, 0x70, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 
0x09, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x65, 0x64, 0x4e, 0x61, - 0x6d, 0x65, 0x12, 0x42, 0x0a, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x2e, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x68, 0x65, - 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x6e, 0x61, - 0x6d, 0x69, 0x6e, 0x67, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4d, 0x6f, 0x64, 0x65, - 0x52, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x22, 0x2d, 0x0a, 0x04, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x0b, - 0x0a, 0x07, 0x50, 0x52, 0x4f, 0x50, 0x4f, 0x53, 0x45, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x45, - 0x4e, 0x46, 0x4f, 0x52, 0x43, 0x45, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x49, 0x53, 0x41, - 0x42, 0x4c, 0x45, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x52, 0x0e, 0x73, 0x65, 0x71, - 0x75, 0x65, 0x6e, 0x63, 0x65, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0x75, 0x0a, 0x0d, 0x43, - 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x06, - 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, - 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x06, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x12, 0x33, 0x0a, - 0x08, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x17, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x68, 0x65, 0x63, - 0x6b, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x52, 0x08, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, - 0x65, 0x73, 0x22, 0x42, 0x0a, 0x0c, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x46, 0x61, 0x69, 0x6c, 0x75, - 0x72, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x12, 0x16, - 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, - 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x22, 0x8f, 0x02, 0x0a, 0x0b, 0x44, 0x69, 0x66, 0x66, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6e, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6e, 0x12, 0x2b, 0x0a, 0x04, 0x6f, 0x6c, 0x64, 0x73, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x74, 0x52, 0x04, 0x6e, 0x65, 0x77, 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x72, 0x61, 0x6e, 0x64, 0x6f, + 0x6d, 0x53, 0x65, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x72, 0x61, 0x6e, + 0x64, 0x6f, 0x6d, 0x53, 0x65, 0x65, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, + 0x49, 0x0a, 0x0a, 0x61, 0x75, 0x74, 0x6f, 0x6e, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x41, 0x75, 0x74, + 0x6f, 0x6e, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0a, + 0x61, 0x75, 0x74, 0x6f, 0x6e, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x1a, 0xab, 0x01, 0x0a, 0x11, 0x41, + 0x75, 0x74, 0x6f, 0x6e, 0x61, 0x6d, 0x69, 
0x6e, 0x67, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x65, + 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x42, 0x0a, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x41, 0x75, 0x74, + 0x6f, 0x6e, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4d, + 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x22, 0x2d, 0x0a, 0x04, 0x4d, 0x6f, 0x64, + 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x50, 0x52, 0x4f, 0x50, 0x4f, 0x53, 0x45, 0x10, 0x00, 0x12, 0x0b, + 0x0a, 0x07, 0x45, 0x4e, 0x46, 0x4f, 0x52, 0x43, 0x45, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x44, + 0x49, 0x53, 0x41, 0x42, 0x4c, 0x45, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x52, 0x0e, + 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0x75, + 0x0a, 0x0d, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x2f, 0x0a, 0x06, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x06, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, + 0x12, 0x33, 0x0a, 0x08, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, + 0x68, 0x65, 0x63, 0x6b, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x52, 0x08, 0x66, 0x61, 0x69, + 0x6c, 0x75, 0x72, 0x65, 0x73, 0x22, 0x42, 0x0a, 0x0c, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x46, 0x61, + 0x69, 0x6c, 0x75, 0x72, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, + 0x79, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x22, 0x8f, 0x02, 0x0a, 0x0b, 0x44, 0x69, + 0x66, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6e, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6e, 0x12, 0x2b, 0x0a, 0x04, 0x6f, + 0x6c, 0x64, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, + 0x63, 0x74, 0x52, 0x04, 0x6f, 0x6c, 0x64, 0x73, 0x12, 0x2b, 0x0a, 0x04, 0x6e, 0x65, 0x77, 0x73, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, - 0x04, 0x6f, 0x6c, 0x64, 0x73, 0x12, 0x2b, 0x0a, 0x04, 0x6e, 0x65, 0x77, 0x73, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x04, 0x6e, 0x65, - 0x77, 0x73, 0x12, 0x24, 0x0a, 0x0d, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x43, 0x68, 0x61, 0x6e, - 0x67, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 
0x69, 0x67, 0x6e, 0x6f, 0x72, - 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x36, 0x0a, 0x0a, 0x6f, 0x6c, 0x64, 0x5f, - 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, - 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x09, 0x6f, 0x6c, 0x64, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x73, - 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x08, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0xc0, 0x01, 0x0a, 0x0c, 0x50, 0x72, 0x6f, - 0x70, 0x65, 0x72, 0x74, 0x79, 0x44, 0x69, 0x66, 0x66, 0x12, 0x30, 0x0a, 0x04, 0x6b, 0x69, 0x6e, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, - 0x72, 0x70, 0x63, 0x2e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x44, 0x69, 0x66, 0x66, - 0x2e, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x69, - 0x6e, 0x70, 0x75, 0x74, 0x44, 0x69, 0x66, 0x66, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, - 0x69, 0x6e, 0x70, 0x75, 0x74, 0x44, 0x69, 0x66, 0x66, 0x22, 0x60, 0x0a, 0x04, 0x4b, 0x69, 0x6e, - 0x64, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x44, 0x44, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x41, 0x44, - 0x44, 0x5f, 0x52, 0x45, 0x50, 0x4c, 0x41, 0x43, 0x45, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x44, - 0x45, 0x4c, 0x45, 0x54, 0x45, 0x10, 0x02, 0x12, 0x12, 0x0a, 0x0e, 0x44, 0x45, 0x4c, 0x45, 0x54, - 0x45, 0x5f, 0x52, 0x45, 0x50, 0x4c, 0x41, 0x43, 0x45, 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x55, - 0x50, 0x44, 0x41, 0x54, 0x45, 0x10, 0x04, 0x12, 0x12, 0x0a, 0x0e, 0x55, 0x50, 0x44, 0x41, 0x54, - 0x45, 0x5f, 0x52, 0x45, 0x50, 0x4c, 0x41, 0x43, 0x45, 0x10, 0x05, 0x22, 0xdd, 0x03, 0x0a, 0x0c, - 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, - 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, - 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x73, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x73, 0x12, 0x30, 0x0a, 0x13, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x65, 0x66, 0x6f, - 0x72, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x13, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x52, 0x65, 0x70, - 0x6c, 0x61, 0x63, 0x65, 0x12, 0x3d, 0x0a, 0x07, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, - 0x63, 0x2e, 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x44, - 0x69, 0x66, 0x66, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x52, 0x07, 0x63, 0x68, 0x61, 0x6e, - 0x67, 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x64, 0x69, 0x66, 0x66, 0x73, 0x18, 0x05, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x05, 0x64, 0x69, 0x66, 0x66, 0x73, 0x12, 0x4d, 0x0a, 0x0c, 0x64, 0x65, 0x74, - 0x61, 0x69, 0x6c, 0x65, 0x64, 0x44, 0x69, 0x66, 0x66, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x29, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x44, 0x69, 0x66, 0x66, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x65, - 0x64, 0x44, 0x69, 0x66, 0x66, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x64, 0x65, 0x74, 
0x61, - 0x69, 0x6c, 0x65, 0x64, 0x44, 0x69, 0x66, 0x66, 0x12, 0x28, 0x0a, 0x0f, 0x68, 0x61, 0x73, 0x44, - 0x65, 0x74, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x44, 0x69, 0x66, 0x66, 0x18, 0x07, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x0f, 0x68, 0x61, 0x73, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x44, 0x69, - 0x66, 0x66, 0x1a, 0x58, 0x0a, 0x11, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x44, 0x69, - 0x66, 0x66, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2d, 0x0a, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, - 0x69, 0x72, 0x70, 0x63, 0x2e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x44, 0x69, 0x66, - 0x66, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x3d, 0x0a, 0x0b, - 0x44, 0x69, 0x66, 0x66, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x10, 0x0a, 0x0c, 0x44, - 0x49, 0x46, 0x46, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0d, 0x0a, - 0x09, 0x44, 0x49, 0x46, 0x46, 0x5f, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, - 0x44, 0x49, 0x46, 0x46, 0x5f, 0x53, 0x4f, 0x4d, 0x45, 0x10, 0x02, 0x22, 0xb6, 0x01, 0x0a, 0x0d, - 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, - 0x03, 0x75, 0x72, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6e, 0x12, - 0x37, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x0a, 0x70, 0x72, - 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x65, - 0x6f, 0x75, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, - 0x75, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x65, 0x76, 0x69, 0x65, 0x77, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x07, 0x70, 0x72, 0x65, 0x76, 0x69, 0x65, 0x77, 0x12, 0x12, 0x0a, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x74, 0x79, 0x70, 0x65, 0x22, 0x59, 0x0a, 0x0e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x37, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, - 0x74, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, - 0x75, 0x63, 0x74, 0x52, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, - 0xc1, 0x01, 0x0a, 0x0b, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, - 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, - 0x6e, 0x12, 0x37, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x0a, - 0x70, 0x72, 0x6f, 
0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x2f, 0x0a, 0x06, 0x69, 0x6e, - 0x70, 0x75, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, - 0x75, 0x63, 0x74, 0x52, 0x06, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, - 0x79, 0x70, 0x65, 0x22, 0x88, 0x01, 0x0a, 0x0c, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x02, 0x69, 0x64, 0x12, 0x37, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, - 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x04, 0x6e, 0x65, 0x77, 0x73, 0x12, 0x24, 0x0a, 0x0d, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x43, + 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x69, 0x67, + 0x6e, 0x6f, 0x72, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x36, 0x0a, 0x0a, 0x6f, + 0x6c, 0x64, 0x5f, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x09, 0x6f, 0x6c, 0x64, 0x49, 0x6e, 0x70, + 0x75, 0x74, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0xc0, 0x01, 0x0a, 0x0c, + 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x44, 0x69, 0x66, 0x66, 0x12, 0x30, 0x0a, 0x04, + 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x70, 0x75, 0x6c, + 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x44, + 0x69, 0x66, 0x66, 0x2e, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x12, 0x1c, + 0x0a, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x44, 0x69, 0x66, 0x66, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x44, 0x69, 0x66, 0x66, 0x22, 0x60, 0x0a, 0x04, + 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x44, 0x44, 0x10, 0x00, 0x12, 0x0f, 0x0a, + 0x0b, 0x41, 0x44, 0x44, 0x5f, 0x52, 0x45, 0x50, 0x4c, 0x41, 0x43, 0x45, 0x10, 0x01, 0x12, 0x0a, + 0x0a, 0x06, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x10, 0x02, 0x12, 0x12, 0x0a, 0x0e, 0x44, 0x45, + 0x4c, 0x45, 0x54, 0x45, 0x5f, 0x52, 0x45, 0x50, 0x4c, 0x41, 0x43, 0x45, 0x10, 0x03, 0x12, 0x0a, + 0x0a, 0x06, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x10, 0x04, 0x12, 0x12, 0x0a, 0x0e, 0x55, 0x50, + 0x44, 0x41, 0x54, 0x45, 0x5f, 0x52, 0x45, 0x50, 0x4c, 0x41, 0x43, 0x45, 0x10, 0x05, 0x22, 0xdd, + 0x03, 0x0a, 0x0c, 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x08, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x73, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x73, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x30, 0x0a, 0x13, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, + 0x65, 0x66, 0x6f, 0x72, 0x65, 0x52, 0x65, 
0x70, 0x6c, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x13, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, + 0x52, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x12, 0x3d, 0x0a, 0x07, 0x63, 0x68, 0x61, 0x6e, 0x67, + 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, + 0x69, 0x72, 0x70, 0x63, 0x2e, 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x2e, 0x44, 0x69, 0x66, 0x66, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x52, 0x07, 0x63, + 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x64, 0x69, 0x66, 0x66, 0x73, 0x18, + 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x64, 0x69, 0x66, 0x66, 0x73, 0x12, 0x4d, 0x0a, 0x0c, + 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x44, 0x69, 0x66, 0x66, 0x18, 0x06, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x44, + 0x69, 0x66, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x44, 0x65, 0x74, 0x61, + 0x69, 0x6c, 0x65, 0x64, 0x44, 0x69, 0x66, 0x66, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x64, + 0x65, 0x74, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x44, 0x69, 0x66, 0x66, 0x12, 0x28, 0x0a, 0x0f, 0x68, + 0x61, 0x73, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x44, 0x69, 0x66, 0x66, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x68, 0x61, 0x73, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x65, + 0x64, 0x44, 0x69, 0x66, 0x66, 0x1a, 0x58, 0x0a, 0x11, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x65, + 0x64, 0x44, 0x69, 0x66, 0x66, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2d, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x70, 0x75, + 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, + 0x44, 0x69, 0x66, 0x66, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, + 0x3d, 0x0a, 0x0b, 0x44, 0x69, 0x66, 0x66, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x10, + 0x0a, 0x0c, 0x44, 0x49, 0x46, 0x46, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, + 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x49, 0x46, 0x46, 0x5f, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x01, 0x12, + 0x0d, 0x0a, 0x09, 0x44, 0x49, 0x46, 0x46, 0x5f, 0x53, 0x4f, 0x4d, 0x45, 0x10, 0x02, 0x22, 0xb6, + 0x01, 0x0a, 0x0d, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, + 0x72, 0x6e, 0x12, 0x37, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, + 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x74, + 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x74, 0x69, + 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x65, 0x76, 0x69, 0x65, 0x77, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x70, 0x72, 0x65, 0x76, 0x69, 0x65, 0x77, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0x59, 0x0a, 0x0e, 
0x43, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x37, 0x0a, 0x0a, 0x70, 0x72, 0x6f, + 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, + 0x65, 0x73, 0x22, 0xc1, 0x01, 0x0a, 0x0b, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, + 0x69, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x75, 0x72, 0x6e, 0x12, 0x37, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, + 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x2f, 0x0a, - 0x06, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, + 0x06, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x06, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x22, 0xc5, - 0x02, 0x0a, 0x0d, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, - 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, - 0x72, 0x6e, 0x12, 0x2b, 0x0a, 0x04, 0x6f, 0x6c, 0x64, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x06, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x12, 0x12, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0x88, 0x01, 0x0a, 0x0c, 0x52, 0x65, 0x61, 0x64, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x37, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, + 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, + 0x72, 0x75, 0x63, 0x74, 0x52, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, + 0x12, 0x2f, 0x0a, 0x06, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x04, 0x6f, 0x6c, 0x64, 0x73, 0x12, - 0x2b, 0x0a, 0x04, 0x6e, 0x65, 0x77, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x04, 0x6e, 0x65, 0x77, 0x73, 0x12, 0x18, 0x0a, 0x07, - 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x74, - 
0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x24, 0x0a, 0x0d, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, - 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x69, - 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, - 0x70, 0x72, 0x65, 0x76, 0x69, 0x65, 0x77, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x70, - 0x72, 0x65, 0x76, 0x69, 0x65, 0x77, 0x12, 0x36, 0x0a, 0x0a, 0x6f, 0x6c, 0x64, 0x5f, 0x69, 0x6e, - 0x70, 0x75, 0x74, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, - 0x75, 0x63, 0x74, 0x52, 0x09, 0x6f, 0x6c, 0x64, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x12, 0x12, - 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0x49, 0x0a, 0x0e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x70, - 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, - 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, - 0x73, 0x22, 0xe4, 0x01, 0x0a, 0x0d, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x06, 0x69, 0x6e, 0x70, 0x75, 0x74, + 0x73, 0x22, 0xc5, 0x02, 0x0a, 0x0d, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x03, 0x75, 0x72, 0x6e, 0x12, 0x37, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, - 0x69, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, - 0x63, 0x74, 0x52, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x18, - 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, - 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x36, 0x0a, 0x0a, 0x6f, 0x6c, 0x64, 0x5f, - 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, - 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x09, 0x6f, 0x6c, 0x64, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x73, - 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0xad, 0x0b, 0x0a, 0x10, 0x43, 0x6f, 0x6e, - 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, - 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x63, 0x6b, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x12, 0x3f, 0x0a, - 0x06, 0x63, 0x6f, 0x6e, 0x66, 
0x69, 0x67, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, - 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, - 0x75, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x16, - 0x0a, 0x06, 0x64, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, - 0x64, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x72, 0x61, 0x6c, 0x6c, - 0x65, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x72, 0x61, 0x6c, 0x6c, - 0x65, 0x6c, 0x12, 0x28, 0x0a, 0x0f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x45, 0x6e, 0x64, - 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6d, 0x6f, 0x6e, - 0x69, 0x74, 0x6f, 0x72, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x12, 0x0a, 0x04, - 0x74, 0x79, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, - 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x09, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x2f, 0x0a, 0x06, - 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, + 0x52, 0x03, 0x75, 0x72, 0x6e, 0x12, 0x2b, 0x0a, 0x04, 0x6f, 0x6c, 0x64, 0x73, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x04, 0x6f, 0x6c, + 0x64, 0x73, 0x12, 0x2b, 0x0a, 0x04, 0x6e, 0x65, 0x77, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x04, 0x6e, 0x65, 0x77, 0x73, 0x12, + 0x18, 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x01, + 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x24, 0x0a, 0x0d, 0x69, 0x67, 0x6e, + 0x6f, 0x72, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x0d, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, + 0x18, 0x0a, 0x07, 0x70, 0x72, 0x65, 0x76, 0x69, 0x65, 0x77, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x07, 0x70, 0x72, 0x65, 0x76, 0x69, 0x65, 0x77, 0x12, 0x36, 0x0a, 0x0a, 0x6f, 0x6c, 0x64, + 0x5f, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x09, 0x6f, 0x6c, 0x64, 0x49, 0x6e, 0x70, 0x75, 0x74, + 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0a, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0x49, 0x0a, 0x0e, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0a, 0x70, + 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 
0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, + 0x74, 0x69, 0x65, 0x73, 0x22, 0xe4, 0x01, 0x0a, 0x0d, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6e, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6e, 0x12, 0x37, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x70, + 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, - 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x06, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x12, 0x60, 0x0a, - 0x11, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, - 0x65, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, - 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x44, 0x65, 0x70, 0x65, 0x6e, - 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x69, 0x6e, - 0x70, 0x75, 0x74, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x12, - 0x48, 0x0a, 0x09, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x73, 0x18, 0x0d, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, - 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, - 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, - 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x73, 0x12, 0x22, 0x0a, 0x0c, 0x64, 0x65, 0x70, - 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x0c, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x12, 0x2a, 0x0a, - 0x10, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4b, 0x65, 0x79, - 0x73, 0x18, 0x10, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, - 0x65, 0x63, 0x72, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x22, 0x0a, 0x0c, 0x6f, 0x72, 0x67, - 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0c, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, - 0x07, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, - 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x6c, 0x69, 0x61, 0x73, - 0x65, 0x73, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, - 0x73, 0x12, 0x38, 0x0a, 0x17, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x53, - 0x65, 0x63, 0x72, 0x65, 0x74, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x73, 0x18, 0x12, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x17, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x53, 0x65, - 0x63, 0x72, 0x65, 0x74, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x73, 0x12, 0x52, 0x0a, 0x0e, 0x63, - 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x73, 0x18, 0x13, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, + 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, + 0x73, 0x12, 0x18, 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 
0x04, 0x20, 0x01, + 0x28, 0x01, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x36, 0x0a, 0x0a, 0x6f, + 0x6c, 0x64, 0x5f, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x09, 0x6f, 0x6c, 0x64, 0x49, 0x6e, 0x70, + 0x75, 0x74, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0xad, 0x0b, 0x0a, 0x10, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x73, 0x52, - 0x0e, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x73, 0x12, - 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x57, 0x69, 0x74, 0x68, 0x18, 0x14, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x57, 0x69, 0x74, - 0x68, 0x12, 0x30, 0x0a, 0x13, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x65, 0x66, 0x6f, 0x72, - 0x65, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x18, 0x15, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, - 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x52, 0x65, 0x70, 0x6c, - 0x61, 0x63, 0x65, 0x12, 0x24, 0x0a, 0x0d, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x43, 0x68, 0x61, - 0x6e, 0x67, 0x65, 0x73, 0x18, 0x16, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x69, 0x67, 0x6e, 0x6f, - 0x72, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x2a, 0x0a, 0x10, 0x72, 0x65, 0x70, - 0x6c, 0x61, 0x63, 0x65, 0x4f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x17, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x10, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x4f, 0x6e, 0x43, 0x68, - 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0e, 0x72, 0x65, 0x74, 0x61, 0x69, 0x6e, 0x4f, - 0x6e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x18, 0x18, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x72, - 0x65, 0x74, 0x61, 0x69, 0x6e, 0x4f, 0x6e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x32, 0x0a, - 0x15, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x73, 0x5f, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x61, 0x63, - 0x63, 0x65, 0x70, 0x74, 0x73, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x73, 0x1a, 0x2a, 0x0a, 0x14, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x44, 0x65, 0x70, - 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x72, 0x6e, - 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x75, 0x72, 0x6e, 0x73, 0x1a, 0x58, 0x0a, - 0x0e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x73, 0x12, - 0x16, 0x0a, 0x06, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x06, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x75, 0x70, 0x64, 0x61, 0x74, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, - 0x16, 0x0a, 0x06, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x06, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x1a, 0x39, 0x0a, 0x0b, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, - 0x01, 
0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, - 0x38, 0x01, 0x1a, 0x76, 0x0a, 0x16, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x44, 0x65, 0x70, 0x65, 0x6e, - 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, - 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x46, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, - 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, - 0x75, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x50, 0x72, 0x6f, 0x70, 0x65, - 0x72, 0x74, 0x79, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3c, 0x0a, 0x0e, 0x50, 0x72, - 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, - 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xdc, 0x02, 0x0a, 0x11, 0x43, 0x6f, 0x6e, - 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x10, - 0x0a, 0x03, 0x75, 0x72, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6e, - 0x12, 0x2d, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, + 0x61, 0x63, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x61, 0x63, 0x6b, + 0x12, 0x3f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x27, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x6f, 0x6e, + 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x06, 0x64, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x72, + 0x61, 0x6c, 0x6c, 0x65, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x72, + 0x61, 0x6c, 0x6c, 0x65, 0x6c, 0x12, 0x28, 0x0a, 0x0f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, + 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, + 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, + 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, + 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, + 0x2f, 0x0a, 0x06, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x53, 0x74, 0x72, 
0x75, 0x63, 0x74, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, - 0x61, 0x0a, 0x11, 0x73, 0x74, 0x61, 0x74, 0x65, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, - 0x63, 0x69, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x70, 0x75, 0x6c, - 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x44, 0x65, + 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x06, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, + 0x12, 0x60, 0x0a, 0x11, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, + 0x6e, 0x63, 0x69, 0x65, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x70, 0x75, + 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x63, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, - 0x11, 0x73, 0x74, 0x61, 0x74, 0x65, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, - 0x65, 0x73, 0x1a, 0x2a, 0x0a, 0x14, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x44, 0x65, - 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x72, - 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x75, 0x72, 0x6e, 0x73, 0x1a, 0x77, - 0x0a, 0x16, 0x53, 0x74, 0x61, 0x74, 0x65, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, - 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x47, 0x0a, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x70, 0x75, 0x6c, 0x75, - 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, - 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x52, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xad, 0x01, 0x0a, 0x17, 0x45, 0x72, 0x72, 0x6f, - 0x72, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x69, 0x74, 0x46, 0x61, 0x69, - 0x6c, 0x65, 0x64, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x02, 0x69, 0x64, 0x12, 0x37, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, - 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, - 0x52, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, - 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x72, - 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x73, 0x12, 0x2f, 0x0a, 0x06, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, - 0x06, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x22, 0x41, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x4d, 0x61, - 0x70, 0x70, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, - 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x1a, - 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 
0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x22, 0x44, 0x0a, 0x12, 0x47, 0x65, - 0x74, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, - 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, - 0x22, 0x26, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x73, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x33, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x4d, - 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x1c, 0x0a, 0x09, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x73, 0x32, 0xb3, 0x0b, - 0x0a, 0x10, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, - 0x65, 0x72, 0x12, 0x58, 0x0a, 0x09, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x12, - 0x23, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x50, 0x72, 0x6f, 0x76, - 0x69, 0x64, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, - 0x2e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, - 0x6b, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a, 0x0c, - 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x69, 0x7a, 0x65, 0x12, 0x1e, 0x2e, 0x70, - 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, - 0x65, 0x72, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x70, - 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, - 0x65, 0x72, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, - 0x48, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1b, 0x2e, 0x70, + 0x11, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, + 0x65, 0x73, 0x12, 0x48, 0x0a, 0x09, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x73, 0x18, + 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, + 0x63, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x2e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x73, 0x12, 0x22, 0x0a, 0x0c, + 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x18, 0x0f, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, + 0x12, 0x2a, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, + 0x4b, 0x65, 0x79, 0x73, 0x18, 0x10, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x22, 0x0a, 0x0c, + 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 
0x11, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0c, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x18, 0x0c, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x6c, + 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x61, 0x6c, 0x69, + 0x61, 0x73, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x17, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x61, 0x6c, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x73, 0x18, + 0x12, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, + 0x6c, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x73, 0x12, 0x52, + 0x0a, 0x0e, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x73, + 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, + 0x70, 0x63, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, + 0x74, 0x73, 0x52, 0x0e, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, + 0x74, 0x73, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x57, 0x69, 0x74, + 0x68, 0x18, 0x14, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, + 0x57, 0x69, 0x74, 0x68, 0x12, 0x30, 0x0a, 0x13, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x65, + 0x66, 0x6f, 0x72, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x18, 0x15, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x13, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x52, + 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x12, 0x24, 0x0a, 0x0d, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, + 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x16, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x69, + 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x2a, 0x0a, 0x10, + 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x4f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, + 0x18, 0x17, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x4f, + 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0e, 0x72, 0x65, 0x74, 0x61, + 0x69, 0x6e, 0x4f, 0x6e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x18, 0x18, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0e, 0x72, 0x65, 0x74, 0x61, 0x69, 0x6e, 0x4f, 0x6e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x12, 0x32, 0x0a, 0x15, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x73, 0x5f, 0x6f, 0x75, 0x74, 0x70, + 0x75, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x13, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x73, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x73, 0x1a, 0x2a, 0x0a, 0x14, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, + 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x12, 0x12, 0x0a, 0x04, + 0x75, 0x72, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x75, 0x72, 0x6e, 0x73, + 0x1a, 0x58, 0x0a, 0x0e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, + 0x74, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x75, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x75, 0x70, 0x64, 0x61, + 0x74, 
0x65, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x1a, 0x39, 0x0a, 0x0b, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x76, 0x0a, 0x16, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x44, 0x65, + 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x46, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x30, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x6f, 0x6e, + 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x50, 0x72, + 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, + 0x65, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3c, 0x0a, + 0x0e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xdc, 0x02, 0x0a, 0x11, + 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x75, 0x72, 0x6e, 0x12, 0x2d, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x05, 0x73, 0x74, 0x61, + 0x74, 0x65, 0x12, 0x61, 0x0a, 0x11, 0x73, 0x74, 0x61, 0x74, 0x65, 0x44, 0x65, 0x70, 0x65, 0x6e, + 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, + 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, + 0x75, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x11, 0x73, 0x74, 0x61, 0x74, 0x65, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, + 0x6e, 0x63, 0x69, 0x65, 0x73, 0x1a, 0x2a, 0x0a, 0x14, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, + 0x79, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x12, 0x12, 0x0a, + 0x04, 0x75, 0x72, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x75, 0x72, 0x6e, + 0x73, 0x1a, 0x77, 0x0a, 0x16, 0x53, 0x74, 0x61, 0x74, 0x65, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, + 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x47, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x70, + 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x75, + 0x63, 0x74, 0x52, 0x65, 0x73, 
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x70, 0x65, + 0x72, 0x74, 0x79, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xad, 0x01, 0x0a, 0x17, 0x45, + 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x69, 0x74, + 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x37, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, + 0x74, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, + 0x75, 0x63, 0x74, 0x52, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, + 0x18, 0x0a, 0x07, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x07, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x73, 0x12, 0x2f, 0x0a, 0x06, 0x69, 0x6e, 0x70, + 0x75, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, + 0x63, 0x74, 0x52, 0x06, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x22, 0x41, 0x0a, 0x11, 0x47, 0x65, + 0x74, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x22, 0x44, 0x0a, + 0x12, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, + 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, + 0x61, 0x74, 0x61, 0x22, 0x26, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, + 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x33, 0x0a, 0x13, 0x47, + 0x65, 0x74, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x73, + 0x32, 0xb3, 0x0b, 0x0a, 0x10, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x72, 0x6f, + 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x58, 0x0a, 0x09, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, + 0x6b, 0x65, 0x12, 0x23, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x50, + 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, + 0x72, 0x70, 0x63, 0x2e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, + 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, + 0x51, 0x0a, 0x0c, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x69, 0x7a, 0x65, 0x12, + 0x1e, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 
0x70, 0x63, 0x2e, 0x50, 0x61, 0x72, 0x61, + 0x6d, 0x65, 0x74, 0x65, 0x72, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x1f, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x50, 0x61, 0x72, 0x61, + 0x6d, 0x65, 0x74, 0x65, 0x72, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x00, 0x12, 0x48, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, + 0x1b, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x47, 0x65, 0x74, 0x53, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, - 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x70, 0x75, 0x6c, 0x75, - 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x42, 0x0a, 0x0b, 0x43, 0x68, 0x65, - 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x17, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, - 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x18, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x68, - 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3f, 0x0a, - 0x0a, 0x44, 0x69, 0x66, 0x66, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x16, 0x2e, 0x70, 0x75, - 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, - 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x48, - 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x12, 0x1b, 0x2e, 0x70, 0x75, + 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x42, 0x0a, 0x0b, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x17, 0x2e, 0x70, 0x75, + 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, + 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x3f, 0x0a, 0x0a, 0x44, 0x69, 0x66, 0x66, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x16, + 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x44, 0x69, 0x66, 0x66, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, + 0x70, 0x63, 0x2e, 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x48, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x12, 0x1b, + 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, - 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, - 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3f, 0x0a, 0x06, 0x49, 0x6e, 0x76, 0x6f, - 0x6b, 0x65, 0x12, 0x18, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 
0x2e, 0x49, - 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x70, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3f, 0x0a, 0x06, 0x49, + 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x12, 0x18, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, + 0x63, 0x2e, 0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x19, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x49, 0x6e, 0x76, 0x6f, + 0x6b, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x47, 0x0a, 0x0c, + 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x12, 0x18, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x47, 0x0a, 0x0c, 0x53, 0x74, 0x72, - 0x65, 0x61, 0x6d, 0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x12, 0x18, 0x2e, 0x70, 0x75, 0x6c, 0x75, - 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, - 0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, - 0x30, 0x01, 0x12, 0x39, 0x0a, 0x04, 0x43, 0x61, 0x6c, 0x6c, 0x12, 0x16, 0x2e, 0x70, 0x75, 0x6c, - 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, - 0x61, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3c, 0x0a, - 0x05, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x17, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, - 0x70, 0x63, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x18, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x68, 0x65, 0x63, - 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x39, 0x0a, 0x04, 0x44, - 0x69, 0x66, 0x66, 0x12, 0x16, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, - 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x70, 0x75, - 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3f, 0x0a, 0x06, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, - 0x12, 0x18, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x72, 0x65, - 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x70, 0x75, 0x6c, - 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x39, 0x0a, 0x04, 0x52, 0x65, 0x61, 0x64, 0x12, - 0x16, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x61, 0x64, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, - 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x00, 0x12, 0x3f, 0x0a, 0x06, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x18, 0x2e, 0x70, - 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, - 0x70, 0x63, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 
0x00, 0x12, 0x3c, 0x0a, 0x06, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x18, 0x2e, - 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, - 0x00, 0x12, 0x48, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x12, 0x1b, + 0x70, 0x63, 0x2e, 0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x39, 0x0a, 0x04, 0x43, 0x61, 0x6c, 0x6c, 0x12, 0x16, 0x2e, + 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, + 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x3c, 0x0a, 0x05, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x17, 0x2e, 0x70, 0x75, 0x6c, 0x75, + 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, + 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x39, + 0x0a, 0x04, 0x44, 0x69, 0x66, 0x66, 0x12, 0x16, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, + 0x70, 0x63, 0x2e, 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, + 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x44, 0x69, 0x66, 0x66, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3f, 0x0a, 0x06, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x12, 0x18, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, + 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x39, 0x0a, 0x04, 0x52, 0x65, + 0x61, 0x64, 0x12, 0x16, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x52, + 0x65, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x70, 0x75, 0x6c, + 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3f, 0x0a, 0x06, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, + 0x18, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x70, 0x75, 0x6c, 0x75, + 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3c, 0x0a, 0x06, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x12, 0x18, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, + 0x74, 0x79, 0x22, 0x00, 0x12, 0x48, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x63, + 0x74, 0x12, 0x1b, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x6f, + 0x6e, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 
0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x74, - 0x72, 0x75, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x70, 0x75, - 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x63, - 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3a, 0x0a, 0x06, 0x43, - 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x16, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x40, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x50, 0x6c, - 0x75, 0x67, 0x69, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x72, 0x75, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3a, + 0x0a, 0x06, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, - 0x1a, 0x15, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x50, 0x6c, 0x75, - 0x67, 0x69, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x00, 0x12, 0x3b, 0x0a, 0x06, 0x41, 0x74, 0x74, - 0x61, 0x63, 0x68, 0x12, 0x17, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, - 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x41, 0x74, 0x74, 0x61, 0x63, 0x68, 0x1a, 0x16, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, - 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x70, - 0x70, 0x69, 0x6e, 0x67, 0x12, 0x1c, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, - 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x47, - 0x65, 0x74, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x12, 0x4e, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, - 0x67, 0x73, 0x12, 0x1d, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x47, - 0x65, 0x74, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x1e, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x47, 0x65, - 0x74, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x42, 0x34, 0x5a, 0x32, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x2f, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x2f, - 0x73, 0x64, 0x6b, 0x2f, 0x76, 0x33, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x3b, - 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, + 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x40, 0x0a, 0x0d, 0x47, 0x65, + 0x74, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x16, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, + 0x70, 0x74, 0x79, 0x1a, 0x15, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, + 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x49, 0x6e, 0x66, 
0x6f, 0x22, 0x00, 0x12, 0x3b, 0x0a, 0x06, + 0x41, 0x74, 0x74, 0x61, 0x63, 0x68, 0x12, 0x17, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, + 0x70, 0x63, 0x2e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x41, 0x74, 0x74, 0x61, 0x63, 0x68, 0x1a, + 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0a, 0x47, 0x65, 0x74, + 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x12, 0x1c, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, + 0x72, 0x70, 0x63, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, + 0x63, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4e, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x70, + 0x70, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x1d, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, + 0x63, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, + 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x34, 0x5a, 0x32, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x2f, 0x70, 0x75, 0x6c, 0x75, + 0x6d, 0x69, 0x2f, 0x73, 0x64, 0x6b, 0x2f, 0x76, 0x33, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, + 0x67, 0x6f, 0x3b, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -4825,6 +4945,7 @@ func file_pulumi_provider_proto_init() { (*ParameterizeRequest_Args)(nil), (*ParameterizeRequest_Value)(nil), } + file_pulumi_provider_proto_msgTypes[6].OneofWrappers = []interface{}{} type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/proto/go/provider_grpc.pb.go b/vendor/github.com/pulumi/pulumi/sdk/v3/proto/go/provider_grpc.pb.go index b5d2fce6b..088092c51 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/proto/go/provider_grpc.pb.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/proto/go/provider_grpc.pb.go @@ -25,9 +25,9 @@ const _ = grpc.SupportPackageIsVersion7 type ResourceProviderClient interface { // `Handshake` is the first call made by the engine to a provider. It is used to pass the engine's address to the // provider so that it may establish its own connections back, and to establish protocol configuration that will be - // used to communicate between the two parties. Providers that support `Handshake` implicitly support the set of - // feature flags previously handled by `Configure` prior to `Handshake`'s introduction, such as secrets and resource - // references. + // used to communicate between the two parties. Providers that support `Handshake` should return responses + // consistent with those returned in response to [](pulumirpc.ResourceProvider.Configure) calls where there is + // overlap due to the use of `Configure` prior to `Handshake`'s introduction. 
Handshake(ctx context.Context, in *ProviderHandshakeRequest, opts ...grpc.CallOption) (*ProviderHandshakeResponse, error) // `Parameterize` is the primary means of supporting [parameterized providers](parameterized-providers), which allow // a caller to change a provider's behavior ahead of its [configuration](pulumirpc.ResourceProvider.Configure) and @@ -187,12 +187,33 @@ type ResourceProviderClient interface { GetPluginInfo(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*PluginInfo, error) // Attach sends the engine address to an already running plugin. Attach(ctx context.Context, in *PluginAttach, opts ...grpc.CallOption) (*emptypb.Empty, error) - // GetMapping fetches the mapping for this resource provider, if any. A provider should return an empty - // response (not an error) if it doesn't have a mapping for the given key. + // `GetMapping` returns mappings designed to aid in [converting programs and state from other + // ecosystems](converters). It accepts a "conversion key", which effectively corresponds to a source language, such + // as `terraform`, and a *source provider name*, which is the name of the provider *in the source language*. Given + // these, it returns source-specific mapping data for the provider requested. As an example, the Pulumi AWS + // provider, which is bridged from the Terraform AWS provider and thus capable of mapping names between the two, + // might respond to a call with key `terraform` and source provider name `aws` with mapping data for transforming + // (among other things) Terraform AWS names such as `aws_s3_bucket` into Pulumi AWS types such as + // `aws:s3/bucket:Bucket`. If a provider only supports a single source provider, or has some sensible default, it + // may respond also to a call in which the source provider name is empty (`""`), which will be made when the engine + // does not have sufficient knowledge to work out which provider offers a specific mapping. + // + // In general, it is expected that providers implemented by bridging an equivalent provider from another ecosystem + // (such as bridged Terraform providers built atop the `pulumi-terraform-bridge`, for instance) implement + // `GetMapping` to support conversion from that ecosystem into Pulumi using the same logic that underpins the + // bridging itself. GetMapping(ctx context.Context, in *GetMappingRequest, opts ...grpc.CallOption) (*GetMappingResponse, error) - // GetMappings is an optional method that returns what mappings (if any) a provider supports. If a provider does not - // implement this method the engine falls back to the old behaviour of just calling GetMapping without a name. - // If this method is implemented than the engine will then call GetMapping only with the names returned from this method. + // `GetMappings` is an optional method designed to aid in [converting programs and state from other + // ecosystems](converters). `GetMappings` accepts a "conversion key". This corresponds to a source language, for + // which we want to retrieve mappings for names etc. from that source language into Pulumi. An example key might + // therefore be `terraform` in the event that we wish to map e.g. Terraform resource names to Pulumi resource types. + // Given a key, `GetMappings` returns a list of *source provider names* for which calls to `GetMapping` will return + // mappings. 
So, continuing the Terraform example, the Pulumi AWS provider, which is bridged from the Terraform AWS + // provider and thus capable of mapping names between the two, might return the list `["aws"]` in response to a call + // with key `terraform`. + // + // If a provider does not implement `GetMappings`, the engine will fall back to calling `GetMapping` blindly without + // a source provider name (that is, with the value `""`). GetMappings(ctx context.Context, in *GetMappingsRequest, opts ...grpc.CallOption) (*GetMappingsResponse, error) } @@ -422,9 +443,9 @@ func (c *resourceProviderClient) GetMappings(ctx context.Context, in *GetMapping type ResourceProviderServer interface { // `Handshake` is the first call made by the engine to a provider. It is used to pass the engine's address to the // provider so that it may establish its own connections back, and to establish protocol configuration that will be - // used to communicate between the two parties. Providers that support `Handshake` implicitly support the set of - // feature flags previously handled by `Configure` prior to `Handshake`'s introduction, such as secrets and resource - // references. + // used to communicate between the two parties. Providers that support `Handshake` should return responses + // consistent with those returned in response to [](pulumirpc.ResourceProvider.Configure) calls where there is + // overlap due to the use of `Configure` prior to `Handshake`'s introduction. Handshake(context.Context, *ProviderHandshakeRequest) (*ProviderHandshakeResponse, error) // `Parameterize` is the primary means of supporting [parameterized providers](parameterized-providers), which allow // a caller to change a provider's behavior ahead of its [configuration](pulumirpc.ResourceProvider.Configure) and @@ -584,12 +605,33 @@ type ResourceProviderServer interface { GetPluginInfo(context.Context, *emptypb.Empty) (*PluginInfo, error) // Attach sends the engine address to an already running plugin. Attach(context.Context, *PluginAttach) (*emptypb.Empty, error) - // GetMapping fetches the mapping for this resource provider, if any. A provider should return an empty - // response (not an error) if it doesn't have a mapping for the given key. + // `GetMapping` returns mappings designed to aid in [converting programs and state from other + // ecosystems](converters). It accepts a "conversion key", which effectively corresponds to a source language, such + // as `terraform`, and a *source provider name*, which is the name of the provider *in the source language*. Given + // these, it returns source-specific mapping data for the provider requested. As an example, the Pulumi AWS + // provider, which is bridged from the Terraform AWS provider and thus capable of mapping names between the two, + // might respond to a call with key `terraform` and source provider name `aws` with mapping data for transforming + // (among other things) Terraform AWS names such as `aws_s3_bucket` into Pulumi AWS types such as + // `aws:s3/bucket:Bucket`. If a provider only supports a single source provider, or has some sensible default, it + // may respond also to a call in which the source provider name is empty (`""`), which will be made when the engine + // does not have sufficient knowledge to work out which provider offers a specific mapping. 
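The mapping contract documented in these comments is easiest to see from the provider's side. Below is a minimal sketch of a server offering a single Terraform mapping, matching the request/response shapes generated in this file; the myProvider type, the loadTerraformMapping helper, and the mapping payload are invented for illustration, while the pulumirpc types and fields follow the generated API.

// Sketch only: a provider that offers one Terraform mapping.
package sketch

import (
	"context"

	pulumirpc "github.com/pulumi/pulumi/sdk/v3/proto/go"
)

type myProvider struct {
	pulumirpc.UnimplementedResourceProviderServer
}

// loadTerraformMapping stands in for however a real provider would produce
// its serialized mapping data.
func loadTerraformMapping() ([]byte, error) {
	return []byte(`{"resources":{"aws_s3_bucket":"aws:s3/bucket:Bucket"}}`), nil
}

// GetMappings advertises which source provider names GetMapping understands
// for the "terraform" conversion key.
func (p *myProvider) GetMappings(ctx context.Context, req *pulumirpc.GetMappingsRequest) (*pulumirpc.GetMappingsResponse, error) {
	if req.Key != "terraform" {
		// No mappings for other keys; an empty response is not an error.
		return &pulumirpc.GetMappingsResponse{}, nil
	}
	return &pulumirpc.GetMappingsResponse{Providers: []string{"aws"}}, nil
}

// GetMapping returns the mapping data itself. Provider may be "" when the
// engine probes blindly because GetMappings is not implemented.
func (p *myProvider) GetMapping(ctx context.Context, req *pulumirpc.GetMappingRequest) (*pulumirpc.GetMappingResponse, error) {
	if req.Key != "terraform" || (req.Provider != "" && req.Provider != "aws") {
		return &pulumirpc.GetMappingResponse{}, nil
	}
	data, err := loadTerraformMapping()
	if err != nil {
		return nil, err
	}
	return &pulumirpc.GetMappingResponse{Provider: "aws", Data: data}, nil
}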
+ // + // In general, it is expected that providers implemented by bridging an equivalent provider from another ecosystem + // (such as bridged Terraform providers built atop the `pulumi-terraform-bridge`, for instance) implement + // `GetMapping` to support conversion from that ecosystem into Pulumi using the same logic that underpins the + // bridging itself. GetMapping(context.Context, *GetMappingRequest) (*GetMappingResponse, error) - // GetMappings is an optional method that returns what mappings (if any) a provider supports. If a provider does not - // implement this method the engine falls back to the old behaviour of just calling GetMapping without a name. - // If this method is implemented than the engine will then call GetMapping only with the names returned from this method. + // `GetMappings` is an optional method designed to aid in [converting programs and state from other + // ecosystems](converters). `GetMappings` accepts a "conversion key". This corresponds to a source language, for + // which we want to retrieve mappings for names etc. from that source language into Pulumi. An example key might + // therefore be `terraform` in the event that we wish to map e.g. Terraform resource names to Pulumi resource types. + // Given a key, `GetMappings` returns a list of *source provider names* for which calls to `GetMapping` will return + // mappings. So, continuing the Terraform example, the Pulumi AWS provider, which is bridged from the Terraform AWS + // provider and thus capable of mapping names between the two, might return the list `["aws"]` in response to a call + // with key `terraform`. + // + // If a provider does not implement `GetMappings`, the engine will fall back to calling `GetMapping` blindly without + // a source provider name (that is, with the value `""`). GetMappings(context.Context, *GetMappingsRequest) (*GetMappingsResponse, error) mustEmbedUnimplementedResourceProviderServer() } diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/python/toolchain/toolchain.go b/vendor/github.com/pulumi/pulumi/sdk/v3/python/toolchain/toolchain.go index d3d29fe39..e1e2df019 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/python/toolchain/toolchain.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/python/toolchain/toolchain.go @@ -26,6 +26,7 @@ import ( "strings" "github.com/pulumi/pulumi/sdk/v3/go/common/util/contract" + "github.com/pulumi/pulumi/sdk/v3/go/common/util/errutil" "github.com/pulumi/pulumi/sdk/v3/go/common/util/fsutil" "github.com/pulumi/pulumi/sdk/v3/go/common/util/logging" ) @@ -213,11 +214,7 @@ func installPython(ctx context.Context, cwd string, showOutput bool, infoWriter, } err = cmd.Run() if err != nil { - var exitErr *exec.ExitError - if errors.As(err, &exitErr) { - return fmt.Errorf("error while running pyenv install: %s", string(exitErr.Stderr)) - } - return fmt.Errorf("error while running pyenv install: %s", err) + return errutil.ErrorWithStderr(err, "error while running pyenv install") } return nil } @@ -235,15 +232,3 @@ func searchup(currentDir, fileToFind string) (string, error) { } return searchup(parentDir, fileToFind) } - -// errorWithStderr returns an error that includes the stderr output if the error is an ExitError. 
-func errorWithStderr(err error, message string) error { - var exitErr *exec.ExitError - if errors.As(err, &exitErr) { - stderr := strings.TrimSpace(string(exitErr.Stderr)) - if len(stderr) > 0 { - return fmt.Errorf("%s: %w: %s", message, exitErr, exitErr.Stderr) - } - } - return fmt.Errorf("%s: %w", message, err) -} diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/python/toolchain/uv.go b/vendor/github.com/pulumi/pulumi/sdk/v3/python/toolchain/uv.go index 667a7a29b..8cd10f9cf 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/python/toolchain/uv.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/python/toolchain/uv.go @@ -29,6 +29,7 @@ import ( "github.com/blang/semver" "github.com/pulumi/pulumi/sdk/v3/go/common/util/contract" + "github.com/pulumi/pulumi/sdk/v3/go/common/util/errutil" "github.com/pulumi/pulumi/sdk/v3/go/common/util/logging" ) @@ -142,14 +143,14 @@ func (u *uv) InstallDependencies(ctx context.Context, cwd string, useLanguageVer initCmd := u.uvCommand(ctx, pyprojectTomlDir, showOutput, infoWriter, errorWriter, "init", "--no-readme", "--no-package", "--no-pin-python") if err := initCmd.Run(); err != nil { - return errorWithStderr(err, "error initializing python project") + return errutil.ErrorWithStderr(err, "error initializing python project") } if hasRequirementsTxt { requirementsTxt := filepath.Join(requirementsTxtDir, "requirements.txt") addCmd := u.uvCommand(ctx, cwd, showOutput, infoWriter, errorWriter, "add", "-r", requirementsTxt) if err := addCmd.Run(); err != nil { - return errorWithStderr(err, "error installing dependecies from requirements.txt") + return errutil.ErrorWithStderr(err, "error installing dependecies from requirements.txt") } // Remove the requirements.txt file, after calling `uv add`, the // dependencies are tracked in pyproject.toml. @@ -173,7 +174,7 @@ func (u *uv) InstallDependencies(ctx context.Context, cwd string, useLanguageVer // install the dependencies. 
syncCmd := u.uvCommand(ctx, cwd, showOutput, infoWriter, errorWriter, "sync") if err := syncCmd.Run(); err != nil { - return errorWithStderr(err, "error installing dependencies") + return errutil.ErrorWithStderr(err, "error installing dependencies") } return nil } @@ -184,7 +185,7 @@ func (u *uv) EnsureVenv(ctx context.Context, cwd string, useLanguageVersionTools venvCmd := u.uvCommand(ctx, cwd, showOutput, infoWriter, errorWriter, "venv", "--quiet", "--allow-existing", u.virtualenvPath) if err := venvCmd.Run(); err != nil { - return errorWithStderr(err, "error creating virtual environment") + return errutil.ErrorWithStderr(err, "error creating virtual environment") } return nil @@ -229,13 +230,13 @@ func (u *uv) ListPackages(ctx context.Context, transitive bool) ([]PythonPackage cmd.Dir = u.root pipCmd = cmd } else { - return nil, errorWithStderr(err, "checking for pip") + return nil, errutil.ErrorWithStderr(err, "checking for pip") } } output, err := pipCmd.Output() if err != nil { - return nil, errorWithStderr(err, "listing packages") + return nil, errutil.ErrorWithStderr(err, "listing packages") } var packages []PythonPackage diff --git a/vendor/golang.org/x/crypto/ssh/messages.go b/vendor/golang.org/x/crypto/ssh/messages.go index b55f86056..118427bc0 100644 --- a/vendor/golang.org/x/crypto/ssh/messages.go +++ b/vendor/golang.org/x/crypto/ssh/messages.go @@ -818,6 +818,8 @@ func decode(packet []byte) (interface{}, error) { return new(userAuthSuccessMsg), nil case msgUserAuthFailure: msg = new(userAuthFailureMsg) + case msgUserAuthBanner: + msg = new(userAuthBannerMsg) case msgUserAuthPubKeyOk: msg = new(userAuthPubKeyOkMsg) case msgGlobalRequest: diff --git a/vendor/golang.org/x/crypto/ssh/tcpip.go b/vendor/golang.org/x/crypto/ssh/tcpip.go index ef5059a11..93d844f03 100644 --- a/vendor/golang.org/x/crypto/ssh/tcpip.go +++ b/vendor/golang.org/x/crypto/ssh/tcpip.go @@ -459,7 +459,7 @@ func (c *Client) dial(laddr string, lport int, raddr string, rport int) (Channel return nil, err } go DiscardRequests(in) - return ch, err + return ch, nil } type tcpChan struct { diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go index b8322598a..a4ea5d14f 100644 --- a/vendor/golang.org/x/sync/errgroup/errgroup.go +++ b/vendor/golang.org/x/sync/errgroup/errgroup.go @@ -46,7 +46,7 @@ func (g *Group) done() { // returns a non-nil error or the first time Wait returns, whichever occurs // first. func WithContext(ctx context.Context) (*Group, context.Context) { - ctx, cancel := withCancelCause(ctx) + ctx, cancel := context.WithCancelCause(ctx) return &Group{cancel: cancel}, ctx } diff --git a/vendor/golang.org/x/sync/errgroup/go120.go b/vendor/golang.org/x/sync/errgroup/go120.go deleted file mode 100644 index f93c740b6..000000000 --- a/vendor/golang.org/x/sync/errgroup/go120.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.20 - -package errgroup - -import "context" - -func withCancelCause(parent context.Context) (context.Context, func(error)) { - return context.WithCancelCause(parent) -} diff --git a/vendor/golang.org/x/sync/errgroup/pre_go120.go b/vendor/golang.org/x/sync/errgroup/pre_go120.go deleted file mode 100644 index 88ce33434..000000000 --- a/vendor/golang.org/x/sync/errgroup/pre_go120.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. 
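The errgroup change above drops the build-tagged shim that was only needed while Go versions before 1.20 were supported: context.WithCancelCause is now called directly, so the first error in a group can be recorded as the context's cancellation cause. A small, standalone illustration of the cause-aware API (not code from this patch):

package main

import (
	"context"
	"errors"
	"fmt"
)

func main() {
	ctx, cancel := context.WithCancelCause(context.Background())

	// Record the reason for cancellation; plain context.WithCancel cannot
	// carry a cause, which is what the deleted pre-go1.20 shim papered over.
	cancel(errors.New("first worker failed"))

	<-ctx.Done()
	fmt.Println(ctx.Err())          // context.Canceled
	fmt.Println(context.Cause(ctx)) // first worker failed
}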
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.20 - -package errgroup - -import "context" - -func withCancelCause(parent context.Context) (context.Context, func(error)) { - ctx, cancel := context.WithCancel(parent) - return ctx, func(error) { cancel() } -} diff --git a/vendor/modules.txt b/vendor/modules.txt index 01300fd39..ab9e5587c 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,8 +1,8 @@ # dario.cat/mergo v1.0.1 ## explicit; go 1.13 dario.cat/mergo -# github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0 -## explicit; go 1.18 +# github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.1 +## explicit; go 1.23.0 github.com/Azure/azure-sdk-for-go/sdk/azcore github.com/Azure/azure-sdk-for-go/sdk/azcore/arm github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource @@ -154,10 +154,10 @@ github.com/aws/aws-sdk-go-v2/internal/timeconv ## explicit; go 1.20 github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi -# github.com/aws/aws-sdk-go-v2/config v1.29.8 +# github.com/aws/aws-sdk-go-v2/config v1.29.9 ## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/config -# github.com/aws/aws-sdk-go-v2/credentials v1.17.61 +# github.com/aws/aws-sdk-go-v2/credentials v1.17.62 ## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/credentials github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds @@ -225,17 +225,17 @@ github.com/aws/aws-sdk-go-v2/service/s3/internal/arn github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints github.com/aws/aws-sdk-go-v2/service/s3/types -# github.com/aws/aws-sdk-go-v2/service/sso v1.25.0 +# github.com/aws/aws-sdk-go-v2/service/sso v1.25.1 ## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/service/sso github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints github.com/aws/aws-sdk-go-v2/service/sso/types -# github.com/aws/aws-sdk-go-v2/service/ssooidc v1.29.0 +# github.com/aws/aws-sdk-go-v2/service/ssooidc v1.29.1 ## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/service/ssooidc github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints github.com/aws/aws-sdk-go-v2/service/ssooidc/types -# github.com/aws/aws-sdk-go-v2/service/sts v1.33.16 +# github.com/aws/aws-sdk-go-v2/service/sts v1.33.17 ## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/service/sts github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints @@ -292,7 +292,7 @@ github.com/charmbracelet/bubbles/runeutil github.com/charmbracelet/bubbles/spinner github.com/charmbracelet/bubbles/textinput github.com/charmbracelet/bubbles/viewport -# github.com/charmbracelet/bubbletea v1.3.3 +# github.com/charmbracelet/bubbletea v1.3.4 ## explicit; go 1.18 github.com/charmbracelet/bubbletea # github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc @@ -595,8 +595,8 @@ github.com/pulumi/pulumi-aws/sdk/v6/go/aws/iam github.com/pulumi/pulumi-aws/sdk/v6/go/aws/internal github.com/pulumi/pulumi-aws/sdk/v6/go/aws/lb github.com/pulumi/pulumi-aws/sdk/v6/go/aws/s3 -# github.com/pulumi/pulumi-awsx/sdk/v2 v2.21.0 -## explicit; go 1.21.12 +# github.com/pulumi/pulumi-awsx/sdk/v2 v2.21.1 +## explicit; go 1.22 github.com/pulumi/pulumi-awsx/sdk/v2/go/awsx/awsx github.com/pulumi/pulumi-awsx/sdk/v2/go/awsx/ecs github.com/pulumi/pulumi-awsx/sdk/v2/go/awsx/internal @@ -644,7 +644,7 @@ github.com/pulumi/pulumi-random/sdk/v4/go/random/internal ## explicit; go 1.21 github.com/pulumi/pulumi-tls/sdk/v5/go/tls 
github.com/pulumi/pulumi-tls/sdk/v5/go/tls/internal -# github.com/pulumi/pulumi/sdk/v3 v3.152.0 +# github.com/pulumi/pulumi/sdk/v3 v3.154.0 ## explicit; go 1.22 github.com/pulumi/pulumi/sdk/v3 github.com/pulumi/pulumi/sdk/v3/go/auto @@ -661,6 +661,7 @@ github.com/pulumi/pulumi/sdk/v3/go/auto/optremotepreview github.com/pulumi/pulumi/sdk/v3/go/auto/optremoterefresh github.com/pulumi/pulumi/sdk/v3/go/auto/optremoteup github.com/pulumi/pulumi/sdk/v3/go/auto/optremove +github.com/pulumi/pulumi/sdk/v3/go/auto/optrename github.com/pulumi/pulumi/sdk/v3/go/auto/optup github.com/pulumi/pulumi/sdk/v3/go/common/apitype github.com/pulumi/pulumi/sdk/v3/go/common/constant @@ -684,6 +685,7 @@ github.com/pulumi/pulumi/sdk/v3/go/common/util/ciutil github.com/pulumi/pulumi/sdk/v3/go/common/util/cmdutil github.com/pulumi/pulumi/sdk/v3/go/common/util/contract github.com/pulumi/pulumi/sdk/v3/go/common/util/env +github.com/pulumi/pulumi/sdk/v3/go/common/util/errutil github.com/pulumi/pulumi/sdk/v3/go/common/util/fsutil github.com/pulumi/pulumi/sdk/v3/go/common/util/gitutil github.com/pulumi/pulumi/sdk/v3/go/common/util/httputil @@ -814,7 +816,7 @@ go.uber.org/atomic # go.uber.org/multierr v1.11.0 ## explicit; go 1.19 go.uber.org/multierr -# golang.org/x/crypto v0.35.0 +# golang.org/x/crypto v0.36.0 ## explicit; go 1.23.0 golang.org/x/crypto/argon2 golang.org/x/crypto/blake2b @@ -846,7 +848,7 @@ golang.org/x/exp/slog/internal/buffer # golang.org/x/mod v0.21.0 ## explicit; go 1.22.0 golang.org/x/mod/semver -# golang.org/x/net v0.36.0 +# golang.org/x/net v0.37.0 ## explicit; go 1.23.0 golang.org/x/net/context golang.org/x/net/http/httpguts @@ -858,22 +860,22 @@ golang.org/x/net/internal/socks golang.org/x/net/internal/timeseries golang.org/x/net/proxy golang.org/x/net/trace -# golang.org/x/sync v0.11.0 -## explicit; go 1.18 +# golang.org/x/sync v0.12.0 +## explicit; go 1.23.0 golang.org/x/sync/errgroup -# golang.org/x/sys v0.30.0 -## explicit; go 1.18 +# golang.org/x/sys v0.31.0 +## explicit; go 1.23.0 golang.org/x/sys/cpu golang.org/x/sys/execabs golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows golang.org/x/sys/windows/registry -# golang.org/x/term v0.29.0 -## explicit; go 1.18 +# golang.org/x/term v0.30.0 +## explicit; go 1.23.0 golang.org/x/term -# golang.org/x/text v0.22.0 -## explicit; go 1.18 +# golang.org/x/text v0.23.0 +## explicit; go 1.23.0 golang.org/x/text/encoding golang.org/x/text/encoding/internal golang.org/x/text/encoding/internal/identifier @@ -903,7 +905,7 @@ golang.org/x/tools/internal/stdlib golang.org/x/tools/internal/tokeninternal golang.org/x/tools/internal/typesinternal golang.org/x/tools/internal/versions -# google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 +# google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 ## explicit; go 1.21 google.golang.org/genproto/googleapis/rpc/status # google.golang.org/grpc v1.67.1 From 64ce18f548dad6eb519a94583102ed4acaef1ee5 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Fri, 21 Mar 2025 09:07:31 +0000 Subject: [PATCH 05/11] fix(deps): update all dependencies --- .github/workflows/build-go.yaml | 2 +- .github/workflows/build-oci.yaml | 4 + .github/workflows/build-on-hosted-runner.yaml | 2 +- .github/workflows/push-oci-pr.yml | 4 + go.mod | 104 +- go.sum | 244 +- oci/Containerfile | 14 +- tools/go.mod | 92 +- tools/go.sum | 606 +- .../checkcompilerdirectives.go | 11 +- .../Antonboom/nilnil/pkg/analyzer/analyzer.go | 72 +- 
.../testifylint/analyzer/checkers_factory.go | 1 + .../internal/checkers/bool_compare.go | 8 +- .../internal/checkers/checkers_registry.go | 4 +- .../testifylint/internal/checkers/empty.go | 69 +- .../internal/checkers/equal_values.go | 51 + .../internal/checkers/error_is_as.go | 34 +- .../internal/checkers/expected_actual.go | 7 + .../internal/checkers/formatter.go | 120 +- .../internal/checkers/helpers_basic_type.go | 32 +- .../internal/checkers/helpers_bool.go | 6 +- .../internal/checkers/helpers_diagnostic.go | 26 +- .../internal/checkers/helpers_http.go | 2 +- .../internal/checkers/helpers_len.go | 26 - .../testifylint/internal/checkers/len.go | 102 +- .../internal/checkers/negative_positive.go | 35 +- .../checkers/suite_method_signature.go | 71 + .../internal/checkers/suite_thelper.go | 2 +- .../internal/checkers/useless_assert.go | 94 +- .../testifylint/internal/config/config.go | 7 +- .../github.com/BurntSushi/toml/README.md | 2 +- .../github.com/BurntSushi/toml/decode.go | 23 + .../github.com/BurntSushi/toml/encode.go | 46 +- .../github.com/BurntSushi/toml/error.go | 2 +- .../vendor/github.com/BurntSushi/toml/lex.go | 27 +- .../vendor/github.com/BurntSushi/toml/meta.go | 3 - .../github.com/BurntSushi/toml/parse.go | 3 +- .../Djarvur/go-err113/comparison.go | 16 +- .../Djarvur/go-err113/definition.go | 2 +- .../go-exhaustruct/v3/analyzer/analyzer.go | 2 +- .../Masterminds/semver/v3/version.go | 24 +- .../github.com/bombsimon/wsl/v4/README.md | 14 +- .../github.com/bombsimon/wsl/v4/analyzer.go | 2 + .../vendor/github.com/bombsimon/wsl/v4/wsl.go | 72 +- .../perfsprint/analyzer/analyzer.go | 74 +- .../perfsprint/analyzer/diagnostic.go | 15 +- .../github.com/fsnotify/fsnotify/.cirrus.yml | 14 + .../fsnotify/fsnotify/.gitattributes | 1 - .../github.com/fsnotify/fsnotify/.gitignore | 14 +- .../github.com/fsnotify/fsnotify/AUTHORS | 62 - .../github.com/fsnotify/fsnotify/CHANGELOG.md | 220 +- .../fsnotify/fsnotify/CONTRIBUTING.md | 162 +- .../github.com/fsnotify/fsnotify/LICENSE | 47 +- .../github.com/fsnotify/fsnotify/README.md | 228 +- .../fsnotify/fsnotify/backend_fen.go | 484 ++ .../fsnotify/fsnotify/backend_inotify.go | 658 ++ .../fsnotify/fsnotify/backend_kqueue.go | 733 ++ .../fsnotify/fsnotify/backend_other.go | 23 + .../fsnotify/fsnotify/backend_windows.go | 682 ++ .../github.com/fsnotify/fsnotify/fen.go | 38 - .../github.com/fsnotify/fsnotify/fsnotify.go | 495 +- .../fsnotify/fsnotify/fsnotify_unsupported.go | 36 - .../github.com/fsnotify/fsnotify/inotify.go | 351 - .../fsnotify/fsnotify/inotify_poller.go | 187 - .../fsnotify/fsnotify/internal/darwin.go | 39 + .../fsnotify/internal/debug_darwin.go | 57 + .../fsnotify/internal/debug_dragonfly.go | 33 + .../fsnotify/internal/debug_freebsd.go | 42 + .../fsnotify/internal/debug_kqueue.go | 32 + .../fsnotify/fsnotify/internal/debug_linux.go | 56 + .../fsnotify/internal/debug_netbsd.go | 25 + .../fsnotify/internal/debug_openbsd.go | 28 + .../fsnotify/internal/debug_solaris.go | 45 + .../fsnotify/internal/debug_windows.go | 40 + .../fsnotify/fsnotify/internal/freebsd.go | 31 + .../fsnotify/fsnotify/internal/internal.go | 2 + .../fsnotify/fsnotify/internal/unix.go | 31 + .../fsnotify/fsnotify/internal/unix2.go | 7 + .../fsnotify/fsnotify/internal/windows.go | 41 + .../github.com/fsnotify/fsnotify/kqueue.go | 535 -- .../fsnotify/fsnotify/open_mode_bsd.go | 12 - .../fsnotify/fsnotify/open_mode_darwin.go | 13 - .../fsnotify/fsnotify/system_bsd.go | 7 + .../fsnotify/fsnotify/system_darwin.go | 8 + .../github.com/fsnotify/fsnotify/windows.go 
| 586 -- .../checkers/commentedOutCode_checker.go | 30 +- .../go-critic/checkers/rulesdata/rulesdata.go | 641 +- .../go-critic/checkers/sqlQuery_checker.go | 7 + .../go-critic/go-critic/checkers/utils.go | 339 +- .../vendor/github.com/golang/protobuf/AUTHORS | 3 - .../github.com/golang/protobuf/CONTRIBUTORS | 3 - .../golang/protobuf/proto/buffer.go | 324 - .../golang/protobuf/proto/defaults.go | 63 - .../golang/protobuf/proto/deprecated.go | 113 - .../golang/protobuf/proto/discard.go | 58 - .../golang/protobuf/proto/extensions.go | 356 - .../golang/protobuf/proto/properties.go | 306 - .../github.com/golang/protobuf/proto/proto.go | 167 - .../golang/protobuf/proto/registry.go | 317 - .../golang/protobuf/proto/text_decode.go | 801 -- .../golang/protobuf/proto/text_encode.go | 560 -- .../github.com/golang/protobuf/proto/wire.go | 78 - .../golang/protobuf/proto/wrappers.go | 34 - .../github.com/golang/protobuf/ptypes/any.go | 180 - .../golang/protobuf/ptypes/any/any.pb.go | 62 - .../github.com/golang/protobuf/ptypes/doc.go | 10 - .../golang/protobuf/ptypes/duration.go | 76 - .../protobuf/ptypes/duration/duration.pb.go | 63 - .../golang/protobuf/ptypes/timestamp.go | 112 - .../protobuf/ptypes/timestamp/timestamp.pb.go | 64 - .../pkg/commands/config_verify.go | 57 +- .../pkg/lint/lintersdb/builder_linter.go | 2 +- .../go-cmp/cmp/internal/function/func.go | 7 + .../github.com/google/go-cmp/cmp/options.go | 10 +- .../gostaticanalysis/comment/.tagpr | 35 + .../gostaticanalysis/comment/CHANGELOG.md | 34 + .../gostaticanalysis/comment/comment.go | 23 +- .../gostaticanalysis/comment/version.txt | 1 + .../github.com/hashicorp/hcl/.gitignore | 9 - .../github.com/hashicorp/hcl/.travis.yml | 13 - tools/vendor/github.com/hashicorp/hcl/LICENSE | 354 - .../vendor/github.com/hashicorp/hcl/Makefile | 18 - .../vendor/github.com/hashicorp/hcl/README.md | 125 - .../github.com/hashicorp/hcl/appveyor.yml | 19 - .../github.com/hashicorp/hcl/decoder.go | 729 -- tools/vendor/github.com/hashicorp/hcl/hcl.go | 11 - .../github.com/hashicorp/hcl/hcl/ast/ast.go | 219 - .../github.com/hashicorp/hcl/hcl/ast/walk.go | 52 - .../hashicorp/hcl/hcl/parser/error.go | 17 - .../hashicorp/hcl/hcl/parser/parser.go | 532 -- .../hashicorp/hcl/hcl/printer/nodes.go | 789 -- .../hashicorp/hcl/hcl/printer/printer.go | 66 - .../hashicorp/hcl/hcl/scanner/scanner.go | 652 -- .../hashicorp/hcl/hcl/strconv/quote.go | 241 - .../hashicorp/hcl/hcl/token/position.go | 46 - .../hashicorp/hcl/hcl/token/token.go | 219 - .../hashicorp/hcl/json/parser/flatten.go | 117 - .../hashicorp/hcl/json/parser/parser.go | 313 - .../hashicorp/hcl/json/scanner/scanner.go | 451 - .../hashicorp/hcl/json/token/position.go | 46 - .../hashicorp/hcl/json/token/token.go | 118 - tools/vendor/github.com/hashicorp/hcl/lex.go | 38 - .../vendor/github.com/hashicorp/hcl/parse.go | 39 - .../kisielk/errcheck/errcheck/errcheck.go | 6 +- .../kisielk/errcheck/errcheck/tags.go | 12 - .../kisielk/errcheck/errcheck/tags_compat.go | 13 - .../kkHAIKE/contextcheck/contextcheck.go | 8 + .../github.com/ldez/exptostd/exptostd.go | 11 +- .../macabu/inamedparam/.golangci.yml | 16 +- .../github.com/macabu/inamedparam/README.md | 10 +- .../macabu/inamedparam/inamedparam.go | 7 +- .../magiconair/properties/.gitignore | 6 - .../magiconair/properties/CHANGELOG.md | 205 - .../magiconair/properties/LICENSE.md | 24 - .../magiconair/properties/README.md | 128 - .../magiconair/properties/decode.go | 289 - .../github.com/magiconair/properties/doc.go | 155 - .../magiconair/properties/integrate.go | 35 - 
.../github.com/magiconair/properties/lex.go | 395 - .../github.com/magiconair/properties/load.go | 293 - .../magiconair/properties/parser.go | 86 - .../magiconair/properties/properties.go | 848 -- .../magiconair/properties/rangecheck.go | 31 - .../golang_protobuf_extensions/LICENSE | 201 - .../golang_protobuf_extensions/NOTICE | 1 - .../pbutil/.gitignore | 1 - .../pbutil/Makefile | 7 - .../pbutil/decode.go | 75 - .../pbutil/encode.go | 46 - .../github.com/mgechev/revive/lint/package.go | 6 + .../mgechev/revive/rule/struct_tag.go | 19 +- .../mitchellh/mapstructure/README.md | 46 - .../mitchellh/mapstructure/decode_hooks.go | 279 - .../mitchellh/mapstructure/error.go | 50 - .../protobuf => munnerz/goautoneg}/LICENSE | 27 +- .../github.com/munnerz/goautoneg/Makefile | 13 + .../ww => munnerz}/goautoneg/README.txt | 0 .../ww => munnerz}/goautoneg/autoneg.go | 109 +- .../internal/expression/actual/actualarg.go | 3 +- .../internal/ginkgoinfo/ginkgoinfo.go | 26 + .../internal/gomegainfo/gomegainfo.go | 34 +- .../pelletier/go-toml/.dockerignore | 2 - .../github.com/pelletier/go-toml/.gitignore | 5 - .../pelletier/go-toml/CONTRIBUTING.md | 132 - .../github.com/pelletier/go-toml/Dockerfile | 11 - .../github.com/pelletier/go-toml/LICENSE | 247 - .../github.com/pelletier/go-toml/Makefile | 29 - .../go-toml/PULL_REQUEST_TEMPLATE.md | 5 - .../github.com/pelletier/go-toml/README.md | 176 - .../github.com/pelletier/go-toml/SECURITY.md | 19 - .../pelletier/go-toml/azure-pipelines.yml | 188 - .../github.com/pelletier/go-toml/benchmark.sh | 35 - .../github.com/pelletier/go-toml/doc.go | 23 - .../pelletier/go-toml/example-crlf.toml | 30 - .../github.com/pelletier/go-toml/example.toml | 30 - .../github.com/pelletier/go-toml/fuzz.go | 31 - .../github.com/pelletier/go-toml/fuzz.sh | 15 - .../pelletier/go-toml/keysparsing.go | 112 - .../github.com/pelletier/go-toml/lexer.go | 1031 --- .../github.com/pelletier/go-toml/localtime.go | 287 - .../github.com/pelletier/go-toml/marshal.go | 1308 --- .../go-toml/marshal_OrderPreserve_test.toml | 39 - .../pelletier/go-toml/marshal_test.toml | 39 - .../github.com/pelletier/go-toml/parser.go | 507 -- .../github.com/pelletier/go-toml/position.go | 29 - .../github.com/pelletier/go-toml/token.go | 136 - .../github.com/pelletier/go-toml/toml.go | 533 -- .../github.com/pelletier/go-toml/tomlpub.go | 71 - .../pelletier/go-toml/tomltree_create.go | 155 - .../pelletier/go-toml/tomltree_write.go | 552 -- .../pelletier/go-toml/tomltree_writepub.go | 6 - .../prometheus/client_golang/NOTICE | 5 - .../client_golang/prometheus/collector.go | 6 +- .../client_golang/prometheus/counter.go | 61 +- .../client_golang/prometheus/desc.go | 60 +- .../client_golang/prometheus/doc.go | 107 +- .../prometheus/expvar_collector.go | 2 +- .../client_golang/prometheus/gauge.go | 36 +- .../client_golang/prometheus/get_pid.go | 26 + .../prometheus/get_pid_gopherjs.go | 23 + .../client_golang/prometheus/go_collector.go | 73 +- .../prometheus/go_collector_go116.go | 17 +- ...lector_go117.go => go_collector_latest.go} | 326 +- .../client_golang/prometheus/histogram.go | 1606 +++- .../prometheus/internal/almost_equal.go | 60 + .../prometheus/internal/difflib.go | 655 ++ .../internal/go_collector_options.go | 34 + .../prometheus/internal/go_runtime_metrics.go | 21 +- .../prometheus/internal/metric.go | 28 +- .../client_golang/prometheus/labels.go | 109 +- .../client_golang/prometheus/metric.go | 139 +- .../client_golang/prometheus/num_threads.go | 25 + .../prometheus/num_threads_gopherjs.go | 22 + 
.../client_golang/prometheus/observer.go | 2 +- .../prometheus/process_collector.go | 66 +- .../prometheus/process_collector_darwin.go | 130 + .../process_collector_mem_cgo_darwin.c | 84 + .../process_collector_mem_cgo_darwin.go | 51 + .../process_collector_mem_nocgo_darwin.go | 39 + .../process_collector_not_supported.go | 33 + ....go => process_collector_procfsenabled.go} | 34 +- .../prometheus/process_collector_windows.go | 21 +- .../client_golang/prometheus/registry.go | 188 +- .../client_golang/prometheus/summary.go | 130 +- .../prometheus/testutil/promlint/problem.go | 33 + .../prometheus/testutil/promlint/promlint.go | 315 +- .../testutil/promlint/validation.go | 34 + .../validations/counter_validations.go | 40 + .../validations/duplicate_validations.go | 37 + .../validations/generic_name_validations.go | 101 + .../promlint/validations/help_validations.go | 32 + .../validations/histogram_validations.go | 63 + .../testutil/promlint/validations/units.go | 118 + .../client_golang/prometheus/timer.go | 39 +- .../client_golang/prometheus/value.go | 104 +- .../client_golang/prometheus/vec.go | 183 +- .../client_golang/prometheus/vnext.go | 23 + .../client_golang/prometheus/wrap.go | 8 +- .../prometheus/client_model/go/metrics.pb.go | 1602 +++- .../prometheus/common/expfmt/decode.go | 56 +- .../prometheus/common/expfmt/encode.go | 90 +- .../prometheus/common/expfmt/expfmt.go | 194 +- .../prometheus/common/expfmt/fuzz.go | 5 +- .../common/expfmt/openmetrics_create.go | 301 +- .../prometheus/common/expfmt/text_create.go | 121 +- .../prometheus/common/expfmt/text_parse.go | 178 +- .../prometheus/common/model/alert.go | 40 +- .../prometheus/common/model/labels.go | 27 +- .../prometheus/common/model/labelset.go | 11 - .../common/model/labelset_string.go | 43 + .../prometheus/common/model/metadata.go | 28 + .../prometheus/common/model/metric.go | 377 +- .../prometheus/common/model/signature.go | 6 +- .../prometheus/common/model/silence.go | 19 +- .../prometheus/common/model/time.go | 91 +- .../prometheus/common/model/value.go | 262 +- .../prometheus/common/model/value_float.go | 99 + .../common/model/value_histogram.go | 179 + .../prometheus/common/model/value_type.go | 83 + .../github.com/prometheus/procfs/.gitignore | 3 +- .../prometheus/procfs/.golangci.yml | 29 +- .../prometheus/procfs/CODE_OF_CONDUCT.md | 4 +- .../prometheus/procfs/CONTRIBUTING.md | 4 +- .../prometheus/procfs/MAINTAINERS.md | 3 +- .../github.com/prometheus/procfs/Makefile | 10 +- .../prometheus/procfs/Makefile.common | 130 +- .../github.com/prometheus/procfs/README.md | 10 +- .../github.com/prometheus/procfs/SECURITY.md | 2 +- .../github.com/prometheus/procfs/arp.go | 51 +- .../github.com/prometheus/procfs/buddyinfo.go | 10 +- .../github.com/prometheus/procfs/cpuinfo.go | 58 +- .../prometheus/procfs/cpuinfo_armx.go | 1 + .../procfs/cpuinfo_loong64.go} | 13 +- .../prometheus/procfs/cpuinfo_mipsx.go | 1 + .../prometheus/procfs/cpuinfo_others.go | 4 +- .../prometheus/procfs/cpuinfo_ppcx.go | 1 + .../prometheus/procfs/cpuinfo_riscvx.go | 1 + .../prometheus/procfs/cpuinfo_s390x.go | 1 + .../prometheus/procfs/cpuinfo_x86.go | 1 + .../github.com/prometheus/procfs/crypto.go | 7 +- .../github.com/prometheus/procfs/doc.go | 51 +- .../prometheus/procfs/fixtures.ttar | 7673 ----------------- .../vendor/github.com/prometheus/procfs/fs.go | 21 +- .../prometheus/procfs/fs_statfs_notype.go | 23 + .../prometheus/procfs/fs_statfs_type.go | 33 + .../github.com/prometheus/procfs/fscache.go | 12 +- .../prometheus/procfs/internal/fs/fs.go | 5 +- 
.../prometheus/procfs/internal/util/parse.go | 35 +- .../procfs/internal/util/readfile.go | 11 +- .../procfs/internal/util/sysreadfile.go | 8 +- .../internal/util/sysreadfile_compat.go | 3 +- .../github.com/prometheus/procfs/ipvs.go | 10 +- .../prometheus/procfs/kernel_random.go | 1 + .../github.com/prometheus/procfs/loadavg.go | 6 +- .../github.com/prometheus/procfs/mdstat.go | 70 +- .../github.com/prometheus/procfs/meminfo.go | 220 +- .../github.com/prometheus/procfs/mountinfo.go | 10 +- .../prometheus/procfs/mountstats.go | 119 +- .../prometheus/procfs/net_conntrackstat.go | 101 +- .../github.com/prometheus/procfs/net_dev.go | 8 +- .../prometheus/procfs/net_dev_snmp6.go | 96 + .../prometheus/procfs/net_ip_socket.go | 66 +- .../prometheus/procfs/net_protocols.go | 8 +- .../github.com/prometheus/procfs/net_route.go | 143 + .../prometheus/procfs/net_sockstat.go | 9 +- .../prometheus/procfs/net_softnet.go | 87 +- .../github.com/prometheus/procfs/net_tcp.go | 4 + .../prometheus/procfs/net_tls_stat.go | 119 + .../github.com/prometheus/procfs/net_unix.go | 22 +- .../prometheus/procfs/net_wireless.go | 182 + .../procfs/{xfrm.go => net_xfrm.go} | 11 +- .../github.com/prometheus/procfs/netstat.go | 60 +- .../github.com/prometheus/procfs/proc.go | 49 +- .../prometheus/procfs/proc_cgroup.go | 14 +- .../prometheus/procfs/proc_cgroups.go | 98 + .../prometheus/procfs/proc_environ.go | 2 +- .../prometheus/procfs/proc_fdinfo.go | 13 +- .../prometheus/procfs/proc_interrupts.go | 98 + .../github.com/prometheus/procfs/proc_io.go | 2 +- .../prometheus/procfs/proc_limits.go | 6 +- .../github.com/prometheus/procfs/proc_maps.go | 36 +- .../prometheus/procfs/proc_netstat.go | 443 + .../github.com/prometheus/procfs/proc_ns.go | 6 +- .../github.com/prometheus/procfs/proc_psi.go | 20 +- .../prometheus/procfs/proc_smaps.go | 31 +- .../github.com/prometheus/procfs/proc_snmp.go | 353 + .../prometheus/procfs/proc_snmp6.go | 381 + .../github.com/prometheus/procfs/proc_stat.go | 30 +- .../prometheus/procfs/proc_status.go | 124 +- .../github.com/prometheus/procfs/proc_sys.go | 51 + .../github.com/prometheus/procfs/schedstat.go | 6 +- .../github.com/prometheus/procfs/slab.go | 4 +- .../github.com/prometheus/procfs/softirqs.go | 160 + .../github.com/prometheus/procfs/stat.go | 60 +- .../github.com/prometheus/procfs/swaps.go | 8 +- .../github.com/prometheus/procfs/thread.go | 80 + .../vendor/github.com/prometheus/procfs/vm.go | 12 +- .../github.com/prometheus/procfs/zoneinfo.go | 9 +- .../rogpeppe/go-internal/diff/diff.go | 9 +- .../ryancurrah/gomodguard/.golangci.yml | 2 +- .../ryancurrah/gomodguard/README.md | 4 +- .../ryancurrah/gomodguard/processor.go | 4 - .../sagikazarmark/locafero/.editorconfig | 21 + .../github.com/sagikazarmark/locafero/.envrc | 4 + .../sagikazarmark/locafero}/.gitignore | 4 + .../sagikazarmark/locafero/.golangci.yaml | 27 + .../locafero}/LICENSE | 12 +- .../sagikazarmark/locafero/README.md | 37 + .../sagikazarmark/locafero/file_type.go | 28 + .../sagikazarmark/locafero/finder.go | 165 + .../sagikazarmark/locafero/flake.lock | 472 + .../sagikazarmark/locafero}/flake.nix | 27 +- .../github.com/sagikazarmark/locafero/glob.go | 5 + .../sagikazarmark/locafero/glob_windows.go | 8 + .../sagikazarmark/locafero/helpers.go | 41 + .../sagikazarmark/locafero/justfile | 11 + .../github.com/sourcegraph/conc/.golangci.yml | 11 + .../github.com/sourcegraph/conc}/LICENSE | 4 +- .../github.com/sourcegraph/conc/README.md | 464 + .../internal/multierror/multierror_go119.go | 10 + 
.../internal/multierror/multierror_go120.go | 10 + .../github.com/sourcegraph/conc/iter/iter.go | 85 + .../github.com/sourcegraph/conc/iter/map.go | 65 + .../sourcegraph/conc/panics/panics.go | 102 + .../github.com/sourcegraph/conc/panics/try.go | 11 + .../github.com/sourcegraph/conc/waitgroup.go | 52 + tools/vendor/github.com/spf13/afero/README.md | 39 +- tools/vendor/github.com/spf13/cast/README.md | 14 +- tools/vendor/github.com/spf13/cast/caste.go | 126 +- tools/vendor/github.com/spf13/cobra/README.md | 5 +- .../github.com/spf13/cobra/active_help.go | 2 +- .../spf13/cobra/bash_completionsV2.go | 152 +- tools/vendor/github.com/spf13/cobra/cobra.go | 16 +- .../vendor/github.com/spf13/cobra/command.go | 331 +- .../github.com/spf13/cobra/completions.go | 126 +- .../spf13/cobra/powershell_completions.go | 35 +- .../spf13/jwalterweatherman/.gitignore | 24 - .../spf13/jwalterweatherman/README.md | 148 - .../jwalterweatherman/default_notepad.go | 111 - .../spf13/jwalterweatherman/log_counter.go | 46 - .../spf13/jwalterweatherman/notepad.go | 225 - .../github.com/spf13/viper/.editorconfig | 3 + tools/vendor/github.com/spf13/viper/.envrc | 4 + .../vendor/github.com/spf13/viper/.gitignore | 3 + .../github.com/spf13/viper/.golangci.yaml | 25 +- .../vendor/github.com/spf13/viper/.yamlignore | 2 + .../github.com/spf13/viper/.yamllint.yaml | 6 + tools/vendor/github.com/spf13/viper/Makefile | 47 +- tools/vendor/github.com/spf13/viper/README.md | 113 +- .../github.com/spf13/viper/TROUBLESHOOTING.md | 4 +- .../vendor/github.com/spf13/viper/UPDATES.md | 126 + .../vendor/github.com/spf13/viper/encoding.go | 181 + .../github.com/spf13/viper/experimental.go | 8 + .../spf13/viper/experimental_logger.go | 11 - .../spf13/viper/{viper_go1_15.go => file.go} | 57 +- tools/vendor/github.com/spf13/viper/finder.go | 55 + tools/vendor/github.com/spf13/viper/flags.go | 4 +- .../vendor/github.com/spf13/viper/flake.lock | 472 + tools/vendor/github.com/spf13/viper/flake.nix | 57 + tools/vendor/github.com/spf13/viper/fs.go | 65 - .../spf13/viper/internal/encoding/decoder.go | 61 - .../viper/internal/encoding/dotenv/codec.go | 6 +- .../internal/encoding/dotenv/map_utils.go | 18 +- .../spf13/viper/internal/encoding/encoder.go | 60 - .../spf13/viper/internal/encoding/error.go | 7 - .../viper/internal/encoding/hcl/codec.go | 40 - .../viper/internal/encoding/ini/codec.go | 99 - .../viper/internal/encoding/ini/map_utils.go | 74 - .../internal/encoding/javaproperties/codec.go | 86 - .../encoding/javaproperties/map_utils.go | 74 - .../viper/internal/encoding/json/codec.go | 4 +- .../viper/internal/encoding/toml/codec.go | 33 +- .../viper/internal/encoding/toml/codec2.go | 19 - .../viper/internal/encoding/yaml/codec.go | 6 +- .../viper/internal/encoding/yaml/yaml2.go | 14 - .../viper/internal/encoding/yaml/yaml3.go | 14 - .../viper/internal/features/bind_struct.go | 5 + .../internal/features/bind_struct_default.go | 5 + .../spf13/viper/internal/features/finder.go | 5 + .../viper/internal/features/finder_default.go | 5 + tools/vendor/github.com/spf13/viper/logger.go | 78 +- tools/vendor/github.com/spf13/viper/remote.go | 256 + tools/vendor/github.com/spf13/viper/util.go | 76 +- tools/vendor/github.com/spf13/viper/viper.go | 971 +-- .../github.com/spf13/viper/viper_go1_16.go | 32 - tools/vendor/github.com/spf13/viper/watch.go | 12 - .../github.com/spf13/viper/watch_wasm.go | 30 - .../github.com/subosito/gotenv/CHANGELOG.md | 37 + .../github.com/subosito/gotenv/gotenv.go | 92 +- .../tdakkota/asciicheck/asciicheck.go | 107 +- 
.../vendor/github.com/tetafro/godot/README.md | 1 + .../github.com/tetafro/godot/getters.go | 31 + .../github.com/tetafro/godot/settings.go | 2 + .../xen0n/gosmopolitan/.golangci.yml | 6 +- tools/vendor/go.uber.org/atomic/.codecov.yml | 19 - tools/vendor/go.uber.org/atomic/.gitignore | 12 - tools/vendor/go.uber.org/atomic/.travis.yml | 27 - tools/vendor/go.uber.org/atomic/CHANGELOG.md | 76 - tools/vendor/go.uber.org/atomic/LICENSE.txt | 19 - tools/vendor/go.uber.org/atomic/Makefile | 78 - tools/vendor/go.uber.org/atomic/README.md | 63 - tools/vendor/go.uber.org/atomic/bool.go | 81 - tools/vendor/go.uber.org/atomic/doc.go | 23 - tools/vendor/go.uber.org/atomic/duration.go | 82 - .../vendor/go.uber.org/atomic/duration_ext.go | 40 - tools/vendor/go.uber.org/atomic/float64.go | 76 - .../vendor/go.uber.org/atomic/float64_ext.go | 47 - tools/vendor/go.uber.org/atomic/gen.go | 26 - tools/vendor/go.uber.org/atomic/int32.go | 102 - tools/vendor/go.uber.org/atomic/int64.go | 102 - tools/vendor/go.uber.org/atomic/nocmp.go | 35 - tools/vendor/go.uber.org/atomic/string.go | 54 - tools/vendor/go.uber.org/atomic/string_ext.go | 43 - tools/vendor/go.uber.org/atomic/uint32.go | 102 - tools/vendor/go.uber.org/atomic/uint64.go | 102 - tools/vendor/go.uber.org/atomic/value.go | 31 - tools/vendor/go.uber.org/multierr/.travis.yml | 23 - .../vendor/go.uber.org/multierr/CHANGELOG.md | 35 + tools/vendor/go.uber.org/multierr/LICENSE.txt | 2 +- tools/vendor/go.uber.org/multierr/Makefile | 6 +- tools/vendor/go.uber.org/multierr/README.md | 30 +- tools/vendor/go.uber.org/multierr/error.go | 415 +- .../error_post_go120.go} | 35 +- .../multierr/{go113.go => error_pre_go120.go} | 31 +- tools/vendor/go.uber.org/multierr/glide.yaml | 8 - tools/vendor/go.uber.org/zap/.golangci.yml | 77 + tools/vendor/go.uber.org/zap/.readme.tmpl | 10 +- tools/vendor/go.uber.org/zap/CHANGELOG.md | 292 +- .../go.uber.org/zap/{LICENSE.txt => LICENSE} | 0 tools/vendor/go.uber.org/zap/Makefile | 87 +- tools/vendor/go.uber.org/zap/README.md | 78 +- tools/vendor/go.uber.org/zap/array.go | 127 + tools/vendor/go.uber.org/zap/array_go118.go | 156 - tools/vendor/go.uber.org/zap/buffer/buffer.go | 5 + tools/vendor/go.uber.org/zap/buffer/pool.go | 20 +- tools/vendor/go.uber.org/zap/config.go | 84 +- tools/vendor/go.uber.org/zap/error.go | 14 +- tools/vendor/go.uber.org/zap/field.go | 196 +- tools/vendor/go.uber.org/zap/http_handler.go | 19 +- .../go.uber.org/zap/internal/level_enabler.go | 2 + .../bool_ext.go => zap/internal/pool/pool.go} | 49 +- .../stacktrace/stack.go} | 81 +- tools/vendor/go.uber.org/zap/level.go | 9 +- tools/vendor/go.uber.org/zap/logger.go | 87 +- tools/vendor/go.uber.org/zap/options.go | 15 + tools/vendor/go.uber.org/zap/sink.go | 5 +- tools/vendor/go.uber.org/zap/sugar.go | 108 +- tools/vendor/go.uber.org/zap/writer.go | 12 +- .../zap/zapcore/console_encoder.go | 16 +- tools/vendor/go.uber.org/zap/zapcore/core.go | 6 +- .../vendor/go.uber.org/zap/zapcore/encoder.go | 15 + tools/vendor/go.uber.org/zap/zapcore/entry.go | 22 +- tools/vendor/go.uber.org/zap/zapcore/error.go | 14 +- tools/vendor/go.uber.org/zap/zapcore/field.go | 2 +- .../go.uber.org/zap/zapcore/json_encoder.go | 157 +- .../error.go => zap/zapcore/lazy_with.go} | 47 +- .../vendor/go.uber.org/zap/zapcore/sampler.go | 9 +- .../golang.org/x/text/encoding/encoding.go | 335 + .../internal/identifier/identifier.go | 81 + .../text/encoding/internal/identifier/mib.go | 1627 ++++ .../x/text/encoding/internal/internal.go | 75 + .../x/text/encoding/unicode/override.go | 82 + 
.../x/text/encoding/unicode/unicode.go | 512 ++ .../x/text/internal/number/format.go | 2 - .../internal/utf8internal/utf8internal.go | 87 + .../golang.org/x/text/language/parse.go | 2 +- .../encoding/protodelim/protodelim.go | 160 + .../protobuf/encoding/prototext/decode.go | 5 - .../internal/editionssupport/editions.go | 18 - .../protobuf/internal/encoding/tag/tag.go | 8 +- .../protobuf/internal/filedesc/desc.go | 9 +- .../protobuf/internal/filedesc/desc_lazy.go | 9 - .../protobuf/internal/filetype/build.go | 2 +- .../protobuf/internal/flags/flags.go | 7 +- .../protobuf/internal/genid/goname.go | 5 - .../protobuf/internal/impl/codec_field.go | 75 - .../protobuf/internal/impl/codec_message.go | 3 - .../internal/impl/codec_message_opaque.go | 3 - .../protobuf/internal/impl/lazy.go | 2 +- .../protobuf/internal/impl/legacy_message.go | 5 +- .../protobuf/internal/impl/message.go | 13 - .../protobuf/internal/impl/message_opaque.go | 5 - .../protobuf/internal/impl/message_reflect.go | 5 - .../internal/impl/message_reflect_field.go | 76 - .../protobuf/internal/impl/pointer_unsafe.go | 1 - .../protobuf/internal/impl/validate.go | 24 +- .../protobuf/internal/impl/weak.go | 74 - .../protobuf/internal/version/version.go | 2 +- .../protobuf/proto/decode.go | 5 - .../protobuf/reflect/protodesc/desc.go | 292 - .../protobuf/reflect/protodesc/desc_init.go | 289 - .../reflect/protodesc/desc_resolve.go | 291 - .../reflect/protodesc/desc_validate.go | 371 - .../protobuf/reflect/protodesc/editions.go | 181 - .../protobuf/reflect/protodesc/proto.go | 274 - .../protobuf/reflect/protoreflect/type.go | 12 +- .../types/descriptorpb/descriptor.pb.go | 5310 ------------ .../types/gofeaturespb/go_features.pb.go | 345 - .../protobuf/types/known/anypb/any.pb.go | 479 - .../types/known/durationpb/duration.pb.go | 357 - tools/vendor/gopkg.in/ini.v1/.editorconfig | 12 - tools/vendor/gopkg.in/ini.v1/.gitignore | 7 - tools/vendor/gopkg.in/ini.v1/.golangci.yml | 27 - tools/vendor/gopkg.in/ini.v1/LICENSE | 191 - tools/vendor/gopkg.in/ini.v1/Makefile | 15 - tools/vendor/gopkg.in/ini.v1/README.md | 43 - tools/vendor/gopkg.in/ini.v1/codecov.yml | 16 - tools/vendor/gopkg.in/ini.v1/data_source.go | 76 - tools/vendor/gopkg.in/ini.v1/deprecated.go | 22 - tools/vendor/gopkg.in/ini.v1/error.go | 49 - tools/vendor/gopkg.in/ini.v1/file.go | 541 -- tools/vendor/gopkg.in/ini.v1/helper.go | 24 - tools/vendor/gopkg.in/ini.v1/ini.go | 176 - tools/vendor/gopkg.in/ini.v1/key.go | 837 -- tools/vendor/gopkg.in/ini.v1/parser.go | 520 -- tools/vendor/gopkg.in/ini.v1/section.go | 256 - tools/vendor/gopkg.in/ini.v1/struct.go | 747 -- tools/vendor/modules.txt | 225 +- tools/vendor/mvdan.cc/unparam/check/check.go | 2 +- vendor/github.com/BurntSushi/toml/README.md | 2 +- vendor/github.com/BurntSushi/toml/decode.go | 31 +- vendor/github.com/BurntSushi/toml/encode.go | 46 +- vendor/github.com/BurntSushi/toml/error.go | 65 +- vendor/github.com/BurntSushi/toml/lex.go | 33 +- vendor/github.com/BurntSushi/toml/meta.go | 3 - vendor/github.com/BurntSushi/toml/parse.go | 17 +- .../go-crypto/internal/byteutil/byteutil.go | 8 +- .../ProtonMail/go-crypto/ocb/ocb.go | 55 +- .../go-crypto/openpgp/armor/encode.go | 12 +- .../go-crypto/openpgp/errors/errors.go | 20 + .../go-crypto/openpgp/packet/aead_crypter.go | 120 +- .../openpgp/packet/aead_encrypted.go | 12 +- .../go-crypto/openpgp/packet/config.go | 12 + .../go-crypto/openpgp/packet/public_key.go | 7 +- .../go-crypto/openpgp/packet/signature.go | 4 +- .../packet/symmetrically_encrypted_aead.go | 27 +- 
.../github.com/aead/chacha20/chacha/chacha.go | 197 - .../aead/chacha20/chacha/chachaAVX2_amd64.s | 406 - .../aead/chacha20/chacha/chacha_386.go | 60 - .../aead/chacha20/chacha/chacha_386.s | 163 - .../aead/chacha20/chacha/chacha_amd64.go | 76 - .../aead/chacha20/chacha/chacha_amd64.s | 1072 --- .../aead/chacha20/chacha/chacha_generic.go | 319 - .../aead/chacha20/chacha/chacha_ref.go | 33 - .../github.com/aead/chacha20/chacha/const.s | 53 - .../github.com/aead/chacha20/chacha/macro.s | 163 - .../aws/protocol/eventstream/CHANGELOG.md | 32 + .../eventstream/go_module_metadata.go | 2 +- .../aws/protocol/eventstream/message.go | 22 +- .../aws/aws-sdk-go-v2/config/CHANGELOG.md | 4 - .../config/go_module_metadata.go | 2 +- .../aws-sdk-go-v2/internal/v4a/CHANGELOG.md | 121 + .../internal/v4a/go_module_metadata.go | 2 +- .../internal/v4a/internal/v4/headers.go | 7 +- .../aws/aws-sdk-go-v2/internal/v4a/smithy.go | 8 +- .../aws-sdk-go-v2/service/ec2/CHANGELOG.md | 37 + .../aws-sdk-go-v2/service/ec2/api_client.go | 31 + .../ec2/api_op_AcceptAddressTransfer.go | 3 + ...ceptCapacityReservationBillingOwnership.go | 3 + ...op_AcceptReservedInstancesExchangeQuote.go | 3 + ...ansitGatewayMulticastDomainAssociations.go | 3 + ...p_AcceptTransitGatewayPeeringAttachment.go | 3 + ...pi_op_AcceptTransitGatewayVpcAttachment.go | 3 + .../api_op_AcceptVpcEndpointConnections.go | 3 + .../ec2/api_op_AcceptVpcPeeringConnection.go | 3 + .../service/ec2/api_op_AdvertiseByoipCidr.go | 3 + .../service/ec2/api_op_AllocateAddress.go | 3 + .../service/ec2/api_op_AllocateHosts.go | 3 + .../ec2/api_op_AllocateIpamPoolCidr.go | 3 + ...ySecurityGroupsToClientVpnTargetNetwork.go | 3 + .../service/ec2/api_op_AssignIpv6Addresses.go | 3 + .../ec2/api_op_AssignPrivateIpAddresses.go | 3 + .../api_op_AssignPrivateNatGatewayAddress.go | 3 + .../service/ec2/api_op_AssociateAddress.go | 3 + ...ssociateCapacityReservationBillingOwner.go | 3 + .../api_op_AssociateClientVpnTargetNetwork.go | 3 + .../ec2/api_op_AssociateDhcpOptions.go | 3 + ...i_op_AssociateEnclaveCertificateIamRole.go | 3 + .../ec2/api_op_AssociateIamInstanceProfile.go | 3 + .../api_op_AssociateInstanceEventWindow.go | 3 + .../service/ec2/api_op_AssociateIpamByoasn.go | 3 + .../api_op_AssociateIpamResourceDiscovery.go | 3 + .../ec2/api_op_AssociateNatGatewayAddress.go | 3 + .../service/ec2/api_op_AssociateRouteTable.go | 3 + .../ec2/api_op_AssociateSecurityGroupVpc.go | 3 + .../ec2/api_op_AssociateSubnetCidrBlock.go | 3 + ..._AssociateTransitGatewayMulticastDomain.go | 3 + ...i_op_AssociateTransitGatewayPolicyTable.go | 3 + ...pi_op_AssociateTransitGatewayRouteTable.go | 3 + .../ec2/api_op_AssociateTrunkInterface.go | 3 + .../ec2/api_op_AssociateVpcCidrBlock.go | 3 + .../ec2/api_op_AttachClassicLinkVpc.go | 3 + .../ec2/api_op_AttachInternetGateway.go | 3 + .../ec2/api_op_AttachNetworkInterface.go | 3 + ...pi_op_AttachVerifiedAccessTrustProvider.go | 3 + .../service/ec2/api_op_AttachVolume.go | 3 + .../service/ec2/api_op_AttachVpnGateway.go | 3 + .../ec2/api_op_AuthorizeClientVpnIngress.go | 3 + .../api_op_AuthorizeSecurityGroupEgress.go | 3 + .../api_op_AuthorizeSecurityGroupIngress.go | 3 + .../service/ec2/api_op_BundleInstance.go | 3 + .../service/ec2/api_op_CancelBundleTask.go | 3 + .../ec2/api_op_CancelCapacityReservation.go | 3 + .../api_op_CancelCapacityReservationFleets.go | 3 + .../ec2/api_op_CancelConversionTask.go | 3 + .../api_op_CancelDeclarativePoliciesReport.go | 3 + .../service/ec2/api_op_CancelExportTask.go | 3 + .../ec2/api_op_CancelImageLaunchPermission.go 
| 3 + .../service/ec2/api_op_CancelImportTask.go | 3 + .../api_op_CancelReservedInstancesListing.go | 3 + .../ec2/api_op_CancelSpotFleetRequests.go | 3 + .../ec2/api_op_CancelSpotInstanceRequests.go | 3 + .../ec2/api_op_ConfirmProductInstance.go | 3 + .../service/ec2/api_op_CopyFpgaImage.go | 3 + .../service/ec2/api_op_CopyImage.go | 16 + .../service/ec2/api_op_CopySnapshot.go | 3 + .../ec2/api_op_CreateCapacityReservation.go | 3 + ...op_CreateCapacityReservationBySplitting.go | 3 + .../api_op_CreateCapacityReservationFleet.go | 3 + .../ec2/api_op_CreateCarrierGateway.go | 3 + .../ec2/api_op_CreateClientVpnEndpoint.go | 3 + .../ec2/api_op_CreateClientVpnRoute.go | 3 + .../service/ec2/api_op_CreateCoipCidr.go | 3 + .../service/ec2/api_op_CreateCoipPool.go | 3 + .../ec2/api_op_CreateCustomerGateway.go | 3 + .../service/ec2/api_op_CreateDefaultSubnet.go | 3 + .../service/ec2/api_op_CreateDefaultVpc.go | 3 + .../service/ec2/api_op_CreateDhcpOptions.go | 3 + .../api_op_CreateEgressOnlyInternetGateway.go | 3 + .../service/ec2/api_op_CreateFleet.go | 3 + .../service/ec2/api_op_CreateFlowLogs.go | 3 + .../service/ec2/api_op_CreateFpgaImage.go | 3 + .../service/ec2/api_op_CreateImage.go | 3 + .../api_op_CreateInstanceConnectEndpoint.go | 3 + .../ec2/api_op_CreateInstanceEventWindow.go | 3 + .../ec2/api_op_CreateInstanceExportTask.go | 3 + .../ec2/api_op_CreateInternetGateway.go | 3 + .../service/ec2/api_op_CreateIpam.go | 3 + ...teIpamExternalResourceVerificationToken.go | 3 + .../service/ec2/api_op_CreateIpamPool.go | 3 + .../ec2/api_op_CreateIpamResourceDiscovery.go | 3 + .../service/ec2/api_op_CreateIpamScope.go | 3 + .../service/ec2/api_op_CreateKeyPair.go | 3 + .../ec2/api_op_CreateLaunchTemplate.go | 50 +- .../ec2/api_op_CreateLaunchTemplateVersion.go | 48 +- .../ec2/api_op_CreateLocalGatewayRoute.go | 3 + .../api_op_CreateLocalGatewayRouteTable.go | 3 + ...teTableVirtualInterfaceGroupAssociation.go | 3 + ...ateLocalGatewayRouteTableVpcAssociation.go | 3 + .../ec2/api_op_CreateManagedPrefixList.go | 3 + .../service/ec2/api_op_CreateNatGateway.go | 3 + .../service/ec2/api_op_CreateNetworkAcl.go | 3 + .../ec2/api_op_CreateNetworkAclEntry.go | 3 + ...api_op_CreateNetworkInsightsAccessScope.go | 3 + .../ec2/api_op_CreateNetworkInsightsPath.go | 3 + .../ec2/api_op_CreateNetworkInterface.go | 3 + ...api_op_CreateNetworkInterfacePermission.go | 3 + .../ec2/api_op_CreatePlacementGroup.go | 3 + .../ec2/api_op_CreatePublicIpv4Pool.go | 3 + .../ec2/api_op_CreateReplaceRootVolumeTask.go | 3 + .../api_op_CreateReservedInstancesListing.go | 3 + .../ec2/api_op_CreateRestoreImageTask.go | 3 + .../service/ec2/api_op_CreateRoute.go | 3 + .../service/ec2/api_op_CreateRouteTable.go | 3 + .../service/ec2/api_op_CreateSecurityGroup.go | 8 +- .../service/ec2/api_op_CreateSnapshot.go | 3 + .../service/ec2/api_op_CreateSnapshots.go | 3 + .../api_op_CreateSpotDatafeedSubscription.go | 3 + .../ec2/api_op_CreateStoreImageTask.go | 3 + .../service/ec2/api_op_CreateSubnet.go | 3 + .../ec2/api_op_CreateSubnetCidrReservation.go | 3 + .../service/ec2/api_op_CreateTags.go | 3 + .../ec2/api_op_CreateTrafficMirrorFilter.go | 3 + .../api_op_CreateTrafficMirrorFilterRule.go | 3 + .../ec2/api_op_CreateTrafficMirrorSession.go | 3 + .../ec2/api_op_CreateTrafficMirrorTarget.go | 3 + .../ec2/api_op_CreateTransitGateway.go | 3 + .../ec2/api_op_CreateTransitGatewayConnect.go | 3 + .../api_op_CreateTransitGatewayConnectPeer.go | 3 + ..._op_CreateTransitGatewayMulticastDomain.go | 3 + ...p_CreateTransitGatewayPeeringAttachment.go | 3 + 
.../api_op_CreateTransitGatewayPolicyTable.go | 3 + ...CreateTransitGatewayPrefixListReference.go | 3 + .../ec2/api_op_CreateTransitGatewayRoute.go | 3 + .../api_op_CreateTransitGatewayRouteTable.go | 3 + ...ateTransitGatewayRouteTableAnnouncement.go | 3 + ...pi_op_CreateTransitGatewayVpcAttachment.go | 3 + .../api_op_CreateVerifiedAccessEndpoint.go | 3 + .../ec2/api_op_CreateVerifiedAccessGroup.go | 3 + .../api_op_CreateVerifiedAccessInstance.go | 3 + ...pi_op_CreateVerifiedAccessTrustProvider.go | 3 + .../service/ec2/api_op_CreateVolume.go | 3 + .../service/ec2/api_op_CreateVpc.go | 3 + ..._op_CreateVpcBlockPublicAccessExclusion.go | 3 + .../service/ec2/api_op_CreateVpcEndpoint.go | 3 + ...CreateVpcEndpointConnectionNotification.go | 3 + ...p_CreateVpcEndpointServiceConfiguration.go | 3 + .../ec2/api_op_CreateVpcPeeringConnection.go | 3 + .../service/ec2/api_op_CreateVpnConnection.go | 3 + .../ec2/api_op_CreateVpnConnectionRoute.go | 3 + .../service/ec2/api_op_CreateVpnGateway.go | 3 + .../ec2/api_op_DeleteCarrierGateway.go | 3 + .../ec2/api_op_DeleteClientVpnEndpoint.go | 3 + .../ec2/api_op_DeleteClientVpnRoute.go | 3 + .../service/ec2/api_op_DeleteCoipCidr.go | 3 + .../service/ec2/api_op_DeleteCoipPool.go | 3 + .../ec2/api_op_DeleteCustomerGateway.go | 3 + .../service/ec2/api_op_DeleteDhcpOptions.go | 3 + .../api_op_DeleteEgressOnlyInternetGateway.go | 3 + .../service/ec2/api_op_DeleteFleets.go | 26 +- .../service/ec2/api_op_DeleteFlowLogs.go | 3 + .../service/ec2/api_op_DeleteFpgaImage.go | 3 + .../api_op_DeleteInstanceConnectEndpoint.go | 3 + .../ec2/api_op_DeleteInstanceEventWindow.go | 3 + .../ec2/api_op_DeleteInternetGateway.go | 3 + .../service/ec2/api_op_DeleteIpam.go | 3 + ...teIpamExternalResourceVerificationToken.go | 3 + .../service/ec2/api_op_DeleteIpamPool.go | 3 + .../ec2/api_op_DeleteIpamResourceDiscovery.go | 3 + .../service/ec2/api_op_DeleteIpamScope.go | 3 + .../service/ec2/api_op_DeleteKeyPair.go | 3 + .../ec2/api_op_DeleteLaunchTemplate.go | 3 + .../api_op_DeleteLaunchTemplateVersions.go | 5 +- .../ec2/api_op_DeleteLocalGatewayRoute.go | 3 + .../api_op_DeleteLocalGatewayRouteTable.go | 3 + ...teTableVirtualInterfaceGroupAssociation.go | 3 + ...eteLocalGatewayRouteTableVpcAssociation.go | 3 + .../ec2/api_op_DeleteManagedPrefixList.go | 3 + .../service/ec2/api_op_DeleteNatGateway.go | 3 + .../service/ec2/api_op_DeleteNetworkAcl.go | 3 + .../ec2/api_op_DeleteNetworkAclEntry.go | 3 + ...api_op_DeleteNetworkInsightsAccessScope.go | 3 + ...eleteNetworkInsightsAccessScopeAnalysis.go | 3 + .../api_op_DeleteNetworkInsightsAnalysis.go | 3 + .../ec2/api_op_DeleteNetworkInsightsPath.go | 3 + .../ec2/api_op_DeleteNetworkInterface.go | 3 + ...api_op_DeleteNetworkInterfacePermission.go | 3 + .../ec2/api_op_DeletePlacementGroup.go | 3 + .../ec2/api_op_DeletePublicIpv4Pool.go | 3 + .../api_op_DeleteQueuedReservedInstances.go | 3 + .../service/ec2/api_op_DeleteRoute.go | 3 + .../service/ec2/api_op_DeleteRouteTable.go | 3 + .../service/ec2/api_op_DeleteSecurityGroup.go | 3 + .../service/ec2/api_op_DeleteSnapshot.go | 3 + .../api_op_DeleteSpotDatafeedSubscription.go | 3 + .../service/ec2/api_op_DeleteSubnet.go | 3 + .../ec2/api_op_DeleteSubnetCidrReservation.go | 3 + .../service/ec2/api_op_DeleteTags.go | 3 + .../ec2/api_op_DeleteTrafficMirrorFilter.go | 3 + .../api_op_DeleteTrafficMirrorFilterRule.go | 3 + .../ec2/api_op_DeleteTrafficMirrorSession.go | 3 + .../ec2/api_op_DeleteTrafficMirrorTarget.go | 3 + .../ec2/api_op_DeleteTransitGateway.go | 3 + 
.../ec2/api_op_DeleteTransitGatewayConnect.go | 3 + .../api_op_DeleteTransitGatewayConnectPeer.go | 3 + ..._op_DeleteTransitGatewayMulticastDomain.go | 3 + ...p_DeleteTransitGatewayPeeringAttachment.go | 3 + .../api_op_DeleteTransitGatewayPolicyTable.go | 3 + ...DeleteTransitGatewayPrefixListReference.go | 3 + .../ec2/api_op_DeleteTransitGatewayRoute.go | 3 + .../api_op_DeleteTransitGatewayRouteTable.go | 3 + ...eteTransitGatewayRouteTableAnnouncement.go | 3 + ...pi_op_DeleteTransitGatewayVpcAttachment.go | 3 + .../api_op_DeleteVerifiedAccessEndpoint.go | 3 + .../ec2/api_op_DeleteVerifiedAccessGroup.go | 3 + .../api_op_DeleteVerifiedAccessInstance.go | 3 + ...pi_op_DeleteVerifiedAccessTrustProvider.go | 3 + .../service/ec2/api_op_DeleteVolume.go | 3 + .../service/ec2/api_op_DeleteVpc.go | 3 + ..._op_DeleteVpcBlockPublicAccessExclusion.go | 3 + ...eleteVpcEndpointConnectionNotifications.go | 3 + ..._DeleteVpcEndpointServiceConfigurations.go | 3 + .../service/ec2/api_op_DeleteVpcEndpoints.go | 3 + .../ec2/api_op_DeleteVpcPeeringConnection.go | 3 + .../service/ec2/api_op_DeleteVpnConnection.go | 3 + .../ec2/api_op_DeleteVpnConnectionRoute.go | 3 + .../service/ec2/api_op_DeleteVpnGateway.go | 3 + .../ec2/api_op_DeprovisionByoipCidr.go | 3 + .../ec2/api_op_DeprovisionIpamByoasn.go | 3 + .../ec2/api_op_DeprovisionIpamPoolCidr.go | 3 + .../api_op_DeprovisionPublicIpv4PoolCidr.go | 3 + .../service/ec2/api_op_DeregisterImage.go | 3 + ...sterInstanceEventNotificationAttributes.go | 3 + ...sterTransitGatewayMulticastGroupMembers.go | 3 + ...sterTransitGatewayMulticastGroupSources.go | 3 + .../ec2/api_op_DescribeAccountAttributes.go | 3 + .../ec2/api_op_DescribeAddressTransfers.go | 3 + .../service/ec2/api_op_DescribeAddresses.go | 3 + .../ec2/api_op_DescribeAddressesAttribute.go | 3 + .../ec2/api_op_DescribeAggregateIdFormat.go | 3 + .../ec2/api_op_DescribeAvailabilityZones.go | 13 +- ...wsNetworkPerformanceMetricSubscriptions.go | 3 + .../service/ec2/api_op_DescribeBundleTasks.go | 3 + .../service/ec2/api_op_DescribeByoipCidrs.go | 3 + ...p_DescribeCapacityBlockExtensionHistory.go | 3 + ...DescribeCapacityBlockExtensionOfferings.go | 3 + .../api_op_DescribeCapacityBlockOfferings.go | 3 + ...cribeCapacityReservationBillingRequests.go | 3 + ...pi_op_DescribeCapacityReservationFleets.go | 3 + .../api_op_DescribeCapacityReservations.go | 3 + .../ec2/api_op_DescribeCarrierGateways.go | 3 + .../api_op_DescribeClassicLinkInstances.go | 3 + ..._op_DescribeClientVpnAuthorizationRules.go | 3 + .../api_op_DescribeClientVpnConnections.go | 3 + .../ec2/api_op_DescribeClientVpnEndpoints.go | 3 + .../ec2/api_op_DescribeClientVpnRoutes.go | 3 + .../api_op_DescribeClientVpnTargetNetworks.go | 3 + .../service/ec2/api_op_DescribeCoipPools.go | 3 + .../ec2/api_op_DescribeConversionTasks.go | 3 + .../ec2/api_op_DescribeCustomerGateways.go | 3 + ...i_op_DescribeDeclarativePoliciesReports.go | 3 + .../service/ec2/api_op_DescribeDhcpOptions.go | 3 + ...i_op_DescribeEgressOnlyInternetGateways.go | 3 + .../service/ec2/api_op_DescribeElasticGpus.go | 3 + .../ec2/api_op_DescribeExportImageTasks.go | 3 + .../service/ec2/api_op_DescribeExportTasks.go | 3 + .../ec2/api_op_DescribeFastLaunchImages.go | 3 + .../api_op_DescribeFastSnapshotRestores.go | 3 + .../ec2/api_op_DescribeFleetHistory.go | 3 + .../ec2/api_op_DescribeFleetInstances.go | 3 + .../service/ec2/api_op_DescribeFleets.go | 3 + .../service/ec2/api_op_DescribeFlowLogs.go | 3 + .../ec2/api_op_DescribeFpgaImageAttribute.go | 3 + 
.../service/ec2/api_op_DescribeFpgaImages.go | 3 + ...api_op_DescribeHostReservationOfferings.go | 3 + .../ec2/api_op_DescribeHostReservations.go | 3 + .../service/ec2/api_op_DescribeHosts.go | 3 + ..._DescribeIamInstanceProfileAssociations.go | 3 + .../service/ec2/api_op_DescribeIdFormat.go | 3 + .../ec2/api_op_DescribeIdentityIdFormat.go | 3 + .../ec2/api_op_DescribeImageAttribute.go | 3 + .../service/ec2/api_op_DescribeImages.go | 3 + .../ec2/api_op_DescribeImportImageTasks.go | 3 + .../ec2/api_op_DescribeImportSnapshotTasks.go | 3 + .../ec2/api_op_DescribeInstanceAttribute.go | 3 + ...api_op_DescribeInstanceConnectEndpoints.go | 3 + ...op_DescribeInstanceCreditSpecifications.go | 3 + ...ribeInstanceEventNotificationAttributes.go | 3 + .../api_op_DescribeInstanceEventWindows.go | 3 + .../api_op_DescribeInstanceImageMetadata.go | 3 + .../ec2/api_op_DescribeInstanceStatus.go | 3 + .../ec2/api_op_DescribeInstanceTopology.go | 3 + .../api_op_DescribeInstanceTypeOfferings.go | 3 + .../ec2/api_op_DescribeInstanceTypes.go | 3 + .../service/ec2/api_op_DescribeInstances.go | 3 + .../ec2/api_op_DescribeInternetGateways.go | 3 + .../service/ec2/api_op_DescribeIpamByoasn.go | 3 + ...eIpamExternalResourceVerificationTokens.go | 3 + .../service/ec2/api_op_DescribeIpamPools.go | 3 + .../api_op_DescribeIpamResourceDiscoveries.go | 3 + ...scribeIpamResourceDiscoveryAssociations.go | 3 + .../service/ec2/api_op_DescribeIpamScopes.go | 3 + .../service/ec2/api_op_DescribeIpams.go | 3 + .../service/ec2/api_op_DescribeIpv6Pools.go | 3 + .../service/ec2/api_op_DescribeKeyPairs.go | 3 + .../api_op_DescribeLaunchTemplateVersions.go | 5 +- .../ec2/api_op_DescribeLaunchTemplates.go | 3 + ...eTableVirtualInterfaceGroupAssociations.go | 3 + ...beLocalGatewayRouteTableVpcAssociations.go | 3 + .../api_op_DescribeLocalGatewayRouteTables.go | 3 + ...cribeLocalGatewayVirtualInterfaceGroups.go | 3 + ...p_DescribeLocalGatewayVirtualInterfaces.go | 3 + .../ec2/api_op_DescribeLocalGateways.go | 3 + .../ec2/api_op_DescribeLockedSnapshots.go | 3 + .../service/ec2/api_op_DescribeMacHosts.go | 3 + .../ec2/api_op_DescribeManagedPrefixLists.go | 3 + .../ec2/api_op_DescribeMovingAddresses.go | 3 + .../service/ec2/api_op_DescribeNatGateways.go | 3 + .../service/ec2/api_op_DescribeNetworkAcls.go | 3 + ...cribeNetworkInsightsAccessScopeAnalyses.go | 3 + ..._op_DescribeNetworkInsightsAccessScopes.go | 3 + .../api_op_DescribeNetworkInsightsAnalyses.go | 3 + .../api_op_DescribeNetworkInsightsPaths.go | 3 + ...pi_op_DescribeNetworkInterfaceAttribute.go | 3 + ..._op_DescribeNetworkInterfacePermissions.go | 3 + .../ec2/api_op_DescribeNetworkInterfaces.go | 6 +- .../ec2/api_op_DescribePlacementGroups.go | 3 + .../service/ec2/api_op_DescribePrefixLists.go | 3 + .../ec2/api_op_DescribePrincipalIdFormat.go | 3 + .../ec2/api_op_DescribePublicIpv4Pools.go | 3 + .../service/ec2/api_op_DescribeRegions.go | 3 + .../api_op_DescribeReplaceRootVolumeTasks.go | 3 + .../ec2/api_op_DescribeReservedInstances.go | 3 + ...pi_op_DescribeReservedInstancesListings.go | 3 + ..._DescribeReservedInstancesModifications.go | 3 + ...i_op_DescribeReservedInstancesOfferings.go | 3 + .../service/ec2/api_op_DescribeRouteTables.go | 3 + ...p_DescribeScheduledInstanceAvailability.go | 3 + .../ec2/api_op_DescribeScheduledInstances.go | 3 + .../api_op_DescribeSecurityGroupReferences.go | 3 + .../ec2/api_op_DescribeSecurityGroupRules.go | 3 + ...op_DescribeSecurityGroupVpcAssociations.go | 3 + .../ec2/api_op_DescribeSecurityGroups.go | 3 + 
.../ec2/api_op_DescribeSnapshotAttribute.go | 3 + .../ec2/api_op_DescribeSnapshotTierStatus.go | 3 + .../service/ec2/api_op_DescribeSnapshots.go | 3 + ...api_op_DescribeSpotDatafeedSubscription.go | 3 + .../ec2/api_op_DescribeSpotFleetInstances.go | 3 + .../api_op_DescribeSpotFleetRequestHistory.go | 3 + .../ec2/api_op_DescribeSpotFleetRequests.go | 3 + .../api_op_DescribeSpotInstanceRequests.go | 3 + .../ec2/api_op_DescribeSpotPriceHistory.go | 3 + .../ec2/api_op_DescribeStaleSecurityGroups.go | 3 + .../ec2/api_op_DescribeStoreImageTasks.go | 3 + .../service/ec2/api_op_DescribeSubnets.go | 3 + .../service/ec2/api_op_DescribeTags.go | 3 + ...api_op_DescribeTrafficMirrorFilterRules.go | 3 + .../api_op_DescribeTrafficMirrorFilters.go | 3 + .../api_op_DescribeTrafficMirrorSessions.go | 3 + .../api_op_DescribeTrafficMirrorTargets.go | 3 + ...pi_op_DescribeTransitGatewayAttachments.go | 3 + ...i_op_DescribeTransitGatewayConnectPeers.go | 3 + .../api_op_DescribeTransitGatewayConnects.go | 3 + ..._DescribeTransitGatewayMulticastDomains.go | 3 + ...escribeTransitGatewayPeeringAttachments.go | 3 + ...i_op_DescribeTransitGatewayPolicyTables.go | 3 + ...beTransitGatewayRouteTableAnnouncements.go | 3 + ...pi_op_DescribeTransitGatewayRouteTables.go | 3 + ...op_DescribeTransitGatewayVpcAttachments.go | 3 + .../ec2/api_op_DescribeTransitGateways.go | 3 + ...i_op_DescribeTrunkInterfaceAssociations.go | 3 + .../api_op_DescribeVerifiedAccessEndpoints.go | 3 + .../api_op_DescribeVerifiedAccessGroups.go | 3 + ...fiedAccessInstanceLoggingConfigurations.go | 3 + .../api_op_DescribeVerifiedAccessInstances.go | 3 + ...op_DescribeVerifiedAccessTrustProviders.go | 3 + .../ec2/api_op_DescribeVolumeAttribute.go | 3 + .../ec2/api_op_DescribeVolumeStatus.go | 3 + .../service/ec2/api_op_DescribeVolumes.go | 3 + .../api_op_DescribeVolumesModifications.go | 3 + .../ec2/api_op_DescribeVpcAttribute.go | 3 + ..._DescribeVpcBlockPublicAccessExclusions.go | 3 + ..._op_DescribeVpcBlockPublicAccessOptions.go | 3 + .../ec2/api_op_DescribeVpcClassicLink.go | 3 + ...api_op_DescribeVpcClassicLinkDnsSupport.go | 3 + .../api_op_DescribeVpcEndpointAssociations.go | 3 + ...cribeVpcEndpointConnectionNotifications.go | 3 + .../api_op_DescribeVpcEndpointConnections.go | 3 + ...escribeVpcEndpointServiceConfigurations.go | 3 + ...p_DescribeVpcEndpointServicePermissions.go | 3 + .../ec2/api_op_DescribeVpcEndpointServices.go | 3 + .../ec2/api_op_DescribeVpcEndpoints.go | 3 + .../api_op_DescribeVpcPeeringConnections.go | 3 + .../service/ec2/api_op_DescribeVpcs.go | 3 + .../ec2/api_op_DescribeVpnConnections.go | 3 + .../service/ec2/api_op_DescribeVpnGateways.go | 3 + .../ec2/api_op_DetachClassicLinkVpc.go | 3 + .../ec2/api_op_DetachInternetGateway.go | 3 + .../ec2/api_op_DetachNetworkInterface.go | 3 + ...pi_op_DetachVerifiedAccessTrustProvider.go | 3 + .../service/ec2/api_op_DetachVolume.go | 3 + .../service/ec2/api_op_DetachVpnGateway.go | 3 + .../ec2/api_op_DisableAddressTransfer.go | 3 + .../api_op_DisableAllowedImagesSettings.go | 3 + ...AwsNetworkPerformanceMetricSubscription.go | 3 + .../api_op_DisableEbsEncryptionByDefault.go | 3 + .../service/ec2/api_op_DisableFastLaunch.go | 3 + .../ec2/api_op_DisableFastSnapshotRestores.go | 3 + .../service/ec2/api_op_DisableImage.go | 3 + .../api_op_DisableImageBlockPublicAccess.go | 3 + .../ec2/api_op_DisableImageDeprecation.go | 3 + ...op_DisableImageDeregistrationProtection.go | 3 + ..._op_DisableIpamOrganizationAdminAccount.go | 3 + .../ec2/api_op_DisableSerialConsoleAccess.go | 3 + 
 ...api_op_DisableSnapshotBlockPublicAccess.go | 3 +
 ...ableTransitGatewayRouteTablePropagation.go | 3 +
 .../ec2/api_op_DisableVgwRoutePropagation.go | 3 +
 .../ec2/api_op_DisableVpcClassicLink.go | 3 +
 .../api_op_DisableVpcClassicLinkDnsSupport.go | 3 +
 .../service/ec2/api_op_DisassociateAddress.go | 3 +
 ...ssociateCapacityReservationBillingOwner.go | 3 +
 ...i_op_DisassociateClientVpnTargetNetwork.go | 3 +
 ...p_DisassociateEnclaveCertificateIamRole.go | 3 +
 .../api_op_DisassociateIamInstanceProfile.go | 3 +
 .../api_op_DisassociateInstanceEventWindow.go | 3 +
 .../ec2/api_op_DisassociateIpamByoasn.go | 3 +
 ...pi_op_DisassociateIpamResourceDiscovery.go | 3 +
 .../api_op_DisassociateNatGatewayAddress.go | 3 +
 .../ec2/api_op_DisassociateRouteTable.go | 3 +
 .../api_op_DisassociateSecurityGroupVpc.go | 3 +
 .../ec2/api_op_DisassociateSubnetCidrBlock.go | 3 +
 ...sassociateTransitGatewayMulticastDomain.go | 3 +
 ...p_DisassociateTransitGatewayPolicyTable.go | 3 +
 ...op_DisassociateTransitGatewayRouteTable.go | 3 +
 .../ec2/api_op_DisassociateTrunkInterface.go | 3 +
 .../ec2/api_op_DisassociateVpcCidrBlock.go | 3 +
 .../ec2/api_op_EnableAddressTransfer.go | 3 +
 .../ec2/api_op_EnableAllowedImagesSettings.go | 3 +
 ...AwsNetworkPerformanceMetricSubscription.go | 3 +
 .../api_op_EnableEbsEncryptionByDefault.go | 3 +
 .../service/ec2/api_op_EnableFastLaunch.go | 3 +
 .../ec2/api_op_EnableFastSnapshotRestores.go | 3 +
 .../service/ec2/api_op_EnableImage.go | 3 +
 .../api_op_EnableImageBlockPublicAccess.go | 3 +
 .../ec2/api_op_EnableImageDeprecation.go | 3 +
 ..._op_EnableImageDeregistrationProtection.go | 3 +
 ...i_op_EnableIpamOrganizationAdminAccount.go | 3 +
 ...ReachabilityAnalyzerOrganizationSharing.go | 3 +
 .../ec2/api_op_EnableSerialConsoleAccess.go | 3 +
 .../api_op_EnableSnapshotBlockPublicAccess.go | 3 +
 ...ableTransitGatewayRouteTablePropagation.go | 3 +
 .../ec2/api_op_EnableVgwRoutePropagation.go | 3 +
 .../service/ec2/api_op_EnableVolumeIO.go | 3 +
 .../ec2/api_op_EnableVpcClassicLink.go | 3 +
 .../api_op_EnableVpcClassicLinkDnsSupport.go | 3 +
 ...lientVpnClientCertificateRevocationList.go | 3 +
 ...i_op_ExportClientVpnClientConfiguration.go | 3 +
 .../service/ec2/api_op_ExportImage.go | 3 +
 .../ec2/api_op_ExportTransitGatewayRoutes.go | 3 +
 ...rifiedAccessInstanceClientConfiguration.go | 3 +
 .../ec2/api_op_GetAllowedImagesSettings.go | 3 +
 ...GetAssociatedEnclaveCertificateIamRoles.go | 3 +
 .../ec2/api_op_GetAssociatedIpv6PoolCidrs.go | 3 +
 .../api_op_GetAwsNetworkPerformanceData.go | 3 +
 .../ec2/api_op_GetCapacityReservationUsage.go | 29 +-
 .../service/ec2/api_op_GetCoipPoolUsage.go | 3 +
 .../service/ec2/api_op_GetConsoleOutput.go | 3 +
 .../ec2/api_op_GetConsoleScreenshot.go | 3 +
 ..._op_GetDeclarativePoliciesReportSummary.go | 3 +
 .../api_op_GetDefaultCreditSpecification.go | 3 +
 .../ec2/api_op_GetEbsDefaultKmsKeyId.go | 3 +
 .../ec2/api_op_GetEbsEncryptionByDefault.go | 3 +
 .../api_op_GetFlowLogsIntegrationTemplate.go | 3 +
 .../api_op_GetGroupsForCapacityReservation.go | 3 +
 ...pi_op_GetHostReservationPurchasePreview.go | 3 +
 .../api_op_GetImageBlockPublicAccessState.go | 3 +
 .../ec2/api_op_GetInstanceMetadataDefaults.go | 3 +
 .../service/ec2/api_op_GetInstanceTpmEkPub.go | 3 +
 ...etInstanceTypesFromInstanceRequirements.go | 3 +
 .../service/ec2/api_op_GetInstanceUefiData.go | 3 +
 .../ec2/api_op_GetIpamAddressHistory.go | 3 +
 .../ec2/api_op_GetIpamDiscoveredAccounts.go | 3 +
 ...api_op_GetIpamDiscoveredPublicAddresses.go | 3 +
 .../api_op_GetIpamDiscoveredResourceCidrs.go | 3 +
 .../ec2/api_op_GetIpamPoolAllocations.go | 3 +
 .../service/ec2/api_op_GetIpamPoolCidrs.go | 3 +
 .../ec2/api_op_GetIpamResourceCidrs.go | 3 +
 .../ec2/api_op_GetLaunchTemplateData.go | 3 +
 ...api_op_GetManagedPrefixListAssociations.go | 3 +
 .../ec2/api_op_GetManagedPrefixListEntries.go | 3 +
 ...workInsightsAccessScopeAnalysisFindings.go | 3 +
 ...op_GetNetworkInsightsAccessScopeContent.go | 3 +
 .../service/ec2/api_op_GetPasswordData.go | 3 +
 ...pi_op_GetReservedInstancesExchangeQuote.go | 3 +
 .../ec2/api_op_GetSecurityGroupsForVpc.go | 3 +
 .../api_op_GetSerialConsoleAccessStatus.go | 3 +
 ...pi_op_GetSnapshotBlockPublicAccessState.go | 3 +
 .../ec2/api_op_GetSpotPlacementScores.go | 3 +
 .../ec2/api_op_GetSubnetCidrReservations.go | 3 +
 ...GetTransitGatewayAttachmentPropagations.go | 3 +
 ...ansitGatewayMulticastDomainAssociations.go | 3 +
 ...etTransitGatewayPolicyTableAssociations.go | 3 +
 ..._op_GetTransitGatewayPolicyTableEntries.go | 3 +
 ...p_GetTransitGatewayPrefixListReferences.go | 3 +
 ...GetTransitGatewayRouteTableAssociations.go | 3 +
 ...GetTransitGatewayRouteTablePropagations.go | 3 +
 .../api_op_GetVerifiedAccessEndpointPolicy.go | 3 +
 ...api_op_GetVerifiedAccessEndpointTargets.go | 3 +
 .../api_op_GetVerifiedAccessGroupPolicy.go | 3 +
 ...tVpnConnectionDeviceSampleConfiguration.go | 3 +
 .../ec2/api_op_GetVpnConnectionDeviceTypes.go | 3 +
 .../api_op_GetVpnTunnelReplacementStatus.go | 3 +
 ...lientVpnClientCertificateRevocationList.go | 3 +
 .../service/ec2/api_op_ImportImage.go | 3 +
 .../service/ec2/api_op_ImportInstance.go | 3 +
 .../service/ec2/api_op_ImportKeyPair.go | 3 +
 .../service/ec2/api_op_ImportSnapshot.go | 3 +
 .../service/ec2/api_op_ImportVolume.go | 3 +
 .../ec2/api_op_ListImagesInRecycleBin.go | 3 +
 .../ec2/api_op_ListSnapshotsInRecycleBin.go | 3 +
 .../service/ec2/api_op_LockSnapshot.go | 3 +
 .../ec2/api_op_ModifyAddressAttribute.go | 3 +
 .../ec2/api_op_ModifyAvailabilityZoneGroup.go | 3 +
 .../ec2/api_op_ModifyCapacityReservation.go | 3 +
 .../api_op_ModifyCapacityReservationFleet.go | 3 +
 .../ec2/api_op_ModifyClientVpnEndpoint.go | 3 +
 ...api_op_ModifyDefaultCreditSpecification.go | 3 +
 .../ec2/api_op_ModifyEbsDefaultKmsKeyId.go | 3 +
 .../service/ec2/api_op_ModifyFleet.go | 3 +
 .../ec2/api_op_ModifyFpgaImageAttribute.go | 3 +
 .../service/ec2/api_op_ModifyHosts.go | 3 +
 .../service/ec2/api_op_ModifyIdFormat.go | 3 +
 .../ec2/api_op_ModifyIdentityIdFormat.go | 3 +
 .../ec2/api_op_ModifyImageAttribute.go | 3 +
 .../ec2/api_op_ModifyInstanceAttribute.go | 3 +
 ...fyInstanceCapacityReservationAttributes.go | 3 +
 .../ec2/api_op_ModifyInstanceCpuOptions.go | 3 +
 ...pi_op_ModifyInstanceCreditSpecification.go | 3 +
 .../api_op_ModifyInstanceEventStartTime.go | 3 +
 .../ec2/api_op_ModifyInstanceEventWindow.go | 3 +
 ...api_op_ModifyInstanceMaintenanceOptions.go | 3 +
 .../api_op_ModifyInstanceMetadataDefaults.go | 3 +
 .../api_op_ModifyInstanceMetadataOptions.go | 3 +
 ...ModifyInstanceNetworkPerformanceOptions.go | 3 +
 .../ec2/api_op_ModifyInstancePlacement.go | 3 +
 .../service/ec2/api_op_ModifyIpam.go | 3 +
 .../service/ec2/api_op_ModifyIpamPool.go | 3 +
 .../ec2/api_op_ModifyIpamResourceCidr.go | 3 +
 .../ec2/api_op_ModifyIpamResourceDiscovery.go | 3 +
 .../service/ec2/api_op_ModifyIpamScope.go | 3 +
 .../ec2/api_op_ModifyLaunchTemplate.go | 44 +-
 .../ec2/api_op_ModifyLocalGatewayRoute.go | 3 +
 .../ec2/api_op_ModifyManagedPrefixList.go | 3 +
 .../api_op_ModifyNetworkInterfaceAttribute.go | 3 +
 .../ec2/api_op_ModifyPrivateDnsNameOptions.go | 3 +
 .../ec2/api_op_ModifyReservedInstances.go | 3 +
 .../ec2/api_op_ModifySecurityGroupRules.go | 3 +
 .../ec2/api_op_ModifySnapshotAttribute.go | 3 +
 .../service/ec2/api_op_ModifySnapshotTier.go | 3 +
 .../ec2/api_op_ModifySpotFleetRequest.go | 3 +
 .../ec2/api_op_ModifySubnetAttribute.go | 3 +
 ...odifyTrafficMirrorFilterNetworkServices.go | 3 +
 .../api_op_ModifyTrafficMirrorFilterRule.go | 3 +
 .../ec2/api_op_ModifyTrafficMirrorSession.go | 3 +
 .../ec2/api_op_ModifyTransitGateway.go | 3 +
 ...ModifyTransitGatewayPrefixListReference.go | 3 +
 ...pi_op_ModifyTransitGatewayVpcAttachment.go | 3 +
 .../api_op_ModifyVerifiedAccessEndpoint.go | 3 +
 ...i_op_ModifyVerifiedAccessEndpointPolicy.go | 3 +
 .../ec2/api_op_ModifyVerifiedAccessGroup.go | 3 +
 .../api_op_ModifyVerifiedAccessGroupPolicy.go | 3 +
 .../api_op_ModifyVerifiedAccessInstance.go | 3 +
 ...ifiedAccessInstanceLoggingConfiguration.go | 3 +
 ...pi_op_ModifyVerifiedAccessTrustProvider.go | 3 +
 .../service/ec2/api_op_ModifyVolume.go | 3 +
 .../ec2/api_op_ModifyVolumeAttribute.go | 3 +
 .../service/ec2/api_op_ModifyVpcAttribute.go | 3 +
 ..._op_ModifyVpcBlockPublicAccessExclusion.go | 3 +
 ...pi_op_ModifyVpcBlockPublicAccessOptions.go | 3 +
 .../service/ec2/api_op_ModifyVpcEndpoint.go | 3 +
 ...ModifyVpcEndpointConnectionNotification.go | 3 +
 ...p_ModifyVpcEndpointServiceConfiguration.go | 3 +
 ...fyVpcEndpointServicePayerResponsibility.go | 3 +
 ..._op_ModifyVpcEndpointServicePermissions.go | 3 +
 ...pi_op_ModifyVpcPeeringConnectionOptions.go | 3 +
 .../service/ec2/api_op_ModifyVpcTenancy.go | 3 +
 .../service/ec2/api_op_ModifyVpnConnection.go | 3 +
 .../ec2/api_op_ModifyVpnConnectionOptions.go | 3 +
 .../ec2/api_op_ModifyVpnTunnelCertificate.go | 3 +
 .../ec2/api_op_ModifyVpnTunnelOptions.go | 3 +
 .../service/ec2/api_op_MonitorInstances.go | 3 +
 .../service/ec2/api_op_MoveAddressToVpc.go | 3 +
 .../service/ec2/api_op_MoveByoipCidrToIpam.go | 3 +
 ...api_op_MoveCapacityReservationInstances.go | 3 +
 .../service/ec2/api_op_ProvisionByoipCidr.go | 3 +
 .../service/ec2/api_op_ProvisionIpamByoasn.go | 3 +
 .../ec2/api_op_ProvisionIpamPoolCidr.go | 3 +
 .../ec2/api_op_ProvisionPublicIpv4PoolCidr.go | 3 +
 .../ec2/api_op_PurchaseCapacityBlock.go | 3 +
 .../api_op_PurchaseCapacityBlockExtension.go | 3 +
 .../ec2/api_op_PurchaseHostReservation.go | 3 +
 ...pi_op_PurchaseReservedInstancesOffering.go | 3 +
 .../ec2/api_op_PurchaseScheduledInstances.go | 3 +
 .../service/ec2/api_op_RebootInstances.go | 3 +
 .../service/ec2/api_op_RegisterImage.go | 3 +
 ...sterInstanceEventNotificationAttributes.go | 3 +
 ...sterTransitGatewayMulticastGroupMembers.go | 3 +
 ...sterTransitGatewayMulticastGroupSources.go | 3 +
 ...jectCapacityReservationBillingOwnership.go | 3 +
 ...ansitGatewayMulticastDomainAssociations.go | 3 +
 ...p_RejectTransitGatewayPeeringAttachment.go | 3 +
 ...pi_op_RejectTransitGatewayVpcAttachment.go | 3 +
 .../api_op_RejectVpcEndpointConnections.go | 3 +
 .../ec2/api_op_RejectVpcPeeringConnection.go | 3 +
 .../service/ec2/api_op_ReleaseAddress.go | 3 +
 .../service/ec2/api_op_ReleaseHosts.go | 3 +
 .../ec2/api_op_ReleaseIpamPoolAllocation.go | 3 +
 ...op_ReplaceIamInstanceProfileAssociation.go | 3 +
 ...aceImageCriteriaInAllowedImagesSettings.go | 3 +
 .../api_op_ReplaceNetworkAclAssociation.go | 3 +
 .../ec2/api_op_ReplaceNetworkAclEntry.go | 3 +
 .../service/ec2/api_op_ReplaceRoute.go | 3 +
 .../api_op_ReplaceRouteTableAssociation.go | 3 +
 .../ec2/api_op_ReplaceTransitGatewayRoute.go | 3 +
 .../service/ec2/api_op_ReplaceVpnTunnel.go | 3 +
 .../ec2/api_op_ReportInstanceStatus.go | 3 +
 .../service/ec2/api_op_RequestSpotFleet.go | 3 +
 .../ec2/api_op_RequestSpotInstances.go | 3 +
 .../ec2/api_op_ResetAddressAttribute.go | 3 +
 .../ec2/api_op_ResetEbsDefaultKmsKeyId.go | 3 +
 .../ec2/api_op_ResetFpgaImageAttribute.go | 3 +
 .../service/ec2/api_op_ResetImageAttribute.go | 3 +
 .../ec2/api_op_ResetInstanceAttribute.go | 3 +
 .../api_op_ResetNetworkInterfaceAttribute.go | 3 +
 .../ec2/api_op_ResetSnapshotAttribute.go | 3 +
 .../ec2/api_op_RestoreAddressToClassic.go | 3 +
 .../ec2/api_op_RestoreImageFromRecycleBin.go | 3 +
 .../api_op_RestoreManagedPrefixListVersion.go | 3 +
 .../api_op_RestoreSnapshotFromRecycleBin.go | 3 +
 .../service/ec2/api_op_RestoreSnapshotTier.go | 3 +
 .../ec2/api_op_RevokeClientVpnIngress.go | 3 +
 .../ec2/api_op_RevokeSecurityGroupEgress.go | 3 +
 .../ec2/api_op_RevokeSecurityGroupIngress.go | 3 +
 .../service/ec2/api_op_RunInstances.go | 3 +
 .../ec2/api_op_RunScheduledInstances.go | 3 +
 .../ec2/api_op_SearchLocalGatewayRoutes.go | 3 +
 ..._op_SearchTransitGatewayMulticastGroups.go | 3 +
 .../ec2/api_op_SearchTransitGatewayRoutes.go | 3 +
 .../ec2/api_op_SendDiagnosticInterrupt.go | 3 +
 .../api_op_StartDeclarativePoliciesReport.go | 3 +
 .../service/ec2/api_op_StartInstances.go | 3 +
 ...StartNetworkInsightsAccessScopeAnalysis.go | 3 +
 .../api_op_StartNetworkInsightsAnalysis.go | 3 +
 ...pcEndpointServicePrivateDnsVerification.go | 3 +
 .../service/ec2/api_op_StopInstances.go | 3 +
 .../api_op_TerminateClientVpnConnections.go | 3 +
 .../service/ec2/api_op_TerminateInstances.go | 3 +
 .../ec2/api_op_UnassignIpv6Addresses.go | 3 +
 .../ec2/api_op_UnassignPrivateIpAddresses.go | 3 +
 ...api_op_UnassignPrivateNatGatewayAddress.go | 3 +
 .../service/ec2/api_op_UnlockSnapshot.go | 3 +
 .../service/ec2/api_op_UnmonitorInstances.go | 3 +
 ...dateSecurityGroupRuleDescriptionsEgress.go | 3 +
 ...ateSecurityGroupRuleDescriptionsIngress.go | 3 +
 .../service/ec2/api_op_WithdrawByoipCidr.go | 3 +
 .../service/ec2/deserializers.go | 649 +-
 .../aws-sdk-go-v2/service/ec2/generated.json | 1 +
 .../service/ec2/go_module_metadata.go | 2 +-
 .../aws-sdk-go-v2/service/ec2/serializers.go | 103 +
 .../aws-sdk-go-v2/service/ec2/types/enums.go | 91 +
 .../aws-sdk-go-v2/service/ec2/types/types.go | 490 +-
 .../aws-sdk-go-v2/service/ecs/CHANGELOG.md | 80 +
 .../aws-sdk-go-v2/service/ecs/api_client.go | 33 +-
 .../ecs/api_op_CreateCapacityProvider.go | 3 +
 .../service/ecs/api_op_CreateCluster.go | 3 +
 .../service/ecs/api_op_CreateService.go | 5 +-
 .../service/ecs/api_op_CreateTaskSet.go | 3 +
 .../ecs/api_op_DeleteAccountSetting.go | 3 +
 .../service/ecs/api_op_DeleteAttributes.go | 3 +
 .../ecs/api_op_DeleteCapacityProvider.go | 3 +
 .../service/ecs/api_op_DeleteCluster.go | 3 +
 .../service/ecs/api_op_DeleteService.go | 3 +
 .../ecs/api_op_DeleteTaskDefinitions.go | 3 +
 .../service/ecs/api_op_DeleteTaskSet.go | 3 +
 .../ecs/api_op_DeregisterContainerInstance.go | 3 +
 .../ecs/api_op_DeregisterTaskDefinition.go | 3 +
 .../ecs/api_op_DescribeCapacityProviders.go | 3 +
 .../service/ecs/api_op_DescribeClusters.go | 3 +
 .../ecs/api_op_DescribeContainerInstances.go | 3 +
 .../ecs/api_op_DescribeServiceDeployments.go | 3 +
 .../ecs/api_op_DescribeServiceRevisions.go | 3 +
 .../service/ecs/api_op_DescribeServices.go | 194 +-
 .../ecs/api_op_DescribeTaskDefinition.go | 3 +
 .../service/ecs/api_op_DescribeTaskSets.go | 3 +
 .../service/ecs/api_op_DescribeTasks.go | 134 +-
 .../ecs/api_op_DiscoverPollEndpoint.go | 3 +
 .../service/ecs/api_op_ExecuteCommand.go | 3 +
 .../service/ecs/api_op_GetTaskProtection.go | 3 +
 .../service/ecs/api_op_ListAccountSettings.go | 3 +
 .../service/ecs/api_op_ListAttributes.go | 3 +
 .../service/ecs/api_op_ListClusters.go | 3 +
 .../ecs/api_op_ListContainerInstances.go | 3 +
 .../ecs/api_op_ListServiceDeployments.go | 9 +-
 .../service/ecs/api_op_ListServices.go | 3 +
 .../ecs/api_op_ListServicesByNamespace.go | 3 +
 .../service/ecs/api_op_ListTagsForResource.go | 3 +
 .../ecs/api_op_ListTaskDefinitionFamilies.go | 3 +
 .../service/ecs/api_op_ListTaskDefinitions.go | 3 +
 .../service/ecs/api_op_ListTasks.go | 3 +
 .../service/ecs/api_op_PutAccountSetting.go | 3 +
 .../ecs/api_op_PutAccountSettingDefault.go | 3 +
 .../service/ecs/api_op_PutAttributes.go | 3 +
 .../ecs/api_op_PutClusterCapacityProviders.go | 3 +
 .../ecs/api_op_RegisterContainerInstance.go | 3 +
 .../ecs/api_op_RegisterTaskDefinition.go | 9 +-
 .../service/ecs/api_op_RunTask.go | 3 +
 .../service/ecs/api_op_StartTask.go | 3 +
 .../service/ecs/api_op_StopTask.go | 5 +-
 .../api_op_SubmitAttachmentStateChanges.go | 3 +
 .../ecs/api_op_SubmitContainerStateChange.go | 3 +
 .../ecs/api_op_SubmitTaskStateChange.go | 3 +
 .../service/ecs/api_op_TagResource.go | 19 +
 .../service/ecs/api_op_UntagResource.go | 3 +
 .../ecs/api_op_UpdateCapacityProvider.go | 3 +
 .../service/ecs/api_op_UpdateCluster.go | 3 +
 .../ecs/api_op_UpdateClusterSettings.go | 3 +
 .../ecs/api_op_UpdateContainerAgent.go | 3 +
 .../api_op_UpdateContainerInstancesState.go | 3 +
 .../service/ecs/api_op_UpdateService.go | 5 +-
 .../ecs/api_op_UpdateServicePrimaryTaskSet.go | 3 +
 .../ecs/api_op_UpdateTaskProtection.go | 3 +
 .../service/ecs/api_op_UpdateTaskSet.go | 3 +
 .../aws-sdk-go-v2/service/ecs/generated.json | 6 +-
 .../service/ecs/go_module_metadata.go | 2 +-
 .../ecs/internal/endpoints/endpoints.go | 14 +
 .../aws-sdk-go-v2/service/ecs/types/errors.go | 8 +-
 .../aws-sdk-go-v2/service/ecs/types/types.go | 87 +-
 .../aws-sdk-go-v2/service/iam/CHANGELOG.md | 9 +
 .../aws-sdk-go-v2/service/iam/api_client.go | 31 +
 ...i_op_AddClientIDToOpenIDConnectProvider.go | 3 +
 .../iam/api_op_AddRoleToInstanceProfile.go | 3 +
 .../service/iam/api_op_AddUserToGroup.go | 3 +
 .../service/iam/api_op_AttachGroupPolicy.go | 3 +
 .../service/iam/api_op_AttachRolePolicy.go | 3 +
 .../service/iam/api_op_AttachUserPolicy.go | 3 +
 .../service/iam/api_op_ChangePassword.go | 3 +
 .../service/iam/api_op_CreateAccessKey.go | 3 +
 .../service/iam/api_op_CreateAccountAlias.go | 3 +
 .../service/iam/api_op_CreateGroup.go | 3 +
 .../iam/api_op_CreateInstanceProfile.go | 3 +
 .../service/iam/api_op_CreateLoginProfile.go | 3 +
 .../iam/api_op_CreateOpenIDConnectProvider.go | 3 +
 .../service/iam/api_op_CreatePolicy.go | 3 +
 .../service/iam/api_op_CreatePolicyVersion.go | 3 +
 .../service/iam/api_op_CreateRole.go | 3 +
 .../service/iam/api_op_CreateSAMLProvider.go | 3 +
 .../iam/api_op_CreateServiceLinkedRole.go | 3 +
 .../api_op_CreateServiceSpecificCredential.go | 3 +
 .../service/iam/api_op_CreateUser.go | 3 +
 .../iam/api_op_CreateVirtualMFADevice.go | 3 +
 .../service/iam/api_op_DeactivateMFADevice.go | 3 +
 .../service/iam/api_op_DeleteAccessKey.go | 3 +
 .../service/iam/api_op_DeleteAccountAlias.go | 3 +
 .../iam/api_op_DeleteAccountPasswordPolicy.go | 3 +
 .../service/iam/api_op_DeleteGroup.go | 3 +
 .../service/iam/api_op_DeleteGroupPolicy.go | 3 +
 .../iam/api_op_DeleteInstanceProfile.go | 3 +
 .../service/iam/api_op_DeleteLoginProfile.go | 3 +
 .../iam/api_op_DeleteOpenIDConnectProvider.go | 3 +
 .../service/iam/api_op_DeletePolicy.go | 3 +
 .../service/iam/api_op_DeletePolicyVersion.go | 3 +
 .../service/iam/api_op_DeleteRole.go | 3 +
 .../api_op_DeleteRolePermissionsBoundary.go | 3 +
 .../service/iam/api_op_DeleteRolePolicy.go | 3 +
 .../service/iam/api_op_DeleteSAMLProvider.go | 3 +
 .../service/iam/api_op_DeleteSSHPublicKey.go | 3 +
 .../iam/api_op_DeleteServerCertificate.go | 3 +
 .../iam/api_op_DeleteServiceLinkedRole.go | 3 +
 .../api_op_DeleteServiceSpecificCredential.go | 3 +
 .../iam/api_op_DeleteSigningCertificate.go | 3 +
 .../service/iam/api_op_DeleteUser.go | 3 +
 .../api_op_DeleteUserPermissionsBoundary.go | 3 +
 .../service/iam/api_op_DeleteUserPolicy.go | 3 +
 .../iam/api_op_DeleteVirtualMFADevice.go | 3 +
 .../service/iam/api_op_DetachGroupPolicy.go | 3 +
 .../service/iam/api_op_DetachRolePolicy.go | 3 +
 .../service/iam/api_op_DetachUserPolicy.go | 3 +
 ...eOrganizationsRootCredentialsManagement.go | 3 +
 ...api_op_DisableOrganizationsRootSessions.go | 3 +
 .../service/iam/api_op_EnableMFADevice.go | 3 +
 ...eOrganizationsRootCredentialsManagement.go | 3 +
 .../api_op_EnableOrganizationsRootSessions.go | 3 +
 .../iam/api_op_GenerateCredentialReport.go | 3 +
 ...pi_op_GenerateOrganizationsAccessReport.go | 3 +
 ...i_op_GenerateServiceLastAccessedDetails.go | 3 +
 .../iam/api_op_GetAccessKeyLastUsed.go | 3 +
 .../api_op_GetAccountAuthorizationDetails.go | 3 +
 .../iam/api_op_GetAccountPasswordPolicy.go | 3 +
 .../service/iam/api_op_GetAccountSummary.go | 3 +
 .../api_op_GetContextKeysForCustomPolicy.go | 3 +
 ...api_op_GetContextKeysForPrincipalPolicy.go | 3 +
 .../service/iam/api_op_GetCredentialReport.go | 3 +
 .../service/iam/api_op_GetGroup.go | 3 +
 .../service/iam/api_op_GetGroupPolicy.go | 3 +
 .../service/iam/api_op_GetInstanceProfile.go | 3 +
 .../service/iam/api_op_GetLoginProfile.go | 3 +
 .../service/iam/api_op_GetMFADevice.go | 3 +
 .../iam/api_op_GetOpenIDConnectProvider.go | 3 +
 .../api_op_GetOrganizationsAccessReport.go | 3 +
 .../service/iam/api_op_GetPolicy.go | 3 +
 .../service/iam/api_op_GetPolicyVersion.go | 3 +
 .../service/iam/api_op_GetRole.go | 3 +
 .../service/iam/api_op_GetRolePolicy.go | 3 +
 .../service/iam/api_op_GetSAMLProvider.go | 3 +
 .../service/iam/api_op_GetSSHPublicKey.go | 3 +
 .../iam/api_op_GetServerCertificate.go | 3 +
 .../api_op_GetServiceLastAccessedDetails.go | 3 +
 ...tServiceLastAccessedDetailsWithEntities.go | 3 +
 ...i_op_GetServiceLinkedRoleDeletionStatus.go | 3 +
 .../service/iam/api_op_GetUser.go | 3 +
 .../service/iam/api_op_GetUserPolicy.go | 3 +
 .../service/iam/api_op_ListAccessKeys.go | 3 +
 .../service/iam/api_op_ListAccountAliases.go | 3 +
 .../iam/api_op_ListAttachedGroupPolicies.go | 3 +
 .../iam/api_op_ListAttachedRolePolicies.go | 3 +
 .../iam/api_op_ListAttachedUserPolicies.go | 3 +
 .../iam/api_op_ListEntitiesForPolicy.go | 3 +
 .../service/iam/api_op_ListGroupPolicies.go | 3 +
 .../service/iam/api_op_ListGroups.go | 3 +
 .../service/iam/api_op_ListGroupsForUser.go | 3 +
 .../iam/api_op_ListInstanceProfileTags.go | 3 +
 .../iam/api_op_ListInstanceProfiles.go | 3 +
 .../iam/api_op_ListInstanceProfilesForRole.go | 3 +
 .../service/iam/api_op_ListMFADeviceTags.go | 3 +
 .../service/iam/api_op_ListMFADevices.go | 3 +
 .../api_op_ListOpenIDConnectProviderTags.go | 3 +
 .../iam/api_op_ListOpenIDConnectProviders.go | 3 +
 .../iam/api_op_ListOrganizationsFeatures.go | 3 +
 .../service/iam/api_op_ListPolicies.go | 3 +
 ...pi_op_ListPoliciesGrantingServiceAccess.go | 3 +
 .../service/iam/api_op_ListPolicyTags.go | 3 +
 .../service/iam/api_op_ListPolicyVersions.go | 3 +
 .../service/iam/api_op_ListRolePolicies.go | 3 +
 .../service/iam/api_op_ListRoleTags.go | 3 +
 .../service/iam/api_op_ListRoles.go | 3 +
 .../iam/api_op_ListSAMLProviderTags.go | 3 +
 .../service/iam/api_op_ListSAMLProviders.go | 3 +
 .../service/iam/api_op_ListSSHPublicKeys.go | 3 +
 .../iam/api_op_ListServerCertificateTags.go | 3 +
 .../iam/api_op_ListServerCertificates.go | 3 +
 .../api_op_ListServiceSpecificCredentials.go | 3 +
 .../iam/api_op_ListSigningCertificates.go | 3 +
 .../service/iam/api_op_ListUserPolicies.go | 3 +
 .../service/iam/api_op_ListUserTags.go | 3 +
 .../service/iam/api_op_ListUsers.go | 3 +
 .../iam/api_op_ListVirtualMFADevices.go | 3 +
 .../service/iam/api_op_PutGroupPolicy.go | 3 +
 .../iam/api_op_PutRolePermissionsBoundary.go | 3 +
 .../service/iam/api_op_PutRolePolicy.go | 3 +
 .../iam/api_op_PutUserPermissionsBoundary.go | 3 +
 .../service/iam/api_op_PutUserPolicy.go | 3 +
 ...RemoveClientIDFromOpenIDConnectProvider.go | 3 +
 .../api_op_RemoveRoleFromInstanceProfile.go | 3 +
 .../service/iam/api_op_RemoveUserFromGroup.go | 3 +
 .../api_op_ResetServiceSpecificCredential.go | 3 +
 .../service/iam/api_op_ResyncMFADevice.go | 3 +
 .../iam/api_op_SetDefaultPolicyVersion.go | 3 +
 ...i_op_SetSecurityTokenServicePreferences.go | 3 +
 .../iam/api_op_SimulateCustomPolicy.go | 3 +
 .../iam/api_op_SimulatePrincipalPolicy.go | 3 +
 .../service/iam/api_op_TagInstanceProfile.go | 3 +
 .../service/iam/api_op_TagMFADevice.go | 3 +
 .../iam/api_op_TagOpenIDConnectProvider.go | 3 +
 .../service/iam/api_op_TagPolicy.go | 3 +
 .../service/iam/api_op_TagRole.go | 3 +
 .../service/iam/api_op_TagSAMLProvider.go | 3 +
 .../iam/api_op_TagServerCertificate.go | 3 +
 .../service/iam/api_op_TagUser.go | 3 +
 .../iam/api_op_UntagInstanceProfile.go | 3 +
 .../service/iam/api_op_UntagMFADevice.go | 3 +
 .../iam/api_op_UntagOpenIDConnectProvider.go | 3 +
 .../service/iam/api_op_UntagPolicy.go | 3 +
 .../service/iam/api_op_UntagRole.go | 3 +
 .../service/iam/api_op_UntagSAMLProvider.go | 3 +
 .../iam/api_op_UntagServerCertificate.go | 3 +
 .../service/iam/api_op_UntagUser.go | 3 +
 .../service/iam/api_op_UpdateAccessKey.go | 3 +
 .../iam/api_op_UpdateAccountPasswordPolicy.go | 3 +
 .../iam/api_op_UpdateAssumeRolePolicy.go | 3 +
 .../service/iam/api_op_UpdateGroup.go | 3 +
 .../service/iam/api_op_UpdateLoginProfile.go | 3 +
 ...p_UpdateOpenIDConnectProviderThumbprint.go | 3 +
 .../service/iam/api_op_UpdateRole.go | 3 +
 .../iam/api_op_UpdateRoleDescription.go | 3 +
 .../service/iam/api_op_UpdateSAMLProvider.go | 3 +
 .../service/iam/api_op_UpdateSSHPublicKey.go | 3 +
 .../iam/api_op_UpdateServerCertificate.go | 3 +
 .../api_op_UpdateServiceSpecificCredential.go | 3 +
 .../iam/api_op_UpdateSigningCertificate.go | 3 +
 .../service/iam/api_op_UpdateUser.go | 3 +
 .../service/iam/api_op_UploadSSHPublicKey.go | 3 +
 .../iam/api_op_UploadServerCertificate.go | 3 +
 .../iam/api_op_UploadSigningCertificate.go | 3 +
 .../aws-sdk-go-v2/service/iam/generated.json | 1 +
 .../service/iam/go_module_metadata.go | 2 +-
 .../service/internal/checksum/CHANGELOG.md | 139 +
 .../service/internal/checksum/algorithms.go | 12 +
 .../internal/checksum/go_module_metadata.go | 2 +-
 .../internal/checksum/middleware_add.go | 67 +-
 .../middleware_checksum_metrics_tracking.go | 90 +
 .../middleware_compute_input_checksum.go | 136 +-
 .../checksum/middleware_setup_context.go | 54 +-
 .../checksum/middleware_validate_output.go | 10 +-
 .../service/internal/s3shared/CHANGELOG.md | 121 +
 .../internal/s3shared/go_module_metadata.go | 2 +-
 .../internal/s3shared/metadata_retriever.go | 5 +
 .../service/pricing/CHANGELOG.md | 13 +
 .../service/pricing/api_client.go | 31 +
 .../pricing/api_op_DescribeServices.go | 3 +
 .../pricing/api_op_GetAttributeValues.go | 3 +
 .../pricing/api_op_GetPriceListFileUrl.go | 3 +
 .../service/pricing/api_op_GetProducts.go | 3 +
 .../service/pricing/api_op_ListPriceLists.go | 3 +
 .../service/pricing/deserializers.go | 4 +-
 .../service/pricing/generated.json | 1 +
 .../service/pricing/go_module_metadata.go | 2 +-
 .../aws/aws-sdk-go-v2/service/s3/CHANGELOG.md | 286 +
 .../aws-sdk-go-v2/service/s3/api_client.go | 499 +-
 .../service/s3/api_op_AbortMultipartUpload.go | 187 +-
 .../s3/api_op_CompleteMultipartUpload.go | 473 +-
 .../service/s3/api_op_CopyObject.go | 772 +-
 .../service/s3/api_op_CreateBucket.go | 255 +-
 ..._CreateBucketMetadataTableConfiguration.go | 293 +
 .../s3/api_op_CreateMultipartUpload.go | 743 +-
 .../service/s3/api_op_CreateSession.go | 283 +-
 .../service/s3/api_op_DeleteBucket.go | 91 +-
 ...i_op_DeleteBucketAnalyticsConfiguration.go | 64 +-
 .../service/s3/api_op_DeleteBucketCors.go | 55 +-
 .../s3/api_op_DeleteBucketEncryption.go | 97 +-
 ...teBucketIntelligentTieringConfiguration.go | 73 +-
 ...i_op_DeleteBucketInventoryConfiguration.go | 66 +-
 .../s3/api_op_DeleteBucketLifecycle.go | 100 +-
 ..._DeleteBucketMetadataTableConfiguration.go | 237 +
 ...api_op_DeleteBucketMetricsConfiguration.go | 73 +-
 .../api_op_DeleteBucketOwnershipControls.go | 52 +-
 .../service/s3/api_op_DeleteBucketPolicy.go | 130 +-
 .../s3/api_op_DeleteBucketReplication.go | 68 +-
 .../service/s3/api_op_DeleteBucketTagging.go | 49 +-
 .../service/s3/api_op_DeleteBucketWebsite.go | 67 +-
 .../service/s3/api_op_DeleteObject.go | 234 +-
 .../service/s3/api_op_DeleteObjectTagging.go | 86 +-
 .../service/s3/api_op_DeleteObjects.go | 259 +-
 .../s3/api_op_DeletePublicAccessBlock.go | 61 +-
 ...api_op_GetBucketAccelerateConfiguration.go | 92 +-
 .../service/s3/api_op_GetBucketAcl.go | 91 +-
 .../api_op_GetBucketAnalyticsConfiguration.go | 70 +-
 .../service/s3/api_op_GetBucketCors.go | 91 +-
 .../service/s3/api_op_GetBucketEncryption.go | 98 +-
 ...etBucketIntelligentTieringConfiguration.go | 73 +-
 .../api_op_GetBucketInventoryConfiguration.go | 62 +-
 .../api_op_GetBucketLifecycleConfiguration.go | 142 +-
 .../service/s3/api_op_GetBucketLocation.go | 101 +-
 .../service/s3/api_op_GetBucketLogging.go | 52 +-
 ..._op_GetBucketMetadataTableConfiguration.go | 242 +
 .../api_op_GetBucketMetricsConfiguration.go | 71 +-
 ...i_op_GetBucketNotificationConfiguration.go | 99 +-
 .../s3/api_op_GetBucketOwnershipControls.go | 52 +-
 .../service/s3/api_op_GetBucketPolicy.go | 157 +-
 .../s3/api_op_GetBucketPolicyStatus.go | 65 +-
 .../service/s3/api_op_GetBucketReplication.go | 72 +-
 .../s3/api_op_GetBucketRequestPayment.go | 44 +-
 .../service/s3/api_op_GetBucketTagging.go | 53 +-
 .../service/s3/api_op_GetBucketVersioning.go | 58 +-
 .../service/s3/api_op_GetBucketWebsite.go | 55 +-
 .../service/s3/api_op_GetObject.go | 624 +-
 .../service/s3/api_op_GetObjectAcl.go | 104 +-
 .../service/s3/api_op_GetObjectAttributes.go | 294 +-
 .../service/s3/api_op_GetObjectLegalHold.go | 70 +-
 .../s3/api_op_GetObjectLockConfiguration.go | 62 +-
 .../service/s3/api_op_GetObjectRetention.go | 70 +-
 .../service/s3/api_op_GetObjectTagging.go | 94 +-
 .../service/s3/api_op_GetObjectTorrent.go | 68 +-
 .../service/s3/api_op_GetPublicAccessBlock.go | 72 +-
 .../service/s3/api_op_HeadBucket.go | 263 +-
 .../service/s3/api_op_HeadObject.go | 669 +-
 ...pi_op_ListBucketAnalyticsConfigurations.go | 77 +-
 ...tBucketIntelligentTieringConfigurations.go | 73 +-
 ...pi_op_ListBucketInventoryConfigurations.go | 76 +-
 .../api_op_ListBucketMetricsConfigurations.go | 86 +-
 .../service/s3/api_op_ListBuckets.go | 192 +-
 .../service/s3/api_op_ListDirectoryBuckets.go | 94 +-
 .../service/s3/api_op_ListMultipartUploads.go | 314 +-
 .../service/s3/api_op_ListObjectVersions.go | 113 +-
 .../service/s3/api_op_ListObjects.go | 202 +-
 .../service/s3/api_op_ListObjectsV2.go | 306 +-
 .../service/s3/api_op_ListParts.go | 303 +-
 ...api_op_PutBucketAccelerateConfiguration.go | 102 +-
 .../service/s3/api_op_PutBucketAcl.go | 263 +-
 .../api_op_PutBucketAnalyticsConfiguration.go | 94 +-
 .../service/s3/api_op_PutBucketCors.go | 116 +-
 .../service/s3/api_op_PutBucketEncryption.go | 222 +-
 ...utBucketIntelligentTieringConfiguration.go | 97 +-
 .../api_op_PutBucketInventoryConfiguration.go | 126 +-
 .../api_op_PutBucketLifecycleConfiguration.go | 217 +-
 .../service/s3/api_op_PutBucketLogging.go | 149 +-
 .../api_op_PutBucketMetricsConfiguration.go | 77 +-
 ...i_op_PutBucketNotificationConfiguration.go | 112 +-
 .../s3/api_op_PutBucketOwnershipControls.go | 65 +-
 .../service/s3/api_op_PutBucketPolicy.go | 207 +-
 .../service/s3/api_op_PutBucketReplication.go | 158 +-
 .../s3/api_op_PutBucketRequestPayment.go | 86 +-
 .../service/s3/api_op_PutBucketTagging.go | 125 +-
 .../service/s3/api_op_PutBucketVersioning.go | 132 +-
 .../service/s3/api_op_PutBucketWebsite.go | 114 +-
 .../service/s3/api_op_PutObject.go | 785 +-
 .../service/s3/api_op_PutObjectAcl.go | 320 +-
 .../service/s3/api_op_PutObjectLegalHold.go | 83 +-
 .../s3/api_op_PutObjectLockConfiguration.go | 85 +-
 .../service/s3/api_op_PutObjectRetention.go | 99 +-
 .../service/s3/api_op_PutObjectTagging.go | 144 +-
 .../service/s3/api_op_PutPublicAccessBlock.go | 103 +-
 .../service/s3/api_op_RestoreObject.go | 225 +-
 .../service/s3/api_op_SelectObjectContent.go | 170 +-
 .../service/s3/api_op_UploadPart.go | 475 +-
 .../service/s3/api_op_UploadPartCopy.go | 441 +-
 .../s3/api_op_WriteGetObjectResponse.go | 225 +-
 .../aws/aws-sdk-go-v2/service/s3/auth.go | 49 +-
 .../aws-sdk-go-v2/service/s3/deserializers.go | 1741 +++-
 .../aws/aws-sdk-go-v2/service/s3/endpoints.go | 1096 ++-
 .../service/s3/express_user_agent.go | 43 +
 .../aws-sdk-go-v2/service/s3/generated.json | 6 +-
 .../service/s3/go_module_metadata.go | 2 +-
 .../s3/internal/endpoints/endpoints.go | 45 +-
 .../aws/aws-sdk-go-v2/service/s3/options.go | 49 +-
 .../aws-sdk-go-v2/service/s3/presign_post.go | 419 +
 .../aws-sdk-go-v2/service/s3/serializers.go | 1712 +++-
 .../aws-sdk-go-v2/service/s3/types/enums.go | 355 +-
 .../aws-sdk-go-v2/service/s3/types/errors.go | 140 +-
 .../aws-sdk-go-v2/service/s3/types/types.go | 2107 +++--
 .../aws-sdk-go-v2/service/s3/uri_context.go | 23 +
 .../aws-sdk-go-v2/service/s3/validators.go | 197 +-
 .../v5/plumbing/transport/http/common.go | 3 +-
 .../github.com/go-git/go-git/v5/worktree.go | 3 +-
 .../go-git/go-git/v5/worktree_status.go | 2 +
 .../mapstructure/v2}/.editorconfig | 6 +-
 .../go-viper/mapstructure/v2/.envrc | 4 +
 .../go-viper/mapstructure/v2/.gitignore | 6 +
 .../go-viper/mapstructure/v2/.golangci.yaml | 23 +
 .../go-viper/mapstructure/v2}/CHANGELOG.md | 8 +
 .../go-viper/mapstructure/v2}/LICENSE | 0
 .../go-viper/mapstructure/v2/README.md | 80 +
 .../go-viper/mapstructure/v2/decode_hooks.go | 630 ++
 .../go-viper/mapstructure/v2/flake.lock | 472 +
 .../go-viper/mapstructure/v2/flake.nix | 39 +
 .../mapstructure/v2/internal/errors/errors.go | 11 +
 .../mapstructure/v2/internal/errors/join.go | 9 +
 .../v2/internal/errors/join_go1_19.go | 61 +
 .../go-viper/mapstructure/v2}/mapstructure.go | 332 +-
 .../mapstructure/v2/reflect_go1_19.go | 44 +
 .../mapstructure/v2/reflect_go1_20.go | 10 +
 vendor/github.com/hashicorp/hcl/.gitignore | 9 -
 vendor/github.com/hashicorp/hcl/.travis.yml | 13 -
 vendor/github.com/hashicorp/hcl/LICENSE | 354 -
 vendor/github.com/hashicorp/hcl/Makefile | 18 -
 vendor/github.com/hashicorp/hcl/README.md | 125 -
 vendor/github.com/hashicorp/hcl/appveyor.yml | 19 -
 vendor/github.com/hashicorp/hcl/decoder.go | 729 --
 vendor/github.com/hashicorp/hcl/hcl.go | 11 -
 .../github.com/hashicorp/hcl/hcl/ast/ast.go | 219 -
 .../github.com/hashicorp/hcl/hcl/ast/walk.go | 52 -
 .../hashicorp/hcl/hcl/parser/error.go | 17 -
 .../hashicorp/hcl/hcl/parser/parser.go | 532 --
 .../hashicorp/hcl/hcl/printer/nodes.go | 789 --
 .../hashicorp/hcl/hcl/printer/printer.go | 66 -
 .../hashicorp/hcl/hcl/scanner/scanner.go | 652 --
 .../hashicorp/hcl/hcl/strconv/quote.go | 241 -
 .../hashicorp/hcl/hcl/token/position.go | 46 -
 .../hashicorp/hcl/hcl/token/token.go | 219 -
 .../hashicorp/hcl/json/parser/flatten.go | 117 -
 .../hashicorp/hcl/json/parser/parser.go | 313 -
 .../hashicorp/hcl/json/scanner/scanner.go | 451 -
 .../hashicorp/hcl/json/token/position.go | 46 -
 .../hashicorp/hcl/json/token/token.go | 118 -
 vendor/github.com/hashicorp/hcl/lex.go | 38 -
 vendor/github.com/hashicorp/hcl/parse.go | 39 -
 .../jmespath/go-jmespath/.gitignore | 4 -
 .../jmespath/go-jmespath/.travis.yml | 28 -
 .../github.com/jmespath/go-jmespath/LICENSE | 13 -
 .../github.com/jmespath/go-jmespath/Makefile | 51 -
 .../github.com/jmespath/go-jmespath/README.md | 87 -
 vendor/github.com/jmespath/go-jmespath/api.go | 49 -
 .../go-jmespath/astnodetype_string.go | 16 -
 .../jmespath/go-jmespath/functions.go | 842 --
 .../jmespath/go-jmespath/interpreter.go | 418 -
 .../github.com/jmespath/go-jmespath/lexer.go | 420 -
 .../github.com/jmespath/go-jmespath/parser.go | 603 --
 .../jmespath/go-jmespath/toktype_string.go | 16 -
 .../github.com/jmespath/go-jmespath/util.go | 185 -
 .../magiconair/properties/.gitignore | 6 -
 .../magiconair/properties/CHANGELOG.md | 205 -
 .../magiconair/properties/LICENSE.md | 24 -
 .../magiconair/properties/README.md | 128 -
 .../magiconair/properties/decode.go | 289 -
 .../github.com/magiconair/properties/doc.go | 155 -
 .../magiconair/properties/integrate.go | 35 -
 .../github.com/magiconair/properties/lex.go | 395 -
 .../github.com/magiconair/properties/load.go | 293 -
 .../magiconair/properties/parser.go | 86 -
 .../magiconair/properties/properties.go | 848 --
 .../magiconair/properties/rangecheck.go | 31 -
 .../mitchellh/mapstructure/CHANGELOG.md | 96 -
 .../github.com/mitchellh/mapstructure/LICENSE | 21 -
 .../mitchellh/mapstructure/README.md | 46 -
 .../mitchellh/mapstructure/decode_hooks.go | 279 -
 .../mitchellh/mapstructure/error.go | 50 -
 .../mitchellh/mapstructure/mapstructure.go | 1540 ----
 vendor/github.com/pjbgf/sha1cd/Dockerfile.arm | 3 +-
 .../github.com/pjbgf/sha1cd/Dockerfile.arm64 | 2 +-
 vendor/github.com/pjbgf/sha1cd/LICENSE | 2 +-
 vendor/github.com/pjbgf/sha1cd/Makefile | 3 +-
 vendor/github.com/pjbgf/sha1cd/sha1cd.go | 2 +
 .../pjbgf/sha1cd/sha1cdblock_amd64.s | 3 +-
 .../pjbgf/sha1cd/ubc/{doc.go => ubc.go} | 2 +
 .../github.com/pjbgf/sha1cd/ubc/ubc_amd64.go | 14 +
 .../github.com/pjbgf/sha1cd/ubc/ubc_amd64.s | 1897 ++++
 .../sha1cd/ubc/{check.go => ubc_generic.go} | 2 +-
 .../github.com/pjbgf/sha1cd/ubc/ubc_noasm.go | 12 +
 vendor/github.com/pulumi/esc/.version | 2 +-
 vendor/github.com/pulumi/esc/CHANGELOG.md | 14 +
 .../pulumi/esc/CHANGELOG_PENDING.md | 20 +-
 vendor/github.com/pulumi/esc/CONTRIBUTING.md | 8 +
 .../github.com/pulumi/esc/ast/environment.go | 3 +
 vendor/github.com/pulumi/esc/ast/expr.go | 10 +-
 vendor/github.com/pulumi/esc/eval/eval.go | 11 +-
 .../sdk/go/aws/internal/pulumiUtilities.go | 4 +-
 .../sdk/go/aws/pulumi-plugin.json | 2 +-
 .../sdk/go/aws/pulumiEnums.go | 4 +
 .../sdk/v6/go/aws/autoscaling/pulumiTypes.go | 19 +
 .../aws/cloudwatch/contributorInsightRule.go | 333 +
 .../contributorManagedInsightRule.go | 306 +
 .../sdk/v6/go/aws/cloudwatch/eventRule.go | 12 +-
 .../sdk/v6/go/aws/cloudwatch/eventTarget.go | 12 +-
 .../getContributorManagedInsightRules.go | 130 +
 .../getLogDataProtectionPolicyDocument.go | 15 +-
 .../sdk/v6/go/aws/cloudwatch/init.go | 14 +
 .../sdk/v6/go/aws/cloudwatch/pulumiTypes.go | 516 +-
 .../pulumi-aws/sdk/v6/go/aws/ec2/eip.go | 12 +-
 .../pulumi-aws/sdk/v6/go/aws/ec2/flowLog.go | 12 +-
 .../sdk/v6/go/aws/ec2/getLaunchTemplate.go | 16 +-
 .../sdk/v6/go/aws/ec2/getVpcEndpoint.go | 4 +-
 .../v6/go/aws/ec2/getVpcEndpointService.go | 4 +-
 .../sdk/v6/go/aws/ec2/getVpcIpam.go | 12 +-
 .../pulumi-aws/sdk/v6/go/aws/ec2/instance.go | 24 +-
 .../sdk/v6/go/aws/ec2/launchTemplate.go | 48 +-
 .../sdk/v6/go/aws/ec2/pulumiTypes.go | 416 +-
 .../sdk/v6/go/aws/ec2/spotInstanceRequest.go | 24 +-
 .../sdk/v6/go/aws/ec2/vpcEndpoint.go | 12 +-
 .../sdk/v6/go/aws/ecs/pulumiTypes.go | 16 +-
 .../sdk/v6/go/aws/iam/getPolicyDocument.go | 16 +-
 .../sdk/v6/go/aws/internal/pulumiUtilities.go | 4 +-
 .../sdk/v6/go/aws/lb/getListenerRule.go | 16 +-
 .../sdk/v6/go/aws/lb/pulumiTypes.go | 30 +-
 .../aws/s3/bucketLifecycleConfigurationV2.go | 14 +-
 .../sdk/v6/go/aws/s3/bucketObjectv2.go | 23 +-
 .../pulumi-aws/sdk/v6/go/aws/s3/bucketV2.go | 172 +-
 .../sdk/v6/go/aws/s3/directoryBucket.go | 44 +-
 .../sdk/v6/go/aws/s3/getBucketObject.go | 8 +-
 .../sdk/v6/go/aws/s3/getBucketObjects.go | 8 +-
 .../pulumi-aws/sdk/v6/go/aws/s3/getObject.go | 12 +-
 .../pulumi-aws/sdk/v6/go/aws/s3/objectCopy.go | 23 +-
 .../sdk/v6/go/aws/s3/pulumiTypes.go | 114 +-
 .../v2/go/awsx/internal/pulumiUtilities.go | 4 +-
 .../v2/getManagementLockAtResourceLevel.go | 4 +
 .../v2/getPimRoleEligibilitySchedule.go | 220 +
 .../authorization/v2/init.go | 2 +
 .../v2/managementLockAtResourceLevel.go | 7 +
 .../v2/pimRoleEligibilitySchedule.go | 314 +
 .../authorization/v2/pulumi-plugin.json | 2 +-
 .../authorization/v2/pulumiEnums.go | 185 +
 .../authorization/v2/pulumiTypes.go | 1106 +++
 .../compute/v2/pulumi-plugin.json | 2 +-
 .../v2/federatedIdentityCredential.go | 5 +-
 .../v2/getFederatedIdentityCredential.go | 14 +-
 .../v2/getUserAssignedIdentity.go | 14 +-
 ...UserAssignedIdentityAssociatedResources.go | 12 +-
 .../managedidentity/v2/pulumi-plugin.json | 2 +-
 .../v2/userAssignedIdentity.go | 5 +-
 .../network/v2/pulumi-plugin.json | 2 +-
 .../resources/v2/getResource.go | 4 +
 .../resources/v2/pulumi-plugin.json | 2 +-
 .../resources/v2/resource.go | 7 +
 .../storage/v2/pulumi-plugin.json | 2 +-
 .../v2/utilities/pulumiUtilities.go | 4 +-
 .../go/command/internal/pulumiUtilities.go | 4 +-
 .../dockerbuild/internal/pulumiUtilities.go | 4 +-
 .../sdk/go/dockerbuild/pulumi-plugin.json | 2 +-
 .../v4/go/docker/internal/pulumiUtilities.go | 4 +-
 .../sdk/v4/go/docker/pulumi-plugin.json | 2 +-
 .../v4/go/random/internal/pulumiUtilities.go | 4 +-
 .../sdk/v4/go/random/pulumi-plugin.json | 2 +-
 .../sdk/v5/go/tls/getCertificate.go | 16 +-
 .../pulumi-tls/sdk/v5/go/tls/getPublicKey.go | 18 +-
 .../sdk/v5/go/tls/internal/pulumiUtilities.go | 4 +-
 .../sdk/v5/go/tls/pulumi-plugin.json | 2 +-
 .../github.com/pulumi/pulumi/sdk/v3/.version | 2 +-
 .../sdk/v3/go/common/apitype/organizations.go | 30 +
 .../pulumi/sdk/v3/go/common/apitype/stacks.go | 4 +-
 .../sdk/v3/go/common/apitype/templates.go | 65 +
 .../pulumi/pulumi/sdk/v3/go/common/env/env.go | 9 +
 .../sdk/v3/go/common/resource/config/crypt.go | 74 +-
 .../v3/go/common/resource/plugin/analyzer.go | 1 +
 .../common/resource/plugin/analyzer_plugin.go | 1 +
 .../v3/go/common/resource/plugin/context.go | 24 +-
 .../sdk/v3/go/common/resource/plugin/host.go | 91 +-
 .../v3/go/common/resource/plugin/provider.go | 33 +-
 .../common/resource/plugin/provider_server.go | 2 +-
 .../v3/go/common/resource/resource_goal.go | 4 +-
 .../sdk/v3/go/common/util/cmdutil/trace.go | 4 +
 .../sdk/v3/go/common/workspace/project.go | 125 +-
 .../sdk/v3/go/common/workspace/project.json | 40 +
 .../pulumi/pulumi/sdk/v3/go/pulumi/context.go | 26 +-
 .../pulumi/sdk/v3/go/pulumi/provider.go | 2 +-
 .../pulumi/sdk/v3/go/pulumi/resource.go | 17 +-
 .../pulumi/sdk/v3/proto/go/analyzer.pb.go | 354 +-
 .../pulumi/sdk/v3/proto/go/provider.pb.go | 392 +-
 .../pulumi/sdk/v3/proto/go/resource.pb.go | 867 +-
 .../github.com/sagikazarmark/locafero/.envrc | 4 +-
 .../sagikazarmark/locafero/README.md | 2 +-
 .../sagikazarmark/locafero/finder.go | 6 +-
 .../sagikazarmark/locafero/flake.lock | 257 +-
 .../sagikazarmark/locafero/flake.nix | 62 +-
 .../github.com/sagikazarmark/locafero/glob.go | 5 +
 .../sagikazarmark/locafero/glob_windows.go | 8 +
 .../github.com/sagikazarmark/slog-shim/.envrc | 4 -
 .../sagikazarmark/slog-shim/LICENSE | 27 -
 .../sagikazarmark/slog-shim/README.md | 81 -
 .../sagikazarmark/slog-shim/attr.go | 74 -
 .../sagikazarmark/slog-shim/attr_120.go | 75 -
 .../sagikazarmark/slog-shim/flake.lock | 273 -
 .../sagikazarmark/slog-shim/handler.go | 45 -
 .../sagikazarmark/slog-shim/handler_120.go | 45 -
 .../sagikazarmark/slog-shim/json_handler.go | 23 -
 .../slog-shim/json_handler_120.go | 24 -
 .../sagikazarmark/slog-shim/level.go | 61 -
 .../sagikazarmark/slog-shim/level_120.go | 61 -
 .../sagikazarmark/slog-shim/logger.go | 98 -
 .../sagikazarmark/slog-shim/logger_120.go | 99 -
 .../sagikazarmark/slog-shim/record.go | 31 -
 .../sagikazarmark/slog-shim/record_120.go | 32 -
 .../sagikazarmark/slog-shim/text_handler.go | 23 -
 .../slog-shim/text_handler_120.go | 24 -
 .../sagikazarmark/slog-shim/value.go | 109 -
 .../sagikazarmark/slog-shim/value_120.go | 110 -
 vendor/github.com/skeema/knownhosts/NOTICE | 2 +-
 vendor/github.com/skeema/knownhosts/README.md | 2 +-
 .../github.com/spf13/afero}/.editorconfig | 12 +-
 vendor/github.com/spf13/afero/.golangci.yaml | 18 +
 vendor/github.com/spf13/afero/README.md | 41 +-
 vendor/github.com/spf13/afero/iofs.go | 1 -
 vendor/github.com/spf13/afero/memmap.go | 2 -
 vendor/github.com/spf13/cast/README.md | 2 +-
 vendor/github.com/spf13/cast/caste.go | 98 +-
 vendor/github.com/spf13/viper/.envrc | 4 +-
 vendor/github.com/spf13/viper/.golangci.yaml | 3 -
 vendor/github.com/spf13/viper/README.md | 21 +-
 vendor/github.com/spf13/viper/UPDATES.md | 126 +
 vendor/github.com/spf13/viper/encoding.go | 181 +
 vendor/github.com/spf13/viper/experimental.go | 8 +
 vendor/github.com/spf13/viper/file.go | 54 +-
 vendor/github.com/spf13/viper/file_finder.go | 38 -
 vendor/github.com/spf13/viper/finder.go | 55 +
 vendor/github.com/spf13/viper/flake.lock | 303 +-
 vendor/github.com/spf13/viper/flake.nix | 2 +-
 .../spf13/viper/internal/encoding/decoder.go | 61 -
 .../spf13/viper/internal/encoding/encoder.go | 60 -
 .../spf13/viper/internal/encoding/error.go | 7 -
 .../viper/internal/encoding/hcl/codec.go | 40 -
 .../viper/internal/encoding/ini/codec.go | 99 -
 .../viper/internal/encoding/ini/map_utils.go | 74 -
 .../internal/encoding/javaproperties/codec.go | 86 -
 .../encoding/javaproperties/map_utils.go | 74 -
 .../spf13/viper/internal/features/finder.go | 5 +
 .../viper/internal/features/finder_default.go | 5 +
 vendor/github.com/spf13/viper/logger.go | 39 +-
 vendor/github.com/spf13/viper/remote.go | 256 +
 vendor/github.com/spf13/viper/util.go | 11 +-
 vendor/github.com/spf13/viper/viper.go | 539 +-
 .../zclconf/go-cty/cty/convert/conversion.go | 8 +-
 .../cty/convert/conversion_collection.go | 2 +-
 .../zclconf/go-cty/cty/function/function.go | 33 +-
 .../zclconf/go-cty/cty/value_ops.go | 33 +-
 .../x/exp/constraints/constraints.go | 50 -
 vendor/golang.org/x/exp/maps/maps.go | 68 +-
 vendor/golang.org/x/exp/slices/cmp.go | 44 -
 vendor/golang.org/x/exp/slices/slices.go | 447 +-
 vendor/golang.org/x/exp/slices/sort.go | 162 +-
 .../golang.org/x/exp/slices/zsortanyfunc.go | 479 -
 .../golang.org/x/exp/slices/zsortordered.go | 481 --
 vendor/golang.org/x/exp/slog/attr.go | 102 -
 vendor/golang.org/x/exp/slog/doc.go | 316 -
 vendor/golang.org/x/exp/slog/handler.go | 577 --
 .../x/exp/slog/internal/buffer/buffer.go | 84 -
 .../x/exp/slog/internal/ignorepc.go | 9 -
 vendor/golang.org/x/exp/slog/json_handler.go | 336 -
 vendor/golang.org/x/exp/slog/level.go | 201 -
 vendor/golang.org/x/exp/slog/logger.go | 343 -
 vendor/golang.org/x/exp/slog/noplog.bench | 36 -
 vendor/golang.org/x/exp/slog/record.go | 207 -
 vendor/golang.org/x/exp/slog/text_handler.go | 161 -
 vendor/golang.org/x/exp/slog/value.go | 456 -
 vendor/golang.org/x/exp/slog/value_119.go | 53 -
 vendor/golang.org/x/exp/slog/value_120.go | 39 -
 .../x/tools/cmd/stringer/stringer.go | 204 +-
 .../x/tools/go/gcexportdata/gcexportdata.go | 119 +-
 vendor/golang.org/x/tools/go/packages/doc.go | 15 +-
 .../x/tools/go/packages/external.go | 13 +-
 .../golang.org/x/tools/go/packages/golist.go | 48 +-
 .../x/tools/go/packages/loadmode_string.go | 71 +-
 .../x/tools/go/packages/packages.go | 410 +-
 .../x/tools/go/types/objectpath/objectpath.go | 111 +-
 .../x/tools/go/types/typeutil/callee.go | 68 +
 .../x/tools/go/types/typeutil/imports.go | 30 +
 .../x/tools/go/types/typeutil/map.go | 475 +
 .../tools/go/types/typeutil/methodsetcache.go | 71 +
 .../x/tools/go/types/typeutil/ui.go | 53 +
 .../x/tools/internal/aliases/aliases.go | 2 +-
 .../x/tools/internal/aliases/aliases_go121.go | 37 -
 .../x/tools/internal/aliases/aliases_go122.go | 32 +-
 .../x/tools/internal/event/keys/keys.go | 6 +-
 .../x/tools/internal/event/label/label.go | 6 +-
 .../x/tools/internal/gcimporter/bimport.go | 63 +-
 .../x/tools/internal/gcimporter/exportdata.go | 426 +-
 .../x/tools/internal/gcimporter/gcimporter.go | 182 +-
 .../x/tools/internal/gcimporter/iexport.go | 43 +-
 .../x/tools/internal/gcimporter/iimport.go | 39 +-
 .../internal/gcimporter/iimport_go122.go | 53 +
 .../internal/gcimporter/newInterface10.go | 22 -
 .../internal/gcimporter/newInterface11.go | 14 -
 .../tools/internal/gcimporter/predeclared.go | 91 +
 .../x/tools/internal/gcimporter/support.go | 30 +
 .../internal/gcimporter/support_go118.go | 34 -
 .../x/tools/internal/gcimporter/unified_no.go | 10 -
 .../tools/internal/gcimporter/unified_yes.go | 10 -
 .../tools/internal/gcimporter/ureader_yes.go | 33 +-
 .../x/tools/internal/gocommand/invoke.go | 60 +-
 .../internal/gocommand/invoke_notunix.go | 13 +
 .../x/tools/internal/gocommand/invoke_unix.go | 13 +
 .../internal/packagesinternal/packages.go | 8 +-
 .../x/tools/internal/stdlib/deps.go | 359 +
 .../x/tools/internal/stdlib/import.go | 89 +
 .../x/tools/internal/stdlib/manifest.go | 225 +-
 .../x/tools/internal/stdlib/stdlib.go | 2 +-
 .../internal/tokeninternal/tokeninternal.go | 137 -
 .../x/tools/internal/typeparams/common.go | 68 +
 .../x/tools/internal/typeparams/coretype.go | 155 +
 .../x/tools/internal/typeparams/free.go | 131 +
 .../x/tools/internal/typeparams/normalize.go | 218 +
 .../x/tools/internal/typeparams/termlist.go | 163 +
 .../x/tools/internal/typeparams/typeterm.go | 169 +
 .../x/tools/internal/typesinternal/element.go | 133 +
 .../tools/internal/typesinternal/errorcode.go | 2 +-
 .../tools/internal/typesinternal/qualifier.go | 46 +
 .../x/tools/internal/typesinternal/recv.go | 11 +-
 .../x/tools/internal/typesinternal/types.go | 68 +-
 .../x/tools/internal/typesinternal/varkind.go | 40 +
 .../tools/internal/typesinternal/zerovalue.go | 392 +
 .../x/tools/internal/versions/constraint.go | 13 -
 .../internal/versions/constraint_go121.go | 14 -
 .../x/tools/internal/versions/toolchain.go | 14 -
 .../internal/versions/toolchain_go119.go | 14 -
 .../internal/versions/toolchain_go120.go | 14 -
 .../x/tools/internal/versions/types.go | 28 +-
 .../x/tools/internal/versions/types_go121.go | 30 -
 .../x/tools/internal/versions/types_go122.go | 41 -
 .../googleapis/rpc/status/status.pb.go | 2 +-
 vendor/google.golang.org/grpc/CONTRIBUTING.md | 16 +-
 .../grpc/balancer/balancer.go | 110 +-
 .../grpc/balancer/base/balancer.go | 2 +-
 .../endpointsharding/endpointsharding.go | 358 +
 .../pickfirst/internal/internal.go} | 25 +-
 .../grpc/balancer/pickfirst/pickfirst.go | 16 +-
 .../pickfirst/pickfirstleaf/pickfirstleaf.go | 932 ++
 .../grpc/balancer/roundrobin/roundrobin.go | 70 +-
 .../grpc/balancer/subconn.go | 134 +
 .../grpc/balancer_wrapper.go | 199 +-
 .../grpc_binarylog_v1/binarylog.pb.go | 296 +-
 vendor/google.golang.org/grpc/clientconn.go | 112 +-
 vendor/google.golang.org/grpc/codec.go | 2 +-
 .../google.golang.org/grpc/credentials/tls.go | 35 +-
 vendor/google.golang.org/grpc/dialoptions.go | 57 +-
 .../grpc/experimental/stats/metricregistry.go | 27 +-
 .../grpc/experimental/stats/metrics.go | 78 +-
 .../grpc/grpclog/internal/loggerv2.go | 107 +-
 .../grpc/health/grpc_health_v1/health.pb.go | 81 +-
 .../google.golang.org/grpc/health/producer.go | 106 +
 .../grpc/internal/backoff/backoff.go | 2 +-
 .../balancer/gracefulswitch/config.go | 2 +
 .../balancer/gracefulswitch/gracefulswitch.go | 10 +-
 .../grpc/internal/channelz/channel.go | 15 +
 .../grpc/internal/channelz/server.go | 2 +
 .../grpc/internal/channelz/socket.go | 7 +
 .../grpc/internal/channelz/subchannel.go | 2 +
 .../grpc/internal/channelz/trace.go | 19 +-
 .../grpc/internal/envconfig/envconfig.go | 7 +-
 .../grpc/internal/envconfig/xds.go | 10 +
 .../internal/grpcsync/callback_serializer.go | 2 +-
 .../grpc/internal/grpcutil/method.go | 2 +-
 .../grpc/internal/idle/idle.go | 4 +-
 .../grpc/internal/internal.go | 57 +-
 .../proxyattributes/proxyattributes.go | 54 +
 .../delegatingresolver/delegatingresolver.go | 329 +
 .../internal/resolver/dns/dns_resolver.go | 47 +-
 .../internal/stats/metrics_recorder_list.go | 10 +
 .../grpc/internal/status/status.go | 35 +-
 .../grpc/internal/transport/client_stream.go | 144 +
 .../grpc/internal/transport/flowcontrol.go | 9 +-
 .../grpc/internal/transport/handler_server.go | 38 +-
 .../grpc/internal/transport/http2_client.go | 160 +-
 .../grpc/internal/transport/http2_server.go | 73 +-
 .../grpc/internal/transport/proxy.go | 62 +-
 .../grpc/internal/transport/server_stream.go | 178 +
 .../grpc/internal/transport/transport.go | 318 +-
 .../grpc/mem/buffer_slice.go | 59 +-
 vendor/google.golang.org/grpc/mem/buffers.go | 32 +-
 .../google.golang.org/grpc/picker_wrapper.go | 2 +-
 vendor/google.golang.org/grpc/preloader.go | 4 +-
 .../grpc_reflection_v1/reflection.pb.go | 328 +-
 .../grpc_reflection_v1alpha/reflection.pb.go | 324 +-
 .../grpc/resolver/resolver.go | 25 +-
 .../grpc/resolver_wrapper.go | 13 +-
 vendor/google.golang.org/grpc/rpc_util.go | 122 +-
 vendor/google.golang.org/grpc/server.go | 118 +-
 .../google.golang.org/grpc/service_config.go | 22 +-
 .../google.golang.org/grpc/stats/metrics.go | 81 +
 vendor/google.golang.org/grpc/stats/stats.go | 74 +-
 vendor/google.golang.org/grpc/stream.go | 348 +-
 vendor/google.golang.org/grpc/version.go | 2 +-
 .../protobuf/encoding/protojson/decode.go | 5 -
 .../encoding/protojson/well_known_types.go | 6 +-
 .../protobuf/encoding/prototext/decode.go | 5 -
 .../editiondefaults/editions_defaults.binpb | Bin 93 -> 138 bytes
 .../internal/editionssupport/editions.go | 5 +
 .../protobuf/internal/encoding/tag/tag.go | 8 +-
 .../protobuf/internal/errors/is_go112.go | 40 -
 .../protobuf/internal/errors/is_go113.go | 13 -
 .../protobuf/internal/filedesc/desc.go | 27 +-
 .../protobuf/internal/filedesc/desc_lazy.go | 9 -
 .../protobuf/internal/filedesc/editions.go | 8 +
 .../protobuf/internal/filetype/build.go | 2 +-
 .../protobuf/internal/flags/flags.go | 2 +-
 .../internal/genid/go_features_gen.go | 34 +
 .../protobuf/internal/genid/goname.go | 5 -
 .../protobuf/internal/genid/name.go} | 14 +-
 .../internal/impl/api_export_opaque.go | 128 +
 .../protobuf/internal/impl/bitmap.go | 34 +
 .../protobuf/internal/impl/bitmap_race.go | 126 +
 .../protobuf/internal/impl/checkinit.go | 33 +
 .../protobuf/internal/impl/codec_field.go | 75 -
 .../internal/impl/codec_field_opaque.go | 264 +
 .../protobuf/internal/impl/codec_map.go | 14 +-
 .../protobuf/internal/impl/codec_map_go111.go | 38 -
 .../protobuf/internal/impl/codec_map_go112.go | 12 -
 .../protobuf/internal/impl/codec_message.go | 20 +-
 .../internal/impl/codec_message_opaque.go | 153 +
 .../protobuf/internal/impl/convert_map.go | 2 +-
 .../protobuf/internal/impl/decode.go | 56 +-
 .../protobuf/internal/impl/encode.go | 78 +
 .../protobuf/internal/impl/lazy.go | 433 +
 .../protobuf/internal/impl/legacy_message.go | 5 +-
 .../protobuf/internal/impl/merge.go | 27 +
 .../protobuf/internal/impl/message.go | 31 +-
 .../protobuf/internal/impl/message_opaque.go | 627 ++
 .../internal/impl/message_opaque_gen.go | 132 +
 .../protobuf/internal/impl/message_reflect.go | 10 +-
 .../internal/impl/message_reflect_field.go | 160 +-
 .../impl/message_reflect_field_gen.go | 273 +
 .../protobuf/internal/impl/pointer_unsafe.go | 12 +-
 .../internal/impl/pointer_unsafe_opaque.go | 42 +
 .../protobuf/internal/impl/presence.go | 142 +
 .../protobuf/internal/impl/validate.go | 40 +-
 .../protobuf/internal/impl/weak.go | 74 -
 .../internal/protolazy/bufferreader.go | 364 +
 .../protobuf/internal/protolazy/lazy.go | 359 +
 .../internal/protolazy/pointer_unsafe.go | 17 +
 .../protobuf/internal/version/version.go | 4 +-
 .../protobuf/proto/decode.go | 21 +-
 .../protobuf/proto/encode.go | 3 +-
 .../google.golang.org/protobuf/proto/size.go | 8 +
 .../protobuf/proto/wrapperopaque.go | 80 +
 .../protobuf/reflect/protodesc/desc.go | 20 +-
 .../protobuf/reflect/protodesc/desc_init.go | 1 -
 .../reflect/protodesc/desc_resolve.go | 26 +-
 .../reflect/protodesc/desc_validate.go | 12 -
 .../protobuf/reflect/protodesc/editions.go | 44 +-
 .../protobuf/reflect/protodesc/proto.go | 3 -
 .../protobuf/reflect/protoreflect/type.go | 12 +-
 .../protobuf/reflect/protoreflect/value.go | 2 +-
 .../protobuf/runtime/protoiface/methods.go | 16 +
 .../protobuf/runtime/protoimpl/impl.go | 4 +
 .../types/descriptorpb/descriptor.pb.go | 1785 ++--
 .../types/gofeaturespb/go_features.pb.go | 238 +-
 .../protobuf/types/known/anypb/any.pb.go | 21 +-
 .../types/known/durationpb/duration.pb.go | 21 +-
 .../protobuf/types/known/emptypb/empty.pb.go | 16 +-
 .../types/known/structpb/struct.pb.go | 83 +-
 .../types/known/timestamppb/timestamp.pb.go | 21 +-
 vendor/gopkg.in/ini.v1/.editorconfig | 12 -
 vendor/gopkg.in/ini.v1/.gitignore | 7 -
 vendor/gopkg.in/ini.v1/.golangci.yml | 27 -
 vendor/gopkg.in/ini.v1/LICENSE | 191 -
 vendor/gopkg.in/ini.v1/Makefile | 15 -
 vendor/gopkg.in/ini.v1/README.md | 43 -
 vendor/gopkg.in/ini.v1/codecov.yml | 16 -
 vendor/gopkg.in/ini.v1/data_source.go | 76 -
 vendor/gopkg.in/ini.v1/deprecated.go | 22 -
 vendor/gopkg.in/ini.v1/error.go | 49 -
 vendor/gopkg.in/ini.v1/file.go | 541 --
 vendor/gopkg.in/ini.v1/helper.go | 24 -
 vendor/gopkg.in/ini.v1/ini.go | 176 -
 vendor/gopkg.in/ini.v1/key.go | 837 --
 vendor/gopkg.in/ini.v1/parser.go | 520 --
 vendor/gopkg.in/ini.v1/section.go | 256 -
 vendor/gopkg.in/ini.v1/struct.go | 747 --
 vendor/lukechampine.com/frand/README.md | 72 +-
 vendor/lukechampine.com/frand/frand.go | 96 +-
 vendor/modules.txt | 199 +-
 2136 files changed, 74671 insertions(+), 87005 deletions(-)
 create mode 100644 tools/vendor/github.com/Antonboom/testifylint/internal/checkers/equal_values.go
 create mode 100644 tools/vendor/github.com/Antonboom/testifylint/internal/checkers/suite_method_signature.go
 create mode 100644 tools/vendor/github.com/fsnotify/fsnotify/.cirrus.yml
 delete mode 100644 tools/vendor/github.com/fsnotify/fsnotify/.gitattributes
 delete mode 100644 tools/vendor/github.com/fsnotify/fsnotify/AUTHORS
 create mode 100644 tools/vendor/github.com/fsnotify/fsnotify/backend_fen.go
 create mode 100644 tools/vendor/github.com/fsnotify/fsnotify/backend_inotify.go
 create mode 100644 tools/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go
 create mode 100644 tools/vendor/github.com/fsnotify/fsnotify/backend_other.go
 create mode 100644 tools/vendor/github.com/fsnotify/fsnotify/backend_windows.go
 delete mode 100644 tools/vendor/github.com/fsnotify/fsnotify/fen.go
 delete mode 100644 tools/vendor/github.com/fsnotify/fsnotify/fsnotify_unsupported.go
 delete mode 100644 tools/vendor/github.com/fsnotify/fsnotify/inotify.go
 delete mode 100644 tools/vendor/github.com/fsnotify/fsnotify/inotify_poller.go
 create mode 100644 tools/vendor/github.com/fsnotify/fsnotify/internal/darwin.go
 create mode 100644 tools/vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go
 create mode 100644 tools/vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go
 create mode 100644 tools/vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go
 create mode 100644 tools/vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go
 create mode 100644 tools/vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go
 create mode 100644 tools/vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go
 create mode 100644 tools/vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go
 create mode 100644 tools/vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go
 create mode 100644 tools/vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go
 create mode 100644 tools/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go
 create mode 100644 tools/vendor/github.com/fsnotify/fsnotify/internal/internal.go
 create mode 100644 tools/vendor/github.com/fsnotify/fsnotify/internal/unix.go
 create mode 100644 tools/vendor/github.com/fsnotify/fsnotify/internal/unix2.go
 create mode 100644 tools/vendor/github.com/fsnotify/fsnotify/internal/windows.go
 delete mode 100644 tools/vendor/github.com/fsnotify/fsnotify/kqueue.go
 delete mode 100644 tools/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go
 delete mode 100644 tools/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go
 create mode 100644 tools/vendor/github.com/fsnotify/fsnotify/system_bsd.go
 create mode 100644 tools/vendor/github.com/fsnotify/fsnotify/system_darwin.go
 delete mode 100644 tools/vendor/github.com/fsnotify/fsnotify/windows.go
 delete mode 100644 tools/vendor/github.com/golang/protobuf/AUTHORS
 delete mode 100644 tools/vendor/github.com/golang/protobuf/CONTRIBUTORS
 delete mode 100644 tools/vendor/github.com/golang/protobuf/proto/buffer.go
 delete mode 100644 tools/vendor/github.com/golang/protobuf/proto/defaults.go
 delete mode 100644 tools/vendor/github.com/golang/protobuf/proto/deprecated.go
 delete mode 100644 tools/vendor/github.com/golang/protobuf/proto/discard.go
 delete mode 100644 tools/vendor/github.com/golang/protobuf/proto/extensions.go
 delete mode 100644 tools/vendor/github.com/golang/protobuf/proto/properties.go
 delete mode 100644 tools/vendor/github.com/golang/protobuf/proto/proto.go
 delete mode 100644 tools/vendor/github.com/golang/protobuf/proto/registry.go
 delete mode 100644 tools/vendor/github.com/golang/protobuf/proto/text_decode.go
 delete mode 100644 tools/vendor/github.com/golang/protobuf/proto/text_encode.go
 delete mode 100644 tools/vendor/github.com/golang/protobuf/proto/wire.go
 delete mode 100644 tools/vendor/github.com/golang/protobuf/proto/wrappers.go
 delete mode 100644 tools/vendor/github.com/golang/protobuf/ptypes/any.go
 delete mode 100644 tools/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
 delete mode 100644 tools/vendor/github.com/golang/protobuf/ptypes/doc.go
 delete mode 100644 tools/vendor/github.com/golang/protobuf/ptypes/duration.go
 delete mode 100644 tools/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
 delete mode 100644 tools/vendor/github.com/golang/protobuf/ptypes/timestamp.go
 delete mode 100644 tools/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
 create mode 100644 tools/vendor/github.com/gostaticanalysis/comment/.tagpr
 create mode 100644 tools/vendor/github.com/gostaticanalysis/comment/CHANGELOG.md
 create mode 100644 tools/vendor/github.com/gostaticanalysis/comment/version.txt
 delete mode 100644 tools/vendor/github.com/hashicorp/hcl/.gitignore
 delete mode 100644 tools/vendor/github.com/hashicorp/hcl/.travis.yml
 delete mode 100644 tools/vendor/github.com/hashicorp/hcl/LICENSE
 delete mode 100644 tools/vendor/github.com/hashicorp/hcl/Makefile
 delete mode 100644 tools/vendor/github.com/hashicorp/hcl/README.md
 delete mode 100644 tools/vendor/github.com/hashicorp/hcl/appveyor.yml
 delete mode 100644 tools/vendor/github.com/hashicorp/hcl/decoder.go
 delete mode 100644 tools/vendor/github.com/hashicorp/hcl/hcl.go
 delete mode 100644 tools/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go
 delete mode 100644 tools/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go
 delete mode 100644 tools/vendor/github.com/hashicorp/hcl/hcl/parser/error.go
 delete mode 100644 tools/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go
 delete mode 100644 tools/vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go
 delete mode 100644 tools/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go
 delete mode 100644 tools/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go
 delete mode 100644 tools/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go
 delete mode 100644 tools/vendor/github.com/hashicorp/hcl/hcl/token/position.go
 delete mode 100644 tools/vendor/github.com/hashicorp/hcl/hcl/token/token.go
 delete mode 100644 tools/vendor/github.com/hashicorp/hcl/json/parser/flatten.go
 delete mode 100644 tools/vendor/github.com/hashicorp/hcl/json/parser/parser.go
 delete mode 100644 tools/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go
 delete mode 100644 tools/vendor/github.com/hashicorp/hcl/json/token/position.go
 delete mode 100644 tools/vendor/github.com/hashicorp/hcl/json/token/token.go
 delete mode 100644 tools/vendor/github.com/hashicorp/hcl/lex.go
 delete mode 100644 tools/vendor/github.com/hashicorp/hcl/parse.go
 delete mode 100644 tools/vendor/github.com/kisielk/errcheck/errcheck/tags.go
 delete mode 100644 tools/vendor/github.com/kisielk/errcheck/errcheck/tags_compat.go
 delete mode 100644 tools/vendor/github.com/magiconair/properties/.gitignore
 delete mode 100644 tools/vendor/github.com/magiconair/properties/CHANGELOG.md
 delete mode 100644 tools/vendor/github.com/magiconair/properties/LICENSE.md
 delete mode 100644 tools/vendor/github.com/magiconair/properties/README.md
 delete mode 100644 tools/vendor/github.com/magiconair/properties/decode.go
 delete mode 100644 tools/vendor/github.com/magiconair/properties/doc.go
 delete mode 100644 tools/vendor/github.com/magiconair/properties/integrate.go
 delete mode 100644 tools/vendor/github.com/magiconair/properties/lex.go
 delete mode 100644 tools/vendor/github.com/magiconair/properties/load.go
 delete mode 100644 tools/vendor/github.com/magiconair/properties/parser.go
 delete mode 100644 tools/vendor/github.com/magiconair/properties/properties.go
 delete mode 100644 tools/vendor/github.com/magiconair/properties/rangecheck.go
 delete mode 100644 tools/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE
 delete mode 100644 tools/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE
 delete mode 100644 tools/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore
 delete mode 100644 tools/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile
 delete mode 100644 tools/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go
 delete mode 100644 tools/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go
 delete mode 100644 tools/vendor/github.com/mitchellh/mapstructure/README.md
 delete mode 100644 tools/vendor/github.com/mitchellh/mapstructure/decode_hooks.go
 delete mode 100644 tools/vendor/github.com/mitchellh/mapstructure/error.go
 rename tools/vendor/github.com/{golang/protobuf => munnerz/goautoneg}/LICENSE (53%)
 create mode 100644 tools/vendor/github.com/munnerz/goautoneg/Makefile
 rename tools/vendor/github.com/{prometheus/common/internal/bitbucket.org/ww => munnerz}/goautoneg/README.txt (100%)
 rename tools/vendor/github.com/{prometheus/common/internal/bitbucket.org/ww => munnerz}/goautoneg/autoneg.go (63%)
 create mode 100644 tools/vendor/github.com/nunnatsa/ginkgolinter/internal/ginkgoinfo/ginkgoinfo.go
 delete mode 100644 tools/vendor/github.com/pelletier/go-toml/.dockerignore
 delete mode 100644 tools/vendor/github.com/pelletier/go-toml/.gitignore
 delete mode 100644 tools/vendor/github.com/pelletier/go-toml/CONTRIBUTING.md
 delete mode 100644 tools/vendor/github.com/pelletier/go-toml/Dockerfile
 delete mode 100644 tools/vendor/github.com/pelletier/go-toml/LICENSE
 delete mode 100644 tools/vendor/github.com/pelletier/go-toml/Makefile
 delete mode 100644 tools/vendor/github.com/pelletier/go-toml/PULL_REQUEST_TEMPLATE.md
 delete mode 100644 tools/vendor/github.com/pelletier/go-toml/README.md
 delete mode 100644 tools/vendor/github.com/pelletier/go-toml/SECURITY.md
 delete mode 100644 tools/vendor/github.com/pelletier/go-toml/azure-pipelines.yml
 delete mode 100644 tools/vendor/github.com/pelletier/go-toml/benchmark.sh
 delete mode 100644 tools/vendor/github.com/pelletier/go-toml/doc.go
 delete mode 100644 tools/vendor/github.com/pelletier/go-toml/example-crlf.toml
 delete mode 100644 tools/vendor/github.com/pelletier/go-toml/example.toml
 delete mode 100644 tools/vendor/github.com/pelletier/go-toml/fuzz.go
 delete mode 100644 tools/vendor/github.com/pelletier/go-toml/fuzz.sh
 delete mode 100644 tools/vendor/github.com/pelletier/go-toml/keysparsing.go
 delete mode 100644 tools/vendor/github.com/pelletier/go-toml/lexer.go
 delete mode 100644 tools/vendor/github.com/pelletier/go-toml/localtime.go
 delete mode 100644 tools/vendor/github.com/pelletier/go-toml/marshal.go
 delete mode 100644 tools/vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_test.toml
 delete mode 100644 tools/vendor/github.com/pelletier/go-toml/marshal_test.toml
 delete mode 100644 tools/vendor/github.com/pelletier/go-toml/parser.go
 delete mode 100644 tools/vendor/github.com/pelletier/go-toml/position.go
 delete mode 100644 tools/vendor/github.com/pelletier/go-toml/token.go
 delete mode 100644 tools/vendor/github.com/pelletier/go-toml/toml.go
 delete mode 100644 tools/vendor/github.com/pelletier/go-toml/tomlpub.go
 delete mode 100644 tools/vendor/github.com/pelletier/go-toml/tomltree_create.go
 delete mode 100644 tools/vendor/github.com/pelletier/go-toml/tomltree_write.go
 delete mode 100644 tools/vendor/github.com/pelletier/go-toml/tomltree_writepub.go
 create mode 100644 tools/vendor/github.com/prometheus/client_golang/prometheus/get_pid.go
 create mode 100644 tools/vendor/github.com/prometheus/client_golang/prometheus/get_pid_gopherjs.go
 rename tools/vendor/github.com/prometheus/client_golang/prometheus/{go_collector_go117.go => go_collector_latest.go} (51%)
 create mode 100644 tools/vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go
 create mode 100644 tools/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go
 create mode 100644 tools/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go
 create mode 100644 tools/vendor/github.com/prometheus/client_golang/prometheus/num_threads.go
 create mode 100644 tools/vendor/github.com/prometheus/client_golang/prometheus/num_threads_gopherjs.go
 create mode 100644 tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go
 create mode 100644 tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.c
 create mode 100644 tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.go
 create mode 100644 tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go
 create mode 100644 tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_not_supported.go
 rename tools/vendor/github.com/prometheus/client_golang/prometheus/{process_collector_other.go => process_collector_procfsenabled.go} (63%)
 create mode 100644 tools/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/problem.go
 create mode 100644 tools/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validation.go
 create mode 100644 
tools/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/counter_validations.go create mode 100644 tools/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/duplicate_validations.go create mode 100644 tools/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/generic_name_validations.go create mode 100644 tools/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/help_validations.go create mode 100644 tools/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/histogram_validations.go create mode 100644 tools/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/units.go create mode 100644 tools/vendor/github.com/prometheus/client_golang/prometheus/vnext.go create mode 100644 tools/vendor/github.com/prometheus/common/model/labelset_string.go create mode 100644 tools/vendor/github.com/prometheus/common/model/metadata.go create mode 100644 tools/vendor/github.com/prometheus/common/model/value_float.go create mode 100644 tools/vendor/github.com/prometheus/common/model/value_histogram.go create mode 100644 tools/vendor/github.com/prometheus/common/model/value_type.go rename tools/vendor/github.com/{matttproud/golang_protobuf_extensions/pbutil/doc.go => prometheus/procfs/cpuinfo_loong64.go} (73%) delete mode 100644 tools/vendor/github.com/prometheus/procfs/fixtures.ttar create mode 100644 tools/vendor/github.com/prometheus/procfs/fs_statfs_notype.go create mode 100644 tools/vendor/github.com/prometheus/procfs/fs_statfs_type.go create mode 100644 tools/vendor/github.com/prometheus/procfs/net_dev_snmp6.go create mode 100644 tools/vendor/github.com/prometheus/procfs/net_route.go create mode 100644 tools/vendor/github.com/prometheus/procfs/net_tls_stat.go create mode 100644 tools/vendor/github.com/prometheus/procfs/net_wireless.go rename tools/vendor/github.com/prometheus/procfs/{xfrm.go => net_xfrm.go} (94%) create mode 100644 tools/vendor/github.com/prometheus/procfs/proc_cgroups.go create mode 100644 tools/vendor/github.com/prometheus/procfs/proc_interrupts.go create mode 100644 tools/vendor/github.com/prometheus/procfs/proc_netstat.go create mode 100644 tools/vendor/github.com/prometheus/procfs/proc_snmp.go create mode 100644 tools/vendor/github.com/prometheus/procfs/proc_snmp6.go create mode 100644 tools/vendor/github.com/prometheus/procfs/proc_sys.go create mode 100644 tools/vendor/github.com/prometheus/procfs/softirqs.go create mode 100644 tools/vendor/github.com/prometheus/procfs/thread.go create mode 100644 tools/vendor/github.com/sagikazarmark/locafero/.editorconfig create mode 100644 tools/vendor/github.com/sagikazarmark/locafero/.envrc rename {vendor/github.com/sagikazarmark/slog-shim => tools/vendor/github.com/sagikazarmark/locafero}/.gitignore (57%) create mode 100644 tools/vendor/github.com/sagikazarmark/locafero/.golangci.yaml rename tools/vendor/github.com/{spf13/jwalterweatherman => sagikazarmark/locafero}/LICENSE (86%) create mode 100644 tools/vendor/github.com/sagikazarmark/locafero/README.md create mode 100644 tools/vendor/github.com/sagikazarmark/locafero/file_type.go create mode 100644 tools/vendor/github.com/sagikazarmark/locafero/finder.go create mode 100644 tools/vendor/github.com/sagikazarmark/locafero/flake.lock rename {vendor/github.com/sagikazarmark/slog-shim => tools/vendor/github.com/sagikazarmark/locafero}/flake.nix (70%) create mode 100644 
tools/vendor/github.com/sagikazarmark/locafero/glob.go create mode 100644 tools/vendor/github.com/sagikazarmark/locafero/glob_windows.go create mode 100644 tools/vendor/github.com/sagikazarmark/locafero/helpers.go create mode 100644 tools/vendor/github.com/sagikazarmark/locafero/justfile create mode 100644 tools/vendor/github.com/sourcegraph/conc/.golangci.yml rename {vendor/github.com/aead/chacha20 => tools/vendor/github.com/sourcegraph/conc}/LICENSE (94%) create mode 100644 tools/vendor/github.com/sourcegraph/conc/README.md create mode 100644 tools/vendor/github.com/sourcegraph/conc/internal/multierror/multierror_go119.go create mode 100644 tools/vendor/github.com/sourcegraph/conc/internal/multierror/multierror_go120.go create mode 100644 tools/vendor/github.com/sourcegraph/conc/iter/iter.go create mode 100644 tools/vendor/github.com/sourcegraph/conc/iter/map.go create mode 100644 tools/vendor/github.com/sourcegraph/conc/panics/panics.go create mode 100644 tools/vendor/github.com/sourcegraph/conc/panics/try.go create mode 100644 tools/vendor/github.com/sourcegraph/conc/waitgroup.go delete mode 100644 tools/vendor/github.com/spf13/jwalterweatherman/.gitignore delete mode 100644 tools/vendor/github.com/spf13/jwalterweatherman/README.md delete mode 100644 tools/vendor/github.com/spf13/jwalterweatherman/default_notepad.go delete mode 100644 tools/vendor/github.com/spf13/jwalterweatherman/log_counter.go delete mode 100644 tools/vendor/github.com/spf13/jwalterweatherman/notepad.go create mode 100644 tools/vendor/github.com/spf13/viper/.envrc create mode 100644 tools/vendor/github.com/spf13/viper/.yamlignore create mode 100644 tools/vendor/github.com/spf13/viper/.yamllint.yaml create mode 100644 tools/vendor/github.com/spf13/viper/UPDATES.md create mode 100644 tools/vendor/github.com/spf13/viper/encoding.go create mode 100644 tools/vendor/github.com/spf13/viper/experimental.go delete mode 100644 tools/vendor/github.com/spf13/viper/experimental_logger.go rename tools/vendor/github.com/spf13/viper/{viper_go1_15.go => file.go} (50%) create mode 100644 tools/vendor/github.com/spf13/viper/finder.go create mode 100644 tools/vendor/github.com/spf13/viper/flake.lock create mode 100644 tools/vendor/github.com/spf13/viper/flake.nix delete mode 100644 tools/vendor/github.com/spf13/viper/fs.go delete mode 100644 tools/vendor/github.com/spf13/viper/internal/encoding/decoder.go delete mode 100644 tools/vendor/github.com/spf13/viper/internal/encoding/encoder.go delete mode 100644 tools/vendor/github.com/spf13/viper/internal/encoding/error.go delete mode 100644 tools/vendor/github.com/spf13/viper/internal/encoding/hcl/codec.go delete mode 100644 tools/vendor/github.com/spf13/viper/internal/encoding/ini/codec.go delete mode 100644 tools/vendor/github.com/spf13/viper/internal/encoding/ini/map_utils.go delete mode 100644 tools/vendor/github.com/spf13/viper/internal/encoding/javaproperties/codec.go delete mode 100644 tools/vendor/github.com/spf13/viper/internal/encoding/javaproperties/map_utils.go delete mode 100644 tools/vendor/github.com/spf13/viper/internal/encoding/toml/codec2.go delete mode 100644 tools/vendor/github.com/spf13/viper/internal/encoding/yaml/yaml2.go delete mode 100644 tools/vendor/github.com/spf13/viper/internal/encoding/yaml/yaml3.go create mode 100644 tools/vendor/github.com/spf13/viper/internal/features/bind_struct.go create mode 100644 tools/vendor/github.com/spf13/viper/internal/features/bind_struct_default.go create mode 100644 
tools/vendor/github.com/spf13/viper/internal/features/finder.go create mode 100644 tools/vendor/github.com/spf13/viper/internal/features/finder_default.go create mode 100644 tools/vendor/github.com/spf13/viper/remote.go delete mode 100644 tools/vendor/github.com/spf13/viper/viper_go1_16.go delete mode 100644 tools/vendor/github.com/spf13/viper/watch.go delete mode 100644 tools/vendor/github.com/spf13/viper/watch_wasm.go delete mode 100644 tools/vendor/go.uber.org/atomic/.codecov.yml delete mode 100644 tools/vendor/go.uber.org/atomic/.gitignore delete mode 100644 tools/vendor/go.uber.org/atomic/.travis.yml delete mode 100644 tools/vendor/go.uber.org/atomic/CHANGELOG.md delete mode 100644 tools/vendor/go.uber.org/atomic/LICENSE.txt delete mode 100644 tools/vendor/go.uber.org/atomic/Makefile delete mode 100644 tools/vendor/go.uber.org/atomic/README.md delete mode 100644 tools/vendor/go.uber.org/atomic/bool.go delete mode 100644 tools/vendor/go.uber.org/atomic/doc.go delete mode 100644 tools/vendor/go.uber.org/atomic/duration.go delete mode 100644 tools/vendor/go.uber.org/atomic/duration_ext.go delete mode 100644 tools/vendor/go.uber.org/atomic/float64.go delete mode 100644 tools/vendor/go.uber.org/atomic/float64_ext.go delete mode 100644 tools/vendor/go.uber.org/atomic/gen.go delete mode 100644 tools/vendor/go.uber.org/atomic/int32.go delete mode 100644 tools/vendor/go.uber.org/atomic/int64.go delete mode 100644 tools/vendor/go.uber.org/atomic/nocmp.go delete mode 100644 tools/vendor/go.uber.org/atomic/string.go delete mode 100644 tools/vendor/go.uber.org/atomic/string_ext.go delete mode 100644 tools/vendor/go.uber.org/atomic/uint32.go delete mode 100644 tools/vendor/go.uber.org/atomic/uint64.go delete mode 100644 tools/vendor/go.uber.org/atomic/value.go delete mode 100644 tools/vendor/go.uber.org/multierr/.travis.yml rename tools/vendor/go.uber.org/{atomic/error_ext.go => multierr/error_post_go120.go} (65%) rename tools/vendor/go.uber.org/multierr/{go113.go => error_pre_go120.go} (66%) delete mode 100644 tools/vendor/go.uber.org/multierr/glide.yaml create mode 100644 tools/vendor/go.uber.org/zap/.golangci.yml rename tools/vendor/go.uber.org/zap/{LICENSE.txt => LICENSE} (100%) delete mode 100644 tools/vendor/go.uber.org/zap/array_go118.go rename tools/vendor/go.uber.org/{atomic/bool_ext.go => zap/internal/pool/pool.go} (56%) rename tools/vendor/go.uber.org/zap/{stacktrace.go => internal/stacktrace/stack.go} (73%) rename tools/vendor/go.uber.org/{atomic/error.go => zap/zapcore/lazy_with.go} (60%) create mode 100644 tools/vendor/golang.org/x/text/encoding/encoding.go create mode 100644 tools/vendor/golang.org/x/text/encoding/internal/identifier/identifier.go create mode 100644 tools/vendor/golang.org/x/text/encoding/internal/identifier/mib.go create mode 100644 tools/vendor/golang.org/x/text/encoding/internal/internal.go create mode 100644 tools/vendor/golang.org/x/text/encoding/unicode/override.go create mode 100644 tools/vendor/golang.org/x/text/encoding/unicode/unicode.go create mode 100644 tools/vendor/golang.org/x/text/internal/utf8internal/utf8internal.go create mode 100644 tools/vendor/google.golang.org/protobuf/encoding/protodelim/protodelim.go delete mode 100644 tools/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go delete mode 100644 tools/vendor/google.golang.org/protobuf/internal/impl/weak.go delete mode 100644 tools/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go delete mode 100644 
tools/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go delete mode 100644 tools/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go delete mode 100644 tools/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go delete mode 100644 tools/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go delete mode 100644 tools/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go delete mode 100644 tools/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go delete mode 100644 tools/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go delete mode 100644 tools/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go delete mode 100644 tools/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go delete mode 100644 tools/vendor/gopkg.in/ini.v1/.editorconfig delete mode 100644 tools/vendor/gopkg.in/ini.v1/.gitignore delete mode 100644 tools/vendor/gopkg.in/ini.v1/.golangci.yml delete mode 100644 tools/vendor/gopkg.in/ini.v1/LICENSE delete mode 100644 tools/vendor/gopkg.in/ini.v1/Makefile delete mode 100644 tools/vendor/gopkg.in/ini.v1/README.md delete mode 100644 tools/vendor/gopkg.in/ini.v1/codecov.yml delete mode 100644 tools/vendor/gopkg.in/ini.v1/data_source.go delete mode 100644 tools/vendor/gopkg.in/ini.v1/deprecated.go delete mode 100644 tools/vendor/gopkg.in/ini.v1/error.go delete mode 100644 tools/vendor/gopkg.in/ini.v1/file.go delete mode 100644 tools/vendor/gopkg.in/ini.v1/helper.go delete mode 100644 tools/vendor/gopkg.in/ini.v1/ini.go delete mode 100644 tools/vendor/gopkg.in/ini.v1/key.go delete mode 100644 tools/vendor/gopkg.in/ini.v1/parser.go delete mode 100644 tools/vendor/gopkg.in/ini.v1/section.go delete mode 100644 tools/vendor/gopkg.in/ini.v1/struct.go delete mode 100644 vendor/github.com/aead/chacha20/chacha/chacha.go delete mode 100644 vendor/github.com/aead/chacha20/chacha/chachaAVX2_amd64.s delete mode 100644 vendor/github.com/aead/chacha20/chacha/chacha_386.go delete mode 100644 vendor/github.com/aead/chacha20/chacha/chacha_386.s delete mode 100644 vendor/github.com/aead/chacha20/chacha/chacha_amd64.go delete mode 100644 vendor/github.com/aead/chacha20/chacha/chacha_amd64.s delete mode 100644 vendor/github.com/aead/chacha20/chacha/chacha_generic.go delete mode 100644 vendor/github.com/aead/chacha20/chacha/chacha_ref.go delete mode 100644 vendor/github.com/aead/chacha20/chacha/const.s delete mode 100644 vendor/github.com/aead/chacha20/chacha/macro.s create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_checksum_metrics_tracking.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateBucketMetadataTableConfiguration.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketMetadataTableConfiguration.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketMetadataTableConfiguration.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/s3/express_user_agent.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/s3/presign_post.go create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/s3/uri_context.go rename vendor/github.com/{sagikazarmark/slog-shim => go-viper/mapstructure/v2}/.editorconfig (86%) create mode 100644 vendor/github.com/go-viper/mapstructure/v2/.envrc create mode 100644 vendor/github.com/go-viper/mapstructure/v2/.gitignore create mode 100644 vendor/github.com/go-viper/mapstructure/v2/.golangci.yaml 
rename {tools/vendor/github.com/mitchellh/mapstructure => vendor/github.com/go-viper/mapstructure/v2}/CHANGELOG.md (92%) rename {tools/vendor/github.com/mitchellh/mapstructure => vendor/github.com/go-viper/mapstructure/v2}/LICENSE (100%) create mode 100644 vendor/github.com/go-viper/mapstructure/v2/README.md create mode 100644 vendor/github.com/go-viper/mapstructure/v2/decode_hooks.go create mode 100644 vendor/github.com/go-viper/mapstructure/v2/flake.lock create mode 100644 vendor/github.com/go-viper/mapstructure/v2/flake.nix create mode 100644 vendor/github.com/go-viper/mapstructure/v2/internal/errors/errors.go create mode 100644 vendor/github.com/go-viper/mapstructure/v2/internal/errors/join.go create mode 100644 vendor/github.com/go-viper/mapstructure/v2/internal/errors/join_go1_19.go rename {tools/vendor/github.com/mitchellh/mapstructure => vendor/github.com/go-viper/mapstructure/v2}/mapstructure.go (87%) create mode 100644 vendor/github.com/go-viper/mapstructure/v2/reflect_go1_19.go create mode 100644 vendor/github.com/go-viper/mapstructure/v2/reflect_go1_20.go delete mode 100644 vendor/github.com/hashicorp/hcl/.gitignore delete mode 100644 vendor/github.com/hashicorp/hcl/.travis.yml delete mode 100644 vendor/github.com/hashicorp/hcl/LICENSE delete mode 100644 vendor/github.com/hashicorp/hcl/Makefile delete mode 100644 vendor/github.com/hashicorp/hcl/README.md delete mode 100644 vendor/github.com/hashicorp/hcl/appveyor.yml delete mode 100644 vendor/github.com/hashicorp/hcl/decoder.go delete mode 100644 vendor/github.com/hashicorp/hcl/hcl.go delete mode 100644 vendor/github.com/hashicorp/hcl/hcl/ast/ast.go delete mode 100644 vendor/github.com/hashicorp/hcl/hcl/ast/walk.go delete mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/error.go delete mode 100644 vendor/github.com/hashicorp/hcl/hcl/parser/parser.go delete mode 100644 vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go delete mode 100644 vendor/github.com/hashicorp/hcl/hcl/printer/printer.go delete mode 100644 vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go delete mode 100644 vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go delete mode 100644 vendor/github.com/hashicorp/hcl/hcl/token/position.go delete mode 100644 vendor/github.com/hashicorp/hcl/hcl/token/token.go delete mode 100644 vendor/github.com/hashicorp/hcl/json/parser/flatten.go delete mode 100644 vendor/github.com/hashicorp/hcl/json/parser/parser.go delete mode 100644 vendor/github.com/hashicorp/hcl/json/scanner/scanner.go delete mode 100644 vendor/github.com/hashicorp/hcl/json/token/position.go delete mode 100644 vendor/github.com/hashicorp/hcl/json/token/token.go delete mode 100644 vendor/github.com/hashicorp/hcl/lex.go delete mode 100644 vendor/github.com/hashicorp/hcl/parse.go delete mode 100644 vendor/github.com/jmespath/go-jmespath/.gitignore delete mode 100644 vendor/github.com/jmespath/go-jmespath/.travis.yml delete mode 100644 vendor/github.com/jmespath/go-jmespath/LICENSE delete mode 100644 vendor/github.com/jmespath/go-jmespath/Makefile delete mode 100644 vendor/github.com/jmespath/go-jmespath/README.md delete mode 100644 vendor/github.com/jmespath/go-jmespath/api.go delete mode 100644 vendor/github.com/jmespath/go-jmespath/astnodetype_string.go delete mode 100644 vendor/github.com/jmespath/go-jmespath/functions.go delete mode 100644 vendor/github.com/jmespath/go-jmespath/interpreter.go delete mode 100644 vendor/github.com/jmespath/go-jmespath/lexer.go delete mode 100644 vendor/github.com/jmespath/go-jmespath/parser.go delete mode 100644 
vendor/github.com/jmespath/go-jmespath/toktype_string.go delete mode 100644 vendor/github.com/jmespath/go-jmespath/util.go delete mode 100644 vendor/github.com/magiconair/properties/.gitignore delete mode 100644 vendor/github.com/magiconair/properties/CHANGELOG.md delete mode 100644 vendor/github.com/magiconair/properties/LICENSE.md delete mode 100644 vendor/github.com/magiconair/properties/README.md delete mode 100644 vendor/github.com/magiconair/properties/decode.go delete mode 100644 vendor/github.com/magiconair/properties/doc.go delete mode 100644 vendor/github.com/magiconair/properties/integrate.go delete mode 100644 vendor/github.com/magiconair/properties/lex.go delete mode 100644 vendor/github.com/magiconair/properties/load.go delete mode 100644 vendor/github.com/magiconair/properties/parser.go delete mode 100644 vendor/github.com/magiconair/properties/properties.go delete mode 100644 vendor/github.com/magiconair/properties/rangecheck.go delete mode 100644 vendor/github.com/mitchellh/mapstructure/CHANGELOG.md delete mode 100644 vendor/github.com/mitchellh/mapstructure/LICENSE delete mode 100644 vendor/github.com/mitchellh/mapstructure/README.md delete mode 100644 vendor/github.com/mitchellh/mapstructure/decode_hooks.go delete mode 100644 vendor/github.com/mitchellh/mapstructure/error.go delete mode 100644 vendor/github.com/mitchellh/mapstructure/mapstructure.go rename vendor/github.com/pjbgf/sha1cd/ubc/{doc.go => ubc.go} (68%) create mode 100644 vendor/github.com/pjbgf/sha1cd/ubc/ubc_amd64.go create mode 100644 vendor/github.com/pjbgf/sha1cd/ubc/ubc_amd64.s rename vendor/github.com/pjbgf/sha1cd/ubc/{check.go => ubc_generic.go} (99%) create mode 100644 vendor/github.com/pjbgf/sha1cd/ubc/ubc_noasm.go create mode 100644 vendor/github.com/pulumi/esc/CONTRIBUTING.md create mode 100644 vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/cloudwatch/contributorInsightRule.go create mode 100644 vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/cloudwatch/contributorManagedInsightRule.go create mode 100644 vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/cloudwatch/getContributorManagedInsightRules.go create mode 100644 vendor/github.com/pulumi/pulumi-azure-native-sdk/authorization/v2/getPimRoleEligibilitySchedule.go create mode 100644 vendor/github.com/pulumi/pulumi-azure-native-sdk/authorization/v2/pimRoleEligibilitySchedule.go create mode 100644 vendor/github.com/pulumi/pulumi/sdk/v3/go/common/apitype/organizations.go create mode 100644 vendor/github.com/pulumi/pulumi/sdk/v3/go/common/apitype/templates.go create mode 100644 vendor/github.com/sagikazarmark/locafero/glob.go create mode 100644 vendor/github.com/sagikazarmark/locafero/glob_windows.go delete mode 100644 vendor/github.com/sagikazarmark/slog-shim/.envrc delete mode 100644 vendor/github.com/sagikazarmark/slog-shim/LICENSE delete mode 100644 vendor/github.com/sagikazarmark/slog-shim/README.md delete mode 100644 vendor/github.com/sagikazarmark/slog-shim/attr.go delete mode 100644 vendor/github.com/sagikazarmark/slog-shim/attr_120.go delete mode 100644 vendor/github.com/sagikazarmark/slog-shim/flake.lock delete mode 100644 vendor/github.com/sagikazarmark/slog-shim/handler.go delete mode 100644 vendor/github.com/sagikazarmark/slog-shim/handler_120.go delete mode 100644 vendor/github.com/sagikazarmark/slog-shim/json_handler.go delete mode 100644 vendor/github.com/sagikazarmark/slog-shim/json_handler_120.go delete mode 100644 vendor/github.com/sagikazarmark/slog-shim/level.go delete mode 100644 
vendor/github.com/sagikazarmark/slog-shim/level_120.go delete mode 100644 vendor/github.com/sagikazarmark/slog-shim/logger.go delete mode 100644 vendor/github.com/sagikazarmark/slog-shim/logger_120.go delete mode 100644 vendor/github.com/sagikazarmark/slog-shim/record.go delete mode 100644 vendor/github.com/sagikazarmark/slog-shim/record_120.go delete mode 100644 vendor/github.com/sagikazarmark/slog-shim/text_handler.go delete mode 100644 vendor/github.com/sagikazarmark/slog-shim/text_handler_120.go delete mode 100644 vendor/github.com/sagikazarmark/slog-shim/value.go delete mode 100644 vendor/github.com/sagikazarmark/slog-shim/value_120.go rename {tools/vendor/github.com/fsnotify/fsnotify => vendor/github.com/spf13/afero}/.editorconfig (69%) create mode 100644 vendor/github.com/spf13/afero/.golangci.yaml create mode 100644 vendor/github.com/spf13/viper/UPDATES.md create mode 100644 vendor/github.com/spf13/viper/encoding.go create mode 100644 vendor/github.com/spf13/viper/experimental.go delete mode 100644 vendor/github.com/spf13/viper/file_finder.go create mode 100644 vendor/github.com/spf13/viper/finder.go delete mode 100644 vendor/github.com/spf13/viper/internal/encoding/decoder.go delete mode 100644 vendor/github.com/spf13/viper/internal/encoding/encoder.go delete mode 100644 vendor/github.com/spf13/viper/internal/encoding/error.go delete mode 100644 vendor/github.com/spf13/viper/internal/encoding/hcl/codec.go delete mode 100644 vendor/github.com/spf13/viper/internal/encoding/ini/codec.go delete mode 100644 vendor/github.com/spf13/viper/internal/encoding/ini/map_utils.go delete mode 100644 vendor/github.com/spf13/viper/internal/encoding/javaproperties/codec.go delete mode 100644 vendor/github.com/spf13/viper/internal/encoding/javaproperties/map_utils.go create mode 100644 vendor/github.com/spf13/viper/internal/features/finder.go create mode 100644 vendor/github.com/spf13/viper/internal/features/finder_default.go create mode 100644 vendor/github.com/spf13/viper/remote.go delete mode 100644 vendor/golang.org/x/exp/constraints/constraints.go delete mode 100644 vendor/golang.org/x/exp/slices/cmp.go delete mode 100644 vendor/golang.org/x/exp/slices/zsortanyfunc.go delete mode 100644 vendor/golang.org/x/exp/slices/zsortordered.go delete mode 100644 vendor/golang.org/x/exp/slog/attr.go delete mode 100644 vendor/golang.org/x/exp/slog/doc.go delete mode 100644 vendor/golang.org/x/exp/slog/handler.go delete mode 100644 vendor/golang.org/x/exp/slog/internal/buffer/buffer.go delete mode 100644 vendor/golang.org/x/exp/slog/internal/ignorepc.go delete mode 100644 vendor/golang.org/x/exp/slog/json_handler.go delete mode 100644 vendor/golang.org/x/exp/slog/level.go delete mode 100644 vendor/golang.org/x/exp/slog/logger.go delete mode 100644 vendor/golang.org/x/exp/slog/noplog.bench delete mode 100644 vendor/golang.org/x/exp/slog/record.go delete mode 100644 vendor/golang.org/x/exp/slog/text_handler.go delete mode 100644 vendor/golang.org/x/exp/slog/value.go delete mode 100644 vendor/golang.org/x/exp/slog/value_119.go delete mode 100644 vendor/golang.org/x/exp/slog/value_120.go create mode 100644 vendor/golang.org/x/tools/go/types/typeutil/callee.go create mode 100644 vendor/golang.org/x/tools/go/types/typeutil/imports.go create mode 100644 vendor/golang.org/x/tools/go/types/typeutil/map.go create mode 100644 vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go create mode 100644 vendor/golang.org/x/tools/go/types/typeutil/ui.go delete mode 100644 
vendor/golang.org/x/tools/internal/aliases/aliases_go121.go create mode 100644 vendor/golang.org/x/tools/internal/gcimporter/iimport_go122.go delete mode 100644 vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go delete mode 100644 vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go create mode 100644 vendor/golang.org/x/tools/internal/gcimporter/predeclared.go create mode 100644 vendor/golang.org/x/tools/internal/gcimporter/support.go delete mode 100644 vendor/golang.org/x/tools/internal/gcimporter/support_go118.go delete mode 100644 vendor/golang.org/x/tools/internal/gcimporter/unified_no.go delete mode 100644 vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go create mode 100644 vendor/golang.org/x/tools/internal/gocommand/invoke_notunix.go create mode 100644 vendor/golang.org/x/tools/internal/gocommand/invoke_unix.go create mode 100644 vendor/golang.org/x/tools/internal/stdlib/deps.go create mode 100644 vendor/golang.org/x/tools/internal/stdlib/import.go delete mode 100644 vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go create mode 100644 vendor/golang.org/x/tools/internal/typeparams/common.go create mode 100644 vendor/golang.org/x/tools/internal/typeparams/coretype.go create mode 100644 vendor/golang.org/x/tools/internal/typeparams/free.go create mode 100644 vendor/golang.org/x/tools/internal/typeparams/normalize.go create mode 100644 vendor/golang.org/x/tools/internal/typeparams/termlist.go create mode 100644 vendor/golang.org/x/tools/internal/typeparams/typeterm.go create mode 100644 vendor/golang.org/x/tools/internal/typesinternal/element.go create mode 100644 vendor/golang.org/x/tools/internal/typesinternal/qualifier.go create mode 100644 vendor/golang.org/x/tools/internal/typesinternal/varkind.go create mode 100644 vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go delete mode 100644 vendor/golang.org/x/tools/internal/versions/constraint.go delete mode 100644 vendor/golang.org/x/tools/internal/versions/constraint_go121.go delete mode 100644 vendor/golang.org/x/tools/internal/versions/toolchain.go delete mode 100644 vendor/golang.org/x/tools/internal/versions/toolchain_go119.go delete mode 100644 vendor/golang.org/x/tools/internal/versions/toolchain_go120.go delete mode 100644 vendor/golang.org/x/tools/internal/versions/types_go121.go delete mode 100644 vendor/golang.org/x/tools/internal/versions/types_go122.go create mode 100644 vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go rename vendor/google.golang.org/grpc/{internal/grpcsync/oncefunc.go => balancer/pickfirst/internal/internal.go} (55%) create mode 100644 vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go create mode 100644 vendor/google.golang.org/grpc/balancer/subconn.go create mode 100644 vendor/google.golang.org/grpc/health/producer.go create mode 100644 vendor/google.golang.org/grpc/internal/proxyattributes/proxyattributes.go create mode 100644 vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/client_stream.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/server_stream.go create mode 100644 vendor/google.golang.org/grpc/stats/metrics.go delete mode 100644 vendor/google.golang.org/protobuf/internal/errors/is_go112.go delete mode 100644 vendor/google.golang.org/protobuf/internal/errors/is_go113.go rename vendor/{golang.org/x/tools/internal/versions/toolchain_go121.go => 
google.golang.org/protobuf/internal/genid/name.go} (50%)
 create mode 100644 vendor/google.golang.org/protobuf/internal/impl/api_export_opaque.go
 create mode 100644 vendor/google.golang.org/protobuf/internal/impl/bitmap.go
 create mode 100644 vendor/google.golang.org/protobuf/internal/impl/bitmap_race.go
 create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_field_opaque.go
 delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go
 delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go
 create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go
 create mode 100644 vendor/google.golang.org/protobuf/internal/impl/lazy.go
 create mode 100644 vendor/google.golang.org/protobuf/internal/impl/message_opaque.go
 create mode 100644 vendor/google.golang.org/protobuf/internal/impl/message_opaque_gen.go
 create mode 100644 vendor/google.golang.org/protobuf/internal/impl/message_reflect_field_gen.go
 create mode 100644 vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe_opaque.go
 create mode 100644 vendor/google.golang.org/protobuf/internal/impl/presence.go
 delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/weak.go
 create mode 100644 vendor/google.golang.org/protobuf/internal/protolazy/bufferreader.go
 create mode 100644 vendor/google.golang.org/protobuf/internal/protolazy/lazy.go
 create mode 100644 vendor/google.golang.org/protobuf/internal/protolazy/pointer_unsafe.go
 create mode 100644 vendor/google.golang.org/protobuf/proto/wrapperopaque.go
 delete mode 100644 vendor/gopkg.in/ini.v1/.editorconfig
 delete mode 100644 vendor/gopkg.in/ini.v1/.gitignore
 delete mode 100644 vendor/gopkg.in/ini.v1/.golangci.yml
 delete mode 100644 vendor/gopkg.in/ini.v1/LICENSE
 delete mode 100644 vendor/gopkg.in/ini.v1/Makefile
 delete mode 100644 vendor/gopkg.in/ini.v1/README.md
 delete mode 100644 vendor/gopkg.in/ini.v1/codecov.yml
 delete mode 100644 vendor/gopkg.in/ini.v1/data_source.go
 delete mode 100644 vendor/gopkg.in/ini.v1/deprecated.go
 delete mode 100644 vendor/gopkg.in/ini.v1/error.go
 delete mode 100644 vendor/gopkg.in/ini.v1/file.go
 delete mode 100644 vendor/gopkg.in/ini.v1/helper.go
 delete mode 100644 vendor/gopkg.in/ini.v1/ini.go
 delete mode 100644 vendor/gopkg.in/ini.v1/key.go
 delete mode 100644 vendor/gopkg.in/ini.v1/parser.go
 delete mode 100644 vendor/gopkg.in/ini.v1/section.go
 delete mode 100644 vendor/gopkg.in/ini.v1/struct.go
diff --git a/.github/workflows/build-go.yaml b/.github/workflows/build-go.yaml
index daac4e92b..318ed90ce 100644
--- a/.github/workflows/build-go.yaml
+++ b/.github/workflows/build-go.yaml
@@ -18,7 +18,7 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v5
         with:
-          go-version: '1.21'
+          go-version: '1.24'
 
       - name: Run 'make check'
         run: make check
diff --git a/.github/workflows/build-oci.yaml b/.github/workflows/build-oci.yaml
index a421d33ec..0a050fd25 100644
--- a/.github/workflows/build-oci.yaml
+++ b/.github/workflows/build-oci.yaml
@@ -57,7 +57,7 @@ jobs:
         uses: actions/checkout@v4
 
       - name: Download mapt oci flatten images
-        uses: actions/download-artifact@v4
+        uses: actions/download-artifact@95815c38cf2ff2164869cbab79da8d1f422bc89e # v4.2.1
         with:
           name: mapt
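Note: the build-oci.yaml hunk above replaces the floating actions/download-artifact@v4 tag with a full commit SHA plus a human-readable version comment, so the workflow keeps running the audited code even if the tag is later moved. The Go sketch below is illustrative only (the hard-coded path and the check itself are assumptions, not part of this patch); it flags uses: entries in a workflow file that are not pinned to a 40-character commit SHA:

    // pincheck.go: illustrative helper, not part of this patch. It scans a
    // workflow file and reports `uses:` references that point at a mutable
    // tag instead of a full commit SHA.
    package main

    import (
        "bufio"
        "fmt"
        "os"
        "regexp"
        "strings"
    )

    // shaPinned matches "@<40 hex chars>", the immutable form of an action ref.
    var shaPinned = regexp.MustCompile(`@[0-9a-f]{40}\b`)

    func main() {
        f, err := os.Open(".github/workflows/build-oci.yaml") // assumed path
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
        defer f.Close()

        sc := bufio.NewScanner(f)
        for n := 1; sc.Scan(); n++ {
            line := sc.Text()
            if strings.Contains(line, "uses:") && strings.Contains(line, "@") && !shaPinned.MatchString(line) {
                fmt.Printf("line %d: not pinned to a commit SHA: %s\n", n, strings.TrimSpace(line))
            }
        }
    }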
diff --git a/.github/workflows/build-on-hosted-runner.yaml b/.github/workflows/build-on-hosted-runner.yaml
index 4b8a5b77b..7651d7e5e 100644
--- a/.github/workflows/build-on-hosted-runner.yaml
+++ b/.github/workflows/build-on-hosted-runner.yaml
@@ -27,7 +27,7 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v5
         with:
-          go-version: "1.21"
+          go-version: "1.24"
 
       - name: Test
         run: go test -v ./...
diff --git a/.github/workflows/push-oci-pr.yml b/.github/workflows/push-oci-pr.yml
index 5c29ccebd..41a395ea1 100644
--- a/.github/workflows/push-oci-pr.yml
+++ b/.github/workflows/push-oci-pr.yml
@@ -19,7 +19,7 @@ jobs:
       packages: write
     steps:
       - name: Download mapt assets
-        uses: actions/download-artifact@v4
+        uses: actions/download-artifact@95815c38cf2ff2164869cbab79da8d1f422bc89e # v4.2.1
         with:
           name: mapt
           run-id: ${{ github.event.workflow_run.id }}
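Note: the workflows above now install Go 1.24, while go.mod below declares go 1.23.1 and toolchain go1.23.7. Since Go 1.21 both directives are minimums, so an installed go1.24 satisfies them and builds the module directly. When in doubt about which toolchain actually produced a binary, a small diagnostic like the following (assumed code, not from this repository) prints both the compiling toolchain and the module's recorded go directive:

    // toolchaincheck.go: illustrative diagnostic, not part of this patch.
    package main

    import (
        "fmt"
        "runtime"
        "runtime/debug"
    )

    func main() {
        // runtime.Version reports the toolchain that compiled this binary.
        fmt.Println("built with:", runtime.Version())
        // The build info records the main module's go directive at build time.
        if info, ok := debug.ReadBuildInfo(); ok {
            fmt.Println("go directive:", info.GoVersion)
        }
    }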
diff --git a/go.mod b/go.mod
index e8f453354..2f350a075 100644
--- a/go.mod
+++ b/go.mod
@@ -1,17 +1,17 @@
 module github.com/redhat-developer/mapt
 
-go 1.23.0
+go 1.23.1
 
 toolchain go1.23.7
 
 require (
 	github.com/coocood/freecache v1.2.4
-	github.com/pulumi/pulumi-command/sdk v1.0.1
-	github.com/pulumi/pulumi-random/sdk/v4 v4.16.7
-	github.com/pulumi/pulumi/sdk/v3 v3.154.0
+	github.com/pulumi/pulumi-command/sdk v1.0.2
+	github.com/pulumi/pulumi-random/sdk/v4 v4.18.0
+	github.com/pulumi/pulumi/sdk/v3 v3.157.0
 	github.com/sirupsen/logrus v1.9.3
 	github.com/spf13/cobra v1.9.1
-	golang.org/x/exp v0.0.0-20240909161429-701f63a606c0
+	golang.org/x/exp v0.0.0-20250305212735-054e65f0b394
 )
 
 require (
@@ -21,37 +21,37 @@ require (
 	github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resourcegraph/armresourcegraph v0.9.0
 	github.com/aws/amazon-ec2-instance-selector/v3 v3.1.1
 	github.com/aws/aws-sdk-go-v2 v1.36.3
-	github.com/aws/aws-sdk-go-v2/config v1.29.9
-	github.com/aws/aws-sdk-go-v2/service/ec2 v1.203.1
-	github.com/aws/aws-sdk-go-v2/service/s3 v1.53.1
-	github.com/pulumi/pulumi-aws-native/sdk v1.25.0
-	github.com/pulumi/pulumi-aws/sdk/v6 v6.69.0
-	github.com/pulumi/pulumi-awsx/sdk/v2 v2.21.1
-	github.com/pulumi/pulumi-azure-native-sdk/authorization/v2 v2.88.0
-	github.com/pulumi/pulumi-azure-native-sdk/compute/v2 v2.88.0
-	github.com/pulumi/pulumi-azure-native-sdk/containerservice/v2 v2.88.0
-	github.com/pulumi/pulumi-azure-native-sdk/network/v2 v2.88.0
-	github.com/pulumi/pulumi-azure-native-sdk/resources/v2 v2.88.0
-	github.com/pulumi/pulumi-azure-native-sdk/storage/v2 v2.88.0
-	github.com/pulumi/pulumi-tls/sdk/v5 v5.0.9
+	github.com/aws/aws-sdk-go-v2/config v1.29.8
+	github.com/aws/aws-sdk-go-v2/service/ec2 v1.210.1
+	github.com/aws/aws-sdk-go-v2/service/s3 v1.78.2
+	github.com/pulumi/pulumi-aws-native/sdk v1.26.0
+	github.com/pulumi/pulumi-aws/sdk/v6 v6.72.0
+	github.com/pulumi/pulumi-awsx/sdk/v2 v2.21.0
+	github.com/pulumi/pulumi-azure-native-sdk/authorization/v2 v2.89.3
+	github.com/pulumi/pulumi-azure-native-sdk/compute/v2 v2.89.3
+	github.com/pulumi/pulumi-azure-native-sdk/containerservice/v2 v2.89.3
+	github.com/pulumi/pulumi-azure-native-sdk/network/v2 v2.89.3
+	github.com/pulumi/pulumi-azure-native-sdk/resources/v2 v2.89.3
+	github.com/pulumi/pulumi-azure-native-sdk/storage/v2 v2.89.3
+	github.com/pulumi/pulumi-tls/sdk/v5 v5.1.1
 )
 
 require (
-	github.com/BurntSushi/toml v1.4.0 // indirect
-	github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.5 // indirect
-	github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.7 // indirect
-	github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.5 // indirect
+	github.com/BurntSushi/toml v1.5.0 // indirect
+	github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.0 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15 // indirect
 	github.com/blang/semver/v4 v4.0.0 // indirect
 	github.com/evertras/bubble-table v0.17.1 // indirect
 	github.com/hashicorp/hcl/v2 v2.23.0 // indirect
 	github.com/mitchellh/go-homedir v1.1.0 // indirect
 	github.com/oliveagle/jsonpath v0.0.0-20180606110733-2e52cf6e6852 // indirect
 	github.com/patrickmn/go-cache v2.1.0+incompatible // indirect
-	github.com/pulumi/pulumi-docker/sdk/v4 v4.5.8 // indirect
+	github.com/pulumi/pulumi-docker/sdk/v4 v4.6.2 // indirect
 	github.com/sahilm/fuzzy v0.1.1 // indirect
-	golang.org/x/mod v0.21.0 // indirect
-	golang.org/x/tools v0.25.0 // indirect
+	golang.org/x/mod v0.24.0 // indirect
+	golang.org/x/tools v0.31.0 // indirect
 )
 
 require (
@@ -68,7 +68,7 @@
 	github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect
 	github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 // indirect
 	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 // indirect
-	github.com/aws/aws-sdk-go-v2/service/pricing v1.32.17 // indirect
+	github.com/aws/aws-sdk-go-v2/service/pricing v1.34.1 // indirect
 	github.com/aws/aws-sdk-go-v2/service/sso v1.25.1 // indirect
 	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.29.1 // indirect
 	github.com/aws/aws-sdk-go-v2/service/sts v1.33.17 // indirect
@@ -86,61 +86,55 @@ require (
 	github.com/cyphar/filepath-securejoin v0.4.1 // indirect
 	github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f // indirect
 	github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect
+	github.com/go-viper/mapstructure/v2 v2.2.1 // indirect
 	github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
-	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
+	github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect
 	github.com/google/uuid v1.6.0 // indirect
-	github.com/hashicorp/hcl v1.0.0 // indirect
 	github.com/iwdgo/sigintwindows v0.2.2 // indirect
-	github.com/jmespath/go-jmespath v0.4.0 // indirect
 	github.com/kylelemons/godebug v1.1.0 // indirect
 	github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
-	github.com/magiconair/properties v1.8.7 // indirect
 	github.com/mattn/go-isatty v0.0.20 // indirect
 	github.com/mattn/go-localereader v0.0.1 // indirect
 	github.com/mitchellh/go-wordwrap v1.0.1 // indirect
-	github.com/mitchellh/mapstructure v1.5.0 // indirect
 	github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 // indirect
 	github.com/muesli/cancelreader v0.2.2 // indirect
 	github.com/muesli/reflow v0.3.0 // indirect
 	github.com/muesli/termenv v0.16.0 // indirect
 	github.com/pelletier/go-toml/v2 v2.2.3 // indirect
 	github.com/pgavlin/fx v0.1.6 // indirect
-	github.com/pjbgf/sha1cd v0.3.0 // indirect
+	github.com/pjbgf/sha1cd v0.3.2 // indirect
 	github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
 	github.com/pulumi/appdash v0.0.0-20231130102222-75f619a67231 // indirect
-	github.com/pulumi/esc v0.12.0 // indirect
-	github.com/pulumi/pulumi-azure-native-sdk/v2 v2.88.0 // indirect
-	github.com/pulumi/pulumi-docker-build/sdk/go/dockerbuild v0.0.3 // indirect
-	github.com/sagikazarmark/locafero v0.4.0 // indirect
-	github.com/sagikazarmark/slog-shim v0.1.0 // indirect
+	github.com/pulumi/esc v0.13.0 // indirect
+	github.com/pulumi/pulumi-azure-native-sdk/v2 v2.89.3 // indirect
+	github.com/pulumi/pulumi-docker-build/sdk/go/dockerbuild v0.0.10 // indirect
+	github.com/sagikazarmark/locafero v0.8.0 // indirect
 	github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 // indirect
-	github.com/skeema/knownhosts v1.3.0 // indirect
+	github.com/skeema/knownhosts v1.3.1 // indirect
 	github.com/sourcegraph/conc v0.3.0 // indirect
-	github.com/spf13/afero v1.11.0 // indirect
-	github.com/spf13/cast v1.6.0 // indirect
+	github.com/spf13/afero v1.14.0 // indirect
+	github.com/spf13/cast v1.7.1 // indirect
 	github.com/subosito/gotenv v1.6.0 // indirect
 	github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
-	github.com/zclconf/go-cty v1.15.0 // indirect
+	github.com/zclconf/go-cty v1.16.2 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
 	golang.org/x/sync v0.12.0 // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect
-	gopkg.in/ini.v1 v1.67.0 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20250313205543-e70fdf4c4cb4 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 )
 
 require (
 	github.com/Microsoft/go-winio v0.6.2 // indirect
-	github.com/ProtonMail/go-crypto v1.1.3 // indirect
-	github.com/aead/chacha20 v0.0.0-20180709150244-8b13a72661da // indirect
-	github.com/aws/aws-sdk-go-v2/service/ecs v1.53.0
-	github.com/aws/aws-sdk-go-v2/service/iam v1.39.2
+	github.com/ProtonMail/go-crypto v1.1.6 // indirect
+	github.com/aws/aws-sdk-go-v2/service/ecs v1.54.2
+	github.com/aws/aws-sdk-go-v2/service/iam v1.40.1
 	github.com/blang/semver v3.5.1+incompatible // indirect
 	github.com/cheggaaa/pb v1.0.29 // indirect
 	github.com/djherbis/times v1.6.0 // indirect
 	github.com/emirpasic/gods v1.18.1 // indirect
 	github.com/fsnotify/fsnotify v1.8.0 // indirect
 	github.com/go-git/go-billy/v5 v5.6.2 // indirect
-	github.com/go-git/go-git/v5 v5.13.1 // indirect
+	github.com/go-git/go-git/v5 v5.14.0 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/golang/glog v1.2.4 // indirect
 	github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 // indirect
@@ -156,13 +150,13 @@ require (
 	github.com/opentracing/opentracing-go v1.2.0 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pkg/term v1.1.0 // indirect
-	github.com/pulumi/pulumi-azure-native-sdk/managedidentity/v2 v2.53.0
+	github.com/pulumi/pulumi-azure-native-sdk/managedidentity/v2 v2.89.3
 	github.com/rivo/uniseg v0.4.7 // indirect
-	github.com/rogpeppe/go-internal v1.12.0 // indirect
+	github.com/rogpeppe/go-internal v1.14.1 // indirect
 	github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06 // indirect
 	github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect
 	github.com/spf13/pflag v1.0.6
-	github.com/spf13/viper v1.19.0
+	github.com/spf13/viper v1.20.0
 	github.com/texttheater/golang-levenshtein v1.0.1 // indirect
 	github.com/uber/jaeger-client-go v2.30.0+incompatible // indirect
 	github.com/uber/jaeger-lib v2.4.1+incompatible // indirect
@@ -173,9 +167,9 @@ require (
 	golang.org/x/sys v0.31.0 // indirect
 	golang.org/x/term v0.30.0 // indirect
 	golang.org/x/text v0.23.0 // indirect
-	google.golang.org/grpc v1.67.1 // indirect
-	google.golang.org/protobuf v1.35.1 // indirect
+	google.golang.org/grpc v1.71.0 // indirect
+	google.golang.org/protobuf v1.36.5 // indirect
 	gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
 	gopkg.in/warnings.v0 v0.1.2 // indirect
-	lukechampine.com/frand v1.4.2 // indirect
+	lukechampine.com/frand v1.5.1 // indirect
 )
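Note: the require-block changes above line up with the bump to spf13/viper v1.20.0, which swapped github.com/mitchellh/mapstructure for the maintained github.com/go-viper/mapstructure/v2 fork, moved from github.com/sagikazarmark/slog-shim to the standard library's log/slog, and dropped its built-in HCL, INI, and Java properties codecs; that is why hashicorp/hcl v1, gopkg.in/ini.v1, and magiconair/properties leave the module graph (and vendor/) in this patch. For code that imports mapstructure directly, the migration is essentially an import-path change; a minimal sketch, with an illustrative struct and input that are not taken from this repository:

    package main

    import (
        "fmt"

        // was: "github.com/mitchellh/mapstructure"
        "github.com/go-viper/mapstructure/v2"
    )

    type target struct {
        Name  string `mapstructure:"name"`
        Count int    `mapstructure:"count"`
    }

    func main() {
        input := map[string]any{"name": "mapt", "count": 3}
        var out target
        // Decode keeps the v1 signature, so most call sites only need the
        // new import path.
        if err := mapstructure.Decode(input, &out); err != nil {
            panic(err)
        }
        fmt.Printf("%+v\n", out)
    }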
diff --git a/go.sum b/go.sum
index fd29c4156..1f112ef77 100644
--- a/go.sum
+++ b/go.sum
@@ -20,17 +20,15 @@ github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJ
 github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE=
 github.com/AzureAD/microsoft-authentication-library-for-go v1.4.0 h1:MUkXAnvvDHgvPItl0nBj0hgk0f7hnnQbGm0h0+YxbN4=
 github.com/AzureAD/microsoft-authentication-library-for-go v1.4.0/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
-github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0=
-github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
+github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg=
+github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
 github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM=
 github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo=
 github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
 github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
 github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
-github.com/ProtonMail/go-crypto v1.1.3 h1:nRBOetoydLeUb4nHajyO2bKqMLfWQ/ZPwkXqXxPxCFk=
-github.com/ProtonMail/go-crypto v1.1.3/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE=
-github.com/aead/chacha20 v0.0.0-20180709150244-8b13a72661da h1:KjTM2ks9d14ZYCvmHS9iAKVt9AyzRSqNU1qabPih5BY=
-github.com/aead/chacha20 v0.0.0-20180709150244-8b13a72661da/go.mod h1:eHEWzANqSiWQsof+nXEI9bUVUyV6F53Fp89EuCh2EAA=
+github.com/ProtonMail/go-crypto v1.1.6 h1:ZcV+Ropw6Qn0AX9brlQLAUXfqLBc7Bl+f/DmNxpLfdw=
+github.com/ProtonMail/go-crypto v1.1.6/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE=
 github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo=
 github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
 github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8=
@@ -45,10 +43,10 @@ github.com/aws/amazon-ec2-instance-selector/v3 v3.1.1 h1:8pw8T4Ae8tiXbxTxXhVStJS
 github.com/aws/amazon-ec2-instance-selector/v3 v3.1.1/go.mod h1:RU/lVVsYHNN7Bwr2UmCw5z2aWPcNIHADY49bj082oYM=
 github.com/aws/aws-sdk-go-v2 v1.36.3 h1:mJoei2CxPutQVxaATCzDUjcZEjVRdpsiiXi2o38yqWM=
 github.com/aws/aws-sdk-go-v2 v1.36.3/go.mod h1:LLXuLpgzEbD766Z5ECcRmi8AzSwfZItDtmABVkRLGzg=
-github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2 h1:x6xsQXGSmW6frevwDA+vi/wqhp1ct18mVXYN08/93to=
-github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2/go.mod h1:lPprDr1e6cJdyYeGXnRaJoP4Md+cDBvi2eOj00BlGmg=
-github.com/aws/aws-sdk-go-v2/config v1.29.9 h1:Kg+fAYNaJeGXp1vmjtidss8O2uXIsXwaRqsQJKXVr+0=
-github.com/aws/aws-sdk-go-v2/config v1.29.9/go.mod h1:oU3jj2O53kgOU4TXq/yipt6ryiooYjlkqqVaZk7gY/U=
+github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 h1:zAybnyUQXIZ5mok5Jqwlf58/TFE7uvd3IAsa1aF9cXs=
+github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10/go.mod h1:qqvMj6gHLR/EXWZw4ZbqlPbQUyenf4h82UQUlKc+l14=
+github.com/aws/aws-sdk-go-v2/config v1.29.8 h1:RpwAfYcV2lr/yRc4lWhUM9JRPQqKgKWmou3LV7UfWP4=
+github.com/aws/aws-sdk-go-v2/config v1.29.8/go.mod h1:t+G7Fq1OcO8cXTPPXzxQSnj/5Xzdc9jAAD3Xrn9/Mgo=
 github.com/aws/aws-sdk-go-v2/credentials v1.17.62 h1:fvtQY3zFzYJ9CfixuAQ96IxDrBajbBWGqjNTCa79ocU=
 github.com/aws/aws-sdk-go-v2/credentials v1.17.62/go.mod h1:ElETBxIQqcxej++Cs8GyPBbgMys5DgQPTwo7cUPDKt8=
 github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 h1:x793wxmUWVDhshP8WW2mlnXuFrO4cOd3HLBroh1paFw=
@@ -59,26 +57,26 @@ github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 h1:SZwFm17ZUNNg5Np0io
 github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34/go.mod h1:dFZsC0BLo346mvKQLWmoJxT+Sjp+qcVR1tRVHQGOH9Q=
 github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo=
 github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo=
-github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.5 h1:81KE7vaZzrl7yHBYHVEzYB8sypz11NMOZ40YlWvPxsU=
-github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.5/go.mod h1:LIt2rg7Mcgn09Ygbdh/RdIm0rQ+3BNkbP1gyVMFtRK0=
-github.com/aws/aws-sdk-go-v2/service/ec2 v1.203.1 h1:ZgY9zeVAe+54Qa7o1GXKRNTez79lffCeJSSinhl+qec=
-github.com/aws/aws-sdk-go-v2/service/ec2 v1.203.1/go.mod h1:0naMk66LtdeTmE+1CWQTKwtzOQ2t8mavOhMhR0Pv1m0=
-github.com/aws/aws-sdk-go-v2/service/ecs v1.53.0 h1:TCQZX4ztlcWXAcZouKh9qJMcVaH/qTidFTfsvJwUI30=
-github.com/aws/aws-sdk-go-v2/service/ecs v1.53.0/go.mod h1:Ghi1OWUv4+VMEULWiHsKH2gNA3KAcMoLWsvU0eRXvIA=
-github.com/aws/aws-sdk-go-v2/service/iam v1.39.2 h1:2JLLGua711n8vn773xw2iwGh0zxLJJ3UDWQ2L7fy0wY=
-github.com/aws/aws-sdk-go-v2/service/iam v1.39.2/go.mod h1:ZpAQJqd/i2bgRVa4vTa1ZX96sWgd3MZ/dxkABRXqvyI=
+github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34 h1:ZNTqv4nIdE/DiBfUUfXcLZ/Spcuz+RjeziUtNJackkM=
+github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34/go.mod h1:zf7Vcd1ViW7cPqYWEHLHJkS50X0JS2IKz9Cgaj6ugrs=
+github.com/aws/aws-sdk-go-v2/service/ec2 v1.210.1 h1:+4A9SDduLZFlDeXWRmfQ6r8kyEJZQfK6lcg+KwdvWrI=
+github.com/aws/aws-sdk-go-v2/service/ec2 v1.210.1/go.mod h1:ouvGEfHbLaIlWwpDpOVWPWR+YwO0HDv3vm5tYLq8ImY=
+github.com/aws/aws-sdk-go-v2/service/ecs v1.54.2 h1:euy6eWxHp2mLxA1OqQcBFk5vEuXC1UqZL0x9XPlmxns=
+github.com/aws/aws-sdk-go-v2/service/ecs v1.54.2/go.mod h1:wAtdeFanDuF9Re/ge4DRDaYe3Wy1OGrU7jG042UcuI4=
+github.com/aws/aws-sdk-go-v2/service/iam v1.40.1 h1:PaHCkW8rtLrA89xM/0LsY/NSIQETqmN+f1vt70EmpB8=
+github.com/aws/aws-sdk-go-v2/service/iam v1.40.1/go.mod h1:mPJkGQzeCoPs82ElNILor2JzZgYENr4UaSKUT8K27+c=
 github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 h1:eAh2A4b5IzM/lum78bZ590jy36+d/aFLgKF/4Vd1xPE=
 github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3/go.mod h1:0yKJC/kb8sAnmlYa6Zs3QVYqaC8ug2AbnNChv5Ox3uA=
-github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.7 h1:ZMeFZ5yk+Ek+jNr1+uwCd2tG89t6oTS5yVWpa6yy2es=
-github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.7/go.mod h1:mxV05U+4JiHqIpGqqYXOHLPKUC6bDXC44bsUhNjOEwY=
+github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.0 h1:lguz0bmOoGzozP9XfRJR1QIayEYo+2vP/No3OfLF0pU=
+github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.0/go.mod h1:iu6FSzgt+M2/x3Dk8zhycdIcHjEFb36IS8HVUVFoMg0=
 github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 h1:dM9/92u2F1JbDaGooxTq18wmmFzbJRfXfVfy96/1CXM=
 github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15/go.mod h1:SwFBy2vjtA0vZbjjaFtfN045boopadnoVPhu4Fv66vY=
-github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.5 h1:f9RyWNtS8oH7cZlbn+/JNPpjUk5+5fLd5lM9M0i49Ys=
-github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.5/go.mod h1:h5CoMZV2VF297/VLhRhO1WF+XYWOzXo+4HsObA4HjBQ=
-github.com/aws/aws-sdk-go-v2/service/pricing v1.32.17 h1:EtZFyL/uhaXlHjIwHW0KSJvppg+Ie1fzQ3wEXLEUj0I=
-github.com/aws/aws-sdk-go-v2/service/pricing v1.32.17/go.mod h1:l7bufyRvU+8mY0Z1BNWbWvjr59dlj9YrLKmeiz5CJ30=
-github.com/aws/aws-sdk-go-v2/service/s3 v1.53.1 h1:6cnno47Me9bRykw9AEv9zkXE+5or7jz8TsskTTccbgc=
-github.com/aws/aws-sdk-go-v2/service/s3 v1.53.1/go.mod h1:qmdkIIAC+GCLASF7R2whgNrJADz0QZPX+Seiw/i4S3o=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15 h1:moLQUoVq91LiqT1nbvzDukyqAlCv89ZmwaHw/ZFlFZg=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15/go.mod h1:ZH34PJUc8ApjBIfgQCFvkWcUDBtl/WTD+uiYHjd8igA=
+github.com/aws/aws-sdk-go-v2/service/pricing v1.34.1 h1:tbWWyDVa/U4cr4dsKehNmFi1842yB1Ffw7kBZE+30bQ=
+github.com/aws/aws-sdk-go-v2/service/pricing v1.34.1/go.mod h1:giTP9ufzBQJRB6bc7P30PO8s35hCp6au5uM70zkohU4=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.78.2 h1:jIiopHEV22b4yQP2q36Y0OmwLbsxNWdWwfZRR5QRRO4=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.78.2/go.mod h1:U5SNqwhXB3Xe6F47kXvWihPl/ilGaEDe8HD/50Z9wxc=
 github.com/aws/aws-sdk-go-v2/service/sso v1.25.1 h1:8JdC7Gr9NROg1Rusk25IcZeTO59zLxsKgE0gkh5O6h0=
 github.com/aws/aws-sdk-go-v2/service/sso v1.25.1/go.mod h1:qs4a9T5EMLl/Cajiw2TcbNt2UNo/Hqlyp+GiuG4CFDI=
 github.com/aws/aws-sdk-go-v2/service/ssooidc v1.29.1 h1:KwuLovgQPcdjNMfFt9OhUd9a2OwcOKhxfvF4glTzLuA=
@@ -120,15 +118,14 @@ github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6N
 github.com/cyphar/filepath-securejoin v0.4.1 h1:JyxxyPEaktOD+GAnqIqTf9A8tHyAG22rowi7HkoSU1s=
 github.com/cyphar/filepath-securejoin v0.4.1/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
-github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
 github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
 github.com/djherbis/times v1.6.0 h1:w2ctJ92J8fBvWPxugmXIv7Nz7Q3iDMKNx9v5ocVH20c=
 github.com/djherbis/times v1.6.0/go.mod h1:gOHeRAz2h+VJNZ5Gmc/o7iD9k4wW7NMVqieYCY99oc0=
-github.com/elazarl/goproxy v1.2.3 h1:xwIyKHbaP5yfT6O9KIeYJR5549MXRQkoQMRXGztz8YQ=
-github.com/elazarl/goproxy v1.2.3/go.mod h1:YfEbZtqP4AetfO6d40vWchF3znWX7C7Vd6ZMfdL8z64=
+github.com/elazarl/goproxy v1.7.2 h1:Y2o6urb7Eule09PjlhQRGNsqRfPmYI3KKQLFpCAV3+o=
+github.com/elazarl/goproxy v1.7.2/go.mod h1:82vkLNir0ALaW14Rc399OTTjyNREgmdL2cVoIbS6XaE=
 github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc=
 github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ=
 github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f h1:Y/CXytFA4m6baUTXGLOoWe4PQhGxaX0KpnayAqC48p4=
@@ -136,8 +133,8 @@ github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f/go.mod h1:vw97
 github.com/evertras/bubble-table v0.17.1 h1:HJwq3iQrZulXDE93ZcqJNiUVQCBbN4IJ2CkB/IxO3kk=
 github.com/evertras/bubble-table v0.17.1/go.mod h1:ifHujS1YxwnYSOgcR2+m3GnJ84f7CVU/4kUOxUCjEbQ=
 github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
-github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
-github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
+github.com/fatih/color v1.17.0 h1:GlRw1BRJxkpqUCBKzKOw098ed57fEsKeNjpTe3cSjK4=
+github.com/fatih/color v1.17.0/go.mod h1:YZ7TlrGPkiz6ku9fK3TLD/pl3CpsiFyu8N92HLgmosI=
 github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
 github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
 github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
@@ -151,8 +148,14 @@ github.com/go-git/go-billy/v5 v5.6.2 h1:6Q86EsPXMa7c3YZ3aLAQsMA0VlWmy43r6FHqa/UN
 github.com/go-git/go-billy/v5 v5.6.2/go.mod h1:rcFC2rAsp/erv7CMz9GczHcuD0D32fWzH+MJAU+jaUU=
 github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4=
 github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII=
-github.com/go-git/go-git/v5 v5.13.1 h1:DAQ9APonnlvSWpvolXWIuV6Q6zXy2wHbN4cVlNR5Q+M=
-github.com/go-git/go-git/v5 v5.13.1/go.mod h1:qryJB4cSBoq3FRoBRf5A77joojuBcmPJ0qu3XXXVixc=
+github.com/go-git/go-git/v5 v5.14.0 h1:/MD3lCrGjCen5WfEAzKg00MJJffKhC8gzS80ycmCi60=
+github.com/go-git/go-git/v5 v5.14.0/go.mod h1:Z5Xhoia5PcWA3NF8vRLURn9E5FRhSl7dGj9ItW3Wk5k=
+github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
+github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
+github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss=
+github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
 github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
@@ -160,10 +163,12 @@ github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17w
 github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
 github.com/golang/glog v1.2.4 h1:CNNw5U8lSiiBk7druxtSHHTsRWcxKoac6kZKm2peBBc=
 github.com/golang/glog v1.2.4/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
-github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
-github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
-github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ=
+github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw=
+github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
+github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
+github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
+github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
 github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
 github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 h1:MJG/KsmcqMwFAkh8mTnAwhyKoB+sTAnY4CACC110tbU=
@@ -173,8 +178,6 @@ github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY
 github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
 github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
 github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
-github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
-github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
 github.com/hashicorp/hcl/v2 v2.23.0 h1:Fphj1/gCylPxHutVSEOf2fBOh1VE4AuLV7+kbJf3qos=
 github.com/hashicorp/hcl/v2 v2.23.0/go.mod h1:62ZYHrXgPoX8xBnzl8QzbWq4dyDsDtfCRgIq1rbJEvA=
 github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
@@ -183,10 +186,6 @@ github.com/iwdgo/sigintwindows v0.2.2 h1:P6oWzpvV7MrEAmhUgs+zmarrWkyL77ycZz4v7+1
 github.com/iwdgo/sigintwindows v0.2.2/go.mod h1:70wPb8oz8OnxPvsj2QMUjgIVhb8hMu5TUgX8KfFl7QY=
 github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
 github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
-github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
-github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
-github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
-github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
 github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4=
 github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
 github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6 h1:IsMZxCuZqKuao2vNdfD82fjjgPLfyHLpR41Z88viRWs=
@@ -205,8 +204,6 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0
 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
 github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY=
 github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
-github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
-github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
 github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
 github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
 github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
@@ -226,8 +223,6 @@ github.com/mitchellh/go-ps v1.0.0 h1:i6ampVEEF4wQFF+bkYfwYgY+F/uYJDktmvLPf7qIgjc
 github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg=
 github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0=
 github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0=
-github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
-github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
 github.com/muesli/ansi
v0.0.0-20230316100256-276c6243b2f6 h1:ZK8zHtRHOkbHy6Mmr5D264iyp3TiX5OmNcI5cIARiQI= github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6/go.mod h1:CJlz5H+gyd6CUWT45Oy4q24RdLyn7Md9Vj2/ldJBSIo= github.com/muesli/cancelreader v0.2.2 h1:3I4Kt4BQjOR54NavqnDogx/MIoWBFa0StPA8ELUXHmA= @@ -253,70 +248,67 @@ github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNH github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc= github.com/pgavlin/fx v0.1.6 h1:r9jEg69DhNoCd3Xh0+5mIbdbS3PqWrVWujkY76MFRTU= github.com/pgavlin/fx v0.1.6/go.mod h1:KWZJ6fqBBSh8GxHYqwYCf3rYE7Gp2p0N8tJp8xv9u9M= -github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4= -github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI= +github.com/pjbgf/sha1cd v0.3.2 h1:a9wb0bp1oC2TGwStyn0Umc/IGKQnEgF0vVaZ8QF8eo4= +github.com/pjbgf/sha1cd v0.3.2/go.mod h1:zQWigSxVmsHEZow5qaLtPYxpcKMMQpa09ixqBxuCS6A= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/term v1.1.0 h1:xIAAdCMh3QIAy+5FrE8Ad8XoDhEU4ufwbaSozViP9kk= github.com/pkg/term v1.1.0/go.mod h1:E25nymQcrSllhX42Ok8MRm1+hyBdHY0dCeiKZ9jpNGw= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pulumi/appdash v0.0.0-20231130102222-75f619a67231 h1:vkHw5I/plNdTr435cARxCW6q9gc0S/Yxz7Mkd38pOb0= github.com/pulumi/appdash v0.0.0-20231130102222-75f619a67231/go.mod h1:murToZ2N9hNJzewjHBgfFdXhZKjY3z5cYC1VXk+lbFE= -github.com/pulumi/esc v0.12.0 h1:lB9ZohZ1Bo1XfhSHjFnk4/y42UMLkmZxz9klH6Jh6Ps= -github.com/pulumi/esc v0.12.0/go.mod h1:iCs5bP1xvleSzcMDwfNc1Ym/6EJ2P6xmrjy0iqb0ATs= -github.com/pulumi/pulumi-aws-native/sdk v1.25.0 h1:lboDYcrKaz7ggwbUWluybdhG1zNs8Ya7aX6GyaHOwIY= -github.com/pulumi/pulumi-aws-native/sdk v1.25.0/go.mod h1:Kuy19nXfmt4X9ySnWlKJwYrTc/PedPBaNfX6SCoX8gE= -github.com/pulumi/pulumi-aws/sdk/v6 v6.69.0 h1:SO9xo9PIgufYORBPx82lSQN0CLg1CMab+rR6JJnGv/c= -github.com/pulumi/pulumi-aws/sdk/v6 v6.69.0/go.mod h1:OaqbSRCkPxr8XpanQ7u3Xup9MgSnyqQFDxNhV2pXWzw= -github.com/pulumi/pulumi-awsx/sdk/v2 v2.21.1 h1:V0NaeP3Rx5WGysvNz6x13Dnd3visEgQubP/gzQ+X86Q= -github.com/pulumi/pulumi-awsx/sdk/v2 v2.21.1/go.mod h1:j8IUGf1uw47bXxHS5H98O3NZIXvVsb7bfv/zby1cVLE= -github.com/pulumi/pulumi-azure-native-sdk/authorization/v2 v2.88.0 h1:xIb656wq0o/gOLel90Bj/3tRxwq+U+3RaktlNnHvTG4= -github.com/pulumi/pulumi-azure-native-sdk/authorization/v2 v2.88.0/go.mod h1:DnvaZlI1YwSZhIKfPPsXq6Koy1XoOkgGwOorcZeIj0k= -github.com/pulumi/pulumi-azure-native-sdk/compute/v2 v2.88.0 h1:TVO1fIAW7tu2sX7SEPfLH9e5DGH4cbbcG3qee58nEgk= -github.com/pulumi/pulumi-azure-native-sdk/compute/v2 v2.88.0/go.mod h1:Mv7iracyGjfSxOryzRqXGUUZNmBT5k0VQr+7aeXlV/g= -github.com/pulumi/pulumi-azure-native-sdk/containerservice/v2 v2.88.0 h1:TjJ6uz41nQ1tjyWUvshnRsFI5i0Tl7esgixUiOHdPgs= -github.com/pulumi/pulumi-azure-native-sdk/containerservice/v2 
v2.88.0/go.mod h1:Rra4dT4NF1O5lgbHfIf09EJPoDo8M2gXPvpgbDnJPGY= -github.com/pulumi/pulumi-azure-native-sdk/managedidentity/v2 v2.53.0 h1:HctTS276Dl0hzqyioTuvLSfFO6xUy0DALtF3Kr2DrmI= -github.com/pulumi/pulumi-azure-native-sdk/managedidentity/v2 v2.53.0/go.mod h1:qMFN9OZL50ARd8xi9zFMpCLflH/3YIT1MtXe5Qxu+54= -github.com/pulumi/pulumi-azure-native-sdk/network/v2 v2.88.0 h1:9evDP53ghSijpJNJRhdP6fiqEWumdbDvXoaDpf0lHgs= -github.com/pulumi/pulumi-azure-native-sdk/network/v2 v2.88.0/go.mod h1:E22FxHrz++y4sHUO5t30mhInQcWAdPd/zzcBJneKVQ0= -github.com/pulumi/pulumi-azure-native-sdk/resources/v2 v2.88.0 h1:cWQ6ddENxG0OPY9XV5e1XaTPehSeqqPrq2GpjBpNEWg= -github.com/pulumi/pulumi-azure-native-sdk/resources/v2 v2.88.0/go.mod h1:zDrKHsZ+S4ixhNXgdMBRfipvQRF0XGaGUFsVh/k/J08= -github.com/pulumi/pulumi-azure-native-sdk/storage/v2 v2.88.0 h1:tmACrjzokE3LDL93RNKgBRzRMqedEGofL9CZ+iruJUw= -github.com/pulumi/pulumi-azure-native-sdk/storage/v2 v2.88.0/go.mod h1:d83kS8DkwoIcAyeQHHFRWcV8q0ZqLmZt4xxBnplmhfE= -github.com/pulumi/pulumi-azure-native-sdk/v2 v2.88.0 h1:tLgRGAZz8ZUFzBvjI35XHSd/ms0wtfrSiOjBYer1MLw= -github.com/pulumi/pulumi-azure-native-sdk/v2 v2.88.0/go.mod h1:uyp/HcB8Iv0gZHmQw3YLtcbheJN8OmrqhaQFfU+9BjA= -github.com/pulumi/pulumi-command/sdk v1.0.1 h1:ZuBSFT57nxg/fs8yBymUhKLkjJ6qmyN3gNvlY/idiN0= -github.com/pulumi/pulumi-command/sdk v1.0.1/go.mod h1:C7sfdFbUIoXKoIASfXUbP/U9xnwPfxvz8dBpFodohlA= -github.com/pulumi/pulumi-docker-build/sdk/go/dockerbuild v0.0.3 h1:NxCXxRvzhUJP9dIvlpNlZKt/A3NHu3i9pC5XO+i8bR0= -github.com/pulumi/pulumi-docker-build/sdk/go/dockerbuild v0.0.3/go.mod h1:jw9NcyRXjv5V2HHHJlqUBdXFCFiLfZoCChWEn38LR2A= -github.com/pulumi/pulumi-docker/sdk/v4 v4.5.8 h1:rik9L2SIpsoDenY51MkogR6GWgu/0Sy/XmyQmKWNUqU= -github.com/pulumi/pulumi-docker/sdk/v4 v4.5.8/go.mod h1:eph7BPNPkEIIK882/Ll4dbeHl5wZEc/UvTcUW0CK1UY= -github.com/pulumi/pulumi-random/sdk/v4 v4.16.7 h1:39rhOe/PTUGMYia8pR5T2wbxxMt2pwrlonf0ncYKSzE= -github.com/pulumi/pulumi-random/sdk/v4 v4.16.7/go.mod h1:cxxDhJzUPt/YElfvlWa15Q4NGF6XXS8kUs4OQsCxSBk= -github.com/pulumi/pulumi-tls/sdk/v5 v5.0.9 h1:JH5TkGJDwlTqKdSJxd49DfrJOpVkRyGDvgKS9gwcV9U= -github.com/pulumi/pulumi-tls/sdk/v5 v5.0.9/go.mod h1:3B+v+HC4bjAYf1mVxRVL/bDY20+VEMwfxRKgc3oeCrs= -github.com/pulumi/pulumi/sdk/v3 v3.154.0 h1:AK2XAIYwCVcCdvHuB87DqmnRLt7AlMRQobYb2VVycN8= -github.com/pulumi/pulumi/sdk/v3 v3.154.0/go.mod h1:+WC9aIDo8fMgd2g0jCHuZU2S/VYNLRAZ3QXt6YVgwaA= +github.com/pulumi/esc v0.13.0 h1:O2MPR2koScaQ2fXwyer8Q3Dd7z+DCnaDfsgNl5mVNMk= +github.com/pulumi/esc v0.13.0/go.mod h1:IIQo6W6Uzajt6f1RW4QvNxIRDlbK3TNQysnrwBHNo3U= +github.com/pulumi/pulumi-aws-native/sdk v1.26.0 h1:hUQlTLmswgr28EOZvmIugO0sF1T8jXSZTAr0l6cuZXU= +github.com/pulumi/pulumi-aws-native/sdk v1.26.0/go.mod h1:IC+sE/FN50ve9AEfhJ8HQBeg6JfgvCaGIqJ+VaL8PZU= +github.com/pulumi/pulumi-aws/sdk/v6 v6.72.0 h1:lGMnU0pRZY2p8nceRrn15Dl/iSH08fa8rqsqpxhqKqQ= +github.com/pulumi/pulumi-aws/sdk/v6 v6.72.0/go.mod h1:clWiwlT7kCBiQYHGFAtfV7/tmQrxQO2CCRK4bKnr6Q8= +github.com/pulumi/pulumi-awsx/sdk/v2 v2.21.0 h1:El2HV0GbOFFoQot0id6ZNjpjrF+HErIdreMG9WwJ7OA= +github.com/pulumi/pulumi-awsx/sdk/v2 v2.21.0/go.mod h1:ZpStejgr+RYJZwbFV6IpOxPb/wLSd86LUTC69UNQVVI= +github.com/pulumi/pulumi-azure-native-sdk/authorization/v2 v2.89.3 h1:4ooEwVXSTgt1ViF9WmrKNMkMMQFrt7ZGuzfSVqBfOw8= +github.com/pulumi/pulumi-azure-native-sdk/authorization/v2 v2.89.3/go.mod h1:+MF/zIf4iGwe+HFjufMJGOPVrvdL9pyIuXajL/u9at4= +github.com/pulumi/pulumi-azure-native-sdk/compute/v2 v2.89.3 h1:HbDzFcCaWUSXoU4srIcssOrd71F9FcxCd/ZrOT+CHkY= 
+github.com/pulumi/pulumi-azure-native-sdk/compute/v2 v2.89.3/go.mod h1:0RR3VcIzF01AExj+v6+QEmDm9yrqOCEDgFw9kduS7is= +github.com/pulumi/pulumi-azure-native-sdk/containerservice/v2 v2.89.3 h1:rJvxexlISXj+f9xnJL4MYmd4hiDMMu31POoFcfihjOA= +github.com/pulumi/pulumi-azure-native-sdk/containerservice/v2 v2.89.3/go.mod h1:JPzxT7qdyJB8Mka/diD0sjXGnymWaxjhDAzd4ZZuTck= +github.com/pulumi/pulumi-azure-native-sdk/managedidentity/v2 v2.89.3 h1:2tGEHR+yhHpM7LlNJZUTqfAg2nx+8fGrIWq/cWiNRO4= +github.com/pulumi/pulumi-azure-native-sdk/managedidentity/v2 v2.89.3/go.mod h1:KKFLhGzVD8HTFZpygRWNKe2ESuXj0ncnihmifaJWdho= +github.com/pulumi/pulumi-azure-native-sdk/network/v2 v2.89.3 h1:P0U2Bqkdxzd/JOX9YYtl39f6od6RJlQjg6RYMq348KQ= +github.com/pulumi/pulumi-azure-native-sdk/network/v2 v2.89.3/go.mod h1:vzN3NxuYPOtjvDAka1Llc2P4RnEROH/EGgOp3YiGneM= +github.com/pulumi/pulumi-azure-native-sdk/resources/v2 v2.89.3 h1:rTKg6l9TQDjfkUWc4NcL3mor3vCf7MSKGlBA0nrI4XY= +github.com/pulumi/pulumi-azure-native-sdk/resources/v2 v2.89.3/go.mod h1:6JRdTGcxWzJEqMv4HabDX4oWiy2KVZ25w3wfb1B5D8k= +github.com/pulumi/pulumi-azure-native-sdk/storage/v2 v2.89.3 h1:rihq+1CMDhyUpeNzvGn5qFVG+t+hzEJDT11Fs6yCUSs= +github.com/pulumi/pulumi-azure-native-sdk/storage/v2 v2.89.3/go.mod h1:ooz1quxmoGZSukKR8Flb1WP2y7UFwYlvA1W8B5U1CvQ= +github.com/pulumi/pulumi-azure-native-sdk/v2 v2.89.3 h1:KrbJiukrKJQIBXWWLDS5a3nVHBSVcKtqctsM3YAJBkg= +github.com/pulumi/pulumi-azure-native-sdk/v2 v2.89.3/go.mod h1:9Nsv4Qg7OjKiCN96jkX+Iyei+QfumqmdBnVwAdWxOQc= +github.com/pulumi/pulumi-command/sdk v1.0.2 h1:OhyVaUlE+kvx0NXU+wzf7xTA5OL2bHWvKWAHvyhi1uo= +github.com/pulumi/pulumi-command/sdk v1.0.2/go.mod h1:BbcTHkvZtcJqyB7SeLDzP99dj8xyCbA8v/6WjixsuBA= +github.com/pulumi/pulumi-docker-build/sdk/go/dockerbuild v0.0.10 h1:4sJlcvJfh7YqKNNDfYsxA74nHPQSStNOI/jQna67no8= +github.com/pulumi/pulumi-docker-build/sdk/go/dockerbuild v0.0.10/go.mod h1:A6cGhzYPhsSSp724pYT+vXkUIcLSdpz38+feCs6KAFU= +github.com/pulumi/pulumi-docker/sdk/v4 v4.6.2 h1:YJffxtgdexLoI4RPFJpoAbjksoqvBN6VUQlom9HlRuc= +github.com/pulumi/pulumi-docker/sdk/v4 v4.6.2/go.mod h1:7fX6vITy/9ADh81dLC+0qZ2dVmPE8wXLsS1w499f5Yo= +github.com/pulumi/pulumi-random/sdk/v4 v4.18.0 h1:AwgJjmZJavVo30cz2/fAu16Wif2LdhzC7mOEcERtkJ0= +github.com/pulumi/pulumi-random/sdk/v4 v4.18.0/go.mod h1:rNVU3Gh8I19LFpjXudpB5ZebuFwO6L3tCmrqcDtcAGg= +github.com/pulumi/pulumi-tls/sdk/v5 v5.1.1 h1:uR3SAOKOdy7AigiF8tymLk8e0uqdU1NGziltKqG78Zw= +github.com/pulumi/pulumi-tls/sdk/v5 v5.1.1/go.mod h1:bzBYWyJkvnpJIDuXddM2ZeKapVRtUA7h+pvfbmymxPU= +github.com/pulumi/pulumi/sdk/v3 v3.157.0 h1:wqIN+JM/igzOC+XXdch0UKYr3V3k/hjpgt3sS3GzX84= +github.com/pulumi/pulumi/sdk/v3 v3.157.0/go.mod h1:YEbbl0N7eVsgfsL7h5215dDf8GBSe4AnRon7Ya/KIVc= github.com/redis/go-redis/v9 v9.7.0 h1:HhLSs+B6O021gwzl+locl0zEDnyNkxMtf/Z3NNBMa9E= github.com/redis/go-redis/v9 v9.7.0/go.mod h1:f6zhXITC7JUJIlPEiBOTXxJgPLdZcA93GewI7inzyWw= github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= +github.com/rogpeppe/go-internal v1.14.1/go.mod 
h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06 h1:OkMGxebDjyw0ULyrTYWeN0UNCCkmCWfjPnIA2W6oviI= github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06/go.mod h1:+ePHsJ1keEjQtpvf9HHw0f4ZeJ0TLRsxhunSI2hYJSs= -github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= -github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= -github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= -github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= +github.com/sagikazarmark/locafero v0.8.0 h1:mXaMVw7IqxNBxfv3LdWt9MDmcWDQ1fagDH918lOdVaQ= +github.com/sagikazarmark/locafero v0.8.0/go.mod h1:UBUyz37V+EdMS3hDF3QWIiVr/2dPrx49OMO0Bn0hJqk= github.com/sahilm/fuzzy v0.1.1 h1:ceu5RHF8DGgoi+/dR5PsECjCDH1BE3Fnmpo7aVXOdRA= github.com/sahilm/fuzzy v0.1.1/go.mod h1:VFvziUEIMCrT6A6tw2RFIXPXXmzXbOsSHF0DOI8ZK9Y= github.com/samber/lo v1.47.0 h1:z7RynLwP5nbyRscyvcD043DWYoOcYRv3mV8lBeqOCLc= @@ -328,20 +320,20 @@ github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/skeema/knownhosts v1.3.0 h1:AM+y0rI04VksttfwjkSTNQorvGqmwATnvnAHpSgc0LY= -github.com/skeema/knownhosts v1.3.0/go.mod h1:sPINvnADmT/qYH1kfv+ePMmOBTH6Tbl7b5LvTDjFK7M= +github.com/skeema/knownhosts v1.3.1 h1:X2osQ+RAjK76shCbvhHHHVl3ZlgDm8apHEHFqRjnBY8= +github.com/skeema/knownhosts v1.3.1/go.mod h1:r7KTdC8l4uxWRyK2TpQZ/1o5HaSzh06ePQNxPwTcfiY= github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= -github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= -github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= -github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= -github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/afero v1.14.0 h1:9tH6MapGnn/j0eb0yIXiLjERO8RB6xIVZRDCX7PtqWA= +github.com/spf13/afero v1.14.0/go.mod h1:acJQ8t0ohCGuMN3O+Pv0V0hgMxNYDlvdk+VTfyZmbYo= +github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= +github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI= -github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg= +github.com/spf13/viper v1.20.0 h1:zrxIyR3RQIOsarIrgL8+sAvALXul9jeEPa06Y0Ph6vY= +github.com/spf13/viper v1.20.0/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.5.2 
h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= @@ -367,8 +359,20 @@ github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavM github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/zclconf/go-cty v1.15.0 h1:tTCRWxsexYUmtt/wVxgDClUe+uQusuI443uL6e+5sXQ= -github.com/zclconf/go-cty v1.15.0/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= +github.com/zclconf/go-cty v1.16.2 h1:LAJSwc3v81IRBZyUVQDUdZ7hs3SYs9jv0eZJDWHD/70= +github.com/zclconf/go-cty v1.16.2/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY= +go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI= +go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ= +go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE= +go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A= +go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU= +go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce1EK0Gyvahk= +go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w= +go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k= +go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= @@ -379,14 +383,14 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= -golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= -golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= +golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 h1:nDVHiLt8aIbd/VzvPWN6kSOPE7+F/fNFDSXLVYkE/Iw= +golang.org/x/exp v0.0.0-20250305212735-054e65f0b394/go.mod h1:sIifuuw/Yco/y6yb6+bDNfyeQ/MdPUy/hKEMYQV17cM= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= -golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/mod v0.24.0 
h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU= +golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -403,7 +407,6 @@ golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -434,36 +437,33 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE= -golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg= +golang.org/x/tools v0.31.0 h1:0EedkvKDbh+qistFTd0Bcwe/YLh4vHwWEkiI0toFIBU= +golang.org/x/tools v0.31.0/go.mod h1:naFTU+Cev749tSJRXJlna0T3WxKvb1kWEx15xA4SdmQ= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= -google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= -google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= -google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= -google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250313205543-e70fdf4c4cb4 h1:iK2jbkWL86DXjEx0qiHcRE9dE4/Ahua5k6V8OWFb//c= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250313205543-e70fdf4c4cb4/go.mod h1:LuRYeWDFV6WOn90g357N17oMCaxpgCnbi/44qJvDn2I= +google.golang.org/grpc v1.71.0 h1:kF77BGdPTQ4/JZWMlb9VpJ5pa25aqvVqogsxNHHdeBg= +google.golang.org/grpc v1.71.0/go.mod h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd/8Ec= +google.golang.org/protobuf 
v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= +google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= -gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -lukechampine.com/frand v1.4.2 h1:RzFIpOvkMXuPMBb9maa4ND4wjBn71E1Jpf8BzJHMaVw= -lukechampine.com/frand v1.4.2/go.mod h1:4S/TM2ZgrKejMcKMbeLjISpJMO+/eZ1zu3vYX9dtj3s= +lukechampine.com/frand v1.5.1 h1:fg0eRtdmGFIxhP5zQJzM1lFDbD6CUfu/f+7WgAZd5/w= +lukechampine.com/frand v1.5.1/go.mod h1:4VstaWc2plN4Mjr10chUD46RAVGWhpkZ5Nja8+Azp0Q= pgregory.net/rapid v1.1.0 h1:CMa0sjHSru3puNx+J0MIAuiiEV4N0qj8/cMWGBBCsjw= pgregory.net/rapid v1.1.0/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04= diff --git a/oci/Containerfile b/oci/Containerfile index 61865fc60..1ad05d195 100644 --- a/oci/Containerfile +++ b/oci/Containerfile @@ -1,5 +1,5 @@ # Moved to Fedora 41 and install go tools until ubi9 go tools support go 1.23 -FROM quay.io/fedora/fedora:41 as builder +FROM quay.io/fedora/fedora:43@sha256:cd46550fc71e9401db4ad6e2b992df3278f6874a16d0abf1050e332f376802a5 as builder RUN dnf install -y make go ARG TARGETARCH USER root @@ -7,7 +7,7 @@ WORKDIR /workspace COPY . . 
# renovate: datasource=github-releases depName=pulumi/pulumi -ENV PULUMI_VERSION 3.154.0 +ENV PULUMI_VERSION 3.157.0 ENV PULUMI_BASE_URL="https://github.com/pulumi/pulumi/releases/download/v${PULUMI_VERSION}/pulumi-v${PULUMI_VERSION}" ENV PULUMI_URL="${PULUMI_BASE_URL}-linux-x64.tar.gz" @@ -33,19 +33,19 @@ ENV AWS_SDK_LOAD_CONFIG=1 \ # Pulumi plugins # renovate: datasource=github-releases depName=pulumi/pulumi-aws -ARG PULUMI_AWS_VERSION=v6.69.0 +ARG PULUMI_AWS_VERSION=v6.72.0 # renovate: datasource=github-releases depName=pulumi/pulumi-awsx ARG PULUMI_AWSX_VERSION=v2.21.1 # renovate: datasource=github-releases depName=pulumi/pulumi-azure-native -ARG PULUMI_AZURE_NATIVE_VERSION=v2.88.0 +ARG PULUMI_AZURE_NATIVE_VERSION=v2.89.3 # renovate: datasource=github-releases depName=pulumi/pulumi-command ARG PULUMI_COMMAND_VERSION=v1.0.1 # renovate: datasource=github-releases depName=pulumi/pulumi-tls -ARG PULUMI_TLS_VERSION=v5.0.9 +ARG PULUMI_TLS_VERSION=v5.1.1 # renovate: datasource=github-releases depName=pulumi/pulumi-random -ARG PULUMI_RANDOM_VERSION=v4.16.7 +ARG PULUMI_RANDOM_VERSION=v4.18.0 # renovate: datasource=github-releases depName=pulumi/pulumi-aws-native -ARG PULUMI_AWS_NATIVE_VERSION=v1.25.0 +ARG PULUMI_AWS_NATIVE_VERSION=v1.26.0 ENV PULUMI_HOME "/opt/mapt/run" WORKDIR ${PULUMI_HOME} diff --git a/tools/go.mod b/tools/go.mod index f8451efd5..354166f51 100644 --- a/tools/go.mod +++ b/tools/go.mod @@ -7,16 +7,16 @@ toolchain go1.23.7 require github.com/golangci/golangci-lint v1.64.8 require ( - 4d63.com/gocheckcompilerdirectives v1.2.1 // indirect + 4d63.com/gocheckcompilerdirectives v1.3.0 // indirect 4d63.com/gochecknoglobals v0.2.2 // indirect - github.com/4meepo/tagalign v1.4.1 // indirect + github.com/4meepo/tagalign v1.4.2 // indirect github.com/Abirdcfly/dupword v0.1.3 // indirect - github.com/Antonboom/errname v1.0.0 // indirect - github.com/Antonboom/nilnil v1.0.1 // indirect - github.com/Antonboom/testifylint v1.5.2 // indirect - github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c // indirect + github.com/Antonboom/errname v1.1.0 // indirect + github.com/Antonboom/nilnil v1.1.0 // indirect + github.com/Antonboom/testifylint v1.6.0 // indirect + github.com/BurntSushi/toml v1.5.0 // indirect github.com/Crocmagnon/fatcontext v0.7.1 // indirect - github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 // indirect + github.com/Djarvur/go-err113 v0.1.0 // indirect github.com/GaijinEntertainment/go-exhaustruct/v3 v3.3.1 // indirect github.com/Masterminds/semver/v3 v3.3.1 // indirect github.com/OpenPeeDeeP/depguard/v2 v2.2.1 // indirect @@ -30,12 +30,12 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/bkielbasa/cyclop v1.2.3 // indirect github.com/blizzy78/varnamelen v0.8.0 // indirect - github.com/bombsimon/wsl/v4 v4.5.0 // indirect + github.com/bombsimon/wsl/v4 v4.6.0 // indirect github.com/breml/bidichk v0.3.3 // indirect github.com/breml/errchkjson v0.4.1 // indirect github.com/butuzov/ireturn v0.3.1 // indirect github.com/butuzov/mirror v1.3.0 // indirect - github.com/catenacyber/perfsprint v0.8.1 // indirect + github.com/catenacyber/perfsprint v0.9.1 // indirect github.com/ccojocar/zxcvbn-go v1.0.2 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/charithe/durationcheck v0.0.10 // indirect @@ -49,10 +49,10 @@ require ( github.com/fatih/color v1.18.0 // indirect github.com/fatih/structtag v1.2.0 // indirect github.com/firefart/nonamedreturns v1.0.5 // indirect - github.com/fsnotify/fsnotify v1.5.4 // indirect + 
github.com/fsnotify/fsnotify v1.8.0 // indirect github.com/fzipp/gocyclo v0.6.0 // indirect github.com/ghostiam/protogetter v0.3.12 // indirect - github.com/go-critic/go-critic v0.12.0 // indirect + github.com/go-critic/go-critic v0.13.0 // indirect github.com/go-toolsmith/astcast v1.1.0 // indirect github.com/go-toolsmith/astcopy v1.1.0 // indirect github.com/go-toolsmith/astequal v1.2.0 // indirect @@ -64,7 +64,6 @@ require ( github.com/go-xmlfmt/xmlfmt v1.1.3 // indirect github.com/gobwas/glob v0.2.3 // indirect github.com/gofrs/flock v0.12.1 // indirect - github.com/golang/protobuf v1.5.4 // indirect github.com/golangci/dupl v0.0.0-20250308024227-f665c8d69b32 // indirect github.com/golangci/go-printf-func-name v0.1.0 // indirect github.com/golangci/gofmt v0.0.0-20250106114630-d62b90e6713d // indirect @@ -72,16 +71,15 @@ require ( github.com/golangci/plugin-module-register v0.1.1 // indirect github.com/golangci/revgrep v0.8.0 // indirect github.com/golangci/unconvert v0.0.0-20240309020433-c5143eacb3ed // indirect - github.com/google/go-cmp v0.6.0 // indirect + github.com/google/go-cmp v0.7.0 // indirect github.com/gordonklaus/ineffassign v0.1.0 // indirect github.com/gostaticanalysis/analysisutil v0.7.1 // indirect - github.com/gostaticanalysis/comment v1.4.2 // indirect + github.com/gostaticanalysis/comment v1.5.0 // indirect github.com/gostaticanalysis/forcetypeassert v0.2.0 // indirect github.com/gostaticanalysis/nilerr v0.1.1 // indirect github.com/hashicorp/go-immutable-radix/v2 v2.1.0 // indirect github.com/hashicorp/go-version v1.7.0 // indirect github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect - github.com/hashicorp/hcl v1.0.0 // indirect github.com/hexops/gotextdiff v1.0.3 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jgautheron/goconst v1.7.1 // indirect @@ -89,53 +87,51 @@ require ( github.com/jjti/go-spancheck v0.6.4 // indirect github.com/julz/importas v0.2.0 // indirect github.com/karamaru-alpha/copyloopvar v1.2.1 // indirect - github.com/kisielk/errcheck v1.8.0 // indirect - github.com/kkHAIKE/contextcheck v1.1.5 // indirect + github.com/kisielk/errcheck v1.9.0 // indirect + github.com/kkHAIKE/contextcheck v1.1.6 // indirect github.com/kulti/thelper v0.6.3 // indirect github.com/kunwardeep/paralleltest v1.0.10 // indirect github.com/lasiar/canonicalheader v1.1.2 // indirect - github.com/ldez/exptostd v0.4.1 // indirect + github.com/ldez/exptostd v0.4.2 // indirect github.com/ldez/gomoddirectives v0.6.1 // indirect github.com/ldez/grignotin v0.9.0 // indirect github.com/ldez/tagliatelle v0.7.1 // indirect github.com/ldez/usetesting v0.4.2 // indirect github.com/leonklingele/grouper v1.1.2 // indirect - github.com/macabu/inamedparam v0.1.3 // indirect - github.com/magiconair/properties v1.8.7 // indirect + github.com/macabu/inamedparam v0.2.0 // indirect github.com/maratori/testableexamples v1.0.0 // indirect github.com/maratori/testpackage v1.1.1 // indirect github.com/matoous/godox v1.1.0 // indirect github.com/mattn/go-colorable v0.1.14 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/mattn/go-runewidth v0.0.16 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect - github.com/mgechev/revive v1.6.1 // indirect + github.com/mgechev/revive v1.7.0 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect - github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/moricho/tparallel v0.3.2 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect 
github.com/nakabonne/nestif v0.3.1 // indirect github.com/nishanths/exhaustive v0.12.0 // indirect github.com/nishanths/predeclared v0.2.2 // indirect - github.com/nunnatsa/ginkgolinter v0.19.0 // indirect + github.com/nunnatsa/ginkgolinter v0.19.1 // indirect github.com/olekukonko/tablewriter v0.0.5 // indirect - github.com/pelletier/go-toml v1.9.5 // indirect github.com/pelletier/go-toml/v2 v2.2.3 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/polyfloyd/go-errorlint v1.7.1 // indirect - github.com/prometheus/client_golang v1.12.1 // indirect - github.com/prometheus/client_model v0.2.0 // indirect - github.com/prometheus/common v0.32.1 // indirect - github.com/prometheus/procfs v0.7.3 // indirect - github.com/quasilyte/go-ruleguard v0.4.3-0.20240823090925-0fe6f58b47b1 // indirect + github.com/prometheus/client_golang v1.21.1 // indirect + github.com/prometheus/client_model v0.6.1 // indirect + github.com/prometheus/common v0.63.0 // indirect + github.com/prometheus/procfs v0.16.0 // indirect + github.com/quasilyte/go-ruleguard v0.4.4 // indirect github.com/quasilyte/go-ruleguard/dsl v0.3.22 // indirect github.com/quasilyte/gogrep v0.5.0 // indirect github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 // indirect github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 // indirect github.com/raeperd/recvcheck v0.2.0 // indirect github.com/rivo/uniseg v0.4.7 // indirect - github.com/rogpeppe/go-internal v1.13.1 // indirect - github.com/ryancurrah/gomodguard v1.3.5 // indirect + github.com/rogpeppe/go-internal v1.14.1 // indirect + github.com/ryancurrah/gomodguard v1.4.1 // indirect github.com/ryanrolds/sqlclosecheck v0.5.1 // indirect + github.com/sagikazarmark/locafero v0.7.0 // indirect github.com/sanposhiho/wastedassign/v2 v2.1.0 // indirect github.com/santhosh-tekuri/jsonschema/v6 v6.0.1 // indirect github.com/sashamelentyev/interfacebloat v1.1.0 // indirect @@ -145,21 +141,21 @@ require ( github.com/sivchari/containedctx v1.0.3 // indirect github.com/sivchari/tenv v1.12.1 // indirect github.com/sonatard/noctx v0.1.0 // indirect + github.com/sourcegraph/conc v0.3.0 // indirect github.com/sourcegraph/go-diff v0.7.0 // indirect - github.com/spf13/afero v1.12.0 // indirect - github.com/spf13/cast v1.5.0 // indirect - github.com/spf13/cobra v1.8.1 // indirect - github.com/spf13/jwalterweatherman v1.1.0 // indirect + github.com/spf13/afero v1.14.0 // indirect + github.com/spf13/cast v1.7.1 // indirect + github.com/spf13/cobra v1.9.1 // indirect github.com/spf13/pflag v1.0.6 // indirect - github.com/spf13/viper v1.12.0 // indirect + github.com/spf13/viper v1.20.0 // indirect github.com/ssgreg/nlreturn/v2 v2.2.1 // indirect github.com/stbenjam/no-sprintf-host-port v0.2.0 // indirect github.com/stretchr/objx v0.5.2 // indirect github.com/stretchr/testify v1.10.0 // indirect - github.com/subosito/gotenv v1.4.1 // indirect - github.com/tdakkota/asciicheck v0.4.0 // indirect - github.com/tetafro/godot v1.4.20 // indirect - github.com/timakin/bodyclose v0.0.0-20241017074812-ed6a65f985e3 // indirect + github.com/subosito/gotenv v1.6.0 // indirect + github.com/tdakkota/asciicheck v0.4.1 // indirect + github.com/tetafro/godot v1.5.0 // indirect + github.com/timakin/bodyclose v0.0.0-20241222091800-1db5c5ca4d67 // indirect github.com/timonwong/loggercheck v0.10.1 // indirect github.com/tomarrell/wrapcheck/v2 v2.10.0 // indirect github.com/tommy-muehle/go-mnd/v2 v2.5.1 // indirect @@ -167,28 +163,26 @@ require ( github.com/ultraware/whitespace v0.2.0 // 
indirect github.com/uudashr/gocognit v1.2.0 // indirect github.com/uudashr/iface v1.3.1 // indirect - github.com/xen0n/gosmopolitan v1.2.2 // indirect + github.com/xen0n/gosmopolitan v1.3.0 // indirect github.com/yagipy/maintidx v1.0.0 // indirect github.com/yeya24/promlinter v0.3.0 // indirect github.com/ykadowak/zerologlint v0.1.5 // indirect gitlab.com/bosi/decorder v0.4.2 // indirect go-simpler.org/musttag v0.13.0 // indirect go-simpler.org/sloglint v0.9.0 // indirect - go.uber.org/atomic v1.7.0 // indirect go.uber.org/automaxprocs v1.6.0 // indirect - go.uber.org/multierr v1.6.0 // indirect - go.uber.org/zap v1.24.0 // indirect - golang.org/x/exp/typeparams v0.0.0-20250210185358-939b2ce775ac // indirect + go.uber.org/multierr v1.11.0 // indirect + go.uber.org/zap v1.27.0 // indirect + golang.org/x/exp/typeparams v0.0.0-20250305212735-054e65f0b394 // indirect golang.org/x/mod v0.24.0 // indirect golang.org/x/sync v0.12.0 // indirect golang.org/x/sys v0.31.0 // indirect - golang.org/x/text v0.22.0 // indirect + golang.org/x/text v0.23.0 // indirect golang.org/x/tools v0.31.0 // indirect google.golang.org/protobuf v1.36.5 // indirect - gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect honnef.co/go/tools v0.6.1 // indirect mvdan.cc/gofumpt v0.7.0 // indirect - mvdan.cc/unparam v0.0.0-20240528143540-8a5130ca722f // indirect + mvdan.cc/unparam v0.0.0-20250301125049-0df0534333a4 // indirect ) diff --git a/tools/go.sum b/tools/go.sum index 582ab5add..74a92b150 100644 --- a/tools/go.sum +++ b/tools/go.sum @@ -1,58 +1,23 @@ -4d63.com/gocheckcompilerdirectives v1.2.1 h1:AHcMYuw56NPjq/2y615IGg2kYkBdTvOaojYCBcRE7MA= -4d63.com/gocheckcompilerdirectives v1.2.1/go.mod h1:yjDJSxmDTtIHHCqX0ufRYZDL6vQtMG7tJdKVeWwsqvs= +4d63.com/gocheckcompilerdirectives v1.3.0 h1:Ew5y5CtcAAQeTVKUVFrE7EwHMrTO6BggtEj8BZSjZ3A= +4d63.com/gocheckcompilerdirectives v1.3.0/go.mod h1:ofsJ4zx2QAuIP/NO/NAh1ig6R1Fb18/GI7RVMwz7kAY= 4d63.com/gochecknoglobals v0.2.2 h1:H1vdnwnMaZdQW/N+NrkT1SZMTBmcwHe9Vq8lJcYYTtU= 4d63.com/gochecknoglobals v0.2.2/go.mod h1:lLxwTQjL5eIesRbvnzIP3jZtG140FnTdz+AlMa+ogt0= -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod 
h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/4meepo/tagalign v1.4.1 h1:GYTu2FaPGOGb/xJalcqHeD4il5BiCywyEYZOA55P6J4= -github.com/4meepo/tagalign v1.4.1/go.mod h1:2H9Yu6sZ67hmuraFgfZkNcg5Py9Ch/Om9l2K/2W1qS4= +github.com/4meepo/tagalign v1.4.2 h1:0hcLHPGMjDyM1gHG58cS73aQF8J4TdVR96TZViorO9E= +github.com/4meepo/tagalign v1.4.2/go.mod h1:+p4aMyFM+ra7nb41CnFG6aSDXqRxU/w1VQqScKqDARI= github.com/Abirdcfly/dupword v0.1.3 h1:9Pa1NuAsZvpFPi9Pqkd93I7LIYRURj+A//dFd5tgBeE= github.com/Abirdcfly/dupword v0.1.3/go.mod h1:8VbB2t7e10KRNdwTVoxdBaxla6avbhGzb8sCTygUMhw= -github.com/Antonboom/errname v1.0.0 h1:oJOOWR07vS1kRusl6YRSlat7HFnb3mSfMl6sDMRoTBA= -github.com/Antonboom/errname v1.0.0/go.mod h1:gMOBFzK/vrTiXN9Oh+HFs+e6Ndl0eTFbtsRTSRdXyGI= -github.com/Antonboom/nilnil v1.0.1 h1:C3Tkm0KUxgfO4Duk3PM+ztPncTFlOf0b2qadmS0s4xs= -github.com/Antonboom/nilnil v1.0.1/go.mod h1:CH7pW2JsRNFgEh8B2UaPZTEPhCMuFowP/e8Udp9Nnb0= -github.com/Antonboom/testifylint v1.5.2 h1:4s3Xhuv5AvdIgbd8wOOEeo0uZG7PbDKQyKY5lGoQazk= -github.com/Antonboom/testifylint v1.5.2/go.mod h1:vxy8VJ0bc6NavlYqjZfmp6EfqXMtBgQ4+mhCojwC1P8= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c h1:pxW6RcqyfI9/kWtOwnv/G+AzdKuy2ZrqINhenH4HyNs= -github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/Antonboom/errname v1.1.0 h1:A+ucvdpMwlo/myWrkHEUEBWc/xuXdud23S8tmTb/oAE= +github.com/Antonboom/errname v1.1.0/go.mod h1:O1NMrzgUcVBGIfi3xlVuvX8Q/VP/73sseCaAppfjqZw= +github.com/Antonboom/nilnil v1.1.0 h1:jGxJxjgYS3VUUtOTNk8Z1icwT5ESpLH/426fjmQG+ng= +github.com/Antonboom/nilnil v1.1.0/go.mod h1:b7sAlogQjFa1wV8jUW3o4PMzDVFLbTux+xnQdvzdcIE= +github.com/Antonboom/testifylint v1.6.0 h1:6rdILVPt4+rqcvhid8w9wJNynKLUgqHNpFyM67UeXyc= +github.com/Antonboom/testifylint v1.6.0/go.mod 
h1:k+nEkathI2NFjKO6HvwmSrbzUcQ6FAnbZV+ZRrnXPLI= +github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg= +github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= github.com/Crocmagnon/fatcontext v0.7.1 h1:SC/VIbRRZQeQWj/TcQBS6JmrXcfA+BU4OGSVUt54PjM= github.com/Crocmagnon/fatcontext v0.7.1/go.mod h1:1wMvv3NXEBJucFGfwOJBxSVWcoIO6emV215SMkW9MFU= -github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 h1:sHglBQTwgx+rWPdisA5ynNEsoARbiCBOyGcJM4/OzsM= -github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs= +github.com/Djarvur/go-err113 v0.1.0 h1:uCRZZOdMQ0TZPHYTdYpoC0bLYJKPEHPUJ8MeAa51lNU= +github.com/Djarvur/go-err113 v0.1.0/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs= github.com/GaijinEntertainment/go-exhaustruct/v3 v3.3.1 h1:Sz1JIXEcSfhz7fUi7xHnhpIE0thVASYjvosApmHuD2k= github.com/GaijinEntertainment/go-exhaustruct/v3 v3.3.1/go.mod h1:n/LSCXNuIYqVfBlVXyHfMQkZDdp1/mmxfSjADd3z1Zg= github.com/Masterminds/semver/v3 v3.3.1 h1:QtNSWtVZ3nBfk8mAOu/B6v7FMJ+NHTIgUPi7rj+4nv4= @@ -65,11 +30,6 @@ github.com/alecthomas/go-check-sumtype v0.3.1 h1:u9aUvbGINJxLVXiFvHUlPEaD7VDULsr github.com/alecthomas/go-check-sumtype v0.3.1/go.mod h1:A8TSiN3UPRw3laIgWEUOHHLPa6/r9MtoigdlP5h3K/E= github.com/alecthomas/repr v0.4.0 h1:GhI2A8MACjfegCPVq9f1FLvIBS+DrQ2KQBFZP1iFzXc= github.com/alecthomas/repr v0.4.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/alexkohler/nakedret/v2 v2.0.5 h1:fP5qLgtwbx9EJE8dGEERT02YwS8En4r9nnZ71RK+EVU= github.com/alexkohler/nakedret/v2 v2.0.5/go.mod h1:bF5i0zF2Wo2o4X4USt9ntUWve6JbFv02Ff4vlkmS/VU= github.com/alexkohler/prealloc v1.0.0 h1:Hbq0/3fJPQhNkN0dR95AVrr6R7tou91y0uHG5pOcUuw= @@ -82,18 +42,14 @@ github.com/ashanbrown/forbidigo v1.6.0 h1:D3aewfM37Yb3pxHujIPSpTf6oQk9sc9WZi8ger github.com/ashanbrown/forbidigo v1.6.0/go.mod h1:Y8j9jy9ZYAEHXdu723cUlraTqbzjKF1MUyfOKL+AjcU= github.com/ashanbrown/makezero v1.2.0 h1:/2Lp1bypdmK9wDIq7uWBlDF1iMUpIIS4A+pF6C9IEUU= github.com/ashanbrown/makezero v1.2.0/go.mod h1:dxlPhHbDMC6N6xICzFBSK+4njQDdK8euNO0qjQMtGY4= -github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= -github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bkielbasa/cyclop v1.2.3 h1:faIVMIGDIANuGPWH031CZJTi2ymOQBULs9H21HSMa5w= github.com/bkielbasa/cyclop v1.2.3/go.mod h1:kHTwA9Q0uZqOADdupvcFJQtp/ksSnytRMe8ztxG8Fuo= github.com/blizzy78/varnamelen v0.8.0 h1:oqSblyuQvFsW1hbBHh1zfwrKe3kcSj0rnXkKzsQ089M= 
 github.com/blizzy78/varnamelen v0.8.0/go.mod h1:V9TzQZ4fLJ1DSrjVDfl89H7aMnTvKkApdHeyESmyR7k=
-github.com/bombsimon/wsl/v4 v4.5.0 h1:iZRsEvDdyhd2La0FVi5k6tYehpOR/R7qIUjmKk7N74A=
-github.com/bombsimon/wsl/v4 v4.5.0/go.mod h1:NOQ3aLF4nD7N5YPXMruR6ZXDOAqLoM0GEpLwTdvmOSc=
+github.com/bombsimon/wsl/v4 v4.6.0 h1:ew2R/N42su553DKTYqt3HSxaQN+uHQPv4xZ2MBmwaW4=
+github.com/bombsimon/wsl/v4 v4.6.0/go.mod h1:uV/+6BkffuzSAVYD+yGyld1AChO7/EuLrCF/8xTiapg=
 github.com/breml/bidichk v0.3.3 h1:WSM67ztRusf1sMoqH6/c4OBCUlRVTKq+CbSeo0R17sE=
 github.com/breml/bidichk v0.3.3/go.mod h1:ISbsut8OnjB367j5NseXEGGgO/th206dVa427kR8YTE=
 github.com/breml/errchkjson v0.4.1 h1:keFSS8D7A2T0haP9kzZTi7o26r7kE3vymjZNeNDRDwg=
@@ -102,27 +58,19 @@ github.com/butuzov/ireturn v0.3.1 h1:mFgbEI6m+9W8oP/oDdfA34dLisRFCj2G6o/yiI1yZrY
 github.com/butuzov/ireturn v0.3.1/go.mod h1:ZfRp+E7eJLC0NQmk1Nrm1LOrn/gQlOykv+cVPdiXH5M=
 github.com/butuzov/mirror v1.3.0 h1:HdWCXzmwlQHdVhwvsfBb2Au0r3HyINry3bDWLYXiKoc=
 github.com/butuzov/mirror v1.3.0/go.mod h1:AEij0Z8YMALaq4yQj9CPPVYOyJQyiexpQEQgihajRfI=
-github.com/catenacyber/perfsprint v0.8.1 h1:bGOHuzHe0IkoGeY831RW4aSlt1lPRd3WRAScSWOaV7E=
-github.com/catenacyber/perfsprint v0.8.1/go.mod h1:/wclWYompEyjUD2FuIIDVKNkqz7IgBIWXIH3V0Zol50=
+github.com/catenacyber/perfsprint v0.9.1 h1:5LlTp4RwTooQjJCvGEFV6XksZvWE7wCOUvjD2z0vls0=
+github.com/catenacyber/perfsprint v0.9.1/go.mod h1:q//VWC2fWbcdSLEY1R3l8n0zQCDPdE4IjZwyY1HMunM=
 github.com/ccojocar/zxcvbn-go v1.0.2 h1:na/czXU8RrhXO4EZme6eQJLR4PzcGsahsBOAwU6I3Vg=
 github.com/ccojocar/zxcvbn-go v1.0.2/go.mod h1:g1qkXtUSvHP8lhHp5GrSmTz6uWALGRMQdw6Qnz/hi60=
-github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
 github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/charithe/durationcheck v0.0.10 h1:wgw73BiocdBDQPik+zcEoBG/ob8uyBHf2iyoHGPf5w4=
 github.com/charithe/durationcheck v0.0.10/go.mod h1:bCWXb7gYRysD1CU3C+u4ceO49LoGOY1C1L6uouGNreQ=
 github.com/chavacava/garif v0.1.0 h1:2JHa3hbYf5D9dsgseMKAmc/MZ109otzgNFk5s87H9Pc=
 github.com/chavacava/garif v0.1.0/go.mod h1:XMyYCkEL58DF0oyW4qDjjnPWONs2HBqYKI+UIPD+Gww=
-github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
-github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
-github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
 github.com/ckaznocha/intrange v0.3.1 h1:j1onQyXvHUsPWujDH6WIjhyH26gkRt/txNlV7LspvJs=
 github.com/ckaznocha/intrange v0.3.1/go.mod h1:QVepyz1AkUoFQkpEqksSYpNpUo3c5W7nWh/s6SHIJJk=
-github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
 github.com/curioswitch/go-reassign v0.3.0 h1:dh3kpQHuADL3cobV/sSGETA8DOv457dwl+fbBAhrQPs=
 github.com/curioswitch/go-reassign v0.3.0/go.mod h1:nApPCCTtqLJN/s8HfItCcKV0jIPwluBOvZP+dsJGA88=
 github.com/daixiang0/gci v0.13.5 h1:kThgmH1yBmZSBCh1EJVxQ7JsHpm5Oms0AMed/0LaH4c=
@@ -134,10 +82,6 @@ github.com/denis-tingaikin/go-header v0.5.0 h1:SRdnP5ZKvcO9KKRP1KJrhFR3RrlGuD+42
 github.com/denis-tingaikin/go-header v0.5.0/go.mod h1:mMenU5bWrok6Wl2UsZjy+1okegmwQ3UgWl4V1D8gjlY=
 github.com/dlclark/regexp2 v1.11.0 h1:G/nrcoOa7ZXlpoa/91N3X7mM3r8eIlMBBJZvsz/mxKI=
 github.com/dlclark/regexp2 v1.11.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
-github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
-github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
 github.com/ettle/strcase v0.2.0 h1:fGNiVF21fHXpX1niBgk0aROov1LagYsOwV/xqKDKR/Q=
 github.com/ettle/strcase v0.2.0/go.mod h1:DajmHElDSaX76ITe3/VHVyMin4LWSJN5Z909Wp+ED1A=
 github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
@@ -146,30 +90,20 @@ github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4
 github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94=
 github.com/firefart/nonamedreturns v1.0.5 h1:tM+Me2ZaXs8tfdDw3X6DOX++wMCOqzYUho6tUTYIdRA=
 github.com/firefart/nonamedreturns v1.0.5/go.mod h1:gHJjDqhGM4WyPt639SOZs+G89Ko7QKH5R5BhnO6xJhw=
-github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE=
-github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps=
-github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI=
-github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
+github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
+github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
+github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M=
+github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
 github.com/fzipp/gocyclo v0.6.0 h1:lsblElZG7d3ALtGMx9fmxeTKZaLLpU8mET09yN4BBLo=
 github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA=
 github.com/ghostiam/protogetter v0.3.12 h1:xTPjH97iKph27vXRRKV0OCke5sAMoHPbVeVstdzmCLE=
 github.com/ghostiam/protogetter v0.3.12/go.mod h1:WZ0nw9pfzsgxuRsPOFQomgDVSWtDLJRfQJEhsGbmQMA=
-github.com/go-critic/go-critic v0.12.0 h1:iLosHZuye812wnkEz1Xu3aBwn5ocCPfc9yqmFG9pa6w=
-github.com/go-critic/go-critic v0.12.0/go.mod h1:DpE0P6OVc6JzVYzmM5gq5jMU31zLr4am5mB/VfFK64w=
-github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
-github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
-github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
-github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
-github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
-github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
-github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
+github.com/go-critic/go-critic v0.13.0 h1:kJzM7wzltQasSUXtYyTl6UaPVySO6GkaR1thFnJ6afY=
+github.com/go-critic/go-critic v0.13.0/go.mod h1:M/YeuJ3vOCQDnP2SU+ZhjgRzwzcBW87JqLpMJLrZDLI=
 github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
 github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
 github.com/go-quicktest/qt v1.101.0 h1:O1K29Txy5P2OK0dGo59b7b0LR6wKfIhttaAhHUyn7eI=
 github.com/go-quicktest/qt v1.101.0/go.mod h1:14Bz/f7NwaXPtdYEgzsx46kqSxVwTbzVZsDC26tQJow=
-github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
 github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
 github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
 github.com/go-toolsmith/astcast v1.1.0 h1:+JN9xZV1A+Re+95pgnMgDboWNVnIMMQXwfBwLRPgSC8=
@@ -199,36 +133,6 @@ github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
 github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
 github.com/gofrs/flock v0.12.1 h1:MTLVXXHf8ekldpJk3AKicLij9MdwOWkZ+a/jHHZby9E=
 github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeHCoD0=
-github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
-github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
-github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
-github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
-github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
-github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
-github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
-github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
-github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
-github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
-github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
 github.com/golangci/dupl v0.0.0-20250308024227-f665c8d69b32 h1:WUvBfQL6EW/40l6OmeSBYQJNSif4O11+bmWEz+C7FYw=
 github.com/golangci/dupl v0.0.0-20250308024227-f665c8d69b32/go.mod h1:NUw9Zr2Sy7+HxzdjIULge71wI6yEg1lWQr7Evcu8K0E=
 github.com/golangci/go-printf-func-name v0.1.0 h1:dVokQP+NMTO7jwO4bwsRwLWeudOVUPPyAKJuzv8pEJU=
@@ -245,44 +149,23 @@ github.com/golangci/revgrep v0.8.0 h1:EZBctwbVd0aMeRnNUsFogoyayvKHyxlV3CdUA46FX2
 github.com/golangci/revgrep v0.8.0/go.mod h1:U4R/s9dlXZsg8uJmaR1GrloUr14D7qDl8gi2iPXJH8k=
 github.com/golangci/unconvert v0.0.0-20240309020433-c5143eacb3ed h1:IURFTjxeTfNFP0hTEi1YKjB/ub8zkpaOqFFMApi2EAs=
 github.com/golangci/unconvert v0.0.0-20240309020433-c5143eacb3ed/go.mod h1:XLXN8bNw4CGRPaqgl3bv/lhz7bsGPh4/xSaMTbo2vkQ=
-github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
-github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
-github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
-github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
-github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
+github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
 github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg=
 github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
-github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
-github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
-github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
 github.com/gordonklaus/ineffassign v0.1.0 h1:y2Gd/9I7MdY1oEIt+n+rowjBNDcLQq3RsH5hwJd0f9s=
 github.com/gordonklaus/ineffassign v0.1.0/go.mod h1:Qcp2HIAYhR7mNUVSIxZww3Guk4it82ghYcEXIAk+QT0=
 github.com/gostaticanalysis/analysisutil v0.7.1 h1:ZMCjoue3DtDWQ5WyU16YbjbQEQ3VuzwxALrpYd+HeKk=
 github.com/gostaticanalysis/analysisutil v0.7.1/go.mod h1:v21E3hY37WKMGSnbsw2S/ojApNWb6C1//mXO48CXbVc=
 github.com/gostaticanalysis/comment v1.4.1/go.mod h1:ih6ZxzTHLdadaiSnF5WY3dxUoXfXAlTaRzuaNDlSado=
-github.com/gostaticanalysis/comment v1.4.2 h1:hlnx5+S2fY9Zo9ePo4AhgYsYHbM2+eAv8m/s1JiCd6Q=
 github.com/gostaticanalysis/comment v1.4.2/go.mod h1:KLUTGDv6HOCotCH8h2erHKmpci2ZoR8VPu34YA2uzdM=
+github.com/gostaticanalysis/comment v1.5.0 h1:X82FLl+TswsUMpMh17srGRuKaaXprTaytmEpgnKIDu8=
+github.com/gostaticanalysis/comment v1.5.0/go.mod h1:V6eb3gpCv9GNVqb6amXzEUX3jXLVK/AdA+IrAMSqvEc=
 github.com/gostaticanalysis/forcetypeassert v0.2.0 h1:uSnWrrUEYDr86OCxWa4/Tp2jeYDlogZiZHzGkWFefTk=
 github.com/gostaticanalysis/forcetypeassert v0.2.0/go.mod h1:M5iPavzE9pPqWyeiVXSFghQjljW1+l/Uke3PXHS6ILY=
 github.com/gostaticanalysis/nilerr v0.1.1 h1:ThE+hJP0fEp4zWLkWHWcRyI2Od0p7DlgYG3Uqrmrcpk=
@@ -297,15 +180,10 @@ github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b
 github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
 github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY=
 github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
-github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
 github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
 github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
-github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
-github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
 github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM=
 github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg=
-github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
 github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
 github.com/jgautheron/goconst v1.7.1 h1:VpdAG7Ca7yvvJk5n8dMwQhfEZJh95kl/Hl9S1OI5Jkk=
@@ -314,32 +192,18 @@ github.com/jingyugao/rowserrcheck v1.1.1 h1:zibz55j/MJtLsjP1OF4bSdgXxwL1b+Vn7Tjz
 github.com/jingyugao/rowserrcheck v1.1.1/go.mod h1:4yvlZSDb3IyDTUZJUmpZfm2Hwok+Dtp+nu2qOq+er9c=
 github.com/jjti/go-spancheck v0.6.4 h1:Tl7gQpYf4/TMU7AT84MN83/6PutY21Nb9fuQjFTpRRc=
 github.com/jjti/go-spancheck v0.6.4/go.mod h1:yAEYdKJ2lRkDA8g7X+oKUHXOWVAXSBJRv04OhF+QUjk=
-github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
-github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
-github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
-github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
-github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
-github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
-github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
 github.com/julz/importas v0.2.0 h1:y+MJN/UdL63QbFJHws9BVC5RpA2iq0kpjrFajTGivjQ=
 github.com/julz/importas v0.2.0/go.mod h1:pThlt589EnCYtMnmhmRYY/qn9lCf/frPOK+WMx3xiJY=
 github.com/karamaru-alpha/copyloopvar v1.2.1 h1:wmZaZYIjnJ0b5UoKDjUHrikcV0zuPyyxI4SVplLd2CI=
 github.com/karamaru-alpha/copyloopvar v1.2.1/go.mod h1:nFmMlFNlClC2BPvNaHMdkirmTJxVCY0lhxBtlfOypMM=
-github.com/kisielk/errcheck v1.8.0 h1:ZX/URYa7ilESY19ik/vBmCn6zdGQLxACwjAcWbHlYlg=
-github.com/kisielk/errcheck v1.8.0/go.mod h1:1kLL+jV4e+CFfueBmI1dSK2ADDyQnlrnrY/FqKluHJQ=
-github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/kkHAIKE/contextcheck v1.1.5 h1:CdnJh63tcDe53vG+RebdpdXJTc9atMgGqdx8LXxiilg=
-github.com/kkHAIKE/contextcheck v1.1.5/go.mod h1:O930cpht4xb1YQpK+1+AgoM3mFsvxr7uyFptcnWTYUA=
-github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
-github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kisielk/errcheck v1.9.0 h1:9xt1zI9EBfcYBvdU1nVrzMzzUPUtPKs9bVSIM3TAb3M=
+github.com/kisielk/errcheck v1.9.0/go.mod h1:kQxWMMVZgIkDq7U8xtG/n2juOjbLgZtedi0D+/VL/i8=
+github.com/kkHAIKE/contextcheck v1.1.6 h1:7HIyRcnyzxL9Lz06NGhiKvenXq7Zw6Q0UQu/ttjfJCE=
+github.com/kkHAIKE/contextcheck v1.1.6/go.mod h1:3dDbMRNBFaq8HFXWC1JyvDSPm43CmE6IuHam8Wr0rkg=
+github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
+github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
 github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
 github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
-github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
 github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
 github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
 github.com/kulti/thelper v0.6.3 h1:ElhKf+AlItIu+xGnI990no4cE2+XaSu1ULymV2Yulxs=
@@ -348,8 +212,8 @@ github.com/kunwardeep/paralleltest v1.0.10 h1:wrodoaKYzS2mdNVnc4/w31YaXFtsc21PCT
 github.com/kunwardeep/paralleltest v1.0.10/go.mod h1:2C7s65hONVqY7Q5Efj5aLzRCNLjw2h4eMc9EcypGjcY=
 github.com/lasiar/canonicalheader v1.1.2 h1:vZ5uqwvDbyJCnMhmFYimgMZnJMjwljN5VGY0VKbMXb4=
 github.com/lasiar/canonicalheader v1.1.2/go.mod h1:qJCeLFS0G/QlLQ506T+Fk/fWMa2VmBUiEI2cuMK4djI=
-github.com/ldez/exptostd v0.4.1 h1:DIollgQ3LWZMp3HJbSXsdE2giJxMfjyHj3eX4oiD6JU=
-github.com/ldez/exptostd v0.4.1/go.mod h1:iZBRYaUmcW5jwCR3KROEZ1KivQQp6PHXbDPk9hqJKCQ=
+github.com/ldez/exptostd v0.4.2 h1:l5pOzHBz8mFOlbcifTxzfyYbgEmoUqjxLFHZkjlbHXs=
+github.com/ldez/exptostd v0.4.2/go.mod h1:iZBRYaUmcW5jwCR3KROEZ1KivQQp6PHXbDPk9hqJKCQ=
 github.com/ldez/gomoddirectives v0.6.1 h1:Z+PxGAY+217f/bSGjNZr/b2KTXcyYLgiWI6geMBN2Qc=
 github.com/ldez/gomoddirectives v0.6.1/go.mod h1:cVBiu3AHR9V31em9u2kwfMKD43ayN5/XDgr+cdaFaKs=
 github.com/ldez/grignotin v0.9.0 h1:MgOEmjZIVNn6p5wPaGp/0OKWyvq42KnzAt/DAb8O4Ow=
@@ -360,10 +224,8 @@ github.com/ldez/usetesting v0.4.2 h1:J2WwbrFGk3wx4cZwSMiCQQ00kjGR0+tuuyW0Lqm4lwA
 github.com/ldez/usetesting v0.4.2/go.mod h1:eEs46T3PpQ+9RgN9VjpY6qWdiw2/QmfiDeWmdZdrjIQ=
 github.com/leonklingele/grouper v1.1.2 h1:o1ARBDLOmmasUaNDesWqWCIFH3u7hoFlM84YrjT3mIY=
 github.com/leonklingele/grouper v1.1.2/go.mod h1:6D0M/HVkhs2yRKRFZUoGjeDy7EZTfFBE9gl4kjmIGkA=
-github.com/macabu/inamedparam v0.1.3 h1:2tk/phHkMlEL/1GNe/Yf6kkR/hkcUdAEY3L0hjYV1Mk=
-github.com/macabu/inamedparam v0.1.3/go.mod h1:93FLICAIk/quk7eaPPQvbzihUdn/QkGDwIZEoLtpH6I=
-github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
-github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
+github.com/macabu/inamedparam v0.2.0 h1:VyPYpOc10nkhI2qeNUdh3Zket4fcZjEWe35poddBCpE=
+github.com/macabu/inamedparam v0.2.0/go.mod h1:+Pee9/YfGe5LJ62pYXqB89lJ+0k5bsR8Wgz/C0Zlq3U=
 github.com/maratori/testableexamples v1.0.0 h1:dU5alXRrD8WKSjOUnmJZuzdxWOEQ57+7s93SLMxb2vI=
 github.com/maratori/testableexamples v1.0.0/go.mod h1:4rhjL1n20TUTT4vdh3RDqSizKLyXp7K2u6HgraZCGzE=
 github.com/maratori/testpackage v1.1.1 h1:S58XVV5AD7HADMmD0fNnziNHqKvSdDuEKdPD1rNTU04=
@@ -379,31 +241,22 @@ github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D
 github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
 github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
 github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
-github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
-github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/mgechev/revive v1.6.1 h1:ncK0ZCMWtb8GXwVAmk+IeWF2ULIDsvRxSRfg5sTwQ2w=
-github.com/mgechev/revive v1.6.1/go.mod h1:/2tfHWVO8UQi/hqJsIYNEKELi+DJy/e+PQpLgTB1v88=
+github.com/mgechev/revive v1.7.0 h1:JyeQ4yO5K8aZhIKf5rec56u0376h8AlKNQEmjfkjKlY=
+github.com/mgechev/revive v1.7.0/go.mod h1:qZnwcNhoguE58dfi96IJeSTPeZQejNeoMQLUZGi4SW4=
 github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
 github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
-github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
-github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
 github.com/moricho/tparallel v0.3.2 h1:odr8aZVFA3NZrNybggMkYO3rgPRcqjeQUlBBFVxKHTI=
 github.com/moricho/tparallel v0.3.2/go.mod h1:OQ+K3b4Ln3l2TZveGCywybl68glfLEwFGqvnjok8b+U=
-github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
 github.com/nakabonne/nestif v0.3.1 h1:wm28nZjhQY5HyYPx+weN3Q65k6ilSBxDb8v5S81B81U=
 github.com/nakabonne/nestif v0.3.1/go.mod h1:9EtoZochLn5iUprVDmDjqGKPofoUEBL8U4Ngq6aY7OE=
 github.com/nishanths/exhaustive v0.12.0 h1:vIY9sALmw6T/yxiASewa4TQcFsVYZQQRUQJhKRf3Swg=
 github.com/nishanths/exhaustive v0.12.0/go.mod h1:mEZ95wPIZW+x8kC4TgC+9YCUgiST7ecevsVDTgc2obs=
 github.com/nishanths/predeclared v0.2.2 h1:V2EPdZPliZymNAn79T8RkNApBjMmVKh5XRpLm/w98Vk=
 github.com/nishanths/predeclared v0.2.2/go.mod h1:RROzoN6TnGQupbC+lqggsOlcgysk3LMK/HI84Mp280c=
-github.com/nunnatsa/ginkgolinter v0.19.0 h1:CnHRFAeBS3LdLI9h+Jidbcc5KH71GKOmaBZQk8Srnto=
-github.com/nunnatsa/ginkgolinter v0.19.0/go.mod h1:jkQ3naZDmxaZMXPWaS9rblH+i+GWXQCaS/JFIWcOH2s=
+github.com/nunnatsa/ginkgolinter v0.19.1 h1:mjwbOlDQxZi9Cal+KfbEJTCz327OLNfwNvoZ70NJ+c4=
+github.com/nunnatsa/ginkgolinter v0.19.1/go.mod h1:jkQ3naZDmxaZMXPWaS9rblH+i+GWXQCaS/JFIWcOH2s=
 github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
 github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
 github.com/onsi/ginkgo/v2 v2.22.2 h1:/3X8Panh8/WwhU/3Ssa6rCKqPLuAkVY2I0RoyDLySlU=
@@ -417,44 +270,24 @@ github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJ
 github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs=
 github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo=
 github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc=
-github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8=
-github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
 github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M=
 github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc=
-github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
-github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/polyfloyd/go-errorlint v1.7.1 h1:RyLVXIbosq1gBdk/pChWA8zWYLsq9UEw7a1L5TVMCnA=
 github.com/polyfloyd/go-errorlint v1.7.1/go.mod h1:aXjNb1x2TNhoLsk26iv1yl7a+zTnXPhwEMtEXukiLR8=
 github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g=
 github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U=
-github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
-github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
-github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
-github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk=
-github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
-github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
-github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
-github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
-github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4=
-github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
-github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
-github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU=
-github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/quasilyte/go-ruleguard v0.4.3-0.20240823090925-0fe6f58b47b1 h1:+Wl/0aFp0hpuHM3H//KMft64WQ1yX9LdJY64Qm/gFCo=
-github.com/quasilyte/go-ruleguard v0.4.3-0.20240823090925-0fe6f58b47b1/go.mod h1:GJLgqsLeo4qgavUoL8JeGFNS7qcisx3awV/w9eWTmNI=
+github.com/prometheus/client_golang v1.21.1 h1:DOvXXTqVzvkIewV/CDPFdejpMCGeMcbGCQ8YOmu+Ibk=
+github.com/prometheus/client_golang v1.21.1/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuFkkaBvMb09mIfe0Zgg=
+github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
+github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
+github.com/prometheus/common v0.63.0 h1:YR/EIY1o3mEFP/kZCD7iDMnLPlGyuU2Gb3HIcXnA98k=
+github.com/prometheus/common v0.63.0/go.mod h1:VVFF/fBIoToEnWRVkYoXEkq3R3paCoxG9PXP74SnV18=
+github.com/prometheus/procfs v0.16.0 h1:xh6oHhKwnOJKMYiYBDWmkHqQPyiY40sny36Cmx2bbsM=
+github.com/prometheus/procfs v0.16.0/go.mod h1:8veyXUu3nGP7oaCxhX6yeaM5u4stL2FeMXnCqhDthZg=
+github.com/quasilyte/go-ruleguard v0.4.4 h1:53DncefIeLX3qEpjzlS1lyUmQoUEeOWPFWqaTJq9eAQ=
+github.com/quasilyte/go-ruleguard v0.4.4/go.mod h1:Vl05zJ538vcEEwu16V/Hdu7IYZWyKSwIy4c88Ro1kRE=
 github.com/quasilyte/go-ruleguard/dsl v0.3.22 h1:wd8zkOhSNr+I+8Qeciml08ivDt1pSXe60+5DqOpCjPE=
 github.com/quasilyte/go-ruleguard/dsl v0.3.22/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU=
 github.com/quasilyte/gogrep v0.5.0 h1:eTKODPXbI8ffJMN+W2aE0+oL0z/nh8/5eNdiO34SOAo=
@@ -468,14 +301,15 @@ github.com/raeperd/recvcheck v0.2.0/go.mod h1:n04eYkwIR0JbgD73wT8wL4JjPC3wm0nFtz
 github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
 github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
 github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
-github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
-github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
+github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
+github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/ryancurrah/gomodguard v1.3.5 h1:cShyguSwUEeC0jS7ylOiG/idnd1TpJ1LfHGpV3oJmPU=
-github.com/ryancurrah/gomodguard v1.3.5/go.mod h1:MXlEPQRxgfPQa62O8wzK3Ozbkv9Rkqr+wKjSxTdsNJE=
+github.com/ryancurrah/gomodguard v1.4.1 h1:eWC8eUMNZ/wM/PWuZBv7JxxqT5fiIKSIyTvjb7Elr+g=
+github.com/ryancurrah/gomodguard v1.4.1/go.mod h1:qnMJwV1hX9m+YJseXEBhd2s90+1Xn6x9dLz11ualI1I=
 github.com/ryanrolds/sqlclosecheck v0.5.1 h1:dibWW826u0P8jNLsLN+En7+RqWWTYrjCB9fJfSfdyCU=
 github.com/ryanrolds/sqlclosecheck v0.5.1/go.mod h1:2g3dUjoS6AL4huFdv6wn55WpLIDjY7ZgUR4J8HOO/XQ=
+github.com/sagikazarmark/locafero v0.7.0 h1:5MqpDsTGNDhY8sGp0Aowyf0qKsPrhewaLSsFaodPcyo=
+github.com/sagikazarmark/locafero v0.7.0/go.mod h1:2za3Cg5rMaTMoG/2Ulr9AwtFaIppKXTRYnozin4aB5k=
 github.com/sanposhiho/wastedassign/v2 v2.1.0 h1:crurBF7fJKIORrV85u9UUpePDYGWnwvv3+A96WvwXT0=
 github.com/sanposhiho/wastedassign/v2 v2.1.0/go.mod h1:+oSmSC+9bQ+VUAxA66nBb0Z7N8CK7mscKTDYC6aIek4=
 github.com/santhosh-tekuri/jsonschema/v6 v6.0.1 h1:PKK9DyHxif4LZo+uQSgXNqs0jj5+xZwwfKHgph2lxBw=
@@ -488,9 +322,6 @@ github.com/securego/gosec/v2 v2.22.2 h1:IXbuI7cJninj0nRpZSLCUlotsj8jGusohfONMrHo
 github.com/securego/gosec/v2 v2.22.2/go.mod h1:UEBGA+dSKb+VqM6TdehR7lnQtIIMorYJ4/9CW1KVQBE=
 github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
 github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ=
-github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
-github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
-github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
 github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
 github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
 github.com/sivchari/containedctx v1.0.3 h1:x+etemjbsh2fB5ewm5FeLNi5bUjK0V8n0RB+Wwfd0XE=
@@ -499,32 +330,30 @@ github.com/sivchari/tenv v1.12.1 h1:+E0QzjktdnExv/wwsnnyk4oqZBUfuh89YMQT1cyuvSY=
 github.com/sivchari/tenv v1.12.1/go.mod h1:1LjSOUCc25snIr5n3DtGGrENhX3LuWefcplwVGC24mw=
 github.com/sonatard/noctx v0.1.0 h1:JjqOc2WN16ISWAjAk8M5ej0RfExEXtkEyExl2hLW+OM=
 github.com/sonatard/noctx v0.1.0/go.mod h1:0RvBxqY8D4j9cTTTWE8ylt2vqj2EPI8fHmrxHdsaZ2c=
+github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
+github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
 github.com/sourcegraph/go-diff v0.7.0 h1:9uLlrd5T46OXs5qpp8L/MTltk0zikUGi0sNNyCpA8G0=
 github.com/sourcegraph/go-diff v0.7.0/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs=
-github.com/spf13/afero v1.12.0 h1:UcOPyRBYczmFn6yvphxkn9ZEOY65cpwGKb5mL36mrqs=
-github.com/spf13/afero v1.12.0/go.mod h1:ZTlWwG4/ahT8W7T0WQ5uYmjI9duaLQGy3Q2OAl4sk/4=
-github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w=
-github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU=
-github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
-github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
-github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
-github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
+github.com/spf13/afero v1.14.0 h1:9tH6MapGnn/j0eb0yIXiLjERO8RB6xIVZRDCX7PtqWA=
+github.com/spf13/afero v1.14.0/go.mod h1:acJQ8t0ohCGuMN3O+Pv0V0hgMxNYDlvdk+VTfyZmbYo=
+github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y=
+github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
+github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=
+github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0=
 github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
 github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
 github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/spf13/viper v1.12.0 h1:CZ7eSOd3kZoaYDLbXnmzgQI5RlciuXBMA+18HwHRfZQ=
-github.com/spf13/viper v1.12.0/go.mod h1:b6COn30jlNxbm/V2IqWiNWkJ+vZNiMNksliPCiuKtSI=
+github.com/spf13/viper v1.20.0 h1:zrxIyR3RQIOsarIrgL8+sAvALXul9jeEPa06Y0Ph6vY=
+github.com/spf13/viper v1.20.0/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4=
 github.com/ssgreg/nlreturn/v2 v2.2.1 h1:X4XDI7jstt3ySqGU86YGAURbxw3oTDPK9sPEi6YEwQ0=
 github.com/ssgreg/nlreturn/v2 v2.2.1/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I=
 github.com/stbenjam/no-sprintf-host-port v0.2.0 h1:i8pxvGrt1+4G0czLr/WnmyH7zbZ8Bg8etvARQ1rpyl4=
 github.com/stbenjam/no-sprintf-host-port v0.2.0/go.mod h1:eL0bQ9PasS0hsyTyfTjjG+E80QIyPnBVQbYZyv20Jfk=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
 github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
 github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
 github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
-github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
 github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
 github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
@@ -534,18 +363,18 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO
 github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
 github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
 github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
-github.com/subosito/gotenv v1.4.1 h1:jyEFiXpy21Wm81FBN71l9VoMMV8H8jG+qIK3GCpY6Qs=
-github.com/subosito/gotenv v1.4.1/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0=
-github.com/tdakkota/asciicheck v0.4.0 h1:VZ13Itw4k1i7d+dpDSNS8Op645XgGHpkCEh/WHicgWw=
-github.com/tdakkota/asciicheck v0.4.0/go.mod h1:0k7M3rCfRXb0Z6bwgvkEIMleKH3kXNz9UqJ9Xuqopr8=
+github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
+github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
+github.com/tdakkota/asciicheck v0.4.1 h1:bm0tbcmi0jezRA2b5kg4ozmMuGAFotKI3RZfrhfovg8=
+github.com/tdakkota/asciicheck v0.4.1/go.mod h1:0k7M3rCfRXb0Z6bwgvkEIMleKH3kXNz9UqJ9Xuqopr8=
 github.com/tenntenn/modver v1.0.1 h1:2klLppGhDgzJrScMpkj9Ujy3rXPUspSjAcev9tSEBgA=
 github.com/tenntenn/modver v1.0.1/go.mod h1:bePIyQPb7UeioSRkw3Q0XeMhYZSMx9B8ePqg6SAMGH0=
 github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3 h1:f+jULpRQGxTSkNYKJ51yaw6ChIqO+Je8UqsTKN/cDag=
 github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3/go.mod h1:ON8b8w4BN/kE1EOhwT0o+d62W65a6aPw1nouo9LMgyY=
-github.com/tetafro/godot v1.4.20 h1:z/p8Ek55UdNvzt4TFn2zx2KscpW4rWqcnUrdmvWJj7E=
-github.com/tetafro/godot v1.4.20/go.mod h1:2oVxTBSftRTh4+MVfUaUXR6bn2GDXCaMcOG4Dk3rfio=
-github.com/timakin/bodyclose v0.0.0-20241017074812-ed6a65f985e3 h1:y4mJRFlM6fUyPhoXuFg/Yu02fg/nIPFMOY8tOqppoFg=
-github.com/timakin/bodyclose v0.0.0-20241017074812-ed6a65f985e3/go.mod h1:mkjARE7Yr8qU23YcGMSALbIxTQ9r9QBVahQOBRfU460=
+github.com/tetafro/godot v1.5.0 h1:aNwfVI4I3+gdxjMgYPus9eHmoBeJIbnajOyqZYStzuw=
+github.com/tetafro/godot v1.5.0/go.mod h1:2oVxTBSftRTh4+MVfUaUXR6bn2GDXCaMcOG4Dk3rfio=
+github.com/timakin/bodyclose v0.0.0-20241222091800-1db5c5ca4d67 h1:9LPGD+jzxMlnk5r6+hJnar67cgpDIz/iyD+rfl5r2Vk=
+github.com/timakin/bodyclose v0.0.0-20241222091800-1db5c5ca4d67/go.mod h1:mkjARE7Yr8qU23YcGMSALbIxTQ9r9QBVahQOBRfU460=
 github.com/timonwong/loggercheck v0.10.1 h1:uVZYClxQFpw55eh+PIoqM7uAOHMrhVcDoWDery9R8Lg=
 github.com/timonwong/loggercheck v0.10.1/go.mod h1:HEAWU8djynujaAVX7QI65Myb8qgfcZ1uKbdpg3ZzKl8=
 github.com/tomarrell/wrapcheck/v2 v2.10.0 h1:SzRCryzy4IrAH7bVGG4cK40tNUhmVmMDuJujy4XwYDg=
@@ -560,8 +389,8 @@ github.com/uudashr/gocognit v1.2.0 h1:3BU9aMr1xbhPlvJLSydKwdLN3tEUUrzPSSM8S4hDYR
 github.com/uudashr/gocognit v1.2.0/go.mod h1:k/DdKPI6XBZO1q7HgoV2juESI2/Ofj9AcHPZhBBdrTU=
 github.com/uudashr/iface v1.3.1 h1:bA51vmVx1UIhiIsQFSNq6GZ6VPTk3WNMZgRiCe9R29U=
 github.com/uudashr/iface v1.3.1/go.mod h1:4QvspiRd3JLPAEXBQ9AiZpLbJlrWWgRChOKDJEuQTdg=
-github.com/xen0n/gosmopolitan v1.2.2 h1:/p2KTnMzwRexIW8GlKawsTWOxn7UHA+jCMF/V8HHtvU=
-github.com/xen0n/gosmopolitan v1.2.2/go.mod h1:7XX7Mj61uLYrj0qmeN0zi7XDon9JRAEhYQqAPLVNTeg=
+github.com/xen0n/gosmopolitan v1.3.0 h1:zAZI1zefvo7gcpbCOrPSHJZJYA9ZgLfJqtKzZ5pHqQM=
+github.com/xen0n/gosmopolitan v1.3.0/go.mod h1:rckfr5T6o4lBtM1ga7mLGKZmLxswUoH1zxHgNXOsEt4=
 github.com/yagipy/maintidx v1.0.0 h1:h5NvIsCz+nRDapQ0exNv4aJ0yXSI0420omVANTv3GJM=
 github.com/yagipy/maintidx v1.0.0/go.mod h1:0qNf/I/CCZXSMhsRsrEPDZ+DkekpKLXAJfsTACwgXLk=
 github.com/yeya24/promlinter v0.3.0 h1:JVDbMp08lVCP7Y6NP3qHroGAO6z2yGKQtS5JsjqtoFs=
@@ -569,7 +398,6 @@ github.com/yeya24/promlinter v0.3.0/go.mod h1:cDfJQQYv9uYciW60QT0eeHlFodotkYZlL+
 github.com/ykadowak/zerologlint v0.1.5 h1:Gy/fMz1dFQN9JZTPjv1hxEk+sRWm05row04Yoolgdiw=
 github.com/ykadowak/zerologlint v0.1.5/go.mod h1:KaUskqF3e/v59oPmdq1U1DnKcuHokl2/K1U4pmIELKg=
 github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
@@ -583,64 +411,26 @@ go-simpler.org/musttag v0.13.0 h1:Q/YAW0AHvaoaIbsPj3bvEI5/QFP7w696IMUpnKXQfCE=
 go-simpler.org/musttag v0.13.0/go.mod h1:FTzIGeK6OkKlUDVpj0iQUXZLUO1Js9+mvykDQy9C5yM=
 go-simpler.org/sloglint v0.9.0 h1:/40NQtjRx9txvsB/RN022KsUJU+zaaSb/9q9BSefSrE=
 go-simpler.org/sloglint v0.9.0/go.mod h1:G/OrAF6uxj48sHahCzrbarVMptL2kjWTaUeC8+fOGww=
-go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
-go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
-go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
-go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
 go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs=
 go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8=
-go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI=
-go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
-go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
-go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
-go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60=
-go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg=
-golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
+go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
+go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
+go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
+go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
+go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
 golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
 golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
-golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
-golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
-golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
-golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
-golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
 golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk=
 golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY=
 golang.org/x/exp/typeparams v0.0.0-20220428152302-39d4317da171/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
 golang.org/x/exp/typeparams v0.0.0-20230203172020-98cc5a0785f9/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
-golang.org/x/exp/typeparams v0.0.0-20250210185358-939b2ce775ac h1:TSSpLIG4v+p0rPv1pNOQtl1I8knsO4S9trOxNMOLVP4=
-golang.org/x/exp/typeparams v0.0.0-20250210185358-939b2ce775ac/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
-golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
-golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
-golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
-golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
-golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
-golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
-golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
-golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
-golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
-golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/exp/typeparams v0.0.0-20250305212735-054e65f0b394 h1:VI4qDpTkfFaCXEPrbojidLgVQhj2x4nzTccG0hjaLlU=
+golang.org/x/exp/typeparams v0.0.0-20250305212735-054e65f0b394/go.mod h1:LKZHyeOpPuZcMgxeHjJp4p5yvxrCX1xDvH10zYHhjjQ=
 golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
@@ -654,38 +444,13 @@ golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
 golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
 golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU=
 golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
-golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
-golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
 golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
 golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
 golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
 golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
-golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
 golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
@@ -696,22 +461,10 @@ golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
 golang.org/x/net v0.16.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
 golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c=
 golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
-golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -719,49 +472,18 @@ golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
 golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
 golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
 golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
-golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20211105183446-c75c47738b0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -781,9 +503,7 @@ golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
 golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
 golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
 golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
-golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
@@ -793,55 +513,14 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
 golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
 golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
 golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
-golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM=
-golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY=
-golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
+golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
-golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
-golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
 golang.org/x/tools v0.0.0-20200324003944-a576cf524670/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
 golang.org/x/tools v0.0.0-20200329025819-fd4102a86c65/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
-golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
-golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
 golang.org/x/tools v0.0.0-20200724022722-7017fd6b1305/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
 golang.org/x/tools v0.0.0-20200820010801-b793a1359eac/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
 golang.org/x/tools v0.0.0-20201023174141-c8cfbd0f21e6/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.1.1-0.20210205202024-ef80cdb6ec6d/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU=
 golang.org/x/tools v0.1.1-0.20210302220138-2ac05c832e1a/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU=
@@ -860,115 +539,20 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
-google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
-google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
-google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
-google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
-google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
-google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
-google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
-google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
-google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
-google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto
v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.36.4 h1:6A3ZDJHn/eNqc1i+IdefRzy/9PokBTPvcqMySR7NNIM= -google.golang.org/protobuf v1.36.4/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= +google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 
v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= -gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.6.1 h1:R094WgE8K4JirYjBaOpz/AvTyUu/3wbmAoskKN/pxTI= honnef.co/go/tools v0.6.1/go.mod h1:3puzxxljPCe8RGJX7BIy1plGbxEOZni5mR2aXe3/uk4= mvdan.cc/gofumpt v0.7.0 h1:bg91ttqXmi9y2xawvkuMXyvAA/1ZGJqYAEGjXuP0JXU= mvdan.cc/gofumpt v0.7.0/go.mod h1:txVFJy/Sc/mvaycET54pV8SW8gWxTlUuGHVEcncmNUo= -mvdan.cc/unparam v0.0.0-20240528143540-8a5130ca722f h1:lMpcwN6GxNbWtbpI1+xzFLSW8XzX0u72NttUGVFjO3U= -mvdan.cc/unparam v0.0.0-20240528143540-8a5130ca722f/go.mod h1:RSLa7mKKCNeTTMHBw5Hsy2rfJmd6O2ivt9Dw9ZqCQpQ= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +mvdan.cc/unparam v0.0.0-20250301125049-0df0534333a4 h1:WjUu4yQoT5BHT1w8Zu56SP8367OuBV5jvo+4Ulppyf8= +mvdan.cc/unparam v0.0.0-20250301125049-0df0534333a4/go.mod h1:rthT7OuvRbaGcd5ginj6dA2oLE7YNlta9qhBNNdCaLE= diff --git a/tools/vendor/4d63.com/gocheckcompilerdirectives/checkcompilerdirectives/checkcompilerdirectives.go b/tools/vendor/4d63.com/gocheckcompilerdirectives/checkcompilerdirectives/checkcompilerdirectives.go index 19948c454..e719155d9 100644 --- 
a/tools/vendor/4d63.com/gocheckcompilerdirectives/checkcompilerdirectives/checkcompilerdirectives.go +++ b/tools/vendor/4d63.com/gocheckcompilerdirectives/checkcompilerdirectives/checkcompilerdirectives.go @@ -72,12 +72,11 @@ func isKnown(directive string) bool { return false } +// Found by running the following command on the source of go. +// git grep -o -E -h '//go:[a-z_-]+' -- ':!**/*_test.go' ':!test/' ':!**/testdata/**' | sort -u +// See https://pkg.go.dev/cmd/compile@go1.24#hdr-Compiler_Directives var known = []string{ - // Found by running the following command on the source of go. - // git grep -o -E -h '//go:[a-z_]+' -- ':!**/*_test.go' ':!test/' ':!**/testdata/**' | sort -u - "binary", "build", - "buildsomethingelse", "cgo_dynamic_linker", "cgo_export_dynamic", "cgo_export_static", @@ -85,10 +84,10 @@ var known = []string{ "cgo_import_static", "cgo_ldflag", "cgo_unsafe_args", + "debug", "embed", "generate", "linkname", - "name", "nocheckptr", "noescape", "noinline", @@ -101,5 +100,7 @@ var known = []string{ "systemstack", "uintptrescapes", "uintptrkeepalive", + "wasmimport", + "wasmexport", "yeswritebarrierrec", } diff --git a/tools/vendor/github.com/Antonboom/nilnil/pkg/analyzer/analyzer.go b/tools/vendor/github.com/Antonboom/nilnil/pkg/analyzer/analyzer.go index 703cc1c39..09ca1a8d9 100644 --- a/tools/vendor/github.com/Antonboom/nilnil/pkg/analyzer/analyzer.go +++ b/tools/vendor/github.com/Antonboom/nilnil/pkg/analyzer/analyzer.go @@ -31,7 +31,9 @@ func New() *analysis.Analyzer { } a.Flags.Var(&n.checkedTypes, "checked-types", "comma separated list of return types to check") a.Flags.BoolVar(&n.detectOpposite, "detect-opposite", false, - "in addition, detect opposite situation (simultaneous return of non-nil error and valid value)") + "in addition, to detect opposite situation (simultaneous return of non-nil error and valid value)") + a.Flags.BoolVar(&n.onlyTwo, "only-two", true, + "to check functions with only two return values") return a } @@ -39,12 +41,14 @@ func New() *analysis.Analyzer { type nilNil struct { checkedTypes checkedTypes detectOpposite bool + onlyTwo bool } func newNilNil() *nilNil { return &nilNil{ checkedTypes: newDefaultCheckedTypes(), detectOpposite: false, + onlyTwo: true, } } @@ -77,37 +81,47 @@ func (n *nilNil) run(pass *analysis.Pass) (interface{}, error) { case *ast.ReturnStmt: ft := fs.Top() // Current function. - if !push || len(v.Results) != 2 || ft == nil || ft.Results == nil || len(ft.Results.List) != 2 { + if !push { return false } - - fRes1Type := pass.TypesInfo.TypeOf(ft.Results.List[0].Type) - if fRes1Type == nil { + if len(v.Results) < 2 { return false } - - fRes2Type := pass.TypesInfo.TypeOf(ft.Results.List[1].Type) - if fRes2Type == nil { + if (ft == nil) || (ft.Results == nil) || (len(ft.Results.List) != len(v.Results)) { + // Unreachable. 
return false } - ok, zv := n.isDangerNilType(fRes1Type) - if !(ok && implementsError(fRes2Type)) { - return false + lastIdx := len(ft.Results.List) - 1 + if n.onlyTwo { + lastIdx = 1 } - retVal, retErr := v.Results[0], v.Results[1] - - if ((zv == zeroValueNil) && isNil(pass, retVal) && isNil(pass, retErr)) || - ((zv == zeroValueZero) && isZero(retVal) && isNil(pass, retErr)) { - pass.Reportf(v.Pos(), nilNilReportMsg) + lastFtRes := ft.Results.List[lastIdx] + if !implementsError(pass.TypesInfo.TypeOf(lastFtRes.Type)) { return false } - if n.detectOpposite && (((zv == zeroValueNil) && !isNil(pass, retVal) && !isNil(pass, retErr)) || - ((zv == zeroValueZero) && !isZero(retVal) && !isNil(pass, retErr))) { - pass.Reportf(v.Pos(), notNilNotNilReportMsg) - return false + retErr := v.Results[lastIdx] + for i := range lastIdx { + retVal := v.Results[i] + + zv, ok := n.isDangerNilType(pass.TypesInfo.TypeOf(ft.Results.List[i].Type)) + if !ok { + continue + } + + if ((zv == zeroValueNil) && isNil(pass, retVal) && isNil(pass, retErr)) || + ((zv == zeroValueZero) && isZero(retVal) && isNil(pass, retErr)) { + pass.Reportf(v.Pos(), nilNilReportMsg) + return false + } + + if n.detectOpposite && (((zv == zeroValueNil) && !isNil(pass, retVal) && !isNil(pass, retErr)) || + ((zv == zeroValueZero) && !isZero(retVal) && !isNil(pass, retErr))) { + pass.Reportf(v.Pos(), notNilNotNilReportMsg) + return false + } } } @@ -124,35 +138,35 @@ const ( zeroValueZero ) -func (n *nilNil) isDangerNilType(t types.Type) (bool, zeroValue) { +func (n *nilNil) isDangerNilType(t types.Type) (zeroValue, bool) { switch v := types.Unalias(t).(type) { case *types.Pointer: - return n.checkedTypes.Contains(ptrType), zeroValueNil + return zeroValueNil, n.checkedTypes.Contains(ptrType) case *types.Signature: - return n.checkedTypes.Contains(funcType), zeroValueNil + return zeroValueNil, n.checkedTypes.Contains(funcType) case *types.Interface: - return n.checkedTypes.Contains(ifaceType), zeroValueNil + return zeroValueNil, n.checkedTypes.Contains(ifaceType) case *types.Map: - return n.checkedTypes.Contains(mapType), zeroValueNil + return zeroValueNil, n.checkedTypes.Contains(mapType) case *types.Chan: - return n.checkedTypes.Contains(chanType), zeroValueNil + return zeroValueNil, n.checkedTypes.Contains(chanType) case *types.Basic: if v.Kind() == types.Uintptr { - return n.checkedTypes.Contains(uintptrType), zeroValueZero + return zeroValueZero, n.checkedTypes.Contains(uintptrType) } if v.Kind() == types.UnsafePointer { - return n.checkedTypes.Contains(unsafeptrType), zeroValueNil + return zeroValueNil, n.checkedTypes.Contains(unsafeptrType) } case *types.Named: return n.isDangerNilType(v.Underlying()) } - return false, 0 + return 0, false } var errorIface = types.Universe.Lookup("error").Type().Underlying().(*types.Interface) diff --git a/tools/vendor/github.com/Antonboom/testifylint/analyzer/checkers_factory.go b/tools/vendor/github.com/Antonboom/testifylint/analyzer/checkers_factory.go index df04dfdc5..274ce85ce 100644 --- a/tools/vendor/github.com/Antonboom/testifylint/analyzer/checkers_factory.go +++ b/tools/vendor/github.com/Antonboom/testifylint/analyzer/checkers_factory.go @@ -58,6 +58,7 @@ func newCheckers(cfg config.Config) ([]checkers.RegularChecker, []checkers.Advan case *checkers.Formatter: c.SetCheckFormatString(cfg.Formatter.CheckFormatString) c.SetRequireFFuncs(cfg.Formatter.RequireFFuncs) + c.SetRequireStringMsg(cfg.Formatter.RequireStringMsg) case *checkers.GoRequire: 
c.SetIgnoreHTTPHandlers(cfg.GoRequire.IgnoreHTTPHandlers) diff --git a/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/bool_compare.go b/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/bool_compare.go index 67959b633..4bcd5304f 100644 --- a/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/bool_compare.go +++ b/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/bool_compare.go @@ -43,7 +43,7 @@ func (checker BoolCompare) Check(pass *analysis.Pass, call *CallMeta) *analysis. } newUseFnDiagnostic := func(proposed string, survivingArg ast.Expr, replaceStart, replaceEnd token.Pos) *analysis.Diagnostic { - if !isBuiltinBool(pass, survivingArg) { + if !hasBoolType(pass, survivingArg) { if checker.ignoreCustomTypes { return nil } @@ -65,7 +65,7 @@ func (checker BoolCompare) Check(pass *analysis.Pass, call *CallMeta) *analysis. } newNeedSimplifyDiagnostic := func(survivingArg ast.Expr, replaceStart, replaceEnd token.Pos) *analysis.Diagnostic { - if !isBuiltinBool(pass, survivingArg) { + if !hasBoolType(pass, survivingArg) { if checker.ignoreCustomTypes { return nil } @@ -103,7 +103,7 @@ func (checker BoolCompare) Check(pass *analysis.Pass, call *CallMeta) *analysis. switch { case xor(t1, t2): survivingArg, _ := anyVal([]bool{t1, t2}, arg2, arg1) - if call.Fn.NameFTrimmed == "Exactly" && !isBuiltinBool(pass, survivingArg) { + if call.Fn.NameFTrimmed == "Exactly" && !hasBoolType(pass, survivingArg) { // NOTE(a.telyshev): `Exactly` assumes no type conversion. return nil } @@ -111,7 +111,7 @@ func (checker BoolCompare) Check(pass *analysis.Pass, call *CallMeta) *analysis. case xor(f1, f2): survivingArg, _ := anyVal([]bool{f1, f2}, arg2, arg1) - if call.Fn.NameFTrimmed == "Exactly" && !isBuiltinBool(pass, survivingArg) { + if call.Fn.NameFTrimmed == "Exactly" && !hasBoolType(pass, survivingArg) { // NOTE(a.telyshev): `Exactly` assumes no type conversion. 
return nil
}
diff --git a/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/checkers_registry.go b/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/checkers_registry.go
index f881be4f2..264e18b6d 100644
--- a/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/checkers_registry.go
+++ b/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/checkers_registry.go
@@ -10,7 +10,6 @@ var registry = checkersRegistry{
 {factory: asCheckerFactory(NewFloatCompare), enabledByDefault: true},
 {factory: asCheckerFactory(NewBoolCompare), enabledByDefault: true},
 {factory: asCheckerFactory(NewEmpty), enabledByDefault: true},
-{factory: asCheckerFactory(NewLen), enabledByDefault: true},
 {factory: asCheckerFactory(NewNegativePositive), enabledByDefault: true},
 {factory: asCheckerFactory(NewCompares), enabledByDefault: true},
 {factory: asCheckerFactory(NewContains), enabledByDefault: true},
@@ -19,6 +18,8 @@ var registry = checkersRegistry{
 {factory: asCheckerFactory(NewErrorIsAs), enabledByDefault: true},
 {factory: asCheckerFactory(NewEncodedCompare), enabledByDefault: true},
 {factory: asCheckerFactory(NewExpectedActual), enabledByDefault: true},
+{factory: asCheckerFactory(NewLen), enabledByDefault: true},
+{factory: asCheckerFactory(NewEqualValues), enabledByDefault: true},
 {factory: asCheckerFactory(NewRegexp), enabledByDefault: true},
 {factory: asCheckerFactory(NewSuiteExtraAssertCall), enabledByDefault: true},
 {factory: asCheckerFactory(NewSuiteDontUsePkg), enabledByDefault: true},
@@ -29,6 +30,7 @@ var registry = checkersRegistry{
 {factory: asCheckerFactory(NewGoRequire), enabledByDefault: true},
 {factory: asCheckerFactory(NewRequireError), enabledByDefault: true},
 {factory: asCheckerFactory(NewSuiteBrokenParallel), enabledByDefault: true},
+{factory: asCheckerFactory(NewSuiteMethodSignature), enabledByDefault: true},
 {factory: asCheckerFactory(NewSuiteSubtestRun), enabledByDefault: true},
 {factory: asCheckerFactory(NewSuiteTHelper), enabledByDefault: false},
 }
diff --git a/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/empty.go b/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/empty.go
index 71657fe11..a05423dfe 100644
--- a/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/empty.go
+++ b/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/empty.go
@@ -11,31 +11,41 @@ import (
 // Empty detects situations like
 //
-// assert.Len(t, arr, 0)
-// assert.Equal(t, 0, len(arr))
-// assert.EqualValues(t, 0, len(arr))
-// assert.Exactly(t, 0, len(arr))
-// assert.LessOrEqual(t, len(arr), 0)
-// assert.GreaterOrEqual(t, 0, len(arr))
-// assert.Less(t, len(arr), 0)
-// assert.Greater(t, 0, len(arr))
-// assert.Less(t, len(arr), 1)
-// assert.Greater(t, 1, len(arr))
-// assert.Zero(t, len(arr))
-// assert.Empty(t, len(arr))
+// assert.Len(t, arr, 0)
+// assert.Zero(t, str)
+// assert.Zero(t, len(arr))
+// assert.Equal(t, 0, len(arr))
+// assert.EqualValues(t, 0, len(arr))
+// assert.Exactly(t, 0, len(arr))
+// assert.LessOrEqual(t, len(arr), 0)
+// assert.GreaterOrEqual(t, 0, len(arr))
+// assert.Less(t, len(arr), 1)
+// assert.Greater(t, 1, len(arr))
+// assert.Equal(t, "", str)
+// assert.EqualValues(t, "", str)
+// assert.Exactly(t, "", str)
+// assert.Equal(t, ``, str)
+// assert.EqualValues(t, ``, str)
+// assert.Exactly(t, ``, str)
 //
-// assert.NotEqual(t, 0, len(arr))
-// assert.NotEqualValues(t, 0, len(arr))
-// assert.Less(t, 0, len(arr))
-// assert.Greater(t, len(arr), 0)
-// assert.Positive(t, len(arr))
-// assert.NotZero(t, len(arr))
-// assert.NotEmpty(t, len(arr))
+// assert.Positive(t, len(arr))
+// assert.NotZero(t, str)
+// assert.NotZero(t, len(arr))
+// assert.NotEqual(t, 0, len(arr))
+// assert.NotEqualValues(t, 0, len(arr))
+// assert.Greater(t, len(arr), 0)
+// assert.Less(t, 0, len(arr))
+// assert.NotEqual(t, "", str)
+// assert.NotEqualValues(t, "", str)
+// assert.NotEqual(t, ``, str)
+// assert.NotEqualValues(t, ``, str)
 //
 // and requires
 //
 // assert.Empty(t, arr)
 // assert.NotEmpty(t, arr)
+//
+// Also Empty removes extra `len` call.
 type Empty struct{}

 // NewEmpty constructs Empty checker.
@@ -67,6 +77,9 @@ func (checker Empty) checkEmpty(pass *analysis.Pass, call *CallMeta) *analysis.D
 switch call.Fn.NameFTrimmed {
 case "Zero":
+ if hasStringType(pass, a) {
+ return newUseEmptyDiagnostic(a.Pos(), a.End(), a)
+ }
 if lenArg, ok := isBuiltinLenCall(pass, a); ok {
 return newUseEmptyDiagnostic(a.Pos(), a.End(), lenArg)
 }
@@ -89,6 +102,10 @@
 }
 case "Equal", "EqualValues", "Exactly":
+ if isEmptyStringLit(a) {
+ return newUseEmptyDiagnostic(a.Pos(), b.End(), b)
+ }
+
 arg1, ok1 := isLenCallAndZero(pass, a, b)
 arg2, ok2 := isLenCallAndZero(pass, b, a)
@@ -136,7 +153,15 @@
 a := call.Args[0]
 switch call.Fn.NameFTrimmed {
-case "NotZero", "Positive":
+case "Positive":
+ if lenArg, ok := isBuiltinLenCall(pass, a); ok {
+ return newUseNotEmptyDiagnostic(a.Pos(), a.End(), lenArg)
+ }
+
+case "NotZero":
+ if hasStringType(pass, a) {
+ return newUseNotEmptyDiagnostic(a.Pos(), a.End(), a)
+ }
 if lenArg, ok := isBuiltinLenCall(pass, a); ok {
 return newUseNotEmptyDiagnostic(a.Pos(), a.End(), lenArg)
 }
@@ -154,6 +179,10 @@
 switch call.Fn.NameFTrimmed {
 case "NotEqual", "NotEqualValues":
+ if isEmptyStringLit(a) {
+ return newUseNotEmptyDiagnostic(a.Pos(), b.End(), b)
+ }
+
 arg1, ok1 := isLenCallAndZero(pass, a, b)
 arg2, ok2 := isLenCallAndZero(pass, b, a)
diff --git a/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/equal_values.go b/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/equal_values.go
new file mode 100644
index 000000000..df7be5779
--- /dev/null
+++ b/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/equal_values.go
@@ -0,0 +1,51 @@
+package checkers
+
+import (
+ "go/types"
+ "strings"
+
+ "golang.org/x/tools/go/analysis"
+)
+
+// EqualValues detects situations like
+//
+// assert.EqualValues(t, 42, result.IntField)
+// assert.NotEqualValues(t, 42, result.IntField)
+// ...
+//
+// and requires
+//
+// assert.Equal(t, 42, result.IntField)
+// assert.NotEqual(t, 42, result.IntField)
type EqualValues struct{}

+// NewEqualValues constructs EqualValues checker.
+func NewEqualValues() EqualValues { return EqualValues{} } +func (EqualValues) Name() string { return "equal-values" } + +func (checker EqualValues) Check(pass *analysis.Pass, call *CallMeta) *analysis.Diagnostic { + assrn := call.Fn.NameFTrimmed + switch assrn { + default: + return nil + case "EqualValues", "NotEqualValues": + } + + if len(call.Args) < 2 { + return nil + } + first, second := call.Args[0], call.Args[1] + + if isFunc(pass, first) || isFunc(pass, second) { + // NOTE(a.telyshev): EqualValues for funcs is ok, but not Equal: + // https://github.com/stretchr/testify/issues/1524 + return nil + } + + ft, st := pass.TypesInfo.TypeOf(first), pass.TypesInfo.TypeOf(second) + if types.Identical(ft, st) { + proposed := strings.TrimSuffix(assrn, "Values") + return newUseFunctionDiagnostic(checker.Name(), call, proposed) + } + return nil +} diff --git a/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/error_is_as.go b/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/error_is_as.go index f2812c939..0e6217875 100644 --- a/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/error_is_as.go +++ b/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/error_is_as.go @@ -6,6 +6,9 @@ import ( "go/types" "golang.org/x/tools/go/analysis" + + "github.com/Antonboom/testifylint/internal/analysisutil" + "github.com/Antonboom/testifylint/internal/testify" ) // ErrorIsAs detects situations like @@ -15,12 +18,14 @@ import ( // assert.True(t, errors.Is(err, errSentinel)) // assert.False(t, errors.Is(err, errSentinel)) // assert.True(t, errors.As(err, &target)) +// assert.False(t, errors.As(err, &target)) // // and requires // // assert.ErrorIs(t, err, errSentinel) // assert.NotErrorIs(t, err, errSentinel) // assert.ErrorAs(t, err, &target) +// assert.NotErrorAs(t, err, &target) // // Also ErrorIsAs repeats go vet's "errorsas" check logic. 
type ErrorIsAs struct{} @@ -32,7 +37,7 @@ func (ErrorIsAs) Name() string { return "error-is-as" } func (checker ErrorIsAs) Check(pass *analysis.Pass, call *CallMeta) *analysis.Diagnostic { switch call.Fn.NameFTrimmed { case "Error": - if len(call.Args) >= 2 && isError(pass, call.Args[1]) { + if len(call.Args) >= 2 && isError(pass, call.Args[1]) && !isAssertCollectT(pass, call.Selector.X) { const proposed = "ErrorIs" msg := fmt.Sprintf("invalid usage of %[1]s.Error, use %[1]s.%[2]s instead", call.SelectorXStr, proposed) return newDiagnostic(checker.Name(), call, msg, newSuggestedFuncReplacement(call, proposed)) @@ -87,8 +92,14 @@ func (checker ErrorIsAs) Check(pass *analysis.Pass, call *CallMeta) *analysis.Di return nil } - if isErrorsIsCall(pass, ce) { - const proposed = "NotErrorIs" + var proposed string + switch { + case isErrorsIsCall(pass, ce): + proposed = "NotErrorIs" + case isErrorsAsCall(pass, ce): + proposed = "NotErrorAs" + } + if proposed != "" { return newUseFunctionDiagnostic(checker.Name(), call, proposed, analysis.TextEdit{ Pos: ce.Pos(), @@ -97,7 +108,7 @@ func (checker ErrorIsAs) Check(pass *analysis.Pass, call *CallMeta) *analysis.Di }) } - case "ErrorAs": + case "ErrorAs", "NotErrorAs": if len(call.Args) < 2 { return nil } @@ -138,3 +149,18 @@ func (checker ErrorIsAs) Check(pass *analysis.Pass, call *CallMeta) *analysis.Di } return nil } + +func isAssertCollectT(pass *analysis.Pass, e ast.Expr) bool { + ptr, ok := pass.TypesInfo.TypeOf(e).(*types.Pointer) + if !ok { + return false + } + + named, ok := ptr.Elem().(*types.Named) + if !ok { + return false + } + + collectT := analysisutil.ObjectOf(pass.Pkg, testify.AssertPkgPath, "CollectT") + return named.Obj() == collectT +} diff --git a/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/expected_actual.go b/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/expected_actual.go index 351d675ce..31ea3ff44 100644 --- a/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/expected_actual.go +++ b/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/expected_actual.go @@ -17,6 +17,8 @@ var DefaultExpectedVarPattern = regexp.MustCompile( // ExpectedActual detects situations like // // assert.Equal(t, result, expected) +// assert.Equal(t, result, len(expected)) +// assert.Equal(t, len(resultFields), len(expectedFields)) // assert.EqualExportedValues(t, resultObj, User{Name: "Anton"}) // assert.EqualValues(t, result, 42) // assert.Exactly(t, result, int64(42)) @@ -37,6 +39,8 @@ var DefaultExpectedVarPattern = regexp.MustCompile( // and requires // // assert.Equal(t, expected, result) +// assert.Equal(t, len(expected), result) +// assert.Equal(t, len(expectedFields), len(resultFields)) // assert.EqualExportedValues(t, User{Name: "Anton"}, resultObj) // assert.EqualValues(t, 42, result) // ... 
@@ -122,6 +126,9 @@ func (checker ExpectedActual) isExpectedValueCandidate(pass *analysis.Pass, expr return true case *ast.CallExpr: + if lv, ok := isBuiltinLenCall(pass, expr); ok { + return isIdentNamedAfterPattern(checker.expVarPattern, lv) + } return isParenExpr(v) || isCastedBasicLitOrExpectedValue(v, checker.expVarPattern) || isExpectedValueFactory(pass, v, checker.expVarPattern) diff --git a/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/formatter.go b/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/formatter.go index 7ff4de470..572e88f96 100644 --- a/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/formatter.go +++ b/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/formatter.go @@ -1,8 +1,10 @@ package checkers import ( + "fmt" "go/types" "strconv" + "strings" "golang.org/x/tools/go/analysis" @@ -25,9 +27,15 @@ import ( // assert.Errorf(t, err, "Profile %s should not be valid", test.profile) // assert.Errorf(t, err, "test %s", test.testName) // assert.Truef(t, targetTs.Equal(ts), "the timestamp should be as expected (%s) but was %s", targetTs, ts) +// +// It also checks that there are no arguments in `msgAndArgs` if the message is not a string, +// and additionally checks that the first argument of `msgAndArgs` is a not empty string. +// +// Finally, it checks that failure message in Fail and FailNow is not used as a format string (which won't work). type Formatter struct { checkFormatString bool requireFFuncs bool + requireStringMsg bool } // NewFormatter constructs Formatter checker. @@ -35,6 +43,7 @@ func NewFormatter() *Formatter { return &Formatter{ checkFormatString: true, requireFFuncs: false, + requireStringMsg: true, } } @@ -50,6 +59,11 @@ func (checker *Formatter) SetRequireFFuncs(v bool) *Formatter { return checker } +func (checker *Formatter) SetRequireStringMsg(v bool) *Formatter { + checker.requireStringMsg = v + return checker +} + func (checker Formatter) Check(pass *analysis.Pass, call *CallMeta) (result *analysis.Diagnostic) { if call.Fn.IsFmt { return checker.checkFmtAssertion(pass, call) @@ -63,21 +77,64 @@ func (checker Formatter) checkNotFmtAssertion(pass *analysis.Pass, call *CallMet return nil } - fFunc := call.Fn.Name + "f" + lastArgPos := len(call.ArgsRaw) - 1 + isSingleMsgAndArgElem := msgAndArgsPos == lastArgPos + msgAndArgs := call.ArgsRaw[msgAndArgsPos] - if msgAndArgsPos == len(call.ArgsRaw)-1 { - msgAndArgs := call.ArgsRaw[msgAndArgsPos] - if args, ok := isFmtSprintfCall(pass, msgAndArgs); ok { - if checker.requireFFuncs { - return newRemoveFnAndUseDiagnostic(pass, checker.Name(), call, fFunc, - "fmt.Sprintf", msgAndArgs, args...) - } - return newRemoveSprintfDiagnostic(pass, checker.Name(), call, msgAndArgs, args) + if name := call.Fn.NameFTrimmed; name == "Fail" || name == "FailNow" { + failureMsg, err := strconv.Unquote(analysisutil.NodeString(pass.Fset, call.Args[0])) + if err != nil { + return nil + } + if strings.Contains(failureMsg, "%") { + return newDiagnostic(checker.Name(), call, + "failure message is not a format string, use msgAndArgs instead") } } - if checker.requireFFuncs { - return newUseFunctionDiagnostic(checker.Name(), call, fFunc) + if args, ok := isFmtSprintfCall(pass, msgAndArgs); ok && isSingleMsgAndArgElem { + if checker.requireFFuncs { + return newRemoveFnAndUseDiagnostic(pass, checker.Name(), call, call.Fn.Name+"f", + "fmt.Sprintf", msgAndArgs, args...) 
+ } + return newRemoveSprintfDiagnostic(pass, checker.Name(), call, msgAndArgs, args) + } + + if hasStringType(pass, msgAndArgs) { //nolint:nestif // This is the best option of code organization :( + format, err := strconv.Unquote(analysisutil.NodeString(pass.Fset, msgAndArgs)) + if nil == err && format == "" { // Unquote failed for not string literals. + var fixes []analysis.SuggestedFix + if isSingleMsgAndArgElem { + fixes = append(fixes, analysis.SuggestedFix{ + Message: "Remove empty message", + TextEdits: []analysis.TextEdit{newRemoveLastArgTextEdit(pass, call.Args)}, + }) + } + return newDiagnostic(checker.Name(), call, "empty message", fixes...) + } + if checker.requireFFuncs { + return newUseFunctionDiagnostic(checker.Name(), call, call.Fn.Name+"f") + } + } else { + if isSingleMsgAndArgElem { //nolint:revive // Better without early-return. + if checker.requireStringMsg { + return newDiagnostic(checker.Name(), call, + "do not use non-string value as first element (msg) of msgAndArgs", + analysis.SuggestedFix{ + Message: `Introduce "%+v" as the message`, + TextEdits: []analysis.TextEdit{ + { + Pos: msgAndArgs.Pos(), + End: msgAndArgs.End(), + NewText: []byte(`"%+v", ` + analysisutil.NodeString(pass.Fset, msgAndArgs)), + }, + }, + }) + } + } else { + return newDiagnostic(checker.Name(), call, + "using msgAndArgs with non-string first element (msg) causes panic") + } } return nil } @@ -88,14 +145,35 @@ func (checker Formatter) checkFmtAssertion(pass *analysis.Pass, call *CallMeta) return nil } + lastArgPos := len(call.ArgsRaw) - 1 msg := call.ArgsRaw[formatPos] + noFormatArgs := formatPos == lastArgPos - if formatPos == len(call.ArgsRaw)-1 { + if formatPos == lastArgPos { if args, ok := isFmtSprintfCall(pass, msg); ok { return newRemoveSprintfDiagnostic(pass, checker.Name(), call, msg, args) } } + format, err := strconv.Unquote(analysisutil.NodeString(pass.Fset, msg)) + if err != nil { + // Unreachable, because msg is a string literal in formatted assertion. + return nil + } + if format == "" { + var fixes []analysis.SuggestedFix + if noFormatArgs { + fixes = append(fixes, analysis.SuggestedFix{ + Message: fmt.Sprintf("Remove empty message and use `%s`", call.Fn.NameFTrimmed), + TextEdits: []analysis.TextEdit{ + newReplaceFnTextEdit(call.Fn, call.Fn.NameFTrimmed), + newRemoveLastArgTextEdit(pass, call.Args), + }, + }) + } + return newDiagnostic(checker.Name(), call, "empty message", fixes...) 
+ } + if checker.checkFormatString { report := pass.Report defer func() { pass.Report = report }() @@ -103,23 +181,22 @@ func (checker Formatter) checkFmtAssertion(pass *analysis.Pass, call *CallMeta) pass.Report = func(d analysis.Diagnostic) { result = newDiagnostic(checker.Name(), call, d.Message) } - - format, err := strconv.Unquote(analysisutil.NodeString(pass.Fset, msg)) - if err != nil { - return nil - } printf.CheckPrintf(pass, call.Call, call.String(), format, formatPos) } return result } func isPrintfLikeCall(pass *analysis.Pass, call *CallMeta) (int, bool) { + if call.Call.Ellipsis.IsValid() { + return -1, false + } + msgAndArgsPos := getMsgAndArgsPosition(call.Fn.Signature) if msgAndArgsPos <= 0 { return -1, false } - if !(len(call.ArgsRaw) > msgAndArgsPos && hasStringType(pass, call.ArgsRaw[msgAndArgsPos])) { + if msgAndArgsPos >= len(call.ArgsRaw) { return -1, false } @@ -153,7 +230,7 @@ func assertHasFormattedAnalogue(pass *analysis.Pass, call *CallMeta) bool { if !ok { return false } - for i := 0; i < suite.NumMethods(); i++ { + for i := range suite.NumMethods() { if suite.Method(i).Name() == call.Fn.Name+"f" { return true } @@ -179,10 +256,11 @@ func getMsgAndArgsPosition(sig *types.Signature) int { } func getMsgPosition(sig *types.Signature) int { - for i := 0; i < sig.Params().Len(); i++ { + for i := range sig.Params().Len() { param := sig.Params().At(i) - if b, ok := param.Type().(*types.Basic); ok && b.Kind() == types.String && param.Name() == "msg" { + if b, ok := param.Type().(*types.Basic); ok && b.Kind() == types.String && (param.Name() == "msg" || + param.Name() == "format") { // NOTE(a.telyshev): assert.CollectT case. return i } } diff --git a/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/helpers_basic_type.go b/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/helpers_basic_type.go index b4bb56321..0e15cc1bf 100644 --- a/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/helpers_basic_type.go +++ b/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/helpers_basic_type.go @@ -61,24 +61,14 @@ func isIntNumber(e ast.Expr, rhs int) bool { return ok && (lhs == rhs) } -func isNegativeIntNumber(e ast.Expr) bool { - v, ok := isIntBasicLit(e) - return ok && v < 0 -} - -func isPositiveIntNumber(e ast.Expr) bool { - v, ok := isIntBasicLit(e) - return ok && v > 0 -} - -func isEmptyStringLit(e ast.Expr) bool { +func isStringLit(e ast.Expr) bool { bl, ok := e.(*ast.BasicLit) - return ok && bl.Kind == token.STRING && bl.Value == `""` + return ok && bl.Kind == token.STRING } -func isNotEmptyStringLit(e ast.Expr) bool { +func isEmptyStringLit(e ast.Expr) bool { bl, ok := e.(*ast.BasicLit) - return ok && bl.Kind == token.STRING && bl.Value != `""` + return ok && bl.Kind == token.STRING && (bl.Value == `""` || bl.Value == "``") } func isBasicLit(e ast.Expr) bool { @@ -144,6 +134,11 @@ func isPointer(pass *analysis.Pass, e ast.Expr) (types.Type, bool) { return ptr.Elem(), true } +func isFunc(pass *analysis.Pass, e ast.Expr) bool { + _, ok := pass.TypesInfo.TypeOf(e).(*types.Signature) + return ok +} + // isByteArray returns true if expression is `[]byte` itself. func isByteArray(e ast.Expr) bool { at, ok := e.(*ast.ArrayType) @@ -171,12 +166,3 @@ func hasStringType(pass *analysis.Pass, e ast.Expr) bool { basicType, ok := pass.TypesInfo.TypeOf(e).(*types.Basic) return ok && basicType.Kind() == types.String } - -// untype returns v from type(v) expression or v itself if there is no type conversion. 
-func untype(e ast.Expr) ast.Expr { - ce, ok := e.(*ast.CallExpr) - if !ok || len(ce.Args) != 1 { - return e - } - return ce.Args[0] -} diff --git a/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/helpers_bool.go b/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/helpers_bool.go index 13e579a2b..613f2fd30 100644 --- a/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/helpers_bool.go +++ b/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/helpers_bool.go @@ -14,6 +14,10 @@ var ( trueObj = types.Universe.Lookup("true") ) +func isUntypedBool(pass *analysis.Pass, e ast.Expr) bool { + return isUntypedTrue(pass, e) || isUntypedFalse(pass, e) +} + func isUntypedTrue(pass *analysis.Pass, e ast.Expr) bool { return analysisutil.IsObj(pass.TypesInfo, e, trueObj) } @@ -22,7 +26,7 @@ func isUntypedFalse(pass *analysis.Pass, e ast.Expr) bool { return analysisutil.IsObj(pass.TypesInfo, e, falseObj) } -func isBuiltinBool(pass *analysis.Pass, e ast.Expr) bool { +func hasBoolType(pass *analysis.Pass, e ast.Expr) bool { basicType, ok := pass.TypesInfo.TypeOf(e).(*types.Basic) return ok && basicType.Kind() == types.Bool } diff --git a/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/helpers_diagnostic.go b/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/helpers_diagnostic.go index f12d87aa3..c1364f274 100644 --- a/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/helpers_diagnostic.go +++ b/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/helpers_diagnostic.go @@ -131,13 +131,23 @@ func newSuggestedFuncReplacement( proposedFn += "f" } return analysis.SuggestedFix{ - Message: fmt.Sprintf("Replace `%s` with `%s`", call.Fn.Name, proposedFn), - TextEdits: append([]analysis.TextEdit{ - { - Pos: call.Fn.Pos(), - End: call.Fn.End(), - NewText: []byte(proposedFn), - }, - }, additionalEdits...), + Message: fmt.Sprintf("Replace `%s` with `%s`", call.Fn.Name, proposedFn), + TextEdits: append([]analysis.TextEdit{newReplaceFnTextEdit(call.Fn, proposedFn)}, additionalEdits...), + } +} + +func newReplaceFnTextEdit(callFn analysis.Range, proposedFn string) analysis.TextEdit { + return analysis.TextEdit{ + Pos: callFn.Pos(), + End: callFn.End(), + NewText: []byte(proposedFn), + } +} + +func newRemoveLastArgTextEdit(pass *analysis.Pass, callArgs []ast.Expr) analysis.TextEdit { + return analysis.TextEdit{ + Pos: callArgs[0].Pos(), + End: callArgs[len(callArgs)-1].End(), + NewText: formatAsCallArgs(pass, callArgs[0:len(callArgs)-1]...), } } diff --git a/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/helpers_http.go b/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/helpers_http.go index 9dabb02a9..4aa19c1bb 100644 --- a/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/helpers_http.go +++ b/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/helpers_http.go @@ -24,7 +24,7 @@ func mimicHTTPHandler(pass *analysis.Pass, fType *ast.FuncType) bool { return false } - for i := 0; i < sig.Params().Len(); i++ { + for i := range sig.Params().Len() { lhs := sig.Params().At(i).Type() rhs := pass.TypesInfo.TypeOf(fType.Params.List[i].Type) if !types.Identical(lhs, rhs) { diff --git a/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/helpers_len.go b/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/helpers_len.go index 904950ff3..6afa5849a 100644 --- a/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/helpers_len.go +++ 
b/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/helpers_len.go @@ -2,7 +2,6 @@ package checkers import ( "go/ast" - "go/token" "go/types" "golang.org/x/tools/go/analysis" @@ -12,31 +11,6 @@ import ( var lenObj = types.Universe.Lookup("len") -func isLenEquality(pass *analysis.Pass, e ast.Expr) (ast.Expr, ast.Expr, bool) { - be, ok := e.(*ast.BinaryExpr) - if !ok { - return nil, nil, false - } - - if be.Op != token.EQL { - return nil, nil, false - } - return xorLenCall(pass, be.X, be.Y) -} - -func xorLenCall(pass *analysis.Pass, a, b ast.Expr) (lenArg ast.Expr, expectedLen ast.Expr, ok bool) { - arg1, ok1 := isBuiltinLenCall(pass, a) - arg2, ok2 := isBuiltinLenCall(pass, b) - - if xor(ok1, ok2) { - if ok1 { - return arg1, b, true - } - return arg2, a, true - } - return nil, nil, false -} - func isLenCallAndZero(pass *analysis.Pass, a, b ast.Expr) (ast.Expr, bool) { lenArg, ok := isBuiltinLenCall(pass, a) return lenArg, ok && isZero(b) diff --git a/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/len.go b/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/len.go index c240a6174..9bdd8ff98 100644 --- a/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/len.go +++ b/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/len.go @@ -1,19 +1,42 @@ package checkers import ( + "go/ast" + "go/token" + "golang.org/x/tools/go/analysis" ) // Len detects situations like // -// assert.Equal(t, 3, len(arr)) -// assert.EqualValues(t, 3, len(arr)) -// assert.Exactly(t, 3, len(arr)) -// assert.True(t, len(arr) == 3) +// assert.Equal(t, 42, len(arr)) +// assert.Equal(t, len(arr), 42) +// assert.EqualValues(t, 42, len(arr)) +// assert.EqualValues(t, len(arr), 42) +// assert.Exactly(t, 42, len(arr)) +// assert.Exactly(t, len(arr), 42) +// assert.True(t, 42 == len(arr)) +// assert.True(t, len(arr) == 42) +// +// assert.Equal(t, value, len(arr)) +// assert.EqualValues(t, value, len(arr)) +// assert.Exactly(t, value, len(arr)) +// assert.True(t, len(arr) == value) + +// assert.Equal(t, len(expArr), len(arr)) +// assert.EqualValues(t, len(expArr), len(arr)) +// assert.Exactly(t, len(expArr), len(arr)) +// assert.True(t, len(arr) == len(expArr)) // // and requires // -// assert.Len(t, arr, 3) +// assert.Len(t, arr, 42) +// assert.Len(t, arr, value) +// assert.Len(t, arr, len(expArr)) +// +// The checker ignores assertions in which length checking is not a priority, e.g +// +// assert.Equal(t, len(arr), value) type Len struct{} // NewLen constructs Len checker. 
@@ -21,45 +44,62 @@ func NewLen() Len { return Len{} } func (Len) Name() string { return "len" } func (checker Len) Check(pass *analysis.Pass, call *CallMeta) *analysis.Diagnostic { - const proposedFn = "Len" - switch call.Fn.NameFTrimmed { case "Equal", "EqualValues", "Exactly": if len(call.Args) < 2 { return nil } - a, b := call.Args[0], call.Args[1] - if lenArg, expectedLen, ok := xorLenCall(pass, a, b); ok { - if _, ok := isIntBasicLit(expectedLen); (expectedLen == b) && !ok { - // https://github.com/Antonboom/testifylint/issues/9 - return nil - } - return newUseFunctionDiagnostic(checker.Name(), call, proposedFn, - analysis.TextEdit{ - Pos: a.Pos(), - End: b.End(), - NewText: formatAsCallArgs(pass, lenArg, expectedLen), - }) - } + a, b := call.Args[0], call.Args[1] + return checker.checkArgs(call, pass, a, b, false) case "True": if len(call.Args) < 1 { return nil } - expr := call.Args[0] - if lenArg, expectedLen, ok := isLenEquality(pass, expr); ok { - if _, ok := isIntBasicLit(expectedLen); !ok { - return nil - } - return newUseFunctionDiagnostic(checker.Name(), call, proposedFn, - analysis.TextEdit{ - Pos: expr.Pos(), - End: expr.End(), - NewText: formatAsCallArgs(pass, lenArg, expectedLen), - }) + be, ok := call.Args[0].(*ast.BinaryExpr) + if !ok { + return nil + } + if be.Op != token.EQL { + return nil + } + return checker.checkArgs(call, pass, be.Y, be.X, true) // In True, the actual value is usually first. + } + return nil +} + +func (checker Len) checkArgs(call *CallMeta, pass *analysis.Pass, a, b ast.Expr, inverted bool) *analysis.Diagnostic { + newUseLenDiagnostic := func(lenArg, expectedLen ast.Expr) *analysis.Diagnostic { + const proposedFn = "Len" + start, end := a.Pos(), b.End() + if inverted { + start, end = b.Pos(), a.End() + } + return newUseFunctionDiagnostic(checker.Name(), call, proposedFn, + analysis.TextEdit{ + Pos: start, + End: end, + NewText: formatAsCallArgs(pass, lenArg, expectedLen), + }) + } + + arg1, firstIsLen := isBuiltinLenCall(pass, a) + arg2, secondIsLen := isBuiltinLenCall(pass, b) + + switch { + case firstIsLen && secondIsLen: + return newUseLenDiagnostic(arg2, a) + + case firstIsLen: + if _, secondIsNum := isIntBasicLit(b); secondIsNum { + return newUseLenDiagnostic(arg1, b) } + + case secondIsLen: + return newUseLenDiagnostic(arg2, a) } + return nil } diff --git a/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/negative_positive.go b/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/negative_positive.go index a61bbdfcb..f15dc4981 100644 --- a/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/negative_positive.go +++ b/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/negative_positive.go @@ -55,7 +55,7 @@ func (checker NegativePositive) checkNegative(pass *analysis.Pass, call *CallMet }) } - // NOTE(a.telyshev): We ignore uint-asserts as being no sense for assert.Negative. + // NOTE(a.telyshev): We ignore uint and len asserts as being no sense for assert.Negative. 
switch call.Fn.NameFTrimmed { case "Less": @@ -64,8 +64,8 @@ func (checker NegativePositive) checkNegative(pass *analysis.Pass, call *CallMet } a, b := call.Args[0], call.Args[1] - if isSignedNotZero(pass, a) && isZeroOrSignedZero(b) { - return newUseNegativeDiagnostic(a.Pos(), b.End(), untype(a)) + if canBeNegative(pass, a) && isZeroOrSignedZero(b) { + return newUseNegativeDiagnostic(a.Pos(), b.End(), a) } case "Greater": @@ -74,8 +74,8 @@ func (checker NegativePositive) checkNegative(pass *analysis.Pass, call *CallMet } a, b := call.Args[0], call.Args[1] - if isZeroOrSignedZero(a) && isSignedNotZero(pass, b) { - return newUseNegativeDiagnostic(a.Pos(), b.End(), untype(b)) + if isZeroOrSignedZero(a) && canBeNegative(pass, b) { + return newUseNegativeDiagnostic(a.Pos(), b.End(), b) } case "True": @@ -84,12 +84,12 @@ func (checker NegativePositive) checkNegative(pass *analysis.Pass, call *CallMet } expr := call.Args[0] - a, _, ok1 := isStrictComparisonWith(pass, expr, isSignedNotZero, token.LSS, p(isZeroOrSignedZero)) // a < 0 - _, b, ok2 := isStrictComparisonWith(pass, expr, p(isZeroOrSignedZero), token.GTR, isSignedNotZero) // 0 > a + a, _, ok1 := isStrictComparisonWith(pass, expr, canBeNegative, token.LSS, p(isZeroOrSignedZero)) // a < 0 + _, b, ok2 := isStrictComparisonWith(pass, expr, p(isZeroOrSignedZero), token.GTR, canBeNegative) // 0 > a survivingArg, ok := anyVal([]bool{ok1, ok2}, a, b) if ok { - return newUseNegativeDiagnostic(expr.Pos(), expr.End(), untype(survivingArg)) + return newUseNegativeDiagnostic(expr.Pos(), expr.End(), survivingArg) } case "False": @@ -98,12 +98,12 @@ func (checker NegativePositive) checkNegative(pass *analysis.Pass, call *CallMet } expr := call.Args[0] - a, _, ok1 := isStrictComparisonWith(pass, expr, isSignedNotZero, token.GEQ, p(isZeroOrSignedZero)) // a >= 0 - _, b, ok2 := isStrictComparisonWith(pass, expr, p(isZeroOrSignedZero), token.LEQ, isSignedNotZero) // 0 <= a + a, _, ok1 := isStrictComparisonWith(pass, expr, canBeNegative, token.GEQ, p(isZeroOrSignedZero)) // a >= 0 + _, b, ok2 := isStrictComparisonWith(pass, expr, p(isZeroOrSignedZero), token.LEQ, canBeNegative) // 0 <= a survivingArg, ok := anyVal([]bool{ok1, ok2}, a, b) if ok { - return newUseNegativeDiagnostic(expr.Pos(), expr.End(), untype(survivingArg)) + return newUseNegativeDiagnostic(expr.Pos(), expr.End(), survivingArg) } } return nil @@ -128,7 +128,7 @@ func (checker NegativePositive) checkPositive(pass *analysis.Pass, call *CallMet a, b := call.Args[0], call.Args[1] if isNotAnyZero(a) && isAnyZero(b) { - return newUsePositiveDiagnostic(a.Pos(), b.End(), untype(a)) + return newUsePositiveDiagnostic(a.Pos(), b.End(), a) } case "Less": @@ -138,7 +138,7 @@ func (checker NegativePositive) checkPositive(pass *analysis.Pass, call *CallMet a, b := call.Args[0], call.Args[1] if isAnyZero(a) && isNotAnyZero(b) { - return newUsePositiveDiagnostic(a.Pos(), b.End(), untype(b)) + return newUsePositiveDiagnostic(a.Pos(), b.End(), b) } case "True": @@ -152,7 +152,7 @@ func (checker NegativePositive) checkPositive(pass *analysis.Pass, call *CallMet survivingArg, ok := anyVal([]bool{ok1, ok2}, a, b) if ok { - return newUsePositiveDiagnostic(expr.Pos(), expr.End(), untype(survivingArg)) + return newUsePositiveDiagnostic(expr.Pos(), expr.End(), survivingArg) } case "False": @@ -166,8 +166,13 @@ func (checker NegativePositive) checkPositive(pass *analysis.Pass, call *CallMet survivingArg, ok := anyVal([]bool{ok1, ok2}, a, b) if ok { - return newUsePositiveDiagnostic(expr.Pos(), expr.End(), 
untype(survivingArg)) + return newUsePositiveDiagnostic(expr.Pos(), expr.End(), survivingArg) } } return nil } + +func canBeNegative(pass *analysis.Pass, e ast.Expr) bool { + _, isLen := isBuiltinLenCall(pass, e) + return isSignedNotZero(pass, e) && !isLen +} diff --git a/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/suite_method_signature.go b/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/suite_method_signature.go new file mode 100644 index 000000000..5293fc7be --- /dev/null +++ b/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/suite_method_signature.go @@ -0,0 +1,71 @@ +package checkers + +import ( + "fmt" + "go/ast" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/ast/inspector" + + "github.com/Antonboom/testifylint/internal/analysisutil" + "github.com/Antonboom/testifylint/internal/testify" +) + +// SuiteMethodSignature warns, when +// - suite's test methods have arguments or returning values; +// - suite's custom (user) methods conflicts with (shadows/overrides) suite functionality methods. +type SuiteMethodSignature struct{} + +// NewSuiteMethodSignature constructs SuiteMethodSignature checker. +func NewSuiteMethodSignature() SuiteMethodSignature { return SuiteMethodSignature{} } +func (SuiteMethodSignature) Name() string { return "suite-method-signature" } + +func (checker SuiteMethodSignature) Check(pass *analysis.Pass, inspector *inspector.Inspector) (diags []analysis.Diagnostic) { + inspector.Preorder([]ast.Node{(*ast.FuncDecl)(nil)}, func(node ast.Node) { + fd := node.(*ast.FuncDecl) + if !isSuiteMethod(pass, fd) { + return + } + + methodName := fd.Name.Name + rcv := fd.Recv.List[0] + + if isSuiteTestMethod(methodName) { + if fd.Type.Params.NumFields() > 0 || fd.Type.Results.NumFields() > 0 { + const msg = "test method should not have any arguments or returning values" + diags = append(diags, *newDiagnostic(checker.Name(), fd, msg)) + return + } + } + + iface, ok := suiteMethodToInterface[methodName] + if !ok { + return + } + ifaceObj := analysisutil.ObjectOf(pass.Pkg, testify.SuitePkgPath, iface) + if ifaceObj == nil { + return + } + if !implements(pass, rcv.Type, ifaceObj) { + msg := fmt.Sprintf("method conflicts with %s.%s interface", testify.SuitePkgName, iface) + diags = append(diags, *newDiagnostic(checker.Name(), fd, msg)) + } + }) + return diags +} + +// https://github.com/stretchr/testify/blob/master/suite/interfaces.go +var suiteMethodToInterface = map[string]string{ + // NOTE(a.telyshev): We ignore suite.TestingSuite interface, because + // - suite.Run will not work for suite-types that do not implement it; + // - this interface has several methods, which may cause a false positive for one of the methods in the suite-type. 
+ "SetupSuite": "SetupAllSuite", + "SetupTest": "SetupTestSuite", + "TearDownSuite": "TearDownAllSuite", + "TearDownTest": "TearDownTestSuite", + "BeforeTest": "BeforeTest", + "AfterTest": "AfterTest", + "HandleStats": "WithStats", + "SetupSubTest": "SetupSubTest", + "TearDownSubTest": "TearDownSubTest", +} diff --git a/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/suite_thelper.go b/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/suite_thelper.go index ef8d82132..94f0b4c45 100644 --- a/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/suite_thelper.go +++ b/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/suite_thelper.go @@ -43,7 +43,7 @@ func (checker SuiteTHelper) Check(pass *analysis.Pass, inspector *inspector.Insp } rcvName := rcv.Names[0].Name - helperCallStr := fmt.Sprintf("%s.T().Helper()", rcvName) + helperCallStr := rcvName + ".T().Helper()" firstStmt := fd.Body.List[0] if analysisutil.NodeString(pass.Fset, firstStmt) == helperCallStr { diff --git a/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/useless_assert.go b/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/useless_assert.go index 045706e5d..ef23d6acc 100644 --- a/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/useless_assert.go +++ b/tools/vendor/github.com/Antonboom/testifylint/internal/checkers/useless_assert.go @@ -30,20 +30,40 @@ import ( // assert.False(t, num == num) // assert.False(t, num != num) // -// assert.Empty(t, "") -// assert.False(t, false) +// assert.Empty(t, "value") // Any string literal. +// assert.Error(t, nil) +// assert.False(t, false) // Any bool literal. // assert.Implements(t, (*any)(nil), new(Conn)) -// assert.Negative(t, -42) +// assert.Negative(t, 42) // Any int literal. // assert.Nil(t, nil) // assert.NoError(t, nil) -// assert.NotEmpty(t, "value") -// assert.NotZero(t, 42) -// assert.NotZero(t, "value") -// assert.Positive(t, 42) -// assert.True(t, true) -// assert.Zero(t, 0) -// assert.Zero(t, "") +// assert.NotEmpty(t, "value") // Any string literal. +// assert.NotImplements(t, (*any)(nil), new(Conn)) +// assert.NotNil(t, nil) +// assert.NotZero(t, 42) // Any int literal. +// assert.NotZero(t, "value") // Any string literal. +// assert.NotZero(t, nil) +// assert.NotZero(t, false) // Any bool literal. +// assert.Positive(t, 42) // Any int literal. +// assert.True(t, true) // Any bool literal. +// assert.Zero(t, 42) // Any int literal. +// assert.Zero(t, "value") // Any string literal. // assert.Zero(t, nil) +// assert.Zero(t, false) // Any bool literal. +// +// assert.Negative(len(x)) +// assert.Less(len(x), 0) +// assert.Greater(0, len(x)) +// assert.Positive(len(x)) +// assert.GreaterOrEqual(len(x), 0) +// assert.LessOrEqual(0, len(x)) +// +// assert.Negative(uintVal) +// assert.Less(uintVal, 0) +// assert.Greater(0, uintVal) +// assert.Positive(uintVal) +// assert.GreaterOrEqual(uintVal, 0) +// assert.LessOrEqual(0, uintVal) type UselessAssert struct{} // NewUselessAssert constructs UselessAssert checker. 
@@ -57,13 +77,13 @@ func (checker UselessAssert) Check(pass *analysis.Pass, call *CallMeta) *analysi var isMeaningless bool switch call.Fn.NameFTrimmed { - case "Empty": - isMeaningless = (len(call.Args) >= 1) && isEmptyStringLit(call.Args[0]) + case "False", "True": + isMeaningless = (len(call.Args) >= 1) && isUntypedBool(pass, call.Args[0]) - case "False": - isMeaningless = (len(call.Args) >= 1) && isUntypedFalse(pass, call.Args[0]) + case "GreaterOrEqual", "Less": + isMeaningless = (len(call.Args) >= 2) && isAnyZero(call.Args[1]) && canNotBeNegative(pass, call.Args[0]) - case "Implements": + case "Implements", "NotImplements": if len(call.Args) < 2 { return nil } @@ -71,29 +91,28 @@ func (checker UselessAssert) Check(pass *analysis.Pass, call *CallMeta) *analysi elem, ok := isPointer(pass, call.Args[0]) isMeaningless = ok && isEmptyInterfaceType(elem) - case "Negative": - isMeaningless = (len(call.Args) >= 1) && isNegativeIntNumber(call.Args[0]) - - case "Nil", "NoError": - isMeaningless = (len(call.Args) >= 1) && isNil(call.Args[0]) + case "LessOrEqual", "Greater": + isMeaningless = (len(call.Args) >= 2) && isAnyZero(call.Args[0]) && canNotBeNegative(pass, call.Args[1]) - case "NotEmpty": - isMeaningless = (len(call.Args) >= 1) && isNotEmptyStringLit(call.Args[0]) - - case "NotZero": - isMeaningless = (len(call.Args) >= 1) && - (isNotEmptyStringLit(call.Args[0]) || - isNegativeIntNumber(call.Args[0]) || isPositiveIntNumber(call.Args[0])) + case "Negative", "Positive": + if len(call.Args) < 1 { + return nil + } + _, isInt := isIntBasicLit(call.Args[0]) + isMeaningless = isInt || canNotBeNegative(pass, call.Args[0]) - case "Positive": - isMeaningless = (len(call.Args) >= 1) && isPositiveIntNumber(call.Args[0]) + case "Error", "Nil", "NoError", "NotNil": + isMeaningless = (len(call.Args) >= 1) && isNil(call.Args[0]) - case "True": - isMeaningless = (len(call.Args) >= 1) && isUntypedTrue(pass, call.Args[0]) + case "Empty", "NotEmpty": + isMeaningless = (len(call.Args) >= 1) && isStringLit(call.Args[0]) - case "Zero": - isMeaningless = (len(call.Args) >= 1) && - (isZero(call.Args[0]) || isEmptyStringLit(call.Args[0]) || isNil(call.Args[0])) + case "NotZero", "Zero": + if len(call.Args) < 1 { + return nil + } + _, isInt := isIntBasicLit(call.Args[0]) + isMeaningless = isInt || isStringLit(call.Args[0]) || isNil(call.Args[0]) || isUntypedBool(pass, call.Args[0]) } if isMeaningless { @@ -127,8 +146,10 @@ func (checker UselessAssert) checkSameVars(pass *analysis.Pass, call *CallMeta) "JSONEq", "Less", "LessOrEqual", + "NotElementsMatch", "NotEqual", "NotEqualValues", + "NotErrorAs", "NotErrorIs", "NotRegexp", "NotSame", @@ -163,3 +184,8 @@ func (checker UselessAssert) checkSameVars(pass *analysis.Pass, call *CallMeta) } return nil } + +func canNotBeNegative(pass *analysis.Pass, e ast.Expr) bool { + _, isLen := isBuiltinLenCall(pass, e) + return isLen || isUnsigned(pass, e) +} diff --git a/tools/vendor/github.com/Antonboom/testifylint/internal/config/config.go b/tools/vendor/github.com/Antonboom/testifylint/internal/config/config.go index 23b673428..7b2587876 100644 --- a/tools/vendor/github.com/Antonboom/testifylint/internal/config/config.go +++ b/tools/vendor/github.com/Antonboom/testifylint/internal/config/config.go @@ -24,6 +24,7 @@ func NewDefault() Config { Formatter: FormatterConfig{ CheckFormatString: true, RequireFFuncs: false, + RequireStringMsg: true, }, GoRequire: GoRequireConfig{ IgnoreHTTPHandlers: false, @@ -66,6 +67,7 @@ type ExpectedActualConfig struct { type FormatterConfig 
struct { CheckFormatString bool RequireFFuncs bool + RequireStringMsg bool } // GoRequireConfig implements configuration of checkers.GoRequire. @@ -133,7 +135,10 @@ func BindToFlags(cfg *Config, fs *flag.FlagSet) { "to enable go vet's printf checks") fs.BoolVar(&cfg.Formatter.RequireFFuncs, "formatter.require-f-funcs", false, - "to require f-assertions (e.g. assert.Equalf) if format string is used, even if there are no variable-length variables.") + "to require f-assertions (e.g. assert.Equalf) if format string is used, even if there are no variable-length variables") + fs.BoolVar(&cfg.Formatter.RequireStringMsg, + "formatter.require-string-msg", true, + "to require that the first element of msgAndArgs (msg) has a string type") fs.BoolVar(&cfg.GoRequire.IgnoreHTTPHandlers, "go-require.ignore-http-handlers", false, diff --git a/tools/vendor/github.com/BurntSushi/toml/README.md b/tools/vendor/github.com/BurntSushi/toml/README.md index 639e6c399..235496eeb 100644 --- a/tools/vendor/github.com/BurntSushi/toml/README.md +++ b/tools/vendor/github.com/BurntSushi/toml/README.md @@ -3,7 +3,7 @@ reflection interface similar to Go's standard library `json` and `xml` packages. Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0). -Documentation: https://godocs.io/github.com/BurntSushi/toml +Documentation: https://pkg.go.dev/github.com/BurntSushi/toml See the [releases page](https://github.com/BurntSushi/toml/releases) for a changelog; this information is also in the git tag annotations (e.g. `git show diff --git a/tools/vendor/github.com/BurntSushi/toml/decode.go b/tools/vendor/github.com/BurntSushi/toml/decode.go index c05a0b7e5..3fa516caa 100644 --- a/tools/vendor/github.com/BurntSushi/toml/decode.go +++ b/tools/vendor/github.com/BurntSushi/toml/decode.go @@ -196,6 +196,19 @@ func (md *MetaData) PrimitiveDecode(primValue Primitive, v any) error { return md.unify(primValue.undecoded, rvalue(v)) } +// markDecodedRecursive is a helper to mark any key under the given tmap as +// decoded, recursing as needed +func markDecodedRecursive(md *MetaData, tmap map[string]any) { + for key := range tmap { + md.decoded[md.context.add(key).String()] = struct{}{} + if tmap, ok := tmap[key].(map[string]any); ok { + md.context = append(md.context, key) + markDecodedRecursive(md, tmap) + md.context = md.context[0 : len(md.context)-1] + } + } +} + // unify performs a sort of type unification based on the structure of `rv`, // which is the client representation. // @@ -222,6 +235,16 @@ func (md *MetaData) unify(data any, rv reflect.Value) error { if err != nil { return md.parseErr(err) } + // Assume the Unmarshaler decoded everything, so mark all keys under + // this table as decoded. + if tmap, ok := data.(map[string]any); ok { + markDecodedRecursive(md, tmap) + } + if aot, ok := data.([]map[string]any); ok { + for _, tmap := range aot { + markDecodedRecursive(md, tmap) + } + } return nil } if v, ok := rvi.(encoding.TextUnmarshaler); ok { diff --git a/tools/vendor/github.com/BurntSushi/toml/encode.go b/tools/vendor/github.com/BurntSushi/toml/encode.go index 73366c0d9..ac196e7df 100644 --- a/tools/vendor/github.com/BurntSushi/toml/encode.go +++ b/tools/vendor/github.com/BurntSushi/toml/encode.go @@ -402,31 +402,30 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) { // Sort keys so that we have deterministic output. And write keys directly // underneath this key first, before writing sub-structs or sub-maps. 
- var mapKeysDirect, mapKeysSub []string + var mapKeysDirect, mapKeysSub []reflect.Value for _, mapKey := range rv.MapKeys() { - k := mapKey.String() if typeIsTable(tomlTypeOfGo(eindirect(rv.MapIndex(mapKey)))) { - mapKeysSub = append(mapKeysSub, k) + mapKeysSub = append(mapKeysSub, mapKey) } else { - mapKeysDirect = append(mapKeysDirect, k) + mapKeysDirect = append(mapKeysDirect, mapKey) } } - var writeMapKeys = func(mapKeys []string, trailC bool) { - sort.Strings(mapKeys) + writeMapKeys := func(mapKeys []reflect.Value, trailC bool) { + sort.Slice(mapKeys, func(i, j int) bool { return mapKeys[i].String() < mapKeys[j].String() }) for i, mapKey := range mapKeys { - val := eindirect(rv.MapIndex(reflect.ValueOf(mapKey))) + val := eindirect(rv.MapIndex(mapKey)) if isNil(val) { continue } if inline { - enc.writeKeyValue(Key{mapKey}, val, true) + enc.writeKeyValue(Key{mapKey.String()}, val, true) if trailC || i != len(mapKeys)-1 { enc.wf(", ") } } else { - enc.encode(key.add(mapKey), val) + enc.encode(key.add(mapKey.String()), val) } } } @@ -441,8 +440,6 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) { } } -const is32Bit = (32 << (^uint(0) >> 63)) == 32 - func pointerTo(t reflect.Type) reflect.Type { if t.Kind() == reflect.Ptr { return pointerTo(t.Elem()) @@ -477,15 +474,14 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) { frv := eindirect(rv.Field(i)) - if is32Bit { - // Copy so it works correct on 32bit archs; not clear why this - // is needed. See #314, and https://www.reddit.com/r/golang/comments/pnx8v4 - // This also works fine on 64bit, but 32bit archs are somewhat - // rare and this is a wee bit faster. - copyStart := make([]int, len(start)) - copy(copyStart, start) - start = copyStart - } + // Need to make a copy because ... ehm, I don't know why... I guess + // allocating a new array can cause it to fail(?) + // + // Done for: https://github.com/BurntSushi/toml/issues/430 + // Previously only on 32bit for: https://github.com/BurntSushi/toml/issues/314 + copyStart := make([]int, len(start)) + copy(copyStart, start) + start = copyStart // Treat anonymous struct fields with tag names as though they are // not anonymous, like encoding/json does. @@ -507,7 +503,7 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) { } addFields(rt, rv, nil) - writeFields := func(fields [][]int) { + writeFields := func(fields [][]int, totalFields int) { for _, fieldIndex := range fields { fieldType := rt.FieldByIndex(fieldIndex) fieldVal := rv.FieldByIndex(fieldIndex) @@ -537,7 +533,7 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) { if inline { enc.writeKeyValue(Key{keyName}, fieldVal, true) - if fieldIndex[0] != len(fields)-1 { + if fieldIndex[0] != totalFields-1 { enc.wf(", ") } } else { @@ -549,8 +545,10 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) { if inline { enc.wf("{") } - writeFields(fieldsDirect) - writeFields(fieldsSub) + + l := len(fieldsDirect) + len(fieldsSub) + writeFields(fieldsDirect, l) + writeFields(fieldsSub, l) if inline { enc.wf("}") } diff --git a/tools/vendor/github.com/BurntSushi/toml/error.go b/tools/vendor/github.com/BurntSushi/toml/error.go index 1dd523211..b7077d3ae 100644 --- a/tools/vendor/github.com/BurntSushi/toml/error.go +++ b/tools/vendor/github.com/BurntSushi/toml/error.go @@ -69,7 +69,7 @@ type Position struct { Line int // Line number, starting at 1. Col int // Error column, starting at 1. Start int // Start of error, as byte offset starting at 0. 
- Len int // Lenght of the error in bytes. + Len int // Length of the error in bytes. } func (p Position) withCol(tomlFile string) Position { diff --git a/tools/vendor/github.com/BurntSushi/toml/lex.go b/tools/vendor/github.com/BurntSushi/toml/lex.go index 6878d9d69..1c3b47702 100644 --- a/tools/vendor/github.com/BurntSushi/toml/lex.go +++ b/tools/vendor/github.com/BurntSushi/toml/lex.go @@ -275,7 +275,9 @@ func (lx *lexer) errorPos(start, length int, err error) stateFn { func (lx *lexer) errorf(format string, values ...any) stateFn { if lx.atEOF { pos := lx.getPos() - pos.Line-- + if lx.pos >= 1 && lx.input[lx.pos-1] == '\n' { + pos.Line-- + } pos.Len = 1 pos.Start = lx.pos - 1 lx.items <- item{typ: itemError, pos: pos, err: fmt.Errorf(format, values...)} @@ -1117,7 +1119,7 @@ func lexBaseNumberOrDate(lx *lexer) stateFn { case 'x': r = lx.peek() if !isHex(r) { - lx.errorf("not a hexidecimal number: '%s%c'", lx.current(), r) + lx.errorf("not a hexadecimal number: '%s%c'", lx.current(), r) } return lexHexInteger } @@ -1265,23 +1267,6 @@ func isBinary(r rune) bool { return r == '0' || r == '1' } func isOctal(r rune) bool { return r >= '0' && r <= '7' } func isHex(r rune) bool { return (r >= '0' && r <= '9') || (r|0x20 >= 'a' && r|0x20 <= 'f') } func isBareKeyChar(r rune, tomlNext bool) bool { - if tomlNext { - return (r >= 'A' && r <= 'Z') || - (r >= 'a' && r <= 'z') || - (r >= '0' && r <= '9') || - r == '_' || r == '-' || - r == 0xb2 || r == 0xb3 || r == 0xb9 || (r >= 0xbc && r <= 0xbe) || - (r >= 0xc0 && r <= 0xd6) || (r >= 0xd8 && r <= 0xf6) || (r >= 0xf8 && r <= 0x037d) || - (r >= 0x037f && r <= 0x1fff) || - (r >= 0x200c && r <= 0x200d) || (r >= 0x203f && r <= 0x2040) || - (r >= 0x2070 && r <= 0x218f) || (r >= 0x2460 && r <= 0x24ff) || - (r >= 0x2c00 && r <= 0x2fef) || (r >= 0x3001 && r <= 0xd7ff) || - (r >= 0xf900 && r <= 0xfdcf) || (r >= 0xfdf0 && r <= 0xfffd) || - (r >= 0x10000 && r <= 0xeffff) - } - - return (r >= 'A' && r <= 'Z') || - (r >= 'a' && r <= 'z') || - (r >= '0' && r <= '9') || - r == '_' || r == '-' + return (r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') || + (r >= '0' && r <= '9') || r == '_' || r == '-' } diff --git a/tools/vendor/github.com/BurntSushi/toml/meta.go b/tools/vendor/github.com/BurntSushi/toml/meta.go index e61453730..0d337026c 100644 --- a/tools/vendor/github.com/BurntSushi/toml/meta.go +++ b/tools/vendor/github.com/BurntSushi/toml/meta.go @@ -135,9 +135,6 @@ func (k Key) maybeQuoted(i int) string { // Like append(), but only increase the cap by 1. func (k Key) add(piece string) Key { - if cap(k) > len(k) { - return append(k, piece) - } newKey := make(Key, len(k)+1) copy(newKey, k) newKey[len(k)] = piece diff --git a/tools/vendor/github.com/BurntSushi/toml/parse.go b/tools/vendor/github.com/BurntSushi/toml/parse.go index 3f2c090c8..e3ea8a9a2 100644 --- a/tools/vendor/github.com/BurntSushi/toml/parse.go +++ b/tools/vendor/github.com/BurntSushi/toml/parse.go @@ -50,7 +50,6 @@ func parse(data string) (p *parser, err error) { // it anyway. if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") { // UTF-16 data = data[2:] - //lint:ignore S1017 https://github.com/dominikh/go-tools/issues/1447 } else if strings.HasPrefix(data, "\xef\xbb\xbf") { // UTF-8 data = data[3:] } @@ -529,7 +528,7 @@ func numUnderscoresOK(s string) bool { } } - // isHexis a superset of all the permissable characters surrounding an + // isHex is a superset of all the permissible characters surrounding an // underscore. 
accept = isHex(r) } diff --git a/tools/vendor/github.com/Djarvur/go-err113/comparison.go b/tools/vendor/github.com/Djarvur/go-err113/comparison.go index 8a8555783..0ffe2863c 100644 --- a/tools/vendor/github.com/Djarvur/go-err113/comparison.go +++ b/tools/vendor/github.com/Djarvur/go-err113/comparison.go @@ -32,12 +32,12 @@ func inspectComparision(pass *analysis.Pass, n ast.Node) bool { // nolint: unpar negate = "!" } - newExpr := fmt.Sprintf("%s%s.Is(%s, %s)", negate, "errors", rawString(be.X), rawString(be.Y)) + newExpr := fmt.Sprintf("%s%s.Is(%s, %s)", negate, "errors", be.X, be.Y) pass.Report( analysis.Diagnostic{ Pos: be.Pos(), - Message: fmt.Sprintf("do not compare errors directly %q, use %q instead", oldExpr, newExpr), + Message: fmt.Sprintf("do not compare errors directly, use errors.Is() instead: %q", oldExpr), SuggestedFixes: []analysis.SuggestedFix{ { Message: fmt.Sprintf("should replace %q with %q", oldExpr, newExpr), @@ -109,15 +109,3 @@ func areBothErrors(x, y ast.Expr, typesInfo *types.Info) bool { return true } - -func rawString(x ast.Expr) string { - switch t := x.(type) { - case *ast.Ident: - return t.Name - case *ast.SelectorExpr: - return fmt.Sprintf("%s.%s", rawString(t.X), t.Sel.Name) - case *ast.CallExpr: - return fmt.Sprintf("%s()", rawString(t.Fun)) - } - return fmt.Sprintf("%s", x) -} diff --git a/tools/vendor/github.com/Djarvur/go-err113/definition.go b/tools/vendor/github.com/Djarvur/go-err113/definition.go index 689236bac..79201c929 100644 --- a/tools/vendor/github.com/Djarvur/go-err113/definition.go +++ b/tools/vendor/github.com/Djarvur/go-err113/definition.go @@ -67,7 +67,7 @@ func inspectDefinition(pass *analysis.Pass, tlds map[*ast.CallExpr]struct{}, n a func toString(ex ast.Expr, info *types.Info) string { if tv, ok := info.Types[ex]; ok && tv.Value != nil { - return tv.Value.ExactString() + return tv.Value.String() } return "" diff --git a/tools/vendor/github.com/GaijinEntertainment/go-exhaustruct/v3/analyzer/analyzer.go b/tools/vendor/github.com/GaijinEntertainment/go-exhaustruct/v3/analyzer/analyzer.go index ec75fd409..5be31eb68 100644 --- a/tools/vendor/github.com/GaijinEntertainment/go-exhaustruct/v3/analyzer/analyzer.go +++ b/tools/vendor/github.com/GaijinEntertainment/go-exhaustruct/v3/analyzer/analyzer.go @@ -150,7 +150,7 @@ func getCompositeLitRelatedComments(stack []ast.Node, cm ast.CommentMap) []*ast. } func getStructType(pass *analysis.Pass, lit *ast.CompositeLit) (*types.Struct, *TypeInfo, bool) { - switch typ := pass.TypesInfo.TypeOf(lit).(type) { + switch typ := types.Unalias(pass.TypesInfo.TypeOf(lit)).(type) { case *types.Named: // named type if structTyp, ok := typ.Underlying().(*types.Struct); ok { pkg := typ.Obj().Pkg() diff --git a/tools/vendor/github.com/Masterminds/semver/v3/version.go b/tools/vendor/github.com/Masterminds/semver/v3/version.go index ff499fb66..304edc342 100644 --- a/tools/vendor/github.com/Masterminds/semver/v3/version.go +++ b/tools/vendor/github.com/Masterminds/semver/v3/version.go @@ -39,9 +39,11 @@ var ( ) // semVerRegex is the regular expression used to parse a semantic version. -const semVerRegex string = `v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` + - `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + - `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` +// This is not the official regex from the semver spec. It has been modified to allow for loose handling +// where versions like 2.1 are detected. 
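+//
+// For example (an illustrative sketch, not upstream text), the loose handling
+// means a two-part version still parses:
+//
+//	v, err := semver.NewVersion("2.1") // err == nil
+//	_ = v.Minor()                      // 1; the missing patch segment defaults to 0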
+const semVerRegex string = `v?(0|[1-9]\d*)(?:\.(0|[1-9]\d*))?(?:\.(0|[1-9]\d*))?` + + `(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?` + + `(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?` // Version represents a single semantic version. type Version struct { @@ -146,8 +148,8 @@ func NewVersion(v string) (*Version, error) { } sv := &Version{ - metadata: m[8], - pre: m[5], + metadata: m[5], + pre: m[4], original: v, } @@ -158,7 +160,7 @@ func NewVersion(v string) (*Version, error) { } if m[2] != "" { - sv.minor, err = strconv.ParseUint(strings.TrimPrefix(m[2], "."), 10, 64) + sv.minor, err = strconv.ParseUint(m[2], 10, 64) if err != nil { return nil, fmt.Errorf("Error parsing version segment: %s", err) } @@ -167,7 +169,7 @@ func NewVersion(v string) (*Version, error) { } if m[3] != "" { - sv.patch, err = strconv.ParseUint(strings.TrimPrefix(m[3], "."), 10, 64) + sv.patch, err = strconv.ParseUint(m[3], 10, 64) if err != nil { return nil, fmt.Errorf("Error parsing version segment: %s", err) } @@ -612,7 +614,9 @@ func containsOnly(s string, comp string) bool { func validatePrerelease(p string) error { eparts := strings.Split(p, ".") for _, p := range eparts { - if containsOnly(p, num) { + if p == "" { + return ErrInvalidMetadata + } else if containsOnly(p, num) { if len(p) > 1 && p[0] == '0' { return ErrSegmentStartsZero } @@ -631,7 +635,9 @@ func validateMetadata(m string) error { eparts := strings.Split(m, ".") for _, p := range eparts { - if !containsOnly(p, allowed) { + if p == "" { + return ErrInvalidMetadata + } else if !containsOnly(p, allowed) { return ErrInvalidMetadata } } diff --git a/tools/vendor/github.com/bombsimon/wsl/v4/README.md b/tools/vendor/github.com/bombsimon/wsl/v4/README.md index c9c42341e..e98fe1b4a 100644 --- a/tools/vendor/github.com/bombsimon/wsl/v4/README.md +++ b/tools/vendor/github.com/bombsimon/wsl/v4/README.md @@ -28,11 +28,6 @@ go install github.com/bombsimon/wsl/v4/cmd/wsl@master ## Usage -> **Note**: This linter provides a fixer that can fix most issues with the -> `--fix` flag. However, currently `golangci-lint` [does not support suggested -> fixes](https://github.com/golangci/golangci-lint/issues/1779) so the `--fix` -> flag in `golangci-lint` will **not** work. - `wsl` uses the [analysis](https://pkg.go.dev/golang.org/x/tools/go/analysis) package meaning it will operate on package level with the default analysis flags and way of working. @@ -47,9 +42,12 @@ wsl --allow-cuddle-declarations --fix ./... `wsl` is also integrated in [`golangci-lint`](https://golangci-lint.run) ```sh -golangci-lint run --no-config --disable-all --enable wsl +golangci-lint run --no-config --disable-all --enable wsl --fix ``` +> **Note**: If you're not sure what the diagnostic is trying to tell you, use +> any of the fix approaches to fix the code for you. + ## Issues and configuration The linter supports a few ways to configure it to satisfy more than one kind of documentation](doc/configuration.md). Below is the available checklist for any hit from `wsl`. If you do not see any, feel free to raise an [issue](https://github.com/bombsimon/wsl/issues/new). -> **Note**: this linter doesn't take in consideration the issues that will be -> fixed with `go fmt -s` so ensure that the code is properly formatted before -> use.
* [Anonymous switch statements should never be cuddled](doc/rules.md#anonymous-switch-statements-should-never-be-cuddled) * [Append only allowed to cuddle with appended value](doc/rules.md#append-only-allowed-to-cuddle-with-appended-value) * [Assignments should only be cuddled with other assignments](doc/rules.md#assignments-should-only-be-cuddled-with-other-assignments) diff --git a/tools/vendor/github.com/bombsimon/wsl/v4/analyzer.go b/tools/vendor/github.com/bombsimon/wsl/v4/analyzer.go index e51df89c6..9be43d91c 100644 --- a/tools/vendor/github.com/bombsimon/wsl/v4/analyzer.go +++ b/tools/vendor/github.com/bombsimon/wsl/v4/analyzer.go @@ -37,6 +37,7 @@ func defaultConfig() *Configuration { AllowCuddleWithRHS: []string{"Unlock", "RUnlock"}, ErrorVariableNames: []string{"err"}, ForceCaseTrailingWhitespaceLimit: 0, + AllowCuddleUsedInBlock: false, } } @@ -68,6 +69,7 @@ func (wa *wslAnalyzer) flags() flag.FlagSet { flags.BoolVar(&wa.config.ForceExclusiveShortDeclarations, "force-short-decl-cuddling", false, "Force short declarations to cuddle by themselves") flags.BoolVar(&wa.config.StrictAppend, "strict-append", true, "Strict rules for append") flags.BoolVar(&wa.config.IncludeGenerated, "include-generated", false, "Include generated files") + flags.BoolVar(&wa.config.AllowCuddleUsedInBlock, "allow-cuddle-used-in-block", false, "Allow cuddling of variables used in block statements") flags.IntVar(&wa.config.ForceCaseTrailingWhitespaceLimit, "force-case-trailing-whitespace", 0, "Force newlines for case blocks > this number.") flags.Var(&multiStringValue{slicePtr: &wa.config.AllowCuddleWithCalls}, "allow-cuddle-with-calls", "Comma separated list of idents that can have cuddles after") diff --git a/tools/vendor/github.com/bombsimon/wsl/v4/wsl.go b/tools/vendor/github.com/bombsimon/wsl/v4/wsl.go index 44c7abe21..abfad9372 100644 --- a/tools/vendor/github.com/bombsimon/wsl/v4/wsl.go +++ b/tools/vendor/github.com/bombsimon/wsl/v4/wsl.go @@ -179,6 +179,18 @@ type Configuration struct { // errors even for generated files. Can be useful when developing // generators. IncludeGenerated bool + + // AllowCuddleUsedInBlock will allow cuddling of variables with block statements + // if they are used anywhere in the block. This defaults to false but setting + // it to true will allow the following example: + // + // var numbers []int + // for i := 0; i < 10; i++ { + // if 1 == 1 { + // numbers = append(numbers, i) + // } + // } + AllowCuddleUsedInBlock bool } // fix is a range to fixup. @@ -304,12 +316,18 @@ func (p *processor) parseBlockStatements(statements []ast.Stmt) { } } - // We could potentially have a block which require us to check the first - // argument before ruling out an allowed cuddle. - var calledOrAssignedFirstInBlock []string + // Contains the union of all variable names used anywhere + // within the block body (if applicable) and is used to check + // if a preceding statement's variables are actually used within + // the block. This helps enforce rules about allowed cuddling. + var identifiersUsedInBlock []string if firstBodyStatement != nil { + if p.config.AllowCuddleUsedInBlock { + identifiersUsedInBlock = p.findUsedVariablesInStatement(stmt) + } else { - calledOrAssignedFirstInBlock = append(p.findLHS(firstBodyStatement), p.findRHS(firstBodyStatement)...) + identifiersUsedInBlock = append(p.findLHS(firstBodyStatement), p.findRHS(firstBodyStatement)...)
+ } } var ( @@ -426,7 +444,7 @@ func (p *processor) parseBlockStatements(statements []ast.Stmt) { reportNewlineTwoLinesAbove := func(n1, n2 ast.Node, reason string) { if atLeastOneInListsMatch(rightAndLeftHandSide, assignedOnLineAbove) || - atLeastOneInListsMatch(assignedOnLineAbove, calledOrAssignedFirstInBlock) { + atLeastOneInListsMatch(assignedOnLineAbove, identifiersUsedInBlock) { // If both the assignment on the line above _and_ the assignment // two lines above are part of line or first in block, add the // newline as if none were. @@ -435,7 +453,7 @@ if isAssignmentTwoLinesAbove && (atLeastOneInListsMatch(rightAndLeftHandSide, assignedTwoLinesAbove) || - atLeastOneInListsMatch(assignedTwoLinesAbove, calledOrAssignedFirstInBlock)) { + atLeastOneInListsMatch(assignedTwoLinesAbove, identifiersUsedInBlock)) { p.addWhitespaceBeforeError(n1, reason) } else { // If the variable on the line above is allowed to be @@ -507,7 +525,7 @@ continue } - if atLeastOneInListsMatch(assignedOnLineAbove, calledOrAssignedFirstInBlock) { + if atLeastOneInListsMatch(assignedOnLineAbove, identifiersUsedInBlock) { continue } @@ -611,7 +629,7 @@ } if !atLeastOneInListsMatch(rightAndLeftHandSide, assignedOnLineAbove) { - if !atLeastOneInListsMatch(assignedOnLineAbove, calledOrAssignedFirstInBlock) { + if !atLeastOneInListsMatch(assignedOnLineAbove, identifiersUsedInBlock) { p.addWhitespaceBeforeError(t, reasonRangeCuddledWithoutUse) } } @@ -679,7 +697,7 @@ } } - if atLeastOneInListsMatch(assignedOnLineAbove, calledOrAssignedFirstInBlock) { + if atLeastOneInListsMatch(assignedOnLineAbove, identifiersUsedInBlock) { continue } @@ -687,7 +705,7 @@ p.addWhitespaceBeforeError(t, reasonDeferCuddledWithOtherVar) } case *ast.ForStmt: - if len(rightAndLeftHandSide) == 0 { + if len(rightAndLeftHandSide) == 0 && !p.config.AllowCuddleUsedInBlock { p.addWhitespaceBeforeError(t, reasonForWithoutCondition) continue } @@ -701,7 +719,7 @@ // comments regarding variable usages on the line before or as the // first line in the block for details. if !atLeastOneInListsMatch(rightAndLeftHandSide, assignedOnLineAbove) { - if !atLeastOneInListsMatch(assignedOnLineAbove, calledOrAssignedFirstInBlock) { + if !atLeastOneInListsMatch(assignedOnLineAbove, identifiersUsedInBlock) { p.addWhitespaceBeforeError(t, reasonForCuddledAssignWithoutUse) } } @@ -742,6 +760,10 @@ if !atLeastOneInListsMatch(rightAndLeftHandSide, assignedOnLineAbove) { if len(rightAndLeftHandSide) == 0 { + if p.config.AllowCuddleUsedInBlock { + continue + } + p.addWhitespaceBeforeError(t, reasonAnonSwitchCuddled) } else { p.addWhitespaceBeforeError(t, reasonSwitchCuddledWithoutUse) @@ -757,7 +779,7 @@ if !atLeastOneInListsMatch(rightHandSide, assignedOnLineAbove) { // Allow type assertion on variables used in the first case // immediately.
- if !atLeastOneInListsMatch(assignedOnLineAbove, calledOrAssignedFirstInBlock) { + if !atLeastOneInListsMatch(assignedOnLineAbove, identifiersUsedInBlock) { p.addWhitespaceBeforeError(t, reasonTypeSwitchCuddledWithoutUse) } } @@ -839,6 +861,32 @@ func (p *processor) firstBodyStatement(i int, allStmt []ast.Stmt) ast.Node { return firstBodyStatement } +// findUsedVariablesInStatement processes a statement, +// returning a union of all variables used within it. +func (p *processor) findUsedVariablesInStatement(stmt ast.Stmt) []string { + var ( + used []string + seen = map[string]struct{}{} + ) + + // ast.Inspect walks the AST of the statement. + ast.Inspect(stmt, func(n ast.Node) bool { + // We're only interested in identifiers. + if ident, ok := n.(*ast.Ident); ok { + if _, exists := seen[ident.Name]; !exists { + seen[ident.Name] = struct{}{} + + used = append(used, ident.Name) + } + } + + // Continue walking the AST. + return true + }) + + return used +} + func (p *processor) findLHS(node ast.Node) []string { var lhs []string diff --git a/tools/vendor/github.com/catenacyber/perfsprint/analyzer/analyzer.go b/tools/vendor/github.com/catenacyber/perfsprint/analyzer/analyzer.go index a551a2418..c40e84d41 100644 --- a/tools/vendor/github.com/catenacyber/perfsprint/analyzer/analyzer.go +++ b/tools/vendor/github.com/catenacyber/perfsprint/analyzer/analyzer.go @@ -54,25 +54,43 @@ func newPerfSprint() *perfSprint { } } +const ( + // checkerErrorFormat checks for error formatting. + checkerErrorFormat = "error-format" + // checkerIntegerFormat checks for integer formatting. + checkerIntegerFormat = "integer-format" + // checkerStringFormat checks for string formatting. + checkerStringFormat = "string-format" + // checkerBoolFormat checks for bool formatting. + checkerBoolFormat = "bool-format" + // checkerHexFormat checks for hexadecimal formatting. + checkerHexFormat = "hex-format" + // checkerFixImports fixes needed imports from other fixes.
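+	// (An illustrative note, not upstream text: when an integer-format fix
+	// rewrites fmt.Sprintf("%d", i) into strconv.Itoa(i), this pass adds the
+	// missing "strconv" import.)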
+ checkerFixImports = "fiximports" +) + func New() *analysis.Analyzer { n := newPerfSprint() r := &analysis.Analyzer{ Name: "perfsprint", + URL: "https://github.com/catenacyber/perfsprint", Doc: "Checks that fmt.Sprintf can be replaced with a faster alternative.", Run: n.run, Requires: []*analysis.Analyzer{inspect.Analyzer}, } - r.Flags.BoolVar(&n.intFormat.enabled, "integer-format", true, "enable/disable optimization of integer formatting") - r.Flags.BoolVar(&n.intFormat.intConv, "int-conversion", true, "optimizes even if it requires an int or uint type cast") - r.Flags.BoolVar(&n.errFormat.enabled, "error-format", true, "enable/disable optimization of error formatting") - r.Flags.BoolVar(&n.errFormat.errError, "err-error", false, "optimizes into err.Error() even if it is only equivalent for non-nil errors") - r.Flags.BoolVar(&n.errFormat.errorf, "errorf", true, "optimizes fmt.Errorf") - r.Flags.BoolVar(&n.boolFormat, "bool-format", true, "enable/disable optimization of bool formatting") - r.Flags.BoolVar(&n.hexFormat, "hex-format", true, "enable/disable optimization of hex formatting") - r.Flags.BoolVar(&n.strFormat.enabled, "string-format", true, "enable/disable optimization of string formatting") - r.Flags.BoolVar(&n.strFormat.sprintf1, "sprintf1", true, "optimizes fmt.Sprintf with only one argument") - r.Flags.BoolVar(&n.strFormat.strconcat, "strconcat", true, "optimizes into strings concatenation") - r.Flags.BoolVar(&n.fiximports, "fiximports", true, "fix needed imports from other fixes") + + r.Flags.BoolVar(&n.intFormat.enabled, checkerIntegerFormat, n.intFormat.enabled, "enable/disable optimization of integer formatting") + r.Flags.BoolVar(&n.intFormat.intConv, "int-conversion", n.intFormat.intConv, "optimizes even if it requires an int or uint type cast") + r.Flags.BoolVar(&n.errFormat.enabled, checkerErrorFormat, n.errFormat.enabled, "enable/disable optimization of error formatting") + r.Flags.BoolVar(&n.errFormat.errError, "err-error", n.errFormat.errError, "optimizes into err.Error() even if it is only equivalent for non-nil errors") + r.Flags.BoolVar(&n.errFormat.errorf, "errorf", n.errFormat.errorf, "optimizes fmt.Errorf") + + r.Flags.BoolVar(&n.boolFormat, checkerBoolFormat, n.boolFormat, "enable/disable optimization of bool formatting") + r.Flags.BoolVar(&n.hexFormat, checkerHexFormat, n.hexFormat, "enable/disable optimization of hex formatting") + r.Flags.BoolVar(&n.strFormat.enabled, checkerStringFormat, n.strFormat.enabled, "enable/disable optimization of string formatting") + r.Flags.BoolVar(&n.strFormat.sprintf1, "sprintf1", n.strFormat.sprintf1, "optimizes fmt.Sprintf with only one argument") + r.Flags.BoolVar(&n.strFormat.strconcat, "strconcat", n.strFormat.strconcat, "optimizes into strings concatenation") + r.Flags.BoolVar(&n.fiximports, checkerFixImports, n.fiximports, "fix needed imports from other fixes") return r } @@ -197,7 +215,7 @@ func (n *perfSprint) run(pass *analysis.Pass) (interface{}, error) { if fn == "fmt.Errorf" && n.errFormat.enabled { neededPackages[fname]["errors"] = struct{}{} d = newAnalysisDiagnostic( - "", // TODO: precise checker + checkerErrorFormat, call, fn+" can be replaced with errors.New", []analysis.SuggestedFix{ @@ -213,7 +231,7 @@ func (n *perfSprint) run(pass *analysis.Pass) (interface{}, error) { ) } else if fn != "fmt.Errorf" && n.strFormat.enabled { d = newAnalysisDiagnostic( - "", // TODO: precise checker + checkerStringFormat, call, fn+" can be replaced with just using the string", []analysis.SuggestedFix{ @@ -235,7 +253,7 @@ 
func (n *perfSprint) run(pass *analysis.Pass) (interface{}, error) { fname := pass.Fset.File(call.Pos()).Name() removedFmtUsages[fname]++ d = newAnalysisDiagnostic( - "", // TODO: precise checker + checkerErrorFormat, call, fn+" can be replaced with "+errMethodCall, []analysis.SuggestedFix{ @@ -258,7 +276,7 @@ func (n *perfSprint) run(pass *analysis.Pass) (interface{}, error) { } neededPackages[fname]["strconv"] = struct{}{} d = newAnalysisDiagnostic( - "", // TODO: precise checker + checkerBoolFormat, call, fn+" can be replaced with faster strconv.FormatBool", []analysis.SuggestedFix{ @@ -286,7 +304,7 @@ func (n *perfSprint) run(pass *analysis.Pass) (interface{}, error) { } neededPackages[fname]["encoding/hex"] = struct{}{} d = newAnalysisDiagnostic( - "", // TODO: precise checker + checkerHexFormat, call, fn+" can be replaced with faster hex.EncodeToString", []analysis.SuggestedFix{ @@ -315,7 +333,7 @@ func (n *perfSprint) run(pass *analysis.Pass) (interface{}, error) { } neededPackages[fname]["encoding/hex"] = struct{}{} d = newAnalysisDiagnostic( - "", // TODO: precise checker + checkerHexFormat, call, fn+" can be replaced with faster hex.EncodeToString", []analysis.SuggestedFix{ @@ -338,7 +356,7 @@ func (n *perfSprint) run(pass *analysis.Pass) (interface{}, error) { } neededPackages[fname]["strconv"] = struct{}{} d = newAnalysisDiagnostic( - "", // TODO: precise checker + checkerIntegerFormat, call, fn+" can be replaced with faster strconv.Itoa", []analysis.SuggestedFix{ @@ -367,7 +385,7 @@ func (n *perfSprint) run(pass *analysis.Pass) (interface{}, error) { } neededPackages[fname]["strconv"] = struct{}{} d = newAnalysisDiagnostic( - "", // TODO: precise checker + checkerIntegerFormat, call, fn+" can be replaced with faster strconv.Itoa", []analysis.SuggestedFix{ @@ -389,7 +407,7 @@ func (n *perfSprint) run(pass *analysis.Pass) (interface{}, error) { } neededPackages[fname]["strconv"] = struct{}{} d = newAnalysisDiagnostic( - "", // TODO: precise checker + checkerIntegerFormat, call, fn+" can be replaced with faster strconv.FormatInt", []analysis.SuggestedFix{ @@ -423,7 +441,7 @@ func (n *perfSprint) run(pass *analysis.Pass) (interface{}, error) { } neededPackages[fname]["strconv"] = struct{}{} d = newAnalysisDiagnostic( - "", // TODO: precise checker + checkerIntegerFormat, call, fn+" can be replaced with faster strconv.FormatUint", []analysis.SuggestedFix{ @@ -456,7 +474,7 @@ func (n *perfSprint) run(pass *analysis.Pass) (interface{}, error) { } neededPackages[fname]["strconv"] = struct{}{} d = newAnalysisDiagnostic( - "", // TODO: precise checker + checkerIntegerFormat, call, fn+" can be replaced with faster strconv.FormatUint", []analysis.SuggestedFix{ @@ -480,18 +498,18 @@ func (n *perfSprint) run(pass *analysis.Pass) (interface{}, error) { case isBasicType(valueType, types.String) && fn == "fmt.Sprintf" && isConcatable(verb) && n.strFormat.enabled: var fix string if strings.HasSuffix(verb, "%s") { - fix = strconv.Quote(verb[:len(verb)-2]) + "+" + formatNode(pass.Fset, value) + fix = strings.ReplaceAll(strconv.Quote(verb[:len(verb)-2]), "%%", "%") + "+" + formatNode(pass.Fset, value) } else if strings.HasSuffix(verb, "%[1]s") { - fix = strconv.Quote(verb[:len(verb)-5]) + "+" + formatNode(pass.Fset, value) + fix = strings.ReplaceAll(strconv.Quote(verb[:len(verb)-5]), "%%", "%") + "+" + formatNode(pass.Fset, value) } else if strings.HasPrefix(verb, "%s") { - fix = formatNode(pass.Fset, value) + "+" + strconv.Quote(verb[2:]) + fix = formatNode(pass.Fset, value) + "+" + 
strings.ReplaceAll(strconv.Quote(verb[2:]), "%%", "%") } else { - fix = formatNode(pass.Fset, value) + "+" + strconv.Quote(verb[5:]) + fix = formatNode(pass.Fset, value) + "+" + strings.ReplaceAll(strconv.Quote(verb[5:]), "%%", "%") } fname := pass.Fset.File(call.Pos()).Name() removedFmtUsages[fname]++ d = newAnalysisDiagnostic( - "", // TODO: precise checker + checkerStringFormat, call, fn+" can be replaced with string concatenation", []analysis.SuggestedFix{ @@ -570,7 +588,7 @@ func (n *perfSprint) run(pass *analysis.Pass) (interface{}, error) { fix = fix + "\n\t\"" + k + `"` } pass.Report(*newAnalysisDiagnostic( - "", // TODO: precise checker + checkerFixImports, gd, "Fix imports", []analysis.SuggestedFix{ diff --git a/tools/vendor/github.com/catenacyber/perfsprint/analyzer/diagnostic.go b/tools/vendor/github.com/catenacyber/perfsprint/analyzer/diagnostic.go index 9384c6ef1..f1d8d090e 100644 --- a/tools/vendor/github.com/catenacyber/perfsprint/analyzer/diagnostic.go +++ b/tools/vendor/github.com/catenacyber/perfsprint/analyzer/diagnostic.go @@ -10,16 +10,15 @@ func newAnalysisDiagnostic( message string, suggestedFixes []analysis.SuggestedFix, ) *analysis.Diagnostic { - d := analysis.Diagnostic{ + if checker != "" { + message = checker + ": " + message + } + + return &analysis.Diagnostic{ Pos: analysisRange.Pos(), End: analysisRange.End(), SuggestedFixes: suggestedFixes, + Message: message, + Category: checker, // Possible hashtag available on the documentation } - if checker != "" { - d.Category = checker - d.Message = checker + ": " + message - } else { - d.Message = message - } - return &d } diff --git a/tools/vendor/github.com/fsnotify/fsnotify/.cirrus.yml b/tools/vendor/github.com/fsnotify/fsnotify/.cirrus.yml new file mode 100644 index 000000000..f4e7dbf37 --- /dev/null +++ b/tools/vendor/github.com/fsnotify/fsnotify/.cirrus.yml @@ -0,0 +1,14 @@ +freebsd_task: + name: 'FreeBSD' + freebsd_instance: + image_family: freebsd-14-1 + install_script: + - pkg update -f + - pkg install -y go + test_script: + # run tests as user "cirrus" instead of root + - pw useradd cirrus -m + - chown -R cirrus:cirrus . + - FSNOTIFY_BUFFER=4096 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... + - sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... + - FSNOTIFY_DEBUG=1 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race -v ./... 
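Returning to the perfsprint change above that rewrites fmt.Sprintf into plain concatenation: the added strings.ReplaceAll calls unescape the literal %% that is only meaningful inside a format string. A small runnable sketch (hypothetical values, not from the patch):

```go
package main

import "fmt"

func main() {
	name := "cache"

	// Inside a format string, a literal percent sign must be written as %%.
	before := fmt.Sprintf("100%% of %s", name)

	// The concatenation form suggested by perfsprint: the %% has to be
	// collapsed back to a single %, which is what the ReplaceAll in the
	// fix does.
	after := "100% of " + name

	fmt.Println(before == after) // true
}
```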
diff --git a/tools/vendor/github.com/fsnotify/fsnotify/.gitattributes b/tools/vendor/github.com/fsnotify/fsnotify/.gitattributes deleted file mode 100644 index 32f1001be..000000000 --- a/tools/vendor/github.com/fsnotify/fsnotify/.gitattributes +++ /dev/null @@ -1 +0,0 @@ -go.sum linguist-generated diff --git a/tools/vendor/github.com/fsnotify/fsnotify/.gitignore b/tools/vendor/github.com/fsnotify/fsnotify/.gitignore index 4cd0cbaf4..daea9dd6d 100644 --- a/tools/vendor/github.com/fsnotify/fsnotify/.gitignore +++ b/tools/vendor/github.com/fsnotify/fsnotify/.gitignore @@ -1,6 +1,10 @@ -# Setup a Global .gitignore for OS and editor generated files: -# https://help.github.com/articles/ignoring-files -# git config --global core.excludesfile ~/.gitignore_global +# go test -c output +*.test +*.test.exe -.vagrant -*.sublime-project +# Output of go build ./cmd/fsnotify +/fsnotify +/fsnotify.exe + +/test/kqueue +/test/a.out diff --git a/tools/vendor/github.com/fsnotify/fsnotify/AUTHORS b/tools/vendor/github.com/fsnotify/fsnotify/AUTHORS deleted file mode 100644 index 6cbabe5ef..000000000 --- a/tools/vendor/github.com/fsnotify/fsnotify/AUTHORS +++ /dev/null @@ -1,62 +0,0 @@ -# Names should be added to this file as -# Name or Organization -# The email address is not required for organizations. - -# You can update this list using the following command: -# -# $ (head -n10 AUTHORS && git shortlog -se | sed -E 's/^\s+[0-9]+\t//') | tee AUTHORS - -# Please keep the list sorted. - -Aaron L -Adrien Bustany -Alexey Kazakov -Amit Krishnan -Anmol Sethi -Bjørn Erik Pedersen -Brian Goff -Bruno Bigras -Caleb Spare -Case Nelson -Chris Howey -Christoffer Buchholz -Daniel Wagner-Hall -Dave Cheney -Eric Lin -Evan Phoenix -Francisco Souza -Gautam Dey -Hari haran -Ichinose Shogo -Johannes Ebke -John C Barstow -Kelvin Fo -Ken-ichirou MATSUZAWA -Matt Layher -Matthias Stone -Nathan Youngman -Nickolai Zeldovich -Oliver Bristow -Patrick -Paul Hammond -Pawel Knap -Pieter Droogendijk -Pratik Shinde -Pursuit92 -Riku Voipio -Rob Figueiredo -Rodrigo Chiossi -Slawek Ligus -Soge Zhang -Tiffany Jernigan -Tilak Sharma -Tobias Klauser -Tom Payne -Travis Cline -Tudor Golubenco -Vahe Khachikyan -Yukang -bronze1man -debrando -henrikedwards -铁哥 diff --git a/tools/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/tools/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md index cc01c08f5..fa854785d 100644 --- a/tools/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md +++ b/tools/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md @@ -1,11 +1,199 @@ # Changelog -All notable changes to this project will be documented in this file. +1.8.0 2024-10-31 +---------------- -The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), -and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+### Additions -## [Unreleased] +- all: add `FSNOTIFY_DEBUG` to print debug logs to stderr ([#619]) + +### Changes and fixes + +- windows: fix behaviour of `WatchList()` to be consistent with other platforms ([#610]) + +- kqueue: ignore events with Ident=0 ([#590]) + +- kqueue: set O_CLOEXEC to prevent passing file descriptors to children ([#617]) + +- kqueue: emit events as "/path/dir/file" instead of "path/link/file" when watching a symlink ([#625]) + +- inotify: don't send event for IN_DELETE_SELF when also watching the parent ([#620]) + +- inotify: fix panic when calling Remove() in a goroutine ([#650]) + +- fen: allow watching subdirectories of watched directories ([#621]) + +[#590]: https://github.com/fsnotify/fsnotify/pull/590 +[#610]: https://github.com/fsnotify/fsnotify/pull/610 +[#617]: https://github.com/fsnotify/fsnotify/pull/617 +[#619]: https://github.com/fsnotify/fsnotify/pull/619 +[#620]: https://github.com/fsnotify/fsnotify/pull/620 +[#621]: https://github.com/fsnotify/fsnotify/pull/621 +[#625]: https://github.com/fsnotify/fsnotify/pull/625 +[#650]: https://github.com/fsnotify/fsnotify/pull/650 + +1.7.0 - 2023-10-22 +------------------ +This version of fsnotify needs Go 1.17. + +### Additions + +- illumos: add FEN backend to support illumos and Solaris. ([#371]) + +- all: add `NewBufferedWatcher()` to use a buffered channel, which can be useful + in cases where you can't control the kernel buffer and receive a large number + of events in bursts. ([#550], [#572]) + +- all: add `AddWith()`, which is identical to `Add()` but allows passing + options. ([#521]) + +- windows: allow setting the ReadDirectoryChangesW() buffer size with + `fsnotify.WithBufferSize()`; the default of 64K is the highest value that + works on all platforms and is enough for most purposes, but in some cases a + higher buffer is needed. ([#521]) + +### Changes and fixes + +- inotify: remove watcher if a watched path is renamed ([#518]) + + After a rename the reported name wasn't updated, or was even an empty string. + Inotify doesn't provide any good facilities to update it, so just remove the + watcher. This is already how it worked on kqueue and FEN. + + On Windows this does work, and remains working. + +- windows: don't listen for file attribute changes ([#520]) + + File attribute changes are sent as `FILE_ACTION_MODIFIED` by the Windows API, + with no way to see if they're a file write or attribute change, so would show + up as a fsnotify.Write event. This is never useful, and could result in many + spurious Write events. + +- windows: return `ErrEventOverflow` if the buffer is full ([#525]) + + Before it would merely return "short read", making it hard to detect this + error. + +- kqueue: make sure events for all files are delivered properly when removing a + watched directory ([#526]) + + Previously they would get sent with `""` (empty string) or `"."` as the path + name. + +- kqueue: don't emit spurious Create events for symbolic links ([#524]) + + The link would get resolved but kqueue would "forget" it already saw the link + itself, resulting in a Create for every Write event for the directory. + +- all: return `ErrClosed` on `Add()` when the watcher is closed ([#516]) + +- other: add `Watcher.Errors` and `Watcher.Events` to the no-op `Watcher` in + `backend_other.go`, making it easier to use on unsupported platforms such as + WASM, AIX, etc.
([#528]) + +- other: use the `backend_other.go` no-op if the `appengine` build tag is set; + Google AppEngine forbids usage of the unsafe package so the inotify backend + won't compile there. + +[#371]: https://github.com/fsnotify/fsnotify/pull/371 +[#516]: https://github.com/fsnotify/fsnotify/pull/516 +[#518]: https://github.com/fsnotify/fsnotify/pull/518 +[#520]: https://github.com/fsnotify/fsnotify/pull/520 +[#521]: https://github.com/fsnotify/fsnotify/pull/521 +[#524]: https://github.com/fsnotify/fsnotify/pull/524 +[#525]: https://github.com/fsnotify/fsnotify/pull/525 +[#526]: https://github.com/fsnotify/fsnotify/pull/526 +[#528]: https://github.com/fsnotify/fsnotify/pull/528 +[#537]: https://github.com/fsnotify/fsnotify/pull/537 +[#550]: https://github.com/fsnotify/fsnotify/pull/550 +[#572]: https://github.com/fsnotify/fsnotify/pull/572 + +1.6.0 - 2022-10-13 +------------------ +This version of fsnotify needs Go 1.16 (this was already the case since 1.5.1, +but not documented). It also increases the minimum Linux version to 2.6.32. + +### Additions + +- all: add `Event.Has()` and `Op.Has()` ([#477]) + + This makes checking events a lot easier; for example: + + if event.Op&Write == Write && !(event.Op&Remove == Remove) { + } + + Becomes: + + if event.Has(Write) && !event.Has(Remove) { + } + +- all: add cmd/fsnotify ([#463]) + + A command-line utility for testing and some examples. + +### Changes and fixes + +- inotify: don't ignore events for files that don't exist ([#260], [#470]) + + Previously the inotify watcher would call `os.Lstat()` to check if a file + still exists before emitting events. + + This was inconsistent with other platforms and resulted in inconsistent event + reporting (e.g. when a file is quickly removed and re-created), and generally + a source of confusion. It was added in 2013 to fix a memory leak that no + longer exists. + +- all: return `ErrNonExistentWatch` when `Remove()` is called on a path that's + not watched ([#460]) + +- inotify: replace epoll() with non-blocking inotify ([#434]) + + Non-blocking inotify was not generally available at the time this library was + written in 2014, but now it is. As a result, the minimum Linux version is + bumped from 2.6.27 to 2.6.32. This hugely simplifies the code and is faster. + +- kqueue: don't check for events every 100ms ([#480]) + + The watcher would wake up every 100ms, even when there was nothing to do. Now + it waits until there is something to do. + +- macos: retry opening files on EINTR ([#475]) + +- kqueue: skip unreadable files ([#479]) + + kqueue requires a file descriptor for every file in a directory; this would + fail if a file was unreadable by the current user. Now these files are simply + skipped. + +- windows: fix renaming a watched directory if the parent is also watched ([#370]) + +- windows: increase buffer size from 4K to 64K ([#485]) + +- windows: close file handle on Remove() ([#288]) + +- kqueue: put pathname in the error if watching a file fails ([#471]) + +- inotify, windows: calling Close() more than once could race ([#465]) + +- kqueue: improve Close() performance ([#233]) + +- all: various documentation additions and clarifications. 
+ +[#233]: https://github.com/fsnotify/fsnotify/pull/233 +[#260]: https://github.com/fsnotify/fsnotify/pull/260 +[#288]: https://github.com/fsnotify/fsnotify/pull/288 +[#370]: https://github.com/fsnotify/fsnotify/pull/370 +[#434]: https://github.com/fsnotify/fsnotify/pull/434 +[#460]: https://github.com/fsnotify/fsnotify/pull/460 +[#463]: https://github.com/fsnotify/fsnotify/pull/463 +[#465]: https://github.com/fsnotify/fsnotify/pull/465 +[#470]: https://github.com/fsnotify/fsnotify/pull/470 +[#471]: https://github.com/fsnotify/fsnotify/pull/471 +[#475]: https://github.com/fsnotify/fsnotify/pull/475 +[#477]: https://github.com/fsnotify/fsnotify/pull/477 +[#479]: https://github.com/fsnotify/fsnotify/pull/479 +[#480]: https://github.com/fsnotify/fsnotify/pull/480 +[#485]: https://github.com/fsnotify/fsnotify/pull/485 ## [1.5.4] - 2022-04-25 @@ -40,6 +228,30 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 [#385](https://github.com/fsnotify/fsnotify/pull/385) * Go 1.14+: Fix unsafe pointer conversion [#325](https://github.com/fsnotify/fsnotify/pull/325) +## [1.4.9] - 2020-03-11 + +* Move example usage to the readme #329. This may resolve #328. + +## [1.4.8] - 2020-03-10 + +* CI: test more go versions (@nathany 1d13583d846ea9d66dcabbfefbfb9d8e6fb05216) +* Tests: Queued inotify events could have been read by the test before max_queued_events was hit (@matthias-stone #265) +* Tests: t.Fatalf -> t.Errorf in go routines (@gdey #266) +* CI: Less verbosity (@nathany #267) +* Tests: Darwin: Exchangedata is deprecated on 10.13 (@nathany #267) +* Tests: Check if channels are closed in the example (@alexeykazakov #244) +* CI: Only run golint on latest version of go and fix issues (@cpuguy83 #284) +* CI: Add windows to travis matrix (@cpuguy83 #284) +* Docs: Remove appveyor badge (@nathany 11844c0959f6fff69ba325d097fce35bd85a8e93) +* Linux: create epoll and pipe fds with close-on-exec (@JohannesEbke #219) +* Linux: open files with close-on-exec (@linxiulei #273) +* Docs: Plan to support fanotify (@nathany ab058b44498e8b7566a799372a39d150d9ea0119 ) +* Project: Add go.mod (@nathany #309) +* Project: Revise editor config (@nathany #309) +* Project: Update copyright for 2019 (@nathany #309) +* CI: Drop go1.8 from CI matrix (@nathany #309) +* Docs: Updating the FAQ section for supportability with NFS & FUSE filesystems (@Pratik32 4bf2d1fec78374803a39307bfb8d340688f4f28e ) + ## [1.4.7] - 2018-01-09 * BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine) diff --git a/tools/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md b/tools/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md index 8a642563d..e4ac2a2ff 100644 --- a/tools/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md +++ b/tools/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md @@ -1,60 +1,144 @@ -# Contributing +Thank you for your interest in contributing to fsnotify! We try to review and +merge PRs in a reasonable timeframe, but please be aware that: -## Issues +- To avoid "wasted" work, please discuss changes on the issue tracker first. You + can just send PRs, but they may end up being rejected for one reason or the + other. -* Request features and report bugs using the [GitHub Issue Tracker](https://github.com/fsnotify/fsnotify/issues). -* Please indicate the platform you are using fsnotify on. -* A code example to reproduce the problem is appreciated. +- fsnotify is a cross-platform library, and changes must work reasonably well on + all supported platforms.
-## Pull Requests +- Changes will need to be compatible; old code should still compile, and the + runtime behaviour can't change in ways that are likely to lead to problems for + users. -### Contributor License Agreement +Testing +------- +Just `go test ./...` runs all the tests; the CI runs this on all supported +platforms. Testing different platforms locally can be done with something like +[goon] or [Vagrant], but this isn't super-easy to set up at the moment. -fsnotify is derived from code in the [golang.org/x/exp](https://godoc.org/golang.org/x/exp) package and it may be included [in the standard library](https://github.com/fsnotify/fsnotify/issues/1) in the future. Therefore fsnotify carries the same [LICENSE](https://github.com/fsnotify/fsnotify/blob/master/LICENSE) as Go. Contributors retain their copyright, so you need to fill out a short form before we can accept your contribution: [Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual). +Use the `-short` flag to make the "stress test" run faster. -Please indicate that you have signed the CLA in your pull request. +Writing new tests +----------------- +Scripts in the testdata directory allow creating test cases in a "shell-like" +syntax. The basic format is: -### How fsnotify is Developed + script -* Development is done on feature branches. -* Tests are run on BSD, Linux, macOS and Windows. -* Pull requests are reviewed and [applied to master][am] using [hub][]. - * Maintainers may modify or squash commits rather than asking contributors to. -* To issue a new release, the maintainers will: - * Update the CHANGELOG - * Tag a version, which will become available through gopkg.in. - -### How to Fork + Output: + desired output -For smooth sailing, always use the original import path. Installing with `go get` makes this easy. +For example: -1. Install from GitHub (`go get -u github.com/fsnotify/fsnotify`) -2. Create your feature branch (`git checkout -b my-new-feature`) -3. Ensure everything works and the tests pass (see below) -4. Commit your changes (`git commit -am 'Add some feature'`) + # Create a new empty file with some data. + watch / + echo data >/file -Contribute upstream: + Output: + create /file + write /file -1. Fork fsnotify on GitHub -2. Add your remote (`git remote add fork git@github.com:mycompany/repo.git`) -3. Push to the branch (`git push fork my-new-feature`) -4. Create a new Pull Request on GitHub +Just create a new file to add a new test; select which tests to run with +`-run TestScript/[path]`. -This workflow is [thoroughly explained by Katrina Owen](https://splice.com/blog/contributing-open-source-git-repositories-go/). +script +------ +The script is a "shell-like" script: -### Testing + cmd arg arg -fsnotify uses build tags to compile different code on Linux, BSD, macOS, and Windows. +Comments are supported with `#`: -Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on. + # Comment + cmd arg arg # Comment -### Maintainers +All operations are done in a temp directory; a path like "/foo" is rewritten to +"/tmp/TestFoo/foo". -Help maintaining fsnotify is welcome. To be a maintainer: +Arguments can be quoted with `"` or `'`; there are no escapes and they're +functionally identical right now, but this may change in the future, so best to +assume shell-like rules. -* Submit a pull request and sign the CLA as above. 
-* You must be able to run the test suite on Mac, Windows, Linux and BSD.
+    touch "/file with spaces"
-All code changes should be internal pull requests.
+End-of-line escapes with `\` are not supported.
-Releases are tagged using [Semantic Versioning](http://semver.org/).
+### Supported commands
+
+    watch path [ops]    # Watch the path, reporting events for it. Nothing is
+                        # watched by default. Optionally a list of ops can be
+                        # given, as with AddWith(path, WithOps(...)).
+    unwatch path        # Stop watching the path.
+    watchlist n         # Assert watchlist length.
+
+    stop                # Stop running the script; for debugging.
+    debug [yes/no]      # Enable/disable FSNOTIFY_DEBUG (tests are run in
+                        # parallel by default, so -parallel=1 is probably a good
+                        # idea).
+
+    touch path
+    mkdir [-p] dir
+    ln -s target link   # Only ln -s supported.
+    mkfifo path
+    mknod dev path
+    mv src dst
+    rm [-r] path
+    chmod mode path     # Octal only
+    sleep time-in-ms
+
+    cat path            # Read path (does nothing with the data; just reads it).
+    echo str >>path     # Append "str" to "path".
+    echo str >path      # Truncate "path" and write "str".
+
+    require reason      # Skip the test if "reason" is true; "skip" and
+    skip reason         # "require" behave identically; both are supported for
+                        # readability. Possible reasons are:
+                        #
+                        #   always    Always skip this test.
+                        #   symlink   Symlinks are supported (requires admin
+                        #             permissions on Windows).
+                        #   mkfifo    Platform doesn't support FIFO named pipes.
+                        #   mknod     Platform doesn't support device nodes.
+
+
+output
+------
+After `Output:` the desired output is given; this is indented by convention, but
+that's not required.
+
+The format of that is:
+
+    # Comment
+    event path          # Comment
+
+    system:
+        event path
+    system2:
+        event path
+
+Every event is one line, and any whitespace between the event and path is
+ignored. The path can optionally be surrounded in quotes ("). Anything after a
+"#" is ignored.
+
+Platform-specific tests can be added after GOOS; for example:
+
+    watch /
+    touch /file
+
+    Output:
+        # Tested if nothing else matches
+        create /file
+
+        # Windows-specific test.
+        windows:
+            write /file
+
+You can specify multiple platforms with a comma (e.g. "windows, linux:").
+"kqueue" is a shortcut for all kqueue systems (BSD, macOS).
+
+
+[goon]: https://github.com/arp242/goon
+[Vagrant]: https://www.vagrantup.com/
+[integration_test.go]: /integration_test.go
diff --git a/tools/vendor/github.com/fsnotify/fsnotify/LICENSE b/tools/vendor/github.com/fsnotify/fsnotify/LICENSE
index e180c8fb0..fb03ade75 100644
--- a/tools/vendor/github.com/fsnotify/fsnotify/LICENSE
+++ b/tools/vendor/github.com/fsnotify/fsnotify/LICENSE
@@ -1,28 +1,25 @@
-Copyright (c) 2012 The Go Authors. All rights reserved.
-Copyright (c) 2012-2019 fsnotify Authors. All rights reserved.
+Copyright © 2012 The Go Authors. All rights reserved.
+Copyright © fsnotify Authors. All rights reserved.
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc.
nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright notice, this + list of conditions and the following disclaimer in the documentation and/or + other materials provided with the distribution. +* Neither the name of Google Inc. nor the names of its contributors may be used + to endorse or promote products derived from this software without specific + prior written permission. -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/tools/vendor/github.com/fsnotify/fsnotify/README.md b/tools/vendor/github.com/fsnotify/fsnotify/README.md index 0731c5ef8..e480733d1 100644 --- a/tools/vendor/github.com/fsnotify/fsnotify/README.md +++ b/tools/vendor/github.com/fsnotify/fsnotify/README.md @@ -1,120 +1,184 @@ -# File system notifications for Go +fsnotify is a Go library to provide cross-platform filesystem notifications on +Windows, Linux, macOS, BSD, and illumos. -[![Go Reference](https://pkg.go.dev/badge/github.com/fsnotify/fsnotify.svg)](https://pkg.go.dev/github.com/fsnotify/fsnotify) [![Go Report Card](https://goreportcard.com/badge/github.com/fsnotify/fsnotify)](https://goreportcard.com/report/github.com/fsnotify/fsnotify) [![Maintainers Wanted](https://img.shields.io/badge/maintainers-wanted-red.svg)](https://github.com/fsnotify/fsnotify/issues/413) +Go 1.17 or newer is required; the full documentation is at +https://pkg.go.dev/github.com/fsnotify/fsnotify -fsnotify utilizes [`golang.org/x/sys`](https://pkg.go.dev/golang.org/x/sys) rather than [`syscall`](https://pkg.go.dev/syscall) from the standard library. +--- -Cross platform: Windows, Linux, BSD and macOS. 
+Platform support: -| Adapter | OS | Status | -| --------------------- | -------------------------------- | ------------------------------------------------------------------------------------------------------------------------------- | -| inotify | Linux 2.6.27 or later, Android\* | Supported | -| kqueue | BSD, macOS, iOS\* | Supported | -| ReadDirectoryChangesW | Windows | Supported | -| FSEvents | macOS | [Planned](https://github.com/fsnotify/fsnotify/issues/11) | -| FEN | Solaris 11 | [In Progress](https://github.com/fsnotify/fsnotify/pull/371) | -| fanotify | Linux 2.6.37+ | [Maybe](https://github.com/fsnotify/fsnotify/issues/114) | -| USN Journals | Windows | [Maybe](https://github.com/fsnotify/fsnotify/issues/53) | -| Polling | *All* | [Maybe](https://github.com/fsnotify/fsnotify/issues/9) | +| Backend | OS | Status | +| :-------------------- | :--------- | :------------------------------------------------------------------------ | +| inotify | Linux | Supported | +| kqueue | BSD, macOS | Supported | +| ReadDirectoryChangesW | Windows | Supported | +| FEN | illumos | Supported | +| fanotify | Linux 5.9+ | [Not yet](https://github.com/fsnotify/fsnotify/issues/114) | +| AHAFS | AIX | [aix branch]; experimental due to lack of maintainer and test environment | +| FSEvents | macOS | [Needs support in x/sys/unix][fsevents] | +| USN Journals | Windows | [Needs support in x/sys/windows][usn] | +| Polling | *All* | [Not yet](https://github.com/fsnotify/fsnotify/issues/9) | -\* Android and iOS are untested. +Linux and illumos should include Android and Solaris, but these are currently +untested. -Please see [the documentation](https://pkg.go.dev/github.com/fsnotify/fsnotify) and consult the [FAQ](#faq) for usage information. +[fsevents]: https://github.com/fsnotify/fsnotify/issues/11#issuecomment-1279133120 +[usn]: https://github.com/fsnotify/fsnotify/issues/53#issuecomment-1279829847 +[aix branch]: https://github.com/fsnotify/fsnotify/issues/353#issuecomment-1284590129 -## API stability - -fsnotify is a fork of [howeyc/fsnotify](https://github.com/howeyc/fsnotify) with a new API as of v1.0. The API is based on [this design document](http://goo.gl/MrYxyA). - -All [releases](https://github.com/fsnotify/fsnotify/releases) are tagged based on [Semantic Versioning](http://semver.org/). - -## Usage +Usage +----- +A basic example: ```go package main import ( - "log" + "log" - "github.com/fsnotify/fsnotify" + "github.com/fsnotify/fsnotify" ) func main() { - watcher, err := fsnotify.NewWatcher() - if err != nil { - log.Fatal(err) - } - defer watcher.Close() - - done := make(chan bool) - go func() { - for { - select { - case event, ok := <-watcher.Events: - if !ok { - return - } - log.Println("event:", event) - if event.Op&fsnotify.Write == fsnotify.Write { - log.Println("modified file:", event.Name) - } - case err, ok := <-watcher.Errors: - if !ok { - return - } - log.Println("error:", err) - } - } - }() - - err = watcher.Add("/tmp/foo") - if err != nil { - log.Fatal(err) - } - <-done + // Create new watcher. + watcher, err := fsnotify.NewWatcher() + if err != nil { + log.Fatal(err) + } + defer watcher.Close() + + // Start listening for events. + go func() { + for { + select { + case event, ok := <-watcher.Events: + if !ok { + return + } + log.Println("event:", event) + if event.Has(fsnotify.Write) { + log.Println("modified file:", event.Name) + } + case err, ok := <-watcher.Errors: + if !ok { + return + } + log.Println("error:", err) + } + } + }() + + // Add a path. 
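+	// Watching a directory reports events for the files directly inside
+	// it; subdirectories are not watched recursively (see the FAQ below).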
+	err = watcher.Add("/tmp")
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Block main goroutine forever.
+	<-make(chan struct{})
}
```
-## Contributing
+Some more examples can be found in [cmd/fsnotify](cmd/fsnotify), which can be
+run with:
+
+    % go run ./cmd/fsnotify
-Please refer to [CONTRIBUTING][] before opening an issue or pull request.
+Further detailed documentation can be found in godoc:
+https://pkg.go.dev/github.com/fsnotify/fsnotify
-## FAQ
+FAQ
+---
+### Will a file still be watched when it's moved to another directory?
+No, not unless you are watching the location it was moved to.
-**When a file is moved to another directory is it still being watched?**
+### Are subdirectories watched?
+No, you must add watches for any directory you want to watch (a recursive
+watcher is on the roadmap: [#18]).
-No (it shouldn't be, unless you are watching where it was moved to).
+[#18]: https://github.com/fsnotify/fsnotify/issues/18
-**When I watch a directory, are all subdirectories watched as well?**
+### Do I have to watch the Error and Event channels in a goroutine?
+Yes. You can read both channels in the same goroutine using `select` (you don't
+need a separate goroutine for both channels; see the example).
-No, you must add watches for any directory you want to watch (a recursive watcher is on the roadmap [#18][]).
+### Why don't notifications work with NFS, SMB, FUSE, /proc, or /sys?
+fsnotify requires support from the underlying OS to work. The current NFS and
+SMB protocols do not provide network-level support for file notifications, and
+neither do the /proc and /sys virtual filesystems.
-**Do I have to watch the Error and Event channels in a separate goroutine?**
+This could be fixed with a polling watcher ([#9]), but it's not yet implemented.
-As of now, yes. Looking into making this single-thread friendly (see [howeyc #7][#7])
+[#9]: https://github.com/fsnotify/fsnotify/issues/9
-**Why am I receiving multiple events for the same file on OS X?**
+### Why do I get many Chmod events?
+Some programs may generate a lot of attribute changes; for example Spotlight on
+macOS, anti-virus programs, backup applications, and some others are known to do
+this. As a rule, it's typically best to ignore Chmod events. They're often not
+useful, and tend to cause problems.
-Spotlight indexing on OS X can result in multiple events (see [howeyc #62][#62]). A temporary workaround is to add your folder(s) to the *Spotlight Privacy settings* until we have a native FSEvents implementation (see [#11][]).
+Spotlight indexing on macOS can result in multiple events (see [#15]). A
+temporary workaround is to add your folder(s) to the *Spotlight Privacy
+settings* until we have a native FSEvents implementation (see [#11]).
-**How many files can be watched at once?**
+[#11]: https://github.com/fsnotify/fsnotify/issues/11
+[#15]: https://github.com/fsnotify/fsnotify/issues/15
-There are OS-specific limits as to how many watches can be created:
-* Linux: /proc/sys/fs/inotify/max_user_watches contains the limit, reaching this limit results in a "no space left on device" error.
-* BSD / OSX: sysctl variables "kern.maxfiles" and "kern.maxfilesperproc", reaching these limits results in a "too many open files" error.
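+For example, a minimal sketch of the event loop from the usage example above,
+extended to drop events that are only a Chmod (this assumes the same `watcher`
+variable and imports as that example; adapt as needed):
+
+```go
+go func() {
+	for {
+		select {
+		case event, ok := <-watcher.Events:
+			if !ok {
+				return
+			}
+			if event.Op == fsnotify.Chmod {
+				continue // Attribute-only change; usually safe to skip.
+			}
+			log.Println("event:", event)
+		case err, ok := <-watcher.Errors:
+			if !ok {
+				return
+			}
+			log.Println("error:", err)
+		}
+	}
+}()
+```
+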
+### Watching a file doesn't work well
+Watching individual files (rather than directories) is generally not recommended
+as many programs (especially editors) update files atomically: they write to a
+temporary file which is then moved to the destination, overwriting the original
+(or some variant thereof). The watcher on the original file is now lost, as that
+file no longer exists.
-**Why don't notifications work with NFS filesystems or filesystem in userspace (FUSE)?**
+The upshot of this is that a power failure or crash won't leave a half-written
+file.
-fsnotify requires support from underlying OS to work. The current NFS protocol does not provide network level support for file notifications.
+Watch the parent directory and use `Event.Name` to filter out files you're not
+interested in. There is an example of this in `cmd/fsnotify/file.go`.
-[#62]: https://github.com/howeyc/fsnotify/issues/62
-[#18]: https://github.com/fsnotify/fsnotify/issues/18
-[#11]: https://github.com/fsnotify/fsnotify/issues/11
-[#7]: https://github.com/howeyc/fsnotify/issues/7
+Platform-specific notes
+-----------------------
+### Linux
+When a file is removed, a REMOVE event won't be emitted until all file
+descriptors are closed; it will emit a CHMOD instead:
+
+    fp, _ := os.Open("file")
+    os.Remove("file")   // CHMOD
+    fp.Close()          // REMOVE
+
+This is the event that inotify sends, so not much can be changed about this.
+
+The `fs.inotify.max_user_watches` sysctl variable specifies the upper limit for
+the number of watches per user, and `fs.inotify.max_user_instances` specifies
+the maximum number of inotify instances per user. Every Watcher you create is an
+"instance", and every path you add is a "watch".
+
+These are also exposed in `/proc` as `/proc/sys/fs/inotify/max_user_watches` and
+`/proc/sys/fs/inotify/max_user_instances`.
+
+To increase them you can use `sysctl` or write the value to the proc file:
+
+    # The default values on Linux 5.18
+    sysctl fs.inotify.max_user_watches=124983
+    sysctl fs.inotify.max_user_instances=128
+
+To make the changes persist on reboot, edit `/etc/sysctl.conf` or
+`/usr/lib/sysctl.d/50-default.conf` (details differ per Linux distro; check your
+distro's documentation):
-[contributing]: https://github.com/fsnotify/fsnotify/blob/master/CONTRIBUTING.md
+    fs.inotify.max_user_watches=124983
+    fs.inotify.max_user_instances=128
-## Related Projects
+Reaching the limit will result in a "no space left on device" or "too many open
+files" error.
-* [notify](https://github.com/rjeczalik/notify)
-* [fsevents](https://github.com/fsnotify/fsevents)
+### kqueue (macOS, all BSD systems)
+kqueue requires opening a file descriptor for every file that's being watched,
+so if you're watching a directory with five files then that's six file
+descriptors. You will run into your system's "max open files" limit faster on
+these platforms.
+The sysctl variables `kern.maxfiles` and `kern.maxfilesperproc` can be used to
+control the maximum number of open files.
diff --git a/tools/vendor/github.com/fsnotify/fsnotify/backend_fen.go b/tools/vendor/github.com/fsnotify/fsnotify/backend_fen.go
new file mode 100644
index 000000000..c349c326c
--- /dev/null
+++ b/tools/vendor/github.com/fsnotify/fsnotify/backend_fen.go
@@ -0,0 +1,484 @@
+//go:build solaris
+
+// FEN backend for illumos (supported) and Solaris (untested, but should work).
+//
+// See port_create(3c) etc. for docs.
https://www.illumos.org/man/3C/port_create + +package fsnotify + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "sync" + "time" + + "github.com/fsnotify/fsnotify/internal" + "golang.org/x/sys/unix" +) + +type fen struct { + Events chan Event + Errors chan error + + mu sync.Mutex + port *unix.EventPort + done chan struct{} // Channel for sending a "quit message" to the reader goroutine + dirs map[string]Op // Explicitly watched directories + watches map[string]Op // Explicitly watched non-directories +} + +func newBackend(ev chan Event, errs chan error) (backend, error) { + return newBufferedBackend(0, ev, errs) +} + +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { + w := &fen{ + Events: ev, + Errors: errs, + dirs: make(map[string]Op), + watches: make(map[string]Op), + done: make(chan struct{}), + } + + var err error + w.port, err = unix.NewEventPort() + if err != nil { + return nil, fmt.Errorf("fsnotify.NewWatcher: %w", err) + } + + go w.readEvents() + return w, nil +} + +// sendEvent attempts to send an event to the user, returning true if the event +// was put in the channel successfully and false if the watcher has been closed. +func (w *fen) sendEvent(name string, op Op) (sent bool) { + select { + case <-w.done: + return false + case w.Events <- Event{Name: name, Op: op}: + return true + } +} + +// sendError attempts to send an error to the user, returning true if the error +// was put in the channel successfully and false if the watcher has been closed. +func (w *fen) sendError(err error) (sent bool) { + if err == nil { + return true + } + select { + case <-w.done: + return false + case w.Errors <- err: + return true + } +} + +func (w *fen) isClosed() bool { + select { + case <-w.done: + return true + default: + return false + } +} + +func (w *fen) Close() error { + // Take the lock used by associateFile to prevent lingering events from + // being processed after the close + w.mu.Lock() + defer w.mu.Unlock() + if w.isClosed() { + return nil + } + close(w.done) + return w.port.Close() +} + +func (w *fen) Add(name string) error { return w.AddWith(name) } + +func (w *fen) AddWith(name string, opts ...addOpt) error { + if w.isClosed() { + return ErrClosed + } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } + + with := getOptions(opts...) + if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } + + // Currently we resolve symlinks that were explicitly requested to be + // watched. Otherwise we would use LStat here. + stat, err := os.Stat(name) + if err != nil { + return err + } + + // Associate all files in the directory. + if stat.IsDir() { + err := w.handleDirectory(name, stat, true, w.associateFile) + if err != nil { + return err + } + + w.mu.Lock() + w.dirs[name] = with.op + w.mu.Unlock() + return nil + } + + err = w.associateFile(name, stat, true) + if err != nil { + return err + } + + w.mu.Lock() + w.watches[name] = with.op + w.mu.Unlock() + return nil +} + +func (w *fen) Remove(name string) error { + if w.isClosed() { + return nil + } + if !w.port.PathIsWatched(name) { + return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) + } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } + + // The user has expressed an intent. Immediately remove this name from + // whichever watch list it might be in. If it's not in there the delete + // doesn't cause harm. 
+	w.mu.Lock()
+	delete(w.watches, name)
+	delete(w.dirs, name)
+	w.mu.Unlock()
+
+	stat, err := os.Stat(name)
+	if err != nil {
+		return err
+	}
+
+	// Remove associations for every file in the directory.
+	if stat.IsDir() {
+		err := w.handleDirectory(name, stat, false, w.dissociateFile)
+		if err != nil {
+			return err
+		}
+		return nil
+	}
+
+	err = w.port.DissociatePath(name)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// readEvents contains the main loop that runs in a goroutine watching for events.
+func (w *fen) readEvents() {
+	// If this function returns, the watcher has been closed and we can close
+	// these channels.
+	defer func() {
+		close(w.Errors)
+		close(w.Events)
+	}()
+
+	pevents := make([]unix.PortEvent, 8)
+	for {
+		count, err := w.port.Get(pevents, 1, nil)
+		if err != nil && err != unix.ETIME {
+			// Interrupted system call (count should be 0); ignore and continue.
+			if errors.Is(err, unix.EINTR) && count == 0 {
+				continue
+			}
+			// Get failed because we called w.Close().
+			if errors.Is(err, unix.EBADF) && w.isClosed() {
+				return
+			}
+			// There was an error not caused by calling w.Close().
+			if !w.sendError(err) {
+				return
+			}
+		}
+
+		p := pevents[:count]
+		for _, pevent := range p {
+			if pevent.Source != unix.PORT_SOURCE_FILE {
+				// Event from unexpected source received; should never happen.
+				if !w.sendError(errors.New("Event from unexpected source received")) {
+					return
+				}
+				continue
+			}
+
+			if debug {
+				internal.Debug(pevent.Path, pevent.Events)
+			}
+
+			err = w.handleEvent(&pevent)
+			if !w.sendError(err) {
+				return
+			}
+		}
+	}
+}
+
+func (w *fen) handleDirectory(path string, stat os.FileInfo, follow bool, handler func(string, os.FileInfo, bool) error) error {
+	files, err := os.ReadDir(path)
+	if err != nil {
+		return err
+	}
+
+	// Handle all children of the directory.
+	for _, entry := range files {
+		finfo, err := entry.Info()
+		if err != nil {
+			return err
+		}
+		err = handler(filepath.Join(path, finfo.Name()), finfo, false)
+		if err != nil {
+			return err
+		}
+	}
+
+	// And finally handle the directory itself.
+	return handler(path, stat, follow)
+}
+
+// handleEvent might need to emit more than one fsnotify event if the events
+// bitmap matches more than one event type (e.g. the file was both modified and
+// had the attributes changed between when the association was created and
+// when the event was returned).
+func (w *fen) handleEvent(event *unix.PortEvent) error {
+	var (
+		events     = event.Events
+		path       = event.Path
+		fmode      = event.Cookie.(os.FileMode)
+		reRegister = true
+	)
+
+	w.mu.Lock()
+	_, watchedDir := w.dirs[path]
+	_, watchedPath := w.watches[path]
+	w.mu.Unlock()
+	isWatched := watchedDir || watchedPath
+
+	if events&unix.FILE_DELETE != 0 {
+		if !w.sendEvent(path, Remove) {
+			return nil
+		}
+		reRegister = false
+	}
+	if events&unix.FILE_RENAME_FROM != 0 {
+		if !w.sendEvent(path, Rename) {
+			return nil
+		}
+		// Don't keep watching the new file name.
+		reRegister = false
+	}
+	if events&unix.FILE_RENAME_TO != 0 {
+		// We don't report a Rename event for this case, because Rename events
+		// are interpreted as referring to the _old_ name of the file, and in
+		// this case the event would refer to the new name of the file. This
+		// type of rename event is not supported by fsnotify.
+
+		// inotify reports a Remove event in this case, so we simulate this
+		// here.
+		if !w.sendEvent(path, Remove) {
+			return nil
+		}
+		// Don't keep watching the file that was removed.
+		reRegister = false
+	}
+
+	// The file is gone, nothing left to do.
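+	// Clear it from our own dirs/watches bookkeeping; WatchList() reads
+	// from these maps.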
+	if !reRegister {
+		if watchedDir {
+			w.mu.Lock()
+			delete(w.dirs, path)
+			w.mu.Unlock()
+		}
+		if watchedPath {
+			w.mu.Lock()
+			delete(w.watches, path)
+			w.mu.Unlock()
+		}
+		return nil
+	}
+
+	// If we didn't get a deletion, the file still exists and we're going to
+	// have to watch it again. Let's Stat it now so that we can compare
+	// permissions and have what we need to continue watching the file.
+
+	stat, err := os.Lstat(path)
+	if err != nil {
+		// This is unexpected, but we should still emit an event. This happens
+		// most often on "rm -r" of a subdirectory inside a watched directory.
+		// We get a modify event of something happening inside, but by the time
+		// we get here, the subdirectory is already gone. Clearly we were
+		// watching this path but now it is gone. Let's tell the user that it
+		// was removed.
+		if !w.sendEvent(path, Remove) {
+			return nil
+		}
+		// Suppress extra write events on removed directories; they are not
+		// informative and can be confusing.
+		return nil
+	}
+
+	// Resolve symlinks that were explicitly watched, as we would have at Add()
+	// time. This helps suppress spurious Chmod events on watched symlinks.
+	if isWatched {
+		stat, err = os.Stat(path)
+		if err != nil {
+			// The symlink still exists, but the target is gone. Report the
+			// Remove similar to above.
+			if !w.sendEvent(path, Remove) {
+				return nil
+			}
+			// Don't return the error.
+		}
+	}
+
+	if events&unix.FILE_MODIFIED != 0 {
+		if fmode.IsDir() && watchedDir {
+			if err := w.updateDirectory(path); err != nil {
+				return err
+			}
+		} else {
+			if !w.sendEvent(path, Write) {
+				return nil
+			}
+		}
+	}
+	if events&unix.FILE_ATTRIB != 0 && stat != nil {
+		// Only send Chmod if perms changed.
+		if stat.Mode().Perm() != fmode.Perm() {
+			if !w.sendEvent(path, Chmod) {
+				return nil
+			}
+		}
+	}
+
+	if stat != nil {
+		// If we get here, it means we've hit an event above that requires us
+		// to continue watching the file or directory.
+		return w.associateFile(path, stat, isWatched)
+	}
+	return nil
+}
+
+func (w *fen) updateDirectory(path string) error {
+	// The directory was modified, so we must find unwatched entities and watch
+	// them. If something was removed from the directory, nothing will happen,
+	// as everything else should still be watched.
+	files, err := os.ReadDir(path)
+	if err != nil {
+		return err
+	}
+
+	for _, entry := range files {
+		path := filepath.Join(path, entry.Name())
+		if w.port.PathIsWatched(path) {
+			continue
+		}
+
+		finfo, err := entry.Info()
+		if err != nil {
+			return err
+		}
+		err = w.associateFile(path, finfo, false)
+		if !w.sendError(err) {
+			return nil
+		}
+		if !w.sendEvent(path, Create) {
+			return nil
+		}
+	}
+	return nil
+}
+
+func (w *fen) associateFile(path string, stat os.FileInfo, follow bool) error {
+	if w.isClosed() {
+		return ErrClosed
+	}
+	// This is primarily protecting the call to AssociatePath but it is
+	// important and intentional that the call to PathIsWatched is also
+	// protected by this mutex. Without this mutex, AssociatePath has been seen
+	// to error out that the path is already associated.
+	w.mu.Lock()
+	defer w.mu.Unlock()
+
+	if w.port.PathIsWatched(path) {
+		// Remove the old association in favor of this one. If we get ENOENT,
+		// then while the x/sys/unix wrapper still thought that this path was
+		// associated, the underlying event port did not. This call will have
+		// cleared up that discrepancy. The most likely cause is that the event
+		// has fired but we haven't processed it yet.
+ err := w.port.DissociatePath(path) + if err != nil && !errors.Is(err, unix.ENOENT) { + return err + } + } + + var events int + if !follow { + // Watch symlinks themselves rather than their targets unless this entry + // is explicitly watched. + events |= unix.FILE_NOFOLLOW + } + if true { // TODO: implement withOps() + events |= unix.FILE_MODIFIED + } + if true { + events |= unix.FILE_ATTRIB + } + return w.port.AssociatePath(path, stat, events, stat.Mode()) +} + +func (w *fen) dissociateFile(path string, stat os.FileInfo, unused bool) error { + if !w.port.PathIsWatched(path) { + return nil + } + return w.port.DissociatePath(path) +} + +func (w *fen) WatchList() []string { + if w.isClosed() { + return nil + } + + w.mu.Lock() + defer w.mu.Unlock() + + entries := make([]string, 0, len(w.watches)+len(w.dirs)) + for pathname := range w.dirs { + entries = append(entries, pathname) + } + for pathname := range w.watches { + entries = append(entries, pathname) + } + + return entries +} + +func (w *fen) xSupports(op Op) bool { + if op.Has(xUnportableOpen) || op.Has(xUnportableRead) || + op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) { + return false + } + return true +} diff --git a/tools/vendor/github.com/fsnotify/fsnotify/backend_inotify.go b/tools/vendor/github.com/fsnotify/fsnotify/backend_inotify.go new file mode 100644 index 000000000..36c311694 --- /dev/null +++ b/tools/vendor/github.com/fsnotify/fsnotify/backend_inotify.go @@ -0,0 +1,658 @@ +//go:build linux && !appengine + +package fsnotify + +import ( + "errors" + "fmt" + "io" + "io/fs" + "os" + "path/filepath" + "strings" + "sync" + "time" + "unsafe" + + "github.com/fsnotify/fsnotify/internal" + "golang.org/x/sys/unix" +) + +type inotify struct { + Events chan Event + Errors chan error + + // Store fd here as os.File.Read() will no longer return on close after + // calling Fd(). See: https://github.com/golang/go/issues/26439 + fd int + inotifyFile *os.File + watches *watches + done chan struct{} // Channel for sending a "quit message" to the reader goroutine + doneMu sync.Mutex + doneResp chan struct{} // Channel to respond to Close + + // Store rename cookies in an array, with the index wrapping to 0. Almost + // all of the time what we get is a MOVED_FROM to set the cookie and the + // next event inotify sends will be MOVED_TO to read it. However, this is + // not guaranteed – as described in inotify(7) – and we may get other events + // between the two MOVED_* events (including other MOVED_* ones). + // + // A second issue is that moving a file outside the watched directory will + // trigger a MOVED_FROM to set the cookie, but we never see the MOVED_TO to + // read and delete it. So just storing it in a map would slowly leak memory. + // + // Doing it like this gives us a simple fast LRU-cache that won't allocate. + // Ten items should be more than enough for our purpose, and a loop over + // such a short array is faster than a map access anyway (not that it hugely + // matters since we're talking about hundreds of ns at the most, but still). + cookies [10]koekje + cookieIndex uint8 + cookiesMu sync.Mutex +} + +type ( + watches struct { + mu sync.RWMutex + wd map[uint32]*watch // wd → watch + path map[string]uint32 // pathname → wd + } + watch struct { + wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) + flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) + path string // Watch path. + recurse bool // Recursion with ./...? 
+ } + koekje struct { + cookie uint32 + path string + } +) + +func newWatches() *watches { + return &watches{ + wd: make(map[uint32]*watch), + path: make(map[string]uint32), + } +} + +func (w *watches) len() int { + w.mu.RLock() + defer w.mu.RUnlock() + return len(w.wd) +} + +func (w *watches) add(ww *watch) { + w.mu.Lock() + defer w.mu.Unlock() + w.wd[ww.wd] = ww + w.path[ww.path] = ww.wd +} + +func (w *watches) remove(wd uint32) { + w.mu.Lock() + defer w.mu.Unlock() + watch := w.wd[wd] // Could have had Remove() called. See #616. + if watch == nil { + return + } + delete(w.path, watch.path) + delete(w.wd, wd) +} + +func (w *watches) removePath(path string) ([]uint32, error) { + w.mu.Lock() + defer w.mu.Unlock() + + path, recurse := recursivePath(path) + wd, ok := w.path[path] + if !ok { + return nil, fmt.Errorf("%w: %s", ErrNonExistentWatch, path) + } + + watch := w.wd[wd] + if recurse && !watch.recurse { + return nil, fmt.Errorf("can't use /... with non-recursive watch %q", path) + } + + delete(w.path, path) + delete(w.wd, wd) + if !watch.recurse { + return []uint32{wd}, nil + } + + wds := make([]uint32, 0, 8) + wds = append(wds, wd) + for p, rwd := range w.path { + if filepath.HasPrefix(p, path) { + delete(w.path, p) + delete(w.wd, rwd) + wds = append(wds, rwd) + } + } + return wds, nil +} + +func (w *watches) byPath(path string) *watch { + w.mu.RLock() + defer w.mu.RUnlock() + return w.wd[w.path[path]] +} + +func (w *watches) byWd(wd uint32) *watch { + w.mu.RLock() + defer w.mu.RUnlock() + return w.wd[wd] +} + +func (w *watches) updatePath(path string, f func(*watch) (*watch, error)) error { + w.mu.Lock() + defer w.mu.Unlock() + + var existing *watch + wd, ok := w.path[path] + if ok { + existing = w.wd[wd] + } + + upd, err := f(existing) + if err != nil { + return err + } + if upd != nil { + w.wd[upd.wd] = upd + w.path[upd.path] = upd.wd + + if upd.wd != wd { + delete(w.wd, wd) + } + } + + return nil +} + +func newBackend(ev chan Event, errs chan error) (backend, error) { + return newBufferedBackend(0, ev, errs) +} + +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { + // Need to set nonblocking mode for SetDeadline to work, otherwise blocking + // I/O operations won't terminate on close. + fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC | unix.IN_NONBLOCK) + if fd == -1 { + return nil, errno + } + + w := &inotify{ + Events: ev, + Errors: errs, + fd: fd, + inotifyFile: os.NewFile(uintptr(fd), ""), + watches: newWatches(), + done: make(chan struct{}), + doneResp: make(chan struct{}), + } + + go w.readEvents() + return w, nil +} + +// Returns true if the event was sent, or false if watcher is closed. +func (w *inotify) sendEvent(e Event) bool { + select { + case <-w.done: + return false + case w.Events <- e: + return true + } +} + +// Returns true if the error was sent, or false if watcher is closed. +func (w *inotify) sendError(err error) bool { + if err == nil { + return true + } + select { + case <-w.done: + return false + case w.Errors <- err: + return true + } +} + +func (w *inotify) isClosed() bool { + select { + case <-w.done: + return true + default: + return false + } +} + +func (w *inotify) Close() error { + w.doneMu.Lock() + if w.isClosed() { + w.doneMu.Unlock() + return nil + } + close(w.done) + w.doneMu.Unlock() + + // Causes any blocking reads to return with an error, provided the file + // still supports deadline operations. 
+ err := w.inotifyFile.Close() + if err != nil { + return err + } + + // Wait for goroutine to close + <-w.doneResp + + return nil +} + +func (w *inotify) Add(name string) error { return w.AddWith(name) } + +func (w *inotify) AddWith(path string, opts ...addOpt) error { + if w.isClosed() { + return ErrClosed + } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), path) + } + + with := getOptions(opts...) + if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } + + path, recurse := recursivePath(path) + if recurse { + return filepath.WalkDir(path, func(root string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + if !d.IsDir() { + if root == path { + return fmt.Errorf("fsnotify: not a directory: %q", path) + } + return nil + } + + // Send a Create event when adding new directory from a recursive + // watch; this is for "mkdir -p one/two/three". Usually all those + // directories will be created before we can set up watchers on the + // subdirectories, so only "one" would be sent as a Create event and + // not "one/two" and "one/two/three" (inotifywait -r has the same + // problem). + if with.sendCreate && root != path { + w.sendEvent(Event{Name: root, Op: Create}) + } + + return w.add(root, with, true) + }) + } + + return w.add(path, with, false) +} + +func (w *inotify) add(path string, with withOpts, recurse bool) error { + var flags uint32 + if with.noFollow { + flags |= unix.IN_DONT_FOLLOW + } + if with.op.Has(Create) { + flags |= unix.IN_CREATE + } + if with.op.Has(Write) { + flags |= unix.IN_MODIFY + } + if with.op.Has(Remove) { + flags |= unix.IN_DELETE | unix.IN_DELETE_SELF + } + if with.op.Has(Rename) { + flags |= unix.IN_MOVED_TO | unix.IN_MOVED_FROM | unix.IN_MOVE_SELF + } + if with.op.Has(Chmod) { + flags |= unix.IN_ATTRIB + } + if with.op.Has(xUnportableOpen) { + flags |= unix.IN_OPEN + } + if with.op.Has(xUnportableRead) { + flags |= unix.IN_ACCESS + } + if with.op.Has(xUnportableCloseWrite) { + flags |= unix.IN_CLOSE_WRITE + } + if with.op.Has(xUnportableCloseRead) { + flags |= unix.IN_CLOSE_NOWRITE + } + return w.register(path, flags, recurse) +} + +func (w *inotify) register(path string, flags uint32, recurse bool) error { + return w.watches.updatePath(path, func(existing *watch) (*watch, error) { + if existing != nil { + flags |= existing.flags | unix.IN_MASK_ADD + } + + wd, err := unix.InotifyAddWatch(w.fd, path, flags) + if wd == -1 { + return nil, err + } + + if existing == nil { + return &watch{ + wd: uint32(wd), + path: path, + flags: flags, + recurse: recurse, + }, nil + } + + existing.wd = uint32(wd) + existing.flags = flags + return existing, nil + }) +} + +func (w *inotify) Remove(name string) error { + if w.isClosed() { + return nil + } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } + return w.remove(filepath.Clean(name)) +} + +func (w *inotify) remove(name string) error { + wds, err := w.watches.removePath(name) + if err != nil { + return err + } + + for _, wd := range wds { + _, err := unix.InotifyRmWatch(w.fd, wd) + if err != nil { + // TODO: Perhaps it's not helpful to return an error here in every + // case; the only two possible errors are: + // + // EBADF, which happens when w.fd is not a valid file descriptor of + // any kind. + // + // EINVAL, which is when fd is not an inotify descriptor or wd is + // not a valid watch descriptor. 
Watch descriptors are invalidated
+			// when they are removed explicitly or implicitly; explicitly by
+			// inotify_rm_watch, implicitly when the file they are watching is
+			// deleted.
+			return err
+		}
+	}
+	return nil
+}
+
+func (w *inotify) WatchList() []string {
+	if w.isClosed() {
+		return nil
+	}
+
+	entries := make([]string, 0, w.watches.len())
+	w.watches.mu.RLock()
+	for pathname := range w.watches.path {
+		entries = append(entries, pathname)
+	}
+	w.watches.mu.RUnlock()
+
+	return entries
+}
+
+// readEvents reads from the inotify file descriptor, converts the
+// received events into Event objects and sends them via the Events channel.
+func (w *inotify) readEvents() {
+	defer func() {
+		close(w.doneResp)
+		close(w.Errors)
+		close(w.Events)
+	}()
+
+	var (
+		buf   [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events
+		errno error                                // Syscall errno
+	)
+	for {
+		// See if we have been closed.
+		if w.isClosed() {
+			return
+		}
+
+		n, err := w.inotifyFile.Read(buf[:])
+		switch {
+		case errors.Unwrap(err) == os.ErrClosed:
+			return
+		case err != nil:
+			if !w.sendError(err) {
+				return
+			}
+			continue
+		}
+
+		if n < unix.SizeofInotifyEvent {
+			var err error
+			if n == 0 {
+				err = io.EOF // If EOF is received. This should really never happen.
+			} else if n < 0 {
+				err = errno // If an error occurred while reading.
+			} else {
+				err = errors.New("notify: short read in readEvents()") // Read was too short.
+			}
+			if !w.sendError(err) {
+				return
+			}
+			continue
+		}
+
+		// We don't know how many events we just read into the buffer.
+		// While the offset points to at least one whole event...
+		var offset uint32
+		for offset <= uint32(n-unix.SizeofInotifyEvent) {
+			var (
+				// Point "raw" to the event in the buffer.
+				raw     = (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset]))
+				mask    = uint32(raw.Mask)
+				nameLen = uint32(raw.Len)
+				// Move to the next event in the buffer.
+				next = func() { offset += unix.SizeofInotifyEvent + nameLen }
+			)
+
+			if mask&unix.IN_Q_OVERFLOW != 0 {
+				if !w.sendError(ErrEventOverflow) {
+					return
+				}
+			}
+
+			/// If the event happened to the watched directory or the watched
+			/// file, the kernel doesn't append the filename to the event, but
+			/// we would like to always fill the "Name" field with a valid
+			/// filename. We retrieve the path of the watch from the "paths"
+			/// map.
+			watch := w.watches.byWd(uint32(raw.Wd))
+			/// Can be nil if Remove() was called in another goroutine for this
+			/// path in between reading the events from the kernel and reading
+			/// the internal state. Not much we can do about it, so just skip.
+			/// See #616.
+			if watch == nil {
+				next()
+				continue
+			}
+
+			name := watch.path
+			if nameLen > 0 {
+				/// Point "bytes" at the first byte of the filename.
+				bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen]
+				/// The filename is padded with NULL bytes. TrimRight() gets rid of those.
+				name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000")
+			}
+
+			if debug {
+				internal.Debug(name, raw.Mask, raw.Cookie)
+			}
+
+			if mask&unix.IN_IGNORED != 0 { //&& event.Op != 0
+				next()
+				continue
+			}
+
+			// inotify will automatically remove the watch on deletes; just need
+			// to clean our state here.
+			if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
+				w.watches.remove(watch.wd)
+			}
+
+			// We can't really update the state when a watched path is moved;
+			// only IN_MOVE_SELF is sent and not IN_MOVED_{FROM,TO}. So remove
+			// the watch.
+ if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF { + if watch.recurse { + next() // Do nothing + continue + } + + err := w.remove(watch.path) + if err != nil && !errors.Is(err, ErrNonExistentWatch) { + if !w.sendError(err) { + return + } + } + } + + /// Skip if we're watching both this path and the parent; the parent + /// will already send a delete so no need to do it twice. + if mask&unix.IN_DELETE_SELF != 0 { + if _, ok := w.watches.path[filepath.Dir(watch.path)]; ok { + next() + continue + } + } + + ev := w.newEvent(name, mask, raw.Cookie) + // Need to update watch path for recurse. + if watch.recurse { + isDir := mask&unix.IN_ISDIR == unix.IN_ISDIR + /// New directory created: set up watch on it. + if isDir && ev.Has(Create) { + err := w.register(ev.Name, watch.flags, true) + if !w.sendError(err) { + return + } + + // This was a directory rename, so we need to update all + // the children. + // + // TODO: this is of course pretty slow; we should use a + // better data structure for storing all of this, e.g. store + // children in the watch. I have some code for this in my + // kqueue refactor we can use in the future. For now I'm + // okay with this as it's not publicly available. + // Correctness first, performance second. + if ev.renamedFrom != "" { + w.watches.mu.Lock() + for k, ww := range w.watches.wd { + if k == watch.wd || ww.path == ev.Name { + continue + } + if strings.HasPrefix(ww.path, ev.renamedFrom) { + ww.path = strings.Replace(ww.path, ev.renamedFrom, ev.Name, 1) + w.watches.wd[k] = ww + } + } + w.watches.mu.Unlock() + } + } + } + + /// Send the events that are not ignored on the events channel + if !w.sendEvent(ev) { + return + } + next() + } + } +} + +func (w *inotify) isRecursive(path string) bool { + ww := w.watches.byPath(path) + if ww == nil { // path could be a file, so also check the Dir. + ww = w.watches.byPath(filepath.Dir(path)) + } + return ww != nil && ww.recurse +} + +func (w *inotify) newEvent(name string, mask, cookie uint32) Event { + e := Event{Name: name} + if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { + e.Op |= Create + } + if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE { + e.Op |= Remove + } + if mask&unix.IN_MODIFY == unix.IN_MODIFY { + e.Op |= Write + } + if mask&unix.IN_OPEN == unix.IN_OPEN { + e.Op |= xUnportableOpen + } + if mask&unix.IN_ACCESS == unix.IN_ACCESS { + e.Op |= xUnportableRead + } + if mask&unix.IN_CLOSE_WRITE == unix.IN_CLOSE_WRITE { + e.Op |= xUnportableCloseWrite + } + if mask&unix.IN_CLOSE_NOWRITE == unix.IN_CLOSE_NOWRITE { + e.Op |= xUnportableCloseRead + } + if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { + e.Op |= Rename + } + if mask&unix.IN_ATTRIB == unix.IN_ATTRIB { + e.Op |= Chmod + } + + if cookie != 0 { + if mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { + w.cookiesMu.Lock() + w.cookies[w.cookieIndex] = koekje{cookie: cookie, path: e.Name} + w.cookieIndex++ + if w.cookieIndex > 9 { + w.cookieIndex = 0 + } + w.cookiesMu.Unlock() + } else if mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { + w.cookiesMu.Lock() + var prev string + for _, c := range w.cookies { + if c.cookie == cookie { + prev = c.path + break + } + } + w.cookiesMu.Unlock() + e.renamedFrom = prev + } + } + return e +} + +func (w *inotify) xSupports(op Op) bool { + return true // Supports everything. 
+} + +func (w *inotify) state() { + w.watches.mu.Lock() + defer w.watches.mu.Unlock() + for wd, ww := range w.watches.wd { + fmt.Fprintf(os.Stderr, "%4d: recurse=%t %q\n", wd, ww.recurse, ww.path) + } +} diff --git a/tools/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go b/tools/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go new file mode 100644 index 000000000..d8de5ab76 --- /dev/null +++ b/tools/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go @@ -0,0 +1,733 @@ +//go:build freebsd || openbsd || netbsd || dragonfly || darwin + +package fsnotify + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "runtime" + "sync" + "time" + + "github.com/fsnotify/fsnotify/internal" + "golang.org/x/sys/unix" +) + +type kqueue struct { + Events chan Event + Errors chan error + + kq int // File descriptor (as returned by the kqueue() syscall). + closepipe [2]int // Pipe used for closing kq. + watches *watches + done chan struct{} + doneMu sync.Mutex +} + +type ( + watches struct { + mu sync.RWMutex + wd map[int]watch // wd → watch + path map[string]int // pathname → wd + byDir map[string]map[int]struct{} // dirname(path) → wd + seen map[string]struct{} // Keep track of if we know this file exists. + byUser map[string]struct{} // Watches added with Watcher.Add() + } + watch struct { + wd int + name string + linkName string // In case of links; name is the target, and this is the link. + isDir bool + dirFlags uint32 + } +) + +func newWatches() *watches { + return &watches{ + wd: make(map[int]watch), + path: make(map[string]int), + byDir: make(map[string]map[int]struct{}), + seen: make(map[string]struct{}), + byUser: make(map[string]struct{}), + } +} + +func (w *watches) listPaths(userOnly bool) []string { + w.mu.RLock() + defer w.mu.RUnlock() + + if userOnly { + l := make([]string, 0, len(w.byUser)) + for p := range w.byUser { + l = append(l, p) + } + return l + } + + l := make([]string, 0, len(w.path)) + for p := range w.path { + l = append(l, p) + } + return l +} + +func (w *watches) watchesInDir(path string) []string { + w.mu.RLock() + defer w.mu.RUnlock() + + l := make([]string, 0, 4) + for fd := range w.byDir[path] { + info := w.wd[fd] + if _, ok := w.byUser[info.name]; !ok { + l = append(l, info.name) + } + } + return l +} + +// Mark path as added by the user. 
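+// These are the paths returned by WatchList().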
+func (w *watches) addUserWatch(path string) { + w.mu.Lock() + defer w.mu.Unlock() + w.byUser[path] = struct{}{} +} + +func (w *watches) addLink(path string, fd int) { + w.mu.Lock() + defer w.mu.Unlock() + + w.path[path] = fd + w.seen[path] = struct{}{} +} + +func (w *watches) add(path, linkPath string, fd int, isDir bool) { + w.mu.Lock() + defer w.mu.Unlock() + + w.path[path] = fd + w.wd[fd] = watch{wd: fd, name: path, linkName: linkPath, isDir: isDir} + + parent := filepath.Dir(path) + byDir, ok := w.byDir[parent] + if !ok { + byDir = make(map[int]struct{}, 1) + w.byDir[parent] = byDir + } + byDir[fd] = struct{}{} +} + +func (w *watches) byWd(fd int) (watch, bool) { + w.mu.RLock() + defer w.mu.RUnlock() + info, ok := w.wd[fd] + return info, ok +} + +func (w *watches) byPath(path string) (watch, bool) { + w.mu.RLock() + defer w.mu.RUnlock() + info, ok := w.wd[w.path[path]] + return info, ok +} + +func (w *watches) updateDirFlags(path string, flags uint32) { + w.mu.Lock() + defer w.mu.Unlock() + + fd := w.path[path] + info := w.wd[fd] + info.dirFlags = flags + w.wd[fd] = info +} + +func (w *watches) remove(fd int, path string) bool { + w.mu.Lock() + defer w.mu.Unlock() + + isDir := w.wd[fd].isDir + delete(w.path, path) + delete(w.byUser, path) + + parent := filepath.Dir(path) + delete(w.byDir[parent], fd) + + if len(w.byDir[parent]) == 0 { + delete(w.byDir, parent) + } + + delete(w.wd, fd) + delete(w.seen, path) + return isDir +} + +func (w *watches) markSeen(path string, exists bool) { + w.mu.Lock() + defer w.mu.Unlock() + if exists { + w.seen[path] = struct{}{} + } else { + delete(w.seen, path) + } +} + +func (w *watches) seenBefore(path string) bool { + w.mu.RLock() + defer w.mu.RUnlock() + _, ok := w.seen[path] + return ok +} + +func newBackend(ev chan Event, errs chan error) (backend, error) { + return newBufferedBackend(0, ev, errs) +} + +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { + kq, closepipe, err := newKqueue() + if err != nil { + return nil, err + } + + w := &kqueue{ + Events: ev, + Errors: errs, + kq: kq, + closepipe: closepipe, + done: make(chan struct{}), + watches: newWatches(), + } + + go w.readEvents() + return w, nil +} + +// newKqueue creates a new kernel event queue and returns a descriptor. +// +// This registers a new event on closepipe, which will trigger an event when +// it's closed. This way we can use kevent() without timeout/polling; without +// the closepipe, it would block forever and we wouldn't be able to stop it at +// all. +func newKqueue() (kq int, closepipe [2]int, err error) { + kq, err = unix.Kqueue() + if kq == -1 { + return kq, closepipe, err + } + + // Register the close pipe. + err = unix.Pipe(closepipe[:]) + if err != nil { + unix.Close(kq) + return kq, closepipe, err + } + unix.CloseOnExec(closepipe[0]) + unix.CloseOnExec(closepipe[1]) + + // Register changes to listen on the closepipe. + changes := make([]unix.Kevent_t, 1) + // SetKevent converts int to the platform-specific types. + unix.SetKevent(&changes[0], closepipe[0], unix.EVFILT_READ, + unix.EV_ADD|unix.EV_ENABLE|unix.EV_ONESHOT) + + ok, err := unix.Kevent(kq, changes, nil, nil) + if ok == -1 { + unix.Close(kq) + unix.Close(closepipe[0]) + unix.Close(closepipe[1]) + return kq, closepipe, err + } + return kq, closepipe, nil +} + +// Returns true if the event was sent, or false if watcher is closed. 
+func (w *kqueue) sendEvent(e Event) bool { + select { + case <-w.done: + return false + case w.Events <- e: + return true + } +} + +// Returns true if the error was sent, or false if watcher is closed. +func (w *kqueue) sendError(err error) bool { + if err == nil { + return true + } + select { + case <-w.done: + return false + case w.Errors <- err: + return true + } +} + +func (w *kqueue) isClosed() bool { + select { + case <-w.done: + return true + default: + return false + } +} + +func (w *kqueue) Close() error { + w.doneMu.Lock() + if w.isClosed() { + w.doneMu.Unlock() + return nil + } + close(w.done) + w.doneMu.Unlock() + + pathsToRemove := w.watches.listPaths(false) + for _, name := range pathsToRemove { + w.Remove(name) + } + + // Send "quit" message to the reader goroutine. + unix.Close(w.closepipe[1]) + return nil +} + +func (w *kqueue) Add(name string) error { return w.AddWith(name) } + +func (w *kqueue) AddWith(name string, opts ...addOpt) error { + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } + + with := getOptions(opts...) + if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } + + _, err := w.addWatch(name, noteAllEvents) + if err != nil { + return err + } + w.watches.addUserWatch(name) + return nil +} + +func (w *kqueue) Remove(name string) error { + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } + return w.remove(name, true) +} + +func (w *kqueue) remove(name string, unwatchFiles bool) error { + if w.isClosed() { + return nil + } + + name = filepath.Clean(name) + info, ok := w.watches.byPath(name) + if !ok { + return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) + } + + err := w.register([]int{info.wd}, unix.EV_DELETE, 0) + if err != nil { + return err + } + + unix.Close(info.wd) + + isDir := w.watches.remove(info.wd, name) + + // Find all watched paths that are in this directory that are not external. + if unwatchFiles && isDir { + pathsToRemove := w.watches.watchesInDir(name) + for _, name := range pathsToRemove { + // Since these are internal, not much sense in propagating error to + // the user, as that will just confuse them with an error about a + // path they did not explicitly watch themselves. + w.Remove(name) + } + } + return nil +} + +func (w *kqueue) WatchList() []string { + if w.isClosed() { + return nil + } + return w.watches.listPaths(true) +} + +// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) +const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME + +// addWatch adds name to the watched file set; the flags are interpreted as +// described in kevent(2). +// +// Returns the real path to the file which was added, with symlinks resolved. +func (w *kqueue) addWatch(name string, flags uint32) (string, error) { + if w.isClosed() { + return "", ErrClosed + } + + name = filepath.Clean(name) + + info, alreadyWatching := w.watches.byPath(name) + if !alreadyWatching { + fi, err := os.Lstat(name) + if err != nil { + return "", err + } + + // Don't watch sockets or named pipes. + if (fi.Mode()&os.ModeSocket == os.ModeSocket) || (fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe) { + return "", nil + } + + // Follow symlinks. 
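+		// Watch the target, but remember the link name so that events are
+		// reported for the path the user actually added (see newEvent).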
+ if fi.Mode()&os.ModeSymlink == os.ModeSymlink { + link, err := os.Readlink(name) + if err != nil { + // Return nil because Linux can add unresolvable symlinks to the + // watch list without problems, so maintain consistency with + // that. There will be no file events for broken symlinks. + // TODO: more specific check; returns os.PathError; ENOENT? + return "", nil + } + + _, alreadyWatching = w.watches.byPath(link) + if alreadyWatching { + // Add to watches so we don't get spurious Create events later + // on when we diff the directories. + w.watches.addLink(name, 0) + return link, nil + } + + info.linkName = name + name = link + fi, err = os.Lstat(name) + if err != nil { + return "", nil + } + } + + // Retry on EINTR; open() can return EINTR in practice on macOS. + // See #354, and Go issues 11180 and 39237. + for { + info.wd, err = unix.Open(name, openMode, 0) + if err == nil { + break + } + if errors.Is(err, unix.EINTR) { + continue + } + + return "", err + } + + info.isDir = fi.IsDir() + } + + err := w.register([]int{info.wd}, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE, flags) + if err != nil { + unix.Close(info.wd) + return "", err + } + + if !alreadyWatching { + w.watches.add(name, info.linkName, info.wd, info.isDir) + } + + // Watch the directory if it has not been watched before, or if it was + // watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) + if info.isDir { + watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && + (!alreadyWatching || (info.dirFlags&unix.NOTE_WRITE) != unix.NOTE_WRITE) + w.watches.updateDirFlags(name, flags) + + if watchDir { + if err := w.watchDirectoryFiles(name); err != nil { + return "", err + } + } + } + return name, nil +} + +// readEvents reads from kqueue and converts the received kevents into +// Event values that it sends down the Events channel. +func (w *kqueue) readEvents() { + defer func() { + close(w.Events) + close(w.Errors) + _ = unix.Close(w.kq) + unix.Close(w.closepipe[0]) + }() + + eventBuffer := make([]unix.Kevent_t, 10) + for { + kevents, err := w.read(eventBuffer) + // EINTR is okay, the syscall was interrupted before timeout expired. + if err != nil && err != unix.EINTR { + if !w.sendError(fmt.Errorf("fsnotify.readEvents: %w", err)) { + return + } + } + + for _, kevent := range kevents { + var ( + wd = int(kevent.Ident) + mask = uint32(kevent.Fflags) + ) + + // Shut down the loop when the pipe is closed, but only after all + // other events have been processed. + if wd == w.closepipe[0] { + return + } + + path, ok := w.watches.byWd(wd) + if debug { + internal.Debug(path.name, &kevent) + } + + // On macOS it seems that sometimes an event with Ident=0 is + // delivered, and no other flags/information beyond that, even + // though we never saw such a file descriptor. For example in + // TestWatchSymlink/277 (usually at the end, but sometimes sooner): + // + // fmt.Printf("READ: %2d %#v\n", kevent.Ident, kevent) + // unix.Kevent_t{Ident:0x2a, Filter:-4, Flags:0x25, Fflags:0x2, Data:0, Udata:(*uint8)(nil)} + // unix.Kevent_t{Ident:0x0, Filter:-4, Flags:0x25, Fflags:0x2, Data:0, Udata:(*uint8)(nil)} + // + // The first is a normal event, the second with Ident 0. No error + // flag, no data, no ... nothing. + // + // I read a bit through bsd/kern_event.c from the xnu source, but I + // don't really see an obvious location where this is triggered – + // this doesn't seem intentional, but idk... + // + // Technically fd 0 is a valid descriptor, so only skip it if + // there's no path, and if we're on macOS. 
+ if !ok && kevent.Ident == 0 && runtime.GOOS == "darwin" {
+ continue
+ }
+
+ event := w.newEvent(path.name, path.linkName, mask)
+
+ if event.Has(Rename) || event.Has(Remove) {
+ w.remove(event.Name, false)
+ w.watches.markSeen(event.Name, false)
+ }
+
+ if path.isDir && event.Has(Write) && !event.Has(Remove) {
+ w.dirChange(event.Name)
+ } else if !w.sendEvent(event) {
+ return
+ }
+
+ if event.Has(Remove) {
+ // Look for a file that may have overwritten this; for example,
+ // mv f1 f2 will delete f2, then create f2.
+ if path.isDir {
+ fileDir := filepath.Clean(event.Name)
+ _, found := w.watches.byPath(fileDir)
+ if found {
+ // TODO: this branch is never triggered in any test.
+ // Added in d6220df (2012).
+ // isDir check added in 8611c35 (2016): https://github.com/fsnotify/fsnotify/pull/111
+ //
+ // I don't really get how this can be triggered either.
+ // And it wasn't triggered in the patch that added it,
+ // either.
+ //
+ // Original also had a comment:
+ // make sure the directory exists before we watch for
+ // changes. When we do a recursive watch and perform
+ // rm -rf, the parent directory might have gone
+ // missing, ignore the missing directory and let the
+ // upcoming delete event remove the watch from the
+ // parent directory.
+ err := w.dirChange(fileDir)
+ if !w.sendError(err) {
+ return
+ }
+ }
+ } else {
+ path := filepath.Clean(event.Name)
+ if fi, err := os.Lstat(path); err == nil {
+ err := w.sendCreateIfNew(path, fi)
+ if !w.sendError(err) {
+ return
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+// newEvent returns a platform-independent Event based on kqueue Fflags.
+func (w *kqueue) newEvent(name, linkName string, mask uint32) Event {
+ e := Event{Name: name}
+ if linkName != "" {
+ // If the user watched "/path/link" then emit events as "/path/link"
+ // rather than "/path/target".
+ e.Name = linkName
+ }
+
+ if mask&unix.NOTE_DELETE == unix.NOTE_DELETE {
+ e.Op |= Remove
+ }
+ if mask&unix.NOTE_WRITE == unix.NOTE_WRITE {
+ e.Op |= Write
+ }
+ if mask&unix.NOTE_RENAME == unix.NOTE_RENAME {
+ e.Op |= Rename
+ }
+ if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB {
+ e.Op |= Chmod
+ }
+ // No point sending a write and delete event at the same time: if it's gone,
+ // then it's gone.
+ if e.Op.Has(Write) && e.Op.Has(Remove) {
+ e.Op &^= Write
+ }
+ return e
+}
+
+// watchDirectoryFiles watches every file in a directory, to mimic inotify when
+// adding a watch on a directory.
+func (w *kqueue) watchDirectoryFiles(dirPath string) error {
+ files, err := os.ReadDir(dirPath)
+ if err != nil {
+ return err
+ }
+
+ for _, f := range files {
+ path := filepath.Join(dirPath, f.Name())
+
+ fi, err := f.Info()
+ if err != nil {
+ return fmt.Errorf("%q: %w", path, err)
+ }
+
+ cleanPath, err := w.internalWatch(path, fi)
+ if err != nil {
+ // No permission to read the file; that's not a problem: just skip.
+ // But do mark it as seen to prevent it from being picked up
+ // as a "new" file later (it still shows up in the directory
+ // listing).
+ switch {
+ case errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM):
+ cleanPath = filepath.Clean(path)
+ default:
+ return fmt.Errorf("%q: %w", path, err)
+ }
+ }
+
+ w.watches.markSeen(cleanPath, true)
+ }
+
+ return nil
+}
+
+// Search the directory for new files and send an event for them.
+//
+// This functionality is to have the BSD watcher match inotify, which sends
+// a create event for files created in a watched directory.
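+//
+// Editor's note: an illustrative sketch (not upstream code) of the behaviour
+// this emulation provides; creating a file in a watched directory produces a
+// Create event on kqueue platforms just as it does with inotify:
+//
+//	w, _ := fsnotify.NewWatcher()
+//	_ = w.Add("/tmp/dir")
+//	_ = os.WriteFile("/tmp/dir/new", nil, 0o644)
+//	e := <-w.Events // e.Op.Has(fsnotify.Create) == true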
+func (w *kqueue) dirChange(dir string) error { + files, err := os.ReadDir(dir) + if err != nil { + // Directory no longer exists: we can ignore this safely. kqueue will + // still give us the correct events. + if errors.Is(err, os.ErrNotExist) { + return nil + } + return fmt.Errorf("fsnotify.dirChange: %w", err) + } + + for _, f := range files { + fi, err := f.Info() + if err != nil { + return fmt.Errorf("fsnotify.dirChange: %w", err) + } + + err = w.sendCreateIfNew(filepath.Join(dir, fi.Name()), fi) + if err != nil { + // Don't need to send an error if this file isn't readable. + if errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM) { + return nil + } + return fmt.Errorf("fsnotify.dirChange: %w", err) + } + } + return nil +} + +// Send a create event if the file isn't already being tracked, and start +// watching this file. +func (w *kqueue) sendCreateIfNew(path string, fi os.FileInfo) error { + if !w.watches.seenBefore(path) { + if !w.sendEvent(Event{Name: path, Op: Create}) { + return nil + } + } + + // Like watchDirectoryFiles, but without doing another ReadDir. + path, err := w.internalWatch(path, fi) + if err != nil { + return err + } + w.watches.markSeen(path, true) + return nil +} + +func (w *kqueue) internalWatch(name string, fi os.FileInfo) (string, error) { + if fi.IsDir() { + // mimic Linux providing delete events for subdirectories, but preserve + // the flags used if currently watching subdirectory + info, _ := w.watches.byPath(name) + return w.addWatch(name, info.dirFlags|unix.NOTE_DELETE|unix.NOTE_RENAME) + } + + // watch file to mimic Linux inotify + return w.addWatch(name, noteAllEvents) +} + +// Register events with the queue. +func (w *kqueue) register(fds []int, flags int, fflags uint32) error { + changes := make([]unix.Kevent_t, len(fds)) + for i, fd := range fds { + // SetKevent converts int to the platform-specific types. + unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags) + changes[i].Fflags = fflags + } + + // Register the events. + success, err := unix.Kevent(w.kq, changes, nil, nil) + if success == -1 { + return err + } + return nil +} + +// read retrieves pending events, or waits until an event occurs. +func (w *kqueue) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) { + n, err := unix.Kevent(w.kq, nil, events, nil) + if err != nil { + return nil, err + } + return events[0:n], nil +} + +func (w *kqueue) xSupports(op Op) bool { + if runtime.GOOS == "freebsd" { + //return true // Supports everything. 
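+ // (Editor's note: FreeBSD's kqueue exposes NOTE_OPEN, NOTE_READ,
+ // NOTE_CLOSE, and NOTE_CLOSE_WRITE (see internal/debug_freebsd.go),
+ // so the unportable ops could be supported there; with the return
+ // above commented out, this backend still reports them as
+ // unsupported.)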
+ } + if op.Has(xUnportableOpen) || op.Has(xUnportableRead) || + op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) { + return false + } + return true +} diff --git a/tools/vendor/github.com/fsnotify/fsnotify/backend_other.go b/tools/vendor/github.com/fsnotify/fsnotify/backend_other.go new file mode 100644 index 000000000..5eb5dbc66 --- /dev/null +++ b/tools/vendor/github.com/fsnotify/fsnotify/backend_other.go @@ -0,0 +1,23 @@ +//go:build appengine || (!darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows) + +package fsnotify + +import "errors" + +type other struct { + Events chan Event + Errors chan error +} + +func newBackend(ev chan Event, errs chan error) (backend, error) { + return nil, errors.New("fsnotify not supported on the current platform") +} +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { + return newBackend(ev, errs) +} +func (w *other) Close() error { return nil } +func (w *other) WatchList() []string { return nil } +func (w *other) Add(name string) error { return nil } +func (w *other) AddWith(name string, opts ...addOpt) error { return nil } +func (w *other) Remove(name string) error { return nil } +func (w *other) xSupports(op Op) bool { return false } diff --git a/tools/vendor/github.com/fsnotify/fsnotify/backend_windows.go b/tools/vendor/github.com/fsnotify/fsnotify/backend_windows.go new file mode 100644 index 000000000..c54a63083 --- /dev/null +++ b/tools/vendor/github.com/fsnotify/fsnotify/backend_windows.go @@ -0,0 +1,682 @@ +//go:build windows + +// Windows backend based on ReadDirectoryChangesW() +// +// https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-readdirectorychangesw + +package fsnotify + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "reflect" + "runtime" + "strings" + "sync" + "time" + "unsafe" + + "github.com/fsnotify/fsnotify/internal" + "golang.org/x/sys/windows" +) + +type readDirChangesW struct { + Events chan Event + Errors chan error + + port windows.Handle // Handle to completion port + input chan *input // Inputs to the reader are sent on this channel + quit chan chan<- error + + mu sync.Mutex // Protects access to watches, closed + watches watchMap // Map of watches (key: i-number) + closed bool // Set to true when Close() is first called +} + +func newBackend(ev chan Event, errs chan error) (backend, error) { + return newBufferedBackend(50, ev, errs) +} + +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { + port, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0) + if err != nil { + return nil, os.NewSyscallError("CreateIoCompletionPort", err) + } + w := &readDirChangesW{ + Events: ev, + Errors: errs, + port: port, + watches: make(watchMap), + input: make(chan *input, 1), + quit: make(chan chan<- error, 1), + } + go w.readEvents() + return w, nil +} + +func (w *readDirChangesW) isClosed() bool { + w.mu.Lock() + defer w.mu.Unlock() + return w.closed +} + +func (w *readDirChangesW) sendEvent(name, renamedFrom string, mask uint64) bool { + if mask == 0 { + return false + } + + event := w.newEvent(name, uint32(mask)) + event.renamedFrom = renamedFrom + select { + case ch := <-w.quit: + w.quit <- ch + case w.Events <- event: + } + return true +} + +// Returns true if the error was sent, or false if watcher is closed. 
+func (w *readDirChangesW) sendError(err error) bool { + if err == nil { + return true + } + select { + case w.Errors <- err: + return true + case <-w.quit: + return false + } +} + +func (w *readDirChangesW) Close() error { + if w.isClosed() { + return nil + } + + w.mu.Lock() + w.closed = true + w.mu.Unlock() + + // Send "quit" message to the reader goroutine + ch := make(chan error) + w.quit <- ch + if err := w.wakeupReader(); err != nil { + return err + } + return <-ch +} + +func (w *readDirChangesW) Add(name string) error { return w.AddWith(name) } + +func (w *readDirChangesW) AddWith(name string, opts ...addOpt) error { + if w.isClosed() { + return ErrClosed + } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), filepath.ToSlash(name)) + } + + with := getOptions(opts...) + if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } + if with.bufsize < 4096 { + return fmt.Errorf("fsnotify.WithBufferSize: buffer size cannot be smaller than 4096 bytes") + } + + in := &input{ + op: opAddWatch, + path: filepath.Clean(name), + flags: sysFSALLEVENTS, + reply: make(chan error), + bufsize: with.bufsize, + } + w.input <- in + if err := w.wakeupReader(); err != nil { + return err + } + return <-in.reply +} + +func (w *readDirChangesW) Remove(name string) error { + if w.isClosed() { + return nil + } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), filepath.ToSlash(name)) + } + + in := &input{ + op: opRemoveWatch, + path: filepath.Clean(name), + reply: make(chan error), + } + w.input <- in + if err := w.wakeupReader(); err != nil { + return err + } + return <-in.reply +} + +func (w *readDirChangesW) WatchList() []string { + if w.isClosed() { + return nil + } + + w.mu.Lock() + defer w.mu.Unlock() + + entries := make([]string, 0, len(w.watches)) + for _, entry := range w.watches { + for _, watchEntry := range entry { + for name := range watchEntry.names { + entries = append(entries, filepath.Join(watchEntry.path, name)) + } + // the directory itself is being watched + if watchEntry.mask != 0 { + entries = append(entries, watchEntry.path) + } + } + } + + return entries +} + +// These options are from the old golang.org/x/exp/winfsnotify, where you could +// add various options to the watch. This has long since been removed. +// +// The "sys" in the name is misleading as they're not part of any "system". 
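+//
+// (Editor's note: the translation to the real Windows flags happens in
+// toWindowsFlags further down; e.g. sysFSMODIFY maps to
+// windows.FILE_NOTIFY_CHANGE_LAST_WRITE.)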
+// +// This should all be removed at some point, and just use windows.FILE_NOTIFY_* +const ( + sysFSALLEVENTS = 0xfff + sysFSCREATE = 0x100 + sysFSDELETE = 0x200 + sysFSDELETESELF = 0x400 + sysFSMODIFY = 0x2 + sysFSMOVE = 0xc0 + sysFSMOVEDFROM = 0x40 + sysFSMOVEDTO = 0x80 + sysFSMOVESELF = 0x800 + sysFSIGNORED = 0x8000 +) + +func (w *readDirChangesW) newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO { + e.Op |= Create + } + if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF { + e.Op |= Remove + } + if mask&sysFSMODIFY == sysFSMODIFY { + e.Op |= Write + } + if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM { + e.Op |= Rename + } + return e +} + +const ( + opAddWatch = iota + opRemoveWatch +) + +const ( + provisional uint64 = 1 << (32 + iota) +) + +type input struct { + op int + path string + flags uint32 + bufsize int + reply chan error +} + +type inode struct { + handle windows.Handle + volume uint32 + index uint64 +} + +type watch struct { + ov windows.Overlapped + ino *inode // i-number + recurse bool // Recursive watch? + path string // Directory path + mask uint64 // Directory itself is being watched with these notify flags + names map[string]uint64 // Map of names being watched and their notify flags + rename string // Remembers the old name while renaming a file + buf []byte // buffer, allocated later +} + +type ( + indexMap map[uint64]*watch + watchMap map[uint32]indexMap +) + +func (w *readDirChangesW) wakeupReader() error { + err := windows.PostQueuedCompletionStatus(w.port, 0, 0, nil) + if err != nil { + return os.NewSyscallError("PostQueuedCompletionStatus", err) + } + return nil +} + +func (w *readDirChangesW) getDir(pathname string) (dir string, err error) { + attr, err := windows.GetFileAttributes(windows.StringToUTF16Ptr(pathname)) + if err != nil { + return "", os.NewSyscallError("GetFileAttributes", err) + } + if attr&windows.FILE_ATTRIBUTE_DIRECTORY != 0 { + dir = pathname + } else { + dir, _ = filepath.Split(pathname) + dir = filepath.Clean(dir) + } + return +} + +func (w *readDirChangesW) getIno(path string) (ino *inode, err error) { + h, err := windows.CreateFile(windows.StringToUTF16Ptr(path), + windows.FILE_LIST_DIRECTORY, + windows.FILE_SHARE_READ|windows.FILE_SHARE_WRITE|windows.FILE_SHARE_DELETE, + nil, windows.OPEN_EXISTING, + windows.FILE_FLAG_BACKUP_SEMANTICS|windows.FILE_FLAG_OVERLAPPED, 0) + if err != nil { + return nil, os.NewSyscallError("CreateFile", err) + } + + var fi windows.ByHandleFileInformation + err = windows.GetFileInformationByHandle(h, &fi) + if err != nil { + windows.CloseHandle(h) + return nil, os.NewSyscallError("GetFileInformationByHandle", err) + } + ino = &inode{ + handle: h, + volume: fi.VolumeSerialNumber, + index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow), + } + return ino, nil +} + +// Must run within the I/O thread. +func (m watchMap) get(ino *inode) *watch { + if i := m[ino.volume]; i != nil { + return i[ino.index] + } + return nil +} + +// Must run within the I/O thread. +func (m watchMap) set(ino *inode, watch *watch) { + i := m[ino.volume] + if i == nil { + i = make(indexMap) + m[ino.volume] = i + } + i[ino.index] = watch +} + +// Must run within the I/O thread. 
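+// Callers on other goroutines marshal requests through w.input and wake the
+// completion port; an illustrative sketch of that pattern (editor's note,
+// mirroring AddWith above):
+//
+//	in := &input{op: opAddWatch, path: `C:\dir`, flags: sysFSALLEVENTS, bufsize: 65536, reply: make(chan error)}
+//	w.input <- in
+//	_ = w.wakeupReader()
+//	err := <-in.reply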
+func (w *readDirChangesW) addWatch(pathname string, flags uint64, bufsize int) error {
+ pathname, recurse := recursivePath(pathname)
+
+ dir, err := w.getDir(pathname)
+ if err != nil {
+ return err
+ }
+
+ ino, err := w.getIno(dir)
+ if err != nil {
+ return err
+ }
+ w.mu.Lock()
+ watchEntry := w.watches.get(ino)
+ w.mu.Unlock()
+ if watchEntry == nil {
+ _, err := windows.CreateIoCompletionPort(ino.handle, w.port, 0, 0)
+ if err != nil {
+ windows.CloseHandle(ino.handle)
+ return os.NewSyscallError("CreateIoCompletionPort", err)
+ }
+ watchEntry = &watch{
+ ino: ino,
+ path: dir,
+ names: make(map[string]uint64),
+ recurse: recurse,
+ buf: make([]byte, bufsize),
+ }
+ w.mu.Lock()
+ w.watches.set(ino, watchEntry)
+ w.mu.Unlock()
+ flags |= provisional
+ } else {
+ windows.CloseHandle(ino.handle)
+ }
+ if pathname == dir {
+ watchEntry.mask |= flags
+ } else {
+ watchEntry.names[filepath.Base(pathname)] |= flags
+ }
+
+ err = w.startRead(watchEntry)
+ if err != nil {
+ return err
+ }
+
+ if pathname == dir {
+ watchEntry.mask &= ^provisional
+ } else {
+ watchEntry.names[filepath.Base(pathname)] &= ^provisional
+ }
+ return nil
+}
+
+// Must run within the I/O thread.
+func (w *readDirChangesW) remWatch(pathname string) error {
+ pathname, recurse := recursivePath(pathname)
+
+ dir, err := w.getDir(pathname)
+ if err != nil {
+ return err
+ }
+ ino, err := w.getIno(dir)
+ if err != nil {
+ return err
+ }
+
+ w.mu.Lock()
+ watch := w.watches.get(ino)
+ w.mu.Unlock()
+
+ // Check watch before dereferencing it, and close the handle on the
+ // error paths so it doesn't leak.
+ if watch == nil {
+ windows.CloseHandle(ino.handle)
+ return fmt.Errorf("%w: %s", ErrNonExistentWatch, pathname)
+ }
+ if recurse && !watch.recurse {
+ windows.CloseHandle(ino.handle)
+ return fmt.Errorf("can't use \\... with non-recursive watch %q", pathname)
+ }
+
+ err = windows.CloseHandle(ino.handle)
+ if err != nil {
+ w.sendError(os.NewSyscallError("CloseHandle", err))
+ }
+ if pathname == dir {
+ w.sendEvent(watch.path, "", watch.mask&sysFSIGNORED)
+ watch.mask = 0
+ } else {
+ name := filepath.Base(pathname)
+ w.sendEvent(filepath.Join(watch.path, name), "", watch.names[name]&sysFSIGNORED)
+ delete(watch.names, name)
+ }
+
+ return w.startRead(watch)
+}
+
+// Must run within the I/O thread.
+func (w *readDirChangesW) deleteWatch(watch *watch) {
+ for name, mask := range watch.names {
+ if mask&provisional == 0 {
+ w.sendEvent(filepath.Join(watch.path, name), "", mask&sysFSIGNORED)
+ }
+ delete(watch.names, name)
+ }
+ if watch.mask != 0 {
+ if watch.mask&provisional == 0 {
+ w.sendEvent(watch.path, "", watch.mask&sysFSIGNORED)
+ }
+ watch.mask = 0
+ }
+}
+
+// Must run within the I/O thread.
+func (w *readDirChangesW) startRead(watch *watch) error {
+ err := windows.CancelIo(watch.ino.handle)
+ if err != nil {
+ w.sendError(os.NewSyscallError("CancelIo", err))
+ w.deleteWatch(watch)
+ }
+ mask := w.toWindowsFlags(watch.mask)
+ for _, m := range watch.names {
+ mask |= w.toWindowsFlags(m)
+ }
+ if mask == 0 {
+ err := windows.CloseHandle(watch.ino.handle)
+ if err != nil {
+ w.sendError(os.NewSyscallError("CloseHandle", err))
+ }
+ w.mu.Lock()
+ delete(w.watches[watch.ino.volume], watch.ino.index)
+ w.mu.Unlock()
+ return nil
+ }
+
+ // We need to pass the array, rather than the slice.
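+ // (Editor's note: the same base pointer could be taken as
+ // (*byte)(unsafe.Pointer(&watch.buf[0])), since AddWith enforces a
+ // minimum bufsize of 4096 and the buffer is therefore never empty;
+ // the reflect.SliceHeader form below achieves the same thing.)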
+ hdr := (*reflect.SliceHeader)(unsafe.Pointer(&watch.buf))
+ rdErr := windows.ReadDirectoryChanges(watch.ino.handle,
+ (*byte)(unsafe.Pointer(hdr.Data)), uint32(hdr.Len),
+ watch.recurse, mask, nil, &watch.ov, 0)
+ if rdErr != nil {
+ err := os.NewSyscallError("ReadDirectoryChanges", rdErr)
+ if rdErr == windows.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 {
+ // Watched directory was probably removed
+ w.sendEvent(watch.path, "", watch.mask&sysFSDELETESELF)
+ err = nil
+ }
+ w.deleteWatch(watch)
+ w.startRead(watch)
+ return err
+ }
+ return nil
+}
+
+// readEvents reads from the I/O completion port, converts the
+// received events into Event objects and sends them via the Events channel.
+// Entry point to the I/O thread.
+func (w *readDirChangesW) readEvents() {
+ var (
+ n uint32
+ key uintptr
+ ov *windows.Overlapped
+ )
+ runtime.LockOSThread()
+
+ for {
+ // This error is handled after the watch == nil check below.
+ qErr := windows.GetQueuedCompletionStatus(w.port, &n, &key, &ov, windows.INFINITE)
+
+ watch := (*watch)(unsafe.Pointer(ov))
+ if watch == nil {
+ select {
+ case ch := <-w.quit:
+ w.mu.Lock()
+ var indexes []indexMap
+ for _, index := range w.watches {
+ indexes = append(indexes, index)
+ }
+ w.mu.Unlock()
+ for _, index := range indexes {
+ for _, watch := range index {
+ w.deleteWatch(watch)
+ w.startRead(watch)
+ }
+ }
+
+ err := windows.CloseHandle(w.port)
+ if err != nil {
+ err = os.NewSyscallError("CloseHandle", err)
+ }
+ close(w.Events)
+ close(w.Errors)
+ ch <- err
+ return
+ case in := <-w.input:
+ switch in.op {
+ case opAddWatch:
+ in.reply <- w.addWatch(in.path, uint64(in.flags), in.bufsize)
+ case opRemoveWatch:
+ in.reply <- w.remWatch(in.path)
+ }
+ default:
+ }
+ continue
+ }
+
+ switch qErr {
+ case nil:
+ // No error
+ case windows.ERROR_MORE_DATA:
+ if watch == nil {
+ w.sendError(errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer"))
+ } else {
+ // The i/o succeeded but the buffer is full.
+ // In theory we should be building up a full packet.
+ // In practice we can get away with just carrying on.
+ n = uint32(unsafe.Sizeof(watch.buf))
+ }
+ case windows.ERROR_ACCESS_DENIED:
+ // Watched directory was probably removed
+ w.sendEvent(watch.path, "", watch.mask&sysFSDELETESELF)
+ w.deleteWatch(watch)
+ w.startRead(watch)
+ continue
+ case windows.ERROR_OPERATION_ABORTED:
+ // CancelIo was called on this handle
+ continue
+ default:
+ w.sendError(os.NewSyscallError("GetQueuedCompletionStatus", qErr))
+ continue
+ }
+
+ var offset uint32
+ for {
+ if n == 0 {
+ w.sendError(ErrEventOverflow)
+ break
+ }
+
+ // Point "raw" to the event in the buffer
+ raw := (*windows.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset]))
+
+ // Create a buf that is the size of the path name
+ size := int(raw.FileNameLength / 2)
+ var buf []uint16
+ // TODO: Use unsafe.Slice in Go 1.17; https://stackoverflow.com/questions/51187973
+ sh := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+ sh.Data = uintptr(unsafe.Pointer(&raw.FileName))
+ sh.Len = size
+ sh.Cap = size
+ name := windows.UTF16ToString(buf)
+ fullname := filepath.Join(watch.path, name)
+
+ if debug {
+ internal.Debug(fullname, raw.Action)
+ }
+
+ var mask uint64
+ switch raw.Action {
+ case windows.FILE_ACTION_REMOVED:
+ mask = sysFSDELETESELF
+ case windows.FILE_ACTION_MODIFIED:
+ mask = sysFSMODIFY
+ case windows.FILE_ACTION_RENAMED_OLD_NAME:
+ watch.rename = name
+ case windows.FILE_ACTION_RENAMED_NEW_NAME:
+ // Update saved path of all sub-watches.
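+ // (Editor's illustration: renaming "C:\dir\old" to "C:\dir\new"
+ // rewrites a recorded sub-watch path "C:\dir\old\sub" to
+ // "C:\dir\new\sub" via the prefix swap below.)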
+ old := filepath.Join(watch.path, watch.rename) + w.mu.Lock() + for _, watchMap := range w.watches { + for _, ww := range watchMap { + if strings.HasPrefix(ww.path, old) { + ww.path = filepath.Join(fullname, strings.TrimPrefix(ww.path, old)) + } + } + } + w.mu.Unlock() + + if watch.names[watch.rename] != 0 { + watch.names[name] |= watch.names[watch.rename] + delete(watch.names, watch.rename) + mask = sysFSMOVESELF + } + } + + if raw.Action != windows.FILE_ACTION_RENAMED_NEW_NAME { + w.sendEvent(fullname, "", watch.names[name]&mask) + } + if raw.Action == windows.FILE_ACTION_REMOVED { + w.sendEvent(fullname, "", watch.names[name]&sysFSIGNORED) + delete(watch.names, name) + } + + if watch.rename != "" && raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME { + w.sendEvent(fullname, filepath.Join(watch.path, watch.rename), watch.mask&w.toFSnotifyFlags(raw.Action)) + } else { + w.sendEvent(fullname, "", watch.mask&w.toFSnotifyFlags(raw.Action)) + } + + if raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME { + w.sendEvent(filepath.Join(watch.path, watch.rename), "", watch.names[name]&mask) + } + + // Move to the next event in the buffer + if raw.NextEntryOffset == 0 { + break + } + offset += raw.NextEntryOffset + + // Error! + if offset >= n { + //lint:ignore ST1005 Windows should be capitalized + w.sendError(errors.New("Windows system assumed buffer larger than it is, events have likely been missed")) + break + } + } + + if err := w.startRead(watch); err != nil { + w.sendError(err) + } + } +} + +func (w *readDirChangesW) toWindowsFlags(mask uint64) uint32 { + var m uint32 + if mask&sysFSMODIFY != 0 { + m |= windows.FILE_NOTIFY_CHANGE_LAST_WRITE + } + if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 { + m |= windows.FILE_NOTIFY_CHANGE_FILE_NAME | windows.FILE_NOTIFY_CHANGE_DIR_NAME + } + return m +} + +func (w *readDirChangesW) toFSnotifyFlags(action uint32) uint64 { + switch action { + case windows.FILE_ACTION_ADDED: + return sysFSCREATE + case windows.FILE_ACTION_REMOVED: + return sysFSDELETE + case windows.FILE_ACTION_MODIFIED: + return sysFSMODIFY + case windows.FILE_ACTION_RENAMED_OLD_NAME: + return sysFSMOVEDFROM + case windows.FILE_ACTION_RENAMED_NEW_NAME: + return sysFSMOVEDTO + } + return 0 +} + +func (w *readDirChangesW) xSupports(op Op) bool { + if op.Has(xUnportableOpen) || op.Has(xUnportableRead) || + op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) { + return false + } + return true +} diff --git a/tools/vendor/github.com/fsnotify/fsnotify/fen.go b/tools/vendor/github.com/fsnotify/fsnotify/fen.go deleted file mode 100644 index b3ac3d8f5..000000000 --- a/tools/vendor/github.com/fsnotify/fsnotify/fen.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build solaris -// +build solaris - -package fsnotify - -import ( - "errors" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct { - Events chan Event - Errors chan error -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - return nil, errors.New("FEN based watcher not yet supported for fsnotify\n") -} - -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - return nil -} - -// Add starts watching the named file or directory (non-recursively). 
-func (w *Watcher) Add(name string) error { - return nil -} - -// Remove stops watching the the named file or directory (non-recursively). -func (w *Watcher) Remove(name string) error { - return nil -} diff --git a/tools/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/tools/vendor/github.com/fsnotify/fsnotify/fsnotify.go index 0f4ee52e8..0760efe91 100644 --- a/tools/vendor/github.com/fsnotify/fsnotify/fsnotify.go +++ b/tools/vendor/github.com/fsnotify/fsnotify/fsnotify.go @@ -1,69 +1,494 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !plan9 -// +build !plan9 - -// Package fsnotify provides a platform-independent interface for file system notifications. +// Package fsnotify provides a cross-platform interface for file system +// notifications. +// +// Currently supported systems: +// +// - Linux via inotify +// - BSD, macOS via kqueue +// - Windows via ReadDirectoryChangesW +// - illumos via FEN +// +// # FSNOTIFY_DEBUG +// +// Set the FSNOTIFY_DEBUG environment variable to "1" to print debug messages to +// stderr. This can be useful to track down some problems, especially in cases +// where fsnotify is used as an indirect dependency. +// +// Every event will be printed as soon as there's something useful to print, +// with as little processing from fsnotify. +// +// Example output: +// +// FSNOTIFY_DEBUG: 11:34:23.633087586 256:IN_CREATE → "/tmp/file-1" +// FSNOTIFY_DEBUG: 11:34:23.633202319 4:IN_ATTRIB → "/tmp/file-1" +// FSNOTIFY_DEBUG: 11:34:28.989728764 512:IN_DELETE → "/tmp/file-1" package fsnotify import ( - "bytes" "errors" "fmt" + "os" + "path/filepath" + "strings" ) -// Event represents a single file system notification. +// Watcher watches a set of paths, delivering events on a channel. +// +// A watcher should not be copied (e.g. pass it by pointer, rather than by +// value). +// +// # Linux notes +// +// When a file is removed a Remove event won't be emitted until all file +// descriptors are closed, and deletes will always emit a Chmod. For example: +// +// fp := os.Open("file") +// os.Remove("file") // Triggers Chmod +// fp.Close() // Triggers Remove +// +// This is the event that inotify sends, so not much can be changed about this. +// +// The fs.inotify.max_user_watches sysctl variable specifies the upper limit +// for the number of watches per user, and fs.inotify.max_user_instances +// specifies the maximum number of inotify instances per user. Every Watcher you +// create is an "instance", and every path you add is a "watch". +// +// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and +// /proc/sys/fs/inotify/max_user_instances +// +// To increase them you can use sysctl or write the value to the /proc file: +// +// # Default values on Linux 5.18 +// sysctl fs.inotify.max_user_watches=124983 +// sysctl fs.inotify.max_user_instances=128 +// +// To make the changes persist on reboot edit /etc/sysctl.conf or +// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check +// your distro's documentation): +// +// fs.inotify.max_user_watches=124983 +// fs.inotify.max_user_instances=128 +// +// Reaching the limit will result in a "no space left on device" or "too many open +// files" error. +// +// # kqueue notes (macOS, BSD) +// +// kqueue requires opening a file descriptor for every file that's being watched; +// so if you're watching a directory with five files then that's six file +// descriptors. 
You will run into your system's "max open files" limit faster on
+// these platforms.
+//
+// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to
+// control the maximum number of open files, as well as /etc/login.conf on BSD
+// systems.
+//
+// # Windows notes
+//
+// Paths can be added as "C:\\path\\to\\dir", but forward slashes
+// ("C:/path/to/dir") will also work.
+//
+// When a watched directory is removed it will always send an event for the
+// directory itself, but may not send events for all files in that directory.
+// Sometimes it will send events for all files, sometimes it will send no
+// events, and often only for some files.
+//
+// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest
+// value that is guaranteed to work with SMB filesystems. If you have many
+// events in quick succession this may not be enough, and you will have to use
+// [WithBufferSize] to increase the value.
+type Watcher struct {
+ b backend
+
+ // Events sends the filesystem change events.
+ //
+ // fsnotify can send the following events; a "path" here can refer to a
+ // file, directory, symbolic link, or special file like a FIFO.
+ //
+ // fsnotify.Create A new path was created; this may be followed by one
+ // or more Write events if data also gets written to a
+ // file.
+ //
+ // fsnotify.Remove A path was removed.
+ //
+ // fsnotify.Rename A path was renamed. A rename is always sent with the
+ // old path as Event.Name, and a Create event will be
+ // sent with the new name. Renames are only sent for
+ // paths that are currently watched; e.g. moving an
+ // unmonitored file into a monitored directory will
+ // show up as just a Create. Similarly, renaming a file
+ // to outside a monitored directory will show up as
+ // only a Rename.
+ //
+ // fsnotify.Write A file or named pipe was written to. A Truncate will
+ // also trigger a Write. A single "write action"
+ // initiated by the user may show up as one or multiple
+ // writes, depending on when the system syncs things to
+ // disk. For example when compiling a large Go program
+ // you may get hundreds of Write events, and you may
+ // want to wait until you've stopped receiving them
+ // (see the dedup example in cmd/fsnotify).
+ //
+ // Some systems may send a Write event for directories
+ // when the directory content changes.
+ //
+ // fsnotify.Chmod Attributes were changed. On Linux this is also sent
+ // when a file is removed (or more accurately, when a
+ // link to an inode is removed). On kqueue it's sent
+ // when a file is truncated. On Windows it's never
+ // sent.
+ Events chan Event
+
+ // Errors sends any errors.
+ Errors chan error
+}
+
+// Event represents a file system notification.
+type Event struct {
+ // Path to the file or directory.
+ //
+ // Paths are relative to the input; for example with Add("dir") the Name
+ // will be set to "dir/file" if you create that file, but if you use
+ // Add("/path/to/dir") it will be "/path/to/dir/file".
+ Name string
+
+ // File operation that triggered the event.
+ //
+ // This is a bitmask and some systems may send multiple operations at once.
+ // Use the Event.Has() method instead of comparing with ==.
+ Op Op
+
+ // Create events will have this set to the old path if it's a rename. This
+ // only works when both the source and destination are watched. It's not
+ // reliable when watching individual files, only directories.
+ // + // For example "mv /tmp/file /tmp/rename" will emit: + // + // Event{Op: Rename, Name: "/tmp/file"} + // Event{Op: Create, Name: "/tmp/rename", RenamedFrom: "/tmp/file"} + renamedFrom string } // Op describes a set of file operations. type Op uint32 -// These are the generalized file operations that can trigger a notification. +// The operations fsnotify can trigger; see the documentation on [Watcher] for a +// full description, and check them with [Event.Has]. const ( + // A new pathname was created. Create Op = 1 << iota + + // The pathname was written to; this does *not* mean the write has finished, + // and a write can be followed by more writes. Write + + // The path was removed; any watches on it will be removed. Some "remove" + // operations may trigger a Rename if the file is actually moved (for + // example "remove to trash" is often a rename). Remove + + // The path was renamed to something else; any watches on it will be + // removed. Rename + + // File attributes were changed. + // + // It's generally not recommended to take action on this event, as it may + // get triggered very frequently by some software. For example, Spotlight + // indexing on macOS, anti-virus software, backup software, etc. Chmod + + // File descriptor was opened. + // + // Only works on Linux and FreeBSD. + xUnportableOpen + + // File was read from. + // + // Only works on Linux and FreeBSD. + xUnportableRead + + // File opened for writing was closed. + // + // Only works on Linux and FreeBSD. + // + // The advantage of using this over Write is that it's more reliable than + // waiting for Write events to stop. It's also faster (if you're not + // listening to Write events): copying a file of a few GB can easily + // generate tens of thousands of Write events in a short span of time. + xUnportableCloseWrite + + // File opened for reading was closed. + // + // Only works on Linux and FreeBSD. + xUnportableCloseRead ) -func (op Op) String() string { - // Use a buffer for efficient string concatenation - var buffer bytes.Buffer +var ( + // ErrNonExistentWatch is used when Remove() is called on a path that's not + // added. + ErrNonExistentWatch = errors.New("fsnotify: can't remove non-existent watch") + + // ErrClosed is used when trying to operate on a closed Watcher. + ErrClosed = errors.New("fsnotify: watcher already closed") + + // ErrEventOverflow is reported from the Errors channel when there are too + // many events: + // + // - inotify: inotify returns IN_Q_OVERFLOW – because there are too + // many queued events (the fs.inotify.max_queued_events + // sysctl can be used to increase this). + // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. + // - kqueue, fen: Not used. + ErrEventOverflow = errors.New("fsnotify: queue or buffer overflow") + + // ErrUnsupported is returned by AddWith() when WithOps() specified an + // Unportable event that's not supported on this platform. + xErrUnsupported = errors.New("fsnotify: not supported with this backend") +) + +// NewWatcher creates a new Watcher. +func NewWatcher() (*Watcher, error) { + ev, errs := make(chan Event), make(chan error) + b, err := newBackend(ev, errs) + if err != nil { + return nil, err + } + return &Watcher{b: b, Events: ev, Errors: errs}, nil +} + +// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events +// channel. +// +// The main use case for this is situations with a very large number of events +// where the kernel buffer size can't be increased (e.g. 
due to lack of +// permissions). An unbuffered Watcher will perform better for almost all use +// cases, and whenever possible you will be better off increasing the kernel +// buffers instead of adding a large userspace buffer. +func NewBufferedWatcher(sz uint) (*Watcher, error) { + ev, errs := make(chan Event), make(chan error) + b, err := newBufferedBackend(sz, ev, errs) + if err != nil { + return nil, err + } + return &Watcher{b: b, Events: ev, Errors: errs}, nil +} + +// Add starts monitoring the path for changes. +// +// A path can only be watched once; watching it more than once is a no-op and will +// not return an error. Paths that do not yet exist on the filesystem cannot be +// watched. +// +// A watch will be automatically removed if the watched path is deleted or +// renamed. The exception is the Windows backend, which doesn't remove the +// watcher on renames. +// +// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special +// filesystems (/proc, /sys, etc.) generally don't work. +// +// Returns [ErrClosed] if [Watcher.Close] was called. +// +// See [Watcher.AddWith] for a version that allows adding options. +// +// # Watching directories +// +// All files in a directory are monitored, including new files that are created +// after the watcher is started. Subdirectories are not watched (i.e. it's +// non-recursive). +// +// # Watching files +// +// Watching individual files (rather than directories) is generally not +// recommended as many programs (especially editors) update files atomically: it +// will write to a temporary file which is then moved to destination, +// overwriting the original (or some variant thereof). The watcher on the +// original file is now lost, as that no longer exists. +// +// The upshot of this is that a power failure or crash won't leave a +// half-written file. +// +// Watch the parent directory and use Event.Name to filter out files you're not +// interested in. There is an example of this in cmd/fsnotify/file.go. +func (w *Watcher) Add(path string) error { return w.b.Add(path) } + +// AddWith is like [Watcher.Add], but allows adding options. When using Add() +// the defaults described below are used. +// +// Possible options are: +// +// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on +// other platforms. The default is 64K (65536 bytes). +func (w *Watcher) AddWith(path string, opts ...addOpt) error { return w.b.AddWith(path, opts...) } + +// Remove stops monitoring the path for changes. +// +// Directories are always removed non-recursively. For example, if you added +// /tmp/dir and /tmp/dir/subdir then you will need to remove both. +// +// Removing a path that has not yet been added returns [ErrNonExistentWatch]. +// +// Returns nil if [Watcher.Close] was called. +func (w *Watcher) Remove(path string) error { return w.b.Remove(path) } + +// Close removes all watches and closes the Events channel. +func (w *Watcher) Close() error { return w.b.Close() } - if op&Create == Create { - buffer.WriteString("|CREATE") +// WatchList returns all paths explicitly added with [Watcher.Add] (and are not +// yet removed). +// +// Returns nil if [Watcher.Close] was called. +func (w *Watcher) WatchList() []string { return w.b.WatchList() } + +// Supports reports if all the listed operations are supported by this platform. +// +// Create, Write, Remove, Rename, and Chmod are always supported. It can only +// return false for an Op starting with Unportable. 
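+//
+// An illustrative sketch (editor's note; the method is unexported, so this is
+// only callable from within the package):
+//
+//	if !w.xSupports(xUnportableCloseWrite) {
+//		// fall back to waiting for Write events to settle
+//	}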
+func (w *Watcher) xSupports(op Op) bool { return w.b.xSupports(op) } + +func (o Op) String() string { + var b strings.Builder + if o.Has(Create) { + b.WriteString("|CREATE") + } + if o.Has(Remove) { + b.WriteString("|REMOVE") + } + if o.Has(Write) { + b.WriteString("|WRITE") } - if op&Remove == Remove { - buffer.WriteString("|REMOVE") + if o.Has(xUnportableOpen) { + b.WriteString("|OPEN") } - if op&Write == Write { - buffer.WriteString("|WRITE") + if o.Has(xUnportableRead) { + b.WriteString("|READ") } - if op&Rename == Rename { - buffer.WriteString("|RENAME") + if o.Has(xUnportableCloseWrite) { + b.WriteString("|CLOSE_WRITE") } - if op&Chmod == Chmod { - buffer.WriteString("|CHMOD") + if o.Has(xUnportableCloseRead) { + b.WriteString("|CLOSE_READ") } - if buffer.Len() == 0 { - return "" + if o.Has(Rename) { + b.WriteString("|RENAME") } - return buffer.String()[1:] // Strip leading pipe + if o.Has(Chmod) { + b.WriteString("|CHMOD") + } + if b.Len() == 0 { + return "[no events]" + } + return b.String()[1:] } -// String returns a string representation of the event in the form -// "file: REMOVE|WRITE|..." +// Has reports if this operation has the given operation. +func (o Op) Has(h Op) bool { return o&h != 0 } + +// Has reports if this event has the given operation. +func (e Event) Has(op Op) bool { return e.Op.Has(op) } + +// String returns a string representation of the event with their path. func (e Event) String() string { - return fmt.Sprintf("%q: %s", e.Name, e.Op.String()) + if e.renamedFrom != "" { + return fmt.Sprintf("%-13s %q ← %q", e.Op.String(), e.Name, e.renamedFrom) + } + return fmt.Sprintf("%-13s %q", e.Op.String(), e.Name) } -// Common errors that can be reported by a watcher -var ( - ErrEventOverflow = errors.New("fsnotify queue overflow") +type ( + backend interface { + Add(string) error + AddWith(string, ...addOpt) error + Remove(string) error + WatchList() []string + Close() error + xSupports(Op) bool + } + addOpt func(opt *withOpts) + withOpts struct { + bufsize int + op Op + noFollow bool + sendCreate bool + } ) + +var debug = func() bool { + // Check for exactly "1" (rather than mere existence) so we can add + // options/flags in the future. I don't know if we ever want that, but it's + // nice to leave the option open. + return os.Getenv("FSNOTIFY_DEBUG") == "1" +}() + +var defaultOpts = withOpts{ + bufsize: 65536, // 64K + op: Create | Write | Remove | Rename | Chmod, +} + +func getOptions(opts ...addOpt) withOpts { + with := defaultOpts + for _, o := range opts { + if o != nil { + o(&with) + } + } + return with +} + +// WithBufferSize sets the [ReadDirectoryChangesW] buffer size. +// +// This only has effect on Windows systems, and is a no-op for other backends. +// +// The default value is 64K (65536 bytes) which is the highest value that works +// on all filesystems and should be enough for most applications, but if you +// have a large burst of events it may not be enough. You can increase it if +// you're hitting "queue or buffer overflow" errors ([ErrEventOverflow]). +// +// [ReadDirectoryChangesW]: https://learn.microsoft.com/en-gb/windows/win32/api/winbase/nf-winbase-readdirectorychangesw +func WithBufferSize(bytes int) addOpt { + return func(opt *withOpts) { opt.bufsize = bytes } +} + +// WithOps sets which operations to listen for. The default is [Create], +// [Write], [Remove], [Rename], and [Chmod]. 
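+// An illustrative sketch (editor's note; withOps is unexported for now, so
+// this only works from within the package):
+//
+//	err := w.AddWith("/tmp/dir", withOps(Create|xUnportableCloseWrite))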
+// +// Excluding operations you're not interested in can save quite a bit of CPU +// time; in some use cases there may be hundreds of thousands of useless Write +// or Chmod operations per second. +// +// This can also be used to add unportable operations not supported by all +// platforms; unportable operations all start with "Unportable": +// [UnportableOpen], [UnportableRead], [UnportableCloseWrite], and +// [UnportableCloseRead]. +// +// AddWith returns an error when using an unportable operation that's not +// supported. Use [Watcher.Support] to check for support. +func withOps(op Op) addOpt { + return func(opt *withOpts) { opt.op = op } +} + +// WithNoFollow disables following symlinks, so the symlinks themselves are +// watched. +func withNoFollow() addOpt { + return func(opt *withOpts) { opt.noFollow = true } +} + +// "Internal" option for recursive watches on inotify. +func withCreate() addOpt { + return func(opt *withOpts) { opt.sendCreate = true } +} + +var enableRecurse = false + +// Check if this path is recursive (ends with "/..." or "\..."), and return the +// path with the /... stripped. +func recursivePath(path string) (string, bool) { + path = filepath.Clean(path) + if !enableRecurse { // Only enabled in tests for now. + return path, false + } + if filepath.Base(path) == "..." { + return filepath.Dir(path), true + } + return path, false +} diff --git a/tools/vendor/github.com/fsnotify/fsnotify/fsnotify_unsupported.go b/tools/vendor/github.com/fsnotify/fsnotify/fsnotify_unsupported.go deleted file mode 100644 index 596885598..000000000 --- a/tools/vendor/github.com/fsnotify/fsnotify/fsnotify_unsupported.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows -// +build !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows - -package fsnotify - -import ( - "fmt" - "runtime" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct{} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - return nil, fmt.Errorf("fsnotify not supported on %s", runtime.GOOS) -} - -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - return nil -} - -// Add starts watching the named file or directory (non-recursively). -func (w *Watcher) Add(name string) error { - return nil -} - -// Remove stops watching the the named file or directory (non-recursively). -func (w *Watcher) Remove(name string) error { - return nil -} diff --git a/tools/vendor/github.com/fsnotify/fsnotify/inotify.go b/tools/vendor/github.com/fsnotify/fsnotify/inotify.go deleted file mode 100644 index a6d0e0ec8..000000000 --- a/tools/vendor/github.com/fsnotify/fsnotify/inotify.go +++ /dev/null @@ -1,351 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build linux -// +build linux - -package fsnotify - -import ( - "errors" - "fmt" - "io" - "os" - "path/filepath" - "strings" - "sync" - "unsafe" - - "golang.org/x/sys/unix" -) - -// Watcher watches a set of files, delivering events to a channel. 
-type Watcher struct { - Events chan Event - Errors chan error - mu sync.Mutex // Map access - fd int - poller *fdPoller - watches map[string]*watch // Map of inotify watches (key: path) - paths map[int]string // Map of watched paths (key: watch descriptor) - done chan struct{} // Channel for sending a "quit message" to the reader goroutine - doneResp chan struct{} // Channel to respond to Close -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - // Create inotify fd - fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC) - if fd == -1 { - return nil, errno - } - // Create epoll - poller, err := newFdPoller(fd) - if err != nil { - unix.Close(fd) - return nil, err - } - w := &Watcher{ - fd: fd, - poller: poller, - watches: make(map[string]*watch), - paths: make(map[int]string), - Events: make(chan Event), - Errors: make(chan error), - done: make(chan struct{}), - doneResp: make(chan struct{}), - } - - go w.readEvents() - return w, nil -} - -func (w *Watcher) isClosed() bool { - select { - case <-w.done: - return true - default: - return false - } -} - -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - if w.isClosed() { - return nil - } - - // Send 'close' signal to goroutine, and set the Watcher to closed. - close(w.done) - - // Wake up goroutine - w.poller.wake() - - // Wait for goroutine to close - <-w.doneResp - - return nil -} - -// Add starts watching the named file or directory (non-recursively). -func (w *Watcher) Add(name string) error { - name = filepath.Clean(name) - if w.isClosed() { - return errors.New("inotify instance already closed") - } - - const agnosticEvents = unix.IN_MOVED_TO | unix.IN_MOVED_FROM | - unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY | - unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF - - var flags uint32 = agnosticEvents - - w.mu.Lock() - defer w.mu.Unlock() - watchEntry := w.watches[name] - if watchEntry != nil { - flags |= watchEntry.flags | unix.IN_MASK_ADD - } - wd, errno := unix.InotifyAddWatch(w.fd, name, flags) - if wd == -1 { - return errno - } - - if watchEntry == nil { - w.watches[name] = &watch{wd: uint32(wd), flags: flags} - w.paths[wd] = name - } else { - watchEntry.wd = uint32(wd) - watchEntry.flags = flags - } - - return nil -} - -// Remove stops watching the named file or directory (non-recursively). -func (w *Watcher) Remove(name string) error { - name = filepath.Clean(name) - - // Fetch the watch. - w.mu.Lock() - defer w.mu.Unlock() - watch, ok := w.watches[name] - - // Remove it from inotify. - if !ok { - return fmt.Errorf("can't remove non-existent inotify watch for: %s", name) - } - - // We successfully removed the watch if InotifyRmWatch doesn't return an - // error, we need to clean up our internal state to ensure it matches - // inotify's kernel state. - delete(w.paths, int(watch.wd)) - delete(w.watches, name) - - // inotify_rm_watch will return EINVAL if the file has been deleted; - // the inotify will already have been removed. - // watches and pathes are deleted in ignoreLinux() implicitly and asynchronously - // by calling inotify_rm_watch() below. e.g. readEvents() goroutine receives IN_IGNORE - // so that EINVAL means that the wd is being rm_watch()ed or its file removed - // by another thread and we have not received IN_IGNORE event. - success, errno := unix.InotifyRmWatch(w.fd, watch.wd) - if success == -1 { - // TODO: Perhaps it's not helpful to return an error here in every case. 
- // the only two possible errors are: - // EBADF, which happens when w.fd is not a valid file descriptor of any kind. - // EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor. - // Watch descriptors are invalidated when they are removed explicitly or implicitly; - // explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted. - return errno - } - - return nil -} - -// WatchList returns the directories and files that are being monitered. -func (w *Watcher) WatchList() []string { - w.mu.Lock() - defer w.mu.Unlock() - - entries := make([]string, 0, len(w.watches)) - for pathname := range w.watches { - entries = append(entries, pathname) - } - - return entries -} - -type watch struct { - wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) - flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) -} - -// readEvents reads from the inotify file descriptor, converts the -// received events into Event objects and sends them via the Events channel -func (w *Watcher) readEvents() { - var ( - buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events - n int // Number of bytes read with read() - errno error // Syscall errno - ok bool // For poller.wait - ) - - defer close(w.doneResp) - defer close(w.Errors) - defer close(w.Events) - defer unix.Close(w.fd) - defer w.poller.close() - - for { - // See if we have been closed. - if w.isClosed() { - return - } - - ok, errno = w.poller.wait() - if errno != nil { - select { - case w.Errors <- errno: - case <-w.done: - return - } - continue - } - - if !ok { - continue - } - - n, errno = unix.Read(w.fd, buf[:]) - // If a signal interrupted execution, see if we've been asked to close, and try again. - // http://man7.org/linux/man-pages/man7/signal.7.html : - // "Before Linux 3.8, reads from an inotify(7) file descriptor were not restartable" - if errno == unix.EINTR { - continue - } - - // unix.Read might have been woken up by Close. If so, we're done. - if w.isClosed() { - return - } - - if n < unix.SizeofInotifyEvent { - var err error - if n == 0 { - // If EOF is received. This should really never happen. - err = io.EOF - } else if n < 0 { - // If an error occurred while reading. - err = errno - } else { - // Read was too short. - err = errors.New("notify: short read in readEvents()") - } - select { - case w.Errors <- err: - case <-w.done: - return - } - continue - } - - var offset uint32 - // We don't know how many events we just read into the buffer - // While the offset points to at least one whole event... - for offset <= uint32(n-unix.SizeofInotifyEvent) { - // Point "raw" to the event in the buffer - raw := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset])) - - mask := uint32(raw.Mask) - nameLen := uint32(raw.Len) - - if mask&unix.IN_Q_OVERFLOW != 0 { - select { - case w.Errors <- ErrEventOverflow: - case <-w.done: - return - } - } - - // If the event happened to the watched directory or the watched file, the kernel - // doesn't append the filename to the event, but we would like to always fill the - // the "Name" field with a valid filename. We retrieve the path of the watch from - // the "paths" map. - w.mu.Lock() - name, ok := w.paths[int(raw.Wd)] - // IN_DELETE_SELF occurs when the file/directory being watched is removed. - // This is a sign to clean up the maps, otherwise we are no longer in sync - // with the inotify kernel state which has already deleted the watch - // automatically. 
- if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { - delete(w.paths, int(raw.Wd)) - delete(w.watches, name) - } - w.mu.Unlock() - - if nameLen > 0 { - // Point "bytes" at the first byte of the filename - bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen] - // The filename is padded with NULL bytes. TrimRight() gets rid of those. - name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") - } - - event := newEvent(name, mask) - - // Send the events that are not ignored on the events channel - if !event.ignoreLinux(mask) { - select { - case w.Events <- event: - case <-w.done: - return - } - } - - // Move to the next event in the buffer - offset += unix.SizeofInotifyEvent + nameLen - } - } -} - -// Certain types of events can be "ignored" and not sent over the Events -// channel. Such as events marked ignore by the kernel, or MODIFY events -// against files that do not exist. -func (e *Event) ignoreLinux(mask uint32) bool { - // Ignore anything the inotify API says to ignore - if mask&unix.IN_IGNORED == unix.IN_IGNORED { - return true - } - - // If the event is not a DELETE or RENAME, the file must exist. - // Otherwise the event is ignored. - // *Note*: this was put in place because it was seen that a MODIFY - // event was sent after the DELETE. This ignores that MODIFY and - // assumes a DELETE will come or has come if the file doesn't exist. - if !(e.Op&Remove == Remove || e.Op&Rename == Rename) { - _, statErr := os.Lstat(e.Name) - return os.IsNotExist(statErr) - } - return false -} - -// newEvent returns an platform-independent Event based on an inotify mask. -func newEvent(name string, mask uint32) Event { - e := Event{Name: name} - if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { - e.Op |= Create - } - if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE { - e.Op |= Remove - } - if mask&unix.IN_MODIFY == unix.IN_MODIFY { - e.Op |= Write - } - if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { - e.Op |= Rename - } - if mask&unix.IN_ATTRIB == unix.IN_ATTRIB { - e.Op |= Chmod - } - return e -} diff --git a/tools/vendor/github.com/fsnotify/fsnotify/inotify_poller.go b/tools/vendor/github.com/fsnotify/fsnotify/inotify_poller.go deleted file mode 100644 index b572a37c3..000000000 --- a/tools/vendor/github.com/fsnotify/fsnotify/inotify_poller.go +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build linux -// +build linux - -package fsnotify - -import ( - "errors" - - "golang.org/x/sys/unix" -) - -type fdPoller struct { - fd int // File descriptor (as returned by the inotify_init() syscall) - epfd int // Epoll file descriptor - pipe [2]int // Pipe for waking up -} - -func emptyPoller(fd int) *fdPoller { - poller := new(fdPoller) - poller.fd = fd - poller.epfd = -1 - poller.pipe[0] = -1 - poller.pipe[1] = -1 - return poller -} - -// Create a new inotify poller. -// This creates an inotify handler, and an epoll handler. 
-func newFdPoller(fd int) (*fdPoller, error) { - var errno error - poller := emptyPoller(fd) - defer func() { - if errno != nil { - poller.close() - } - }() - - // Create epoll fd - poller.epfd, errno = unix.EpollCreate1(unix.EPOLL_CLOEXEC) - if poller.epfd == -1 { - return nil, errno - } - // Create pipe; pipe[0] is the read end, pipe[1] the write end. - errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK|unix.O_CLOEXEC) - if errno != nil { - return nil, errno - } - - // Register inotify fd with epoll - event := unix.EpollEvent{ - Fd: int32(poller.fd), - Events: unix.EPOLLIN, - } - errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.fd, &event) - if errno != nil { - return nil, errno - } - - // Register pipe fd with epoll - event = unix.EpollEvent{ - Fd: int32(poller.pipe[0]), - Events: unix.EPOLLIN, - } - errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.pipe[0], &event) - if errno != nil { - return nil, errno - } - - return poller, nil -} - -// Wait using epoll. -// Returns true if something is ready to be read, -// false if there is not. -func (poller *fdPoller) wait() (bool, error) { - // 3 possible events per fd, and 2 fds, makes a maximum of 6 events. - // I don't know whether epoll_wait returns the number of events returned, - // or the total number of events ready. - // I decided to catch both by making the buffer one larger than the maximum. - events := make([]unix.EpollEvent, 7) - for { - n, errno := unix.EpollWait(poller.epfd, events, -1) - if n == -1 { - if errno == unix.EINTR { - continue - } - return false, errno - } - if n == 0 { - // If there are no events, try again. - continue - } - if n > 6 { - // This should never happen. More events were returned than should be possible. - return false, errors.New("epoll_wait returned more events than I know what to do with") - } - ready := events[:n] - epollhup := false - epollerr := false - epollin := false - for _, event := range ready { - if event.Fd == int32(poller.fd) { - if event.Events&unix.EPOLLHUP != 0 { - // This should not happen, but if it does, treat it as a wakeup. - epollhup = true - } - if event.Events&unix.EPOLLERR != 0 { - // If an error is waiting on the file descriptor, we should pretend - // something is ready to read, and let unix.Read pick up the error. - epollerr = true - } - if event.Events&unix.EPOLLIN != 0 { - // There is data to read. - epollin = true - } - } - if event.Fd == int32(poller.pipe[0]) { - if event.Events&unix.EPOLLHUP != 0 { - // Write pipe descriptor was closed, by us. This means we're closing down the - // watcher, and we should wake up. - } - if event.Events&unix.EPOLLERR != 0 { - // If an error is waiting on the pipe file descriptor. - // This is an absolute mystery, and should never ever happen. - return false, errors.New("Error on the pipe descriptor.") - } - if event.Events&unix.EPOLLIN != 0 { - // This is a regular wakeup, so we have to clear the buffer. - err := poller.clearWake() - if err != nil { - return false, err - } - } - } - } - - if epollhup || epollerr || epollin { - return true, nil - } - return false, nil - } -} - -// Close the write end of the poller. -func (poller *fdPoller) wake() error { - buf := make([]byte, 1) - n, errno := unix.Write(poller.pipe[1], buf) - if n == -1 { - if errno == unix.EAGAIN { - // Buffer is full, poller will wake. - return nil - } - return errno - } - return nil -} - -func (poller *fdPoller) clearWake() error { - // You have to be woken up a LOT in order to get to 100! 
- buf := make([]byte, 100) - n, errno := unix.Read(poller.pipe[0], buf) - if n == -1 { - if errno == unix.EAGAIN { - // Buffer is empty, someone else cleared our wake. - return nil - } - return errno - } - return nil -} - -// Close all poller file descriptors, but not the one passed to it. -func (poller *fdPoller) close() { - if poller.pipe[1] != -1 { - unix.Close(poller.pipe[1]) - } - if poller.pipe[0] != -1 { - unix.Close(poller.pipe[0]) - } - if poller.epfd != -1 { - unix.Close(poller.epfd) - } -} diff --git a/tools/vendor/github.com/fsnotify/fsnotify/internal/darwin.go b/tools/vendor/github.com/fsnotify/fsnotify/internal/darwin.go new file mode 100644 index 000000000..b0eab1009 --- /dev/null +++ b/tools/vendor/github.com/fsnotify/fsnotify/internal/darwin.go @@ -0,0 +1,39 @@ +//go:build darwin + +package internal + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +var ( + SyscallEACCES = syscall.EACCES + UnixEACCES = unix.EACCES +) + +var maxfiles uint64 + +// Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/ +func SetRlimit() { + var l syscall.Rlimit + err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l) + if err == nil && l.Cur != l.Max { + l.Cur = l.Max + syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l) + } + maxfiles = l.Cur + + if n, err := syscall.SysctlUint32("kern.maxfiles"); err == nil && uint64(n) < maxfiles { + maxfiles = uint64(n) + } + + if n, err := syscall.SysctlUint32("kern.maxfilesperproc"); err == nil && uint64(n) < maxfiles { + maxfiles = uint64(n) + } +} + +func Maxfiles() uint64 { return maxfiles } +func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) } +func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, dev) } diff --git a/tools/vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go b/tools/vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go new file mode 100644 index 000000000..928319fb0 --- /dev/null +++ b/tools/vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go @@ -0,0 +1,57 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ABSOLUTE", unix.NOTE_ABSOLUTE}, + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_BACKGROUND", unix.NOTE_BACKGROUND}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_CRITICAL", unix.NOTE_CRITICAL}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXITSTATUS", unix.NOTE_EXITSTATUS}, + {"NOTE_EXIT_CSERROR", unix.NOTE_EXIT_CSERROR}, + {"NOTE_EXIT_DECRYPTFAIL", unix.NOTE_EXIT_DECRYPTFAIL}, + {"NOTE_EXIT_DETAIL", unix.NOTE_EXIT_DETAIL}, + {"NOTE_EXIT_DETAIL_MASK", unix.NOTE_EXIT_DETAIL_MASK}, + {"NOTE_EXIT_MEMORY", unix.NOTE_EXIT_MEMORY}, + {"NOTE_EXIT_REPARENTED", unix.NOTE_EXIT_REPARENTED}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FFAND", unix.NOTE_FFAND}, + {"NOTE_FFCOPY", unix.NOTE_FFCOPY}, + {"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK}, + {"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK}, + {"NOTE_FFNOP", unix.NOTE_FFNOP}, + {"NOTE_FFOR", unix.NOTE_FFOR}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_FUNLOCK", unix.NOTE_FUNLOCK}, + {"NOTE_LEEWAY", unix.NOTE_LEEWAY}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_MACHTIME", unix.NOTE_MACHTIME}, + {"NOTE_MACH_CONTINUOUS_TIME", unix.NOTE_MACH_CONTINUOUS_TIME}, + {"NOTE_NONE", unix.NOTE_NONE}, + {"NOTE_NSECONDS", unix.NOTE_NSECONDS}, + {"NOTE_OOB", unix.NOTE_OOB}, + //{"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, -0x100000 (?!) 
+ {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_REAP", unix.NOTE_REAP}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_SECONDS", unix.NOTE_SECONDS}, + {"NOTE_SIGNAL", unix.NOTE_SIGNAL}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRIGGER", unix.NOTE_TRIGGER}, + {"NOTE_USECONDS", unix.NOTE_USECONDS}, + {"NOTE_VM_ERROR", unix.NOTE_VM_ERROR}, + {"NOTE_VM_PRESSURE", unix.NOTE_VM_PRESSURE}, + {"NOTE_VM_PRESSURE_SUDDEN_TERMINATE", unix.NOTE_VM_PRESSURE_SUDDEN_TERMINATE}, + {"NOTE_VM_PRESSURE_TERMINATE", unix.NOTE_VM_PRESSURE_TERMINATE}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/tools/vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go b/tools/vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go new file mode 100644 index 000000000..3186b0c34 --- /dev/null +++ b/tools/vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go @@ -0,0 +1,33 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FFAND", unix.NOTE_FFAND}, + {"NOTE_FFCOPY", unix.NOTE_FFCOPY}, + {"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK}, + {"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK}, + {"NOTE_FFNOP", unix.NOTE_FFNOP}, + {"NOTE_FFOR", unix.NOTE_FFOR}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_OOB", unix.NOTE_OOB}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRIGGER", unix.NOTE_TRIGGER}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/tools/vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go b/tools/vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go new file mode 100644 index 000000000..f69fdb930 --- /dev/null +++ b/tools/vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go @@ -0,0 +1,42 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ABSTIME", unix.NOTE_ABSTIME}, + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_CLOSE", unix.NOTE_CLOSE}, + {"NOTE_CLOSE_WRITE", unix.NOTE_CLOSE_WRITE}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FFAND", unix.NOTE_FFAND}, + {"NOTE_FFCOPY", unix.NOTE_FFCOPY}, + {"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK}, + {"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK}, + {"NOTE_FFNOP", unix.NOTE_FFNOP}, + {"NOTE_FFOR", unix.NOTE_FFOR}, + {"NOTE_FILE_POLL", unix.NOTE_FILE_POLL}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_MSECONDS", unix.NOTE_MSECONDS}, + {"NOTE_NSECONDS", unix.NOTE_NSECONDS}, + {"NOTE_OPEN", unix.NOTE_OPEN}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_READ", unix.NOTE_READ}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_SECONDS", unix.NOTE_SECONDS}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRIGGER", unix.NOTE_TRIGGER}, + {"NOTE_USECONDS", 
unix.NOTE_USECONDS}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/tools/vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go b/tools/vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go new file mode 100644 index 000000000..607e683bd --- /dev/null +++ b/tools/vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go @@ -0,0 +1,32 @@ +//go:build freebsd || openbsd || netbsd || dragonfly || darwin + +package internal + +import ( + "fmt" + "os" + "strings" + "time" + + "golang.org/x/sys/unix" +) + +func Debug(name string, kevent *unix.Kevent_t) { + mask := uint32(kevent.Fflags) + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %10d:%-60s → %q\n", + time.Now().Format("15:04:05.000000000"), mask, strings.Join(l, " | "), name) +} diff --git a/tools/vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go b/tools/vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go new file mode 100644 index 000000000..35c734be4 --- /dev/null +++ b/tools/vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go @@ -0,0 +1,56 @@ +package internal + +import ( + "fmt" + "os" + "strings" + "time" + + "golang.org/x/sys/unix" +) + +func Debug(name string, mask, cookie uint32) { + names := []struct { + n string + m uint32 + }{ + {"IN_ACCESS", unix.IN_ACCESS}, + {"IN_ATTRIB", unix.IN_ATTRIB}, + {"IN_CLOSE", unix.IN_CLOSE}, + {"IN_CLOSE_NOWRITE", unix.IN_CLOSE_NOWRITE}, + {"IN_CLOSE_WRITE", unix.IN_CLOSE_WRITE}, + {"IN_CREATE", unix.IN_CREATE}, + {"IN_DELETE", unix.IN_DELETE}, + {"IN_DELETE_SELF", unix.IN_DELETE_SELF}, + {"IN_IGNORED", unix.IN_IGNORED}, + {"IN_ISDIR", unix.IN_ISDIR}, + {"IN_MODIFY", unix.IN_MODIFY}, + {"IN_MOVE", unix.IN_MOVE}, + {"IN_MOVED_FROM", unix.IN_MOVED_FROM}, + {"IN_MOVED_TO", unix.IN_MOVED_TO}, + {"IN_MOVE_SELF", unix.IN_MOVE_SELF}, + {"IN_OPEN", unix.IN_OPEN}, + {"IN_Q_OVERFLOW", unix.IN_Q_OVERFLOW}, + {"IN_UNMOUNT", unix.IN_UNMOUNT}, + } + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + var c string + if cookie > 0 { + c = fmt.Sprintf("(cookie: %d) ", cookie) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %-30s → %s%q\n", + time.Now().Format("15:04:05.000000000"), strings.Join(l, "|"), c, name) +} diff --git a/tools/vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go b/tools/vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go new file mode 100644 index 000000000..e5b3b6f69 --- /dev/null +++ b/tools/vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go @@ -0,0 +1,25 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git 
a/tools/vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go b/tools/vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go new file mode 100644 index 000000000..1dd455bc5 --- /dev/null +++ b/tools/vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go @@ -0,0 +1,28 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + // {"NOTE_CHANGE", unix.NOTE_CHANGE}, // Not on 386? + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EOF", unix.NOTE_EOF}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRUNCATE", unix.NOTE_TRUNCATE}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/tools/vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go b/tools/vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go new file mode 100644 index 000000000..f1b2e73bd --- /dev/null +++ b/tools/vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go @@ -0,0 +1,45 @@ +package internal + +import ( + "fmt" + "os" + "strings" + "time" + + "golang.org/x/sys/unix" +) + +func Debug(name string, mask int32) { + names := []struct { + n string + m int32 + }{ + {"FILE_ACCESS", unix.FILE_ACCESS}, + {"FILE_MODIFIED", unix.FILE_MODIFIED}, + {"FILE_ATTRIB", unix.FILE_ATTRIB}, + {"FILE_TRUNC", unix.FILE_TRUNC}, + {"FILE_NOFOLLOW", unix.FILE_NOFOLLOW}, + {"FILE_DELETE", unix.FILE_DELETE}, + {"FILE_RENAME_TO", unix.FILE_RENAME_TO}, + {"FILE_RENAME_FROM", unix.FILE_RENAME_FROM}, + {"UNMOUNTED", unix.UNMOUNTED}, + {"MOUNTEDOVER", unix.MOUNTEDOVER}, + {"FILE_EXCEPTION", unix.FILE_EXCEPTION}, + } + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %10d:%-30s → %q\n", + time.Now().Format("15:04:05.000000000"), mask, strings.Join(l, " | "), name) +} diff --git a/tools/vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go b/tools/vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go new file mode 100644 index 000000000..52bf4ce53 --- /dev/null +++ b/tools/vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go @@ -0,0 +1,40 @@ +package internal + +import ( + "fmt" + "os" + "path/filepath" + "strings" + "time" + + "golang.org/x/sys/windows" +) + +func Debug(name string, mask uint32) { + names := []struct { + n string + m uint32 + }{ + {"FILE_ACTION_ADDED", windows.FILE_ACTION_ADDED}, + {"FILE_ACTION_REMOVED", windows.FILE_ACTION_REMOVED}, + {"FILE_ACTION_MODIFIED", windows.FILE_ACTION_MODIFIED}, + {"FILE_ACTION_RENAMED_OLD_NAME", windows.FILE_ACTION_RENAMED_OLD_NAME}, + {"FILE_ACTION_RENAMED_NEW_NAME", windows.FILE_ACTION_RENAMED_NEW_NAME}, + } + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %-65s → %q\n", + time.Now().Format("15:04:05.000000000"), strings.Join(l, " 
| "), filepath.ToSlash(name)) +} diff --git a/tools/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go b/tools/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go new file mode 100644 index 000000000..547df1df8 --- /dev/null +++ b/tools/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go @@ -0,0 +1,31 @@ +//go:build freebsd + +package internal + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +var ( + SyscallEACCES = syscall.EACCES + UnixEACCES = unix.EACCES +) + +var maxfiles uint64 + +func SetRlimit() { + // Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/ + var l syscall.Rlimit + err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l) + if err == nil && l.Cur != l.Max { + l.Cur = l.Max + syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l) + } + maxfiles = uint64(l.Cur) +} + +func Maxfiles() uint64 { return maxfiles } +func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) } +func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, uint64(dev)) } diff --git a/tools/vendor/github.com/fsnotify/fsnotify/internal/internal.go b/tools/vendor/github.com/fsnotify/fsnotify/internal/internal.go new file mode 100644 index 000000000..7daa45e19 --- /dev/null +++ b/tools/vendor/github.com/fsnotify/fsnotify/internal/internal.go @@ -0,0 +1,2 @@ +// Package internal contains some helpers. +package internal diff --git a/tools/vendor/github.com/fsnotify/fsnotify/internal/unix.go b/tools/vendor/github.com/fsnotify/fsnotify/internal/unix.go new file mode 100644 index 000000000..30976ce97 --- /dev/null +++ b/tools/vendor/github.com/fsnotify/fsnotify/internal/unix.go @@ -0,0 +1,31 @@ +//go:build !windows && !darwin && !freebsd + +package internal + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +var ( + SyscallEACCES = syscall.EACCES + UnixEACCES = unix.EACCES +) + +var maxfiles uint64 + +func SetRlimit() { + // Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/ + var l syscall.Rlimit + err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l) + if err == nil && l.Cur != l.Max { + l.Cur = l.Max + syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l) + } + maxfiles = uint64(l.Cur) +} + +func Maxfiles() uint64 { return maxfiles } +func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) } +func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, dev) } diff --git a/tools/vendor/github.com/fsnotify/fsnotify/internal/unix2.go b/tools/vendor/github.com/fsnotify/fsnotify/internal/unix2.go new file mode 100644 index 000000000..37dfeddc2 --- /dev/null +++ b/tools/vendor/github.com/fsnotify/fsnotify/internal/unix2.go @@ -0,0 +1,7 @@ +//go:build !windows + +package internal + +func HasPrivilegesForSymlink() bool { + return true +} diff --git a/tools/vendor/github.com/fsnotify/fsnotify/internal/windows.go b/tools/vendor/github.com/fsnotify/fsnotify/internal/windows.go new file mode 100644 index 000000000..a72c64954 --- /dev/null +++ b/tools/vendor/github.com/fsnotify/fsnotify/internal/windows.go @@ -0,0 +1,41 @@ +//go:build windows + +package internal + +import ( + "errors" + + "golang.org/x/sys/windows" +) + +// Just a dummy. 
+var ( + SyscallEACCES = errors.New("dummy") + UnixEACCES = errors.New("dummy") +) + +func SetRlimit() {} +func Maxfiles() uint64 { return 1<<64 - 1 } +func Mkfifo(path string, mode uint32) error { return errors.New("no FIFOs on Windows") } +func Mknod(path string, mode uint32, dev int) error { return errors.New("no device nodes on Windows") } + +func HasPrivilegesForSymlink() bool { + var sid *windows.SID + err := windows.AllocateAndInitializeSid( + &windows.SECURITY_NT_AUTHORITY, + 2, + windows.SECURITY_BUILTIN_DOMAIN_RID, + windows.DOMAIN_ALIAS_RID_ADMINS, + 0, 0, 0, 0, 0, 0, + &sid) + if err != nil { + return false + } + defer windows.FreeSid(sid) + token := windows.Token(0) + member, err := token.IsMember(sid) + if err != nil { + return false + } + return member || token.IsElevated() +} diff --git a/tools/vendor/github.com/fsnotify/fsnotify/kqueue.go b/tools/vendor/github.com/fsnotify/fsnotify/kqueue.go deleted file mode 100644 index 6fb8d8532..000000000 --- a/tools/vendor/github.com/fsnotify/fsnotify/kqueue.go +++ /dev/null @@ -1,535 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build freebsd || openbsd || netbsd || dragonfly || darwin -// +build freebsd openbsd netbsd dragonfly darwin - -package fsnotify - -import ( - "errors" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "sync" - "time" - - "golang.org/x/sys/unix" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct { - Events chan Event - Errors chan error - done chan struct{} // Channel for sending a "quit message" to the reader goroutine - - kq int // File descriptor (as returned by the kqueue() syscall). - - mu sync.Mutex // Protects access to watcher data - watches map[string]int // Map of watched file descriptors (key: path). - externalWatches map[string]bool // Map of watches added by user of the library. - dirFlags map[string]uint32 // Map of watched directories to fflags used in kqueue. - paths map[int]pathInfo // Map file descriptors to path names for processing kqueue events. - fileExists map[string]bool // Keep track of if we know this file exists (to stop duplicate create events). - isClosed bool // Set to true when Close() is first called -} - -type pathInfo struct { - name string - isDir bool -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - kq, err := kqueue() - if err != nil { - return nil, err - } - - w := &Watcher{ - kq: kq, - watches: make(map[string]int), - dirFlags: make(map[string]uint32), - paths: make(map[int]pathInfo), - fileExists: make(map[string]bool), - externalWatches: make(map[string]bool), - Events: make(chan Event), - Errors: make(chan error), - done: make(chan struct{}), - } - - go w.readEvents() - return w, nil -} - -// Close removes all watches and closes the events channel. 
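
// Close below copies the watched paths out while holding the mutex, then
// unlocks before calling Remove, because Remove re-acquires the same
// mutex and sync.Mutex is not reentrant: locking it twice from one
// goroutine would self-deadlock. A minimal sketch of that
// snapshot-then-call pattern, with illustrative types:

package watcher

import "sync"

type pathSet struct {
	mu    sync.Mutex
	items map[string]bool
}

func (s *pathSet) remove(name string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	delete(s.items, name)
}

// clear snapshots the keys under the lock, releases it, and only then
// calls remove, which takes the lock again for each entry.
func (s *pathSet) clear() {
	s.mu.Lock()
	names := make([]string, 0, len(s.items))
	for name := range s.items {
		names = append(names, name)
	}
	s.mu.Unlock()

	for _, name := range names {
		s.remove(name)
	}
}
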
-func (w *Watcher) Close() error { - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() - return nil - } - w.isClosed = true - - // copy paths to remove while locked - var pathsToRemove = make([]string, 0, len(w.watches)) - for name := range w.watches { - pathsToRemove = append(pathsToRemove, name) - } - w.mu.Unlock() - // unlock before calling Remove, which also locks - - for _, name := range pathsToRemove { - w.Remove(name) - } - - // send a "quit" message to the reader goroutine - close(w.done) - - return nil -} - -// Add starts watching the named file or directory (non-recursively). -func (w *Watcher) Add(name string) error { - w.mu.Lock() - w.externalWatches[name] = true - w.mu.Unlock() - _, err := w.addWatch(name, noteAllEvents) - return err -} - -// Remove stops watching the the named file or directory (non-recursively). -func (w *Watcher) Remove(name string) error { - name = filepath.Clean(name) - w.mu.Lock() - watchfd, ok := w.watches[name] - w.mu.Unlock() - if !ok { - return fmt.Errorf("can't remove non-existent kevent watch for: %s", name) - } - - const registerRemove = unix.EV_DELETE - if err := register(w.kq, []int{watchfd}, registerRemove, 0); err != nil { - return err - } - - unix.Close(watchfd) - - w.mu.Lock() - isDir := w.paths[watchfd].isDir - delete(w.watches, name) - delete(w.paths, watchfd) - delete(w.dirFlags, name) - w.mu.Unlock() - - // Find all watched paths that are in this directory that are not external. - if isDir { - var pathsToRemove []string - w.mu.Lock() - for _, path := range w.paths { - wdir, _ := filepath.Split(path.name) - if filepath.Clean(wdir) == name { - if !w.externalWatches[path.name] { - pathsToRemove = append(pathsToRemove, path.name) - } - } - } - w.mu.Unlock() - for _, name := range pathsToRemove { - // Since these are internal, not much sense in propagating error - // to the user, as that will just confuse them with an error about - // a path they did not explicitly watch themselves. - w.Remove(name) - } - } - - return nil -} - -// WatchList returns the directories and files that are being monitered. -func (w *Watcher) WatchList() []string { - w.mu.Lock() - defer w.mu.Unlock() - - entries := make([]string, 0, len(w.watches)) - for pathname := range w.watches { - entries = append(entries, pathname) - } - - return entries -} - -// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) -const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME - -// keventWaitTime to block on each read from kevent -var keventWaitTime = durationToTimespec(100 * time.Millisecond) - -// addWatch adds name to the watched file set. -// The flags are interpreted as described in kevent(2). -// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks. -func (w *Watcher) addWatch(name string, flags uint32) (string, error) { - var isDir bool - // Make ./name and name equivalent - name = filepath.Clean(name) - - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() - return "", errors.New("kevent instance already closed") - } - watchfd, alreadyWatching := w.watches[name] - // We already have a watch, but we can still override flags. - if alreadyWatching { - isDir = w.paths[watchfd].isDir - } - w.mu.Unlock() - - if !alreadyWatching { - fi, err := os.Lstat(name) - if err != nil { - return "", err - } - - // Don't watch sockets. - if fi.Mode()&os.ModeSocket == os.ModeSocket { - return "", nil - } - - // Don't watch named pipes. 
- if fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe { - return "", nil - } - - // Follow Symlinks - // Unfortunately, Linux can add bogus symlinks to watch list without - // issue, and Windows can't do symlinks period (AFAIK). To maintain - // consistency, we will act like everything is fine. There will simply - // be no file events for broken symlinks. - // Hence the returns of nil on errors. - if fi.Mode()&os.ModeSymlink == os.ModeSymlink { - name, err = filepath.EvalSymlinks(name) - if err != nil { - return "", nil - } - - w.mu.Lock() - _, alreadyWatching = w.watches[name] - w.mu.Unlock() - - if alreadyWatching { - return name, nil - } - - fi, err = os.Lstat(name) - if err != nil { - return "", nil - } - } - - watchfd, err = unix.Open(name, openMode, 0700) - if watchfd == -1 { - return "", err - } - - isDir = fi.IsDir() - } - - const registerAdd = unix.EV_ADD | unix.EV_CLEAR | unix.EV_ENABLE - if err := register(w.kq, []int{watchfd}, registerAdd, flags); err != nil { - unix.Close(watchfd) - return "", err - } - - if !alreadyWatching { - w.mu.Lock() - w.watches[name] = watchfd - w.paths[watchfd] = pathInfo{name: name, isDir: isDir} - w.mu.Unlock() - } - - if isDir { - // Watch the directory if it has not been watched before, - // or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) - w.mu.Lock() - - watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && - (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE) - // Store flags so this watch can be updated later - w.dirFlags[name] = flags - w.mu.Unlock() - - if watchDir { - if err := w.watchDirectoryFiles(name); err != nil { - return "", err - } - } - } - return name, nil -} - -// readEvents reads from kqueue and converts the received kevents into -// Event values that it sends down the Events channel. -func (w *Watcher) readEvents() { - eventBuffer := make([]unix.Kevent_t, 10) - -loop: - for { - // See if there is a message on the "done" channel - select { - case <-w.done: - break loop - default: - } - - // Get new events - kevents, err := read(w.kq, eventBuffer, &keventWaitTime) - // EINTR is okay, the syscall was interrupted before timeout expired. - if err != nil && err != unix.EINTR { - select { - case w.Errors <- err: - case <-w.done: - break loop - } - continue - } - - // Flush the events we received to the Events channel - for len(kevents) > 0 { - kevent := &kevents[0] - watchfd := int(kevent.Ident) - mask := uint32(kevent.Fflags) - w.mu.Lock() - path := w.paths[watchfd] - w.mu.Unlock() - event := newEvent(path.name, mask) - - if path.isDir && !(event.Op&Remove == Remove) { - // Double check to make sure the directory exists. This can happen when - // we do a rm -fr on a recursively watched folders and we receive a - // modification event first but the folder has been deleted and later - // receive the delete event - if _, err := os.Lstat(event.Name); os.IsNotExist(err) { - // mark is as delete event - event.Op |= Remove - } - } - - if event.Op&Rename == Rename || event.Op&Remove == Remove { - w.Remove(event.Name) - w.mu.Lock() - delete(w.fileExists, event.Name) - w.mu.Unlock() - } - - if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) { - w.sendDirectoryChangeEvents(event.Name) - } else { - // Send the event on the Events channel. - select { - case w.Events <- event: - case <-w.done: - break loop - } - } - - if event.Op&Remove == Remove { - // Look for a file that may have overwritten this. - // For example, mv f1 f2 will delete f2, then create f2. 
- if path.isDir { - fileDir := filepath.Clean(event.Name) - w.mu.Lock() - _, found := w.watches[fileDir] - w.mu.Unlock() - if found { - // make sure the directory exists before we watch for changes. When we - // do a recursive watch and perform rm -fr, the parent directory might - // have gone missing, ignore the missing directory and let the - // upcoming delete event remove the watch from the parent directory. - if _, err := os.Lstat(fileDir); err == nil { - w.sendDirectoryChangeEvents(fileDir) - } - } - } else { - filePath := filepath.Clean(event.Name) - if fileInfo, err := os.Lstat(filePath); err == nil { - w.sendFileCreatedEventIfNew(filePath, fileInfo) - } - } - } - - // Move to next event - kevents = kevents[1:] - } - } - - // cleanup - err := unix.Close(w.kq) - if err != nil { - // only way the previous loop breaks is if w.done was closed so we need to async send to w.Errors. - select { - case w.Errors <- err: - default: - } - } - close(w.Events) - close(w.Errors) -} - -// newEvent returns an platform-independent Event based on kqueue Fflags. -func newEvent(name string, mask uint32) Event { - e := Event{Name: name} - if mask&unix.NOTE_DELETE == unix.NOTE_DELETE { - e.Op |= Remove - } - if mask&unix.NOTE_WRITE == unix.NOTE_WRITE { - e.Op |= Write - } - if mask&unix.NOTE_RENAME == unix.NOTE_RENAME { - e.Op |= Rename - } - if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB { - e.Op |= Chmod - } - return e -} - -func newCreateEvent(name string) Event { - return Event{Name: name, Op: Create} -} - -// watchDirectoryFiles to mimic inotify when adding a watch on a directory -func (w *Watcher) watchDirectoryFiles(dirPath string) error { - // Get all files - files, err := ioutil.ReadDir(dirPath) - if err != nil { - return err - } - - for _, fileInfo := range files { - filePath := filepath.Join(dirPath, fileInfo.Name()) - filePath, err = w.internalWatch(filePath, fileInfo) - if err != nil { - return err - } - - w.mu.Lock() - w.fileExists[filePath] = true - w.mu.Unlock() - } - - return nil -} - -// sendDirectoryEvents searches the directory for newly created files -// and sends them over the event channel. This functionality is to have -// the BSD version of fsnotify match Linux inotify which provides a -// create event for files created in a watched directory. -func (w *Watcher) sendDirectoryChangeEvents(dirPath string) { - // Get all files - files, err := ioutil.ReadDir(dirPath) - if err != nil { - select { - case w.Errors <- err: - case <-w.done: - return - } - } - - // Search for new files - for _, fileInfo := range files { - filePath := filepath.Join(dirPath, fileInfo.Name()) - err := w.sendFileCreatedEventIfNew(filePath, fileInfo) - - if err != nil { - return - } - } -} - -// sendFileCreatedEvent sends a create event if the file isn't already being tracked. 
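
// kqueue reports only that a watched directory changed, not which entry
// appeared, so the helpers below rescan the directory and synthesize a
// Create for anything not already tracked, mimicking inotify's per-file
// create events. A sketch of that diff-against-a-seen-set idea, with
// illustrative names and error handling reduced to the essentials:

package watcher

import (
	"os"
	"path/filepath"
)

type dirScanner struct {
	seen map[string]bool // paths already reported
}

func newDirScanner() *dirScanner {
	return &dirScanner{seen: make(map[string]bool)}
}

// newEntries returns the paths in dir seen for the first time and records
// them, so each file produces at most one synthetic Create event.
func (d *dirScanner) newEntries(dir string) ([]string, error) {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return nil, err
	}
	var created []string
	for _, e := range entries {
		p := filepath.Join(dir, e.Name())
		if !d.seen[p] {
			d.seen[p] = true
			created = append(created, p)
		}
	}
	return created, nil
}
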
-func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) { - w.mu.Lock() - _, doesExist := w.fileExists[filePath] - w.mu.Unlock() - if !doesExist { - // Send create event - select { - case w.Events <- newCreateEvent(filePath): - case <-w.done: - return - } - } - - // like watchDirectoryFiles (but without doing another ReadDir) - filePath, err = w.internalWatch(filePath, fileInfo) - if err != nil { - return err - } - - w.mu.Lock() - w.fileExists[filePath] = true - w.mu.Unlock() - - return nil -} - -func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) { - if fileInfo.IsDir() { - // mimic Linux providing delete events for subdirectories - // but preserve the flags used if currently watching subdirectory - w.mu.Lock() - flags := w.dirFlags[name] - w.mu.Unlock() - - flags |= unix.NOTE_DELETE | unix.NOTE_RENAME - return w.addWatch(name, flags) - } - - // watch file to mimic Linux inotify - return w.addWatch(name, noteAllEvents) -} - -// kqueue creates a new kernel event queue and returns a descriptor. -func kqueue() (kq int, err error) { - kq, err = unix.Kqueue() - if kq == -1 { - return kq, err - } - return kq, nil -} - -// register events with the queue -func register(kq int, fds []int, flags int, fflags uint32) error { - changes := make([]unix.Kevent_t, len(fds)) - - for i, fd := range fds { - // SetKevent converts int to the platform-specific types: - unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags) - changes[i].Fflags = fflags - } - - // register the events - success, err := unix.Kevent(kq, changes, nil, nil) - if success == -1 { - return err - } - return nil -} - -// read retrieves pending events, or waits until an event occurs. -// A timeout of nil blocks indefinitely, while 0 polls the queue. -func read(kq int, events []unix.Kevent_t, timeout *unix.Timespec) ([]unix.Kevent_t, error) { - n, err := unix.Kevent(kq, nil, events, timeout) - if err != nil { - return nil, err - } - return events[0:n], nil -} - -// durationToTimespec prepares a timeout value -func durationToTimespec(d time.Duration) unix.Timespec { - return unix.NsecToTimespec(d.Nanoseconds()) -} diff --git a/tools/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go b/tools/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go deleted file mode 100644 index 36cc3845b..000000000 --- a/tools/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build freebsd || openbsd || netbsd || dragonfly -// +build freebsd openbsd netbsd dragonfly - -package fsnotify - -import "golang.org/x/sys/unix" - -const openMode = unix.O_NONBLOCK | unix.O_RDONLY | unix.O_CLOEXEC diff --git a/tools/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go b/tools/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go deleted file mode 100644 index 98cd8476f..000000000 --- a/tools/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build darwin -// +build darwin - -package fsnotify - -import "golang.org/x/sys/unix" - -// note: this constant is not defined on BSD -const openMode = unix.O_EVTONLY | unix.O_CLOEXEC diff --git a/tools/vendor/github.com/fsnotify/fsnotify/system_bsd.go b/tools/vendor/github.com/fsnotify/fsnotify/system_bsd.go new file mode 100644 index 000000000..f65e8fe3e --- /dev/null +++ b/tools/vendor/github.com/fsnotify/fsnotify/system_bsd.go @@ -0,0 +1,7 @@ +//go:build freebsd || openbsd || netbsd || dragonfly + +package fsnotify + +import "golang.org/x/sys/unix" + +const openMode = unix.O_NONBLOCK | unix.O_RDONLY | unix.O_CLOEXEC diff --git a/tools/vendor/github.com/fsnotify/fsnotify/system_darwin.go b/tools/vendor/github.com/fsnotify/fsnotify/system_darwin.go new file mode 100644 index 000000000..a29fc7aab --- /dev/null +++ b/tools/vendor/github.com/fsnotify/fsnotify/system_darwin.go @@ -0,0 +1,8 @@ +//go:build darwin + +package fsnotify + +import "golang.org/x/sys/unix" + +// note: this constant is not defined on BSD +const openMode = unix.O_EVTONLY | unix.O_CLOEXEC diff --git a/tools/vendor/github.com/fsnotify/fsnotify/windows.go b/tools/vendor/github.com/fsnotify/fsnotify/windows.go deleted file mode 100644 index 02ce7deb0..000000000 --- a/tools/vendor/github.com/fsnotify/fsnotify/windows.go +++ /dev/null @@ -1,586 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build windows -// +build windows - -package fsnotify - -import ( - "errors" - "fmt" - "os" - "path/filepath" - "reflect" - "runtime" - "sync" - "syscall" - "unsafe" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct { - Events chan Event - Errors chan error - isClosed bool // Set to true when Close() is first called - mu sync.Mutex // Map access - port syscall.Handle // Handle to completion port - watches watchMap // Map of watches (key: i-number) - input chan *input // Inputs to the reader are sent on this channel - quit chan chan<- error -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0) - if e != nil { - return nil, os.NewSyscallError("CreateIoCompletionPort", e) - } - w := &Watcher{ - port: port, - watches: make(watchMap), - input: make(chan *input, 1), - Events: make(chan Event, 50), - Errors: make(chan error), - quit: make(chan chan<- error, 1), - } - go w.readEvents() - return w, nil -} - -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - if w.isClosed { - return nil - } - w.isClosed = true - - // Send "quit" message to the reader goroutine - ch := make(chan error) - w.quit <- ch - if err := w.wakeupReader(); err != nil { - return err - } - return <-ch -} - -// Add starts watching the named file or directory (non-recursively). -func (w *Watcher) Add(name string) error { - if w.isClosed { - return errors.New("watcher already closed") - } - in := &input{ - op: opAddWatch, - path: filepath.Clean(name), - flags: sysFSALLEVENTS, - reply: make(chan error), - } - w.input <- in - if err := w.wakeupReader(); err != nil { - return err - } - return <-in.reply -} - -// Remove stops watching the the named file or directory (non-recursively). 
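
// On Windows all watch state is owned by the single I/O goroutine, so Add
// and Remove never mutate it directly: each call wraps its operation in an
// input carrying a reply channel, queues it, and posts an empty completion
// packet so GetQueuedCompletionStatus returns and the I/O goroutine can
// drain the queue. A portable sketch of that single-owner request/reply
// pattern, with illustrative types:

package watcher

type request struct {
	path  string
	reply chan error
}

type owner struct {
	requests chan *request
	state    map[string]bool // touched only by run()
}

// add may be called from any goroutine; it blocks until run() replies.
func (o *owner) add(path string) error {
	req := &request{path: path, reply: make(chan error)}
	o.requests <- req
	return <-req.reply
}

// run is the only goroutine that mutates state, so no mutex is needed.
func (o *owner) run() {
	for req := range o.requests {
		o.state[req.path] = true
		req.reply <- nil
	}
}
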
-func (w *Watcher) Remove(name string) error { - in := &input{ - op: opRemoveWatch, - path: filepath.Clean(name), - reply: make(chan error), - } - w.input <- in - if err := w.wakeupReader(); err != nil { - return err - } - return <-in.reply -} - -// WatchList returns the directories and files that are being monitered. -func (w *Watcher) WatchList() []string { - w.mu.Lock() - defer w.mu.Unlock() - - entries := make([]string, 0, len(w.watches)) - for _, entry := range w.watches { - for _, watchEntry := range entry { - entries = append(entries, watchEntry.path) - } - } - - return entries -} - -const ( - // Options for AddWatch - sysFSONESHOT = 0x80000000 - sysFSONLYDIR = 0x1000000 - - // Events - sysFSACCESS = 0x1 - sysFSALLEVENTS = 0xfff - sysFSATTRIB = 0x4 - sysFSCLOSE = 0x18 - sysFSCREATE = 0x100 - sysFSDELETE = 0x200 - sysFSDELETESELF = 0x400 - sysFSMODIFY = 0x2 - sysFSMOVE = 0xc0 - sysFSMOVEDFROM = 0x40 - sysFSMOVEDTO = 0x80 - sysFSMOVESELF = 0x800 - - // Special events - sysFSIGNORED = 0x8000 - sysFSQOVERFLOW = 0x4000 -) - -func newEvent(name string, mask uint32) Event { - e := Event{Name: name} - if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO { - e.Op |= Create - } - if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF { - e.Op |= Remove - } - if mask&sysFSMODIFY == sysFSMODIFY { - e.Op |= Write - } - if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM { - e.Op |= Rename - } - if mask&sysFSATTRIB == sysFSATTRIB { - e.Op |= Chmod - } - return e -} - -const ( - opAddWatch = iota - opRemoveWatch -) - -const ( - provisional uint64 = 1 << (32 + iota) -) - -type input struct { - op int - path string - flags uint32 - reply chan error -} - -type inode struct { - handle syscall.Handle - volume uint32 - index uint64 -} - -type watch struct { - ov syscall.Overlapped - ino *inode // i-number - path string // Directory path - mask uint64 // Directory itself is being watched with these notify flags - names map[string]uint64 // Map of names being watched and their notify flags - rename string // Remembers the old name while renaming a file - buf [4096]byte -} - -type indexMap map[uint64]*watch -type watchMap map[uint32]indexMap - -func (w *Watcher) wakeupReader() error { - e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil) - if e != nil { - return os.NewSyscallError("PostQueuedCompletionStatus", e) - } - return nil -} - -func getDir(pathname string) (dir string, err error) { - attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname)) - if e != nil { - return "", os.NewSyscallError("GetFileAttributes", e) - } - if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 { - dir = pathname - } else { - dir, _ = filepath.Split(pathname) - dir = filepath.Clean(dir) - } - return -} - -func getIno(path string) (ino *inode, err error) { - h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path), - syscall.FILE_LIST_DIRECTORY, - syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, - nil, syscall.OPEN_EXISTING, - syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0) - if e != nil { - return nil, os.NewSyscallError("CreateFile", e) - } - var fi syscall.ByHandleFileInformation - if e = syscall.GetFileInformationByHandle(h, &fi); e != nil { - syscall.CloseHandle(h) - return nil, os.NewSyscallError("GetFileInformationByHandle", e) - } - ino = &inode{ - handle: h, - volume: fi.VolumeSerialNumber, - index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow), 
- } - return ino, nil -} - -// Must run within the I/O thread. -func (m watchMap) get(ino *inode) *watch { - if i := m[ino.volume]; i != nil { - return i[ino.index] - } - return nil -} - -// Must run within the I/O thread. -func (m watchMap) set(ino *inode, watch *watch) { - i := m[ino.volume] - if i == nil { - i = make(indexMap) - m[ino.volume] = i - } - i[ino.index] = watch -} - -// Must run within the I/O thread. -func (w *Watcher) addWatch(pathname string, flags uint64) error { - dir, err := getDir(pathname) - if err != nil { - return err - } - if flags&sysFSONLYDIR != 0 && pathname != dir { - return nil - } - ino, err := getIno(dir) - if err != nil { - return err - } - w.mu.Lock() - watchEntry := w.watches.get(ino) - w.mu.Unlock() - if watchEntry == nil { - if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil { - syscall.CloseHandle(ino.handle) - return os.NewSyscallError("CreateIoCompletionPort", e) - } - watchEntry = &watch{ - ino: ino, - path: dir, - names: make(map[string]uint64), - } - w.mu.Lock() - w.watches.set(ino, watchEntry) - w.mu.Unlock() - flags |= provisional - } else { - syscall.CloseHandle(ino.handle) - } - if pathname == dir { - watchEntry.mask |= flags - } else { - watchEntry.names[filepath.Base(pathname)] |= flags - } - if err = w.startRead(watchEntry); err != nil { - return err - } - if pathname == dir { - watchEntry.mask &= ^provisional - } else { - watchEntry.names[filepath.Base(pathname)] &= ^provisional - } - return nil -} - -// Must run within the I/O thread. -func (w *Watcher) remWatch(pathname string) error { - dir, err := getDir(pathname) - if err != nil { - return err - } - ino, err := getIno(dir) - if err != nil { - return err - } - w.mu.Lock() - watch := w.watches.get(ino) - w.mu.Unlock() - if watch == nil { - return fmt.Errorf("can't remove non-existent watch for: %s", pathname) - } - if pathname == dir { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) - watch.mask = 0 - } else { - name := filepath.Base(pathname) - w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED) - delete(watch.names, name) - } - return w.startRead(watch) -} - -// Must run within the I/O thread. -func (w *Watcher) deleteWatch(watch *watch) { - for name, mask := range watch.names { - if mask&provisional == 0 { - w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED) - } - delete(watch.names, name) - } - if watch.mask != 0 { - if watch.mask&provisional == 0 { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) - } - watch.mask = 0 - } -} - -// Must run within the I/O thread. 
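
// The nested maps above key each watch by the directory's on-disk
// identity -- the volume serial number plus the 64-bit file index from
// GetFileInformationByHandle -- rather than by path string, so different
// spellings of the same directory resolve to a single watch. A minimal
// sketch of identity-keyed lookup; the real code nests two maps where this
// illustrative version flattens them into one composite key:

package watcher

type fileID struct {
	volume uint32
	index  uint64 // FileIndexHigh<<32 | FileIndexLow
}

type dirWatch struct{ path string }

type watchTable map[fileID]*dirWatch

// upsert returns the existing watch for this identity, or registers one.
func (t watchTable) upsert(id fileID, path string) *dirWatch {
	if w, ok := t[id]; ok {
		return w // same directory under another name: reuse the watch
	}
	w := &dirWatch{path: path}
	t[id] = w
	return w
}
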
-func (w *Watcher) startRead(watch *watch) error { - if e := syscall.CancelIo(watch.ino.handle); e != nil { - w.Errors <- os.NewSyscallError("CancelIo", e) - w.deleteWatch(watch) - } - mask := toWindowsFlags(watch.mask) - for _, m := range watch.names { - mask |= toWindowsFlags(m) - } - if mask == 0 { - if e := syscall.CloseHandle(watch.ino.handle); e != nil { - w.Errors <- os.NewSyscallError("CloseHandle", e) - } - w.mu.Lock() - delete(w.watches[watch.ino.volume], watch.ino.index) - w.mu.Unlock() - return nil - } - e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0], - uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0) - if e != nil { - err := os.NewSyscallError("ReadDirectoryChanges", e) - if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { - // Watched directory was probably removed - if w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) { - if watch.mask&sysFSONESHOT != 0 { - watch.mask = 0 - } - } - err = nil - } - w.deleteWatch(watch) - w.startRead(watch) - return err - } - return nil -} - -// readEvents reads from the I/O completion port, converts the -// received events into Event objects and sends them via the Events channel. -// Entry point to the I/O thread. -func (w *Watcher) readEvents() { - var ( - n, key uint32 - ov *syscall.Overlapped - ) - runtime.LockOSThread() - - for { - e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE) - watch := (*watch)(unsafe.Pointer(ov)) - - if watch == nil { - select { - case ch := <-w.quit: - w.mu.Lock() - var indexes []indexMap - for _, index := range w.watches { - indexes = append(indexes, index) - } - w.mu.Unlock() - for _, index := range indexes { - for _, watch := range index { - w.deleteWatch(watch) - w.startRead(watch) - } - } - var err error - if e := syscall.CloseHandle(w.port); e != nil { - err = os.NewSyscallError("CloseHandle", e) - } - close(w.Events) - close(w.Errors) - ch <- err - return - case in := <-w.input: - switch in.op { - case opAddWatch: - in.reply <- w.addWatch(in.path, uint64(in.flags)) - case opRemoveWatch: - in.reply <- w.remWatch(in.path) - } - default: - } - continue - } - - switch e { - case syscall.ERROR_MORE_DATA: - if watch == nil { - w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer") - } else { - // The i/o succeeded but the buffer is full. - // In theory we should be building up a full packet. - // In practice we can get away with just carrying on. 
- n = uint32(unsafe.Sizeof(watch.buf)) - } - case syscall.ERROR_ACCESS_DENIED: - // Watched directory was probably removed - w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) - w.deleteWatch(watch) - w.startRead(watch) - continue - case syscall.ERROR_OPERATION_ABORTED: - // CancelIo was called on this handle - continue - default: - w.Errors <- os.NewSyscallError("GetQueuedCompletionPort", e) - continue - case nil: - } - - var offset uint32 - for { - if n == 0 { - w.Events <- newEvent("", sysFSQOVERFLOW) - w.Errors <- errors.New("short read in readEvents()") - break - } - - // Point "raw" to the event in the buffer - raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset])) - // TODO: Consider using unsafe.Slice that is available from go1.17 - // https://stackoverflow.com/questions/51187973/how-to-create-an-array-or-a-slice-from-an-array-unsafe-pointer-in-golang - // instead of using a fixed syscall.MAX_PATH buf, we create a buf that is the size of the path name - size := int(raw.FileNameLength / 2) - var buf []uint16 - sh := (*reflect.SliceHeader)(unsafe.Pointer(&buf)) - sh.Data = uintptr(unsafe.Pointer(&raw.FileName)) - sh.Len = size - sh.Cap = size - name := syscall.UTF16ToString(buf) - fullname := filepath.Join(watch.path, name) - - var mask uint64 - switch raw.Action { - case syscall.FILE_ACTION_REMOVED: - mask = sysFSDELETESELF - case syscall.FILE_ACTION_MODIFIED: - mask = sysFSMODIFY - case syscall.FILE_ACTION_RENAMED_OLD_NAME: - watch.rename = name - case syscall.FILE_ACTION_RENAMED_NEW_NAME: - if watch.names[watch.rename] != 0 { - watch.names[name] |= watch.names[watch.rename] - delete(watch.names, watch.rename) - mask = sysFSMOVESELF - } - } - - sendNameEvent := func() { - if w.sendEvent(fullname, watch.names[name]&mask) { - if watch.names[name]&sysFSONESHOT != 0 { - delete(watch.names, name) - } - } - } - if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME { - sendNameEvent() - } - if raw.Action == syscall.FILE_ACTION_REMOVED { - w.sendEvent(fullname, watch.names[name]&sysFSIGNORED) - delete(watch.names, name) - } - if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) { - if watch.mask&sysFSONESHOT != 0 { - watch.mask = 0 - } - } - if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME { - fullname = filepath.Join(watch.path, watch.rename) - sendNameEvent() - } - - // Move to the next event in the buffer - if raw.NextEntryOffset == 0 { - break - } - offset += raw.NextEntryOffset - - // Error! 
- if offset >= n { - w.Errors <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.") - break - } - } - - if err := w.startRead(watch); err != nil { - w.Errors <- err - } - } -} - -func (w *Watcher) sendEvent(name string, mask uint64) bool { - if mask == 0 { - return false - } - event := newEvent(name, uint32(mask)) - select { - case ch := <-w.quit: - w.quit <- ch - case w.Events <- event: - } - return true -} - -func toWindowsFlags(mask uint64) uint32 { - var m uint32 - if mask&sysFSACCESS != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS - } - if mask&sysFSMODIFY != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE - } - if mask&sysFSATTRIB != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES - } - if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME - } - return m -} - -func toFSnotifyFlags(action uint32) uint64 { - switch action { - case syscall.FILE_ACTION_ADDED: - return sysFSCREATE - case syscall.FILE_ACTION_REMOVED: - return sysFSDELETE - case syscall.FILE_ACTION_MODIFIED: - return sysFSMODIFY - case syscall.FILE_ACTION_RENAMED_OLD_NAME: - return sysFSMOVEDFROM - case syscall.FILE_ACTION_RENAMED_NEW_NAME: - return sysFSMOVEDTO - } - return 0 -} diff --git a/tools/vendor/github.com/go-critic/go-critic/checkers/commentedOutCode_checker.go b/tools/vendor/github.com/go-critic/go-critic/checkers/commentedOutCode_checker.go index 8595b7951..788355d7f 100644 --- a/tools/vendor/github.com/go-critic/go-critic/checkers/commentedOutCode_checker.go +++ b/tools/vendor/github.com/go-critic/go-critic/checkers/commentedOutCode_checker.go @@ -93,6 +93,10 @@ func (c *commentedOutCodeChecker) VisitLocalComment(cg *ast.CommentGroup) { return } + if c.isExampleOutputComment(s) { + return + } + stmt := strparse.Stmt(s) if c.isPermittedStmt(stmt) { @@ -109,11 +113,6 @@ func (c *commentedOutCodeChecker) VisitLocalComment(cg *ast.CommentGroup) { return } - // Some attempts to avoid false positives. - if c.skipBlock(s) { - return - } - // Add braces to make block statement from // multiple statements. stmt = strparse.Stmt(fmt.Sprintf("{ %s }", s)) @@ -123,15 +122,18 @@ func (c *commentedOutCodeChecker) VisitLocalComment(cg *ast.CommentGroup) { } } -func (c *commentedOutCodeChecker) skipBlock(s string) bool { - lines := strings.Split(s, "\n") // There is at least 1 line, that's invariant - - // Special example test block. 
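
// The "Output:" comment special-cased here is the Go testing convention
// for example functions: `go test` runs the example and compares captured
// stdout against the comment, so the text is expected output, not
// commented-out code (see https://go.dev/blog/examples). A canonical
// example test, with an illustrative package name; it lives in a
// _test.go file:

package foo

import "fmt"

// Example is run by `go test`, which checks that what it prints matches
// the Output comment below.
func Example() {
	fmt.Println("hello")
	// Output: hello
}
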
- if isExampleTestFunc(c.fn) && strings.Contains(lines[0], "Output:") { - return true - } - - return false +// An example output comment can be one of the following: +// +// Output: some output +// +// or +// +// Output: +// some output +// +// See https://go.dev/blog/examples +func (c *commentedOutCodeChecker) isExampleOutputComment(s string) bool { + return isExampleTestFunc(c.fn) && strings.Contains(s, "Output:") } func (c *commentedOutCodeChecker) isPermittedStmt(stmt ast.Stmt) bool { diff --git a/tools/vendor/github.com/go-critic/go-critic/checkers/rulesdata/rulesdata.go b/tools/vendor/github.com/go-critic/go-critic/checkers/rulesdata/rulesdata.go index 4ab31076f..ace418d48 100644 --- a/tools/vendor/github.com/go-critic/go-critic/checkers/rulesdata/rulesdata.go +++ b/tools/vendor/github.com/go-critic/go-critic/checkers/rulesdata/rulesdata.go @@ -1391,48 +1391,53 @@ var PrecompiledRules = &ir.File{ Line: 384, SyntaxPatterns: []ir.PatternString{ {Line: 384, Value: "copy($x, $x)"}, - {Line: 385, Value: "math.Max($x, $x)"}, - {Line: 386, Value: "math.Min($x, $x)"}, - {Line: 387, Value: "reflect.Copy($x, $x)"}, - {Line: 388, Value: "reflect.DeepEqual($x, $x)"}, - {Line: 389, Value: "strings.Contains($x, $x)"}, - {Line: 390, Value: "strings.Compare($x, $x)"}, - {Line: 391, Value: "strings.EqualFold($x, $x)"}, - {Line: 392, Value: "strings.HasPrefix($x, $x)"}, - {Line: 393, Value: "strings.HasSuffix($x, $x)"}, - {Line: 394, Value: "strings.Index($x, $x)"}, - {Line: 395, Value: "strings.LastIndex($x, $x)"}, - {Line: 396, Value: "strings.Split($x, $x)"}, - {Line: 397, Value: "strings.SplitAfter($x, $x)"}, - {Line: 398, Value: "strings.SplitAfterN($x, $x, $_)"}, - {Line: 399, Value: "strings.SplitN($x, $x, $_)"}, - {Line: 400, Value: "strings.Replace($_, $x, $x, $_)"}, - {Line: 401, Value: "strings.ReplaceAll($_, $x, $x)"}, - {Line: 402, Value: "bytes.Contains($x, $x)"}, - {Line: 403, Value: "bytes.Compare($x, $x)"}, - {Line: 404, Value: "bytes.Equal($x, $x)"}, - {Line: 405, Value: "bytes.EqualFold($x, $x)"}, - {Line: 406, Value: "bytes.HasPrefix($x, $x)"}, - {Line: 407, Value: "bytes.HasSuffix($x, $x)"}, - {Line: 408, Value: "bytes.Index($x, $x)"}, - {Line: 409, Value: "bytes.LastIndex($x, $x)"}, - {Line: 410, Value: "bytes.Split($x, $x)"}, - {Line: 411, Value: "bytes.SplitAfter($x, $x)"}, - {Line: 412, Value: "bytes.SplitAfterN($x, $x, $_)"}, - {Line: 413, Value: "bytes.SplitN($x, $x, $_)"}, - {Line: 414, Value: "bytes.Replace($_, $x, $x, $_)"}, - {Line: 415, Value: "bytes.ReplaceAll($_, $x, $x)"}, - {Line: 416, Value: "types.Identical($x, $x)"}, - {Line: 417, Value: "types.IdenticalIgnoreTags($x, $x)"}, - {Line: 418, Value: "draw.Draw($x, $_, $x, $_, $_)"}, + {Line: 385, Value: "cmp.Compare($x, $x)"}, + {Line: 386, Value: "maps.Equal($x, $x)"}, + {Line: 387, Value: "math.Dim($x, $x)"}, + {Line: 388, Value: "math.Max($x, $x)"}, + {Line: 389, Value: "math.Min($x, $x)"}, + {Line: 390, Value: "reflect.Copy($x, $x)"}, + {Line: 391, Value: "reflect.DeepEqual($x, $x)"}, + {Line: 392, Value: "slices.Compare($x, $x)"}, + {Line: 393, Value: "slices.Equal($x, $x)"}, + {Line: 394, Value: "strings.Contains($x, $x)"}, + {Line: 395, Value: "strings.Compare($x, $x)"}, + {Line: 396, Value: "strings.EqualFold($x, $x)"}, + {Line: 397, Value: "strings.HasPrefix($x, $x)"}, + {Line: 398, Value: "strings.HasSuffix($x, $x)"}, + {Line: 399, Value: "strings.Index($x, $x)"}, + {Line: 400, Value: "strings.LastIndex($x, $x)"}, + {Line: 401, Value: "strings.Split($x, $x)"}, + {Line: 402, Value: "strings.SplitAfter($x, 
$x)"}, + {Line: 403, Value: "strings.SplitAfterN($x, $x, $_)"}, + {Line: 404, Value: "strings.SplitN($x, $x, $_)"}, + {Line: 405, Value: "strings.Replace($_, $x, $x, $_)"}, + {Line: 406, Value: "strings.ReplaceAll($_, $x, $x)"}, + {Line: 407, Value: "bytes.Contains($x, $x)"}, + {Line: 408, Value: "bytes.Compare($x, $x)"}, + {Line: 409, Value: "bytes.Equal($x, $x)"}, + {Line: 410, Value: "bytes.EqualFold($x, $x)"}, + {Line: 411, Value: "bytes.HasPrefix($x, $x)"}, + {Line: 412, Value: "bytes.HasSuffix($x, $x)"}, + {Line: 413, Value: "bytes.Index($x, $x)"}, + {Line: 414, Value: "bytes.LastIndex($x, $x)"}, + {Line: 415, Value: "bytes.Split($x, $x)"}, + {Line: 416, Value: "bytes.SplitAfter($x, $x)"}, + {Line: 417, Value: "bytes.SplitAfterN($x, $x, $_)"}, + {Line: 418, Value: "bytes.SplitN($x, $x, $_)"}, + {Line: 419, Value: "bytes.Replace($_, $x, $x, $_)"}, + {Line: 420, Value: "bytes.ReplaceAll($_, $x, $x)"}, + {Line: 421, Value: "types.Identical($x, $x)"}, + {Line: 422, Value: "types.IdenticalIgnoreTags($x, $x)"}, + {Line: 423, Value: "draw.Draw($x, $_, $x, $_, $_)"}, }, ReportTemplate: "suspicious duplicated args in $$", - WhereExpr: ir.FilterExpr{Line: 419, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, + WhereExpr: ir.FilterExpr{Line: 424, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, }, }, }, { - Line: 427, + Line: 432, Name: "returnAfterHttpError", MatcherName: "m", DocTags: []string{"diagnostic", "experimental"}, @@ -1440,14 +1445,14 @@ var PrecompiledRules = &ir.File{ DocBefore: "if err != nil { http.Error(...); }", DocAfter: "if err != nil { http.Error(...); return; }", Rules: []ir.Rule{{ - Line: 428, - SyntaxPatterns: []ir.PatternString{{Line: 428, Value: "if $_ { $*_; http.Error($w, $err, $code) }"}}, + Line: 433, + SyntaxPatterns: []ir.PatternString{{Line: 433, Value: "if $_ { $*_; http.Error($w, $err, $code) }"}}, ReportTemplate: "Possibly return is missed after the http.Error call", LocationVar: "w", }}, }, { - Line: 437, + Line: 442, Name: "preferFilepathJoin", MatcherName: "m", DocTags: []string{"style", "experimental"}, @@ -1455,35 +1460,35 @@ var PrecompiledRules = &ir.File{ DocBefore: "x + string(os.PathSeparator) + y", DocAfter: "filepath.Join(x, y)", Rules: []ir.Rule{{ - Line: 438, - SyntaxPatterns: []ir.PatternString{{Line: 438, Value: "$x + string(os.PathSeparator) + $y"}}, + Line: 443, + SyntaxPatterns: []ir.PatternString{{Line: 443, Value: "$x + string(os.PathSeparator) + $y"}}, ReportTemplate: "filepath.Join($x, $y) should be preferred to the $$", SuggestTemplate: "filepath.Join($x, $y)", WhereExpr: ir.FilterExpr{ - Line: 439, + Line: 444, Op: ir.FilterAndOp, Src: "m[\"x\"].Type.Is(`string`) && m[\"y\"].Type.Is(`string`)", Args: []ir.FilterExpr{ { - Line: 439, + Line: 444, Op: ir.FilterVarTypeIsOp, Src: "m[\"x\"].Type.Is(`string`)", Value: "x", - Args: []ir.FilterExpr{{Line: 439, Op: ir.FilterStringOp, Src: "`string`", Value: "string"}}, + Args: []ir.FilterExpr{{Line: 444, Op: ir.FilterStringOp, Src: "`string`", Value: "string"}}, }, { - Line: 439, + Line: 444, Op: ir.FilterVarTypeIsOp, Src: "m[\"y\"].Type.Is(`string`)", Value: "y", - Args: []ir.FilterExpr{{Line: 439, Op: ir.FilterStringOp, Src: "`string`", Value: "string"}}, + Args: []ir.FilterExpr{{Line: 444, Op: ir.FilterStringOp, Src: "`string`", Value: "string"}}, }, }, }, }}, }, { - Line: 448, + Line: 453, Name: "preferStringWriter", MatcherName: "m", DocTags: []string{"performance", "experimental"}, @@ -1492,35 +1497,35 @@ var PrecompiledRules = &ir.File{ DocAfter: 
"w.WriteString(\"foo\")", Rules: []ir.Rule{ { - Line: 449, - SyntaxPatterns: []ir.PatternString{{Line: 449, Value: "$w.Write([]byte($s))"}}, + Line: 454, + SyntaxPatterns: []ir.PatternString{{Line: 454, Value: "$w.Write([]byte($s))"}}, ReportTemplate: "$w.WriteString($s) should be preferred to the $$", SuggestTemplate: "$w.WriteString($s)", WhereExpr: ir.FilterExpr{ - Line: 450, + Line: 455, Op: ir.FilterVarTypeImplementsOp, Src: "m[\"w\"].Type.Implements(\"io.StringWriter\")", Value: "w", - Args: []ir.FilterExpr{{Line: 450, Op: ir.FilterStringOp, Src: "\"io.StringWriter\"", Value: "io.StringWriter"}}, + Args: []ir.FilterExpr{{Line: 455, Op: ir.FilterStringOp, Src: "\"io.StringWriter\"", Value: "io.StringWriter"}}, }, }, { - Line: 454, - SyntaxPatterns: []ir.PatternString{{Line: 454, Value: "io.WriteString($w, $s)"}}, + Line: 459, + SyntaxPatterns: []ir.PatternString{{Line: 459, Value: "io.WriteString($w, $s)"}}, ReportTemplate: "$w.WriteString($s) should be preferred to the $$", SuggestTemplate: "$w.WriteString($s)", WhereExpr: ir.FilterExpr{ - Line: 455, + Line: 460, Op: ir.FilterVarTypeImplementsOp, Src: "m[\"w\"].Type.Implements(\"io.StringWriter\")", Value: "w", - Args: []ir.FilterExpr{{Line: 455, Op: ir.FilterStringOp, Src: "\"io.StringWriter\"", Value: "io.StringWriter"}}, + Args: []ir.FilterExpr{{Line: 460, Op: ir.FilterStringOp, Src: "\"io.StringWriter\"", Value: "io.StringWriter"}}, }, }, }, }, { - Line: 464, + Line: 469, Name: "sliceClear", MatcherName: "m", DocTags: []string{"performance", "experimental"}, @@ -1528,22 +1533,22 @@ var PrecompiledRules = &ir.File{ DocBefore: "for i := 0; i < len(buf); i++ { buf[i] = 0 }", DocAfter: "for i := range buf { buf[i] = 0 }", Rules: []ir.Rule{{ - Line: 465, - SyntaxPatterns: []ir.PatternString{{Line: 465, Value: "for $i := 0; $i < len($xs); $i++ { $xs[$i] = $zero }"}}, + Line: 470, + SyntaxPatterns: []ir.PatternString{{Line: 470, Value: "for $i := 0; $i < len($xs); $i++ { $xs[$i] = $zero }"}}, ReportTemplate: "rewrite as for-range so compiler can recognize this pattern", WhereExpr: ir.FilterExpr{ - Line: 466, + Line: 471, Op: ir.FilterEqOp, Src: "m[\"zero\"].Value.Int() == 0", Args: []ir.FilterExpr{ { - Line: 466, + Line: 471, Op: ir.FilterVarValueIntOp, Src: "m[\"zero\"].Value.Int()", Value: "zero", }, { - Line: 466, + Line: 471, Op: ir.FilterIntOp, Src: "0", Value: int64(0), @@ -1553,7 +1558,7 @@ var PrecompiledRules = &ir.File{ }}, }, { - Line: 474, + Line: 479, Name: "syncMapLoadAndDelete", MatcherName: "m", DocTags: []string{"diagnostic", "experimental"}, @@ -1561,33 +1566,33 @@ var PrecompiledRules = &ir.File{ DocBefore: "v, ok := m.Load(k); if ok { m.Delete($k); f(v); }", DocAfter: "v, deleted := m.LoadAndDelete(k); if deleted { f(v) }", Rules: []ir.Rule{{ - Line: 475, - SyntaxPatterns: []ir.PatternString{{Line: 475, Value: "$_, $ok := $m.Load($k); if $ok { $m.Delete($k); $*_ }"}}, + Line: 480, + SyntaxPatterns: []ir.PatternString{{Line: 480, Value: "$_, $ok := $m.Load($k); if $ok { $m.Delete($k); $*_ }"}}, ReportTemplate: "use $m.LoadAndDelete to perform load+delete operations atomically", WhereExpr: ir.FilterExpr{ - Line: 476, + Line: 481, Op: ir.FilterAndOp, Src: "m.GoVersion().GreaterEqThan(\"1.15\") &&\n\tm[\"m\"].Type.Is(`*sync.Map`)", Args: []ir.FilterExpr{ { - Line: 476, + Line: 481, Op: ir.FilterGoVersionGreaterEqThanOp, Src: "m.GoVersion().GreaterEqThan(\"1.15\")", Value: "1.15", }, { - Line: 477, + Line: 482, Op: ir.FilterVarTypeIsOp, Src: "m[\"m\"].Type.Is(`*sync.Map`)", Value: "m", - Args: []ir.FilterExpr{{Line: 477, 
Op: ir.FilterStringOp, Src: "`*sync.Map`", Value: "*sync.Map"}}, + Args: []ir.FilterExpr{{Line: 482, Op: ir.FilterStringOp, Src: "`*sync.Map`", Value: "*sync.Map"}}, }, }, }, }}, }, { - Line: 485, + Line: 490, Name: "sprintfQuotedString", MatcherName: "m", DocTags: []string{"diagnostic", "experimental"}, @@ -1595,34 +1600,34 @@ var PrecompiledRules = &ir.File{ DocBefore: "fmt.Sprintf(`\"%s\"`, s)", DocAfter: "fmt.Sprintf(`%q`, s)", Rules: []ir.Rule{{ - Line: 486, - SyntaxPatterns: []ir.PatternString{{Line: 486, Value: "fmt.Sprintf($s, $*_)"}}, + Line: 491, + SyntaxPatterns: []ir.PatternString{{Line: 491, Value: "fmt.Sprintf($s, $*_)"}}, ReportTemplate: "use %q instead of \"%s\" for quoted strings", WhereExpr: ir.FilterExpr{ - Line: 487, + Line: 492, Op: ir.FilterOrOp, Src: "m[\"s\"].Text.Matches(\"^`.*\\\"%s\\\".*`$\") ||\n\tm[\"s\"].Text.Matches(`^\".*\\\\\"%s\\\\\".*\"$`)", Args: []ir.FilterExpr{ { - Line: 487, + Line: 492, Op: ir.FilterVarTextMatchesOp, Src: "m[\"s\"].Text.Matches(\"^`.*\\\"%s\\\".*`$\")", Value: "s", - Args: []ir.FilterExpr{{Line: 487, Op: ir.FilterStringOp, Src: "\"^`.*\\\"%s\\\".*`$\"", Value: "^`.*\"%s\".*`$"}}, + Args: []ir.FilterExpr{{Line: 492, Op: ir.FilterStringOp, Src: "\"^`.*\\\"%s\\\".*`$\"", Value: "^`.*\"%s\".*`$"}}, }, { - Line: 488, + Line: 493, Op: ir.FilterVarTextMatchesOp, Src: "m[\"s\"].Text.Matches(`^\".*\\\\\"%s\\\\\".*\"$`)", Value: "s", - Args: []ir.FilterExpr{{Line: 488, Op: ir.FilterStringOp, Src: "`^\".*\\\\\"%s\\\\\".*\"$`", Value: "^\".*\\\\\"%s\\\\\".*\"$"}}, + Args: []ir.FilterExpr{{Line: 493, Op: ir.FilterStringOp, Src: "`^\".*\\\\\"%s\\\\\".*\"$`", Value: "^\".*\\\\\"%s\\\\\".*\"$"}}, }, }, }, }}, }, { - Line: 496, + Line: 501, Name: "offBy1", MatcherName: "m", DocTags: []string{"diagnostic"}, @@ -1631,80 +1636,80 @@ var PrecompiledRules = &ir.File{ DocAfter: "xs[len(xs)-1]", Rules: []ir.Rule{ { - Line: 497, - SyntaxPatterns: []ir.PatternString{{Line: 497, Value: "$x[len($x)]"}}, + Line: 502, + SyntaxPatterns: []ir.PatternString{{Line: 502, Value: "$x[len($x)]"}}, ReportTemplate: "index expr always panics; maybe you wanted $x[len($x)-1]?", SuggestTemplate: "$x[len($x)-1]", WhereExpr: ir.FilterExpr{ - Line: 498, + Line: 503, Op: ir.FilterAndOp, Src: "m[\"x\"].Pure && m[\"x\"].Type.Is(`[]$_`)", Args: []ir.FilterExpr{ - {Line: 498, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, + {Line: 503, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, { - Line: 498, + Line: 503, Op: ir.FilterVarTypeIsOp, Src: "m[\"x\"].Type.Is(`[]$_`)", Value: "x", - Args: []ir.FilterExpr{{Line: 498, Op: ir.FilterStringOp, Src: "`[]$_`", Value: "[]$_"}}, + Args: []ir.FilterExpr{{Line: 503, Op: ir.FilterStringOp, Src: "`[]$_`", Value: "[]$_"}}, }, }, }, }, { - Line: 505, + Line: 510, SyntaxPatterns: []ir.PatternString{ - {Line: 506, Value: "$i := strings.Index($s, $_); $_ := $slicing[$i:]"}, - {Line: 507, Value: "$i := strings.Index($s, $_); $_ = $slicing[$i:]"}, - {Line: 508, Value: "$i := bytes.Index($s, $_); $_ := $slicing[$i:]"}, - {Line: 509, Value: "$i := bytes.Index($s, $_); $_ = $slicing[$i:]"}, + {Line: 511, Value: "$i := strings.Index($s, $_); $_ := $slicing[$i:]"}, + {Line: 512, Value: "$i := strings.Index($s, $_); $_ = $slicing[$i:]"}, + {Line: 513, Value: "$i := bytes.Index($s, $_); $_ := $slicing[$i:]"}, + {Line: 514, Value: "$i := bytes.Index($s, $_); $_ = $slicing[$i:]"}, }, ReportTemplate: "Index() can return -1; maybe you wanted to do $s[$i+1:]", WhereExpr: ir.FilterExpr{ - Line: 510, + Line: 515, Op: ir.FilterEqOp, Src: 
"m[\"s\"].Text == m[\"slicing\"].Text", Args: []ir.FilterExpr{ - {Line: 510, Op: ir.FilterVarTextOp, Src: "m[\"s\"].Text", Value: "s"}, - {Line: 510, Op: ir.FilterVarTextOp, Src: "m[\"slicing\"].Text", Value: "slicing"}, + {Line: 515, Op: ir.FilterVarTextOp, Src: "m[\"s\"].Text", Value: "s"}, + {Line: 515, Op: ir.FilterVarTextOp, Src: "m[\"slicing\"].Text", Value: "slicing"}, }, }, LocationVar: "slicing", }, { - Line: 514, + Line: 519, SyntaxPatterns: []ir.PatternString{ - {Line: 515, Value: "$i := strings.Index($s, $_); $_ := $slicing[:$i]"}, - {Line: 516, Value: "$i := strings.Index($s, $_); $_ = $slicing[:$i]"}, - {Line: 517, Value: "$i := bytes.Index($s, $_); $_ := $slicing[:$i]"}, - {Line: 518, Value: "$i := bytes.Index($s, $_); $_ = $slicing[:$i]"}, + {Line: 520, Value: "$i := strings.Index($s, $_); $_ := $slicing[:$i]"}, + {Line: 521, Value: "$i := strings.Index($s, $_); $_ = $slicing[:$i]"}, + {Line: 522, Value: "$i := bytes.Index($s, $_); $_ := $slicing[:$i]"}, + {Line: 523, Value: "$i := bytes.Index($s, $_); $_ = $slicing[:$i]"}, }, ReportTemplate: "Index() can return -1; maybe you wanted to do $s[:$i+1]", WhereExpr: ir.FilterExpr{ - Line: 519, + Line: 524, Op: ir.FilterEqOp, Src: "m[\"s\"].Text == m[\"slicing\"].Text", Args: []ir.FilterExpr{ - {Line: 519, Op: ir.FilterVarTextOp, Src: "m[\"s\"].Text", Value: "s"}, - {Line: 519, Op: ir.FilterVarTextOp, Src: "m[\"slicing\"].Text", Value: "slicing"}, + {Line: 524, Op: ir.FilterVarTextOp, Src: "m[\"s\"].Text", Value: "s"}, + {Line: 524, Op: ir.FilterVarTextOp, Src: "m[\"slicing\"].Text", Value: "slicing"}, }, }, LocationVar: "slicing", }, { - Line: 523, + Line: 528, SyntaxPatterns: []ir.PatternString{ - {Line: 524, Value: "$s[strings.Index($s, $_):]"}, - {Line: 525, Value: "$s[:strings.Index($s, $_)]"}, - {Line: 526, Value: "$s[bytes.Index($s, $_):]"}, - {Line: 527, Value: "$s[:bytes.Index($s, $_)]"}, + {Line: 529, Value: "$s[strings.Index($s, $_):]"}, + {Line: 530, Value: "$s[:strings.Index($s, $_)]"}, + {Line: 531, Value: "$s[bytes.Index($s, $_):]"}, + {Line: 532, Value: "$s[:bytes.Index($s, $_)]"}, }, ReportTemplate: "Index() can return -1; maybe you wanted to do Index()+1", }, }, }, { - Line: 535, + Line: 540, Name: "unslice", MatcherName: "m", DocTags: []string{"style"}, @@ -1712,35 +1717,35 @@ var PrecompiledRules = &ir.File{ DocBefore: "copy(b[:], values...)", DocAfter: "copy(b, values...)", Rules: []ir.Rule{{ - Line: 536, - SyntaxPatterns: []ir.PatternString{{Line: 536, Value: "$s[:]"}}, + Line: 541, + SyntaxPatterns: []ir.PatternString{{Line: 541, Value: "$s[:]"}}, ReportTemplate: "could simplify $$ to $s", SuggestTemplate: "$s", WhereExpr: ir.FilterExpr{ - Line: 537, + Line: 542, Op: ir.FilterOrOp, Src: "m[\"s\"].Type.Is(`string`) || m[\"s\"].Type.Is(`[]$_`)", Args: []ir.FilterExpr{ { - Line: 537, + Line: 542, Op: ir.FilterVarTypeIsOp, Src: "m[\"s\"].Type.Is(`string`)", Value: "s", - Args: []ir.FilterExpr{{Line: 537, Op: ir.FilterStringOp, Src: "`string`", Value: "string"}}, + Args: []ir.FilterExpr{{Line: 542, Op: ir.FilterStringOp, Src: "`string`", Value: "string"}}, }, { - Line: 537, + Line: 542, Op: ir.FilterVarTypeIsOp, Src: "m[\"s\"].Type.Is(`[]$_`)", Value: "s", - Args: []ir.FilterExpr{{Line: 537, Op: ir.FilterStringOp, Src: "`[]$_`", Value: "[]$_"}}, + Args: []ir.FilterExpr{{Line: 542, Op: ir.FilterStringOp, Src: "`[]$_`", Value: "[]$_"}}, }, }, }, }}, }, { - Line: 546, + Line: 551, Name: "yodaStyleExpr", MatcherName: "m", DocTags: []string{"style", "experimental"}, @@ -1749,105 +1754,105 @@ var PrecompiledRules = 
&ir.File{ DocAfter: "return ptr != nil", Rules: []ir.Rule{ { - Line: 547, - SyntaxPatterns: []ir.PatternString{{Line: 547, Value: "$constval != $x"}}, + Line: 552, + SyntaxPatterns: []ir.PatternString{{Line: 552, Value: "$constval != $x"}}, ReportTemplate: "consider to change order in expression to $x != $constval", WhereExpr: ir.FilterExpr{ - Line: 547, + Line: 552, Op: ir.FilterAndOp, Src: "m[\"constval\"].Node.Is(`BasicLit`) && !m[\"x\"].Node.Is(`BasicLit`)", Args: []ir.FilterExpr{ { - Line: 547, + Line: 552, Op: ir.FilterVarNodeIsOp, Src: "m[\"constval\"].Node.Is(`BasicLit`)", Value: "constval", - Args: []ir.FilterExpr{{Line: 547, Op: ir.FilterStringOp, Src: "`BasicLit`", Value: "BasicLit"}}, + Args: []ir.FilterExpr{{Line: 552, Op: ir.FilterStringOp, Src: "`BasicLit`", Value: "BasicLit"}}, }, { - Line: 547, + Line: 552, Op: ir.FilterNotOp, Src: "!m[\"x\"].Node.Is(`BasicLit`)", Args: []ir.FilterExpr{{ - Line: 547, + Line: 552, Op: ir.FilterVarNodeIsOp, Src: "m[\"x\"].Node.Is(`BasicLit`)", Value: "x", - Args: []ir.FilterExpr{{Line: 547, Op: ir.FilterStringOp, Src: "`BasicLit`", Value: "BasicLit"}}, + Args: []ir.FilterExpr{{Line: 552, Op: ir.FilterStringOp, Src: "`BasicLit`", Value: "BasicLit"}}, }}, }, }, }, }, { - Line: 549, - SyntaxPatterns: []ir.PatternString{{Line: 549, Value: "$constval == $x"}}, + Line: 554, + SyntaxPatterns: []ir.PatternString{{Line: 554, Value: "$constval == $x"}}, ReportTemplate: "consider to change order in expression to $x == $constval", WhereExpr: ir.FilterExpr{ - Line: 549, + Line: 554, Op: ir.FilterAndOp, Src: "m[\"constval\"].Node.Is(`BasicLit`) && !m[\"x\"].Node.Is(`BasicLit`)", Args: []ir.FilterExpr{ { - Line: 549, + Line: 554, Op: ir.FilterVarNodeIsOp, Src: "m[\"constval\"].Node.Is(`BasicLit`)", Value: "constval", - Args: []ir.FilterExpr{{Line: 549, Op: ir.FilterStringOp, Src: "`BasicLit`", Value: "BasicLit"}}, + Args: []ir.FilterExpr{{Line: 554, Op: ir.FilterStringOp, Src: "`BasicLit`", Value: "BasicLit"}}, }, { - Line: 549, + Line: 554, Op: ir.FilterNotOp, Src: "!m[\"x\"].Node.Is(`BasicLit`)", Args: []ir.FilterExpr{{ - Line: 549, + Line: 554, Op: ir.FilterVarNodeIsOp, Src: "m[\"x\"].Node.Is(`BasicLit`)", Value: "x", - Args: []ir.FilterExpr{{Line: 549, Op: ir.FilterStringOp, Src: "`BasicLit`", Value: "BasicLit"}}, + Args: []ir.FilterExpr{{Line: 554, Op: ir.FilterStringOp, Src: "`BasicLit`", Value: "BasicLit"}}, }}, }, }, }, }, { - Line: 552, - SyntaxPatterns: []ir.PatternString{{Line: 552, Value: "nil != $x"}}, + Line: 557, + SyntaxPatterns: []ir.PatternString{{Line: 557, Value: "nil != $x"}}, ReportTemplate: "consider to change order in expression to $x != nil", WhereExpr: ir.FilterExpr{ - Line: 552, + Line: 557, Op: ir.FilterNotOp, Src: "!m[\"x\"].Node.Is(`BasicLit`)", Args: []ir.FilterExpr{{ - Line: 552, + Line: 557, Op: ir.FilterVarNodeIsOp, Src: "m[\"x\"].Node.Is(`BasicLit`)", Value: "x", - Args: []ir.FilterExpr{{Line: 552, Op: ir.FilterStringOp, Src: "`BasicLit`", Value: "BasicLit"}}, + Args: []ir.FilterExpr{{Line: 557, Op: ir.FilterStringOp, Src: "`BasicLit`", Value: "BasicLit"}}, }}, }, }, { - Line: 554, - SyntaxPatterns: []ir.PatternString{{Line: 554, Value: "nil == $x"}}, + Line: 559, + SyntaxPatterns: []ir.PatternString{{Line: 559, Value: "nil == $x"}}, ReportTemplate: "consider to change order in expression to $x == nil", WhereExpr: ir.FilterExpr{ - Line: 554, + Line: 559, Op: ir.FilterNotOp, Src: "!m[\"x\"].Node.Is(`BasicLit`)", Args: []ir.FilterExpr{{ - Line: 554, + Line: 559, Op: ir.FilterVarNodeIsOp, Src: "m[\"x\"].Node.Is(`BasicLit`)", 
Value: "x", - Args: []ir.FilterExpr{{Line: 554, Op: ir.FilterStringOp, Src: "`BasicLit`", Value: "BasicLit"}}, + Args: []ir.FilterExpr{{Line: 559, Op: ir.FilterStringOp, Src: "`BasicLit`", Value: "BasicLit"}}, }}, }, }, }, }, { - Line: 562, + Line: 567, Name: "equalFold", MatcherName: "m", DocTags: []string{"performance", "experimental"}, @@ -1856,114 +1861,114 @@ var PrecompiledRules = &ir.File{ DocAfter: "strings.EqualFold(x, y)", Rules: []ir.Rule{ { - Line: 571, + Line: 576, SyntaxPatterns: []ir.PatternString{ - {Line: 572, Value: "strings.ToLower($x) == $y"}, - {Line: 573, Value: "strings.ToLower($x) == strings.ToLower($y)"}, - {Line: 574, Value: "$x == strings.ToLower($y)"}, - {Line: 575, Value: "strings.ToUpper($x) == $y"}, - {Line: 576, Value: "strings.ToUpper($x) == strings.ToUpper($y)"}, - {Line: 577, Value: "$x == strings.ToUpper($y)"}, + {Line: 577, Value: "strings.ToLower($x) == $y"}, + {Line: 578, Value: "strings.ToLower($x) == strings.ToLower($y)"}, + {Line: 579, Value: "$x == strings.ToLower($y)"}, + {Line: 580, Value: "strings.ToUpper($x) == $y"}, + {Line: 581, Value: "strings.ToUpper($x) == strings.ToUpper($y)"}, + {Line: 582, Value: "$x == strings.ToUpper($y)"}, }, ReportTemplate: "consider replacing with strings.EqualFold($x, $y)", SuggestTemplate: "strings.EqualFold($x, $y)", WhereExpr: ir.FilterExpr{ - Line: 578, + Line: 583, Op: ir.FilterAndOp, Src: "m[\"x\"].Pure && m[\"y\"].Pure && m[\"x\"].Text != m[\"y\"].Text", Args: []ir.FilterExpr{ { - Line: 578, + Line: 583, Op: ir.FilterAndOp, Src: "m[\"x\"].Pure && m[\"y\"].Pure", Args: []ir.FilterExpr{ - {Line: 578, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, - {Line: 578, Op: ir.FilterVarPureOp, Src: "m[\"y\"].Pure", Value: "y"}, + {Line: 583, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, + {Line: 583, Op: ir.FilterVarPureOp, Src: "m[\"y\"].Pure", Value: "y"}, }, }, { - Line: 578, + Line: 583, Op: ir.FilterNeqOp, Src: "m[\"x\"].Text != m[\"y\"].Text", Args: []ir.FilterExpr{ - {Line: 578, Op: ir.FilterVarTextOp, Src: "m[\"x\"].Text", Value: "x"}, - {Line: 578, Op: ir.FilterVarTextOp, Src: "m[\"y\"].Text", Value: "y"}, + {Line: 583, Op: ir.FilterVarTextOp, Src: "m[\"x\"].Text", Value: "x"}, + {Line: 583, Op: ir.FilterVarTextOp, Src: "m[\"y\"].Text", Value: "y"}, }, }, }, }, }, { - Line: 583, + Line: 588, SyntaxPatterns: []ir.PatternString{ - {Line: 584, Value: "strings.ToLower($x) != $y"}, - {Line: 585, Value: "strings.ToLower($x) != strings.ToLower($y)"}, - {Line: 586, Value: "$x != strings.ToLower($y)"}, - {Line: 587, Value: "strings.ToUpper($x) != $y"}, - {Line: 588, Value: "strings.ToUpper($x) != strings.ToUpper($y)"}, - {Line: 589, Value: "$x != strings.ToUpper($y)"}, + {Line: 589, Value: "strings.ToLower($x) != $y"}, + {Line: 590, Value: "strings.ToLower($x) != strings.ToLower($y)"}, + {Line: 591, Value: "$x != strings.ToLower($y)"}, + {Line: 592, Value: "strings.ToUpper($x) != $y"}, + {Line: 593, Value: "strings.ToUpper($x) != strings.ToUpper($y)"}, + {Line: 594, Value: "$x != strings.ToUpper($y)"}, }, ReportTemplate: "consider replacing with !strings.EqualFold($x, $y)", SuggestTemplate: "!strings.EqualFold($x, $y)", WhereExpr: ir.FilterExpr{ - Line: 590, + Line: 595, Op: ir.FilterAndOp, Src: "m[\"x\"].Pure && m[\"y\"].Pure && m[\"x\"].Text != m[\"y\"].Text", Args: []ir.FilterExpr{ { - Line: 590, + Line: 595, Op: ir.FilterAndOp, Src: "m[\"x\"].Pure && m[\"y\"].Pure", Args: []ir.FilterExpr{ - {Line: 590, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, - {Line: 590, Op: 
ir.FilterVarPureOp, Src: "m[\"y\"].Pure", Value: "y"}, + {Line: 595, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, + {Line: 595, Op: ir.FilterVarPureOp, Src: "m[\"y\"].Pure", Value: "y"}, }, }, { - Line: 590, + Line: 595, Op: ir.FilterNeqOp, Src: "m[\"x\"].Text != m[\"y\"].Text", Args: []ir.FilterExpr{ - {Line: 590, Op: ir.FilterVarTextOp, Src: "m[\"x\"].Text", Value: "x"}, - {Line: 590, Op: ir.FilterVarTextOp, Src: "m[\"y\"].Text", Value: "y"}, + {Line: 595, Op: ir.FilterVarTextOp, Src: "m[\"x\"].Text", Value: "x"}, + {Line: 595, Op: ir.FilterVarTextOp, Src: "m[\"y\"].Text", Value: "y"}, }, }, }, }, }, { - Line: 595, + Line: 600, SyntaxPatterns: []ir.PatternString{ - {Line: 596, Value: "bytes.Equal(bytes.ToLower($x), $y)"}, - {Line: 597, Value: "bytes.Equal(bytes.ToLower($x), bytes.ToLower($y))"}, - {Line: 598, Value: "bytes.Equal($x, bytes.ToLower($y))"}, - {Line: 599, Value: "bytes.Equal(bytes.ToUpper($x), $y)"}, - {Line: 600, Value: "bytes.Equal(bytes.ToUpper($x), bytes.ToUpper($y))"}, - {Line: 601, Value: "bytes.Equal($x, bytes.ToUpper($y))"}, + {Line: 601, Value: "bytes.Equal(bytes.ToLower($x), $y)"}, + {Line: 602, Value: "bytes.Equal(bytes.ToLower($x), bytes.ToLower($y))"}, + {Line: 603, Value: "bytes.Equal($x, bytes.ToLower($y))"}, + {Line: 604, Value: "bytes.Equal(bytes.ToUpper($x), $y)"}, + {Line: 605, Value: "bytes.Equal(bytes.ToUpper($x), bytes.ToUpper($y))"}, + {Line: 606, Value: "bytes.Equal($x, bytes.ToUpper($y))"}, }, ReportTemplate: "consider replacing with bytes.EqualFold($x, $y)", SuggestTemplate: "bytes.EqualFold($x, $y)", WhereExpr: ir.FilterExpr{ - Line: 602, + Line: 607, Op: ir.FilterAndOp, Src: "m[\"x\"].Pure && m[\"y\"].Pure && m[\"x\"].Text != m[\"y\"].Text", Args: []ir.FilterExpr{ { - Line: 602, + Line: 607, Op: ir.FilterAndOp, Src: "m[\"x\"].Pure && m[\"y\"].Pure", Args: []ir.FilterExpr{ - {Line: 602, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, - {Line: 602, Op: ir.FilterVarPureOp, Src: "m[\"y\"].Pure", Value: "y"}, + {Line: 607, Op: ir.FilterVarPureOp, Src: "m[\"x\"].Pure", Value: "x"}, + {Line: 607, Op: ir.FilterVarPureOp, Src: "m[\"y\"].Pure", Value: "y"}, }, }, { - Line: 602, + Line: 607, Op: ir.FilterNeqOp, Src: "m[\"x\"].Text != m[\"y\"].Text", Args: []ir.FilterExpr{ - {Line: 602, Op: ir.FilterVarTextOp, Src: "m[\"x\"].Text", Value: "x"}, - {Line: 602, Op: ir.FilterVarTextOp, Src: "m[\"y\"].Text", Value: "y"}, + {Line: 607, Op: ir.FilterVarTextOp, Src: "m[\"x\"].Text", Value: "x"}, + {Line: 607, Op: ir.FilterVarTextOp, Src: "m[\"y\"].Text", Value: "y"}, }, }, }, @@ -1972,7 +1977,7 @@ var PrecompiledRules = &ir.File{ }, }, { - Line: 611, + Line: 616, Name: "argOrder", MatcherName: "m", DocTags: []string{"diagnostic"}, @@ -1980,45 +1985,45 @@ var PrecompiledRules = &ir.File{ DocBefore: "strings.HasPrefix(\"#\", userpass)", DocAfter: "strings.HasPrefix(userpass, \"#\")", Rules: []ir.Rule{{ - Line: 612, + Line: 617, SyntaxPatterns: []ir.PatternString{ - {Line: 613, Value: "strings.HasPrefix($lit, $s)"}, - {Line: 614, Value: "bytes.HasPrefix($lit, $s)"}, - {Line: 615, Value: "strings.HasSuffix($lit, $s)"}, - {Line: 616, Value: "bytes.HasSuffix($lit, $s)"}, - {Line: 617, Value: "strings.Contains($lit, $s)"}, - {Line: 618, Value: "bytes.Contains($lit, $s)"}, - {Line: 619, Value: "strings.TrimPrefix($lit, $s)"}, - {Line: 620, Value: "bytes.TrimPrefix($lit, $s)"}, - {Line: 621, Value: "strings.TrimSuffix($lit, $s)"}, - {Line: 622, Value: "bytes.TrimSuffix($lit, $s)"}, - {Line: 623, Value: "strings.Split($lit, $s)"}, - {Line: 624, Value: 
"bytes.Split($lit, $s)"}, + {Line: 618, Value: "strings.HasPrefix($lit, $s)"}, + {Line: 619, Value: "bytes.HasPrefix($lit, $s)"}, + {Line: 620, Value: "strings.HasSuffix($lit, $s)"}, + {Line: 621, Value: "bytes.HasSuffix($lit, $s)"}, + {Line: 622, Value: "strings.Contains($lit, $s)"}, + {Line: 623, Value: "bytes.Contains($lit, $s)"}, + {Line: 624, Value: "strings.TrimPrefix($lit, $s)"}, + {Line: 625, Value: "bytes.TrimPrefix($lit, $s)"}, + {Line: 626, Value: "strings.TrimSuffix($lit, $s)"}, + {Line: 627, Value: "bytes.TrimSuffix($lit, $s)"}, + {Line: 628, Value: "strings.Split($lit, $s)"}, + {Line: 629, Value: "bytes.Split($lit, $s)"}, }, ReportTemplate: "$lit and $s arguments order looks reversed", WhereExpr: ir.FilterExpr{ - Line: 625, + Line: 630, Op: ir.FilterAndOp, Src: "(m[\"lit\"].Const || m[\"lit\"].ConstSlice) &&\n\t!(m[\"s\"].Const || m[\"s\"].ConstSlice) &&\n\t!m[\"lit\"].Node.Is(`Ident`)", Args: []ir.FilterExpr{ { - Line: 625, + Line: 630, Op: ir.FilterAndOp, Src: "(m[\"lit\"].Const || m[\"lit\"].ConstSlice) &&\n\t!(m[\"s\"].Const || m[\"s\"].ConstSlice)", Args: []ir.FilterExpr{ { - Line: 625, + Line: 630, Op: ir.FilterOrOp, Src: "(m[\"lit\"].Const || m[\"lit\"].ConstSlice)", Args: []ir.FilterExpr{ { - Line: 625, + Line: 630, Op: ir.FilterVarConstOp, Src: "m[\"lit\"].Const", Value: "lit", }, { - Line: 625, + Line: 630, Op: ir.FilterVarConstSliceOp, Src: "m[\"lit\"].ConstSlice", Value: "lit", @@ -2026,22 +2031,22 @@ var PrecompiledRules = &ir.File{ }, }, { - Line: 626, + Line: 631, Op: ir.FilterNotOp, Src: "!(m[\"s\"].Const || m[\"s\"].ConstSlice)", Args: []ir.FilterExpr{{ - Line: 626, + Line: 631, Op: ir.FilterOrOp, Src: "(m[\"s\"].Const || m[\"s\"].ConstSlice)", Args: []ir.FilterExpr{ { - Line: 626, + Line: 631, Op: ir.FilterVarConstOp, Src: "m[\"s\"].Const", Value: "s", }, { - Line: 626, + Line: 631, Op: ir.FilterVarConstSliceOp, Src: "m[\"s\"].ConstSlice", Value: "s", @@ -2052,15 +2057,15 @@ var PrecompiledRules = &ir.File{ }, }, { - Line: 627, + Line: 632, Op: ir.FilterNotOp, Src: "!m[\"lit\"].Node.Is(`Ident`)", Args: []ir.FilterExpr{{ - Line: 627, + Line: 632, Op: ir.FilterVarNodeIsOp, Src: "m[\"lit\"].Node.Is(`Ident`)", Value: "lit", - Args: []ir.FilterExpr{{Line: 627, Op: ir.FilterStringOp, Src: "`Ident`", Value: "Ident"}}, + Args: []ir.FilterExpr{{Line: 632, Op: ir.FilterStringOp, Src: "`Ident`", Value: "Ident"}}, }}, }, }, @@ -2068,7 +2073,7 @@ var PrecompiledRules = &ir.File{ }}, }, { - Line: 635, + Line: 640, Name: "stringConcatSimplify", MatcherName: "m", DocTags: []string{"style", "experimental"}, @@ -2077,27 +2082,27 @@ var PrecompiledRules = &ir.File{ DocAfter: "x + \"_\" + y", Rules: []ir.Rule{ { - Line: 636, - SyntaxPatterns: []ir.PatternString{{Line: 636, Value: "strings.Join([]string{$x, $y}, \"\")"}}, + Line: 641, + SyntaxPatterns: []ir.PatternString{{Line: 641, Value: "strings.Join([]string{$x, $y}, \"\")"}}, ReportTemplate: "suggestion: $x + $y", SuggestTemplate: "$x + $y", }, { - Line: 637, - SyntaxPatterns: []ir.PatternString{{Line: 637, Value: "strings.Join([]string{$x, $y, $z}, \"\")"}}, + Line: 642, + SyntaxPatterns: []ir.PatternString{{Line: 642, Value: "strings.Join([]string{$x, $y, $z}, \"\")"}}, ReportTemplate: "suggestion: $x + $y + $z", SuggestTemplate: "$x + $y + $z", }, { - Line: 638, - SyntaxPatterns: []ir.PatternString{{Line: 638, Value: "strings.Join([]string{$x, $y}, $glue)"}}, + Line: 643, + SyntaxPatterns: []ir.PatternString{{Line: 643, Value: "strings.Join([]string{$x, $y}, $glue)"}}, ReportTemplate: "suggestion: $x + $glue + $y", 
SuggestTemplate: "$x + $glue + $y", }, }, }, { - Line: 645, + Line: 650, Name: "timeExprSimplify", MatcherName: "m", DocTags: []string{"style", "experimental"}, @@ -2106,39 +2111,39 @@ var PrecompiledRules = &ir.File{ DocAfter: "t.UnixMilli()", Rules: []ir.Rule{ { - Line: 650, - SyntaxPatterns: []ir.PatternString{{Line: 650, Value: "$t.Unix() / 1000"}}, + Line: 655, + SyntaxPatterns: []ir.PatternString{{Line: 655, Value: "$t.Unix() / 1000"}}, ReportTemplate: "use $t.UnixMilli() instead of $$", SuggestTemplate: "$t.UnixMilli()", WhereExpr: ir.FilterExpr{ - Line: 651, + Line: 656, Op: ir.FilterAndOp, Src: "m.GoVersion().GreaterEqThan(\"1.17\") && isTime(m[\"t\"])", Args: []ir.FilterExpr{ { - Line: 651, + Line: 656, Op: ir.FilterGoVersionGreaterEqThanOp, Src: "m.GoVersion().GreaterEqThan(\"1.17\")", Value: "1.17", }, { - Line: 651, + Line: 656, Op: ir.FilterOrOp, Src: "isTime(m[\"t\"])", Args: []ir.FilterExpr{ { - Line: 651, + Line: 656, Op: ir.FilterVarTypeIsOp, Src: "m[\"t\"].Type.Is(`time.Time`)", Value: "t", - Args: []ir.FilterExpr{{Line: 647, Op: ir.FilterStringOp, Src: "`time.Time`", Value: "time.Time"}}, + Args: []ir.FilterExpr{{Line: 652, Op: ir.FilterStringOp, Src: "`time.Time`", Value: "time.Time"}}, }, { - Line: 651, + Line: 656, Op: ir.FilterVarTypeIsOp, Src: "m[\"t\"].Type.Is(`*time.Time`)", Value: "t", - Args: []ir.FilterExpr{{Line: 647, Op: ir.FilterStringOp, Src: "`*time.Time`", Value: "*time.Time"}}, + Args: []ir.FilterExpr{{Line: 652, Op: ir.FilterStringOp, Src: "`*time.Time`", Value: "*time.Time"}}, }, }, }, @@ -2146,39 +2151,39 @@ var PrecompiledRules = &ir.File{ }, }, { - Line: 655, - SyntaxPatterns: []ir.PatternString{{Line: 655, Value: "$t.UnixNano() * 1000"}}, + Line: 660, + SyntaxPatterns: []ir.PatternString{{Line: 660, Value: "$t.UnixNano() * 1000"}}, ReportTemplate: "use $t.UnixMicro() instead of $$", SuggestTemplate: "$t.UnixMicro()", WhereExpr: ir.FilterExpr{ - Line: 656, + Line: 661, Op: ir.FilterAndOp, Src: "m.GoVersion().GreaterEqThan(\"1.17\") && isTime(m[\"t\"])", Args: []ir.FilterExpr{ { - Line: 656, + Line: 661, Op: ir.FilterGoVersionGreaterEqThanOp, Src: "m.GoVersion().GreaterEqThan(\"1.17\")", Value: "1.17", }, { - Line: 656, + Line: 661, Op: ir.FilterOrOp, Src: "isTime(m[\"t\"])", Args: []ir.FilterExpr{ { - Line: 656, + Line: 661, Op: ir.FilterVarTypeIsOp, Src: "m[\"t\"].Type.Is(`time.Time`)", Value: "t", - Args: []ir.FilterExpr{{Line: 647, Op: ir.FilterStringOp, Src: "`time.Time`", Value: "time.Time"}}, + Args: []ir.FilterExpr{{Line: 652, Op: ir.FilterStringOp, Src: "`time.Time`", Value: "time.Time"}}, }, { - Line: 656, + Line: 661, Op: ir.FilterVarTypeIsOp, Src: "m[\"t\"].Type.Is(`*time.Time`)", Value: "t", - Args: []ir.FilterExpr{{Line: 647, Op: ir.FilterStringOp, Src: "`*time.Time`", Value: "*time.Time"}}, + Args: []ir.FilterExpr{{Line: 652, Op: ir.FilterStringOp, Src: "`*time.Time`", Value: "*time.Time"}}, }, }, }, @@ -2188,7 +2193,7 @@ var PrecompiledRules = &ir.File{ }, }, { - Line: 665, + Line: 670, Name: "exposedSyncMutex", MatcherName: "m", DocTags: []string{"style", "experimental"}, @@ -2197,57 +2202,57 @@ var PrecompiledRules = &ir.File{ DocAfter: "type Foo struct{ ...; mu sync.Mutex; ... 
}", Rules: []ir.Rule{ { - Line: 670, - SyntaxPatterns: []ir.PatternString{{Line: 670, Value: "type $x struct { $*_; sync.Mutex; $*_ }"}}, + Line: 675, + SyntaxPatterns: []ir.PatternString{{Line: 675, Value: "type $x struct { $*_; sync.Mutex; $*_ }"}}, ReportTemplate: "don't embed sync.Mutex", WhereExpr: ir.FilterExpr{ - Line: 671, + Line: 676, Op: ir.FilterVarTextMatchesOp, Src: "isExported(m[\"x\"])", Value: "x", - Args: []ir.FilterExpr{{Line: 667, Op: ir.FilterStringOp, Src: "`^\\p{Lu}`", Value: "^\\p{Lu}"}}, + Args: []ir.FilterExpr{{Line: 672, Op: ir.FilterStringOp, Src: "`^\\p{Lu}`", Value: "^\\p{Lu}"}}, }, }, { - Line: 674, - SyntaxPatterns: []ir.PatternString{{Line: 674, Value: "type $x struct { $*_; *sync.Mutex; $*_ }"}}, + Line: 679, + SyntaxPatterns: []ir.PatternString{{Line: 679, Value: "type $x struct { $*_; *sync.Mutex; $*_ }"}}, ReportTemplate: "don't embed *sync.Mutex", WhereExpr: ir.FilterExpr{ - Line: 675, + Line: 680, Op: ir.FilterVarTextMatchesOp, Src: "isExported(m[\"x\"])", Value: "x", - Args: []ir.FilterExpr{{Line: 667, Op: ir.FilterStringOp, Src: "`^\\p{Lu}`", Value: "^\\p{Lu}"}}, + Args: []ir.FilterExpr{{Line: 672, Op: ir.FilterStringOp, Src: "`^\\p{Lu}`", Value: "^\\p{Lu}"}}, }, }, { - Line: 678, - SyntaxPatterns: []ir.PatternString{{Line: 678, Value: "type $x struct { $*_; sync.RWMutex; $*_ }"}}, + Line: 683, + SyntaxPatterns: []ir.PatternString{{Line: 683, Value: "type $x struct { $*_; sync.RWMutex; $*_ }"}}, ReportTemplate: "don't embed sync.RWMutex", WhereExpr: ir.FilterExpr{ - Line: 679, + Line: 684, Op: ir.FilterVarTextMatchesOp, Src: "isExported(m[\"x\"])", Value: "x", - Args: []ir.FilterExpr{{Line: 667, Op: ir.FilterStringOp, Src: "`^\\p{Lu}`", Value: "^\\p{Lu}"}}, + Args: []ir.FilterExpr{{Line: 672, Op: ir.FilterStringOp, Src: "`^\\p{Lu}`", Value: "^\\p{Lu}"}}, }, }, { - Line: 682, - SyntaxPatterns: []ir.PatternString{{Line: 682, Value: "type $x struct { $*_; *sync.RWMutex; $*_ }"}}, + Line: 687, + SyntaxPatterns: []ir.PatternString{{Line: 687, Value: "type $x struct { $*_; *sync.RWMutex; $*_ }"}}, ReportTemplate: "don't embed *sync.RWMutex", WhereExpr: ir.FilterExpr{ - Line: 683, + Line: 688, Op: ir.FilterVarTextMatchesOp, Src: "isExported(m[\"x\"])", Value: "x", - Args: []ir.FilterExpr{{Line: 667, Op: ir.FilterStringOp, Src: "`^\\p{Lu}`", Value: "^\\p{Lu}"}}, + Args: []ir.FilterExpr{{Line: 672, Op: ir.FilterStringOp, Src: "`^\\p{Lu}`", Value: "^\\p{Lu}"}}, }, }, }, }, { - Line: 691, + Line: 696, Name: "badSorting", MatcherName: "m", DocTags: []string{"diagnostic", "experimental"}, @@ -2256,48 +2261,48 @@ var PrecompiledRules = &ir.File{ DocAfter: "sort.Strings(xs)", Rules: []ir.Rule{ { - Line: 692, - SyntaxPatterns: []ir.PatternString{{Line: 692, Value: "$x = sort.IntSlice($x)"}}, + Line: 697, + SyntaxPatterns: []ir.PatternString{{Line: 697, Value: "$x = sort.IntSlice($x)"}}, ReportTemplate: "suspicious sort.IntSlice usage, maybe sort.Ints was intended?", SuggestTemplate: "sort.Ints($x)", WhereExpr: ir.FilterExpr{ - Line: 693, + Line: 698, Op: ir.FilterVarTypeIsOp, Src: "m[\"x\"].Type.Is(`[]int`)", Value: "x", - Args: []ir.FilterExpr{{Line: 693, Op: ir.FilterStringOp, Src: "`[]int`", Value: "[]int"}}, + Args: []ir.FilterExpr{{Line: 698, Op: ir.FilterStringOp, Src: "`[]int`", Value: "[]int"}}, }, }, { - Line: 697, - SyntaxPatterns: []ir.PatternString{{Line: 697, Value: "$x = sort.Float64Slice($x)"}}, + Line: 702, + SyntaxPatterns: []ir.PatternString{{Line: 702, Value: "$x = sort.Float64Slice($x)"}}, ReportTemplate: "suspicious sort.Float64s usage, maybe 
sort.Float64s was intended?", SuggestTemplate: "sort.Float64s($x)", WhereExpr: ir.FilterExpr{ - Line: 698, + Line: 703, Op: ir.FilterVarTypeIsOp, Src: "m[\"x\"].Type.Is(`[]float64`)", Value: "x", - Args: []ir.FilterExpr{{Line: 698, Op: ir.FilterStringOp, Src: "`[]float64`", Value: "[]float64"}}, + Args: []ir.FilterExpr{{Line: 703, Op: ir.FilterStringOp, Src: "`[]float64`", Value: "[]float64"}}, }, }, { - Line: 702, - SyntaxPatterns: []ir.PatternString{{Line: 702, Value: "$x = sort.StringSlice($x)"}}, + Line: 707, + SyntaxPatterns: []ir.PatternString{{Line: 707, Value: "$x = sort.StringSlice($x)"}}, ReportTemplate: "suspicious sort.StringSlice usage, maybe sort.Strings was intended?", SuggestTemplate: "sort.Strings($x)", WhereExpr: ir.FilterExpr{ - Line: 703, + Line: 708, Op: ir.FilterVarTypeIsOp, Src: "m[\"x\"].Type.Is(`[]string`)", Value: "x", - Args: []ir.FilterExpr{{Line: 703, Op: ir.FilterStringOp, Src: "`[]string`", Value: "[]string"}}, + Args: []ir.FilterExpr{{Line: 708, Op: ir.FilterStringOp, Src: "`[]string`", Value: "[]string"}}, }, }, }, }, { - Line: 712, + Line: 717, Name: "externalErrorReassign", MatcherName: "m", DocTags: []string{"diagnostic", "experimental"}, @@ -2305,34 +2310,34 @@ var PrecompiledRules = &ir.File{ DocBefore: "io.EOF = nil", DocAfter: "/* don't do it */", Rules: []ir.Rule{{ - Line: 713, - SyntaxPatterns: []ir.PatternString{{Line: 713, Value: "$pkg.$err = $x"}}, + Line: 718, + SyntaxPatterns: []ir.PatternString{{Line: 718, Value: "$pkg.$err = $x"}}, ReportTemplate: "suspicious reassignment of error from another package", WhereExpr: ir.FilterExpr{ - Line: 714, + Line: 719, Op: ir.FilterAndOp, Src: "m[\"err\"].Type.Is(`error`) && m[\"pkg\"].Object.Is(`PkgName`)", Args: []ir.FilterExpr{ { - Line: 714, + Line: 719, Op: ir.FilterVarTypeIsOp, Src: "m[\"err\"].Type.Is(`error`)", Value: "err", - Args: []ir.FilterExpr{{Line: 714, Op: ir.FilterStringOp, Src: "`error`", Value: "error"}}, + Args: []ir.FilterExpr{{Line: 719, Op: ir.FilterStringOp, Src: "`error`", Value: "error"}}, }, { - Line: 714, + Line: 719, Op: ir.FilterVarObjectIsOp, Src: "m[\"pkg\"].Object.Is(`PkgName`)", Value: "pkg", - Args: []ir.FilterExpr{{Line: 714, Op: ir.FilterStringOp, Src: "`PkgName`", Value: "PkgName"}}, + Args: []ir.FilterExpr{{Line: 719, Op: ir.FilterStringOp, Src: "`PkgName`", Value: "PkgName"}}, }, }, }, }}, }, { - Line: 722, + Line: 727, Name: "emptyDecl", MatcherName: "m", DocTags: []string{"diagnostic", "experimental"}, @@ -2341,42 +2346,42 @@ var PrecompiledRules = &ir.File{ DocAfter: "/* nothing */", Rules: []ir.Rule{ { - Line: 723, - SyntaxPatterns: []ir.PatternString{{Line: 723, Value: "var()"}}, + Line: 728, + SyntaxPatterns: []ir.PatternString{{Line: 728, Value: "var()"}}, ReportTemplate: "empty var() block", }, { - Line: 724, - SyntaxPatterns: []ir.PatternString{{Line: 724, Value: "const()"}}, + Line: 729, + SyntaxPatterns: []ir.PatternString{{Line: 729, Value: "const()"}}, ReportTemplate: "empty const() block", }, { - Line: 725, - SyntaxPatterns: []ir.PatternString{{Line: 725, Value: "type()"}}, + Line: 730, + SyntaxPatterns: []ir.PatternString{{Line: 730, Value: "type()"}}, ReportTemplate: "empty type() block", }, }, }, { - Line: 732, + Line: 737, Name: "dynamicFmtString", MatcherName: "m", DocTags: []string{"diagnostic", "experimental"}, DocSummary: "Detects suspicious formatting strings usage", DocBefore: "fmt.Errorf(msg)", - DocAfter: "fmt.Errorf(\"%s\", msg)", + DocAfter: "errors.New(msg) or fmt.Errorf(\"%s\", msg)", Rules: []ir.Rule{ { - Line: 733, - SyntaxPatterns: 
[]ir.PatternString{{Line: 733, Value: "fmt.Errorf($f)"}}, + Line: 738, + SyntaxPatterns: []ir.PatternString{{Line: 738, Value: "fmt.Errorf($f)"}}, ReportTemplate: "use errors.New($f) or fmt.Errorf(\"%s\", $f) instead", SuggestTemplate: "errors.New($f)", WhereExpr: ir.FilterExpr{ - Line: 734, + Line: 739, Op: ir.FilterNotOp, Src: "!m[\"f\"].Const", Args: []ir.FilterExpr{{ - Line: 734, + Line: 739, Op: ir.FilterVarConstOp, Src: "m[\"f\"].Const", Value: "f", @@ -2384,15 +2389,15 @@ var PrecompiledRules = &ir.File{ }, }, { - Line: 738, - SyntaxPatterns: []ir.PatternString{{Line: 738, Value: "fmt.Errorf($f($*args))"}}, + Line: 743, + SyntaxPatterns: []ir.PatternString{{Line: 743, Value: "fmt.Errorf($f($*args))"}}, ReportTemplate: "use errors.New($f($*args)) or fmt.Errorf(\"%s\", $f($*args)) instead", SuggestTemplate: "errors.New($f($*args))", }, }, }, { - Line: 747, + Line: 752, Name: "stringsCompare", MatcherName: "m", DocTags: []string{"style", "experimental"}, @@ -2401,25 +2406,25 @@ var PrecompiledRules = &ir.File{ DocAfter: "x < y", Rules: []ir.Rule{ { - Line: 748, - SyntaxPatterns: []ir.PatternString{{Line: 748, Value: "strings.Compare($s1, $s2) == 0"}}, + Line: 753, + SyntaxPatterns: []ir.PatternString{{Line: 753, Value: "strings.Compare($s1, $s2) == 0"}}, ReportTemplate: "suggestion: $s1 == $s2", SuggestTemplate: "$s1 == $s2", }, { - Line: 751, + Line: 756, SyntaxPatterns: []ir.PatternString{ - {Line: 751, Value: "strings.Compare($s1, $s2) == -1"}, - {Line: 752, Value: "strings.Compare($s1, $s2) < 0"}, + {Line: 756, Value: "strings.Compare($s1, $s2) == -1"}, + {Line: 757, Value: "strings.Compare($s1, $s2) < 0"}, }, ReportTemplate: "suggestion: $s1 < $s2", SuggestTemplate: "$s1 < $s2", }, { - Line: 755, + Line: 760, SyntaxPatterns: []ir.PatternString{ - {Line: 755, Value: "strings.Compare($s1, $s2) == 1"}, - {Line: 756, Value: "strings.Compare($s1, $s2) > 0"}, + {Line: 760, Value: "strings.Compare($s1, $s2) == 1"}, + {Line: 761, Value: "strings.Compare($s1, $s2) > 0"}, }, ReportTemplate: "suggestion: $s1 > $s2", SuggestTemplate: "$s1 > $s2", @@ -2427,7 +2432,7 @@ var PrecompiledRules = &ir.File{ }, }, { - Line: 764, + Line: 769, Name: "uncheckedInlineErr", MatcherName: "m", DocTags: []string{"diagnostic", "experimental"}, @@ -2435,47 +2440,47 @@ var PrecompiledRules = &ir.File{ DocBefore: "if err := expr(); err2 != nil { /*...*/ }", DocAfter: "if err := expr(); err != nil { /*...*/ }", Rules: []ir.Rule{{ - Line: 765, + Line: 770, SyntaxPatterns: []ir.PatternString{ - {Line: 766, Value: "if $err := $_($*_); $err2 != nil { $*_ }"}, - {Line: 767, Value: "if $err = $_($*_); $err2 != nil { $*_ }"}, - {Line: 768, Value: "if $*_, $err := $_($*_); $err2 != nil { $*_ }"}, - {Line: 769, Value: "if $*_, $err = $_($*_); $err2 != nil { $*_ }"}, + {Line: 771, Value: "if $err := $_($*_); $err2 != nil { $*_ }"}, + {Line: 772, Value: "if $err = $_($*_); $err2 != nil { $*_ }"}, + {Line: 773, Value: "if $*_, $err := $_($*_); $err2 != nil { $*_ }"}, + {Line: 774, Value: "if $*_, $err = $_($*_); $err2 != nil { $*_ }"}, }, ReportTemplate: "$err error is unchecked, maybe intended to check it instead of $err2", WhereExpr: ir.FilterExpr{ - Line: 770, + Line: 775, Op: ir.FilterAndOp, Src: "m[\"err\"].Type.Implements(\"error\") && m[\"err2\"].Type.Implements(\"error\") &&\n\tm[\"err\"].Text != m[\"err2\"].Text", Args: []ir.FilterExpr{ { - Line: 770, + Line: 775, Op: ir.FilterAndOp, Src: "m[\"err\"].Type.Implements(\"error\") && m[\"err2\"].Type.Implements(\"error\")", Args: []ir.FilterExpr{ { - Line: 770, + Line: 
775, Op: ir.FilterVarTypeImplementsOp, Src: "m[\"err\"].Type.Implements(\"error\")", Value: "err", - Args: []ir.FilterExpr{{Line: 770, Op: ir.FilterStringOp, Src: "\"error\"", Value: "error"}}, + Args: []ir.FilterExpr{{Line: 775, Op: ir.FilterStringOp, Src: "\"error\"", Value: "error"}}, }, { - Line: 770, + Line: 775, Op: ir.FilterVarTypeImplementsOp, Src: "m[\"err2\"].Type.Implements(\"error\")", Value: "err2", - Args: []ir.FilterExpr{{Line: 770, Op: ir.FilterStringOp, Src: "\"error\"", Value: "error"}}, + Args: []ir.FilterExpr{{Line: 775, Op: ir.FilterStringOp, Src: "\"error\"", Value: "error"}}, }, }, }, { - Line: 771, + Line: 776, Op: ir.FilterNeqOp, Src: "m[\"err\"].Text != m[\"err2\"].Text", Args: []ir.FilterExpr{ - {Line: 771, Op: ir.FilterVarTextOp, Src: "m[\"err\"].Text", Value: "err"}, - {Line: 771, Op: ir.FilterVarTextOp, Src: "m[\"err2\"].Text", Value: "err2"}, + {Line: 776, Op: ir.FilterVarTextOp, Src: "m[\"err\"].Text", Value: "err"}, + {Line: 776, Op: ir.FilterVarTextOp, Src: "m[\"err2\"].Text", Value: "err2"}, }, }, }, @@ -2484,7 +2489,7 @@ var PrecompiledRules = &ir.File{ }}, }, { - Line: 780, + Line: 785, Name: "badSyncOnceFunc", MatcherName: "m", DocTags: []string{"diagnostic", "experimental"}, @@ -2493,22 +2498,22 @@ var PrecompiledRules = &ir.File{ DocAfter: "fooOnce := sync.OnceFunc(foo); ...; fooOnce()", Rules: []ir.Rule{ { - Line: 781, - SyntaxPatterns: []ir.PatternString{{Line: 781, Value: "$*_; sync.OnceFunc($x); $*_;"}}, + Line: 786, + SyntaxPatterns: []ir.PatternString{{Line: 786, Value: "$*_; sync.OnceFunc($x); $*_;"}}, ReportTemplate: "possible sync.OnceFunc misuse, sync.OnceFunc($x) result is not used", WhereExpr: ir.FilterExpr{ - Line: 783, + Line: 788, Op: ir.FilterGoVersionGreaterEqThanOp, Src: "m.GoVersion().GreaterEqThan(\"1.21\")", Value: "1.21", }, }, { - Line: 785, - SyntaxPatterns: []ir.PatternString{{Line: 785, Value: "sync.OnceFunc($x)()"}}, + Line: 790, + SyntaxPatterns: []ir.PatternString{{Line: 790, Value: "sync.OnceFunc($x)()"}}, ReportTemplate: "possible sync.OnceFunc misuse, consider to assign sync.OnceFunc($x) to a variable", WhereExpr: ir.FilterExpr{ - Line: 787, + Line: 792, Op: ir.FilterGoVersionGreaterEqThanOp, Src: "m.GoVersion().GreaterEqThan(\"1.21\")", Value: "1.21", diff --git a/tools/vendor/github.com/go-critic/go-critic/checkers/sqlQuery_checker.go b/tools/vendor/github.com/go-critic/go-critic/checkers/sqlQuery_checker.go index 8a132b586..02a2370d3 100644 --- a/tools/vendor/github.com/go-critic/go-critic/checkers/sqlQuery_checker.go +++ b/tools/vendor/github.com/go-critic/go-critic/checkers/sqlQuery_checker.go @@ -125,6 +125,13 @@ func (c *sqlQueryChecker) typeHasExecMethod(typ types.Type) bool { return true } } + case *types.Alias: + switch typ := typ.Underlying().(type) { + case *types.Interface: + return c.typeHasExecMethod(typ) + default: + // TODO(cristaloleg): is there something else to handle? + } case *types.Interface: for i := 0; i < typ.NumMethods(); i++ { if c.funcIsExec(typ.Method(i)) { diff --git a/tools/vendor/github.com/go-critic/go-critic/checkers/utils.go b/tools/vendor/github.com/go-critic/go-critic/checkers/utils.go index 6e12cf9b3..5740f0d4d 100644 --- a/tools/vendor/github.com/go-critic/go-critic/checkers/utils.go +++ b/tools/vendor/github.com/go-critic/go-critic/checkers/utils.go @@ -9,183 +9,183 @@ import ( ) // goStdlib contains `go list std` command output list. +// `internal` and `vendor` packages are excluded. // Used to detect packages that belong to standard Go packages distribution. 
var goStdlib = map[string]bool{ - "archive/tar": true, - "archive/zip": true, - "bufio": true, - "bytes": true, - "compress/bzip2": true, - "compress/flate": true, - "compress/gzip": true, - "compress/lzw": true, - "compress/zlib": true, - "container/heap": true, - "container/list": true, - "container/ring": true, - "context": true, - "crypto": true, - "crypto/aes": true, - "crypto/cipher": true, - "crypto/des": true, - "crypto/dsa": true, - "crypto/ecdsa": true, - "crypto/elliptic": true, - "crypto/hmac": true, - "crypto/internal/randutil": true, - "crypto/internal/subtle": true, - "crypto/md5": true, - "crypto/rand": true, - "crypto/rc4": true, - "crypto/rsa": true, - "crypto/sha1": true, - "crypto/sha256": true, - "crypto/sha512": true, - "crypto/subtle": true, - "crypto/tls": true, - "crypto/x509": true, - "crypto/x509/pkix": true, - "database/sql": true, - "database/sql/driver": true, - "debug/dwarf": true, - "debug/elf": true, - "debug/gosym": true, - "debug/macho": true, - "debug/pe": true, - "debug/plan9obj": true, - "encoding": true, - "encoding/ascii85": true, - "encoding/asn1": true, - "encoding/base32": true, - "encoding/base64": true, - "encoding/binary": true, - "encoding/csv": true, - "encoding/gob": true, - "encoding/hex": true, - "encoding/json": true, - "encoding/pem": true, - "encoding/xml": true, - "errors": true, - "expvar": true, - "flag": true, - "fmt": true, - "go/ast": true, - "go/build": true, - "go/constant": true, - "go/doc": true, - "go/format": true, - "go/importer": true, - "go/internal/gccgoimporter": true, - "go/internal/gcimporter": true, - "go/internal/srcimporter": true, - "go/parser": true, - "go/printer": true, - "go/scanner": true, - "go/token": true, - "go/types": true, - "hash": true, - "hash/adler32": true, - "hash/crc32": true, - "hash/crc64": true, - "hash/fnv": true, - "html": true, - "html/template": true, - "image": true, - "image/color": true, - "image/color/palette": true, - "image/draw": true, - "image/gif": true, - "image/internal/imageutil": true, - "image/jpeg": true, - "image/png": true, - "index/suffixarray": true, - "internal/bytealg": true, - "internal/cpu": true, - "internal/nettrace": true, - "internal/poll": true, - "internal/race": true, - "internal/singleflight": true, - "internal/syscall/unix": true, - "internal/syscall/windows": true, - "internal/syscall/windows/registry": true, - "internal/syscall/windows/sysdll": true, - "internal/testenv": true, - "internal/testlog": true, - "internal/trace": true, - "io": true, - "io/ioutil": true, - "log": true, - "log/syslog": true, - "math": true, - "math/big": true, - "math/bits": true, - "math/cmplx": true, - "math/rand": true, - "mime": true, - "mime/multipart": true, - "mime/quotedprintable": true, - "net": true, - "net/http": true, - "net/http/cgi": true, - "net/http/cookiejar": true, - "net/http/fcgi": true, - "net/http/httptest": true, - "net/http/httptrace": true, - "net/http/httputil": true, - "net/http/internal": true, - "net/http/pprof": true, - "net/internal/socktest": true, - "net/mail": true, - "net/rpc": true, - "net/rpc/jsonrpc": true, - "net/smtp": true, - "net/textproto": true, - "net/url": true, - "os": true, - "os/exec": true, - "os/signal": true, - "os/signal/internal/pty": true, - "os/user": true, - "path": true, - "path/filepath": true, - "plugin": true, - "reflect": true, - "regexp": true, - "regexp/syntax": true, - "runtime": true, - "runtime/cgo": true, - "runtime/debug": true, - "runtime/internal/atomic": true, - "runtime/internal/sys": true, - 
"runtime/pprof": true, - "runtime/pprof/internal/profile": true, - "runtime/race": true, - "runtime/trace": true, - "sort": true, - "strconv": true, - "strings": true, - "sync": true, - "sync/atomic": true, - "syscall": true, - "testing": true, - "testing/internal/testdeps": true, - "testing/iotest": true, - "testing/quick": true, - "text/scanner": true, - "text/tabwriter": true, - "text/template": true, - "text/template/parse": true, - "time": true, - "unicode": true, - "unicode/utf16": true, - "unicode/utf8": true, - "unsafe": true, + "archive/tar": true, + "archive/zip": true, + "bufio": true, + "bytes": true, + "cmp": true, + "compress/bzip2": true, + "compress/flate": true, + "compress/gzip": true, + "compress/lzw": true, + "compress/zlib": true, + "container/heap": true, + "container/list": true, + "container/ring": true, + "context": true, + "crypto": true, + "crypto/aes": true, + "crypto/cipher": true, + "crypto/des": true, + "crypto/dsa": true, + "crypto/ecdh": true, + "crypto/ecdsa": true, + "crypto/ed25519": true, + "crypto/elliptic": true, + "crypto/hmac": true, + "crypto/md5": true, + "crypto/rand": true, + "crypto/rc4": true, + "crypto/rsa": true, + "crypto/sha1": true, + "crypto/sha256": true, + "crypto/sha512": true, + "crypto/subtle": true, + "crypto/tls": true, + "crypto/x509": true, + "crypto/x509/pkix": true, + "database/sql": true, + "database/sql/driver": true, + "debug/buildinfo": true, + "debug/dwarf": true, + "debug/elf": true, + "debug/gosym": true, + "debug/macho": true, + "debug/pe": true, + "debug/plan9obj": true, + "embed": true, + "encoding": true, + "encoding/ascii85": true, + "encoding/asn1": true, + "encoding/base32": true, + "encoding/base64": true, + "encoding/binary": true, + "encoding/csv": true, + "encoding/gob": true, + "encoding/hex": true, + "encoding/json": true, + "encoding/pem": true, + "encoding/xml": true, + "errors": true, + "expvar": true, + "flag": true, + "fmt": true, + "go/ast": true, + "go/build": true, + "go/build/constraint": true, + "go/constant": true, + "go/doc": true, + "go/doc/comment": true, + "go/format": true, + "go/importer": true, + "go/parser": true, + "go/printer": true, + "go/scanner": true, + "go/token": true, + "go/types": true, + "go/version": true, + "hash": true, + "hash/adler32": true, + "hash/crc32": true, + "hash/crc64": true, + "hash/fnv": true, + "hash/maphash": true, + "html": true, + "html/template": true, + "image": true, + "image/color": true, + "image/color/palette": true, + "image/draw": true, + "image/gif": true, + "image/jpeg": true, + "image/png": true, + "index/suffixarray": true, + "io": true, + "io/fs": true, + "io/ioutil": true, + "iter": true, + "log": true, + "log/slog": true, + "log/syslog": true, + "maps": true, + "math": true, + "math/big": true, + "math/bits": true, + "math/cmplx": true, + "math/rand": true, + "math/rand/v2": true, + "mime": true, + "mime/multipart": true, + "mime/quotedprintable": true, + "net": true, + "net/http": true, + "net/http/cgi": true, + "net/http/cookiejar": true, + "net/http/fcgi": true, + "net/http/httptest": true, + "net/http/httptrace": true, + "net/http/httputil": true, + "net/http/pprof": true, + "net/mail": true, + "net/netip": true, + "net/rpc": true, + "net/rpc/jsonrpc": true, + "net/smtp": true, + "net/textproto": true, + "net/url": true, + "os": true, + "os/exec": true, + "os/signal": true, + "os/user": true, + "path": true, + "path/filepath": true, + "plugin": true, + "reflect": true, + "regexp": true, + "regexp/syntax": true, + "runtime": true, + 
"runtime/cgo": true, + "runtime/coverage": true, + "runtime/debug": true, + "runtime/metrics": true, + "runtime/pprof": true, + "runtime/race": true, + "runtime/trace": true, + "slices": true, + "sort": true, + "strconv": true, + "strings": true, + "structs": true, + "sync": true, + "sync/atomic": true, + "syscall": true, + "testing": true, + "testing/fstest": true, + "testing/iotest": true, + "testing/quick": true, + "testing/slogtest": true, + "text/scanner": true, + "text/tabwriter": true, + "text/template": true, + "text/template/parse": true, + "time": true, + "time/tzdata": true, + "unicode": true, + "unicode/utf16": true, + "unicode/utf8": true, + "unique": true, + "unsafe": true, } var goBuiltins = map[string]bool{ // Types + "any": true, "bool": true, "byte": true, + "comparable": true, "complex64": true, "complex128": true, "error": true, @@ -216,6 +216,7 @@ var goBuiltins = map[string]bool{ // Functions "append": true, "cap": true, + "clear": true, "close": true, "complex": true, "copy": true, diff --git a/tools/vendor/github.com/golang/protobuf/AUTHORS b/tools/vendor/github.com/golang/protobuf/AUTHORS deleted file mode 100644 index 15167cd74..000000000 --- a/tools/vendor/github.com/golang/protobuf/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/tools/vendor/github.com/golang/protobuf/CONTRIBUTORS b/tools/vendor/github.com/golang/protobuf/CONTRIBUTORS deleted file mode 100644 index 1c4577e96..000000000 --- a/tools/vendor/github.com/golang/protobuf/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/tools/vendor/github.com/golang/protobuf/proto/buffer.go b/tools/vendor/github.com/golang/protobuf/proto/buffer.go deleted file mode 100644 index e810e6fea..000000000 --- a/tools/vendor/github.com/golang/protobuf/proto/buffer.go +++ /dev/null @@ -1,324 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proto - -import ( - "errors" - "fmt" - - "google.golang.org/protobuf/encoding/prototext" - "google.golang.org/protobuf/encoding/protowire" - "google.golang.org/protobuf/runtime/protoimpl" -) - -const ( - WireVarint = 0 - WireFixed32 = 5 - WireFixed64 = 1 - WireBytes = 2 - WireStartGroup = 3 - WireEndGroup = 4 -) - -// EncodeVarint returns the varint encoded bytes of v. -func EncodeVarint(v uint64) []byte { - return protowire.AppendVarint(nil, v) -} - -// SizeVarint returns the length of the varint encoded bytes of v. -// This is equal to len(EncodeVarint(v)). -func SizeVarint(v uint64) int { - return protowire.SizeVarint(v) -} - -// DecodeVarint parses a varint encoded integer from b, -// returning the integer value and the length of the varint. -// It returns (0, 0) if there is a parse error. -func DecodeVarint(b []byte) (uint64, int) { - v, n := protowire.ConsumeVarint(b) - if n < 0 { - return 0, 0 - } - return v, n -} - -// Buffer is a buffer for encoding and decoding the protobuf wire format. -// It may be reused between invocations to reduce memory usage. 
-type Buffer struct {
-	buf           []byte
-	idx           int
-	deterministic bool
-}
-
-// NewBuffer allocates a new Buffer initialized with buf,
-// where the contents of buf are considered the unread portion of the buffer.
-func NewBuffer(buf []byte) *Buffer {
-	return &Buffer{buf: buf}
-}
-
-// SetDeterministic specifies whether to use deterministic serialization.
-//
-// Deterministic serialization guarantees that for a given binary, equal
-// messages will always be serialized to the same bytes. This implies:
-//
-//   - Repeated serialization of a message will return the same bytes.
-//   - Different processes of the same binary (which may be executing on
-//     different machines) will serialize equal messages to the same bytes.
-//
-// Note that the deterministic serialization is NOT canonical across
-// languages. It is not guaranteed to remain stable over time. It is unstable
-// across different builds with schema changes due to unknown fields.
-// Users who need canonical serialization (e.g., persistent storage in a
-// canonical form, fingerprinting, etc.) should define their own
-// canonicalization specification and implement their own serializer rather
-// than relying on this API.
-//
-// If deterministic serialization is requested, map entries will be sorted
-// by keys in lexicographical order. This is an implementation detail and
-// subject to change.
-func (b *Buffer) SetDeterministic(deterministic bool) {
-	b.deterministic = deterministic
-}
-
-// SetBuf sets buf as the internal buffer,
-// where the contents of buf are considered the unread portion of the buffer.
-func (b *Buffer) SetBuf(buf []byte) {
-	b.buf = buf
-	b.idx = 0
-}
-
-// Reset clears the internal buffer of all written and unread data.
-func (b *Buffer) Reset() {
-	b.buf = b.buf[:0]
-	b.idx = 0
-}
-
-// Bytes returns the internal buffer.
-func (b *Buffer) Bytes() []byte {
-	return b.buf
-}
-
-// Unread returns the unread portion of the buffer.
-func (b *Buffer) Unread() []byte {
-	return b.buf[b.idx:]
-}
-
-// Marshal appends the wire-format encoding of m to the buffer.
-func (b *Buffer) Marshal(m Message) error {
-	var err error
-	b.buf, err = marshalAppend(b.buf, m, b.deterministic)
-	return err
-}
-
-// Unmarshal parses the wire-format message in the buffer and
-// places the decoded results in m.
-// It does not reset m before unmarshaling.
-func (b *Buffer) Unmarshal(m Message) error {
-	err := UnmarshalMerge(b.Unread(), m)
-	b.idx = len(b.buf)
-	return err
-}
-
-type unknownFields struct{ XXX_unrecognized protoimpl.UnknownFields }
-
-func (m *unknownFields) String() string { panic("not implemented") }
-func (m *unknownFields) Reset()         { panic("not implemented") }
-func (m *unknownFields) ProtoMessage()  { panic("not implemented") }
-
-// DebugPrint dumps the encoded bytes of b with a header and footer including s
-// to stdout. This is only intended for debugging.
-func (*Buffer) DebugPrint(s string, b []byte) {
-	m := MessageReflect(new(unknownFields))
-	m.SetUnknown(b)
-	b, _ = prototext.MarshalOptions{AllowPartial: true, Indent: "\t"}.Marshal(m.Interface())
-	fmt.Printf("==== %s ====\n%s==== %s ====\n", s, b, s)
-}
-
-// EncodeVarint appends an unsigned varint encoding to the buffer.
-func (b *Buffer) EncodeVarint(v uint64) error {
-	b.buf = protowire.AppendVarint(b.buf, v)
-	return nil
-}
-
-// EncodeZigzag32 appends a 32-bit zig-zag varint encoding to the buffer.
-func (b *Buffer) EncodeZigzag32(v uint64) error {
-	return b.EncodeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
-}
-
-// EncodeZigzag64 appends a 64-bit zig-zag varint encoding to the buffer.
-func (b *Buffer) EncodeZigzag64(v uint64) error {
-	return b.EncodeVarint(uint64((uint64(v) << 1) ^ uint64((int64(v) >> 63))))
-}
-
-// EncodeFixed32 appends a 32-bit little-endian integer to the buffer.
-func (b *Buffer) EncodeFixed32(v uint64) error {
-	b.buf = protowire.AppendFixed32(b.buf, uint32(v))
-	return nil
-}
-
-// EncodeFixed64 appends a 64-bit little-endian integer to the buffer.
-func (b *Buffer) EncodeFixed64(v uint64) error {
-	b.buf = protowire.AppendFixed64(b.buf, uint64(v))
-	return nil
-}
-
-// EncodeRawBytes appends length-prefixed raw bytes to the buffer.
-func (b *Buffer) EncodeRawBytes(v []byte) error {
-	b.buf = protowire.AppendBytes(b.buf, v)
-	return nil
-}
-
-// EncodeStringBytes appends length-prefixed raw bytes to the buffer.
-// It does not validate whether v contains valid UTF-8.
-func (b *Buffer) EncodeStringBytes(v string) error {
-	b.buf = protowire.AppendString(b.buf, v)
-	return nil
-}
-
-// EncodeMessage appends a length-prefixed encoded message to the buffer.
-func (b *Buffer) EncodeMessage(m Message) error {
-	var err error
-	b.buf = protowire.AppendVarint(b.buf, uint64(Size(m)))
-	b.buf, err = marshalAppend(b.buf, m, b.deterministic)
-	return err
-}
-
-// DecodeVarint consumes an encoded unsigned varint from the buffer.
-func (b *Buffer) DecodeVarint() (uint64, error) {
-	v, n := protowire.ConsumeVarint(b.buf[b.idx:])
-	if n < 0 {
-		return 0, protowire.ParseError(n)
-	}
-	b.idx += n
-	return uint64(v), nil
-}
-
-// DecodeZigzag32 consumes an encoded 32-bit zig-zag varint from the buffer.
-func (b *Buffer) DecodeZigzag32() (uint64, error) {
-	v, err := b.DecodeVarint()
-	if err != nil {
-		return 0, err
-	}
-	return uint64((uint32(v) >> 1) ^ uint32((int32(v&1)<<31)>>31)), nil
-}
-
-// DecodeZigzag64 consumes an encoded 64-bit zig-zag varint from the buffer.
-func (b *Buffer) DecodeZigzag64() (uint64, error) {
-	v, err := b.DecodeVarint()
-	if err != nil {
-		return 0, err
-	}
-	return uint64((uint64(v) >> 1) ^ uint64((int64(v&1)<<63)>>63)), nil
-}
-
-// DecodeFixed32 consumes a 32-bit little-endian integer from the buffer.
-func (b *Buffer) DecodeFixed32() (uint64, error) {
-	v, n := protowire.ConsumeFixed32(b.buf[b.idx:])
-	if n < 0 {
-		return 0, protowire.ParseError(n)
-	}
-	b.idx += n
-	return uint64(v), nil
-}
-
-// DecodeFixed64 consumes a 64-bit little-endian integer from the buffer.
-func (b *Buffer) DecodeFixed64() (uint64, error) {
-	v, n := protowire.ConsumeFixed64(b.buf[b.idx:])
-	if n < 0 {
-		return 0, protowire.ParseError(n)
-	}
-	b.idx += n
-	return uint64(v), nil
-}
-
-// DecodeRawBytes consumes length-prefixed raw bytes from the buffer.
-// If alloc is specified, it returns a copy of the raw bytes
-// rather than a sub-slice of the buffer.
-func (b *Buffer) DecodeRawBytes(alloc bool) ([]byte, error) {
-	v, n := protowire.ConsumeBytes(b.buf[b.idx:])
-	if n < 0 {
-		return nil, protowire.ParseError(n)
-	}
-	b.idx += n
-	if alloc {
-		v = append([]byte(nil), v...)
-	}
-	return v, nil
-}
-
-// DecodeStringBytes consumes length-prefixed raw bytes from the buffer.
-// It does not validate whether the raw bytes contain valid UTF-8.
-func (b *Buffer) DecodeStringBytes() (string, error) {
-	v, n := protowire.ConsumeString(b.buf[b.idx:])
-	if n < 0 {
-		return "", protowire.ParseError(n)
-	}
-	b.idx += n
-	return v, nil
-}
-
-// DecodeMessage consumes a length-prefixed message from the buffer.
-// It does not reset m before unmarshaling.
-func (b *Buffer) DecodeMessage(m Message) error {
-	v, err := b.DecodeRawBytes(false)
-	if err != nil {
-		return err
-	}
-	return UnmarshalMerge(v, m)
-}
-
-// DecodeGroup consumes a message group from the buffer.
-// It assumes that the start group marker has already been consumed and
-// consumes all bytes until (and including) the end group marker.
-// It does not reset m before unmarshaling.
-func (b *Buffer) DecodeGroup(m Message) error {
-	v, n, err := consumeGroup(b.buf[b.idx:])
-	if err != nil {
-		return err
-	}
-	b.idx += n
-	return UnmarshalMerge(v, m)
-}
-
-// consumeGroup parses b until it finds an end group marker, returning
-// the raw bytes of the message (excluding the end group marker) and
-// the total length of the message (including the end group marker).
-func consumeGroup(b []byte) ([]byte, int, error) {
-	b0 := b
-	depth := 1 // assume this follows a start group marker
-	for {
-		_, wtyp, tagLen := protowire.ConsumeTag(b)
-		if tagLen < 0 {
-			return nil, 0, protowire.ParseError(tagLen)
-		}
-		b = b[tagLen:]
-
-		var valLen int
-		switch wtyp {
-		case protowire.VarintType:
-			_, valLen = protowire.ConsumeVarint(b)
-		case protowire.Fixed32Type:
-			_, valLen = protowire.ConsumeFixed32(b)
-		case protowire.Fixed64Type:
-			_, valLen = protowire.ConsumeFixed64(b)
-		case protowire.BytesType:
-			_, valLen = protowire.ConsumeBytes(b)
-		case protowire.StartGroupType:
-			depth++
-		case protowire.EndGroupType:
-			depth--
-		default:
-			return nil, 0, errors.New("proto: cannot parse reserved wire type")
-		}
-		if valLen < 0 {
-			return nil, 0, protowire.ParseError(valLen)
-		}
-		b = b[valLen:]
-
-		if depth == 0 {
-			return b0[:len(b0)-len(b)-tagLen], len(b0) - len(b), nil
-		}
-	}
-}
diff --git a/tools/vendor/github.com/golang/protobuf/proto/defaults.go b/tools/vendor/github.com/golang/protobuf/proto/defaults.go
deleted file mode 100644
index d399bf069..000000000
--- a/tools/vendor/github.com/golang/protobuf/proto/defaults.go
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package proto
-
-import (
-	"google.golang.org/protobuf/reflect/protoreflect"
-)
-
-// SetDefaults sets unpopulated scalar fields to their default values.
-// Fields within a oneof are not set even if they have a default value.
-// SetDefaults is recursively called upon any populated message fields.
-func SetDefaults(m Message) {
-	if m != nil {
-		setDefaults(MessageReflect(m))
-	}
-}
-
-func setDefaults(m protoreflect.Message) {
-	fds := m.Descriptor().Fields()
-	for i := 0; i < fds.Len(); i++ {
-		fd := fds.Get(i)
-		if !m.Has(fd) {
-			if fd.HasDefault() && fd.ContainingOneof() == nil {
-				v := fd.Default()
-				if fd.Kind() == protoreflect.BytesKind {
-					v = protoreflect.ValueOf(append([]byte(nil), v.Bytes()...)) // copy the default bytes
-				}
-				m.Set(fd, v)
-			}
-			continue
-		}
-	}
-
-	m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
-		switch {
-		// Handle singular message.
-		case fd.Cardinality() != protoreflect.Repeated:
-			if fd.Message() != nil {
-				setDefaults(m.Get(fd).Message())
-			}
-		// Handle list of messages.
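consumeGroup above relies on nested start/end group markers for framing. A small sketch of that wire layout using the protowire package these files already import (the field numbers and value are arbitrary):

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	// Field 1 is a group wrapping field 2, a varint holding 150.
	var b []byte
	b = protowire.AppendTag(b, 1, protowire.StartGroupType)
	b = protowire.AppendTag(b, 2, protowire.VarintType)
	b = protowire.AppendVarint(b, 150)
	b = protowire.AppendTag(b, 1, protowire.EndGroupType)
	fmt.Printf("% x\n", b) // 0b 10 96 01 0c

	// Like DecodeGroup, ConsumeGroup assumes the start marker was already
	// consumed; it returns the payload without the trailing end marker.
	inner, n := protowire.ConsumeGroup(1, b[1:])
	fmt.Printf("% x, %d bytes consumed\n", inner, n) // 10 96 01, 4 bytes consumed
}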
-		case fd.IsList():
-			if fd.Message() != nil {
-				ls := m.Get(fd).List()
-				for i := 0; i < ls.Len(); i++ {
-					setDefaults(ls.Get(i).Message())
-				}
-			}
-		// Handle map of messages.
-		case fd.IsMap():
-			if fd.MapValue().Message() != nil {
-				ms := m.Get(fd).Map()
-				ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool {
-					setDefaults(v.Message())
-					return true
-				})
-			}
-		}
-		return true
-	})
-}
diff --git a/tools/vendor/github.com/golang/protobuf/proto/deprecated.go b/tools/vendor/github.com/golang/protobuf/proto/deprecated.go
deleted file mode 100644
index e8db57e09..000000000
--- a/tools/vendor/github.com/golang/protobuf/proto/deprecated.go
+++ /dev/null
@@ -1,113 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package proto
-
-import (
-	"encoding/json"
-	"errors"
-	"fmt"
-	"strconv"
-
-	protoV2 "google.golang.org/protobuf/proto"
-)
-
-var (
-	// Deprecated: No longer returned.
-	ErrNil = errors.New("proto: Marshal called with nil")
-
-	// Deprecated: No longer returned.
-	ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
-
-	// Deprecated: No longer returned.
-	ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
-)
-
-// Deprecated: Do not use.
-type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 }
-
-// Deprecated: Do not use.
-func GetStats() Stats { return Stats{} }
-
-// Deprecated: Do not use.
-func MarshalMessageSet(interface{}) ([]byte, error) {
-	return nil, errors.New("proto: not implemented")
-}
-
-// Deprecated: Do not use.
-func UnmarshalMessageSet([]byte, interface{}) error {
-	return errors.New("proto: not implemented")
-}
-
-// Deprecated: Do not use.
-func MarshalMessageSetJSON(interface{}) ([]byte, error) {
-	return nil, errors.New("proto: not implemented")
-}
-
-// Deprecated: Do not use.
-func UnmarshalMessageSetJSON([]byte, interface{}) error {
-	return errors.New("proto: not implemented")
-}
-
-// Deprecated: Do not use.
-func RegisterMessageSetType(Message, int32, string) {}
-
-// Deprecated: Do not use.
-func EnumName(m map[int32]string, v int32) string {
-	s, ok := m[v]
-	if ok {
-		return s
-	}
-	return strconv.Itoa(int(v))
-}
-
-// Deprecated: Do not use.
-func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
-	if data[0] == '"' {
-		// New style: enums are strings.
-		var repr string
-		if err := json.Unmarshal(data, &repr); err != nil {
-			return -1, err
-		}
-		val, ok := m[repr]
-		if !ok {
-			return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
-		}
-		return val, nil
-	}
-	// Old style: enums are ints.
-	var val int32
-	if err := json.Unmarshal(data, &val); err != nil {
-		return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
-	}
-	return val, nil
-}
-
-// Deprecated: Do not use; this type existed for internal-use only.
-type InternalMessageInfo struct{}
-
-// Deprecated: Do not use; this method existed for internal-use only.
-func (*InternalMessageInfo) DiscardUnknown(m Message) {
-	DiscardUnknown(m)
-}
-
-// Deprecated: Do not use; this method existed for internal-use only.
-func (*InternalMessageInfo) Marshal(b []byte, m Message, deterministic bool) ([]byte, error) {
-	return protoV2.MarshalOptions{Deterministic: deterministic}.MarshalAppend(b, MessageV2(m))
-}
-
-// Deprecated: Do not use; this method existed for internal-use only.
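UnmarshalJSONEnum above accepts both the new string form and the old numeric form of an enum. A sketch with an invented name-to-number map of the shape protoc-gen-go generates (the enum name "test.Enum" is illustrative only):

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	// Invented enum mapping; real code uses a generated map.
	values := map[string]int32{"FOO": 0, "BAR": 1}

	v, err := proto.UnmarshalJSONEnum(values, []byte(`"BAR"`), "test.Enum")
	fmt.Println(v, err) // 1 <nil>

	v, err = proto.UnmarshalJSONEnum(values, []byte(`1`), "test.Enum")
	fmt.Println(v, err) // 1 <nil>
}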
-func (*InternalMessageInfo) Merge(dst, src Message) {
-	protoV2.Merge(MessageV2(dst), MessageV2(src))
-}
-
-// Deprecated: Do not use; this method existed for internal-use only.
-func (*InternalMessageInfo) Size(m Message) int {
-	return protoV2.Size(MessageV2(m))
-}
-
-// Deprecated: Do not use; this method existed for internal-use only.
-func (*InternalMessageInfo) Unmarshal(m Message, b []byte) error {
-	return protoV2.UnmarshalOptions{Merge: true}.Unmarshal(b, MessageV2(m))
-}
diff --git a/tools/vendor/github.com/golang/protobuf/proto/discard.go b/tools/vendor/github.com/golang/protobuf/proto/discard.go
deleted file mode 100644
index 2187e877f..000000000
--- a/tools/vendor/github.com/golang/protobuf/proto/discard.go
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package proto
-
-import (
-	"google.golang.org/protobuf/reflect/protoreflect"
-)
-
-// DiscardUnknown recursively discards all unknown fields from this message
-// and all embedded messages.
-//
-// When unmarshaling a message with unrecognized fields, the tags and values
-// of such fields are preserved in the Message. This allows a later call to
-// marshal to be able to produce a message that continues to have those
-// unrecognized fields. To avoid this, DiscardUnknown is used to
-// explicitly clear the unknown fields after unmarshaling.
-func DiscardUnknown(m Message) {
-	if m != nil {
-		discardUnknown(MessageReflect(m))
-	}
-}
-
-func discardUnknown(m protoreflect.Message) {
-	m.Range(func(fd protoreflect.FieldDescriptor, val protoreflect.Value) bool {
-		switch {
-		// Handle singular message.
-		case fd.Cardinality() != protoreflect.Repeated:
-			if fd.Message() != nil {
-				discardUnknown(m.Get(fd).Message())
-			}
-		// Handle list of messages.
-		case fd.IsList():
-			if fd.Message() != nil {
-				ls := m.Get(fd).List()
-				for i := 0; i < ls.Len(); i++ {
-					discardUnknown(ls.Get(i).Message())
-				}
-			}
-		// Handle map of messages.
-		case fd.IsMap():
-			if fd.MapValue().Message() != nil {
-				ms := m.Get(fd).Map()
-				ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool {
-					discardUnknown(v.Message())
-					return true
-				})
-			}
-		}
-		return true
-	})
-
-	// Discard unknown fields.
-	if len(m.GetUnknown()) > 0 {
-		m.SetUnknown(nil)
-	}
-}
diff --git a/tools/vendor/github.com/golang/protobuf/proto/extensions.go b/tools/vendor/github.com/golang/protobuf/proto/extensions.go
deleted file mode 100644
index 42fc120c9..000000000
--- a/tools/vendor/github.com/golang/protobuf/proto/extensions.go
+++ /dev/null
@@ -1,356 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package proto
-
-import (
-	"errors"
-	"fmt"
-	"reflect"
-
-	"google.golang.org/protobuf/encoding/protowire"
-	"google.golang.org/protobuf/proto"
-	"google.golang.org/protobuf/reflect/protoreflect"
-	"google.golang.org/protobuf/reflect/protoregistry"
-	"google.golang.org/protobuf/runtime/protoiface"
-	"google.golang.org/protobuf/runtime/protoimpl"
-)
-
-type (
-	// ExtensionDesc represents an extension descriptor and
-	// is used to interact with an extension field in a message.
-	//
-	// Variables of this type are generated in code by protoc-gen-go.
-	ExtensionDesc = protoimpl.ExtensionInfo
-
-	// ExtensionRange represents a range of message extensions.
-	// Used in code generated by protoc-gen-go.
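A sketch of DiscardUnknown, using emptypb.Empty as a stand-in message and a hand-built unknown field (the two bytes are an arbitrary but valid field encoding: field 1 as a varint with value 1):

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/types/known/emptypb"
)

func main() {
	m := &emptypb.Empty{}
	// Inject an unknown field by hand.
	m.ProtoReflect().SetUnknown(protoreflect.RawFields{0x08, 0x01})
	fmt.Println(len(m.ProtoReflect().GetUnknown())) // 2

	proto.DiscardUnknown(m)
	fmt.Println(len(m.ProtoReflect().GetUnknown())) // 0
}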
- ExtensionRange = protoiface.ExtensionRangeV1 - - // Deprecated: Do not use; this is an internal type. - Extension = protoimpl.ExtensionFieldV1 - - // Deprecated: Do not use; this is an internal type. - XXX_InternalExtensions = protoimpl.ExtensionFields -) - -// ErrMissingExtension reports whether the extension was not present. -var ErrMissingExtension = errors.New("proto: missing extension") - -var errNotExtendable = errors.New("proto: not an extendable proto.Message") - -// HasExtension reports whether the extension field is present in m -// either as an explicitly populated field or as an unknown field. -func HasExtension(m Message, xt *ExtensionDesc) (has bool) { - mr := MessageReflect(m) - if mr == nil || !mr.IsValid() { - return false - } - - // Check whether any populated known field matches the field number. - xtd := xt.TypeDescriptor() - if isValidExtension(mr.Descriptor(), xtd) { - has = mr.Has(xtd) - } else { - mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool { - has = int32(fd.Number()) == xt.Field - return !has - }) - } - - // Check whether any unknown field matches the field number. - for b := mr.GetUnknown(); !has && len(b) > 0; { - num, _, n := protowire.ConsumeField(b) - has = int32(num) == xt.Field - b = b[n:] - } - return has -} - -// ClearExtension removes the extension field from m -// either as an explicitly populated field or as an unknown field. -func ClearExtension(m Message, xt *ExtensionDesc) { - mr := MessageReflect(m) - if mr == nil || !mr.IsValid() { - return - } - - xtd := xt.TypeDescriptor() - if isValidExtension(mr.Descriptor(), xtd) { - mr.Clear(xtd) - } else { - mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool { - if int32(fd.Number()) == xt.Field { - mr.Clear(fd) - return false - } - return true - }) - } - clearUnknown(mr, fieldNum(xt.Field)) -} - -// ClearAllExtensions clears all extensions from m. -// This includes populated fields and unknown fields in the extension range. -func ClearAllExtensions(m Message) { - mr := MessageReflect(m) - if mr == nil || !mr.IsValid() { - return - } - - mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool { - if fd.IsExtension() { - mr.Clear(fd) - } - return true - }) - clearUnknown(mr, mr.Descriptor().ExtensionRanges()) -} - -// GetExtension retrieves a proto2 extended field from m. -// -// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil), -// then GetExtension parses the encoded field and returns a Go value of the specified type. -// If the field is not present, then the default value is returned (if one is specified), -// otherwise ErrMissingExtension is reported. -// -// If the descriptor is type incomplete (i.e., ExtensionDesc.ExtensionType is nil), -// then GetExtension returns the raw encoded bytes for the extension field. -func GetExtension(m Message, xt *ExtensionDesc) (interface{}, error) { - mr := MessageReflect(m) - if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 { - return nil, errNotExtendable - } - - // Retrieve the unknown fields for this extension field. - var bo protoreflect.RawFields - for bi := mr.GetUnknown(); len(bi) > 0; { - num, _, n := protowire.ConsumeField(bi) - if int32(num) == xt.Field { - bo = append(bo, bi[:n]...) - } - bi = bi[n:] - } - - // For type incomplete descriptors, only retrieve the unknown fields. - if xt.ExtensionType == nil { - return []byte(bo), nil - } - - // If the extension field only exists as unknown fields, unmarshal it. 
-	// This is rarely done since proto.Unmarshal eagerly unmarshals extensions.
-	xtd := xt.TypeDescriptor()
-	if !isValidExtension(mr.Descriptor(), xtd) {
-		return nil, fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m)
-	}
-	if !mr.Has(xtd) && len(bo) > 0 {
-		m2 := mr.New()
-		if err := (proto.UnmarshalOptions{
-			Resolver: extensionResolver{xt},
-		}.Unmarshal(bo, m2.Interface())); err != nil {
-			return nil, err
-		}
-		if m2.Has(xtd) {
-			mr.Set(xtd, m2.Get(xtd))
-			clearUnknown(mr, fieldNum(xt.Field))
-		}
-	}
-
-	// Check whether the message has the extension field set or a default.
-	var pv protoreflect.Value
-	switch {
-	case mr.Has(xtd):
-		pv = mr.Get(xtd)
-	case xtd.HasDefault():
-		pv = xtd.Default()
-	default:
-		return nil, ErrMissingExtension
-	}
-
-	v := xt.InterfaceOf(pv)
-	rv := reflect.ValueOf(v)
-	if isScalarKind(rv.Kind()) {
-		rv2 := reflect.New(rv.Type())
-		rv2.Elem().Set(rv)
-		v = rv2.Interface()
-	}
-	return v, nil
-}
-
-// extensionResolver is a custom extension resolver that stores a single
-// extension type that takes precedence over the global registry.
-type extensionResolver struct{ xt protoreflect.ExtensionType }
-
-func (r extensionResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) {
-	if xtd := r.xt.TypeDescriptor(); xtd.FullName() == field {
-		return r.xt, nil
-	}
-	return protoregistry.GlobalTypes.FindExtensionByName(field)
-}
-
-func (r extensionResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) {
-	if xtd := r.xt.TypeDescriptor(); xtd.ContainingMessage().FullName() == message && xtd.Number() == field {
-		return r.xt, nil
-	}
-	return protoregistry.GlobalTypes.FindExtensionByNumber(message, field)
-}
-
-// GetExtensions returns a list of the extension values present in m,
-// corresponding to the provided list of extension descriptors, xts.
-// If an extension is missing in m, the corresponding value is nil.
-func GetExtensions(m Message, xts []*ExtensionDesc) ([]interface{}, error) {
-	mr := MessageReflect(m)
-	if mr == nil || !mr.IsValid() {
-		return nil, errNotExtendable
-	}
-
-	vs := make([]interface{}, len(xts))
-	for i, xt := range xts {
-		v, err := GetExtension(m, xt)
-		if err != nil {
-			if err == ErrMissingExtension {
-				continue
-			}
-			return vs, err
-		}
-		vs[i] = v
-	}
-	return vs, nil
-}
-
-// SetExtension sets an extension field in m to the provided value.
-func SetExtension(m Message, xt *ExtensionDesc, v interface{}) error {
-	mr := MessageReflect(m)
-	if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
-		return errNotExtendable
-	}
-
-	rv := reflect.ValueOf(v)
-	if reflect.TypeOf(v) != reflect.TypeOf(xt.ExtensionType) {
-		return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", v, xt.ExtensionType)
-	}
-	if rv.Kind() == reflect.Ptr {
-		if rv.IsNil() {
-			return fmt.Errorf("proto: SetExtension called with nil value of type %T", v)
-		}
-		if isScalarKind(rv.Elem().Kind()) {
-			v = rv.Elem().Interface()
-		}
-	}
-
-	xtd := xt.TypeDescriptor()
-	if !isValidExtension(mr.Descriptor(), xtd) {
-		return fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m)
-	}
-	mr.Set(xtd, xt.ValueOf(v))
-	clearUnknown(mr, fieldNum(xt.Field))
-	return nil
-}
-
-// SetRawExtension inserts b into the unknown fields of m.
-//
-// Deprecated: Use Message.ProtoReflect.SetUnknown instead.
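A sketch of the raw-bytes path documented above, using descriptorpb.FileOptions as a stand-in extendable message (it declares extensions 1000 to max); field number 50000 and the value 7 are arbitrary:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"google.golang.org/protobuf/encoding/protowire"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	m := &descriptorpb.FileOptions{}

	// Hand-encode extension field 50000 as a varint and attach it raw.
	raw := protowire.AppendTag(nil, 50000, protowire.VarintType)
	raw = protowire.AppendVarint(raw, 7)
	proto.SetRawExtension(m, 50000, raw)

	// A type-incomplete descriptor (nil ExtensionType) makes GetExtension
	// hand back the raw encoded bytes, per the doc comment above.
	v, err := proto.GetExtension(m, &proto.ExtensionDesc{Field: 50000})
	fmt.Printf("% x %v\n", v, err) // 80 b5 18 07 <nil>
}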
-func SetRawExtension(m Message, fnum int32, b []byte) {
-	mr := MessageReflect(m)
-	if mr == nil || !mr.IsValid() {
-		return
-	}
-
-	// Verify that the raw field is valid.
-	for b0 := b; len(b0) > 0; {
-		num, _, n := protowire.ConsumeField(b0)
-		if int32(num) != fnum {
-			panic(fmt.Sprintf("mismatching field number: got %d, want %d", num, fnum))
-		}
-		b0 = b0[n:]
-	}
-
-	ClearExtension(m, &ExtensionDesc{Field: fnum})
-	mr.SetUnknown(append(mr.GetUnknown(), b...))
-}
-
-// ExtensionDescs returns a list of extension descriptors found in m,
-// containing descriptors for both populated extension fields in m and
-// also unknown fields of m that are in the extension range.
-// For the latter case, a type-incomplete descriptor is provided where only
-// the ExtensionDesc.Field field is populated.
-// The order of the extension descriptors is undefined.
-func ExtensionDescs(m Message) ([]*ExtensionDesc, error) {
-	mr := MessageReflect(m)
-	if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
-		return nil, errNotExtendable
-	}
-
-	// Collect a set of known extension descriptors.
-	extDescs := make(map[protoreflect.FieldNumber]*ExtensionDesc)
-	mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
-		if fd.IsExtension() {
-			xt := fd.(protoreflect.ExtensionTypeDescriptor)
-			if xd, ok := xt.Type().(*ExtensionDesc); ok {
-				extDescs[fd.Number()] = xd
-			}
-		}
-		return true
-	})
-
-	// Collect a set of unknown extension descriptors.
-	extRanges := mr.Descriptor().ExtensionRanges()
-	for b := mr.GetUnknown(); len(b) > 0; {
-		num, _, n := protowire.ConsumeField(b)
-		if extRanges.Has(num) && extDescs[num] == nil {
-			extDescs[num] = nil
-		}
-		b = b[n:]
-	}
-
-	// Transpose the set of descriptors into a list.
-	var xts []*ExtensionDesc
-	for num, xt := range extDescs {
-		if xt == nil {
-			xt = &ExtensionDesc{Field: int32(num)}
-		}
-		xts = append(xts, xt)
-	}
-	return xts, nil
-}
-
-// isValidExtension reports whether xtd is a valid extension descriptor for md.
-func isValidExtension(md protoreflect.MessageDescriptor, xtd protoreflect.ExtensionTypeDescriptor) bool {
-	return xtd.ContainingMessage() == md && md.ExtensionRanges().Has(xtd.Number())
-}
-
-// isScalarKind reports whether k is a protobuf scalar kind (except bytes).
-// This function exists for historical reasons since the representation of
-// scalars differs between v1 and v2, where v1 uses *T and v2 uses T.
-func isScalarKind(k reflect.Kind) bool {
-	switch k {
-	case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
-		return true
-	default:
-		return false
-	}
-}
-
-// clearUnknown removes unknown fields from m where remover.Has reports true.
-func clearUnknown(m protoreflect.Message, remover interface {
-	Has(protoreflect.FieldNumber) bool
-}) {
-	var bo protoreflect.RawFields
-	for bi := m.GetUnknown(); len(bi) > 0; {
-		num, _, n := protowire.ConsumeField(bi)
-		if !remover.Has(num) {
-			bo = append(bo, bi[:n]...)
-		}
-		bi = bi[n:]
-	}
-	if bi := m.GetUnknown(); len(bi) != len(bo) {
-		m.SetUnknown(bo)
-	}
-}
-
-type fieldNum protoreflect.FieldNumber
-
-func (n1 fieldNum) Has(n2 protoreflect.FieldNumber) bool {
-	return protoreflect.FieldNumber(n1) == n2
-}
diff --git a/tools/vendor/github.com/golang/protobuf/proto/properties.go b/tools/vendor/github.com/golang/protobuf/proto/properties.go
deleted file mode 100644
index dcdc2202f..000000000
--- a/tools/vendor/github.com/golang/protobuf/proto/properties.go
+++ /dev/null
@@ -1,306 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package proto
-
-import (
-	"fmt"
-	"reflect"
-	"strconv"
-	"strings"
-	"sync"
-
-	"google.golang.org/protobuf/reflect/protoreflect"
-	"google.golang.org/protobuf/runtime/protoimpl"
-)
-
-// StructProperties represents protocol buffer type information for a
-// generated protobuf message in the open-struct API.
-//
-// Deprecated: Do not use.
-type StructProperties struct {
-	// Prop are the properties for each field.
-	//
-	// Fields belonging to a oneof are stored in OneofTypes instead, with a
-	// single Properties representing the parent oneof held here.
-	//
-	// The order of Prop matches the order of fields in the Go struct.
-	// Struct fields that are not related to protobufs have a "XXX_" prefix
-	// in the Properties.Name and must be ignored by the user.
-	Prop []*Properties
-
-	// OneofTypes contains information about the oneof fields in this message.
-	// It is keyed by the protobuf field name.
-	OneofTypes map[string]*OneofProperties
-}
-
-// Properties represents the type information for a protobuf message field.
-//
-// Deprecated: Do not use.
-type Properties struct {
-	// Name is a placeholder name with little meaningful semantic value.
-	// If the name has an "XXX_" prefix, the entire Properties must be ignored.
-	Name string
-	// OrigName is the protobuf field name or oneof name.
-	OrigName string
-	// JSONName is the JSON name for the protobuf field.
-	JSONName string
-	// Enum is a placeholder name for enums.
-	// For historical reasons, this is neither the Go name for the enum,
-	// nor the protobuf name for the enum.
-	Enum string // Deprecated: Do not use.
-	// Weak contains the full name of the weakly referenced message.
-	Weak string
-	// Wire is a string representation of the wire type.
-	Wire string
-	// WireType is the protobuf wire type for the field.
-	WireType int
-	// Tag is the protobuf field number.
-	Tag int
-	// Required reports whether this is a required field.
-	Required bool
-	// Optional reports whether this is an optional field.
-	Optional bool
-	// Repeated reports whether this is a repeated field.
-	Repeated bool
-	// Packed reports whether this is a packed repeated field of scalars.
-	Packed bool
-	// Proto3 reports whether this field operates under the proto3 syntax.
-	Proto3 bool
-	// Oneof reports whether this field belongs within a oneof.
-	Oneof bool
-
-	// Default is the default value in string form.
-	Default string
-	// HasDefault reports whether the field has a default value.
-	HasDefault bool
-
-	// MapKeyProp is the properties for the key field for a map field.
-	MapKeyProp *Properties
-	// MapValProp is the properties for the value field for a map field.
-	MapValProp *Properties
-}
-
-// OneofProperties represents the type information for a protobuf oneof.
-//
-// Deprecated: Do not use.
-type OneofProperties struct { - // Type is a pointer to the generated wrapper type for the field value. - // This is nil for messages that are not in the open-struct API. - Type reflect.Type - // Field is the index into StructProperties.Prop for the containing oneof. - Field int - // Prop is the properties for the field. - Prop *Properties -} - -// String formats the properties in the protobuf struct field tag style. -func (p *Properties) String() string { - s := p.Wire - s += "," + strconv.Itoa(p.Tag) - if p.Required { - s += ",req" - } - if p.Optional { - s += ",opt" - } - if p.Repeated { - s += ",rep" - } - if p.Packed { - s += ",packed" - } - s += ",name=" + p.OrigName - if p.JSONName != "" { - s += ",json=" + p.JSONName - } - if len(p.Enum) > 0 { - s += ",enum=" + p.Enum - } - if len(p.Weak) > 0 { - s += ",weak=" + p.Weak - } - if p.Proto3 { - s += ",proto3" - } - if p.Oneof { - s += ",oneof" - } - if p.HasDefault { - s += ",def=" + p.Default - } - return s -} - -// Parse populates p by parsing a string in the protobuf struct field tag style. -func (p *Properties) Parse(tag string) { - // For example: "bytes,49,opt,name=foo,def=hello!" - for len(tag) > 0 { - i := strings.IndexByte(tag, ',') - if i < 0 { - i = len(tag) - } - switch s := tag[:i]; { - case strings.HasPrefix(s, "name="): - p.OrigName = s[len("name="):] - case strings.HasPrefix(s, "json="): - p.JSONName = s[len("json="):] - case strings.HasPrefix(s, "enum="): - p.Enum = s[len("enum="):] - case strings.HasPrefix(s, "weak="): - p.Weak = s[len("weak="):] - case strings.Trim(s, "0123456789") == "": - n, _ := strconv.ParseUint(s, 10, 32) - p.Tag = int(n) - case s == "opt": - p.Optional = true - case s == "req": - p.Required = true - case s == "rep": - p.Repeated = true - case s == "varint" || s == "zigzag32" || s == "zigzag64": - p.Wire = s - p.WireType = WireVarint - case s == "fixed32": - p.Wire = s - p.WireType = WireFixed32 - case s == "fixed64": - p.Wire = s - p.WireType = WireFixed64 - case s == "bytes": - p.Wire = s - p.WireType = WireBytes - case s == "group": - p.Wire = s - p.WireType = WireStartGroup - case s == "packed": - p.Packed = true - case s == "proto3": - p.Proto3 = true - case s == "oneof": - p.Oneof = true - case strings.HasPrefix(s, "def="): - // The default tag is special in that everything afterwards is the - // default regardless of the presence of commas. - p.HasDefault = true - p.Default, i = tag[len("def="):], len(tag) - } - tag = strings.TrimPrefix(tag[i:], ",") - } -} - -// Init populates the properties from a protocol buffer struct tag. -// -// Deprecated: Do not use. -func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { - p.Name = name - p.OrigName = name - if tag == "" { - return - } - p.Parse(tag) - - if typ != nil && typ.Kind() == reflect.Map { - p.MapKeyProp = new(Properties) - p.MapKeyProp.Init(nil, "Key", f.Tag.Get("protobuf_key"), nil) - p.MapValProp = new(Properties) - p.MapValProp.Init(nil, "Value", f.Tag.Get("protobuf_val"), nil) - } -} - -var propertiesCache sync.Map // map[reflect.Type]*StructProperties - -// GetProperties returns the list of properties for the type represented by t, -// which must be a generated protocol buffer message in the open-struct API, -// where protobuf message fields are represented by exported Go struct fields. -// -// Deprecated: Use protobuf reflection instead. 
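Parse and String above are near-inverses over the generated struct-tag syntax. A sketch reusing the tag string from the comment in Parse:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	p := new(proto.Properties)
	p.Parse("bytes,49,opt,name=foo,def=hello!")

	fmt.Println(p.Tag, p.OrigName, p.Optional, p.Default) // 49 foo true hello!
	fmt.Println(p.String())                               // bytes,49,opt,name=foo,def=hello!
}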
-func GetProperties(t reflect.Type) *StructProperties { - if p, ok := propertiesCache.Load(t); ok { - return p.(*StructProperties) - } - p, _ := propertiesCache.LoadOrStore(t, newProperties(t)) - return p.(*StructProperties) -} - -func newProperties(t reflect.Type) *StructProperties { - if t.Kind() != reflect.Struct { - panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t)) - } - - var hasOneof bool - prop := new(StructProperties) - - // Construct a list of properties for each field in the struct. - for i := 0; i < t.NumField(); i++ { - p := new(Properties) - f := t.Field(i) - tagField := f.Tag.Get("protobuf") - p.Init(f.Type, f.Name, tagField, &f) - - tagOneof := f.Tag.Get("protobuf_oneof") - if tagOneof != "" { - hasOneof = true - p.OrigName = tagOneof - } - - // Rename unrelated struct fields with the "XXX_" prefix since so much - // user code simply checks for this to exclude special fields. - if tagField == "" && tagOneof == "" && !strings.HasPrefix(p.Name, "XXX_") { - p.Name = "XXX_" + p.Name - p.OrigName = "XXX_" + p.OrigName - } else if p.Weak != "" { - p.Name = p.OrigName // avoid possible "XXX_" prefix on weak field - } - - prop.Prop = append(prop.Prop, p) - } - - // Construct a mapping of oneof field names to properties. - if hasOneof { - var oneofWrappers []interface{} - if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofFuncs"); ok { - oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[3].Interface().([]interface{}) - } - if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofWrappers"); ok { - oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[0].Interface().([]interface{}) - } - if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(protoreflect.ProtoMessage); ok { - if m, ok := m.ProtoReflect().(interface{ ProtoMessageInfo() *protoimpl.MessageInfo }); ok { - oneofWrappers = m.ProtoMessageInfo().OneofWrappers - } - } - - prop.OneofTypes = make(map[string]*OneofProperties) - for _, wrapper := range oneofWrappers { - p := &OneofProperties{ - Type: reflect.ValueOf(wrapper).Type(), // *T - Prop: new(Properties), - } - f := p.Type.Elem().Field(0) - p.Prop.Name = f.Name - p.Prop.Parse(f.Tag.Get("protobuf")) - - // Determine the struct field that contains this oneof. - // Each wrapper is assignable to exactly one parent field. - var foundOneof bool - for i := 0; i < t.NumField() && !foundOneof; i++ { - if p.Type.AssignableTo(t.Field(i).Type) { - p.Field = i - foundOneof = true - } - } - if !foundOneof { - panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t)) - } - prop.OneofTypes[p.Prop.OrigName] = p - } - } - - return prop -} - -func (sp *StructProperties) Len() int { return len(sp.Prop) } -func (sp *StructProperties) Less(i, j int) bool { return false } -func (sp *StructProperties) Swap(i, j int) { return } diff --git a/tools/vendor/github.com/golang/protobuf/proto/proto.go b/tools/vendor/github.com/golang/protobuf/proto/proto.go deleted file mode 100644 index 5aee89c32..000000000 --- a/tools/vendor/github.com/golang/protobuf/proto/proto.go +++ /dev/null @@ -1,167 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package proto provides functionality for handling protocol buffer messages. -// In particular, it provides marshaling and unmarshaling between a protobuf -// message and the binary wire format. 
-//
-// See https://developers.google.com/protocol-buffers/docs/gotutorial for
-// more information.
-//
-// Deprecated: Use the "google.golang.org/protobuf/proto" package instead.
-package proto
-
-import (
-	protoV2 "google.golang.org/protobuf/proto"
-	"google.golang.org/protobuf/reflect/protoreflect"
-	"google.golang.org/protobuf/runtime/protoiface"
-	"google.golang.org/protobuf/runtime/protoimpl"
-)
-
-const (
-	ProtoPackageIsVersion1 = true
-	ProtoPackageIsVersion2 = true
-	ProtoPackageIsVersion3 = true
-	ProtoPackageIsVersion4 = true
-)
-
-// GeneratedEnum is any enum type generated by protoc-gen-go
-// which is a named int32 kind.
-// This type exists for documentation purposes.
-type GeneratedEnum interface{}
-
-// GeneratedMessage is any message type generated by protoc-gen-go
-// which is a pointer to a named struct kind.
-// This type exists for documentation purposes.
-type GeneratedMessage interface{}
-
-// Message is a protocol buffer message.
-//
-// This is the v1 version of the message interface and is marginally better
-// than an empty interface as it lacks any method to programmatically interact
-// with the contents of the message.
-//
-// A v2 message is declared in "google.golang.org/protobuf/proto".Message and
-// exposes protobuf reflection as a first-class feature of the interface.
-//
-// To convert a v1 message to a v2 message, use the MessageV2 function.
-// To convert a v2 message to a v1 message, use the MessageV1 function.
-type Message = protoiface.MessageV1
-
-// MessageV1 converts either a v1 or v2 message to a v1 message.
-// It returns nil if m is nil.
-func MessageV1(m GeneratedMessage) protoiface.MessageV1 {
-	return protoimpl.X.ProtoMessageV1Of(m)
-}
-
-// MessageV2 converts either a v1 or v2 message to a v2 message.
-// It returns nil if m is nil.
-func MessageV2(m GeneratedMessage) protoV2.Message {
-	return protoimpl.X.ProtoMessageV2Of(m)
-}
-
-// MessageReflect returns a reflective view for a message.
-// It returns nil if m is nil.
-func MessageReflect(m Message) protoreflect.Message {
-	return protoimpl.X.MessageOf(m)
-}
-
-// Marshaler is implemented by messages that can marshal themselves.
-// This interface is used by the following functions: Size, Marshal,
-// Buffer.Marshal, and Buffer.EncodeMessage.
-//
-// Deprecated: Do not implement.
-type Marshaler interface {
-	// Marshal formats the encoded bytes of the message.
-	// It should be deterministic and emit valid protobuf wire data.
-	// The caller takes ownership of the returned buffer.
-	Marshal() ([]byte, error)
-}
-
-// Unmarshaler is implemented by messages that can unmarshal themselves.
-// This interface is used by the following functions: Unmarshal, UnmarshalMerge,
-// Buffer.Unmarshal, Buffer.DecodeMessage, and Buffer.DecodeGroup.
-//
-// Deprecated: Do not implement.
-type Unmarshaler interface {
-	// Unmarshal parses the encoded bytes of the protobuf wire input.
-	// The provided buffer is only valid for the duration of the method call.
-	// It should not reset the receiver message.
-	Unmarshal([]byte) error
-}
-
-// Merger is implemented by messages that can merge themselves.
-// This interface is used by the following functions: Clone and Merge.
-//
-// Deprecated: Do not implement.
-type Merger interface {
-	// Merge merges the contents of src into the receiver message.
-	// It clones all data structures in src such that it aliases no mutable
-	// memory referenced by src.
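A sketch of the v1/v2 bridge above, with emptypb.Empty standing in for any generated message (generated types satisfy both interfaces):

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"google.golang.org/protobuf/types/known/emptypb"
)

func main() {
	m := &emptypb.Empty{}

	v2 := proto.MessageV2(m)  // explicit v2 view
	v1 := proto.MessageV1(v2) // and back again

	fmt.Println(proto.MessageReflect(v1).Descriptor().FullName()) // google.protobuf.Empty
}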
-	Merge(src Message)
-}
-
-// RequiredNotSetError is an error type returned when
-// marshaling or unmarshaling a message with missing required fields.
-type RequiredNotSetError struct {
-	err error
-}
-
-func (e *RequiredNotSetError) Error() string {
-	if e.err != nil {
-		return e.err.Error()
-	}
-	return "proto: required field not set"
-}
-func (e *RequiredNotSetError) RequiredNotSet() bool {
-	return true
-}
-
-func checkRequiredNotSet(m protoV2.Message) error {
-	if err := protoV2.CheckInitialized(m); err != nil {
-		return &RequiredNotSetError{err: err}
-	}
-	return nil
-}
-
-// Clone returns a deep copy of src.
-func Clone(src Message) Message {
-	return MessageV1(protoV2.Clone(MessageV2(src)))
-}
-
-// Merge merges src into dst, which must be messages of the same type.
-//
-// Populated scalar fields in src are copied to dst, while populated
-// singular messages in src are merged into dst by recursively calling Merge.
-// The elements of every list field in src are appended to the corresponding
-// list fields in dst. The entries of every map field in src are copied into
-// the corresponding map field in dst, possibly replacing existing entries.
-// The unknown fields of src are appended to the unknown fields of dst.
-func Merge(dst, src Message) {
-	protoV2.Merge(MessageV2(dst), MessageV2(src))
-}
-
-// Equal reports whether two messages are equal.
-// If two messages marshal to the same bytes under deterministic serialization,
-// then Equal is guaranteed to report true.
-//
-// Two messages are equal if they are the same protobuf message type,
-// have the same set of populated known and extension field values,
-// and the same set of unknown field values.
-//
-// Scalar values are compared with the equivalent of the == operator in Go,
-// except bytes values which are compared using bytes.Equal and
-// floating point values which specially treat NaNs as equal.
-// Message values are compared by recursively calling Equal.
-// Lists are equal if each element value is also equal.
-// Maps are equal if they have the same set of keys, where the pair of values
-// for each key is also equal.
-func Equal(x, y Message) bool {
-	return protoV2.Equal(MessageV2(x), MessageV2(y))
-}
-
-func isMessageSet(md protoreflect.MessageDescriptor) bool {
-	ms, ok := md.(interface{ IsMessageSet() bool })
-	return ok && ms.IsMessageSet()
-}
diff --git a/tools/vendor/github.com/golang/protobuf/proto/registry.go b/tools/vendor/github.com/golang/protobuf/proto/registry.go
deleted file mode 100644
index 066b4323b..000000000
--- a/tools/vendor/github.com/golang/protobuf/proto/registry.go
+++ /dev/null
@@ -1,317 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package proto
-
-import (
-	"bytes"
-	"compress/gzip"
-	"fmt"
-	"io/ioutil"
-	"reflect"
-	"strings"
-	"sync"
-
-	"google.golang.org/protobuf/reflect/protodesc"
-	"google.golang.org/protobuf/reflect/protoreflect"
-	"google.golang.org/protobuf/reflect/protoregistry"
-	"google.golang.org/protobuf/runtime/protoimpl"
-)
-
-// filePath is the path to the proto source file.
-type filePath = string // e.g., "google/protobuf/descriptor.proto"
-
-// fileDescGZIP is the compressed contents of the encoded FileDescriptorProto.
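A sketch of Clone, Merge, and Equal as documented above, with wrapperspb.StringValue standing in for any generated message:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	dst := wrapperspb.String("old")
	src := wrapperspb.String("new")

	proto.Merge(dst, src) // populated scalar in src overwrites dst
	fmt.Println(dst.Value, proto.Equal(dst, src)) // new true

	clone := proto.Clone(src).(*wrapperspb.StringValue) // deep copy
	fmt.Println(clone.Value, clone != src)              // new true
}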
-type fileDescGZIP = []byte - -var fileCache sync.Map // map[filePath]fileDescGZIP - -// RegisterFile is called from generated code to register the compressed -// FileDescriptorProto with the file path for a proto source file. -// -// Deprecated: Use protoregistry.GlobalFiles.RegisterFile instead. -func RegisterFile(s filePath, d fileDescGZIP) { - // Decompress the descriptor. - zr, err := gzip.NewReader(bytes.NewReader(d)) - if err != nil { - panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err)) - } - b, err := ioutil.ReadAll(zr) - if err != nil { - panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err)) - } - - // Construct a protoreflect.FileDescriptor from the raw descriptor. - // Note that DescBuilder.Build automatically registers the constructed - // file descriptor with the v2 registry. - protoimpl.DescBuilder{RawDescriptor: b}.Build() - - // Locally cache the raw descriptor form for the file. - fileCache.Store(s, d) -} - -// FileDescriptor returns the compressed FileDescriptorProto given the file path -// for a proto source file. It returns nil if not found. -// -// Deprecated: Use protoregistry.GlobalFiles.FindFileByPath instead. -func FileDescriptor(s filePath) fileDescGZIP { - if v, ok := fileCache.Load(s); ok { - return v.(fileDescGZIP) - } - - // Find the descriptor in the v2 registry. - var b []byte - if fd, _ := protoregistry.GlobalFiles.FindFileByPath(s); fd != nil { - b, _ = Marshal(protodesc.ToFileDescriptorProto(fd)) - } - - // Locally cache the raw descriptor form for the file. - if len(b) > 0 { - v, _ := fileCache.LoadOrStore(s, protoimpl.X.CompressGZIP(b)) - return v.(fileDescGZIP) - } - return nil -} - -// enumName is the name of an enum. For historical reasons, the enum name is -// neither the full Go name nor the full protobuf name of the enum. -// The name is the dot-separated combination of just the proto package that the -// enum is declared within followed by the Go type name of the generated enum. -type enumName = string // e.g., "my.proto.package.GoMessage_GoEnum" - -// enumsByName maps enum values by name to their numeric counterpart. -type enumsByName = map[string]int32 - -// enumsByNumber maps enum values by number to their name counterpart. -type enumsByNumber = map[int32]string - -var enumCache sync.Map // map[enumName]enumsByName -var numFilesCache sync.Map // map[protoreflect.FullName]int - -// RegisterEnum is called from the generated code to register the mapping of -// enum value names to enum numbers for the enum identified by s. -// -// Deprecated: Use protoregistry.GlobalTypes.RegisterEnum instead. -func RegisterEnum(s enumName, _ enumsByNumber, m enumsByName) { - if _, ok := enumCache.Load(s); ok { - panic("proto: duplicate enum registered: " + s) - } - enumCache.Store(s, m) - - // This does not forward registration to the v2 registry since this API - // lacks sufficient information to construct a complete v2 enum descriptor. -} - -// EnumValueMap returns the mapping from enum value names to enum numbers for -// the enum of the given name. It returns nil if not found. -// -// Deprecated: Use protoregistry.GlobalTypes.FindEnumByName instead. -func EnumValueMap(s enumName) enumsByName { - if v, ok := enumCache.Load(s); ok { - return v.(enumsByName) - } - - // Check whether the cache is stale. If the number of files in the current - // package differs, then it means that some enums may have been recently - // registered upstream that we do not know about. 
- var protoPkg protoreflect.FullName - if i := strings.LastIndexByte(s, '.'); i >= 0 { - protoPkg = protoreflect.FullName(s[:i]) - } - v, _ := numFilesCache.Load(protoPkg) - numFiles, _ := v.(int) - if protoregistry.GlobalFiles.NumFilesByPackage(protoPkg) == numFiles { - return nil // cache is up-to-date; was not found earlier - } - - // Update the enum cache for all enums declared in the given proto package. - numFiles = 0 - protoregistry.GlobalFiles.RangeFilesByPackage(protoPkg, func(fd protoreflect.FileDescriptor) bool { - walkEnums(fd, func(ed protoreflect.EnumDescriptor) { - name := protoimpl.X.LegacyEnumName(ed) - if _, ok := enumCache.Load(name); !ok { - m := make(enumsByName) - evs := ed.Values() - for i := evs.Len() - 1; i >= 0; i-- { - ev := evs.Get(i) - m[string(ev.Name())] = int32(ev.Number()) - } - enumCache.LoadOrStore(name, m) - } - }) - numFiles++ - return true - }) - numFilesCache.Store(protoPkg, numFiles) - - // Check cache again for enum map. - if v, ok := enumCache.Load(s); ok { - return v.(enumsByName) - } - return nil -} - -// walkEnums recursively walks all enums declared in d. -func walkEnums(d interface { - Enums() protoreflect.EnumDescriptors - Messages() protoreflect.MessageDescriptors -}, f func(protoreflect.EnumDescriptor)) { - eds := d.Enums() - for i := eds.Len() - 1; i >= 0; i-- { - f(eds.Get(i)) - } - mds := d.Messages() - for i := mds.Len() - 1; i >= 0; i-- { - walkEnums(mds.Get(i), f) - } -} - -// messageName is the full name of protobuf message. -type messageName = string - -var messageTypeCache sync.Map // map[messageName]reflect.Type - -// RegisterType is called from generated code to register the message Go type -// for a message of the given name. -// -// Deprecated: Use protoregistry.GlobalTypes.RegisterMessage instead. -func RegisterType(m Message, s messageName) { - mt := protoimpl.X.LegacyMessageTypeOf(m, protoreflect.FullName(s)) - if err := protoregistry.GlobalTypes.RegisterMessage(mt); err != nil { - panic(err) - } - messageTypeCache.Store(s, reflect.TypeOf(m)) -} - -// RegisterMapType is called from generated code to register the Go map type -// for a protobuf message representing a map entry. -// -// Deprecated: Do not use. -func RegisterMapType(m interface{}, s messageName) { - t := reflect.TypeOf(m) - if t.Kind() != reflect.Map { - panic(fmt.Sprintf("invalid map kind: %v", t)) - } - if _, ok := messageTypeCache.Load(s); ok { - panic(fmt.Errorf("proto: duplicate proto message registered: %s", s)) - } - messageTypeCache.Store(s, t) -} - -// MessageType returns the message type for a named message. -// It returns nil if not found. -// -// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead. -func MessageType(s messageName) reflect.Type { - if v, ok := messageTypeCache.Load(s); ok { - return v.(reflect.Type) - } - - // Derive the message type from the v2 registry. - var t reflect.Type - if mt, _ := protoregistry.GlobalTypes.FindMessageByName(protoreflect.FullName(s)); mt != nil { - t = messageGoType(mt) - } - - // If we could not get a concrete type, it is possible that it is a - // pseudo-message for a map entry. - if t == nil { - d, _ := protoregistry.GlobalFiles.FindDescriptorByName(protoreflect.FullName(s)) - if md, _ := d.(protoreflect.MessageDescriptor); md != nil && md.IsMapEntry() { - kt := goTypeForField(md.Fields().ByNumber(1)) - vt := goTypeForField(md.Fields().ByNumber(2)) - t = reflect.MapOf(kt, vt) - } - } - - // Locally cache the message type for the given name. 
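A sketch of the enum registry round trip above; the enum name and values are invented for illustration (real registrations come from generated init functions):

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	proto.RegisterEnum("example.Color",
		map[int32]string{0: "RED", 1: "BLUE"},
		map[string]int32{"RED": 0, "BLUE": 1})

	fmt.Println(proto.EnumValueMap("example.Color")["BLUE"]) // 1
}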
- if t != nil { - v, _ := messageTypeCache.LoadOrStore(s, t) - return v.(reflect.Type) - } - return nil -} - -func goTypeForField(fd protoreflect.FieldDescriptor) reflect.Type { - switch k := fd.Kind(); k { - case protoreflect.EnumKind: - if et, _ := protoregistry.GlobalTypes.FindEnumByName(fd.Enum().FullName()); et != nil { - return enumGoType(et) - } - return reflect.TypeOf(protoreflect.EnumNumber(0)) - case protoreflect.MessageKind, protoreflect.GroupKind: - if mt, _ := protoregistry.GlobalTypes.FindMessageByName(fd.Message().FullName()); mt != nil { - return messageGoType(mt) - } - return reflect.TypeOf((*protoreflect.Message)(nil)).Elem() - default: - return reflect.TypeOf(fd.Default().Interface()) - } -} - -func enumGoType(et protoreflect.EnumType) reflect.Type { - return reflect.TypeOf(et.New(0)) -} - -func messageGoType(mt protoreflect.MessageType) reflect.Type { - return reflect.TypeOf(MessageV1(mt.Zero().Interface())) -} - -// MessageName returns the full protobuf name for the given message type. -// -// Deprecated: Use protoreflect.MessageDescriptor.FullName instead. -func MessageName(m Message) messageName { - if m == nil { - return "" - } - if m, ok := m.(interface{ XXX_MessageName() messageName }); ok { - return m.XXX_MessageName() - } - return messageName(protoimpl.X.MessageDescriptorOf(m).FullName()) -} - -// RegisterExtension is called from the generated code to register -// the extension descriptor. -// -// Deprecated: Use protoregistry.GlobalTypes.RegisterExtension instead. -func RegisterExtension(d *ExtensionDesc) { - if err := protoregistry.GlobalTypes.RegisterExtension(d); err != nil { - panic(err) - } -} - -type extensionsByNumber = map[int32]*ExtensionDesc - -var extensionCache sync.Map // map[messageName]extensionsByNumber - -// RegisteredExtensions returns a map of the registered extensions for the -// provided protobuf message, indexed by the extension field number. -// -// Deprecated: Use protoregistry.GlobalTypes.RangeExtensionsByMessage instead. -func RegisteredExtensions(m Message) extensionsByNumber { - // Check whether the cache is stale. If the number of extensions for - // the given message differs, then it means that some extensions were - // recently registered upstream that we do not know about. - s := MessageName(m) - v, _ := extensionCache.Load(s) - xs, _ := v.(extensionsByNumber) - if protoregistry.GlobalTypes.NumExtensionsByMessage(protoreflect.FullName(s)) == len(xs) { - return xs // cache is up-to-date - } - - // Cache is stale, re-compute the extensions map. - xs = make(extensionsByNumber) - protoregistry.GlobalTypes.RangeExtensionsByMessage(protoreflect.FullName(s), func(xt protoreflect.ExtensionType) bool { - if xd, ok := xt.(*ExtensionDesc); ok { - xs[int32(xt.TypeDescriptor().Number())] = xd - } else { - // TODO: This implies that the protoreflect.ExtensionType is a - // custom type not generated by protoc-gen-go. We could try and - // convert the type to an ExtensionDesc. - } - return true - }) - extensionCache.Store(s, xs) - return xs -} diff --git a/tools/vendor/github.com/golang/protobuf/proto/text_decode.go b/tools/vendor/github.com/golang/protobuf/proto/text_decode.go deleted file mode 100644 index 47eb3e445..000000000 --- a/tools/vendor/github.com/golang/protobuf/proto/text_decode.go +++ /dev/null @@ -1,801 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package proto - -import ( - "encoding" - "errors" - "fmt" - "reflect" - "strconv" - "strings" - "unicode/utf8" - - "google.golang.org/protobuf/encoding/prototext" - protoV2 "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" -) - -const wrapTextUnmarshalV2 = false - -// ParseError is returned by UnmarshalText. -type ParseError struct { - Message string - - // Deprecated: Do not use. - Line, Offset int -} - -func (e *ParseError) Error() string { - if wrapTextUnmarshalV2 { - return e.Message - } - if e.Line == 1 { - return fmt.Sprintf("line 1.%d: %v", e.Offset, e.Message) - } - return fmt.Sprintf("line %d: %v", e.Line, e.Message) -} - -// UnmarshalText parses a proto text formatted string into m. -func UnmarshalText(s string, m Message) error { - if u, ok := m.(encoding.TextUnmarshaler); ok { - return u.UnmarshalText([]byte(s)) - } - - m.Reset() - mi := MessageV2(m) - - if wrapTextUnmarshalV2 { - err := prototext.UnmarshalOptions{ - AllowPartial: true, - }.Unmarshal([]byte(s), mi) - if err != nil { - return &ParseError{Message: err.Error()} - } - return checkRequiredNotSet(mi) - } else { - if err := newTextParser(s).unmarshalMessage(mi.ProtoReflect(), ""); err != nil { - return err - } - return checkRequiredNotSet(mi) - } -} - -type textParser struct { - s string // remaining input - done bool // whether the parsing is finished (success or error) - backed bool // whether back() was called - offset, line int - cur token -} - -type token struct { - value string - err *ParseError - line int // line number - offset int // byte number from start of input, not start of line - unquoted string // the unquoted version of value, if it was a quoted string -} - -func newTextParser(s string) *textParser { - p := new(textParser) - p.s = s - p.line = 1 - p.cur.line = 1 - return p -} - -func (p *textParser) unmarshalMessage(m protoreflect.Message, terminator string) (err error) { - md := m.Descriptor() - fds := md.Fields() - - // A struct is a sequence of "name: value", terminated by one of - // '>' or '}', or the end of the input. A name may also be - // "[extension]" or "[type/url]". - // - // The whole struct can also be an expanded Any message, like: - // [type/url] < ... struct contents ... > - seen := make(map[protoreflect.FieldNumber]bool) - for { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == terminator { - break - } - if tok.value == "[" { - if err := p.unmarshalExtensionOrAny(m, seen); err != nil { - return err - } - continue - } - - // This is a normal, non-extension field. 
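The parser below drives UnmarshalText. A sketch parsing the text form of a stand-in message (wrapperspb.StringValue; its single field is named value):

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	m := &wrapperspb.StringValue{}
	if err := proto.UnmarshalText(`value: "hello"`, m); err != nil {
		panic(err)
	}
	fmt.Println(m.Value) // hello
}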
- name := protoreflect.Name(tok.value) - fd := fds.ByName(name) - switch { - case fd == nil: - gd := fds.ByName(protoreflect.Name(strings.ToLower(string(name)))) - if gd != nil && gd.Kind() == protoreflect.GroupKind && gd.Message().Name() == name { - fd = gd - } - case fd.Kind() == protoreflect.GroupKind && fd.Message().Name() != name: - fd = nil - case fd.IsWeak() && fd.Message().IsPlaceholder(): - fd = nil - } - if fd == nil { - typeName := string(md.FullName()) - if m, ok := m.Interface().(Message); ok { - t := reflect.TypeOf(m) - if t.Kind() == reflect.Ptr { - typeName = t.Elem().String() - } - } - return p.errorf("unknown field name %q in %v", name, typeName) - } - if od := fd.ContainingOneof(); od != nil && m.WhichOneof(od) != nil { - return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, od.Name()) - } - if fd.Cardinality() != protoreflect.Repeated && seen[fd.Number()] { - return p.errorf("non-repeated field %q was repeated", fd.Name()) - } - seen[fd.Number()] = true - - // Consume any colon. - if err := p.checkForColon(fd); err != nil { - return err - } - - // Parse into the field. - v := m.Get(fd) - if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) { - v = m.Mutable(fd) - } - if v, err = p.unmarshalValue(v, fd); err != nil { - return err - } - m.Set(fd, v) - - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - } - return nil -} - -func (p *textParser) unmarshalExtensionOrAny(m protoreflect.Message, seen map[protoreflect.FieldNumber]bool) error { - name, err := p.consumeExtensionOrAnyName() - if err != nil { - return err - } - - // If it contains a slash, it's an Any type URL. - if slashIdx := strings.LastIndex(name, "/"); slashIdx >= 0 { - tok := p.next() - if tok.err != nil { - return tok.err - } - // consume an optional colon - if tok.value == ":" { - tok = p.next() - if tok.err != nil { - return tok.err - } - } - - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - - mt, err := protoregistry.GlobalTypes.FindMessageByURL(name) - if err != nil { - return p.errorf("unrecognized message %q in google.protobuf.Any", name[slashIdx+len("/"):]) - } - m2 := mt.New() - if err := p.unmarshalMessage(m2, terminator); err != nil { - return err - } - b, err := protoV2.Marshal(m2.Interface()) - if err != nil { - return p.errorf("failed to marshal message of type %q: %v", name[slashIdx+len("/"):], err) - } - - urlFD := m.Descriptor().Fields().ByName("type_url") - valFD := m.Descriptor().Fields().ByName("value") - if seen[urlFD.Number()] { - return p.errorf("Any message unpacked multiple times, or %q already set", urlFD.Name()) - } - if seen[valFD.Number()] { - return p.errorf("Any message unpacked multiple times, or %q already set", valFD.Name()) - } - m.Set(urlFD, protoreflect.ValueOfString(name)) - m.Set(valFD, protoreflect.ValueOfBytes(b)) - seen[urlFD.Number()] = true - seen[valFD.Number()] = true - return nil - } - - xname := protoreflect.FullName(name) - xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname) - if xt == nil && isMessageSet(m.Descriptor()) { - xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension")) - } - if xt == nil { - return p.errorf("unrecognized extension %q", name) - } - fd := xt.TypeDescriptor() - if fd.ContainingMessage().FullName() != m.Descriptor().FullName() { - return p.errorf("extension field %q does not extend message %q", 
-			name, m.Descriptor().FullName())
-	}
-
-	if err := p.checkForColon(fd); err != nil {
-		return err
-	}
-
-	v := m.Get(fd)
-	if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) {
-		v = m.Mutable(fd)
-	}
-	v, err = p.unmarshalValue(v, fd)
-	if err != nil {
-		return err
-	}
-	m.Set(fd, v)
-	return p.consumeOptionalSeparator()
-}
-
-func (p *textParser) unmarshalValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
-	tok := p.next()
-	if tok.err != nil {
-		return v, tok.err
-	}
-	if tok.value == "" {
-		return v, p.errorf("unexpected EOF")
-	}
-
-	switch {
-	case fd.IsList():
-		lv := v.List()
-		var err error
-		if tok.value == "[" {
-			// Repeated field with list notation, like [1,2,3].
-			for {
-				vv := lv.NewElement()
-				vv, err = p.unmarshalSingularValue(vv, fd)
-				if err != nil {
-					return v, err
-				}
-				lv.Append(vv)
-
-				tok := p.next()
-				if tok.err != nil {
-					return v, tok.err
-				}
-				if tok.value == "]" {
-					break
-				}
-				if tok.value != "," {
-					return v, p.errorf("Expected ']' or ',' found %q", tok.value)
-				}
-			}
-			return v, nil
-		}
-
-		// One value of the repeated field.
-		p.back()
-		vv := lv.NewElement()
-		vv, err = p.unmarshalSingularValue(vv, fd)
-		if err != nil {
-			return v, err
-		}
-		lv.Append(vv)
-		return v, nil
-	case fd.IsMap():
-		// The map entry should be this sequence of tokens:
-		//	< key : KEY value : VALUE >
-		// However, implementations may omit key or value, and technically
-		// we should support them in any order.
-		var terminator string
-		switch tok.value {
-		case "<":
-			terminator = ">"
-		case "{":
-			terminator = "}"
-		default:
-			return v, p.errorf("expected '{' or '<', found %q", tok.value)
-		}
-
-		keyFD := fd.MapKey()
-		valFD := fd.MapValue()
-
-		mv := v.Map()
-		kv := keyFD.Default()
-		vv := mv.NewValue()
-		for {
-			tok := p.next()
-			if tok.err != nil {
-				return v, tok.err
-			}
-			if tok.value == terminator {
-				break
-			}
-			var err error
-			switch tok.value {
-			case "key":
-				if err := p.consumeToken(":"); err != nil {
-					return v, err
-				}
-				if kv, err = p.unmarshalSingularValue(kv, keyFD); err != nil {
-					return v, err
-				}
-				if err := p.consumeOptionalSeparator(); err != nil {
-					return v, err
-				}
-			case "value":
-				if err := p.checkForColon(valFD); err != nil {
-					return v, err
-				}
-				if vv, err = p.unmarshalSingularValue(vv, valFD); err != nil {
-					return v, err
-				}
-				if err := p.consumeOptionalSeparator(); err != nil {
-					return v, err
-				}
-			default:
-				p.back()
-				return v, p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
-			}
-		}
-		mv.Set(kv.MapKey(), vv)
-		return v, nil
-	default:
-		p.back()
-		return p.unmarshalSingularValue(v, fd)
-	}
-}
-
-func (p *textParser) unmarshalSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
-	tok := p.next()
-	if tok.err != nil {
-		return v, tok.err
-	}
-	if tok.value == "" {
-		return v, p.errorf("unexpected EOF")
-	}
-
-	switch fd.Kind() {
-	case protoreflect.BoolKind:
-		switch tok.value {
-		case "true", "1", "t", "True":
-			return protoreflect.ValueOfBool(true), nil
-		case "false", "0", "f", "False":
-			return protoreflect.ValueOfBool(false), nil
-		}
-	case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
-		if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
-			return protoreflect.ValueOfInt32(int32(x)), nil
-		}
-
-		// The C++ parser accepts large positive hex numbers that use
-		// two's complement arithmetic to represent negative numbers.
- // This feature is here for backwards compatibility with C++. - if strings.HasPrefix(tok.value, "0x") { - if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { - return protoreflect.ValueOfInt32(int32(-(int64(^x) + 1))), nil - } - } - case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: - if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { - return protoreflect.ValueOfInt64(int64(x)), nil - } - - // The C++ parser accepts large positive hex numbers that uses - // two's complement arithmetic to represent negative numbers. - // This feature is here for backwards compatibility with C++. - if strings.HasPrefix(tok.value, "0x") { - if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { - return protoreflect.ValueOfInt64(int64(-(int64(^x) + 1))), nil - } - } - case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: - if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { - return protoreflect.ValueOfUint32(uint32(x)), nil - } - case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: - if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { - return protoreflect.ValueOfUint64(uint64(x)), nil - } - case protoreflect.FloatKind: - // Ignore 'f' for compatibility with output generated by C++, - // but don't remove 'f' when the value is "-inf" or "inf". - v := tok.value - if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" { - v = v[:len(v)-len("f")] - } - if x, err := strconv.ParseFloat(v, 32); err == nil { - return protoreflect.ValueOfFloat32(float32(x)), nil - } - case protoreflect.DoubleKind: - // Ignore 'f' for compatibility with output generated by C++, - // but don't remove 'f' when the value is "-inf" or "inf". - v := tok.value - if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" { - v = v[:len(v)-len("f")] - } - if x, err := strconv.ParseFloat(v, 64); err == nil { - return protoreflect.ValueOfFloat64(float64(x)), nil - } - case protoreflect.StringKind: - if isQuote(tok.value[0]) { - return protoreflect.ValueOfString(tok.unquoted), nil - } - case protoreflect.BytesKind: - if isQuote(tok.value[0]) { - return protoreflect.ValueOfBytes([]byte(tok.unquoted)), nil - } - case protoreflect.EnumKind: - if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { - return protoreflect.ValueOfEnum(protoreflect.EnumNumber(x)), nil - } - vd := fd.Enum().Values().ByName(protoreflect.Name(tok.value)) - if vd != nil { - return protoreflect.ValueOfEnum(vd.Number()), nil - } - case protoreflect.MessageKind, protoreflect.GroupKind: - var terminator string - switch tok.value { - case "{": - terminator = "}" - case "<": - terminator = ">" - default: - return v, p.errorf("expected '{' or '<', found %q", tok.value) - } - err := p.unmarshalMessage(v.Message(), terminator) - return v, err - default: - panic(fmt.Sprintf("invalid kind %v", fd.Kind())) - } - return v, p.errorf("invalid %v: %v", fd.Kind(), tok.value) -} - -// Consume a ':' from the input stream (if the next token is a colon), -// returning an error if a colon is needed but not present. -func (p *textParser) checkForColon(fd protoreflect.FieldDescriptor) *ParseError { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ":" { - if fd.Message() == nil { - return p.errorf("expected ':', found %q", tok.value) - } - p.back() - } - return nil -} - -// consumeExtensionOrAnyName consumes an extension name or an Any type URL and -// the following ']'. It returns the name or URL consumed. 
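The shim deleted above exists only to bridge callers onto protobuf-go v2, where `prototext` implements the same text-format grammar (both `{}`/`<>` message delimiters and the optional colon this parser tolerates). A minimal sketch of the replacement API, using the well-known `durationpb` type purely for illustration:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/prototext"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	// prototext accepts the same field syntax the legacy parser handles
	// above: name/value pairs, optional separators, repeated fields.
	var d durationpb.Duration
	if err := prototext.Unmarshal([]byte(`seconds: 90 nanos: 500000000`), &d); err != nil {
		panic(err)
	}
	fmt.Println(d.AsDuration()) // 1m30.5s
}
```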
-func (p *textParser) consumeExtensionOrAnyName() (string, error) { - tok := p.next() - if tok.err != nil { - return "", tok.err - } - - // If extension name or type url is quoted, it's a single token. - if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { - name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) - if err != nil { - return "", err - } - return name, p.consumeToken("]") - } - - // Consume everything up to "]" - var parts []string - for tok.value != "]" { - parts = append(parts, tok.value) - tok = p.next() - if tok.err != nil { - return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) - } - if p.done && tok.value != "]" { - return "", p.errorf("unclosed type_url or extension name") - } - } - return strings.Join(parts, ""), nil -} - -// consumeOptionalSeparator consumes an optional semicolon or comma. -// It is used in unmarshalMessage to provide backward compatibility. -func (p *textParser) consumeOptionalSeparator() error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ";" && tok.value != "," { - p.back() - } - return nil -} - -func (p *textParser) errorf(format string, a ...interface{}) *ParseError { - pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} - p.cur.err = pe - p.done = true - return pe -} - -func (p *textParser) skipWhitespace() { - i := 0 - for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { - if p.s[i] == '#' { - // comment; skip to end of line or input - for i < len(p.s) && p.s[i] != '\n' { - i++ - } - if i == len(p.s) { - break - } - } - if p.s[i] == '\n' { - p.line++ - } - i++ - } - p.offset += i - p.s = p.s[i:len(p.s)] - if len(p.s) == 0 { - p.done = true - } -} - -func (p *textParser) advance() { - // Skip whitespace - p.skipWhitespace() - if p.done { - return - } - - // Start of non-whitespace - p.cur.err = nil - p.cur.offset, p.cur.line = p.offset, p.line - p.cur.unquoted = "" - switch p.s[0] { - case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': - // Single symbol - p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] - case '"', '\'': - // Quoted string - i := 1 - for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { - if p.s[i] == '\\' && i+1 < len(p.s) { - // skip escaped char - i++ - } - i++ - } - if i >= len(p.s) || p.s[i] != p.s[0] { - p.errorf("unmatched quote") - return - } - unq, err := unquoteC(p.s[1:i], rune(p.s[0])) - if err != nil { - p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) - return - } - p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] - p.cur.unquoted = unq - default: - i := 0 - for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { - i++ - } - if i == 0 { - p.errorf("unexpected byte %#x", p.s[0]) - return - } - p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] - } - p.offset += len(p.cur.value) -} - -// Back off the parser by one token. Can only be done between calls to next(). -// It makes the next advance() a no-op. -func (p *textParser) back() { p.backed = true } - -// Advances the parser and returns the new current token. -func (p *textParser) next() *token { - if p.backed || p.done { - p.backed = false - return &p.cur - } - p.advance() - if p.done { - p.cur.value = "" - } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { - // Look for multiple quoted strings separated by whitespace, - // and concatenate them. 
- cat := p.cur - for { - p.skipWhitespace() - if p.done || !isQuote(p.s[0]) { - break - } - p.advance() - if p.cur.err != nil { - return &p.cur - } - cat.value += " " + p.cur.value - cat.unquoted += p.cur.unquoted - } - p.done = false // parser may have seen EOF, but we want to return cat - p.cur = cat - } - return &p.cur -} - -func (p *textParser) consumeToken(s string) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != s { - p.back() - return p.errorf("expected %q, found %q", s, tok.value) - } - return nil -} - -var errBadUTF8 = errors.New("proto: bad UTF-8") - -func unquoteC(s string, quote rune) (string, error) { - // This is based on C++'s tokenizer.cc. - // Despite its name, this is *not* parsing C syntax. - // For instance, "\0" is an invalid quoted string. - - // Avoid allocation in trivial cases. - simple := true - for _, r := range s { - if r == '\\' || r == quote { - simple = false - break - } - } - if simple { - return s, nil - } - - buf := make([]byte, 0, 3*len(s)/2) - for len(s) > 0 { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", errBadUTF8 - } - s = s[n:] - if r != '\\' { - if r < utf8.RuneSelf { - buf = append(buf, byte(r)) - } else { - buf = append(buf, string(r)...) - } - continue - } - - ch, tail, err := unescape(s) - if err != nil { - return "", err - } - buf = append(buf, ch...) - s = tail - } - return string(buf), nil -} - -func unescape(s string) (ch string, tail string, err error) { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", "", errBadUTF8 - } - s = s[n:] - switch r { - case 'a': - return "\a", s, nil - case 'b': - return "\b", s, nil - case 'f': - return "\f", s, nil - case 'n': - return "\n", s, nil - case 'r': - return "\r", s, nil - case 't': - return "\t", s, nil - case 'v': - return "\v", s, nil - case '?': - return "?", s, nil // trigraph workaround - case '\'', '"', '\\': - return string(r), s, nil - case '0', '1', '2', '3', '4', '5', '6', '7': - if len(s) < 2 { - return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) - } - ss := string(r) + s[:2] - s = s[2:] - i, err := strconv.ParseUint(ss, 8, 8) - if err != nil { - return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss) - } - return string([]byte{byte(i)}), s, nil - case 'x', 'X', 'u', 'U': - var n int - switch r { - case 'x', 'X': - n = 2 - case 'u': - n = 4 - case 'U': - n = 8 - } - if len(s) < n { - return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n) - } - ss := s[:n] - s = s[n:] - i, err := strconv.ParseUint(ss, 16, 64) - if err != nil { - return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss) - } - if r == 'x' || r == 'X' { - return string([]byte{byte(i)}), s, nil - } - if i > utf8.MaxRune { - return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss) - } - return string(rune(i)), s, nil - } - return "", "", fmt.Errorf(`unknown escape \%c`, r) -} - -func isIdentOrNumberChar(c byte) bool { - switch { - case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': - return true - case '0' <= c && c <= '9': - return true - } - switch c { - case '-', '+', '.', '_': - return true - } - return false -} - -func isWhitespace(c byte) bool { - switch c { - case ' ', '\t', '\n', '\r': - return true - } - return false -} - -func isQuote(c byte) bool { - switch c { - case '"', '\'': - return true - } - return false -} diff --git a/tools/vendor/github.com/golang/protobuf/proto/text_encode.go 
b/tools/vendor/github.com/golang/protobuf/proto/text_encode.go deleted file mode 100644 index a31134eeb..000000000 --- a/tools/vendor/github.com/golang/protobuf/proto/text_encode.go +++ /dev/null @@ -1,560 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proto - -import ( - "bytes" - "encoding" - "fmt" - "io" - "math" - "sort" - "strings" - - "google.golang.org/protobuf/encoding/prototext" - "google.golang.org/protobuf/encoding/protowire" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" -) - -const wrapTextMarshalV2 = false - -// TextMarshaler is a configurable text format marshaler. -type TextMarshaler struct { - Compact bool // use compact text format (one line) - ExpandAny bool // expand google.protobuf.Any messages of known types -} - -// Marshal writes the proto text format of m to w. -func (tm *TextMarshaler) Marshal(w io.Writer, m Message) error { - b, err := tm.marshal(m) - if len(b) > 0 { - if _, err := w.Write(b); err != nil { - return err - } - } - return err -} - -// Text returns a proto text formatted string of m. -func (tm *TextMarshaler) Text(m Message) string { - b, _ := tm.marshal(m) - return string(b) -} - -func (tm *TextMarshaler) marshal(m Message) ([]byte, error) { - mr := MessageReflect(m) - if mr == nil || !mr.IsValid() { - return []byte(""), nil - } - - if wrapTextMarshalV2 { - if m, ok := m.(encoding.TextMarshaler); ok { - return m.MarshalText() - } - - opts := prototext.MarshalOptions{ - AllowPartial: true, - EmitUnknown: true, - } - if !tm.Compact { - opts.Indent = " " - } - if !tm.ExpandAny { - opts.Resolver = (*protoregistry.Types)(nil) - } - return opts.Marshal(mr.Interface()) - } else { - w := &textWriter{ - compact: tm.Compact, - expandAny: tm.ExpandAny, - complete: true, - } - - if m, ok := m.(encoding.TextMarshaler); ok { - b, err := m.MarshalText() - if err != nil { - return nil, err - } - w.Write(b) - return w.buf, nil - } - - err := w.writeMessage(mr) - return w.buf, err - } -} - -var ( - defaultTextMarshaler = TextMarshaler{} - compactTextMarshaler = TextMarshaler{Compact: true} -) - -// MarshalText writes the proto text format of m to w. -func MarshalText(w io.Writer, m Message) error { return defaultTextMarshaler.Marshal(w, m) } - -// MarshalTextString returns a proto text formatted string of m. -func MarshalTextString(m Message) string { return defaultTextMarshaler.Text(m) } - -// CompactText writes the compact proto text format of m to w. -func CompactText(w io.Writer, m Message) error { return compactTextMarshaler.Marshal(w, m) } - -// CompactTextString returns a compact proto text formatted string of m. -func CompactTextString(m Message) string { return compactTextMarshaler.Text(m) } - -var ( - newline = []byte("\n") - endBraceNewline = []byte("}\n") - posInf = []byte("inf") - negInf = []byte("-inf") - nan = []byte("nan") -) - -// textWriter is an io.Writer that tracks its indentation level. 
-type textWriter struct { - compact bool // same as TextMarshaler.Compact - expandAny bool // same as TextMarshaler.ExpandAny - complete bool // whether the current position is a complete line - indent int // indentation level; never negative - buf []byte -} - -func (w *textWriter) Write(p []byte) (n int, _ error) { - newlines := bytes.Count(p, newline) - if newlines == 0 { - if !w.compact && w.complete { - w.writeIndent() - } - w.buf = append(w.buf, p...) - w.complete = false - return len(p), nil - } - - frags := bytes.SplitN(p, newline, newlines+1) - if w.compact { - for i, frag := range frags { - if i > 0 { - w.buf = append(w.buf, ' ') - n++ - } - w.buf = append(w.buf, frag...) - n += len(frag) - } - return n, nil - } - - for i, frag := range frags { - if w.complete { - w.writeIndent() - } - w.buf = append(w.buf, frag...) - n += len(frag) - if i+1 < len(frags) { - w.buf = append(w.buf, '\n') - n++ - } - } - w.complete = len(frags[len(frags)-1]) == 0 - return n, nil -} - -func (w *textWriter) WriteByte(c byte) error { - if w.compact && c == '\n' { - c = ' ' - } - if !w.compact && w.complete { - w.writeIndent() - } - w.buf = append(w.buf, c) - w.complete = c == '\n' - return nil -} - -func (w *textWriter) writeName(fd protoreflect.FieldDescriptor) { - if !w.compact && w.complete { - w.writeIndent() - } - w.complete = false - - if fd.Kind() != protoreflect.GroupKind { - w.buf = append(w.buf, fd.Name()...) - w.WriteByte(':') - } else { - // Use message type name for group field name. - w.buf = append(w.buf, fd.Message().Name()...) - } - - if !w.compact { - w.WriteByte(' ') - } -} - -func requiresQuotes(u string) bool { - // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. - for _, ch := range u { - switch { - case ch == '.' || ch == '/' || ch == '_': - continue - case '0' <= ch && ch <= '9': - continue - case 'A' <= ch && ch <= 'Z': - continue - case 'a' <= ch && ch <= 'z': - continue - default: - return true - } - } - return false -} - -// writeProto3Any writes an expanded google.protobuf.Any message. -// -// It returns (false, nil) if sv value can't be unmarshaled (e.g. because -// required messages are not linked in). -// -// It returns (true, error) when sv was written in expanded format or an error -// was encountered. 
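On the encode side, the deleted `TextMarshaler` mirrors options that `prototext.MarshalOptions` already exposes (the file's own `wrapTextMarshalV2` branch shows the mapping: `Compact` controls `Indent`, `ExpandAny` controls the `Resolver`). A hedged sketch of the non-compact equivalent:

```go
package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/encoding/prototext"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	// Roughly what Compact: false produces; note that prototext output
	// is deliberately unstable, so exact whitespace may vary.
	opts := prototext.MarshalOptions{Multiline: true, Indent: "  "}
	b, err := opts.Marshal(durationpb.New(90 * time.Second))
	if err != nil {
		panic(err)
	}
	fmt.Print(string(b)) // e.g. "seconds: 90\n"
}
```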
-func (w *textWriter) writeProto3Any(m protoreflect.Message) (bool, error) { - md := m.Descriptor() - fdURL := md.Fields().ByName("type_url") - fdVal := md.Fields().ByName("value") - - url := m.Get(fdURL).String() - mt, err := protoregistry.GlobalTypes.FindMessageByURL(url) - if err != nil { - return false, nil - } - - b := m.Get(fdVal).Bytes() - m2 := mt.New() - if err := proto.Unmarshal(b, m2.Interface()); err != nil { - return false, nil - } - w.Write([]byte("[")) - if requiresQuotes(url) { - w.writeQuotedString(url) - } else { - w.Write([]byte(url)) - } - if w.compact { - w.Write([]byte("]:<")) - } else { - w.Write([]byte("]: <\n")) - w.indent++ - } - if err := w.writeMessage(m2); err != nil { - return true, err - } - if w.compact { - w.Write([]byte("> ")) - } else { - w.indent-- - w.Write([]byte(">\n")) - } - return true, nil -} - -func (w *textWriter) writeMessage(m protoreflect.Message) error { - md := m.Descriptor() - if w.expandAny && md.FullName() == "google.protobuf.Any" { - if canExpand, err := w.writeProto3Any(m); canExpand { - return err - } - } - - fds := md.Fields() - for i := 0; i < fds.Len(); { - fd := fds.Get(i) - if od := fd.ContainingOneof(); od != nil { - fd = m.WhichOneof(od) - i += od.Fields().Len() - } else { - i++ - } - if fd == nil || !m.Has(fd) { - continue - } - - switch { - case fd.IsList(): - lv := m.Get(fd).List() - for j := 0; j < lv.Len(); j++ { - w.writeName(fd) - v := lv.Get(j) - if err := w.writeSingularValue(v, fd); err != nil { - return err - } - w.WriteByte('\n') - } - case fd.IsMap(): - kfd := fd.MapKey() - vfd := fd.MapValue() - mv := m.Get(fd).Map() - - type entry struct{ key, val protoreflect.Value } - var entries []entry - mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool { - entries = append(entries, entry{k.Value(), v}) - return true - }) - sort.Slice(entries, func(i, j int) bool { - switch kfd.Kind() { - case protoreflect.BoolKind: - return !entries[i].key.Bool() && entries[j].key.Bool() - case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: - return entries[i].key.Int() < entries[j].key.Int() - case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind: - return entries[i].key.Uint() < entries[j].key.Uint() - case protoreflect.StringKind: - return entries[i].key.String() < entries[j].key.String() - default: - panic("invalid kind") - } - }) - for _, entry := range entries { - w.writeName(fd) - w.WriteByte('<') - if !w.compact { - w.WriteByte('\n') - } - w.indent++ - w.writeName(kfd) - if err := w.writeSingularValue(entry.key, kfd); err != nil { - return err - } - w.WriteByte('\n') - w.writeName(vfd) - if err := w.writeSingularValue(entry.val, vfd); err != nil { - return err - } - w.WriteByte('\n') - w.indent-- - w.WriteByte('>') - w.WriteByte('\n') - } - default: - w.writeName(fd) - if err := w.writeSingularValue(m.Get(fd), fd); err != nil { - return err - } - w.WriteByte('\n') - } - } - - if b := m.GetUnknown(); len(b) > 0 { - w.writeUnknownFields(b) - } - return w.writeExtensions(m) -} - -func (w *textWriter) writeSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) error { - switch fd.Kind() { - case protoreflect.FloatKind, protoreflect.DoubleKind: - switch vf := v.Float(); { - case math.IsInf(vf, +1): - w.Write(posInf) - case math.IsInf(vf, -1): - w.Write(negInf) - case math.IsNaN(vf): - w.Write(nan) - default: - fmt.Fprint(w, v.Interface()) - } - case 
protoreflect.StringKind: - // NOTE: This does not validate UTF-8 for historical reasons. - w.writeQuotedString(string(v.String())) - case protoreflect.BytesKind: - w.writeQuotedString(string(v.Bytes())) - case protoreflect.MessageKind, protoreflect.GroupKind: - var bra, ket byte = '<', '>' - if fd.Kind() == protoreflect.GroupKind { - bra, ket = '{', '}' - } - w.WriteByte(bra) - if !w.compact { - w.WriteByte('\n') - } - w.indent++ - m := v.Message() - if m2, ok := m.Interface().(encoding.TextMarshaler); ok { - b, err := m2.MarshalText() - if err != nil { - return err - } - w.Write(b) - } else { - w.writeMessage(m) - } - w.indent-- - w.WriteByte(ket) - case protoreflect.EnumKind: - if ev := fd.Enum().Values().ByNumber(v.Enum()); ev != nil { - fmt.Fprint(w, ev.Name()) - } else { - fmt.Fprint(w, v.Enum()) - } - default: - fmt.Fprint(w, v.Interface()) - } - return nil -} - -// writeQuotedString writes a quoted string in the protocol buffer text format. -func (w *textWriter) writeQuotedString(s string) { - w.WriteByte('"') - for i := 0; i < len(s); i++ { - switch c := s[i]; c { - case '\n': - w.buf = append(w.buf, `\n`...) - case '\r': - w.buf = append(w.buf, `\r`...) - case '\t': - w.buf = append(w.buf, `\t`...) - case '"': - w.buf = append(w.buf, `\"`...) - case '\\': - w.buf = append(w.buf, `\\`...) - default: - if isPrint := c >= 0x20 && c < 0x7f; isPrint { - w.buf = append(w.buf, c) - } else { - w.buf = append(w.buf, fmt.Sprintf(`\%03o`, c)...) - } - } - } - w.WriteByte('"') -} - -func (w *textWriter) writeUnknownFields(b []byte) { - if !w.compact { - fmt.Fprintf(w, "/* %d unknown bytes */\n", len(b)) - } - - for len(b) > 0 { - num, wtyp, n := protowire.ConsumeTag(b) - if n < 0 { - return - } - b = b[n:] - - if wtyp == protowire.EndGroupType { - w.indent-- - w.Write(endBraceNewline) - continue - } - fmt.Fprint(w, num) - if wtyp != protowire.StartGroupType { - w.WriteByte(':') - } - if !w.compact || wtyp == protowire.StartGroupType { - w.WriteByte(' ') - } - switch wtyp { - case protowire.VarintType: - v, n := protowire.ConsumeVarint(b) - if n < 0 { - return - } - b = b[n:] - fmt.Fprint(w, v) - case protowire.Fixed32Type: - v, n := protowire.ConsumeFixed32(b) - if n < 0 { - return - } - b = b[n:] - fmt.Fprint(w, v) - case protowire.Fixed64Type: - v, n := protowire.ConsumeFixed64(b) - if n < 0 { - return - } - b = b[n:] - fmt.Fprint(w, v) - case protowire.BytesType: - v, n := protowire.ConsumeBytes(b) - if n < 0 { - return - } - b = b[n:] - fmt.Fprintf(w, "%q", v) - case protowire.StartGroupType: - w.WriteByte('{') - w.indent++ - default: - fmt.Fprintf(w, "/* unknown wire type %d */", wtyp) - } - w.WriteByte('\n') - } -} - -// writeExtensions writes all the extensions in m. -func (w *textWriter) writeExtensions(m protoreflect.Message) error { - md := m.Descriptor() - if md.ExtensionRanges().Len() == 0 { - return nil - } - - type ext struct { - desc protoreflect.FieldDescriptor - val protoreflect.Value - } - var exts []ext - m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { - if fd.IsExtension() { - exts = append(exts, ext{fd, v}) - } - return true - }) - sort.Slice(exts, func(i, j int) bool { - return exts[i].desc.Number() < exts[j].desc.Number() - }) - - for _, ext := range exts { - // For message set, use the name of the message as the extension name. 
- name := string(ext.desc.FullName()) - if isMessageSet(ext.desc.ContainingMessage()) { - name = strings.TrimSuffix(name, ".message_set_extension") - } - - if !ext.desc.IsList() { - if err := w.writeSingularExtension(name, ext.val, ext.desc); err != nil { - return err - } - } else { - lv := ext.val.List() - for i := 0; i < lv.Len(); i++ { - if err := w.writeSingularExtension(name, lv.Get(i), ext.desc); err != nil { - return err - } - } - } - } - return nil -} - -func (w *textWriter) writeSingularExtension(name string, v protoreflect.Value, fd protoreflect.FieldDescriptor) error { - fmt.Fprintf(w, "[%s]:", name) - if !w.compact { - w.WriteByte(' ') - } - if err := w.writeSingularValue(v, fd); err != nil { - return err - } - w.WriteByte('\n') - return nil -} - -func (w *textWriter) writeIndent() { - if !w.complete { - return - } - for i := 0; i < w.indent*2; i++ { - w.buf = append(w.buf, ' ') - } - w.complete = false -} diff --git a/tools/vendor/github.com/golang/protobuf/proto/wire.go b/tools/vendor/github.com/golang/protobuf/proto/wire.go deleted file mode 100644 index d7c28da5a..000000000 --- a/tools/vendor/github.com/golang/protobuf/proto/wire.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proto - -import ( - protoV2 "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/runtime/protoiface" -) - -// Size returns the size in bytes of the wire-format encoding of m. -func Size(m Message) int { - if m == nil { - return 0 - } - mi := MessageV2(m) - return protoV2.Size(mi) -} - -// Marshal returns the wire-format encoding of m. -func Marshal(m Message) ([]byte, error) { - b, err := marshalAppend(nil, m, false) - if b == nil { - b = zeroBytes - } - return b, err -} - -var zeroBytes = make([]byte, 0, 0) - -func marshalAppend(buf []byte, m Message, deterministic bool) ([]byte, error) { - if m == nil { - return nil, ErrNil - } - mi := MessageV2(m) - nbuf, err := protoV2.MarshalOptions{ - Deterministic: deterministic, - AllowPartial: true, - }.MarshalAppend(buf, mi) - if err != nil { - return buf, err - } - if len(buf) == len(nbuf) { - if !mi.ProtoReflect().IsValid() { - return buf, ErrNil - } - } - return nbuf, checkRequiredNotSet(mi) -} - -// Unmarshal parses a wire-format message in b and places the decoded results in m. -// -// Unmarshal resets m before starting to unmarshal, so any existing data in m is always -// removed. Use UnmarshalMerge to preserve and append to existing data. -func Unmarshal(b []byte, m Message) error { - m.Reset() - return UnmarshalMerge(b, m) -} - -// UnmarshalMerge parses a wire-format message in b and places the decoded results in m. -func UnmarshalMerge(b []byte, m Message) error { - mi := MessageV2(m) - out, err := protoV2.UnmarshalOptions{ - AllowPartial: true, - Merge: true, - }.UnmarshalState(protoiface.UnmarshalInput{ - Buf: b, - Message: mi.ProtoReflect(), - }) - if err != nil { - return err - } - if out.Flags&protoiface.UnmarshalInitialized > 0 { - return nil - } - return checkRequiredNotSet(mi) -} diff --git a/tools/vendor/github.com/golang/protobuf/proto/wrappers.go b/tools/vendor/github.com/golang/protobuf/proto/wrappers.go deleted file mode 100644 index 398e34859..000000000 --- a/tools/vendor/github.com/golang/protobuf/proto/wrappers.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proto - -// Bool stores v in a new bool value and returns a pointer to it. -func Bool(v bool) *bool { return &v } - -// Int stores v in a new int32 value and returns a pointer to it. -// -// Deprecated: Use Int32 instead. -func Int(v int) *int32 { return Int32(int32(v)) } - -// Int32 stores v in a new int32 value and returns a pointer to it. -func Int32(v int32) *int32 { return &v } - -// Int64 stores v in a new int64 value and returns a pointer to it. -func Int64(v int64) *int64 { return &v } - -// Uint32 stores v in a new uint32 value and returns a pointer to it. -func Uint32(v uint32) *uint32 { return &v } - -// Uint64 stores v in a new uint64 value and returns a pointer to it. -func Uint64(v uint64) *uint64 { return &v } - -// Float32 stores v in a new float32 value and returns a pointer to it. -func Float32(v float32) *float32 { return &v } - -// Float64 stores v in a new float64 value and returns a pointer to it. -func Float64(v float64) *float64 { return &v } - -// String stores v in a new string value and returns a pointer to it. -func String(v string) *string { return &v } diff --git a/tools/vendor/github.com/golang/protobuf/ptypes/any.go b/tools/vendor/github.com/golang/protobuf/ptypes/any.go deleted file mode 100644 index fdff3fdb4..000000000 --- a/tools/vendor/github.com/golang/protobuf/ptypes/any.go +++ /dev/null @@ -1,180 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ptypes - -import ( - "fmt" - "strings" - - "github.com/golang/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" - - anypb "github.com/golang/protobuf/ptypes/any" -) - -const urlPrefix = "type.googleapis.com/" - -// AnyMessageName returns the message name contained in an anypb.Any message. -// Most type assertions should use the Is function instead. -// -// Deprecated: Call the any.MessageName method instead. -func AnyMessageName(any *anypb.Any) (string, error) { - name, err := anyMessageName(any) - return string(name), err -} -func anyMessageName(any *anypb.Any) (protoreflect.FullName, error) { - if any == nil { - return "", fmt.Errorf("message is nil") - } - name := protoreflect.FullName(any.TypeUrl) - if i := strings.LastIndex(any.TypeUrl, "/"); i >= 0 { - name = name[i+len("/"):] - } - if !name.IsValid() { - return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl) - } - return name, nil -} - -// MarshalAny marshals the given message m into an anypb.Any message. -// -// Deprecated: Call the anypb.New function instead. -func MarshalAny(m proto.Message) (*anypb.Any, error) { - switch dm := m.(type) { - case DynamicAny: - m = dm.Message - case *DynamicAny: - if dm == nil { - return nil, proto.ErrNil - } - m = dm.Message - } - b, err := proto.Marshal(m) - if err != nil { - return nil, err - } - return &anypb.Any{TypeUrl: urlPrefix + proto.MessageName(m), Value: b}, nil -} - -// Empty returns a new message of the type specified in an anypb.Any message. -// It returns protoregistry.NotFound if the corresponding message type could not -// be resolved in the global registry. -// -// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead -// to resolve the message name and create a new instance of it. 
-func Empty(any *anypb.Any) (proto.Message, error) { - name, err := anyMessageName(any) - if err != nil { - return nil, err - } - mt, err := protoregistry.GlobalTypes.FindMessageByName(name) - if err != nil { - return nil, err - } - return proto.MessageV1(mt.New().Interface()), nil -} - -// UnmarshalAny unmarshals the encoded value contained in the anypb.Any message -// into the provided message m. It returns an error if the target message -// does not match the type in the Any message or if an unmarshal error occurs. -// -// The target message m may be a *DynamicAny message. If the underlying message -// type could not be resolved, then this returns protoregistry.NotFound. -// -// Deprecated: Call the any.UnmarshalTo method instead. -func UnmarshalAny(any *anypb.Any, m proto.Message) error { - if dm, ok := m.(*DynamicAny); ok { - if dm.Message == nil { - var err error - dm.Message, err = Empty(any) - if err != nil { - return err - } - } - m = dm.Message - } - - anyName, err := AnyMessageName(any) - if err != nil { - return err - } - msgName := proto.MessageName(m) - if anyName != msgName { - return fmt.Errorf("mismatched message type: got %q want %q", anyName, msgName) - } - return proto.Unmarshal(any.Value, m) -} - -// Is reports whether the Any message contains a message of the specified type. -// -// Deprecated: Call the any.MessageIs method instead. -func Is(any *anypb.Any, m proto.Message) bool { - if any == nil || m == nil { - return false - } - name := proto.MessageName(m) - if !strings.HasSuffix(any.TypeUrl, name) { - return false - } - return len(any.TypeUrl) == len(name) || any.TypeUrl[len(any.TypeUrl)-len(name)-1] == '/' -} - -// DynamicAny is a value that can be passed to UnmarshalAny to automatically -// allocate a proto.Message for the type specified in an anypb.Any message. -// The allocated message is stored in the embedded proto.Message. -// -// Example: -// -// var x ptypes.DynamicAny -// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } -// fmt.Printf("unmarshaled message: %v", x.Message) -// -// Deprecated: Use the any.UnmarshalNew method instead to unmarshal -// the any message contents into a new instance of the underlying message. 
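Each deleted `ptypes` helper names its v2 replacement in its Deprecated note. Pulled together, the `Any` migration looks roughly like the sketch below (using `durationpb` as an arbitrary payload; not code from this repository):

```go
package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/anypb"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	// anypb.New replaces ptypes.MarshalAny.
	a, err := anypb.New(durationpb.New(time.Minute))
	if err != nil {
		panic(err)
	}
	fmt.Println(a.TypeUrl) // type.googleapis.com/google.protobuf.Duration

	// MessageIs replaces ptypes.Is; UnmarshalTo replaces ptypes.UnmarshalAny.
	var d durationpb.Duration
	if a.MessageIs(&d) {
		if err := a.UnmarshalTo(&d); err != nil {
			panic(err)
		}
		fmt.Println(d.AsDuration()) // 1m0s
	}

	// UnmarshalNew replaces the DynamicAny pattern: it resolves the type
	// from the global registry and allocates a fresh message.
	m, err := a.UnmarshalNew()
	if err != nil {
		panic(err)
	}
	fmt.Println(m.ProtoReflect().Descriptor().FullName()) // google.protobuf.Duration
}
```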
-type DynamicAny struct{ proto.Message } - -func (m DynamicAny) String() string { - if m.Message == nil { - return "" - } - return m.Message.String() -} -func (m DynamicAny) Reset() { - if m.Message == nil { - return - } - m.Message.Reset() -} -func (m DynamicAny) ProtoMessage() { - return -} -func (m DynamicAny) ProtoReflect() protoreflect.Message { - if m.Message == nil { - return nil - } - return dynamicAny{proto.MessageReflect(m.Message)} -} - -type dynamicAny struct{ protoreflect.Message } - -func (m dynamicAny) Type() protoreflect.MessageType { - return dynamicAnyType{m.Message.Type()} -} -func (m dynamicAny) New() protoreflect.Message { - return dynamicAnyType{m.Message.Type()}.New() -} -func (m dynamicAny) Interface() protoreflect.ProtoMessage { - return DynamicAny{proto.MessageV1(m.Message.Interface())} -} - -type dynamicAnyType struct{ protoreflect.MessageType } - -func (t dynamicAnyType) New() protoreflect.Message { - return dynamicAny{t.MessageType.New()} -} -func (t dynamicAnyType) Zero() protoreflect.Message { - return dynamicAny{t.MessageType.Zero()} -} diff --git a/tools/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/tools/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go deleted file mode 100644 index 0ef27d33d..000000000 --- a/tools/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go +++ /dev/null @@ -1,62 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: github.com/golang/protobuf/ptypes/any/any.proto - -package any - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - anypb "google.golang.org/protobuf/types/known/anypb" - reflect "reflect" -) - -// Symbols defined in public import of google/protobuf/any.proto. 
- -type Any = anypb.Any - -var File_github_com_golang_protobuf_ptypes_any_any_proto protoreflect.FileDescriptor - -var file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = []byte{ - 0x0a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, - 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x2b, 0x5a, 0x29, - 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, - 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, - 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x3b, 0x61, 0x6e, 0x79, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, -} - -var file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = []interface{}{} -var file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_github_com_golang_protobuf_ptypes_any_any_proto_init() } -func file_github_com_golang_protobuf_ptypes_any_any_proto_init() { - if File_github_com_golang_protobuf_ptypes_any_any_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc, - NumEnums: 0, - NumMessages: 0, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes, - DependencyIndexes: file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs, - }.Build() - File_github_com_golang_protobuf_ptypes_any_any_proto = out.File - file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = nil - file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = nil - file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = nil -} diff --git a/tools/vendor/github.com/golang/protobuf/ptypes/doc.go b/tools/vendor/github.com/golang/protobuf/ptypes/doc.go deleted file mode 100644 index d3c33259d..000000000 --- a/tools/vendor/github.com/golang/protobuf/ptypes/doc.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package ptypes provides functionality for interacting with well-known types. -// -// Deprecated: Well-known types have specialized functionality directly -// injected into the generated packages for each message type. -// See the deprecation notice for each function for the suggested alternative. -package ptypes diff --git a/tools/vendor/github.com/golang/protobuf/ptypes/duration.go b/tools/vendor/github.com/golang/protobuf/ptypes/duration.go deleted file mode 100644 index b2b55dd85..000000000 --- a/tools/vendor/github.com/golang/protobuf/ptypes/duration.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ptypes - -import ( - "errors" - "fmt" - "time" - - durationpb "github.com/golang/protobuf/ptypes/duration" -) - -// Range of google.protobuf.Duration as specified in duration.proto. -// This is about 10,000 years in seconds. -const ( - maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) - minSeconds = -maxSeconds -) - -// Duration converts a durationpb.Duration to a time.Duration. -// Duration returns an error if dur is invalid or overflows a time.Duration. -// -// Deprecated: Call the dur.AsDuration and dur.CheckValid methods instead. -func Duration(dur *durationpb.Duration) (time.Duration, error) { - if err := validateDuration(dur); err != nil { - return 0, err - } - d := time.Duration(dur.Seconds) * time.Second - if int64(d/time.Second) != dur.Seconds { - return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur) - } - if dur.Nanos != 0 { - d += time.Duration(dur.Nanos) * time.Nanosecond - if (d < 0) != (dur.Nanos < 0) { - return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur) - } - } - return d, nil -} - -// DurationProto converts a time.Duration to a durationpb.Duration. -// -// Deprecated: Call the durationpb.New function instead. -func DurationProto(d time.Duration) *durationpb.Duration { - nanos := d.Nanoseconds() - secs := nanos / 1e9 - nanos -= secs * 1e9 - return &durationpb.Duration{ - Seconds: int64(secs), - Nanos: int32(nanos), - } -} - -// validateDuration determines whether the durationpb.Duration is valid -// according to the definition in google/protobuf/duration.proto. -// A valid durpb.Duration may still be too large to fit into a time.Duration -// Note that the range of durationpb.Duration is about 10,000 years, -// while the range of time.Duration is about 290 years. -func validateDuration(dur *durationpb.Duration) error { - if dur == nil { - return errors.New("duration: nil Duration") - } - if dur.Seconds < minSeconds || dur.Seconds > maxSeconds { - return fmt.Errorf("duration: %v: seconds out of range", dur) - } - if dur.Nanos <= -1e9 || dur.Nanos >= 1e9 { - return fmt.Errorf("duration: %v: nanos out of range", dur) - } - // Seconds and Nanos must have the same sign, unless d.Nanos is zero. - if (dur.Seconds < 0 && dur.Nanos > 0) || (dur.Seconds > 0 && dur.Nanos < 0) { - return fmt.Errorf("duration: %v: seconds and nanos have different signs", dur) - } - return nil -} diff --git a/tools/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/tools/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go deleted file mode 100644 index d0079ee3e..000000000 --- a/tools/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go +++ /dev/null @@ -1,63 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: github.com/golang/protobuf/ptypes/duration/duration.proto - -package duration - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - durationpb "google.golang.org/protobuf/types/known/durationpb" - reflect "reflect" -) - -// Symbols defined in public import of google/protobuf/duration.proto. 
- -type Duration = durationpb.Duration - -var File_github_com_golang_protobuf_ptypes_duration_duration_proto protoreflect.FileDescriptor - -var file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = []byte{ - 0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, - 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x64, 0x75, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x35, 0x5a, 0x33, 0x67, - 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, - 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, - 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3b, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = []interface{}{} -var file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() } -func file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() { - if File_github_com_golang_protobuf_ptypes_duration_duration_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc, - NumEnums: 0, - NumMessages: 0, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes, - DependencyIndexes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs, - }.Build() - File_github_com_golang_protobuf_ptypes_duration_duration_proto = out.File - file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = nil - file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = nil - file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = nil -} diff --git a/tools/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/tools/vendor/github.com/golang/protobuf/ptypes/timestamp.go deleted file mode 100644 index 8368a3f70..000000000 --- a/tools/vendor/github.com/golang/protobuf/ptypes/timestamp.go +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ptypes - -import ( - "errors" - "fmt" - "time" - - timestamppb "github.com/golang/protobuf/ptypes/timestamp" -) - -// Range of google.protobuf.Duration as specified in timestamp.proto. -const ( - // Seconds field of the earliest valid Timestamp. - // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). - minValidSeconds = -62135596800 - // Seconds field just after the latest valid Timestamp. 
- // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). - maxValidSeconds = 253402300800 -) - -// Timestamp converts a timestamppb.Timestamp to a time.Time. -// It returns an error if the argument is invalid. -// -// Unlike most Go functions, if Timestamp returns an error, the first return -// value is not the zero time.Time. Instead, it is the value obtained from the -// time.Unix function when passed the contents of the Timestamp, in the UTC -// locale. This may or may not be a meaningful time; many invalid Timestamps -// do map to valid time.Times. -// -// A nil Timestamp returns an error. The first return value in that case is -// undefined. -// -// Deprecated: Call the ts.AsTime and ts.CheckValid methods instead. -func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) { - // Don't return the zero value on error, because corresponds to a valid - // timestamp. Instead return whatever time.Unix gives us. - var t time.Time - if ts == nil { - t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp - } else { - t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC() - } - return t, validateTimestamp(ts) -} - -// TimestampNow returns a google.protobuf.Timestamp for the current time. -// -// Deprecated: Call the timestamppb.Now function instead. -func TimestampNow() *timestamppb.Timestamp { - ts, err := TimestampProto(time.Now()) - if err != nil { - panic("ptypes: time.Now() out of Timestamp range") - } - return ts -} - -// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. -// It returns an error if the resulting Timestamp is invalid. -// -// Deprecated: Call the timestamppb.New function instead. -func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) { - ts := &timestamppb.Timestamp{ - Seconds: t.Unix(), - Nanos: int32(t.Nanosecond()), - } - if err := validateTimestamp(ts); err != nil { - return nil, err - } - return ts, nil -} - -// TimestampString returns the RFC 3339 string for valid Timestamps. -// For invalid Timestamps, it returns an error message in parentheses. -// -// Deprecated: Call the ts.AsTime method instead, -// followed by a call to the Format method on the time.Time value. -func TimestampString(ts *timestamppb.Timestamp) string { - t, err := Timestamp(ts) - if err != nil { - return fmt.Sprintf("(%v)", err) - } - return t.Format(time.RFC3339Nano) -} - -// validateTimestamp determines whether a Timestamp is valid. -// A valid timestamp represents a time in the range [0001-01-01, 10000-01-01) -// and has a Nanos field in the range [0, 1e9). -// -// If the Timestamp is valid, validateTimestamp returns nil. -// Otherwise, it returns an error that describes the problem. -// -// Every valid Timestamp can be represented by a time.Time, -// but the converse is not true.
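The validity rules documented above carry over unchanged to the v2 well-known types, where `CheckValid` is the public replacement for these private validators. A small sketch (assuming only the `durationpb`/`timestamppb` packages named in the deprecation notes):

```go
package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/durationpb"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	// durationpb enforces the same +/-10,000-year seconds range and the
	// same Seconds/Nanos sign agreement as validateDuration above.
	bad := &durationpb.Duration{Seconds: 1, Nanos: -1}
	fmt.Println(bad.CheckValid()) // non-nil: mismatched signs

	// timestamppb enforces the same [0001-01-01, 10000-01-01) window and
	// Nanos in [0, 1e9) as validateTimestamp.
	ts := timestamppb.New(time.Date(2024, 1, 2, 3, 4, 5, 0, time.UTC))
	if err := ts.CheckValid(); err != nil {
		panic(err)
	}
	fmt.Println(ts.AsTime().Format(time.RFC3339)) // 2024-01-02T03:04:05Z
}
```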
-func validateTimestamp(ts *timestamppb.Timestamp) error { - if ts == nil { - return errors.New("timestamp: nil Timestamp") - } - if ts.Seconds < minValidSeconds { - return fmt.Errorf("timestamp: %v before 0001-01-01", ts) - } - if ts.Seconds >= maxValidSeconds { - return fmt.Errorf("timestamp: %v after 10000-01-01", ts) - } - if ts.Nanos < 0 || ts.Nanos >= 1e9 { - return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts) - } - return nil -} diff --git a/tools/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/tools/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go deleted file mode 100644 index a76f80760..000000000 --- a/tools/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go +++ /dev/null @@ -1,64 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto - -package timestamp - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - timestamppb "google.golang.org/protobuf/types/known/timestamppb" - reflect "reflect" -) - -// Symbols defined in public import of google/protobuf/timestamp.proto. - -type Timestamp = timestamppb.Timestamp - -var File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto protoreflect.FileDescriptor - -var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = []byte{ - 0x0a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, - 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2f, 0x74, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, - 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x37, - 0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, - 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x3b, 0x74, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, -} - -var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = []interface{}{} -var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() } -func file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() { - if File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc, - NumEnums: 0, - NumMessages: 0, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes, - 
DependencyIndexes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs, - }.Build() - File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto = out.File - file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = nil - file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = nil - file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = nil -} diff --git a/tools/vendor/github.com/golangci/golangci-lint/pkg/commands/config_verify.go b/tools/vendor/github.com/golangci/golangci-lint/pkg/commands/config_verify.go index 5a26c75ae..76e09581c 100644 --- a/tools/vendor/github.com/golangci/golangci-lint/pkg/commands/config_verify.go +++ b/tools/vendor/github.com/golangci/golangci-lint/pkg/commands/config_verify.go @@ -70,33 +70,58 @@ func createSchemaURL(flags *pflag.FlagSet, buildInfo BuildInfo) (string, error) return "", fmt.Errorf("parse version: %w", err) } - schemaURL = fmt.Sprintf("https://golangci-lint.run/jsonschema/golangci.v%d.%d.jsonschema.json", - version.Segments()[0], version.Segments()[1]) + if version.Core().Equal(hcversion.Must(hcversion.NewVersion("v0.0.0"))) { + commit, err := extractCommitHash(buildInfo) + if err != nil { + return "", err + } - case buildInfo.Commit != "" && buildInfo.Commit != "?": - if buildInfo.Commit == "unknown" { - return "", errors.New("unknown commit information") + return fmt.Sprintf("https://raw.githubusercontent.com/golangci/golangci-lint/%s/jsonschema/golangci.next.jsonschema.json", + commit), nil } - commit := buildInfo.Commit + return fmt.Sprintf("https://golangci-lint.run/jsonschema/golangci.v%d.%d.jsonschema.json", + version.Segments()[0], version.Segments()[1]), nil - if strings.HasPrefix(commit, "(") { - c, _, ok := strings.Cut(strings.TrimPrefix(commit, "("), ",") - if !ok { - return "", errors.New("commit information not found") - } - - commit = c + case buildInfo.Commit != "" && buildInfo.Commit != "?": + commit, err := extractCommitHash(buildInfo) + if err != nil { + return "", err } - schemaURL = fmt.Sprintf("https://raw.githubusercontent.com/golangci/golangci-lint/%s/jsonschema/golangci.next.jsonschema.json", - commit) + return fmt.Sprintf("https://raw.githubusercontent.com/golangci/golangci-lint/%s/jsonschema/golangci.next.jsonschema.json", + commit), nil default: return "", errors.New("version not found") } +} + +func extractCommitHash(buildInfo BuildInfo) (string, error) { + if buildInfo.Commit == "" || buildInfo.Commit == "?" 
{ + return "", errors.New("empty commit information") + } + + if buildInfo.Commit == "unknown" { + return "", errors.New("unknown commit information") + } + + commit := buildInfo.Commit + + if strings.HasPrefix(commit, "(") { + c, _, ok := strings.Cut(strings.TrimPrefix(commit, "("), ",") + if !ok { + return "", errors.New("commit information not found") + } + + commit = c + } + + if commit == "unknown" { + return "", errors.New("unknown commit information") + } - return schemaURL, nil + return commit, nil } func validateConfiguration(schemaPath, targetFile string) error { diff --git a/tools/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/builder_linter.go b/tools/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/builder_linter.go index 7fa63e526..ddeb99e14 100644 --- a/tools/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/builder_linter.go +++ b/tools/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/builder_linter.go @@ -770,7 +770,7 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) { WithPresets(linter.PresetTest). WithLoadForGoAnalysis(). WithURL("https://github.com/sivchari/tenv"). - DeprecatedWarning("Duplicate feature another linter.", "v1.64.0", "usetesting"), + DeprecatedWarning("Duplicate feature in another linter.", "v1.64.0", "usetesting"), linter.NewConfig(testableexamples.New()). WithSince("v1.50.0"). diff --git a/tools/vendor/github.com/google/go-cmp/cmp/internal/function/func.go b/tools/vendor/github.com/google/go-cmp/cmp/internal/function/func.go index d127d4362..def01a6be 100644 --- a/tools/vendor/github.com/google/go-cmp/cmp/internal/function/func.go +++ b/tools/vendor/github.com/google/go-cmp/cmp/internal/function/func.go @@ -19,6 +19,7 @@ const ( tbFunc // func(T) bool ttbFunc // func(T, T) bool + ttiFunc // func(T, T) int trbFunc // func(T, R) bool tibFunc // func(T, I) bool trFunc // func(T) R @@ -28,11 +29,13 @@ const ( Transformer = trFunc // func(T) R ValueFilter = ttbFunc // func(T, T) bool Less = ttbFunc // func(T, T) bool + Compare = ttiFunc // func(T, T) int ValuePredicate = tbFunc // func(T) bool KeyValuePredicate = trbFunc // func(T, R) bool ) var boolType = reflect.TypeOf(true) +var intType = reflect.TypeOf(0) // IsType reports whether the reflect.Type is of the specified function type. func IsType(t reflect.Type, ft funcType) bool { @@ -49,6 +52,10 @@ func IsType(t reflect.Type, ft funcType) bool { if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == boolType { return true } + case ttiFunc: // func(T, T) int + if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == intType { + return true + } case trbFunc: // func(T, R) bool if ni == 2 && no == 1 && t.Out(0) == boolType { return true diff --git a/tools/vendor/github.com/google/go-cmp/cmp/options.go b/tools/vendor/github.com/google/go-cmp/cmp/options.go index 754496f3b..ba3fce81f 100644 --- a/tools/vendor/github.com/google/go-cmp/cmp/options.go +++ b/tools/vendor/github.com/google/go-cmp/cmp/options.go @@ -232,7 +232,15 @@ func (validator) apply(s *state, vx, vy reflect.Value) { if t := s.curPath.Index(-2).Type(); t.Name() != "" { // Named type with unexported fields. 
name = fmt.Sprintf("%q.%v", t.PkgPath(), t.Name()) // e.g., "path/to/package".MyType - if _, ok := reflect.New(t).Interface().(error); ok { + isProtoMessage := func(t reflect.Type) bool { + m, ok := reflect.PointerTo(t).MethodByName("ProtoReflect") + return ok && m.Type.NumIn() == 1 && m.Type.NumOut() == 1 && + m.Type.Out(0).PkgPath() == "google.golang.org/protobuf/reflect/protoreflect" && + m.Type.Out(0).Name() == "Message" + } + if isProtoMessage(t) { + help = `consider using "google.golang.org/protobuf/testing/protocmp".Transform to compare proto.Message types` + } else if _, ok := reflect.New(t).Interface().(error); ok { help = "consider using cmpopts.EquateErrors to compare error values" } else if t.Comparable() { help = "consider using cmpopts.EquateComparable to compare comparable Go types" diff --git a/tools/vendor/github.com/gostaticanalysis/comment/.tagpr b/tools/vendor/github.com/gostaticanalysis/comment/.tagpr new file mode 100644 index 000000000..59bf98541 --- /dev/null +++ b/tools/vendor/github.com/gostaticanalysis/comment/.tagpr @@ -0,0 +1,35 @@ +# config file for the tagpr in git config format +# The tagpr generates the initial configuration, which you can rewrite to suit your environment. +# CONFIGURATIONS: +# tagpr.releaseBranch +# Generally, it is "main." It is the branch for releases. The pcpr tracks this branch, +# creates or updates a pull request as a release candidate, or tags when they are merged. +# +# tagpr.versionFile +# Versioning file containing the semantic version needed to be updated at release. +# It will be synchronized with the "git tag". +# Often this is a meta-information file such as gemspec, setup.cfg, package.json, etc. +# Sometimes the source code file, such as version.go or Bar.pm, is used. +# If you do not want to use versioning files but only git tags, specify the "-" string here. +# You can specify multiple version files by comma separated strings. +# +# tagpr.vPrefix +# Flag whether or not v-prefix is added to semver when git tagging. (e.g. v1.2.3 if true) +# This is only a tagging convention, not how it is described in the version file. +# +# tagpr.changelog (Optional) +# Flag whether or not changelog is added or changed during the release. +# +# tagpr.command (Optional) +# Command to change files just before release. +# +# tagpr.tmplate (Optional) +# Pull request template in go template format +# +# tagpr.release (Optional) +# GitHub Release creation behavior after tagging [true, draft, false] +# If this value is not set, the release is to be created. 
+[tagpr] + vPrefix = true + releaseBranch = main + versionFile = version.txt diff --git a/tools/vendor/github.com/gostaticanalysis/comment/CHANGELOG.md b/tools/vendor/github.com/gostaticanalysis/comment/CHANGELOG.md new file mode 100644 index 000000000..941cc15ff --- /dev/null +++ b/tools/vendor/github.com/gostaticanalysis/comment/CHANGELOG.md @@ -0,0 +1,34 @@ +# Changelog + +## [v1.5.0](https://github.com/gostaticanalysis/comment/compare/v1.4.2...v1.5.0) - 2024-11-15 +- Add tagpr and testvet by @tenntenn in https://github.com/gostaticanalysis/comment/pull/18 +- Add IgnorePosLine and deprecate IgnoreLine by @neglect-yp in https://github.com/gostaticanalysis/comment/pull/17 +- Fix errors for testvet by @tenntenn in https://github.com/gostaticanalysis/comment/pull/20 +- Add version.txt by @tenntenn in https://github.com/gostaticanalysis/comment/pull/21 +- Update go version and dependencies by @tenntenn in https://github.com/gostaticanalysis/comment/pull/19 + +## [v1.4.2](https://github.com/gostaticanalysis/comment/compare/v1.4.1...v1.4.2) - 2021-03-03 +- passes/commentmap: use txtar for testdata by @zchee in https://github.com/gostaticanalysis/comment/pull/14 +- github/workflows: add test GHA by @zchee in https://github.com/gostaticanalysis/comment/pull/15 +- omment: fix hasIgnoreCheck to more pares lines by @zchee in https://github.com/gostaticanalysis/comment/pull/16 + +## [v1.4.1](https://github.com/gostaticanalysis/comment/compare/v1.4.0...v1.4.1) - 2020-09-10 +- Fix comment directive parsing in Go 1.15+ by @nmiyake in https://github.com/gostaticanalysis/comment/pull/13 +- gofmt files by @nmiyake in https://github.com/gostaticanalysis/comment/pull/12 +- Fix logic error in hasIgnoreCheck by @nmiyake in https://github.com/gostaticanalysis/comment/pull/11 + +## [v1.4.0](https://github.com/gostaticanalysis/comment/compare/v1.3.0...v1.4.0) - 2020-08-20 +- Add CommentsByPosLine by @tenntenn in https://github.com/gostaticanalysis/comment/pull/9 + +## [v1.3.0](https://github.com/gostaticanalysis/comment/compare/v1.2.0...v1.3.0) - 2020-01-30 +- Fix link to ast package by @po3rin in https://github.com/gostaticanalysis/comment/pull/4 +- Add IgnoreLine by @tenntenn in https://github.com/gostaticanalysis/comment/pull/5 + +## [v1.2.0](https://github.com/gostaticanalysis/comment/compare/v1.1.0...v1.2.0) - 2019-03-18 +- Add IgnorePos by @tenntenn in https://github.com/gostaticanalysis/comment/pull/3 + +## [v1.1.0](https://github.com/gostaticanalysis/comment/compare/v1.0.0...v1.1.0) - 2019-03-08 +- Add ignore by @tenntenn in https://github.com/gostaticanalysis/comment/pull/1 +- Fix Ignore and add tests by @tenntenn in https://github.com/gostaticanalysis/comment/pull/2 + +## [v1.0.0](https://github.com/gostaticanalysis/comment/commits/v1.0.0) - 2019-03-08 diff --git a/tools/vendor/github.com/gostaticanalysis/comment/comment.go b/tools/vendor/github.com/gostaticanalysis/comment/comment.go index 79cb09382..2e418a466 100644 --- a/tools/vendor/github.com/gostaticanalysis/comment/comment.go +++ b/tools/vendor/github.com/gostaticanalysis/comment/comment.go @@ -52,7 +52,8 @@ func (maps Maps) Annotated(n ast.Node, annotation string) bool { // Ignore checks either specified AST node is ignored by the check. // It follows staticcheck style as the below. 
-// //lint:ignore Check1[,Check2,...,CheckN] reason +// +// //lint:ignore Check1[,Check2,...,CheckN] reason func (maps Maps) Ignore(n ast.Node, check string) bool { for _, cg := range maps.Comments(n) { if hasIgnoreCheck(cg, check) { @@ -64,7 +65,8 @@ func (maps Maps) Ignore(n ast.Node, check string) bool { // IgnorePos checks either specified postion of AST node is ignored by the check. // It follows staticcheck style as the below. -// //lint:ignore Check1[,Check2,...,CheckN] reason +// +// //lint:ignore Check1[,Check2,...,CheckN] reason func (maps Maps) IgnorePos(pos token.Pos, check string) bool { for _, cg := range maps.CommentsByPos(pos) { if hasIgnoreCheck(cg, check) { @@ -109,9 +111,11 @@ func (maps Maps) CommentsByPosLine(fset *token.FileSet, pos token.Pos) []*ast.Co return nil } +// Deprecated: This function does not work with multiple files. // IgnoreLine checks either specified lineof AST node is ignored by the check. // It follows staticcheck style as the below. -// //lint:ignore Check1[,Check2,...,CheckN] reason +// +// //lint:ignore Check1[,Check2,...,CheckN] reason func (maps Maps) IgnoreLine(fset *token.FileSet, line int, check string) bool { for _, cg := range maps.CommentsByLine(fset, line) { if hasIgnoreCheck(cg, check) { @@ -121,6 +125,19 @@ func (maps Maps) IgnoreLine(fset *token.FileSet, line int, check string) bool { return false } +// IgnorePosLine checks either specified lineof AST node is ignored by the check. +// It follows staticcheck style as the below. +// +// //lint:ignore Check1[,Check2,...,CheckN] reason +func (maps Maps) IgnorePosLine(fset *token.FileSet, pos token.Pos, check string) bool { + for _, cg := range maps.CommentsByPosLine(fset, pos) { + if hasIgnoreCheck(cg, check) { + return true + } + } + return false +} + // hasIgnoreCheck returns true if the provided CommentGroup starts with a comment // of the form "//lint:ignore Check1[,Check2,...,CheckN] reason" and one of the // checks matches the provided check. diff --git a/tools/vendor/github.com/gostaticanalysis/comment/version.txt b/tools/vendor/github.com/gostaticanalysis/comment/version.txt new file mode 100644 index 000000000..2e7bd9108 --- /dev/null +++ b/tools/vendor/github.com/gostaticanalysis/comment/version.txt @@ -0,0 +1 @@ +v1.5.0 diff --git a/tools/vendor/github.com/hashicorp/hcl/.gitignore b/tools/vendor/github.com/hashicorp/hcl/.gitignore deleted file mode 100644 index 15586a2b5..000000000 --- a/tools/vendor/github.com/hashicorp/hcl/.gitignore +++ /dev/null @@ -1,9 +0,0 @@ -y.output - -# ignore intellij files -.idea -*.iml -*.ipr -*.iws - -*.test diff --git a/tools/vendor/github.com/hashicorp/hcl/.travis.yml b/tools/vendor/github.com/hashicorp/hcl/.travis.yml deleted file mode 100644 index cb63a3216..000000000 --- a/tools/vendor/github.com/hashicorp/hcl/.travis.yml +++ /dev/null @@ -1,13 +0,0 @@ -sudo: false - -language: go - -go: - - 1.x - - tip - -branches: - only: - - master - -script: make test diff --git a/tools/vendor/github.com/hashicorp/hcl/LICENSE b/tools/vendor/github.com/hashicorp/hcl/LICENSE deleted file mode 100644 index c33dcc7c9..000000000 --- a/tools/vendor/github.com/hashicorp/hcl/LICENSE +++ /dev/null @@ -1,354 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. “Contributor” - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. 
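For context on the gostaticanalysis/comment API extended above: IgnorePosLine is the v1.5.0 addition that resolves the line through the *token.FileSet, which is why the older IgnoreLine is now marked deprecated for multi-file analyses. A small sketch using the long-standing Ignore entry point; the check name and source are illustrative:

```go
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"

	"github.com/gostaticanalysis/comment"
)

const src = `package p

//lint:ignore mycheck reason for suppressing the finding
var X = 1
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, parser.ParseComments)
	if err != nil {
		panic(err)
	}
	maps := comment.New(fset, []*ast.File{f})
	// True: the declaration carries a //lint:ignore directive for "mycheck".
	fmt.Println(maps.Ignore(f.Decls[0], "mycheck"))
}
```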
“Contributor Version” - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor’s Contribution. - -1.3. “Contribution” - - means Covered Software of a particular Contributor. - -1.4. “Covered Software” - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. “Incompatible With Secondary Licenses” - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. - -1.6. “Executable Form” - - means any form of the work other than Source Code Form. - -1.7. “Larger Work” - - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. - -1.8. “License” - - means this document. - -1.9. “Licensable” - - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. - -1.10. “Modifications” - - means any of the following: - - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. “Patent Claims” of a Contributor - - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. - -1.12. “Secondary License” - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. “Source Code Form” - - means the form of the work preferred for making modifications. - -1.14. “You” (or “Your”) - - means an individual or a legal entity exercising rights under this - License. For legal entities, “You” includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. - -2.2. 
Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party’s - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. - - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. 
If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. - Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. 
- -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license - steward. - -10.3. 
Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses - If You choose to distribute Source Code Form that is Incompatible With - Secondary Licenses under the terms of this version of the License, the - notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, then -You may include the notice in a location (such as a LICENSE file in a relevant -directory) where a recipient would be likely to look for such a notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - “Incompatible With Secondary Licenses” Notice - - This Source Code Form is “Incompatible - With Secondary Licenses”, as defined by - the Mozilla Public License, v. 2.0. - diff --git a/tools/vendor/github.com/hashicorp/hcl/Makefile b/tools/vendor/github.com/hashicorp/hcl/Makefile deleted file mode 100644 index 84fd743f5..000000000 --- a/tools/vendor/github.com/hashicorp/hcl/Makefile +++ /dev/null @@ -1,18 +0,0 @@ -TEST?=./... - -default: test - -fmt: generate - go fmt ./... - -test: generate - go get -t ./... - go test $(TEST) $(TESTARGS) - -generate: - go generate ./... - -updatedeps: - go get -u golang.org/x/tools/cmd/stringer - -.PHONY: default generate test updatedeps diff --git a/tools/vendor/github.com/hashicorp/hcl/README.md b/tools/vendor/github.com/hashicorp/hcl/README.md deleted file mode 100644 index c8223326d..000000000 --- a/tools/vendor/github.com/hashicorp/hcl/README.md +++ /dev/null @@ -1,125 +0,0 @@ -# HCL - -[![GoDoc](https://godoc.org/github.com/hashicorp/hcl?status.png)](https://godoc.org/github.com/hashicorp/hcl) [![Build Status](https://travis-ci.org/hashicorp/hcl.svg?branch=master)](https://travis-ci.org/hashicorp/hcl) - -HCL (HashiCorp Configuration Language) is a configuration language built -by HashiCorp. The goal of HCL is to build a structured configuration language -that is both human and machine friendly for use with command-line tools, but -specifically targeted towards DevOps tools, servers, etc. - -HCL is also fully JSON compatible. That is, JSON can be used as completely -valid input to a system expecting HCL. This helps makes systems -interoperable with other systems. - -HCL is heavily inspired by -[libucl](https://github.com/vstakhov/libucl), -nginx configuration, and others similar. - -## Why? - -A common question when viewing HCL is to ask the question: why not -JSON, YAML, etc.? - -Prior to HCL, the tools we built at [HashiCorp](http://www.hashicorp.com) -used a variety of configuration languages from full programming languages -such as Ruby to complete data structure languages such as JSON. What we -learned is that some people wanted human-friendly configuration languages -and some people wanted machine-friendly languages. - -JSON fits a nice balance in this, but is fairly verbose and most -importantly doesn't support comments. 
With YAML, we found that beginners -had a really hard time determining what the actual structure was, and -ended up guessing more often than not whether to use a hyphen, colon, etc. -in order to represent some configuration key. - -Full programming languages such as Ruby enable complex behavior -a configuration language shouldn't usually allow, and also forces -people to learn some set of Ruby. - -Because of this, we decided to create our own configuration language -that is JSON-compatible. Our configuration language (HCL) is designed -to be written and modified by humans. The API for HCL allows JSON -as an input so that it is also machine-friendly (machines can generate -JSON instead of trying to generate HCL). - -Our goal with HCL is not to alienate other configuration languages. -It is instead to provide HCL as a specialized language for our tools, -and JSON as the interoperability layer. - -## Syntax - -For a complete grammar, please see the parser itself. A high-level overview -of the syntax and grammar is listed here. - - * Single line comments start with `#` or `//` - - * Multi-line comments are wrapped in `/*` and `*/`. Nested block comments - are not allowed. A multi-line comment (also known as a block comment) - terminates at the first `*/` found. - - * Values are assigned with the syntax `key = value` (whitespace doesn't - matter). The value can be any primitive: a string, number, boolean, - object, or list. - - * Strings are double-quoted and can contain any UTF-8 characters. - Example: `"Hello, World"` - - * Multi-line strings start with `<- - echo %Path% - - go version - - go env - - go get -t ./... - -build_script: -- cmd: go test -v ./... diff --git a/tools/vendor/github.com/hashicorp/hcl/decoder.go b/tools/vendor/github.com/hashicorp/hcl/decoder.go deleted file mode 100644 index bed9ebbe1..000000000 --- a/tools/vendor/github.com/hashicorp/hcl/decoder.go +++ /dev/null @@ -1,729 +0,0 @@ -package hcl - -import ( - "errors" - "fmt" - "reflect" - "sort" - "strconv" - "strings" - - "github.com/hashicorp/hcl/hcl/ast" - "github.com/hashicorp/hcl/hcl/parser" - "github.com/hashicorp/hcl/hcl/token" -) - -// This is the tag to use with structures to have settings for HCL -const tagName = "hcl" - -var ( - // nodeType holds a reference to the type of ast.Node - nodeType reflect.Type = findNodeType() -) - -// Unmarshal accepts a byte slice as input and writes the -// data to the value pointed to by v. -func Unmarshal(bs []byte, v interface{}) error { - root, err := parse(bs) - if err != nil { - return err - } - - return DecodeObject(v, root) -} - -// Decode reads the given input and decodes it into the structure -// given by `out`. -func Decode(out interface{}, in string) error { - obj, err := Parse(in) - if err != nil { - return err - } - - return DecodeObject(out, obj) -} - -// DecodeObject is a lower-level version of Decode. It decodes a -// raw Object into the given output. -func DecodeObject(out interface{}, n ast.Node) error { - val := reflect.ValueOf(out) - if val.Kind() != reflect.Ptr { - return errors.New("result must be a pointer") - } - - // If we have the file, we really decode the root node - if f, ok := n.(*ast.File); ok { - n = f.Node - } - - var d decoder - return d.decode("root", n, val.Elem()) -} - -type decoder struct { - stack []reflect.Kind -} - -func (d *decoder) decode(name string, node ast.Node, result reflect.Value) error { - k := result - - // If we have an interface with a valid value, we use that - // for the check. 
- if result.Kind() == reflect.Interface { - elem := result.Elem() - if elem.IsValid() { - k = elem - } - } - - // Push current onto stack unless it is an interface. - if k.Kind() != reflect.Interface { - d.stack = append(d.stack, k.Kind()) - - // Schedule a pop - defer func() { - d.stack = d.stack[:len(d.stack)-1] - }() - } - - switch k.Kind() { - case reflect.Bool: - return d.decodeBool(name, node, result) - case reflect.Float32, reflect.Float64: - return d.decodeFloat(name, node, result) - case reflect.Int, reflect.Int32, reflect.Int64: - return d.decodeInt(name, node, result) - case reflect.Interface: - // When we see an interface, we make our own thing - return d.decodeInterface(name, node, result) - case reflect.Map: - return d.decodeMap(name, node, result) - case reflect.Ptr: - return d.decodePtr(name, node, result) - case reflect.Slice: - return d.decodeSlice(name, node, result) - case reflect.String: - return d.decodeString(name, node, result) - case reflect.Struct: - return d.decodeStruct(name, node, result) - default: - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: unknown kind to decode into: %s", name, k.Kind()), - } - } -} - -func (d *decoder) decodeBool(name string, node ast.Node, result reflect.Value) error { - switch n := node.(type) { - case *ast.LiteralType: - if n.Token.Type == token.BOOL { - v, err := strconv.ParseBool(n.Token.Text) - if err != nil { - return err - } - - result.Set(reflect.ValueOf(v)) - return nil - } - } - - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: unknown type %T", name, node), - } -} - -func (d *decoder) decodeFloat(name string, node ast.Node, result reflect.Value) error { - switch n := node.(type) { - case *ast.LiteralType: - if n.Token.Type == token.FLOAT || n.Token.Type == token.NUMBER { - v, err := strconv.ParseFloat(n.Token.Text, 64) - if err != nil { - return err - } - - result.Set(reflect.ValueOf(v).Convert(result.Type())) - return nil - } - } - - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: unknown type %T", name, node), - } -} - -func (d *decoder) decodeInt(name string, node ast.Node, result reflect.Value) error { - switch n := node.(type) { - case *ast.LiteralType: - switch n.Token.Type { - case token.NUMBER: - v, err := strconv.ParseInt(n.Token.Text, 0, 0) - if err != nil { - return err - } - - if result.Kind() == reflect.Interface { - result.Set(reflect.ValueOf(int(v))) - } else { - result.SetInt(v) - } - return nil - case token.STRING: - v, err := strconv.ParseInt(n.Token.Value().(string), 0, 0) - if err != nil { - return err - } - - if result.Kind() == reflect.Interface { - result.Set(reflect.ValueOf(int(v))) - } else { - result.SetInt(v) - } - return nil - } - } - - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: unknown type %T", name, node), - } -} - -func (d *decoder) decodeInterface(name string, node ast.Node, result reflect.Value) error { - // When we see an ast.Node, we retain the value to enable deferred decoding. - // Very useful in situations where we want to preserve ast.Node information - // like Pos - if result.Type() == nodeType && result.CanSet() { - result.Set(reflect.ValueOf(node)) - return nil - } - - var set reflect.Value - redecode := true - - // For testing types, ObjectType should just be treated as a list. We - // set this to a temporary var because we want to pass in the real node. 
- testNode := node - if ot, ok := node.(*ast.ObjectType); ok { - testNode = ot.List - } - - switch n := testNode.(type) { - case *ast.ObjectList: - // If we're at the root or we're directly within a slice, then we - // decode objects into map[string]interface{}, otherwise we decode - // them into lists. - if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice { - var temp map[string]interface{} - tempVal := reflect.ValueOf(temp) - result := reflect.MakeMap( - reflect.MapOf( - reflect.TypeOf(""), - tempVal.Type().Elem())) - - set = result - } else { - var temp []map[string]interface{} - tempVal := reflect.ValueOf(temp) - result := reflect.MakeSlice( - reflect.SliceOf(tempVal.Type().Elem()), 0, len(n.Items)) - set = result - } - case *ast.ObjectType: - // If we're at the root or we're directly within a slice, then we - // decode objects into map[string]interface{}, otherwise we decode - // them into lists. - if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice { - var temp map[string]interface{} - tempVal := reflect.ValueOf(temp) - result := reflect.MakeMap( - reflect.MapOf( - reflect.TypeOf(""), - tempVal.Type().Elem())) - - set = result - } else { - var temp []map[string]interface{} - tempVal := reflect.ValueOf(temp) - result := reflect.MakeSlice( - reflect.SliceOf(tempVal.Type().Elem()), 0, 1) - set = result - } - case *ast.ListType: - var temp []interface{} - tempVal := reflect.ValueOf(temp) - result := reflect.MakeSlice( - reflect.SliceOf(tempVal.Type().Elem()), 0, 0) - set = result - case *ast.LiteralType: - switch n.Token.Type { - case token.BOOL: - var result bool - set = reflect.Indirect(reflect.New(reflect.TypeOf(result))) - case token.FLOAT: - var result float64 - set = reflect.Indirect(reflect.New(reflect.TypeOf(result))) - case token.NUMBER: - var result int - set = reflect.Indirect(reflect.New(reflect.TypeOf(result))) - case token.STRING, token.HEREDOC: - set = reflect.Indirect(reflect.New(reflect.TypeOf(""))) - default: - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: cannot decode into interface: %T", name, node), - } - } - default: - return fmt.Errorf( - "%s: cannot decode into interface: %T", - name, node) - } - - // Set the result to what its supposed to be, then reset - // result so we don't reflect into this method anymore. - result.Set(set) - - if redecode { - // Revisit the node so that we can use the newly instantiated - // thing and populate it. 
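The kind stack consulted in decodeInterface above is what makes HCL1's interface{} decoding context sensitive: at the root (or directly under a slice) objects become map[string]interface{}, while nested object blocks become []map[string]interface{}. A sketch of that behavior, traced from the deleted code rather than from documentation:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl"
)

func main() {
	var v interface{}
	// Root object -> map[string]interface{}; the nested "b" block ->
	// []map[string]interface{} because it is not at the root.
	if err := hcl.Decode(&v, "a = 1\nb { c = 2 }"); err != nil {
		panic(err)
	}
	fmt.Printf("%#v\n", v)
	// map[string]interface {}{"a":1, "b":[]map[string]interface {}{...}}
}
```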
- if err := d.decode(name, node, result); err != nil { - return err - } - } - - return nil -} - -func (d *decoder) decodeMap(name string, node ast.Node, result reflect.Value) error { - if item, ok := node.(*ast.ObjectItem); ok { - node = &ast.ObjectList{Items: []*ast.ObjectItem{item}} - } - - if ot, ok := node.(*ast.ObjectType); ok { - node = ot.List - } - - n, ok := node.(*ast.ObjectList) - if !ok { - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: not an object type for map (%T)", name, node), - } - } - - // If we have an interface, then we can address the interface, - // but not the slice itself, so get the element but set the interface - set := result - if result.Kind() == reflect.Interface { - result = result.Elem() - } - - resultType := result.Type() - resultElemType := resultType.Elem() - resultKeyType := resultType.Key() - if resultKeyType.Kind() != reflect.String { - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: map must have string keys", name), - } - } - - // Make a map if it is nil - resultMap := result - if result.IsNil() { - resultMap = reflect.MakeMap( - reflect.MapOf(resultKeyType, resultElemType)) - } - - // Go through each element and decode it. - done := make(map[string]struct{}) - for _, item := range n.Items { - if item.Val == nil { - continue - } - - // github.com/hashicorp/terraform/issue/5740 - if len(item.Keys) == 0 { - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: map must have string keys", name), - } - } - - // Get the key we're dealing with, which is the first item - keyStr := item.Keys[0].Token.Value().(string) - - // If we've already processed this key, then ignore it - if _, ok := done[keyStr]; ok { - continue - } - - // Determine the value. If we have more than one key, then we - // get the objectlist of only these keys. - itemVal := item.Val - if len(item.Keys) > 1 { - itemVal = n.Filter(keyStr) - done[keyStr] = struct{}{} - } - - // Make the field name - fieldName := fmt.Sprintf("%s.%s", name, keyStr) - - // Get the key/value as reflection values - key := reflect.ValueOf(keyStr) - val := reflect.Indirect(reflect.New(resultElemType)) - - // If we have a pre-existing value in the map, use that - oldVal := resultMap.MapIndex(key) - if oldVal.IsValid() { - val.Set(oldVal) - } - - // Decode! - if err := d.decode(fieldName, itemVal, val); err != nil { - return err - } - - // Set the value on the map - resultMap.SetMapIndex(key, val) - } - - // Set the final map if we can - set.Set(resultMap) - return nil -} - -func (d *decoder) decodePtr(name string, node ast.Node, result reflect.Value) error { - // Create an element of the concrete (non pointer) type and decode - // into that. Then set the value of the pointer to this type. 
- resultType := result.Type() - resultElemType := resultType.Elem() - val := reflect.New(resultElemType) - if err := d.decode(name, node, reflect.Indirect(val)); err != nil { - return err - } - - result.Set(val) - return nil -} - -func (d *decoder) decodeSlice(name string, node ast.Node, result reflect.Value) error { - // If we have an interface, then we can address the interface, - // but not the slice itself, so get the element but set the interface - set := result - if result.Kind() == reflect.Interface { - result = result.Elem() - } - // Create the slice if it isn't nil - resultType := result.Type() - resultElemType := resultType.Elem() - if result.IsNil() { - resultSliceType := reflect.SliceOf(resultElemType) - result = reflect.MakeSlice( - resultSliceType, 0, 0) - } - - // Figure out the items we'll be copying into the slice - var items []ast.Node - switch n := node.(type) { - case *ast.ObjectList: - items = make([]ast.Node, len(n.Items)) - for i, item := range n.Items { - items[i] = item - } - case *ast.ObjectType: - items = []ast.Node{n} - case *ast.ListType: - items = n.List - default: - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("unknown slice type: %T", node), - } - } - - for i, item := range items { - fieldName := fmt.Sprintf("%s[%d]", name, i) - - // Decode - val := reflect.Indirect(reflect.New(resultElemType)) - - // if item is an object that was decoded from ambiguous JSON and - // flattened, make sure it's expanded if it needs to decode into a - // defined structure. - item := expandObject(item, val) - - if err := d.decode(fieldName, item, val); err != nil { - return err - } - - // Append it onto the slice - result = reflect.Append(result, val) - } - - set.Set(result) - return nil -} - -// expandObject detects if an ambiguous JSON object was flattened to a List which -// should be decoded into a struct, and expands the ast to properly deocode. -func expandObject(node ast.Node, result reflect.Value) ast.Node { - item, ok := node.(*ast.ObjectItem) - if !ok { - return node - } - - elemType := result.Type() - - // our target type must be a struct - switch elemType.Kind() { - case reflect.Ptr: - switch elemType.Elem().Kind() { - case reflect.Struct: - //OK - default: - return node - } - case reflect.Struct: - //OK - default: - return node - } - - // A list value will have a key and field name. If it had more fields, - // it wouldn't have been flattened. 
- if len(item.Keys) != 2 { - return node - } - - keyToken := item.Keys[0].Token - item.Keys = item.Keys[1:] - - // we need to un-flatten the ast enough to decode - newNode := &ast.ObjectItem{ - Keys: []*ast.ObjectKey{ - &ast.ObjectKey{ - Token: keyToken, - }, - }, - Val: &ast.ObjectType{ - List: &ast.ObjectList{ - Items: []*ast.ObjectItem{item}, - }, - }, - } - - return newNode -} - -func (d *decoder) decodeString(name string, node ast.Node, result reflect.Value) error { - switch n := node.(type) { - case *ast.LiteralType: - switch n.Token.Type { - case token.NUMBER: - result.Set(reflect.ValueOf(n.Token.Text).Convert(result.Type())) - return nil - case token.STRING, token.HEREDOC: - result.Set(reflect.ValueOf(n.Token.Value()).Convert(result.Type())) - return nil - } - } - - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: unknown type for string %T", name, node), - } -} - -func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value) error { - var item *ast.ObjectItem - if it, ok := node.(*ast.ObjectItem); ok { - item = it - node = it.Val - } - - if ot, ok := node.(*ast.ObjectType); ok { - node = ot.List - } - - // Handle the special case where the object itself is a literal. Previously - // the yacc parser would always ensure top-level elements were arrays. The new - // parser does not make the same guarantees, thus we need to convert any - // top-level literal elements into a list. - if _, ok := node.(*ast.LiteralType); ok && item != nil { - node = &ast.ObjectList{Items: []*ast.ObjectItem{item}} - } - - list, ok := node.(*ast.ObjectList) - if !ok { - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: not an object type for struct (%T)", name, node), - } - } - - // This slice will keep track of all the structs we'll be decoding. - // There can be more than one struct if there are embedded structs - // that are squashed. - structs := make([]reflect.Value, 1, 5) - structs[0] = result - - // Compile the list of all the fields that we're going to be decoding - // from all the structs. - type field struct { - field reflect.StructField - val reflect.Value - } - fields := []field{} - for len(structs) > 0 { - structVal := structs[0] - structs = structs[1:] - - structType := structVal.Type() - for i := 0; i < structType.NumField(); i++ { - fieldType := structType.Field(i) - tagParts := strings.Split(fieldType.Tag.Get(tagName), ",") - - // Ignore fields with tag name "-" - if tagParts[0] == "-" { - continue - } - - if fieldType.Anonymous { - fieldKind := fieldType.Type.Kind() - if fieldKind != reflect.Struct { - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: unsupported type to struct: %s", - fieldType.Name, fieldKind), - } - } - - // We have an embedded field. We "squash" the fields down - // if specified in the tag. 
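The comments above enumerate the struct-tag features the deleted decodeStruct supported: "-" to skip a field, ",squash" to flatten an embedded struct, ",key" to capture a block label, plus the decodedFields/unusedKeys bookkeeping tags. A sketch of the common combination; the types and input are illustrative:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl"
)

type Base struct {
	Region string `hcl:"region"`
}

type Server struct {
	Base `hcl:",squash"` // embedded fields decode as if declared inline
	Name string `hcl:",key"` // receives the block label, e.g. "web"
	Addr string `hcl:"addr"`
}

type Config struct {
	Servers []Server `hcl:"server"`
}

func main() {
	const src = `
server "web" {
  region = "eu-west-1"
  addr   = "10.0.0.1"
}
`
	var c Config
	if err := hcl.Decode(&c, src); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", c.Servers[0]) // {Base:{Region:eu-west-1} Name:web Addr:10.0.0.1}
}
```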
- squash := false - for _, tag := range tagParts[1:] { - if tag == "squash" { - squash = true - break - } - } - - if squash { - structs = append( - structs, result.FieldByName(fieldType.Name)) - continue - } - } - - // Normal struct field, store it away - fields = append(fields, field{fieldType, structVal.Field(i)}) - } - } - - usedKeys := make(map[string]struct{}) - decodedFields := make([]string, 0, len(fields)) - decodedFieldsVal := make([]reflect.Value, 0) - unusedKeysVal := make([]reflect.Value, 0) - for _, f := range fields { - field, fieldValue := f.field, f.val - if !fieldValue.IsValid() { - // This should never happen - panic("field is not valid") - } - - // If we can't set the field, then it is unexported or something, - // and we just continue onwards. - if !fieldValue.CanSet() { - continue - } - - fieldName := field.Name - - tagValue := field.Tag.Get(tagName) - tagParts := strings.SplitN(tagValue, ",", 2) - if len(tagParts) >= 2 { - switch tagParts[1] { - case "decodedFields": - decodedFieldsVal = append(decodedFieldsVal, fieldValue) - continue - case "key": - if item == nil { - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: %s asked for 'key', impossible", - name, fieldName), - } - } - - fieldValue.SetString(item.Keys[0].Token.Value().(string)) - continue - case "unusedKeys": - unusedKeysVal = append(unusedKeysVal, fieldValue) - continue - } - } - - if tagParts[0] != "" { - fieldName = tagParts[0] - } - - // Determine the element we'll use to decode. If it is a single - // match (only object with the field), then we decode it exactly. - // If it is a prefix match, then we decode the matches. - filter := list.Filter(fieldName) - - prefixMatches := filter.Children() - matches := filter.Elem() - if len(matches.Items) == 0 && len(prefixMatches.Items) == 0 { - continue - } - - // Track the used key - usedKeys[fieldName] = struct{}{} - - // Create the field name and decode. We range over the elements - // because we actually want the value. - fieldName = fmt.Sprintf("%s.%s", name, fieldName) - if len(prefixMatches.Items) > 0 { - if err := d.decode(fieldName, prefixMatches, fieldValue); err != nil { - return err - } - } - for _, match := range matches.Items { - var decodeNode ast.Node = match.Val - if ot, ok := decodeNode.(*ast.ObjectType); ok { - decodeNode = &ast.ObjectList{Items: ot.List.Items} - } - - if err := d.decode(fieldName, decodeNode, fieldValue); err != nil { - return err - } - } - - decodedFields = append(decodedFields, field.Name) - } - - if len(decodedFieldsVal) > 0 { - // Sort it so that it is deterministic - sort.Strings(decodedFields) - - for _, v := range decodedFieldsVal { - v.Set(reflect.ValueOf(decodedFields)) - } - } - - return nil -} - -// findNodeType returns the type of ast.Node -func findNodeType() reflect.Type { - var nodeContainer struct { - Node ast.Node - } - value := reflect.ValueOf(nodeContainer).FieldByName("Node") - return value.Type() -} diff --git a/tools/vendor/github.com/hashicorp/hcl/hcl.go b/tools/vendor/github.com/hashicorp/hcl/hcl.go deleted file mode 100644 index 575a20b50..000000000 --- a/tools/vendor/github.com/hashicorp/hcl/hcl.go +++ /dev/null @@ -1,11 +0,0 @@ -// Package hcl decodes HCL into usable Go structures. -// -// hcl input can come in either pure HCL format or JSON format. -// It can be parsed into an AST, and then decoded into a structure, -// or it can be decoded directly from a string into a structure. 
-// -// If you choose to parse HCL into a raw AST, the benefit is that you -// can write custom visitor implementations to implement custom -// semantic checks. By default, HCL does not perform any semantic -// checks. -package hcl diff --git a/tools/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go b/tools/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go deleted file mode 100644 index 6e5ef654b..000000000 --- a/tools/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go +++ /dev/null @@ -1,219 +0,0 @@ -// Package ast declares the types used to represent syntax trees for HCL -// (HashiCorp Configuration Language) -package ast - -import ( - "fmt" - "strings" - - "github.com/hashicorp/hcl/hcl/token" -) - -// Node is an element in the abstract syntax tree. -type Node interface { - node() - Pos() token.Pos -} - -func (File) node() {} -func (ObjectList) node() {} -func (ObjectKey) node() {} -func (ObjectItem) node() {} -func (Comment) node() {} -func (CommentGroup) node() {} -func (ObjectType) node() {} -func (LiteralType) node() {} -func (ListType) node() {} - -// File represents a single HCL file -type File struct { - Node Node // usually a *ObjectList - Comments []*CommentGroup // list of all comments in the source -} - -func (f *File) Pos() token.Pos { - return f.Node.Pos() -} - -// ObjectList represents a list of ObjectItems. An HCL file itself is an -// ObjectList. -type ObjectList struct { - Items []*ObjectItem -} - -func (o *ObjectList) Add(item *ObjectItem) { - o.Items = append(o.Items, item) -} - -// Filter filters out the objects with the given key list as a prefix. -// -// The returned list of objects contain ObjectItems where the keys have -// this prefix already stripped off. This might result in objects with -// zero-length key lists if they have no children. -// -// If no matches are found, an empty ObjectList (non-nil) is returned. -func (o *ObjectList) Filter(keys ...string) *ObjectList { - var result ObjectList - for _, item := range o.Items { - // If there aren't enough keys, then ignore this - if len(item.Keys) < len(keys) { - continue - } - - match := true - for i, key := range item.Keys[:len(keys)] { - key := key.Token.Value().(string) - if key != keys[i] && !strings.EqualFold(key, keys[i]) { - match = false - break - } - } - if !match { - continue - } - - // Strip off the prefix from the children - newItem := *item - newItem.Keys = newItem.Keys[len(keys):] - result.Add(&newItem) - } - - return &result -} - -// Children returns further nested objects (key length > 0) within this -// ObjectList. This should be used with Filter to get at child items. -func (o *ObjectList) Children() *ObjectList { - var result ObjectList - for _, item := range o.Items { - if len(item.Keys) > 0 { - result.Add(item) - } - } - - return &result -} - -// Elem returns items in the list that are direct element assignments -// (key length == 0). This should be used with Filter to get at elements. -func (o *ObjectList) Elem() *ObjectList { - var result ObjectList - for _, item := range o.Items { - if len(item.Keys) == 0 { - result.Add(item) - } - } - - return &result -} - -func (o *ObjectList) Pos() token.Pos { - // always returns the uninitiliazed position - return o.Items[0].Pos() -} - -// ObjectItem represents a HCL Object Item. An item is represented with a key -// (or keys). It can be an assignment or an object (both normal and nested) -type ObjectItem struct { - // keys is only one length long if it's of type assignment. If it's a - // nested object it can be larger than one. 
In that case "assign" is - // invalid as there is no assignments for a nested object. - Keys []*ObjectKey - - // assign contains the position of "=", if any - Assign token.Pos - - // val is the item itself. It can be an object,list, number, bool or a - // string. If key length is larger than one, val can be only of type - // Object. - Val Node - - LeadComment *CommentGroup // associated lead comment - LineComment *CommentGroup // associated line comment -} - -func (o *ObjectItem) Pos() token.Pos { - // I'm not entirely sure what causes this, but removing this causes - // a test failure. We should investigate at some point. - if len(o.Keys) == 0 { - return token.Pos{} - } - - return o.Keys[0].Pos() -} - -// ObjectKeys are either an identifier or of type string. -type ObjectKey struct { - Token token.Token -} - -func (o *ObjectKey) Pos() token.Pos { - return o.Token.Pos -} - -// LiteralType represents a literal of basic type. Valid types are: -// token.NUMBER, token.FLOAT, token.BOOL and token.STRING -type LiteralType struct { - Token token.Token - - // comment types, only used when in a list - LeadComment *CommentGroup - LineComment *CommentGroup -} - -func (l *LiteralType) Pos() token.Pos { - return l.Token.Pos -} - -// ListStatement represents a HCL List type -type ListType struct { - Lbrack token.Pos // position of "[" - Rbrack token.Pos // position of "]" - List []Node // the elements in lexical order -} - -func (l *ListType) Pos() token.Pos { - return l.Lbrack -} - -func (l *ListType) Add(node Node) { - l.List = append(l.List, node) -} - -// ObjectType represents a HCL Object Type -type ObjectType struct { - Lbrace token.Pos // position of "{" - Rbrace token.Pos // position of "}" - List *ObjectList // the nodes in lexical order -} - -func (o *ObjectType) Pos() token.Pos { - return o.Lbrace -} - -// Comment node represents a single //, # style or /*- style commment -type Comment struct { - Start token.Pos // position of / or # - Text string -} - -func (c *Comment) Pos() token.Pos { - return c.Start -} - -// CommentGroup node represents a sequence of comments with no other tokens and -// no empty lines between. -type CommentGroup struct { - List []*Comment // len(List) > 0 -} - -func (c *CommentGroup) Pos() token.Pos { - return c.List[0].Pos() -} - -//------------------------------------------------------------------- -// GoStringer -//------------------------------------------------------------------- - -func (o *ObjectKey) GoString() string { return fmt.Sprintf("*%#v", *o) } -func (o *ObjectList) GoString() string { return fmt.Sprintf("*%#v", *o) } diff --git a/tools/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go b/tools/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go deleted file mode 100644 index ba07ad42b..000000000 --- a/tools/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go +++ /dev/null @@ -1,52 +0,0 @@ -package ast - -import "fmt" - -// WalkFunc describes a function to be called for each node during a Walk. The -// returned node can be used to rewrite the AST. Walking stops the returned -// bool is false. -type WalkFunc func(Node) (Node, bool) - -// Walk traverses an AST in depth-first order: It starts by calling fn(node); -// node must not be nil. If fn returns true, Walk invokes fn recursively for -// each of the non-nil children of node, followed by a call of fn(nil). The -// returned node of fn can be used to rewrite the passed node to fn. 
-func Walk(node Node, fn WalkFunc) Node { - rewritten, ok := fn(node) - if !ok { - return rewritten - } - - switch n := node.(type) { - case *File: - n.Node = Walk(n.Node, fn) - case *ObjectList: - for i, item := range n.Items { - n.Items[i] = Walk(item, fn).(*ObjectItem) - } - case *ObjectKey: - // nothing to do - case *ObjectItem: - for i, k := range n.Keys { - n.Keys[i] = Walk(k, fn).(*ObjectKey) - } - - if n.Val != nil { - n.Val = Walk(n.Val, fn) - } - case *LiteralType: - // nothing to do - case *ListType: - for i, l := range n.List { - n.List[i] = Walk(l, fn) - } - case *ObjectType: - n.List = Walk(n.List, fn).(*ObjectList) - default: - // should we panic here? - fmt.Printf("unknown type: %T\n", n) - } - - fn(nil) - return rewritten -} diff --git a/tools/vendor/github.com/hashicorp/hcl/hcl/parser/error.go b/tools/vendor/github.com/hashicorp/hcl/hcl/parser/error.go deleted file mode 100644 index 5c99381df..000000000 --- a/tools/vendor/github.com/hashicorp/hcl/hcl/parser/error.go +++ /dev/null @@ -1,17 +0,0 @@ -package parser - -import ( - "fmt" - - "github.com/hashicorp/hcl/hcl/token" -) - -// PosError is a parse error that contains a position. -type PosError struct { - Pos token.Pos - Err error -} - -func (e *PosError) Error() string { - return fmt.Sprintf("At %s: %s", e.Pos, e.Err) -} diff --git a/tools/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go b/tools/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go deleted file mode 100644 index 64c83bcfb..000000000 --- a/tools/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go +++ /dev/null @@ -1,532 +0,0 @@ -// Package parser implements a parser for HCL (HashiCorp Configuration -// Language) -package parser - -import ( - "bytes" - "errors" - "fmt" - "strings" - - "github.com/hashicorp/hcl/hcl/ast" - "github.com/hashicorp/hcl/hcl/scanner" - "github.com/hashicorp/hcl/hcl/token" -) - -type Parser struct { - sc *scanner.Scanner - - // Last read token - tok token.Token - commaPrev token.Token - - comments []*ast.CommentGroup - leadComment *ast.CommentGroup // last lead comment - lineComment *ast.CommentGroup // last line comment - - enableTrace bool - indent int - n int // buffer size (max = 1) -} - -func newParser(src []byte) *Parser { - return &Parser{ - sc: scanner.New(src), - } -} - -// Parse returns the fully parsed source and returns the abstract syntax tree. -func Parse(src []byte) (*ast.File, error) { - // normalize all line endings - // since the scanner and output only work with "\n" line endings, we may - // end up with dangling "\r" characters in the parsed data. - src = bytes.Replace(src, []byte("\r\n"), []byte("\n"), -1) - - p := newParser(src) - return p.Parse() -} - -var errEofToken = errors.New("EOF token found") - -// Parse returns the fully parsed source and returns the abstract syntax tree. -func (p *Parser) Parse() (*ast.File, error) { - f := &ast.File{} - var err, scerr error - p.sc.Error = func(pos token.Pos, msg string) { - scerr = &PosError{Pos: pos, Err: errors.New(msg)} - } - - f.Node, err = p.objectList(false) - if scerr != nil { - return nil, scerr - } - if err != nil { - return nil, err - } - - f.Comments = p.comments - return f, nil -} - -// objectList parses a list of items within an object (generally k/v pairs). -// The parameter" obj" tells this whether to we are within an object (braces: -// '{', '}') or just at the top level. If we're within an object, we end -// at an RBRACE. 
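The Walk helper removed just above was the package's AST rewriting hook: fn may replace the node it is handed, and returning false prunes the descent. A short sketch against the deleted API; the redaction pass is illustrative:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/hcl/ast"
	"github.com/hashicorp/hcl/hcl/parser"
	"github.com/hashicorp/hcl/hcl/token"
)

func main() {
	f, err := parser.Parse([]byte(`secret = "hunter2"`))
	if err != nil {
		panic(err)
	}
	// Rewrite every string literal in place; returning false stops Walk
	// from descending further into the replaced node.
	ast.Walk(f.Node, func(n ast.Node) (ast.Node, bool) {
		if lit, ok := n.(*ast.LiteralType); ok && lit.Token.Type == token.STRING {
			lit.Token.Text = `"<redacted>"`
			return lit, false
		}
		return n, true
	})
	fmt.Println(f.Node.(*ast.ObjectList).Items[0].Val.(*ast.LiteralType).Token.Text)
}
```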
-func (p *Parser) objectList(obj bool) (*ast.ObjectList, error) { - defer un(trace(p, "ParseObjectList")) - node := &ast.ObjectList{} - - for { - if obj { - tok := p.scan() - p.unscan() - if tok.Type == token.RBRACE { - break - } - } - - n, err := p.objectItem() - if err == errEofToken { - break // we are finished - } - - // we don't return a nil node, because might want to use already - // collected items. - if err != nil { - return node, err - } - - node.Add(n) - - // object lists can be optionally comma-delimited e.g. when a list of maps - // is being expressed, so a comma is allowed here - it's simply consumed - tok := p.scan() - if tok.Type != token.COMMA { - p.unscan() - } - } - return node, nil -} - -func (p *Parser) consumeComment() (comment *ast.Comment, endline int) { - endline = p.tok.Pos.Line - - // count the endline if it's multiline comment, ie starting with /* - if len(p.tok.Text) > 1 && p.tok.Text[1] == '*' { - // don't use range here - no need to decode Unicode code points - for i := 0; i < len(p.tok.Text); i++ { - if p.tok.Text[i] == '\n' { - endline++ - } - } - } - - comment = &ast.Comment{Start: p.tok.Pos, Text: p.tok.Text} - p.tok = p.sc.Scan() - return -} - -func (p *Parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) { - var list []*ast.Comment - endline = p.tok.Pos.Line - - for p.tok.Type == token.COMMENT && p.tok.Pos.Line <= endline+n { - var comment *ast.Comment - comment, endline = p.consumeComment() - list = append(list, comment) - } - - // add comment group to the comments list - comments = &ast.CommentGroup{List: list} - p.comments = append(p.comments, comments) - - return -} - -// objectItem parses a single object item -func (p *Parser) objectItem() (*ast.ObjectItem, error) { - defer un(trace(p, "ParseObjectItem")) - - keys, err := p.objectKey() - if len(keys) > 0 && err == errEofToken { - // We ignore eof token here since it is an error if we didn't - // receive a value (but we did receive a key) for the item. - err = nil - } - if len(keys) > 0 && err != nil && p.tok.Type == token.RBRACE { - // This is a strange boolean statement, but what it means is: - // We have keys with no value, and we're likely in an object - // (since RBrace ends an object). For this, we set err to nil so - // we continue and get the error below of having the wrong value - // type. - err = nil - - // Reset the token type so we don't think it completed fine. See - // objectType which uses p.tok.Type to check if we're done with - // the object. 
- p.tok.Type = token.EOF - } - if err != nil { - return nil, err - } - - o := &ast.ObjectItem{ - Keys: keys, - } - - if p.leadComment != nil { - o.LeadComment = p.leadComment - p.leadComment = nil - } - - switch p.tok.Type { - case token.ASSIGN: - o.Assign = p.tok.Pos - o.Val, err = p.object() - if err != nil { - return nil, err - } - case token.LBRACE: - o.Val, err = p.objectType() - if err != nil { - return nil, err - } - default: - keyStr := make([]string, 0, len(keys)) - for _, k := range keys { - keyStr = append(keyStr, k.Token.Text) - } - - return nil, &PosError{ - Pos: p.tok.Pos, - Err: fmt.Errorf( - "key '%s' expected start of object ('{') or assignment ('=')", - strings.Join(keyStr, " ")), - } - } - - // key=#comment - // val - if p.lineComment != nil { - o.LineComment, p.lineComment = p.lineComment, nil - } - - // do a look-ahead for line comment - p.scan() - if len(keys) > 0 && o.Val.Pos().Line == keys[0].Pos().Line && p.lineComment != nil { - o.LineComment = p.lineComment - p.lineComment = nil - } - p.unscan() - return o, nil -} - -// objectKey parses an object key and returns a ObjectKey AST -func (p *Parser) objectKey() ([]*ast.ObjectKey, error) { - keyCount := 0 - keys := make([]*ast.ObjectKey, 0) - - for { - tok := p.scan() - switch tok.Type { - case token.EOF: - // It is very important to also return the keys here as well as - // the error. This is because we need to be able to tell if we - // did parse keys prior to finding the EOF, or if we just found - // a bare EOF. - return keys, errEofToken - case token.ASSIGN: - // assignment or object only, but not nested objects. this is not - // allowed: `foo bar = {}` - if keyCount > 1 { - return nil, &PosError{ - Pos: p.tok.Pos, - Err: fmt.Errorf("nested object expected: LBRACE got: %s", p.tok.Type), - } - } - - if keyCount == 0 { - return nil, &PosError{ - Pos: p.tok.Pos, - Err: errors.New("no object keys found!"), - } - } - - return keys, nil - case token.LBRACE: - var err error - - // If we have no keys, then it is a syntax error. i.e. {{}} is not - // allowed. - if len(keys) == 0 { - err = &PosError{ - Pos: p.tok.Pos, - Err: fmt.Errorf("expected: IDENT | STRING got: %s", p.tok.Type), - } - } - - // object - return keys, err - case token.IDENT, token.STRING: - keyCount++ - keys = append(keys, &ast.ObjectKey{Token: p.tok}) - case token.ILLEGAL: - return keys, &PosError{ - Pos: p.tok.Pos, - Err: fmt.Errorf("illegal character"), - } - default: - return keys, &PosError{ - Pos: p.tok.Pos, - Err: fmt.Errorf("expected: IDENT | STRING | ASSIGN | LBRACE got: %s", p.tok.Type), - } - } - } -} - -// object parses any type of object, such as number, bool, string, object or -// list. 
-func (p *Parser) object() (ast.Node, error) { - defer un(trace(p, "ParseType")) - tok := p.scan() - - switch tok.Type { - case token.NUMBER, token.FLOAT, token.BOOL, token.STRING, token.HEREDOC: - return p.literalType() - case token.LBRACE: - return p.objectType() - case token.LBRACK: - return p.listType() - case token.COMMENT: - // implement comment - case token.EOF: - return nil, errEofToken - } - - return nil, &PosError{ - Pos: tok.Pos, - Err: fmt.Errorf("Unknown token: %+v", tok), - } -} - -// objectType parses an object type and returns a ObjectType AST -func (p *Parser) objectType() (*ast.ObjectType, error) { - defer un(trace(p, "ParseObjectType")) - - // we assume that the currently scanned token is a LBRACE - o := &ast.ObjectType{ - Lbrace: p.tok.Pos, - } - - l, err := p.objectList(true) - - // if we hit RBRACE, we are good to go (means we parsed all Items), if it's - // not a RBRACE, it's an syntax error and we just return it. - if err != nil && p.tok.Type != token.RBRACE { - return nil, err - } - - // No error, scan and expect the ending to be a brace - if tok := p.scan(); tok.Type != token.RBRACE { - return nil, &PosError{ - Pos: tok.Pos, - Err: fmt.Errorf("object expected closing RBRACE got: %s", tok.Type), - } - } - - o.List = l - o.Rbrace = p.tok.Pos // advanced via parseObjectList - return o, nil -} - -// listType parses a list type and returns a ListType AST -func (p *Parser) listType() (*ast.ListType, error) { - defer un(trace(p, "ParseListType")) - - // we assume that the currently scanned token is a LBRACK - l := &ast.ListType{ - Lbrack: p.tok.Pos, - } - - needComma := false - for { - tok := p.scan() - if needComma { - switch tok.Type { - case token.COMMA, token.RBRACK: - default: - return nil, &PosError{ - Pos: tok.Pos, - Err: fmt.Errorf( - "error parsing list, expected comma or list end, got: %s", - tok.Type), - } - } - } - switch tok.Type { - case token.BOOL, token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC: - node, err := p.literalType() - if err != nil { - return nil, err - } - - // If there is a lead comment, apply it - if p.leadComment != nil { - node.LeadComment = p.leadComment - p.leadComment = nil - } - - l.Add(node) - needComma = true - case token.COMMA: - // get next list item or we are at the end - // do a look-ahead for line comment - p.scan() - if p.lineComment != nil && len(l.List) > 0 { - lit, ok := l.List[len(l.List)-1].(*ast.LiteralType) - if ok { - lit.LineComment = p.lineComment - l.List[len(l.List)-1] = lit - p.lineComment = nil - } - } - p.unscan() - - needComma = false - continue - case token.LBRACE: - // Looks like a nested object, so parse it out - node, err := p.objectType() - if err != nil { - return nil, &PosError{ - Pos: tok.Pos, - Err: fmt.Errorf( - "error while trying to parse object within list: %s", err), - } - } - l.Add(node) - needComma = true - case token.LBRACK: - node, err := p.listType() - if err != nil { - return nil, &PosError{ - Pos: tok.Pos, - Err: fmt.Errorf( - "error while trying to parse list within list: %s", err), - } - } - l.Add(node) - case token.RBRACK: - // finished - l.Rbrack = p.tok.Pos - return l, nil - default: - return nil, &PosError{ - Pos: tok.Pos, - Err: fmt.Errorf("unexpected token while parsing list: %s", tok.Type), - } - } - } -} - -// literalType parses a literal type and returns a LiteralType AST -func (p *Parser) literalType() (*ast.LiteralType, error) { - defer un(trace(p, "ParseLiteral")) - - return &ast.LiteralType{ - Token: p.tok, - }, nil -} - -// scan returns the next token from the 
underlying scanner. If a token has -// been unscanned then read that instead. In the process, it collects any -// comment groups encountered, and remembers the last lead and line comments. -func (p *Parser) scan() token.Token { - // If we have a token on the buffer, then return it. - if p.n != 0 { - p.n = 0 - return p.tok - } - - // Otherwise read the next token from the scanner and Save it to the buffer - // in case we unscan later. - prev := p.tok - p.tok = p.sc.Scan() - - if p.tok.Type == token.COMMENT { - var comment *ast.CommentGroup - var endline int - - // fmt.Printf("p.tok.Pos.Line = %+v prev: %d endline %d \n", - // p.tok.Pos.Line, prev.Pos.Line, endline) - if p.tok.Pos.Line == prev.Pos.Line { - // The comment is on same line as the previous token; it - // cannot be a lead comment but may be a line comment. - comment, endline = p.consumeCommentGroup(0) - if p.tok.Pos.Line != endline { - // The next token is on a different line, thus - // the last comment group is a line comment. - p.lineComment = comment - } - } - - // consume successor comments, if any - endline = -1 - for p.tok.Type == token.COMMENT { - comment, endline = p.consumeCommentGroup(1) - } - - if endline+1 == p.tok.Pos.Line && p.tok.Type != token.RBRACE { - switch p.tok.Type { - case token.RBRACE, token.RBRACK: - // Do not count for these cases - default: - // The next token is following on the line immediately after the - // comment group, thus the last comment group is a lead comment. - p.leadComment = comment - } - } - - } - - return p.tok -} - -// unscan pushes the previously read token back onto the buffer. -func (p *Parser) unscan() { - p.n = 1 -} - -// ---------------------------------------------------------------------------- -// Parsing support - -func (p *Parser) printTrace(a ...interface{}) { - if !p.enableTrace { - return - } - - const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " - const n = len(dots) - fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column) - - i := 2 * p.indent - for i > n { - fmt.Print(dots) - i -= n - } - // i <= n - fmt.Print(dots[0:i]) - fmt.Println(a...) 
-} - -func trace(p *Parser, msg string) *Parser { - p.printTrace(msg, "(") - p.indent++ - return p -} - -// Usage pattern: defer un(trace(p, "...")) -func un(p *Parser) { - p.indent-- - p.printTrace(")") -} diff --git a/tools/vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go b/tools/vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go deleted file mode 100644 index 7c038d12a..000000000 --- a/tools/vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go +++ /dev/null @@ -1,789 +0,0 @@ -package printer - -import ( - "bytes" - "fmt" - "sort" - - "github.com/hashicorp/hcl/hcl/ast" - "github.com/hashicorp/hcl/hcl/token" -) - -const ( - blank = byte(' ') - newline = byte('\n') - tab = byte('\t') - infinity = 1 << 30 // offset or line -) - -var ( - unindent = []byte("\uE123") // in the private use space -) - -type printer struct { - cfg Config - prev token.Pos - - comments []*ast.CommentGroup // may be nil, contains all comments - standaloneComments []*ast.CommentGroup // contains all standalone comments (not assigned to any node) - - enableTrace bool - indentTrace int -} - -type ByPosition []*ast.CommentGroup - -func (b ByPosition) Len() int { return len(b) } -func (b ByPosition) Swap(i, j int) { b[i], b[j] = b[j], b[i] } -func (b ByPosition) Less(i, j int) bool { return b[i].Pos().Before(b[j].Pos()) } - -// collectComments comments all standalone comments which are not lead or line -// comment -func (p *printer) collectComments(node ast.Node) { - // first collect all comments. This is already stored in - // ast.File.(comments) - ast.Walk(node, func(nn ast.Node) (ast.Node, bool) { - switch t := nn.(type) { - case *ast.File: - p.comments = t.Comments - return nn, false - } - return nn, true - }) - - standaloneComments := make(map[token.Pos]*ast.CommentGroup, 0) - for _, c := range p.comments { - standaloneComments[c.Pos()] = c - } - - // next remove all lead and line comments from the overall comment map. - // This will give us comments which are standalone, comments which are not - // assigned to any kind of node. - ast.Walk(node, func(nn ast.Node) (ast.Node, bool) { - switch t := nn.(type) { - case *ast.LiteralType: - if t.LeadComment != nil { - for _, comment := range t.LeadComment.List { - if _, ok := standaloneComments[comment.Pos()]; ok { - delete(standaloneComments, comment.Pos()) - } - } - } - - if t.LineComment != nil { - for _, comment := range t.LineComment.List { - if _, ok := standaloneComments[comment.Pos()]; ok { - delete(standaloneComments, comment.Pos()) - } - } - } - case *ast.ObjectItem: - if t.LeadComment != nil { - for _, comment := range t.LeadComment.List { - if _, ok := standaloneComments[comment.Pos()]; ok { - delete(standaloneComments, comment.Pos()) - } - } - } - - if t.LineComment != nil { - for _, comment := range t.LineComment.List { - if _, ok := standaloneComments[comment.Pos()]; ok { - delete(standaloneComments, comment.Pos()) - } - } - } - } - - return nn, true - }) - - for _, c := range standaloneComments { - p.standaloneComments = append(p.standaloneComments, c) - } - - sort.Sort(ByPosition(p.standaloneComments)) -} - -// output prints creates b printable HCL output and returns it. -func (p *printer) output(n interface{}) []byte { - var buf bytes.Buffer - - switch t := n.(type) { - case *ast.File: - // File doesn't trace so we add the tracing here - defer un(trace(p, "File")) - return p.output(t.Node) - case *ast.ObjectList: - defer un(trace(p, "ObjectList")) - - var index int - for { - // Determine the location of the next actual non-comment - // item. 
If we're at the end, the next item is at "infinity" - var nextItem token.Pos - if index != len(t.Items) { - nextItem = t.Items[index].Pos() - } else { - nextItem = token.Pos{Offset: infinity, Line: infinity} - } - - // Go through the standalone comments in the file and print out - // the comments that we should be for this object item. - for _, c := range p.standaloneComments { - // Go through all the comments in the group. The group - // should be printed together, not separated by double newlines. - printed := false - newlinePrinted := false - for _, comment := range c.List { - // We only care about comments after the previous item - // we've printed so that comments are printed in the - // correct locations (between two objects for example). - // And before the next item. - if comment.Pos().After(p.prev) && comment.Pos().Before(nextItem) { - // if we hit the end add newlines so we can print the comment - // we don't do this if prev is invalid which means the - // beginning of the file since the first comment should - // be at the first line. - if !newlinePrinted && p.prev.IsValid() && index == len(t.Items) { - buf.Write([]byte{newline, newline}) - newlinePrinted = true - } - - // Write the actual comment. - buf.WriteString(comment.Text) - buf.WriteByte(newline) - - // Set printed to true to note that we printed something - printed = true - } - } - - // If we're not at the last item, write a new line so - // that there is a newline separating this comment from - // the next object. - if printed && index != len(t.Items) { - buf.WriteByte(newline) - } - } - - if index == len(t.Items) { - break - } - - buf.Write(p.output(t.Items[index])) - if index != len(t.Items)-1 { - // Always write a newline to separate us from the next item - buf.WriteByte(newline) - - // Need to determine if we're going to separate the next item - // with a blank line. The logic here is simple, though there - // are a few conditions: - // - // 1. The next object is more than one line away anyways, - // so we need an empty line. - // - // 2. The next object is not a "single line" object, so - // we need an empty line. - // - // 3. This current object is not a single line object, - // so we need an empty line. - current := t.Items[index] - next := t.Items[index+1] - if next.Pos().Line != t.Items[index].Pos().Line+1 || - !p.isSingleLineObject(next) || - !p.isSingleLineObject(current) { - buf.WriteByte(newline) - } - } - index++ - } - case *ast.ObjectKey: - buf.WriteString(t.Token.Text) - case *ast.ObjectItem: - p.prev = t.Pos() - buf.Write(p.objectItem(t)) - case *ast.LiteralType: - buf.Write(p.literalType(t)) - case *ast.ListType: - buf.Write(p.list(t)) - case *ast.ObjectType: - buf.Write(p.objectType(t)) - default: - fmt.Printf(" unknown type: %T\n", n) - } - - return buf.Bytes() -} - -func (p *printer) literalType(lit *ast.LiteralType) []byte { - result := []byte(lit.Token.Text) - switch lit.Token.Type { - case token.HEREDOC: - // Clear the trailing newline from heredocs - if result[len(result)-1] == '\n' { - result = result[:len(result)-1] - } - - // Poison lines 2+ so that we don't indent them - result = p.heredocIndent(result) - case token.STRING: - // If this is a multiline string, poison lines 2+ so we don't - // indent them. - if bytes.IndexRune(result, '\n') >= 0 { - result = p.heredocIndent(result) - } - } - - return result -} - -// objectItem returns the printable HCL form of an object item. An object type -// starts with one/multiple keys and has a value. The value might be of any -// type. 
-func (p *printer) objectItem(o *ast.ObjectItem) []byte { - defer un(trace(p, fmt.Sprintf("ObjectItem: %s", o.Keys[0].Token.Text))) - var buf bytes.Buffer - - if o.LeadComment != nil { - for _, comment := range o.LeadComment.List { - buf.WriteString(comment.Text) - buf.WriteByte(newline) - } - } - - // If key and val are on different lines, treat line comments like lead comments. - if o.LineComment != nil && o.Val.Pos().Line != o.Keys[0].Pos().Line { - for _, comment := range o.LineComment.List { - buf.WriteString(comment.Text) - buf.WriteByte(newline) - } - } - - for i, k := range o.Keys { - buf.WriteString(k.Token.Text) - buf.WriteByte(blank) - - // reach end of key - if o.Assign.IsValid() && i == len(o.Keys)-1 && len(o.Keys) == 1 { - buf.WriteString("=") - buf.WriteByte(blank) - } - } - - buf.Write(p.output(o.Val)) - - if o.LineComment != nil && o.Val.Pos().Line == o.Keys[0].Pos().Line { - buf.WriteByte(blank) - for _, comment := range o.LineComment.List { - buf.WriteString(comment.Text) - } - } - - return buf.Bytes() -} - -// objectType returns the printable HCL form of an object type. An object type -// begins with a brace and ends with a brace. -func (p *printer) objectType(o *ast.ObjectType) []byte { - defer un(trace(p, "ObjectType")) - var buf bytes.Buffer - buf.WriteString("{") - - var index int - var nextItem token.Pos - var commented, newlinePrinted bool - for { - // Determine the location of the next actual non-comment - // item. If we're at the end, the next item is the closing brace - if index != len(o.List.Items) { - nextItem = o.List.Items[index].Pos() - } else { - nextItem = o.Rbrace - } - - // Go through the standalone comments in the file and print out - // the comments that we should be for this object item. - for _, c := range p.standaloneComments { - printed := false - var lastCommentPos token.Pos - for _, comment := range c.List { - // We only care about comments after the previous item - // we've printed so that comments are printed in the - // correct locations (between two objects for example). - // And before the next item. - if comment.Pos().After(p.prev) && comment.Pos().Before(nextItem) { - // If there are standalone comments and the initial newline has not - // been printed yet, do it now. - if !newlinePrinted { - newlinePrinted = true - buf.WriteByte(newline) - } - - // add newline if it's between other printed nodes - if index > 0 { - commented = true - buf.WriteByte(newline) - } - - // Store this position - lastCommentPos = comment.Pos() - - // output the comment itself - buf.Write(p.indent(p.heredocIndent([]byte(comment.Text)))) - - // Set printed to true to note that we printed something - printed = true - - /* - if index != len(o.List.Items) { - buf.WriteByte(newline) // do not print on the end - } - */ - } - } - - // Stuff to do if we had comments - if printed { - // Always write a newline - buf.WriteByte(newline) - - // If there is another item in the object and our comment - // didn't hug it directly, then make sure there is a blank - // line separating them. - if nextItem != o.Rbrace && nextItem.Line != lastCommentPos.Line+1 { - buf.WriteByte(newline) - } - } - } - - if index == len(o.List.Items) { - p.prev = o.Rbrace - break - } - - // At this point we are sure that it's not a totally empty block: print - // the initial newline if it hasn't been printed yet by the previous - // block about standalone comments. - if !newlinePrinted { - buf.WriteByte(newline) - newlinePrinted = true - } - - // check if we have adjacent one liner items. 
If yes we'll going to align - // the comments. - var aligned []*ast.ObjectItem - for _, item := range o.List.Items[index:] { - // we don't group one line lists - if len(o.List.Items) == 1 { - break - } - - // one means a oneliner with out any lead comment - // two means a oneliner with lead comment - // anything else might be something else - cur := lines(string(p.objectItem(item))) - if cur > 2 { - break - } - - curPos := item.Pos() - - nextPos := token.Pos{} - if index != len(o.List.Items)-1 { - nextPos = o.List.Items[index+1].Pos() - } - - prevPos := token.Pos{} - if index != 0 { - prevPos = o.List.Items[index-1].Pos() - } - - // fmt.Println("DEBUG ----------------") - // fmt.Printf("prev = %+v prevPos: %s\n", prev, prevPos) - // fmt.Printf("cur = %+v curPos: %s\n", cur, curPos) - // fmt.Printf("next = %+v nextPos: %s\n", next, nextPos) - - if curPos.Line+1 == nextPos.Line { - aligned = append(aligned, item) - index++ - continue - } - - if curPos.Line-1 == prevPos.Line { - aligned = append(aligned, item) - index++ - - // finish if we have a new line or comment next. This happens - // if the next item is not adjacent - if curPos.Line+1 != nextPos.Line { - break - } - continue - } - - break - } - - // put newlines if the items are between other non aligned items. - // newlines are also added if there is a standalone comment already, so - // check it too - if !commented && index != len(aligned) { - buf.WriteByte(newline) - } - - if len(aligned) >= 1 { - p.prev = aligned[len(aligned)-1].Pos() - - items := p.alignedItems(aligned) - buf.Write(p.indent(items)) - } else { - p.prev = o.List.Items[index].Pos() - - buf.Write(p.indent(p.objectItem(o.List.Items[index]))) - index++ - } - - buf.WriteByte(newline) - } - - buf.WriteString("}") - return buf.Bytes() -} - -func (p *printer) alignedItems(items []*ast.ObjectItem) []byte { - var buf bytes.Buffer - - // find the longest key and value length, needed for alignment - var longestKeyLen int // longest key length - var longestValLen int // longest value length - for _, item := range items { - key := len(item.Keys[0].Token.Text) - val := len(p.output(item.Val)) - - if key > longestKeyLen { - longestKeyLen = key - } - - if val > longestValLen { - longestValLen = val - } - } - - for i, item := range items { - if item.LeadComment != nil { - for _, comment := range item.LeadComment.List { - buf.WriteString(comment.Text) - buf.WriteByte(newline) - } - } - - for i, k := range item.Keys { - keyLen := len(k.Token.Text) - buf.WriteString(k.Token.Text) - for i := 0; i < longestKeyLen-keyLen+1; i++ { - buf.WriteByte(blank) - } - - // reach end of key - if i == len(item.Keys)-1 && len(item.Keys) == 1 { - buf.WriteString("=") - buf.WriteByte(blank) - } - } - - val := p.output(item.Val) - valLen := len(val) - buf.Write(val) - - if item.Val.Pos().Line == item.Keys[0].Pos().Line && item.LineComment != nil { - for i := 0; i < longestValLen-valLen+1; i++ { - buf.WriteByte(blank) - } - - for _, comment := range item.LineComment.List { - buf.WriteString(comment.Text) - } - } - - // do not print for the last item - if i != len(items)-1 { - buf.WriteByte(newline) - } - } - - return buf.Bytes() -} - -// list returns the printable HCL form of an list type. 
-func (p *printer) list(l *ast.ListType) []byte { - if p.isSingleLineList(l) { - return p.singleLineList(l) - } - - var buf bytes.Buffer - buf.WriteString("[") - buf.WriteByte(newline) - - var longestLine int - for _, item := range l.List { - // for now we assume that the list only contains literal types - if lit, ok := item.(*ast.LiteralType); ok { - lineLen := len(lit.Token.Text) - if lineLen > longestLine { - longestLine = lineLen - } - } - } - - haveEmptyLine := false - for i, item := range l.List { - // If we have a lead comment, then we want to write that first - leadComment := false - if lit, ok := item.(*ast.LiteralType); ok && lit.LeadComment != nil { - leadComment = true - - // Ensure an empty line before every element with a - // lead comment (except the first item in a list). - if !haveEmptyLine && i != 0 { - buf.WriteByte(newline) - } - - for _, comment := range lit.LeadComment.List { - buf.Write(p.indent([]byte(comment.Text))) - buf.WriteByte(newline) - } - } - - // also indent each line - val := p.output(item) - curLen := len(val) - buf.Write(p.indent(val)) - - // if this item is a heredoc, then we output the comma on - // the next line. This is the only case this happens. - comma := []byte{','} - if lit, ok := item.(*ast.LiteralType); ok && lit.Token.Type == token.HEREDOC { - buf.WriteByte(newline) - comma = p.indent(comma) - } - - buf.Write(comma) - - if lit, ok := item.(*ast.LiteralType); ok && lit.LineComment != nil { - // if the next item doesn't have any comments, do not align - buf.WriteByte(blank) // align one space - for i := 0; i < longestLine-curLen; i++ { - buf.WriteByte(blank) - } - - for _, comment := range lit.LineComment.List { - buf.WriteString(comment.Text) - } - } - - buf.WriteByte(newline) - - // Ensure an empty line after every element with a - // lead comment (except the first item in a list). - haveEmptyLine = leadComment && i != len(l.List)-1 - if haveEmptyLine { - buf.WriteByte(newline) - } - } - - buf.WriteString("]") - return buf.Bytes() -} - -// isSingleLineList returns true if: -// * they were previously formatted entirely on one line -// * they consist entirely of literals -// * there are either no heredoc strings or the list has exactly one element -// * there are no line comments -func (printer) isSingleLineList(l *ast.ListType) bool { - for _, item := range l.List { - if item.Pos().Line != l.Lbrack.Line { - return false - } - - lit, ok := item.(*ast.LiteralType) - if !ok { - return false - } - - if lit.Token.Type == token.HEREDOC && len(l.List) != 1 { - return false - } - - if lit.LineComment != nil { - return false - } - } - - return true -} - -// singleLineList prints a simple single line list. -// For a definition of "simple", see isSingleLineList above. -func (p *printer) singleLineList(l *ast.ListType) []byte { - buf := &bytes.Buffer{} - - buf.WriteString("[") - for i, item := range l.List { - if i != 0 { - buf.WriteString(", ") - } - - // Output the item itself - buf.Write(p.output(item)) - - // The heredoc marker needs to be at the end of line. 
- if lit, ok := item.(*ast.LiteralType); ok && lit.Token.Type == token.HEREDOC { - buf.WriteByte(newline) - } - } - - buf.WriteString("]") - return buf.Bytes() -} - -// indent indents the lines of the given buffer for each non-empty line -func (p *printer) indent(buf []byte) []byte { - var prefix []byte - if p.cfg.SpacesWidth != 0 { - for i := 0; i < p.cfg.SpacesWidth; i++ { - prefix = append(prefix, blank) - } - } else { - prefix = []byte{tab} - } - - var res []byte - bol := true - for _, c := range buf { - if bol && c != '\n' { - res = append(res, prefix...) - } - - res = append(res, c) - bol = c == '\n' - } - return res -} - -// unindent removes all the indentation from the tombstoned lines -func (p *printer) unindent(buf []byte) []byte { - var res []byte - for i := 0; i < len(buf); i++ { - skip := len(buf)-i <= len(unindent) - if !skip { - skip = !bytes.Equal(unindent, buf[i:i+len(unindent)]) - } - if skip { - res = append(res, buf[i]) - continue - } - - // We have a marker. we have to backtrace here and clean out - // any whitespace ahead of our tombstone up to a \n - for j := len(res) - 1; j >= 0; j-- { - if res[j] == '\n' { - break - } - - res = res[:j] - } - - // Skip the entire unindent marker - i += len(unindent) - 1 - } - - return res -} - -// heredocIndent marks all the 2nd and further lines as unindentable -func (p *printer) heredocIndent(buf []byte) []byte { - var res []byte - bol := false - for _, c := range buf { - if bol && c != '\n' { - res = append(res, unindent...) - } - res = append(res, c) - bol = c == '\n' - } - return res -} - -// isSingleLineObject tells whether the given object item is a single -// line object such as "obj {}". -// -// A single line object: -// -// * has no lead comments (hence multi-line) -// * has no assignment -// * has no values in the stanza (within {}) -// -func (p *printer) isSingleLineObject(val *ast.ObjectItem) bool { - // If there is a lead comment, can't be one line - if val.LeadComment != nil { - return false - } - - // If there is assignment, we always break by line - if val.Assign.IsValid() { - return false - } - - // If it isn't an object type, then its not a single line object - ot, ok := val.Val.(*ast.ObjectType) - if !ok { - return false - } - - // If the object has no items, it is single line! - return len(ot.List.Items) == 0 -} - -func lines(txt string) int { - endline := 1 - for i := 0; i < len(txt); i++ { - if txt[i] == '\n' { - endline++ - } - } - return endline -} - -// ---------------------------------------------------------------------------- -// Tracing support - -func (p *printer) printTrace(a ...interface{}) { - if !p.enableTrace { - return - } - - const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " - const n = len(dots) - i := 2 * p.indentTrace - for i > n { - fmt.Print(dots) - i -= n - } - // i <= n - fmt.Print(dots[0:i]) - fmt.Println(a...) -} - -func trace(p *printer, msg string) *printer { - p.printTrace(msg, "(") - p.indentTrace++ - return p -} - -// Usage pattern: defer un(trace(p, "...")) -func un(p *printer) { - p.indentTrace-- - p.printTrace(")") -} diff --git a/tools/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go b/tools/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go deleted file mode 100644 index 6617ab8e7..000000000 --- a/tools/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go +++ /dev/null @@ -1,66 +0,0 @@ -// Package printer implements printing of AST nodes to HCL format. 
-package printer - -import ( - "bytes" - "io" - "text/tabwriter" - - "github.com/hashicorp/hcl/hcl/ast" - "github.com/hashicorp/hcl/hcl/parser" -) - -var DefaultConfig = Config{ - SpacesWidth: 2, -} - -// A Config node controls the output of Fprint. -type Config struct { - SpacesWidth int // if set, it will use spaces instead of tabs for alignment -} - -func (c *Config) Fprint(output io.Writer, node ast.Node) error { - p := &printer{ - cfg: *c, - comments: make([]*ast.CommentGroup, 0), - standaloneComments: make([]*ast.CommentGroup, 0), - // enableTrace: true, - } - - p.collectComments(node) - - if _, err := output.Write(p.unindent(p.output(node))); err != nil { - return err - } - - // flush tabwriter, if any - var err error - if tw, _ := output.(*tabwriter.Writer); tw != nil { - err = tw.Flush() - } - - return err -} - -// Fprint "pretty-prints" an HCL node to output -// It calls Config.Fprint with default settings. -func Fprint(output io.Writer, node ast.Node) error { - return DefaultConfig.Fprint(output, node) -} - -// Format formats src HCL and returns the result. -func Format(src []byte) ([]byte, error) { - node, err := parser.Parse(src) - if err != nil { - return nil, err - } - - var buf bytes.Buffer - if err := DefaultConfig.Fprint(&buf, node); err != nil { - return nil, err - } - - // Add trailing newline to result - buf.WriteString("\n") - return buf.Bytes(), nil -} diff --git a/tools/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go b/tools/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go deleted file mode 100644 index 624a18fe3..000000000 --- a/tools/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go +++ /dev/null @@ -1,652 +0,0 @@ -// Package scanner implements a scanner for HCL (HashiCorp Configuration -// Language) source text. -package scanner - -import ( - "bytes" - "fmt" - "os" - "regexp" - "unicode" - "unicode/utf8" - - "github.com/hashicorp/hcl/hcl/token" -) - -// eof represents a marker rune for the end of the reader. -const eof = rune(0) - -// Scanner defines a lexical scanner -type Scanner struct { - buf *bytes.Buffer // Source buffer for advancing and scanning - src []byte // Source buffer for immutable access - - // Source Position - srcPos token.Pos // current position - prevPos token.Pos // previous position, used for peek() method - - lastCharLen int // length of last character in bytes - lastLineLen int // length of last line in characters (for correct column reporting) - - tokStart int // token text start position - tokEnd int // token text end position - - // Error is called for each error encountered. If no Error - // function is set, the error is reported to os.Stderr. - Error func(pos token.Pos, msg string) - - // ErrorCount is incremented by one for each error encountered. - ErrorCount int - - // tokPos is the start position of most recently scanned token; set by - // Scan. The Filename field is always left untouched by the Scanner. If - // an error is reported (via Error) and Position is invalid, the scanner is - // not inside a token. - tokPos token.Pos -} - -// New creates and initializes a new instance of Scanner using src as -// its source content. -func New(src []byte) *Scanner { - // even though we accept a src, we read from a io.Reader compatible type - // (*bytes.Buffer). So in the future we might easily change it to streaming - // read. 
- b := bytes.NewBuffer(src) - s := &Scanner{ - buf: b, - src: src, - } - - // srcPosition always starts with 1 - s.srcPos.Line = 1 - return s -} - -// next reads the next rune from the bufferred reader. Returns the rune(0) if -// an error occurs (or io.EOF is returned). -func (s *Scanner) next() rune { - ch, size, err := s.buf.ReadRune() - if err != nil { - // advance for error reporting - s.srcPos.Column++ - s.srcPos.Offset += size - s.lastCharLen = size - return eof - } - - // remember last position - s.prevPos = s.srcPos - - s.srcPos.Column++ - s.lastCharLen = size - s.srcPos.Offset += size - - if ch == utf8.RuneError && size == 1 { - s.err("illegal UTF-8 encoding") - return ch - } - - if ch == '\n' { - s.srcPos.Line++ - s.lastLineLen = s.srcPos.Column - s.srcPos.Column = 0 - } - - if ch == '\x00' { - s.err("unexpected null character (0x00)") - return eof - } - - if ch == '\uE123' { - s.err("unicode code point U+E123 reserved for internal use") - return utf8.RuneError - } - - // debug - // fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column) - return ch -} - -// unread unreads the previous read Rune and updates the source position -func (s *Scanner) unread() { - if err := s.buf.UnreadRune(); err != nil { - panic(err) // this is user fault, we should catch it - } - s.srcPos = s.prevPos // put back last position -} - -// peek returns the next rune without advancing the reader. -func (s *Scanner) peek() rune { - peek, _, err := s.buf.ReadRune() - if err != nil { - return eof - } - - s.buf.UnreadRune() - return peek -} - -// Scan scans the next token and returns the token. -func (s *Scanner) Scan() token.Token { - ch := s.next() - - // skip white space - for isWhitespace(ch) { - ch = s.next() - } - - var tok token.Type - - // token text markings - s.tokStart = s.srcPos.Offset - s.lastCharLen - - // token position, initial next() is moving the offset by one(size of rune - // actually), though we are interested with the starting point - s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen - if s.srcPos.Column > 0 { - // common case: last character was not a '\n' - s.tokPos.Line = s.srcPos.Line - s.tokPos.Column = s.srcPos.Column - } else { - // last character was a '\n' - // (we cannot be at the beginning of the source - // since we have called next() at least once) - s.tokPos.Line = s.srcPos.Line - 1 - s.tokPos.Column = s.lastLineLen - } - - switch { - case isLetter(ch): - tok = token.IDENT - lit := s.scanIdentifier() - if lit == "true" || lit == "false" { - tok = token.BOOL - } - case isDecimal(ch): - tok = s.scanNumber(ch) - default: - switch ch { - case eof: - tok = token.EOF - case '"': - tok = token.STRING - s.scanString() - case '#', '/': - tok = token.COMMENT - s.scanComment(ch) - case '.': - tok = token.PERIOD - ch = s.peek() - if isDecimal(ch) { - tok = token.FLOAT - ch = s.scanMantissa(ch) - ch = s.scanExponent(ch) - } - case '<': - tok = token.HEREDOC - s.scanHeredoc() - case '[': - tok = token.LBRACK - case ']': - tok = token.RBRACK - case '{': - tok = token.LBRACE - case '}': - tok = token.RBRACE - case ',': - tok = token.COMMA - case '=': - tok = token.ASSIGN - case '+': - tok = token.ADD - case '-': - if isDecimal(s.peek()) { - ch := s.next() - tok = s.scanNumber(ch) - } else { - tok = token.SUB - } - default: - s.err("illegal char") - } - } - - // finish token ending - s.tokEnd = s.srcPos.Offset - - // create token literal - var tokenText string - if s.tokStart >= 0 { - tokenText = string(s.src[s.tokStart:s.tokEnd]) - } - s.tokStart = s.tokEnd // 
ensure idempotency of tokenText() call - - return token.Token{ - Type: tok, - Pos: s.tokPos, - Text: tokenText, - } -} - -func (s *Scanner) scanComment(ch rune) { - // single line comments - if ch == '#' || (ch == '/' && s.peek() != '*') { - if ch == '/' && s.peek() != '/' { - s.err("expected '/' for comment") - return - } - - ch = s.next() - for ch != '\n' && ch >= 0 && ch != eof { - ch = s.next() - } - if ch != eof && ch >= 0 { - s.unread() - } - return - } - - // be sure we get the character after /* This allows us to find comment's - // that are not erminated - if ch == '/' { - s.next() - ch = s.next() // read character after "/*" - } - - // look for /* - style comments - for { - if ch < 0 || ch == eof { - s.err("comment not terminated") - break - } - - ch0 := ch - ch = s.next() - if ch0 == '*' && ch == '/' { - break - } - } -} - -// scanNumber scans a HCL number definition starting with the given rune -func (s *Scanner) scanNumber(ch rune) token.Type { - if ch == '0' { - // check for hexadecimal, octal or float - ch = s.next() - if ch == 'x' || ch == 'X' { - // hexadecimal - ch = s.next() - found := false - for isHexadecimal(ch) { - ch = s.next() - found = true - } - - if !found { - s.err("illegal hexadecimal number") - } - - if ch != eof { - s.unread() - } - - return token.NUMBER - } - - // now it's either something like: 0421(octal) or 0.1231(float) - illegalOctal := false - for isDecimal(ch) { - ch = s.next() - if ch == '8' || ch == '9' { - // this is just a possibility. For example 0159 is illegal, but - // 0159.23 is valid. So we mark a possible illegal octal. If - // the next character is not a period, we'll print the error. - illegalOctal = true - } - } - - if ch == 'e' || ch == 'E' { - ch = s.scanExponent(ch) - return token.FLOAT - } - - if ch == '.' { - ch = s.scanFraction(ch) - - if ch == 'e' || ch == 'E' { - ch = s.next() - ch = s.scanExponent(ch) - } - return token.FLOAT - } - - if illegalOctal { - s.err("illegal octal number") - } - - if ch != eof { - s.unread() - } - return token.NUMBER - } - - s.scanMantissa(ch) - ch = s.next() // seek forward - if ch == 'e' || ch == 'E' { - ch = s.scanExponent(ch) - return token.FLOAT - } - - if ch == '.' { - ch = s.scanFraction(ch) - if ch == 'e' || ch == 'E' { - ch = s.next() - ch = s.scanExponent(ch) - } - return token.FLOAT - } - - if ch != eof { - s.unread() - } - return token.NUMBER -} - -// scanMantissa scans the mantissa beginning from the rune. It returns the next -// non decimal rune. It's used to determine wheter it's a fraction or exponent. -func (s *Scanner) scanMantissa(ch rune) rune { - scanned := false - for isDecimal(ch) { - ch = s.next() - scanned = true - } - - if scanned && ch != eof { - s.unread() - } - return ch -} - -// scanFraction scans the fraction after the '.' rune -func (s *Scanner) scanFraction(ch rune) rune { - if ch == '.' { - ch = s.peek() // we peek just to see if we can move forward - ch = s.scanMantissa(ch) - } - return ch -} - -// scanExponent scans the remaining parts of an exponent after the 'e' or 'E' -// rune. 
-func (s *Scanner) scanExponent(ch rune) rune { - if ch == 'e' || ch == 'E' { - ch = s.next() - if ch == '-' || ch == '+' { - ch = s.next() - } - ch = s.scanMantissa(ch) - } - return ch -} - -// scanHeredoc scans a heredoc string -func (s *Scanner) scanHeredoc() { - // Scan the second '<' in example: '<= len(identBytes) && identRegexp.Match(s.src[lineStart:s.srcPos.Offset-s.lastCharLen]) { - break - } - - // Not an anchor match, record the start of a new line - lineStart = s.srcPos.Offset - } - - if ch == eof { - s.err("heredoc not terminated") - return - } - } - - return -} - -// scanString scans a quoted string -func (s *Scanner) scanString() { - braces := 0 - for { - // '"' opening already consumed - // read character after quote - ch := s.next() - - if (ch == '\n' && braces == 0) || ch < 0 || ch == eof { - s.err("literal not terminated") - return - } - - if ch == '"' && braces == 0 { - break - } - - // If we're going into a ${} then we can ignore quotes for awhile - if braces == 0 && ch == '$' && s.peek() == '{' { - braces++ - s.next() - } else if braces > 0 && ch == '{' { - braces++ - } - if braces > 0 && ch == '}' { - braces-- - } - - if ch == '\\' { - s.scanEscape() - } - } - - return -} - -// scanEscape scans an escape sequence -func (s *Scanner) scanEscape() rune { - // http://en.cppreference.com/w/cpp/language/escape - ch := s.next() // read character after '/' - switch ch { - case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"': - // nothing to do - case '0', '1', '2', '3', '4', '5', '6', '7': - // octal notation - ch = s.scanDigits(ch, 8, 3) - case 'x': - // hexademical notation - ch = s.scanDigits(s.next(), 16, 2) - case 'u': - // universal character name - ch = s.scanDigits(s.next(), 16, 4) - case 'U': - // universal character name - ch = s.scanDigits(s.next(), 16, 8) - default: - s.err("illegal char escape") - } - return ch -} - -// scanDigits scans a rune with the given base for n times. For example an -// octal notation \184 would yield in scanDigits(ch, 8, 3) -func (s *Scanner) scanDigits(ch rune, base, n int) rune { - start := n - for n > 0 && digitVal(ch) < base { - ch = s.next() - if ch == eof { - // If we see an EOF, we halt any more scanning of digits - // immediately. - break - } - - n-- - } - if n > 0 { - s.err("illegal char escape") - } - - if n != start && ch != eof { - // we scanned all digits, put the last non digit char back, - // only if we read anything at all - s.unread() - } - - return ch -} - -// scanIdentifier scans an identifier and returns the literal string -func (s *Scanner) scanIdentifier() string { - offs := s.srcPos.Offset - s.lastCharLen - ch := s.next() - for isLetter(ch) || isDigit(ch) || ch == '-' || ch == '.' { - ch = s.next() - } - - if ch != eof { - s.unread() // we got identifier, put back latest char - } - - return string(s.src[offs:s.srcPos.Offset]) -} - -// recentPosition returns the position of the character immediately after the -// character or token returned by the last call to Scan. 
-func (s *Scanner) recentPosition() (pos token.Pos) { - pos.Offset = s.srcPos.Offset - s.lastCharLen - switch { - case s.srcPos.Column > 0: - // common case: last character was not a '\n' - pos.Line = s.srcPos.Line - pos.Column = s.srcPos.Column - case s.lastLineLen > 0: - // last character was a '\n' - // (we cannot be at the beginning of the source - // since we have called next() at least once) - pos.Line = s.srcPos.Line - 1 - pos.Column = s.lastLineLen - default: - // at the beginning of the source - pos.Line = 1 - pos.Column = 1 - } - return -} - -// err prints the error of any scanning to s.Error function. If the function is -// not defined, by default it prints them to os.Stderr -func (s *Scanner) err(msg string) { - s.ErrorCount++ - pos := s.recentPosition() - - if s.Error != nil { - s.Error(pos, msg) - return - } - - fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg) -} - -// isHexadecimal returns true if the given rune is a letter -func isLetter(ch rune) bool { - return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch) -} - -// isDigit returns true if the given rune is a decimal digit -func isDigit(ch rune) bool { - return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch) -} - -// isDecimal returns true if the given rune is a decimal number -func isDecimal(ch rune) bool { - return '0' <= ch && ch <= '9' -} - -// isHexadecimal returns true if the given rune is an hexadecimal number -func isHexadecimal(ch rune) bool { - return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F' -} - -// isWhitespace returns true if the rune is a space, tab, newline or carriage return -func isWhitespace(ch rune) bool { - return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' -} - -// digitVal returns the integer value of a given octal,decimal or hexadecimal rune -func digitVal(ch rune) int { - switch { - case '0' <= ch && ch <= '9': - return int(ch - '0') - case 'a' <= ch && ch <= 'f': - return int(ch - 'a' + 10) - case 'A' <= ch && ch <= 'F': - return int(ch - 'A' + 10) - } - return 16 // larger than any legal digit val -} diff --git a/tools/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go b/tools/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go deleted file mode 100644 index 5f981eaa2..000000000 --- a/tools/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go +++ /dev/null @@ -1,241 +0,0 @@ -package strconv - -import ( - "errors" - "unicode/utf8" -) - -// ErrSyntax indicates that a value does not have the right syntax for the target type. -var ErrSyntax = errors.New("invalid syntax") - -// Unquote interprets s as a single-quoted, double-quoted, -// or backquoted Go string literal, returning the string value -// that s quotes. (If s is single-quoted, it would be a Go -// character literal; Unquote returns the corresponding -// one-character string.) -func Unquote(s string) (t string, err error) { - n := len(s) - if n < 2 { - return "", ErrSyntax - } - quote := s[0] - if quote != s[n-1] { - return "", ErrSyntax - } - s = s[1 : n-1] - - if quote != '"' { - return "", ErrSyntax - } - if !contains(s, '$') && !contains(s, '{') && contains(s, '\n') { - return "", ErrSyntax - } - - // Is it trivial? Avoid allocation. 
- if !contains(s, '\\') && !contains(s, quote) && !contains(s, '$') { - switch quote { - case '"': - return s, nil - case '\'': - r, size := utf8.DecodeRuneInString(s) - if size == len(s) && (r != utf8.RuneError || size != 1) { - return s, nil - } - } - } - - var runeTmp [utf8.UTFMax]byte - buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations. - for len(s) > 0 { - // If we're starting a '${}' then let it through un-unquoted. - // Specifically: we don't unquote any characters within the `${}` - // section. - if s[0] == '$' && len(s) > 1 && s[1] == '{' { - buf = append(buf, '$', '{') - s = s[2:] - - // Continue reading until we find the closing brace, copying as-is - braces := 1 - for len(s) > 0 && braces > 0 { - r, size := utf8.DecodeRuneInString(s) - if r == utf8.RuneError { - return "", ErrSyntax - } - - s = s[size:] - - n := utf8.EncodeRune(runeTmp[:], r) - buf = append(buf, runeTmp[:n]...) - - switch r { - case '{': - braces++ - case '}': - braces-- - } - } - if braces != 0 { - return "", ErrSyntax - } - if len(s) == 0 { - // If there's no string left, we're done! - break - } else { - // If there's more left, we need to pop back up to the top of the loop - // in case there's another interpolation in this string. - continue - } - } - - if s[0] == '\n' { - return "", ErrSyntax - } - - c, multibyte, ss, err := unquoteChar(s, quote) - if err != nil { - return "", err - } - s = ss - if c < utf8.RuneSelf || !multibyte { - buf = append(buf, byte(c)) - } else { - n := utf8.EncodeRune(runeTmp[:], c) - buf = append(buf, runeTmp[:n]...) - } - if quote == '\'' && len(s) != 0 { - // single-quoted must be single character - return "", ErrSyntax - } - } - return string(buf), nil -} - -// contains reports whether the string contains the byte c. -func contains(s string, c byte) bool { - for i := 0; i < len(s); i++ { - if s[i] == c { - return true - } - } - return false -} - -func unhex(b byte) (v rune, ok bool) { - c := rune(b) - switch { - case '0' <= c && c <= '9': - return c - '0', true - case 'a' <= c && c <= 'f': - return c - 'a' + 10, true - case 'A' <= c && c <= 'F': - return c - 'A' + 10, true - } - return -} - -func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) { - // easy cases - switch c := s[0]; { - case c == quote && (quote == '\'' || quote == '"'): - err = ErrSyntax - return - case c >= utf8.RuneSelf: - r, size := utf8.DecodeRuneInString(s) - return r, true, s[size:], nil - case c != '\\': - return rune(s[0]), false, s[1:], nil - } - - // hard case: c is backslash - if len(s) <= 1 { - err = ErrSyntax - return - } - c := s[1] - s = s[2:] - - switch c { - case 'a': - value = '\a' - case 'b': - value = '\b' - case 'f': - value = '\f' - case 'n': - value = '\n' - case 'r': - value = '\r' - case 't': - value = '\t' - case 'v': - value = '\v' - case 'x', 'u', 'U': - n := 0 - switch c { - case 'x': - n = 2 - case 'u': - n = 4 - case 'U': - n = 8 - } - var v rune - if len(s) < n { - err = ErrSyntax - return - } - for j := 0; j < n; j++ { - x, ok := unhex(s[j]) - if !ok { - err = ErrSyntax - return - } - v = v<<4 | x - } - s = s[n:] - if c == 'x' { - // single-byte string, possibly not UTF-8 - value = v - break - } - if v > utf8.MaxRune { - err = ErrSyntax - return - } - value = v - multibyte = true - case '0', '1', '2', '3', '4', '5', '6', '7': - v := rune(c) - '0' - if len(s) < 2 { - err = ErrSyntax - return - } - for j := 0; j < 2; j++ { // one digit already; two more - x := rune(s[j]) - '0' - if x < 0 || x > 7 { - err = ErrSyntax - return 
- } - v = (v << 3) | x - } - s = s[2:] - if v > 255 { - err = ErrSyntax - return - } - value = v - case '\\': - value = '\\' - case '\'', '"': - if c != quote { - err = ErrSyntax - return - } - value = rune(c) - default: - err = ErrSyntax - return - } - tail = s - return -} diff --git a/tools/vendor/github.com/hashicorp/hcl/hcl/token/position.go b/tools/vendor/github.com/hashicorp/hcl/hcl/token/position.go deleted file mode 100644 index 59c1bb72d..000000000 --- a/tools/vendor/github.com/hashicorp/hcl/hcl/token/position.go +++ /dev/null @@ -1,46 +0,0 @@ -package token - -import "fmt" - -// Pos describes an arbitrary source position -// including the file, line, and column location. -// A Position is valid if the line number is > 0. -type Pos struct { - Filename string // filename, if any - Offset int // offset, starting at 0 - Line int // line number, starting at 1 - Column int // column number, starting at 1 (character count) -} - -// IsValid returns true if the position is valid. -func (p *Pos) IsValid() bool { return p.Line > 0 } - -// String returns a string in one of several forms: -// -// file:line:column valid position with file name -// line:column valid position without file name -// file invalid position with file name -// - invalid position without file name -func (p Pos) String() string { - s := p.Filename - if p.IsValid() { - if s != "" { - s += ":" - } - s += fmt.Sprintf("%d:%d", p.Line, p.Column) - } - if s == "" { - s = "-" - } - return s -} - -// Before reports whether the position p is before u. -func (p Pos) Before(u Pos) bool { - return u.Offset > p.Offset || u.Line > p.Line -} - -// After reports whether the position p is after u. -func (p Pos) After(u Pos) bool { - return u.Offset < p.Offset || u.Line < p.Line -} diff --git a/tools/vendor/github.com/hashicorp/hcl/hcl/token/token.go b/tools/vendor/github.com/hashicorp/hcl/hcl/token/token.go deleted file mode 100644 index e37c0664e..000000000 --- a/tools/vendor/github.com/hashicorp/hcl/hcl/token/token.go +++ /dev/null @@ -1,219 +0,0 @@ -// Package token defines constants representing the lexical tokens for HCL -// (HashiCorp Configuration Language) -package token - -import ( - "fmt" - "strconv" - "strings" - - hclstrconv "github.com/hashicorp/hcl/hcl/strconv" -) - -// Token defines a single HCL token which can be obtained via the Scanner -type Token struct { - Type Type - Pos Pos - Text string - JSON bool -} - -// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language) -type Type int - -const ( - // Special tokens - ILLEGAL Type = iota - EOF - COMMENT - - identifier_beg - IDENT // literals - literal_beg - NUMBER // 12345 - FLOAT // 123.45 - BOOL // true,false - STRING // "abc" - HEREDOC // < 0 { - // Pop the current item - n := len(frontier) - item := frontier[n-1] - frontier = frontier[:n-1] - - switch v := item.Val.(type) { - case *ast.ObjectType: - items, frontier = flattenObjectType(v, item, items, frontier) - case *ast.ListType: - items, frontier = flattenListType(v, item, items, frontier) - default: - items = append(items, item) - } - } - - // Reverse the list since the frontier model runs things backwards - for i := len(items)/2 - 1; i >= 0; i-- { - opp := len(items) - 1 - i - items[i], items[opp] = items[opp], items[i] - } - - // Done! 
Set the original items - list.Items = items - return n, true - }) -} - -func flattenListType( - ot *ast.ListType, - item *ast.ObjectItem, - items []*ast.ObjectItem, - frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) { - // If the list is empty, keep the original list - if len(ot.List) == 0 { - items = append(items, item) - return items, frontier - } - - // All the elements of this object must also be objects! - for _, subitem := range ot.List { - if _, ok := subitem.(*ast.ObjectType); !ok { - items = append(items, item) - return items, frontier - } - } - - // Great! We have a match go through all the items and flatten - for _, elem := range ot.List { - // Add it to the frontier so that we can recurse - frontier = append(frontier, &ast.ObjectItem{ - Keys: item.Keys, - Assign: item.Assign, - Val: elem, - LeadComment: item.LeadComment, - LineComment: item.LineComment, - }) - } - - return items, frontier -} - -func flattenObjectType( - ot *ast.ObjectType, - item *ast.ObjectItem, - items []*ast.ObjectItem, - frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) { - // If the list has no items we do not have to flatten anything - if ot.List.Items == nil { - items = append(items, item) - return items, frontier - } - - // All the elements of this object must also be objects! - for _, subitem := range ot.List.Items { - if _, ok := subitem.Val.(*ast.ObjectType); !ok { - items = append(items, item) - return items, frontier - } - } - - // Great! We have a match go through all the items and flatten - for _, subitem := range ot.List.Items { - // Copy the new key - keys := make([]*ast.ObjectKey, len(item.Keys)+len(subitem.Keys)) - copy(keys, item.Keys) - copy(keys[len(item.Keys):], subitem.Keys) - - // Add it to the frontier so that we can recurse - frontier = append(frontier, &ast.ObjectItem{ - Keys: keys, - Assign: item.Assign, - Val: subitem.Val, - LeadComment: item.LeadComment, - LineComment: item.LineComment, - }) - } - - return items, frontier -} diff --git a/tools/vendor/github.com/hashicorp/hcl/json/parser/parser.go b/tools/vendor/github.com/hashicorp/hcl/json/parser/parser.go deleted file mode 100644 index 125a5f072..000000000 --- a/tools/vendor/github.com/hashicorp/hcl/json/parser/parser.go +++ /dev/null @@ -1,313 +0,0 @@ -package parser - -import ( - "errors" - "fmt" - - "github.com/hashicorp/hcl/hcl/ast" - hcltoken "github.com/hashicorp/hcl/hcl/token" - "github.com/hashicorp/hcl/json/scanner" - "github.com/hashicorp/hcl/json/token" -) - -type Parser struct { - sc *scanner.Scanner - - // Last read token - tok token.Token - commaPrev token.Token - - enableTrace bool - indent int - n int // buffer size (max = 1) -} - -func newParser(src []byte) *Parser { - return &Parser{ - sc: scanner.New(src), - } -} - -// Parse returns the fully parsed source and returns the abstract syntax tree. -func Parse(src []byte) (*ast.File, error) { - p := newParser(src) - return p.Parse() -} - -var errEofToken = errors.New("EOF token found") - -// Parse returns the fully parsed source and returns the abstract syntax tree. 
-func (p *Parser) Parse() (*ast.File, error) { - f := &ast.File{} - var err, scerr error - p.sc.Error = func(pos token.Pos, msg string) { - scerr = fmt.Errorf("%s: %s", pos, msg) - } - - // The root must be an object in JSON - object, err := p.object() - if scerr != nil { - return nil, scerr - } - if err != nil { - return nil, err - } - - // We make our final node an object list so it is more HCL compatible - f.Node = object.List - - // Flatten it, which finds patterns and turns them into more HCL-like - // AST trees. - flattenObjects(f.Node) - - return f, nil -} - -func (p *Parser) objectList() (*ast.ObjectList, error) { - defer un(trace(p, "ParseObjectList")) - node := &ast.ObjectList{} - - for { - n, err := p.objectItem() - if err == errEofToken { - break // we are finished - } - - // we don't return a nil node, because might want to use already - // collected items. - if err != nil { - return node, err - } - - node.Add(n) - - // Check for a followup comma. If it isn't a comma, then we're done - if tok := p.scan(); tok.Type != token.COMMA { - break - } - } - - return node, nil -} - -// objectItem parses a single object item -func (p *Parser) objectItem() (*ast.ObjectItem, error) { - defer un(trace(p, "ParseObjectItem")) - - keys, err := p.objectKey() - if err != nil { - return nil, err - } - - o := &ast.ObjectItem{ - Keys: keys, - } - - switch p.tok.Type { - case token.COLON: - pos := p.tok.Pos - o.Assign = hcltoken.Pos{ - Filename: pos.Filename, - Offset: pos.Offset, - Line: pos.Line, - Column: pos.Column, - } - - o.Val, err = p.objectValue() - if err != nil { - return nil, err - } - } - - return o, nil -} - -// objectKey parses an object key and returns a ObjectKey AST -func (p *Parser) objectKey() ([]*ast.ObjectKey, error) { - keyCount := 0 - keys := make([]*ast.ObjectKey, 0) - - for { - tok := p.scan() - switch tok.Type { - case token.EOF: - return nil, errEofToken - case token.STRING: - keyCount++ - keys = append(keys, &ast.ObjectKey{ - Token: p.tok.HCLToken(), - }) - case token.COLON: - // If we have a zero keycount it means that we never got - // an object key, i.e. `{ :`. This is a syntax error. - if keyCount == 0 { - return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type) - } - - // Done - return keys, nil - case token.ILLEGAL: - return nil, errors.New("illegal") - default: - return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type) - } - } -} - -// object parses any type of object, such as number, bool, string, object or -// list. -func (p *Parser) objectValue() (ast.Node, error) { - defer un(trace(p, "ParseObjectValue")) - tok := p.scan() - - switch tok.Type { - case token.NUMBER, token.FLOAT, token.BOOL, token.NULL, token.STRING: - return p.literalType() - case token.LBRACE: - return p.objectType() - case token.LBRACK: - return p.listType() - case token.EOF: - return nil, errEofToken - } - - return nil, fmt.Errorf("Expected object value, got unknown token: %+v", tok) -} - -// object parses any type of object, such as number, bool, string, object or -// list. 
-func (p *Parser) object() (*ast.ObjectType, error) { - defer un(trace(p, "ParseType")) - tok := p.scan() - - switch tok.Type { - case token.LBRACE: - return p.objectType() - case token.EOF: - return nil, errEofToken - } - - return nil, fmt.Errorf("Expected object, got unknown token: %+v", tok) -} - -// objectType parses an object type and returns a ObjectType AST -func (p *Parser) objectType() (*ast.ObjectType, error) { - defer un(trace(p, "ParseObjectType")) - - // we assume that the currently scanned token is a LBRACE - o := &ast.ObjectType{} - - l, err := p.objectList() - - // if we hit RBRACE, we are good to go (means we parsed all Items), if it's - // not a RBRACE, it's an syntax error and we just return it. - if err != nil && p.tok.Type != token.RBRACE { - return nil, err - } - - o.List = l - return o, nil -} - -// listType parses a list type and returns a ListType AST -func (p *Parser) listType() (*ast.ListType, error) { - defer un(trace(p, "ParseListType")) - - // we assume that the currently scanned token is a LBRACK - l := &ast.ListType{} - - for { - tok := p.scan() - switch tok.Type { - case token.NUMBER, token.FLOAT, token.STRING: - node, err := p.literalType() - if err != nil { - return nil, err - } - - l.Add(node) - case token.COMMA: - continue - case token.LBRACE: - node, err := p.objectType() - if err != nil { - return nil, err - } - - l.Add(node) - case token.BOOL: - // TODO(arslan) should we support? not supported by HCL yet - case token.LBRACK: - // TODO(arslan) should we support nested lists? Even though it's - // written in README of HCL, it's not a part of the grammar - // (not defined in parse.y) - case token.RBRACK: - // finished - return l, nil - default: - return nil, fmt.Errorf("unexpected token while parsing list: %s", tok.Type) - } - - } -} - -// literalType parses a literal type and returns a LiteralType AST -func (p *Parser) literalType() (*ast.LiteralType, error) { - defer un(trace(p, "ParseLiteral")) - - return &ast.LiteralType{ - Token: p.tok.HCLToken(), - }, nil -} - -// scan returns the next token from the underlying scanner. If a token has -// been unscanned then read that instead. -func (p *Parser) scan() token.Token { - // If we have a token on the buffer, then return it. - if p.n != 0 { - p.n = 0 - return p.tok - } - - p.tok = p.sc.Scan() - return p.tok -} - -// unscan pushes the previously read token back onto the buffer. -func (p *Parser) unscan() { - p.n = 1 -} - -// ---------------------------------------------------------------------------- -// Parsing support - -func (p *Parser) printTrace(a ...interface{}) { - if !p.enableTrace { - return - } - - const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " - const n = len(dots) - fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column) - - i := 2 * p.indent - for i > n { - fmt.Print(dots) - i -= n - } - // i <= n - fmt.Print(dots[0:i]) - fmt.Println(a...) 
-}
-
-func trace(p *Parser, msg string) *Parser {
-	p.printTrace(msg, "(")
-	p.indent++
-	return p
-}
-
-// Usage pattern: defer un(trace(p, "..."))
-func un(p *Parser) {
-	p.indent--
-	p.printTrace(")")
-}
diff --git a/tools/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go b/tools/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go
deleted file mode 100644
index fe3f0f095..000000000
--- a/tools/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go
+++ /dev/null
@@ -1,451 +0,0 @@
-package scanner
-
-import (
-	"bytes"
-	"fmt"
-	"os"
-	"unicode"
-	"unicode/utf8"
-
-	"github.com/hashicorp/hcl/json/token"
-)
-
-// eof represents a marker rune for the end of the reader.
-const eof = rune(0)
-
-// Scanner defines a lexical scanner
-type Scanner struct {
-	buf *bytes.Buffer // Source buffer for advancing and scanning
-	src []byte        // Source buffer for immutable access
-
-	// Source Position
-	srcPos  token.Pos // current position
-	prevPos token.Pos // previous position, used for peek() method
-
-	lastCharLen int // length of last character in bytes
-	lastLineLen int // length of last line in characters (for correct column reporting)
-
-	tokStart int // token text start position
-	tokEnd   int // token text end position
-
-	// Error is called for each error encountered. If no Error
-	// function is set, the error is reported to os.Stderr.
-	Error func(pos token.Pos, msg string)
-
-	// ErrorCount is incremented by one for each error encountered.
-	ErrorCount int
-
-	// tokPos is the start position of most recently scanned token; set by
-	// Scan. The Filename field is always left untouched by the Scanner. If
-	// an error is reported (via Error) and Position is invalid, the scanner is
-	// not inside a token.
-	tokPos token.Pos
-}
-
-// New creates and initializes a new instance of Scanner using src as
-// its source content.
-func New(src []byte) *Scanner {
-	// even though we accept a src, we read from an io.Reader compatible type
-	// (*bytes.Buffer). So in the future we might easily change it to streaming
-	// read.
-	b := bytes.NewBuffer(src)
-	s := &Scanner{
-		buf: b,
-		src: src,
-	}
-
-	// srcPosition always starts with 1
-	s.srcPos.Line = 1
-	return s
-}
-
-// next reads the next rune from the buffered reader. Returns rune(0) if
-// an error occurs (or io.EOF is returned).
-func (s *Scanner) next() rune {
-	ch, size, err := s.buf.ReadRune()
-	if err != nil {
-		// advance for error reporting
-		s.srcPos.Column++
-		s.srcPos.Offset += size
-		s.lastCharLen = size
-		return eof
-	}
-
-	if ch == utf8.RuneError && size == 1 {
-		s.srcPos.Column++
-		s.srcPos.Offset += size
-		s.lastCharLen = size
-		s.err("illegal UTF-8 encoding")
-		return ch
-	}
-
-	// remember last position
-	s.prevPos = s.srcPos
-
-	s.srcPos.Column++
-	s.lastCharLen = size
-	s.srcPos.Offset += size
-
-	if ch == '\n' {
-		s.srcPos.Line++
-		s.lastLineLen = s.srcPos.Column
-		s.srcPos.Column = 0
-	}
-
-	// debug
-	// fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column)
-	return ch
-}
-
-// unread unreads the previously read rune and updates the source position
-func (s *Scanner) unread() {
-	if err := s.buf.UnreadRune(); err != nil {
-		panic(err) // this is user fault, we should catch it
-	}
-	s.srcPos = s.prevPos // put back last position
-}
-
-// peek returns the next rune without advancing the reader.
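// For illustration (input invented): peek does not move the read position,
// while next does.
//
//	s := New([]byte("ab"))
//	_ = s.peek() // 'a', position unchanged
//	_ = s.next() // 'a', position advances
//	_ = s.peek() // 'b'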
-func (s *Scanner) peek() rune { - peek, _, err := s.buf.ReadRune() - if err != nil { - return eof - } - - s.buf.UnreadRune() - return peek -} - -// Scan scans the next token and returns the token. -func (s *Scanner) Scan() token.Token { - ch := s.next() - - // skip white space - for isWhitespace(ch) { - ch = s.next() - } - - var tok token.Type - - // token text markings - s.tokStart = s.srcPos.Offset - s.lastCharLen - - // token position, initial next() is moving the offset by one(size of rune - // actually), though we are interested with the starting point - s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen - if s.srcPos.Column > 0 { - // common case: last character was not a '\n' - s.tokPos.Line = s.srcPos.Line - s.tokPos.Column = s.srcPos.Column - } else { - // last character was a '\n' - // (we cannot be at the beginning of the source - // since we have called next() at least once) - s.tokPos.Line = s.srcPos.Line - 1 - s.tokPos.Column = s.lastLineLen - } - - switch { - case isLetter(ch): - lit := s.scanIdentifier() - if lit == "true" || lit == "false" { - tok = token.BOOL - } else if lit == "null" { - tok = token.NULL - } else { - s.err("illegal char") - } - case isDecimal(ch): - tok = s.scanNumber(ch) - default: - switch ch { - case eof: - tok = token.EOF - case '"': - tok = token.STRING - s.scanString() - case '.': - tok = token.PERIOD - ch = s.peek() - if isDecimal(ch) { - tok = token.FLOAT - ch = s.scanMantissa(ch) - ch = s.scanExponent(ch) - } - case '[': - tok = token.LBRACK - case ']': - tok = token.RBRACK - case '{': - tok = token.LBRACE - case '}': - tok = token.RBRACE - case ',': - tok = token.COMMA - case ':': - tok = token.COLON - case '-': - if isDecimal(s.peek()) { - ch := s.next() - tok = s.scanNumber(ch) - } else { - s.err("illegal char") - } - default: - s.err("illegal char: " + string(ch)) - } - } - - // finish token ending - s.tokEnd = s.srcPos.Offset - - // create token literal - var tokenText string - if s.tokStart >= 0 { - tokenText = string(s.src[s.tokStart:s.tokEnd]) - } - s.tokStart = s.tokEnd // ensure idempotency of tokenText() call - - return token.Token{ - Type: tok, - Pos: s.tokPos, - Text: tokenText, - } -} - -// scanNumber scans a HCL number definition starting with the given rune -func (s *Scanner) scanNumber(ch rune) token.Type { - zero := ch == '0' - pos := s.srcPos - - s.scanMantissa(ch) - ch = s.next() // seek forward - if ch == 'e' || ch == 'E' { - ch = s.scanExponent(ch) - return token.FLOAT - } - - if ch == '.' { - ch = s.scanFraction(ch) - if ch == 'e' || ch == 'E' { - ch = s.next() - ch = s.scanExponent(ch) - } - return token.FLOAT - } - - if ch != eof { - s.unread() - } - - // If we have a larger number and this is zero, error - if zero && pos != s.srcPos { - s.err("numbers cannot start with 0") - } - - return token.NUMBER -} - -// scanMantissa scans the mantissa beginning from the rune. It returns the next -// non decimal rune. It's used to determine wheter it's a fraction or exponent. -func (s *Scanner) scanMantissa(ch rune) rune { - scanned := false - for isDecimal(ch) { - ch = s.next() - scanned = true - } - - if scanned && ch != eof { - s.unread() - } - return ch -} - -// scanFraction scans the fraction after the '.' rune -func (s *Scanner) scanFraction(ch rune) rune { - if ch == '.' { - ch = s.peek() // we peek just to see if we can move forward - ch = s.scanMantissa(ch) - } - return ch -} - -// scanExponent scans the remaining parts of an exponent after the 'e' or 'E' -// rune. 
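// For illustration (inputs invented), with the mantissa already consumed:
//
//	"e9"   -> consumes 'e' and the digits, returns the rune after them
//	"E-42" -> consumes 'E', the sign, and the digits
//	"x"    -> not an exponent; returns 'x' unchanged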
-func (s *Scanner) scanExponent(ch rune) rune {
-	if ch == 'e' || ch == 'E' {
-		ch = s.next()
-		if ch == '-' || ch == '+' {
-			ch = s.next()
-		}
-		ch = s.scanMantissa(ch)
-	}
-	return ch
-}
-
-// scanString scans a quoted string
-func (s *Scanner) scanString() {
-	braces := 0
-	for {
-		// '"' opening already consumed
-		// read character after quote
-		ch := s.next()
-
-		if ch == '\n' || ch < 0 || ch == eof {
-			s.err("literal not terminated")
-			return
-		}
-
-		if ch == '"' {
-			break
-		}
-
-		// If we're going into a ${} then we can ignore quotes for a while
-		if braces == 0 && ch == '$' && s.peek() == '{' {
-			braces++
-			s.next()
-		} else if braces > 0 && ch == '{' {
-			braces++
-		}
-		if braces > 0 && ch == '}' {
-			braces--
-		}
-
-		if ch == '\\' {
-			s.scanEscape()
-		}
-	}
-
-	return
-}
-
-// scanEscape scans an escape sequence
-func (s *Scanner) scanEscape() rune {
-	// http://en.cppreference.com/w/cpp/language/escape
-	ch := s.next() // read character after '\'
-	switch ch {
-	case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"':
-		// nothing to do
-	case '0', '1', '2', '3', '4', '5', '6', '7':
-		// octal notation
-		ch = s.scanDigits(ch, 8, 3)
-	case 'x':
-		// hexadecimal notation
-		ch = s.scanDigits(s.next(), 16, 2)
-	case 'u':
-		// universal character name
-		ch = s.scanDigits(s.next(), 16, 4)
-	case 'U':
-		// universal character name
-		ch = s.scanDigits(s.next(), 16, 8)
-	default:
-		s.err("illegal char escape")
-	}
-	return ch
-}
-
-// scanDigits scans a rune with the given base up to n times. For example, an
-// octal escape such as \123 is scanned with scanDigits(ch, 8, 3)
-func (s *Scanner) scanDigits(ch rune, base, n int) rune {
-	for n > 0 && digitVal(ch) < base {
-		ch = s.next()
-		n--
-	}
-	if n > 0 {
-		s.err("illegal char escape")
-	}
-
-	// we scanned all digits, put the last non-digit char back
-	s.unread()
-	return ch
-}
-
-// scanIdentifier scans an identifier and returns the literal string
-func (s *Scanner) scanIdentifier() string {
-	offs := s.srcPos.Offset - s.lastCharLen
-	ch := s.next()
-	for isLetter(ch) || isDigit(ch) || ch == '-' {
-		ch = s.next()
-	}
-
-	if ch != eof {
-		s.unread() // we got identifier, put back latest char
-	}
-
-	return string(s.src[offs:s.srcPos.Offset])
-}
-
-// recentPosition returns the position of the character immediately after the
-// character or token returned by the last call to Scan.
-func (s *Scanner) recentPosition() (pos token.Pos) {
-	pos.Offset = s.srcPos.Offset - s.lastCharLen
-	switch {
-	case s.srcPos.Column > 0:
-		// common case: last character was not a '\n'
-		pos.Line = s.srcPos.Line
-		pos.Column = s.srcPos.Column
-	case s.lastLineLen > 0:
-		// last character was a '\n'
-		// (we cannot be at the beginning of the source
-		// since we have called next() at least once)
-		pos.Line = s.srcPos.Line - 1
-		pos.Column = s.lastLineLen
-	default:
-		// at the beginning of the source
-		pos.Line = 1
-		pos.Column = 1
-	}
-	return
-}
-
-// err passes a scanning error to the s.Error function.
If the function is
-// not defined, the error is printed to os.Stderr by default
-func (s *Scanner) err(msg string) {
-	s.ErrorCount++
-	pos := s.recentPosition()
-
-	if s.Error != nil {
-		s.Error(pos, msg)
-		return
-	}
-
-	fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg)
-}
-
-// isLetter returns true if the given rune is a letter
-func isLetter(ch rune) bool {
-	return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch)
-}
-
-// isDigit returns true if the given rune is a decimal digit
-func isDigit(ch rune) bool {
-	return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)
-}
-
-// isDecimal returns true if the given rune is a decimal number
-func isDecimal(ch rune) bool {
-	return '0' <= ch && ch <= '9'
-}
-
-// isHexadecimal returns true if the given rune is a hexadecimal digit
-func isHexadecimal(ch rune) bool {
-	return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F'
-}
-
-// isWhitespace returns true if the rune is a space, tab, newline or carriage return
-func isWhitespace(ch rune) bool {
-	return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r'
-}
-
-// digitVal returns the integer value of a given octal, decimal or hexadecimal rune
-func digitVal(ch rune) int {
-	switch {
-	case '0' <= ch && ch <= '9':
-		return int(ch - '0')
-	case 'a' <= ch && ch <= 'f':
-		return int(ch - 'a' + 10)
-	case 'A' <= ch && ch <= 'F':
-		return int(ch - 'A' + 10)
-	}
-	return 16 // larger than any legal digit val
-}
diff --git a/tools/vendor/github.com/hashicorp/hcl/json/token/position.go b/tools/vendor/github.com/hashicorp/hcl/json/token/position.go
deleted file mode 100644
index 59c1bb72d..000000000
--- a/tools/vendor/github.com/hashicorp/hcl/json/token/position.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package token
-
-import "fmt"
-
-// Pos describes an arbitrary source position
-// including the file, line, and column location.
-// A position is valid if the line number is > 0.
-type Pos struct {
-	Filename string // filename, if any
-	Offset   int    // offset, starting at 0
-	Line     int    // line number, starting at 1
-	Column   int    // column number, starting at 1 (character count)
-}
-
-// IsValid returns true if the position is valid.
-func (p *Pos) IsValid() bool { return p.Line > 0 }
-
-// String returns a string in one of several forms:
-//
-//	file:line:column    valid position with file name
-//	line:column         valid position without file name
-//	file                invalid position with file name
-//	-                   invalid position without file name
-func (p Pos) String() string {
-	s := p.Filename
-	if p.IsValid() {
-		if s != "" {
-			s += ":"
-		}
-		s += fmt.Sprintf("%d:%d", p.Line, p.Column)
-	}
-	if s == "" {
-		s = "-"
-	}
-	return s
-}
-
-// Before reports whether the position p is before u.
-func (p Pos) Before(u Pos) bool {
-	return u.Offset > p.Offset || u.Line > p.Line
-}
-
-// After reports whether the position p is after u.
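// For example (positions invented):
//
//	a := Pos{Offset: 3, Line: 1, Column: 4}
//	b := Pos{Offset: 9, Line: 2, Column: 1}
//	a.Before(b) // true
//	b.After(a)  // true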
-func (p Pos) After(u Pos) bool { - return u.Offset < p.Offset || u.Line < p.Line -} diff --git a/tools/vendor/github.com/hashicorp/hcl/json/token/token.go b/tools/vendor/github.com/hashicorp/hcl/json/token/token.go deleted file mode 100644 index 95a0c3eee..000000000 --- a/tools/vendor/github.com/hashicorp/hcl/json/token/token.go +++ /dev/null @@ -1,118 +0,0 @@ -package token - -import ( - "fmt" - "strconv" - - hcltoken "github.com/hashicorp/hcl/hcl/token" -) - -// Token defines a single HCL token which can be obtained via the Scanner -type Token struct { - Type Type - Pos Pos - Text string -} - -// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language) -type Type int - -const ( - // Special tokens - ILLEGAL Type = iota - EOF - - identifier_beg - literal_beg - NUMBER // 12345 - FLOAT // 123.45 - BOOL // true,false - STRING // "abc" - NULL // null - literal_end - identifier_end - - operator_beg - LBRACK // [ - LBRACE // { - COMMA // , - PERIOD // . - COLON // : - - RBRACK // ] - RBRACE // } - - operator_end -) - -var tokens = [...]string{ - ILLEGAL: "ILLEGAL", - - EOF: "EOF", - - NUMBER: "NUMBER", - FLOAT: "FLOAT", - BOOL: "BOOL", - STRING: "STRING", - NULL: "NULL", - - LBRACK: "LBRACK", - LBRACE: "LBRACE", - COMMA: "COMMA", - PERIOD: "PERIOD", - COLON: "COLON", - - RBRACK: "RBRACK", - RBRACE: "RBRACE", -} - -// String returns the string corresponding to the token tok. -func (t Type) String() string { - s := "" - if 0 <= t && t < Type(len(tokens)) { - s = tokens[t] - } - if s == "" { - s = "token(" + strconv.Itoa(int(t)) + ")" - } - return s -} - -// IsIdentifier returns true for tokens corresponding to identifiers and basic -// type literals; it returns false otherwise. -func (t Type) IsIdentifier() bool { return identifier_beg < t && t < identifier_end } - -// IsLiteral returns true for tokens corresponding to basic type literals; it -// returns false otherwise. -func (t Type) IsLiteral() bool { return literal_beg < t && t < literal_end } - -// IsOperator returns true for tokens corresponding to operators and -// delimiters; it returns false otherwise. -func (t Type) IsOperator() bool { return operator_beg < t && t < operator_end } - -// String returns the token's literal text. Note that this is only -// applicable for certain token types, such as token.IDENT, -// token.STRING, etc.. -func (t Token) String() string { - return fmt.Sprintf("%s %s %s", t.Pos.String(), t.Type.String(), t.Text) -} - -// HCLToken converts this token to an HCL token. -// -// The token type must be a literal type or this will panic. 
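// For example (tokens invented):
//
//	Token{Type: STRING, Text: `"abc"`}.HCLToken() // HCL STRING with JSON: true
//	Token{Type: NULL}.HCLToken()                  // empty HCL STRING
//	Token{Type: LBRACK}.HCLToken()                // panics: not a literal type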
-func (t Token) HCLToken() hcltoken.Token {
-	switch t.Type {
-	case BOOL:
-		return hcltoken.Token{Type: hcltoken.BOOL, Text: t.Text}
-	case FLOAT:
-		return hcltoken.Token{Type: hcltoken.FLOAT, Text: t.Text}
-	case NULL:
-		return hcltoken.Token{Type: hcltoken.STRING, Text: ""}
-	case NUMBER:
-		return hcltoken.Token{Type: hcltoken.NUMBER, Text: t.Text}
-	case STRING:
-		return hcltoken.Token{Type: hcltoken.STRING, Text: t.Text, JSON: true}
-	default:
-		panic(fmt.Sprintf("unimplemented HCLToken for type: %s", t.Type))
-	}
-}
diff --git a/tools/vendor/github.com/hashicorp/hcl/lex.go b/tools/vendor/github.com/hashicorp/hcl/lex.go
deleted file mode 100644
index d9993c292..000000000
--- a/tools/vendor/github.com/hashicorp/hcl/lex.go
+++ /dev/null
@@ -1,38 +0,0 @@
-package hcl
-
-import (
-	"unicode"
-	"unicode/utf8"
-)
-
-type lexModeValue byte
-
-const (
-	lexModeUnknown lexModeValue = iota
-	lexModeHcl
-	lexModeJson
-)
-
-// lexMode returns whether we're going to be parsing in JSON
-// mode or HCL mode.
-func lexMode(v []byte) lexModeValue {
-	var (
-		r      rune
-		w      int
-		offset int
-	)
-
-	for {
-		r, w = utf8.DecodeRune(v[offset:])
-		offset += w
-		if unicode.IsSpace(r) {
-			continue
-		}
-		if r == '{' {
-			return lexModeJson
-		}
-		break
-	}
-
-	return lexModeHcl
-}
diff --git a/tools/vendor/github.com/hashicorp/hcl/parse.go b/tools/vendor/github.com/hashicorp/hcl/parse.go
deleted file mode 100644
index 1fca53c4c..000000000
--- a/tools/vendor/github.com/hashicorp/hcl/parse.go
+++ /dev/null
@@ -1,39 +0,0 @@
-package hcl
-
-import (
-	"fmt"
-
-	"github.com/hashicorp/hcl/hcl/ast"
-	hclParser "github.com/hashicorp/hcl/hcl/parser"
-	jsonParser "github.com/hashicorp/hcl/json/parser"
-)
-
-// ParseBytes accepts a byte slice as input and returns the AST tree.
-//
-// Input can be either JSON or HCL
-func ParseBytes(in []byte) (*ast.File, error) {
-	return parse(in)
-}
-
-// ParseString accepts a string as input and returns the AST tree.
-func ParseString(input string) (*ast.File, error) {
-	return parse([]byte(input))
-}
-
-func parse(in []byte) (*ast.File, error) {
-	switch lexMode(in) {
-	case lexModeHcl:
-		return hclParser.Parse(in)
-	case lexModeJson:
-		return jsonParser.Parse(in)
-	}
-
-	return nil, fmt.Errorf("unknown config format")
-}
-
-// Parse parses the given input and returns the root object.
-//
-// The input format can be either HCL or JSON.
-func Parse(input string) (*ast.File, error) {
-	return parse([]byte(input))
-}
diff --git a/tools/vendor/github.com/kisielk/errcheck/errcheck/errcheck.go b/tools/vendor/github.com/kisielk/errcheck/errcheck/errcheck.go
index a7a2a30bf..325aeec98 100644
--- a/tools/vendor/github.com/kisielk/errcheck/errcheck/errcheck.go
+++ b/tools/vendor/github.com/kisielk/errcheck/errcheck/errcheck.go
@@ -23,7 +23,9 @@ func init() {
 }
 
 var (
-	// ErrNoGoFiles is returned when CheckPackage is run on a package with no Go source files
+	// ErrNoGoFiles is returned when CheckPackage is run on a package with no Go source files.
+	//
+	// Deprecated: this error is no longer returned by errcheck.LoadPackages.
 	ErrNoGoFiles = errors.New("package contains no go source files")
 )
 
@@ -162,7 +164,7 @@ var loadPackages = func(cfg *packages.Config, paths ...string) ([]*packages.Pack
 
 // LoadPackages loads all the packages in all the paths provided. It uses the
 // exclusions and build tags provided by the user when loading the packages.
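// A hypothetical call site (field values invented; Tags and Mod are the
// Checker fields used below):
//
//	c := &Checker{Tags: []string{"integration"}, Mod: "vendor"}
//	pkgs, err := c.LoadPackages("./...")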
func (c *Checker) LoadPackages(paths ...string) ([]*packages.Package, error) { - buildFlags := []string{fmtTags(c.Tags)} + buildFlags := []string{fmt.Sprintf("-tags=%s", strings.Join(c.Tags, ","))} if c.Mod != "" { buildFlags = append(buildFlags, fmt.Sprintf("-mod=%s", c.Mod)) } diff --git a/tools/vendor/github.com/kisielk/errcheck/errcheck/tags.go b/tools/vendor/github.com/kisielk/errcheck/errcheck/tags.go deleted file mode 100644 index 7b423ca69..000000000 --- a/tools/vendor/github.com/kisielk/errcheck/errcheck/tags.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build go1.13 - -package errcheck - -import ( - "fmt" - "strings" -) - -func fmtTags(tags []string) string { - return fmt.Sprintf("-tags=%s", strings.Join(tags, ",")) -} diff --git a/tools/vendor/github.com/kisielk/errcheck/errcheck/tags_compat.go b/tools/vendor/github.com/kisielk/errcheck/errcheck/tags_compat.go deleted file mode 100644 index 2f534f40a..000000000 --- a/tools/vendor/github.com/kisielk/errcheck/errcheck/tags_compat.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build go1.11 -// +build !go1.13 - -package errcheck - -import ( - "fmt" - "strings" -) - -func fmtTags(tags []string) string { - return fmt.Sprintf("-tags=%s", strings.Join(tags, " ")) -} diff --git a/tools/vendor/github.com/kkHAIKE/contextcheck/contextcheck.go b/tools/vendor/github.com/kkHAIKE/contextcheck/contextcheck.go index 62696351a..c62909a87 100644 --- a/tools/vendor/github.com/kkHAIKE/contextcheck/contextcheck.go +++ b/tools/vendor/github.com/kkHAIKE/contextcheck/contextcheck.go @@ -727,6 +727,14 @@ func (r *runner) getFunction(instr ssa.Instruction) (f *ssa.Function) { } func (r *runner) isCtxType(tp types.Type) bool { + if p, ok := tp.(*types.Pointer); ok { + // opaqueType is not exposed and lead to unreachable error. + // Related to https://github.com/golang/tools/blob/63229bc79404d8cf2fe4e88ad569168fe251d993/go/ssa/builder.go#L107 + if p.Elem().String() == "deferStack" { + return false + } + } + return types.Identical(tp, r.ctxTyp) || types.Identical(tp, r.ctxPTyp) } diff --git a/tools/vendor/github.com/ldez/exptostd/exptostd.go b/tools/vendor/github.com/ldez/exptostd/exptostd.go index dc03d015c..cf6c5e842 100644 --- a/tools/vendor/github.com/ldez/exptostd/exptostd.go +++ b/tools/vendor/github.com/ldez/exptostd/exptostd.go @@ -356,11 +356,6 @@ func suggestedFixForClear(callExpr *ast.CallExpr) (analysis.SuggestedFix, error) } func suggestedFixForKeysOrValues(callExpr *ast.CallExpr) (analysis.SuggestedFix, error) { - var name string - if ident, ok := callExpr.Args[0].(*ast.Ident); ok { - name = ident.Name - } - s := &ast.CallExpr{ Fun: &ast.SelectorExpr{ X: &ast.Ident{Name: "slices"}, @@ -375,10 +370,8 @@ func suggestedFixForKeysOrValues(callExpr *ast.CallExpr) (analysis.SuggestedFix, }, &ast.BasicLit{Kind: token.INT, Value: "0"}, &ast.CallExpr{ - Fun: &ast.Ident{Name: "len"}, - Args: []ast.Expr{ - &ast.Ident{Name: name}, - }, + Fun: &ast.Ident{Name: "len"}, + Args: callExpr.Args, }, }, }, diff --git a/tools/vendor/github.com/macabu/inamedparam/.golangci.yml b/tools/vendor/github.com/macabu/inamedparam/.golangci.yml index f0efa1cb6..7f301af5a 100644 --- a/tools/vendor/github.com/macabu/inamedparam/.golangci.yml +++ b/tools/vendor/github.com/macabu/inamedparam/.golangci.yml @@ -1,33 +1,21 @@ run: - deadline: 30s + timeout: 30s linters: enable-all: true disable: - cyclop - - deadcode - depguard - - exhaustivestruct - exhaustruct - forcetypeassert - gochecknoglobals - gocognit - - golint - - ifshort - - interfacer - - maligned - nilnil - - nosnakecase - paralleltest 
- - scopelint - - structcheck - - varcheck linters-settings: gci: sections: - standard - default - - prefix(github.com/macabu/inamedparam) - section-separators: - - newLine + - localmodule diff --git a/tools/vendor/github.com/macabu/inamedparam/README.md b/tools/vendor/github.com/macabu/inamedparam/README.md index 3336cb950..56fce4ff0 100644 --- a/tools/vendor/github.com/macabu/inamedparam/README.md +++ b/tools/vendor/github.com/macabu/inamedparam/README.md @@ -15,7 +15,7 @@ You can run it standalone through `go vet`. You must install the binary to your `$GOBIN` folder like so: ```sh -$ go install github.com/macabu/inamedparam/cmd/inamedparam +$ go install github.com/macabu/inamedparam/cmd/inamedparam@latest ``` And then navigate to your Go project's root folder, where can run `go vet` in the following way: @@ -29,10 +29,16 @@ $ go vet -vettool=$(which inamedparam) ./... To enable it, you can add it to your `.golangci.yml` file, as such: ```yaml run: - deadline: 30s + timeout: 30s linters: disable-all: true enable: - inamedparam + +linters-settings: + inamedparam: + # Skips check for interface methods with only a single parameter. + # Default: false + skip-single-param: true ``` diff --git a/tools/vendor/github.com/macabu/inamedparam/inamedparam.go b/tools/vendor/github.com/macabu/inamedparam/inamedparam.go index 8ba7fe188..932bd3b59 100644 --- a/tools/vendor/github.com/macabu/inamedparam/inamedparam.go +++ b/tools/vendor/github.com/macabu/inamedparam/inamedparam.go @@ -33,14 +33,17 @@ func flags() flag.FlagSet { return *flags } -func run(pass *analysis.Pass) (interface{}, error) { +func run(pass *analysis.Pass) (any, error) { inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) types := []ast.Node{ &ast.InterfaceType{}, } - skipSingleParam := pass.Analyzer.Flags.Lookup(flagSkipSingleParam).Value.(flag.Getter).Get().(bool) + var skipSingleParam bool + if f := pass.Analyzer.Flags.Lookup(flagSkipSingleParam); f != nil { + skipSingleParam, _ = f.Value.(flag.Getter).Get().(bool) + } inspect.Preorder(types, func(n ast.Node) { interfaceType, ok := n.(*ast.InterfaceType) diff --git a/tools/vendor/github.com/magiconair/properties/.gitignore b/tools/vendor/github.com/magiconair/properties/.gitignore deleted file mode 100644 index e7081ff52..000000000 --- a/tools/vendor/github.com/magiconair/properties/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -*.sublime-project -*.sublime-workspace -*.un~ -*.swp -.idea/ -*.iml diff --git a/tools/vendor/github.com/magiconair/properties/CHANGELOG.md b/tools/vendor/github.com/magiconair/properties/CHANGELOG.md deleted file mode 100644 index 842e8e24f..000000000 --- a/tools/vendor/github.com/magiconair/properties/CHANGELOG.md +++ /dev/null @@ -1,205 +0,0 @@ -## Changelog - -### [1.8.7](https://github.com/magiconair/properties/tree/v1.8.7) - 08 Dec 2022 - - * [PR #65](https://github.com/magiconair/properties/pull/65): Speedup Merge - - Thanks to [@AdityaVallabh](https://github.com/AdityaVallabh) for the patch. - - * [PR #66](https://github.com/magiconair/properties/pull/66): use github actions - -### [1.8.6](https://github.com/magiconair/properties/tree/v1.8.6) - 23 Feb 2022 - - * [PR #57](https://github.com/magiconair/properties/pull/57):Fix "unreachable code" lint error - - Thanks to [@ellie](https://github.com/ellie) for the patch. 
- - * [PR #63](https://github.com/magiconair/properties/pull/63): Make TestMustGetParsedDuration backwards compatible - - This patch ensures that the `TestMustGetParsedDuration` still works with `go1.3` to make the - author happy until it affects real users. - - Thanks to [@maage](https://github.com/maage) for the patch. - -### [1.8.5](https://github.com/magiconair/properties/tree/v1.8.5) - 24 Mar 2021 - - * [PR #55](https://github.com/magiconair/properties/pull/55): Fix: Encoding Bug in Comments - - When reading comments \ are loaded correctly, but when writing they are then - replaced by \\. This leads to wrong comments when writing and reading multiple times. - - Thanks to [@doxsch](https://github.com/doxsch) for the patch. - -### [1.8.4](https://github.com/magiconair/properties/tree/v1.8.4) - 23 Sep 2020 - - * [PR #50](https://github.com/magiconair/properties/pull/50): enhance error message for circular references - - Thanks to [@sriv](https://github.com/sriv) for the patch. - -### [1.8.3](https://github.com/magiconair/properties/tree/v1.8.3) - 14 Sep 2020 - - * [PR #49](https://github.com/magiconair/properties/pull/49): Include the key in error message causing the circular reference - - The change is include the key in the error message which is causing the circular - reference when parsing/loading the properties files. - - Thanks to [@haroon-sheikh](https://github.com/haroon-sheikh) for the patch. - -### [1.8.2](https://github.com/magiconair/properties/tree/v1.8.2) - 25 Aug 2020 - - * [PR #36](https://github.com/magiconair/properties/pull/36): Escape backslash on write - - This patch ensures that backslashes are escaped on write. Existing applications which - rely on the old behavior may need to be updated. - - Thanks to [@apesternikov](https://github.com/apesternikov) for the patch. - - * [PR #42](https://github.com/magiconair/properties/pull/42): Made Content-Type check whitespace agnostic in LoadURL() - - Thanks to [@aliras1](https://github.com/aliras1) for the patch. - - * [PR #41](https://github.com/magiconair/properties/pull/41): Make key/value separator configurable on Write() - - Thanks to [@mkjor](https://github.com/mkjor) for the patch. - - * [PR #40](https://github.com/magiconair/properties/pull/40): Add method to return a sorted list of keys - - Thanks to [@mkjor](https://github.com/mkjor) for the patch. - -### [1.8.1](https://github.com/magiconair/properties/tree/v1.8.1) - 10 May 2019 - - * [PR #35](https://github.com/magiconair/properties/pull/35): Close body always after request - - This patch ensures that in `LoadURL` the response body is always closed. - - Thanks to [@liubog2008](https://github.com/liubog2008) for the patch. - -### [1.8](https://github.com/magiconair/properties/tree/v1.8) - 15 May 2018 - - * [PR #26](https://github.com/magiconair/properties/pull/26): Disable expansion during loading - - This adds the option to disable property expansion during loading. - - Thanks to [@kmala](https://github.com/kmala) for the patch. - -### [1.7.6](https://github.com/magiconair/properties/tree/v1.7.6) - 14 Feb 2018 - - * [PR #29](https://github.com/magiconair/properties/pull/29): Reworked expansion logic to handle more complex cases. - - See PR for an example. - - Thanks to [@yobert](https://github.com/yobert) for the fix. 
- -### [1.7.5](https://github.com/magiconair/properties/tree/v1.7.5) - 13 Feb 2018 - - * [PR #28](https://github.com/magiconair/properties/pull/28): Support duplicate expansions in the same value - - Values which expand the same key multiple times (e.g. `key=${a} ${a}`) will no longer fail - with a `circular reference error`. - - Thanks to [@yobert](https://github.com/yobert) for the fix. - -### [1.7.4](https://github.com/magiconair/properties/tree/v1.7.4) - 31 Oct 2017 - - * [Issue #23](https://github.com/magiconair/properties/issues/23): Ignore blank lines with whitespaces - - * [PR #24](https://github.com/magiconair/properties/pull/24): Update keys when DisableExpansion is enabled - - Thanks to [@mgurov](https://github.com/mgurov) for the fix. - -### [1.7.3](https://github.com/magiconair/properties/tree/v1.7.3) - 10 Jul 2017 - - * [Issue #17](https://github.com/magiconair/properties/issues/17): Add [SetValue()](http://godoc.org/github.com/magiconair/properties#Properties.SetValue) method to set values generically - * [Issue #22](https://github.com/magiconair/properties/issues/22): Add [LoadMap()](http://godoc.org/github.com/magiconair/properties#LoadMap) function to load properties from a string map - -### [1.7.2](https://github.com/magiconair/properties/tree/v1.7.2) - 20 Mar 2017 - - * [Issue #15](https://github.com/magiconair/properties/issues/15): Drop gocheck dependency - * [PR #21](https://github.com/magiconair/properties/pull/21): Add [Map()](http://godoc.org/github.com/magiconair/properties#Properties.Map) and [FilterFunc()](http://godoc.org/github.com/magiconair/properties#Properties.FilterFunc) - -### [1.7.1](https://github.com/magiconair/properties/tree/v1.7.1) - 13 Jan 2017 - - * [Issue #14](https://github.com/magiconair/properties/issues/14): Decouple TestLoadExpandedFile from `$USER` - * [PR #12](https://github.com/magiconair/properties/pull/12): Load from files and URLs - * [PR #16](https://github.com/magiconair/properties/pull/16): Keep gofmt happy - * [PR #18](https://github.com/magiconair/properties/pull/18): Fix Delete() function - -### [1.7.0](https://github.com/magiconair/properties/tree/v1.7.0) - 20 Mar 2016 - - * [Issue #10](https://github.com/magiconair/properties/issues/10): Add [LoadURL,LoadURLs,MustLoadURL,MustLoadURLs](http://godoc.org/github.com/magiconair/properties#LoadURL) method to load properties from a URL. - * [Issue #11](https://github.com/magiconair/properties/issues/11): Add [LoadString,MustLoadString](http://godoc.org/github.com/magiconair/properties#LoadString) method to load properties from an UTF8 string. - * [PR #8](https://github.com/magiconair/properties/pull/8): Add [MustFlag](http://godoc.org/github.com/magiconair/properties#Properties.MustFlag) method to provide overrides via command line flags. (@pascaldekloe) - -### [1.6.0](https://github.com/magiconair/properties/tree/v1.6.0) - 11 Dec 2015 - - * Add [Decode](http://godoc.org/github.com/magiconair/properties#Properties.Decode) method to populate struct from properties via tags. - -### [1.5.6](https://github.com/magiconair/properties/tree/v1.5.6) - 18 Oct 2015 - - * Vendored in gopkg.in/check.v1 - -### [1.5.5](https://github.com/magiconair/properties/tree/v1.5.5) - 31 Jul 2015 - - * [PR #6](https://github.com/magiconair/properties/pull/6): Add [Delete](http://godoc.org/github.com/magiconair/properties#Properties.Delete) method to remove keys including comments. 
(@gerbenjacobs)
-
-### [1.5.4](https://github.com/magiconair/properties/tree/v1.5.4) - 23 Jun 2015
-
- * [Issue #5](https://github.com/magiconair/properties/issues/5): Allow disabling of property expansion [DisableExpansion](http://godoc.org/github.com/magiconair/properties#Properties.DisableExpansion). When property expansion is disabled Properties become a simple key/value store and don't check for circular references.
-
-### [1.5.3](https://github.com/magiconair/properties/tree/v1.5.3) - 02 Jun 2015
-
- * [Issue #4](https://github.com/magiconair/properties/issues/4): Maintain key order in [Filter()](http://godoc.org/github.com/magiconair/properties#Properties.Filter), [FilterPrefix()](http://godoc.org/github.com/magiconair/properties#Properties.FilterPrefix) and [FilterRegexp()](http://godoc.org/github.com/magiconair/properties#Properties.FilterRegexp)
-
-### [1.5.2](https://github.com/magiconair/properties/tree/v1.5.2) - 10 Apr 2015
-
- * [Issue #3](https://github.com/magiconair/properties/issues/3): Don't print comments in [WriteComment()](http://godoc.org/github.com/magiconair/properties#Properties.WriteComment) if they are all empty
- * Add clickable links to README
-
-### [1.5.1](https://github.com/magiconair/properties/tree/v1.5.1) - 08 Dec 2014
-
- * Added [GetParsedDuration()](http://godoc.org/github.com/magiconair/properties#Properties.GetParsedDuration) and [MustGetParsedDuration()](http://godoc.org/github.com/magiconair/properties#Properties.MustGetParsedDuration) for values specified compatible with
-   [time.ParseDuration()](http://golang.org/pkg/time/#ParseDuration).
-
-### [1.5.0](https://github.com/magiconair/properties/tree/v1.5.0) - 18 Nov 2014
-
- * Added support for single and multi-line comments (reading, writing and updating)
- * The order of keys is now preserved
- * Calling [Set()](http://godoc.org/github.com/magiconair/properties#Properties.Set) with an empty key now silently ignores the call and does not create a new entry
- * Added a [MustSet()](http://godoc.org/github.com/magiconair/properties#Properties.MustSet) method
- * Migrated test library from launchpad.net/gocheck to [gopkg.in/check.v1](http://gopkg.in/check.v1)
-
-### [1.4.2](https://github.com/magiconair/properties/tree/v1.4.2) - 15 Nov 2014
-
- * [Issue #2](https://github.com/magiconair/properties/issues/2): Fixed goroutine leak in parser which created two lexers but cleaned up only one
-
-### [1.4.1](https://github.com/magiconair/properties/tree/v1.4.1) - 13 Nov 2014
-
- * [Issue #1](https://github.com/magiconair/properties/issues/1): Fixed bug in Keys() method which returned an empty string
-
-### [1.4.0](https://github.com/magiconair/properties/tree/v1.4.0) - 23 Sep 2014
-
- * Added [Keys()](http://godoc.org/github.com/magiconair/properties#Properties.Keys) to get the keys
- * Added [Filter()](http://godoc.org/github.com/magiconair/properties#Properties.Filter), [FilterRegexp()](http://godoc.org/github.com/magiconair/properties#Properties.FilterRegexp) and [FilterPrefix()](http://godoc.org/github.com/magiconair/properties#Properties.FilterPrefix) to get a subset of the properties
-
-### [1.3.0](https://github.com/magiconair/properties/tree/v1.3.0) - 18 Mar 2014
-
-* Added support for time.Duration
-* Made MustXXX() failure behavior configurable (log.Fatal, panic, custom)
-* Changed default of MustXXX() failure from panic to log.Fatal
-
-### [1.2.0](https://github.com/magiconair/properties/tree/v1.2.0) - 05 Mar 2014
-
-* Added MustGet... functions
-* Added support for int and uint with range checks on 32 bit platforms
-
-### [1.1.0](https://github.com/magiconair/properties/tree/v1.1.0) - 20 Jan 2014
-
-* Renamed from goproperties to properties
-* Added support for expansion of environment vars in
-  filenames and value expressions
-* Fixed bug where value expressions were not at the
-  start of the string
-
-### [1.0.0](https://github.com/magiconair/properties/tree/v1.0.0) - 7 Jan 2014
-
-* Initial release
diff --git a/tools/vendor/github.com/magiconair/properties/LICENSE.md b/tools/vendor/github.com/magiconair/properties/LICENSE.md
deleted file mode 100644
index 79c87e3e6..000000000
--- a/tools/vendor/github.com/magiconair/properties/LICENSE.md
+++ /dev/null
@@ -1,24 +0,0 @@
-Copyright (c) 2013-2020, Frank Schroeder
-
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- * Redistributions of source code must retain the above copyright notice, this
-   list of conditions and the following disclaimer.
-
- * Redistributions in binary form must reproduce the above copyright notice,
-   this list of conditions and the following disclaimer in the documentation
-   and/or other materials provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
-ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/tools/vendor/github.com/magiconair/properties/README.md b/tools/vendor/github.com/magiconair/properties/README.md
deleted file mode 100644
index e2edda025..000000000
--- a/tools/vendor/github.com/magiconair/properties/README.md
+++ /dev/null
@@ -1,128 +0,0 @@
-[![](https://img.shields.io/github/tag/magiconair/properties.svg?style=flat-square&label=release)](https://github.com/magiconair/properties/releases)
-[![Travis CI Status](https://img.shields.io/travis/magiconair/properties.svg?branch=master&style=flat-square&label=travis)](https://travis-ci.org/magiconair/properties)
-[![License](https://img.shields.io/badge/License-BSD%202--Clause-orange.svg?style=flat-square)](https://raw.githubusercontent.com/magiconair/properties/master/LICENSE)
-[![GoDoc](http://img.shields.io/badge/godoc-reference-5272B4.svg?style=flat-square)](http://godoc.org/github.com/magiconair/properties)
-
-# Overview
-
-#### Please run `git pull --tags` to update the tags. See [below](#updated-git-tags) why.
-
-properties is a Go library for reading and writing properties files.
-
-It supports reading from multiple files or URLs and Spring style recursive
-property expansion of expressions like `${key}` to their corresponding value.
-Value expressions can refer to other keys like in `${key}` or to environment
-variables like in `${USER}`. Filenames can also contain environment variables
-like in `/home/${USER}/myapp.properties`.
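A small sketch of that expansion behavior (keys and values invented):

```go
p := properties.MustLoadString("name=app\ngreeting=Hello ${name} from ${USER}")
fmt.Println(p.MustGetString("greeting")) // e.g. "Hello app from frank"
```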
- -Properties can be decoded into structs, maps, arrays and values through -struct tags. - -Comments and the order of keys are preserved. Comments can be modified -and can be written to the output. - -The properties library supports both ISO-8859-1 and UTF-8 encoded data. - -Starting from version 1.3.0 the behavior of the MustXXX() functions is -configurable by providing a custom `ErrorHandler` function. The default has -changed from `panic` to `log.Fatal` but this is configurable and custom -error handling functions can be provided. See the package documentation for -details. - -Read the full documentation on [![GoDoc](http://img.shields.io/badge/godoc-reference-5272B4.svg?style=flat-square)](http://godoc.org/github.com/magiconair/properties) - -## Getting Started - -```go -import ( - "flag" - "github.com/magiconair/properties" -) - -func main() { - // init from a file - p := properties.MustLoadFile("${HOME}/config.properties", properties.UTF8) - - // or multiple files - p = properties.MustLoadFiles([]string{ - "${HOME}/config.properties", - "${HOME}/config-${USER}.properties", - }, properties.UTF8, true) - - // or from a map - p = properties.LoadMap(map[string]string{"key": "value", "abc": "def"}) - - // or from a string - p = properties.MustLoadString("key=value\nabc=def") - - // or from a URL - p = properties.MustLoadURL("http://host/path") - - // or from multiple URLs - p = properties.MustLoadURL([]string{ - "http://host/config", - "http://host/config-${USER}", - }, true) - - // or from flags - p.MustFlag(flag.CommandLine) - - // get values through getters - host := p.MustGetString("host") - port := p.GetInt("port", 8080) - - // or through Decode - type Config struct { - Host string `properties:"host"` - Port int `properties:"port,default=9000"` - Accept []string `properties:"accept,default=image/png;image;gif"` - Timeout time.Duration `properties:"timeout,default=5s"` - } - var cfg Config - if err := p.Decode(&cfg); err != nil { - log.Fatal(err) - } -} - -``` - -## Installation and Upgrade - -``` -$ go get -u github.com/magiconair/properties -``` - -## License - -2 clause BSD license. See [LICENSE](https://github.com/magiconair/properties/blob/master/LICENSE) file for details. - -## ToDo - -* Dump contents with passwords and secrets obscured - -## Updated Git tags - -#### 13 Feb 2018 - -I realized that all of the git tags I had pushed before v1.7.5 were lightweight tags -and I've only recently learned that this doesn't play well with `git describe` 😞 - -I have replaced all lightweight tags with signed tags using this script which should -retain the commit date, name and email address. Please run `git pull --tags` to update them. - -Worst case you have to reclone the repo. - -```shell -#!/bin/bash -tag=$1 -echo "Updating $tag" -date=$(git show ${tag}^0 --format=%aD | head -1) -email=$(git show ${tag}^0 --format=%aE | head -1) -name=$(git show ${tag}^0 --format=%aN | head -1) -GIT_COMMITTER_DATE="$date" GIT_COMMITTER_NAME="$name" GIT_COMMITTER_EMAIL="$email" git tag -s -f ${tag} ${tag}^0 -m ${tag} -``` - -I apologize for the inconvenience. - -Frank - diff --git a/tools/vendor/github.com/magiconair/properties/decode.go b/tools/vendor/github.com/magiconair/properties/decode.go deleted file mode 100644 index 8e6aa441d..000000000 --- a/tools/vendor/github.com/magiconair/properties/decode.go +++ /dev/null @@ -1,289 +0,0 @@ -// Copyright 2013-2022 Frank Schroeder. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
-
-package properties
-
-import (
-	"fmt"
-	"reflect"
-	"strconv"
-	"strings"
-	"time"
-)
-
-// Decode assigns property values to exported fields of a struct.
-//
-// Decode traverses v recursively and returns an error if a value cannot be
-// converted to the field type or a required value is missing for a field.
-//
-// The following type-dependent decodings are used:
-//
-// String, boolean, numeric fields have the value of the property key assigned.
-// The property key name is the name of the field. A different key and a default
-// value can be set in the field's tag. Fields without default value are
-// required. If the value cannot be converted to the field type an error is
-// returned.
-//
-// time.Duration fields have the result of time.ParseDuration() assigned.
-//
-// time.Time fields have the value of time.Parse() assigned. The default layout
-// is time.RFC3339 but can be set in the field's tag.
-//
-// Arrays and slices of string, boolean, numeric, time.Duration and time.Time
-// fields have the value interpreted as a comma separated list of values. The
-// individual values are trimmed of whitespace and empty values are ignored. A
-// default value can be provided as a semicolon separated list in the field's
-// tag.
-//
-// Struct fields are decoded recursively using the field name plus "." as
-// prefix. The prefix (without dot) can be overridden in the field's tag.
-// Default values are not supported in the field's tag. Specify them on the
-// fields of the inner struct instead.
-//
-// Map fields must have a key of type string and are decoded recursively by
-// using the field's name plus "." as prefix and the next element of the key
-// name as map key. The prefix (without dot) can be overridden in the field's
-// tag. Default values are not supported.
-//
-// Examples:
-//
-//	// Field is ignored.
-//	Field int `properties:"-"`
-//
-//	// Field is assigned value of 'Field'.
-//	Field int
-//
-//	// Field is assigned value of 'myName'.
-//	Field int `properties:"myName"`
-//
-//	// Field is assigned value of key 'myName' and has a default
-//	// value 15 if the key does not exist.
-//	Field int `properties:"myName,default=15"`
-//
-//	// Field is assigned value of key 'Field' and has a default
-//	// value 15 if the key does not exist.
-//	Field int `properties:",default=15"`
-//
-//	// Field is assigned value of key 'date' and the date
-//	// is in format 2006-01-02
-//	Field time.Time `properties:"date,layout=2006-01-02"`
-//
-//	// Field is assigned the non-empty and whitespace trimmed
-//	// values of key 'Field' split by commas.
-//	Field []string
-//
-//	// Field is assigned the non-empty and whitespace trimmed
-//	// values of key 'Field' split by commas and has a default
-//	// value ["a", "b", "c"] if the key does not exist.
-//	Field []string `properties:",default=a;b;c"`
-//
-//	// Field is decoded recursively with "Field." as key prefix.
-//	Field SomeStruct
-//
-//	// Field is decoded recursively with "myName." as key prefix.
-//	Field SomeStruct `properties:"myName"`
-//
-//	// Field is decoded recursively with "Field." as key prefix
-//	// and the next dotted element of the key as map key.
-//	Field map[string]string
-//
-//	// Field is decoded recursively with "myName." as key prefix
-//	// and the next dotted element of the key as map key.
-// Field map[string]string `properties:"myName"` -func (p *Properties) Decode(x interface{}) error { - t, v := reflect.TypeOf(x), reflect.ValueOf(x) - if t.Kind() != reflect.Ptr || v.Elem().Type().Kind() != reflect.Struct { - return fmt.Errorf("not a pointer to struct: %s", t) - } - if err := dec(p, "", nil, nil, v); err != nil { - return err - } - return nil -} - -func dec(p *Properties, key string, def *string, opts map[string]string, v reflect.Value) error { - t := v.Type() - - // value returns the property value for key or the default if provided. - value := func() (string, error) { - if val, ok := p.Get(key); ok { - return val, nil - } - if def != nil { - return *def, nil - } - return "", fmt.Errorf("missing required key %s", key) - } - - // conv converts a string to a value of the given type. - conv := func(s string, t reflect.Type) (val reflect.Value, err error) { - var v interface{} - - switch { - case isDuration(t): - v, err = time.ParseDuration(s) - - case isTime(t): - layout := opts["layout"] - if layout == "" { - layout = time.RFC3339 - } - v, err = time.Parse(layout, s) - - case isBool(t): - v, err = boolVal(s), nil - - case isString(t): - v, err = s, nil - - case isFloat(t): - v, err = strconv.ParseFloat(s, 64) - - case isInt(t): - v, err = strconv.ParseInt(s, 10, 64) - - case isUint(t): - v, err = strconv.ParseUint(s, 10, 64) - - default: - return reflect.Zero(t), fmt.Errorf("unsupported type %s", t) - } - if err != nil { - return reflect.Zero(t), err - } - return reflect.ValueOf(v).Convert(t), nil - } - - // keydef returns the property key and the default value based on the - // name of the struct field and the options in the tag. - keydef := func(f reflect.StructField) (string, *string, map[string]string) { - _key, _opts := parseTag(f.Tag.Get("properties")) - - var _def *string - if d, ok := _opts["default"]; ok { - _def = &d - } - if _key != "" { - return _key, _def, _opts - } - return f.Name, _def, _opts - } - - switch { - case isDuration(t) || isTime(t) || isBool(t) || isString(t) || isFloat(t) || isInt(t) || isUint(t): - s, err := value() - if err != nil { - return err - } - val, err := conv(s, t) - if err != nil { - return err - } - v.Set(val) - - case isPtr(t): - return dec(p, key, def, opts, v.Elem()) - - case isStruct(t): - for i := 0; i < v.NumField(); i++ { - fv := v.Field(i) - fk, def, opts := keydef(t.Field(i)) - if !fv.CanSet() { - return fmt.Errorf("cannot set %s", t.Field(i).Name) - } - if fk == "-" { - continue - } - if key != "" { - fk = key + "." 
+ fk - } - if err := dec(p, fk, def, opts, fv); err != nil { - return err - } - } - return nil - - case isArray(t): - val, err := value() - if err != nil { - return err - } - vals := split(val, ";") - a := reflect.MakeSlice(t, 0, len(vals)) - for _, s := range vals { - val, err := conv(s, t.Elem()) - if err != nil { - return err - } - a = reflect.Append(a, val) - } - v.Set(a) - - case isMap(t): - valT := t.Elem() - m := reflect.MakeMap(t) - for postfix := range p.FilterStripPrefix(key + ".").m { - pp := strings.SplitN(postfix, ".", 2) - mk, mv := pp[0], reflect.New(valT) - if err := dec(p, key+"."+mk, nil, nil, mv); err != nil { - return err - } - m.SetMapIndex(reflect.ValueOf(mk), mv.Elem()) - } - v.Set(m) - - default: - return fmt.Errorf("unsupported type %s", t) - } - return nil -} - -// split splits a string on sep, trims whitespace of elements -// and omits empty elements -func split(s string, sep string) []string { - var a []string - for _, v := range strings.Split(s, sep) { - if v = strings.TrimSpace(v); v != "" { - a = append(a, v) - } - } - return a -} - -// parseTag parses a "key,k=v,k=v,..." -func parseTag(tag string) (key string, opts map[string]string) { - opts = map[string]string{} - for i, s := range strings.Split(tag, ",") { - if i == 0 { - key = s - continue - } - - pp := strings.SplitN(s, "=", 2) - if len(pp) == 1 { - opts[pp[0]] = "" - } else { - opts[pp[0]] = pp[1] - } - } - return key, opts -} - -func isArray(t reflect.Type) bool { return t.Kind() == reflect.Array || t.Kind() == reflect.Slice } -func isBool(t reflect.Type) bool { return t.Kind() == reflect.Bool } -func isDuration(t reflect.Type) bool { return t == reflect.TypeOf(time.Second) } -func isMap(t reflect.Type) bool { return t.Kind() == reflect.Map } -func isPtr(t reflect.Type) bool { return t.Kind() == reflect.Ptr } -func isString(t reflect.Type) bool { return t.Kind() == reflect.String } -func isStruct(t reflect.Type) bool { return t.Kind() == reflect.Struct } -func isTime(t reflect.Type) bool { return t == reflect.TypeOf(time.Time{}) } -func isFloat(t reflect.Type) bool { - return t.Kind() == reflect.Float32 || t.Kind() == reflect.Float64 -} -func isInt(t reflect.Type) bool { - return t.Kind() == reflect.Int || t.Kind() == reflect.Int8 || t.Kind() == reflect.Int16 || t.Kind() == reflect.Int32 || t.Kind() == reflect.Int64 -} -func isUint(t reflect.Type) bool { - return t.Kind() == reflect.Uint || t.Kind() == reflect.Uint8 || t.Kind() == reflect.Uint16 || t.Kind() == reflect.Uint32 || t.Kind() == reflect.Uint64 -} diff --git a/tools/vendor/github.com/magiconair/properties/doc.go b/tools/vendor/github.com/magiconair/properties/doc.go deleted file mode 100644 index 7c7979315..000000000 --- a/tools/vendor/github.com/magiconair/properties/doc.go +++ /dev/null @@ -1,155 +0,0 @@ -// Copyright 2013-2022 Frank Schroeder. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package properties provides functions for reading and writing -// ISO-8859-1 and UTF-8 encoded .properties files and has -// support for recursive property expansion. -// -// Java properties files are ISO-8859-1 encoded and use Unicode -// literals for characters outside the ISO character set. Unicode -// literals can be used in UTF-8 encoded properties files but -// aren't necessary. 
-// -// To load a single properties file use MustLoadFile(): -// -// p := properties.MustLoadFile(filename, properties.UTF8) -// -// To load multiple properties files use MustLoadFiles() -// which loads the files in the given order and merges the -// result. Missing properties files can be ignored if the -// 'ignoreMissing' flag is set to true. -// -// Filenames can contain environment variables which are expanded -// before loading. -// -// f1 := "/etc/myapp/myapp.conf" -// f2 := "/home/${USER}/myapp.conf" -// p := MustLoadFiles([]string{f1, f2}, properties.UTF8, true) -// -// All of the different key/value delimiters ' ', ':' and '=' are -// supported as well as the comment characters '!' and '#' and -// multi-line values. -// -// ! this is a comment -// # and so is this -// -// # the following expressions are equal -// key value -// key=value -// key:value -// key = value -// key : value -// key = val\ -// ue -// -// Properties stores all comments preceding a key and provides -// GetComments() and SetComments() methods to retrieve and -// update them. The convenience functions GetComment() and -// SetComment() allow access to the last comment. The -// WriteComment() method writes properties files including -// the comments and with the keys in the original order. -// This can be used for sanitizing properties files. -// -// Property expansion is recursive and circular references -// and malformed expressions are not allowed and cause an -// error. Expansion of environment variables is supported. -// -// # standard property -// key = value -// -// # property expansion: key2 = value -// key2 = ${key} -// -// # recursive expansion: key3 = value -// key3 = ${key2} -// -// # circular reference (error) -// key = ${key} -// -// # malformed expression (error) -// key = ${ke -// -// # refers to the users' home dir -// home = ${HOME} -// -// # local key takes precedence over env var: u = foo -// USER = foo -// u = ${USER} -// -// The default property expansion format is ${key} but can be -// changed by setting different pre- and postfix values on the -// Properties object. -// -// p := properties.NewProperties() -// p.Prefix = "#[" -// p.Postfix = "]#" -// -// Properties provides convenience functions for getting typed -// values with default values if the key does not exist or the -// type conversion failed. -// -// # Returns true if the value is either "1", "on", "yes" or "true" -// # Returns false for every other value and the default value if -// # the key does not exist. -// v = p.GetBool("key", false) -// -// # Returns the value if the key exists and the format conversion -// # was successful. Otherwise, the default value is returned. -// v = p.GetInt64("key", 999) -// v = p.GetUint64("key", 999) -// v = p.GetFloat64("key", 123.0) -// v = p.GetString("key", "def") -// v = p.GetDuration("key", 999) -// -// As an alternative properties may be applied with the standard -// library's flag implementation at any time. -// -// # Standard configuration -// v = flag.Int("key", 999, "help message") -// flag.Parse() -// -// # Merge p into the flag set -// p.MustFlag(flag.CommandLine) -// -// Properties provides several MustXXX() convenience functions -// which will terminate the app if an error occurs. The behavior -// of the failure is configurable and the default is to call -// log.Fatal(err). To have the MustXXX() functions panic instead -// of logging the error set a different ErrorHandler before -// you use the Properties package. 
-// -// properties.ErrorHandler = properties.PanicHandler -// -// # Will panic instead of logging an error -// p := properties.MustLoadFile("config.properties") -// -// You can also provide your own ErrorHandler function. The only requirement -// is that the error handler function must exit after handling the error. -// -// properties.ErrorHandler = func(err error) { -// fmt.Println(err) -// os.Exit(1) -// } -// -// # Will write to stdout and then exit -// p := properties.MustLoadFile("config.properties") -// -// Properties can also be loaded into a struct via the `Decode` -// method, e.g. -// -// type S struct { -// A string `properties:"a,default=foo"` -// D time.Duration `properties:"timeout,default=5s"` -// E time.Time `properties:"expires,layout=2006-01-02,default=2015-01-01"` -// } -// -// See `Decode()` method for the full documentation. -// -// The following documents provide a description of the properties -// file format. -// -// http://en.wikipedia.org/wiki/.properties -// -// http://docs.oracle.com/javase/7/docs/api/java/util/Properties.html#load%28java.io.Reader%29 -package properties diff --git a/tools/vendor/github.com/magiconair/properties/integrate.go b/tools/vendor/github.com/magiconair/properties/integrate.go deleted file mode 100644 index 35d0ae97b..000000000 --- a/tools/vendor/github.com/magiconair/properties/integrate.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2013-2022 Frank Schroeder. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package properties - -import "flag" - -// MustFlag sets flags that are skipped by dst.Parse when p contains -// the respective key for flag.Flag.Name. -// -// Its use is recommended with command line arguments as in: -// -// flag.Parse() -// p.MustFlag(flag.CommandLine) -func (p *Properties) MustFlag(dst *flag.FlagSet) { - m := make(map[string]*flag.Flag) - dst.VisitAll(func(f *flag.Flag) { - m[f.Name] = f - }) - dst.Visit(func(f *flag.Flag) { - delete(m, f.Name) // overridden - }) - - for name, f := range m { - v, ok := p.Get(name) - if !ok { - continue - } - - if err := f.Value.Set(v); err != nil { - ErrorHandler(err) - } - } -} diff --git a/tools/vendor/github.com/magiconair/properties/lex.go b/tools/vendor/github.com/magiconair/properties/lex.go deleted file mode 100644 index 3d15a1f6e..000000000 --- a/tools/vendor/github.com/magiconair/properties/lex.go +++ /dev/null @@ -1,395 +0,0 @@ -// Copyright 2013-2022 Frank Schroeder. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// -// Parts of the lexer are from the template/text/parser package -// For these parts the following applies: -// -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file of the go 1.2 -// distribution. - -package properties - -import ( - "fmt" - "strconv" - "strings" - "unicode/utf8" -) - -// item represents a token or text string returned from the scanner. -type item struct { - typ itemType // The type of this item. - pos int // The starting position, in bytes, of this item in the input string. - val string // The value of this item.
-} - -func (i item) String() string { - switch { - case i.typ == itemEOF: - return "EOF" - case i.typ == itemError: - return i.val - case len(i.val) > 10: - return fmt.Sprintf("%.10q...", i.val) - } - return fmt.Sprintf("%q", i.val) -} - -// itemType identifies the type of lex items. -type itemType int - -const ( - itemError itemType = iota // error occurred; value is text of error - itemEOF - itemKey // a key - itemValue // a value - itemComment // a comment -) - -// defines a constant for EOF -const eof = -1 - -// permitted whitespace characters space, FF and TAB -const whitespace = " \f\t" - -// stateFn represents the state of the scanner as a function that returns the next state. -type stateFn func(*lexer) stateFn - -// lexer holds the state of the scanner. -type lexer struct { - input string // the string being scanned - state stateFn // the next lexing function to enter - pos int // current position in the input - start int // start position of this item - width int // width of last rune read from input - lastPos int // position of most recent item returned by nextItem - runes []rune // scanned runes for this item - items chan item // channel of scanned items -} - -// next returns the next rune in the input. -func (l *lexer) next() rune { - if l.pos >= len(l.input) { - l.width = 0 - return eof - } - r, w := utf8.DecodeRuneInString(l.input[l.pos:]) - l.width = w - l.pos += l.width - return r -} - -// peek returns but does not consume the next rune in the input. -func (l *lexer) peek() rune { - r := l.next() - l.backup() - return r -} - -// backup steps back one rune. Can only be called once per call of next. -func (l *lexer) backup() { - l.pos -= l.width -} - -// emit passes an item back to the client. -func (l *lexer) emit(t itemType) { - i := item{t, l.start, string(l.runes)} - l.items <- i - l.start = l.pos - l.runes = l.runes[:0] -} - -// ignore skips over the pending input before this point. -func (l *lexer) ignore() { - l.start = l.pos -} - -// appends the rune to the current value -func (l *lexer) appendRune(r rune) { - l.runes = append(l.runes, r) -} - -// accept consumes the next rune if it's from the valid set. -func (l *lexer) accept(valid string) bool { - if strings.ContainsRune(valid, l.next()) { - return true - } - l.backup() - return false -} - -// acceptRun consumes a run of runes from the valid set. -func (l *lexer) acceptRun(valid string) { - for strings.ContainsRune(valid, l.next()) { - } - l.backup() -} - -// lineNumber reports which line we're on, based on the position of -// the previous item returned by nextItem. Doing it this way -// means we don't have to worry about peek double counting. -func (l *lexer) lineNumber() int { - return 1 + strings.Count(l.input[:l.lastPos], "\n") -} - -// errorf returns an error token and terminates the scan by passing -// back a nil pointer that will be the next state, terminating l.nextItem. -func (l *lexer) errorf(format string, args ...interface{}) stateFn { - l.items <- item{itemError, l.start, fmt.Sprintf(format, args...)} - return nil -} - -// nextItem returns the next item from the input. -func (l *lexer) nextItem() item { - i := <-l.items - l.lastPos = i.pos - return i -} - -// lex creates a new scanner for the input string. -func lex(input string) *lexer { - l := &lexer{ - input: input, - items: make(chan item), - runes: make([]rune, 0, 32), - } - go l.run() - return l -} - -// run runs the state machine for the lexer. 
-func (l *lexer) run() { - for l.state = lexBeforeKey(l); l.state != nil; { - l.state = l.state(l) - } -} - -// state functions - -// lexBeforeKey scans until a key begins. -func lexBeforeKey(l *lexer) stateFn { - switch r := l.next(); { - case isEOF(r): - l.emit(itemEOF) - return nil - - case isEOL(r): - l.ignore() - return lexBeforeKey - - case isComment(r): - return lexComment - - case isWhitespace(r): - l.ignore() - return lexBeforeKey - - default: - l.backup() - return lexKey - } -} - -// lexComment scans a comment line. The comment character has already been scanned. -func lexComment(l *lexer) stateFn { - l.acceptRun(whitespace) - l.ignore() - for { - switch r := l.next(); { - case isEOF(r): - l.ignore() - l.emit(itemEOF) - return nil - case isEOL(r): - l.emit(itemComment) - return lexBeforeKey - default: - l.appendRune(r) - } - } -} - -// lexKey scans the key up to a delimiter -func lexKey(l *lexer) stateFn { - var r rune - -Loop: - for { - switch r = l.next(); { - - case isEscape(r): - err := l.scanEscapeSequence() - if err != nil { - return l.errorf(err.Error()) - } - - case isEndOfKey(r): - l.backup() - break Loop - - case isEOF(r): - break Loop - - default: - l.appendRune(r) - } - } - - if len(l.runes) > 0 { - l.emit(itemKey) - } - - if isEOF(r) { - l.emit(itemEOF) - return nil - } - - return lexBeforeValue -} - -// lexBeforeValue scans the delimiter between key and value. -// Leading and trailing whitespace is ignored. -// We expect to be just after the key. -func lexBeforeValue(l *lexer) stateFn { - l.acceptRun(whitespace) - l.accept(":=") - l.acceptRun(whitespace) - l.ignore() - return lexValue -} - -// lexValue scans text until the end of the line. We expect to be just after the delimiter. -func lexValue(l *lexer) stateFn { - for { - switch r := l.next(); { - case isEscape(r): - if isEOL(l.peek()) { - l.next() - l.acceptRun(whitespace) - } else { - err := l.scanEscapeSequence() - if err != nil { - return l.errorf(err.Error()) - } - } - - case isEOL(r): - l.emit(itemValue) - l.ignore() - return lexBeforeKey - - case isEOF(r): - l.emit(itemValue) - l.emit(itemEOF) - return nil - - default: - l.appendRune(r) - } - } -} - -// scanEscapeSequence scans either one of the escaped characters -// or a unicode literal. We expect to be after the escape character. -func (l *lexer) scanEscapeSequence() error { - switch r := l.next(); { - - case isEscapedCharacter(r): - l.appendRune(decodeEscapedCharacter(r)) - return nil - - case atUnicodeLiteral(r): - return l.scanUnicodeLiteral() - - case isEOF(r): - return fmt.Errorf("premature EOF") - - // silently drop the escape character and append the rune as is - default: - l.appendRune(r) - return nil - } -} - -// scans a unicode literal in the form \uXXXX. We expect to be after the \u. -func (l *lexer) scanUnicodeLiteral() error { - // scan the digits - d := make([]rune, 4) - for i := 0; i < 4; i++ { - d[i] = l.next() - if d[i] == eof || !strings.ContainsRune("0123456789abcdefABCDEF", d[i]) { - return fmt.Errorf("invalid unicode literal") - } - } - - // decode the digits into a rune - r, err := strconv.ParseInt(string(d), 16, 0) - if err != nil { - return err - } - - l.appendRune(rune(r)) - return nil -} - -// decodeEscapedCharacter returns the unescaped rune. We expect to be after the escape character. 
-func decodeEscapedCharacter(r rune) rune { - switch r { - case 'f': - return '\f' - case 'n': - return '\n' - case 'r': - return '\r' - case 't': - return '\t' - default: - return r - } -} - -// atUnicodeLiteral reports whether we are at a unicode literal. -// The escape character has already been consumed. -func atUnicodeLiteral(r rune) bool { - return r == 'u' -} - -// isComment reports whether we are at the start of a comment. -func isComment(r rune) bool { - return r == '#' || r == '!' -} - -// isEndOfKey reports whether the rune terminates the current key. -func isEndOfKey(r rune) bool { - return strings.ContainsRune(" \f\t\r\n:=", r) -} - -// isEOF reports whether we are at EOF. -func isEOF(r rune) bool { - return r == eof -} - -// isEOL reports whether we are at a new line character. -func isEOL(r rune) bool { - return r == '\n' || r == '\r' -} - -// isEscape reports whether the rune is the escape character which -// prefixes unicode literals and other escaped characters. -func isEscape(r rune) bool { - return r == '\\' -} - -// isEscapedCharacter reports whether we are at one of the characters that need escaping. -// The escape character has already been consumed. -func isEscapedCharacter(r rune) bool { - return strings.ContainsRune(" :=fnrt", r) -} - -// isWhitespace reports whether the rune is a whitespace character. -func isWhitespace(r rune) bool { - return strings.ContainsRune(whitespace, r) -} diff --git a/tools/vendor/github.com/magiconair/properties/load.go b/tools/vendor/github.com/magiconair/properties/load.go deleted file mode 100644 index 635368dc8..000000000 --- a/tools/vendor/github.com/magiconair/properties/load.go +++ /dev/null @@ -1,293 +0,0 @@ -// Copyright 2013-2022 Frank Schroeder. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package properties - -import ( - "fmt" - "io/ioutil" - "net/http" - "os" - "strings" -) - -// Encoding specifies encoding of the input data. -type Encoding uint - -const ( - // utf8Default is a private placeholder for the zero value of Encoding to - // ensure that it has the correct meaning. UTF8 is the default encoding but - // was assigned a non-zero value which cannot be changed without breaking - // existing code. Clients should continue to use the public constants. - utf8Default Encoding = iota - - // UTF8 interprets the input data as UTF-8. - UTF8 - - // ISO_8859_1 interprets the input data as ISO-8859-1. - ISO_8859_1 -) - -type Loader struct { - // Encoding determines how the data from files and byte buffers - // is interpreted. For URLs the Content-Type header is used - // to determine the encoding of the data. - Encoding Encoding - - // DisableExpansion configures the property expansion of the - // returned property object. When set to true, the property values - // will not be expanded and the Property object will not be checked - // for invalid expansion expressions. - DisableExpansion bool - - // IgnoreMissing configures whether missing files or URLs which return - // 404 are reported as errors. When set to true, missing files and 404 - // status codes are not reported as errors. - IgnoreMissing bool -} - -// Load reads a buffer into a Properties struct. -func (l *Loader) LoadBytes(buf []byte) (*Properties, error) { - return l.loadBytes(buf, l.Encoding) -} - -// LoadAll reads the content of multiple URLs or files in the given order into -// a Properties struct. 
If IgnoreMissing is true then a 404 status code or -// missing file will not be reported as error. Encoding sets the encoding for -// files. For the URLs see LoadURL for the Content-Type header and the -// encoding. -func (l *Loader) LoadAll(names []string) (*Properties, error) { - all := NewProperties() - for _, name := range names { - n, err := expandName(name) - if err != nil { - return nil, err - } - - var p *Properties - switch { - case strings.HasPrefix(n, "http://"): - p, err = l.LoadURL(n) - case strings.HasPrefix(n, "https://"): - p, err = l.LoadURL(n) - default: - p, err = l.LoadFile(n) - } - if err != nil { - return nil, err - } - all.Merge(p) - } - - all.DisableExpansion = l.DisableExpansion - if all.DisableExpansion { - return all, nil - } - return all, all.check() -} - -// LoadFile reads a file into a Properties struct. -// If IgnoreMissing is true then a missing file will not be -// reported as error. -func (l *Loader) LoadFile(filename string) (*Properties, error) { - data, err := ioutil.ReadFile(filename) - if err != nil { - if l.IgnoreMissing && os.IsNotExist(err) { - LogPrintf("properties: %s not found. skipping", filename) - return NewProperties(), nil - } - return nil, err - } - return l.loadBytes(data, l.Encoding) -} - -// LoadURL reads the content of the URL into a Properties struct. -// -// The encoding is determined via the Content-Type header which -// should be set to 'text/plain'. If the 'charset' parameter is -// missing, 'iso-8859-1' or 'latin1' the encoding is set to -// ISO-8859-1. If the 'charset' parameter is set to 'utf-8' the -// encoding is set to UTF-8. A missing content type header is -// interpreted as 'text/plain; charset=utf-8'. -func (l *Loader) LoadURL(url string) (*Properties, error) { - resp, err := http.Get(url) - if err != nil { - return nil, fmt.Errorf("properties: error fetching %q. %s", url, err) - } - defer resp.Body.Close() - - if resp.StatusCode == 404 && l.IgnoreMissing { - LogPrintf("properties: %s returned %d. skipping", url, resp.StatusCode) - return NewProperties(), nil - } - - if resp.StatusCode != 200 { - return nil, fmt.Errorf("properties: %s returned %d", url, resp.StatusCode) - } - - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, fmt.Errorf("properties: %s error reading response. %s", url, err) - } - - ct := resp.Header.Get("Content-Type") - ct = strings.Join(strings.Fields(ct), "") - var enc Encoding - switch strings.ToLower(ct) { - case "text/plain", "text/plain;charset=iso-8859-1", "text/plain;charset=latin1": - enc = ISO_8859_1 - case "", "text/plain;charset=utf-8": - enc = UTF8 - default: - return nil, fmt.Errorf("properties: invalid content type %s", ct) - } - - return l.loadBytes(body, enc) -} - -func (l *Loader) loadBytes(buf []byte, enc Encoding) (*Properties, error) { - p, err := parse(convert(buf, enc)) - if err != nil { - return nil, err - } - p.DisableExpansion = l.DisableExpansion - if p.DisableExpansion { - return p, nil - } - return p, p.check() -} - -// Load reads a buffer into a Properties struct. -func Load(buf []byte, enc Encoding) (*Properties, error) { - l := &Loader{Encoding: enc} - return l.LoadBytes(buf) -} - -// LoadString reads an UTF8 string into a properties struct. -func LoadString(s string) (*Properties, error) { - l := &Loader{Encoding: UTF8} - return l.LoadBytes([]byte(s)) -} - -// LoadMap creates a new Properties struct from a string map. 
-func LoadMap(m map[string]string) *Properties { - p := NewProperties() - for k, v := range m { - p.Set(k, v) - } - return p -} - -// LoadFile reads a file into a Properties struct. -func LoadFile(filename string, enc Encoding) (*Properties, error) { - l := &Loader{Encoding: enc} - return l.LoadAll([]string{filename}) -} - -// LoadFiles reads multiple files in the given order into -// a Properties struct. If 'ignoreMissing' is true then -// non-existent files will not be reported as error. -func LoadFiles(filenames []string, enc Encoding, ignoreMissing bool) (*Properties, error) { - l := &Loader{Encoding: enc, IgnoreMissing: ignoreMissing} - return l.LoadAll(filenames) -} - -// LoadURL reads the content of the URL into a Properties struct. -// See Loader#LoadURL for details. -func LoadURL(url string) (*Properties, error) { - l := &Loader{Encoding: UTF8} - return l.LoadAll([]string{url}) -} - -// LoadURLs reads the content of multiple URLs in the given order into a -// Properties struct. If IgnoreMissing is true then a 404 status code will -// not be reported as error. See Loader#LoadURL for the Content-Type header -// and the encoding. -func LoadURLs(urls []string, ignoreMissing bool) (*Properties, error) { - l := &Loader{Encoding: UTF8, IgnoreMissing: ignoreMissing} - return l.LoadAll(urls) -} - -// LoadAll reads the content of multiple URLs or files in the given order into a -// Properties struct. If 'ignoreMissing' is true then a 404 status code or missing file will -// not be reported as error. Encoding sets the encoding for files. For the URLs please see -// LoadURL for the Content-Type header and the encoding. -func LoadAll(names []string, enc Encoding, ignoreMissing bool) (*Properties, error) { - l := &Loader{Encoding: enc, IgnoreMissing: ignoreMissing} - return l.LoadAll(names) -} - -// MustLoadString reads an UTF8 string into a Properties struct and -// panics on error. -func MustLoadString(s string) *Properties { - return must(LoadString(s)) -} - -// MustLoadFile reads a file into a Properties struct and -// panics on error. -func MustLoadFile(filename string, enc Encoding) *Properties { - return must(LoadFile(filename, enc)) -} - -// MustLoadFiles reads multiple files in the given order into -// a Properties struct and panics on error. If 'ignoreMissing' -// is true then non-existent files will not be reported as error. -func MustLoadFiles(filenames []string, enc Encoding, ignoreMissing bool) *Properties { - return must(LoadFiles(filenames, enc, ignoreMissing)) -} - -// MustLoadURL reads the content of a URL into a Properties struct and -// panics on error. -func MustLoadURL(url string) *Properties { - return must(LoadURL(url)) -} - -// MustLoadURLs reads the content of multiple URLs in the given order into a -// Properties struct and panics on error. If 'ignoreMissing' is true then a 404 -// status code will not be reported as error. -func MustLoadURLs(urls []string, ignoreMissing bool) *Properties { - return must(LoadURLs(urls, ignoreMissing)) -} - -// MustLoadAll reads the content of multiple URLs or files in the given order into a -// Properties struct. If 'ignoreMissing' is true then a 404 status code or missing file will -// not be reported as error. Encoding sets the encoding for files. For the URLs please see -// LoadURL for the Content-Type header and the encoding. It panics on error. 
-func MustLoadAll(names []string, enc Encoding, ignoreMissing bool) *Properties { - return must(LoadAll(names, enc, ignoreMissing)) -} - -func must(p *Properties, err error) *Properties { - if err != nil { - ErrorHandler(err) - } - return p -} - -// expandName expands ${ENV_VAR} expressions in a name. -// If the environment variable does not exist then it will be replaced -// with an empty string. Malformed expressions like "${ENV_VAR" will -// be reported as error. -func expandName(name string) (string, error) { - return expand(name, []string{}, "${", "}", make(map[string]string)) -} - -// Interprets a byte buffer either as an ISO-8859-1 or UTF-8 encoded string. -// For ISO-8859-1 we can convert each byte straight into a rune since the -// first 256 unicode code points cover ISO-8859-1. -func convert(buf []byte, enc Encoding) string { - switch enc { - case utf8Default, UTF8: - return string(buf) - case ISO_8859_1: - runes := make([]rune, len(buf)) - for i, b := range buf { - runes[i] = rune(b) - } - return string(runes) - default: - ErrorHandler(fmt.Errorf("unsupported encoding %v", enc)) - } - panic("ErrorHandler should exit") -} diff --git a/tools/vendor/github.com/magiconair/properties/parser.go b/tools/vendor/github.com/magiconair/properties/parser.go deleted file mode 100644 index fccfd39f6..000000000 --- a/tools/vendor/github.com/magiconair/properties/parser.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2013-2022 Frank Schroeder. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package properties - -import ( - "fmt" - "runtime" -) - -type parser struct { - lex *lexer -} - -func parse(input string) (properties *Properties, err error) { - p := &parser{lex: lex(input)} - defer p.recover(&err) - - properties = NewProperties() - key := "" - comments := []string{} - - for { - token := p.expectOneOf(itemComment, itemKey, itemEOF) - switch token.typ { - case itemEOF: - goto done - case itemComment: - comments = append(comments, token.val) - continue - case itemKey: - key = token.val - if _, ok := properties.m[key]; !ok { - properties.k = append(properties.k, key) - } - } - - token = p.expectOneOf(itemValue, itemEOF) - if len(comments) > 0 { - properties.c[key] = comments - comments = []string{} - } - switch token.typ { - case itemEOF: - properties.m[key] = "" - goto done - case itemValue: - properties.m[key] = token.val - } - } - -done: - return properties, nil -} - -func (p *parser) errorf(format string, args ...interface{}) { - format = fmt.Sprintf("properties: Line %d: %s", p.lex.lineNumber(), format) - panic(fmt.Errorf(format, args...)) -} - -func (p *parser) expectOneOf(expected ...itemType) (token item) { - token = p.lex.nextItem() - for _, v := range expected { - if token.typ == v { - return token - } - } - p.unexpected(token) - panic("unexpected token") -} - -func (p *parser) unexpected(token item) { - p.errorf(token.String()) -} - -// recover is the handler that turns panics into returns from the top level of Parse. -func (p *parser) recover(errp *error) { - e := recover() - if e != nil { - if _, ok := e.(runtime.Error); ok { - panic(e) - } - *errp = e.(error) - } -} diff --git a/tools/vendor/github.com/magiconair/properties/properties.go b/tools/vendor/github.com/magiconair/properties/properties.go deleted file mode 100644 index fb2f7b404..000000000 --- a/tools/vendor/github.com/magiconair/properties/properties.go +++ /dev/null @@ -1,848 +0,0 @@ -// Copyright 2013-2022 Frank Schroeder. 
All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package properties - -// BUG(frank): Set() does not check for invalid unicode literals since this is currently handled by the lexer. -// BUG(frank): Write() does not allow to configure the newline character. Therefore, on Windows LF is used. - -import ( - "bytes" - "fmt" - "io" - "log" - "os" - "regexp" - "sort" - "strconv" - "strings" - "time" - "unicode/utf8" -) - -const maxExpansionDepth = 64 - -// ErrorHandlerFunc defines the type of function which handles failures -// of the MustXXX() functions. An error handler function must exit -// the application after handling the error. -type ErrorHandlerFunc func(error) - -// ErrorHandler is the function which handles failures of the MustXXX() -// functions. The default is LogFatalHandler. -var ErrorHandler ErrorHandlerFunc = LogFatalHandler - -// LogHandlerFunc defines the function prototype for logging errors. -type LogHandlerFunc func(fmt string, args ...interface{}) - -// LogPrintf defines a log handler which uses log.Printf. -var LogPrintf LogHandlerFunc = log.Printf - -// LogFatalHandler handles the error by logging a fatal error and exiting. -func LogFatalHandler(err error) { - log.Fatal(err) -} - -// PanicHandler handles the error by panicking. -func PanicHandler(err error) { - panic(err) -} - -// ----------------------------------------------------------------------------- - -// A Properties contains the key/value pairs from the properties input. -// All values are stored in unexpanded form and are expanded at runtime -type Properties struct { - // Pre-/Postfix for property expansion. - Prefix string - Postfix string - - // DisableExpansion controls the expansion of properties on Get() - // and the check for circular references on Set(). When set to - // true Properties behaves like a simple key/value store and does - // not check for circular references on Get() or on Set(). - DisableExpansion bool - - // Stores the key/value pairs - m map[string]string - - // Stores the comments per key. - c map[string][]string - - // Stores the keys in order of appearance. - k []string - - // WriteSeparator specifies the separator of key and value while writing the properties. - WriteSeparator string -} - -// NewProperties creates a new Properties struct with the default -// configuration for "${key}" expressions. -func NewProperties() *Properties { - return &Properties{ - Prefix: "${", - Postfix: "}", - m: map[string]string{}, - c: map[string][]string{}, - k: []string{}, - } -} - -// Load reads a buffer into the given Properties struct. -func (p *Properties) Load(buf []byte, enc Encoding) error { - l := &Loader{Encoding: enc, DisableExpansion: p.DisableExpansion} - newProperties, err := l.LoadBytes(buf) - if err != nil { - return err - } - p.Merge(newProperties) - return nil -} - -// Get returns the expanded value for the given key if exists. -// Otherwise, ok is false. -func (p *Properties) Get(key string) (value string, ok bool) { - v, ok := p.m[key] - if p.DisableExpansion { - return v, ok - } - if !ok { - return "", false - } - - expanded, err := p.expand(key, v) - - // we guarantee that the expanded value is free of - // circular references and malformed expressions - // so we panic if we still get an error here. - if err != nil { - ErrorHandler(err) - } - - return expanded, true -} - -// MustGet returns the expanded value for the given key if exists. -// Otherwise, it panics. 
-func (p *Properties) MustGet(key string) string { - if v, ok := p.Get(key); ok { - return v - } - ErrorHandler(invalidKeyError(key)) - panic("ErrorHandler should exit") -} - -// ---------------------------------------------------------------------------- - -// ClearComments removes the comments for all keys. -func (p *Properties) ClearComments() { - p.c = map[string][]string{} -} - -// ---------------------------------------------------------------------------- - -// GetComment returns the last comment before the given key or an empty string. -func (p *Properties) GetComment(key string) string { - comments, ok := p.c[key] - if !ok || len(comments) == 0 { - return "" - } - return comments[len(comments)-1] -} - -// ---------------------------------------------------------------------------- - -// GetComments returns all comments that appeared before the given key or nil. -func (p *Properties) GetComments(key string) []string { - if comments, ok := p.c[key]; ok { - return comments - } - return nil -} - -// ---------------------------------------------------------------------------- - -// SetComment sets the comment for the key. -func (p *Properties) SetComment(key, comment string) { - p.c[key] = []string{comment} -} - -// ---------------------------------------------------------------------------- - -// SetComments sets the comments for the key. If the comments are nil then -// all comments for this key are deleted. -func (p *Properties) SetComments(key string, comments []string) { - if comments == nil { - delete(p.c, key) - return - } - p.c[key] = comments -} - -// ---------------------------------------------------------------------------- - -// GetBool checks if the expanded value is one of '1', 'yes', -// 'true' or 'on' if the key exists. The comparison is case-insensitive. -// If the key does not exist the default value is returned. -func (p *Properties) GetBool(key string, def bool) bool { - v, err := p.getBool(key) - if err != nil { - return def - } - return v -} - -// MustGetBool checks if the expanded value is one of '1', 'yes', -// 'true' or 'on' if the key exists. The comparison is case-insensitive. -// If the key does not exist the function panics. -func (p *Properties) MustGetBool(key string) bool { - v, err := p.getBool(key) - if err != nil { - ErrorHandler(err) - } - return v -} - -func (p *Properties) getBool(key string) (value bool, err error) { - if v, ok := p.Get(key); ok { - return boolVal(v), nil - } - return false, invalidKeyError(key) -} - -func boolVal(v string) bool { - v = strings.ToLower(v) - return v == "1" || v == "true" || v == "yes" || v == "on" -} - -// ---------------------------------------------------------------------------- - -// GetDuration parses the expanded value as a time.Duration (in ns) if the -// key exists. If key does not exist or the value cannot be parsed the default -// value is returned. In almost all cases you want to use GetParsedDuration(). -func (p *Properties) GetDuration(key string, def time.Duration) time.Duration { - v, err := p.getInt64(key) - if err != nil { - return def - } - return time.Duration(v) -} - -// MustGetDuration parses the expanded value as a time.Duration (in ns) if -// the key exists. If key does not exist or the value cannot be parsed the -// function panics. In almost all cases you want to use MustGetParsedDuration().
-func (p *Properties) MustGetDuration(key string) time.Duration { - v, err := p.getInt64(key) - if err != nil { - ErrorHandler(err) - } - return time.Duration(v) -} - -// ---------------------------------------------------------------------------- - -// GetParsedDuration parses the expanded value with time.ParseDuration() if the key exists. -// If key does not exist or the value cannot be parsed the default -// value is returned. -func (p *Properties) GetParsedDuration(key string, def time.Duration) time.Duration { - s, ok := p.Get(key) - if !ok { - return def - } - v, err := time.ParseDuration(s) - if err != nil { - return def - } - return v -} - -// MustGetParsedDuration parses the expanded value with time.ParseDuration() if the key exists. -// If key does not exist or the value cannot be parsed the function panics. -func (p *Properties) MustGetParsedDuration(key string) time.Duration { - s, ok := p.Get(key) - if !ok { - ErrorHandler(invalidKeyError(key)) - } - v, err := time.ParseDuration(s) - if err != nil { - ErrorHandler(err) - } - return v -} - -// ---------------------------------------------------------------------------- - -// GetFloat64 parses the expanded value as a float64 if the key exists. -// If key does not exist or the value cannot be parsed the default -// value is returned. -func (p *Properties) GetFloat64(key string, def float64) float64 { - v, err := p.getFloat64(key) - if err != nil { - return def - } - return v -} - -// MustGetFloat64 parses the expanded value as a float64 if the key exists. -// If key does not exist or the value cannot be parsed the function panics. -func (p *Properties) MustGetFloat64(key string) float64 { - v, err := p.getFloat64(key) - if err != nil { - ErrorHandler(err) - } - return v -} - -func (p *Properties) getFloat64(key string) (value float64, err error) { - if v, ok := p.Get(key); ok { - value, err = strconv.ParseFloat(v, 64) - if err != nil { - return 0, err - } - return value, nil - } - return 0, invalidKeyError(key) -} - -// ---------------------------------------------------------------------------- - -// GetInt parses the expanded value as an int if the key exists. -// If key does not exist or the value cannot be parsed the default -// value is returned. If the value does not fit into an int the -// function panics with an out of range error. -func (p *Properties) GetInt(key string, def int) int { - v, err := p.getInt64(key) - if err != nil { - return def - } - return intRangeCheck(key, v) -} - -// MustGetInt parses the expanded value as an int if the key exists. -// If key does not exist or the value cannot be parsed the function panics. -// If the value does not fit into an int the function panics with -// an out of range error. -func (p *Properties) MustGetInt(key string) int { - v, err := p.getInt64(key) - if err != nil { - ErrorHandler(err) - } - return intRangeCheck(key, v) -} - -// ---------------------------------------------------------------------------- - -// GetInt64 parses the expanded value as an int64 if the key exists. -// If key does not exist or the value cannot be parsed the default -// value is returned. -func (p *Properties) GetInt64(key string, def int64) int64 { - v, err := p.getInt64(key) - if err != nil { - return def - } - return v -} - -// MustGetInt64 parses the expanded value as an int64 if the key exists. -// If key does not exist or the value cannot be parsed the function panics.
-func (p *Properties) MustGetInt64(key string) int64 { - v, err := p.getInt64(key) - if err != nil { - ErrorHandler(err) - } - return v -} - -func (p *Properties) getInt64(key string) (value int64, err error) { - if v, ok := p.Get(key); ok { - value, err = strconv.ParseInt(v, 10, 64) - if err != nil { - return 0, err - } - return value, nil - } - return 0, invalidKeyError(key) -} - -// ---------------------------------------------------------------------------- - -// GetUint parses the expanded value as a uint if the key exists. -// If key does not exist or the value cannot be parsed the default -// value is returned. If the value does not fit into a uint the -// function panics with an out of range error. -func (p *Properties) GetUint(key string, def uint) uint { - v, err := p.getUint64(key) - if err != nil { - return def - } - return uintRangeCheck(key, v) -} - -// MustGetUint parses the expanded value as a uint if the key exists. -// If key does not exist or the value cannot be parsed the function panics. -// If the value does not fit into a uint the function panics with -// an out of range error. -func (p *Properties) MustGetUint(key string) uint { - v, err := p.getUint64(key) - if err != nil { - ErrorHandler(err) - } - return uintRangeCheck(key, v) -} - -// ---------------------------------------------------------------------------- - -// GetUint64 parses the expanded value as a uint64 if the key exists. -// If key does not exist or the value cannot be parsed the default -// value is returned. -func (p *Properties) GetUint64(key string, def uint64) uint64 { - v, err := p.getUint64(key) - if err != nil { - return def - } - return v -} - -// MustGetUint64 parses the expanded value as a uint64 if the key exists. -// If key does not exist or the value cannot be parsed the function panics. -func (p *Properties) MustGetUint64(key string) uint64 { - v, err := p.getUint64(key) - if err != nil { - ErrorHandler(err) - } - return v -} - -func (p *Properties) getUint64(key string) (value uint64, err error) { - if v, ok := p.Get(key); ok { - value, err = strconv.ParseUint(v, 10, 64) - if err != nil { - return 0, err - } - return value, nil - } - return 0, invalidKeyError(key) -} - -// ---------------------------------------------------------------------------- - -// GetString returns the expanded value for the given key if exists or -// the default value otherwise. -func (p *Properties) GetString(key, def string) string { - if v, ok := p.Get(key); ok { - return v - } - return def -} - -// MustGetString returns the expanded value for the given key if exists or -// panics otherwise. -func (p *Properties) MustGetString(key string) string { - if v, ok := p.Get(key); ok { - return v - } - ErrorHandler(invalidKeyError(key)) - panic("ErrorHandler should exit") -} - -// ---------------------------------------------------------------------------- - -// Filter returns a new properties object which contains all properties -// for which the key matches the pattern. -func (p *Properties) Filter(pattern string) (*Properties, error) { - re, err := regexp.Compile(pattern) - if err != nil { - return nil, err - } - - return p.FilterRegexp(re), nil -} - -// FilterRegexp returns a new properties object which contains all properties -// for which the key matches the regular expression. -func (p *Properties) FilterRegexp(re *regexp.Regexp) *Properties { - pp := NewProperties() - for _, k := range p.k { - if re.MatchString(k) { - // TODO(fs): we are ignoring the error which flags a circular reference.
- // TODO(fs): since we are just copying a subset of keys this cannot happen (fingers crossed) - pp.Set(k, p.m[k]) - } - } - return pp -} - -// FilterPrefix returns a new properties object with a subset of all keys -// with the given prefix. -func (p *Properties) FilterPrefix(prefix string) *Properties { - pp := NewProperties() - for _, k := range p.k { - if strings.HasPrefix(k, prefix) { - // TODO(fs): we are ignoring the error which flags a circular reference. - // TODO(fs): since we are just copying a subset of keys this cannot happen (fingers crossed) - pp.Set(k, p.m[k]) - } - } - return pp -} - -// FilterStripPrefix returns a new properties object with a subset of all keys -// with the given prefix and the prefix removed from the keys. -func (p *Properties) FilterStripPrefix(prefix string) *Properties { - pp := NewProperties() - n := len(prefix) - for _, k := range p.k { - if len(k) > len(prefix) && strings.HasPrefix(k, prefix) { - // TODO(fs): we are ignoring the error which flags a circular reference. - // TODO(fs): since we are modifying keys I am not entirely sure whether we can create a circular reference - // TODO(fs): this function should probably return an error but the signature is fixed - pp.Set(k[n:], p.m[k]) - } - } - return pp -} - -// Len returns the number of keys. -func (p *Properties) Len() int { - return len(p.m) -} - -// Keys returns all keys in the same order as in the input. -func (p *Properties) Keys() []string { - keys := make([]string, len(p.k)) - copy(keys, p.k) - return keys -} - -// Set sets the property key to the corresponding value. -// If a value for key existed before then ok is true and prev -// contains the previous value. If the value contains a -// circular reference or a malformed expression then -// an error is returned. -// An empty key is silently ignored. -func (p *Properties) Set(key, value string) (prev string, ok bool, err error) { - if key == "" { - return "", false, nil - } - - // if expansion is disabled we allow circular references - if p.DisableExpansion { - prev, ok = p.Get(key) - p.m[key] = value - if !ok { - p.k = append(p.k, key) - } - return prev, ok, nil - } - - // to check for a circular reference we temporarily need - // to set the new value. If there is an error then revert - // to the previous state. Only if all tests are successful - // then we add the key to the p.k list. - prev, ok = p.Get(key) - p.m[key] = value - - // now check for a circular reference - _, err = p.expand(key, value) - if err != nil { - - // revert to the previous state - if ok { - p.m[key] = prev - } else { - delete(p.m, key) - } - - return "", false, err - } - - if !ok { - p.k = append(p.k, key) - } - - return prev, ok, nil -} - -// SetValue sets property key to the default string value -// as defined by fmt.Sprintf("%v"). -func (p *Properties) SetValue(key string, value interface{}) error { - _, _, err := p.Set(key, fmt.Sprintf("%v", value)) - return err -} - -// MustSet sets the property key to the corresponding value. -// If a value for key existed before then ok is true and prev -// contains the previous value. An empty key is silently ignored. -func (p *Properties) MustSet(key, value string) (prev string, ok bool) { - prev, ok, err := p.Set(key, value) - if err != nil { - ErrorHandler(err) - } - return prev, ok -} - -// String returns a string of all expanded 'key = value' pairs. 
-func (p *Properties) String() string { - var s string - for _, key := range p.k { - value, _ := p.Get(key) - s = fmt.Sprintf("%s%s = %s\n", s, key, value) - } - return s -} - -// Sort sorts the properties keys in alphabetical order. -// This is helpful before writing the properties. -func (p *Properties) Sort() { - sort.Strings(p.k) -} - -// Write writes all unexpanded 'key = value' pairs to the given writer. -// Write returns the number of bytes written and any write error encountered. -func (p *Properties) Write(w io.Writer, enc Encoding) (n int, err error) { - return p.WriteComment(w, "", enc) -} - -// WriteComment writes all unexpanded 'key = value' pairs to the given writer. -// If prefix is not empty then comments are written with a blank line and the -// given prefix. The prefix should be either "# " or "! " to be compatible with -// the properties file format. Otherwise, the properties parser will not be -// able to read the file back in. It returns the number of bytes written and -// any write error encountered. -func (p *Properties) WriteComment(w io.Writer, prefix string, enc Encoding) (n int, err error) { - var x int - - for _, key := range p.k { - value := p.m[key] - - if prefix != "" { - if comments, ok := p.c[key]; ok { - // don't print comments if they are all empty - allEmpty := true - for _, c := range comments { - if c != "" { - allEmpty = false - break - } - } - - if !allEmpty { - // add a blank line between entries but not at the top - if len(comments) > 0 && n > 0 { - x, err = fmt.Fprintln(w) - if err != nil { - return - } - n += x - } - - for _, c := range comments { - x, err = fmt.Fprintf(w, "%s%s\n", prefix, c) - if err != nil { - return - } - n += x - } - } - } - } - sep := " = " - if p.WriteSeparator != "" { - sep = p.WriteSeparator - } - x, err = fmt.Fprintf(w, "%s%s%s\n", encode(key, " :", enc), sep, encode(value, "", enc)) - if err != nil { - return - } - n += x - } - return -} - -// Map returns a copy of the properties as a map. -func (p *Properties) Map() map[string]string { - m := make(map[string]string) - for k, v := range p.m { - m[k] = v - } - return m -} - -// FilterFunc returns a copy of the properties which includes the values which passed all filters. -func (p *Properties) FilterFunc(filters ...func(k, v string) bool) *Properties { - pp := NewProperties() -outer: - for k, v := range p.m { - for _, f := range filters { - if !f(k, v) { - continue outer - } - pp.Set(k, v) - } - } - return pp -} - -// ---------------------------------------------------------------------------- - -// Delete removes the key and its comments. -func (p *Properties) Delete(key string) { - delete(p.m, key) - delete(p.c, key) - newKeys := []string{} - for _, k := range p.k { - if k != key { - newKeys = append(newKeys, k) - } - } - p.k = newKeys -} - -// Merge merges properties, comments and keys from other *Properties into p -func (p *Properties) Merge(other *Properties) { - for _, k := range other.k { - if _, ok := p.m[k]; !ok { - p.k = append(p.k, k) - } - } - for k, v := range other.m { - p.m[k] = v - } - for k, v := range other.c { - p.c[k] = v - } -} - -// ---------------------------------------------------------------------------- - -// check expands all values and returns an error if a circular reference or -// a malformed expression was found.
-func (p *Properties) check() error { - for key, value := range p.m { - if _, err := p.expand(key, value); err != nil { - return err - } - } - return nil -} - -func (p *Properties) expand(key, input string) (string, error) { - // no pre/postfix -> nothing to expand - if p.Prefix == "" && p.Postfix == "" { - return input, nil - } - - return expand(input, []string{key}, p.Prefix, p.Postfix, p.m) -} - -// expand recursively expands expressions of '(prefix)key(postfix)' to their corresponding values. -// The function keeps track of the keys that were already expanded and stops if it -// detects a circular reference or a malformed expression of the form '(prefix)key'. -func expand(s string, keys []string, prefix, postfix string, values map[string]string) (string, error) { - if len(keys) > maxExpansionDepth { - return "", fmt.Errorf("expansion too deep") - } - - for { - start := strings.Index(s, prefix) - if start == -1 { - return s, nil - } - - keyStart := start + len(prefix) - keyLen := strings.Index(s[keyStart:], postfix) - if keyLen == -1 { - return "", fmt.Errorf("malformed expression") - } - - end := keyStart + keyLen + len(postfix) - 1 - key := s[keyStart : keyStart+keyLen] - - // fmt.Printf("s:%q pp:%q start:%d end:%d keyStart:%d keyLen:%d key:%q\n", s, prefix + "..." + postfix, start, end, keyStart, keyLen, key) - - for _, k := range keys { - if key == k { - var b bytes.Buffer - b.WriteString("circular reference in:\n") - for _, k1 := range keys { - fmt.Fprintf(&b, "%s=%s\n", k1, values[k1]) - } - return "", fmt.Errorf(b.String()) - } - } - - val, ok := values[key] - if !ok { - val = os.Getenv(key) - } - new_val, err := expand(val, append(keys, key), prefix, postfix, values) - if err != nil { - return "", err - } - s = s[:start] + new_val + s[end+1:] - } -} - -// encode encodes a UTF-8 string to ISO-8859-1 and escapes some characters. -func encode(s string, special string, enc Encoding) string { - switch enc { - case UTF8: - return encodeUtf8(s, special) - case ISO_8859_1: - return encodeIso(s, special) - default: - panic(fmt.Sprintf("unsupported encoding %v", enc)) - } -} - -func encodeUtf8(s string, special string) string { - v := "" - for pos := 0; pos < len(s); { - r, w := utf8.DecodeRuneInString(s[pos:]) - pos += w - v += escape(r, special) - } - return v -} - -func encodeIso(s string, special string) string { - var r rune - var w int - var v string - for pos := 0; pos < len(s); { - switch r, w = utf8.DecodeRuneInString(s[pos:]); { - case r < 1<<8: // single byte rune -> escape special chars only - v += escape(r, special) - case r < 1<<16: // two byte rune -> unicode literal - v += fmt.Sprintf("\\u%04x", r) - default: // more than two bytes per rune -> can't encode - v += "?" - } - pos += w - } - return v -} - -func escape(r rune, special string) string { - switch r { - case '\f': - return "\\f" - case '\n': - return "\\n" - case '\r': - return "\\r" - case '\t': - return "\\t" - case '\\': - return "\\\\" - default: - if strings.ContainsRune(special, r) { - return "\\" + string(r) - } - return string(r) - } -} - -func invalidKeyError(key string) error { - return fmt.Errorf("unknown property: %s", key) -} diff --git a/tools/vendor/github.com/magiconair/properties/rangecheck.go b/tools/vendor/github.com/magiconair/properties/rangecheck.go deleted file mode 100644 index dbd60b36e..000000000 --- a/tools/vendor/github.com/magiconair/properties/rangecheck.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2013-2022 Frank Schroeder. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package properties - -import ( - "fmt" - "math" -) - -// make this a var to overwrite it in a test -var is32Bit = ^uint(0) == math.MaxUint32 - -// intRangeCheck checks if the value fits into the int type and -// panics if it does not. -func intRangeCheck(key string, v int64) int { - if is32Bit && (v < math.MinInt32 || v > math.MaxInt32) { - panic(fmt.Sprintf("Value %d for key %s out of range", v, key)) - } - return int(v) -} - -// uintRangeCheck checks if the value fits into the uint type and -// panics if it does not. -func uintRangeCheck(key string, v uint64) uint { - if is32Bit && v > math.MaxUint32 { - panic(fmt.Sprintf("Value %d for key %s out of range", v, key)) - } - return uint(v) -} diff --git a/tools/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE b/tools/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE deleted file mode 100644 index 8dada3eda..000000000 --- a/tools/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/tools/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE b/tools/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE deleted file mode 100644 index 5d8cb5b72..000000000 --- a/tools/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE +++ /dev/null @@ -1 +0,0 @@ -Copyright 2012 Matt T. 
Proud (matt.proud@gmail.com) diff --git a/tools/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore b/tools/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore deleted file mode 100644 index e16fb946b..000000000 --- a/tools/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore +++ /dev/null @@ -1 +0,0 @@ -cover.dat diff --git a/tools/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile b/tools/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile deleted file mode 100644 index 81be21437..000000000 --- a/tools/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile +++ /dev/null @@ -1,7 +0,0 @@ -all: - -cover: - go test -cover -v -coverprofile=cover.dat ./... - go tool cover -func cover.dat - -.PHONY: cover diff --git a/tools/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go b/tools/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go deleted file mode 100644 index 258c0636a..000000000 --- a/tools/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2013 Matt T. Proud -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package pbutil - -import ( - "encoding/binary" - "errors" - "io" - - "github.com/golang/protobuf/proto" -) - -var errInvalidVarint = errors.New("invalid varint32 encountered") - -// ReadDelimited decodes a message from the provided length-delimited stream, -// where the length is encoded as 32-bit varint prefix to the message body. -// It returns the total number of bytes read and any applicable error. This is -// roughly equivalent to the companion Java API's -// MessageLite#parseDelimitedFrom. As per the reader contract, this function -// calls r.Read repeatedly as required until exactly one message including its -// prefix is read and decoded (or an error has occurred). The function never -// reads more bytes from the stream than required. The function never returns -// an error if a message has been read and decoded correctly, even if the end -// of the stream has been reached in doing so. In that case, any subsequent -// calls return (0, io.EOF). -func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) { - // Per AbstractParser#parsePartialDelimitedFrom with - // CodedInputStream#readRawVarint32. - var headerBuf [binary.MaxVarintLen32]byte - var bytesRead, varIntBytes int - var messageLength uint64 - for varIntBytes == 0 { // i.e. no varint has been decoded yet. - if bytesRead >= len(headerBuf) { - return bytesRead, errInvalidVarint - } - // We have to read byte by byte here to avoid reading more bytes - // than required. Each read byte is appended to what we have - // read before. 
- newBytesRead, err := r.Read(headerBuf[bytesRead : bytesRead+1]) - if newBytesRead == 0 { - if err != nil { - return bytesRead, err - } - // A Reader should not return (0, nil), but if it does, - // it should be treated as no-op (according to the - // Reader contract). So let's go on... - continue - } - bytesRead += newBytesRead - // Now present everything read so far to the varint decoder and - // see if a varint can be decoded already. - messageLength, varIntBytes = proto.DecodeVarint(headerBuf[:bytesRead]) - } - - messageBuf := make([]byte, messageLength) - newBytesRead, err := io.ReadFull(r, messageBuf) - bytesRead += newBytesRead - if err != nil { - return bytesRead, err - } - - return bytesRead, proto.Unmarshal(messageBuf, m) -} diff --git a/tools/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go b/tools/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go deleted file mode 100644 index 8fb59ad22..000000000 --- a/tools/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2013 Matt T. Proud -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package pbutil - -import ( - "encoding/binary" - "io" - - "github.com/golang/protobuf/proto" -) - -// WriteDelimited encodes and dumps a message to the provided writer prefixed -// with a 32-bit varint indicating the length of the encoded message, producing -// a length-delimited record stream, which can be used to chain together -// encoded messages of the same type together in a file. It returns the total -// number of bytes written and any applicable error. This is roughly -// equivalent to the companion Java API's MessageLite#writeDelimitedTo. -func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) { - buffer, err := proto.Marshal(m) - if err != nil { - return 0, err - } - - var buf [binary.MaxVarintLen32]byte - encodedLength := binary.PutUvarint(buf[:], uint64(len(buffer))) - - sync, err := w.Write(buf[:encodedLength]) - if err != nil { - return sync, err - } - - n, err = w.Write(buffer) - return n + sync, err -} diff --git a/tools/vendor/github.com/mgechev/revive/lint/package.go b/tools/vendor/github.com/mgechev/revive/lint/package.go index dc99e8027..dfc701f7e 100644 --- a/tools/vendor/github.com/mgechev/revive/lint/package.go +++ b/tools/vendor/github.com/mgechev/revive/lint/package.go @@ -38,6 +38,7 @@ var ( go115 = goversion.Must(goversion.NewVersion("1.15")) go121 = goversion.Must(goversion.NewVersion("1.21")) go122 = goversion.Must(goversion.NewVersion("1.22")) + go124 = goversion.Must(goversion.NewVersion("1.24")) ) // Files return package's files. 
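The pbutil helpers deleted above implement protobuf's length-delimited stream framing: each record is prefixed with its byte length encoded as an unsigned varint, and the reader consumes the prefix one byte at a time so it never reads past the header. Below is a minimal, dependency-free sketch of that framing; it frames raw []byte payloads rather than proto.Message values (an assumption made to keep the example self-contained), but the varint wire format is the same.

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

// writeDelimited prefixes payload with its length as an unsigned varint,
// matching the framing produced by the deleted WriteDelimited.
func writeDelimited(w io.Writer, payload []byte) (int, error) {
	framed := binary.AppendUvarint(nil, uint64(len(payload)))
	return w.Write(append(framed, payload...))
}

// oneByteReader reads a single byte per call so that, like the deleted
// ReadDelimited, decoding never consumes bytes past the varint prefix.
type oneByteReader struct{ r io.Reader }

func (o oneByteReader) ReadByte() (byte, error) {
	var b [1]byte
	if _, err := io.ReadFull(o.r, b[:]); err != nil {
		return 0, err
	}
	return b[0], nil
}

// readDelimited decodes one length-prefixed record from r.
func readDelimited(r io.Reader) ([]byte, error) {
	n, err := binary.ReadUvarint(oneByteReader{r})
	if err != nil {
		return nil, err
	}
	payload := make([]byte, n)
	_, err = io.ReadFull(r, payload)
	return payload, err
}

func main() {
	var stream bytes.Buffer
	writeDelimited(&stream, []byte("hello"))
	writeDelimited(&stream, []byte("world"))
	for {
		rec, err := readDelimited(&stream)
		if err != nil {
			break // io.EOF once the stream is drained
		}
		fmt.Printf("%s\n", rec)
	}
}
```

The one-byte reader mirrors the deleted ReadDelimited's guarantee of never consuming bytes beyond the varint prefix; a bufio.Reader would be faster but may over-read from the underlying stream.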
@@ -210,6 +211,11 @@ func (p *Package) IsAtLeastGo122() bool { return p.goVersion.GreaterThanOrEqual(go122) } +// IsAtLeastGo124 returns true if the Go version for this package is 1.24 or higher, false otherwise +func (p *Package) IsAtLeastGo124() bool { + return p.goVersion.GreaterThanOrEqual(go124) +} + func getSortableMethodFlagForFunction(fn *ast.FuncDecl) sortableMethodsFlags { switch { case astutils.FuncSignatureIs(fn, "Len", []string{}, []string{"int"}): diff --git a/tools/vendor/github.com/mgechev/revive/rule/struct_tag.go b/tools/vendor/github.com/mgechev/revive/rule/struct_tag.go index 5182612b2..00a2b964c 100644 --- a/tools/vendor/github.com/mgechev/revive/rule/struct_tag.go +++ b/tools/vendor/github.com/mgechev/revive/rule/struct_tag.go @@ -54,8 +54,9 @@ func (r *StructTagRule) Apply(file *lint.File, _ lint.Arguments) []lint.Failure } w := lintStructTagRule{ - onFailure: onFailure, - userDefined: r.userDefined, + onFailure: onFailure, + userDefined: r.userDefined, + isAtLeastGo124: file.Pkg.IsAtLeastGo124(), } ast.Walk(w, file.AST) @@ -69,10 +70,11 @@ func (*StructTagRule) Name() string { } type lintStructTagRule struct { - onFailure func(lint.Failure) - userDefined map[string][]string // map: key -> []option - usedTagNbr map[int]bool // list of used tag numbers - usedTagName map[string]bool // list of used tag keys + onFailure func(lint.Failure) + userDefined map[string][]string // map: key -> []option + usedTagNbr map[int]bool // list of used tag numbers + usedTagName map[string]bool // list of used tag keys + isAtLeastGo124 bool } func (w lintStructTagRule) Visit(node ast.Node) ast.Visitor { @@ -281,6 +283,11 @@ func (w lintStructTagRule) checkJSONTag(name string, options []string) (string, if name != "-" { return "option can not be empty in JSON tag", false } + case "omitzero": + if w.isAtLeastGo124 { + continue + } + fallthrough default: if w.isUserDefined(keyJSON, opt) { continue diff --git a/tools/vendor/github.com/mitchellh/mapstructure/README.md b/tools/vendor/github.com/mitchellh/mapstructure/README.md deleted file mode 100644 index 0018dc7d9..000000000 --- a/tools/vendor/github.com/mitchellh/mapstructure/README.md +++ /dev/null @@ -1,46 +0,0 @@ -# mapstructure [![Godoc](https://godoc.org/github.com/mitchellh/mapstructure?status.svg)](https://godoc.org/github.com/mitchellh/mapstructure) - -mapstructure is a Go library for decoding generic map values to structures -and vice versa, while providing helpful error handling. - -This library is most useful when decoding values from some data stream (JSON, -Gob, etc.) where you don't _quite_ know the structure of the underlying data -until you read a part of it. You can therefore read a `map[string]interface{}` -and use this library to decode it into the proper underlying native Go -structure. - -## Installation - -Standard `go get`: - -``` -$ go get github.com/mitchellh/mapstructure -``` - -## Usage & Example - -For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/mapstructure). - -The `Decode` function has examples associated with it there. - -## But Why?! - -Go offers fantastic standard libraries for decoding formats such as JSON. -The standard method is to have a struct pre-created, and populate that struct -from the bytes of the encoded format. This is great, but the problem is if -you have configuration or an encoding that changes slightly depending on -specific fields. 
For example, consider this JSON: - -```json -{ - "type": "person", - "name": "Mitchell" -} -``` - -Perhaps we can't populate a specific structure without first reading -the "type" field from the JSON. We could always do two passes over the -decoding of the JSON (reading the "type" first, and the rest later). -However, it is much simpler to just decode this into a `map[string]interface{}` -structure, read the "type" key, then use something like this library -to decode it into the proper structure. diff --git a/tools/vendor/github.com/mitchellh/mapstructure/decode_hooks.go b/tools/vendor/github.com/mitchellh/mapstructure/decode_hooks.go deleted file mode 100644 index 3a754ca72..000000000 --- a/tools/vendor/github.com/mitchellh/mapstructure/decode_hooks.go +++ /dev/null @@ -1,279 +0,0 @@ -package mapstructure - -import ( - "encoding" - "errors" - "fmt" - "net" - "reflect" - "strconv" - "strings" - "time" -) - -// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns -// it into the proper DecodeHookFunc type, such as DecodeHookFuncType. -func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc { - // Create variables here so we can reference them with the reflect pkg - var f1 DecodeHookFuncType - var f2 DecodeHookFuncKind - var f3 DecodeHookFuncValue - - // Fill in the variables into this interface and the rest is done - // automatically using the reflect package. - potential := []interface{}{f1, f2, f3} - - v := reflect.ValueOf(h) - vt := v.Type() - for _, raw := range potential { - pt := reflect.ValueOf(raw).Type() - if vt.ConvertibleTo(pt) { - return v.Convert(pt).Interface() - } - } - - return nil -} - -// DecodeHookExec executes the given decode hook. This should be used -// since it'll naturally degrade to the older backwards compatible DecodeHookFunc -// that took reflect.Kind instead of reflect.Type. -func DecodeHookExec( - raw DecodeHookFunc, - from reflect.Value, to reflect.Value) (interface{}, error) { - - switch f := typedDecodeHook(raw).(type) { - case DecodeHookFuncType: - return f(from.Type(), to.Type(), from.Interface()) - case DecodeHookFuncKind: - return f(from.Kind(), to.Kind(), from.Interface()) - case DecodeHookFuncValue: - return f(from, to) - default: - return nil, errors.New("invalid decode hook signature") - } -} - -// ComposeDecodeHookFunc creates a single DecodeHookFunc that -// automatically composes multiple DecodeHookFuncs. -// -// The composed funcs are called in order, with the result of the -// previous transformation. -func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc { - return func(f reflect.Value, t reflect.Value) (interface{}, error) { - var err error - data := f.Interface() - - newFrom := f - for _, f1 := range fs { - data, err = DecodeHookExec(f1, newFrom, t) - if err != nil { - return nil, err - } - newFrom = reflect.ValueOf(data) - } - - return data, nil - } -} - -// OrComposeDecodeHookFunc executes all input hook functions until one of them returns no error. In that case its value is returned. -// If all hooks return an error, OrComposeDecodeHookFunc returns an error concatenating all error messages. 
-func OrComposeDecodeHookFunc(ff ...DecodeHookFunc) DecodeHookFunc { - return func(a, b reflect.Value) (interface{}, error) { - var allErrs string - var out interface{} - var err error - - for _, f := range ff { - out, err = DecodeHookExec(f, a, b) - if err != nil { - allErrs += err.Error() + "\n" - continue - } - - return out, nil - } - - return nil, errors.New(allErrs) - } -} - -// StringToSliceHookFunc returns a DecodeHookFunc that converts -// string to []string by splitting on the given sep. -func StringToSliceHookFunc(sep string) DecodeHookFunc { - return func( - f reflect.Kind, - t reflect.Kind, - data interface{}) (interface{}, error) { - if f != reflect.String || t != reflect.Slice { - return data, nil - } - - raw := data.(string) - if raw == "" { - return []string{}, nil - } - - return strings.Split(raw, sep), nil - } -} - -// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts -// strings to time.Duration. -func StringToTimeDurationHookFunc() DecodeHookFunc { - return func( - f reflect.Type, - t reflect.Type, - data interface{}) (interface{}, error) { - if f.Kind() != reflect.String { - return data, nil - } - if t != reflect.TypeOf(time.Duration(5)) { - return data, nil - } - - // Convert it by parsing - return time.ParseDuration(data.(string)) - } -} - -// StringToIPHookFunc returns a DecodeHookFunc that converts -// strings to net.IP -func StringToIPHookFunc() DecodeHookFunc { - return func( - f reflect.Type, - t reflect.Type, - data interface{}) (interface{}, error) { - if f.Kind() != reflect.String { - return data, nil - } - if t != reflect.TypeOf(net.IP{}) { - return data, nil - } - - // Convert it by parsing - ip := net.ParseIP(data.(string)) - if ip == nil { - return net.IP{}, fmt.Errorf("failed parsing ip %v", data) - } - - return ip, nil - } -} - -// StringToIPNetHookFunc returns a DecodeHookFunc that converts -// strings to net.IPNet -func StringToIPNetHookFunc() DecodeHookFunc { - return func( - f reflect.Type, - t reflect.Type, - data interface{}) (interface{}, error) { - if f.Kind() != reflect.String { - return data, nil - } - if t != reflect.TypeOf(net.IPNet{}) { - return data, nil - } - - // Convert it by parsing - _, net, err := net.ParseCIDR(data.(string)) - return net, err - } -} - -// StringToTimeHookFunc returns a DecodeHookFunc that converts -// strings to time.Time. -func StringToTimeHookFunc(layout string) DecodeHookFunc { - return func( - f reflect.Type, - t reflect.Type, - data interface{}) (interface{}, error) { - if f.Kind() != reflect.String { - return data, nil - } - if t != reflect.TypeOf(time.Time{}) { - return data, nil - } - - // Convert it by parsing - return time.Parse(layout, data.(string)) - } -} - -// WeaklyTypedHook is a DecodeHookFunc which adds support for weak typing to -// the decoder. -// -// Note that this is significantly different from the WeaklyTypedInput option -// of the DecoderConfig. 
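To ground that distinction: WeaklyTypedInput is a DecoderConfig flag that enables the permissive conversions globally, while decode hooks are composed explicitly and run value by value. A usage sketch against the mitchellh/mapstructure API above; the Config struct and input map are illustrative, not taken from the patch.

```go
package main

import (
	"fmt"
	"time"

	"github.com/mitchellh/mapstructure"
)

// Config is an illustrative target type for the decoder sketch.
type Config struct {
	Timeout time.Duration `mapstructure:"timeout"`
	Tags    []string      `mapstructure:"tags"`
	Debug   bool          `mapstructure:"debug"`
}

func main() {
	input := map[string]interface{}{
		"timeout": "2s",    // string -> time.Duration via hook
		"tags":    "a,b,c", // string -> []string via hook
		"debug":   "1",     // string -> bool via WeaklyTypedInput
	}

	var cfg Config
	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		Result:           &cfg,
		WeaklyTypedInput: true, // global permissive conversions
		DecodeHook: mapstructure.ComposeDecodeHookFunc( // hooks run in order
			mapstructure.StringToTimeDurationHookFunc(),
			mapstructure.StringToSliceHookFunc(","),
		),
	})
	if err != nil {
		panic(err)
	}
	if err := dec.Decode(input); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg) // {Timeout:2s Tags:[a b c] Debug:true}
}
```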
-func WeaklyTypedHook( - f reflect.Kind, - t reflect.Kind, - data interface{}) (interface{}, error) { - dataVal := reflect.ValueOf(data) - switch t { - case reflect.String: - switch f { - case reflect.Bool: - if dataVal.Bool() { - return "1", nil - } - return "0", nil - case reflect.Float32: - return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil - case reflect.Int: - return strconv.FormatInt(dataVal.Int(), 10), nil - case reflect.Slice: - dataType := dataVal.Type() - elemKind := dataType.Elem().Kind() - if elemKind == reflect.Uint8 { - return string(dataVal.Interface().([]uint8)), nil - } - case reflect.Uint: - return strconv.FormatUint(dataVal.Uint(), 10), nil - } - } - - return data, nil -} - -func RecursiveStructToMapHookFunc() DecodeHookFunc { - return func(f reflect.Value, t reflect.Value) (interface{}, error) { - if f.Kind() != reflect.Struct { - return f.Interface(), nil - } - - var i interface{} = struct{}{} - if t.Type() != reflect.TypeOf(&i).Elem() { - return f.Interface(), nil - } - - m := make(map[string]interface{}) - t.Set(reflect.ValueOf(m)) - - return f.Interface(), nil - } -} - -// TextUnmarshallerHookFunc returns a DecodeHookFunc that applies -// strings to the UnmarshalText function, when the target type -// implements the encoding.TextUnmarshaler interface -func TextUnmarshallerHookFunc() DecodeHookFuncType { - return func( - f reflect.Type, - t reflect.Type, - data interface{}) (interface{}, error) { - if f.Kind() != reflect.String { - return data, nil - } - result := reflect.New(t).Interface() - unmarshaller, ok := result.(encoding.TextUnmarshaler) - if !ok { - return data, nil - } - if err := unmarshaller.UnmarshalText([]byte(data.(string))); err != nil { - return nil, err - } - return result, nil - } -} diff --git a/tools/vendor/github.com/mitchellh/mapstructure/error.go b/tools/vendor/github.com/mitchellh/mapstructure/error.go deleted file mode 100644 index 47a99e5af..000000000 --- a/tools/vendor/github.com/mitchellh/mapstructure/error.go +++ /dev/null @@ -1,50 +0,0 @@ -package mapstructure - -import ( - "errors" - "fmt" - "sort" - "strings" -) - -// Error implements the error interface and can represents multiple -// errors that occur in the course of a single decode. -type Error struct { - Errors []string -} - -func (e *Error) Error() string { - points := make([]string, len(e.Errors)) - for i, err := range e.Errors { - points[i] = fmt.Sprintf("* %s", err) - } - - sort.Strings(points) - return fmt.Sprintf( - "%d error(s) decoding:\n\n%s", - len(e.Errors), strings.Join(points, "\n")) -} - -// WrappedErrors implements the errwrap.Wrapper interface to make this -// return value more useful with the errwrap and go-multierror libraries. -func (e *Error) WrappedErrors() []error { - if e == nil { - return nil - } - - result := make([]error, len(e.Errors)) - for i, e := range e.Errors { - result[i] = errors.New(e) - } - - return result -} - -func appendErrors(errors []string, err error) []string { - switch e := err.(type) { - case *Error: - return append(errors, e.Errors...) 
- default: - return append(errors, e.Error()) - } -} diff --git a/tools/vendor/github.com/golang/protobuf/LICENSE b/tools/vendor/github.com/munnerz/goautoneg/LICENSE similarity index 53% rename from tools/vendor/github.com/golang/protobuf/LICENSE rename to tools/vendor/github.com/munnerz/goautoneg/LICENSE index 0f646931a..bbc7b897c 100644 --- a/tools/vendor/github.com/golang/protobuf/LICENSE +++ b/tools/vendor/github.com/munnerz/goautoneg/LICENSE @@ -1,28 +1,31 @@ -Copyright 2010 The Go Authors. All rights reserved. +Copyright (c) 2011, Open Knowledge Foundation Ltd. +All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. + Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + Neither the name of the Open Knowledge Foundation Ltd. nor the + names of its contributors may be used to endorse or promote + products derived from this software without specific prior written + permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
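For context on the hunks that follow: goautoneg implements HTTP Accept-header content negotiation (RFC 2616, section 14), and this patch moves its vendored copy from prometheus/common's internal tree to github.com/munnerz/goautoneg while reworking ParseAccept to avoid the intermediate slices produced by strings.Split. A usage sketch against the package's public API; the header value is illustrative.

```go
package main

import (
	"fmt"

	"github.com/munnerz/goautoneg"
)

func main() {
	// A client that prefers JSON, then HTML, and accepts anything at q=0.1.
	header := "application/json;q=1.0, text/html;q=0.9, */*;q=0.1"

	// Negotiate returns the best server-supported alternative for the
	// header, honoring the q-values parsed by ParseAccept.
	best := goautoneg.Negotiate(header, []string{"text/html", "application/json"})
	fmt.Println(best) // application/json
}
```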
- diff --git a/tools/vendor/github.com/munnerz/goautoneg/Makefile b/tools/vendor/github.com/munnerz/goautoneg/Makefile new file mode 100644 index 000000000..e33ee1730 --- /dev/null +++ b/tools/vendor/github.com/munnerz/goautoneg/Makefile @@ -0,0 +1,13 @@ +include $(GOROOT)/src/Make.inc + +TARG=bitbucket.org/ww/goautoneg +GOFILES=autoneg.go + +include $(GOROOT)/src/Make.pkg + +format: + gofmt -w *.go + +docs: + gomake clean + godoc ${TARG} > README.txt diff --git a/tools/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt b/tools/vendor/github.com/munnerz/goautoneg/README.txt similarity index 100% rename from tools/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt rename to tools/vendor/github.com/munnerz/goautoneg/README.txt diff --git a/tools/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go b/tools/vendor/github.com/munnerz/goautoneg/autoneg.go similarity index 63% rename from tools/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go rename to tools/vendor/github.com/munnerz/goautoneg/autoneg.go index 26e92288c..1dd1cad64 100644 --- a/tools/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go +++ b/tools/vendor/github.com/munnerz/goautoneg/autoneg.go @@ -1,12 +1,12 @@ /* -Copyright (c) 2011, Open Knowledge Foundation Ltd. -All rights reserved. - HTTP Content-Type Autonegotiation. The functions in this package implement the behaviour specified in http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html +Copyright (c) 2011, Open Knowledge Foundation Ltd. +All rights reserved. + Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: @@ -35,9 +35,8 @@ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - */ + package goautoneg import ( @@ -53,16 +52,14 @@ type Accept struct { Params map[string]string } -// For internal use, so that we can use the sort interface -type accept_slice []Accept +// acceptSlice is defined to implement sort interface. 
+type acceptSlice []Accept -func (accept accept_slice) Len() int { - slice := []Accept(accept) +func (slice acceptSlice) Len() int { return len(slice) } -func (accept accept_slice) Less(i, j int) bool { - slice := []Accept(accept) +func (slice acceptSlice) Less(i, j int) bool { ai, aj := slice[i], slice[j] if ai.Q > aj.Q { return true @@ -76,63 +73,93 @@ func (accept accept_slice) Less(i, j int) bool { return false } -func (accept accept_slice) Swap(i, j int) { - slice := []Accept(accept) +func (slice acceptSlice) Swap(i, j int) { slice[i], slice[j] = slice[j], slice[i] } +func stringTrimSpaceCutset(r rune) bool { + return r == ' ' +} + +func nextSplitElement(s, sep string) (item string, remaining string) { + if index := strings.Index(s, sep); index != -1 { + return s[:index], s[index+1:] + } + return s, "" +} + // Parse an Accept Header string returning a sorted list // of clauses -func ParseAccept(header string) (accept []Accept) { - parts := strings.Split(header, ",") - accept = make([]Accept, 0, len(parts)) - for _, part := range parts { - part := strings.Trim(part, " ") +func ParseAccept(header string) acceptSlice { + partsCount := 0 + remaining := header + for len(remaining) > 0 { + partsCount++ + _, remaining = nextSplitElement(remaining, ",") + } + accept := make(acceptSlice, 0, partsCount) - a := Accept{} - a.Params = make(map[string]string) - a.Q = 1.0 + remaining = header + var part string + for len(remaining) > 0 { + part, remaining = nextSplitElement(remaining, ",") + part = strings.TrimFunc(part, stringTrimSpaceCutset) + + a := Accept{ + Q: 1.0, + } - mrp := strings.Split(part, ";") + sp, remainingPart := nextSplitElement(part, ";") - media_range := mrp[0] - sp := strings.Split(media_range, "/") - a.Type = strings.Trim(sp[0], " ") + sp0, spRemaining := nextSplitElement(sp, "/") + a.Type = strings.TrimFunc(sp0, stringTrimSpaceCutset) switch { - case len(sp) == 1 && a.Type == "*": - a.SubType = "*" - case len(sp) == 2: - a.SubType = strings.Trim(sp[1], " ") + case len(spRemaining) == 0: + if a.Type == "*" { + a.SubType = "*" + } else { + continue + } default: - continue + var sp1 string + sp1, spRemaining = nextSplitElement(spRemaining, "/") + if len(spRemaining) > 0 { + continue + } + a.SubType = strings.TrimFunc(sp1, stringTrimSpaceCutset) } - if len(mrp) == 1 { + if len(remainingPart) == 0 { accept = append(accept, a) continue } - for _, param := range mrp[1:] { - sp := strings.SplitN(param, "=", 2) - if len(sp) != 2 { + a.Params = make(map[string]string) + for len(remainingPart) > 0 { + sp, remainingPart = nextSplitElement(remainingPart, ";") + sp0, spRemaining = nextSplitElement(sp, "=") + if len(spRemaining) == 0 { continue } - token := strings.Trim(sp[0], " ") + var sp1 string + sp1, spRemaining = nextSplitElement(spRemaining, "=") + if len(spRemaining) != 0 { + continue + } + token := strings.TrimFunc(sp0, stringTrimSpaceCutset) if token == "q" { - a.Q, _ = strconv.ParseFloat(sp[1], 32) + a.Q, _ = strconv.ParseFloat(sp1, 32) } else { - a.Params[token] = strings.Trim(sp[1], " ") + a.Params[token] = strings.TrimFunc(sp1, stringTrimSpaceCutset) } } accept = append(accept, a) } - slice := accept_slice(accept) - sort.Sort(slice) - - return + sort.Sort(accept) + return accept } // Negotiate the most appropriate content_type given the accept header diff --git a/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/actual/actualarg.go b/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/actual/actualarg.go index 5b6cfbbc4..7ba83c586 100644 --- 
a/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/actual/actualarg.go +++ b/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/expression/actual/actualarg.go @@ -8,6 +8,7 @@ import ( "golang.org/x/tools/go/analysis" "github.com/nunnatsa/ginkgolinter/internal/expression/value" + "github.com/nunnatsa/ginkgolinter/internal/ginkgoinfo" "github.com/nunnatsa/ginkgolinter/internal/gomegahandler" "github.com/nunnatsa/ginkgolinter/internal/gomegainfo" "github.com/nunnatsa/ginkgolinter/internal/reverseassertion" @@ -98,7 +99,7 @@ func getActualArg(origActualExpr *ast.CallExpr, actualExprClone *ast.CallExpr, a argExprClone = actualExprClone.Args[funcOffset] if gomegainfo.IsAsyncActualMethod(actualMethodName) { - if pass.TypesInfo.TypeOf(origArgExpr).String() == "context.Context" { + if ginkgoinfo.IsGinkgoContext(pass.TypesInfo.TypeOf(origArgExpr)) { funcOffset++ if len(origActualExpr.Args) <= funcOffset { return nil, nil, 0, false diff --git a/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/ginkgoinfo/ginkgoinfo.go b/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/ginkgoinfo/ginkgoinfo.go new file mode 100644 index 000000000..bdc8b2e16 --- /dev/null +++ b/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/ginkgoinfo/ginkgoinfo.go @@ -0,0 +1,26 @@ +package ginkgoinfo + +import ( + gotypes "go/types" + "strings" +) + +const ( + ctxTypeName = "context.Context" + ginkgoCtxSuffix = "github.com/onsi/ginkgo/v2/internal.SpecContext" +) + +func IsGinkgoContext(t gotypes.Type) bool { + maybeCtx := gotypes.Unalias(t) + + typeName := maybeCtx.String() + if typeName == ctxTypeName { + return true + } + + if strings.HasSuffix(typeName, ginkgoCtxSuffix) { + return true + } + + return false +} diff --git a/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/gomegainfo/gomegainfo.go b/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/gomegainfo/gomegainfo.go index 144ad0bde..93be55ec0 100644 --- a/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/gomegainfo/gomegainfo.go +++ b/tools/vendor/github.com/nunnatsa/ginkgolinter/internal/gomegainfo/gomegainfo.go @@ -86,30 +86,32 @@ func IsAssertionFunc(name string) bool { } func IsGomegaVar(x ast.Expr, pass *analysis.Pass) bool { - if tx, ok := pass.TypesInfo.Types[x]; ok { - return IsGomegaType(tx.Type) + if _, isIdent := x.(*ast.Ident); !isIdent { + return false } - return false + tx, ok := pass.TypesInfo.Types[x] + if !ok { + return false + } + + return IsGomegaType(tx.Type) } +const ( + gomegaStructType = "github.com/onsi/gomega/internal.Gomega" + gomegaInterface = "github.com/onsi/gomega/types.Gomega" +) + func IsGomegaType(t gotypes.Type) bool { - var typeStr string - switch ttx := t.(type) { + switch ttx := gotypes.Unalias(t).(type) { case *gotypes.Pointer: - tp := ttx.Elem() - typeStr = tp.String() + return IsGomegaType(ttx.Elem()) case *gotypes.Named: - typeStr = ttx.String() - - case *gotypes.Alias: - typeStr = ttx.String() - - default: - return false + name := ttx.String() + return strings.HasSuffix(name, gomegaStructType) || strings.HasSuffix(name, gomegaInterface) } - return strings.Contains(typeStr, "github.com/onsi/gomega") && - (strings.HasSuffix(typeStr, "Gomega") || strings.HasSuffix(typeStr, "WithT")) + return false } diff --git a/tools/vendor/github.com/pelletier/go-toml/.dockerignore b/tools/vendor/github.com/pelletier/go-toml/.dockerignore deleted file mode 100644 index 7b5883475..000000000 --- a/tools/vendor/github.com/pelletier/go-toml/.dockerignore +++ /dev/null @@ -1,2 +0,0 @@ -cmd/tomll/tomll 
-cmd/tomljson/tomljson diff --git a/tools/vendor/github.com/pelletier/go-toml/.gitignore b/tools/vendor/github.com/pelletier/go-toml/.gitignore deleted file mode 100644 index e6ba63a5c..000000000 --- a/tools/vendor/github.com/pelletier/go-toml/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -test_program/test_program_bin -fuzz/ -cmd/tomll/tomll -cmd/tomljson/tomljson -cmd/tomltestgen/tomltestgen diff --git a/tools/vendor/github.com/pelletier/go-toml/CONTRIBUTING.md b/tools/vendor/github.com/pelletier/go-toml/CONTRIBUTING.md deleted file mode 100644 index 98b9893d3..000000000 --- a/tools/vendor/github.com/pelletier/go-toml/CONTRIBUTING.md +++ /dev/null @@ -1,132 +0,0 @@ -## Contributing - -Thank you for your interest in go-toml! We appreciate you considering -contributing to go-toml! - -The main goal is the project is to provide an easy-to-use TOML -implementation for Go that gets the job done and gets out of your way – -dealing with TOML is probably not the central piece of your project. - -As the single maintainer of go-toml, time is scarce. All help, big or -small, is more than welcomed! - -### Ask questions - -Any question you may have, somebody else might have it too. Always feel -free to ask them on the [issues tracker][issues-tracker]. We will try to -answer them as clearly and quickly as possible, time permitting. - -Asking questions also helps us identify areas where the documentation needs -improvement, or new features that weren't envisioned before. Sometimes, a -seemingly innocent question leads to the fix of a bug. Don't hesitate and -ask away! - -### Improve the documentation - -The best way to share your knowledge and experience with go-toml is to -improve the documentation. Fix a typo, clarify an interface, add an -example, anything goes! - -The documentation is present in the [README][readme] and thorough the -source code. On release, it gets updated on [pkg.go.dev][pkg.go.dev]. To make a -change to the documentation, create a pull request with your proposed -changes. For simple changes like that, the easiest way to go is probably -the "Fork this project and edit the file" button on Github, displayed at -the top right of the file. Unless it's a trivial change (for example a -typo), provide a little bit of context in your pull request description or -commit message. - -### Report a bug - -Found a bug! Sorry to hear that :(. Help us and other track them down and -fix by reporting it. [File a new bug report][bug-report] on the [issues -tracker][issues-tracker]. The template should provide enough guidance on -what to include. When in doubt: add more details! By reducing ambiguity and -providing more information, it decreases back and forth and saves everyone -time. - -### Code changes - -Want to contribute a patch? Very happy to hear that! - -First, some high-level rules: - -* A short proposal with some POC code is better than a lengthy piece of - text with no code. Code speaks louder than words. -* No backward-incompatible patch will be accepted unless discussed. - Sometimes it's hard, and Go's lack of versioning by default does not - help, but we try not to break people's programs unless we absolutely have - to. -* If you are writing a new feature or extending an existing one, make sure - to write some documentation. -* Bug fixes need to be accompanied with regression tests. -* New code needs to be tested. -* Your commit messages need to explain why the change is needed, even if - already included in the PR description. 
- -It does sound like a lot, but those best practices are here to save time -overall and continuously improve the quality of the project, which is -something everyone benefits from. - -#### Get started - -The fairly standard code contribution process looks like that: - -1. [Fork the project][fork]. -2. Make your changes, commit on any branch you like. -3. [Open up a pull request][pull-request] -4. Review, potential ask for changes. -5. Merge. You're in! - -Feel free to ask for help! You can create draft pull requests to gather -some early feedback! - -#### Run the tests - -You can run tests for go-toml using Go's test tool: `go test ./...`. -When creating a pull requests, all tests will be ran on Linux on a few Go -versions (Travis CI), and on Windows using the latest Go version -(AppVeyor). - -#### Style - -Try to look around and follow the same format and structure as the rest of -the code. We enforce using `go fmt` on the whole code base. - ---- - -### Maintainers-only - -#### Merge pull request - -Checklist: - -* Passing CI. -* Does not introduce backward-incompatible changes (unless discussed). -* Has relevant doc changes. -* Has relevant unit tests. - -1. Merge using "squash and merge". -2. Make sure to edit the commit message to keep all the useful information - nice and clean. -3. Make sure the commit title is clear and contains the PR number (#123). - -#### New release - -1. Go to [releases][releases]. Click on "X commits to master since this - release". -2. Make note of all the changes. Look for backward incompatible changes, - new features, and bug fixes. -3. Pick the new version using the above and semver. -4. Create a [new release][new-release]. -5. Follow the same format as [1.1.0][release-110]. - -[issues-tracker]: https://github.com/pelletier/go-toml/issues -[bug-report]: https://github.com/pelletier/go-toml/issues/new?template=bug_report.md -[pkg.go.dev]: https://pkg.go.dev/github.com/pelletier/go-toml -[readme]: ./README.md -[fork]: https://help.github.com/articles/fork-a-repo -[pull-request]: https://help.github.com/en/articles/creating-a-pull-request -[releases]: https://github.com/pelletier/go-toml/releases -[new-release]: https://github.com/pelletier/go-toml/releases/new -[release-110]: https://github.com/pelletier/go-toml/releases/tag/v1.1.0 diff --git a/tools/vendor/github.com/pelletier/go-toml/Dockerfile b/tools/vendor/github.com/pelletier/go-toml/Dockerfile deleted file mode 100644 index fffdb0166..000000000 --- a/tools/vendor/github.com/pelletier/go-toml/Dockerfile +++ /dev/null @@ -1,11 +0,0 @@ -FROM golang:1.12-alpine3.9 as builder -WORKDIR /go/src/github.com/pelletier/go-toml -COPY . . -ENV CGO_ENABLED=0 -ENV GOOS=linux -RUN go install ./... - -FROM scratch -COPY --from=builder /go/bin/tomll /usr/bin/tomll -COPY --from=builder /go/bin/tomljson /usr/bin/tomljson -COPY --from=builder /go/bin/jsontoml /usr/bin/jsontoml diff --git a/tools/vendor/github.com/pelletier/go-toml/LICENSE b/tools/vendor/github.com/pelletier/go-toml/LICENSE deleted file mode 100644 index f414553c2..000000000 --- a/tools/vendor/github.com/pelletier/go-toml/LICENSE +++ /dev/null @@ -1,247 +0,0 @@ -The bulk of github.com/pelletier/go-toml is distributed under the MIT license -(see below), with the exception of localtime.go and localtime.test.go. -Those two files have been copied over from Google's civil library at revision -ed46f5086358513cf8c25f8e3f022cb838a49d66, and are distributed under the Apache -2.0 license (see below). 
- - -github.com/pelletier/go-toml: - - -The MIT License (MIT) - -Copyright (c) 2013 - 2021 Thomas Pelletier, Eric Anderton - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - -localtime.go, localtime_test.go: - -Originals: - https://raw.githubusercontent.com/googleapis/google-cloud-go/ed46f5086358513cf8c25f8e3f022cb838a49d66/civil/civil.go - https://raw.githubusercontent.com/googleapis/google-cloud-go/ed46f5086358513cf8c25f8e3f022cb838a49d66/civil/civil_test.go -Changes: - * Renamed files from civil* to localtime*. - * Package changed from civil to toml. - * 'Local' prefix added to all structs. -License: - https://raw.githubusercontent.com/googleapis/google-cloud-go/ed46f5086358513cf8c25f8e3f022cb838a49d66/LICENSE - - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/tools/vendor/github.com/pelletier/go-toml/Makefile b/tools/vendor/github.com/pelletier/go-toml/Makefile deleted file mode 100644 index 9e4503aea..000000000 --- a/tools/vendor/github.com/pelletier/go-toml/Makefile +++ /dev/null @@ -1,29 +0,0 @@ -export CGO_ENABLED=0 -go := go -go.goos ?= $(shell echo `go version`|cut -f4 -d ' '|cut -d '/' -f1) -go.goarch ?= $(shell echo `go version`|cut -f4 -d ' '|cut -d '/' -f2) - -out.tools := tomll tomljson jsontoml -out.dist := $(out.tools:=_$(go.goos)_$(go.goarch).tar.xz) -sources := $(wildcard **/*.go) - - -.PHONY: -tools: $(out.tools) - -$(out.tools): $(sources) - GOOS=$(go.goos) GOARCH=$(go.goarch) $(go) build ./cmd/$@ - -.PHONY: -dist: $(out.dist) - -$(out.dist):%_$(go.goos)_$(go.goarch).tar.xz: % - if [ "$(go.goos)" = "windows" ]; then \ - tar -cJf $@ $^.exe; \ - else \ - tar -cJf $@ $^; \ - fi - -.PHONY: -clean: - rm -rf $(out.tools) $(out.dist) diff --git a/tools/vendor/github.com/pelletier/go-toml/PULL_REQUEST_TEMPLATE.md b/tools/vendor/github.com/pelletier/go-toml/PULL_REQUEST_TEMPLATE.md deleted file mode 100644 index 041cdc4a2..000000000 --- a/tools/vendor/github.com/pelletier/go-toml/PULL_REQUEST_TEMPLATE.md +++ /dev/null @@ -1,5 +0,0 @@ -**Issue:** add link to pelletier/go-toml issue here - -Explanation of what this pull request does. - -More detailed description of the decisions being made and the reasons why (if the patch is non-trivial). diff --git a/tools/vendor/github.com/pelletier/go-toml/README.md b/tools/vendor/github.com/pelletier/go-toml/README.md deleted file mode 100644 index 7399e04bf..000000000 --- a/tools/vendor/github.com/pelletier/go-toml/README.md +++ /dev/null @@ -1,176 +0,0 @@ -# go-toml - -Go library for the [TOML](https://toml.io/) format. - -This library supports TOML version -[v1.0.0-rc.3](https://toml.io/en/v1.0.0-rc.3) - -[![Go Reference](https://pkg.go.dev/badge/github.com/pelletier/go-toml.svg)](https://pkg.go.dev/github.com/pelletier/go-toml) -[![license](https://img.shields.io/github/license/pelletier/go-toml.svg)](https://github.com/pelletier/go-toml/blob/master/LICENSE) -[![Build Status](https://dev.azure.com/pelletierthomas/go-toml-ci/_apis/build/status/pelletier.go-toml?branchName=master)](https://dev.azure.com/pelletierthomas/go-toml-ci/_build/latest?definitionId=1&branchName=master) -[![codecov](https://codecov.io/gh/pelletier/go-toml/branch/master/graph/badge.svg)](https://codecov.io/gh/pelletier/go-toml) -[![Go Report Card](https://goreportcard.com/badge/github.com/pelletier/go-toml)](https://goreportcard.com/report/github.com/pelletier/go-toml) -[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fpelletier%2Fgo-toml.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fpelletier%2Fgo-toml?ref=badge_shield) - - -## Development status - -**ℹ️ Consider go-toml v2!** - -The next version of go-toml is in [active development][v2-dev], and -[nearing completion][v2-map]. - -Though technically in beta, v2 is already more tested, [fixes bugs][v1-bugs], -and [much faster][v2-bench]. If you only need reading and writing TOML documents -(majority of cases), those features are implemented and the API unlikely to -change. - -The remaining features will be added shortly. While pull-requests are welcome on -v1, no active development is expected on it. When v2.0.0 is released, v1 will be -deprecated. 
- -👉 [go-toml v2][v2] - -[v2]: https://github.com/pelletier/go-toml/tree/v2 -[v2-map]: https://github.com/pelletier/go-toml/discussions/506 -[v2-dev]: https://github.com/pelletier/go-toml/tree/v2 -[v1-bugs]: https://github.com/pelletier/go-toml/issues?q=is%3Aissue+is%3Aopen+label%3Av2-fixed -[v2-bench]: https://github.com/pelletier/go-toml/tree/v2#benchmarks - -## Features - -Go-toml provides the following features for using data parsed from TOML documents: - -* Load TOML documents from files and string data -* Easily navigate TOML structure using Tree -* Marshaling and unmarshaling to and from data structures -* Line & column position data for all parsed elements -* [Query support similar to JSON-Path](query/) -* Syntax errors contain line and column numbers - -## Import - -```go -import "github.com/pelletier/go-toml" -``` - -## Usage example - -Read a TOML document: - -```go -config, _ := toml.Load(` -[postgres] -user = "pelletier" -password = "mypassword"`) -// retrieve data directly -user := config.Get("postgres.user").(string) - -// or using an intermediate object -postgresConfig := config.Get("postgres").(*toml.Tree) -password := postgresConfig.Get("password").(string) -``` - -Or use Unmarshal: - -```go -type Postgres struct { - User string - Password string -} -type Config struct { - Postgres Postgres -} - -doc := []byte(` -[Postgres] -User = "pelletier" -Password = "mypassword"`) - -config := Config{} -toml.Unmarshal(doc, &config) -fmt.Println("user=", config.Postgres.User) -``` - -Or use a query: - -```go -// use a query to gather elements without walking the tree -q, _ := query.Compile("$..[user,password]") -results := q.Execute(config) -for ii, item := range results.Values() { - fmt.Printf("Query result %d: %v\n", ii, item) -} -``` - -## Documentation - -The documentation and additional examples are available at -[pkg.go.dev](https://pkg.go.dev/github.com/pelletier/go-toml). - -## Tools - -Go-toml provides three handy command line tools: - -* `tomll`: Reads TOML files and lints them. - - ``` - go install github.com/pelletier/go-toml/cmd/tomll - tomll --help - ``` -* `tomljson`: Reads a TOML file and outputs its JSON representation. - - ``` - go install github.com/pelletier/go-toml/cmd/tomljson - tomljson --help - ``` - - * `jsontoml`: Reads a JSON file and outputs a TOML representation. - - ``` - go install github.com/pelletier/go-toml/cmd/jsontoml - jsontoml --help - ``` - -### Docker image - -Those tools are also available as a Docker image from -[dockerhub](https://hub.docker.com/r/pelletier/go-toml). For example, to -use `tomljson`: - -``` -docker run -v $PWD:/workdir pelletier/go-toml tomljson /workdir/example.toml -``` - -Only master (`latest`) and tagged versions are published to dockerhub. You -can build your own image as usual: - -``` -docker build -t go-toml . -``` - -## Contribute - -Feel free to report bugs and patches using GitHub's pull requests system on -[pelletier/go-toml](https://github.com/pelletier/go-toml). Any feedback would be -much appreciated! - -### Run tests - -`go test ./...` - -### Fuzzing - -The script `./fuzz.sh` is available to -run [go-fuzz](https://github.com/dvyukov/go-fuzz) on go-toml. - -## Versioning - -Go-toml follows [Semantic Versioning](http://semver.org/). The supported version -of [TOML](https://github.com/toml-lang/toml) is indicated at the beginning of -this document. The last two major versions of Go are supported -(see [Go Release Policy](https://golang.org/doc/devel/release.html#policy)). 
- -## License - -The MIT License (MIT) + Apache 2.0. Read [LICENSE](LICENSE). diff --git a/tools/vendor/github.com/pelletier/go-toml/SECURITY.md b/tools/vendor/github.com/pelletier/go-toml/SECURITY.md deleted file mode 100644 index b2f21cfc9..000000000 --- a/tools/vendor/github.com/pelletier/go-toml/SECURITY.md +++ /dev/null @@ -1,19 +0,0 @@ -# Security Policy - -## Supported Versions - -Use this section to tell people about which versions of your project are -currently being supported with security updates. - -| Version | Supported | -| ---------- | ------------------ | -| Latest 2.x | :white_check_mark: | -| All 1.x | :x: | -| All 0.x | :x: | - -## Reporting a Vulnerability - -Email a vulnerability report to `security@pelletier.codes`. Make sure to include -as many details as possible to reproduce the vulnerability. This is a -side-project: I will try to get back to you as quickly as possible, time -permitting in my personal life. Providing a working patch helps very much! diff --git a/tools/vendor/github.com/pelletier/go-toml/azure-pipelines.yml b/tools/vendor/github.com/pelletier/go-toml/azure-pipelines.yml deleted file mode 100644 index 4af198b4d..000000000 --- a/tools/vendor/github.com/pelletier/go-toml/azure-pipelines.yml +++ /dev/null @@ -1,188 +0,0 @@ -trigger: -- master - -stages: -- stage: run_checks - displayName: "Check" - dependsOn: [] - jobs: - - job: fmt - displayName: "fmt" - pool: - vmImage: ubuntu-latest - steps: - - task: GoTool@0 - displayName: "Install Go 1.16" - inputs: - version: "1.16" - - task: Go@0 - displayName: "go fmt ./..." - inputs: - command: 'custom' - customCommand: 'fmt' - arguments: './...' - - job: coverage - displayName: "coverage" - pool: - vmImage: ubuntu-latest - steps: - - task: GoTool@0 - displayName: "Install Go 1.16" - inputs: - version: "1.16" - - task: Go@0 - displayName: "Generate coverage" - inputs: - command: 'test' - arguments: "-race -coverprofile=coverage.txt -covermode=atomic" - - task: Bash@3 - inputs: - targetType: 'inline' - script: 'bash <(curl -s https://codecov.io/bash) -t ${CODECOV_TOKEN}' - env: - CODECOV_TOKEN: $(CODECOV_TOKEN) - - job: benchmark - displayName: "benchmark" - pool: - vmImage: ubuntu-latest - steps: - - task: GoTool@0 - displayName: "Install Go 1.16" - inputs: - version: "1.16" - - script: echo "##vso[task.setvariable variable=PATH]${PATH}:/home/vsts/go/bin/" - - task: Bash@3 - inputs: - filePath: './benchmark.sh' - arguments: "master $(Build.Repository.Uri)" - - - job: go_unit_tests - displayName: "unit tests" - strategy: - matrix: - linux 1.16: - goVersion: '1.16' - imageName: 'ubuntu-latest' - mac 1.16: - goVersion: '1.16' - imageName: 'macOS-latest' - windows 1.16: - goVersion: '1.16' - imageName: 'windows-latest' - linux 1.15: - goVersion: '1.15' - imageName: 'ubuntu-latest' - mac 1.15: - goVersion: '1.15' - imageName: 'macOS-latest' - windows 1.15: - goVersion: '1.15' - imageName: 'windows-latest' - pool: - vmImage: $(imageName) - steps: - - task: GoTool@0 - displayName: "Install Go $(goVersion)" - inputs: - version: $(goVersion) - - task: Go@0 - displayName: "go test ./..." - inputs: - command: 'test' - arguments: './...' 
-- stage: build_binaries - displayName: "Build binaries" - dependsOn: run_checks - jobs: - - job: build_binary - displayName: "Build binary" - strategy: - matrix: - linux_amd64: - GOOS: linux - GOARCH: amd64 - darwin_amd64: - GOOS: darwin - GOARCH: amd64 - windows_amd64: - GOOS: windows - GOARCH: amd64 - pool: - vmImage: ubuntu-latest - steps: - - task: GoTool@0 - displayName: "Install Go" - inputs: - version: 1.16 - - task: Bash@3 - inputs: - targetType: inline - script: "make dist" - env: - go.goos: $(GOOS) - go.goarch: $(GOARCH) - - task: CopyFiles@2 - inputs: - sourceFolder: '$(Build.SourcesDirectory)' - contents: '*.tar.xz' - TargetFolder: '$(Build.ArtifactStagingDirectory)' - - task: PublishBuildArtifacts@1 - inputs: - pathtoPublish: '$(Build.ArtifactStagingDirectory)' - artifactName: binaries -- stage: build_binaries_manifest - displayName: "Build binaries manifest" - dependsOn: build_binaries - jobs: - - job: build_manifest - displayName: "Build binaries manifest" - steps: - - task: DownloadBuildArtifacts@0 - inputs: - buildType: 'current' - downloadType: 'single' - artifactName: 'binaries' - downloadPath: '$(Build.SourcesDirectory)' - - task: Bash@3 - inputs: - targetType: inline - script: "cd binaries && sha256sum --binary *.tar.xz | tee $(Build.ArtifactStagingDirectory)/sha256sums.txt" - - task: PublishBuildArtifacts@1 - inputs: - pathtoPublish: '$(Build.ArtifactStagingDirectory)' - artifactName: manifest - -- stage: build_docker_image - displayName: "Build Docker image" - dependsOn: run_checks - jobs: - - job: build - displayName: "Build" - pool: - vmImage: ubuntu-latest - steps: - - task: Docker@2 - inputs: - command: 'build' - Dockerfile: 'Dockerfile' - buildContext: '.' - addPipelineData: false - -- stage: publish_docker_image - displayName: "Publish Docker image" - dependsOn: build_docker_image - condition: and(succeeded(), eq(variables['Build.SourceBranchName'], 'master')) - jobs: - - job: publish - displayName: "Publish" - pool: - vmImage: ubuntu-latest - steps: - - task: Docker@2 - inputs: - containerRegistry: 'DockerHub' - repository: 'pelletier/go-toml' - command: 'buildAndPush' - Dockerfile: 'Dockerfile' - buildContext: '.' - tags: 'latest' diff --git a/tools/vendor/github.com/pelletier/go-toml/benchmark.sh b/tools/vendor/github.com/pelletier/go-toml/benchmark.sh deleted file mode 100644 index a69d3040f..000000000 --- a/tools/vendor/github.com/pelletier/go-toml/benchmark.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/bin/bash - -set -ex - -reference_ref=${1:-master} -reference_git=${2:-.} - -if ! `hash benchstat 2>/dev/null`; then - echo "Installing benchstat" - go get golang.org/x/perf/cmd/benchstat -fi - -tempdir=`mktemp -d /tmp/go-toml-benchmark-XXXXXX` -ref_tempdir="${tempdir}/ref" -ref_benchmark="${ref_tempdir}/benchmark-`echo -n ${reference_ref}|tr -s '/' '-'`.txt" -local_benchmark="`pwd`/benchmark-local.txt" - -echo "=== ${reference_ref} (${ref_tempdir})" -git clone ${reference_git} ${ref_tempdir} >/dev/null 2>/dev/null -pushd ${ref_tempdir} >/dev/null -git checkout ${reference_ref} >/dev/null 2>/dev/null -go test -bench=. -benchmem | tee ${ref_benchmark} -cd benchmark -go test -bench=. -benchmem | tee -a ${ref_benchmark} -popd >/dev/null - -echo "" -echo "=== local" -go test -bench=. -benchmem | tee ${local_benchmark} -cd benchmark -go test -bench=. 
-benchmem | tee -a ${local_benchmark} - -echo "" -echo "=== diff" -benchstat -delta-test=none ${ref_benchmark} ${local_benchmark} diff --git a/tools/vendor/github.com/pelletier/go-toml/doc.go b/tools/vendor/github.com/pelletier/go-toml/doc.go deleted file mode 100644 index a1406a32b..000000000 --- a/tools/vendor/github.com/pelletier/go-toml/doc.go +++ /dev/null @@ -1,23 +0,0 @@ -// Package toml is a TOML parser and manipulation library. -// -// This version supports the specification as described in -// https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.5.0.md -// -// Marshaling -// -// Go-toml can marshal and unmarshal TOML documents from and to data -// structures. -// -// TOML document as a tree -// -// Go-toml can operate on a TOML document as a tree. Use one of the Load* -// functions to parse TOML data and obtain a Tree instance, then one of its -// methods to manipulate the tree. -// -// JSONPath-like queries -// -// The package github.com/pelletier/go-toml/query implements a system -// similar to JSONPath to quickly retrieve elements of a TOML document using a -// single expression. See the package documentation for more information. -// -package toml diff --git a/tools/vendor/github.com/pelletier/go-toml/example-crlf.toml b/tools/vendor/github.com/pelletier/go-toml/example-crlf.toml deleted file mode 100644 index 780d9c68f..000000000 --- a/tools/vendor/github.com/pelletier/go-toml/example-crlf.toml +++ /dev/null @@ -1,30 +0,0 @@ -# This is a TOML document. Boom. - -title = "TOML Example" - -[owner] -name = "Tom Preston-Werner" -organization = "GitHub" -bio = "GitHub Cofounder & CEO\nLikes tater tots and beer." -dob = 1979-05-27T07:32:00Z # First class dates? Why not? - -[database] -server = "192.168.1.1" -ports = [ 8001, 8001, 8002 ] -connection_max = 5000 -enabled = true - -[servers] - - # You can indent as you please. Tabs or spaces. TOML don't care. - [servers.alpha] - ip = "10.0.0.1" - dc = "eqdc10" - - [servers.beta] - ip = "10.0.0.2" - dc = "eqdc10" - -[clients] -data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it -score = 4e-08 # to make sure leading zeroes in exponent parts of floats are supported \ No newline at end of file diff --git a/tools/vendor/github.com/pelletier/go-toml/example.toml b/tools/vendor/github.com/pelletier/go-toml/example.toml deleted file mode 100644 index f45bf88b8..000000000 --- a/tools/vendor/github.com/pelletier/go-toml/example.toml +++ /dev/null @@ -1,30 +0,0 @@ -# This is a TOML document. Boom. - -title = "TOML Example" - -[owner] -name = "Tom Preston-Werner" -organization = "GitHub" -bio = "GitHub Cofounder & CEO\nLikes tater tots and beer." -dob = 1979-05-27T07:32:00Z # First class dates? Why not? - -[database] -server = "192.168.1.1" -ports = [ 8001, 8001, 8002 ] -connection_max = 5000 -enabled = true - -[servers] - - # You can indent as you please. Tabs or spaces. TOML don't care. 
- [servers.alpha] - ip = "10.0.0.1" - dc = "eqdc10" - - [servers.beta] - ip = "10.0.0.2" - dc = "eqdc10" - -[clients] -data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it -score = 4e-08 # to make sure leading zeroes in exponent parts of floats are supported \ No newline at end of file diff --git a/tools/vendor/github.com/pelletier/go-toml/fuzz.go b/tools/vendor/github.com/pelletier/go-toml/fuzz.go deleted file mode 100644 index 14570c8d3..000000000 --- a/tools/vendor/github.com/pelletier/go-toml/fuzz.go +++ /dev/null @@ -1,31 +0,0 @@ -// +build gofuzz - -package toml - -func Fuzz(data []byte) int { - tree, err := LoadBytes(data) - if err != nil { - if tree != nil { - panic("tree must be nil if there is an error") - } - return 0 - } - - str, err := tree.ToTomlString() - if err != nil { - if str != "" { - panic(`str must be "" if there is an error`) - } - panic(err) - } - - tree, err = Load(str) - if err != nil { - if tree != nil { - panic("tree must be nil if there is an error") - } - return 0 - } - - return 1 -} diff --git a/tools/vendor/github.com/pelletier/go-toml/fuzz.sh b/tools/vendor/github.com/pelletier/go-toml/fuzz.sh deleted file mode 100644 index 3204b4c44..000000000 --- a/tools/vendor/github.com/pelletier/go-toml/fuzz.sh +++ /dev/null @@ -1,15 +0,0 @@ -#! /bin/sh -set -eu - -go get github.com/dvyukov/go-fuzz/go-fuzz -go get github.com/dvyukov/go-fuzz/go-fuzz-build - -if [ ! -e toml-fuzz.zip ]; then - go-fuzz-build github.com/pelletier/go-toml -fi - -rm -fr fuzz -mkdir -p fuzz/corpus -cp *.toml fuzz/corpus - -go-fuzz -bin=toml-fuzz.zip -workdir=fuzz diff --git a/tools/vendor/github.com/pelletier/go-toml/keysparsing.go b/tools/vendor/github.com/pelletier/go-toml/keysparsing.go deleted file mode 100644 index e091500b2..000000000 --- a/tools/vendor/github.com/pelletier/go-toml/keysparsing.go +++ /dev/null @@ -1,112 +0,0 @@ -// Parsing keys handling both bare and quoted keys. - -package toml - -import ( - "errors" - "fmt" -) - -// Convert the bare key group string to an array. -// The input supports double quotation and single quotation, -// but escape sequences are not supported. Lexers must unescape them beforehand. -func parseKey(key string) ([]string, error) { - runes := []rune(key) - var groups []string - - if len(key) == 0 { - return nil, errors.New("empty key") - } - - idx := 0 - for idx < len(runes) { - for ; idx < len(runes) && isSpace(runes[idx]); idx++ { - // skip leading whitespace - } - if idx >= len(runes) { - break - } - r := runes[idx] - if isValidBareChar(r) { - // parse bare key - startIdx := idx - endIdx := -1 - idx++ - for idx < len(runes) { - r = runes[idx] - if isValidBareChar(r) { - idx++ - } else if r == '.' { - endIdx = idx - break - } else if isSpace(r) { - endIdx = idx - for ; idx < len(runes) && isSpace(runes[idx]); idx++ { - // skip trailing whitespace - } - if idx < len(runes) && runes[idx] != '.' 
{ - return nil, fmt.Errorf("invalid key character after whitespace: %c", runes[idx]) - } - break - } else { - return nil, fmt.Errorf("invalid bare key character: %c", r) - } - } - if endIdx == -1 { - endIdx = idx - } - groups = append(groups, string(runes[startIdx:endIdx])) - } else if r == '\'' { - // parse single quoted key - idx++ - startIdx := idx - for { - if idx >= len(runes) { - return nil, fmt.Errorf("unclosed single-quoted key") - } - r = runes[idx] - if r == '\'' { - groups = append(groups, string(runes[startIdx:idx])) - idx++ - break - } - idx++ - } - } else if r == '"' { - // parse double quoted key - idx++ - startIdx := idx - for { - if idx >= len(runes) { - return nil, fmt.Errorf("unclosed double-quoted key") - } - r = runes[idx] - if r == '"' { - groups = append(groups, string(runes[startIdx:idx])) - idx++ - break - } - idx++ - } - } else if r == '.' { - idx++ - if idx >= len(runes) { - return nil, fmt.Errorf("unexpected end of key") - } - r = runes[idx] - if !isValidBareChar(r) && r != '\'' && r != '"' && r != ' ' { - return nil, fmt.Errorf("expecting key part after dot") - } - } else { - return nil, fmt.Errorf("invalid key character: %c", r) - } - } - if len(groups) == 0 { - return nil, fmt.Errorf("empty key") - } - return groups, nil -} - -func isValidBareChar(r rune) bool { - return isAlphanumeric(r) || r == '-' || isDigit(r) -} diff --git a/tools/vendor/github.com/pelletier/go-toml/lexer.go b/tools/vendor/github.com/pelletier/go-toml/lexer.go deleted file mode 100644 index 313908e3e..000000000 --- a/tools/vendor/github.com/pelletier/go-toml/lexer.go +++ /dev/null @@ -1,1031 +0,0 @@ -// TOML lexer. -// -// Written using the principles developed by Rob Pike in -// http://www.youtube.com/watch?v=HxaD_trXwRE - -package toml - -import ( - "bytes" - "errors" - "fmt" - "strconv" - "strings" -) - -// Define state functions -type tomlLexStateFn func() tomlLexStateFn - -// Define lexer -type tomlLexer struct { - inputIdx int - input []rune // Textual source - currentTokenStart int - currentTokenStop int - tokens []token - brackets []rune - line int - col int - endbufferLine int - endbufferCol int -} - -// Basic read operations on input - -func (l *tomlLexer) read() rune { - r := l.peek() - if r == '\n' { - l.endbufferLine++ - l.endbufferCol = 1 - } else { - l.endbufferCol++ - } - l.inputIdx++ - return r -} - -func (l *tomlLexer) next() rune { - r := l.read() - - if r != eof { - l.currentTokenStop++ - } - return r -} - -func (l *tomlLexer) ignore() { - l.currentTokenStart = l.currentTokenStop - l.line = l.endbufferLine - l.col = l.endbufferCol -} - -func (l *tomlLexer) skip() { - l.next() - l.ignore() -} - -func (l *tomlLexer) fastForward(n int) { - for i := 0; i < n; i++ { - l.next() - } -} - -func (l *tomlLexer) emitWithValue(t tokenType, value string) { - l.tokens = append(l.tokens, token{ - Position: Position{l.line, l.col}, - typ: t, - val: value, - }) - l.ignore() -} - -func (l *tomlLexer) emit(t tokenType) { - l.emitWithValue(t, string(l.input[l.currentTokenStart:l.currentTokenStop])) -} - -func (l *tomlLexer) peek() rune { - if l.inputIdx >= len(l.input) { - return eof - } - return l.input[l.inputIdx] -} - -func (l *tomlLexer) peekString(size int) string { - maxIdx := len(l.input) - upperIdx := l.inputIdx + size // FIXME: potential overflow - if upperIdx > maxIdx { - upperIdx = maxIdx - } - return string(l.input[l.inputIdx:upperIdx]) -} - -func (l *tomlLexer) follow(next string) bool { - return next == l.peekString(len(next)) -} - -// Error management - -func (l 
*tomlLexer) errorf(format string, args ...interface{}) tomlLexStateFn { - l.tokens = append(l.tokens, token{ - Position: Position{l.line, l.col}, - typ: tokenError, - val: fmt.Sprintf(format, args...), - }) - return nil -} - -// State functions - -func (l *tomlLexer) lexVoid() tomlLexStateFn { - for { - next := l.peek() - switch next { - case '}': // after '{' - return l.lexRightCurlyBrace - case '[': - return l.lexTableKey - case '#': - return l.lexComment(l.lexVoid) - case '=': - return l.lexEqual - case '\r': - fallthrough - case '\n': - l.skip() - continue - } - - if isSpace(next) { - l.skip() - } - - if isKeyStartChar(next) { - return l.lexKey - } - - if next == eof { - l.next() - break - } - } - - l.emit(tokenEOF) - return nil -} - -func (l *tomlLexer) lexRvalue() tomlLexStateFn { - for { - next := l.peek() - switch next { - case '.': - return l.errorf("cannot start float with a dot") - case '=': - return l.lexEqual - case '[': - return l.lexLeftBracket - case ']': - return l.lexRightBracket - case '{': - return l.lexLeftCurlyBrace - case '}': - return l.lexRightCurlyBrace - case '#': - return l.lexComment(l.lexRvalue) - case '"': - return l.lexString - case '\'': - return l.lexLiteralString - case ',': - return l.lexComma - case '\r': - fallthrough - case '\n': - l.skip() - if len(l.brackets) > 0 && l.brackets[len(l.brackets)-1] == '[' { - return l.lexRvalue - } - return l.lexVoid - } - - if l.follow("true") { - return l.lexTrue - } - - if l.follow("false") { - return l.lexFalse - } - - if l.follow("inf") { - return l.lexInf - } - - if l.follow("nan") { - return l.lexNan - } - - if isSpace(next) { - l.skip() - continue - } - - if next == eof { - l.next() - break - } - - if next == '+' || next == '-' { - return l.lexNumber - } - - if isDigit(next) { - return l.lexDateTimeOrNumber - } - - return l.errorf("no value can start with %c", next) - } - - l.emit(tokenEOF) - return nil -} - -func (l *tomlLexer) lexDateTimeOrNumber() tomlLexStateFn { - // Could be either a date/time, or a digit. - // The options for date/times are: - // YYYY-... => date or date-time - // HH:... => time - // Anything else should be a number. - - lookAhead := l.peekString(5) - if len(lookAhead) < 3 { - return l.lexNumber() - } - - for idx, r := range lookAhead { - if !isDigit(r) { - if idx == 2 && r == ':' { - return l.lexDateTimeOrTime() - } - if idx == 4 && r == '-' { - return l.lexDateTimeOrTime() - } - return l.lexNumber() - } - } - return l.lexNumber() -} - -func (l *tomlLexer) lexLeftCurlyBrace() tomlLexStateFn { - l.next() - l.emit(tokenLeftCurlyBrace) - l.brackets = append(l.brackets, '{') - return l.lexVoid -} - -func (l *tomlLexer) lexRightCurlyBrace() tomlLexStateFn { - l.next() - l.emit(tokenRightCurlyBrace) - if len(l.brackets) == 0 || l.brackets[len(l.brackets)-1] != '{' { - return l.errorf("cannot have '}' here") - } - l.brackets = l.brackets[:len(l.brackets)-1] - return l.lexRvalue -} - -func (l *tomlLexer) lexDateTimeOrTime() tomlLexStateFn { - // Example matches: - // 1979-05-27T07:32:00Z - // 1979-05-27T00:32:00-07:00 - // 1979-05-27T00:32:00.999999-07:00 - // 1979-05-27 07:32:00Z - // 1979-05-27 00:32:00-07:00 - // 1979-05-27 00:32:00.999999-07:00 - // 1979-05-27T07:32:00 - // 1979-05-27T00:32:00.999999 - // 1979-05-27 07:32:00 - // 1979-05-27 00:32:00.999999 - // 1979-05-27 - // 07:32:00 - // 00:32:00.999999 - - // we already know those two are digits - l.next() - l.next() - - // Got 2 digits. At that point it could be either a time or a date(-time). 
- - r := l.next() - if r == ':' { - return l.lexTime() - } - - return l.lexDateTime() -} - -func (l *tomlLexer) lexDateTime() tomlLexStateFn { - // This state accepts an offset date-time, a local date-time, or a local date. - // - // v--- cursor - // 1979-05-27T07:32:00Z - // 1979-05-27T00:32:00-07:00 - // 1979-05-27T00:32:00.999999-07:00 - // 1979-05-27 07:32:00Z - // 1979-05-27 00:32:00-07:00 - // 1979-05-27 00:32:00.999999-07:00 - // 1979-05-27T07:32:00 - // 1979-05-27T00:32:00.999999 - // 1979-05-27 07:32:00 - // 1979-05-27 00:32:00.999999 - // 1979-05-27 - - // date - - // already checked by lexRvalue - l.next() // digit - l.next() // - - - for i := 0; i < 2; i++ { - r := l.next() - if !isDigit(r) { - return l.errorf("invalid month digit in date: %c", r) - } - } - - r := l.next() - if r != '-' { - return l.errorf("expected - to separate month of a date, not %c", r) - } - - for i := 0; i < 2; i++ { - r := l.next() - if !isDigit(r) { - return l.errorf("invalid day digit in date: %c", r) - } - } - - l.emit(tokenLocalDate) - - r = l.peek() - - if r == eof { - - return l.lexRvalue - } - - if r != ' ' && r != 'T' { - return l.errorf("incorrect date/time separation character: %c", r) - } - - if r == ' ' { - lookAhead := l.peekString(3)[1:] - if len(lookAhead) < 2 { - return l.lexRvalue - } - for _, r := range lookAhead { - if !isDigit(r) { - return l.lexRvalue - } - } - } - - l.skip() // skip the T or ' ' - - // time - - for i := 0; i < 2; i++ { - r := l.next() - if !isDigit(r) { - return l.errorf("invalid hour digit in time: %c", r) - } - } - - r = l.next() - if r != ':' { - return l.errorf("time hour/minute separator should be :, not %c", r) - } - - for i := 0; i < 2; i++ { - r := l.next() - if !isDigit(r) { - return l.errorf("invalid minute digit in time: %c", r) - } - } - - r = l.next() - if r != ':' { - return l.errorf("time minute/second separator should be :, not %c", r) - } - - for i := 0; i < 2; i++ { - r := l.next() - if !isDigit(r) { - return l.errorf("invalid second digit in time: %c", r) - } - } - - r = l.peek() - if r == '.' { - l.next() - r := l.next() - if !isDigit(r) { - return l.errorf("expected at least one digit in time's fraction, not %c", r) - } - - for { - r := l.peek() - if !isDigit(r) { - break - } - l.next() - } - } - - l.emit(tokenLocalTime) - - return l.lexTimeOffset - -} - -func (l *tomlLexer) lexTimeOffset() tomlLexStateFn { - // potential offset - - // Z - // -07:00 - // +07:00 - // nothing - - r := l.peek() - - if r == 'Z' { - l.next() - l.emit(tokenTimeOffset) - } else if r == '+' || r == '-' { - l.next() - - for i := 0; i < 2; i++ { - r := l.next() - if !isDigit(r) { - return l.errorf("invalid hour digit in time offset: %c", r) - } - } - - r = l.next() - if r != ':' { - return l.errorf("time offset hour/minute separator should be :, not %c", r) - } - - for i := 0; i < 2; i++ { - r := l.next() - if !isDigit(r) { - return l.errorf("invalid minute digit in time offset: %c", r) - } - } - - l.emit(tokenTimeOffset) - } - - return l.lexRvalue -} - -func (l *tomlLexer) lexTime() tomlLexStateFn { - // v--- cursor - // 07:32:00 - // 00:32:00.999999 - - for i := 0; i < 2; i++ { - r := l.next() - if !isDigit(r) { - return l.errorf("invalid minute digit in time: %c", r) - } - } - - r := l.next() - if r != ':' { - return l.errorf("time minute/second separator should be :, not %c", r) - } - - for i := 0; i < 2; i++ { - r := l.next() - if !isDigit(r) { - return l.errorf("invalid second digit in time: %c", r) - } - } - - r = l.peek() - if r == '.' 
{ - l.next() - r := l.next() - if !isDigit(r) { - return l.errorf("expected at least one digit in time's fraction, not %c", r) - } - - for { - r := l.peek() - if !isDigit(r) { - break - } - l.next() - } - } - - l.emit(tokenLocalTime) - return l.lexRvalue - -} - -func (l *tomlLexer) lexTrue() tomlLexStateFn { - l.fastForward(4) - l.emit(tokenTrue) - return l.lexRvalue -} - -func (l *tomlLexer) lexFalse() tomlLexStateFn { - l.fastForward(5) - l.emit(tokenFalse) - return l.lexRvalue -} - -func (l *tomlLexer) lexInf() tomlLexStateFn { - l.fastForward(3) - l.emit(tokenInf) - return l.lexRvalue -} - -func (l *tomlLexer) lexNan() tomlLexStateFn { - l.fastForward(3) - l.emit(tokenNan) - return l.lexRvalue -} - -func (l *tomlLexer) lexEqual() tomlLexStateFn { - l.next() - l.emit(tokenEqual) - return l.lexRvalue -} - -func (l *tomlLexer) lexComma() tomlLexStateFn { - l.next() - l.emit(tokenComma) - if len(l.brackets) > 0 && l.brackets[len(l.brackets)-1] == '{' { - return l.lexVoid - } - return l.lexRvalue -} - -// Parse the key and emits its value without escape sequences. -// bare keys, basic string keys and literal string keys are supported. -func (l *tomlLexer) lexKey() tomlLexStateFn { - var sb strings.Builder - - for r := l.peek(); isKeyChar(r) || r == '\n' || r == '\r'; r = l.peek() { - if r == '"' { - l.next() - str, err := l.lexStringAsString(`"`, false, true) - if err != nil { - return l.errorf(err.Error()) - } - sb.WriteString("\"") - sb.WriteString(str) - sb.WriteString("\"") - l.next() - continue - } else if r == '\'' { - l.next() - str, err := l.lexLiteralStringAsString(`'`, false) - if err != nil { - return l.errorf(err.Error()) - } - sb.WriteString("'") - sb.WriteString(str) - sb.WriteString("'") - l.next() - continue - } else if r == '\n' { - return l.errorf("keys cannot contain new lines") - } else if isSpace(r) { - var str strings.Builder - str.WriteString(" ") - - // skip trailing whitespace - l.next() - for r = l.peek(); isSpace(r); r = l.peek() { - str.WriteRune(r) - l.next() - } - // break loop if not a dot - if r != '.' { - break - } - str.WriteString(".") - // skip trailing whitespace after dot - l.next() - for r = l.peek(); isSpace(r); r = l.peek() { - str.WriteRune(r) - l.next() - } - sb.WriteString(str.String()) - continue - } else if r == '.' 
{ - // skip - } else if !isValidBareChar(r) { - return l.errorf("keys cannot contain %c character", r) - } - sb.WriteRune(r) - l.next() - } - l.emitWithValue(tokenKey, sb.String()) - return l.lexVoid -} - -func (l *tomlLexer) lexComment(previousState tomlLexStateFn) tomlLexStateFn { - return func() tomlLexStateFn { - for next := l.peek(); next != '\n' && next != eof; next = l.peek() { - if next == '\r' && l.follow("\r\n") { - break - } - l.next() - } - l.ignore() - return previousState - } -} - -func (l *tomlLexer) lexLeftBracket() tomlLexStateFn { - l.next() - l.emit(tokenLeftBracket) - l.brackets = append(l.brackets, '[') - return l.lexRvalue -} - -func (l *tomlLexer) lexLiteralStringAsString(terminator string, discardLeadingNewLine bool) (string, error) { - var sb strings.Builder - - if discardLeadingNewLine { - if l.follow("\r\n") { - l.skip() - l.skip() - } else if l.peek() == '\n' { - l.skip() - } - } - - // find end of string - for { - if l.follow(terminator) { - return sb.String(), nil - } - - next := l.peek() - if next == eof { - break - } - sb.WriteRune(l.next()) - } - - return "", errors.New("unclosed string") -} - -func (l *tomlLexer) lexLiteralString() tomlLexStateFn { - l.skip() - - // handle special case for triple-quote - terminator := "'" - discardLeadingNewLine := false - if l.follow("''") { - l.skip() - l.skip() - terminator = "'''" - discardLeadingNewLine = true - } - - str, err := l.lexLiteralStringAsString(terminator, discardLeadingNewLine) - if err != nil { - return l.errorf(err.Error()) - } - - l.emitWithValue(tokenString, str) - l.fastForward(len(terminator)) - l.ignore() - return l.lexRvalue -} - -// Lex a string and return the results as a string. -// Terminator is the substring indicating the end of the token. -// The resulting string does not include the terminator. 
-func (l *tomlLexer) lexStringAsString(terminator string, discardLeadingNewLine, acceptNewLines bool) (string, error) { - var sb strings.Builder - - if discardLeadingNewLine { - if l.follow("\r\n") { - l.skip() - l.skip() - } else if l.peek() == '\n' { - l.skip() - } - } - - for { - if l.follow(terminator) { - return sb.String(), nil - } - - if l.follow("\\") { - l.next() - switch l.peek() { - case '\r': - fallthrough - case '\n': - fallthrough - case '\t': - fallthrough - case ' ': - // skip all whitespace chars following backslash - for strings.ContainsRune("\r\n\t ", l.peek()) { - l.next() - } - case '"': - sb.WriteString("\"") - l.next() - case 'n': - sb.WriteString("\n") - l.next() - case 'b': - sb.WriteString("\b") - l.next() - case 'f': - sb.WriteString("\f") - l.next() - case '/': - sb.WriteString("/") - l.next() - case 't': - sb.WriteString("\t") - l.next() - case 'r': - sb.WriteString("\r") - l.next() - case '\\': - sb.WriteString("\\") - l.next() - case 'u': - l.next() - var code strings.Builder - for i := 0; i < 4; i++ { - c := l.peek() - if !isHexDigit(c) { - return "", errors.New("unfinished unicode escape") - } - l.next() - code.WriteRune(c) - } - intcode, err := strconv.ParseInt(code.String(), 16, 32) - if err != nil { - return "", errors.New("invalid unicode escape: \\u" + code.String()) - } - sb.WriteRune(rune(intcode)) - case 'U': - l.next() - var code strings.Builder - for i := 0; i < 8; i++ { - c := l.peek() - if !isHexDigit(c) { - return "", errors.New("unfinished unicode escape") - } - l.next() - code.WriteRune(c) - } - intcode, err := strconv.ParseInt(code.String(), 16, 64) - if err != nil { - return "", errors.New("invalid unicode escape: \\U" + code.String()) - } - sb.WriteRune(rune(intcode)) - default: - return "", errors.New("invalid escape sequence: \\" + string(l.peek())) - } - } else { - r := l.peek() - - if 0x00 <= r && r <= 0x1F && r != '\t' && !(acceptNewLines && (r == '\n' || r == '\r')) { - return "", fmt.Errorf("unescaped control character %U", r) - } - l.next() - sb.WriteRune(r) - } - - if l.peek() == eof { - break - } - } - - return "", errors.New("unclosed string") -} - -func (l *tomlLexer) lexString() tomlLexStateFn { - l.skip() - - // handle special case for triple-quote - terminator := `"` - discardLeadingNewLine := false - acceptNewLines := false - if l.follow(`""`) { - l.skip() - l.skip() - terminator = `"""` - discardLeadingNewLine = true - acceptNewLines = true - } - - str, err := l.lexStringAsString(terminator, discardLeadingNewLine, acceptNewLines) - if err != nil { - return l.errorf(err.Error()) - } - - l.emitWithValue(tokenString, str) - l.fastForward(len(terminator)) - l.ignore() - return l.lexRvalue -} - -func (l *tomlLexer) lexTableKey() tomlLexStateFn { - l.next() - - if l.peek() == '[' { - // token '[[' signifies an array of tables - l.next() - l.emit(tokenDoubleLeftBracket) - return l.lexInsideTableArrayKey - } - // vanilla table key - l.emit(tokenLeftBracket) - return l.lexInsideTableKey -} - -// Parse the key till "]]", but only bare keys are supported -func (l *tomlLexer) lexInsideTableArrayKey() tomlLexStateFn { - for r := l.peek(); r != eof; r = l.peek() { - switch r { - case ']': - if l.currentTokenStop > l.currentTokenStart { - l.emit(tokenKeyGroupArray) - } - l.next() - if l.peek() != ']' { - break - } - l.next() - l.emit(tokenDoubleRightBracket) - return l.lexVoid - case '[': - return l.errorf("table array key cannot contain ']'") - default: - l.next() - } - } - return l.errorf("unclosed table array key") -} - -// Parse the 
key till "]" but only bare keys are supported -func (l *tomlLexer) lexInsideTableKey() tomlLexStateFn { - for r := l.peek(); r != eof; r = l.peek() { - switch r { - case ']': - if l.currentTokenStop > l.currentTokenStart { - l.emit(tokenKeyGroup) - } - l.next() - l.emit(tokenRightBracket) - return l.lexVoid - case '[': - return l.errorf("table key cannot contain ']'") - default: - l.next() - } - } - return l.errorf("unclosed table key") -} - -func (l *tomlLexer) lexRightBracket() tomlLexStateFn { - l.next() - l.emit(tokenRightBracket) - if len(l.brackets) == 0 || l.brackets[len(l.brackets)-1] != '[' { - return l.errorf("cannot have ']' here") - } - l.brackets = l.brackets[:len(l.brackets)-1] - return l.lexRvalue -} - -type validRuneFn func(r rune) bool - -func isValidHexRune(r rune) bool { - return r >= 'a' && r <= 'f' || - r >= 'A' && r <= 'F' || - r >= '0' && r <= '9' || - r == '_' -} - -func isValidOctalRune(r rune) bool { - return r >= '0' && r <= '7' || r == '_' -} - -func isValidBinaryRune(r rune) bool { - return r == '0' || r == '1' || r == '_' -} - -func (l *tomlLexer) lexNumber() tomlLexStateFn { - r := l.peek() - - if r == '0' { - follow := l.peekString(2) - if len(follow) == 2 { - var isValidRune validRuneFn - switch follow[1] { - case 'x': - isValidRune = isValidHexRune - case 'o': - isValidRune = isValidOctalRune - case 'b': - isValidRune = isValidBinaryRune - default: - if follow[1] >= 'a' && follow[1] <= 'z' || follow[1] >= 'A' && follow[1] <= 'Z' { - return l.errorf("unknown number base: %s. possible options are x (hex) o (octal) b (binary)", string(follow[1])) - } - } - - if isValidRune != nil { - l.next() - l.next() - digitSeen := false - for { - next := l.peek() - if !isValidRune(next) { - break - } - digitSeen = true - l.next() - } - - if !digitSeen { - return l.errorf("number needs at least one digit") - } - - l.emit(tokenInteger) - - return l.lexRvalue - } - } - } - - if r == '+' || r == '-' { - l.next() - if l.follow("inf") { - return l.lexInf - } - if l.follow("nan") { - return l.lexNan - } - } - - pointSeen := false - expSeen := false - digitSeen := false - for { - next := l.peek() - if next == '.' { - if pointSeen { - return l.errorf("cannot have two dots in one float") - } - l.next() - if !isDigit(l.peek()) { - return l.errorf("float cannot end with a dot") - } - pointSeen = true - } else if next == 'e' || next == 'E' { - expSeen = true - l.next() - r := l.peek() - if r == '+' || r == '-' { - l.next() - } - } else if isDigit(next) { - digitSeen = true - l.next() - } else if next == '_' { - l.next() - } else { - break - } - if pointSeen && !digitSeen { - return l.errorf("cannot start float with a dot") - } - } - - if !digitSeen { - return l.errorf("no digit in that number") - } - if pointSeen || expSeen { - l.emit(tokenFloat) - } else { - l.emit(tokenInteger) - } - return l.lexRvalue -} - -func (l *tomlLexer) run() { - for state := l.lexVoid; state != nil; { - state = state() - } -} - -// Entry point -func lexToml(inputBytes []byte) []token { - runes := bytes.Runes(inputBytes) - l := &tomlLexer{ - input: runes, - tokens: make([]token, 0, 256), - line: 1, - col: 1, - endbufferLine: 1, - endbufferCol: 1, - } - l.run() - return l.tokens -} diff --git a/tools/vendor/github.com/pelletier/go-toml/localtime.go b/tools/vendor/github.com/pelletier/go-toml/localtime.go deleted file mode 100644 index 9dfe4b9e6..000000000 --- a/tools/vendor/github.com/pelletier/go-toml/localtime.go +++ /dev/null @@ -1,287 +0,0 @@ -// Implementation of TOML's local date/time. 
-// -// Copied over from Google's civil to avoid pulling all the Google dependencies. -// Originals: -// https://raw.githubusercontent.com/googleapis/google-cloud-go/ed46f5086358513cf8c25f8e3f022cb838a49d66/civil/civil.go -// Changes: -// * Renamed files from civil* to localtime*. -// * Package changed from civil to toml. -// * 'Local' prefix added to all structs. -// -// Copyright 2016 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package civil implements types for civil time, a time-zone-independent -// representation of time that follows the rules of the proleptic -// Gregorian calendar with exactly 24-hour days, 60-minute hours, and 60-second -// minutes. -// -// Because they lack location information, these types do not represent unique -// moments or intervals of time. Use time.Time for that purpose. -package toml - -import ( - "fmt" - "time" -) - -// A LocalDate represents a date (year, month, day). -// -// This type does not include location information, and therefore does not -// describe a unique 24-hour timespan. -type LocalDate struct { - Year int // Year (e.g., 2014). - Month time.Month // Month of the year (January = 1, ...). - Day int // Day of the month, starting at 1. -} - -// LocalDateOf returns the LocalDate in which a time occurs in that time's location. -func LocalDateOf(t time.Time) LocalDate { - var d LocalDate - d.Year, d.Month, d.Day = t.Date() - return d -} - -// ParseLocalDate parses a string in RFC3339 full-date format and returns the date value it represents. -func ParseLocalDate(s string) (LocalDate, error) { - t, err := time.Parse("2006-01-02", s) - if err != nil { - return LocalDate{}, err - } - return LocalDateOf(t), nil -} - -// String returns the date in RFC3339 full-date format. -func (d LocalDate) String() string { - return fmt.Sprintf("%04d-%02d-%02d", d.Year, d.Month, d.Day) -} - -// IsValid reports whether the date is valid. -func (d LocalDate) IsValid() bool { - return LocalDateOf(d.In(time.UTC)) == d -} - -// In returns the time corresponding to time 00:00:00 of the date in the location. -// -// In is always consistent with time.LocalDate, even when time.LocalDate returns a time -// on a different day. For example, if loc is America/Indiana/Vincennes, then both -// time.LocalDate(1955, time.May, 1, 0, 0, 0, 0, loc) -// and -// civil.LocalDate{Year: 1955, Month: time.May, Day: 1}.In(loc) -// return 23:00:00 on April 30, 1955. -// -// In panics if loc is nil. -func (d LocalDate) In(loc *time.Location) time.Time { - return time.Date(d.Year, d.Month, d.Day, 0, 0, 0, 0, loc) -} - -// AddDays returns the date that is n days in the future. -// n can also be negative to go into the past. -func (d LocalDate) AddDays(n int) LocalDate { - return LocalDateOf(d.In(time.UTC).AddDate(0, 0, n)) -} - -// DaysSince returns the signed number of days between the date and s, not including the end day. -// This is the inverse operation to AddDays. 
-func (d LocalDate) DaysSince(s LocalDate) (days int) { - // We convert to Unix time so we do not have to worry about leap seconds: - // Unix time increases by exactly 86400 seconds per day. - deltaUnix := d.In(time.UTC).Unix() - s.In(time.UTC).Unix() - return int(deltaUnix / 86400) -} - -// Before reports whether d1 occurs before d2. -func (d1 LocalDate) Before(d2 LocalDate) bool { - if d1.Year != d2.Year { - return d1.Year < d2.Year - } - if d1.Month != d2.Month { - return d1.Month < d2.Month - } - return d1.Day < d2.Day -} - -// After reports whether d1 occurs after d2. -func (d1 LocalDate) After(d2 LocalDate) bool { - return d2.Before(d1) -} - -// MarshalText implements the encoding.TextMarshaler interface. -// The output is the result of d.String(). -func (d LocalDate) MarshalText() ([]byte, error) { - return []byte(d.String()), nil -} - -// UnmarshalText implements the encoding.TextUnmarshaler interface. -// The date is expected to be a string in a format accepted by ParseLocalDate. -func (d *LocalDate) UnmarshalText(data []byte) error { - var err error - *d, err = ParseLocalDate(string(data)) - return err -} - -// A LocalTime represents a time with nanosecond precision. -// -// This type does not include location information, and therefore does not -// describe a unique moment in time. -// -// This type exists to represent the TIME type in storage-based APIs like BigQuery. -// Most operations on Times are unlikely to be meaningful. Prefer the LocalDateTime type. -type LocalTime struct { - Hour int // The hour of the day in 24-hour format; range [0-23] - Minute int // The minute of the hour; range [0-59] - Second int // The second of the minute; range [0-59] - Nanosecond int // The nanosecond of the second; range [0-999999999] -} - -// LocalTimeOf returns the LocalTime representing the time of day in which a time occurs -// in that time's location. It ignores the date. -func LocalTimeOf(t time.Time) LocalTime { - var tm LocalTime - tm.Hour, tm.Minute, tm.Second = t.Clock() - tm.Nanosecond = t.Nanosecond() - return tm -} - -// ParseLocalTime parses a string and returns the time value it represents. -// ParseLocalTime accepts an extended form of the RFC3339 partial-time format. After -// the HH:MM:SS part of the string, an optional fractional part may appear, -// consisting of a decimal point followed by one to nine decimal digits. -// (RFC3339 admits only one digit after the decimal point). -func ParseLocalTime(s string) (LocalTime, error) { - t, err := time.Parse("15:04:05.999999999", s) - if err != nil { - return LocalTime{}, err - } - return LocalTimeOf(t), nil -} - -// String returns the date in the format described in ParseLocalTime. If Nanoseconds -// is zero, no fractional part will be generated. Otherwise, the result will -// end with a fractional part consisting of a decimal point and nine digits. -func (t LocalTime) String() string { - s := fmt.Sprintf("%02d:%02d:%02d", t.Hour, t.Minute, t.Second) - if t.Nanosecond == 0 { - return s - } - return s + fmt.Sprintf(".%09d", t.Nanosecond) -} - -// IsValid reports whether the time is valid. -func (t LocalTime) IsValid() bool { - // Construct a non-zero time. - tm := time.Date(2, 2, 2, t.Hour, t.Minute, t.Second, t.Nanosecond, time.UTC) - return LocalTimeOf(tm) == t -} - -// MarshalText implements the encoding.TextMarshaler interface. -// The output is the result of t.String(). 
-func (t LocalTime) MarshalText() ([]byte, error) { - return []byte(t.String()), nil -} - -// UnmarshalText implements the encoding.TextUnmarshaler interface. -// The time is expected to be a string in a format accepted by ParseLocalTime. -func (t *LocalTime) UnmarshalText(data []byte) error { - var err error - *t, err = ParseLocalTime(string(data)) - return err -} - -// A LocalDateTime represents a date and time. -// -// This type does not include location information, and therefore does not -// describe a unique moment in time. -type LocalDateTime struct { - Date LocalDate - Time LocalTime -} - -// Note: We deliberately do not embed LocalDate into LocalDateTime, to avoid promoting AddDays and Sub. - -// LocalDateTimeOf returns the LocalDateTime in which a time occurs in that time's location. -func LocalDateTimeOf(t time.Time) LocalDateTime { - return LocalDateTime{ - Date: LocalDateOf(t), - Time: LocalTimeOf(t), - } -} - -// ParseLocalDateTime parses a string and returns the LocalDateTime it represents. -// ParseLocalDateTime accepts a variant of the RFC3339 date-time format that omits -// the time offset but includes an optional fractional time, as described in -// ParseLocalTime. Informally, the accepted format is -// YYYY-MM-DDTHH:MM:SS[.FFFFFFFFF] -// where the 'T' may be a lower-case 't'. -func ParseLocalDateTime(s string) (LocalDateTime, error) { - t, err := time.Parse("2006-01-02T15:04:05.999999999", s) - if err != nil { - t, err = time.Parse("2006-01-02t15:04:05.999999999", s) - if err != nil { - return LocalDateTime{}, err - } - } - return LocalDateTimeOf(t), nil -} - -// String returns the date in the format described in ParseLocalDate. -func (dt LocalDateTime) String() string { - return dt.Date.String() + "T" + dt.Time.String() -} - -// IsValid reports whether the datetime is valid. -func (dt LocalDateTime) IsValid() bool { - return dt.Date.IsValid() && dt.Time.IsValid() -} - -// In returns the time corresponding to the LocalDateTime in the given location. -// -// If the time is missing or ambigous at the location, In returns the same -// result as time.LocalDate. For example, if loc is America/Indiana/Vincennes, then -// both -// time.LocalDate(1955, time.May, 1, 0, 30, 0, 0, loc) -// and -// civil.LocalDateTime{ -// civil.LocalDate{Year: 1955, Month: time.May, Day: 1}}, -// civil.LocalTime{Minute: 30}}.In(loc) -// return 23:30:00 on April 30, 1955. -// -// In panics if loc is nil. -func (dt LocalDateTime) In(loc *time.Location) time.Time { - return time.Date(dt.Date.Year, dt.Date.Month, dt.Date.Day, dt.Time.Hour, dt.Time.Minute, dt.Time.Second, dt.Time.Nanosecond, loc) -} - -// Before reports whether dt1 occurs before dt2. -func (dt1 LocalDateTime) Before(dt2 LocalDateTime) bool { - return dt1.In(time.UTC).Before(dt2.In(time.UTC)) -} - -// After reports whether dt1 occurs after dt2. -func (dt1 LocalDateTime) After(dt2 LocalDateTime) bool { - return dt2.Before(dt1) -} - -// MarshalText implements the encoding.TextMarshaler interface. -// The output is the result of dt.String(). -func (dt LocalDateTime) MarshalText() ([]byte, error) { - return []byte(dt.String()), nil -} - -// UnmarshalText implements the encoding.TextUnmarshaler interface. 
-// The datetime is expected to be a string in a format accepted by ParseLocalDateTime -func (dt *LocalDateTime) UnmarshalText(data []byte) error { - var err error - *dt, err = ParseLocalDateTime(string(data)) - return err -} diff --git a/tools/vendor/github.com/pelletier/go-toml/marshal.go b/tools/vendor/github.com/pelletier/go-toml/marshal.go deleted file mode 100644 index 571273049..000000000 --- a/tools/vendor/github.com/pelletier/go-toml/marshal.go +++ /dev/null @@ -1,1308 +0,0 @@ -package toml - -import ( - "bytes" - "encoding" - "errors" - "fmt" - "io" - "reflect" - "sort" - "strconv" - "strings" - "time" -) - -const ( - tagFieldName = "toml" - tagFieldComment = "comment" - tagCommented = "commented" - tagMultiline = "multiline" - tagLiteral = "literal" - tagDefault = "default" -) - -type tomlOpts struct { - name string - nameFromTag bool - comment string - commented bool - multiline bool - literal bool - include bool - omitempty bool - defaultValue string -} - -type encOpts struct { - quoteMapKeys bool - arraysOneElementPerLine bool -} - -var encOptsDefaults = encOpts{ - quoteMapKeys: false, -} - -type annotation struct { - tag string - comment string - commented string - multiline string - literal string - defaultValue string -} - -var annotationDefault = annotation{ - tag: tagFieldName, - comment: tagFieldComment, - commented: tagCommented, - multiline: tagMultiline, - literal: tagLiteral, - defaultValue: tagDefault, -} - -type MarshalOrder int - -// Orders the Encoder can write the fields to the output stream. -const ( - // Sort fields alphabetically. - OrderAlphabetical MarshalOrder = iota + 1 - // Preserve the order the fields are encountered. For example, the order of fields in - // a struct. - OrderPreserve -) - -var timeType = reflect.TypeOf(time.Time{}) -var marshalerType = reflect.TypeOf(new(Marshaler)).Elem() -var unmarshalerType = reflect.TypeOf(new(Unmarshaler)).Elem() -var textMarshalerType = reflect.TypeOf(new(encoding.TextMarshaler)).Elem() -var textUnmarshalerType = reflect.TypeOf(new(encoding.TextUnmarshaler)).Elem() -var localDateType = reflect.TypeOf(LocalDate{}) -var localTimeType = reflect.TypeOf(LocalTime{}) -var localDateTimeType = reflect.TypeOf(LocalDateTime{}) -var mapStringInterfaceType = reflect.TypeOf(map[string]interface{}{}) - -// Check if the given marshal type maps to a Tree primitive -func isPrimitive(mtype reflect.Type) bool { - switch mtype.Kind() { - case reflect.Ptr: - return isPrimitive(mtype.Elem()) - case reflect.Bool: - return true - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return true - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - return true - case reflect.Float32, reflect.Float64: - return true - case reflect.String: - return true - case reflect.Struct: - return isTimeType(mtype) - default: - return false - } -} - -func isTimeType(mtype reflect.Type) bool { - return mtype == timeType || mtype == localDateType || mtype == localDateTimeType || mtype == localTimeType -} - -// Check if the given marshal type maps to a Tree slice or array -func isTreeSequence(mtype reflect.Type) bool { - switch mtype.Kind() { - case reflect.Ptr: - return isTreeSequence(mtype.Elem()) - case reflect.Slice, reflect.Array: - return isTree(mtype.Elem()) - default: - return false - } -} - -// Check if the given marshal type maps to a slice or array of a custom marshaler type -func isCustomMarshalerSequence(mtype reflect.Type) bool { - switch mtype.Kind() { - case reflect.Ptr: - return 
isCustomMarshalerSequence(mtype.Elem()) - case reflect.Slice, reflect.Array: - return isCustomMarshaler(mtype.Elem()) || isCustomMarshaler(reflect.New(mtype.Elem()).Type()) - default: - return false - } -} - -// Check if the given marshal type maps to a slice or array of a text marshaler type -func isTextMarshalerSequence(mtype reflect.Type) bool { - switch mtype.Kind() { - case reflect.Ptr: - return isTextMarshalerSequence(mtype.Elem()) - case reflect.Slice, reflect.Array: - return isTextMarshaler(mtype.Elem()) || isTextMarshaler(reflect.New(mtype.Elem()).Type()) - default: - return false - } -} - -// Check if the given marshal type maps to a non-Tree slice or array -func isOtherSequence(mtype reflect.Type) bool { - switch mtype.Kind() { - case reflect.Ptr: - return isOtherSequence(mtype.Elem()) - case reflect.Slice, reflect.Array: - return !isTreeSequence(mtype) - default: - return false - } -} - -// Check if the given marshal type maps to a Tree -func isTree(mtype reflect.Type) bool { - switch mtype.Kind() { - case reflect.Ptr: - return isTree(mtype.Elem()) - case reflect.Map: - return true - case reflect.Struct: - return !isPrimitive(mtype) - default: - return false - } -} - -func isCustomMarshaler(mtype reflect.Type) bool { - return mtype.Implements(marshalerType) -} - -func callCustomMarshaler(mval reflect.Value) ([]byte, error) { - return mval.Interface().(Marshaler).MarshalTOML() -} - -func isTextMarshaler(mtype reflect.Type) bool { - return mtype.Implements(textMarshalerType) && !isTimeType(mtype) -} - -func callTextMarshaler(mval reflect.Value) ([]byte, error) { - return mval.Interface().(encoding.TextMarshaler).MarshalText() -} - -func isCustomUnmarshaler(mtype reflect.Type) bool { - return mtype.Implements(unmarshalerType) -} - -func callCustomUnmarshaler(mval reflect.Value, tval interface{}) error { - return mval.Interface().(Unmarshaler).UnmarshalTOML(tval) -} - -func isTextUnmarshaler(mtype reflect.Type) bool { - return mtype.Implements(textUnmarshalerType) -} - -func callTextUnmarshaler(mval reflect.Value, text []byte) error { - return mval.Interface().(encoding.TextUnmarshaler).UnmarshalText(text) -} - -// Marshaler is the interface implemented by types that -// can marshal themselves into valid TOML. -type Marshaler interface { - MarshalTOML() ([]byte, error) -} - -// Unmarshaler is the interface implemented by types that -// can unmarshal a TOML description of themselves. -type Unmarshaler interface { - UnmarshalTOML(interface{}) error -} - -/* -Marshal returns the TOML encoding of v. Behavior is similar to the Go json -encoder, except that there is no concept of a Marshaler interface or MarshalTOML -function for sub-structs, and currently only definite types can be marshaled -(i.e. no `interface{}`). - -The following struct annotations are supported: - - toml:"Field" Overrides the field's name to output. - omitempty When set, empty values and groups are not emitted. - comment:"comment" Emits a # comment on the same line. This supports new lines. - commented:"true" Emits the value as commented. - -Note that pointers are automatically assigned the "omitempty" option, as TOML -explicitly does not handle null values (saying instead the label should be -dropped). 
- -Tree structural types and corresponding marshal types: - - *Tree (*)struct, (*)map[string]interface{} - []*Tree (*)[](*)struct, (*)[](*)map[string]interface{} - []interface{} (as interface{}) (*)[]primitive, (*)[]([]interface{}) - interface{} (*)primitive - -Tree primitive types and corresponding marshal types: - - uint64 uint, uint8-uint64, pointers to same - int64 int, int8-int64, pointers to same - float64 float32, float64, pointers to same - string string, pointers to same - bool bool, pointers to same - time.LocalTime time.LocalTime{}, pointers to same - -For additional flexibility, use the Encoder API. -*/ -func Marshal(v interface{}) ([]byte, error) { - return NewEncoder(nil).marshal(v) -} - -// Encoder writes TOML values to an output stream. -type Encoder struct { - w io.Writer - encOpts - annotation - line int - col int - order MarshalOrder - promoteAnon bool - compactComments bool - indentation string -} - -// NewEncoder returns a new encoder that writes to w. -func NewEncoder(w io.Writer) *Encoder { - return &Encoder{ - w: w, - encOpts: encOptsDefaults, - annotation: annotationDefault, - line: 0, - col: 1, - order: OrderAlphabetical, - indentation: " ", - } -} - -// Encode writes the TOML encoding of v to the stream. -// -// See the documentation for Marshal for details. -func (e *Encoder) Encode(v interface{}) error { - b, err := e.marshal(v) - if err != nil { - return err - } - if _, err := e.w.Write(b); err != nil { - return err - } - return nil -} - -// QuoteMapKeys sets up the encoder to encode -// maps with string type keys with quoted TOML keys. -// -// This relieves the character limitations on map keys. -func (e *Encoder) QuoteMapKeys(v bool) *Encoder { - e.quoteMapKeys = v - return e -} - -// ArraysWithOneElementPerLine sets up the encoder to encode arrays -// with more than one element on multiple lines instead of one. -// -// For example: -// -// A = [1,2,3] -// -// Becomes -// -// A = [ -// 1, -// 2, -// 3, -// ] -func (e *Encoder) ArraysWithOneElementPerLine(v bool) *Encoder { - e.arraysOneElementPerLine = v - return e -} - -// Order allows to change in which order fields will be written to the output stream. -func (e *Encoder) Order(ord MarshalOrder) *Encoder { - e.order = ord - return e -} - -// Indentation allows to change indentation when marshalling. -func (e *Encoder) Indentation(indent string) *Encoder { - e.indentation = indent - return e -} - -// SetTagName allows changing default tag "toml" -func (e *Encoder) SetTagName(v string) *Encoder { - e.tag = v - return e -} - -// SetTagComment allows changing default tag "comment" -func (e *Encoder) SetTagComment(v string) *Encoder { - e.comment = v - return e -} - -// SetTagCommented allows changing default tag "commented" -func (e *Encoder) SetTagCommented(v string) *Encoder { - e.commented = v - return e -} - -// SetTagMultiline allows changing default tag "multiline" -func (e *Encoder) SetTagMultiline(v string) *Encoder { - e.multiline = v - return e -} - -// PromoteAnonymous allows to change how anonymous struct fields are marshaled. -// Usually, they are marshaled as if the inner exported fields were fields in -// the outer struct. However, if an anonymous struct field is given a name in -// its TOML tag, it is treated like a regular struct field with that name, -// rather than being anonymous. -// -// In case anonymous promotion is enabled, all anonymous structs are promoted -// and treated like regular struct fields.
-func (e *Encoder) PromoteAnonymous(promote bool) *Encoder { - e.promoteAnon = promote - return e -} - -// CompactComments removes the new line before each comment in the tree. -func (e *Encoder) CompactComments(cc bool) *Encoder { - e.compactComments = cc - return e -} - -func (e *Encoder) marshal(v interface{}) ([]byte, error) { - // Check if indentation is valid - for _, char := range e.indentation { - if !isSpace(char) { - return []byte{}, fmt.Errorf("invalid indentation: must only contains space or tab characters") - } - } - - mtype := reflect.TypeOf(v) - if mtype == nil { - return []byte{}, errors.New("nil cannot be marshaled to TOML") - } - - switch mtype.Kind() { - case reflect.Struct, reflect.Map: - case reflect.Ptr: - if mtype.Elem().Kind() != reflect.Struct { - return []byte{}, errors.New("Only pointer to struct can be marshaled to TOML") - } - if reflect.ValueOf(v).IsNil() { - return []byte{}, errors.New("nil pointer cannot be marshaled to TOML") - } - default: - return []byte{}, errors.New("Only a struct or map can be marshaled to TOML") - } - - sval := reflect.ValueOf(v) - if isCustomMarshaler(mtype) { - return callCustomMarshaler(sval) - } - if isTextMarshaler(mtype) { - return callTextMarshaler(sval) - } - t, err := e.valueToTree(mtype, sval) - if err != nil { - return []byte{}, err - } - - var buf bytes.Buffer - _, err = t.writeToOrdered(&buf, "", "", 0, e.arraysOneElementPerLine, e.order, e.indentation, e.compactComments, false) - - return buf.Bytes(), err -} - -// Create next tree with a position based on Encoder.line -func (e *Encoder) nextTree() *Tree { - return newTreeWithPosition(Position{Line: e.line, Col: 1}) -} - -// Convert given marshal struct or map value to toml tree -func (e *Encoder) valueToTree(mtype reflect.Type, mval reflect.Value) (*Tree, error) { - if mtype.Kind() == reflect.Ptr { - return e.valueToTree(mtype.Elem(), mval.Elem()) - } - tval := e.nextTree() - switch mtype.Kind() { - case reflect.Struct: - switch mval.Interface().(type) { - case Tree: - reflect.ValueOf(tval).Elem().Set(mval) - default: - for i := 0; i < mtype.NumField(); i++ { - mtypef, mvalf := mtype.Field(i), mval.Field(i) - opts := tomlOptions(mtypef, e.annotation) - if opts.include && ((mtypef.Type.Kind() != reflect.Interface && !opts.omitempty) || !isZero(mvalf)) { - val, err := e.valueToToml(mtypef.Type, mvalf) - if err != nil { - return nil, err - } - if tree, ok := val.(*Tree); ok && mtypef.Anonymous && !opts.nameFromTag && !e.promoteAnon { - e.appendTree(tval, tree) - } else { - val = e.wrapTomlValue(val, tval) - tval.SetPathWithOptions([]string{opts.name}, SetOptions{ - Comment: opts.comment, - Commented: opts.commented, - Multiline: opts.multiline, - Literal: opts.literal, - }, val) - } - } - } - } - case reflect.Map: - keys := mval.MapKeys() - if e.order == OrderPreserve && len(keys) > 0 { - // Sorting []reflect.Value is not straight forward. - // - // OrderPreserve will support deterministic results when string is used - // as the key to maps. 
- typ := keys[0].Type() - kind := keys[0].Kind() - if kind == reflect.String { - ikeys := make([]string, len(keys)) - for i := range keys { - ikeys[i] = keys[i].Interface().(string) - } - sort.Strings(ikeys) - for i := range ikeys { - keys[i] = reflect.ValueOf(ikeys[i]).Convert(typ) - } - } - } - for _, key := range keys { - mvalf := mval.MapIndex(key) - if (mtype.Elem().Kind() == reflect.Ptr || mtype.Elem().Kind() == reflect.Interface) && mvalf.IsNil() { - continue - } - val, err := e.valueToToml(mtype.Elem(), mvalf) - if err != nil { - return nil, err - } - val = e.wrapTomlValue(val, tval) - if e.quoteMapKeys { - keyStr, err := tomlValueStringRepresentation(key.String(), "", "", e.order, e.arraysOneElementPerLine) - if err != nil { - return nil, err - } - tval.SetPath([]string{keyStr}, val) - } else { - tval.SetPath([]string{key.String()}, val) - } - } - } - return tval, nil -} - -// Convert given marshal slice to slice of Toml trees -func (e *Encoder) valueToTreeSlice(mtype reflect.Type, mval reflect.Value) ([]*Tree, error) { - tval := make([]*Tree, mval.Len(), mval.Len()) - for i := 0; i < mval.Len(); i++ { - val, err := e.valueToTree(mtype.Elem(), mval.Index(i)) - if err != nil { - return nil, err - } - tval[i] = val - } - return tval, nil -} - -// Convert given marshal slice to slice of toml values -func (e *Encoder) valueToOtherSlice(mtype reflect.Type, mval reflect.Value) (interface{}, error) { - tval := make([]interface{}, mval.Len(), mval.Len()) - for i := 0; i < mval.Len(); i++ { - val, err := e.valueToToml(mtype.Elem(), mval.Index(i)) - if err != nil { - return nil, err - } - tval[i] = val - } - return tval, nil -} - -// Convert given marshal value to toml value -func (e *Encoder) valueToToml(mtype reflect.Type, mval reflect.Value) (interface{}, error) { - if mtype.Kind() == reflect.Ptr { - switch { - case isCustomMarshaler(mtype): - return callCustomMarshaler(mval) - case isTextMarshaler(mtype): - b, err := callTextMarshaler(mval) - return string(b), err - default: - return e.valueToToml(mtype.Elem(), mval.Elem()) - } - } - if mtype.Kind() == reflect.Interface { - return e.valueToToml(mval.Elem().Type(), mval.Elem()) - } - switch { - case isCustomMarshaler(mtype): - return callCustomMarshaler(mval) - case isTextMarshaler(mtype): - b, err := callTextMarshaler(mval) - return string(b), err - case isTree(mtype): - return e.valueToTree(mtype, mval) - case isOtherSequence(mtype), isCustomMarshalerSequence(mtype), isTextMarshalerSequence(mtype): - return e.valueToOtherSlice(mtype, mval) - case isTreeSequence(mtype): - return e.valueToTreeSlice(mtype, mval) - default: - switch mtype.Kind() { - case reflect.Bool: - return mval.Bool(), nil - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - if mtype.Kind() == reflect.Int64 && mtype == reflect.TypeOf(time.Duration(1)) { - return fmt.Sprint(mval), nil - } - return mval.Int(), nil - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - return mval.Uint(), nil - case reflect.Float32, reflect.Float64: - return mval.Float(), nil - case reflect.String: - return mval.String(), nil - case reflect.Struct: - return mval.Interface(), nil - default: - return nil, fmt.Errorf("Marshal can't handle %v(%v)", mtype, mtype.Kind()) - } - } -} - -func (e *Encoder) appendTree(t, o *Tree) error { - for key, value := range o.values { - if _, ok := t.values[key]; ok { - continue - } - if tomlValue, ok := value.(*tomlValue); ok { - tomlValue.position.Col = t.position.Col - } - t.values[key] = value - } 
-	return nil
-}
-
-// wrapTomlValue creates a TOML value with the current line number as the
-// position line.
-func (e *Encoder) wrapTomlValue(val interface{}, parent *Tree) interface{} {
-	_, isTree := val.(*Tree)
-	_, isTreeS := val.([]*Tree)
-	if isTree || isTreeS {
-		e.line++
-		return val
-	}
-
-	ret := &tomlValue{
-		value: val,
-		position: Position{
-			e.line,
-			parent.position.Col,
-		},
-	}
-	e.line++
-	return ret
-}
-
-// Unmarshal attempts to unmarshal the Tree into the Go struct pointed to by v.
-// Neither Unmarshaler interfaces nor UnmarshalTOML functions are supported for
-// sub-structs, and only definite types can be unmarshaled.
-func (t *Tree) Unmarshal(v interface{}) error {
-	d := Decoder{tval: t, tagName: tagFieldName}
-	return d.unmarshal(v)
-}
-
-// Marshal returns the TOML encoding of Tree.
-// See Marshal() documentation for the type mapping tables.
-func (t *Tree) Marshal() ([]byte, error) {
-	var buf bytes.Buffer
-	_, err := t.WriteTo(&buf)
-	if err != nil {
-		return nil, err
-	}
-	return buf.Bytes(), nil
-}
-
-// Unmarshal parses the TOML-encoded data and stores the result in the value
-// pointed to by v. Behavior is similar to the Go json decoder, except that
-// there is no concept of an Unmarshaler interface or UnmarshalTOML function
-// for sub-structs, and currently only definite types can be unmarshaled to
-// (i.e. no `interface{}`).
-//
-// The following struct annotations are supported:
-//
-//   toml:"Field"   Overrides the field's name to map to.
-//   default:"foo"  Provides a default value.
-//
-// For default values, only fields of the following types are supported:
-//   * string
-//   * bool
-//   * int
-//   * int64
-//   * float64
-//
-// See Marshal() documentation for the type mapping tables.
-func Unmarshal(data []byte, v interface{}) error {
-	t, err := LoadReader(bytes.NewReader(data))
-	if err != nil {
-		return err
-	}
-	return t.Unmarshal(v)
-}
-
-// Decoder reads and decodes TOML values from an input stream.
-type Decoder struct {
-	r    io.Reader
-	tval *Tree
-	encOpts
-	tagName string
-	strict  bool
-	visitor visitorState
-}
-
-// NewDecoder returns a new decoder that reads from r.
-func NewDecoder(r io.Reader) *Decoder {
-	return &Decoder{
-		r:       r,
-		encOpts: encOptsDefaults,
-		tagName: tagFieldName,
-	}
-}
-
-// Decode reads a TOML-encoded value from its input and unmarshals it into
-// the value pointed at by v.
-//
-// See the documentation for Marshal for details.
-func (d *Decoder) Decode(v interface{}) error {
-	var err error
-	d.tval, err = LoadReader(d.r)
-	if err != nil {
-		return err
-	}
-	return d.unmarshal(v)
-}
-
-// SetTagName changes the default tag "toml".
-func (d *Decoder) SetTagName(v string) *Decoder {
-	d.tagName = v
-	return d
-}
-
-// Strict enables strict decoding. Any field that is found in the input data
-// and does not have a corresponding struct member causes an error.
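
The default tag and Strict mode documented above combine like this in practice. A sketch, with an illustrative Server struct:

package main

import (
	"fmt"
	"strings"

	toml "github.com/pelletier/go-toml"
)

// Server is a hypothetical struct used only for this sketch.
type Server struct {
	Host string `toml:"host"`
	Port int64  `toml:"port" default:"8080"`
}

func main() {
	var s Server

	// Port is filled from the default tag because the key is absent.
	if err := toml.Unmarshal([]byte(`host = "example.com"`), &s); err != nil {
		fmt.Println("unmarshal error:", err)
		return
	}
	fmt.Println(s.Host, s.Port) // example.com 8080

	// Strict mode: unknown keys in the input become an error.
	err := toml.NewDecoder(strings.NewReader("host = \"x\"\ntypo = 1")).
		Strict(true).
		Decode(&s)
	fmt.Println(err) // undecoded keys: ["typo"]
}
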
-func (d *Decoder) Strict(strict bool) *Decoder { - d.strict = strict - return d -} - -func (d *Decoder) unmarshal(v interface{}) error { - mtype := reflect.TypeOf(v) - if mtype == nil { - return errors.New("nil cannot be unmarshaled from TOML") - } - if mtype.Kind() != reflect.Ptr { - return errors.New("only a pointer to struct or map can be unmarshaled from TOML") - } - - elem := mtype.Elem() - - switch elem.Kind() { - case reflect.Struct, reflect.Map: - case reflect.Interface: - elem = mapStringInterfaceType - default: - return errors.New("only a pointer to struct or map can be unmarshaled from TOML") - } - - if reflect.ValueOf(v).IsNil() { - return errors.New("nil pointer cannot be unmarshaled from TOML") - } - - vv := reflect.ValueOf(v).Elem() - - if d.strict { - d.visitor = newVisitorState(d.tval) - } - - sval, err := d.valueFromTree(elem, d.tval, &vv) - if err != nil { - return err - } - if err := d.visitor.validate(); err != nil { - return err - } - reflect.ValueOf(v).Elem().Set(sval) - return nil -} - -// Convert toml tree to marshal struct or map, using marshal type. When mval1 -// is non-nil, merge fields into the given value instead of allocating a new one. -func (d *Decoder) valueFromTree(mtype reflect.Type, tval *Tree, mval1 *reflect.Value) (reflect.Value, error) { - if mtype.Kind() == reflect.Ptr { - return d.unwrapPointer(mtype, tval, mval1) - } - - // Check if pointer to value implements the Unmarshaler interface. - if mvalPtr := reflect.New(mtype); isCustomUnmarshaler(mvalPtr.Type()) { - d.visitor.visitAll() - - if tval == nil { - return mvalPtr.Elem(), nil - } - - if err := callCustomUnmarshaler(mvalPtr, tval.ToMap()); err != nil { - return reflect.ValueOf(nil), fmt.Errorf("unmarshal toml: %v", err) - } - return mvalPtr.Elem(), nil - } - - var mval reflect.Value - switch mtype.Kind() { - case reflect.Struct: - if mval1 != nil { - mval = *mval1 - } else { - mval = reflect.New(mtype).Elem() - } - - switch mval.Interface().(type) { - case Tree: - mval.Set(reflect.ValueOf(tval).Elem()) - default: - for i := 0; i < mtype.NumField(); i++ { - mtypef := mtype.Field(i) - an := annotation{tag: d.tagName} - opts := tomlOptions(mtypef, an) - if !opts.include { - continue - } - baseKey := opts.name - keysToTry := []string{ - baseKey, - strings.ToLower(baseKey), - strings.ToTitle(baseKey), - strings.ToLower(string(baseKey[0])) + baseKey[1:], - } - - found := false - if tval != nil { - for _, key := range keysToTry { - exists := tval.HasPath([]string{key}) - if !exists { - continue - } - - d.visitor.push(key) - val := tval.GetPath([]string{key}) - fval := mval.Field(i) - mvalf, err := d.valueFromToml(mtypef.Type, val, &fval) - if err != nil { - return mval, formatError(err, tval.GetPositionPath([]string{key})) - } - mval.Field(i).Set(mvalf) - found = true - d.visitor.pop() - break - } - } - - if !found && opts.defaultValue != "" { - mvalf := mval.Field(i) - var val interface{} - var err error - switch mvalf.Kind() { - case reflect.String: - val = opts.defaultValue - case reflect.Bool: - val, err = strconv.ParseBool(opts.defaultValue) - case reflect.Uint: - val, err = strconv.ParseUint(opts.defaultValue, 10, 0) - case reflect.Uint8: - val, err = strconv.ParseUint(opts.defaultValue, 10, 8) - case reflect.Uint16: - val, err = strconv.ParseUint(opts.defaultValue, 10, 16) - case reflect.Uint32: - val, err = strconv.ParseUint(opts.defaultValue, 10, 32) - case reflect.Uint64: - val, err = strconv.ParseUint(opts.defaultValue, 10, 64) - case reflect.Int: - val, err = 
strconv.ParseInt(opts.defaultValue, 10, 0) - case reflect.Int8: - val, err = strconv.ParseInt(opts.defaultValue, 10, 8) - case reflect.Int16: - val, err = strconv.ParseInt(opts.defaultValue, 10, 16) - case reflect.Int32: - val, err = strconv.ParseInt(opts.defaultValue, 10, 32) - case reflect.Int64: - // Check if the provided number has a non-numeric extension. - var hasExtension bool - if len(opts.defaultValue) > 0 { - lastChar := opts.defaultValue[len(opts.defaultValue)-1] - if lastChar < '0' || lastChar > '9' { - hasExtension = true - } - } - // If the value is a time.Duration with extension, parse as duration. - // If the value is an int64 or a time.Duration without extension, parse as number. - if hasExtension && mvalf.Type().String() == "time.Duration" { - val, err = time.ParseDuration(opts.defaultValue) - } else { - val, err = strconv.ParseInt(opts.defaultValue, 10, 64) - } - case reflect.Float32: - val, err = strconv.ParseFloat(opts.defaultValue, 32) - case reflect.Float64: - val, err = strconv.ParseFloat(opts.defaultValue, 64) - default: - return mvalf, fmt.Errorf("unsupported field type for default option") - } - - if err != nil { - return mvalf, err - } - mvalf.Set(reflect.ValueOf(val).Convert(mvalf.Type())) - } - - // save the old behavior above and try to check structs - if !found && opts.defaultValue == "" && mtypef.Type.Kind() == reflect.Struct { - tmpTval := tval - if !mtypef.Anonymous { - tmpTval = nil - } - fval := mval.Field(i) - v, err := d.valueFromTree(mtypef.Type, tmpTval, &fval) - if err != nil { - return v, err - } - mval.Field(i).Set(v) - } - } - } - case reflect.Map: - mval = reflect.MakeMap(mtype) - for _, key := range tval.Keys() { - d.visitor.push(key) - // TODO: path splits key - val := tval.GetPath([]string{key}) - mvalf, err := d.valueFromToml(mtype.Elem(), val, nil) - if err != nil { - return mval, formatError(err, tval.GetPositionPath([]string{key})) - } - mval.SetMapIndex(reflect.ValueOf(key).Convert(mtype.Key()), mvalf) - d.visitor.pop() - } - } - return mval, nil -} - -// Convert toml value to marshal struct/map slice, using marshal type -func (d *Decoder) valueFromTreeSlice(mtype reflect.Type, tval []*Tree) (reflect.Value, error) { - mval, err := makeSliceOrArray(mtype, len(tval)) - if err != nil { - return mval, err - } - - for i := 0; i < len(tval); i++ { - d.visitor.push(strconv.Itoa(i)) - val, err := d.valueFromTree(mtype.Elem(), tval[i], nil) - if err != nil { - return mval, err - } - mval.Index(i).Set(val) - d.visitor.pop() - } - return mval, nil -} - -// Convert toml value to marshal primitive slice, using marshal type -func (d *Decoder) valueFromOtherSlice(mtype reflect.Type, tval []interface{}) (reflect.Value, error) { - mval, err := makeSliceOrArray(mtype, len(tval)) - if err != nil { - return mval, err - } - - for i := 0; i < len(tval); i++ { - val, err := d.valueFromToml(mtype.Elem(), tval[i], nil) - if err != nil { - return mval, err - } - mval.Index(i).Set(val) - } - return mval, nil -} - -// Convert toml value to marshal primitive slice, using marshal type -func (d *Decoder) valueFromOtherSliceI(mtype reflect.Type, tval interface{}) (reflect.Value, error) { - val := reflect.ValueOf(tval) - length := val.Len() - - mval, err := makeSliceOrArray(mtype, length) - if err != nil { - return mval, err - } - - for i := 0; i < length; i++ { - val, err := d.valueFromToml(mtype.Elem(), val.Index(i).Interface(), nil) - if err != nil { - return mval, err - } - mval.Index(i).Set(val) - } - return mval, nil -} - -// Create a new slice or a new array 
with specified length -func makeSliceOrArray(mtype reflect.Type, tLength int) (reflect.Value, error) { - var mval reflect.Value - switch mtype.Kind() { - case reflect.Slice: - mval = reflect.MakeSlice(mtype, tLength, tLength) - case reflect.Array: - mval = reflect.New(reflect.ArrayOf(mtype.Len(), mtype.Elem())).Elem() - if tLength > mtype.Len() { - return mval, fmt.Errorf("unmarshal: TOML array length (%v) exceeds destination array length (%v)", tLength, mtype.Len()) - } - } - return mval, nil -} - -// Convert toml value to marshal value, using marshal type. When mval1 is non-nil -// and the given type is a struct value, merge fields into it. -func (d *Decoder) valueFromToml(mtype reflect.Type, tval interface{}, mval1 *reflect.Value) (reflect.Value, error) { - if mtype.Kind() == reflect.Ptr { - return d.unwrapPointer(mtype, tval, mval1) - } - - switch t := tval.(type) { - case *Tree: - var mval11 *reflect.Value - if mtype.Kind() == reflect.Struct { - mval11 = mval1 - } - - if isTree(mtype) { - return d.valueFromTree(mtype, t, mval11) - } - - if mtype.Kind() == reflect.Interface { - if mval1 == nil || mval1.IsNil() { - return d.valueFromTree(reflect.TypeOf(map[string]interface{}{}), t, nil) - } else { - return d.valueFromToml(mval1.Elem().Type(), t, nil) - } - } - - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to a tree", tval, tval) - case []*Tree: - if isTreeSequence(mtype) { - return d.valueFromTreeSlice(mtype, t) - } - if mtype.Kind() == reflect.Interface { - if mval1 == nil || mval1.IsNil() { - return d.valueFromTreeSlice(reflect.TypeOf([]map[string]interface{}{}), t) - } else { - ival := mval1.Elem() - return d.valueFromToml(mval1.Elem().Type(), t, &ival) - } - } - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to trees", tval, tval) - case []interface{}: - d.visitor.visit() - if isOtherSequence(mtype) { - return d.valueFromOtherSlice(mtype, t) - } - if mtype.Kind() == reflect.Interface { - if mval1 == nil || mval1.IsNil() { - return d.valueFromOtherSlice(reflect.TypeOf([]interface{}{}), t) - } else { - ival := mval1.Elem() - return d.valueFromToml(mval1.Elem().Type(), t, &ival) - } - } - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to a slice", tval, tval) - default: - d.visitor.visit() - mvalPtr := reflect.New(mtype) - - // Check if pointer to value implements the Unmarshaler interface. - if isCustomUnmarshaler(mvalPtr.Type()) { - if err := callCustomUnmarshaler(mvalPtr, tval); err != nil { - return reflect.ValueOf(nil), fmt.Errorf("unmarshal toml: %v", err) - } - return mvalPtr.Elem(), nil - } - - // Check if pointer to value implements the encoding.TextUnmarshaler. 
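
The TextUnmarshaler hook checked in the next branch is what lets user-defined types decode themselves from TOML strings. A sketch, assuming a hypothetical Level type:

package main

import (
	"fmt"
	"strings"

	toml "github.com/pelletier/go-toml"
)

// Level is a hypothetical type implementing encoding.TextUnmarshaler;
// the decoder hands it the raw TOML value as text.
type Level int

func (l *Level) UnmarshalText(text []byte) error {
	switch strings.ToLower(string(text)) {
	case "debug":
		*l = 0
	case "info":
		*l = 1
	default:
		return fmt.Errorf("unknown level %q", text)
	}
	return nil
}

type Config struct {
	Verbosity Level `toml:"verbosity"`
}

func main() {
	var c Config
	if err := toml.Unmarshal([]byte(`verbosity = "info"`), &c); err != nil {
		fmt.Println("unmarshal error:", err)
		return
	}
	fmt.Println(c.Verbosity) // 1
}
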
- if isTextUnmarshaler(mvalPtr.Type()) && !isTimeType(mtype) { - if err := d.unmarshalText(tval, mvalPtr); err != nil { - return reflect.ValueOf(nil), fmt.Errorf("unmarshal text: %v", err) - } - return mvalPtr.Elem(), nil - } - - switch mtype.Kind() { - case reflect.Bool, reflect.Struct: - val := reflect.ValueOf(tval) - - switch val.Type() { - case localDateType: - localDate := val.Interface().(LocalDate) - switch mtype { - case timeType: - return reflect.ValueOf(time.Date(localDate.Year, localDate.Month, localDate.Day, 0, 0, 0, 0, time.Local)), nil - } - case localDateTimeType: - localDateTime := val.Interface().(LocalDateTime) - switch mtype { - case timeType: - return reflect.ValueOf(time.Date( - localDateTime.Date.Year, - localDateTime.Date.Month, - localDateTime.Date.Day, - localDateTime.Time.Hour, - localDateTime.Time.Minute, - localDateTime.Time.Second, - localDateTime.Time.Nanosecond, - time.Local)), nil - } - } - - // if this passes for when mtype is reflect.Struct, tval is a time.LocalTime - if !val.Type().ConvertibleTo(mtype) { - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) - } - - return val.Convert(mtype), nil - case reflect.String: - val := reflect.ValueOf(tval) - // stupidly, int64 is convertible to string. So special case this. - if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Int64 { - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) - } - - return val.Convert(mtype), nil - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - val := reflect.ValueOf(tval) - if mtype.Kind() == reflect.Int64 && mtype == reflect.TypeOf(time.Duration(1)) && val.Kind() == reflect.String { - d, err := time.ParseDuration(val.String()) - if err != nil { - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v. 
%s", tval, tval, mtype.String(), err) - } - return reflect.ValueOf(d), nil - } - if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Float64 { - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) - } - if reflect.Indirect(reflect.New(mtype)).OverflowInt(val.Convert(reflect.TypeOf(int64(0))).Int()) { - return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String()) - } - - return val.Convert(mtype), nil - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - val := reflect.ValueOf(tval) - if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Float64 { - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) - } - - if val.Type().Kind() != reflect.Uint64 && val.Convert(reflect.TypeOf(int(1))).Int() < 0 { - return reflect.ValueOf(nil), fmt.Errorf("%v(%T) is negative so does not fit in %v", tval, tval, mtype.String()) - } - if reflect.Indirect(reflect.New(mtype)).OverflowUint(val.Convert(reflect.TypeOf(uint64(0))).Uint()) { - return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String()) - } - - return val.Convert(mtype), nil - case reflect.Float32, reflect.Float64: - val := reflect.ValueOf(tval) - if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Int64 { - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) - } - if reflect.Indirect(reflect.New(mtype)).OverflowFloat(val.Convert(reflect.TypeOf(float64(0))).Float()) { - return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String()) - } - - return val.Convert(mtype), nil - case reflect.Interface: - if mval1 == nil || mval1.IsNil() { - return reflect.ValueOf(tval), nil - } else { - ival := mval1.Elem() - return d.valueFromToml(mval1.Elem().Type(), t, &ival) - } - case reflect.Slice, reflect.Array: - if isOtherSequence(mtype) && isOtherSequence(reflect.TypeOf(t)) { - return d.valueFromOtherSliceI(mtype, t) - } - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v(%v)", tval, tval, mtype, mtype.Kind()) - default: - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v(%v)", tval, tval, mtype, mtype.Kind()) - } - } -} - -func (d *Decoder) unwrapPointer(mtype reflect.Type, tval interface{}, mval1 *reflect.Value) (reflect.Value, error) { - var melem *reflect.Value - - if mval1 != nil && !mval1.IsNil() && (mtype.Elem().Kind() == reflect.Struct || mtype.Elem().Kind() == reflect.Interface) { - elem := mval1.Elem() - melem = &elem - } - - val, err := d.valueFromToml(mtype.Elem(), tval, melem) - if err != nil { - return reflect.ValueOf(nil), err - } - mval := reflect.New(mtype.Elem()) - mval.Elem().Set(val) - return mval, nil -} - -func (d *Decoder) unmarshalText(tval interface{}, mval reflect.Value) error { - var buf bytes.Buffer - fmt.Fprint(&buf, tval) - return callTextUnmarshaler(mval, buf.Bytes()) -} - -func tomlOptions(vf reflect.StructField, an annotation) tomlOpts { - tag := vf.Tag.Get(an.tag) - parse := strings.Split(tag, ",") - var comment string - if c := vf.Tag.Get(an.comment); c != "" { - comment = c - } - commented, _ := strconv.ParseBool(vf.Tag.Get(an.commented)) - multiline, _ := strconv.ParseBool(vf.Tag.Get(an.multiline)) - literal, _ := strconv.ParseBool(vf.Tag.Get(an.literal)) - defaultValue := vf.Tag.Get(tagDefault) - result := tomlOpts{ - name: vf.Name, - nameFromTag: false, - comment: comment, - 
commented: commented, - multiline: multiline, - literal: literal, - include: true, - omitempty: false, - defaultValue: defaultValue, - } - if parse[0] != "" { - if parse[0] == "-" && len(parse) == 1 { - result.include = false - } else { - result.name = strings.Trim(parse[0], " ") - result.nameFromTag = true - } - } - if vf.PkgPath != "" { - result.include = false - } - if len(parse) > 1 && strings.Trim(parse[1], " ") == "omitempty" { - result.omitempty = true - } - if vf.Type.Kind() == reflect.Ptr { - result.omitempty = true - } - return result -} - -func isZero(val reflect.Value) bool { - switch val.Type().Kind() { - case reflect.Slice, reflect.Array, reflect.Map: - return val.Len() == 0 - default: - return reflect.DeepEqual(val.Interface(), reflect.Zero(val.Type()).Interface()) - } -} - -func formatError(err error, pos Position) error { - if err.Error()[0] == '(' { // Error already contains position information - return err - } - return fmt.Errorf("%s: %s", pos, err) -} - -// visitorState keeps track of which keys were unmarshaled. -type visitorState struct { - tree *Tree - path []string - keys map[string]struct{} - active bool -} - -func newVisitorState(tree *Tree) visitorState { - path, result := []string{}, map[string]struct{}{} - insertKeys(path, result, tree) - return visitorState{ - tree: tree, - path: path[:0], - keys: result, - active: true, - } -} - -func (s *visitorState) push(key string) { - if s.active { - s.path = append(s.path, key) - } -} - -func (s *visitorState) pop() { - if s.active { - s.path = s.path[:len(s.path)-1] - } -} - -func (s *visitorState) visit() { - if s.active { - delete(s.keys, strings.Join(s.path, ".")) - } -} - -func (s *visitorState) visitAll() { - if s.active { - for k := range s.keys { - if strings.HasPrefix(k, strings.Join(s.path, ".")) { - delete(s.keys, k) - } - } - } -} - -func (s *visitorState) validate() error { - if !s.active { - return nil - } - undecoded := make([]string, 0, len(s.keys)) - for key := range s.keys { - undecoded = append(undecoded, key) - } - sort.Strings(undecoded) - if len(undecoded) > 0 { - return fmt.Errorf("undecoded keys: %q", undecoded) - } - return nil -} - -func insertKeys(path []string, m map[string]struct{}, tree *Tree) { - for k, v := range tree.values { - switch node := v.(type) { - case []*Tree: - for i, item := range node { - insertKeys(append(path, k, strconv.Itoa(i)), m, item) - } - case *Tree: - insertKeys(append(path, k), m, node) - case *tomlValue: - m[strings.Join(append(path, k), ".")] = struct{}{} - } - } -} diff --git a/tools/vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_test.toml b/tools/vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_test.toml deleted file mode 100644 index 792b72ed7..000000000 --- a/tools/vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_test.toml +++ /dev/null @@ -1,39 +0,0 @@ -title = "TOML Marshal Testing" - -[basic_lists] - floats = [12.3,45.6,78.9] - bools = [true,false,true] - dates = [1979-05-27T07:32:00Z,1980-05-27T07:32:00Z] - ints = [8001,8001,8002] - uints = [5002,5003] - strings = ["One","Two","Three"] - -[[subdocptrs]] - name = "Second" - -[basic_map] - one = "one" - two = "two" - -[subdoc] - - [subdoc.second] - name = "Second" - - [subdoc.first] - name = "First" - -[basic] - uint = 5001 - bool = true - float = 123.4 - float64 = 123.456782132399 - int = 5000 - string = "Bite me" - date = 1979-05-27T07:32:00Z - -[[subdoclist]] - name = "List.First" - -[[subdoclist]] - name = "List.Second" diff --git 
a/tools/vendor/github.com/pelletier/go-toml/marshal_test.toml b/tools/vendor/github.com/pelletier/go-toml/marshal_test.toml deleted file mode 100644 index ba5e110bf..000000000 --- a/tools/vendor/github.com/pelletier/go-toml/marshal_test.toml +++ /dev/null @@ -1,39 +0,0 @@ -title = "TOML Marshal Testing" - -[basic] - bool = true - date = 1979-05-27T07:32:00Z - float = 123.4 - float64 = 123.456782132399 - int = 5000 - string = "Bite me" - uint = 5001 - -[basic_lists] - bools = [true,false,true] - dates = [1979-05-27T07:32:00Z,1980-05-27T07:32:00Z] - floats = [12.3,45.6,78.9] - ints = [8001,8001,8002] - strings = ["One","Two","Three"] - uints = [5002,5003] - -[basic_map] - one = "one" - two = "two" - -[subdoc] - - [subdoc.first] - name = "First" - - [subdoc.second] - name = "Second" - -[[subdoclist]] - name = "List.First" - -[[subdoclist]] - name = "List.Second" - -[[subdocptrs]] - name = "Second" diff --git a/tools/vendor/github.com/pelletier/go-toml/parser.go b/tools/vendor/github.com/pelletier/go-toml/parser.go deleted file mode 100644 index b3726d0dd..000000000 --- a/tools/vendor/github.com/pelletier/go-toml/parser.go +++ /dev/null @@ -1,507 +0,0 @@ -// TOML Parser. - -package toml - -import ( - "errors" - "fmt" - "math" - "reflect" - "strconv" - "strings" - "time" -) - -type tomlParser struct { - flowIdx int - flow []token - tree *Tree - currentTable []string - seenTableKeys []string -} - -type tomlParserStateFn func() tomlParserStateFn - -// Formats and panics an error message based on a token -func (p *tomlParser) raiseError(tok *token, msg string, args ...interface{}) { - panic(tok.Position.String() + ": " + fmt.Sprintf(msg, args...)) -} - -func (p *tomlParser) run() { - for state := p.parseStart; state != nil; { - state = state() - } -} - -func (p *tomlParser) peek() *token { - if p.flowIdx >= len(p.flow) { - return nil - } - return &p.flow[p.flowIdx] -} - -func (p *tomlParser) assume(typ tokenType) { - tok := p.getToken() - if tok == nil { - p.raiseError(tok, "was expecting token %s, but token stream is empty", tok) - } - if tok.typ != typ { - p.raiseError(tok, "was expecting token %s, but got %s instead", typ, tok) - } -} - -func (p *tomlParser) getToken() *token { - tok := p.peek() - if tok == nil { - return nil - } - p.flowIdx++ - return tok -} - -func (p *tomlParser) parseStart() tomlParserStateFn { - tok := p.peek() - - // end of stream, parsing is finished - if tok == nil { - return nil - } - - switch tok.typ { - case tokenDoubleLeftBracket: - return p.parseGroupArray - case tokenLeftBracket: - return p.parseGroup - case tokenKey: - return p.parseAssign - case tokenEOF: - return nil - case tokenError: - p.raiseError(tok, "parsing error: %s", tok.String()) - default: - p.raiseError(tok, "unexpected token %s", tok.typ) - } - return nil -} - -func (p *tomlParser) parseGroupArray() tomlParserStateFn { - startToken := p.getToken() // discard the [[ - key := p.getToken() - if key.typ != tokenKeyGroupArray { - p.raiseError(key, "unexpected token %s, was expecting a table array key", key) - } - - // get or create table array element at the indicated part in the path - keys, err := parseKey(key.val) - if err != nil { - p.raiseError(key, "invalid table array key: %s", err) - } - p.tree.createSubTree(keys[:len(keys)-1], startToken.Position) // create parent entries - destTree := p.tree.GetPath(keys) - var array []*Tree - if destTree == nil { - array = make([]*Tree, 0) - } else if target, ok := destTree.([]*Tree); ok && target != nil { - array = destTree.([]*Tree) - } else { - 
p.raiseError(key, "key %s is already assigned and not of type table array", key) - } - p.currentTable = keys - - // add a new tree to the end of the table array - newTree := newTree() - newTree.position = startToken.Position - array = append(array, newTree) - p.tree.SetPath(p.currentTable, array) - - // remove all keys that were children of this table array - prefix := key.val + "." - found := false - for ii := 0; ii < len(p.seenTableKeys); { - tableKey := p.seenTableKeys[ii] - if strings.HasPrefix(tableKey, prefix) { - p.seenTableKeys = append(p.seenTableKeys[:ii], p.seenTableKeys[ii+1:]...) - } else { - found = (tableKey == key.val) - ii++ - } - } - - // keep this key name from use by other kinds of assignments - if !found { - p.seenTableKeys = append(p.seenTableKeys, key.val) - } - - // move to next parser state - p.assume(tokenDoubleRightBracket) - return p.parseStart -} - -func (p *tomlParser) parseGroup() tomlParserStateFn { - startToken := p.getToken() // discard the [ - key := p.getToken() - if key.typ != tokenKeyGroup { - p.raiseError(key, "unexpected token %s, was expecting a table key", key) - } - for _, item := range p.seenTableKeys { - if item == key.val { - p.raiseError(key, "duplicated tables") - } - } - - p.seenTableKeys = append(p.seenTableKeys, key.val) - keys, err := parseKey(key.val) - if err != nil { - p.raiseError(key, "invalid table array key: %s", err) - } - if err := p.tree.createSubTree(keys, startToken.Position); err != nil { - p.raiseError(key, "%s", err) - } - destTree := p.tree.GetPath(keys) - if target, ok := destTree.(*Tree); ok && target != nil && target.inline { - p.raiseError(key, "could not re-define exist inline table or its sub-table : %s", - strings.Join(keys, ".")) - } - p.assume(tokenRightBracket) - p.currentTable = keys - return p.parseStart -} - -func (p *tomlParser) parseAssign() tomlParserStateFn { - key := p.getToken() - p.assume(tokenEqual) - - parsedKey, err := parseKey(key.val) - if err != nil { - p.raiseError(key, "invalid key: %s", err.Error()) - } - - value := p.parseRvalue() - var tableKey []string - if len(p.currentTable) > 0 { - tableKey = p.currentTable - } else { - tableKey = []string{} - } - - prefixKey := parsedKey[0 : len(parsedKey)-1] - tableKey = append(tableKey, prefixKey...) 
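
Taken together, parseGroupArray and parseAssign give [[table arrays]] and dotted keys their semantics: each [[...]] header appends a new subtree, and dotted assignment keys create intermediate tables on demand. A quick illustration of the resulting tree shapes (document contents are illustrative):

package main

import (
	"fmt"

	toml "github.com/pelletier/go-toml"
)

func main() {
	tree, err := toml.Load(`
[[servers]]
name = "alpha"

[[servers]]
name = "beta"

[meta]
owner.name = "Ada"
`)
	if err != nil {
		fmt.Println("load error:", err)
		return
	}
	fmt.Println(tree.Get("meta.owner.name")) // Ada
	servers := tree.Get("servers").([]*toml.Tree)
	fmt.Println(len(servers), servers[1].Get("name")) // 2 beta
}
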
- - // find the table to assign, looking out for arrays of tables - var targetNode *Tree - switch node := p.tree.GetPath(tableKey).(type) { - case []*Tree: - targetNode = node[len(node)-1] - case *Tree: - targetNode = node - case nil: - // create intermediate - if err := p.tree.createSubTree(tableKey, key.Position); err != nil { - p.raiseError(key, "could not create intermediate group: %s", err) - } - targetNode = p.tree.GetPath(tableKey).(*Tree) - default: - p.raiseError(key, "Unknown table type for path: %s", - strings.Join(tableKey, ".")) - } - - if targetNode.inline { - p.raiseError(key, "could not add key or sub-table to exist inline table or its sub-table : %s", - strings.Join(tableKey, ".")) - } - - // assign value to the found table - keyVal := parsedKey[len(parsedKey)-1] - localKey := []string{keyVal} - finalKey := append(tableKey, keyVal) - if targetNode.GetPath(localKey) != nil { - p.raiseError(key, "The following key was defined twice: %s", - strings.Join(finalKey, ".")) - } - var toInsert interface{} - - switch value.(type) { - case *Tree, []*Tree: - toInsert = value - default: - toInsert = &tomlValue{value: value, position: key.Position} - } - targetNode.values[keyVal] = toInsert - return p.parseStart -} - -var errInvalidUnderscore = errors.New("invalid use of _ in number") - -func numberContainsInvalidUnderscore(value string) error { - // For large numbers, you may use underscores between digits to enhance - // readability. Each underscore must be surrounded by at least one digit on - // each side. - - hasBefore := false - for idx, r := range value { - if r == '_' { - if !hasBefore || idx+1 >= len(value) { - // can't end with an underscore - return errInvalidUnderscore - } - } - hasBefore = isDigit(r) - } - return nil -} - -var errInvalidUnderscoreHex = errors.New("invalid use of _ in hex number") - -func hexNumberContainsInvalidUnderscore(value string) error { - hasBefore := false - for idx, r := range value { - if r == '_' { - if !hasBefore || idx+1 >= len(value) { - // can't end with an underscore - return errInvalidUnderscoreHex - } - } - hasBefore = isHexDigit(r) - } - return nil -} - -func cleanupNumberToken(value string) string { - cleanedVal := strings.Replace(value, "_", "", -1) - return cleanedVal -} - -func (p *tomlParser) parseRvalue() interface{} { - tok := p.getToken() - if tok == nil || tok.typ == tokenEOF { - p.raiseError(tok, "expecting a value") - } - - switch tok.typ { - case tokenString: - return tok.val - case tokenTrue: - return true - case tokenFalse: - return false - case tokenInf: - if tok.val[0] == '-' { - return math.Inf(-1) - } - return math.Inf(1) - case tokenNan: - return math.NaN() - case tokenInteger: - cleanedVal := cleanupNumberToken(tok.val) - base := 10 - s := cleanedVal - checkInvalidUnderscore := numberContainsInvalidUnderscore - if len(cleanedVal) >= 3 && cleanedVal[0] == '0' { - switch cleanedVal[1] { - case 'x': - checkInvalidUnderscore = hexNumberContainsInvalidUnderscore - base = 16 - case 'o': - base = 8 - case 'b': - base = 2 - default: - panic("invalid base") // the lexer should catch this first - } - s = cleanedVal[2:] - } - - err := checkInvalidUnderscore(tok.val) - if err != nil { - p.raiseError(tok, "%s", err) - } - - var val interface{} - val, err = strconv.ParseInt(s, base, 64) - if err == nil { - return val - } - - if s[0] != '-' { - if val, err = strconv.ParseUint(s, base, 64); err == nil { - return val - } - } - p.raiseError(tok, "%s", err) - case tokenFloat: - err := numberContainsInvalidUnderscore(tok.val) - if err != 
nil { - p.raiseError(tok, "%s", err) - } - cleanedVal := cleanupNumberToken(tok.val) - val, err := strconv.ParseFloat(cleanedVal, 64) - if err != nil { - p.raiseError(tok, "%s", err) - } - return val - case tokenLocalTime: - val, err := ParseLocalTime(tok.val) - if err != nil { - p.raiseError(tok, "%s", err) - } - return val - case tokenLocalDate: - // a local date may be followed by: - // * nothing: this is a local date - // * a local time: this is a local date-time - - next := p.peek() - if next == nil || next.typ != tokenLocalTime { - val, err := ParseLocalDate(tok.val) - if err != nil { - p.raiseError(tok, "%s", err) - } - return val - } - - localDate := tok - localTime := p.getToken() - - next = p.peek() - if next == nil || next.typ != tokenTimeOffset { - v := localDate.val + "T" + localTime.val - val, err := ParseLocalDateTime(v) - if err != nil { - p.raiseError(tok, "%s", err) - } - return val - } - - offset := p.getToken() - - layout := time.RFC3339Nano - v := localDate.val + "T" + localTime.val + offset.val - val, err := time.ParseInLocation(layout, v, time.UTC) - if err != nil { - p.raiseError(tok, "%s", err) - } - return val - case tokenLeftBracket: - return p.parseArray() - case tokenLeftCurlyBrace: - return p.parseInlineTable() - case tokenEqual: - p.raiseError(tok, "cannot have multiple equals for the same key") - case tokenError: - p.raiseError(tok, "%s", tok) - default: - panic(fmt.Errorf("unhandled token: %v", tok)) - } - - return nil -} - -func tokenIsComma(t *token) bool { - return t != nil && t.typ == tokenComma -} - -func (p *tomlParser) parseInlineTable() *Tree { - tree := newTree() - var previous *token -Loop: - for { - follow := p.peek() - if follow == nil || follow.typ == tokenEOF { - p.raiseError(follow, "unterminated inline table") - } - switch follow.typ { - case tokenRightCurlyBrace: - p.getToken() - break Loop - case tokenKey, tokenInteger, tokenString: - if !tokenIsComma(previous) && previous != nil { - p.raiseError(follow, "comma expected between fields in inline table") - } - key := p.getToken() - p.assume(tokenEqual) - - parsedKey, err := parseKey(key.val) - if err != nil { - p.raiseError(key, "invalid key: %s", err) - } - - value := p.parseRvalue() - tree.SetPath(parsedKey, value) - case tokenComma: - if tokenIsComma(previous) { - p.raiseError(follow, "need field between two commas in inline table") - } - p.getToken() - default: - p.raiseError(follow, "unexpected token type in inline table: %s", follow.String()) - } - previous = follow - } - if tokenIsComma(previous) { - p.raiseError(previous, "trailing comma at the end of inline table") - } - tree.inline = true - return tree -} - -func (p *tomlParser) parseArray() interface{} { - var array []interface{} - arrayType := reflect.TypeOf(newTree()) - for { - follow := p.peek() - if follow == nil || follow.typ == tokenEOF { - p.raiseError(follow, "unterminated array") - } - if follow.typ == tokenRightBracket { - p.getToken() - break - } - val := p.parseRvalue() - if reflect.TypeOf(val) != arrayType { - arrayType = nil - } - array = append(array, val) - follow = p.peek() - if follow == nil || follow.typ == tokenEOF { - p.raiseError(follow, "unterminated array") - } - if follow.typ != tokenRightBracket && follow.typ != tokenComma { - p.raiseError(follow, "missing comma") - } - if follow.typ == tokenComma { - p.getToken() - } - } - - // if the array is a mixed-type array or its length is 0, - // don't convert it to a table array - if len(array) <= 0 { - arrayType = nil - } - // An array of Trees is actually an 
array of inline - // tables, which is a shorthand for a table array. If the - // array was not converted from []interface{} to []*Tree, - // the two notations would not be equivalent. - if arrayType == reflect.TypeOf(newTree()) { - tomlArray := make([]*Tree, len(array)) - for i, v := range array { - tomlArray[i] = v.(*Tree) - } - return tomlArray - } - return array -} - -func parseToml(flow []token) *Tree { - result := newTree() - result.position = Position{1, 1} - parser := &tomlParser{ - flowIdx: 0, - flow: flow, - tree: result, - currentTable: make([]string, 0), - seenTableKeys: make([]string, 0), - } - parser.run() - return result -} diff --git a/tools/vendor/github.com/pelletier/go-toml/position.go b/tools/vendor/github.com/pelletier/go-toml/position.go deleted file mode 100644 index c17bff87b..000000000 --- a/tools/vendor/github.com/pelletier/go-toml/position.go +++ /dev/null @@ -1,29 +0,0 @@ -// Position support for go-toml - -package toml - -import ( - "fmt" -) - -// Position of a document element within a TOML document. -// -// Line and Col are both 1-indexed positions for the element's line number and -// column number, respectively. Values of zero or less will cause Invalid(), -// to return true. -type Position struct { - Line int // line within the document - Col int // column within the line -} - -// String representation of the position. -// Displays 1-indexed line and column numbers. -func (p Position) String() string { - return fmt.Sprintf("(%d, %d)", p.Line, p.Col) -} - -// Invalid returns whether or not the position is valid (i.e. with negative or -// null values) -func (p Position) Invalid() bool { - return p.Line <= 0 || p.Col <= 0 -} diff --git a/tools/vendor/github.com/pelletier/go-toml/token.go b/tools/vendor/github.com/pelletier/go-toml/token.go deleted file mode 100644 index b437fdd3b..000000000 --- a/tools/vendor/github.com/pelletier/go-toml/token.go +++ /dev/null @@ -1,136 +0,0 @@ -package toml - -import "fmt" - -// Define tokens -type tokenType int - -const ( - eof = -(iota + 1) -) - -const ( - tokenError tokenType = iota - tokenEOF - tokenComment - tokenKey - tokenString - tokenInteger - tokenTrue - tokenFalse - tokenFloat - tokenInf - tokenNan - tokenEqual - tokenLeftBracket - tokenRightBracket - tokenLeftCurlyBrace - tokenRightCurlyBrace - tokenLeftParen - tokenRightParen - tokenDoubleLeftBracket - tokenDoubleRightBracket - tokenLocalDate - tokenLocalTime - tokenTimeOffset - tokenKeyGroup - tokenKeyGroupArray - tokenComma - tokenColon - tokenDollar - tokenStar - tokenQuestion - tokenDot - tokenDotDot - tokenEOL -) - -var tokenTypeNames = []string{ - "Error", - "EOF", - "Comment", - "Key", - "String", - "Integer", - "True", - "False", - "Float", - "Inf", - "NaN", - "=", - "[", - "]", - "{", - "}", - "(", - ")", - "]]", - "[[", - "LocalDate", - "LocalTime", - "TimeOffset", - "KeyGroup", - "KeyGroupArray", - ",", - ":", - "$", - "*", - "?", - ".", - "..", - "EOL", -} - -type token struct { - Position - typ tokenType - val string -} - -func (tt tokenType) String() string { - idx := int(tt) - if idx < len(tokenTypeNames) { - return tokenTypeNames[idx] - } - return "Unknown" -} - -func (t token) String() string { - switch t.typ { - case tokenEOF: - return "EOF" - case tokenError: - return t.val - } - - return fmt.Sprintf("%q", t.val) -} - -func isSpace(r rune) bool { - return r == ' ' || r == '\t' -} - -func isAlphanumeric(r rune) bool { - return 'a' <= r && r <= 'z' || 'A' <= r && r <= 'Z' || r == '_' -} - -func isKeyChar(r rune) bool { - // Keys start with the 
first character that isn't whitespace or [ and end - // with the last non-whitespace character before the equals sign. Keys - // cannot contain a # character." - return !(r == '\r' || r == '\n' || r == eof || r == '=') -} - -func isKeyStartChar(r rune) bool { - return !(isSpace(r) || r == '\r' || r == '\n' || r == eof || r == '[') -} - -func isDigit(r rune) bool { - return '0' <= r && r <= '9' -} - -func isHexDigit(r rune) bool { - return isDigit(r) || - (r >= 'a' && r <= 'f') || - (r >= 'A' && r <= 'F') -} diff --git a/tools/vendor/github.com/pelletier/go-toml/toml.go b/tools/vendor/github.com/pelletier/go-toml/toml.go deleted file mode 100644 index 5541b941f..000000000 --- a/tools/vendor/github.com/pelletier/go-toml/toml.go +++ /dev/null @@ -1,533 +0,0 @@ -package toml - -import ( - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "runtime" - "strings" -) - -type tomlValue struct { - value interface{} // string, int64, uint64, float64, bool, time.Time, [] of any of this list - comment string - commented bool - multiline bool - literal bool - position Position -} - -// Tree is the result of the parsing of a TOML file. -type Tree struct { - values map[string]interface{} // string -> *tomlValue, *Tree, []*Tree - comment string - commented bool - inline bool - position Position -} - -func newTree() *Tree { - return newTreeWithPosition(Position{}) -} - -func newTreeWithPosition(pos Position) *Tree { - return &Tree{ - values: make(map[string]interface{}), - position: pos, - } -} - -// TreeFromMap initializes a new Tree object using the given map. -func TreeFromMap(m map[string]interface{}) (*Tree, error) { - result, err := toTree(m) - if err != nil { - return nil, err - } - return result.(*Tree), nil -} - -// Position returns the position of the tree. -func (t *Tree) Position() Position { - return t.position -} - -// Has returns a boolean indicating if the given key exists. -func (t *Tree) Has(key string) bool { - if key == "" { - return false - } - return t.HasPath(strings.Split(key, ".")) -} - -// HasPath returns true if the given path of keys exists, false otherwise. -func (t *Tree) HasPath(keys []string) bool { - return t.GetPath(keys) != nil -} - -// Keys returns the keys of the toplevel tree (does not recurse). -func (t *Tree) Keys() []string { - keys := make([]string, len(t.values)) - i := 0 - for k := range t.values { - keys[i] = k - i++ - } - return keys -} - -// Get the value at key in the Tree. -// Key is a dot-separated path (e.g. a.b.c) without single/double quoted strings. -// If you need to retrieve non-bare keys, use GetPath. -// Returns nil if the path does not exist in the tree. -// If keys is of length zero, the current tree is returned. -func (t *Tree) Get(key string) interface{} { - if key == "" { - return t - } - return t.GetPath(strings.Split(key, ".")) -} - -// GetPath returns the element in the tree indicated by 'keys'. -// If keys is of length zero, the current tree is returned. 
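
In practice, Get and GetPath are the primary read APIs over a parsed Tree. A short sketch with an illustrative document:

package main

import (
	"fmt"

	toml "github.com/pelletier/go-toml"
)

func main() {
	tree, err := toml.Load(`
[database]
server = "192.168.1.1"
ports = [8001, 8001, 8002]
`)
	if err != nil {
		fmt.Println("load error:", err)
		return
	}
	fmt.Println(tree.Get("database.server"))                 // 192.168.1.1
	fmt.Println(tree.GetPath([]string{"database", "ports"})) // [8001 8001 8002]
	fmt.Println(tree.Has("database.missing"))                // false
}
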
-func (t *Tree) GetPath(keys []string) interface{} { - if len(keys) == 0 { - return t - } - subtree := t - for _, intermediateKey := range keys[:len(keys)-1] { - value, exists := subtree.values[intermediateKey] - if !exists { - return nil - } - switch node := value.(type) { - case *Tree: - subtree = node - case []*Tree: - // go to most recent element - if len(node) == 0 { - return nil - } - subtree = node[len(node)-1] - default: - return nil // cannot navigate through other node types - } - } - // branch based on final node type - switch node := subtree.values[keys[len(keys)-1]].(type) { - case *tomlValue: - return node.value - default: - return node - } -} - -// GetArray returns the value at key in the Tree. -// It returns []string, []int64, etc type if key has homogeneous lists -// Key is a dot-separated path (e.g. a.b.c) without single/double quoted strings. -// Returns nil if the path does not exist in the tree. -// If keys is of length zero, the current tree is returned. -func (t *Tree) GetArray(key string) interface{} { - if key == "" { - return t - } - return t.GetArrayPath(strings.Split(key, ".")) -} - -// GetArrayPath returns the element in the tree indicated by 'keys'. -// If keys is of length zero, the current tree is returned. -func (t *Tree) GetArrayPath(keys []string) interface{} { - if len(keys) == 0 { - return t - } - subtree := t - for _, intermediateKey := range keys[:len(keys)-1] { - value, exists := subtree.values[intermediateKey] - if !exists { - return nil - } - switch node := value.(type) { - case *Tree: - subtree = node - case []*Tree: - // go to most recent element - if len(node) == 0 { - return nil - } - subtree = node[len(node)-1] - default: - return nil // cannot navigate through other node types - } - } - // branch based on final node type - switch node := subtree.values[keys[len(keys)-1]].(type) { - case *tomlValue: - switch n := node.value.(type) { - case []interface{}: - return getArray(n) - default: - return node.value - } - default: - return node - } -} - -// if homogeneous array, then return slice type object over []interface{} -func getArray(n []interface{}) interface{} { - var s []string - var i64 []int64 - var f64 []float64 - var bl []bool - for _, value := range n { - switch v := value.(type) { - case string: - s = append(s, v) - case int64: - i64 = append(i64, v) - case float64: - f64 = append(f64, v) - case bool: - bl = append(bl, v) - default: - return n - } - } - if len(s) == len(n) { - return s - } else if len(i64) == len(n) { - return i64 - } else if len(f64) == len(n) { - return f64 - } else if len(bl) == len(n) { - return bl - } - return n -} - -// GetPosition returns the position of the given key. -func (t *Tree) GetPosition(key string) Position { - if key == "" { - return t.position - } - return t.GetPositionPath(strings.Split(key, ".")) -} - -// SetPositionPath sets the position of element in the tree indicated by 'keys'. -// If keys is of length zero, the current tree position is set. 
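
A sketch of GetArray's typed-slice behavior described above: a homogeneous array comes back as a typed slice rather than []interface{} (document contents are illustrative):

package main

import (
	"fmt"

	toml "github.com/pelletier/go-toml"
)

func main() {
	tree, err := toml.Load(`ports = [8001, 8001, 8002]`)
	if err != nil {
		fmt.Println("load error:", err)
		return
	}
	// All elements are int64, so the result is a []int64.
	ports := tree.GetArray("ports").([]int64)
	fmt.Println(ports) // [8001 8001 8002]
}
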
-func (t *Tree) SetPositionPath(keys []string, pos Position) { - if len(keys) == 0 { - t.position = pos - return - } - subtree := t - for _, intermediateKey := range keys[:len(keys)-1] { - value, exists := subtree.values[intermediateKey] - if !exists { - return - } - switch node := value.(type) { - case *Tree: - subtree = node - case []*Tree: - // go to most recent element - if len(node) == 0 { - return - } - subtree = node[len(node)-1] - default: - return - } - } - // branch based on final node type - switch node := subtree.values[keys[len(keys)-1]].(type) { - case *tomlValue: - node.position = pos - return - case *Tree: - node.position = pos - return - case []*Tree: - // go to most recent element - if len(node) == 0 { - return - } - node[len(node)-1].position = pos - return - } -} - -// GetPositionPath returns the element in the tree indicated by 'keys'. -// If keys is of length zero, the current tree is returned. -func (t *Tree) GetPositionPath(keys []string) Position { - if len(keys) == 0 { - return t.position - } - subtree := t - for _, intermediateKey := range keys[:len(keys)-1] { - value, exists := subtree.values[intermediateKey] - if !exists { - return Position{0, 0} - } - switch node := value.(type) { - case *Tree: - subtree = node - case []*Tree: - // go to most recent element - if len(node) == 0 { - return Position{0, 0} - } - subtree = node[len(node)-1] - default: - return Position{0, 0} - } - } - // branch based on final node type - switch node := subtree.values[keys[len(keys)-1]].(type) { - case *tomlValue: - return node.position - case *Tree: - return node.position - case []*Tree: - // go to most recent element - if len(node) == 0 { - return Position{0, 0} - } - return node[len(node)-1].position - default: - return Position{0, 0} - } -} - -// GetDefault works like Get but with a default value -func (t *Tree) GetDefault(key string, def interface{}) interface{} { - val := t.Get(key) - if val == nil { - return def - } - return val -} - -// SetOptions arguments are supplied to the SetWithOptions and SetPathWithOptions functions to modify marshalling behaviour. -// The default values within the struct are valid default options. -type SetOptions struct { - Comment string - Commented bool - Multiline bool - Literal bool -} - -// SetWithOptions is the same as Set, but allows you to provide formatting -// instructions to the key, that will be used by Marshal(). -func (t *Tree) SetWithOptions(key string, opts SetOptions, value interface{}) { - t.SetPathWithOptions(strings.Split(key, "."), opts, value) -} - -// SetPathWithOptions is the same as SetPath, but allows you to provide -// formatting instructions to the key, that will be reused by Marshal(). 
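
SetWithOptions is how formatting metadata such as comments gets attached to a key so that Marshal() renders it. A minimal sketch:

package main

import (
	"fmt"

	toml "github.com/pelletier/go-toml"
)

func main() {
	tree, err := toml.TreeFromMap(map[string]interface{}{})
	if err != nil {
		fmt.Println("tree error:", err)
		return
	}
	// Attach a comment that Marshal() renders above the key.
	tree.SetWithOptions("answer", toml.SetOptions{Comment: "the answer"}, int64(42))
	out, err := tree.Marshal()
	if err != nil {
		fmt.Println("marshal error:", err)
		return
	}
	fmt.Print(string(out))
	// # the answer
	// answer = 42
}
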
-func (t *Tree) SetPathWithOptions(keys []string, opts SetOptions, value interface{}) { - subtree := t - for i, intermediateKey := range keys[:len(keys)-1] { - nextTree, exists := subtree.values[intermediateKey] - if !exists { - nextTree = newTreeWithPosition(Position{Line: t.position.Line + i, Col: t.position.Col}) - subtree.values[intermediateKey] = nextTree // add new element here - } - switch node := nextTree.(type) { - case *Tree: - subtree = node - case []*Tree: - // go to most recent element - if len(node) == 0 { - // create element if it does not exist - node = append(node, newTreeWithPosition(Position{Line: t.position.Line + i, Col: t.position.Col})) - subtree.values[intermediateKey] = node - } - subtree = node[len(node)-1] - } - } - - var toInsert interface{} - - switch v := value.(type) { - case *Tree: - v.comment = opts.Comment - v.commented = opts.Commented - toInsert = value - case []*Tree: - for i := range v { - v[i].commented = opts.Commented - } - toInsert = value - case *tomlValue: - v.comment = opts.Comment - v.commented = opts.Commented - v.multiline = opts.Multiline - v.literal = opts.Literal - toInsert = v - default: - toInsert = &tomlValue{value: value, - comment: opts.Comment, - commented: opts.Commented, - multiline: opts.Multiline, - literal: opts.Literal, - position: Position{Line: subtree.position.Line + len(subtree.values) + 1, Col: subtree.position.Col}} - } - - subtree.values[keys[len(keys)-1]] = toInsert -} - -// Set an element in the tree. -// Key is a dot-separated path (e.g. a.b.c). -// Creates all necessary intermediate trees, if needed. -func (t *Tree) Set(key string, value interface{}) { - t.SetWithComment(key, "", false, value) -} - -// SetWithComment is the same as Set, but allows you to provide comment -// information to the key, that will be reused by Marshal(). -func (t *Tree) SetWithComment(key string, comment string, commented bool, value interface{}) { - t.SetPathWithComment(strings.Split(key, "."), comment, commented, value) -} - -// SetPath sets an element in the tree. -// Keys is an array of path elements (e.g. {"a","b","c"}). -// Creates all necessary intermediate trees, if needed. -func (t *Tree) SetPath(keys []string, value interface{}) { - t.SetPathWithComment(keys, "", false, value) -} - -// SetPathWithComment is the same as SetPath, but allows you to provide comment -// information to the key, that will be reused by Marshal(). -func (t *Tree) SetPathWithComment(keys []string, comment string, commented bool, value interface{}) { - t.SetPathWithOptions(keys, SetOptions{Comment: comment, Commented: commented}, value) -} - -// Delete removes a key from the tree. -// Key is a dot-separated path (e.g. a.b.c). -func (t *Tree) Delete(key string) error { - keys, err := parseKey(key) - if err != nil { - return err - } - return t.DeletePath(keys) -} - -// DeletePath removes a key from the tree. -// Keys is an array of path elements (e.g. {"a","b","c"}). -func (t *Tree) DeletePath(keys []string) error { - keyLen := len(keys) - if keyLen == 1 { - delete(t.values, keys[0]) - return nil - } - tree := t.GetPath(keys[:keyLen-1]) - item := keys[keyLen-1] - switch node := tree.(type) { - case *Tree: - delete(node.values, item) - return nil - } - return errors.New("no such key to delete") -} - -// createSubTree takes a tree and a key and create the necessary intermediate -// subtrees to create a subtree at that point. In-place. -// -// e.g. 
passing a.b.c will create (assuming tree is empty) tree[a], tree[a][b] -// and tree[a][b][c] -// -// Returns nil on success, error object on failure -func (t *Tree) createSubTree(keys []string, pos Position) error { - subtree := t - for i, intermediateKey := range keys { - nextTree, exists := subtree.values[intermediateKey] - if !exists { - tree := newTreeWithPosition(Position{Line: t.position.Line + i, Col: t.position.Col}) - tree.position = pos - tree.inline = subtree.inline - subtree.values[intermediateKey] = tree - nextTree = tree - } - - switch node := nextTree.(type) { - case []*Tree: - subtree = node[len(node)-1] - case *Tree: - subtree = node - default: - return fmt.Errorf("unknown type for path %s (%s): %T (%#v)", - strings.Join(keys, "."), intermediateKey, nextTree, nextTree) - } - } - return nil -} - -// LoadBytes creates a Tree from a []byte. -func LoadBytes(b []byte) (tree *Tree, err error) { - defer func() { - if r := recover(); r != nil { - if _, ok := r.(runtime.Error); ok { - panic(r) - } - err = fmt.Errorf("%s", r) - } - }() - - if len(b) >= 4 && (hasUTF32BigEndianBOM4(b) || hasUTF32LittleEndianBOM4(b)) { - b = b[4:] - } else if len(b) >= 3 && hasUTF8BOM3(b) { - b = b[3:] - } else if len(b) >= 2 && (hasUTF16BigEndianBOM2(b) || hasUTF16LittleEndianBOM2(b)) { - b = b[2:] - } - - tree = parseToml(lexToml(b)) - return -} - -func hasUTF16BigEndianBOM2(b []byte) bool { - return b[0] == 0xFE && b[1] == 0xFF -} - -func hasUTF16LittleEndianBOM2(b []byte) bool { - return b[0] == 0xFF && b[1] == 0xFE -} - -func hasUTF8BOM3(b []byte) bool { - return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF -} - -func hasUTF32BigEndianBOM4(b []byte) bool { - return b[0] == 0x00 && b[1] == 0x00 && b[2] == 0xFE && b[3] == 0xFF -} - -func hasUTF32LittleEndianBOM4(b []byte) bool { - return b[0] == 0xFF && b[1] == 0xFE && b[2] == 0x00 && b[3] == 0x00 -} - -// LoadReader creates a Tree from any io.Reader. -func LoadReader(reader io.Reader) (tree *Tree, err error) { - inputBytes, err := ioutil.ReadAll(reader) - if err != nil { - return - } - tree, err = LoadBytes(inputBytes) - return -} - -// Load creates a Tree from a string. -func Load(content string) (tree *Tree, err error) { - return LoadBytes([]byte(content)) -} - -// LoadFile creates a Tree from a file. -func LoadFile(path string) (tree *Tree, err error) { - file, err := os.Open(path) - if err != nil { - return nil, err - } - defer file.Close() - return LoadReader(file) -} diff --git a/tools/vendor/github.com/pelletier/go-toml/tomlpub.go b/tools/vendor/github.com/pelletier/go-toml/tomlpub.go deleted file mode 100644 index 4136b4625..000000000 --- a/tools/vendor/github.com/pelletier/go-toml/tomlpub.go +++ /dev/null @@ -1,71 +0,0 @@ -package toml - -// PubTOMLValue wrapping tomlValue in order to access all properties from outside. 
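
Since LoadBytes (above) strips UTF-8/UTF-16/UTF-32 byte-order marks before lexing, documents saved by BOM-adding editors still parse. For instance:

package main

import (
	"fmt"

	toml "github.com/pelletier/go-toml"
)

func main() {
	bom := []byte{0xEF, 0xBB, 0xBF} // UTF-8 BOM
	tree, err := toml.LoadBytes(append(bom, []byte(`title = "ok"`)...))
	if err != nil {
		fmt.Println("load error:", err)
		return
	}
	fmt.Println(tree.Get("title")) // ok
}
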
-type PubTOMLValue = tomlValue - -func (ptv *PubTOMLValue) Value() interface{} { - return ptv.value -} -func (ptv *PubTOMLValue) Comment() string { - return ptv.comment -} -func (ptv *PubTOMLValue) Commented() bool { - return ptv.commented -} -func (ptv *PubTOMLValue) Multiline() bool { - return ptv.multiline -} -func (ptv *PubTOMLValue) Position() Position { - return ptv.position -} - -func (ptv *PubTOMLValue) SetValue(v interface{}) { - ptv.value = v -} -func (ptv *PubTOMLValue) SetComment(s string) { - ptv.comment = s -} -func (ptv *PubTOMLValue) SetCommented(c bool) { - ptv.commented = c -} -func (ptv *PubTOMLValue) SetMultiline(m bool) { - ptv.multiline = m -} -func (ptv *PubTOMLValue) SetPosition(p Position) { - ptv.position = p -} - -// PubTree wrapping Tree in order to access all properties from outside. -type PubTree = Tree - -func (pt *PubTree) Values() map[string]interface{} { - return pt.values -} - -func (pt *PubTree) Comment() string { - return pt.comment -} - -func (pt *PubTree) Commented() bool { - return pt.commented -} - -func (pt *PubTree) Inline() bool { - return pt.inline -} - -func (pt *PubTree) SetValues(v map[string]interface{}) { - pt.values = v -} - -func (pt *PubTree) SetComment(c string) { - pt.comment = c -} - -func (pt *PubTree) SetCommented(c bool) { - pt.commented = c -} - -func (pt *PubTree) SetInline(i bool) { - pt.inline = i -} diff --git a/tools/vendor/github.com/pelletier/go-toml/tomltree_create.go b/tools/vendor/github.com/pelletier/go-toml/tomltree_create.go deleted file mode 100644 index 80353500a..000000000 --- a/tools/vendor/github.com/pelletier/go-toml/tomltree_create.go +++ /dev/null @@ -1,155 +0,0 @@ -package toml - -import ( - "fmt" - "reflect" - "time" -) - -var kindToType = [reflect.String + 1]reflect.Type{ - reflect.Bool: reflect.TypeOf(true), - reflect.String: reflect.TypeOf(""), - reflect.Float32: reflect.TypeOf(float64(1)), - reflect.Float64: reflect.TypeOf(float64(1)), - reflect.Int: reflect.TypeOf(int64(1)), - reflect.Int8: reflect.TypeOf(int64(1)), - reflect.Int16: reflect.TypeOf(int64(1)), - reflect.Int32: reflect.TypeOf(int64(1)), - reflect.Int64: reflect.TypeOf(int64(1)), - reflect.Uint: reflect.TypeOf(uint64(1)), - reflect.Uint8: reflect.TypeOf(uint64(1)), - reflect.Uint16: reflect.TypeOf(uint64(1)), - reflect.Uint32: reflect.TypeOf(uint64(1)), - reflect.Uint64: reflect.TypeOf(uint64(1)), -} - -// typeFor returns a reflect.Type for a reflect.Kind, or nil if none is found. 
-// supported values: -// string, bool, int64, uint64, float64, time.Time, int, int8, int16, int32, uint, uint8, uint16, uint32, float32 -func typeFor(k reflect.Kind) reflect.Type { - if k > 0 && int(k) < len(kindToType) { - return kindToType[k] - } - return nil -} - -func simpleValueCoercion(object interface{}) (interface{}, error) { - switch original := object.(type) { - case string, bool, int64, uint64, float64, time.Time: - return original, nil - case int: - return int64(original), nil - case int8: - return int64(original), nil - case int16: - return int64(original), nil - case int32: - return int64(original), nil - case uint: - return uint64(original), nil - case uint8: - return uint64(original), nil - case uint16: - return uint64(original), nil - case uint32: - return uint64(original), nil - case float32: - return float64(original), nil - case fmt.Stringer: - return original.String(), nil - case []interface{}: - value := reflect.ValueOf(original) - length := value.Len() - arrayValue := reflect.MakeSlice(value.Type(), 0, length) - for i := 0; i < length; i++ { - val := value.Index(i).Interface() - simpleValue, err := simpleValueCoercion(val) - if err != nil { - return nil, err - } - arrayValue = reflect.Append(arrayValue, reflect.ValueOf(simpleValue)) - } - return arrayValue.Interface(), nil - default: - return nil, fmt.Errorf("cannot convert type %T to Tree", object) - } -} - -func sliceToTree(object interface{}) (interface{}, error) { - // arrays are a bit tricky, since they can represent either a - // collection of simple values, which is represented by one - // *tomlValue, or an array of tables, which is represented by an - // array of *Tree. - - // holding the assumption that this function is called from toTree only when value.Kind() is Array or Slice - value := reflect.ValueOf(object) - insideType := value.Type().Elem() - length := value.Len() - if length > 0 { - insideType = reflect.ValueOf(value.Index(0).Interface()).Type() - } - if insideType.Kind() == reflect.Map { - // this is considered as an array of tables - tablesArray := make([]*Tree, 0, length) - for i := 0; i < length; i++ { - table := value.Index(i) - tree, err := toTree(table.Interface()) - if err != nil { - return nil, err - } - tablesArray = append(tablesArray, tree.(*Tree)) - } - return tablesArray, nil - } - - sliceType := typeFor(insideType.Kind()) - if sliceType == nil { - sliceType = insideType - } - - arrayValue := reflect.MakeSlice(reflect.SliceOf(sliceType), 0, length) - - for i := 0; i < length; i++ { - val := value.Index(i).Interface() - simpleValue, err := simpleValueCoercion(val) - if err != nil { - return nil, err - } - arrayValue = reflect.Append(arrayValue, reflect.ValueOf(simpleValue)) - } - return &tomlValue{value: arrayValue.Interface(), position: Position{}}, nil -} - -func toTree(object interface{}) (interface{}, error) { - value := reflect.ValueOf(object) - - if value.Kind() == reflect.Map { - values := map[string]interface{}{} - keys := value.MapKeys() - for _, key := range keys { - if key.Kind() != reflect.String { - if _, ok := key.Interface().(string); !ok { - return nil, fmt.Errorf("map key needs to be a string, not %T (%v)", key.Interface(), key.Kind()) - } - } - - v := value.MapIndex(key) - newValue, err := toTree(v.Interface()) - if err != nil { - return nil, err - } - values[key.String()] = newValue - } - return &Tree{values: values, position: Position{}}, nil - } - - if value.Kind() == reflect.Array || value.Kind() == reflect.Slice { - return sliceToTree(object) - } - - simpleValue, 
err := simpleValueCoercion(object) - if err != nil { - return nil, err - } - return &tomlValue{value: simpleValue, position: Position{}}, nil -} diff --git a/tools/vendor/github.com/pelletier/go-toml/tomltree_write.go b/tools/vendor/github.com/pelletier/go-toml/tomltree_write.go deleted file mode 100644 index c9afbdab7..000000000 --- a/tools/vendor/github.com/pelletier/go-toml/tomltree_write.go +++ /dev/null @@ -1,552 +0,0 @@ -package toml - -import ( - "bytes" - "fmt" - "io" - "math" - "math/big" - "reflect" - "sort" - "strconv" - "strings" - "time" -) - -type valueComplexity int - -const ( - valueSimple valueComplexity = iota + 1 - valueComplex -) - -type sortNode struct { - key string - complexity valueComplexity -} - -// Encodes a string to a TOML-compliant multi-line string value -// This function is a clone of the existing encodeTomlString function, except that whitespace characters -// are preserved. Quotation marks and backslashes are also not escaped. -func encodeMultilineTomlString(value string, commented string) string { - var b bytes.Buffer - adjacentQuoteCount := 0 - - b.WriteString(commented) - for i, rr := range value { - if rr != '"' { - adjacentQuoteCount = 0 - } else { - adjacentQuoteCount++ - } - switch rr { - case '\b': - b.WriteString(`\b`) - case '\t': - b.WriteString("\t") - case '\n': - b.WriteString("\n" + commented) - case '\f': - b.WriteString(`\f`) - case '\r': - b.WriteString("\r") - case '"': - if adjacentQuoteCount >= 3 || i == len(value)-1 { - adjacentQuoteCount = 0 - b.WriteString(`\"`) - } else { - b.WriteString(`"`) - } - case '\\': - b.WriteString(`\`) - default: - intRr := uint16(rr) - if intRr < 0x001F { - b.WriteString(fmt.Sprintf("\\u%0.4X", intRr)) - } else { - b.WriteRune(rr) - } - } - } - return b.String() -} - -// Encodes a string to a TOML-compliant string value -func encodeTomlString(value string) string { - var b bytes.Buffer - - for _, rr := range value { - switch rr { - case '\b': - b.WriteString(`\b`) - case '\t': - b.WriteString(`\t`) - case '\n': - b.WriteString(`\n`) - case '\f': - b.WriteString(`\f`) - case '\r': - b.WriteString(`\r`) - case '"': - b.WriteString(`\"`) - case '\\': - b.WriteString(`\\`) - default: - intRr := uint16(rr) - if intRr < 0x001F { - b.WriteString(fmt.Sprintf("\\u%0.4X", intRr)) - } else { - b.WriteRune(rr) - } - } - } - return b.String() -} - -func tomlTreeStringRepresentation(t *Tree, ord MarshalOrder) (string, error) { - var orderedVals []sortNode - switch ord { - case OrderPreserve: - orderedVals = sortByLines(t) - default: - orderedVals = sortAlphabetical(t) - } - - var values []string - for _, node := range orderedVals { - k := node.key - v := t.values[k] - - repr, err := tomlValueStringRepresentation(v, "", "", ord, false) - if err != nil { - return "", err - } - values = append(values, quoteKeyIfNeeded(k)+" = "+repr) - } - return "{ " + strings.Join(values, ", ") + " }", nil -} - -func tomlValueStringRepresentation(v interface{}, commented string, indent string, ord MarshalOrder, arraysOneElementPerLine bool) (string, error) { - // this interface check is added to dereference the change made in the writeTo function. - // That change was made to allow this function to see formatting options. 
- tv, ok := v.(*tomlValue) - if ok { - v = tv.value - } else { - tv = &tomlValue{} - } - - switch value := v.(type) { - case uint64: - return strconv.FormatUint(value, 10), nil - case int64: - return strconv.FormatInt(value, 10), nil - case float64: - // Default bit length is full 64 - bits := 64 - // Float panics if nan is used - if !math.IsNaN(value) { - // if 32 bit accuracy is enough to exactly show, use 32 - _, acc := big.NewFloat(value).Float32() - if acc == big.Exact { - bits = 32 - } - } - if math.Trunc(value) == value { - return strings.ToLower(strconv.FormatFloat(value, 'f', 1, bits)), nil - } - return strings.ToLower(strconv.FormatFloat(value, 'f', -1, bits)), nil - case string: - if tv.multiline { - if tv.literal { - b := strings.Builder{} - b.WriteString("'''\n") - b.Write([]byte(value)) - b.WriteString("\n'''") - return b.String(), nil - } else { - return "\"\"\"\n" + encodeMultilineTomlString(value, commented) + "\"\"\"", nil - } - } - return "\"" + encodeTomlString(value) + "\"", nil - case []byte: - b, _ := v.([]byte) - return string(b), nil - case bool: - if value { - return "true", nil - } - return "false", nil - case time.Time: - return value.Format(time.RFC3339), nil - case LocalDate: - return value.String(), nil - case LocalDateTime: - return value.String(), nil - case LocalTime: - return value.String(), nil - case *Tree: - return tomlTreeStringRepresentation(value, ord) - case nil: - return "", nil - } - - rv := reflect.ValueOf(v) - - if rv.Kind() == reflect.Slice { - var values []string - for i := 0; i < rv.Len(); i++ { - item := rv.Index(i).Interface() - itemRepr, err := tomlValueStringRepresentation(item, commented, indent, ord, arraysOneElementPerLine) - if err != nil { - return "", err - } - values = append(values, itemRepr) - } - if arraysOneElementPerLine && len(values) > 1 { - stringBuffer := bytes.Buffer{} - valueIndent := indent + ` ` // TODO: move that to a shared encoder state - - stringBuffer.WriteString("[\n") - - for _, value := range values { - stringBuffer.WriteString(valueIndent) - stringBuffer.WriteString(commented + value) - stringBuffer.WriteString(`,`) - stringBuffer.WriteString("\n") - } - - stringBuffer.WriteString(indent + commented + "]") - - return stringBuffer.String(), nil - } - return "[" + strings.Join(values, ", ") + "]", nil - } - return "", fmt.Errorf("unsupported value type %T: %v", v, v) -} - -func getTreeArrayLine(trees []*Tree) (line int) { - // Prevent returning 0 for empty trees - line = int(^uint(0) >> 1) - // get lowest line number >= 0 - for _, tv := range trees { - if tv.position.Line < line || line == 0 { - line = tv.position.Line - } - } - return -} - -func sortByLines(t *Tree) (vals []sortNode) { - var ( - line int - lines []int - tv *Tree - tom *tomlValue - node sortNode - ) - vals = make([]sortNode, 0) - m := make(map[int]sortNode) - - for k := range t.values { - v := t.values[k] - switch v.(type) { - case *Tree: - tv = v.(*Tree) - line = tv.position.Line - node = sortNode{key: k, complexity: valueComplex} - case []*Tree: - line = getTreeArrayLine(v.([]*Tree)) - node = sortNode{key: k, complexity: valueComplex} - default: - tom = v.(*tomlValue) - line = tom.position.Line - node = sortNode{key: k, complexity: valueSimple} - } - lines = append(lines, line) - vals = append(vals, node) - m[line] = node - } - sort.Ints(lines) - - for i, line := range lines { - vals[i] = m[line] - } - - return vals -} - -func sortAlphabetical(t *Tree) (vals []sortNode) { - var ( - node sortNode - simpVals []string - compVals []string - ) - 
vals = make([]sortNode, 0) - m := make(map[string]sortNode) - - for k := range t.values { - v := t.values[k] - switch v.(type) { - case *Tree, []*Tree: - node = sortNode{key: k, complexity: valueComplex} - compVals = append(compVals, node.key) - default: - node = sortNode{key: k, complexity: valueSimple} - simpVals = append(simpVals, node.key) - } - vals = append(vals, node) - m[node.key] = node - } - - // Simples first to match previous implementation - sort.Strings(simpVals) - i := 0 - for _, key := range simpVals { - vals[i] = m[key] - i++ - } - - sort.Strings(compVals) - for _, key := range compVals { - vals[i] = m[key] - i++ - } - - return vals -} - -func (t *Tree) writeTo(w io.Writer, indent, keyspace string, bytesCount int64, arraysOneElementPerLine bool) (int64, error) { - return t.writeToOrdered(w, indent, keyspace, bytesCount, arraysOneElementPerLine, OrderAlphabetical, " ", false, false) -} - -func (t *Tree) writeToOrdered(w io.Writer, indent, keyspace string, bytesCount int64, arraysOneElementPerLine bool, ord MarshalOrder, indentString string, compactComments, parentCommented bool) (int64, error) { - var orderedVals []sortNode - - switch ord { - case OrderPreserve: - orderedVals = sortByLines(t) - default: - orderedVals = sortAlphabetical(t) - } - - for _, node := range orderedVals { - switch node.complexity { - case valueComplex: - k := node.key - v := t.values[k] - - combinedKey := quoteKeyIfNeeded(k) - if keyspace != "" { - combinedKey = keyspace + "." + combinedKey - } - - switch node := v.(type) { - // node has to be of those two types given how keys are sorted above - case *Tree: - tv, ok := t.values[k].(*Tree) - if !ok { - return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k]) - } - if tv.comment != "" { - comment := strings.Replace(tv.comment, "\n", "\n"+indent+"#", -1) - start := "# " - if strings.HasPrefix(comment, "#") { - start = "" - } - writtenBytesCountComment, errc := writeStrings(w, "\n", indent, start, comment) - bytesCount += int64(writtenBytesCountComment) - if errc != nil { - return bytesCount, errc - } - } - - var commented string - if parentCommented || t.commented || tv.commented { - commented = "# " - } - writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[", combinedKey, "]\n") - bytesCount += int64(writtenBytesCount) - if err != nil { - return bytesCount, err - } - bytesCount, err = node.writeToOrdered(w, indent+indentString, combinedKey, bytesCount, arraysOneElementPerLine, ord, indentString, compactComments, parentCommented || t.commented || tv.commented) - if err != nil { - return bytesCount, err - } - case []*Tree: - for _, subTree := range node { - var commented string - if parentCommented || t.commented || subTree.commented { - commented = "# " - } - writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[[", combinedKey, "]]\n") - bytesCount += int64(writtenBytesCount) - if err != nil { - return bytesCount, err - } - - bytesCount, err = subTree.writeToOrdered(w, indent+indentString, combinedKey, bytesCount, arraysOneElementPerLine, ord, indentString, compactComments, parentCommented || t.commented || subTree.commented) - if err != nil { - return bytesCount, err - } - } - } - default: // Simple - k := node.key - v, ok := t.values[k].(*tomlValue) - if !ok { - return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k]) - } - - var commented string - if parentCommented || t.commented || v.commented { - commented = "# " - } - repr, err := tomlValueStringRepresentation(v, 
commented, indent, ord, arraysOneElementPerLine) - if err != nil { - return bytesCount, err - } - - if v.comment != "" { - comment := strings.Replace(v.comment, "\n", "\n"+indent+"#", -1) - start := "# " - if strings.HasPrefix(comment, "#") { - start = "" - } - if !compactComments { - writtenBytesCountComment, errc := writeStrings(w, "\n") - bytesCount += int64(writtenBytesCountComment) - if errc != nil { - return bytesCount, errc - } - } - writtenBytesCountComment, errc := writeStrings(w, indent, start, comment, "\n") - bytesCount += int64(writtenBytesCountComment) - if errc != nil { - return bytesCount, errc - } - } - - quotedKey := quoteKeyIfNeeded(k) - writtenBytesCount, err := writeStrings(w, indent, commented, quotedKey, " = ", repr, "\n") - bytesCount += int64(writtenBytesCount) - if err != nil { - return bytesCount, err - } - } - } - - return bytesCount, nil -} - -// quote a key if it does not fit the bare key format (A-Za-z0-9_-) -// quoted keys use the same rules as strings -func quoteKeyIfNeeded(k string) string { - // when encoding a map with the 'quoteMapKeys' option enabled, the tree will contain - // keys that have already been quoted. - // not an ideal situation, but good enough of a stop gap. - if len(k) >= 2 && k[0] == '"' && k[len(k)-1] == '"' { - return k - } - isBare := true - for _, r := range k { - if !isValidBareChar(r) { - isBare = false - break - } - } - if isBare { - return k - } - return quoteKey(k) -} - -func quoteKey(k string) string { - return "\"" + encodeTomlString(k) + "\"" -} - -func writeStrings(w io.Writer, s ...string) (int, error) { - var n int - for i := range s { - b, err := io.WriteString(w, s[i]) - n += b - if err != nil { - return n, err - } - } - return n, nil -} - -// WriteTo encode the Tree as Toml and writes it to the writer w. -// Returns the number of bytes written in case of success, or an error if anything happened. -func (t *Tree) WriteTo(w io.Writer) (int64, error) { - return t.writeTo(w, "", "", 0, false) -} - -// ToTomlString generates a human-readable representation of the current tree. -// Output spans multiple lines, and is suitable for ingest by a TOML parser. -// If the conversion cannot be performed, ToString returns a non-nil error. -func (t *Tree) ToTomlString() (string, error) { - b, err := t.Marshal() - if err != nil { - return "", err - } - return string(b), nil -} - -// String generates a human-readable representation of the current tree. -// Alias of ToString. Present to implement the fmt.Stringer interface. -func (t *Tree) String() string { - result, _ := t.ToTomlString() - return result -} - -// ToMap recursively generates a representation of the tree using Go built-in structures. 
-// The following types are used: -// -// * bool -// * float64 -// * int64 -// * string -// * uint64 -// * time.Time -// * map[string]interface{} (where interface{} is any of this list) -// * []interface{} (where interface{} is any of this list) -func (t *Tree) ToMap() map[string]interface{} { - result := map[string]interface{}{} - - for k, v := range t.values { - switch node := v.(type) { - case []*Tree: - var array []interface{} - for _, item := range node { - array = append(array, item.ToMap()) - } - result[k] = array - case *Tree: - result[k] = node.ToMap() - case *tomlValue: - result[k] = tomlValueToGo(node.value) - } - } - return result -} - -func tomlValueToGo(v interface{}) interface{} { - if tree, ok := v.(*Tree); ok { - return tree.ToMap() - } - - rv := reflect.ValueOf(v) - - if rv.Kind() != reflect.Slice { - return v - } - values := make([]interface{}, rv.Len()) - for i := 0; i < rv.Len(); i++ { - item := rv.Index(i).Interface() - values[i] = tomlValueToGo(item) - } - return values -} diff --git a/tools/vendor/github.com/pelletier/go-toml/tomltree_writepub.go b/tools/vendor/github.com/pelletier/go-toml/tomltree_writepub.go deleted file mode 100644 index fa326308c..000000000 --- a/tools/vendor/github.com/pelletier/go-toml/tomltree_writepub.go +++ /dev/null @@ -1,6 +0,0 @@ -package toml - -// ValueStringRepresentation transforms an interface{} value into its toml string representation. -func ValueStringRepresentation(v interface{}, commented string, indent string, ord MarshalOrder, arraysOneElementPerLine bool) (string, error) { - return tomlValueStringRepresentation(v, commented, indent, ord, arraysOneElementPerLine) -} diff --git a/tools/vendor/github.com/prometheus/client_golang/NOTICE b/tools/vendor/github.com/prometheus/client_golang/NOTICE index dd878a30e..b9cc55abb 100644 --- a/tools/vendor/github.com/prometheus/client_golang/NOTICE +++ b/tools/vendor/github.com/prometheus/client_golang/NOTICE @@ -16,8 +16,3 @@ Go support for Protocol Buffers - Google's data interchange format http://github.com/golang/protobuf/ Copyright 2010 The Go Authors See source code for license details. - -Support for streaming Protocol Buffer messages for the Go language (golang). -https://github.com/matttproud/golang_protobuf_extensions -Copyright 2013 Matt T. 
Proud -Licensed under the Apache License, Version 2.0 diff --git a/tools/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/tools/vendor/github.com/prometheus/client_golang/prometheus/collector.go index ac1ca3cf5..cf05079fb 100644 --- a/tools/vendor/github.com/prometheus/client_golang/prometheus/collector.go +++ b/tools/vendor/github.com/prometheus/client_golang/prometheus/collector.go @@ -69,9 +69,9 @@ type Collector interface { // If a Collector collects the same metrics throughout its lifetime, its // Describe method can simply be implemented as: // -// func (c customCollector) Describe(ch chan<- *Desc) { -// DescribeByCollect(c, ch) -// } +// func (c customCollector) Describe(ch chan<- *Desc) { +// DescribeByCollect(c, ch) +// } // // However, this will not work if the metrics collected change dynamically over // the lifetime of the Collector in a way that their combined set of descriptors diff --git a/tools/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/tools/vendor/github.com/prometheus/client_golang/prometheus/counter.go index 00d70f09b..4ce84e7a8 100644 --- a/tools/vendor/github.com/prometheus/client_golang/prometheus/counter.go +++ b/tools/vendor/github.com/prometheus/client_golang/prometheus/counter.go @@ -20,6 +20,7 @@ import ( "time" dto "github.com/prometheus/client_model/go" + "google.golang.org/protobuf/types/known/timestamppb" ) // Counter is a Metric that represents a single numerical value that only ever @@ -51,7 +52,7 @@ type Counter interface { // will lead to a valid (label-less) exemplar. But if Labels is nil, the current // exemplar is left in place. AddWithExemplar panics if the value is < 0, if any // of the provided labels are invalid, or if the provided labels contain more -// than 64 runes in total. +// than 128 runes in total. type ExemplarAdder interface { AddWithExemplar(value float64, exemplar Labels) } @@ -59,6 +60,18 @@ type ExemplarAdder interface { // CounterOpts is an alias for Opts. See there for doc comments. type CounterOpts Opts +// CounterVecOpts bundles the options to create a CounterVec metric. +// It is mandatory to set CounterOpts, see there for mandatory fields. VariableLabels +// is optional and can safely be left to its default value. +type CounterVecOpts struct { + CounterOpts + + // VariableLabels are used to partition the metric vector by the given set + // of labels. Each label value will be constrained with the optional Constraint + // function, if provided. + VariableLabels ConstrainableLabels +} + // NewCounter creates a new Counter based on the provided CounterOpts. // // The returned implementation also implements ExemplarAdder. It is safe to @@ -78,8 +91,12 @@ func NewCounter(opts CounterOpts) Counter { nil, opts.ConstLabels, ) - result := &counter{desc: desc, labelPairs: desc.constLabelPairs, now: time.Now} + if opts.now == nil { + opts.now = time.Now + } + result := &counter{desc: desc, labelPairs: desc.constLabelPairs, now: opts.now} result.init(result) // Init self-collection. + result.createdTs = timestamppb.New(opts.now()) return result } @@ -94,10 +111,12 @@ type counter struct { selfCollector desc *Desc + createdTs *timestamppb.Timestamp labelPairs []*dto.LabelPair exemplar atomic.Value // Containing nil or a *dto.Exemplar. - now func() time.Time // To mock out time.Now() for testing. + // now is for testing purposes, by default it's time.Now. 
+ now func() time.Time } func (c *counter) Desc() *Desc { @@ -140,14 +159,14 @@ func (c *counter) get() float64 { } func (c *counter) Write(out *dto.Metric) error { - val := c.get() - + // Read the Exemplar first and the value second. This is to avoid a race condition + // where users see an exemplar for a not-yet-existing observation. var exemplar *dto.Exemplar if e := c.exemplar.Load(); e != nil { exemplar = e.(*dto.Exemplar) } - - return populateMetric(CounterValue, val, c.labelPairs, exemplar, out) + val := c.get() + return populateMetric(CounterValue, val, c.labelPairs, exemplar, out, c.createdTs) } func (c *counter) updateExemplar(v float64, l Labels) { @@ -173,19 +192,31 @@ type CounterVec struct { // NewCounterVec creates a new CounterVec based on the provided CounterOpts and // partitioned by the given label names. func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec { - desc := NewDesc( + return V2.NewCounterVec(CounterVecOpts{ + CounterOpts: opts, + VariableLabels: UnconstrainedLabels(labelNames), + }) +} + +// NewCounterVec creates a new CounterVec based on the provided CounterVecOpts. +func (v2) NewCounterVec(opts CounterVecOpts) *CounterVec { + desc := V2.NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), opts.Help, - labelNames, + opts.VariableLabels, opts.ConstLabels, ) + if opts.now == nil { + opts.now = time.Now + } return &CounterVec{ MetricVec: NewMetricVec(desc, func(lvs ...string) Metric { - if len(lvs) != len(desc.variableLabels) { - panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs)) + if len(lvs) != len(desc.variableLabels.names) { + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, lvs)) } - result := &counter{desc: desc, labelPairs: MakeLabelPairs(desc, lvs), now: time.Now} + result := &counter{desc: desc, labelPairs: MakeLabelPairs(desc, lvs), now: opts.now} result.init(result) // Init self-collection. + result.createdTs = timestamppb.New(opts.now()) return result }), } @@ -245,7 +276,8 @@ func (v *CounterVec) GetMetricWith(labels Labels) (Counter, error) { // WithLabelValues works as GetMetricWithLabelValues, but panics where // GetMetricWithLabelValues would have returned an error. Not returning an // error allows shortcuts like -// myVec.WithLabelValues("404", "GET").Add(42) +// +// myVec.WithLabelValues("404", "GET").Add(42) func (v *CounterVec) WithLabelValues(lvs ...string) Counter { c, err := v.GetMetricWithLabelValues(lvs...) if err != nil { @@ -256,7 +288,8 @@ func (v *CounterVec) WithLabelValues(lvs ...string) Counter { // With works as GetMetricWith, but panics where GetMetricWithLabels would have // returned an error. Not returning an error allows shortcuts like -// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) +// +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) func (v *CounterVec) With(labels Labels) Counter { c, err := v.GetMetricWith(labels) if err != nil { diff --git a/tools/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/tools/vendor/github.com/prometheus/client_golang/prometheus/desc.go index 4bb816ab7..ad347113c 100644 --- a/tools/vendor/github.com/prometheus/client_golang/prometheus/desc.go +++ b/tools/vendor/github.com/prometheus/client_golang/prometheus/desc.go @@ -14,17 +14,16 @@ package prometheus import ( - "errors" "fmt" "sort" "strings" "github.com/cespare/xxhash/v2" - //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. 
- "github.com/golang/protobuf/proto" + dto "github.com/prometheus/client_model/go" "github.com/prometheus/common/model" + "google.golang.org/protobuf/proto" - dto "github.com/prometheus/client_model/go" + "github.com/prometheus/client_golang/prometheus/internal" ) // Desc is the descriptor used by every Prometheus Metric. It is essentially @@ -51,9 +50,9 @@ type Desc struct { // constLabelPairs contains precalculated DTO label pairs based on // the constant labels. constLabelPairs []*dto.LabelPair - // variableLabels contains names of labels for which the metric - // maintains variable values. - variableLabels []string + // variableLabels contains names of labels and normalization function for + // which the metric maintains variable values. + variableLabels *compiledLabels // id is a hash of the values of the ConstLabels and fqName. This // must be unique among all registered descriptors and can therefore be // used as an identifier of the descriptor. @@ -77,10 +76,24 @@ type Desc struct { // For constLabels, the label values are constant. Therefore, they are fully // specified in the Desc. See the Collector example for a usage pattern. func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc { + return V2.NewDesc(fqName, help, UnconstrainedLabels(variableLabels), constLabels) +} + +// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc +// and will be reported on registration time. variableLabels and constLabels can +// be nil if no such labels should be set. fqName must not be empty. +// +// variableLabels only contain the label names and normalization functions. Their +// label values are variable and therefore not part of the Desc. (They are managed +// within the Metric.) +// +// For constLabels, the label values are constant. Therefore, they are fully +// specified in the Desc. See the Collector example for a usage pattern. +func (v2) NewDesc(fqName, help string, variableLabels ConstrainableLabels, constLabels Labels) *Desc { d := &Desc{ fqName: fqName, help: help, - variableLabels: variableLabels, + variableLabels: variableLabels.compile(), } if !model.IsValidMetricName(model.LabelValue(fqName)) { d.err = fmt.Errorf("%q is not a valid metric name", fqName) @@ -90,7 +103,7 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) * // their sorted label names) plus the fqName (at position 0). labelValues := make([]string, 1, len(constLabels)+1) labelValues[0] = fqName - labelNames := make([]string, 0, len(constLabels)+len(variableLabels)) + labelNames := make([]string, 0, len(constLabels)+len(d.variableLabels.names)) labelNameSet := map[string]struct{}{} // First add only the const label names and sort them... for labelName := range constLabels { @@ -115,16 +128,16 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) * // Now add the variable label names, but prefix them with something that // cannot be in a regular label name. That prevents matching the label // dimension with a different mix between preset and variable labels. 
- for _, labelName := range variableLabels { - if !checkLabelName(labelName) { - d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName) + for _, label := range d.variableLabels.names { + if !checkLabelName(label) { + d.err = fmt.Errorf("%q is not a valid label name for metric %q", label, fqName) return d } - labelNames = append(labelNames, "$"+labelName) - labelNameSet[labelName] = struct{}{} + labelNames = append(labelNames, "$"+label) + labelNameSet[label] = struct{}{} } if len(labelNames) != len(labelNameSet) { - d.err = errors.New("duplicate label names") + d.err = fmt.Errorf("duplicate label names in constant and variable labels for metric %q", fqName) return d } @@ -154,7 +167,7 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) * Value: proto.String(v), }) } - sort.Sort(labelPairSorter(d.constLabelPairs)) + sort.Sort(internal.LabelPairSorter(d.constLabelPairs)) return d } @@ -176,11 +189,22 @@ func (d *Desc) String() string { fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()), ) } + vlStrings := []string{} + if d.variableLabels != nil { + vlStrings = make([]string, 0, len(d.variableLabels.names)) + for _, vl := range d.variableLabels.names { + if fn, ok := d.variableLabels.labelConstraints[vl]; ok && fn != nil { + vlStrings = append(vlStrings, fmt.Sprintf("c(%s)", vl)) + } else { + vlStrings = append(vlStrings, vl) + } + } + } return fmt.Sprintf( - "Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: %v}", + "Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: {%s}}", d.fqName, d.help, strings.Join(lpStrings, ","), - d.variableLabels, + strings.Join(vlStrings, ","), ) } diff --git a/tools/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/tools/vendor/github.com/prometheus/client_golang/prometheus/doc.go index 98450125d..962608f02 100644 --- a/tools/vendor/github.com/prometheus/client_golang/prometheus/doc.go +++ b/tools/vendor/github.com/prometheus/client_golang/prometheus/doc.go @@ -21,55 +21,66 @@ // All exported functions and methods are safe to be used concurrently unless // specified otherwise. // -// A Basic Example +// # A Basic Example // // As a starting point, a very basic usage example: // -// package main -// -// import ( -// "log" -// "net/http" -// -// "github.com/prometheus/client_golang/prometheus" -// "github.com/prometheus/client_golang/prometheus/promhttp" -// ) -// -// var ( -// cpuTemp = prometheus.NewGauge(prometheus.GaugeOpts{ -// Name: "cpu_temperature_celsius", -// Help: "Current temperature of the CPU.", -// }) -// hdFailures = prometheus.NewCounterVec( -// prometheus.CounterOpts{ -// Name: "hd_errors_total", -// Help: "Number of hard-disk errors.", -// }, -// []string{"device"}, -// ) -// ) -// -// func init() { -// // Metrics have to be registered to be exposed: -// prometheus.MustRegister(cpuTemp) -// prometheus.MustRegister(hdFailures) -// } -// -// func main() { -// cpuTemp.Set(65.3) -// hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc() -// -// // The Handler function provides a default handler to expose metrics -// // via an HTTP server. "/metrics" is the usual endpoint for that. 
-// http.Handle("/metrics", promhttp.Handler()) -// log.Fatal(http.ListenAndServe(":8080", nil)) -// } -// +// package main +// +// import ( +// "log" +// "net/http" +// +// "github.com/prometheus/client_golang/prometheus" +// "github.com/prometheus/client_golang/prometheus/promhttp" +// ) +// +// type metrics struct { +// cpuTemp prometheus.Gauge +// hdFailures *prometheus.CounterVec +// } +// +// func NewMetrics(reg prometheus.Registerer) *metrics { +// m := &metrics{ +// cpuTemp: prometheus.NewGauge(prometheus.GaugeOpts{ +// Name: "cpu_temperature_celsius", +// Help: "Current temperature of the CPU.", +// }), +// hdFailures: prometheus.NewCounterVec( +// prometheus.CounterOpts{ +// Name: "hd_errors_total", +// Help: "Number of hard-disk errors.", +// }, +// []string{"device"}, +// ), +// } +// reg.MustRegister(m.cpuTemp) +// reg.MustRegister(m.hdFailures) +// return m +// } +// +// func main() { +// // Create a non-global registry. +// reg := prometheus.NewRegistry() +// +// // Create new metrics and register them using the custom registry. +// m := NewMetrics(reg) +// // Set values for the new created metrics. +// m.cpuTemp.Set(65.3) +// m.hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc() +// +// // Expose metrics and custom registry via an HTTP server +// // using the HandleFor function. "/metrics" is the usual endpoint for that. +// http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg})) +// log.Fatal(http.ListenAndServe(":8080", nil)) +// } // // This is a complete program that exports two metrics, a Gauge and a Counter, // the latter with a label attached to turn it into a (one-dimensional) vector. +// It register the metrics using a custom registry and exposes them via an HTTP server +// on the /metrics endpoint. // -// Metrics +// # Metrics // // The number of exported identifiers in this package might appear a bit // overwhelming. However, in addition to the basic plumbing shown in the example @@ -100,7 +111,7 @@ // To create instances of Metrics and their vector versions, you need a suitable // …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, or HistogramOpts. // -// Custom Collectors and constant Metrics +// # Custom Collectors and constant Metrics // // While you could create your own implementations of Metric, most likely you // will only ever implement the Collector interface on your own. At a first @@ -141,7 +152,7 @@ // a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting // shortcuts. // -// Advanced Uses of the Registry +// # Advanced Uses of the Registry // // While MustRegister is the by far most common way of registering a Collector, // sometimes you might want to handle the errors the registration might cause. @@ -176,23 +187,23 @@ // NewProcessCollector). With a custom registry, you are in control and decide // yourself about the Collectors to register. // -// HTTP Exposition +// # HTTP Exposition // // The Registry implements the Gatherer interface. The caller of the Gather // method can then expose the gathered metrics in some way. Usually, the metrics // are served via HTTP on the /metrics endpoint. That's happening in the example // above. The tools to expose metrics via HTTP are in the promhttp sub-package. // -// Pushing to the Pushgateway +// # Pushing to the Pushgateway // // Function for pushing to the Pushgateway can be found in the push sub-package. 
// -// Graphite Bridge +// # Graphite Bridge // // Functions and examples to push metrics from a Gatherer to Graphite can be // found in the graphite sub-package. // -// Other Means of Exposition +// # Other Means of Exposition // // More ways of exposing metrics can easily be added by following the approaches // of the existing implementations. diff --git a/tools/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go b/tools/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go index c41ab37f3..de5a85629 100644 --- a/tools/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go +++ b/tools/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go @@ -48,7 +48,7 @@ func (e *expvarCollector) Collect(ch chan<- Metric) { continue } var v interface{} - labels := make([]string, len(desc.variableLabels)) + labels := make([]string, len(desc.variableLabels.names)) if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil { ch <- NewInvalidMetric(desc, err) continue diff --git a/tools/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/tools/vendor/github.com/prometheus/client_golang/prometheus/gauge.go index bd0733d6a..dd2eac940 100644 --- a/tools/vendor/github.com/prometheus/client_golang/prometheus/gauge.go +++ b/tools/vendor/github.com/prometheus/client_golang/prometheus/gauge.go @@ -55,6 +55,18 @@ type Gauge interface { // GaugeOpts is an alias for Opts. See there for doc comments. type GaugeOpts Opts +// GaugeVecOpts bundles the options to create a GaugeVec metric. +// It is mandatory to set GaugeOpts, see there for mandatory fields. VariableLabels +// is optional and can safely be left to its default value. +type GaugeVecOpts struct { + GaugeOpts + + // VariableLabels are used to partition the metric vector by the given set + // of labels. Each label value will be constrained with the optional Constraint + // function, if provided. + VariableLabels ConstrainableLabels +} + // NewGauge creates a new Gauge based on the provided GaugeOpts. // // The returned implementation is optimized for a fast Set method. If you have a @@ -123,7 +135,7 @@ func (g *gauge) Sub(val float64) { func (g *gauge) Write(out *dto.Metric) error { val := math.Float64frombits(atomic.LoadUint64(&g.valBits)) - return populateMetric(GaugeValue, val, g.labelPairs, nil, out) + return populateMetric(GaugeValue, val, g.labelPairs, nil, out, nil) } // GaugeVec is a Collector that bundles a set of Gauges that all share the same @@ -138,16 +150,24 @@ type GaugeVec struct { // NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and // partitioned by the given label names. func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec { - desc := NewDesc( + return V2.NewGaugeVec(GaugeVecOpts{ + GaugeOpts: opts, + VariableLabels: UnconstrainedLabels(labelNames), + }) +} + +// NewGaugeVec creates a new GaugeVec based on the provided GaugeVecOpts. 
+func (v2) NewGaugeVec(opts GaugeVecOpts) *GaugeVec { + desc := V2.NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), opts.Help, - labelNames, + opts.VariableLabels, opts.ConstLabels, ) return &GaugeVec{ MetricVec: NewMetricVec(desc, func(lvs ...string) Metric { - if len(lvs) != len(desc.variableLabels) { - panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs)) + if len(lvs) != len(desc.variableLabels.names) { + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, lvs)) } result := &gauge{desc: desc, labelPairs: MakeLabelPairs(desc, lvs)} result.init(result) // Init self-collection. @@ -210,7 +230,8 @@ func (v *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) { // WithLabelValues works as GetMetricWithLabelValues, but panics where // GetMetricWithLabelValues would have returned an error. Not returning an // error allows shortcuts like -// myVec.WithLabelValues("404", "GET").Add(42) +// +// myVec.WithLabelValues("404", "GET").Add(42) func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge { g, err := v.GetMetricWithLabelValues(lvs...) if err != nil { @@ -221,7 +242,8 @@ func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge { // With works as GetMetricWith, but panics where GetMetricWithLabels would have // returned an error. Not returning an error allows shortcuts like -// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) +// +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) func (v *GaugeVec) With(labels Labels) Gauge { g, err := v.GetMetricWith(labels) if err != nil { diff --git a/tools/vendor/github.com/prometheus/client_golang/prometheus/get_pid.go b/tools/vendor/github.com/prometheus/client_golang/prometheus/get_pid.go new file mode 100644 index 000000000..614fd61be --- /dev/null +++ b/tools/vendor/github.com/prometheus/client_golang/prometheus/get_pid.go @@ -0,0 +1,26 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !js || wasm +// +build !js wasm + +package prometheus + +import "os" + +func getPIDFn() func() (int, error) { + pid := os.Getpid() + return func() (int, error) { + return pid, nil + } +} diff --git a/tools/vendor/github.com/prometheus/client_golang/prometheus/get_pid_gopherjs.go b/tools/vendor/github.com/prometheus/client_golang/prometheus/get_pid_gopherjs.go new file mode 100644 index 000000000..eaf8059ee --- /dev/null +++ b/tools/vendor/github.com/prometheus/client_golang/prometheus/get_pid_gopherjs.go @@ -0,0 +1,23 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build js && !wasm +// +build js,!wasm + +package prometheus + +func getPIDFn() func() (int, error) { + return func() (int, error) { + return 1, nil + } +} diff --git a/tools/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/tools/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go index 08195b410..520cbd7d4 100644 --- a/tools/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go +++ b/tools/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go @@ -19,12 +19,16 @@ import ( "time" ) +// goRuntimeMemStats provides the metrics initially provided by runtime.ReadMemStats. +// From Go 1.17 those similar (and better) statistics are provided by runtime/metrics, so +// while eval closure works on runtime.MemStats, the struct from Go 1.17+ is +// populated using runtime/metrics. Those are the defaults we can't alter. func goRuntimeMemStats() memStatsMetrics { return memStatsMetrics{ { desc: NewDesc( memstatNamespace("alloc_bytes"), - "Number of bytes allocated and still in use.", + "Number of bytes allocated in heap and currently in use. Equals to /memory/classes/heap/objects:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) }, @@ -32,7 +36,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("alloc_bytes_total"), - "Total number of bytes allocated, even if freed.", + "Total number of bytes allocated in heap until now, even if released already. Equals to /gc/heap/allocs:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) }, @@ -40,23 +44,16 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("sys_bytes"), - "Number of bytes obtained from system.", + "Number of bytes obtained from system. Equals to /memory/classes/total:byte.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) }, valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("lookups_total"), - "Total number of pointer lookups.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) }, - valType: CounterValue, }, { desc: NewDesc( memstatNamespace("mallocs_total"), - "Total number of mallocs.", + // TODO(bwplotka): We could add go_memstats_heap_objects, probably useful for discovery. Let's gather more feedback, kind of a waste of bytes for everybody for compatibility reasons to keep both, and we can't really rename/remove useful metric. + "Total number of heap objects allocated, both live and gc-ed. Semantically a counter version for go_memstats_heap_objects gauge. Equals to /gc/heap/allocs:objects + /gc/heap/tiny/allocs:objects.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) }, @@ -64,7 +61,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("frees_total"), - "Total number of frees.", + "Total number of heap objects frees. 
Equals to /gc/heap/frees:objects + /gc/heap/tiny/allocs:objects.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) }, @@ -72,7 +69,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_alloc_bytes"), - "Number of heap bytes allocated and still in use.", + "Number of heap bytes allocated and currently in use, same as go_memstats_alloc_bytes. Equals to /memory/classes/heap/objects:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) }, @@ -80,7 +77,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_sys_bytes"), - "Number of heap bytes obtained from system.", + "Number of heap bytes obtained from system. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes + /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) }, @@ -88,7 +85,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_idle_bytes"), - "Number of heap bytes waiting to be used.", + "Number of heap bytes waiting to be used. Equals to /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) }, @@ -96,7 +93,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_inuse_bytes"), - "Number of heap bytes that are in use.", + "Number of heap bytes that are in use. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) }, @@ -104,7 +101,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_released_bytes"), - "Number of heap bytes released to OS.", + "Number of heap bytes released to OS. Equals to /memory/classes/heap/released:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) }, @@ -112,7 +109,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_objects"), - "Number of allocated objects.", + "Number of currently allocated objects. Equals to /gc/heap/objects:objects.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) }, @@ -120,7 +117,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("stack_inuse_bytes"), - "Number of bytes in use by the stack allocator.", + "Number of bytes obtained from system for stack allocator in non-CGO environments. Equals to /memory/classes/heap/stacks:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) }, @@ -128,7 +125,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("stack_sys_bytes"), - "Number of bytes obtained from system for stack allocator.", + "Number of bytes obtained from system for stack allocator. Equals to /memory/classes/heap/stacks:bytes + /memory/classes/os-stacks:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) }, @@ -136,7 +133,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("mspan_inuse_bytes"), - "Number of bytes in use by mspan structures.", + "Number of bytes in use by mspan structures. 
Equals to /memory/classes/metadata/mspan/inuse:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) }, @@ -144,7 +141,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("mspan_sys_bytes"), - "Number of bytes used for mspan structures obtained from system.", + "Number of bytes used for mspan structures obtained from system. Equals to /memory/classes/metadata/mspan/inuse:bytes + /memory/classes/metadata/mspan/free:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) }, @@ -152,7 +149,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("mcache_inuse_bytes"), - "Number of bytes in use by mcache structures.", + "Number of bytes in use by mcache structures. Equals to /memory/classes/metadata/mcache/inuse:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) }, @@ -160,7 +157,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("mcache_sys_bytes"), - "Number of bytes used for mcache structures obtained from system.", + "Number of bytes used for mcache structures obtained from system. Equals to /memory/classes/metadata/mcache/inuse:bytes + /memory/classes/metadata/mcache/free:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) }, @@ -168,7 +165,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("buck_hash_sys_bytes"), - "Number of bytes used by the profiling bucket hash table.", + "Number of bytes used by the profiling bucket hash table. Equals to /memory/classes/profiling/buckets:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) }, @@ -176,7 +173,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("gc_sys_bytes"), - "Number of bytes used for garbage collection system metadata.", + "Number of bytes used for garbage collection system metadata. Equals to /memory/classes/metadata/other:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) }, @@ -184,7 +181,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("other_sys_bytes"), - "Number of bytes used for other system allocations.", + "Number of bytes used for other system allocations. Equals to /memory/classes/other:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) }, @@ -192,19 +189,11 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("next_gc_bytes"), - "Number of heap bytes when next garbage collection will take place.", + "Number of heap bytes when next garbage collection will take place. 
Equals to /gc/heap/goal:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) }, valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("gc_cpu_fraction"), - "The fraction of this program's available CPU time used by the GC since the program started.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction }, - valType: GaugeValue, }, } } @@ -229,10 +218,10 @@ func newBaseGoCollector() baseGoCollector { nil, nil), gcDesc: NewDesc( "go_gc_duration_seconds", - "A summary of the pause duration of garbage collection cycles.", + "A summary of the wall-time pause (stop-the-world) duration in garbage collection cycles.", nil, nil), gcLastTimeDesc: NewDesc( - memstatNamespace("last_gc_time_seconds"), + "go_memstats_last_gc_time_seconds", "Number of seconds since 1970 of last garbage collection.", nil, nil), goInfoDesc: NewDesc( @@ -254,8 +243,9 @@ func (c *baseGoCollector) Describe(ch chan<- *Desc) { // Collect returns the current state of all metrics of the collector. func (c *baseGoCollector) Collect(ch chan<- Metric) { ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine())) - n, _ := runtime.ThreadCreateProfile(nil) - ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, float64(n)) + + n := getRuntimeNumThreads() + ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, n) var stats debug.GCStats stats.PauseQuantiles = make([]time.Duration, 5) @@ -268,7 +258,6 @@ func (c *baseGoCollector) Collect(ch chan<- Metric) { quantiles[0.0] = stats.PauseQuantiles[0].Seconds() ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), stats.PauseTotal.Seconds(), quantiles) ch <- MustNewConstMetric(c.gcLastTimeDesc, GaugeValue, float64(stats.LastGC.UnixNano())/1e9) - ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1) } diff --git a/tools/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go b/tools/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go index 24526131e..897a6e906 100644 --- a/tools/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go +++ b/tools/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go @@ -40,13 +40,28 @@ type goCollector struct { // // Deprecated: Use collectors.NewGoCollector instead. 
func NewGoCollector() Collector { + msMetrics := goRuntimeMemStats() + msMetrics = append(msMetrics, struct { + desc *Desc + eval func(*runtime.MemStats) float64 + valType ValueType + }{ + // This metric is omitted in Go1.17+, see https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034 + desc: NewDesc( + memstatNamespace("gc_cpu_fraction"), + "The fraction of this program's available CPU time used by the GC since the program started.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction }, + valType: GaugeValue, + }) return &goCollector{ base: newBaseGoCollector(), msLast: &runtime.MemStats{}, msRead: runtime.ReadMemStats, msMaxWait: time.Second, msMaxAge: 5 * time.Minute, - msMetrics: goRuntimeMemStats(), + msMetrics: msMetrics, } } diff --git a/tools/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go117.go b/tools/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go similarity index 51% rename from tools/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go117.go rename to tools/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go index d43bdcdda..6b8684731 100644 --- a/tools/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go117.go +++ b/tools/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go @@ -17,18 +17,80 @@ package prometheus import ( + "fmt" "math" "runtime" "runtime/metrics" "strings" "sync" - //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. - "github.com/golang/protobuf/proto" "github.com/prometheus/client_golang/prometheus/internal" + dto "github.com/prometheus/client_model/go" + "google.golang.org/protobuf/proto" +) + +const ( + // constants for strings referenced more than once. + goGCHeapTinyAllocsObjects = "/gc/heap/tiny/allocs:objects" + goGCHeapAllocsObjects = "/gc/heap/allocs:objects" + goGCHeapFreesObjects = "/gc/heap/frees:objects" + goGCHeapFreesBytes = "/gc/heap/frees:bytes" + goGCHeapAllocsBytes = "/gc/heap/allocs:bytes" + goGCHeapObjects = "/gc/heap/objects:objects" + goGCHeapGoalBytes = "/gc/heap/goal:bytes" + goMemoryClassesTotalBytes = "/memory/classes/total:bytes" + goMemoryClassesHeapObjectsBytes = "/memory/classes/heap/objects:bytes" + goMemoryClassesHeapUnusedBytes = "/memory/classes/heap/unused:bytes" + goMemoryClassesHeapReleasedBytes = "/memory/classes/heap/released:bytes" + goMemoryClassesHeapFreeBytes = "/memory/classes/heap/free:bytes" + goMemoryClassesHeapStacksBytes = "/memory/classes/heap/stacks:bytes" + goMemoryClassesOSStacksBytes = "/memory/classes/os-stacks:bytes" + goMemoryClassesMetadataMSpanInuseBytes = "/memory/classes/metadata/mspan/inuse:bytes" + goMemoryClassesMetadataMSPanFreeBytes = "/memory/classes/metadata/mspan/free:bytes" + goMemoryClassesMetadataMCacheInuseBytes = "/memory/classes/metadata/mcache/inuse:bytes" + goMemoryClassesMetadataMCacheFreeBytes = "/memory/classes/metadata/mcache/free:bytes" + goMemoryClassesProfilingBucketsBytes = "/memory/classes/profiling/buckets:bytes" + goMemoryClassesMetadataOtherBytes = "/memory/classes/metadata/other:bytes" + goMemoryClassesOtherBytes = "/memory/classes/other:bytes" ) +// rmNamesForMemStatsMetrics represents runtime/metrics names required to populate goRuntimeMemStats from like logic. 
+var rmNamesForMemStatsMetrics = []string{
+ goGCHeapTinyAllocsObjects,
+ goGCHeapAllocsObjects,
+ goGCHeapFreesObjects,
+ goGCHeapAllocsBytes,
+ goGCHeapObjects,
+ goGCHeapGoalBytes,
+ goMemoryClassesTotalBytes,
+ goMemoryClassesHeapObjectsBytes,
+ goMemoryClassesHeapUnusedBytes,
+ goMemoryClassesHeapReleasedBytes,
+ goMemoryClassesHeapFreeBytes,
+ goMemoryClassesHeapStacksBytes,
+ goMemoryClassesOSStacksBytes,
+ goMemoryClassesMetadataMSpanInuseBytes,
+ goMemoryClassesMetadataMSPanFreeBytes,
+ goMemoryClassesMetadataMCacheInuseBytes,
+ goMemoryClassesMetadataMCacheFreeBytes,
+ goMemoryClassesProfilingBucketsBytes,
+ goMemoryClassesMetadataOtherBytes,
+ goMemoryClassesOtherBytes,
+}
+
+func bestEffortLookupRM(lookup []string) []metrics.Description {
+ ret := make([]metrics.Description, 0, len(lookup))
+ for _, rm := range metrics.All() {
+ for _, m := range lookup {
+ if m == rm.Name {
+ ret = append(ret, rm)
+ }
+ }
+ }
+ return ret
+}
+
 type goCollector struct {
 base baseGoCollector
@@ -36,70 +98,126 @@ type goCollector struct {
 // snapshot is always produced by Collect.
 mu sync.Mutex
- // rm... fields all pertain to the runtime/metrics package.
- rmSampleBuf []metrics.Sample
- rmSampleMap map[string]*metrics.Sample
- rmMetrics []collectorMetric
+ // Contains all samples that have to be retrieved from runtime/metrics (not all of them will be exposed).
+ sampleBuf []metrics.Sample
+ // sampleMap allows lookup for MemStats metrics and runtime/metrics histograms for exact sums.
+ sampleMap map[string]*metrics.Sample
+
+ // rmExposedMetrics represents all runtime/metrics package metrics
+ // that were configured to be exposed.
+ rmExposedMetrics []collectorMetric
+ rmExactSumMapForHist map[string]string
 // With Go 1.17, the runtime/metrics package was introduced.
 // From that point on, metric names produced by the runtime/metrics
 // package could be generated from runtime/metrics names. However,
 // these differ from the old names for the same values.
 //
- // This field exist to export the same values under the old names
+ // This field exists to export the same values under the old names
 // as well.
- msMetrics memStatsMetrics
+ msMetrics memStatsMetrics
+ msMetricsEnabled bool
+}
+
+type rmMetricDesc struct {
+ metrics.Description
+}
+
+func matchRuntimeMetricsRules(rules []internal.GoCollectorRule) []rmMetricDesc {
+ var descs []rmMetricDesc
+ for _, d := range metrics.All() {
+ var (
+ deny = true
+ desc rmMetricDesc
+ )
+
+ for _, r := range rules {
+ if !r.Matcher.MatchString(d.Name) {
+ continue
+ }
+ deny = r.Deny
+ }
+ if deny {
+ continue
+ }
+
+ desc.Description = d
+ descs = append(descs, desc)
+ }
+ return descs
+}
+
+func defaultGoCollectorOptions() internal.GoCollectorOptions {
+ return internal.GoCollectorOptions{
+ RuntimeMetricSumForHist: map[string]string{
+ "/gc/heap/allocs-by-size:bytes": goGCHeapAllocsBytes,
+ "/gc/heap/frees-by-size:bytes": goGCHeapFreesBytes,
+ },
+ RuntimeMetricRules: []internal.GoCollectorRule{
+ // Recommended metrics we want by default from runtime/metrics.
+ {Matcher: internal.GoCollectorDefaultRuntimeMetrics},
+ },
+ }
}

// NewGoCollector is the obsolete version of collectors.NewGoCollector.
// See there for documentation.
//
// Deprecated: Use collectors.NewGoCollector instead.
-func NewGoCollector() Collector { - descriptions := metrics.All() +func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector { + opt := defaultGoCollectorOptions() + for _, o := range opts { + o(&opt) + } + + exposedDescriptions := matchRuntimeMetricsRules(opt.RuntimeMetricRules) // Collect all histogram samples so that we can get their buckets. // The API guarantees that the buckets are always fixed for the lifetime // of the process. var histograms []metrics.Sample - for _, d := range descriptions { + for _, d := range exposedDescriptions { if d.Kind == metrics.KindFloat64Histogram { histograms = append(histograms, metrics.Sample{Name: d.Name}) } } - metrics.Read(histograms) + + if len(histograms) > 0 { + metrics.Read(histograms) + } + bucketsMap := make(map[string][]float64) for i := range histograms { bucketsMap[histograms[i].Name] = histograms[i].Value.Float64Histogram().Buckets } - // Generate a Desc and ValueType for each runtime/metrics metric. - metricSet := make([]collectorMetric, 0, len(descriptions)) - sampleBuf := make([]metrics.Sample, 0, len(descriptions)) - sampleMap := make(map[string]*metrics.Sample, len(descriptions)) - for i := range descriptions { - d := &descriptions[i] - namespace, subsystem, name, ok := internal.RuntimeMetricsToProm(d) + // Generate a collector for each exposed runtime/metrics metric. + metricSet := make([]collectorMetric, 0, len(exposedDescriptions)) + // SampleBuf is used for reading from runtime/metrics. + // We are assuming the largest case to have stable pointers for sampleMap purposes. + sampleBuf := make([]metrics.Sample, 0, len(exposedDescriptions)+len(opt.RuntimeMetricSumForHist)+len(rmNamesForMemStatsMetrics)) + sampleMap := make(map[string]*metrics.Sample, len(exposedDescriptions)) + for _, d := range exposedDescriptions { + namespace, subsystem, name, ok := internal.RuntimeMetricsToProm(&d.Description) if !ok { // Just ignore this metric; we can't do anything with it here. // If a user decides to use the latest version of Go, we don't want - // to fail here. This condition is tested elsewhere. + // to fail here. This condition is tested in TestExpectedRuntimeMetrics. continue } + help := attachOriginalName(d.Description.Description, d.Name) - // Set up sample buffer for reading, and a map - // for quick lookup of sample values. sampleBuf = append(sampleBuf, metrics.Sample{Name: d.Name}) sampleMap[d.Name] = &sampleBuf[len(sampleBuf)-1] var m collectorMetric if d.Kind == metrics.KindFloat64Histogram { - _, hasSum := rmExactSumMap[d.Name] + _, hasSum := opt.RuntimeMetricSumForHist[d.Name] unit := d.Name[strings.IndexRune(d.Name, ':')+1:] m = newBatchHistogram( NewDesc( BuildFQName(namespace, subsystem, name), - d.Description, + help, nil, nil, ), @@ -111,34 +229,75 @@ func NewGoCollector() Collector { Namespace: namespace, Subsystem: subsystem, Name: name, - Help: d.Description, - }) + Help: help, + }, + ) } else { m = NewGauge(GaugeOpts{ Namespace: namespace, Subsystem: subsystem, Name: name, - Help: d.Description, + Help: help, }) } metricSet = append(metricSet, m) } + + // Add exact sum metrics to sampleBuf if not added before. 
+ for _, h := range histograms {
+ sumMetric, ok := opt.RuntimeMetricSumForHist[h.Name]
+ if !ok {
+ continue
+ }
+
+ if _, ok := sampleMap[sumMetric]; ok {
+ continue
+ }
+ sampleBuf = append(sampleBuf, metrics.Sample{Name: sumMetric})
+ sampleMap[sumMetric] = &sampleBuf[len(sampleBuf)-1]
+ }
+
+ var (
+ msMetrics memStatsMetrics
+ msDescriptions []metrics.Description
+ )
+
+ if !opt.DisableMemStatsLikeMetrics {
+ msMetrics = goRuntimeMemStats()
+ msDescriptions = bestEffortLookupRM(rmNamesForMemStatsMetrics)
+
+ // Check whether the metric was exposed before; if not, add it to sampleBuf.
+ for _, mdDesc := range msDescriptions {
+ if _, ok := sampleMap[mdDesc.Name]; ok {
+ continue
+ }
+ sampleBuf = append(sampleBuf, metrics.Sample{Name: mdDesc.Name})
+ sampleMap[mdDesc.Name] = &sampleBuf[len(sampleBuf)-1]
+ }
+ }
+
 return &goCollector{
- base: newBaseGoCollector(),
- rmSampleBuf: sampleBuf,
- rmSampleMap: sampleMap,
- rmMetrics: metricSet,
- msMetrics: goRuntimeMemStats(),
+ base: newBaseGoCollector(),
+ sampleBuf: sampleBuf,
+ sampleMap: sampleMap,
+ rmExposedMetrics: metricSet,
+ rmExactSumMapForHist: opt.RuntimeMetricSumForHist,
+ msMetrics: msMetrics,
+ msMetricsEnabled: !opt.DisableMemStatsLikeMetrics,
 }
}

+func attachOriginalName(desc, origName string) string {
+ return fmt.Sprintf("%s Sourced from %s.", desc, origName)
+}
+
// Describe returns all descriptions of the collector.
func (c *goCollector) Describe(ch chan<- *Desc) {
 c.base.Describe(ch)
 for _, i := range c.msMetrics {
 ch <- i.desc
 }
- for _, m := range c.rmMetrics {
+ for _, m := range c.rmExposedMetrics {
 ch <- m.Desc()
 }
}

@@ -148,8 +307,12 @@ func (c *goCollector) Collect(ch chan<- Metric) {
 // Collect base non-memory metrics.
 c.base.Collect(ch)
+
+ if len(c.sampleBuf) == 0 {
+ return
+ }
+
 // Collect must be thread-safe, so prevent concurrent use of
- // rmSampleBuf. Just read into rmSampleBuf but write all the data
+ // sampleBuf elements. Just read into sampleBuf but write all the data
 // we get into our Metrics or MemStats.
 //
 // This lock also ensures that the Metrics we send out are all from
@@ -164,14 +327,17 @@ func (c *goCollector) Collect(ch chan<- Metric) {
 defer c.mu.Unlock()

 // Populate runtime/metrics sample buffer.
- metrics.Read(c.rmSampleBuf)
+ metrics.Read(c.sampleBuf)
+
+ // Collect all the runtime/metrics the user chose to expose from sampleBuf (if any).
+ for i, metric := range c.rmExposedMetrics {
+ // We created samples for exposed metrics first in order, so indexes match.
+ sample := c.sampleBuf[i]

- // Update all our metrics from rmSampleBuf.
- for i, sample := range c.rmSampleBuf {
 // N.B. switch on concrete type because it's significantly more efficient
 // than checking for the Counter and Gauge interface implementations. In
 // this case, we control all the types here.
- switch m := c.rmMetrics[i].(type) {
+ switch m := metric.(type) {
 case *counter:
 // Guard against decreases. This should never happen, but a failure
 // to do so will result in a panic, which is a harsh consequence for
@@ -191,12 +357,15 @@ func (c *goCollector) Collect(ch chan<- Metric) {
 panic("unexpected metric type")
 }
 }
- // ms is a dummy MemStats that we populate ourselves so that we can
- // populate the old metrics from it.
- var ms runtime.MemStats - memStatsFromRM(&ms, c.rmSampleMap) - for _, i := range c.msMetrics { - ch <- MustNewConstMetric(i.desc, i.valType, i.eval(&ms)) + + if c.msMetricsEnabled { + // ms is a dummy MemStats that we populate ourselves so that we can + // populate the old metrics from it if goMemStatsCollection is enabled. + var ms runtime.MemStats + memStatsFromRM(&ms, c.sampleMap) + for _, i := range c.msMetrics { + ch <- MustNewConstMetric(i.desc, i.valType, i.eval(&ms)) + } } } @@ -214,21 +383,16 @@ func unwrapScalarRMValue(v metrics.Value) float64 { // // This should never happen because we always populate our metric // set from the runtime/metrics package. - panic("unexpected unsupported metric") + panic("unexpected bad kind metric") default: // Unsupported metric kind. // // This should never happen because we check for this during initialization // and flag and filter metrics whose kinds we don't understand. - panic("unexpected unsupported metric kind") + panic(fmt.Sprintf("unexpected unsupported metric: %v", v.Kind())) } } -var rmExactSumMap = map[string]string{ - "/gc/heap/allocs-by-size:bytes": "/gc/heap/allocs:bytes", - "/gc/heap/frees-by-size:bytes": "/gc/heap/frees:bytes", -} - // exactSumFor takes a runtime/metrics metric name (that is assumed to // be of kind KindFloat64Histogram) and returns its exact sum and whether // its exact sum exists. @@ -236,11 +400,11 @@ var rmExactSumMap = map[string]string{ // The runtime/metrics API for histograms doesn't currently expose exact // sums, but some of the other metrics are in fact exact sums of histograms. func (c *goCollector) exactSumFor(rmName string) float64 { - sumName, ok := rmExactSumMap[rmName] + sumName, ok := c.rmExactSumMapForHist[rmName] if !ok { return 0 } - s, ok := c.rmSampleMap[sumName] + s, ok := c.sampleMap[sumName] if !ok { return 0 } @@ -261,35 +425,30 @@ func memStatsFromRM(ms *runtime.MemStats, rm map[string]*metrics.Sample) { // while having Mallocs - Frees still represent a live object count. // Unfortunately, MemStats doesn't actually export a large allocation count, // so it's impossible to pull this number out directly. - tinyAllocs := lookupOrZero("/gc/heap/tiny/allocs:objects") - ms.Mallocs = lookupOrZero("/gc/heap/allocs:objects") + tinyAllocs - ms.Frees = lookupOrZero("/gc/heap/frees:objects") + tinyAllocs + tinyAllocs := lookupOrZero(goGCHeapTinyAllocsObjects) + ms.Mallocs = lookupOrZero(goGCHeapAllocsObjects) + tinyAllocs + ms.Frees = lookupOrZero(goGCHeapFreesObjects) + tinyAllocs - ms.TotalAlloc = lookupOrZero("/gc/heap/allocs:bytes") - ms.Sys = lookupOrZero("/memory/classes/total:bytes") + ms.TotalAlloc = lookupOrZero(goGCHeapAllocsBytes) + ms.Sys = lookupOrZero(goMemoryClassesTotalBytes) ms.Lookups = 0 // Already always zero. 
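 // [Editor's illustration; not part of the vendored change] lookupOrZero is
 // the small helper defined at the top of memStatsFromRM. Its assumed shape,
 // given how it is used above and below (a sample missing from rm simply
 // reads as zero), is:
 //
 //	lookupOrZero := func(name string) uint64 {
 //		if s, ok := rm[name]; ok {
 //			return s.Value.Uint64()
 //		}
 //		return 0
 //	}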
- ms.HeapAlloc = lookupOrZero("/memory/classes/heap/objects:bytes") + ms.HeapAlloc = lookupOrZero(goMemoryClassesHeapObjectsBytes) ms.Alloc = ms.HeapAlloc - ms.HeapInuse = ms.HeapAlloc + lookupOrZero("/memory/classes/heap/unused:bytes") - ms.HeapReleased = lookupOrZero("/memory/classes/heap/released:bytes") - ms.HeapIdle = ms.HeapReleased + lookupOrZero("/memory/classes/heap/free:bytes") + ms.HeapInuse = ms.HeapAlloc + lookupOrZero(goMemoryClassesHeapUnusedBytes) + ms.HeapReleased = lookupOrZero(goMemoryClassesHeapReleasedBytes) + ms.HeapIdle = ms.HeapReleased + lookupOrZero(goMemoryClassesHeapFreeBytes) ms.HeapSys = ms.HeapInuse + ms.HeapIdle - ms.HeapObjects = lookupOrZero("/gc/heap/objects:objects") - ms.StackInuse = lookupOrZero("/memory/classes/heap/stacks:bytes") - ms.StackSys = ms.StackInuse + lookupOrZero("/memory/classes/os-stacks:bytes") - ms.MSpanInuse = lookupOrZero("/memory/classes/metadata/mspan/inuse:bytes") - ms.MSpanSys = ms.MSpanInuse + lookupOrZero("/memory/classes/metadata/mspan/free:bytes") - ms.MCacheInuse = lookupOrZero("/memory/classes/metadata/mcache/inuse:bytes") - ms.MCacheSys = ms.MCacheInuse + lookupOrZero("/memory/classes/metadata/mcache/free:bytes") - ms.BuckHashSys = lookupOrZero("/memory/classes/profiling/buckets:bytes") - ms.GCSys = lookupOrZero("/memory/classes/metadata/other:bytes") - ms.OtherSys = lookupOrZero("/memory/classes/other:bytes") - ms.NextGC = lookupOrZero("/gc/heap/goal:bytes") - - // N.B. LastGC is omitted because runtime.GCStats already has this. - // See https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034 - // for more details. - ms.LastGC = 0 + ms.HeapObjects = lookupOrZero(goGCHeapObjects) + ms.StackInuse = lookupOrZero(goMemoryClassesHeapStacksBytes) + ms.StackSys = ms.StackInuse + lookupOrZero(goMemoryClassesOSStacksBytes) + ms.MSpanInuse = lookupOrZero(goMemoryClassesMetadataMSpanInuseBytes) + ms.MSpanSys = ms.MSpanInuse + lookupOrZero(goMemoryClassesMetadataMSPanFreeBytes) + ms.MCacheInuse = lookupOrZero(goMemoryClassesMetadataMCacheInuseBytes) + ms.MCacheSys = ms.MCacheInuse + lookupOrZero(goMemoryClassesMetadataMCacheFreeBytes) + ms.BuckHashSys = lookupOrZero(goMemoryClassesProfilingBucketsBytes) + ms.GCSys = lookupOrZero(goMemoryClassesMetadataOtherBytes) + ms.OtherSys = lookupOrZero(goMemoryClassesOtherBytes) + ms.NextGC = lookupOrZero(goGCHeapGoalBytes) // N.B. GCCPUFraction is intentionally omitted. This metric is not useful, // and often misleading due to the fact that it's an average over the lifetime @@ -324,6 +483,11 @@ type batchHistogram struct { // buckets must always be from the runtime/metrics package, following // the same conventions. func newBatchHistogram(desc *Desc, buckets []float64, hasSum bool) *batchHistogram { + // We need to remove -Inf values. runtime/metrics keeps them around. + // But -Inf bucket should not be allowed for prometheus histograms. + if buckets[0] == math.Inf(-1) { + buckets = buckets[1:] + } h := &batchHistogram{ desc: desc, buckets: buckets, @@ -382,8 +546,10 @@ func (h *batchHistogram) Write(out *dto.Metric) error { for i, count := range h.counts { totalCount += count if !h.hasSum { - // N.B. This computed sum is an underestimate. - sum += h.buckets[i] * float64(count) + if count != 0 { + // N.B. This computed sum is an underestimate. + sum += h.buckets[i] * float64(count) + } } // Skip the +Inf bucket, but only for the bucket list. 
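Editor's note on the go_collector_latest.go hunks above: memStatsFromRM now derives every legacy go_memstats_* value from named runtime/metrics samples, computing composites such as HeapSys = HeapInuse + HeapIdle itself. The following self-contained sketch (illustrative only, not code from this patch; it hard-codes four of the constant names listed earlier) shows the same derivation outside the collector:

package main

import (
	"fmt"
	"runtime/metrics"
)

func main() {
	// The same names the collector registers in sampleBuf
	// (the goMemoryClassesHeap*Bytes constants above).
	samples := []metrics.Sample{
		{Name: "/memory/classes/heap/objects:bytes"},
		{Name: "/memory/classes/heap/unused:bytes"},
		{Name: "/memory/classes/heap/released:bytes"},
		{Name: "/memory/classes/heap/free:bytes"},
	}
	metrics.Read(samples)

	// Compose the MemStats-like values the way memStatsFromRM does.
	heapAlloc := samples[0].Value.Uint64()
	heapInuse := heapAlloc + samples[1].Value.Uint64()
	heapIdle := samples[2].Value.Uint64() + samples[3].Value.Uint64()
	fmt.Println("HeapInuse:", heapInuse)
	fmt.Println("HeapSys:  ", heapInuse+heapIdle)
}

On a runtime where one of these names is unknown, the sample's Value.Kind() reports metrics.KindBad (and Uint64 would panic), which is why the collector itself reads through the zero-defaulting lookupOrZero rather than accessing samples directly.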
diff --git a/tools/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/tools/vendor/github.com/prometheus/client_golang/prometheus/histogram.go index 893802fd6..c453b754a 100644 --- a/tools/vendor/github.com/prometheus/client_golang/prometheus/histogram.go +++ b/tools/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -14,6 +14,7 @@ package prometheus import ( + "errors" "fmt" "math" "runtime" @@ -22,25 +23,227 @@ import ( "sync/atomic" "time" - //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. - "github.com/golang/protobuf/proto" - dto "github.com/prometheus/client_model/go" + + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/timestamppb" +) + +const ( + nativeHistogramSchemaMaximum = 8 + nativeHistogramSchemaMinimum = -4 ) +// nativeHistogramBounds for the frac of observed values. Only relevant for +// schema > 0. The position in the slice is the schema. (0 is never used, just +// here for convenience of using the schema directly as the index.) +// +// TODO(beorn7): Currently, we do a binary search into these slices. There are +// ways to turn it into a small number of simple array lookups. It probably only +// matters for schema 5 and beyond, but should be investigated. See this comment +// as a starting point: +// https://github.com/open-telemetry/opentelemetry-specification/issues/1776#issuecomment-870164310 +var nativeHistogramBounds = [][]float64{ + // Schema "0": + {0.5}, + // Schema 1: + {0.5, 0.7071067811865475}, + // Schema 2: + {0.5, 0.5946035575013605, 0.7071067811865475, 0.8408964152537144}, + // Schema 3: + { + 0.5, 0.5452538663326288, 0.5946035575013605, 0.6484197773255048, + 0.7071067811865475, 0.7711054127039704, 0.8408964152537144, 0.9170040432046711, + }, + // Schema 4: + { + 0.5, 0.5221368912137069, 0.5452538663326288, 0.5693943173783458, + 0.5946035575013605, 0.620928906036742, 0.6484197773255048, 0.6771277734684463, + 0.7071067811865475, 0.7384130729697496, 0.7711054127039704, 0.805245165974627, + 0.8408964152537144, 0.8781260801866495, 0.9170040432046711, 0.9576032806985735, + }, + // Schema 5: + { + 0.5, 0.5109485743270583, 0.5221368912137069, 0.5335702003384117, + 0.5452538663326288, 0.5571933712979462, 0.5693943173783458, 0.5818624293887887, + 0.5946035575013605, 0.6076236799902344, 0.620928906036742, 0.6345254785958666, + 0.6484197773255048, 0.6626183215798706, 0.6771277734684463, 0.6919549409819159, + 0.7071067811865475, 0.7225904034885232, 0.7384130729697496, 0.7545822137967112, + 0.7711054127039704, 0.7879904225539431, 0.805245165974627, 0.8228777390769823, + 0.8408964152537144, 0.8593096490612387, 0.8781260801866495, 0.8973545375015533, + 0.9170040432046711, 0.9370838170551498, 0.9576032806985735, 0.9785720620876999, + }, + // Schema 6: + { + 0.5, 0.5054446430258502, 0.5109485743270583, 0.5165124395106142, + 0.5221368912137069, 0.5278225891802786, 0.5335702003384117, 0.5393803988785598, + 0.5452538663326288, 0.5511912916539204, 0.5571933712979462, 0.5632608093041209, + 0.5693943173783458, 0.5755946149764913, 0.5818624293887887, 0.5881984958251406, + 0.5946035575013605, 0.6010783657263515, 0.6076236799902344, 0.6142402680534349, + 0.620928906036742, 0.6276903785123455, 0.6345254785958666, 0.6414350080393891, + 0.6484197773255048, 0.6554806057623822, 0.6626183215798706, 0.6698337620266515, + 0.6771277734684463, 0.6845012114872953, 0.6919549409819159, 0.6994898362691555, + 0.7071067811865475, 0.7148066691959849, 0.7225904034885232, 0.7304588970903234, + 
0.7384130729697496, 0.7464538641456323, 0.7545822137967112, 0.762799075372269, + 0.7711054127039704, 0.7795022001189185, 0.7879904225539431, 0.7965710756711334, + 0.805245165974627, 0.8140137109286738, 0.8228777390769823, 0.8318382901633681, + 0.8408964152537144, 0.8500531768592616, 0.8593096490612387, 0.8686669176368529, + 0.8781260801866495, 0.8876882462632604, 0.8973545375015533, 0.9071260877501991, + 0.9170040432046711, 0.9269895625416926, 0.9370838170551498, 0.9472879907934827, + 0.9576032806985735, 0.9680308967461471, 0.9785720620876999, 0.9892280131939752, + }, + // Schema 7: + { + 0.5, 0.5027149505564014, 0.5054446430258502, 0.5081891574554764, + 0.5109485743270583, 0.5137229745593818, 0.5165124395106142, 0.5193170509806894, + 0.5221368912137069, 0.5249720429003435, 0.5278225891802786, 0.5306886136446309, + 0.5335702003384117, 0.5364674337629877, 0.5393803988785598, 0.5423091811066545, + 0.5452538663326288, 0.5482145409081883, 0.5511912916539204, 0.5541842058618393, + 0.5571933712979462, 0.5602188762048033, 0.5632608093041209, 0.5663192597993595, + 0.5693943173783458, 0.572486072215902, 0.5755946149764913, 0.5787200368168754, + 0.5818624293887887, 0.585021884841625, 0.5881984958251406, 0.5913923554921704, + 0.5946035575013605, 0.5978321960199137, 0.6010783657263515, 0.6043421618132907, + 0.6076236799902344, 0.6109230164863786, 0.6142402680534349, 0.6175755319684665, + 0.620928906036742, 0.6243004885946023, 0.6276903785123455, 0.6310986751971253, + 0.6345254785958666, 0.637970889198196, 0.6414350080393891, 0.6449179367033329, + 0.6484197773255048, 0.6519406325959679, 0.6554806057623822, 0.659039800633032, + 0.6626183215798706, 0.6662162735415805, 0.6698337620266515, 0.6734708931164728, + 0.6771277734684463, 0.6808045103191123, 0.6845012114872953, 0.688217985377265, + 0.6919549409819159, 0.6957121878859629, 0.6994898362691555, 0.7032879969095076, + 0.7071067811865475, 0.7109463010845827, 0.7148066691959849, 0.718687998724491, + 0.7225904034885232, 0.7265139979245261, 0.7304588970903234, 0.7344252166684908, + 0.7384130729697496, 0.7424225829363761, 0.7464538641456323, 0.7505070348132126, + 0.7545822137967112, 0.7586795205991071, 0.762799075372269, 0.7669409989204777, + 0.7711054127039704, 0.7752924388424999, 0.7795022001189185, 0.7837348199827764, + 0.7879904225539431, 0.7922691326262467, 0.7965710756711334, 0.8008963778413465, + 0.805245165974627, 0.8096175675974316, 0.8140137109286738, 0.8184337248834821, + 0.8228777390769823, 0.8273458838280969, 0.8318382901633681, 0.8363550898207981, + 0.8408964152537144, 0.8454623996346523, 0.8500531768592616, 0.8546688815502312, + 0.8593096490612387, 0.8639756154809185, 0.8686669176368529, 0.8733836930995842, + 0.8781260801866495, 0.8828942179666361, 0.8876882462632604, 0.8925083056594671, + 0.8973545375015533, 0.9022270839033115, 0.9071260877501991, 0.9120516927035263, + 0.9170040432046711, 0.9219832844793128, 0.9269895625416926, 0.9320230241988943, + 0.9370838170551498, 0.9421720895161669, 0.9472879907934827, 0.9524316709088368, + 0.9576032806985735, 0.9628029718180622, 0.9680308967461471, 0.9732872087896164, + 0.9785720620876999, 0.9838856116165875, 0.9892280131939752, 0.9945994234836328, + }, + // Schema 8: + { + 0.5, 0.5013556375251013, 0.5027149505564014, 0.5040779490592088, + 0.5054446430258502, 0.5068150424757447, 0.5081891574554764, 0.509566998038869, + 0.5109485743270583, 0.5123338964485679, 0.5137229745593818, 0.5151158188430205, + 0.5165124395106142, 0.5179128468009786, 0.5193170509806894, 0.520725062344158, + 0.5221368912137069, 
0.5235525479396449, 0.5249720429003435, 0.526395386502313, + 0.5278225891802786, 0.5292536613972564, 0.5306886136446309, 0.5321274564422321, + 0.5335702003384117, 0.5350168559101208, 0.5364674337629877, 0.5379219445313954, + 0.5393803988785598, 0.5408428074966075, 0.5423091811066545, 0.5437795304588847, + 0.5452538663326288, 0.5467321995364429, 0.5482145409081883, 0.549700901315111, + 0.5511912916539204, 0.5526857228508706, 0.5541842058618393, 0.5556867516724088, + 0.5571933712979462, 0.5587040757836845, 0.5602188762048033, 0.5617377836665098, + 0.5632608093041209, 0.564787964283144, 0.5663192597993595, 0.5678547070789026, + 0.5693943173783458, 0.5709381019847808, 0.572486072215902, 0.5740382394200894, + 0.5755946149764913, 0.5771552102951081, 0.5787200368168754, 0.5802891060137493, + 0.5818624293887887, 0.5834400184762408, 0.585021884841625, 0.5866080400818185, + 0.5881984958251406, 0.5897932637314379, 0.5913923554921704, 0.5929957828304968, + 0.5946035575013605, 0.5962156912915756, 0.5978321960199137, 0.5994530835371903, + 0.6010783657263515, 0.6027080545025619, 0.6043421618132907, 0.6059806996384005, + 0.6076236799902344, 0.6092711149137041, 0.6109230164863786, 0.6125793968185725, + 0.6142402680534349, 0.6159056423670379, 0.6175755319684665, 0.6192499490999082, + 0.620928906036742, 0.622612415087629, 0.6243004885946023, 0.6259931389331581, + 0.6276903785123455, 0.6293922197748583, 0.6310986751971253, 0.6328097572894031, + 0.6345254785958666, 0.6362458516947014, 0.637970889198196, 0.6397006037528346, + 0.6414350080393891, 0.6431741147730128, 0.6449179367033329, 0.6466664866145447, + 0.6484197773255048, 0.6501778216898253, 0.6519406325959679, 0.6537082229673385, + 0.6554806057623822, 0.6572577939746774, 0.659039800633032, 0.6608266388015788, + 0.6626183215798706, 0.6644148621029772, 0.6662162735415805, 0.6680225691020727, + 0.6698337620266515, 0.6716498655934177, 0.6734708931164728, 0.6752968579460171, + 0.6771277734684463, 0.6789636531064505, 0.6808045103191123, 0.6826503586020058, + 0.6845012114872953, 0.6863570825438342, 0.688217985377265, 0.690083933630119, + 0.6919549409819159, 0.6938310211492645, 0.6957121878859629, 0.6975984549830999, + 0.6994898362691555, 0.7013863456101023, 0.7032879969095076, 0.7051948041086352, + 0.7071067811865475, 0.7090239421602076, 0.7109463010845827, 0.7128738720527471, + 0.7148066691959849, 0.7167447066838943, 0.718687998724491, 0.7206365595643126, + 0.7225904034885232, 0.7245495448210174, 0.7265139979245261, 0.7284837772007218, + 0.7304588970903234, 0.7324393720732029, 0.7344252166684908, 0.7364164454346837, + 0.7384130729697496, 0.7404151139112358, 0.7424225829363761, 0.7444354947621984, + 0.7464538641456323, 0.7484777058836176, 0.7505070348132126, 0.7525418658117031, + 0.7545822137967112, 0.7566280937263048, 0.7586795205991071, 0.7607365094544071, + 0.762799075372269, 0.7648672334736434, 0.7669409989204777, 0.7690203869158282, + 0.7711054127039704, 0.7731960915705107, 0.7752924388424999, 0.7773944698885442, + 0.7795022001189185, 0.7816156449856788, 0.7837348199827764, 0.7858597406461707, + 0.7879904225539431, 0.7901268813264122, 0.7922691326262467, 0.7944171921585818, + 0.7965710756711334, 0.7987307989543135, 0.8008963778413465, 0.8030678282083853, + 0.805245165974627, 0.8074284071024302, 0.8096175675974316, 0.8118126635086642, + 0.8140137109286738, 0.8162207259936375, 0.8184337248834821, 0.820652723822003, + 0.8228777390769823, 0.8251087869603088, 0.8273458838280969, 0.8295890460808079, + 0.8318382901633681, 0.8340936325652911, 0.8363550898207981, 
0.8386226785089391, + 0.8408964152537144, 0.8431763167241966, 0.8454623996346523, 0.8477546807446661, + 0.8500531768592616, 0.8523579048290255, 0.8546688815502312, 0.8569861239649629, + 0.8593096490612387, 0.8616394738731368, 0.8639756154809185, 0.8663180910111553, + 0.8686669176368529, 0.871022112577578, 0.8733836930995842, 0.8757516765159389, + 0.8781260801866495, 0.8805069215187917, 0.8828942179666361, 0.8852879870317771, + 0.8876882462632604, 0.890095013257712, 0.8925083056594671, 0.8949281411607002, + 0.8973545375015533, 0.8997875124702672, 0.9022270839033115, 0.9046732696855155, + 0.9071260877501991, 0.909585556079304, 0.9120516927035263, 0.9145245157024483, + 0.9170040432046711, 0.9194902933879467, 0.9219832844793128, 0.9244830347552253, + 0.9269895625416926, 0.92950288621441, 0.9320230241988943, 0.9345499949706191, + 0.9370838170551498, 0.93962450902828, 0.9421720895161669, 0.9447265771954693, + 0.9472879907934827, 0.9498563490882775, 0.9524316709088368, 0.9550139751351947, + 0.9576032806985735, 0.9601996065815236, 0.9628029718180622, 0.9654133954938133, + 0.9680308967461471, 0.9706554947643201, 0.9732872087896164, 0.9759260581154889, + 0.9785720620876999, 0.9812252401044634, 0.9838856116165875, 0.9865531961276168, + 0.9892280131939752, 0.9919100824251095, 0.9945994234836328, 0.9972960560854698, + }, +} + +// The nativeHistogramBounds above can be generated with the code below. +// +// TODO(beorn7): It's tempting to actually use `go generate` to generate the +// code above. However, this could lead to slightly different numbers on +// different architectures. We still need to come to terms if we are fine with +// that, or if we might prefer to specify precise numbers in the standard. +// +// var nativeHistogramBounds [][]float64 = make([][]float64, 9) +// +// func init() { +// // Populate nativeHistogramBounds. +// numBuckets := 1 +// for i := range nativeHistogramBounds { +// bounds := []float64{0.5} +// factor := math.Exp2(math.Exp2(float64(-i))) +// for j := 0; j < numBuckets-1; j++ { +// var bound float64 +// if (j+1)%2 == 0 { +// // Use previously calculated value for increased precision. +// bound = nativeHistogramBounds[i-1][j/2+1] +// } else { +// bound = bounds[j] * factor +// } +// bounds = append(bounds, bound) +// } +// numBuckets *= 2 +// nativeHistogramBounds[i] = bounds +// } +// } + // A Histogram counts individual observations from an event or sample stream in -// configurable buckets. Similar to a summary, it also provides a sum of -// observations and an observation count. +// configurable static buckets (or in dynamic sparse buckets as part of the +// experimental Native Histograms, see below for more details). Similar to a +// Summary, it also provides a sum of observations and an observation count. // // On the Prometheus server, quantiles can be calculated from a Histogram using -// the histogram_quantile function in the query language. +// the histogram_quantile PromQL function. // -// Note that Histograms, in contrast to Summaries, can be aggregated with the -// Prometheus query language (see the documentation for detailed -// procedures). However, Histograms require the user to pre-define suitable -// buckets, and they are in general less accurate. The Observe method of a -// Histogram has a very low performance overhead in comparison with the Observe -// method of a Summary. +// Note that Histograms, in contrast to Summaries, can be aggregated in PromQL +// (see the documentation for detailed procedures). 
However, Histograms require +// the user to pre-define suitable buckets, and they are in general less +// accurate. (Both problems are addressed by the experimental Native +// Histograms. To use them, configure a NativeHistogramBucketFactor in the +// HistogramOpts. They also require a Prometheus server v2.40+ with the +// corresponding feature flag enabled.) +// +// The Observe method of a Histogram has a very low performance overhead in +// comparison with the Observe method of a Summary. // // To create Histogram instances, use NewHistogram. type Histogram interface { @@ -50,7 +253,8 @@ type Histogram interface { // Observe adds a single observation to the histogram. Observations are // usually positive or zero. Negative observations are accepted but // prevent current versions of Prometheus from properly detecting - // counter resets in the sum of observations. See + // counter resets in the sum of observations. (The experimental Native + // Histograms handle negative observations properly.) See // https://prometheus.io/docs/practices/histograms/#count-and-sum-of-observations // for details. Observe(float64) @@ -64,18 +268,28 @@ const bucketLabel = "le" // tailored to broadly measure the response time (in seconds) of a network // service. Most likely, however, you will be required to define buckets // customized to your use case. -var ( - DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10} +var DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10} - errBucketLabelNotAllowed = fmt.Errorf( - "%q is not allowed as label name in histograms", bucketLabel, - ) +// DefNativeHistogramZeroThreshold is the default value for +// NativeHistogramZeroThreshold in the HistogramOpts. +// +// The value is 2^-128 (or 0.5*2^-127 in the actual IEEE 754 representation), +// which is a bucket boundary at all possible resolutions. +const DefNativeHistogramZeroThreshold = 2.938735877055719e-39 + +// NativeHistogramZeroThresholdZero can be used as NativeHistogramZeroThreshold +// in the HistogramOpts to create a zero bucket of width zero, i.e. a zero +// bucket that only receives observations of precisely zero. +const NativeHistogramZeroThresholdZero = -1 + +var errBucketLabelNotAllowed = fmt.Errorf( + "%q is not allowed as label name in histograms", bucketLabel, ) -// LinearBuckets creates 'count' buckets, each 'width' wide, where the lowest -// bucket has an upper bound of 'start'. The final +Inf bucket is not counted -// and not included in the returned slice. The returned slice is meant to be -// used for the Buckets field of HistogramOpts. +// LinearBuckets creates 'count' regular buckets, each 'width' wide, where the +// lowest bucket has an upper bound of 'start'. The final +Inf bucket is not +// counted and not included in the returned slice. The returned slice is meant +// to be used for the Buckets field of HistogramOpts. // // The function panics if 'count' is zero or negative. func LinearBuckets(start, width float64, count int) []float64 { @@ -90,11 +304,11 @@ func LinearBuckets(start, width float64, count int) []float64 { return buckets } -// ExponentialBuckets creates 'count' buckets, where the lowest bucket has an -// upper bound of 'start' and each following bucket's upper bound is 'factor' -// times the previous bucket's upper bound. The final +Inf bucket is not counted -// and not included in the returned slice. The returned slice is meant to be -// used for the Buckets field of HistogramOpts. 
+// ExponentialBuckets creates 'count' regular buckets, where the lowest bucket +// has an upper bound of 'start' and each following bucket's upper bound is +// 'factor' times the previous bucket's upper bound. The final +Inf bucket is +// not counted and not included in the returned slice. The returned slice is +// meant to be used for the Buckets field of HistogramOpts. // // The function panics if 'count' is 0 or negative, if 'start' is 0 or negative, // or if 'factor' is less than or equal 1. @@ -122,11 +336,11 @@ func ExponentialBuckets(start, factor float64, count int) []float64 { // used for the Buckets field of HistogramOpts. // // The function panics if 'count' is 0 or negative, if 'min' is 0 or negative. -func ExponentialBucketsRange(min, max float64, count int) []float64 { +func ExponentialBucketsRange(minBucket, maxBucket float64, count int) []float64 { if count < 1 { panic("ExponentialBucketsRange count needs a positive count") } - if min <= 0 { + if minBucket <= 0 { panic("ExponentialBucketsRange min needs to be greater than 0") } @@ -134,12 +348,12 @@ func ExponentialBucketsRange(min, max float64, count int) []float64 { // max = min*growthFactor^(bucketCount-1) // We know max/min and highest bucket. Solve for growthFactor. - growthFactor := math.Pow(max/min, 1.0/float64(count-1)) + growthFactor := math.Pow(maxBucket/minBucket, 1.0/float64(count-1)) // Now that we know growthFactor, solve for each bucket. buckets := make([]float64, count) for i := 1; i <= count; i++ { - buckets[i-1] = min * math.Pow(growthFactor, float64(i-1)) + buckets[i-1] = minBucket * math.Pow(growthFactor, float64(i-1)) } return buckets } @@ -180,8 +394,124 @@ type HistogramOpts struct { // element in the slice is the upper inclusive bound of a bucket. The // values must be sorted in strictly increasing order. There is no need // to add a highest bucket with +Inf bound, it will be added - // implicitly. The default value is DefBuckets. + // implicitly. If Buckets is left as nil or set to a slice of length + // zero, it is replaced by default buckets. The default buckets are + // DefBuckets if no buckets for a native histogram (see below) are used, + // otherwise the default is no buckets. (In other words, if you want to + // use both regular buckets and buckets for a native histogram, you have + // to define the regular buckets here explicitly.) Buckets []float64 + + // If NativeHistogramBucketFactor is greater than one, so-called sparse + // buckets are used (in addition to the regular buckets, if defined + // above). A Histogram with sparse buckets will be ingested as a Native + // Histogram by a Prometheus server with that feature enabled (requires + // Prometheus v2.40+). Sparse buckets are exponential buckets covering + // the whole float64 range (with the exception of the “zero” bucket, see + // NativeHistogramZeroThreshold below). From any one bucket to the next, + // the width of the bucket grows by a constant + // factor. NativeHistogramBucketFactor provides an upper bound for this + // factor (exception see below). The smaller + // NativeHistogramBucketFactor, the more buckets will be used and thus + // the more costly the histogram will become. A generally good trade-off + // between cost and accuracy is a value of 1.1 (each bucket is at most + // 10% wider than the previous one), which will result in each power of + // two divided into 8 buckets (e.g. there will be 8 buckets between 1 + // and 2, same as between 2 and 4, and 4 and 8, etc.). 
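+	//
+	// (Editorial worked example, not upstream text: with
+	// NativeHistogramBucketFactor = 1.1, the largest factor of the form
+	// 2^(2^-n) that is still <= 1.1 is 2^(2^-3) = 2^0.125 ≈ 1.0905, i.e.
+	// schema 3. The resulting 2^3 = 8 bucket boundaries between 1 and 2
+	// are 2^(1/8) ≈ 1.09, 1.19, 1.30, 1.41, 1.54, 1.68, 1.83, and 2.)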
+ // + // Details about the actually used factor: The factor is calculated as + // 2^(2^-n), where n is an integer number between (and including) -4 and + // 8. n is chosen so that the resulting factor is the largest that is + // still smaller or equal to NativeHistogramBucketFactor. Note that the + // smallest possible factor is therefore approx. 1.00271 (i.e. 2^(2^-8) + // ). If NativeHistogramBucketFactor is greater than 1 but smaller than + // 2^(2^-8), then the actually used factor is still 2^(2^-8) even though + // it is larger than the provided NativeHistogramBucketFactor. + // + // NOTE: Native Histograms are still an experimental feature. Their + // behavior might still change without a major version + // bump. Subsequently, all NativeHistogram... options here might still + // change their behavior or name (or might completely disappear) without + // a major version bump. + NativeHistogramBucketFactor float64 + // All observations with an absolute value of less or equal + // NativeHistogramZeroThreshold are accumulated into a “zero” bucket. + // For best results, this should be close to a bucket boundary. This is + // usually the case if picking a power of two. If + // NativeHistogramZeroThreshold is left at zero, + // DefNativeHistogramZeroThreshold is used as the threshold. To + // configure a zero bucket with an actual threshold of zero (i.e. only + // observations of precisely zero will go into the zero bucket), set + // NativeHistogramZeroThreshold to the NativeHistogramZeroThresholdZero + // constant (or any negative float value). + NativeHistogramZeroThreshold float64 + + // The next three fields define a strategy to limit the number of + // populated sparse buckets. If NativeHistogramMaxBucketNumber is left + // at zero, the number of buckets is not limited. (Note that this might + // lead to unbounded memory consumption if the values observed by the + // Histogram are sufficiently wide-spread. In particular, this could be + // used as a DoS attack vector. Where the observed values depend on + // external inputs, it is highly recommended to set a + // NativeHistogramMaxBucketNumber.) Once the set + // NativeHistogramMaxBucketNumber is exceeded, the following strategy is + // enacted: + // - First, if the last reset (or the creation) of the histogram is at + // least NativeHistogramMinResetDuration ago, then the whole + // histogram is reset to its initial state (including regular + // buckets). + // - If less time has passed, or if NativeHistogramMinResetDuration is + // zero, no reset is performed. Instead, the zero threshold is + // increased sufficiently to reduce the number of buckets to or below + // NativeHistogramMaxBucketNumber, but not to more than + // NativeHistogramMaxZeroThreshold. Thus, if + // NativeHistogramMaxZeroThreshold is already at or below the current + // zero threshold, nothing happens at this step. + // - After that, if the number of buckets still exceeds + // NativeHistogramMaxBucketNumber, the resolution of the histogram is + // reduced by doubling the width of the sparse buckets (up to a + // growth factor between one bucket to the next of 2^(2^4) = 65536, + // see above). + // - Any increased zero threshold or reduced resolution is reset back + // to their original values once NativeHistogramMinResetDuration has + // passed (since the last reset or the creation of the histogram). 
+ NativeHistogramMaxBucketNumber uint32 + NativeHistogramMinResetDuration time.Duration + NativeHistogramMaxZeroThreshold float64 + + // NativeHistogramMaxExemplars limits the number of exemplars + // that are kept in memory for each native histogram. If you leave it at + // zero, a default value of 10 is used. If no exemplars should be kept specifically + // for native histograms, set it to a negative value. (Scrapers can + // still use the exemplars exposed for classic buckets, which are managed + // independently.) + NativeHistogramMaxExemplars int + // NativeHistogramExemplarTTL is only checked once + // NativeHistogramMaxExemplars is exceeded. In that case, the + // oldest exemplar is removed if it is older than NativeHistogramExemplarTTL. + // Otherwise, the older exemplar in the pair of exemplars that are closest + // together (on an exponential scale) is removed. + // If NativeHistogramExemplarTTL is left at its zero value, a default value of + // 5m is used. To always delete the oldest exemplar, set it to a negative value. + NativeHistogramExemplarTTL time.Duration + + // now is for testing purposes, by default it's time.Now. + now func() time.Time + + // afterFunc is for testing purposes, by default it's time.AfterFunc. + afterFunc func(time.Duration, func()) *time.Timer +} + +// HistogramVecOpts bundles the options to create a HistogramVec metric. +// It is mandatory to set HistogramOpts, see there for mandatory fields. VariableLabels +// is optional and can safely be left to its default value. +type HistogramVecOpts struct { + HistogramOpts + + // VariableLabels are used to partition the metric vector by the given set + // of labels. Each label value will be constrained with the optional Constraint + // function, if provided. + VariableLabels ConstrainableLabels } // NewHistogram creates a new Histogram based on the provided HistogramOpts. It @@ -203,11 +533,11 @@ func NewHistogram(opts HistogramOpts) Histogram { } func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram { - if len(desc.variableLabels) != len(labelValues) { - panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues)) + if len(desc.variableLabels.names) != len(labelValues) { + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, labelValues)) } - for _, n := range desc.variableLabels { + for _, n := range desc.variableLabels.names { if n == bucketLabel { panic(errBucketLabelNotAllowed) } @@ -218,16 +548,38 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr } } - if len(opts.Buckets) == 0 { - opts.Buckets = DefBuckets + if opts.now == nil { + opts.now = time.Now + } + if opts.afterFunc == nil { + opts.afterFunc = time.AfterFunc } h := &histogram{ - desc: desc, - upperBounds: opts.Buckets, - labelPairs: MakeLabelPairs(desc, labelValues), - counts: [2]*histogramCounts{{}, {}}, - now: time.Now, + desc: desc, + upperBounds: opts.Buckets, + labelPairs: MakeLabelPairs(desc, labelValues), + nativeHistogramMaxBuckets: opts.NativeHistogramMaxBucketNumber, + nativeHistogramMaxZeroThreshold: opts.NativeHistogramMaxZeroThreshold, + nativeHistogramMinResetDuration: opts.NativeHistogramMinResetDuration, + lastResetTime: opts.now(), + now: opts.now, + afterFunc: opts.afterFunc, + } + if len(h.upperBounds) == 0 && opts.NativeHistogramBucketFactor <= 1 { + h.upperBounds = DefBuckets + } + if opts.NativeHistogramBucketFactor <= 1 { + h.nativeHistogramSchema = math.MinInt32 // To mark that there are no sparse buckets. 
+ } else {
+ switch {
+ case opts.NativeHistogramZeroThreshold > 0:
+ h.nativeHistogramZeroThreshold = opts.NativeHistogramZeroThreshold
+ case opts.NativeHistogramZeroThreshold == 0:
+ h.nativeHistogramZeroThreshold = DefNativeHistogramZeroThreshold
+ } // Leave h.nativeHistogramZeroThreshold at 0 otherwise.
+ h.nativeHistogramSchema = pickSchema(opts.NativeHistogramBucketFactor)
+ h.nativeExemplars = makeNativeExemplars(opts.NativeHistogramExemplarTTL, opts.NativeHistogramMaxExemplars)
 }
 for i, upperBound := range h.upperBounds {
 if i < len(h.upperBounds)-1 {
@@ -246,8 +598,12 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
 }
 // Finally we know the final length of h.upperBounds and can make buckets
 // for both counts as well as exemplars:
- h.counts[0].buckets = make([]uint64, len(h.upperBounds))
- h.counts[1].buckets = make([]uint64, len(h.upperBounds))
+ h.counts[0] = &histogramCounts{buckets: make([]uint64, len(h.upperBounds))}
+ atomic.StoreUint64(&h.counts[0].nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold))
+ atomic.StoreInt32(&h.counts[0].nativeHistogramSchema, h.nativeHistogramSchema)
+ h.counts[1] = &histogramCounts{buckets: make([]uint64, len(h.upperBounds))}
+ atomic.StoreUint64(&h.counts[1].nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold))
+ atomic.StoreInt32(&h.counts[1].nativeHistogramSchema, h.nativeHistogramSchema)
 h.exemplars = make([]atomic.Value, len(h.upperBounds)+1)
 h.init(h) // Init self-collection.
@@ -255,13 +611,98 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
}

type histogramCounts struct {
+ // Order in this struct matters for the alignment required by atomic
+ // operations, see http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+
 // sumBits contains the bits of the float64 representing the sum of all
- // observations. sumBits and count have to go first in the struct to
- // guarantee alignment for atomic operations.
- // http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+ // observations.
 sumBits uint64
 count uint64
+
+ // nativeHistogramZeroBucket counts all (positive and negative)
+ // observations in the zero bucket (with an absolute value less or equal to
+ // the current threshold; see the next field).
+ nativeHistogramZeroBucket uint64
+ // nativeHistogramZeroThresholdBits is the bit pattern of the current
+ // threshold for the zero bucket. It's initially equal to
+ // nativeHistogramZeroThreshold but may change according to the bucket
+ // count limitation strategy.
+ nativeHistogramZeroThresholdBits uint64
+ // nativeHistogramSchema may change over time according to the bucket
+ // count limitation strategy and therefore has to be saved here.
+ nativeHistogramSchema int32
+ // Number of (positive and negative) sparse buckets.
+ nativeHistogramBucketsNumber uint32
+
+ // Regular buckets.
 buckets []uint64
+
+ // The sparse buckets for native histograms are implemented with a
+ // sync.Map for now. A dedicated data structure will likely be more
+ // efficient. There are separate maps for negative and positive
+ // observations. The map's value is an *int64, counting observations in
+ // that bucket. (Note that we don't use uint64 as an int64 won't
+ // overflow in practice, and working with signed numbers from the
+ // beginning simplifies the handling of deltas.) The map's key is the
+ // index of the bucket according to the used
+ // nativeHistogramSchema. Index 0 is for an upper bound of 1.
+ nativeHistogramBucketsPositive, nativeHistogramBucketsNegative sync.Map
+}
+
+// observe manages the parts of observe that only affect
+// histogramCounts. doSparse is true if sparse buckets should be done,
+// too.
+func (hc *histogramCounts) observe(v float64, bucket int, doSparse bool) {
+ if bucket < len(hc.buckets) {
+ atomic.AddUint64(&hc.buckets[bucket], 1)
+ }
+ atomicAddFloat(&hc.sumBits, v)
+ if doSparse && !math.IsNaN(v) {
+ var (
+ key int
+ schema = atomic.LoadInt32(&hc.nativeHistogramSchema)
+ zeroThreshold = math.Float64frombits(atomic.LoadUint64(&hc.nativeHistogramZeroThresholdBits))
+ bucketCreated, isInf bool
+ )
+ if math.IsInf(v, 0) {
+ // Pretend v is MaxFloat64 but later increment key by one.
+ if math.IsInf(v, +1) {
+ v = math.MaxFloat64
+ } else {
+ v = -math.MaxFloat64
+ }
+ isInf = true
+ }
+ frac, exp := math.Frexp(math.Abs(v))
+ if schema > 0 {
+ bounds := nativeHistogramBounds[schema]
+ key = sort.SearchFloat64s(bounds, frac) + (exp-1)*len(bounds)
+ } else {
+ key = exp
+ if frac == 0.5 {
+ key--
+ }
+ offset := (1 << -schema) - 1
+ key = (key + offset) >> -schema
+ }
+ if isInf {
+ key++
+ }
+ switch {
+ case v > zeroThreshold:
+ bucketCreated = addToBucket(&hc.nativeHistogramBucketsPositive, key, 1)
+ case v < -zeroThreshold:
+ bucketCreated = addToBucket(&hc.nativeHistogramBucketsNegative, key, 1)
+ default:
+ atomic.AddUint64(&hc.nativeHistogramZeroBucket, 1)
+ }
+ if bucketCreated {
+ atomic.AddUint32(&hc.nativeHistogramBucketsNumber, 1)
+ }
+ }
+ // Increment count last as we take it as a signal that the observation
+ // is complete.
+ atomic.AddUint64(&hc.count, 1)
}

type histogram struct {
@@ -276,7 +717,7 @@ type histogram struct {
 // perspective of the histogram) swap the hot–cold under the writeMtx
 // lock. A cooldown is awaited (while locked) by comparing the number of
 // observations with the initiation count. Once they match, then the
- // last observation on the now cool one has completed. All cool fields must
+ // last observation on the now cool one has completed. All cold fields must
 // be merged into the new hot before releasing writeMtx.
 //
 // Fields with atomic access first! See alignment constraint:
@@ -284,8 +725,10 @@ type histogram struct {
 countAndHotIdx uint64
 selfCollector
- desc *Desc
- writeMtx sync.Mutex // Only used in the Write method.
+ desc *Desc
+
+ // Only used in the Write method and for sparse bucket management.
+ mtx sync.Mutex

 // Two counts, one is "hot" for lock-free observations, the other is
 // "cold" for writing out a dto.Metric. It has to be an array of
 // http://golang.org/pkg/sync/atomic/#pkg-note-BUG.
 counts [2]*histogramCounts

- upperBounds []float64
- labelPairs []*dto.LabelPair
- exemplars []atomic.Value // One more than buckets (to include +Inf), each a *dto.Exemplar.
-
- now func() time.Time // To mock out time.Now() for testing.
+ upperBounds []float64
+ labelPairs []*dto.LabelPair
+ exemplars []atomic.Value // One more than buckets (to include +Inf), each a *dto.Exemplar.
+ nativeHistogramSchema int32 // The initial schema. Set to math.MinInt32 if no sparse buckets are used.
+ nativeHistogramZeroThreshold float64 // The initial zero threshold.
+ nativeHistogramMaxZeroThreshold float64
+ nativeHistogramMaxBuckets uint32
+ nativeHistogramMinResetDuration time.Duration
+ // lastResetTime is protected by mtx. It is also used as created timestamp.
+ lastResetTime time.Time
+ // resetScheduled is protected by mtx.
It is true if a reset is + // scheduled for a later time (when nativeHistogramMinResetDuration has + // passed). + resetScheduled bool + nativeExemplars nativeExemplars + + // now is for testing purposes, by default it's time.Now. + now func() time.Time + + // afterFunc is for testing purposes, by default it's time.AfterFunc. + afterFunc func(time.Duration, func()) *time.Timer } func (h *histogram) Desc() *Desc { @@ -308,6 +767,9 @@ func (h *histogram) Observe(v float64) { h.observe(v, h.findBucket(v)) } +// ObserveWithExemplar should not be called in a high-frequency setting +// for a native histogram with configured exemplars. For this case, +// the implementation isn't lock-free and might suffer from lock contention. func (h *histogram) ObserveWithExemplar(v float64, e Labels) { i := h.findBucket(v) h.observe(v, i) @@ -319,8 +781,8 @@ func (h *histogram) Write(out *dto.Metric) error { // the hot path, i.e. Observe is called much more often than Write. The // complication of making Write lock-free isn't worth it, if possible at // all. - h.writeMtx.Lock() - defer h.writeMtx.Unlock() + h.mtx.Lock() + defer h.mtx.Unlock() // Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0) // without touching the count bits. See the struct comments for a full @@ -333,16 +795,17 @@ func (h *histogram) Write(out *dto.Metric) error { hotCounts := h.counts[n>>63] coldCounts := h.counts[(^n)>>63] - // Await cooldown. - for count != atomic.LoadUint64(&coldCounts.count) { - runtime.Gosched() // Let observations get work done. - } + waitForCooldown(count, coldCounts) his := &dto.Histogram{ - Bucket: make([]*dto.Bucket, len(h.upperBounds)), - SampleCount: proto.Uint64(count), - SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))), + Bucket: make([]*dto.Bucket, len(h.upperBounds)), + SampleCount: proto.Uint64(count), + SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))), + CreatedTimestamp: timestamppb.New(h.lastResetTime), } + out.Histogram = his + out.Label = h.labelPairs + var cumCount uint64 for i, upperBound := range h.upperBounds { cumCount += atomic.LoadUint64(&coldCounts.buckets[i]) @@ -363,68 +826,330 @@ func (h *histogram) Write(out *dto.Metric) error { } his.Bucket = append(his.Bucket, b) } + if h.nativeHistogramSchema > math.MinInt32 { + his.ZeroThreshold = proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.nativeHistogramZeroThresholdBits))) + his.Schema = proto.Int32(atomic.LoadInt32(&coldCounts.nativeHistogramSchema)) + zeroBucket := atomic.LoadUint64(&coldCounts.nativeHistogramZeroBucket) + + defer func() { + coldCounts.nativeHistogramBucketsPositive.Range(addAndReset(&hotCounts.nativeHistogramBucketsPositive, &hotCounts.nativeHistogramBucketsNumber)) + coldCounts.nativeHistogramBucketsNegative.Range(addAndReset(&hotCounts.nativeHistogramBucketsNegative, &hotCounts.nativeHistogramBucketsNumber)) + }() + + his.ZeroCount = proto.Uint64(zeroBucket) + his.NegativeSpan, his.NegativeDelta = makeBuckets(&coldCounts.nativeHistogramBucketsNegative) + his.PositiveSpan, his.PositiveDelta = makeBuckets(&coldCounts.nativeHistogramBucketsPositive) + + // Add a no-op span to a histogram without observations and with + // a zero threshold of zero. Otherwise, a native histogram would + // look like a classic histogram to scrapers. 
+ if *his.ZeroThreshold == 0 && *his.ZeroCount == 0 && len(his.PositiveSpan) == 0 && len(his.NegativeSpan) == 0 {
+ his.PositiveSpan = []*dto.BucketSpan{{
+ Offset: proto.Int32(0),
+ Length: proto.Uint32(0),
+ }}
+ }

- out.Histogram = his
- out.Label = h.labelPairs
-
- // Finally add all the cold counts to the new hot counts and reset the cold counts.
- atomic.AddUint64(&hotCounts.count, count)
- atomic.StoreUint64(&coldCounts.count, 0)
- for {
- oldBits := atomic.LoadUint64(&hotCounts.sumBits)
- newBits := math.Float64bits(math.Float64frombits(oldBits) + his.GetSampleSum())
- if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
- atomic.StoreUint64(&coldCounts.sumBits, 0)
- break
+ if h.nativeExemplars.isEnabled() {
+ h.nativeExemplars.Lock()
+ his.Exemplars = append(his.Exemplars, h.nativeExemplars.exemplars...)
+ h.nativeExemplars.Unlock()
 }
+ }

- for i := range h.upperBounds {
- atomic.AddUint64(&hotCounts.buckets[i], atomic.LoadUint64(&coldCounts.buckets[i]))
- atomic.StoreUint64(&coldCounts.buckets[i], 0)
- }
+ addAndResetCounts(hotCounts, coldCounts)
 return nil
}

// findBucket returns the index of the bucket for the provided value, or
// len(h.upperBounds) for the +Inf bucket.
func (h *histogram) findBucket(v float64) int {
- // TODO(beorn7): For small numbers of buckets (<30), a linear search is
- // slightly faster than the binary search. If we really care, we could
- // switch from one search strategy to the other depending on the number
- // of buckets.
- //
- // Microbenchmarks (BenchmarkHistogramNoLabels):
- // 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op
- // 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op
- // 300 buckets: 154 ns/op linear - binary 61.6 ns/op
+ n := len(h.upperBounds)
+ if n == 0 {
+ return 0
+ }
+
+ // Early exit: if v is less than or equal to the first upper bound, return 0
+ if v <= h.upperBounds[0] {
+ return 0
+ }
+
+ // Early exit: if v is greater than the last upper bound, return len(h.upperBounds)
+ if v > h.upperBounds[n-1] {
+ return n
+ }
+
+ // For small arrays, use simple linear search
+ // the "magic number" 35 is the result of tests on a couple of different (AWS and baremetal) servers
+ // see more details here: https://github.com/prometheus/client_golang/pull/1662
+ if n < 35 {
+ for i, bound := range h.upperBounds {
+ if v <= bound {
+ return i
+ }
+ }
+ // If v is greater than all upper bounds, return len(h.upperBounds)
+ return n
+ }
+
+ // For larger arrays, use stdlib's binary search
 return sort.SearchFloat64s(h.upperBounds, v)
}

// observe is the implementation for Observe without the findBucket part.
func (h *histogram) observe(v float64, bucket int) {
+ // Do not add to sparse buckets for NaN observations.
+ doSparse := h.nativeHistogramSchema > math.MinInt32 && !math.IsNaN(v)
 // We increment h.countAndHotIdx so that the counter in the lower
 // 63 bits gets incremented. At the same time, we get the new value
 // back, which we can use to find the currently-hot counts.
 n := atomic.AddUint64(&h.countAndHotIdx, 1)
 hotCounts := h.counts[n>>63]
+ hotCounts.observe(v, bucket, doSparse)
+ if doSparse {
+ h.limitBuckets(hotCounts, v, bucket)
+ }
+}

- if bucket < len(h.upperBounds) {
- atomic.AddUint64(&hotCounts.buckets[bucket], 1)
+// limitBuckets applies a strategy to limit the number of populated sparse
+// buckets.
It's generally best effort, and there are situations where the +// number can go higher (if even the lowest resolution isn't enough to reduce +// the number sufficiently, or if the provided counts aren't fully updated yet +// by a concurrently happening Write call). +func (h *histogram) limitBuckets(counts *histogramCounts, value float64, bucket int) { + if h.nativeHistogramMaxBuckets == 0 { + return // No limit configured. } - for { - oldBits := atomic.LoadUint64(&hotCounts.sumBits) - newBits := math.Float64bits(math.Float64frombits(oldBits) + v) - if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { - break + if h.nativeHistogramMaxBuckets >= atomic.LoadUint32(&counts.nativeHistogramBucketsNumber) { + return // Bucket limit not exceeded yet. + } + + h.mtx.Lock() + defer h.mtx.Unlock() + + // The hot counts might have been swapped just before we acquired the + // lock. Re-fetch the hot counts first... + n := atomic.LoadUint64(&h.countAndHotIdx) + hotIdx := n >> 63 + coldIdx := (^n) >> 63 + hotCounts := h.counts[hotIdx] + coldCounts := h.counts[coldIdx] + // ...and then check again if we really have to reduce the bucket count. + if h.nativeHistogramMaxBuckets >= atomic.LoadUint32(&hotCounts.nativeHistogramBucketsNumber) { + return // Bucket limit not exceeded after all. + } + // Try the various strategies in order. + if h.maybeReset(hotCounts, coldCounts, coldIdx, value, bucket) { + return + } + // One of the other strategies will happen. To undo what they will do as + // soon as enough time has passed to satisfy + // h.nativeHistogramMinResetDuration, schedule a reset at the right time + // if we haven't done so already. + if h.nativeHistogramMinResetDuration > 0 && !h.resetScheduled { + h.resetScheduled = true + h.afterFunc(h.nativeHistogramMinResetDuration-h.now().Sub(h.lastResetTime), h.reset) + } + + if h.maybeWidenZeroBucket(hotCounts, coldCounts) { + return + } + h.doubleBucketWidth(hotCounts, coldCounts) +} + +// maybeReset resets the whole histogram if at least +// h.nativeHistogramMinResetDuration has been passed. It returns true if the +// histogram has been reset. The caller must have locked h.mtx. +func (h *histogram) maybeReset( + hot, cold *histogramCounts, coldIdx uint64, value float64, bucket int, +) bool { + // We are using the possibly mocked h.now() rather than + // time.Since(h.lastResetTime) to enable testing. + if h.nativeHistogramMinResetDuration == 0 || // No reset configured. + h.resetScheduled || // Do not interefere if a reset is already scheduled. + h.now().Sub(h.lastResetTime) < h.nativeHistogramMinResetDuration { + return false + } + // Completely reset coldCounts. + h.resetCounts(cold) + // Repeat the latest observation to not lose it completely. + cold.observe(value, bucket, true) + // Make coldCounts the new hot counts while resetting countAndHotIdx. + n := atomic.SwapUint64(&h.countAndHotIdx, (coldIdx<<63)+1) + count := n & ((1 << 63) - 1) + waitForCooldown(count, hot) + // Finally, reset the formerly hot counts, too. + h.resetCounts(hot) + h.lastResetTime = h.now() + return true +} + +// reset resets the whole histogram. It locks h.mtx itself, i.e. it has to be +// called without having locked h.mtx. +func (h *histogram) reset() { + h.mtx.Lock() + defer h.mtx.Unlock() + + n := atomic.LoadUint64(&h.countAndHotIdx) + hotIdx := n >> 63 + coldIdx := (^n) >> 63 + hot := h.counts[hotIdx] + cold := h.counts[coldIdx] + // Completely reset coldCounts. + h.resetCounts(cold) + // Make coldCounts the new hot counts while resetting countAndHotIdx. 
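// A hedged sketch of the HistogramOpts fields that drive the strategies in
// limitBuckets and maybeReset above (field names from the public
// client_golang API; the concrete values are arbitrary illustrations).
package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	_ = prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:                            "response_size_bytes",
		Help:                            "Response sizes.",
		NativeHistogramBucketFactor:     1.1,       // schema picked via pickSchema.
		NativeHistogramMaxBucketNumber:  100,       // triggers limitBuckets.
		NativeHistogramMinResetDuration: time.Hour, // enables maybeReset.
		// Knobs for the zero-bucket widening fallback (maybeWidenZeroBucket):
		NativeHistogramZeroThreshold:    1e-9,
		NativeHistogramMaxZeroThreshold: 1e-3,
	})
}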
+ n = atomic.SwapUint64(&h.countAndHotIdx, coldIdx<<63) + count := n & ((1 << 63) - 1) + waitForCooldown(count, hot) + // Finally, reset the formerly hot counts, too. + h.resetCounts(hot) + h.lastResetTime = h.now() + h.resetScheduled = false +} + +// maybeWidenZeroBucket widens the zero bucket until it includes the existing +// buckets closest to the zero bucket (which could be two, if an equidistant +// negative and a positive bucket exists, but usually it's only one bucket to be +// merged into the new wider zero bucket). h.nativeHistogramMaxZeroThreshold +// limits how far the zero bucket can be extended, and if that's not enough to +// include an existing bucket, the method returns false. The caller must have +// locked h.mtx. +func (h *histogram) maybeWidenZeroBucket(hot, cold *histogramCounts) bool { + currentZeroThreshold := math.Float64frombits(atomic.LoadUint64(&hot.nativeHistogramZeroThresholdBits)) + if currentZeroThreshold >= h.nativeHistogramMaxZeroThreshold { + return false + } + // Find the key of the bucket closest to zero. + smallestKey := findSmallestKey(&hot.nativeHistogramBucketsPositive) + smallestNegativeKey := findSmallestKey(&hot.nativeHistogramBucketsNegative) + if smallestNegativeKey < smallestKey { + smallestKey = smallestNegativeKey + } + if smallestKey == math.MaxInt32 { + return false + } + newZeroThreshold := getLe(smallestKey, atomic.LoadInt32(&hot.nativeHistogramSchema)) + if newZeroThreshold > h.nativeHistogramMaxZeroThreshold { + return false // New threshold would exceed the max threshold. + } + atomic.StoreUint64(&cold.nativeHistogramZeroThresholdBits, math.Float64bits(newZeroThreshold)) + // Remove applicable buckets. + if _, loaded := cold.nativeHistogramBucketsNegative.LoadAndDelete(smallestKey); loaded { + atomicDecUint32(&cold.nativeHistogramBucketsNumber) + } + if _, loaded := cold.nativeHistogramBucketsPositive.LoadAndDelete(smallestKey); loaded { + atomicDecUint32(&cold.nativeHistogramBucketsNumber) + } + // Make cold counts the new hot counts. + n := atomic.AddUint64(&h.countAndHotIdx, 1<<63) + count := n & ((1 << 63) - 1) + // Swap the pointer names to represent the new roles and make + // the rest less confusing. + hot, cold = cold, hot + waitForCooldown(count, cold) + // Add all the now cold counts to the new hot counts... + addAndResetCounts(hot, cold) + // ...adjust the new zero threshold in the cold counts, too... + atomic.StoreUint64(&cold.nativeHistogramZeroThresholdBits, math.Float64bits(newZeroThreshold)) + // ...and then merge the newly deleted buckets into the wider zero + // bucket. + mergeAndDeleteOrAddAndReset := func(hotBuckets, coldBuckets *sync.Map) func(k, v interface{}) bool { + return func(k, v interface{}) bool { + key := k.(int) + bucket := v.(*int64) + if key == smallestKey { + // Merge into hot zero bucket... + atomic.AddUint64(&hot.nativeHistogramZeroBucket, uint64(atomic.LoadInt64(bucket))) + // ...and delete from cold counts. + coldBuckets.Delete(key) + atomicDecUint32(&cold.nativeHistogramBucketsNumber) + } else { + // Add to corresponding hot bucket... + if addToBucket(hotBuckets, key, atomic.LoadInt64(bucket)) { + atomic.AddUint32(&hot.nativeHistogramBucketsNumber, 1) + } + // ...and reset cold bucket. + atomic.StoreInt64(bucket, 0) + } + return true } } - // Increment count last as we take it as a signal that the observation - // is complete. 
- atomic.AddUint64(&hotCounts.count, 1) + + cold.nativeHistogramBucketsPositive.Range(mergeAndDeleteOrAddAndReset(&hot.nativeHistogramBucketsPositive, &cold.nativeHistogramBucketsPositive)) + cold.nativeHistogramBucketsNegative.Range(mergeAndDeleteOrAddAndReset(&hot.nativeHistogramBucketsNegative, &cold.nativeHistogramBucketsNegative)) + return true +} + +// doubleBucketWidth doubles the bucket width (by decrementing the schema +// number). Note that very sparse buckets could lead to a low reduction of the +// bucket count (or even no reduction at all). The method does nothing if the +// schema is already -4. +func (h *histogram) doubleBucketWidth(hot, cold *histogramCounts) { + coldSchema := atomic.LoadInt32(&cold.nativeHistogramSchema) + if coldSchema == -4 { + return // Already at lowest resolution. + } + coldSchema-- + atomic.StoreInt32(&cold.nativeHistogramSchema, coldSchema) + // Play it simple and just delete all cold buckets. + atomic.StoreUint32(&cold.nativeHistogramBucketsNumber, 0) + deleteSyncMap(&cold.nativeHistogramBucketsNegative) + deleteSyncMap(&cold.nativeHistogramBucketsPositive) + // Make coldCounts the new hot counts. + n := atomic.AddUint64(&h.countAndHotIdx, 1<<63) + count := n & ((1 << 63) - 1) + // Swap the pointer names to represent the new roles and make + // the rest less confusing. + hot, cold = cold, hot + waitForCooldown(count, cold) + // Add all the now cold counts to the new hot counts... + addAndResetCounts(hot, cold) + // ...adjust the schema in the cold counts, too... + atomic.StoreInt32(&cold.nativeHistogramSchema, coldSchema) + // ...and then merge the cold buckets into the wider hot buckets. + merge := func(hotBuckets *sync.Map) func(k, v interface{}) bool { + return func(k, v interface{}) bool { + key := k.(int) + bucket := v.(*int64) + // Adjust key to match the bucket to merge into. + if key > 0 { + key++ + } + key /= 2 + // Add to corresponding hot bucket. + if addToBucket(hotBuckets, key, atomic.LoadInt64(bucket)) { + atomic.AddUint32(&hot.nativeHistogramBucketsNumber, 1) + } + return true + } + } + + cold.nativeHistogramBucketsPositive.Range(merge(&hot.nativeHistogramBucketsPositive)) + cold.nativeHistogramBucketsNegative.Range(merge(&hot.nativeHistogramBucketsNegative)) + // Play it simple again and just delete all cold buckets. + atomic.StoreUint32(&cold.nativeHistogramBucketsNumber, 0) + deleteSyncMap(&cold.nativeHistogramBucketsNegative) + deleteSyncMap(&cold.nativeHistogramBucketsPositive) +} + +func (h *histogram) resetCounts(counts *histogramCounts) { + atomic.StoreUint64(&counts.sumBits, 0) + atomic.StoreUint64(&counts.count, 0) + atomic.StoreUint64(&counts.nativeHistogramZeroBucket, 0) + atomic.StoreUint64(&counts.nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold)) + atomic.StoreInt32(&counts.nativeHistogramSchema, h.nativeHistogramSchema) + atomic.StoreUint32(&counts.nativeHistogramBucketsNumber, 0) + for i := range h.upperBounds { + atomic.StoreUint64(&counts.buckets[i], 0) + } + deleteSyncMap(&counts.nativeHistogramBucketsNegative) + deleteSyncMap(&counts.nativeHistogramBucketsPositive) } -// updateExemplar replaces the exemplar for the provided bucket. With empty -// labels, it's a no-op. It panics if any of the labels is invalid. +// updateExemplar replaces the exemplar for the provided classic bucket. +// With empty labels, it's a no-op. It panics if any of the labels is invalid. 
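// A standalone illustration of the key arithmetic in the merge closure of
// doubleBucketWidth above: halving the resolution (schema - 1) collapses
// adjacent bucket pairs, mapping old key i to ceil(i/2).
package main

import "fmt"

func mergedKey(key int) int {
	if key > 0 {
		key++
	}
	return key / 2 // Go truncates toward zero, giving ceil(i/2) overall.
}

func main() {
	for _, k := range []int{-2, -1, 0, 1, 2, 3, 4} {
		fmt.Printf("old key %d -> new key %d\n", k, mergedKey(k))
	}
}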
+// If histogram is native, the exemplar will be cached into nativeExemplars, +// which has a limit, and will remove one exemplar when limit is reached. func (h *histogram) updateExemplar(v float64, bucket int, l Labels) { if l == nil { return @@ -434,6 +1159,10 @@ func (h *histogram) updateExemplar(v float64, bucket int, l Labels) { panic(err) } h.exemplars[bucket].Store(e) + doSparse := h.nativeHistogramSchema > math.MinInt32 && !math.IsNaN(v) + if doSparse { + h.nativeExemplars.addExemplar(e) + } } // HistogramVec is a Collector that bundles a set of Histograms that all share the @@ -448,15 +1177,23 @@ type HistogramVec struct { // NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and // partitioned by the given label names. func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec { - desc := NewDesc( + return V2.NewHistogramVec(HistogramVecOpts{ + HistogramOpts: opts, + VariableLabels: UnconstrainedLabels(labelNames), + }) +} + +// NewHistogramVec creates a new HistogramVec based on the provided HistogramVecOpts. +func (v2) NewHistogramVec(opts HistogramVecOpts) *HistogramVec { + desc := V2.NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), opts.Help, - labelNames, + opts.VariableLabels, opts.ConstLabels, ) return &HistogramVec{ MetricVec: NewMetricVec(desc, func(lvs ...string) Metric { - return newHistogram(desc, opts, lvs...) + return newHistogram(desc, opts.HistogramOpts, lvs...) }), } } @@ -516,7 +1253,8 @@ func (v *HistogramVec) GetMetricWith(labels Labels) (Observer, error) { // WithLabelValues works as GetMetricWithLabelValues, but panics where // GetMetricWithLabelValues would have returned an error. Not returning an // error allows shortcuts like -// myVec.WithLabelValues("404", "GET").Observe(42.21) +// +// myVec.WithLabelValues("404", "GET").Observe(42.21) func (v *HistogramVec) WithLabelValues(lvs ...string) Observer { h, err := v.GetMetricWithLabelValues(lvs...) if err != nil { @@ -527,7 +1265,8 @@ func (v *HistogramVec) WithLabelValues(lvs ...string) Observer { // With works as GetMetricWith but panics where GetMetricWithLabels would have // returned an error. Not returning an error allows shortcuts like -// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21) +// +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21) func (v *HistogramVec) With(labels Labels) Observer { h, err := v.GetMetricWith(labels) if err != nil { @@ -573,6 +1312,7 @@ type constHistogram struct { sum float64 buckets map[float64]uint64 labelPairs []*dto.LabelPair + createdTs *timestamppb.Timestamp } func (h *constHistogram) Desc() *Desc { @@ -580,12 +1320,14 @@ func (h *constHistogram) Desc() *Desc { } func (h *constHistogram) Write(out *dto.Metric) error { - his := &dto.Histogram{} + his := &dto.Histogram{ + CreatedTimestamp: h.createdTs, + } + buckets := make([]*dto.Bucket, 0, len(h.buckets)) his.SampleCount = proto.Uint64(h.count) his.SampleSum = proto.Float64(h.sum) - for upperBound, count := range h.buckets { buckets = append(buckets, &dto.Bucket{ CumulativeCount: proto.Uint64(count), @@ -613,7 +1355,7 @@ func (h *constHistogram) Write(out *dto.Metric) error { // to send it to Prometheus in the Collect method. // // buckets is a map of upper bounds to cumulative counts, excluding the +Inf -// bucket. +// bucket. The +Inf bucket is implicit, and its value is equal to the provided count. 
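// A short usage sketch for the HistogramVec API above, echoing the shortcuts
// shown in the WithLabelValues and With doc comments; the metric name and
// label values are illustrative.
package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	vec := prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name:    "http_request_duration_seconds",
		Help:    "Latency by status code and method.",
		Buckets: prometheus.DefBuckets,
	}, []string{"code", "method"})

	vec.WithLabelValues("404", "GET").Observe(42.21)
	vec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
}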
// // NewConstHistogram returns an error if the length of labelValues is not // consistent with the variable labels in Desc or if Desc is invalid. @@ -627,7 +1369,7 @@ func NewConstHistogram( if desc.err != nil { return nil, desc.err } - if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { + if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil { return nil, err } return &constHistogram{ @@ -655,6 +1397,48 @@ func MustNewConstHistogram( return m } +// NewConstHistogramWithCreatedTimestamp does the same thing as NewConstHistogram but sets the created timestamp. +func NewConstHistogramWithCreatedTimestamp( + desc *Desc, + count uint64, + sum float64, + buckets map[float64]uint64, + ct time.Time, + labelValues ...string, +) (Metric, error) { + if desc.err != nil { + return nil, desc.err + } + if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil { + return nil, err + } + return &constHistogram{ + desc: desc, + count: count, + sum: sum, + buckets: buckets, + labelPairs: MakeLabelPairs(desc, labelValues), + createdTs: timestamppb.New(ct), + }, nil +} + +// MustNewConstHistogramWithCreatedTimestamp is a version of NewConstHistogramWithCreatedTimestamp that panics where +// NewConstHistogramWithCreatedTimestamp would have returned an error. +func MustNewConstHistogramWithCreatedTimestamp( + desc *Desc, + count uint64, + sum float64, + buckets map[float64]uint64, + ct time.Time, + labelValues ...string, +) Metric { + m, err := NewConstHistogramWithCreatedTimestamp(desc, count, sum, buckets, ct, labelValues...) + if err != nil { + panic(err) + } + return m +} + type buckSort []*dto.Bucket func (s buckSort) Len() int { @@ -668,3 +1452,605 @@ func (s buckSort) Swap(i, j int) { func (s buckSort) Less(i, j int) bool { return s[i].GetUpperBound() < s[j].GetUpperBound() } + +// pickSchema returns the largest number n between -4 and 8 such that +// 2^(2^-n) is less or equal the provided bucketFactor. +// +// Special cases: +// - bucketFactor <= 1: panics. +// - bucketFactor < 2^(2^-8) (but > 1): still returns 8. +func pickSchema(bucketFactor float64) int32 { + if bucketFactor <= 1 { + panic(fmt.Errorf("bucketFactor %f is <=1", bucketFactor)) + } + floor := math.Floor(math.Log2(math.Log2(bucketFactor))) + switch { + case floor <= -8: + return nativeHistogramSchemaMaximum + case floor >= 4: + return nativeHistogramSchemaMinimum + default: + return -int32(floor) + } +} + +func makeBuckets(buckets *sync.Map) ([]*dto.BucketSpan, []int64) { + var ii []int + buckets.Range(func(k, v interface{}) bool { + ii = append(ii, k.(int)) + return true + }) + sort.Ints(ii) + + if len(ii) == 0 { + return nil, nil + } + + var ( + spans []*dto.BucketSpan + deltas []int64 + prevCount int64 + nextI int + ) + + appendDelta := func(count int64) { + *spans[len(spans)-1].Length++ + deltas = append(deltas, count-prevCount) + prevCount = count + } + + for n, i := range ii { + v, _ := buckets.Load(i) + count := atomic.LoadInt64(v.(*int64)) + // Multiple spans with only small gaps in between are probably + // encoded more efficiently as one larger span with a few empty + // buckets. Needs some research to find the sweet spot. For now, + // we assume that gaps of one or two buckets should not create + // a new span. + iDelta := int32(i - nextI) + if n == 0 || iDelta > 2 { + // We have to create a new span, either because we are + // at the very beginning, or because we have found a gap + // of more than two buckets. 
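// A hedged sketch of NewConstHistogramWithCreatedTimestamp as added above,
// e.g. for bridging counts from another system inside a custom Collect
// method; the Desc, counts, and labels are illustrative assumptions.
package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	desc := prometheus.NewDesc("job_duration_seconds", "Imported durations.",
		[]string{"queue"}, nil)
	// buckets maps upper bounds to cumulative counts; the +Inf bucket is
	// implicit and equal to the total count (12).
	m, err := prometheus.NewConstHistogramWithCreatedTimestamp(
		desc, 12, 38.5,
		map[float64]uint64{1: 3, 5: 10},
		time.Now().Add(-time.Hour), // created timestamp
		"default",                  // label value for "queue"
	)
	_, _ = m, err
}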
+ spans = append(spans, &dto.BucketSpan{ + Offset: proto.Int32(iDelta), + Length: proto.Uint32(0), + }) + } else { + // We have found a small gap (or no gap at all). + // Insert empty buckets as needed. + for j := int32(0); j < iDelta; j++ { + appendDelta(0) + } + } + appendDelta(count) + nextI = i + 1 + } + return spans, deltas +} + +// addToBucket increments the sparse bucket at key by the provided amount. It +// returns true if a new sparse bucket had to be created for that. +func addToBucket(buckets *sync.Map, key int, increment int64) bool { + if existingBucket, ok := buckets.Load(key); ok { + // Fast path without allocation. + atomic.AddInt64(existingBucket.(*int64), increment) + return false + } + // Bucket doesn't exist yet. Slow path allocating new counter. + newBucket := increment // TODO(beorn7): Check if this is sufficient to not let increment escape. + if actualBucket, loaded := buckets.LoadOrStore(key, &newBucket); loaded { + // The bucket was created concurrently in another goroutine. + // Have to increment after all. + atomic.AddInt64(actualBucket.(*int64), increment) + return false + } + return true +} + +// addAndReset returns a function to be used with sync.Map.Range of spare +// buckets in coldCounts. It increments the buckets in the provided hotBuckets +// according to the buckets ranged through. It then resets all buckets ranged +// through to 0 (but leaves them in place so that they don't need to get +// recreated on the next scrape). +func addAndReset(hotBuckets *sync.Map, bucketNumber *uint32) func(k, v interface{}) bool { + return func(k, v interface{}) bool { + bucket := v.(*int64) + if addToBucket(hotBuckets, k.(int), atomic.LoadInt64(bucket)) { + atomic.AddUint32(bucketNumber, 1) + } + atomic.StoreInt64(bucket, 0) + return true + } +} + +func deleteSyncMap(m *sync.Map) { + m.Range(func(k, v interface{}) bool { + m.Delete(k) + return true + }) +} + +func findSmallestKey(m *sync.Map) int { + result := math.MaxInt32 + m.Range(func(k, v interface{}) bool { + key := k.(int) + if key < result { + result = key + } + return true + }) + return result +} + +func getLe(key int, schema int32) float64 { + // Here a bit of context about the behavior for the last bucket counting + // regular numbers (called simply "last bucket" below) and the bucket + // counting observations of ±Inf (called "inf bucket" below, with a key + // one higher than that of the "last bucket"): + // + // If we apply the usual formula to the last bucket, its upper bound + // would be calculated as +Inf. The reason is that the max possible + // regular float64 number (math.MaxFloat64) doesn't coincide with one of + // the calculated bucket boundaries. So the calculated boundary has to + // be larger than math.MaxFloat64, and the only float64 larger than + // math.MaxFloat64 is +Inf. However, we want to count actual + // observations of ±Inf in the inf bucket. Therefore, we have to treat + // the upper bound of the last bucket specially and set it to + // math.MaxFloat64. (The upper bound of the inf bucket, with its key + // being one higher than that of the last bucket, naturally comes out as + // +Inf by the usual formula. So that's fine.) + // + // math.MaxFloat64 has a frac of 0.9999999999999999 and an exp of + // 1024. If there were a float64 number following math.MaxFloat64, it + // would have a frac of 1.0 and an exp of 1024, or equivalently a frac + // of 0.5 and an exp of 1025. 
However, since frac must be smaller than + // 1, and exp must be smaller than 1025, either representation overflows + // a float64. (Which, in turn, is the reason that math.MaxFloat64 is the + // largest possible float64. Q.E.D.) However, the formula for + // calculating the upper bound from the idx and schema of the last + // bucket results in precisely that. It is either frac=1.0 & exp=1024 + // (for schema < 0) or frac=0.5 & exp=1025 (for schema >=0). (This is, + // by the way, a power of two where the exponent itself is a power of + // two, 2¹⁰ in fact, which coinicides with a bucket boundary in all + // schemas.) So these are the special cases we have to catch below. + if schema < 0 { + exp := key << -schema + if exp == 1024 { + // This is the last bucket before the overflow bucket + // (for ±Inf observations). Return math.MaxFloat64 as + // explained above. + return math.MaxFloat64 + } + return math.Ldexp(1, exp) + } + + fracIdx := key & ((1 << schema) - 1) + frac := nativeHistogramBounds[schema][fracIdx] + exp := (key >> schema) + 1 + if frac == 0.5 && exp == 1025 { + // This is the last bucket before the overflow bucket (for ±Inf + // observations). Return math.MaxFloat64 as explained above. + return math.MaxFloat64 + } + return math.Ldexp(frac, exp) +} + +// waitForCooldown returns after the count field in the provided histogramCounts +// has reached the provided count value. +func waitForCooldown(count uint64, counts *histogramCounts) { + for count != atomic.LoadUint64(&counts.count) { + runtime.Gosched() // Let observations get work done. + } +} + +// atomicAddFloat adds the provided float atomically to another float +// represented by the bit pattern the bits pointer is pointing to. +func atomicAddFloat(bits *uint64, v float64) { + for { + loadedBits := atomic.LoadUint64(bits) + newBits := math.Float64bits(math.Float64frombits(loadedBits) + v) + if atomic.CompareAndSwapUint64(bits, loadedBits, newBits) { + break + } + } +} + +// atomicDecUint32 atomically decrements the uint32 p points to. See +// https://pkg.go.dev/sync/atomic#AddUint32 to understand how this is done. +func atomicDecUint32(p *uint32) { + atomic.AddUint32(p, ^uint32(0)) +} + +// addAndResetCounts adds certain fields (count, sum, conventional buckets, zero +// bucket) from the cold counts to the corresponding fields in the hot +// counts. Those fields are then reset to 0 in the cold counts. +func addAndResetCounts(hot, cold *histogramCounts) { + atomic.AddUint64(&hot.count, atomic.LoadUint64(&cold.count)) + atomic.StoreUint64(&cold.count, 0) + coldSum := math.Float64frombits(atomic.LoadUint64(&cold.sumBits)) + atomicAddFloat(&hot.sumBits, coldSum) + atomic.StoreUint64(&cold.sumBits, 0) + for i := range hot.buckets { + atomic.AddUint64(&hot.buckets[i], atomic.LoadUint64(&cold.buckets[i])) + atomic.StoreUint64(&cold.buckets[i], 0) + } + atomic.AddUint64(&hot.nativeHistogramZeroBucket, atomic.LoadUint64(&cold.nativeHistogramZeroBucket)) + atomic.StoreUint64(&cold.nativeHistogramZeroBucket, 0) +} + +type nativeExemplars struct { + sync.Mutex + + // Time-to-live for exemplars, it is set to -1 if exemplars are disabled, that is NativeHistogramMaxExemplars is below 0. + // The ttl is used on insertion to remove an exemplar that is older than ttl, if present. 
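// A standalone illustration of the boundary formula that the getLe helper
// above implements: for schema 0 the upper bound of bucket key k is 2^k,
// computed here with math.Ldexp; positive schemas subdivide each power of
// two into 2^schema sub-buckets.
package main

import (
	"fmt"
	"math"
)

func main() {
	for k := -2; k <= 2; k++ {
		fmt.Printf("schema 0, key %d -> upper bound %g\n", k, math.Ldexp(1, k))
	}
}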
+	ttl time.Duration
+
+	exemplars []*dto.Exemplar
+}
+
+func (n *nativeExemplars) isEnabled() bool {
+	return n.ttl != -1
+}
+
+func makeNativeExemplars(ttl time.Duration, maxCount int) nativeExemplars {
+	if ttl == 0 {
+		ttl = 5 * time.Minute
+	}
+
+	if maxCount == 0 {
+		maxCount = 10
+	}
+
+	if maxCount < 0 {
+		maxCount = 0
+		ttl = -1
+	}
+
+	return nativeExemplars{
+		ttl:       ttl,
+		exemplars: make([]*dto.Exemplar, 0, maxCount),
+	}
+}
+
+func (n *nativeExemplars) addExemplar(e *dto.Exemplar) {
+	if !n.isEnabled() {
+		return
+	}
+
+	n.Lock()
+	defer n.Unlock()
+
+	// While the number of exemplars is still below cap(n.exemplars),
+	// insert the new exemplar directly at its value-sorted position.
+	if len(n.exemplars) < cap(n.exemplars) {
+		var nIdx int
+		for nIdx = 0; nIdx < len(n.exemplars); nIdx++ {
+			if *e.Value < *n.exemplars[nIdx].Value {
+				break
+			}
+		}
+		n.exemplars = append(n.exemplars[:nIdx], append([]*dto.Exemplar{e}, n.exemplars[nIdx:]...)...)
+		return
+	}
+
+	if len(n.exemplars) == 1 {
+		// When the number of exemplars is 1, then
+		// replace the existing exemplar with the new exemplar.
+		n.exemplars[0] = e
+		return
+	}
+	// From this point on, the number of exemplars is greater than 1.
+
+	// When the number of exemplars exceeds the limit, remove one exemplar.
+	var (
+		ot    = time.Time{} // Oldest timestamp seen. Initial value doesn't matter as we replace it due to otIdx == -1 in the loop.
+		otIdx = -1          // Index of the exemplar with the oldest timestamp.
+
+		md = -1.0 // Logarithm of the delta of the closest pair of exemplars.
+
+		// The insertion point of the new exemplar in the exemplars slice after insertion.
+		// This is calculated purely based on the order of the exemplars by value.
+		// nIdx == len(n.exemplars) means the new exemplar is to be inserted after the end.
+		nIdx = -1
+
+		// rIdx is ultimately the index for the exemplar that we are replacing with the new exemplar.
+		// The aim is to keep a good spread of exemplars by value and not let them bunch up too much.
+		// It is calculated in 3 steps:
+		//   1. First we set rIdx to the index of the older exemplar within the closest pair by value.
+		//      That is, the following will be true (on log scale):
+		//      either the exemplar pair on index (rIdx-1, rIdx) or (rIdx, rIdx+1) will have
+		//      the closest values to each other from all pairs.
+		//      For example, suppose the values are distributed like this:
+		//        |-----------x-------------x----------------x----x-----|
+		//                                                   ^--rIdx as this is older.
+		//      Or like this:
+		//        |-----------x-------------x----------------x----x-----|
+		//                                                        ^--rIdx as this is older.
+		//   2. If there is an exemplar that expired, then we simply reset rIdx to that index.
+		//   3. We check if by inserting the new exemplar we would create a closer pair at
+		//      (nIdx-1, nIdx) or (nIdx, nIdx+1) and set rIdx to nIdx-1 or nIdx accordingly to
+		//      keep the spread of exemplars by value; otherwise we keep rIdx as it is.
+		rIdx = -1
+		cLog float64 // Logarithm of the current exemplar.
+		pLog float64 // Logarithm of the previous exemplar.
+	)
+
+	for i, exemplar := range n.exemplars {
+		// Find the exemplar with the oldest timestamp.
+		if otIdx == -1 || exemplar.Timestamp.AsTime().Before(ot) {
+			ot = exemplar.Timestamp.AsTime()
+			otIdx = i
+		}
+
+		// Find the index at which to insert the new exemplar.
+		if nIdx == -1 && *e.Value <= *exemplar.Value {
+			nIdx = i
+		}
+
+		// Find the two closest exemplars and pick the one with the older timestamp.
+		pLog = cLog
+		cLog = math.Log(exemplar.GetValue())
+		if i == 0 {
+			continue
+		}
+		diff := math.Abs(cLog - pLog)
+		if md == -1 || diff < md {
+			// The closest exemplar pair is at index: i-1, i.
+			// Choose the exemplar with the older timestamp for replacement.
+			md = diff
+			if n.exemplars[i].Timestamp.AsTime().Before(n.exemplars[i-1].Timestamp.AsTime()) {
+				rIdx = i
+			} else {
+				rIdx = i - 1
+			}
+		}
+
+	}
+
+	// If all existing exemplars are smaller than the new exemplar,
+	// then the new exemplar is inserted at the end.
+	if nIdx == -1 {
+		nIdx = len(n.exemplars)
+	}
+	// Here, we have the following relationships:
+	// n.exemplars[nIdx-1].Value < e.Value (if nIdx > 0)
+	// e.Value <= n.exemplars[nIdx].Value (if nIdx < len(n.exemplars))
+
+	if otIdx != -1 && e.Timestamp.AsTime().Sub(ot) > n.ttl {
+		// If the oldest exemplar has expired, then replace it with the new exemplar.
+		rIdx = otIdx
+	} else {
+		// In the previous for loop, when calculating the closest pair of exemplars,
+		// we did not take into account the newly inserted exemplar.
+		// So we need to calculate with the newly inserted exemplar again.
+		elog := math.Log(e.GetValue())
+		if nIdx > 0 {
+			diff := math.Abs(elog - math.Log(n.exemplars[nIdx-1].GetValue()))
+			if diff < md {
+				// The value we are about to insert is closer to the previous exemplar at the insertion point than what we calculated before in rIdx.
+				//                                           v--rIdx
+				// |-----------x-n-----------x----------------x----x-----|
+				//     nIdx-1--^ ^--new exemplar value
+				// Do not make the spread worse, replace nIdx-1 and not rIdx.
+				md = diff
+				rIdx = nIdx - 1
+			}
+		}
+		if nIdx < len(n.exemplars) {
+			diff := math.Abs(math.Log(n.exemplars[nIdx].GetValue()) - elog)
+			if diff < md {
+				// The value we are about to insert is closer to the next exemplar at the insertion point than what we calculated before in rIdx.
+				//                                           v--rIdx
+				// |-----------x-----------n-x----------------x----x-----|
+				//   new exemplar value--^   ^--nIdx
+				// Do not make the spread worse, replace nIdx and not rIdx.
+				rIdx = nIdx
+			}
+		}
+	}
+
+	// Adjust the slice according to rIdx and nIdx.
+	switch {
+	case rIdx == nIdx:
+		n.exemplars[nIdx] = e
+	case rIdx < nIdx:
+		n.exemplars = append(n.exemplars[:rIdx], append(n.exemplars[rIdx+1:nIdx], append([]*dto.Exemplar{e}, n.exemplars[nIdx:]...)...)...)
+	case rIdx > nIdx:
+		n.exemplars = append(n.exemplars[:nIdx], append([]*dto.Exemplar{e}, append(n.exemplars[nIdx:rIdx], n.exemplars[rIdx+1:]...)...)...)
+	}
+}
+
+type constNativeHistogram struct {
+	desc *Desc
+	dto.Histogram
+	labelPairs []*dto.LabelPair
+}
+
+func validateCount(sum float64, count uint64, negativeBuckets, positiveBuckets map[int]int64, zeroBucket uint64) error {
+	var bucketPopulationSum int64
+	for _, v := range positiveBuckets {
+		bucketPopulationSum += v
+	}
+	for _, v := range negativeBuckets {
+		bucketPopulationSum += v
+	}
+	bucketPopulationSum += int64(zeroBucket)
+
+	// If the sum of observations is NaN, the number of observations must be greater than or equal to the sum of all bucket counts.
+	// Otherwise, the number of observations must be equal to the sum of all bucket counts.
+
+	if math.IsNaN(sum) && bucketPopulationSum > int64(count) ||
+		!math.IsNaN(sum) && bucketPopulationSum != int64(count) {
+		return errors.New("the sum of all bucket populations exceeds the count of observations")
+	}
+	return nil
+}
+
+// NewConstNativeHistogram returns a metric representing a Prometheus native histogram with
+// fixed values for the count, sum, and positive/negative/zero bucket counts.
As those parameters +// cannot be changed, the returned value does not implement the Histogram +// interface (but only the Metric interface). Users of this package will not +// have much use for it in regular operations. However, when implementing custom +// OpenTelemetry Collectors, it is useful as a throw-away metric that is generated on the fly +// to send it to Prometheus in the Collect method. +// +// zeroBucket counts all (positive and negative) +// observations in the zero bucket (with an absolute value less or equal +// the current threshold). +// positiveBuckets and negativeBuckets are separate maps for negative and positive +// observations. The map's value is an int64, counting observations in +// that bucket. The map's key is the +// index of the bucket according to the used +// Schema. Index 0 is for an upper bound of 1 in positive buckets and for a lower bound of -1 in negative buckets. +// NewConstNativeHistogram returns an error if +// - the length of labelValues is not consistent with the variable labels in Desc or if Desc is invalid. +// - the schema passed is not between 8 and -4 +// - the sum of counts in all buckets including the zero bucket does not equal the count if sum is not NaN (or exceeds the count if sum is NaN) +// +// See https://opentelemetry.io/docs/specs/otel/compatibility/prometheus_and_openmetrics/#exponential-histograms for more details about the conversion from OTel to Prometheus. +func NewConstNativeHistogram( + desc *Desc, + count uint64, + sum float64, + positiveBuckets, negativeBuckets map[int]int64, + zeroBucket uint64, + schema int32, + zeroThreshold float64, + createdTimestamp time.Time, + labelValues ...string, +) (Metric, error) { + if desc.err != nil { + return nil, desc.err + } + if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil { + return nil, err + } + if schema > nativeHistogramSchemaMaximum || schema < nativeHistogramSchemaMinimum { + return nil, errors.New("invalid native histogram schema") + } + if err := validateCount(sum, count, negativeBuckets, positiveBuckets, zeroBucket); err != nil { + return nil, err + } + + NegativeSpan, NegativeDelta := makeBucketsFromMap(negativeBuckets) + PositiveSpan, PositiveDelta := makeBucketsFromMap(positiveBuckets) + ret := &constNativeHistogram{ + desc: desc, + Histogram: dto.Histogram{ + CreatedTimestamp: timestamppb.New(createdTimestamp), + Schema: &schema, + ZeroThreshold: &zeroThreshold, + SampleCount: &count, + SampleSum: &sum, + + NegativeSpan: NegativeSpan, + NegativeDelta: NegativeDelta, + + PositiveSpan: PositiveSpan, + PositiveDelta: PositiveDelta, + + ZeroCount: proto.Uint64(zeroBucket), + }, + labelPairs: MakeLabelPairs(desc, labelValues), + } + if *ret.ZeroThreshold == 0 && *ret.ZeroCount == 0 && len(ret.PositiveSpan) == 0 && len(ret.NegativeSpan) == 0 { + ret.PositiveSpan = []*dto.BucketSpan{{ + Offset: proto.Int32(0), + Length: proto.Uint32(0), + }} + } + return ret, nil +} + +// MustNewConstNativeHistogram is a version of NewConstNativeHistogram that panics where +// NewConstNativeHistogram would have returned an error. 
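// A hedged usage sketch for NewConstNativeHistogram as declared above; the
// bucket maps and values are illustrative, chosen so that the populations
// (2+3 positive, 1 negative, 1 zero) sum to the count of 7, which is what
// validateCount requires when sum is not NaN.
package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	desc := prometheus.NewDesc("otel_latency_seconds", "Converted histogram.",
		nil, nil)
	m, err := prometheus.NewConstNativeHistogram(
		desc,
		7,                         // count
		3.5,                       // sum
		map[int]int64{1: 2, 2: 3}, // positive buckets
		map[int]int64{1: 1},       // negative buckets
		1,                         // zero bucket
		3,                         // schema
		1e-6,                      // zero threshold
		time.Now(),
	)
	_, _ = m, err
}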
+func MustNewConstNativeHistogram( + desc *Desc, + count uint64, + sum float64, + positiveBuckets, negativeBuckets map[int]int64, + zeroBucket uint64, + nativeHistogramSchema int32, + nativeHistogramZeroThreshold float64, + createdTimestamp time.Time, + labelValues ...string, +) Metric { + nativehistogram, err := NewConstNativeHistogram(desc, + count, + sum, + positiveBuckets, + negativeBuckets, + zeroBucket, + nativeHistogramSchema, + nativeHistogramZeroThreshold, + createdTimestamp, + labelValues...) + if err != nil { + panic(err) + } + return nativehistogram +} + +func (h *constNativeHistogram) Desc() *Desc { + return h.desc +} + +func (h *constNativeHistogram) Write(out *dto.Metric) error { + out.Histogram = &h.Histogram + out.Label = h.labelPairs + return nil +} + +func makeBucketsFromMap(buckets map[int]int64) ([]*dto.BucketSpan, []int64) { + if len(buckets) == 0 { + return nil, nil + } + var ii []int + for k := range buckets { + ii = append(ii, k) + } + sort.Ints(ii) + + var ( + spans []*dto.BucketSpan + deltas []int64 + prevCount int64 + nextI int + ) + + appendDelta := func(count int64) { + *spans[len(spans)-1].Length++ + deltas = append(deltas, count-prevCount) + prevCount = count + } + + for n, i := range ii { + count := buckets[i] + // Multiple spans with only small gaps in between are probably + // encoded more efficiently as one larger span with a few empty + // buckets. Needs some research to find the sweet spot. For now, + // we assume that gaps of one or two buckets should not create + // a new span. + iDelta := int32(i - nextI) + if n == 0 || iDelta > 2 { + // We have to create a new span, either because we are + // at the very beginning, or because we have found a gap + // of more than two buckets. + spans = append(spans, &dto.BucketSpan{ + Offset: proto.Int32(iDelta), + Length: proto.Uint32(0), + }) + } else { + // We have found a small gap (or no gap at all). + // Insert empty buckets as needed. + for j := int32(0); j < iDelta; j++ { + appendDelta(0) + } + } + appendDelta(count) + nextI = i + 1 + } + return spans, deltas +} diff --git a/tools/vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go b/tools/vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go new file mode 100644 index 000000000..1ed5abe74 --- /dev/null +++ b/tools/vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go @@ -0,0 +1,60 @@ +// Copyright (c) 2015 Björn Rabenstein +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. +// +// The code in this package is copy/paste to avoid a dependency. Hence this file +// carries the copyright of the original repo. +// https://github.com/beorn7/floats +package internal + +import ( + "math" +) + +// minNormalFloat64 is the smallest positive normal value of type float64. +var minNormalFloat64 = math.Float64frombits(0x0010000000000000) + +// AlmostEqualFloat64 returns true if a and b are equal within a relative error +// of epsilon. See http://floating-point-gui.de/errors/comparison/ for the +// details of the applied method. +func AlmostEqualFloat64(a, b, epsilon float64) bool { + if a == b { + return true + } + absA := math.Abs(a) + absB := math.Abs(b) + diff := math.Abs(a - b) + if a == 0 || b == 0 || absA+absB < minNormalFloat64 { + return diff < epsilon*minNormalFloat64 + } + return diff/math.Min(absA+absB, math.MaxFloat64) < epsilon +} + +// AlmostEqualFloat64s is the slice form of AlmostEqualFloat64. +func AlmostEqualFloat64s(a, b []float64, epsilon float64) bool { + if len(a) != len(b) { + return false + } + for i := range a { + if !AlmostEqualFloat64(a[i], b[i], epsilon) { + return false + } + } + return true +} diff --git a/tools/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go b/tools/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go new file mode 100644 index 000000000..8b016355a --- /dev/null +++ b/tools/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go @@ -0,0 +1,655 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// It provides tools to compare sequences of strings and generate textual diffs. +// +// Maintaining `GetUnifiedDiffString` here because original repository +// (https://github.com/pmezard/go-difflib) is no longer maintained. +package internal + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" +) + +func minInt(a, b int) int { + if a < b { + return a + } + return b +} + +func maxInt(a, b int) int { + if a > b { + return a + } + return b +} + +func calculateRatio(matches, length int) float64 { + if length > 0 { + return 2.0 * float64(matches) / float64(length) + } + return 1.0 +} + +type Match struct { + A int + B int + Size int +} + +type OpCode struct { + Tag byte + I1 int + I2 int + J1 int + J2 int +} + +// SequenceMatcher compares sequence of strings. The basic +// algorithm predates, and is a little fancier than, an algorithm +// published in the late 1980's by Ratcliff and Obershelp under the +// hyperbolic name "gestalt pattern matching". The basic idea is to find +// the longest contiguous matching subsequence that contains no "junk" +// elements (R-O doesn't address junk). 
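// A small standalone demonstration of why the relative comparison in
// AlmostEqualFloat64 above is needed: direct float64 equality fails on
// accumulated rounding error, while the relative error stays tiny.
package main

import (
	"fmt"
	"math"
)

func main() {
	a, b := 0.1+0.2, 0.3
	fmt.Println(a == b)                                          // false
	fmt.Println(math.Abs(a-b)/(math.Abs(a)+math.Abs(b)) < 1e-9)  // true
}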
The same idea is then applied
+// recursively to the pieces of the sequences to the left and to the right
+// of the matching subsequence. This does not yield minimal edit
+// sequences, but does tend to yield matches that "look right" to people.
+//
+// SequenceMatcher tries to compute a "human-friendly diff" between two
+// sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the
+// longest *contiguous* & junk-free matching subsequence. That's what
+// catches peoples' eyes. The Windows(tm) windiff has another interesting
+// notion, pairing up elements that appear uniquely in each sequence.
+// That, and the method here, appear to yield more intuitive difference
+// reports than does diff. This method appears to be the least vulnerable
+// to synching up on blocks of "junk lines", though (like blank lines in
+// ordinary text files, or maybe "<P>" lines in HTML files). That may be
+// because this is the only method of the 3 that has a *concept* of
+// "junk" <wink>.
+//
+// Timing: Basic R-O is cubic time worst case and quadratic time expected
+// case. SequenceMatcher is quadratic time for the worst case and has
+// expected-case behavior dependent in a complicated way on how many
+// elements the sequences have in common; best case time is linear.
+type SequenceMatcher struct {
+	a              []string
+	b              []string
+	b2j            map[string][]int
+	IsJunk         func(string) bool
+	autoJunk       bool
+	bJunk          map[string]struct{}
+	matchingBlocks []Match
+	fullBCount     map[string]int
+	bPopular       map[string]struct{}
+	opCodes        []OpCode
+}
+
+func NewMatcher(a, b []string) *SequenceMatcher {
+	m := SequenceMatcher{autoJunk: true}
+	m.SetSeqs(a, b)
+	return &m
+}
+
+func NewMatcherWithJunk(a, b []string, autoJunk bool,
+	isJunk func(string) bool,
+) *SequenceMatcher {
+	m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk}
+	m.SetSeqs(a, b)
+	return &m
+}
+
+// Set two sequences to be compared.
+func (m *SequenceMatcher) SetSeqs(a, b []string) {
+	m.SetSeq1(a)
+	m.SetSeq2(b)
+}
+
+// Set the first sequence to be compared. The second sequence to be compared is
+// not changed.
+//
+// SequenceMatcher computes and caches detailed information about the second
+// sequence, so if you want to compare one sequence S against many sequences,
+// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other
+// sequences.
+//
+// See also SetSeqs() and SetSeq2().
+func (m *SequenceMatcher) SetSeq1(a []string) {
+	if &a == &m.a {
+		return
+	}
+	m.a = a
+	m.matchingBlocks = nil
+	m.opCodes = nil
+}
+
+// Set the second sequence to be compared. The first sequence to be compared is
+// not changed.
+func (m *SequenceMatcher) SetSeq2(b []string) {
+	if &b == &m.b {
+		return
+	}
+	m.b = b
+	m.matchingBlocks = nil
+	m.opCodes = nil
+	m.fullBCount = nil
+	m.chainB()
+}
+
+func (m *SequenceMatcher) chainB() {
+	// Populate line -> index mapping
+	b2j := map[string][]int{}
+	for i, s := range m.b {
+		indices := b2j[s]
+		indices = append(indices, i)
+		b2j[s] = indices
+	}
+
+	// Purge junk elements
+	m.bJunk = map[string]struct{}{}
+	if m.IsJunk != nil {
+		junk := m.bJunk
+		for s := range b2j {
+			if m.IsJunk(s) {
+				junk[s] = struct{}{}
+			}
+		}
+		for s := range junk {
+			delete(b2j, s)
+		}
+	}
+
+	// Purge remaining popular elements
+	popular := map[string]struct{}{}
+	n := len(m.b)
+	if m.autoJunk && n >= 200 {
+		ntest := n/100 + 1
+		for s, indices := range b2j {
+			if len(indices) > ntest {
+				popular[s] = struct{}{}
+			}
+		}
+		for s := range popular {
+			delete(b2j, s)
+		}
+	}
+	m.bPopular = popular
+	m.b2j = b2j
+}
+
+func (m *SequenceMatcher) isBJunk(s string) bool {
+	_, ok := m.bJunk[s]
+	return ok
+}
+
+// Find longest matching block in a[alo:ahi] and b[blo:bhi].
+//
+// If IsJunk is not defined:
+//
+// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
+//
+//	alo <= i <= i+k <= ahi
+//	blo <= j <= j+k <= bhi
+//
+// and for all (i',j',k') meeting those conditions,
+//
+//	k >= k'
+//	i <= i'
+//	and if i == i', j <= j'
+//
+// In other words, of all maximal matching blocks, return one that
+// starts earliest in a, and of all those maximal matching blocks that
+// start earliest in a, return the one that starts earliest in b.
+//
+// If IsJunk is defined, first the longest matching block is
+// determined as above, but with the additional restriction that no
+// junk element appears in the block.
Then that block is extended as +// far as possible by matching (only) junk elements on both sides. So +// the resulting block never matches on junk except as identical junk +// happens to be adjacent to an "interesting" match. +// +// If no blocks match, return (alo, blo, 0). +func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match { + // CAUTION: stripping common prefix or suffix would be incorrect. + // E.g., + // ab + // acab + // Longest matching block is "ab", but if common prefix is + // stripped, it's "a" (tied with "b"). UNIX(tm) diff does so + // strip, so ends up claiming that ab is changed to acab by + // inserting "ca" in the middle. That's minimal but unintuitive: + // "it's obvious" that someone inserted "ac" at the front. + // Windiff ends up at the same place as diff, but by pairing up + // the unique 'b's and then matching the first two 'a's. + besti, bestj, bestsize := alo, blo, 0 + + // find longest junk-free match + // during an iteration of the loop, j2len[j] = length of longest + // junk-free match ending with a[i-1] and b[j] + j2len := map[int]int{} + for i := alo; i != ahi; i++ { + // look at all instances of a[i] in b; note that because + // b2j has no junk keys, the loop is skipped if a[i] is junk + newj2len := map[int]int{} + for _, j := range m.b2j[m.a[i]] { + // a[i] matches b[j] + if j < blo { + continue + } + if j >= bhi { + break + } + k := j2len[j-1] + 1 + newj2len[j] = k + if k > bestsize { + besti, bestj, bestsize = i-k+1, j-k+1, k + } + } + j2len = newj2len + } + + // Extend the best by non-junk elements on each end. In particular, + // "popular" non-junk elements aren't in b2j, which greatly speeds + // the inner loop above, but also means "the best" match so far + // doesn't contain any junk *or* popular non-junk elements. + for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) && + m.a[besti-1] == m.b[bestj-1] { + besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 + } + for besti+bestsize < ahi && bestj+bestsize < bhi && + !m.isBJunk(m.b[bestj+bestsize]) && + m.a[besti+bestsize] == m.b[bestj+bestsize] { + bestsize++ + } + + // Now that we have a wholly interesting match (albeit possibly + // empty!), we may as well suck up the matching junk on each + // side of it too. Can't think of a good reason not to, and it + // saves post-processing the (possibly considerable) expense of + // figuring out what to do with it. In the case of an empty + // interesting match, this is clearly the right thing to do, + // because no other kind of match is possible in the regions. + for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) && + m.a[besti-1] == m.b[bestj-1] { + besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 + } + for besti+bestsize < ahi && bestj+bestsize < bhi && + m.isBJunk(m.b[bestj+bestsize]) && + m.a[besti+bestsize] == m.b[bestj+bestsize] { + bestsize++ + } + + return Match{A: besti, B: bestj, Size: bestsize} +} + +// Return list of triples describing matching subsequences. +// +// Each triple is of the form (i, j, n), and means that +// a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in +// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are +// adjacent triples in the list, and the second is not the last triple in the +// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe +// adjacent equal blocks. +// +// The last triple is a dummy, (len(a), len(b), 0), and is the only +// triple with n==0. 
+func (m *SequenceMatcher) GetMatchingBlocks() []Match { + if m.matchingBlocks != nil { + return m.matchingBlocks + } + + var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match + matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match { + match := m.findLongestMatch(alo, ahi, blo, bhi) + i, j, k := match.A, match.B, match.Size + if match.Size > 0 { + if alo < i && blo < j { + matched = matchBlocks(alo, i, blo, j, matched) + } + matched = append(matched, match) + if i+k < ahi && j+k < bhi { + matched = matchBlocks(i+k, ahi, j+k, bhi, matched) + } + } + return matched + } + matched := matchBlocks(0, len(m.a), 0, len(m.b), nil) + + // It's possible that we have adjacent equal blocks in the + // matching_blocks list now. + nonAdjacent := []Match{} + i1, j1, k1 := 0, 0, 0 + for _, b := range matched { + // Is this block adjacent to i1, j1, k1? + i2, j2, k2 := b.A, b.B, b.Size + if i1+k1 == i2 && j1+k1 == j2 { + // Yes, so collapse them -- this just increases the length of + // the first block by the length of the second, and the first + // block so lengthened remains the block to compare against. + k1 += k2 + } else { + // Not adjacent. Remember the first block (k1==0 means it's + // the dummy we started with), and make the second block the + // new block to compare against. + if k1 > 0 { + nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) + } + i1, j1, k1 = i2, j2, k2 + } + } + if k1 > 0 { + nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) + } + + nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0}) + m.matchingBlocks = nonAdjacent + return m.matchingBlocks +} + +// Return list of 5-tuples describing how to turn a into b. +// +// Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple +// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the +// tuple preceding it, and likewise for j1 == the previous j2. +// +// The tags are characters, with these meanings: +// +// 'r' (replace): a[i1:i2] should be replaced by b[j1:j2] +// +// 'd' (delete): a[i1:i2] should be deleted, j1==j2 in this case. +// +// 'i' (insert): b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case. +// +// 'e' (equal): a[i1:i2] == b[j1:j2] +func (m *SequenceMatcher) GetOpCodes() []OpCode { + if m.opCodes != nil { + return m.opCodes + } + i, j := 0, 0 + matching := m.GetMatchingBlocks() + opCodes := make([]OpCode, 0, len(matching)) + for _, m := range matching { + // invariant: we've pumped out correct diffs to change + // a[:i] into b[:j], and the next matching block is + // a[ai:ai+size] == b[bj:bj+size]. So we need to pump + // out a diff to change a[i:ai] into b[j:bj], pump out + // the matching block, and move (i,j) beyond the match + ai, bj, size := m.A, m.B, m.Size + tag := byte(0) + if i < ai && j < bj { + tag = 'r' + } else if i < ai { + tag = 'd' + } else if j < bj { + tag = 'i' + } + if tag > 0 { + opCodes = append(opCodes, OpCode{tag, i, ai, j, bj}) + } + i, j = ai+size, bj+size + // the list of matching blocks is terminated by a + // sentinel with size 0 + if size > 0 { + opCodes = append(opCodes, OpCode{'e', ai, i, bj, j}) + } + } + m.opCodes = opCodes + return m.opCodes +} + +// Isolate change clusters by eliminating ranges with no changes. +// +// Return a generator of groups with up to n lines of context. +// Each group is in the same format as returned by GetOpCodes(). 
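// A sketch of how the matcher is typically driven (illustrative only, since
// this package is internal and not importable): SplitLines feeds NewMatcher,
// and GetOpCodes yields replace/delete/insert/equal steps.
package internal

import "fmt"

func demoOpCodes() {
	a := SplitLines("one\ntwo\nthree\n")
	b := SplitLines("one\ntoo\nthree\n")
	for _, op := range NewMatcher(a, b).GetOpCodes() {
		fmt.Printf("%c a[%d:%d] b[%d:%d]\n", op.Tag, op.I1, op.I2, op.J1, op.J2)
	}
}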
+func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode { + if n < 0 { + n = 3 + } + codes := m.GetOpCodes() + if len(codes) == 0 { + codes = []OpCode{{'e', 0, 1, 0, 1}} + } + // Fixup leading and trailing groups if they show no changes. + if codes[0].Tag == 'e' { + c := codes[0] + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + codes[0] = OpCode{c.Tag, maxInt(i1, i2-n), i2, maxInt(j1, j2-n), j2} + } + if codes[len(codes)-1].Tag == 'e' { + c := codes[len(codes)-1] + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + codes[len(codes)-1] = OpCode{c.Tag, i1, minInt(i2, i1+n), j1, minInt(j2, j1+n)} + } + nn := n + n + groups := [][]OpCode{} + group := []OpCode{} + for _, c := range codes { + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + // End the current group and start a new one whenever + // there is a large range with no changes. + if c.Tag == 'e' && i2-i1 > nn { + group = append(group, OpCode{ + c.Tag, i1, minInt(i2, i1+n), + j1, minInt(j2, j1+n), + }) + groups = append(groups, group) + group = []OpCode{} + i1, j1 = maxInt(i1, i2-n), maxInt(j1, j2-n) + } + group = append(group, OpCode{c.Tag, i1, i2, j1, j2}) + } + if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') { + groups = append(groups, group) + } + return groups +} + +// Return a measure of the sequences' similarity (float in [0,1]). +// +// Where T is the total number of elements in both sequences, and +// M is the number of matches, this is 2.0*M / T. +// Note that this is 1 if the sequences are identical, and 0 if +// they have nothing in common. +// +// .Ratio() is expensive to compute if you haven't already computed +// .GetMatchingBlocks() or .GetOpCodes(), in which case you may +// want to try .QuickRatio() or .RealQuickRation() first to get an +// upper bound. +func (m *SequenceMatcher) Ratio() float64 { + matches := 0 + for _, m := range m.GetMatchingBlocks() { + matches += m.Size + } + return calculateRatio(matches, len(m.a)+len(m.b)) +} + +// Return an upper bound on ratio() relatively quickly. +// +// This isn't defined beyond that it is an upper bound on .Ratio(), and +// is faster to compute. +func (m *SequenceMatcher) QuickRatio() float64 { + // viewing a and b as multisets, set matches to the cardinality + // of their intersection; this counts the number of matches + // without regard to order, so is clearly an upper bound + if m.fullBCount == nil { + m.fullBCount = map[string]int{} + for _, s := range m.b { + m.fullBCount[s]++ + } + } + + // avail[x] is the number of times x appears in 'b' less the + // number of times we've seen it in 'a' so far ... kinda + avail := map[string]int{} + matches := 0 + for _, s := range m.a { + n, ok := avail[s] + if !ok { + n = m.fullBCount[s] + } + avail[s] = n - 1 + if n > 0 { + matches++ + } + } + return calculateRatio(matches, len(m.a)+len(m.b)) +} + +// Return an upper bound on ratio() very quickly. +// +// This isn't defined beyond that it is an upper bound on .Ratio(), and +// is faster to compute than either .Ratio() or .QuickRatio(). 
+func (m *SequenceMatcher) RealQuickRatio() float64 { + la, lb := len(m.a), len(m.b) + return calculateRatio(minInt(la, lb), la+lb) +} + +// Convert range to the "ed" format +func formatRangeUnified(start, stop int) string { + // Per the diff spec at http://www.unix.org/single_unix_specification/ + beginning := start + 1 // lines start numbering with one + length := stop - start + if length == 1 { + return strconv.Itoa(beginning) + } + if length == 0 { + beginning-- // empty ranges begin at line just before the range + } + return fmt.Sprintf("%d,%d", beginning, length) +} + +// Unified diff parameters +type UnifiedDiff struct { + A []string // First sequence lines + FromFile string // First file name + FromDate string // First file time + B []string // Second sequence lines + ToFile string // Second file name + ToDate string // Second file time + Eol string // Headers end of line, defaults to LF + Context int // Number of context lines +} + +// Compare two sequences of lines; generate the delta as a unified diff. +// +// Unified diffs are a compact way of showing line changes and a few +// lines of context. The number of context lines is set by 'n' which +// defaults to three. +// +// By default, the diff control lines (those with ---, +++, or @@) are +// created with a trailing newline. This is helpful so that inputs +// created from file.readlines() result in diffs that are suitable for +// file.writelines() since both the inputs and outputs have trailing +// newlines. +// +// For inputs that do not have trailing newlines, set the lineterm +// argument to "" so that the output will be uniformly newline free. +// +// The unidiff format normally has a header for filenames and modification +// times. Any or all of these may be specified using strings for +// 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'. +// The modification times are normally expressed in the ISO 8601 format. 
+func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error {
+	buf := bufio.NewWriter(writer)
+	defer buf.Flush()
+	wf := func(format string, args ...interface{}) error {
+		_, err := buf.WriteString(fmt.Sprintf(format, args...))
+		return err
+	}
+	ws := func(s string) error {
+		_, err := buf.WriteString(s)
+		return err
+	}
+
+	if len(diff.Eol) == 0 {
+		diff.Eol = "\n"
+	}
+
+	started := false
+	m := NewMatcher(diff.A, diff.B)
+	for _, g := range m.GetGroupedOpCodes(diff.Context) {
+		if !started {
+			started = true
+			fromDate := ""
+			if len(diff.FromDate) > 0 {
+				fromDate = "\t" + diff.FromDate
+			}
+			toDate := ""
+			if len(diff.ToDate) > 0 {
+				toDate = "\t" + diff.ToDate
+			}
+			if diff.FromFile != "" || diff.ToFile != "" {
+				err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol)
+				if err != nil {
+					return err
+				}
+				err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol)
+				if err != nil {
+					return err
+				}
+			}
+		}
+		first, last := g[0], g[len(g)-1]
+		range1 := formatRangeUnified(first.I1, last.I2)
+		range2 := formatRangeUnified(first.J1, last.J2)
+		if err := wf("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil {
+			return err
+		}
+		for _, c := range g {
+			i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+			if c.Tag == 'e' {
+				for _, line := range diff.A[i1:i2] {
+					if err := ws(" " + line); err != nil {
+						return err
+					}
+				}
+				continue
+			}
+			if c.Tag == 'r' || c.Tag == 'd' {
+				for _, line := range diff.A[i1:i2] {
+					if err := ws("-" + line); err != nil {
+						return err
+					}
+				}
+			}
+			if c.Tag == 'r' || c.Tag == 'i' {
+				for _, line := range diff.B[j1:j2] {
+					if err := ws("+" + line); err != nil {
+						return err
+					}
+				}
+			}
+		}
+	}
+	return nil
+}
+
+// Like WriteUnifiedDiff but returns the diff as a string.
+func GetUnifiedDiffString(diff UnifiedDiff) (string, error) {
+	w := &bytes.Buffer{}
+	err := WriteUnifiedDiff(w, diff)
+	return w.String(), err
+}
+
+// Split a string on "\n" while preserving the newlines. The output can be
+// used as input for UnifiedDiff and ContextDiff structures.
+func SplitLines(s string) []string {
+	lines := strings.SplitAfter(s, "\n")
+	lines[len(lines)-1] += "\n"
+	return lines
+}
diff --git a/tools/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go b/tools/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go
new file mode 100644
index 000000000..a4fa6eabd
--- /dev/null
+++ b/tools/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go
@@ -0,0 +1,34 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal
+
+import "regexp"
+
+type GoCollectorRule struct {
+	Matcher *regexp.Regexp
+	Deny    bool
+}
+
+// GoCollectorOptions should not be used directly by anything except the
+// `collectors` package. Use it via the collectors package instead. See issue
+// https://github.com/prometheus/client_golang/issues/1030.
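A minimal usage sketch for the unified-diff API above, again assuming the upstream go-difflib import path:

```go
package main

import (
	"fmt"

	"github.com/pmezard/go-difflib/difflib" // import path assumed
)

func main() {
	diff := difflib.UnifiedDiff{
		A:        difflib.SplitLines("foo\nbar\n"),
		B:        difflib.SplitLines("foo\nbaz\n"),
		FromFile: "a.txt",
		ToFile:   "b.txt",
		Context:  3, // lines of context around each change
	}
	text, err := difflib.GetUnifiedDiffString(diff)
	if err != nil {
		panic(err)
	}
	fmt.Print(text)
}
```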
+// +// This is internal, so external users only can use it via `collector.WithGoCollector*` methods +type GoCollectorOptions struct { + DisableMemStatsLikeMetrics bool + RuntimeMetricSumForHist map[string]string + RuntimeMetricRules []GoCollectorRule +} + +var GoCollectorDefaultRuntimeMetrics = regexp.MustCompile(`/gc/gogc:percent|/gc/gomemlimit:bytes|/sched/gomaxprocs:threads`) diff --git a/tools/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go b/tools/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go index fe0a52180..f7f97ef92 100644 --- a/tools/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go +++ b/tools/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go @@ -61,12 +61,13 @@ func RuntimeMetricsToProm(d *metrics.Description) (string, string, string, bool) // name has - replaced with _ and is concatenated with the unit and // other data. name = strings.ReplaceAll(name, "-", "_") - name = name + "_" + unit - if d.Cumulative { - name = name + "_total" + name += "_" + unit + if d.Cumulative && d.Kind != metrics.KindFloat64Histogram { + name += "_total" } - valid := model.IsValidMetricName(model.LabelValue(namespace + "_" + subsystem + "_" + name)) + // Our current conversion moves to legacy naming, so use legacy validation. + valid := model.IsValidLegacyMetricName(namespace + "_" + subsystem + "_" + name) switch d.Kind { case metrics.KindUint64: case metrics.KindFloat64: @@ -84,12 +85,12 @@ func RuntimeMetricsToProm(d *metrics.Description) (string, string, string, bool) func RuntimeMetricsBucketsForUnit(buckets []float64, unit string) []float64 { switch unit { case "bytes": - // Rebucket as powers of 2. - return rebucketExp(buckets, 2) + // Re-bucket as powers of 2. + return reBucketExp(buckets, 2) case "seconds": - // Rebucket as powers of 10 and then merge all buckets greater + // Re-bucket as powers of 10 and then merge all buckets greater // than 1 second into the +Inf bucket. - b := rebucketExp(buckets, 10) + b := reBucketExp(buckets, 10) for i := range b { if b[i] <= 1 { continue @@ -103,11 +104,11 @@ func RuntimeMetricsBucketsForUnit(buckets []float64, unit string) []float64 { return buckets } -// rebucketExp takes a list of bucket boundaries (lower bound inclusive) and +// reBucketExp takes a list of bucket boundaries (lower bound inclusive) and // downsamples the buckets to those a multiple of base apart. The end result // is a roughly exponential (in many cases, perfectly exponential) bucketing // scheme. -func rebucketExp(buckets []float64, base float64) []float64 { +func reBucketExp(buckets []float64, base float64) []float64 { bucket := buckets[0] var newBuckets []float64 // We may see a -Inf here, in which case, add it and skip it diff --git a/tools/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go b/tools/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go index 351c26e1a..6515c1148 100644 --- a/tools/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go +++ b/tools/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go @@ -19,18 +19,34 @@ import ( dto "github.com/prometheus/client_model/go" ) -// metricSorter is a sortable slice of *dto.Metric. -type metricSorter []*dto.Metric +// LabelPairSorter implements sort.Interface. It is used to sort a slice of +// dto.LabelPair pointers. 
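These internal option types are only reachable through the public collectors package; a hedged sketch of how a runtime-metrics rule set is typically enabled (the collectors API names are assumed from client_golang v1.x):

```go
package main

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/collectors"
)

func main() {
	reg := prometheus.NewRegistry()

	// MetricsGC is a predefined GoRuntimeMetricsRule set; rules with
	// Deny unset act as an allow-list over runtime/metrics names.
	reg.MustRegister(collectors.NewGoCollector(
		collectors.WithGoCollectorRuntimeMetrics(collectors.MetricsGC),
	))
}
```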
+type LabelPairSorter []*dto.LabelPair -func (s metricSorter) Len() int { +func (s LabelPairSorter) Len() int { return len(s) } -func (s metricSorter) Swap(i, j int) { +func (s LabelPairSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s metricSorter) Less(i, j int) bool { +func (s LabelPairSorter) Less(i, j int) bool { + return s[i].GetName() < s[j].GetName() +} + +// MetricSorter is a sortable slice of *dto.Metric. +type MetricSorter []*dto.Metric + +func (s MetricSorter) Len() int { + return len(s) +} + +func (s MetricSorter) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s MetricSorter) Less(i, j int) bool { if len(s[i].Label) != len(s[j].Label) { // This should not happen. The metrics are // inconsistent. However, we have to deal with the fact, as @@ -68,7 +84,7 @@ func (s metricSorter) Less(i, j int) bool { // the slice, with the contained Metrics sorted within each MetricFamily. func NormalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily { for _, mf := range metricFamiliesByName { - sort.Sort(metricSorter(mf.Metric)) + sort.Sort(MetricSorter(mf.Metric)) } names := make([]string, 0, len(metricFamiliesByName)) for name, mf := range metricFamiliesByName { diff --git a/tools/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/tools/vendor/github.com/prometheus/client_golang/prometheus/labels.go index 2744443ac..c21911f29 100644 --- a/tools/vendor/github.com/prometheus/client_golang/prometheus/labels.go +++ b/tools/vendor/github.com/prometheus/client_golang/prometheus/labels.go @@ -25,12 +25,111 @@ import ( // Labels represents a collection of label name -> value mappings. This type is // commonly used with the With(Labels) and GetMetricWith(Labels) methods of // metric vector Collectors, e.g.: -// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) +// +// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) // // The other use-case is the specification of constant label pairs in Opts or to // create a Desc. type Labels map[string]string +// LabelConstraint normalizes label values. +type LabelConstraint func(string) string + +// ConstrainedLabels represents a label name and its constrain function +// to normalize label values. This type is commonly used when constructing +// metric vector Collectors. +type ConstrainedLabel struct { + Name string + Constraint LabelConstraint +} + +// ConstrainableLabels is an interface that allows creating of labels that can +// be optionally constrained. +// +// prometheus.V2().NewCounterVec(CounterVecOpts{ +// CounterOpts: {...}, // Usual CounterOpts fields +// VariableLabels: []ConstrainedLabels{ +// {Name: "A"}, +// {Name: "B", Constraint: func(v string) string { ... }}, +// }, +// }) +type ConstrainableLabels interface { + compile() *compiledLabels + labelNames() []string +} + +// ConstrainedLabels represents a collection of label name -> constrain function +// to normalize label values. This type is commonly used when constructing +// metric vector Collectors. 
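A short sketch of the constrained-labels API described above, assuming the experimental prometheus.V2 namespace from the same release; strings.ToUpper serves as the LabelConstraint:

```go
package main

import (
	"strings"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Normalize the HTTP method label so "get" and "GET" share one series.
	requests := prometheus.V2.NewCounterVec(prometheus.CounterVecOpts{
		CounterOpts: prometheus.CounterOpts{
			Name: "http_requests_total",
			Help: "HTTP requests by method and code.",
		},
		VariableLabels: prometheus.ConstrainedLabels{
			{Name: "method", Constraint: strings.ToUpper},
			{Name: "code"},
		},
	})
	requests.WithLabelValues("get", "200").Inc() // stored as method="GET"
}
```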
+type ConstrainedLabels []ConstrainedLabel + +func (cls ConstrainedLabels) compile() *compiledLabels { + compiled := &compiledLabels{ + names: make([]string, len(cls)), + labelConstraints: map[string]LabelConstraint{}, + } + + for i, label := range cls { + compiled.names[i] = label.Name + if label.Constraint != nil { + compiled.labelConstraints[label.Name] = label.Constraint + } + } + + return compiled +} + +func (cls ConstrainedLabels) labelNames() []string { + names := make([]string, len(cls)) + for i, label := range cls { + names[i] = label.Name + } + return names +} + +// UnconstrainedLabels represents collection of label without any constraint on +// their value. Thus, it is simply a collection of label names. +// +// UnconstrainedLabels([]string{ "A", "B" }) +// +// is equivalent to +// +// ConstrainedLabels { +// { Name: "A" }, +// { Name: "B" }, +// } +type UnconstrainedLabels []string + +func (uls UnconstrainedLabels) compile() *compiledLabels { + return &compiledLabels{ + names: uls, + } +} + +func (uls UnconstrainedLabels) labelNames() []string { + return uls +} + +type compiledLabels struct { + names []string + labelConstraints map[string]LabelConstraint +} + +func (cls *compiledLabels) compile() *compiledLabels { + return cls +} + +func (cls *compiledLabels) labelNames() []string { + return cls.names +} + +func (cls *compiledLabels) constrain(labelName, value string) string { + if fn, ok := cls.labelConstraints[labelName]; ok && fn != nil { + return fn(value) + } + return value +} + // reservedLabelPrefix is a prefix which is not legal in user-supplied // label names. const reservedLabelPrefix = "__" @@ -39,7 +138,7 @@ var errInconsistentCardinality = errors.New("inconsistent label cardinality") func makeInconsistentCardinalityError(fqName string, labels, labelValues []string) error { return fmt.Errorf( - "%s: %q has %d variable labels named %q but %d values %q were provided", + "%w: %q has %d variable labels named %q but %d values %q were provided", errInconsistentCardinality, fqName, len(labels), labels, len(labelValues), labelValues, @@ -49,7 +148,7 @@ func makeInconsistentCardinalityError(fqName string, labels, labelValues []strin func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error { if len(labels) != expectedNumberOfValues { return fmt.Errorf( - "%s: expected %d label values but got %d in %#v", + "%w: expected %d label values but got %d in %#v", errInconsistentCardinality, expectedNumberOfValues, len(labels), labels, ) @@ -66,8 +165,10 @@ func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error { func validateLabelValues(vals []string, expectedNumberOfValues int) error { if len(vals) != expectedNumberOfValues { + // The call below makes vals escape, copy them to avoid that. + vals := append([]string(nil), vals...) return fmt.Errorf( - "%s: expected %d label values but got %d in %#v", + "%w: expected %d label values but got %d in %#v", errInconsistentCardinality, expectedNumberOfValues, len(vals), vals, ) diff --git a/tools/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/tools/vendor/github.com/prometheus/client_golang/prometheus/metric.go index dc121910a..592eec3e2 100644 --- a/tools/vendor/github.com/prometheus/client_golang/prometheus/metric.go +++ b/tools/vendor/github.com/prometheus/client_golang/prometheus/metric.go @@ -14,14 +14,15 @@ package prometheus import ( + "errors" + "math" + "sort" "strings" "time" - //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. 
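The %s to %w changes in these hunks are behavior-relevant: only %w keeps sentinel errors matchable with errors.Is. A minimal illustration using a hypothetical stand-in sentinel (the package's own errInconsistentCardinality is unexported):

```go
package main

import (
	"errors"
	"fmt"
)

// Hypothetical stand-in for the package's unexported sentinel.
var errCardinality = errors.New("inconsistent label cardinality")

func main() {
	wrapped := fmt.Errorf("%w: expected 2 label values but got 1", errCardinality)
	opaque := fmt.Errorf("%s: expected 2 label values but got 1", errCardinality)

	fmt.Println(errors.Is(wrapped, errCardinality)) // true: %w keeps the error chain
	fmt.Println(errors.Is(opaque, errCardinality))  // false: %s flattens to text
}
```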
- "github.com/golang/protobuf/proto" - "github.com/prometheus/common/model" - dto "github.com/prometheus/client_model/go" + "github.com/prometheus/common/model" + "google.golang.org/protobuf/proto" ) var separatorByteSlice = []byte{model.SeparatorByte} // For convenient use with xxhash. @@ -91,6 +92,9 @@ type Opts struct { // machine_role metric). See also // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels ConstLabels Labels + + // now is for testing purposes, by default it's time.Now. + now func() time.Time } // BuildFQName joins the given three name components by "_". Empty name @@ -104,31 +108,23 @@ func BuildFQName(namespace, subsystem, name string) string { if name == "" { return "" } - switch { - case namespace != "" && subsystem != "": - return strings.Join([]string{namespace, subsystem, name}, "_") - case namespace != "": - return strings.Join([]string{namespace, name}, "_") - case subsystem != "": - return strings.Join([]string{subsystem, name}, "_") - } - return name -} -// labelPairSorter implements sort.Interface. It is used to sort a slice of -// dto.LabelPair pointers. -type labelPairSorter []*dto.LabelPair + sb := strings.Builder{} + sb.Grow(len(namespace) + len(subsystem) + len(name) + 2) -func (s labelPairSorter) Len() int { - return len(s) -} + if namespace != "" { + sb.WriteString(namespace) + sb.WriteString("_") + } -func (s labelPairSorter) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} + if subsystem != "" { + sb.WriteString(subsystem) + sb.WriteString("_") + } + + sb.WriteString(name) -func (s labelPairSorter) Less(i, j int) bool { - return s[i].GetName() < s[j].GetName() + return sb.String() } type invalidMetric struct { @@ -174,3 +170,96 @@ func (m timestampedMetric) Write(pb *dto.Metric) error { func NewMetricWithTimestamp(t time.Time, m Metric) Metric { return timestampedMetric{Metric: m, t: t} } + +type withExemplarsMetric struct { + Metric + + exemplars []*dto.Exemplar +} + +func (m *withExemplarsMetric) Write(pb *dto.Metric) error { + if err := m.Metric.Write(pb); err != nil { + return err + } + + switch { + case pb.Counter != nil: + pb.Counter.Exemplar = m.exemplars[len(m.exemplars)-1] + case pb.Histogram != nil: + for _, e := range m.exemplars { + // pb.Histogram.Bucket are sorted by UpperBound. + i := sort.Search(len(pb.Histogram.Bucket), func(i int) bool { + return pb.Histogram.Bucket[i].GetUpperBound() >= e.GetValue() + }) + if i < len(pb.Histogram.Bucket) { + pb.Histogram.Bucket[i].Exemplar = e + } else { + // The +Inf bucket should be explicitly added if there is an exemplar for it, similar to non-const histogram logic in https://github.com/prometheus/client_golang/blob/main/prometheus/histogram.go#L357-L365. + b := &dto.Bucket{ + CumulativeCount: proto.Uint64(pb.Histogram.GetSampleCount()), + UpperBound: proto.Float64(math.Inf(1)), + Exemplar: e, + } + pb.Histogram.Bucket = append(pb.Histogram.Bucket, b) + } + } + default: + // TODO(bwplotka): Implement Gauge? + return errors.New("cannot inject exemplar into Gauge, Summary or Untyped") + } + + return nil +} + +// Exemplar is easier to use, user-facing representation of *dto.Exemplar. +type Exemplar struct { + Value float64 + Labels Labels + // Optional. + // Default value (time.Time{}) indicates its empty, which should be + // understood as time.Now() time at the moment of creation of metric. + Timestamp time.Time +} + +// NewMetricWithExemplars returns a new Metric wrapping the provided Metric with given +// exemplars. Exemplars are validated. 
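The strings.Builder rewrite of BuildFQName keeps the original joining semantics; a quick sketch of the observable behavior:

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	fmt.Println(prometheus.BuildFQName("ns", "sub", "reqs")) // "ns_sub_reqs"
	fmt.Println(prometheus.BuildFQName("ns", "", "reqs"))    // "ns_reqs"
	fmt.Println(prometheus.BuildFQName("", "sub", "reqs"))   // "sub_reqs"
	fmt.Println(prometheus.BuildFQName("", "", "reqs"))      // "reqs"
	fmt.Println(prometheus.BuildFQName("ns", "sub", ""))     // "" (empty name short-circuits)
}
```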
+// +// Only last applicable exemplar is injected from the list. +// For example for Counter it means last exemplar is injected. +// For Histogram, it means last applicable exemplar for each bucket is injected. +// +// NewMetricWithExemplars works best with MustNewConstMetric and +// MustNewConstHistogram, see example. +func NewMetricWithExemplars(m Metric, exemplars ...Exemplar) (Metric, error) { + if len(exemplars) == 0 { + return nil, errors.New("no exemplar was passed for NewMetricWithExemplars") + } + + var ( + now = time.Now() + exs = make([]*dto.Exemplar, len(exemplars)) + err error + ) + for i, e := range exemplars { + ts := e.Timestamp + if ts.IsZero() { + ts = now + } + exs[i], err = newExemplar(e.Value, ts, e.Labels) + if err != nil { + return nil, err + } + } + + return &withExemplarsMetric{Metric: m, exemplars: exs}, nil +} + +// MustNewMetricWithExemplars is a version of NewMetricWithExemplars that panics where +// NewMetricWithExemplars would have returned an error. +func MustNewMetricWithExemplars(m Metric, exemplars ...Exemplar) Metric { + ret, err := NewMetricWithExemplars(m, exemplars...) + if err != nil { + panic(err) + } + return ret +} diff --git a/tools/vendor/github.com/prometheus/client_golang/prometheus/num_threads.go b/tools/vendor/github.com/prometheus/client_golang/prometheus/num_threads.go new file mode 100644 index 000000000..7c12b2108 --- /dev/null +++ b/tools/vendor/github.com/prometheus/client_golang/prometheus/num_threads.go @@ -0,0 +1,25 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !js || wasm +// +build !js wasm + +package prometheus + +import "runtime" + +// getRuntimeNumThreads returns the number of open OS threads. +func getRuntimeNumThreads() float64 { + n, _ := runtime.ThreadCreateProfile(nil) + return float64(n) +} diff --git a/tools/vendor/github.com/prometheus/client_golang/prometheus/num_threads_gopherjs.go b/tools/vendor/github.com/prometheus/client_golang/prometheus/num_threads_gopherjs.go new file mode 100644 index 000000000..7348df01d --- /dev/null +++ b/tools/vendor/github.com/prometheus/client_golang/prometheus/num_threads_gopherjs.go @@ -0,0 +1,22 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build js && !wasm +// +build js,!wasm + +package prometheus + +// getRuntimeNumThreads returns the number of open OS threads. 
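A hedged usage sketch for the exemplar helpers above, pairing MustNewConstMetric with MustNewMetricWithExemplars as the doc comment suggests; the metric name and trace_id label are illustrative:

```go
package main

import (
	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	desc := prometheus.NewDesc("jobs_done_total", "Completed jobs.", nil, nil)
	base := prometheus.MustNewConstMetric(desc, prometheus.CounterValue, 42)

	// For a counter, only the last exemplar in the list is injected.
	m := prometheus.MustNewMetricWithExemplars(base, prometheus.Exemplar{
		Value:  42,
		Labels: prometheus.Labels{"trace_id": "abc123"}, // illustrative
	})
	_ = m // expose via a custom Collector's Collect method
}
```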
+func getRuntimeNumThreads() float64 { + return 1 +} diff --git a/tools/vendor/github.com/prometheus/client_golang/prometheus/observer.go b/tools/vendor/github.com/prometheus/client_golang/prometheus/observer.go index 44128016f..03773b21f 100644 --- a/tools/vendor/github.com/prometheus/client_golang/prometheus/observer.go +++ b/tools/vendor/github.com/prometheus/client_golang/prometheus/observer.go @@ -58,7 +58,7 @@ type ObserverVec interface { // current time as timestamp, and the provided Labels. Empty Labels will lead to // a valid (label-less) exemplar. But if Labels is nil, the current exemplar is // left in place. ObserveWithExemplar panics if any of the provided labels are -// invalid or if the provided labels contain more than 64 runes in total. +// invalid or if the provided labels contain more than 128 runes in total. type ExemplarObserver interface { ObserveWithExemplar(value float64, exemplar Labels) } diff --git a/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go index 5bfe0ff5b..e7bce8b58 100644 --- a/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go +++ b/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go @@ -16,21 +16,22 @@ package prometheus import ( "errors" "fmt" - "io/ioutil" "os" "strconv" "strings" ) type processCollector struct { - collectFn func(chan<- Metric) - pidFn func() (int, error) - reportErrors bool - cpuTotal *Desc - openFDs, maxFDs *Desc - vsize, maxVsize *Desc - rss *Desc - startTime *Desc + collectFn func(chan<- Metric) + describeFn func(chan<- *Desc) + pidFn func() (int, error) + reportErrors bool + cpuTotal *Desc + openFDs, maxFDs *Desc + vsize, maxVsize *Desc + rss *Desc + startTime *Desc + inBytes, outBytes *Desc } // ProcessCollectorOpts defines the behavior of a process metrics collector @@ -101,11 +102,20 @@ func NewProcessCollector(opts ProcessCollectorOpts) Collector { "Start time of the process since unix epoch in seconds.", nil, nil, ), + inBytes: NewDesc( + ns+"process_network_receive_bytes_total", + "Number of bytes received by the process over the network.", + nil, nil, + ), + outBytes: NewDesc( + ns+"process_network_transmit_bytes_total", + "Number of bytes sent by the process over the network.", + nil, nil, + ), } if opts.PidFn == nil { - pid := os.Getpid() - c.pidFn = func() (int, error) { return pid, nil } + c.pidFn = getPIDFn() } else { c.pidFn = opts.PidFn } @@ -113,24 +123,23 @@ func NewProcessCollector(opts ProcessCollectorOpts) Collector { // Set up process metric collection if supported by the runtime. if canCollectProcess() { c.collectFn = c.processCollect + c.describeFn = c.describe } else { - c.collectFn = func(ch chan<- Metric) { - c.reportError(ch, nil, errors.New("process metrics not supported on this platform")) - } + c.collectFn = c.errorCollectFn + c.describeFn = c.errorDescribeFn } return c } -// Describe returns all descriptions of the collector. 
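A short sketch of the ExemplarObserver interface documented above (the 128-rune limit applies to the exemplar labels in total); the metric name and trace_id label are illustrative:

```go
package main

import (
	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	latency := prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name: "request_duration_seconds",
		Help: "Request latency.",
	}, []string{"handler"})

	obs := latency.WithLabelValues("/api")
	// Histogram-backed observers implement ExemplarObserver; exceeding the
	// 128-rune label budget (or using invalid labels) panics.
	if eo, ok := obs.(prometheus.ExemplarObserver); ok {
		eo.ObserveWithExemplar(0.042, prometheus.Labels{"trace_id": "abc123"})
	}
}
```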
-func (c *processCollector) Describe(ch chan<- *Desc) { - ch <- c.cpuTotal - ch <- c.openFDs - ch <- c.maxFDs - ch <- c.vsize - ch <- c.maxVsize - ch <- c.rss - ch <- c.startTime +func (c *processCollector) errorCollectFn(ch chan<- Metric) { + c.reportError(ch, nil, errors.New("process metrics not supported on this platform")) +} + +func (c *processCollector) errorDescribeFn(ch chan<- *Desc) { + if c.reportErrors { + ch <- NewInvalidDesc(errors.New("process metrics not supported on this platform")) + } } // Collect returns the current state of all metrics of the collector. @@ -138,6 +147,11 @@ func (c *processCollector) Collect(ch chan<- Metric) { c.collectFn(ch) } +// Describe returns all descriptions of the collector. +func (c *processCollector) Describe(ch chan<- *Desc) { + c.describeFn(ch) +} + func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error) { if !c.reportErrors { return @@ -152,13 +166,13 @@ func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error) // It is meant to be used for the PidFn field in ProcessCollectorOpts. func NewPidFileFn(pidFilePath string) func() (int, error) { return func() (int, error) { - content, err := ioutil.ReadFile(pidFilePath) + content, err := os.ReadFile(pidFilePath) if err != nil { - return 0, fmt.Errorf("can't read pid file %q: %+v", pidFilePath, err) + return 0, fmt.Errorf("can't read pid file %q: %w", pidFilePath, err) } pid, err := strconv.Atoi(strings.TrimSpace(string(content))) if err != nil { - return 0, fmt.Errorf("can't parse pid file %q: %+v", pidFilePath, err) + return 0, fmt.Errorf("can't parse pid file %q: %w", pidFilePath, err) } return pid, nil diff --git a/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go b/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go new file mode 100644 index 000000000..0a61b9846 --- /dev/null +++ b/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go @@ -0,0 +1,130 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build darwin && !ios + +package prometheus + +import ( + "errors" + "fmt" + "os" + "syscall" + "time" + + "golang.org/x/sys/unix" +) + +// notImplementedErr is returned by stub functions that replace cgo functions, when cgo +// isn't available. +var notImplementedErr = errors.New("not implemented") + +type memoryInfo struct { + vsize uint64 // Virtual memory size in bytes + rss uint64 // Resident memory size in bytes +} + +func canCollectProcess() bool { + return true +} + +func getSoftLimit(which int) (uint64, error) { + rlimit := syscall.Rlimit{} + + if err := syscall.Getrlimit(which, &rlimit); err != nil { + return 0, err + } + + return rlimit.Cur, nil +} + +func getOpenFileCount() (float64, error) { + // Alternately, the undocumented proc_pidinfo(PROC_PIDLISTFDS) can be used to + // return a list of open fds, but that requires a way to call C APIs. 
The + // benefits, however, include fewer system calls and not failing when at the + // open file soft limit. + + if dir, err := os.Open("/dev/fd"); err != nil { + return 0.0, err + } else { + defer dir.Close() + + // Avoid ReadDir(), as it calls stat(2) on each descriptor. Not only is + // that info not used, but KQUEUE descriptors fail stat(2), which causes + // the whole method to fail. + if names, err := dir.Readdirnames(0); err != nil { + return 0.0, err + } else { + // Subtract 1 to ignore the open /dev/fd descriptor above. + return float64(len(names) - 1), nil + } + } +} + +func (c *processCollector) processCollect(ch chan<- Metric) { + if procs, err := unix.SysctlKinfoProcSlice("kern.proc.pid", os.Getpid()); err == nil { + if len(procs) == 1 { + startTime := float64(procs[0].Proc.P_starttime.Nano() / 1e9) + ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime) + } else { + err = fmt.Errorf("sysctl() returned %d proc structs (expected 1)", len(procs)) + c.reportError(ch, c.startTime, err) + } + } else { + c.reportError(ch, c.startTime, err) + } + + // The proc structure returned by kern.proc.pid above has an Rusage member, + // but it is not filled in, so it needs to be fetched by getrusage(2). For + // that call, the UTime, STime, and Maxrss members are filled out, but not + // Ixrss, Idrss, or Isrss for the memory usage. Memory stats will require + // access to the C API to call task_info(TASK_BASIC_INFO). + rusage := unix.Rusage{} + + if err := unix.Getrusage(syscall.RUSAGE_SELF, &rusage); err == nil { + cpuTime := time.Duration(rusage.Stime.Nano() + rusage.Utime.Nano()).Seconds() + ch <- MustNewConstMetric(c.cpuTotal, CounterValue, cpuTime) + } else { + c.reportError(ch, c.cpuTotal, err) + } + + if memInfo, err := getMemory(); err == nil { + ch <- MustNewConstMetric(c.rss, GaugeValue, float64(memInfo.rss)) + ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(memInfo.vsize)) + } else if !errors.Is(err, notImplementedErr) { + // Don't report an error when support is not compiled in. + c.reportError(ch, c.rss, err) + c.reportError(ch, c.vsize, err) + } + + if fds, err := getOpenFileCount(); err == nil { + ch <- MustNewConstMetric(c.openFDs, GaugeValue, fds) + } else { + c.reportError(ch, c.openFDs, err) + } + + if openFiles, err := getSoftLimit(syscall.RLIMIT_NOFILE); err == nil { + ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(openFiles)) + } else { + c.reportError(ch, c.maxFDs, err) + } + + if addressSpace, err := getSoftLimit(syscall.RLIMIT_AS); err == nil { + ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(addressSpace)) + } else { + c.reportError(ch, c.maxVsize, err) + } + + // TODO: socket(PF_SYSTEM) to fetch "com.apple.network.statistics" might + // be able to get the per-process network send/receive counts. +} diff --git a/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.c b/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.c new file mode 100644 index 000000000..d00a24315 --- /dev/null +++ b/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.c @@ -0,0 +1,84 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build darwin && !ios && cgo + +#include +#include +#include + +// The compiler warns that mach/shared_memory_server.h is deprecated, and to use +// mach/shared_region.h instead. But that doesn't define +// SHARED_DATA_REGION_SIZE or SHARED_TEXT_REGION_SIZE, so redefine them here and +// avoid a warning message when running tests. +#define GLOBAL_SHARED_TEXT_SEGMENT 0x90000000U +#define SHARED_DATA_REGION_SIZE 0x10000000 +#define SHARED_TEXT_REGION_SIZE 0x10000000 + + +int get_memory_info(unsigned long long *rss, unsigned long long *vsize) +{ + // This is lightly adapted from how ps(1) obtains its memory info. + // https://github.com/apple-oss-distributions/adv_cmds/blob/8744084ea0ff41ca4bb96b0f9c22407d0e48e9b7/ps/tasks.c#L109 + + kern_return_t error; + task_t task = MACH_PORT_NULL; + mach_task_basic_info_data_t info; + mach_msg_type_number_t info_count = MACH_TASK_BASIC_INFO_COUNT; + + error = task_info( + mach_task_self(), + MACH_TASK_BASIC_INFO, + (task_info_t) &info, + &info_count ); + + if( error != KERN_SUCCESS ) + { + return error; + } + + *rss = info.resident_size; + *vsize = info.virtual_size; + + { + vm_region_basic_info_data_64_t b_info; + mach_vm_address_t address = GLOBAL_SHARED_TEXT_SEGMENT; + mach_vm_size_t size; + mach_port_t object_name; + + /* + * try to determine if this task has the split libraries + * mapped in... if so, adjust its virtual size down by + * the 2 segments that are used for split libraries + */ + info_count = VM_REGION_BASIC_INFO_COUNT_64; + + error = mach_vm_region( + mach_task_self(), + &address, + &size, + VM_REGION_BASIC_INFO_64, + (vm_region_info_t) &b_info, + &info_count, + &object_name); + + if (error == KERN_SUCCESS) { + if (b_info.reserved && size == (SHARED_TEXT_REGION_SIZE) && + *vsize > (SHARED_TEXT_REGION_SIZE + SHARED_DATA_REGION_SIZE)) { + *vsize -= (SHARED_TEXT_REGION_SIZE + SHARED_DATA_REGION_SIZE); + } + } + } + + return 0; +} diff --git a/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.go b/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.go new file mode 100644 index 000000000..9ac53f999 --- /dev/null +++ b/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.go @@ -0,0 +1,51 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:build darwin && !ios && cgo + +package prometheus + +/* +int get_memory_info(unsigned long long *rss, unsigned long long *vs); +*/ +import "C" +import "fmt" + +func getMemory() (*memoryInfo, error) { + var rss, vsize C.ulonglong + + if err := C.get_memory_info(&rss, &vsize); err != 0 { + return nil, fmt.Errorf("task_info() failed with 0x%x", int(err)) + } + + return &memoryInfo{vsize: uint64(vsize), rss: uint64(rss)}, nil +} + +// describe returns all descriptions of the collector for Darwin. +// Ensure that this list of descriptors is kept in sync with the metrics collected +// in the processCollect method. Any changes to the metrics in processCollect +// (such as adding or removing metrics) should be reflected in this list of descriptors. +func (c *processCollector) describe(ch chan<- *Desc) { + ch <- c.cpuTotal + ch <- c.openFDs + ch <- c.maxFDs + ch <- c.maxVsize + ch <- c.startTime + ch <- c.rss + ch <- c.vsize + + /* the process could be collected but not implemented yet + ch <- c.inBytes + ch <- c.outBytes + */ +} diff --git a/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go b/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go new file mode 100644 index 000000000..8ddb0995d --- /dev/null +++ b/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go @@ -0,0 +1,39 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build darwin && !ios && !cgo + +package prometheus + +func getMemory() (*memoryInfo, error) { + return nil, notImplementedErr +} + +// describe returns all descriptions of the collector for Darwin. +// Ensure that this list of descriptors is kept in sync with the metrics collected +// in the processCollect method. Any changes to the metrics in processCollect +// (such as adding or removing metrics) should be reflected in this list of descriptors. +func (c *processCollector) describe(ch chan<- *Desc) { + ch <- c.cpuTotal + ch <- c.openFDs + ch <- c.maxFDs + ch <- c.maxVsize + ch <- c.startTime + + /* the process could be collected but not implemented yet + ch <- c.rss + ch <- c.vsize + ch <- c.inBytes + ch <- c.outBytes + */ +} diff --git a/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_not_supported.go b/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_not_supported.go new file mode 100644 index 000000000..7732b7f37 --- /dev/null +++ b/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_not_supported.go @@ -0,0 +1,33 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build wasip1 || js || ios +// +build wasip1 js ios + +package prometheus + +func canCollectProcess() bool { + return false +} + +func (c *processCollector) processCollect(ch chan<- Metric) { + c.errorCollectFn(ch) +} + +// describe returns all descriptions of the collector for wasip1 and js. +// Ensure that this list of descriptors is kept in sync with the metrics collected +// in the processCollect method. Any changes to the metrics in processCollect +// (such as adding or removing metrics) should be reflected in this list of descriptors. +func (c *processCollector) describe(ch chan<- *Desc) { + c.errorDescribeFn(ch) +} diff --git a/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go b/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_procfsenabled.go similarity index 63% rename from tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go rename to tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_procfsenabled.go index 2dc3660da..9f4b130be 100644 --- a/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go +++ b/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_procfsenabled.go @@ -11,8 +11,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build !windows -// +build !windows +//go:build !windows && !js && !wasip1 && !darwin +// +build !windows,!js,!wasip1,!darwin package prometheus @@ -63,4 +63,34 @@ func (c *processCollector) processCollect(ch chan<- Metric) { } else { c.reportError(ch, nil, err) } + + if netstat, err := p.Netstat(); err == nil { + var inOctets, outOctets float64 + if netstat.IpExt.InOctets != nil { + inOctets = *netstat.IpExt.InOctets + } + if netstat.IpExt.OutOctets != nil { + outOctets = *netstat.IpExt.OutOctets + } + ch <- MustNewConstMetric(c.inBytes, CounterValue, inOctets) + ch <- MustNewConstMetric(c.outBytes, CounterValue, outOctets) + } else { + c.reportError(ch, nil, err) + } +} + +// describe returns all descriptions of the collector for others than windows, js, wasip1 and darwin. +// Ensure that this list of descriptors is kept in sync with the metrics collected +// in the processCollect method. Any changes to the metrics in processCollect +// (such as adding or removing metrics) should be reflected in this list of descriptors. 
+func (c *processCollector) describe(ch chan<- *Desc) { + ch <- c.cpuTotal + ch <- c.openFDs + ch <- c.maxFDs + ch <- c.vsize + ch <- c.maxVsize + ch <- c.rss + ch <- c.startTime + ch <- c.inBytes + ch <- c.outBytes } diff --git a/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go b/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go index f973398df..fa474289e 100644 --- a/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go +++ b/tools/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go @@ -79,14 +79,10 @@ func getProcessHandleCount(handle windows.Handle) (uint32, error) { } func (c *processCollector) processCollect(ch chan<- Metric) { - h, err := windows.GetCurrentProcess() - if err != nil { - c.reportError(ch, nil, err) - return - } + h := windows.CurrentProcess() var startTime, exitTime, kernelTime, userTime windows.Filetime - err = windows.GetProcessTimes(h, &startTime, &exitTime, &kernelTime, &userTime) + err := windows.GetProcessTimes(h, &startTime, &exitTime, &kernelTime, &userTime) if err != nil { c.reportError(ch, nil, err) return @@ -111,6 +107,19 @@ func (c *processCollector) processCollect(ch chan<- Metric) { ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(16*1024*1024)) // Windows has a hard-coded max limit, not per-process. } +// describe returns all descriptions of the collector for windows. +// Ensure that this list of descriptors is kept in sync with the metrics collected +// in the processCollect method. Any changes to the metrics in processCollect +// (such as adding or removing metrics) should be reflected in this list of descriptors. +func (c *processCollector) describe(ch chan<- *Desc) { + ch <- c.cpuTotal + ch <- c.openFDs + ch <- c.maxFDs + ch <- c.vsize + ch <- c.rss + ch <- c.startTime +} + func fileTimeToSeconds(ft windows.Filetime) float64 { return float64(uint64(ft.HighDateTime)<<32+uint64(ft.LowDateTime)) / 1e7 } diff --git a/tools/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/tools/vendor/github.com/prometheus/client_golang/prometheus/registry.go index 383a7f594..c6fd2f58b 100644 --- a/tools/vendor/github.com/prometheus/client_golang/prometheus/registry.go +++ b/tools/vendor/github.com/prometheus/client_golang/prometheus/registry.go @@ -15,24 +15,23 @@ package prometheus import ( "bytes" + "errors" "fmt" - "io/ioutil" "os" "path/filepath" "runtime" "sort" + "strconv" "strings" "sync" "unicode/utf8" - "github.com/cespare/xxhash/v2" - //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. - "github.com/golang/protobuf/proto" - "github.com/prometheus/common/expfmt" + "github.com/prometheus/client_golang/prometheus/internal" + "github.com/cespare/xxhash/v2" dto "github.com/prometheus/client_model/go" - - "github.com/prometheus/client_golang/prometheus/internal" + "github.com/prometheus/common/expfmt" + "google.golang.org/protobuf/proto" ) const ( @@ -252,9 +251,12 @@ func (errs MultiError) MaybeUnwrap() error { } // Registry registers Prometheus collectors, collects their metrics, and gathers -// them into MetricFamilies for exposition. It implements both Registerer and -// Gatherer. The zero value is not usable. Create instances with NewRegistry or -// NewPedanticRegistry. +// them into MetricFamilies for exposition. It implements Registerer, Gatherer, +// and Collector. The zero value is not usable. 
Create instances with +// NewRegistry or NewPedanticRegistry. +// +// Registry implements Collector to allow it to be used for creating groups of +// metrics. See the Grouping example for how this can be done. type Registry struct { mtx sync.RWMutex collectorsByID map[uint64]Collector // ID is a hash of the descIDs. @@ -289,7 +291,7 @@ func (r *Registry) Register(c Collector) error { // Is the descriptor valid at all? if desc.err != nil { - return fmt.Errorf("descriptor %s is invalid: %s", desc, desc.err) + return fmt.Errorf("descriptor %s is invalid: %w", desc, desc.err) } // Is the descID unique? @@ -312,16 +314,17 @@ func (r *Registry) Register(c Collector) error { if dimHash != desc.dimHash { return fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc) } - } else { - // ...then check the new descriptors already seen. - if dimHash, exists := newDimHashesByName[desc.fqName]; exists { - if dimHash != desc.dimHash { - return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc) - } - } else { - newDimHashesByName[desc.fqName] = desc.dimHash + continue + } + + // ...then check the new descriptors already seen. + if dimHash, exists := newDimHashesByName[desc.fqName]; exists { + if dimHash != desc.dimHash { + return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc) } + continue } + newDimHashesByName[desc.fqName] = desc.dimHash } // A Collector yielding no Desc at all is considered unchecked. if len(newDescIDs) == 0 { @@ -407,6 +410,14 @@ func (r *Registry) MustRegister(cs ...Collector) { // Gather implements Gatherer. func (r *Registry) Gather() ([]*dto.MetricFamily, error) { + r.mtx.RLock() + + if len(r.collectorsByID) == 0 && len(r.uncheckedCollectors) == 0 { + // Fast path. + r.mtx.RUnlock() + return nil, nil + } + var ( checkedMetricChan = make(chan Metric, capMetricChan) uncheckedMetricChan = make(chan Metric, capMetricChan) @@ -416,7 +427,6 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) { registeredDescIDs map[uint64]struct{} // Only used for pedantic checks ) - r.mtx.RLock() goroutineBudget := len(r.collectorsByID) + len(r.uncheckedCollectors) metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName)) checkedCollectors := make(chan Collector, len(r.collectorsByID)) @@ -539,7 +549,7 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) { goroutineBudget-- runtime.Gosched() } - // Once both checkedMetricChan and uncheckdMetricChan are closed + // Once both checkedMetricChan and uncheckedMetricChan are closed // and drained, the contraption above will nil out cmc and umc, // and then we can leave the collect loop here. if cmc == nil && umc == nil { @@ -549,6 +559,31 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) { return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() } +// Describe implements Collector. +func (r *Registry) Describe(ch chan<- *Desc) { + r.mtx.RLock() + defer r.mtx.RUnlock() + + // Only report the checked Collectors; unchecked collectors don't report any + // Desc. + for _, c := range r.collectorsByID { + c.Describe(ch) + } +} + +// Collect implements Collector. 
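Because Registry now implements Collector (the Describe and Collect methods above), registries can be nested to group metrics; a minimal sketch:

```go
package main

import (
	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	parent := prometheus.NewRegistry()

	// A sub-registry acts as a single Collector inside the parent.
	group := prometheus.NewRegistry()
	group.MustRegister(prometheus.NewCounter(prometheus.CounterOpts{
		Name: "jobs_done_total",
		Help: "Completed jobs.",
	}))

	parent.MustRegister(group)
	if _, err := parent.Gather(); err != nil { // includes the group's metrics
		panic(err)
	}
}
```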
+func (r *Registry) Collect(ch chan<- Metric) { + r.mtx.RLock() + defer r.mtx.RUnlock() + + for _, c := range r.collectorsByID { + c.Collect(ch) + } + for _, c := range r.uncheckedCollectors { + c.Collect(ch) + } +} + // WriteToTextfile calls Gather on the provided Gatherer, encodes the result in the // Prometheus text format, and writes it to a temporary file. Upon success, the // temporary file is renamed to the provided filename. @@ -556,7 +591,7 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) { // This is intended for use with the textfile collector of the node exporter. // Note that the node exporter expects the filename to be suffixed with ".prom". func WriteToTextfile(filename string, g Gatherer) error { - tmp, err := ioutil.TempFile(filepath.Dir(filename), filepath.Base(filename)) + tmp, err := os.CreateTemp(filepath.Dir(filename), filepath.Base(filename)) if err != nil { return err } @@ -575,7 +610,7 @@ func WriteToTextfile(filename string, g Gatherer) error { return err } - if err := os.Chmod(tmp.Name(), 0644); err != nil { + if err := os.Chmod(tmp.Name(), 0o644); err != nil { return err } return os.Rename(tmp.Name(), filename) @@ -596,7 +631,7 @@ func processMetric( } dtoMetric := &dto.Metric{} if err := metric.Write(dtoMetric); err != nil { - return fmt.Errorf("error collecting metric %v: %s", desc, err) + return fmt.Errorf("error collecting metric %v: %w", desc, err) } metricFamily, ok := metricFamiliesByName[desc.fqName] if ok { // Existing name. @@ -718,12 +753,13 @@ func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) { for i, g := range gs { mfs, err := g.Gather() if err != nil { - if multiErr, ok := err.(MultiError); ok { + multiErr := MultiError{} + if errors.As(err, &multiErr) { for _, err := range multiErr { - errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err)) + errs = append(errs, fmt.Errorf("[from Gatherer #%d] %w", i+1, err)) } } else { - errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err)) + errs = append(errs, fmt.Errorf("[from Gatherer #%d] %w", i+1, err)) } } for _, mf := range mfs { @@ -884,11 +920,11 @@ func checkMetricConsistency( h.Write(separatorByteSlice) // Make sure label pairs are sorted. We depend on it for the consistency // check. - if !sort.IsSorted(labelPairSorter(dtoMetric.Label)) { + if !sort.IsSorted(internal.LabelPairSorter(dtoMetric.Label)) { // We cannot sort dtoMetric.Label in place as it is immutable by contract. copiedLabels := make([]*dto.LabelPair, len(dtoMetric.Label)) copy(copiedLabels, dtoMetric.Label) - sort.Sort(labelPairSorter(copiedLabels)) + sort.Sort(internal.LabelPairSorter(copiedLabels)) dtoMetric.Label = copiedLabels } for _, lp := range dtoMetric.Label { @@ -897,6 +933,10 @@ func checkMetricConsistency( h.WriteString(lp.GetValue()) h.Write(separatorByteSlice) } + if dtoMetric.TimestampMs != nil { + h.WriteString(strconv.FormatInt(*(dtoMetric.TimestampMs), 10)) + h.Write(separatorByteSlice) + } hSum := h.Sum64() if _, exists := metricHashes[hSum]; exists { return fmt.Errorf( @@ -924,7 +964,7 @@ func checkDescConsistency( // Is the desc consistent with the content of the metric? 
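A brief usage sketch for the WriteToTextfile helper touched in this hunk, intended for the node exporter's textfile collector; the gauge is illustrative:

```go
package main

import (
	"log"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	reg := prometheus.NewRegistry()
	reg.MustRegister(prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "batch_last_success_timestamp_seconds",
		Help: "Unix time of the last successful batch run.",
	}))

	// Written atomically: a temp file is created, then renamed into place.
	if err := prometheus.WriteToTextfile("metrics.prom", reg); err != nil {
		log.Fatal(err)
	}
}
```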
lpsFromDesc := make([]*dto.LabelPair, len(desc.constLabelPairs), len(dtoMetric.Label)) copy(lpsFromDesc, desc.constLabelPairs) - for _, l := range desc.variableLabels { + for _, l := range desc.variableLabels.names { lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{ Name: proto.String(l), }) @@ -935,7 +975,7 @@ func checkDescConsistency( metricFamily.GetName(), dtoMetric, desc, ) } - sort.Sort(labelPairSorter(lpsFromDesc)) + sort.Sort(internal.LabelPairSorter(lpsFromDesc)) for i, lpFromDesc := range lpsFromDesc { lpFromMetric := dtoMetric.Label[i] if lpFromDesc.GetName() != lpFromMetric.GetName() || @@ -948,3 +988,89 @@ func checkDescConsistency( } return nil } + +var _ TransactionalGatherer = &MultiTRegistry{} + +// MultiTRegistry is a TransactionalGatherer that joins gathered metrics from multiple +// transactional gatherers. +// +// It is caller responsibility to ensure two registries have mutually exclusive metric families, +// no deduplication will happen. +type MultiTRegistry struct { + tGatherers []TransactionalGatherer +} + +// NewMultiTRegistry creates MultiTRegistry. +func NewMultiTRegistry(tGatherers ...TransactionalGatherer) *MultiTRegistry { + return &MultiTRegistry{ + tGatherers: tGatherers, + } +} + +// Gather implements TransactionalGatherer interface. +func (r *MultiTRegistry) Gather() (mfs []*dto.MetricFamily, done func(), err error) { + errs := MultiError{} + + dFns := make([]func(), 0, len(r.tGatherers)) + // TODO(bwplotka): Implement concurrency for those? + for _, g := range r.tGatherers { + // TODO(bwplotka): Check for duplicates? + m, d, err := g.Gather() + errs.Append(err) + + mfs = append(mfs, m...) + dFns = append(dFns, d) + } + + // TODO(bwplotka): Consider sort in place, given metric family in gather is sorted already. + sort.Slice(mfs, func(i, j int) bool { + return *mfs[i].Name < *mfs[j].Name + }) + return mfs, func() { + for _, d := range dFns { + d() + } + }, errs.MaybeUnwrap() +} + +// TransactionalGatherer represents transactional gatherer that can be triggered to notify gatherer that memory +// used by metric family is no longer used by a caller. This allows implementations with cache. +type TransactionalGatherer interface { + // Gather returns metrics in a lexicographically sorted slice + // of uniquely named MetricFamily protobufs. Gather ensures that the + // returned slice is valid and self-consistent so that it can be used + // for valid exposition. As an exception to the strict consistency + // requirements described for metric.Desc, Gather will tolerate + // different sets of label names for metrics of the same metric family. + // + // Even if an error occurs, Gather attempts to gather as many metrics as + // possible. Hence, if a non-nil error is returned, the returned + // MetricFamily slice could be nil (in case of a fatal error that + // prevented any meaningful metric collection) or contain a number of + // MetricFamily protobufs, some of which might be incomplete, and some + // might be missing altogether. The returned error (which might be a + // MultiError) explains the details. Note that this is mostly useful for + // debugging purposes. If the gathered protobufs are to be used for + // exposition in actual monitoring, it is almost always better to not + // expose an incomplete result and instead disregard the returned + // MetricFamily protobufs in case the returned error is non-nil. + // + // Important: done is expected to be triggered (even if the error occurs!) + // once caller does not need returned slice of dto.MetricFamily. 
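A minimal sketch of joining transactional gatherers with MultiTRegistry and ToTransactionalGatherer; note that the caller must keep the metric families of the joined registries disjoint and must call done() when finished:

```go
package main

import (
	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	regA := prometheus.NewRegistry()
	regB := prometheus.NewRegistry()

	// Wrap plain Gatherers (with a no-op done) and join them.
	multi := prometheus.NewMultiTRegistry(
		prometheus.ToTransactionalGatherer(regA),
		prometheus.ToTransactionalGatherer(regB),
	)
	mfs, done, err := multi.Gather()
	defer done() // release the families even if err is non-nil
	_, _ = mfs, err
}
```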
+ Gather() (_ []*dto.MetricFamily, done func(), err error) +} + +// ToTransactionalGatherer transforms Gatherer to transactional one with noop as done function. +func ToTransactionalGatherer(g Gatherer) TransactionalGatherer { + return &noTransactionGatherer{g: g} +} + +type noTransactionGatherer struct { + g Gatherer +} + +// Gather implements TransactionalGatherer interface. +func (g *noTransactionGatherer) Gather() (_ []*dto.MetricFamily, done func(), err error) { + mfs, err := g.g.Gather() + return mfs, func() {}, err +} diff --git a/tools/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/tools/vendor/github.com/prometheus/client_golang/prometheus/summary.go index c5fa8ed7c..ac5203c6f 100644 --- a/tools/vendor/github.com/prometheus/client_golang/prometheus/summary.go +++ b/tools/vendor/github.com/prometheus/client_golang/prometheus/summary.go @@ -22,11 +22,11 @@ import ( "sync/atomic" "time" - "github.com/beorn7/perks/quantile" - //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. - "github.com/golang/protobuf/proto" - dto "github.com/prometheus/client_model/go" + + "github.com/beorn7/perks/quantile" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/timestamppb" ) // quantileLabel is used for the label that defines the quantile in a @@ -146,6 +146,21 @@ type SummaryOpts struct { // is the internal buffer size of the underlying package // "github.com/bmizerany/perks/quantile"). BufCap uint32 + + // now is for testing purposes, by default it's time.Now. + now func() time.Time +} + +// SummaryVecOpts bundles the options to create a SummaryVec metric. +// It is mandatory to set SummaryOpts, see there for mandatory fields. VariableLabels +// is optional and can safely be left to its default value. +type SummaryVecOpts struct { + SummaryOpts + + // VariableLabels are used to partition the metric vector by the given set + // of labels. Each label value will be constrained with the optional Constraint + // function, if provided. + VariableLabels ConstrainableLabels } // Problem with the sliding-window decay algorithm... The Merge method of @@ -177,11 +192,11 @@ func NewSummary(opts SummaryOpts) Summary { } func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { - if len(desc.variableLabels) != len(labelValues) { - panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues)) + if len(desc.variableLabels.names) != len(labelValues) { + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, labelValues)) } - for _, n := range desc.variableLabels { + for _, n := range desc.variableLabels.names { if n == quantileLabel { panic(errQuantileLabelNotAllowed) } @@ -211,6 +226,9 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { opts.BufCap = DefBufCap } + if opts.now == nil { + opts.now = time.Now + } if len(opts.Objectives) == 0 { // Use the lock-free implementation of a Summary without objectives. s := &noObjectivesSummary{ @@ -219,11 +237,13 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { counts: [2]*summaryCounts{{}, {}}, } s.init(s) // Init self-collection. 
+ s.createdTs = timestamppb.New(opts.now()) return s } s := &summary{ desc: desc, + now: opts.now, objectives: opts.Objectives, sortedObjectives: make([]float64, 0, len(opts.Objectives)), @@ -234,7 +254,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { coldBuf: make([]float64, 0, opts.BufCap), streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets), } - s.headStreamExpTime = time.Now().Add(s.streamDuration) + s.headStreamExpTime = opts.now().Add(s.streamDuration) s.hotBufExpTime = s.headStreamExpTime for i := uint32(0); i < opts.AgeBuckets; i++ { @@ -248,6 +268,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { sort.Float64s(s.sortedObjectives) s.init(s) // Init self-collection. + s.createdTs = timestamppb.New(opts.now()) return s } @@ -260,6 +281,8 @@ type summary struct { desc *Desc + now func() time.Time + objectives map[float64]float64 sortedObjectives []float64 @@ -275,6 +298,8 @@ type summary struct { headStream *quantile.Stream headStreamIdx int headStreamExpTime, hotBufExpTime time.Time + + createdTs *timestamppb.Timestamp } func (s *summary) Desc() *Desc { @@ -285,7 +310,7 @@ func (s *summary) Observe(v float64) { s.bufMtx.Lock() defer s.bufMtx.Unlock() - now := time.Now() + now := s.now() if now.After(s.hotBufExpTime) { s.asyncFlush(now) } @@ -296,13 +321,15 @@ func (s *summary) Observe(v float64) { } func (s *summary) Write(out *dto.Metric) error { - sum := &dto.Summary{} + sum := &dto.Summary{ + CreatedTimestamp: s.createdTs, + } qs := make([]*dto.Quantile, 0, len(s.objectives)) s.bufMtx.Lock() s.mtx.Lock() // Swap bufs even if hotBuf is empty to set new hotBufExpTime. - s.swapBufs(time.Now()) + s.swapBufs(s.now()) s.bufMtx.Unlock() s.flushColdBuf() @@ -429,6 +456,8 @@ type noObjectivesSummary struct { counts [2]*summaryCounts labelPairs []*dto.LabelPair + + createdTs *timestamppb.Timestamp } func (s *noObjectivesSummary) Desc() *Desc { @@ -479,8 +508,9 @@ func (s *noObjectivesSummary) Write(out *dto.Metric) error { } sum := &dto.Summary{ - SampleCount: proto.Uint64(count), - SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))), + SampleCount: proto.Uint64(count), + SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))), + CreatedTimestamp: s.createdTs, } out.Summary = sum @@ -530,20 +560,28 @@ type SummaryVec struct { // it is handled by the Prometheus server internally, “quantile” is an illegal // label name. NewSummaryVec will panic if this label name is used. func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec { - for _, ln := range labelNames { + return V2.NewSummaryVec(SummaryVecOpts{ + SummaryOpts: opts, + VariableLabels: UnconstrainedLabels(labelNames), + }) +} + +// NewSummaryVec creates a new SummaryVec based on the provided SummaryVecOpts. +func (v2) NewSummaryVec(opts SummaryVecOpts) *SummaryVec { + for _, ln := range opts.VariableLabels.labelNames() { if ln == quantileLabel { panic(errQuantileLabelNotAllowed) } } - desc := NewDesc( + desc := V2.NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), opts.Help, - labelNames, + opts.VariableLabels, opts.ConstLabels, ) return &SummaryVec{ MetricVec: NewMetricVec(desc, func(lvs ...string) Metric { - return newSummary(desc, opts, lvs...) + return newSummary(desc, opts.SummaryOpts, lvs...) 
}), } } @@ -603,7 +641,8 @@ func (v *SummaryVec) GetMetricWith(labels Labels) (Observer, error) { // WithLabelValues works as GetMetricWithLabelValues, but panics where // GetMetricWithLabelValues would have returned an error. Not returning an // error allows shortcuts like -// myVec.WithLabelValues("404", "GET").Observe(42.21) +// +// myVec.WithLabelValues("404", "GET").Observe(42.21) func (v *SummaryVec) WithLabelValues(lvs ...string) Observer { s, err := v.GetMetricWithLabelValues(lvs...) if err != nil { @@ -614,7 +653,8 @@ func (v *SummaryVec) WithLabelValues(lvs ...string) Observer { // With works as GetMetricWith, but panics where GetMetricWithLabels would have // returned an error. Not returning an error allows shortcuts like -// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21) +// +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21) func (v *SummaryVec) With(labels Labels) Observer { s, err := v.GetMetricWith(labels) if err != nil { @@ -660,6 +700,7 @@ type constSummary struct { sum float64 quantiles map[float64]float64 labelPairs []*dto.LabelPair + createdTs *timestamppb.Timestamp } func (s *constSummary) Desc() *Desc { @@ -667,7 +708,9 @@ func (s *constSummary) Desc() *Desc { } func (s *constSummary) Write(out *dto.Metric) error { - sum := &dto.Summary{} + sum := &dto.Summary{ + CreatedTimestamp: s.createdTs, + } qs := make([]*dto.Quantile, 0, len(s.quantiles)) sum.SampleCount = proto.Uint64(s.count) @@ -701,7 +744,8 @@ func (s *constSummary) Write(out *dto.Metric) error { // // quantiles maps ranks to quantile values. For example, a median latency of // 0.23s and a 99th percentile latency of 0.56s would be expressed as: -// map[float64]float64{0.5: 0.23, 0.99: 0.56} +// +// map[float64]float64{0.5: 0.23, 0.99: 0.56} // // NewConstSummary returns an error if the length of labelValues is not // consistent with the variable labels in Desc or if Desc is invalid. @@ -715,7 +759,7 @@ func NewConstSummary( if desc.err != nil { return nil, desc.err } - if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { + if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil { return nil, err } return &constSummary{ @@ -742,3 +786,45 @@ func MustNewConstSummary( } return m } + +// NewConstSummaryWithCreatedTimestamp does the same thing as NewConstSummary but sets the created timestamp. +func NewConstSummaryWithCreatedTimestamp( + desc *Desc, + count uint64, + sum float64, + quantiles map[float64]float64, + ct time.Time, + labelValues ...string, +) (Metric, error) { + if desc.err != nil { + return nil, desc.err + } + if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil { + return nil, err + } + return &constSummary{ + desc: desc, + count: count, + sum: sum, + quantiles: quantiles, + labelPairs: MakeLabelPairs(desc, labelValues), + createdTs: timestamppb.New(ct), + }, nil +} + +// MustNewConstSummaryWithCreatedTimestamp is a version of NewConstSummaryWithCreatedTimestamp that panics where +// NewConstSummaryWithCreatedTimestamp would have returned an error. +func MustNewConstSummaryWithCreatedTimestamp( + desc *Desc, + count uint64, + sum float64, + quantiles map[float64]float64, + ct time.Time, + labelValues ...string, +) Metric { + m, err := NewConstSummaryWithCreatedTimestamp(desc, count, sum, quantiles, ct, labelValues...) 
+	if err != nil {
+		panic(err)
+	}
+	return m
+}
diff --git a/tools/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/problem.go b/tools/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/problem.go
new file mode 100644
index 000000000..9ba42826a
--- /dev/null
+++ b/tools/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/problem.go
@@ -0,0 +1,33 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package promlint
+
+import dto "github.com/prometheus/client_model/go"
+
+// A Problem is an issue detected by a linter.
+type Problem struct {
+	// The name of the metric indicated by this Problem.
+	Metric string
+
+	// A description of the issue for this Problem.
+	Text string
+}
+
+// newProblem is a helper function to create a Problem.
+func newProblem(mf *dto.MetricFamily, text string) Problem {
+	return Problem{
+		Metric: mf.GetName(),
+		Text:   text,
+	}
+}
diff --git a/tools/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/promlint.go b/tools/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/promlint.go
index ec8061706..ea46f38ec 100644
--- a/tools/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/promlint.go
+++ b/tools/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/promlint.go
@@ -15,15 +15,12 @@
 package promlint
 
 import (
-	"fmt"
+	"errors"
 	"io"
-	"regexp"
 	"sort"
-	"strings"
-
-	"github.com/prometheus/common/expfmt"
 
 	dto "github.com/prometheus/client_model/go"
+	"github.com/prometheus/common/expfmt"
 )
 
 // A Linter is a Prometheus metrics linter. It identifies issues with metric
@@ -36,23 +33,8 @@ type Linter struct {
 	// of them.
 	r   io.Reader
 	mfs []*dto.MetricFamily
-}
 
-// A Problem is an issue detected by a Linter.
-type Problem struct {
-	// The name of the metric indicated by this Problem.
-	Metric string
-
-	// A description of the issue for this Problem.
-	Text string
-}
-
-// newProblem is helper function to create a Problem.
-func newProblem(mf *dto.MetricFamily, text string) Problem {
-	return Problem{
-		Metric: mf.GetName(),
-		Text:   text,
-	}
+	customValidations []Validation
 }
 
 // New creates a new Linter that reads an input stream of Prometheus metrics in
@@ -71,6 +53,14 @@ func NewWithMetricFamilies(mfs []*dto.MetricFamily) *Linter {
 	}
 }
 
+// AddCustomValidations adds custom validations to the linter.
+func (l *Linter) AddCustomValidations(vs ...Validation) {
+	if l.customValidations == nil {
+		l.customValidations = make([]Validation, 0, len(vs))
+	}
+	l.customValidations = append(l.customValidations, vs...)
+}
+
 // Lint performs a linting pass, returning a slice of Problems indicating any
 // issues found in the metrics stream. The slice is sorted by metric name
 // and issue description.
@@ -78,23 +68,23 @@ func (l *Linter) Lint() ([]Problem, error) { var problems []Problem if l.r != nil { - d := expfmt.NewDecoder(l.r, expfmt.FmtText) + d := expfmt.NewDecoder(l.r, expfmt.NewFormat(expfmt.TypeTextPlain)) mf := &dto.MetricFamily{} for { if err := d.Decode(mf); err != nil { - if err == io.EOF { + if errors.Is(err, io.EOF) { break } return nil, err } - problems = append(problems, lint(mf)...) + problems = append(problems, l.lint(mf)...) } } for _, mf := range l.mfs { - problems = append(problems, lint(mf)...) + problems = append(problems, l.lint(mf)...) } // Ensure deterministic output. @@ -109,278 +99,25 @@ func (l *Linter) Lint() ([]Problem, error) { } // lint is the entry point for linting a single metric. -func lint(mf *dto.MetricFamily) []Problem { - fns := []func(mf *dto.MetricFamily) []Problem{ - lintHelp, - lintMetricUnits, - lintCounter, - lintHistogramSummaryReserved, - lintMetricTypeInName, - lintReservedChars, - lintCamelCase, - lintUnitAbbreviations, - } - - var problems []Problem - for _, fn := range fns { - problems = append(problems, fn(mf)...) - } - - // TODO(mdlayher): lint rules for specific metrics types. - return problems -} - -// lintHelp detects issues related to the help text for a metric. -func lintHelp(mf *dto.MetricFamily) []Problem { - var problems []Problem - - // Expect all metrics to have help text available. - if mf.Help == nil { - problems = append(problems, newProblem(mf, "no help text")) - } - - return problems -} - -// lintMetricUnits detects issues with metric unit names. -func lintMetricUnits(mf *dto.MetricFamily) []Problem { +func (l *Linter) lint(mf *dto.MetricFamily) []Problem { var problems []Problem - unit, base, ok := metricUnits(*mf.Name) - if !ok { - // No known units detected. - return nil - } - - // Unit is already a base unit. - if unit == base { - return nil - } - - problems = append(problems, newProblem(mf, fmt.Sprintf("use base unit %q instead of %q", base, unit))) - - return problems -} - -// lintCounter detects issues specific to counters, as well as patterns that should -// only be used with counters. -func lintCounter(mf *dto.MetricFamily) []Problem { - var problems []Problem - - isCounter := mf.GetType() == dto.MetricType_COUNTER - isUntyped := mf.GetType() == dto.MetricType_UNTYPED - hasTotalSuffix := strings.HasSuffix(mf.GetName(), "_total") - - switch { - case isCounter && !hasTotalSuffix: - problems = append(problems, newProblem(mf, `counter metrics should have "_total" suffix`)) - case !isUntyped && !isCounter && hasTotalSuffix: - problems = append(problems, newProblem(mf, `non-counter metrics should not have "_total" suffix`)) - } - - return problems -} - -// lintHistogramSummaryReserved detects when other types of metrics use names or labels -// reserved for use by histograms and/or summaries. -func lintHistogramSummaryReserved(mf *dto.MetricFamily) []Problem { - // These rules do not apply to untyped metrics. 
- t := mf.GetType() - if t == dto.MetricType_UNTYPED { - return nil - } - - var problems []Problem - - isHistogram := t == dto.MetricType_HISTOGRAM - isSummary := t == dto.MetricType_SUMMARY - - n := mf.GetName() - - if !isHistogram && strings.HasSuffix(n, "_bucket") { - problems = append(problems, newProblem(mf, `non-histogram metrics should not have "_bucket" suffix`)) - } - if !isHistogram && !isSummary && strings.HasSuffix(n, "_count") { - problems = append(problems, newProblem(mf, `non-histogram and non-summary metrics should not have "_count" suffix`)) - } - if !isHistogram && !isSummary && strings.HasSuffix(n, "_sum") { - problems = append(problems, newProblem(mf, `non-histogram and non-summary metrics should not have "_sum" suffix`)) - } - - for _, m := range mf.GetMetric() { - for _, l := range m.GetLabel() { - ln := l.GetName() - - if !isHistogram && ln == "le" { - problems = append(problems, newProblem(mf, `non-histogram metrics should not have "le" label`)) - } - if !isSummary && ln == "quantile" { - problems = append(problems, newProblem(mf, `non-summary metrics should not have "quantile" label`)) - } + for _, fn := range defaultValidations { + errs := fn(mf) + for _, err := range errs { + problems = append(problems, newProblem(mf, err.Error())) } } - return problems -} - -// lintMetricTypeInName detects when metric types are included in the metric name. -func lintMetricTypeInName(mf *dto.MetricFamily) []Problem { - var problems []Problem - n := strings.ToLower(mf.GetName()) - - for i, t := range dto.MetricType_name { - if i == int32(dto.MetricType_UNTYPED) { - continue - } - - typename := strings.ToLower(t) - if strings.Contains(n, "_"+typename+"_") || strings.HasSuffix(n, "_"+typename) { - problems = append(problems, newProblem(mf, fmt.Sprintf(`metric name should not include type '%s'`, typename))) - } - } - return problems -} - -// lintReservedChars detects colons in metric names. -func lintReservedChars(mf *dto.MetricFamily) []Problem { - var problems []Problem - if strings.Contains(mf.GetName(), ":") { - problems = append(problems, newProblem(mf, "metric names should not contain ':'")) - } - return problems -} - -var camelCase = regexp.MustCompile(`[a-z][A-Z]`) - -// lintCamelCase detects metric names and label names written in camelCase. -func lintCamelCase(mf *dto.MetricFamily) []Problem { - var problems []Problem - if camelCase.FindString(mf.GetName()) != "" { - problems = append(problems, newProblem(mf, "metric names should be written in 'snake_case' not 'camelCase'")) - } - - for _, m := range mf.GetMetric() { - for _, l := range m.GetLabel() { - if camelCase.FindString(l.GetName()) != "" { - problems = append(problems, newProblem(mf, "label names should be written in 'snake_case' not 'camelCase'")) + if l.customValidations != nil { + for _, fn := range l.customValidations { + errs := fn(mf) + for _, err := range errs { + problems = append(problems, newProblem(mf, err.Error())) } } } - return problems -} -// lintUnitAbbreviations detects abbreviated units in the metric name. -func lintUnitAbbreviations(mf *dto.MetricFamily) []Problem { - var problems []Problem - n := strings.ToLower(mf.GetName()) - for _, s := range unitAbbreviations { - if strings.Contains(n, "_"+s+"_") || strings.HasSuffix(n, "_"+s) { - problems = append(problems, newProblem(mf, "metric names should not contain abbreviated units")) - } - } + // TODO(mdlayher): lint rules for specific metrics types. 
return problems } - -// metricUnits attempts to detect known unit types used as part of a metric name, -// e.g. "foo_bytes_total" or "bar_baz_milligrams". -func metricUnits(m string) (unit string, base string, ok bool) { - ss := strings.Split(m, "_") - - for unit, base := range units { - // Also check for "no prefix". - for _, p := range append(unitPrefixes, "") { - for _, s := range ss { - // Attempt to explicitly match a known unit with a known prefix, - // as some words may look like "units" when matching suffix. - // - // As an example, "thermometers" should not match "meters", but - // "kilometers" should. - if s == p+unit { - return p + unit, base, true - } - } - } - } - - return "", "", false -} - -// Units and their possible prefixes recognized by this library. More can be -// added over time as needed. -var ( - // map a unit to the appropriate base unit. - units = map[string]string{ - // Base units. - "amperes": "amperes", - "bytes": "bytes", - "celsius": "celsius", // Also allow Celsius because it is common in typical Prometheus use cases. - "grams": "grams", - "joules": "joules", - "kelvin": "kelvin", // SI base unit, used in special cases (e.g. color temperature, scientific measurements). - "meters": "meters", // Both American and international spelling permitted. - "metres": "metres", - "seconds": "seconds", - "volts": "volts", - - // Non base units. - // Time. - "minutes": "seconds", - "hours": "seconds", - "days": "seconds", - "weeks": "seconds", - // Temperature. - "kelvins": "kelvin", - "fahrenheit": "celsius", - "rankine": "celsius", - // Length. - "inches": "meters", - "yards": "meters", - "miles": "meters", - // Bytes. - "bits": "bytes", - // Energy. - "calories": "joules", - // Mass. - "pounds": "grams", - "ounces": "grams", - } - - unitPrefixes = []string{ - "pico", - "nano", - "micro", - "milli", - "centi", - "deci", - "deca", - "hecto", - "kilo", - "kibi", - "mega", - "mibi", - "giga", - "gibi", - "tera", - "tebi", - "peta", - "pebi", - } - - // Common abbreviations that we'd like to discourage. - unitAbbreviations = []string{ - "s", - "ms", - "us", - "ns", - "sec", - "b", - "kb", - "mb", - "gb", - "tb", - "pb", - "m", - "h", - "d", - } -) diff --git a/tools/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validation.go b/tools/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validation.go new file mode 100644 index 000000000..e1441598d --- /dev/null +++ b/tools/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validation.go @@ -0,0 +1,34 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package promlint + +import ( + dto "github.com/prometheus/client_model/go" + + "github.com/prometheus/client_golang/prometheus/testutil/promlint/validations" +) + +type Validation = func(mf *dto.MetricFamily) []error + +var defaultValidations = []Validation{ + validations.LintHelp, + validations.LintMetricUnits, + validations.LintCounter, + validations.LintHistogramSummaryReserved, + validations.LintMetricTypeInName, + validations.LintReservedChars, + validations.LintCamelCase, + validations.LintUnitAbbreviations, + validations.LintDuplicateMetric, +} diff --git a/tools/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/counter_validations.go b/tools/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/counter_validations.go new file mode 100644 index 000000000..f2c2c3905 --- /dev/null +++ b/tools/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/counter_validations.go @@ -0,0 +1,40 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package validations + +import ( + "errors" + "strings" + + dto "github.com/prometheus/client_model/go" +) + +// LintCounter detects issues specific to counters, as well as patterns that should +// only be used with counters. +func LintCounter(mf *dto.MetricFamily) []error { + var problems []error + + isCounter := mf.GetType() == dto.MetricType_COUNTER + isUntyped := mf.GetType() == dto.MetricType_UNTYPED + hasTotalSuffix := strings.HasSuffix(mf.GetName(), "_total") + + switch { + case isCounter && !hasTotalSuffix: + problems = append(problems, errors.New(`counter metrics should have "_total" suffix`)) + case !isUntyped && !isCounter && hasTotalSuffix: + problems = append(problems, errors.New(`non-counter metrics should not have "_total" suffix`)) + } + + return problems +} diff --git a/tools/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/duplicate_validations.go b/tools/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/duplicate_validations.go new file mode 100644 index 000000000..68645ed0a --- /dev/null +++ b/tools/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/duplicate_validations.go @@ -0,0 +1,37 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package validations
+
+import (
+	"errors"
+	"reflect"
+
+	dto "github.com/prometheus/client_model/go"
+)
+
+// LintDuplicateMetric detects duplicate metrics.
+func LintDuplicateMetric(mf *dto.MetricFamily) []error {
+	var problems []error
+
+	for i, m := range mf.Metric {
+		for _, k := range mf.Metric[i+1:] {
+			if reflect.DeepEqual(m.Label, k.Label) {
+				problems = append(problems, errors.New("metric not unique"))
+				break
+			}
+		}
+	}
+
+	return problems
+}
diff --git a/tools/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/generic_name_validations.go b/tools/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/generic_name_validations.go
new file mode 100644
index 000000000..de52cfee4
--- /dev/null
+++ b/tools/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/generic_name_validations.go
@@ -0,0 +1,101 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package validations
+
+import (
+	"errors"
+	"fmt"
+	"regexp"
+	"strings"
+
+	dto "github.com/prometheus/client_model/go"
+)
+
+var camelCase = regexp.MustCompile(`[a-z][A-Z]`)
+
+// LintMetricUnits detects issues with metric unit names.
+func LintMetricUnits(mf *dto.MetricFamily) []error {
+	var problems []error
+
+	unit, base, ok := metricUnits(*mf.Name)
+	if !ok {
+		// No known units detected.
+		return nil
+	}
+
+	// Unit is already a base unit.
+	if unit == base {
+		return nil
+	}
+
+	problems = append(problems, fmt.Errorf("use base unit %q instead of %q", base, unit))
+
+	return problems
+}
+
+// LintMetricTypeInName detects when the metric type is included in the metric name.
+func LintMetricTypeInName(mf *dto.MetricFamily) []error {
+	if mf.GetType() == dto.MetricType_UNTYPED {
+		return nil
+	}
+
+	var problems []error
+
+	n := strings.ToLower(mf.GetName())
+	typename := strings.ToLower(mf.GetType().String())
+
+	if strings.Contains(n, "_"+typename+"_") || strings.HasSuffix(n, "_"+typename) {
+		problems = append(problems, fmt.Errorf(`metric name should not include type '%s'`, typename))
+	}
+
+	return problems
+}
+
+// LintReservedChars detects colons in metric names.
+func LintReservedChars(mf *dto.MetricFamily) []error {
+	var problems []error
+	if strings.Contains(mf.GetName(), ":") {
+		problems = append(problems, errors.New("metric names should not contain ':'"))
+	}
+	return problems
+}
+
+// LintCamelCase detects metric names and label names written in camelCase.
+func LintCamelCase(mf *dto.MetricFamily) []error { + var problems []error + if camelCase.FindString(mf.GetName()) != "" { + problems = append(problems, errors.New("metric names should be written in 'snake_case' not 'camelCase'")) + } + + for _, m := range mf.GetMetric() { + for _, l := range m.GetLabel() { + if camelCase.FindString(l.GetName()) != "" { + problems = append(problems, errors.New("label names should be written in 'snake_case' not 'camelCase'")) + } + } + } + return problems +} + +// LintUnitAbbreviations detects abbreviated units in the metric name. +func LintUnitAbbreviations(mf *dto.MetricFamily) []error { + var problems []error + n := strings.ToLower(mf.GetName()) + for _, s := range unitAbbreviations { + if strings.Contains(n, "_"+s+"_") || strings.HasSuffix(n, "_"+s) { + problems = append(problems, errors.New("metric names should not contain abbreviated units")) + } + } + return problems +} diff --git a/tools/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/help_validations.go b/tools/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/help_validations.go new file mode 100644 index 000000000..1df294468 --- /dev/null +++ b/tools/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/help_validations.go @@ -0,0 +1,32 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package validations + +import ( + "errors" + + dto "github.com/prometheus/client_model/go" +) + +// LintHelp detects issues related to the help text for a metric. +func LintHelp(mf *dto.MetricFamily) []error { + var problems []error + + // Expect all metrics to have help text available. + if mf.Help == nil { + problems = append(problems, errors.New("no help text")) + } + + return problems +} diff --git a/tools/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/histogram_validations.go b/tools/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/histogram_validations.go new file mode 100644 index 000000000..6564bdf36 --- /dev/null +++ b/tools/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/histogram_validations.go @@ -0,0 +1,63 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package validations + +import ( + "errors" + "strings" + + dto "github.com/prometheus/client_model/go" +) + +// LintHistogramSummaryReserved detects when other types of metrics use names or labels +// reserved for use by histograms and/or summaries. +func LintHistogramSummaryReserved(mf *dto.MetricFamily) []error { + // These rules do not apply to untyped metrics. + t := mf.GetType() + if t == dto.MetricType_UNTYPED { + return nil + } + + var problems []error + + isHistogram := t == dto.MetricType_HISTOGRAM + isSummary := t == dto.MetricType_SUMMARY + + n := mf.GetName() + + if !isHistogram && strings.HasSuffix(n, "_bucket") { + problems = append(problems, errors.New(`non-histogram metrics should not have "_bucket" suffix`)) + } + if !isHistogram && !isSummary && strings.HasSuffix(n, "_count") { + problems = append(problems, errors.New(`non-histogram and non-summary metrics should not have "_count" suffix`)) + } + if !isHistogram && !isSummary && strings.HasSuffix(n, "_sum") { + problems = append(problems, errors.New(`non-histogram and non-summary metrics should not have "_sum" suffix`)) + } + + for _, m := range mf.GetMetric() { + for _, l := range m.GetLabel() { + ln := l.GetName() + + if !isHistogram && ln == "le" { + problems = append(problems, errors.New(`non-histogram metrics should not have "le" label`)) + } + if !isSummary && ln == "quantile" { + problems = append(problems, errors.New(`non-summary metrics should not have "quantile" label`)) + } + } + } + + return problems +} diff --git a/tools/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/units.go b/tools/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/units.go new file mode 100644 index 000000000..967977d2b --- /dev/null +++ b/tools/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/units.go @@ -0,0 +1,118 @@ +// Copyright 2020 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package validations + +import "strings" + +// Units and their possible prefixes recognized by this library. More can be +// added over time as needed. +var ( + // map a unit to the appropriate base unit. + units = map[string]string{ + // Base units. + "amperes": "amperes", + "bytes": "bytes", + "celsius": "celsius", // Also allow Celsius because it is common in typical Prometheus use cases. + "grams": "grams", + "joules": "joules", + "kelvin": "kelvin", // SI base unit, used in special cases (e.g. color temperature, scientific measurements). + "meters": "meters", // Both American and international spelling permitted. + "metres": "metres", + "seconds": "seconds", + "volts": "volts", + + // Non base units. + // Time. + "minutes": "seconds", + "hours": "seconds", + "days": "seconds", + "weeks": "seconds", + // Temperature. + "kelvins": "kelvin", + "fahrenheit": "celsius", + "rankine": "celsius", + // Length. + "inches": "meters", + "yards": "meters", + "miles": "meters", + // Bytes. + "bits": "bytes", + // Energy. 
+		"calories": "joules",
+		// Mass.
+		"pounds": "grams",
+		"ounces": "grams",
+	}
+
+	unitPrefixes = []string{
+		"pico",
+		"nano",
+		"micro",
+		"milli",
+		"centi",
+		"deci",
+		"deca",
+		"hecto",
+		"kilo",
+		"kibi",
+		"mega",
+		"mibi",
+		"giga",
+		"gibi",
+		"tera",
+		"tebi",
+		"peta",
+		"pebi",
+	}
+
+	// Common abbreviations that we'd like to discourage.
+	unitAbbreviations = []string{
+		"s",
+		"ms",
+		"us",
+		"ns",
+		"sec",
+		"b",
+		"kb",
+		"mb",
+		"gb",
+		"tb",
+		"pb",
+		"m",
+		"h",
+		"d",
+	}
+)
+
+// metricUnits attempts to detect known unit types used as part of a metric name,
+// e.g. "foo_bytes_total" or "bar_baz_milligrams".
+func metricUnits(m string) (unit, base string, ok bool) {
+	ss := strings.Split(m, "_")
+
+	for _, s := range ss {
+		if base, found := units[s]; found {
+			return s, base, true
+		}
+
+		for _, p := range unitPrefixes {
+			if strings.HasPrefix(s, p) {
+				if base, found := units[s[len(p):]]; found {
+					return s, base, true
+				}
+			}
+		}
+	}
+
+	return "", "", false
+}
diff --git a/tools/vendor/github.com/prometheus/client_golang/prometheus/timer.go b/tools/vendor/github.com/prometheus/client_golang/prometheus/timer.go
index 8d5f10523..52344fef5 100644
--- a/tools/vendor/github.com/prometheus/client_golang/prometheus/timer.go
+++ b/tools/vendor/github.com/prometheus/client_golang/prometheus/timer.go
@@ -23,13 +23,24 @@ type Timer struct {
 }
 
 // NewTimer creates a new Timer. The provided Observer is used to observe a
-// duration in seconds. Timer is usually used to time a function call in the
+// duration in seconds. If the Observer implements ExemplarObserver, passing an
+// exemplar later on will also be supported.
+// Timer is usually used to time a function call in the
 // following way:
-// func TimeMe() {
-// timer := NewTimer(myHistogram)
-// defer timer.ObserveDuration()
-// // Do actual work.
-// }
+//
+//	func TimeMe() {
+//		timer := NewTimer(myHistogram)
+//		defer timer.ObserveDuration()
+//		// Do actual work.
+//	}
+//
+// or
+//
+//	func TimeMeWithExemplar() {
+//		timer := NewTimer(myHistogram)
+//		defer timer.ObserveDurationWithExemplar(exemplar)
+//		// Do actual work.
+//	}
 func NewTimer(o Observer) *Timer {
 	return &Timer{
 		begin: time.Now(),
@@ -52,3 +63,19 @@ func (t *Timer) ObserveDuration() time.Duration {
 	}
 	return d
 }
+
+// ObserveDurationWithExemplar is like ObserveDuration, but it will also
+// observe the exemplar with the duration unless exemplar is nil or the
+// provided Observer can't be cast to ExemplarObserver.
+func (t *Timer) ObserveDurationWithExemplar(exemplar Labels) time.Duration {
+	d := time.Since(t.begin)
+	eo, ok := t.observer.(ExemplarObserver)
+	if ok && exemplar != nil {
+		eo.ObserveWithExemplar(d.Seconds(), exemplar)
+		return d
+	}
+	if t.observer != nil {
+		t.observer.Observe(d.Seconds())
+	}
+	return d
+}
diff --git a/tools/vendor/github.com/prometheus/client_golang/prometheus/value.go b/tools/vendor/github.com/prometheus/client_golang/prometheus/value.go
index b4e0ae11c..cc23011fa 100644
--- a/tools/vendor/github.com/prometheus/client_golang/prometheus/value.go
+++ b/tools/vendor/github.com/prometheus/client_golang/prometheus/value.go
@@ -14,16 +14,17 @@
 package prometheus
 
 import (
+	"errors"
 	"fmt"
 	"sort"
 	"time"
 	"unicode/utf8"
 
-	//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
- "github.com/golang/protobuf/proto" - "google.golang.org/protobuf/types/known/timestamppb" + "github.com/prometheus/client_golang/prometheus/internal" dto "github.com/prometheus/client_model/go" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/timestamppb" ) // ValueType is an enumeration of metric types that represent a simple value. @@ -38,6 +39,23 @@ const ( UntypedValue ) +var ( + CounterMetricTypePtr = func() *dto.MetricType { d := dto.MetricType_COUNTER; return &d }() + GaugeMetricTypePtr = func() *dto.MetricType { d := dto.MetricType_GAUGE; return &d }() + UntypedMetricTypePtr = func() *dto.MetricType { d := dto.MetricType_UNTYPED; return &d }() +) + +func (v ValueType) ToDTO() *dto.MetricType { + switch v { + case CounterValue: + return CounterMetricTypePtr + case GaugeValue: + return GaugeMetricTypePtr + default: + return UntypedMetricTypePtr + } +} + // valueFunc is a generic metric for simple values retrieved on collect time // from a function. It implements Metric and Collector. Its effective type is // determined by ValueType. This is a low-level building block used by the @@ -74,7 +92,7 @@ func (v *valueFunc) Desc() *Desc { } func (v *valueFunc) Write(out *dto.Metric) error { - return populateMetric(v.valType, v.function(), v.labelPairs, nil, out) + return populateMetric(v.valType, v.function(), v.labelPairs, nil, out, nil) } // NewConstMetric returns a metric with one fixed value that cannot be @@ -88,14 +106,18 @@ func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues if desc.err != nil { return nil, desc.err } - if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { + if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil { return nil, err } + + metric := &dto.Metric{} + if err := populateMetric(valueType, value, MakeLabelPairs(desc, labelValues), nil, metric, nil); err != nil { + return nil, err + } + return &constMetric{ - desc: desc, - valType: valueType, - val: value, - labelPairs: MakeLabelPairs(desc, labelValues), + desc: desc, + metric: metric, }, nil } @@ -109,11 +131,46 @@ func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelVal return m } +// NewConstMetricWithCreatedTimestamp does the same thing as NewConstMetric, but generates Counters +// with created timestamp set and returns an error for other metric types. +func NewConstMetricWithCreatedTimestamp(desc *Desc, valueType ValueType, value float64, ct time.Time, labelValues ...string) (Metric, error) { + if desc.err != nil { + return nil, desc.err + } + if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil { + return nil, err + } + switch valueType { + case CounterValue: + break + default: + return nil, errors.New("created timestamps are only supported for counters") + } + + metric := &dto.Metric{} + if err := populateMetric(valueType, value, MakeLabelPairs(desc, labelValues), nil, metric, timestamppb.New(ct)); err != nil { + return nil, err + } + + return &constMetric{ + desc: desc, + metric: metric, + }, nil +} + +// MustNewConstMetricWithCreatedTimestamp is a version of NewConstMetricWithCreatedTimestamp that panics where +// NewConstMetricWithCreatedTimestamp would have returned an error. +func MustNewConstMetricWithCreatedTimestamp(desc *Desc, valueType ValueType, value float64, ct time.Time, labelValues ...string) Metric { + m, err := NewConstMetricWithCreatedTimestamp(desc, valueType, value, ct, labelValues...) 
+ if err != nil { + panic(err) + } + return m +} + type constMetric struct { - desc *Desc - valType ValueType - val float64 - labelPairs []*dto.LabelPair + desc *Desc + metric *dto.Metric } func (m *constMetric) Desc() *Desc { @@ -121,7 +178,11 @@ func (m *constMetric) Desc() *Desc { } func (m *constMetric) Write(out *dto.Metric) error { - return populateMetric(m.valType, m.val, m.labelPairs, nil, out) + out.Label = m.metric.Label + out.Counter = m.metric.Counter + out.Gauge = m.metric.Gauge + out.Untyped = m.metric.Untyped + return nil } func populateMetric( @@ -130,11 +191,12 @@ func populateMetric( labelPairs []*dto.LabelPair, e *dto.Exemplar, m *dto.Metric, + ct *timestamppb.Timestamp, ) error { m.Label = labelPairs switch t { case CounterValue: - m.Counter = &dto.Counter{Value: proto.Float64(v), Exemplar: e} + m.Counter = &dto.Counter{Value: proto.Float64(v), Exemplar: e, CreatedTimestamp: ct} case GaugeValue: m.Gauge = &dto.Gauge{Value: proto.Float64(v)} case UntypedValue: @@ -153,29 +215,29 @@ func populateMetric( // This function is only needed for custom Metric implementations. See MetricVec // example. func MakeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair { - totalLen := len(desc.variableLabels) + len(desc.constLabelPairs) + totalLen := len(desc.variableLabels.names) + len(desc.constLabelPairs) if totalLen == 0 { // Super fast path. return nil } - if len(desc.variableLabels) == 0 { + if len(desc.variableLabels.names) == 0 { // Moderately fast path. return desc.constLabelPairs } labelPairs := make([]*dto.LabelPair, 0, totalLen) - for i, n := range desc.variableLabels { + for i, l := range desc.variableLabels.names { labelPairs = append(labelPairs, &dto.LabelPair{ - Name: proto.String(n), + Name: proto.String(l), Value: proto.String(labelValues[i]), }) } labelPairs = append(labelPairs, desc.constLabelPairs...) - sort.Sort(labelPairSorter(labelPairs)) + sort.Sort(internal.LabelPairSorter(labelPairs)) return labelPairs } // ExemplarMaxRunes is the max total number of runes allowed in exemplar labels. -const ExemplarMaxRunes = 64 +const ExemplarMaxRunes = 128 // newExemplar creates a new dto.Exemplar from the provided values. An error is // returned if any of the label names or values are invalid or if the total diff --git a/tools/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/tools/vendor/github.com/prometheus/client_golang/prometheus/vec.go index 4ababe6c9..2c808eece 100644 --- a/tools/vendor/github.com/prometheus/client_golang/prometheus/vec.go +++ b/tools/vendor/github.com/prometheus/client_golang/prometheus/vec.go @@ -72,6 +72,8 @@ func NewMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec { // with a performance overhead (for creating and processing the Labels map). // See also the CounterVec example. func (m *MetricVec) DeleteLabelValues(lvs ...string) bool { + lvs = constrainLabelValues(m.desc, lvs, m.curry) + h, err := m.hashLabelValues(lvs) if err != nil { return false @@ -91,6 +93,9 @@ func (m *MetricVec) DeleteLabelValues(lvs ...string) bool { // This method is used for the same purpose as DeleteLabelValues(...string). See // there for pros and cons of the two methods. 
func (m *MetricVec) Delete(labels Labels) bool { + labels, closer := constrainLabels(m.desc, labels) + defer closer() + h, err := m.hashLabels(labels) if err != nil { return false @@ -99,6 +104,19 @@ func (m *MetricVec) Delete(labels Labels) bool { return m.metricMap.deleteByHashWithLabels(h, labels, m.curry) } +// DeletePartialMatch deletes all metrics where the variable labels contain all of those +// passed in as labels. The order of the labels does not matter. +// It returns the number of metrics deleted. +// +// Note that curried labels will never be matched if deleting from the curried vector. +// To match curried labels with DeletePartialMatch, it must be called on the base vector. +func (m *MetricVec) DeletePartialMatch(labels Labels) int { + labels, closer := constrainLabels(m.desc, labels) + defer closer() + + return m.metricMap.deleteByLabels(labels, m.curry) +} + // Without explicit forwarding of Describe, Collect, Reset, those methods won't // show up in GoDoc. @@ -134,11 +152,11 @@ func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) { oldCurry = m.curry iCurry int ) - for i, label := range m.desc.variableLabels { - val, ok := labels[label] + for i, labelName := range m.desc.variableLabels.names { + val, ok := labels[labelName] if iCurry < len(oldCurry) && oldCurry[iCurry].index == i { if ok { - return nil, fmt.Errorf("label name %q is already curried", label) + return nil, fmt.Errorf("label name %q is already curried", labelName) } newCurry = append(newCurry, oldCurry[iCurry]) iCurry++ @@ -146,7 +164,10 @@ func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) { if !ok { continue // Label stays uncurried. } - newCurry = append(newCurry, curriedLabelValue{i, val}) + newCurry = append(newCurry, curriedLabelValue{ + i, + m.desc.variableLabels.constrain(labelName, val), + }) } } if l := len(oldCurry) + len(labels) - len(newCurry); l > 0 { @@ -189,6 +210,7 @@ func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) { // a wrapper around MetricVec, implementing a vector for a specific Metric // implementation, for example GaugeVec. func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) { + lvs = constrainLabelValues(m.desc, lvs, m.curry) h, err := m.hashLabelValues(lvs) if err != nil { return nil, err @@ -214,6 +236,9 @@ func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) { // around MetricVec, implementing a vector for a specific Metric implementation, // for example GaugeVec. 
 func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
+	labels, closer := constrainLabels(m.desc, labels)
+	defer closer()
+
 	h, err := m.hashLabels(labels)
 	if err != nil {
 		return nil, err
@@ -223,7 +248,7 @@
 }
 
 func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) {
-	if err := validateLabelValues(vals, len(m.desc.variableLabels)-len(m.curry)); err != nil {
+	if err := validateLabelValues(vals, len(m.desc.variableLabels.names)-len(m.curry)); err != nil {
 		return 0, err
 	}
 
@@ -232,7 +257,7 @@
 		curry = m.curry
 		iVals, iCurry int
 	)
-	for i := 0; i < len(m.desc.variableLabels); i++ {
+	for i := 0; i < len(m.desc.variableLabels.names); i++ {
 		if iCurry < len(curry) && curry[iCurry].index == i {
 			h = m.hashAdd(h, curry[iCurry].value)
 			iCurry++
@@ -246,7 +271,7 @@
 }
 
 func (m *MetricVec) hashLabels(labels Labels) (uint64, error) {
-	if err := validateValuesInLabels(labels, len(m.desc.variableLabels)-len(m.curry)); err != nil {
+	if err := validateValuesInLabels(labels, len(m.desc.variableLabels.names)-len(m.curry)); err != nil {
 		return 0, err
 	}
 
@@ -255,17 +280,17 @@
 		curry = m.curry
 		iCurry int
 	)
-	for i, label := range m.desc.variableLabels {
-		val, ok := labels[label]
+	for i, labelName := range m.desc.variableLabels.names {
+		val, ok := labels[labelName]
 		if iCurry < len(curry) && curry[iCurry].index == i {
 			if ok {
-				return 0, fmt.Errorf("label name %q is already curried", label)
+				return 0, fmt.Errorf("label name %q is already curried", labelName)
 			}
 			h = m.hashAdd(h, curry[iCurry].value)
 			iCurry++
 		} else {
 			if !ok {
-				return 0, fmt.Errorf("label name %q missing in label map", label)
+				return 0, fmt.Errorf("label name %q missing in label map", labelName)
 			}
 			h = m.hashAdd(h, val)
 		}
@@ -381,6 +406,82 @@
 	return true
 }
 
+// deleteByLabels deletes a metric if the given labels are present in the metric.
+func (m *metricMap) deleteByLabels(labels Labels, curry []curriedLabelValue) int {
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+
+	var numDeleted int
+
+	for h, metrics := range m.metrics {
+		i := findMetricWithPartialLabels(m.desc, metrics, labels, curry)
+		if i >= len(metrics) {
+			// Didn't find matching labels in this metric slice.
+			continue
+		}
+		delete(m.metrics, h)
+		numDeleted++
+	}
+
+	return numDeleted
+}
+
+// findMetricWithPartialLabels returns the index of the matching metric or
+// len(metrics) if not found.
+func findMetricWithPartialLabels(
+	desc *Desc, metrics []metricWithLabelValues, labels Labels, curry []curriedLabelValue,
+) int {
+	for i, metric := range metrics {
+		if matchPartialLabels(desc, metric.values, labels, curry) {
+			return i
+		}
+	}
+	return len(metrics)
+}
+
+// indexOf searches the given slice of strings for the target string and returns
+// the index or len(items) as well as a boolean whether the search succeeded.
+func indexOf(target string, items []string) (int, bool) {
+	for i, l := range items {
+		if l == target {
+			return i, true
+		}
+	}
+	return len(items), false
+}
+
+// valueMatchesVariableOrCurriedValue determines if a value was previously curried,
+// and returns whether it matches either the "base" value or the curried value accordingly.
+// It also indicates whether the match is against a curried or uncurried value.
+func valueMatchesVariableOrCurriedValue(targetValue string, index int, values []string, curry []curriedLabelValue) (bool, bool) { + for _, curriedValue := range curry { + if curriedValue.index == index { + // This label was curried. See if the curried value matches our target. + return curriedValue.value == targetValue, true + } + } + // This label was not curried. See if the current value matches our target label. + return values[index] == targetValue, false +} + +// matchPartialLabels searches the current metric and returns whether all of the target label:value pairs are present. +func matchPartialLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool { + for l, v := range labels { + // Check if the target label exists in our metrics and get the index. + varLabelIndex, validLabel := indexOf(l, desc.variableLabels.names) + if validLabel { + // Check the value of that label against the target value. + // We don't consider curried values in partial matches. + matches, curried := valueMatchesVariableOrCurriedValue(v, varLabelIndex, values, curry) + if matches && !curried { + continue + } + } + return false + } + return true +} + // getOrCreateMetricWithLabelValues retrieves the metric by hash and label value // or creates it and returns the new one. // @@ -406,7 +507,7 @@ func (m *metricMap) getOrCreateMetricWithLabelValues( return metric } -// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value +// getOrCreateMetricWithLabels retrieves the metric by hash and label value // or creates it and returns the new one. // // This function holds the mutex. @@ -485,7 +586,7 @@ func findMetricWithLabels( return len(metrics) } -func matchLabelValues(values []string, lvs []string, curry []curriedLabelValue) bool { +func matchLabelValues(values, lvs []string, curry []curriedLabelValue) bool { if len(values) != len(lvs)+len(curry) { return false } @@ -511,7 +612,7 @@ func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabe return false } iCurry := 0 - for i, k := range desc.variableLabels { + for i, k := range desc.variableLabels.names { if iCurry < len(curry) && curry[iCurry].index == i { if values[i] != curry[iCurry].value { return false @@ -529,7 +630,7 @@ func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabe func extractLabelValues(desc *Desc, labels Labels, curry []curriedLabelValue) []string { labelValues := make([]string, len(labels)+len(curry)) iCurry := 0 - for i, k := range desc.variableLabels { + for i, k := range desc.variableLabels.names { if iCurry < len(curry) && curry[iCurry].index == i { labelValues[i] = curry[iCurry].value iCurry++ @@ -554,3 +655,55 @@ func inlineLabelValues(lvs []string, curry []curriedLabelValue) []string { } return labelValues } + +var labelsPool = &sync.Pool{ + New: func() interface{} { + return make(Labels) + }, +} + +func constrainLabels(desc *Desc, labels Labels) (Labels, func()) { + if len(desc.variableLabels.labelConstraints) == 0 { + // Fast path when there's no constraints + return labels, func() {} + } + + constrainedLabels := labelsPool.Get().(Labels) + for l, v := range labels { + constrainedLabels[l] = desc.variableLabels.constrain(l, v) + } + + return constrainedLabels, func() { + for k := range constrainedLabels { + delete(constrainedLabels, k) + } + labelsPool.Put(constrainedLabels) + } +} + +func constrainLabelValues(desc *Desc, lvs []string, curry []curriedLabelValue) []string { + if len(desc.variableLabels.labelConstraints) == 0 { + // Fast path 
when there's no constraints
+		return lvs
+	}
+
+	constrainedValues := make([]string, len(lvs))
+	var iCurry, iLVs int
+	for i := 0; i < len(lvs)+len(curry); i++ {
+		if iCurry < len(curry) && curry[iCurry].index == i {
+			iCurry++
+			continue
+		}
+
+		if i < len(desc.variableLabels.names) {
+			constrainedValues[iLVs] = desc.variableLabels.constrain(
+				desc.variableLabels.names[i],
+				lvs[iLVs],
+			)
+		} else {
+			constrainedValues[iLVs] = lvs[iLVs]
+		}
+		iLVs++
+	}
+	return constrainedValues
+}
diff --git a/tools/vendor/github.com/prometheus/client_golang/prometheus/vnext.go b/tools/vendor/github.com/prometheus/client_golang/prometheus/vnext.go
new file mode 100644
index 000000000..42bc3a8f0
--- /dev/null
+++ b/tools/vendor/github.com/prometheus/client_golang/prometheus/vnext.go
@@ -0,0 +1,23 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+type v2 struct{}
+
+// V2 is a struct that can be referenced to access experimental API that might
+// be present in v2 of client golang someday. It offers extended functionality
+// of v1 with a slightly changed API. It is acceptable to use some pieces from v1,
+// e.g. `prometheus.NewGauge`, and some from v2, e.g. `prometheus.V2.NewDesc`,
+// in the same codebase.
+var V2 = v2{}
diff --git a/tools/vendor/github.com/prometheus/client_golang/prometheus/wrap.go b/tools/vendor/github.com/prometheus/client_golang/prometheus/wrap.go
index 74ee93280..25da157f1 100644
--- a/tools/vendor/github.com/prometheus/client_golang/prometheus/wrap.go
+++ b/tools/vendor/github.com/prometheus/client_golang/prometheus/wrap.go
@@ -17,10 +17,10 @@ import (
 	"fmt"
 	"sort"
 
-	//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
-	"github.com/golang/protobuf/proto"
+	"github.com/prometheus/client_golang/prometheus/internal"
 
 	dto "github.com/prometheus/client_model/go"
+	"google.golang.org/protobuf/proto"
 )
 
 // WrapRegistererWith returns a Registerer wrapping the provided
@@ -182,7 +182,7 @@ func (m *wrappingMetric) Write(out *dto.Metric) error {
 			Value: proto.String(lv),
 		})
 	}
-	sort.Sort(labelPairSorter(out.Label))
+	sort.Sort(internal.LabelPairSorter(out.Label))
 	return nil
 }
 
@@ -204,7 +204,7 @@ func wrapDesc(desc *Desc, prefix string, labels Labels) *Desc {
 		constLabels[ln] = lv
 	}
 	// NewDesc will do remaining validations.
-	newDesc := NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels)
+	newDesc := V2.NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels)
 	// Propagate errors if there were any. This will override any error
 	// created by NewDesc above, i.e. earlier errors get precedence.
if desc.err != nil { diff --git a/tools/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/tools/vendor/github.com/prometheus/client_model/go/metrics.pb.go index 2f4930d9d..2f1549075 100644 --- a/tools/vendor/github.com/prometheus/client_model/go/metrics.pb.go +++ b/tools/vendor/github.com/prometheus/client_model/go/metrics.pb.go @@ -1,51 +1,75 @@ +// Copyright 2013 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + // Code generated by protoc-gen-go. DO NOT EDIT. -// source: metrics.proto +// versions: +// protoc-gen-go v1.30.0 +// protoc v3.20.3 +// source: io/prometheus/client/metrics.proto package io_prometheus_client import ( - fmt "fmt" - proto "github.com/golang/protobuf/proto" - timestamp "github.com/golang/protobuf/ptypes/timestamp" - math "math" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) type MetricType int32 const ( - MetricType_COUNTER MetricType = 0 - MetricType_GAUGE MetricType = 1 - MetricType_SUMMARY MetricType = 2 - MetricType_UNTYPED MetricType = 3 + // COUNTER must use the Metric field "counter". + MetricType_COUNTER MetricType = 0 + // GAUGE must use the Metric field "gauge". + MetricType_GAUGE MetricType = 1 + // SUMMARY must use the Metric field "summary". + MetricType_SUMMARY MetricType = 2 + // UNTYPED must use the Metric field "untyped". + MetricType_UNTYPED MetricType = 3 + // HISTOGRAM must use the Metric field "histogram". MetricType_HISTOGRAM MetricType = 4 + // GAUGE_HISTOGRAM must use the Metric field "histogram". + MetricType_GAUGE_HISTOGRAM MetricType = 5 ) -var MetricType_name = map[int32]string{ - 0: "COUNTER", - 1: "GAUGE", - 2: "SUMMARY", - 3: "UNTYPED", - 4: "HISTOGRAM", -} - -var MetricType_value = map[string]int32{ - "COUNTER": 0, - "GAUGE": 1, - "SUMMARY": 2, - "UNTYPED": 3, - "HISTOGRAM": 4, -} +// Enum value maps for MetricType. 
+var ( + MetricType_name = map[int32]string{ + 0: "COUNTER", + 1: "GAUGE", + 2: "SUMMARY", + 3: "UNTYPED", + 4: "HISTOGRAM", + 5: "GAUGE_HISTOGRAM", + } + MetricType_value = map[string]int32{ + "COUNTER": 0, + "GAUGE": 1, + "SUMMARY": 2, + "UNTYPED": 3, + "HISTOGRAM": 4, + "GAUGE_HISTOGRAM": 5, + } +) func (x MetricType) Enum() *MetricType { p := new(MetricType) @@ -54,670 +78,1322 @@ func (x MetricType) Enum() *MetricType { } func (x MetricType) String() string { - return proto.EnumName(MetricType_name, int32(x)) + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (MetricType) Descriptor() protoreflect.EnumDescriptor { + return file_io_prometheus_client_metrics_proto_enumTypes[0].Descriptor() +} + +func (MetricType) Type() protoreflect.EnumType { + return &file_io_prometheus_client_metrics_proto_enumTypes[0] } -func (x *MetricType) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(MetricType_value, data, "MetricType") +func (x MetricType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *MetricType) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) if err != nil { return err } - *x = MetricType(value) + *x = MetricType(num) return nil } +// Deprecated: Use MetricType.Descriptor instead. func (MetricType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{0} + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{0} } type LabelPair struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *LabelPair) Reset() { *m = LabelPair{} } -func (m *LabelPair) String() string { return proto.CompactTextString(m) } -func (*LabelPair) ProtoMessage() {} -func (*LabelPair) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{0} + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` } -func (m *LabelPair) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LabelPair.Unmarshal(m, b) -} -func (m *LabelPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LabelPair.Marshal(b, m, deterministic) -} -func (m *LabelPair) XXX_Merge(src proto.Message) { - xxx_messageInfo_LabelPair.Merge(m, src) +func (x *LabelPair) Reset() { + *x = LabelPair{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *LabelPair) XXX_Size() int { - return xxx_messageInfo_LabelPair.Size(m) + +func (x *LabelPair) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *LabelPair) XXX_DiscardUnknown() { - xxx_messageInfo_LabelPair.DiscardUnknown(m) + +func (*LabelPair) ProtoMessage() {} + +func (x *LabelPair) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return 
mi.MessageOf(x) } -var xxx_messageInfo_LabelPair proto.InternalMessageInfo +// Deprecated: Use LabelPair.ProtoReflect.Descriptor instead. +func (*LabelPair) Descriptor() ([]byte, []int) { + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{0} +} -func (m *LabelPair) GetName() string { - if m != nil && m.Name != nil { - return *m.Name +func (x *LabelPair) GetName() string { + if x != nil && x.Name != nil { + return *x.Name } return "" } -func (m *LabelPair) GetValue() string { - if m != nil && m.Value != nil { - return *m.Value +func (x *LabelPair) GetValue() string { + if x != nil && x.Value != nil { + return *x.Value } return "" } type Gauge struct { - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *Gauge) Reset() { *m = Gauge{} } -func (m *Gauge) String() string { return proto.CompactTextString(m) } -func (*Gauge) ProtoMessage() {} -func (*Gauge) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{1} + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` } -func (m *Gauge) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Gauge.Unmarshal(m, b) -} -func (m *Gauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Gauge.Marshal(b, m, deterministic) -} -func (m *Gauge) XXX_Merge(src proto.Message) { - xxx_messageInfo_Gauge.Merge(m, src) +func (x *Gauge) Reset() { + *x = Gauge{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Gauge) XXX_Size() int { - return xxx_messageInfo_Gauge.Size(m) + +func (x *Gauge) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Gauge) XXX_DiscardUnknown() { - xxx_messageInfo_Gauge.DiscardUnknown(m) + +func (*Gauge) ProtoMessage() {} + +func (x *Gauge) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Gauge proto.InternalMessageInfo +// Deprecated: Use Gauge.ProtoReflect.Descriptor instead. 
+func (*Gauge) Descriptor() ([]byte, []int) { + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{1} +} -func (m *Gauge) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value +func (x *Gauge) GetValue() float64 { + if x != nil && x.Value != nil { + return *x.Value } return 0 } type Counter struct { - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` - Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar" json:"exemplar,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *Counter) Reset() { *m = Counter{} } -func (m *Counter) String() string { return proto.CompactTextString(m) } -func (*Counter) ProtoMessage() {} -func (*Counter) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{2} + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar" json:"exemplar,omitempty"` + CreatedTimestamp *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=created_timestamp,json=createdTimestamp" json:"created_timestamp,omitempty"` } -func (m *Counter) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Counter.Unmarshal(m, b) -} -func (m *Counter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Counter.Marshal(b, m, deterministic) -} -func (m *Counter) XXX_Merge(src proto.Message) { - xxx_messageInfo_Counter.Merge(m, src) +func (x *Counter) Reset() { + *x = Counter{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Counter) XXX_Size() int { - return xxx_messageInfo_Counter.Size(m) + +func (x *Counter) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Counter) XXX_DiscardUnknown() { - xxx_messageInfo_Counter.DiscardUnknown(m) + +func (*Counter) ProtoMessage() {} + +func (x *Counter) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Counter proto.InternalMessageInfo +// Deprecated: Use Counter.ProtoReflect.Descriptor instead. 
+func (*Counter) Descriptor() ([]byte, []int) { + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{2} +} -func (m *Counter) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value +func (x *Counter) GetValue() float64 { + if x != nil && x.Value != nil { + return *x.Value } return 0 } -func (m *Counter) GetExemplar() *Exemplar { - if m != nil { - return m.Exemplar +func (x *Counter) GetExemplar() *Exemplar { + if x != nil { + return x.Exemplar } return nil } -type Quantile struct { - Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"` - Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (x *Counter) GetCreatedTimestamp() *timestamppb.Timestamp { + if x != nil { + return x.CreatedTimestamp + } + return nil } -func (m *Quantile) Reset() { *m = Quantile{} } -func (m *Quantile) String() string { return proto.CompactTextString(m) } -func (*Quantile) ProtoMessage() {} -func (*Quantile) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{3} -} +type Quantile struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *Quantile) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Quantile.Unmarshal(m, b) + Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"` + Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` } -func (m *Quantile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Quantile.Marshal(b, m, deterministic) -} -func (m *Quantile) XXX_Merge(src proto.Message) { - xxx_messageInfo_Quantile.Merge(m, src) + +func (x *Quantile) Reset() { + *x = Quantile{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Quantile) XXX_Size() int { - return xxx_messageInfo_Quantile.Size(m) + +func (x *Quantile) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Quantile) XXX_DiscardUnknown() { - xxx_messageInfo_Quantile.DiscardUnknown(m) + +func (*Quantile) ProtoMessage() {} + +func (x *Quantile) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Quantile proto.InternalMessageInfo +// Deprecated: Use Quantile.ProtoReflect.Descriptor instead. 
+func (*Quantile) Descriptor() ([]byte, []int) { + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{3} +} -func (m *Quantile) GetQuantile() float64 { - if m != nil && m.Quantile != nil { - return *m.Quantile +func (x *Quantile) GetQuantile() float64 { + if x != nil && x.Quantile != nil { + return *x.Quantile } return 0 } -func (m *Quantile) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value +func (x *Quantile) GetValue() float64 { + if x != nil && x.Value != nil { + return *x.Value } return 0 } type Summary struct { - SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` - SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` - Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` + SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` + Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"` + CreatedTimestamp *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=created_timestamp,json=createdTimestamp" json:"created_timestamp,omitempty"` +} + +func (x *Summary) Reset() { + *x = Summary{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Summary) Reset() { *m = Summary{} } -func (m *Summary) String() string { return proto.CompactTextString(m) } -func (*Summary) ProtoMessage() {} -func (*Summary) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{4} +func (x *Summary) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Summary) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Summary.Unmarshal(m, b) -} -func (m *Summary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Summary.Marshal(b, m, deterministic) -} -func (m *Summary) XXX_Merge(src proto.Message) { - xxx_messageInfo_Summary.Merge(m, src) -} -func (m *Summary) XXX_Size() int { - return xxx_messageInfo_Summary.Size(m) -} -func (m *Summary) XXX_DiscardUnknown() { - xxx_messageInfo_Summary.DiscardUnknown(m) +func (*Summary) ProtoMessage() {} + +func (x *Summary) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Summary proto.InternalMessageInfo +// Deprecated: Use Summary.ProtoReflect.Descriptor instead. 
+func (*Summary) Descriptor() ([]byte, []int) { + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{4} +} -func (m *Summary) GetSampleCount() uint64 { - if m != nil && m.SampleCount != nil { - return *m.SampleCount +func (x *Summary) GetSampleCount() uint64 { + if x != nil && x.SampleCount != nil { + return *x.SampleCount } return 0 } -func (m *Summary) GetSampleSum() float64 { - if m != nil && m.SampleSum != nil { - return *m.SampleSum +func (x *Summary) GetSampleSum() float64 { + if x != nil && x.SampleSum != nil { + return *x.SampleSum } return 0 } -func (m *Summary) GetQuantile() []*Quantile { - if m != nil { - return m.Quantile +func (x *Summary) GetQuantile() []*Quantile { + if x != nil { + return x.Quantile } return nil } -type Untyped struct { - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (x *Summary) GetCreatedTimestamp() *timestamppb.Timestamp { + if x != nil { + return x.CreatedTimestamp + } + return nil } -func (m *Untyped) Reset() { *m = Untyped{} } -func (m *Untyped) String() string { return proto.CompactTextString(m) } -func (*Untyped) ProtoMessage() {} -func (*Untyped) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{5} -} +type Untyped struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *Untyped) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Untyped.Unmarshal(m, b) + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` } -func (m *Untyped) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Untyped.Marshal(b, m, deterministic) -} -func (m *Untyped) XXX_Merge(src proto.Message) { - xxx_messageInfo_Untyped.Merge(m, src) + +func (x *Untyped) Reset() { + *x = Untyped{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Untyped) XXX_Size() int { - return xxx_messageInfo_Untyped.Size(m) + +func (x *Untyped) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Untyped) XXX_DiscardUnknown() { - xxx_messageInfo_Untyped.DiscardUnknown(m) + +func (*Untyped) ProtoMessage() {} + +func (x *Untyped) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Untyped proto.InternalMessageInfo +// Deprecated: Use Untyped.ProtoReflect.Descriptor instead. 
+func (*Untyped) Descriptor() ([]byte, []int) { + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{5} +} -func (m *Untyped) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value +func (x *Untyped) GetValue() float64 { + if x != nil && x.Value != nil { + return *x.Value } return 0 } type Histogram struct { - SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` - SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` - Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` + SampleCountFloat *float64 `protobuf:"fixed64,4,opt,name=sample_count_float,json=sampleCountFloat" json:"sample_count_float,omitempty"` // Overrides sample_count if > 0. + SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` + // Buckets for the conventional histogram. + Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` // Ordered in increasing order of upper_bound, +Inf bucket is optional. + CreatedTimestamp *timestamppb.Timestamp `protobuf:"bytes,15,opt,name=created_timestamp,json=createdTimestamp" json:"created_timestamp,omitempty"` + // schema defines the bucket schema. Currently, valid numbers are -4 <= n <= 8. + // They are all for base-2 bucket schemas, where 1 is a bucket boundary in each case, and + // then each power of two is divided into 2^n logarithmic buckets. + // Or in other words, each bucket boundary is the previous boundary times 2^(2^-n). + // In the future, more bucket schemas may be added using numbers < -4 or > 8. + Schema *int32 `protobuf:"zigzag32,5,opt,name=schema" json:"schema,omitempty"` + ZeroThreshold *float64 `protobuf:"fixed64,6,opt,name=zero_threshold,json=zeroThreshold" json:"zero_threshold,omitempty"` // Breadth of the zero bucket. + ZeroCount *uint64 `protobuf:"varint,7,opt,name=zero_count,json=zeroCount" json:"zero_count,omitempty"` // Count in zero bucket. + ZeroCountFloat *float64 `protobuf:"fixed64,8,opt,name=zero_count_float,json=zeroCountFloat" json:"zero_count_float,omitempty"` // Overrides sb_zero_count if > 0. + // Negative buckets for the native histogram. + NegativeSpan []*BucketSpan `protobuf:"bytes,9,rep,name=negative_span,json=negativeSpan" json:"negative_span,omitempty"` + // Use either "negative_delta" or "negative_count", the former for + // regular histograms with integer counts, the latter for float + // histograms. + NegativeDelta []int64 `protobuf:"zigzag64,10,rep,name=negative_delta,json=negativeDelta" json:"negative_delta,omitempty"` // Count delta of each bucket compared to previous one (or to zero for 1st bucket). + NegativeCount []float64 `protobuf:"fixed64,11,rep,name=negative_count,json=negativeCount" json:"negative_count,omitempty"` // Absolute count of each bucket. + // Positive buckets for the native histogram. + // Use a no-op span (offset 0, length 0) for a native histogram without any + // observations yet and with a zero_threshold of 0. Otherwise, it would be + // indistinguishable from a classic histogram. 
+ PositiveSpan []*BucketSpan `protobuf:"bytes,12,rep,name=positive_span,json=positiveSpan" json:"positive_span,omitempty"` + // Use either "positive_delta" or "positive_count", the former for + // regular histograms with integer counts, the latter for float + // histograms. + PositiveDelta []int64 `protobuf:"zigzag64,13,rep,name=positive_delta,json=positiveDelta" json:"positive_delta,omitempty"` // Count delta of each bucket compared to previous one (or to zero for 1st bucket). + PositiveCount []float64 `protobuf:"fixed64,14,rep,name=positive_count,json=positiveCount" json:"positive_count,omitempty"` // Absolute count of each bucket. + // Only used for native histograms. These exemplars MUST have a timestamp. + Exemplars []*Exemplar `protobuf:"bytes,16,rep,name=exemplars" json:"exemplars,omitempty"` +} + +func (x *Histogram) Reset() { + *x = Histogram{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Histogram) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Histogram) ProtoMessage() {} + +func (x *Histogram) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *Histogram) Reset() { *m = Histogram{} } -func (m *Histogram) String() string { return proto.CompactTextString(m) } -func (*Histogram) ProtoMessage() {} +// Deprecated: Use Histogram.ProtoReflect.Descriptor instead. func (*Histogram) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{6} + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{6} } -func (m *Histogram) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Histogram.Unmarshal(m, b) +func (x *Histogram) GetSampleCount() uint64 { + if x != nil && x.SampleCount != nil { + return *x.SampleCount + } + return 0 } -func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Histogram.Marshal(b, m, deterministic) + +func (x *Histogram) GetSampleCountFloat() float64 { + if x != nil && x.SampleCountFloat != nil { + return *x.SampleCountFloat + } + return 0 } -func (m *Histogram) XXX_Merge(src proto.Message) { - xxx_messageInfo_Histogram.Merge(m, src) + +func (x *Histogram) GetSampleSum() float64 { + if x != nil && x.SampleSum != nil { + return *x.SampleSum + } + return 0 } -func (m *Histogram) XXX_Size() int { - return xxx_messageInfo_Histogram.Size(m) + +func (x *Histogram) GetBucket() []*Bucket { + if x != nil { + return x.Bucket + } + return nil } -func (m *Histogram) XXX_DiscardUnknown() { - xxx_messageInfo_Histogram.DiscardUnknown(m) + +func (x *Histogram) GetCreatedTimestamp() *timestamppb.Timestamp { + if x != nil { + return x.CreatedTimestamp + } + return nil +} + +func (x *Histogram) GetSchema() int32 { + if x != nil && x.Schema != nil { + return *x.Schema + } + return 0 } -var xxx_messageInfo_Histogram proto.InternalMessageInfo +func (x *Histogram) GetZeroThreshold() float64 { + if x != nil && x.ZeroThreshold != nil { + return *x.ZeroThreshold + } + return 0 +} -func (m *Histogram) GetSampleCount() uint64 { - if m != nil && m.SampleCount != nil { - return *m.SampleCount +func (x *Histogram) GetZeroCount() uint64 { + if x != nil && x.ZeroCount != nil { + 
return *x.ZeroCount } return 0 } -func (m *Histogram) GetSampleSum() float64 { - if m != nil && m.SampleSum != nil { - return *m.SampleSum +func (x *Histogram) GetZeroCountFloat() float64 { + if x != nil && x.ZeroCountFloat != nil { + return *x.ZeroCountFloat } return 0 } -func (m *Histogram) GetBucket() []*Bucket { - if m != nil { - return m.Bucket +func (x *Histogram) GetNegativeSpan() []*BucketSpan { + if x != nil { + return x.NegativeSpan } return nil } -type Bucket struct { - CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"` - UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"` - Exemplar *Exemplar `protobuf:"bytes,3,opt,name=exemplar" json:"exemplar,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (x *Histogram) GetNegativeDelta() []int64 { + if x != nil { + return x.NegativeDelta + } + return nil } -func (m *Bucket) Reset() { *m = Bucket{} } -func (m *Bucket) String() string { return proto.CompactTextString(m) } -func (*Bucket) ProtoMessage() {} -func (*Bucket) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{7} +func (x *Histogram) GetNegativeCount() []float64 { + if x != nil { + return x.NegativeCount + } + return nil } -func (m *Bucket) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Bucket.Unmarshal(m, b) +func (x *Histogram) GetPositiveSpan() []*BucketSpan { + if x != nil { + return x.PositiveSpan + } + return nil } -func (m *Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Bucket.Marshal(b, m, deterministic) + +func (x *Histogram) GetPositiveDelta() []int64 { + if x != nil { + return x.PositiveDelta + } + return nil } -func (m *Bucket) XXX_Merge(src proto.Message) { - xxx_messageInfo_Bucket.Merge(m, src) + +func (x *Histogram) GetPositiveCount() []float64 { + if x != nil { + return x.PositiveCount + } + return nil } -func (m *Bucket) XXX_Size() int { - return xxx_messageInfo_Bucket.Size(m) + +func (x *Histogram) GetExemplars() []*Exemplar { + if x != nil { + return x.Exemplars + } + return nil +} + +// A Bucket of a conventional histogram, each of which is treated as +// an individual counter-like time series by Prometheus. +type Bucket struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"` // Cumulative in increasing order. + CumulativeCountFloat *float64 `protobuf:"fixed64,4,opt,name=cumulative_count_float,json=cumulativeCountFloat" json:"cumulative_count_float,omitempty"` // Overrides cumulative_count if > 0. + UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"` // Inclusive. 
+ Exemplar *Exemplar `protobuf:"bytes,3,opt,name=exemplar" json:"exemplar,omitempty"` +} + +func (x *Bucket) Reset() { + *x = Bucket{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bucket) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bucket) ProtoMessage() {} + +func (x *Bucket) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *Bucket) XXX_DiscardUnknown() { - xxx_messageInfo_Bucket.DiscardUnknown(m) + +// Deprecated: Use Bucket.ProtoReflect.Descriptor instead. +func (*Bucket) Descriptor() ([]byte, []int) { + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{7} } -var xxx_messageInfo_Bucket proto.InternalMessageInfo +func (x *Bucket) GetCumulativeCount() uint64 { + if x != nil && x.CumulativeCount != nil { + return *x.CumulativeCount + } + return 0 +} -func (m *Bucket) GetCumulativeCount() uint64 { - if m != nil && m.CumulativeCount != nil { - return *m.CumulativeCount +func (x *Bucket) GetCumulativeCountFloat() float64 { + if x != nil && x.CumulativeCountFloat != nil { + return *x.CumulativeCountFloat } return 0 } -func (m *Bucket) GetUpperBound() float64 { - if m != nil && m.UpperBound != nil { - return *m.UpperBound +func (x *Bucket) GetUpperBound() float64 { + if x != nil && x.UpperBound != nil { + return *x.UpperBound } return 0 } -func (m *Bucket) GetExemplar() *Exemplar { - if m != nil { - return m.Exemplar +func (x *Bucket) GetExemplar() *Exemplar { + if x != nil { + return x.Exemplar } return nil } -type Exemplar struct { - Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` - Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` - Timestamp *timestamp.Timestamp `protobuf:"bytes,3,opt,name=timestamp" json:"timestamp,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +// A BucketSpan defines a number of consecutive buckets in a native +// histogram with their offset. Logically, it would be more +// straightforward to include the bucket counts in the Span. However, +// the protobuf representation is more compact in the way the data is +// structured here (with all the buckets in a single array separate +// from the Spans). +type BucketSpan struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Offset *int32 `protobuf:"zigzag32,1,opt,name=offset" json:"offset,omitempty"` // Gap to previous span, or starting point for 1st span (which can be negative). + Length *uint32 `protobuf:"varint,2,opt,name=length" json:"length,omitempty"` // Length of consecutive buckets. 
} -func (m *Exemplar) Reset() { *m = Exemplar{} } -func (m *Exemplar) String() string { return proto.CompactTextString(m) } -func (*Exemplar) ProtoMessage() {} -func (*Exemplar) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{8} +func (x *BucketSpan) Reset() { + *x = BucketSpan{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Exemplar) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Exemplar.Unmarshal(m, b) +func (x *BucketSpan) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Exemplar.Marshal(b, m, deterministic) + +func (*BucketSpan) ProtoMessage() {} + +func (x *BucketSpan) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BucketSpan.ProtoReflect.Descriptor instead. +func (*BucketSpan) Descriptor() ([]byte, []int) { + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{8} +} + +func (x *BucketSpan) GetOffset() int32 { + if x != nil && x.Offset != nil { + return *x.Offset + } + return 0 +} + +func (x *BucketSpan) GetLength() uint32 { + if x != nil && x.Length != nil { + return *x.Length + } + return 0 +} + +type Exemplar struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` + Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` + Timestamp *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=timestamp" json:"timestamp,omitempty"` // OpenMetrics-style. } -func (m *Exemplar) XXX_Merge(src proto.Message) { - xxx_messageInfo_Exemplar.Merge(m, src) + +func (x *Exemplar) Reset() { + *x = Exemplar{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Exemplar) XXX_Size() int { - return xxx_messageInfo_Exemplar.Size(m) + +func (x *Exemplar) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Exemplar) XXX_DiscardUnknown() { - xxx_messageInfo_Exemplar.DiscardUnknown(m) + +func (*Exemplar) ProtoMessage() {} + +func (x *Exemplar) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Exemplar proto.InternalMessageInfo +// Deprecated: Use Exemplar.ProtoReflect.Descriptor instead. 
+func (*Exemplar) Descriptor() ([]byte, []int) { + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{9} +} -func (m *Exemplar) GetLabel() []*LabelPair { - if m != nil { - return m.Label +func (x *Exemplar) GetLabel() []*LabelPair { + if x != nil { + return x.Label } return nil } -func (m *Exemplar) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value +func (x *Exemplar) GetValue() float64 { + if x != nil && x.Value != nil { + return *x.Value } return 0 } -func (m *Exemplar) GetTimestamp() *timestamp.Timestamp { - if m != nil { - return m.Timestamp +func (x *Exemplar) GetTimestamp() *timestamppb.Timestamp { + if x != nil { + return x.Timestamp } return nil } type Metric struct { - Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` - Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"` - Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"` - Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"` - Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"` - Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"` - TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Metric) Reset() { *m = Metric{} } -func (m *Metric) String() string { return proto.CompactTextString(m) } -func (*Metric) ProtoMessage() {} -func (*Metric) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{9} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` + Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"` + Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"` + Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"` + Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"` + Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"` + TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"` +} + +func (x *Metric) Reset() { + *x = Metric{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Metric) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Metric.Unmarshal(m, b) +func (x *Metric) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Metric.Marshal(b, m, deterministic) -} -func (m *Metric) XXX_Merge(src proto.Message) { - xxx_messageInfo_Metric.Merge(m, src) -} -func (m *Metric) XXX_Size() int { - return xxx_messageInfo_Metric.Size(m) -} -func (m *Metric) XXX_DiscardUnknown() { - xxx_messageInfo_Metric.DiscardUnknown(m) + +func (*Metric) ProtoMessage() {} + +func (x *Metric) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return 
mi.MessageOf(x) } -var xxx_messageInfo_Metric proto.InternalMessageInfo +// Deprecated: Use Metric.ProtoReflect.Descriptor instead. +func (*Metric) Descriptor() ([]byte, []int) { + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{10} +} -func (m *Metric) GetLabel() []*LabelPair { - if m != nil { - return m.Label +func (x *Metric) GetLabel() []*LabelPair { + if x != nil { + return x.Label } return nil } -func (m *Metric) GetGauge() *Gauge { - if m != nil { - return m.Gauge +func (x *Metric) GetGauge() *Gauge { + if x != nil { + return x.Gauge } return nil } -func (m *Metric) GetCounter() *Counter { - if m != nil { - return m.Counter +func (x *Metric) GetCounter() *Counter { + if x != nil { + return x.Counter } return nil } -func (m *Metric) GetSummary() *Summary { - if m != nil { - return m.Summary +func (x *Metric) GetSummary() *Summary { + if x != nil { + return x.Summary } return nil } -func (m *Metric) GetUntyped() *Untyped { - if m != nil { - return m.Untyped +func (x *Metric) GetUntyped() *Untyped { + if x != nil { + return x.Untyped } return nil } -func (m *Metric) GetHistogram() *Histogram { - if m != nil { - return m.Histogram +func (x *Metric) GetHistogram() *Histogram { + if x != nil { + return x.Histogram } return nil } -func (m *Metric) GetTimestampMs() int64 { - if m != nil && m.TimestampMs != nil { - return *m.TimestampMs +func (x *Metric) GetTimestampMs() int64 { + if x != nil && x.TimestampMs != nil { + return *x.TimestampMs } return 0 } type MetricFamily struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"` - Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"` - Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MetricFamily) Reset() { *m = MetricFamily{} } -func (m *MetricFamily) String() string { return proto.CompactTextString(m) } -func (*MetricFamily) ProtoMessage() {} -func (*MetricFamily) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{10} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"` + Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"` + Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"` + Unit *string `protobuf:"bytes,5,opt,name=unit" json:"unit,omitempty"` +} + +func (x *MetricFamily) Reset() { + *x = MetricFamily{} + if protoimpl.UnsafeEnabled { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *MetricFamily) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MetricFamily.Unmarshal(m, b) -} -func (m *MetricFamily) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MetricFamily.Marshal(b, m, deterministic) -} -func (m *MetricFamily) XXX_Merge(src proto.Message) { - xxx_messageInfo_MetricFamily.Merge(m, src) -} -func (m *MetricFamily) XXX_Size() int { - return xxx_messageInfo_MetricFamily.Size(m) +func (x *MetricFamily) String() string { + return 
protoimpl.X.MessageStringOf(x) } -func (m *MetricFamily) XXX_DiscardUnknown() { - xxx_messageInfo_MetricFamily.DiscardUnknown(m) + +func (*MetricFamily) ProtoMessage() {} + +func (x *MetricFamily) ProtoReflect() protoreflect.Message { + mi := &file_io_prometheus_client_metrics_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_MetricFamily proto.InternalMessageInfo +// Deprecated: Use MetricFamily.ProtoReflect.Descriptor instead. +func (*MetricFamily) Descriptor() ([]byte, []int) { + return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{11} +} -func (m *MetricFamily) GetName() string { - if m != nil && m.Name != nil { - return *m.Name +func (x *MetricFamily) GetName() string { + if x != nil && x.Name != nil { + return *x.Name } return "" } -func (m *MetricFamily) GetHelp() string { - if m != nil && m.Help != nil { - return *m.Help +func (x *MetricFamily) GetHelp() string { + if x != nil && x.Help != nil { + return *x.Help } return "" } -func (m *MetricFamily) GetType() MetricType { - if m != nil && m.Type != nil { - return *m.Type +func (x *MetricFamily) GetType() MetricType { + if x != nil && x.Type != nil { + return *x.Type } return MetricType_COUNTER } -func (m *MetricFamily) GetMetric() []*Metric { - if m != nil { - return m.Metric +func (x *MetricFamily) GetMetric() []*Metric { + if x != nil { + return x.Metric } return nil } -func init() { - proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value) - proto.RegisterType((*LabelPair)(nil), "io.prometheus.client.LabelPair") - proto.RegisterType((*Gauge)(nil), "io.prometheus.client.Gauge") - proto.RegisterType((*Counter)(nil), "io.prometheus.client.Counter") - proto.RegisterType((*Quantile)(nil), "io.prometheus.client.Quantile") - proto.RegisterType((*Summary)(nil), "io.prometheus.client.Summary") - proto.RegisterType((*Untyped)(nil), "io.prometheus.client.Untyped") - proto.RegisterType((*Histogram)(nil), "io.prometheus.client.Histogram") - proto.RegisterType((*Bucket)(nil), "io.prometheus.client.Bucket") - proto.RegisterType((*Exemplar)(nil), "io.prometheus.client.Exemplar") - proto.RegisterType((*Metric)(nil), "io.prometheus.client.Metric") - proto.RegisterType((*MetricFamily)(nil), "io.prometheus.client.MetricFamily") -} - -func init() { proto.RegisterFile("metrics.proto", fileDescriptor_6039342a2ba47b72) } - -var fileDescriptor_6039342a2ba47b72 = []byte{ - // 665 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0xcd, 0x6e, 0xd3, 0x4c, - 0x14, 0xfd, 0xdc, 0x38, 0x3f, 0xbe, 0x69, 0x3f, 0xa2, 0x51, 0x17, 0x56, 0xa1, 0x24, 0x78, 0x55, - 0x58, 0x38, 0xa2, 0x6a, 0x05, 0x2a, 0xb0, 0x68, 0x4b, 0x48, 0x91, 0x48, 0x5b, 0x26, 0xc9, 0xa2, - 0xb0, 0x88, 0x1c, 0x77, 0x70, 0x2c, 0x3c, 0xb1, 0xb1, 0x67, 0x2a, 0xb2, 0x66, 0xc1, 0x16, 0x5e, - 0x81, 0x17, 0x05, 0xcd, 0x8f, 0x6d, 0x2a, 0xb9, 0x95, 0x40, 0xec, 0x66, 0xee, 0x3d, 0xe7, 0xfa, - 0xcc, 0xf8, 0x9c, 0x81, 0x0d, 0x4a, 0x58, 0x1a, 0xfa, 0x99, 0x9b, 0xa4, 0x31, 0x8b, 0xd1, 0x66, - 0x18, 0x8b, 0x15, 0x25, 0x6c, 0x41, 0x78, 0xe6, 0xfa, 0x51, 0x48, 0x96, 0x6c, 0xab, 0x1b, 0xc4, - 0x71, 0x10, 0x91, 0xbe, 0xc4, 0xcc, 0xf9, 0x87, 0x3e, 0x0b, 0x29, 0xc9, 0x98, 0x47, 0x13, 0x45, - 0x73, 0xf6, 0xc1, 0x7a, 0xe3, 0xcd, 0x49, 0x74, 0xee, 0x85, 0x29, 0x42, 0x60, 0x2e, 0x3d, 0x4a, - 0x6c, 0xa3, 0x67, 0xec, 
0x58, 0x58, 0xae, 0xd1, 0x26, 0xd4, 0xaf, 0xbc, 0x88, 0x13, 0x7b, 0x4d, - 0x16, 0xd5, 0xc6, 0xd9, 0x86, 0xfa, 0xd0, 0xe3, 0xc1, 0x6f, 0x6d, 0xc1, 0x31, 0xf2, 0xf6, 0x7b, - 0x68, 0x1e, 0xc7, 0x7c, 0xc9, 0x48, 0x5a, 0x0d, 0x40, 0x07, 0xd0, 0x22, 0x9f, 0x09, 0x4d, 0x22, - 0x2f, 0x95, 0x83, 0xdb, 0xbb, 0xf7, 0xdd, 0xaa, 0x03, 0xb8, 0x03, 0x8d, 0xc2, 0x05, 0xde, 0x79, - 0x0e, 0xad, 0xb7, 0xdc, 0x5b, 0xb2, 0x30, 0x22, 0x68, 0x0b, 0x5a, 0x9f, 0xf4, 0x5a, 0x7f, 0xa0, - 0xd8, 0x5f, 0x57, 0x5e, 0x48, 0xfb, 0x6a, 0x40, 0x73, 0xcc, 0x29, 0xf5, 0xd2, 0x15, 0x7a, 0x00, - 0xeb, 0x99, 0x47, 0x93, 0x88, 0xcc, 0x7c, 0xa1, 0x56, 0x4e, 0x30, 0x71, 0x5b, 0xd5, 0xe4, 0x01, - 0xd0, 0x36, 0x80, 0x86, 0x64, 0x9c, 0xea, 0x49, 0x96, 0xaa, 0x8c, 0x39, 0x15, 0xe7, 0x28, 0xbe, - 0x5f, 0xeb, 0xd5, 0x6e, 0x3e, 0x47, 0xae, 0xb8, 0xd4, 0xe7, 0x74, 0xa1, 0x39, 0x5d, 0xb2, 0x55, - 0x42, 0x2e, 0x6f, 0xb8, 0xc5, 0x2f, 0x06, 0x58, 0x27, 0x61, 0xc6, 0xe2, 0x20, 0xf5, 0xe8, 0x3f, - 0x10, 0xbb, 0x07, 0x8d, 0x39, 0xf7, 0x3f, 0x12, 0xa6, 0xa5, 0xde, 0xab, 0x96, 0x7a, 0x24, 0x31, - 0x58, 0x63, 0x9d, 0x6f, 0x06, 0x34, 0x54, 0x09, 0x3d, 0x84, 0x8e, 0xcf, 0x29, 0x8f, 0x3c, 0x16, - 0x5e, 0x5d, 0x97, 0x71, 0xa7, 0xac, 0x2b, 0x29, 0x5d, 0x68, 0xf3, 0x24, 0x21, 0xe9, 0x6c, 0x1e, - 0xf3, 0xe5, 0xa5, 0xd6, 0x02, 0xb2, 0x74, 0x24, 0x2a, 0xd7, 0x1c, 0x50, 0xfb, 0x43, 0x07, 0x7c, - 0x37, 0xa0, 0x95, 0x97, 0xd1, 0x3e, 0xd4, 0x23, 0xe1, 0x60, 0xdb, 0x90, 0x87, 0xea, 0x56, 0x4f, - 0x29, 0x4c, 0x8e, 0x15, 0xba, 0xda, 0x1d, 0xe8, 0x29, 0x58, 0x45, 0x42, 0xb4, 0xac, 0x2d, 0x57, - 0x65, 0xc8, 0xcd, 0x33, 0xe4, 0x4e, 0x72, 0x04, 0x2e, 0xc1, 0xce, 0xcf, 0x35, 0x68, 0x8c, 0x64, - 0x22, 0xff, 0x56, 0xd1, 0x63, 0xa8, 0x07, 0x22, 0x53, 0x3a, 0x10, 0x77, 0xab, 0x69, 0x32, 0x76, - 0x58, 0x21, 0xd1, 0x13, 0x68, 0xfa, 0x2a, 0x67, 0x5a, 0xec, 0x76, 0x35, 0x49, 0x87, 0x11, 0xe7, - 0x68, 0x41, 0xcc, 0x54, 0x08, 0x6c, 0xf3, 0x36, 0xa2, 0x4e, 0x0a, 0xce, 0xd1, 0x82, 0xc8, 0x95, - 0x69, 0xed, 0xfa, 0x6d, 0x44, 0xed, 0x6c, 0x9c, 0xa3, 0xd1, 0x0b, 0xb0, 0x16, 0xb9, 0x97, 0xed, - 0xa6, 0xa4, 0xde, 0x70, 0x31, 0x85, 0xe5, 0x71, 0xc9, 0x10, 0xee, 0x2f, 0xee, 0x7a, 0x46, 0x33, - 0xbb, 0xd1, 0x33, 0x76, 0x6a, 0xb8, 0x5d, 0xd4, 0x46, 0x99, 0xf3, 0xc3, 0x80, 0x75, 0xf5, 0x07, - 0x5e, 0x79, 0x34, 0x8c, 0x56, 0x95, 0xcf, 0x19, 0x02, 0x73, 0x41, 0xa2, 0x44, 0xbf, 0x66, 0x72, - 0x8d, 0xf6, 0xc0, 0x14, 0x1a, 0xe5, 0x15, 0xfe, 0xbf, 0xdb, 0xab, 0x56, 0xa5, 0x26, 0x4f, 0x56, - 0x09, 0xc1, 0x12, 0x2d, 0xd2, 0xa4, 0x5e, 0x60, 0xdb, 0xbc, 0x2d, 0x4d, 0x8a, 0x87, 0x35, 0xf6, - 0xd1, 0x08, 0xa0, 0x9c, 0x84, 0xda, 0xd0, 0x3c, 0x3e, 0x9b, 0x9e, 0x4e, 0x06, 0xb8, 0xf3, 0x1f, - 0xb2, 0xa0, 0x3e, 0x3c, 0x9c, 0x0e, 0x07, 0x1d, 0x43, 0xd4, 0xc7, 0xd3, 0xd1, 0xe8, 0x10, 0x5f, - 0x74, 0xd6, 0xc4, 0x66, 0x7a, 0x3a, 0xb9, 0x38, 0x1f, 0xbc, 0xec, 0xd4, 0xd0, 0x06, 0x58, 0x27, - 0xaf, 0xc7, 0x93, 0xb3, 0x21, 0x3e, 0x1c, 0x75, 0xcc, 0x23, 0x0c, 0x95, 0xef, 0xfe, 0xbb, 0x83, - 0x20, 0x64, 0x0b, 0x3e, 0x77, 0xfd, 0x98, 0xf6, 0xcb, 0x6e, 0x5f, 0x75, 0x67, 0x34, 0xbe, 0x24, - 0x51, 0x3f, 0x88, 0x9f, 0x85, 0xf1, 0xac, 0xec, 0xce, 0x54, 0xf7, 0x57, 0x00, 0x00, 0x00, 0xff, - 0xff, 0xd0, 0x84, 0x91, 0x73, 0x59, 0x06, 0x00, 0x00, +func (x *MetricFamily) GetUnit() string { + if x != nil && x.Unit != nil { + return *x.Unit + } + return "" +} + +var File_io_prometheus_client_metrics_proto protoreflect.FileDescriptor + +var file_io_prometheus_client_metrics_proto_rawDesc = []byte{ + 0x0a, 0x22, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2f, + 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 
0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, + 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x35, 0x0a, 0x09, 0x4c, + 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x22, 0x1d, 0x0a, 0x05, 0x47, 0x61, 0x75, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x22, 0xa4, 0x01, 0x0a, 0x07, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x14, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, + 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, 0x65, + 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x12, + 0x47, 0x0a, 0x11, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x3c, 0x0a, 0x08, 0x51, 0x75, 0x61, 0x6e, + 0x74, 0x69, 0x6c, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, + 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xd0, 0x01, 0x0a, 0x07, 0x53, 0x75, 0x6d, 0x6d, 0x61, + 0x72, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, + 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, + 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, + 0x73, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x73, 0x61, 0x6d, 0x70, 0x6c, + 0x65, 0x53, 0x75, 0x6d, 0x12, 0x3a, 0x0a, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, + 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x51, 0x75, + 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x52, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, + 0x12, 0x47, 0x0a, 0x11, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x63, 
0x72, 0x65, 0x61, 0x74, 0x65, 0x64, + 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x1f, 0x0a, 0x07, 0x55, 0x6e, 0x74, + 0x79, 0x70, 0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xea, 0x05, 0x0a, 0x09, 0x48, + 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x61, 0x6d, 0x70, + 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, + 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2c, 0x0a, 0x12, 0x73, + 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, + 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x10, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x43, + 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6d, + 0x70, 0x6c, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x73, + 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x53, 0x75, 0x6d, 0x12, 0x34, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, + 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, + 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x47, + 0x0a, 0x11, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x54, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x11, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, + 0x25, 0x0a, 0x0e, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, + 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0d, 0x7a, 0x65, 0x72, 0x6f, 0x54, 0x68, 0x72, + 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x63, + 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x7a, 0x65, 0x72, 0x6f, + 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x01, 0x52, + 0x0e, 0x7a, 0x65, 0x72, 0x6f, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12, + 0x45, 0x0a, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x73, 0x70, 0x61, 0x6e, + 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, + 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x52, 0x0c, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, + 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, + 0x76, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x12, 0x52, 0x0d, + 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x12, 0x25, 0x0a, + 0x0e, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, + 0x0b, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 
0x65, 0x43, + 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x45, 0x0a, 0x0d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, + 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x69, 0x6f, + 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x52, 0x0c, 0x70, + 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x70, + 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x18, 0x0d, 0x20, + 0x03, 0x28, 0x12, 0x52, 0x0d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x6c, + 0x74, 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, + 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0d, 0x70, 0x6f, 0x73, 0x69, + 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x3c, 0x0a, 0x09, 0x65, 0x78, 0x65, + 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x73, 0x18, 0x10, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, + 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x09, 0x65, 0x78, + 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x73, 0x22, 0xc6, 0x01, 0x0a, 0x06, 0x42, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, + 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x63, 0x75, + 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x34, 0x0a, + 0x16, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x14, 0x63, + 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c, + 0x6f, 0x61, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x75, 0x70, 0x70, 0x65, 0x72, 0x5f, 0x62, 0x6f, 0x75, + 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x75, 0x70, 0x70, 0x65, 0x72, 0x42, + 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, + 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, + 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, + 0x22, 0x3c, 0x0a, 0x0a, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x16, + 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x11, 0x52, 0x06, + 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x22, 0x91, + 0x01, 0x0a, 0x08, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x12, 0x35, 0x0a, 0x05, 0x6c, + 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, + 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x52, 0x05, 0x6c, 0x61, 0x62, + 0x65, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, + 0x73, 0x74, 
0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x22, 0xff, 0x02, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x35, 0x0a, + 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, + 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x52, 0x05, 0x6c, + 0x61, 0x62, 0x65, 0x6c, 0x12, 0x31, 0x0a, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, + 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x61, 0x75, 0x67, 0x65, + 0x52, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, + 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, + 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, + 0x12, 0x37, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, + 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, + 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x37, 0x0a, 0x07, 0x75, 0x6e, 0x74, + 0x79, 0x70, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, + 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x2e, 0x55, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x52, 0x07, 0x75, 0x6e, 0x74, 0x79, 0x70, + 0x65, 0x64, 0x12, 0x3d, 0x0a, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, + 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x48, 0x69, 0x73, + 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x52, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, + 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x6d, + 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x4d, 0x73, 0x22, 0xb6, 0x01, 0x0a, 0x0c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x46, + 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x65, 0x6c, + 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x65, 0x6c, 0x70, 0x12, 0x34, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, 0x69, 0x6f, + 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x18, 0x04, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, + 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 
0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x6e, 0x69, + 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x2a, 0x62, 0x0a, + 0x0a, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x43, + 0x4f, 0x55, 0x4e, 0x54, 0x45, 0x52, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x41, 0x55, 0x47, + 0x45, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x4d, 0x4d, 0x41, 0x52, 0x59, 0x10, 0x02, + 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x54, 0x59, 0x50, 0x45, 0x44, 0x10, 0x03, 0x12, 0x0d, 0x0a, + 0x09, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x04, 0x12, 0x13, 0x0a, 0x0f, + 0x47, 0x41, 0x55, 0x47, 0x45, 0x5f, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, + 0x05, 0x42, 0x52, 0x0a, 0x14, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, + 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, + 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2f, 0x67, 0x6f, + 0x3b, 0x69, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x5f, 0x63, + 0x6c, 0x69, 0x65, 0x6e, 0x74, +} + +var ( + file_io_prometheus_client_metrics_proto_rawDescOnce sync.Once + file_io_prometheus_client_metrics_proto_rawDescData = file_io_prometheus_client_metrics_proto_rawDesc +) + +func file_io_prometheus_client_metrics_proto_rawDescGZIP() []byte { + file_io_prometheus_client_metrics_proto_rawDescOnce.Do(func() { + file_io_prometheus_client_metrics_proto_rawDescData = protoimpl.X.CompressGZIP(file_io_prometheus_client_metrics_proto_rawDescData) + }) + return file_io_prometheus_client_metrics_proto_rawDescData +} + +var file_io_prometheus_client_metrics_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_io_prometheus_client_metrics_proto_msgTypes = make([]protoimpl.MessageInfo, 12) +var file_io_prometheus_client_metrics_proto_goTypes = []interface{}{ + (MetricType)(0), // 0: io.prometheus.client.MetricType + (*LabelPair)(nil), // 1: io.prometheus.client.LabelPair + (*Gauge)(nil), // 2: io.prometheus.client.Gauge + (*Counter)(nil), // 3: io.prometheus.client.Counter + (*Quantile)(nil), // 4: io.prometheus.client.Quantile + (*Summary)(nil), // 5: io.prometheus.client.Summary + (*Untyped)(nil), // 6: io.prometheus.client.Untyped + (*Histogram)(nil), // 7: io.prometheus.client.Histogram + (*Bucket)(nil), // 8: io.prometheus.client.Bucket + (*BucketSpan)(nil), // 9: io.prometheus.client.BucketSpan + (*Exemplar)(nil), // 10: io.prometheus.client.Exemplar + (*Metric)(nil), // 11: io.prometheus.client.Metric + (*MetricFamily)(nil), // 12: io.prometheus.client.MetricFamily + (*timestamppb.Timestamp)(nil), // 13: google.protobuf.Timestamp +} +var file_io_prometheus_client_metrics_proto_depIdxs = []int32{ + 10, // 0: io.prometheus.client.Counter.exemplar:type_name -> io.prometheus.client.Exemplar + 13, // 1: io.prometheus.client.Counter.created_timestamp:type_name -> google.protobuf.Timestamp + 4, // 2: io.prometheus.client.Summary.quantile:type_name -> io.prometheus.client.Quantile + 13, // 3: io.prometheus.client.Summary.created_timestamp:type_name -> google.protobuf.Timestamp + 8, // 4: io.prometheus.client.Histogram.bucket:type_name -> io.prometheus.client.Bucket + 13, // 5: io.prometheus.client.Histogram.created_timestamp:type_name -> 
google.protobuf.Timestamp + 9, // 6: io.prometheus.client.Histogram.negative_span:type_name -> io.prometheus.client.BucketSpan + 9, // 7: io.prometheus.client.Histogram.positive_span:type_name -> io.prometheus.client.BucketSpan + 10, // 8: io.prometheus.client.Histogram.exemplars:type_name -> io.prometheus.client.Exemplar + 10, // 9: io.prometheus.client.Bucket.exemplar:type_name -> io.prometheus.client.Exemplar + 1, // 10: io.prometheus.client.Exemplar.label:type_name -> io.prometheus.client.LabelPair + 13, // 11: io.prometheus.client.Exemplar.timestamp:type_name -> google.protobuf.Timestamp + 1, // 12: io.prometheus.client.Metric.label:type_name -> io.prometheus.client.LabelPair + 2, // 13: io.prometheus.client.Metric.gauge:type_name -> io.prometheus.client.Gauge + 3, // 14: io.prometheus.client.Metric.counter:type_name -> io.prometheus.client.Counter + 5, // 15: io.prometheus.client.Metric.summary:type_name -> io.prometheus.client.Summary + 6, // 16: io.prometheus.client.Metric.untyped:type_name -> io.prometheus.client.Untyped + 7, // 17: io.prometheus.client.Metric.histogram:type_name -> io.prometheus.client.Histogram + 0, // 18: io.prometheus.client.MetricFamily.type:type_name -> io.prometheus.client.MetricType + 11, // 19: io.prometheus.client.MetricFamily.metric:type_name -> io.prometheus.client.Metric + 20, // [20:20] is the sub-list for method output_type + 20, // [20:20] is the sub-list for method input_type + 20, // [20:20] is the sub-list for extension type_name + 20, // [20:20] is the sub-list for extension extendee + 0, // [0:20] is the sub-list for field type_name +} + +func init() { file_io_prometheus_client_metrics_proto_init() } +func file_io_prometheus_client_metrics_proto_init() { + if File_io_prometheus_client_metrics_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_io_prometheus_client_metrics_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LabelPair); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Gauge); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Counter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Quantile); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Summary); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Untyped); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*Histogram); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bucket); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BucketSpan); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Exemplar); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Metric); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_io_prometheus_client_metrics_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MetricFamily); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_io_prometheus_client_metrics_proto_rawDesc, + NumEnums: 1, + NumMessages: 12, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_io_prometheus_client_metrics_proto_goTypes, + DependencyIndexes: file_io_prometheus_client_metrics_proto_depIdxs, + EnumInfos: file_io_prometheus_client_metrics_proto_enumTypes, + MessageInfos: file_io_prometheus_client_metrics_proto_msgTypes, + }.Build() + File_io_prometheus_client_metrics_proto = out.File + file_io_prometheus_client_metrics_proto_rawDesc = nil + file_io_prometheus_client_metrics_proto_goTypes = nil + file_io_prometheus_client_metrics_proto_depIdxs = nil } diff --git a/tools/vendor/github.com/prometheus/common/expfmt/decode.go b/tools/vendor/github.com/prometheus/common/expfmt/decode.go index 7657f841d..1448439b7 100644 --- a/tools/vendor/github.com/prometheus/common/expfmt/decode.go +++ b/tools/vendor/github.com/prometheus/common/expfmt/decode.go @@ -14,6 +14,7 @@ package expfmt import ( + "bufio" "fmt" "io" "math" @@ -21,8 +22,8 @@ import ( "net/http" dto "github.com/prometheus/client_model/go" + "google.golang.org/protobuf/encoding/protodelim" - "github.com/matttproud/golang_protobuf_extensions/pbutil" "github.com/prometheus/common/model" ) @@ -72,22 +73,24 @@ func ResponseFormat(h http.Header) Format { // NewDecoder returns a new decoder based on the given input format. // If the input format does not imply otherwise, a text format decoder is returned. func NewDecoder(r io.Reader, format Format) Decoder { - switch format { - case FmtProtoDelim: - return &protoDecoder{r: r} + switch format.FormatType() { + case TypeProtoDelim: + return &protoDecoder{r: bufio.NewReader(r)} } return &textDecoder{r: r} } // protoDecoder implements the Decoder interface for protocol buffers. type protoDecoder struct { - r io.Reader + r protodelim.Reader } // Decode implements the Decoder interface. 
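The decoder rework below is worth pausing on: the protobuf path now reads through a protodelim.Reader, and the text path parses the whole input up front and then hands out one MetricFamily per Decode call, returning io.EOF once drained. A minimal consumption sketch under those assumptions (the *http.Response and helper name are illustrative, not part of this patch):

import (
	"errors"
	"io"
	"net/http"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

// drainMetricFamilies decodes every family from a scrape response.
func drainMetricFamilies(resp *http.Response) ([]*dto.MetricFamily, error) {
	dec := expfmt.NewDecoder(resp.Body, expfmt.ResponseFormat(resp.Header))
	var fams []*dto.MetricFamily
	for {
		mf := &dto.MetricFamily{}
		if err := dec.Decode(mf); err != nil {
			if errors.Is(err, io.EOF) {
				// The reworked textDecoder signals io.EOF only after
				// every cached MetricFamily has been handed out.
				return fams, nil
			}
			return nil, err
		}
		fams = append(fams, mf)
	}
}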
func (d *protoDecoder) Decode(v *dto.MetricFamily) error { - _, err := pbutil.ReadDelimited(d.r, v) - if err != nil { + opts := protodelim.UnmarshalOptions{ + MaxSize: -1, + } + if err := opts.UnmarshalFrom(d.r, v); err != nil { return err } if !model.IsValidMetricName(model.LabelValue(v.GetName())) { @@ -115,32 +118,31 @@ func (d *protoDecoder) Decode(v *dto.MetricFamily) error { // textDecoder implements the Decoder interface for the text protocol. type textDecoder struct { r io.Reader - p TextParser - fams []*dto.MetricFamily + fams map[string]*dto.MetricFamily + err error } // Decode implements the Decoder interface. func (d *textDecoder) Decode(v *dto.MetricFamily) error { - // TODO(fabxc): Wrap this as a line reader to make streaming safer. - if len(d.fams) == 0 { - // No cached metric families, read everything and parse metrics. - fams, err := d.p.TextToMetricFamilies(d.r) - if err != nil { - return err - } - if len(fams) == 0 { - return io.EOF - } - d.fams = make([]*dto.MetricFamily, 0, len(fams)) - for _, f := range fams { - d.fams = append(d.fams, f) + if d.err == nil { + // Read all metrics in one shot. + var p TextParser + d.fams, d.err = p.TextToMetricFamilies(d.r) + // If we don't get an error, store io.EOF for the end. + if d.err == nil { + d.err = io.EOF } } - - *v = *d.fams[0] - d.fams = d.fams[1:] - - return nil + // Pick off one MetricFamily per Decode until there's nothing left. + for key, fam := range d.fams { + v.Name = fam.Name + v.Help = fam.Help + v.Type = fam.Type + v.Metric = fam.Metric + delete(d.fams, key) + return nil + } + return d.err } // SampleDecoder wraps a Decoder to extract samples from the metric families diff --git a/tools/vendor/github.com/prometheus/common/expfmt/encode.go b/tools/vendor/github.com/prometheus/common/expfmt/encode.go index 64dc0eb40..d7f3d76f5 100644 --- a/tools/vendor/github.com/prometheus/common/expfmt/encode.go +++ b/tools/vendor/github.com/prometheus/common/expfmt/encode.go @@ -18,9 +18,12 @@ import ( "io" "net/http" - "github.com/golang/protobuf/proto" //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. - "github.com/matttproud/golang_protobuf_extensions/pbutil" - "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg" + "google.golang.org/protobuf/encoding/protodelim" + "google.golang.org/protobuf/encoding/prototext" + + "github.com/prometheus/common/model" + + "github.com/munnerz/goautoneg" dto "github.com/prometheus/client_model/go" ) @@ -60,23 +63,32 @@ func (ec encoderCloser) Close() error { // as the support is still experimental. To include the option to negotiate // FmtOpenMetrics, use NegotiateOpenMetrics. func Negotiate(h http.Header) Format { + escapingScheme := Format(fmt.Sprintf("; escaping=%s", Format(model.NameEscapingScheme.String()))) for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) { + if escapeParam := ac.Params[model.EscapingKey]; escapeParam != "" { + switch Format(escapeParam) { + case model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues: + escapingScheme = Format("; escaping=" + escapeParam) + default: + // If the escaping parameter is unknown, ignore it. 
+ } + } ver := ac.Params["version"] if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { switch ac.Params["encoding"] { case "delimited": - return FmtProtoDelim + return FmtProtoDelim + escapingScheme case "text": - return FmtProtoText + return FmtProtoText + escapingScheme case "compact-text": - return FmtProtoCompact + return FmtProtoCompact + escapingScheme } } if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { - return FmtText + return FmtText + escapingScheme } } - return FmtText + return FmtText + escapingScheme } // NegotiateIncludingOpenMetrics works like Negotiate but includes @@ -84,26 +96,40 @@ func Negotiate(h http.Header) Format { // temporary and will disappear once FmtOpenMetrics is fully supported and as // such may be negotiated by the normal Negotiate function. func NegotiateIncludingOpenMetrics(h http.Header) Format { + escapingScheme := Format(fmt.Sprintf("; escaping=%s", Format(model.NameEscapingScheme.String()))) for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) { + if escapeParam := ac.Params[model.EscapingKey]; escapeParam != "" { + switch Format(escapeParam) { + case model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues: + escapingScheme = Format("; escaping=" + escapeParam) + default: + // If the escaping parameter is unknown, ignore it. + } + } ver := ac.Params["version"] if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { switch ac.Params["encoding"] { case "delimited": - return FmtProtoDelim + return FmtProtoDelim + escapingScheme case "text": - return FmtProtoText + return FmtProtoText + escapingScheme case "compact-text": - return FmtProtoCompact + return FmtProtoCompact + escapingScheme } } if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { - return FmtText + return FmtText + escapingScheme } - if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion || ver == "") { - return FmtOpenMetrics + if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion_0_0_1 || ver == OpenMetricsVersion_1_0_0 || ver == "") { + switch ver { + case OpenMetricsVersion_1_0_0: + return FmtOpenMetrics_1_0_0 + escapingScheme + default: + return FmtOpenMetrics_0_0_1 + escapingScheme + } } } - return FmtText + return FmtText + escapingScheme } // NewEncoder returns a new encoder based on content type negotiation. All @@ -112,44 +138,54 @@ func NegotiateIncludingOpenMetrics(h http.Header) Format { // for FmtOpenMetrics, but a future (breaking) release will add the Close method // to the Encoder interface directly. The current version of the Encoder // interface is kept for backwards compatibility. -func NewEncoder(w io.Writer, format Format) Encoder { - switch format { - case FmtProtoDelim: +// In cases where the Format does not allow for UTF-8 names, the global +// NameEscapingScheme will be applied. +// +// NewEncoder can be called with additional options to customize the OpenMetrics text output. +// For example: +// NewEncoder(w, FmtOpenMetrics_1_0_0, WithCreatedLines()) +// +// Extra options are ignored for all other formats. 
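Combined with the escaping-aware negotiation above, the variadic signature lets one handler honor both the Accept header and the OpenMetrics extras. A hedged sketch of the intended call pattern (handler shape and names are illustrative):

import (
	"net/http"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

// serveMetrics writes the given families in the negotiated format.
func serveMetrics(w http.ResponseWriter, r *http.Request, fams []*dto.MetricFamily) {
	format := expfmt.NegotiateIncludingOpenMetrics(r.Header)
	w.Header().Set("Content-Type", string(format))
	enc := expfmt.NewEncoder(w, format, expfmt.WithCreatedLines(), expfmt.WithUnit())
	for _, mf := range fams {
		if err := enc.Encode(mf); err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
	}
	// For OpenMetrics, Close writes the required trailing `# EOF` line.
	if closer, ok := enc.(expfmt.Closer); ok {
		closer.Close()
	}
}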
+func NewEncoder(w io.Writer, format Format, options ...EncoderOption) Encoder { + escapingScheme := format.ToEscapingScheme() + + switch format.FormatType() { + case TypeProtoDelim: return encoderCloser{ encode: func(v *dto.MetricFamily) error { - _, err := pbutil.WriteDelimited(w, v) + _, err := protodelim.MarshalTo(w, v) return err }, close: func() error { return nil }, } - case FmtProtoCompact: + case TypeProtoCompact: return encoderCloser{ encode: func(v *dto.MetricFamily) error { - _, err := fmt.Fprintln(w, v.String()) + _, err := fmt.Fprintln(w, model.EscapeMetricFamily(v, escapingScheme).String()) return err }, close: func() error { return nil }, } - case FmtProtoText: + case TypeProtoText: return encoderCloser{ encode: func(v *dto.MetricFamily) error { - _, err := fmt.Fprintln(w, proto.MarshalTextString(v)) + _, err := fmt.Fprintln(w, prototext.Format(model.EscapeMetricFamily(v, escapingScheme))) return err }, close: func() error { return nil }, } - case FmtText: + case TypeTextPlain: return encoderCloser{ encode: func(v *dto.MetricFamily) error { - _, err := MetricFamilyToText(w, v) + _, err := MetricFamilyToText(w, model.EscapeMetricFamily(v, escapingScheme)) return err }, close: func() error { return nil }, } - case FmtOpenMetrics: + case TypeOpenMetrics: return encoderCloser{ encode: func(v *dto.MetricFamily) error { - _, err := MetricFamilyToOpenMetrics(w, v) + _, err := MetricFamilyToOpenMetrics(w, model.EscapeMetricFamily(v, escapingScheme), options...) return err }, close: func() error { diff --git a/tools/vendor/github.com/prometheus/common/expfmt/expfmt.go b/tools/vendor/github.com/prometheus/common/expfmt/expfmt.go index 0f176fa64..b26886560 100644 --- a/tools/vendor/github.com/prometheus/common/expfmt/expfmt.go +++ b/tools/vendor/github.com/prometheus/common/expfmt/expfmt.go @@ -14,28 +14,194 @@ // Package expfmt contains tools for reading and writing Prometheus metrics. package expfmt +import ( + "errors" + "strings" + + "github.com/prometheus/common/model" +) + // Format specifies the HTTP content type of the different wire protocols. type Format string -// Constants to assemble the Content-Type values for the different wire protocols. +// Constants to assemble the Content-Type values for the different wire +// protocols. The Content-Type strings here are all for the legacy exposition +// formats, where valid characters for metric names and label names are limited. +// Support for arbitrary UTF-8 characters in those names is already partially +// implemented in this module (see model.ValidationScheme), but to actually use +// it on the wire, new content-type strings will have to be agreed upon and +// added here. const ( - TextVersion = "0.0.4" - ProtoType = `application/vnd.google.protobuf` - ProtoProtocol = `io.prometheus.client.MetricFamily` - ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" - OpenMetricsType = `application/openmetrics-text` - OpenMetricsVersion = "0.0.1" - - // The Content-Type values for the different wire protocols. - FmtUnknown Format = `` - FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8` - FmtProtoDelim Format = ProtoFmt + ` encoding=delimited` - FmtProtoText Format = ProtoFmt + ` encoding=text` + TextVersion = "0.0.4" + ProtoType = `application/vnd.google.protobuf` + ProtoProtocol = `io.prometheus.client.MetricFamily` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead. 
+ ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" + OpenMetricsType = `application/openmetrics-text` + OpenMetricsVersion_0_0_1 = "0.0.1" + OpenMetricsVersion_1_0_0 = "1.0.0" + + // The Content-Type values for the different wire protocols. Do not do direct + // comparisons to these constants, instead use the comparison functions. + // Deprecated: Use expfmt.NewFormat(expfmt.TypeUnknown) instead. + FmtUnknown Format = `` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeTextPlain) instead. + FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoDelim) instead. + FmtProtoDelim Format = ProtoFmt + ` encoding=delimited` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoText) instead. + FmtProtoText Format = ProtoFmt + ` encoding=text` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead. FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text` - FmtOpenMetrics Format = OpenMetricsType + `; version=` + OpenMetricsVersion + `; charset=utf-8` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead. + FmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead. + FmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8` ) const ( hdrContentType = "Content-Type" hdrAccept = "Accept" ) + +// FormatType is a Go enum representing the overall category for the given +// Format. As the number of Format permutations increases, doing basic string +// comparisons is not feasible, so this enum captures the most useful +// high-level attribute of the Format string. +type FormatType int + +const ( + TypeUnknown FormatType = iota + TypeProtoCompact + TypeProtoDelim + TypeProtoText + TypeTextPlain + TypeOpenMetrics +) + +// NewFormat generates a new Format from the type provided. Mostly used for +// tests, most Formats should be generated as part of content negotiation in +// encode.go. If a type has more than one version, the latest version will be +// returned. +func NewFormat(t FormatType) Format { + switch t { + case TypeProtoCompact: + return FmtProtoCompact + case TypeProtoDelim: + return FmtProtoDelim + case TypeProtoText: + return FmtProtoText + case TypeTextPlain: + return FmtText + case TypeOpenMetrics: + return FmtOpenMetrics_1_0_0 + default: + return FmtUnknown + } +} + +// NewOpenMetricsFormat generates a new OpenMetrics format matching the +// specified version number. +func NewOpenMetricsFormat(version string) (Format, error) { + if version == OpenMetricsVersion_0_0_1 { + return FmtOpenMetrics_0_0_1, nil + } + if version == OpenMetricsVersion_1_0_0 { + return FmtOpenMetrics_1_0_0, nil + } + return FmtUnknown, errors.New("unknown open metrics version string") +} + +// WithEscapingScheme returns a copy of Format with the specified escaping +// scheme appended to the end. If an escaping scheme already exists it is +// removed.
+func (f Format) WithEscapingScheme(s model.EscapingScheme) Format { + var terms []string + for _, p := range strings.Split(string(f), ";") { + toks := strings.Split(p, "=") + if len(toks) != 2 { + trimmed := strings.TrimSpace(p) + if len(trimmed) > 0 { + terms = append(terms, trimmed) + } + continue + } + key := strings.TrimSpace(toks[0]) + if key != model.EscapingKey { + terms = append(terms, strings.TrimSpace(p)) + } + } + terms = append(terms, model.EscapingKey+"="+s.String()) + return Format(strings.Join(terms, "; ")) +} + +// FormatType deduces an overall FormatType for the given format. +func (f Format) FormatType() FormatType { + toks := strings.Split(string(f), ";") + params := make(map[string]string) + for i, t := range toks { + if i == 0 { + continue + } + args := strings.Split(t, "=") + if len(args) != 2 { + continue + } + params[strings.TrimSpace(args[0])] = strings.TrimSpace(args[1]) + } + + switch strings.TrimSpace(toks[0]) { + case ProtoType: + if params["proto"] != ProtoProtocol { + return TypeUnknown + } + switch params["encoding"] { + case "delimited": + return TypeProtoDelim + case "text": + return TypeProtoText + case "compact-text": + return TypeProtoCompact + default: + return TypeUnknown + } + case OpenMetricsType: + if params["charset"] != "utf-8" { + return TypeUnknown + } + return TypeOpenMetrics + case "text/plain": + v, ok := params["version"] + if !ok { + return TypeTextPlain + } + if v == TextVersion { + return TypeTextPlain + } + return TypeUnknown + default: + return TypeUnknown + } +} + +// ToEscapingScheme returns an EscapingScheme depending on the Format. Iff the +// Format contains an escaping=allow-utf-8 term, it will select NoEscaping. If a valid +// "escaping" term exists, that will be used. Otherwise, the global default will +// be returned. +func (format Format) ToEscapingScheme() model.EscapingScheme { + for _, p := range strings.Split(string(format), ";") { + toks := strings.Split(p, "=") + if len(toks) != 2 { + continue + } + key, value := strings.TrimSpace(toks[0]), strings.TrimSpace(toks[1]) + if key == model.EscapingKey { + scheme, err := model.ToEscapingScheme(value) + if err != nil { + return model.NameEscapingScheme + } + return scheme + } + } + return model.NameEscapingScheme +} diff --git a/tools/vendor/github.com/prometheus/common/expfmt/fuzz.go b/tools/vendor/github.com/prometheus/common/expfmt/fuzz.go index dc2eedeef..dfac962a4 100644 --- a/tools/vendor/github.com/prometheus/common/expfmt/fuzz.go +++ b/tools/vendor/github.com/prometheus/common/expfmt/fuzz.go @@ -12,6 +12,7 @@ // limitations under the License. // Build only when actually fuzzing +//go:build gofuzz // +build gofuzz package expfmt @@ -20,8 +21,8 @@ import "bytes" // Fuzz text metric parser with github.com/dvyukov/go-fuzz: // -// go-fuzz-build github.com/prometheus/common/expfmt -// go-fuzz -bin expfmt-fuzz.zip -workdir fuzz +// go-fuzz-build github.com/prometheus/common/expfmt +// go-fuzz -bin expfmt-fuzz.zip -workdir fuzz // // Further input samples should go in the folder fuzz/corpus.
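A small round-trip sketch of the Format helpers introduced above; the expected results in the comments follow from the code as written, and the escaping constants come from the model package:

import (
	"fmt"

	"github.com/prometheus/common/expfmt"
	"github.com/prometheus/common/model"
)

func main() {
	f := expfmt.NewFormat(expfmt.TypeOpenMetrics) // latest version, i.e. FmtOpenMetrics_1_0_0
	f = f.WithEscapingScheme(model.ValueEncodingEscaping)
	fmt.Println(f)
	// application/openmetrics-text; version=1.0.0; charset=utf-8; escaping=values
	fmt.Println(f.FormatType() == expfmt.TypeOpenMetrics)            // true
	fmt.Println(f.ToEscapingScheme() == model.ValueEncodingEscaping) // true
}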
func Fuzz(in []byte) int { diff --git a/tools/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/tools/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go index 8a9313a3b..a21ed4ec1 100644 --- a/tools/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go +++ b/tools/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go @@ -22,12 +22,47 @@ import ( "strconv" "strings" - "github.com/golang/protobuf/ptypes" + "google.golang.org/protobuf/types/known/timestamppb" + "github.com/prometheus/common/model" dto "github.com/prometheus/client_model/go" ) +type encoderOption struct { + withCreatedLines bool + withUnit bool +} + +type EncoderOption func(*encoderOption) + +// WithCreatedLines is an EncoderOption that configures the OpenMetrics encoder +// to include _created lines (See +// https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#counter-1). +// Created timestamps can improve the accuracy of series reset detection, but +// come with a bandwidth cost. +// +// At the time of writing, created timestamp ingestion is still experimental in +// Prometheus and needs to be enabled with the feature flag +// `--enable-feature=created-timestamp-zero-ingestion`, and breaking changes are +// still possible. Therefore, it is recommended to use this feature with caution. +func WithCreatedLines() EncoderOption { + return func(t *encoderOption) { + t.withCreatedLines = true + } +} + +// WithUnit is an EncoderOption enabling a set unit to be written to the output +// and to be added to the metric name, if it's not there already, as a suffix. +// Without opting in this way, the unit will not be added to the metric name and, +// on top of that, the unit will not be passed on to the output, even if it +// were declared in the *dto.MetricFamily struct, i.e. even if in.Unit != nil. +func WithUnit() EncoderOption { + return func(t *encoderOption) { + t.withUnit = true + } +} + // MetricFamilyToOpenMetrics converts a MetricFamily proto message into the // OpenMetrics text format and writes the resulting lines to 'out'. It returns // the number of bytes written and any error encountered. The output will have @@ -36,6 +71,18 @@ import ( // sanity checks. If the input contains duplicate metrics or invalid metric or // label names, the conversion will result in invalid text format output. // +// If metric names conform to the legacy validation pattern, they will be placed +// outside the brackets in the traditional way, like `foo{}`. If the metric name +// fails the legacy validation check, it will be placed quoted inside the +// brackets: `{"foo"}`. As stated above, the input is assumed to be sanitized and +// no error will be thrown in this case. +// +// Similar to metric names, if label names conform to the legacy validation +// pattern, they will be unquoted as normal, like `foo{bar="baz"}`. If the label +// name fails the legacy validation check, it will be quoted: +// `foo{"bar"="baz"}`. As stated above, the input is assumed to be sanitized and +// no error will be thrown in this case. +// // This function fulfills the type 'expfmt.encoder'. // // Note that OpenMetrics requires a final `# EOF` line. Since this function acts @@ -47,21 +94,35 @@ import ( // missing features and peculiarities to avoid complications when switching from // Prometheus to OpenMetrics or vice versa: // -// - Counters are expected to have the `_total` suffix in their metric name. In -// the output, the suffix will be truncated from the `# TYPE` and `# HELP` -// line.
A counter with a missing `_total` suffix is not an error. However, -// its type will be set to `unknown` in that case to avoid invalid OpenMetrics -// output. +// - Counters are expected to have the `_total` suffix in their metric name. In +// the output, the suffix will be truncated from the `# TYPE`, `# HELP` and `# UNIT` +// lines. A counter with a missing `_total` suffix is not an error. However, +// its type will be set to `unknown` in that case to avoid invalid OpenMetrics +// output. // -// - No support for the following (optional) features: `# UNIT` line, `_created` -// line, info type, stateset type, gaugehistogram type. +// - According to the OM specs, the `# UNIT` line is optional, but if populated, +// the unit has to be present in the metric name as its suffix +// (see https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#unit). +// However, in order to accommodate any potential scenario where such a change in the +// metric name is not desirable, users are here given the choice of either explicitly +// opting in, in case they wish for the unit to be included in the output AND in the metric name +// as a suffix (see the description of the WithUnit function above), +// or not opting in, in case they don't want any of that to happen. // -// - The size of exemplar labels is not checked (i.e. it's possible to create -// exemplars that are larger than allowed by the OpenMetrics specification). +// - No support for the following (optional) features: info type, +// stateset type, gaugehistogram type. // -// - The value of Counters is not checked. (OpenMetrics doesn't allow counters -// with a `NaN` value.) -func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int, err error) { +// - The size of exemplar labels is not checked (i.e. it's possible to create +// exemplars that are larger than allowed by the OpenMetrics specification). +// +// - The value of Counters is not checked. (OpenMetrics doesn't allow counters +// with a `NaN` value.) +func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...EncoderOption) (written int, err error) { + toOM := encoderOption{} + for _, option := range options { + option(&toOM) + } + name := in.GetName() if name == "" { return 0, fmt.Errorf("MetricFamily has no name: %s", in) @@ -84,12 +145,15 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int } var ( - n int - metricType = in.GetType() - shortName = name + n int + metricType = in.GetType() + compliantName = name ) - if metricType == dto.MetricType_COUNTER && strings.HasSuffix(shortName, "_total") { - shortName = name[:len(name)-6] + if metricType == dto.MetricType_COUNTER && strings.HasSuffix(compliantName, "_total") { + compliantName = name[:len(name)-6] + } + if toOM.withUnit && in.Unit != nil && !strings.HasSuffix(compliantName, "_"+*in.Unit) { + compliantName = compliantName + "_" + *in.Unit } // Comments, first HELP, then TYPE.
@@ -99,7 +163,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int if err != nil { return } - n, err = w.WriteString(shortName) + n, err = writeName(w, compliantName) written += n if err != nil { return @@ -125,7 +189,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int if err != nil { return } - n, err = w.WriteString(shortName) + n, err = writeName(w, compliantName) written += n if err != nil { return @@ -152,55 +216,89 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int if err != nil { return } + if toOM.withUnit && in.Unit != nil { + n, err = w.WriteString("# UNIT ") + written += n + if err != nil { + return + } + n, err = writeName(w, compliantName) + written += n + if err != nil { + return + } + + err = w.WriteByte(' ') + written++ + if err != nil { + return + } + n, err = writeEscapedString(w, *in.Unit, true) + written += n + if err != nil { + return + } + err = w.WriteByte('\n') + written++ + if err != nil { + return + } + } + + var createdTsBytesWritten int // Finally the samples, one line for each. + if metricType == dto.MetricType_COUNTER && strings.HasSuffix(name, "_total") { + compliantName = compliantName + "_total" + } for _, metric := range in.Metric { switch metricType { case dto.MetricType_COUNTER: if metric.Counter == nil { return written, fmt.Errorf( - "expected counter in metric %s %s", name, metric, + "expected counter in metric %s %s", compliantName, metric, ) } - // Note that we have ensured above that either the name - // ends on `_total` or that the rendered type is - // `unknown`. Therefore, no `_total` must be added here. n, err = writeOpenMetricsSample( - w, name, "", metric, "", 0, + w, compliantName, "", metric, "", 0, metric.Counter.GetValue(), 0, false, metric.Counter.Exemplar, ) + if toOM.withCreatedLines && metric.Counter.CreatedTimestamp != nil { + createdTsBytesWritten, err = writeOpenMetricsCreated(w, compliantName, "_total", metric, "", 0, metric.Counter.GetCreatedTimestamp()) + n += createdTsBytesWritten + } case dto.MetricType_GAUGE: if metric.Gauge == nil { return written, fmt.Errorf( - "expected gauge in metric %s %s", name, metric, + "expected gauge in metric %s %s", compliantName, metric, ) } n, err = writeOpenMetricsSample( - w, name, "", metric, "", 0, + w, compliantName, "", metric, "", 0, metric.Gauge.GetValue(), 0, false, nil, ) case dto.MetricType_UNTYPED: if metric.Untyped == nil { return written, fmt.Errorf( - "expected untyped in metric %s %s", name, metric, + "expected untyped in metric %s %s", compliantName, metric, ) } n, err = writeOpenMetricsSample( - w, name, "", metric, "", 0, + w, compliantName, "", metric, "", 0, metric.Untyped.GetValue(), 0, false, nil, ) case dto.MetricType_SUMMARY: if metric.Summary == nil { return written, fmt.Errorf( - "expected summary in metric %s %s", name, metric, + "expected summary in metric %s %s", compliantName, metric, ) } for _, q := range metric.Summary.Quantile { n, err = writeOpenMetricsSample( - w, name, "", metric, + w, compliantName, "", metric, model.QuantileLabel, q.GetQuantile(), q.GetValue(), 0, false, nil, @@ -211,7 +309,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int } } n, err = writeOpenMetricsSample( - w, name, "_sum", metric, "", 0, + w, compliantName, "_sum", metric, "", 0, metric.Summary.GetSampleSum(), 0, false, nil, ) @@ -220,20 +318,24 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int return } n, err = 
writeOpenMetricsSample( - w, name, "_count", metric, "", 0, + w, compliantName, "_count", metric, "", 0, 0, metric.Summary.GetSampleCount(), true, nil, ) + if toOM.withCreatedLines && metric.Summary.CreatedTimestamp != nil { + createdTsBytesWritten, err = writeOpenMetricsCreated(w, compliantName, "", metric, "", 0, metric.Summary.GetCreatedTimestamp()) + n += createdTsBytesWritten + } case dto.MetricType_HISTOGRAM: if metric.Histogram == nil { return written, fmt.Errorf( - "expected histogram in metric %s %s", name, metric, + "expected histogram in metric %s %s", compliantName, metric, ) } infSeen := false for _, b := range metric.Histogram.Bucket { n, err = writeOpenMetricsSample( - w, name, "_bucket", metric, + w, compliantName, "_bucket", metric, model.BucketLabel, b.GetUpperBound(), 0, b.GetCumulativeCount(), true, b.Exemplar, @@ -248,7 +350,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int } if !infSeen { n, err = writeOpenMetricsSample( - w, name, "_bucket", metric, + w, compliantName, "_bucket", metric, model.BucketLabel, math.Inf(+1), 0, metric.Histogram.GetSampleCount(), true, nil, @@ -259,7 +361,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int } } n, err = writeOpenMetricsSample( - w, name, "_sum", metric, "", 0, + w, compliantName, "_sum", metric, "", 0, metric.Histogram.GetSampleSum(), 0, false, nil, ) @@ -268,13 +370,17 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int return } n, err = writeOpenMetricsSample( - w, name, "_count", metric, "", 0, + w, compliantName, "_count", metric, "", 0, 0, metric.Histogram.GetSampleCount(), true, nil, ) + if toOM.withCreatedLines && metric.Histogram.CreatedTimestamp != nil { + createdTsBytesWritten, err = writeOpenMetricsCreated(w, compliantName, "", metric, "", 0, metric.Histogram.GetCreatedTimestamp()) + n += createdTsBytesWritten + } default: return written, fmt.Errorf( - "unexpected type in metric %s %s", name, metric, + "unexpected type in metric %s %s", compliantName, metric, ) } written += n @@ -304,21 +410,9 @@ func writeOpenMetricsSample( floatValue float64, intValue uint64, useIntValue bool, exemplar *dto.Exemplar, ) (int, error) { - var written int - n, err := w.WriteString(name) - written += n - if err != nil { - return written, err - } - if suffix != "" { - n, err = w.WriteString(suffix) - written += n - if err != nil { - return written, err - } - } - n, err = writeOpenMetricsLabelPairs( - w, metric.Label, additionalLabelName, additionalLabelValue, + written := 0 + n, err := writeOpenMetricsNameAndLabelPairs( + w, name+suffix, metric.Label, additionalLabelName, additionalLabelValue, ) written += n if err != nil { @@ -351,7 +445,7 @@ func writeOpenMetricsSample( return written, err } } - if exemplar != nil { + if exemplar != nil && len(exemplar.Label) > 0 { n, err = writeExemplar(w, exemplar) written += n if err != nil { @@ -366,27 +460,58 @@ func writeOpenMetricsSample( return written, nil } -// writeOpenMetricsLabelPairs works like writeOpenMetrics but formats the float -// in OpenMetrics style. -func writeOpenMetricsLabelPairs( +// writeOpenMetricsNameAndLabelPairs works like writeOpenMetricsSample but +// formats the float in OpenMetrics style. 
+func writeOpenMetricsNameAndLabelPairs( w enhancedWriter, + name string, in []*dto.LabelPair, additionalLabelName string, additionalLabelValue float64, ) (int, error) { - if len(in) == 0 && additionalLabelName == "" { - return 0, nil - } var ( - written int - separator byte = '{' + written int + separator byte = '{' + metricInsideBraces = false ) + + if name != "" { + // If the name does not pass the legacy validity check, we must put the + // metric name inside the braces, quoted. + if !model.IsValidLegacyMetricName(name) { + metricInsideBraces = true + err := w.WriteByte(separator) + written++ + if err != nil { + return written, err + } + separator = ',' + } + + n, err := writeName(w, name) + written += n + if err != nil { + return written, err + } + } + + if len(in) == 0 && additionalLabelName == "" { + if metricInsideBraces { + err := w.WriteByte('}') + written++ + if err != nil { + return written, err + } + } + return written, nil + } + for _, lp := range in { err := w.WriteByte(separator) written++ if err != nil { return written, err } - n, err := w.WriteString(lp.GetName()) + n, err := writeName(w, lp.GetName()) written += n if err != nil { return written, err @@ -443,6 +568,49 @@ func writeOpenMetricsLabelPairs( return written, nil } +// writeOpenMetricsCreated writes the created timestamp for a single time series +// following OpenMetrics text format to w, given the metric name, the metric proto +// message itself, optionally a suffix to be removed, e.g. '_total' for counters, +// an additional label name with a float64 value (use empty string as label name if +// not required) and the timestamp that represents the created timestamp. +// The function returns the number of bytes written and any error encountered. +func writeOpenMetricsCreated(w enhancedWriter, + name, suffixToTrim string, metric *dto.Metric, + additionalLabelName string, additionalLabelValue float64, + createdTimestamp *timestamppb.Timestamp, +) (int, error) { + written := 0 + n, err := writeOpenMetricsNameAndLabelPairs( + w, strings.TrimSuffix(name, suffixToTrim)+"_created", metric.Label, additionalLabelName, additionalLabelValue, + ) + written += n + if err != nil { + return written, err + } + + err = w.WriteByte(' ') + written++ + if err != nil { + return written, err + } + + // TODO(beorn7): Format this directly from components of ts to + // avoid overflow/underflow and precision issues of the float + // conversion. + n, err = writeOpenMetricsFloat(w, float64(createdTimestamp.AsTime().UnixNano())/1e9) + written += n + if err != nil { + return written, err + } + + err = w.WriteByte('\n') + written++ + if err != nil { + return written, err + } + return written, nil +} + // writeExemplar writes the provided exemplar in OpenMetrics format to w. The // function returns the number of bytes written and any error encountered. 
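Putting the options and the compliant-name handling together, a hedged end-to-end sketch (the metric family is invented; the comments show the expected shape of the output):

import (
	"os"
	"time"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	mf := &dto.MetricFamily{
		Name: proto.String("http_request_seconds_total"),
		Help: proto.String("Total request time."),
		Type: dto.MetricType_COUNTER.Enum(),
		Unit: proto.String("seconds"),
		Metric: []*dto.Metric{{
			Counter: &dto.Counter{
				Value:            proto.Float64(42),
				CreatedTimestamp: timestamppb.New(time.Unix(1700000000, 0)),
			},
		}},
	}
	expfmt.MetricFamilyToOpenMetrics(os.Stdout, mf, expfmt.WithUnit(), expfmt.WithCreatedLines())
	// Expected shape:
	//   # HELP http_request_seconds Total request time.
	//   # TYPE http_request_seconds counter
	//   # UNIT http_request_seconds seconds
	//   http_request_seconds_total 42.0
	//   http_request_seconds_created 1.7e+09
}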
func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) { @@ -452,7 +620,7 @@ func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) { if err != nil { return written, err } - n, err = writeOpenMetricsLabelPairs(w, e.Label, "", 0) + n, err = writeOpenMetricsNameAndLabelPairs(w, "", e.Label, "", 0) written += n if err != nil { return written, err } @@ -473,10 +641,11 @@ func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) { if err != nil { return written, err } - ts, err := ptypes.Timestamp((*e).Timestamp) + err = (*e).Timestamp.CheckValid() if err != nil { return written, err } + ts := (*e).Timestamp.AsTime() // TODO(beorn7): Format this directly from components of ts to // avoid overflow/underflow and precision issues of the float // conversion. diff --git a/tools/vendor/github.com/prometheus/common/expfmt/text_create.go b/tools/vendor/github.com/prometheus/common/expfmt/text_create.go index 5ba503b06..4b86434b3 100644 --- a/tools/vendor/github.com/prometheus/common/expfmt/text_create.go +++ b/tools/vendor/github.com/prometheus/common/expfmt/text_create.go @@ -17,7 +17,6 @@ import ( "bufio" "fmt" "io" - "io/ioutil" "math" "strconv" "strings" @@ -44,7 +43,7 @@ const ( var ( bufPool = sync.Pool{ New: func() interface{} { - return bufio.NewWriter(ioutil.Discard) + return bufio.NewWriter(io.Discard) }, } numBufPool = sync.Pool{ @@ -63,6 +62,18 @@ var ( // contains duplicate metrics or invalid metric or label names, the conversion // will result in invalid text format output. // +// If metric names conform to the legacy validation pattern, they will be placed +// outside the brackets in the traditional way, like `foo{}`. If the metric name +// fails the legacy validation check, it will be placed quoted inside the +// brackets: `{"foo"}`. As stated above, the input is assumed to be sanitized and +// no error will be thrown in this case. +// +// Similar to metric names, if label names conform to the legacy validation +// pattern, they will be unquoted as normal, like `foo{bar="baz"}`. If the label +// name fails the legacy validation check, it will be quoted: +// `foo{"bar"="baz"}`. As stated above, the input is assumed to be sanitized and +// no error will be thrown in this case. +// // This method fulfills the type 'prometheus.encoder'. func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err error) { // Fail-fast checks.
@@ -99,7 +110,7 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err e if err != nil { return } - n, err = w.WriteString(name) + n, err = writeName(w, name) written += n if err != nil { return @@ -125,7 +136,7 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err e if err != nil { return } - n, err = w.WriteString(name) + n, err = writeName(w, name) written += n if err != nil { return @@ -281,21 +292,9 @@ func writeSample( additionalLabelName string, additionalLabelValue float64, value float64, ) (int, error) { - var written int - n, err := w.WriteString(name) - written += n - if err != nil { - return written, err - } - if suffix != "" { - n, err = w.WriteString(suffix) - written += n - if err != nil { - return written, err - } - } - n, err = writeLabelPairs( - w, metric.Label, additionalLabelName, additionalLabelValue, + written := 0 + n, err := writeNameAndLabelPairs( + w, name+suffix, metric.Label, additionalLabelName, additionalLabelValue, ) written += n if err != nil { @@ -331,32 +330,64 @@ func writeSample( return written, nil } -// writeLabelPairs converts a slice of LabelPair proto messages plus the -// explicitly given additional label pair into text formatted as required by the -// text format and writes it to 'w'. An empty slice in combination with an empty -// string 'additionalLabelName' results in nothing being written. Otherwise, the -// label pairs are written, escaped as required by the text format, and enclosed -// in '{...}'. The function returns the number of bytes written and any error -// encountered. -func writeLabelPairs( +// writeNameAndLabelPairs converts a slice of LabelPair proto messages plus the +// explicitly given metric name and additional label pair into text formatted as +// required by the text format and writes it to 'w'. An empty slice in +// combination with an empty string 'additionalLabelName' results in nothing +// being written. Otherwise, the label pairs are written, escaped as required by +// the text format, and enclosed in '{...}'. The function returns the number of +// bytes written and any error encountered. If the metric name is not +// legacy-valid, it will be put inside the brackets as well. Legacy-invalid +// label names will also be quoted. +func writeNameAndLabelPairs( w enhancedWriter, + name string, in []*dto.LabelPair, additionalLabelName string, additionalLabelValue float64, ) (int, error) { - if len(in) == 0 && additionalLabelName == "" { - return 0, nil - } var ( - written int - separator byte = '{' + written int + separator byte = '{' + metricInsideBraces = false ) + + if name != "" { + // If the name does not pass the legacy validity check, we must put the + // metric name inside the braces. 
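The same quoting rules can be seen end to end: a family whose name fails the legacy check is rendered with the name quoted inside the braces. A hedged sketch (invented family; comments show the plausible output):

import (
	"os"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
	"google.golang.org/protobuf/proto"
)

func main() {
	mf := &dto.MetricFamily{
		Name: proto.String("my.metric"), // the dot fails the legacy pattern
		Type: dto.MetricType_UNTYPED.Enum(),
		Metric: []*dto.Metric{{
			Label: []*dto.LabelPair{{
				Name:  proto.String("code"),
				Value: proto.String("200"),
			}},
			Untyped: &dto.Untyped{Value: proto.Float64(7)},
		}},
	}
	expfmt.MetricFamilyToText(os.Stdout, mf)
	// Plausible output:
	//   # TYPE "my.metric" untyped
	//   {"my.metric",code="200"} 7
}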
+ if !model.IsValidLegacyMetricName(name) { + metricInsideBraces = true + err := w.WriteByte(separator) + written++ + if err != nil { + return written, err + } + separator = ',' + } + n, err := writeName(w, name) + written += n + if err != nil { + return written, err + } + } + + if len(in) == 0 && additionalLabelName == "" { + if metricInsideBraces { + err := w.WriteByte('}') + written++ + if err != nil { + return written, err + } + } + return written, nil + } + for _, lp := range in { err := w.WriteByte(separator) written++ if err != nil { return written, err } - n, err := w.WriteString(lp.GetName()) + n, err := writeName(w, lp.GetName()) written += n if err != nil { return written, err @@ -463,3 +494,27 @@ func writeInt(w enhancedWriter, i int64) (int, error) { numBufPool.Put(bp) return written, err } + +// writeName writes a string as-is if it complies with the legacy naming +// scheme, or escapes it in double quotes if not. +func writeName(w enhancedWriter, name string) (int, error) { + if model.IsValidLegacyMetricName(name) { + return w.WriteString(name) + } + var written int + var err error + err = w.WriteByte('"') + written++ + if err != nil { + return written, err + } + var n int + n, err = writeEscapedString(w, name, true) + written += n + if err != nil { + return written, err + } + err = w.WriteByte('"') + written++ + return written, err +} diff --git a/tools/vendor/github.com/prometheus/common/expfmt/text_parse.go b/tools/vendor/github.com/prometheus/common/expfmt/text_parse.go index 84be0643e..b4607fe4d 100644 --- a/tools/vendor/github.com/prometheus/common/expfmt/text_parse.go +++ b/tools/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -16,15 +16,17 @@ package expfmt import ( "bufio" "bytes" + "errors" "fmt" "io" "math" "strconv" "strings" + "unicode/utf8" dto "github.com/prometheus/client_model/go" + "google.golang.org/protobuf/proto" - "github.com/golang/protobuf/proto" //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. "github.com/prometheus/common/model" ) @@ -58,6 +60,7 @@ type TextParser struct { currentMF *dto.MetricFamily currentMetric *dto.Metric currentLabelPair *dto.LabelPair + currentLabelPairs []*dto.LabelPair // Temporarily stores label pairs while parsing a metric line. // The remaining member variables are only used for summaries/histograms. currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le' @@ -72,6 +75,9 @@ type TextParser struct { // count and sum of that summary/histogram. currentIsSummaryCount, currentIsSummarySum bool currentIsHistogramCount, currentIsHistogramSum bool + // These indicate if the metric name from the current line being parsed is inside + // braces and if that metric name was found respectively. + currentMetricIsInsideBraces, currentMetricInsideBracesIsPresent bool } // TextToMetricFamilies reads 'in' as the simple and flat text-based exchange @@ -112,7 +118,7 @@ func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricF // stream. Turn this error into something nicer and more // meaningful. (io.EOF is often used as a signal for the legitimate end // of an input stream.) 
- if p.err == io.EOF { + if p.err != nil && errors.Is(p.err, io.EOF) { p.parseError("unexpected end of input stream") } return p.metricFamiliesByName, p.err @@ -135,16 +141,23 @@ func (p *TextParser) reset(in io.Reader) { } p.currentQuantile = math.NaN() p.currentBucket = math.NaN() + p.currentMF = nil } // startOfLine represents the state where the next byte read from p.buf is the // start of a line (or whitespace leading up to it). func (p *TextParser) startOfLine() stateFn { p.lineCount++ + p.currentMetricIsInsideBraces = false + p.currentMetricInsideBracesIsPresent = false if p.skipBlankTab(); p.err != nil { - // End of input reached. This is the only case where - // that is not an error but a signal that we are done. - p.err = nil + // This is the only place that we expect to see io.EOF, + // which is not an error but the signal that we are done. + // Any other error that happens to align with the start of + // a line is still an error. + if errors.Is(p.err, io.EOF) { + p.err = nil + } return nil } switch p.currentByte { @@ -152,6 +165,9 @@ func (p *TextParser) startOfLine() stateFn { return p.startComment case '\n': return p.startOfLine // Empty line, start the next one. + case '{': + p.currentMetricIsInsideBraces = true + return p.readingLabels } return p.readingMetricName } @@ -269,6 +285,8 @@ func (p *TextParser) startLabelName() stateFn { return nil // Unexpected end of input. } if p.currentByte == '}' { + p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...) + p.currentLabelPairs = nil if p.skipBlankTab(); p.err != nil { return nil // Unexpected end of input. } @@ -281,6 +299,45 @@ func (p *TextParser) startLabelName() stateFn { p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName())) return nil } + if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte != '=' { + if p.currentMetricIsInsideBraces { + if p.currentMetricInsideBracesIsPresent { + p.parseError(fmt.Sprintf("multiple metric names for metric %q", p.currentMF.GetName())) + return nil + } + switch p.currentByte { + case ',': + p.setOrCreateCurrentMF() + if p.currentMF.Type == nil { + p.currentMF.Type = dto.MetricType_UNTYPED.Enum() + } + p.currentMetric = &dto.Metric{} + p.currentMetricInsideBracesIsPresent = true + return p.startLabelName + case '}': + p.setOrCreateCurrentMF() + if p.currentMF.Type == nil { + p.currentMF.Type = dto.MetricType_UNTYPED.Enum() + } + p.currentMetric = &dto.Metric{} + p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...) + p.currentLabelPairs = nil + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + return p.readingValue + default: + p.parseError(fmt.Sprintf("unexpected end of metric name %q", p.currentByte)) + return nil + } + } + p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte)) + p.currentLabelPairs = nil + return nil + } p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())} if p.currentLabelPair.GetName() == string(model.MetricNameLabel) { p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel)) @@ -290,23 +347,17 @@ func (p *TextParser) startLabelName() stateFn { // labels to 'real' labels. 
if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) && !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) { - p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair) - } - if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentByte != '=' { - p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte)) - return nil + p.currentLabelPairs = append(p.currentLabelPairs, p.currentLabelPair) } // Check for duplicate label names. labels := make(map[string]struct{}) - for _, l := range p.currentMetric.Label { + for _, l := range p.currentLabelPairs { lName := l.GetName() if _, exists := labels[lName]; !exists { labels[lName] = struct{}{} } else { p.parseError(fmt.Sprintf("duplicate label names for metric %q", p.currentMF.GetName())) + p.currentLabelPairs = nil return nil } } @@ -339,6 +390,7 @@ func (p *TextParser) startLabelValue() stateFn { if p.currentQuantile, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil { // Create a more helpful error message. p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue())) + p.currentLabelPairs = nil return nil } } else { @@ -365,12 +417,19 @@ func (p *TextParser) startLabelValue() stateFn { return p.startLabelName case '}': + if p.currentMF == nil { + p.parseError("invalid metric name") + return nil + } + p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...) + p.currentLabelPairs = nil if p.skipBlankTab(); p.err != nil { return nil // Unexpected end of input. } return p.readingValue default: p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.GetValue())) + p.currentLabelPairs = nil return nil } } @@ -579,6 +638,8 @@ func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) { p.currentToken.WriteByte(p.currentByte) case 'n': p.currentToken.WriteByte('\n') + case '"': + p.currentToken.WriteByte('"') default: p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) return @@ -604,13 +665,45 @@ func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) { // but not into p.currentToken. func (p *TextParser) readTokenAsMetricName() { p.currentToken.Reset() + // A UTF-8 metric name must be quoted and may have escaped characters. 
+ quoted := false + escaped := false if !isValidMetricNameStart(p.currentByte) { return } - for { - p.currentToken.WriteByte(p.currentByte) + for p.err == nil { + if escaped { + switch p.currentByte { + case '\\': + p.currentToken.WriteByte(p.currentByte) + case 'n': + p.currentToken.WriteByte('\n') + case '"': + p.currentToken.WriteByte('"') + default: + p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) + return + } + escaped = false + } else { + switch p.currentByte { + case '"': + quoted = !quoted + if !quoted { + p.currentByte, p.err = p.buf.ReadByte() + return + } + case '\n': + p.parseError(fmt.Sprintf("metric name %q contains unescaped new-line", p.currentToken.String())) + return + case '\\': + escaped = true + default: + p.currentToken.WriteByte(p.currentByte) + } + } p.currentByte, p.err = p.buf.ReadByte() - if p.err != nil || !isValidMetricNameContinuation(p.currentByte) { + if !isValidMetricNameContinuation(p.currentByte, quoted) || (!quoted && p.currentByte == ' ') { return } } @@ -622,13 +715,45 @@ func (p *TextParser) readTokenAsMetricName() { // but not into p.currentToken. func (p *TextParser) readTokenAsLabelName() { p.currentToken.Reset() + // A UTF-8 label name must be quoted and may have escaped characters. + quoted := false + escaped := false if !isValidLabelNameStart(p.currentByte) { return } - for { - p.currentToken.WriteByte(p.currentByte) + for p.err == nil { + if escaped { + switch p.currentByte { + case '\\': + p.currentToken.WriteByte(p.currentByte) + case 'n': + p.currentToken.WriteByte('\n') + case '"': + p.currentToken.WriteByte('"') + default: + p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) + return + } + escaped = false + } else { + switch p.currentByte { + case '"': + quoted = !quoted + if !quoted { + p.currentByte, p.err = p.buf.ReadByte() + return + } + case '\n': + p.parseError(fmt.Sprintf("label name %q contains unescaped new-line", p.currentToken.String())) + return + case '\\': + escaped = true + default: + p.currentToken.WriteByte(p.currentByte) + } + } p.currentByte, p.err = p.buf.ReadByte() - if p.err != nil || !isValidLabelNameContinuation(p.currentByte) { + if !isValidLabelNameContinuation(p.currentByte, quoted) || (!quoted && p.currentByte == '=') { return } } @@ -654,6 +779,7 @@ func (p *TextParser) readTokenAsLabelValue() { p.currentToken.WriteByte('\n') default: p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) + p.currentLabelPairs = nil return } escaped = false @@ -712,19 +838,19 @@ func (p *TextParser) setOrCreateCurrentMF() { } func isValidLabelNameStart(b byte) bool { - return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' + return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == '"' } -func isValidLabelNameContinuation(b byte) bool { - return isValidLabelNameStart(b) || (b >= '0' && b <= '9') +func isValidLabelNameContinuation(b byte, quoted bool) bool { + return isValidLabelNameStart(b) || (b >= '0' && b <= '9') || (quoted && utf8.ValidString(string(b))) } func isValidMetricNameStart(b byte) bool { return isValidLabelNameStart(b) || b == ':' } -func isValidMetricNameContinuation(b byte) bool { - return isValidLabelNameContinuation(b) || b == ':' +func isValidMetricNameContinuation(b byte, quoted bool) bool { + return isValidLabelNameContinuation(b, quoted) || b == ':' } func isBlankOrTab(b byte) bool { @@ -769,7 +895,7 @@ func histogramMetricName(name string) string { func parseFloat(s string) (float64, error) { if 
strings.ContainsAny(s, "pP_") { - return 0, fmt.Errorf("unsupported character in float") + return 0, errors.New("unsupported character in float") } return strconv.ParseFloat(s, 64) } diff --git a/tools/vendor/github.com/prometheus/common/model/alert.go b/tools/vendor/github.com/prometheus/common/model/alert.go index 35e739c7a..460f554f2 100644 --- a/tools/vendor/github.com/prometheus/common/model/alert.go +++ b/tools/vendor/github.com/prometheus/common/model/alert.go @@ -14,6 +14,7 @@ package model import ( + "errors" "fmt" "time" ) @@ -64,7 +65,7 @@ func (a *Alert) Resolved() bool { return a.ResolvedAt(time.Now()) } -// ResolvedAt returns true off the activity interval ended before +// ResolvedAt returns true iff the activity interval ended before // the given timestamp. func (a *Alert) ResolvedAt(ts time.Time) bool { if a.EndsAt.IsZero() { @@ -75,7 +76,12 @@ func (a *Alert) ResolvedAt(ts time.Time) bool { // Status returns the status of the alert. func (a *Alert) Status() AlertStatus { - if a.Resolved() { + return a.StatusAt(time.Now()) +} + +// StatusAt returns the status of the alert at the given timestamp. +func (a *Alert) StatusAt(ts time.Time) AlertStatus { + if a.ResolvedAt(ts) { return AlertResolved } return AlertFiring @@ -84,19 +90,19 @@ func (a *Alert) Status() AlertStatus { // Validate checks whether the alert data is inconsistent. func (a *Alert) Validate() error { if a.StartsAt.IsZero() { - return fmt.Errorf("start time missing") + return errors.New("start time missing") } if !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) { - return fmt.Errorf("start time must be before end time") + return errors.New("start time must be before end time") } if err := a.Labels.Validate(); err != nil { - return fmt.Errorf("invalid label set: %s", err) + return fmt.Errorf("invalid label set: %w", err) } if len(a.Labels) == 0 { - return fmt.Errorf("at least one label pair required") + return errors.New("at least one label pair required") } if err := a.Annotations.Validate(); err != nil { - return fmt.Errorf("invalid annotations: %s", err) + return fmt.Errorf("invalid annotations: %w", err) } return nil } @@ -127,6 +133,17 @@ func (as Alerts) HasFiring() bool { return false } +// HasFiringAt returns true iff one of the alerts is not resolved +// at the time ts. +func (as Alerts) HasFiringAt(ts time.Time) bool { + for _, a := range as { + if !a.ResolvedAt(ts) { + return true + } + } + return false +} + // Status returns StatusFiring iff at least one of the alerts is firing. func (as Alerts) Status() AlertStatus { if as.HasFiring() { @@ -134,3 +151,12 @@ func (as Alerts) Status() AlertStatus { } return AlertResolved } + +// StatusAt returns StatusFiring iff at least one of the alerts is firing +// at the time ts. +func (as Alerts) StatusAt(ts time.Time) AlertStatus { + if as.HasFiringAt(ts) { + return AlertFiring + } + return AlertResolved +} diff --git a/tools/vendor/github.com/prometheus/common/model/labels.go b/tools/vendor/github.com/prometheus/common/model/labels.go index ef8956335..f4a387605 100644 --- a/tools/vendor/github.com/prometheus/common/model/labels.go +++ b/tools/vendor/github.com/prometheus/common/model/labels.go @@ -22,7 +22,7 @@ import ( ) const ( - // AlertNameLabel is the name of the label containing the an alert's name. + // AlertNameLabel is the name of the label containing the alert's name. 
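// Example (editor's illustrative sketch, not part of the vendored change). Two
// things change in alert.go above: Validate now wraps nested errors with %w,
// so errors.Is/errors.As can see through them, and StatusAt/ResolvedAt
// evaluate an alert at an arbitrary instant instead of time.Now(). Field
// values here are invented:
package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
)

func main() {
	now := time.Now()
	a := &model.Alert{
		Labels:   model.LabelSet{"alertname": "HighLatency"},
		StartsAt: now.Add(-time.Hour),
		EndsAt:   now.Add(-time.Minute),
	}
	fmt.Println(a.StatusAt(now))                        // resolved
	fmt.Println(a.StatusAt(now.Add(-30 * time.Minute))) // firing
}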
AlertNameLabel = "alertname" // ExportedLabelPrefix is the prefix to prepend to the label names present in @@ -97,10 +97,27 @@ var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$") // therewith. type LabelName string -// IsValid is true iff the label name matches the pattern of LabelNameRE. This -// method, however, does not use LabelNameRE for the check but a much faster -// hardcoded implementation. +// IsValid returns true iff the name matches the pattern of LabelNameRE when +// NameValidationScheme is set to LegacyValidation, or valid UTF-8 if +// NameValidationScheme is set to UTF8Validation. func (ln LabelName) IsValid() bool { + if len(ln) == 0 { + return false + } + switch NameValidationScheme { + case LegacyValidation: + return ln.IsValidLegacy() + case UTF8Validation: + return utf8.ValidString(string(ln)) + default: + panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme)) + } +} + +// IsValidLegacy returns true iff name matches the pattern of LabelNameRE for +// legacy names. It does not use LabelNameRE for the check but a much faster +// hardcoded implementation. +func (ln LabelName) IsValidLegacy() bool { if len(ln) == 0 { return false } @@ -164,7 +181,7 @@ func (l LabelNames) String() string { // A LabelValue is an associated value for a LabelName. type LabelValue string -// IsValid returns true iff the string is a valid UTF8. +// IsValid returns true iff the string is a valid UTF-8. func (lv LabelValue) IsValid() bool { return utf8.ValidString(string(lv)) } diff --git a/tools/vendor/github.com/prometheus/common/model/labelset.go b/tools/vendor/github.com/prometheus/common/model/labelset.go index 6eda08a73..d0ad88da3 100644 --- a/tools/vendor/github.com/prometheus/common/model/labelset.go +++ b/tools/vendor/github.com/prometheus/common/model/labelset.go @@ -17,7 +17,6 @@ import ( "encoding/json" "fmt" "sort" - "strings" ) // A LabelSet is a collection of LabelName and LabelValue pairs. The LabelSet @@ -129,16 +128,6 @@ func (l LabelSet) Merge(other LabelSet) LabelSet { return result } -func (l LabelSet) String() string { - lstrs := make([]string, 0, len(l)) - for l, v := range l { - lstrs = append(lstrs, fmt.Sprintf("%s=%q", l, v)) - } - - sort.Strings(lstrs) - return fmt.Sprintf("{%s}", strings.Join(lstrs, ", ")) -} - // Fingerprint returns the LabelSet's fingerprint. func (ls LabelSet) Fingerprint() Fingerprint { return labelSetToFingerprint(ls) diff --git a/tools/vendor/github.com/prometheus/common/model/labelset_string.go b/tools/vendor/github.com/prometheus/common/model/labelset_string.go new file mode 100644 index 000000000..abb2c9001 --- /dev/null +++ b/tools/vendor/github.com/prometheus/common/model/labelset_string.go @@ -0,0 +1,43 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "bytes" + "slices" + "strconv" +) + +// String will look like `{foo="bar", more="less"}`. Names are sorted alphabetically. 
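// Example (editor's illustrative sketch, not part of the vendored change).
// With the hunk above, LabelName.IsValid consults the package-level
// NameValidationScheme, while IsValidLegacy always applies the old
// [a-zA-Z_][a-zA-Z0-9_]* rule:
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	name := model.LabelName("http.method") // dot: UTF-8-valid, legacy-invalid
	fmt.Println(name.IsValidLegacy()) // false

	model.NameValidationScheme = model.UTF8Validation // the new default
	fmt.Println(name.IsValid()) // true

	// Deprecated escape hatch, shown only for contrast:
	model.NameValidationScheme = model.LegacyValidation
	fmt.Println(name.IsValid()) // false
}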
+func (l LabelSet) String() string { + var lna [32]string // On stack to avoid memory allocation for sorting names. + labelNames := lna[:0] + for name := range l { + labelNames = append(labelNames, string(name)) + } + slices.Sort(labelNames) + var bytea [1024]byte // On stack to avoid memory allocation while building the output. + b := bytes.NewBuffer(bytea[:0]) + b.WriteByte('{') + for i, name := range labelNames { + if i > 0 { + b.WriteString(", ") + } + b.WriteString(name) + b.WriteByte('=') + b.Write(strconv.AppendQuote(b.AvailableBuffer(), string(l[LabelName(name)]))) + } + b.WriteByte('}') + return b.String() +} diff --git a/tools/vendor/github.com/prometheus/common/model/metadata.go b/tools/vendor/github.com/prometheus/common/model/metadata.go new file mode 100644 index 000000000..447ab8ad6 --- /dev/null +++ b/tools/vendor/github.com/prometheus/common/model/metadata.go @@ -0,0 +1,28 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +// MetricType represents metric type values. +type MetricType string + +const ( + MetricTypeCounter = MetricType("counter") + MetricTypeGauge = MetricType("gauge") + MetricTypeHistogram = MetricType("histogram") + MetricTypeGaugeHistogram = MetricType("gaugehistogram") + MetricTypeSummary = MetricType("summary") + MetricTypeInfo = MetricType("info") + MetricTypeStateset = MetricType("stateset") + MetricTypeUnknown = MetricType("unknown") +) diff --git a/tools/vendor/github.com/prometheus/common/model/metric.go b/tools/vendor/github.com/prometheus/common/model/metric.go index 00804b7fe..a6b01755b 100644 --- a/tools/vendor/github.com/prometheus/common/model/metric.go +++ b/tools/vendor/github.com/prometheus/common/model/metric.go @@ -14,19 +14,105 @@ package model import ( + "errors" "fmt" "regexp" "sort" + "strconv" "strings" + "unicode/utf8" + + dto "github.com/prometheus/client_model/go" + "google.golang.org/protobuf/proto" ) var ( - // MetricNameRE is a regular expression matching valid metric - // names. Note that the IsValidMetricName function performs the same - // check but faster than a match with this regular expression. - MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`) + // NameValidationScheme determines the global default method of the name + // validation to be used by all calls to IsValidMetricName() and LabelName + // IsValid(). + // + // Deprecated: This variable should not be used and might be removed in the + // far future. If you wish to stick to the legacy name validation use + // `IsValidLegacyMetricName()` and `LabelName.IsValidLegacy()` methods + // instead. This variable is here as an escape hatch for emergency cases, + // given the recent change from `LegacyValidation` to `UTF8Validation`, e.g., + // to delay UTF-8 migrations in time or aid in debugging unforeseen results of + // the change. In such a case, a temporary assignment to `LegacyValidation` + // value in the `init()` function in your main.go or so, could be considered. 
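// Example (editor's illustrative sketch, not part of the vendored change). The
// new LabelSet.String above builds its output on two stack arrays plus
// bytes.Buffer.AvailableBuffer (which requires Go 1.21+), but the rendered
// form is unchanged: quoted values, names sorted alphabetically:
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	ls := model.LabelSet{"job": "api", "instance": "10.0.0.1:9100"}
	fmt.Println(ls) // {instance="10.0.0.1:9100", job="api"}
}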
+ // + // Historically we opted for a global variable for feature gating different + // validation schemes in operations that were not otherwise easily adjustable + // (e.g. Labels yaml unmarshaling). That could have been a mistake, a separate + // Labels structure or package might have been a better choice. Given the + // change was made and many upgraded the common already, we live this as-is + // with this warning and learning for the future. + NameValidationScheme = UTF8Validation + + // NameEscapingScheme defines the default way that names will be escaped when + // presented to systems that do not support UTF-8 names. If the Content-Type + // "escaping" term is specified, that will override this value. + // NameEscapingScheme should not be set to the NoEscaping value. That string + // is used in content negotiation to indicate that a system supports UTF-8 and + // has that feature enabled. + NameEscapingScheme = UnderscoreEscaping +) + +// ValidationScheme is a Go enum for determining how metric and label names will +// be validated by this library. +type ValidationScheme int + +const ( + // LegacyValidation is a setting that requires that all metric and label names + // conform to the original Prometheus character requirements described by + // MetricNameRE and LabelNameRE. + LegacyValidation ValidationScheme = iota + + // UTF8Validation only requires that metric and label names be valid UTF-8 + // strings. + UTF8Validation ) +type EscapingScheme int + +const ( + // NoEscaping indicates that a name will not be escaped. Unescaped names that + // do not conform to the legacy validity check will use a new exposition + // format syntax that will be officially standardized in future versions. + NoEscaping EscapingScheme = iota + + // UnderscoreEscaping replaces all legacy-invalid characters with underscores. + UnderscoreEscaping + + // DotsEscaping is similar to UnderscoreEscaping, except that dots are + // converted to `_dot_` and pre-existing underscores are converted to `__`. + DotsEscaping + + // ValueEncodingEscaping prepends the name with `U__` and replaces all invalid + // characters with the unicode value, surrounded by underscores. Single + // underscores are replaced with double underscores. + ValueEncodingEscaping +) + +const ( + // EscapingKey is the key in an Accept or Content-Type header that defines how + // metric and label names that do not conform to the legacy character + // requirements should be escaped when being scraped by a legacy prometheus + // system. If a system does not explicitly pass an escaping parameter in the + // Accept header, the default NameEscapingScheme will be used. + EscapingKey = "escaping" + + // Possible values for Escaping Key: + AllowUTF8 = "allow-utf-8" // No escaping required. + EscapeUnderscores = "underscores" + EscapeDots = "dots" + EscapeValues = "values" +) + +// MetricNameRE is a regular expression matching valid metric +// names. Note that the IsValidMetricName function performs the same +// check but faster than a match with this regular expression. +var MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`) + // A Metric is similar to a LabelSet, but the key difference is that a Metric is // a singleton and refers to one and only one stream of samples. type Metric LabelSet @@ -86,17 +172,294 @@ func (m Metric) FastFingerprint() Fingerprint { return LabelSet(m).FastFingerprint() } -// IsValidMetricName returns true iff name matches the pattern of MetricNameRE. 
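// Example (editor's illustrative sketch, not part of the vendored change). The
// block above introduces ValidationScheme, flips the package default to
// UTF8Validation, and defines the escaping vocabulary used in content
// negotiation. The metric-name checks react to the global scheme like this:
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	model.NameValidationScheme = model.LegacyValidation
	fmt.Println(model.IsValidMetricName("http.requests")) // false

	model.NameValidationScheme = model.UTF8Validation
	fmt.Println(model.IsValidMetricName("http.requests")) // true

	// The legacy check stays available, independent of the global scheme:
	fmt.Println(model.IsValidLegacyMetricName("http_requests_total")) // true
}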
+// IsValidMetricName returns true iff name matches the pattern of MetricNameRE +// for legacy names, and iff it's valid UTF-8 if the UTF8Validation scheme is +// selected. +func IsValidMetricName(n LabelValue) bool { + switch NameValidationScheme { + case LegacyValidation: + return IsValidLegacyMetricName(string(n)) + case UTF8Validation: + if len(n) == 0 { + return false + } + return utf8.ValidString(string(n)) + default: + panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme)) + } +} + +// IsValidLegacyMetricName is similar to IsValidMetricName but always uses the +// legacy validation scheme regardless of the value of NameValidationScheme. // This function, however, does not use MetricNameRE for the check but a much // faster hardcoded implementation. -func IsValidMetricName(n LabelValue) bool { +func IsValidLegacyMetricName(n string) bool { if len(n) == 0 { return false } for i, b := range n { - if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0)) { + if !isValidLegacyRune(b, i) { return false } } return true } + +// EscapeMetricFamily escapes the given metric names and labels with the given +// escaping scheme. Returns a new object that uses the same pointers to fields +// when possible and creates new escaped versions so as not to mutate the +// input. +func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricFamily { + if v == nil { + return nil + } + + if scheme == NoEscaping { + return v + } + + out := &dto.MetricFamily{ + Help: v.Help, + Type: v.Type, + Unit: v.Unit, + } + + // If the name is nil, copy as-is, don't try to escape. + if v.Name == nil || IsValidLegacyMetricName(v.GetName()) { + out.Name = v.Name + } else { + out.Name = proto.String(EscapeName(v.GetName(), scheme)) + } + for _, m := range v.Metric { + if !metricNeedsEscaping(m) { + out.Metric = append(out.Metric, m) + continue + } + + escaped := &dto.Metric{ + Gauge: m.Gauge, + Counter: m.Counter, + Summary: m.Summary, + Untyped: m.Untyped, + Histogram: m.Histogram, + TimestampMs: m.TimestampMs, + } + + for _, l := range m.Label { + if l.GetName() == MetricNameLabel { + if l.Value == nil || IsValidLegacyMetricName(l.GetValue()) { + escaped.Label = append(escaped.Label, l) + continue + } + escaped.Label = append(escaped.Label, &dto.LabelPair{ + Name: proto.String(MetricNameLabel), + Value: proto.String(EscapeName(l.GetValue(), scheme)), + }) + continue + } + if l.Name == nil || IsValidLegacyMetricName(l.GetName()) { + escaped.Label = append(escaped.Label, l) + continue + } + escaped.Label = append(escaped.Label, &dto.LabelPair{ + Name: proto.String(EscapeName(l.GetName(), scheme)), + Value: l.Value, + }) + } + out.Metric = append(out.Metric, escaped) + } + return out +} + +func metricNeedsEscaping(m *dto.Metric) bool { + for _, l := range m.Label { + if l.GetName() == MetricNameLabel && !IsValidLegacyMetricName(l.GetValue()) { + return true + } + if !IsValidLegacyMetricName(l.GetName()) { + return true + } + } + return false +} + +// EscapeName escapes the incoming name according to the provided escaping +// scheme. Depending on the rules of escaping, this may cause no change in the +// string that is returned. (Especially NoEscaping, which by definition is a +// noop). This function does not do any validation of the name. 
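// Example (editor's illustrative sketch, not part of the vendored change).
// EscapeMetricFamily above returns an escaped copy and leaves the input
// untouched, reusing unescaped fields where it can. The metric name here is
// invented:
package main

import (
	"fmt"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/model"
	"google.golang.org/protobuf/proto"
)

func main() {
	in := &dto.MetricFamily{
		Name: proto.String("http.request.duration"),
		Type: dto.MetricType_COUNTER.Enum(),
	}
	out := model.EscapeMetricFamily(in, model.UnderscoreEscaping)
	fmt.Println(out.GetName()) // http_request_duration
	fmt.Println(in.GetName())  // http.request.duration (input not mutated)
}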
+func EscapeName(name string, scheme EscapingScheme) string {
+	if len(name) == 0 {
+		return name
+	}
+	var escaped strings.Builder
+	switch scheme {
+	case NoEscaping:
+		return name
+	case UnderscoreEscaping:
+		if IsValidLegacyMetricName(name) {
+			return name
+		}
+		for i, b := range name {
+			if isValidLegacyRune(b, i) {
+				escaped.WriteRune(b)
+			} else {
+				escaped.WriteRune('_')
+			}
+		}
+		return escaped.String()
+	case DotsEscaping:
+		// Do not early return for legacy valid names, we still escape underscores.
+		for i, b := range name {
+			if b == '_' {
+				escaped.WriteString("__")
+			} else if b == '.' {
+				escaped.WriteString("_dot_")
+			} else if isValidLegacyRune(b, i) {
+				escaped.WriteRune(b)
+			} else {
+				escaped.WriteString("__")
+			}
+		}
+		return escaped.String()
+	case ValueEncodingEscaping:
+		if IsValidLegacyMetricName(name) {
+			return name
+		}
+		escaped.WriteString("U__")
+		for i, b := range name {
+			if b == '_' {
+				escaped.WriteString("__")
+			} else if isValidLegacyRune(b, i) {
+				escaped.WriteRune(b)
+			} else if !utf8.ValidRune(b) {
+				escaped.WriteString("_FFFD_")
+			} else {
+				escaped.WriteRune('_')
+				escaped.WriteString(strconv.FormatInt(int64(b), 16))
+				escaped.WriteRune('_')
+			}
+		}
+		return escaped.String()
+	default:
+		panic(fmt.Sprintf("invalid escaping scheme %d", scheme))
+	}
+}
+
+// lower function taken from strconv.atoi
+func lower(c byte) byte {
+	return c | ('x' - 'X')
+}
+
+// UnescapeName unescapes the incoming name according to the provided escaping
+// scheme if possible. Some schemes are partially or totally non-roundtripable.
+// If any error is encountered, returns the original input.
+func UnescapeName(name string, scheme EscapingScheme) string {
+	if len(name) == 0 {
+		return name
+	}
+	switch scheme {
+	case NoEscaping:
+		return name
+	case UnderscoreEscaping:
+		// It is not possible to unescape from underscore replacement.
+		return name
+	case DotsEscaping:
+		name = strings.ReplaceAll(name, "_dot_", ".")
+		name = strings.ReplaceAll(name, "__", "_")
+		return name
+	case ValueEncodingEscaping:
+		escapedName, found := strings.CutPrefix(name, "U__")
+		if !found {
+			return name
+		}
+
+		var unescaped strings.Builder
+	TOP:
+		for i := 0; i < len(escapedName); i++ {
+			// All non-underscores are treated normally.
+			if escapedName[i] != '_' {
+				unescaped.WriteByte(escapedName[i])
+				continue
+			}
+			i++
+			if i >= len(escapedName) {
+				return name
+			}
+			// A double underscore is a single underscore.
+			if escapedName[i] == '_' {
+				unescaped.WriteByte('_')
+				continue
+			}
+			// We think we are in a UTF-8 code, process it.
+			var utf8Val uint
+			for j := 0; i < len(escapedName); j++ {
+				// This is too many characters for a utf8 value based on the MaxRune
+				// value of '\U0010FFFF'.
+				if j >= 6 {
+					return name
+				}
+				// Found a closing underscore, convert to a rune, check validity, and append.
+				if escapedName[i] == '_' {
+					utf8Rune := rune(utf8Val)
+					if !utf8.ValidRune(utf8Rune) {
+						return name
+					}
+					unescaped.WriteRune(utf8Rune)
+					continue TOP
+				}
+				r := lower(escapedName[i])
+				utf8Val *= 16
+				if r >= '0' && r <= '9' {
+					utf8Val += uint(r) - '0'
+				} else if r >= 'a' && r <= 'f' {
+					utf8Val += uint(r) - 'a' + 10
+				} else {
+					return name
+				}
+				i++
+			}
+			// Didn't find closing underscore, invalid.
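// Example (editor's illustrative sketch, not part of the vendored change).
// What the three escaping strategies above produce for one legacy-invalid
// name:
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	name := "my.metric"
	fmt.Println(model.EscapeName(name, model.UnderscoreEscaping))    // my_metric
	fmt.Println(model.EscapeName(name, model.DotsEscaping))          // my_dot_metric
	fmt.Println(model.EscapeName(name, model.ValueEncodingEscaping)) // U__my_2e_metric
}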
+ return name + } + return unescaped.String() + default: + panic(fmt.Sprintf("invalid escaping scheme %d", scheme)) + } +} + +func isValidLegacyRune(b rune, i int) bool { + return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0) +} + +func (e EscapingScheme) String() string { + switch e { + case NoEscaping: + return AllowUTF8 + case UnderscoreEscaping: + return EscapeUnderscores + case DotsEscaping: + return EscapeDots + case ValueEncodingEscaping: + return EscapeValues + default: + panic(fmt.Sprintf("unknown format scheme %d", e)) + } +} + +func ToEscapingScheme(s string) (EscapingScheme, error) { + if s == "" { + return NoEscaping, errors.New("got empty string instead of escaping scheme") + } + switch s { + case AllowUTF8: + return NoEscaping, nil + case EscapeUnderscores: + return UnderscoreEscaping, nil + case EscapeDots: + return DotsEscaping, nil + case EscapeValues: + return ValueEncodingEscaping, nil + default: + return NoEscaping, fmt.Errorf("unknown format scheme %s", s) + } +} diff --git a/tools/vendor/github.com/prometheus/common/model/signature.go b/tools/vendor/github.com/prometheus/common/model/signature.go index 8762b13c6..dc8a0026c 100644 --- a/tools/vendor/github.com/prometheus/common/model/signature.go +++ b/tools/vendor/github.com/prometheus/common/model/signature.go @@ -22,10 +22,8 @@ import ( // when calculating their combined hash value (aka signature aka fingerprint). const SeparatorByte byte = 255 -var ( - // cache the signature of an empty label set. - emptyLabelSignature = hashNew() -) +// cache the signature of an empty label set. +var emptyLabelSignature = hashNew() // LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a // given label set. (Collisions are possible but unlikely if the number of label diff --git a/tools/vendor/github.com/prometheus/common/model/silence.go b/tools/vendor/github.com/prometheus/common/model/silence.go index bb99889d2..8f91a9702 100644 --- a/tools/vendor/github.com/prometheus/common/model/silence.go +++ b/tools/vendor/github.com/prometheus/common/model/silence.go @@ -15,6 +15,7 @@ package model import ( "encoding/json" + "errors" "fmt" "regexp" "time" @@ -34,7 +35,7 @@ func (m *Matcher) UnmarshalJSON(b []byte) error { } if len(m.Name) == 0 { - return fmt.Errorf("label name in matcher must not be empty") + return errors.New("label name in matcher must not be empty") } if m.IsRegex { if _, err := regexp.Compile(m.Value); err != nil { @@ -77,30 +78,30 @@ type Silence struct { // Validate returns true iff all fields of the silence have valid values. 
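// Example (editor's illustrative sketch, not part of the vendored change).
// UnescapeName above round-trips value encoding and dots escaping, while
// underscore escaping is lossy by design; ToEscapingScheme maps the
// content-negotiation terms back onto the enum:
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	fmt.Println(model.UnescapeName("U__my_2e_metric", model.ValueEncodingEscaping)) // my.metric
	fmt.Println(model.UnescapeName("my_dot_metric", model.DotsEscaping))            // my.metric
	fmt.Println(model.UnescapeName("my_metric", model.UnderscoreEscaping))          // my_metric (cannot be undone)

	scheme, err := model.ToEscapingScheme("values")
	fmt.Println(scheme, err) // values <nil>
}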
func (s *Silence) Validate() error { if len(s.Matchers) == 0 { - return fmt.Errorf("at least one matcher required") + return errors.New("at least one matcher required") } for _, m := range s.Matchers { if err := m.Validate(); err != nil { - return fmt.Errorf("invalid matcher: %s", err) + return fmt.Errorf("invalid matcher: %w", err) } } if s.StartsAt.IsZero() { - return fmt.Errorf("start time missing") + return errors.New("start time missing") } if s.EndsAt.IsZero() { - return fmt.Errorf("end time missing") + return errors.New("end time missing") } if s.EndsAt.Before(s.StartsAt) { - return fmt.Errorf("start time must be before end time") + return errors.New("start time must be before end time") } if s.CreatedBy == "" { - return fmt.Errorf("creator information missing") + return errors.New("creator information missing") } if s.Comment == "" { - return fmt.Errorf("comment missing") + return errors.New("comment missing") } if s.CreatedAt.IsZero() { - return fmt.Errorf("creation timestamp missing") + return errors.New("creation timestamp missing") } return nil } diff --git a/tools/vendor/github.com/prometheus/common/model/time.go b/tools/vendor/github.com/prometheus/common/model/time.go index 7f67b16e4..5727452c1 100644 --- a/tools/vendor/github.com/prometheus/common/model/time.go +++ b/tools/vendor/github.com/prometheus/common/model/time.go @@ -18,7 +18,6 @@ import ( "errors" "fmt" "math" - "regexp" "strconv" "strings" "time" @@ -183,54 +182,78 @@ func (d *Duration) Type() string { return "duration" } -var durationRE = regexp.MustCompile("^(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?$") +func isdigit(c byte) bool { return c >= '0' && c <= '9' } + +// Units are required to go in order from biggest to smallest. +// This guards against confusion from "1m1d" being 1 minute + 1 day, not 1 month + 1 day. +var unitMap = map[string]struct { + pos int + mult uint64 +}{ + "ms": {7, uint64(time.Millisecond)}, + "s": {6, uint64(time.Second)}, + "m": {5, uint64(time.Minute)}, + "h": {4, uint64(time.Hour)}, + "d": {3, uint64(24 * time.Hour)}, + "w": {2, uint64(7 * 24 * time.Hour)}, + "y": {1, uint64(365 * 24 * time.Hour)}, +} // ParseDuration parses a string into a time.Duration, assuming that a year // always has 365d, a week always has 7d, and a day always has 24h. -func ParseDuration(durationStr string) (Duration, error) { - switch durationStr { +func ParseDuration(s string) (Duration, error) { + switch s { case "0": // Allow 0 without a unit. return 0, nil case "": - return 0, fmt.Errorf("empty duration string") - } - matches := durationRE.FindStringSubmatch(durationStr) - if matches == nil { - return 0, fmt.Errorf("not a valid duration string: %q", durationStr) + return 0, errors.New("empty duration string") } - var dur time.Duration - // Parse the match at pos `pos` in the regex and use `mult` to turn that - // into ms, then add that value to the total parsed duration. - var overflowErr error - m := func(pos int, mult time.Duration) { - if matches[pos] == "" { - return + orig := s + var dur uint64 + lastUnitPos := 0 + + for s != "" { + if !isdigit(s[0]) { + return 0, fmt.Errorf("not a valid duration string: %q", orig) + } + // Consume [0-9]* + i := 0 + for ; i < len(s) && isdigit(s[i]); i++ { + } + v, err := strconv.ParseUint(s[:i], 10, 0) + if err != nil { + return 0, fmt.Errorf("not a valid duration string: %q", orig) } - n, _ := strconv.Atoi(matches[pos]) + s = s[i:] + // Consume unit. 
+ for i = 0; i < len(s) && !isdigit(s[i]); i++ { + } + if i == 0 { + return 0, fmt.Errorf("not a valid duration string: %q", orig) + } + u := s[:i] + s = s[i:] + unit, ok := unitMap[u] + if !ok { + return 0, fmt.Errorf("unknown unit %q in duration %q", u, orig) + } + if unit.pos <= lastUnitPos { // Units must go in order from biggest to smallest. + return 0, fmt.Errorf("not a valid duration string: %q", orig) + } + lastUnitPos = unit.pos // Check if the provided duration overflows time.Duration (> ~ 290years). - if n > int((1<<63-1)/mult/time.Millisecond) { - overflowErr = errors.New("duration out of range") + if v > 1<<63/unit.mult { + return 0, errors.New("duration out of range") } - d := time.Duration(n) * time.Millisecond - dur += d * mult - - if dur < 0 { - overflowErr = errors.New("duration out of range") + dur += v * unit.mult + if dur > 1<<63-1 { + return 0, errors.New("duration out of range") } } - - m(2, 1000*60*60*24*365) // y - m(4, 1000*60*60*24*7) // w - m(6, 1000*60*60*24) // d - m(8, 1000*60*60) // h - m(10, 1000*60) // m - m(12, 1000) // s - m(14, 1) // ms - - return Duration(dur), overflowErr + return Duration(dur), nil } func (d Duration) String() string { diff --git a/tools/vendor/github.com/prometheus/common/model/value.go b/tools/vendor/github.com/prometheus/common/model/value.go index c9d8fb1a2..8050637d8 100644 --- a/tools/vendor/github.com/prometheus/common/model/value.go +++ b/tools/vendor/github.com/prometheus/common/model/value.go @@ -16,104 +16,26 @@ package model import ( "encoding/json" "fmt" - "math" "sort" "strconv" "strings" ) -var ( - // ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a - // non-existing sample pair. It is a SamplePair with timestamp Earliest and - // value 0.0. Note that the natural zero value of SamplePair has a timestamp - // of 0, which is possible to appear in a real SamplePair and thus not - // suitable to signal a non-existing SamplePair. - ZeroSamplePair = SamplePair{Timestamp: Earliest} - - // ZeroSample is the pseudo zero-value of Sample used to signal a - // non-existing sample. It is a Sample with timestamp Earliest, value 0.0, - // and metric nil. Note that the natural zero value of Sample has a timestamp - // of 0, which is possible to appear in a real Sample and thus not suitable - // to signal a non-existing Sample. - ZeroSample = Sample{Timestamp: Earliest} -) - -// A SampleValue is a representation of a value for a given sample at a given -// time. -type SampleValue float64 - -// MarshalJSON implements json.Marshaler. -func (v SampleValue) MarshalJSON() ([]byte, error) { - return json.Marshal(v.String()) -} - -// UnmarshalJSON implements json.Unmarshaler. -func (v *SampleValue) UnmarshalJSON(b []byte) error { - if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { - return fmt.Errorf("sample value must be a quoted string") - } - f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64) - if err != nil { - return err - } - *v = SampleValue(f) - return nil -} - -// Equal returns true if the value of v and o is equal or if both are NaN. Note -// that v==o is false if both are NaN. If you want the conventional float -// behavior, use == to compare two SampleValues. -func (v SampleValue) Equal(o SampleValue) bool { - if v == o { - return true - } - return math.IsNaN(float64(v)) && math.IsNaN(float64(o)) -} - -func (v SampleValue) String() string { - return strconv.FormatFloat(float64(v), 'f', -1, 64) -} - -// SamplePair pairs a SampleValue with a Timestamp. 
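// Example (editor's illustrative sketch, not part of the vendored change). The
// hand-rolled ParseDuration above replaces the old regexp and enforces that
// units appear from biggest to smallest:
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	d, err := model.ParseDuration("1h30m")
	fmt.Println(d, err) // 1h30m <nil>

	_, err = model.ParseDuration("30m1h") // smaller unit before a bigger one
	fmt.Println(err) // not a valid duration string: "30m1h"

	_, err = model.ParseDuration("1000000000000y") // overflows time.Duration
	fmt.Println(err) // duration out of range
}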
-type SamplePair struct { - Timestamp Time - Value SampleValue -} - -// MarshalJSON implements json.Marshaler. -func (s SamplePair) MarshalJSON() ([]byte, error) { - t, err := json.Marshal(s.Timestamp) - if err != nil { - return nil, err - } - v, err := json.Marshal(s.Value) - if err != nil { - return nil, err - } - return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (s *SamplePair) UnmarshalJSON(b []byte) error { - v := [...]json.Unmarshaler{&s.Timestamp, &s.Value} - return json.Unmarshal(b, &v) -} - -// Equal returns true if this SamplePair and o have equal Values and equal -// Timestamps. The semantics of Value equality is defined by SampleValue.Equal. -func (s *SamplePair) Equal(o *SamplePair) bool { - return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp)) -} - -func (s SamplePair) String() string { - return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp) -} +// ZeroSample is the pseudo zero-value of Sample used to signal a +// non-existing sample. It is a Sample with timestamp Earliest, value 0.0, +// and metric nil. Note that the natural zero value of Sample has a timestamp +// of 0, which is possible to appear in a real Sample and thus not suitable +// to signal a non-existing Sample. +var ZeroSample = Sample{Timestamp: Earliest} -// Sample is a sample pair associated with a metric. +// Sample is a sample pair associated with a metric. A single sample must either +// define Value or Histogram but not both. Histogram == nil implies the Value +// field is used, otherwise it should be ignored. type Sample struct { - Metric Metric `json:"metric"` - Value SampleValue `json:"value"` - Timestamp Time `json:"timestamp"` + Metric Metric `json:"metric"` + Value SampleValue `json:"value"` + Timestamp Time `json:"timestamp"` + Histogram *SampleHistogram `json:"histogram"` } // Equal compares first the metrics, then the timestamp, then the value. The @@ -129,11 +51,19 @@ func (s *Sample) Equal(o *Sample) bool { if !s.Timestamp.Equal(o.Timestamp) { return false } - + if s.Histogram != nil { + return s.Histogram.Equal(o.Histogram) + } return s.Value.Equal(o.Value) } func (s Sample) String() string { + if s.Histogram != nil { + return fmt.Sprintf("%s => %s", s.Metric, SampleHistogramPair{ + Timestamp: s.Timestamp, + Histogram: s.Histogram, + }) + } return fmt.Sprintf("%s => %s", s.Metric, SamplePair{ Timestamp: s.Timestamp, Value: s.Value, @@ -142,6 +72,19 @@ func (s Sample) String() string { // MarshalJSON implements json.Marshaler. func (s Sample) MarshalJSON() ([]byte, error) { + if s.Histogram != nil { + v := struct { + Metric Metric `json:"metric"` + Histogram SampleHistogramPair `json:"histogram"` + }{ + Metric: s.Metric, + Histogram: SampleHistogramPair{ + Timestamp: s.Timestamp, + Histogram: s.Histogram, + }, + } + return json.Marshal(&v) + } v := struct { Metric Metric `json:"metric"` Value SamplePair `json:"value"` @@ -152,21 +95,25 @@ func (s Sample) MarshalJSON() ([]byte, error) { Value: s.Value, }, } - return json.Marshal(&v) } // UnmarshalJSON implements json.Unmarshaler. 
func (s *Sample) UnmarshalJSON(b []byte) error { v := struct { - Metric Metric `json:"metric"` - Value SamplePair `json:"value"` + Metric Metric `json:"metric"` + Value SamplePair `json:"value"` + Histogram SampleHistogramPair `json:"histogram"` }{ Metric: s.Metric, Value: SamplePair{ Timestamp: s.Timestamp, Value: s.Value, }, + Histogram: SampleHistogramPair{ + Timestamp: s.Timestamp, + Histogram: s.Histogram, + }, } if err := json.Unmarshal(b, &v); err != nil { @@ -174,8 +121,13 @@ func (s *Sample) UnmarshalJSON(b []byte) error { } s.Metric = v.Metric - s.Timestamp = v.Value.Timestamp - s.Value = v.Value.Value + if v.Histogram.Histogram != nil { + s.Timestamp = v.Histogram.Timestamp + s.Histogram = v.Histogram.Histogram + } else { + s.Timestamp = v.Value.Timestamp + s.Value = v.Value.Value + } return nil } @@ -221,80 +173,76 @@ func (s Samples) Equal(o Samples) bool { // SampleStream is a stream of Values belonging to an attached COWMetric. type SampleStream struct { - Metric Metric `json:"metric"` - Values []SamplePair `json:"values"` + Metric Metric `json:"metric"` + Values []SamplePair `json:"values"` + Histograms []SampleHistogramPair `json:"histograms"` } func (ss SampleStream) String() string { - vals := make([]string, len(ss.Values)) + valuesLength := len(ss.Values) + vals := make([]string, valuesLength+len(ss.Histograms)) for i, v := range ss.Values { vals[i] = v.String() } + for i, v := range ss.Histograms { + vals[i+valuesLength] = v.String() + } return fmt.Sprintf("%s =>\n%s", ss.Metric, strings.Join(vals, "\n")) } -// Value is a generic interface for values resulting from a query evaluation. -type Value interface { - Type() ValueType - String() string +func (ss SampleStream) MarshalJSON() ([]byte, error) { + if len(ss.Histograms) > 0 && len(ss.Values) > 0 { + v := struct { + Metric Metric `json:"metric"` + Values []SamplePair `json:"values"` + Histograms []SampleHistogramPair `json:"histograms"` + }{ + Metric: ss.Metric, + Values: ss.Values, + Histograms: ss.Histograms, + } + return json.Marshal(&v) + } else if len(ss.Histograms) > 0 { + v := struct { + Metric Metric `json:"metric"` + Histograms []SampleHistogramPair `json:"histograms"` + }{ + Metric: ss.Metric, + Histograms: ss.Histograms, + } + return json.Marshal(&v) + } else { + v := struct { + Metric Metric `json:"metric"` + Values []SamplePair `json:"values"` + }{ + Metric: ss.Metric, + Values: ss.Values, + } + return json.Marshal(&v) + } } -func (Matrix) Type() ValueType { return ValMatrix } -func (Vector) Type() ValueType { return ValVector } -func (*Scalar) Type() ValueType { return ValScalar } -func (*String) Type() ValueType { return ValString } - -type ValueType int - -const ( - ValNone ValueType = iota - ValScalar - ValVector - ValMatrix - ValString -) - -// MarshalJSON implements json.Marshaler. 
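// Example (editor's illustrative sketch, not part of the vendored change). A
// Sample now carries either a float Value or a *SampleHistogram, and the JSON
// codec above switches between a "value" and a "histogram" member accordingly:
package main

import (
	"encoding/json"
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	s := model.Sample{
		Metric:    model.Metric{"__name__": "up"},
		Timestamp: 1000, // milliseconds
		Value:     1,
	}
	b, _ := json.Marshal(s)
	fmt.Println(string(b)) // {"metric":{"__name__":"up"},"value":[1,"1"]}

	var back model.Sample
	_ = json.Unmarshal(b, &back)
	fmt.Println(back.Histogram == nil) // true: float samples stay float samples
}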
-func (et ValueType) MarshalJSON() ([]byte, error) { - return json.Marshal(et.String()) -} +func (ss *SampleStream) UnmarshalJSON(b []byte) error { + v := struct { + Metric Metric `json:"metric"` + Values []SamplePair `json:"values"` + Histograms []SampleHistogramPair `json:"histograms"` + }{ + Metric: ss.Metric, + Values: ss.Values, + Histograms: ss.Histograms, + } -func (et *ValueType) UnmarshalJSON(b []byte) error { - var s string - if err := json.Unmarshal(b, &s); err != nil { + if err := json.Unmarshal(b, &v); err != nil { return err } - switch s { - case "": - *et = ValNone - case "scalar": - *et = ValScalar - case "vector": - *et = ValVector - case "matrix": - *et = ValMatrix - case "string": - *et = ValString - default: - return fmt.Errorf("unknown value type %q", s) - } - return nil -} -func (e ValueType) String() string { - switch e { - case ValNone: - return "" - case ValScalar: - return "scalar" - case ValVector: - return "vector" - case ValMatrix: - return "matrix" - case ValString: - return "string" - } - panic("ValueType.String: unhandled value type") + ss.Metric = v.Metric + ss.Values = v.Values + ss.Histograms = v.Histograms + + return nil } // Scalar is a scalar value evaluated at the set timestamp. @@ -324,7 +272,7 @@ func (s *Scalar) UnmarshalJSON(b []byte) error { value, err := strconv.ParseFloat(f, 64) if err != nil { - return fmt.Errorf("error parsing sample value: %s", err) + return fmt.Errorf("error parsing sample value: %w", err) } s.Value = SampleValue(value) return nil diff --git a/tools/vendor/github.com/prometheus/common/model/value_float.go b/tools/vendor/github.com/prometheus/common/model/value_float.go new file mode 100644 index 000000000..6bfc757d1 --- /dev/null +++ b/tools/vendor/github.com/prometheus/common/model/value_float.go @@ -0,0 +1,99 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "errors" + "fmt" + "math" + "strconv" +) + +// ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a +// non-existing sample pair. It is a SamplePair with timestamp Earliest and +// value 0.0. Note that the natural zero value of SamplePair has a timestamp +// of 0, which is possible to appear in a real SamplePair and thus not +// suitable to signal a non-existing SamplePair. +var ZeroSamplePair = SamplePair{Timestamp: Earliest} + +// A SampleValue is a representation of a value for a given sample at a given +// time. +type SampleValue float64 + +// MarshalJSON implements json.Marshaler. +func (v SampleValue) MarshalJSON() ([]byte, error) { + return json.Marshal(v.String()) +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (v *SampleValue) UnmarshalJSON(b []byte) error { + if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { + return errors.New("sample value must be a quoted string") + } + f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64) + if err != nil { + return err + } + *v = SampleValue(f) + return nil +} + +// Equal returns true if the value of v and o is equal or if both are NaN. Note +// that v==o is false if both are NaN. If you want the conventional float +// behavior, use == to compare two SampleValues. +func (v SampleValue) Equal(o SampleValue) bool { + if v == o { + return true + } + return math.IsNaN(float64(v)) && math.IsNaN(float64(o)) +} + +func (v SampleValue) String() string { + return strconv.FormatFloat(float64(v), 'f', -1, 64) +} + +// SamplePair pairs a SampleValue with a Timestamp. +type SamplePair struct { + Timestamp Time + Value SampleValue +} + +func (s SamplePair) MarshalJSON() ([]byte, error) { + t, err := json.Marshal(s.Timestamp) + if err != nil { + return nil, err + } + v, err := json.Marshal(s.Value) + if err != nil { + return nil, err + } + return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (s *SamplePair) UnmarshalJSON(b []byte) error { + v := [...]json.Unmarshaler{&s.Timestamp, &s.Value} + return json.Unmarshal(b, &v) +} + +// Equal returns true if this SamplePair and o have equal Values and equal +// Timestamps. The semantics of Value equality is defined by SampleValue.Equal. +func (s *SamplePair) Equal(o *SamplePair) bool { + return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp)) +} + +func (s SamplePair) String() string { + return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp) +} diff --git a/tools/vendor/github.com/prometheus/common/model/value_histogram.go b/tools/vendor/github.com/prometheus/common/model/value_histogram.go new file mode 100644 index 000000000..895e6a3e8 --- /dev/null +++ b/tools/vendor/github.com/prometheus/common/model/value_histogram.go @@ -0,0 +1,179 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
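// Example (editor's illustrative sketch, not part of the vendored change). The
// SampleValue code moved into this new file keeps its two quirks: JSON values
// are quoted strings, and Equal deliberately treats NaN as equal to NaN:
package main

import (
	"fmt"
	"math"

	"github.com/prometheus/common/model"
)

func main() {
	var v model.SampleValue
	_ = v.UnmarshalJSON([]byte(`"NaN"`))

	nan := model.SampleValue(math.NaN())
	fmt.Println(v.Equal(nan)) // true
	fmt.Println(v == nan)     // false, standard float comparison
}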
+ +package model + +import ( + "encoding/json" + "errors" + "fmt" + "strconv" + "strings" +) + +type FloatString float64 + +func (v FloatString) String() string { + return strconv.FormatFloat(float64(v), 'f', -1, 64) +} + +func (v FloatString) MarshalJSON() ([]byte, error) { + return json.Marshal(v.String()) +} + +func (v *FloatString) UnmarshalJSON(b []byte) error { + if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { + return errors.New("float value must be a quoted string") + } + f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64) + if err != nil { + return err + } + *v = FloatString(f) + return nil +} + +type HistogramBucket struct { + Boundaries int32 + Lower FloatString + Upper FloatString + Count FloatString +} + +func (s HistogramBucket) MarshalJSON() ([]byte, error) { + b, err := json.Marshal(s.Boundaries) + if err != nil { + return nil, err + } + l, err := json.Marshal(s.Lower) + if err != nil { + return nil, err + } + u, err := json.Marshal(s.Upper) + if err != nil { + return nil, err + } + c, err := json.Marshal(s.Count) + if err != nil { + return nil, err + } + return []byte(fmt.Sprintf("[%s,%s,%s,%s]", b, l, u, c)), nil +} + +func (s *HistogramBucket) UnmarshalJSON(buf []byte) error { + tmp := []interface{}{&s.Boundaries, &s.Lower, &s.Upper, &s.Count} + wantLen := len(tmp) + if err := json.Unmarshal(buf, &tmp); err != nil { + return err + } + if gotLen := len(tmp); gotLen != wantLen { + return fmt.Errorf("wrong number of fields: %d != %d", gotLen, wantLen) + } + return nil +} + +func (s *HistogramBucket) Equal(o *HistogramBucket) bool { + return s == o || (s.Boundaries == o.Boundaries && s.Lower == o.Lower && s.Upper == o.Upper && s.Count == o.Count) +} + +func (b HistogramBucket) String() string { + var sb strings.Builder + lowerInclusive := b.Boundaries == 1 || b.Boundaries == 3 + upperInclusive := b.Boundaries == 0 || b.Boundaries == 3 + if lowerInclusive { + sb.WriteRune('[') + } else { + sb.WriteRune('(') + } + fmt.Fprintf(&sb, "%g,%g", b.Lower, b.Upper) + if upperInclusive { + sb.WriteRune(']') + } else { + sb.WriteRune(')') + } + fmt.Fprintf(&sb, ":%v", b.Count) + return sb.String() +} + +type HistogramBuckets []*HistogramBucket + +func (s HistogramBuckets) Equal(o HistogramBuckets) bool { + if len(s) != len(o) { + return false + } + + for i, bucket := range s { + if !bucket.Equal(o[i]) { + return false + } + } + return true +} + +type SampleHistogram struct { + Count FloatString `json:"count"` + Sum FloatString `json:"sum"` + Buckets HistogramBuckets `json:"buckets"` +} + +func (s SampleHistogram) String() string { + return fmt.Sprintf("Count: %f, Sum: %f, Buckets: %v", s.Count, s.Sum, s.Buckets) +} + +func (s *SampleHistogram) Equal(o *SampleHistogram) bool { + return s == o || (s.Count == o.Count && s.Sum == o.Sum && s.Buckets.Equal(o.Buckets)) +} + +type SampleHistogramPair struct { + Timestamp Time + // Histogram should never be nil, it's only stored as pointer for efficiency. 
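// Example (editor's illustrative sketch, not part of the vendored change). A
// bucket's Boundaries field encodes inclusivity (0: upper only, 1: lower only,
// 2: neither, 3: both), which the String method above renders as interval
// notation:
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	b := model.HistogramBucket{Boundaries: 0, Lower: 1, Upper: 2, Count: 5}
	fmt.Println(b) // (1,2]:5
}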
+ Histogram *SampleHistogram +} + +func (s SampleHistogramPair) MarshalJSON() ([]byte, error) { + if s.Histogram == nil { + return nil, errors.New("histogram is nil") + } + t, err := json.Marshal(s.Timestamp) + if err != nil { + return nil, err + } + v, err := json.Marshal(s.Histogram) + if err != nil { + return nil, err + } + return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil +} + +func (s *SampleHistogramPair) UnmarshalJSON(buf []byte) error { + tmp := []interface{}{&s.Timestamp, &s.Histogram} + wantLen := len(tmp) + if err := json.Unmarshal(buf, &tmp); err != nil { + return err + } + if gotLen := len(tmp); gotLen != wantLen { + return fmt.Errorf("wrong number of fields: %d != %d", gotLen, wantLen) + } + if s.Histogram == nil { + return errors.New("histogram is null") + } + return nil +} + +func (s SampleHistogramPair) String() string { + return fmt.Sprintf("%s @[%s]", s.Histogram, s.Timestamp) +} + +func (s *SampleHistogramPair) Equal(o *SampleHistogramPair) bool { + return s == o || (s.Histogram.Equal(o.Histogram) && s.Timestamp.Equal(o.Timestamp)) +} diff --git a/tools/vendor/github.com/prometheus/common/model/value_type.go b/tools/vendor/github.com/prometheus/common/model/value_type.go new file mode 100644 index 000000000..726c50ee6 --- /dev/null +++ b/tools/vendor/github.com/prometheus/common/model/value_type.go @@ -0,0 +1,83 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "fmt" +) + +// Value is a generic interface for values resulting from a query evaluation. +type Value interface { + Type() ValueType + String() string +} + +func (Matrix) Type() ValueType { return ValMatrix } +func (Vector) Type() ValueType { return ValVector } +func (*Scalar) Type() ValueType { return ValScalar } +func (*String) Type() ValueType { return ValString } + +type ValueType int + +const ( + ValNone ValueType = iota + ValScalar + ValVector + ValMatrix + ValString +) + +// MarshalJSON implements json.Marshaler. 
+func (et ValueType) MarshalJSON() ([]byte, error) { + return json.Marshal(et.String()) +} + +func (et *ValueType) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + switch s { + case "": + *et = ValNone + case "scalar": + *et = ValScalar + case "vector": + *et = ValVector + case "matrix": + *et = ValMatrix + case "string": + *et = ValString + default: + return fmt.Errorf("unknown value type %q", s) + } + return nil +} + +func (e ValueType) String() string { + switch e { + case ValNone: + return "" + case ValScalar: + return "scalar" + case ValVector: + return "vector" + case ValMatrix: + return "matrix" + case ValString: + return "string" + } + panic("ValueType.String: unhandled value type") +} diff --git a/tools/vendor/github.com/prometheus/procfs/.gitignore b/tools/vendor/github.com/prometheus/procfs/.gitignore index 25e3659ab..7cc33ae4a 100644 --- a/tools/vendor/github.com/prometheus/procfs/.gitignore +++ b/tools/vendor/github.com/prometheus/procfs/.gitignore @@ -1 +1,2 @@ -/fixtures/ +/testdata/fixtures/ +/fixtures diff --git a/tools/vendor/github.com/prometheus/procfs/.golangci.yml b/tools/vendor/github.com/prometheus/procfs/.golangci.yml index 0aa09edac..b43e09f68 100644 --- a/tools/vendor/github.com/prometheus/procfs/.golangci.yml +++ b/tools/vendor/github.com/prometheus/procfs/.golangci.yml @@ -1,4 +1,31 @@ --- linters: enable: - - golint + - errcheck + - forbidigo + - godot + - gofmt + - goimports + - gosimple + - govet + - ineffassign + - misspell + - revive + - staticcheck + - testifylint + - unused + +linters-settings: + forbidigo: + forbid: + - p: ^fmt\.Print.*$ + msg: Do not commit print statements. + godot: + capital: true + exclude: + # Ignore "See: URL" + - 'See:' + goimports: + local-prefixes: github.com/prometheus/procfs + misspell: + locale: US diff --git a/tools/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md b/tools/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md index 9a1aff412..d325872bd 100644 --- a/tools/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md +++ b/tools/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md @@ -1,3 +1,3 @@ -## Prometheus Community Code of Conduct +# Prometheus Community Code of Conduct -Prometheus follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md). +Prometheus follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md). diff --git a/tools/vendor/github.com/prometheus/procfs/CONTRIBUTING.md b/tools/vendor/github.com/prometheus/procfs/CONTRIBUTING.md index 943de7615..853eb9d49 100644 --- a/tools/vendor/github.com/prometheus/procfs/CONTRIBUTING.md +++ b/tools/vendor/github.com/prometheus/procfs/CONTRIBUTING.md @@ -97,7 +97,7 @@ Many of the files are changing continuously and the data being read can in some reads in the same file. Also, most of the files are relatively small (less than a few KBs), and system calls to the `stat` function will often return the wrong size. Therefore, for most files it's recommended to read the full file in a single operation using an internal utility function called `util.ReadFileNoStat`. -This function is similar to `ioutil.ReadFile`, but it avoids the system call to `stat` to get the current size of +This function is similar to `os.ReadFile`, but it avoids the system call to `stat` to get the current size of the file. Note that parsing the file's contents can still be performed one line at a time. 
This is done by first reading @@ -113,7 +113,7 @@ the full file, and then using a scanner on the `[]byte` or `string` containing t ``` The `/sys` filesystem contains many very small files which contain only a single numeric or text value. These files -can be read using an internal function called `util.SysReadFile` which is similar to `ioutil.ReadFile` but does +can be read using an internal function called `util.SysReadFile` which is similar to `os.ReadFile` but does not bother to check the size of the file before reading. ``` data, err := util.SysReadFile("/sys/class/power_supply/BAT0/capacity") diff --git a/tools/vendor/github.com/prometheus/procfs/MAINTAINERS.md b/tools/vendor/github.com/prometheus/procfs/MAINTAINERS.md index 56ba67d3e..e00f3b365 100644 --- a/tools/vendor/github.com/prometheus/procfs/MAINTAINERS.md +++ b/tools/vendor/github.com/prometheus/procfs/MAINTAINERS.md @@ -1,2 +1,3 @@ * Johannes 'fish' Ziemke @discordianfish -* Paul Gier @pgier +* Paul Gier @pgier +* Ben Kochie @SuperQ diff --git a/tools/vendor/github.com/prometheus/procfs/Makefile b/tools/vendor/github.com/prometheus/procfs/Makefile index fa2bd5b52..7edfe4d09 100644 --- a/tools/vendor/github.com/prometheus/procfs/Makefile +++ b/tools/vendor/github.com/prometheus/procfs/Makefile @@ -14,18 +14,18 @@ include Makefile.common %/.unpacked: %.ttar - @echo ">> extracting fixtures" + @echo ">> extracting fixtures $*" ./ttar -C $(dir $*) -x -f $*.ttar touch $@ -fixtures: fixtures/.unpacked +fixtures: testdata/fixtures/.unpacked update_fixtures: - rm -vf fixtures/.unpacked - ./ttar -c -f fixtures.ttar fixtures/ + rm -vf testdata/fixtures/.unpacked + ./ttar -c -f testdata/fixtures.ttar -C testdata/ fixtures/ .PHONY: build build: .PHONY: test -test: fixtures/.unpacked common-test +test: testdata/fixtures/.unpacked common-test diff --git a/tools/vendor/github.com/prometheus/procfs/Makefile.common b/tools/vendor/github.com/prometheus/procfs/Makefile.common index a1b1ca40f..cbb5d8638 100644 --- a/tools/vendor/github.com/prometheus/procfs/Makefile.common +++ b/tools/vendor/github.com/prometheus/procfs/Makefile.common @@ -36,29 +36,6 @@ GO_VERSION ?= $(shell $(GO) version) GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION)) PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.') -GOVENDOR := -GO111MODULE := -ifeq (, $(PRE_GO_111)) - ifneq (,$(wildcard go.mod)) - # Enforce Go modules support just in case the directory is inside GOPATH (and for Travis CI). - GO111MODULE := on - - ifneq (,$(wildcard vendor)) - # Always use the local vendor/ directory to satisfy the dependencies. - GOOPTS := $(GOOPTS) -mod=vendor - endif - endif -else - ifneq (,$(wildcard go.mod)) - ifneq (,$(wildcard vendor)) -$(warning This repository requires Go >= 1.11 because of Go modules) -$(warning Some recipes may not work as expected as the current Go runtime is '$(GO_VERSION_NUMBER)') - endif - else - # This repository isn't using Go modules (yet). - GOVENDOR := $(FIRST_GOPATH)/bin/govendor - endif -endif PROMU := $(FIRST_GOPATH)/bin/promu pkgs = ./... 
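// Example (editor's illustrative sketch, not part of the vendored change). The
// CONTRIBUTING.md text above recommends reading small /proc and /sys files in
// one call because stat reports useless sizes there. util.SysReadFile is
// internal to procfs, so this is a standalone re-implementation of the pattern
// for illustration only; the path may not exist on every machine:
package main

import (
	"fmt"
	"os"
	"strings"
)

func sysReadFile(file string) (string, error) {
	f, err := os.Open(file)
	if err != nil {
		return "", err
	}
	defer f.Close()

	// Don't trust stat: read into a fixed buffer in a single syscall.
	b := make([]byte, 128)
	n, err := f.Read(b)
	if err != nil {
		return "", err
	}
	return strings.TrimSpace(string(b[:n])), nil
}

func main() {
	v, err := sysReadFile("/sys/class/power_supply/BAT0/capacity")
	fmt.Println(v, err)
}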
@@ -72,23 +49,32 @@ endif GOTEST := $(GO) test GOTEST_DIR := ifneq ($(CIRCLE_JOB),) -ifneq ($(shell which gotestsum),) +ifneq ($(shell command -v gotestsum 2> /dev/null),) GOTEST_DIR := test-results GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml -- endif endif -PROMU_VERSION ?= 0.12.0 +PROMU_VERSION ?= 0.17.0 PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz +SKIP_GOLANGCI_LINT := GOLANGCI_LINT := GOLANGCI_LINT_OPTS ?= -GOLANGCI_LINT_VERSION ?= v1.39.0 -# golangci-lint only supports linux, darwin and windows platforms on i386/amd64. +GOLANGCI_LINT_VERSION ?= v1.60.2 +# golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64. # windows isn't included here because of the path separator being different. ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) - ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386)) - GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint + ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386 arm64)) + # If we're in CI and there is an Actions file, that means the linter + # is being run in Actions, so we don't need to run it here. + ifneq (,$(SKIP_GOLANGCI_LINT)) + GOLANGCI_LINT := + else ifeq (,$(CIRCLE_JOB)) + GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint + else ifeq (,$(wildcard .github/workflows/golangci-lint.yml)) + GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint + endif endif endif @@ -105,6 +91,8 @@ BUILD_DOCKER_ARCHS = $(addprefix common-docker-,$(DOCKER_ARCHS)) PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS)) TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS)) +SANITIZED_DOCKER_IMAGE_TAG := $(subst +,-,$(DOCKER_IMAGE_TAG)) + ifeq ($(GOHOSTARCH),amd64) ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows)) # Only supported on amd64 @@ -144,32 +132,25 @@ common-check_license: .PHONY: common-deps common-deps: @echo ">> getting dependencies" -ifdef GO111MODULE - GO111MODULE=$(GO111MODULE) $(GO) mod download -else - $(GO) get $(GOOPTS) -t ./... -endif + $(GO) mod download .PHONY: update-go-deps update-go-deps: @echo ">> updating Go dependencies" @for m in $$($(GO) list -mod=readonly -m -f '{{ if and (not .Indirect) (not .Main)}}{{.Path}}{{end}}' all); do \ - $(GO) get $$m; \ + $(GO) get -d $$m; \ done - GO111MODULE=$(GO111MODULE) $(GO) mod tidy -ifneq (,$(wildcard vendor)) - GO111MODULE=$(GO111MODULE) $(GO) mod vendor -endif + $(GO) mod tidy .PHONY: common-test-short common-test-short: $(GOTEST_DIR) @echo ">> running short tests" - GO111MODULE=$(GO111MODULE) $(GOTEST) -short $(GOOPTS) $(pkgs) + $(GOTEST) -short $(GOOPTS) $(pkgs) .PHONY: common-test common-test: $(GOTEST_DIR) @echo ">> running all tests" - GO111MODULE=$(GO111MODULE) $(GOTEST) $(test-flags) $(GOOPTS) $(pkgs) + $(GOTEST) $(test-flags) $(GOOPTS) $(pkgs) $(GOTEST_DIR): @mkdir -p $@ @@ -177,31 +158,31 @@ $(GOTEST_DIR): .PHONY: common-format common-format: @echo ">> formatting code" - GO111MODULE=$(GO111MODULE) $(GO) fmt $(pkgs) + $(GO) fmt $(pkgs) .PHONY: common-vet common-vet: @echo ">> vetting code" - GO111MODULE=$(GO111MODULE) $(GO) vet $(GOOPTS) $(pkgs) + $(GO) vet $(GOOPTS) $(pkgs) .PHONY: common-lint common-lint: $(GOLANGCI_LINT) ifdef GOLANGCI_LINT @echo ">> running golangci-lint" -ifdef GO111MODULE -# 'go list' needs to be executed before staticcheck to prepopulate the modules cache. -# Otherwise staticcheck might fail randomly for some reason not yet explained. 
- GO111MODULE=$(GO111MODULE) $(GO) list -e -compiled -test=true -export=false -deps=true -find=false -tags= -- ./... > /dev/null - GO111MODULE=$(GO111MODULE) $(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs) -else - $(GOLANGCI_LINT) run $(pkgs) + $(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs) endif + +.PHONY: common-lint-fix +common-lint-fix: $(GOLANGCI_LINT) +ifdef GOLANGCI_LINT + @echo ">> running golangci-lint fix" + $(GOLANGCI_LINT) run --fix $(GOLANGCI_LINT_OPTS) $(pkgs) endif .PHONY: common-yamllint common-yamllint: @echo ">> running yamllint on all YAML files in the repository" -ifeq (, $(shell which yamllint)) +ifeq (, $(shell command -v yamllint 2> /dev/null)) @echo "yamllint not installed so skipping" else yamllint . @@ -212,38 +193,29 @@ endif common-staticcheck: lint .PHONY: common-unused -common-unused: $(GOVENDOR) -ifdef GOVENDOR - @echo ">> running check for unused packages" - @$(GOVENDOR) list +unused | grep . && exit 1 || echo 'No unused packages' -else -ifdef GO111MODULE +common-unused: @echo ">> running check for unused/missing packages in go.mod" - GO111MODULE=$(GO111MODULE) $(GO) mod tidy -ifeq (,$(wildcard vendor)) + $(GO) mod tidy @git diff --exit-code -- go.sum go.mod -else - @echo ">> running check for unused packages in vendor/" - GO111MODULE=$(GO111MODULE) $(GO) mod vendor - @git diff --exit-code -- go.sum go.mod vendor/ -endif -endif -endif .PHONY: common-build common-build: promu @echo ">> building binaries" - GO111MODULE=$(GO111MODULE) $(PROMU) build --prefix $(PREFIX) $(PROMU_BINARIES) + $(PROMU) build --prefix $(PREFIX) $(PROMU_BINARIES) .PHONY: common-tarball common-tarball: promu @echo ">> building release tarball" $(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR) +.PHONY: common-docker-repo-name +common-docker-repo-name: + @echo "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)" + .PHONY: common-docker $(BUILD_DOCKER_ARCHS) common-docker: $(BUILD_DOCKER_ARCHS) $(BUILD_DOCKER_ARCHS): common-docker-%: - docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" \ + docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" \ -f $(DOCKERFILE_PATH) \ --build-arg ARCH="$*" \ --build-arg OS="linux" \ @@ -252,19 +224,19 @@ $(BUILD_DOCKER_ARCHS): common-docker-%: .PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS) common-docker-publish: $(PUBLISH_DOCKER_ARCHS) $(PUBLISH_DOCKER_ARCHS): common-docker-publish-%: - docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" + docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" DOCKER_MAJOR_VERSION_TAG = $(firstword $(subst ., ,$(shell cat VERSION))) .PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS) common-docker-tag-latest: $(TAG_DOCKER_ARCHS) $(TAG_DOCKER_ARCHS): common-docker-tag-latest-%: - docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest" - docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)" + docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest" + docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)" .PHONY: common-docker-manifest common-docker-manifest: - DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a 
"$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(DOCKER_IMAGE_TAG)) - DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" + DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(SANITIZED_DOCKER_IMAGE_TAG)) + DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)" .PHONY: promu promu: $(PROMU) @@ -289,12 +261,6 @@ $(GOLANGCI_LINT): | sh -s -- -b $(FIRST_GOPATH)/bin $(GOLANGCI_LINT_VERSION) endif -ifdef GOVENDOR -.PHONY: $(GOVENDOR) -$(GOVENDOR): - GOOS= GOARCH= $(GO) get -u github.com/kardianos/govendor -endif - .PHONY: precheck precheck:: @@ -309,3 +275,9 @@ $(1)_precheck: exit 1; \ fi endef + +govulncheck: install-govulncheck + govulncheck ./... + +install-govulncheck: + command -v govulncheck > /dev/null || go install golang.org/x/vuln/cmd/govulncheck@latest diff --git a/tools/vendor/github.com/prometheus/procfs/README.md b/tools/vendor/github.com/prometheus/procfs/README.md index 43c37735a..0718239cf 100644 --- a/tools/vendor/github.com/prometheus/procfs/README.md +++ b/tools/vendor/github.com/prometheus/procfs/README.md @@ -47,15 +47,15 @@ However, most of the API includes unit tests which can be run with `make test`. The procfs library includes a set of test fixtures which include many example files from the `/proc` and `/sys` filesystems. These fixtures are included as a [ttar](https://github.com/ideaship/ttar) file which is extracted automatically during testing. To add/update the test fixtures, first -ensure the `fixtures` directory is up to date by removing the existing directory and then -extracting the ttar file using `make fixtures/.unpacked` or just `make test`. +ensure the `testdata/fixtures` directory is up to date by removing the existing directory and then +extracting the ttar file using `make testdata/fixtures/.unpacked` or just `make test`. ```bash -rm -rf fixtures +rm -rf testdata/fixtures make test ``` -Next, make the required changes to the extracted files in the `fixtures` directory. When +Next, make the required changes to the extracted files in the `testdata/fixtures` directory. When the changes are complete, run `make update_fixtures` to create a new `fixtures.ttar` file based on the updated `fixtures` directory. And finally, verify the changes using -`git diff fixtures.ttar`. +`git diff testdata/fixtures.ttar`. diff --git a/tools/vendor/github.com/prometheus/procfs/SECURITY.md b/tools/vendor/github.com/prometheus/procfs/SECURITY.md index 67741f015..fed02d85c 100644 --- a/tools/vendor/github.com/prometheus/procfs/SECURITY.md +++ b/tools/vendor/github.com/prometheus/procfs/SECURITY.md @@ -3,4 +3,4 @@ The Prometheus security policy, including how to report vulnerabilities, can be found here: -https://prometheus.io/docs/operating/security/ + diff --git a/tools/vendor/github.com/prometheus/procfs/arp.go b/tools/vendor/github.com/prometheus/procfs/arp.go index 4e47e6172..2e5334415 100644 --- a/tools/vendor/github.com/prometheus/procfs/arp.go +++ b/tools/vendor/github.com/prometheus/procfs/arp.go @@ -15,11 +15,28 @@ package procfs import ( "fmt" - "io/ioutil" "net" + "os" + "strconv" "strings" ) +// Learned from include/uapi/linux/if_arp.h. +const ( + // Completed entry (ha valid). 
+ ATFComplete = 0x02 + // Permanent entry. + ATFPermanent = 0x04 + // Publish entry. + ATFPublish = 0x08 + // Has requested trailers. + ATFUseTrailers = 0x10 + // Obsoleted: Want to use a netmask (only for proxy entries). + ATFNetmask = 0x20 + // Don't answer this addresses. + ATFDontPublish = 0x40 +) + // ARPEntry contains a single row of the columnar data represented in // /proc/net/arp. type ARPEntry struct { @@ -29,14 +46,16 @@ type ARPEntry struct { HWAddr net.HardwareAddr // Name of the device Device string + // Flags + Flags byte } // GatherARPEntries retrieves all the ARP entries, parse the relevant columns, // and then return a slice of ARPEntry's. func (fs FS) GatherARPEntries() ([]ARPEntry, error) { - data, err := ioutil.ReadFile(fs.proc.Path("net/arp")) + data, err := os.ReadFile(fs.proc.Path("net/arp")) if err != nil { - return nil, fmt.Errorf("error reading arp %q: %w", fs.proc.Path("net/arp"), err) + return nil, fmt.Errorf("%w: error reading arp %s: %w", ErrFileRead, fs.proc.Path("net/arp"), err) } return parseARPEntries(data) @@ -59,11 +78,11 @@ func parseARPEntries(data []byte) ([]ARPEntry, error) { } else if width == expectedDataWidth { entry, err := parseARPEntry(columns) if err != nil { - return []ARPEntry{}, fmt.Errorf("failed to parse ARP entry: %w", err) + return []ARPEntry{}, fmt.Errorf("%w: Failed to parse ARP entry: %v: %w", ErrFileParse, entry, err) } entries = append(entries, entry) } else { - return []ARPEntry{}, fmt.Errorf("%d columns were detected, but %d were expected", width, expectedDataWidth) + return []ARPEntry{}, fmt.Errorf("%w: %d columns found, but expected %d: %w", ErrFileParse, width, expectedDataWidth, err) } } @@ -72,14 +91,26 @@ func parseARPEntries(data []byte) ([]ARPEntry, error) { } func parseARPEntry(columns []string) (ARPEntry, error) { + entry := ARPEntry{Device: columns[5]} ip := net.ParseIP(columns[0]) - mac := net.HardwareAddr(columns[3]) + entry.IPAddr = ip + + if mac, err := net.ParseMAC(columns[3]); err == nil { + entry.HWAddr = mac + } else { + return ARPEntry{}, err + } - entry := ARPEntry{ - IPAddr: ip, - HWAddr: mac, - Device: columns[5], + if flags, err := strconv.ParseUint(columns[2], 0, 8); err == nil { + entry.Flags = byte(flags) + } else { + return ARPEntry{}, err } return entry, nil } + +// IsComplete returns true if ARP entry is marked with complete flag. 
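+// Callers will typically filter the result of GatherARPEntries with this
+// method to skip neighbours whose resolution is still in progress.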
+func (entry *ARPEntry) IsComplete() bool { + return entry.Flags&ATFComplete != 0 +} diff --git a/tools/vendor/github.com/prometheus/procfs/buddyinfo.go b/tools/vendor/github.com/prometheus/procfs/buddyinfo.go index f5b7939b2..838075009 100644 --- a/tools/vendor/github.com/prometheus/procfs/buddyinfo.go +++ b/tools/vendor/github.com/prometheus/procfs/buddyinfo.go @@ -55,18 +55,18 @@ func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) { parts := strings.Fields(line) if len(parts) < 4 { - return nil, fmt.Errorf("invalid number of fields when parsing buddyinfo") + return nil, fmt.Errorf("%w: Invalid number of fields, found: %v", ErrFileParse, parts) } - node := strings.TrimRight(parts[1], ",") - zone := strings.TrimRight(parts[3], ",") + node := strings.TrimSuffix(parts[1], ",") + zone := strings.TrimSuffix(parts[3], ",") arraySize := len(parts[4:]) if bucketCount == -1 { bucketCount = arraySize } else { if bucketCount != arraySize { - return nil, fmt.Errorf("mismatch in number of buddyinfo buckets, previous count %d, new count %d", bucketCount, arraySize) + return nil, fmt.Errorf("%w: mismatch in number of buddyinfo buckets, previous count %d, new count %d", ErrFileParse, bucketCount, arraySize) } } @@ -74,7 +74,7 @@ func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) { for i := 0; i < arraySize; i++ { sizes[i], err = strconv.ParseFloat(parts[i+4], 64) if err != nil { - return nil, fmt.Errorf("invalid value in buddyinfo: %w", err) + return nil, fmt.Errorf("%w: Invalid valid in buddyinfo: %f: %w", ErrFileParse, sizes[i], err) } } diff --git a/tools/vendor/github.com/prometheus/procfs/cpuinfo.go b/tools/vendor/github.com/prometheus/procfs/cpuinfo.go index 5623b24a1..f0950bb49 100644 --- a/tools/vendor/github.com/prometheus/procfs/cpuinfo.go +++ b/tools/vendor/github.com/prometheus/procfs/cpuinfo.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build linux // +build linux package procfs @@ -27,7 +28,7 @@ import ( "github.com/prometheus/procfs/internal/util" ) -// CPUInfo contains general information about a system CPU found in /proc/cpuinfo +// CPUInfo contains general information about a system CPU found in /proc/cpuinfo. 
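+// One CPUInfo value is produced per "processor" stanza; which fields are
+// populated varies by architecture (compare the parseCPUInfo* variants below).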
type CPUInfo struct { Processor uint VendorID string @@ -78,7 +79,7 @@ func parseCPUInfoX86(info []byte) ([]CPUInfo, error) { // find the first "processor" line firstLine := firstNonEmptyLine(scanner) if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") { - return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) + return nil, fmt.Errorf("%w: Cannot parse line: %q", ErrFileParse, firstLine) } field := strings.SplitN(firstLine, ": ", 2) v, err := strconv.ParseUint(field[1], 0, 32) @@ -191,9 +192,10 @@ func parseCPUInfoARM(info []byte) ([]CPUInfo, error) { scanner := bufio.NewScanner(bytes.NewReader(info)) firstLine := firstNonEmptyLine(scanner) - match, _ := regexp.MatchString("^[Pp]rocessor", firstLine) + match, err := regexp.MatchString("^[Pp]rocessor", firstLine) if !match || !strings.Contains(firstLine, ":") { - return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) + return nil, fmt.Errorf("%w: Cannot parse line: %q: %w", ErrFileParse, firstLine, err) + } field := strings.SplitN(firstLine, ": ", 2) cpuinfo := []CPUInfo{} @@ -257,7 +259,7 @@ func parseCPUInfoS390X(info []byte) ([]CPUInfo, error) { firstLine := firstNonEmptyLine(scanner) if !strings.HasPrefix(firstLine, "vendor_id") || !strings.Contains(firstLine, ":") { - return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) + return nil, fmt.Errorf("%w: Cannot parse line: %q", ErrFileParse, firstLine) } field := strings.SplitN(firstLine, ": ", 2) cpuinfo := []CPUInfo{} @@ -282,7 +284,7 @@ func parseCPUInfoS390X(info []byte) ([]CPUInfo, error) { if strings.HasPrefix(line, "processor") { match := cpuinfoS390XProcessorRegexp.FindStringSubmatch(line) if len(match) < 2 { - return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) + return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine) } cpu := commonCPUInfo v, err := strconv.ParseUint(match[1], 0, 32) @@ -342,7 +344,7 @@ func parseCPUInfoMips(info []byte) ([]CPUInfo, error) { // find the first "processor" line firstLine := firstNonEmptyLine(scanner) if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") { - return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) + return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine) } field := strings.SplitN(firstLine, ": ", 2) cpuinfo := []CPUInfo{} @@ -379,12 +381,48 @@ func parseCPUInfoMips(info []byte) ([]CPUInfo, error) { return cpuinfo, nil } +func parseCPUInfoLoong(info []byte) ([]CPUInfo, error) { + scanner := bufio.NewScanner(bytes.NewReader(info)) + // find the first "processor" line + firstLine := firstNonEmptyLine(scanner) + if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") { + return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine) + } + field := strings.SplitN(firstLine, ": ", 2) + cpuinfo := []CPUInfo{} + systemType := field[1] + i := 0 + for scanner.Scan() { + line := scanner.Text() + if !strings.Contains(line, ":") { + continue + } + field := strings.SplitN(line, ": ", 2) + switch strings.TrimSpace(field[0]) { + case "processor": + v, err := strconv.ParseUint(field[1], 0, 32) + if err != nil { + return nil, err + } + i = int(v) + cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor + cpuinfo[i].Processor = uint(v) + cpuinfo[i].VendorID = systemType + case "CPU Family": + cpuinfo[i].CPUFamily = field[1] + case "Model Name": + cpuinfo[i].ModelName = field[1] + } + } + return cpuinfo, nil +} + func parseCPUInfoPPC(info []byte) ([]CPUInfo, error) { scanner := 
bufio.NewScanner(bytes.NewReader(info)) firstLine := firstNonEmptyLine(scanner) if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") { - return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) + return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine) } field := strings.SplitN(firstLine, ": ", 2) v, err := strconv.ParseUint(field[1], 0, 32) @@ -429,7 +467,7 @@ func parseCPUInfoRISCV(info []byte) ([]CPUInfo, error) { firstLine := firstNonEmptyLine(scanner) if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") { - return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) + return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine) } field := strings.SplitN(firstLine, ": ", 2) v, err := strconv.ParseUint(field[1], 0, 32) @@ -469,7 +507,7 @@ func parseCPUInfoDummy(_ []byte) ([]CPUInfo, error) { // nolint:unused,deadcode } // firstNonEmptyLine advances the scanner to the first non-empty line -// and returns the contents of that line +// and returns the contents of that line. func firstNonEmptyLine(scanner *bufio.Scanner) string { for scanner.Scan() { line := scanner.Text() diff --git a/tools/vendor/github.com/prometheus/procfs/cpuinfo_armx.go b/tools/vendor/github.com/prometheus/procfs/cpuinfo_armx.go index 44b590ed3..64cfd534c 100644 --- a/tools/vendor/github.com/prometheus/procfs/cpuinfo_armx.go +++ b/tools/vendor/github.com/prometheus/procfs/cpuinfo_armx.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build linux && (arm || arm64) // +build linux // +build arm arm64 diff --git a/tools/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go b/tools/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go similarity index 73% rename from tools/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go rename to tools/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go index c318385cb..d88442f0e 100644 --- a/tools/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go +++ b/tools/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go @@ -1,10 +1,9 @@ -// Copyright 2013 Matt T. Proud -// +// Copyright 2022 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -12,5 +11,9 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package pbutil provides record length-delimited Protocol Buffer streaming. -package pbutil +//go:build linux +// +build linux + +package procfs + +var parseCPUInfo = parseCPUInfoLoong diff --git a/tools/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go b/tools/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go index 91e272573..c11207f3a 100644 --- a/tools/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go +++ b/tools/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
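+// (Both constraint forms are kept deliberately: the go:build line below is
+// the Go 1.17+ syntax, while the legacy line that follows keeps pre-1.17
+// toolchains on the same rule.)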
+//go:build linux && (mips || mipsle || mips64 || mips64le) // +build linux // +build mips mipsle mips64 mips64le diff --git a/tools/vendor/github.com/prometheus/procfs/cpuinfo_others.go b/tools/vendor/github.com/prometheus/procfs/cpuinfo_others.go index 95b5b4ec4..a6b2b3127 100644 --- a/tools/vendor/github.com/prometheus/procfs/cpuinfo_others.go +++ b/tools/vendor/github.com/prometheus/procfs/cpuinfo_others.go @@ -11,8 +11,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build linux -// +build !386,!amd64,!arm,!arm64,!mips,!mips64,!mips64le,!mipsle,!ppc64,!ppc64le,!riscv64,!s390x +//go:build linux && !386 && !amd64 && !arm && !arm64 && !loong64 && !mips && !mips64 && !mips64le && !mipsle && !ppc64 && !ppc64le && !riscv64 && !s390x +// +build linux,!386,!amd64,!arm,!arm64,!loong64,!mips,!mips64,!mips64le,!mipsle,!ppc64,!ppc64le,!riscv64,!s390x package procfs diff --git a/tools/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go b/tools/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go index 6068bd571..003bc2ad4 100644 --- a/tools/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go +++ b/tools/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build linux && (ppc64 || ppc64le) // +build linux // +build ppc64 ppc64le diff --git a/tools/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go b/tools/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go index e83c2e207..1c9b7313b 100644 --- a/tools/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go +++ b/tools/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build linux && (riscv || riscv64) // +build linux // +build riscv riscv64 diff --git a/tools/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go b/tools/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go index 26814eeba..fa3686bc0 100644 --- a/tools/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go +++ b/tools/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build linux // +build linux package procfs diff --git a/tools/vendor/github.com/prometheus/procfs/cpuinfo_x86.go b/tools/vendor/github.com/prometheus/procfs/cpuinfo_x86.go index d5bedf97f..a0ef55562 100644 --- a/tools/vendor/github.com/prometheus/procfs/cpuinfo_x86.go +++ b/tools/vendor/github.com/prometheus/procfs/cpuinfo_x86.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+//go:build linux && (386 || amd64) // +build linux // +build 386 amd64 diff --git a/tools/vendor/github.com/prometheus/procfs/crypto.go b/tools/vendor/github.com/prometheus/procfs/crypto.go index 5048ad1f2..5f2a37a78 100644 --- a/tools/vendor/github.com/prometheus/procfs/crypto.go +++ b/tools/vendor/github.com/prometheus/procfs/crypto.go @@ -55,12 +55,13 @@ func (fs FS) Crypto() ([]Crypto, error) { path := fs.proc.Path("crypto") b, err := util.ReadFileNoStat(path) if err != nil { - return nil, fmt.Errorf("error reading crypto %q: %w", path, err) + return nil, fmt.Errorf("%w: Cannot read file %v: %w", ErrFileRead, b, err) + } crypto, err := parseCrypto(bytes.NewReader(b)) if err != nil { - return nil, fmt.Errorf("error parsing crypto %q: %w", path, err) + return nil, fmt.Errorf("%w: Cannot parse %v: %w", ErrFileParse, crypto, err) } return crypto, nil @@ -83,7 +84,7 @@ func parseCrypto(r io.Reader) ([]Crypto, error) { kv := strings.Split(text, ":") if len(kv) != 2 { - return nil, fmt.Errorf("malformed crypto line: %q", text) + return nil, fmt.Errorf("%w: Cannot parse line: %q", ErrFileParse, text) } k := strings.TrimSpace(kv[0]) diff --git a/tools/vendor/github.com/prometheus/procfs/doc.go b/tools/vendor/github.com/prometheus/procfs/doc.go index d31a82600..f9d961e44 100644 --- a/tools/vendor/github.com/prometheus/procfs/doc.go +++ b/tools/vendor/github.com/prometheus/procfs/doc.go @@ -16,30 +16,29 @@ // // Example: // -// package main -// -// import ( -// "fmt" -// "log" -// -// "github.com/prometheus/procfs" -// ) -// -// func main() { -// p, err := procfs.Self() -// if err != nil { -// log.Fatalf("could not get process: %s", err) -// } -// -// stat, err := p.Stat() -// if err != nil { -// log.Fatalf("could not get process stat: %s", err) -// } -// -// fmt.Printf("command: %s\n", stat.Comm) -// fmt.Printf("cpu time: %fs\n", stat.CPUTime()) -// fmt.Printf("vsize: %dB\n", stat.VirtualMemory()) -// fmt.Printf("rss: %dB\n", stat.ResidentMemory()) -// } -// +// package main +// +// import ( +// "fmt" +// "log" +// +// "github.com/prometheus/procfs" +// ) +// +// func main() { +// p, err := procfs.Self() +// if err != nil { +// log.Fatalf("could not get process: %s", err) +// } +// +// stat, err := p.Stat() +// if err != nil { +// log.Fatalf("could not get process stat: %s", err) +// } +// +// fmt.Printf("command: %s\n", stat.Comm) +// fmt.Printf("cpu time: %fs\n", stat.CPUTime()) +// fmt.Printf("vsize: %dB\n", stat.VirtualMemory()) +// fmt.Printf("rss: %dB\n", stat.ResidentMemory()) +// } package procfs diff --git a/tools/vendor/github.com/prometheus/procfs/fixtures.ttar b/tools/vendor/github.com/prometheus/procfs/fixtures.ttar deleted file mode 100644 index 5e7eeef4a..000000000 --- a/tools/vendor/github.com/prometheus/procfs/fixtures.ttar +++ /dev/null @@ -1,7673 +0,0 @@ -# Archive created by ttar -c -f fixtures.ttar fixtures/ -Directory: fixtures -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26231 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/cmdline -Lines: 1 -vimNULLBYTEtest.goNULLBYTE+10NULLBYTEEOF -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/comm -Lines: 1 -vim -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/proc/26231/cwd -SymlinkTo: /usr/bin [... the remaining ~7,600 deleted lines of the fixtures.ttar archive (Directory/Path/Lines/Mode entries for the proc/ and sys/ test fixtures) are omitted here; the hunk header above records the full deletion as @@ -1,7673 +0,0 @@ ...]
passed -internal : no -type : rng -seedsize : 0 - -name : 842 -driver : 842-scomp -module : kernel -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : scomp - -name : 842 -driver : 842-generic -module : kernel -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : compression - -name : lzo-rle -driver : lzo-rle-scomp -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : no -type : scomp - -name : lzo-rle -driver : lzo-rle-generic -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : no -type : compression - -name : lzo -driver : lzo-scomp -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : no -type : scomp - -name : lzo -driver : lzo-generic -module : kernel -priority : 0 -refcnt : 9 -selftest : passed -internal : no -type : compression - -name : crct10dif -driver : crct10dif-generic -module : kernel -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : shash -blocksize : 1 -digestsize : 2 - -name : crc32c -driver : crc32c-generic -module : kernel -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : shash -blocksize : 1 -digestsize : 4 - -name : zlib-deflate -driver : zlib-deflate-scomp -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : no -type : scomp - -name : deflate -driver : deflate-scomp -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : no -type : scomp - -name : deflate -driver : deflate-generic -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : no -type : compression - -name : aes -driver : aes-generic -module : kernel -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : cipher -blocksize : 16 -min keysize : 16 -max keysize : 32 - -name : sha224 -driver : sha224-generic -module : kernel -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : shash -blocksize : 64 -digestsize : 28 - -name : sha256 -driver : sha256-generic -module : kernel -priority : 100 -refcnt : 11 -selftest : passed -internal : no -type : shash -blocksize : 64 -digestsize : 32 - -name : sha1 -driver : sha1-generic -module : kernel -priority : 100 -refcnt : 11 -selftest : passed -internal : no -type : shash -blocksize : 64 -digestsize : 20 - -name : md5 -driver : md5-generic -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : no -type : shash -blocksize : 64 -digestsize : 16 - -name : ecb(cipher_null) -driver : ecb-cipher_null -module : kernel -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : skcipher -async : no -blocksize : 1 -min keysize : 0 -max keysize : 0 -ivsize : 0 -chunksize : 1 -walksize : 1 - -name : digest_null -driver : digest_null-generic -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : no -type : shash -blocksize : 1 -digestsize : 0 - -name : compress_null -driver : compress_null-generic -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : no -type : compression - -name : cipher_null -driver : cipher_null-generic -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : no -type : cipher -blocksize : 1 -min keysize : 0 -max keysize : 0 - -name : rsa -driver : rsa-generic -module : kernel -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : akcipher - -name : dh -driver : dh-generic -module : kernel -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : kpp - -name : aes -driver : aes-asm -module : kernel -priority : 200 -refcnt : 1 
-selftest : passed -internal : no -type : cipher -blocksize : 16 -min keysize : 16 -max keysize : 32 - -Mode: 444 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/diskstats -Lines: 52 - 1 0 ram0 0 0 0 0 0 0 0 0 0 0 0 - 1 1 ram1 0 0 0 0 0 0 0 0 0 0 0 - 1 2 ram2 0 0 0 0 0 0 0 0 0 0 0 - 1 3 ram3 0 0 0 0 0 0 0 0 0 0 0 - 1 4 ram4 0 0 0 0 0 0 0 0 0 0 0 - 1 5 ram5 0 0 0 0 0 0 0 0 0 0 0 - 1 6 ram6 0 0 0 0 0 0 0 0 0 0 0 - 1 7 ram7 0 0 0 0 0 0 0 0 0 0 0 - 1 8 ram8 0 0 0 0 0 0 0 0 0 0 0 - 1 9 ram9 0 0 0 0 0 0 0 0 0 0 0 - 1 10 ram10 0 0 0 0 0 0 0 0 0 0 0 - 1 11 ram11 0 0 0 0 0 0 0 0 0 0 0 - 1 12 ram12 0 0 0 0 0 0 0 0 0 0 0 - 1 13 ram13 0 0 0 0 0 0 0 0 0 0 0 - 1 14 ram14 0 0 0 0 0 0 0 0 0 0 0 - 1 15 ram15 0 0 0 0 0 0 0 0 0 0 0 - 7 0 loop0 0 0 0 0 0 0 0 0 0 0 0 - 7 1 loop1 0 0 0 0 0 0 0 0 0 0 0 - 7 2 loop2 0 0 0 0 0 0 0 0 0 0 0 - 7 3 loop3 0 0 0 0 0 0 0 0 0 0 0 - 7 4 loop4 0 0 0 0 0 0 0 0 0 0 0 - 7 5 loop5 0 0 0 0 0 0 0 0 0 0 0 - 7 6 loop6 0 0 0 0 0 0 0 0 0 0 0 - 7 7 loop7 0 0 0 0 0 0 0 0 0 0 0 - 8 0 sda 25354637 34367663 1003346126 18492372 28444756 11134226 505697032 63877960 0 9653880 82621804 - 8 1 sda1 250 0 2000 36 0 0 0 0 0 36 36 - 8 2 sda2 246 0 1968 32 0 0 0 0 0 32 32 - 8 3 sda3 340 13 2818 52 11 8 152 8 0 56 60 - 8 4 sda4 25353629 34367650 1003337964 18492232 27448755 11134218 505696880 61593380 0 7576432 80332428 - 252 0 dm-0 59910002 0 1003337218 46229572 39231014 0 505696880 1158557800 0 11325968 1206301256 - 252 1 dm-1 388 0 3104 84 74 0 592 0 0 76 84 - 252 2 dm-2 11571 0 308350 6536 153522 0 5093416 122884 0 65400 129416 - 252 3 dm-3 3870 0 3870 104 0 0 0 0 0 16 104 - 252 4 dm-4 392 0 1034 28 38 0 137 16 0 24 44 - 252 5 dm-5 3729 0 84279 924 98918 0 1151688 104684 0 58848 105632 - 179 0 mmcblk0 192 3 1560 156 0 0 0 0 0 136 156 - 179 1 mmcblk0p1 17 3 160 24 0 0 0 0 0 24 24 - 179 2 mmcblk0p2 95 0 760 68 0 0 0 0 0 68 68 - 2 0 fd0 2 0 16 80 0 0 0 0 0 80 80 - 254 0 vda 1775784 15386 32670882 8655768 6038856 20711856 213637440 2069221364 0 41614592 2077872228 - 254 1 vda1 668 85 5984 956 207 4266 35784 32772 0 8808 33720 - 254 2 vda2 1774936 15266 32663262 8654692 5991028 20707590 213601656 2069152216 0 41607628 2077801992 - 11 0 sr0 0 0 0 0 0 0 0 0 0 0 0 - 259 0 nvme0n1 47114 4 4643973 21650 1078320 43950 39451633 1011053 0 222766 1032546 - 259 1 nvme0n1p1 1140 0 9370 16 1 0 1 0 0 16 16 - 259 2 nvme0n1p2 45914 4 4631243 21626 1036885 43950 39451632 919480 0 131580 940970 - 8 0 sdb 326552 841 9657779 84 41822 2895 1972905 5007 0 60730 67070 68851 0 1925173784 11130 - 8 1 sdb1 231 3 34466 4 24 23 106 0 0 64 64 0 0 0 0 - 8 2 sdb2 326310 838 9622281 67 40726 2872 1972799 4924 0 58250 64567 68851 0 1925173784 11130 - 8 0 sdc 14202 71 579164 21861 2995 1589 180500 40875 0 11628 55200 0 0 0 0 127 182 - 8 1 sdc1 1027 0 13795 5021 2 0 4096 3 0 690 4579 0 0 0 0 0 0 - 8 2 sdc2 13126 71 561749 16802 2830 1589 176404 40620 0 10931 50449 0 0 0 0 0 0 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/fs -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/fs/fscache -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/fs/fscache/stats -Lines: 24 -FS-Cache statistics -Cookies: idx=3 dat=67877 spc=0 -Objects: alc=67473 nal=0 avl=67473 ded=388 -ChkAux : non=12 ok=33 upd=44 obs=55 -Pages : mrk=547164 unc=364577 -Acquire: n=67880 nul=98 noc=25 ok=67780 nbf=39 oom=26 
-Lookups: n=67473 neg=67470 pos=58 crt=67473 tmo=85 -Invals : n=14 run=13 -Updates: n=7 nul=3 run=8 -Relinqs: n=394 nul=1 wcr=2 rtr=3 -AttrChg: n=6 ok=5 nbf=4 oom=3 run=2 -Allocs : n=20 ok=19 wt=18 nbf=17 int=16 -Allocs : ops=15 owt=14 abt=13 -Retrvls: n=151959 ok=82823 wt=23467 nod=69136 nbf=15 int=69 oom=43 -Retrvls: ops=151959 owt=42747 abt=44 -Stores : n=225565 ok=225565 agn=12 nbf=13 oom=14 -Stores : ops=69156 run=294721 pgs=225565 rxd=225565 olm=43 -VmScan : nos=364512 gon=2 bsy=43 can=12 wt=66 -Ops : pend=42753 run=221129 enq=628798 can=11 rej=88 -Ops : ini=377538 dfr=27 rel=377538 gc=37 -CacheOp: alo=1 luo=2 luc=3 gro=4 -CacheOp: inv=5 upo=6 dro=7 pto=8 atc=9 syn=10 -CacheOp: rap=11 ras=12 alp=13 als=14 wrp=15 ucp=16 dsp=17 -CacheEv: nsp=18 stl=19 rtr=20 cul=21EOF -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/fs/xfs -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/fs/xfs/stat -Lines: 23 -extent_alloc 92447 97589 92448 93751 -abt 0 0 0 0 -blk_map 1767055 188820 184891 92447 92448 2140766 0 -bmbt 0 0 0 0 -dir 185039 92447 92444 136422 -trans 706 944304 0 -ig 185045 58807 0 126238 0 33637 22 -log 2883 113448 9 17360 739 -push_ail 945014 0 134260 15483 0 3940 464 159985 0 40 -xstrat 92447 0 -rw 107739 94045 -attr 4 0 0 0 -icluster 8677 7849 135802 -vnodes 92601 0 0 0 92444 92444 92444 0 -buf 2666287 7122 2659202 3599 2 7085 0 10297 7085 -abtb2 184941 1277345 13257 13278 0 0 0 0 0 0 0 0 0 0 2746147 -abtc2 345295 2416764 172637 172658 0 0 0 0 0 0 0 0 0 0 21406023 -bmbt2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -ibt2 343004 1358467 0 0 0 0 0 0 0 0 0 0 0 0 0 -fibt2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -qm 0 0 0 0 0 0 0 0 -xpc 399724544 92823103 86219234 -debug 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/loadavg -Lines: 1 -0.02 0.04 0.05 1/497 11947 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/mdstat -Lines: 60 -Personalities : [linear] [multipath] [raid0] [raid1] [raid6] [raid5] [raid4] [raid10] - -md3 : active raid6 sda1[8] sdh1[7] sdg1[6] sdf1[5] sde1[11] sdd1[3] sdc1[10] sdb1[9] sdd1[10](S) sdd2[11](S) - 5853468288 blocks super 1.2 level 6, 64k chunk, algorithm 2 [8/8] [UUUUUUUU] - -md127 : active raid1 sdi2[0] sdj2[1] - 312319552 blocks [2/2] [UU] - -md0 : active raid1 sdi1[0] sdj1[1] - 248896 blocks [2/2] [UU] - -md4 : inactive raid1 sda3[0](F) sdb3[1](S) - 4883648 blocks [2/2] [UU] - -md6 : active raid1 sdb2[2](F) sdc[1](S) sda2[0] - 195310144 blocks [2/1] [U_] - [=>...................] recovery = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec - -md8 : active raid1 sdb1[1] sda1[0] sdc[2](S) sde[3](S) - 195310144 blocks [2/2] [UU] - [=>...................] resync = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec - -md201 : active raid1 sda3[0] sdb3[1] - 1993728 blocks super 1.2 [2/2] [UU] - [=>...................] 
check = 5.7% (114176/1993728) finish=0.2min speed=114176K/sec - -md7 : active raid6 sdb1[0] sde1[3] sdd1[2] sdc1[1](F) - 7813735424 blocks super 1.2 level 6, 512k chunk, algorithm 2 [4/3] [U_UU] - bitmap: 0/30 pages [0KB], 65536KB chunk - -md9 : active raid1 sdc2[2] sdd2[3] sdb2[1] sda2[0] sde[4](F) sdf[5](F) sdg[6](S) - 523968 blocks super 1.2 [4/4] [UUUU] - resync=DELAYED - -md10 : active raid0 sda1[0] sdb1[1] - 314159265 blocks 64k chunks - -md11 : active (auto-read-only) raid1 sdb2[0] sdc2[1] sdc3[2](F) hda[4](S) ssdc2[3](S) - 4190208 blocks super 1.2 [2/2] [UU] - resync=PENDING - -md12 : active raid0 sdc2[0] sdd2[1] - 3886394368 blocks super 1.2 512k chunks - -md126 : active raid0 sdb[1] sdc[0] - 1855870976 blocks super external:/md127/0 128k chunks - -md219 : inactive sdb[2](S) sdc[1](S) sda[0](S) - 7932 blocks super external:imsm - -md00 : active raid0 xvdb[0] - 4186624 blocks super 1.2 256k chunks - -md120 : active linear sda1[1] sdb1[0] - 2095104 blocks super 1.2 0k rounding - -md101 : active (read-only) raid0 sdb[2] sdd[1] sdc[0] - 322560 blocks super 1.2 512k chunks - -unused devices: -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/meminfo -Lines: 42 -MemTotal: 15666184 kB -MemFree: 440324 kB -Buffers: 1020128 kB -Cached: 12007640 kB -SwapCached: 0 kB -Active: 6761276 kB -Inactive: 6532708 kB -Active(anon): 267256 kB -Inactive(anon): 268 kB -Active(file): 6494020 kB -Inactive(file): 6532440 kB -Unevictable: 0 kB -Mlocked: 0 kB -SwapTotal: 0 kB -SwapFree: 0 kB -Dirty: 768 kB -Writeback: 0 kB -AnonPages: 266216 kB -Mapped: 44204 kB -Shmem: 1308 kB -Slab: 1807264 kB -SReclaimable: 1738124 kB -SUnreclaim: 69140 kB -KernelStack: 1616 kB -PageTables: 5288 kB -NFS_Unstable: 0 kB -Bounce: 0 kB -WritebackTmp: 0 kB -CommitLimit: 7833092 kB -Committed_AS: 530844 kB -VmallocTotal: 34359738367 kB -VmallocUsed: 36596 kB -VmallocChunk: 34359637840 kB -HardwareCorrupted: 0 kB -AnonHugePages: 12288 kB -HugePages_Total: 0 -HugePages_Free: 0 -HugePages_Rsvd: 0 -HugePages_Surp: 0 -Hugepagesize: 2048 kB -DirectMap4k: 91136 kB -DirectMap2M: 16039936 kB -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/net -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/arp -Lines: 2 -IP address HW type Flags HW address Mask Device -192.168.224.1 0x1 0x2 00:50:56:c0:00:08 * ens33 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/dev -Lines: 6 -Inter-| Receive | Transmit - face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed -vethf345468: 648 8 0 0 0 0 0 0 438 5 0 0 0 0 0 0 - lo: 1664039048 1566805 0 0 0 0 0 0 1664039048 1566805 0 0 0 0 0 0 -docker0: 2568 38 0 0 0 0 0 0 438 5 0 0 0 0 0 0 - eth0: 874354587 1036395 0 0 0 0 0 0 563352563 732147 0 0 0 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/ip_vs -Lines: 21 -IP Virtual Server version 1.2.1 (size=4096) -Prot LocalAddress:Port Scheduler Flags - -> RemoteAddress:Port Forward Weight ActiveConn InActConn -TCP C0A80016:0CEA wlc - -> C0A85216:0CEA Tunnel 100 248 2 - -> C0A85318:0CEA Tunnel 100 248 2 - -> C0A85315:0CEA Tunnel 100 248 1 -TCP C0A80039:0CEA wlc - -> C0A85416:0CEA Tunnel 0 0 0 - -> C0A85215:0CEA Tunnel 100 1499 0 - -> C0A83215:0CEA Tunnel 100 1498 0 -TCP 
C0A80037:0CEA wlc - -> C0A8321A:0CEA Tunnel 0 0 0 - -> C0A83120:0CEA Tunnel 100 0 0 -TCP [2620:0000:0000:0000:0000:0000:0000:0001]:0050 sh - -> [2620:0000:0000:0000:0000:0000:0000:0002]:0050 Route 1 0 0 - -> [2620:0000:0000:0000:0000:0000:0000:0003]:0050 Route 1 0 0 - -> [2620:0000:0000:0000:0000:0000:0000:0004]:0050 Route 1 1 1 -FWM 10001000 wlc - -> C0A8321A:0CEA Route 0 0 1 - -> C0A83215:0CEA Route 0 0 2 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/ip_vs_stats -Lines: 6 - Total Incoming Outgoing Incoming Outgoing - Conns Packets Packets Bytes Bytes - 16AA370 E33656E5 0 51D8C8883AB3 0 - - Conns/s Pkts/s Pkts/s Bytes/s Bytes/s - 4 1FB3C 0 1282A8F 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/protocols -Lines: 14 -protocol size sockets memory press maxhdr slab module cl co di ac io in de sh ss gs se re sp bi br ha uh gp em -PACKET 1344 2 -1 NI 0 no kernel n n n n n n n n n n n n n n n n n n n -PINGv6 1112 0 -1 NI 0 yes kernel y y y n n y n n y y y y n y y y y y n -RAWv6 1112 1 -1 NI 0 yes kernel y y y n y y y n y y y y n y y y y n n -UDPLITEv6 1216 0 57 NI 0 yes kernel y y y n y y y n y y y y n n n y y y n -UDPv6 1216 10 57 NI 0 yes kernel y y y n y y y n y y y y n n n y y y n -TCPv6 2144 1937 1225378 no 320 yes kernel y y y y y y y y y y y y y n y y y y y -UNIX 1024 120 -1 NI 0 yes kernel n n n n n n n n n n n n n n n n n n n -UDP-Lite 1024 0 57 NI 0 yes kernel y y y n y y y n y y y y y n n y y y n -PING 904 0 -1 NI 0 yes kernel y y y n n y n n y y y y n y y y y y n -RAW 912 0 -1 NI 0 yes kernel y y y n y y y n y y y y n y y y y n n -UDP 1024 73 57 NI 0 yes kernel y y y n y y y n y y y y y n n y y y n -TCP 1984 93064 1225378 yes 320 yes kernel y y y y y y y y y y y y y n y y y y y -NETLINK 1040 16 -1 NI 0 no kernel n n n n n n n n n n n n n n n n n n n -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/net/rpc -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/rpc/nfs -Lines: 5 -net 18628 0 18628 6 -rpc 4329785 0 4338291 -proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2 -proc3 22 1 4084749 29200 94754 32580 186 47747 7981 8639 0 6356 0 6962 0 7958 0 0 241 4 4 2 39 -proc4 61 1 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/rpc/nfsd -Lines: 11 -rc 0 6 18622 -fh 0 0 0 0 0 -io 157286400 0 -th 8 0 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 -ra 32 0 0 0 0 0 0 0 0 0 0 0 -net 18628 0 18628 6 -rpc 18628 0 0 0 0 -proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2 -proc3 22 2 112 0 2719 111 0 0 0 0 0 0 0 0 0 0 0 27 216 0 2 1 0 -proc4 2 2 10853 -proc4ops 72 0 0 0 1098 2 0 0 0 0 8179 5896 0 0 0 0 5900 0 0 2 0 2 0 9609 0 2 150 1272 0 0 0 1236 0 0 0 0 3 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/sockstat -Lines: 6 -sockets: used 1602 -TCP: inuse 35 orphan 0 tw 4 alloc 59 mem 22 -UDP: inuse 12 mem 62 -UDPLITE: inuse 0 -RAW: inuse 0 -FRAG: inuse 0 memory 0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/sockstat6 -Lines: 5 
-TCP6: inuse 17 -UDP6: inuse 9 -UDPLITE6: inuse 0 -RAW6: inuse 1 -FRAG6: inuse 0 memory 0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/softnet_stat -Lines: 2 -00015c73 00020e76 F0000769 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 -01663fb2 00000000 000109a4 00000000 00000000 00000000 00000000 00000000 00000000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/softnet_stat.broken -Lines: 1 -00015c73 00020e76 F0000769 00000000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/net/stat -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/stat/arp_cache -Lines: 3 -entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards table_fulls -00000014 00000001 00000002 00000003 00000004 00000005 00000006 00000007 00000008 00000009 0000000a 0000000b 0000000c -00000014 0000000d 0000000e 0000000f 00000010 00000011 00000012 00000013 00000014 00000015 00000016 00000017 00000018 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/stat/ndisc_cache -Lines: 3 -entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards table_fulls -00000024 000000f0 000000f1 000000f2 000000f3 000000f4 000000f5 000000f6 000000f7 000000f8 000000f9 000000fa 000000fb -00000024 000000fc 000000fd 000000fe 000000ff 00000100 00000101 00000102 00000103 00000104 00000105 00000106 00000107 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/tcp -Lines: 4 - sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode - 0: 0500000A:0016 00000000:0000 0A 00000000:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0 - 1: 00000000:0016 00000000:0000 0A 00000001:00000000 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0 - 2: 00000000:0016 00000000:0000 0A 00000001:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/tcp6 -Lines: 3 - sl local_address remote_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode ref pointer drops - 1315: 00000000000000000000000000000000:14EB 00000000000000000000000000000000:0000 07 00000000:00000000 00:00000000 00000000 981 0 21040 2 0000000013726323 0 - 6073: 000080FE00000000FFADE15609667CFE:C781 00000000000000000000000000000000:0000 07 00000000:00000000 00:00000000 00000000 1000 0 11337031 2 00000000b9256fdd 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/udp -Lines: 4 - sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode - 0: 0500000A:0016 00000000:0000 0A 00000000:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0 - 1: 00000000:0016 00000000:0000 0A 00000001:00000000 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0 - 2: 00000000:0016 00000000:0000 0A 00000001:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - -Path: fixtures/proc/net/udp6 -Lines: 3 - sl local_address remote_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode ref pointer drops - 1315: 00000000000000000000000000000000:14EB 00000000000000000000000000000000:0000 07 00000000:00000000 00:00000000 00000000 981 0 21040 2 0000000013726323 0 - 6073: 000080FE00000000FFADE15609667CFE:C781 00000000000000000000000000000000:0000 07 00000000:00000000 00:00000000 00000000 1000 0 11337031 2 00000000b9256fdd 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/udp_broken -Lines: 2 - sl local_address rem_address st - 1: 00000000:0016 00000000:0000 0A -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/unix -Lines: 6 -Num RefCount Protocol Flags Type St Inode Path -0000000000000000: 00000002 00000000 00010000 0001 01 3442596 /var/run/postgresql/.s.PGSQL.5432 -0000000000000000: 0000000a 00000000 00010000 0005 01 10061 /run/udev/control -0000000000000000: 00000007 00000000 00000000 0002 01 12392 /dev/log -0000000000000000: 00000003 00000000 00000000 0001 03 4787297 /var/run/postgresql/.s.PGSQL.5432 -0000000000000000: 00000003 00000000 00000000 0001 03 5091797 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/unix_without_inode -Lines: 6 -Num RefCount Protocol Flags Type St Path -0000000000000000: 00000002 00000000 00010000 0001 01 /var/run/postgresql/.s.PGSQL.5432 -0000000000000000: 0000000a 00000000 00010000 0005 01 /run/udev/control -0000000000000000: 00000007 00000000 00000000 0002 01 /dev/log -0000000000000000: 00000003 00000000 00000000 0001 03 /var/run/postgresql/.s.PGSQL.5432 -0000000000000000: 00000003 00000000 00000000 0001 03 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/xfrm_stat -Lines: 28 -XfrmInError 1 -XfrmInBufferError 2 -XfrmInHdrError 4 -XfrmInNoStates 3 -XfrmInStateProtoError 40 -XfrmInStateModeError 100 -XfrmInStateSeqError 6000 -XfrmInStateExpired 4 -XfrmInStateMismatch 23451 -XfrmInStateInvalid 55555 -XfrmInTmplMismatch 51 -XfrmInNoPols 65432 -XfrmInPolBlock 100 -XfrmInPolError 10000 -XfrmOutError 1000000 -XfrmOutBundleGenError 43321 -XfrmOutBundleCheckError 555 -XfrmOutNoStates 869 -XfrmOutStateProtoError 4542 -XfrmOutStateModeError 4 -XfrmOutStateSeqError 543 -XfrmOutStateExpired 565 -XfrmOutPolBlock 43456 -XfrmOutPolDead 7656 -XfrmOutPolError 1454 -XfrmFwdHdrError 6654 -XfrmOutStateInvalid 28765 -XfrmAcquireError 24532 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/pressure -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/pressure/cpu -Lines: 1 -some avg10=0.10 avg60=2.00 avg300=3.85 total=15 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/pressure/io -Lines: 2 -some avg10=0.10 avg60=2.00 avg300=3.85 total=15 -full avg10=0.20 avg60=3.00 avg300=4.95 total=25 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/pressure/memory -Lines: 2 -some avg10=0.10 avg60=2.00 avg300=3.85 total=15 -full avg10=0.20 avg60=3.00 avg300=4.95 total=25 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/schedstat -Lines: 6 
-version 15 -timestamp 15819019232 -cpu0 498494191 0 3533438552 2553969831 3853684107 2465731542 2045936778163039 343796328169361 4767485306 -domain0 00000000,00000003 212499247 210112015 1861015 1860405436 536440 369895 32599 210079416 25368550 24241256 384652 927363878 807233 6366 1647 24239609 2122447165 1886868564 121112060 2848625533 125678146 241025 1032026 1885836538 2545 12 2533 0 0 0 0 0 0 1387952561 21076581 0 -cpu1 518377256 0 4155211005 2778589869 10466382 2867629021 1904686152592476 364107263788241 5145567945 -domain0 00000000,00000003 217653037 215526982 1577949 1580427380 557469 393576 28538 215498444 28721913 27662819 371153 870843407 745912 5523 1639 27661180 2331056874 2107732788 111442342 652402556 123615235 196159 1045245 2106687543 2400 3 2397 0 0 0 0 0 0 1437804657 26220076 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/self -SymlinkTo: 26231 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/slabinfo -Lines: 302 -slabinfo - version: 2.1 -# name : tunables : slabdata -pid_3 375 532 576 28 4 : tunables 0 0 0 : slabdata 19 19 0 -pid_2 3 28 576 28 4 : tunables 0 0 0 : slabdata 1 1 0 -nvidia_p2p_page_cache 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 -nvidia_pte_cache 9022 9152 368 22 2 : tunables 0 0 0 : slabdata 416 416 0 -nvidia_stack_cache 321 326 12624 2 8 : tunables 0 0 0 : slabdata 163 163 0 -kvm_async_pf 0 0 472 34 4 : tunables 0 0 0 : slabdata 0 0 0 -kvm_vcpu 0 0 15552 2 8 : tunables 0 0 0 : slabdata 0 0 0 -kvm_mmu_page_header 0 0 504 32 4 : tunables 0 0 0 : slabdata 0 0 0 -pte_list_desc 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 -x86_emulator 0 0 3024 10 8 : tunables 0 0 0 : slabdata 0 0 0 -x86_fpu 0 0 4608 7 8 : tunables 0 0 0 : slabdata 0 0 0 -iwl_cmd_pool:0000:04:00.0 0 128 512 32 4 : tunables 0 0 0 : slabdata 4 4 0 -ext4_groupinfo_4k 3719 3740 480 34 4 : tunables 0 0 0 : slabdata 110 110 0 -bio-6 32 75 640 25 4 : tunables 0 0 0 : slabdata 3 3 0 -bio-5 16 48 1344 24 8 : tunables 0 0 0 : slabdata 2 2 0 -bio-4 17 92 1408 23 8 : tunables 0 0 0 : slabdata 4 4 0 -fat_inode_cache 0 0 1056 31 8 : tunables 0 0 0 : slabdata 0 0 0 -fat_cache 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 -ovl_aio_req 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0 -ovl_inode 0 0 1000 32 8 : tunables 0 0 0 : slabdata 0 0 0 -squashfs_inode_cache 0 0 1088 30 8 : tunables 0 0 0 : slabdata 0 0 0 -fuse_request 0 0 472 34 4 : tunables 0 0 0 : slabdata 0 0 0 -fuse_inode 0 0 1152 28 8 : tunables 0 0 0 : slabdata 0 0 0 -xfs_dqtrx 0 0 864 37 8 : tunables 0 0 0 : slabdata 0 0 0 -xfs_dquot 0 0 832 39 8 : tunables 0 0 0 : slabdata 0 0 0 -xfs_buf 0 0 768 21 4 : tunables 0 0 0 : slabdata 0 0 0 -xfs_bui_item 0 0 544 30 4 : tunables 0 0 0 : slabdata 0 0 0 -xfs_bud_item 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0 -xfs_cui_item 0 0 768 21 4 : tunables 0 0 0 : slabdata 0 0 0 -xfs_cud_item 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0 -xfs_rui_item 0 0 1024 32 8 : tunables 0 0 0 : slabdata 0 0 0 -xfs_rud_item 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0 -xfs_icr 0 0 520 31 4 : tunables 0 0 0 : slabdata 0 0 0 -xfs_ili 0 0 528 31 4 : tunables 0 0 0 : slabdata 0 0 0 -xfs_inode 0 0 1344 24 8 : tunables 0 0 0 : slabdata 0 0 0 -xfs_efi_item 0 0 768 21 4 : tunables 0 0 0 : slabdata 0 0 0 -xfs_efd_item 0 0 776 21 4 : tunables 0 0 0 : slabdata 0 0 0 -xfs_buf_item 0 0 608 26 4 : tunables 0 0 0 : slabdata 0 0 0 -xf_trans 0 0 568 28 4 : tunables 0 0 0 : slabdata 0 0 0 -xfs_ifork 0 0 376 21 2 : 
tunables 0 0 0 : slabdata 0 0 0 -xfs_da_state 0 0 816 20 4 : tunables 0 0 0 : slabdata 0 0 0 -xfs_btree_cur 0 0 560 29 4 : tunables 0 0 0 : slabdata 0 0 0 -xfs_bmap_free_item 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0 -xfs_log_ticket 0 0 520 31 4 : tunables 0 0 0 : slabdata 0 0 0 -nfs_direct_cache 0 0 560 29 4 : tunables 0 0 0 : slabdata 0 0 0 -nfs_commit_data 4 28 1152 28 8 : tunables 0 0 0 : slabdata 1 1 0 -nfs_write_data 32 50 1280 25 8 : tunables 0 0 0 : slabdata 2 2 0 -nfs_read_data 0 0 1280 25 8 : tunables 0 0 0 : slabdata 0 0 0 -nfs_inode_cache 0 0 1408 23 8 : tunables 0 0 0 : slabdata 0 0 0 -nfs_page 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0 -rpc_inode_cache 0 0 1024 32 8 : tunables 0 0 0 : slabdata 0 0 0 -rpc_buffers 8 13 2496 13 8 : tunables 0 0 0 : slabdata 1 1 0 -rpc_tasks 8 25 640 25 4 : tunables 0 0 0 : slabdata 1 1 0 -fscache_cookie_jar 1 35 464 35 4 : tunables 0 0 0 : slabdata 1 1 0 -jfs_mp 32 35 464 35 4 : tunables 0 0 0 : slabdata 1 1 0 -jfs_ip 0 0 1592 20 8 : tunables 0 0 0 : slabdata 0 0 0 -reiser_inode_cache 0 0 1096 29 8 : tunables 0 0 0 : slabdata 0 0 0 -btrfs_end_io_wq 0 0 464 35 4 : tunables 0 0 0 : slabdata 0 0 0 -btrfs_prelim_ref 0 0 424 38 4 : tunables 0 0 0 : slabdata 0 0 0 -btrfs_delayed_extent_op 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 -btrfs_delayed_data_ref 0 0 448 36 4 : tunables 0 0 0 : slabdata 0 0 0 -btrfs_delayed_tree_ref 0 0 440 37 4 : tunables 0 0 0 : slabdata 0 0 0 -btrfs_delayed_ref_head 0 0 480 34 4 : tunables 0 0 0 : slabdata 0 0 0 -btrfs_inode_defrag 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0 -btrfs_delayed_node 0 0 648 25 4 : tunables 0 0 0 : slabdata 0 0 0 -btrfs_ordered_extent 0 0 752 21 4 : tunables 0 0 0 : slabdata 0 0 0 -btrfs_extent_map 0 0 480 34 4 : tunables 0 0 0 : slabdata 0 0 0 -btrfs_extent_state 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0 -bio-3 35 92 704 23 4 : tunables 0 0 0 : slabdata 4 4 0 -btrfs_extent_buffer 0 0 600 27 4 : tunables 0 0 0 : slabdata 0 0 0 -btrfs_free_space_bitmap 0 0 12288 2 8 : tunables 0 0 0 : slabdata 0 0 0 -btrfs_free_space 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0 -btrfs_path 0 0 448 36 4 : tunables 0 0 0 : slabdata 0 0 0 -btrfs_trans_handle 0 0 440 37 4 : tunables 0 0 0 : slabdata 0 0 0 -btrfs_inode 0 0 1496 21 8 : tunables 0 0 0 : slabdata 0 0 0 -ext4_inode_cache 84136 84755 1400 23 8 : tunables 0 0 0 : slabdata 3685 3685 0 -ext4_free_data 22 80 392 20 2 : tunables 0 0 0 : slabdata 4 4 0 -ext4_allocation_context 0 70 464 35 4 : tunables 0 0 0 : slabdata 2 2 0 -ext4_prealloc_space 24 74 440 37 4 : tunables 0 0 0 : slabdata 2 2 0 -ext4_system_zone 267 273 376 21 2 : tunables 0 0 0 : slabdata 13 13 0 -ext4_io_end_vec 0 88 368 22 2 : tunables 0 0 0 : slabdata 4 4 0 -ext4_io_end 0 80 400 20 2 : tunables 0 0 0 : slabdata 4 4 0 -ext4_bio_post_read_ctx 128 147 384 21 2 : tunables 0 0 0 : slabdata 7 7 0 -ext4_pending_reservation 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 -ext4_extent_status 79351 79422 376 21 2 : tunables 0 0 0 : slabdata 3782 3782 0 -jbd2_transaction_s 44 100 640 25 4 : tunables 0 0 0 : slabdata 4 4 0 -jbd2_inode 6785 6840 400 20 2 : tunables 0 0 0 : slabdata 342 342 0 -jbd2_journal_handle 0 80 392 20 2 : tunables 0 0 0 : slabdata 4 4 0 -jbd2_journal_head 824 1944 448 36 4 : tunables 0 0 0 : slabdata 54 54 0 -jbd2_revoke_table_s 4 23 352 23 2 : tunables 0 0 0 : slabdata 1 1 0 -jbd2_revoke_record_s 0 156 416 39 4 : tunables 0 0 0 : slabdata 4 4 0 -ext2_inode_cache 0 0 1144 28 8 : tunables 0 0 0 : slabdata 0 0 0 -mbcache 0 0 392 20 2 : tunables 0 0 0 : slabdata 0 0 
0 -dm_thin_new_mapping 0 152 424 38 4 : tunables 0 0 0 : slabdata 4 4 0 -dm_snap_pending_exception 0 0 464 35 4 : tunables 0 0 0 : slabdata 0 0 0 -dm_exception 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 -dm_dirty_log_flush_entry 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 -dm_bio_prison_cell_v2 0 0 432 37 4 : tunables 0 0 0 : slabdata 0 0 0 -dm_bio_prison_cell 0 148 432 37 4 : tunables 0 0 0 : slabdata 4 4 0 -kcopyd_job 0 8 3648 8 8 : tunables 0 0 0 : slabdata 1 1 0 -io 0 32 512 32 4 : tunables 0 0 0 : slabdata 1 1 0 -dm_uevent 0 0 3224 10 8 : tunables 0 0 0 : slabdata 0 0 0 -dax_cache 1 28 1152 28 8 : tunables 0 0 0 : slabdata 1 1 0 -aic94xx_ascb 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0 -aic94xx_dma_token 0 0 384 21 2 : tunables 0 0 0 : slabdata 0 0 0 -asd_sas_event 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0 -sas_task 0 0 704 23 4 : tunables 0 0 0 : slabdata 0 0 0 -qla2xxx_srbs 0 0 832 39 8 : tunables 0 0 0 : slabdata 0 0 0 -sd_ext_cdb 2 22 368 22 2 : tunables 0 0 0 : slabdata 1 1 0 -scsi_sense_cache 258 288 512 32 4 : tunables 0 0 0 : slabdata 9 9 0 -virtio_scsi_cmd 64 75 640 25 4 : tunables 0 0 0 : slabdata 3 3 0 -L2TP/IPv6 0 0 1536 21 8 : tunables 0 0 0 : slabdata 0 0 0 -L2TP/IP 0 0 1408 23 8 : tunables 0 0 0 : slabdata 0 0 0 -ip6-frags 0 0 520 31 4 : tunables 0 0 0 : slabdata 0 0 0 -fib6_nodes 5 32 512 32 4 : tunables 0 0 0 : slabdata 1 1 0 -ip6_dst_cache 4 25 640 25 4 : tunables 0 0 0 : slabdata 1 1 0 -ip6_mrt_cache 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0 -PINGv6 0 0 1600 20 8 : tunables 0 0 0 : slabdata 0 0 0 -RAWv6 25 40 1600 20 8 : tunables 0 0 0 : slabdata 2 2 0 -UDPLITEv6 0 0 1728 18 8 : tunables 0 0 0 : slabdata 0 0 0 -UDPv6 3 54 1728 18 8 : tunables 0 0 0 : slabdata 3 3 0 -tw_sock_TCPv6 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0 -request_sock_TCPv6 0 0 632 25 4 : tunables 0 0 0 : slabdata 0 0 0 -TCPv6 0 33 2752 11 8 : tunables 0 0 0 : slabdata 3 3 0 -uhci_urb_priv 0 0 392 20 2 : tunables 0 0 0 : slabdata 0 0 0 -sgpool-128 2 14 4544 7 8 : tunables 0 0 0 : slabdata 2 2 0 -sgpool-64 2 13 2496 13 8 : tunables 0 0 0 : slabdata 1 1 0 -sgpool-32 2 44 1472 22 8 : tunables 0 0 0 : slabdata 2 2 0 -sgpool-16 2 68 960 34 8 : tunables 0 0 0 : slabdata 2 2 0 -sgpool-8 2 46 704 23 4 : tunables 0 0 0 : slabdata 2 2 0 -btree_node 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0 -bfq_io_cq 0 0 488 33 4 : tunables 0 0 0 : slabdata 0 0 0 -bfq_queue 0 0 848 38 8 : tunables 0 0 0 : slabdata 0 0 0 -mqueue_inode_cache 1 24 1344 24 8 : tunables 0 0 0 : slabdata 1 1 0 -isofs_inode_cache 0 0 968 33 8 : tunables 0 0 0 : slabdata 0 0 0 -io_kiocb 0 0 640 25 4 : tunables 0 0 0 : slabdata 0 0 0 -kioctx 0 30 1088 30 8 : tunables 0 0 0 : slabdata 1 1 0 -aio_kiocb 0 28 576 28 4 : tunables 0 0 0 : slabdata 1 1 0 -userfaultfd_ctx_cache 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0 -fanotify_path_event 0 0 392 20 2 : tunables 0 0 0 : slabdata 0 0 0 -fanotify_fid_event 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0 -fsnotify_mark 0 0 408 20 2 : tunables 0 0 0 : slabdata 0 0 0 -dnotify_mark 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0 -dnotify_struct 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 -dio 0 0 1088 30 8 : tunables 0 0 0 : slabdata 0 0 0 -bio-2 4 25 640 25 4 : tunables 0 0 0 : slabdata 1 1 0 -fasync_cache 0 0 384 21 2 : tunables 0 0 0 : slabdata 0 0 0 -audit_tree_mark 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0 -pid_namespace 30 34 480 34 4 : tunables 0 0 0 : slabdata 1 1 0 -posix_timers_cache 0 27 592 27 4 : tunables 0 0 0 : slabdata 1 1 0 -iommu_devinfo 24 32 512 32 4 : 
tunables 0 0 0 : slabdata 1 1 0 -iommu_domain 10 10 3264 10 8 : tunables 0 0 0 : slabdata 1 1 0 -iommu_iova 8682 8748 448 36 4 : tunables 0 0 0 : slabdata 243 243 0 -UNIX 529 814 1472 22 8 : tunables 0 0 0 : slabdata 37 37 0 -ip4-frags 0 0 536 30 4 : tunables 0 0 0 : slabdata 0 0 0 -ip_mrt_cache 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0 -UDP-Lite 0 0 1536 21 8 : tunables 0 0 0 : slabdata 0 0 0 -tcp_bind_bucket 7 128 512 32 4 : tunables 0 0 0 : slabdata 4 4 0 -inet_peer_cache 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0 -xfrm_dst_cache 0 0 704 23 4 : tunables 0 0 0 : slabdata 0 0 0 -xfrm_state 0 0 1152 28 8 : tunables 0 0 0 : slabdata 0 0 0 -ip_fib_trie 7 21 384 21 2 : tunables 0 0 0 : slabdata 1 1 0 -ip_fib_alias 9 20 392 20 2 : tunables 0 0 0 : slabdata 1 1 0 -ip_dst_cache 27 84 576 28 4 : tunables 0 0 0 : slabdata 3 3 0 -PING 0 0 1408 23 8 : tunables 0 0 0 : slabdata 0 0 0 -RAW 32 46 1408 23 8 : tunables 0 0 0 : slabdata 2 2 0 -UDP 11 168 1536 21 8 : tunables 0 0 0 : slabdata 8 8 0 -tw_sock_TCP 1 56 576 28 4 : tunables 0 0 0 : slabdata 2 2 0 -request_sock_TCP 0 25 632 25 4 : tunables 0 0 0 : slabdata 1 1 0 -TCP 10 60 2624 12 8 : tunables 0 0 0 : slabdata 5 5 0 -hugetlbfs_inode_cache 2 35 928 35 8 : tunables 0 0 0 : slabdata 1 1 0 -dquot 0 0 640 25 4 : tunables 0 0 0 : slabdata 0 0 0 -bio-1 32 46 704 23 4 : tunables 0 0 0 : slabdata 2 2 0 -eventpoll_pwq 409 600 408 20 2 : tunables 0 0 0 : slabdata 30 30 0 -eventpoll_epi 408 672 576 28 4 : tunables 0 0 0 : slabdata 24 24 0 -inotify_inode_mark 58 195 416 39 4 : tunables 0 0 0 : slabdata 5 5 0 -scsi_data_buffer 0 0 360 22 2 : tunables 0 0 0 : slabdata 0 0 0 -bio_crypt_ctx 128 147 376 21 2 : tunables 0 0 0 : slabdata 7 7 0 -request_queue 29 39 2408 13 8 : tunables 0 0 0 : slabdata 3 3 0 -blkdev_ioc 81 148 440 37 4 : tunables 0 0 0 : slabdata 4 4 0 -bio-0 125 200 640 25 4 : tunables 0 0 0 : slabdata 8 8 0 -biovec-max 166 196 4544 7 8 : tunables 0 0 0 : slabdata 28 28 0 -biovec-128 0 52 2496 13 8 : tunables 0 0 0 : slabdata 4 4 0 -biovec-64 0 88 1472 22 8 : tunables 0 0 0 : slabdata 4 4 0 -biovec-16 0 92 704 23 4 : tunables 0 0 0 : slabdata 4 4 0 -bio_integrity_payload 4 28 576 28 4 : tunables 0 0 0 : slabdata 1 1 0 -khugepaged_mm_slot 59 180 448 36 4 : tunables 0 0 0 : slabdata 5 5 0 -ksm_mm_slot 0 0 384 21 2 : tunables 0 0 0 : slabdata 0 0 0 -ksm_stable_node 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0 -ksm_rmap_item 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0 -user_namespace 2 37 864 37 8 : tunables 0 0 0 : slabdata 1 1 0 -uid_cache 5 28 576 28 4 : tunables 0 0 0 : slabdata 1 1 0 -dmaengine-unmap-256 1 13 2496 13 8 : tunables 0 0 0 : slabdata 1 1 0 -dmaengine-unmap-128 1 22 1472 22 8 : tunables 0 0 0 : slabdata 1 1 0 -dmaengine-unmap-16 1 28 576 28 4 : tunables 0 0 0 : slabdata 1 1 0 -dmaengine-unmap-2 1 36 448 36 4 : tunables 0 0 0 : slabdata 1 1 0 -audit_buffer 0 22 360 22 2 : tunables 0 0 0 : slabdata 1 1 0 -sock_inode_cache 663 1170 1216 26 8 : tunables 0 0 0 : slabdata 45 45 0 -skbuff_ext_cache 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0 -skbuff_fclone_cache 1 72 896 36 8 : tunables 0 0 0 : slabdata 2 2 0 -skbuff_head_cache 3 650 640 25 4 : tunables 0 0 0 : slabdata 26 26 0 -configfs_dir_cache 7 38 424 38 4 : tunables 0 0 0 : slabdata 1 1 0 -file_lock_cache 27 116 552 29 4 : tunables 0 0 0 : slabdata 4 4 0 -file_lock_ctx 106 120 392 20 2 : tunables 0 0 0 : slabdata 6 6 0 -fsnotify_mark_connector 52 66 368 22 2 : tunables 0 0 0 : slabdata 3 3 0 -net_namespace 1 6 5312 6 8 : tunables 0 0 0 : slabdata 1 1 0 -task_delay_info 
784 1560 416 39 4 : tunables 0 0 0 : slabdata 40 40 0 -taskstats 45 92 688 23 4 : tunables 0 0 0 : slabdata 4 4 0 -proc_dir_entry 678 682 528 31 4 : tunables 0 0 0 : slabdata 22 22 0 -pde_opener 0 189 376 21 2 : tunables 0 0 0 : slabdata 9 9 0 -proc_inode_cache 7150 8250 992 33 8 : tunables 0 0 0 : slabdata 250 250 0 -seq_file 60 735 456 35 4 : tunables 0 0 0 : slabdata 21 21 0 -sigqueue 0 156 416 39 4 : tunables 0 0 0 : slabdata 4 4 0 -bdev_cache 36 78 1216 26 8 : tunables 0 0 0 : slabdata 3 3 0 -shmem_inode_cache 1599 2208 1016 32 8 : tunables 0 0 0 : slabdata 69 69 0 -kernfs_iattrs_cache 1251 1254 424 38 4 : tunables 0 0 0 : slabdata 33 33 0 -kernfs_node_cache 52898 52920 464 35 4 : tunables 0 0 0 : slabdata 1512 1512 0 -mnt_cache 42 46 704 23 4 : tunables 0 0 0 : slabdata 2 2 0 -filp 4314 6371 704 23 4 : tunables 0 0 0 : slabdata 277 277 0 -inode_cache 28695 29505 920 35 8 : tunables 0 0 0 : slabdata 843 843 0 -dentry 166069 169074 528 31 4 : tunables 0 0 0 : slabdata 5454 5454 0 -names_cache 0 35 4544 7 8 : tunables 0 0 0 : slabdata 5 5 0 -hashtab_node 0 0 360 22 2 : tunables 0 0 0 : slabdata 0 0 0 -ebitmap_node 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0 -avtab_extended_perms 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 -avtab_node 0 0 360 22 2 : tunables 0 0 0 : slabdata 0 0 0 -avc_xperms_data 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 -avc_xperms_decision_node 0 0 384 21 2 : tunables 0 0 0 : slabdata 0 0 0 -avc_xperms_node 0 0 392 20 2 : tunables 0 0 0 : slabdata 0 0 0 -avc_node 37 40 408 20 2 : tunables 0 0 0 : slabdata 2 2 0 -iint_cache 0 0 448 36 4 : tunables 0 0 0 : slabdata 0 0 0 -lsm_inode_cache 122284 122340 392 20 2 : tunables 0 0 0 : slabdata 6117 6117 0 -lsm_file_cache 4266 4485 352 23 2 : tunables 0 0 0 : slabdata 195 195 0 -key_jar 8 25 640 25 4 : tunables 0 0 0 : slabdata 1 1 0 -buffer_head 255622 257076 440 37 4 : tunables 0 0 0 : slabdata 6948 6948 0 -uts_namespace 0 0 776 21 4 : tunables 0 0 0 : slabdata 0 0 0 -nsproxy 31 40 408 20 2 : tunables 0 0 0 : slabdata 2 2 0 -vm_area_struct 39115 43214 528 31 4 : tunables 0 0 0 : slabdata 1394 1394 0 -mm_struct 96 529 1408 23 8 : tunables 0 0 0 : slabdata 23 23 0 -fs_cache 102 756 448 36 4 : tunables 0 0 0 : slabdata 21 21 0 -files_cache 102 588 1152 28 8 : tunables 0 0 0 : slabdata 21 21 0 -signal_cache 266 672 1536 21 8 : tunables 0 0 0 : slabdata 32 32 0 -sighand_cache 266 507 2496 13 8 : tunables 0 0 0 : slabdata 39 39 0 -task_struct 783 963 10240 3 8 : tunables 0 0 0 : slabdata 321 321 0 -cred_jar 364 952 576 28 4 : tunables 0 0 0 : slabdata 34 34 0 -anon_vma_chain 63907 67821 416 39 4 : tunables 0 0 0 : slabdata 1739 1739 0 -anon_vma 25891 28899 416 39 4 : tunables 0 0 0 : slabdata 741 741 0 -pid 408 992 512 32 4 : tunables 0 0 0 : slabdata 31 31 0 -Acpi-Operand 6682 6740 408 20 2 : tunables 0 0 0 : slabdata 337 337 0 -Acpi-ParseExt 0 39 416 39 4 : tunables 0 0 0 : slabdata 1 1 0 -Acpi-Parse 0 80 392 20 2 : tunables 0 0 0 : slabdata 4 4 0 -Acpi-State 0 78 416 39 4 : tunables 0 0 0 : slabdata 2 2 0 -Acpi-Namespace 3911 3948 384 21 2 : tunables 0 0 0 : slabdata 188 188 0 -trace_event_file 2638 2660 424 38 4 : tunables 0 0 0 : slabdata 70 70 0 -ftrace_event_field 6592 6594 384 21 2 : tunables 0 0 0 : slabdata 314 314 0 -pool_workqueue 41 64 1024 32 8 : tunables 0 0 0 : slabdata 2 2 0 -radix_tree_node 21638 24045 912 35 8 : tunables 0 0 0 : slabdata 687 687 0 -task_group 48 78 1216 26 8 : tunables 0 0 0 : slabdata 3 3 0 -vmap_area 4411 4680 400 20 2 : tunables 0 0 0 : slabdata 234 234 0 -dma-kmalloc-8k 0 0 24576 
1 8 : tunables 0 0 0 : slabdata 0 0 0 -dma-kmalloc-4k 0 0 12288 2 8 : tunables 0 0 0 : slabdata 0 0 0 -dma-kmalloc-2k 0 0 6144 5 8 : tunables 0 0 0 : slabdata 0 0 0 -dma-kmalloc-1k 0 0 3072 10 8 : tunables 0 0 0 : slabdata 0 0 0 -dma-kmalloc-512 0 0 1536 21 8 : tunables 0 0 0 : slabdata 0 0 0 -dma-kmalloc-256 0 0 1024 32 8 : tunables 0 0 0 : slabdata 0 0 0 -dma-kmalloc-128 0 0 640 25 4 : tunables 0 0 0 : slabdata 0 0 0 -dma-kmalloc-64 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0 -dma-kmalloc-32 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0 -dma-kmalloc-16 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 -dma-kmalloc-8 0 0 344 23 2 : tunables 0 0 0 : slabdata 0 0 0 -dma-kmalloc-192 0 0 528 31 4 : tunables 0 0 0 : slabdata 0 0 0 -dma-kmalloc-96 0 0 432 37 4 : tunables 0 0 0 : slabdata 0 0 0 -kmalloc-rcl-8k 0 0 24576 1 8 : tunables 0 0 0 : slabdata 0 0 0 -kmalloc-rcl-4k 0 0 12288 2 8 : tunables 0 0 0 : slabdata 0 0 0 -kmalloc-rcl-2k 0 0 6144 5 8 : tunables 0 0 0 : slabdata 0 0 0 -kmalloc-rcl-1k 0 0 3072 10 8 : tunables 0 0 0 : slabdata 0 0 0 -kmalloc-rcl-512 0 0 1536 21 8 : tunables 0 0 0 : slabdata 0 0 0 -kmalloc-rcl-256 0 0 1024 32 8 : tunables 0 0 0 : slabdata 0 0 0 -kmalloc-rcl-192 0 0 528 31 4 : tunables 0 0 0 : slabdata 0 0 0 -kmalloc-rcl-128 31 75 640 25 4 : tunables 0 0 0 : slabdata 3 3 0 -kmalloc-rcl-96 3371 3626 432 37 4 : tunables 0 0 0 : slabdata 98 98 0 -kmalloc-rcl-64 2080 2272 512 32 4 : tunables 0 0 0 : slabdata 71 71 0 -kmalloc-rcl-32 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0 -kmalloc-rcl-16 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0 -kmalloc-rcl-8 0 0 344 23 2 : tunables 0 0 0 : slabdata 0 0 0 -kmalloc-8k 133 140 24576 1 8 : tunables 0 0 0 : slabdata 140 140 0 -kmalloc-4k 403 444 12288 2 8 : tunables 0 0 0 : slabdata 222 222 0 -kmalloc-2k 2391 2585 6144 5 8 : tunables 0 0 0 : slabdata 517 517 0 -kmalloc-1k 2163 2420 3072 10 8 : tunables 0 0 0 : slabdata 242 242 0 -kmalloc-512 2972 3633 1536 21 8 : tunables 0 0 0 : slabdata 173 173 0 -kmalloc-256 1841 1856 1024 32 8 : tunables 0 0 0 : slabdata 58 58 0 -kmalloc-192 2165 2914 528 31 4 : tunables 0 0 0 : slabdata 94 94 0 -kmalloc-128 1137 1175 640 25 4 : tunables 0 0 0 : slabdata 47 47 0 -kmalloc-96 1925 2590 432 37 4 : tunables 0 0 0 : slabdata 70 70 0 -kmalloc-64 9433 10688 512 32 4 : tunables 0 0 0 : slabdata 334 334 0 -kmalloc-32 9098 10062 416 39 4 : tunables 0 0 0 : slabdata 258 258 0 -kmalloc-16 10914 10956 368 22 2 : tunables 0 0 0 : slabdata 498 498 0 -kmalloc-8 7576 7705 344 23 2 : tunables 0 0 0 : slabdata 335 335 0 -kmem_cache_node 904 928 512 32 4 : tunables 0 0 0 : slabdata 29 29 0 -kmem_cache 904 936 832 39 8 : tunables 0 0 0 : slabdata 24 24 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/stat -Lines: 16 -cpu 301854 612 111922 8979004 3552 2 3944 0 0 0 -cpu0 44490 19 21045 1087069 220 1 3410 0 0 0 -cpu1 47869 23 16474 1110787 591 0 46 0 0 0 -cpu2 46504 36 15916 1112321 441 0 326 0 0 0 -cpu3 47054 102 15683 1113230 533 0 60 0 0 0 -cpu4 28413 25 10776 1140321 217 0 8 0 0 0 -cpu5 29271 101 11586 1136270 672 0 30 0 0 0 -cpu6 29152 36 10276 1139721 319 0 29 0 0 0 -cpu7 29098 268 10164 1139282 555 0 31 0 0 0 -intr 8885917 17 0 0 0 0 0 0 0 1 79281 0 0 0 0 0 0 0 231237 0 0 0 0 250586 103 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 223424 190745 13 906 1283803 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
[... several thousand further deleted lines elided. This hunk removes a vendored ttar test-fixture archive encoding a fake /proc and /sys tree as plain text (the fixtures/proc and fixtures/sys layout matches the fixtures shipped with github.com/prometheus/procfs). Each file is stored as a record of the form

    Path: fixtures/proc/swaps
    Lines: 2
    Filename Type Size Used Priority
    /dev/dm-2 partition 131068 176 -2
    Mode: 444

with records separated by "# ttar - - - ..." rule lines; directories and symlinks use "Directory:" and "SymlinkTo:" records instead. The deleted fixtures in this span cover the tail of proc/stat, proc/swaps, proc/symlinktargets/*, proc/sys/kernel/random/*, proc/sys/vm/*, proc/zoneinfo, sys/block/{dm-0,sda}/*, sys/class/drm/card0/device/*, sys/class/fc_host/host0/*, sys/class/infiniband/{hfi1_0,mlx4_0}/ports/*, sys/class/net/eth0/*, sys/class/nvme/nvme0/*, sys/class/power_supply/*, sys/class/powercap/intel-rapl*/*, sys/class/scsi_tape/*, and sys/class/thermal/* ...]
-Lines: 1 -bcm2835_thermal -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/class/thermal/thermal_zone1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/thermal_zone1/mode -Lines: 1 -enabled -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/thermal_zone1/passive -Lines: 1 -0 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/thermal_zone1/policy -Lines: 1 -step_wise -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/thermal_zone1/temp -Lines: 1 --44000 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/class/thermal/thermal_zone1/type -Lines: 1 -acpitz -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/LNXSYSTM:00 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/device -SymlinkTo: ../../../ACPI0003:00 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/online -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/async -Lines: 1 -disabled -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/autosuspend_delay_ms -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/control -Lines: 1 -auto -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_active_kids -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_active_time -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_enabled -Lines: 1 -disabled -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_status -Lines: 1 -unsupported -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_suspended_time -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_usage -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup -Lines: 1 -enabled -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_abort_count -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_active -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_active_count -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_count -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_expire_count -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_last_time_ms -Lines: 1 -10598 -Mode: 444 -# ttar - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_max_time_ms -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_prevent_sleep_time_ms -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_total_time_ms -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/subsystem -SymlinkTo: ../../../../../../../../../class/power_supply -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/type -Lines: 1 -Mains -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/uevent -Lines: 2 -POWER_SUPPLY_NAME=AC -POWER_SUPPLY_ONLINE=0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/alarm -Lines: 1 -2369000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/capacity -Lines: 1 -98 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/capacity_level -Lines: 1 -Normal -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/charge_start_threshold -Lines: 1 -95 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/charge_stop_threshold -Lines: 1 -100 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/cycle_count -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/device -SymlinkTo: ../../../PNP0C0A:00 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/energy_full -Lines: 1 -50060000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/energy_full_design -Lines: 1 -47520000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/energy_now -Lines: 1 -49450000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/manufacturer -Lines: 1 -LGC -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/model_name -Lines: 1 -LNV-45N1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/async -Lines: 1 -disabled -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/autosuspend_delay_ms -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/control -Lines: 1 -auto -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_active_kids -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_active_time -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_enabled -Lines: 1 -disabled -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_status -Lines: 1 -unsupported -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_suspended_time -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_usage -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power_now -Lines: 1 -4830000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/present -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/serial_number -Lines: 1 -38109 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/status -Lines: 1 -Discharging -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/subsystem -SymlinkTo: ../../../../../../../../../class/power_supply -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/technology -Lines: 1 -Li-ion -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/type -Lines: 1 -Battery -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/uevent -Lines: 16 -POWER_SUPPLY_NAME=BAT0 -POWER_SUPPLY_STATUS=Discharging -POWER_SUPPLY_PRESENT=1 -POWER_SUPPLY_TECHNOLOGY=Li-ion -POWER_SUPPLY_CYCLE_COUNT=0 -POWER_SUPPLY_VOLTAGE_MIN_DESIGN=10800000 -POWER_SUPPLY_VOLTAGE_NOW=11750000 -POWER_SUPPLY_POWER_NOW=5064000 -POWER_SUPPLY_ENERGY_FULL_DESIGN=47520000 -POWER_SUPPLY_ENERGY_FULL=47390000 -POWER_SUPPLY_ENERGY_NOW=40730000 -POWER_SUPPLY_CAPACITY=85 -POWER_SUPPLY_CAPACITY_LEVEL=Normal -POWER_SUPPLY_MODEL_NAME=LNV-45N1 -POWER_SUPPLY_MANUFACTURER=LGC -POWER_SUPPLY_SERIAL_NUMBER=38109 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/voltage_min_design -Lines: 1 -10800000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/voltage_now -Lines: 1 -12229000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
-Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/in_flight -Lines: 1 -1EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/io_ns -Lines: 1 -9247011087720EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/other_cnt -Lines: 1 -1409EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/read_byte_cnt -Lines: 1 -979383912EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/read_cnt -Lines: 1 -3741EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/read_ns -Lines: 1 -33788355744EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/resid_cnt -Lines: 1 -19EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/write_byte_cnt -Lines: 1 -1496246784000EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/write_cnt -Lines: 1 -53772916EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0/stats/write_ns -Lines: 1 -5233597394395EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/in_flight -Lines: 1 -1EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/io_ns -Lines: 1 -9247011087720EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/other_cnt -Lines: 1 -1409EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/read_byte_cnt -Lines: 1 -979383912EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/read_cnt -Lines: 1 -3741EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/read_ns -Lines: 1 -33788355744EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/resid_cnt -Lines: 1 -19EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/write_byte_cnt -Lines: 1 -1496246784000EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/write_cnt -Lines: 1 -53772916EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0a/stats/write_ns -Lines: 1 -5233597394395EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats -Mode: 755 -# 
ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/in_flight -Lines: 1 -1EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/io_ns -Lines: 1 -9247011087720EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/other_cnt -Lines: 1 -1409EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/read_byte_cnt -Lines: 1 -979383912EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/read_cnt -Lines: 1 -3741EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/read_ns -Lines: 1 -33788355744EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/resid_cnt -Lines: 1 -19EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/write_byte_cnt -Lines: 1 -1496246784000EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/write_cnt -Lines: 1 -53772916EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0l/stats/write_ns -Lines: 1 -5233597394395EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/in_flight -Lines: 1 -1EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/io_ns -Lines: 1 -9247011087720EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/other_cnt -Lines: 1 -1409EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/read_byte_cnt -Lines: 1 -979383912EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/read_cnt -Lines: 1 -3741EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/read_ns -Lines: 1 -33788355744EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/resid_cnt -Lines: 1 -19EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/write_byte_cnt -Lines: 1 -1496246784000EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/write_cnt -Lines: 1 -53772916EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/nst0m/stats/write_ns -Lines: 1 -5233597394395EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/in_flight -Lines: 1 -1EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/io_ns -Lines: 1 -9247011087720EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/other_cnt -Lines: 1 -1409EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/read_byte_cnt -Lines: 1 -979383912EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/read_cnt -Lines: 1 -3741EOF -Mode: 444 -# 
ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/read_ns -Lines: 1 -33788355744EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/resid_cnt -Lines: 1 -19EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/write_byte_cnt -Lines: 1 -1496246784000EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/write_cnt -Lines: 1 -53772916EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0/stats/write_ns -Lines: 1 -5233597394395EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/in_flight -Lines: 1 -1EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/io_ns -Lines: 1 -9247011087720EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/other_cnt -Lines: 1 -1409EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/read_byte_cnt -Lines: 1 -979383912EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/read_cnt -Lines: 1 -3741EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/read_ns -Lines: 1 -33788355744EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/resid_cnt -Lines: 1 -19EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/write_byte_cnt -Lines: 1 -1496246784000EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/write_cnt -Lines: 1 -53772916EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0a/stats/write_ns -Lines: 1 -5233597394395EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/in_flight -Lines: 1 -1EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/io_ns -Lines: 1 -9247011087720EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/other_cnt -Lines: 1 -1409EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/read_byte_cnt -Lines: 1 -979383912EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/read_cnt -Lines: 1 -3741EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/read_ns -Lines: 1 -33788355744EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/resid_cnt -Lines: 1 -19EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/write_byte_cnt -Lines: 1 -1496246784000EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/write_cnt -Lines: 1 -53772916EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0l/stats/write_ns -Lines: 1 
-5233597394395EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/in_flight -Lines: 1 -1EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/io_ns -Lines: 1 -9247011087720EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/other_cnt -Lines: 1 -1409EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/read_byte_cnt -Lines: 1 -979383912EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/read_cnt -Lines: 1 -3741EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/read_ns -Lines: 1 -33788355744EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/resid_cnt -Lines: 1 -19EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/write_byte_cnt -Lines: 1 -1496246784000EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/write_cnt -Lines: 1 -53772916EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:00.0/host0/port-0:0/end_device-0:0/target0:0:0/0:0:0:0/scsi_tape/st0m/stats/write_ns -Lines: 1 -5233597394395EOF -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0 
-Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/dirty_data -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_hit_ratio -Lines: 1 -100 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_hits -Lines: 1 -289 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
[... continuation of the deleted vendored ttar test-fixture archive (prometheus/procfs fixtures.ttar); the removed sysfs fixtures in this span cover: bcache stats for backing devices sdb/sdc and cache set deaddd54-c735-46d5-868e-f331c5fd7c74 (stats_five_minute/stats_hour/stats_day/stats_total counters, priority_stats, writeback_rate_debug), PCI device 0000:00:1f.6 (e1000e) attributes, rbd device names and pools, clocksource0, cpu0/cpu1 topology, thermal_throttle and cpufreq entries (including policy0/policy1), NUMA node1/node2 vmstat, btrfs filesystems 0abb23a9-579b-43e6-ad30-227ef47fcb9d and 7f07c59f-6136-449c-ab87-e1cf2328731b (data/metadata/system allocation with raid0/raid1/raid5/raid6 profiles, devices, features), xfs sda1/sdb1 stats, and kernel configfs LIO target core entries (fileio_0/fileio_1, iblock_0) ...]
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/core/iblock_0/block_lio_rbd1/udev_path -Lines: 1 -/dev/rbd1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/core/rbd_0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/core/rbd_0/iscsi-images-demo -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/core/rbd_0/iscsi-images-demo/enable -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/core/rbd_0/iscsi-images-demo/udev_path -Lines: 1 -/dev/rbd/iscsi-images/demo -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/core/rd_mcp_119 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/core/rd_mcp_119/ramdisk_lio_1G -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/core/rd_mcp_119/ramdisk_lio_1G/enable -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/core/rd_mcp_119/ramdisk_lio_1G/udev_path -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/enable -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/7f4a4eb56d -SymlinkTo: ../../../../../../target/core/rd_mcp_119/ramdisk_lio_1G -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/statistics -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/in_cmds -Lines: 1 -204950 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/read_mbytes -Lines: 1 -10325 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/write_mbytes -Lines: 1 -40325 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/enable -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/795b7c7026 -SymlinkTo: ../../../../../../target/core/iblock_0/block_lio_rbd1 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/statistics -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/statistics/scsi_tgt_port -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/in_cmds -Lines: 1 -104950 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/read_mbytes -Lines: 1 -20095 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/write_mbytes -Lines: 1 -71235 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/enable -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/fff5e16686 -SymlinkTo: ../../../../../../target/core/fileio_1/file_lio_1G -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/statistics -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/in_cmds -Lines: 1 -301950 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/read_mbytes -Lines: 1 -10195 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/write_mbytes -Lines: 1 -30195 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/enable -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/eba1edf893 -SymlinkTo: ../../../../../../target/core/rbd_0/iscsi-images-demo -# ttar - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/statistics -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/statistics/scsi_tgt_port -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/in_cmds -Lines: 1 -1234 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/read_mbytes -Lines: 1 -1504 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/write_mbytes -Lines: 1 -4733 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/tools/vendor/github.com/prometheus/procfs/fs.go b/tools/vendor/github.com/prometheus/procfs/fs.go index 0102ab0fd..9bdaccc7c 100644 --- a/tools/vendor/github.com/prometheus/procfs/fs.go +++ b/tools/vendor/github.com/prometheus/procfs/fs.go @@ -20,11 +20,18 @@ import ( // FS represents the pseudo-filesystem sys, which provides an interface to // kernel data structures. type FS struct { - proc fs.FS + proc fs.FS + isReal bool } -// DefaultMountPoint is the common mount point of the proc filesystem. -const DefaultMountPoint = fs.DefaultProcMountPoint +const ( + // DefaultMountPoint is the common mount point of the proc filesystem. + DefaultMountPoint = fs.DefaultProcMountPoint + + // SectorSize represents the size of a sector in bytes. + // It is specific to Linux block I/O operations. + SectorSize = 512 +) // NewDefaultFS returns a new proc FS mounted under the default proc mountPoint. // It will error if the mount point directory can't be read or is a file. @@ -39,5 +46,11 @@ func NewFS(mountPoint string) (FS, error) { if err != nil { return FS{}, err } - return FS{fs}, nil + + isReal, err := isRealProc(mountPoint) + if err != nil { + return FS{}, err + } + + return FS{fs, isReal}, nil } diff --git a/tools/vendor/github.com/prometheus/procfs/fs_statfs_notype.go b/tools/vendor/github.com/prometheus/procfs/fs_statfs_notype.go new file mode 100644 index 000000000..1b5bdbdf8 --- /dev/null +++ b/tools/vendor/github.com/prometheus/procfs/fs_statfs_notype.go @@ -0,0 +1,23 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !freebsd && !linux +// +build !freebsd,!linux + +package procfs + +// isRealProc returns true on architectures that don't have a Type argument +// in their Statfs_t struct. 
+func isRealProc(_ string) (bool, error) { + return true, nil +} diff --git a/tools/vendor/github.com/prometheus/procfs/fs_statfs_type.go b/tools/vendor/github.com/prometheus/procfs/fs_statfs_type.go new file mode 100644 index 000000000..80df79c31 --- /dev/null +++ b/tools/vendor/github.com/prometheus/procfs/fs_statfs_type.go @@ -0,0 +1,33 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build freebsd || linux +// +build freebsd linux + +package procfs + +import ( + "syscall" +) + +// isRealProc determines whether supplied mountpoint is really a proc filesystem. +func isRealProc(mountPoint string) (bool, error) { + stat := syscall.Statfs_t{} + err := syscall.Statfs(mountPoint, &stat) + if err != nil { + return false, err + } + + // 0x9fa0 is PROC_SUPER_MAGIC: https://elixir.bootlin.com/linux/v6.1/source/include/uapi/linux/magic.h#L87 + return stat.Type == 0x9fa0, nil +} diff --git a/tools/vendor/github.com/prometheus/procfs/fscache.go b/tools/vendor/github.com/prometheus/procfs/fscache.go index f8070e6e2..7db863307 100644 --- a/tools/vendor/github.com/prometheus/procfs/fscache.go +++ b/tools/vendor/github.com/prometheus/procfs/fscache.go @@ -162,7 +162,7 @@ type Fscacheinfo struct { ReleaseRequestsAgainstPagesStoredByTimeLockGranted uint64 // Number of release reqs ignored due to in-progress store ReleaseRequestsIgnoredDueToInProgressStore uint64 - // Number of page stores cancelled due to release req + // Number of page stores canceled due to release req PageStoresCancelledByReleaseRequests uint64 VmscanWaiting uint64 // Number of times async ops added to pending queues @@ -171,11 +171,11 @@ type Fscacheinfo struct { OpsRunning uint64 // Number of times async ops queued for processing OpsEnqueued uint64 - // Number of async ops cancelled + // Number of async ops canceled OpsCancelled uint64 // Number of async ops rejected due to object lookup/create failure OpsRejected uint64 - // Number of async ops initialised + // Number of async ops initialized OpsInitialised uint64 // Number of async ops queued for deferred release OpsDeferred uint64 @@ -236,7 +236,7 @@ func (fs FS) Fscacheinfo() (Fscacheinfo, error) { m, err := parseFscacheinfo(bytes.NewReader(b)) if err != nil { - return Fscacheinfo{}, fmt.Errorf("failed to parse Fscacheinfo: %w", err) + return Fscacheinfo{}, fmt.Errorf("%w: Cannot parse %v: %w", ErrFileParse, m, err) } return *m, nil @@ -245,7 +245,7 @@ func (fs FS) Fscacheinfo() (Fscacheinfo, error) { func setFSCacheFields(fields []string, setFields ...*uint64) error { var err error if len(fields) < len(setFields) { - return fmt.Errorf("Insufficient number of fields, expected %v, got %v", len(setFields), len(fields)) + return fmt.Errorf("%w: Expected %d, but got %d: %w", ErrFileParse, len(setFields), len(fields), err) } for i := range setFields { @@ -263,7 +263,7 @@ func parseFscacheinfo(r io.Reader) (*Fscacheinfo, error) { for s.Scan() { fields := strings.Fields(s.Text()) if len(fields) < 2 { - return nil, 
fmt.Errorf("malformed Fscacheinfo line: %q", s.Text()) + return nil, fmt.Errorf("%w: malformed Fscacheinfo line: %q", ErrFileParse, s.Text()) } switch fields[0] { diff --git a/tools/vendor/github.com/prometheus/procfs/internal/fs/fs.go b/tools/vendor/github.com/prometheus/procfs/internal/fs/fs.go index 0040753b1..3a43e8391 100644 --- a/tools/vendor/github.com/prometheus/procfs/internal/fs/fs.go +++ b/tools/vendor/github.com/prometheus/procfs/internal/fs/fs.go @@ -26,8 +26,11 @@ const ( // DefaultSysMountPoint is the common mount point of the sys filesystem. DefaultSysMountPoint = "/sys" - // DefaultConfigfsMountPoint is the common mount point of the configfs + // DefaultConfigfsMountPoint is the common mount point of the configfs. DefaultConfigfsMountPoint = "/sys/kernel/config" + + // DefaultSelinuxMountPoint is the common mount point of the selinuxfs. + DefaultSelinuxMountPoint = "/sys/fs/selinux" ) // FS represents a pseudo-filesystem, normally /proc or /sys, which provides an diff --git a/tools/vendor/github.com/prometheus/procfs/internal/util/parse.go b/tools/vendor/github.com/prometheus/procfs/internal/util/parse.go index 22cb07a6b..5a7d2df06 100644 --- a/tools/vendor/github.com/prometheus/procfs/internal/util/parse.go +++ b/tools/vendor/github.com/prometheus/procfs/internal/util/parse.go @@ -14,7 +14,8 @@ package util import ( - "io/ioutil" + "errors" + "os" "strconv" "strings" ) @@ -64,9 +65,24 @@ func ParsePInt64s(ss []string) ([]*int64, error) { return us, nil } +// Parses a uint64 from given hex in string. +func ParseHexUint64s(ss []string) ([]*uint64, error) { + us := make([]*uint64, 0, len(ss)) + for _, s := range ss { + u, err := strconv.ParseUint(s, 16, 64) + if err != nil { + return nil, err + } + + us = append(us, &u) + } + + return us, nil +} + // ReadUintFromFile reads a file and attempts to parse a uint64 from it. func ReadUintFromFile(path string) (uint64, error) { - data, err := ioutil.ReadFile(path) + data, err := os.ReadFile(path) if err != nil { return 0, err } @@ -75,7 +91,7 @@ func ReadUintFromFile(path string) (uint64, error) { // ReadIntFromFile reads a file and attempts to parse a int64 from it. func ReadIntFromFile(path string) (int64, error) { - data, err := ioutil.ReadFile(path) + data, err := os.ReadFile(path) if err != nil { return 0, err } @@ -95,3 +111,16 @@ func ParseBool(b string) *bool { } return &truth } + +// ReadHexFromFile reads a file and attempts to parse a uint64 from a hexadecimal format 0xXX. +func ReadHexFromFile(path string) (uint64, error) { + data, err := os.ReadFile(path) + if err != nil { + return 0, err + } + hexString := strings.TrimSpace(string(data)) + if !strings.HasPrefix(hexString, "0x") { + return 0, errors.New("invalid format: hex string does not start with '0x'") + } + return strconv.ParseUint(hexString[2:], 16, 64) +} diff --git a/tools/vendor/github.com/prometheus/procfs/internal/util/readfile.go b/tools/vendor/github.com/prometheus/procfs/internal/util/readfile.go index 8051161b2..71b7a70eb 100644 --- a/tools/vendor/github.com/prometheus/procfs/internal/util/readfile.go +++ b/tools/vendor/github.com/prometheus/procfs/internal/util/readfile.go @@ -15,17 +15,16 @@ package util import ( "io" - "io/ioutil" "os" ) -// ReadFileNoStat uses ioutil.ReadAll to read contents of entire file. -// This is similar to ioutil.ReadFile but without the call to os.Stat, because +// ReadFileNoStat uses io.ReadAll to read contents of entire file. 
+// This is similar to os.ReadFile but without the call to os.Stat, because // many files in /proc and /sys report incorrect file sizes (either 0 or 4096). -// Reads a max file size of 512kB. For files larger than this, a scanner +// Reads a max file size of 1024kB. For files larger than this, a scanner // should be used. func ReadFileNoStat(filename string) ([]byte, error) { - const maxBufferSize = 1024 * 512 + const maxBufferSize = 1024 * 1024 f, err := os.Open(filename) if err != nil { @@ -34,5 +33,5 @@ func ReadFileNoStat(filename string) ([]byte, error) { defer f.Close() reader := io.LimitReader(f, maxBufferSize) - return ioutil.ReadAll(reader) + return io.ReadAll(reader) } diff --git a/tools/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go b/tools/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go index c07de0b6c..1ab875cee 100644 --- a/tools/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go +++ b/tools/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go @@ -11,7 +11,9 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build linux,!appengine +//go:build (linux || darwin) && !appengine +// +build linux darwin +// +build !appengine package util @@ -21,7 +23,7 @@ import ( "syscall" ) -// SysReadFile is a simplified ioutil.ReadFile that invokes syscall.Read directly. +// SysReadFile is a simplified os.ReadFile that invokes syscall.Read directly. // https://github.com/prometheus/node_exporter/pull/728/files // // Note that this function will not read files larger than 128 bytes. @@ -33,7 +35,7 @@ func SysReadFile(file string) (string, error) { defer f.Close() // On some machines, hwmon drivers are broken and return EAGAIN. This causes - // Go's ioutil.ReadFile implementation to poll forever. + // Go's os.ReadFile implementation to poll forever. // // Since we either want to read data or bail immediately, do the simplest // possible read using syscall directly. diff --git a/tools/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go b/tools/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go index bd55b4537..1d86f5e63 100644 --- a/tools/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go +++ b/tools/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go @@ -11,7 +11,8 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-// +build linux,appengine !linux +//go:build (linux && appengine) || (!linux && !darwin) +// +build linux,appengine !linux,!darwin package util diff --git a/tools/vendor/github.com/prometheus/procfs/ipvs.go b/tools/vendor/github.com/prometheus/procfs/ipvs.go index 89e447746..bc3a20c93 100644 --- a/tools/vendor/github.com/prometheus/procfs/ipvs.go +++ b/tools/vendor/github.com/prometheus/procfs/ipvs.go @@ -20,7 +20,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "net" "os" "strconv" @@ -84,7 +83,7 @@ func parseIPVSStats(r io.Reader) (IPVSStats, error) { stats IPVSStats ) - statContent, err := ioutil.ReadAll(r) + statContent, err := io.ReadAll(r) if err != nil { return IPVSStats{}, err } @@ -222,15 +221,16 @@ func parseIPPort(s string) (net.IP, uint16, error) { case 46: ip = net.ParseIP(s[1:40]) if ip == nil { - return nil, 0, fmt.Errorf("invalid IPv6 address: %s", s[1:40]) + return nil, 0, fmt.Errorf("%w: Invalid IPv6 addr %s: %w", ErrFileParse, s[1:40], err) } default: - return nil, 0, fmt.Errorf("unexpected IP:Port: %s", s) + return nil, 0, fmt.Errorf("%w: Unexpected IP:Port %s: %w", ErrFileParse, s, err) } portString := s[len(s)-4:] if len(portString) != 4 { - return nil, 0, fmt.Errorf("unexpected port string format: %s", portString) + return nil, 0, + fmt.Errorf("%w: Unexpected port string format %s: %w", ErrFileParse, portString, err) } port, err := strconv.ParseUint(portString, 16, 16) if err != nil { diff --git a/tools/vendor/github.com/prometheus/procfs/kernel_random.go b/tools/vendor/github.com/prometheus/procfs/kernel_random.go index da3a941d6..db88566bd 100644 --- a/tools/vendor/github.com/prometheus/procfs/kernel_random.go +++ b/tools/vendor/github.com/prometheus/procfs/kernel_random.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !windows // +build !windows package procfs diff --git a/tools/vendor/github.com/prometheus/procfs/loadavg.go b/tools/vendor/github.com/prometheus/procfs/loadavg.go index 0cce190ec..332e76c17 100644 --- a/tools/vendor/github.com/prometheus/procfs/loadavg.go +++ b/tools/vendor/github.com/prometheus/procfs/loadavg.go @@ -21,7 +21,7 @@ import ( "github.com/prometheus/procfs/internal/util" ) -// LoadAvg represents an entry in /proc/loadavg +// LoadAvg represents an entry in /proc/loadavg. 
type LoadAvg struct { Load1 float64 Load5 float64 @@ -44,14 +44,14 @@ func parseLoad(loadavgBytes []byte) (*LoadAvg, error) { loads := make([]float64, 3) parts := strings.Fields(string(loadavgBytes)) if len(parts) < 3 { - return nil, fmt.Errorf("malformed loadavg line: too few fields in loadavg string: %q", string(loadavgBytes)) + return nil, fmt.Errorf("%w: Malformed line %q", ErrFileParse, string(loadavgBytes)) } var err error for i, load := range parts[0:3] { loads[i], err = strconv.ParseFloat(load, 64) if err != nil { - return nil, fmt.Errorf("could not parse load %q: %w", load, err) + return nil, fmt.Errorf("%w: Cannot parse load: %f: %w", ErrFileParse, loads[i], err) } } return &LoadAvg{ diff --git a/tools/vendor/github.com/prometheus/procfs/mdstat.go b/tools/vendor/github.com/prometheus/procfs/mdstat.go index f0b9e5f75..67a9d2b44 100644 --- a/tools/vendor/github.com/prometheus/procfs/mdstat.go +++ b/tools/vendor/github.com/prometheus/procfs/mdstat.go @@ -15,7 +15,7 @@ package procfs import ( "fmt" - "io/ioutil" + "os" "regexp" "strconv" "strings" @@ -23,7 +23,7 @@ import ( var ( statusLineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[([U_]+)\]`) - recoveryLineBlocksRE = regexp.MustCompile(`\((\d+)/\d+\)`) + recoveryLineBlocksRE = regexp.MustCompile(`\((\d+/\d+)\)`) recoveryLinePctRE = regexp.MustCompile(`= (.+)%`) recoveryLineFinishRE = regexp.MustCompile(`finish=(.+)min`) recoveryLineSpeedRE = regexp.MustCompile(`speed=(.+)[A-Z]`) @@ -50,6 +50,8 @@ type MDStat struct { BlocksTotal int64 // Number of blocks on the device that are in sync. BlocksSynced int64 + // Number of blocks on the device that need to be synced. + BlocksToBeSynced int64 // progress percentage of current sync BlocksSyncedPct float64 // estimated finishing time for current sync (in minutes) @@ -64,13 +66,13 @@ type MDStat struct { // structs containing the relevant info. More information available here: // https://raid.wiki.kernel.org/index.php/Mdstat func (fs FS) MDStat() ([]MDStat, error) { - data, err := ioutil.ReadFile(fs.proc.Path("mdstat")) + data, err := os.ReadFile(fs.proc.Path("mdstat")) if err != nil { return nil, err } mdstat, err := parseMDStat(data) if err != nil { - return nil, fmt.Errorf("error parsing mdstat %q: %w", fs.proc.Path("mdstat"), err) + return nil, fmt.Errorf("%w: Cannot parse %v: %w", ErrFileParse, fs.proc.Path("mdstat"), err) } return mdstat, nil } @@ -90,13 +92,13 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { deviceFields := strings.Fields(line) if len(deviceFields) < 3 { - return nil, fmt.Errorf("not enough fields in mdline (expected at least 3): %s", line) + return nil, fmt.Errorf("%w: Expected 3+ lines, got %q", ErrFileParse, line) } mdName := deviceFields[0] // mdx state := deviceFields[2] // active or inactive if len(lines) <= i+3 { - return nil, fmt.Errorf("error parsing %q: too few lines for md device", mdName) + return nil, fmt.Errorf("%w: Too few lines for md device: %q", ErrFileParse, mdName) } // Failed disks have the suffix (F) & Spare disks have the suffix (S). 
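// A minimal, self-contained sketch (illustrative only, not part of the
// vendored files) of the recoveryLineBlocksRE change above: the capture
// group was widened from `\((\d+)/\d+\)` to `\((\d+/\d+)\)`, so a single
// match now carries both halves of the "(synced/total)" pair that feed
// BlocksSynced and the new BlocksToBeSynced field, as the hunks below split
// it. The sample recovery line is a typical /proc/mdstat entry.
package main

import (
	"fmt"
	"regexp"
	"strconv"
	"strings"
)

var recoveryLineBlocksRE = regexp.MustCompile(`\((\d+/\d+)\)`)

func main() {
	line := "[===>.........]  recovery = 25.0% (12345/67890) finish=1.2min speed=10000K/sec"

	m := recoveryLineBlocksRE.FindStringSubmatch(line)
	if len(m) != 2 {
		panic("no block counts found")
	}

	// m[1] is "12345/67890"; one split recovers both counters, where the
	// old pattern captured only the synced half.
	parts := strings.Split(m[1], "/")
	blocksSynced, _ := strconv.ParseInt(parts[0], 10, 64)
	blocksToBeSynced, _ := strconv.ParseInt(parts[1], 10, 64)
	fmt.Println(blocksSynced, blocksToBeSynced) // 12345 67890
}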
@@ -105,7 +107,7 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { active, total, down, size, err := evalStatusLine(lines[i], lines[i+1]) if err != nil { - return nil, fmt.Errorf("error parsing md device lines: %w", err) + return nil, fmt.Errorf("%w: Cannot parse md device lines: %v: %w", ErrFileParse, active, err) } syncLineIdx := i + 2 @@ -115,7 +117,8 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { // If device is syncing at the moment, get the number of currently // synced bytes, otherwise that number equals the size of the device. - syncedBlocks := size + blocksSynced := size + blocksToBeSynced := size speed := float64(0) finish := float64(0) pct := float64(0) @@ -136,11 +139,11 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { // Handle case when resync=PENDING or resync=DELAYED. if strings.Contains(lines[syncLineIdx], "PENDING") || strings.Contains(lines[syncLineIdx], "DELAYED") { - syncedBlocks = 0 + blocksSynced = 0 } else { - syncedBlocks, pct, finish, speed, err = evalRecoveryLine(lines[syncLineIdx]) + blocksSynced, blocksToBeSynced, pct, finish, speed, err = evalRecoveryLine(lines[syncLineIdx]) if err != nil { - return nil, fmt.Errorf("error parsing sync line in md device %q: %w", mdName, err) + return nil, fmt.Errorf("%w: Cannot parse sync line in md device: %q: %w", ErrFileParse, mdName, err) } } } @@ -154,7 +157,8 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { DisksSpare: spare, DisksTotal: total, BlocksTotal: size, - BlocksSynced: syncedBlocks, + BlocksSynced: blocksSynced, + BlocksToBeSynced: blocksToBeSynced, BlocksSyncedPct: pct, BlocksSyncedFinishTime: finish, BlocksSyncedSpeed: speed, @@ -166,11 +170,15 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) { } func evalStatusLine(deviceLine, statusLine string) (active, total, down, size int64, err error) { + statusFields := strings.Fields(statusLine) + if len(statusFields) < 1 { + return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err) + } - sizeStr := strings.Fields(statusLine)[0] + sizeStr := statusFields[0] size, err = strconv.ParseInt(sizeStr, 10, 64) if err != nil { - return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err) + return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err) } if strings.Contains(deviceLine, "raid0") || strings.Contains(deviceLine, "linear") { @@ -185,65 +193,71 @@ func evalStatusLine(deviceLine, statusLine string) (active, total, down, size in matches := statusLineRE.FindStringSubmatch(statusLine) if len(matches) != 5 { - return 0, 0, 0, 0, fmt.Errorf("couldn't find all the substring matches: %s", statusLine) + return 0, 0, 0, 0, fmt.Errorf("%w: Could not find all substring matches %s: %w", ErrFileParse, statusLine, err) } total, err = strconv.ParseInt(matches[2], 10, 64) if err != nil { - return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err) + return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err) } active, err = strconv.ParseInt(matches[3], 10, 64) if err != nil { - return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err) + return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected active %d: %w", ErrFileParse, active, err) } down = int64(strings.Count(matches[4], "_")) return active, total, down, size, nil } -func evalRecoveryLine(recoveryLine string) (syncedBlocks int64, pct float64, finish float64, speed float64, err error) { +func evalRecoveryLine(recoveryLine
string) (blocksSynced int64, blocksToBeSynced int64, pct float64, finish float64, speed float64, err error) { matches := recoveryLineBlocksRE.FindStringSubmatch(recoveryLine) if len(matches) != 2 { - return 0, 0, 0, 0, fmt.Errorf("unexpected recoveryLine: %s", recoveryLine) + return 0, 0, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine blocks %s: %w", ErrFileParse, recoveryLine, err) + } + + blocks := strings.Split(matches[1], "/") + blocksSynced, err = strconv.ParseInt(blocks[0], 10, 64) + if err != nil { + return 0, 0, 0, 0, 0, fmt.Errorf("%w: Unable to parse recovery blocks synced %q: %w", ErrFileParse, matches[1], err) } - syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64) + blocksToBeSynced, err = strconv.ParseInt(blocks[1], 10, 64) if err != nil { - return 0, 0, 0, 0, fmt.Errorf("error parsing int from recoveryLine %q: %w", recoveryLine, err) + return blocksSynced, 0, 0, 0, 0, fmt.Errorf("%w: Unable to parse recovery to be synced blocks %q: %w", ErrFileParse, matches[2], err) } // Get percentage complete matches = recoveryLinePctRE.FindStringSubmatch(recoveryLine) if len(matches) != 2 { - return syncedBlocks, 0, 0, 0, fmt.Errorf("unexpected recoveryLine matching percentage: %s", recoveryLine) + return blocksSynced, blocksToBeSynced, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching percentage %s", ErrFileParse, recoveryLine) } pct, err = strconv.ParseFloat(strings.TrimSpace(matches[1]), 64) if err != nil { - return syncedBlocks, 0, 0, 0, fmt.Errorf("error parsing float from recoveryLine %q: %w", recoveryLine, err) + return blocksSynced, blocksToBeSynced, 0, 0, 0, fmt.Errorf("%w: Error parsing float from recoveryLine %q", ErrFileParse, recoveryLine) } // Get time expected left to complete matches = recoveryLineFinishRE.FindStringSubmatch(recoveryLine) if len(matches) != 2 { - return syncedBlocks, pct, 0, 0, fmt.Errorf("unexpected recoveryLine matching est. finish time: %s", recoveryLine) + return blocksSynced, blocksToBeSynced, pct, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching est. 
finish time: %s", ErrFileParse, recoveryLine) } finish, err = strconv.ParseFloat(matches[1], 64) if err != nil { - return syncedBlocks, pct, 0, 0, fmt.Errorf("error parsing float from recoveryLine %q: %w", recoveryLine, err) + return blocksSynced, blocksToBeSynced, pct, 0, 0, fmt.Errorf("%w: Unable to parse float from recoveryLine: %q", ErrFileParse, recoveryLine) } // Get recovery speed matches = recoveryLineSpeedRE.FindStringSubmatch(recoveryLine) if len(matches) != 2 { - return syncedBlocks, pct, finish, 0, fmt.Errorf("unexpected recoveryLine matching speed: %s", recoveryLine) + return blocksSynced, blocksToBeSynced, pct, finish, 0, fmt.Errorf("%w: Unexpected recoveryLine value: %s", ErrFileParse, recoveryLine) } speed, err = strconv.ParseFloat(matches[1], 64) if err != nil { - return syncedBlocks, pct, finish, 0, fmt.Errorf("error parsing float from recoveryLine %q: %w", recoveryLine, err) + return blocksSynced, blocksToBeSynced, pct, finish, 0, fmt.Errorf("%w: Error parsing float from recoveryLine: %q: %w", ErrFileParse, recoveryLine, err) } - return syncedBlocks, pct, finish, speed, nil + return blocksSynced, blocksToBeSynced, pct, finish, speed, nil } func evalComponentDevices(deviceFields []string) []string { diff --git a/tools/vendor/github.com/prometheus/procfs/meminfo.go b/tools/vendor/github.com/prometheus/procfs/meminfo.go index f65e174e5..4b2c4050a 100644 --- a/tools/vendor/github.com/prometheus/procfs/meminfo.go +++ b/tools/vendor/github.com/prometheus/procfs/meminfo.go @@ -126,6 +126,7 @@ type Meminfo struct { VmallocUsed *uint64 // largest contiguous block of vmalloc area which is free VmallocChunk *uint64 + Percpu *uint64 HardwareCorrupted *uint64 AnonHugePages *uint64 ShmemHugePages *uint64 @@ -140,6 +141,55 @@ type Meminfo struct { DirectMap4k *uint64 DirectMap2M *uint64 DirectMap1G *uint64 + + // The struct fields below are the byte-normalized counterparts to the + // existing struct fields. Values are normalized using the optional + // unit field in the meminfo line. + MemTotalBytes *uint64 + MemFreeBytes *uint64 + MemAvailableBytes *uint64 + BuffersBytes *uint64 + CachedBytes *uint64 + SwapCachedBytes *uint64 + ActiveBytes *uint64 + InactiveBytes *uint64 + ActiveAnonBytes *uint64 + InactiveAnonBytes *uint64 + ActiveFileBytes *uint64 + InactiveFileBytes *uint64 + UnevictableBytes *uint64 + MlockedBytes *uint64 + SwapTotalBytes *uint64 + SwapFreeBytes *uint64 + DirtyBytes *uint64 + WritebackBytes *uint64 + AnonPagesBytes *uint64 + MappedBytes *uint64 + ShmemBytes *uint64 + SlabBytes *uint64 + SReclaimableBytes *uint64 + SUnreclaimBytes *uint64 + KernelStackBytes *uint64 + PageTablesBytes *uint64 + NFSUnstableBytes *uint64 + BounceBytes *uint64 + WritebackTmpBytes *uint64 + CommitLimitBytes *uint64 + CommittedASBytes *uint64 + VmallocTotalBytes *uint64 + VmallocUsedBytes *uint64 + VmallocChunkBytes *uint64 + PercpuBytes *uint64 + HardwareCorruptedBytes *uint64 + AnonHugePagesBytes *uint64 + ShmemHugePagesBytes *uint64 + ShmemPmdMappedBytes *uint64 + CmaTotalBytes *uint64 + CmaFreeBytes *uint64 + HugepagesizeBytes *uint64 + DirectMap4kBytes *uint64 + DirectMap2MBytes *uint64 + DirectMap1GBytes *uint64 } // Meminfo returns an information about current kernel/system memory statistics. 
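// A minimal, self-contained sketch (illustrative only, not part of the
// vendored files) of the unit handling the parseMemInfo rewrite below
// applies when filling the new *Bytes fields: a two-field line is used as
// bytes directly, and a three-field line must carry "kB" (the only unit the
// Linux kernel emits in /proc/meminfo) and is scaled by 1024.
// normalizeMeminfoLine is a hypothetical helper name.
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// normalizeMeminfoLine returns the raw value and its byte-normalized
// counterpart for a single /proc/meminfo line.
func normalizeMeminfoLine(line string) (val, valBytes uint64, err error) {
	fields := strings.Fields(line)
	if len(fields) < 2 {
		return 0, 0, fmt.Errorf("malformed line %q", line)
	}
	val, err = strconv.ParseUint(fields[1], 0, 64)
	if err != nil {
		return 0, 0, err
	}
	switch len(fields) {
	case 2: // no unit: use the parsed value as bytes directly
		valBytes = val
	case 3: // optional unit in the 3rd field: only "kB" is valid
		if fields[2] != "kB" {
			return 0, 0, fmt.Errorf("unsupported unit %q", fields[2])
		}
		valBytes = 1024 * val
	default:
		return 0, 0, fmt.Errorf("malformed line %q", line)
	}
	return val, valBytes, nil
}

func main() {
	val, valBytes, _ := normalizeMeminfoLine("MemTotal:       16384 kB")
	fmt.Println(val, valBytes) // 16384 16777216
}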
@@ -152,7 +202,7 @@ func (fs FS) Meminfo() (Meminfo, error) { m, err := parseMemInfo(bytes.NewReader(b)) if err != nil { - return Meminfo{}, fmt.Errorf("failed to parse meminfo: %w", err) + return Meminfo{}, fmt.Errorf("%w: %w", ErrFileParse, err) } return *m, nil @@ -162,114 +212,176 @@ func parseMemInfo(r io.Reader) (*Meminfo, error) { var m Meminfo s := bufio.NewScanner(r) for s.Scan() { - // Each line has at least a name and value; we ignore the unit. fields := strings.Fields(s.Text()) - if len(fields) < 2 { - return nil, fmt.Errorf("malformed meminfo line: %q", s.Text()) - } + var val, valBytes uint64 - v, err := strconv.ParseUint(fields[1], 0, 64) + val, err := strconv.ParseUint(fields[1], 0, 64) if err != nil { return nil, err } + switch len(fields) { + case 2: + // No unit present, use the parsed value as bytes directly. + valBytes = val + case 3: + // Unit present in optional 3rd field, convert it to + // bytes. The only unit supported within the Linux + // kernel is `kB`. + if fields[2] != "kB" { + return nil, fmt.Errorf("%w: Unsupported unit in optional 3rd field %q", ErrFileParse, fields[2]) + } + + valBytes = 1024 * val + + default: + return nil, fmt.Errorf("%w: Malformed line %q", ErrFileParse, s.Text()) + } + switch fields[0] { case "MemTotal:": - m.MemTotal = &v + m.MemTotal = &val + m.MemTotalBytes = &valBytes case "MemFree:": - m.MemFree = &v + m.MemFree = &val + m.MemFreeBytes = &valBytes case "MemAvailable:": - m.MemAvailable = &v + m.MemAvailable = &val + m.MemAvailableBytes = &valBytes case "Buffers:": - m.Buffers = &v + m.Buffers = &val + m.BuffersBytes = &valBytes case "Cached:": - m.Cached = &v + m.Cached = &val + m.CachedBytes = &valBytes case "SwapCached:": - m.SwapCached = &v + m.SwapCached = &val + m.SwapCachedBytes = &valBytes case "Active:": - m.Active = &v + m.Active = &val + m.ActiveBytes = &valBytes case "Inactive:": - m.Inactive = &v + m.Inactive = &val + m.InactiveBytes = &valBytes case "Active(anon):": - m.ActiveAnon = &v + m.ActiveAnon = &val + m.ActiveAnonBytes = &valBytes case "Inactive(anon):": - m.InactiveAnon = &v + m.InactiveAnon = &val + m.InactiveAnonBytes = &valBytes case "Active(file):": - m.ActiveFile = &v + m.ActiveFile = &val + m.ActiveFileBytes = &valBytes case "Inactive(file):": - m.InactiveFile = &v + m.InactiveFile = &val + m.InactiveFileBytes = &valBytes case "Unevictable:": - m.Unevictable = &v + m.Unevictable = &val + m.UnevictableBytes = &valBytes case "Mlocked:": - m.Mlocked = &v + m.Mlocked = &val + m.MlockedBytes = &valBytes case "SwapTotal:": - m.SwapTotal = &v + m.SwapTotal = &val + m.SwapTotalBytes = &valBytes case "SwapFree:": - m.SwapFree = &v + m.SwapFree = &val + m.SwapFreeBytes = &valBytes case "Dirty:": - m.Dirty = &v + m.Dirty = &val + m.DirtyBytes = &valBytes case "Writeback:": - m.Writeback = &v + m.Writeback = &val + m.WritebackBytes = &valBytes case "AnonPages:": - m.AnonPages = &v + m.AnonPages = &val + m.AnonPagesBytes = &valBytes case "Mapped:": - m.Mapped = &v + m.Mapped = &val + m.MappedBytes = &valBytes case "Shmem:": - m.Shmem = &v + m.Shmem = &val + m.ShmemBytes = &valBytes case "Slab:": - m.Slab = &v + m.Slab = &val + m.SlabBytes = &valBytes case "SReclaimable:": - m.SReclaimable = &v + m.SReclaimable = &val + m.SReclaimableBytes = &valBytes case "SUnreclaim:": - m.SUnreclaim = &v + m.SUnreclaim = &val + m.SUnreclaimBytes = &valBytes case "KernelStack:": - m.KernelStack = &v + m.KernelStack = &val + m.KernelStackBytes = &valBytes case "PageTables:": - m.PageTables = &v + m.PageTables = &val +
m.PageTablesBytes = &valBytes case "NFS_Unstable:": - m.NFSUnstable = &v + m.NFSUnstable = &val + m.NFSUnstableBytes = &valBytes case "Bounce:": - m.Bounce = &v + m.Bounce = &val + m.BounceBytes = &valBytes case "WritebackTmp:": - m.WritebackTmp = &v + m.WritebackTmp = &val + m.WritebackTmpBytes = &valBytes case "CommitLimit:": - m.CommitLimit = &v + m.CommitLimit = &val + m.CommitLimitBytes = &valBytes case "Committed_AS:": - m.CommittedAS = &v + m.CommittedAS = &val + m.CommittedASBytes = &valBytes case "VmallocTotal:": - m.VmallocTotal = &v + m.VmallocTotal = &val + m.VmallocTotalBytes = &valBytes case "VmallocUsed:": - m.VmallocUsed = &v + m.VmallocUsed = &val + m.VmallocUsedBytes = &valBytes case "VmallocChunk:": - m.VmallocChunk = &v + m.VmallocChunk = &val + m.VmallocChunkBytes = &valBytes + case "Percpu:": + m.Percpu = &val + m.PercpuBytes = &valBytes case "HardwareCorrupted:": - m.HardwareCorrupted = &v + m.HardwareCorrupted = &val + m.HardwareCorruptedBytes = &valBytes case "AnonHugePages:": - m.AnonHugePages = &v + m.AnonHugePages = &val + m.AnonHugePagesBytes = &valBytes case "ShmemHugePages:": - m.ShmemHugePages = &v + m.ShmemHugePages = &val + m.ShmemHugePagesBytes = &valBytes case "ShmemPmdMapped:": - m.ShmemPmdMapped = &v + m.ShmemPmdMapped = &val + m.ShmemPmdMappedBytes = &valBytes case "CmaTotal:": - m.CmaTotal = &v + m.CmaTotal = &val + m.CmaTotalBytes = &valBytes case "CmaFree:": - m.CmaFree = &v + m.CmaFree = &val + m.CmaFreeBytes = &valBytes case "HugePages_Total:": - m.HugePagesTotal = &v + m.HugePagesTotal = &val case "HugePages_Free:": - m.HugePagesFree = &v + m.HugePagesFree = &val case "HugePages_Rsvd:": - m.HugePagesRsvd = &v + m.HugePagesRsvd = &val case "HugePages_Surp:": - m.HugePagesSurp = &v + m.HugePagesSurp = &val case "Hugepagesize:": - m.Hugepagesize = &v + m.Hugepagesize = &val + m.HugepagesizeBytes = &valBytes case "DirectMap4k:": - m.DirectMap4k = &v + m.DirectMap4k = &val + m.DirectMap4kBytes = &valBytes case "DirectMap2M:": - m.DirectMap2M = &v + m.DirectMap2M = &val + m.DirectMap2MBytes = &valBytes case "DirectMap1G:": - m.DirectMap1G = &v + m.DirectMap1G = &val + m.DirectMap1GBytes = &valBytes } } diff --git a/tools/vendor/github.com/prometheus/procfs/mountinfo.go b/tools/vendor/github.com/prometheus/procfs/mountinfo.go index 59f4d5055..a704c5e73 100644 --- a/tools/vendor/github.com/prometheus/procfs/mountinfo.go +++ b/tools/vendor/github.com/prometheus/procfs/mountinfo.go @@ -78,11 +78,11 @@ func parseMountInfoString(mountString string) (*MountInfo, error) { mountInfo := strings.Split(mountString, " ") mountInfoLength := len(mountInfo) if mountInfoLength < 10 { - return nil, fmt.Errorf("couldn't find enough fields in mount string: %s", mountString) + return nil, fmt.Errorf("%w: Too few fields in mount string: %s", ErrFileParse, mountString) } if mountInfo[mountInfoLength-4] != "-" { - return nil, fmt.Errorf("couldn't find separator in expected field: %s", mountInfo[mountInfoLength-4]) + return nil, fmt.Errorf("%w: couldn't find separator in expected field: %s", ErrFileParse, mountInfo[mountInfoLength-4]) } mount := &MountInfo{ @@ -98,18 +98,18 @@ func parseMountInfoString(mountString string) (*MountInfo, error) { mount.MountID, err = strconv.Atoi(mountInfo[0]) if err != nil { - return nil, fmt.Errorf("failed to parse mount ID") + return nil, fmt.Errorf("%w: mount ID: %q", ErrFileParse, mount.MountID) } mount.ParentID, err = strconv.Atoi(mountInfo[1]) if err != nil { - return nil, fmt.Errorf("failed to parse parent ID") + return nil, 
fmt.Errorf("%w: parent ID: %q", ErrFileParse, mount.ParentID) } // Has optional fields, which is a space separated list of values. // Example: shared:2 master:7 if mountInfo[6] != "" { mount.OptionalFields, err = mountOptionsParseOptionalFields(mountInfo[6 : mountInfoLength-4]) if err != nil { - return nil, err + return nil, fmt.Errorf("%w: %w", ErrFileParse, err) } } return mount, nil diff --git a/tools/vendor/github.com/prometheus/procfs/mountstats.go b/tools/vendor/github.com/prometheus/procfs/mountstats.go index f7a828bb1..b6c8d1a57 100644 --- a/tools/vendor/github.com/prometheus/procfs/mountstats.go +++ b/tools/vendor/github.com/prometheus/procfs/mountstats.go @@ -44,6 +44,14 @@ const ( fieldTransport11TCPLen = 13 fieldTransport11UDPLen = 10 + + // Kernel version >= 4.14 MaxLen + // See: https://elixir.bootlin.com/linux/v6.4.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L393 + fieldTransport11RDMAMaxLen = 28 + + // Kernel version <= 4.2 MinLen + // See: https://elixir.bootlin.com/linux/v4.2.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L331 + fieldTransport11RDMAMinLen = 20 ) // A Mount is a device mount parsed from /proc/[pid]/mountstats. @@ -80,7 +88,7 @@ type MountStatsNFS struct { // Statistics broken down by filesystem operation. Operations []NFSOperationStats // Statistics about the NFS RPC transport. - Transport NFSTransportStats + Transport []NFSTransportStats } // mountStats implements MountStats. @@ -231,6 +239,33 @@ type NFSTransportStats struct { // A running counter, incremented on each request as the current size of the // pending queue. CumulativePendingQueue uint64 + + // Stats below only available with stat version 1.1. + // Transport over RDMA + + // accessed when sending a call + ReadChunkCount uint64 + WriteChunkCount uint64 + ReplyChunkCount uint64 + TotalRdmaRequest uint64 + + // rarely accessed error counters + PullupCopyCount uint64 + HardwayRegisterCount uint64 + FailedMarshalCount uint64 + BadReplyCount uint64 + MrsRecovered uint64 + MrsOrphaned uint64 + MrsAllocated uint64 + EmptySendctxQ uint64 + + // accessed when receiving a reply + TotalRdmaReply uint64 + FixupCopyCount uint64 + ReplyWaitsForSend uint64 + LocalInvNeeded uint64 + NomsgCallCount uint64 + BcallCount uint64 } // parseMountStats parses a /proc/[pid]/mountstats file and returns a slice @@ -264,7 +299,7 @@ func parseMountStats(r io.Reader) ([]*Mount, error) { if len(ss) > deviceEntryLen { // Only NFSv3 and v4 are supported for parsing statistics if m.Type != nfs3Type && m.Type != nfs4Type { - return nil, fmt.Errorf("cannot parse MountStats for fstype %q", m.Type) + return nil, fmt.Errorf("%w: Cannot parse MountStats for %q", ErrFileParse, m.Type) } statVersion := strings.TrimPrefix(ss[8], statVersionPrefix) @@ -284,10 +319,11 @@ func parseMountStats(r io.Reader) ([]*Mount, error) { } // parseMount parses an entry in /proc/[pid]/mountstats in the format: -// device [device] mounted on [mount] with fstype [type] +// +// device [device] mounted on [mount] with fstype [type] func parseMount(ss []string) (*Mount, error) { if len(ss) < deviceEntryLen { - return nil, fmt.Errorf("invalid device entry: %v", ss) + return nil, fmt.Errorf("%w: Invalid device %q", ErrFileParse, ss) } // Check for specific words appearing at specific indices to ensure @@ -305,7 +341,7 @@ func parseMount(ss []string) (*Mount, error) { for _, f := range format { if ss[f.i] != f.s { - return nil, fmt.Errorf("invalid device entry: %v", ss) + return nil, fmt.Errorf("%w: Invalid device %q", ErrFileParse, ss) } } @@ -342,7 +378,7 @@ func 
parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e switch ss[0] { case fieldOpts: if len(ss) < 2 { - return nil, fmt.Errorf("not enough information for NFS stats: %v", ss) + return nil, fmt.Errorf("%w: Incomplete information for NFS stats: %v", ErrFileParse, ss) } if stats.Opts == nil { stats.Opts = map[string]string{} @@ -357,7 +393,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e } case fieldAge: if len(ss) < 2 { - return nil, fmt.Errorf("not enough information for NFS stats: %v", ss) + return nil, fmt.Errorf("%w: Incomplete information for NFS stats: %v", ErrFileParse, ss) } // Age integer is in seconds d, err := time.ParseDuration(ss[1] + "s") @@ -368,7 +404,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e stats.Age = d case fieldBytes: if len(ss) < 2 { - return nil, fmt.Errorf("not enough information for NFS stats: %v", ss) + return nil, fmt.Errorf("%w: Incomplete information for NFS stats: %v", ErrFileParse, ss) } bstats, err := parseNFSBytesStats(ss[1:]) if err != nil { @@ -378,7 +414,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e stats.Bytes = *bstats case fieldEvents: if len(ss) < 2 { - return nil, fmt.Errorf("not enough information for NFS stats: %v", ss) + return nil, fmt.Errorf("%w: Incomplete information for NFS events: %v", ErrFileParse, ss) } estats, err := parseNFSEventsStats(ss[1:]) if err != nil { @@ -388,7 +424,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e stats.Events = *estats case fieldTransport: if len(ss) < 3 { - return nil, fmt.Errorf("not enough information for NFS transport stats: %v", ss) + return nil, fmt.Errorf("%w: Incomplete information for NFS transport stats: %v", ErrFileParse, ss) } tstats, err := parseNFSTransportStats(ss[1:], statVersion) @@ -396,7 +432,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e return nil, err } - stats.Transport = *tstats + stats.Transport = append(stats.Transport, *tstats) } // When encountering "per-operation statistics", we must break this @@ -427,7 +463,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e // integer fields. func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) { if len(ss) != fieldBytesLen { - return nil, fmt.Errorf("invalid NFS bytes stats: %v", ss) + return nil, fmt.Errorf("%w: Invalid NFS bytes stats: %v", ErrFileParse, ss) } ns := make([]uint64, 0, fieldBytesLen) @@ -456,7 +492,7 @@ func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) { // integer fields. 
func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) { if len(ss) != fieldEventsLen { - return nil, fmt.Errorf("invalid NFS events stats: %v", ss) + return nil, fmt.Errorf("%w: invalid NFS events stats: %v", ErrFileParse, ss) } ns := make([]uint64, 0, fieldEventsLen) @@ -520,7 +556,7 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) { } if len(ss) < minFields { - return nil, fmt.Errorf("invalid NFS per-operations stats: %v", ss) + return nil, fmt.Errorf("%w: invalid NFS per-operations stats: %v", ErrFileParse, ss) } // Skip string operation name for integers @@ -533,7 +569,6 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) { ns = append(ns, n) } - opStats := NFSOperationStats{ Operation: strings.TrimSuffix(ss[0], ":"), Requests: ns[0], @@ -571,10 +606,10 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats } else if protocol == "udp" { expectedLength = fieldTransport10UDPLen } else { - return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.0 statement: %v", protocol, ss) + return nil, fmt.Errorf("%w: Invalid NFS protocol \"%s\" in stats 1.0 statement: %v", ErrFileParse, protocol, ss) } if len(ss) != expectedLength { - return nil, fmt.Errorf("invalid NFS transport stats 1.0 statement: %v", ss) + return nil, fmt.Errorf("%w: Invalid NFS transport stats 1.0 statement: %v", ErrFileParse, ss) } case statVersion11: var expectedLength int @@ -582,14 +617,17 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats expectedLength = fieldTransport11TCPLen } else if protocol == "udp" { expectedLength = fieldTransport11UDPLen + } else if protocol == "rdma" { + expectedLength = fieldTransport11RDMAMinLen } else { - return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.1 statement: %v", protocol, ss) + return nil, fmt.Errorf("%w: invalid NFS protocol \"%s\" in stats 1.1 statement: %v", ErrFileParse, protocol, ss) } - if len(ss) != expectedLength { - return nil, fmt.Errorf("invalid NFS transport stats 1.1 statement: %v", ss) + if (len(ss) != expectedLength && (protocol == "tcp" || protocol == "udp")) || + (protocol == "rdma" && len(ss) < expectedLength) { + return nil, fmt.Errorf("%w: invalid NFS transport stats 1.1 statement: %v, protocol: %v", ErrFileParse, ss, protocol) } default: - return nil, fmt.Errorf("unrecognized NFS transport stats version: %q", statVersion) + return nil, fmt.Errorf("%w: Unrecognized NFS transport stats version: %q, protocol: %v", ErrFileParse, statVersion, protocol) } // Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay @@ -599,7 +637,9 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats // Note: slice length must be set to length of v1.1 stats to avoid a panic when // only v1.0 stats are present. // See: https://github.com/prometheus/node_exporter/issues/571. - ns := make([]uint64, fieldTransport11TCPLen) + // + // Note: NFS Over RDMA slice length is fieldTransport11RDMAMaxLen + ns := make([]uint64, fieldTransport11RDMAMaxLen+3) for i, s := range ss { n, err := strconv.ParseUint(s, 10, 64) if err != nil { @@ -617,9 +657,14 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats // we set them to 0 here. if protocol == "udp" { ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...) + } else if protocol == "tcp" { + ns = append(ns[:fieldTransport11TCPLen], make([]uint64, fieldTransport11RDMAMaxLen-fieldTransport11TCPLen+3)...) 
+ } else if protocol == "rdma" { + ns = append(ns[:fieldTransport10TCPLen], append(make([]uint64, 3), ns[fieldTransport10TCPLen:]...)...) } return &NFSTransportStats{ + // NFS xprt over tcp or udp Protocol: protocol, Port: ns[0], Bind: ns[1], @@ -631,8 +676,32 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats BadTransactionIDs: ns[7], CumulativeActiveRequests: ns[8], CumulativeBacklog: ns[9], - MaximumRPCSlotsUsed: ns[10], - CumulativeSendingQueue: ns[11], - CumulativePendingQueue: ns[12], + + // NFS xprt over tcp or udp + // And statVersion 1.1 + MaximumRPCSlotsUsed: ns[10], + CumulativeSendingQueue: ns[11], + CumulativePendingQueue: ns[12], + + // NFS xprt over rdma + // And stat Version 1.1 + ReadChunkCount: ns[13], + WriteChunkCount: ns[14], + ReplyChunkCount: ns[15], + TotalRdmaRequest: ns[16], + PullupCopyCount: ns[17], + HardwayRegisterCount: ns[18], + FailedMarshalCount: ns[19], + BadReplyCount: ns[20], + MrsRecovered: ns[21], + MrsOrphaned: ns[22], + MrsAllocated: ns[23], + EmptySendctxQ: ns[24], + TotalRdmaReply: ns[25], + FixupCopyCount: ns[26], + ReplyWaitsForSend: ns[27], + LocalInvNeeded: ns[28], + NomsgCallCount: ns[29], + BcallCount: ns[30], }, nil } diff --git a/tools/vendor/github.com/prometheus/procfs/net_conntrackstat.go b/tools/vendor/github.com/prometheus/procfs/net_conntrackstat.go index 9964a3600..316df5fbb 100644 --- a/tools/vendor/github.com/prometheus/procfs/net_conntrackstat.go +++ b/tools/vendor/github.com/prometheus/procfs/net_conntrackstat.go @@ -18,19 +18,22 @@ import ( "bytes" "fmt" "io" - "strconv" "strings" "github.com/prometheus/procfs/internal/util" ) // A ConntrackStatEntry represents one line from net/stat/nf_conntrack -// and contains netfilter conntrack statistics at one CPU core +// and contains netfilter conntrack statistics at one CPU core. type ConntrackStatEntry struct { Entries uint64 + Searched uint64 Found uint64 + New uint64 Invalid uint64 Ignore uint64 + Delete uint64 + DeleteList uint64 Insert uint64 InsertFailed uint64 Drop uint64 @@ -38,12 +41,12 @@ type ConntrackStatEntry struct { SearchRestart uint64 } -// ConntrackStat retrieves netfilter's conntrack statistics, split by CPU cores +// ConntrackStat retrieves netfilter's conntrack statistics, split by CPU cores. func (fs FS) ConntrackStat() ([]ConntrackStatEntry, error) { return readConntrackStat(fs.proc.Path("net", "stat", "nf_conntrack")) } -// Parses a slice of ConntrackStatEntries from the given filepath +// Parses a slice of ConntrackStatEntries from the given filepath. func readConntrackStat(path string) ([]ConntrackStatEntry, error) { // This file is small and can be read with one syscall. b, err := util.ReadFileNoStat(path) @@ -55,13 +58,13 @@ func readConntrackStat(path string) ([]ConntrackStatEntry, error) { stat, err := parseConntrackStat(bytes.NewReader(b)) if err != nil { - return nil, fmt.Errorf("failed to read conntrack stats from %q: %w", path, err) + return nil, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, path, err) } return stat, nil } -// Reads the contents of a conntrack statistics file and parses a slice of ConntrackStatEntries +// Reads the contents of a conntrack statistics file and parses a slice of ConntrackStatEntries. 
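The conntrack rewrite above replaces a dozen hand-rolled hex parses with one util.ParseHexUint64s call and accepts either 16 or 17 fields, since search_restart only exists on Linux >= 2.6.35. A usage sketch (import path assumed as github.com/prometheus/procfs):

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}
	// One entry per CPU; SearchRestart stays zero on the 16-field
	// format written by kernels older than 2.6.35.
	entries, err := fs.ConntrackStat()
	if err != nil {
		log.Fatal(err)
	}
	for cpu, e := range entries {
		fmt.Printf("cpu%d: entries=%d found=%d drop=%d\n", cpu, e.Entries, e.Found, e.Drop)
	}
}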
func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) { var entries []ConntrackStatEntry @@ -79,75 +82,37 @@ func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) { return entries, nil } -// Parses a ConntrackStatEntry from given array of fields +// Parses a ConntrackStatEntry from given array of fields. func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) { - if len(fields) != 17 { - return nil, fmt.Errorf("invalid conntrackstat entry, missing fields") - } - entry := &ConntrackStatEntry{} - - entries, err := parseConntrackStatField(fields[0]) - if err != nil { - return nil, err - } - entry.Entries = entries - - found, err := parseConntrackStatField(fields[2]) - if err != nil { - return nil, err - } - entry.Found = found - - invalid, err := parseConntrackStatField(fields[4]) - if err != nil { - return nil, err - } - entry.Invalid = invalid - - ignore, err := parseConntrackStatField(fields[5]) - if err != nil { - return nil, err - } - entry.Ignore = ignore - - insert, err := parseConntrackStatField(fields[8]) + entries, err := util.ParseHexUint64s(fields) if err != nil { - return nil, err + return nil, fmt.Errorf("%w: Cannot parse entry: %d: %w", ErrFileParse, entries, err) } - entry.Insert = insert - - insertFailed, err := parseConntrackStatField(fields[9]) - if err != nil { - return nil, err + numEntries := len(entries) + if numEntries < 16 || numEntries > 17 { + return nil, + fmt.Errorf("%w: invalid conntrackstat entry, invalid number of fields: %d", ErrFileParse, numEntries) } - entry.InsertFailed = insertFailed - drop, err := parseConntrackStatField(fields[10]) - if err != nil { - return nil, err + stats := &ConntrackStatEntry{ + Entries: *entries[0], + Searched: *entries[1], + Found: *entries[2], + New: *entries[3], + Invalid: *entries[4], + Ignore: *entries[5], + Delete: *entries[6], + DeleteList: *entries[7], + Insert: *entries[8], + InsertFailed: *entries[9], + Drop: *entries[10], + EarlyDrop: *entries[11], } - entry.Drop = drop - earlyDrop, err := parseConntrackStatField(fields[11]) - if err != nil { - return nil, err + // Ignore missing search_restart on Linux < 2.6.35. + if numEntries == 17 { + stats.SearchRestart = *entries[16] } - entry.EarlyDrop = earlyDrop - searchRestart, err := parseConntrackStatField(fields[16]) - if err != nil { - return nil, err - } - entry.SearchRestart = searchRestart - - return entry, nil -} - -// Parses a uint64 from given hex in string -func parseConntrackStatField(field string) (uint64, error) { - val, err := strconv.ParseUint(field, 16, 64) - if err != nil { - return 0, fmt.Errorf("couldn't parse %q field: %w", field, err) - } - return val, err + return stats, nil } diff --git a/tools/vendor/github.com/prometheus/procfs/net_dev.go b/tools/vendor/github.com/prometheus/procfs/net_dev.go index 47a710bef..e66208aa0 100644 --- a/tools/vendor/github.com/prometheus/procfs/net_dev.go +++ b/tools/vendor/github.com/prometheus/procfs/net_dev.go @@ -87,17 +87,17 @@ func newNetDev(file string) (NetDev, error) { // parseLine parses a single line from the /proc/net/dev file. Header lines // must be filtered prior to calling this method. 
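The SplitN-to-LastIndex change in the parseLine hunk that follows matters for interface aliases whose names themselves contain a colon (e.g. eth0:1). A standalone, stdlib-only sketch of the difference (the sample line is illustrative):

package main

import (
	"fmt"
	"strings"
)

func main() {
	raw := " eth0:1: 1234 10 0 0 0 0 0 0 5678 9 0 0 0 0 0 0"
	// Old behaviour: SplitN(..., 2) cuts at the first colon and
	// mangles the alias, leaving "1: 1234 ..." in the stats field.
	parts := strings.SplitN(raw, ":", 2)
	fmt.Printf("SplitN name=%q rest=%q...\n", strings.TrimSpace(parts[0]), parts[1][:8])
	// New behaviour: LastIndex keeps the full alias name intact.
	idx := strings.LastIndex(raw, ":")
	fmt.Printf("LastIndex name=%q\n", strings.TrimSpace(raw[:idx]))
}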
func (netDev NetDev) parseLine(rawLine string) (*NetDevLine, error) { - parts := strings.SplitN(rawLine, ":", 2) - if len(parts) != 2 { + idx := strings.LastIndex(rawLine, ":") + if idx == -1 { return nil, errors.New("invalid net/dev line, missing colon") } - fields := strings.Fields(strings.TrimSpace(parts[1])) + fields := strings.Fields(strings.TrimSpace(rawLine[idx+1:])) var err error line := &NetDevLine{} // Interface Name - line.Name = strings.TrimSpace(parts[0]) + line.Name = strings.TrimSpace(rawLine[:idx]) if line.Name == "" { return nil, errors.New("invalid net/dev line, empty interface name") } diff --git a/tools/vendor/github.com/prometheus/procfs/net_dev_snmp6.go b/tools/vendor/github.com/prometheus/procfs/net_dev_snmp6.go new file mode 100644 index 000000000..f50b38e35 --- /dev/null +++ b/tools/vendor/github.com/prometheus/procfs/net_dev_snmp6.go @@ -0,0 +1,96 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "errors" + "io" + "os" + "strconv" + "strings" +) + +// NetDevSNMP6 is parsed from files in /proc/net/dev_snmp6/ or /proc//net/dev_snmp6/. +// The outer map's keys are interface names and the inner map's keys are stat names. +// +// If you'd like a total across all interfaces, please use the Snmp6() method of the Proc type. +type NetDevSNMP6 map[string]map[string]uint64 + +// Returns kernel/system statistics read from interface files within the /proc/net/dev_snmp6/ +// directory. +func (fs FS) NetDevSNMP6() (NetDevSNMP6, error) { + return newNetDevSNMP6(fs.proc.Path("net/dev_snmp6")) +} + +// Returns kernel/system statistics read from interface files within the /proc//net/dev_snmp6/ +// directory. +func (p Proc) NetDevSNMP6() (NetDevSNMP6, error) { + return newNetDevSNMP6(p.path("net/dev_snmp6")) +} + +// newNetDevSNMP6 creates a new NetDevSNMP6 from the contents of the given directory. +func newNetDevSNMP6(dir string) (NetDevSNMP6, error) { + netDevSNMP6 := make(NetDevSNMP6) + + // The net/dev_snmp6 folders contain one file per interface + ifaceFiles, err := os.ReadDir(dir) + if err != nil { + // On systems with IPv6 disabled, this directory won't exist. + // Do nothing. 
+ if errors.Is(err, os.ErrNotExist) { + return netDevSNMP6, err + } + return netDevSNMP6, err + } + + for _, iFaceFile := range ifaceFiles { + f, err := os.Open(dir + "/" + iFaceFile.Name()) + if err != nil { + return netDevSNMP6, err + } + defer f.Close() + + netDevSNMP6[iFaceFile.Name()], err = parseNetDevSNMP6Stats(f) + if err != nil { + return netDevSNMP6, err + } + } + + return netDevSNMP6, nil +} + +func parseNetDevSNMP6Stats(r io.Reader) (map[string]uint64, error) { + m := make(map[string]uint64) + + scanner := bufio.NewScanner(r) + for scanner.Scan() { + stat := strings.Fields(scanner.Text()) + if len(stat) < 2 { + continue + } + key, val := stat[0], stat[1] + + // Expect stat name to contain "6" or be "ifIndex" + if strings.Contains(key, "6") || key == "ifIndex" { + v, err := strconv.ParseUint(val, 10, 64) + if err != nil { + return m, err + } + + m[key] = v + } + } + return m, scanner.Err() +} diff --git a/tools/vendor/github.com/prometheus/procfs/net_ip_socket.go b/tools/vendor/github.com/prometheus/procfs/net_ip_socket.go index 8c9ee3de8..19e3378f7 100644 --- a/tools/vendor/github.com/prometheus/procfs/net_ip_socket.go +++ b/tools/vendor/github.com/prometheus/procfs/net_ip_socket.go @@ -25,7 +25,7 @@ import ( ) const ( - // readLimit is used by io.LimitReader while reading the content of the + // Maximum size limit used by io.LimitReader while reading the content of the // /proc/net/udp{,6} files. The number of lines inside such a file is dynamic // as each line represents a single used socket. // In theory, the number of available sockets is 65535 (2^16 - 1) per IP. @@ -34,7 +34,7 @@ const ( readLimit = 4294967296 // Byte -> 4 GiB ) -// this contains generic data structures for both udp and tcp sockets +// This contains generic data structures for both udp and tcp sockets. type ( // NetIPSocket represents the contents of /proc/net/{t,u}dp{,6} file without the header. NetIPSocket []*netIPSocketLine @@ -50,10 +50,13 @@ type ( // UsedSockets shows the total number of parsed lines representing the // number of used sockets. UsedSockets uint64 + // Drops shows the total number of dropped packets of all UDP sockets. + Drops *uint64 } - // netIPSocketLine represents the fields parsed from a single line - // in /proc/net/{t,u}dp{,6}. Fields which are not used by IPSocket are skipped. + // A single line parser for fields from /proc/net/{t,u}dp{,6}. + // Fields which are not used by IPSocket are skipped. + // Drops is non-nil for udp{,6}, but nil for tcp{,6}. // For the proc file format details, see https://linux.die.net/man/5/proc. 
netIPSocketLine struct { Sl uint64 @@ -66,6 +69,7 @@ type ( RxQueue uint64 UID uint64 Inode uint64 + Drops *uint64 } ) @@ -77,13 +81,14 @@ func newNetIPSocket(file string) (NetIPSocket, error) { defer f.Close() var netIPSocket NetIPSocket + isUDP := strings.Contains(file, "udp") lr := io.LimitReader(f, readLimit) s := bufio.NewScanner(lr) s.Scan() // skip first line with headers for s.Scan() { fields := strings.Fields(s.Text()) - line, err := parseNetIPSocketLine(fields) + line, err := parseNetIPSocketLine(fields, isUDP) if err != nil { return nil, err } @@ -104,19 +109,25 @@ func newNetIPSocketSummary(file string) (*NetIPSocketSummary, error) { defer f.Close() var netIPSocketSummary NetIPSocketSummary + var udpPacketDrops uint64 + isUDP := strings.Contains(file, "udp") lr := io.LimitReader(f, readLimit) s := bufio.NewScanner(lr) s.Scan() // skip first line with headers for s.Scan() { fields := strings.Fields(s.Text()) - line, err := parseNetIPSocketLine(fields) + line, err := parseNetIPSocketLine(fields, isUDP) if err != nil { return nil, err } netIPSocketSummary.TxQueueLength += line.TxQueue netIPSocketSummary.RxQueueLength += line.RxQueue netIPSocketSummary.UsedSockets++ + if isUDP { + udpPacketDrops += *line.Drops + netIPSocketSummary.Drops = &udpPacketDrops + } } if err := s.Err(); err != nil { return nil, err @@ -130,7 +141,7 @@ func parseIP(hexIP string) (net.IP, error) { var byteIP []byte byteIP, err := hex.DecodeString(hexIP) if err != nil { - return nil, fmt.Errorf("cannot parse address field in socket line %q", hexIP) + return nil, fmt.Errorf("%w: Cannot parse socket field in %q: %w", ErrFileParse, hexIP, err) } switch len(byteIP) { case 4: @@ -144,16 +155,17 @@ func parseIP(hexIP string) (net.IP, error) { } return i, nil default: - return nil, fmt.Errorf("Unable to parse IP %s", hexIP) + return nil, fmt.Errorf("%w: Unable to parse IP %s: %v", ErrFileParse, hexIP, nil) } } // parseNetIPSocketLine parses a single line, represented by a list of fields. 
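The struct below grows a Drops pointer so the extra drops column of udp{,6} can be surfaced without breaking tcp{,6}, which lacks it. A sketch of reading the summarized counter, assuming the existing NetUDPSummary accessor still wraps NetIPSocketSummary:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}
	sum, err := fs.NetUDPSummary()
	if err != nil {
		log.Fatal(err)
	}
	// Drops is a *uint64: non-nil for udp{,6}, nil for tcp{,6},
	// which is how callers can tell the two file formats apart.
	if sum.Drops != nil {
		fmt.Printf("udp sockets=%d dropped=%d\n", sum.UsedSockets, *sum.Drops)
	}
}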
-func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) { +func parseNetIPSocketLine(fields []string, isUDP bool) (*netIPSocketLine, error) { line := &netIPSocketLine{} if len(fields) < 10 { return nil, fmt.Errorf( - "cannot parse net socket line as it has less then 10 columns %q", + "%w: Less than 10 columns found %q", + ErrFileParse, strings.Join(fields, " "), ) } @@ -162,64 +174,74 @@ func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) { // sl s := strings.Split(fields[0], ":") if len(s) != 2 { - return nil, fmt.Errorf("cannot parse sl field in socket line %q", fields[0]) + return nil, fmt.Errorf("%w: Unable to parse sl field in line %q", ErrFileParse, fields[0]) } if line.Sl, err = strconv.ParseUint(s[0], 0, 64); err != nil { - return nil, fmt.Errorf("cannot parse sl value in socket line: %w", err) + return nil, fmt.Errorf("%w: Unable to parse sl field in %q: %w", ErrFileParse, line.Sl, err) } // local_address l := strings.Split(fields[1], ":") if len(l) != 2 { - return nil, fmt.Errorf("cannot parse local_address field in socket line %q", fields[1]) + return nil, fmt.Errorf("%w: Unable to parse local_address field in %q", ErrFileParse, fields[1]) } if line.LocalAddr, err = parseIP(l[0]); err != nil { return nil, err } if line.LocalPort, err = strconv.ParseUint(l[1], 16, 64); err != nil { - return nil, fmt.Errorf("cannot parse local_address port value in socket line: %w", err) + return nil, fmt.Errorf("%w: Unable to parse local_address port value line %q: %w", ErrFileParse, line.LocalPort, err) } // remote_address r := strings.Split(fields[2], ":") if len(r) != 2 { - return nil, fmt.Errorf("cannot parse rem_address field in socket line %q", fields[1]) + return nil, fmt.Errorf("%w: Unable to parse rem_address field in %q", ErrFileParse, fields[1]) } if line.RemAddr, err = parseIP(r[0]); err != nil { return nil, err } if line.RemPort, err = strconv.ParseUint(r[1], 16, 64); err != nil { - return nil, fmt.Errorf("cannot parse rem_address port value in socket line: %w", err) + return nil, fmt.Errorf("%w: Cannot parse rem_address port value in %q: %w", ErrFileParse, line.RemPort, err) } // st if line.St, err = strconv.ParseUint(fields[3], 16, 64); err != nil { - return nil, fmt.Errorf("cannot parse st value in socket line: %w", err) + return nil, fmt.Errorf("%w: Cannot parse st value in %q: %w", ErrFileParse, line.St, err) } // tx_queue and rx_queue q := strings.Split(fields[4], ":") if len(q) != 2 { return nil, fmt.Errorf( - "cannot parse tx/rx queues in socket line as it has a missing colon %q", + "%w: Missing colon for tx/rx queues in socket line %q", + ErrFileParse, fields[4], ) } if line.TxQueue, err = strconv.ParseUint(q[0], 16, 64); err != nil { - return nil, fmt.Errorf("cannot parse tx_queue value in socket line: %w", err) + return nil, fmt.Errorf("%w: Cannot parse tx_queue value in %q: %w", ErrFileParse, line.TxQueue, err) } if line.RxQueue, err = strconv.ParseUint(q[1], 16, 64); err != nil { - return nil, fmt.Errorf("cannot parse rx_queue value in socket line: %w", err) + return nil, fmt.Errorf("%w: Cannot parse trx_queue value in %q: %w", ErrFileParse, line.RxQueue, err) } // uid if line.UID, err = strconv.ParseUint(fields[7], 0, 64); err != nil { - return nil, fmt.Errorf("cannot parse uid value in socket line: %w", err) + return nil, fmt.Errorf("%w: Cannot parse UID value in %q: %w", ErrFileParse, line.UID, err) } // inode if line.Inode, err = strconv.ParseUint(fields[9], 0, 64); err != nil { - return nil, fmt.Errorf("cannot parse inode value in 
socket line: %w", err) + return nil, fmt.Errorf("%w: Cannot parse inode value in %q: %w", ErrFileParse, line.Inode, err) + } + + // drops + if isUDP { + drops, err := strconv.ParseUint(fields[12], 0, 64) + if err != nil { + return nil, fmt.Errorf("%w: Cannot parse drops value in %q: %w", ErrFileParse, drops, err) + } + line.Drops = &drops } return line, nil diff --git a/tools/vendor/github.com/prometheus/procfs/net_protocols.go b/tools/vendor/github.com/prometheus/procfs/net_protocols.go index 8c6de3791..b6c77b709 100644 --- a/tools/vendor/github.com/prometheus/procfs/net_protocols.go +++ b/tools/vendor/github.com/prometheus/procfs/net_protocols.go @@ -23,7 +23,7 @@ import ( "github.com/prometheus/procfs/internal/util" ) -// NetProtocolStats stores the contents from /proc/net/protocols +// NetProtocolStats stores the contents from /proc/net/protocols. type NetProtocolStats map[string]NetProtocolStatLine // NetProtocolStatLine contains a single line parsed from /proc/net/protocols. We @@ -41,7 +41,7 @@ type NetProtocolStatLine struct { Capabilities NetProtocolCapabilities } -// NetProtocolCapabilities contains a list of capabilities for each protocol +// NetProtocolCapabilities contains a list of capabilities for each protocol. type NetProtocolCapabilities struct { Close bool // 8 Connect bool // 9 @@ -131,7 +131,7 @@ func (ps NetProtocolStats) parseLine(rawLine string) (*NetProtocolStatLine, erro } else if fields[6] == disabled { line.Slab = false } else { - return nil, fmt.Errorf("unable to parse capability for protocol: %s", line.Name) + return nil, fmt.Errorf("%w: capability for protocol: %s", ErrFileParse, line.Name) } line.ModuleName = fields[7] @@ -173,7 +173,7 @@ func (pc *NetProtocolCapabilities) parseCapabilities(capabilities []string) erro } else if capabilities[i] == "n" { *capabilityFields[i] = false } else { - return fmt.Errorf("unable to parse capability block for protocol: position %d", i) + return fmt.Errorf("%w: capability block for protocol: position %d", ErrFileParse, i) } } return nil diff --git a/tools/vendor/github.com/prometheus/procfs/net_route.go b/tools/vendor/github.com/prometheus/procfs/net_route.go new file mode 100644 index 000000000..deb7029fe --- /dev/null +++ b/tools/vendor/github.com/prometheus/procfs/net_route.go @@ -0,0 +1,143 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +const ( + blackholeRepresentation string = "*" + blackholeIfaceName string = "blackhole" + routeLineColumns int = 11 +) + +// A NetRouteLine represents one line from net/route. 
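net_route.go is a new parser for /proc/net/route built around the NetRouteLine struct that follows. A usage sketch; note that the kernel prints IPv4 addresses as little-endian hex, so the parsed uint32 is decoded LSB-first (import path assumed):

package main

import (
	"encoding/binary"
	"fmt"
	"log"
	"net"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}
	routes, err := fs.NetRoute()
	if err != nil {
		log.Fatal(err)
	}
	for _, r := range routes {
		// e.g. hex 00ECA8C0 becomes 192.168.236.0 once read LSB-first.
		dst := make(net.IP, 4)
		binary.LittleEndian.PutUint32(dst, r.Destination)
		fmt.Printf("%-8s %s metric=%d\n", r.Iface, dst, r.Metric)
	}
}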
+type NetRouteLine struct { + Iface string + Destination uint32 + Gateway uint32 + Flags uint32 + RefCnt uint32 + Use uint32 + Metric uint32 + Mask uint32 + MTU uint32 + Window uint32 + IRTT uint32 +} + +func (fs FS) NetRoute() ([]NetRouteLine, error) { + return readNetRoute(fs.proc.Path("net", "route")) +} + +func readNetRoute(path string) ([]NetRouteLine, error) { + b, err := util.ReadFileNoStat(path) + if err != nil { + return nil, err + } + + routelines, err := parseNetRoute(bytes.NewReader(b)) + if err != nil { + return nil, fmt.Errorf("failed to read net route from %s: %w", path, err) + } + return routelines, nil +} + +func parseNetRoute(r io.Reader) ([]NetRouteLine, error) { + var routelines []NetRouteLine + + scanner := bufio.NewScanner(r) + scanner.Scan() + for scanner.Scan() { + fields := strings.Fields(scanner.Text()) + routeline, err := parseNetRouteLine(fields) + if err != nil { + return nil, err + } + routelines = append(routelines, *routeline) + } + return routelines, nil +} + +func parseNetRouteLine(fields []string) (*NetRouteLine, error) { + if len(fields) != routeLineColumns { + return nil, fmt.Errorf("invalid routeline, num of digits: %d", len(fields)) + } + iface := fields[0] + if iface == blackholeRepresentation { + iface = blackholeIfaceName + } + destination, err := strconv.ParseUint(fields[1], 16, 32) + if err != nil { + return nil, err + } + gateway, err := strconv.ParseUint(fields[2], 16, 32) + if err != nil { + return nil, err + } + flags, err := strconv.ParseUint(fields[3], 10, 32) + if err != nil { + return nil, err + } + refcnt, err := strconv.ParseUint(fields[4], 10, 32) + if err != nil { + return nil, err + } + use, err := strconv.ParseUint(fields[5], 10, 32) + if err != nil { + return nil, err + } + metric, err := strconv.ParseUint(fields[6], 10, 32) + if err != nil { + return nil, err + } + mask, err := strconv.ParseUint(fields[7], 16, 32) + if err != nil { + return nil, err + } + mtu, err := strconv.ParseUint(fields[8], 10, 32) + if err != nil { + return nil, err + } + window, err := strconv.ParseUint(fields[9], 10, 32) + if err != nil { + return nil, err + } + irtt, err := strconv.ParseUint(fields[10], 10, 32) + if err != nil { + return nil, err + } + routeline := &NetRouteLine{ + Iface: iface, + Destination: uint32(destination), + Gateway: uint32(gateway), + Flags: uint32(flags), + RefCnt: uint32(refcnt), + Use: uint32(use), + Metric: uint32(metric), + Mask: uint32(mask), + MTU: uint32(mtu), + Window: uint32(window), + IRTT: uint32(irtt), + } + return routeline, nil +} diff --git a/tools/vendor/github.com/prometheus/procfs/net_sockstat.go b/tools/vendor/github.com/prometheus/procfs/net_sockstat.go index e36f4872d..fae62b13d 100644 --- a/tools/vendor/github.com/prometheus/procfs/net_sockstat.go +++ b/tools/vendor/github.com/prometheus/procfs/net_sockstat.go @@ -16,7 +16,6 @@ package procfs import ( "bufio" "bytes" - "errors" "fmt" "io" "strings" @@ -70,7 +69,7 @@ func readSockstat(name string) (*NetSockstat, error) { stat, err := parseSockstat(bytes.NewReader(b)) if err != nil { - return nil, fmt.Errorf("failed to read sockstats from %q: %w", name, err) + return nil, fmt.Errorf("%w: sockstats from %q: %w", ErrFileRead, name, err) } return stat, nil @@ -84,13 +83,13 @@ func parseSockstat(r io.Reader) (*NetSockstat, error) { // Expect a minimum of a protocol and one key/value pair. 
fields := strings.Split(s.Text(), " ") if len(fields) < 3 { - return nil, fmt.Errorf("malformed sockstat line: %q", s.Text()) + return nil, fmt.Errorf("%w: Malformed sockstat line: %q", ErrFileParse, s.Text()) } // The remaining fields are key/value pairs. kvs, err := parseSockstatKVs(fields[1:]) if err != nil { - return nil, fmt.Errorf("error parsing sockstat key/value pairs from %q: %w", s.Text(), err) + return nil, fmt.Errorf("%w: sockstat key/value pairs from %q: %w", ErrFileParse, s.Text(), err) } // The first field is the protocol. We must trim its colon suffix. @@ -119,7 +118,7 @@ func parseSockstat(r io.Reader) (*NetSockstat, error) { // parseSockstatKVs parses a string slice into a map of key/value pairs. func parseSockstatKVs(kvs []string) (map[string]int, error) { if len(kvs)%2 != 0 { - return nil, errors.New("odd number of fields in key/value pairs") + return nil, fmt.Errorf("%w:: Odd number of fields in key/value pairs %q", ErrFileParse, kvs) } // Iterate two values at a time to gather key/value pairs. diff --git a/tools/vendor/github.com/prometheus/procfs/net_softnet.go b/tools/vendor/github.com/prometheus/procfs/net_softnet.go index 46f12c61d..71c8059f4 100644 --- a/tools/vendor/github.com/prometheus/procfs/net_softnet.go +++ b/tools/vendor/github.com/prometheus/procfs/net_softnet.go @@ -27,17 +27,30 @@ import ( // For the proc file format details, // See: // * Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2343 -// * Linux 4.17 https://elixir.bootlin.com/linux/v4.17/source/net/core/net-procfs.c#L162 -// and https://elixir.bootlin.com/linux/v4.17/source/include/linux/netdevice.h#L2810. +// * Linux 2.6.39 https://elixir.bootlin.com/linux/v2.6.39/source/net/core/dev.c#L4086 +// * Linux 4.18 https://elixir.bootlin.com/linux/v4.18/source/net/core/net-procfs.c#L162 +// * Linux 5.14 https://elixir.bootlin.com/linux/v5.14/source/net/core/net-procfs.c#L169 -// SoftnetStat contains a single row of data from /proc/net/softnet_stat +// SoftnetStat contains a single row of data from /proc/net/softnet_stat. type SoftnetStat struct { - // Number of processed packets + // Number of processed packets. Processed uint32 - // Number of dropped packets + // Number of dropped packets. Dropped uint32 - // Number of times processing packets ran out of quota + // Number of times processing packets ran out of quota. TimeSqueezed uint32 + // Number of collision occur while obtaining device lock while transmitting. + CPUCollision uint32 + // Number of times cpu woken up received_rps. + ReceivedRps uint32 + // number of times flow limit has been reached. + FlowLimitCount uint32 + // Softnet backlog status. + SoftnetBacklogLen uint32 + // CPU id owning this softnet_data. + Index uint32 + // softnet_data's Width. 
+ Width int } var softNetProcFile = "net/softnet_stat" @@ -51,7 +64,7 @@ func (fs FS) NetSoftnetStat() ([]SoftnetStat, error) { entries, err := parseSoftnet(bytes.NewReader(b)) if err != nil { - return nil, fmt.Errorf("failed to parse /proc/net/softnet_stat: %w", err) + return nil, fmt.Errorf("%w: /proc/net/softnet_stat: %w", ErrFileParse, err) } return entries, nil @@ -63,25 +76,65 @@ func parseSoftnet(r io.Reader) ([]SoftnetStat, error) { s := bufio.NewScanner(r) var stats []SoftnetStat + cpuIndex := 0 for s.Scan() { columns := strings.Fields(s.Text()) width := len(columns) + softnetStat := SoftnetStat{} if width < minColumns { - return nil, fmt.Errorf("%d columns were detected, but at least %d were expected", width, minColumns) + return nil, fmt.Errorf("%w: detected %d columns, but expected at least %d", ErrFileParse, width, minColumns) } - // We only parse the first three columns at the moment. - us, err := parseHexUint32s(columns[0:3]) - if err != nil { - return nil, err + // Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2347 + if width >= minColumns { + us, err := parseHexUint32s(columns[0:9]) + if err != nil { + return nil, err + } + + softnetStat.Processed = us[0] + softnetStat.Dropped = us[1] + softnetStat.TimeSqueezed = us[2] + softnetStat.CPUCollision = us[8] + } + + // Linux 2.6.39 https://elixir.bootlin.com/linux/v2.6.39/source/net/core/dev.c#L4086 + if width >= 10 { + us, err := parseHexUint32s(columns[9:10]) + if err != nil { + return nil, err + } + + softnetStat.ReceivedRps = us[0] } - stats = append(stats, SoftnetStat{ - Processed: us[0], - Dropped: us[1], - TimeSqueezed: us[2], - }) + // Linux 4.18 https://elixir.bootlin.com/linux/v4.18/source/net/core/net-procfs.c#L162 + if width >= 11 { + us, err := parseHexUint32s(columns[10:11]) + if err != nil { + return nil, err + } + + softnetStat.FlowLimitCount = us[0] + } + + // Linux 5.14 https://elixir.bootlin.com/linux/v5.14/source/net/core/net-procfs.c#L169 + if width >= 13 { + us, err := parseHexUint32s(columns[11:13]) + if err != nil { + return nil, err + } + + softnetStat.SoftnetBacklogLen = us[0] + softnetStat.Index = us[1] + } else { + // For older kernels, create the Index based on the scan line number. + softnetStat.Index = uint32(cpuIndex) + } + softnetStat.Width = width + stats = append(stats, softnetStat) + cpuIndex++ } return stats, nil diff --git a/tools/vendor/github.com/prometheus/procfs/net_tcp.go b/tools/vendor/github.com/prometheus/procfs/net_tcp.go index 527762955..0396d7201 100644 --- a/tools/vendor/github.com/prometheus/procfs/net_tcp.go +++ b/tools/vendor/github.com/prometheus/procfs/net_tcp.go @@ -25,24 +25,28 @@ type ( // NetTCP returns the IPv4 kernel/networking statistics for TCP datagrams // read from /proc/net/tcp. +// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET) instead. func (fs FS) NetTCP() (NetTCP, error) { return newNetTCP(fs.proc.Path("net/tcp")) } // NetTCP6 returns the IPv6 kernel/networking statistics for TCP datagrams // read from /proc/net/tcp6. +// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET6) instead. func (fs FS) NetTCP6() (NetTCP, error) { return newNetTCP(fs.proc.Path("net/tcp6")) } // NetTCPSummary returns already computed statistics like the total queue lengths // for TCP datagrams read from /proc/net/tcp. +// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET) instead. 
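Stepping back to the softnet_stat hunks above: the parser now fills the newer columns only when the kernel actually printed them, and records the observed column count in Width so a real zero can be told apart from a field the running kernel predates. A usage sketch:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}
	stats, err := fs.NetSoftnetStat()
	if err != nil {
		log.Fatal(err)
	}
	for _, s := range stats {
		// Index is read from column 13 on Linux >= 5.14 and synthesized
		// from the row number on older kernels, as the code above shows.
		fmt.Printf("cpu%d processed=%d dropped=%d squeezed=%d (cols=%d)\n",
			s.Index, s.Processed, s.Dropped, s.TimeSqueezed, s.Width)
	}
}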
func (fs FS) NetTCPSummary() (*NetTCPSummary, error) { return newNetTCPSummary(fs.proc.Path("net/tcp")) } // NetTCP6Summary returns already computed statistics like the total queue lengths // for TCP datagrams read from /proc/net/tcp6. +// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET6) instead. func (fs FS) NetTCP6Summary() (*NetTCPSummary, error) { return newNetTCPSummary(fs.proc.Path("net/tcp6")) } diff --git a/tools/vendor/github.com/prometheus/procfs/net_tls_stat.go b/tools/vendor/github.com/prometheus/procfs/net_tls_stat.go new file mode 100644 index 000000000..13994c178 --- /dev/null +++ b/tools/vendor/github.com/prometheus/procfs/net_tls_stat.go @@ -0,0 +1,119 @@ +// Copyright 2023 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "fmt" + "os" + "strconv" + "strings" +) + +// TLSStat struct represents data in /proc/net/tls_stat. +// See https://docs.kernel.org/networking/tls.html#statistics +type TLSStat struct { + // number of TX sessions currently installed where host handles cryptography + TLSCurrTxSw int + // number of RX sessions currently installed where host handles cryptography + TLSCurrRxSw int + // number of TX sessions currently installed where NIC handles cryptography + TLSCurrTxDevice int + // number of RX sessions currently installed where NIC handles cryptography + TLSCurrRxDevice int + //number of TX sessions opened with host cryptography + TLSTxSw int + //number of RX sessions opened with host cryptography + TLSRxSw int + // number of TX sessions opened with NIC cryptography + TLSTxDevice int + // number of RX sessions opened with NIC cryptography + TLSRxDevice int + // record decryption failed (e.g. due to incorrect authentication tag) + TLSDecryptError int + // number of RX resyncs sent to NICs handling cryptography + TLSRxDeviceResync int + // number of RX records which had to be re-decrypted due to TLS_RX_EXPECT_NO_PAD mis-prediction. Note that this counter will also increment for non-data records. + TLSDecryptRetry int + // number of data RX records which had to be re-decrypted due to TLS_RX_EXPECT_NO_PAD mis-prediction. + TLSRxNoPadViolation int +} + +// NewTLSStat reads the tls_stat statistics. +func NewTLSStat() (TLSStat, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return TLSStat{}, err + } + + return fs.NewTLSStat() +} + +// NewTLSStat reads the tls_stat statistics. 
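net_tls_stat.go is new; the method that follows scans /proc/net/tls_stat into the TLSStat struct declared above. A usage sketch (the file only exists when the kernel tls module is loaded):

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}
	stat, err := fs.NewTLSStat()
	if err != nil {
		log.Fatal(err)
	}
	// Curr* counters are sessions installed right now; the Tx/Rx pairs
	// distinguish host (software) from NIC (device) cryptography.
	fmt.Printf("kTLS sessions: sw tx=%d rx=%d, device tx=%d rx=%d\n",
		stat.TLSCurrTxSw, stat.TLSCurrRxSw, stat.TLSCurrTxDevice, stat.TLSCurrRxDevice)
}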
+func (fs FS) NewTLSStat() (TLSStat, error) { + file, err := os.Open(fs.proc.Path("net/tls_stat")) + if err != nil { + return TLSStat{}, err + } + defer file.Close() + + var ( + tlsstat = TLSStat{} + s = bufio.NewScanner(file) + ) + + for s.Scan() { + fields := strings.Fields(s.Text()) + + if len(fields) != 2 { + return TLSStat{}, fmt.Errorf("%w: %q line %q", ErrFileParse, file.Name(), s.Text()) + } + + name := fields[0] + value, err := strconv.Atoi(fields[1]) + if err != nil { + return TLSStat{}, err + } + + switch name { + case "TlsCurrTxSw": + tlsstat.TLSCurrTxSw = value + case "TlsCurrRxSw": + tlsstat.TLSCurrRxSw = value + case "TlsCurrTxDevice": + tlsstat.TLSCurrTxDevice = value + case "TlsCurrRxDevice": + tlsstat.TLSCurrRxDevice = value + case "TlsTxSw": + tlsstat.TLSTxSw = value + case "TlsRxSw": + tlsstat.TLSRxSw = value + case "TlsTxDevice": + tlsstat.TLSTxDevice = value + case "TlsRxDevice": + tlsstat.TLSRxDevice = value + case "TlsDecryptError": + tlsstat.TLSDecryptError = value + case "TlsRxDeviceResync": + tlsstat.TLSRxDeviceResync = value + case "TlsDecryptRetry": + tlsstat.TLSDecryptRetry = value + case "TlsRxNoPadViolation": + tlsstat.TLSRxNoPadViolation = value + } + + } + + return tlsstat, s.Err() +} diff --git a/tools/vendor/github.com/prometheus/procfs/net_unix.go b/tools/vendor/github.com/prometheus/procfs/net_unix.go index 98aa8e1c3..d7e0cacb4 100644 --- a/tools/vendor/github.com/prometheus/procfs/net_unix.go +++ b/tools/vendor/github.com/prometheus/procfs/net_unix.go @@ -108,25 +108,25 @@ func parseNetUNIX(r io.Reader) (*NetUNIX, error) { line := s.Text() item, err := nu.parseLine(line, hasInode, minFields) if err != nil { - return nil, fmt.Errorf("failed to parse /proc/net/unix data %q: %w", line, err) + return nil, fmt.Errorf("%w: /proc/net/unix encountered data %q: %w", ErrFileParse, line, err) } nu.Rows = append(nu.Rows, item) } if err := s.Err(); err != nil { - return nil, fmt.Errorf("failed to scan /proc/net/unix data: %w", err) + return nil, fmt.Errorf("%w: /proc/net/unix encountered data: %w", ErrFileParse, err) } return &nu, nil } -func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine, error) { +func (u *NetUNIX) parseLine(line string, hasInode bool, minFields int) (*NetUNIXLine, error) { fields := strings.Fields(line) l := len(fields) - if l < min { - return nil, fmt.Errorf("expected at least %d fields but got %d", min, l) + if l < minFields { + return nil, fmt.Errorf("%w: expected at least %d fields but got %d", ErrFileParse, minFields, l) } // Field offsets are as follows: @@ -136,29 +136,29 @@ func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine, users, err := u.parseUsers(fields[1]) if err != nil { - return nil, fmt.Errorf("failed to parse ref count %q: %w", fields[1], err) + return nil, fmt.Errorf("%w: ref count %q: %w", ErrFileParse, fields[1], err) } flags, err := u.parseFlags(fields[3]) if err != nil { - return nil, fmt.Errorf("failed to parse flags %q: %w", fields[3], err) + return nil, fmt.Errorf("%w: Unable to parse flags %q: %w", ErrFileParse, fields[3], err) } typ, err := u.parseType(fields[4]) if err != nil { - return nil, fmt.Errorf("failed to parse type %q: %w", fields[4], err) + return nil, fmt.Errorf("%w: Failed to parse type %q: %w", ErrFileParse, fields[4], err) } state, err := u.parseState(fields[5]) if err != nil { - return nil, fmt.Errorf("failed to parse state %q: %w", fields[5], err) + return nil, fmt.Errorf("%w: Failed to parse state %q: %w", ErrFileParse, fields[5], err) } var 
inode uint64 if hasInode { inode, err = u.parseInode(fields[6]) if err != nil { - return nil, fmt.Errorf("failed to parse inode %q: %w", fields[6], err) + return nil, fmt.Errorf("%w failed to parse inode %q: %w", ErrFileParse, fields[6], err) } } @@ -172,7 +172,7 @@ func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine, } // Path field is optional. - if l > min { + if l > minFields { // Path occurs at either index 6 or 7 depending on whether inode is // already present. pathIdx := 7 diff --git a/tools/vendor/github.com/prometheus/procfs/net_wireless.go b/tools/vendor/github.com/prometheus/procfs/net_wireless.go new file mode 100644 index 000000000..7c597bc87 --- /dev/null +++ b/tools/vendor/github.com/prometheus/procfs/net_wireless.go @@ -0,0 +1,182 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// Wireless models the content of /proc/net/wireless. +type Wireless struct { + Name string + + // Status is the current 4-digit hex value status of the interface. + Status uint64 + + // QualityLink is the link quality. + QualityLink int + + // QualityLevel is the signal gain (dBm). + QualityLevel int + + // QualityNoise is the signal noise baseline (dBm). + QualityNoise int + + // DiscardedNwid is the number of discarded packets with wrong nwid/essid. + DiscardedNwid int + + // DiscardedCrypt is the number of discarded packets with wrong code/decode (WEP). + DiscardedCrypt int + + // DiscardedFrag is the number of discarded packets that can't perform MAC reassembly. + DiscardedFrag int + + // DiscardedRetry is the number of discarded packets that reached max MAC retries. + DiscardedRetry int + + // DiscardedMisc is the number of discarded packets for other reasons. + DiscardedMisc int + + // MissedBeacon is the number of missed beacons/superframe. + MissedBeacon int +} + +// Wireless returns kernel wireless statistics. +func (fs FS) Wireless() ([]*Wireless, error) { + b, err := util.ReadFileNoStat(fs.proc.Path("net/wireless")) + if err != nil { + return nil, err + } + + m, err := parseWireless(bytes.NewReader(b)) + if err != nil { + return nil, fmt.Errorf("%w: wireless: %w", ErrFileParse, err) + } + + return m, nil +} + +// parseWireless parses the contents of /proc/net/wireless. +/* +Inter-| sta-| Quality | Discarded packets | Missed | WE +face | tus | link level noise | nwid crypt frag retry misc | beacon | 22 + eth1: 0000 5. -256. -10. 0 1 0 3 0 0 + eth2: 0000 5. -256. -20. 0 2 0 4 0 0 +*/ +func parseWireless(r io.Reader) ([]*Wireless, error) { + var ( + interfaces []*Wireless + scanner = bufio.NewScanner(r) + ) + + for n := 0; scanner.Scan(); n++ { + // Skip the 2 header lines. 
+ if n < 2 { + continue + } + + line := scanner.Text() + + parts := strings.Split(line, ":") + if len(parts) != 2 { + return nil, fmt.Errorf("%w: expected 2 parts after splitting line by ':', got %d for line %q", ErrFileParse, len(parts), line) + } + + name := strings.TrimSpace(parts[0]) + stats := strings.Fields(parts[1]) + + if len(stats) < 10 { + return nil, fmt.Errorf("%w: invalid number of fields in line %d, expected 10+, got %d: %q", ErrFileParse, n, len(stats), line) + } + + status, err := strconv.ParseUint(stats[0], 16, 16) + if err != nil { + return nil, fmt.Errorf("%w: invalid status in line %d: %q", ErrFileParse, n, line) + } + + qlink, err := strconv.Atoi(strings.TrimSuffix(stats[1], ".")) + if err != nil { + return nil, fmt.Errorf("%w: parse Quality:link as integer %q: %w", ErrFileParse, qlink, err) + } + + qlevel, err := strconv.Atoi(strings.TrimSuffix(stats[2], ".")) + if err != nil { + return nil, fmt.Errorf("%w: Quality:level as integer %q: %w", ErrFileParse, qlevel, err) + } + + qnoise, err := strconv.Atoi(strings.TrimSuffix(stats[3], ".")) + if err != nil { + return nil, fmt.Errorf("%w: Quality:noise as integer %q: %w", ErrFileParse, qnoise, err) + } + + dnwid, err := strconv.Atoi(stats[4]) + if err != nil { + return nil, fmt.Errorf("%w: Discarded:nwid as integer %q: %w", ErrFileParse, dnwid, err) + } + + dcrypt, err := strconv.Atoi(stats[5]) + if err != nil { + return nil, fmt.Errorf("%w: Discarded:crypt as integer %q: %w", ErrFileParse, dcrypt, err) + } + + dfrag, err := strconv.Atoi(stats[6]) + if err != nil { + return nil, fmt.Errorf("%w: Discarded:frag as integer %q: %w", ErrFileParse, dfrag, err) + } + + dretry, err := strconv.Atoi(stats[7]) + if err != nil { + return nil, fmt.Errorf("%w: Discarded:retry as integer %q: %w", ErrFileParse, dretry, err) + } + + dmisc, err := strconv.Atoi(stats[8]) + if err != nil { + return nil, fmt.Errorf("%w: Discarded:misc as integer %q: %w", ErrFileParse, dmisc, err) + } + + mbeacon, err := strconv.Atoi(stats[9]) + if err != nil { + return nil, fmt.Errorf("%w: Missed:beacon as integer %q: %w", ErrFileParse, mbeacon, err) + } + + w := &Wireless{ + Name: name, + Status: status, + QualityLink: qlink, + QualityLevel: qlevel, + QualityNoise: qnoise, + DiscardedNwid: dnwid, + DiscardedCrypt: dcrypt, + DiscardedFrag: dfrag, + DiscardedRetry: dretry, + DiscardedMisc: dmisc, + MissedBeacon: mbeacon, + } + + interfaces = append(interfaces, w) + } + + if err := scanner.Err(); err != nil { + return nil, fmt.Errorf("%w: Failed to scan /proc/net/wireless: %w", ErrFileRead, err) + } + + return interfaces, nil +} diff --git a/tools/vendor/github.com/prometheus/procfs/xfrm.go b/tools/vendor/github.com/prometheus/procfs/net_xfrm.go similarity index 94% rename from tools/vendor/github.com/prometheus/procfs/xfrm.go rename to tools/vendor/github.com/prometheus/procfs/net_xfrm.go index eed07c7d7..932ef2046 100644 --- a/tools/vendor/github.com/prometheus/procfs/xfrm.go +++ b/tools/vendor/github.com/prometheus/procfs/net_xfrm.go @@ -79,10 +79,13 @@ type XfrmStat struct { // Policy is dead XfrmOutPolDead int // Policy Error - XfrmOutPolError int - XfrmFwdHdrError int + XfrmOutPolError int + // Forward routing of a packet is not allowed + XfrmFwdHdrError int + // State is invalid, perhaps expired XfrmOutStateInvalid int - XfrmAcquireError int + // State hasn’t been fully acquired before use + XfrmAcquireError int } // NewXfrmStat reads the xfrm_stat statistics. 
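The wireless parser above strips the trailing '.' the kernel prints after each quality reading and reports signed dBm values. A usage sketch:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}
	ifaces, err := fs.Wireless()
	if err != nil {
		log.Fatal(err) // absent on machines without wireless interfaces
	}
	for _, w := range ifaces {
		// QualityLevel and QualityNoise are signed dBm readings.
		fmt.Printf("%s: link=%d level=%ddBm noise=%ddBm missed=%d\n",
			w.Name, w.QualityLink, w.QualityLevel, w.QualityNoise, w.MissedBeacon)
	}
}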
@@ -112,7 +115,7 @@ func (fs FS) NewXfrmStat() (XfrmStat, error) { fields := strings.Fields(s.Text()) if len(fields) != 2 { - return XfrmStat{}, fmt.Errorf("couldn't parse %q line %q", file.Name(), s.Text()) + return XfrmStat{}, fmt.Errorf("%w: %q line %q", ErrFileParse, file.Name(), s.Text()) } name := fields[0] diff --git a/tools/vendor/github.com/prometheus/procfs/netstat.go b/tools/vendor/github.com/prometheus/procfs/netstat.go index 94d892f11..742dff453 100644 --- a/tools/vendor/github.com/prometheus/procfs/netstat.go +++ b/tools/vendor/github.com/prometheus/procfs/netstat.go @@ -21,13 +21,13 @@ import ( "strings" ) -// NetStat contains statistics for all the counters from one file +// NetStat contains statistics for all the counters from one file. type NetStat struct { - Filename string Stats map[string][]uint64 + Filename string } -// NetStat retrieves stats from /proc/net/stat/ +// NetStat retrieves stats from `/proc/net/stat/`. func (fs FS) NetStat() ([]NetStat, error) { statFiles, err := filepath.Glob(fs.proc.Path("net/stat/*")) if err != nil { @@ -37,32 +37,46 @@ func (fs FS) NetStat() ([]NetStat, error) { var netStatsTotal []NetStat for _, filePath := range statFiles { - file, err := os.Open(filePath) + procNetstat, err := parseNetstat(filePath) if err != nil { return nil, err } + procNetstat.Filename = filepath.Base(filePath) - netStatFile := NetStat{ - Filename: filepath.Base(filePath), - Stats: make(map[string][]uint64), - } - scanner := bufio.NewScanner(file) - scanner.Scan() - // First string is always a header for stats - var headers []string - headers = append(headers, strings.Fields(scanner.Text())...) + netStatsTotal = append(netStatsTotal, procNetstat) + } + return netStatsTotal, nil +} + +// parseNetstat parses the metrics from `/proc/net/stat/` file +// and returns a NetStat structure. +func parseNetstat(filePath string) (NetStat, error) { + netStat := NetStat{ + Stats: make(map[string][]uint64), + } + file, err := os.Open(filePath) + if err != nil { + return netStat, err + } + defer file.Close() + + scanner := bufio.NewScanner(file) + scanner.Scan() - // Other strings represent per-CPU counters - for scanner.Scan() { - for num, counter := range strings.Fields(scanner.Text()) { - value, err := strconv.ParseUint(counter, 16, 32) - if err != nil { - return nil, err - } - netStatFile.Stats[headers[num]] = append(netStatFile.Stats[headers[num]], value) + // First string is always a header for stats + var headers []string + headers = append(headers, strings.Fields(scanner.Text())...) + + // Other strings represent per-CPU counters + for scanner.Scan() { + for num, counter := range strings.Fields(scanner.Text()) { + value, err := strconv.ParseUint(counter, 16, 64) + if err != nil { + return NetStat{}, err } + netStat.Stats[headers[num]] = append(netStat.Stats[headers[num]], value) } - netStatsTotal = append(netStatsTotal, netStatFile) } - return netStatsTotal, nil + + return netStat, nil } diff --git a/tools/vendor/github.com/prometheus/procfs/proc.go b/tools/vendor/github.com/prometheus/procfs/proc.go index 28f696803..142796368 100644 --- a/tools/vendor/github.com/prometheus/procfs/proc.go +++ b/tools/vendor/github.com/prometheus/procfs/proc.go @@ -15,13 +15,13 @@ package procfs import ( "bytes" + "errors" "fmt" - "io/ioutil" + "io" "os" "strconv" "strings" - "github.com/prometheus/procfs/internal/fs" "github.com/prometheus/procfs/internal/util" ) @@ -30,12 +30,18 @@ type Proc struct { // The process ID. 
PID int - fs fs.FS + fs FS } // Procs represents a list of Proc structs. type Procs []Proc +var ( + ErrFileParse = errors.New("Error Parsing File") + ErrFileRead = errors.New("Error Reading File") + ErrMountPoint = errors.New("Error Accessing Mount point") +) + func (p Procs) Len() int { return len(p) } func (p Procs) Swap(i, j int) { p[i], p[j] = p[j], p[i] } func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID } @@ -43,7 +49,7 @@ func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID } // Self returns a process for the current process read via /proc/self. func Self() (Proc, error) { fs, err := NewFS(DefaultMountPoint) - if err != nil { + if err != nil || errors.Unwrap(err) == ErrMountPoint { return Proc{}, err } return fs.Self() @@ -82,7 +88,7 @@ func (fs FS) Self() (Proc, error) { // NewProc returns a process for the given pid. // -// Deprecated: use fs.Proc() instead +// Deprecated: Use fs.Proc() instead. func (fs FS) NewProc(pid int) (Proc, error) { return fs.Proc(pid) } @@ -92,7 +98,7 @@ func (fs FS) Proc(pid int) (Proc, error) { if _, err := os.Stat(fs.proc.Path(strconv.Itoa(pid))); err != nil { return Proc{}, err } - return Proc{PID: pid, fs: fs.proc}, nil + return Proc{PID: pid, fs: fs}, nil } // AllProcs returns a list of all currently available processes. @@ -105,7 +111,7 @@ func (fs FS) AllProcs() (Procs, error) { names, err := d.Readdirnames(-1) if err != nil { - return Procs{}, fmt.Errorf("could not read %q: %w", d.Name(), err) + return Procs{}, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, names, err) } p := Procs{} @@ -114,7 +120,7 @@ func (fs FS) AllProcs() (Procs, error) { if err != nil { continue } - p = append(p, Proc{PID: int(pid), fs: fs.proc}) + p = append(p, Proc{PID: int(pid), fs: fs}) } return p, nil @@ -131,7 +137,7 @@ func (p Proc) CmdLine() ([]string, error) { return []string{}, nil } - return strings.Split(string(bytes.TrimRight(data, string("\x00"))), string(byte(0))), nil + return strings.Split(string(bytes.TrimRight(data, "\x00")), "\x00"), nil } // Wchan returns the wchan (wait channel) of a process. @@ -142,7 +148,7 @@ func (p Proc) Wchan() (string, error) { } defer f.Close() - data, err := ioutil.ReadAll(f) + data, err := io.ReadAll(f) if err != nil { return "", err } @@ -185,7 +191,7 @@ func (p Proc) Cwd() (string, error) { return wd, err } -// RootDir returns the absolute path to the process's root directory (as set by chroot) +// RootDir returns the absolute path to the process's root directory (as set by chroot). func (p Proc) RootDir() (string, error) { rdir, err := os.Readlink(p.path("root")) if os.IsNotExist(err) { @@ -206,7 +212,7 @@ func (p Proc) FileDescriptors() ([]uintptr, error) { for i, n := range names { fd, err := strconv.ParseInt(n, 10, 32) if err != nil { - return nil, fmt.Errorf("could not parse fd %q: %w", n, err) + return nil, fmt.Errorf("%w: Cannot parse line: %v: %w", ErrFileParse, i, err) } fds[i] = uintptr(fd) } @@ -237,6 +243,19 @@ func (p Proc) FileDescriptorTargets() ([]string, error) { // FileDescriptorsLen returns the number of currently open file descriptors of // a process. 
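The FileDescriptorsLen change that follows adds a fast path: since Linux v6.2 (torvalds/linux commit f1f1f2569901), stat() on /proc/<pid>/fd reports the number of open descriptors in its size, so counting no longer requires reading the whole directory. Callers are unaffected:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	// One stat() syscall on Linux >= 6.2; on older kernels the
	// library falls back to listing the fd directory.
	n, err := p.FileDescriptorsLen()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("open fds: %d\n", n)
}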
func (p Proc) FileDescriptorsLen() (int, error) { + // Use fast path if available (Linux v6.2): https://github.com/torvalds/linux/commit/f1f1f2569901 + if p.fs.isReal { + stat, err := os.Stat(p.path("fd")) + if err != nil { + return 0, err + } + + size := stat.Size() + if size > 0 { + return int(size), nil + } + } + fds, err := p.fileDescriptors() if err != nil { return 0, err @@ -278,14 +297,14 @@ func (p Proc) fileDescriptors() ([]string, error) { names, err := d.Readdirnames(-1) if err != nil { - return nil, fmt.Errorf("could not read %q: %w", d.Name(), err) + return nil, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, names, err) } return names, nil } func (p Proc) path(pa ...string) string { - return p.fs.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...) + return p.fs.proc.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...) } // FileDescriptorsInfo retrieves information about all file descriptors of @@ -311,7 +330,7 @@ func (p Proc) FileDescriptorsInfo() (ProcFDInfos, error) { // Schedstat returns task scheduling information for the process. func (p Proc) Schedstat() (ProcSchedstat, error) { - contents, err := ioutil.ReadFile(p.path("schedstat")) + contents, err := os.ReadFile(p.path("schedstat")) if err != nil { return ProcSchedstat{}, err } diff --git a/tools/vendor/github.com/prometheus/procfs/proc_cgroup.go b/tools/vendor/github.com/prometheus/procfs/proc_cgroup.go index be45b7987..4a64347c0 100644 --- a/tools/vendor/github.com/prometheus/procfs/proc_cgroup.go +++ b/tools/vendor/github.com/prometheus/procfs/proc_cgroup.go @@ -23,8 +23,8 @@ import ( "github.com/prometheus/procfs/internal/util" ) -// Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the the placement of a PID inside a -// specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. v1 has one hierarchy per available resource +// Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the placement of a PID inside a +// specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. The v1 has one hierarchy per available resource // controller, while v2 has one unified hierarchy shared by all controllers. Regardless of v1 or v2, all hierarchies // contain all running processes, so the question answerable with a Cgroup struct is 'where is this process in // this hierarchy' (where==what path on the specific cgroupfs). By prefixing this path with the mount point of @@ -45,13 +45,13 @@ type Cgroup struct { } // parseCgroupString parses each line of the /proc/[pid]/cgroup file -// Line format is hierarchyID:[controller1,controller2]:path +// Line format is hierarchyID:[controller1,controller2]:path. 
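A usage sketch for the per-process cgroup parser below (field names taken from the Cgroup struct above; on a pure cgroup v2 host the file holds a single 0::/path line):

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	cgs, err := p.Cgroups()
	if err != nil {
		log.Fatal(err)
	}
	for _, cg := range cgs {
		// One line per hierarchy; Controllers is empty on cgroup v2.
		fmt.Printf("hierarchy=%d controllers=%v path=%s\n",
			cg.HierarchyID, cg.Controllers, cg.Path)
	}
}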
func parseCgroupString(cgroupStr string) (*Cgroup, error) { var err error fields := strings.SplitN(cgroupStr, ":", 3) if len(fields) < 3 { - return nil, fmt.Errorf("at least 3 fields required, found %d fields in cgroup string: %s", len(fields), cgroupStr) + return nil, fmt.Errorf("%w: 3+ fields required, found %d fields in cgroup string: %s", ErrFileParse, len(fields), cgroupStr) } cgroup := &Cgroup{ @@ -60,7 +60,7 @@ func parseCgroupString(cgroupStr string) (*Cgroup, error) { } cgroup.HierarchyID, err = strconv.Atoi(fields[0]) if err != nil { - return nil, fmt.Errorf("failed to parse hierarchy ID") + return nil, fmt.Errorf("%w: hierarchy ID: %q", ErrFileParse, cgroup.HierarchyID) } if fields[1] != "" { ssNames := strings.Split(fields[1], ",") @@ -69,7 +69,7 @@ func parseCgroupString(cgroupStr string) (*Cgroup, error) { return cgroup, nil } -// parseCgroups reads each line of the /proc/[pid]/cgroup file +// parseCgroups reads each line of the /proc/[pid]/cgroup file. func parseCgroups(data []byte) ([]Cgroup, error) { var cgroups []Cgroup scanner := bufio.NewScanner(bytes.NewReader(data)) @@ -88,7 +88,7 @@ func parseCgroups(data []byte) ([]Cgroup, error) { // Cgroups reads from /proc//cgroups and returns a []*Cgroup struct locating this PID in each process // control hierarchy running on this system. On every system (v1 and v2), all hierarchies contain all processes, -// so the len of the returned struct is equal to the number of active hierarchies on this system +// so the len of the returned struct is equal to the number of active hierarchies on this system. func (p Proc) Cgroups() ([]Cgroup, error) { data, err := util.ReadFileNoStat(p.path("cgroup")) if err != nil { diff --git a/tools/vendor/github.com/prometheus/procfs/proc_cgroups.go b/tools/vendor/github.com/prometheus/procfs/proc_cgroups.go new file mode 100644 index 000000000..5dd493899 --- /dev/null +++ b/tools/vendor/github.com/prometheus/procfs/proc_cgroups.go @@ -0,0 +1,98 @@ +// Copyright 2021 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// CgroupSummary models one line from /proc/cgroups. +// This file contains information about the controllers that are compiled into the kernel. +// +// Also see http://man7.org/linux/man-pages/man7/cgroups.7.html +type CgroupSummary struct { + // The name of the controller. controller is also known as subsystem. + SubsysName string + // The unique ID of the cgroup hierarchy on which this controller is mounted. + Hierarchy int + // The number of control groups in this hierarchy using this controller. + Cgroups int + // This field contains the value 1 if this controller is enabled, or 0 if it has been disabled + Enabled int +} + +// parseCgroupSummary parses each line of the /proc/cgroup file +// Line format is `subsys_name hierarchy num_cgroups enabled`. 
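And a matching sketch for the new /proc/cgroups summary parser that follows:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}
	summaries, err := fs.CgroupSummarys()
	if err != nil {
		log.Fatal(err)
	}
	for _, s := range summaries {
		// Enabled is 1 unless the controller was disabled at boot.
		fmt.Printf("%-12s hierarchy=%d cgroups=%d enabled=%d\n",
			s.SubsysName, s.Hierarchy, s.Cgroups, s.Enabled)
	}
}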
+func parseCgroupSummaryString(CgroupSummaryStr string) (*CgroupSummary, error) { + var err error + + fields := strings.Fields(CgroupSummaryStr) + // require at least 4 fields + if len(fields) < 4 { + return nil, fmt.Errorf("%w: 4+ fields required, found %d fields in cgroup info string: %s", ErrFileParse, len(fields), CgroupSummaryStr) + } + + CgroupSummary := &CgroupSummary{ + SubsysName: fields[0], + } + CgroupSummary.Hierarchy, err = strconv.Atoi(fields[1]) + if err != nil { + return nil, fmt.Errorf("%w: Unable to parse hierarchy ID from %q", ErrFileParse, fields[1]) + } + CgroupSummary.Cgroups, err = strconv.Atoi(fields[2]) + if err != nil { + return nil, fmt.Errorf("%w: Unable to parse Cgroup Num from %q", ErrFileParse, fields[2]) + } + CgroupSummary.Enabled, err = strconv.Atoi(fields[3]) + if err != nil { + return nil, fmt.Errorf("%w: Unable to parse Enabled from %q", ErrFileParse, fields[3]) + } + return CgroupSummary, nil +} + +// parseCgroupSummary reads each line of the /proc/cgroup file. +func parseCgroupSummary(data []byte) ([]CgroupSummary, error) { + var CgroupSummarys []CgroupSummary + scanner := bufio.NewScanner(bytes.NewReader(data)) + for scanner.Scan() { + CgroupSummaryString := scanner.Text() + // ignore comment lines + if strings.HasPrefix(CgroupSummaryString, "#") { + continue + } + CgroupSummary, err := parseCgroupSummaryString(CgroupSummaryString) + if err != nil { + return nil, err + } + CgroupSummarys = append(CgroupSummarys, *CgroupSummary) + } + + err := scanner.Err() + return CgroupSummarys, err +} + +// CgroupSummarys returns information about current /proc/cgroups. +func (fs FS) CgroupSummarys() ([]CgroupSummary, error) { + data, err := util.ReadFileNoStat(fs.proc.Path("cgroups")) + if err != nil { + return nil, err + } + return parseCgroupSummary(data) +} diff --git a/tools/vendor/github.com/prometheus/procfs/proc_environ.go b/tools/vendor/github.com/prometheus/procfs/proc_environ.go index 6134b3580..57a89895d 100644 --- a/tools/vendor/github.com/prometheus/procfs/proc_environ.go +++ b/tools/vendor/github.com/prometheus/procfs/proc_environ.go @@ -19,7 +19,7 @@ import ( "github.com/prometheus/procfs/internal/util" ) -// Environ reads process environments from /proc//environ +// Environ reads process environments from `/proc//environ`. 
func (p Proc) Environ() ([]string, error) { environments := make([]string, 0) diff --git a/tools/vendor/github.com/prometheus/procfs/proc_fdinfo.go b/tools/vendor/github.com/prometheus/procfs/proc_fdinfo.go index cf63227f0..fa761b352 100644 --- a/tools/vendor/github.com/prometheus/procfs/proc_fdinfo.go +++ b/tools/vendor/github.com/prometheus/procfs/proc_fdinfo.go @@ -22,11 +22,11 @@ import ( "github.com/prometheus/procfs/internal/util" ) -// Regexp variables var ( rPos = regexp.MustCompile(`^pos:\s+(\d+)$`) rFlags = regexp.MustCompile(`^flags:\s+(\d+)$`) rMntID = regexp.MustCompile(`^mnt_id:\s+(\d+)$`) + rIno = regexp.MustCompile(`^ino:\s+(\d+)$`) rInotify = regexp.MustCompile(`^inotify`) rInotifyParts = regexp.MustCompile(`^inotify\s+wd:([0-9a-f]+)\s+ino:([0-9a-f]+)\s+sdev:([0-9a-f]+)(?:\s+mask:([0-9a-f]+))?`) ) @@ -41,6 +41,8 @@ type ProcFDInfo struct { Flags string // Mount point ID MntID string + // Inode number + Ino string // List of inotify lines (structured) in the fdinfo file (kernel 3.8+ only) InotifyInfos []InotifyInfo } @@ -52,7 +54,7 @@ func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) { return nil, err } - var text, pos, flags, mntid string + var text, pos, flags, mntid, ino string var inotify []InotifyInfo scanner := bufio.NewScanner(bytes.NewReader(data)) @@ -64,6 +66,8 @@ func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) { flags = rFlags.FindStringSubmatch(text)[1] } else if rMntID.MatchString(text) { mntid = rMntID.FindStringSubmatch(text)[1] + } else if rIno.MatchString(text) { + ino = rIno.FindStringSubmatch(text)[1] } else if rInotify.MatchString(text) { newInotify, err := parseInotifyInfo(text) if err != nil { @@ -78,6 +82,7 @@ func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) { Pos: pos, Flags: flags, MntID: mntid, + Ino: ino, InotifyInfos: inotify, } @@ -112,7 +117,7 @@ func parseInotifyInfo(line string) (*InotifyInfo, error) { } return i, nil } - return nil, fmt.Errorf("invalid inode entry: %q", line) + return nil, fmt.Errorf("%w: invalid inode entry: %q", ErrFileParse, line) } // ProcFDInfos represents a list of ProcFDInfo structs. @@ -122,7 +127,7 @@ func (p ProcFDInfos) Len() int { return len(p) } func (p ProcFDInfos) Swap(i, j int) { p[i], p[j] = p[j], p[i] } func (p ProcFDInfos) Less(i, j int) bool { return p[i].FD < p[j].FD } -// InotifyWatchLen returns the total number of inotify watches +// InotifyWatchLen returns the total number of inotify watches. func (p ProcFDInfos) InotifyWatchLen() (int, error) { length := 0 for _, f := range p { diff --git a/tools/vendor/github.com/prometheus/procfs/proc_interrupts.go b/tools/vendor/github.com/prometheus/procfs/proc_interrupts.go new file mode 100644 index 000000000..86b4b4524 --- /dev/null +++ b/tools/vendor/github.com/prometheus/procfs/proc_interrupts.go @@ -0,0 +1,98 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
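The proc_fdinfo.go hunk above adds an Ino field alongside Pos, Flags and MntID; all stay strings, mirroring the raw file. A hedged sketch of reading it for one descriptor, assuming the upstream module path:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	info, err := p.FDInfo("0") // fd 0: stdin of this process
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("pos=%s flags=%s mnt_id=%s ino=%s\n",
		info.Pos, info.Flags, info.MntID, info.Ino)
}
```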
+ +package procfs + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// Interrupt represents a single interrupt line. +type Interrupt struct { + // Info is the type of interrupt. + Info string + // Devices is the name of the device that is located at that IRQ + Devices string + // Values is the number of interrupts per CPU. + Values []string +} + +// Interrupts models the content of /proc/interrupts. Key is the IRQ number. +// - https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/6/html/deployment_guide/s2-proc-interrupts +// - https://raspberrypi.stackexchange.com/questions/105802/explanation-of-proc-interrupts-output +type Interrupts map[string]Interrupt + +// Interrupts creates a new instance from a given Proc instance. +func (p Proc) Interrupts() (Interrupts, error) { + data, err := util.ReadFileNoStat(p.path("interrupts")) + if err != nil { + return nil, err + } + return parseInterrupts(bytes.NewReader(data)) +} + +func parseInterrupts(r io.Reader) (Interrupts, error) { + var ( + interrupts = Interrupts{} + scanner = bufio.NewScanner(r) + ) + + if !scanner.Scan() { + return nil, errors.New("interrupts empty") + } + cpuNum := len(strings.Fields(scanner.Text())) // one header per cpu + + for scanner.Scan() { + parts := strings.Fields(scanner.Text()) + if len(parts) == 0 { // skip empty lines + continue + } + if len(parts) < 2 { + return nil, fmt.Errorf("%w: Not enough fields in interrupts (expected 2+ fields but got %d): %s", ErrFileParse, len(parts), parts) + } + intName := parts[0][:len(parts[0])-1] // remove trailing : + + if len(parts) == 2 { + interrupts[intName] = Interrupt{ + Info: "", + Devices: "", + Values: []string{ + parts[1], + }, + } + continue + } + + intr := Interrupt{ + Values: parts[1 : cpuNum+1], + } + + if _, err := strconv.Atoi(intName); err == nil { // numeral interrupt + intr.Info = parts[cpuNum+1] + intr.Devices = strings.Join(parts[cpuNum+2:], " ") + } else { + intr.Info = strings.Join(parts[cpuNum+1:], " ") + } + interrupts[intName] = intr + } + + return interrupts, scanner.Err() +} diff --git a/tools/vendor/github.com/prometheus/procfs/proc_io.go b/tools/vendor/github.com/prometheus/procfs/proc_io.go index 776f34971..d15b66ddb 100644 --- a/tools/vendor/github.com/prometheus/procfs/proc_io.go +++ b/tools/vendor/github.com/prometheus/procfs/proc_io.go @@ -50,7 +50,7 @@ func (p Proc) IO() (ProcIO, error) { ioFormat := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" + "read_bytes: %d\nwrite_bytes: %d\n" + - "cancelled_write_bytes: %d\n" + "cancelled_write_bytes: %d\n" //nolint:misspell _, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR, &pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes) diff --git a/tools/vendor/github.com/prometheus/procfs/proc_limits.go b/tools/vendor/github.com/prometheus/procfs/proc_limits.go index dd20f198a..9530b14bc 100644 --- a/tools/vendor/github.com/prometheus/procfs/proc_limits.go +++ b/tools/vendor/github.com/prometheus/procfs/proc_limits.go @@ -79,7 +79,7 @@ var ( // NewLimits returns the current soft limits of the process. // -// Deprecated: use p.Limits() instead +// Deprecated: Use p.Limits() instead. 
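The new proc_interrupts.go parser above keys interrupts by IRQ name and keeps per-CPU counters as strings. A minimal consumer sketch, assuming the upstream module path:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	interrupts, err := p.Interrupts() // parses /proc/interrupts
	if err != nil {
		log.Fatal(err)
	}
	for irq, i := range interrupts {
		// Values holds one string counter per CPU column.
		fmt.Printf("%4s %-24s %v\n", irq, i.Devices, i.Values)
	}
}
```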
func (p Proc) NewLimits() (ProcLimits, error) { return p.Limits() } @@ -103,7 +103,7 @@ func (p Proc) Limits() (ProcLimits, error) { //fields := limitsMatch.Split(s.Text(), limitsFields) fields := limitsMatch.FindStringSubmatch(s.Text()) if len(fields) != limitsFields { - return ProcLimits{}, fmt.Errorf("couldn't parse %q line %q", f.Name(), s.Text()) + return ProcLimits{}, fmt.Errorf("%w: couldn't parse %q line %q", ErrFileParse, f.Name(), s.Text()) } switch fields[1] { @@ -154,7 +154,7 @@ func parseUint(s string) (uint64, error) { } i, err := strconv.ParseUint(s, 10, 64) if err != nil { - return 0, fmt.Errorf("couldn't parse value %q: %w", s, err) + return 0, fmt.Errorf("%w: couldn't parse value %q: %w", ErrFileParse, s, err) } return i, nil } diff --git a/tools/vendor/github.com/prometheus/procfs/proc_maps.go b/tools/vendor/github.com/prometheus/procfs/proc_maps.go index 1d7772d51..7e75c286b 100644 --- a/tools/vendor/github.com/prometheus/procfs/proc_maps.go +++ b/tools/vendor/github.com/prometheus/procfs/proc_maps.go @@ -11,7 +11,9 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris) && !js // +build aix darwin dragonfly freebsd linux netbsd openbsd solaris +// +build !js package procfs @@ -25,7 +27,7 @@ import ( "golang.org/x/sys/unix" ) -// ProcMapPermissions contains permission settings read from /proc/[pid]/maps +// ProcMapPermissions contains permission settings read from `/proc/[pid]/maps`. type ProcMapPermissions struct { // mapping has the [R]ead flag set Read bool @@ -39,8 +41,8 @@ type ProcMapPermissions struct { Private bool } -// ProcMap contains the process memory-mappings of the process, -// read from /proc/[pid]/maps +// ProcMap contains the process memory-mappings of the process +// read from `/proc/[pid]/maps`. type ProcMap struct { // The start address of current mapping. StartAddr uintptr @@ -61,17 +63,17 @@ type ProcMap struct { // parseDevice parses the device token of a line and converts it to a dev_t // (mkdev) like structure. func parseDevice(s string) (uint64, error) { - toks := strings.Split(s, ":") - if len(toks) < 2 { - return 0, fmt.Errorf("unexpected number of fields") + i := strings.Index(s, ":") + if i == -1 { + return 0, fmt.Errorf("%w: expected separator `:` in %s", ErrFileParse, s) } - major, err := strconv.ParseUint(toks[0], 16, 0) + major, err := strconv.ParseUint(s[0:i], 16, 0) if err != nil { return 0, err } - minor, err := strconv.ParseUint(toks[1], 16, 0) + minor, err := strconv.ParseUint(s[i+1:], 16, 0) if err != nil { return 0, err } @@ -79,7 +81,7 @@ func parseDevice(s string) (uint64, error) { return unix.Mkdev(uint32(major), uint32(minor)), nil } -// parseAddress just converts a hex-string to a uintptr +// parseAddress converts a hex-string to a uintptr. func parseAddress(s string) (uintptr, error) { a, err := strconv.ParseUint(s, 16, 0) if err != nil { @@ -89,19 +91,19 @@ func parseAddress(s string) (uintptr, error) { return uintptr(a), nil } -// parseAddresses parses the start-end address +// parseAddresses parses the start-end address. 
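The proc_maps.go hunks above replace strings.Split with strings.Index when cutting the `start-end` and `major:minor` tokens, which avoids allocating a slice per mapping line. A hedged end-to-end sketch (Linux-only, per the build tags), assuming the upstream module path:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	maps, err := p.ProcMaps() // parses /proc/self/maps
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range maps {
		fmt.Printf("%x-%x %s\n", m.StartAddr, m.EndAddr, m.Pathname)
	}
}
```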
func parseAddresses(s string) (uintptr, uintptr, error) { - toks := strings.Split(s, "-") - if len(toks) < 2 { - return 0, 0, fmt.Errorf("invalid address") + idx := strings.Index(s, "-") + if idx == -1 { + return 0, 0, fmt.Errorf("%w: expected separator `-` in %s", ErrFileParse, s) } - saddr, err := parseAddress(toks[0]) + saddr, err := parseAddress(s[0:idx]) if err != nil { return 0, 0, err } - eaddr, err := parseAddress(toks[1]) + eaddr, err := parseAddress(s[idx+1:]) if err != nil { return 0, 0, err } @@ -112,7 +114,7 @@ func parseAddresses(s string) (uintptr, uintptr, error) { // parsePermissions parses a token and returns any that are set. func parsePermissions(s string) (*ProcMapPermissions, error) { if len(s) < 4 { - return nil, fmt.Errorf("invalid permissions token") + return nil, fmt.Errorf("%w: invalid permissions token", ErrFileParse) } perms := ProcMapPermissions{} @@ -139,7 +141,7 @@ func parsePermissions(s string) (*ProcMapPermissions, error) { func parseProcMap(text string) (*ProcMap, error) { fields := strings.Fields(text) if len(fields) < 5 { - return nil, fmt.Errorf("truncated procmap entry") + return nil, fmt.Errorf("%w: truncated procmap entry", ErrFileParse) } saddr, eaddr, err := parseAddresses(fields[0]) diff --git a/tools/vendor/github.com/prometheus/procfs/proc_netstat.go b/tools/vendor/github.com/prometheus/procfs/proc_netstat.go new file mode 100644 index 000000000..8e3ff4d79 --- /dev/null +++ b/tools/vendor/github.com/prometheus/procfs/proc_netstat.go @@ -0,0 +1,443 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// ProcNetstat models the content of /proc//net/netstat. +type ProcNetstat struct { + // The process ID. 
+ PID int + TcpExt + IpExt +} + +type TcpExt struct { // nolint:revive + SyncookiesSent *float64 + SyncookiesRecv *float64 + SyncookiesFailed *float64 + EmbryonicRsts *float64 + PruneCalled *float64 + RcvPruned *float64 + OfoPruned *float64 + OutOfWindowIcmps *float64 + LockDroppedIcmps *float64 + ArpFilter *float64 + TW *float64 + TWRecycled *float64 + TWKilled *float64 + PAWSActive *float64 + PAWSEstab *float64 + DelayedACKs *float64 + DelayedACKLocked *float64 + DelayedACKLost *float64 + ListenOverflows *float64 + ListenDrops *float64 + TCPHPHits *float64 + TCPPureAcks *float64 + TCPHPAcks *float64 + TCPRenoRecovery *float64 + TCPSackRecovery *float64 + TCPSACKReneging *float64 + TCPSACKReorder *float64 + TCPRenoReorder *float64 + TCPTSReorder *float64 + TCPFullUndo *float64 + TCPPartialUndo *float64 + TCPDSACKUndo *float64 + TCPLossUndo *float64 + TCPLostRetransmit *float64 + TCPRenoFailures *float64 + TCPSackFailures *float64 + TCPLossFailures *float64 + TCPFastRetrans *float64 + TCPSlowStartRetrans *float64 + TCPTimeouts *float64 + TCPLossProbes *float64 + TCPLossProbeRecovery *float64 + TCPRenoRecoveryFail *float64 + TCPSackRecoveryFail *float64 + TCPRcvCollapsed *float64 + TCPDSACKOldSent *float64 + TCPDSACKOfoSent *float64 + TCPDSACKRecv *float64 + TCPDSACKOfoRecv *float64 + TCPAbortOnData *float64 + TCPAbortOnClose *float64 + TCPAbortOnMemory *float64 + TCPAbortOnTimeout *float64 + TCPAbortOnLinger *float64 + TCPAbortFailed *float64 + TCPMemoryPressures *float64 + TCPMemoryPressuresChrono *float64 + TCPSACKDiscard *float64 + TCPDSACKIgnoredOld *float64 + TCPDSACKIgnoredNoUndo *float64 + TCPSpuriousRTOs *float64 + TCPMD5NotFound *float64 + TCPMD5Unexpected *float64 + TCPMD5Failure *float64 + TCPSackShifted *float64 + TCPSackMerged *float64 + TCPSackShiftFallback *float64 + TCPBacklogDrop *float64 + PFMemallocDrop *float64 + TCPMinTTLDrop *float64 + TCPDeferAcceptDrop *float64 + IPReversePathFilter *float64 + TCPTimeWaitOverflow *float64 + TCPReqQFullDoCookies *float64 + TCPReqQFullDrop *float64 + TCPRetransFail *float64 + TCPRcvCoalesce *float64 + TCPRcvQDrop *float64 + TCPOFOQueue *float64 + TCPOFODrop *float64 + TCPOFOMerge *float64 + TCPChallengeACK *float64 + TCPSYNChallenge *float64 + TCPFastOpenActive *float64 + TCPFastOpenActiveFail *float64 + TCPFastOpenPassive *float64 + TCPFastOpenPassiveFail *float64 + TCPFastOpenListenOverflow *float64 + TCPFastOpenCookieReqd *float64 + TCPFastOpenBlackhole *float64 + TCPSpuriousRtxHostQueues *float64 + BusyPollRxPackets *float64 + TCPAutoCorking *float64 + TCPFromZeroWindowAdv *float64 + TCPToZeroWindowAdv *float64 + TCPWantZeroWindowAdv *float64 + TCPSynRetrans *float64 + TCPOrigDataSent *float64 + TCPHystartTrainDetect *float64 + TCPHystartTrainCwnd *float64 + TCPHystartDelayDetect *float64 + TCPHystartDelayCwnd *float64 + TCPACKSkippedSynRecv *float64 + TCPACKSkippedPAWS *float64 + TCPACKSkippedSeq *float64 + TCPACKSkippedFinWait2 *float64 + TCPACKSkippedTimeWait *float64 + TCPACKSkippedChallenge *float64 + TCPWinProbe *float64 + TCPKeepAlive *float64 + TCPMTUPFail *float64 + TCPMTUPSuccess *float64 + TCPWqueueTooBig *float64 +} + +type IpExt struct { // nolint:revive + InNoRoutes *float64 + InTruncatedPkts *float64 + InMcastPkts *float64 + OutMcastPkts *float64 + InBcastPkts *float64 + OutBcastPkts *float64 + InOctets *float64 + OutOctets *float64 + InMcastOctets *float64 + OutMcastOctets *float64 + InBcastOctets *float64 + OutBcastOctets *float64 + InCsumErrors *float64 + InNoECTPkts *float64 + InECT1Pkts *float64 + InECT0Pkts 
*float64 + InCEPkts *float64 + ReasmOverlaps *float64 +} + +func (p Proc) Netstat() (ProcNetstat, error) { + filename := p.path("net/netstat") + data, err := util.ReadFileNoStat(filename) + if err != nil { + return ProcNetstat{PID: p.PID}, err + } + procNetstat, err := parseProcNetstat(bytes.NewReader(data), filename) + procNetstat.PID = p.PID + return procNetstat, err +} + +// parseProcNetstat parses the metrics from proc//net/netstat file +// and returns a ProcNetstat structure. +func parseProcNetstat(r io.Reader, fileName string) (ProcNetstat, error) { + var ( + scanner = bufio.NewScanner(r) + procNetstat = ProcNetstat{} + ) + + for scanner.Scan() { + nameParts := strings.Split(scanner.Text(), " ") + scanner.Scan() + valueParts := strings.Split(scanner.Text(), " ") + // Remove trailing :. + protocol := strings.TrimSuffix(nameParts[0], ":") + if len(nameParts) != len(valueParts) { + return procNetstat, fmt.Errorf("%w: mismatch field count mismatch in %s: %s", + ErrFileParse, fileName, protocol) + } + for i := 1; i < len(nameParts); i++ { + value, err := strconv.ParseFloat(valueParts[i], 64) + if err != nil { + return procNetstat, err + } + key := nameParts[i] + + switch protocol { + case "TcpExt": + switch key { + case "SyncookiesSent": + procNetstat.TcpExt.SyncookiesSent = &value + case "SyncookiesRecv": + procNetstat.TcpExt.SyncookiesRecv = &value + case "SyncookiesFailed": + procNetstat.TcpExt.SyncookiesFailed = &value + case "EmbryonicRsts": + procNetstat.TcpExt.EmbryonicRsts = &value + case "PruneCalled": + procNetstat.TcpExt.PruneCalled = &value + case "RcvPruned": + procNetstat.TcpExt.RcvPruned = &value + case "OfoPruned": + procNetstat.TcpExt.OfoPruned = &value + case "OutOfWindowIcmps": + procNetstat.TcpExt.OutOfWindowIcmps = &value + case "LockDroppedIcmps": + procNetstat.TcpExt.LockDroppedIcmps = &value + case "ArpFilter": + procNetstat.TcpExt.ArpFilter = &value + case "TW": + procNetstat.TcpExt.TW = &value + case "TWRecycled": + procNetstat.TcpExt.TWRecycled = &value + case "TWKilled": + procNetstat.TcpExt.TWKilled = &value + case "PAWSActive": + procNetstat.TcpExt.PAWSActive = &value + case "PAWSEstab": + procNetstat.TcpExt.PAWSEstab = &value + case "DelayedACKs": + procNetstat.TcpExt.DelayedACKs = &value + case "DelayedACKLocked": + procNetstat.TcpExt.DelayedACKLocked = &value + case "DelayedACKLost": + procNetstat.TcpExt.DelayedACKLost = &value + case "ListenOverflows": + procNetstat.TcpExt.ListenOverflows = &value + case "ListenDrops": + procNetstat.TcpExt.ListenDrops = &value + case "TCPHPHits": + procNetstat.TcpExt.TCPHPHits = &value + case "TCPPureAcks": + procNetstat.TcpExt.TCPPureAcks = &value + case "TCPHPAcks": + procNetstat.TcpExt.TCPHPAcks = &value + case "TCPRenoRecovery": + procNetstat.TcpExt.TCPRenoRecovery = &value + case "TCPSackRecovery": + procNetstat.TcpExt.TCPSackRecovery = &value + case "TCPSACKReneging": + procNetstat.TcpExt.TCPSACKReneging = &value + case "TCPSACKReorder": + procNetstat.TcpExt.TCPSACKReorder = &value + case "TCPRenoReorder": + procNetstat.TcpExt.TCPRenoReorder = &value + case "TCPTSReorder": + procNetstat.TcpExt.TCPTSReorder = &value + case "TCPFullUndo": + procNetstat.TcpExt.TCPFullUndo = &value + case "TCPPartialUndo": + procNetstat.TcpExt.TCPPartialUndo = &value + case "TCPDSACKUndo": + procNetstat.TcpExt.TCPDSACKUndo = &value + case "TCPLossUndo": + procNetstat.TcpExt.TCPLossUndo = &value + case "TCPLostRetransmit": + procNetstat.TcpExt.TCPLostRetransmit = &value + case "TCPRenoFailures": + procNetstat.TcpExt.TCPRenoFailures = 
&value + case "TCPSackFailures": + procNetstat.TcpExt.TCPSackFailures = &value + case "TCPLossFailures": + procNetstat.TcpExt.TCPLossFailures = &value + case "TCPFastRetrans": + procNetstat.TcpExt.TCPFastRetrans = &value + case "TCPSlowStartRetrans": + procNetstat.TcpExt.TCPSlowStartRetrans = &value + case "TCPTimeouts": + procNetstat.TcpExt.TCPTimeouts = &value + case "TCPLossProbes": + procNetstat.TcpExt.TCPLossProbes = &value + case "TCPLossProbeRecovery": + procNetstat.TcpExt.TCPLossProbeRecovery = &value + case "TCPRenoRecoveryFail": + procNetstat.TcpExt.TCPRenoRecoveryFail = &value + case "TCPSackRecoveryFail": + procNetstat.TcpExt.TCPSackRecoveryFail = &value + case "TCPRcvCollapsed": + procNetstat.TcpExt.TCPRcvCollapsed = &value + case "TCPDSACKOldSent": + procNetstat.TcpExt.TCPDSACKOldSent = &value + case "TCPDSACKOfoSent": + procNetstat.TcpExt.TCPDSACKOfoSent = &value + case "TCPDSACKRecv": + procNetstat.TcpExt.TCPDSACKRecv = &value + case "TCPDSACKOfoRecv": + procNetstat.TcpExt.TCPDSACKOfoRecv = &value + case "TCPAbortOnData": + procNetstat.TcpExt.TCPAbortOnData = &value + case "TCPAbortOnClose": + procNetstat.TcpExt.TCPAbortOnClose = &value + case "TCPDeferAcceptDrop": + procNetstat.TcpExt.TCPDeferAcceptDrop = &value + case "IPReversePathFilter": + procNetstat.TcpExt.IPReversePathFilter = &value + case "TCPTimeWaitOverflow": + procNetstat.TcpExt.TCPTimeWaitOverflow = &value + case "TCPReqQFullDoCookies": + procNetstat.TcpExt.TCPReqQFullDoCookies = &value + case "TCPReqQFullDrop": + procNetstat.TcpExt.TCPReqQFullDrop = &value + case "TCPRetransFail": + procNetstat.TcpExt.TCPRetransFail = &value + case "TCPRcvCoalesce": + procNetstat.TcpExt.TCPRcvCoalesce = &value + case "TCPRcvQDrop": + procNetstat.TcpExt.TCPRcvQDrop = &value + case "TCPOFOQueue": + procNetstat.TcpExt.TCPOFOQueue = &value + case "TCPOFODrop": + procNetstat.TcpExt.TCPOFODrop = &value + case "TCPOFOMerge": + procNetstat.TcpExt.TCPOFOMerge = &value + case "TCPChallengeACK": + procNetstat.TcpExt.TCPChallengeACK = &value + case "TCPSYNChallenge": + procNetstat.TcpExt.TCPSYNChallenge = &value + case "TCPFastOpenActive": + procNetstat.TcpExt.TCPFastOpenActive = &value + case "TCPFastOpenActiveFail": + procNetstat.TcpExt.TCPFastOpenActiveFail = &value + case "TCPFastOpenPassive": + procNetstat.TcpExt.TCPFastOpenPassive = &value + case "TCPFastOpenPassiveFail": + procNetstat.TcpExt.TCPFastOpenPassiveFail = &value + case "TCPFastOpenListenOverflow": + procNetstat.TcpExt.TCPFastOpenListenOverflow = &value + case "TCPFastOpenCookieReqd": + procNetstat.TcpExt.TCPFastOpenCookieReqd = &value + case "TCPFastOpenBlackhole": + procNetstat.TcpExt.TCPFastOpenBlackhole = &value + case "TCPSpuriousRtxHostQueues": + procNetstat.TcpExt.TCPSpuriousRtxHostQueues = &value + case "BusyPollRxPackets": + procNetstat.TcpExt.BusyPollRxPackets = &value + case "TCPAutoCorking": + procNetstat.TcpExt.TCPAutoCorking = &value + case "TCPFromZeroWindowAdv": + procNetstat.TcpExt.TCPFromZeroWindowAdv = &value + case "TCPToZeroWindowAdv": + procNetstat.TcpExt.TCPToZeroWindowAdv = &value + case "TCPWantZeroWindowAdv": + procNetstat.TcpExt.TCPWantZeroWindowAdv = &value + case "TCPSynRetrans": + procNetstat.TcpExt.TCPSynRetrans = &value + case "TCPOrigDataSent": + procNetstat.TcpExt.TCPOrigDataSent = &value + case "TCPHystartTrainDetect": + procNetstat.TcpExt.TCPHystartTrainDetect = &value + case "TCPHystartTrainCwnd": + procNetstat.TcpExt.TCPHystartTrainCwnd = &value + case "TCPHystartDelayDetect": + procNetstat.TcpExt.TCPHystartDelayDetect = &value + 
case "TCPHystartDelayCwnd": + procNetstat.TcpExt.TCPHystartDelayCwnd = &value + case "TCPACKSkippedSynRecv": + procNetstat.TcpExt.TCPACKSkippedSynRecv = &value + case "TCPACKSkippedPAWS": + procNetstat.TcpExt.TCPACKSkippedPAWS = &value + case "TCPACKSkippedSeq": + procNetstat.TcpExt.TCPACKSkippedSeq = &value + case "TCPACKSkippedFinWait2": + procNetstat.TcpExt.TCPACKSkippedFinWait2 = &value + case "TCPACKSkippedTimeWait": + procNetstat.TcpExt.TCPACKSkippedTimeWait = &value + case "TCPACKSkippedChallenge": + procNetstat.TcpExt.TCPACKSkippedChallenge = &value + case "TCPWinProbe": + procNetstat.TcpExt.TCPWinProbe = &value + case "TCPKeepAlive": + procNetstat.TcpExt.TCPKeepAlive = &value + case "TCPMTUPFail": + procNetstat.TcpExt.TCPMTUPFail = &value + case "TCPMTUPSuccess": + procNetstat.TcpExt.TCPMTUPSuccess = &value + case "TCPWqueueTooBig": + procNetstat.TcpExt.TCPWqueueTooBig = &value + } + case "IpExt": + switch key { + case "InNoRoutes": + procNetstat.IpExt.InNoRoutes = &value + case "InTruncatedPkts": + procNetstat.IpExt.InTruncatedPkts = &value + case "InMcastPkts": + procNetstat.IpExt.InMcastPkts = &value + case "OutMcastPkts": + procNetstat.IpExt.OutMcastPkts = &value + case "InBcastPkts": + procNetstat.IpExt.InBcastPkts = &value + case "OutBcastPkts": + procNetstat.IpExt.OutBcastPkts = &value + case "InOctets": + procNetstat.IpExt.InOctets = &value + case "OutOctets": + procNetstat.IpExt.OutOctets = &value + case "InMcastOctets": + procNetstat.IpExt.InMcastOctets = &value + case "OutMcastOctets": + procNetstat.IpExt.OutMcastOctets = &value + case "InBcastOctets": + procNetstat.IpExt.InBcastOctets = &value + case "OutBcastOctets": + procNetstat.IpExt.OutBcastOctets = &value + case "InCsumErrors": + procNetstat.IpExt.InCsumErrors = &value + case "InNoECTPkts": + procNetstat.IpExt.InNoECTPkts = &value + case "InECT1Pkts": + procNetstat.IpExt.InECT1Pkts = &value + case "InECT0Pkts": + procNetstat.IpExt.InECT0Pkts = &value + case "InCEPkts": + procNetstat.IpExt.InCEPkts = &value + case "ReasmOverlaps": + procNetstat.IpExt.ReasmOverlaps = &value + } + } + } + } + return procNetstat, scanner.Err() +} diff --git a/tools/vendor/github.com/prometheus/procfs/proc_ns.go b/tools/vendor/github.com/prometheus/procfs/proc_ns.go index 391b4cbd1..0f8f847f9 100644 --- a/tools/vendor/github.com/prometheus/procfs/proc_ns.go +++ b/tools/vendor/github.com/prometheus/procfs/proc_ns.go @@ -40,7 +40,7 @@ func (p Proc) Namespaces() (Namespaces, error) { names, err := d.Readdirnames(-1) if err != nil { - return nil, fmt.Errorf("failed to read contents of ns dir: %w", err) + return nil, fmt.Errorf("%w: failed to read contents of ns dir: %w", ErrFileRead, err) } ns := make(Namespaces, len(names)) @@ -52,13 +52,13 @@ func (p Proc) Namespaces() (Namespaces, error) { fields := strings.SplitN(target, ":", 2) if len(fields) != 2 { - return nil, fmt.Errorf("failed to parse namespace type and inode from %q", target) + return nil, fmt.Errorf("%w: namespace type and inode from %q", ErrFileParse, target) } typ := fields[0] inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32) if err != nil { - return nil, fmt.Errorf("failed to parse inode from %q: %w", fields[1], err) + return nil, fmt.Errorf("%w: inode from %q: %w", ErrFileParse, fields[1], err) } ns[name] = Namespace{typ, uint32(inode)} diff --git a/tools/vendor/github.com/prometheus/procfs/proc_psi.go b/tools/vendor/github.com/prometheus/procfs/proc_psi.go index dc6c14f0a..ccd35f153 100644 --- a/tools/vendor/github.com/prometheus/procfs/proc_psi.go 
+++ b/tools/vendor/github.com/prometheus/procfs/proc_psi.go @@ -35,9 +35,10 @@ import ( const lineFormat = "avg10=%f avg60=%f avg300=%f total=%d" -// PSILine is a single line of values as returned by /proc/pressure/* -// The Avg entries are averages over n seconds, as a percentage -// The Total line is in microseconds +// PSILine is a single line of values as returned by `/proc/pressure/*`. +// +// The Avg entries are averages over n seconds, as a percentage. +// The Total line is in microseconds. type PSILine struct { Avg10 float64 Avg60 float64 @@ -46,8 +47,9 @@ type PSILine struct { } // PSIStats represent pressure stall information from /proc/pressure/* -// Some indicates the share of time in which at least some tasks are stalled -// Full indicates the share of time in which all non-idle tasks are stalled simultaneously +// +// "Some" indicates the share of time in which at least some tasks are stalled. +// "Full" indicates the share of time in which all non-idle tasks are stalled simultaneously. type PSIStats struct { Some *PSILine Full *PSILine @@ -59,14 +61,14 @@ type PSIStats struct { func (fs FS) PSIStatsForResource(resource string) (PSIStats, error) { data, err := util.ReadFileNoStat(fs.proc.Path(fmt.Sprintf("%s/%s", "pressure", resource))) if err != nil { - return PSIStats{}, fmt.Errorf("psi_stats: unavailable for %q: %w", resource, err) + return PSIStats{}, fmt.Errorf("%w: psi_stats: unavailable for %q: %w", ErrFileRead, resource, err) } - return parsePSIStats(resource, bytes.NewReader(data)) + return parsePSIStats(bytes.NewReader(data)) } -// parsePSIStats parses the specified file for pressure stall information -func parsePSIStats(resource string, r io.Reader) (PSIStats, error) { +// parsePSIStats parses the specified file for pressure stall information. +func parsePSIStats(r io.Reader) (PSIStats, error) { psiStats := PSIStats{} scanner := bufio.NewScanner(r) diff --git a/tools/vendor/github.com/prometheus/procfs/proc_smaps.go b/tools/vendor/github.com/prometheus/procfs/proc_smaps.go index a576a720a..9a297afcf 100644 --- a/tools/vendor/github.com/prometheus/procfs/proc_smaps.go +++ b/tools/vendor/github.com/prometheus/procfs/proc_smaps.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !windows // +build !windows package procfs @@ -18,7 +19,6 @@ package procfs import ( "bufio" "errors" - "fmt" "os" "regexp" "strconv" @@ -28,30 +28,30 @@ import ( ) var ( - // match the header line before each mapped zone in /proc/pid/smaps + // Match the header line before each mapped zone in `/proc/pid/smaps`. procSMapsHeaderLine = regexp.MustCompile(`^[a-f0-9].*$`) ) type ProcSMapsRollup struct { - // Amount of the mapping that is currently resident in RAM + // Amount of the mapping that is currently resident in RAM. Rss uint64 - // Process's proportional share of this mapping + // Process's proportional share of this mapping. Pss uint64 - // Size in bytes of clean shared pages + // Size in bytes of clean shared pages. SharedClean uint64 - // Size in bytes of dirty shared pages + // Size in bytes of dirty shared pages. SharedDirty uint64 - // Size in bytes of clean private pages + // Size in bytes of clean private pages. PrivateClean uint64 - // Size in bytes of dirty private pages + // Size in bytes of dirty private pages. PrivateDirty uint64 - // Amount of memory currently marked as referenced or accessed + // Amount of memory currently marked as referenced or accessed. 
Referenced uint64 - // Amount of memory that does not belong to any file + // Amount of memory that does not belong to any file. Anonymous uint64 - // Amount would-be-anonymous memory currently on swap + // Amount would-be-anonymous memory currently on swap. Swap uint64 - // Process's proportional memory on swap + // Process's proportional memory on swap. SwapPss uint64 } @@ -116,7 +116,6 @@ func (p Proc) procSMapsRollupManual() (ProcSMapsRollup, error) { func (s *ProcSMapsRollup) parseLine(line string) error { kv := strings.SplitN(line, ":", 2) if len(kv) != 2 { - fmt.Println(line) return errors.New("invalid net/dev line, missing colon") } @@ -126,7 +125,7 @@ func (s *ProcSMapsRollup) parseLine(line string) error { } v := strings.TrimSpace(kv[1]) - v = strings.TrimRight(v, " kB") + v = strings.TrimSuffix(v, " kB") vKBytes, err := strconv.ParseUint(v, 10, 64) if err != nil { @@ -134,12 +133,12 @@ func (s *ProcSMapsRollup) parseLine(line string) error { } vBytes := vKBytes * 1024 - s.addValue(k, v, vKBytes, vBytes) + s.addValue(k, vBytes) return nil } -func (s *ProcSMapsRollup) addValue(k string, vString string, vUint uint64, vUintBytes uint64) { +func (s *ProcSMapsRollup) addValue(k string, vUintBytes uint64) { switch k { case "Rss": s.Rss += vUintBytes diff --git a/tools/vendor/github.com/prometheus/procfs/proc_snmp.go b/tools/vendor/github.com/prometheus/procfs/proc_snmp.go new file mode 100644 index 000000000..b9d2cf642 --- /dev/null +++ b/tools/vendor/github.com/prometheus/procfs/proc_snmp.go @@ -0,0 +1,353 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// ProcSnmp models the content of /proc//net/snmp. +type ProcSnmp struct { + // The process ID. 
+ PID int + Ip + Icmp + IcmpMsg + Tcp + Udp + UdpLite +} + +type Ip struct { // nolint:revive + Forwarding *float64 + DefaultTTL *float64 + InReceives *float64 + InHdrErrors *float64 + InAddrErrors *float64 + ForwDatagrams *float64 + InUnknownProtos *float64 + InDiscards *float64 + InDelivers *float64 + OutRequests *float64 + OutDiscards *float64 + OutNoRoutes *float64 + ReasmTimeout *float64 + ReasmReqds *float64 + ReasmOKs *float64 + ReasmFails *float64 + FragOKs *float64 + FragFails *float64 + FragCreates *float64 +} + +type Icmp struct { // nolint:revive + InMsgs *float64 + InErrors *float64 + InCsumErrors *float64 + InDestUnreachs *float64 + InTimeExcds *float64 + InParmProbs *float64 + InSrcQuenchs *float64 + InRedirects *float64 + InEchos *float64 + InEchoReps *float64 + InTimestamps *float64 + InTimestampReps *float64 + InAddrMasks *float64 + InAddrMaskReps *float64 + OutMsgs *float64 + OutErrors *float64 + OutDestUnreachs *float64 + OutTimeExcds *float64 + OutParmProbs *float64 + OutSrcQuenchs *float64 + OutRedirects *float64 + OutEchos *float64 + OutEchoReps *float64 + OutTimestamps *float64 + OutTimestampReps *float64 + OutAddrMasks *float64 + OutAddrMaskReps *float64 +} + +type IcmpMsg struct { + InType3 *float64 + OutType3 *float64 +} + +type Tcp struct { // nolint:revive + RtoAlgorithm *float64 + RtoMin *float64 + RtoMax *float64 + MaxConn *float64 + ActiveOpens *float64 + PassiveOpens *float64 + AttemptFails *float64 + EstabResets *float64 + CurrEstab *float64 + InSegs *float64 + OutSegs *float64 + RetransSegs *float64 + InErrs *float64 + OutRsts *float64 + InCsumErrors *float64 +} + +type Udp struct { // nolint:revive + InDatagrams *float64 + NoPorts *float64 + InErrors *float64 + OutDatagrams *float64 + RcvbufErrors *float64 + SndbufErrors *float64 + InCsumErrors *float64 + IgnoredMulti *float64 +} + +type UdpLite struct { // nolint:revive + InDatagrams *float64 + NoPorts *float64 + InErrors *float64 + OutDatagrams *float64 + RcvbufErrors *float64 + SndbufErrors *float64 + InCsumErrors *float64 + IgnoredMulti *float64 +} + +func (p Proc) Snmp() (ProcSnmp, error) { + filename := p.path("net/snmp") + data, err := util.ReadFileNoStat(filename) + if err != nil { + return ProcSnmp{PID: p.PID}, err + } + procSnmp, err := parseSnmp(bytes.NewReader(data), filename) + procSnmp.PID = p.PID + return procSnmp, err +} + +// parseSnmp parses the metrics from proc//net/snmp file +// and returns a map contains those metrics (e.g. {"Ip": {"Forwarding": 2}}). +func parseSnmp(r io.Reader, fileName string) (ProcSnmp, error) { + var ( + scanner = bufio.NewScanner(r) + procSnmp = ProcSnmp{} + ) + + for scanner.Scan() { + nameParts := strings.Split(scanner.Text(), " ") + scanner.Scan() + valueParts := strings.Split(scanner.Text(), " ") + // Remove trailing :. 
+ protocol := strings.TrimSuffix(nameParts[0], ":") + if len(nameParts) != len(valueParts) { + return procSnmp, fmt.Errorf("%w: mismatch field count mismatch in %s: %s", + ErrFileParse, fileName, protocol) + } + for i := 1; i < len(nameParts); i++ { + value, err := strconv.ParseFloat(valueParts[i], 64) + if err != nil { + return procSnmp, err + } + key := nameParts[i] + + switch protocol { + case "Ip": + switch key { + case "Forwarding": + procSnmp.Ip.Forwarding = &value + case "DefaultTTL": + procSnmp.Ip.DefaultTTL = &value + case "InReceives": + procSnmp.Ip.InReceives = &value + case "InHdrErrors": + procSnmp.Ip.InHdrErrors = &value + case "InAddrErrors": + procSnmp.Ip.InAddrErrors = &value + case "ForwDatagrams": + procSnmp.Ip.ForwDatagrams = &value + case "InUnknownProtos": + procSnmp.Ip.InUnknownProtos = &value + case "InDiscards": + procSnmp.Ip.InDiscards = &value + case "InDelivers": + procSnmp.Ip.InDelivers = &value + case "OutRequests": + procSnmp.Ip.OutRequests = &value + case "OutDiscards": + procSnmp.Ip.OutDiscards = &value + case "OutNoRoutes": + procSnmp.Ip.OutNoRoutes = &value + case "ReasmTimeout": + procSnmp.Ip.ReasmTimeout = &value + case "ReasmReqds": + procSnmp.Ip.ReasmReqds = &value + case "ReasmOKs": + procSnmp.Ip.ReasmOKs = &value + case "ReasmFails": + procSnmp.Ip.ReasmFails = &value + case "FragOKs": + procSnmp.Ip.FragOKs = &value + case "FragFails": + procSnmp.Ip.FragFails = &value + case "FragCreates": + procSnmp.Ip.FragCreates = &value + } + case "Icmp": + switch key { + case "InMsgs": + procSnmp.Icmp.InMsgs = &value + case "InErrors": + procSnmp.Icmp.InErrors = &value + case "InCsumErrors": + procSnmp.Icmp.InCsumErrors = &value + case "InDestUnreachs": + procSnmp.Icmp.InDestUnreachs = &value + case "InTimeExcds": + procSnmp.Icmp.InTimeExcds = &value + case "InParmProbs": + procSnmp.Icmp.InParmProbs = &value + case "InSrcQuenchs": + procSnmp.Icmp.InSrcQuenchs = &value + case "InRedirects": + procSnmp.Icmp.InRedirects = &value + case "InEchos": + procSnmp.Icmp.InEchos = &value + case "InEchoReps": + procSnmp.Icmp.InEchoReps = &value + case "InTimestamps": + procSnmp.Icmp.InTimestamps = &value + case "InTimestampReps": + procSnmp.Icmp.InTimestampReps = &value + case "InAddrMasks": + procSnmp.Icmp.InAddrMasks = &value + case "InAddrMaskReps": + procSnmp.Icmp.InAddrMaskReps = &value + case "OutMsgs": + procSnmp.Icmp.OutMsgs = &value + case "OutErrors": + procSnmp.Icmp.OutErrors = &value + case "OutDestUnreachs": + procSnmp.Icmp.OutDestUnreachs = &value + case "OutTimeExcds": + procSnmp.Icmp.OutTimeExcds = &value + case "OutParmProbs": + procSnmp.Icmp.OutParmProbs = &value + case "OutSrcQuenchs": + procSnmp.Icmp.OutSrcQuenchs = &value + case "OutRedirects": + procSnmp.Icmp.OutRedirects = &value + case "OutEchos": + procSnmp.Icmp.OutEchos = &value + case "OutEchoReps": + procSnmp.Icmp.OutEchoReps = &value + case "OutTimestamps": + procSnmp.Icmp.OutTimestamps = &value + case "OutTimestampReps": + procSnmp.Icmp.OutTimestampReps = &value + case "OutAddrMasks": + procSnmp.Icmp.OutAddrMasks = &value + case "OutAddrMaskReps": + procSnmp.Icmp.OutAddrMaskReps = &value + } + case "IcmpMsg": + switch key { + case "InType3": + procSnmp.IcmpMsg.InType3 = &value + case "OutType3": + procSnmp.IcmpMsg.OutType3 = &value + } + case "Tcp": + switch key { + case "RtoAlgorithm": + procSnmp.Tcp.RtoAlgorithm = &value + case "RtoMin": + procSnmp.Tcp.RtoMin = &value + case "RtoMax": + procSnmp.Tcp.RtoMax = &value + case "MaxConn": + procSnmp.Tcp.MaxConn = &value + case "ActiveOpens": + 
procSnmp.Tcp.ActiveOpens = &value + case "PassiveOpens": + procSnmp.Tcp.PassiveOpens = &value + case "AttemptFails": + procSnmp.Tcp.AttemptFails = &value + case "EstabResets": + procSnmp.Tcp.EstabResets = &value + case "CurrEstab": + procSnmp.Tcp.CurrEstab = &value + case "InSegs": + procSnmp.Tcp.InSegs = &value + case "OutSegs": + procSnmp.Tcp.OutSegs = &value + case "RetransSegs": + procSnmp.Tcp.RetransSegs = &value + case "InErrs": + procSnmp.Tcp.InErrs = &value + case "OutRsts": + procSnmp.Tcp.OutRsts = &value + case "InCsumErrors": + procSnmp.Tcp.InCsumErrors = &value + } + case "Udp": + switch key { + case "InDatagrams": + procSnmp.Udp.InDatagrams = &value + case "NoPorts": + procSnmp.Udp.NoPorts = &value + case "InErrors": + procSnmp.Udp.InErrors = &value + case "OutDatagrams": + procSnmp.Udp.OutDatagrams = &value + case "RcvbufErrors": + procSnmp.Udp.RcvbufErrors = &value + case "SndbufErrors": + procSnmp.Udp.SndbufErrors = &value + case "InCsumErrors": + procSnmp.Udp.InCsumErrors = &value + case "IgnoredMulti": + procSnmp.Udp.IgnoredMulti = &value + } + case "UdpLite": + switch key { + case "InDatagrams": + procSnmp.UdpLite.InDatagrams = &value + case "NoPorts": + procSnmp.UdpLite.NoPorts = &value + case "InErrors": + procSnmp.UdpLite.InErrors = &value + case "OutDatagrams": + procSnmp.UdpLite.OutDatagrams = &value + case "RcvbufErrors": + procSnmp.UdpLite.RcvbufErrors = &value + case "SndbufErrors": + procSnmp.UdpLite.SndbufErrors = &value + case "InCsumErrors": + procSnmp.UdpLite.InCsumErrors = &value + case "IgnoredMulti": + procSnmp.UdpLite.IgnoredMulti = &value + } + } + } + } + return procSnmp, scanner.Err() +} diff --git a/tools/vendor/github.com/prometheus/procfs/proc_snmp6.go b/tools/vendor/github.com/prometheus/procfs/proc_snmp6.go new file mode 100644 index 000000000..3059cc6a1 --- /dev/null +++ b/tools/vendor/github.com/prometheus/procfs/proc_snmp6.go @@ -0,0 +1,381 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "errors" + "io" + "os" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// ProcSnmp6 models the content of /proc//net/snmp6. +type ProcSnmp6 struct { + // The process ID. 
+ PID int + Ip6 + Icmp6 + Udp6 + UdpLite6 +} + +type Ip6 struct { // nolint:revive + InReceives *float64 + InHdrErrors *float64 + InTooBigErrors *float64 + InNoRoutes *float64 + InAddrErrors *float64 + InUnknownProtos *float64 + InTruncatedPkts *float64 + InDiscards *float64 + InDelivers *float64 + OutForwDatagrams *float64 + OutRequests *float64 + OutDiscards *float64 + OutNoRoutes *float64 + ReasmTimeout *float64 + ReasmReqds *float64 + ReasmOKs *float64 + ReasmFails *float64 + FragOKs *float64 + FragFails *float64 + FragCreates *float64 + InMcastPkts *float64 + OutMcastPkts *float64 + InOctets *float64 + OutOctets *float64 + InMcastOctets *float64 + OutMcastOctets *float64 + InBcastOctets *float64 + OutBcastOctets *float64 + InNoECTPkts *float64 + InECT1Pkts *float64 + InECT0Pkts *float64 + InCEPkts *float64 +} + +type Icmp6 struct { + InMsgs *float64 + InErrors *float64 + OutMsgs *float64 + OutErrors *float64 + InCsumErrors *float64 + InDestUnreachs *float64 + InPktTooBigs *float64 + InTimeExcds *float64 + InParmProblems *float64 + InEchos *float64 + InEchoReplies *float64 + InGroupMembQueries *float64 + InGroupMembResponses *float64 + InGroupMembReductions *float64 + InRouterSolicits *float64 + InRouterAdvertisements *float64 + InNeighborSolicits *float64 + InNeighborAdvertisements *float64 + InRedirects *float64 + InMLDv2Reports *float64 + OutDestUnreachs *float64 + OutPktTooBigs *float64 + OutTimeExcds *float64 + OutParmProblems *float64 + OutEchos *float64 + OutEchoReplies *float64 + OutGroupMembQueries *float64 + OutGroupMembResponses *float64 + OutGroupMembReductions *float64 + OutRouterSolicits *float64 + OutRouterAdvertisements *float64 + OutNeighborSolicits *float64 + OutNeighborAdvertisements *float64 + OutRedirects *float64 + OutMLDv2Reports *float64 + InType1 *float64 + InType134 *float64 + InType135 *float64 + InType136 *float64 + InType143 *float64 + OutType133 *float64 + OutType135 *float64 + OutType136 *float64 + OutType143 *float64 +} + +type Udp6 struct { // nolint:revive + InDatagrams *float64 + NoPorts *float64 + InErrors *float64 + OutDatagrams *float64 + RcvbufErrors *float64 + SndbufErrors *float64 + InCsumErrors *float64 + IgnoredMulti *float64 +} + +type UdpLite6 struct { // nolint:revive + InDatagrams *float64 + NoPorts *float64 + InErrors *float64 + OutDatagrams *float64 + RcvbufErrors *float64 + SndbufErrors *float64 + InCsumErrors *float64 +} + +func (p Proc) Snmp6() (ProcSnmp6, error) { + filename := p.path("net/snmp6") + data, err := util.ReadFileNoStat(filename) + if err != nil { + // On systems with IPv6 disabled, this file won't exist. + // Do nothing. + if errors.Is(err, os.ErrNotExist) { + return ProcSnmp6{PID: p.PID}, nil + } + + return ProcSnmp6{PID: p.PID}, err + } + + procSnmp6, err := parseSNMP6Stats(bytes.NewReader(data)) + procSnmp6.PID = p.PID + return procSnmp6, err +} + +// parseSnmp6 parses the metrics from proc//net/snmp6 file +// and returns a map contains those metrics. 
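ProcNetstat, ProcSnmp and ProcSnmp6 above all expose counters as *float64, so a metric the running kernel does not report (nil) stays distinguishable from a counter that is genuinely zero. A hedged sketch of the nil-check pattern, assuming the upstream module path:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	snmp6, err := p.Snmp6() // empty, not an error, if IPv6 is disabled
	if err != nil {
		log.Fatal(err)
	}
	// nil means the kernel did not report this counter at all.
	if v := snmp6.Ip6.InReceives; v != nil {
		fmt.Println("Ip6 InReceives:", *v)
	}
}
```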
+func parseSNMP6Stats(r io.Reader) (ProcSnmp6, error) { + var ( + scanner = bufio.NewScanner(r) + procSnmp6 = ProcSnmp6{} + ) + + for scanner.Scan() { + stat := strings.Fields(scanner.Text()) + if len(stat) < 2 { + continue + } + // Expect to have "6" in metric name, skip line otherwise + if sixIndex := strings.Index(stat[0], "6"); sixIndex != -1 { + protocol := stat[0][:sixIndex+1] + key := stat[0][sixIndex+1:] + value, err := strconv.ParseFloat(stat[1], 64) + if err != nil { + return procSnmp6, err + } + + switch protocol { + case "Ip6": + switch key { + case "InReceives": + procSnmp6.Ip6.InReceives = &value + case "InHdrErrors": + procSnmp6.Ip6.InHdrErrors = &value + case "InTooBigErrors": + procSnmp6.Ip6.InTooBigErrors = &value + case "InNoRoutes": + procSnmp6.Ip6.InNoRoutes = &value + case "InAddrErrors": + procSnmp6.Ip6.InAddrErrors = &value + case "InUnknownProtos": + procSnmp6.Ip6.InUnknownProtos = &value + case "InTruncatedPkts": + procSnmp6.Ip6.InTruncatedPkts = &value + case "InDiscards": + procSnmp6.Ip6.InDiscards = &value + case "InDelivers": + procSnmp6.Ip6.InDelivers = &value + case "OutForwDatagrams": + procSnmp6.Ip6.OutForwDatagrams = &value + case "OutRequests": + procSnmp6.Ip6.OutRequests = &value + case "OutDiscards": + procSnmp6.Ip6.OutDiscards = &value + case "OutNoRoutes": + procSnmp6.Ip6.OutNoRoutes = &value + case "ReasmTimeout": + procSnmp6.Ip6.ReasmTimeout = &value + case "ReasmReqds": + procSnmp6.Ip6.ReasmReqds = &value + case "ReasmOKs": + procSnmp6.Ip6.ReasmOKs = &value + case "ReasmFails": + procSnmp6.Ip6.ReasmFails = &value + case "FragOKs": + procSnmp6.Ip6.FragOKs = &value + case "FragFails": + procSnmp6.Ip6.FragFails = &value + case "FragCreates": + procSnmp6.Ip6.FragCreates = &value + case "InMcastPkts": + procSnmp6.Ip6.InMcastPkts = &value + case "OutMcastPkts": + procSnmp6.Ip6.OutMcastPkts = &value + case "InOctets": + procSnmp6.Ip6.InOctets = &value + case "OutOctets": + procSnmp6.Ip6.OutOctets = &value + case "InMcastOctets": + procSnmp6.Ip6.InMcastOctets = &value + case "OutMcastOctets": + procSnmp6.Ip6.OutMcastOctets = &value + case "InBcastOctets": + procSnmp6.Ip6.InBcastOctets = &value + case "OutBcastOctets": + procSnmp6.Ip6.OutBcastOctets = &value + case "InNoECTPkts": + procSnmp6.Ip6.InNoECTPkts = &value + case "InECT1Pkts": + procSnmp6.Ip6.InECT1Pkts = &value + case "InECT0Pkts": + procSnmp6.Ip6.InECT0Pkts = &value + case "InCEPkts": + procSnmp6.Ip6.InCEPkts = &value + + } + case "Icmp6": + switch key { + case "InMsgs": + procSnmp6.Icmp6.InMsgs = &value + case "InErrors": + procSnmp6.Icmp6.InErrors = &value + case "OutMsgs": + procSnmp6.Icmp6.OutMsgs = &value + case "OutErrors": + procSnmp6.Icmp6.OutErrors = &value + case "InCsumErrors": + procSnmp6.Icmp6.InCsumErrors = &value + case "InDestUnreachs": + procSnmp6.Icmp6.InDestUnreachs = &value + case "InPktTooBigs": + procSnmp6.Icmp6.InPktTooBigs = &value + case "InTimeExcds": + procSnmp6.Icmp6.InTimeExcds = &value + case "InParmProblems": + procSnmp6.Icmp6.InParmProblems = &value + case "InEchos": + procSnmp6.Icmp6.InEchos = &value + case "InEchoReplies": + procSnmp6.Icmp6.InEchoReplies = &value + case "InGroupMembQueries": + procSnmp6.Icmp6.InGroupMembQueries = &value + case "InGroupMembResponses": + procSnmp6.Icmp6.InGroupMembResponses = &value + case "InGroupMembReductions": + procSnmp6.Icmp6.InGroupMembReductions = &value + case "InRouterSolicits": + procSnmp6.Icmp6.InRouterSolicits = &value + case "InRouterAdvertisements": + procSnmp6.Icmp6.InRouterAdvertisements = &value + case 
"InNeighborSolicits": + procSnmp6.Icmp6.InNeighborSolicits = &value + case "InNeighborAdvertisements": + procSnmp6.Icmp6.InNeighborAdvertisements = &value + case "InRedirects": + procSnmp6.Icmp6.InRedirects = &value + case "InMLDv2Reports": + procSnmp6.Icmp6.InMLDv2Reports = &value + case "OutDestUnreachs": + procSnmp6.Icmp6.OutDestUnreachs = &value + case "OutPktTooBigs": + procSnmp6.Icmp6.OutPktTooBigs = &value + case "OutTimeExcds": + procSnmp6.Icmp6.OutTimeExcds = &value + case "OutParmProblems": + procSnmp6.Icmp6.OutParmProblems = &value + case "OutEchos": + procSnmp6.Icmp6.OutEchos = &value + case "OutEchoReplies": + procSnmp6.Icmp6.OutEchoReplies = &value + case "OutGroupMembQueries": + procSnmp6.Icmp6.OutGroupMembQueries = &value + case "OutGroupMembResponses": + procSnmp6.Icmp6.OutGroupMembResponses = &value + case "OutGroupMembReductions": + procSnmp6.Icmp6.OutGroupMembReductions = &value + case "OutRouterSolicits": + procSnmp6.Icmp6.OutRouterSolicits = &value + case "OutRouterAdvertisements": + procSnmp6.Icmp6.OutRouterAdvertisements = &value + case "OutNeighborSolicits": + procSnmp6.Icmp6.OutNeighborSolicits = &value + case "OutNeighborAdvertisements": + procSnmp6.Icmp6.OutNeighborAdvertisements = &value + case "OutRedirects": + procSnmp6.Icmp6.OutRedirects = &value + case "OutMLDv2Reports": + procSnmp6.Icmp6.OutMLDv2Reports = &value + case "InType1": + procSnmp6.Icmp6.InType1 = &value + case "InType134": + procSnmp6.Icmp6.InType134 = &value + case "InType135": + procSnmp6.Icmp6.InType135 = &value + case "InType136": + procSnmp6.Icmp6.InType136 = &value + case "InType143": + procSnmp6.Icmp6.InType143 = &value + case "OutType133": + procSnmp6.Icmp6.OutType133 = &value + case "OutType135": + procSnmp6.Icmp6.OutType135 = &value + case "OutType136": + procSnmp6.Icmp6.OutType136 = &value + case "OutType143": + procSnmp6.Icmp6.OutType143 = &value + } + case "Udp6": + switch key { + case "InDatagrams": + procSnmp6.Udp6.InDatagrams = &value + case "NoPorts": + procSnmp6.Udp6.NoPorts = &value + case "InErrors": + procSnmp6.Udp6.InErrors = &value + case "OutDatagrams": + procSnmp6.Udp6.OutDatagrams = &value + case "RcvbufErrors": + procSnmp6.Udp6.RcvbufErrors = &value + case "SndbufErrors": + procSnmp6.Udp6.SndbufErrors = &value + case "InCsumErrors": + procSnmp6.Udp6.InCsumErrors = &value + case "IgnoredMulti": + procSnmp6.Udp6.IgnoredMulti = &value + } + case "UdpLite6": + switch key { + case "InDatagrams": + procSnmp6.UdpLite6.InDatagrams = &value + case "NoPorts": + procSnmp6.UdpLite6.NoPorts = &value + case "InErrors": + procSnmp6.UdpLite6.InErrors = &value + case "OutDatagrams": + procSnmp6.UdpLite6.OutDatagrams = &value + case "RcvbufErrors": + procSnmp6.UdpLite6.RcvbufErrors = &value + case "SndbufErrors": + procSnmp6.UdpLite6.SndbufErrors = &value + case "InCsumErrors": + procSnmp6.UdpLite6.InCsumErrors = &value + } + } + } + } + return procSnmp6, scanner.Err() +} diff --git a/tools/vendor/github.com/prometheus/procfs/proc_stat.go b/tools/vendor/github.com/prometheus/procfs/proc_stat.go index 8c7b6e80a..06a8d931c 100644 --- a/tools/vendor/github.com/prometheus/procfs/proc_stat.go +++ b/tools/vendor/github.com/prometheus/procfs/proc_stat.go @@ -18,7 +18,6 @@ import ( "fmt" "os" - "github.com/prometheus/procfs/internal/fs" "github.com/prometheus/procfs/internal/util" ) @@ -81,10 +80,10 @@ type ProcStat struct { STime uint // Amount of time that this process's waited-for children have been // scheduled in user mode, measured in clock ticks. 
- CUTime uint + CUTime int // Amount of time that this process's waited-for children have been // scheduled in kernel mode, measured in clock ticks. - CSTime uint + CSTime int // For processes running a real-time scheduling policy, this is the negated // scheduling priority, minus one. Priority int @@ -102,6 +101,8 @@ type ProcStat struct { RSS int // Soft limit in bytes on the rss of the process. RSSLimit uint64 + // CPU number last executed on. + Processor uint // Real-time scheduling priority, a number in the range 1 to 99 for processes // scheduled under a real-time policy, or 0, for non-real-time processes. RTPriority uint @@ -109,13 +110,18 @@ type ProcStat struct { Policy uint // Aggregated block I/O delays, measured in clock ticks (centiseconds). DelayAcctBlkIOTicks uint64 + // Guest time of the process (time spent running a virtual CPU for a guest + // operating system), measured in clock ticks. + GuestTime int + // Guest time of the process's children, measured in clock ticks. + CGuestTime int - proc fs.FS + proc FS } // NewStat returns the current status information of the process. // -// Deprecated: use p.Stat() instead +// Deprecated: Use p.Stat() instead. func (p Proc) NewStat() (ProcStat, error) { return p.Stat() } @@ -137,10 +143,15 @@ func (p Proc) Stat() (ProcStat, error) { ) if l < 0 || r < 0 { - return ProcStat{}, fmt.Errorf("unexpected format, couldn't extract comm %q", data) + return ProcStat{}, fmt.Errorf("%w: unexpected format, couldn't extract comm %q", ErrFileParse, data) } s.Comm = string(data[l+1 : r]) + + // Check the following resources for the details about the particular stat + // fields and their data types: + // * https://man7.org/linux/man-pages/man5/proc.5.html + // * https://man7.org/linux/man-pages/man3/scanf.3.html _, err = fmt.Fscan( bytes.NewBuffer(data[r+2:]), &s.State, @@ -179,10 +190,12 @@ func (p Proc) Stat() (ProcStat, error) { &ignoreUint64, &ignoreUint64, &ignoreInt64, - &ignoreInt64, + &s.Processor, &s.RTPriority, &s.Policy, &s.DelayAcctBlkIOTicks, + &s.GuestTime, + &s.CGuestTime, ) if err != nil { return ProcStat{}, err @@ -203,8 +216,7 @@ func (s ProcStat) ResidentMemory() int { // StartTime returns the unix timestamp of the process in seconds. func (s ProcStat) StartTime() (float64, error) { - fs := FS{proc: s.proc} - stat, err := fs.Stat() + stat, err := s.proc.Stat() if err != nil { return 0, err } diff --git a/tools/vendor/github.com/prometheus/procfs/proc_status.go b/tools/vendor/github.com/prometheus/procfs/proc_status.go index 6edd8333b..dd8aa5688 100644 --- a/tools/vendor/github.com/prometheus/procfs/proc_status.go +++ b/tools/vendor/github.com/prometheus/procfs/proc_status.go @@ -15,6 +15,8 @@ package procfs import ( "bytes" + "math/bits" + "sort" "strconv" "strings" @@ -22,7 +24,7 @@ import ( ) // ProcStatus provides status information about the process, -// read from /proc/[pid]/stat. +// read from /proc/[pid]/status. type ProcStatus struct { // The process ID. PID int @@ -31,39 +33,41 @@ type ProcStatus struct { // Thread group ID. TGID int + // List of Pid namespace. + NSpids []uint64 // Peak virtual memory size. - VmPeak uint64 // nolint:golint + VmPeak uint64 // nolint:revive // Virtual memory size. - VmSize uint64 // nolint:golint + VmSize uint64 // nolint:revive // Locked memory size. - VmLck uint64 // nolint:golint + VmLck uint64 // nolint:revive // Pinned memory size. - VmPin uint64 // nolint:golint + VmPin uint64 // nolint:revive // Peak resident set size. 
- VmHWM uint64 // nolint:golint + VmHWM uint64 // nolint:revive // Resident set size (sum of RssAnnon RssFile and RssShmem). - VmRSS uint64 // nolint:golint + VmRSS uint64 // nolint:revive // Size of resident anonymous memory. - RssAnon uint64 // nolint:golint + RssAnon uint64 // nolint:revive // Size of resident file mappings. - RssFile uint64 // nolint:golint + RssFile uint64 // nolint:revive // Size of resident shared memory. - RssShmem uint64 // nolint:golint + RssShmem uint64 // nolint:revive // Size of data segments. - VmData uint64 // nolint:golint + VmData uint64 // nolint:revive // Size of stack segments. - VmStk uint64 // nolint:golint + VmStk uint64 // nolint:revive // Size of text segments. - VmExe uint64 // nolint:golint + VmExe uint64 // nolint:revive // Shared library code size. - VmLib uint64 // nolint:golint + VmLib uint64 // nolint:revive // Page table entries size. - VmPTE uint64 // nolint:golint + VmPTE uint64 // nolint:revive // Size of second-level page tables. - VmPMD uint64 // nolint:golint + VmPMD uint64 // nolint:revive // Swapped-out virtual memory size by anonymous private. - VmSwap uint64 // nolint:golint + VmSwap uint64 // nolint:revive // Size of hugetlb memory portions HugetlbPages uint64 @@ -73,9 +77,12 @@ type ProcStatus struct { NonVoluntaryCtxtSwitches uint64 // UIDs of the process (Real, effective, saved set, and filesystem UIDs) - UIDs [4]string + UIDs [4]uint64 // GIDs of the process (Real, effective, saved set, and filesystem GIDs) - GIDs [4]string + GIDs [4]uint64 + + // CpusAllowedList: List of cpu cores processes are allowed to run on. + CpusAllowedList []uint64 } // NewStatus returns the current status information of the process. @@ -96,10 +103,10 @@ func (p Proc) NewStatus() (ProcStatus, error) { kv := strings.SplitN(line, ":", 2) // removes spaces - k := string(strings.TrimSpace(kv[0])) - v := string(strings.TrimSpace(kv[1])) + k := strings.TrimSpace(kv[0]) + v := strings.TrimSpace(kv[1]) // removes "kB" - v = string(bytes.Trim([]byte(v), " kB")) + v = strings.TrimSuffix(v, " kB") // value to int when possible // we can skip error check here, 'cause vKBytes is not used when value is a string @@ -107,22 +114,43 @@ func (p Proc) NewStatus() (ProcStatus, error) { // convert kB to B vBytes := vKBytes * 1024 - s.fillStatus(k, v, vKBytes, vBytes) + err = s.fillStatus(k, v, vKBytes, vBytes) + if err != nil { + return ProcStatus{}, err + } } return s, nil } -func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintBytes uint64) { +func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintBytes uint64) error { switch k { case "Tgid": s.TGID = int(vUint) case "Name": s.Name = vString case "Uid": - copy(s.UIDs[:], strings.Split(vString, "\t")) + var err error + for i, v := range strings.Split(vString, "\t") { + s.UIDs[i], err = strconv.ParseUint(v, 10, bits.UintSize) + if err != nil { + return err + } + } case "Gid": - copy(s.GIDs[:], strings.Split(vString, "\t")) + var err error + for i, v := range strings.Split(vString, "\t") { + s.GIDs[i], err = strconv.ParseUint(v, 10, bits.UintSize) + if err != nil { + return err + } + } + case "NSpid": + nspids, err := calcNSPidsList(vString) + if err != nil { + return err + } + s.NSpids = nspids case "VmPeak": s.VmPeak = vUintBytes case "VmSize": @@ -161,10 +189,54 @@ func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintByt s.VoluntaryCtxtSwitches = vUint case "nonvoluntary_ctxt_switches": s.NonVoluntaryCtxtSwitches = vUint + case "Cpus_allowed_list": + 
s.CpusAllowedList = calcCpusAllowedList(vString)
	}
+
+	return nil
}

// TotalCtxtSwitches returns the total context switch.
func (s ProcStatus) TotalCtxtSwitches() uint64 {
	return s.VoluntaryCtxtSwitches + s.NonVoluntaryCtxtSwitches
}
+
+func calcCpusAllowedList(cpuString string) []uint64 {
+	s := strings.Split(cpuString, ",")
+
+	var g []uint64
+
+	for _, cpu := range s {
+		// parse cpu ranges, example: 1-3=[1,2,3]
+		if l := strings.Split(strings.TrimSpace(cpu), "-"); len(l) > 1 {
+			startCPU, _ := strconv.ParseUint(l[0], 10, 64)
+			endCPU, _ := strconv.ParseUint(l[1], 10, 64)
+
+			for i := startCPU; i <= endCPU; i++ {
+				g = append(g, i)
+			}
+		} else if len(l) == 1 {
+			cpu, _ := strconv.ParseUint(l[0], 10, 64)
+			g = append(g, cpu)
+		}
+
+	}
+
+	sort.Slice(g, func(i, j int) bool { return g[i] < g[j] })
+	return g
+}
+
+func calcNSPidsList(nspidsString string) ([]uint64, error) {
+	s := strings.Split(nspidsString, "\t")
+	var nspids []uint64
+
+	for _, nspid := range s {
+		nspid, err := strconv.ParseUint(nspid, 10, 64)
+		if err != nil {
+			return nil, err
+		}
+		nspids = append(nspids, nspid)
+	}
+
+	return nspids, nil
+}
diff --git a/tools/vendor/github.com/prometheus/procfs/proc_sys.go b/tools/vendor/github.com/prometheus/procfs/proc_sys.go
new file mode 100644
index 000000000..5eefbe2ef
--- /dev/null
+++ b/tools/vendor/github.com/prometheus/procfs/proc_sys.go
@@ -0,0 +1,51 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+func sysctlToPath(sysctl string) string {
+	return strings.Replace(sysctl, ".", "/", -1)
+}
+
+func (fs FS) SysctlStrings(sysctl string) ([]string, error) {
+	value, err := util.SysReadFile(fs.proc.Path("sys", sysctlToPath(sysctl)))
+	if err != nil {
+		return nil, err
+	}
+	return strings.Fields(value), nil
+
+}
+
+func (fs FS) SysctlInts(sysctl string) ([]int, error) {
+	fields, err := fs.SysctlStrings(sysctl)
+	if err != nil {
+		return nil, err
+	}
+
+	values := make([]int, len(fields))
+	for i, f := range fields {
+		vp := util.NewValueParser(f)
+		values[i] = vp.Int()
+		if err := vp.Err(); err != nil {
+			return nil, fmt.Errorf("%w: field %d in sysctl %s is not a valid int: %w", ErrFileParse, i, sysctl, err)
+		}
+	}
+	return values, nil
+}
diff --git a/tools/vendor/github.com/prometheus/procfs/schedstat.go b/tools/vendor/github.com/prometheus/procfs/schedstat.go
index 28228164e..5f7f32dc8 100644
--- a/tools/vendor/github.com/prometheus/procfs/schedstat.go
+++ b/tools/vendor/github.com/prometheus/procfs/schedstat.go
@@ -40,7 +40,7 @@ type Schedstat struct {
	CPUs []*SchedstatCPU
}

-// SchedstatCPU contains the values from one "cpu" line
+// SchedstatCPU contains the values from one "cpu" line.
type SchedstatCPU struct {
	CPUNum string

@@ -49,14 +49,14 @@ type SchedstatCPU struct {
	RunTimeslices uint64
}

-// ProcSchedstat contains the values from /proc/<pid>/schedstat
+// ProcSchedstat contains the values from `/proc/<pid>/schedstat`.
type ProcSchedstat struct { RunningNanoseconds uint64 WaitingNanoseconds uint64 RunTimeslices uint64 } -// Schedstat reads data from /proc/schedstat +// Schedstat reads data from `/proc/schedstat`. func (fs FS) Schedstat() (*Schedstat, error) { file, err := os.Open(fs.proc.Path("schedstat")) if err != nil { diff --git a/tools/vendor/github.com/prometheus/procfs/slab.go b/tools/vendor/github.com/prometheus/procfs/slab.go index 7896fd724..8611c9017 100644 --- a/tools/vendor/github.com/prometheus/procfs/slab.go +++ b/tools/vendor/github.com/prometheus/procfs/slab.go @@ -68,7 +68,7 @@ func parseV21SlabEntry(line string) (*Slab, error) { l := slabSpace.ReplaceAllString(line, " ") s := strings.Split(l, " ") if len(s) != 16 { - return nil, fmt.Errorf("unable to parse: %q", line) + return nil, fmt.Errorf("%w: unable to parse: %q", ErrFileParse, line) } var err error i := &Slab{Name: s[0]} @@ -137,7 +137,7 @@ func parseSlabInfo21(r *bytes.Reader) (SlabInfo, error) { return s, nil } -// SlabInfo reads data from /proc/slabinfo +// SlabInfo reads data from `/proc/slabinfo`. func (fs FS) SlabInfo() (SlabInfo, error) { // TODO: Consider passing options to allow for parsing different // slabinfo versions. However, slabinfo 2.1 has been stable since diff --git a/tools/vendor/github.com/prometheus/procfs/softirqs.go b/tools/vendor/github.com/prometheus/procfs/softirqs.go new file mode 100644 index 000000000..28708e074 --- /dev/null +++ b/tools/vendor/github.com/prometheus/procfs/softirqs.go @@ -0,0 +1,160 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// Softirqs represents the softirq statistics. 
+type Softirqs struct { + Hi []uint64 + Timer []uint64 + NetTx []uint64 + NetRx []uint64 + Block []uint64 + IRQPoll []uint64 + Tasklet []uint64 + Sched []uint64 + HRTimer []uint64 + RCU []uint64 +} + +func (fs FS) Softirqs() (Softirqs, error) { + fileName := fs.proc.Path("softirqs") + data, err := util.ReadFileNoStat(fileName) + if err != nil { + return Softirqs{}, err + } + + reader := bytes.NewReader(data) + + return parseSoftirqs(reader) +} + +func parseSoftirqs(r io.Reader) (Softirqs, error) { + var ( + softirqs = Softirqs{} + scanner = bufio.NewScanner(r) + ) + + if !scanner.Scan() { + return Softirqs{}, fmt.Errorf("%w: softirqs empty", ErrFileRead) + } + + for scanner.Scan() { + parts := strings.Fields(scanner.Text()) + var err error + + // require at least one cpu + if len(parts) < 2 { + continue + } + switch { + case parts[0] == "HI:": + perCPU := parts[1:] + softirqs.Hi = make([]uint64, len(perCPU)) + for i, count := range perCPU { + if softirqs.Hi[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HI%d): %w", ErrFileParse, count, i, err) + } + } + case parts[0] == "TIMER:": + perCPU := parts[1:] + softirqs.Timer = make([]uint64, len(perCPU)) + for i, count := range perCPU { + if softirqs.Timer[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TIMER%d): %w", ErrFileParse, count, i, err) + } + } + case parts[0] == "NET_TX:": + perCPU := parts[1:] + softirqs.NetTx = make([]uint64, len(perCPU)) + for i, count := range perCPU { + if softirqs.NetTx[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_TX%d): %w", ErrFileParse, count, i, err) + } + } + case parts[0] == "NET_RX:": + perCPU := parts[1:] + softirqs.NetRx = make([]uint64, len(perCPU)) + for i, count := range perCPU { + if softirqs.NetRx[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_RX%d): %w", ErrFileParse, count, i, err) + } + } + case parts[0] == "BLOCK:": + perCPU := parts[1:] + softirqs.Block = make([]uint64, len(perCPU)) + for i, count := range perCPU { + if softirqs.Block[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (BLOCK%d): %w", ErrFileParse, count, i, err) + } + } + case parts[0] == "IRQ_POLL:": + perCPU := parts[1:] + softirqs.IRQPoll = make([]uint64, len(perCPU)) + for i, count := range perCPU { + if softirqs.IRQPoll[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (IRQ_POLL%d): %w", ErrFileParse, count, i, err) + } + } + case parts[0] == "TASKLET:": + perCPU := parts[1:] + softirqs.Tasklet = make([]uint64, len(perCPU)) + for i, count := range perCPU { + if softirqs.Tasklet[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TASKLET%d): %w", ErrFileParse, count, i, err) + } + } + case parts[0] == "SCHED:": + perCPU := parts[1:] + softirqs.Sched = make([]uint64, len(perCPU)) + for i, count := range perCPU { + if softirqs.Sched[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (SCHED%d): %w", ErrFileParse, count, i, err) + } + } + case parts[0] == "HRTIMER:": + perCPU := parts[1:] + softirqs.HRTimer = make([]uint64, len(perCPU)) + for i, count := range perCPU { + if softirqs.HRTimer[i], err = 
strconv.ParseUint(count, 10, 64); err != nil { + return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HRTIMER%d): %w", ErrFileParse, count, i, err) + } + } + case parts[0] == "RCU:": + perCPU := parts[1:] + softirqs.RCU = make([]uint64, len(perCPU)) + for i, count := range perCPU { + if softirqs.RCU[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (RCU%d): %w", ErrFileParse, count, i, err) + } + } + } + } + + if err := scanner.Err(); err != nil { + return Softirqs{}, fmt.Errorf("%w: couldn't parse softirqs: %w", ErrFileParse, err) + } + + return softirqs, scanner.Err() +} diff --git a/tools/vendor/github.com/prometheus/procfs/stat.go b/tools/vendor/github.com/prometheus/procfs/stat.go index 6d8727541..e36b41c18 100644 --- a/tools/vendor/github.com/prometheus/procfs/stat.go +++ b/tools/vendor/github.com/prometheus/procfs/stat.go @@ -41,7 +41,7 @@ type CPUStat struct { // SoftIRQStat represent the softirq statistics as exported in the procfs stat file. // A nice introduction can be found at https://0xax.gitbooks.io/linux-insides/content/interrupts/interrupts-9.html -// It is possible to get per-cpu stats by reading /proc/softirqs +// It is possible to get per-cpu stats by reading `/proc/softirqs`. type SoftIRQStat struct { Hi uint64 Timer uint64 @@ -62,7 +62,7 @@ type Stat struct { // Summed up cpu statistics. CPUTotal CPUStat // Per-CPU statistics. - CPU []CPUStat + CPU map[int64]CPUStat // Number of times interrupts were handled, which contains numbered and unnumbered IRQs. IRQTotal uint64 // Number of times a numbered IRQ was triggered. @@ -93,10 +93,10 @@ func parseCPUStat(line string) (CPUStat, int64, error) { &cpuStat.Guest, &cpuStat.GuestNice) if err != nil && err != io.EOF { - return CPUStat{}, -1, fmt.Errorf("couldn't parse %q (cpu): %w", line, err) + return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu): %w", ErrFileParse, line, err) } if count == 0 { - return CPUStat{}, -1, fmt.Errorf("couldn't parse %q (cpu): 0 elements parsed", line) + return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu): 0 elements parsed", ErrFileParse, line) } cpuStat.User /= userHZ @@ -116,7 +116,7 @@ func parseCPUStat(line string) (CPUStat, int64, error) { cpuID, err := strconv.ParseInt(cpu[3:], 10, 64) if err != nil { - return CPUStat{}, -1, fmt.Errorf("couldn't parse %q (cpu/cpuid): %w", line, err) + return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu/cpuid): %w", ErrFileParse, line, err) } return cpuStat, cpuID, nil @@ -136,7 +136,7 @@ func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) { &softIRQStat.Hrtimer, &softIRQStat.Rcu) if err != nil { - return SoftIRQStat{}, 0, fmt.Errorf("couldn't parse %q (softirq): %w", line, err) + return SoftIRQStat{}, 0, fmt.Errorf("%w: couldn't parse %q (softirq): %w", ErrFileParse, line, err) } return softIRQStat, total, nil @@ -145,7 +145,7 @@ func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) { // NewStat returns information about current cpu/process statistics. // See https://www.kernel.org/doc/Documentation/filesystems/proc.txt // -// Deprecated: use fs.Stat() instead +// Deprecated: Use fs.Stat() instead. func NewStat() (Stat, error) { fs, err := NewFS(fs.DefaultProcMountPoint) if err != nil { @@ -155,25 +155,42 @@ func NewStat() (Stat, error) { } // NewStat returns information about current cpu/process statistics. 
-// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt +// See: https://www.kernel.org/doc/Documentation/filesystems/proc.txt // -// Deprecated: use fs.Stat() instead +// Deprecated: Use fs.Stat() instead. func (fs FS) NewStat() (Stat, error) { return fs.Stat() } // Stat returns information about current cpu/process statistics. -// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt +// See: https://www.kernel.org/doc/Documentation/filesystems/proc.txt func (fs FS) Stat() (Stat, error) { fileName := fs.proc.Path("stat") data, err := util.ReadFileNoStat(fileName) if err != nil { return Stat{}, err } + procStat, err := parseStat(bytes.NewReader(data), fileName) + if err != nil { + return Stat{}, err + } + return procStat, nil +} + +// parseStat parses the metrics from /proc/[pid]/stat. +func parseStat(r io.Reader, fileName string) (Stat, error) { + var ( + scanner = bufio.NewScanner(r) + stat = Stat{ + CPU: make(map[int64]CPUStat), + } + err error + ) - stat := Stat{} + // Increase default scanner buffer to handle very long `intr` lines. + buf := make([]byte, 0, 8*1024) + scanner.Buffer(buf, 1024*1024) - scanner := bufio.NewScanner(bytes.NewReader(data)) for scanner.Scan() { line := scanner.Text() parts := strings.Fields(scanner.Text()) @@ -184,34 +201,34 @@ func (fs FS) Stat() (Stat, error) { switch { case parts[0] == "btime": if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %q (btime): %w", parts[1], err) + return Stat{}, fmt.Errorf("%w: couldn't parse %q (btime): %w", ErrFileParse, parts[1], err) } case parts[0] == "intr": if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %q (intr): %w", parts[1], err) + return Stat{}, fmt.Errorf("%w: couldn't parse %q (intr): %w", ErrFileParse, parts[1], err) } numberedIRQs := parts[2:] stat.IRQ = make([]uint64, len(numberedIRQs)) for i, count := range numberedIRQs { if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %q (intr%d): %w", count, i, err) + return Stat{}, fmt.Errorf("%w: couldn't parse %q (intr%d): %w", ErrFileParse, count, i, err) } } case parts[0] == "ctxt": if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %q (ctxt): %w", parts[1], err) + return Stat{}, fmt.Errorf("%w: couldn't parse %q (ctxt): %w", ErrFileParse, parts[1], err) } case parts[0] == "processes": if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %q (processes): %w", parts[1], err) + return Stat{}, fmt.Errorf("%w: couldn't parse %q (processes): %w", ErrFileParse, parts[1], err) } case parts[0] == "procs_running": if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %q (procs_running): %w", parts[1], err) + return Stat{}, fmt.Errorf("%w: couldn't parse %q (procs_running): %w", ErrFileParse, parts[1], err) } case parts[0] == "procs_blocked": if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %q (procs_blocked): %w", parts[1], err) + return Stat{}, fmt.Errorf("%w: couldn't parse %q (procs_blocked): %w", ErrFileParse, parts[1], err) } case parts[0] == "softirq": softIRQStats, total, err := parseSoftIRQStat(line) @@ -228,16 +245,13 @@ func (fs FS) Stat() (Stat, error) 
{ if cpuID == -1 { stat.CPUTotal = cpuStat } else { - for int64(len(stat.CPU)) <= cpuID { - stat.CPU = append(stat.CPU, CPUStat{}) - } stat.CPU[cpuID] = cpuStat } } } if err := scanner.Err(); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %q: %w", fileName, err) + return Stat{}, fmt.Errorf("%w: couldn't parse %q: %w", ErrFileParse, fileName, err) } return stat, nil diff --git a/tools/vendor/github.com/prometheus/procfs/swaps.go b/tools/vendor/github.com/prometheus/procfs/swaps.go index 15edc2212..65fec834b 100644 --- a/tools/vendor/github.com/prometheus/procfs/swaps.go +++ b/tools/vendor/github.com/prometheus/procfs/swaps.go @@ -64,7 +64,7 @@ func parseSwapString(swapString string) (*Swap, error) { swapFields := strings.Fields(swapString) swapLength := len(swapFields) if swapLength < 5 { - return nil, fmt.Errorf("too few fields in swap string: %s", swapString) + return nil, fmt.Errorf("%w: too few fields in swap string: %s", ErrFileParse, swapString) } swap := &Swap{ @@ -74,15 +74,15 @@ func parseSwapString(swapString string) (*Swap, error) { swap.Size, err = strconv.Atoi(swapFields[2]) if err != nil { - return nil, fmt.Errorf("invalid swap size: %s", swapFields[2]) + return nil, fmt.Errorf("%w: invalid swap size: %s: %w", ErrFileParse, swapFields[2], err) } swap.Used, err = strconv.Atoi(swapFields[3]) if err != nil { - return nil, fmt.Errorf("invalid swap used: %s", swapFields[3]) + return nil, fmt.Errorf("%w: invalid swap used: %s: %w", ErrFileParse, swapFields[3], err) } swap.Priority, err = strconv.Atoi(swapFields[4]) if err != nil { - return nil, fmt.Errorf("invalid swap priority: %s", swapFields[4]) + return nil, fmt.Errorf("%w: invalid swap priority: %s: %w", ErrFileParse, swapFields[4], err) } return swap, nil diff --git a/tools/vendor/github.com/prometheus/procfs/thread.go b/tools/vendor/github.com/prometheus/procfs/thread.go new file mode 100644 index 000000000..80e0e947b --- /dev/null +++ b/tools/vendor/github.com/prometheus/procfs/thread.go @@ -0,0 +1,80 @@ +// Copyright 2022 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "fmt" + "os" + "strconv" + + fsi "github.com/prometheus/procfs/internal/fs" +) + +// Provide access to /proc/PID/task/TID files, for thread specific values. Since +// such files have the same structure as /proc/PID/ ones, the data structures +// and the parsers for the latter may be reused. + +// AllThreads returns a list of all currently available threads under /proc/PID. +func AllThreads(pid int) (Procs, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return Procs{}, err + } + return fs.AllThreads(pid) +} + +// AllThreads returns a list of all currently available threads for PID. 
+func (fs FS) AllThreads(pid int) (Procs, error) { + taskPath := fs.proc.Path(strconv.Itoa(pid), "task") + d, err := os.Open(taskPath) + if err != nil { + return Procs{}, err + } + defer d.Close() + + names, err := d.Readdirnames(-1) + if err != nil { + return Procs{}, fmt.Errorf("%w: could not read %q: %w", ErrFileRead, d.Name(), err) + } + + t := Procs{} + for _, n := range names { + tid, err := strconv.ParseInt(n, 10, 64) + if err != nil { + continue + } + + t = append(t, Proc{PID: int(tid), fs: FS{fsi.FS(taskPath), fs.isReal}}) + } + + return t, nil +} + +// Thread returns a process for a given PID, TID. +func (fs FS) Thread(pid, tid int) (Proc, error) { + taskPath := fs.proc.Path(strconv.Itoa(pid), "task") + if _, err := os.Stat(taskPath); err != nil { + return Proc{}, err + } + return Proc{PID: tid, fs: FS{fsi.FS(taskPath), fs.isReal}}, nil +} + +// Thread returns a process for a given TID of Proc. +func (proc Proc) Thread(tid int) (Proc, error) { + tfs := FS{fsi.FS(proc.path("task")), proc.fs.isReal} + if _, err := os.Stat(tfs.proc.Path(strconv.Itoa(tid))); err != nil { + return Proc{}, err + } + return Proc{PID: tid, fs: tfs}, nil +} diff --git a/tools/vendor/github.com/prometheus/procfs/vm.go b/tools/vendor/github.com/prometheus/procfs/vm.go index cb1389141..51c49d89e 100644 --- a/tools/vendor/github.com/prometheus/procfs/vm.go +++ b/tools/vendor/github.com/prometheus/procfs/vm.go @@ -11,13 +11,13 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !windows // +build !windows package procfs import ( "fmt" - "io/ioutil" "os" "path/filepath" "strings" @@ -26,10 +26,12 @@ import ( ) // The VM interface is described at -// https://www.kernel.org/doc/Documentation/sysctl/vm.txt +// +// https://www.kernel.org/doc/Documentation/sysctl/vm.txt +// // Each setting is exposed as a single file. // Each file contains one line with a single numerical value, except lowmem_reserve_ratio which holds an array -// and numa_zonelist_order (deprecated) which is a string +// and numa_zonelist_order (deprecated) which is a string. type VM struct { AdminReserveKbytes *int64 // /proc/sys/vm/admin_reserve_kbytes BlockDump *int64 // /proc/sys/vm/block_dump @@ -84,10 +86,10 @@ func (fs FS) VM() (*VM, error) { return nil, err } if !file.Mode().IsDir() { - return nil, fmt.Errorf("%s is not a directory", path) + return nil, fmt.Errorf("%w: %s is not a directory", ErrFileRead, path) } - files, err := ioutil.ReadDir(path) + files, err := os.ReadDir(path) if err != nil { return nil, err } diff --git a/tools/vendor/github.com/prometheus/procfs/zoneinfo.go b/tools/vendor/github.com/prometheus/procfs/zoneinfo.go index 209e2ac98..e54d94b09 100644 --- a/tools/vendor/github.com/prometheus/procfs/zoneinfo.go +++ b/tools/vendor/github.com/prometheus/procfs/zoneinfo.go @@ -11,6 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !windows // +build !windows package procfs @@ -18,7 +19,7 @@ package procfs import ( "bytes" "fmt" - "io/ioutil" + "os" "regexp" "strings" @@ -72,13 +73,13 @@ var nodeZoneRE = regexp.MustCompile(`(\d+), zone\s+(\w+)`) // structs containing the relevant info. 
More information available here: // https://www.kernel.org/doc/Documentation/sysctl/vm.txt func (fs FS) Zoneinfo() ([]Zoneinfo, error) { - data, err := ioutil.ReadFile(fs.proc.Path("zoneinfo")) + data, err := os.ReadFile(fs.proc.Path("zoneinfo")) if err != nil { - return nil, fmt.Errorf("error reading zoneinfo %q: %w", fs.proc.Path("zoneinfo"), err) + return nil, fmt.Errorf("%w: error reading zoneinfo %q: %w", ErrFileRead, fs.proc.Path("zoneinfo"), err) } zoneinfo, err := parseZoneinfo(data) if err != nil { - return nil, fmt.Errorf("error parsing zoneinfo %q: %w", fs.proc.Path("zoneinfo"), err) + return nil, fmt.Errorf("%w: error parsing zoneinfo %q: %w", ErrFileParse, fs.proc.Path("zoneinfo"), err) } return zoneinfo, nil } diff --git a/tools/vendor/github.com/rogpeppe/go-internal/diff/diff.go b/tools/vendor/github.com/rogpeppe/go-internal/diff/diff.go index 47b285671..9bd8bd781 100644 --- a/tools/vendor/github.com/rogpeppe/go-internal/diff/diff.go +++ b/tools/vendor/github.com/rogpeppe/go-internal/diff/diff.go @@ -76,7 +76,7 @@ func Diff(oldName string, old []byte, newName string, new []byte) []byte { // Expand matching lines as far possible, // establishing that x[start.x:end.x] == y[start.y:end.y]. - // Note that on the first (or last) iteration we may (or definitey do) + // Note that on the first (or last) iteration we may (or definitely do) // have an empty match: start.x==end.x and start.y==end.y. start := m for start.x > done.x && start.y > done.y && x[start.x-1] == y[start.y-1] { @@ -116,10 +116,7 @@ func Diff(oldName string, old []byte, newName string, new []byte) []byte { // End chunk with common lines for context. if len(ctext) > 0 { - n := end.x - start.x - if n > C { - n = C - } + n := min(end.x-start.x, C) for _, s := range x[start.x : start.x+n] { ctext = append(ctext, " "+s) count.x++ @@ -234,7 +231,7 @@ func tgs(x, y []string) []pair { for i := range T { T[i] = n + 1 } - for i := 0; i < n; i++ { + for i := range n { k := sort.Search(n, func(k int) bool { return T[k] >= J[i] }) diff --git a/tools/vendor/github.com/ryancurrah/gomodguard/.golangci.yml b/tools/vendor/github.com/ryancurrah/gomodguard/.golangci.yml index 5be8aa487..b4d72b7e0 100644 --- a/tools/vendor/github.com/ryancurrah/gomodguard/.golangci.yml +++ b/tools/vendor/github.com/ryancurrah/gomodguard/.golangci.yml @@ -2,13 +2,13 @@ linters: enable-all: true disable: + - tenv - lll - gomodguard - gochecknoglobals - paralleltest - varnamelen - exhaustruct - - gomnd - depguard - forbidigo - funlen diff --git a/tools/vendor/github.com/ryancurrah/gomodguard/README.md b/tools/vendor/github.com/ryancurrah/gomodguard/README.md index 68dc86044..e06efaa1f 100644 --- a/tools/vendor/github.com/ryancurrah/gomodguard/README.md +++ b/tools/vendor/github.com/ryancurrah/gomodguard/README.md @@ -37,7 +37,7 @@ Results can be exported to different report formats. Which can be imported into ```yaml allowed: modules: # List of allowed modules - - gopkg.in/yaml.v2 + - gopkg.in/yaml.v3 - github.com/go-xmlfmt/xmlfmt - github.com/phayes/checkstyle - github.com/mitchellh/go-homedir @@ -88,7 +88,7 @@ Flags: ``` ╰─ ./gomodguard -r checkstyle -f gomodguard-checkstyle.xml ./... 
-info: allowed modules, [gopkg.in/yaml.v2 github.com/go-xmlfmt/xmlfmt github.com/phayes/checkstyle github.com/mitchellh/go-homedir] +info: allowed modules, [gopkg.in/yaml.v3 github.com/go-xmlfmt/xmlfmt github.com/phayes/checkstyle github.com/mitchellh/go-homedir] info: allowed module domains, [golang.org] info: blocked modules, [github.com/uudashr/go-module] info: found `2` blocked modules in the go.mod file, [github.com/gofrs/uuid github.com/uudashr/go-module] diff --git a/tools/vendor/github.com/ryancurrah/gomodguard/processor.go b/tools/vendor/github.com/ryancurrah/gomodguard/processor.go index f3f42a5be..4d2fac48b 100644 --- a/tools/vendor/github.com/ryancurrah/gomodguard/processor.go +++ b/tools/vendor/github.com/ryancurrah/gomodguard/processor.go @@ -146,10 +146,6 @@ func (p *Processor) SetBlockedModules() { //nolint:funlen replacedModules := p.Modfile.Replace for i := range lintedModules { - if lintedModules[i].Indirect { - continue // Do not lint indirect modules. - } - lintedModuleName := strings.TrimSpace(lintedModules[i].Mod.Path) lintedModuleVersion := strings.TrimSpace(lintedModules[i].Mod.Version) diff --git a/tools/vendor/github.com/sagikazarmark/locafero/.editorconfig b/tools/vendor/github.com/sagikazarmark/locafero/.editorconfig new file mode 100644 index 000000000..6f944f540 --- /dev/null +++ b/tools/vendor/github.com/sagikazarmark/locafero/.editorconfig @@ -0,0 +1,21 @@ +root = true + +[*] +charset = utf-8 +end_of_line = lf +indent_size = 4 +indent_style = space +insert_final_newline = true +trim_trailing_whitespace = true + +[{Makefile,*.mk}] +indent_style = tab + +[*.nix] +indent_size = 2 + +[*.go] +indent_style = tab + +[{*.yml,*.yaml}] +indent_size = 2 diff --git a/tools/vendor/github.com/sagikazarmark/locafero/.envrc b/tools/vendor/github.com/sagikazarmark/locafero/.envrc new file mode 100644 index 000000000..2e0f9f5f7 --- /dev/null +++ b/tools/vendor/github.com/sagikazarmark/locafero/.envrc @@ -0,0 +1,4 @@ +if ! has nix_direnv_version || ! nix_direnv_version 3.0.4; then + source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/3.0.4/direnvrc" "sha256-DzlYZ33mWF/Gs8DDeyjr8mnVmQGx7ASYqA5WlxwvBG4=" +fi +use flake . 
--impure diff --git a/vendor/github.com/sagikazarmark/slog-shim/.gitignore b/tools/vendor/github.com/sagikazarmark/locafero/.gitignore similarity index 57% rename from vendor/github.com/sagikazarmark/slog-shim/.gitignore rename to tools/vendor/github.com/sagikazarmark/locafero/.gitignore index dc6d8b587..8f07e6016 100644 --- a/vendor/github.com/sagikazarmark/slog-shim/.gitignore +++ b/tools/vendor/github.com/sagikazarmark/locafero/.gitignore @@ -1,4 +1,8 @@ /.devenv/ /.direnv/ /.task/ +/bin/ /build/ +/tmp/ +/var/ +/vendor/ diff --git a/tools/vendor/github.com/sagikazarmark/locafero/.golangci.yaml b/tools/vendor/github.com/sagikazarmark/locafero/.golangci.yaml new file mode 100644 index 000000000..829de2a4a --- /dev/null +++ b/tools/vendor/github.com/sagikazarmark/locafero/.golangci.yaml @@ -0,0 +1,27 @@ +run: + timeout: 10m + +linters-settings: + gci: + sections: + - standard + - default + - prefix(github.com/sagikazarmark/locafero) + goimports: + local-prefixes: github.com/sagikazarmark/locafero + misspell: + locale: US + nolintlint: + allow-leading-space: false # require machine-readable nolint directives (with no leading space) + allow-unused: false # report any unused nolint directives + require-specific: false # don't require nolint directives to be specific about which linter is being skipped + revive: + confidence: 0 + +linters: + enable: + - gci + - goimports + - misspell + - nolintlint + - revive diff --git a/tools/vendor/github.com/spf13/jwalterweatherman/LICENSE b/tools/vendor/github.com/sagikazarmark/locafero/LICENSE similarity index 86% rename from tools/vendor/github.com/spf13/jwalterweatherman/LICENSE rename to tools/vendor/github.com/sagikazarmark/locafero/LICENSE index 4527efb9c..a70b0f296 100644 --- a/tools/vendor/github.com/spf13/jwalterweatherman/LICENSE +++ b/tools/vendor/github.com/sagikazarmark/locafero/LICENSE @@ -1,13 +1,11 @@ -The MIT License (MIT) - -Copyright (c) 2014 Steve Francia +Copyright (c) 2023 Márk Sági-Kazár Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: +copies of the Software, and to permit persons to whom the Software is furnished +to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. @@ -17,5 +15,5 @@ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. \ No newline at end of file +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
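The hunks that follow vendor the new locafero package, a file-finder layer over Afero. To make the API concrete before the code: below is a minimal, hedged sketch of how the `Finder` type defined in the `finder.go` hunk further down is typically driven. The search paths and the `config` base name are invented for illustration; `Finder`, `Find`, `NameWithExtensions`, and `FileTypeFile` all appear in the vendored sources that follow.

```go
package main

import (
	"fmt"
	"log"

	"github.com/sagikazarmark/locafero"
	"github.com/spf13/afero"
)

func main() {
	// Search a couple of (hypothetical) locations for config.yaml / config.json,
	// restricting results to regular files.
	finder := locafero.Finder{
		Paths: []string{"/etc/myapp", "/home/user/.config/myapp"},
		Names: locafero.NameWithExtensions("config", "yaml", "json"),
		Type:  locafero.FileTypeFile,
	}

	// Find walks each search path (concurrently, per the vendored
	// implementation) and returns the matching entries.
	results, err := finder.Find(afero.NewOsFs())
	if err != nil {
		log.Fatal(err)
	}

	for _, path := range results {
		fmt.Println("found:", path)
	}
}
```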
diff --git a/tools/vendor/github.com/sagikazarmark/locafero/README.md b/tools/vendor/github.com/sagikazarmark/locafero/README.md new file mode 100644 index 000000000..a48e8e978 --- /dev/null +++ b/tools/vendor/github.com/sagikazarmark/locafero/README.md @@ -0,0 +1,37 @@ +# Finder library for [Afero](https://github.com/spf13/afero) + +[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/sagikazarmark/locafero/ci.yaml?style=flat-square)](https://github.com/sagikazarmark/locafero/actions/workflows/ci.yaml) +[![go.dev reference](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/mod/github.com/sagikazarmark/locafero) +![Go Version](https://img.shields.io/badge/go%20version-%3E=1.20-61CFDD.svg?style=flat-square) +[![built with nix](https://img.shields.io/badge/builtwith-nix-7d81f7?style=flat-square)](https://builtwithnix.org) + +**Finder library for [Afero](https://github.com/spf13/afero) ported from [go-finder](https://github.com/sagikazarmark/go-finder).** + +> [!WARNING] +> This is an experimental library under development. +> +> **Backwards compatibility is not guaranteed, expect breaking changes.** + +## Installation + +```shell +go get github.com/sagikazarmark/locafero +``` + +## Usage + +Check out the [package example](https://pkg.go.dev/github.com/sagikazarmark/locafero#example-package) on go.dev. + +## Development + +**For an optimal developer experience, it is recommended to install [Nix](https://nixos.org/download.html) and [direnv](https://direnv.net/docs/installation.html).** + +Run the test suite: + +```shell +just test +``` + +## License + +The project is licensed under the [MIT License](LICENSE). diff --git a/tools/vendor/github.com/sagikazarmark/locafero/file_type.go b/tools/vendor/github.com/sagikazarmark/locafero/file_type.go new file mode 100644 index 000000000..9a9b14023 --- /dev/null +++ b/tools/vendor/github.com/sagikazarmark/locafero/file_type.go @@ -0,0 +1,28 @@ +package locafero + +import "io/fs" + +// FileType represents the kind of entries [Finder] can return. +type FileType int + +const ( + FileTypeAll FileType = iota + FileTypeFile + FileTypeDir +) + +func (ft FileType) matchFileInfo(info fs.FileInfo) bool { + switch ft { + case FileTypeAll: + return true + + case FileTypeFile: + return !info.IsDir() + + case FileTypeDir: + return info.IsDir() + + default: + return false + } +} diff --git a/tools/vendor/github.com/sagikazarmark/locafero/finder.go b/tools/vendor/github.com/sagikazarmark/locafero/finder.go new file mode 100644 index 000000000..ef8d54712 --- /dev/null +++ b/tools/vendor/github.com/sagikazarmark/locafero/finder.go @@ -0,0 +1,165 @@ +// Package finder looks for files and directories in an {fs.Fs} filesystem. +package locafero + +import ( + "errors" + "io/fs" + "path/filepath" + "strings" + + "github.com/sourcegraph/conc/iter" + "github.com/spf13/afero" +) + +// Finder looks for files and directories in an [afero.Fs] filesystem. +type Finder struct { + // Paths represents a list of locations that the [Finder] will search in. + // + // They are essentially the root directories or starting points for the search. + // + // Examples: + // - home/user + // - etc + Paths []string + + // Names are specific entries that the [Finder] will look for within the given Paths. + // + // It provides the capability to search for entries with depth, + // meaning it can target deeper locations within the directory structure. 
+ // + // It also supports glob syntax (as defined by [filepath.Match]), offering greater flexibility in search patterns. + // + // Examples: + // - config.yaml + // - home/*/config.yaml + // - home/*/config.* + Names []string + + // Type restricts the kind of entries returned by the [Finder]. + // + // This parameter helps in differentiating and filtering out files from directories or vice versa. + Type FileType +} + +// Find looks for files and directories in an [afero.Fs] filesystem. +func (f Finder) Find(fsys afero.Fs) ([]string, error) { + // Arbitrary go routine limit (TODO: make this a parameter) + // pool := pool.NewWithResults[[]string]().WithMaxGoroutines(5).WithErrors().WithFirstError() + + type searchItem struct { + path string + name string + } + + var searchItems []searchItem + + for _, searchPath := range f.Paths { + searchPath := searchPath + + for _, searchName := range f.Names { + searchName := searchName + + searchItems = append(searchItems, searchItem{searchPath, searchName}) + + // pool.Go(func() ([]string, error) { + // // If the name contains any glob character, perform a glob match + // if strings.ContainsAny(searchName, globMatch) { + // return globWalkSearch(fsys, searchPath, searchName, f.Type) + // } + // + // return statSearch(fsys, searchPath, searchName, f.Type) + // }) + } + } + + // allResults, err := pool.Wait() + // if err != nil { + // return nil, err + // } + + allResults, err := iter.MapErr(searchItems, func(item *searchItem) ([]string, error) { + // If the name contains any glob character, perform a glob match + if strings.ContainsAny(item.name, globMatch) { + return globWalkSearch(fsys, item.path, item.name, f.Type) + } + + return statSearch(fsys, item.path, item.name, f.Type) + }) + if err != nil { + return nil, err + } + + var results []string + + for _, r := range allResults { + results = append(results, r...) 
+ } + + // Sort results in alphabetical order for now + // sort.Strings(results) + + return results, nil +} + +func globWalkSearch(fsys afero.Fs, searchPath string, searchName string, searchType FileType) ([]string, error) { + var results []string + + err := afero.Walk(fsys, searchPath, func(p string, fileInfo fs.FileInfo, err error) error { + if err != nil { + return err + } + + // Skip the root path + if p == searchPath { + return nil + } + + var result error + + // Stop reading subdirectories + // TODO: add depth detection here + if fileInfo.IsDir() && filepath.Dir(p) == searchPath { + result = fs.SkipDir + } + + // Skip unmatching type + if !searchType.matchFileInfo(fileInfo) { + return result + } + + match, err := filepath.Match(searchName, fileInfo.Name()) + if err != nil { + return err + } + + if match { + results = append(results, p) + } + + return result + }) + if err != nil { + return results, err + } + + return results, nil +} + +func statSearch(fsys afero.Fs, searchPath string, searchName string, searchType FileType) ([]string, error) { + filePath := filepath.Join(searchPath, searchName) + + fileInfo, err := fsys.Stat(filePath) + if errors.Is(err, fs.ErrNotExist) { + return nil, nil + } + if err != nil { + return nil, err + } + + // Skip unmatching type + if !searchType.matchFileInfo(fileInfo) { + return nil, nil + } + + return []string{filePath}, nil +} diff --git a/tools/vendor/github.com/sagikazarmark/locafero/flake.lock b/tools/vendor/github.com/sagikazarmark/locafero/flake.lock new file mode 100644 index 000000000..df2a8ccec --- /dev/null +++ b/tools/vendor/github.com/sagikazarmark/locafero/flake.lock @@ -0,0 +1,472 @@ +{ + "nodes": { + "cachix": { + "inputs": { + "devenv": "devenv_2", + "flake-compat": [ + "devenv", + "flake-compat" + ], + "nixpkgs": [ + "devenv", + "nixpkgs" + ], + "pre-commit-hooks": [ + "devenv", + "pre-commit-hooks" + ] + }, + "locked": { + "lastModified": 1712055811, + "narHash": "sha256-7FcfMm5A/f02yyzuavJe06zLa9hcMHsagE28ADcmQvk=", + "owner": "cachix", + "repo": "cachix", + "rev": "02e38da89851ec7fec3356a5c04bc8349cae0e30", + "type": "github" + }, + "original": { + "owner": "cachix", + "repo": "cachix", + "type": "github" + } + }, + "devenv": { + "inputs": { + "cachix": "cachix", + "flake-compat": "flake-compat_2", + "nix": "nix_2", + "nixpkgs": "nixpkgs_2", + "pre-commit-hooks": "pre-commit-hooks" + }, + "locked": { + "lastModified": 1725907707, + "narHash": "sha256-s3pbtzZmVPHzc86WQjK7MGZMNvvw6hWnFMljEkllAfM=", + "owner": "cachix", + "repo": "devenv", + "rev": "2bbbbc468fc02257265a79652a8350651cca495a", + "type": "github" + }, + "original": { + "owner": "cachix", + "repo": "devenv", + "type": "github" + } + }, + "devenv_2": { + "inputs": { + "flake-compat": [ + "devenv", + "cachix", + "flake-compat" + ], + "nix": "nix", + "nixpkgs": "nixpkgs", + "poetry2nix": "poetry2nix", + "pre-commit-hooks": [ + "devenv", + "cachix", + "pre-commit-hooks" + ] + }, + "locked": { + "lastModified": 1708704632, + "narHash": "sha256-w+dOIW60FKMaHI1q5714CSibk99JfYxm0CzTinYWr+Q=", + "owner": "cachix", + "repo": "devenv", + "rev": "2ee4450b0f4b95a1b90f2eb5ffea98b90e48c196", + "type": "github" + }, + "original": { + "owner": "cachix", + "ref": "python-rewrite", + "repo": "devenv", + "type": "github" + } + }, + "flake-compat": { + "flake": false, + "locked": { + "lastModified": 1673956053, + "narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=", + "owner": "edolstra", + "repo": "flake-compat", + "rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9", + "type": 
"github" + }, + "original": { + "owner": "edolstra", + "repo": "flake-compat", + "type": "github" + } + }, + "flake-compat_2": { + "flake": false, + "locked": { + "lastModified": 1696426674, + "narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=", + "owner": "edolstra", + "repo": "flake-compat", + "rev": "0f9255e01c2351cc7d116c072cb317785dd33b33", + "type": "github" + }, + "original": { + "owner": "edolstra", + "repo": "flake-compat", + "type": "github" + } + }, + "flake-parts": { + "inputs": { + "nixpkgs-lib": "nixpkgs-lib" + }, + "locked": { + "lastModified": 1725234343, + "narHash": "sha256-+ebgonl3NbiKD2UD0x4BszCZQ6sTfL4xioaM49o5B3Y=", + "owner": "hercules-ci", + "repo": "flake-parts", + "rev": "567b938d64d4b4112ee253b9274472dc3a346eb6", + "type": "github" + }, + "original": { + "owner": "hercules-ci", + "repo": "flake-parts", + "type": "github" + } + }, + "flake-utils": { + "inputs": { + "systems": "systems" + }, + "locked": { + "lastModified": 1689068808, + "narHash": "sha256-6ixXo3wt24N/melDWjq70UuHQLxGV8jZvooRanIHXw0=", + "owner": "numtide", + "repo": "flake-utils", + "rev": "919d646de7be200f3bf08cb76ae1f09402b6f9b4", + "type": "github" + }, + "original": { + "owner": "numtide", + "repo": "flake-utils", + "type": "github" + } + }, + "flake-utils_2": { + "inputs": { + "systems": "systems_2" + }, + "locked": { + "lastModified": 1710146030, + "narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=", + "owner": "numtide", + "repo": "flake-utils", + "rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a", + "type": "github" + }, + "original": { + "owner": "numtide", + "repo": "flake-utils", + "type": "github" + } + }, + "gitignore": { + "inputs": { + "nixpkgs": [ + "devenv", + "pre-commit-hooks", + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1709087332, + "narHash": "sha256-HG2cCnktfHsKV0s4XW83gU3F57gaTljL9KNSuG6bnQs=", + "owner": "hercules-ci", + "repo": "gitignore.nix", + "rev": "637db329424fd7e46cf4185293b9cc8c88c95394", + "type": "github" + }, + "original": { + "owner": "hercules-ci", + "repo": "gitignore.nix", + "type": "github" + } + }, + "nix": { + "inputs": { + "flake-compat": "flake-compat", + "nixpkgs": [ + "devenv", + "cachix", + "devenv", + "nixpkgs" + ], + "nixpkgs-regression": "nixpkgs-regression" + }, + "locked": { + "lastModified": 1712911606, + "narHash": "sha256-BGvBhepCufsjcUkXnEEXhEVjwdJAwPglCC2+bInc794=", + "owner": "domenkozar", + "repo": "nix", + "rev": "b24a9318ea3f3600c1e24b4a00691ee912d4de12", + "type": "github" + }, + "original": { + "owner": "domenkozar", + "ref": "devenv-2.21", + "repo": "nix", + "type": "github" + } + }, + "nix-github-actions": { + "inputs": { + "nixpkgs": [ + "devenv", + "cachix", + "devenv", + "poetry2nix", + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1688870561, + "narHash": "sha256-4UYkifnPEw1nAzqqPOTL2MvWtm3sNGw1UTYTalkTcGY=", + "owner": "nix-community", + "repo": "nix-github-actions", + "rev": "165b1650b753316aa7f1787f3005a8d2da0f5301", + "type": "github" + }, + "original": { + "owner": "nix-community", + "repo": "nix-github-actions", + "type": "github" + } + }, + "nix_2": { + "inputs": { + "flake-compat": [ + "devenv", + "flake-compat" + ], + "nixpkgs": [ + "devenv", + "nixpkgs" + ], + "nixpkgs-regression": "nixpkgs-regression_2" + }, + "locked": { + "lastModified": 1712911606, + "narHash": "sha256-BGvBhepCufsjcUkXnEEXhEVjwdJAwPglCC2+bInc794=", + "owner": "domenkozar", + "repo": "nix", + "rev": "b24a9318ea3f3600c1e24b4a00691ee912d4de12", + "type": "github" + }, + "original": { + "owner": "domenkozar", + 
"ref": "devenv-2.21", + "repo": "nix", + "type": "github" + } + }, + "nixpkgs": { + "locked": { + "lastModified": 1692808169, + "narHash": "sha256-x9Opq06rIiwdwGeK2Ykj69dNc2IvUH1fY55Wm7atwrE=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "9201b5ff357e781bf014d0330d18555695df7ba8", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "nixpkgs-unstable", + "repo": "nixpkgs", + "type": "github" + } + }, + "nixpkgs-lib": { + "locked": { + "lastModified": 1725233747, + "narHash": "sha256-Ss8QWLXdr2JCBPcYChJhz4xJm+h/xjl4G0c0XlP6a74=", + "type": "tarball", + "url": "https://github.com/NixOS/nixpkgs/archive/356624c12086a18f2ea2825fed34523d60ccc4e3.tar.gz" + }, + "original": { + "type": "tarball", + "url": "https://github.com/NixOS/nixpkgs/archive/356624c12086a18f2ea2825fed34523d60ccc4e3.tar.gz" + } + }, + "nixpkgs-regression": { + "locked": { + "lastModified": 1643052045, + "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", + "type": "github" + }, + "original": { + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", + "type": "github" + } + }, + "nixpkgs-regression_2": { + "locked": { + "lastModified": 1643052045, + "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", + "type": "github" + }, + "original": { + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", + "type": "github" + } + }, + "nixpkgs-stable": { + "locked": { + "lastModified": 1710695816, + "narHash": "sha256-3Eh7fhEID17pv9ZxrPwCLfqXnYP006RKzSs0JptsN84=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "614b4613980a522ba49f0d194531beddbb7220d3", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "nixos-23.11", + "repo": "nixpkgs", + "type": "github" + } + }, + "nixpkgs_2": { + "locked": { + "lastModified": 1713361204, + "narHash": "sha256-TA6EDunWTkc5FvDCqU3W2T3SFn0gRZqh6D/hJnM02MM=", + "owner": "cachix", + "repo": "devenv-nixpkgs", + "rev": "285676e87ad9f0ca23d8714a6ab61e7e027020c6", + "type": "github" + }, + "original": { + "owner": "cachix", + "ref": "rolling", + "repo": "devenv-nixpkgs", + "type": "github" + } + }, + "nixpkgs_3": { + "locked": { + "lastModified": 1725910328, + "narHash": "sha256-n9pCtzGZ0httmTwMuEbi5E78UQ4ZbQMr1pzi5N0LAG8=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "5775c2583f1801df7b790bf7f7d710a19bac66f4", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "nixpkgs-unstable", + "repo": "nixpkgs", + "type": "github" + } + }, + "poetry2nix": { + "inputs": { + "flake-utils": "flake-utils", + "nix-github-actions": "nix-github-actions", + "nixpkgs": [ + "devenv", + "cachix", + "devenv", + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1692876271, + "narHash": "sha256-IXfZEkI0Mal5y1jr6IRWMqK8GW2/f28xJenZIPQqkY0=", + "owner": "nix-community", + "repo": "poetry2nix", + "rev": "d5006be9c2c2417dafb2e2e5034d83fabd207ee3", + "type": "github" + }, + "original": { + "owner": "nix-community", + "repo": "poetry2nix", + "type": "github" + } + }, + "pre-commit-hooks": { + "inputs": { + "flake-compat": [ + "devenv", + "flake-compat" + ], + "flake-utils": "flake-utils_2", + "gitignore": "gitignore", + "nixpkgs": [ + "devenv", + "nixpkgs" + ], + "nixpkgs-stable": "nixpkgs-stable" + }, + "locked": { + "lastModified": 1713775815, + "narHash": 
"sha256-Wu9cdYTnGQQwtT20QQMg7jzkANKQjwBD9iccfGKkfls=", + "owner": "cachix", + "repo": "pre-commit-hooks.nix", + "rev": "2ac4dcbf55ed43f3be0bae15e181f08a57af24a4", + "type": "github" + }, + "original": { + "owner": "cachix", + "repo": "pre-commit-hooks.nix", + "type": "github" + } + }, + "root": { + "inputs": { + "devenv": "devenv", + "flake-parts": "flake-parts", + "nixpkgs": "nixpkgs_3" + } + }, + "systems": { + "locked": { + "lastModified": 1681028828, + "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", + "owner": "nix-systems", + "repo": "default", + "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", + "type": "github" + }, + "original": { + "owner": "nix-systems", + "repo": "default", + "type": "github" + } + }, + "systems_2": { + "locked": { + "lastModified": 1681028828, + "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", + "owner": "nix-systems", + "repo": "default", + "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", + "type": "github" + }, + "original": { + "owner": "nix-systems", + "repo": "default", + "type": "github" + } + } + }, + "root": "root", + "version": 7 +} diff --git a/vendor/github.com/sagikazarmark/slog-shim/flake.nix b/tools/vendor/github.com/sagikazarmark/locafero/flake.nix similarity index 70% rename from vendor/github.com/sagikazarmark/slog-shim/flake.nix rename to tools/vendor/github.com/sagikazarmark/locafero/flake.nix index 7239bbc2e..312f1ec8c 100644 --- a/vendor/github.com/sagikazarmark/slog-shim/flake.nix +++ b/tools/vendor/github.com/sagikazarmark/locafero/flake.nix @@ -1,7 +1,8 @@ { + description = "Finder library for Afero"; + inputs = { - # nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable"; - nixpkgs.url = "github:NixOS/nixpkgs/master"; + nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable"; flake-parts.url = "github:hercules-ci/flake-parts"; devenv.url = "github:cachix/devenv"; }; @@ -12,43 +13,49 @@ inputs.devenv.flakeModule ]; - systems = [ "x86_64-linux" "x86_64-darwin" "aarch64-darwin" ]; + systems = [ "x86_64-linux" "aarch64-darwin" ]; perSystem = { config, self', inputs', pkgs, system, ... 
}: rec { devenv.shells = { default = { languages = { go.enable = true; - go.package = pkgs.lib.mkDefault pkgs.go_1_21; + go.package = pkgs.lib.mkDefault pkgs.go_1_23; }; + packages = with pkgs; [ + just + + golangci-lint + ]; + # https://github.com/cachix/devenv/issues/528#issuecomment-1556108767 containers = pkgs.lib.mkForce { }; }; ci = devenv.shells.default; - ci_1_19 = { + ci_1_21 = { imports = [ devenv.shells.ci ]; languages = { - go.package = pkgs.go_1_19; + go.package = pkgs.go_1_21; }; }; - ci_1_20 = { + ci_1_22 = { imports = [ devenv.shells.ci ]; languages = { - go.package = pkgs.go_1_20; + go.package = pkgs.go_1_22; }; }; - ci_1_21 = { + ci_1_23 = { imports = [ devenv.shells.ci ]; languages = { - go.package = pkgs.go_1_21; + go.package = pkgs.go_1_23; }; }; }; diff --git a/tools/vendor/github.com/sagikazarmark/locafero/glob.go b/tools/vendor/github.com/sagikazarmark/locafero/glob.go new file mode 100644 index 000000000..00f833e99 --- /dev/null +++ b/tools/vendor/github.com/sagikazarmark/locafero/glob.go @@ -0,0 +1,5 @@ +//go:build !windows + +package locafero + +const globMatch = "*?[]\\^" diff --git a/tools/vendor/github.com/sagikazarmark/locafero/glob_windows.go b/tools/vendor/github.com/sagikazarmark/locafero/glob_windows.go new file mode 100644 index 000000000..7aec2b247 --- /dev/null +++ b/tools/vendor/github.com/sagikazarmark/locafero/glob_windows.go @@ -0,0 +1,8 @@ +//go:build windows + +package locafero + +// See [filepath.Match]: +// +// On Windows, escaping is disabled. Instead, '\\' is treated as path separator. +const globMatch = "*?[]^" diff --git a/tools/vendor/github.com/sagikazarmark/locafero/helpers.go b/tools/vendor/github.com/sagikazarmark/locafero/helpers.go new file mode 100644 index 000000000..05b434481 --- /dev/null +++ b/tools/vendor/github.com/sagikazarmark/locafero/helpers.go @@ -0,0 +1,41 @@ +package locafero + +import "fmt" + +// NameWithExtensions creates a list of names from a base name and a list of extensions. +// +// TODO: find a better name for this function. +func NameWithExtensions(baseName string, extensions ...string) []string { + var names []string + + if baseName == "" { + return names + } + + for _, ext := range extensions { + if ext == "" { + continue + } + + names = append(names, fmt.Sprintf("%s.%s", baseName, ext)) + } + + return names +} + +// NameWithOptionalExtensions creates a list of names from a base name and a list of extensions, +// plus it adds the base name (without any extensions) to the end of the list. +// +// TODO: find a better name for this function. +func NameWithOptionalExtensions(baseName string, extensions ...string) []string { + var names []string + + if baseName == "" { + return names + } + + names = NameWithExtensions(baseName, extensions...) + names = append(names, baseName) + + return names +} diff --git a/tools/vendor/github.com/sagikazarmark/locafero/justfile b/tools/vendor/github.com/sagikazarmark/locafero/justfile new file mode 100644 index 000000000..00a88850c --- /dev/null +++ b/tools/vendor/github.com/sagikazarmark/locafero/justfile @@ -0,0 +1,11 @@ +default: + just --list + +test: + go test -race -v ./... 
+ +lint: + golangci-lint run + +fmt: + golangci-lint run --fix diff --git a/tools/vendor/github.com/sourcegraph/conc/.golangci.yml b/tools/vendor/github.com/sourcegraph/conc/.golangci.yml new file mode 100644 index 000000000..ae65a760a --- /dev/null +++ b/tools/vendor/github.com/sourcegraph/conc/.golangci.yml @@ -0,0 +1,11 @@ +linters: + disable-all: true + enable: + - errcheck + - godot + - gosimple + - govet + - ineffassign + - staticcheck + - typecheck + - unused diff --git a/vendor/github.com/aead/chacha20/LICENSE b/tools/vendor/github.com/sourcegraph/conc/LICENSE similarity index 94% rename from vendor/github.com/aead/chacha20/LICENSE rename to tools/vendor/github.com/sourcegraph/conc/LICENSE index b6a9210b9..1081f4ef4 100644 --- a/vendor/github.com/aead/chacha20/LICENSE +++ b/tools/vendor/github.com/sourcegraph/conc/LICENSE @@ -1,6 +1,6 @@ -The MIT License (MIT) +MIT License -Copyright (c) 2016 Andreas Auernhammer +Copyright (c) 2023 Sourcegraph Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/tools/vendor/github.com/sourcegraph/conc/README.md b/tools/vendor/github.com/sourcegraph/conc/README.md new file mode 100644 index 000000000..1c87c3c96 --- /dev/null +++ b/tools/vendor/github.com/sourcegraph/conc/README.md @@ -0,0 +1,464 @@ +![conch](https://user-images.githubusercontent.com/12631702/210295964-785cc63d-d697-420c-99ff-f492eb81dec9.svg) + +# `conc`: better structured concurrency for go + +[![Go Reference](https://pkg.go.dev/badge/github.com/sourcegraph/conc.svg)](https://pkg.go.dev/github.com/sourcegraph/conc) +[![Sourcegraph](https://img.shields.io/badge/view%20on-sourcegraph-A112FE?logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADIAAAAyCAYAAAAeP4ixAAAEZklEQVRoQ+2aXWgUZxSG3292sxtNN43BhBakFPyhxSujRSxiU1pr7SaGXqgUxOIEW0IFkeYighYUxAuLUlq0lrq2iCDpjWtmFVtoG6QVNOCFVShVLyxIk0DVjZLMxt3xTGTccd2ZOd/8JBHci0CY9zvnPPN+/7sCIXwKavOwAcy2QgngQiIztDSE0OwQlDPYR1ebiaH6J5kZChyfW12gRG4QVgGTBfMchMbFP9Sn5nlZL2D0JjLD6710lc+z0NfqSGTXQRQ4bX07Mq423yoBL3OSyHSvUxirMuaEvgbJWrdcvkHMoJwxYuq4INUhyuWvQa1jvdMGxAvCxJlyEC9XOBCWL04wwRzpbDoDQ7wfZJzIQLi5Eggk6DiRhZgWIAbE3NrM4A3LPT8Q7UgqAqLqTmLSHLGPkyzG/qXEczhd0q6RH+zaSBfaUoc4iQx19pIClIscrTkNZzG6gd7qMY6eC2Hqyo705ZfTf+eqJmhMzcSbYtQpOXc92ZsZjLVAL4YNUQbJ5Ttg4CQrQdGYj44Xr9m1XJCzmZusFDJOWNpHjmh5x624a2ZFtOKDVL+uNo2TuXE3bZQQZUf8gtgqP31uI94Z/rMqix+IGiRfWw3xN9dCgVx+L3WrHm4Dju6PXz/EkjuXJ6R+IGgyOE1TbZqTq9y1eo0EZo7oMo1ktPu3xjHvuiLT5AFNszUyDULtWpzE2/fEsey8O5TbWuGWwxrs5rS7nFNMWJrNh2No74s9Ec4vRNmRRzPXMP19fBMSVsGcOJ98G8N3Wl2gXcbTjbX7vUBxLaeASDQCm5Cu/0E2tvtb0Ea+BowtskFD0wvlc6Rf2M+Jx7dTu7ubFr2dnKDRaMQe2v/tcIrNB7FH0O50AcrBaApmRDVwFO31ql3pD8QW4dP0feNwl/Q+kFEtRyIGyaWXnpy1OO0qNJWHo1y6iCmAGkBb/Ru+HenDWIF2mo4r8G+tRRzoniSn2uqFLxANhe9LKHVyTbz6egk9+x5w5fK6ulSNNMhZ/Feno+GebLZV6isTTa6k5qNl5RnZ5u56Ib6SBvFzaWBBVFZzvnERWlt/Cg4l27XChLCqFyLekjhy6xJyoytgjPf7opIB8QPx7sYFiMXHPGt76m741MhCKMZfng0nBOIjmoJPsLqWHwgFpe6V6qtfcopxveR2Oy+J0ntIN/zCWkf8QNAJ7y6d8Bq4lxLc2/qJl5K7t432XwcqX5CrI34gzATWuYILQtdQPyePDK3iuOekCR3Efjhig1B1Uq5UoXEEoZX7d1q535J5S9VOeFyYyEBku5XTMXXKQTToX5Rg7OI44nbW5oKYeYK4EniMeF0YFNSmb+grhc84LyRCEP1/OurOcipCQbKxDeK2V5FcVyIDMQvsgz5gwFhcWWwKyRlvQ3gv29RwWoDYAbIofNyBxI9eDlQ+n3YgsgCWnr4MStGXQXmv9pF2La/k3OccV54JEBM4yp9EsXa/3LfO0dGPcYq0Y7DfZB8nJzZw2rppHgKgVHs8L5wvRwAAAABJRU5ErkJggg==)](https://sourcegraph.com/github.com/sourcegraph/conc) +[![Go Report Card](https://goreportcard.com/badge/github.com/sourcegraph/conc)](https://goreportcard.com/report/github.com/sourcegraph/conc) 
+[![codecov](https://codecov.io/gh/sourcegraph/conc/branch/main/graph/badge.svg?token=MQZTEA1QWT)](https://codecov.io/gh/sourcegraph/conc) +[![Discord](https://img.shields.io/badge/discord-chat-%235765F2)](https://discord.gg/bvXQXmtRjN) + +`conc` is your toolbelt for structured concurrency in go, making common tasks +easier and safer. + +```sh +go get github.com/sourcegraph/conc +``` + +# At a glance + +- Use [`conc.WaitGroup`](https://pkg.go.dev/github.com/sourcegraph/conc#WaitGroup) if you just want a safer version of `sync.WaitGroup` +- Use [`pool.Pool`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#Pool) if you want a concurrency-limited task runner +- Use [`pool.ResultPool`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#ResultPool) if you want a concurrent task runner that collects task results +- Use [`pool.(Result)?ErrorPool`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#ErrorPool) if your tasks are fallible +- Use [`pool.(Result)?ContextPool`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#ContextPool) if your tasks should be canceled on failure +- Use [`stream.Stream`](https://pkg.go.dev/github.com/sourcegraph/conc/stream#Stream) if you want to process an ordered stream of tasks in parallel with serial callbacks +- Use [`iter.Map`](https://pkg.go.dev/github.com/sourcegraph/conc/iter#Map) if you want to concurrently map a slice +- Use [`iter.ForEach`](https://pkg.go.dev/github.com/sourcegraph/conc/iter#ForEach) if you want to concurrently iterate over a slice +- Use [`panics.Catcher`](https://pkg.go.dev/github.com/sourcegraph/conc/panics#Catcher) if you want to catch panics in your own goroutines + +All pools are created with +[`pool.New()`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#New) +or +[`pool.NewWithResults[T]()`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#NewWithResults), +then configured with methods: + +- [`p.WithMaxGoroutines()`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#Pool.MaxGoroutines) configures the maximum number of goroutines in the pool +- [`p.WithErrors()`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#Pool.WithErrors) configures the pool to run tasks that return errors +- [`p.WithContext(ctx)`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#Pool.WithContext) configures the pool to run tasks that should be canceled on first error +- [`p.WithFirstError()`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#ErrorPool.WithFirstError) configures error pools to only keep the first returned error rather than an aggregated error +- [`p.WithCollectErrored()`](https://pkg.go.dev/github.com/sourcegraph/conc/pool#ResultContextPool.WithCollectErrored) configures result pools to collect results even when the task errored + +# Goals + +The main goals of the package are: +1) Make it harder to leak goroutines +2) Handle panics gracefully +3) Make concurrent code easier to read + +## Goal #1: Make it harder to leak goroutines + +A common pain point when working with goroutines is cleaning them up. It's +really easy to fire off a `go` statement and fail to properly wait for it to +complete. + +`conc` takes the opinionated stance that all concurrency should be scoped. +That is, goroutines should have an owner and that owner should always +ensure that its owned goroutines exit properly. + +In `conc`, the owner of a goroutine is always a `conc.WaitGroup`. Goroutines +are spawned in a `WaitGroup` with `(*WaitGroup).Go()`, and +`(*WaitGroup).Wait()` should always be called before the `WaitGroup` goes out +of scope. 
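To make the ownership rule above concrete, here is a minimal sketch that uses only the `wg.Go`/`wg.Wait` API this README describes; the worker loop itself is invented for illustration.

```go
package main

import (
	"fmt"

	"github.com/sourcegraph/conc"
)

func main() {
	// The WaitGroup owns every goroutine spawned through it.
	var wg conc.WaitGroup

	for i := 0; i < 3; i++ {
		i := i // capture the loop variable for the closure
		wg.Go(func() {
			fmt.Println("worker", i)
		})
	}

	// Wait is called before the WaitGroup goes out of scope,
	// so no goroutine can outlive its owner.
	wg.Wait()
}
```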
+
+In some cases, you might want a spawned goroutine to outlast the scope of the
+caller. In that case, you could pass a `WaitGroup` into the spawning function.
+
+```go
+func main() {
+	var wg conc.WaitGroup
+	defer wg.Wait()
+
+	startTheThing(&wg)
+}
+
+func startTheThing(wg *conc.WaitGroup) {
+	wg.Go(func() { ... })
+}
+```
+
+For some more discussion on why scoped concurrency is nice, check out [this
+blog
+post](https://vorpus.org/blog/notes-on-structured-concurrency-or-go-statement-considered-harmful/).
+
+## Goal #2: Handle panics gracefully
+
+A frequent problem with goroutines in long-running applications is handling
+panics. A goroutine spawned without a panic handler will crash the whole process
+on panic. This is usually undesirable.
+
+However, if you do add a panic handler to a goroutine, what do you do with the
+panic once you catch it? Some options:
+1) Ignore it
+2) Log it
+3) Turn it into an error and return that to the goroutine spawner
+4) Propagate the panic to the goroutine spawner
+
+Ignoring panics is a bad idea since panics usually mean there is actually
+something wrong and someone should fix it.
+
+Just logging panics isn't great either because then there is no indication to the spawner
+that something bad happened, and it might just continue on as normal even though your
+program is in a really bad state.
+
+Both (3) and (4) are reasonable options, but both require the goroutine to have
+an owner that can actually receive the message that something went wrong. This
+is generally not true with a goroutine spawned with `go`, but in the `conc`
+package, all goroutines have an owner that must collect the spawned goroutine.
+In the conc package, any call to `Wait()` will panic if any of the spawned goroutines
+panicked. Additionally, it decorates the panic value with a stacktrace from the child
+goroutine so that you don't lose information about what caused the panic.
+
+Doing this all correctly every time you spawn something with `go` is not
+trivial and it requires a lot of boilerplate that makes the important parts of
+the code more difficult to read, so `conc` does this for you.
+
+**stdlib:**
+
+```go
+type caughtPanicError struct {
+	val   any
+	stack []byte
+}
+
+func (e *caughtPanicError) Error() string {
+	return fmt.Sprintf(
+		"panic: %q\n%s",
+		e.val,
+		string(e.stack),
+	)
+}
+
+func main() {
+	done := make(chan error)
+	go func() {
+		defer func() {
+			if v := recover(); v != nil {
+				done <- &caughtPanicError{
+					val:   v,
+					stack: debug.Stack(),
+				}
+			} else {
+				done <- nil
+			}
+		}()
+		doSomethingThatMightPanic()
+	}()
+	err := <-done
+	if err != nil {
+		panic(err)
+	}
+}
+```
+
+**conc:**
+
+```go
+func main() {
+	var wg conc.WaitGroup
+	wg.Go(doSomethingThatMightPanic)
+	// panics with a nice stacktrace
+	wg.Wait()
+}
+```
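+
+If crashing is not an option, the caught panic can also be handled as a
+value: a sketch using `WaitAndRecover` (its implementation appears in
+`waitgroup.go` later in this patch), which returns the first caught panic as
+a `*panics.Recovered` instead of re-panicking. `log` is only used for
+illustration:
+
+```go
+func main() {
+	var wg conc.WaitGroup
+	wg.Go(doSomethingThatMightPanic)
+	if r := wg.WaitAndRecover(); r != nil {
+		// r.AsError() wraps the panic in an error; r.Stack holds the
+		// stacktrace from the goroutine that panicked.
+		log.Println(r.AsError())
+	}
+}
+```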
+
+## Goal #3: Make concurrent code easier to read
+
+Doing concurrency correctly is difficult. Doing it in a way that doesn't
+obfuscate what the code is actually doing is more difficult. The `conc` package
+attempts to make common operations easier by abstracting as much boilerplate
+complexity as possible.
+
+Want to run a set of concurrent tasks with a bounded set of goroutines? Use
+`pool.New()`. Want to process an ordered stream of results concurrently, but
+still maintain order? Try `stream.New()`. What about a concurrent map over
+a slice? Take a peek at `iter.Map()`.
+
+Browse some examples below for some comparisons with doing these by hand.
+
+# Examples
+
+Each of these examples forgoes propagating panics for simplicity. To see
+what kind of complexity that would add, check out the "Goal #2" header above.
+
+Spawn a set of goroutines and wait for them to finish:
+
+**stdlib:**
+
+```go
+func main() {
+	var wg sync.WaitGroup
+	for i := 0; i < 10; i++ {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			// crashes on panic!
+			doSomething()
+		}()
+	}
+	wg.Wait()
+}
+```
+
+**conc:**
+
+```go
+func main() {
+	var wg conc.WaitGroup
+	for i := 0; i < 10; i++ {
+		wg.Go(doSomething)
+	}
+	wg.Wait()
+}
+```
+
+Process each element of a stream in a static pool of goroutines:
+
+**stdlib:**
+
+```go
+func process(stream chan int) {
+	var wg sync.WaitGroup
+	for i := 0; i < 10; i++ {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			for elem := range stream {
+				handle(elem)
+			}
+		}()
+	}
+	wg.Wait()
+}
+```
+
+**conc:**
+
+```go
+func process(stream chan int) {
+	p := pool.New().WithMaxGoroutines(10)
+	for elem := range stream {
+		elem := elem
+		p.Go(func() {
+			handle(elem)
+		})
+	}
+	p.Wait()
+}
+```
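+
+The same pool shape extends to fallible tasks via the `WithErrors` and
+`WithContext` options listed at the top. A hedged sketch, not taken from the
+upstream examples: `fetch` is a hypothetical fallible helper, and the
+context-pool task signature shown here is an assumption based on the method
+list above:
+
+```go
+func fetchAll(ctx context.Context, urls []string) error {
+	p := pool.New().WithContext(ctx)
+	for _, url := range urls {
+		url := url // capture the loop variable
+		p.Go(func(ctx context.Context) error {
+			return fetch(ctx, url) // hypothetical helper
+		})
+	}
+	// Wait returns the tasks' errors (combined by default;
+	// see WithFirstError above for keeping only the first).
+	return p.Wait()
+}
+```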
+
+Process each element of a slice in a static pool of goroutines:
+
+**stdlib:**
+
+```go
+func process(values []int) {
+	feeder := make(chan int, 8)
+
+	var wg sync.WaitGroup
+	for i := 0; i < 10; i++ {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			for elem := range feeder {
+				handle(elem)
+			}
+		}()
+	}
+
+	for _, value := range values {
+		feeder <- value
+	}
+	close(feeder)
+	wg.Wait()
+}
+```
+
+**conc:**
+
+```go
+func process(values []int) {
+	iter.ForEach(values, handle)
+}
+```
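+
+`iter.ForEach` caps concurrency at `runtime.GOMAXPROCS(0)`. Per the `iter`
+package source included later in this patch, the limit is configurable by
+calling the same method on an explicit `Iterator`. A small sketch, where
+`handle` is the same placeholder as above (adapted to the pointer-taking
+callback signature):
+
+```go
+func process(values []int) {
+	it := iter.Iterator[int]{MaxGoroutines: 4}
+	it.ForEach(values, func(v *int) {
+		handle(*v) // placeholder task
+	})
+}
+```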
+
+Concurrently map a slice:
+
+**stdlib:**
+
+```go
+func concMap(
+	input []int,
+	f func(int) int,
+) []int {
+	res := make([]int, len(input))
+	var idx atomic.Int64
+
+	var wg sync.WaitGroup
+	for i := 0; i < 10; i++ {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+
+			for {
+				i := int(idx.Add(1) - 1)
+				if i >= len(input) {
+					return
+				}
+
+				res[i] = f(input[i])
+			}
+		}()
+	}
+	wg.Wait()
+	return res
+}
+```
+
+**conc:**
+
+```go
+func concMap(
+	input []int,
+	f func(*int) int,
+) []int {
+	return iter.Map(input, f)
+}
+```
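+
+When the mapping function can fail, the same package provides `MapErr`
+(shown later in this patch in `iter/map.go`), which returns the mapped
+results together with a combined error. A sketch using the standard
+`strconv.Atoi` as the fallible function:
+
+```go
+func parseAll(inputs []string) ([]int, error) {
+	// MapErr collects every error returned by the callback
+	// into one combined error.
+	return iter.MapErr(inputs, func(s *string) (int, error) {
+		return strconv.Atoi(*s)
+	})
+}
+```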
+
+Process an ordered stream concurrently:
+
+**stdlib:**
+
+```go
+func mapStream(
+	in chan int,
+	out chan int,
+	f func(int) int,
+) {
+	tasks := make(chan func())
+	taskResults := make(chan chan int)
+
+	// Worker goroutines
+	var workerWg sync.WaitGroup
+	for i := 0; i < 10; i++ {
+		workerWg.Add(1)
+		go func() {
+			defer workerWg.Done()
+			for task := range tasks {
+				task()
+			}
+		}()
+	}
+
+	// Ordered reader goroutines
+	var readerWg sync.WaitGroup
+	readerWg.Add(1)
+	go func() {
+		defer readerWg.Done()
+		for result := range taskResults {
+			item := <-result
+			out <- item
+		}
+	}()
+
+	// Feed the workers with tasks
+	for elem := range in {
+		elem := elem // capture the loop variable for the closure below
+		resultCh := make(chan int, 1)
+		taskResults <- resultCh
+		tasks <- func() {
+			resultCh <- f(elem)
+		}
+	}
+
+	// We've exhausted input.
+	// Wait for everything to finish
+	close(tasks)
+	workerWg.Wait()
+	close(taskResults)
+	readerWg.Wait()
+}
+```
+
+**conc:**
+
+```go
+func mapStream(
+	in chan int,
+	out chan int,
+	f func(int) int,
+) {
+	s := stream.New().WithMaxGoroutines(10)
+	for elem := range in {
+		elem := elem
+		s.Go(func() stream.Callback {
+			res := f(elem)
+			return func() { out <- res }
+		})
+	}
+	s.Wait()
+}
+```
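+
+One pattern from the list at the top that has no comparison here is
+collecting task results with a pool. A hedged sketch only, assuming the
+`pool.NewWithResults` constructor and chainable configuration referenced
+above:
+
+```go
+func squares(ns []int) []int {
+	p := pool.NewWithResults[int]().WithMaxGoroutines(4)
+	for _, n := range ns {
+		n := n // capture the loop variable
+		p.Go(func() int {
+			return n * n // each task's return value is collected
+		})
+	}
+	return p.Wait()
+}
+```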
+ +# Status + +This package is currently pre-1.0. There are likely to be minor breaking +changes before a 1.0 release as we stabilize the APIs and tweak defaults. +Please open an issue if you have questions, concerns, or requests that you'd +like addressed before the 1.0 release. Currently, a 1.0 is targeted for +March 2023. diff --git a/tools/vendor/github.com/sourcegraph/conc/internal/multierror/multierror_go119.go b/tools/vendor/github.com/sourcegraph/conc/internal/multierror/multierror_go119.go new file mode 100644 index 000000000..7087e32a8 --- /dev/null +++ b/tools/vendor/github.com/sourcegraph/conc/internal/multierror/multierror_go119.go @@ -0,0 +1,10 @@ +//go:build !go1.20 +// +build !go1.20 + +package multierror + +import "go.uber.org/multierr" + +var ( + Join = multierr.Combine +) diff --git a/tools/vendor/github.com/sourcegraph/conc/internal/multierror/multierror_go120.go b/tools/vendor/github.com/sourcegraph/conc/internal/multierror/multierror_go120.go new file mode 100644 index 000000000..39cff829a --- /dev/null +++ b/tools/vendor/github.com/sourcegraph/conc/internal/multierror/multierror_go120.go @@ -0,0 +1,10 @@ +//go:build go1.20 +// +build go1.20 + +package multierror + +import "errors" + +var ( + Join = errors.Join +) diff --git a/tools/vendor/github.com/sourcegraph/conc/iter/iter.go b/tools/vendor/github.com/sourcegraph/conc/iter/iter.go new file mode 100644 index 000000000..124b4f940 --- /dev/null +++ b/tools/vendor/github.com/sourcegraph/conc/iter/iter.go @@ -0,0 +1,85 @@ +package iter + +import ( + "runtime" + "sync/atomic" + + "github.com/sourcegraph/conc" +) + +// defaultMaxGoroutines returns the default maximum number of +// goroutines to use within this package. +func defaultMaxGoroutines() int { return runtime.GOMAXPROCS(0) } + +// Iterator can be used to configure the behaviour of ForEach +// and ForEachIdx. The zero value is safe to use with reasonable +// defaults. +// +// Iterator is also safe for reuse and concurrent use. +type Iterator[T any] struct { + // MaxGoroutines controls the maximum number of goroutines + // to use on this Iterator's methods. + // + // If unset, MaxGoroutines defaults to runtime.GOMAXPROCS(0). + MaxGoroutines int +} + +// ForEach executes f in parallel over each element in input. +// +// It is safe to mutate the input parameter, which makes it +// possible to map in place. +// +// ForEach always uses at most runtime.GOMAXPROCS goroutines. +// It takes roughly 2µs to start up the goroutines and adds +// an overhead of roughly 50ns per element of input. For +// a configurable goroutine limit, use a custom Iterator. +func ForEach[T any](input []T, f func(*T)) { Iterator[T]{}.ForEach(input, f) } + +// ForEach executes f in parallel over each element in input, +// using up to the Iterator's configured maximum number of +// goroutines. +// +// It is safe to mutate the input parameter, which makes it +// possible to map in place. +// +// It takes roughly 2µs to start up the goroutines and adds +// an overhead of roughly 50ns per element of input. +func (iter Iterator[T]) ForEach(input []T, f func(*T)) { + iter.ForEachIdx(input, func(_ int, t *T) { + f(t) + }) +} + +// ForEachIdx is the same as ForEach except it also provides the +// index of the element to the callback. +func ForEachIdx[T any](input []T, f func(int, *T)) { Iterator[T]{}.ForEachIdx(input, f) } + +// ForEachIdx is the same as ForEach except it also provides the +// index of the element to the callback. 
+func (iter Iterator[T]) ForEachIdx(input []T, f func(int, *T)) { + if iter.MaxGoroutines == 0 { + // iter is a value receiver and is hence safe to mutate + iter.MaxGoroutines = defaultMaxGoroutines() + } + + numInput := len(input) + if iter.MaxGoroutines > numInput { + // No more concurrent tasks than the number of input items. + iter.MaxGoroutines = numInput + } + + var idx atomic.Int64 + // Create the task outside the loop to avoid extra closure allocations. + task := func() { + i := int(idx.Add(1) - 1) + for ; i < numInput; i = int(idx.Add(1) - 1) { + f(i, &input[i]) + } + } + + var wg conc.WaitGroup + for i := 0; i < iter.MaxGoroutines; i++ { + wg.Go(task) + } + wg.Wait() +} diff --git a/tools/vendor/github.com/sourcegraph/conc/iter/map.go b/tools/vendor/github.com/sourcegraph/conc/iter/map.go new file mode 100644 index 000000000..efbe6bfaf --- /dev/null +++ b/tools/vendor/github.com/sourcegraph/conc/iter/map.go @@ -0,0 +1,65 @@ +package iter + +import ( + "sync" + + "github.com/sourcegraph/conc/internal/multierror" +) + +// Mapper is an Iterator with a result type R. It can be used to configure +// the behaviour of Map and MapErr. The zero value is safe to use with +// reasonable defaults. +// +// Mapper is also safe for reuse and concurrent use. +type Mapper[T, R any] Iterator[T] + +// Map applies f to each element of input, returning the mapped result. +// +// Map always uses at most runtime.GOMAXPROCS goroutines. For a configurable +// goroutine limit, use a custom Mapper. +func Map[T, R any](input []T, f func(*T) R) []R { + return Mapper[T, R]{}.Map(input, f) +} + +// Map applies f to each element of input, returning the mapped result. +// +// Map uses up to the configured Mapper's maximum number of goroutines. +func (m Mapper[T, R]) Map(input []T, f func(*T) R) []R { + res := make([]R, len(input)) + Iterator[T](m).ForEachIdx(input, func(i int, t *T) { + res[i] = f(t) + }) + return res +} + +// MapErr applies f to each element of the input, returning the mapped result +// and a combined error of all returned errors. +// +// Map always uses at most runtime.GOMAXPROCS goroutines. For a configurable +// goroutine limit, use a custom Mapper. +func MapErr[T, R any](input []T, f func(*T) (R, error)) ([]R, error) { + return Mapper[T, R]{}.MapErr(input, f) +} + +// MapErr applies f to each element of the input, returning the mapped result +// and a combined error of all returned errors. +// +// Map uses up to the configured Mapper's maximum number of goroutines. +func (m Mapper[T, R]) MapErr(input []T, f func(*T) (R, error)) ([]R, error) { + var ( + res = make([]R, len(input)) + errMux sync.Mutex + errs error + ) + Iterator[T](m).ForEachIdx(input, func(i int, t *T) { + var err error + res[i], err = f(t) + if err != nil { + errMux.Lock() + // TODO: use stdlib errors once multierrors land in go 1.20 + errs = multierror.Join(errs, err) + errMux.Unlock() + } + }) + return res, errs +} diff --git a/tools/vendor/github.com/sourcegraph/conc/panics/panics.go b/tools/vendor/github.com/sourcegraph/conc/panics/panics.go new file mode 100644 index 000000000..abbed7fa0 --- /dev/null +++ b/tools/vendor/github.com/sourcegraph/conc/panics/panics.go @@ -0,0 +1,102 @@ +package panics + +import ( + "fmt" + "runtime" + "runtime/debug" + "sync/atomic" +) + +// Catcher is used to catch panics. You can execute a function with Try, +// which will catch any spawned panic. Try can be called any number of times, +// from any number of goroutines. 
Once all calls to Try have completed, you can +// get the value of the first panic (if any) with Recovered(), or you can just +// propagate the panic (re-panic) with Repanic(). +type Catcher struct { + recovered atomic.Pointer[Recovered] +} + +// Try executes f, catching any panic it might spawn. It is safe +// to call from multiple goroutines simultaneously. +func (p *Catcher) Try(f func()) { + defer p.tryRecover() + f() +} + +func (p *Catcher) tryRecover() { + if val := recover(); val != nil { + rp := NewRecovered(1, val) + p.recovered.CompareAndSwap(nil, &rp) + } +} + +// Repanic panics if any calls to Try caught a panic. It will panic with the +// value of the first panic caught, wrapped in a panics.Recovered with caller +// information. +func (p *Catcher) Repanic() { + if val := p.Recovered(); val != nil { + panic(val) + } +} + +// Recovered returns the value of the first panic caught by Try, or nil if +// no calls to Try panicked. +func (p *Catcher) Recovered() *Recovered { + return p.recovered.Load() +} + +// NewRecovered creates a panics.Recovered from a panic value and a collected +// stacktrace. The skip parameter allows the caller to skip stack frames when +// collecting the stacktrace. Calling with a skip of 0 means include the call to +// NewRecovered in the stacktrace. +func NewRecovered(skip int, value any) Recovered { + // 64 frames should be plenty + var callers [64]uintptr + n := runtime.Callers(skip+1, callers[:]) + return Recovered{ + Value: value, + Callers: callers[:n], + Stack: debug.Stack(), + } +} + +// Recovered is a panic that was caught with recover(). +type Recovered struct { + // The original value of the panic. + Value any + // The caller list as returned by runtime.Callers when the panic was + // recovered. Can be used to produce a more detailed stack information with + // runtime.CallersFrames. + Callers []uintptr + // The formatted stacktrace from the goroutine where the panic was recovered. + // Easier to use than Callers. + Stack []byte +} + +// String renders a human-readable formatting of the panic. +func (p *Recovered) String() string { + return fmt.Sprintf("panic: %v\nstacktrace:\n%s\n", p.Value, p.Stack) +} + +// AsError casts the panic into an error implementation. The implementation +// is unwrappable with the cause of the panic, if the panic was provided one. +func (p *Recovered) AsError() error { + if p == nil { + return nil + } + return &ErrRecovered{*p} +} + +// ErrRecovered wraps a panics.Recovered in an error implementation. +type ErrRecovered struct{ Recovered } + +var _ error = (*ErrRecovered)(nil) + +func (p *ErrRecovered) Error() string { return p.String() } + +func (p *ErrRecovered) Unwrap() error { + if err, ok := p.Value.(error); ok { + return err + } + return nil +} diff --git a/tools/vendor/github.com/sourcegraph/conc/panics/try.go b/tools/vendor/github.com/sourcegraph/conc/panics/try.go new file mode 100644 index 000000000..4ded92a1c --- /dev/null +++ b/tools/vendor/github.com/sourcegraph/conc/panics/try.go @@ -0,0 +1,11 @@ +package panics + +// Try executes f, catching and returning any panic it might spawn. +// +// The recovered panic can be propagated with panic(), or handled as a normal error with +// (*panics.Recovered).AsError(). 
+func Try(f func()) *Recovered { + var c Catcher + c.Try(f) + return c.Recovered() +} diff --git a/tools/vendor/github.com/sourcegraph/conc/waitgroup.go b/tools/vendor/github.com/sourcegraph/conc/waitgroup.go new file mode 100644 index 000000000..47b1bc1a5 --- /dev/null +++ b/tools/vendor/github.com/sourcegraph/conc/waitgroup.go @@ -0,0 +1,52 @@ +package conc + +import ( + "sync" + + "github.com/sourcegraph/conc/panics" +) + +// NewWaitGroup creates a new WaitGroup. +func NewWaitGroup() *WaitGroup { + return &WaitGroup{} +} + +// WaitGroup is the primary building block for scoped concurrency. +// Goroutines can be spawned in the WaitGroup with the Go method, +// and calling Wait() will ensure that each of those goroutines exits +// before continuing. Any panics in a child goroutine will be caught +// and propagated to the caller of Wait(). +// +// The zero value of WaitGroup is usable, just like sync.WaitGroup. +// Also like sync.WaitGroup, it must not be copied after first use. +type WaitGroup struct { + wg sync.WaitGroup + pc panics.Catcher +} + +// Go spawns a new goroutine in the WaitGroup. +func (h *WaitGroup) Go(f func()) { + h.wg.Add(1) + go func() { + defer h.wg.Done() + h.pc.Try(f) + }() +} + +// Wait will block until all goroutines spawned with Go exit and will +// propagate any panics spawned in a child goroutine. +func (h *WaitGroup) Wait() { + h.wg.Wait() + + // Propagate a panic if we caught one from a child goroutine. + h.pc.Repanic() +} + +// WaitAndRecover will block until all goroutines spawned with Go exit and +// will return a *panics.Recovered if one of the child goroutines panics. +func (h *WaitGroup) WaitAndRecover() *panics.Recovered { + h.wg.Wait() + + // Return a recovered panic if we caught one from a child goroutine. + return h.pc.Recovered() +} diff --git a/tools/vendor/github.com/spf13/afero/README.md b/tools/vendor/github.com/spf13/afero/README.md index 619af574f..86f154554 100644 --- a/tools/vendor/github.com/spf13/afero/README.md +++ b/tools/vendor/github.com/spf13/afero/README.md @@ -2,7 +2,11 @@ A FileSystem Abstraction System for Go -[![Test](https://github.com/spf13/afero/actions/workflows/test.yml/badge.svg)](https://github.com/spf13/afero/actions/workflows/test.yml) [![GoDoc](https://godoc.org/github.com/spf13/afero?status.svg)](https://godoc.org/github.com/spf13/afero) [![Join the chat at https://gitter.im/spf13/afero](https://badges.gitter.im/Dev%20Chat.svg)](https://gitter.im/spf13/afero?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) +[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/spf13/afero/ci.yaml?branch=master&style=flat-square)](https://github.com/spf13/afero/actions?query=workflow%3ACI) +[![Join the chat at https://gitter.im/spf13/afero](https://badges.gitter.im/Dev%20Chat.svg)](https://gitter.im/spf13/afero?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) +[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/afero?style=flat-square)](https://goreportcard.com/report/github.com/spf13/afero) +![Go Version](https://img.shields.io/badge/go%20version-%3E=1.23-61CFDD.svg?style=flat-square) +[![PkgGoDev](https://pkg.go.dev/badge/mod/github.com/spf13/afero)](https://pkg.go.dev/mod/github.com/spf13/afero) # Overview @@ -427,6 +431,39 @@ See the [Releases Page](https://github.com/spf13/afero/releases). 4. Push to the branch (`git push origin my-new-feature`) 5. 
Create new Pull Request +## Releasing + +As of version 1.14.0, Afero moved implementations with third-party libraries to +their own submodules. + +Releasing a new version now requires a few steps: + +``` +VERSION=X.Y.Z +git tag -a v$VERSION -m "Release $VERSION" +git push origin v$VERSION + +cd gcsfs +go get github.com/spf13/afero@v$VERSION +go mod tidy +git commit -am "Update afero to v$VERSION" +git tag -a gcsfs/v$VERSION -m "Release gcsfs $VERSION" +git push origin gcsfs/v$VERSION +cd .. + +cd sftpfs +go get github.com/spf13/afero@v$VERSION +go mod tidy +git commit -am "Update afero to v$VERSION" +git tag -a sftpfs/v$VERSION -m "Release sftpfs $VERSION" +git push origin sftpfs/v$VERSION +cd .. + +git push +``` + +TODO: move these instructions to a Makefile or something + ## Contributors Names in no particular order: diff --git a/tools/vendor/github.com/spf13/cast/README.md b/tools/vendor/github.com/spf13/cast/README.md index 120a57342..1be666a45 100644 --- a/tools/vendor/github.com/spf13/cast/README.md +++ b/tools/vendor/github.com/spf13/cast/README.md @@ -1,8 +1,9 @@ -cast -==== -[![GoDoc](https://godoc.org/github.com/spf13/cast?status.svg)](https://godoc.org/github.com/spf13/cast) -[![Build Status](https://github.com/spf13/cast/actions/workflows/go.yml/badge.svg)](https://github.com/spf13/cast/actions/workflows/go.yml) -[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/cast)](https://goreportcard.com/report/github.com/spf13/cast) +# cast + +[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/spf13/cast/test.yaml?branch=master&style=flat-square)](https://github.com/spf13/cast/actions/workflows/test.yaml) +[![PkgGoDev](https://pkg.go.dev/badge/mod/github.com/spf13/cast)](https://pkg.go.dev/mod/github.com/spf13/cast) +![Go Version](https://img.shields.io/badge/go%20version-%3E=1.16-61CFDD.svg?style=flat-square) +[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/cast?style=flat-square)](https://goreportcard.com/report/github.com/spf13/cast) Easy and safe casting from one type to another in Go @@ -17,7 +18,7 @@ interface into a bool, etc. Cast does this intelligently when an obvious conversion is possible. It doesn’t make any attempts to guess what you meant, for example you can only convert a string to an int when it is a string representation of an int such as “8”. Cast was developed for use in -[Hugo](http://hugo.spf13.com), a website engine which uses YAML, TOML or JSON +[Hugo](https://gohugo.io), a website engine which uses YAML, TOML or JSON for meta data. ## Why use Cast? @@ -72,4 +73,3 @@ the code for a complete set. var eight interface{} = 8 cast.ToInt(eight) // 8 cast.ToInt(nil) // 0 - diff --git a/tools/vendor/github.com/spf13/cast/caste.go b/tools/vendor/github.com/spf13/cast/caste.go index 514d759bf..4181a2e75 100644 --- a/tools/vendor/github.com/spf13/cast/caste.go +++ b/tools/vendor/github.com/spf13/cast/caste.go @@ -18,6 +18,14 @@ import ( var errNegativeNotAllowed = errors.New("unable to cast negative value") +type float64EProvider interface { + Float64() (float64, error) +} + +type float64Provider interface { + Float64() float64 +} + // ToTimeE casts an interface to a time.Time type. 
func ToTimeE(i interface{}) (tim time.Time, err error) { return ToTimeInDefaultLocationE(i, time.UTC) @@ -77,11 +85,14 @@ func ToDurationE(i interface{}) (d time.Duration, err error) { d, err = time.ParseDuration(s + "ns") } return - case json.Number: + case float64EProvider: var v float64 v, err = s.Float64() d = time.Duration(v) return + case float64Provider: + d = time.Duration(s.Float64()) + return default: err = fmt.Errorf("unable to cast %#v of type %T to Duration", i, i) return @@ -98,10 +109,31 @@ func ToBoolE(i interface{}) (bool, error) { case nil: return false, nil case int: - if i.(int) != 0 { - return true, nil - } - return false, nil + return b != 0, nil + case int64: + return b != 0, nil + case int32: + return b != 0, nil + case int16: + return b != 0, nil + case int8: + return b != 0, nil + case uint: + return b != 0, nil + case uint64: + return b != 0, nil + case uint32: + return b != 0, nil + case uint16: + return b != 0, nil + case uint8: + return b != 0, nil + case float64: + return b != 0, nil + case float32: + return b != 0, nil + case time.Duration: + return b != 0, nil case string: return strconv.ParseBool(i.(string)) case json.Number: @@ -153,12 +185,14 @@ func ToFloat64E(i interface{}) (float64, error) { return v, nil } return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i) - case json.Number: + case float64EProvider: v, err := s.Float64() if err == nil { return v, nil } return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i) + case float64Provider: + return s.Float64(), nil case bool: if s { return 1, nil @@ -209,12 +243,14 @@ func ToFloat32E(i interface{}) (float32, error) { return float32(v), nil } return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i) - case json.Number: + case float64EProvider: v, err := s.Float64() if err == nil { return float32(v), nil } return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i) + case float64Provider: + return float32(s.Float64()), nil case bool: if s { return 1, nil @@ -577,12 +613,12 @@ func ToUint64E(i interface{}) (uint64, error) { switch s := i.(type) { case string: - v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) + v, err := strconv.ParseUint(trimZeroDecimal(s), 0, 0) if err == nil { if v < 0 { return 0, errNegativeNotAllowed } - return uint64(v), nil + return v, nil } return 0, fmt.Errorf("unable to cast %#v of type %T to uint64", i, i) case json.Number: @@ -896,8 +932,8 @@ func indirectToStringerOrError(a interface{}) interface{} { return nil } - var errorType = reflect.TypeOf((*error)(nil)).Elem() - var fmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem() + errorType := reflect.TypeOf((*error)(nil)).Elem() + fmtStringerType := reflect.TypeOf((*fmt.Stringer)(nil)).Elem() v := reflect.ValueOf(a) for !v.Type().Implements(fmtStringerType) && !v.Type().Implements(errorType) && v.Kind() == reflect.Ptr && !v.IsNil() { @@ -966,7 +1002,7 @@ func ToStringE(i interface{}) (string, error) { // ToStringMapStringE casts an interface to a map[string]string type. func ToStringMapStringE(i interface{}) (map[string]string, error) { - var m = map[string]string{} + m := map[string]string{} switch v := i.(type) { case map[string]string: @@ -996,7 +1032,7 @@ func ToStringMapStringE(i interface{}) (map[string]string, error) { // ToStringMapStringSliceE casts an interface to a map[string][]string type. 
func ToStringMapStringSliceE(i interface{}) (map[string][]string, error) { - var m = map[string][]string{} + m := map[string][]string{} switch v := i.(type) { case map[string][]string: @@ -1060,7 +1096,7 @@ func ToStringMapStringSliceE(i interface{}) (map[string][]string, error) { // ToStringMapBoolE casts an interface to a map[string]bool type. func ToStringMapBoolE(i interface{}) (map[string]bool, error) { - var m = map[string]bool{} + m := map[string]bool{} switch v := i.(type) { case map[interface{}]interface{}: @@ -1085,7 +1121,7 @@ func ToStringMapBoolE(i interface{}) (map[string]bool, error) { // ToStringMapE casts an interface to a map[string]interface{} type. func ToStringMapE(i interface{}) (map[string]interface{}, error) { - var m = map[string]interface{}{} + m := map[string]interface{}{} switch v := i.(type) { case map[interface{}]interface{}: @@ -1105,7 +1141,7 @@ func ToStringMapE(i interface{}) (map[string]interface{}, error) { // ToStringMapIntE casts an interface to a map[string]int{} type. func ToStringMapIntE(i interface{}) (map[string]int, error) { - var m = map[string]int{} + m := map[string]int{} if i == nil { return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int", i, i) } @@ -1146,7 +1182,7 @@ func ToStringMapIntE(i interface{}) (map[string]int, error) { // ToStringMapInt64E casts an interface to a map[string]int64{} type. func ToStringMapInt64E(i interface{}) (map[string]int64, error) { - var m = map[string]int64{} + m := map[string]int64{} if i == nil { return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int64", i, i) } @@ -1383,37 +1419,35 @@ func (f timeFormat) hasTimezone() bool { return f.typ >= timeFormatNumericTimezone && f.typ <= timeFormatNumericAndNamedTimezone } -var ( - timeFormats = []timeFormat{ - {time.RFC3339, timeFormatNumericTimezone}, - {"2006-01-02T15:04:05", timeFormatNoTimezone}, // iso8601 without timezone - {time.RFC1123Z, timeFormatNumericTimezone}, - {time.RFC1123, timeFormatNamedTimezone}, - {time.RFC822Z, timeFormatNumericTimezone}, - {time.RFC822, timeFormatNamedTimezone}, - {time.RFC850, timeFormatNamedTimezone}, - {"2006-01-02 15:04:05.999999999 -0700 MST", timeFormatNumericAndNamedTimezone}, // Time.String() - {"2006-01-02T15:04:05-0700", timeFormatNumericTimezone}, // RFC3339 without timezone hh:mm colon - {"2006-01-02 15:04:05Z0700", timeFormatNumericTimezone}, // RFC3339 without T or timezone hh:mm colon - {"2006-01-02 15:04:05", timeFormatNoTimezone}, - {time.ANSIC, timeFormatNoTimezone}, - {time.UnixDate, timeFormatNamedTimezone}, - {time.RubyDate, timeFormatNumericTimezone}, - {"2006-01-02 15:04:05Z07:00", timeFormatNumericTimezone}, - {"2006-01-02", timeFormatNoTimezone}, - {"02 Jan 2006", timeFormatNoTimezone}, - {"2006-01-02 15:04:05 -07:00", timeFormatNumericTimezone}, - {"2006-01-02 15:04:05 -0700", timeFormatNumericTimezone}, - {time.Kitchen, timeFormatTimeOnly}, - {time.Stamp, timeFormatTimeOnly}, - {time.StampMilli, timeFormatTimeOnly}, - {time.StampMicro, timeFormatTimeOnly}, - {time.StampNano, timeFormatTimeOnly}, - } -) +var timeFormats = []timeFormat{ + // Keep common formats at the top. 
+ {"2006-01-02", timeFormatNoTimezone}, + {time.RFC3339, timeFormatNumericTimezone}, + {"2006-01-02T15:04:05", timeFormatNoTimezone}, // iso8601 without timezone + {time.RFC1123Z, timeFormatNumericTimezone}, + {time.RFC1123, timeFormatNamedTimezone}, + {time.RFC822Z, timeFormatNumericTimezone}, + {time.RFC822, timeFormatNamedTimezone}, + {time.RFC850, timeFormatNamedTimezone}, + {"2006-01-02 15:04:05.999999999 -0700 MST", timeFormatNumericAndNamedTimezone}, // Time.String() + {"2006-01-02T15:04:05-0700", timeFormatNumericTimezone}, // RFC3339 without timezone hh:mm colon + {"2006-01-02 15:04:05Z0700", timeFormatNumericTimezone}, // RFC3339 without T or timezone hh:mm colon + {"2006-01-02 15:04:05", timeFormatNoTimezone}, + {time.ANSIC, timeFormatNoTimezone}, + {time.UnixDate, timeFormatNamedTimezone}, + {time.RubyDate, timeFormatNumericTimezone}, + {"2006-01-02 15:04:05Z07:00", timeFormatNumericTimezone}, + {"02 Jan 2006", timeFormatNoTimezone}, + {"2006-01-02 15:04:05 -07:00", timeFormatNumericTimezone}, + {"2006-01-02 15:04:05 -0700", timeFormatNumericTimezone}, + {time.Kitchen, timeFormatTimeOnly}, + {time.Stamp, timeFormatTimeOnly}, + {time.StampMilli, timeFormatTimeOnly}, + {time.StampMicro, timeFormatTimeOnly}, + {time.StampNano, timeFormatTimeOnly}, +} func parseDateWith(s string, location *time.Location, formats []timeFormat) (d time.Time, e error) { - for _, format := range formats { if d, e = time.Parse(format.format, s); e == nil { diff --git a/tools/vendor/github.com/spf13/cobra/README.md b/tools/vendor/github.com/spf13/cobra/README.md index 6444f4b7f..71757151c 100644 --- a/tools/vendor/github.com/spf13/cobra/README.md +++ b/tools/vendor/github.com/spf13/cobra/README.md @@ -1,4 +1,5 @@ -![cobra logo](assets/CobraMain.png) + +![cobra logo](https://github.com/user-attachments/assets/cbc3adf8-0dff-46e9-a88d-5e2d971c169e) Cobra is a library for creating powerful modern CLI applications. @@ -105,7 +106,7 @@ go install github.com/spf13/cobra-cli@latest For complete details on using the Cobra-CLI generator, please read [The Cobra Generator README](https://github.com/spf13/cobra-cli/blob/main/README.md) -For complete details on using the Cobra library, please read the [The Cobra User Guide](site/content/user_guide.md). +For complete details on using the Cobra library, please read [The Cobra User Guide](site/content/user_guide.md). # License diff --git a/tools/vendor/github.com/spf13/cobra/active_help.go b/tools/vendor/github.com/spf13/cobra/active_help.go index 25c30e3cc..b3e2dadfe 100644 --- a/tools/vendor/github.com/spf13/cobra/active_help.go +++ b/tools/vendor/github.com/spf13/cobra/active_help.go @@ -35,7 +35,7 @@ const ( // This function can be called multiple times before and/or after completions are added to // the array. Each time this function is called with the same array, the new // ActiveHelp line will be shown below the previous ones when completion is triggered. 
-func AppendActiveHelp(compArray []string, activeHelpStr string) []string { +func AppendActiveHelp(compArray []Completion, activeHelpStr string) []Completion { return append(compArray, fmt.Sprintf("%s%s", activeHelpMarker, activeHelpStr)) } diff --git a/tools/vendor/github.com/spf13/cobra/bash_completionsV2.go b/tools/vendor/github.com/spf13/cobra/bash_completionsV2.go index 1cce5c329..d2397aa36 100644 --- a/tools/vendor/github.com/spf13/cobra/bash_completionsV2.go +++ b/tools/vendor/github.com/spf13/cobra/bash_completionsV2.go @@ -146,7 +146,7 @@ __%[1]s_process_completion_results() { if (((directive & shellCompDirectiveFilterFileExt) != 0)); then # File extension filtering - local fullFilter filter filteringCmd + local fullFilter="" filter filteringCmd # Do not use quotes around the $completions variable or else newline # characters will be kept. @@ -177,20 +177,71 @@ __%[1]s_process_completion_results() { __%[1]s_handle_special_char "$cur" = # Print the activeHelp statements before we finish + __%[1]s_handle_activeHelp +} + +__%[1]s_handle_activeHelp() { + # Print the activeHelp statements if ((${#activeHelp[*]} != 0)); then - printf "\n"; - printf "%%s\n" "${activeHelp[@]}" - printf "\n" - - # The prompt format is only available from bash 4.4. - # We test if it is available before using it. - if (x=${PS1@P}) 2> /dev/null; then - printf "%%s" "${PS1@P}${COMP_LINE[@]}" - else - # Can't print the prompt. Just print the - # text the user had typed, it is workable enough. - printf "%%s" "${COMP_LINE[@]}" + if [ -z $COMP_TYPE ]; then + # Bash v3 does not set the COMP_TYPE variable. + printf "\n"; + printf "%%s\n" "${activeHelp[@]}" + printf "\n" + __%[1]s_reprint_commandLine + return fi + + # Only print ActiveHelp on the second TAB press + if [ $COMP_TYPE -eq 63 ]; then + printf "\n" + printf "%%s\n" "${activeHelp[@]}" + + if ((${#COMPREPLY[*]} == 0)); then + # When there are no completion choices from the program, file completion + # may kick in if the program has not disabled it; in such a case, we want + # to know if any files will match what the user typed, so that we know if + # there will be completions presented, so that we know how to handle ActiveHelp. + # To find out, we actually trigger the file completion ourselves; + # the call to _filedir will fill COMPREPLY if files match. + if (((directive & shellCompDirectiveNoFileComp) == 0)); then + __%[1]s_debug "Listing files" + _filedir + fi + fi + + if ((${#COMPREPLY[*]} != 0)); then + # If there are completion choices to be shown, print a delimiter. + # Re-printing the command-line will automatically be done + # by the shell when it prints the completion choices. + printf -- "--" + else + # When there are no completion choices at all, we need + # to re-print the command-line since the shell will + # not be doing it itself. + __%[1]s_reprint_commandLine + fi + elif [ $COMP_TYPE -eq 37 ] || [ $COMP_TYPE -eq 42 ]; then + # For completion type: menu-complete/menu-complete-backward and insert-completions + # the completions are immediately inserted into the command-line, so we first + # print the activeHelp message and reprint the command-line since the shell won't. + printf "\n" + printf "%%s\n" "${activeHelp[@]}" + + __%[1]s_reprint_commandLine + fi + fi +} + +__%[1]s_reprint_commandLine() { + # The prompt format is only available from bash 4.4. + # We test if it is available before using it. + if (x=${PS1@P}) 2> /dev/null; then + printf "%%s" "${PS1@P}${COMP_LINE[@]}" + else + # Can't print the prompt. 
Just print the
+        # text the user had typed, it is workable enough.
+        printf "%%s" "${COMP_LINE[@]}"
     fi
 }
 
@@ -201,6 +252,8 @@ __%[1]s_extract_activeHelp() {
     local endIndex=${#activeHelpMarker}
 
     while IFS='' read -r comp; do
+        [[ -z $comp ]] && continue
+
         if [[ ${comp:0:endIndex} == $activeHelpMarker ]]; then
             comp=${comp:endIndex}
             __%[1]s_debug "ActiveHelp found: $comp"
@@ -223,16 +276,21 @@ __%[1]s_handle_completion_types() {
         # If the user requested inserting one completion at a time, or all
        # completions at once on the command-line we must remove the descriptions.
         # https://github.com/spf13/cobra/issues/1508
-        local tab=$'\t' comp
-        while IFS='' read -r comp; do
-            [[ -z $comp ]] && continue
-            # Strip any description
-            comp=${comp%%%%$tab*}
-            # Only consider the completions that match
-            if [[ $comp == "$cur"* ]]; then
-                COMPREPLY+=("$comp")
-            fi
-        done < <(printf "%%s\n" "${completions[@]}")
+
+        # If there are no completions, we don't need to do anything
+        (( ${#completions[@]} == 0 )) && return 0
+
+        local tab=$'\t'
+
+        # Strip any description and escape the completion to handle special characters
+        IFS=$'\n' read -ra completions -d '' < <(printf "%%q\n" "${completions[@]%%%%$tab*}")
+
+        # Only consider the completions that match
+        IFS=$'\n' read -ra COMPREPLY -d '' < <(IFS=$'\n'; compgen -W "${completions[*]}" -- "${cur}")
+
+        # compgen loses the escaping so we need to escape all completions again since they will
+        # all be inserted on the command-line.
+        IFS=$'\n' read -ra COMPREPLY -d '' < <(printf "%%q\n" "${COMPREPLY[@]}")
         ;;
 
     *)
@@ -243,11 +301,25 @@ __%[1]s_handle_completion_types() {
 }
 
 __%[1]s_handle_standard_completion_case() {
-    local tab=$'\t' comp
+    local tab=$'\t'
+
+    # If there are no completions, we don't need to do anything
+    (( ${#completions[@]} == 0 )) && return 0
 
     # Short circuit to optimize if we don't have descriptions
     if [[ "${completions[*]}" != *$tab* ]]; then
-        IFS=$'\n' read -ra COMPREPLY -d '' < <(compgen -W "${completions[*]}" -- "$cur")
+        # First, escape the completions to handle special characters
+        IFS=$'\n' read -ra completions -d '' < <(printf "%%q\n" "${completions[@]}")
+        # Only consider the completions that match what the user typed
+        IFS=$'\n' read -ra COMPREPLY -d '' < <(IFS=$'\n'; compgen -W "${completions[*]}" -- "${cur}")
+
+        # compgen loses the escaping so, if there is only a single completion, we need to
+        # escape it again because it will be inserted on the command-line. If there are multiple
+        # completions, we don't want to escape them because they will be printed in a list
+        # and we don't want to show escape characters in that list.
+        if (( ${#COMPREPLY[@]} == 1 )); then
+            COMPREPLY[0]=$(printf "%%q" "${COMPREPLY[0]}")
+        fi
         return 0
     fi
 
@@ -256,23 +328,39 @@ __%[1]s_handle_standard_completion_case() {
     # Look for the longest completion so that we can format things nicely
     while IFS='' read -r compline; do
         [[ -z $compline ]] && continue
-        # Strip any description before checking the length
-        comp=${compline%%%%$tab*}
+
+        # Before checking if the completion matches what the user typed,
+        # we need to strip any description and escape the completion to handle special
+        # characters because those escape characters are part of what the user typed.
+        # Don't call "printf" in a sub-shell because it will be much slower
+        # since we are in a loop.
+        printf -v comp "%%q" "${compline%%%%$tab*}" &>/dev/null || comp=$(printf "%%q" "${compline%%%%$tab*}")
+
         # Only consider the completions that match
         [[ $comp == "$cur"* ]] || continue
+
+        # The completion matches. Add it to the list of full completions including
+        # its description. We don't escape the completion because it may get printed
+        # in a list if there are more than one and we don't want to show escape characters
+        # in that list.
         COMPREPLY+=("$compline")
+
+        # Strip any description before checking the length, and again, don't escape
+        # the completion because this length is only used when printing the completions
+        # in a list and we don't want to show escape characters in that list.
+        comp=${compline%%%%$tab*}
         if ((${#comp}>longest)); then
             longest=${#comp}
         fi
     done < <(printf "%%s\n" "${completions[@]}")
 
-    # If there is a single completion left, remove the description text
+    # If there is a single completion left, remove the description text and escape any special characters
     if ((${#COMPREPLY[*]} == 1)); then
         __%[1]s_debug "COMPREPLY[0]: ${COMPREPLY[0]}"
-        comp="${COMPREPLY[0]%%%%$tab*}"
-        __%[1]s_debug "Removed description from single completion, which is now: ${comp}"
-        COMPREPLY[0]=$comp
-    else # Format the descriptions
+        COMPREPLY[0]=$(printf "%%q" "${COMPREPLY[0]%%%%$tab*}")
+        __%[1]s_debug "Removed description from single completion, which is now: ${COMPREPLY[0]}"
+    else
+        # Format the descriptions
         __%[1]s_format_comp_descriptions $longest
     fi
 }
diff --git a/tools/vendor/github.com/spf13/cobra/cobra.go b/tools/vendor/github.com/spf13/cobra/cobra.go
index e0b0947b0..d9cd2414e 100644
--- a/tools/vendor/github.com/spf13/cobra/cobra.go
+++ b/tools/vendor/github.com/spf13/cobra/cobra.go
@@ -176,12 +176,16 @@ func rpad(s string, padding int) string {
 	return fmt.Sprintf(formattedString, s)
 }
 
-// tmpl executes the given template text on data, writing the result to w.
-func tmpl(w io.Writer, text string, data interface{}) error {
-	t := template.New("top")
-	t.Funcs(templateFuncs)
-	template.Must(t.Parse(text))
-	return t.Execute(w, data)
+func tmpl(text string) *tmplFunc {
+	return &tmplFunc{
+		tmpl: text,
+		fn: func(w io.Writer, data interface{}) error {
+			t := template.New("top")
+			t.Funcs(templateFuncs)
+			template.Must(t.Parse(text))
+			return t.Execute(w, data)
+		},
+	}
 }
 
 // ld compares two strings and returns the levenshtein distance between them.
diff --git a/tools/vendor/github.com/spf13/cobra/command.go b/tools/vendor/github.com/spf13/cobra/command.go
index 54748fc67..dbb2c298b 100644
--- a/tools/vendor/github.com/spf13/cobra/command.go
+++ b/tools/vendor/github.com/spf13/cobra/command.go
@@ -33,6 +33,9 @@ import (
 const (
 	FlagSetByCobraAnnotation     = "cobra_annotation_flag_set_by_cobra"
 	CommandDisplayNameAnnotation = "cobra_annotation_command_display_name"
+
+	helpFlagName    = "help"
+	helpCommandName = "help"
 )
 
 // FParseErrWhitelist configures Flag parse errors to be ignored
@@ -80,11 +83,11 @@ type Command struct {
 	Example string
 
 	// ValidArgs is list of all valid non-flag arguments that are accepted in shell completions
-	ValidArgs []string
+	ValidArgs []Completion
 	// ValidArgsFunction is an optional function that provides valid non-flag arguments for shell completion.
 	// It is a dynamic version of using ValidArgs.
 	// Only one of ValidArgs and ValidArgsFunction can be used for a command.
- ValidArgsFunction func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) + ValidArgsFunction CompletionFunc // Expected arguments Args PositionalArgs @@ -168,12 +171,12 @@ type Command struct { // usageFunc is usage func defined by user. usageFunc func(*Command) error // usageTemplate is usage template defined by user. - usageTemplate string + usageTemplate *tmplFunc // flagErrorFunc is func defined by user and it's called when the parsing of // flags returns an error. flagErrorFunc func(*Command, error) error // helpTemplate is help template defined by user. - helpTemplate string + helpTemplate *tmplFunc // helpFunc is help func defined by user. helpFunc func(*Command, []string) // helpCommand is command with usage 'help'. If it's not defined by user, @@ -186,7 +189,7 @@ type Command struct { completionCommandGroupID string // versionTemplate is the version template defined by user. - versionTemplate string + versionTemplate *tmplFunc // errPrefix is the error message prefix defined by user. errPrefix string @@ -281,6 +284,7 @@ func (c *Command) SetArgs(a []string) { // SetOutput sets the destination for usage and error messages. // If output is nil, os.Stderr is used. +// // Deprecated: Use SetOut and/or SetErr instead func (c *Command) SetOutput(output io.Writer) { c.outWriter = output @@ -312,7 +316,11 @@ func (c *Command) SetUsageFunc(f func(*Command) error) { // SetUsageTemplate sets usage template. Can be defined by Application. func (c *Command) SetUsageTemplate(s string) { - c.usageTemplate = s + if s == "" { + c.usageTemplate = nil + return + } + c.usageTemplate = tmpl(s) } // SetFlagErrorFunc sets a function to generate an error when flag parsing @@ -348,12 +356,20 @@ func (c *Command) SetCompletionCommandGroupID(groupID string) { // SetHelpTemplate sets help template to be used. Application can use it to set custom template. func (c *Command) SetHelpTemplate(s string) { - c.helpTemplate = s + if s == "" { + c.helpTemplate = nil + return + } + c.helpTemplate = tmpl(s) } // SetVersionTemplate sets version template to be used. Application can use it to set custom template. func (c *Command) SetVersionTemplate(s string) { - c.versionTemplate = s + if s == "" { + c.versionTemplate = nil + return + } + c.versionTemplate = tmpl(s) } // SetErrPrefix sets error message prefix to be used. Application can use it to set custom prefix. @@ -434,7 +450,8 @@ func (c *Command) UsageFunc() (f func(*Command) error) { } return func(c *Command) error { c.mergePersistentFlags() - err := tmpl(c.OutOrStderr(), c.UsageTemplate(), c) + fn := c.getUsageTemplateFunc() + err := fn(c.OutOrStderr(), c) if err != nil { c.PrintErrln(err) } @@ -442,6 +459,19 @@ func (c *Command) UsageFunc() (f func(*Command) error) { } } +// getUsageTemplateFunc returns the usage template function for the command +// going up the command tree if necessary. +func (c *Command) getUsageTemplateFunc() func(w io.Writer, data interface{}) error { + if c.usageTemplate != nil { + return c.usageTemplate.fn + } + + if c.HasParent() { + return c.parent.getUsageTemplateFunc() + } + return defaultUsageFunc +} + // Usage puts out the usage for the command. // Used when a user provides invalid input. // Can be defined by user by overriding UsageFunc. 
@@ -460,15 +490,30 @@ func (c *Command) HelpFunc() func(*Command, []string) { } return func(c *Command, a []string) { c.mergePersistentFlags() + fn := c.getHelpTemplateFunc() // The help should be sent to stdout // See https://github.com/spf13/cobra/issues/1002 - err := tmpl(c.OutOrStdout(), c.HelpTemplate(), c) + err := fn(c.OutOrStdout(), c) if err != nil { c.PrintErrln(err) } } } +// getHelpTemplateFunc returns the help template function for the command +// going up the command tree if necessary. +func (c *Command) getHelpTemplateFunc() func(w io.Writer, data interface{}) error { + if c.helpTemplate != nil { + return c.helpTemplate.fn + } + + if c.HasParent() { + return c.parent.getHelpTemplateFunc() + } + + return defaultHelpFunc +} + // Help puts out the help for the command. // Used when a user calls help [command]. // Can be defined by user by overriding HelpFunc. @@ -543,71 +588,55 @@ func (c *Command) NamePadding() int { } // UsageTemplate returns usage template for the command. +// This function is kept for backwards-compatibility reasons. func (c *Command) UsageTemplate() string { - if c.usageTemplate != "" { - return c.usageTemplate + if c.usageTemplate != nil { + return c.usageTemplate.tmpl } if c.HasParent() { return c.parent.UsageTemplate() } - return `Usage:{{if .Runnable}} - {{.UseLine}}{{end}}{{if .HasAvailableSubCommands}} - {{.CommandPath}} [command]{{end}}{{if gt (len .Aliases) 0}} - -Aliases: - {{.NameAndAliases}}{{end}}{{if .HasExample}} - -Examples: -{{.Example}}{{end}}{{if .HasAvailableSubCommands}}{{$cmds := .Commands}}{{if eq (len .Groups) 0}} - -Available Commands:{{range $cmds}}{{if (or .IsAvailableCommand (eq .Name "help"))}} - {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{else}}{{range $group := .Groups}} - -{{.Title}}{{range $cmds}}{{if (and (eq .GroupID $group.ID) (or .IsAvailableCommand (eq .Name "help")))}} - {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{if not .AllChildCommandsHaveGroup}} - -Additional Commands:{{range $cmds}}{{if (and (eq .GroupID "") (or .IsAvailableCommand (eq .Name "help")))}} - {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{end}}{{end}}{{if .HasAvailableLocalFlags}} - -Flags: -{{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasAvailableInheritedFlags}} - -Global Flags: -{{.InheritedFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasHelpSubCommands}} - -Additional help topics:{{range .Commands}}{{if .IsAdditionalHelpTopicCommand}} - {{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableSubCommands}} - -Use "{{.CommandPath}} [command] --help" for more information about a command.{{end}} -` + return defaultUsageTemplate } // HelpTemplate return help template for the command. +// This function is kept for backwards-compatibility reasons. func (c *Command) HelpTemplate() string { - if c.helpTemplate != "" { - return c.helpTemplate + if c.helpTemplate != nil { + return c.helpTemplate.tmpl } if c.HasParent() { return c.parent.HelpTemplate() } - return `{{with (or .Long .Short)}}{{. | trimTrailingWhitespaces}} - -{{end}}{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}` + return defaultHelpTemplate } // VersionTemplate return version template for the command. +// This function is kept for backwards-compatibility reasons. 
func (c *Command) VersionTemplate() string { - if c.versionTemplate != "" { - return c.versionTemplate + if c.versionTemplate != nil { + return c.versionTemplate.tmpl } if c.HasParent() { return c.parent.VersionTemplate() } - return `{{with .Name}}{{printf "%s " .}}{{end}}{{printf "version %s" .Version}} -` + return defaultVersionTemplate +} + +// getVersionTemplateFunc returns the version template function for the command +// going up the command tree if necessary. +func (c *Command) getVersionTemplateFunc() func(w io.Writer, data interface{}) error { + if c.versionTemplate != nil { + return c.versionTemplate.fn + } + + if c.HasParent() { + return c.parent.getVersionTemplateFunc() + } + return defaultVersionFunc } // ErrPrefix return error message prefix for the command @@ -894,7 +923,7 @@ func (c *Command) execute(a []string) (err error) { // If help is called, regardless of other flags, return we want help. // Also say we need help if the command isn't runnable. - helpVal, err := c.Flags().GetBool("help") + helpVal, err := c.Flags().GetBool(helpFlagName) if err != nil { // should be impossible to get here as we always declare a help // flag in InitDefaultHelpFlag() @@ -914,7 +943,8 @@ func (c *Command) execute(a []string) (err error) { return err } if versionVal { - err := tmpl(c.OutOrStdout(), c.VersionTemplate(), c) + fn := c.getVersionTemplateFunc() + err := fn(c.OutOrStdout(), c) if err != nil { c.Println(err) } @@ -1068,12 +1098,6 @@ func (c *Command) ExecuteC() (cmd *Command, err error) { // initialize help at the last point to allow for user overriding c.InitDefaultHelpCmd() - // initialize completion at the last point to allow for user overriding - c.InitDefaultCompletionCmd() - - // Now that all commands have been created, let's make sure all groups - // are properly created also - c.checkCommandGroups() args := c.args @@ -1082,9 +1106,16 @@ func (c *Command) ExecuteC() (cmd *Command, err error) { args = os.Args[1:] } - // initialize the hidden command to be used for shell completion + // initialize the __complete command to be used for shell completion c.initCompleteCmd(args) + // initialize the default completion command + c.InitDefaultCompletionCmd(args...) + + // Now that all commands have been created, let's make sure all groups + // are properly created also + c.checkCommandGroups() + var flags []string if c.TraverseChildren { cmd, flags, err = c.Traverse(args) @@ -1187,16 +1218,16 @@ func (c *Command) checkCommandGroups() { // If c already has help flag, it will do nothing. func (c *Command) InitDefaultHelpFlag() { c.mergePersistentFlags() - if c.Flags().Lookup("help") == nil { + if c.Flags().Lookup(helpFlagName) == nil { usage := "help for " - name := c.displayName() + name := c.DisplayName() if name == "" { usage += "this command" } else { usage += name } - c.Flags().BoolP("help", "h", false, usage) - _ = c.Flags().SetAnnotation("help", FlagSetByCobraAnnotation, []string{"true"}) + c.Flags().BoolP(helpFlagName, "h", false, usage) + _ = c.Flags().SetAnnotation(helpFlagName, FlagSetByCobraAnnotation, []string{"true"}) } } @@ -1215,7 +1246,7 @@ func (c *Command) InitDefaultVersionFlag() { if c.Name() == "" { usage += "this command" } else { - usage += c.Name() + usage += c.DisplayName() } if c.Flags().ShorthandLookup("v") == nil { c.Flags().BoolP("version", "v", false, usage) @@ -1239,9 +1270,9 @@ func (c *Command) InitDefaultHelpCmd() { Use: "help [command]", Short: "Help about any command", Long: `Help provides help for any command in the application. 
-Simply type ` + c.displayName() + ` help [path to command] for full details.`,
-		ValidArgsFunction: func(c *Command, args []string, toComplete string) ([]string, ShellCompDirective) {
-			var completions []string
+Simply type ` + c.DisplayName() + ` help [path to command] for full details.`,
+		ValidArgsFunction: func(c *Command, args []string, toComplete string) ([]Completion, ShellCompDirective) {
+			var completions []Completion
 			cmd, _, e := c.Root().Find(args)
 			if e != nil {
 				return nil, ShellCompDirectiveNoFileComp
@@ -1253,7 +1284,7 @@ Simply type ` + c.displayName() + ` help [path to command] for full details.`,
 			for _, subCmd := range cmd.Commands() {
 				if subCmd.IsAvailableCommand() || subCmd == cmd.helpCommand {
 					if strings.HasPrefix(subCmd.Name(), toComplete) {
-						completions = append(completions, fmt.Sprintf("%s\t%s", subCmd.Name(), subCmd.Short))
+						completions = append(completions, CompletionWithDesc(subCmd.Name(), subCmd.Short))
 					}
 				}
 			}
@@ -1430,10 +1461,12 @@ func (c *Command) CommandPath() string {
 	if c.HasParent() {
 		return c.Parent().CommandPath() + " " + c.Name()
 	}
-	return c.displayName()
+	return c.DisplayName()
 }
 
-func (c *Command) displayName() string {
+// DisplayName returns the name to display in help text. Returns command Name()
+// if CommandDisplayNameAnnotation is not set.
+func (c *Command) DisplayName() string {
 	if displayName, ok := c.Annotations[CommandDisplayNameAnnotation]; ok {
 		return displayName
 	}
@@ -1443,7 +1476,7 @@ func (c *Command) displayName() string {
 // UseLine puts out the full usage for a given command (including parents).
 func (c *Command) UseLine() string {
 	var useline string
-	use := strings.Replace(c.Use, c.Name(), c.displayName(), 1)
+	use := strings.Replace(c.Use, c.Name(), c.DisplayName(), 1)
 	if c.HasParent() {
 		useline = c.parent.CommandPath() + " " + use
 	} else {
@@ -1649,7 +1682,7 @@ func (c *Command) GlobalNormalizationFunc() func(f *flag.FlagSet, name string) f
 // to this command (local and persistent declared here and by all parents).
 func (c *Command) Flags() *flag.FlagSet {
 	if c.flags == nil {
-		c.flags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError)
+		c.flags = flag.NewFlagSet(c.DisplayName(), flag.ContinueOnError)
 		if c.flagErrorBuf == nil {
 			c.flagErrorBuf = new(bytes.Buffer)
 		}
@@ -1664,7 +1697,7 @@ func (c *Command) Flags() *flag.FlagSet {
 func (c *Command) LocalNonPersistentFlags() *flag.FlagSet {
 	persistentFlags := c.PersistentFlags()
 
-	out := flag.NewFlagSet(c.displayName(), flag.ContinueOnError)
+	out := flag.NewFlagSet(c.DisplayName(), flag.ContinueOnError)
 	c.LocalFlags().VisitAll(func(f *flag.Flag) {
 		if persistentFlags.Lookup(f.Name) == nil {
 			out.AddFlag(f)
@@ -1679,7 +1712,7 @@ func (c *Command) LocalNonPersistentFlags() *flag.FlagSet {
 func (c *Command) LocalFlags() *flag.FlagSet {
 	c.mergePersistentFlags()
 
 	if c.lflags == nil {
-		c.lflags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError)
+		c.lflags = flag.NewFlagSet(c.DisplayName(), flag.ContinueOnError)
 		if c.flagErrorBuf == nil {
 			c.flagErrorBuf = new(bytes.Buffer)
 		}
@@ -1707,7 +1740,7 @@ func (c *Command) LocalFlags() *flag.FlagSet {
 func (c *Command) InheritedFlags() *flag.FlagSet {
 	c.mergePersistentFlags()
 
 	if c.iflags == nil {
-		c.iflags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError)
+		c.iflags = flag.NewFlagSet(c.DisplayName(), flag.ContinueOnError)
 		if c.flagErrorBuf == nil {
 			c.flagErrorBuf = new(bytes.Buffer)
 		}
@@ -1736,7 +1769,7 @@ func (c *Command) NonInheritedFlags() *flag.FlagSet {
 // PersistentFlags returns the persistent FlagSet specifically set in the current command.
func (c *Command) PersistentFlags() *flag.FlagSet { if c.pflags == nil { - c.pflags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError) + c.pflags = flag.NewFlagSet(c.DisplayName(), flag.ContinueOnError) if c.flagErrorBuf == nil { c.flagErrorBuf = new(bytes.Buffer) } @@ -1749,9 +1782,9 @@ func (c *Command) PersistentFlags() *flag.FlagSet { func (c *Command) ResetFlags() { c.flagErrorBuf = new(bytes.Buffer) c.flagErrorBuf.Reset() - c.flags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError) + c.flags = flag.NewFlagSet(c.DisplayName(), flag.ContinueOnError) c.flags.SetOutput(c.flagErrorBuf) - c.pflags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError) + c.pflags = flag.NewFlagSet(c.DisplayName(), flag.ContinueOnError) c.pflags.SetOutput(c.flagErrorBuf) c.lflags = nil @@ -1868,7 +1901,7 @@ func (c *Command) mergePersistentFlags() { // If c.parentsPflags == nil, it makes new. func (c *Command) updateParentsPflags() { if c.parentsPflags == nil { - c.parentsPflags = flag.NewFlagSet(c.displayName(), flag.ContinueOnError) + c.parentsPflags = flag.NewFlagSet(c.DisplayName(), flag.ContinueOnError) c.parentsPflags.SetOutput(c.flagErrorBuf) c.parentsPflags.SortFlags = false } @@ -1894,3 +1927,141 @@ func commandNameMatches(s string, t string) bool { return s == t } + +// tmplFunc holds a template and a function that will execute said template. +type tmplFunc struct { + tmpl string + fn func(io.Writer, interface{}) error +} + +var defaultUsageTemplate = `Usage:{{if .Runnable}} + {{.UseLine}}{{end}}{{if .HasAvailableSubCommands}} + {{.CommandPath}} [command]{{end}}{{if gt (len .Aliases) 0}} + +Aliases: + {{.NameAndAliases}}{{end}}{{if .HasExample}} + +Examples: +{{.Example}}{{end}}{{if .HasAvailableSubCommands}}{{$cmds := .Commands}}{{if eq (len .Groups) 0}} + +Available Commands:{{range $cmds}}{{if (or .IsAvailableCommand (eq .Name "help"))}} + {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{else}}{{range $group := .Groups}} + +{{.Title}}{{range $cmds}}{{if (and (eq .GroupID $group.ID) (or .IsAvailableCommand (eq .Name "help")))}} + {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{if not .AllChildCommandsHaveGroup}} + +Additional Commands:{{range $cmds}}{{if (and (eq .GroupID "") (or .IsAvailableCommand (eq .Name "help")))}} + {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{end}}{{end}}{{if .HasAvailableLocalFlags}} + +Flags: +{{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasAvailableInheritedFlags}} + +Global Flags: +{{.InheritedFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasHelpSubCommands}} + +Additional help topics:{{range .Commands}}{{if .IsAdditionalHelpTopicCommand}} + {{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableSubCommands}} + +Use "{{.CommandPath}} [command] --help" for more information about a command.{{end}} +` + +// defaultUsageFunc is equivalent to executing defaultUsageTemplate. The two should be changed in sync. 
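+// On the default path cobra calls this function directly, so the template
+// text above is never parsed or executed; text/template only comes into play
+// when a program installs a custom template (e.g. via SetUsageTemplate).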
+func defaultUsageFunc(w io.Writer, in interface{}) error {
+	c := in.(*Command)
+	fmt.Fprint(w, "Usage:")
+	if c.Runnable() {
+		fmt.Fprintf(w, "\n  %s", c.UseLine())
+	}
+	if c.HasAvailableSubCommands() {
+		fmt.Fprintf(w, "\n  %s [command]", c.CommandPath())
+	}
+	if len(c.Aliases) > 0 {
+		fmt.Fprintf(w, "\n\nAliases:\n")
+		fmt.Fprintf(w, "  %s", c.NameAndAliases())
+	}
+	if c.HasExample() {
+		fmt.Fprintf(w, "\n\nExamples:\n")
+		fmt.Fprintf(w, "%s", c.Example)
+	}
+	if c.HasAvailableSubCommands() {
+		cmds := c.Commands()
+		if len(c.Groups()) == 0 {
+			fmt.Fprintf(w, "\n\nAvailable Commands:")
+			for _, subcmd := range cmds {
+				if subcmd.IsAvailableCommand() || subcmd.Name() == helpCommandName {
+					fmt.Fprintf(w, "\n  %s %s", rpad(subcmd.Name(), subcmd.NamePadding()), subcmd.Short)
+				}
+			}
+		} else {
+			for _, group := range c.Groups() {
+				fmt.Fprintf(w, "\n\n%s", group.Title)
+				for _, subcmd := range cmds {
+					if subcmd.GroupID == group.ID && (subcmd.IsAvailableCommand() || subcmd.Name() == helpCommandName) {
+						fmt.Fprintf(w, "\n  %s %s", rpad(subcmd.Name(), subcmd.NamePadding()), subcmd.Short)
+					}
+				}
+			}
+			if !c.AllChildCommandsHaveGroup() {
+				fmt.Fprintf(w, "\n\nAdditional Commands:")
+				for _, subcmd := range cmds {
+					if subcmd.GroupID == "" && (subcmd.IsAvailableCommand() || subcmd.Name() == helpCommandName) {
+						fmt.Fprintf(w, "\n  %s %s", rpad(subcmd.Name(), subcmd.NamePadding()), subcmd.Short)
+					}
+				}
+			}
+		}
+	}
+	if c.HasAvailableLocalFlags() {
+		fmt.Fprintf(w, "\n\nFlags:\n")
+		fmt.Fprint(w, trimRightSpace(c.LocalFlags().FlagUsages()))
+	}
+	if c.HasAvailableInheritedFlags() {
+		fmt.Fprintf(w, "\n\nGlobal Flags:\n")
+		fmt.Fprint(w, trimRightSpace(c.InheritedFlags().FlagUsages()))
+	}
+	if c.HasHelpSubCommands() {
+		fmt.Fprintf(w, "\n\nAdditional help topics:")
+		for _, subcmd := range c.Commands() {
+			if subcmd.IsAdditionalHelpTopicCommand() {
+				fmt.Fprintf(w, "\n  %s %s", rpad(subcmd.CommandPath(), subcmd.CommandPathPadding()), subcmd.Short)
+			}
+		}
+	}
+	if c.HasAvailableSubCommands() {
+		fmt.Fprintf(w, "\n\nUse \"%s [command] --help\" for more information about a command.", c.CommandPath())
+	}
+	fmt.Fprintln(w)
+	return nil
+}
+
+var defaultHelpTemplate = `{{with (or .Long .Short)}}{{. | trimTrailingWhitespaces}}
+
+{{end}}{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}`
+
+// defaultHelpFunc is equivalent to executing defaultHelpTemplate. The two should be changed in sync.
+func defaultHelpFunc(w io.Writer, in interface{}) error {
+	c := in.(*Command)
+	usage := c.Long
+	if usage == "" {
+		usage = c.Short
+	}
+	usage = trimRightSpace(usage)
+	if usage != "" {
+		fmt.Fprintln(w, usage)
+		fmt.Fprintln(w)
+	}
+	if c.Runnable() || c.HasSubCommands() {
+		fmt.Fprint(w, c.UsageString())
+	}
+	return nil
+}
+
+var defaultVersionTemplate = `{{with .DisplayName}}{{printf "%s " .}}{{end}}{{printf "version %s" .Version}}
+`
+
+// defaultVersionFunc is equivalent to executing defaultVersionTemplate. The two should be changed in sync.
+func defaultVersionFunc(w io.Writer, in interface{}) error {
+	c := in.(*Command)
+	_, err := fmt.Fprintf(w, "%s version %s\n", c.DisplayName(), c.Version)
+	return err
+}
diff --git a/tools/vendor/github.com/spf13/cobra/completions.go b/tools/vendor/github.com/spf13/cobra/completions.go
index c0c08b057..a1752f763 100644
--- a/tools/vendor/github.com/spf13/cobra/completions.go
+++ b/tools/vendor/github.com/spf13/cobra/completions.go
@@ -35,7 +35,7 @@ const (
 )
 
 // Global map of flag completion functions.
Make sure to use flagCompletionMutex before you try to read and write from it.
-var flagCompletionFunctions = map[*pflag.Flag]func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective){}
+var flagCompletionFunctions = map[*pflag.Flag]CompletionFunc{}
 
 // lock for reading and writing from flagCompletionFunctions
 var flagCompletionMutex = &sync.RWMutex{}
@@ -117,22 +117,50 @@ type CompletionOptions struct {
 	HiddenDefaultCmd bool
 }
 
+// Completion is a string that can be used for completions.
+//
+// Two formats are supported:
+//   - the completion choice
+//   - the completion choice with a textual description (separated by a TAB).
+//
+// [CompletionWithDesc] can be used to create a completion string with a textual description.
+//
+// Note: Go type alias is used to provide a more descriptive name in the documentation, but any string can be used.
+type Completion = string
+
+// CompletionFunc is a function that provides completion results.
+type CompletionFunc = func(cmd *Command, args []string, toComplete string) ([]Completion, ShellCompDirective)
+
+// CompletionWithDesc returns a [Completion] with a description by using the TAB delimited format.
+func CompletionWithDesc(choice string, description string) Completion {
+	return choice + "\t" + description
+}
+
 // NoFileCompletions can be used to disable file completion for commands that should
 // not trigger file completions.
-func NoFileCompletions(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) {
+//
+// This method satisfies [CompletionFunc].
+// It can be used with [Command.RegisterFlagCompletionFunc] and for [Command.ValidArgsFunction].
+func NoFileCompletions(cmd *Command, args []string, toComplete string) ([]Completion, ShellCompDirective) {
 	return nil, ShellCompDirectiveNoFileComp
 }
 
 // FixedCompletions can be used to create a completion function which always
 // returns the same results.
-func FixedCompletions(choices []string, directive ShellCompDirective) func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) {
-	return func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) {
+//
+// This method returns a function that satisfies [CompletionFunc].
+// It can be used with [Command.RegisterFlagCompletionFunc] and for [Command.ValidArgsFunction].
+func FixedCompletions(choices []Completion, directive ShellCompDirective) CompletionFunc {
+	return func(cmd *Command, args []string, toComplete string) ([]Completion, ShellCompDirective) {
 		return choices, directive
 	}
 }
 
 // RegisterFlagCompletionFunc should be called to register a function to provide completion for a flag.
-func (c *Command) RegisterFlagCompletionFunc(flagName string, f func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective)) error {
+//
+// You can use pre-defined completion functions such as [FixedCompletions] or [NoFileCompletions],
+// or you can define your own.
+func (c *Command) RegisterFlagCompletionFunc(flagName string, f CompletionFunc) error {
 	flag := c.Flag(flagName)
 	if flag == nil {
 		return fmt.Errorf("RegisterFlagCompletionFunc: flag '%s' does not exist", flagName)
@@ -148,7 +176,7 @@ func (c *Command) RegisterFlagCompletionFunc(flagName string, f func(cmd *Comman
 }
 
 // GetFlagCompletionFunc returns the completion function for the given flag of the command, if available.
-func (c *Command) GetFlagCompletionFunc(flagName string) (func(*Command, []string, string) ([]string, ShellCompDirective), bool) { +func (c *Command) GetFlagCompletionFunc(flagName string) (CompletionFunc, bool) { flag := c.Flag(flagName) if flag == nil { return nil, false @@ -270,7 +298,15 @@ func (c *Command) initCompleteCmd(args []string) { } } -func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDirective, error) { +// SliceValue is a reduced version of [pflag.SliceValue]. It is used to detect +// flags that accept multiple values and therefore can provide completion +// multiple times. +type SliceValue interface { + // GetSlice returns the flag value list as an array of strings. + GetSlice() []string +} + +func (c *Command) getCompletions(args []string) (*Command, []Completion, ShellCompDirective, error) { // The last argument, which is not completely typed by the user, // should not be part of the list of arguments toComplete := args[len(args)-1] @@ -298,7 +334,7 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi } if err != nil { // Unable to find the real command. E.g., someInvalidCmd - return c, []string{}, ShellCompDirectiveDefault, fmt.Errorf("unable to find a command for arguments: %v", trimmedArgs) + return c, []Completion{}, ShellCompDirectiveDefault, fmt.Errorf("unable to find a command for arguments: %v", trimmedArgs) } finalCmd.ctx = c.ctx @@ -328,7 +364,7 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi // Parse the flags early so we can check if required flags are set if err = finalCmd.ParseFlags(finalArgs); err != nil { - return finalCmd, []string{}, ShellCompDirectiveDefault, fmt.Errorf("Error while parsing flags from args %v: %s", finalArgs, err.Error()) + return finalCmd, []Completion{}, ShellCompDirectiveDefault, fmt.Errorf("Error while parsing flags from args %v: %s", finalArgs, err.Error()) } realArgCount := finalCmd.Flags().NArg() @@ -340,14 +376,14 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi if flagErr != nil { // If error type is flagCompError and we don't want flagCompletion we should ignore the error if _, ok := flagErr.(*flagCompError); !(ok && !flagCompletion) { - return finalCmd, []string{}, ShellCompDirectiveDefault, flagErr + return finalCmd, []Completion{}, ShellCompDirectiveDefault, flagErr } } // Look for the --help or --version flags. If they are present, // there should be no further completions. if helpOrVersionFlagPresent(finalCmd) { - return finalCmd, []string{}, ShellCompDirectiveNoFileComp, nil + return finalCmd, []Completion{}, ShellCompDirectiveNoFileComp, nil } // We only remove the flags from the arguments if DisableFlagParsing is not set. 
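
The hunks in this file migrate cobra's completion internals from raw `[]string` to the typed `Completion` and `CompletionFunc` aliases introduced above. As a minimal sketch of how the new API reads from application code (the `deploy` command, `region` flag, and the region choices are hypothetical, for illustration only):

```go
package main

import "github.com/spf13/cobra"

func main() {
	// Hypothetical command with a string flag whose values we want completed.
	cmd := &cobra.Command{
		Use: "deploy",
		Run: func(cmd *cobra.Command, args []string) {},
	}
	cmd.Flags().String("region", "", "target region")

	// FixedCompletions returns a CompletionFunc, so it can be handed straight
	// to RegisterFlagCompletionFunc; CompletionWithDesc produces the
	// TAB-delimited "choice<TAB>description" form described above.
	_ = cmd.RegisterFlagCompletionFunc("region", cobra.FixedCompletions(
		[]cobra.Completion{
			cobra.CompletionWithDesc("eu-west-1", "Ireland"),
			cobra.CompletionWithDesc("us-east-1", "N. Virginia"),
		},
		cobra.ShellCompDirectiveNoFileComp,
	))

	_ = cmd.Execute()
}
```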
@@ -376,11 +412,11 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi return finalCmd, subDir, ShellCompDirectiveFilterDirs, nil } // Directory completion - return finalCmd, []string{}, ShellCompDirectiveFilterDirs, nil + return finalCmd, []Completion{}, ShellCompDirectiveFilterDirs, nil } } - var completions []string + var completions []Completion var directive ShellCompDirective // Enforce flag groups before doing flag completions @@ -399,10 +435,14 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi // If we have not found any required flags, only then can we show regular flags if len(completions) == 0 { doCompleteFlags := func(flag *pflag.Flag) { - if !flag.Changed || + _, acceptsMultiple := flag.Value.(SliceValue) + acceptsMultiple = acceptsMultiple || strings.Contains(flag.Value.Type(), "Slice") || - strings.Contains(flag.Value.Type(), "Array") { - // If the flag is not already present, or if it can be specified multiple times (Array or Slice) + strings.Contains(flag.Value.Type(), "Array") || + strings.HasPrefix(flag.Value.Type(), "stringTo") + + if !flag.Changed || acceptsMultiple { + // If the flag is not already present, or if it can be specified multiple times (Array, Slice, or stringTo) // we suggest it as a completion completions = append(completions, getFlagNameCompletions(flag, toComplete)...) } @@ -462,7 +502,7 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi for _, subCmd := range finalCmd.Commands() { if subCmd.IsAvailableCommand() || subCmd == finalCmd.helpCommand { if strings.HasPrefix(subCmd.Name(), toComplete) { - completions = append(completions, fmt.Sprintf("%s\t%s", subCmd.Name(), subCmd.Short)) + completions = append(completions, CompletionWithDesc(subCmd.Name(), subCmd.Short)) } directive = ShellCompDirectiveNoFileComp } @@ -507,7 +547,7 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi } // Find the completion function for the flag or command - var completionFn func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) + var completionFn CompletionFunc if flag != nil && flagCompletion { flagCompletionMutex.RLock() completionFn = flagCompletionFunctions[flag] @@ -518,7 +558,7 @@ func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDi if completionFn != nil { // Go custom completion defined for this flag or command. // Call the registered completion function to get the completions. - var comps []string + var comps []Completion comps, directive = completionFn(finalCmd, finalArgs, toComplete) completions = append(completions, comps...) 
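 		// The registered function's directive replaces any directive computed
 		// above, while the completions it returns are appended to those
 		// already gathered.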
} @@ -531,23 +571,23 @@ func helpOrVersionFlagPresent(cmd *Command) bool { len(versionFlag.Annotations[FlagSetByCobraAnnotation]) > 0 && versionFlag.Changed { return true } - if helpFlag := cmd.Flags().Lookup("help"); helpFlag != nil && + if helpFlag := cmd.Flags().Lookup(helpFlagName); helpFlag != nil && len(helpFlag.Annotations[FlagSetByCobraAnnotation]) > 0 && helpFlag.Changed { return true } return false } -func getFlagNameCompletions(flag *pflag.Flag, toComplete string) []string { +func getFlagNameCompletions(flag *pflag.Flag, toComplete string) []Completion { if nonCompletableFlag(flag) { - return []string{} + return []Completion{} } - var completions []string + var completions []Completion flagName := "--" + flag.Name if strings.HasPrefix(flagName, toComplete) { // Flag without the = - completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage)) + completions = append(completions, CompletionWithDesc(flagName, flag.Usage)) // Why suggest both long forms: --flag and --flag= ? // This forces the user to *always* have to type either an = or a space after the flag name. @@ -559,20 +599,20 @@ func getFlagNameCompletions(flag *pflag.Flag, toComplete string) []string { // if len(flag.NoOptDefVal) == 0 { // // Flag requires a value, so it can be suffixed with = // flagName += "=" - // completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage)) + // completions = append(completions, CompletionWithDesc(flagName, flag.Usage)) // } } flagName = "-" + flag.Shorthand if len(flag.Shorthand) > 0 && strings.HasPrefix(flagName, toComplete) { - completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage)) + completions = append(completions, CompletionWithDesc(flagName, flag.Usage)) } return completions } -func completeRequireFlags(finalCmd *Command, toComplete string) []string { - var completions []string +func completeRequireFlags(finalCmd *Command, toComplete string) []Completion { + var completions []Completion doCompleteRequiredFlags := func(flag *pflag.Flag) { if _, present := flag.Annotations[BashCompOneRequiredFlag]; present { @@ -687,8 +727,8 @@ func checkIfFlagCompletion(finalCmd *Command, args []string, lastArg string) (*p // 1- the feature has been explicitly disabled by the program, // 2- c has no subcommands (to avoid creating one), // 3- c already has a 'completion' command provided by the program. -func (c *Command) InitDefaultCompletionCmd() { - if c.CompletionOptions.DisableDefaultCmd || !c.HasSubCommands() { +func (c *Command) InitDefaultCompletionCmd(args ...string) { + if c.CompletionOptions.DisableDefaultCmd { return } @@ -701,6 +741,16 @@ func (c *Command) InitDefaultCompletionCmd() { haveNoDescFlag := !c.CompletionOptions.DisableNoDescFlag && !c.CompletionOptions.DisableDescriptions + // Special case to know if there are sub-commands or not. + hasSubCommands := false + for _, cmd := range c.commands { + if cmd.Name() != ShellCompRequestCmd && cmd.Name() != helpCommandName { + // We found a real sub-command (not 'help' or '__complete') + hasSubCommands = true + break + } + } + completionCmd := &Command{ Use: compCmdName, Short: "Generate the autocompletion script for the specified shell", @@ -714,6 +764,22 @@ See each sub-command's help for details on how to use the generated script. } c.AddCommand(completionCmd) + if !hasSubCommands { + // If the 'completion' command will be the only sub-command, + // we only create it if it is actually being called. 
+			// This avoids breaking programs that would suddenly find themselves with
+			// a subcommand, which would prevent them from accepting arguments.
+			// We also create the 'completion' command if the user is triggering
+			// shell completion for it (prog __complete completion '')
+			subCmd, cmdArgs, err := c.Find(args)
+			if err != nil || subCmd.Name() != compCmdName &&
+				!(subCmd.Name() == ShellCompRequestCmd && len(cmdArgs) > 1 && cmdArgs[0] == compCmdName) {
+				// The completion command is not being called or being completed, so we remove it.
+				c.RemoveCommand(completionCmd)
+				return
+			}
+		}
+
 	out := c.OutOrStdout()
 	noDesc := c.CompletionOptions.DisableDescriptions
 	shortDesc := "Generate the autocompletion script for %s"
diff --git a/tools/vendor/github.com/spf13/cobra/powershell_completions.go b/tools/vendor/github.com/spf13/cobra/powershell_completions.go
index a830b7bca..746dcb92e 100644
--- a/tools/vendor/github.com/spf13/cobra/powershell_completions.go
+++ b/tools/vendor/github.com/spf13/cobra/powershell_completions.go
@@ -162,7 +162,10 @@ filter __%[1]s_escapeStringWithSpecialChars {
         if (-Not $Description) {
             $Description = " "
         }
-        @{Name="$Name";Description="$Description"}
+        New-Object -TypeName PSCustomObject -Property @{
+            Name = "$Name"
+            Description = "$Description"
+        }
     }
 
@@ -240,7 +243,12 @@ filter __%[1]s_escapeStringWithSpecialChars {
             __%[1]s_debug "Only one completion left"
 
             # insert space after value
-            [System.Management.Automation.CompletionResult]::new($($comp.Name | __%[1]s_escapeStringWithSpecialChars) + $Space, "$($comp.Name)", 'ParameterValue', "$($comp.Description)")
+            $CompletionText = $($comp.Name | __%[1]s_escapeStringWithSpecialChars) + $Space
+            if ($ExecutionContext.SessionState.LanguageMode -eq "FullLanguage"){
+                [System.Management.Automation.CompletionResult]::new($CompletionText, "$($comp.Name)", 'ParameterValue', "$($comp.Description)")
+            } else {
+                $CompletionText
+            }
 
         } else {
             # Add the proper number of spaces to align the descriptions
@@ -255,7 +263,12 @@ filter __%[1]s_escapeStringWithSpecialChars {
                 $Description = " ($($comp.Description))"
             }
 
-            [System.Management.Automation.CompletionResult]::new("$($comp.Name)$Description", "$($comp.Name)$Description", 'ParameterValue', "$($comp.Description)")
+            $CompletionText = "$($comp.Name)$Description"
+            if ($ExecutionContext.SessionState.LanguageMode -eq "FullLanguage"){
+                [System.Management.Automation.CompletionResult]::new($CompletionText, "$($comp.Name)$Description", 'ParameterValue', "$($comp.Description)")
+            } else {
+                $CompletionText
+            }
         }
 
@@ -264,7 +277,13 @@ filter __%[1]s_escapeStringWithSpecialChars {
         # insert space after value
         # MenuComplete will automatically show the ToolTip of
         # the highlighted value at the bottom of the suggestions.
-        [System.Management.Automation.CompletionResult]::new($($comp.Name | __%[1]s_escapeStringWithSpecialChars) + $Space, "$($comp.Name)", 'ParameterValue', "$($comp.Description)")
+
+        $CompletionText = $($comp.Name | __%[1]s_escapeStringWithSpecialChars) + $Space
+        if ($ExecutionContext.SessionState.LanguageMode -eq "FullLanguage"){
+            [System.Management.Automation.CompletionResult]::new($CompletionText, "$($comp.Name)", 'ParameterValue', "$($comp.Description)")
+        } else {
+            $CompletionText
+        }
 
     # TabCompleteNext and in case we get something unknown
@@ -272,7 +291,13 @@ filter __%[1]s_escapeStringWithSpecialChars {
         # Like MenuComplete but we don't want to add a space here because
        # the user needs to press space anyway to get the completion.
# Description will not be shown because that's not possible with TabCompleteNext - [System.Management.Automation.CompletionResult]::new($($comp.Name | __%[1]s_escapeStringWithSpecialChars), "$($comp.Name)", 'ParameterValue', "$($comp.Description)") + + $CompletionText = $($comp.Name | __%[1]s_escapeStringWithSpecialChars) + if ($ExecutionContext.SessionState.LanguageMode -eq "FullLanguage"){ + [System.Management.Automation.CompletionResult]::new($CompletionText, "$($comp.Name)", 'ParameterValue', "$($comp.Description)") + } else { + $CompletionText + } } } diff --git a/tools/vendor/github.com/spf13/jwalterweatherman/.gitignore b/tools/vendor/github.com/spf13/jwalterweatherman/.gitignore deleted file mode 100644 index a71f88af8..000000000 --- a/tools/vendor/github.com/spf13/jwalterweatherman/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.bench -go.sum \ No newline at end of file diff --git a/tools/vendor/github.com/spf13/jwalterweatherman/README.md b/tools/vendor/github.com/spf13/jwalterweatherman/README.md deleted file mode 100644 index 932a23fc6..000000000 --- a/tools/vendor/github.com/spf13/jwalterweatherman/README.md +++ /dev/null @@ -1,148 +0,0 @@ -jWalterWeatherman -================= - -Seamless printing to the terminal (stdout) and logging to a io.Writer -(file) that’s as easy to use as fmt.Println. - -![and_that__s_why_you_always_leave_a_note_by_jonnyetc-d57q7um](https://cloud.githubusercontent.com/assets/173412/11002937/ccd01654-847d-11e5-828e-12ebaf582eaf.jpg) -Graphic by [JonnyEtc](http://jonnyetc.deviantart.com/art/And-That-s-Why-You-Always-Leave-a-Note-315311422) - -JWW is primarily a wrapper around the excellent standard log library. It -provides a few advantages over using the standard log library alone. - -1. Ready to go out of the box. -2. One library for both printing to the terminal and logging (to files). -3. Really easy to log to either a temp file or a file you specify. - - -I really wanted a very straightforward library that could seamlessly do -the following things. - -1. Replace all the println, printf, etc statements thoughout my code with - something more useful -2. Allow the user to easily control what levels are printed to stdout -3. Allow the user to easily control what levels are logged -4. Provide an easy mechanism (like fmt.Println) to print info to the user - which can be easily logged as well -5. Due to 2 & 3 provide easy verbose mode for output and logs -6. Not have any unnecessary initialization cruft. Just use it. - -# Usage - -## Step 1. Use it -Put calls throughout your source based on type of feedback. -No initialization or setup needs to happen. Just start calling things. - -Available Loggers are: - - * TRACE - * DEBUG - * INFO - * WARN - * ERROR - * CRITICAL - * FATAL - -These each are loggers based on the log standard library and follow the -standard usage. Eg. - -```go - import ( - jww "github.com/spf13/jwalterweatherman" - ) - - ... - - if err != nil { - - // This is a pretty serious error and the user should know about - // it. It will be printed to the terminal as well as logged under the - // default thresholds. 
- - jww.ERROR.Println(err) - } - - if err2 != nil { - // This error isn’t going to materially change the behavior of the - // application, but it’s something that may not be what the user - // expects. Under the default thresholds, Warn will be logged, but - // not printed to the terminal. - - jww.WARN.Println(err2) - } - - // Information that’s relevant to what’s happening, but not very - // important for the user. Under the default thresholds this will be - // discarded. - - jww.INFO.Printf("information %q", response) - -``` - -NOTE: You can also use the library in a non-global setting by creating an instance of a Notebook: - -```go -notepad = jww.NewNotepad(jww.LevelInfo, jww.LevelTrace, os.Stdout, ioutil.Discard, "", log.Ldate|log.Ltime) -notepad.WARN.Println("Some warning"") -``` - -_Why 7 levels?_ - -Maybe you think that 7 levels are too much for any application... and you -are probably correct. Just because there are seven levels doesn’t mean -that you should be using all 7 levels. Pick the right set for your needs. -Remember they only have to mean something to your project. - -## Step 2. Optionally configure JWW - -Under the default thresholds : - - * Debug, Trace & Info goto /dev/null - * Warn and above is logged (when a log file/io.Writer is provided) - * Error and above is printed to the terminal (stdout) - -### Changing the thresholds - -The threshold can be changed at any time, but will only affect calls that -execute after the change was made. - -This is very useful if your application has a verbose mode. Of course you -can decide what verbose means to you or even have multiple levels of -verbosity. - - -```go - import ( - jww "github.com/spf13/jwalterweatherman" - ) - - if Verbose { - jww.SetLogThreshold(jww.LevelTrace) - jww.SetStdoutThreshold(jww.LevelInfo) - } -``` - -Note that JWW's own internal output uses log levels as well, so set the log -level before making any other calls if you want to see what it's up to. - - -### Setting a log file - -JWW can log to any `io.Writer`: - - -```go - - jww.SetLogOutput(customWriter) - -``` - - -# More information - -This is an early release. I’ve been using it for a while and this is the -third interface I’ve tried. I like this one pretty well, but no guarantees -that it won’t change a bit. - -I wrote this for use in [hugo](https://gohugo.io). If you are looking -for a static website engine that’s super fast please checkout Hugo. diff --git a/tools/vendor/github.com/spf13/jwalterweatherman/default_notepad.go b/tools/vendor/github.com/spf13/jwalterweatherman/default_notepad.go deleted file mode 100644 index a018c15c4..000000000 --- a/tools/vendor/github.com/spf13/jwalterweatherman/default_notepad.go +++ /dev/null @@ -1,111 +0,0 @@ -// Copyright © 2016 Steve Francia . -// -// Use of this source code is governed by an MIT-style -// license that can be found in the LICENSE file. 
- -package jwalterweatherman - -import ( - "io" - "io/ioutil" - "log" - "os" -) - -var ( - TRACE *log.Logger - DEBUG *log.Logger - INFO *log.Logger - WARN *log.Logger - ERROR *log.Logger - CRITICAL *log.Logger - FATAL *log.Logger - - LOG *log.Logger - FEEDBACK *Feedback - - defaultNotepad *Notepad -) - -func reloadDefaultNotepad() { - TRACE = defaultNotepad.TRACE - DEBUG = defaultNotepad.DEBUG - INFO = defaultNotepad.INFO - WARN = defaultNotepad.WARN - ERROR = defaultNotepad.ERROR - CRITICAL = defaultNotepad.CRITICAL - FATAL = defaultNotepad.FATAL - - LOG = defaultNotepad.LOG - FEEDBACK = defaultNotepad.FEEDBACK -} - -func init() { - defaultNotepad = NewNotepad(LevelError, LevelWarn, os.Stdout, ioutil.Discard, "", log.Ldate|log.Ltime) - reloadDefaultNotepad() -} - -// SetLogThreshold set the log threshold for the default notepad. Trace by default. -func SetLogThreshold(threshold Threshold) { - defaultNotepad.SetLogThreshold(threshold) - reloadDefaultNotepad() -} - -// SetLogOutput set the log output for the default notepad. Discarded by default. -func SetLogOutput(handle io.Writer) { - defaultNotepad.SetLogOutput(handle) - reloadDefaultNotepad() -} - -// SetStdoutThreshold set the standard output threshold for the default notepad. -// Info by default. -func SetStdoutThreshold(threshold Threshold) { - defaultNotepad.SetStdoutThreshold(threshold) - reloadDefaultNotepad() -} - -// SetStdoutOutput set the stdout output for the default notepad. Default is stdout. -func SetStdoutOutput(handle io.Writer) { - defaultNotepad.outHandle = handle - defaultNotepad.init() - reloadDefaultNotepad() -} - -// SetPrefix set the prefix for the default logger. Empty by default. -func SetPrefix(prefix string) { - defaultNotepad.SetPrefix(prefix) - reloadDefaultNotepad() -} - -// SetFlags set the flags for the default logger. "log.Ldate | log.Ltime" by default. -func SetFlags(flags int) { - defaultNotepad.SetFlags(flags) - reloadDefaultNotepad() -} - -// SetLogListeners configures the default logger with one or more log listeners. -func SetLogListeners(l ...LogListener) { - defaultNotepad.logListeners = l - defaultNotepad.init() - reloadDefaultNotepad() -} - -// Level returns the current global log threshold. -func LogThreshold() Threshold { - return defaultNotepad.logThreshold -} - -// Level returns the current global output threshold. -func StdoutThreshold() Threshold { - return defaultNotepad.stdoutThreshold -} - -// GetStdoutThreshold returns the defined Treshold for the log logger. -func GetLogThreshold() Threshold { - return defaultNotepad.GetLogThreshold() -} - -// GetStdoutThreshold returns the Treshold for the stdout logger. -func GetStdoutThreshold() Threshold { - return defaultNotepad.GetStdoutThreshold() -} diff --git a/tools/vendor/github.com/spf13/jwalterweatherman/log_counter.go b/tools/vendor/github.com/spf13/jwalterweatherman/log_counter.go deleted file mode 100644 index 41285f3dc..000000000 --- a/tools/vendor/github.com/spf13/jwalterweatherman/log_counter.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright © 2016 Steve Francia . -// -// Use of this source code is governed by an MIT-style -// license that can be found in the LICENSE file. - -package jwalterweatherman - -import ( - "io" - "sync/atomic" -) - -// Counter is an io.Writer that increments a counter on Write. -type Counter struct { - count uint64 -} - -func (c *Counter) incr() { - atomic.AddUint64(&c.count, 1) -} - -// Reset resets the counter. 
-func (c *Counter) Reset() { - atomic.StoreUint64(&c.count, 0) -} - -// Count returns the current count. -func (c *Counter) Count() uint64 { - return atomic.LoadUint64(&c.count) -} - -func (c *Counter) Write(p []byte) (n int, err error) { - c.incr() - return len(p), nil -} - -// LogCounter creates a LogListener that counts log statements >= the given threshold. -func LogCounter(counter *Counter, t1 Threshold) LogListener { - return func(t2 Threshold) io.Writer { - if t2 < t1 { - // Not interested in this threshold. - return nil - } - return counter - } -} diff --git a/tools/vendor/github.com/spf13/jwalterweatherman/notepad.go b/tools/vendor/github.com/spf13/jwalterweatherman/notepad.go deleted file mode 100644 index cc7957bf7..000000000 --- a/tools/vendor/github.com/spf13/jwalterweatherman/notepad.go +++ /dev/null @@ -1,225 +0,0 @@ -// Copyright © 2016 Steve Francia . -// -// Use of this source code is governed by an MIT-style -// license that can be found in the LICENSE file. - -package jwalterweatherman - -import ( - "fmt" - "io" - "io/ioutil" - "log" -) - -type Threshold int - -func (t Threshold) String() string { - return prefixes[t] -} - -const ( - LevelTrace Threshold = iota - LevelDebug - LevelInfo - LevelWarn - LevelError - LevelCritical - LevelFatal -) - -var prefixes map[Threshold]string = map[Threshold]string{ - LevelTrace: "TRACE", - LevelDebug: "DEBUG", - LevelInfo: "INFO", - LevelWarn: "WARN", - LevelError: "ERROR", - LevelCritical: "CRITICAL", - LevelFatal: "FATAL", -} - -// Notepad is where you leave a note! -type Notepad struct { - TRACE *log.Logger - DEBUG *log.Logger - INFO *log.Logger - WARN *log.Logger - ERROR *log.Logger - CRITICAL *log.Logger - FATAL *log.Logger - - LOG *log.Logger - FEEDBACK *Feedback - - loggers [7]**log.Logger - logHandle io.Writer - outHandle io.Writer - logThreshold Threshold - stdoutThreshold Threshold - prefix string - flags int - - logListeners []LogListener -} - -// A LogListener can ble supplied to a Notepad to listen on log writes for a given -// threshold. This can be used to capture log events in unit tests and similar. -// Note that this function will be invoked once for each log threshold. If -// the given threshold is not of interest to you, return nil. -// Note that these listeners will receive log events for a given threshold, even -// if the current configuration says not to log it. That way you can count ERRORs even -// if you don't print them to the console. -type LogListener func(t Threshold) io.Writer - -// NewNotepad creates a new Notepad. -func NewNotepad( - outThreshold Threshold, - logThreshold Threshold, - outHandle, logHandle io.Writer, - prefix string, flags int, - logListeners ...LogListener, -) *Notepad { - - n := &Notepad{logListeners: logListeners} - - n.loggers = [7]**log.Logger{&n.TRACE, &n.DEBUG, &n.INFO, &n.WARN, &n.ERROR, &n.CRITICAL, &n.FATAL} - n.outHandle = outHandle - n.logHandle = logHandle - n.stdoutThreshold = outThreshold - n.logThreshold = logThreshold - - if len(prefix) != 0 { - n.prefix = "[" + prefix + "] " - } else { - n.prefix = "" - } - - n.flags = flags - - n.LOG = log.New(n.logHandle, - "LOG: ", - n.flags) - n.FEEDBACK = &Feedback{out: log.New(outHandle, "", 0), log: n.LOG} - - n.init() - return n -} - -// init creates the loggers for each level depending on the notepad thresholds. 
-func (n *Notepad) init() { - logAndOut := io.MultiWriter(n.outHandle, n.logHandle) - - for t, logger := range n.loggers { - threshold := Threshold(t) - prefix := n.prefix + threshold.String() + " " - - switch { - case threshold >= n.logThreshold && threshold >= n.stdoutThreshold: - *logger = log.New(n.createLogWriters(threshold, logAndOut), prefix, n.flags) - - case threshold >= n.logThreshold: - *logger = log.New(n.createLogWriters(threshold, n.logHandle), prefix, n.flags) - - case threshold >= n.stdoutThreshold: - *logger = log.New(n.createLogWriters(threshold, n.outHandle), prefix, n.flags) - - default: - *logger = log.New(n.createLogWriters(threshold, ioutil.Discard), prefix, n.flags) - } - } -} - -func (n *Notepad) createLogWriters(t Threshold, handle io.Writer) io.Writer { - if len(n.logListeners) == 0 { - return handle - } - writers := []io.Writer{handle} - for _, l := range n.logListeners { - w := l(t) - if w != nil { - writers = append(writers, w) - } - } - - if len(writers) == 1 { - return handle - } - - return io.MultiWriter(writers...) -} - -// SetLogThreshold changes the threshold above which messages are written to the -// log file. -func (n *Notepad) SetLogThreshold(threshold Threshold) { - n.logThreshold = threshold - n.init() -} - -// SetLogOutput changes the file where log messages are written. -func (n *Notepad) SetLogOutput(handle io.Writer) { - n.logHandle = handle - n.init() -} - -// GetStdoutThreshold returns the defined Treshold for the log logger. -func (n *Notepad) GetLogThreshold() Threshold { - return n.logThreshold -} - -// SetStdoutThreshold changes the threshold above which messages are written to the -// standard output. -func (n *Notepad) SetStdoutThreshold(threshold Threshold) { - n.stdoutThreshold = threshold - n.init() -} - -// GetStdoutThreshold returns the Treshold for the stdout logger. -func (n *Notepad) GetStdoutThreshold() Threshold { - return n.stdoutThreshold -} - -// SetPrefix changes the prefix used by the notepad. Prefixes are displayed between -// brackets at the beginning of the line. An empty prefix won't be displayed at all. -func (n *Notepad) SetPrefix(prefix string) { - if len(prefix) != 0 { - n.prefix = "[" + prefix + "] " - } else { - n.prefix = "" - } - n.init() -} - -// SetFlags choose which flags the logger will display (after prefix and message -// level). See the package log for more informations on this. -func (n *Notepad) SetFlags(flags int) { - n.flags = flags - n.init() -} - -// Feedback writes plainly to the outHandle while -// logging with the standard extra information (date, file, etc). 
-type Feedback struct { - out *log.Logger - log *log.Logger -} - -func (fb *Feedback) Println(v ...interface{}) { - fb.output(fmt.Sprintln(v...)) -} - -func (fb *Feedback) Printf(format string, v ...interface{}) { - fb.output(fmt.Sprintf(format, v...)) -} - -func (fb *Feedback) Print(v ...interface{}) { - fb.output(fmt.Sprint(v...)) -} - -func (fb *Feedback) output(s string) { - if fb.out != nil { - fb.out.Output(2, s) - } - if fb.log != nil { - fb.log.Output(2, s) - } -} diff --git a/tools/vendor/github.com/spf13/viper/.editorconfig b/tools/vendor/github.com/spf13/viper/.editorconfig index 6d0b6d356..1f664d13a 100644 --- a/tools/vendor/github.com/spf13/viper/.editorconfig +++ b/tools/vendor/github.com/spf13/viper/.editorconfig @@ -13,3 +13,6 @@ indent_style = tab [{Makefile,*.mk}] indent_style = tab + +[*.nix] +indent_size = 2 diff --git a/tools/vendor/github.com/spf13/viper/.envrc b/tools/vendor/github.com/spf13/viper/.envrc new file mode 100644 index 000000000..2e0f9f5f7 --- /dev/null +++ b/tools/vendor/github.com/spf13/viper/.envrc @@ -0,0 +1,4 @@ +if ! has nix_direnv_version || ! nix_direnv_version 3.0.4; then + source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/3.0.4/direnvrc" "sha256-DzlYZ33mWF/Gs8DDeyjr8mnVmQGx7ASYqA5WlxwvBG4=" +fi +use flake . --impure diff --git a/tools/vendor/github.com/spf13/viper/.gitignore b/tools/vendor/github.com/spf13/viper/.gitignore index 896250839..f1bbd4280 100644 --- a/tools/vendor/github.com/spf13/viper/.gitignore +++ b/tools/vendor/github.com/spf13/viper/.gitignore @@ -1,4 +1,7 @@ +/.devenv/ +/.direnv/ /.idea/ +/.pre-commit-config.yaml /bin/ /build/ /var/ diff --git a/tools/vendor/github.com/spf13/viper/.golangci.yaml b/tools/vendor/github.com/spf13/viper/.golangci.yaml index 16e039652..474f41633 100644 --- a/tools/vendor/github.com/spf13/viper/.golangci.yaml +++ b/tools/vendor/github.com/spf13/viper/.golangci.yaml @@ -7,8 +7,16 @@ linters-settings: - standard - default - prefix(github.com/spf13/viper) - golint: - min-confidence: 0 + gocritic: + # Enable multiple checks by tags. See "Tags" section in https://github.com/go-critic/go-critic#usage. 
+ enabled-tags: + - diagnostic + - experimental + - opinionated + - style + disabled-checks: + - importShadow + - unnamedResult goimports: local-prefixes: github.com/spf13/viper @@ -16,13 +24,13 @@ linters: disable-all: true enable: - bodyclose - - deadcode - dogsled - dupl - durationcheck - exhaustive - - exportloopref - gci + - gocritic + - godot - gofmt - gofumpt - goimports @@ -43,14 +51,12 @@ linters: - rowserrcheck - sqlclosecheck - staticcheck - - structcheck - stylecheck - tparallel - typecheck - unconvert - unparam - unused - - varcheck - wastedassign - whitespace @@ -65,9 +71,7 @@ linters: # - gochecknoinits # - gocognit # - goconst - # - gocritic # - gocyclo - # - godot # - gosec # - gosimple # - ifshort @@ -83,6 +87,11 @@ linters: # - goheader # - gomodguard + # deprecated + # - deadcode + # - structcheck + # - varcheck + # don't enable: # - asciicheck # - funlen diff --git a/tools/vendor/github.com/spf13/viper/.yamlignore b/tools/vendor/github.com/spf13/viper/.yamlignore new file mode 100644 index 000000000..c04c4dead --- /dev/null +++ b/tools/vendor/github.com/spf13/viper/.yamlignore @@ -0,0 +1,2 @@ +# TODO: FIXME +/.github/ diff --git a/tools/vendor/github.com/spf13/viper/.yamllint.yaml b/tools/vendor/github.com/spf13/viper/.yamllint.yaml new file mode 100644 index 000000000..bac19ce18 --- /dev/null +++ b/tools/vendor/github.com/spf13/viper/.yamllint.yaml @@ -0,0 +1,6 @@ +ignore-from-file: [.gitignore, .yamlignore] + +extends: default + +rules: + line-length: disable diff --git a/tools/vendor/github.com/spf13/viper/Makefile b/tools/vendor/github.com/spf13/viper/Makefile index 02d3e3715..a77b9c81c 100644 --- a/tools/vendor/github.com/spf13/viper/Makefile +++ b/tools/vendor/github.com/spf13/viper/Makefile @@ -15,8 +15,8 @@ TEST_FORMAT = short-verbose endif # Dependency versions -GOTESTSUM_VERSION = 1.8.0 -GOLANGCI_VERSION = 1.45.2 +GOTESTSUM_VERSION = 1.9.0 +GOLANGCI_VERSION = 1.53.3 # Add the ability to override some variables # Use with care @@ -29,11 +29,6 @@ clear: ## Clear the working area and the project .PHONY: check check: test lint ## Run tests and linters -bin/gotestsum: bin/gotestsum-${GOTESTSUM_VERSION} - @ln -sf gotestsum-${GOTESTSUM_VERSION} bin/gotestsum -bin/gotestsum-${GOTESTSUM_VERSION}: - @mkdir -p bin - curl -L https://github.com/gotestyourself/gotestsum/releases/download/v${GOTESTSUM_VERSION}/gotestsum_${GOTESTSUM_VERSION}_${OS}_amd64.tar.gz | tar -zOxf - gotestsum > ./bin/gotestsum-${GOTESTSUM_VERSION} && chmod +x ./bin/gotestsum-${GOTESTSUM_VERSION} TEST_PKGS ?= ./... .PHONY: test @@ -44,20 +39,36 @@ test: bin/gotestsum ## Run tests @mkdir -p ${BUILD_DIR} bin/gotestsum --no-summary=skipped --junitfile ${BUILD_DIR}/coverage.xml --format ${TEST_FORMAT} -- -race -coverprofile=${BUILD_DIR}/coverage.txt -covermode=atomic $(filter-out -v,${GOARGS}) $(if ${TEST_PKGS},${TEST_PKGS},./...) -bin/golangci-lint: bin/golangci-lint-${GOLANGCI_VERSION} - @ln -sf golangci-lint-${GOLANGCI_VERSION} bin/golangci-lint -bin/golangci-lint-${GOLANGCI_VERSION}: +.PHONY: lint +lint: lint-go lint-yaml +lint: ## Run linters + +.PHONY: lint-go +lint-go: + golangci-lint run $(if ${CI},--out-format github-actions,) + +.PHONY: lint-yaml +lint-yaml: + yamllint $(if ${CI},-f github,) --no-warnings . 
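+
+# Tip: `make lint` runs lint-go and lint-yaml; pass -j to make
+# (e.g. `make -j2 lint`) to run them in parallel.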
+ +.PHONY: fmt +fmt: ## Format code + golangci-lint run --fix + +deps: bin/golangci-lint bin/gotestsum yamllint +deps: ## Install dependencies + +bin/gotestsum: @mkdir -p bin - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | bash -s -- -b ./bin/ v${GOLANGCI_VERSION} - @mv bin/golangci-lint "$@" + curl -L https://github.com/gotestyourself/gotestsum/releases/download/v${GOTESTSUM_VERSION}/gotestsum_${GOTESTSUM_VERSION}_${OS}_amd64.tar.gz | tar -zOxf - gotestsum > ./bin/gotestsum && chmod +x ./bin/gotestsum -.PHONY: lint -lint: bin/golangci-lint ## Run linter - bin/golangci-lint run +bin/golangci-lint: + @mkdir -p bin + curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | bash -s -- v${GOLANGCI_VERSION} -.PHONY: fix -fix: bin/golangci-lint ## Fix lint violations - bin/golangci-lint run --fix +.PHONY: yamllint +yamllint: + pip3 install --user yamllint # Add custom targets here -include custom.mk diff --git a/tools/vendor/github.com/spf13/viper/README.md b/tools/vendor/github.com/spf13/viper/README.md index c14e8927a..769a5d900 100644 --- a/tools/vendor/github.com/spf13/viper/README.md +++ b/tools/vendor/github.com/spf13/viper/README.md @@ -3,15 +3,16 @@ > > **Thank you!** -![Viper](.github/logo.png?raw=true) +![viper logo](https://github.com/user-attachments/assets/acae9193-2974-41f3-808d-2d433f5ada5e) + [![Mentioned in Awesome Go](https://awesome.re/mentioned-badge-flat.svg)](https://github.com/avelino/awesome-go#configuration) [![run on repl.it](https://repl.it/badge/github/sagikazarmark/Viper-example)](https://repl.it/@sagikazarmark/Viper-example#main.go) -[![GitHub Workflow Status](https://img.shields.io/github/workflow/status/spf13/viper/CI?style=flat-square)](https://github.com/spf13/viper/actions?query=workflow%3ACI) +[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/spf13/viper/ci.yaml?branch=master&style=flat-square)](https://github.com/spf13/viper/actions?query=workflow%3ACI) [![Join the chat at https://gitter.im/spf13/viper](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/spf13/viper?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) [![Go Report Card](https://goreportcard.com/badge/github.com/spf13/viper?style=flat-square)](https://goreportcard.com/report/github.com/spf13/viper) -![Go Version](https://img.shields.io/badge/go%20version-%3E=1.15-61CFDD.svg?style=flat-square) +![Go Version](https://img.shields.io/badge/go%20version-%3E=1.21-61CFDD.svg?style=flat-square) [![PkgGoDev](https://pkg.go.dev/badge/mod/github.com/spf13/viper)](https://pkg.go.dev/mod/github.com/spf13/viper) **Go configuration with fangs!** @@ -27,6 +28,10 @@ Many Go projects are built using Viper including: * [doctl](https://github.com/digitalocean/doctl) * [Clairctl](https://github.com/jgsqware/clairctl) * [Mercure](https://mercure.rocks) +* [Meshery](https://github.com/meshery/meshery) +* [Bearer](https://github.com/bearer/bearer) +* [Coder](https://github.com/coder/coder) +* [Vitess](https://vitess.io/) ## Install @@ -35,13 +40,13 @@ Many Go projects are built using Viper including: go get github.com/spf13/viper ``` -**Note:** Viper uses [Go Modules](https://github.com/golang/go/wiki/Modules) to manage dependencies. +**Note:** Viper uses [Go Modules](https://go.dev/wiki/Modules) to manage dependencies. ## What is Viper? -Viper is a complete configuration solution for Go applications including 12-Factor apps. 
It is designed -to work within an application, and can handle all types of configuration needs +Viper is a complete configuration solution for Go applications including [12-Factor apps](https://12factor.net/#the_twelve_factors). +It is designed to work within an application, and can handle all types of configuration needs and formats. It supports: * setting defaults @@ -119,7 +124,7 @@ viper.AddConfigPath("$HOME/.appname") // call multiple times to add many search viper.AddConfigPath(".") // optionally look for config in the working directory err := viper.ReadInConfig() // Find and read the config file if err != nil { // Handle errors reading the config file - panic(fmt.Errorf("Fatal error config file: %w \n", err)) + panic(fmt.Errorf("fatal error config file: %w", err)) } ``` @@ -137,7 +142,7 @@ if err := viper.ReadInConfig(); err != nil { // Config file found and successfully parsed ``` -*NOTE [since 1.6]:* You can also have a file without an extension and specify the format programmaticaly. For those configuration files that lie in the home of the user without any extension like `.bashrc` +*NOTE [since 1.6]:* You can also have a file without an extension and specify the format programmatically. For those configuration files that lie in the home of the user without any extension like `.bashrc` ### Writing Config Files @@ -218,6 +223,7 @@ These could be from a command line flag, or from your own application logic. ```go viper.Set("Verbose", true) viper.Set("LogFile", LogFile) +viper.Set("host.port", 5899) // set subset ``` ### Registering and Using Aliases @@ -413,7 +419,9 @@ in a Key/Value store such as etcd or Consul. These values take precedence over default values, but are overridden by configuration values retrieved from disk, flags, or environment variables. -Viper uses [crypt](https://github.com/bketelsen/crypt) to retrieve +Viper supports multiple hosts. To use, pass a list of endpoints separated by `;`. For example `http://127.0.0.1:4001;http://127.0.0.1:4002`. + +Viper uses [crypt](https://github.com/sagikazarmark/crypt) to retrieve configuration from the K/V store, which means that you can store your configuration values encrypted and have them automatically decrypted if you have the correct gpg keyring. Encryption is optional. @@ -425,7 +433,7 @@ independently of it. K/V store. `crypt` defaults to etcd on http://127.0.0.1:4001. ```bash -$ go get github.com/bketelsen/crypt/bin/crypt +$ go get github.com/sagikazarmark/crypt/bin/crypt $ crypt set -plaintext /config/hugo.json /Users/hugo/settings/config.json ``` @@ -447,6 +455,13 @@ viper.SetConfigType("json") // because there is no file extension in a stream of err := viper.ReadRemoteConfig() ``` +#### etcd3 +```go +viper.AddRemoteProvider("etcd3", "http://127.0.0.1:4001","/config/hugo.json") +viper.SetConfigType("json") // because there is no file extension in a stream of bytes, supported extensions are "json", "toml", "yaml", "yml", "properties", "props", "prop", "env", "dotenv" +err := viper.ReadRemoteConfig() +``` + #### Consul You need to set a key to Consul key/value storage with JSON value containing your desired config. 
For example, create a Consul key/value store key `MY_CONSUL_KEY` with value: @@ -477,6 +492,15 @@ err := viper.ReadRemoteConfig() Of course, you're allowed to use `SecureRemoteProvider` also + +#### NATS + +```go +viper.AddRemoteProvider("nats", "nats://127.0.0.1:4222", "myapp.config") +viper.SetConfigType("json") +err := viper.ReadRemoteConfig() +``` + ### Remote Key/Value Store Example - Encrypted ```go @@ -524,24 +548,27 @@ go func(){ In Viper, there are a few ways to get a value depending on the value’s type. The following functions and methods exist: - * `Get(key string) : interface{}` + * `Get(key string) : any` * `GetBool(key string) : bool` * `GetFloat64(key string) : float64` * `GetInt(key string) : int` * `GetIntSlice(key string) : []int` * `GetString(key string) : string` - * `GetStringMap(key string) : map[string]interface{}` + * `GetStringMap(key string) : map[string]any` * `GetStringMapString(key string) : map[string]string` * `GetStringSlice(key string) : []string` * `GetTime(key string) : time.Time` * `GetDuration(key string) : time.Duration` * `IsSet(key string) : bool` - * `AllSettings() : map[string]interface{}` + * `AllSettings() : map[string]any` One important thing to recognize is that each Get function will return a zero value if it’s not found. To check if a given key exists, the `IsSet()` method has been provided. +The zero value will also be returned if the value is set, but fails to parse +as the requested type. + Example: ```go viper.GetString("logfile") // case-insensitive Setting & Getting @@ -594,7 +621,7 @@ configuration level. Viper can access array indices by using numbers in the path. For example: -```json +```jsonc { "host": { "address": "localhost", @@ -622,7 +649,7 @@ GetInt("host.ports.1") // returns 6029 Lastly, if there exists a key that matches the delimited key path, its value will be returned instead. E.g. -```json +```jsonc { "datastore.metric.host": "0.0.0.0", "host": { @@ -699,8 +726,8 @@ etc. There are two methods to do this: - * `Unmarshal(rawVal interface{}) : error` - * `UnmarshalKey(key string, rawVal interface{}) : error` + * `Unmarshal(rawVal any) : error` + * `UnmarshalKey(key string, rawVal any) : error` Example: @@ -725,9 +752,9 @@ you have to change the delimiter: ```go v := viper.NewWithOptions(viper.KeyDelimiter("::")) -v.SetDefault("chart::values", map[string]interface{}{ - "ingress": map[string]interface{}{ - "annotations": map[string]interface{}{ +v.SetDefault("chart::values", map[string]any{ + "ingress": map[string]any{ + "annotations": map[string]any{ "traefik.frontend.rule.type": "PathPrefix", "traefik.ingress.kubernetes.io/ssl-redirect": "true", }, @@ -736,7 +763,7 @@ v.SetDefault("chart::values", map[string]interface{}{ type config struct { Chart struct{ - Values map[string]interface{} + Values map[string]any } } @@ -776,7 +803,7 @@ if err != nil { } ``` -Viper uses [github.com/mitchellh/mapstructure](https://github.com/mitchellh/mapstructure) under the hood for unmarshaling values which uses `mapstructure` tags by default. +Viper uses [github.com/go-viper/mapstructure](https://github.com/go-viper/mapstructure) under the hood for unmarshaling values which uses `mapstructure` tags by default. ### Decoding custom formats @@ -810,13 +837,15 @@ func yamlStringSettings() string { ## Viper or Vipers? -Viper comes ready to use out of the box. There is no configuration or -initialization needed to begin using Viper. 
Since most applications will want -to use a single central repository for their configuration, the viper package -provides this. It is similar to a singleton. +Viper comes with a global instance (singleton) out of the box. + +Although it makes setting up configuration easy, +using it is generally discouraged as it makes testing harder and can lead to unexpected behavior. + +The best practice is to initialize a Viper instance and pass that around when necessary. -In all of the examples above, they demonstrate using viper in its singleton -style approach. +The global instance _MAY_ be deprecated in the future. +See [#1855](https://github.com/spf13/viper/issues/1855) for more details. ### Working with multiple vipers @@ -872,3 +901,31 @@ No, you will need to synchronize access to the viper yourself (for example by us ## Troubleshooting See [TROUBLESHOOTING.md](TROUBLESHOOTING.md). + +## Development + +**For an optimal developer experience, it is recommended to install [Nix](https://nixos.org/download.html) and [direnv](https://direnv.net/docs/installation.html).** + +_Alternatively, install [Go](https://go.dev/dl/) on your computer then run `make deps` to install the rest of the dependencies._ + +Run the test suite: + +```shell +make test +``` + +Run linters: + +```shell +make lint # pass -j option to run them in parallel +``` + +Some linter violations can automatically be fixed: + +```shell +make fmt +``` + +## License + +The project is licensed under the [MIT License](LICENSE). diff --git a/tools/vendor/github.com/spf13/viper/TROUBLESHOOTING.md b/tools/vendor/github.com/spf13/viper/TROUBLESHOOTING.md index c4e36c686..b68993d41 100644 --- a/tools/vendor/github.com/spf13/viper/TROUBLESHOOTING.md +++ b/tools/vendor/github.com/spf13/viper/TROUBLESHOOTING.md @@ -15,10 +15,10 @@ cannot find package "github.com/hashicorp/hcl/tree/hcl1" in any of: ``` As the error message suggests, Go tries to look up dependencies in `GOPATH` mode (as it's commonly called) from the `GOPATH`. -Viper opted to use [Go Modules](https://github.com/golang/go/wiki/Modules) to manage its dependencies. While in many cases the two methods are interchangeable, once a dependency releases new (major) versions, `GOPATH` mode is no longer able to decide which version to use, so it'll either use one that's already present or pick a version (usually the `master` branch). +Viper opted to use [Go Modules](https://go.dev/wiki/Modules) to manage its dependencies. While in many cases the two methods are interchangeable, once a dependency releases new (major) versions, `GOPATH` mode is no longer able to decide which version to use, so it'll either use one that's already present or pick a version (usually the `master` branch). The solution is easy: switch to using Go Modules. -Please refer to the [wiki](https://github.com/golang/go/wiki/Modules) on how to do that. +Please refer to the [wiki](https://go.dev/wiki/Modules) on how to do that. **tl;dr* `export GO111MODULE=on` diff --git a/tools/vendor/github.com/spf13/viper/UPDATES.md b/tools/vendor/github.com/spf13/viper/UPDATES.md new file mode 100644 index 000000000..ccf413ed7 --- /dev/null +++ b/tools/vendor/github.com/spf13/viper/UPDATES.md @@ -0,0 +1,126 @@ +# Update Log + +**This document details any major updates required to use new features or improvements in Viper.** + +## v1.20.x + +### New file searching API + +Viper now includes a new file searching API that allows users to customize how Viper looks for config files. 
+ +Viper accepts a custom [`Finder`](https://pkg.go.dev/github.com/spf13/viper#Finder) interface implementation: + +```go +// Finder looks for files and directories in an [afero.Fs] filesystem. +type Finder interface { + Find(fsys afero.Fs) ([]string, error) +} +``` + +It is expected to return a list of paths to config files. + +The default implementation uses [github.com/sagikazarmark/locafero](https://github.com/sagikazarmark/locafero) under the hood. + +You can supply your own implementation using `WithFinder`: + +```go +v := viper.NewWithOptions( + viper.WithFinder(&MyFinder{}), +) +``` + +For more information, check out the [Finder examples](https://pkg.go.dev/github.com/spf13/viper#Finder) +and the [documentation](https://pkg.go.dev/github.com/sagikazarmark/locafero) for the locafero package. + +### New encoding API + +Viper now allows customizing the encoding layer by providing an API for encoding and decoding configuration data: + +```go +// Encoder encodes Viper's internal data structures into a byte representation. +// It's primarily used for encoding a map[string]any into a file format. +type Encoder interface { + Encode(v map[string]any) ([]byte, error) +} + +// Decoder decodes the contents of a byte slice into Viper's internal data structures. +// It's primarily used for decoding contents of a file into a map[string]any. +type Decoder interface { + Decode(b []byte, v map[string]any) error +} + +// Codec combines [Encoder] and [Decoder] interfaces. +type Codec interface { + Encoder + Decoder +} +``` + +By default, Viper includes the following codecs: + +- JSON +- TOML +- YAML +- Dotenv + +The rest of the codecs have been moved to [github.com/go-viper/encoding](https://github.com/go-viper/encoding). + +Customizing the encoding layer is possible by providing a custom registry of codecs: + +- [Encoder](https://pkg.go.dev/github.com/spf13/viper#Encoder) -> [EncoderRegistry](https://pkg.go.dev/github.com/spf13/viper#EncoderRegistry) +- [Decoder](https://pkg.go.dev/github.com/spf13/viper#Decoder) -> [DecoderRegistry](https://pkg.go.dev/github.com/spf13/viper#DecoderRegistry) +- [Codec](https://pkg.go.dev/github.com/spf13/viper#Codec) -> [CodecRegistry](https://pkg.go.dev/github.com/spf13/viper#CodecRegistry) + +You can supply the registry of codecs to Viper using the appropriate `With*Registry` function: + +```go +codecRegistry := viper.NewCodecRegistry() + +codecRegistry.RegisterCodec("myformat", &MyCodec{}) + +v := viper.NewWithOptions( + viper.WithCodecRegistry(codecRegistry), +) +``` + +### BREAKING: HCL, Java properties, INI removed from core + +In order to reduce third-party dependencies, Viper dropped support for the following formats from the core: + +- HCL +- Java properties +- INI + +You can still use these formats, though, by importing them from [github.com/go-viper/encoding](https://github.com/go-viper/encoding): + +```go +import ( + "github.com/go-viper/encoding/hcl" + "github.com/go-viper/encoding/javaproperties" + "github.com/go-viper/encoding/ini" +) + +codecRegistry := viper.NewCodecRegistry() + +{ + codec := hcl.Codec{} + + codecRegistry.RegisterCodec("hcl", codec) + codecRegistry.RegisterCodec("tfvars", codec) +} + +{ + codec := &javaproperties.Codec{} + + codecRegistry.RegisterCodec("properties", codec) + codecRegistry.RegisterCodec("props", codec) + codecRegistry.RegisterCodec("prop", codec) +} + +codecRegistry.RegisterCodec("ini", ini.Codec{}) + +v := viper.NewWithOptions( + viper.WithCodecRegistry(codecRegistry), +) +``` diff --git
a/tools/vendor/github.com/spf13/viper/encoding.go b/tools/vendor/github.com/spf13/viper/encoding.go new file mode 100644 index 000000000..a7da55860 --- /dev/null +++ b/tools/vendor/github.com/spf13/viper/encoding.go @@ -0,0 +1,181 @@ +package viper + +import ( + "errors" + "strings" + "sync" + + "github.com/spf13/viper/internal/encoding/dotenv" + "github.com/spf13/viper/internal/encoding/json" + "github.com/spf13/viper/internal/encoding/toml" + "github.com/spf13/viper/internal/encoding/yaml" +) + +// Encoder encodes Viper's internal data structures into a byte representation. +// It's primarily used for encoding a map[string]any into a file format. +type Encoder interface { + Encode(v map[string]any) ([]byte, error) +} + +// Decoder decodes the contents of a byte slice into Viper's internal data structures. +// It's primarily used for decoding contents of a file into a map[string]any. +type Decoder interface { + Decode(b []byte, v map[string]any) error +} + +// Codec combines [Encoder] and [Decoder] interfaces. +type Codec interface { + Encoder + Decoder +} + +// TODO: consider adding specific errors for not found scenarios + +// EncoderRegistry returns an [Encoder] for a given format. +// +// Format is case-insensitive. +// +// [EncoderRegistry] returns an error if no [Encoder] is registered for the format. +type EncoderRegistry interface { + Encoder(format string) (Encoder, error) +} + +// DecoderRegistry returns a [Decoder] for a given format. +// +// Format is case-insensitive. +// +// [DecoderRegistry] returns an error if no [Decoder] is registered for the format. +type DecoderRegistry interface { + Decoder(format string) (Decoder, error) +} + +// CodecRegistry combines [EncoderRegistry] and [DecoderRegistry] interfaces. +type CodecRegistry interface { + EncoderRegistry + DecoderRegistry +} + +// WithEncoderRegistry sets a custom [EncoderRegistry]. +func WithEncoderRegistry(r EncoderRegistry) Option { + return optionFunc(func(v *Viper) { + if r == nil { + return + } + + v.encoderRegistry = r + }) +} + +// WithDecoderRegistry sets a custom [DecoderRegistry]. +func WithDecoderRegistry(r DecoderRegistry) Option { + return optionFunc(func(v *Viper) { + if r == nil { + return + } + + v.decoderRegistry = r + }) +} + +// WithCodecRegistry sets a custom [EncoderRegistry] and [DecoderRegistry]. +func WithCodecRegistry(r CodecRegistry) Option { + return optionFunc(func(v *Viper) { + if r == nil { + return + } + + v.encoderRegistry = r + v.decoderRegistry = r + }) +} + +// DefaultCodecRegistry is a simple implementation of [CodecRegistry] that allows registering custom [Codec]s. +type DefaultCodecRegistry struct { + codecs map[string]Codec + + mu sync.RWMutex + once sync.Once +} + +// NewCodecRegistry returns a new [CodecRegistry], ready to accept custom [Codec]s. +func NewCodecRegistry() *DefaultCodecRegistry { + r := &DefaultCodecRegistry{} + + r.init() + + return r +} + +func (r *DefaultCodecRegistry) init() { + r.once.Do(func() { + r.codecs = map[string]Codec{} + }) +} + +// RegisterCodec registers a custom [Codec]. +// +// Format is case-insensitive. +func (r *DefaultCodecRegistry) RegisterCodec(format string, codec Codec) error { + r.init() + + r.mu.Lock() + defer r.mu.Unlock() + + r.codecs[strings.ToLower(format)] = codec + + return nil +} + +// Encoder implements the [EncoderRegistry] interface. +// +// Format is case-insensitive.
+func (r *DefaultCodecRegistry) Encoder(format string) (Encoder, error) { + encoder, ok := r.codec(format) + if !ok { + return nil, errors.New("encoder not found for this format") + } + + return encoder, nil +} + +// Decoder implements the [DecoderRegistry] interface. +// +// Format is case-insensitive. +func (r *DefaultCodecRegistry) Decoder(format string) (Decoder, error) { + decoder, ok := r.codec(format) + if !ok { + return nil, errors.New("decoder not found for this format") + } + + return decoder, nil +} + +func (r *DefaultCodecRegistry) codec(format string) (Codec, bool) { + r.mu.Lock() + defer r.mu.Unlock() + + format = strings.ToLower(format) + + if r.codecs != nil { + codec, ok := r.codecs[format] + if ok { + return codec, true + } + } + + switch format { + case "yaml", "yml": + return yaml.Codec{}, true + + case "json": + return json.Codec{}, true + + case "toml": + return toml.Codec{}, true + + case "dotenv", "env": + return &dotenv.Codec{}, true + } + + return nil, false +} diff --git a/tools/vendor/github.com/spf13/viper/experimental.go b/tools/vendor/github.com/spf13/viper/experimental.go new file mode 100644 index 000000000..6e19e8a10 --- /dev/null +++ b/tools/vendor/github.com/spf13/viper/experimental.go @@ -0,0 +1,8 @@ +package viper + +// ExperimentalBindStruct tells Viper to use the new bind struct feature. +func ExperimentalBindStruct() Option { + return optionFunc(func(v *Viper) { + v.experimentalBindStruct = true + }) +} diff --git a/tools/vendor/github.com/spf13/viper/experimental_logger.go b/tools/vendor/github.com/spf13/viper/experimental_logger.go deleted file mode 100644 index 206dad6a0..000000000 --- a/tools/vendor/github.com/spf13/viper/experimental_logger.go +++ /dev/null @@ -1,11 +0,0 @@ -//go:build viper_logger -// +build viper_logger - -package viper - -// WithLogger sets a custom logger. -func WithLogger(l Logger) Option { - return optionFunc(func(v *Viper) { - v.logger = l - }) -} diff --git a/tools/vendor/github.com/spf13/viper/viper_go1_15.go b/tools/vendor/github.com/spf13/viper/file.go similarity index 50% rename from tools/vendor/github.com/spf13/viper/viper_go1_15.go rename to tools/vendor/github.com/spf13/viper/file.go index 19a771cbd..50a40581d 100644 --- a/tools/vendor/github.com/spf13/viper/viper_go1_15.go +++ b/tools/vendor/github.com/spf13/viper/file.go @@ -1,6 +1,3 @@ -//go:build !go1.16 || !finder -// +build !go1.16 !finder - package viper import ( @@ -8,12 +5,62 @@ import ( "os" "path/filepath" + "github.com/sagikazarmark/locafero" "github.com/spf13/afero" ) +// ExperimentalFinder tells Viper to use the new Finder interface for finding configuration files. +func ExperimentalFinder() Option { + return optionFunc(func(v *Viper) { + v.experimentalFinder = true + }) +} + +// Search for a config file. +func (v *Viper) findConfigFile() (string, error) { + finder := v.finder + + if finder == nil && v.experimentalFinder { + var names []string + + if v.configType != "" { + names = locafero.NameWithOptionalExtensions(v.configName, SupportedExts...) + } else { + names = locafero.NameWithExtensions(v.configName, SupportedExts...) 
+ } + + finder = locafero.Finder{ + Paths: v.configPaths, + Names: names, + Type: locafero.FileTypeFile, + } + } + + if finder != nil { + return v.findConfigFileWithFinder(finder) + } + + return v.findConfigFileOld() +} + +func (v *Viper) findConfigFileWithFinder(finder Finder) (string, error) { + results, err := finder.Find(v.fs) + if err != nil { + return "", err + } + + if len(results) == 0 { + return "", ConfigFileNotFoundError{v.configName, fmt.Sprintf("%s", v.configPaths)} + } + + // We call clean on the final result to ensure that the path is in its canonical form. + // This is mostly for consistent path handling and to make sure tests pass. + return results[0], nil +} + // Search all configPaths for any config file. // Returns the first path that exists (and is a config file). -func (v *Viper) findConfigFile() (string, error) { +func (v *Viper) findConfigFileOld() (string, error) { v.logger.Info("searching for config in paths", "paths", v.configPaths) for _, cp := range v.configPaths { @@ -44,7 +91,7 @@ func (v *Viper) searchInPath(in string) (filename string) { return "" } -// Check if file Exists +// exists checks if file exists. func exists(fs afero.Fs, path string) (bool, error) { stat, err := fs.Stat(path) if err == nil { diff --git a/tools/vendor/github.com/spf13/viper/finder.go b/tools/vendor/github.com/spf13/viper/finder.go new file mode 100644 index 000000000..9b203ea69 --- /dev/null +++ b/tools/vendor/github.com/spf13/viper/finder.go @@ -0,0 +1,55 @@ +package viper + +import ( + "errors" + + "github.com/spf13/afero" +) + +// WithFinder sets a custom [Finder]. +func WithFinder(f Finder) Option { + return optionFunc(func(v *Viper) { + if f == nil { + return + } + + v.finder = f + }) +} + +// Finder looks for files and directories in an [afero.Fs] filesystem. +type Finder interface { + Find(fsys afero.Fs) ([]string, error) +} + +// Finders combines multiple finders into one. +func Finders(finders ...Finder) Finder { + return &combinedFinder{finders: finders} +} + +// combinedFinder is a Finder that combines multiple finders. +type combinedFinder struct { + finders []Finder +} + +// Find implements the [Finder] interface. +func (c *combinedFinder) Find(fsys afero.Fs) ([]string, error) { + var results []string + var errs []error + + for _, finder := range c.finders { + if finder == nil { + continue + } + + r, err := finder.Find(fsys) + if err != nil { + errs = append(errs, err) + continue + } + + results = append(results, r...) + } + + return results, errors.Join(errs...) +} diff --git a/tools/vendor/github.com/spf13/viper/flags.go b/tools/vendor/github.com/spf13/viper/flags.go index b5ddbf5d4..de033ed58 100644 --- a/tools/vendor/github.com/spf13/viper/flags.go +++ b/tools/vendor/github.com/spf13/viper/flags.go @@ -30,8 +30,8 @@ func (p pflagValueSet) VisitAll(fn func(flag FlagValue)) { }) } -// pflagValue is a wrapper aroung *pflag.flag -// that implements FlagValue +// pflagValue is a wrapper around *pflag.flag +// that implements FlagValue. 
type pflagValue struct { flag *pflag.Flag } diff --git a/tools/vendor/github.com/spf13/viper/flake.lock b/tools/vendor/github.com/spf13/viper/flake.lock new file mode 100644 index 000000000..d76dfbddd --- /dev/null +++ b/tools/vendor/github.com/spf13/viper/flake.lock @@ -0,0 +1,472 @@ +{ + "nodes": { + "cachix": { + "inputs": { + "devenv": "devenv_2", + "flake-compat": [ + "devenv", + "flake-compat" + ], + "nixpkgs": [ + "devenv", + "nixpkgs" + ], + "pre-commit-hooks": [ + "devenv", + "pre-commit-hooks" + ] + }, + "locked": { + "lastModified": 1712055811, + "narHash": "sha256-7FcfMm5A/f02yyzuavJe06zLa9hcMHsagE28ADcmQvk=", + "owner": "cachix", + "repo": "cachix", + "rev": "02e38da89851ec7fec3356a5c04bc8349cae0e30", + "type": "github" + }, + "original": { + "owner": "cachix", + "repo": "cachix", + "type": "github" + } + }, + "devenv": { + "inputs": { + "cachix": "cachix", + "flake-compat": "flake-compat_2", + "nix": "nix_2", + "nixpkgs": "nixpkgs_2", + "pre-commit-hooks": "pre-commit-hooks" + }, + "locked": { + "lastModified": 1724763216, + "narHash": "sha256-oW2bwCrJpIzibCNK6zfIDaIQw765yMAuMSG2gyZfGv0=", + "owner": "cachix", + "repo": "devenv", + "rev": "1e4ef61205b9aa20fe04bf1c468b6a316281c4f1", + "type": "github" + }, + "original": { + "owner": "cachix", + "repo": "devenv", + "type": "github" + } + }, + "devenv_2": { + "inputs": { + "flake-compat": [ + "devenv", + "cachix", + "flake-compat" + ], + "nix": "nix", + "nixpkgs": "nixpkgs", + "poetry2nix": "poetry2nix", + "pre-commit-hooks": [ + "devenv", + "cachix", + "pre-commit-hooks" + ] + }, + "locked": { + "lastModified": 1708704632, + "narHash": "sha256-w+dOIW60FKMaHI1q5714CSibk99JfYxm0CzTinYWr+Q=", + "owner": "cachix", + "repo": "devenv", + "rev": "2ee4450b0f4b95a1b90f2eb5ffea98b90e48c196", + "type": "github" + }, + "original": { + "owner": "cachix", + "ref": "python-rewrite", + "repo": "devenv", + "type": "github" + } + }, + "flake-compat": { + "flake": false, + "locked": { + "lastModified": 1673956053, + "narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=", + "owner": "edolstra", + "repo": "flake-compat", + "rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9", + "type": "github" + }, + "original": { + "owner": "edolstra", + "repo": "flake-compat", + "type": "github" + } + }, + "flake-compat_2": { + "flake": false, + "locked": { + "lastModified": 1696426674, + "narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=", + "owner": "edolstra", + "repo": "flake-compat", + "rev": "0f9255e01c2351cc7d116c072cb317785dd33b33", + "type": "github" + }, + "original": { + "owner": "edolstra", + "repo": "flake-compat", + "type": "github" + } + }, + "flake-parts": { + "inputs": { + "nixpkgs-lib": "nixpkgs-lib" + }, + "locked": { + "lastModified": 1722555600, + "narHash": "sha256-XOQkdLafnb/p9ij77byFQjDf5m5QYl9b2REiVClC+x4=", + "owner": "hercules-ci", + "repo": "flake-parts", + "rev": "8471fe90ad337a8074e957b69ca4d0089218391d", + "type": "github" + }, + "original": { + "owner": "hercules-ci", + "repo": "flake-parts", + "type": "github" + } + }, + "flake-utils": { + "inputs": { + "systems": "systems" + }, + "locked": { + "lastModified": 1689068808, + "narHash": "sha256-6ixXo3wt24N/melDWjq70UuHQLxGV8jZvooRanIHXw0=", + "owner": "numtide", + "repo": "flake-utils", + "rev": "919d646de7be200f3bf08cb76ae1f09402b6f9b4", + "type": "github" + }, + "original": { + "owner": "numtide", + "repo": "flake-utils", + "type": "github" + } + }, + "flake-utils_2": { + "inputs": { + "systems": "systems_2" + }, + "locked": { + "lastModified": 1710146030, + 
"narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=", + "owner": "numtide", + "repo": "flake-utils", + "rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a", + "type": "github" + }, + "original": { + "owner": "numtide", + "repo": "flake-utils", + "type": "github" + } + }, + "gitignore": { + "inputs": { + "nixpkgs": [ + "devenv", + "pre-commit-hooks", + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1709087332, + "narHash": "sha256-HG2cCnktfHsKV0s4XW83gU3F57gaTljL9KNSuG6bnQs=", + "owner": "hercules-ci", + "repo": "gitignore.nix", + "rev": "637db329424fd7e46cf4185293b9cc8c88c95394", + "type": "github" + }, + "original": { + "owner": "hercules-ci", + "repo": "gitignore.nix", + "type": "github" + } + }, + "nix": { + "inputs": { + "flake-compat": "flake-compat", + "nixpkgs": [ + "devenv", + "cachix", + "devenv", + "nixpkgs" + ], + "nixpkgs-regression": "nixpkgs-regression" + }, + "locked": { + "lastModified": 1712911606, + "narHash": "sha256-BGvBhepCufsjcUkXnEEXhEVjwdJAwPglCC2+bInc794=", + "owner": "domenkozar", + "repo": "nix", + "rev": "b24a9318ea3f3600c1e24b4a00691ee912d4de12", + "type": "github" + }, + "original": { + "owner": "domenkozar", + "ref": "devenv-2.21", + "repo": "nix", + "type": "github" + } + }, + "nix-github-actions": { + "inputs": { + "nixpkgs": [ + "devenv", + "cachix", + "devenv", + "poetry2nix", + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1688870561, + "narHash": "sha256-4UYkifnPEw1nAzqqPOTL2MvWtm3sNGw1UTYTalkTcGY=", + "owner": "nix-community", + "repo": "nix-github-actions", + "rev": "165b1650b753316aa7f1787f3005a8d2da0f5301", + "type": "github" + }, + "original": { + "owner": "nix-community", + "repo": "nix-github-actions", + "type": "github" + } + }, + "nix_2": { + "inputs": { + "flake-compat": [ + "devenv", + "flake-compat" + ], + "nixpkgs": [ + "devenv", + "nixpkgs" + ], + "nixpkgs-regression": "nixpkgs-regression_2" + }, + "locked": { + "lastModified": 1712911606, + "narHash": "sha256-BGvBhepCufsjcUkXnEEXhEVjwdJAwPglCC2+bInc794=", + "owner": "domenkozar", + "repo": "nix", + "rev": "b24a9318ea3f3600c1e24b4a00691ee912d4de12", + "type": "github" + }, + "original": { + "owner": "domenkozar", + "ref": "devenv-2.21", + "repo": "nix", + "type": "github" + } + }, + "nixpkgs": { + "locked": { + "lastModified": 1692808169, + "narHash": "sha256-x9Opq06rIiwdwGeK2Ykj69dNc2IvUH1fY55Wm7atwrE=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "9201b5ff357e781bf014d0330d18555695df7ba8", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "nixpkgs-unstable", + "repo": "nixpkgs", + "type": "github" + } + }, + "nixpkgs-lib": { + "locked": { + "lastModified": 1722555339, + "narHash": "sha256-uFf2QeW7eAHlYXuDktm9c25OxOyCoUOQmh5SZ9amE5Q=", + "type": "tarball", + "url": "https://github.com/NixOS/nixpkgs/archive/a5d394176e64ab29c852d03346c1fc9b0b7d33eb.tar.gz" + }, + "original": { + "type": "tarball", + "url": "https://github.com/NixOS/nixpkgs/archive/a5d394176e64ab29c852d03346c1fc9b0b7d33eb.tar.gz" + } + }, + "nixpkgs-regression": { + "locked": { + "lastModified": 1643052045, + "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", + "type": "github" + }, + "original": { + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", + "type": "github" + } + }, + "nixpkgs-regression_2": { + "locked": { + "lastModified": 1643052045, + "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=", + "owner": "NixOS", 
+ "repo": "nixpkgs", + "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", + "type": "github" + }, + "original": { + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", + "type": "github" + } + }, + "nixpkgs-stable": { + "locked": { + "lastModified": 1710695816, + "narHash": "sha256-3Eh7fhEID17pv9ZxrPwCLfqXnYP006RKzSs0JptsN84=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "614b4613980a522ba49f0d194531beddbb7220d3", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "nixos-23.11", + "repo": "nixpkgs", + "type": "github" + } + }, + "nixpkgs_2": { + "locked": { + "lastModified": 1713361204, + "narHash": "sha256-TA6EDunWTkc5FvDCqU3W2T3SFn0gRZqh6D/hJnM02MM=", + "owner": "cachix", + "repo": "devenv-nixpkgs", + "rev": "285676e87ad9f0ca23d8714a6ab61e7e027020c6", + "type": "github" + }, + "original": { + "owner": "cachix", + "ref": "rolling", + "repo": "devenv-nixpkgs", + "type": "github" + } + }, + "nixpkgs_3": { + "locked": { + "lastModified": 1724748588, + "narHash": "sha256-NlpGA4+AIf1dKNq76ps90rxowlFXUsV9x7vK/mN37JM=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "a6292e34000dc93d43bccf78338770c1c5ec8a99", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "nixpkgs-unstable", + "repo": "nixpkgs", + "type": "github" + } + }, + "poetry2nix": { + "inputs": { + "flake-utils": "flake-utils", + "nix-github-actions": "nix-github-actions", + "nixpkgs": [ + "devenv", + "cachix", + "devenv", + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1692876271, + "narHash": "sha256-IXfZEkI0Mal5y1jr6IRWMqK8GW2/f28xJenZIPQqkY0=", + "owner": "nix-community", + "repo": "poetry2nix", + "rev": "d5006be9c2c2417dafb2e2e5034d83fabd207ee3", + "type": "github" + }, + "original": { + "owner": "nix-community", + "repo": "poetry2nix", + "type": "github" + } + }, + "pre-commit-hooks": { + "inputs": { + "flake-compat": [ + "devenv", + "flake-compat" + ], + "flake-utils": "flake-utils_2", + "gitignore": "gitignore", + "nixpkgs": [ + "devenv", + "nixpkgs" + ], + "nixpkgs-stable": "nixpkgs-stable" + }, + "locked": { + "lastModified": 1713775815, + "narHash": "sha256-Wu9cdYTnGQQwtT20QQMg7jzkANKQjwBD9iccfGKkfls=", + "owner": "cachix", + "repo": "pre-commit-hooks.nix", + "rev": "2ac4dcbf55ed43f3be0bae15e181f08a57af24a4", + "type": "github" + }, + "original": { + "owner": "cachix", + "repo": "pre-commit-hooks.nix", + "type": "github" + } + }, + "root": { + "inputs": { + "devenv": "devenv", + "flake-parts": "flake-parts", + "nixpkgs": "nixpkgs_3" + } + }, + "systems": { + "locked": { + "lastModified": 1681028828, + "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", + "owner": "nix-systems", + "repo": "default", + "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", + "type": "github" + }, + "original": { + "owner": "nix-systems", + "repo": "default", + "type": "github" + } + }, + "systems_2": { + "locked": { + "lastModified": 1681028828, + "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", + "owner": "nix-systems", + "repo": "default", + "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", + "type": "github" + }, + "original": { + "owner": "nix-systems", + "repo": "default", + "type": "github" + } + } + }, + "root": "root", + "version": 7 +} diff --git a/tools/vendor/github.com/spf13/viper/flake.nix b/tools/vendor/github.com/spf13/viper/flake.nix new file mode 100644 index 000000000..52ad7d581 --- /dev/null +++ b/tools/vendor/github.com/spf13/viper/flake.nix @@ -0,0 +1,57 @@ +{ + description = "Viper"; + + inputs = { + 
nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable"; + flake-parts.url = "github:hercules-ci/flake-parts"; + devenv.url = "github:cachix/devenv"; + }; + + outputs = inputs@{ flake-parts, ... }: + flake-parts.lib.mkFlake { inherit inputs; } { + imports = [ + inputs.devenv.flakeModule + ]; + + systems = [ "x86_64-linux" "x86_64-darwin" "aarch64-darwin" ]; + + perSystem = { config, self', inputs', pkgs, system, ... }: rec { + devenv.shells = { + default = { + languages = { + go.enable = true; + go.package = pkgs.go_1_23; + }; + + pre-commit.hooks = { + nixpkgs-fmt.enable = true; + yamllint.enable = true; + }; + + packages = with pkgs; [ + gnumake + + golangci-lint + yamllint + ]; + + scripts = { + versions.exec = '' + go version + golangci-lint version + ''; + }; + + enterShell = '' + versions + ''; + + # https://github.com/cachix/devenv/issues/528#issuecomment-1556108767 + containers = pkgs.lib.mkForce { }; + }; + + ci = devenv.shells.default; + }; + }; + }; +} diff --git a/tools/vendor/github.com/spf13/viper/fs.go b/tools/vendor/github.com/spf13/viper/fs.go deleted file mode 100644 index ecb1769e5..000000000 --- a/tools/vendor/github.com/spf13/viper/fs.go +++ /dev/null @@ -1,65 +0,0 @@ -//go:build go1.16 && finder -// +build go1.16,finder - -package viper - -import ( - "errors" - "io/fs" - "path" -) - -type finder struct { - paths []string - fileNames []string - extensions []string - - withoutExtension bool -} - -func (f finder) Find(fsys fs.FS) (string, error) { - for _, searchPath := range f.paths { - for _, fileName := range f.fileNames { - for _, extension := range f.extensions { - filePath := path.Join(searchPath, fileName+"."+extension) - - ok, err := fileExists(fsys, filePath) - if err != nil { - return "", err - } - - if ok { - return filePath, nil - } - } - - if f.withoutExtension { - filePath := path.Join(searchPath, fileName) - - ok, err := fileExists(fsys, filePath) - if err != nil { - return "", err - } - - if ok { - return filePath, nil - } - } - } - } - - return "", nil -} - -func fileExists(fsys fs.FS, filePath string) (bool, error) { - fileInfo, err := fs.Stat(fsys, filePath) - if err == nil { - return !fileInfo.IsDir(), nil - } - - if errors.Is(err, fs.ErrNotExist) { - return false, nil - } - - return false, err -} diff --git a/tools/vendor/github.com/spf13/viper/internal/encoding/decoder.go b/tools/vendor/github.com/spf13/viper/internal/encoding/decoder.go deleted file mode 100644 index f472e9ff1..000000000 --- a/tools/vendor/github.com/spf13/viper/internal/encoding/decoder.go +++ /dev/null @@ -1,61 +0,0 @@ -package encoding - -import ( - "sync" -) - -// Decoder decodes the contents of b into v. -// It's primarily used for decoding contents of a file into a map[string]interface{}. -type Decoder interface { - Decode(b []byte, v map[string]interface{}) error -} - -const ( - // ErrDecoderNotFound is returned when there is no decoder registered for a format. - ErrDecoderNotFound = encodingError("decoder not found for this format") - - // ErrDecoderFormatAlreadyRegistered is returned when an decoder is already registered for a format. - ErrDecoderFormatAlreadyRegistered = encodingError("decoder already registered for this format") -) - -// DecoderRegistry can choose an appropriate Decoder based on the provided format. -type DecoderRegistry struct { - decoders map[string]Decoder - - mu sync.RWMutex -} - -// NewDecoderRegistry returns a new, initialized DecoderRegistry. 
-func NewDecoderRegistry() *DecoderRegistry { - return &DecoderRegistry{ - decoders: make(map[string]Decoder), - } -} - -// RegisterDecoder registers a Decoder for a format. -// Registering a Decoder for an already existing format is not supported. -func (e *DecoderRegistry) RegisterDecoder(format string, enc Decoder) error { - e.mu.Lock() - defer e.mu.Unlock() - - if _, ok := e.decoders[format]; ok { - return ErrDecoderFormatAlreadyRegistered - } - - e.decoders[format] = enc - - return nil -} - -// Decode calls the underlying Decoder based on the format. -func (e *DecoderRegistry) Decode(format string, b []byte, v map[string]interface{}) error { - e.mu.RLock() - decoder, ok := e.decoders[format] - e.mu.RUnlock() - - if !ok { - return ErrDecoderNotFound - } - - return decoder.Decode(b, v) -} diff --git a/tools/vendor/github.com/spf13/viper/internal/encoding/dotenv/codec.go b/tools/vendor/github.com/spf13/viper/internal/encoding/dotenv/codec.go index 4485063b6..3ebc76f02 100644 --- a/tools/vendor/github.com/spf13/viper/internal/encoding/dotenv/codec.go +++ b/tools/vendor/github.com/spf13/viper/internal/encoding/dotenv/codec.go @@ -15,8 +15,8 @@ const keyDelimiter = "_" // (commonly called as dotenv format). type Codec struct{} -func (Codec) Encode(v map[string]interface{}) ([]byte, error) { - flattened := map[string]interface{}{} +func (Codec) Encode(v map[string]any) ([]byte, error) { + flattened := map[string]any{} flattened = flattenAndMergeMap(flattened, v, "", keyDelimiter) @@ -40,7 +40,7 @@ func (Codec) Encode(v map[string]interface{}) ([]byte, error) { return buf.Bytes(), nil } -func (Codec) Decode(b []byte, v map[string]interface{}) error { +func (Codec) Decode(b []byte, v map[string]any) error { var buf bytes.Buffer _, err := buf.Write(b) diff --git a/tools/vendor/github.com/spf13/viper/internal/encoding/dotenv/map_utils.go b/tools/vendor/github.com/spf13/viper/internal/encoding/dotenv/map_utils.go index ce6e6efa3..8bfe0a9de 100644 --- a/tools/vendor/github.com/spf13/viper/internal/encoding/dotenv/map_utils.go +++ b/tools/vendor/github.com/spf13/viper/internal/encoding/dotenv/map_utils.go @@ -7,27 +7,27 @@ import ( ) // flattenAndMergeMap recursively flattens the given map into a new map -// Code is based on the function with the same name in tha main package. -// TODO: move it to a common place -func flattenAndMergeMap(shadow map[string]interface{}, m map[string]interface{}, prefix string, delimiter string) map[string]interface{} { +// Code is based on the function with the same name in the main package. +// TODO: move it to a common place. 
+func flattenAndMergeMap(shadow, m map[string]any, prefix, delimiter string) map[string]any { if shadow != nil && prefix != "" && shadow[prefix] != nil { // prefix is shadowed => nothing more to flatten return shadow } if shadow == nil { - shadow = make(map[string]interface{}) + shadow = make(map[string]any) } - var m2 map[string]interface{} + var m2 map[string]any if prefix != "" { prefix += delimiter } for k, val := range m { fullKey := prefix + k - switch val.(type) { - case map[string]interface{}: - m2 = val.(map[string]interface{}) - case map[interface{}]interface{}: + switch val := val.(type) { + case map[string]any: + m2 = val + case map[any]any: m2 = cast.ToStringMap(val) default: // immediate value diff --git a/tools/vendor/github.com/spf13/viper/internal/encoding/encoder.go b/tools/vendor/github.com/spf13/viper/internal/encoding/encoder.go deleted file mode 100644 index 2341bf235..000000000 --- a/tools/vendor/github.com/spf13/viper/internal/encoding/encoder.go +++ /dev/null @@ -1,60 +0,0 @@ -package encoding - -import ( - "sync" -) - -// Encoder encodes the contents of v into a byte representation. -// It's primarily used for encoding a map[string]interface{} into a file format. -type Encoder interface { - Encode(v map[string]interface{}) ([]byte, error) -} - -const ( - // ErrEncoderNotFound is returned when there is no encoder registered for a format. - ErrEncoderNotFound = encodingError("encoder not found for this format") - - // ErrEncoderFormatAlreadyRegistered is returned when an encoder is already registered for a format. - ErrEncoderFormatAlreadyRegistered = encodingError("encoder already registered for this format") -) - -// EncoderRegistry can choose an appropriate Encoder based on the provided format. -type EncoderRegistry struct { - encoders map[string]Encoder - - mu sync.RWMutex -} - -// NewEncoderRegistry returns a new, initialized EncoderRegistry. -func NewEncoderRegistry() *EncoderRegistry { - return &EncoderRegistry{ - encoders: make(map[string]Encoder), - } -} - -// RegisterEncoder registers an Encoder for a format. -// Registering a Encoder for an already existing format is not supported. 
-func (e *EncoderRegistry) RegisterEncoder(format string, enc Encoder) error { - e.mu.Lock() - defer e.mu.Unlock() - - if _, ok := e.encoders[format]; ok { - return ErrEncoderFormatAlreadyRegistered - } - - e.encoders[format] = enc - - return nil -} - -func (e *EncoderRegistry) Encode(format string, v map[string]interface{}) ([]byte, error) { - e.mu.RLock() - encoder, ok := e.encoders[format] - e.mu.RUnlock() - - if !ok { - return nil, ErrEncoderNotFound - } - - return encoder.Encode(v) -} diff --git a/tools/vendor/github.com/spf13/viper/internal/encoding/error.go b/tools/vendor/github.com/spf13/viper/internal/encoding/error.go deleted file mode 100644 index e4cde02d7..000000000 --- a/tools/vendor/github.com/spf13/viper/internal/encoding/error.go +++ /dev/null @@ -1,7 +0,0 @@ -package encoding - -type encodingError string - -func (e encodingError) Error() string { - return string(e) -} diff --git a/tools/vendor/github.com/spf13/viper/internal/encoding/hcl/codec.go b/tools/vendor/github.com/spf13/viper/internal/encoding/hcl/codec.go deleted file mode 100644 index 7fde8e4bc..000000000 --- a/tools/vendor/github.com/spf13/viper/internal/encoding/hcl/codec.go +++ /dev/null @@ -1,40 +0,0 @@ -package hcl - -import ( - "bytes" - "encoding/json" - - "github.com/hashicorp/hcl" - "github.com/hashicorp/hcl/hcl/printer" -) - -// Codec implements the encoding.Encoder and encoding.Decoder interfaces for HCL encoding. -// TODO: add printer config to the codec? -type Codec struct{} - -func (Codec) Encode(v map[string]interface{}) ([]byte, error) { - b, err := json.Marshal(v) - if err != nil { - return nil, err - } - - // TODO: use printer.Format? Is the trailing newline an issue? - - ast, err := hcl.Parse(string(b)) - if err != nil { - return nil, err - } - - var buf bytes.Buffer - - err = printer.Fprint(&buf, ast.Node) - if err != nil { - return nil, err - } - - return buf.Bytes(), nil -} - -func (Codec) Decode(b []byte, v map[string]interface{}) error { - return hcl.Unmarshal(b, &v) -} diff --git a/tools/vendor/github.com/spf13/viper/internal/encoding/ini/codec.go b/tools/vendor/github.com/spf13/viper/internal/encoding/ini/codec.go deleted file mode 100644 index 9acd87fc3..000000000 --- a/tools/vendor/github.com/spf13/viper/internal/encoding/ini/codec.go +++ /dev/null @@ -1,99 +0,0 @@ -package ini - -import ( - "bytes" - "sort" - "strings" - - "github.com/spf13/cast" - "gopkg.in/ini.v1" -) - -// LoadOptions contains all customized options used for load data source(s). -// This type is added here for convenience: this way consumers can import a single package called "ini". -type LoadOptions = ini.LoadOptions - -// Codec implements the encoding.Encoder and encoding.Decoder interfaces for INI encoding. -type Codec struct { - KeyDelimiter string - LoadOptions LoadOptions -} - -func (c Codec) Encode(v map[string]interface{}) ([]byte, error) { - cfg := ini.Empty() - ini.PrettyFormat = false - - flattened := map[string]interface{}{} - - flattened = flattenAndMergeMap(flattened, v, "", c.keyDelimiter()) - - keys := make([]string, 0, len(flattened)) - - for key := range flattened { - keys = append(keys, key) - } - - sort.Strings(keys) - - for _, key := range keys { - sectionName, keyName := "", key - - lastSep := strings.LastIndex(key, ".") - if lastSep != -1 { - sectionName = key[:(lastSep)] - keyName = key[(lastSep + 1):] - } - - // TODO: is this a good idea? 
- if sectionName == "default" { - sectionName = "" - } - - cfg.Section(sectionName).Key(keyName).SetValue(cast.ToString(flattened[key])) - } - - var buf bytes.Buffer - - _, err := cfg.WriteTo(&buf) - if err != nil { - return nil, err - } - - return buf.Bytes(), nil -} - -func (c Codec) Decode(b []byte, v map[string]interface{}) error { - cfg := ini.Empty(c.LoadOptions) - - err := cfg.Append(b) - if err != nil { - return err - } - - sections := cfg.Sections() - - for i := 0; i < len(sections); i++ { - section := sections[i] - keys := section.Keys() - - for j := 0; j < len(keys); j++ { - key := keys[j] - value := cfg.Section(section.Name()).Key(key.Name()).String() - - deepestMap := deepSearch(v, strings.Split(section.Name(), c.keyDelimiter())) - - // set innermost value - deepestMap[key.Name()] = value - } - } - - return nil -} - -func (c Codec) keyDelimiter() string { - if c.KeyDelimiter == "" { - return "." - } - - return c.KeyDelimiter -} diff --git a/tools/vendor/github.com/spf13/viper/internal/encoding/ini/map_utils.go b/tools/vendor/github.com/spf13/viper/internal/encoding/ini/map_utils.go deleted file mode 100644 index 8329856b5..000000000 --- a/tools/vendor/github.com/spf13/viper/internal/encoding/ini/map_utils.go +++ /dev/null @@ -1,74 +0,0 @@ -package ini - -import ( - "strings" - - "github.com/spf13/cast" -) - -// THIS CODE IS COPIED HERE: IT SHOULD NOT BE MODIFIED -// AT SOME POINT IT WILL BE MOVED TO A COMMON PLACE -// deepSearch scans deep maps, following the key indexes listed in the -// sequence "path". -// The last value is expected to be another map, and is returned. -// -// In case intermediate keys do not exist, or map to a non-map value, -// a new map is created and inserted, and the search continues from there: -// the initial map "m" may be modified! -func deepSearch(m map[string]interface{}, path []string) map[string]interface{} { - for _, k := range path { - m2, ok := m[k] - if !ok { - // intermediate key does not exist - // => create it and continue from there - m3 := make(map[string]interface{}) - m[k] = m3 - m = m3 - continue - } - m3, ok := m2.(map[string]interface{}) - if !ok { - // intermediate key is a value - // => replace with a new map - m3 = make(map[string]interface{}) - m[k] = m3 - } - // continue search from here - m = m3 - } - return m -} - -// flattenAndMergeMap recursively flattens the given map into a new map -// Code is based on the function with the same name in tha main package. 
-// TODO: move it to a common place -func flattenAndMergeMap(shadow map[string]interface{}, m map[string]interface{}, prefix string, delimiter string) map[string]interface{} { - if shadow != nil && prefix != "" && shadow[prefix] != nil { - // prefix is shadowed => nothing more to flatten - return shadow - } - if shadow == nil { - shadow = make(map[string]interface{}) - } - - var m2 map[string]interface{} - if prefix != "" { - prefix += delimiter - } - for k, val := range m { - fullKey := prefix + k - switch val.(type) { - case map[string]interface{}: - m2 = val.(map[string]interface{}) - case map[interface{}]interface{}: - m2 = cast.ToStringMap(val) - default: - // immediate value - shadow[strings.ToLower(fullKey)] = val - continue - } - // recursively merge to shadow map - shadow = flattenAndMergeMap(shadow, m2, fullKey, delimiter) - } - return shadow -} diff --git a/tools/vendor/github.com/spf13/viper/internal/encoding/javaproperties/codec.go b/tools/vendor/github.com/spf13/viper/internal/encoding/javaproperties/codec.go deleted file mode 100644 index b8a2251c1..000000000 --- a/tools/vendor/github.com/spf13/viper/internal/encoding/javaproperties/codec.go +++ /dev/null @@ -1,86 +0,0 @@ -package javaproperties - -import ( - "bytes" - "sort" - "strings" - - "github.com/magiconair/properties" - "github.com/spf13/cast" -) - -// Codec implements the encoding.Encoder and encoding.Decoder interfaces for Java properties encoding. -type Codec struct { - KeyDelimiter string - - // Store read properties on the object so that we can write back in order with comments. - // This will only be used if the configuration read is a properties file. - // TODO: drop this feature in v2 - // TODO: make use of the global properties object optional - Properties *properties.Properties -} - -func (c *Codec) Encode(v map[string]interface{}) ([]byte, error) { - if c.Properties == nil { - c.Properties = properties.NewProperties() - } - - flattened := map[string]interface{}{} - - flattened = flattenAndMergeMap(flattened, v, "", c.keyDelimiter()) - - keys := make([]string, 0, len(flattened)) - - for key := range flattened { - keys = append(keys, key) - } - - sort.Strings(keys) - - for _, key := range keys { - _, _, err := c.Properties.Set(key, cast.ToString(flattened[key])) - if err != nil { - return nil, err - } - } - - var buf bytes.Buffer - - _, err := c.Properties.WriteComment(&buf, "#", properties.UTF8) - if err != nil { - return nil, err - } - - return buf.Bytes(), nil -} - -func (c *Codec) Decode(b []byte, v map[string]interface{}) error { - var err error - c.Properties, err = properties.Load(b, properties.UTF8) - if err != nil { - return err - } - - for _, key := range c.Properties.Keys() { - // ignore existence check: we know it's there - value, _ := c.Properties.Get(key) - - // recursively build nested maps - path := strings.Split(key, c.keyDelimiter()) - lastKey := strings.ToLower(path[len(path)-1]) - deepestMap := deepSearch(v, path[0:len(path)-1]) - - // set innermost value - deepestMap[lastKey] = value - } - - return nil -} - -func (c Codec) keyDelimiter() string { - if c.KeyDelimiter == "" { - return "." 
- } - - return c.KeyDelimiter -} diff --git a/tools/vendor/github.com/spf13/viper/internal/encoding/javaproperties/map_utils.go b/tools/vendor/github.com/spf13/viper/internal/encoding/javaproperties/map_utils.go deleted file mode 100644 index 93755cac1..000000000 --- a/tools/vendor/github.com/spf13/viper/internal/encoding/javaproperties/map_utils.go +++ /dev/null @@ -1,74 +0,0 @@ -package javaproperties - -import ( - "strings" - - "github.com/spf13/cast" -) - -// THIS CODE IS COPIED HERE: IT SHOULD NOT BE MODIFIED -// AT SOME POINT IT WILL BE MOVED TO A COMMON PLACE -// deepSearch scans deep maps, following the key indexes listed in the -// sequence "path". -// The last value is expected to be another map, and is returned. -// -// In case intermediate keys do not exist, or map to a non-map value, -// a new map is created and inserted, and the search continues from there: -// the initial map "m" may be modified! -func deepSearch(m map[string]interface{}, path []string) map[string]interface{} { - for _, k := range path { - m2, ok := m[k] - if !ok { - // intermediate key does not exist - // => create it and continue from there - m3 := make(map[string]interface{}) - m[k] = m3 - m = m3 - continue - } - m3, ok := m2.(map[string]interface{}) - if !ok { - // intermediate key is a value - // => replace with a new map - m3 = make(map[string]interface{}) - m[k] = m3 - } - // continue search from here - m = m3 - } - return m -} - -// flattenAndMergeMap recursively flattens the given map into a new map -// Code is based on the function with the same name in tha main package. -// TODO: move it to a common place -func flattenAndMergeMap(shadow map[string]interface{}, m map[string]interface{}, prefix string, delimiter string) map[string]interface{} { - if shadow != nil && prefix != "" && shadow[prefix] != nil { - // prefix is shadowed => nothing more to flatten - return shadow - } - if shadow == nil { - shadow = make(map[string]interface{}) - } - - var m2 map[string]interface{} - if prefix != "" { - prefix += delimiter - } - for k, val := range m { - fullKey := prefix + k - switch val.(type) { - case map[string]interface{}: - m2 = val.(map[string]interface{}) - case map[interface{}]interface{}: - m2 = cast.ToStringMap(val) - default: - // immediate value - shadow[strings.ToLower(fullKey)] = val - continue - } - // recursively merge to shadow map - shadow = flattenAndMergeMap(shadow, m2, fullKey, delimiter) - } - return shadow -} diff --git a/tools/vendor/github.com/spf13/viper/internal/encoding/json/codec.go b/tools/vendor/github.com/spf13/viper/internal/encoding/json/codec.go index 1b7caaceb..da7546b5a 100644 --- a/tools/vendor/github.com/spf13/viper/internal/encoding/json/codec.go +++ b/tools/vendor/github.com/spf13/viper/internal/encoding/json/codec.go @@ -7,11 +7,11 @@ import ( // Codec implements the encoding.Encoder and encoding.Decoder interfaces for JSON encoding. type Codec struct{} -func (Codec) Encode(v map[string]interface{}) ([]byte, error) { +func (Codec) Encode(v map[string]any) ([]byte, error) { // TODO: expose prefix and indent in the Codec as setting? 
return json.MarshalIndent(v, "", " ") } -func (Codec) Decode(b []byte, v map[string]interface{}) error { +func (Codec) Decode(b []byte, v map[string]any) error { return json.Unmarshal(b, &v) } diff --git a/tools/vendor/github.com/spf13/viper/internal/encoding/toml/codec.go b/tools/vendor/github.com/spf13/viper/internal/encoding/toml/codec.go index 45fddc8b5..c70aa8d28 100644 --- a/tools/vendor/github.com/spf13/viper/internal/encoding/toml/codec.go +++ b/tools/vendor/github.com/spf13/viper/internal/encoding/toml/codec.go @@ -1,39 +1,16 @@ -//go:build viper_toml1 -// +build viper_toml1 - package toml import ( - "github.com/pelletier/go-toml" + "github.com/pelletier/go-toml/v2" ) // Codec implements the encoding.Encoder and encoding.Decoder interfaces for TOML encoding. type Codec struct{} -func (Codec) Encode(v map[string]interface{}) ([]byte, error) { - t, err := toml.TreeFromMap(v) - if err != nil { - return nil, err - } - - s, err := t.ToTomlString() - if err != nil { - return nil, err - } - - return []byte(s), nil +func (Codec) Encode(v map[string]any) ([]byte, error) { + return toml.Marshal(v) } -func (Codec) Decode(b []byte, v map[string]interface{}) error { - tree, err := toml.LoadBytes(b) - if err != nil { - return err - } - - tmap := tree.ToMap() - for key, value := range tmap { - v[key] = value - } - - return nil +func (Codec) Decode(b []byte, v map[string]any) error { + return toml.Unmarshal(b, &v) } diff --git a/tools/vendor/github.com/spf13/viper/internal/encoding/toml/codec2.go b/tools/vendor/github.com/spf13/viper/internal/encoding/toml/codec2.go deleted file mode 100644 index 112c6d372..000000000 --- a/tools/vendor/github.com/spf13/viper/internal/encoding/toml/codec2.go +++ /dev/null @@ -1,19 +0,0 @@ -//go:build !viper_toml1 -// +build !viper_toml1 - -package toml - -import ( - "github.com/pelletier/go-toml/v2" -) - -// Codec implements the encoding.Encoder and encoding.Decoder interfaces for TOML encoding. -type Codec struct{} - -func (Codec) Encode(v map[string]interface{}) ([]byte, error) { - return toml.Marshal(v) -} - -func (Codec) Decode(b []byte, v map[string]interface{}) error { - return toml.Unmarshal(b, &v) -} diff --git a/tools/vendor/github.com/spf13/viper/internal/encoding/yaml/codec.go b/tools/vendor/github.com/spf13/viper/internal/encoding/yaml/codec.go index 24cc19dfc..036879249 100644 --- a/tools/vendor/github.com/spf13/viper/internal/encoding/yaml/codec.go +++ b/tools/vendor/github.com/spf13/viper/internal/encoding/yaml/codec.go @@ -1,14 +1,14 @@ package yaml -// import "gopkg.in/yaml.v2" +import "gopkg.in/yaml.v3" // Codec implements the encoding.Encoder and encoding.Decoder interfaces for YAML encoding. 
type Codec struct{} -func (Codec) Encode(v map[string]interface{}) ([]byte, error) { +func (Codec) Encode(v map[string]any) ([]byte, error) { return yaml.Marshal(v) } -func (Codec) Decode(b []byte, v map[string]interface{}) error { +func (Codec) Decode(b []byte, v map[string]any) error { return yaml.Unmarshal(b, &v) } diff --git a/tools/vendor/github.com/spf13/viper/internal/encoding/yaml/yaml2.go b/tools/vendor/github.com/spf13/viper/internal/encoding/yaml/yaml2.go deleted file mode 100644 index 4c398c2f4..000000000 --- a/tools/vendor/github.com/spf13/viper/internal/encoding/yaml/yaml2.go +++ /dev/null @@ -1,14 +0,0 @@ -//go:build viper_yaml2 -// +build viper_yaml2 - -package yaml - -import yamlv2 "gopkg.in/yaml.v2" - -var yaml = struct { - Marshal func(in interface{}) (out []byte, err error) - Unmarshal func(in []byte, out interface{}) (err error) -}{ - Marshal: yamlv2.Marshal, - Unmarshal: yamlv2.Unmarshal, -} diff --git a/tools/vendor/github.com/spf13/viper/internal/encoding/yaml/yaml3.go b/tools/vendor/github.com/spf13/viper/internal/encoding/yaml/yaml3.go deleted file mode 100644 index 3a4775ced..000000000 --- a/tools/vendor/github.com/spf13/viper/internal/encoding/yaml/yaml3.go +++ /dev/null @@ -1,14 +0,0 @@ -//go:build !viper_yaml2 -// +build !viper_yaml2 - -package yaml - -import yamlv3 "gopkg.in/yaml.v3" - -var yaml = struct { - Marshal func(in interface{}) (out []byte, err error) - Unmarshal func(in []byte, out interface{}) (err error) -}{ - Marshal: yamlv3.Marshal, - Unmarshal: yamlv3.Unmarshal, -} diff --git a/tools/vendor/github.com/spf13/viper/internal/features/bind_struct.go b/tools/vendor/github.com/spf13/viper/internal/features/bind_struct.go new file mode 100644 index 000000000..89302c216 --- /dev/null +++ b/tools/vendor/github.com/spf13/viper/internal/features/bind_struct.go @@ -0,0 +1,5 @@ +//go:build viper_bind_struct + +package features + +const BindStruct = true diff --git a/tools/vendor/github.com/spf13/viper/internal/features/bind_struct_default.go b/tools/vendor/github.com/spf13/viper/internal/features/bind_struct_default.go new file mode 100644 index 000000000..edfaf73b6 --- /dev/null +++ b/tools/vendor/github.com/spf13/viper/internal/features/bind_struct_default.go @@ -0,0 +1,5 @@ +//go:build !viper_bind_struct + +package features + +const BindStruct = false diff --git a/tools/vendor/github.com/spf13/viper/internal/features/finder.go b/tools/vendor/github.com/spf13/viper/internal/features/finder.go new file mode 100644 index 000000000..983ea3a9d --- /dev/null +++ b/tools/vendor/github.com/spf13/viper/internal/features/finder.go @@ -0,0 +1,5 @@ +//go:build viper_finder + +package features + +const Finder = true diff --git a/tools/vendor/github.com/spf13/viper/internal/features/finder_default.go b/tools/vendor/github.com/spf13/viper/internal/features/finder_default.go new file mode 100644 index 000000000..89bcb06ee --- /dev/null +++ b/tools/vendor/github.com/spf13/viper/internal/features/finder_default.go @@ -0,0 +1,5 @@ +//go:build !viper_finder + +package features + +const Finder = false diff --git a/tools/vendor/github.com/spf13/viper/logger.go b/tools/vendor/github.com/spf13/viper/logger.go index 0115067ae..828042f29 100644 --- a/tools/vendor/github.com/spf13/viper/logger.go +++ b/tools/vendor/github.com/spf13/viper/logger.go @@ -1,77 +1,31 @@ package viper import ( - "fmt" - - jww "github.com/spf13/jwalterweatherman" + "context" + "log/slog" ) -// Logger is a unified interface for various logging use cases and practices, including: -// - leveled logging -// 
- structured logging -type Logger interface { - // Trace logs a Trace event. - // - // Even more fine-grained information than Debug events. - // Loggers not supporting this level should fall back to Debug. - Trace(msg string, keyvals ...interface{}) - - // Debug logs a Debug event. - // - // A verbose series of information events. - // They are useful when debugging the system. - Debug(msg string, keyvals ...interface{}) - - // Info logs an Info event. - // - // General information about what's happening inside the system. - Info(msg string, keyvals ...interface{}) - - // Warn logs a Warn(ing) event. - // - // Non-critical events that should be looked at. - Warn(msg string, keyvals ...interface{}) - - // Error logs an Error event. - // - // Critical events that require immediate attention. - // Loggers commonly provide Fatal and Panic levels above Error level, - // but exiting and panicing is out of scope for a logging library. - Error(msg string, keyvals ...interface{}) -} - -type jwwLogger struct{} - -func (jwwLogger) Trace(msg string, keyvals ...interface{}) { - jww.TRACE.Printf(jwwLogMessage(msg, keyvals...)) +// WithLogger sets a custom logger. +func WithLogger(l *slog.Logger) Option { + return optionFunc(func(v *Viper) { + v.logger = l + }) } -func (jwwLogger) Debug(msg string, keyvals ...interface{}) { - jww.DEBUG.Printf(jwwLogMessage(msg, keyvals...)) -} +type discardHandler struct{} -func (jwwLogger) Info(msg string, keyvals ...interface{}) { - jww.INFO.Printf(jwwLogMessage(msg, keyvals...)) +func (n *discardHandler) Enabled(_ context.Context, _ slog.Level) bool { + return false } -func (jwwLogger) Warn(msg string, keyvals ...interface{}) { - jww.WARN.Printf(jwwLogMessage(msg, keyvals...)) +func (n *discardHandler) Handle(_ context.Context, _ slog.Record) error { + return nil } -func (jwwLogger) Error(msg string, keyvals ...interface{}) { - jww.ERROR.Printf(jwwLogMessage(msg, keyvals...)) +func (n *discardHandler) WithAttrs(_ []slog.Attr) slog.Handler { + return n } -func jwwLogMessage(msg string, keyvals ...interface{}) string { - out := msg - - if len(keyvals) > 0 && len(keyvals)%2 == 1 { - keyvals = append(keyvals, nil) - } - - for i := 0; i <= len(keyvals)-2; i += 2 { - out = fmt.Sprintf("%s %v=%v", out, keyvals[i], keyvals[i+1]) - } - - return out +func (n *discardHandler) WithGroup(_ string) slog.Handler { + return n } diff --git a/tools/vendor/github.com/spf13/viper/remote.go b/tools/vendor/github.com/spf13/viper/remote.go new file mode 100644 index 000000000..bdde7de26 --- /dev/null +++ b/tools/vendor/github.com/spf13/viper/remote.go @@ -0,0 +1,256 @@ +package viper + +import ( + "bytes" + "fmt" + "io" + "reflect" + "slices" +) + +// SupportedRemoteProviders are universally supported remote providers. +var SupportedRemoteProviders = []string{"etcd", "etcd3", "consul", "firestore", "nats"} + +func resetRemote() { + SupportedRemoteProviders = []string{"etcd", "etcd3", "consul", "firestore", "nats"} +} + +type remoteConfigFactory interface { + Get(rp RemoteProvider) (io.Reader, error) + Watch(rp RemoteProvider) (io.Reader, error) + WatchChannel(rp RemoteProvider) (<-chan *RemoteResponse, chan bool) +} + +type RemoteResponse struct { + Value []byte + Error error +} + +// RemoteConfig is optional, see the remote package. +var RemoteConfig remoteConfigFactory + +// UnsupportedRemoteProviderError denotes encountering an unsupported remote +// provider. Currently etcd, etcd3, Consul, Firestore and NATS are supported.
+type UnsupportedRemoteProviderError string + +// Error returns the formatted remote provider error. +func (str UnsupportedRemoteProviderError) Error() string { + return fmt.Sprintf("Unsupported Remote Provider Type %q", string(str)) +} + +// RemoteConfigError denotes encountering an error while trying to +// pull the configuration from the remote provider. +type RemoteConfigError string + +// Error returns the formatted remote provider error. +func (rce RemoteConfigError) Error() string { + return fmt.Sprintf("Remote Configurations Error: %s", string(rce)) +} + +type defaultRemoteProvider struct { + provider string + endpoint string + path string + secretKeyring string +} + +func (rp defaultRemoteProvider) Provider() string { + return rp.provider +} + +func (rp defaultRemoteProvider) Endpoint() string { + return rp.endpoint +} + +func (rp defaultRemoteProvider) Path() string { + return rp.path +} + +func (rp defaultRemoteProvider) SecretKeyring() string { + return rp.secretKeyring +} + +// RemoteProvider stores the configuration necessary +// to connect to a remote key/value store. +// An optional secretKeyring to decrypt encrypted values +// can be provided. +type RemoteProvider interface { + Provider() string + Endpoint() string + Path() string + SecretKeyring() string +} + +// AddRemoteProvider adds a remote configuration source. +// Remote Providers are searched in the order they are added. +// provider is a string value: "etcd", "etcd3", "consul", "firestore" or "nats" are currently supported. +// endpoint is the url. etcd requires http://ip:port, consul requires ip:port, nats requires nats://ip:port +// path is the path in the k/v store to retrieve configuration +// To retrieve a config file called myapp.json from /configs/myapp.json +// you should set path to /configs and set config name (SetConfigName()) to +// "myapp". +func AddRemoteProvider(provider, endpoint, path string) error { + return v.AddRemoteProvider(provider, endpoint, path) +} + +func (v *Viper) AddRemoteProvider(provider, endpoint, path string) error { + if !slices.Contains(SupportedRemoteProviders, provider) { + return UnsupportedRemoteProviderError(provider) + } + if provider != "" && endpoint != "" { + v.logger.Info("adding remote provider", "provider", provider, "endpoint", endpoint) + + rp := &defaultRemoteProvider{ + endpoint: endpoint, + provider: provider, + path: path, + } + if !v.providerPathExists(rp) { + v.remoteProviders = append(v.remoteProviders, rp) + } + } + return nil +} + +// AddSecureRemoteProvider adds a remote configuration source. +// Secure Remote Providers are searched in the order they are added. +// provider is a string value: "etcd", "etcd3", "consul", "firestore" or "nats" are currently supported. +// endpoint is the url. etcd requires http://ip:port, consul requires ip:port +// secretkeyring is the filepath to your openpgp secret keyring. e.g. /etc/secrets/myring.gpg +// path is the path in the k/v store to retrieve configuration +// To retrieve a config file called myapp.json from /configs/myapp.json +// you should set path to /configs and set config name (SetConfigName()) to +// "myapp". +// Secure Remote Providers are implemented with github.com/sagikazarmark/crypt.
+func AddSecureRemoteProvider(provider, endpoint, path, secretkeyring string) error { + return v.AddSecureRemoteProvider(provider, endpoint, path, secretkeyring) +} + +func (v *Viper) AddSecureRemoteProvider(provider, endpoint, path, secretkeyring string) error { + if !slices.Contains(SupportedRemoteProviders, provider) { + return UnsupportedRemoteProviderError(provider) + } + if provider != "" && endpoint != "" { + v.logger.Info("adding remote provider", "provider", provider, "endpoint", endpoint) + + rp := &defaultRemoteProvider{ + endpoint: endpoint, + provider: provider, + path: path, + secretKeyring: secretkeyring, + } + if !v.providerPathExists(rp) { + v.remoteProviders = append(v.remoteProviders, rp) + } + } + return nil +} + +func (v *Viper) providerPathExists(p *defaultRemoteProvider) bool { + for _, y := range v.remoteProviders { + if reflect.DeepEqual(y, p) { + return true + } + } + return false +} + +// ReadRemoteConfig attempts to get configuration from a remote source +// and read it in the remote configuration registry. +func ReadRemoteConfig() error { return v.ReadRemoteConfig() } + +func (v *Viper) ReadRemoteConfig() error { + return v.getKeyValueConfig() +} + +func WatchRemoteConfig() error { return v.WatchRemoteConfig() } +func (v *Viper) WatchRemoteConfig() error { + return v.watchKeyValueConfig() +} + +func (v *Viper) WatchRemoteConfigOnChannel() error { + return v.watchKeyValueConfigOnChannel() +} + +// Retrieve the first found remote configuration. +func (v *Viper) getKeyValueConfig() error { + if RemoteConfig == nil { + return RemoteConfigError("Enable the remote features by doing a blank import of the viper/remote package: '_ github.com/spf13/viper/remote'") + } + + if len(v.remoteProviders) == 0 { + return RemoteConfigError("No Remote Providers") + } + + for _, rp := range v.remoteProviders { + val, err := v.getRemoteConfig(rp) + if err != nil { + v.logger.Error(fmt.Errorf("get remote config: %w", err).Error()) + + continue + } + + v.kvstore = val + + return nil + } + return RemoteConfigError("No Files Found") +} + +func (v *Viper) getRemoteConfig(provider RemoteProvider) (map[string]any, error) { + reader, err := RemoteConfig.Get(provider) + if err != nil { + return nil, err + } + err = v.unmarshalReader(reader, v.kvstore) + return v.kvstore, err +} + +// Retrieve the first found remote configuration. +func (v *Viper) watchKeyValueConfigOnChannel() error { + if len(v.remoteProviders) == 0 { + return RemoteConfigError("No Remote Providers") + } + + for _, rp := range v.remoteProviders { + respc, _ := RemoteConfig.WatchChannel(rp) + // Todo: Add quit channel + go func(rc <-chan *RemoteResponse) { + for { + b := <-rc + reader := bytes.NewReader(b.Value) + v.unmarshalReader(reader, v.kvstore) + } + }(respc) + return nil + } + return RemoteConfigError("No Files Found") +} + +// Retrieve the first found remote configuration. 
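WatchRemoteConfig above re-reads the store once per call, so callers typically drive it from their own loop. A sketch of that polling pattern; the interval and config struct are illustrative only:

package remotecfg

import (
	"time"

	"github.com/spf13/viper"
)

// appConfig stands in for whatever the application decodes into.
type appConfig struct {
	Port int
}

// pollRemote periodically refreshes the kvstore and re-decodes it.
// Errors are already logged inside watchKeyValueConfig, so the previous
// snapshot simply stays in effect on failure.
func pollRemote(v *viper.Viper, cfg *appConfig) {
	for {
		time.Sleep(5 * time.Second)

		if err := v.WatchRemoteConfig(); err != nil {
			continue
		}

		_ = v.Unmarshal(cfg)
	}
}
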
+func (v *Viper) watchKeyValueConfig() error { + if len(v.remoteProviders) == 0 { + return RemoteConfigError("No Remote Providers") + } + + for _, rp := range v.remoteProviders { + val, err := v.watchRemoteConfig(rp) + if err != nil { + v.logger.Error(fmt.Errorf("watch remote config: %w", err).Error()) + + continue + } + v.kvstore = val + return nil + } + return RemoteConfigError("No Files Found") +} + +func (v *Viper) watchRemoteConfig(provider RemoteProvider) (map[string]any, error) { + reader, err := RemoteConfig.Watch(provider) + if err != nil { + return nil, err + } + err = v.unmarshalReader(reader, v.kvstore) + return v.kvstore, err +} diff --git a/tools/vendor/github.com/spf13/viper/util.go b/tools/vendor/github.com/spf13/viper/util.go index ee7a86d9d..2a08074bc 100644 --- a/tools/vendor/github.com/spf13/viper/util.go +++ b/tools/vendor/github.com/spf13/viper/util.go @@ -12,6 +12,7 @@ package viper import ( "fmt" + "log/slog" "os" "path/filepath" "runtime" @@ -31,13 +32,18 @@ func (pe ConfigParseError) Error() string { return fmt.Sprintf("While parsing config: %s", pe.err.Error()) } +// Unwrap returns the wrapped error. +func (pe ConfigParseError) Unwrap() error { + return pe.err +} + // toCaseInsensitiveValue checks if the value is a map; // if so, create a copy and lower-case the keys recursively. -func toCaseInsensitiveValue(value interface{}) interface{} { +func toCaseInsensitiveValue(value any) any { switch v := value.(type) { - case map[interface{}]interface{}: + case map[any]any: value = copyAndInsensitiviseMap(cast.ToStringMap(v)) - case map[string]interface{}: + case map[string]any: value = copyAndInsensitiviseMap(v) } @@ -46,15 +52,15 @@ func toCaseInsensitiveValue(value interface{}) interface{} { // copyAndInsensitiviseMap behaves like insensitiviseMap, but creates a copy of // any map it makes case insensitive. 
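The new ConfigParseError.Unwrap above makes the wrapped parser error reachable through the standard errors helpers. A small sketch; the malformed JSON is illustrative:

package main

import (
	"errors"
	"fmt"
	"strings"

	"github.com/spf13/viper"
)

func main() {
	v := viper.New()
	v.SetConfigType("json")

	err := v.ReadConfig(strings.NewReader("{not json"))

	var parseErr viper.ConfigParseError
	if errors.As(err, &parseErr) {
		// Unwrap now exposes the underlying decoder error.
		fmt.Println("parse failure:", errors.Unwrap(parseErr))
	}
}
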
-func copyAndInsensitiviseMap(m map[string]interface{}) map[string]interface{} { - nm := make(map[string]interface{}) +func copyAndInsensitiviseMap(m map[string]any) map[string]any { + nm := make(map[string]any) for key, val := range m { lkey := strings.ToLower(key) switch v := val.(type) { - case map[interface{}]interface{}: + case map[any]any: nm[lkey] = copyAndInsensitiviseMap(cast.ToStringMap(v)) - case map[string]interface{}: + case map[string]any: nm[lkey] = copyAndInsensitiviseMap(v) default: nm[lkey] = v @@ -64,18 +70,25 @@ func copyAndInsensitiviseMap(m map[string]interface{}) map[string]interface{} { return nm } -func insensitiviseMap(m map[string]interface{}) { - for key, val := range m { - switch val.(type) { - case map[interface{}]interface{}: - // nested map: cast and recursively insensitivise - val = cast.ToStringMap(val) - insensitiviseMap(val.(map[string]interface{})) - case map[string]interface{}: - // nested map: recursively insensitivise - insensitiviseMap(val.(map[string]interface{})) - } +func insensitiviseVal(val any) any { + switch v := val.(type) { + case map[any]any: + // nested map: cast and recursively insensitivise + val = cast.ToStringMap(val) + insensitiviseMap(val.(map[string]any)) + case map[string]any: + // nested map: recursively insensitivise + insensitiviseMap(v) + case []any: + // nested array: recursively insensitivise + insensitiveArray(v) + } + return val +} +func insensitiviseMap(m map[string]any) { + for key, val := range m { + val = insensitiviseVal(val) lower := strings.ToLower(key) if key != lower { // remove old key (not lower-cased) @@ -86,7 +99,13 @@ func insensitiviseMap(m map[string]interface{}) { } } -func absPathify(logger Logger, inPath string) string { +func insensitiveArray(a []any) { + for i, val := range a { + a[i] = insensitiviseVal(val) + } +} + +func absPathify(logger *slog.Logger, inPath string) string { logger.Info("trying to resolve absolute path", "path", inPath) if inPath == "$HOME" || strings.HasPrefix(inPath, "$HOME"+string(os.PathSeparator)) { @@ -109,15 +128,6 @@ func absPathify(logger Logger, inPath string) string { return "" } -func stringInSlice(a string, list []string) bool { - for _, b := range list { - if b == a { - return true - } - } - return false -} - func userHomeDir() string { if runtime.GOOS == "windows" { home := os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH") @@ -137,7 +147,7 @@ func safeMul(a, b uint) uint { return c } -// parseSizeInBytes converts strings like 1GB or 12 mb into an unsigned integer number of bytes +// parseSizeInBytes converts strings like 1GB or 12 mb into an unsigned integer number of bytes. func parseSizeInBytes(sizeStr string) uint { sizeStr = strings.TrimSpace(sizeStr) lastChar := len(sizeStr) - 1 @@ -179,22 +189,22 @@ func parseSizeInBytes(sizeStr string) uint { // In case intermediate keys do not exist, or map to a non-map value, // a new map is created and inserted, and the search continues from there: // the initial map "m" may be modified! 
-func deepSearch(m map[string]interface{}, path []string) map[string]interface{} { +func deepSearch(m map[string]any, path []string) map[string]any { for _, k := range path { m2, ok := m[k] if !ok { // intermediate key does not exist // => create it and continue from there - m3 := make(map[string]interface{}) + m3 := make(map[string]any) m[k] = m3 m = m3 continue } - m3, ok := m2.(map[string]interface{}) + m3, ok := m2.(map[string]any) if !ok { // intermediate key is a value // => replace with a new map - m3 = make(map[string]interface{}) + m3 = make(map[string]any) m[k] = m3 } // continue search from here diff --git a/tools/vendor/github.com/spf13/viper/viper.go b/tools/vendor/github.com/spf13/viper/viper.go index a3812e92f..f900e58b1 100644 --- a/tools/vendor/github.com/spf13/viper/viper.go +++ b/tools/vendor/github.com/spf13/viper/viper.go @@ -25,29 +25,23 @@ import ( "errors" "fmt" "io" - "log" + "log/slog" "os" "path/filepath" "reflect" + "slices" "strconv" "strings" "sync" "time" "github.com/fsnotify/fsnotify" - "github.com/mitchellh/mapstructure" + "github.com/go-viper/mapstructure/v2" "github.com/spf13/afero" "github.com/spf13/cast" "github.com/spf13/pflag" - "github.com/spf13/viper/internal/encoding" - "github.com/spf13/viper/internal/encoding/dotenv" - "github.com/spf13/viper/internal/encoding/hcl" - "github.com/spf13/viper/internal/encoding/ini" - "github.com/spf13/viper/internal/encoding/javaproperties" - "github.com/spf13/viper/internal/encoding/json" - "github.com/spf13/viper/internal/encoding/toml" - "github.com/spf13/viper/internal/encoding/yaml" + "github.com/spf13/viper/internal/features" ) // ConfigMarshalError happens when failing to marshal the configuration. @@ -62,24 +56,10 @@ func (e ConfigMarshalError) Error() string { var v *Viper -type RemoteResponse struct { - Value []byte - Error error -} - func init() { v = New() } -type remoteConfigFactory interface { - Get(rp RemoteProvider) (io.Reader, error) - Watch(rp RemoteProvider) (io.Reader, error) - WatchChannel(rp RemoteProvider) (<-chan *RemoteResponse, chan bool) -} - -// RemoteConfig is optional, see the remote package -var RemoteConfig remoteConfigFactory - // UnsupportedConfigError denotes encountering an unsupported // configuration filetype. type UnsupportedConfigError string @@ -89,24 +69,6 @@ func (str UnsupportedConfigError) Error() string { return fmt.Sprintf("Unsupported Config Type %q", string(str)) } -// UnsupportedRemoteProviderError denotes encountering an unsupported remote -// provider. Currently only etcd and Consul are supported. -type UnsupportedRemoteProviderError string - -// Error returns the formatted remote provider error. -func (str UnsupportedRemoteProviderError) Error() string { - return fmt.Sprintf("Unsupported Remote Provider Type %q", string(str)) -} - -// RemoteConfigError denotes encountering an error while trying to -// pull the configuration from the remote provider. -type RemoteConfigError string - -// Error returns the formatted remote provider error -func (rce RemoteConfigError) Error() string { - return fmt.Sprintf("Remote Configurations Error: %s", string(rce)) -} - // ConfigFileNotFoundError denotes failing to find configuration file. type ConfigFileNotFoundError struct { name, locations string @@ -126,16 +88,16 @@ func (faee ConfigFileAlreadyExistsError) Error() string { } // A DecoderConfigOption can be passed to viper.Unmarshal to configure -// mapstructure.DecoderConfig options +// mapstructure.DecoderConfig options. 
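Since deepSearch is unexported, its contract ("the initial map may be modified") is easiest to see by running it directly; this demo restates the function body from the hunk above verbatim so it is self-contained:

package main

import "fmt"

func main() {
	m := map[string]any{}

	// Walking "a.b" through an empty map creates the intermediate maps
	// and returns the innermost one, mutating m in place.
	leaf := deepSearch(m, []string{"a", "b"})
	leaf["c"] = 1

	fmt.Println(m) // map[a:map[b:map[c:1]]]
}

// deepSearch is copied from the patch above for a runnable demo.
func deepSearch(m map[string]any, path []string) map[string]any {
	for _, k := range path {
		m2, ok := m[k]
		if !ok {
			m3 := make(map[string]any)
			m[k] = m3
			m = m3
			continue
		}
		m3, ok := m2.(map[string]any)
		if !ok {
			m3 = make(map[string]any)
			m[k] = m3
		}
		m = m3
	}
	return m
}
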
type DecoderConfigOption func(*mapstructure.DecoderConfig) // DecodeHook returns a DecoderConfigOption which overrides the default // DecoderConfig.DecodeHook value, the default is: // -// mapstructure.ComposeDecodeHookFunc( -// mapstructure.StringToTimeDurationHookFunc(), -// mapstructure.StringToSliceHookFunc(","), -// ) +// mapstructure.ComposeDecodeHookFunc( +// mapstructure.StringToTimeDurationHookFunc(), +// mapstructure.StringToSliceHookFunc(","), +// ) func DecodeHook(hook mapstructure.DecodeHookFunc) DecoderConfigOption { return func(c *mapstructure.DecoderConfig) { c.DecodeHook = hook @@ -156,18 +118,18 @@ func DecodeHook(hook mapstructure.DecodeHookFunc) DecoderConfigOption { // // For example, if values from the following sources were loaded: // -// Defaults : { -// "secret": "", -// "user": "default", -// "endpoint": "https://localhost" -// } -// Config : { -// "user": "root" -// "secret": "defaultsecret" -// } -// Env : { -// "secret": "somesecretkey" -// } +// Defaults : { +// "secret": "", +// "user": "default", +// "endpoint": "https://localhost" +// } +// Config : { +// "user": "root" +// "secret": "defaultsecret" +// } +// Env : { +// "secret": "somesecretkey" +// } // // The resulting config will have the following values: // @@ -189,6 +151,8 @@ type Viper struct { // The filesystem to read config from. fs afero.Fs + finder Finder + // A set of remote providers to search for the configuration remoteProviders []*defaultRemoteProvider @@ -199,17 +163,15 @@ type Viper struct { configPermissions os.FileMode envPrefix string - // Specific commands for ini parsing - iniLoadOptions ini.LoadOptions - automaticEnvApplied bool envKeyReplacer StringReplacer allowEmptyEnv bool - config map[string]interface{} - override map[string]interface{} - defaults map[string]interface{} - kvstore map[string]interface{} + parents []string + config map[string]any + override map[string]any + defaults map[string]any + kvstore map[string]any pflags map[string]FlagValue env map[string][]string aliases map[string]string @@ -217,11 +179,15 @@ type Viper struct { onConfigChange func(fsnotify.Event) - logger Logger + logger *slog.Logger - // TODO: should probably be protected with a mutex - encoderRegistry *encoding.EncoderRegistry - decoderRegistry *encoding.DecoderRegistry + encoderRegistry EncoderRegistry + decoderRegistry DecoderRegistry + + decodeHook mapstructure.DecodeHookFunc + + experimentalFinder bool + experimentalBindStruct bool } // New returns an initialized Viper instance. 
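DecodeHook above swaps the default hook chain for a caller-supplied one on a per-call basis. A sketch, assuming the github.com/go-viper/mapstructure/v2 module this patch migrates to; the key and value are made up:

package main

import (
	"fmt"
	"time"

	"github.com/go-viper/mapstructure/v2"
	"github.com/spf13/viper"
)

func main() {
	v := viper.New()
	v.Set("timeout", "1500ms")

	var timeout time.Duration

	err := v.UnmarshalKey("timeout", &timeout, viper.DecodeHook(
		mapstructure.StringToTimeDurationHookFunc(),
	))
	fmt.Println(timeout, err) // 1.5s <nil>
}
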
@@ -231,17 +197,24 @@ func New() *Viper { v.configName = "config" v.configPermissions = os.FileMode(0o644) v.fs = afero.NewOsFs() - v.config = make(map[string]interface{}) - v.override = make(map[string]interface{}) - v.defaults = make(map[string]interface{}) - v.kvstore = make(map[string]interface{}) + v.config = make(map[string]any) + v.parents = []string{} + v.override = make(map[string]any) + v.defaults = make(map[string]any) + v.kvstore = make(map[string]any) v.pflags = make(map[string]FlagValue) v.env = make(map[string][]string) v.aliases = make(map[string]string) v.typeByDefValue = false - v.logger = jwwLogger{} + v.logger = slog.New(&discardHandler{}) + + codecRegistry := NewCodecRegistry() + + v.encoderRegistry = codecRegistry + v.decoderRegistry = codecRegistry - v.resetEncoding() + v.experimentalFinder = features.Finder + v.experimentalBindStruct = features.BindStruct return v } @@ -277,10 +250,25 @@ type StringReplacer interface { // EnvKeyReplacer sets a replacer used for mapping environment variables to internal keys. func EnvKeyReplacer(r StringReplacer) Option { return optionFunc(func(v *Viper) { + if r == nil { + return + } + v.envKeyReplacer = r }) } +// WithDecodeHook sets a default decode hook for mapstructure. +func WithDecodeHook(h mapstructure.DecodeHookFunc) Option { + return optionFunc(func(v *Viper) { + if h == nil { + return + } + + v.decodeHook = h + }) +} + // NewWithOptions creates a new Viper instance. func NewWithOptions(opts ...Option) *Viper { v := New() @@ -289,158 +277,58 @@ func NewWithOptions(opts ...Option) *Viper { opt.apply(v) } - v.resetEncoding() - return v } +// SetOptions sets the options on the global Viper instance. +// +// Be careful when using this function: subsequent calls may override options you set. +// It's always better to use a local Viper instance. +func SetOptions(opts ...Option) { + for _, opt := range opts { + opt.apply(v) + } +} + // Reset is intended for testing, will reset all to default settings. // In the public interface for the viper package so applications // can use it in their testing as well. 
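Both option constructors touched above now ignore nil arguments instead of storing them. Combined usage might look like this; the APP_DB_HOST variable and the dot-to-underscore replacer are illustrative:

package main

import (
	"fmt"
	"os"
	"strings"

	"github.com/spf13/viper"
)

func main() {
	os.Setenv("APP_DB_HOST", "localhost")

	v := viper.NewWithOptions(
		viper.EnvKeyReplacer(strings.NewReplacer(".", "_")),
	)
	v.SetEnvPrefix("app")
	v.AutomaticEnv()

	// "db.host" -> "APP_DB.HOST" via the prefix, then "APP_DB_HOST"
	// via the replacer.
	fmt.Println(v.GetString("db.host")) // localhost
}
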
func Reset() { v = New() SupportedExts = []string{"json", "toml", "yaml", "yml", "properties", "props", "prop", "hcl", "tfvars", "dotenv", "env", "ini"} - SupportedRemoteProviders = []string{"etcd", "consul", "firestore"} -} - -// TODO: make this lazy initialization instead -func (v *Viper) resetEncoding() { - encoderRegistry := encoding.NewEncoderRegistry() - decoderRegistry := encoding.NewDecoderRegistry() - - { - codec := yaml.Codec{} - - encoderRegistry.RegisterEncoder("yaml", codec) - decoderRegistry.RegisterDecoder("yaml", codec) - - encoderRegistry.RegisterEncoder("yml", codec) - decoderRegistry.RegisterDecoder("yml", codec) - } - - { - codec := json.Codec{} - - encoderRegistry.RegisterEncoder("json", codec) - decoderRegistry.RegisterDecoder("json", codec) - } - - { - codec := toml.Codec{} - - encoderRegistry.RegisterEncoder("toml", codec) - decoderRegistry.RegisterDecoder("toml", codec) - } - - { - codec := hcl.Codec{} - - encoderRegistry.RegisterEncoder("hcl", codec) - decoderRegistry.RegisterDecoder("hcl", codec) - - encoderRegistry.RegisterEncoder("tfvars", codec) - decoderRegistry.RegisterDecoder("tfvars", codec) - } - - { - codec := ini.Codec{ - KeyDelimiter: v.keyDelim, - LoadOptions: v.iniLoadOptions, - } - - encoderRegistry.RegisterEncoder("ini", codec) - decoderRegistry.RegisterDecoder("ini", codec) - } - - { - codec := &javaproperties.Codec{ - KeyDelimiter: v.keyDelim, - } - - encoderRegistry.RegisterEncoder("properties", codec) - decoderRegistry.RegisterDecoder("properties", codec) - - encoderRegistry.RegisterEncoder("props", codec) - decoderRegistry.RegisterDecoder("props", codec) - - encoderRegistry.RegisterEncoder("prop", codec) - decoderRegistry.RegisterDecoder("prop", codec) - } - - { - codec := &dotenv.Codec{} - encoderRegistry.RegisterEncoder("dotenv", codec) - decoderRegistry.RegisterDecoder("dotenv", codec) - - encoderRegistry.RegisterEncoder("env", codec) - decoderRegistry.RegisterDecoder("env", codec) - } - - v.encoderRegistry = encoderRegistry - v.decoderRegistry = decoderRegistry -} - -type defaultRemoteProvider struct { - provider string - endpoint string - path string - secretKeyring string -} - -func (rp defaultRemoteProvider) Provider() string { - return rp.provider -} - -func (rp defaultRemoteProvider) Endpoint() string { - return rp.endpoint -} - -func (rp defaultRemoteProvider) Path() string { - return rp.path -} - -func (rp defaultRemoteProvider) SecretKeyring() string { - return rp.secretKeyring -} - -// RemoteProvider stores the configuration necessary -// to connect to a remote key/value store. -// Optional secretKeyring to unencrypt encrypted values -// can be provided. -type RemoteProvider interface { - Provider() string - Endpoint() string - Path() string - SecretKeyring() string + resetRemote() } // SupportedExts are universally supported extensions. var SupportedExts = []string{"json", "toml", "yaml", "yml", "properties", "props", "prop", "hcl", "tfvars", "dotenv", "env", "ini"} -// SupportedRemoteProviders are universally supported remote providers. -var SupportedRemoteProviders = []string{"etcd", "consul", "firestore"} - +// OnConfigChange sets the event handler that is called when a config file changes. func OnConfigChange(run func(in fsnotify.Event)) { v.OnConfigChange(run) } + +// OnConfigChange sets the event handler that is called when a config file changes. func (v *Viper) OnConfigChange(run func(in fsnotify.Event)) { v.onConfigChange = run } +// WatchConfig starts watching a config file for changes. 
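End to end, the watcher rewired above (now logging through slog rather than the standard log package) is consumed like this; config.yaml is a placeholder path:

package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
	"github.com/spf13/viper"
)

func main() {
	v := viper.New()
	v.SetConfigFile("config.yaml")

	if err := v.ReadInConfig(); err != nil {
		log.Fatal(err)
	}

	v.OnConfigChange(func(e fsnotify.Event) {
		log.Println("config changed:", e.Name)
	})
	v.WatchConfig()

	select {} // keep the process alive so the watch goroutine can fire
}
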
func WatchConfig() { v.WatchConfig() } +// WatchConfig starts watching a config file for changes. func (v *Viper) WatchConfig() { initWG := sync.WaitGroup{} initWG.Add(1) go func() { - watcher, err := newWatcher() + watcher, err := fsnotify.NewWatcher() if err != nil { - log.Fatal(err) + v.logger.Error(fmt.Sprintf("failed to create watcher: %s", err)) + os.Exit(1) } defer watcher.Close() // we have to watch the entire directory to pick up renames/atomic saves in a cross-platform way filename, err := v.getConfigFile() if err != nil { - log.Printf("error: %v\n", err) + v.logger.Error(fmt.Sprintf("get config file: %s", err)) initWG.Done() return } @@ -463,27 +351,25 @@ func (v *Viper) WatchConfig() { // we only care about the config file with the following cases: // 1 - if the config file was modified or created // 2 - if the real path to the config file changed (eg: k8s ConfigMap replacement) - const writeOrCreateMask = fsnotify.Write | fsnotify.Create if (filepath.Clean(event.Name) == configFile && - event.Op&writeOrCreateMask != 0) || + (event.Has(fsnotify.Write) || event.Has(fsnotify.Create))) || (currentConfigFile != "" && currentConfigFile != realConfigFile) { realConfigFile = currentConfigFile err := v.ReadInConfig() if err != nil { - log.Printf("error reading config file: %v\n", err) + v.logger.Error(fmt.Sprintf("read config file: %s", err)) } if v.onConfigChange != nil { v.onConfigChange(event) } - } else if filepath.Clean(event.Name) == configFile && - event.Op&fsnotify.Remove != 0 { + } else if filepath.Clean(event.Name) == configFile && event.Has(fsnotify.Remove) { eventsWG.Done() return } case err, ok := <-watcher.Errors: if ok { // 'Errors' channel is not closed - log.Printf("watcher error: %v\n", err) + v.logger.Error(fmt.Sprintf("watcher error: %s", err)) } eventsWG.Done() return @@ -518,6 +404,12 @@ func (v *Viper) SetEnvPrefix(in string) { } } +func GetEnvPrefix() string { return v.GetEnvPrefix() } + +func (v *Viper) GetEnvPrefix() string { + return v.envPrefix +} + func (v *Viper) mergeWithEnvPrefix(in string) string { if v.envPrefix != "" { return strings.ToUpper(v.envPrefix + "_" + in) @@ -561,94 +453,24 @@ func (v *Viper) ConfigFileUsed() string { return v.configFile } func AddConfigPath(in string) { v.AddConfigPath(in) } func (v *Viper) AddConfigPath(in string) { + if v.finder != nil { + v.logger.Warn("ineffective call to function: custom finder takes precedence", slog.String("function", "AddConfigPath")) + } + if in != "" { absin := absPathify(v.logger, in) v.logger.Info("adding path to search paths", "path", absin) - if !stringInSlice(absin, v.configPaths) { + if !slices.Contains(v.configPaths, absin) { v.configPaths = append(v.configPaths, absin) } } } -// AddRemoteProvider adds a remote configuration source. -// Remote Providers are searched in the order they are added. -// provider is a string value: "etcd", "consul" or "firestore" are currently supported. -// endpoint is the url. 
etcd requires http://ip:port consul requires ip:port -// path is the path in the k/v store to retrieve configuration -// To retrieve a config file called myapp.json from /configs/myapp.json -// you should set path to /configs and set config name (SetConfigName()) to -// "myapp" -func AddRemoteProvider(provider, endpoint, path string) error { - return v.AddRemoteProvider(provider, endpoint, path) -} - -func (v *Viper) AddRemoteProvider(provider, endpoint, path string) error { - if !stringInSlice(provider, SupportedRemoteProviders) { - return UnsupportedRemoteProviderError(provider) - } - if provider != "" && endpoint != "" { - v.logger.Info("adding remote provider", "provider", provider, "endpoint", endpoint) - - rp := &defaultRemoteProvider{ - endpoint: endpoint, - provider: provider, - path: path, - } - if !v.providerPathExists(rp) { - v.remoteProviders = append(v.remoteProviders, rp) - } - } - return nil -} - -// AddSecureRemoteProvider adds a remote configuration source. -// Secure Remote Providers are searched in the order they are added. -// provider is a string value: "etcd", "consul" or "firestore" are currently supported. -// endpoint is the url. etcd requires http://ip:port consul requires ip:port -// secretkeyring is the filepath to your openpgp secret keyring. e.g. /etc/secrets/myring.gpg -// path is the path in the k/v store to retrieve configuration -// To retrieve a config file called myapp.json from /configs/myapp.json -// you should set path to /configs and set config name (SetConfigName()) to -// "myapp" -// Secure Remote Providers are implemented with github.com/bketelsen/crypt -func AddSecureRemoteProvider(provider, endpoint, path, secretkeyring string) error { - return v.AddSecureRemoteProvider(provider, endpoint, path, secretkeyring) -} - -func (v *Viper) AddSecureRemoteProvider(provider, endpoint, path, secretkeyring string) error { - if !stringInSlice(provider, SupportedRemoteProviders) { - return UnsupportedRemoteProviderError(provider) - } - if provider != "" && endpoint != "" { - v.logger.Info("adding remote provider", "provider", provider, "endpoint", endpoint) - - rp := &defaultRemoteProvider{ - endpoint: endpoint, - provider: provider, - path: path, - secretKeyring: secretkeyring, - } - if !v.providerPathExists(rp) { - v.remoteProviders = append(v.remoteProviders, rp) - } - } - return nil -} - -func (v *Viper) providerPathExists(p *defaultRemoteProvider) bool { - for _, y := range v.remoteProviders { - if reflect.DeepEqual(y, p) { - return true - } - } - return false -} - // searchMap recursively searches for a value for path in source map. // Returns nil if not found. // Note: This assumes that the path entries and map keys are lower cased. 
-func (v *Viper) searchMap(source map[string]interface{}, path []string) interface{} { +func (v *Viper) searchMap(source map[string]any, path []string) any { if len(path) == 0 { return source } @@ -661,13 +483,13 @@ func (v *Viper) searchMap(source map[string]interface{}, path []string) interfac } // Nested case - switch next.(type) { - case map[interface{}]interface{}: + switch next := next.(type) { + case map[any]any: return v.searchMap(cast.ToStringMap(next), path[1:]) - case map[string]interface{}: + case map[string]any: // Type assertion is safe here since it is only reached // if the type of `next` is the same as the type being asserted - return v.searchMap(next.(map[string]interface{}), path[1:]) + return v.searchMap(next, path[1:]) default: // got a value but nested key expected, return "nil" for not found return nil @@ -687,7 +509,7 @@ func (v *Viper) searchMap(source map[string]interface{}, path []string) interfac // in their keys). // // Note: This assumes that the path entries and map keys are lower cased. -func (v *Viper) searchIndexableWithPathPrefixes(source interface{}, path []string) interface{} { +func (v *Viper) searchIndexableWithPathPrefixes(source any, path []string) any { if len(path) == 0 { return source } @@ -696,11 +518,11 @@ func (v *Viper) searchIndexableWithPathPrefixes(source interface{}, path []strin for i := len(path); i > 0; i-- { prefixKey := strings.ToLower(strings.Join(path[0:i], v.keyDelim)) - var val interface{} + var val any switch sourceIndexable := source.(type) { - case []interface{}: + case []any: val = v.searchSliceWithPathPrefixes(sourceIndexable, prefixKey, i, path) - case map[string]interface{}: + case map[string]any: val = v.searchMapWithPathPrefixes(sourceIndexable, prefixKey, i, path) } if val != nil { @@ -717,11 +539,11 @@ func (v *Viper) searchIndexableWithPathPrefixes(source interface{}, path []strin // This function is part of the searchIndexableWithPathPrefixes recurring search and // should not be called directly from functions other than searchIndexableWithPathPrefixes. func (v *Viper) searchSliceWithPathPrefixes( - sourceSlice []interface{}, + sourceSlice []any, prefixKey string, pathIndex int, path []string, -) interface{} { +) any { // if the prefixKey is not a number or it is out of bounds of the slice index, err := strconv.Atoi(prefixKey) if err != nil || len(sourceSlice) <= index { @@ -736,9 +558,9 @@ func (v *Viper) searchSliceWithPathPrefixes( } switch n := next.(type) { - case map[interface{}]interface{}: + case map[any]any: return v.searchIndexableWithPathPrefixes(cast.ToStringMap(n), path[pathIndex:]) - case map[string]interface{}, []interface{}: + case map[string]any, []any: return v.searchIndexableWithPathPrefixes(n, path[pathIndex:]) default: // got a value but nested key expected, do nothing and look for next prefix @@ -753,11 +575,11 @@ func (v *Viper) searchSliceWithPathPrefixes( // This function is part of the searchIndexableWithPathPrefixes recurring search and // should not be called directly from functions other than searchIndexableWithPathPrefixes. 
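The slice-aware search above is what lets dotted keys index into arrays. A sketch of the observable behavior; the YAML payload is made up:

package main

import (
	"fmt"
	"strings"

	"github.com/spf13/viper"
)

func main() {
	v := viper.New()
	v.SetConfigType("yaml")
	_ = v.ReadConfig(strings.NewReader("servers:\n  - name: alpha\n  - name: beta"))

	// searchSliceWithPathPrefixes treats the numeric segment as an index.
	fmt.Println(v.GetString("servers.1.name")) // beta
}
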
func (v *Viper) searchMapWithPathPrefixes( - sourceMap map[string]interface{}, + sourceMap map[string]any, prefixKey string, pathIndex int, path []string, -) interface{} { +) any { next, ok := sourceMap[prefixKey] if !ok { return nil @@ -770,9 +592,9 @@ func (v *Viper) searchMapWithPathPrefixes( // Nested case switch n := next.(type) { - case map[interface{}]interface{}: + case map[any]any: return v.searchIndexableWithPathPrefixes(cast.ToStringMap(n), path[pathIndex:]) - case map[string]interface{}, []interface{}: + case map[string]any, []any: return v.searchIndexableWithPathPrefixes(n, path[pathIndex:]) default: // got a value but nested key expected, do nothing and look for next prefix @@ -785,9 +607,10 @@ func (v *Viper) searchMapWithPathPrefixes( // isPathShadowedInDeepMap makes sure the given path is not shadowed somewhere // on its path in the map. // e.g., if "foo.bar" has a value in the given map, it “shadows” -// "foo.bar.baz" in a lower-priority map -func (v *Viper) isPathShadowedInDeepMap(path []string, m map[string]interface{}) string { - var parentVal interface{} +// +// "foo.bar.baz" in a lower-priority map +func (v *Viper) isPathShadowedInDeepMap(path []string, m map[string]any) string { + var parentVal any for i := 1; i < len(path); i++ { parentVal = v.searchMap(m, path[0:i]) if parentVal == nil { @@ -795,9 +618,9 @@ func (v *Viper) isPathShadowedInDeepMap(path []string, m map[string]interface{}) return "" } switch parentVal.(type) { - case map[interface{}]interface{}: + case map[any]any: continue - case map[string]interface{}: + case map[string]any: continue default: // parentVal is a regular value which shadows "path" @@ -810,13 +633,16 @@ func (v *Viper) isPathShadowedInDeepMap(path []string, m map[string]interface{}) // isPathShadowedInFlatMap makes sure the given path is not shadowed somewhere // in a sub-path of the map. // e.g., if "foo.bar" has a value in the given map, it “shadows” -// "foo.bar.baz" in a lower-priority map -func (v *Viper) isPathShadowedInFlatMap(path []string, mi interface{}) string { +// +// "foo.bar.baz" in a lower-priority map +func (v *Viper) isPathShadowedInFlatMap(path []string, mi any) string { // unify input map var m map[string]interface{} - switch mi.(type) { - case map[string]string, map[string]FlagValue: - m = cast.ToStringMap(mi) + switch miv := mi.(type) { + case map[string]string: + m = castMapStringToMapInterface(miv) + case map[string]FlagValue: + m = castMapFlagToMapInterface(miv) default: return "" } @@ -835,7 +661,8 @@ func (v *Viper) isPathShadowedInFlatMap(path []string, mi interface{}) string { // isPathShadowedInAutoEnv makes sure the given path is not shadowed somewhere // in the environment, when automatic env is on. 
// e.g., if "foo.bar" has a value in the environment, it “shadows” -// "foo.bar.baz" in a lower-priority map +// +// "foo.bar.baz" in a lower-priority map func (v *Viper) isPathShadowedInAutoEnv(path []string) string { var parentKey string for i := 1; i < len(path); i++ { @@ -856,11 +683,11 @@ func (v *Viper) isPathShadowedInAutoEnv(path []string) string { // would return a string slice for the key if the key's type is inferred by // the default value and the Get function would return: // -// []string {"a", "b", "c"} +// []string {"a", "b", "c"} // // Otherwise the Get function would return: // -// "a b c" +// "a b c" func SetTypeByDefaultValue(enable bool) { v.SetTypeByDefaultValue(enable) } func (v *Viper) SetTypeByDefaultValue(enable bool) { @@ -879,9 +706,9 @@ func GetViper() *Viper { // override, flag, env, config file, key/value store, default // // Get returns an interface. For a specific value use one of the Get____ methods. -func Get(key string) interface{} { return v.Get(key) } +func Get(key string) any { return v.Get(key) } -func (v *Viper) Get(key string) interface{} { +func (v *Viper) Get(key string) any { lcaseKey := strings.ToLower(key) val := v.find(lcaseKey, true) if val == nil { @@ -922,6 +749,8 @@ func (v *Viper) Get(key string) interface{} { return cast.ToStringSlice(val) case []int: return cast.ToIntSlice(val) + case []time.Duration: + return cast.ToDurationSlice(val) } } @@ -940,6 +769,12 @@ func (v *Viper) Sub(key string) *Viper { } if reflect.TypeOf(data).Kind() == reflect.Map { + subv.parents = append([]string(nil), v.parents...) + subv.parents = append(subv.parents, strings.ToLower(key)) + subv.automaticEnvApplied = v.automaticEnvApplied + subv.envPrefix = v.envPrefix + subv.envKeyReplacer = v.envKeyReplacer + subv.keyDelim = v.keyDelim subv.config = cast.ToStringMap(data) return subv } @@ -981,6 +816,13 @@ func (v *Viper) GetInt64(key string) int64 { return cast.ToInt64(v.Get(key)) } +// GetUint8 returns the value associated with the key as an unsigned integer. +func GetUint8(key string) uint8 { return v.GetUint8(key) } + +func (v *Viper) GetUint8(key string) uint8 { + return cast.ToUint8(v.Get(key)) +} + // GetUint returns the value associated with the key as an unsigned integer. func GetUint(key string) uint { return v.GetUint(key) } @@ -988,6 +830,13 @@ func (v *Viper) GetUint(key string) uint { return cast.ToUint(v.Get(key)) } +// GetUint16 returns the value associated with the key as an unsigned integer. +func GetUint16(key string) uint16 { return v.GetUint16(key) } + +func (v *Viper) GetUint16(key string) uint16 { + return cast.ToUint16(v.Get(key)) +} + // GetUint32 returns the value associated with the key as an unsigned integer. func GetUint32(key string) uint32 { return v.GetUint32(key) } @@ -1038,9 +887,9 @@ func (v *Viper) GetStringSlice(key string) []string { } // GetStringMap returns the value associated with the key as a map of interfaces. -func GetStringMap(key string) map[string]interface{} { return v.GetStringMap(key) } +func GetStringMap(key string) map[string]any { return v.GetStringMap(key) } -func (v *Viper) GetStringMap(key string) map[string]interface{} { +func (v *Viper) GetStringMap(key string) map[string]any { return cast.ToStringMap(v.Get(key)) } @@ -1068,44 +917,107 @@ func (v *Viper) GetSizeInBytes(key string) uint { } // UnmarshalKey takes a single key and unmarshals it into a Struct. 
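The Sub change above is behavioral: the sub-tree now inherits the parent's key delimiter, env prefix, replacer, and AutomaticEnv state instead of starting from scratch. Basic usage is unchanged; the key names here are arbitrary:

package main

import (
	"fmt"

	"github.com/spf13/viper"
)

func main() {
	v := viper.New()
	v.SetDefault("server.port", 8080)

	server := v.Sub("server")
	if server != nil { // Sub returns nil when the key is missing or not a map
		fmt.Println(server.GetInt("port")) // 8080
	}
}
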
-func UnmarshalKey(key string, rawVal interface{}, opts ...DecoderConfigOption) error { +func UnmarshalKey(key string, rawVal any, opts ...DecoderConfigOption) error { return v.UnmarshalKey(key, rawVal, opts...) } -func (v *Viper) UnmarshalKey(key string, rawVal interface{}, opts ...DecoderConfigOption) error { - return decode(v.Get(key), defaultDecoderConfig(rawVal, opts...)) +func (v *Viper) UnmarshalKey(key string, rawVal any, opts ...DecoderConfigOption) error { + return decode(v.Get(key), v.defaultDecoderConfig(rawVal, opts...)) } // Unmarshal unmarshals the config into a Struct. Make sure that the tags // on the fields of the structure are properly set. -func Unmarshal(rawVal interface{}, opts ...DecoderConfigOption) error { +func Unmarshal(rawVal any, opts ...DecoderConfigOption) error { return v.Unmarshal(rawVal, opts...) } -func (v *Viper) Unmarshal(rawVal interface{}, opts ...DecoderConfigOption) error { - return decode(v.AllSettings(), defaultDecoderConfig(rawVal, opts...)) +func (v *Viper) Unmarshal(rawVal any, opts ...DecoderConfigOption) error { + keys := v.AllKeys() + + if v.experimentalBindStruct { + // TODO: make this optional? + structKeys, err := v.decodeStructKeys(rawVal, opts...) + if err != nil { + return err + } + + keys = append(keys, structKeys...) + } + + // TODO: struct keys should be enough? + return decode(v.getSettings(keys), v.defaultDecoderConfig(rawVal, opts...)) } -// defaultDecoderConfig returns default mapsstructure.DecoderConfig with suppot -// of time.Duration values & string slices -func defaultDecoderConfig(output interface{}, opts ...DecoderConfigOption) *mapstructure.DecoderConfig { +func (v *Viper) decodeStructKeys(input any, opts ...DecoderConfigOption) ([]string, error) { + var structKeyMap map[string]any + + err := decode(input, v.defaultDecoderConfig(&structKeyMap, opts...)) + if err != nil { + return nil, err + } + + flattenedStructKeyMap := v.flattenAndMergeMap(map[string]bool{}, structKeyMap, "") + + r := make([]string, 0, len(flattenedStructKeyMap)) + for v := range flattenedStructKeyMap { + r = append(r, v) + } + + return r, nil +} + +// defaultDecoderConfig returns default mapstructure.DecoderConfig with support +// of time.Duration values & string slices. +func (v *Viper) defaultDecoderConfig(output any, opts ...DecoderConfigOption) *mapstructure.DecoderConfig { + decodeHook := v.decodeHook + if decodeHook == nil { + decodeHook = mapstructure.ComposeDecodeHookFunc( + mapstructure.StringToTimeDurationHookFunc(), + // mapstructure.StringToSliceHookFunc(","), + stringToWeakSliceHookFunc(","), + ) + } + c := &mapstructure.DecoderConfig{ Metadata: nil, - Result: output, WeaklyTypedInput: true, - DecodeHook: mapstructure.ComposeDecodeHookFunc( - mapstructure.StringToTimeDurationHookFunc(), - mapstructure.StringToSliceHookFunc(","), - ), + DecodeHook: decodeHook, } + for _, opt := range opts { opt(c) } + + // Do not allow overwriting the output + c.Result = output + return c } -// A wrapper around mapstructure.Decode that mimics the WeakDecode functionality -func decode(input interface{}, config *mapstructure.DecoderConfig) error { +// As of mapstructure v2.0.0 StringToSliceHookFunc checks if the return type is a string slice. +// This function removes that check. +// TODO: implement a function that checks if the value can be converted to the return type and use it instead. 
+func stringToWeakSliceHookFunc(sep string) mapstructure.DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}, + ) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Slice { + return data, nil + } + + raw := data.(string) + if raw == "" { + return []string{}, nil + } + + return strings.Split(raw, sep), nil + } +} + +// decode is a wrapper around mapstructure.Decode that mimics the WeakDecode functionality. +func decode(input any, config *mapstructure.DecoderConfig) error { decoder, err := mapstructure.NewDecoder(config) if err != nil { return err @@ -1115,15 +1027,28 @@ func decode(input interface{}, config *mapstructure.DecoderConfig) error { // UnmarshalExact unmarshals the config into a Struct, erroring if a field is nonexistent // in the destination struct. -func UnmarshalExact(rawVal interface{}, opts ...DecoderConfigOption) error { +func UnmarshalExact(rawVal any, opts ...DecoderConfigOption) error { return v.UnmarshalExact(rawVal, opts...) } -func (v *Viper) UnmarshalExact(rawVal interface{}, opts ...DecoderConfigOption) error { - config := defaultDecoderConfig(rawVal, opts...) +func (v *Viper) UnmarshalExact(rawVal any, opts ...DecoderConfigOption) error { + config := v.defaultDecoderConfig(rawVal, opts...) config.ErrorUnused = true - return decode(v.AllSettings(), config) + keys := v.AllKeys() + + if v.experimentalBindStruct { + // TODO: make this optional? + structKeys, err := v.decodeStructKeys(rawVal, opts...) + if err != nil { + return err + } + + keys = append(keys, structKeys...) + } + + // TODO: struct keys should be enough? + return decode(v.getSettings(keys), config) } // BindPFlags binds a full flag set to the configuration, using each flag's long @@ -1137,9 +1062,8 @@ func (v *Viper) BindPFlags(flags *pflag.FlagSet) error { // BindPFlag binds a specific key to a pflag (as used by cobra). // Example (where serverCmd is a Cobra instance): // -// serverCmd.Flags().Int("port", 1138, "Port to run Application server on") -// Viper.BindPFlag("port", serverCmd.Flags().Lookup("port")) -// +// serverCmd.Flags().Int("port", 1138, "Port to run Application server on") +// Viper.BindPFlag("port", serverCmd.Flags().Lookup("port")) func BindPFlag(key string, flag *pflag.Flag) error { return v.BindPFlag(key, flag) } func (v *Viper) BindPFlag(key string, flag *pflag.Flag) error { @@ -1217,9 +1141,9 @@ func (v *Viper) MustBindEnv(input ...string) { // corresponds to a flag, the flag's default value is returned. // // Note: this assumes a lower-cased key given. 
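The weak-slice hook above restores the pre-v2 behavior for non-string slice targets, where mapstructure v2's stock StringToSliceHookFunc would now decline to split. A sketch of the visible effect:

package main

import (
	"fmt"
	"time"

	"github.com/spf13/viper"
)

func main() {
	v := viper.New()
	v.Set("timeouts", "1s,2s")

	var cfg struct {
		Timeouts []time.Duration
	}

	// The comma string is split by the weak hook, then each element is
	// converted by StringToTimeDurationHookFunc.
	_ = v.Unmarshal(&cfg)
	fmt.Println(cfg.Timeouts) // [1s 2s]
}
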
-func (v *Viper) find(lcaseKey string, flagDefault bool) interface{} { +func (v *Viper) find(lcaseKey string, flagDefault bool) any { var ( - val interface{} + val any exists bool path = strings.Split(lcaseKey, v.keyDelim) nested = len(path) > 1 @@ -1262,8 +1186,15 @@ func (v *Viper) find(lcaseKey string, flagDefault bool) interface{} { s = strings.TrimSuffix(s, "]") res, _ := readAsCSV(s) return cast.ToIntSlice(res) + case "durationSlice": + s := strings.TrimPrefix(flag.ValueString(), "[") + s = strings.TrimSuffix(s, "]") + slice := strings.Split(s, ",") + return cast.ToDurationSlice(slice) case "stringToString": return stringToStringConv(flag.ValueString()) + case "stringToInt": + return stringToIntConv(flag.ValueString()) default: return flag.ValueString() } @@ -1274,9 +1205,10 @@ func (v *Viper) find(lcaseKey string, flagDefault bool) interface{} { // Env override next if v.automaticEnvApplied { + envKey := strings.Join(append(v.parents, lcaseKey), ".") // even if it hasn't been registered, if automaticEnv is used, // check any Get request - if val, ok := v.getEnv(v.mergeWithEnvPrefix(lcaseKey)); ok { + if val, ok := v.getEnv(v.mergeWithEnvPrefix(envKey)); ok { return val } if nested && v.isPathShadowedInAutoEnv(path) != "" { @@ -1343,6 +1275,13 @@ func (v *Viper) find(lcaseKey string, flagDefault bool) interface{} { return cast.ToIntSlice(res) case "stringToString": return stringToStringConv(flag.ValueString()) + case "stringToInt": + return stringToIntConv(flag.ValueString()) + case "durationSlice": + s := strings.TrimPrefix(flag.ValueString(), "[") + s = strings.TrimSuffix(s, "]") + slice := strings.Split(s, ",") + return cast.ToDurationSlice(slice) default: return flag.ValueString() } @@ -1363,25 +1302,49 @@ func readAsCSV(val string) ([]string, error) { } // mostly copied from pflag's implementation of this operation here https://github.com/spf13/pflag/blob/master/string_to_string.go#L79 -// alterations are: errors are swallowed, map[string]interface{} is returned in order to enable cast.ToStringMap -func stringToStringConv(val string) interface{} { +// alterations are: errors are swallowed, map[string]any is returned in order to enable cast.ToStringMap. +func stringToStringConv(val string) any { val = strings.Trim(val, "[]") // An empty string would cause an empty map - if len(val) == 0 { - return map[string]interface{}{} + if val == "" { + return map[string]any{} } r := csv.NewReader(strings.NewReader(val)) ss, err := r.Read() if err != nil { return nil } - out := make(map[string]interface{}, len(ss)) + out := make(map[string]any, len(ss)) + for _, pair := range ss { + k, vv, found := strings.Cut(pair, "=") + if !found { + return nil + } + out[k] = vv + } + return out +} + +// mostly copied from pflag's implementation of this operation here https://github.com/spf13/pflag/blob/d5e0c0615acee7028e1e2740a11102313be88de1/string_to_int.go#L68 +// alterations are: errors are swallowed, map[string]any is returned in order to enable cast.ToStringMap. 
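stringToStringConv above (and its new stringToInt sibling below) exist so that values bound from pflag's map-valued flags survive the string round trip. A sketch of the path that exercises it; the flag name and values are made up:

package main

import (
	"fmt"

	"github.com/spf13/pflag"
	"github.com/spf13/viper"
)

func main() {
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	fs.StringToString("labels", map[string]string{"env": "dev"}, "resource labels")
	_ = fs.Parse([]string{"--labels=env=prod,team=infra"})

	v := viper.New()
	_ = v.BindPFlag("labels", fs.Lookup("labels"))

	// find() routes the "stringToString" flag type through
	// stringToStringConv above, yielding a real map.
	fmt.Println(v.GetStringMapString("labels")) // map[env:prod team:infra]
}
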
+func stringToIntConv(val string) any { + val = strings.Trim(val, "[]") + // An empty string would cause an empty map + if val == "" { + return map[string]any{} + } + ss := strings.Split(val, ",") + out := make(map[string]any, len(ss)) for _, pair := range ss { - kv := strings.SplitN(pair, "=", 2) - if len(kv) != 2 { + k, vv, found := strings.Cut(pair, "=") + if !found { + return nil + } + var err error + out[k], err = strconv.Atoi(vv) + if err != nil { return nil } - out[kv[0]] = kv[1] } return out } @@ -1415,13 +1378,13 @@ func (v *Viper) SetEnvKeyReplacer(r *strings.Replacer) { // RegisterAlias creates an alias that provides another accessor for the same key. // This enables one to change a name without breaking the application. -func RegisterAlias(alias string, key string) { v.RegisterAlias(alias, key) } +func RegisterAlias(alias, key string) { v.RegisterAlias(alias, key) } -func (v *Viper) RegisterAlias(alias string, key string) { +func (v *Viper) RegisterAlias(alias, key string) { v.registerAlias(alias, strings.ToLower(key)) } -func (v *Viper) registerAlias(alias string, key string) { +func (v *Viper) registerAlias(alias, key string) { alias = strings.ToLower(alias) if alias != key && alias != v.realKey(key) { _, exists := v.aliases[alias] @@ -1479,9 +1442,9 @@ func (v *Viper) InConfig(key string) bool { // SetDefault sets the default value for this key. // SetDefault is case-insensitive for a key. // Default only used when no value is provided by the user via flag, config or ENV. -func SetDefault(key string, value interface{}) { v.SetDefault(key, value) } +func SetDefault(key string, value any) { v.SetDefault(key, value) } -func (v *Viper) SetDefault(key string, value interface{}) { +func (v *Viper) SetDefault(key string, value any) { // If alias passed in, then set the proper default key = v.realKey(strings.ToLower(key)) value = toCaseInsensitiveValue(value) @@ -1498,9 +1461,9 @@ func (v *Viper) SetDefault(key string, value interface{}) { // Set is case-insensitive for a key. // Will be used instead of values obtained via // flags, config file, ENV, default, or key/value store. 
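RegisterAlias from the hunk above in action; "loud" and "verbose" are arbitrary names:

package main

import (
	"fmt"

	"github.com/spf13/viper"
)

func main() {
	v := viper.New()
	v.RegisterAlias("loud", "verbose")

	v.Set("verbose", true)

	// Both names resolve to the same underlying key.
	fmt.Println(v.GetBool("loud"), v.GetBool("verbose")) // true true
}
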
-func Set(key string, value interface{}) { v.Set(key, value) } +func Set(key string, value any) { v.Set(key, value) } -func (v *Viper) Set(key string, value interface{}) { +func (v *Viper) Set(key string, value any) { // If alias passed in, then set the proper override key = v.realKey(strings.ToLower(key)) value = toCaseInsensitiveValue(value) @@ -1524,7 +1487,7 @@ func (v *Viper) ReadInConfig() error { return err } - if !stringInSlice(v.getConfigType(), SupportedExts) { + if !slices.Contains(SupportedExts, v.getConfigType()) { return UnsupportedConfigError(v.getConfigType()) } @@ -1534,7 +1497,7 @@ func (v *Viper) ReadInConfig() error { return err } - config := make(map[string]interface{}) + config := make(map[string]any) err = v.unmarshalReader(bytes.NewReader(file), config) if err != nil { @@ -1555,7 +1518,7 @@ func (v *Viper) MergeInConfig() error { return err } - if !stringInSlice(v.getConfigType(), SupportedExts) { + if !slices.Contains(SupportedExts, v.getConfigType()) { return UnsupportedConfigError(v.getConfigType()) } @@ -1572,7 +1535,11 @@ func (v *Viper) MergeInConfig() error { func ReadConfig(in io.Reader) error { return v.ReadConfig(in) } func (v *Viper) ReadConfig(in io.Reader) error { - v.config = make(map[string]interface{}) + if v.configType == "" { + return errors.New("cannot decode configuration: config type is not set") + } + + v.config = make(map[string]any) return v.unmarshalReader(in, v.config) } @@ -1580,7 +1547,11 @@ func (v *Viper) ReadConfig(in io.Reader) error { func MergeConfig(in io.Reader) error { return v.MergeConfig(in) } func (v *Viper) MergeConfig(in io.Reader) error { - cfg := make(map[string]interface{}) + if v.configType == "" { + return errors.New("cannot decode configuration: config type is not set") + } + + cfg := make(map[string]any) if err := v.unmarshalReader(in, cfg); err != nil { return err } @@ -1589,11 +1560,11 @@ func (v *Viper) MergeConfig(in io.Reader) error { // MergeConfigMap merges the configuration from the map given with an existing config. // Note that the map given may be modified. -func MergeConfigMap(cfg map[string]interface{}) error { return v.MergeConfigMap(cfg) } +func MergeConfigMap(cfg map[string]any) error { return v.MergeConfigMap(cfg) } -func (v *Viper) MergeConfigMap(cfg map[string]interface{}) error { +func (v *Viper) MergeConfigMap(cfg map[string]any) error { if v.config == nil { - v.config = make(map[string]interface{}) + v.config = make(map[string]any) } insensitiviseMap(cfg) mergeMaps(cfg, v.config, nil) @@ -1628,6 +1599,19 @@ func (v *Viper) WriteConfigAs(filename string) error { return v.writeConfig(filename, true) } +// WriteConfigTo writes current configuration to an [io.Writer]. +func WriteConfigTo(w io.Writer) error { return v.WriteConfigTo(w) } + +func (v *Viper) WriteConfigTo(w io.Writer) error { + format := strings.ToLower(v.getConfigType()) + + if !slices.Contains(SupportedExts, format) { + return UnsupportedConfigError(format) + } + + return v.marshalWriter(w, format) +} + // SafeWriteConfigAs writes current configuration to a given filename if it does not exist. 
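Note the new fail-fast in ReadConfig and MergeConfig above: decoding an io.Reader without a declared type is now an explicit error rather than a silent fallthrough. Stream-based usage therefore has to set the type first:

package main

import (
	"fmt"
	"strings"

	"github.com/spf13/viper"
)

func main() {
	v := viper.New()

	// Required; otherwise ReadConfig now returns
	// "cannot decode configuration: config type is not set".
	v.SetConfigType("yaml")

	err := v.ReadConfig(strings.NewReader("port: 8080"))
	fmt.Println(v.GetInt("port"), err) // 8080 <nil>
}
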
func SafeWriteConfigAs(filename string) error { return v.SafeWriteConfigAs(filename) } @@ -1654,11 +1638,11 @@ func (v *Viper) writeConfig(filename string, force bool) error { return fmt.Errorf("config type could not be determined for %s", filename) } - if !stringInSlice(configType, SupportedExts) { + if !slices.Contains(SupportedExts, configType) { return UnsupportedConfigError(configType) } if v.config == nil { - v.config = make(map[string]interface{}) + v.config = make(map[string]any) } flags := os.O_CREATE | os.O_TRUNC | os.O_WRONLY if !force { @@ -1677,22 +1661,24 @@ func (v *Viper) writeConfig(filename string, force bool) error { return f.Sync() } -// Unmarshal a Reader into a map. -// Should probably be an unexported function. -func unmarshalReader(in io.Reader, c map[string]interface{}) error { - return v.unmarshalReader(in, c) -} - -func (v *Viper) unmarshalReader(in io.Reader, c map[string]interface{}) error { +func (v *Viper) unmarshalReader(in io.Reader, c map[string]any) error { buf := new(bytes.Buffer) buf.ReadFrom(in) - switch format := strings.ToLower(v.getConfigType()); format { - case "yaml", "yml", "json", "toml", "hcl", "tfvars", "ini", "properties", "props", "prop", "dotenv", "env": - err := v.decoderRegistry.Decode(format, buf.Bytes(), c) - if err != nil { - return ConfigParseError{err} - } + format := strings.ToLower(v.getConfigType()) + + if !slices.Contains(SupportedExts, format) { + return UnsupportedConfigError(format) + } + + decoder, err := v.decoderRegistry.Decoder(format) + if err != nil { + return ConfigParseError{err} + } + + err = decoder.Decode(buf.Bytes(), c) + if err != nil { + return ConfigParseError{err} } insensitiviseMap(c) @@ -1700,24 +1686,28 @@ func (v *Viper) unmarshalReader(in io.Reader, c map[string]interface{}) error { } // Marshal a map into Writer. 
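marshalWriter below now takes a plain io.Writer, which is what makes the WriteConfigTo API from the previous hunk possible. A round-trip sketch:

package main

import (
	"bytes"
	"fmt"

	"github.com/spf13/viper"
)

func main() {
	v := viper.New()
	v.SetConfigType("yaml")
	v.Set("port", 8080)

	var buf bytes.Buffer
	if err := v.WriteConfigTo(&buf); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Print(buf.String()) // port: 8080
}
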
-func (v *Viper) marshalWriter(f afero.File, configType string) error { +func (v *Viper) marshalWriter(w io.Writer, configType string) error { c := v.AllSettings() - switch configType { - case "yaml", "yml", "json", "toml", "hcl", "tfvars", "ini", "prop", "props", "properties", "dotenv", "env": - b, err := v.encoderRegistry.Encode(configType, c) - if err != nil { - return ConfigMarshalError{err} - } - _, err = f.WriteString(string(b)) - if err != nil { - return ConfigMarshalError{err} - } + encoder, err := v.encoderRegistry.Encoder(configType) + if err != nil { + return ConfigMarshalError{err} + } + + b, err := encoder.Encode(c) + if err != nil { + return ConfigMarshalError{err} + } + + _, err = w.Write(b) + if err != nil { + return ConfigMarshalError{err} } + return nil } -func keyExists(k string, m map[string]interface{}) string { +func keyExists(k string, m map[string]any) string { lk := strings.ToLower(k) for mk := range m { lmk := strings.ToLower(mk) @@ -1729,33 +1719,33 @@ func keyExists(k string, m map[string]interface{}) string { } func castToMapStringInterface( - src map[interface{}]interface{}, -) map[string]interface{} { - tgt := map[string]interface{}{} + src map[any]any, +) map[string]any { + tgt := map[string]any{} for k, v := range src { tgt[fmt.Sprintf("%v", k)] = v } return tgt } -func castMapStringSliceToMapInterface(src map[string][]string) map[string]interface{} { - tgt := map[string]interface{}{} +func castMapStringSliceToMapInterface(src map[string][]string) map[string]any { + tgt := map[string]any{} for k, v := range src { tgt[k] = v } return tgt } -func castMapStringToMapInterface(src map[string]string) map[string]interface{} { - tgt := map[string]interface{}{} +func castMapStringToMapInterface(src map[string]string) map[string]any { + tgt := map[string]any{} for k, v := range src { tgt[k] = v } return tgt } -func castMapFlagToMapInterface(src map[string]FlagValue) map[string]interface{} { - tgt := map[string]interface{}{} +func castMapFlagToMapInterface(src map[string]FlagValue) map[string]any { + tgt := map[string]any{} for k, v := range src { tgt[k] = v } @@ -1763,17 +1753,15 @@ func castMapFlagToMapInterface(src map[string]FlagValue) map[string]interface{} } // mergeMaps merges two maps. The `itgt` parameter is for handling go-yaml's -// insistence on parsing nested structures as `map[interface{}]interface{}` +// insistence on parsing nested structures as `map[any]any` // instead of using a `string` as the key for nest structures beyond one level // deep. Both map types are supported as there is a go-yaml fork that uses -// `map[string]interface{}` instead. -func mergeMaps( - src, tgt map[string]interface{}, itgt map[interface{}]interface{}, -) { +// `map[string]any` instead. 
+func mergeMaps(src, tgt map[string]any, itgt map[any]any) { for sk, sv := range src { tk := keyExists(sk, tgt) if tk == "" { - v.logger.Trace("", "tk", "\"\"", fmt.Sprintf("tgt[%s]", sk), sv) + v.logger.Debug("", "tk", "\"\"", fmt.Sprintf("tgt[%s]", sk), sv) tgt[sk] = sv if itgt != nil { itgt[sk] = sv @@ -1783,7 +1771,7 @@ func mergeMaps( tv, ok := tgt[tk] if !ok { - v.logger.Trace("", fmt.Sprintf("ok[%s]", tk), false, fmt.Sprintf("tgt[%s]", sk), sv) + v.logger.Debug("", fmt.Sprintf("ok[%s]", tk), false, fmt.Sprintf("tgt[%s]", sk), sv) tgt[sk] = sv if itgt != nil { itgt[sk] = sv @@ -1794,7 +1782,7 @@ func mergeMaps( svType := reflect.TypeOf(sv) tvType := reflect.TypeOf(tv) - v.logger.Trace( + v.logger.Debug( "processing", "key", sk, "st", svType, @@ -1804,12 +1792,12 @@ func mergeMaps( ) switch ttv := tv.(type) { - case map[interface{}]interface{}: - v.logger.Trace("merging maps (must convert)") - tsv, ok := sv.(map[interface{}]interface{}) + case map[any]any: + v.logger.Debug("merging maps (must convert)") + tsv, ok := sv.(map[any]any) if !ok { v.logger.Error( - "Could not cast sv to map[interface{}]interface{}", + "Could not cast sv to map[any]any", "key", sk, "st", svType, "tt", tvType, @@ -1822,12 +1810,12 @@ func mergeMaps( ssv := castToMapStringInterface(tsv) stv := castToMapStringInterface(ttv) mergeMaps(ssv, stv, ttv) - case map[string]interface{}: - v.logger.Trace("merging maps") - tsv, ok := sv.(map[string]interface{}) + case map[string]any: + v.logger.Debug("merging maps") + tsv, ok := sv.(map[string]any) if !ok { v.logger.Error( - "Could not cast sv to map[string]interface{}", + "Could not cast sv to map[string]any", "key", sk, "st", svType, "tt", tvType, @@ -1838,7 +1826,7 @@ func mergeMaps( } mergeMaps(tsv, ttv, nil) default: - v.logger.Trace("setting value") + v.logger.Debug("setting value") tgt[tk] = sv if itgt != nil { itgt[tk] = sv @@ -1847,94 +1835,8 @@ func mergeMaps( } } -// ReadRemoteConfig attempts to get configuration from a remote source -// and read it in the remote configuration registry. -func ReadRemoteConfig() error { return v.ReadRemoteConfig() } - -func (v *Viper) ReadRemoteConfig() error { - return v.getKeyValueConfig() -} - -func WatchRemoteConfig() error { return v.WatchRemoteConfig() } -func (v *Viper) WatchRemoteConfig() error { - return v.watchKeyValueConfig() -} - -func (v *Viper) WatchRemoteConfigOnChannel() error { - return v.watchKeyValueConfigOnChannel() -} - -// Retrieve the first found remote configuration. -func (v *Viper) getKeyValueConfig() error { - if RemoteConfig == nil { - return RemoteConfigError("Enable the remote features by doing a blank import of the viper/remote package: '_ github.com/spf13/viper/remote'") - } - - for _, rp := range v.remoteProviders { - val, err := v.getRemoteConfig(rp) - if err != nil { - v.logger.Error(fmt.Errorf("get remote config: %w", err).Error()) - - continue - } - - v.kvstore = val - - return nil - } - return RemoteConfigError("No Files Found") -} - -func (v *Viper) getRemoteConfig(provider RemoteProvider) (map[string]interface{}, error) { - reader, err := RemoteConfig.Get(provider) - if err != nil { - return nil, err - } - err = v.unmarshalReader(reader, v.kvstore) - return v.kvstore, err -} - -// Retrieve the first found remote configuration. 
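mergeMaps above merges key by key rather than replacing whole subtrees, which is visible through the public MergeConfigMap; the keys here are made up:

package main

import (
	"fmt"
	"strings"

	"github.com/spf13/viper"
)

func main() {
	v := viper.New()
	v.SetConfigType("yaml")
	_ = v.ReadConfig(strings.NewReader("app:\n  name: demo"))

	_ = v.MergeConfigMap(map[string]any{
		"app": map[string]any{"version": "1.2.3"},
	})

	// Both keys survive: nested maps were merged, not overwritten.
	fmt.Println(v.GetString("app.name"), v.GetString("app.version")) // demo 1.2.3
}
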
-func (v *Viper) watchKeyValueConfigOnChannel() error { - for _, rp := range v.remoteProviders { - respc, _ := RemoteConfig.WatchChannel(rp) - // Todo: Add quit channel - go func(rc <-chan *RemoteResponse) { - for { - b := <-rc - reader := bytes.NewReader(b.Value) - v.unmarshalReader(reader, v.kvstore) - } - }(respc) - return nil - } - return RemoteConfigError("No Files Found") -} - -// Retrieve the first found remote configuration. -func (v *Viper) watchKeyValueConfig() error { - for _, rp := range v.remoteProviders { - val, err := v.watchRemoteConfig(rp) - if err != nil { - continue - } - v.kvstore = val - return nil - } - return RemoteConfigError("No Files Found") -} - -func (v *Viper) watchRemoteConfig(provider RemoteProvider) (map[string]interface{}, error) { - reader, err := RemoteConfig.Watch(provider) - if err != nil { - return nil, err - } - err = v.unmarshalReader(reader, v.kvstore) - return v.kvstore, err -} - // AllKeys returns all keys holding a value, regardless of where they are set. -// Nested keys are returned with a v.keyDelim separator +// Nested keys are returned with a v.keyDelim separator. func AllKeys() []string { return v.AllKeys() } func (v *Viper) AllKeys() []string { @@ -1958,11 +1860,12 @@ func (v *Viper) AllKeys() []string { // flattenAndMergeMap recursively flattens the given map into a map[string]bool // of key paths (used as a set, easier to manipulate than a []string): -// - each path is merged into a single key string, delimited with v.keyDelim -// - if a path is shadowed by an earlier value in the initial shadow map, -// it is skipped. +// - each path is merged into a single key string, delimited with v.keyDelim +// - if a path is shadowed by an earlier value in the initial shadow map, +// it is skipped. +// // The resulting set of paths is merged to the given shadow set at the same time. -func (v *Viper) flattenAndMergeMap(shadow map[string]bool, m map[string]interface{}, prefix string) map[string]bool { +func (v *Viper) flattenAndMergeMap(shadow map[string]bool, m map[string]any, prefix string) map[string]bool { if shadow != nil && prefix != "" && shadow[prefix] { // prefix is shadowed => nothing more to flatten return shadow @@ -1971,16 +1874,16 @@ func (v *Viper) flattenAndMergeMap(shadow map[string]bool, m map[string]interfac shadow = make(map[string]bool) } - var m2 map[string]interface{} + var m2 map[string]any if prefix != "" { prefix += v.keyDelim } for k, val := range m { fullKey := prefix + k - switch val.(type) { - case map[string]interface{}: - m2 = val.(map[string]interface{}) - case map[interface{}]interface{}: + switch val := val.(type) { + case map[string]any: + m2 = val + case map[any]any: m2 = cast.ToStringMap(val) default: // immediate value @@ -1995,7 +1898,7 @@ func (v *Viper) flattenAndMergeMap(shadow map[string]bool, m map[string]interfac // mergeFlatMap merges the given maps, excluding values of the second map // shadowed by values from the first map. -func (v *Viper) mergeFlatMap(shadow map[string]bool, m map[string]interface{}) map[string]bool { +func (v *Viper) mergeFlatMap(shadow map[string]bool, m map[string]any) map[string]bool { // scan keys outer: for k := range m { @@ -2015,13 +1918,17 @@ outer: return shadow } -// AllSettings merges all settings and returns them as a map[string]interface{}. -func AllSettings() map[string]interface{} { return v.AllSettings() } +// AllSettings merges all settings and returns them as a map[string]any. 
+func AllSettings() map[string]any { return v.AllSettings() } -func (v *Viper) AllSettings() map[string]interface{} { - m := map[string]interface{}{} +func (v *Viper) AllSettings() map[string]any { + return v.getSettings(v.AllKeys()) +} + +func (v *Viper) getSettings(keys []string) map[string]any { + m := map[string]any{} // start from the list of keys, and construct the map one value at a time - for _, k := range v.AllKeys() { + for _, k := range keys { value := v.Get(k) if value == nil { // should not happen, since AllKeys() returns only keys holding a value, @@ -2049,6 +1956,10 @@ func (v *Viper) SetFs(fs afero.Fs) { func SetConfigName(in string) { v.SetConfigName(in) } func (v *Viper) SetConfigName(in string) { + if v.finder != nil { + v.logger.Warn("ineffective call to function: custom finder takes precedence", slog.String("function", "SetConfigName")) + } + if in != "" { v.configName = in v.configFile = "" @@ -2072,13 +1983,6 @@ func (v *Viper) SetConfigPermissions(perm os.FileMode) { v.configPermissions = perm.Perm() } -// IniLoadOptions sets the load options for ini parsing. -func IniLoadOptions(in ini.LoadOptions) Option { - return optionFunc(func(v *Viper) { - v.iniLoadOptions = in - }) -} - func (v *Viper) getConfigType() string { if v.configType != "" { return v.configType @@ -2111,14 +2015,17 @@ func (v *Viper) getConfigFile() (string, error) { // Debug prints all configuration registries for debugging // purposes. -func Debug() { v.Debug() } - -func (v *Viper) Debug() { - fmt.Printf("Aliases:\n%#v\n", v.aliases) - fmt.Printf("Override:\n%#v\n", v.override) - fmt.Printf("PFlags:\n%#v\n", v.pflags) - fmt.Printf("Env:\n%#v\n", v.env) - fmt.Printf("Key/Value Store:\n%#v\n", v.kvstore) - fmt.Printf("Config:\n%#v\n", v.config) - fmt.Printf("Defaults:\n%#v\n", v.defaults) +func Debug() { v.Debug() } +func DebugTo(w io.Writer) { v.DebugTo(w) } + +func (v *Viper) Debug() { v.DebugTo(os.Stdout) } + +func (v *Viper) DebugTo(w io.Writer) { + fmt.Fprintf(w, "Aliases:\n%#v\n", v.aliases) + fmt.Fprintf(w, "Override:\n%#v\n", v.override) + fmt.Fprintf(w, "PFlags:\n%#v\n", v.pflags) + fmt.Fprintf(w, "Env:\n%#v\n", v.env) + fmt.Fprintf(w, "Key/Value Store:\n%#v\n", v.kvstore) + fmt.Fprintf(w, "Config:\n%#v\n", v.config) + fmt.Fprintf(w, "Defaults:\n%#v\n", v.defaults) } diff --git a/tools/vendor/github.com/spf13/viper/viper_go1_16.go b/tools/vendor/github.com/spf13/viper/viper_go1_16.go deleted file mode 100644 index e10172fa3..000000000 --- a/tools/vendor/github.com/spf13/viper/viper_go1_16.go +++ /dev/null @@ -1,32 +0,0 @@ -//go:build go1.16 && finder -// +build go1.16,finder - -package viper - -import ( - "fmt" - - "github.com/spf13/afero" -) - -// Search all configPaths for any config file. -// Returns the first path that exists (and is a config file). 
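The viper hunks above rework `AllSettings` into a thin wrapper over a new `getSettings(keys)` helper, warn when `SetConfigName` is shadowed by a custom finder, and add an `io.Writer`-based `DebugTo` next to `Debug`. A minimal usage sketch, assuming a viper release that exports `DebugTo`:

```go
package main

import (
	"os"

	"github.com/spf13/viper"
)

func main() {
	viper.SetDefault("db.host", "localhost")
	viper.Set("db.port", 5432)

	// AllSettings now delegates to getSettings(AllKeys()), rebuilding the
	// nested map one key at a time.
	_ = viper.AllSettings() // map[string]any{"db": map[string]any{"host": ..., "port": ...}}

	// DebugTo writes the same registry dump Debug prints, to any io.Writer.
	viper.DebugTo(os.Stderr)
}
```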
-func (v *Viper) findConfigFile() (string, error) {
-	finder := finder{
-		paths:            v.configPaths,
-		fileNames:        []string{v.configName},
-		extensions:       SupportedExts,
-		withoutExtension: v.configType != "",
-	}
-
-	file, err := finder.Find(afero.NewIOFS(v.fs))
-	if err != nil {
-		return "", err
-	}
-
-	if file == "" {
-		return "", ConfigFileNotFoundError{v.configName, fmt.Sprintf("%s", v.configPaths)}
-	}
-
-	return file, nil
-}
diff --git a/tools/vendor/github.com/spf13/viper/watch.go b/tools/vendor/github.com/spf13/viper/watch.go
deleted file mode 100644
index b5523b8f9..000000000
--- a/tools/vendor/github.com/spf13/viper/watch.go
+++ /dev/null
@@ -1,12 +0,0 @@
-//go:build !js
-// +build !js
-
-package viper
-
-import "github.com/fsnotify/fsnotify"
-
-type watcher = fsnotify.Watcher
-
-func newWatcher() (*watcher, error) {
-	return fsnotify.NewWatcher()
-}
diff --git a/tools/vendor/github.com/spf13/viper/watch_wasm.go b/tools/vendor/github.com/spf13/viper/watch_wasm.go
deleted file mode 100644
index 8e47e6a91..000000000
--- a/tools/vendor/github.com/spf13/viper/watch_wasm.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// +build js,wasm
-
-package viper
-
-import (
-	"errors"
-
-	"github.com/fsnotify/fsnotify"
-)
-
-type watcher struct {
-	Events chan fsnotify.Event
-	Errors chan error
-}
-
-func (*watcher) Close() error {
-	return nil
-}
-
-func (*watcher) Add(name string) error {
-	return nil
-}
-
-func (*watcher) Remove(name string) error {
-	return nil
-}
-
-func newWatcher() (*watcher, error) {
-	return &watcher{}, errors.New("fsnotify is not supported on WASM")
-}
diff --git a/tools/vendor/github.com/subosito/gotenv/CHANGELOG.md b/tools/vendor/github.com/subosito/gotenv/CHANGELOG.md
index 757caad26..c4fe7d326 100644
--- a/tools/vendor/github.com/subosito/gotenv/CHANGELOG.md
+++ b/tools/vendor/github.com/subosito/gotenv/CHANGELOG.md
@@ -1,5 +1,37 @@
 # Changelog
 
+## [1.5.0] - 2023-08-15
+
+### Added
+
+- Support for reading UTF16 files
+
+### Fixed
+
+- Use io.Reader instead of custom Reader
+- Scanner error handling
+- Reader error handling
+
+## [1.4.2] - 2023-01-11
+
+### Fixed
+
+- Env var initialization
+
+### Changed
+
+- More consistent line splitting
+
+## [1.4.1] - 2022-08-23
+
+### Fixed
+
+- Missing file close
+
+### Changed
+
+- Updated dependencies
+
 ## [1.4.0] - 2022-06-02
 
 ### Added
diff --git a/tools/vendor/github.com/subosito/gotenv/gotenv.go b/tools/vendor/github.com/subosito/gotenv/gotenv.go
index 7b1186e1f..1191d3587 100644
--- a/tools/vendor/github.com/subosito/gotenv/gotenv.go
+++ b/tools/vendor/github.com/subosito/gotenv/gotenv.go
@@ -3,6 +3,7 @@ package gotenv
 
 import (
 	"bufio"
+	"bytes"
 	"fmt"
 	"io"
 	"os"
@@ -11,6 +12,9 @@ import (
 	"sort"
 	"strconv"
 	"strings"
+
+	"golang.org/x/text/encoding/unicode"
+	"golang.org/x/text/transform"
 )
 
 const (
@@ -19,9 +23,13 @@ const (
 	// Pattern for detecting valid variable within a value
 	variablePattern = `(\\)?(\$)(\{?([A-Z0-9_]+)?\}?)`
+)
 
-	// Byte order mark character
-	bom = "\xef\xbb\xbf"
+// Byte order mark character
+var (
+	bomUTF8    = []byte("\xEF\xBB\xBF")
+	bomUTF16LE = []byte("\xFF\xFE")
+	bomUTF16BE = []byte("\xFE\xFF")
 )
 
 // Env holds key/value pair of valid environment variable
@@ -174,20 +182,68 @@ func Write(env Env, filename string) error {
 	return file.Sync()
 }
 
+// splitLines is a valid SplitFunc for a bufio.Scanner. It splits lines on CR ('\r'), LF ('\n'), or CRLF.
+// If a CR is immediately followed by an LF, the pair is treated as a single CRLF line break.
+func splitLines(data []byte, atEOF bool) (advance int, token []byte, err error) {
+	if atEOF && len(data) == 0 {
+		return 0, nil, bufio.ErrFinalToken
+	}
+
+	idx := bytes.IndexAny(data, "\r\n")
+	switch {
+	case atEOF && idx < 0:
+		return len(data), data, bufio.ErrFinalToken
+
+	case idx < 0:
+		return 0, nil, nil
+	}
+
+	// consume CR or LF
+	eol := idx + 1
+	// detect CRLF
+	if len(data) > eol && data[eol-1] == '\r' && data[eol] == '\n' {
+		eol++
+	}
+
+	return eol, data[:idx], nil
+}
+
 func strictParse(r io.Reader, override bool) (Env, error) {
 	env := make(Env)
-	scanner := bufio.NewScanner(r)
-	firstLine := true
+	buf := new(bytes.Buffer)
+	tee := io.TeeReader(r, buf)
 
-	for scanner.Scan() {
-		line := strings.TrimSpace(scanner.Text())
+	// There can be a maximum of 3 BOM bytes.
+	bomByteBuffer := make([]byte, 3)
+	_, err := tee.Read(bomByteBuffer)
+	if err != nil && err != io.EOF {
+		return env, err
+	}
+
+	z := io.MultiReader(buf, r)
 
-		if firstLine {
-			line = strings.TrimPrefix(line, bom)
-			firstLine = false
+	// We choose a different scanner depending on file encoding.
+	var scanner *bufio.Scanner
+
+	if bytes.HasPrefix(bomByteBuffer, bomUTF8) {
+		scanner = bufio.NewScanner(transform.NewReader(z, unicode.UTF8BOM.NewDecoder()))
+	} else if bytes.HasPrefix(bomByteBuffer, bomUTF16LE) {
+		scanner = bufio.NewScanner(transform.NewReader(z, unicode.UTF16(unicode.LittleEndian, unicode.ExpectBOM).NewDecoder()))
+	} else if bytes.HasPrefix(bomByteBuffer, bomUTF16BE) {
+		scanner = bufio.NewScanner(transform.NewReader(z, unicode.UTF16(unicode.BigEndian, unicode.ExpectBOM).NewDecoder()))
+	} else {
+		scanner = bufio.NewScanner(z)
+	}
+
+	scanner.Split(splitLines)
+
+	for scanner.Scan() {
+		if err := scanner.Err(); err != nil {
+			return env, err
 		}
+		line := strings.TrimSpace(scanner.Text())
 
 		if line == "" || line[0] == '#' {
 			continue
 		}
@@ -235,7 +291,7 @@ func strictParse(r io.Reader, override bool) (Env, error) {
 		}
 	}
 
-	return env, nil
+	return env, scanner.Err()
 }
 
 var (
@@ -283,7 +339,6 @@ func parseLine(s string, env Env, override bool) error {
 		return varReplacement(s, hsq, env, override)
 	}
 	val = varRgx.ReplaceAllStringFunc(val, fv)
-	val = parseVal(val, env, hdq, override)
 
 	env[key] = val
@@ -352,18 +407,3 @@ func checkFormat(s string, env Env) error {
 
 	return fmt.Errorf("line `%s` doesn't match format", s)
 }
-
-func parseVal(val string, env Env, ignoreNewlines bool, override bool) string {
-	if strings.Contains(val, "=") && !ignoreNewlines {
-		kv := strings.Split(val, "\r")
-
-		if len(kv) > 1 {
-			val = kv[0]
-			for _, l := range kv[1:] {
-				_ = parseLine(l, env, override)
-			}
-		}
-	}
-
-	return val
-}
diff --git a/tools/vendor/github.com/tdakkota/asciicheck/asciicheck.go b/tools/vendor/github.com/tdakkota/asciicheck/asciicheck.go
index 690728022..2ec141ec3 100644
--- a/tools/vendor/github.com/tdakkota/asciicheck/asciicheck.go
+++ b/tools/vendor/github.com/tdakkota/asciicheck/asciicheck.go
@@ -3,47 +3,100 @@ package asciicheck
 
 import (
 	"fmt"
 	"go/ast"
+	"go/token"
+	"golang.org/x/tools/go/analysis"
+	"golang.org/x/tools/go/analysis/passes/inspect"
+	"golang.org/x/tools/go/ast/inspector"
 )
 
 func NewAnalyzer() *analysis.Analyzer {
 	return &analysis.Analyzer{
-		Name: "asciicheck",
-		Doc:  "checks that all code identifiers does not have non-ASCII symbols in the name",
-		Run:  run,
+		Name:     "asciicheck",
+		Doc:      "checks that all code identifiers does not have non-ASCII symbols in the name",
+		Requires: []*analysis.Analyzer{inspect.Analyzer},
+		Run:      run,
 	}
 }
 
-func run(pass *analysis.Pass) (interface{}, error) {
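Taken together, the gotenv changes above mean the parser accepts CR, LF, and CRLF line endings and transparently decodes UTF-8 and UTF-16 BOMs. A small sketch of the resulting behavior through gotenv's exported `StrictParse` (the mixed-endings input is purely illustrative):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/subosito/gotenv"
)

func main() {
	// CRLF, a lone CR, and a lone LF are all accepted as line breaks by splitLines.
	src := "FOO=bar\r\nBAZ=qux\rQUUX=corge\n"

	env, err := gotenv.StrictParse(strings.NewReader(src))
	if err != nil {
		panic(err)
	}
	fmt.Println(env["FOO"], env["BAZ"], env["QUUX"]) // bar qux corge
}
```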
- for _, file := range pass.Files { - alreadyViewed := map[*ast.Object]struct{}{} - ast.Inspect( - file, func(node ast.Node) bool { - cb(pass, node, alreadyViewed) - return true - }, - ) +func run(pass *analysis.Pass) (any, error) { + inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + + nodeFilter := []ast.Node{ + (*ast.File)(nil), + (*ast.ImportSpec)(nil), + (*ast.TypeSpec)(nil), + (*ast.ValueSpec)(nil), + (*ast.FuncDecl)(nil), + (*ast.StructType)(nil), + (*ast.FuncType)(nil), + (*ast.InterfaceType)(nil), + (*ast.LabeledStmt)(nil), + (*ast.AssignStmt)(nil), } + inspect.Preorder(nodeFilter, func(n ast.Node) { + switch n := n.(type) { + case *ast.File: + checkIdent(pass, n.Name) + case *ast.ImportSpec: + checkIdent(pass, n.Name) + case *ast.TypeSpec: + checkIdent(pass, n.Name) + checkFieldList(pass, n.TypeParams) + case *ast.ValueSpec: + for _, name := range n.Names { + checkIdent(pass, name) + } + case *ast.FuncDecl: + checkIdent(pass, n.Name) + checkFieldList(pass, n.Recv) + case *ast.StructType: + checkFieldList(pass, n.Fields) + case *ast.FuncType: + checkFieldList(pass, n.TypeParams) + checkFieldList(pass, n.Params) + checkFieldList(pass, n.Results) + case *ast.InterfaceType: + checkFieldList(pass, n.Methods) + case *ast.LabeledStmt: + checkIdent(pass, n.Label) + case *ast.AssignStmt: + if n.Tok == token.DEFINE { + for _, expr := range n.Lhs { + if ident, ok := expr.(*ast.Ident); ok { + checkIdent(pass, ident) + } + } + } + } + }) return nil, nil } -func cb(pass *analysis.Pass, n ast.Node, m map[*ast.Object]struct{}) { - if v, ok := n.(*ast.Ident); ok { - if _, ok := m[v.Obj]; ok { - return - } else { - m[v.Obj] = struct{}{} - } +func checkIdent(pass *analysis.Pass, v *ast.Ident) { + if v == nil { + return + } - ch, ascii := isASCII(v.Name) - if !ascii { - pass.Report( - analysis.Diagnostic{ - Pos: v.Pos(), - Message: fmt.Sprintf("identifier \"%s\" contain non-ASCII character: %#U", v.Name, ch), - }, - ) + ch, ascii := isASCII(v.Name) + if !ascii { + pass.Report( + analysis.Diagnostic{ + Pos: v.Pos(), + Message: fmt.Sprintf("identifier \"%s\" contain non-ASCII character: %#U", v.Name, ch), + }, + ) + } +} + +func checkFieldList(pass *analysis.Pass, f *ast.FieldList) { + if f == nil { + return + } + for _, f := range f.List { + for _, name := range f.Names { + checkIdent(pass, name) } } } diff --git a/tools/vendor/github.com/tetafro/godot/README.md b/tools/vendor/github.com/tetafro/godot/README.md index 6b2e530b9..15a85778e 100644 --- a/tools/vendor/github.com/tetafro/godot/README.md +++ b/tools/vendor/github.com/tetafro/godot/README.md @@ -36,6 +36,7 @@ defaults are used: # Which comments to check: # declarations - for top level declaration comments (default); # toplevel - for top level comments; +# noinline - for all except inline comments; # all - for all comments. 
scope: declarations diff --git a/tools/vendor/github.com/tetafro/godot/getters.go b/tools/vendor/github.com/tetafro/godot/getters.go index eff836b40..fda0619f4 100644 --- a/tools/vendor/github.com/tetafro/godot/getters.go +++ b/tools/vendor/github.com/tetafro/godot/getters.go @@ -66,6 +66,9 @@ func (pf *parsedFile) getComments(scope Scope, exclude []*regexp.Regexp) []comme case AllScope: // All comments comments = pf.getAllComments(exclude) + case NoInlineScope: + // All except inline comments + comments = pf.getNoInline(exclude) case TopLevelScope: // All top level comments and comments from the inside // of top level blocks @@ -173,6 +176,34 @@ func (pf *parsedFile) getDeclarationComments(exclude []*regexp.Regexp) []comment return comments } +// getNoInline gets all except inline comments. +func (pf *parsedFile) getNoInline(exclude []*regexp.Regexp) []comment { + var comments []comment //nolint:prealloc + for _, c := range pf.file.Comments { + if c == nil || len(c.List) == 0 { + continue + } + firstLine := pf.fset.Position(c.Pos()).Line + lastLine := pf.fset.Position(c.End()).Line + + c := comment{ + lines: pf.lines[firstLine-1 : lastLine], + start: pf.fset.Position(c.List[0].Slash), + text: getText(c, exclude), + } + + // Skip inline + if len(c.lines) == 1 { + before := c.lines[0][:c.start.Column-1] + if len(strings.TrimSpace(before)) > 0 { + continue + } + } + comments = append(comments, c) + } + return comments +} + // getAllComments gets every single comment from the file. func (pf *parsedFile) getAllComments(exclude []*regexp.Regexp) []comment { var comments []comment //nolint:prealloc diff --git a/tools/vendor/github.com/tetafro/godot/settings.go b/tools/vendor/github.com/tetafro/godot/settings.go index b71bf5d58..d770cbcf7 100644 --- a/tools/vendor/github.com/tetafro/godot/settings.go +++ b/tools/vendor/github.com/tetafro/godot/settings.go @@ -24,6 +24,8 @@ const ( DeclScope Scope = "declarations" // TopLevelScope is for all top level comments. TopLevelScope Scope = "toplevel" + // NoInlineScope is for all except inline comments. + NoInlineScope Scope = "noinline" // AllScope is for all comments. 
AllScope Scope = "all" ) diff --git a/tools/vendor/github.com/xen0n/gosmopolitan/.golangci.yml b/tools/vendor/github.com/xen0n/gosmopolitan/.golangci.yml index 0bce12501..0f58fc526 100644 --- a/tools/vendor/github.com/xen0n/gosmopolitan/.golangci.yml +++ b/tools/vendor/github.com/xen0n/gosmopolitan/.golangci.yml @@ -1,5 +1,5 @@ run: - go: '1.19' + go: '1.24' modules-download-mode: readonly linters: @@ -20,12 +20,8 @@ linters-settings: SPDX-License-Identifier: GPL-3.0-or-later goimports: local-prefixes: github.com/xen0n/gosmopolitan - gosimple: - go: '1.19' lll: line-length: 120 tab-width: 4 nakedret: max-func-lines: 1 - stylecheck: - go: '1.19' diff --git a/tools/vendor/go.uber.org/atomic/.codecov.yml b/tools/vendor/go.uber.org/atomic/.codecov.yml deleted file mode 100644 index 571116cc3..000000000 --- a/tools/vendor/go.uber.org/atomic/.codecov.yml +++ /dev/null @@ -1,19 +0,0 @@ -coverage: - range: 80..100 - round: down - precision: 2 - - status: - project: # measuring the overall project coverage - default: # context, you can create multiple ones with custom titles - enabled: yes # must be yes|true to enable this status - target: 100 # specify the target coverage for each commit status - # option: "auto" (must increase from parent commit or pull request base) - # option: "X%" a static target percentage to hit - if_not_found: success # if parent is not found report status as success, error, or failure - if_ci_failed: error # if ci fails report status as success, error, or failure - -# Also update COVER_IGNORE_PKGS in the Makefile. -ignore: - - /internal/gen-atomicint/ - - /internal/gen-valuewrapper/ diff --git a/tools/vendor/go.uber.org/atomic/.gitignore b/tools/vendor/go.uber.org/atomic/.gitignore deleted file mode 100644 index c3fa25389..000000000 --- a/tools/vendor/go.uber.org/atomic/.gitignore +++ /dev/null @@ -1,12 +0,0 @@ -/bin -.DS_Store -/vendor -cover.html -cover.out -lint.log - -# Binaries -*.test - -# Profiling output -*.prof diff --git a/tools/vendor/go.uber.org/atomic/.travis.yml b/tools/vendor/go.uber.org/atomic/.travis.yml deleted file mode 100644 index 13d0a4f25..000000000 --- a/tools/vendor/go.uber.org/atomic/.travis.yml +++ /dev/null @@ -1,27 +0,0 @@ -sudo: false -language: go -go_import_path: go.uber.org/atomic - -env: - global: - - GO111MODULE=on - -matrix: - include: - - go: oldstable - - go: stable - env: LINT=1 - -cache: - directories: - - vendor - -before_install: - - go version - -script: - - test -z "$LINT" || make lint - - make cover - -after_success: - - bash <(curl -s https://codecov.io/bash) diff --git a/tools/vendor/go.uber.org/atomic/CHANGELOG.md b/tools/vendor/go.uber.org/atomic/CHANGELOG.md deleted file mode 100644 index 24c0274dc..000000000 --- a/tools/vendor/go.uber.org/atomic/CHANGELOG.md +++ /dev/null @@ -1,76 +0,0 @@ -# Changelog -All notable changes to this project will be documented in this file. - -The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), -and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). - -## [1.7.0] - 2020-09-14 -### Added -- Support JSON serialization and deserialization of primitive atomic types. -- Support Text marshalling and unmarshalling for string atomics. - -### Changed -- Disallow incorrect comparison of atomic values in a non-atomic way. - -### Removed -- Remove dependency on `golang.org/x/{lint, tools}`. - -## [1.6.0] - 2020-02-24 -### Changed -- Drop library dependency on `golang.org/x/{lint, tools}`. 
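Returning to the godot change above: besides the `scope: noinline` configuration key, the new scope can be selected programmatically. A rough sketch, assuming godot's exported `Run`/`Settings` API (in practice the checker is usually driven through golangci-lint):

```go
package main

import (
	"fmt"
	"go/parser"
	"go/token"
	"log"

	"github.com/tetafro/godot"
)

func main() {
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "main.go", nil, parser.ParseComments)
	if err != nil {
		log.Fatal(err)
	}

	// noinline: check every comment except those trailing code on the same line.
	settings := godot.Settings{Scope: godot.NoInlineScope, Period: true}
	issues, err := godot.Run(file, fset, settings)
	if err != nil {
		log.Fatal(err)
	}
	for _, iss := range issues {
		fmt.Printf("%s: %s\n", iss.Pos, iss.Message)
	}
}
```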
- -## [1.5.1] - 2019-11-19 -- Fix bug where `Bool.CAS` and `Bool.Toggle` do work correctly together - causing `CAS` to fail even though the old value matches. - -## [1.5.0] - 2019-10-29 -### Changed -- With Go modules, only the `go.uber.org/atomic` import path is supported now. - If you need to use the old import path, please add a `replace` directive to - your `go.mod`. - -## [1.4.0] - 2019-05-01 -### Added - - Add `atomic.Error` type for atomic operations on `error` values. - -## [1.3.2] - 2018-05-02 -### Added -- Add `atomic.Duration` type for atomic operations on `time.Duration` values. - -## [1.3.1] - 2017-11-14 -### Fixed -- Revert optimization for `atomic.String.Store("")` which caused data races. - -## [1.3.0] - 2017-11-13 -### Added -- Add `atomic.Bool.CAS` for compare-and-swap semantics on bools. - -### Changed -- Optimize `atomic.String.Store("")` by avoiding an allocation. - -## [1.2.0] - 2017-04-12 -### Added -- Shadow `atomic.Value` from `sync/atomic`. - -## [1.1.0] - 2017-03-10 -### Added -- Add atomic `Float64` type. - -### Changed -- Support new `go.uber.org/atomic` import path. - -## [1.0.0] - 2016-07-18 - -- Initial release. - -[1.7.0]: https://github.com/uber-go/atomic/compare/v1.6.0...v1.7.0 -[1.6.0]: https://github.com/uber-go/atomic/compare/v1.5.1...v1.6.0 -[1.5.1]: https://github.com/uber-go/atomic/compare/v1.5.0...v1.5.1 -[1.5.0]: https://github.com/uber-go/atomic/compare/v1.4.0...v1.5.0 -[1.4.0]: https://github.com/uber-go/atomic/compare/v1.3.2...v1.4.0 -[1.3.2]: https://github.com/uber-go/atomic/compare/v1.3.1...v1.3.2 -[1.3.1]: https://github.com/uber-go/atomic/compare/v1.3.0...v1.3.1 -[1.3.0]: https://github.com/uber-go/atomic/compare/v1.2.0...v1.3.0 -[1.2.0]: https://github.com/uber-go/atomic/compare/v1.1.0...v1.2.0 -[1.1.0]: https://github.com/uber-go/atomic/compare/v1.0.0...v1.1.0 -[1.0.0]: https://github.com/uber-go/atomic/releases/tag/v1.0.0 diff --git a/tools/vendor/go.uber.org/atomic/LICENSE.txt b/tools/vendor/go.uber.org/atomic/LICENSE.txt deleted file mode 100644 index 8765c9fbc..000000000 --- a/tools/vendor/go.uber.org/atomic/LICENSE.txt +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2016 Uber Technologies, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/tools/vendor/go.uber.org/atomic/Makefile b/tools/vendor/go.uber.org/atomic/Makefile deleted file mode 100644 index 1b1376d42..000000000 --- a/tools/vendor/go.uber.org/atomic/Makefile +++ /dev/null @@ -1,78 +0,0 @@ -# Directory to place `go install`ed binaries into. 
-export GOBIN ?= $(shell pwd)/bin - -GOLINT = $(GOBIN)/golint -GEN_ATOMICINT = $(GOBIN)/gen-atomicint -GEN_ATOMICWRAPPER = $(GOBIN)/gen-atomicwrapper -STATICCHECK = $(GOBIN)/staticcheck - -GO_FILES ?= $(shell find . '(' -path .git -o -path vendor ')' -prune -o -name '*.go' -print) - -# Also update ignore section in .codecov.yml. -COVER_IGNORE_PKGS = \ - go.uber.org/atomic/internal/gen-atomicint \ - go.uber.org/atomic/internal/gen-atomicwrapper - -.PHONY: build -build: - go build ./... - -.PHONY: test -test: - go test -race ./... - -.PHONY: gofmt -gofmt: - $(eval FMT_LOG := $(shell mktemp -t gofmt.XXXXX)) - gofmt -e -s -l $(GO_FILES) > $(FMT_LOG) || true - @[ ! -s "$(FMT_LOG)" ] || (echo "gofmt failed:" && cat $(FMT_LOG) && false) - -$(GOLINT): - cd tools && go install golang.org/x/lint/golint - -$(STATICCHECK): - cd tools && go install honnef.co/go/tools/cmd/staticcheck - -$(GEN_ATOMICWRAPPER): $(wildcard ./internal/gen-atomicwrapper/*) - go build -o $@ ./internal/gen-atomicwrapper - -$(GEN_ATOMICINT): $(wildcard ./internal/gen-atomicint/*) - go build -o $@ ./internal/gen-atomicint - -.PHONY: golint -golint: $(GOLINT) - $(GOLINT) ./... - -.PHONY: staticcheck -staticcheck: $(STATICCHECK) - $(STATICCHECK) ./... - -.PHONY: lint -lint: gofmt golint staticcheck generatenodirty - -# comma separated list of packages to consider for code coverage. -COVER_PKG = $(shell \ - go list -find ./... | \ - grep -v $(foreach pkg,$(COVER_IGNORE_PKGS),-e "^$(pkg)$$") | \ - paste -sd, -) - -.PHONY: cover -cover: - go test -coverprofile=cover.out -coverpkg $(COVER_PKG) -v ./... - go tool cover -html=cover.out -o cover.html - -.PHONY: generate -generate: $(GEN_ATOMICINT) $(GEN_ATOMICWRAPPER) - go generate ./... - -.PHONY: generatenodirty -generatenodirty: - @[ -z "$$(git status --porcelain)" ] || ( \ - echo "Working tree is dirty. Commit your changes first."; \ - exit 1 ) - @make generate - @status=$$(git status --porcelain); \ - [ -z "$$status" ] || ( \ - echo "Working tree is dirty after `make generate`:"; \ - echo "$$status"; \ - echo "Please ensure that the generated code is up-to-date." ) diff --git a/tools/vendor/go.uber.org/atomic/README.md b/tools/vendor/go.uber.org/atomic/README.md deleted file mode 100644 index ade0c20f1..000000000 --- a/tools/vendor/go.uber.org/atomic/README.md +++ /dev/null @@ -1,63 +0,0 @@ -# atomic [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] [![Go Report Card][reportcard-img]][reportcard] - -Simple wrappers for primitive types to enforce atomic access. - -## Installation - -```shell -$ go get -u go.uber.org/atomic@v1 -``` - -### Legacy Import Path - -As of v1.5.0, the import path `go.uber.org/atomic` is the only supported way -of using this package. If you are using Go modules, this package will fail to -compile with the legacy import path path `github.com/uber-go/atomic`. - -We recommend migrating your code to the new import path but if you're unable -to do so, or if your dependencies are still using the old import path, you -will have to add a `replace` directive to your `go.mod` file downgrading the -legacy import path to an older version. - -``` -replace github.com/uber-go/atomic => github.com/uber-go/atomic v1.4.0 -``` - -You can do so automatically by running the following command. - -```shell -$ go mod edit -replace github.com/uber-go/atomic=github.com/uber-go/atomic@v1.4.0 -``` - -## Usage - -The standard library's `sync/atomic` is powerful, but it's easy to forget which -variables must be accessed atomically. 
`go.uber.org/atomic` preserves all the -functionality of the standard library, but wraps the primitive types to -provide a safer, more convenient API. - -```go -var atom atomic.Uint32 -atom.Store(42) -atom.Sub(2) -atom.CAS(40, 11) -``` - -See the [documentation][doc] for a complete API specification. - -## Development Status - -Stable. - ---- - -Released under the [MIT License](LICENSE.txt). - -[doc-img]: https://godoc.org/github.com/uber-go/atomic?status.svg -[doc]: https://godoc.org/go.uber.org/atomic -[ci-img]: https://travis-ci.com/uber-go/atomic.svg?branch=master -[ci]: https://travis-ci.com/uber-go/atomic -[cov-img]: https://codecov.io/gh/uber-go/atomic/branch/master/graph/badge.svg -[cov]: https://codecov.io/gh/uber-go/atomic -[reportcard-img]: https://goreportcard.com/badge/go.uber.org/atomic -[reportcard]: https://goreportcard.com/report/go.uber.org/atomic diff --git a/tools/vendor/go.uber.org/atomic/bool.go b/tools/vendor/go.uber.org/atomic/bool.go deleted file mode 100644 index 9cf1914b1..000000000 --- a/tools/vendor/go.uber.org/atomic/bool.go +++ /dev/null @@ -1,81 +0,0 @@ -// @generated Code generated by gen-atomicwrapper. - -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import ( - "encoding/json" -) - -// Bool is an atomic type-safe wrapper for bool values. -type Bool struct { - _ nocmp // disallow non-atomic comparison - - v Uint32 -} - -var _zeroBool bool - -// NewBool creates a new Bool. -func NewBool(v bool) *Bool { - x := &Bool{} - if v != _zeroBool { - x.Store(v) - } - return x -} - -// Load atomically loads the wrapped bool. -func (x *Bool) Load() bool { - return truthy(x.v.Load()) -} - -// Store atomically stores the passed bool. -func (x *Bool) Store(v bool) { - x.v.Store(boolToInt(v)) -} - -// CAS is an atomic compare-and-swap for bool values. -func (x *Bool) CAS(o, n bool) bool { - return x.v.CAS(boolToInt(o), boolToInt(n)) -} - -// Swap atomically stores the given bool and returns the old -// value. -func (x *Bool) Swap(o bool) bool { - return truthy(x.v.Swap(boolToInt(o))) -} - -// MarshalJSON encodes the wrapped bool into JSON. -func (x *Bool) MarshalJSON() ([]byte, error) { - return json.Marshal(x.Load()) -} - -// UnmarshalJSON decodes a bool from JSON. 
-func (x *Bool) UnmarshalJSON(b []byte) error { - var v bool - if err := json.Unmarshal(b, &v); err != nil { - return err - } - x.Store(v) - return nil -} diff --git a/tools/vendor/go.uber.org/atomic/doc.go b/tools/vendor/go.uber.org/atomic/doc.go deleted file mode 100644 index ae7390ee6..000000000 --- a/tools/vendor/go.uber.org/atomic/doc.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Package atomic provides simple wrappers around numerics to enforce atomic -// access. -package atomic diff --git a/tools/vendor/go.uber.org/atomic/duration.go b/tools/vendor/go.uber.org/atomic/duration.go deleted file mode 100644 index 027cfcb20..000000000 --- a/tools/vendor/go.uber.org/atomic/duration.go +++ /dev/null @@ -1,82 +0,0 @@ -// @generated Code generated by gen-atomicwrapper. - -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import ( - "encoding/json" - "time" -) - -// Duration is an atomic type-safe wrapper for time.Duration values. -type Duration struct { - _ nocmp // disallow non-atomic comparison - - v Int64 -} - -var _zeroDuration time.Duration - -// NewDuration creates a new Duration. -func NewDuration(v time.Duration) *Duration { - x := &Duration{} - if v != _zeroDuration { - x.Store(v) - } - return x -} - -// Load atomically loads the wrapped time.Duration. 
-func (x *Duration) Load() time.Duration { - return time.Duration(x.v.Load()) -} - -// Store atomically stores the passed time.Duration. -func (x *Duration) Store(v time.Duration) { - x.v.Store(int64(v)) -} - -// CAS is an atomic compare-and-swap for time.Duration values. -func (x *Duration) CAS(o, n time.Duration) bool { - return x.v.CAS(int64(o), int64(n)) -} - -// Swap atomically stores the given time.Duration and returns the old -// value. -func (x *Duration) Swap(o time.Duration) time.Duration { - return time.Duration(x.v.Swap(int64(o))) -} - -// MarshalJSON encodes the wrapped time.Duration into JSON. -func (x *Duration) MarshalJSON() ([]byte, error) { - return json.Marshal(x.Load()) -} - -// UnmarshalJSON decodes a time.Duration from JSON. -func (x *Duration) UnmarshalJSON(b []byte) error { - var v time.Duration - if err := json.Unmarshal(b, &v); err != nil { - return err - } - x.Store(v) - return nil -} diff --git a/tools/vendor/go.uber.org/atomic/duration_ext.go b/tools/vendor/go.uber.org/atomic/duration_ext.go deleted file mode 100644 index 6273b66bd..000000000 --- a/tools/vendor/go.uber.org/atomic/duration_ext.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import "time" - -//go:generate bin/gen-atomicwrapper -name=Duration -type=time.Duration -wrapped=Int64 -pack=int64 -unpack=time.Duration -cas -swap -json -imports time -file=duration.go - -// Add atomically adds to the wrapped time.Duration and returns the new value. -func (d *Duration) Add(n time.Duration) time.Duration { - return time.Duration(d.v.Add(int64(n))) -} - -// Sub atomically subtracts from the wrapped time.Duration and returns the new value. -func (d *Duration) Sub(n time.Duration) time.Duration { - return time.Duration(d.v.Sub(int64(n))) -} - -// String encodes the wrapped value as a string. -func (d *Duration) String() string { - return d.Load().String() -} diff --git a/tools/vendor/go.uber.org/atomic/float64.go b/tools/vendor/go.uber.org/atomic/float64.go deleted file mode 100644 index 071906020..000000000 --- a/tools/vendor/go.uber.org/atomic/float64.go +++ /dev/null @@ -1,76 +0,0 @@ -// @generated Code generated by gen-atomicwrapper. - -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import ( - "encoding/json" - "math" -) - -// Float64 is an atomic type-safe wrapper for float64 values. -type Float64 struct { - _ nocmp // disallow non-atomic comparison - - v Uint64 -} - -var _zeroFloat64 float64 - -// NewFloat64 creates a new Float64. -func NewFloat64(v float64) *Float64 { - x := &Float64{} - if v != _zeroFloat64 { - x.Store(v) - } - return x -} - -// Load atomically loads the wrapped float64. -func (x *Float64) Load() float64 { - return math.Float64frombits(x.v.Load()) -} - -// Store atomically stores the passed float64. -func (x *Float64) Store(v float64) { - x.v.Store(math.Float64bits(v)) -} - -// CAS is an atomic compare-and-swap for float64 values. -func (x *Float64) CAS(o, n float64) bool { - return x.v.CAS(math.Float64bits(o), math.Float64bits(n)) -} - -// MarshalJSON encodes the wrapped float64 into JSON. -func (x *Float64) MarshalJSON() ([]byte, error) { - return json.Marshal(x.Load()) -} - -// UnmarshalJSON decodes a float64 from JSON. -func (x *Float64) UnmarshalJSON(b []byte) error { - var v float64 - if err := json.Unmarshal(b, &v); err != nil { - return err - } - x.Store(v) - return nil -} diff --git a/tools/vendor/go.uber.org/atomic/float64_ext.go b/tools/vendor/go.uber.org/atomic/float64_ext.go deleted file mode 100644 index 927b1add7..000000000 --- a/tools/vendor/go.uber.org/atomic/float64_ext.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import "strconv" - -//go:generate bin/gen-atomicwrapper -name=Float64 -type=float64 -wrapped=Uint64 -pack=math.Float64bits -unpack=math.Float64frombits -cas -json -imports math -file=float64.go - -// Add atomically adds to the wrapped float64 and returns the new value. -func (f *Float64) Add(s float64) float64 { - for { - old := f.Load() - new := old + s - if f.CAS(old, new) { - return new - } - } -} - -// Sub atomically subtracts from the wrapped float64 and returns the new value. -func (f *Float64) Sub(s float64) float64 { - return f.Add(-s) -} - -// String encodes the wrapped value as a string. -func (f *Float64) String() string { - // 'g' is the behavior for floats with %v. - return strconv.FormatFloat(f.Load(), 'g', -1, 64) -} diff --git a/tools/vendor/go.uber.org/atomic/gen.go b/tools/vendor/go.uber.org/atomic/gen.go deleted file mode 100644 index 50d6b2485..000000000 --- a/tools/vendor/go.uber.org/atomic/gen.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -//go:generate bin/gen-atomicint -name=Int32 -wrapped=int32 -file=int32.go -//go:generate bin/gen-atomicint -name=Int64 -wrapped=int64 -file=int64.go -//go:generate bin/gen-atomicint -name=Uint32 -wrapped=uint32 -unsigned -file=uint32.go -//go:generate bin/gen-atomicint -name=Uint64 -wrapped=uint64 -unsigned -file=uint64.go diff --git a/tools/vendor/go.uber.org/atomic/int32.go b/tools/vendor/go.uber.org/atomic/int32.go deleted file mode 100644 index 18ae56493..000000000 --- a/tools/vendor/go.uber.org/atomic/int32.go +++ /dev/null @@ -1,102 +0,0 @@ -// @generated Code generated by gen-atomicint. - -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import ( - "encoding/json" - "strconv" - "sync/atomic" -) - -// Int32 is an atomic wrapper around int32. -type Int32 struct { - _ nocmp // disallow non-atomic comparison - - v int32 -} - -// NewInt32 creates a new Int32. -func NewInt32(i int32) *Int32 { - return &Int32{v: i} -} - -// Load atomically loads the wrapped value. -func (i *Int32) Load() int32 { - return atomic.LoadInt32(&i.v) -} - -// Add atomically adds to the wrapped int32 and returns the new value. -func (i *Int32) Add(n int32) int32 { - return atomic.AddInt32(&i.v, n) -} - -// Sub atomically subtracts from the wrapped int32 and returns the new value. -func (i *Int32) Sub(n int32) int32 { - return atomic.AddInt32(&i.v, -n) -} - -// Inc atomically increments the wrapped int32 and returns the new value. -func (i *Int32) Inc() int32 { - return i.Add(1) -} - -// Dec atomically decrements the wrapped int32 and returns the new value. -func (i *Int32) Dec() int32 { - return i.Sub(1) -} - -// CAS is an atomic compare-and-swap. -func (i *Int32) CAS(old, new int32) bool { - return atomic.CompareAndSwapInt32(&i.v, old, new) -} - -// Store atomically stores the passed value. -func (i *Int32) Store(n int32) { - atomic.StoreInt32(&i.v, n) -} - -// Swap atomically swaps the wrapped int32 and returns the old value. -func (i *Int32) Swap(n int32) int32 { - return atomic.SwapInt32(&i.v, n) -} - -// MarshalJSON encodes the wrapped int32 into JSON. -func (i *Int32) MarshalJSON() ([]byte, error) { - return json.Marshal(i.Load()) -} - -// UnmarshalJSON decodes JSON into the wrapped int32. -func (i *Int32) UnmarshalJSON(b []byte) error { - var v int32 - if err := json.Unmarshal(b, &v); err != nil { - return err - } - i.Store(v) - return nil -} - -// String encodes the wrapped value as a string. -func (i *Int32) String() string { - v := i.Load() - return strconv.FormatInt(int64(v), 10) -} diff --git a/tools/vendor/go.uber.org/atomic/int64.go b/tools/vendor/go.uber.org/atomic/int64.go deleted file mode 100644 index 2bcbbfaa9..000000000 --- a/tools/vendor/go.uber.org/atomic/int64.go +++ /dev/null @@ -1,102 +0,0 @@ -// @generated Code generated by gen-atomicint. - -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import ( - "encoding/json" - "strconv" - "sync/atomic" -) - -// Int64 is an atomic wrapper around int64. -type Int64 struct { - _ nocmp // disallow non-atomic comparison - - v int64 -} - -// NewInt64 creates a new Int64. -func NewInt64(i int64) *Int64 { - return &Int64{v: i} -} - -// Load atomically loads the wrapped value. -func (i *Int64) Load() int64 { - return atomic.LoadInt64(&i.v) -} - -// Add atomically adds to the wrapped int64 and returns the new value. -func (i *Int64) Add(n int64) int64 { - return atomic.AddInt64(&i.v, n) -} - -// Sub atomically subtracts from the wrapped int64 and returns the new value. -func (i *Int64) Sub(n int64) int64 { - return atomic.AddInt64(&i.v, -n) -} - -// Inc atomically increments the wrapped int64 and returns the new value. -func (i *Int64) Inc() int64 { - return i.Add(1) -} - -// Dec atomically decrements the wrapped int64 and returns the new value. -func (i *Int64) Dec() int64 { - return i.Sub(1) -} - -// CAS is an atomic compare-and-swap. -func (i *Int64) CAS(old, new int64) bool { - return atomic.CompareAndSwapInt64(&i.v, old, new) -} - -// Store atomically stores the passed value. -func (i *Int64) Store(n int64) { - atomic.StoreInt64(&i.v, n) -} - -// Swap atomically swaps the wrapped int64 and returns the old value. -func (i *Int64) Swap(n int64) int64 { - return atomic.SwapInt64(&i.v, n) -} - -// MarshalJSON encodes the wrapped int64 into JSON. -func (i *Int64) MarshalJSON() ([]byte, error) { - return json.Marshal(i.Load()) -} - -// UnmarshalJSON decodes JSON into the wrapped int64. -func (i *Int64) UnmarshalJSON(b []byte) error { - var v int64 - if err := json.Unmarshal(b, &v); err != nil { - return err - } - i.Store(v) - return nil -} - -// String encodes the wrapped value as a string. -func (i *Int64) String() string { - v := i.Load() - return strconv.FormatInt(int64(v), 10) -} diff --git a/tools/vendor/go.uber.org/atomic/nocmp.go b/tools/vendor/go.uber.org/atomic/nocmp.go deleted file mode 100644 index a8201cb4a..000000000 --- a/tools/vendor/go.uber.org/atomic/nocmp.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -// nocmp is an uncomparable struct. Embed this inside another struct to make -// it uncomparable. -// -// type Foo struct { -// nocmp -// // ... -// } -// -// This DOES NOT: -// -// - Disallow shallow copies of structs -// - Disallow comparison of pointers to uncomparable structs -type nocmp [0]func() diff --git a/tools/vendor/go.uber.org/atomic/string.go b/tools/vendor/go.uber.org/atomic/string.go deleted file mode 100644 index 225b7a2be..000000000 --- a/tools/vendor/go.uber.org/atomic/string.go +++ /dev/null @@ -1,54 +0,0 @@ -// @generated Code generated by gen-atomicwrapper. - -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -// String is an atomic type-safe wrapper for string values. -type String struct { - _ nocmp // disallow non-atomic comparison - - v Value -} - -var _zeroString string - -// NewString creates a new String. -func NewString(v string) *String { - x := &String{} - if v != _zeroString { - x.Store(v) - } - return x -} - -// Load atomically loads the wrapped string. -func (x *String) Load() string { - if v := x.v.Load(); v != nil { - return v.(string) - } - return _zeroString -} - -// Store atomically stores the passed string. 
-func (x *String) Store(v string) { - x.v.Store(v) -} diff --git a/tools/vendor/go.uber.org/atomic/string_ext.go b/tools/vendor/go.uber.org/atomic/string_ext.go deleted file mode 100644 index 3a9558213..000000000 --- a/tools/vendor/go.uber.org/atomic/string_ext.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -//go:generate bin/gen-atomicwrapper -name=String -type=string -wrapped=Value -file=string.go - -// String returns the wrapped value. -func (s *String) String() string { - return s.Load() -} - -// MarshalText encodes the wrapped string into a textual form. -// -// This makes it encodable as JSON, YAML, XML, and more. -func (s *String) MarshalText() ([]byte, error) { - return []byte(s.Load()), nil -} - -// UnmarshalText decodes text and replaces the wrapped string with it. -// -// This makes it decodable from JSON, YAML, XML, and more. -func (s *String) UnmarshalText(b []byte) error { - s.Store(string(b)) - return nil -} diff --git a/tools/vendor/go.uber.org/atomic/uint32.go b/tools/vendor/go.uber.org/atomic/uint32.go deleted file mode 100644 index a973aba1a..000000000 --- a/tools/vendor/go.uber.org/atomic/uint32.go +++ /dev/null @@ -1,102 +0,0 @@ -// @generated Code generated by gen-atomicint. - -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
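The `nocmp` device threaded through the deleted atomic types above is a general Go technique worth noting: embedding a zero-width field of type `[0]func()` makes a struct uncomparable at compile time, without adding any memory overhead. A self-contained sketch of the same trick (the `Gauge` type is hypothetical):

```go
package main

// nocmp occupies zero bytes, but func values are not comparable,
// so any struct embedding it cannot be compared with ==.
type nocmp [0]func()

// Gauge is a hypothetical type protected against accidental comparison.
type Gauge struct {
	_ nocmp
	v int64
}

func main() {
	var a, b Gauge
	a.v, b.v = 1, 1
	// _ = a == b // does not compile: Gauge contains nocmp, which is not comparable
	_, _ = a, b
}
```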
- -package atomic - -import ( - "encoding/json" - "strconv" - "sync/atomic" -) - -// Uint32 is an atomic wrapper around uint32. -type Uint32 struct { - _ nocmp // disallow non-atomic comparison - - v uint32 -} - -// NewUint32 creates a new Uint32. -func NewUint32(i uint32) *Uint32 { - return &Uint32{v: i} -} - -// Load atomically loads the wrapped value. -func (i *Uint32) Load() uint32 { - return atomic.LoadUint32(&i.v) -} - -// Add atomically adds to the wrapped uint32 and returns the new value. -func (i *Uint32) Add(n uint32) uint32 { - return atomic.AddUint32(&i.v, n) -} - -// Sub atomically subtracts from the wrapped uint32 and returns the new value. -func (i *Uint32) Sub(n uint32) uint32 { - return atomic.AddUint32(&i.v, ^(n - 1)) -} - -// Inc atomically increments the wrapped uint32 and returns the new value. -func (i *Uint32) Inc() uint32 { - return i.Add(1) -} - -// Dec atomically decrements the wrapped uint32 and returns the new value. -func (i *Uint32) Dec() uint32 { - return i.Sub(1) -} - -// CAS is an atomic compare-and-swap. -func (i *Uint32) CAS(old, new uint32) bool { - return atomic.CompareAndSwapUint32(&i.v, old, new) -} - -// Store atomically stores the passed value. -func (i *Uint32) Store(n uint32) { - atomic.StoreUint32(&i.v, n) -} - -// Swap atomically swaps the wrapped uint32 and returns the old value. -func (i *Uint32) Swap(n uint32) uint32 { - return atomic.SwapUint32(&i.v, n) -} - -// MarshalJSON encodes the wrapped uint32 into JSON. -func (i *Uint32) MarshalJSON() ([]byte, error) { - return json.Marshal(i.Load()) -} - -// UnmarshalJSON decodes JSON into the wrapped uint32. -func (i *Uint32) UnmarshalJSON(b []byte) error { - var v uint32 - if err := json.Unmarshal(b, &v); err != nil { - return err - } - i.Store(v) - return nil -} - -// String encodes the wrapped value as a string. -func (i *Uint32) String() string { - v := i.Load() - return strconv.FormatUint(uint64(v), 10) -} diff --git a/tools/vendor/go.uber.org/atomic/uint64.go b/tools/vendor/go.uber.org/atomic/uint64.go deleted file mode 100644 index 3b6c71fd5..000000000 --- a/tools/vendor/go.uber.org/atomic/uint64.go +++ /dev/null @@ -1,102 +0,0 @@ -// @generated Code generated by gen-atomicint. - -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package atomic - -import ( - "encoding/json" - "strconv" - "sync/atomic" -) - -// Uint64 is an atomic wrapper around uint64. 
-type Uint64 struct { - _ nocmp // disallow non-atomic comparison - - v uint64 -} - -// NewUint64 creates a new Uint64. -func NewUint64(i uint64) *Uint64 { - return &Uint64{v: i} -} - -// Load atomically loads the wrapped value. -func (i *Uint64) Load() uint64 { - return atomic.LoadUint64(&i.v) -} - -// Add atomically adds to the wrapped uint64 and returns the new value. -func (i *Uint64) Add(n uint64) uint64 { - return atomic.AddUint64(&i.v, n) -} - -// Sub atomically subtracts from the wrapped uint64 and returns the new value. -func (i *Uint64) Sub(n uint64) uint64 { - return atomic.AddUint64(&i.v, ^(n - 1)) -} - -// Inc atomically increments the wrapped uint64 and returns the new value. -func (i *Uint64) Inc() uint64 { - return i.Add(1) -} - -// Dec atomically decrements the wrapped uint64 and returns the new value. -func (i *Uint64) Dec() uint64 { - return i.Sub(1) -} - -// CAS is an atomic compare-and-swap. -func (i *Uint64) CAS(old, new uint64) bool { - return atomic.CompareAndSwapUint64(&i.v, old, new) -} - -// Store atomically stores the passed value. -func (i *Uint64) Store(n uint64) { - atomic.StoreUint64(&i.v, n) -} - -// Swap atomically swaps the wrapped uint64 and returns the old value. -func (i *Uint64) Swap(n uint64) uint64 { - return atomic.SwapUint64(&i.v, n) -} - -// MarshalJSON encodes the wrapped uint64 into JSON. -func (i *Uint64) MarshalJSON() ([]byte, error) { - return json.Marshal(i.Load()) -} - -// UnmarshalJSON decodes JSON into the wrapped uint64. -func (i *Uint64) UnmarshalJSON(b []byte) error { - var v uint64 - if err := json.Unmarshal(b, &v); err != nil { - return err - } - i.Store(v) - return nil -} - -// String encodes the wrapped value as a string. -func (i *Uint64) String() string { - v := i.Load() - return strconv.FormatUint(uint64(v), 10) -} diff --git a/tools/vendor/go.uber.org/atomic/value.go b/tools/vendor/go.uber.org/atomic/value.go deleted file mode 100644 index 671f3a382..000000000 --- a/tools/vendor/go.uber.org/atomic/value.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
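Editor's note: the go.uber.org/atomic wrappers deleted above are superseded by the typed atomics that the standard library's sync/atomic has shipped since Go 1.19; the multierr hunks below switch their import accordingly. A minimal sketch of the standard-library equivalent, not part of this patch:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var n atomic.Uint64 // zero value is ready to use; no NewUint64 constructor needed
	n.Store(41)
	n.Add(1)                        // counterpart of the deleted Uint64.Add
	ok := n.CompareAndSwap(42, 100) // counterpart of the deleted Uint64.CAS
	fmt.Println(n.Load(), ok)       // prints: 100 true
}
```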
- -package atomic - -import "sync/atomic" - -// Value shadows the type of the same name from sync/atomic -// https://godoc.org/sync/atomic#Value -type Value struct { - atomic.Value - - _ nocmp // disallow non-atomic comparison -} diff --git a/tools/vendor/go.uber.org/multierr/.travis.yml b/tools/vendor/go.uber.org/multierr/.travis.yml deleted file mode 100644 index 8636ab42a..000000000 --- a/tools/vendor/go.uber.org/multierr/.travis.yml +++ /dev/null @@ -1,23 +0,0 @@ -sudo: false -language: go -go_import_path: go.uber.org/multierr - -env: - global: - - GO111MODULE=on - -go: - - oldstable - - stable - -before_install: -- go version - -script: -- | - set -e - make lint - make cover - -after_success: -- bash <(curl -s https://codecov.io/bash) diff --git a/tools/vendor/go.uber.org/multierr/CHANGELOG.md b/tools/vendor/go.uber.org/multierr/CHANGELOG.md index 6f1db9ef4..f8177b978 100644 --- a/tools/vendor/go.uber.org/multierr/CHANGELOG.md +++ b/tools/vendor/go.uber.org/multierr/CHANGELOG.md @@ -1,6 +1,41 @@ Releases ======== +v1.11.0 (2023-03-28) +==================== +- `Errors` now supports any error that implements the multiple-error + interface. +- Add `Every` function to allow checking if all errors in the chain + satisfy `errors.Is` against the target error. + +v1.10.0 (2023-03-08) +==================== + +- Comply with Go 1.20's multiple-error interface. +- Drop Go 1.18 support. + Per the support policy, only Go 1.19 and 1.20 are supported now. +- Drop all non-test external dependencies. + +v1.9.0 (2022-12-12) +=================== + +- Add `AppendFunc` that allows passing functions, similar to + `AppendInvoke`. + +- Bump up yaml.v3 dependency to 3.0.1. + +v1.8.0 (2022-02-28) +=================== + +- `Combine`: perform zero allocations when there are no errors. + + +v1.7.0 (2021-05-06) +=================== + +- Add `AppendInvoke` to append into errors from `defer` blocks. + + v1.6.0 (2020-09-14) =================== diff --git a/tools/vendor/go.uber.org/multierr/LICENSE.txt b/tools/vendor/go.uber.org/multierr/LICENSE.txt index 858e02475..413e30f7c 100644 --- a/tools/vendor/go.uber.org/multierr/LICENSE.txt +++ b/tools/vendor/go.uber.org/multierr/LICENSE.txt @@ -1,4 +1,4 @@ -Copyright (c) 2017 Uber Technologies, Inc. +Copyright (c) 2017-2021 Uber Technologies, Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/tools/vendor/go.uber.org/multierr/Makefile b/tools/vendor/go.uber.org/multierr/Makefile index 316004400..dcb6fe723 100644 --- a/tools/vendor/go.uber.org/multierr/Makefile +++ b/tools/vendor/go.uber.org/multierr/Makefile @@ -34,9 +34,5 @@ lint: gofmt golint staticcheck .PHONY: cover cover: - go test -coverprofile=cover.out -coverpkg=./... -v ./... + go test -race -coverprofile=cover.out -coverpkg=./... -v ./... go tool cover -html=cover.out -o cover.html - -update-license: - @cd tools && go install go.uber.org/tools/update-license - @$(GOBIN)/update-license $(GO_FILES) diff --git a/tools/vendor/go.uber.org/multierr/README.md b/tools/vendor/go.uber.org/multierr/README.md index 751bd65e5..5ab6ac40f 100644 --- a/tools/vendor/go.uber.org/multierr/README.md +++ b/tools/vendor/go.uber.org/multierr/README.md @@ -2,9 +2,29 @@ `multierr` allows combining one or more Go `error`s together. +## Features + +- **Idiomatic**: + multierr follows best practices in Go, and keeps your code idiomatic.
+ - It keeps the underlying error type hidden, + allowing you to deal in `error` values exclusively. + - It provides APIs to safely append into an error from a `defer` statement. +- **Performant**: + multierr is optimized for performance: + - It avoids allocations where possible. + - It utilizes slice resizing semantics to optimize common cases + like appending into the same error object from a loop. +- **Interoperable**: + multierr interoperates with the Go standard library's error APIs seamlessly: + - The `errors.Is` and `errors.As` functions *just work*. +- **Lightweight**: + multierr comes with virtually no dependencies. + ## Installation - go get -u go.uber.org/multierr +```bash +go get -u go.uber.org/multierr@latest +``` ## Status @@ -15,9 +35,9 @@ Stable: No breaking changes will be made before 2.0. Released under the [MIT License]. [MIT License]: LICENSE.txt -[doc-img]: https://godoc.org/go.uber.org/multierr?status.svg -[doc]: https://godoc.org/go.uber.org/multierr -[ci-img]: https://travis-ci.com/uber-go/multierr.svg?branch=master +[doc-img]: https://pkg.go.dev/badge/go.uber.org/multierr +[doc]: https://pkg.go.dev/go.uber.org/multierr +[ci-img]: https://github.com/uber-go/multierr/actions/workflows/go.yml/badge.svg [cov-img]: https://codecov.io/gh/uber-go/multierr/branch/master/graph/badge.svg -[ci]: https://travis-ci.com/uber-go/multierr +[ci]: https://github.com/uber-go/multierr/actions/workflows/go.yml [cov]: https://codecov.io/gh/uber-go/multierr diff --git a/tools/vendor/go.uber.org/multierr/error.go b/tools/vendor/go.uber.org/multierr/error.go index 5c9b67d53..3a828b2df 100644 --- a/tools/vendor/go.uber.org/multierr/error.go +++ b/tools/vendor/go.uber.org/multierr/error.go @@ -1,4 +1,4 @@ -// Copyright (c) 2019 Uber Technologies, Inc. +// Copyright (c) 2017-2023 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -20,54 +20,109 @@ // Package multierr allows combining one or more errors together. // -// Overview +// # Overview // // Errors can be combined with the use of the Combine function. // -// multierr.Combine( -// reader.Close(), -// writer.Close(), -// conn.Close(), -// ) +// multierr.Combine( +// reader.Close(), +// writer.Close(), +// conn.Close(), +// ) // // If only two errors are being combined, the Append function may be used // instead. // -// err = multierr.Append(reader.Close(), writer.Close()) -// -// This makes it possible to record resource cleanup failures from deferred -// blocks with the help of named return values. -// -// func sendRequest(req Request) (err error) { -// conn, err := openConnection() -// if err != nil { -// return err -// } -// defer func() { -// err = multierr.Append(err, conn.Close()) -// }() -// // ... -// } +// err = multierr.Append(reader.Close(), writer.Close()) // // The underlying list of errors for a returned error object may be retrieved // with the Errors function. // -// errors := multierr.Errors(err) -// if len(errors) > 0 { -// fmt.Println("The following errors occurred:", errors) -// } +// errors := multierr.Errors(err) +// if len(errors) > 0 { +// fmt.Println("The following errors occurred:", errors) +// } +// +// # Appending from a loop +// +// You sometimes need to append into an error from a loop. 
+// +// var err error +// for _, item := range items { +// err = multierr.Append(err, process(item)) +// } +// +// Cases like this may require knowledge of whether an individual instance +// failed. This usually requires introduction of a new variable. +// +// var err error +// for _, item := range items { +// if perr := process(item); perr != nil { +// log.Warn("skipping item", item) +// err = multierr.Append(err, perr) +// } +// } +// +// multierr includes AppendInto to simplify cases like this. +// +// var err error +// for _, item := range items { +// if multierr.AppendInto(&err, process(item)) { +// log.Warn("skipping item", item) +// } +// } +// +// This will append the error into the err variable, and return true if that +// individual error was non-nil. // -// Advanced Usage +// See [AppendInto] for more information. +// +// # Deferred Functions +// +// Go makes it possible to modify the return value of a function in a defer +// block if the function was using named returns. This makes it possible to +// record resource cleanup failures from deferred blocks. +// +// func sendRequest(req Request) (err error) { +// conn, err := openConnection() +// if err != nil { +// return err +// } +// defer func() { +// err = multierr.Append(err, conn.Close()) +// }() +// // ... +// } +// +// multierr provides the Invoker type and AppendInvoke function to make cases +// like the above simpler and obviate the need for a closure. The following is +// roughly equivalent to the example above. +// +// func sendRequest(req Request) (err error) { +// conn, err := openConnection() +// if err != nil { +// return err +// } +// defer multierr.AppendInvoke(&err, multierr.Close(conn)) +// // ... +// } +// +// See [AppendInvoke] and [Invoker] for more information. +// +// NOTE: If you're modifying an error from inside a defer, you MUST use a named +// return value for that function. +// +// # Advanced Usage // // Errors returned by Combine and Append MAY implement the following // interface. // -// type errorGroup interface { -// // Returns a slice containing the underlying list of errors. -// // -// // This slice MUST NOT be modified by the caller. -// Errors() []error -// } +// type errorGroup interface { +// // Returns a slice containing the underlying list of errors. +// // +// // This slice MUST NOT be modified by the caller. +// Errors() []error +// } // // Note that if you need access to list of errors behind a multierr error, you // should prefer using the Errors function. That said, if you need cheap @@ -76,23 +131,23 @@ // because errors returned by Combine and Append are not guaranteed to // implement this interface. // -// var errors []error -// group, ok := err.(errorGroup) -// if ok { -// errors = group.Errors() -// } else { -// errors = []error{err} -// } +// var errors []error +// group, ok := err.(errorGroup) +// if ok { +// errors = group.Errors() +// } else { +// errors = []error{err} +// } package multierr // import "go.uber.org/multierr" import ( "bytes" + "errors" "fmt" "io" "strings" "sync" - - "go.uber.org/atomic" + "sync/atomic" ) var ( @@ -132,34 +187,15 @@ type errorGroup interface { // Errors returns a slice containing zero or more errors that the supplied // error is composed of. If the error is nil, a nil slice is returned. 
// -// err := multierr.Append(r.Close(), w.Close()) -// errors := multierr.Errors(err) +// err := multierr.Append(r.Close(), w.Close()) +// errors := multierr.Errors(err) // // If the error is not composed of other errors, the returned slice contains // just the error that was passed in. // // Callers of this function are free to modify the returned slice. func Errors(err error) []error { - if err == nil { - return nil - } - - // Note that we're casting to multiError, not errorGroup. Our contract is - // that returned errors MAY implement errorGroup. Errors, however, only - // has special behavior for multierr-specific error objects. - // - // This behavior can be expanded in the future but I think it's prudent to - // start with as little as possible in terms of contract and possibility - // of misuse. - eg, ok := err.(*multiError) - if !ok { - return []error{err} - } - - errors := eg.Errors() - result := make([]error, len(errors)) - copy(result, errors) - return result + return extractErrors(err) } // multiError is an error that holds one or more errors. @@ -174,8 +210,6 @@ type multiError struct { errors []error } -var _ errorGroup = (*multiError)(nil) - // Errors returns the list of underlying errors. // // This slice MUST NOT be modified. @@ -201,6 +235,17 @@ func (merr *multiError) Error() string { return result } +// Every compares every error in the given err against the given target error +// using [errors.Is], and returns true only if every comparison returned true. +func Every(err error, target error) bool { + for _, e := range extractErrors(err) { + if !errors.Is(e, target) { + return false + } + } + return true +} + func (merr *multiError) Format(f fmt.State, c rune) { if c == 'v' && f.Flag('+') { merr.writeMultiline(f) @@ -292,6 +337,14 @@ func inspect(errors []error) (res inspectResult) { // fromSlice converts the given list of errors into a single error. func fromSlice(errors []error) error { + // Don't pay to inspect small slices. + switch len(errors) { + case 0: + return nil + case 1: + return errors[0] + } + res := inspect(errors) switch res.Count { case 0: @@ -301,8 +354,12 @@ func fromSlice(errors []error) error { return errors[res.FirstErrorIdx] case len(errors): if !res.ContainsMultiError { - // already flat - return &multiError{errors: errors} + // Error list is flat. Make a copy of it + // Otherwise "errors" escapes to the heap + // unconditionally for all other cases. + // This lets us optimize for the "no errors" case. + out := append(([]error)(nil), errors...) + return &multiError{errors: out} } } @@ -327,32 +384,32 @@ func fromSlice(errors []error) error { // If zero arguments were passed or if all items are nil, a nil error is // returned. // -// Combine(nil, nil) // == nil +// Combine(nil, nil) // == nil // // If only a single error was passed, it is returned as-is. // -// Combine(err) // == err +// Combine(err) // == err // // Combine skips over nil arguments so this function may be used to combine // together errors from operations that fail independently of each other. // -// multierr.Combine( -// reader.Close(), -// writer.Close(), -// pipe.Close(), -// ) +// multierr.Combine( +// reader.Close(), +// writer.Close(), +// pipe.Close(), +// ) // // If any of the passed errors is a multierr error, it will be flattened along // with the other errors. 
// -// multierr.Combine(multierr.Combine(err1, err2), err3) -// // is the same as -// multierr.Combine(err1, err2, err3) +// multierr.Combine(multierr.Combine(err1, err2), err3) +// // is the same as +// multierr.Combine(err1, err2, err3) // // The returned error formats into a readable multi-line error message if // formatted with %+v. // -// fmt.Sprintf("%+v", multierr.Combine(err1, err2)) +// fmt.Sprintf("%+v", multierr.Combine(err1, err2)) func Combine(errors ...error) error { return fromSlice(errors) } @@ -362,16 +419,19 @@ func Combine(errors ...error) error { // This function is a specialization of Combine for the common case where // there are only two errors. // -// err = multierr.Append(reader.Close(), writer.Close()) +// err = multierr.Append(reader.Close(), writer.Close()) // // The following pattern may also be used to record failure of deferred // operations without losing information about the original error. // -// func doSomething(..) (err error) { -// f := acquireResource() -// defer func() { -// err = multierr.Append(err, f.Close()) -// }() +// func doSomething(..) (err error) { +// f := acquireResource() +// defer func() { +// err = multierr.Append(err, f.Close()) +// }() +// +// Note that the variable MUST be a named return to append an error to it from +// the defer statement. See also [AppendInvoke]. func Append(left error, right error) error { switch { case left == nil: @@ -401,37 +461,37 @@ func Append(left error, right error) error { // AppendInto appends an error into the destination of an error pointer and // returns whether the error being appended was non-nil. // -// var err error -// multierr.AppendInto(&err, r.Close()) -// multierr.AppendInto(&err, w.Close()) +// var err error +// multierr.AppendInto(&err, r.Close()) +// multierr.AppendInto(&err, w.Close()) // // The above is equivalent to, // -// err := multierr.Append(r.Close(), w.Close()) +// err := multierr.Append(r.Close(), w.Close()) // // As AppendInto reports whether the provided error was non-nil, it may be // used to build a multierr error in a loop more ergonomically. For example: // -// var err error -// for line := range lines { -// var item Item -// if multierr.AppendInto(&err, parse(line, &item)) { -// continue -// } -// items = append(items, item) -// } -// -// Compare this with a verison that relies solely on Append: -// -// var err error -// for line := range lines { -// var item Item -// if parseErr := parse(line, &item); parseErr != nil { -// err = multierr.Append(err, parseErr) -// continue -// } -// items = append(items, item) -// } +// var err error +// for line := range lines { +// var item Item +// if multierr.AppendInto(&err, parse(line, &item)) { +// continue +// } +// items = append(items, item) +// } +// +// Compare this with a version that relies solely on Append: +// +// var err error +// for line := range lines { +// var item Item +// if parseErr := parse(line, &item); parseErr != nil { +// err = multierr.Append(err, parseErr) +// continue +// } +// items = append(items, item) +// } func AppendInto(into *error, err error) (errored bool) { if into == nil { // We panic if 'into' is nil. This is not documented above @@ -447,3 +507,140 @@ func AppendInto(into *error, err error) (errored bool) { *into = Append(*into, err) return true } + +// Invoker is an operation that may fail with an error. Use it with +// AppendInvoke to append the result of calling the function into an error. +// This allows you to conveniently defer capture of failing operations. 
+// +// See also, [Close] and [Invoke]. +type Invoker interface { + Invoke() error +} + +// Invoke wraps a function which may fail with an error to match the Invoker +// interface. Use it to supply functions matching this signature to +// AppendInvoke. +// +// For example, +// +// func processReader(r io.Reader) (err error) { +// scanner := bufio.NewScanner(r) +// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err)) +// for scanner.Scan() { +// // ... +// } +// // ... +// } +// +// In this example, the following line will construct the Invoker right away, +// but defer the invocation of scanner.Err() until the function returns. +// +// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err)) +// +// Note that the error you're appending to from the defer statement MUST be a +// named return. +type Invoke func() error + +// Invoke calls the supplied function and returns its result. +func (i Invoke) Invoke() error { return i() } + +// Close builds an Invoker that closes the provided io.Closer. Use it with +// AppendInvoke to close io.Closers and append their results into an error. +// +// For example, +// +// func processFile(path string) (err error) { +// f, err := os.Open(path) +// if err != nil { +// return err +// } +// defer multierr.AppendInvoke(&err, multierr.Close(f)) +// return processReader(f) +// } +// +// In this example, multierr.Close will construct the Invoker right away, but +// defer the invocation of f.Close until the function returns. +// +// defer multierr.AppendInvoke(&err, multierr.Close(f)) +// +// Note that the error you're appending to from the defer statement MUST be a +// named return. +func Close(closer io.Closer) Invoker { + return Invoke(closer.Close) +} + +// AppendInvoke appends the result of calling the given Invoker into the +// provided error pointer. Use it with named returns to safely defer +// invocation of fallible operations until a function returns, and capture the +// resulting errors. +// +// func doSomething(...) (err error) { +// // ... +// f, err := openFile(..) +// if err != nil { +// return err +// } +// +// // multierr will call f.Close() when this function returns and +// // if the operation fails, it appends its error into the +// // returned error. +// defer multierr.AppendInvoke(&err, multierr.Close(f)) +// +// scanner := bufio.NewScanner(f) +// // Similarly, this schedules scanner.Err to be called and +// // inspected when the function returns, appending its error +// // into the returned error. +// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err)) +// +// // ... +// } +// +// NOTE: If used with a defer, the error variable MUST be a named return. +// +// Without defer, AppendInvoke behaves exactly like AppendInto. +// +// err := // ... +// multierr.AppendInvoke(&err, multierr.Invoke(foo)) +// +// // ...is roughly equivalent to... +// +// err := // ... +// multierr.AppendInto(&err, foo()) +// +// The advantage of the indirection introduced by Invoker is to make it easy +// to defer the invocation of a function. Without this indirection, the +// invoked function will be evaluated at the time of the defer block rather +// than when the function returns. +// +// // BAD: This is likely not what the caller intended. This will evaluate +// // foo() right away and append its result into the error when the +// // function returns. +// defer multierr.AppendInto(&err, foo()) +// +// // GOOD: This will defer invocation of foo until the function returns.
+// defer multierr.AppendInvoke(&err, multierr.Invoke(foo)) +// +// multierr provides a few Invoker implementations out of the box for +// convenience. See [Invoker] for more information. +func AppendInvoke(into *error, invoker Invoker) { + AppendInto(into, invoker.Invoke()) +} + +// AppendFunc is a shorthand for [AppendInvoke]. +// It allows using a function or method value directly +// without having to wrap it into an [Invoker] interface. +// +// func doSomething(...) (err error) { +// w, err := startWorker(...) +// if err != nil { +// return err +// } +// +// // multierr will call w.Stop() when this function returns and +// // if the operation fails, it appends its error into the +// // returned error. +// defer multierr.AppendFunc(&err, w.Stop) +// } +func AppendFunc(into *error, fn func() error) { + AppendInvoke(into, Invoke(fn)) +} diff --git a/tools/vendor/go.uber.org/atomic/error_ext.go b/tools/vendor/go.uber.org/multierr/error_post_go120.go similarity index 65% rename from tools/vendor/go.uber.org/atomic/error_ext.go rename to tools/vendor/go.uber.org/multierr/error_post_go120.go index ffe0be21c..a173f9c25 100644 --- a/tools/vendor/go.uber.org/atomic/error_ext.go +++ b/tools/vendor/go.uber.org/multierr/error_post_go120.go @@ -1,4 +1,4 @@ -// Copyright (c) 2020 Uber Technologies, Inc. +// Copyright (c) 2017-2023 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -18,22 +18,31 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. -package atomic +//go:build go1.20 +// +build go1.20 -// atomic.Value panics on nil inputs, or if the underlying type changes. -// Stabilize by always storing a custom struct that we control. +package multierr -//go:generate bin/gen-atomicwrapper -name=Error -type=error -wrapped=Value -pack=packError -unpack=unpackError -file=error.go - -type packedError struct{ Value error } +// Unwrap returns a list of errors wrapped by this multierr. +func (merr *multiError) Unwrap() []error { + return merr.Errors() +} -func packError(v error) interface{} { - return packedError{v} +type multipleErrors interface { + Unwrap() []error } -func unpackError(v interface{}) error { - if err, ok := v.(packedError); ok { - return err.Value +func extractErrors(err error) []error { + if err == nil { + return nil } - return nil + + // check if the given err is an unwrappable error that + // implements the multipleErrors interface. + eg, ok := err.(multipleErrors) + if !ok { + return []error{err} + } + + return append(([]error)(nil), eg.Unwrap()...) } diff --git a/tools/vendor/go.uber.org/multierr/go113.go b/tools/vendor/go.uber.org/multierr/error_pre_go120.go similarity index 66% rename from tools/vendor/go.uber.org/multierr/go113.go rename to tools/vendor/go.uber.org/multierr/error_pre_go120.go index 264b0eac0..93872a3fc 100644 --- a/tools/vendor/go.uber.org/multierr/go113.go +++ b/tools/vendor/go.uber.org/multierr/error_pre_go120.go @@ -1,4 +1,4 @@ -// Copyright (c) 2019 Uber Technologies, Inc. +// Copyright (c) 2017-2023 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -18,12 +18,19 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE.
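Editor's note: the documentation above covers Combine, Append, AppendInvoke, and the Every helper added in multierr v1.11.0. A short usage sketch, assuming go.uber.org/multierr v1.11+ is on the module path; not part of this patch:

```go
package main

import (
	"errors"
	"fmt"

	"go.uber.org/multierr"
)

var errQuota = errors.New("quota exceeded")

func main() {
	// Combine flattens any nested multierr errors into a single error value.
	err := multierr.Combine(
		fmt.Errorf("user 1: %w", errQuota),
		fmt.Errorf("user 2: %w", errQuota),
	)

	fmt.Println(len(multierr.Errors(err)))     // 2
	fmt.Println(errors.Is(err, errQuota))      // true: at least one branch matches
	fmt.Println(multierr.Every(err, errQuota)) // true: every branch matches
}
```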
-// +build go1.13 +//go:build !go1.20 +// +build !go1.20 package multierr import "errors" +// Versions of Go before 1.20 did not support the Unwrap() []error method. +// This provides a similar behavior by implementing the Is(..) and As(..) +// methods. +// See the errors.Join proposal for details: +// https://github.com/golang/go/issues/53435 + // As attempts to find the first error in the error list that matches the type // of the value that target points to. // @@ -50,3 +57,23 @@ func (merr *multiError) Is(target error) bool { } return false } + +func extractErrors(err error) []error { + if err == nil { + return nil + } + + // Note that we're casting to multiError, not errorGroup. Our contract is + // that returned errors MAY implement errorGroup. Errors, however, only + // has special behavior for multierr-specific error objects. + // + // This behavior can be expanded in the future but I think it's prudent to + // start with as little as possible in terms of contract and possibility + // of misuse. + eg, ok := err.(*multiError) + if !ok { + return []error{err} + } + + return append(([]error)(nil), eg.Errors()...) +} diff --git a/tools/vendor/go.uber.org/multierr/glide.yaml b/tools/vendor/go.uber.org/multierr/glide.yaml deleted file mode 100644 index 6ef084ec2..000000000 --- a/tools/vendor/go.uber.org/multierr/glide.yaml +++ /dev/null @@ -1,8 +0,0 @@ -package: go.uber.org/multierr -import: -- package: go.uber.org/atomic - version: ^1 -testImport: -- package: github.com/stretchr/testify - subpackages: - - assert diff --git a/tools/vendor/go.uber.org/zap/.golangci.yml b/tools/vendor/go.uber.org/zap/.golangci.yml new file mode 100644 index 000000000..2346df135 --- /dev/null +++ b/tools/vendor/go.uber.org/zap/.golangci.yml @@ -0,0 +1,77 @@ +output: + # Make output more digestible with quickfix in vim/emacs/etc. + sort-results: true + print-issued-lines: false + +linters: + # We'll track the golangci-lint default linters manually + # instead of letting them change without our control. + disable-all: true + enable: + # golangci-lint defaults: + - errcheck + - gosimple + - govet + - ineffassign + - staticcheck + - unused + + # Our own extras: + - gofumpt + - nolintlint # lints nolint directives + - revive + +linters-settings: + govet: + # These govet checks are disabled by default, but they're useful. + enable: + - nilness + - reflectvaluecompare + - sortslice + - unusedwrite + + errcheck: + exclude-functions: + # These methods cannot fail. + # They operate on an in-memory buffer. + - (*go.uber.org/zap/buffer.Buffer).Write + - (*go.uber.org/zap/buffer.Buffer).WriteByte + - (*go.uber.org/zap/buffer.Buffer).WriteString + + - (*go.uber.org/zap/zapio.Writer).Close + - (*go.uber.org/zap/zapio.Writer).Sync + - (*go.uber.org/zap/zapio.Writer).Write + # Write to zapio.Writer cannot fail, + # so io.WriteString on it cannot fail. + - io.WriteString(*go.uber.org/zap/zapio.Writer) + + # Writing a plain string to a fmt.State cannot fail. + - io.WriteString(fmt.State) + +issues: + # Print all issues reported by all linters. + max-issues-per-linter: 0 + max-same-issues: 0 + + # Don't ignore some of the issues that golangci-lint considers okay. + # This includes documenting all exported entities. + exclude-use-default: false + + exclude-rules: + # Don't warn on unused parameters. + # Parameter names are useful; replacing them with '_' is undesirable.
+ - linters: [revive] + text: 'unused-parameter: parameter \S+ seems to be unused, consider removing or renaming it as _' + + # staticcheck already has smarter checks for empty blocks. + # revive's empty-block linter has false positives. + # For example, as of writing this, the following is not allowed. + # for foo() { } + - linters: [revive] + text: 'empty-block: this block is empty, you can remove it' + + # Ignore logger.Sync() errcheck failures in example_test.go + # since those are intended to be uncomplicated examples. + - linters: [errcheck] + path: example_test.go + text: 'Error return value of `logger.Sync` is not checked' diff --git a/tools/vendor/go.uber.org/zap/.readme.tmpl b/tools/vendor/go.uber.org/zap/.readme.tmpl index 92aa65d66..4fea3027a 100644 --- a/tools/vendor/go.uber.org/zap/.readme.tmpl +++ b/tools/vendor/go.uber.org/zap/.readme.tmpl @@ -1,7 +1,15 @@ # :zap: zap [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] +

+ Blazing fast, structured, leveled logging in Go. +![Zap logo](assets/logo.png) + +[![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] + +
+ ## Installation `go get -u go.uber.org/zap` @@ -92,7 +100,7 @@ standard.
-Released under the [MIT License](LICENSE.txt). +Released under the [MIT License](LICENSE). 1 In particular, keep in mind that we may be benchmarking against slightly older versions of other packages. Versions are diff --git a/tools/vendor/go.uber.org/zap/CHANGELOG.md b/tools/vendor/go.uber.org/zap/CHANGELOG.md index 0db1f9f15..6d6cd5f4d 100644 --- a/tools/vendor/go.uber.org/zap/CHANGELOG.md +++ b/tools/vendor/go.uber.org/zap/CHANGELOG.md @@ -1,7 +1,55 @@ # Changelog All notable changes to this project will be documented in this file. -This project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). +This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## 1.27.0 (20 Feb 2024) +Enhancements: +* [#1378][]: Add `WithLazy` method for `SugaredLogger`. +* [#1399][]: zaptest: Add `NewTestingWriter` for customizing TestingWriter with more flexibility than `NewLogger`. +* [#1406][]: Add `Log`, `Logw`, `Logln` methods for `SugaredLogger`. +* [#1416][]: Add `WithPanicHook` option for testing panic logs. + +Thanks to @defval, @dimmo, @arxeiss, and @MKrupauskas for their contributions to this release. + +[#1378]: https://github.com/uber-go/zap/pull/1378 +[#1399]: https://github.com/uber-go/zap/pull/1399 +[#1406]: https://github.com/uber-go/zap/pull/1406 +[#1416]: https://github.com/uber-go/zap/pull/1416 + +## 1.26.0 (14 Sep 2023) +Enhancements: +* [#1297][]: Add Dict as a Field. +* [#1319][]: Add `WithLazy` method to `Logger` which lazily evaluates the structured +context. +* [#1350][]: String encoding is much (~50%) faster now. + +Thanks to @hhk7734, @jquirke, and @cdvr1993 for their contributions to this release. + +[#1297]: https://github.com/uber-go/zap/pull/1297 +[#1319]: https://github.com/uber-go/zap/pull/1319 +[#1350]: https://github.com/uber-go/zap/pull/1350 + +## 1.25.0 (1 Aug 2023) + +This release contains several improvements including performance, API additions, +and two new experimental packages whose APIs are unstable and may change in the +future. + +Enhancements: +* [#1246][]: Add `zap/exp/zapslog` package for integration with slog. +* [#1273][]: Add `Name` to `Logger` which returns the Logger's name if one is set. +* [#1281][]: Add `zap/exp/expfield` package which contains helper methods +`Str` and `Strs` for constructing String-like zap.Fields. +* [#1310][]: Reduce stack size on `Any`. + +Thanks to @knight42, @dzakaammar, @bcspragu, and @rexywork for their contributions +to this release. + +[#1246]: https://github.com/uber-go/zap/pull/1246 +[#1273]: https://github.com/uber-go/zap/pull/1273 +[#1281]: https://github.com/uber-go/zap/pull/1281 +[#1310]: https://github.com/uber-go/zap/pull/1310 ## 1.24.0 (30 Nov 2022) @@ -27,7 +75,6 @@ Enhancements: [#1147]: https://github.com/uber-go/zap/pull/1147 [#1155]: https://github.com/uber-go/zap/pull/1155 - ## 1.22.0 (8 Aug 2022) Enhancements: @@ -176,6 +223,16 @@ Enhancements: Thanks to @ash2k, @FMLS, @jimmystewpot, @Oncilla, @tsoslow, @tylitianrui, @withshubh, and @wziww for their contributions to this release. 
+[#865]: https://github.com/uber-go/zap/pull/865 +[#867]: https://github.com/uber-go/zap/pull/867 +[#881]: https://github.com/uber-go/zap/pull/881 +[#903]: https://github.com/uber-go/zap/pull/903 +[#912]: https://github.com/uber-go/zap/pull/912 +[#913]: https://github.com/uber-go/zap/pull/913 +[#928]: https://github.com/uber-go/zap/pull/928 +[#931]: https://github.com/uber-go/zap/pull/931 +[#936]: https://github.com/uber-go/zap/pull/936 + ## 1.16.0 (1 Sep 2020) Bugfixes: @@ -197,6 +254,17 @@ Enhancements: Thanks to @SteelPhase, @tmshn, @lixingwang, @wyxloading, @moul, @segevfiner, @andy-retailnext and @jcorbin for their contributions to this release. +[#629]: https://github.com/uber-go/zap/pull/629 +[#697]: https://github.com/uber-go/zap/pull/697 +[#828]: https://github.com/uber-go/zap/pull/828 +[#835]: https://github.com/uber-go/zap/pull/835 +[#843]: https://github.com/uber-go/zap/pull/843 +[#844]: https://github.com/uber-go/zap/pull/844 +[#852]: https://github.com/uber-go/zap/pull/852 +[#854]: https://github.com/uber-go/zap/pull/854 +[#861]: https://github.com/uber-go/zap/pull/861 +[#862]: https://github.com/uber-go/zap/pull/862 + ## 1.15.0 (23 Apr 2020) Bugfixes: @@ -213,6 +281,11 @@ Enhancements: Thanks to @danielbprice for their contributions to this release. +[#804]: https://github.com/uber-go/zap/pull/804 +[#812]: https://github.com/uber-go/zap/pull/812 +[#806]: https://github.com/uber-go/zap/pull/806 +[#813]: https://github.com/uber-go/zap/pull/813 + ## 1.14.1 (14 Mar 2020) Bugfixes: @@ -225,6 +298,10 @@ Bugfixes: Thanks to @YashishDua for their contributions to this release. +[#791]: https://github.com/uber-go/zap/pull/791 +[#795]: https://github.com/uber-go/zap/pull/795 +[#799]: https://github.com/uber-go/zap/pull/799 + ## 1.14.0 (20 Feb 2020) Enhancements: @@ -235,6 +312,11 @@ Enhancements: Thanks to @caibirdme for their contributions to this release. +[#771]: https://github.com/uber-go/zap/pull/771 +[#773]: https://github.com/uber-go/zap/pull/773 +[#775]: https://github.com/uber-go/zap/pull/775 +[#786]: https://github.com/uber-go/zap/pull/786 + ## 1.13.0 (13 Nov 2019) Enhancements: @@ -243,11 +325,15 @@ Enhancements: Thanks to @jbizzle for their contributions to this release. +[#758]: https://github.com/uber-go/zap/pull/758 + ## 1.12.0 (29 Oct 2019) Enhancements: * [#751][]: Migrate to Go modules. +[#751]: https://github.com/uber-go/zap/pull/751 + ## 1.11.0 (21 Oct 2019) Enhancements: @@ -256,6 +342,9 @@ Enhancements: Thanks to @juicemia, @uhthomas for their contributions to this release. +[#725]: https://github.com/uber-go/zap/pull/725 +[#736]: https://github.com/uber-go/zap/pull/736 + ## 1.10.0 (29 Apr 2019) Bugfixes: @@ -273,13 +362,21 @@ Enhancements: Thanks to @iaroslav-ciupin, @lelenanam, @joa, @NWilson for their contributions to this release. -## v1.9.1 (06 Aug 2018) +[#657]: https://github.com/uber-go/zap/pull/657 +[#706]: https://github.com/uber-go/zap/pull/706 +[#610]: https://github.com/uber-go/zap/pull/610 +[#675]: https://github.com/uber-go/zap/pull/675 +[#704]: https://github.com/uber-go/zap/pull/704 + +## 1.9.1 (06 Aug 2018) Bugfixes: * [#614][]: MapObjectEncoder should not ignore empty slices. -## v1.9.0 (19 Jul 2018) +[#614]: https://github.com/uber-go/zap/pull/614 + +## 1.9.0 (19 Jul 2018) Enhancements: * [#602][]: Reduce number of allocations when logging with reflection. @@ -288,7 +385,11 @@ Enhancements: Thanks to @nfarah86, @AlekSi, @JeanMertz, @philippgille, @etsangsplk, and @dimroc for their contributions to this release. 
-## v1.8.0 (13 Apr 2018) +[#602]: https://github.com/uber-go/zap/pull/602 +[#572]: https://github.com/uber-go/zap/pull/572 +[#606]: https://github.com/uber-go/zap/pull/606 + +## 1.8.0 (13 Apr 2018) Enhancements: * [#508][]: Make log level configurable when redirecting the standard @@ -301,19 +402,28 @@ Bugfixes: Thanks to @DiSiqueira and @djui for their contributions to this release. -## v1.7.1 (25 Sep 2017) +[#508]: https://github.com/uber-go/zap/pull/508 +[#518]: https://github.com/uber-go/zap/pull/518 +[#577]: https://github.com/uber-go/zap/pull/577 +[#574]: https://github.com/uber-go/zap/pull/574 + +## 1.7.1 (25 Sep 2017) Bugfixes: * [#504][]: Store strings when using AddByteString with the map encoder. -## v1.7.0 (21 Sep 2017) +[#504]: https://github.com/uber-go/zap/pull/504 + +## 1.7.0 (21 Sep 2017) Enhancements: * [#487][]: Add `NewStdLogAt`, which extends `NewStdLog` by allowing the user to specify the level of the logged messages. -## v1.6.0 (30 Aug 2017) +[#487]: https://github.com/uber-go/zap/pull/487 + +## 1.6.0 (30 Aug 2017) Enhancements: @@ -321,7 +431,10 @@ Enhancements: * [#490][]: Add a `ContextMap` method to observer logs for simpler field validation in tests. -## v1.5.0 (22 Jul 2017) +[#490]: https://github.com/uber-go/zap/pull/490 +[#491]: https://github.com/uber-go/zap/pull/491 + +## 1.5.0 (22 Jul 2017) Enhancements: @@ -334,7 +447,12 @@ Bugfixes: Thanks to @richard-tunein and @pavius for their contributions to this release. -## v1.4.1 (08 Jun 2017) +[#477]: https://github.com/uber-go/zap/pull/477 +[#465]: https://github.com/uber-go/zap/pull/465 +[#460]: https://github.com/uber-go/zap/pull/460 +[#470]: https://github.com/uber-go/zap/pull/470 + +## 1.4.1 (08 Jun 2017) This release fixes two bugs. @@ -343,7 +461,10 @@ Bugfixes: * [#435][]: Support a variety of case conventions when unmarshaling levels. * [#444][]: Fix a panic in the observer. -## v1.4.0 (12 May 2017) +[#435]: https://github.com/uber-go/zap/pull/435 +[#444]: https://github.com/uber-go/zap/pull/444 + +## 1.4.0 (12 May 2017) This release adds a few small features and is fully backward-compatible. @@ -355,7 +476,11 @@ Enhancements: * [#431][]: Make `zap.AtomicLevel` implement `fmt.Stringer`, which makes a variety of operations a bit simpler. -## v1.3.0 (25 Apr 2017) +[#424]: https://github.com/uber-go/zap/pull/424 +[#425]: https://github.com/uber-go/zap/pull/425 +[#431]: https://github.com/uber-go/zap/pull/431 + +## 1.3.0 (25 Apr 2017) This release adds an enhancement to zap's testing helpers as well as the ability to marshal an AtomicLevel. It is fully backward-compatible. @@ -366,7 +491,10 @@ Enhancements: particularly useful when testing the `SugaredLogger`. * [#416][]: Make `AtomicLevel` implement `encoding.TextMarshaler`. -## v1.2.0 (13 Apr 2017) +[#415]: https://github.com/uber-go/zap/pull/415 +[#416]: https://github.com/uber-go/zap/pull/416 + +## 1.2.0 (13 Apr 2017) This release adds a gRPC compatibility wrapper. It is fully backward-compatible. @@ -375,7 +503,9 @@ Enhancements: * [#402][]: Add a `zapgrpc` package that wraps zap's Logger and implements `grpclog.Logger`. -## v1.1.0 (31 Mar 2017) +[#402]: https://github.com/uber-go/zap/pull/402 + +## 1.1.0 (31 Mar 2017) This release fixes two bugs and adds some enhancements to zap's testing helpers. It is fully backward-compatible. @@ -392,7 +522,11 @@ Enhancements: Thanks to @moitias for contributing to this release. 
-## v1.0.0 (14 Mar 2017) +[#385]: https://github.com/uber-go/zap/pull/385 +[#396]: https://github.com/uber-go/zap/pull/396 +[#386]: https://github.com/uber-go/zap/pull/386 + +## 1.0.0 (14 Mar 2017) This is zap's first stable release. All exported APIs are now final, and no further breaking changes will be made in the 1.x release series. Anyone using a @@ -437,7 +571,21 @@ Enhancements: Thanks to @suyash, @htrendev, @flisky, @Ulexus, and @skipor for their contributions to this release. -## v1.0.0-rc.3 (7 Mar 2017) +[#366]: https://github.com/uber-go/zap/pull/366 +[#364]: https://github.com/uber-go/zap/pull/364 +[#371]: https://github.com/uber-go/zap/pull/371 +[#362]: https://github.com/uber-go/zap/pull/362 +[#369]: https://github.com/uber-go/zap/pull/369 +[#347]: https://github.com/uber-go/zap/pull/347 +[#373]: https://github.com/uber-go/zap/pull/373 +[#348]: https://github.com/uber-go/zap/pull/348 +[#327]: https://github.com/uber-go/zap/pull/327 +[#376]: https://github.com/uber-go/zap/pull/376 +[#346]: https://github.com/uber-go/zap/pull/346 +[#365]: https://github.com/uber-go/zap/pull/365 +[#372]: https://github.com/uber-go/zap/pull/372 + +## 1.0.0-rc.3 (7 Mar 2017) This is the third release candidate for zap's stable release. There are no breaking changes. @@ -458,7 +606,12 @@ Enhancements: Thanks to @ansel1 and @suyash for their contributions to this release. -## v1.0.0-rc.2 (21 Feb 2017) +[#339]: https://github.com/uber-go/zap/pull/339 +[#307]: https://github.com/uber-go/zap/pull/307 +[#353]: https://github.com/uber-go/zap/pull/353 +[#311]: https://github.com/uber-go/zap/pull/311 + +## 1.0.0-rc.2 (21 Feb 2017) This is the second release candidate for zap's stable release. It includes two breaking changes. @@ -495,7 +648,16 @@ Enhancements: Thanks to @skipor and @chapsuk for their contributions to this release. -## v1.0.0-rc.1 (14 Feb 2017) +[#316]: https://github.com/uber-go/zap/pull/316 +[#309]: https://github.com/uber-go/zap/pull/309 +[#317]: https://github.com/uber-go/zap/pull/317 +[#321]: https://github.com/uber-go/zap/pull/321 +[#325]: https://github.com/uber-go/zap/pull/325 +[#333]: https://github.com/uber-go/zap/pull/333 +[#326]: https://github.com/uber-go/zap/pull/326 +[#300]: https://github.com/uber-go/zap/pull/300 + +## 1.0.0-rc.1 (14 Feb 2017) This is the first release candidate for zap's stable release. There are multiple breaking changes and improvements from the pre-release version. Most notably: @@ -515,7 +677,7 @@ breaking changes and improvements from the pre-release version. Most notably: * Sampling is more accurate, and doesn't depend on the standard library's shared timer heap. -## v0.1.0-beta.1 (6 Feb 2017) +## 0.1.0-beta.1 (6 Feb 2017) This is a minor version, tagged to allow users to pin to the pre-1.0 APIs and upgrade at their leisure. Since this is the first tagged release, there are no @@ -523,95 +685,3 @@ backward compatibility concerns and all functionality is new. Early zap adopters should pin to the 0.1.x minor version until they're ready to upgrade to the upcoming stable release. 
- -[#316]: https://github.com/uber-go/zap/pull/316 -[#309]: https://github.com/uber-go/zap/pull/309 -[#317]: https://github.com/uber-go/zap/pull/317 -[#321]: https://github.com/uber-go/zap/pull/321 -[#325]: https://github.com/uber-go/zap/pull/325 -[#333]: https://github.com/uber-go/zap/pull/333 -[#326]: https://github.com/uber-go/zap/pull/326 -[#300]: https://github.com/uber-go/zap/pull/300 -[#339]: https://github.com/uber-go/zap/pull/339 -[#307]: https://github.com/uber-go/zap/pull/307 -[#353]: https://github.com/uber-go/zap/pull/353 -[#311]: https://github.com/uber-go/zap/pull/311 -[#366]: https://github.com/uber-go/zap/pull/366 -[#364]: https://github.com/uber-go/zap/pull/364 -[#371]: https://github.com/uber-go/zap/pull/371 -[#362]: https://github.com/uber-go/zap/pull/362 -[#369]: https://github.com/uber-go/zap/pull/369 -[#347]: https://github.com/uber-go/zap/pull/347 -[#373]: https://github.com/uber-go/zap/pull/373 -[#348]: https://github.com/uber-go/zap/pull/348 -[#327]: https://github.com/uber-go/zap/pull/327 -[#376]: https://github.com/uber-go/zap/pull/376 -[#346]: https://github.com/uber-go/zap/pull/346 -[#365]: https://github.com/uber-go/zap/pull/365 -[#372]: https://github.com/uber-go/zap/pull/372 -[#385]: https://github.com/uber-go/zap/pull/385 -[#396]: https://github.com/uber-go/zap/pull/396 -[#386]: https://github.com/uber-go/zap/pull/386 -[#402]: https://github.com/uber-go/zap/pull/402 -[#415]: https://github.com/uber-go/zap/pull/415 -[#416]: https://github.com/uber-go/zap/pull/416 -[#424]: https://github.com/uber-go/zap/pull/424 -[#425]: https://github.com/uber-go/zap/pull/425 -[#431]: https://github.com/uber-go/zap/pull/431 -[#435]: https://github.com/uber-go/zap/pull/435 -[#444]: https://github.com/uber-go/zap/pull/444 -[#477]: https://github.com/uber-go/zap/pull/477 -[#465]: https://github.com/uber-go/zap/pull/465 -[#460]: https://github.com/uber-go/zap/pull/460 -[#470]: https://github.com/uber-go/zap/pull/470 -[#487]: https://github.com/uber-go/zap/pull/487 -[#490]: https://github.com/uber-go/zap/pull/490 -[#491]: https://github.com/uber-go/zap/pull/491 -[#504]: https://github.com/uber-go/zap/pull/504 -[#508]: https://github.com/uber-go/zap/pull/508 -[#518]: https://github.com/uber-go/zap/pull/518 -[#577]: https://github.com/uber-go/zap/pull/577 -[#574]: https://github.com/uber-go/zap/pull/574 -[#602]: https://github.com/uber-go/zap/pull/602 -[#572]: https://github.com/uber-go/zap/pull/572 -[#606]: https://github.com/uber-go/zap/pull/606 -[#614]: https://github.com/uber-go/zap/pull/614 -[#657]: https://github.com/uber-go/zap/pull/657 -[#706]: https://github.com/uber-go/zap/pull/706 -[#610]: https://github.com/uber-go/zap/pull/610 -[#675]: https://github.com/uber-go/zap/pull/675 -[#704]: https://github.com/uber-go/zap/pull/704 -[#725]: https://github.com/uber-go/zap/pull/725 -[#736]: https://github.com/uber-go/zap/pull/736 -[#751]: https://github.com/uber-go/zap/pull/751 -[#758]: https://github.com/uber-go/zap/pull/758 -[#771]: https://github.com/uber-go/zap/pull/771 -[#773]: https://github.com/uber-go/zap/pull/773 -[#775]: https://github.com/uber-go/zap/pull/775 -[#786]: https://github.com/uber-go/zap/pull/786 -[#791]: https://github.com/uber-go/zap/pull/791 -[#795]: https://github.com/uber-go/zap/pull/795 -[#799]: https://github.com/uber-go/zap/pull/799 -[#804]: https://github.com/uber-go/zap/pull/804 -[#812]: https://github.com/uber-go/zap/pull/812 -[#806]: https://github.com/uber-go/zap/pull/806 -[#813]: https://github.com/uber-go/zap/pull/813 -[#629]: 
https://github.com/uber-go/zap/pull/629 -[#697]: https://github.com/uber-go/zap/pull/697 -[#828]: https://github.com/uber-go/zap/pull/828 -[#835]: https://github.com/uber-go/zap/pull/835 -[#843]: https://github.com/uber-go/zap/pull/843 -[#844]: https://github.com/uber-go/zap/pull/844 -[#852]: https://github.com/uber-go/zap/pull/852 -[#854]: https://github.com/uber-go/zap/pull/854 -[#861]: https://github.com/uber-go/zap/pull/861 -[#862]: https://github.com/uber-go/zap/pull/862 -[#865]: https://github.com/uber-go/zap/pull/865 -[#867]: https://github.com/uber-go/zap/pull/867 -[#881]: https://github.com/uber-go/zap/pull/881 -[#903]: https://github.com/uber-go/zap/pull/903 -[#912]: https://github.com/uber-go/zap/pull/912 -[#913]: https://github.com/uber-go/zap/pull/913 -[#928]: https://github.com/uber-go/zap/pull/928 -[#931]: https://github.com/uber-go/zap/pull/931 -[#936]: https://github.com/uber-go/zap/pull/936 diff --git a/tools/vendor/go.uber.org/zap/LICENSE.txt b/tools/vendor/go.uber.org/zap/LICENSE similarity index 100% rename from tools/vendor/go.uber.org/zap/LICENSE.txt rename to tools/vendor/go.uber.org/zap/LICENSE diff --git a/tools/vendor/go.uber.org/zap/Makefile b/tools/vendor/go.uber.org/zap/Makefile index 9b1bc3b0e..eb1cee53b 100644 --- a/tools/vendor/go.uber.org/zap/Makefile +++ b/tools/vendor/go.uber.org/zap/Makefile @@ -1,50 +1,51 @@ -export GOBIN ?= $(shell pwd)/bin +# Directory containing the Makefile. +PROJECT_ROOT = $(dir $(abspath $(lastword $(MAKEFILE_LIST)))) -GOLINT = $(GOBIN)/golint -STATICCHECK = $(GOBIN)/staticcheck +export GOBIN ?= $(PROJECT_ROOT)/bin +export PATH := $(GOBIN):$(PATH) + +GOVULNCHECK = $(GOBIN)/govulncheck BENCH_FLAGS ?= -cpuprofile=cpu.pprof -memprofile=mem.pprof -benchmem # Directories containing independent Go modules. -# -# We track coverage only for the main module. -MODULE_DIRS = . ./benchmarks ./zapgrpc/internal/test +MODULE_DIRS = . ./exp ./benchmarks ./zapgrpc/internal/test -# Many Go tools take file globs or directories as arguments instead of packages. -GO_FILES := $(shell \ - find . '(' -path '*/.*' -o -path './vendor' ')' -prune \ - -o -name '*.go' -print | cut -b3-) +# Directories that we want to track coverage for. +COVER_DIRS = . ./exp .PHONY: all all: lint test .PHONY: lint -lint: $(GOLINT) $(STATICCHECK) - @rm -rf lint.log - @echo "Checking formatting..." - @gofmt -d -s $(GO_FILES) 2>&1 | tee lint.log - @echo "Checking vet..." - @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && go vet ./... 2>&1) &&) true | tee -a lint.log - @echo "Checking lint..." - @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && $(GOLINT) ./... 2>&1) &&) true | tee -a lint.log - @echo "Checking staticcheck..." - @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && $(STATICCHECK) ./... 2>&1) &&) true | tee -a lint.log - @echo "Checking for unresolved FIXMEs..." - @git grep -i fixme | grep -v -e Makefile | tee -a lint.log - @echo "Checking for license headers..." - @./checklicense.sh | tee -a lint.log - @[ ! -s lint.log ] - @echo "Checking 'go mod tidy'..." - @make tidy - @if ! 
git diff --quiet; then \ - echo "'go mod tidy' resulted in changes or working tree is dirty:"; \ - git --no-pager diff; \ - fi - -$(GOLINT): - cd tools && go install golang.org/x/lint/golint - -$(STATICCHECK): - cd tools && go install honnef.co/go/tools/cmd/staticcheck +lint: golangci-lint tidy-lint license-lint + +.PHONY: golangci-lint +golangci-lint: + @$(foreach mod,$(MODULE_DIRS), \ + (cd $(mod) && \ + echo "[lint] golangci-lint: $(mod)" && \ + golangci-lint run --path-prefix $(mod)) &&) true + +.PHONY: tidy +tidy: + @$(foreach dir,$(MODULE_DIRS), \ + (cd $(dir) && go mod tidy) &&) true + +.PHONY: tidy-lint +tidy-lint: + @$(foreach mod,$(MODULE_DIRS), \ + (cd $(mod) && \ + echo "[lint] tidy: $(mod)" && \ + go mod tidy && \ + git diff --exit-code -- go.mod go.sum) &&) true + + +.PHONY: license-lint +license-lint: + ./checklicense.sh + +$(GOVULNCHECK): + cd tools && go install golang.org/x/vuln/cmd/govulncheck .PHONY: test test: @@ -52,8 +53,10 @@ test: .PHONY: cover cover: - go test -race -coverprofile=cover.out -coverpkg=./... ./... - go tool cover -html=cover.out -o cover.html + @$(foreach dir,$(COVER_DIRS), ( \ + cd $(dir) && \ + go test -race -coverprofile=cover.out -coverpkg=./... ./... \ + && go tool cover -html=cover.out -o cover.html) &&) true .PHONY: bench BENCH ?= . @@ -68,6 +71,6 @@ updatereadme: rm -f README.md cat .readme.tmpl | go run internal/readme/readme.go > README.md -.PHONY: tidy -tidy: - @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && go mod tidy) &&) true +.PHONY: vulncheck +vulncheck: $(GOVULNCHECK) + $(GOVULNCHECK) ./... diff --git a/tools/vendor/go.uber.org/zap/README.md b/tools/vendor/go.uber.org/zap/README.md index a553a428c..a17035cb6 100644 --- a/tools/vendor/go.uber.org/zap/README.md +++ b/tools/vendor/go.uber.org/zap/README.md @@ -1,7 +1,16 @@ -# :zap: zap [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] +# :zap: zap + + +
Blazing fast, structured, leveled logging in Go. +![Zap logo](assets/logo.png) + +[![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] + +
+
 ## Installation
 
 `go get -u go.uber.org/zap`
 
@@ -54,7 +63,7 @@ and make many small allocations. Put differently, using `encoding/json` and
 Zap takes a different approach. It includes a reflection-free, zero-allocation
 JSON encoder, and the base `Logger` strives to avoid serialization overhead
 and allocations wherever possible. By building the high-level `SugaredLogger`
-on that foundation, zap lets users _choose_ when they need to count every
+on that foundation, zap lets users *choose* when they need to count every
 allocation and when they'd prefer a more familiar, loosely typed API.
 
 As measured by its own [benchmarking suite][], not only is zap more performant
@@ -64,40 +73,46 @@ id="anchor-versions">[1](#footnote-versions)
 
 Log a message and 10 fields:
 
-| Package | Time | Time % to zap | Objects Allocated |
-| :------------------ | :---------: | :-----------: | :---------------: |
-| :zap: zap | 2900 ns/op | +0% | 5 allocs/op |
-| :zap: zap (sugared) | 3475 ns/op | +20% | 10 allocs/op |
-| zerolog | 10639 ns/op | +267% | 32 allocs/op |
-| go-kit | 14434 ns/op | +398% | 59 allocs/op |
-| logrus | 17104 ns/op | +490% | 81 allocs/op |
-| apex/log | 32424 ns/op | +1018% | 66 allocs/op |
-| log15 | 33579 ns/op | +1058% | 76 allocs/op |
+| Package | Time | Time % to zap | Objects Allocated |
+| :------ | :--: | :-----------: | :---------------: |
+| :zap: zap | 656 ns/op | +0% | 5 allocs/op
+| :zap: zap (sugared) | 935 ns/op | +43% | 10 allocs/op
+| zerolog | 380 ns/op | -42% | 1 allocs/op
+| go-kit | 2249 ns/op | +243% | 57 allocs/op
+| slog (LogAttrs) | 2479 ns/op | +278% | 40 allocs/op
+| slog | 2481 ns/op | +278% | 42 allocs/op
+| apex/log | 9591 ns/op | +1362% | 63 allocs/op
+| log15 | 11393 ns/op | +1637% | 75 allocs/op
+| logrus | 11654 ns/op | +1677% | 79 allocs/op
 
 Log a message with a logger that already has 10 fields of context:
 
-| Package | Time | Time % to zap | Objects Allocated |
-| :------------------ | :---------: | :-----------: | :---------------: |
-| :zap: zap | 373 ns/op | +0% | 0 allocs/op |
-| :zap: zap (sugared) | 452 ns/op | +21% | 1 allocs/op |
-| zerolog | 288 ns/op | -23% | 0 allocs/op |
-| go-kit | 11785 ns/op | +3060% | 58 allocs/op |
-| logrus | 19629 ns/op | +5162% | 70 allocs/op |
-| log15 | 21866 ns/op | +5762% | 72 allocs/op |
-| apex/log | 30890 ns/op | +8182% | 55 allocs/op |
+| Package | Time | Time % to zap | Objects Allocated |
+| :------ | :--: | :-----------: | :---------------: |
+| :zap: zap | 67 ns/op | +0% | 0 allocs/op
+| :zap: zap (sugared) | 84 ns/op | +25% | 1 allocs/op
+| zerolog | 35 ns/op | -48% | 0 allocs/op
+| slog | 193 ns/op | +188% | 0 allocs/op
+| slog (LogAttrs) | 200 ns/op | +199% | 0 allocs/op
+| go-kit | 2460 ns/op | +3572% | 56 allocs/op
+| log15 | 9038 ns/op | +13390% | 70 allocs/op
+| apex/log | 9068 ns/op | +13434% | 53 allocs/op
+| logrus | 10521 ns/op | +15603% | 68 allocs/op
 
 Log a static string, without any context or `printf`-style templating:
 
-| Package | Time | Time % to zap | Objects Allocated |
-| :------------------ | :--------: | :-----------: | :---------------: |
-| :zap: zap | 381 ns/op | +0% | 0 allocs/op |
-| :zap: zap (sugared) | 410 ns/op | +8% | 1 allocs/op |
-| zerolog | 369 ns/op | -3% | 0 allocs/op |
-| standard library | 385 ns/op | +1% | 2 allocs/op |
-| go-kit | 606 ns/op | +59% | 11 allocs/op |
-| logrus | 1730 ns/op | +354% | 25 allocs/op |
-| apex/log | 1998 ns/op | +424% | 7 allocs/op |
-| log15 | 4546 ns/op | +1093% | 22 allocs/op |
+| Package | Time | Time % to zap | Objects Allocated |
+| :------ | :--: | :-----------: | :---------------: |
+| :zap: zap | 63 ns/op | +0% | 0 allocs/op
+| :zap: zap (sugared) | 81 ns/op | +29% | 1 allocs/op
+| zerolog | 32 ns/op | -49% | 0 allocs/op
+| standard library | 124 ns/op | +97% | 1 allocs/op
+| slog | 196 ns/op | +211% | 0 allocs/op
+| slog (LogAttrs) | 200 ns/op | +217% | 0 allocs/op
+| go-kit | 213 ns/op | +238% | 9 allocs/op
+| apex/log | 771 ns/op | +1124% | 5 allocs/op
+| logrus | 1439 ns/op | +2184% | 23 allocs/op
+| log15 | 2069 ns/op | +3184% | 20 allocs/op
 
 ## Development Status: Stable
 
@@ -117,7 +132,7 @@ standard.
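For reference, a minimal sketch of the two front-ends these benchmarks compare; the URL and attempt values are hypothetical:

logger, _ := zap.NewProduction()
defer logger.Sync()

// Core API: strongly typed fields, no reflection at the call site.
logger.Info("failed to fetch URL",
	zap.String("url", "https://example.com"),
	zap.Int("attempt", 3),
)

// SugaredLogger: looser, printf-style API built on the same core.
sugar := logger.Sugar()
sugar.Infow("failed to fetch URL", "url", "https://example.com", "attempt", 3)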
-Released under the [MIT License](LICENSE.txt). +Released under the [MIT License](LICENSE). 1 In particular, keep in mind that we may be benchmarking against slightly older versions of other packages. Versions are @@ -131,3 +146,4 @@ pinned in the [benchmarks/go.mod][] file. [↩](#anchor-versions) [cov]: https://codecov.io/gh/uber-go/zap [benchmarking suite]: https://github.com/uber-go/zap/tree/master/benchmarks [benchmarks/go.mod]: https://github.com/uber-go/zap/blob/master/benchmarks/go.mod + diff --git a/tools/vendor/go.uber.org/zap/array.go b/tools/vendor/go.uber.org/zap/array.go index 5be3704a3..abfccb566 100644 --- a/tools/vendor/go.uber.org/zap/array.go +++ b/tools/vendor/go.uber.org/zap/array.go @@ -21,6 +21,7 @@ package zap import ( + "fmt" "time" "go.uber.org/zap/zapcore" @@ -94,11 +95,137 @@ func Int8s(key string, nums []int8) Field { return Array(key, int8s(nums)) } +// Objects constructs a field with the given key, holding a list of the +// provided objects that can be marshaled by Zap. +// +// Note that these objects must implement zapcore.ObjectMarshaler directly. +// That is, if you're trying to marshal a []Request, the MarshalLogObject +// method must be declared on the Request type, not its pointer (*Request). +// If it's on the pointer, use ObjectValues. +// +// Given an object that implements MarshalLogObject on the value receiver, you +// can log a slice of those objects with Objects like so: +// +// type Author struct{ ... } +// func (a Author) MarshalLogObject(enc zapcore.ObjectEncoder) error +// +// var authors []Author = ... +// logger.Info("loading article", zap.Objects("authors", authors)) +// +// Similarly, given a type that implements MarshalLogObject on its pointer +// receiver, you can log a slice of pointers to that object with Objects like +// so: +// +// type Request struct{ ... } +// func (r *Request) MarshalLogObject(enc zapcore.ObjectEncoder) error +// +// var requests []*Request = ... +// logger.Info("sending requests", zap.Objects("requests", requests)) +// +// If instead, you have a slice of values of such an object, use the +// ObjectValues constructor. +// +// var requests []Request = ... +// logger.Info("sending requests", zap.ObjectValues("requests", requests)) +func Objects[T zapcore.ObjectMarshaler](key string, values []T) Field { + return Array(key, objects[T](values)) +} + +type objects[T zapcore.ObjectMarshaler] []T + +func (os objects[T]) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for _, o := range os { + if err := arr.AppendObject(o); err != nil { + return err + } + } + return nil +} + +// ObjectMarshalerPtr is a constraint that specifies that the given type +// implements zapcore.ObjectMarshaler on a pointer receiver. +type ObjectMarshalerPtr[T any] interface { + *T + zapcore.ObjectMarshaler +} + +// ObjectValues constructs a field with the given key, holding a list of the +// provided objects, where pointers to these objects can be marshaled by Zap. +// +// Note that pointers to these objects must implement zapcore.ObjectMarshaler. +// That is, if you're trying to marshal a []Request, the MarshalLogObject +// method must be declared on the *Request type, not the value (Request). +// If it's on the value, use Objects. +// +// Given an object that implements MarshalLogObject on the pointer receiver, +// you can log a slice of those objects with ObjectValues like so: +// +// type Request struct{ ... } +// func (r *Request) MarshalLogObject(enc zapcore.ObjectEncoder) error +// +// var requests []Request = ... 
+// logger.Info("sending requests", zap.ObjectValues("requests", requests)) +// +// If instead, you have a slice of pointers of such an object, use the Objects +// field constructor. +// +// var requests []*Request = ... +// logger.Info("sending requests", zap.Objects("requests", requests)) +func ObjectValues[T any, P ObjectMarshalerPtr[T]](key string, values []T) Field { + return Array(key, objectValues[T, P](values)) +} + +type objectValues[T any, P ObjectMarshalerPtr[T]] []T + +func (os objectValues[T, P]) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range os { + // It is necessary for us to explicitly reference the "P" type. + // We cannot simply pass "&os[i]" to AppendObject because its type + // is "*T", which the type system does not consider as + // implementing ObjectMarshaler. + // Only the type "P" satisfies ObjectMarshaler, which we have + // to convert "*T" to explicitly. + var p P = &os[i] + if err := arr.AppendObject(p); err != nil { + return err + } + } + return nil +} + // Strings constructs a field that carries a slice of strings. func Strings(key string, ss []string) Field { return Array(key, stringArray(ss)) } +// Stringers constructs a field with the given key, holding a list of the +// output provided by the value's String method +// +// Given an object that implements String on the value receiver, you +// can log a slice of those objects with Objects like so: +// +// type Request struct{ ... } +// func (a Request) String() string +// +// var requests []Request = ... +// logger.Info("sending requests", zap.Stringers("requests", requests)) +// +// Note that these objects must implement fmt.Stringer directly. +// That is, if you're trying to marshal a []Request, the String method +// must be declared on the Request type, not its pointer (*Request). +func Stringers[T fmt.Stringer](key string, values []T) Field { + return Array(key, stringers[T](values)) +} + +type stringers[T fmt.Stringer] []T + +func (os stringers[T]) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for _, o := range os { + arr.AppendString(o.String()) + } + return nil +} + // Times constructs a field that carries a slice of time.Times. func Times(key string, ts []time.Time) Field { return Array(key, times(ts)) diff --git a/tools/vendor/go.uber.org/zap/array_go118.go b/tools/vendor/go.uber.org/zap/array_go118.go deleted file mode 100644 index d0d2c49d6..000000000 --- a/tools/vendor/go.uber.org/zap/array_go118.go +++ /dev/null @@ -1,156 +0,0 @@ -// Copyright (c) 2022 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -//go:build go1.18 -// +build go1.18 - -package zap - -import ( - "fmt" - - "go.uber.org/zap/zapcore" -) - -// Objects constructs a field with the given key, holding a list of the -// provided objects that can be marshaled by Zap. -// -// Note that these objects must implement zapcore.ObjectMarshaler directly. -// That is, if you're trying to marshal a []Request, the MarshalLogObject -// method must be declared on the Request type, not its pointer (*Request). -// If it's on the pointer, use ObjectValues. -// -// Given an object that implements MarshalLogObject on the value receiver, you -// can log a slice of those objects with Objects like so: -// -// type Author struct{ ... } -// func (a Author) MarshalLogObject(enc zapcore.ObjectEncoder) error -// -// var authors []Author = ... -// logger.Info("loading article", zap.Objects("authors", authors)) -// -// Similarly, given a type that implements MarshalLogObject on its pointer -// receiver, you can log a slice of pointers to that object with Objects like -// so: -// -// type Request struct{ ... } -// func (r *Request) MarshalLogObject(enc zapcore.ObjectEncoder) error -// -// var requests []*Request = ... -// logger.Info("sending requests", zap.Objects("requests", requests)) -// -// If instead, you have a slice of values of such an object, use the -// ObjectValues constructor. -// -// var requests []Request = ... -// logger.Info("sending requests", zap.ObjectValues("requests", requests)) -func Objects[T zapcore.ObjectMarshaler](key string, values []T) Field { - return Array(key, objects[T](values)) -} - -type objects[T zapcore.ObjectMarshaler] []T - -func (os objects[T]) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for _, o := range os { - if err := arr.AppendObject(o); err != nil { - return err - } - } - return nil -} - -// ObjectMarshalerPtr is a constraint that specifies that the given type -// implements zapcore.ObjectMarshaler on a pointer receiver. -type ObjectMarshalerPtr[T any] interface { - *T - zapcore.ObjectMarshaler -} - -// ObjectValues constructs a field with the given key, holding a list of the -// provided objects, where pointers to these objects can be marshaled by Zap. -// -// Note that pointers to these objects must implement zapcore.ObjectMarshaler. -// That is, if you're trying to marshal a []Request, the MarshalLogObject -// method must be declared on the *Request type, not the value (Request). -// If it's on the value, use Objects. -// -// Given an object that implements MarshalLogObject on the pointer receiver, -// you can log a slice of those objects with ObjectValues like so: -// -// type Request struct{ ... } -// func (r *Request) MarshalLogObject(enc zapcore.ObjectEncoder) error -// -// var requests []Request = ... -// logger.Info("sending requests", zap.ObjectValues("requests", requests)) -// -// If instead, you have a slice of pointers of such an object, use the Objects -// field constructor. -// -// var requests []*Request = ... 
-// logger.Info("sending requests", zap.Objects("requests", requests)) -func ObjectValues[T any, P ObjectMarshalerPtr[T]](key string, values []T) Field { - return Array(key, objectValues[T, P](values)) -} - -type objectValues[T any, P ObjectMarshalerPtr[T]] []T - -func (os objectValues[T, P]) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for i := range os { - // It is necessary for us to explicitly reference the "P" type. - // We cannot simply pass "&os[i]" to AppendObject because its type - // is "*T", which the type system does not consider as - // implementing ObjectMarshaler. - // Only the type "P" satisfies ObjectMarshaler, which we have - // to convert "*T" to explicitly. - var p P = &os[i] - if err := arr.AppendObject(p); err != nil { - return err - } - } - return nil -} - -// Stringers constructs a field with the given key, holding a list of the -// output provided by the value's String method -// -// Given an object that implements String on the value receiver, you -// can log a slice of those objects with Objects like so: -// -// type Request struct{ ... } -// func (a Request) String() string -// -// var requests []Request = ... -// logger.Info("sending requests", zap.Stringers("requests", requests)) -// -// Note that these objects must implement fmt.Stringer directly. -// That is, if you're trying to marshal a []Request, the String method -// must be declared on the Request type, not its pointer (*Request). -func Stringers[T fmt.Stringer](key string, values []T) Field { - return Array(key, stringers[T](values)) -} - -type stringers[T fmt.Stringer] []T - -func (os stringers[T]) MarshalLogArray(arr zapcore.ArrayEncoder) error { - for _, o := range os { - arr.AppendString(o.String()) - } - return nil -} diff --git a/tools/vendor/go.uber.org/zap/buffer/buffer.go b/tools/vendor/go.uber.org/zap/buffer/buffer.go index 9e929cd98..0b8540c21 100644 --- a/tools/vendor/go.uber.org/zap/buffer/buffer.go +++ b/tools/vendor/go.uber.org/zap/buffer/buffer.go @@ -42,6 +42,11 @@ func (b *Buffer) AppendByte(v byte) { b.bs = append(b.bs, v) } +// AppendBytes writes the given slice of bytes to the Buffer. +func (b *Buffer) AppendBytes(v []byte) { + b.bs = append(b.bs, v...) +} + // AppendString writes a string to the Buffer. func (b *Buffer) AppendString(s string) { b.bs = append(b.bs, s...) diff --git a/tools/vendor/go.uber.org/zap/buffer/pool.go b/tools/vendor/go.uber.org/zap/buffer/pool.go index 8fb3e202c..846323360 100644 --- a/tools/vendor/go.uber.org/zap/buffer/pool.go +++ b/tools/vendor/go.uber.org/zap/buffer/pool.go @@ -20,25 +20,29 @@ package buffer -import "sync" +import ( + "go.uber.org/zap/internal/pool" +) // A Pool is a type-safe wrapper around a sync.Pool. type Pool struct { - p *sync.Pool + p *pool.Pool[*Buffer] } // NewPool constructs a new Pool. func NewPool() Pool { - return Pool{p: &sync.Pool{ - New: func() interface{} { - return &Buffer{bs: make([]byte, 0, _size)} - }, - }} + return Pool{ + p: pool.New(func() *Buffer { + return &Buffer{ + bs: make([]byte, 0, _size), + } + }), + } } // Get retrieves a Buffer from the pool, creating one if necessary. 
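Circling back to the generic constructors added in array.go above, a sketch of how the receiver rules play out, assuming hypothetical Author and Request types:

type Author struct{ Name string }

// Value receiver: []Author goes through zap.Objects.
func (a Author) MarshalLogObject(enc zapcore.ObjectEncoder) error {
	enc.AddString("name", a.Name)
	return nil
}

type Request struct{ URL string }

// Pointer receiver: []Request goes through zap.ObjectValues,
// while []*Request goes through zap.Objects.
func (r *Request) MarshalLogObject(enc zapcore.ObjectEncoder) error {
	enc.AddString("url", r.URL)
	return nil
}

logger.Info("loading article", zap.Objects("authors", []Author{{Name: "jane"}}))
logger.Info("sending requests", zap.ObjectValues("requests", []Request{{URL: "/a"}}))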
func (p Pool) Get() *Buffer { - buf := p.p.Get().(*Buffer) + buf := p.p.Get() buf.Reset() buf.pool = p return buf diff --git a/tools/vendor/go.uber.org/zap/config.go b/tools/vendor/go.uber.org/zap/config.go index ee6096766..e76e4e64f 100644 --- a/tools/vendor/go.uber.org/zap/config.go +++ b/tools/vendor/go.uber.org/zap/config.go @@ -95,6 +95,32 @@ type Config struct { // NewProductionEncoderConfig returns an opinionated EncoderConfig for // production environments. +// +// Messages encoded with this configuration will be JSON-formatted +// and will have the following keys by default: +// +// - "level": The logging level (e.g. "info", "error"). +// - "ts": The current time in number of seconds since the Unix epoch. +// - "msg": The message passed to the log statement. +// - "caller": If available, a short path to the file and line number +// where the log statement was issued. +// The logger configuration determines whether this field is captured. +// - "stacktrace": If available, a stack trace from the line +// where the log statement was issued. +// The logger configuration determines whether this field is captured. +// +// By default, the following formats are used for different types: +// +// - Time is formatted as floating-point number of seconds since the Unix +// epoch. +// - Duration is formatted as floating-point number of seconds. +// +// You may change these by setting the appropriate fields in the returned +// object. +// For example, use the following to change the time encoding format: +// +// cfg := zap.NewProductionEncoderConfig() +// cfg.EncodeTime = zapcore.ISO8601TimeEncoder func NewProductionEncoderConfig() zapcore.EncoderConfig { return zapcore.EncoderConfig{ TimeKey: "ts", @@ -112,11 +138,22 @@ func NewProductionEncoderConfig() zapcore.EncoderConfig { } } -// NewProductionConfig is a reasonable production logging configuration. -// Logging is enabled at InfoLevel and above. +// NewProductionConfig builds a reasonable default production logging +// configuration. +// Logging is enabled at InfoLevel and above, and uses a JSON encoder. +// Logs are written to standard error. +// Stacktraces are included on logs of ErrorLevel and above. +// DPanicLevel logs will not panic, but will write a stacktrace. +// +// Sampling is enabled at 100:100 by default, +// meaning that after the first 100 log entries +// with the same level and message in the same second, +// it will log every 100th entry +// with the same level and message in the same second. +// You may disable this behavior by setting Sampling to nil. // -// It uses a JSON encoder, writes to standard error, and enables sampling. -// Stacktraces are automatically included on logs of ErrorLevel and above. +// See [NewProductionEncoderConfig] for information +// on the default encoder configuration. func NewProductionConfig() Config { return Config{ Level: NewAtomicLevelAt(InfoLevel), @@ -134,6 +171,32 @@ func NewProductionConfig() Config { // NewDevelopmentEncoderConfig returns an opinionated EncoderConfig for // development environments. +// +// Messages encoded with this configuration will use Zap's console encoder +// intended to print human-readable output. +// It will print log messages with the following information: +// +// - The log level (e.g. "INFO", "ERROR"). +// - The time in ISO8601 format (e.g. "2017-01-01T12:00:00Z"). +// - The message passed to the log statement. +// - If available, a short path to the file and line number +// where the log statement was issued. 
+// The logger configuration determines whether this field is captured. +// - If available, a stacktrace from the line +// where the log statement was issued. +// The logger configuration determines whether this field is captured. +// +// By default, the following formats are used for different types: +// +// - Time is formatted in ISO8601 format (e.g. "2017-01-01T12:00:00Z"). +// - Duration is formatted as a string (e.g. "1.234s"). +// +// You may change these by setting the appropriate fields in the returned +// object. +// For example, use the following to change the time encoding format: +// +// cfg := zap.NewDevelopmentEncoderConfig() +// cfg.EncodeTime = zapcore.ISO8601TimeEncoder func NewDevelopmentEncoderConfig() zapcore.EncoderConfig { return zapcore.EncoderConfig{ // Keys can be anything except the empty string. @@ -152,12 +215,15 @@ func NewDevelopmentEncoderConfig() zapcore.EncoderConfig { } } -// NewDevelopmentConfig is a reasonable development logging configuration. -// Logging is enabled at DebugLevel and above. +// NewDevelopmentConfig builds a reasonable default development logging +// configuration. +// Logging is enabled at DebugLevel and above, and uses a console encoder. +// Logs are written to standard error. +// Stacktraces are included on logs of WarnLevel and above. +// DPanicLevel logs will panic. // -// It enables development mode (which makes DPanicLevel logs panic), uses a -// console encoder, writes to standard error, and disables sampling. -// Stacktraces are automatically included on logs of WarnLevel and above. +// See [NewDevelopmentEncoderConfig] for information +// on the default encoder configuration. func NewDevelopmentConfig() Config { return Config{ Level: NewAtomicLevelAt(DebugLevel), diff --git a/tools/vendor/go.uber.org/zap/error.go b/tools/vendor/go.uber.org/zap/error.go index 65982a51e..45f7b838d 100644 --- a/tools/vendor/go.uber.org/zap/error.go +++ b/tools/vendor/go.uber.org/zap/error.go @@ -21,14 +21,13 @@ package zap import ( - "sync" - + "go.uber.org/zap/internal/pool" "go.uber.org/zap/zapcore" ) -var _errArrayElemPool = sync.Pool{New: func() interface{} { +var _errArrayElemPool = pool.New(func() *errArrayElem { return &errArrayElem{} -}} +}) // Error is shorthand for the common idiom NamedError("error", err). func Error(err error) Field { @@ -60,11 +59,14 @@ func (errs errArray) MarshalLogArray(arr zapcore.ArrayEncoder) error { // potentially an "errorVerbose" attribute, we need to wrap it in a // type that implements LogObjectMarshaler. To prevent this from // allocating, pool the wrapper type. - elem := _errArrayElemPool.Get().(*errArrayElem) + elem := _errArrayElemPool.Get() elem.error = errs[i] - arr.AppendObject(elem) + err := arr.AppendObject(elem) elem.error = nil _errArrayElemPool.Put(elem) + if err != nil { + return err + } } return nil } diff --git a/tools/vendor/go.uber.org/zap/field.go b/tools/vendor/go.uber.org/zap/field.go index bbb745db5..6743930b8 100644 --- a/tools/vendor/go.uber.org/zap/field.go +++ b/tools/vendor/go.uber.org/zap/field.go @@ -25,6 +25,7 @@ import ( "math" "time" + "go.uber.org/zap/internal/stacktrace" "go.uber.org/zap/zapcore" ) @@ -374,7 +375,7 @@ func StackSkip(key string, skip int) Field { // from expanding the zapcore.Field union struct to include a byte slice. Since // taking a stacktrace is already so expensive (~10us), the extra allocation // is okay. 
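Tying the config documentation above together, a sketch that starts from the production defaults and overrides the pieces the new comments call out:

cfg := zap.NewProductionConfig()
cfg.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder // readable timestamps
cfg.Sampling = nil                                        // opt out of the 100:100 sampling

logger, err := cfg.Build()
if err != nil {
	panic(err)
}
defer logger.Sync()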
- return String(key, takeStacktrace(skip+1)) // skip StackSkip + return String(key, stacktrace.Take(skip+1)) // skip StackSkip } // Duration constructs a field with the given key and value. The encoder @@ -410,6 +411,65 @@ func Inline(val zapcore.ObjectMarshaler) Field { } } +// Dict constructs a field containing the provided key-value pairs. +// It acts similar to [Object], but with the fields specified as arguments. +func Dict(key string, val ...Field) Field { + return dictField(key, val) +} + +// We need a function with the signature (string, T) for zap.Any. +func dictField(key string, val []Field) Field { + return Object(key, dictObject(val)) +} + +type dictObject []Field + +func (d dictObject) MarshalLogObject(enc zapcore.ObjectEncoder) error { + for _, f := range d { + f.AddTo(enc) + } + return nil +} + +// We discovered an issue where zap.Any can cause a performance degradation +// when used in new goroutines. +// +// This happens because the compiler assigns 4.8kb (one zap.Field per arm of +// switch statement) of stack space for zap.Any when it takes the form: +// +// switch v := v.(type) { +// case string: +// return String(key, v) +// case int: +// return Int(key, v) +// // ... +// default: +// return Reflect(key, v) +// } +// +// To avoid this, we use the type switch to assign a value to a single local variable +// and then call a function on it. +// The local variable is just a function reference so it doesn't allocate +// when converted to an interface{}. +// +// A fair bit of experimentation went into this. +// See also: +// +// - https://github.com/uber-go/zap/pull/1301 +// - https://github.com/uber-go/zap/pull/1303 +// - https://github.com/uber-go/zap/pull/1304 +// - https://github.com/uber-go/zap/pull/1305 +// - https://github.com/uber-go/zap/pull/1308 +// +// See https://github.com/golang/go/issues/62077 for upstream issue. +type anyFieldC[T any] func(string, T) Field + +func (f anyFieldC[T]) Any(key string, val any) Field { + v, _ := val.(T) + // val is guaranteed to be a T, except when it's nil. + return f(key, v) +} + // Any takes a key and an arbitrary value and chooses the best way to represent // them as a field, falling back to a reflection-based approach only if // necessary. @@ -418,132 +478,138 @@ func Inline(val zapcore.ObjectMarshaler) Field { // them. To minimize surprises, []byte values are treated as binary blobs, byte // values are treated as uint8, and runes are always treated as integers. 
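A sketch of the two entry points this hunk wires up: Dict groups fields under a single key without defining an ObjectMarshaler type, and Any (below) now routes a []Field argument through the same dictField path. The field values are hypothetical:

logger.Info("user login",
	zap.Dict("user", zap.String("name", "jane"), zap.Int("id", 42)),
)

// Equivalent via Any, which picks the Dict representation for []Field:
logger.Info("user login",
	zap.Any("user", []zap.Field{zap.String("name", "jane"), zap.Int("id", 42)}),
)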
func Any(key string, value interface{}) Field { - switch val := value.(type) { + var c interface{ Any(string, any) Field } + + switch value.(type) { case zapcore.ObjectMarshaler: - return Object(key, val) + c = anyFieldC[zapcore.ObjectMarshaler](Object) case zapcore.ArrayMarshaler: - return Array(key, val) + c = anyFieldC[zapcore.ArrayMarshaler](Array) + case []Field: + c = anyFieldC[[]Field](dictField) case bool: - return Bool(key, val) + c = anyFieldC[bool](Bool) case *bool: - return Boolp(key, val) + c = anyFieldC[*bool](Boolp) case []bool: - return Bools(key, val) + c = anyFieldC[[]bool](Bools) case complex128: - return Complex128(key, val) + c = anyFieldC[complex128](Complex128) case *complex128: - return Complex128p(key, val) + c = anyFieldC[*complex128](Complex128p) case []complex128: - return Complex128s(key, val) + c = anyFieldC[[]complex128](Complex128s) case complex64: - return Complex64(key, val) + c = anyFieldC[complex64](Complex64) case *complex64: - return Complex64p(key, val) + c = anyFieldC[*complex64](Complex64p) case []complex64: - return Complex64s(key, val) + c = anyFieldC[[]complex64](Complex64s) case float64: - return Float64(key, val) + c = anyFieldC[float64](Float64) case *float64: - return Float64p(key, val) + c = anyFieldC[*float64](Float64p) case []float64: - return Float64s(key, val) + c = anyFieldC[[]float64](Float64s) case float32: - return Float32(key, val) + c = anyFieldC[float32](Float32) case *float32: - return Float32p(key, val) + c = anyFieldC[*float32](Float32p) case []float32: - return Float32s(key, val) + c = anyFieldC[[]float32](Float32s) case int: - return Int(key, val) + c = anyFieldC[int](Int) case *int: - return Intp(key, val) + c = anyFieldC[*int](Intp) case []int: - return Ints(key, val) + c = anyFieldC[[]int](Ints) case int64: - return Int64(key, val) + c = anyFieldC[int64](Int64) case *int64: - return Int64p(key, val) + c = anyFieldC[*int64](Int64p) case []int64: - return Int64s(key, val) + c = anyFieldC[[]int64](Int64s) case int32: - return Int32(key, val) + c = anyFieldC[int32](Int32) case *int32: - return Int32p(key, val) + c = anyFieldC[*int32](Int32p) case []int32: - return Int32s(key, val) + c = anyFieldC[[]int32](Int32s) case int16: - return Int16(key, val) + c = anyFieldC[int16](Int16) case *int16: - return Int16p(key, val) + c = anyFieldC[*int16](Int16p) case []int16: - return Int16s(key, val) + c = anyFieldC[[]int16](Int16s) case int8: - return Int8(key, val) + c = anyFieldC[int8](Int8) case *int8: - return Int8p(key, val) + c = anyFieldC[*int8](Int8p) case []int8: - return Int8s(key, val) + c = anyFieldC[[]int8](Int8s) case string: - return String(key, val) + c = anyFieldC[string](String) case *string: - return Stringp(key, val) + c = anyFieldC[*string](Stringp) case []string: - return Strings(key, val) + c = anyFieldC[[]string](Strings) case uint: - return Uint(key, val) + c = anyFieldC[uint](Uint) case *uint: - return Uintp(key, val) + c = anyFieldC[*uint](Uintp) case []uint: - return Uints(key, val) + c = anyFieldC[[]uint](Uints) case uint64: - return Uint64(key, val) + c = anyFieldC[uint64](Uint64) case *uint64: - return Uint64p(key, val) + c = anyFieldC[*uint64](Uint64p) case []uint64: - return Uint64s(key, val) + c = anyFieldC[[]uint64](Uint64s) case uint32: - return Uint32(key, val) + c = anyFieldC[uint32](Uint32) case *uint32: - return Uint32p(key, val) + c = anyFieldC[*uint32](Uint32p) case []uint32: - return Uint32s(key, val) + c = anyFieldC[[]uint32](Uint32s) case uint16: - return Uint16(key, val) + c = 
anyFieldC[uint16](Uint16) case *uint16: - return Uint16p(key, val) + c = anyFieldC[*uint16](Uint16p) case []uint16: - return Uint16s(key, val) + c = anyFieldC[[]uint16](Uint16s) case uint8: - return Uint8(key, val) + c = anyFieldC[uint8](Uint8) case *uint8: - return Uint8p(key, val) + c = anyFieldC[*uint8](Uint8p) case []byte: - return Binary(key, val) + c = anyFieldC[[]byte](Binary) case uintptr: - return Uintptr(key, val) + c = anyFieldC[uintptr](Uintptr) case *uintptr: - return Uintptrp(key, val) + c = anyFieldC[*uintptr](Uintptrp) case []uintptr: - return Uintptrs(key, val) + c = anyFieldC[[]uintptr](Uintptrs) case time.Time: - return Time(key, val) + c = anyFieldC[time.Time](Time) case *time.Time: - return Timep(key, val) + c = anyFieldC[*time.Time](Timep) case []time.Time: - return Times(key, val) + c = anyFieldC[[]time.Time](Times) case time.Duration: - return Duration(key, val) + c = anyFieldC[time.Duration](Duration) case *time.Duration: - return Durationp(key, val) + c = anyFieldC[*time.Duration](Durationp) case []time.Duration: - return Durations(key, val) + c = anyFieldC[[]time.Duration](Durations) case error: - return NamedError(key, val) + c = anyFieldC[error](NamedError) case []error: - return Errors(key, val) + c = anyFieldC[[]error](Errors) case fmt.Stringer: - return Stringer(key, val) + c = anyFieldC[fmt.Stringer](Stringer) default: - return Reflect(key, val) + c = anyFieldC[any](Reflect) } + + return c.Any(key, value) } diff --git a/tools/vendor/go.uber.org/zap/http_handler.go b/tools/vendor/go.uber.org/zap/http_handler.go index 632b6831a..2be8f6515 100644 --- a/tools/vendor/go.uber.org/zap/http_handler.go +++ b/tools/vendor/go.uber.org/zap/http_handler.go @@ -69,6 +69,13 @@ import ( // // curl -X PUT localhost:8080/log/level -H "Content-Type: application/json" -d '{"level":"debug"}' func (lvl AtomicLevel) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if err := lvl.serveHTTP(w, r); err != nil { + w.WriteHeader(http.StatusInternalServerError) + fmt.Fprintf(w, "internal error: %v", err) + } +} + +func (lvl AtomicLevel) serveHTTP(w http.ResponseWriter, r *http.Request) error { type errorResponse struct { Error string `json:"error"` } @@ -80,19 +87,20 @@ func (lvl AtomicLevel) ServeHTTP(w http.ResponseWriter, r *http.Request) { switch r.Method { case http.MethodGet: - enc.Encode(payload{Level: lvl.Level()}) + return enc.Encode(payload{Level: lvl.Level()}) + case http.MethodPut: requestedLvl, err := decodePutRequest(r.Header.Get("Content-Type"), r) if err != nil { w.WriteHeader(http.StatusBadRequest) - enc.Encode(errorResponse{Error: err.Error()}) - return + return enc.Encode(errorResponse{Error: err.Error()}) } lvl.SetLevel(requestedLvl) - enc.Encode(payload{Level: lvl.Level()}) + return enc.Encode(payload{Level: lvl.Level()}) + default: w.WriteHeader(http.StatusMethodNotAllowed) - enc.Encode(errorResponse{ + return enc.Encode(errorResponse{ Error: "Only GET and PUT are supported.", }) } @@ -129,5 +137,4 @@ func decodePutJSON(body io.Reader) (zapcore.Level, error) { return 0, errors.New("must specify logging level") } return *pld.Level, nil - } diff --git a/tools/vendor/go.uber.org/zap/internal/level_enabler.go b/tools/vendor/go.uber.org/zap/internal/level_enabler.go index 5f3e3f1b9..40bfed81e 100644 --- a/tools/vendor/go.uber.org/zap/internal/level_enabler.go +++ b/tools/vendor/go.uber.org/zap/internal/level_enabler.go @@ -18,6 +18,8 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. 
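The AtomicLevel handler refactored in http_handler.go above is wired up as an ordinary http.Handler; the port and route here are arbitrary:

lvl := zap.NewAtomicLevel()
http.Handle("/log/level", lvl)
go func() { _ = http.ListenAndServe(":8080", nil) }()

// GET reports {"level":"info"}; PUT changes it:
//   curl -X PUT -H "Content-Type: application/json" \
//        -d '{"level":"debug"}' localhost:8080/log/level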
+// Package internal and its subpackages hold types and functionality +// that are not part of Zap's public API. package internal import "go.uber.org/zap/zapcore" diff --git a/tools/vendor/go.uber.org/atomic/bool_ext.go b/tools/vendor/go.uber.org/zap/internal/pool/pool.go similarity index 56% rename from tools/vendor/go.uber.org/atomic/bool_ext.go rename to tools/vendor/go.uber.org/zap/internal/pool/pool.go index c7bf7a827..60e9d2c43 100644 --- a/tools/vendor/go.uber.org/atomic/bool_ext.go +++ b/tools/vendor/go.uber.org/zap/internal/pool/pool.go @@ -1,4 +1,4 @@ -// Copyright (c) 2020 Uber Technologies, Inc. +// Copyright (c) 2023 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -18,36 +18,41 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. -package atomic +// Package pool provides internal pool utilities. +package pool import ( - "strconv" + "sync" ) -//go:generate bin/gen-atomicwrapper -name=Bool -type=bool -wrapped=Uint32 -pack=boolToInt -unpack=truthy -cas -swap -json -file=bool.go - -func truthy(n uint32) bool { - return n == 1 +// A Pool is a generic wrapper around [sync.Pool] to provide strongly-typed +// object pooling. +// +// Note that SA6002 (ref: https://staticcheck.io/docs/checks/#SA6002) will +// not be detected, so all internal pool use must take care to only store +// pointer types. +type Pool[T any] struct { + pool sync.Pool } -func boolToInt(b bool) uint32 { - if b { - return 1 +// New returns a new [Pool] for T, and will use fn to construct new Ts when +// the pool is empty. +func New[T any](fn func() T) *Pool[T] { + return &Pool[T]{ + pool: sync.Pool{ + New: func() any { + return fn() + }, + }, } - return 0 } -// Toggle atomically negates the Boolean and returns the previous value. -func (b *Bool) Toggle() bool { - for { - old := b.Load() - if b.CAS(old, !old) { - return old - } - } +// Get gets a T from the pool, or creates a new one if the pool is empty. +func (p *Pool[T]) Get() T { + return p.pool.Get().(T) } -// String encodes the wrapped value as a string. -func (b *Bool) String() string { - return strconv.FormatBool(b.Load()) +// Put returns x into the pool. +func (p *Pool[T]) Put(x T) { + p.pool.Put(x) } diff --git a/tools/vendor/go.uber.org/zap/stacktrace.go b/tools/vendor/go.uber.org/zap/internal/stacktrace/stack.go similarity index 73% rename from tools/vendor/go.uber.org/zap/stacktrace.go rename to tools/vendor/go.uber.org/zap/internal/stacktrace/stack.go index 817a3bde8..82af7551f 100644 --- a/tools/vendor/go.uber.org/zap/stacktrace.go +++ b/tools/vendor/go.uber.org/zap/internal/stacktrace/stack.go @@ -1,4 +1,4 @@ -// Copyright (c) 2016 Uber Technologies, Inc. +// Copyright (c) 2023 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -18,25 +18,26 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. -package zap +// Package stacktrace provides support for gathering stack traces +// efficiently. 
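The pool package above is internal to zap, but the pattern is small enough to sketch, assuming the same New/Get/Put surface shown in the diff: a generic wrapper keeps Get and Put strongly typed, and, per the SA6002 note, only pointer types are pooled (bytes.Buffer stands in here):

var bufPool = pool.New(func() *bytes.Buffer { return new(bytes.Buffer) })

func render(msg string) string {
	buf := bufPool.Get() // typed: no interface{} assertion at the call site
	defer bufPool.Put(buf)
	buf.Reset()
	buf.WriteString(msg)
	return buf.String()
}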
+package stacktrace import ( "runtime" - "sync" "go.uber.org/zap/buffer" "go.uber.org/zap/internal/bufferpool" + "go.uber.org/zap/internal/pool" ) -var _stacktracePool = sync.Pool{ - New: func() interface{} { - return &stacktrace{ - storage: make([]uintptr, 64), - } - }, -} +var _stackPool = pool.New(func() *Stack { + return &Stack{ + storage: make([]uintptr, 64), + } +}) -type stacktrace struct { +// Stack is a captured stack trace. +type Stack struct { pcs []uintptr // program counters; always a subslice of storage frames *runtime.Frames @@ -50,30 +51,30 @@ type stacktrace struct { storage []uintptr } -// stacktraceDepth specifies how deep of a stack trace should be captured. -type stacktraceDepth int +// Depth specifies how deep of a stack trace should be captured. +type Depth int const ( - // stacktraceFirst captures only the first frame. - stacktraceFirst stacktraceDepth = iota + // First captures only the first frame. + First Depth = iota - // stacktraceFull captures the entire call stack, allocating more + // Full captures the entire call stack, allocating more // storage for it if needed. - stacktraceFull + Full ) -// captureStacktrace captures a stack trace of the specified depth, skipping +// Capture captures a stack trace of the specified depth, skipping // the provided number of frames. skip=0 identifies the caller of -// captureStacktrace. +// Capture. // // The caller must call Free on the returned stacktrace after using it. -func captureStacktrace(skip int, depth stacktraceDepth) *stacktrace { - stack := _stacktracePool.Get().(*stacktrace) +func Capture(skip int, depth Depth) *Stack { + stack := _stackPool.Get() switch depth { - case stacktraceFirst: + case First: stack.pcs = stack.storage[:1] - case stacktraceFull: + case Full: stack.pcs = stack.storage } @@ -87,7 +88,7 @@ func captureStacktrace(skip int, depth stacktraceDepth) *stacktrace { // runtime.Callers truncates the recorded stacktrace if there is no // room in the provided slice. For the full stack trace, keep expanding // storage until there are fewer frames than there is room. - if depth == stacktraceFull { + if depth == Full { pcs := stack.pcs for numFrames == len(pcs) { pcs = make([]uintptr, len(pcs)*2) @@ -109,50 +110,54 @@ func captureStacktrace(skip int, depth stacktraceDepth) *stacktrace { // Free releases resources associated with this stacktrace // and returns it back to the pool. -func (st *stacktrace) Free() { +func (st *Stack) Free() { st.frames = nil st.pcs = nil - _stacktracePool.Put(st) + _stackPool.Put(st) } // Count reports the total number of frames in this stacktrace. // Count DOES NOT change as Next is called. -func (st *stacktrace) Count() int { +func (st *Stack) Count() int { return len(st.pcs) } // Next returns the next frame in the stack trace, // and a boolean indicating whether there are more after it. -func (st *stacktrace) Next() (_ runtime.Frame, more bool) { +func (st *Stack) Next() (_ runtime.Frame, more bool) { return st.frames.Next() } -func takeStacktrace(skip int) string { - stack := captureStacktrace(skip+1, stacktraceFull) +// Take returns a string representation of the current stacktrace. +// +// skip is the number of frames to skip before recording the stack trace. +// skip=0 identifies the caller of Take. 
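The public entry points into this internal package remain the field constructors in field.go (zap.Stack and zap.StackSkip, seen earlier in this patch); a sketch, with err being whatever failed:

logger.Error("request failed",
	zap.Error(err),
	zap.Stack("stacktrace"), // capture from this call site
)

// Or skip wrapper frames when logging from a helper:
logger.Error("request failed", zap.StackSkip("stacktrace", 1))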
+func Take(skip int) string { + stack := Capture(skip+1, Full) defer stack.Free() buffer := bufferpool.Get() defer buffer.Free() - stackfmt := newStackFormatter(buffer) + stackfmt := NewFormatter(buffer) stackfmt.FormatStack(stack) return buffer.String() } -// stackFormatter formats a stack trace into a readable string representation. -type stackFormatter struct { +// Formatter formats a stack trace into a readable string representation. +type Formatter struct { b *buffer.Buffer nonEmpty bool // whehther we've written at least one frame already } -// newStackFormatter builds a new stackFormatter. -func newStackFormatter(b *buffer.Buffer) stackFormatter { - return stackFormatter{b: b} +// NewFormatter builds a new Formatter. +func NewFormatter(b *buffer.Buffer) Formatter { + return Formatter{b: b} } // FormatStack formats all remaining frames in the provided stacktrace -- minus // the final runtime.main/runtime.goexit frame. -func (sf *stackFormatter) FormatStack(stack *stacktrace) { +func (sf *Formatter) FormatStack(stack *Stack) { // Note: On the last iteration, frames.Next() returns false, with a valid // frame, but we ignore this frame. The last frame is a runtime frame which // adds noise, since it's only either runtime.main or runtime.goexit. @@ -162,7 +167,7 @@ func (sf *stackFormatter) FormatStack(stack *stacktrace) { } // FormatFrame formats the given frame. -func (sf *stackFormatter) FormatFrame(frame runtime.Frame) { +func (sf *Formatter) FormatFrame(frame runtime.Frame) { if sf.nonEmpty { sf.b.AppendByte('\n') } diff --git a/tools/vendor/go.uber.org/zap/level.go b/tools/vendor/go.uber.org/zap/level.go index db951e19a..155b208bd 100644 --- a/tools/vendor/go.uber.org/zap/level.go +++ b/tools/vendor/go.uber.org/zap/level.go @@ -21,7 +21,8 @@ package zap import ( - "go.uber.org/atomic" + "sync/atomic" + "go.uber.org/zap/internal" "go.uber.org/zap/zapcore" ) @@ -76,9 +77,9 @@ var _ internal.LeveledEnabler = AtomicLevel{} // NewAtomicLevel creates an AtomicLevel with InfoLevel and above logging // enabled. func NewAtomicLevel() AtomicLevel { - return AtomicLevel{ - l: atomic.NewInt32(int32(InfoLevel)), - } + lvl := AtomicLevel{l: new(atomic.Int32)} + lvl.l.Store(int32(InfoLevel)) + return lvl } // NewAtomicLevelAt is a convenience function that creates an AtomicLevel diff --git a/tools/vendor/go.uber.org/zap/logger.go b/tools/vendor/go.uber.org/zap/logger.go index cd44030d1..c4d300323 100644 --- a/tools/vendor/go.uber.org/zap/logger.go +++ b/tools/vendor/go.uber.org/zap/logger.go @@ -27,6 +27,7 @@ import ( "strings" "go.uber.org/zap/internal/bufferpool" + "go.uber.org/zap/internal/stacktrace" "go.uber.org/zap/zapcore" ) @@ -42,6 +43,7 @@ type Logger struct { development bool addCaller bool + onPanic zapcore.CheckWriteHook // default is WriteThenPanic onFatal zapcore.CheckWriteHook // default is WriteThenFatal name string @@ -173,7 +175,8 @@ func (log *Logger) WithOptions(opts ...Option) *Logger { } // With creates a child logger and adds structured context to it. Fields added -// to the child don't affect the parent, and vice versa. +// to the child don't affect the parent, and vice versa. Any fields that +// require evaluation (such as Objects) are evaluated upon invocation of With. func (log *Logger) With(fields ...Field) *Logger { if len(fields) == 0 { return log @@ -183,6 +186,28 @@ func (log *Logger) With(fields ...Field) *Logger { return l } +// WithLazy creates a child logger and adds structured context to it lazily. 
+// +// The fields are evaluated only if the logger is further chained with [With] +// or is written to with any of the log level methods. +// Until that occurs, the logger may retain references to objects inside the fields, +// and logging will reflect the state of an object at the time of logging, +// not the time of WithLazy(). +// +// WithLazy provides a worthwhile performance optimization for contextual loggers +// when the likelihood of using the child logger is low, +// such as error paths and rarely taken branches. +// +// Similar to [With], fields added to the child don't affect the parent, and vice versa. +func (log *Logger) WithLazy(fields ...Field) *Logger { + if len(fields) == 0 { + return log + } + return log.WithOptions(WrapCore(func(core zapcore.Core) zapcore.Core { + return zapcore.NewLazyWith(core, fields) + })) +} + // Level reports the minimum enabled level for this logger. // // For NopLoggers, this is [zapcore.InvalidLevel]. @@ -199,6 +224,8 @@ func (log *Logger) Check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { // Log logs a message at the specified level. The message includes any fields // passed at the log site, as well as any fields accumulated on the logger. +// Any Fields that require evaluation (such as Objects) are evaluated upon +// invocation of Log. func (log *Logger) Log(lvl zapcore.Level, msg string, fields ...Field) { if ce := log.check(lvl, msg); ce != nil { ce.Write(fields...) @@ -281,9 +308,15 @@ func (log *Logger) Core() zapcore.Core { return log.core } +// Name returns the Logger's underlying name, +// or an empty string if the logger is unnamed. +func (log *Logger) Name() string { + return log.name +} + func (log *Logger) clone() *Logger { - copy := *log - return © + clone := *log + return &clone } func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { @@ -313,27 +346,12 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { // Set up any required terminal behavior. switch ent.Level { case zapcore.PanicLevel: - ce = ce.After(ent, zapcore.WriteThenPanic) + ce = ce.After(ent, terminalHookOverride(zapcore.WriteThenPanic, log.onPanic)) case zapcore.FatalLevel: - onFatal := log.onFatal - // nil or WriteThenNoop will lead to continued execution after - // a Fatal log entry, which is unexpected. For example, - // - // f, err := os.Open(..) - // if err != nil { - // log.Fatal("cannot open", zap.Error(err)) - // } - // fmt.Println(f.Name()) - // - // The f.Name() will panic if we continue execution after the - // log.Fatal. - if onFatal == nil || onFatal == zapcore.WriteThenNoop { - onFatal = zapcore.WriteThenFatal - } - ce = ce.After(ent, onFatal) + ce = ce.After(ent, terminalHookOverride(zapcore.WriteThenFatal, log.onFatal)) case zapcore.DPanicLevel: if log.development { - ce = ce.After(ent, zapcore.WriteThenPanic) + ce = ce.After(ent, terminalHookOverride(zapcore.WriteThenPanic, log.onPanic)) } } @@ -354,17 +372,17 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { // Adding the caller or stack trace requires capturing the callers of // this function. We'll share information between these two. 
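A sketch of where the WithLazy method documented above pays off: a child logger created on a path that rarely logs, so the fields are built only if something is actually written. requestID and doWork are hypothetical:

child := logger.WithLazy(zap.String("request_id", requestID))

if err := doWork(); err != nil {
	// Fields are evaluated here, on first use, not at WithLazy time.
	child.Error("work failed", zap.Error(err))
}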
- stackDepth := stacktraceFirst + stackDepth := stacktrace.First if addStack { - stackDepth = stacktraceFull + stackDepth = stacktrace.Full } - stack := captureStacktrace(log.callerSkip+callerSkipOffset, stackDepth) + stack := stacktrace.Capture(log.callerSkip+callerSkipOffset, stackDepth) defer stack.Free() if stack.Count() == 0 { if log.addCaller { fmt.Fprintf(log.errorOutput, "%v Logger.check error: failed to get caller\n", ent.Time.UTC()) - log.errorOutput.Sync() + _ = log.errorOutput.Sync() } return ce } @@ -385,7 +403,7 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { buffer := bufferpool.Get() defer buffer.Free() - stackfmt := newStackFormatter(buffer) + stackfmt := stacktrace.NewFormatter(buffer) // We've already extracted the first frame, so format that // separately and defer to stackfmt for the rest. @@ -398,3 +416,20 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { return ce } + +func terminalHookOverride(defaultHook, override zapcore.CheckWriteHook) zapcore.CheckWriteHook { + // A nil or WriteThenNoop hook will lead to continued execution after + // a Panic or Fatal log entry, which is unexpected. For example, + // + // f, err := os.Open(..) + // if err != nil { + // log.Fatal("cannot open", zap.Error(err)) + // } + // fmt.Println(f.Name()) + // + // The f.Name() will panic if we continue execution after the log.Fatal. + if override == nil || override == zapcore.WriteThenNoop { + return defaultHook + } + return override +} diff --git a/tools/vendor/go.uber.org/zap/options.go b/tools/vendor/go.uber.org/zap/options.go index c4f3bca3d..43d357ac9 100644 --- a/tools/vendor/go.uber.org/zap/options.go +++ b/tools/vendor/go.uber.org/zap/options.go @@ -132,6 +132,21 @@ func IncreaseLevel(lvl zapcore.LevelEnabler) Option { }) } +// WithPanicHook sets a CheckWriteHook to run on Panic/DPanic logs. +// Zap will call this hook after writing a log statement with a Panic/DPanic level. +// +// For example, the following builds a logger that will exit the current +// goroutine after writing a Panic/DPanic log message, but it will not start a panic. +// +// zap.New(core, zap.WithPanicHook(zapcore.WriteThenGoexit)) +// +// This is useful for testing Panic/DPanic log output. +func WithPanicHook(hook zapcore.CheckWriteHook) Option { + return optionFunc(func(log *Logger) { + log.onPanic = hook + }) +} + // OnFatal sets the action to take on fatal logs. // // Deprecated: Use [WithFatalHook] instead. diff --git a/tools/vendor/go.uber.org/zap/sink.go b/tools/vendor/go.uber.org/zap/sink.go index 478c9a10f..499772a00 100644 --- a/tools/vendor/go.uber.org/zap/sink.go +++ b/tools/vendor/go.uber.org/zap/sink.go @@ -66,7 +66,8 @@ func newSinkRegistry() *sinkRegistry { factories: make(map[string]func(*url.URL) (Sink, error)), openFile: os.OpenFile, } - sr.RegisterSink(schemeFile, sr.newFileSinkFromURL) + // Infallible operation: the registry is empty, so we can't have a conflict. 
+ _ = sr.RegisterSink(schemeFile, sr.newFileSinkFromURL) return sr } @@ -154,7 +155,7 @@ func (sr *sinkRegistry) newFileSinkFromPath(path string) (Sink, error) { case "stderr": return nopCloserSink{os.Stderr}, nil } - return sr.openFile(path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666) + return sr.openFile(path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0o666) } func normalizeScheme(s string) (string, error) { diff --git a/tools/vendor/go.uber.org/zap/sugar.go b/tools/vendor/go.uber.org/zap/sugar.go index ac387b3e4..8904cd087 100644 --- a/tools/vendor/go.uber.org/zap/sugar.go +++ b/tools/vendor/go.uber.org/zap/sugar.go @@ -115,6 +115,21 @@ func (s *SugaredLogger) With(args ...interface{}) *SugaredLogger { return &SugaredLogger{base: s.base.With(s.sweetenFields(args)...)} } +// WithLazy adds a variadic number of fields to the logging context lazily. +// The fields are evaluated only if the logger is further chained with [With] +// or is written to with any of the log level methods. +// Until that occurs, the logger may retain references to objects inside the fields, +// and logging will reflect the state of an object at the time of logging, +// not the time of WithLazy(). +// +// Similar to [With], fields added to the child don't affect the parent, +// and vice versa. Also, the keys in key-value pairs should be strings. In development, +// passing a non-string key panics, while in production it logs an error and skips the pair. +// Passing an orphaned key has the same behavior. +func (s *SugaredLogger) WithLazy(args ...interface{}) *SugaredLogger { + return &SugaredLogger{base: s.base.WithLazy(s.sweetenFields(args)...)} +} + // Level reports the minimum enabled level for this logger. // // For NopLoggers, this is [zapcore.InvalidLevel]. @@ -122,78 +137,110 @@ func (s *SugaredLogger) Level() zapcore.Level { return zapcore.LevelOf(s.base.core) } -// Debug uses fmt.Sprint to construct and log a message. +// Log logs the provided arguments at provided level. +// Spaces are added between arguments when neither is a string. +func (s *SugaredLogger) Log(lvl zapcore.Level, args ...interface{}) { + s.log(lvl, "", args, nil) +} + +// Debug logs the provided arguments at [DebugLevel]. +// Spaces are added between arguments when neither is a string. func (s *SugaredLogger) Debug(args ...interface{}) { s.log(DebugLevel, "", args, nil) } -// Info uses fmt.Sprint to construct and log a message. +// Info logs the provided arguments at [InfoLevel]. +// Spaces are added between arguments when neither is a string. func (s *SugaredLogger) Info(args ...interface{}) { s.log(InfoLevel, "", args, nil) } -// Warn uses fmt.Sprint to construct and log a message. +// Warn logs the provided arguments at [WarnLevel]. +// Spaces are added between arguments when neither is a string. func (s *SugaredLogger) Warn(args ...interface{}) { s.log(WarnLevel, "", args, nil) } -// Error uses fmt.Sprint to construct and log a message. +// Error logs the provided arguments at [ErrorLevel]. +// Spaces are added between arguments when neither is a string. func (s *SugaredLogger) Error(args ...interface{}) { s.log(ErrorLevel, "", args, nil) } -// DPanic uses fmt.Sprint to construct and log a message. In development, the -// logger then panics. (See DPanicLevel for details.) +// DPanic logs the provided arguments at [DPanicLevel]. +// In development, the logger then panics. (See [DPanicLevel] for details.) +// Spaces are added between arguments when neither is a string. 
func (s *SugaredLogger) DPanic(args ...interface{}) { s.log(DPanicLevel, "", args, nil) } -// Panic uses fmt.Sprint to construct and log a message, then panics. +// Panic constructs a message with the provided arguments and panics. +// Spaces are added between arguments when neither is a string. func (s *SugaredLogger) Panic(args ...interface{}) { s.log(PanicLevel, "", args, nil) } -// Fatal uses fmt.Sprint to construct and log a message, then calls os.Exit. +// Fatal constructs a message with the provided arguments and calls os.Exit. +// Spaces are added between arguments when neither is a string. func (s *SugaredLogger) Fatal(args ...interface{}) { s.log(FatalLevel, "", args, nil) } -// Debugf uses fmt.Sprintf to log a templated message. +// Logf formats the message according to the format specifier +// and logs it at provided level. +func (s *SugaredLogger) Logf(lvl zapcore.Level, template string, args ...interface{}) { + s.log(lvl, template, args, nil) +} + +// Debugf formats the message according to the format specifier +// and logs it at [DebugLevel]. func (s *SugaredLogger) Debugf(template string, args ...interface{}) { s.log(DebugLevel, template, args, nil) } -// Infof uses fmt.Sprintf to log a templated message. +// Infof formats the message according to the format specifier +// and logs it at [InfoLevel]. func (s *SugaredLogger) Infof(template string, args ...interface{}) { s.log(InfoLevel, template, args, nil) } -// Warnf uses fmt.Sprintf to log a templated message. +// Warnf formats the message according to the format specifier +// and logs it at [WarnLevel]. func (s *SugaredLogger) Warnf(template string, args ...interface{}) { s.log(WarnLevel, template, args, nil) } -// Errorf uses fmt.Sprintf to log a templated message. +// Errorf formats the message according to the format specifier +// and logs it at [ErrorLevel]. func (s *SugaredLogger) Errorf(template string, args ...interface{}) { s.log(ErrorLevel, template, args, nil) } -// DPanicf uses fmt.Sprintf to log a templated message. In development, the -// logger then panics. (See DPanicLevel for details.) +// DPanicf formats the message according to the format specifier +// and logs it at [DPanicLevel]. +// In development, the logger then panics. (See [DPanicLevel] for details.) func (s *SugaredLogger) DPanicf(template string, args ...interface{}) { s.log(DPanicLevel, template, args, nil) } -// Panicf uses fmt.Sprintf to log a templated message, then panics. +// Panicf formats the message according to the format specifier +// and panics. func (s *SugaredLogger) Panicf(template string, args ...interface{}) { s.log(PanicLevel, template, args, nil) } -// Fatalf uses fmt.Sprintf to log a templated message, then calls os.Exit. +// Fatalf formats the message according to the format specifier +// and calls os.Exit. func (s *SugaredLogger) Fatalf(template string, args ...interface{}) { s.log(FatalLevel, template, args, nil) } +// Logw logs a message with some additional context. The variadic key-value +// pairs are treated as they are in With. +func (s *SugaredLogger) Logw(lvl zapcore.Level, msg string, keysAndValues ...interface{}) { + s.log(lvl, msg, nil, keysAndValues) +} + // Debugw logs a message with some additional context. The variadic key-value // pairs are treated as they are in With. // @@ -241,38 +288,51 @@ func (s *SugaredLogger) Fatalw(msg string, keysAndValues ...interface{}) { s.log(FatalLevel, msg, nil, keysAndValues) } -// Debugln uses fmt.Sprintln to construct and log a message. 
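The new level-parameterized methods (Log, Logf, Logw, and Logln below) let callers pick the level at runtime instead of choosing a method per level; a sketch, with the level coming from configuration:

lvl := zapcore.InfoLevel // e.g. parsed from a flag
sugar.Log(lvl, "cache warmed")
sugar.Logf(lvl, "fetched %d records", 128)
sugar.Logw(lvl, "fetched records", "count", 128, "source", "primary")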
+// Logln logs a message at provided level. +// Spaces are always added between arguments. +func (s *SugaredLogger) Logln(lvl zapcore.Level, args ...interface{}) { + s.logln(lvl, args, nil) +} + +// Debugln logs a message at [DebugLevel]. +// Spaces are always added between arguments. func (s *SugaredLogger) Debugln(args ...interface{}) { s.logln(DebugLevel, args, nil) } -// Infoln uses fmt.Sprintln to construct and log a message. +// Infoln logs a message at [InfoLevel]. +// Spaces are always added between arguments. func (s *SugaredLogger) Infoln(args ...interface{}) { s.logln(InfoLevel, args, nil) } -// Warnln uses fmt.Sprintln to construct and log a message. +// Warnln logs a message at [WarnLevel]. +// Spaces are always added between arguments. func (s *SugaredLogger) Warnln(args ...interface{}) { s.logln(WarnLevel, args, nil) } -// Errorln uses fmt.Sprintln to construct and log a message. +// Errorln logs a message at [ErrorLevel]. +// Spaces are always added between arguments. func (s *SugaredLogger) Errorln(args ...interface{}) { s.logln(ErrorLevel, args, nil) } -// DPanicln uses fmt.Sprintln to construct and log a message. In development, the -// logger then panics. (See DPanicLevel for details.) +// DPanicln logs a message at [DPanicLevel]. +// In development, the logger then panics. (See [DPanicLevel] for details.) +// Spaces are always added between arguments. func (s *SugaredLogger) DPanicln(args ...interface{}) { s.logln(DPanicLevel, args, nil) } -// Panicln uses fmt.Sprintln to construct and log a message, then panics. +// Panicln logs a message at [PanicLevel] and panics. +// Spaces are always added between arguments. func (s *SugaredLogger) Panicln(args ...interface{}) { s.logln(PanicLevel, args, nil) } -// Fatalln uses fmt.Sprintln to construct and log a message, then calls os.Exit. +// Fatalln logs a message at [FatalLevel] and calls os.Exit. +// Spaces are always added between arguments. func (s *SugaredLogger) Fatalln(args ...interface{}) { s.logln(FatalLevel, args, nil) } diff --git a/tools/vendor/go.uber.org/zap/writer.go b/tools/vendor/go.uber.org/zap/writer.go index f08728e1e..06768c679 100644 --- a/tools/vendor/go.uber.org/zap/writer.go +++ b/tools/vendor/go.uber.org/zap/writer.go @@ -48,21 +48,21 @@ import ( // os.Stdout and os.Stderr. When specified without a scheme, relative file // paths also work. func Open(paths ...string) (zapcore.WriteSyncer, func(), error) { - writers, close, err := open(paths) + writers, closeAll, err := open(paths) if err != nil { return nil, nil, err } writer := CombineWriteSyncers(writers...) 
- return writer, close, nil + return writer, closeAll, nil } func open(paths []string) ([]zapcore.WriteSyncer, func(), error) { writers := make([]zapcore.WriteSyncer, 0, len(paths)) closers := make([]io.Closer, 0, len(paths)) - close := func() { + closeAll := func() { for _, c := range closers { - c.Close() + _ = c.Close() } } @@ -77,11 +77,11 @@ func open(paths []string) ([]zapcore.WriteSyncer, func(), error) { closers = append(closers, sink) } if openErr != nil { - close() + closeAll() return nil, nil, openErr } - return writers, close, nil + return writers, closeAll, nil } // CombineWriteSyncers is a utility that combines multiple WriteSyncers into a diff --git a/tools/vendor/go.uber.org/zap/zapcore/console_encoder.go b/tools/vendor/go.uber.org/zap/zapcore/console_encoder.go index 1aa5dc364..cc2b4e07b 100644 --- a/tools/vendor/go.uber.org/zap/zapcore/console_encoder.go +++ b/tools/vendor/go.uber.org/zap/zapcore/console_encoder.go @@ -22,20 +22,20 @@ package zapcore import ( "fmt" - "sync" "go.uber.org/zap/buffer" "go.uber.org/zap/internal/bufferpool" + "go.uber.org/zap/internal/pool" ) -var _sliceEncoderPool = sync.Pool{ - New: func() interface{} { - return &sliceArrayEncoder{elems: make([]interface{}, 0, 2)} - }, -} +var _sliceEncoderPool = pool.New(func() *sliceArrayEncoder { + return &sliceArrayEncoder{ + elems: make([]interface{}, 0, 2), + } +}) func getSliceEncoder() *sliceArrayEncoder { - return _sliceEncoderPool.Get().(*sliceArrayEncoder) + return _sliceEncoderPool.Get() } func putSliceEncoder(e *sliceArrayEncoder) { @@ -77,7 +77,7 @@ func (c consoleEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, // If this ever becomes a performance bottleneck, we can implement // ArrayEncoder for our plain-text format. arr := getSliceEncoder() - if c.TimeKey != "" && c.EncodeTime != nil { + if c.TimeKey != "" && c.EncodeTime != nil && !ent.Time.IsZero() { c.EncodeTime(ent.Time, arr) } if c.LevelKey != "" && c.EncodeLevel != nil { diff --git a/tools/vendor/go.uber.org/zap/zapcore/core.go b/tools/vendor/go.uber.org/zap/zapcore/core.go index 9dfd64051..776e93f6f 100644 --- a/tools/vendor/go.uber.org/zap/zapcore/core.go +++ b/tools/vendor/go.uber.org/zap/zapcore/core.go @@ -102,9 +102,9 @@ func (c *ioCore) Write(ent Entry, fields []Field) error { return err } if ent.Level > ErrorLevel { - // Since we may be crashing the program, sync the output. Ignore Sync - // errors, pending a clean solution to issue #370. - c.Sync() + // Since we may be crashing the program, sync the output. + // Ignore Sync errors, pending a clean solution to issue #370. + _ = c.Sync() } return nil } diff --git a/tools/vendor/go.uber.org/zap/zapcore/encoder.go b/tools/vendor/go.uber.org/zap/zapcore/encoder.go index 5769ff3e4..044625415 100644 --- a/tools/vendor/go.uber.org/zap/zapcore/encoder.go +++ b/tools/vendor/go.uber.org/zap/zapcore/encoder.go @@ -37,6 +37,9 @@ const DefaultLineEnding = "\n" const OmitKey = "" // A LevelEncoder serializes a Level to a primitive type. +// +// This function must make exactly one call +// to a PrimitiveArrayEncoder's Append* method. type LevelEncoder func(Level, PrimitiveArrayEncoder) // LowercaseLevelEncoder serializes a Level to a lowercase string. For example, @@ -90,6 +93,9 @@ func (e *LevelEncoder) UnmarshalText(text []byte) error { } // A TimeEncoder serializes a time.Time to a primitive type. +// +// This function must make exactly one call +// to a PrimitiveArrayEncoder's Append* method. 
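The "exactly one call" contract documented above matters because array-based encoders pair each appended value with a key; a conforming custom TimeEncoder might look like this (the function name is illustrative):

func rfc3339NanoUTC(t time.Time, enc zapcore.PrimitiveArrayEncoder) {
	// Exactly one Append* call, as the contract requires.
	enc.AppendString(t.UTC().Format(time.RFC3339Nano))
}

cfg := zap.NewProductionEncoderConfig()
cfg.EncodeTime = rfc3339NanoUTC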
type TimeEncoder func(time.Time, PrimitiveArrayEncoder) // EpochTimeEncoder serializes a time.Time to a floating-point number of seconds @@ -219,6 +225,9 @@ func (e *TimeEncoder) UnmarshalJSON(data []byte) error { } // A DurationEncoder serializes a time.Duration to a primitive type. +// +// This function must make exactly one call +// to a PrimitiveArrayEncoder's Append* method. type DurationEncoder func(time.Duration, PrimitiveArrayEncoder) // SecondsDurationEncoder serializes a time.Duration to a floating-point number of seconds elapsed. @@ -262,6 +271,9 @@ func (e *DurationEncoder) UnmarshalText(text []byte) error { } // A CallerEncoder serializes an EntryCaller to a primitive type. +// +// This function must make exactly one call +// to a PrimitiveArrayEncoder's Append* method. type CallerEncoder func(EntryCaller, PrimitiveArrayEncoder) // FullCallerEncoder serializes a caller in /full/path/to/package/file:line @@ -292,6 +304,9 @@ func (e *CallerEncoder) UnmarshalText(text []byte) error { // A NameEncoder serializes a period-separated logger name to a primitive // type. +// +// This function must make exactly one call +// to a PrimitiveArrayEncoder's Append* method. type NameEncoder func(string, PrimitiveArrayEncoder) // FullNameEncoder serializes the logger name as-is. diff --git a/tools/vendor/go.uber.org/zap/zapcore/entry.go b/tools/vendor/go.uber.org/zap/zapcore/entry.go index 9d326e95e..459a5d7ce 100644 --- a/tools/vendor/go.uber.org/zap/zapcore/entry.go +++ b/tools/vendor/go.uber.org/zap/zapcore/entry.go @@ -24,25 +24,23 @@ import ( "fmt" "runtime" "strings" - "sync" "time" "go.uber.org/multierr" "go.uber.org/zap/internal/bufferpool" "go.uber.org/zap/internal/exit" + "go.uber.org/zap/internal/pool" ) -var ( - _cePool = sync.Pool{New: func() interface{} { - // Pre-allocate some space for cores. - return &CheckedEntry{ - cores: make([]Core, 4), - } - }} -) +var _cePool = pool.New(func() *CheckedEntry { + // Pre-allocate some space for cores. + return &CheckedEntry{ + cores: make([]Core, 4), + } +}) func getCheckedEntry() *CheckedEntry { - ce := _cePool.Get().(*CheckedEntry) + ce := _cePool.Get() ce.reset() return ce } @@ -244,7 +242,7 @@ func (ce *CheckedEntry) Write(fields ...Field) { // CheckedEntry is being used after it was returned to the pool, // the message may be an amalgamation from multiple call sites. fmt.Fprintf(ce.ErrorOutput, "%v Unsafe CheckedEntry re-use near Entry %+v.\n", ce.Time, ce.Entry) - ce.ErrorOutput.Sync() + _ = ce.ErrorOutput.Sync() // ignore error } return } @@ -256,7 +254,7 @@ func (ce *CheckedEntry) Write(fields ...Field) { } if err != nil && ce.ErrorOutput != nil { fmt.Fprintf(ce.ErrorOutput, "%v write error: %v\n", ce.Time, err) - ce.ErrorOutput.Sync() + _ = ce.ErrorOutput.Sync() // ignore error } hook := ce.after diff --git a/tools/vendor/go.uber.org/zap/zapcore/error.go b/tools/vendor/go.uber.org/zap/zapcore/error.go index 06359907a..c40df1326 100644 --- a/tools/vendor/go.uber.org/zap/zapcore/error.go +++ b/tools/vendor/go.uber.org/zap/zapcore/error.go @@ -23,7 +23,8 @@ package zapcore import ( "fmt" "reflect" - "sync" + + "go.uber.org/zap/internal/pool" ) // Encodes the given error into fields of an object. 
A field with the given @@ -97,15 +98,18 @@ func (errs errArray) MarshalLogArray(arr ArrayEncoder) error { } el := newErrArrayElem(errs[i]) - arr.AppendObject(el) + err := arr.AppendObject(el) el.Free() + if err != nil { + return err + } } return nil } -var _errArrayElemPool = sync.Pool{New: func() interface{} { +var _errArrayElemPool = pool.New(func() *errArrayElem { return &errArrayElem{} -}} +}) // Encodes any error into a {"error": ...} re-using the same errors logic. // @@ -113,7 +117,7 @@ var _errArrayElemPool = sync.Pool{New: func() interface{} { type errArrayElem struct{ err error } func newErrArrayElem(err error) *errArrayElem { - e := _errArrayElemPool.Get().(*errArrayElem) + e := _errArrayElemPool.Get() e.err = err return e } diff --git a/tools/vendor/go.uber.org/zap/zapcore/field.go b/tools/vendor/go.uber.org/zap/zapcore/field.go index 95bdb0a12..308c9781e 100644 --- a/tools/vendor/go.uber.org/zap/zapcore/field.go +++ b/tools/vendor/go.uber.org/zap/zapcore/field.go @@ -47,7 +47,7 @@ const ( ByteStringType // Complex128Type indicates that the field carries a complex128. Complex128Type - // Complex64Type indicates that the field carries a complex128. + // Complex64Type indicates that the field carries a complex64. Complex64Type // DurationType indicates that the field carries a time.Duration. DurationType diff --git a/tools/vendor/go.uber.org/zap/zapcore/json_encoder.go b/tools/vendor/go.uber.org/zap/zapcore/json_encoder.go index 3921c5cd3..9685169b2 100644 --- a/tools/vendor/go.uber.org/zap/zapcore/json_encoder.go +++ b/tools/vendor/go.uber.org/zap/zapcore/json_encoder.go @@ -23,24 +23,20 @@ package zapcore import ( "encoding/base64" "math" - "sync" "time" "unicode/utf8" "go.uber.org/zap/buffer" "go.uber.org/zap/internal/bufferpool" + "go.uber.org/zap/internal/pool" ) // For JSON-escaping; see jsonEncoder.safeAddString below. const _hex = "0123456789abcdef" -var _jsonPool = sync.Pool{New: func() interface{} { +var _jsonPool = pool.New(func() *jsonEncoder { return &jsonEncoder{} -}} - -func getJSONEncoder() *jsonEncoder { - return _jsonPool.Get().(*jsonEncoder) -} +}) func putJSONEncoder(enc *jsonEncoder) { if enc.reflectBuf != nil { @@ -354,7 +350,7 @@ func (enc *jsonEncoder) Clone() Encoder { } func (enc *jsonEncoder) clone() *jsonEncoder { - clone := getJSONEncoder() + clone := _jsonPool.Get() clone.EncoderConfig = enc.EncoderConfig clone.spaced = enc.spaced clone.openNamespaces = enc.openNamespaces @@ -376,7 +372,7 @@ func (enc *jsonEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, final.AppendString(ent.Level.String()) } } - if final.TimeKey != "" { + if final.TimeKey != "" && !ent.Time.IsZero() { final.AddTime(final.TimeKey, ent.Time) } if ent.LoggerName != "" && final.NameKey != "" { @@ -490,73 +486,98 @@ func (enc *jsonEncoder) appendFloat(val float64, bitSize int) { // Unlike the standard library's encoder, it doesn't attempt to protect the // user from browser vulnerabilities or JSONP-related problems. func (enc *jsonEncoder) safeAddString(s string) { - for i := 0; i < len(s); { - if enc.tryAddRuneSelf(s[i]) { - i++ - continue - } - r, size := utf8.DecodeRuneInString(s[i:]) - if enc.tryAddRuneError(r, size) { - i++ - continue - } - enc.buf.AppendString(s[i : i+size]) - i += size - } + safeAppendStringLike( + (*buffer.Buffer).AppendString, + utf8.DecodeRuneInString, + enc.buf, + s, + ) } // safeAddByteString is no-alloc equivalent of safeAddString(string(s)) for s []byte. 
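+// (Editorial note: this helper and safeAddString above now both delegate to
+// the generic safeAppendStringLike below, so string and []byte inputs share a
+// single escaping implementation.)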
func (enc *jsonEncoder) safeAddByteString(s []byte) { + safeAppendStringLike( + (*buffer.Buffer).AppendBytes, + utf8.DecodeRune, + enc.buf, + s, + ) +} + +// safeAppendStringLike is a generic implementation of safeAddString and safeAddByteString. +// It appends a string or byte slice to the buffer, escaping all special characters. +func safeAppendStringLike[S []byte | string]( + // appendTo appends this string-like object to the buffer. + appendTo func(*buffer.Buffer, S), + // decodeRune decodes the next rune from the string-like object + // and returns its value and width in bytes. + decodeRune func(S) (rune, int), + buf *buffer.Buffer, + s S, +) { + // The encoding logic below works by skipping over characters + // that can be safely copied as-is, + // until a character is found that needs special handling. + // At that point, we copy everything we've seen so far, + // and then handle that special character. + // + // last is the index of the last byte that was copied to the buffer. + last := 0 for i := 0; i < len(s); { - if enc.tryAddRuneSelf(s[i]) { + if s[i] >= utf8.RuneSelf { + // Character >= RuneSelf may be part of a multi-byte rune. + // They need to be decoded before we can decide how to handle them. + r, size := decodeRune(s[i:]) + if r != utf8.RuneError || size != 1 { + // No special handling required. + // Skip over this rune and continue. + i += size + continue + } + + // Invalid UTF-8 sequence. + // Replace it with the Unicode replacement character. + appendTo(buf, s[last:i]) + buf.AppendString(`\ufffd`) + i++ - continue - } - r, size := utf8.DecodeRune(s[i:]) - if enc.tryAddRuneError(r, size) { + last = i + } else { + // Character < RuneSelf is a single-byte UTF-8 rune. + if s[i] >= 0x20 && s[i] != '\\' && s[i] != '"' { + // No escaping necessary. + // Skip over this character and continue. + i++ + continue + } + + // This character needs to be escaped. + appendTo(buf, s[last:i]) + switch s[i] { + case '\\', '"': + buf.AppendByte('\\') + buf.AppendByte(s[i]) + case '\n': + buf.AppendByte('\\') + buf.AppendByte('n') + case '\r': + buf.AppendByte('\\') + buf.AppendByte('r') + case '\t': + buf.AppendByte('\\') + buf.AppendByte('t') + default: + // Encode bytes < 0x20, except for the escape sequences above. + buf.AppendString(`\u00`) + buf.AppendByte(_hex[s[i]>>4]) + buf.AppendByte(_hex[s[i]&0xF]) + } + i++ - continue + last = i } - enc.buf.Write(s[i : i+size]) - i += size - } -} - -// tryAddRuneSelf appends b if it is valid UTF-8 character represented in a single byte. -func (enc *jsonEncoder) tryAddRuneSelf(b byte) bool { - if b >= utf8.RuneSelf { - return false - } - if 0x20 <= b && b != '\\' && b != '"' { - enc.buf.AppendByte(b) - return true - } - switch b { - case '\\', '"': - enc.buf.AppendByte('\\') - enc.buf.AppendByte(b) - case '\n': - enc.buf.AppendByte('\\') - enc.buf.AppendByte('n') - case '\r': - enc.buf.AppendByte('\\') - enc.buf.AppendByte('r') - case '\t': - enc.buf.AppendByte('\\') - enc.buf.AppendByte('t') - default: - // Encode bytes < 0x20, except for the escape sequences above. 
- enc.buf.AppendString(`\u00`) - enc.buf.AppendByte(_hex[b>>4]) - enc.buf.AppendByte(_hex[b&0xF]) } - return true -} -func (enc *jsonEncoder) tryAddRuneError(r rune, size int) bool { - if r == utf8.RuneError && size == 1 { - enc.buf.AppendString(`\ufffd`) - return true - } - return false + // add remaining + appendTo(buf, s[last:]) } diff --git a/tools/vendor/go.uber.org/atomic/error.go b/tools/vendor/go.uber.org/zap/zapcore/lazy_with.go similarity index 60% rename from tools/vendor/go.uber.org/atomic/error.go rename to tools/vendor/go.uber.org/zap/zapcore/lazy_with.go index a6166fbea..05288d6a8 100644 --- a/tools/vendor/go.uber.org/atomic/error.go +++ b/tools/vendor/go.uber.org/zap/zapcore/lazy_with.go @@ -1,6 +1,4 @@ -// @generated Code generated by gen-atomicwrapper. - -// Copyright (c) 2020 Uber Technologies, Inc. +// Copyright (c) 2023 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -20,32 +18,37 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. -package atomic +package zapcore -// Error is an atomic type-safe wrapper for error values. -type Error struct { - _ nocmp // disallow non-atomic comparison +import "sync" - v Value +type lazyWithCore struct { + Core + sync.Once + fields []Field } -var _zeroError error - -// NewError creates a new Error. -func NewError(v error) *Error { - x := &Error{} - if v != _zeroError { - x.Store(v) +// NewLazyWith wraps a Core with a "lazy" Core that will only encode fields if +// the logger is written to (or is further chained in a non-lazy manner). +func NewLazyWith(core Core, fields []Field) Core { + return &lazyWithCore{ + Core: core, + fields: fields, } - return x } -// Load atomically loads the wrapped error. -func (x *Error) Load() error { - return unpackError(x.v.Load()) +func (d *lazyWithCore) initOnce() { + d.Once.Do(func() { + d.Core = d.Core.With(d.fields) + }) +} + +func (d *lazyWithCore) With(fields []Field) Core { + d.initOnce() + return d.Core.With(fields) } -// Store atomically stores the passed error. -func (x *Error) Store(v error) { - x.v.Store(packError(v)) +func (d *lazyWithCore) Check(e Entry, ce *CheckedEntry) *CheckedEntry { + d.initOnce() + return d.Core.Check(e, ce) } diff --git a/tools/vendor/go.uber.org/zap/zapcore/sampler.go b/tools/vendor/go.uber.org/zap/zapcore/sampler.go index dc518055a..b7c093a4f 100644 --- a/tools/vendor/go.uber.org/zap/zapcore/sampler.go +++ b/tools/vendor/go.uber.org/zap/zapcore/sampler.go @@ -21,9 +21,8 @@ package zapcore import ( + "sync/atomic" "time" - - "go.uber.org/atomic" ) const ( @@ -66,16 +65,16 @@ func (c *counter) IncCheckReset(t time.Time, tick time.Duration) uint64 { tn := t.UnixNano() resetAfter := c.resetAt.Load() if resetAfter > tn { - return c.counter.Inc() + return c.counter.Add(1) } c.counter.Store(1) newResetAfter := tn + tick.Nanoseconds() - if !c.resetAt.CAS(resetAfter, newResetAfter) { + if !c.resetAt.CompareAndSwap(resetAfter, newResetAfter) { // We raced with another goroutine trying to reset, and it also reset // the counter to 1, so we need to reincrement the counter.
- return c.counter.Inc() + return c.counter.Add(1) } return 1 diff --git a/tools/vendor/golang.org/x/text/encoding/encoding.go b/tools/vendor/golang.org/x/text/encoding/encoding.go new file mode 100644 index 000000000..a0bd7cd4d --- /dev/null +++ b/tools/vendor/golang.org/x/text/encoding/encoding.go @@ -0,0 +1,335 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package encoding defines an interface for character encodings, such as Shift +// JIS and Windows 1252, that can convert to and from UTF-8. +// +// Encoding implementations are provided in other packages, such as +// golang.org/x/text/encoding/charmap and +// golang.org/x/text/encoding/japanese. +package encoding // import "golang.org/x/text/encoding" + +import ( + "errors" + "io" + "strconv" + "unicode/utf8" + + "golang.org/x/text/encoding/internal/identifier" + "golang.org/x/text/transform" ) + +// TODO: +// - There seems to be some inconsistency in when decoders return errors +// and when not. Also documentation seems to suggest they shouldn't return +// errors at all (except for UTF-16). +// - Encoders seem to rely on or at least benefit from the input being in NFC +// normal form. Perhaps add an example how users could prepare their output. + +// Encoding is a character set encoding that can be transformed to and from +// UTF-8. +type Encoding interface { + // NewDecoder returns a Decoder. + NewDecoder() *Decoder + + // NewEncoder returns an Encoder. + NewEncoder() *Encoder +} + +// A Decoder converts bytes to UTF-8. It implements transform.Transformer. +// +// Transforming source bytes that are not of that encoding will not result in an +// error per se. Each byte that cannot be transcoded will be represented in the +// output by the UTF-8 encoding of '\uFFFD', the replacement rune. +type Decoder struct { + transform.Transformer + + // This forces external creators of Decoders to use names in struct + // initializers, allowing for future extendibility without having to break + // code. + _ struct{} +} + +// Bytes converts the given encoded bytes to UTF-8. It returns the converted +// bytes or nil, err if any error occurred. +func (d *Decoder) Bytes(b []byte) ([]byte, error) { + b, _, err := transform.Bytes(d, b) + if err != nil { + return nil, err + } + return b, nil +} + +// String converts the given encoded string to UTF-8. It returns the converted +// string or "", err if any error occurred. +func (d *Decoder) String(s string) (string, error) { + s, _, err := transform.String(d, s) + if err != nil { + return "", err + } + return s, nil +} + +// Reader wraps another Reader to decode its bytes. +// +// The Decoder may not be used for any other operation as long as the returned +// Reader is in use. +func (d *Decoder) Reader(r io.Reader) io.Reader { + return transform.NewReader(r, d) +} + +// An Encoder converts bytes from UTF-8. It implements transform.Transformer. +// +// Each rune that cannot be transcoded will result in an error. In this case, +// the transform will consume all source bytes up to, not including the offending +// rune. Source bytes that are not valid UTF-8 will be replaced by +// `\uFFFD`. To return early with an error instead, use transform.Chain to +// preprocess the data with a UTF8Validator.
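+//
+// Editorial sketch (not part of the upstream file): a concrete Encoder comes
+// from an encoding implementation such as the charmap package noted above:
+//
+//	enc := charmap.Windows1252.NewEncoder()
+//	latin1, err := enc.String("café") // errs only on runes Windows-1252 cannot encode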
+type Encoder struct { + transform.Transformer + + // This forces external creators of Encoders to use names in struct + // initializers, allowing for future extendibility without having to break + // code. + _ struct{} +} + +// Bytes converts bytes from UTF-8. It returns the converted bytes or nil, err if +// any error occurred. +func (e *Encoder) Bytes(b []byte) ([]byte, error) { + b, _, err := transform.Bytes(e, b) + if err != nil { + return nil, err + } + return b, nil +} + +// String converts a string from UTF-8. It returns the converted string or +// "", err if any error occurred. +func (e *Encoder) String(s string) (string, error) { + s, _, err := transform.String(e, s) + if err != nil { + return "", err + } + return s, nil +} + +// Writer wraps another Writer to encode its UTF-8 output. +// +// The Encoder may not be used for any other operation as long as the returned +// Writer is in use. +func (e *Encoder) Writer(w io.Writer) io.Writer { + return transform.NewWriter(w, e) +} + +// ASCIISub is the ASCII substitute character, as recommended by +// https://unicode.org/reports/tr36/#Text_Comparison +const ASCIISub = '\x1a' + +// Nop is the nop encoding. Its transformed bytes are the same as the source +// bytes; it does not replace invalid UTF-8 sequences. +var Nop Encoding = nop{} + +type nop struct{} + +func (nop) NewDecoder() *Decoder { + return &Decoder{Transformer: transform.Nop} +} +func (nop) NewEncoder() *Encoder { + return &Encoder{Transformer: transform.Nop} +} + +// Replacement is the replacement encoding. Decoding from the replacement +// encoding yields a single '\uFFFD' replacement rune. Encoding from UTF-8 to +// the replacement encoding yields the same as the source bytes except that +// invalid UTF-8 is converted to '\uFFFD'. +// +// It is defined at http://encoding.spec.whatwg.org/#replacement +var Replacement Encoding = replacement{} + +type replacement struct{} + +func (replacement) NewDecoder() *Decoder { + return &Decoder{Transformer: replacementDecoder{}} +} + +func (replacement) NewEncoder() *Encoder { + return &Encoder{Transformer: replacementEncoder{}} +} + +func (replacement) ID() (mib identifier.MIB, other string) { + return identifier.Replacement, "" +} + +type replacementDecoder struct{ transform.NopResetter } + +func (replacementDecoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + if len(dst) < 3 { + return 0, 0, transform.ErrShortDst + } + if atEOF { + const fffd = "\ufffd" + dst[0] = fffd[0] + dst[1] = fffd[1] + dst[2] = fffd[2] + nDst = 3 + } + return nDst, len(src), nil +} + +type replacementEncoder struct{ transform.NopResetter } + +func (replacementEncoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + r, size := rune(0), 0 + + for ; nSrc < len(src); nSrc += size { + r = rune(src[nSrc]) + + // Decode a 1-byte rune. + if r < utf8.RuneSelf { + size = 1 + + } else { + // Decode a multi-byte rune. + r, size = utf8.DecodeRune(src[nSrc:]) + if size == 1 { + // All valid runes of size 1 (those below utf8.RuneSelf) were + // handled above. We have invalid UTF-8 or we haven't seen the + // full character yet. 
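+				//
+				// (Editorial note: transform.ErrShortSrc below is not a hard
+				// failure; it asks the caller to retry with more input, so a
+				// rune split across two reads is not mangled into '\ufffd'.)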
+ if !atEOF && !utf8.FullRune(src[nSrc:]) { + err = transform.ErrShortSrc + break + } + r = '\ufffd' + } + } + + if nDst+utf8.RuneLen(r) > len(dst) { + err = transform.ErrShortDst + break + } + nDst += utf8.EncodeRune(dst[nDst:], r) + } + return nDst, nSrc, err +} + +// HTMLEscapeUnsupported wraps encoders to replace source runes outside the +// repertoire of the destination encoding with HTML escape sequences. +// +// This wrapper exists to comply with URL and HTML forms requiring a +// non-terminating legacy encoder. The produced sequences may lead to data +// loss as they are indistinguishable from legitimate input. To avoid this +// issue, use UTF-8 encodings whenever possible. +func HTMLEscapeUnsupported(e *Encoder) *Encoder { + return &Encoder{Transformer: &errorHandler{e, errorToHTML}} +} + +// ReplaceUnsupported wraps encoders to replace source runes outside the +// repertoire of the destination encoding with an encoding-specific +// replacement. +// +// This wrapper is only provided for backwards compatibility and legacy +// handling. Its use is strongly discouraged. Use UTF-8 whenever possible. +func ReplaceUnsupported(e *Encoder) *Encoder { + return &Encoder{Transformer: &errorHandler{e, errorToReplacement}} +} + +type errorHandler struct { + *Encoder + handler func(dst []byte, r rune, err repertoireError) (n int, ok bool) +} + +// TODO: consider making this error public in some form. +type repertoireError interface { + Replacement() byte +} + +func (h errorHandler) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + nDst, nSrc, err = h.Transformer.Transform(dst, src, atEOF) + for err != nil { + rerr, ok := err.(repertoireError) + if !ok { + return nDst, nSrc, err + } + r, sz := utf8.DecodeRune(src[nSrc:]) + n, ok := h.handler(dst[nDst:], r, rerr) + if !ok { + return nDst, nSrc, transform.ErrShortDst + } + err = nil + nDst += n + if nSrc += sz; nSrc < len(src) { + var dn, sn int + dn, sn, err = h.Transformer.Transform(dst[nDst:], src[nSrc:], atEOF) + nDst += dn + nSrc += sn + } + } + return nDst, nSrc, err +} + +func errorToHTML(dst []byte, r rune, err repertoireError) (n int, ok bool) { + buf := [8]byte{} + b := strconv.AppendUint(buf[:0], uint64(r), 10) + if n = len(b) + len("&#;"); n >= len(dst) { + return 0, false + } + dst[0] = '&' + dst[1] = '#' + dst[copy(dst[2:], b)+2] = ';' + return n, true +} + +func errorToReplacement(dst []byte, r rune, err repertoireError) (n int, ok bool) { + if len(dst) == 0 { + return 0, false + } + dst[0] = err.Replacement() + return 1, true +} + +// ErrInvalidUTF8 means that a transformer encountered invalid UTF-8. +var ErrInvalidUTF8 = errors.New("encoding: invalid UTF-8") + +// UTF8Validator is a transformer that returns ErrInvalidUTF8 on the first +// input byte that is not valid UTF-8. +var UTF8Validator transform.Transformer = utf8Validator{} + +type utf8Validator struct{ transform.NopResetter } + +func (utf8Validator) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + n := len(src) + if n > len(dst) { + n = len(dst) + } + for i := 0; i < n; { + if c := src[i]; c < utf8.RuneSelf { + dst[i] = c + i++ + continue + } + _, size := utf8.DecodeRune(src[i:]) + if size == 1 { + // All valid runes of size 1 (those below utf8.RuneSelf) were + // handled above. We have invalid UTF-8 or we haven't seen the + // full character yet.
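+			//
+			// (Editorial note: unlike the replacement decoder above, which
+			// substitutes '\ufffd' for bad bytes, the validator reports
+			// ErrInvalidUTF8, downgrading to ErrShortSrc when a trailing
+			// partial rune might still be completed by the next read.)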
+ err = ErrInvalidUTF8 + if !atEOF && !utf8.FullRune(src[i:]) { + err = transform.ErrShortSrc + } + return i, i, err + } + if i+size > len(dst) { + return i, i, transform.ErrShortDst + } + for ; size > 0; size-- { + dst[i] = src[i] + i++ + } + } + if len(src) > len(dst) { + err = transform.ErrShortDst + } + return n, n, err +} diff --git a/tools/vendor/golang.org/x/text/encoding/internal/identifier/identifier.go b/tools/vendor/golang.org/x/text/encoding/internal/identifier/identifier.go new file mode 100644 index 000000000..5c9b85c28 --- /dev/null +++ b/tools/vendor/golang.org/x/text/encoding/internal/identifier/identifier.go @@ -0,0 +1,81 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run gen.go + +// Package identifier defines the contract between implementations of Encoding +// and Index by defining identifiers that uniquely identify standardized coded +// character sets (CCS) and character encoding schemes (CES), which we will +// together refer to as encodings, for which Encoding implementations provide +// converters to and from UTF-8. This package is typically only of concern to +// implementers of Indexes and Encodings. +// +// One part of the identifier is the MIB code, which is defined by IANA and +// uniquely identifies a CCS or CES. Each code is associated with data that +// references authorities, official documentation as well as aliases and MIME +// names. +// +// Not all CESs are covered by the IANA registry. The "other" string that is +// returned by ID can be used to identify other character sets or versions of +// existing ones. +// +// It is recommended that each package that provides a set of Encodings provide +// the All and Common variables to reference all supported encodings and +// a commonly used subset. This allows Index implementations to include all +// available encodings without explicitly referencing or knowing about them. +package identifier + +// Note: this package is internal, but could be made public if there is a need +// for writing third-party Indexes and Encodings. + +// References: +// - http://source.icu-project.org/repos/icu/icu/trunk/source/data/mappings/convrtrs.txt +// - http://www.iana.org/assignments/character-sets/character-sets.xhtml +// - http://www.iana.org/assignments/ianacharset-mib/ianacharset-mib +// - http://www.ietf.org/rfc/rfc2978.txt +// - https://www.unicode.org/reports/tr22/ +// - http://www.w3.org/TR/encoding/ +// - https://encoding.spec.whatwg.org/ +// - https://encoding.spec.whatwg.org/encodings.json +// - https://tools.ietf.org/html/rfc6657#section-5 + +// Interface can be implemented by Encodings to define the CCS or CES for which +// it implements conversions. +type Interface interface { + // ID returns an encoding identifier. Exactly one of the mib and other + // values should be non-zero. + // + // In the usual case it is only necessary to indicate the MIB code. The + // other string can be used to specify encodings for which there is no MIB, + // such as "x-mac-dingbat". + // + // The other string may only contain the characters a-z, A-Z, 0-9, - and _. + ID() (mib MIB, other string) + + // NOTE: the restrictions on the encoding are to allow extending the syntax + // with additional information such as versions, vendors and other variants. +} + +// A MIB identifies an encoding.
It is derived from the IANA MIB codes and adds +// some identifiers for some encodings that are not covered by the IANA +// standard. +// +// See http://www.iana.org/assignments/ianacharset-mib. +type MIB uint16 + +// These additional MIB types are not defined in IANA. They are added because +// they are common and defined within the text repo. +const ( + // Unofficial marks the start of encodings not registered by IANA. + Unofficial MIB = 10000 + iota + + // Replacement is the WhatWG replacement encoding. + Replacement + + // XUserDefined is the code for x-user-defined. + XUserDefined + + // MacintoshCyrillic is the code for x-mac-cyrillic. + MacintoshCyrillic +) diff --git a/tools/vendor/golang.org/x/text/encoding/internal/identifier/mib.go b/tools/vendor/golang.org/x/text/encoding/internal/identifier/mib.go new file mode 100644 index 000000000..351fb86e2 --- /dev/null +++ b/tools/vendor/golang.org/x/text/encoding/internal/identifier/mib.go @@ -0,0 +1,1627 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +package identifier + +const ( + // ASCII is the MIB identifier with IANA name US-ASCII (MIME: US-ASCII). + // + // ANSI X3.4-1986 + // Reference: RFC2046 + ASCII MIB = 3 + + // ISOLatin1 is the MIB identifier with IANA name ISO_8859-1:1987 (MIME: ISO-8859-1). + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISOLatin1 MIB = 4 + + // ISOLatin2 is the MIB identifier with IANA name ISO_8859-2:1987 (MIME: ISO-8859-2). + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISOLatin2 MIB = 5 + + // ISOLatin3 is the MIB identifier with IANA name ISO_8859-3:1988 (MIME: ISO-8859-3). + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISOLatin3 MIB = 6 + + // ISOLatin4 is the MIB identifier with IANA name ISO_8859-4:1988 (MIME: ISO-8859-4). + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISOLatin4 MIB = 7 + + // ISOLatinCyrillic is the MIB identifier with IANA name ISO_8859-5:1988 (MIME: ISO-8859-5). + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISOLatinCyrillic MIB = 8 + + // ISOLatinArabic is the MIB identifier with IANA name ISO_8859-6:1987 (MIME: ISO-8859-6). + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISOLatinArabic MIB = 9 + + // ISOLatinGreek is the MIB identifier with IANA name ISO_8859-7:1987 (MIME: ISO-8859-7). + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1947 + // Reference: RFC1345 + ISOLatinGreek MIB = 10 + + // ISOLatinHebrew is the MIB identifier with IANA name ISO_8859-8:1988 (MIME: ISO-8859-8). + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISOLatinHebrew MIB = 11 + + // ISOLatin5 is the MIB identifier with IANA name ISO_8859-9:1989 (MIME: ISO-8859-9). 
+ // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISOLatin5 MIB = 12 + + // ISOLatin6 is the MIB identifier with IANA name ISO-8859-10 (MIME: ISO-8859-10). + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISOLatin6 MIB = 13 + + // ISOTextComm is the MIB identifier with IANA name ISO_6937-2-add. + // + // ISO-IR: International Register of Escape Sequences and ISO 6937-2:1983 + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISOTextComm MIB = 14 + + // HalfWidthKatakana is the MIB identifier with IANA name JIS_X0201. + // + // JIS X 0201-1976. One byte only, this is equivalent to + // JIS/Roman (similar to ASCII) plus eight-bit half-width + // Katakana + // Reference: RFC1345 + HalfWidthKatakana MIB = 15 + + // JISEncoding is the MIB identifier with IANA name JIS_Encoding. + // + // JIS X 0202-1991. Uses ISO 2022 escape sequences to + // shift code sets as documented in JIS X 0202-1991. + JISEncoding MIB = 16 + + // ShiftJIS is the MIB identifier with IANA name Shift_JIS (MIME: Shift_JIS). + // + // This charset is an extension of csHalfWidthKatakana by + // adding graphic characters in JIS X 0208. The CCS's are + // JIS X0201:1997 and JIS X0208:1997. The + // complete definition is shown in Appendix 1 of JIS + // X0208:1997. + // This charset can be used for the top-level media type "text". + ShiftJIS MIB = 17 + + // EUCPkdFmtJapanese is the MIB identifier with IANA name Extended_UNIX_Code_Packed_Format_for_Japanese (MIME: EUC-JP). + // + // Standardized by OSF, UNIX International, and UNIX Systems + // Laboratories Pacific. Uses ISO 2022 rules to select + // code set 0: US-ASCII (a single 7-bit byte set) + // code set 1: JIS X0208-1990 (a double 8-bit byte set) + // restricted to A0-FF in both bytes + // code set 2: Half Width Katakana (a single 7-bit byte set) + // requiring SS2 as the character prefix + // code set 3: JIS X0212-1990 (a double 7-bit byte set) + // restricted to A0-FF in both bytes + // requiring SS3 as the character prefix + EUCPkdFmtJapanese MIB = 18 + + // EUCFixWidJapanese is the MIB identifier with IANA name Extended_UNIX_Code_Fixed_Width_for_Japanese. + // + // Used in Japan. Each character is 2 octets. + // code set 0: US-ASCII (a single 7-bit byte set) + // 1st byte = 00 + // 2nd byte = 20-7E + // code set 1: JIS X0208-1990 (a double 7-bit byte set) + // restricted to A0-FF in both bytes + // code set 2: Half Width Katakana (a single 7-bit byte set) + // 1st byte = 00 + // 2nd byte = A0-FF + // code set 3: JIS X0212-1990 (a double 7-bit byte set) + // restricted to A0-FF in + // the first byte + // and 21-7E in the second byte + EUCFixWidJapanese MIB = 19 + + // ISO4UnitedKingdom is the MIB identifier with IANA name BS_4730. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO4UnitedKingdom MIB = 20 + + // ISO11SwedishForNames is the MIB identifier with IANA name SEN_850200_C. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO11SwedishForNames MIB = 21 + + // ISO15Italian is the MIB identifier with IANA name IT. 
+ // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO15Italian MIB = 22 + + // ISO17Spanish is the MIB identifier with IANA name ES. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO17Spanish MIB = 23 + + // ISO21German is the MIB identifier with IANA name DIN_66003. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO21German MIB = 24 + + // ISO60Norwegian1 is the MIB identifier with IANA name NS_4551-1. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO60Norwegian1 MIB = 25 + + // ISO69French is the MIB identifier with IANA name NF_Z_62-010. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO69French MIB = 26 + + // ISO10646UTF1 is the MIB identifier with IANA name ISO-10646-UTF-1. + // + // Universal Transfer Format (1), this is the multibyte + // encoding, that subsets ASCII-7. It does not have byte + // ordering issues. + ISO10646UTF1 MIB = 27 + + // ISO646basic1983 is the MIB identifier with IANA name ISO_646.basic:1983. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO646basic1983 MIB = 28 + + // INVARIANT is the MIB identifier with IANA name INVARIANT. + // + // Reference: RFC1345 + INVARIANT MIB = 29 + + // ISO2IntlRefVersion is the MIB identifier with IANA name ISO_646.irv:1983. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO2IntlRefVersion MIB = 30 + + // NATSSEFI is the MIB identifier with IANA name NATS-SEFI. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + NATSSEFI MIB = 31 + + // NATSSEFIADD is the MIB identifier with IANA name NATS-SEFI-ADD. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + NATSSEFIADD MIB = 32 + + // NATSDANO is the MIB identifier with IANA name NATS-DANO. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + NATSDANO MIB = 33 + + // NATSDANOADD is the MIB identifier with IANA name NATS-DANO-ADD. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + NATSDANOADD MIB = 34 + + // ISO10Swedish is the MIB identifier with IANA name SEN_850200_B. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO10Swedish MIB = 35 + + // KSC56011987 is the MIB identifier with IANA name KS_C_5601-1987. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. 
+ // Reference: RFC1345 + KSC56011987 MIB = 36 + + // ISO2022KR is the MIB identifier with IANA name ISO-2022-KR (MIME: ISO-2022-KR). + // + // rfc1557 (see also KS_C_5601-1987) + // Reference: RFC1557 + ISO2022KR MIB = 37 + + // EUCKR is the MIB identifier with IANA name EUC-KR (MIME: EUC-KR). + // + // rfc1557 (see also KS_C_5861-1992) + // Reference: RFC1557 + EUCKR MIB = 38 + + // ISO2022JP is the MIB identifier with IANA name ISO-2022-JP (MIME: ISO-2022-JP). + // + // rfc1468 (see also rfc2237 ) + // Reference: RFC1468 + ISO2022JP MIB = 39 + + // ISO2022JP2 is the MIB identifier with IANA name ISO-2022-JP-2 (MIME: ISO-2022-JP-2). + // + // rfc1554 + // Reference: RFC1554 + ISO2022JP2 MIB = 40 + + // ISO13JISC6220jp is the MIB identifier with IANA name JIS_C6220-1969-jp. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO13JISC6220jp MIB = 41 + + // ISO14JISC6220ro is the MIB identifier with IANA name JIS_C6220-1969-ro. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO14JISC6220ro MIB = 42 + + // ISO16Portuguese is the MIB identifier with IANA name PT. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO16Portuguese MIB = 43 + + // ISO18Greek7Old is the MIB identifier with IANA name greek7-old. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO18Greek7Old MIB = 44 + + // ISO19LatinGreek is the MIB identifier with IANA name latin-greek. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO19LatinGreek MIB = 45 + + // ISO25French is the MIB identifier with IANA name NF_Z_62-010_(1973). + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO25French MIB = 46 + + // ISO27LatinGreek1 is the MIB identifier with IANA name Latin-greek-1. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO27LatinGreek1 MIB = 47 + + // ISO5427Cyrillic is the MIB identifier with IANA name ISO_5427. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO5427Cyrillic MIB = 48 + + // ISO42JISC62261978 is the MIB identifier with IANA name JIS_C6226-1978. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO42JISC62261978 MIB = 49 + + // ISO47BSViewdata is the MIB identifier with IANA name BS_viewdata. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO47BSViewdata MIB = 50 + + // ISO49INIS is the MIB identifier with IANA name INIS. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO49INIS MIB = 51 + + // ISO50INIS8 is the MIB identifier with IANA name INIS-8. 
+ // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO50INIS8 MIB = 52 + + // ISO51INISCyrillic is the MIB identifier with IANA name INIS-cyrillic. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO51INISCyrillic MIB = 53 + + // ISO54271981 is the MIB identifier with IANA name ISO_5427:1981. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO54271981 MIB = 54 + + // ISO5428Greek is the MIB identifier with IANA name ISO_5428:1980. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO5428Greek MIB = 55 + + // ISO57GB1988 is the MIB identifier with IANA name GB_1988-80. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO57GB1988 MIB = 56 + + // ISO58GB231280 is the MIB identifier with IANA name GB_2312-80. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO58GB231280 MIB = 57 + + // ISO61Norwegian2 is the MIB identifier with IANA name NS_4551-2. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO61Norwegian2 MIB = 58 + + // ISO70VideotexSupp1 is the MIB identifier with IANA name videotex-suppl. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO70VideotexSupp1 MIB = 59 + + // ISO84Portuguese2 is the MIB identifier with IANA name PT2. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO84Portuguese2 MIB = 60 + + // ISO85Spanish2 is the MIB identifier with IANA name ES2. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO85Spanish2 MIB = 61 + + // ISO86Hungarian is the MIB identifier with IANA name MSZ_7795.3. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO86Hungarian MIB = 62 + + // ISO87JISX0208 is the MIB identifier with IANA name JIS_C6226-1983. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO87JISX0208 MIB = 63 + + // ISO88Greek7 is the MIB identifier with IANA name greek7. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO88Greek7 MIB = 64 + + // ISO89ASMO449 is the MIB identifier with IANA name ASMO_449. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO89ASMO449 MIB = 65 + + // ISO90 is the MIB identifier with IANA name iso-ir-90. 
+ // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO90 MIB = 66 + + // ISO91JISC62291984a is the MIB identifier with IANA name JIS_C6229-1984-a. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO91JISC62291984a MIB = 67 + + // ISO92JISC62991984b is the MIB identifier with IANA name JIS_C6229-1984-b. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO92JISC62991984b MIB = 68 + + // ISO93JIS62291984badd is the MIB identifier with IANA name JIS_C6229-1984-b-add. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO93JIS62291984badd MIB = 69 + + // ISO94JIS62291984hand is the MIB identifier with IANA name JIS_C6229-1984-hand. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO94JIS62291984hand MIB = 70 + + // ISO95JIS62291984handadd is the MIB identifier with IANA name JIS_C6229-1984-hand-add. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO95JIS62291984handadd MIB = 71 + + // ISO96JISC62291984kana is the MIB identifier with IANA name JIS_C6229-1984-kana. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO96JISC62291984kana MIB = 72 + + // ISO2033 is the MIB identifier with IANA name ISO_2033-1983. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO2033 MIB = 73 + + // ISO99NAPLPS is the MIB identifier with IANA name ANSI_X3.110-1983. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO99NAPLPS MIB = 74 + + // ISO102T617bit is the MIB identifier with IANA name T.61-7bit. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO102T617bit MIB = 75 + + // ISO103T618bit is the MIB identifier with IANA name T.61-8bit. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO103T618bit MIB = 76 + + // ISO111ECMACyrillic is the MIB identifier with IANA name ECMA-cyrillic. + // + // ISO registry + ISO111ECMACyrillic MIB = 77 + + // ISO121Canadian1 is the MIB identifier with IANA name CSA_Z243.4-1985-1. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO121Canadian1 MIB = 78 + + // ISO122Canadian2 is the MIB identifier with IANA name CSA_Z243.4-1985-2. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO122Canadian2 MIB = 79 + + // ISO123CSAZ24341985gr is the MIB identifier with IANA name CSA_Z243.4-1985-gr. 
+ // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO123CSAZ24341985gr MIB = 80 + + // ISO88596E is the MIB identifier with IANA name ISO_8859-6-E (MIME: ISO-8859-6-E). + // + // rfc1556 + // Reference: RFC1556 + ISO88596E MIB = 81 + + // ISO88596I is the MIB identifier with IANA name ISO_8859-6-I (MIME: ISO-8859-6-I). + // + // rfc1556 + // Reference: RFC1556 + ISO88596I MIB = 82 + + // ISO128T101G2 is the MIB identifier with IANA name T.101-G2. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO128T101G2 MIB = 83 + + // ISO88598E is the MIB identifier with IANA name ISO_8859-8-E (MIME: ISO-8859-8-E). + // + // rfc1556 + // Reference: RFC1556 + ISO88598E MIB = 84 + + // ISO88598I is the MIB identifier with IANA name ISO_8859-8-I (MIME: ISO-8859-8-I). + // + // rfc1556 + // Reference: RFC1556 + ISO88598I MIB = 85 + + // ISO139CSN369103 is the MIB identifier with IANA name CSN_369103. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO139CSN369103 MIB = 86 + + // ISO141JUSIB1002 is the MIB identifier with IANA name JUS_I.B1.002. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO141JUSIB1002 MIB = 87 + + // ISO143IECP271 is the MIB identifier with IANA name IEC_P27-1. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO143IECP271 MIB = 88 + + // ISO146Serbian is the MIB identifier with IANA name JUS_I.B1.003-serb. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO146Serbian MIB = 89 + + // ISO147Macedonian is the MIB identifier with IANA name JUS_I.B1.003-mac. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO147Macedonian MIB = 90 + + // ISO150GreekCCITT is the MIB identifier with IANA name greek-ccitt. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO150GreekCCITT MIB = 91 + + // ISO151Cuba is the MIB identifier with IANA name NC_NC00-10:81. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO151Cuba MIB = 92 + + // ISO6937Add is the MIB identifier with IANA name ISO_6937-2-25. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO6937Add MIB = 93 + + // ISO153GOST1976874 is the MIB identifier with IANA name GOST_19768-74. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO153GOST1976874 MIB = 94 + + // ISO8859Supp is the MIB identifier with IANA name ISO_8859-supp. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. 
+ // Reference: RFC1345 + ISO8859Supp MIB = 95 + + // ISO10367Box is the MIB identifier with IANA name ISO_10367-box. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO10367Box MIB = 96 + + // ISO158Lap is the MIB identifier with IANA name latin-lap. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO158Lap MIB = 97 + + // ISO159JISX02121990 is the MIB identifier with IANA name JIS_X0212-1990. + // + // ISO-IR: International Register of Escape Sequences + // Note: The current registration authority is IPSJ/ITSCJ, Japan. + // Reference: RFC1345 + ISO159JISX02121990 MIB = 98 + + // ISO646Danish is the MIB identifier with IANA name DS_2089. + // + // Danish Standard, DS 2089, February 1974 + // Reference: RFC1345 + ISO646Danish MIB = 99 + + // USDK is the MIB identifier with IANA name us-dk. + // + // Reference: RFC1345 + USDK MIB = 100 + + // DKUS is the MIB identifier with IANA name dk-us. + // + // Reference: RFC1345 + DKUS MIB = 101 + + // KSC5636 is the MIB identifier with IANA name KSC5636. + // + // Reference: RFC1345 + KSC5636 MIB = 102 + + // Unicode11UTF7 is the MIB identifier with IANA name UNICODE-1-1-UTF-7. + // + // rfc1642 + // Reference: RFC1642 + Unicode11UTF7 MIB = 103 + + // ISO2022CN is the MIB identifier with IANA name ISO-2022-CN. + // + // rfc1922 + // Reference: RFC1922 + ISO2022CN MIB = 104 + + // ISO2022CNEXT is the MIB identifier with IANA name ISO-2022-CN-EXT. + // + // rfc1922 + // Reference: RFC1922 + ISO2022CNEXT MIB = 105 + + // UTF8 is the MIB identifier with IANA name UTF-8. + // + // rfc3629 + // Reference: RFC3629 + UTF8 MIB = 106 + + // ISO885913 is the MIB identifier with IANA name ISO-8859-13. + // + // ISO See https://www.iana.org/assignments/charset-reg/ISO-8859-13 https://www.iana.org/assignments/charset-reg/ISO-8859-13 + ISO885913 MIB = 109 + + // ISO885914 is the MIB identifier with IANA name ISO-8859-14. + // + // ISO See https://www.iana.org/assignments/charset-reg/ISO-8859-14 + ISO885914 MIB = 110 + + // ISO885915 is the MIB identifier with IANA name ISO-8859-15. + // + // ISO + // Please see: https://www.iana.org/assignments/charset-reg/ISO-8859-15 + ISO885915 MIB = 111 + + // ISO885916 is the MIB identifier with IANA name ISO-8859-16. + // + // ISO + ISO885916 MIB = 112 + + // GBK is the MIB identifier with IANA name GBK. + // + // Chinese IT Standardization Technical Committee + // Please see: https://www.iana.org/assignments/charset-reg/GBK + GBK MIB = 113 + + // GB18030 is the MIB identifier with IANA name GB18030. + // + // Chinese IT Standardization Technical Committee + // Please see: https://www.iana.org/assignments/charset-reg/GB18030 + GB18030 MIB = 114 + + // OSDEBCDICDF0415 is the MIB identifier with IANA name OSD_EBCDIC_DF04_15. + // + // Fujitsu-Siemens standard mainframe EBCDIC encoding + // Please see: https://www.iana.org/assignments/charset-reg/OSD-EBCDIC-DF04-15 + OSDEBCDICDF0415 MIB = 115 + + // OSDEBCDICDF03IRV is the MIB identifier with IANA name OSD_EBCDIC_DF03_IRV. + // + // Fujitsu-Siemens standard mainframe EBCDIC encoding + // Please see: https://www.iana.org/assignments/charset-reg/OSD-EBCDIC-DF03-IRV + OSDEBCDICDF03IRV MIB = 116 + + // OSDEBCDICDF041 is the MIB identifier with IANA name OSD_EBCDIC_DF04_1. 
+ // + // Fujitsu-Siemens standard mainframe EBCDIC encoding + // Please see: https://www.iana.org/assignments/charset-reg/OSD-EBCDIC-DF04-1 + OSDEBCDICDF041 MIB = 117 + + // ISO115481 is the MIB identifier with IANA name ISO-11548-1. + // + // See https://www.iana.org/assignments/charset-reg/ISO-11548-1 + ISO115481 MIB = 118 + + // KZ1048 is the MIB identifier with IANA name KZ-1048. + // + // See https://www.iana.org/assignments/charset-reg/KZ-1048 + KZ1048 MIB = 119 + + // Unicode is the MIB identifier with IANA name ISO-10646-UCS-2. + // + // the 2-octet Basic Multilingual Plane, aka Unicode + // this needs to specify network byte order: the standard + // does not specify (it is a 16-bit integer space) + Unicode MIB = 1000 + + // UCS4 is the MIB identifier with IANA name ISO-10646-UCS-4. + // + // the full code space. (same comment about byte order, + // these are 31-bit numbers. + UCS4 MIB = 1001 + + // UnicodeASCII is the MIB identifier with IANA name ISO-10646-UCS-Basic. + // + // ASCII subset of Unicode. Basic Latin = collection 1 + // See ISO 10646, Appendix A + UnicodeASCII MIB = 1002 + + // UnicodeLatin1 is the MIB identifier with IANA name ISO-10646-Unicode-Latin1. + // + // ISO Latin-1 subset of Unicode. Basic Latin and Latin-1 + // Supplement = collections 1 and 2. See ISO 10646, + // Appendix A. See rfc1815 . + UnicodeLatin1 MIB = 1003 + + // UnicodeJapanese is the MIB identifier with IANA name ISO-10646-J-1. + // + // ISO 10646 Japanese, see rfc1815 . + UnicodeJapanese MIB = 1004 + + // UnicodeIBM1261 is the MIB identifier with IANA name ISO-Unicode-IBM-1261. + // + // IBM Latin-2, -3, -5, Extended Presentation Set, GCSGID: 1261 + UnicodeIBM1261 MIB = 1005 + + // UnicodeIBM1268 is the MIB identifier with IANA name ISO-Unicode-IBM-1268. + // + // IBM Latin-4 Extended Presentation Set, GCSGID: 1268 + UnicodeIBM1268 MIB = 1006 + + // UnicodeIBM1276 is the MIB identifier with IANA name ISO-Unicode-IBM-1276. + // + // IBM Cyrillic Greek Extended Presentation Set, GCSGID: 1276 + UnicodeIBM1276 MIB = 1007 + + // UnicodeIBM1264 is the MIB identifier with IANA name ISO-Unicode-IBM-1264. + // + // IBM Arabic Presentation Set, GCSGID: 1264 + UnicodeIBM1264 MIB = 1008 + + // UnicodeIBM1265 is the MIB identifier with IANA name ISO-Unicode-IBM-1265. + // + // IBM Hebrew Presentation Set, GCSGID: 1265 + UnicodeIBM1265 MIB = 1009 + + // Unicode11 is the MIB identifier with IANA name UNICODE-1-1. + // + // rfc1641 + // Reference: RFC1641 + Unicode11 MIB = 1010 + + // SCSU is the MIB identifier with IANA name SCSU. + // + // SCSU See https://www.iana.org/assignments/charset-reg/SCSU + SCSU MIB = 1011 + + // UTF7 is the MIB identifier with IANA name UTF-7. + // + // rfc2152 + // Reference: RFC2152 + UTF7 MIB = 1012 + + // UTF16BE is the MIB identifier with IANA name UTF-16BE. + // + // rfc2781 + // Reference: RFC2781 + UTF16BE MIB = 1013 + + // UTF16LE is the MIB identifier with IANA name UTF-16LE. + // + // rfc2781 + // Reference: RFC2781 + UTF16LE MIB = 1014 + + // UTF16 is the MIB identifier with IANA name UTF-16. + // + // rfc2781 + // Reference: RFC2781 + UTF16 MIB = 1015 + + // CESU8 is the MIB identifier with IANA name CESU-8. + // + // https://www.unicode.org/reports/tr26 + CESU8 MIB = 1016 + + // UTF32 is the MIB identifier with IANA name UTF-32. + // + // https://www.unicode.org/reports/tr19/ + UTF32 MIB = 1017 + + // UTF32BE is the MIB identifier with IANA name UTF-32BE. 
+ // + // https://www.unicode.org/reports/tr19/ + UTF32BE MIB = 1018 + + // UTF32LE is the MIB identifier with IANA name UTF-32LE. + // + // https://www.unicode.org/reports/tr19/ + UTF32LE MIB = 1019 + + // BOCU1 is the MIB identifier with IANA name BOCU-1. + // + // https://www.unicode.org/notes/tn6/ + BOCU1 MIB = 1020 + + // UTF7IMAP is the MIB identifier with IANA name UTF-7-IMAP. + // + // Note: This charset is used to encode Unicode in IMAP mailbox names; + // see section 5.1.3 of rfc3501 . It should never be used + // outside this context. A name has been assigned so that charset processing + // implementations can refer to it in a consistent way. + UTF7IMAP MIB = 1021 + + // Windows30Latin1 is the MIB identifier with IANA name ISO-8859-1-Windows-3.0-Latin-1. + // + // Extended ISO 8859-1 Latin-1 for Windows 3.0. + // PCL Symbol Set id: 9U + Windows30Latin1 MIB = 2000 + + // Windows31Latin1 is the MIB identifier with IANA name ISO-8859-1-Windows-3.1-Latin-1. + // + // Extended ISO 8859-1 Latin-1 for Windows 3.1. + // PCL Symbol Set id: 19U + Windows31Latin1 MIB = 2001 + + // Windows31Latin2 is the MIB identifier with IANA name ISO-8859-2-Windows-Latin-2. + // + // Extended ISO 8859-2. Latin-2 for Windows 3.1. + // PCL Symbol Set id: 9E + Windows31Latin2 MIB = 2002 + + // Windows31Latin5 is the MIB identifier with IANA name ISO-8859-9-Windows-Latin-5. + // + // Extended ISO 8859-9. Latin-5 for Windows 3.1 + // PCL Symbol Set id: 5T + Windows31Latin5 MIB = 2003 + + // HPRoman8 is the MIB identifier with IANA name hp-roman8. + // + // LaserJet IIP Printer User's Manual, + // HP part no 33471-90901, Hewlet-Packard, June 1989. + // Reference: RFC1345 + HPRoman8 MIB = 2004 + + // AdobeStandardEncoding is the MIB identifier with IANA name Adobe-Standard-Encoding. + // + // PostScript Language Reference Manual + // PCL Symbol Set id: 10J + AdobeStandardEncoding MIB = 2005 + + // VenturaUS is the MIB identifier with IANA name Ventura-US. + // + // Ventura US. ASCII plus characters typically used in + // publishing, like pilcrow, copyright, registered, trade mark, + // section, dagger, and double dagger in the range A0 (hex) + // to FF (hex). + // PCL Symbol Set id: 14J + VenturaUS MIB = 2006 + + // VenturaInternational is the MIB identifier with IANA name Ventura-International. + // + // Ventura International. ASCII plus coded characters similar + // to Roman8. + // PCL Symbol Set id: 13J + VenturaInternational MIB = 2007 + + // DECMCS is the MIB identifier with IANA name DEC-MCS. + // + // VAX/VMS User's Manual, + // Order Number: AI-Y517A-TE, April 1986. + // Reference: RFC1345 + DECMCS MIB = 2008 + + // PC850Multilingual is the MIB identifier with IANA name IBM850. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + PC850Multilingual MIB = 2009 + + // PC8DanishNorwegian is the MIB identifier with IANA name PC8-Danish-Norwegian. + // + // PC Danish Norwegian + // 8-bit PC set for Danish Norwegian + // PCL Symbol Set id: 11U + PC8DanishNorwegian MIB = 2012 + + // PC862LatinHebrew is the MIB identifier with IANA name IBM862. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + PC862LatinHebrew MIB = 2013 + + // PC8Turkish is the MIB identifier with IANA name PC8-Turkish. + // + // PC Latin Turkish. PCL Symbol Set id: 9T + PC8Turkish MIB = 2014 + + // IBMSymbols is the MIB identifier with IANA name IBM-Symbols. + // + // Presentation Set, CPGID: 259 + IBMSymbols MIB = 2015 + + // IBMThai is the MIB identifier with IANA name IBM-Thai. 
+ // + // Presentation Set, CPGID: 838 + IBMThai MIB = 2016 + + // HPLegal is the MIB identifier with IANA name HP-Legal. + // + // PCL 5 Comparison Guide, Hewlett-Packard, + // HP part number 5961-0510, October 1992 + // PCL Symbol Set id: 1U + HPLegal MIB = 2017 + + // HPPiFont is the MIB identifier with IANA name HP-Pi-font. + // + // PCL 5 Comparison Guide, Hewlett-Packard, + // HP part number 5961-0510, October 1992 + // PCL Symbol Set id: 15U + HPPiFont MIB = 2018 + + // HPMath8 is the MIB identifier with IANA name HP-Math8. + // + // PCL 5 Comparison Guide, Hewlett-Packard, + // HP part number 5961-0510, October 1992 + // PCL Symbol Set id: 8M + HPMath8 MIB = 2019 + + // HPPSMath is the MIB identifier with IANA name Adobe-Symbol-Encoding. + // + // PostScript Language Reference Manual + // PCL Symbol Set id: 5M + HPPSMath MIB = 2020 + + // HPDesktop is the MIB identifier with IANA name HP-DeskTop. + // + // PCL 5 Comparison Guide, Hewlett-Packard, + // HP part number 5961-0510, October 1992 + // PCL Symbol Set id: 7J + HPDesktop MIB = 2021 + + // VenturaMath is the MIB identifier with IANA name Ventura-Math. + // + // PCL 5 Comparison Guide, Hewlett-Packard, + // HP part number 5961-0510, October 1992 + // PCL Symbol Set id: 6M + VenturaMath MIB = 2022 + + // MicrosoftPublishing is the MIB identifier with IANA name Microsoft-Publishing. + // + // PCL 5 Comparison Guide, Hewlett-Packard, + // HP part number 5961-0510, October 1992 + // PCL Symbol Set id: 6J + MicrosoftPublishing MIB = 2023 + + // Windows31J is the MIB identifier with IANA name Windows-31J. + // + // Windows Japanese. A further extension of Shift_JIS + // to include NEC special characters (Row 13), NEC + // selection of IBM extensions (Rows 89 to 92), and IBM + // extensions (Rows 115 to 119). The CCS's are + // JIS X0201:1997, JIS X0208:1997, and these extensions. + // This charset can be used for the top-level media type "text", + // but it is of limited or specialized use (see rfc2278 ). + // PCL Symbol Set id: 19K + Windows31J MIB = 2024 + + // GB2312 is the MIB identifier with IANA name GB2312 (MIME: GB2312). + // + // Chinese for People's Republic of China (PRC) mixed one byte, + // two byte set: + // 20-7E = one byte ASCII + // A1-FE = two byte PRC Kanji + // See GB 2312-80 + // PCL Symbol Set Id: 18C + GB2312 MIB = 2025 + + // Big5 is the MIB identifier with IANA name Big5 (MIME: Big5). + // + // Chinese for Taiwan Multi-byte set. + // PCL Symbol Set Id: 18T + Big5 MIB = 2026 + + // Macintosh is the MIB identifier with IANA name macintosh. + // + // The Unicode Standard ver1.0, ISBN 0-201-56788-1, Oct 1991 + // Reference: RFC1345 + Macintosh MIB = 2027 + + // IBM037 is the MIB identifier with IANA name IBM037. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM037 MIB = 2028 + + // IBM038 is the MIB identifier with IANA name IBM038. + // + // IBM 3174 Character Set Ref, GA27-3831-02, March 1990 + // Reference: RFC1345 + IBM038 MIB = 2029 + + // IBM273 is the MIB identifier with IANA name IBM273. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM273 MIB = 2030 + + // IBM274 is the MIB identifier with IANA name IBM274. + // + // IBM 3174 Character Set Ref, GA27-3831-02, March 1990 + // Reference: RFC1345 + IBM274 MIB = 2031 + + // IBM275 is the MIB identifier with IANA name IBM275. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM275 MIB = 2032 + + // IBM277 is the MIB identifier with IANA name IBM277. 
+ // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM277 MIB = 2033 + + // IBM278 is the MIB identifier with IANA name IBM278. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM278 MIB = 2034 + + // IBM280 is the MIB identifier with IANA name IBM280. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM280 MIB = 2035 + + // IBM281 is the MIB identifier with IANA name IBM281. + // + // IBM 3174 Character Set Ref, GA27-3831-02, March 1990 + // Reference: RFC1345 + IBM281 MIB = 2036 + + // IBM284 is the MIB identifier with IANA name IBM284. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM284 MIB = 2037 + + // IBM285 is the MIB identifier with IANA name IBM285. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM285 MIB = 2038 + + // IBM290 is the MIB identifier with IANA name IBM290. + // + // IBM 3174 Character Set Ref, GA27-3831-02, March 1990 + // Reference: RFC1345 + IBM290 MIB = 2039 + + // IBM297 is the MIB identifier with IANA name IBM297. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM297 MIB = 2040 + + // IBM420 is the MIB identifier with IANA name IBM420. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990, + // IBM NLS RM p 11-11 + // Reference: RFC1345 + IBM420 MIB = 2041 + + // IBM423 is the MIB identifier with IANA name IBM423. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM423 MIB = 2042 + + // IBM424 is the MIB identifier with IANA name IBM424. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM424 MIB = 2043 + + // PC8CodePage437 is the MIB identifier with IANA name IBM437. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + PC8CodePage437 MIB = 2011 + + // IBM500 is the MIB identifier with IANA name IBM500. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM500 MIB = 2044 + + // IBM851 is the MIB identifier with IANA name IBM851. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM851 MIB = 2045 + + // PCp852 is the MIB identifier with IANA name IBM852. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + PCp852 MIB = 2010 + + // IBM855 is the MIB identifier with IANA name IBM855. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM855 MIB = 2046 + + // IBM857 is the MIB identifier with IANA name IBM857. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM857 MIB = 2047 + + // IBM860 is the MIB identifier with IANA name IBM860. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM860 MIB = 2048 + + // IBM861 is the MIB identifier with IANA name IBM861. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM861 MIB = 2049 + + // IBM863 is the MIB identifier with IANA name IBM863. + // + // IBM Keyboard layouts and code pages, PN 07G4586 June 1991 + // Reference: RFC1345 + IBM863 MIB = 2050 + + // IBM864 is the MIB identifier with IANA name IBM864. + // + // IBM Keyboard layouts and code pages, PN 07G4586 June 1991 + // Reference: RFC1345 + IBM864 MIB = 2051 + + // IBM865 is the MIB identifier with IANA name IBM865. + // + // IBM DOS 3.3 Ref (Abridged), 94X9575 (Feb 1987) + // Reference: RFC1345 + IBM865 MIB = 2052 + + // IBM868 is the MIB identifier with IANA name IBM868. 
+ // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM868 MIB = 2053 + + // IBM869 is the MIB identifier with IANA name IBM869. + // + // IBM Keyboard layouts and code pages, PN 07G4586 June 1991 + // Reference: RFC1345 + IBM869 MIB = 2054 + + // IBM870 is the MIB identifier with IANA name IBM870. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM870 MIB = 2055 + + // IBM871 is the MIB identifier with IANA name IBM871. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM871 MIB = 2056 + + // IBM880 is the MIB identifier with IANA name IBM880. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM880 MIB = 2057 + + // IBM891 is the MIB identifier with IANA name IBM891. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM891 MIB = 2058 + + // IBM903 is the MIB identifier with IANA name IBM903. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM903 MIB = 2059 + + // IBBM904 is the MIB identifier with IANA name IBM904. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBBM904 MIB = 2060 + + // IBM905 is the MIB identifier with IANA name IBM905. + // + // IBM 3174 Character Set Ref, GA27-3831-02, March 1990 + // Reference: RFC1345 + IBM905 MIB = 2061 + + // IBM918 is the MIB identifier with IANA name IBM918. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM918 MIB = 2062 + + // IBM1026 is the MIB identifier with IANA name IBM1026. + // + // IBM NLS RM Vol2 SE09-8002-01, March 1990 + // Reference: RFC1345 + IBM1026 MIB = 2063 + + // IBMEBCDICATDE is the MIB identifier with IANA name EBCDIC-AT-DE. + // + // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 + // Reference: RFC1345 + IBMEBCDICATDE MIB = 2064 + + // EBCDICATDEA is the MIB identifier with IANA name EBCDIC-AT-DE-A. + // + // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 + // Reference: RFC1345 + EBCDICATDEA MIB = 2065 + + // EBCDICCAFR is the MIB identifier with IANA name EBCDIC-CA-FR. + // + // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 + // Reference: RFC1345 + EBCDICCAFR MIB = 2066 + + // EBCDICDKNO is the MIB identifier with IANA name EBCDIC-DK-NO. + // + // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 + // Reference: RFC1345 + EBCDICDKNO MIB = 2067 + + // EBCDICDKNOA is the MIB identifier with IANA name EBCDIC-DK-NO-A. + // + // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 + // Reference: RFC1345 + EBCDICDKNOA MIB = 2068 + + // EBCDICFISE is the MIB identifier with IANA name EBCDIC-FI-SE. + // + // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 + // Reference: RFC1345 + EBCDICFISE MIB = 2069 + + // EBCDICFISEA is the MIB identifier with IANA name EBCDIC-FI-SE-A. + // + // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 + // Reference: RFC1345 + EBCDICFISEA MIB = 2070 + + // EBCDICFR is the MIB identifier with IANA name EBCDIC-FR. + // + // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 + // Reference: RFC1345 + EBCDICFR MIB = 2071 + + // EBCDICIT is the MIB identifier with IANA name EBCDIC-IT. + // + // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 + // Reference: RFC1345 + EBCDICIT MIB = 2072 + + // EBCDICPT is the MIB identifier with IANA name EBCDIC-PT. + // + // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 + // Reference: RFC1345 + EBCDICPT MIB = 2073 + + // EBCDICES is the MIB identifier with IANA name EBCDIC-ES. 
+ // + // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 + // Reference: RFC1345 + EBCDICES MIB = 2074 + + // EBCDICESA is the MIB identifier with IANA name EBCDIC-ES-A. + // + // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 + // Reference: RFC1345 + EBCDICESA MIB = 2075 + + // EBCDICESS is the MIB identifier with IANA name EBCDIC-ES-S. + // + // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 + // Reference: RFC1345 + EBCDICESS MIB = 2076 + + // EBCDICUK is the MIB identifier with IANA name EBCDIC-UK. + // + // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 + // Reference: RFC1345 + EBCDICUK MIB = 2077 + + // EBCDICUS is the MIB identifier with IANA name EBCDIC-US. + // + // IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987 + // Reference: RFC1345 + EBCDICUS MIB = 2078 + + // Unknown8BiT is the MIB identifier with IANA name UNKNOWN-8BIT. + // + // Reference: RFC1428 + Unknown8BiT MIB = 2079 + + // Mnemonic is the MIB identifier with IANA name MNEMONIC. + // + // rfc1345 , also known as "mnemonic+ascii+38" + // Reference: RFC1345 + Mnemonic MIB = 2080 + + // Mnem is the MIB identifier with IANA name MNEM. + // + // rfc1345 , also known as "mnemonic+ascii+8200" + // Reference: RFC1345 + Mnem MIB = 2081 + + // VISCII is the MIB identifier with IANA name VISCII. + // + // rfc1456 + // Reference: RFC1456 + VISCII MIB = 2082 + + // VIQR is the MIB identifier with IANA name VIQR. + // + // rfc1456 + // Reference: RFC1456 + VIQR MIB = 2083 + + // KOI8R is the MIB identifier with IANA name KOI8-R (MIME: KOI8-R). + // + // rfc1489 , based on GOST-19768-74, ISO-6937/8, + // INIS-Cyrillic, ISO-5427. + // Reference: RFC1489 + KOI8R MIB = 2084 + + // HZGB2312 is the MIB identifier with IANA name HZ-GB-2312. + // + // rfc1842 , rfc1843 rfc1843 rfc1842 + HZGB2312 MIB = 2085 + + // IBM866 is the MIB identifier with IANA name IBM866. + // + // IBM NLDG Volume 2 (SE09-8002-03) August 1994 + IBM866 MIB = 2086 + + // PC775Baltic is the MIB identifier with IANA name IBM775. + // + // HP PCL 5 Comparison Guide (P/N 5021-0329) pp B-13, 1996 + PC775Baltic MIB = 2087 + + // KOI8U is the MIB identifier with IANA name KOI8-U. + // + // rfc2319 + // Reference: RFC2319 + KOI8U MIB = 2088 + + // IBM00858 is the MIB identifier with IANA name IBM00858. + // + // IBM See https://www.iana.org/assignments/charset-reg/IBM00858 + IBM00858 MIB = 2089 + + // IBM00924 is the MIB identifier with IANA name IBM00924. + // + // IBM See https://www.iana.org/assignments/charset-reg/IBM00924 + IBM00924 MIB = 2090 + + // IBM01140 is the MIB identifier with IANA name IBM01140. + // + // IBM See https://www.iana.org/assignments/charset-reg/IBM01140 + IBM01140 MIB = 2091 + + // IBM01141 is the MIB identifier with IANA name IBM01141. + // + // IBM See https://www.iana.org/assignments/charset-reg/IBM01141 + IBM01141 MIB = 2092 + + // IBM01142 is the MIB identifier with IANA name IBM01142. + // + // IBM See https://www.iana.org/assignments/charset-reg/IBM01142 + IBM01142 MIB = 2093 + + // IBM01143 is the MIB identifier with IANA name IBM01143. + // + // IBM See https://www.iana.org/assignments/charset-reg/IBM01143 + IBM01143 MIB = 2094 + + // IBM01144 is the MIB identifier with IANA name IBM01144. + // + // IBM See https://www.iana.org/assignments/charset-reg/IBM01144 + IBM01144 MIB = 2095 + + // IBM01145 is the MIB identifier with IANA name IBM01145. + // + // IBM See https://www.iana.org/assignments/charset-reg/IBM01145 + IBM01145 MIB = 2096 + + // IBM01146 is the MIB identifier with IANA name IBM01146. 
+ // + // IBM See https://www.iana.org/assignments/charset-reg/IBM01146 + IBM01146 MIB = 2097 + + // IBM01147 is the MIB identifier with IANA name IBM01147. + // + // IBM See https://www.iana.org/assignments/charset-reg/IBM01147 + IBM01147 MIB = 2098 + + // IBM01148 is the MIB identifier with IANA name IBM01148. + // + // IBM See https://www.iana.org/assignments/charset-reg/IBM01148 + IBM01148 MIB = 2099 + + // IBM01149 is the MIB identifier with IANA name IBM01149. + // + // IBM See https://www.iana.org/assignments/charset-reg/IBM01149 + IBM01149 MIB = 2100 + + // Big5HKSCS is the MIB identifier with IANA name Big5-HKSCS. + // + // See https://www.iana.org/assignments/charset-reg/Big5-HKSCS + Big5HKSCS MIB = 2101 + + // IBM1047 is the MIB identifier with IANA name IBM1047. + // + // IBM1047 (EBCDIC Latin 1/Open Systems) https://www-1.ibm.com/servers/eserver/iseries/software/globalization/pdf/cp01047z.pdf + IBM1047 MIB = 2102 + + // PTCP154 is the MIB identifier with IANA name PTCP154. + // + // See https://www.iana.org/assignments/charset-reg/PTCP154 + PTCP154 MIB = 2103 + + // Amiga1251 is the MIB identifier with IANA name Amiga-1251. + // + // See https://www.amiga.ultranet.ru/Amiga-1251.html + Amiga1251 MIB = 2104 + + // KOI7switched is the MIB identifier with IANA name KOI7-switched. + // + // See https://www.iana.org/assignments/charset-reg/KOI7-switched + KOI7switched MIB = 2105 + + // BRF is the MIB identifier with IANA name BRF. + // + // See https://www.iana.org/assignments/charset-reg/BRF + BRF MIB = 2106 + + // TSCII is the MIB identifier with IANA name TSCII. + // + // See https://www.iana.org/assignments/charset-reg/TSCII + TSCII MIB = 2107 + + // CP51932 is the MIB identifier with IANA name CP51932. + // + // See https://www.iana.org/assignments/charset-reg/CP51932 + CP51932 MIB = 2108 + + // Windows874 is the MIB identifier with IANA name windows-874. + // + // See https://www.iana.org/assignments/charset-reg/windows-874 + Windows874 MIB = 2109 + + // Windows1250 is the MIB identifier with IANA name windows-1250. + // + // Microsoft https://www.iana.org/assignments/charset-reg/windows-1250 + Windows1250 MIB = 2250 + + // Windows1251 is the MIB identifier with IANA name windows-1251. + // + // Microsoft https://www.iana.org/assignments/charset-reg/windows-1251 + Windows1251 MIB = 2251 + + // Windows1252 is the MIB identifier with IANA name windows-1252. + // + // Microsoft https://www.iana.org/assignments/charset-reg/windows-1252 + Windows1252 MIB = 2252 + + // Windows1253 is the MIB identifier with IANA name windows-1253. + // + // Microsoft https://www.iana.org/assignments/charset-reg/windows-1253 + Windows1253 MIB = 2253 + + // Windows1254 is the MIB identifier with IANA name windows-1254. + // + // Microsoft https://www.iana.org/assignments/charset-reg/windows-1254 + Windows1254 MIB = 2254 + + // Windows1255 is the MIB identifier with IANA name windows-1255. + // + // Microsoft https://www.iana.org/assignments/charset-reg/windows-1255 + Windows1255 MIB = 2255 + + // Windows1256 is the MIB identifier with IANA name windows-1256. + // + // Microsoft https://www.iana.org/assignments/charset-reg/windows-1256 + Windows1256 MIB = 2256 + + // Windows1257 is the MIB identifier with IANA name windows-1257. + // + // Microsoft https://www.iana.org/assignments/charset-reg/windows-1257 + Windows1257 MIB = 2257 + + // Windows1258 is the MIB identifier with IANA name windows-1258. 
+ // + // Microsoft https://www.iana.org/assignments/charset-reg/windows-1258 + Windows1258 MIB = 2258 + + // TIS620 is the MIB identifier with IANA name TIS-620. + // + // Thai Industrial Standards Institute (TISI) + TIS620 MIB = 2259 + + // CP50220 is the MIB identifier with IANA name CP50220. + // + // See https://www.iana.org/assignments/charset-reg/CP50220 + CP50220 MIB = 2260 +) diff --git a/tools/vendor/golang.org/x/text/encoding/internal/internal.go b/tools/vendor/golang.org/x/text/encoding/internal/internal.go new file mode 100644 index 000000000..413e6fc6d --- /dev/null +++ b/tools/vendor/golang.org/x/text/encoding/internal/internal.go @@ -0,0 +1,75 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package internal contains code that is shared among encoding implementations. +package internal + +import ( + "golang.org/x/text/encoding" + "golang.org/x/text/encoding/internal/identifier" + "golang.org/x/text/transform" +) + +// Encoding is an implementation of the Encoding interface that adds the String +// and ID methods to an existing encoding. +type Encoding struct { + encoding.Encoding + Name string + MIB identifier.MIB +} + +// _ verifies that Encoding implements identifier.Interface. +var _ identifier.Interface = (*Encoding)(nil) + +func (e *Encoding) String() string { + return e.Name +} + +func (e *Encoding) ID() (mib identifier.MIB, other string) { + return e.MIB, "" +} + +// SimpleEncoding is an Encoding that combines two Transformers. +type SimpleEncoding struct { + Decoder transform.Transformer + Encoder transform.Transformer +} + +func (e *SimpleEncoding) NewDecoder() *encoding.Decoder { + return &encoding.Decoder{Transformer: e.Decoder} +} + +func (e *SimpleEncoding) NewEncoder() *encoding.Encoder { + return &encoding.Encoder{Transformer: e.Encoder} +} + +// FuncEncoding is an Encoding that combines two functions returning a new +// Transformer. +type FuncEncoding struct { + Decoder func() transform.Transformer + Encoder func() transform.Transformer +} + +func (e FuncEncoding) NewDecoder() *encoding.Decoder { + return &encoding.Decoder{Transformer: e.Decoder()} +} + +func (e FuncEncoding) NewEncoder() *encoding.Encoder { + return &encoding.Encoder{Transformer: e.Encoder()} +} + +// A RepertoireError indicates a rune is not in the repertoire of a destination +// encoding. It is associated with an encoding-specific suggested replacement +// byte. +type RepertoireError byte + +// Error implements the error interface. +func (r RepertoireError) Error() string { + return "encoding: rune not supported by encoding." +} + +// Replacement returns the replacement string associated with this error. +func (r RepertoireError) Replacement() byte { return byte(r) } + +var ErrASCIIReplacement = RepertoireError(encoding.ASCIISub) diff --git a/tools/vendor/golang.org/x/text/encoding/unicode/override.go b/tools/vendor/golang.org/x/text/encoding/unicode/override.go new file mode 100644 index 000000000..35d62fcc9 --- /dev/null +++ b/tools/vendor/golang.org/x/text/encoding/unicode/override.go @@ -0,0 +1,82 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
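The constant block that closes above is the IANA MIB registry that x/text encodings report through identifier.Interface. For a sense of where these identifiers surface in the public API, here is a minimal sketch (not part of the vendored tree; it assumes the golang.org/x/text module is on the build list) that resolves an IANA charset name to an encoding and back using golang.org/x/text/encoding/ianaindex, whose reverse lookups are keyed off the MIB values that ID() returns:

package main

import (
	"fmt"

	"golang.org/x/text/encoding/ianaindex"
)

func main() {
	// Resolve an IANA charset name to an encoding.Encoding...
	enc, err := ianaindex.IANA.Encoding("ISO-8859-15")
	if err != nil {
		panic(err)
	}
	// ...and map the encoding back to its canonical IANA name.
	name, err := ianaindex.IANA.Name(enc)
	if err != nil {
		panic(err)
	}
	fmt.Println(name) // ISO-8859-15
}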
+ +package unicode + +import ( + "golang.org/x/text/transform" +) + +// BOMOverride returns a new decoder transformer that is identical to fallback, +// except that the presence of a Byte Order Mark at the start of the input +// causes it to switch to the corresponding Unicode decoding. It will only +// consider BOMs for UTF-8, UTF-16BE, and UTF-16LE. +// +// This differs from using ExpectBOM by allowing a BOM to switch to UTF-8, not +// just UTF-16 variants, and allowing falling back to any encoding scheme. +// +// This technique is recommended by the W3C for use in HTML 5: "For +// compatibility with deployed content, the byte order mark (also known as BOM) +// is considered more authoritative than anything else." +// http://www.w3.org/TR/encoding/#specification-hooks +// +// Using BOMOverride is mostly intended for use cases where the first characters +// of a fallback encoding are known to not be a BOM, for example, for valid HTML +// and most encodings. +func BOMOverride(fallback transform.Transformer) transform.Transformer { + // TODO: possibly allow a variadic argument of unicode encodings to allow + // specifying details of which fallbacks are supported as well as + // specifying the details of the implementations. This would also allow for + // support for UTF-32, which should not be supported by default. + return &bomOverride{fallback: fallback} +} + +type bomOverride struct { + fallback transform.Transformer + current transform.Transformer +} + +func (d *bomOverride) Reset() { + d.current = nil + d.fallback.Reset() +} + +var ( + // TODO: we could use decode functions here, instead of allocating a new + // decoder on every NewDecoder as IgnoreBOM decoders can be stateless. + utf16le = UTF16(LittleEndian, IgnoreBOM) + utf16be = UTF16(BigEndian, IgnoreBOM) +) + +const utf8BOM = "\ufeff" + +func (d *bomOverride) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + if d.current != nil { + return d.current.Transform(dst, src, atEOF) + } + if len(src) < 3 && !atEOF { + return 0, 0, transform.ErrShortSrc + } + d.current = d.fallback + bomSize := 0 + if len(src) >= 2 { + if src[0] == 0xFF && src[1] == 0xFE { + d.current = utf16le.NewDecoder() + bomSize = 2 + } else if src[0] == 0xFE && src[1] == 0xFF { + d.current = utf16be.NewDecoder() + bomSize = 2 + } else if len(src) >= 3 && + src[0] == utf8BOM[0] && + src[1] == utf8BOM[1] && + src[2] == utf8BOM[2] { + d.current = transform.Nop + bomSize = 3 + } + } + if bomSize < len(src) { + nDst, nSrc, err = d.current.Transform(dst, src[bomSize:], atEOF) + } + return nDst, nSrc + bomSize, err +} diff --git a/tools/vendor/golang.org/x/text/encoding/unicode/unicode.go b/tools/vendor/golang.org/x/text/encoding/unicode/unicode.go new file mode 100644 index 000000000..dd99ad14d --- /dev/null +++ b/tools/vendor/golang.org/x/text/encoding/unicode/unicode.go @@ -0,0 +1,512 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package unicode provides Unicode encodings such as UTF-16. 
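The override.go file added above packages the W3C BOM-sniffing rule as a composable transformer. A minimal usage sketch (not part of the vendored tree): wrap a legacy decoder with BOMOverride so that BOM-tagged UTF-8 or UTF-16 input decodes as Unicode, while untagged input falls back to the legacy charset.

package main

import (
	"fmt"
	"io"
	"strings"

	"golang.org/x/text/encoding/charmap"
	"golang.org/x/text/encoding/unicode"
	"golang.org/x/text/transform"
)

func main() {
	// Treat input as Windows-1252 unless a UTF-8 or UTF-16 BOM says otherwise.
	dec := unicode.BOMOverride(charmap.Windows1252.NewDecoder())

	// "\xef\xbb\xbf" is a UTF-8 BOM, so the fallback is bypassed here and the
	// BOM itself is stripped from the output.
	in := strings.NewReader("\xef\xbb\xbfcaf\xc3\xa9")
	out, err := io.ReadAll(transform.NewReader(in, dec))
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // café
}

Because bomOverride withholds its decision until it has seen up to three bytes (returning transform.ErrShortSrc before then), the wrapper is safe to use on streaming readers as well.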
+package unicode // import "golang.org/x/text/encoding/unicode" + +import ( + "bytes" + "errors" + "unicode/utf16" + "unicode/utf8" + + "golang.org/x/text/encoding" + "golang.org/x/text/encoding/internal" + "golang.org/x/text/encoding/internal/identifier" + "golang.org/x/text/internal/utf8internal" + "golang.org/x/text/runes" + "golang.org/x/text/transform" +) + +// TODO: I think the Transformers really should return errors on unmatched +// surrogate pairs and odd numbers of bytes. This is not required by RFC 2781, +// which leaves it open, but is suggested by WhatWG. It will allow for all error +// modes as defined by WhatWG: fatal, HTML and Replacement. This would require +// the introduction of some kind of error type for conveying the erroneous code +// point. + +// UTF8 is the UTF-8 encoding. It neither removes nor adds byte order marks. +var UTF8 encoding.Encoding = utf8enc + +// UTF8BOM is an UTF-8 encoding where the decoder strips a leading byte order +// mark while the encoder adds one. +// +// Some editors add a byte order mark as a signature to UTF-8 files. Although +// the byte order mark is not useful for detecting byte order in UTF-8, it is +// sometimes used as a convention to mark UTF-8-encoded files. This relies on +// the observation that the UTF-8 byte order mark is either an illegal or at +// least very unlikely sequence in any other character encoding. +var UTF8BOM encoding.Encoding = utf8bomEncoding{} + +type utf8bomEncoding struct{} + +func (utf8bomEncoding) String() string { + return "UTF-8-BOM" +} + +func (utf8bomEncoding) ID() (identifier.MIB, string) { + return identifier.Unofficial, "x-utf8bom" +} + +func (utf8bomEncoding) NewEncoder() *encoding.Encoder { + return &encoding.Encoder{ + Transformer: &utf8bomEncoder{t: runes.ReplaceIllFormed()}, + } +} + +func (utf8bomEncoding) NewDecoder() *encoding.Decoder { + return &encoding.Decoder{Transformer: &utf8bomDecoder{}} +} + +var utf8enc = &internal.Encoding{ + &internal.SimpleEncoding{utf8Decoder{}, runes.ReplaceIllFormed()}, + "UTF-8", + identifier.UTF8, +} + +type utf8bomDecoder struct { + checked bool +} + +func (t *utf8bomDecoder) Reset() { + t.checked = false +} + +func (t *utf8bomDecoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + if !t.checked { + if !atEOF && len(src) < len(utf8BOM) { + if len(src) == 0 { + return 0, 0, nil + } + return 0, 0, transform.ErrShortSrc + } + if bytes.HasPrefix(src, []byte(utf8BOM)) { + nSrc += len(utf8BOM) + src = src[len(utf8BOM):] + } + t.checked = true + } + nDst, n, err := utf8Decoder.Transform(utf8Decoder{}, dst[nDst:], src, atEOF) + nSrc += n + return nDst, nSrc, err +} + +type utf8bomEncoder struct { + written bool + t transform.Transformer +} + +func (t *utf8bomEncoder) Reset() { + t.written = false + t.t.Reset() +} + +func (t *utf8bomEncoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + if !t.written { + if len(dst) < len(utf8BOM) { + return nDst, 0, transform.ErrShortDst + } + nDst = copy(dst, utf8BOM) + t.written = true + } + n, nSrc, err := utf8Decoder.Transform(utf8Decoder{}, dst[nDst:], src, atEOF) + nDst += n + return nDst, nSrc, err +} + +type utf8Decoder struct{ transform.NopResetter } + +func (utf8Decoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + var pSrc int // point from which to start copy in src + var accept utf8internal.AcceptRange + + // The decoder can only make the input larger, not smaller. 
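+	// Clamping the scan to len(dst) bytes of input (below) is safe because
+	// valid UTF-8 passes through byte for byte; only the three-byte U+FFFD
+	// replacements emitted for invalid sequences can expand the output, and
+	// the handleInvalid path re-checks the remaining room in dst and shrinks
+	// n whenever one is written. atEOF is cleared in the clamped case so a
+	// multi-byte sequence cut off at the clamp is not misreported as
+	// ill-formed.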
+ n := len(src) + if len(dst) < n { + err = transform.ErrShortDst + n = len(dst) + atEOF = false + } + for nSrc < n { + c := src[nSrc] + if c < utf8.RuneSelf { + nSrc++ + continue + } + first := utf8internal.First[c] + size := int(first & utf8internal.SizeMask) + if first == utf8internal.FirstInvalid { + goto handleInvalid // invalid starter byte + } + accept = utf8internal.AcceptRanges[first>>utf8internal.AcceptShift] + if nSrc+size > n { + if !atEOF { + // We may stop earlier than necessary here if the short sequence + // has invalid bytes. Not checking for this simplifies the code + // and may avoid duplicate computations in certain conditions. + if err == nil { + err = transform.ErrShortSrc + } + break + } + // Determine the maximal subpart of an ill-formed subsequence. + switch { + case nSrc+1 >= n || src[nSrc+1] < accept.Lo || accept.Hi < src[nSrc+1]: + size = 1 + case nSrc+2 >= n || src[nSrc+2] < utf8internal.LoCB || utf8internal.HiCB < src[nSrc+2]: + size = 2 + default: + size = 3 // As we are short, the maximum is 3. + } + goto handleInvalid + } + if c = src[nSrc+1]; c < accept.Lo || accept.Hi < c { + size = 1 + goto handleInvalid // invalid continuation byte + } else if size == 2 { + } else if c = src[nSrc+2]; c < utf8internal.LoCB || utf8internal.HiCB < c { + size = 2 + goto handleInvalid // invalid continuation byte + } else if size == 3 { + } else if c = src[nSrc+3]; c < utf8internal.LoCB || utf8internal.HiCB < c { + size = 3 + goto handleInvalid // invalid continuation byte + } + nSrc += size + continue + + handleInvalid: + // Copy the scanned input so far. + nDst += copy(dst[nDst:], src[pSrc:nSrc]) + + // Append RuneError to the destination. + const runeError = "\ufffd" + if nDst+len(runeError) > len(dst) { + return nDst, nSrc, transform.ErrShortDst + } + nDst += copy(dst[nDst:], runeError) + + // Skip the maximal subpart of an ill-formed subsequence according to + // the W3C standard way instead of the Go way. This Transform is + // probably the only place in the text repo where it is warranted. + nSrc += size + pSrc = nSrc + + // Recompute the maximum source length. + if sz := len(dst) - nDst; sz < len(src)-nSrc { + err = transform.ErrShortDst + n = nSrc + sz + atEOF = false + } + } + return nDst + copy(dst[nDst:], src[pSrc:nSrc]), nSrc, err +} + +// UTF16 returns a UTF-16 Encoding for the given default endianness and byte +// order mark (BOM) policy. +// +// When decoding from UTF-16 to UTF-8, if the BOMPolicy is IgnoreBOM then +// neither BOMs U+FEFF nor noncharacters U+FFFE in the input stream will affect +// the endianness used for decoding, and will instead be output as their +// standard UTF-8 encodings: "\xef\xbb\xbf" and "\xef\xbf\xbe". If the BOMPolicy +// is UseBOM or ExpectBOM a staring BOM is not written to the UTF-8 output. +// Instead, it overrides the default endianness e for the remainder of the +// transformation. Any subsequent BOMs U+FEFF or noncharacters U+FFFE will not +// affect the endianness used, and will instead be output as their standard +// UTF-8 encodings. For UseBOM, if there is no starting BOM, it will proceed +// with the default Endianness. For ExpectBOM, in that case, the transformation +// will return early with an ErrMissingBOM error. +// +// When encoding from UTF-8 to UTF-16, a BOM will be inserted at the start of +// the output if the BOMPolicy is UseBOM or ExpectBOM. Otherwise, a BOM will not +// be inserted. The UTF-8 input does not need to contain a BOM. +// +// There is no concept of a 'native' endianness. 
If the UTF-16 data is produced +// and consumed in a greater context that implies a certain endianness, use +// IgnoreBOM. Otherwise, use ExpectBOM and always produce and consume a BOM. +// +// In the language of https://www.unicode.org/faq/utf_bom.html#bom10, IgnoreBOM +// corresponds to "Where the precise type of the data stream is known... the +// BOM should not be used" and ExpectBOM corresponds to "A particular +// protocol... may require use of the BOM". +func UTF16(e Endianness, b BOMPolicy) encoding.Encoding { + return utf16Encoding{config{e, b}, mibValue[e][b&bomMask]} +} + +// mibValue maps Endianness and BOMPolicy settings to MIB constants. Note that +// some configurations map to the same MIB identifier. RFC 2781 has requirements +// and recommendations. Some of the "configurations" are merely recommendations, +// so multiple configurations could match. +var mibValue = map[Endianness][numBOMValues]identifier.MIB{ + BigEndian: [numBOMValues]identifier.MIB{ + IgnoreBOM: identifier.UTF16BE, + UseBOM: identifier.UTF16, // BigEnding default is preferred by RFC 2781. + // TODO: acceptBOM | strictBOM would map to UTF16BE as well. + }, + LittleEndian: [numBOMValues]identifier.MIB{ + IgnoreBOM: identifier.UTF16LE, + UseBOM: identifier.UTF16, // LittleEndian default is allowed and preferred on Windows. + // TODO: acceptBOM | strictBOM would map to UTF16LE as well. + }, + // ExpectBOM is not widely used and has no valid MIB identifier. +} + +// All lists a configuration for each IANA-defined UTF-16 variant. +var All = []encoding.Encoding{ + UTF8, + UTF16(BigEndian, UseBOM), + UTF16(BigEndian, IgnoreBOM), + UTF16(LittleEndian, IgnoreBOM), +} + +// BOMPolicy is a UTF-16 encoding's byte order mark policy. +type BOMPolicy uint8 + +const ( + writeBOM BOMPolicy = 0x01 + acceptBOM BOMPolicy = 0x02 + requireBOM BOMPolicy = 0x04 + bomMask BOMPolicy = 0x07 + + // HACK: numBOMValues == 8 triggers a bug in the 1.4 compiler (cannot have a + // map of an array of length 8 of a type that is also used as a key or value + // in another map). See golang.org/issue/11354. + // TODO: consider changing this value back to 8 if the use of 1.4.* has + // been minimized. + numBOMValues = 8 + 1 + + // IgnoreBOM means to ignore any byte order marks. + IgnoreBOM BOMPolicy = 0 + // Common and RFC 2781-compliant interpretation for UTF-16BE/LE. + + // UseBOM means that the UTF-16 form may start with a byte order mark, which + // will be used to override the default encoding. + UseBOM BOMPolicy = writeBOM | acceptBOM + // Common and RFC 2781-compliant interpretation for UTF-16. + + // ExpectBOM means that the UTF-16 form must start with a byte order mark, + // which will be used to override the default encoding. + ExpectBOM BOMPolicy = writeBOM | acceptBOM | requireBOM + // Used in Java as Unicode (not to be confused with Java's UTF-16) and + // ICU's UTF-16,version=1. Not compliant with RFC 2781. + + // TODO (maybe): strictBOM: BOM must match Endianness. This would allow: + // - UTF-16(B|L)E,version=1: writeBOM | acceptBOM | requireBOM | strictBOM + // (UnicodeBig and UnicodeLittle in Java) + // - RFC 2781-compliant, but less common interpretation for UTF-16(B|L)E: + // acceptBOM | strictBOM (e.g. assigned to CheckBOM). + // This addition would be consistent with supporting ExpectBOM. +) + +// Endianness is a UTF-16 encoding's default endianness. +type Endianness bool + +const ( + // BigEndian is UTF-16BE. + BigEndian Endianness = false + // LittleEndian is UTF-16LE. 
+ LittleEndian Endianness = true +) + +// ErrMissingBOM means that decoding UTF-16 input with ExpectBOM did not find a +// starting byte order mark. +var ErrMissingBOM = errors.New("encoding: missing byte order mark") + +type utf16Encoding struct { + config + mib identifier.MIB +} + +type config struct { + endianness Endianness + bomPolicy BOMPolicy +} + +func (u utf16Encoding) NewDecoder() *encoding.Decoder { + return &encoding.Decoder{Transformer: &utf16Decoder{ + initial: u.config, + current: u.config, + }} +} + +func (u utf16Encoding) NewEncoder() *encoding.Encoder { + return &encoding.Encoder{Transformer: &utf16Encoder{ + endianness: u.endianness, + initialBOMPolicy: u.bomPolicy, + currentBOMPolicy: u.bomPolicy, + }} +} + +func (u utf16Encoding) ID() (mib identifier.MIB, other string) { + return u.mib, "" +} + +func (u utf16Encoding) String() string { + e, b := "B", "" + if u.endianness == LittleEndian { + e = "L" + } + switch u.bomPolicy { + case ExpectBOM: + b = "Expect" + case UseBOM: + b = "Use" + case IgnoreBOM: + b = "Ignore" + } + return "UTF-16" + e + "E (" + b + " BOM)" +} + +type utf16Decoder struct { + initial config + current config +} + +func (u *utf16Decoder) Reset() { + u.current = u.initial +} + +func (u *utf16Decoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + if len(src) < 2 && atEOF && u.current.bomPolicy&requireBOM != 0 { + return 0, 0, ErrMissingBOM + } + if len(src) == 0 { + return 0, 0, nil + } + if len(src) >= 2 && u.current.bomPolicy&acceptBOM != 0 { + switch { + case src[0] == 0xfe && src[1] == 0xff: + u.current.endianness = BigEndian + nSrc = 2 + case src[0] == 0xff && src[1] == 0xfe: + u.current.endianness = LittleEndian + nSrc = 2 + default: + if u.current.bomPolicy&requireBOM != 0 { + return 0, 0, ErrMissingBOM + } + } + u.current.bomPolicy = IgnoreBOM + } + + var r rune + var dSize, sSize int + for nSrc < len(src) { + if nSrc+1 < len(src) { + x := uint16(src[nSrc+0])<<8 | uint16(src[nSrc+1]) + if u.current.endianness == LittleEndian { + x = x>>8 | x<<8 + } + r, sSize = rune(x), 2 + if utf16.IsSurrogate(r) { + if nSrc+3 < len(src) { + x = uint16(src[nSrc+2])<<8 | uint16(src[nSrc+3]) + if u.current.endianness == LittleEndian { + x = x>>8 | x<<8 + } + // Save for next iteration if it is not a high surrogate. + if isHighSurrogate(rune(x)) { + r, sSize = utf16.DecodeRune(r, rune(x)), 4 + } + } else if !atEOF { + err = transform.ErrShortSrc + break + } + } + if dSize = utf8.RuneLen(r); dSize < 0 { + r, dSize = utf8.RuneError, 3 + } + } else if atEOF { + // Single trailing byte. + r, dSize, sSize = utf8.RuneError, 3, 1 + } else { + err = transform.ErrShortSrc + break + } + if nDst+dSize > len(dst) { + err = transform.ErrShortDst + break + } + nDst += utf8.EncodeRune(dst[nDst:], r) + nSrc += sSize + } + return nDst, nSrc, err +} + +func isHighSurrogate(r rune) bool { + return 0xDC00 <= r && r <= 0xDFFF +} + +type utf16Encoder struct { + endianness Endianness + initialBOMPolicy BOMPolicy + currentBOMPolicy BOMPolicy +} + +func (u *utf16Encoder) Reset() { + u.currentBOMPolicy = u.initialBOMPolicy +} + +func (u *utf16Encoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + if u.currentBOMPolicy&writeBOM != 0 { + if len(dst) < 2 { + return 0, 0, transform.ErrShortDst + } + dst[0], dst[1] = 0xfe, 0xff + u.currentBOMPolicy = IgnoreBOM + nDst = 2 + } + + r, size := rune(0), 0 + for nSrc < len(src) { + r = rune(src[nSrc]) + + // Decode a 1-byte rune. 
+ if r < utf8.RuneSelf { + size = 1 + + } else { + // Decode a multi-byte rune. + r, size = utf8.DecodeRune(src[nSrc:]) + if size == 1 { + // All valid runes of size 1 (those below utf8.RuneSelf) were + // handled above. We have invalid UTF-8 or we haven't seen the + // full character yet. + if !atEOF && !utf8.FullRune(src[nSrc:]) { + err = transform.ErrShortSrc + break + } + } + } + + if r <= 0xffff { + if nDst+2 > len(dst) { + err = transform.ErrShortDst + break + } + dst[nDst+0] = uint8(r >> 8) + dst[nDst+1] = uint8(r) + nDst += 2 + } else { + if nDst+4 > len(dst) { + err = transform.ErrShortDst + break + } + r1, r2 := utf16.EncodeRune(r) + dst[nDst+0] = uint8(r1 >> 8) + dst[nDst+1] = uint8(r1) + dst[nDst+2] = uint8(r2 >> 8) + dst[nDst+3] = uint8(r2) + nDst += 4 + } + nSrc += size + } + + if u.endianness == LittleEndian { + for i := 0; i < nDst; i += 2 { + dst[i], dst[i+1] = dst[i+1], dst[i] + } + } + return nDst, nSrc, err +} diff --git a/tools/vendor/golang.org/x/text/internal/number/format.go b/tools/vendor/golang.org/x/text/internal/number/format.go index cd94c5dc4..1aadcf407 100644 --- a/tools/vendor/golang.org/x/text/internal/number/format.go +++ b/tools/vendor/golang.org/x/text/internal/number/format.go @@ -394,9 +394,7 @@ func appendScientific(dst []byte, f *Formatter, n *Digits) (b []byte, postPre, p exp := n.Exp - int32(n.Comma) exponential := f.Symbol(SymExponential) if exponential == "E" { - dst = append(dst, "\u202f"...) // NARROW NO-BREAK SPACE dst = append(dst, f.Symbol(SymSuperscriptingExponent)...) - dst = append(dst, "\u202f"...) // NARROW NO-BREAK SPACE dst = f.AppendDigit(dst, 1) dst = f.AppendDigit(dst, 0) switch { diff --git a/tools/vendor/golang.org/x/text/internal/utf8internal/utf8internal.go b/tools/vendor/golang.org/x/text/internal/utf8internal/utf8internal.go new file mode 100644 index 000000000..e5c53b1b3 --- /dev/null +++ b/tools/vendor/golang.org/x/text/internal/utf8internal/utf8internal.go @@ -0,0 +1,87 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package utf8internal contains low-level utf8-related constants, tables, etc. +// that are used internally by the text package. +package utf8internal + +// The default lowest and highest continuation byte. +const ( + LoCB = 0x80 // 1000 0000 + HiCB = 0xBF // 1011 1111 +) + +// Constants related to getting information of first bytes of UTF-8 sequences. +const ( + // ASCII identifies a UTF-8 byte as ASCII. + ASCII = as + + // FirstInvalid indicates a byte is invalid as a first byte of a UTF-8 + // sequence. + FirstInvalid = xx + + // SizeMask is a mask for the size bits. Use use x&SizeMask to get the size. + SizeMask = 7 + + // AcceptShift is the right-shift count for the first byte info byte to get + // the index into the AcceptRanges table. See AcceptRanges. + AcceptShift = 4 + + // The names of these constants are chosen to give nice alignment in the + // table below. The first nibble is an index into acceptRanges or F for + // special one-byte cases. The second nibble is the Rune length or the + // Status for the special one-byte case. + xx = 0xF1 // invalid: size 1 + as = 0xF0 // ASCII: size 1 + s1 = 0x02 // accept 0, size 2 + s2 = 0x13 // accept 1, size 3 + s3 = 0x03 // accept 0, size 3 + s4 = 0x23 // accept 2, size 3 + s5 = 0x34 // accept 3, size 4 + s6 = 0x04 // accept 0, size 4 + s7 = 0x44 // accept 4, size 4 +) + +// First is information about the first byte in a UTF-8 sequence. 
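Returning to the UTF16 constructor and the BOM-policy matrix defined in unicode.go above, here is a minimal round-trip sketch (not part of the vendored tree) showing UseBOM emitting and then consuming a little-endian byte order mark:

package main

import (
	"fmt"

	"golang.org/x/text/encoding/unicode"
)

func main() {
	enc := unicode.UTF16(unicode.LittleEndian, unicode.UseBOM)

	// The encoder prepends a BOM matching the chosen endianness.
	utf16Bytes, err := enc.NewEncoder().Bytes([]byte("héllo"))
	if err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", utf16Bytes[:2]) // ff fe: the little-endian BOM

	// The decoder consumes the BOM and uses it to confirm the endianness.
	roundTrip, err := enc.NewDecoder().Bytes(utf16Bytes)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(roundTrip)) // héllo
}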
+var First = [256]uint8{ + // 1 2 3 4 5 6 7 8 9 A B C D E F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x00-0x0F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x10-0x1F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x20-0x2F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x30-0x3F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x40-0x4F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x50-0x5F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x60-0x6F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x70-0x7F + // 1 2 3 4 5 6 7 8 9 A B C D E F + xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0x80-0x8F + xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0x90-0x9F + xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xA0-0xAF + xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xB0-0xBF + xx, xx, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, // 0xC0-0xCF + s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, // 0xD0-0xDF + s2, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s4, s3, s3, // 0xE0-0xEF + s5, s6, s6, s6, s7, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xF0-0xFF +} + +// AcceptRange gives the range of valid values for the second byte in a UTF-8 +// sequence for any value for First that is not ASCII or FirstInvalid. +type AcceptRange struct { + Lo uint8 // lowest value for second byte. + Hi uint8 // highest value for second byte. +} + +// AcceptRanges is a slice of AcceptRange values. For a given byte sequence b +// +// AcceptRanges[First[b[0]]>>AcceptShift] +// +// will give the value of AcceptRange for the multi-byte UTF-8 sequence starting +// at b[0]. +var AcceptRanges = [...]AcceptRange{ + 0: {LoCB, HiCB}, + 1: {0xA0, HiCB}, + 2: {LoCB, 0x9F}, + 3: {0x90, HiCB}, + 4: {LoCB, 0x8F}, +} diff --git a/tools/vendor/golang.org/x/text/language/parse.go b/tools/vendor/golang.org/x/text/language/parse.go index 4d57222e7..053336e28 100644 --- a/tools/vendor/golang.org/x/text/language/parse.go +++ b/tools/vendor/golang.org/x/text/language/parse.go @@ -59,7 +59,7 @@ func (c CanonType) Parse(s string) (t Tag, err error) { if changed { tt.RemakeString() } - return makeTag(tt), err + return makeTag(tt), nil } // Compose creates a Tag from individual parts, which may be of type Tag, Base, diff --git a/tools/vendor/google.golang.org/protobuf/encoding/protodelim/protodelim.go b/tools/vendor/google.golang.org/protobuf/encoding/protodelim/protodelim.go new file mode 100644 index 000000000..2ef36bbcf --- /dev/null +++ b/tools/vendor/google.golang.org/protobuf/encoding/protodelim/protodelim.go @@ -0,0 +1,160 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package protodelim marshals and unmarshals varint size-delimited messages. +package protodelim + +import ( + "bufio" + "encoding/binary" + "fmt" + "io" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/proto" +) + +// MarshalOptions is a configurable varint size-delimited marshaler. +type MarshalOptions struct{ proto.MarshalOptions } + +// MarshalTo writes a varint size-delimited wire-format message to w. +// If w returns an error, MarshalTo returns it unchanged. 
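The First and AcceptRanges tables above exist for one lookup pattern: index First by the lead byte, mask out the sequence size, and shift to find the acceptable range for the second byte. utf8internal is an internal package, so the sketch below (not part of the vendored tree) inlines the two constants it needs rather than importing them:

package main

import "fmt"

// Local copies of utf8internal values, inlined for illustration only.
const (
	sizeMask    = 7    // utf8internal.SizeMask
	acceptShift = 4    // utf8internal.AcceptShift
	s3          = 0x03 // what First[0xE2] yields: accept-range 0, size 3
)

func main() {
	first := uint8(s3)
	fmt.Println(first & sizeMask)     // 3: lead byte 0xE2 starts a 3-byte sequence
	fmt.Println(first >> acceptShift) // 0: AcceptRanges[0] = {0x80, 0xBF}
}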
+func (o MarshalOptions) MarshalTo(w io.Writer, m proto.Message) (int, error) { + msgBytes, err := o.MarshalOptions.Marshal(m) + if err != nil { + return 0, err + } + + sizeBytes := protowire.AppendVarint(nil, uint64(len(msgBytes))) + sizeWritten, err := w.Write(sizeBytes) + if err != nil { + return sizeWritten, err + } + msgWritten, err := w.Write(msgBytes) + if err != nil { + return sizeWritten + msgWritten, err + } + return sizeWritten + msgWritten, nil +} + +// MarshalTo writes a varint size-delimited wire-format message to w +// with the default options. +// +// See the documentation for [MarshalOptions.MarshalTo]. +func MarshalTo(w io.Writer, m proto.Message) (int, error) { + return MarshalOptions{}.MarshalTo(w, m) +} + +// UnmarshalOptions is a configurable varint size-delimited unmarshaler. +type UnmarshalOptions struct { + proto.UnmarshalOptions + + // MaxSize is the maximum size in wire-format bytes of a single message. + // Unmarshaling a message larger than MaxSize will return an error. + // A zero MaxSize will default to 4 MiB. + // Setting MaxSize to -1 disables the limit. + MaxSize int64 +} + +const defaultMaxSize = 4 << 20 // 4 MiB, corresponds to the default gRPC max request/response size + +// SizeTooLargeError is an error that is returned when the unmarshaler encounters a message size +// that is larger than its configured [UnmarshalOptions.MaxSize]. +type SizeTooLargeError struct { + // Size is the varint size of the message encountered + // that was larger than the provided MaxSize. + Size uint64 + + // MaxSize is the MaxSize limit configured in UnmarshalOptions, which Size exceeded. + MaxSize uint64 +} + +func (e *SizeTooLargeError) Error() string { + return fmt.Sprintf("message size %d exceeded unmarshaler's maximum configured size %d", e.Size, e.MaxSize) +} + +// Reader is the interface expected by [UnmarshalFrom]. +// It is implemented by *[bufio.Reader]. +type Reader interface { + io.Reader + io.ByteReader +} + +// UnmarshalFrom parses and consumes a varint size-delimited wire-format message +// from r. +// The provided message must be mutable (e.g., a non-nil pointer to a message). +// +// The error is [io.EOF] error only if no bytes are read. +// If an EOF happens after reading some but not all the bytes, +// UnmarshalFrom returns a non-io.EOF error. +// In particular if r returns a non-io.EOF error, UnmarshalFrom returns it unchanged, +// and if only a size is read with no subsequent message, [io.ErrUnexpectedEOF] is returned. +func (o UnmarshalOptions) UnmarshalFrom(r Reader, m proto.Message) error { + var sizeArr [binary.MaxVarintLen64]byte + sizeBuf := sizeArr[:0] + for i := range sizeArr { + b, err := r.ReadByte() + if err != nil { + // Immediate EOF is unexpected. + if err == io.EOF && i != 0 { + break + } + return err + } + sizeBuf = append(sizeBuf, b) + if b < 0x80 { + break + } + } + size, n := protowire.ConsumeVarint(sizeBuf) + if n < 0 { + return protowire.ParseError(n) + } + + maxSize := o.MaxSize + if maxSize == 0 { + maxSize = defaultMaxSize + } + if maxSize != -1 && size > uint64(maxSize) { + return errors.Wrap(&SizeTooLargeError{Size: size, MaxSize: uint64(maxSize)}, "") + } + + var b []byte + var err error + if br, ok := r.(*bufio.Reader); ok { + // Use the []byte from the bufio.Reader instead of having to allocate one. + // This reduces CPU usage and allocated bytes. 
+ b, err = br.Peek(int(size)) + if err == nil { + defer br.Discard(int(size)) + } else { + b = nil + } + } + if b == nil { + b = make([]byte, size) + _, err = io.ReadFull(r, b) + } + + if err == io.EOF { + return io.ErrUnexpectedEOF + } + if err != nil { + return err + } + if err := o.Unmarshal(b, m); err != nil { + return err + } + return nil +} + +// UnmarshalFrom parses and consumes a varint size-delimited wire-format message +// from r with the default options. +// The provided message must be mutable (e.g., a non-nil pointer to a message). +// +// See the documentation for [UnmarshalOptions.UnmarshalFrom]. +func UnmarshalFrom(r Reader, m proto.Message) error { + return UnmarshalOptions{}.UnmarshalFrom(r, m) +} diff --git a/tools/vendor/google.golang.org/protobuf/encoding/prototext/decode.go b/tools/vendor/google.golang.org/protobuf/encoding/prototext/decode.go index d972a3d98..b53805056 100644 --- a/tools/vendor/google.golang.org/protobuf/encoding/prototext/decode.go +++ b/tools/vendor/google.golang.org/protobuf/encoding/prototext/decode.go @@ -185,11 +185,6 @@ func (d decoder) unmarshalMessage(m protoreflect.Message, checkDelims bool) erro } else if xtErr != nil && xtErr != protoregistry.NotFound { return d.newError(tok.Pos(), "unable to resolve [%s]: %v", tok.RawString(), xtErr) } - if flags.ProtoLegacyWeak { - if fd != nil && fd.IsWeak() && fd.Message().IsPlaceholder() { - fd = nil // reset since the weak reference is not linked in - } - } // Handle unknown fields. if fd == nil { diff --git a/tools/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go b/tools/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go deleted file mode 100644 index bf1aba0e8..000000000 --- a/tools/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package editionssupport defines constants for editions that are supported. -package editionssupport - -import "google.golang.org/protobuf/types/descriptorpb" - -const ( - Minimum = descriptorpb.Edition_EDITION_PROTO2 - Maximum = descriptorpb.Edition_EDITION_2023 - - // MaximumKnown is the maximum edition that is known to Go Protobuf, but not - // declared as supported. In other words: end users cannot use it, but - // testprotos inside Go Protobuf can. - MaximumKnown = descriptorpb.Edition_EDITION_2024 -) diff --git a/tools/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go b/tools/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go index 7e87c7604..669133d04 100644 --- a/tools/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go +++ b/tools/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go @@ -26,7 +26,7 @@ var byteType = reflect.TypeOf(byte(0)) // The type is the underlying field type (e.g., a repeated field may be // represented by []T, but the Go type passed in is just T). // A list of enum value descriptors must be provided for enum fields. -// This does not populate the Enum or Message (except for weak message). +// This does not populate the Enum or Message. // // This function is a best effort attempt; parsing errors are ignored. 
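The protodelim package added above is the complete varint-framing protocol: MarshalTo writes a size prefix followed by the message bytes, and UnmarshalFrom consumes exactly one frame, returning io.EOF only when the stream ends cleanly on a frame boundary. A minimal round-trip sketch (not part of the vendored tree; wrapperspb.StringValue is used purely as a convenient concrete message type):

package main

import (
	"bufio"
	"bytes"
	"fmt"
	"io"

	"google.golang.org/protobuf/encoding/protodelim"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	var buf bytes.Buffer

	// Write two length-prefixed messages back to back.
	for _, s := range []string{"first", "second"} {
		if _, err := protodelim.MarshalTo(&buf, wrapperspb.String(s)); err != nil {
			panic(err)
		}
	}

	// Read frames until the stream ends cleanly.
	r := bufio.NewReader(&buf)
	for {
		msg := &wrapperspb.StringValue{}
		if err := protodelim.UnmarshalFrom(r, msg); err != nil {
			if err == io.EOF {
				break // clean end of stream, per the documented contract
			}
			panic(err)
		}
		fmt.Println(msg.GetValue()) // first, then second
	}
}

A *bytes.Buffer would already satisfy the Reader interface, but wrapping it in bufio.NewReader additionally lets UnmarshalFrom take the allocation-free Peek path shown in the code above.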
func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescriptors) protoreflect.FieldDescriptor { @@ -109,9 +109,6 @@ func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescri } case s == "packed": f.L1.EditionFeatures.IsPacked = true - case strings.HasPrefix(s, "weak="): - f.L1.IsWeak = true - f.L1.Message = filedesc.PlaceholderMessage(protoreflect.FullName(s[len("weak="):])) case strings.HasPrefix(s, "def="): // The default tag is special in that everything afterwards is the // default regardless of the presence of commas. @@ -183,9 +180,6 @@ func Marshal(fd protoreflect.FieldDescriptor, enumName string) string { // the exact same semantics from the previous generator. tag = append(tag, "json="+jsonName) } - if fd.IsWeak() { - tag = append(tag, "weak="+string(fd.Message().FullName())) - } // The previous implementation does not tag extension fields as proto3, // even when the field is defined in a proto3 file. Match that behavior // for consistency. diff --git a/tools/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/tools/vendor/google.golang.org/protobuf/internal/filedesc/desc.go index 378b826fa..688aabe43 100644 --- a/tools/vendor/google.golang.org/protobuf/internal/filedesc/desc.go +++ b/tools/vendor/google.golang.org/protobuf/internal/filedesc/desc.go @@ -19,7 +19,6 @@ import ( "google.golang.org/protobuf/internal/pragma" "google.golang.org/protobuf/internal/strs" "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" ) // Edition is an Enum for proto2.Edition @@ -275,7 +274,6 @@ type ( Kind protoreflect.Kind StringName stringName IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto - IsWeak bool // promoted from google.protobuf.FieldOptions IsLazy bool // promoted from google.protobuf.FieldOptions Default defaultValue ContainingOneof protoreflect.OneofDescriptor // must be consistent with Message.Oneofs.Fields @@ -369,7 +367,7 @@ func (fd *Field) IsPacked() bool { return fd.L1.EditionFeatures.IsPacked } func (fd *Field) IsExtension() bool { return false } -func (fd *Field) IsWeak() bool { return fd.L1.IsWeak } +func (fd *Field) IsWeak() bool { return false } func (fd *Field) IsLazy() bool { return fd.L1.IsLazy } func (fd *Field) IsList() bool { return fd.Cardinality() == protoreflect.Repeated && !fd.IsMap() } func (fd *Field) IsMap() bool { return fd.Message() != nil && fd.Message().IsMapEntry() } @@ -396,11 +394,6 @@ func (fd *Field) Enum() protoreflect.EnumDescriptor { return fd.L1.Enum } func (fd *Field) Message() protoreflect.MessageDescriptor { - if fd.L1.IsWeak { - if d, _ := protoregistry.GlobalFiles.FindDescriptorByName(fd.L1.Message.FullName()); d != nil { - return d.(protoreflect.MessageDescriptor) - } - } return fd.L1.Message } func (fd *Field) IsMapEntry() bool { diff --git a/tools/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/tools/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go index 67a51b327..d4c94458b 100644 --- a/tools/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go +++ b/tools/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go @@ -32,11 +32,6 @@ func (file *File) resolveMessages() { for j := range md.L2.Fields.List { fd := &md.L2.Fields.List[j] - // Weak fields are resolved upon actual use. - if fd.L1.IsWeak { - continue - } - // Resolve message field dependency. 
switch fd.L1.Kind { case protoreflect.EnumKind: @@ -150,8 +145,6 @@ func (fd *File) unmarshalFull(b []byte) { switch num { case genid.FileDescriptorProto_PublicDependency_field_number: fd.L2.Imports[v].IsPublic = true - case genid.FileDescriptorProto_WeakDependency_field_number: - fd.L2.Imports[v].IsWeak = true } case protowire.BytesType: v, m := protowire.ConsumeBytes(b) @@ -502,8 +495,6 @@ func (fd *Field) unmarshalOptions(b []byte) { switch num { case genid.FieldOptions_Packed_field_number: fd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v) - case genid.FieldOptions_Weak_field_number: - fd.L1.IsWeak = protowire.DecodeBool(v) case genid.FieldOptions_Lazy_field_number: fd.L1.IsLazy = protowire.DecodeBool(v) case FieldOptions_EnforceUTF8: diff --git a/tools/vendor/google.golang.org/protobuf/internal/filetype/build.go b/tools/vendor/google.golang.org/protobuf/internal/filetype/build.go index ba83fea44..e1b4130bd 100644 --- a/tools/vendor/google.golang.org/protobuf/internal/filetype/build.go +++ b/tools/vendor/google.golang.org/protobuf/internal/filetype/build.go @@ -63,7 +63,7 @@ type Builder struct { // message declarations in "flattened ordering". // // Dependencies are Go types for enums or messages referenced by - // message fields (excluding weak fields), for parent extended messages of + // message fields, for parent extended messages of // extension fields, for enums or messages referenced by extension fields, // and for input and output messages referenced by service methods. // Dependencies must come after declarations, but the ordering of diff --git a/tools/vendor/google.golang.org/protobuf/internal/flags/flags.go b/tools/vendor/google.golang.org/protobuf/internal/flags/flags.go index 5cb3ee70f..a06ccabc2 100644 --- a/tools/vendor/google.golang.org/protobuf/internal/flags/flags.go +++ b/tools/vendor/google.golang.org/protobuf/internal/flags/flags.go @@ -6,7 +6,7 @@ package flags // ProtoLegacy specifies whether to enable support for legacy functionality -// such as MessageSets, weak fields, and various other obscure behavior +// such as MessageSets, and various other obscure behavior // that is necessary to maintain backwards compatibility with proto1 or // the pre-release variants of proto2 and proto3. // @@ -22,8 +22,3 @@ const ProtoLegacy = protoLegacy // extension fields at unmarshal time, but defers creating the message // structure until the extension is first accessed. const LazyUnmarshalExtensions = ProtoLegacy - -// ProtoLegacyWeak specifies whether to enable support for weak fields. -// This flag was split out of ProtoLegacy in preparation for removing -// support for weak fields (independent of the other protolegacy features). 
-const ProtoLegacyWeak = ProtoLegacy diff --git a/tools/vendor/google.golang.org/protobuf/internal/genid/goname.go b/tools/vendor/google.golang.org/protobuf/internal/genid/goname.go index 693d2e9e1..99bb95baf 100644 --- a/tools/vendor/google.golang.org/protobuf/internal/genid/goname.go +++ b/tools/vendor/google.golang.org/protobuf/internal/genid/goname.go @@ -11,15 +11,10 @@ const ( SizeCache_goname = "sizeCache" SizeCacheA_goname = "XXX_sizecache" - WeakFields_goname = "weakFields" - WeakFieldsA_goname = "XXX_weak" - UnknownFields_goname = "unknownFields" UnknownFieldsA_goname = "XXX_unrecognized" ExtensionFields_goname = "extensionFields" ExtensionFieldsA_goname = "XXX_InternalExtensions" ExtensionFieldsB_goname = "XXX_extensions" - - WeakFieldPrefix_goname = "XXX_weak_" ) diff --git a/tools/vendor/google.golang.org/protobuf/internal/impl/codec_field.go b/tools/vendor/google.golang.org/protobuf/internal/impl/codec_field.go index 7c1f66c8c..d14d7d93c 100644 --- a/tools/vendor/google.golang.org/protobuf/internal/impl/codec_field.go +++ b/tools/vendor/google.golang.org/protobuf/internal/impl/codec_field.go @@ -5,15 +5,12 @@ package impl import ( - "fmt" "reflect" - "sync" "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/errors" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" "google.golang.org/protobuf/runtime/protoiface" ) @@ -121,78 +118,6 @@ func (mi *MessageInfo) initOneofFieldCoders(od protoreflect.OneofDescriptor, si } } -func makeWeakMessageFieldCoder(fd protoreflect.FieldDescriptor) pointerCoderFuncs { - var once sync.Once - var messageType protoreflect.MessageType - lazyInit := func() { - once.Do(func() { - messageName := fd.Message().FullName() - messageType, _ = protoregistry.GlobalTypes.FindMessageByName(messageName) - }) - } - - return pointerCoderFuncs{ - size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int { - m, ok := p.WeakFields().get(f.num) - if !ok { - return 0 - } - lazyInit() - if messageType == nil { - panic(fmt.Sprintf("weak message %v is not linked in", fd.Message().FullName())) - } - return sizeMessage(m, f.tagsize, opts) - }, - marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - m, ok := p.WeakFields().get(f.num) - if !ok { - return b, nil - } - lazyInit() - if messageType == nil { - panic(fmt.Sprintf("weak message %v is not linked in", fd.Message().FullName())) - } - return appendMessage(b, m, f.wiretag, opts) - }, - unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) { - fs := p.WeakFields() - m, ok := fs.get(f.num) - if !ok { - lazyInit() - if messageType == nil { - return unmarshalOutput{}, errUnknown - } - m = messageType.New().Interface() - fs.set(f.num, m) - } - return consumeMessage(b, m, wtyp, opts) - }, - isInit: func(p pointer, f *coderFieldInfo) error { - m, ok := p.WeakFields().get(f.num) - if !ok { - return nil - } - return proto.CheckInitialized(m) - }, - merge: func(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { - sm, ok := src.WeakFields().get(f.num) - if !ok { - return - } - dm, ok := dst.WeakFields().get(f.num) - if !ok { - lazyInit() - if messageType == nil { - panic(fmt.Sprintf("weak message %v is not linked in", fd.Message().FullName())) - } - dm = messageType.New().Interface() - dst.WeakFields().set(f.num, dm) - } - opts.Merge(dm, sm) - }, - } -} - func makeMessageFieldCoder(fd 
protoreflect.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { if mi := getMessageInfo(ft); mi != nil { funcs := pointerCoderFuncs{ diff --git a/tools/vendor/google.golang.org/protobuf/internal/impl/codec_message.go b/tools/vendor/google.golang.org/protobuf/internal/impl/codec_message.go index 111d95833..f78b57b04 100644 --- a/tools/vendor/google.golang.org/protobuf/internal/impl/codec_message.go +++ b/tools/vendor/google.golang.org/protobuf/internal/impl/codec_message.go @@ -119,9 +119,6 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { } case isOneof: fieldOffset = offsetOf(fs) - case fd.IsWeak(): - fieldOffset = si.weakOffset - funcs = makeWeakMessageFieldCoder(fd) default: fieldOffset = offsetOf(fs) childMessage, funcs = fieldCoder(fd, ft) diff --git a/tools/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go b/tools/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go index f81d7d0db..41c1f74ef 100644 --- a/tools/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go +++ b/tools/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go @@ -46,9 +46,6 @@ func (mi *MessageInfo) makeOpaqueCoderMethods(t reflect.Type, si opaqueStructInf switch { case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic(): fieldOffset = offsetOf(fs) - case fd.IsWeak(): - fieldOffset = si.weakOffset - funcs = makeWeakMessageFieldCoder(fd) case fd.Message() != nil && !fd.IsMap(): fieldOffset = offsetOf(fs) if fd.IsList() { diff --git a/tools/vendor/google.golang.org/protobuf/internal/impl/lazy.go b/tools/vendor/google.golang.org/protobuf/internal/impl/lazy.go index e8fb6c35b..c7de31e24 100644 --- a/tools/vendor/google.golang.org/protobuf/internal/impl/lazy.go +++ b/tools/vendor/google.golang.org/protobuf/internal/impl/lazy.go @@ -131,7 +131,7 @@ func (mi *MessageInfo) skipField(b []byte, f *coderFieldInfo, wtyp protowire.Typ fmi := f.validation.mi if fmi == nil { fd := mi.Desc.Fields().ByNumber(f.num) - if fd == nil || !fd.IsWeak() { + if fd == nil { return out, ValidationUnknown } messageName := fd.Message().FullName() diff --git a/tools/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go b/tools/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go index bf0b6049b..a51dffbe2 100644 --- a/tools/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go +++ b/tools/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go @@ -310,12 +310,9 @@ func aberrantAppendField(md *filedesc.Message, goType reflect.Type, tag, tagKey, fd.L0.Parent = md fd.L0.Index = n - if fd.L1.IsWeak || fd.L1.EditionFeatures.IsPacked { + if fd.L1.EditionFeatures.IsPacked { fd.L1.Options = func() protoreflect.ProtoMessage { opts := descopts.Field.ProtoReflect().New() - if fd.L1.IsWeak { - opts.Set(opts.Descriptor().Fields().ByName("weak"), protoreflect.ValueOfBool(true)) - } if fd.L1.EditionFeatures.IsPacked { opts.Set(opts.Descriptor().Fields().ByName("packed"), protoreflect.ValueOfBool(fd.L1.EditionFeatures.IsPacked)) } diff --git a/tools/vendor/google.golang.org/protobuf/internal/impl/message.go b/tools/vendor/google.golang.org/protobuf/internal/impl/message.go index d1f79b422..d50423dcb 100644 --- a/tools/vendor/google.golang.org/protobuf/internal/impl/message.go +++ b/tools/vendor/google.golang.org/protobuf/internal/impl/message.go @@ -14,7 +14,6 @@ import ( "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/reflect/protoreflect" - 
"google.golang.org/protobuf/reflect/protoregistry" ) // MessageInfo provides protobuf related functionality for a given Go type @@ -120,7 +119,6 @@ type ( var ( sizecacheType = reflect.TypeOf(SizeCache(0)) - weakFieldsType = reflect.TypeOf(WeakFields(nil)) unknownFieldsAType = reflect.TypeOf(unknownFieldsA(nil)) unknownFieldsBType = reflect.TypeOf(unknownFieldsB(nil)) extensionFieldsType = reflect.TypeOf(ExtensionFields(nil)) @@ -129,8 +127,6 @@ var ( type structInfo struct { sizecacheOffset offset sizecacheType reflect.Type - weakOffset offset - weakType reflect.Type unknownOffset offset unknownType reflect.Type extensionOffset offset @@ -148,7 +144,6 @@ type structInfo struct { func (mi *MessageInfo) makeStructInfo(t reflect.Type) structInfo { si := structInfo{ sizecacheOffset: invalidOffset, - weakOffset: invalidOffset, unknownOffset: invalidOffset, extensionOffset: invalidOffset, lazyOffset: invalidOffset, @@ -168,11 +163,6 @@ fieldLoop: si.sizecacheOffset = offsetOf(f) si.sizecacheType = f.Type } - case genid.WeakFields_goname, genid.WeakFieldsA_goname: - if f.Type == weakFieldsType { - si.weakOffset = offsetOf(f) - si.weakType = f.Type - } case genid.UnknownFields_goname, genid.UnknownFieldsA_goname: if f.Type == unknownFieldsAType || f.Type == unknownFieldsBType { si.unknownOffset = offsetOf(f) @@ -256,9 +246,6 @@ func (mi *MessageInfo) Message(i int) protoreflect.MessageType { mi.init() fd := mi.Desc.Fields().Get(i) switch { - case fd.IsWeak(): - mt, _ := protoregistry.GlobalTypes.FindMessageByName(fd.Message().FullName()) - return mt case fd.IsMap(): return mapEntryType{fd.Message(), mi.fieldTypes[fd.Number()]} default: diff --git a/tools/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go b/tools/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go index d8dcd7886..dd55e8e00 100644 --- a/tools/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go +++ b/tools/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go @@ -56,9 +56,6 @@ func opaqueInitHook(mi *MessageInfo) bool { usePresence, _ := usePresenceForField(si, fd) switch { - case fd.IsWeak(): - // Weak fields are no different for opaque. - fi = fieldInfoForWeakMessage(fd, si.weakOffset) case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic(): // Oneofs are no different for opaque. 
fi = fieldInfoForOneof(fd, si.oneofsByName[fd.ContainingOneof().Name()], mi.Exporter, si.oneofWrappersByNumber[fd.Number()]) @@ -620,8 +617,6 @@ func usePresenceForField(si opaqueStructInfo, fd protoreflect.FieldDescriptor) ( switch { case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic(): return false, false - case fd.IsWeak(): - return false, false case fd.IsMap(): return false, false case fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind: diff --git a/tools/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go b/tools/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go index 31c19b54f..0d20132fa 100644 --- a/tools/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go +++ b/tools/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go @@ -72,8 +72,6 @@ func (mi *MessageInfo) makeKnownFieldsFunc(si structInfo) { fi = fieldInfoForMap(fd, fs, mi.Exporter) case fd.IsList(): fi = fieldInfoForList(fd, fs, mi.Exporter) - case fd.IsWeak(): - fi = fieldInfoForWeakMessage(fd, si.weakOffset) case fd.Message() != nil: fi = fieldInfoForMessage(fd, fs, mi.Exporter) default: @@ -219,9 +217,6 @@ func (mi *MessageInfo) makeFieldTypes(si structInfo) { } case fd.Message() != nil: ft = fs.Type - if fd.IsWeak() { - ft = nil - } isMessage = true } if isMessage && ft != nil && ft.Kind() != reflect.Ptr { diff --git a/tools/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go b/tools/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go index 3cd1fbc21..68d4ae32e 100644 --- a/tools/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go +++ b/tools/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go @@ -8,11 +8,8 @@ import ( "fmt" "math" "reflect" - "sync" - "google.golang.org/protobuf/internal/flags" "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" ) type fieldInfo struct { @@ -332,79 +329,6 @@ func fieldInfoForScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField, } } -func fieldInfoForWeakMessage(fd protoreflect.FieldDescriptor, weakOffset offset) fieldInfo { - if !flags.ProtoLegacyWeak { - panic("no support for proto1 weak fields") - } - - var once sync.Once - var messageType protoreflect.MessageType - lazyInit := func() { - once.Do(func() { - messageName := fd.Message().FullName() - messageType, _ = protoregistry.GlobalTypes.FindMessageByName(messageName) - if messageType == nil { - panic(fmt.Sprintf("weak message %v for field %v is not linked in", messageName, fd.FullName())) - } - }) - } - - num := fd.Number() - return fieldInfo{ - fieldDesc: fd, - has: func(p pointer) bool { - if p.IsNil() { - return false - } - _, ok := p.Apply(weakOffset).WeakFields().get(num) - return ok - }, - clear: func(p pointer) { - p.Apply(weakOffset).WeakFields().clear(num) - }, - get: func(p pointer) protoreflect.Value { - lazyInit() - if p.IsNil() { - return protoreflect.ValueOfMessage(messageType.Zero()) - } - m, ok := p.Apply(weakOffset).WeakFields().get(num) - if !ok { - return protoreflect.ValueOfMessage(messageType.Zero()) - } - return protoreflect.ValueOfMessage(m.ProtoReflect()) - }, - set: func(p pointer, v protoreflect.Value) { - lazyInit() - m := v.Message() - if m.Descriptor() != messageType.Descriptor() { - if got, want := m.Descriptor().FullName(), messageType.Descriptor().FullName(); got != want { - panic(fmt.Sprintf("field %v has mismatching message descriptor: got %v, want %v", 
fd.FullName(), got, want)) - } - panic(fmt.Sprintf("field %v has mismatching message descriptor: %v", fd.FullName(), m.Descriptor().FullName())) - } - p.Apply(weakOffset).WeakFields().set(num, m.Interface()) - }, - mutable: func(p pointer) protoreflect.Value { - lazyInit() - fs := p.Apply(weakOffset).WeakFields() - m, ok := fs.get(num) - if !ok { - m = messageType.New().Interface() - fs.set(num, m) - } - return protoreflect.ValueOfMessage(m.ProtoReflect()) - }, - newMessage: func() protoreflect.Message { - lazyInit() - return messageType.New() - }, - newField: func() protoreflect.Value { - lazyInit() - return protoreflect.ValueOfMessage(messageType.New()) - }, - } -} - func fieldInfoForMessage(fd protoreflect.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { ft := fs.Type conv := NewConverter(ft, fd) diff --git a/tools/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/tools/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go index 6bed45e35..62f8bf663 100644 --- a/tools/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go +++ b/tools/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go @@ -111,7 +111,6 @@ func (p pointer) StringSlice() *[]string { return (*[]string)(p.p func (p pointer) Bytes() *[]byte { return (*[]byte)(p.p) } func (p pointer) BytesPtr() **[]byte { return (**[]byte)(p.p) } func (p pointer) BytesSlice() *[][]byte { return (*[][]byte)(p.p) } -func (p pointer) WeakFields() *weakFields { return (*weakFields)(p.p) } func (p pointer) Extensions() *map[int32]ExtensionField { return (*map[int32]ExtensionField)(p.p) } func (p pointer) LazyInfoPtr() **protolazy.XXX_lazyUnmarshalInfo { return (**protolazy.XXX_lazyUnmarshalInfo)(p.p) diff --git a/tools/vendor/google.golang.org/protobuf/internal/impl/validate.go b/tools/vendor/google.golang.org/protobuf/internal/impl/validate.go index b534a3d6d..7b2995dde 100644 --- a/tools/vendor/google.golang.org/protobuf/internal/impl/validate.go +++ b/tools/vendor/google.golang.org/protobuf/internal/impl/validate.go @@ -211,9 +211,7 @@ func newValidationInfo(fd protoreflect.FieldDescriptor, ft reflect.Type) validat switch fd.Kind() { case protoreflect.MessageKind: vi.typ = validationTypeMessage - if !fd.IsWeak() { - vi.mi = getMessageInfo(ft) - } + vi.mi = getMessageInfo(ft) case protoreflect.GroupKind: vi.typ = validationTypeGroup vi.mi = getMessageInfo(ft) @@ -320,26 +318,6 @@ State: } if f != nil { vi = f.validation - if vi.typ == validationTypeMessage && vi.mi == nil { - // Probable weak field. - // - // TODO: Consider storing the results of this lookup somewhere - // rather than recomputing it on every validation. - fd := st.mi.Desc.Fields().ByNumber(num) - if fd == nil || !fd.IsWeak() { - break - } - messageName := fd.Message().FullName() - messageType, err := protoregistry.GlobalTypes.FindMessageByName(messageName) - switch err { - case nil: - vi.mi, _ = messageType.(*MessageInfo) - case protoregistry.NotFound: - vi.typ = validationTypeBytes - default: - return out, ValidationUnknown - } - } break } // Possible extension field. diff --git a/tools/vendor/google.golang.org/protobuf/internal/impl/weak.go b/tools/vendor/google.golang.org/protobuf/internal/impl/weak.go deleted file mode 100644 index eb79a7ba9..000000000 --- a/tools/vendor/google.golang.org/protobuf/internal/impl/weak.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package impl - -import ( - "fmt" - - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" -) - -// weakFields adds methods to the exported WeakFields type for internal use. -// -// The exported type is an alias to an unnamed type, so methods can't be -// defined directly on it. -type weakFields WeakFields - -func (w weakFields) get(num protoreflect.FieldNumber) (protoreflect.ProtoMessage, bool) { - m, ok := w[int32(num)] - return m, ok -} - -func (w *weakFields) set(num protoreflect.FieldNumber, m protoreflect.ProtoMessage) { - if *w == nil { - *w = make(weakFields) - } - (*w)[int32(num)] = m -} - -func (w *weakFields) clear(num protoreflect.FieldNumber) { - delete(*w, int32(num)) -} - -func (Export) HasWeak(w WeakFields, num protoreflect.FieldNumber) bool { - _, ok := w[int32(num)] - return ok -} - -func (Export) ClearWeak(w *WeakFields, num protoreflect.FieldNumber) { - delete(*w, int32(num)) -} - -func (Export) GetWeak(w WeakFields, num protoreflect.FieldNumber, name protoreflect.FullName) protoreflect.ProtoMessage { - if m, ok := w[int32(num)]; ok { - return m - } - mt, _ := protoregistry.GlobalTypes.FindMessageByName(name) - if mt == nil { - panic(fmt.Sprintf("message %v for weak field is not linked in", name)) - } - return mt.Zero().Interface() -} - -func (Export) SetWeak(w *WeakFields, num protoreflect.FieldNumber, name protoreflect.FullName, m protoreflect.ProtoMessage) { - if m != nil { - mt, _ := protoregistry.GlobalTypes.FindMessageByName(name) - if mt == nil { - panic(fmt.Sprintf("message %v for weak field is not linked in", name)) - } - if mt != m.ProtoReflect().Type() { - panic(fmt.Sprintf("invalid message type for weak field: got %T, want %T", m, mt.Zero().Interface())) - } - } - if m == nil || !m.ProtoReflect().IsValid() { - delete(*w, int32(num)) - return - } - if *w == nil { - *w = make(weakFields) - } - (*w)[int32(num)] = m -} diff --git a/tools/vendor/google.golang.org/protobuf/internal/version/version.go b/tools/vendor/google.golang.org/protobuf/internal/version/version.go index 4a39af0c6..01efc3303 100644 --- a/tools/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/tools/vendor/google.golang.org/protobuf/internal/version/version.go @@ -52,7 +52,7 @@ import ( const ( Major = 1 Minor = 36 - Patch = 4 + Patch = 5 PreRelease = "" ) diff --git a/tools/vendor/google.golang.org/protobuf/proto/decode.go b/tools/vendor/google.golang.org/protobuf/proto/decode.go index e28d7acb3..4cbf1aeaf 100644 --- a/tools/vendor/google.golang.org/protobuf/proto/decode.go +++ b/tools/vendor/google.golang.org/protobuf/proto/decode.go @@ -8,7 +8,6 @@ import ( "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/encoding/messageset" "google.golang.org/protobuf/internal/errors" - "google.golang.org/protobuf/internal/flags" "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/internal/pragma" "google.golang.org/protobuf/reflect/protoreflect" @@ -172,10 +171,6 @@ func (o UnmarshalOptions) unmarshalMessageSlow(b []byte, m protoreflect.Message) var err error if fd == nil { err = errUnknown - } else if flags.ProtoLegacyWeak { - if fd.IsWeak() && fd.Message().IsPlaceholder() { - err = errUnknown // weak referent is not linked in - } } // Parse the field value. 
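For context on the two unmarshal-path changes above: proto/decode.go no longer consults the ProtoLegacyWeak flag, so a field is treated as unknown only when its descriptor is genuinely missing, and protodelim's UnmarshalFrom, extended at the head of this section, reads a varint size prefix and then the message payload, turning a truncated payload into io.ErrUnexpectedEOF. A minimal consumption sketch, not part of this patch (assumptions: structpb.Struct stands in for any generated message type, and os.Stdin carries the size-delimited stream):

package main

import (
	"bufio"
	"errors"
	"io"
	"log"
	"os"

	"google.golang.org/protobuf/encoding/protodelim"
	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	// *bufio.Reader satisfies protodelim.Reader (io.Reader plus io.ByteReader),
	// enabling the Peek-based fast path in UnmarshalFrom.
	br := bufio.NewReader(os.Stdin)
	for {
		m := &structpb.Struct{} // assumed stand-in for a real generated message
		err := protodelim.UnmarshalFrom(br, m)
		if errors.Is(err, io.EOF) {
			break // clean end of stream between messages
		}
		if err != nil {
			// A size prefix followed by a short payload surfaces as
			// io.ErrUnexpectedEOF, per the hunk at the top of this section.
			log.Fatal(err)
		}
		log.Printf("message: %v", m)
	}
}

Reading from a *bufio.Reader lets the Peek/Discard fast path shown earlier avoid an extra copy; any other reader satisfying io.Reader plus io.ByteReader falls back to io.ReadFull into a freshly allocated buffer.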
diff --git a/tools/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go b/tools/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go deleted file mode 100644 index 69a050509..000000000 --- a/tools/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go +++ /dev/null @@ -1,292 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package protodesc provides functionality for converting -// FileDescriptorProto messages to/from [protoreflect.FileDescriptor] values. -// -// The google.protobuf.FileDescriptorProto is a protobuf message that describes -// the type information for a .proto file in a form that is easily serializable. -// The [protoreflect.FileDescriptor] is a more structured representation of -// the FileDescriptorProto message where references and remote dependencies -// can be directly followed. -package protodesc - -import ( - "strings" - - "google.golang.org/protobuf/internal/editionssupport" - "google.golang.org/protobuf/internal/errors" - "google.golang.org/protobuf/internal/filedesc" - "google.golang.org/protobuf/internal/pragma" - "google.golang.org/protobuf/internal/strs" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" - - "google.golang.org/protobuf/types/descriptorpb" -) - -// Resolver is the resolver used by [NewFile] to resolve dependencies. -// The enums and messages provided must belong to some parent file, -// which is also registered. -// -// It is implemented by [protoregistry.Files]. -type Resolver interface { - FindFileByPath(string) (protoreflect.FileDescriptor, error) - FindDescriptorByName(protoreflect.FullName) (protoreflect.Descriptor, error) -} - -// FileOptions configures the construction of file descriptors. -type FileOptions struct { - pragma.NoUnkeyedLiterals - - // AllowUnresolvable configures New to permissively allow unresolvable - // file, enum, or message dependencies. Unresolved dependencies are replaced - // by placeholder equivalents. - // - // The following dependencies may be left unresolved: - // • Resolving an imported file. - // • Resolving the type for a message field or extension field. - // If the kind of the field is unknown, then a placeholder is used for both - // the Enum and Message accessors on the protoreflect.FieldDescriptor. - // • Resolving an enum value set as the default for an optional enum field. - // If unresolvable, the protoreflect.FieldDescriptor.Default is set to the - // first value in the associated enum (or zero if the also enum dependency - // is also unresolvable). The protoreflect.FieldDescriptor.DefaultEnumValue - // is populated with a placeholder. - // • Resolving the extended message type for an extension field. - // • Resolving the input or output message type for a service method. - // - // If the unresolved dependency uses a relative name, - // then the placeholder will contain an invalid FullName with a "*." prefix, - // indicating that the starting prefix of the full name is unknown. - AllowUnresolvable bool -} - -// NewFile creates a new [protoreflect.FileDescriptor] from the provided -// file descriptor message. See [FileOptions.New] for more information. 
-func NewFile(fd *descriptorpb.FileDescriptorProto, r Resolver) (protoreflect.FileDescriptor, error) { - return FileOptions{}.New(fd, r) -} - -// NewFiles creates a new [protoregistry.Files] from the provided -// FileDescriptorSet message. See [FileOptions.NewFiles] for more information. -func NewFiles(fd *descriptorpb.FileDescriptorSet) (*protoregistry.Files, error) { - return FileOptions{}.NewFiles(fd) -} - -// New creates a new [protoreflect.FileDescriptor] from the provided -// file descriptor message. The file must represent a valid proto file according -// to protobuf semantics. The returned descriptor is a deep copy of the input. -// -// Any imported files, enum types, or message types referenced in the file are -// resolved using the provided registry. When looking up an import file path, -// the path must be unique. The newly created file descriptor is not registered -// back into the provided file registry. -func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (protoreflect.FileDescriptor, error) { - if r == nil { - r = (*protoregistry.Files)(nil) // empty resolver - } - - // Handle the file descriptor content. - f := &filedesc.File{L2: &filedesc.FileL2{}} - switch fd.GetSyntax() { - case "proto2", "": - f.L1.Syntax = protoreflect.Proto2 - f.L1.Edition = filedesc.EditionProto2 - case "proto3": - f.L1.Syntax = protoreflect.Proto3 - f.L1.Edition = filedesc.EditionProto3 - case "editions": - f.L1.Syntax = protoreflect.Editions - f.L1.Edition = fromEditionProto(fd.GetEdition()) - default: - return nil, errors.New("invalid syntax: %q", fd.GetSyntax()) - } - f.L1.Path = fd.GetName() - if f.L1.Path == "" { - return nil, errors.New("file path must be populated") - } - if f.L1.Syntax == protoreflect.Editions && (fd.GetEdition() < editionssupport.Minimum || fd.GetEdition() > editionssupport.Maximum) { - // Allow cmd/protoc-gen-go/testdata to use any edition for easier - // testing of upcoming edition features. 
- if !strings.HasPrefix(fd.GetName(), "cmd/protoc-gen-go/testdata/") { - return nil, errors.New("use of edition %v not yet supported by the Go Protobuf runtime", fd.GetEdition()) - } - } - f.L1.Package = protoreflect.FullName(fd.GetPackage()) - if !f.L1.Package.IsValid() && f.L1.Package != "" { - return nil, errors.New("invalid package: %q", f.L1.Package) - } - if opts := fd.GetOptions(); opts != nil { - opts = proto.Clone(opts).(*descriptorpb.FileOptions) - f.L2.Options = func() protoreflect.ProtoMessage { return opts } - } - initFileDescFromFeatureSet(f, fd.GetOptions().GetFeatures()) - - f.L2.Imports = make(filedesc.FileImports, len(fd.GetDependency())) - for _, i := range fd.GetPublicDependency() { - if !(0 <= i && int(i) < len(f.L2.Imports)) || f.L2.Imports[i].IsPublic { - return nil, errors.New("invalid or duplicate public import index: %d", i) - } - f.L2.Imports[i].IsPublic = true - } - for _, i := range fd.GetWeakDependency() { - if !(0 <= i && int(i) < len(f.L2.Imports)) || f.L2.Imports[i].IsWeak { - return nil, errors.New("invalid or duplicate weak import index: %d", i) - } - f.L2.Imports[i].IsWeak = true - } - imps := importSet{f.Path(): true} - for i, path := range fd.GetDependency() { - imp := &f.L2.Imports[i] - f, err := r.FindFileByPath(path) - if err == protoregistry.NotFound && (o.AllowUnresolvable || imp.IsWeak) { - f = filedesc.PlaceholderFile(path) - } else if err != nil { - return nil, errors.New("could not resolve import %q: %v", path, err) - } - imp.FileDescriptor = f - - if imps[imp.Path()] { - return nil, errors.New("already imported %q", path) - } - imps[imp.Path()] = true - } - for i := range fd.GetDependency() { - imp := &f.L2.Imports[i] - imps.importPublic(imp.Imports()) - } - - // Handle source locations. - f.L2.Locations.File = f - for _, loc := range fd.GetSourceCodeInfo().GetLocation() { - var l protoreflect.SourceLocation - // TODO: Validate that the path points to an actual declaration? - l.Path = protoreflect.SourcePath(loc.GetPath()) - s := loc.GetSpan() - switch len(s) { - case 3: - l.StartLine, l.StartColumn, l.EndLine, l.EndColumn = int(s[0]), int(s[1]), int(s[0]), int(s[2]) - case 4: - l.StartLine, l.StartColumn, l.EndLine, l.EndColumn = int(s[0]), int(s[1]), int(s[2]), int(s[3]) - default: - return nil, errors.New("invalid span: %v", s) - } - // TODO: Validate that the span information is sensible? - // See https://github.com/protocolbuffers/protobuf/issues/6378. - if false && (l.EndLine < l.StartLine || l.StartLine < 0 || l.StartColumn < 0 || l.EndColumn < 0 || - (l.StartLine == l.EndLine && l.EndColumn <= l.StartColumn)) { - return nil, errors.New("invalid span: %v", s) - } - l.LeadingDetachedComments = loc.GetLeadingDetachedComments() - l.LeadingComments = loc.GetLeadingComments() - l.TrailingComments = loc.GetTrailingComments() - f.L2.Locations.List = append(f.L2.Locations.List, l) - } - - // Step 1: Allocate and derive the names for all declarations. 
- // This copies all fields from the descriptor proto except: - // google.protobuf.FieldDescriptorProto.type_name - // google.protobuf.FieldDescriptorProto.default_value - // google.protobuf.FieldDescriptorProto.oneof_index - // google.protobuf.FieldDescriptorProto.extendee - // google.protobuf.MethodDescriptorProto.input - // google.protobuf.MethodDescriptorProto.output - var err error - sb := new(strs.Builder) - r1 := make(descsByName) - if f.L1.Enums.List, err = r1.initEnumDeclarations(fd.GetEnumType(), f, sb); err != nil { - return nil, err - } - if f.L1.Messages.List, err = r1.initMessagesDeclarations(fd.GetMessageType(), f, sb); err != nil { - return nil, err - } - if f.L1.Extensions.List, err = r1.initExtensionDeclarations(fd.GetExtension(), f, sb); err != nil { - return nil, err - } - if f.L1.Services.List, err = r1.initServiceDeclarations(fd.GetService(), f, sb); err != nil { - return nil, err - } - - // Step 2: Resolve every dependency reference not handled by step 1. - r2 := &resolver{local: r1, remote: r, imports: imps, allowUnresolvable: o.AllowUnresolvable} - if err := r2.resolveMessageDependencies(f.L1.Messages.List, fd.GetMessageType()); err != nil { - return nil, err - } - if err := r2.resolveExtensionDependencies(f.L1.Extensions.List, fd.GetExtension()); err != nil { - return nil, err - } - if err := r2.resolveServiceDependencies(f.L1.Services.List, fd.GetService()); err != nil { - return nil, err - } - - // Step 3: Validate every enum, message, and extension declaration. - if err := validateEnumDeclarations(f.L1.Enums.List, fd.GetEnumType()); err != nil { - return nil, err - } - if err := validateMessageDeclarations(f, f.L1.Messages.List, fd.GetMessageType()); err != nil { - return nil, err - } - if err := validateExtensionDeclarations(f, f.L1.Extensions.List, fd.GetExtension()); err != nil { - return nil, err - } - - return f, nil -} - -type importSet map[string]bool - -func (is importSet) importPublic(imps protoreflect.FileImports) { - for i := 0; i < imps.Len(); i++ { - if imp := imps.Get(i); imp.IsPublic { - is[imp.Path()] = true - is.importPublic(imp.Imports()) - } - } -} - -// NewFiles creates a new [protoregistry.Files] from the provided -// FileDescriptorSet message. The descriptor set must include only -// valid files according to protobuf semantics. The returned descriptors -// are a deep copy of the input. -func (o FileOptions) NewFiles(fds *descriptorpb.FileDescriptorSet) (*protoregistry.Files, error) { - files := make(map[string]*descriptorpb.FileDescriptorProto) - for _, fd := range fds.File { - if _, ok := files[fd.GetName()]; ok { - return nil, errors.New("file appears multiple times: %q", fd.GetName()) - } - files[fd.GetName()] = fd - } - r := &protoregistry.Files{} - for _, fd := range files { - if err := o.addFileDeps(r, fd, files); err != nil { - return nil, err - } - } - return r, nil -} -func (o FileOptions) addFileDeps(r *protoregistry.Files, fd *descriptorpb.FileDescriptorProto, files map[string]*descriptorpb.FileDescriptorProto) error { - // Set the entry to nil while descending into a file's dependencies to detect cycles. - files[fd.GetName()] = nil - for _, dep := range fd.Dependency { - depfd, ok := files[dep] - if depfd == nil { - if ok { - return errors.New("import cycle in file: %q", dep) - } - continue - } - if err := o.addFileDeps(r, depfd, files); err != nil { - return err - } - } - // Delete the entry once dependencies are processed. 
- delete(files, fd.GetName()) - f, err := o.New(fd, r) - if err != nil { - return err - } - return r.RegisterFile(f) -} diff --git a/tools/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go b/tools/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go deleted file mode 100644 index ebcb4a8ab..000000000 --- a/tools/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go +++ /dev/null @@ -1,289 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package protodesc - -import ( - "google.golang.org/protobuf/internal/errors" - "google.golang.org/protobuf/internal/filedesc" - "google.golang.org/protobuf/internal/strs" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" - - "google.golang.org/protobuf/types/descriptorpb" -) - -type descsByName map[protoreflect.FullName]protoreflect.Descriptor - -func (r descsByName) initEnumDeclarations(eds []*descriptorpb.EnumDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (es []filedesc.Enum, err error) { - es = make([]filedesc.Enum, len(eds)) // allocate up-front to ensure stable pointers - for i, ed := range eds { - e := &es[i] - e.L2 = new(filedesc.EnumL2) - if e.L0, err = r.makeBase(e, parent, ed.GetName(), i, sb); err != nil { - return nil, err - } - if opts := ed.GetOptions(); opts != nil { - opts = proto.Clone(opts).(*descriptorpb.EnumOptions) - e.L2.Options = func() protoreflect.ProtoMessage { return opts } - } - e.L1.EditionFeatures = mergeEditionFeatures(parent, ed.GetOptions().GetFeatures()) - for _, s := range ed.GetReservedName() { - e.L2.ReservedNames.List = append(e.L2.ReservedNames.List, protoreflect.Name(s)) - } - for _, rr := range ed.GetReservedRange() { - e.L2.ReservedRanges.List = append(e.L2.ReservedRanges.List, [2]protoreflect.EnumNumber{ - protoreflect.EnumNumber(rr.GetStart()), - protoreflect.EnumNumber(rr.GetEnd()), - }) - } - if e.L2.Values.List, err = r.initEnumValuesFromDescriptorProto(ed.GetValue(), e, sb); err != nil { - return nil, err - } - } - return es, nil -} - -func (r descsByName) initEnumValuesFromDescriptorProto(vds []*descriptorpb.EnumValueDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (vs []filedesc.EnumValue, err error) { - vs = make([]filedesc.EnumValue, len(vds)) // allocate up-front to ensure stable pointers - for i, vd := range vds { - v := &vs[i] - if v.L0, err = r.makeBase(v, parent, vd.GetName(), i, sb); err != nil { - return nil, err - } - if opts := vd.GetOptions(); opts != nil { - opts = proto.Clone(opts).(*descriptorpb.EnumValueOptions) - v.L1.Options = func() protoreflect.ProtoMessage { return opts } - } - v.L1.Number = protoreflect.EnumNumber(vd.GetNumber()) - } - return vs, nil -} - -func (r descsByName) initMessagesDeclarations(mds []*descriptorpb.DescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (ms []filedesc.Message, err error) { - ms = make([]filedesc.Message, len(mds)) // allocate up-front to ensure stable pointers - for i, md := range mds { - m := &ms[i] - m.L2 = new(filedesc.MessageL2) - if m.L0, err = r.makeBase(m, parent, md.GetName(), i, sb); err != nil { - return nil, err - } - m.L1.EditionFeatures = mergeEditionFeatures(parent, md.GetOptions().GetFeatures()) - if opts := md.GetOptions(); opts != nil { - opts = proto.Clone(opts).(*descriptorpb.MessageOptions) - m.L2.Options = func() protoreflect.ProtoMessage { return opts } - m.L1.IsMapEntry = 
opts.GetMapEntry() - m.L1.IsMessageSet = opts.GetMessageSetWireFormat() - } - for _, s := range md.GetReservedName() { - m.L2.ReservedNames.List = append(m.L2.ReservedNames.List, protoreflect.Name(s)) - } - for _, rr := range md.GetReservedRange() { - m.L2.ReservedRanges.List = append(m.L2.ReservedRanges.List, [2]protoreflect.FieldNumber{ - protoreflect.FieldNumber(rr.GetStart()), - protoreflect.FieldNumber(rr.GetEnd()), - }) - } - for _, xr := range md.GetExtensionRange() { - m.L2.ExtensionRanges.List = append(m.L2.ExtensionRanges.List, [2]protoreflect.FieldNumber{ - protoreflect.FieldNumber(xr.GetStart()), - protoreflect.FieldNumber(xr.GetEnd()), - }) - var optsFunc func() protoreflect.ProtoMessage - if opts := xr.GetOptions(); opts != nil { - opts = proto.Clone(opts).(*descriptorpb.ExtensionRangeOptions) - optsFunc = func() protoreflect.ProtoMessage { return opts } - } - m.L2.ExtensionRangeOptions = append(m.L2.ExtensionRangeOptions, optsFunc) - } - if m.L2.Fields.List, err = r.initFieldsFromDescriptorProto(md.GetField(), m, sb); err != nil { - return nil, err - } - if m.L2.Oneofs.List, err = r.initOneofsFromDescriptorProto(md.GetOneofDecl(), m, sb); err != nil { - return nil, err - } - if m.L1.Enums.List, err = r.initEnumDeclarations(md.GetEnumType(), m, sb); err != nil { - return nil, err - } - if m.L1.Messages.List, err = r.initMessagesDeclarations(md.GetNestedType(), m, sb); err != nil { - return nil, err - } - if m.L1.Extensions.List, err = r.initExtensionDeclarations(md.GetExtension(), m, sb); err != nil { - return nil, err - } - } - return ms, nil -} - -// canBePacked returns whether the field can use packed encoding: -// https://protobuf.dev/programming-guides/encoding/#packed -func canBePacked(fd *descriptorpb.FieldDescriptorProto) bool { - if fd.GetLabel() != descriptorpb.FieldDescriptorProto_LABEL_REPEATED { - return false // not a repeated field - } - - switch protoreflect.Kind(fd.GetType()) { - case protoreflect.MessageKind, protoreflect.GroupKind: - return false // not a scalar type field - - case protoreflect.StringKind, protoreflect.BytesKind: - // string and bytes can explicitly not be declared as packed, - // see https://protobuf.dev/programming-guides/encoding/#packed - return false - - default: - return true - } -} - -func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (fs []filedesc.Field, err error) { - fs = make([]filedesc.Field, len(fds)) // allocate up-front to ensure stable pointers - for i, fd := range fds { - f := &fs[i] - if f.L0, err = r.makeBase(f, parent, fd.GetName(), i, sb); err != nil { - return nil, err - } - f.L1.EditionFeatures = mergeEditionFeatures(parent, fd.GetOptions().GetFeatures()) - f.L1.IsProto3Optional = fd.GetProto3Optional() - if opts := fd.GetOptions(); opts != nil { - opts = proto.Clone(opts).(*descriptorpb.FieldOptions) - f.L1.Options = func() protoreflect.ProtoMessage { return opts } - f.L1.IsWeak = opts.GetWeak() - f.L1.IsLazy = opts.GetLazy() - if opts.Packed != nil { - f.L1.EditionFeatures.IsPacked = opts.GetPacked() - } - } - f.L1.Number = protoreflect.FieldNumber(fd.GetNumber()) - f.L1.Cardinality = protoreflect.Cardinality(fd.GetLabel()) - if fd.Type != nil { - f.L1.Kind = protoreflect.Kind(fd.GetType()) - } - if fd.JsonName != nil { - f.L1.StringName.InitJSON(fd.GetJsonName()) - } - - if f.L1.EditionFeatures.IsLegacyRequired { - f.L1.Cardinality = protoreflect.Required - } - - if f.L1.Kind == protoreflect.MessageKind && 
f.L1.EditionFeatures.IsDelimitedEncoded { - f.L1.Kind = protoreflect.GroupKind - } - } - return fs, nil -} - -func (r descsByName) initOneofsFromDescriptorProto(ods []*descriptorpb.OneofDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (os []filedesc.Oneof, err error) { - os = make([]filedesc.Oneof, len(ods)) // allocate up-front to ensure stable pointers - for i, od := range ods { - o := &os[i] - if o.L0, err = r.makeBase(o, parent, od.GetName(), i, sb); err != nil { - return nil, err - } - o.L1.EditionFeatures = mergeEditionFeatures(parent, od.GetOptions().GetFeatures()) - if opts := od.GetOptions(); opts != nil { - opts = proto.Clone(opts).(*descriptorpb.OneofOptions) - o.L1.Options = func() protoreflect.ProtoMessage { return opts } - } - } - return os, nil -} - -func (r descsByName) initExtensionDeclarations(xds []*descriptorpb.FieldDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (xs []filedesc.Extension, err error) { - xs = make([]filedesc.Extension, len(xds)) // allocate up-front to ensure stable pointers - for i, xd := range xds { - x := &xs[i] - x.L2 = new(filedesc.ExtensionL2) - if x.L0, err = r.makeBase(x, parent, xd.GetName(), i, sb); err != nil { - return nil, err - } - x.L1.EditionFeatures = mergeEditionFeatures(parent, xd.GetOptions().GetFeatures()) - if opts := xd.GetOptions(); opts != nil { - opts = proto.Clone(opts).(*descriptorpb.FieldOptions) - x.L2.Options = func() protoreflect.ProtoMessage { return opts } - if opts.Packed != nil { - x.L1.EditionFeatures.IsPacked = opts.GetPacked() - } - } - x.L1.Number = protoreflect.FieldNumber(xd.GetNumber()) - x.L1.Cardinality = protoreflect.Cardinality(xd.GetLabel()) - if xd.Type != nil { - x.L1.Kind = protoreflect.Kind(xd.GetType()) - } - if xd.JsonName != nil { - x.L2.StringName.InitJSON(xd.GetJsonName()) - } - if x.L1.Kind == protoreflect.MessageKind && x.L1.EditionFeatures.IsDelimitedEncoded { - x.L1.Kind = protoreflect.GroupKind - } - } - return xs, nil -} - -func (r descsByName) initServiceDeclarations(sds []*descriptorpb.ServiceDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (ss []filedesc.Service, err error) { - ss = make([]filedesc.Service, len(sds)) // allocate up-front to ensure stable pointers - for i, sd := range sds { - s := &ss[i] - s.L2 = new(filedesc.ServiceL2) - if s.L0, err = r.makeBase(s, parent, sd.GetName(), i, sb); err != nil { - return nil, err - } - if opts := sd.GetOptions(); opts != nil { - opts = proto.Clone(opts).(*descriptorpb.ServiceOptions) - s.L2.Options = func() protoreflect.ProtoMessage { return opts } - } - if s.L2.Methods.List, err = r.initMethodsFromDescriptorProto(sd.GetMethod(), s, sb); err != nil { - return nil, err - } - } - return ss, nil -} - -func (r descsByName) initMethodsFromDescriptorProto(mds []*descriptorpb.MethodDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (ms []filedesc.Method, err error) { - ms = make([]filedesc.Method, len(mds)) // allocate up-front to ensure stable pointers - for i, md := range mds { - m := &ms[i] - if m.L0, err = r.makeBase(m, parent, md.GetName(), i, sb); err != nil { - return nil, err - } - if opts := md.GetOptions(); opts != nil { - opts = proto.Clone(opts).(*descriptorpb.MethodOptions) - m.L1.Options = func() protoreflect.ProtoMessage { return opts } - } - m.L1.IsStreamingClient = md.GetClientStreaming() - m.L1.IsStreamingServer = md.GetServerStreaming() - } - return ms, nil -} - -func (r descsByName) makeBase(child, parent protoreflect.Descriptor, name string, idx int, sb 
*strs.Builder) (filedesc.BaseL0, error) { - if !protoreflect.Name(name).IsValid() { - return filedesc.BaseL0{}, errors.New("descriptor %q has an invalid nested name: %q", parent.FullName(), name) - } - - // Derive the full name of the child. - // Note that enum values are a sibling to the enum parent in the namespace. - var fullName protoreflect.FullName - if _, ok := parent.(protoreflect.EnumDescriptor); ok { - fullName = sb.AppendFullName(parent.FullName().Parent(), protoreflect.Name(name)) - } else { - fullName = sb.AppendFullName(parent.FullName(), protoreflect.Name(name)) - } - if _, ok := r[fullName]; ok { - return filedesc.BaseL0{}, errors.New("descriptor %q already declared", fullName) - } - r[fullName] = child - - // TODO: Verify that the full name does not already exist in the resolver? - // This is not as critical since most usages of NewFile will register - // the created file back into the registry, which will perform this check. - - return filedesc.BaseL0{ - FullName: fullName, - ParentFile: parent.ParentFile().(*filedesc.File), - Parent: parent, - Index: idx, - }, nil -} diff --git a/tools/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go b/tools/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go deleted file mode 100644 index f3cebab29..000000000 --- a/tools/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go +++ /dev/null @@ -1,291 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package protodesc - -import ( - "google.golang.org/protobuf/internal/encoding/defval" - "google.golang.org/protobuf/internal/errors" - "google.golang.org/protobuf/internal/filedesc" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" - - "google.golang.org/protobuf/types/descriptorpb" -) - -// resolver is a wrapper around a local registry of declarations within the file -// and the remote resolver. The remote resolver is restricted to only return -// descriptors that have been imported. -type resolver struct { - local descsByName - remote Resolver - imports importSet - - allowUnresolvable bool -} - -func (r *resolver) resolveMessageDependencies(ms []filedesc.Message, mds []*descriptorpb.DescriptorProto) (err error) { - for i, md := range mds { - m := &ms[i] - for j, fd := range md.GetField() { - f := &m.L2.Fields.List[j] - if f.L1.Cardinality == protoreflect.Required { - m.L2.RequiredNumbers.List = append(m.L2.RequiredNumbers.List, f.L1.Number) - } - if fd.OneofIndex != nil { - k := int(fd.GetOneofIndex()) - if !(0 <= k && k < len(md.GetOneofDecl())) { - return errors.New("message field %q has an invalid oneof index: %d", f.FullName(), k) - } - o := &m.L2.Oneofs.List[k] - f.L1.ContainingOneof = o - o.L1.Fields.List = append(o.L1.Fields.List, f) - } - - if f.L1.Kind, f.L1.Enum, f.L1.Message, err = r.findTarget(f.Kind(), f.Parent().FullName(), partialName(fd.GetTypeName()), f.IsWeak()); err != nil { - return errors.New("message field %q cannot resolve type: %v", f.FullName(), err) - } - if f.L1.Kind == protoreflect.GroupKind && (f.IsMap() || f.IsMapEntry()) { - // A map field might inherit delimited encoding from a file-wide default feature. - // But maps never actually use delimited encoding. (At least for now...) 
- f.L1.Kind = protoreflect.MessageKind - } - if fd.DefaultValue != nil { - v, ev, err := unmarshalDefault(fd.GetDefaultValue(), f, r.allowUnresolvable) - if err != nil { - return errors.New("message field %q has invalid default: %v", f.FullName(), err) - } - f.L1.Default = filedesc.DefaultValue(v, ev) - } - } - - if err := r.resolveMessageDependencies(m.L1.Messages.List, md.GetNestedType()); err != nil { - return err - } - if err := r.resolveExtensionDependencies(m.L1.Extensions.List, md.GetExtension()); err != nil { - return err - } - } - return nil -} - -func (r *resolver) resolveExtensionDependencies(xs []filedesc.Extension, xds []*descriptorpb.FieldDescriptorProto) (err error) { - for i, xd := range xds { - x := &xs[i] - if x.L1.Extendee, err = r.findMessageDescriptor(x.Parent().FullName(), partialName(xd.GetExtendee()), false); err != nil { - return errors.New("extension field %q cannot resolve extendee: %v", x.FullName(), err) - } - if x.L1.Kind, x.L2.Enum, x.L2.Message, err = r.findTarget(x.Kind(), x.Parent().FullName(), partialName(xd.GetTypeName()), false); err != nil { - return errors.New("extension field %q cannot resolve type: %v", x.FullName(), err) - } - if xd.DefaultValue != nil { - v, ev, err := unmarshalDefault(xd.GetDefaultValue(), x, r.allowUnresolvable) - if err != nil { - return errors.New("extension field %q has invalid default: %v", x.FullName(), err) - } - x.L2.Default = filedesc.DefaultValue(v, ev) - } - } - return nil -} - -func (r *resolver) resolveServiceDependencies(ss []filedesc.Service, sds []*descriptorpb.ServiceDescriptorProto) (err error) { - for i, sd := range sds { - s := &ss[i] - for j, md := range sd.GetMethod() { - m := &s.L2.Methods.List[j] - m.L1.Input, err = r.findMessageDescriptor(m.Parent().FullName(), partialName(md.GetInputType()), false) - if err != nil { - return errors.New("service method %q cannot resolve input: %v", m.FullName(), err) - } - m.L1.Output, err = r.findMessageDescriptor(s.FullName(), partialName(md.GetOutputType()), false) - if err != nil { - return errors.New("service method %q cannot resolve output: %v", m.FullName(), err) - } - } - } - return nil -} - -// findTarget finds an enum or message descriptor if k is an enum, message, -// group, or unknown. If unknown, and the name could be resolved, the kind -// returned kind is set based on the type of the resolved descriptor. -func (r *resolver) findTarget(k protoreflect.Kind, scope protoreflect.FullName, ref partialName, isWeak bool) (protoreflect.Kind, protoreflect.EnumDescriptor, protoreflect.MessageDescriptor, error) { - switch k { - case protoreflect.EnumKind: - ed, err := r.findEnumDescriptor(scope, ref, isWeak) - if err != nil { - return 0, nil, nil, err - } - return k, ed, nil, nil - case protoreflect.MessageKind, protoreflect.GroupKind: - md, err := r.findMessageDescriptor(scope, ref, isWeak) - if err != nil { - return 0, nil, nil, err - } - return k, nil, md, nil - case 0: - // Handle unspecified kinds (possible with parsers that operate - // on a per-file basis without knowledge of dependencies). 
- d, err := r.findDescriptor(scope, ref) - if err == protoregistry.NotFound && (r.allowUnresolvable || isWeak) { - return k, filedesc.PlaceholderEnum(ref.FullName()), filedesc.PlaceholderMessage(ref.FullName()), nil - } else if err == protoregistry.NotFound { - return 0, nil, nil, errors.New("%q not found", ref.FullName()) - } else if err != nil { - return 0, nil, nil, err - } - switch d := d.(type) { - case protoreflect.EnumDescriptor: - return protoreflect.EnumKind, d, nil, nil - case protoreflect.MessageDescriptor: - return protoreflect.MessageKind, nil, d, nil - default: - return 0, nil, nil, errors.New("unknown kind") - } - default: - if ref != "" { - return 0, nil, nil, errors.New("target name cannot be specified for %v", k) - } - if !k.IsValid() { - return 0, nil, nil, errors.New("invalid kind: %d", k) - } - return k, nil, nil, nil - } -} - -// findDescriptor finds the descriptor by name, -// which may be a relative name within some scope. -// -// Suppose the scope was "fizz.buzz" and the reference was "Foo.Bar", -// then the following full names are searched: -// - fizz.buzz.Foo.Bar -// - fizz.Foo.Bar -// - Foo.Bar -func (r *resolver) findDescriptor(scope protoreflect.FullName, ref partialName) (protoreflect.Descriptor, error) { - if !ref.IsValid() { - return nil, errors.New("invalid name reference: %q", ref) - } - if ref.IsFull() { - scope, ref = "", ref[1:] - } - var foundButNotImported protoreflect.Descriptor - for { - // Derive the full name to search. - s := protoreflect.FullName(ref) - if scope != "" { - s = scope + "." + s - } - - // Check the current file for the descriptor. - if d, ok := r.local[s]; ok { - return d, nil - } - - // Check the remote registry for the descriptor. - d, err := r.remote.FindDescriptorByName(s) - if err == nil { - // Only allow descriptors covered by one of the imports. - if r.imports[d.ParentFile().Path()] { - return d, nil - } - foundButNotImported = d - } else if err != protoregistry.NotFound { - return nil, errors.Wrap(err, "%q", s) - } - - // Continue on at a higher level of scoping. 
- if scope == "" { - if d := foundButNotImported; d != nil { - return nil, errors.New("resolved %q, but %q is not imported", d.FullName(), d.ParentFile().Path()) - } - return nil, protoregistry.NotFound - } - scope = scope.Parent() - } -} - -func (r *resolver) findEnumDescriptor(scope protoreflect.FullName, ref partialName, isWeak bool) (protoreflect.EnumDescriptor, error) { - d, err := r.findDescriptor(scope, ref) - if err == protoregistry.NotFound && (r.allowUnresolvable || isWeak) { - return filedesc.PlaceholderEnum(ref.FullName()), nil - } else if err == protoregistry.NotFound { - return nil, errors.New("%q not found", ref.FullName()) - } else if err != nil { - return nil, err - } - ed, ok := d.(protoreflect.EnumDescriptor) - if !ok { - return nil, errors.New("resolved %q, but it is not an enum", d.FullName()) - } - return ed, nil -} - -func (r *resolver) findMessageDescriptor(scope protoreflect.FullName, ref partialName, isWeak bool) (protoreflect.MessageDescriptor, error) { - d, err := r.findDescriptor(scope, ref) - if err == protoregistry.NotFound && (r.allowUnresolvable || isWeak) { - return filedesc.PlaceholderMessage(ref.FullName()), nil - } else if err == protoregistry.NotFound { - return nil, errors.New("%q not found", ref.FullName()) - } else if err != nil { - return nil, err - } - md, ok := d.(protoreflect.MessageDescriptor) - if !ok { - return nil, errors.New("resolved %q, but it is not an message", d.FullName()) - } - return md, nil -} - -// partialName is the partial name. A leading dot means that the name is full, -// otherwise the name is relative to some current scope. -// See google.protobuf.FieldDescriptorProto.type_name. -type partialName string - -func (s partialName) IsFull() bool { - return len(s) > 0 && s[0] == '.' -} - -func (s partialName) IsValid() bool { - if s.IsFull() { - return protoreflect.FullName(s[1:]).IsValid() - } - return protoreflect.FullName(s).IsValid() -} - -const unknownPrefix = "*." - -// FullName converts the partial name to a full name on a best-effort basis. -// If relative, it creates an invalid full name, using a "*." prefix -// to indicate that the start of the full name is unknown. 
-func (s partialName) FullName() protoreflect.FullName { - if s.IsFull() { - return protoreflect.FullName(s[1:]) - } - return protoreflect.FullName(unknownPrefix + s) -} - -func unmarshalDefault(s string, fd protoreflect.FieldDescriptor, allowUnresolvable bool) (protoreflect.Value, protoreflect.EnumValueDescriptor, error) { - var evs protoreflect.EnumValueDescriptors - if fd.Enum() != nil { - evs = fd.Enum().Values() - } - v, ev, err := defval.Unmarshal(s, fd.Kind(), evs, defval.Descriptor) - if err != nil && allowUnresolvable && evs != nil && protoreflect.Name(s).IsValid() { - v = protoreflect.ValueOfEnum(0) - if evs.Len() > 0 { - v = protoreflect.ValueOfEnum(evs.Get(0).Number()) - } - ev = filedesc.PlaceholderEnumValue(fd.Enum().FullName().Parent().Append(protoreflect.Name(s))) - } else if err != nil { - return v, ev, err - } - if !fd.HasPresence() { - return v, ev, errors.New("cannot be specified with implicit field presence") - } - if fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind || fd.Cardinality() == protoreflect.Repeated { - return v, ev, errors.New("cannot be specified on composite types") - } - return v, ev, nil -} diff --git a/tools/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go b/tools/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go deleted file mode 100644 index 5eaf65217..000000000 --- a/tools/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go +++ /dev/null @@ -1,371 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package protodesc - -import ( - "strings" - "unicode" - - "google.golang.org/protobuf/encoding/protowire" - "google.golang.org/protobuf/internal/errors" - "google.golang.org/protobuf/internal/filedesc" - "google.golang.org/protobuf/internal/flags" - "google.golang.org/protobuf/internal/genid" - "google.golang.org/protobuf/internal/strs" - "google.golang.org/protobuf/reflect/protoreflect" - - "google.golang.org/protobuf/types/descriptorpb" -) - -func validateEnumDeclarations(es []filedesc.Enum, eds []*descriptorpb.EnumDescriptorProto) error { - for i, ed := range eds { - e := &es[i] - if err := e.L2.ReservedNames.CheckValid(); err != nil { - return errors.New("enum %q reserved names has %v", e.FullName(), err) - } - if err := e.L2.ReservedRanges.CheckValid(); err != nil { - return errors.New("enum %q reserved ranges has %v", e.FullName(), err) - } - if len(ed.GetValue()) == 0 { - return errors.New("enum %q must contain at least one value declaration", e.FullName()) - } - allowAlias := ed.GetOptions().GetAllowAlias() - foundAlias := false - for i := 0; i < e.Values().Len(); i++ { - v1 := e.Values().Get(i) - if v2 := e.Values().ByNumber(v1.Number()); v1 != v2 { - foundAlias = true - if !allowAlias { - return errors.New("enum %q has conflicting non-aliased values on number %d: %q with %q", e.FullName(), v1.Number(), v1.Name(), v2.Name()) - } - } - } - if allowAlias && !foundAlias { - return errors.New("enum %q allows aliases, but none were found", e.FullName()) - } - if !e.IsClosed() { - if v := e.Values().Get(0); v.Number() != 0 { - return errors.New("enum %q using open semantics must have zero number for the first value", v.FullName()) - } - // Verify that value names in open enums do not conflict if the - // case-insensitive prefix is removed. 
- // See protoc v3.8.0: src/google/protobuf/descriptor.cc:4991-5055 - names := map[string]protoreflect.EnumValueDescriptor{} - prefix := strings.Replace(strings.ToLower(string(e.Name())), "_", "", -1) - for i := 0; i < e.Values().Len(); i++ { - v1 := e.Values().Get(i) - s := strs.EnumValueName(strs.TrimEnumPrefix(string(v1.Name()), prefix)) - if v2, ok := names[s]; ok && v1.Number() != v2.Number() { - return errors.New("enum %q using open semantics has conflict: %q with %q", e.FullName(), v1.Name(), v2.Name()) - } - names[s] = v1 - } - } - - for j, vd := range ed.GetValue() { - v := &e.L2.Values.List[j] - if vd.Number == nil { - return errors.New("enum value %q must have a specified number", v.FullName()) - } - if e.L2.ReservedNames.Has(v.Name()) { - return errors.New("enum value %q must not use reserved name", v.FullName()) - } - if e.L2.ReservedRanges.Has(v.Number()) { - return errors.New("enum value %q must not use reserved number %d", v.FullName(), v.Number()) - } - } - } - return nil -} - -func validateMessageDeclarations(file *filedesc.File, ms []filedesc.Message, mds []*descriptorpb.DescriptorProto) error { - // There are a few limited exceptions only for proto3 - isProto3 := file.L1.Edition == fromEditionProto(descriptorpb.Edition_EDITION_PROTO3) - for i, md := range mds { - m := &ms[i] - - // Handle the message descriptor itself. - isMessageSet := md.GetOptions().GetMessageSetWireFormat() - if err := m.L2.ReservedNames.CheckValid(); err != nil { - return errors.New("message %q reserved names has %v", m.FullName(), err) - } - if err := m.L2.ReservedRanges.CheckValid(isMessageSet); err != nil { - return errors.New("message %q reserved ranges has %v", m.FullName(), err) - } - if err := m.L2.ExtensionRanges.CheckValid(isMessageSet); err != nil { - return errors.New("message %q extension ranges has %v", m.FullName(), err) - } - if err := (*filedesc.FieldRanges).CheckOverlap(&m.L2.ReservedRanges, &m.L2.ExtensionRanges); err != nil { - return errors.New("message %q reserved and extension ranges has %v", m.FullName(), err) - } - for i := 0; i < m.Fields().Len(); i++ { - f1 := m.Fields().Get(i) - if f2 := m.Fields().ByNumber(f1.Number()); f1 != f2 { - return errors.New("message %q has conflicting fields: %q with %q", m.FullName(), f1.Name(), f2.Name()) - } - } - if isMessageSet && !flags.ProtoLegacy { - return errors.New("message %q is a MessageSet, which is a legacy proto1 feature that is no longer supported", m.FullName()) - } - if isMessageSet && (isProto3 || m.Fields().Len() > 0 || m.ExtensionRanges().Len() == 0) { - return errors.New("message %q is an invalid proto1 MessageSet", m.FullName()) - } - if isProto3 { - if m.ExtensionRanges().Len() > 0 { - return errors.New("message %q using proto3 semantics cannot have extension ranges", m.FullName()) - } - } - - for j, fd := range md.GetField() { - f := &m.L2.Fields.List[j] - if m.L2.ReservedNames.Has(f.Name()) { - return errors.New("message field %q must not use reserved name", f.FullName()) - } - if !f.Number().IsValid() { - return errors.New("message field %q has an invalid number: %d", f.FullName(), f.Number()) - } - if !f.Cardinality().IsValid() { - return errors.New("message field %q has an invalid cardinality: %d", f.FullName(), f.Cardinality()) - } - if m.L2.ReservedRanges.Has(f.Number()) { - return errors.New("message field %q must not use reserved number %d", f.FullName(), f.Number()) - } - if m.L2.ExtensionRanges.Has(f.Number()) { - return errors.New("message field %q with number %d in extension range", f.FullName(), 
f.Number()) - } - if fd.Extendee != nil { - return errors.New("message field %q may not have extendee: %q", f.FullName(), fd.GetExtendee()) - } - if f.L1.IsProto3Optional { - if !isProto3 { - return errors.New("message field %q under proto3 optional semantics must be specified in the proto3 syntax", f.FullName()) - } - if f.Cardinality() != protoreflect.Optional { - return errors.New("message field %q under proto3 optional semantics must have optional cardinality", f.FullName()) - } - if f.ContainingOneof() != nil && f.ContainingOneof().Fields().Len() != 1 { - return errors.New("message field %q under proto3 optional semantics must be within a single element oneof", f.FullName()) - } - } - if f.IsWeak() && !flags.ProtoLegacyWeak { - return errors.New("message field %q is a weak field, which is a legacy proto1 feature that is no longer supported", f.FullName()) - } - if f.IsWeak() && (!f.HasPresence() || !isOptionalMessage(f) || f.ContainingOneof() != nil) { - return errors.New("message field %q may only be weak for an optional message", f.FullName()) - } - if f.IsPacked() && !isPackable(f) { - return errors.New("message field %q is not packable", f.FullName()) - } - if err := checkValidGroup(file, f); err != nil { - return errors.New("message field %q is an invalid group: %v", f.FullName(), err) - } - if err := checkValidMap(f); err != nil { - return errors.New("message field %q is an invalid map: %v", f.FullName(), err) - } - if isProto3 { - if f.Cardinality() == protoreflect.Required { - return errors.New("message field %q using proto3 semantics cannot be required", f.FullName()) - } - if f.Enum() != nil && !f.Enum().IsPlaceholder() && f.Enum().IsClosed() { - return errors.New("message field %q using proto3 semantics may only depend on open enums", f.FullName()) - } - } - if f.Cardinality() == protoreflect.Optional && !f.HasPresence() && f.Enum() != nil && !f.Enum().IsPlaceholder() && f.Enum().IsClosed() { - return errors.New("message field %q with implicit presence may only use open enums", f.FullName()) - } - } - seenSynthetic := false // synthetic oneofs for proto3 optional must come after real oneofs - for j := range md.GetOneofDecl() { - o := &m.L2.Oneofs.List[j] - if o.Fields().Len() == 0 { - return errors.New("message oneof %q must contain at least one field declaration", o.FullName()) - } - if n := o.Fields().Len(); n-1 != (o.Fields().Get(n-1).Index() - o.Fields().Get(0).Index()) { - return errors.New("message oneof %q must have consecutively declared fields", o.FullName()) - } - - if o.IsSynthetic() { - seenSynthetic = true - continue - } - if !o.IsSynthetic() && seenSynthetic { - return errors.New("message oneof %q must be declared before synthetic oneofs", o.FullName()) - } - - for i := 0; i < o.Fields().Len(); i++ { - f := o.Fields().Get(i) - if f.Cardinality() != protoreflect.Optional { - return errors.New("message field %q belongs in a oneof and must be optional", f.FullName()) - } - if f.IsWeak() { - return errors.New("message field %q belongs in a oneof and must not be a weak reference", f.FullName()) - } - } - } - - if err := validateEnumDeclarations(m.L1.Enums.List, md.GetEnumType()); err != nil { - return err - } - if err := validateMessageDeclarations(file, m.L1.Messages.List, md.GetNestedType()); err != nil { - return err - } - if err := validateExtensionDeclarations(file, m.L1.Extensions.List, md.GetExtension()); err != nil { - return err - } - } - return nil -} - -func validateExtensionDeclarations(f *filedesc.File, xs []filedesc.Extension, xds 
[]*descriptorpb.FieldDescriptorProto) error { - for i, xd := range xds { - x := &xs[i] - // NOTE: Avoid using the IsValid method since extensions to MessageSet - // may have a field number higher than normal. This check only verifies - // that the number is not negative or reserved. We check again later - // if we know that the extendee is definitely not a MessageSet. - if n := x.Number(); n < 0 || (protowire.FirstReservedNumber <= n && n <= protowire.LastReservedNumber) { - return errors.New("extension field %q has an invalid number: %d", x.FullName(), x.Number()) - } - if !x.Cardinality().IsValid() || x.Cardinality() == protoreflect.Required { - return errors.New("extension field %q has an invalid cardinality: %d", x.FullName(), x.Cardinality()) - } - if xd.JsonName != nil { - // A bug in older versions of protoc would always populate the - // "json_name" option for extensions when it is meaningless. - // When it did so, it would always use the camel-cased field name. - if xd.GetJsonName() != strs.JSONCamelCase(string(x.Name())) { - return errors.New("extension field %q may not have an explicitly set JSON name: %q", x.FullName(), xd.GetJsonName()) - } - } - if xd.OneofIndex != nil { - return errors.New("extension field %q may not be part of a oneof", x.FullName()) - } - if md := x.ContainingMessage(); !md.IsPlaceholder() { - if !md.ExtensionRanges().Has(x.Number()) { - return errors.New("extension field %q extends %q with non-extension field number: %d", x.FullName(), md.FullName(), x.Number()) - } - isMessageSet := md.Options().(*descriptorpb.MessageOptions).GetMessageSetWireFormat() - if isMessageSet && !isOptionalMessage(x) { - return errors.New("extension field %q extends MessageSet and must be an optional message", x.FullName()) - } - if !isMessageSet && !x.Number().IsValid() { - return errors.New("extension field %q has an invalid number: %d", x.FullName(), x.Number()) - } - } - if xd.GetOptions().GetWeak() { - return errors.New("extension field %q cannot be a weak reference", x.FullName()) - } - if x.IsPacked() && !isPackable(x) { - return errors.New("extension field %q is not packable", x.FullName()) - } - if err := checkValidGroup(f, x); err != nil { - return errors.New("extension field %q is an invalid group: %v", x.FullName(), err) - } - if md := x.Message(); md != nil && md.IsMapEntry() { - return errors.New("extension field %q cannot be a map entry", x.FullName()) - } - if f.L1.Edition == fromEditionProto(descriptorpb.Edition_EDITION_PROTO3) { - switch x.ContainingMessage().FullName() { - case (*descriptorpb.FileOptions)(nil).ProtoReflect().Descriptor().FullName(): - case (*descriptorpb.EnumOptions)(nil).ProtoReflect().Descriptor().FullName(): - case (*descriptorpb.EnumValueOptions)(nil).ProtoReflect().Descriptor().FullName(): - case (*descriptorpb.MessageOptions)(nil).ProtoReflect().Descriptor().FullName(): - case (*descriptorpb.FieldOptions)(nil).ProtoReflect().Descriptor().FullName(): - case (*descriptorpb.OneofOptions)(nil).ProtoReflect().Descriptor().FullName(): - case (*descriptorpb.ExtensionRangeOptions)(nil).ProtoReflect().Descriptor().FullName(): - case (*descriptorpb.ServiceOptions)(nil).ProtoReflect().Descriptor().FullName(): - case (*descriptorpb.MethodOptions)(nil).ProtoReflect().Descriptor().FullName(): - default: - return errors.New("extension field %q cannot be declared in proto3 unless extended descriptor options", x.FullName()) - } - } - } - return nil -} - -// isOptionalMessage reports whether this is an optional message. 
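The switch above enumerates the only extendees a proto3 file may declare extensions for: the descriptor option messages, i.e. custom options. A small runnable sketch that reproduces that allow-list with the public descriptorpb API (the slice itself is illustrative, not exported by the package):

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	// Each entry is one of the option messages a proto3 extension may extend.
	allowed := []protoreflect.FullName{
		(*descriptorpb.FileOptions)(nil).ProtoReflect().Descriptor().FullName(),
		(*descriptorpb.EnumOptions)(nil).ProtoReflect().Descriptor().FullName(),
		(*descriptorpb.EnumValueOptions)(nil).ProtoReflect().Descriptor().FullName(),
		(*descriptorpb.MessageOptions)(nil).ProtoReflect().Descriptor().FullName(),
		(*descriptorpb.FieldOptions)(nil).ProtoReflect().Descriptor().FullName(),
		(*descriptorpb.OneofOptions)(nil).ProtoReflect().Descriptor().FullName(),
		(*descriptorpb.ExtensionRangeOptions)(nil).ProtoReflect().Descriptor().FullName(),
		(*descriptorpb.ServiceOptions)(nil).ProtoReflect().Descriptor().FullName(),
		(*descriptorpb.MethodOptions)(nil).ProtoReflect().Descriptor().FullName(),
	}
	for _, name := range allowed {
		fmt.Println(name) // e.g. google.protobuf.FileOptions
	}
}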
-// If the kind is unknown, it is assumed to be a message. -func isOptionalMessage(fd protoreflect.FieldDescriptor) bool { - return (fd.Kind() == 0 || fd.Kind() == protoreflect.MessageKind) && fd.Cardinality() == protoreflect.Optional -} - -// isPackable checks whether the pack option can be specified. -func isPackable(fd protoreflect.FieldDescriptor) bool { - switch fd.Kind() { - case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind, protoreflect.GroupKind: - return false - } - return fd.IsList() -} - -// checkValidGroup reports whether fd is a valid group according to the same -// rules that protoc imposes. -func checkValidGroup(f *filedesc.File, fd protoreflect.FieldDescriptor) error { - md := fd.Message() - switch { - case fd.Kind() != protoreflect.GroupKind: - return nil - case f.L1.Edition == fromEditionProto(descriptorpb.Edition_EDITION_PROTO3): - return errors.New("invalid under proto3 semantics") - case md == nil || md.IsPlaceholder(): - return errors.New("message must be resolvable") - } - if f.L1.Edition < fromEditionProto(descriptorpb.Edition_EDITION_2023) { - switch { - case fd.FullName().Parent() != md.FullName().Parent(): - return errors.New("message and field must be declared in the same scope") - case !unicode.IsUpper(rune(md.Name()[0])): - return errors.New("message name must start with an uppercase") - case fd.Name() != protoreflect.Name(strings.ToLower(string(md.Name()))): - return errors.New("field name must be lowercased form of the message name") - } - } - return nil -} - -// checkValidMap checks whether the field is a valid map according to the same -// rules that protoc imposes. -// See protoc v3.8.0: src/google/protobuf/descriptor.cc:6045-6115 -func checkValidMap(fd protoreflect.FieldDescriptor) error { - md := fd.Message() - switch { - case md == nil || !md.IsMapEntry(): - return nil - case fd.FullName().Parent() != md.FullName().Parent(): - return errors.New("message and field must be declared in the same scope") - case md.Name() != protoreflect.Name(strs.MapEntryName(string(fd.Name()))): - return errors.New("incorrect implicit map entry name") - case fd.Cardinality() != protoreflect.Repeated: - return errors.New("field must be repeated") - case md.Fields().Len() != 2: - return errors.New("message must have exactly two fields") - case md.ExtensionRanges().Len() > 0: - return errors.New("message must not have any extension ranges") - case md.Enums().Len()+md.Messages().Len()+md.Extensions().Len() > 0: - return errors.New("message must not have any nested declarations") - } - kf := md.Fields().Get(0) - vf := md.Fields().Get(1) - switch { - case kf.Name() != genid.MapEntry_Key_field_name || kf.Number() != genid.MapEntry_Key_field_number || kf.Cardinality() != protoreflect.Optional || kf.ContainingOneof() != nil || kf.HasDefault(): - return errors.New("invalid key field") - case vf.Name() != genid.MapEntry_Value_field_name || vf.Number() != genid.MapEntry_Value_field_number || vf.Cardinality() != protoreflect.Optional || vf.ContainingOneof() != nil || vf.HasDefault(): - return errors.New("invalid value field") - } - switch kf.Kind() { - case protoreflect.BoolKind: // bool - case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: // int32 - case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: // int64 - case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: // uint32 - case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: // uint64 - case protoreflect.StringKind: // string - default: - 
return errors.New("invalid key kind: %v", kf.Kind()) - } - if e := vf.Enum(); e != nil && e.Values().Len() > 0 && e.Values().Get(0).Number() != 0 { - return errors.New("map enum value must have zero number for the first value") - } - return nil -} diff --git a/tools/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go b/tools/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go deleted file mode 100644 index 697a61b29..000000000 --- a/tools/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go +++ /dev/null @@ -1,181 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package protodesc - -import ( - "fmt" - "os" - "sync" - - "google.golang.org/protobuf/internal/editiondefaults" - "google.golang.org/protobuf/internal/filedesc" - "google.golang.org/protobuf/internal/genid" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/types/descriptorpb" - "google.golang.org/protobuf/types/gofeaturespb" -) - -var defaults = &descriptorpb.FeatureSetDefaults{} -var defaultsCacheMu sync.Mutex -var defaultsCache = make(map[filedesc.Edition]*descriptorpb.FeatureSet) - -func init() { - err := proto.Unmarshal(editiondefaults.Defaults, defaults) - if err != nil { - fmt.Fprintf(os.Stderr, "unmarshal editions defaults: %v\n", err) - os.Exit(1) - } -} - -func fromEditionProto(epb descriptorpb.Edition) filedesc.Edition { - return filedesc.Edition(epb) -} - -func toEditionProto(ed filedesc.Edition) descriptorpb.Edition { - switch ed { - case filedesc.EditionUnknown: - return descriptorpb.Edition_EDITION_UNKNOWN - case filedesc.EditionProto2: - return descriptorpb.Edition_EDITION_PROTO2 - case filedesc.EditionProto3: - return descriptorpb.Edition_EDITION_PROTO3 - case filedesc.Edition2023: - return descriptorpb.Edition_EDITION_2023 - case filedesc.Edition2024: - return descriptorpb.Edition_EDITION_2024 - default: - panic(fmt.Sprintf("unknown value for edition: %v", ed)) - } -} - -func getFeatureSetFor(ed filedesc.Edition) *descriptorpb.FeatureSet { - defaultsCacheMu.Lock() - defer defaultsCacheMu.Unlock() - if def, ok := defaultsCache[ed]; ok { - return def - } - edpb := toEditionProto(ed) - if defaults.GetMinimumEdition() > edpb || defaults.GetMaximumEdition() < edpb { - // This should never happen protodesc.(FileOptions).New would fail when - // initializing the file descriptor. - // This most likely means the embedded defaults were not updated. - fmt.Fprintf(os.Stderr, "internal error: unsupported edition %v (did you forget to update the embedded defaults (i.e. the bootstrap descriptor proto)?)\n", edpb) - os.Exit(1) - } - fsed := defaults.GetDefaults()[0] - // Using a linear search for now. - // Editions are guaranteed to be sorted and thus we could use a binary search. - // Given that there are only a handful of editions (with one more per year) - // there is not much reason to use a binary search. - for _, def := range defaults.GetDefaults() { - if def.GetEdition() <= edpb { - fsed = def - } else { - break - } - } - fs := proto.Clone(fsed.GetFixedFeatures()).(*descriptorpb.FeatureSet) - proto.Merge(fs, fsed.GetOverridableFeatures()) - defaultsCache[ed] = fs - return fs -} - -// mergeEditionFeatures merges the parent and child feature sets. 
This function -// should be used when initializing Go descriptors from descriptor protos which -// is why the parent is a filedesc.EditionsFeatures (Go representation) while -// the child is a descriptorproto.FeatureSet (protoc representation). -// Any feature set by the child overwrites what is set by the parent. -func mergeEditionFeatures(parentDesc protoreflect.Descriptor, child *descriptorpb.FeatureSet) filedesc.EditionFeatures { - var parentFS filedesc.EditionFeatures - switch p := parentDesc.(type) { - case *filedesc.File: - parentFS = p.L1.EditionFeatures - case *filedesc.Message: - parentFS = p.L1.EditionFeatures - default: - panic(fmt.Sprintf("unknown parent type %T", parentDesc)) - } - if child == nil { - return parentFS - } - if fp := child.FieldPresence; fp != nil { - parentFS.IsFieldPresence = *fp == descriptorpb.FeatureSet_LEGACY_REQUIRED || - *fp == descriptorpb.FeatureSet_EXPLICIT - parentFS.IsLegacyRequired = *fp == descriptorpb.FeatureSet_LEGACY_REQUIRED - } - if et := child.EnumType; et != nil { - parentFS.IsOpenEnum = *et == descriptorpb.FeatureSet_OPEN - } - - if rfe := child.RepeatedFieldEncoding; rfe != nil { - parentFS.IsPacked = *rfe == descriptorpb.FeatureSet_PACKED - } - - if utf8val := child.Utf8Validation; utf8val != nil { - parentFS.IsUTF8Validated = *utf8val == descriptorpb.FeatureSet_VERIFY - } - - if me := child.MessageEncoding; me != nil { - parentFS.IsDelimitedEncoded = *me == descriptorpb.FeatureSet_DELIMITED - } - - if jf := child.JsonFormat; jf != nil { - parentFS.IsJSONCompliant = *jf == descriptorpb.FeatureSet_ALLOW - } - - // We must not use proto.GetExtension(child, gofeaturespb.E_Go) - // because that only works for messages we generated, but not for - // dynamicpb messages. See golang/protobuf#1669. - // - // Further, we harden this code against adversarial inputs: a - // service which accepts descriptors from a possibly malicious - // source shouldn't crash. - goFeatures := child.ProtoReflect().Get(gofeaturespb.E_Go.TypeDescriptor()) - if !goFeatures.IsValid() { - return parentFS - } - gf, ok := goFeatures.Interface().(protoreflect.Message) - if !ok { - return parentFS - } - // gf.Interface() could be *dynamicpb.Message or *gofeaturespb.GoFeatures. - fields := gf.Descriptor().Fields() - - if fd := fields.ByNumber(genid.GoFeatures_LegacyUnmarshalJsonEnum_field_number); fd != nil && - !fd.IsList() && - fd.Kind() == protoreflect.BoolKind && - gf.Has(fd) { - parentFS.GenerateLegacyUnmarshalJSON = gf.Get(fd).Bool() - } - - if fd := fields.ByNumber(genid.GoFeatures_StripEnumPrefix_field_number); fd != nil && - !fd.IsList() && - fd.Kind() == protoreflect.EnumKind && - gf.Has(fd) { - parentFS.StripEnumPrefix = int(gf.Get(fd).Enum()) - } - - if fd := fields.ByNumber(genid.GoFeatures_ApiLevel_field_number); fd != nil && - !fd.IsList() && - fd.Kind() == protoreflect.EnumKind && - gf.Has(fd) { - parentFS.APILevel = int(gf.Get(fd).Enum()) - } - - return parentFS -} - -// initFileDescFromFeatureSet initializes editions related fields in fd based -// on fs. If fs is nil it is assumed to be an empty featureset and all fields -// will be initialized with the appropriate default. fd.L1.Edition must be set -// before calling this function. 
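The lookup in getFeatureSetFor amounts to: take the last defaults entry whose edition does not exceed the requested one, clone its fixed features, and merge the overridable ones on top. A hedged standalone sketch of that selection (the featureSetFor name and the toy defaults table are invented for illustration; it assumes the FixedFeatures/OverridableFeatures fields of descriptorpb.FeatureSetDefaults present in this protobuf release):

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

func featureSetFor(defaults *descriptorpb.FeatureSetDefaults, ed descriptorpb.Edition) *descriptorpb.FeatureSet {
	var pick *descriptorpb.FeatureSetDefaults_FeatureSetEditionDefault
	for _, def := range defaults.GetDefaults() {
		// Entries are sorted by edition, so keep the greatest one <= ed.
		if def.GetEdition() <= ed {
			pick = def
		} else {
			break
		}
	}
	if pick == nil {
		return nil // ed predates every known default
	}
	// Fixed features form the base; overridable features are layered on top.
	fs := proto.Clone(pick.GetFixedFeatures()).(*descriptorpb.FeatureSet)
	proto.Merge(fs, pick.GetOverridableFeatures())
	return fs
}

func main() {
	// A toy defaults table with a single proto2 entry, for illustration only.
	defaults := &descriptorpb.FeatureSetDefaults{
		Defaults: []*descriptorpb.FeatureSetDefaults_FeatureSetEditionDefault{{
			Edition: descriptorpb.Edition_EDITION_PROTO2.Enum(),
			FixedFeatures: &descriptorpb.FeatureSet{
				EnumType: descriptorpb.FeatureSet_CLOSED.Enum(),
			},
		}},
	}
	fmt.Println(featureSetFor(defaults, descriptorpb.Edition_EDITION_2023).GetEnumType()) // CLOSED
}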
-func initFileDescFromFeatureSet(fd *filedesc.File, fs *descriptorpb.FeatureSet) { - dfs := getFeatureSetFor(fd.L1.Edition) - // initialize the featureset with the defaults - fd.L1.EditionFeatures = mergeEditionFeatures(fd, dfs) - // overwrite any options explicitly specified - fd.L1.EditionFeatures = mergeEditionFeatures(fd, fs) -} diff --git a/tools/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go b/tools/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go deleted file mode 100644 index a5de8d400..000000000 --- a/tools/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go +++ /dev/null @@ -1,274 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package protodesc - -import ( - "fmt" - "strings" - - "google.golang.org/protobuf/internal/encoding/defval" - "google.golang.org/protobuf/internal/strs" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" - - "google.golang.org/protobuf/types/descriptorpb" -) - -// ToFileDescriptorProto copies a [protoreflect.FileDescriptor] into a -// google.protobuf.FileDescriptorProto message. -func ToFileDescriptorProto(file protoreflect.FileDescriptor) *descriptorpb.FileDescriptorProto { - p := &descriptorpb.FileDescriptorProto{ - Name: proto.String(file.Path()), - Options: proto.Clone(file.Options()).(*descriptorpb.FileOptions), - } - if file.Package() != "" { - p.Package = proto.String(string(file.Package())) - } - for i, imports := 0, file.Imports(); i < imports.Len(); i++ { - imp := imports.Get(i) - p.Dependency = append(p.Dependency, imp.Path()) - if imp.IsPublic { - p.PublicDependency = append(p.PublicDependency, int32(i)) - } - if imp.IsWeak { - p.WeakDependency = append(p.WeakDependency, int32(i)) - } - } - for i, locs := 0, file.SourceLocations(); i < locs.Len(); i++ { - loc := locs.Get(i) - l := &descriptorpb.SourceCodeInfo_Location{} - l.Path = append(l.Path, loc.Path...) - if loc.StartLine == loc.EndLine { - l.Span = []int32{int32(loc.StartLine), int32(loc.StartColumn), int32(loc.EndColumn)} - } else { - l.Span = []int32{int32(loc.StartLine), int32(loc.StartColumn), int32(loc.EndLine), int32(loc.EndColumn)} - } - l.LeadingDetachedComments = append([]string(nil), loc.LeadingDetachedComments...) 
- if loc.LeadingComments != "" { - l.LeadingComments = proto.String(loc.LeadingComments) - } - if loc.TrailingComments != "" { - l.TrailingComments = proto.String(loc.TrailingComments) - } - if p.SourceCodeInfo == nil { - p.SourceCodeInfo = &descriptorpb.SourceCodeInfo{} - } - p.SourceCodeInfo.Location = append(p.SourceCodeInfo.Location, l) - - } - for i, messages := 0, file.Messages(); i < messages.Len(); i++ { - p.MessageType = append(p.MessageType, ToDescriptorProto(messages.Get(i))) - } - for i, enums := 0, file.Enums(); i < enums.Len(); i++ { - p.EnumType = append(p.EnumType, ToEnumDescriptorProto(enums.Get(i))) - } - for i, services := 0, file.Services(); i < services.Len(); i++ { - p.Service = append(p.Service, ToServiceDescriptorProto(services.Get(i))) - } - for i, exts := 0, file.Extensions(); i < exts.Len(); i++ { - p.Extension = append(p.Extension, ToFieldDescriptorProto(exts.Get(i))) - } - if syntax := file.Syntax(); syntax != protoreflect.Proto2 && syntax.IsValid() { - p.Syntax = proto.String(file.Syntax().String()) - } - if file.Syntax() == protoreflect.Editions { - desc := file - if fileImportDesc, ok := file.(protoreflect.FileImport); ok { - desc = fileImportDesc.FileDescriptor - } - - if editionsInterface, ok := desc.(interface{ Edition() int32 }); ok { - p.Edition = descriptorpb.Edition(editionsInterface.Edition()).Enum() - } - } - return p -} - -// ToDescriptorProto copies a [protoreflect.MessageDescriptor] into a -// google.protobuf.DescriptorProto message. -func ToDescriptorProto(message protoreflect.MessageDescriptor) *descriptorpb.DescriptorProto { - p := &descriptorpb.DescriptorProto{ - Name: proto.String(string(message.Name())), - Options: proto.Clone(message.Options()).(*descriptorpb.MessageOptions), - } - for i, fields := 0, message.Fields(); i < fields.Len(); i++ { - p.Field = append(p.Field, ToFieldDescriptorProto(fields.Get(i))) - } - for i, exts := 0, message.Extensions(); i < exts.Len(); i++ { - p.Extension = append(p.Extension, ToFieldDescriptorProto(exts.Get(i))) - } - for i, messages := 0, message.Messages(); i < messages.Len(); i++ { - p.NestedType = append(p.NestedType, ToDescriptorProto(messages.Get(i))) - } - for i, enums := 0, message.Enums(); i < enums.Len(); i++ { - p.EnumType = append(p.EnumType, ToEnumDescriptorProto(enums.Get(i))) - } - for i, xranges := 0, message.ExtensionRanges(); i < xranges.Len(); i++ { - xrange := xranges.Get(i) - p.ExtensionRange = append(p.ExtensionRange, &descriptorpb.DescriptorProto_ExtensionRange{ - Start: proto.Int32(int32(xrange[0])), - End: proto.Int32(int32(xrange[1])), - Options: proto.Clone(message.ExtensionRangeOptions(i)).(*descriptorpb.ExtensionRangeOptions), - }) - } - for i, oneofs := 0, message.Oneofs(); i < oneofs.Len(); i++ { - p.OneofDecl = append(p.OneofDecl, ToOneofDescriptorProto(oneofs.Get(i))) - } - for i, ranges := 0, message.ReservedRanges(); i < ranges.Len(); i++ { - rrange := ranges.Get(i) - p.ReservedRange = append(p.ReservedRange, &descriptorpb.DescriptorProto_ReservedRange{ - Start: proto.Int32(int32(rrange[0])), - End: proto.Int32(int32(rrange[1])), - }) - } - for i, names := 0, message.ReservedNames(); i < names.Len(); i++ { - p.ReservedName = append(p.ReservedName, string(names.Get(i))) - } - return p -} - -// ToFieldDescriptorProto copies a [protoreflect.FieldDescriptor] into a -// google.protobuf.FieldDescriptorProto message. 
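These To*Proto converters pair with protodesc.NewFile to round-trip a descriptor through its proto form. A short usage sketch, using durationpb only as a convenient well-known input and resolving imports against the global registry:

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protodesc"
	"google.golang.org/protobuf/reflect/protoregistry"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	fd := (&durationpb.Duration{}).ProtoReflect().Descriptor().ParentFile()

	// Descriptor -> google.protobuf.FileDescriptorProto.
	fdp := protodesc.ToFileDescriptorProto(fd)
	fmt.Println(fdp.GetName()) // google/protobuf/duration.proto

	// And back again, resolving any imports against the global registry.
	rebuilt, err := protodesc.NewFile(fdp, protoregistry.GlobalFiles)
	if err != nil {
		panic(err)
	}
	fmt.Println(rebuilt.Path()) // google/protobuf/duration.proto
}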
-func ToFieldDescriptorProto(field protoreflect.FieldDescriptor) *descriptorpb.FieldDescriptorProto { - p := &descriptorpb.FieldDescriptorProto{ - Name: proto.String(string(field.Name())), - Number: proto.Int32(int32(field.Number())), - Label: descriptorpb.FieldDescriptorProto_Label(field.Cardinality()).Enum(), - Options: proto.Clone(field.Options()).(*descriptorpb.FieldOptions), - } - if field.IsExtension() { - p.Extendee = fullNameOf(field.ContainingMessage()) - } - if field.Kind().IsValid() { - p.Type = descriptorpb.FieldDescriptorProto_Type(field.Kind()).Enum() - } - if field.Enum() != nil { - p.TypeName = fullNameOf(field.Enum()) - } - if field.Message() != nil { - p.TypeName = fullNameOf(field.Message()) - } - if field.HasJSONName() { - // A bug in older versions of protoc would always populate the - // "json_name" option for extensions when it is meaningless. - // When it did so, it would always use the camel-cased field name. - if field.IsExtension() { - p.JsonName = proto.String(strs.JSONCamelCase(string(field.Name()))) - } else { - p.JsonName = proto.String(field.JSONName()) - } - } - if field.Syntax() == protoreflect.Proto3 && field.HasOptionalKeyword() { - p.Proto3Optional = proto.Bool(true) - } - if field.Syntax() == protoreflect.Editions { - // Editions have no group keyword, this type is only set so that downstream users continue - // treating this as delimited encoding. - if p.GetType() == descriptorpb.FieldDescriptorProto_TYPE_GROUP { - p.Type = descriptorpb.FieldDescriptorProto_TYPE_MESSAGE.Enum() - } - // Editions have no required keyword, this label is only set so that downstream users continue - // treating it as required. - if p.GetLabel() == descriptorpb.FieldDescriptorProto_LABEL_REQUIRED { - p.Label = descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL.Enum() - } - } - if field.HasDefault() { - def, err := defval.Marshal(field.Default(), field.DefaultEnumValue(), field.Kind(), defval.Descriptor) - if err != nil && field.DefaultEnumValue() != nil { - def = string(field.DefaultEnumValue().Name()) // occurs for unresolved enum values - } else if err != nil { - panic(fmt.Sprintf("%v: %v", field.FullName(), err)) - } - p.DefaultValue = proto.String(def) - } - if oneof := field.ContainingOneof(); oneof != nil { - p.OneofIndex = proto.Int32(int32(oneof.Index())) - } - return p -} - -// ToOneofDescriptorProto copies a [protoreflect.OneofDescriptor] into a -// google.protobuf.OneofDescriptorProto message. -func ToOneofDescriptorProto(oneof protoreflect.OneofDescriptor) *descriptorpb.OneofDescriptorProto { - return &descriptorpb.OneofDescriptorProto{ - Name: proto.String(string(oneof.Name())), - Options: proto.Clone(oneof.Options()).(*descriptorpb.OneofOptions), - } -} - -// ToEnumDescriptorProto copies a [protoreflect.EnumDescriptor] into a -// google.protobuf.EnumDescriptorProto message. 
-func ToEnumDescriptorProto(enum protoreflect.EnumDescriptor) *descriptorpb.EnumDescriptorProto { - p := &descriptorpb.EnumDescriptorProto{ - Name: proto.String(string(enum.Name())), - Options: proto.Clone(enum.Options()).(*descriptorpb.EnumOptions), - } - for i, values := 0, enum.Values(); i < values.Len(); i++ { - p.Value = append(p.Value, ToEnumValueDescriptorProto(values.Get(i))) - } - for i, ranges := 0, enum.ReservedRanges(); i < ranges.Len(); i++ { - rrange := ranges.Get(i) - p.ReservedRange = append(p.ReservedRange, &descriptorpb.EnumDescriptorProto_EnumReservedRange{ - Start: proto.Int32(int32(rrange[0])), - End: proto.Int32(int32(rrange[1])), - }) - } - for i, names := 0, enum.ReservedNames(); i < names.Len(); i++ { - p.ReservedName = append(p.ReservedName, string(names.Get(i))) - } - return p -} - -// ToEnumValueDescriptorProto copies a [protoreflect.EnumValueDescriptor] into a -// google.protobuf.EnumValueDescriptorProto message. -func ToEnumValueDescriptorProto(value protoreflect.EnumValueDescriptor) *descriptorpb.EnumValueDescriptorProto { - return &descriptorpb.EnumValueDescriptorProto{ - Name: proto.String(string(value.Name())), - Number: proto.Int32(int32(value.Number())), - Options: proto.Clone(value.Options()).(*descriptorpb.EnumValueOptions), - } -} - -// ToServiceDescriptorProto copies a [protoreflect.ServiceDescriptor] into a -// google.protobuf.ServiceDescriptorProto message. -func ToServiceDescriptorProto(service protoreflect.ServiceDescriptor) *descriptorpb.ServiceDescriptorProto { - p := &descriptorpb.ServiceDescriptorProto{ - Name: proto.String(string(service.Name())), - Options: proto.Clone(service.Options()).(*descriptorpb.ServiceOptions), - } - for i, methods := 0, service.Methods(); i < methods.Len(); i++ { - p.Method = append(p.Method, ToMethodDescriptorProto(methods.Get(i))) - } - return p -} - -// ToMethodDescriptorProto copies a [protoreflect.MethodDescriptor] into a -// google.protobuf.MethodDescriptorProto message. -func ToMethodDescriptorProto(method protoreflect.MethodDescriptor) *descriptorpb.MethodDescriptorProto { - p := &descriptorpb.MethodDescriptorProto{ - Name: proto.String(string(method.Name())), - InputType: fullNameOf(method.Input()), - OutputType: fullNameOf(method.Output()), - Options: proto.Clone(method.Options()).(*descriptorpb.MethodOptions), - } - if method.IsStreamingClient() { - p.ClientStreaming = proto.Bool(true) - } - if method.IsStreamingServer() { - p.ServerStreaming = proto.Bool(true) - } - return p -} - -func fullNameOf(d protoreflect.Descriptor) *string { - if d == nil { - return nil - } - if strings.HasPrefix(string(d.FullName()), unknownPrefix) { - return proto.String(string(d.FullName()[len(unknownPrefix):])) - } - return proto.String("." + string(d.FullName())) -} diff --git a/tools/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go b/tools/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go index cd8fadbaf..cd7fbc87a 100644 --- a/tools/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go +++ b/tools/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go @@ -68,7 +68,7 @@ type Descriptor interface { // dependency is not resolved, in which case only name information is known. 
// // Placeholder types may only be returned by the following accessors - // as a result of unresolved dependencies or weak imports: + // as a result of unresolved dependencies: // // ╔═══════════════════════════════════╤═════════════════════╗ // ║ Accessor │ Descriptor ║ @@ -168,11 +168,7 @@ type FileImport struct { // The current file and the imported file must be within proto package. IsPublic bool - // IsWeak reports whether this is a weak import, which does not impose - // a direct dependency on the target file. - // - // Weak imports are a legacy proto1 feature. Equivalent behavior is - // achieved using proto2 extension fields or proto3 Any messages. + // Deprecated: support for weak fields has been removed. IsWeak bool } @@ -325,9 +321,7 @@ type FieldDescriptor interface { // specified in the source .proto file. HasOptionalKeyword() bool - // IsWeak reports whether this is a weak field, which does not impose a - // direct dependency on the target type. - // If true, then Message returns a placeholder type. + // Deprecated: support for weak fields has been removed. IsWeak() bool // IsPacked reports whether repeated primitive numeric kinds should be diff --git a/tools/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/tools/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go deleted file mode 100644 index a51633767..000000000 --- a/tools/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go +++ /dev/null @@ -1,5310 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Author: kenton@google.com (Kenton Varda) -// Based on original Protocol Buffers design by -// Sanjay Ghemawat, Jeff Dean, and others. -// -// The messages in this file describe the definitions found in .proto files. -// A valid .proto file can be translated directly to a FileDescriptorProto -// without any other information (e.g. without reading its imports). 
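With weak imports removed, IsPublic is the only import property that still carries meaning. A minimal sketch of walking a file's imports through the public API, using gofeaturespb only because its file happens to import descriptor.proto:

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/gofeaturespb"
)

func main() {
	fd := gofeaturespb.File_google_protobuf_go_features_proto
	imports := fd.Imports()
	for i := 0; i < imports.Len(); i++ {
		imp := imports.Get(i)
		// imp embeds the imported FileDescriptor, so Path() is available.
		fmt.Println(imp.Path(), "public:", imp.IsPublic)
	}
}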
- -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/protobuf/descriptor.proto - -package descriptorpb - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" - unsafe "unsafe" -) - -// The full set of known editions. -type Edition int32 - -const ( - // A placeholder for an unknown edition value. - Edition_EDITION_UNKNOWN Edition = 0 - // A placeholder edition for specifying default behaviors *before* a feature - // was first introduced. This is effectively an "infinite past". - Edition_EDITION_LEGACY Edition = 900 - // Legacy syntax "editions". These pre-date editions, but behave much like - // distinct editions. These can't be used to specify the edition of proto - // files, but feature definitions must supply proto2/proto3 defaults for - // backwards compatibility. - Edition_EDITION_PROTO2 Edition = 998 - Edition_EDITION_PROTO3 Edition = 999 - // Editions that have been released. The specific values are arbitrary and - // should not be depended on, but they will always be time-ordered for easy - // comparison. - Edition_EDITION_2023 Edition = 1000 - Edition_EDITION_2024 Edition = 1001 - // Placeholder editions for testing feature resolution. These should not be - // used or relied on outside of tests. - Edition_EDITION_1_TEST_ONLY Edition = 1 - Edition_EDITION_2_TEST_ONLY Edition = 2 - Edition_EDITION_99997_TEST_ONLY Edition = 99997 - Edition_EDITION_99998_TEST_ONLY Edition = 99998 - Edition_EDITION_99999_TEST_ONLY Edition = 99999 - // Placeholder for specifying unbounded edition support. This should only - // ever be used by plugins that can expect to never require any changes to - // support a new edition. - Edition_EDITION_MAX Edition = 2147483647 -) - -// Enum value maps for Edition. -var ( - Edition_name = map[int32]string{ - 0: "EDITION_UNKNOWN", - 900: "EDITION_LEGACY", - 998: "EDITION_PROTO2", - 999: "EDITION_PROTO3", - 1000: "EDITION_2023", - 1001: "EDITION_2024", - 1: "EDITION_1_TEST_ONLY", - 2: "EDITION_2_TEST_ONLY", - 99997: "EDITION_99997_TEST_ONLY", - 99998: "EDITION_99998_TEST_ONLY", - 99999: "EDITION_99999_TEST_ONLY", - 2147483647: "EDITION_MAX", - } - Edition_value = map[string]int32{ - "EDITION_UNKNOWN": 0, - "EDITION_LEGACY": 900, - "EDITION_PROTO2": 998, - "EDITION_PROTO3": 999, - "EDITION_2023": 1000, - "EDITION_2024": 1001, - "EDITION_1_TEST_ONLY": 1, - "EDITION_2_TEST_ONLY": 2, - "EDITION_99997_TEST_ONLY": 99997, - "EDITION_99998_TEST_ONLY": 99998, - "EDITION_99999_TEST_ONLY": 99999, - "EDITION_MAX": 2147483647, - } -) - -func (x Edition) Enum() *Edition { - p := new(Edition) - *p = x - return p -} - -func (x Edition) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (Edition) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[0].Descriptor() -} - -func (Edition) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[0] -} - -func (x Edition) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Do not use. -func (x *Edition) UnmarshalJSON(b []byte) error { - num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) - if err != nil { - return err - } - *x = Edition(num) - return nil -} - -// Deprecated: Use Edition.Descriptor instead. 
-func (Edition) EnumDescriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{0} -} - -// The verification state of the extension range. -type ExtensionRangeOptions_VerificationState int32 - -const ( - // All the extensions of the range must be declared. - ExtensionRangeOptions_DECLARATION ExtensionRangeOptions_VerificationState = 0 - ExtensionRangeOptions_UNVERIFIED ExtensionRangeOptions_VerificationState = 1 -) - -// Enum value maps for ExtensionRangeOptions_VerificationState. -var ( - ExtensionRangeOptions_VerificationState_name = map[int32]string{ - 0: "DECLARATION", - 1: "UNVERIFIED", - } - ExtensionRangeOptions_VerificationState_value = map[string]int32{ - "DECLARATION": 0, - "UNVERIFIED": 1, - } -) - -func (x ExtensionRangeOptions_VerificationState) Enum() *ExtensionRangeOptions_VerificationState { - p := new(ExtensionRangeOptions_VerificationState) - *p = x - return p -} - -func (x ExtensionRangeOptions_VerificationState) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (ExtensionRangeOptions_VerificationState) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor() -} - -func (ExtensionRangeOptions_VerificationState) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[1] -} - -func (x ExtensionRangeOptions_VerificationState) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Do not use. -func (x *ExtensionRangeOptions_VerificationState) UnmarshalJSON(b []byte) error { - num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) - if err != nil { - return err - } - *x = ExtensionRangeOptions_VerificationState(num) - return nil -} - -// Deprecated: Use ExtensionRangeOptions_VerificationState.Descriptor instead. -func (ExtensionRangeOptions_VerificationState) EnumDescriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{3, 0} -} - -type FieldDescriptorProto_Type int32 - -const ( - // 0 is reserved for errors. - // Order is weird for historical reasons. - FieldDescriptorProto_TYPE_DOUBLE FieldDescriptorProto_Type = 1 - FieldDescriptorProto_TYPE_FLOAT FieldDescriptorProto_Type = 2 - // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if - // negative values are likely. - FieldDescriptorProto_TYPE_INT64 FieldDescriptorProto_Type = 3 - FieldDescriptorProto_TYPE_UINT64 FieldDescriptorProto_Type = 4 - // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if - // negative values are likely. - FieldDescriptorProto_TYPE_INT32 FieldDescriptorProto_Type = 5 - FieldDescriptorProto_TYPE_FIXED64 FieldDescriptorProto_Type = 6 - FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7 - FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8 - FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9 - // Tag-delimited aggregate. - // Group type is deprecated and not supported after google.protobuf. However, Proto3 - // implementations should still be able to parse the group wire format and - // treat group fields as unknown fields. In Editions, the group wire format - // can be enabled via the `message_encoding` feature. - FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10 - FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11 // Length-delimited aggregate. - // New in version 2. 
- FieldDescriptorProto_TYPE_BYTES FieldDescriptorProto_Type = 12 - FieldDescriptorProto_TYPE_UINT32 FieldDescriptorProto_Type = 13 - FieldDescriptorProto_TYPE_ENUM FieldDescriptorProto_Type = 14 - FieldDescriptorProto_TYPE_SFIXED32 FieldDescriptorProto_Type = 15 - FieldDescriptorProto_TYPE_SFIXED64 FieldDescriptorProto_Type = 16 - FieldDescriptorProto_TYPE_SINT32 FieldDescriptorProto_Type = 17 // Uses ZigZag encoding. - FieldDescriptorProto_TYPE_SINT64 FieldDescriptorProto_Type = 18 // Uses ZigZag encoding. -) - -// Enum value maps for FieldDescriptorProto_Type. -var ( - FieldDescriptorProto_Type_name = map[int32]string{ - 1: "TYPE_DOUBLE", - 2: "TYPE_FLOAT", - 3: "TYPE_INT64", - 4: "TYPE_UINT64", - 5: "TYPE_INT32", - 6: "TYPE_FIXED64", - 7: "TYPE_FIXED32", - 8: "TYPE_BOOL", - 9: "TYPE_STRING", - 10: "TYPE_GROUP", - 11: "TYPE_MESSAGE", - 12: "TYPE_BYTES", - 13: "TYPE_UINT32", - 14: "TYPE_ENUM", - 15: "TYPE_SFIXED32", - 16: "TYPE_SFIXED64", - 17: "TYPE_SINT32", - 18: "TYPE_SINT64", - } - FieldDescriptorProto_Type_value = map[string]int32{ - "TYPE_DOUBLE": 1, - "TYPE_FLOAT": 2, - "TYPE_INT64": 3, - "TYPE_UINT64": 4, - "TYPE_INT32": 5, - "TYPE_FIXED64": 6, - "TYPE_FIXED32": 7, - "TYPE_BOOL": 8, - "TYPE_STRING": 9, - "TYPE_GROUP": 10, - "TYPE_MESSAGE": 11, - "TYPE_BYTES": 12, - "TYPE_UINT32": 13, - "TYPE_ENUM": 14, - "TYPE_SFIXED32": 15, - "TYPE_SFIXED64": 16, - "TYPE_SINT32": 17, - "TYPE_SINT64": 18, - } -) - -func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type { - p := new(FieldDescriptorProto_Type) - *p = x - return p -} - -func (x FieldDescriptorProto_Type) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (FieldDescriptorProto_Type) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor() -} - -func (FieldDescriptorProto_Type) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[2] -} - -func (x FieldDescriptorProto_Type) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Do not use. -func (x *FieldDescriptorProto_Type) UnmarshalJSON(b []byte) error { - num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) - if err != nil { - return err - } - *x = FieldDescriptorProto_Type(num) - return nil -} - -// Deprecated: Use FieldDescriptorProto_Type.Descriptor instead. -func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{4, 0} -} - -type FieldDescriptorProto_Label int32 - -const ( - // 0 is reserved for errors - FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1 - FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3 - // The required label is only allowed in google.protobuf. In proto3 and Editions - // it's explicitly prohibited. In Editions, the `field_presence` feature - // can be used to get this behavior. - FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2 -) - -// Enum value maps for FieldDescriptorProto_Label. 
-var ( - FieldDescriptorProto_Label_name = map[int32]string{ - 1: "LABEL_OPTIONAL", - 3: "LABEL_REPEATED", - 2: "LABEL_REQUIRED", - } - FieldDescriptorProto_Label_value = map[string]int32{ - "LABEL_OPTIONAL": 1, - "LABEL_REPEATED": 3, - "LABEL_REQUIRED": 2, - } -) - -func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label { - p := new(FieldDescriptorProto_Label) - *p = x - return p -} - -func (x FieldDescriptorProto_Label) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (FieldDescriptorProto_Label) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor() -} - -func (FieldDescriptorProto_Label) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[3] -} - -func (x FieldDescriptorProto_Label) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Do not use. -func (x *FieldDescriptorProto_Label) UnmarshalJSON(b []byte) error { - num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) - if err != nil { - return err - } - *x = FieldDescriptorProto_Label(num) - return nil -} - -// Deprecated: Use FieldDescriptorProto_Label.Descriptor instead. -func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{4, 1} -} - -// Generated classes can be optimized for speed or code size. -type FileOptions_OptimizeMode int32 - -const ( - FileOptions_SPEED FileOptions_OptimizeMode = 1 // Generate complete code for parsing, serialization, - // etc. - FileOptions_CODE_SIZE FileOptions_OptimizeMode = 2 // Use ReflectionOps to implement these methods. - FileOptions_LITE_RUNTIME FileOptions_OptimizeMode = 3 // Generate code using MessageLite and the lite runtime. -) - -// Enum value maps for FileOptions_OptimizeMode. -var ( - FileOptions_OptimizeMode_name = map[int32]string{ - 1: "SPEED", - 2: "CODE_SIZE", - 3: "LITE_RUNTIME", - } - FileOptions_OptimizeMode_value = map[string]int32{ - "SPEED": 1, - "CODE_SIZE": 2, - "LITE_RUNTIME": 3, - } -) - -func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode { - p := new(FileOptions_OptimizeMode) - *p = x - return p -} - -func (x FileOptions_OptimizeMode) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (FileOptions_OptimizeMode) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor() -} - -func (FileOptions_OptimizeMode) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[4] -} - -func (x FileOptions_OptimizeMode) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Do not use. -func (x *FileOptions_OptimizeMode) UnmarshalJSON(b []byte) error { - num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) - if err != nil { - return err - } - *x = FileOptions_OptimizeMode(num) - return nil -} - -// Deprecated: Use FileOptions_OptimizeMode.Descriptor instead. -func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{10, 0} -} - -type FieldOptions_CType int32 - -const ( - // Default mode. - FieldOptions_STRING FieldOptions_CType = 0 - // The option [ctype=CORD] may be applied to a non-repeated field of type - // "bytes". It indicates that in C++, the data should be stored in a Cord - // instead of a string. 
For very large strings, this may reduce memory - // fragmentation. It may also allow better performance when parsing from a - // Cord, or when parsing with aliasing enabled, as the parsed Cord may then - // alias the original buffer. - FieldOptions_CORD FieldOptions_CType = 1 - FieldOptions_STRING_PIECE FieldOptions_CType = 2 -) - -// Enum value maps for FieldOptions_CType. -var ( - FieldOptions_CType_name = map[int32]string{ - 0: "STRING", - 1: "CORD", - 2: "STRING_PIECE", - } - FieldOptions_CType_value = map[string]int32{ - "STRING": 0, - "CORD": 1, - "STRING_PIECE": 2, - } -) - -func (x FieldOptions_CType) Enum() *FieldOptions_CType { - p := new(FieldOptions_CType) - *p = x - return p -} - -func (x FieldOptions_CType) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (FieldOptions_CType) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor() -} - -func (FieldOptions_CType) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[5] -} - -func (x FieldOptions_CType) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Do not use. -func (x *FieldOptions_CType) UnmarshalJSON(b []byte) error { - num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) - if err != nil { - return err - } - *x = FieldOptions_CType(num) - return nil -} - -// Deprecated: Use FieldOptions_CType.Descriptor instead. -func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 0} -} - -type FieldOptions_JSType int32 - -const ( - // Use the default type. - FieldOptions_JS_NORMAL FieldOptions_JSType = 0 - // Use JavaScript strings. - FieldOptions_JS_STRING FieldOptions_JSType = 1 - // Use JavaScript numbers. - FieldOptions_JS_NUMBER FieldOptions_JSType = 2 -) - -// Enum value maps for FieldOptions_JSType. -var ( - FieldOptions_JSType_name = map[int32]string{ - 0: "JS_NORMAL", - 1: "JS_STRING", - 2: "JS_NUMBER", - } - FieldOptions_JSType_value = map[string]int32{ - "JS_NORMAL": 0, - "JS_STRING": 1, - "JS_NUMBER": 2, - } -) - -func (x FieldOptions_JSType) Enum() *FieldOptions_JSType { - p := new(FieldOptions_JSType) - *p = x - return p -} - -func (x FieldOptions_JSType) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (FieldOptions_JSType) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[6].Descriptor() -} - -func (FieldOptions_JSType) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[6] -} - -func (x FieldOptions_JSType) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Do not use. -func (x *FieldOptions_JSType) UnmarshalJSON(b []byte) error { - num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) - if err != nil { - return err - } - *x = FieldOptions_JSType(num) - return nil -} - -// Deprecated: Use FieldOptions_JSType.Descriptor instead. -func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 1} -} - -// If set to RETENTION_SOURCE, the option will be omitted from the binary. 
-type FieldOptions_OptionRetention int32 - -const ( - FieldOptions_RETENTION_UNKNOWN FieldOptions_OptionRetention = 0 - FieldOptions_RETENTION_RUNTIME FieldOptions_OptionRetention = 1 - FieldOptions_RETENTION_SOURCE FieldOptions_OptionRetention = 2 -) - -// Enum value maps for FieldOptions_OptionRetention. -var ( - FieldOptions_OptionRetention_name = map[int32]string{ - 0: "RETENTION_UNKNOWN", - 1: "RETENTION_RUNTIME", - 2: "RETENTION_SOURCE", - } - FieldOptions_OptionRetention_value = map[string]int32{ - "RETENTION_UNKNOWN": 0, - "RETENTION_RUNTIME": 1, - "RETENTION_SOURCE": 2, - } -) - -func (x FieldOptions_OptionRetention) Enum() *FieldOptions_OptionRetention { - p := new(FieldOptions_OptionRetention) - *p = x - return p -} - -func (x FieldOptions_OptionRetention) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (FieldOptions_OptionRetention) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[7].Descriptor() -} - -func (FieldOptions_OptionRetention) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[7] -} - -func (x FieldOptions_OptionRetention) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Do not use. -func (x *FieldOptions_OptionRetention) UnmarshalJSON(b []byte) error { - num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) - if err != nil { - return err - } - *x = FieldOptions_OptionRetention(num) - return nil -} - -// Deprecated: Use FieldOptions_OptionRetention.Descriptor instead. -func (FieldOptions_OptionRetention) EnumDescriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 2} -} - -// This indicates the types of entities that the field may apply to when used -// as an option. If it is unset, then the field may be freely used as an -// option on any kind of entity. -type FieldOptions_OptionTargetType int32 - -const ( - FieldOptions_TARGET_TYPE_UNKNOWN FieldOptions_OptionTargetType = 0 - FieldOptions_TARGET_TYPE_FILE FieldOptions_OptionTargetType = 1 - FieldOptions_TARGET_TYPE_EXTENSION_RANGE FieldOptions_OptionTargetType = 2 - FieldOptions_TARGET_TYPE_MESSAGE FieldOptions_OptionTargetType = 3 - FieldOptions_TARGET_TYPE_FIELD FieldOptions_OptionTargetType = 4 - FieldOptions_TARGET_TYPE_ONEOF FieldOptions_OptionTargetType = 5 - FieldOptions_TARGET_TYPE_ENUM FieldOptions_OptionTargetType = 6 - FieldOptions_TARGET_TYPE_ENUM_ENTRY FieldOptions_OptionTargetType = 7 - FieldOptions_TARGET_TYPE_SERVICE FieldOptions_OptionTargetType = 8 - FieldOptions_TARGET_TYPE_METHOD FieldOptions_OptionTargetType = 9 -) - -// Enum value maps for FieldOptions_OptionTargetType. 
-var ( - FieldOptions_OptionTargetType_name = map[int32]string{ - 0: "TARGET_TYPE_UNKNOWN", - 1: "TARGET_TYPE_FILE", - 2: "TARGET_TYPE_EXTENSION_RANGE", - 3: "TARGET_TYPE_MESSAGE", - 4: "TARGET_TYPE_FIELD", - 5: "TARGET_TYPE_ONEOF", - 6: "TARGET_TYPE_ENUM", - 7: "TARGET_TYPE_ENUM_ENTRY", - 8: "TARGET_TYPE_SERVICE", - 9: "TARGET_TYPE_METHOD", - } - FieldOptions_OptionTargetType_value = map[string]int32{ - "TARGET_TYPE_UNKNOWN": 0, - "TARGET_TYPE_FILE": 1, - "TARGET_TYPE_EXTENSION_RANGE": 2, - "TARGET_TYPE_MESSAGE": 3, - "TARGET_TYPE_FIELD": 4, - "TARGET_TYPE_ONEOF": 5, - "TARGET_TYPE_ENUM": 6, - "TARGET_TYPE_ENUM_ENTRY": 7, - "TARGET_TYPE_SERVICE": 8, - "TARGET_TYPE_METHOD": 9, - } -) - -func (x FieldOptions_OptionTargetType) Enum() *FieldOptions_OptionTargetType { - p := new(FieldOptions_OptionTargetType) - *p = x - return p -} - -func (x FieldOptions_OptionTargetType) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (FieldOptions_OptionTargetType) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[8].Descriptor() -} - -func (FieldOptions_OptionTargetType) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[8] -} - -func (x FieldOptions_OptionTargetType) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Do not use. -func (x *FieldOptions_OptionTargetType) UnmarshalJSON(b []byte) error { - num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) - if err != nil { - return err - } - *x = FieldOptions_OptionTargetType(num) - return nil -} - -// Deprecated: Use FieldOptions_OptionTargetType.Descriptor instead. -func (FieldOptions_OptionTargetType) EnumDescriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 3} -} - -// Is this method side-effect-free (or safe in HTTP parlance), or idempotent, -// or neither? HTTP based RPC implementation may choose GET verb for safe -// methods, and PUT verb for idempotent methods instead of the default POST. -type MethodOptions_IdempotencyLevel int32 - -const ( - MethodOptions_IDEMPOTENCY_UNKNOWN MethodOptions_IdempotencyLevel = 0 - MethodOptions_NO_SIDE_EFFECTS MethodOptions_IdempotencyLevel = 1 // implies idempotent - MethodOptions_IDEMPOTENT MethodOptions_IdempotencyLevel = 2 // idempotent, but may have side effects -) - -// Enum value maps for MethodOptions_IdempotencyLevel. -var ( - MethodOptions_IdempotencyLevel_name = map[int32]string{ - 0: "IDEMPOTENCY_UNKNOWN", - 1: "NO_SIDE_EFFECTS", - 2: "IDEMPOTENT", - } - MethodOptions_IdempotencyLevel_value = map[string]int32{ - "IDEMPOTENCY_UNKNOWN": 0, - "NO_SIDE_EFFECTS": 1, - "IDEMPOTENT": 2, - } -) - -func (x MethodOptions_IdempotencyLevel) Enum() *MethodOptions_IdempotencyLevel { - p := new(MethodOptions_IdempotencyLevel) - *p = x - return p -} - -func (x MethodOptions_IdempotencyLevel) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (MethodOptions_IdempotencyLevel) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[9].Descriptor() -} - -func (MethodOptions_IdempotencyLevel) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[9] -} - -func (x MethodOptions_IdempotencyLevel) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Do not use. 
-func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(b []byte) error { - num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) - if err != nil { - return err - } - *x = MethodOptions_IdempotencyLevel(num) - return nil -} - -// Deprecated: Use MethodOptions_IdempotencyLevel.Descriptor instead. -func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{17, 0} -} - -type FeatureSet_FieldPresence int32 - -const ( - FeatureSet_FIELD_PRESENCE_UNKNOWN FeatureSet_FieldPresence = 0 - FeatureSet_EXPLICIT FeatureSet_FieldPresence = 1 - FeatureSet_IMPLICIT FeatureSet_FieldPresence = 2 - FeatureSet_LEGACY_REQUIRED FeatureSet_FieldPresence = 3 -) - -// Enum value maps for FeatureSet_FieldPresence. -var ( - FeatureSet_FieldPresence_name = map[int32]string{ - 0: "FIELD_PRESENCE_UNKNOWN", - 1: "EXPLICIT", - 2: "IMPLICIT", - 3: "LEGACY_REQUIRED", - } - FeatureSet_FieldPresence_value = map[string]int32{ - "FIELD_PRESENCE_UNKNOWN": 0, - "EXPLICIT": 1, - "IMPLICIT": 2, - "LEGACY_REQUIRED": 3, - } -) - -func (x FeatureSet_FieldPresence) Enum() *FeatureSet_FieldPresence { - p := new(FeatureSet_FieldPresence) - *p = x - return p -} - -func (x FeatureSet_FieldPresence) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (FeatureSet_FieldPresence) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[10].Descriptor() -} - -func (FeatureSet_FieldPresence) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[10] -} - -func (x FeatureSet_FieldPresence) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Do not use. -func (x *FeatureSet_FieldPresence) UnmarshalJSON(b []byte) error { - num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) - if err != nil { - return err - } - *x = FeatureSet_FieldPresence(num) - return nil -} - -// Deprecated: Use FeatureSet_FieldPresence.Descriptor instead. -func (FeatureSet_FieldPresence) EnumDescriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 0} -} - -type FeatureSet_EnumType int32 - -const ( - FeatureSet_ENUM_TYPE_UNKNOWN FeatureSet_EnumType = 0 - FeatureSet_OPEN FeatureSet_EnumType = 1 - FeatureSet_CLOSED FeatureSet_EnumType = 2 -) - -// Enum value maps for FeatureSet_EnumType. -var ( - FeatureSet_EnumType_name = map[int32]string{ - 0: "ENUM_TYPE_UNKNOWN", - 1: "OPEN", - 2: "CLOSED", - } - FeatureSet_EnumType_value = map[string]int32{ - "ENUM_TYPE_UNKNOWN": 0, - "OPEN": 1, - "CLOSED": 2, - } -) - -func (x FeatureSet_EnumType) Enum() *FeatureSet_EnumType { - p := new(FeatureSet_EnumType) - *p = x - return p -} - -func (x FeatureSet_EnumType) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (FeatureSet_EnumType) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[11].Descriptor() -} - -func (FeatureSet_EnumType) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[11] -} - -func (x FeatureSet_EnumType) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Do not use. 
-func (x *FeatureSet_EnumType) UnmarshalJSON(b []byte) error { - num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) - if err != nil { - return err - } - *x = FeatureSet_EnumType(num) - return nil -} - -// Deprecated: Use FeatureSet_EnumType.Descriptor instead. -func (FeatureSet_EnumType) EnumDescriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 1} -} - -type FeatureSet_RepeatedFieldEncoding int32 - -const ( - FeatureSet_REPEATED_FIELD_ENCODING_UNKNOWN FeatureSet_RepeatedFieldEncoding = 0 - FeatureSet_PACKED FeatureSet_RepeatedFieldEncoding = 1 - FeatureSet_EXPANDED FeatureSet_RepeatedFieldEncoding = 2 -) - -// Enum value maps for FeatureSet_RepeatedFieldEncoding. -var ( - FeatureSet_RepeatedFieldEncoding_name = map[int32]string{ - 0: "REPEATED_FIELD_ENCODING_UNKNOWN", - 1: "PACKED", - 2: "EXPANDED", - } - FeatureSet_RepeatedFieldEncoding_value = map[string]int32{ - "REPEATED_FIELD_ENCODING_UNKNOWN": 0, - "PACKED": 1, - "EXPANDED": 2, - } -) - -func (x FeatureSet_RepeatedFieldEncoding) Enum() *FeatureSet_RepeatedFieldEncoding { - p := new(FeatureSet_RepeatedFieldEncoding) - *p = x - return p -} - -func (x FeatureSet_RepeatedFieldEncoding) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (FeatureSet_RepeatedFieldEncoding) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[12].Descriptor() -} - -func (FeatureSet_RepeatedFieldEncoding) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[12] -} - -func (x FeatureSet_RepeatedFieldEncoding) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Do not use. -func (x *FeatureSet_RepeatedFieldEncoding) UnmarshalJSON(b []byte) error { - num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) - if err != nil { - return err - } - *x = FeatureSet_RepeatedFieldEncoding(num) - return nil -} - -// Deprecated: Use FeatureSet_RepeatedFieldEncoding.Descriptor instead. -func (FeatureSet_RepeatedFieldEncoding) EnumDescriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 2} -} - -type FeatureSet_Utf8Validation int32 - -const ( - FeatureSet_UTF8_VALIDATION_UNKNOWN FeatureSet_Utf8Validation = 0 - FeatureSet_VERIFY FeatureSet_Utf8Validation = 2 - FeatureSet_NONE FeatureSet_Utf8Validation = 3 -) - -// Enum value maps for FeatureSet_Utf8Validation. -var ( - FeatureSet_Utf8Validation_name = map[int32]string{ - 0: "UTF8_VALIDATION_UNKNOWN", - 2: "VERIFY", - 3: "NONE", - } - FeatureSet_Utf8Validation_value = map[string]int32{ - "UTF8_VALIDATION_UNKNOWN": 0, - "VERIFY": 2, - "NONE": 3, - } -) - -func (x FeatureSet_Utf8Validation) Enum() *FeatureSet_Utf8Validation { - p := new(FeatureSet_Utf8Validation) - *p = x - return p -} - -func (x FeatureSet_Utf8Validation) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (FeatureSet_Utf8Validation) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[13].Descriptor() -} - -func (FeatureSet_Utf8Validation) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[13] -} - -func (x FeatureSet_Utf8Validation) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Do not use. 
-func (x *FeatureSet_Utf8Validation) UnmarshalJSON(b []byte) error { - num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) - if err != nil { - return err - } - *x = FeatureSet_Utf8Validation(num) - return nil -} - -// Deprecated: Use FeatureSet_Utf8Validation.Descriptor instead. -func (FeatureSet_Utf8Validation) EnumDescriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 3} -} - -type FeatureSet_MessageEncoding int32 - -const ( - FeatureSet_MESSAGE_ENCODING_UNKNOWN FeatureSet_MessageEncoding = 0 - FeatureSet_LENGTH_PREFIXED FeatureSet_MessageEncoding = 1 - FeatureSet_DELIMITED FeatureSet_MessageEncoding = 2 -) - -// Enum value maps for FeatureSet_MessageEncoding. -var ( - FeatureSet_MessageEncoding_name = map[int32]string{ - 0: "MESSAGE_ENCODING_UNKNOWN", - 1: "LENGTH_PREFIXED", - 2: "DELIMITED", - } - FeatureSet_MessageEncoding_value = map[string]int32{ - "MESSAGE_ENCODING_UNKNOWN": 0, - "LENGTH_PREFIXED": 1, - "DELIMITED": 2, - } -) - -func (x FeatureSet_MessageEncoding) Enum() *FeatureSet_MessageEncoding { - p := new(FeatureSet_MessageEncoding) - *p = x - return p -} - -func (x FeatureSet_MessageEncoding) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (FeatureSet_MessageEncoding) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[14].Descriptor() -} - -func (FeatureSet_MessageEncoding) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[14] -} - -func (x FeatureSet_MessageEncoding) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Do not use. -func (x *FeatureSet_MessageEncoding) UnmarshalJSON(b []byte) error { - num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) - if err != nil { - return err - } - *x = FeatureSet_MessageEncoding(num) - return nil -} - -// Deprecated: Use FeatureSet_MessageEncoding.Descriptor instead. -func (FeatureSet_MessageEncoding) EnumDescriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 4} -} - -type FeatureSet_JsonFormat int32 - -const ( - FeatureSet_JSON_FORMAT_UNKNOWN FeatureSet_JsonFormat = 0 - FeatureSet_ALLOW FeatureSet_JsonFormat = 1 - FeatureSet_LEGACY_BEST_EFFORT FeatureSet_JsonFormat = 2 -) - -// Enum value maps for FeatureSet_JsonFormat. -var ( - FeatureSet_JsonFormat_name = map[int32]string{ - 0: "JSON_FORMAT_UNKNOWN", - 1: "ALLOW", - 2: "LEGACY_BEST_EFFORT", - } - FeatureSet_JsonFormat_value = map[string]int32{ - "JSON_FORMAT_UNKNOWN": 0, - "ALLOW": 1, - "LEGACY_BEST_EFFORT": 2, - } -) - -func (x FeatureSet_JsonFormat) Enum() *FeatureSet_JsonFormat { - p := new(FeatureSet_JsonFormat) - *p = x - return p -} - -func (x FeatureSet_JsonFormat) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (FeatureSet_JsonFormat) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[15].Descriptor() -} - -func (FeatureSet_JsonFormat) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[15] -} - -func (x FeatureSet_JsonFormat) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Do not use. 
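The FeatureSet_* enums above (field presence, enum openness, repeated-field encoding, UTF-8 validation, and the message encoding that follows) are consumed as pointer-typed optional fields on the FeatureSet message defined later in this file. A minimal sketch, assuming only this package:

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	fs := &descriptorpb.FeatureSet{
		FieldPresence: descriptorpb.FeatureSet_EXPLICIT.Enum(),
		EnumType:      descriptorpb.FeatureSet_OPEN.Enum(),
	}
	fmt.Println(fs.GetFieldPresence(), fs.GetEnumType()) // EXPLICIT OPEN
}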
-func (x *FeatureSet_JsonFormat) UnmarshalJSON(b []byte) error { - num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) - if err != nil { - return err - } - *x = FeatureSet_JsonFormat(num) - return nil -} - -// Deprecated: Use FeatureSet_JsonFormat.Descriptor instead. -func (FeatureSet_JsonFormat) EnumDescriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 5} -} - -// Represents the identified object's effect on the element in the original -// .proto file. -type GeneratedCodeInfo_Annotation_Semantic int32 - -const ( - // There is no effect or the effect is indescribable. - GeneratedCodeInfo_Annotation_NONE GeneratedCodeInfo_Annotation_Semantic = 0 - // The element is set or otherwise mutated. - GeneratedCodeInfo_Annotation_SET GeneratedCodeInfo_Annotation_Semantic = 1 - // An alias to the element is returned. - GeneratedCodeInfo_Annotation_ALIAS GeneratedCodeInfo_Annotation_Semantic = 2 -) - -// Enum value maps for GeneratedCodeInfo_Annotation_Semantic. -var ( - GeneratedCodeInfo_Annotation_Semantic_name = map[int32]string{ - 0: "NONE", - 1: "SET", - 2: "ALIAS", - } - GeneratedCodeInfo_Annotation_Semantic_value = map[string]int32{ - "NONE": 0, - "SET": 1, - "ALIAS": 2, - } -) - -func (x GeneratedCodeInfo_Annotation_Semantic) Enum() *GeneratedCodeInfo_Annotation_Semantic { - p := new(GeneratedCodeInfo_Annotation_Semantic) - *p = x - return p -} - -func (x GeneratedCodeInfo_Annotation_Semantic) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (GeneratedCodeInfo_Annotation_Semantic) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[16].Descriptor() -} - -func (GeneratedCodeInfo_Annotation_Semantic) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[16] -} - -func (x GeneratedCodeInfo_Annotation_Semantic) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Do not use. -func (x *GeneratedCodeInfo_Annotation_Semantic) UnmarshalJSON(b []byte) error { - num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) - if err != nil { - return err - } - *x = GeneratedCodeInfo_Annotation_Semantic(num) - return nil -} - -// Deprecated: Use GeneratedCodeInfo_Annotation_Semantic.Descriptor instead. -func (GeneratedCodeInfo_Annotation_Semantic) EnumDescriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{22, 0, 0} -} - -// The protocol compiler can output a FileDescriptorSet containing the .proto -// files it parses. 
-type FileDescriptorSet struct { - state protoimpl.MessageState `protogen:"open.v1"` - File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` - extensionFields protoimpl.ExtensionFields - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *FileDescriptorSet) Reset() { - *x = FileDescriptorSet{} - mi := &file_google_protobuf_descriptor_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *FileDescriptorSet) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*FileDescriptorSet) ProtoMessage() {} - -func (x *FileDescriptorSet) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[0] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use FileDescriptorSet.ProtoReflect.Descriptor instead. -func (*FileDescriptorSet) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{0} -} - -func (x *FileDescriptorSet) GetFile() []*FileDescriptorProto { - if x != nil { - return x.File - } - return nil -} - -// Describes a complete .proto file. -type FileDescriptorProto struct { - state protoimpl.MessageState `protogen:"open.v1"` - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` // file name, relative to root of source tree - Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` // e.g. "foo", "foo.bar", etc. - // Names of files imported by this file. - Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"` - // Indexes of the public imported files in the dependency list above. - PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency,json=publicDependency" json:"public_dependency,omitempty"` - // Indexes of the weak imported files in the dependency list. - // For Google-internal migration only. Do not use. - WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"` - // All top-level definitions in this file. - MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"` - EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` - Service []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"` - Extension []*FieldDescriptorProto `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"` - Options *FileOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` - // This field contains optional information about the original source code. - // You may safely remove this entire field without harming runtime - // functionality of the descriptors -- the information is needed only by - // development tools. - SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"` - // The syntax of the proto file. - // The supported values are "proto2", "proto3", and "editions". - // - // If `edition` is present, this value must be "editions". - Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` - // The edition of the proto file. 
- Edition *Edition `protobuf:"varint,14,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *FileDescriptorProto) Reset() { - *x = FileDescriptorProto{} - mi := &file_google_protobuf_descriptor_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *FileDescriptorProto) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*FileDescriptorProto) ProtoMessage() {} - -func (x *FileDescriptorProto) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[1] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use FileDescriptorProto.ProtoReflect.Descriptor instead. -func (*FileDescriptorProto) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{1} -} - -func (x *FileDescriptorProto) GetName() string { - if x != nil && x.Name != nil { - return *x.Name - } - return "" -} - -func (x *FileDescriptorProto) GetPackage() string { - if x != nil && x.Package != nil { - return *x.Package - } - return "" -} - -func (x *FileDescriptorProto) GetDependency() []string { - if x != nil { - return x.Dependency - } - return nil -} - -func (x *FileDescriptorProto) GetPublicDependency() []int32 { - if x != nil { - return x.PublicDependency - } - return nil -} - -func (x *FileDescriptorProto) GetWeakDependency() []int32 { - if x != nil { - return x.WeakDependency - } - return nil -} - -func (x *FileDescriptorProto) GetMessageType() []*DescriptorProto { - if x != nil { - return x.MessageType - } - return nil -} - -func (x *FileDescriptorProto) GetEnumType() []*EnumDescriptorProto { - if x != nil { - return x.EnumType - } - return nil -} - -func (x *FileDescriptorProto) GetService() []*ServiceDescriptorProto { - if x != nil { - return x.Service - } - return nil -} - -func (x *FileDescriptorProto) GetExtension() []*FieldDescriptorProto { - if x != nil { - return x.Extension - } - return nil -} - -func (x *FileDescriptorProto) GetOptions() *FileOptions { - if x != nil { - return x.Options - } - return nil -} - -func (x *FileDescriptorProto) GetSourceCodeInfo() *SourceCodeInfo { - if x != nil { - return x.SourceCodeInfo - } - return nil -} - -func (x *FileDescriptorProto) GetSyntax() string { - if x != nil && x.Syntax != nil { - return *x.Syntax - } - return "" -} - -func (x *FileDescriptorProto) GetEdition() Edition { - if x != nil && x.Edition != nil { - return *x.Edition - } - return Edition_EDITION_UNKNOWN -} - -// Describes a message type. 
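Because every scalar field of FileDescriptorProto is a pointer (presence-tracking proto2 semantics), values are built with the pointer helpers from google.golang.org/protobuf/proto, and unset fields read back as the documented defaults. A sketch with hypothetical file and package names:

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	fd := &descriptorpb.FileDescriptorProto{
		Name:    proto.String("example.proto"), // hypothetical file name
		Package: proto.String("example.v1"),    // hypothetical package
		Syntax:  proto.String("proto3"),
	}
	// GetEdition falls back to EDITION_UNKNOWN because edition is unset.
	fmt.Println(fd.GetName(), fd.GetSyntax(), fd.GetEdition())
}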
-type DescriptorProto struct { - state protoimpl.MessageState `protogen:"open.v1"` - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"` - Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"` - NestedType []*DescriptorProto `protobuf:"bytes,3,rep,name=nested_type,json=nestedType" json:"nested_type,omitempty"` - EnumType []*EnumDescriptorProto `protobuf:"bytes,4,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` - ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range,json=extensionRange" json:"extension_range,omitempty"` - OneofDecl []*OneofDescriptorProto `protobuf:"bytes,8,rep,name=oneof_decl,json=oneofDecl" json:"oneof_decl,omitempty"` - Options *MessageOptions `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"` - ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` - // Reserved field names, which may not be used by fields in the same message. - // A given name may only be reserved once. - ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *DescriptorProto) Reset() { - *x = DescriptorProto{} - mi := &file_google_protobuf_descriptor_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *DescriptorProto) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DescriptorProto) ProtoMessage() {} - -func (x *DescriptorProto) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[2] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DescriptorProto.ProtoReflect.Descriptor instead. 
-func (*DescriptorProto) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{2} -} - -func (x *DescriptorProto) GetName() string { - if x != nil && x.Name != nil { - return *x.Name - } - return "" -} - -func (x *DescriptorProto) GetField() []*FieldDescriptorProto { - if x != nil { - return x.Field - } - return nil -} - -func (x *DescriptorProto) GetExtension() []*FieldDescriptorProto { - if x != nil { - return x.Extension - } - return nil -} - -func (x *DescriptorProto) GetNestedType() []*DescriptorProto { - if x != nil { - return x.NestedType - } - return nil -} - -func (x *DescriptorProto) GetEnumType() []*EnumDescriptorProto { - if x != nil { - return x.EnumType - } - return nil -} - -func (x *DescriptorProto) GetExtensionRange() []*DescriptorProto_ExtensionRange { - if x != nil { - return x.ExtensionRange - } - return nil -} - -func (x *DescriptorProto) GetOneofDecl() []*OneofDescriptorProto { - if x != nil { - return x.OneofDecl - } - return nil -} - -func (x *DescriptorProto) GetOptions() *MessageOptions { - if x != nil { - return x.Options - } - return nil -} - -func (x *DescriptorProto) GetReservedRange() []*DescriptorProto_ReservedRange { - if x != nil { - return x.ReservedRange - } - return nil -} - -func (x *DescriptorProto) GetReservedName() []string { - if x != nil { - return x.ReservedName - } - return nil -} - -type ExtensionRangeOptions struct { - state protoimpl.MessageState `protogen:"open.v1"` - // The parser stores options it doesn't recognize here. See above. - UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - // For external users: DO NOT USE. We are in the process of open sourcing - // extension declaration and executing internal cleanups before it can be - // used externally. - Declaration []*ExtensionRangeOptions_Declaration `protobuf:"bytes,2,rep,name=declaration" json:"declaration,omitempty"` - // Any features defined in the specific edition. - Features *FeatureSet `protobuf:"bytes,50,opt,name=features" json:"features,omitempty"` - // The verification state of the range. - // TODO: flip the default to DECLARATION once all empty ranges - // are marked as UNVERIFIED. - Verification *ExtensionRangeOptions_VerificationState `protobuf:"varint,3,opt,name=verification,enum=google.protobuf.ExtensionRangeOptions_VerificationState,def=1" json:"verification,omitempty"` - extensionFields protoimpl.ExtensionFields - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -// Default values for ExtensionRangeOptions fields. -const ( - Default_ExtensionRangeOptions_Verification = ExtensionRangeOptions_UNVERIFIED -) - -func (x *ExtensionRangeOptions) Reset() { - *x = ExtensionRangeOptions{} - mi := &file_google_protobuf_descriptor_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ExtensionRangeOptions) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ExtensionRangeOptions) ProtoMessage() {} - -func (x *ExtensionRangeOptions) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[3] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ExtensionRangeOptions.ProtoReflect.Descriptor instead. 
-func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{3} -} - -func (x *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption { - if x != nil { - return x.UninterpretedOption - } - return nil -} - -func (x *ExtensionRangeOptions) GetDeclaration() []*ExtensionRangeOptions_Declaration { - if x != nil { - return x.Declaration - } - return nil -} - -func (x *ExtensionRangeOptions) GetFeatures() *FeatureSet { - if x != nil { - return x.Features - } - return nil -} - -func (x *ExtensionRangeOptions) GetVerification() ExtensionRangeOptions_VerificationState { - if x != nil && x.Verification != nil { - return *x.Verification - } - return Default_ExtensionRangeOptions_Verification -} - -// Describes a field within a message. -type FieldDescriptorProto struct { - state protoimpl.MessageState `protogen:"open.v1"` - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Number *int32 `protobuf:"varint,3,opt,name=number" json:"number,omitempty"` - Label *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"` - // If type_name is set, this need not be set. If both this and type_name - // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. - Type *FieldDescriptorProto_Type `protobuf:"varint,5,opt,name=type,enum=google.protobuf.FieldDescriptorProto_Type" json:"type,omitempty"` - // For message and enum types, this is the name of the type. If the name - // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping - // rules are used to find the type (i.e. first the nested types within this - // message are searched, then within the parent, on up to the root - // namespace). - TypeName *string `protobuf:"bytes,6,opt,name=type_name,json=typeName" json:"type_name,omitempty"` - // For extensions, this is the name of the type being extended. It is - // resolved in the same manner as type_name. - Extendee *string `protobuf:"bytes,2,opt,name=extendee" json:"extendee,omitempty"` - // For numeric types, contains the original text representation of the value. - // For booleans, "true" or "false". - // For strings, contains the default text contents (not escaped in any way). - // For bytes, contains the C escaped value. All bytes >= 128 are escaped. - DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"` - // If set, gives the index of a oneof in the containing type's oneof_decl - // list. This field is a member of that oneof. - OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index,json=oneofIndex" json:"oneof_index,omitempty"` - // JSON name of this field. The value is set by protocol compiler. If the - // user has set a "json_name" option on this field, that option's value - // will be used. Otherwise, it's deduced from the field's name by converting - // it to camelCase. - JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"` - Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` - // If true, this is a proto3 "optional". When a proto3 field is optional, it - // tracks presence regardless of field type. - // - // When proto3_optional is true, this field must belong to a oneof to signal - // to old proto3 clients that presence is tracked for this field. 
This oneof - // is known as a "synthetic" oneof, and this field must be its sole member - // (each proto3 optional field gets its own synthetic oneof). Synthetic oneofs - // exist in the descriptor only, and do not generate any API. Synthetic oneofs - // must be ordered after all "real" oneofs. - // - // For message fields, proto3_optional doesn't create any semantic change, - // since non-repeated message fields always track presence. However it still - // indicates the semantic detail of whether the user wrote "optional" or not. - // This can be useful for round-tripping the .proto file. For consistency we - // give message fields a synthetic oneof also, even though it is not required - // to track presence. This is especially important because the parser can't - // tell if a field is a message or an enum, so it must always create a - // synthetic oneof. - // - // Proto2 optional fields do not set this flag, because they already indicate - // optional with `LABEL_OPTIONAL`. - Proto3Optional *bool `protobuf:"varint,17,opt,name=proto3_optional,json=proto3Optional" json:"proto3_optional,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *FieldDescriptorProto) Reset() { - *x = FieldDescriptorProto{} - mi := &file_google_protobuf_descriptor_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *FieldDescriptorProto) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*FieldDescriptorProto) ProtoMessage() {} - -func (x *FieldDescriptorProto) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[4] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use FieldDescriptorProto.ProtoReflect.Descriptor instead. 
-func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{4} -} - -func (x *FieldDescriptorProto) GetName() string { - if x != nil && x.Name != nil { - return *x.Name - } - return "" -} - -func (x *FieldDescriptorProto) GetNumber() int32 { - if x != nil && x.Number != nil { - return *x.Number - } - return 0 -} - -func (x *FieldDescriptorProto) GetLabel() FieldDescriptorProto_Label { - if x != nil && x.Label != nil { - return *x.Label - } - return FieldDescriptorProto_LABEL_OPTIONAL -} - -func (x *FieldDescriptorProto) GetType() FieldDescriptorProto_Type { - if x != nil && x.Type != nil { - return *x.Type - } - return FieldDescriptorProto_TYPE_DOUBLE -} - -func (x *FieldDescriptorProto) GetTypeName() string { - if x != nil && x.TypeName != nil { - return *x.TypeName - } - return "" -} - -func (x *FieldDescriptorProto) GetExtendee() string { - if x != nil && x.Extendee != nil { - return *x.Extendee - } - return "" -} - -func (x *FieldDescriptorProto) GetDefaultValue() string { - if x != nil && x.DefaultValue != nil { - return *x.DefaultValue - } - return "" -} - -func (x *FieldDescriptorProto) GetOneofIndex() int32 { - if x != nil && x.OneofIndex != nil { - return *x.OneofIndex - } - return 0 -} - -func (x *FieldDescriptorProto) GetJsonName() string { - if x != nil && x.JsonName != nil { - return *x.JsonName - } - return "" -} - -func (x *FieldDescriptorProto) GetOptions() *FieldOptions { - if x != nil { - return x.Options - } - return nil -} - -func (x *FieldDescriptorProto) GetProto3Optional() bool { - if x != nil && x.Proto3Optional != nil { - return *x.Proto3Optional - } - return false -} - -// Describes a oneof. -type OneofDescriptorProto struct { - state protoimpl.MessageState `protogen:"open.v1"` - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *OneofDescriptorProto) Reset() { - *x = OneofDescriptorProto{} - mi := &file_google_protobuf_descriptor_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *OneofDescriptorProto) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*OneofDescriptorProto) ProtoMessage() {} - -func (x *OneofDescriptorProto) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[5] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use OneofDescriptorProto.ProtoReflect.Descriptor instead. -func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{5} -} - -func (x *OneofDescriptorProto) GetName() string { - if x != nil && x.Name != nil { - return *x.Name - } - return "" -} - -func (x *OneofDescriptorProto) GetOptions() *OneofOptions { - if x != nil { - return x.Options - } - return nil -} - -// Describes an enum type. 
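To make the proto3_optional contract above concrete: the optional field carries proto3_optional=true plus an oneof_index pointing at its synthetic oneof, which conventionally reuses the field name with a leading underscore and holds no other members. A hand-built sketch with hypothetical names:

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	field := &descriptorpb.FieldDescriptorProto{
		Name:           proto.String("nickname"), // hypothetical field
		Number:         proto.Int32(1),
		Label:          descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL.Enum(),
		Type:           descriptorpb.FieldDescriptorProto_TYPE_STRING.Enum(),
		OneofIndex:     proto.Int32(0), // sole member of the synthetic oneof below
		Proto3Optional: proto.Bool(true),
	}
	msg := &descriptorpb.DescriptorProto{
		Name:      proto.String("Profile"), // hypothetical message
		Field:     []*descriptorpb.FieldDescriptorProto{field},
		OneofDecl: []*descriptorpb.OneofDescriptorProto{{Name: proto.String("_nickname")}},
	}
	fmt.Println(msg.GetField()[0].GetProto3Optional()) // true
}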
-type EnumDescriptorProto struct { - state protoimpl.MessageState `protogen:"open.v1"` - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` - Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` - // Range of reserved numeric values. Reserved numeric values may not be used - // by enum values in the same enum declaration. Reserved ranges may not - // overlap. - ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` - // Reserved enum value names, which may not be reused. A given name may only - // be reserved once. - ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *EnumDescriptorProto) Reset() { - *x = EnumDescriptorProto{} - mi := &file_google_protobuf_descriptor_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *EnumDescriptorProto) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*EnumDescriptorProto) ProtoMessage() {} - -func (x *EnumDescriptorProto) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[6] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use EnumDescriptorProto.ProtoReflect.Descriptor instead. -func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{6} -} - -func (x *EnumDescriptorProto) GetName() string { - if x != nil && x.Name != nil { - return *x.Name - } - return "" -} - -func (x *EnumDescriptorProto) GetValue() []*EnumValueDescriptorProto { - if x != nil { - return x.Value - } - return nil -} - -func (x *EnumDescriptorProto) GetOptions() *EnumOptions { - if x != nil { - return x.Options - } - return nil -} - -func (x *EnumDescriptorProto) GetReservedRange() []*EnumDescriptorProto_EnumReservedRange { - if x != nil { - return x.ReservedRange - } - return nil -} - -func (x *EnumDescriptorProto) GetReservedName() []string { - if x != nil { - return x.ReservedName - } - return nil -} - -// Describes a value within an enum. 
-type EnumValueDescriptorProto struct { - state protoimpl.MessageState `protogen:"open.v1"` - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` - Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *EnumValueDescriptorProto) Reset() { - *x = EnumValueDescriptorProto{} - mi := &file_google_protobuf_descriptor_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *EnumValueDescriptorProto) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*EnumValueDescriptorProto) ProtoMessage() {} - -func (x *EnumValueDescriptorProto) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[7] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use EnumValueDescriptorProto.ProtoReflect.Descriptor instead. -func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{7} -} - -func (x *EnumValueDescriptorProto) GetName() string { - if x != nil && x.Name != nil { - return *x.Name - } - return "" -} - -func (x *EnumValueDescriptorProto) GetNumber() int32 { - if x != nil && x.Number != nil { - return *x.Number - } - return 0 -} - -func (x *EnumValueDescriptorProto) GetOptions() *EnumValueOptions { - if x != nil { - return x.Options - } - return nil -} - -// Describes a service. -type ServiceDescriptorProto struct { - state protoimpl.MessageState `protogen:"open.v1"` - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` - Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *ServiceDescriptorProto) Reset() { - *x = ServiceDescriptorProto{} - mi := &file_google_protobuf_descriptor_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ServiceDescriptorProto) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ServiceDescriptorProto) ProtoMessage() {} - -func (x *ServiceDescriptorProto) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[8] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ServiceDescriptorProto.ProtoReflect.Descriptor instead. -func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{8} -} - -func (x *ServiceDescriptorProto) GetName() string { - if x != nil && x.Name != nil { - return *x.Name - } - return "" -} - -func (x *ServiceDescriptorProto) GetMethod() []*MethodDescriptorProto { - if x != nil { - return x.Method - } - return nil -} - -func (x *ServiceDescriptorProto) GetOptions() *ServiceOptions { - if x != nil { - return x.Options - } - return nil -} - -// Describes a method of a service. 
-type MethodDescriptorProto struct { - state protoimpl.MessageState `protogen:"open.v1"` - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // Input and output type names. These are resolved in the same way as - // FieldDescriptorProto.type_name, but must refer to a message type. - InputType *string `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"` - OutputType *string `protobuf:"bytes,3,opt,name=output_type,json=outputType" json:"output_type,omitempty"` - Options *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"` - // Identifies if client streams multiple client messages - ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"` - // Identifies if server streams multiple server messages - ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -// Default values for MethodDescriptorProto fields. -const ( - Default_MethodDescriptorProto_ClientStreaming = bool(false) - Default_MethodDescriptorProto_ServerStreaming = bool(false) -) - -func (x *MethodDescriptorProto) Reset() { - *x = MethodDescriptorProto{} - mi := &file_google_protobuf_descriptor_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *MethodDescriptorProto) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MethodDescriptorProto) ProtoMessage() {} - -func (x *MethodDescriptorProto) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[9] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MethodDescriptorProto.ProtoReflect.Descriptor instead. -func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{9} -} - -func (x *MethodDescriptorProto) GetName() string { - if x != nil && x.Name != nil { - return *x.Name - } - return "" -} - -func (x *MethodDescriptorProto) GetInputType() string { - if x != nil && x.InputType != nil { - return *x.InputType - } - return "" -} - -func (x *MethodDescriptorProto) GetOutputType() string { - if x != nil && x.OutputType != nil { - return *x.OutputType - } - return "" -} - -func (x *MethodDescriptorProto) GetOptions() *MethodOptions { - if x != nil { - return x.Options - } - return nil -} - -func (x *MethodDescriptorProto) GetClientStreaming() bool { - if x != nil && x.ClientStreaming != nil { - return *x.ClientStreaming - } - return Default_MethodDescriptorProto_ClientStreaming -} - -func (x *MethodDescriptorProto) GetServerStreaming() bool { - if x != nil && x.ServerStreaming != nil { - return *x.ServerStreaming - } - return Default_MethodDescriptorProto_ServerStreaming -} - -type FileOptions struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Sets the Java package where classes generated from this .proto will be - // placed. By default, the proto package is used, but this is often - // inappropriate because proto packages do not normally start with backwards - // domain names. 
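client_streaming and server_streaming above default to false, so a descriptor only needs to set the directions it actually streams; the getters return the Default_ constants otherwise. A sketch with hypothetical, fully-qualified type names (the leading dot follows the input_type/output_type resolution rule):

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	m := &descriptorpb.MethodDescriptorProto{
		Name:            proto.String("Watch"),                    // hypothetical method
		InputType:       proto.String(".example.v1.WatchRequest"), // hypothetical types
		OutputType:      proto.String(".example.v1.Event"),
		ServerStreaming: proto.Bool(true),
	}
	fmt.Println(m.GetClientStreaming(), m.GetServerStreaming()) // false true
}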
- JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"` - // Controls the name of the wrapper Java class generated for the .proto file. - // That class will always contain the .proto file's getDescriptor() method as - // well as any top-level extensions defined in the .proto file. - // If java_multiple_files is disabled, then all the other classes from the - // .proto file will be nested inside the single wrapper outer class. - JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"` - // If enabled, then the Java code generator will generate a separate .java - // file for each top-level message, enum, and service defined in the .proto - // file. Thus, these types will *not* be nested inside the wrapper class - // named by java_outer_classname. However, the wrapper class will still be - // generated to contain the file's getDescriptor() method as well as any - // top-level extensions defined in the file. - JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"` - // This option does nothing. - // - // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. - JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` - // A proto2 file can set this to true to opt in to UTF-8 checking for Java, - // which will throw an exception if invalid UTF-8 is parsed from the wire or - // assigned to a string field. - // - // TODO: clarify exactly what kinds of field types this option - // applies to, and update these docs accordingly. - // - // Proto3 files already perform these checks. Setting the option explicitly to - // false has no effect: it cannot be used to opt proto3 files out of UTF-8 - // checks. - JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"` - OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"` - // Sets the Go package where structs generated from this .proto will be - // placed. If omitted, the Go package will be derived from the following: - // - The basename of the package import path, if provided. - // - Otherwise, the package statement in the .proto file, if present. - // - Otherwise, the basename of the .proto file, without extension. - GoPackage *string `protobuf:"bytes,11,opt,name=go_package,json=goPackage" json:"go_package,omitempty"` - // Should generic services be generated in each language? "Generic" services - // are not specific to any particular RPC system. They are generated by the - // main code generators in each language (without additional plugins). - // Generic services were the only kind of service generation supported by - // early versions of google.protobuf. - // - // Generic services are now considered deprecated in favor of using plugins - // that generate code specific to your particular RPC system. Therefore, - // these default to false. Old code which depends on generic services should - // explicitly set them to true. 
- CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"`
- JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"`
- PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"`
- // Is this file deprecated?
- // Depending on the target platform, this can emit Deprecated annotations
- // for everything in the file, or it will be completely ignored; in the very
- // least, this is a formalization for deprecating files.
- Deprecated *bool `protobuf:"varint,23,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
- // Enables the use of arenas for the proto messages in this file. This applies
- // only to generated classes for C++.
- CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,json=ccEnableArenas,def=1" json:"cc_enable_arenas,omitempty"`
- // Sets the objective c class prefix which is prepended to all objective c
- // generated classes from this .proto. There is no default.
- ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"`
- // Namespace for generated classes; defaults to the package.
- CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"`
- // By default Swift generators will take the proto package and CamelCase it
- // replacing '.' with underscore and use that to prefix the types/symbols
- // defined. When this option is provided, they will use this value instead
- // to prefix the types/symbols defined.
- SwiftPrefix *string `protobuf:"bytes,39,opt,name=swift_prefix,json=swiftPrefix" json:"swift_prefix,omitempty"`
- // Sets the php class prefix which is prepended to all php generated classes
- // from this .proto. Default is empty.
- PhpClassPrefix *string `protobuf:"bytes,40,opt,name=php_class_prefix,json=phpClassPrefix" json:"php_class_prefix,omitempty"`
- // Use this option to change the namespace of php generated classes. Default
- // is empty. When this option is empty, the package name will be used for
- // determining the namespace.
- PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"`
- // Use this option to change the namespace of php generated metadata classes.
- // Default is empty. When this option is empty, the proto file name will be
- // used for determining the namespace.
- PhpMetadataNamespace *string `protobuf:"bytes,44,opt,name=php_metadata_namespace,json=phpMetadataNamespace" json:"php_metadata_namespace,omitempty"`
- // Use this option to change the package of ruby generated classes. Default
- // is empty. When this option is not set, the package name will be used for
- // determining the ruby package.
- RubyPackage *string `protobuf:"bytes,45,opt,name=ruby_package,json=rubyPackage" json:"ruby_package,omitempty"`
- // Any features defined in the specific edition.
- Features *FeatureSet `protobuf:"bytes,50,opt,name=features" json:"features,omitempty"`
- // The parser stores options it doesn't recognize here.
- // See the documentation for the "Options" section above.
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - extensionFields protoimpl.ExtensionFields - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -// Default values for FileOptions fields. -const ( - Default_FileOptions_JavaMultipleFiles = bool(false) - Default_FileOptions_JavaStringCheckUtf8 = bool(false) - Default_FileOptions_OptimizeFor = FileOptions_SPEED - Default_FileOptions_CcGenericServices = bool(false) - Default_FileOptions_JavaGenericServices = bool(false) - Default_FileOptions_PyGenericServices = bool(false) - Default_FileOptions_Deprecated = bool(false) - Default_FileOptions_CcEnableArenas = bool(true) -) - -func (x *FileOptions) Reset() { - *x = FileOptions{} - mi := &file_google_protobuf_descriptor_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *FileOptions) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*FileOptions) ProtoMessage() {} - -func (x *FileOptions) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[10] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use FileOptions.ProtoReflect.Descriptor instead. -func (*FileOptions) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{10} -} - -func (x *FileOptions) GetJavaPackage() string { - if x != nil && x.JavaPackage != nil { - return *x.JavaPackage - } - return "" -} - -func (x *FileOptions) GetJavaOuterClassname() string { - if x != nil && x.JavaOuterClassname != nil { - return *x.JavaOuterClassname - } - return "" -} - -func (x *FileOptions) GetJavaMultipleFiles() bool { - if x != nil && x.JavaMultipleFiles != nil { - return *x.JavaMultipleFiles - } - return Default_FileOptions_JavaMultipleFiles -} - -// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. 
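The Default_FileOptions_* constants above are exactly what the getters fall back to, which is why an empty FileOptions still reports optimize_for as SPEED and cc_enable_arenas as true. A minimal sketch with an illustrative go_package value:

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	fo := &descriptorpb.FileOptions{
		GoPackage: proto.String("example.com/gen/examplepb"), // hypothetical import path
	}
	fmt.Println(fo.GetGoPackage())
	// Unset fields surface the documented defaults.
	fmt.Println(fo.GetOptimizeFor(), fo.GetCcEnableArenas()) // SPEED true
}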
-func (x *FileOptions) GetJavaGenerateEqualsAndHash() bool { - if x != nil && x.JavaGenerateEqualsAndHash != nil { - return *x.JavaGenerateEqualsAndHash - } - return false -} - -func (x *FileOptions) GetJavaStringCheckUtf8() bool { - if x != nil && x.JavaStringCheckUtf8 != nil { - return *x.JavaStringCheckUtf8 - } - return Default_FileOptions_JavaStringCheckUtf8 -} - -func (x *FileOptions) GetOptimizeFor() FileOptions_OptimizeMode { - if x != nil && x.OptimizeFor != nil { - return *x.OptimizeFor - } - return Default_FileOptions_OptimizeFor -} - -func (x *FileOptions) GetGoPackage() string { - if x != nil && x.GoPackage != nil { - return *x.GoPackage - } - return "" -} - -func (x *FileOptions) GetCcGenericServices() bool { - if x != nil && x.CcGenericServices != nil { - return *x.CcGenericServices - } - return Default_FileOptions_CcGenericServices -} - -func (x *FileOptions) GetJavaGenericServices() bool { - if x != nil && x.JavaGenericServices != nil { - return *x.JavaGenericServices - } - return Default_FileOptions_JavaGenericServices -} - -func (x *FileOptions) GetPyGenericServices() bool { - if x != nil && x.PyGenericServices != nil { - return *x.PyGenericServices - } - return Default_FileOptions_PyGenericServices -} - -func (x *FileOptions) GetDeprecated() bool { - if x != nil && x.Deprecated != nil { - return *x.Deprecated - } - return Default_FileOptions_Deprecated -} - -func (x *FileOptions) GetCcEnableArenas() bool { - if x != nil && x.CcEnableArenas != nil { - return *x.CcEnableArenas - } - return Default_FileOptions_CcEnableArenas -} - -func (x *FileOptions) GetObjcClassPrefix() string { - if x != nil && x.ObjcClassPrefix != nil { - return *x.ObjcClassPrefix - } - return "" -} - -func (x *FileOptions) GetCsharpNamespace() string { - if x != nil && x.CsharpNamespace != nil { - return *x.CsharpNamespace - } - return "" -} - -func (x *FileOptions) GetSwiftPrefix() string { - if x != nil && x.SwiftPrefix != nil { - return *x.SwiftPrefix - } - return "" -} - -func (x *FileOptions) GetPhpClassPrefix() string { - if x != nil && x.PhpClassPrefix != nil { - return *x.PhpClassPrefix - } - return "" -} - -func (x *FileOptions) GetPhpNamespace() string { - if x != nil && x.PhpNamespace != nil { - return *x.PhpNamespace - } - return "" -} - -func (x *FileOptions) GetPhpMetadataNamespace() string { - if x != nil && x.PhpMetadataNamespace != nil { - return *x.PhpMetadataNamespace - } - return "" -} - -func (x *FileOptions) GetRubyPackage() string { - if x != nil && x.RubyPackage != nil { - return *x.RubyPackage - } - return "" -} - -func (x *FileOptions) GetFeatures() *FeatureSet { - if x != nil { - return x.Features - } - return nil -} - -func (x *FileOptions) GetUninterpretedOption() []*UninterpretedOption { - if x != nil { - return x.UninterpretedOption - } - return nil -} - -type MessageOptions struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Set true to use the old proto1 MessageSet wire format for extensions. - // This is provided for backwards-compatibility with the MessageSet wire - // format. You should not use this for any other reason: It's less - // efficient, has fewer features, and is more complicated. - // - // The message must be defined exactly as follows: - // - // message Foo { - // option message_set_wire_format = true; - // extensions 4 to max; - // } - // - // Note that the message cannot have any defined fields; MessageSets only - // have extensions. - // - // All extensions of your type must be singular messages; e.g. 
they cannot
- // be int32s, enums, or repeated messages.
- //
- // Because this is an option, the above two restrictions are not enforced by
- // the protocol compiler.
- MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,json=messageSetWireFormat,def=0" json:"message_set_wire_format,omitempty"`
- // Disables the generation of the standard "descriptor()" accessor, which can
- // conflict with a field of the same name. This is meant to make migration
- // from proto1 easier; new code should avoid fields named "descriptor".
- NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,json=noStandardDescriptorAccessor,def=0" json:"no_standard_descriptor_accessor,omitempty"`
- // Is this message deprecated?
- // Depending on the target platform, this can emit Deprecated annotations
- // for the message, or it will be completely ignored; in the very least,
- // this is a formalization for deprecating messages.
- Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
- // Whether the message is an automatically generated map entry type for the
- // maps field.
- //
- // For maps fields:
- //
- // map<KeyType, ValueType> map_field = 1;
- //
- // The parsed descriptor looks like:
- //
- // message MapFieldEntry {
- // option map_entry = true;
- // optional KeyType key = 1;
- // optional ValueType value = 2;
- // }
- // repeated MapFieldEntry map_field = 1;
- //
- // Implementations may choose not to generate the map_entry=true message, but
- // use a native map in the target language to hold the keys and values.
- // The reflection APIs in such implementations still need to work as
- // if the field is a repeated message field.
- //
- // NOTE: Do not set the option in .proto files. Always use the maps syntax
- // instead. The option should only be implicitly set by the proto compiler
- // parser.
- MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"`
- // Enable the legacy handling of JSON field name conflicts. This lowercases
- // and strips underscores from the fields before comparison in proto3 only.
- // The new behavior takes `json_name` into account and applies to proto2 as
- // well.
- //
- // This should only be used as a temporary measure against broken builds due
- // to the change in behavior for JSON field name conflicts.
- //
- // TODO This is legacy behavior we plan to remove once downstream
- // teams have had time to migrate.
- //
- // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto.
- DeprecatedLegacyJsonFieldConflicts *bool `protobuf:"varint,11,opt,name=deprecated_legacy_json_field_conflicts,json=deprecatedLegacyJsonFieldConflicts" json:"deprecated_legacy_json_field_conflicts,omitempty"`
- // Any features defined in the specific edition.
- Features *FeatureSet `protobuf:"bytes,12,opt,name=features" json:"features,omitempty"`
- // The parser stores options it doesn't recognize here. See above.
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
- extensionFields protoimpl.ExtensionFields
- unknownFields protoimpl.UnknownFields
- sizeCache protoimpl.SizeCache
-}
-
-// Default values for MessageOptions fields.
-const ( - Default_MessageOptions_MessageSetWireFormat = bool(false) - Default_MessageOptions_NoStandardDescriptorAccessor = bool(false) - Default_MessageOptions_Deprecated = bool(false) -) - -func (x *MessageOptions) Reset() { - *x = MessageOptions{} - mi := &file_google_protobuf_descriptor_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *MessageOptions) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MessageOptions) ProtoMessage() {} - -func (x *MessageOptions) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[11] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MessageOptions.ProtoReflect.Descriptor instead. -func (*MessageOptions) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{11} -} - -func (x *MessageOptions) GetMessageSetWireFormat() bool { - if x != nil && x.MessageSetWireFormat != nil { - return *x.MessageSetWireFormat - } - return Default_MessageOptions_MessageSetWireFormat -} - -func (x *MessageOptions) GetNoStandardDescriptorAccessor() bool { - if x != nil && x.NoStandardDescriptorAccessor != nil { - return *x.NoStandardDescriptorAccessor - } - return Default_MessageOptions_NoStandardDescriptorAccessor -} - -func (x *MessageOptions) GetDeprecated() bool { - if x != nil && x.Deprecated != nil { - return *x.Deprecated - } - return Default_MessageOptions_Deprecated -} - -func (x *MessageOptions) GetMapEntry() bool { - if x != nil && x.MapEntry != nil { - return *x.MapEntry - } - return false -} - -// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. -func (x *MessageOptions) GetDeprecatedLegacyJsonFieldConflicts() bool { - if x != nil && x.DeprecatedLegacyJsonFieldConflicts != nil { - return *x.DeprecatedLegacyJsonFieldConflicts - } - return false -} - -func (x *MessageOptions) GetFeatures() *FeatureSet { - if x != nil { - return x.Features - } - return nil -} - -func (x *MessageOptions) GetUninterpretedOption() []*UninterpretedOption { - if x != nil { - return x.UninterpretedOption - } - return nil -} - -type FieldOptions struct { - state protoimpl.MessageState `protogen:"open.v1"` - // NOTE: ctype is deprecated. Use `features.(pb.cpp).string_type` instead. - // The ctype option instructs the C++ code generator to use a different - // representation of the field than it normally would. See the specific - // options below. This option is only implemented to support use of - // [ctype=CORD] and [ctype=STRING] (the default) on non-repeated fields of - // type "bytes" in the open source release. - // TODO: make ctype actually deprecated. - Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"` - // The packed option can be enabled for repeated primitive fields to enable - // a more efficient representation on the wire. Rather than repeatedly - // writing the tag and type for each element, the entire array is encoded as - // a single length-delimited blob. In proto3, only explicit setting it to - // false will avoid using packed encoding. This option is prohibited in - // Editions, but the `repeated_field_encoding` feature can be used to control - // the behavior. 
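
The packed option described above trades a per-element tag for a single length-delimited blob. As an illustration of the size difference (not tied to this generated file), here is a minimal sketch using google.golang.org/protobuf/encoding/protowire; the field number 1 and the sample values are arbitrary assumptions:

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	values := []uint64{1, 2, 3, 150}

	// Unpacked: the tag is repeated before every element.
	var unpacked []byte
	for _, v := range values {
		unpacked = protowire.AppendTag(unpacked, 1, protowire.VarintType)
		unpacked = protowire.AppendVarint(unpacked, v)
	}

	// Packed: one tag, then all varints inside a single length-delimited blob.
	var payload []byte
	for _, v := range values {
		payload = protowire.AppendVarint(payload, v)
	}
	packed := protowire.AppendTag(nil, 1, protowire.BytesType)
	packed = protowire.AppendBytes(packed, payload)

	fmt.Printf("unpacked: %d bytes, packed: %d bytes\n", len(unpacked), len(packed)) // 9 vs 7 here
}
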
- Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"`
- // The jstype option determines the JavaScript type used for values of the
- // field. The option is permitted only for 64 bit integral and fixed types
- // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING
- // is represented as JavaScript string, which avoids loss of precision that
- // can happen when a large value is converted to a floating point JavaScript
- // number. Specifying JS_NUMBER for the jstype causes the generated JavaScript
- // code to use the JavaScript "number" type. The behavior of the default
- // option JS_NORMAL is implementation dependent.
- //
- // This option is an enum to permit additional types to be added, e.g.
- // goog.math.Integer.
- Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"`
- // Should this field be parsed lazily? Lazy applies only to message-type
- // fields. It means that when the outer message is initially parsed, the
- // inner message's contents will not be parsed but instead stored in encoded
- // form. The inner message will actually be parsed when it is first accessed.
- //
- // This is only a hint. Implementations are free to choose whether to use
- // eager or lazy parsing regardless of the value of this option. However,
- // setting this option true suggests that the protocol author believes that
- // using lazy parsing on this field is worth the additional bookkeeping
- // overhead typically needed to implement it.
- //
- // This option does not affect the public interface of any generated code;
- // all method signatures remain the same. Furthermore, thread-safety of the
- // interface is not affected by this option; const methods remain safe to
- // call from multiple threads concurrently, while non-const methods continue
- // to require exclusive access.
- //
- // Note that lazy message fields are still eagerly verified to check for
- // ill-formed wire format or missing required fields. Calling IsInitialized()
- // on the outer message would fail if the inner message has missing required
- // fields. Failed verification would result in parsing failure (except when
- // uninitialized messages are acceptable).
- Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"`
- // unverified_lazy does no correctness checks on the byte stream. This should
- // only be used where lazy with verification is prohibitive for performance
- // reasons.
- UnverifiedLazy *bool `protobuf:"varint,15,opt,name=unverified_lazy,json=unverifiedLazy,def=0" json:"unverified_lazy,omitempty"`
- // Is this field deprecated?
- // Depending on the target platform, this can emit Deprecated annotations
- // for accessors, or it will be completely ignored; in the very least, this
- // is a formalization for deprecating fields.
- Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
- // For Google-internal migration only. Do not use.
- Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"`
- // Indicate that the field value should not be printed out when using debug
- // formats, e.g. when the field contains sensitive credentials.
- DebugRedact *bool `protobuf:"varint,16,opt,name=debug_redact,json=debugRedact,def=0" json:"debug_redact,omitempty"` - Retention *FieldOptions_OptionRetention `protobuf:"varint,17,opt,name=retention,enum=google.protobuf.FieldOptions_OptionRetention" json:"retention,omitempty"` - Targets []FieldOptions_OptionTargetType `protobuf:"varint,19,rep,name=targets,enum=google.protobuf.FieldOptions_OptionTargetType" json:"targets,omitempty"` - EditionDefaults []*FieldOptions_EditionDefault `protobuf:"bytes,20,rep,name=edition_defaults,json=editionDefaults" json:"edition_defaults,omitempty"` - // Any features defined in the specific edition. - Features *FeatureSet `protobuf:"bytes,21,opt,name=features" json:"features,omitempty"` - FeatureSupport *FieldOptions_FeatureSupport `protobuf:"bytes,22,opt,name=feature_support,json=featureSupport" json:"feature_support,omitempty"` - // The parser stores options it doesn't recognize here. See above. - UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - extensionFields protoimpl.ExtensionFields - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -// Default values for FieldOptions fields. -const ( - Default_FieldOptions_Ctype = FieldOptions_STRING - Default_FieldOptions_Jstype = FieldOptions_JS_NORMAL - Default_FieldOptions_Lazy = bool(false) - Default_FieldOptions_UnverifiedLazy = bool(false) - Default_FieldOptions_Deprecated = bool(false) - Default_FieldOptions_Weak = bool(false) - Default_FieldOptions_DebugRedact = bool(false) -) - -func (x *FieldOptions) Reset() { - *x = FieldOptions{} - mi := &file_google_protobuf_descriptor_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *FieldOptions) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*FieldOptions) ProtoMessage() {} - -func (x *FieldOptions) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[12] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use FieldOptions.ProtoReflect.Descriptor instead. 
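
The Default_FieldOptions_* constants above exist because proto2 optional fields distinguish "unset" from the zero value; the getters that follow are nil-safe and fall back to those constants. A minimal sketch against the published google.golang.org/protobuf/types/descriptorpb package:

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	var unset *descriptorpb.FieldOptions // getters tolerate a nil receiver
	fmt.Println(unset.GetLazy())         // false, via Default_FieldOptions_Lazy

	set := &descriptorpb.FieldOptions{Lazy: proto.Bool(true)}
	fmt.Println(set.GetLazy()) // true; presence is tracked through the pointer
}
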
-func (*FieldOptions) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12} -} - -func (x *FieldOptions) GetCtype() FieldOptions_CType { - if x != nil && x.Ctype != nil { - return *x.Ctype - } - return Default_FieldOptions_Ctype -} - -func (x *FieldOptions) GetPacked() bool { - if x != nil && x.Packed != nil { - return *x.Packed - } - return false -} - -func (x *FieldOptions) GetJstype() FieldOptions_JSType { - if x != nil && x.Jstype != nil { - return *x.Jstype - } - return Default_FieldOptions_Jstype -} - -func (x *FieldOptions) GetLazy() bool { - if x != nil && x.Lazy != nil { - return *x.Lazy - } - return Default_FieldOptions_Lazy -} - -func (x *FieldOptions) GetUnverifiedLazy() bool { - if x != nil && x.UnverifiedLazy != nil { - return *x.UnverifiedLazy - } - return Default_FieldOptions_UnverifiedLazy -} - -func (x *FieldOptions) GetDeprecated() bool { - if x != nil && x.Deprecated != nil { - return *x.Deprecated - } - return Default_FieldOptions_Deprecated -} - -func (x *FieldOptions) GetWeak() bool { - if x != nil && x.Weak != nil { - return *x.Weak - } - return Default_FieldOptions_Weak -} - -func (x *FieldOptions) GetDebugRedact() bool { - if x != nil && x.DebugRedact != nil { - return *x.DebugRedact - } - return Default_FieldOptions_DebugRedact -} - -func (x *FieldOptions) GetRetention() FieldOptions_OptionRetention { - if x != nil && x.Retention != nil { - return *x.Retention - } - return FieldOptions_RETENTION_UNKNOWN -} - -func (x *FieldOptions) GetTargets() []FieldOptions_OptionTargetType { - if x != nil { - return x.Targets - } - return nil -} - -func (x *FieldOptions) GetEditionDefaults() []*FieldOptions_EditionDefault { - if x != nil { - return x.EditionDefaults - } - return nil -} - -func (x *FieldOptions) GetFeatures() *FeatureSet { - if x != nil { - return x.Features - } - return nil -} - -func (x *FieldOptions) GetFeatureSupport() *FieldOptions_FeatureSupport { - if x != nil { - return x.FeatureSupport - } - return nil -} - -func (x *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { - if x != nil { - return x.UninterpretedOption - } - return nil -} - -type OneofOptions struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Any features defined in the specific edition. - Features *FeatureSet `protobuf:"bytes,1,opt,name=features" json:"features,omitempty"` - // The parser stores options it doesn't recognize here. See above. - UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - extensionFields protoimpl.ExtensionFields - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *OneofOptions) Reset() { - *x = OneofOptions{} - mi := &file_google_protobuf_descriptor_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *OneofOptions) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*OneofOptions) ProtoMessage() {} - -func (x *OneofOptions) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[13] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use OneofOptions.ProtoReflect.Descriptor instead. 
-func (*OneofOptions) Descriptor() ([]byte, []int) {
- return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{13}
-}
-
-func (x *OneofOptions) GetFeatures() *FeatureSet {
- if x != nil {
- return x.Features
- }
- return nil
-}
-
-func (x *OneofOptions) GetUninterpretedOption() []*UninterpretedOption {
- if x != nil {
- return x.UninterpretedOption
- }
- return nil
-}
-
-type EnumOptions struct {
- state protoimpl.MessageState `protogen:"open.v1"`
- // Set this option to true to allow mapping different tag names to the same
- // value.
- AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"`
- // Is this enum deprecated?
- // Depending on the target platform, this can emit Deprecated annotations
- // for the enum, or it will be completely ignored; in the very least, this
- // is a formalization for deprecating enums.
- Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
- // Enable the legacy handling of JSON field name conflicts. This lowercases
- // and strips underscores from the fields before comparison in proto3 only.
- // The new behavior takes `json_name` into account and applies to proto2 as
- // well.
- // TODO Remove this legacy behavior once downstream teams have
- // had time to migrate.
- //
- // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto.
- DeprecatedLegacyJsonFieldConflicts *bool `protobuf:"varint,6,opt,name=deprecated_legacy_json_field_conflicts,json=deprecatedLegacyJsonFieldConflicts" json:"deprecated_legacy_json_field_conflicts,omitempty"`
- // Any features defined in the specific edition.
- Features *FeatureSet `protobuf:"bytes,7,opt,name=features" json:"features,omitempty"`
- // The parser stores options it doesn't recognize here. See above.
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
- extensionFields protoimpl.ExtensionFields
- unknownFields protoimpl.UnknownFields
- sizeCache protoimpl.SizeCache
-}
-
-// Default values for EnumOptions fields.
-const (
- Default_EnumOptions_Deprecated = bool(false)
-)
-
-func (x *EnumOptions) Reset() {
- *x = EnumOptions{}
- mi := &file_google_protobuf_descriptor_proto_msgTypes[14]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
-}
-
-func (x *EnumOptions) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*EnumOptions) ProtoMessage() {}
-
-func (x *EnumOptions) ProtoReflect() protoreflect.Message {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[14]
- if x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use EnumOptions.ProtoReflect.Descriptor instead.
-func (*EnumOptions) Descriptor() ([]byte, []int) {
- return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{14}
-}
-
-func (x *EnumOptions) GetAllowAlias() bool {
- if x != nil && x.AllowAlias != nil {
- return *x.AllowAlias
- }
- return false
-}
-
-func (x *EnumOptions) GetDeprecated() bool {
- if x != nil && x.Deprecated != nil {
- return *x.Deprecated
- }
- return Default_EnumOptions_Deprecated
-}
-
-// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto.
-func (x *EnumOptions) GetDeprecatedLegacyJsonFieldConflicts() bool { - if x != nil && x.DeprecatedLegacyJsonFieldConflicts != nil { - return *x.DeprecatedLegacyJsonFieldConflicts - } - return false -} - -func (x *EnumOptions) GetFeatures() *FeatureSet { - if x != nil { - return x.Features - } - return nil -} - -func (x *EnumOptions) GetUninterpretedOption() []*UninterpretedOption { - if x != nil { - return x.UninterpretedOption - } - return nil -} - -type EnumValueOptions struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Is this enum value deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the enum value, or it will be completely ignored; in the very least, - // this is a formalization for deprecating enum values. - Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // Any features defined in the specific edition. - Features *FeatureSet `protobuf:"bytes,2,opt,name=features" json:"features,omitempty"` - // Indicate that fields annotated with this enum value should not be printed - // out when using debug formats, e.g. when the field contains sensitive - // credentials. - DebugRedact *bool `protobuf:"varint,3,opt,name=debug_redact,json=debugRedact,def=0" json:"debug_redact,omitempty"` - // Information about the support window of a feature value. - FeatureSupport *FieldOptions_FeatureSupport `protobuf:"bytes,4,opt,name=feature_support,json=featureSupport" json:"feature_support,omitempty"` - // The parser stores options it doesn't recognize here. See above. - UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - extensionFields protoimpl.ExtensionFields - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -// Default values for EnumValueOptions fields. -const ( - Default_EnumValueOptions_Deprecated = bool(false) - Default_EnumValueOptions_DebugRedact = bool(false) -) - -func (x *EnumValueOptions) Reset() { - *x = EnumValueOptions{} - mi := &file_google_protobuf_descriptor_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *EnumValueOptions) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*EnumValueOptions) ProtoMessage() {} - -func (x *EnumValueOptions) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[15] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use EnumValueOptions.ProtoReflect.Descriptor instead. 
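
In practice, options messages like the ones above are read off protoreflect descriptors rather than constructed by hand. A sketch of that access pattern; descriptorpb.FileDescriptorProto is used only because any protobuf-go program is guaranteed to have it linked in:

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	// Descriptor options are exposed as the concrete generated option types.
	md := (&descriptorpb.FileDescriptorProto{}).ProtoReflect().Descriptor()
	if opts, ok := md.Options().(*descriptorpb.MessageOptions); ok {
		fmt.Println(opts.GetDeprecated()) // false: FileDescriptorProto sets no options
	}
}
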
-func (*EnumValueOptions) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{15} -} - -func (x *EnumValueOptions) GetDeprecated() bool { - if x != nil && x.Deprecated != nil { - return *x.Deprecated - } - return Default_EnumValueOptions_Deprecated -} - -func (x *EnumValueOptions) GetFeatures() *FeatureSet { - if x != nil { - return x.Features - } - return nil -} - -func (x *EnumValueOptions) GetDebugRedact() bool { - if x != nil && x.DebugRedact != nil { - return *x.DebugRedact - } - return Default_EnumValueOptions_DebugRedact -} - -func (x *EnumValueOptions) GetFeatureSupport() *FieldOptions_FeatureSupport { - if x != nil { - return x.FeatureSupport - } - return nil -} - -func (x *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { - if x != nil { - return x.UninterpretedOption - } - return nil -} - -type ServiceOptions struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Any features defined in the specific edition. - Features *FeatureSet `protobuf:"bytes,34,opt,name=features" json:"features,omitempty"` - // Is this service deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the service, or it will be completely ignored; in the very least, - // this is a formalization for deprecating services. - Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // The parser stores options it doesn't recognize here. See above. - UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - extensionFields protoimpl.ExtensionFields - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -// Default values for ServiceOptions fields. -const ( - Default_ServiceOptions_Deprecated = bool(false) -) - -func (x *ServiceOptions) Reset() { - *x = ServiceOptions{} - mi := &file_google_protobuf_descriptor_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ServiceOptions) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ServiceOptions) ProtoMessage() {} - -func (x *ServiceOptions) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[16] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ServiceOptions.ProtoReflect.Descriptor instead. -func (*ServiceOptions) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{16} -} - -func (x *ServiceOptions) GetFeatures() *FeatureSet { - if x != nil { - return x.Features - } - return nil -} - -func (x *ServiceOptions) GetDeprecated() bool { - if x != nil && x.Deprecated != nil { - return *x.Deprecated - } - return Default_ServiceOptions_Deprecated -} - -func (x *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption { - if x != nil { - return x.UninterpretedOption - } - return nil -} - -type MethodOptions struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Is this method deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the method, or it will be completely ignored; in the very least, - // this is a formalization for deprecating methods. 
- Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"` - // Any features defined in the specific edition. - Features *FeatureSet `protobuf:"bytes,35,opt,name=features" json:"features,omitempty"` - // The parser stores options it doesn't recognize here. See above. - UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - extensionFields protoimpl.ExtensionFields - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -// Default values for MethodOptions fields. -const ( - Default_MethodOptions_Deprecated = bool(false) - Default_MethodOptions_IdempotencyLevel = MethodOptions_IDEMPOTENCY_UNKNOWN -) - -func (x *MethodOptions) Reset() { - *x = MethodOptions{} - mi := &file_google_protobuf_descriptor_proto_msgTypes[17] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *MethodOptions) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*MethodOptions) ProtoMessage() {} - -func (x *MethodOptions) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[17] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use MethodOptions.ProtoReflect.Descriptor instead. -func (*MethodOptions) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{17} -} - -func (x *MethodOptions) GetDeprecated() bool { - if x != nil && x.Deprecated != nil { - return *x.Deprecated - } - return Default_MethodOptions_Deprecated -} - -func (x *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel { - if x != nil && x.IdempotencyLevel != nil { - return *x.IdempotencyLevel - } - return Default_MethodOptions_IdempotencyLevel -} - -func (x *MethodOptions) GetFeatures() *FeatureSet { - if x != nil { - return x.Features - } - return nil -} - -func (x *MethodOptions) GetUninterpretedOption() []*UninterpretedOption { - if x != nil { - return x.UninterpretedOption - } - return nil -} - -// A message representing a option the parser does not recognize. This only -// appears in options protos created by the compiler::Parser class. -// DescriptorPool resolves these when building Descriptor objects. Therefore, -// options protos in descriptor objects (e.g. returned by Descriptor::options(), -// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions -// in them. -type UninterpretedOption struct { - state protoimpl.MessageState `protogen:"open.v1"` - Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"` - // The value of the uninterpreted option, in whatever type the tokenizer - // identified it as during parsing. Exactly one of these should be set. 
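
Since exactly one of the value fields declared just below should be populated, a consumer typically switches on presence. A hypothetical helper sketching that pattern (the function name is an assumption, not part of this file):

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/descriptorpb"
)

// uninterpretedValue renders whichever of the mutually exclusive value
// fields happens to be set.
func uninterpretedValue(o *descriptorpb.UninterpretedOption) string {
	switch {
	case o.IdentifierValue != nil:
		return o.GetIdentifierValue()
	case o.PositiveIntValue != nil:
		return fmt.Sprint(o.GetPositiveIntValue())
	case o.NegativeIntValue != nil:
		return fmt.Sprint(o.GetNegativeIntValue())
	case o.DoubleValue != nil:
		return fmt.Sprint(o.GetDoubleValue())
	case o.StringValue != nil:
		return string(o.GetStringValue())
	case o.AggregateValue != nil:
		return o.GetAggregateValue()
	}
	return "" // malformed option: nothing set
}

func main() {
	n := int64(-3)
	fmt.Println(uninterpretedValue(&descriptorpb.UninterpretedOption{NegativeIntValue: &n})) // -3
}
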
- IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"` - PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"` - NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"` - DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"` - StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"` - AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *UninterpretedOption) Reset() { - *x = UninterpretedOption{} - mi := &file_google_protobuf_descriptor_proto_msgTypes[18] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *UninterpretedOption) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*UninterpretedOption) ProtoMessage() {} - -func (x *UninterpretedOption) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[18] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use UninterpretedOption.ProtoReflect.Descriptor instead. -func (*UninterpretedOption) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{18} -} - -func (x *UninterpretedOption) GetName() []*UninterpretedOption_NamePart { - if x != nil { - return x.Name - } - return nil -} - -func (x *UninterpretedOption) GetIdentifierValue() string { - if x != nil && x.IdentifierValue != nil { - return *x.IdentifierValue - } - return "" -} - -func (x *UninterpretedOption) GetPositiveIntValue() uint64 { - if x != nil && x.PositiveIntValue != nil { - return *x.PositiveIntValue - } - return 0 -} - -func (x *UninterpretedOption) GetNegativeIntValue() int64 { - if x != nil && x.NegativeIntValue != nil { - return *x.NegativeIntValue - } - return 0 -} - -func (x *UninterpretedOption) GetDoubleValue() float64 { - if x != nil && x.DoubleValue != nil { - return *x.DoubleValue - } - return 0 -} - -func (x *UninterpretedOption) GetStringValue() []byte { - if x != nil { - return x.StringValue - } - return nil -} - -func (x *UninterpretedOption) GetAggregateValue() string { - if x != nil && x.AggregateValue != nil { - return *x.AggregateValue - } - return "" -} - -// TODO Enums in C++ gencode (and potentially other languages) are -// not well scoped. This means that each of the feature enums below can clash -// with each other. The short names we've chosen maximize call-site -// readability, but leave us very open to this scenario. A future feature will -// be designed and implemented to handle this, hopefully before we ever hit a -// conflict here. 
-type FeatureSet struct { - state protoimpl.MessageState `protogen:"open.v1"` - FieldPresence *FeatureSet_FieldPresence `protobuf:"varint,1,opt,name=field_presence,json=fieldPresence,enum=google.protobuf.FeatureSet_FieldPresence" json:"field_presence,omitempty"` - EnumType *FeatureSet_EnumType `protobuf:"varint,2,opt,name=enum_type,json=enumType,enum=google.protobuf.FeatureSet_EnumType" json:"enum_type,omitempty"` - RepeatedFieldEncoding *FeatureSet_RepeatedFieldEncoding `protobuf:"varint,3,opt,name=repeated_field_encoding,json=repeatedFieldEncoding,enum=google.protobuf.FeatureSet_RepeatedFieldEncoding" json:"repeated_field_encoding,omitempty"` - Utf8Validation *FeatureSet_Utf8Validation `protobuf:"varint,4,opt,name=utf8_validation,json=utf8Validation,enum=google.protobuf.FeatureSet_Utf8Validation" json:"utf8_validation,omitempty"` - MessageEncoding *FeatureSet_MessageEncoding `protobuf:"varint,5,opt,name=message_encoding,json=messageEncoding,enum=google.protobuf.FeatureSet_MessageEncoding" json:"message_encoding,omitempty"` - JsonFormat *FeatureSet_JsonFormat `protobuf:"varint,6,opt,name=json_format,json=jsonFormat,enum=google.protobuf.FeatureSet_JsonFormat" json:"json_format,omitempty"` - extensionFields protoimpl.ExtensionFields - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *FeatureSet) Reset() { - *x = FeatureSet{} - mi := &file_google_protobuf_descriptor_proto_msgTypes[19] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *FeatureSet) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*FeatureSet) ProtoMessage() {} - -func (x *FeatureSet) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[19] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use FeatureSet.ProtoReflect.Descriptor instead. -func (*FeatureSet) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19} -} - -func (x *FeatureSet) GetFieldPresence() FeatureSet_FieldPresence { - if x != nil && x.FieldPresence != nil { - return *x.FieldPresence - } - return FeatureSet_FIELD_PRESENCE_UNKNOWN -} - -func (x *FeatureSet) GetEnumType() FeatureSet_EnumType { - if x != nil && x.EnumType != nil { - return *x.EnumType - } - return FeatureSet_ENUM_TYPE_UNKNOWN -} - -func (x *FeatureSet) GetRepeatedFieldEncoding() FeatureSet_RepeatedFieldEncoding { - if x != nil && x.RepeatedFieldEncoding != nil { - return *x.RepeatedFieldEncoding - } - return FeatureSet_REPEATED_FIELD_ENCODING_UNKNOWN -} - -func (x *FeatureSet) GetUtf8Validation() FeatureSet_Utf8Validation { - if x != nil && x.Utf8Validation != nil { - return *x.Utf8Validation - } - return FeatureSet_UTF8_VALIDATION_UNKNOWN -} - -func (x *FeatureSet) GetMessageEncoding() FeatureSet_MessageEncoding { - if x != nil && x.MessageEncoding != nil { - return *x.MessageEncoding - } - return FeatureSet_MESSAGE_ENCODING_UNKNOWN -} - -func (x *FeatureSet) GetJsonFormat() FeatureSet_JsonFormat { - if x != nil && x.JsonFormat != nil { - return *x.JsonFormat - } - return FeatureSet_JSON_FORMAT_UNKNOWN -} - -// A compiled specification for the defaults of a set of features. These -// messages are generated from FeatureSet extensions and can be used to seed -// feature resolution. 
The resolution with this object becomes a simple search -// for the closest matching edition, followed by proto merges. -type FeatureSetDefaults struct { - state protoimpl.MessageState `protogen:"open.v1"` - Defaults []*FeatureSetDefaults_FeatureSetEditionDefault `protobuf:"bytes,1,rep,name=defaults" json:"defaults,omitempty"` - // The minimum supported edition (inclusive) when this was constructed. - // Editions before this will not have defaults. - MinimumEdition *Edition `protobuf:"varint,4,opt,name=minimum_edition,json=minimumEdition,enum=google.protobuf.Edition" json:"minimum_edition,omitempty"` - // The maximum known edition (inclusive) when this was constructed. Editions - // after this will not have reliable defaults. - MaximumEdition *Edition `protobuf:"varint,5,opt,name=maximum_edition,json=maximumEdition,enum=google.protobuf.Edition" json:"maximum_edition,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *FeatureSetDefaults) Reset() { - *x = FeatureSetDefaults{} - mi := &file_google_protobuf_descriptor_proto_msgTypes[20] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *FeatureSetDefaults) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*FeatureSetDefaults) ProtoMessage() {} - -func (x *FeatureSetDefaults) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[20] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use FeatureSetDefaults.ProtoReflect.Descriptor instead. -func (*FeatureSetDefaults) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20} -} - -func (x *FeatureSetDefaults) GetDefaults() []*FeatureSetDefaults_FeatureSetEditionDefault { - if x != nil { - return x.Defaults - } - return nil -} - -func (x *FeatureSetDefaults) GetMinimumEdition() Edition { - if x != nil && x.MinimumEdition != nil { - return *x.MinimumEdition - } - return Edition_EDITION_UNKNOWN -} - -func (x *FeatureSetDefaults) GetMaximumEdition() Edition { - if x != nil && x.MaximumEdition != nil { - return *x.MaximumEdition - } - return Edition_EDITION_UNKNOWN -} - -// Encapsulates information about the original source file from which a -// FileDescriptorProto was generated. -type SourceCodeInfo struct { - state protoimpl.MessageState `protogen:"open.v1"` - // A Location identifies a piece of source code in a .proto file which - // corresponds to a particular definition. This information is intended - // to be useful to IDEs, code indexers, documentation generators, and similar - // tools. - // - // For example, say we have a file like: - // - // message Foo { - // optional string foo = 1; - // } - // - // Let's look at just the field definition: - // - // optional string foo = 1; - // ^ ^^ ^^ ^ ^^^ - // a bc de f ghi - // - // We have the following locations: - // - // span path represents - // [a,i) [ 4, 0, 2, 0 ] The whole field definition. - // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). - // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). - // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). - // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). - // - // Notes: - // - A location may refer to a repeated field itself (i.e. not to any - // particular index within it). This is used whenever a set of elements are - // logically enclosed in a single code segment. 
For example, an entire - // extend block (possibly containing multiple extension definitions) will - // have an outer location whose path refers to the "extensions" repeated - // field without an index. - // - Multiple locations may have the same path. This happens when a single - // logical declaration is spread out across multiple places. The most - // obvious example is the "extend" block again -- there may be multiple - // extend blocks in the same scope, each of which will have the same path. - // - A location's span is not always a subset of its parent's span. For - // example, the "extendee" of an extension declaration appears at the - // beginning of the "extend" block and is shared by all extensions within - // the block. - // - Just because a location's span is a subset of some other location's span - // does not mean that it is a descendant. For example, a "group" defines - // both a type and a field in a single declaration. Thus, the locations - // corresponding to the type and field and their components will overlap. - // - Code which tries to interpret locations should probably be designed to - // ignore those that it doesn't understand, as more types of locations could - // be recorded in the future. - Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` - extensionFields protoimpl.ExtensionFields - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *SourceCodeInfo) Reset() { - *x = SourceCodeInfo{} - mi := &file_google_protobuf_descriptor_proto_msgTypes[21] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *SourceCodeInfo) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SourceCodeInfo) ProtoMessage() {} - -func (x *SourceCodeInfo) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[21] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SourceCodeInfo.ProtoReflect.Descriptor instead. -func (*SourceCodeInfo) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{21} -} - -func (x *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { - if x != nil { - return x.Location - } - return nil -} - -// Describes the relationship between generated code and its original source -// file. A GeneratedCodeInfo message is associated with only one generated -// source file, but may contain references to different source .proto files. -type GeneratedCodeInfo struct { - state protoimpl.MessageState `protogen:"open.v1"` - // An Annotation connects some span of text in generated code to an element - // of its generating .proto file. 
- Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *GeneratedCodeInfo) Reset() { - *x = GeneratedCodeInfo{} - mi := &file_google_protobuf_descriptor_proto_msgTypes[22] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *GeneratedCodeInfo) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GeneratedCodeInfo) ProtoMessage() {} - -func (x *GeneratedCodeInfo) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[22] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GeneratedCodeInfo.ProtoReflect.Descriptor instead. -func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{22} -} - -func (x *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { - if x != nil { - return x.Annotation - } - return nil -} - -type DescriptorProto_ExtensionRange struct { - state protoimpl.MessageState `protogen:"open.v1"` - Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive. - End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` // Exclusive. - Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *DescriptorProto_ExtensionRange) Reset() { - *x = DescriptorProto_ExtensionRange{} - mi := &file_google_protobuf_descriptor_proto_msgTypes[23] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *DescriptorProto_ExtensionRange) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DescriptorProto_ExtensionRange) ProtoMessage() {} - -func (x *DescriptorProto_ExtensionRange) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[23] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DescriptorProto_ExtensionRange.ProtoReflect.Descriptor instead. -func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{2, 0} -} - -func (x *DescriptorProto_ExtensionRange) GetStart() int32 { - if x != nil && x.Start != nil { - return *x.Start - } - return 0 -} - -func (x *DescriptorProto_ExtensionRange) GetEnd() int32 { - if x != nil && x.End != nil { - return *x.End - } - return 0 -} - -func (x *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions { - if x != nil { - return x.Options - } - return nil -} - -// Range of reserved tag numbers. Reserved tag numbers may not be used by -// fields or extension ranges in the same message. Reserved ranges may -// not overlap. -type DescriptorProto_ReservedRange struct { - state protoimpl.MessageState `protogen:"open.v1"` - Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive. - End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` // Exclusive. 
- unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *DescriptorProto_ReservedRange) Reset() { - *x = DescriptorProto_ReservedRange{} - mi := &file_google_protobuf_descriptor_proto_msgTypes[24] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *DescriptorProto_ReservedRange) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DescriptorProto_ReservedRange) ProtoMessage() {} - -func (x *DescriptorProto_ReservedRange) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[24] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DescriptorProto_ReservedRange.ProtoReflect.Descriptor instead. -func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{2, 1} -} - -func (x *DescriptorProto_ReservedRange) GetStart() int32 { - if x != nil && x.Start != nil { - return *x.Start - } - return 0 -} - -func (x *DescriptorProto_ReservedRange) GetEnd() int32 { - if x != nil && x.End != nil { - return *x.End - } - return 0 -} - -type ExtensionRangeOptions_Declaration struct { - state protoimpl.MessageState `protogen:"open.v1"` - // The extension number declared within the extension range. - Number *int32 `protobuf:"varint,1,opt,name=number" json:"number,omitempty"` - // The fully-qualified name of the extension field. There must be a leading - // dot in front of the full name. - FullName *string `protobuf:"bytes,2,opt,name=full_name,json=fullName" json:"full_name,omitempty"` - // The fully-qualified type name of the extension field. Unlike - // Metadata.type, Declaration.type must have a leading dot for messages - // and enums. - Type *string `protobuf:"bytes,3,opt,name=type" json:"type,omitempty"` - // If true, indicates that the number is reserved in the extension range, - // and any extension field with the number will fail to compile. Set this - // when a declared extension field is deleted. - Reserved *bool `protobuf:"varint,5,opt,name=reserved" json:"reserved,omitempty"` - // If true, indicates that the extension must be defined as repeated. - // Otherwise the extension must be defined as optional. - Repeated *bool `protobuf:"varint,6,opt,name=repeated" json:"repeated,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *ExtensionRangeOptions_Declaration) Reset() { - *x = ExtensionRangeOptions_Declaration{} - mi := &file_google_protobuf_descriptor_proto_msgTypes[25] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ExtensionRangeOptions_Declaration) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ExtensionRangeOptions_Declaration) ProtoMessage() {} - -func (x *ExtensionRangeOptions_Declaration) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[25] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ExtensionRangeOptions_Declaration.ProtoReflect.Descriptor instead. 
-func (*ExtensionRangeOptions_Declaration) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{3, 0} -} - -func (x *ExtensionRangeOptions_Declaration) GetNumber() int32 { - if x != nil && x.Number != nil { - return *x.Number - } - return 0 -} - -func (x *ExtensionRangeOptions_Declaration) GetFullName() string { - if x != nil && x.FullName != nil { - return *x.FullName - } - return "" -} - -func (x *ExtensionRangeOptions_Declaration) GetType() string { - if x != nil && x.Type != nil { - return *x.Type - } - return "" -} - -func (x *ExtensionRangeOptions_Declaration) GetReserved() bool { - if x != nil && x.Reserved != nil { - return *x.Reserved - } - return false -} - -func (x *ExtensionRangeOptions_Declaration) GetRepeated() bool { - if x != nil && x.Repeated != nil { - return *x.Repeated - } - return false -} - -// Range of reserved numeric values. Reserved values may not be used by -// entries in the same enum. Reserved ranges may not overlap. -// -// Note that this is distinct from DescriptorProto.ReservedRange in that it -// is inclusive such that it can appropriately represent the entire int32 -// domain. -type EnumDescriptorProto_EnumReservedRange struct { - state protoimpl.MessageState `protogen:"open.v1"` - Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive. - End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` // Inclusive. - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *EnumDescriptorProto_EnumReservedRange) Reset() { - *x = EnumDescriptorProto_EnumReservedRange{} - mi := &file_google_protobuf_descriptor_proto_msgTypes[26] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *EnumDescriptorProto_EnumReservedRange) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} - -func (x *EnumDescriptorProto_EnumReservedRange) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[26] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use EnumDescriptorProto_EnumReservedRange.ProtoReflect.Descriptor instead. -func (*EnumDescriptorProto_EnumReservedRange) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{6, 0} -} - -func (x *EnumDescriptorProto_EnumReservedRange) GetStart() int32 { - if x != nil && x.Start != nil { - return *x.Start - } - return 0 -} - -func (x *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 { - if x != nil && x.End != nil { - return *x.End - } - return 0 -} - -type FieldOptions_EditionDefault struct { - state protoimpl.MessageState `protogen:"open.v1"` - Edition *Edition `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` - Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` // Textproto value. 
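
Note the asymmetry spelled out in the comments above: DescriptorProto.ReservedRange ends are exclusive, while EnumReservedRange ends are inclusive so that the full int32 domain stays representable. A sketch of the two containment checks (helper names are assumptions):

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

func messageRangeContains(r *descriptorpb.DescriptorProto_ReservedRange, n int32) bool {
	return n >= r.GetStart() && n < r.GetEnd() // end is exclusive
}

func enumRangeContains(r *descriptorpb.EnumDescriptorProto_EnumReservedRange, n int32) bool {
	return n >= r.GetStart() && n <= r.GetEnd() // end is inclusive
}

func main() {
	m := &descriptorpb.DescriptorProto_ReservedRange{Start: proto.Int32(5), End: proto.Int32(10)}
	e := &descriptorpb.EnumDescriptorProto_EnumReservedRange{Start: proto.Int32(5), End: proto.Int32(10)}
	fmt.Println(messageRangeContains(m, 10), enumRangeContains(e, 10)) // false true
}
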
- unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *FieldOptions_EditionDefault) Reset() { - *x = FieldOptions_EditionDefault{} - mi := &file_google_protobuf_descriptor_proto_msgTypes[27] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *FieldOptions_EditionDefault) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*FieldOptions_EditionDefault) ProtoMessage() {} - -func (x *FieldOptions_EditionDefault) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[27] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use FieldOptions_EditionDefault.ProtoReflect.Descriptor instead. -func (*FieldOptions_EditionDefault) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 0} -} - -func (x *FieldOptions_EditionDefault) GetEdition() Edition { - if x != nil && x.Edition != nil { - return *x.Edition - } - return Edition_EDITION_UNKNOWN -} - -func (x *FieldOptions_EditionDefault) GetValue() string { - if x != nil && x.Value != nil { - return *x.Value - } - return "" -} - -// Information about the support window of a feature. -type FieldOptions_FeatureSupport struct { - state protoimpl.MessageState `protogen:"open.v1"` - // The edition that this feature was first available in. In editions - // earlier than this one, the default assigned to EDITION_LEGACY will be - // used, and proto files will not be able to override it. - EditionIntroduced *Edition `protobuf:"varint,1,opt,name=edition_introduced,json=editionIntroduced,enum=google.protobuf.Edition" json:"edition_introduced,omitempty"` - // The edition this feature becomes deprecated in. Using this after this - // edition may trigger warnings. - EditionDeprecated *Edition `protobuf:"varint,2,opt,name=edition_deprecated,json=editionDeprecated,enum=google.protobuf.Edition" json:"edition_deprecated,omitempty"` - // The deprecation warning text if this feature is used after the edition it - // was marked deprecated in. - DeprecationWarning *string `protobuf:"bytes,3,opt,name=deprecation_warning,json=deprecationWarning" json:"deprecation_warning,omitempty"` - // The edition this feature is no longer available in. In editions after - // this one, the last default assigned will be used, and proto files will - // not be able to override it. 
- EditionRemoved *Edition `protobuf:"varint,4,opt,name=edition_removed,json=editionRemoved,enum=google.protobuf.Edition" json:"edition_removed,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *FieldOptions_FeatureSupport) Reset() { - *x = FieldOptions_FeatureSupport{} - mi := &file_google_protobuf_descriptor_proto_msgTypes[28] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *FieldOptions_FeatureSupport) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*FieldOptions_FeatureSupport) ProtoMessage() {} - -func (x *FieldOptions_FeatureSupport) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[28] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use FieldOptions_FeatureSupport.ProtoReflect.Descriptor instead. -func (*FieldOptions_FeatureSupport) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 1} -} - -func (x *FieldOptions_FeatureSupport) GetEditionIntroduced() Edition { - if x != nil && x.EditionIntroduced != nil { - return *x.EditionIntroduced - } - return Edition_EDITION_UNKNOWN -} - -func (x *FieldOptions_FeatureSupport) GetEditionDeprecated() Edition { - if x != nil && x.EditionDeprecated != nil { - return *x.EditionDeprecated - } - return Edition_EDITION_UNKNOWN -} - -func (x *FieldOptions_FeatureSupport) GetDeprecationWarning() string { - if x != nil && x.DeprecationWarning != nil { - return *x.DeprecationWarning - } - return "" -} - -func (x *FieldOptions_FeatureSupport) GetEditionRemoved() Edition { - if x != nil && x.EditionRemoved != nil { - return *x.EditionRemoved - } - return Edition_EDITION_UNKNOWN -} - -// The name of the uninterpreted option. Each string represents a segment in -// a dot-separated name. is_extension is true iff a segment represents an -// extension (denoted with parentheses in options specs in .proto files). -// E.g.,{ ["foo", false], ["bar.baz", true], ["moo", false] } represents -// "foo.(bar.baz).moo". -type UninterpretedOption_NamePart struct { - state protoimpl.MessageState `protogen:"open.v1"` - NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"` - IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *UninterpretedOption_NamePart) Reset() { - *x = UninterpretedOption_NamePart{} - mi := &file_google_protobuf_descriptor_proto_msgTypes[29] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *UninterpretedOption_NamePart) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*UninterpretedOption_NamePart) ProtoMessage() {} - -func (x *UninterpretedOption_NamePart) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[29] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use UninterpretedOption_NamePart.ProtoReflect.Descriptor instead. 
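
The NamePart encoding described above reassembles into the dotted option name, with extension segments parenthesized. A small sketch of that rendering (optionName is a hypothetical helper):

package main

import (
	"fmt"
	"strings"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

func optionName(parts []*descriptorpb.UninterpretedOption_NamePart) string {
	segs := make([]string, 0, len(parts))
	for _, p := range parts {
		if p.GetIsExtension() {
			segs = append(segs, "("+p.GetNamePart()+")") // extension segment
		} else {
			segs = append(segs, p.GetNamePart())
		}
	}
	return strings.Join(segs, ".")
}

func main() {
	part := func(s string, ext bool) *descriptorpb.UninterpretedOption_NamePart {
		return &descriptorpb.UninterpretedOption_NamePart{NamePart: proto.String(s), IsExtension: proto.Bool(ext)}
	}
	fmt.Println(optionName([]*descriptorpb.UninterpretedOption_NamePart{
		part("foo", false), part("bar.baz", true), part("moo", false),
	})) // foo.(bar.baz).moo
}
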
-func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{18, 0} -} - -func (x *UninterpretedOption_NamePart) GetNamePart() string { - if x != nil && x.NamePart != nil { - return *x.NamePart - } - return "" -} - -func (x *UninterpretedOption_NamePart) GetIsExtension() bool { - if x != nil && x.IsExtension != nil { - return *x.IsExtension - } - return false -} - -// A map from every known edition with a unique set of defaults to its -// defaults. Not all editions may be contained here. For a given edition, -// the defaults at the closest matching edition ordered at or before it should -// be used. This field must be in strict ascending order by edition. -type FeatureSetDefaults_FeatureSetEditionDefault struct { - state protoimpl.MessageState `protogen:"open.v1"` - Edition *Edition `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` - // Defaults of features that can be overridden in this edition. - OverridableFeatures *FeatureSet `protobuf:"bytes,4,opt,name=overridable_features,json=overridableFeatures" json:"overridable_features,omitempty"` - // Defaults of features that can't be overridden in this edition. - FixedFeatures *FeatureSet `protobuf:"bytes,5,opt,name=fixed_features,json=fixedFeatures" json:"fixed_features,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *FeatureSetDefaults_FeatureSetEditionDefault) Reset() { - *x = FeatureSetDefaults_FeatureSetEditionDefault{} - mi := &file_google_protobuf_descriptor_proto_msgTypes[30] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *FeatureSetDefaults_FeatureSetEditionDefault) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*FeatureSetDefaults_FeatureSetEditionDefault) ProtoMessage() {} - -func (x *FeatureSetDefaults_FeatureSetEditionDefault) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[30] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use FeatureSetDefaults_FeatureSetEditionDefault.ProtoReflect.Descriptor instead. -func (*FeatureSetDefaults_FeatureSetEditionDefault) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20, 0} -} - -func (x *FeatureSetDefaults_FeatureSetEditionDefault) GetEdition() Edition { - if x != nil && x.Edition != nil { - return *x.Edition - } - return Edition_EDITION_UNKNOWN -} - -func (x *FeatureSetDefaults_FeatureSetEditionDefault) GetOverridableFeatures() *FeatureSet { - if x != nil { - return x.OverridableFeatures - } - return nil -} - -func (x *FeatureSetDefaults_FeatureSetEditionDefault) GetFixedFeatures() *FeatureSet { - if x != nil { - return x.FixedFeatures - } - return nil -} - -type SourceCodeInfo_Location struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Identifies which part of the FileDescriptorProto was defined at this - // location. - // - // Each element is a field number or an index. They form a path from - // the root FileDescriptorProto to the place where the definition appears. 
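
Putting the FeatureSetDefaults comments above into code: resolution is a search for the closest default at or before the target edition (the defaults list is in strict ascending order), followed by a merge in which fixed features win over overridable ones. A hedged sketch, not protobuf-go's actual resolver:

package main

import (
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

func resolveFeatures(d *descriptorpb.FeatureSetDefaults, target descriptorpb.Edition) *descriptorpb.FeatureSet {
	var match *descriptorpb.FeatureSetDefaults_FeatureSetEditionDefault
	for _, def := range d.GetDefaults() {
		if def.GetEdition() > target {
			break // sorted list: later entries are too new for this edition
		}
		match = def
	}
	if match == nil {
		return nil // target predates minimum_edition; no reliable defaults
	}
	fs := &descriptorpb.FeatureSet{}
	if o := match.GetOverridableFeatures(); o != nil {
		proto.Merge(fs, o)
	}
	if f := match.GetFixedFeatures(); f != nil {
		proto.Merge(fs, f) // fixed values cannot be overridden
	}
	return fs
}

func main() { _ = resolveFeatures(&descriptorpb.FeatureSetDefaults{}, descriptorpb.Edition_EDITION_2023) }
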
- // For example, this path: - // - // [ 4, 3, 2, 7, 1 ] - // - // refers to: - // - // file.message_type(3) // 4, 3 - // .field(7) // 2, 7 - // .name() // 1 - // - // This is because FileDescriptorProto.message_type has field number 4: - // - // repeated DescriptorProto message_type = 4; - // - // and DescriptorProto.field has field number 2: - // - // repeated FieldDescriptorProto field = 2; - // - // and FieldDescriptorProto.name has field number 1: - // - // optional string name = 1; - // - // Thus, the above path gives the location of a field name. If we removed - // the last element: - // - // [ 4, 3, 2, 7 ] - // - // this path refers to the whole field declaration (from the beginning - // of the label to the terminating semicolon). - Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` - // Always has exactly three or four elements: start line, start column, - // end line (optional, otherwise assumed same as start line), end column. - // These are packed into a single field for efficiency. Note that line - // and column numbers are zero-based -- typically you will want to add - // 1 to each before displaying to a user. - Span []int32 `protobuf:"varint,2,rep,packed,name=span" json:"span,omitempty"` - // If this SourceCodeInfo represents a complete declaration, these are any - // comments appearing before and after the declaration which appear to be - // attached to the declaration. - // - // A series of line comments appearing on consecutive lines, with no other - // tokens appearing on those lines, will be treated as a single comment. - // - // leading_detached_comments will keep paragraphs of comments that appear - // before (but not connected to) the current element. Each paragraph, - // separated by empty lines, will be one comment element in the repeated - // field. - // - // Only the comment content is provided; comment markers (e.g. //) are - // stripped out. For block comments, leading whitespace and an asterisk - // will be stripped from the beginning of each line other than the first. - // Newlines are included in the output. - // - // Examples: - // - // optional int32 foo = 1; // Comment attached to foo. - // // Comment attached to bar. - // optional int32 bar = 2; - // - // optional string baz = 3; - // // Comment attached to baz. - // // Another line attached to baz. - // - // // Comment attached to moo. - // // - // // Another line attached to moo. - // optional double moo = 4; - // - // // Detached comment for corge. This is not leading or trailing comments - // // to moo or corge because there are blank lines separating it from - // // both. - // - // // Detached comment for corge paragraph 2. - // - // optional string corge = 5; - // /* Block comment attached - // * to corge. Leading asterisks - // * will be removed. */ - // /* Block comment attached to - // * grault. */ - // optional int32 grault = 6; - // - // // ignored detached comments. 
- LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"` - TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"` - LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *SourceCodeInfo_Location) Reset() { - *x = SourceCodeInfo_Location{} - mi := &file_google_protobuf_descriptor_proto_msgTypes[31] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *SourceCodeInfo_Location) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SourceCodeInfo_Location) ProtoMessage() {} - -func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[31] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SourceCodeInfo_Location.ProtoReflect.Descriptor instead. -func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{21, 0} -} - -func (x *SourceCodeInfo_Location) GetPath() []int32 { - if x != nil { - return x.Path - } - return nil -} - -func (x *SourceCodeInfo_Location) GetSpan() []int32 { - if x != nil { - return x.Span - } - return nil -} - -func (x *SourceCodeInfo_Location) GetLeadingComments() string { - if x != nil && x.LeadingComments != nil { - return *x.LeadingComments - } - return "" -} - -func (x *SourceCodeInfo_Location) GetTrailingComments() string { - if x != nil && x.TrailingComments != nil { - return *x.TrailingComments - } - return "" -} - -func (x *SourceCodeInfo_Location) GetLeadingDetachedComments() []string { - if x != nil { - return x.LeadingDetachedComments - } - return nil -} - -type GeneratedCodeInfo_Annotation struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Identifies the element in the original source .proto file. This field - // is formatted the same as SourceCodeInfo.Location.path. - Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` - // Identifies the filesystem path to the original source .proto. - SourceFile *string `protobuf:"bytes,2,opt,name=source_file,json=sourceFile" json:"source_file,omitempty"` - // Identifies the starting offset in bytes in the generated code - // that relates to the identified object. - Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"` - // Identifies the ending offset in bytes in the generated code that - // relates to the identified object. The end offset should be one past - // the last relevant byte (so the length of the text = end - begin). 
- End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` - Semantic *GeneratedCodeInfo_Annotation_Semantic `protobuf:"varint,5,opt,name=semantic,enum=google.protobuf.GeneratedCodeInfo_Annotation_Semantic" json:"semantic,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *GeneratedCodeInfo_Annotation) Reset() { - *x = GeneratedCodeInfo_Annotation{} - mi := &file_google_protobuf_descriptor_proto_msgTypes[32] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *GeneratedCodeInfo_Annotation) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} - -func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[32] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GeneratedCodeInfo_Annotation.ProtoReflect.Descriptor instead. -func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{22, 0} -} - -func (x *GeneratedCodeInfo_Annotation) GetPath() []int32 { - if x != nil { - return x.Path - } - return nil -} - -func (x *GeneratedCodeInfo_Annotation) GetSourceFile() string { - if x != nil && x.SourceFile != nil { - return *x.SourceFile - } - return "" -} - -func (x *GeneratedCodeInfo_Annotation) GetBegin() int32 { - if x != nil && x.Begin != nil { - return *x.Begin - } - return 0 -} - -func (x *GeneratedCodeInfo_Annotation) GetEnd() int32 { - if x != nil && x.End != nil { - return *x.End - } - return 0 -} - -func (x *GeneratedCodeInfo_Annotation) GetSemantic() GeneratedCodeInfo_Annotation_Semantic { - if x != nil && x.Semantic != nil { - return *x.Semantic - } - return GeneratedCodeInfo_Annotation_NONE -} - -var File_google_protobuf_descriptor_proto protoreflect.FileDescriptor - -var file_google_protobuf_descriptor_proto_rawDesc = string([]byte{ - 0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x22, 0x5b, 0x0a, 0x11, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x12, 0x38, 0x0a, 0x04, 0x66, 0x69, 0x6c, 0x65, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x04, 0x66, 0x69, - 0x6c, 0x65, 0x2a, 0x0c, 0x08, 0x80, 0xec, 0xca, 0xff, 0x01, 0x10, 0x81, 0xec, 0xca, 0xff, 0x01, - 0x22, 0x98, 0x05, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, - 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, - 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, - 0x65, 0x6e, 0x63, 0x79, 0x18, 0x03, 0x20, 
0x03, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x65, - 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x2b, 0x0a, 0x11, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, - 0x5f, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0a, 0x20, 0x03, 0x28, - 0x05, 0x52, 0x10, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, - 0x6e, 0x63, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x77, 0x65, 0x61, 0x6b, 0x5f, 0x64, 0x65, 0x70, 0x65, - 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0e, 0x77, 0x65, - 0x61, 0x6b, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x43, 0x0a, 0x0c, - 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, 0x79, 0x70, - 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, - 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, - 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x07, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, - 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x36, 0x0a, 0x07, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x49, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x63, - 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, - 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, - 0x16, 0x0a, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 
0x22, 0xb9, 0x06, 0x0a, 0x0f, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, - 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, - 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, - 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a, 0x6e, 0x65, - 0x73, 0x74, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, - 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, - 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a, 0x0f, 0x65, - 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x05, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, - 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x64, - 0x65, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, - 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x52, 0x09, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a, 0x07, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 
0x6f, - 0x2e, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, - 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, - 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, - 0x6d, 0x65, 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, - 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, - 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40, 0x0a, 0x07, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x37, - 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, - 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, - 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0xcc, 0x04, 0x0a, 0x15, 0x45, 0x78, 0x74, 0x65, - 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, - 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x59, 0x0a, 0x0b, 0x64, - 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, - 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0x88, 0x01, 0x02, 0x52, 0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, - 0x6d, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x56, 0x65, - 0x72, 0x69, 0x66, 
0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x3a, - 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x42, 0x03, 0x88, 0x01, 0x02, - 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x94, - 0x01, 0x0a, 0x0b, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, - 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, - 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6c, 0x6c, 0x4e, - 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x4a, - 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0x34, 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x45, - 0x43, 0x4c, 0x41, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x55, - 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x01, 0x2a, 0x09, 0x08, 0xe8, 0x07, - 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, 0x05, 0x6c, - 0x61, 0x62, 0x65, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, - 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x3e, - 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b, - 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65, - 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, - 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, - 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, - 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b, - 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, 0x01, 0x28, - 0x05, 0x52, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 
0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1b, 0x0a, - 0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, 0x6f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, 0x02, 0x0a, - 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f, - 0x55, 0x42, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, - 0x4c, 0x4f, 0x41, 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, - 0x4e, 0x54, 0x36, 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, - 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, - 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a, 0x09, 0x54, - 0x59, 0x50, 0x45, 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x54, - 0x59, 0x50, 0x45, 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x54, - 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x0e, 0x0a, - 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, 0x0f, 0x0a, - 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d, 0x12, 0x0d, - 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x11, 0x0a, - 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x0f, - 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, - 0x34, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, - 0x33, 0x32, 0x10, 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, - 0x54, 0x36, 0x34, 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x12, - 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, - 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50, 0x45, - 0x41, 0x54, 0x45, 0x44, 0x10, 0x03, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, - 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x22, 0x63, 0x0a, 0x14, 0x4f, 0x6e, - 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 
0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, - 0xe3, 0x02, 0x0a, 0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, - 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x07, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, - 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, - 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, - 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, - 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, 0x75, 0x6d, - 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, - 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, - 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3b, - 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x16, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, - 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x6d, 
0x65, - 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, - 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x89, 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x54, 0x79, - 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, - 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54, - 0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, - 0x10, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, - 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, - 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, - 0x30, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, - 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, - 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, - 0x67, 0x22, 0xad, 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63, - 0x6b, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, 0x75, 0x74, - 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x61, - 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6d, - 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, - 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, 0x76, 0x61, - 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x44, 0x0a, - 0x1d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, - 0x71, 0x75, 0x61, 0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x14, - 0x20, 0x01, 0x28, 
0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x45, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e, 0x64, 0x48, - 0x61, 0x73, 0x68, 0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x69, - 0x6e, 0x67, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, 0x1b, 0x20, - 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, - 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66, 0x38, 0x12, - 0x53, 0x0a, 0x0c, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x18, - 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, - 0x3a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x52, 0x0b, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, - 0x65, 0x46, 0x6f, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, - 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61, 0x63, 0x6b, - 0x61, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, - 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, - 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15, 0x6a, 0x61, - 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, - 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65, 0x6e, 0x65, - 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01, - 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e, - 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, - 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, - 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, - 0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x04, 0x74, - 0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x41, 0x72, 0x65, - 0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a, 0x63, 0x5f, 0x63, 0x6c, 0x61, 0x73, - 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, - 0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, - 0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x73, 0x68, 0x61, 0x72, - 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x77, - 0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x27, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 
0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x28, 0x0a, - 0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, - 0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, 0x68, 0x70, 0x43, 0x6c, 0x61, 0x73, - 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x68, 0x70, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, - 0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x34, 0x0a, 0x16, - 0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x2c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x70, 0x68, - 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, - 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x75, 0x62, 0x79, 0x50, 0x61, - 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, - 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3a, 0x0a, 0x0c, 0x4f, 0x70, 0x74, 0x69, - 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, 0x45, 0x45, - 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45, - 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, - 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, - 0x04, 0x08, 0x2a, 0x10, 0x2b, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x52, 0x14, 0x70, 0x68, 0x70, - 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x73, 0x22, 0xf4, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, - 0x73, 0x65, 0x74, 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x6d, - 0x61, 0x74, 0x12, 0x4c, 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, - 0x64, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, 0x63, - 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, - 0x73, 0x65, 0x52, 0x1c, 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 
0x65, 0x73, 0x73, 0x6f, 0x72, - 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, - 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x65, - 0x6e, 0x74, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, - 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, - 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x0b, - 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, - 0x61, 0x74, 0x65, 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, - 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, - 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, - 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, - 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, - 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x08, - 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0x9d, 0x0d, 0x0a, 0x0c, 0x46, 0x69, 0x65, - 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, 0x74, 0x79, - 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x06, 0x53, - 0x54, 0x52, 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, - 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x61, - 0x63, 0x6b, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, - 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, - 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, - 0x73, 0x65, 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e, 0x76, 0x65, - 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x0f, 0x20, 0x01, 
0x28, - 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, - 0x66, 0x69, 0x65, 0x64, 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, - 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, - 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, - 0x19, 0x0a, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, - 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, - 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, - 0x64, 0x61, 0x63, 0x74, 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, - 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x48, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, 0x20, 0x03, - 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, - 0x70, 0x65, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x57, 0x0a, 0x10, 0x65, - 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, - 0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, - 0x75, 0x6c, 0x74, 0x52, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, - 0x75, 0x6c, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, - 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x55, 0x0a, - 0x0f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, - 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, - 0x70, 0x6f, 0x72, 0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, - 0x70, 0x6f, 0x72, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, - 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, - 0x74, 0x65, 0x64, 
0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x5a, - 0x0a, 0x0e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, - 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x96, 0x02, 0x0a, 0x0e, 0x46, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x47, 0x0a, - 0x12, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x74, 0x72, 0x6f, 0x64, 0x75, - 0x63, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x74, 0x72, - 0x6f, 0x64, 0x75, 0x63, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x12, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, - 0x2f, 0x0a, 0x13, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x77, - 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x64, 0x65, - 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, - 0x12, 0x41, 0x0a, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x6d, 0x6f, - 0x76, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x6d, 0x6f, - 0x76, 0x65, 0x64, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, - 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x52, 0x44, - 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, 0x49, 0x45, - 0x43, 0x45, 0x10, 0x02, 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d, - 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0d, 0x0a, - 0x09, 0x4a, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, - 0x4a, 0x53, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a, 0x0f, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x15, - 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, - 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, - 0x4f, 0x4e, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, - 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 
0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, - 0x10, 0x02, 0x22, 0x8c, 0x02, 0x0a, 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, - 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, - 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, - 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x46, 0x49, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, - 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, 0x4e, 0x5f, - 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, - 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03, - 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x46, 0x49, 0x45, 0x4c, 0x44, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, - 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05, 0x12, 0x14, - 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, - 0x55, 0x4d, 0x10, 0x06, 0x12, 0x1a, 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, - 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x10, 0x07, - 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54, 0x41, 0x52, - 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x10, - 0x09, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, - 0x10, 0x05, 0x4a, 0x04, 0x08, 0x12, 0x10, 0x13, 0x22, 0xac, 0x01, 0x0a, 0x0c, 0x4f, 0x6e, 0x65, - 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, - 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, - 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, - 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, - 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd1, 0x02, 0x0a, 0x0b, 0x45, 0x6e, 0x75, 0x6d, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, 0x6f, 0x77, - 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x6c, - 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, - 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, - 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, - 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 
0x65, 0x64, 0x5f, 0x6c, 0x65, - 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, - 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x42, - 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, - 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, - 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, - 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, - 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, - 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, - 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0xd8, 0x02, 0x0a, 0x10, - 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, - 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, - 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, - 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x55, 0x0a, 0x0f, 0x66, 0x65, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, - 0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, - 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, - 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 
0x07, - 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd5, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, - 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, - 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, - 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x99, - 0x03, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, - 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, - 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, 0x69, 0x64, 0x65, 0x6d, 0x70, - 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x22, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, - 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, - 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, - 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x23, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, - 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x50, 0x0a, - 0x10, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, - 0x6c, 0x12, 0x17, 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, - 0x5f, 0x55, 0x4e, 
0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4e, 0x4f, - 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, 0x10, 0x01, 0x12, - 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x2a, - 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a, 0x13, 0x55, - 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x52, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, - 0x69, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, - 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x70, 0x6f, - 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, - 0x0a, 0x12, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x65, 0x67, 0x61, - 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, - 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x01, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, - 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x5f, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, 0x67, 0x67, - 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a, 0x08, 0x4e, - 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x5f, - 0x70, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61, 0x6d, 0x65, - 0x50, 0x61, 0x72, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x45, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x0a, 0x0a, 0x0a, 0x46, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x12, 0x91, 0x01, 0x0a, 0x0e, 0x66, 0x69, 0x65, 0x6c, 0x64, - 0x5f, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x46, 0x69, 0x65, - 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x3f, 0x88, 0x01, 0x01, 0x98, - 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, - 0x49, 0x54, 0x18, 0x84, 0x07, 0xa2, 0x01, 
0x0d, 0x12, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, - 0x49, 0x54, 0x18, 0xe7, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, - 0x49, 0x54, 0x18, 0xe8, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0d, 0x66, 0x69, 0x65, - 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x6c, 0x0a, 0x09, 0x65, 0x6e, - 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x54, - 0x79, 0x70, 0x65, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, - 0x0b, 0x12, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x09, 0x12, - 0x04, 0x4f, 0x50, 0x45, 0x4e, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x08, - 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x98, 0x01, 0x0a, 0x17, 0x72, 0x65, 0x70, - 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x65, 0x6e, 0x63, 0x6f, - 0x64, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, - 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x2d, 0x88, - 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, - 0x41, 0x4e, 0x44, 0x45, 0x44, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x50, 0x41, 0x43, - 0x4b, 0x45, 0x44, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x15, 0x72, 0x65, - 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, - 0x69, 0x6e, 0x67, 0x12, 0x7e, 0x0a, 0x0f, 0x75, 0x74, 0x66, 0x38, 0x5f, 0x76, 0x61, 0x6c, 0x69, - 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x55, 0x74, 0x66, 0x38, 0x56, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, - 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x18, 0x84, 0x07, 0xa2, - 0x01, 0x0b, 0x12, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, - 0x08, 0xe8, 0x07, 0x52, 0x0e, 0x75, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x7e, 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x65, - 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x26, 0x88, 0x01, 0x01, 0x98, - 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x14, 0x12, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, - 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x18, 0x84, 0x07, 0xb2, 0x01, 0x03, 0x08, - 0xe8, 0x07, 0x52, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, - 0x69, 0x6e, 0x67, 0x12, 0x82, 0x01, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 
0x6e, 0x5f, 0x66, 0x6f, 0x72, - 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, - 0x74, 0x42, 0x39, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, - 0x01, 0x17, 0x12, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f, - 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, 0x41, 0x4c, - 0x4c, 0x4f, 0x57, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0a, 0x6a, 0x73, - 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x5c, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c, - 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x46, 0x49, 0x45, - 0x4c, 0x44, 0x5f, 0x50, 0x52, 0x45, 0x53, 0x45, 0x4e, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, - 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, - 0x54, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10, - 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x52, 0x45, 0x51, 0x55, - 0x49, 0x52, 0x45, 0x44, 0x10, 0x03, 0x22, 0x37, 0x0a, 0x08, 0x45, 0x6e, 0x75, 0x6d, 0x54, 0x79, - 0x70, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4f, 0x50, 0x45, - 0x4e, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x10, 0x02, 0x22, - 0x56, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x23, 0x0a, 0x1f, 0x52, 0x45, 0x50, 0x45, - 0x41, 0x54, 0x45, 0x44, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, - 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, - 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, - 0x41, 0x4e, 0x44, 0x45, 0x44, 0x10, 0x02, 0x22, 0x49, 0x0a, 0x0e, 0x55, 0x74, 0x66, 0x38, 0x56, - 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x55, 0x54, 0x46, - 0x38, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, - 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, - 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x03, 0x22, 0x04, 0x08, 0x01, - 0x10, 0x01, 0x22, 0x53, 0x0a, 0x0f, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, - 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x1c, 0x0a, 0x18, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, - 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, - 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52, - 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, 0x4c, 0x49, - 0x4d, 0x49, 0x54, 0x45, 0x44, 0x10, 0x02, 0x22, 0x48, 0x0a, 0x0a, 0x4a, 0x73, 0x6f, 0x6e, 0x46, - 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x17, 0x0a, 0x13, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, 0x46, 0x4f, - 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, - 0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10, 0x01, 0x12, 0x16, 0x0a, 0x12, 0x4c, 0x45, 
0x47, - 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x10, - 0x02, 0x2a, 0x06, 0x08, 0xe8, 0x07, 0x10, 0x8b, 0x4e, 0x2a, 0x06, 0x08, 0x8b, 0x4e, 0x10, 0x90, - 0x4e, 0x2a, 0x06, 0x08, 0x90, 0x4e, 0x10, 0x91, 0x4e, 0x4a, 0x06, 0x08, 0xe7, 0x07, 0x10, 0xe8, - 0x07, 0x22, 0xef, 0x03, 0x0a, 0x12, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, - 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x08, 0x64, 0x65, 0x66, 0x61, - 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x2e, - 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, - 0x74, 0x73, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, - 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, - 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xf8, 0x01, 0x0a, 0x18, 0x46, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, - 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4e, 0x0a, 0x14, 0x6f, 0x76, 0x65, - 0x72, 0x72, 0x69, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x53, 0x65, 0x74, 0x52, 0x13, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x61, 0x62, 0x6c, - 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x42, 0x0a, 0x0e, 0x66, 0x69, 0x78, - 0x65, 0x64, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x0d, - 0x66, 0x69, 0x78, 0x65, 0x64, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x4a, 0x04, 0x08, - 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x73, 0x22, 0xb5, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, - 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 
0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, 0x0a, - 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, - 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, - 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x42, - 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, 0x61, - 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, - 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, - 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, - 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x74, - 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x74, - 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x2a, 0x0c, 0x08, - 0x80, 0xec, 0xca, 0xff, 0x01, 0x10, 0x81, 0xec, 0xca, 0xff, 0x01, 0x22, 0xd0, 0x02, 0x0a, 0x11, - 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, - 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, - 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, - 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x65, 0x67, 0x69, - 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x12, 0x10, - 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, - 0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, - 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x61, - 0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a, 
0x08, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, - 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x45, - 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, 0x10, 0x02, 0x2a, 0xa7, - 0x02, 0x0a, 0x07, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x13, 0x0a, 0x0f, 0x45, 0x44, - 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, - 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4c, 0x45, 0x47, 0x41, 0x43, - 0x59, 0x10, 0x84, 0x07, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, - 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x32, 0x10, 0xe6, 0x07, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, - 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, 0x10, 0xe7, 0x07, 0x12, 0x11, - 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30, 0x32, 0x33, 0x10, 0xe8, - 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30, 0x32, - 0x34, 0x10, 0xe9, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, - 0x31, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x01, 0x12, 0x17, 0x0a, - 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, - 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, - 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x37, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, - 0x59, 0x10, 0x9d, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, - 0x5f, 0x39, 0x39, 0x39, 0x39, 0x38, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, - 0x10, 0x9e, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, - 0x39, 0x39, 0x39, 0x39, 0x39, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, - 0x9f, 0x8d, 0x06, 0x12, 0x13, 0x0a, 0x0b, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, - 0x41, 0x58, 0x10, 0xff, 0xff, 0xff, 0xff, 0x07, 0x42, 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, - 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, - 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, - 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, - 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65, - 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, -}) - -var ( - file_google_protobuf_descriptor_proto_rawDescOnce sync.Once - file_google_protobuf_descriptor_proto_rawDescData []byte -) - -func file_google_protobuf_descriptor_proto_rawDescGZIP() []byte { - file_google_protobuf_descriptor_proto_rawDescOnce.Do(func() { - file_google_protobuf_descriptor_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_descriptor_proto_rawDesc), len(file_google_protobuf_descriptor_proto_rawDesc))) - }) - return file_google_protobuf_descriptor_proto_rawDescData -} - -var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 17) -var file_google_protobuf_descriptor_proto_msgTypes = 
make([]protoimpl.MessageInfo, 33) -var file_google_protobuf_descriptor_proto_goTypes = []any{ - (Edition)(0), // 0: google.protobuf.Edition - (ExtensionRangeOptions_VerificationState)(0), // 1: google.protobuf.ExtensionRangeOptions.VerificationState - (FieldDescriptorProto_Type)(0), // 2: google.protobuf.FieldDescriptorProto.Type - (FieldDescriptorProto_Label)(0), // 3: google.protobuf.FieldDescriptorProto.Label - (FileOptions_OptimizeMode)(0), // 4: google.protobuf.FileOptions.OptimizeMode - (FieldOptions_CType)(0), // 5: google.protobuf.FieldOptions.CType - (FieldOptions_JSType)(0), // 6: google.protobuf.FieldOptions.JSType - (FieldOptions_OptionRetention)(0), // 7: google.protobuf.FieldOptions.OptionRetention - (FieldOptions_OptionTargetType)(0), // 8: google.protobuf.FieldOptions.OptionTargetType - (MethodOptions_IdempotencyLevel)(0), // 9: google.protobuf.MethodOptions.IdempotencyLevel - (FeatureSet_FieldPresence)(0), // 10: google.protobuf.FeatureSet.FieldPresence - (FeatureSet_EnumType)(0), // 11: google.protobuf.FeatureSet.EnumType - (FeatureSet_RepeatedFieldEncoding)(0), // 12: google.protobuf.FeatureSet.RepeatedFieldEncoding - (FeatureSet_Utf8Validation)(0), // 13: google.protobuf.FeatureSet.Utf8Validation - (FeatureSet_MessageEncoding)(0), // 14: google.protobuf.FeatureSet.MessageEncoding - (FeatureSet_JsonFormat)(0), // 15: google.protobuf.FeatureSet.JsonFormat - (GeneratedCodeInfo_Annotation_Semantic)(0), // 16: google.protobuf.GeneratedCodeInfo.Annotation.Semantic - (*FileDescriptorSet)(nil), // 17: google.protobuf.FileDescriptorSet - (*FileDescriptorProto)(nil), // 18: google.protobuf.FileDescriptorProto - (*DescriptorProto)(nil), // 19: google.protobuf.DescriptorProto - (*ExtensionRangeOptions)(nil), // 20: google.protobuf.ExtensionRangeOptions - (*FieldDescriptorProto)(nil), // 21: google.protobuf.FieldDescriptorProto - (*OneofDescriptorProto)(nil), // 22: google.protobuf.OneofDescriptorProto - (*EnumDescriptorProto)(nil), // 23: google.protobuf.EnumDescriptorProto - (*EnumValueDescriptorProto)(nil), // 24: google.protobuf.EnumValueDescriptorProto - (*ServiceDescriptorProto)(nil), // 25: google.protobuf.ServiceDescriptorProto - (*MethodDescriptorProto)(nil), // 26: google.protobuf.MethodDescriptorProto - (*FileOptions)(nil), // 27: google.protobuf.FileOptions - (*MessageOptions)(nil), // 28: google.protobuf.MessageOptions - (*FieldOptions)(nil), // 29: google.protobuf.FieldOptions - (*OneofOptions)(nil), // 30: google.protobuf.OneofOptions - (*EnumOptions)(nil), // 31: google.protobuf.EnumOptions - (*EnumValueOptions)(nil), // 32: google.protobuf.EnumValueOptions - (*ServiceOptions)(nil), // 33: google.protobuf.ServiceOptions - (*MethodOptions)(nil), // 34: google.protobuf.MethodOptions - (*UninterpretedOption)(nil), // 35: google.protobuf.UninterpretedOption - (*FeatureSet)(nil), // 36: google.protobuf.FeatureSet - (*FeatureSetDefaults)(nil), // 37: google.protobuf.FeatureSetDefaults - (*SourceCodeInfo)(nil), // 38: google.protobuf.SourceCodeInfo - (*GeneratedCodeInfo)(nil), // 39: google.protobuf.GeneratedCodeInfo - (*DescriptorProto_ExtensionRange)(nil), // 40: google.protobuf.DescriptorProto.ExtensionRange - (*DescriptorProto_ReservedRange)(nil), // 41: google.protobuf.DescriptorProto.ReservedRange - (*ExtensionRangeOptions_Declaration)(nil), // 42: google.protobuf.ExtensionRangeOptions.Declaration - (*EnumDescriptorProto_EnumReservedRange)(nil), // 43: google.protobuf.EnumDescriptorProto.EnumReservedRange - (*FieldOptions_EditionDefault)(nil), // 44: 
google.protobuf.FieldOptions.EditionDefault - (*FieldOptions_FeatureSupport)(nil), // 45: google.protobuf.FieldOptions.FeatureSupport - (*UninterpretedOption_NamePart)(nil), // 46: google.protobuf.UninterpretedOption.NamePart - (*FeatureSetDefaults_FeatureSetEditionDefault)(nil), // 47: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault - (*SourceCodeInfo_Location)(nil), // 48: google.protobuf.SourceCodeInfo.Location - (*GeneratedCodeInfo_Annotation)(nil), // 49: google.protobuf.GeneratedCodeInfo.Annotation -} -var file_google_protobuf_descriptor_proto_depIdxs = []int32{ - 18, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto - 19, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto - 23, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto - 25, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto - 21, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto - 27, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions - 38, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo - 0, // 7: google.protobuf.FileDescriptorProto.edition:type_name -> google.protobuf.Edition - 21, // 8: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto - 21, // 9: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto - 19, // 10: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto - 23, // 11: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto - 40, // 12: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange - 22, // 13: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto - 28, // 14: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions - 41, // 15: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange - 35, // 16: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 42, // 17: google.protobuf.ExtensionRangeOptions.declaration:type_name -> google.protobuf.ExtensionRangeOptions.Declaration - 36, // 18: google.protobuf.ExtensionRangeOptions.features:type_name -> google.protobuf.FeatureSet - 1, // 19: google.protobuf.ExtensionRangeOptions.verification:type_name -> google.protobuf.ExtensionRangeOptions.VerificationState - 3, // 20: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label - 2, // 21: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type - 29, // 22: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions - 30, // 23: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions - 24, // 24: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto - 31, // 25: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions - 43, // 26: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange - 32, // 27: 
google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions - 26, // 28: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto - 33, // 29: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions - 34, // 30: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions - 4, // 31: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode - 36, // 32: google.protobuf.FileOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 33: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 36, // 34: google.protobuf.MessageOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 35: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 5, // 36: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType - 6, // 37: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType - 7, // 38: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention - 8, // 39: google.protobuf.FieldOptions.targets:type_name -> google.protobuf.FieldOptions.OptionTargetType - 44, // 40: google.protobuf.FieldOptions.edition_defaults:type_name -> google.protobuf.FieldOptions.EditionDefault - 36, // 41: google.protobuf.FieldOptions.features:type_name -> google.protobuf.FeatureSet - 45, // 42: google.protobuf.FieldOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport - 35, // 43: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 36, // 44: google.protobuf.OneofOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 45: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 36, // 46: google.protobuf.EnumOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 47: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 36, // 48: google.protobuf.EnumValueOptions.features:type_name -> google.protobuf.FeatureSet - 45, // 49: google.protobuf.EnumValueOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport - 35, // 50: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 36, // 51: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 52: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 9, // 53: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel - 36, // 54: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 55: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 46, // 56: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart - 10, // 57: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence - 11, // 58: google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType - 12, // 59: google.protobuf.FeatureSet.repeated_field_encoding:type_name -> google.protobuf.FeatureSet.RepeatedFieldEncoding - 13, // 60: google.protobuf.FeatureSet.utf8_validation:type_name -> 
google.protobuf.FeatureSet.Utf8Validation - 14, // 61: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding - 15, // 62: google.protobuf.FeatureSet.json_format:type_name -> google.protobuf.FeatureSet.JsonFormat - 47, // 63: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault - 0, // 64: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition - 0, // 65: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition - 48, // 66: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location - 49, // 67: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation - 20, // 68: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions - 0, // 69: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition - 0, // 70: google.protobuf.FieldOptions.FeatureSupport.edition_introduced:type_name -> google.protobuf.Edition - 0, // 71: google.protobuf.FieldOptions.FeatureSupport.edition_deprecated:type_name -> google.protobuf.Edition - 0, // 72: google.protobuf.FieldOptions.FeatureSupport.edition_removed:type_name -> google.protobuf.Edition - 0, // 73: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition - 36, // 74: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features:type_name -> google.protobuf.FeatureSet - 36, // 75: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features:type_name -> google.protobuf.FeatureSet - 16, // 76: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic - 77, // [77:77] is the sub-list for method output_type - 77, // [77:77] is the sub-list for method input_type - 77, // [77:77] is the sub-list for extension type_name - 77, // [77:77] is the sub-list for extension extendee - 0, // [0:77] is the sub-list for field type_name -} - -func init() { file_google_protobuf_descriptor_proto_init() } -func file_google_protobuf_descriptor_proto_init() { - if File_google_protobuf_descriptor_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_descriptor_proto_rawDesc), len(file_google_protobuf_descriptor_proto_rawDesc)), - NumEnums: 17, - NumMessages: 33, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_google_protobuf_descriptor_proto_goTypes, - DependencyIndexes: file_google_protobuf_descriptor_proto_depIdxs, - EnumInfos: file_google_protobuf_descriptor_proto_enumTypes, - MessageInfos: file_google_protobuf_descriptor_proto_msgTypes, - }.Build() - File_google_protobuf_descriptor_proto = out.File - file_google_protobuf_descriptor_proto_goTypes = nil - file_google_protobuf_descriptor_proto_depIdxs = nil -} diff --git a/tools/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go b/tools/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go deleted file mode 100644 index 28d24bad7..000000000 --- a/tools/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go +++ /dev/null @@ -1,345 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2023 Google Inc. 
All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file or at -// https://developers.google.com/open-source/licenses/bsd - -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/protobuf/go_features.proto - -package gofeaturespb - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - descriptorpb "google.golang.org/protobuf/types/descriptorpb" - reflect "reflect" - sync "sync" - unsafe "unsafe" -) - -type GoFeatures_APILevel int32 - -const ( - // API_LEVEL_UNSPECIFIED results in selecting the OPEN API, - // but needs to be a separate value to distinguish between - // an explicitly set api level or a missing api level. - GoFeatures_API_LEVEL_UNSPECIFIED GoFeatures_APILevel = 0 - GoFeatures_API_OPEN GoFeatures_APILevel = 1 - GoFeatures_API_HYBRID GoFeatures_APILevel = 2 - GoFeatures_API_OPAQUE GoFeatures_APILevel = 3 -) - -// Enum value maps for GoFeatures_APILevel. -var ( - GoFeatures_APILevel_name = map[int32]string{ - 0: "API_LEVEL_UNSPECIFIED", - 1: "API_OPEN", - 2: "API_HYBRID", - 3: "API_OPAQUE", - } - GoFeatures_APILevel_value = map[string]int32{ - "API_LEVEL_UNSPECIFIED": 0, - "API_OPEN": 1, - "API_HYBRID": 2, - "API_OPAQUE": 3, - } -) - -func (x GoFeatures_APILevel) Enum() *GoFeatures_APILevel { - p := new(GoFeatures_APILevel) - *p = x - return p -} - -func (x GoFeatures_APILevel) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (GoFeatures_APILevel) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_go_features_proto_enumTypes[0].Descriptor() -} - -func (GoFeatures_APILevel) Type() protoreflect.EnumType { - return &file_google_protobuf_go_features_proto_enumTypes[0] -} - -func (x GoFeatures_APILevel) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Do not use. -func (x *GoFeatures_APILevel) UnmarshalJSON(b []byte) error { - num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) - if err != nil { - return err - } - *x = GoFeatures_APILevel(num) - return nil -} - -// Deprecated: Use GoFeatures_APILevel.Descriptor instead. -func (GoFeatures_APILevel) EnumDescriptor() ([]byte, []int) { - return file_google_protobuf_go_features_proto_rawDescGZIP(), []int{0, 0} -} - -type GoFeatures_StripEnumPrefix int32 - -const ( - GoFeatures_STRIP_ENUM_PREFIX_UNSPECIFIED GoFeatures_StripEnumPrefix = 0 - GoFeatures_STRIP_ENUM_PREFIX_KEEP GoFeatures_StripEnumPrefix = 1 - GoFeatures_STRIP_ENUM_PREFIX_GENERATE_BOTH GoFeatures_StripEnumPrefix = 2 - GoFeatures_STRIP_ENUM_PREFIX_STRIP GoFeatures_StripEnumPrefix = 3 -) - -// Enum value maps for GoFeatures_StripEnumPrefix. 
-var ( - GoFeatures_StripEnumPrefix_name = map[int32]string{ - 0: "STRIP_ENUM_PREFIX_UNSPECIFIED", - 1: "STRIP_ENUM_PREFIX_KEEP", - 2: "STRIP_ENUM_PREFIX_GENERATE_BOTH", - 3: "STRIP_ENUM_PREFIX_STRIP", - } - GoFeatures_StripEnumPrefix_value = map[string]int32{ - "STRIP_ENUM_PREFIX_UNSPECIFIED": 0, - "STRIP_ENUM_PREFIX_KEEP": 1, - "STRIP_ENUM_PREFIX_GENERATE_BOTH": 2, - "STRIP_ENUM_PREFIX_STRIP": 3, - } -) - -func (x GoFeatures_StripEnumPrefix) Enum() *GoFeatures_StripEnumPrefix { - p := new(GoFeatures_StripEnumPrefix) - *p = x - return p -} - -func (x GoFeatures_StripEnumPrefix) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (GoFeatures_StripEnumPrefix) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_go_features_proto_enumTypes[1].Descriptor() -} - -func (GoFeatures_StripEnumPrefix) Type() protoreflect.EnumType { - return &file_google_protobuf_go_features_proto_enumTypes[1] -} - -func (x GoFeatures_StripEnumPrefix) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Do not use. -func (x *GoFeatures_StripEnumPrefix) UnmarshalJSON(b []byte) error { - num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) - if err != nil { - return err - } - *x = GoFeatures_StripEnumPrefix(num) - return nil -} - -// Deprecated: Use GoFeatures_StripEnumPrefix.Descriptor instead. -func (GoFeatures_StripEnumPrefix) EnumDescriptor() ([]byte, []int) { - return file_google_protobuf_go_features_proto_rawDescGZIP(), []int{0, 1} -} - -type GoFeatures struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Whether or not to generate the deprecated UnmarshalJSON method for enums. - // Can only be true for proto using the Open Struct api. - LegacyUnmarshalJsonEnum *bool `protobuf:"varint,1,opt,name=legacy_unmarshal_json_enum,json=legacyUnmarshalJsonEnum" json:"legacy_unmarshal_json_enum,omitempty"` - // One of OPEN, HYBRID or OPAQUE. - ApiLevel *GoFeatures_APILevel `protobuf:"varint,2,opt,name=api_level,json=apiLevel,enum=pb.GoFeatures_APILevel" json:"api_level,omitempty"` - StripEnumPrefix *GoFeatures_StripEnumPrefix `protobuf:"varint,3,opt,name=strip_enum_prefix,json=stripEnumPrefix,enum=pb.GoFeatures_StripEnumPrefix" json:"strip_enum_prefix,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *GoFeatures) Reset() { - *x = GoFeatures{} - mi := &file_google_protobuf_go_features_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *GoFeatures) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GoFeatures) ProtoMessage() {} - -func (x *GoFeatures) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_go_features_proto_msgTypes[0] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GoFeatures.ProtoReflect.Descriptor instead. 
-func (*GoFeatures) Descriptor() ([]byte, []int) { - return file_google_protobuf_go_features_proto_rawDescGZIP(), []int{0} -} - -func (x *GoFeatures) GetLegacyUnmarshalJsonEnum() bool { - if x != nil && x.LegacyUnmarshalJsonEnum != nil { - return *x.LegacyUnmarshalJsonEnum - } - return false -} - -func (x *GoFeatures) GetApiLevel() GoFeatures_APILevel { - if x != nil && x.ApiLevel != nil { - return *x.ApiLevel - } - return GoFeatures_API_LEVEL_UNSPECIFIED -} - -func (x *GoFeatures) GetStripEnumPrefix() GoFeatures_StripEnumPrefix { - if x != nil && x.StripEnumPrefix != nil { - return *x.StripEnumPrefix - } - return GoFeatures_STRIP_ENUM_PREFIX_UNSPECIFIED -} - -var file_google_protobuf_go_features_proto_extTypes = []protoimpl.ExtensionInfo{ - { - ExtendedType: (*descriptorpb.FeatureSet)(nil), - ExtensionType: (*GoFeatures)(nil), - Field: 1002, - Name: "pb.go", - Tag: "bytes,1002,opt,name=go", - Filename: "google/protobuf/go_features.proto", - }, -} - -// Extension fields to descriptorpb.FeatureSet. -var ( - // optional pb.GoFeatures go = 1002; - E_Go = &file_google_protobuf_go_features_proto_extTypes[0] -) - -var File_google_protobuf_go_features_proto protoreflect.FileDescriptor - -var file_google_protobuf_go_features_proto_rawDesc = string([]byte{ - 0x0a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x67, 0x6f, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x70, 0x62, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xab, 0x05, 0x0a, 0x0a, 0x47, 0x6f, - 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0xbe, 0x01, 0x0a, 0x1a, 0x6c, 0x65, 0x67, - 0x61, 0x63, 0x79, 0x5f, 0x75, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, 0x6c, 0x5f, 0x6a, 0x73, - 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x42, 0x80, 0x01, - 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x74, 0x72, - 0x75, 0x65, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x18, - 0xe7, 0x07, 0xb2, 0x01, 0x5b, 0x08, 0xe8, 0x07, 0x10, 0xe8, 0x07, 0x1a, 0x53, 0x54, 0x68, 0x65, - 0x20, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x20, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, - 0x6c, 0x4a, 0x53, 0x4f, 0x4e, 0x20, 0x41, 0x50, 0x49, 0x20, 0x69, 0x73, 0x20, 0x64, 0x65, 0x70, - 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x77, 0x69, 0x6c, 0x6c, - 0x20, 0x62, 0x65, 0x20, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x20, 0x61, - 0x20, 0x66, 0x75, 0x74, 0x75, 0x72, 0x65, 0x20, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, - 0x52, 0x17, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, - 0x6c, 0x4a, 0x73, 0x6f, 0x6e, 0x45, 0x6e, 0x75, 0x6d, 0x12, 0x74, 0x0a, 0x09, 0x61, 0x70, 0x69, - 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x70, - 0x62, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x41, 0x50, 0x49, - 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x42, 0x3e, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01, 0x01, - 0xa2, 0x01, 0x1a, 0x12, 0x15, 0x41, 0x50, 0x49, 0x5f, 0x4c, 0x45, 0x56, 0x45, 0x4c, 0x5f, 0x55, - 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0f, - 0x12, 0x0a, 0x41, 0x50, 0x49, 
0x5f, 0x4f, 0x50, 0x41, 0x51, 0x55, 0x45, 0x18, 0xe9, 0x07, 0xb2, - 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x08, 0x61, 0x70, 0x69, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, - 0x7c, 0x0a, 0x11, 0x73, 0x74, 0x72, 0x69, 0x70, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x70, 0x72, - 0x65, 0x66, 0x69, 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1e, 0x2e, 0x70, 0x62, 0x2e, - 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x70, - 0x45, 0x6e, 0x75, 0x6d, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x42, 0x30, 0x88, 0x01, 0x01, 0x98, - 0x01, 0x06, 0x98, 0x01, 0x07, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x1b, 0x12, 0x16, 0x53, 0x54, 0x52, - 0x49, 0x50, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x4b, - 0x45, 0x45, 0x50, 0x18, 0x84, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe9, 0x07, 0x52, 0x0f, 0x73, 0x74, - 0x72, 0x69, 0x70, 0x45, 0x6e, 0x75, 0x6d, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x22, 0x53, 0x0a, - 0x08, 0x41, 0x50, 0x49, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x19, 0x0a, 0x15, 0x41, 0x50, 0x49, - 0x5f, 0x4c, 0x45, 0x56, 0x45, 0x4c, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, - 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x41, 0x50, 0x49, 0x5f, 0x4f, 0x50, 0x45, 0x4e, - 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x41, 0x50, 0x49, 0x5f, 0x48, 0x59, 0x42, 0x52, 0x49, 0x44, - 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x41, 0x50, 0x49, 0x5f, 0x4f, 0x50, 0x41, 0x51, 0x55, 0x45, - 0x10, 0x03, 0x22, 0x92, 0x01, 0x0a, 0x0f, 0x53, 0x74, 0x72, 0x69, 0x70, 0x45, 0x6e, 0x75, 0x6d, - 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x54, 0x52, 0x49, 0x50, 0x5f, - 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x55, 0x4e, 0x53, 0x50, - 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1a, 0x0a, 0x16, 0x53, 0x54, 0x52, - 0x49, 0x50, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x4b, - 0x45, 0x45, 0x50, 0x10, 0x01, 0x12, 0x23, 0x0a, 0x1f, 0x53, 0x54, 0x52, 0x49, 0x50, 0x5f, 0x45, - 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x47, 0x45, 0x4e, 0x45, 0x52, - 0x41, 0x54, 0x45, 0x5f, 0x42, 0x4f, 0x54, 0x48, 0x10, 0x02, 0x12, 0x1b, 0x0a, 0x17, 0x53, 0x54, - 0x52, 0x49, 0x50, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, - 0x53, 0x54, 0x52, 0x49, 0x50, 0x10, 0x03, 0x3a, 0x3c, 0x0a, 0x02, 0x67, 0x6f, 0x12, 0x1b, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x18, 0xea, 0x07, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x73, 0x52, 0x02, 0x67, 0x6f, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x67, 0x6f, 0x66, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x73, 0x70, 0x62, -}) - -var ( - file_google_protobuf_go_features_proto_rawDescOnce sync.Once - file_google_protobuf_go_features_proto_rawDescData []byte -) - -func file_google_protobuf_go_features_proto_rawDescGZIP() []byte { - file_google_protobuf_go_features_proto_rawDescOnce.Do(func() { - file_google_protobuf_go_features_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_go_features_proto_rawDesc), len(file_google_protobuf_go_features_proto_rawDesc))) - }) - return 
file_google_protobuf_go_features_proto_rawDescData -} - -var file_google_protobuf_go_features_proto_enumTypes = make([]protoimpl.EnumInfo, 2) -var file_google_protobuf_go_features_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_protobuf_go_features_proto_goTypes = []any{ - (GoFeatures_APILevel)(0), // 0: pb.GoFeatures.APILevel - (GoFeatures_StripEnumPrefix)(0), // 1: pb.GoFeatures.StripEnumPrefix - (*GoFeatures)(nil), // 2: pb.GoFeatures - (*descriptorpb.FeatureSet)(nil), // 3: google.protobuf.FeatureSet -} -var file_google_protobuf_go_features_proto_depIdxs = []int32{ - 0, // 0: pb.GoFeatures.api_level:type_name -> pb.GoFeatures.APILevel - 1, // 1: pb.GoFeatures.strip_enum_prefix:type_name -> pb.GoFeatures.StripEnumPrefix - 3, // 2: pb.go:extendee -> google.protobuf.FeatureSet - 2, // 3: pb.go:type_name -> pb.GoFeatures - 4, // [4:4] is the sub-list for method output_type - 4, // [4:4] is the sub-list for method input_type - 3, // [3:4] is the sub-list for extension type_name - 2, // [2:3] is the sub-list for extension extendee - 0, // [0:2] is the sub-list for field type_name -} - -func init() { file_google_protobuf_go_features_proto_init() } -func file_google_protobuf_go_features_proto_init() { - if File_google_protobuf_go_features_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_go_features_proto_rawDesc), len(file_google_protobuf_go_features_proto_rawDesc)), - NumEnums: 2, - NumMessages: 1, - NumExtensions: 1, - NumServices: 0, - }, - GoTypes: file_google_protobuf_go_features_proto_goTypes, - DependencyIndexes: file_google_protobuf_go_features_proto_depIdxs, - EnumInfos: file_google_protobuf_go_features_proto_enumTypes, - MessageInfos: file_google_protobuf_go_features_proto_msgTypes, - ExtensionInfos: file_google_protobuf_go_features_proto_extTypes, - }.Build() - File_google_protobuf_go_features_proto = out.File - file_google_protobuf_go_features_proto_goTypes = nil - file_google_protobuf_go_features_proto_depIdxs = nil -} diff --git a/tools/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/tools/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go deleted file mode 100644 index 497da66e9..000000000 --- a/tools/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go +++ /dev/null @@ -1,479 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/protobuf/any.proto - -// Package anypb contains generated types for google/protobuf/any.proto. -// -// The Any message is a dynamic representation of any other message value. -// It is functionally a tuple of the full name of the remote message type and -// the serialized bytes of the remote message value. -// -// # Constructing an Any -// -// An Any message containing another message value is constructed using New: -// -// any, err := anypb.New(m) -// if err != nil { -// ... // handle error -// } -// ... // make use of any -// -// # Unmarshaling an Any -// -// With a populated Any message, the underlying message can be serialized into -// a remote concrete message value in a few ways. -// -// If the exact concrete type is known, then a new (or pre-existing) instance -// of that message can be passed to the UnmarshalTo method: -// -// m := new(foopb.MyMessage) -// if err := any.UnmarshalTo(m); err != nil { -// ... // handle error -// } -// ... // make use of m -// -// If the exact concrete type is not known, then the UnmarshalNew method can be -// used to unmarshal the contents into a new instance of the remote message type: -// -// m, err := any.UnmarshalNew() -// if err != nil { -// ... // handle error -// } -// ... // make use of m -// -// UnmarshalNew uses the global type registry to resolve the message type and -// construct a new instance of that message to unmarshal into. In order for a -// message type to appear in the global registry, the Go type representing that -// protobuf message type must be linked into the Go binary. For messages -// generated by protoc-gen-go, this is achieved through an import of the -// generated Go package representing a .proto file. -// -// A common pattern with UnmarshalNew is to use a type switch with the resulting -// proto.Message value: -// -// switch m := m.(type) { -// case *foopb.MyMessage: -// ... // make use of m as a *foopb.MyMessage -// case *barpb.OtherMessage: -// ... // make use of m as a *barpb.OtherMessage -// case *bazpb.SomeMessage: -// ... // make use of m as a *bazpb.SomeMessage -// } -// -// This pattern ensures that the generated packages containing the message types -// listed in the case clauses are linked into the Go binary and therefore also -// registered in the global registry. -// -// # Type checking an Any -// -// In order to type check whether an Any message represents some other message, -// then use the MessageIs method: -// -// if any.MessageIs((*foopb.MyMessage)(nil)) { -// ... 
// make use of any, knowing that it contains a foopb.MyMessage
-// }
-//
-// The MessageIs method can also be used with an allocated instance of the target
-// message type if the intention is to unmarshal into it if the type matches:
-//
-// m := new(foopb.MyMessage)
-// if any.MessageIs(m) {
-// if err := any.UnmarshalTo(m); err != nil {
-// ... // handle error
-// }
-// ... // make use of m
-// }
-package anypb
-
-import (
- proto "google.golang.org/protobuf/proto"
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoregistry "google.golang.org/protobuf/reflect/protoregistry"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- reflect "reflect"
- strings "strings"
- sync "sync"
- unsafe "unsafe"
-)
-
-// `Any` contains an arbitrary serialized protocol buffer message along with a
-// URL that describes the type of the serialized message.
-//
-// Protobuf library provides support to pack/unpack Any values in the form
-// of utility functions or additional generated methods of the Any type.
-//
-// Example 1: Pack and unpack a message in C++.
-//
-// Foo foo = ...;
-// Any any;
-// any.PackFrom(foo);
-// ...
-// if (any.UnpackTo(&foo)) {
-// ...
-// }
-//
-// Example 2: Pack and unpack a message in Java.
-//
-// Foo foo = ...;
-// Any any = Any.pack(foo);
-// ...
-// if (any.is(Foo.class)) {
-// foo = any.unpack(Foo.class);
-// }
-// // or ...
-// if (any.isSameTypeAs(Foo.getDefaultInstance())) {
-// foo = any.unpack(Foo.getDefaultInstance());
-// }
-//
-// Example 3: Pack and unpack a message in Python.
-//
-// foo = Foo(...)
-// any = Any()
-// any.Pack(foo)
-// ...
-// if any.Is(Foo.DESCRIPTOR):
-// any.Unpack(foo)
-// ...
-//
-// Example 4: Pack and unpack a message in Go
-//
-// foo := &pb.Foo{...}
-// any, err := anypb.New(foo)
-// if err != nil {
-// ...
-// }
-// ...
-// foo := &pb.Foo{}
-// if err := any.UnmarshalTo(foo); err != nil {
-// ...
-// }
-//
-// The pack methods provided by protobuf library will by default use
-// 'type.googleapis.com/full.type.name' as the type URL and the unpack
-// methods only use the fully qualified type name after the last '/'
-// in the type URL, for example "foo.bar.com/x/y.z" will yield type
-// name "y.z".
-//
-// JSON
-// ====
-// The JSON representation of an `Any` value uses the regular
-// representation of the deserialized, embedded message, with an
-// additional field `@type` which contains the type URL. Example:
-//
-// package google.profile;
-// message Person {
-// string first_name = 1;
-// string last_name = 2;
-// }
-//
-// {
-// "@type": "type.googleapis.com/google.profile.Person",
-// "firstName": <string>,
-// "lastName": <string>
-// }
-//
-// If the embedded message type is well-known and has a custom JSON
-// representation, that representation will be embedded adding a field
-// `value` which holds the custom JSON in addition to the `@type`
-// field. Example (for message [google.protobuf.Duration][]):
-//
-// {
-// "@type": "type.googleapis.com/google.protobuf.Duration",
-// "value": "1.212s"
-// }
-type Any struct {
- state protoimpl.MessageState `protogen:"open.v1"`
- // A URL/resource name that uniquely identifies the type of the serialized
- // protocol buffer message. This string must contain at least
- // one "/" character. The last segment of the URL's path must represent
- // the fully qualified name of the type (as in
- // `path/google.protobuf.Duration`). The name should be in a canonical form
- // (e.g., leading "." is not accepted).
- // - // In practice, teams usually precompile into the binary all types that they - // expect it to use in the context of Any. However, for URLs which use the - // scheme `http`, `https`, or no scheme, one can optionally set up a type - // server that maps type URLs to message definitions as follows: - // - // - If no scheme is provided, `https` is assumed. - // - An HTTP GET on the URL must yield a [google.protobuf.Type][] - // value in binary format, or produce an error. - // - Applications are allowed to cache lookup results based on the - // URL, or have them precompiled into a binary to avoid any - // lookup. Therefore, binary compatibility needs to be preserved - // on changes to types. (Use versioned type names to manage - // breaking changes.) - // - // Note: this functionality is not currently available in the official - // protobuf release, and it is not used for type URLs beginning with - // type.googleapis.com. As of May 2023, there are no widely used type server - // implementations and no plans to implement one. - // - // Schemes other than `http`, `https` (or the empty scheme) might be - // used with implementation specific semantics. - TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` - // Must be a valid serialized protocol buffer of the above specified type. - Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -// New marshals src into a new Any instance. -func New(src proto.Message) (*Any, error) { - dst := new(Any) - if err := dst.MarshalFrom(src); err != nil { - return nil, err - } - return dst, nil -} - -// MarshalFrom marshals src into dst as the underlying message -// using the provided marshal options. -// -// If no options are specified, call dst.MarshalFrom instead. -func MarshalFrom(dst *Any, src proto.Message, opts proto.MarshalOptions) error { - const urlPrefix = "type.googleapis.com/" - if src == nil { - return protoimpl.X.NewError("invalid nil source message") - } - b, err := opts.Marshal(src) - if err != nil { - return err - } - dst.TypeUrl = urlPrefix + string(src.ProtoReflect().Descriptor().FullName()) - dst.Value = b - return nil -} - -// UnmarshalTo unmarshals the underlying message from src into dst -// using the provided unmarshal options. -// It reports an error if dst is not of the right message type. -// -// If no options are specified, call src.UnmarshalTo instead. -func UnmarshalTo(src *Any, dst proto.Message, opts proto.UnmarshalOptions) error { - if src == nil { - return protoimpl.X.NewError("invalid nil source message") - } - if !src.MessageIs(dst) { - got := dst.ProtoReflect().Descriptor().FullName() - want := src.MessageName() - return protoimpl.X.NewError("mismatched message type: got %q, want %q", got, want) - } - return opts.Unmarshal(src.GetValue(), dst) -} - -// UnmarshalNew unmarshals the underlying message from src into dst, -// which is newly created message using a type resolved from the type URL. -// The message type is resolved according to opt.Resolver, -// which should implement protoregistry.MessageTypeResolver. -// It reports an error if the underlying message type could not be resolved. -// -// If no options are specified, call src.UnmarshalNew instead. 
-func UnmarshalNew(src *Any, opts proto.UnmarshalOptions) (dst proto.Message, err error) { - if src.GetTypeUrl() == "" { - return nil, protoimpl.X.NewError("invalid empty type URL") - } - if opts.Resolver == nil { - opts.Resolver = protoregistry.GlobalTypes - } - r, ok := opts.Resolver.(protoregistry.MessageTypeResolver) - if !ok { - return nil, protoregistry.NotFound - } - mt, err := r.FindMessageByURL(src.GetTypeUrl()) - if err != nil { - if err == protoregistry.NotFound { - return nil, err - } - return nil, protoimpl.X.NewError("could not resolve %q: %v", src.GetTypeUrl(), err) - } - dst = mt.New().Interface() - return dst, opts.Unmarshal(src.GetValue(), dst) -} - -// MessageIs reports whether the underlying message is of the same type as m. -func (x *Any) MessageIs(m proto.Message) bool { - if m == nil { - return false - } - url := x.GetTypeUrl() - name := string(m.ProtoReflect().Descriptor().FullName()) - if !strings.HasSuffix(url, name) { - return false - } - return len(url) == len(name) || url[len(url)-len(name)-1] == '/' -} - -// MessageName reports the full name of the underlying message, -// returning an empty string if invalid. -func (x *Any) MessageName() protoreflect.FullName { - url := x.GetTypeUrl() - name := protoreflect.FullName(url) - if i := strings.LastIndexByte(url, '/'); i >= 0 { - name = name[i+len("/"):] - } - if !name.IsValid() { - return "" - } - return name -} - -// MarshalFrom marshals m into x as the underlying message. -func (x *Any) MarshalFrom(m proto.Message) error { - return MarshalFrom(x, m, proto.MarshalOptions{}) -} - -// UnmarshalTo unmarshals the contents of the underlying message of x into m. -// It resets m before performing the unmarshal operation. -// It reports an error if m is not of the right message type. -func (x *Any) UnmarshalTo(m proto.Message) error { - return UnmarshalTo(x, m, proto.UnmarshalOptions{}) -} - -// UnmarshalNew unmarshals the contents of the underlying message of x into -// a newly allocated message of the specified type. -// It reports an error if the underlying message type could not be resolved. -func (x *Any) UnmarshalNew() (proto.Message, error) { - return UnmarshalNew(x, proto.UnmarshalOptions{}) -} - -func (x *Any) Reset() { - *x = Any{} - mi := &file_google_protobuf_any_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *Any) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Any) ProtoMessage() {} - -func (x *Any) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_any_proto_msgTypes[0] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Any.ProtoReflect.Descriptor instead. 
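The doc comments in the hunk above describe the full pack/unpack surface of Any (New, MarshalFrom, UnmarshalTo, UnmarshalNew, MessageIs, MessageName). A minimal sketch of that round trip, using the durationpb well-known type as the payload purely for illustration:

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/anypb"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	// Pack: New records the type URL and the serialized payload.
	a, err := anypb.New(durationpb.New(3 * time.Second))
	if err != nil {
		panic(err)
	}

	// Unpack when the concrete type is known.
	d := new(durationpb.Duration)
	if a.MessageIs(d) {
		if err := a.UnmarshalTo(d); err != nil {
			panic(err)
		}
		fmt.Println(d.AsDuration()) // 3s
	}

	// Unpack when it is not: UnmarshalNew resolves the type from the
	// global registry (linked in here via the durationpb import).
	m, err := a.UnmarshalNew()
	if err != nil {
		panic(err)
	}
	fmt.Println(m.ProtoReflect().Descriptor().FullName()) // google.protobuf.Duration
}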
-func (*Any) Descriptor() ([]byte, []int) { - return file_google_protobuf_any_proto_rawDescGZIP(), []int{0} -} - -func (x *Any) GetTypeUrl() string { - if x != nil { - return x.TypeUrl - } - return "" -} - -func (x *Any) GetValue() []byte { - if x != nil { - return x.Value - } - return nil -} - -var File_google_protobuf_any_proto protoreflect.FileDescriptor - -var file_google_protobuf_any_proto_rawDesc = string([]byte{ - 0x0a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, 0x36, 0x0a, 0x03, - 0x41, 0x6e, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x79, 0x70, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x14, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x42, 0x76, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x08, 0x41, 0x6e, 0x79, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, - 0x61, 0x6e, 0x79, 0x70, 0x62, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, - 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, -}) - -var ( - file_google_protobuf_any_proto_rawDescOnce sync.Once - file_google_protobuf_any_proto_rawDescData []byte -) - -func file_google_protobuf_any_proto_rawDescGZIP() []byte { - file_google_protobuf_any_proto_rawDescOnce.Do(func() { - file_google_protobuf_any_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_any_proto_rawDesc), len(file_google_protobuf_any_proto_rawDesc))) - }) - return file_google_protobuf_any_proto_rawDescData -} - -var file_google_protobuf_any_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_protobuf_any_proto_goTypes = []any{ - (*Any)(nil), // 0: google.protobuf.Any -} -var file_google_protobuf_any_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_google_protobuf_any_proto_init() } -func file_google_protobuf_any_proto_init() { - if File_google_protobuf_any_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_any_proto_rawDesc), len(file_google_protobuf_any_proto_rawDesc)), - NumEnums: 0, - NumMessages: 1, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_google_protobuf_any_proto_goTypes, - DependencyIndexes: file_google_protobuf_any_proto_depIdxs, - MessageInfos: file_google_protobuf_any_proto_msgTypes, - }.Build() - File_google_protobuf_any_proto = out.File - 
file_google_protobuf_any_proto_goTypes = nil - file_google_protobuf_any_proto_depIdxs = nil -} diff --git a/tools/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/tools/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go deleted file mode 100644 index 193880d18..000000000 --- a/tools/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go +++ /dev/null @@ -1,357 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/protobuf/duration.proto - -// Package durationpb contains generated types for google/protobuf/duration.proto. -// -// The Duration message represents a signed span of time. -// -// # Conversion to a Go Duration -// -// The AsDuration method can be used to convert a Duration message to a -// standard Go time.Duration value: -// -// d := dur.AsDuration() -// ... // make use of d as a time.Duration -// -// Converting to a time.Duration is a common operation so that the extensive -// set of time-based operations provided by the time package can be leveraged. -// See https://golang.org/pkg/time for more information. -// -// The AsDuration method performs the conversion on a best-effort basis. -// Durations with denormal values (e.g., nanoseconds beyond -99999999 and -// +99999999, inclusive; or seconds and nanoseconds with opposite signs) -// are normalized during the conversion to a time.Duration. To manually check for -// invalid Duration per the documented limitations in duration.proto, -// additionally call the CheckValid method: -// -// if err := dur.CheckValid(); err != nil { -// ... // handle error -// } -// -// Note that the documented limitations in duration.proto does not protect a -// Duration from overflowing the representable range of a time.Duration in Go. 
-// The AsDuration method uses saturation arithmetic such that an overflow clamps -// the resulting value to the closest representable value (e.g., math.MaxInt64 -// for positive overflow and math.MinInt64 for negative overflow). -// -// # Conversion from a Go Duration -// -// The durationpb.New function can be used to construct a Duration message -// from a standard Go time.Duration value: -// -// dur := durationpb.New(d) -// ... // make use of d as a *durationpb.Duration -package durationpb - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - math "math" - reflect "reflect" - sync "sync" - time "time" - unsafe "unsafe" -) - -// A Duration represents a signed, fixed-length span of time represented -// as a count of seconds and fractions of seconds at nanosecond -// resolution. It is independent of any calendar and concepts like "day" -// or "month". It is related to Timestamp in that the difference between -// two Timestamp values is a Duration and it can be added or subtracted -// from a Timestamp. Range is approximately +-10,000 years. -// -// # Examples -// -// Example 1: Compute Duration from two Timestamps in pseudo code. -// -// Timestamp start = ...; -// Timestamp end = ...; -// Duration duration = ...; -// -// duration.seconds = end.seconds - start.seconds; -// duration.nanos = end.nanos - start.nanos; -// -// if (duration.seconds < 0 && duration.nanos > 0) { -// duration.seconds += 1; -// duration.nanos -= 1000000000; -// } else if (duration.seconds > 0 && duration.nanos < 0) { -// duration.seconds -= 1; -// duration.nanos += 1000000000; -// } -// -// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. -// -// Timestamp start = ...; -// Duration duration = ...; -// Timestamp end = ...; -// -// end.seconds = start.seconds + duration.seconds; -// end.nanos = start.nanos + duration.nanos; -// -// if (end.nanos < 0) { -// end.seconds -= 1; -// end.nanos += 1000000000; -// } else if (end.nanos >= 1000000000) { -// end.seconds += 1; -// end.nanos -= 1000000000; -// } -// -// Example 3: Compute Duration from datetime.timedelta in Python. -// -// td = datetime.timedelta(days=3, minutes=10) -// duration = Duration() -// duration.FromTimedelta(td) -// -// # JSON Mapping -// -// In JSON format, the Duration type is encoded as a string rather than an -// object, where the string ends in the suffix "s" (indicating seconds) and -// is preceded by the number of seconds, with nanoseconds expressed as -// fractional seconds. For example, 3 seconds with 0 nanoseconds should be -// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should -// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 -// microsecond should be expressed in JSON format as "3.000001s". -type Duration struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Signed seconds of the span of time. Must be from -315,576,000,000 - // to +315,576,000,000 inclusive. Note: these bounds are computed from: - // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years - Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` - // Signed fractions of a second at nanosecond resolution of the span - // of time. Durations less than one second are represented with a 0 - // `seconds` field and a positive or negative `nanos` field. For durations - // of one second or more, a non-zero value for the `nanos` field must be - // of the same sign as the `seconds` field. 
Must be from -999,999,999 - // to +999,999,999 inclusive. - Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -// New constructs a new Duration from the provided time.Duration. -func New(d time.Duration) *Duration { - nanos := d.Nanoseconds() - secs := nanos / 1e9 - nanos -= secs * 1e9 - return &Duration{Seconds: int64(secs), Nanos: int32(nanos)} -} - -// AsDuration converts x to a time.Duration, -// returning the closest duration value in the event of overflow. -func (x *Duration) AsDuration() time.Duration { - secs := x.GetSeconds() - nanos := x.GetNanos() - d := time.Duration(secs) * time.Second - overflow := d/time.Second != time.Duration(secs) - d += time.Duration(nanos) * time.Nanosecond - overflow = overflow || (secs < 0 && nanos < 0 && d > 0) - overflow = overflow || (secs > 0 && nanos > 0 && d < 0) - if overflow { - switch { - case secs < 0: - return time.Duration(math.MinInt64) - case secs > 0: - return time.Duration(math.MaxInt64) - } - } - return d -} - -// IsValid reports whether the duration is valid. -// It is equivalent to CheckValid == nil. -func (x *Duration) IsValid() bool { - return x.check() == 0 -} - -// CheckValid returns an error if the duration is invalid. -// In particular, it checks whether the value is within the range of -// -10000 years to +10000 years inclusive. -// An error is reported for a nil Duration. -func (x *Duration) CheckValid() error { - switch x.check() { - case invalidNil: - return protoimpl.X.NewError("invalid nil Duration") - case invalidUnderflow: - return protoimpl.X.NewError("duration (%v) exceeds -10000 years", x) - case invalidOverflow: - return protoimpl.X.NewError("duration (%v) exceeds +10000 years", x) - case invalidNanosRange: - return protoimpl.X.NewError("duration (%v) has out-of-range nanos", x) - case invalidNanosSign: - return protoimpl.X.NewError("duration (%v) has seconds and nanos with different signs", x) - default: - return nil - } -} - -const ( - _ = iota - invalidNil - invalidUnderflow - invalidOverflow - invalidNanosRange - invalidNanosSign -) - -func (x *Duration) check() uint { - const absDuration = 315576000000 // 10000yr * 365.25day/yr * 24hr/day * 60min/hr * 60sec/min - secs := x.GetSeconds() - nanos := x.GetNanos() - switch { - case x == nil: - return invalidNil - case secs < -absDuration: - return invalidUnderflow - case secs > +absDuration: - return invalidOverflow - case nanos <= -1e9 || nanos >= +1e9: - return invalidNanosRange - case (secs > 0 && nanos < 0) || (secs < 0 && nanos > 0): - return invalidNanosSign - default: - return 0 - } -} - -func (x *Duration) Reset() { - *x = Duration{} - mi := &file_google_protobuf_duration_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *Duration) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Duration) ProtoMessage() {} - -func (x *Duration) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_duration_proto_msgTypes[0] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Duration.ProtoReflect.Descriptor instead. 
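The validity rules and the saturating AsDuration conversion documented above are easy to see end to end; a small sketch with values picked to exercise each branch of check:

package main

import (
	"fmt"
	"math"
	"time"

	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	// Round trip through the message type.
	d := durationpb.New(90 * time.Minute)
	fmt.Println(d.GetSeconds(), d.GetNanos()) // 5400 0
	fmt.Println(d.AsDuration())               // 1h30m0s

	// Seconds and nanos with opposite signs violate duration.proto.
	bad := &durationpb.Duration{Seconds: 1, Nanos: -1}
	fmt.Println(bad.IsValid())    // false
	fmt.Println(bad.CheckValid()) // ...has seconds and nanos with different signs

	// Exactly 10000 years is valid per duration.proto but overflows Go's
	// time.Duration, so AsDuration saturates at math.MaxInt64.
	huge := &durationpb.Duration{Seconds: 315576000000}
	fmt.Println(huge.CheckValid() == nil)                           // true
	fmt.Println(huge.AsDuration() == time.Duration(math.MaxInt64)) // true
}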
-func (*Duration) Descriptor() ([]byte, []int) { - return file_google_protobuf_duration_proto_rawDescGZIP(), []int{0} -} - -func (x *Duration) GetSeconds() int64 { - if x != nil { - return x.Seconds - } - return 0 -} - -func (x *Duration) GetNanos() int32 { - if x != nil { - return x.Nanos - } - return 0 -} - -var File_google_protobuf_duration_proto protoreflect.FileDescriptor - -var file_google_protobuf_duration_proto_rawDesc = string([]byte{ - 0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x22, 0x3a, 0x0a, 0x08, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, - 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, - 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42, 0x83, 0x01, - 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0d, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x31, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, - 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x64, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, - 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, - 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -}) - -var ( - file_google_protobuf_duration_proto_rawDescOnce sync.Once - file_google_protobuf_duration_proto_rawDescData []byte -) - -func file_google_protobuf_duration_proto_rawDescGZIP() []byte { - file_google_protobuf_duration_proto_rawDescOnce.Do(func() { - file_google_protobuf_duration_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_duration_proto_rawDesc), len(file_google_protobuf_duration_proto_rawDesc))) - }) - return file_google_protobuf_duration_proto_rawDescData -} - -var file_google_protobuf_duration_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_protobuf_duration_proto_goTypes = []any{ - (*Duration)(nil), // 0: google.protobuf.Duration -} -var file_google_protobuf_duration_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_google_protobuf_duration_proto_init() } -func file_google_protobuf_duration_proto_init() { - if File_google_protobuf_duration_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_duration_proto_rawDesc), len(file_google_protobuf_duration_proto_rawDesc)), - NumEnums: 0, - NumMessages: 1, - NumExtensions: 0, - 
NumServices: 0, - }, - GoTypes: file_google_protobuf_duration_proto_goTypes, - DependencyIndexes: file_google_protobuf_duration_proto_depIdxs, - MessageInfos: file_google_protobuf_duration_proto_msgTypes, - }.Build() - File_google_protobuf_duration_proto = out.File - file_google_protobuf_duration_proto_goTypes = nil - file_google_protobuf_duration_proto_depIdxs = nil -} diff --git a/tools/vendor/gopkg.in/ini.v1/.editorconfig b/tools/vendor/gopkg.in/ini.v1/.editorconfig deleted file mode 100644 index 4a2d9180f..000000000 --- a/tools/vendor/gopkg.in/ini.v1/.editorconfig +++ /dev/null @@ -1,12 +0,0 @@ -# http://editorconfig.org - -root = true - -[*] -charset = utf-8 -end_of_line = lf -insert_final_newline = true -trim_trailing_whitespace = true - -[*_test.go] -trim_trailing_whitespace = false diff --git a/tools/vendor/gopkg.in/ini.v1/.gitignore b/tools/vendor/gopkg.in/ini.v1/.gitignore deleted file mode 100644 index 588388bda..000000000 --- a/tools/vendor/gopkg.in/ini.v1/.gitignore +++ /dev/null @@ -1,7 +0,0 @@ -testdata/conf_out.ini -ini.sublime-project -ini.sublime-workspace -testdata/conf_reflect.ini -.idea -/.vscode -.DS_Store diff --git a/tools/vendor/gopkg.in/ini.v1/.golangci.yml b/tools/vendor/gopkg.in/ini.v1/.golangci.yml deleted file mode 100644 index 631e36925..000000000 --- a/tools/vendor/gopkg.in/ini.v1/.golangci.yml +++ /dev/null @@ -1,27 +0,0 @@ -linters-settings: - staticcheck: - checks: [ - "all", - "-SA1019" # There are valid use cases of strings.Title - ] - nakedret: - max-func-lines: 0 # Disallow any unnamed return statement - -linters: - enable: - - deadcode - - errcheck - - gosimple - - govet - - ineffassign - - staticcheck - - structcheck - - typecheck - - unused - - varcheck - - nakedret - - gofmt - - rowserrcheck - - unconvert - - goimports - - unparam diff --git a/tools/vendor/gopkg.in/ini.v1/LICENSE b/tools/vendor/gopkg.in/ini.v1/LICENSE deleted file mode 100644 index d361bbcdf..000000000 --- a/tools/vendor/gopkg.in/ini.v1/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ -Apache License -Version 2.0, January 2004 -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - -"License" shall mean the terms and conditions for use, reproduction, and -distribution as defined by Sections 1 through 9 of this document. - -"Licensor" shall mean the copyright owner or entity authorized by the copyright -owner that is granting the License. - -"Legal Entity" shall mean the union of the acting entity and all other entities -that control, are controlled by, or are under common control with that entity. -For the purposes of this definition, "control" means (i) the power, direct or -indirect, to cause the direction or management of such entity, whether by -contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the -outstanding shares, or (iii) beneficial ownership of such entity. - -"You" (or "Your") shall mean an individual or Legal Entity exercising -permissions granted by this License. - -"Source" form shall mean the preferred form for making modifications, including -but not limited to software source code, documentation source, and configuration -files. - -"Object" form shall mean any form resulting from mechanical transformation or -translation of a Source form, including but not limited to compiled object code, -generated documentation, and conversions to other media types. 
- -"Work" shall mean the work of authorship, whether in Source or Object form, made -available under the License, as indicated by a copyright notice that is included -in or attached to the work (an example is provided in the Appendix below). - -"Derivative Works" shall mean any work, whether in Source or Object form, that -is based on (or derived from) the Work and for which the editorial revisions, -annotations, elaborations, or other modifications represent, as a whole, an -original work of authorship. For the purposes of this License, Derivative Works -shall not include works that remain separable from, or merely link (or bind by -name) to the interfaces of, the Work and Derivative Works thereof. - -"Contribution" shall mean any work of authorship, including the original version -of the Work and any modifications or additions to that Work or Derivative Works -thereof, that is intentionally submitted to Licensor for inclusion in the Work -by the copyright owner or by an individual or Legal Entity authorized to submit -on behalf of the copyright owner. For the purposes of this definition, -"submitted" means any form of electronic, verbal, or written communication sent -to the Licensor or its representatives, including but not limited to -communication on electronic mailing lists, source code control systems, and -issue tracking systems that are managed by, or on behalf of, the Licensor for -the purpose of discussing and improving the Work, but excluding communication -that is conspicuously marked or otherwise designated in writing by the copyright -owner as "Not a Contribution." - -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf -of whom a Contribution has been received by Licensor and subsequently -incorporated within the Work. - -2. Grant of Copyright License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable copyright license to reproduce, prepare Derivative Works of, -publicly display, publicly perform, sublicense, and distribute the Work and such -Derivative Works in Source or Object form. - -3. Grant of Patent License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable (except as stated in this section) patent license to make, have -made, use, offer to sell, sell, import, and otherwise transfer the Work, where -such license applies only to those patent claims licensable by such Contributor -that are necessarily infringed by their Contribution(s) alone or by combination -of their Contribution(s) with the Work to which such Contribution(s) was -submitted. If You institute patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Work or a -Contribution incorporated within the Work constitutes direct or contributory -patent infringement, then any patent licenses granted to You under this License -for that Work shall terminate as of the date such litigation is filed. - -4. Redistribution. 
- -You may reproduce and distribute copies of the Work or Derivative Works thereof -in any medium, with or without modifications, and in Source or Object form, -provided that You meet the following conditions: - -You must give any other recipients of the Work or Derivative Works a copy of -this License; and -You must cause any modified files to carry prominent notices stating that You -changed the files; and -You must retain, in the Source form of any Derivative Works that You distribute, -all copyright, patent, trademark, and attribution notices from the Source form -of the Work, excluding those notices that do not pertain to any part of the -Derivative Works; and -If the Work includes a "NOTICE" text file as part of its distribution, then any -Derivative Works that You distribute must include a readable copy of the -attribution notices contained within such NOTICE file, excluding those notices -that do not pertain to any part of the Derivative Works, in at least one of the -following places: within a NOTICE text file distributed as part of the -Derivative Works; within the Source form or documentation, if provided along -with the Derivative Works; or, within a display generated by the Derivative -Works, if and wherever such third-party notices normally appear. The contents of -the NOTICE file are for informational purposes only and do not modify the -License. You may add Your own attribution notices within Derivative Works that -You distribute, alongside or as an addendum to the NOTICE text from the Work, -provided that such additional attribution notices cannot be construed as -modifying the License. -You may add Your own copyright statement to Your modifications and may provide -additional or different license terms and conditions for use, reproduction, or -distribution of Your modifications, or for any such Derivative Works as a whole, -provided Your use, reproduction, and distribution of the Work otherwise complies -with the conditions stated in this License. - -5. Submission of Contributions. - -Unless You explicitly state otherwise, any Contribution intentionally submitted -for inclusion in the Work by You to the Licensor shall be under the terms and -conditions of this License, without any additional terms or conditions. -Notwithstanding the above, nothing herein shall supersede or modify the terms of -any separate license agreement you may have executed with Licensor regarding -such Contributions. - -6. Trademarks. - -This License does not grant permission to use the trade names, trademarks, -service marks, or product names of the Licensor, except as required for -reasonable and customary use in describing the origin of the Work and -reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. - -Unless required by applicable law or agreed to in writing, Licensor provides the -Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, -including, without limitation, any warranties or conditions of TITLE, -NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are -solely responsible for determining the appropriateness of using or -redistributing the Work and assume any risks associated with Your exercise of -permissions under this License. - -8. Limitation of Liability. 
- -In no event and under no legal theory, whether in tort (including negligence), -contract, or otherwise, unless required by applicable law (such as deliberate -and grossly negligent acts) or agreed to in writing, shall any Contributor be -liable to You for damages, including any direct, indirect, special, incidental, -or consequential damages of any character arising as a result of this License or -out of the use or inability to use the Work (including but not limited to -damages for loss of goodwill, work stoppage, computer failure or malfunction, or -any and all other commercial damages or losses), even if such Contributor has -been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. - -While redistributing the Work or Derivative Works thereof, You may choose to -offer, and charge a fee for, acceptance of support, warranty, indemnity, or -other liability obligations and/or rights consistent with this License. However, -in accepting such obligations, You may act only on Your own behalf and on Your -sole responsibility, not on behalf of any other Contributor, and only if You -agree to indemnify, defend, and hold each Contributor harmless for any liability -incurred by, or claims asserted against, such Contributor by reason of your -accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work - -To apply the Apache License to your work, attach the following boilerplate -notice, with the fields enclosed by brackets "[]" replaced with your own -identifying information. (Don't include the brackets!) The text should be -enclosed in the appropriate comment syntax for the file format. We also -recommend that a file or class name and description of purpose be included on -the same "printed page" as the copyright notice for easier identification within -third-party archives. - - Copyright 2014 Unknwon - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/tools/vendor/gopkg.in/ini.v1/Makefile b/tools/vendor/gopkg.in/ini.v1/Makefile deleted file mode 100644 index f3b0dae2d..000000000 --- a/tools/vendor/gopkg.in/ini.v1/Makefile +++ /dev/null @@ -1,15 +0,0 @@ -.PHONY: build test bench vet coverage - -build: vet bench - -test: - go test -v -cover -race - -bench: - go test -v -cover -test.bench=. 
-test.benchmem - -vet: - go vet - -coverage: - go test -coverprofile=c.out && go tool cover -html=c.out && rm c.out diff --git a/tools/vendor/gopkg.in/ini.v1/README.md b/tools/vendor/gopkg.in/ini.v1/README.md deleted file mode 100644 index 30606d970..000000000 --- a/tools/vendor/gopkg.in/ini.v1/README.md +++ /dev/null @@ -1,43 +0,0 @@ -# INI - -[![GitHub Workflow Status](https://img.shields.io/github/checks-status/go-ini/ini/main?logo=github&style=for-the-badge)](https://github.com/go-ini/ini/actions?query=branch%3Amain) -[![codecov](https://img.shields.io/codecov/c/github/go-ini/ini/master?logo=codecov&style=for-the-badge)](https://codecov.io/gh/go-ini/ini) -[![GoDoc](https://img.shields.io/badge/GoDoc-Reference-blue?style=for-the-badge&logo=go)](https://pkg.go.dev/github.com/go-ini/ini?tab=doc) -[![Sourcegraph](https://img.shields.io/badge/view%20on-Sourcegraph-brightgreen.svg?style=for-the-badge&logo=sourcegraph)](https://sourcegraph.com/github.com/go-ini/ini) - -![](https://avatars0.githubusercontent.com/u/10216035?v=3&s=200) - -Package ini provides INI file read and write functionality in Go. - -## Features - -- Load from multiple data sources(file, `[]byte`, `io.Reader` and `io.ReadCloser`) with overwrites. -- Read with recursion values. -- Read with parent-child sections. -- Read with auto-increment key names. -- Read with multiple-line values. -- Read with tons of helper methods. -- Read and convert values to Go types. -- Read and **WRITE** comments of sections and keys. -- Manipulate sections, keys and comments with ease. -- Keep sections and keys in order as you parse and save. - -## Installation - -The minimum requirement of Go is **1.13**. - -```sh -$ go get gopkg.in/ini.v1 -``` - -Please add `-u` flag to update in the future. - -## Getting Help - -- [Getting Started](https://ini.unknwon.io/docs/intro/getting_started) -- [API Documentation](https://gowalker.org/gopkg.in/ini.v1) -- 中国大陆镜像:https://ini.unknwon.cn - -## License - -This project is under Apache v2 License. See the [LICENSE](LICENSE) file for the full license text. diff --git a/tools/vendor/gopkg.in/ini.v1/codecov.yml b/tools/vendor/gopkg.in/ini.v1/codecov.yml deleted file mode 100644 index e02ec84bc..000000000 --- a/tools/vendor/gopkg.in/ini.v1/codecov.yml +++ /dev/null @@ -1,16 +0,0 @@ -coverage: - range: "60...95" - status: - project: - default: - threshold: 1% - informational: true - patch: - defualt: - only_pulls: true - informational: true - -comment: - layout: 'diff' - -github_checks: false diff --git a/tools/vendor/gopkg.in/ini.v1/data_source.go b/tools/vendor/gopkg.in/ini.v1/data_source.go deleted file mode 100644 index c3a541f1d..000000000 --- a/tools/vendor/gopkg.in/ini.v1/data_source.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2019 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. 
- -package ini - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "os" -) - -var ( - _ dataSource = (*sourceFile)(nil) - _ dataSource = (*sourceData)(nil) - _ dataSource = (*sourceReadCloser)(nil) -) - -// dataSource is an interface that returns object which can be read and closed. -type dataSource interface { - ReadCloser() (io.ReadCloser, error) -} - -// sourceFile represents an object that contains content on the local file system. -type sourceFile struct { - name string -} - -func (s sourceFile) ReadCloser() (_ io.ReadCloser, err error) { - return os.Open(s.name) -} - -// sourceData represents an object that contains content in memory. -type sourceData struct { - data []byte -} - -func (s *sourceData) ReadCloser() (io.ReadCloser, error) { - return ioutil.NopCloser(bytes.NewReader(s.data)), nil -} - -// sourceReadCloser represents an input stream with Close method. -type sourceReadCloser struct { - reader io.ReadCloser -} - -func (s *sourceReadCloser) ReadCloser() (io.ReadCloser, error) { - return s.reader, nil -} - -func parseDataSource(source interface{}) (dataSource, error) { - switch s := source.(type) { - case string: - return sourceFile{s}, nil - case []byte: - return &sourceData{s}, nil - case io.ReadCloser: - return &sourceReadCloser{s}, nil - case io.Reader: - return &sourceReadCloser{ioutil.NopCloser(s)}, nil - default: - return nil, fmt.Errorf("error parsing data source: unknown type %q", s) - } -} diff --git a/tools/vendor/gopkg.in/ini.v1/deprecated.go b/tools/vendor/gopkg.in/ini.v1/deprecated.go deleted file mode 100644 index 48b8e66d6..000000000 --- a/tools/vendor/gopkg.in/ini.v1/deprecated.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2019 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package ini - -var ( - // Deprecated: Use "DefaultSection" instead. - DEFAULT_SECTION = DefaultSection - // Deprecated: AllCapsUnderscore converts to format ALL_CAPS_UNDERSCORE. - AllCapsUnderscore = SnackCase -) diff --git a/tools/vendor/gopkg.in/ini.v1/error.go b/tools/vendor/gopkg.in/ini.v1/error.go deleted file mode 100644 index f66bc94b8..000000000 --- a/tools/vendor/gopkg.in/ini.v1/error.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2016 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package ini - -import ( - "fmt" -) - -// ErrDelimiterNotFound indicates the error type of no delimiter is found which there should be one. 
-type ErrDelimiterNotFound struct { - Line string -} - -// IsErrDelimiterNotFound returns true if the given error is an instance of ErrDelimiterNotFound. -func IsErrDelimiterNotFound(err error) bool { - _, ok := err.(ErrDelimiterNotFound) - return ok -} - -func (err ErrDelimiterNotFound) Error() string { - return fmt.Sprintf("key-value delimiter not found: %s", err.Line) -} - -// ErrEmptyKeyName indicates the error type of no key name is found which there should be one. -type ErrEmptyKeyName struct { - Line string -} - -// IsErrEmptyKeyName returns true if the given error is an instance of ErrEmptyKeyName. -func IsErrEmptyKeyName(err error) bool { - _, ok := err.(ErrEmptyKeyName) - return ok -} - -func (err ErrEmptyKeyName) Error() string { - return fmt.Sprintf("empty key name: %s", err.Line) -} diff --git a/tools/vendor/gopkg.in/ini.v1/file.go b/tools/vendor/gopkg.in/ini.v1/file.go deleted file mode 100644 index f8b22408b..000000000 --- a/tools/vendor/gopkg.in/ini.v1/file.go +++ /dev/null @@ -1,541 +0,0 @@ -// Copyright 2017 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package ini - -import ( - "bytes" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "strings" - "sync" -) - -// File represents a combination of one or more INI files in memory. -type File struct { - options LoadOptions - dataSources []dataSource - - // Should make things safe, but sometimes doesn't matter. - BlockMode bool - lock sync.RWMutex - - // To keep data in order. - sectionList []string - // To keep track of the index of a section with same name. - // This meta list is only used with non-unique section names are allowed. - sectionIndexes []int - - // Actual data is stored here. - sections map[string][]*Section - - NameMapper - ValueMapper -} - -// newFile initializes File object with given data sources. -func newFile(dataSources []dataSource, opts LoadOptions) *File { - if len(opts.KeyValueDelimiters) == 0 { - opts.KeyValueDelimiters = "=:" - } - if len(opts.KeyValueDelimiterOnWrite) == 0 { - opts.KeyValueDelimiterOnWrite = "=" - } - if len(opts.ChildSectionDelimiter) == 0 { - opts.ChildSectionDelimiter = "." - } - - return &File{ - BlockMode: true, - dataSources: dataSources, - sections: make(map[string][]*Section), - options: opts, - } -} - -// Empty returns an empty file object. -func Empty(opts ...LoadOptions) *File { - var opt LoadOptions - if len(opts) > 0 { - opt = opts[0] - } - - // Ignore error here, we are sure our data is good. - f, _ := LoadSources(opt, []byte("")) - return f -} - -// NewSection creates a new section. 
-func (f *File) NewSection(name string) (*Section, error) { - if len(name) == 0 { - return nil, errors.New("empty section name") - } - - if (f.options.Insensitive || f.options.InsensitiveSections) && name != DefaultSection { - name = strings.ToLower(name) - } - - if f.BlockMode { - f.lock.Lock() - defer f.lock.Unlock() - } - - if !f.options.AllowNonUniqueSections && inSlice(name, f.sectionList) { - return f.sections[name][0], nil - } - - f.sectionList = append(f.sectionList, name) - - // NOTE: Append to indexes must happen before appending to sections, - // otherwise index will have off-by-one problem. - f.sectionIndexes = append(f.sectionIndexes, len(f.sections[name])) - - sec := newSection(f, name) - f.sections[name] = append(f.sections[name], sec) - - return sec, nil -} - -// NewRawSection creates a new section with an unparseable body. -func (f *File) NewRawSection(name, body string) (*Section, error) { - section, err := f.NewSection(name) - if err != nil { - return nil, err - } - - section.isRawSection = true - section.rawBody = body - return section, nil -} - -// NewSections creates a list of sections. -func (f *File) NewSections(names ...string) (err error) { - for _, name := range names { - if _, err = f.NewSection(name); err != nil { - return err - } - } - return nil -} - -// GetSection returns section by given name. -func (f *File) GetSection(name string) (*Section, error) { - secs, err := f.SectionsByName(name) - if err != nil { - return nil, err - } - - return secs[0], err -} - -// HasSection returns true if the file contains a section with given name. -func (f *File) HasSection(name string) bool { - section, _ := f.GetSection(name) - return section != nil -} - -// SectionsByName returns all sections with given name. -func (f *File) SectionsByName(name string) ([]*Section, error) { - if len(name) == 0 { - name = DefaultSection - } - if f.options.Insensitive || f.options.InsensitiveSections { - name = strings.ToLower(name) - } - - if f.BlockMode { - f.lock.RLock() - defer f.lock.RUnlock() - } - - secs := f.sections[name] - if len(secs) == 0 { - return nil, fmt.Errorf("section %q does not exist", name) - } - - return secs, nil -} - -// Section assumes named section exists and returns a zero-value when not. -func (f *File) Section(name string) *Section { - sec, err := f.GetSection(name) - if err != nil { - if name == "" { - name = DefaultSection - } - sec, _ = f.NewSection(name) - return sec - } - return sec -} - -// SectionWithIndex assumes named section exists and returns a new section when not. -func (f *File) SectionWithIndex(name string, index int) *Section { - secs, err := f.SectionsByName(name) - if err != nil || len(secs) <= index { - // NOTE: It's OK here because the only possible error is empty section name, - // but if it's empty, this piece of code won't be executed. - newSec, _ := f.NewSection(name) - return newSec - } - - return secs[index] -} - -// Sections returns a list of Section stored in the current instance. -func (f *File) Sections() []*Section { - if f.BlockMode { - f.lock.RLock() - defer f.lock.RUnlock() - } - - sections := make([]*Section, len(f.sectionList)) - for i, name := range f.sectionList { - sections[i] = f.sections[name][f.sectionIndexes[i]] - } - return sections -} - -// ChildSections returns a list of child sections of given section name. -func (f *File) ChildSections(name string) []*Section { - return f.Section(name).ChildSections() -} - -// SectionStrings returns list of section names. 
-func (f *File) SectionStrings() []string { - list := make([]string, len(f.sectionList)) - copy(list, f.sectionList) - return list -} - -// DeleteSection deletes a section or all sections with given name. -func (f *File) DeleteSection(name string) { - secs, err := f.SectionsByName(name) - if err != nil { - return - } - - for i := 0; i < len(secs); i++ { - // For non-unique sections, it is always needed to remove the first one so - // in the next iteration, the subsequent section continue having index 0. - // Ignoring the error as index 0 never returns an error. - _ = f.DeleteSectionWithIndex(name, 0) - } -} - -// DeleteSectionWithIndex deletes a section with given name and index. -func (f *File) DeleteSectionWithIndex(name string, index int) error { - if !f.options.AllowNonUniqueSections && index != 0 { - return fmt.Errorf("delete section with non-zero index is only allowed when non-unique sections is enabled") - } - - if len(name) == 0 { - name = DefaultSection - } - if f.options.Insensitive || f.options.InsensitiveSections { - name = strings.ToLower(name) - } - - if f.BlockMode { - f.lock.Lock() - defer f.lock.Unlock() - } - - // Count occurrences of the sections - occurrences := 0 - - sectionListCopy := make([]string, len(f.sectionList)) - copy(sectionListCopy, f.sectionList) - - for i, s := range sectionListCopy { - if s != name { - continue - } - - if occurrences == index { - if len(f.sections[name]) <= 1 { - delete(f.sections, name) // The last one in the map - } else { - f.sections[name] = append(f.sections[name][:index], f.sections[name][index+1:]...) - } - - // Fix section lists - f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...) - f.sectionIndexes = append(f.sectionIndexes[:i], f.sectionIndexes[i+1:]...) - - } else if occurrences > index { - // Fix the indices of all following sections with this name. - f.sectionIndexes[i-1]-- - } - - occurrences++ - } - - return nil -} - -func (f *File) reload(s dataSource) error { - r, err := s.ReadCloser() - if err != nil { - return err - } - defer r.Close() - - return f.parse(r) -} - -// Reload reloads and parses all data sources. -func (f *File) Reload() (err error) { - for _, s := range f.dataSources { - if err = f.reload(s); err != nil { - // In loose mode, we create an empty default section for nonexistent files. - if os.IsNotExist(err) && f.options.Loose { - _ = f.parse(bytes.NewBuffer(nil)) - continue - } - return err - } - if f.options.ShortCircuit { - return nil - } - } - return nil -} - -// Append appends one or more data sources and reloads automatically. -func (f *File) Append(source interface{}, others ...interface{}) error { - ds, err := parseDataSource(source) - if err != nil { - return err - } - f.dataSources = append(f.dataSources, ds) - for _, s := range others { - ds, err = parseDataSource(s) - if err != nil { - return err - } - f.dataSources = append(f.dataSources, ds) - } - return f.Reload() -} - -func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) { - equalSign := DefaultFormatLeft + f.options.KeyValueDelimiterOnWrite + DefaultFormatRight - - if PrettyFormat || PrettyEqual { - equalSign = fmt.Sprintf(" %s ", f.options.KeyValueDelimiterOnWrite) - } - - // Use buffer to make sure target is safe until finish encoding. 
- buf := bytes.NewBuffer(nil) - lastSectionIdx := len(f.sectionList) - 1 - for i, sname := range f.sectionList { - sec := f.SectionWithIndex(sname, f.sectionIndexes[i]) - if len(sec.Comment) > 0 { - // Support multiline comments - lines := strings.Split(sec.Comment, LineBreak) - for i := range lines { - if lines[i][0] != '#' && lines[i][0] != ';' { - lines[i] = "; " + lines[i] - } else { - lines[i] = lines[i][:1] + " " + strings.TrimSpace(lines[i][1:]) - } - - if _, err := buf.WriteString(lines[i] + LineBreak); err != nil { - return nil, err - } - } - } - - if i > 0 || DefaultHeader || (i == 0 && strings.ToUpper(sec.name) != DefaultSection) { - if _, err := buf.WriteString("[" + sname + "]" + LineBreak); err != nil { - return nil, err - } - } else { - // Write nothing if default section is empty - if len(sec.keyList) == 0 { - continue - } - } - - isLastSection := i == lastSectionIdx - if sec.isRawSection { - if _, err := buf.WriteString(sec.rawBody); err != nil { - return nil, err - } - - if PrettySection && !isLastSection { - // Put a line between sections - if _, err := buf.WriteString(LineBreak); err != nil { - return nil, err - } - } - continue - } - - // Count and generate alignment length and buffer spaces using the - // longest key. Keys may be modified if they contain certain characters so - // we need to take that into account in our calculation. - alignLength := 0 - if PrettyFormat { - for _, kname := range sec.keyList { - keyLength := len(kname) - // First case will surround key by ` and second by """ - if strings.Contains(kname, "\"") || strings.ContainsAny(kname, f.options.KeyValueDelimiters) { - keyLength += 2 - } else if strings.Contains(kname, "`") { - keyLength += 6 - } - - if keyLength > alignLength { - alignLength = keyLength - } - } - } - alignSpaces := bytes.Repeat([]byte(" "), alignLength) - - KeyList: - for _, kname := range sec.keyList { - key := sec.Key(kname) - if len(key.Comment) > 0 { - if len(indent) > 0 && sname != DefaultSection { - buf.WriteString(indent) - } - - // Support multiline comments - lines := strings.Split(key.Comment, LineBreak) - for i := range lines { - if lines[i][0] != '#' && lines[i][0] != ';' { - lines[i] = "; " + strings.TrimSpace(lines[i]) - } else { - lines[i] = lines[i][:1] + " " + strings.TrimSpace(lines[i][1:]) - } - - if _, err := buf.WriteString(lines[i] + LineBreak); err != nil { - return nil, err - } - } - } - - if len(indent) > 0 && sname != DefaultSection { - buf.WriteString(indent) - } - - switch { - case key.isAutoIncrement: - kname = "-" - case strings.Contains(kname, "\"") || strings.ContainsAny(kname, f.options.KeyValueDelimiters): - kname = "`" + kname + "`" - case strings.Contains(kname, "`"): - kname = `"""` + kname + `"""` - } - - writeKeyValue := func(val string) (bool, error) { - if _, err := buf.WriteString(kname); err != nil { - return false, err - } - - if key.isBooleanType { - buf.WriteString(LineBreak) - return true, nil - } - - // Write out alignment spaces before "=" sign - if PrettyFormat { - buf.Write(alignSpaces[:alignLength-len(kname)]) - } - - // In case key value contains "\n", "`", "\"", "#" or ";" - if strings.ContainsAny(val, "\n`") { - val = `"""` + val + `"""` - } else if !f.options.IgnoreInlineComment && strings.ContainsAny(val, "#;") { - val = "`" + val + "`" - } else if len(strings.TrimSpace(val)) != len(val) { - val = `"` + val + `"` - } - if _, err := buf.WriteString(equalSign + val + LineBreak); err != nil { - return false, err - } - return false, nil - } - - shadows := key.ValueWithShadows() - 
if len(shadows) == 0 { - if _, err := writeKeyValue(""); err != nil { - return nil, err - } - } - - for _, val := range shadows { - exitLoop, err := writeKeyValue(val) - if err != nil { - return nil, err - } else if exitLoop { - continue KeyList - } - } - - for _, val := range key.nestedValues { - if _, err := buf.WriteString(indent + " " + val + LineBreak); err != nil { - return nil, err - } - } - } - - if PrettySection && !isLastSection { - // Put a line between sections - if _, err := buf.WriteString(LineBreak); err != nil { - return nil, err - } - } - } - - return buf, nil -} - -// WriteToIndent writes content into io.Writer with given indention. -// If PrettyFormat has been set to be true, -// it will align "=" sign with spaces under each section. -func (f *File) WriteToIndent(w io.Writer, indent string) (int64, error) { - buf, err := f.writeToBuffer(indent) - if err != nil { - return 0, err - } - return buf.WriteTo(w) -} - -// WriteTo writes file content into io.Writer. -func (f *File) WriteTo(w io.Writer) (int64, error) { - return f.WriteToIndent(w, "") -} - -// SaveToIndent writes content to file system with given value indention. -func (f *File) SaveToIndent(filename, indent string) error { - // Note: Because we are truncating with os.Create, - // so it's safer to save to a temporary file location and rename after done. - buf, err := f.writeToBuffer(indent) - if err != nil { - return err - } - - return ioutil.WriteFile(filename, buf.Bytes(), 0666) -} - -// SaveTo writes content to file system. -func (f *File) SaveTo(filename string) error { - return f.SaveToIndent(filename, "") -} diff --git a/tools/vendor/gopkg.in/ini.v1/helper.go b/tools/vendor/gopkg.in/ini.v1/helper.go deleted file mode 100644 index f9d80a682..000000000 --- a/tools/vendor/gopkg.in/ini.v1/helper.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2019 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package ini - -func inSlice(str string, s []string) bool { - for _, v := range s { - if str == v { - return true - } - } - return false -} diff --git a/tools/vendor/gopkg.in/ini.v1/ini.go b/tools/vendor/gopkg.in/ini.v1/ini.go deleted file mode 100644 index 99e7f8651..000000000 --- a/tools/vendor/gopkg.in/ini.v1/ini.go +++ /dev/null @@ -1,176 +0,0 @@ -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -// Package ini provides INI file read and write functionality in Go. 
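For reference, the serialization entry points deleted above all funnel through writeToBuffer: WriteTo and WriteToIndent stream to any io.Writer, while SaveToIndent materializes the buffer to disk. A minimal usage sketch, assuming the package is still importable as gopkg.in/ini.v1 and a loaded *ini.File named f:

    var buf bytes.Buffer
    if _, err := f.WriteTo(&buf); err != nil {
        log.Fatal(err)
    }
    // Indentation only affects key comments and nested values under
    // non-default sections; "=" alignment is governed by PrettyFormat.
    if err := f.SaveToIndent("app.ini", "\t"); err != nil {
        log.Fatal(err)
    }

Note that despite the comment about renaming a temporary file, SaveToIndent writes directly with ioutil.WriteFile, so a crash mid-write can still truncate the target file.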
-package ini - -import ( - "os" - "regexp" - "runtime" - "strings" -) - -const ( - // Maximum allowed depth when recursively substituing variable names. - depthValues = 99 -) - -var ( - // DefaultSection is the name of default section. You can use this var or the string literal. - // In most of cases, an empty string is all you need to access the section. - DefaultSection = "DEFAULT" - - // LineBreak is the delimiter to determine or compose a new line. - // This variable will be changed to "\r\n" automatically on Windows at package init time. - LineBreak = "\n" - - // Variable regexp pattern: %(variable)s - varPattern = regexp.MustCompile(`%\(([^)]+)\)s`) - - // DefaultHeader explicitly writes default section header. - DefaultHeader = false - - // PrettySection indicates whether to put a line between sections. - PrettySection = true - // PrettyFormat indicates whether to align "=" sign with spaces to produce pretty output - // or reduce all possible spaces for compact format. - PrettyFormat = true - // PrettyEqual places spaces around "=" sign even when PrettyFormat is false. - PrettyEqual = false - // DefaultFormatLeft places custom spaces on the left when PrettyFormat and PrettyEqual are both disabled. - DefaultFormatLeft = "" - // DefaultFormatRight places custom spaces on the right when PrettyFormat and PrettyEqual are both disabled. - DefaultFormatRight = "" -) - -var inTest = len(os.Args) > 0 && strings.HasSuffix(strings.TrimSuffix(os.Args[0], ".exe"), ".test") - -func init() { - if runtime.GOOS == "windows" && !inTest { - LineBreak = "\r\n" - } -} - -// LoadOptions contains all customized options used for load data source(s). -type LoadOptions struct { - // Loose indicates whether the parser should ignore nonexistent files or return error. - Loose bool - // Insensitive indicates whether the parser forces all section and key names to lowercase. - Insensitive bool - // InsensitiveSections indicates whether the parser forces all section to lowercase. - InsensitiveSections bool - // InsensitiveKeys indicates whether the parser forces all key names to lowercase. - InsensitiveKeys bool - // IgnoreContinuation indicates whether to ignore continuation lines while parsing. - IgnoreContinuation bool - // IgnoreInlineComment indicates whether to ignore comments at the end of value and treat it as part of value. - IgnoreInlineComment bool - // SkipUnrecognizableLines indicates whether to skip unrecognizable lines that do not conform to key/value pairs. - SkipUnrecognizableLines bool - // ShortCircuit indicates whether to ignore other configuration sources after loaded the first available configuration source. - ShortCircuit bool - // AllowBooleanKeys indicates whether to allow boolean type keys or treat as value is missing. - // This type of keys are mostly used in my.cnf. - AllowBooleanKeys bool - // AllowShadows indicates whether to keep track of keys with same name under same section. - AllowShadows bool - // AllowNestedValues indicates whether to allow AWS-like nested values. - // Docs: http://docs.aws.amazon.com/cli/latest/topic/config-vars.html#nested-values - AllowNestedValues bool - // AllowPythonMultilineValues indicates whether to allow Python-like multi-line values. - // Docs: https://docs.python.org/3/library/configparser.html#supported-ini-file-structure - // Relevant quote: Values can also span multiple lines, as long as they are indented deeper - // than the first line of the value. 
- AllowPythonMultilineValues bool - // SpaceBeforeInlineComment indicates whether to allow comment symbols (\# and \;) inside value. - // Docs: https://docs.python.org/2/library/configparser.html - // Quote: Comments may appear on their own in an otherwise empty line, or may be entered in lines holding values or section names. - // In the latter case, they need to be preceded by a whitespace character to be recognized as a comment. - SpaceBeforeInlineComment bool - // UnescapeValueDoubleQuotes indicates whether to unescape double quotes inside value to regular format - // when value is surrounded by double quotes, e.g. key="a \"value\"" => key=a "value" - UnescapeValueDoubleQuotes bool - // UnescapeValueCommentSymbols indicates to unescape comment symbols (\# and \;) inside value to regular format - // when value is NOT surrounded by any quotes. - // Note: UNSTABLE, behavior might change to only unescape inside double quotes but may noy necessary at all. - UnescapeValueCommentSymbols bool - // UnparseableSections stores a list of blocks that are allowed with raw content which do not otherwise - // conform to key/value pairs. Specify the names of those blocks here. - UnparseableSections []string - // KeyValueDelimiters is the sequence of delimiters that are used to separate key and value. By default, it is "=:". - KeyValueDelimiters string - // KeyValueDelimiterOnWrite is the delimiter that are used to separate key and value output. By default, it is "=". - KeyValueDelimiterOnWrite string - // ChildSectionDelimiter is the delimiter that is used to separate child sections. By default, it is ".". - ChildSectionDelimiter string - // PreserveSurroundedQuote indicates whether to preserve surrounded quote (single and double quotes). - PreserveSurroundedQuote bool - // DebugFunc is called to collect debug information (currently only useful to debug parsing Python-style multiline values). - DebugFunc DebugFunc - // ReaderBufferSize is the buffer size of the reader in bytes. - ReaderBufferSize int - // AllowNonUniqueSections indicates whether to allow sections with the same name multiple times. - AllowNonUniqueSections bool - // AllowDuplicateShadowValues indicates whether values for shadowed keys should be deduplicated. - AllowDuplicateShadowValues bool -} - -// DebugFunc is the type of function called to log parse events. -type DebugFunc func(message string) - -// LoadSources allows caller to apply customized options for loading from data source(s). -func LoadSources(opts LoadOptions, source interface{}, others ...interface{}) (_ *File, err error) { - sources := make([]dataSource, len(others)+1) - sources[0], err = parseDataSource(source) - if err != nil { - return nil, err - } - for i := range others { - sources[i+1], err = parseDataSource(others[i]) - if err != nil { - return nil, err - } - } - f := newFile(sources, opts) - if err = f.Reload(); err != nil { - return nil, err - } - return f, nil -} - -// Load loads and parses from INI data sources. -// Arguments can be mixed of file name with string type, or raw data in []byte. -// It will return error if list contains nonexistent files. -func Load(source interface{}, others ...interface{}) (*File, error) { - return LoadSources(LoadOptions{}, source, others...) -} - -// LooseLoad has exactly same functionality as Load function -// except it ignores nonexistent files instead of returning error. -func LooseLoad(source interface{}, others ...interface{}) (*File, error) { - return LoadSources(LoadOptions{Loose: true}, source, others...) 
-} - -// InsensitiveLoad has exactly same functionality as Load function -// except it forces all section and key names to be lowercased. -func InsensitiveLoad(source interface{}, others ...interface{}) (*File, error) { - return LoadSources(LoadOptions{Insensitive: true}, source, others...) -} - -// ShadowLoad has exactly same functionality as Load function -// except it allows have shadow keys. -func ShadowLoad(source interface{}, others ...interface{}) (*File, error) { - return LoadSources(LoadOptions{AllowShadows: true}, source, others...) -} diff --git a/tools/vendor/gopkg.in/ini.v1/key.go b/tools/vendor/gopkg.in/ini.v1/key.go deleted file mode 100644 index a19d9f38e..000000000 --- a/tools/vendor/gopkg.in/ini.v1/key.go +++ /dev/null @@ -1,837 +0,0 @@ -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package ini - -import ( - "bytes" - "errors" - "fmt" - "strconv" - "strings" - "time" -) - -// Key represents a key under a section. -type Key struct { - s *Section - Comment string - name string - value string - isAutoIncrement bool - isBooleanType bool - - isShadow bool - shadows []*Key - - nestedValues []string -} - -// newKey simply return a key object with given values. -func newKey(s *Section, name, val string) *Key { - return &Key{ - s: s, - name: name, - value: val, - } -} - -func (k *Key) addShadow(val string) error { - if k.isShadow { - return errors.New("cannot add shadow to another shadow key") - } else if k.isAutoIncrement || k.isBooleanType { - return errors.New("cannot add shadow to auto-increment or boolean key") - } - - if !k.s.f.options.AllowDuplicateShadowValues { - // Deduplicate shadows based on their values. - if k.value == val { - return nil - } - for i := range k.shadows { - if k.shadows[i].value == val { - return nil - } - } - } - - shadow := newKey(k.s, k.name, val) - shadow.isShadow = true - k.shadows = append(k.shadows, shadow) - return nil -} - -// AddShadow adds a new shadow key to itself. -func (k *Key) AddShadow(val string) error { - if !k.s.f.options.AllowShadows { - return errors.New("shadow key is not allowed") - } - return k.addShadow(val) -} - -func (k *Key) addNestedValue(val string) error { - if k.isAutoIncrement || k.isBooleanType { - return errors.New("cannot add nested value to auto-increment or boolean key") - } - - k.nestedValues = append(k.nestedValues, val) - return nil -} - -// AddNestedValue adds a nested value to the key. -func (k *Key) AddNestedValue(val string) error { - if !k.s.f.options.AllowNestedValues { - return errors.New("nested value is not allowed") - } - return k.addNestedValue(val) -} - -// ValueMapper represents a mapping function for values, e.g. os.ExpandEnv -type ValueMapper func(string) string - -// Name returns name of key. -func (k *Key) Name() string { - return k.name -} - -// Value returns raw value of key for performance purpose. -func (k *Key) Value() string { - return k.value -} - -// ValueWithShadows returns raw values of key and its shadows if any. 
Shadow -// keys with empty values are ignored from the returned list. -func (k *Key) ValueWithShadows() []string { - if len(k.shadows) == 0 { - if k.value == "" { - return []string{} - } - return []string{k.value} - } - - vals := make([]string, 0, len(k.shadows)+1) - if k.value != "" { - vals = append(vals, k.value) - } - for _, s := range k.shadows { - if s.value != "" { - vals = append(vals, s.value) - } - } - return vals -} - -// NestedValues returns nested values stored in the key. -// It is possible returned value is nil if no nested values stored in the key. -func (k *Key) NestedValues() []string { - return k.nestedValues -} - -// transformValue takes a raw value and transforms to its final string. -func (k *Key) transformValue(val string) string { - if k.s.f.ValueMapper != nil { - val = k.s.f.ValueMapper(val) - } - - // Fail-fast if no indicate char found for recursive value - if !strings.Contains(val, "%") { - return val - } - for i := 0; i < depthValues; i++ { - vr := varPattern.FindString(val) - if len(vr) == 0 { - break - } - - // Take off leading '%(' and trailing ')s'. - noption := vr[2 : len(vr)-2] - - // Search in the same section. - // If not found or found the key itself, then search again in default section. - nk, err := k.s.GetKey(noption) - if err != nil || k == nk { - nk, _ = k.s.f.Section("").GetKey(noption) - if nk == nil { - // Stop when no results found in the default section, - // and returns the value as-is. - break - } - } - - // Substitute by new value and take off leading '%(' and trailing ')s'. - val = strings.Replace(val, vr, nk.value, -1) - } - return val -} - -// String returns string representation of value. -func (k *Key) String() string { - return k.transformValue(k.value) -} - -// Validate accepts a validate function which can -// return modifed result as key value. -func (k *Key) Validate(fn func(string) string) string { - return fn(k.String()) -} - -// parseBool returns the boolean value represented by the string. -// -// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On, -// 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off. -// Any other value returns an error. -func parseBool(str string) (value bool, err error) { - switch str { - case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "y", "ON", "on", "On": - return true, nil - case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "n", "OFF", "off", "Off": - return false, nil - } - return false, fmt.Errorf("parsing \"%s\": invalid syntax", str) -} - -// Bool returns bool type value. -func (k *Key) Bool() (bool, error) { - return parseBool(k.String()) -} - -// Float64 returns float64 type value. -func (k *Key) Float64() (float64, error) { - return strconv.ParseFloat(k.String(), 64) -} - -// Int returns int type value. -func (k *Key) Int() (int, error) { - v, err := strconv.ParseInt(k.String(), 0, 64) - return int(v), err -} - -// Int64 returns int64 type value. -func (k *Key) Int64() (int64, error) { - return strconv.ParseInt(k.String(), 0, 64) -} - -// Uint returns uint type valued. -func (k *Key) Uint() (uint, error) { - u, e := strconv.ParseUint(k.String(), 0, 64) - return uint(u), e -} - -// Uint64 returns uint64 type value. -func (k *Key) Uint64() (uint64, error) { - return strconv.ParseUint(k.String(), 0, 64) -} - -// Duration returns time.Duration type value. -func (k *Key) Duration() (time.Duration, error) { - return time.ParseDuration(k.String()) -} - -// TimeFormat parses with given format and returns time.Time type value. 
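The typed accessors above (Bool, Int, Int64, Uint, Float64, Duration, and friends) all parse the recursively expanded String() value. A minimal sketch of the read path, assuming gopkg.in/ini.v1 is available; the section and key names here are hypothetical:

    package main

    import (
        "fmt"
        "log"

        "gopkg.in/ini.v1"
    )

    func main() {
        // Load accepts file names, []byte, or io.Reader sources.
        f, err := ini.Load([]byte("[server]\nport = 8080\ntimeout = 30s\n"))
        if err != nil {
            log.Fatal(err)
        }
        port, _ := f.Section("server").Key("port").Int()
        timeout, _ := f.Section("server").Key("timeout").Duration()
        fmt.Println(port, timeout) // 8080 30s
    }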
-func (k *Key) TimeFormat(format string) (time.Time, error) { - return time.Parse(format, k.String()) -} - -// Time parses with RFC3339 format and returns time.Time type value. -func (k *Key) Time() (time.Time, error) { - return k.TimeFormat(time.RFC3339) -} - -// MustString returns default value if key value is empty. -func (k *Key) MustString(defaultVal string) string { - val := k.String() - if len(val) == 0 { - k.value = defaultVal - return defaultVal - } - return val -} - -// MustBool always returns value without error, -// it returns false if error occurs. -func (k *Key) MustBool(defaultVal ...bool) bool { - val, err := k.Bool() - if len(defaultVal) > 0 && err != nil { - k.value = strconv.FormatBool(defaultVal[0]) - return defaultVal[0] - } - return val -} - -// MustFloat64 always returns value without error, -// it returns 0.0 if error occurs. -func (k *Key) MustFloat64(defaultVal ...float64) float64 { - val, err := k.Float64() - if len(defaultVal) > 0 && err != nil { - k.value = strconv.FormatFloat(defaultVal[0], 'f', -1, 64) - return defaultVal[0] - } - return val -} - -// MustInt always returns value without error, -// it returns 0 if error occurs. -func (k *Key) MustInt(defaultVal ...int) int { - val, err := k.Int() - if len(defaultVal) > 0 && err != nil { - k.value = strconv.FormatInt(int64(defaultVal[0]), 10) - return defaultVal[0] - } - return val -} - -// MustInt64 always returns value without error, -// it returns 0 if error occurs. -func (k *Key) MustInt64(defaultVal ...int64) int64 { - val, err := k.Int64() - if len(defaultVal) > 0 && err != nil { - k.value = strconv.FormatInt(defaultVal[0], 10) - return defaultVal[0] - } - return val -} - -// MustUint always returns value without error, -// it returns 0 if error occurs. -func (k *Key) MustUint(defaultVal ...uint) uint { - val, err := k.Uint() - if len(defaultVal) > 0 && err != nil { - k.value = strconv.FormatUint(uint64(defaultVal[0]), 10) - return defaultVal[0] - } - return val -} - -// MustUint64 always returns value without error, -// it returns 0 if error occurs. -func (k *Key) MustUint64(defaultVal ...uint64) uint64 { - val, err := k.Uint64() - if len(defaultVal) > 0 && err != nil { - k.value = strconv.FormatUint(defaultVal[0], 10) - return defaultVal[0] - } - return val -} - -// MustDuration always returns value without error, -// it returns zero value if error occurs. -func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration { - val, err := k.Duration() - if len(defaultVal) > 0 && err != nil { - k.value = defaultVal[0].String() - return defaultVal[0] - } - return val -} - -// MustTimeFormat always parses with given format and returns value without error, -// it returns zero value if error occurs. -func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time { - val, err := k.TimeFormat(format) - if len(defaultVal) > 0 && err != nil { - k.value = defaultVal[0].Format(format) - return defaultVal[0] - } - return val -} - -// MustTime always parses with RFC3339 format and returns value without error, -// it returns zero value if error occurs. -func (k *Key) MustTime(defaultVal ...time.Time) time.Time { - return k.MustTimeFormat(time.RFC3339, defaultVal...) -} - -// In always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. 
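Continuing that sketch, the Must* variants above never return an error; when a default is supplied and parsing fails, the default is also written back into the key, so later reads see it:

    // "retries" does not exist yet, so MustInt stores and returns the default.
    retries := f.Section("server").Key("retries").MustInt(3)
    fmt.Println(retries) // 3
    n, err := f.Section("server").Key("retries").Int()
    fmt.Println(n, err) // 3 <nil>, because the default was cached on the key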
-func (k *Key) In(defaultVal string, candidates []string) string { - val := k.String() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InFloat64 always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 { - val := k.MustFloat64() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InInt always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InInt(defaultVal int, candidates []int) int { - val := k.MustInt() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InInt64 always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 { - val := k.MustInt64() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InUint always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InUint(defaultVal uint, candidates []uint) uint { - val := k.MustUint() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InUint64 always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 { - val := k.MustUint64() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InTimeFormat always parses with given format and returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time { - val := k.MustTimeFormat(format) - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InTime always parses with RFC3339 format and returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time { - return k.InTimeFormat(time.RFC3339, defaultVal, candidates) -} - -// RangeFloat64 checks if value is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 { - val := k.MustFloat64() - if val < min || val > max { - return defaultVal - } - return val -} - -// RangeInt checks if value is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeInt(defaultVal, min, max int) int { - val := k.MustInt() - if val < min || val > max { - return defaultVal - } - return val -} - -// RangeInt64 checks if value is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeInt64(defaultVal, min, max int64) int64 { - val := k.MustInt64() - if val < min || val > max { - return defaultVal - } - return val -} - -// RangeTimeFormat checks if value with given format is in given range inclusively, -// and returns default value if it's not. 
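The In* family above acts as a whitelist with a fallback rather than a validator: it silently returns the default when the stored value is not among the candidates. For example:

    // Yields "info" unless log_level is exactly one of the candidates.
    level := f.Section("server").Key("log_level").In("info", []string{"debug", "info", "warn"})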
-func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time { - val := k.MustTimeFormat(format) - if val.Unix() < min.Unix() || val.Unix() > max.Unix() { - return defaultVal - } - return val -} - -// RangeTime checks if value with RFC3339 format is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time { - return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max) -} - -// Strings returns list of string divided by given delimiter. -func (k *Key) Strings(delim string) []string { - str := k.String() - if len(str) == 0 { - return []string{} - } - - runes := []rune(str) - vals := make([]string, 0, 2) - var buf bytes.Buffer - escape := false - idx := 0 - for { - if escape { - escape = false - if runes[idx] != '\\' && !strings.HasPrefix(string(runes[idx:]), delim) { - buf.WriteRune('\\') - } - buf.WriteRune(runes[idx]) - } else { - if runes[idx] == '\\' { - escape = true - } else if strings.HasPrefix(string(runes[idx:]), delim) { - idx += len(delim) - 1 - vals = append(vals, strings.TrimSpace(buf.String())) - buf.Reset() - } else { - buf.WriteRune(runes[idx]) - } - } - idx++ - if idx == len(runes) { - break - } - } - - if buf.Len() > 0 { - vals = append(vals, strings.TrimSpace(buf.String())) - } - - return vals -} - -// StringsWithShadows returns list of string divided by given delimiter. -// Shadows will also be appended if any. -func (k *Key) StringsWithShadows(delim string) []string { - vals := k.ValueWithShadows() - results := make([]string, 0, len(vals)*2) - for i := range vals { - if len(vals) == 0 { - continue - } - - results = append(results, strings.Split(vals[i], delim)...) - } - - for i := range results { - results[i] = k.transformValue(strings.TrimSpace(results[i])) - } - return results -} - -// Float64s returns list of float64 divided by given delimiter. Any invalid input will be treated as zero value. -func (k *Key) Float64s(delim string) []float64 { - vals, _ := k.parseFloat64s(k.Strings(delim), true, false) - return vals -} - -// Ints returns list of int divided by given delimiter. Any invalid input will be treated as zero value. -func (k *Key) Ints(delim string) []int { - vals, _ := k.parseInts(k.Strings(delim), true, false) - return vals -} - -// Int64s returns list of int64 divided by given delimiter. Any invalid input will be treated as zero value. -func (k *Key) Int64s(delim string) []int64 { - vals, _ := k.parseInt64s(k.Strings(delim), true, false) - return vals -} - -// Uints returns list of uint divided by given delimiter. Any invalid input will be treated as zero value. -func (k *Key) Uints(delim string) []uint { - vals, _ := k.parseUints(k.Strings(delim), true, false) - return vals -} - -// Uint64s returns list of uint64 divided by given delimiter. Any invalid input will be treated as zero value. -func (k *Key) Uint64s(delim string) []uint64 { - vals, _ := k.parseUint64s(k.Strings(delim), true, false) - return vals -} - -// Bools returns list of bool divided by given delimiter. Any invalid input will be treated as zero value. -func (k *Key) Bools(delim string) []bool { - vals, _ := k.parseBools(k.Strings(delim), true, false) - return vals -} - -// TimesFormat parses with given format and returns list of time.Time divided by given delimiter. -// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC). 
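Two details of the list accessors above are easy to miss: Strings honours backslash-escaped delimiters, and the shadow-aware variants only yield multiple values when the file was loaded with AllowShadows. A short sketch:

    f.Section("server").Key("hosts").SetValue(`a.example\,b, c.example`)
    fmt.Println(f.Section("server").Key("hosts").Strings(",")) // [a.example,b c.example]

    shadowed, _ := ini.ShadowLoad([]byte("[deps]\npkg = a\npkg = b\n"))
    fmt.Println(shadowed.Section("deps").Key("pkg").ValueWithShadows()) // [a b]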
-func (k *Key) TimesFormat(format, delim string) []time.Time { - vals, _ := k.parseTimesFormat(format, k.Strings(delim), true, false) - return vals -} - -// Times parses with RFC3339 format and returns list of time.Time divided by given delimiter. -// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC). -func (k *Key) Times(delim string) []time.Time { - return k.TimesFormat(time.RFC3339, delim) -} - -// ValidFloat64s returns list of float64 divided by given delimiter. If some value is not float, then -// it will not be included to result list. -func (k *Key) ValidFloat64s(delim string) []float64 { - vals, _ := k.parseFloat64s(k.Strings(delim), false, false) - return vals -} - -// ValidInts returns list of int divided by given delimiter. If some value is not integer, then it will -// not be included to result list. -func (k *Key) ValidInts(delim string) []int { - vals, _ := k.parseInts(k.Strings(delim), false, false) - return vals -} - -// ValidInt64s returns list of int64 divided by given delimiter. If some value is not 64-bit integer, -// then it will not be included to result list. -func (k *Key) ValidInt64s(delim string) []int64 { - vals, _ := k.parseInt64s(k.Strings(delim), false, false) - return vals -} - -// ValidUints returns list of uint divided by given delimiter. If some value is not unsigned integer, -// then it will not be included to result list. -func (k *Key) ValidUints(delim string) []uint { - vals, _ := k.parseUints(k.Strings(delim), false, false) - return vals -} - -// ValidUint64s returns list of uint64 divided by given delimiter. If some value is not 64-bit unsigned -// integer, then it will not be included to result list. -func (k *Key) ValidUint64s(delim string) []uint64 { - vals, _ := k.parseUint64s(k.Strings(delim), false, false) - return vals -} - -// ValidBools returns list of bool divided by given delimiter. If some value is not 64-bit unsigned -// integer, then it will not be included to result list. -func (k *Key) ValidBools(delim string) []bool { - vals, _ := k.parseBools(k.Strings(delim), false, false) - return vals -} - -// ValidTimesFormat parses with given format and returns list of time.Time divided by given delimiter. -func (k *Key) ValidTimesFormat(format, delim string) []time.Time { - vals, _ := k.parseTimesFormat(format, k.Strings(delim), false, false) - return vals -} - -// ValidTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter. -func (k *Key) ValidTimes(delim string) []time.Time { - return k.ValidTimesFormat(time.RFC3339, delim) -} - -// StrictFloat64s returns list of float64 divided by given delimiter or error on first invalid input. -func (k *Key) StrictFloat64s(delim string) ([]float64, error) { - return k.parseFloat64s(k.Strings(delim), false, true) -} - -// StrictInts returns list of int divided by given delimiter or error on first invalid input. -func (k *Key) StrictInts(delim string) ([]int, error) { - return k.parseInts(k.Strings(delim), false, true) -} - -// StrictInt64s returns list of int64 divided by given delimiter or error on first invalid input. -func (k *Key) StrictInt64s(delim string) ([]int64, error) { - return k.parseInt64s(k.Strings(delim), false, true) -} - -// StrictUints returns list of uint divided by given delimiter or error on first invalid input. 
-func (k *Key) StrictUints(delim string) ([]uint, error) { - return k.parseUints(k.Strings(delim), false, true) -} - -// StrictUint64s returns list of uint64 divided by given delimiter or error on first invalid input. -func (k *Key) StrictUint64s(delim string) ([]uint64, error) { - return k.parseUint64s(k.Strings(delim), false, true) -} - -// StrictBools returns list of bool divided by given delimiter or error on first invalid input. -func (k *Key) StrictBools(delim string) ([]bool, error) { - return k.parseBools(k.Strings(delim), false, true) -} - -// StrictTimesFormat parses with given format and returns list of time.Time divided by given delimiter -// or error on first invalid input. -func (k *Key) StrictTimesFormat(format, delim string) ([]time.Time, error) { - return k.parseTimesFormat(format, k.Strings(delim), false, true) -} - -// StrictTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter -// or error on first invalid input. -func (k *Key) StrictTimes(delim string) ([]time.Time, error) { - return k.StrictTimesFormat(time.RFC3339, delim) -} - -// parseBools transforms strings to bools. -func (k *Key) parseBools(strs []string, addInvalid, returnOnInvalid bool) ([]bool, error) { - vals := make([]bool, 0, len(strs)) - parser := func(str string) (interface{}, error) { - val, err := parseBool(str) - return val, err - } - rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) - if err == nil { - for _, val := range rawVals { - vals = append(vals, val.(bool)) - } - } - return vals, err -} - -// parseFloat64s transforms strings to float64s. -func (k *Key) parseFloat64s(strs []string, addInvalid, returnOnInvalid bool) ([]float64, error) { - vals := make([]float64, 0, len(strs)) - parser := func(str string) (interface{}, error) { - val, err := strconv.ParseFloat(str, 64) - return val, err - } - rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) - if err == nil { - for _, val := range rawVals { - vals = append(vals, val.(float64)) - } - } - return vals, err -} - -// parseInts transforms strings to ints. -func (k *Key) parseInts(strs []string, addInvalid, returnOnInvalid bool) ([]int, error) { - vals := make([]int, 0, len(strs)) - parser := func(str string) (interface{}, error) { - val, err := strconv.ParseInt(str, 0, 64) - return val, err - } - rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) - if err == nil { - for _, val := range rawVals { - vals = append(vals, int(val.(int64))) - } - } - return vals, err -} - -// parseInt64s transforms strings to int64s. -func (k *Key) parseInt64s(strs []string, addInvalid, returnOnInvalid bool) ([]int64, error) { - vals := make([]int64, 0, len(strs)) - parser := func(str string) (interface{}, error) { - val, err := strconv.ParseInt(str, 0, 64) - return val, err - } - - rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) - if err == nil { - for _, val := range rawVals { - vals = append(vals, val.(int64)) - } - } - return vals, err -} - -// parseUints transforms strings to uints. 
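The three accessor tiers differ only in the (addInvalid, returnOnInvalid) flags passed to the parse helpers below: the plain getters keep invalid entries as zero values, Valid* drops them, and Strict* fails on the first bad element:

    f.Section("server").Key("ports").SetValue("80,443,oops")
    fmt.Println(f.Section("server").Key("ports").Ints(","))      // [80 443 0]
    fmt.Println(f.Section("server").Key("ports").ValidInts(",")) // [80 443]
    if _, err := f.Section("server").Key("ports").StrictInts(","); err != nil {
        log.Printf("bad ports list: %v", err) // strconv error on "oops"
    }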
-func (k *Key) parseUints(strs []string, addInvalid, returnOnInvalid bool) ([]uint, error) { - vals := make([]uint, 0, len(strs)) - parser := func(str string) (interface{}, error) { - val, err := strconv.ParseUint(str, 0, 64) - return val, err - } - - rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) - if err == nil { - for _, val := range rawVals { - vals = append(vals, uint(val.(uint64))) - } - } - return vals, err -} - -// parseUint64s transforms strings to uint64s. -func (k *Key) parseUint64s(strs []string, addInvalid, returnOnInvalid bool) ([]uint64, error) { - vals := make([]uint64, 0, len(strs)) - parser := func(str string) (interface{}, error) { - val, err := strconv.ParseUint(str, 0, 64) - return val, err - } - rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) - if err == nil { - for _, val := range rawVals { - vals = append(vals, val.(uint64)) - } - } - return vals, err -} - -type Parser func(str string) (interface{}, error) - -// parseTimesFormat transforms strings to times in given format. -func (k *Key) parseTimesFormat(format string, strs []string, addInvalid, returnOnInvalid bool) ([]time.Time, error) { - vals := make([]time.Time, 0, len(strs)) - parser := func(str string) (interface{}, error) { - val, err := time.Parse(format, str) - return val, err - } - rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) - if err == nil { - for _, val := range rawVals { - vals = append(vals, val.(time.Time)) - } - } - return vals, err -} - -// doParse transforms strings to different types -func (k *Key) doParse(strs []string, addInvalid, returnOnInvalid bool, parser Parser) ([]interface{}, error) { - vals := make([]interface{}, 0, len(strs)) - for _, str := range strs { - val, err := parser(str) - if err != nil && returnOnInvalid { - return nil, err - } - if err == nil || addInvalid { - vals = append(vals, val) - } - } - return vals, nil -} - -// SetValue changes key value. -func (k *Key) SetValue(v string) { - if k.s.f.BlockMode { - k.s.f.lock.Lock() - defer k.s.f.lock.Unlock() - } - - k.value = v - k.s.keysHash[k.name] = v -} diff --git a/tools/vendor/gopkg.in/ini.v1/parser.go b/tools/vendor/gopkg.in/ini.v1/parser.go deleted file mode 100644 index 44fc526c2..000000000 --- a/tools/vendor/gopkg.in/ini.v1/parser.go +++ /dev/null @@ -1,520 +0,0 @@ -// Copyright 2015 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. 
- -package ini - -import ( - "bufio" - "bytes" - "fmt" - "io" - "regexp" - "strconv" - "strings" - "unicode" -) - -const minReaderBufferSize = 4096 - -var pythonMultiline = regexp.MustCompile(`^([\t\f ]+)(.*)`) - -type parserOptions struct { - IgnoreContinuation bool - IgnoreInlineComment bool - AllowPythonMultilineValues bool - SpaceBeforeInlineComment bool - UnescapeValueDoubleQuotes bool - UnescapeValueCommentSymbols bool - PreserveSurroundedQuote bool - DebugFunc DebugFunc - ReaderBufferSize int -} - -type parser struct { - buf *bufio.Reader - options parserOptions - - isEOF bool - count int - comment *bytes.Buffer -} - -func (p *parser) debug(format string, args ...interface{}) { - if p.options.DebugFunc != nil { - p.options.DebugFunc(fmt.Sprintf(format, args...)) - } -} - -func newParser(r io.Reader, opts parserOptions) *parser { - size := opts.ReaderBufferSize - if size < minReaderBufferSize { - size = minReaderBufferSize - } - - return &parser{ - buf: bufio.NewReaderSize(r, size), - options: opts, - count: 1, - comment: &bytes.Buffer{}, - } -} - -// BOM handles header of UTF-8, UTF-16 LE and UTF-16 BE's BOM format. -// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding -func (p *parser) BOM() error { - mask, err := p.buf.Peek(2) - if err != nil && err != io.EOF { - return err - } else if len(mask) < 2 { - return nil - } - - switch { - case mask[0] == 254 && mask[1] == 255: - fallthrough - case mask[0] == 255 && mask[1] == 254: - _, err = p.buf.Read(mask) - if err != nil { - return err - } - case mask[0] == 239 && mask[1] == 187: - mask, err := p.buf.Peek(3) - if err != nil && err != io.EOF { - return err - } else if len(mask) < 3 { - return nil - } - if mask[2] == 191 { - _, err = p.buf.Read(mask) - if err != nil { - return err - } - } - } - return nil -} - -func (p *parser) readUntil(delim byte) ([]byte, error) { - data, err := p.buf.ReadBytes(delim) - if err != nil { - if err == io.EOF { - p.isEOF = true - } else { - return nil, err - } - } - return data, nil -} - -func cleanComment(in []byte) ([]byte, bool) { - i := bytes.IndexAny(in, "#;") - if i == -1 { - return nil, false - } - return in[i:], true -} - -func readKeyName(delimiters string, in []byte) (string, int, error) { - line := string(in) - - // Check if key name surrounded by quotes. 
- var keyQuote string - if line[0] == '"' { - if len(line) > 6 && line[0:3] == `"""` { - keyQuote = `"""` - } else { - keyQuote = `"` - } - } else if line[0] == '`' { - keyQuote = "`" - } - - // Get out key name - var endIdx int - if len(keyQuote) > 0 { - startIdx := len(keyQuote) - // FIXME: fail case -> """"""name"""=value - pos := strings.Index(line[startIdx:], keyQuote) - if pos == -1 { - return "", -1, fmt.Errorf("missing closing key quote: %s", line) - } - pos += startIdx - - // Find key-value delimiter - i := strings.IndexAny(line[pos+startIdx:], delimiters) - if i < 0 { - return "", -1, ErrDelimiterNotFound{line} - } - endIdx = pos + i - return strings.TrimSpace(line[startIdx:pos]), endIdx + startIdx + 1, nil - } - - endIdx = strings.IndexAny(line, delimiters) - if endIdx < 0 { - return "", -1, ErrDelimiterNotFound{line} - } - if endIdx == 0 { - return "", -1, ErrEmptyKeyName{line} - } - - return strings.TrimSpace(line[0:endIdx]), endIdx + 1, nil -} - -func (p *parser) readMultilines(line, val, valQuote string) (string, error) { - for { - data, err := p.readUntil('\n') - if err != nil { - return "", err - } - next := string(data) - - pos := strings.LastIndex(next, valQuote) - if pos > -1 { - val += next[:pos] - - comment, has := cleanComment([]byte(next[pos:])) - if has { - p.comment.Write(bytes.TrimSpace(comment)) - } - break - } - val += next - if p.isEOF { - return "", fmt.Errorf("missing closing key quote from %q to %q", line, next) - } - } - return val, nil -} - -func (p *parser) readContinuationLines(val string) (string, error) { - for { - data, err := p.readUntil('\n') - if err != nil { - return "", err - } - next := strings.TrimSpace(string(data)) - - if len(next) == 0 { - break - } - val += next - if val[len(val)-1] != '\\' { - break - } - val = val[:len(val)-1] - } - return val, nil -} - -// hasSurroundedQuote check if and only if the first and last characters -// are quotes \" or \'. -// It returns false if any other parts also contain same kind of quotes. 
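The continuation handling above is what joins backslash-terminated lines into a single value (unless IgnoreContinuation is set). A sketch of the input it accepts:

    src := []byte("[paths]\nlist = /usr/bin,\\\n    /usr/local/bin\n")
    f2, _ := ini.Load(src)
    fmt.Println(f2.Section("paths").Key("list").String()) // /usr/bin,/usr/local/bin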
-func hasSurroundedQuote(in string, quote byte) bool { - return len(in) >= 2 && in[0] == quote && in[len(in)-1] == quote && - strings.IndexByte(in[1:], quote) == len(in)-2 -} - -func (p *parser) readValue(in []byte, bufferSize int) (string, error) { - - line := strings.TrimLeftFunc(string(in), unicode.IsSpace) - if len(line) == 0 { - if p.options.AllowPythonMultilineValues && len(in) > 0 && in[len(in)-1] == '\n' { - return p.readPythonMultilines(line, bufferSize) - } - return "", nil - } - - var valQuote string - if len(line) > 3 && line[0:3] == `"""` { - valQuote = `"""` - } else if line[0] == '`' { - valQuote = "`" - } else if p.options.UnescapeValueDoubleQuotes && line[0] == '"' { - valQuote = `"` - } - - if len(valQuote) > 0 { - startIdx := len(valQuote) - pos := strings.LastIndex(line[startIdx:], valQuote) - // Check for multi-line value - if pos == -1 { - return p.readMultilines(line, line[startIdx:], valQuote) - } - - if p.options.UnescapeValueDoubleQuotes && valQuote == `"` { - return strings.Replace(line[startIdx:pos+startIdx], `\"`, `"`, -1), nil - } - return line[startIdx : pos+startIdx], nil - } - - lastChar := line[len(line)-1] - // Won't be able to reach here if value only contains whitespace - line = strings.TrimSpace(line) - trimmedLastChar := line[len(line)-1] - - // Check continuation lines when desired - if !p.options.IgnoreContinuation && trimmedLastChar == '\\' { - return p.readContinuationLines(line[:len(line)-1]) - } - - // Check if ignore inline comment - if !p.options.IgnoreInlineComment { - var i int - if p.options.SpaceBeforeInlineComment { - i = strings.Index(line, " #") - if i == -1 { - i = strings.Index(line, " ;") - } - - } else { - i = strings.IndexAny(line, "#;") - } - - if i > -1 { - p.comment.WriteString(line[i:]) - line = strings.TrimSpace(line[:i]) - } - - } - - // Trim single and double quotes - if (hasSurroundedQuote(line, '\'') || - hasSurroundedQuote(line, '"')) && !p.options.PreserveSurroundedQuote { - line = line[1 : len(line)-1] - } else if len(valQuote) == 0 && p.options.UnescapeValueCommentSymbols { - line = strings.ReplaceAll(line, `\;`, ";") - line = strings.ReplaceAll(line, `\#`, "#") - } else if p.options.AllowPythonMultilineValues && lastChar == '\n' { - return p.readPythonMultilines(line, bufferSize) - } - - return line, nil -} - -func (p *parser) readPythonMultilines(line string, bufferSize int) (string, error) { - parserBufferPeekResult, _ := p.buf.Peek(bufferSize) - peekBuffer := bytes.NewBuffer(parserBufferPeekResult) - - for { - peekData, peekErr := peekBuffer.ReadBytes('\n') - if peekErr != nil && peekErr != io.EOF { - p.debug("readPythonMultilines: failed to peek with error: %v", peekErr) - return "", peekErr - } - - p.debug("readPythonMultilines: parsing %q", string(peekData)) - - peekMatches := pythonMultiline.FindStringSubmatch(string(peekData)) - p.debug("readPythonMultilines: matched %d parts", len(peekMatches)) - for n, v := range peekMatches { - p.debug(" %d: %q", n, v) - } - - // Return if not a Python multiline value. - if len(peekMatches) != 3 { - p.debug("readPythonMultilines: end of value, got: %q", line) - return line, nil - } - - // Advance the parser reader (buffer) in-sync with the peek buffer. - _, err := p.buf.Discard(len(peekData)) - if err != nil { - p.debug("readPythonMultilines: failed to skip to the end, returning error") - return "", err - } - - line += "\n" + peekMatches[0] - } -} - -// parse parses data through an io.Reader. 
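readPythonMultilines above is only reachable when AllowPythonMultilineValues is set, and it consumes follow-up lines strictly by indentation, mirroring Python's configparser. For example:

    src := []byte("[msg]\nbody = line one\n  line two\n")
    f3, _ := ini.LoadSources(ini.LoadOptions{AllowPythonMultilineValues: true}, src)
    fmt.Println(f3.Section("msg").Key("body").String())
    // line one
    //   line two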
-func (f *File) parse(reader io.Reader) (err error) { - p := newParser(reader, parserOptions{ - IgnoreContinuation: f.options.IgnoreContinuation, - IgnoreInlineComment: f.options.IgnoreInlineComment, - AllowPythonMultilineValues: f.options.AllowPythonMultilineValues, - SpaceBeforeInlineComment: f.options.SpaceBeforeInlineComment, - UnescapeValueDoubleQuotes: f.options.UnescapeValueDoubleQuotes, - UnescapeValueCommentSymbols: f.options.UnescapeValueCommentSymbols, - PreserveSurroundedQuote: f.options.PreserveSurroundedQuote, - DebugFunc: f.options.DebugFunc, - ReaderBufferSize: f.options.ReaderBufferSize, - }) - if err = p.BOM(); err != nil { - return fmt.Errorf("BOM: %v", err) - } - - // Ignore error because default section name is never empty string. - name := DefaultSection - if f.options.Insensitive || f.options.InsensitiveSections { - name = strings.ToLower(DefaultSection) - } - section, _ := f.NewSection(name) - - // This "last" is not strictly equivalent to "previous one" if current key is not the first nested key - var isLastValueEmpty bool - var lastRegularKey *Key - - var line []byte - var inUnparseableSection bool - - // NOTE: Iterate and increase `currentPeekSize` until - // the size of the parser buffer is found. - // TODO(unknwon): When Golang 1.10 is the lowest version supported, replace with `parserBufferSize := p.buf.Size()`. - parserBufferSize := 0 - // NOTE: Peek 4kb at a time. - currentPeekSize := minReaderBufferSize - - if f.options.AllowPythonMultilineValues { - for { - peekBytes, _ := p.buf.Peek(currentPeekSize) - peekBytesLength := len(peekBytes) - - if parserBufferSize >= peekBytesLength { - break - } - - currentPeekSize *= 2 - parserBufferSize = peekBytesLength - } - } - - for !p.isEOF { - line, err = p.readUntil('\n') - if err != nil { - return err - } - - if f.options.AllowNestedValues && - isLastValueEmpty && len(line) > 0 { - if line[0] == ' ' || line[0] == '\t' { - err = lastRegularKey.addNestedValue(string(bytes.TrimSpace(line))) - if err != nil { - return err - } - continue - } - } - - line = bytes.TrimLeftFunc(line, unicode.IsSpace) - if len(line) == 0 { - continue - } - - // Comments - if line[0] == '#' || line[0] == ';' { - // Note: we do not care ending line break, - // it is needed for adding second line, - // so just clean it once at the end when set to value. 
- p.comment.Write(line) - continue - } - - // Section - if line[0] == '[' { - // Read to the next ']' (TODO: support quoted strings) - closeIdx := bytes.LastIndexByte(line, ']') - if closeIdx == -1 { - return fmt.Errorf("unclosed section: %s", line) - } - - name := string(line[1:closeIdx]) - section, err = f.NewSection(name) - if err != nil { - return err - } - - comment, has := cleanComment(line[closeIdx+1:]) - if has { - p.comment.Write(comment) - } - - section.Comment = strings.TrimSpace(p.comment.String()) - - // Reset auto-counter and comments - p.comment.Reset() - p.count = 1 - // Nested values can't span sections - isLastValueEmpty = false - - inUnparseableSection = false - for i := range f.options.UnparseableSections { - if f.options.UnparseableSections[i] == name || - ((f.options.Insensitive || f.options.InsensitiveSections) && strings.EqualFold(f.options.UnparseableSections[i], name)) { - inUnparseableSection = true - continue - } - } - continue - } - - if inUnparseableSection { - section.isRawSection = true - section.rawBody += string(line) - continue - } - - kname, offset, err := readKeyName(f.options.KeyValueDelimiters, line) - if err != nil { - switch { - // Treat as boolean key when desired, and whole line is key name. - case IsErrDelimiterNotFound(err): - switch { - case f.options.AllowBooleanKeys: - kname, err := p.readValue(line, parserBufferSize) - if err != nil { - return err - } - key, err := section.NewBooleanKey(kname) - if err != nil { - return err - } - key.Comment = strings.TrimSpace(p.comment.String()) - p.comment.Reset() - continue - - case f.options.SkipUnrecognizableLines: - continue - } - case IsErrEmptyKeyName(err) && f.options.SkipUnrecognizableLines: - continue - } - return err - } - - // Auto increment. - isAutoIncr := false - if kname == "-" { - isAutoIncr = true - kname = "#" + strconv.Itoa(p.count) - p.count++ - } - - value, err := p.readValue(line[offset:], parserBufferSize) - if err != nil { - return err - } - isLastValueEmpty = len(value) == 0 - - key, err := section.NewKey(kname, value) - if err != nil { - return err - } - key.isAutoIncrement = isAutoIncr - key.Comment = strings.TrimSpace(p.comment.String()) - p.comment.Reset() - lastRegularKey = key - } - return nil -} diff --git a/tools/vendor/gopkg.in/ini.v1/section.go b/tools/vendor/gopkg.in/ini.v1/section.go deleted file mode 100644 index a3615d820..000000000 --- a/tools/vendor/gopkg.in/ini.v1/section.go +++ /dev/null @@ -1,256 +0,0 @@ -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package ini - -import ( - "errors" - "fmt" - "strings" -) - -// Section represents a config section. 
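The ErrDelimiterNotFound branch in parse above is what makes my.cnf-style flag lines usable: with AllowBooleanKeys the whole line becomes a key whose value is "true". A sketch:

    src := []byte("[mysqld]\nskip-name-resolve\nport = 3306\n")
    f4, _ := ini.LoadSources(ini.LoadOptions{AllowBooleanKeys: true}, src)
    fmt.Println(f4.Section("mysqld").Key("skip-name-resolve").MustBool(false)) // true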
-type Section struct { - f *File - Comment string - name string - keys map[string]*Key - keyList []string - keysHash map[string]string - - isRawSection bool - rawBody string -} - -func newSection(f *File, name string) *Section { - return &Section{ - f: f, - name: name, - keys: make(map[string]*Key), - keyList: make([]string, 0, 10), - keysHash: make(map[string]string), - } -} - -// Name returns name of Section. -func (s *Section) Name() string { - return s.name -} - -// Body returns rawBody of Section if the section was marked as unparseable. -// It still follows the other rules of the INI format surrounding leading/trailing whitespace. -func (s *Section) Body() string { - return strings.TrimSpace(s.rawBody) -} - -// SetBody updates body content only if section is raw. -func (s *Section) SetBody(body string) { - if !s.isRawSection { - return - } - s.rawBody = body -} - -// NewKey creates a new key to given section. -func (s *Section) NewKey(name, val string) (*Key, error) { - if len(name) == 0 { - return nil, errors.New("error creating new key: empty key name") - } else if s.f.options.Insensitive || s.f.options.InsensitiveKeys { - name = strings.ToLower(name) - } - - if s.f.BlockMode { - s.f.lock.Lock() - defer s.f.lock.Unlock() - } - - if inSlice(name, s.keyList) { - if s.f.options.AllowShadows { - if err := s.keys[name].addShadow(val); err != nil { - return nil, err - } - } else { - s.keys[name].value = val - s.keysHash[name] = val - } - return s.keys[name], nil - } - - s.keyList = append(s.keyList, name) - s.keys[name] = newKey(s, name, val) - s.keysHash[name] = val - return s.keys[name], nil -} - -// NewBooleanKey creates a new boolean type key to given section. -func (s *Section) NewBooleanKey(name string) (*Key, error) { - key, err := s.NewKey(name, "true") - if err != nil { - return nil, err - } - - key.isBooleanType = true - return key, nil -} - -// GetKey returns key in section by given name. -func (s *Section) GetKey(name string) (*Key, error) { - if s.f.BlockMode { - s.f.lock.RLock() - } - if s.f.options.Insensitive || s.f.options.InsensitiveKeys { - name = strings.ToLower(name) - } - key := s.keys[name] - if s.f.BlockMode { - s.f.lock.RUnlock() - } - - if key == nil { - // Check if it is a child-section. - sname := s.name - for { - if i := strings.LastIndex(sname, s.f.options.ChildSectionDelimiter); i > -1 { - sname = sname[:i] - sec, err := s.f.GetSection(sname) - if err != nil { - continue - } - return sec.GetKey(name) - } - break - } - return nil, fmt.Errorf("error when getting key of section %q: key %q not exists", s.name, name) - } - return key, nil -} - -// HasKey returns true if section contains a key with given name. -func (s *Section) HasKey(name string) bool { - key, _ := s.GetKey(name) - return key != nil -} - -// Deprecated: Use "HasKey" instead. -func (s *Section) Haskey(name string) bool { - return s.HasKey(name) -} - -// HasValue returns true if section contains given raw value. -func (s *Section) HasValue(value string) bool { - if s.f.BlockMode { - s.f.lock.RLock() - defer s.f.lock.RUnlock() - } - - for _, k := range s.keys { - if value == k.value { - return true - } - } - return false -} - -// Key assumes named Key exists in section and returns a zero-value when not. -func (s *Section) Key(name string) *Key { - key, err := s.GetKey(name) - if err != nil { - // It's OK here because the only possible error is empty key name, - // but if it's empty, this piece of code won't be executed. 
- key, _ = s.NewKey(name, "") - return key - } - return key -} - -// Keys returns list of keys of section. -func (s *Section) Keys() []*Key { - keys := make([]*Key, len(s.keyList)) - for i := range s.keyList { - keys[i] = s.Key(s.keyList[i]) - } - return keys -} - -// ParentKeys returns list of keys of parent section. -func (s *Section) ParentKeys() []*Key { - var parentKeys []*Key - sname := s.name - for { - if i := strings.LastIndex(sname, s.f.options.ChildSectionDelimiter); i > -1 { - sname = sname[:i] - sec, err := s.f.GetSection(sname) - if err != nil { - continue - } - parentKeys = append(parentKeys, sec.Keys()...) - } else { - break - } - - } - return parentKeys -} - -// KeyStrings returns list of key names of section. -func (s *Section) KeyStrings() []string { - list := make([]string, len(s.keyList)) - copy(list, s.keyList) - return list -} - -// KeysHash returns keys hash consisting of names and values. -func (s *Section) KeysHash() map[string]string { - if s.f.BlockMode { - s.f.lock.RLock() - defer s.f.lock.RUnlock() - } - - hash := make(map[string]string, len(s.keysHash)) - for key, value := range s.keysHash { - hash[key] = value - } - return hash -} - -// DeleteKey deletes a key from section. -func (s *Section) DeleteKey(name string) { - if s.f.BlockMode { - s.f.lock.Lock() - defer s.f.lock.Unlock() - } - - for i, k := range s.keyList { - if k == name { - s.keyList = append(s.keyList[:i], s.keyList[i+1:]...) - delete(s.keys, name) - delete(s.keysHash, name) - return - } - } -} - -// ChildSections returns a list of child sections of current section. -// For example, "[parent.child1]" and "[parent.child12]" are child sections -// of section "[parent]". -func (s *Section) ChildSections() []*Section { - prefix := s.name + s.f.options.ChildSectionDelimiter - children := make([]*Section, 0, 3) - for _, name := range s.f.sectionList { - if strings.HasPrefix(name, prefix) { - children = append(children, s.f.sections[name]...) - } - } - return children -} diff --git a/tools/vendor/gopkg.in/ini.v1/struct.go b/tools/vendor/gopkg.in/ini.v1/struct.go deleted file mode 100644 index a486b2fe0..000000000 --- a/tools/vendor/gopkg.in/ini.v1/struct.go +++ /dev/null @@ -1,747 +0,0 @@ -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package ini - -import ( - "bytes" - "errors" - "fmt" - "reflect" - "strings" - "time" - "unicode" -) - -// NameMapper represents a ini tag name mapper. -type NameMapper func(string) string - -// Built-in name getters. -var ( - // SnackCase converts to format SNACK_CASE. - SnackCase NameMapper = func(raw string) string { - newstr := make([]rune, 0, len(raw)) - for i, chr := range raw { - if isUpper := 'A' <= chr && chr <= 'Z'; isUpper { - if i > 0 { - newstr = append(newstr, '_') - } - } - newstr = append(newstr, unicode.ToUpper(chr)) - } - return string(newstr) - } - // TitleUnderscore converts to format title_underscore. 
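GetKey above falls back through parent sections using ChildSectionDelimiter, so child sections inherit keys they do not define themselves:

    src := []byte("[parent]\nhost = example.com\n\n[parent.child]\nport = 9000\n")
    f5, _ := ini.Load(src)
    child := f5.Section("parent.child")
    fmt.Println(child.Key("host").String())                // example.com, inherited from [parent]
    fmt.Println(len(f5.Section("parent").ChildSections())) // 1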
- TitleUnderscore NameMapper = func(raw string) string { - newstr := make([]rune, 0, len(raw)) - for i, chr := range raw { - if isUpper := 'A' <= chr && chr <= 'Z'; isUpper { - if i > 0 { - newstr = append(newstr, '_') - } - chr -= 'A' - 'a' - } - newstr = append(newstr, chr) - } - return string(newstr) - } -) - -func (s *Section) parseFieldName(raw, actual string) string { - if len(actual) > 0 { - return actual - } - if s.f.NameMapper != nil { - return s.f.NameMapper(raw) - } - return raw -} - -func parseDelim(actual string) string { - if len(actual) > 0 { - return actual - } - return "," -} - -var reflectTime = reflect.TypeOf(time.Now()).Kind() - -// setSliceWithProperType sets proper values to slice based on its type. -func setSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error { - var strs []string - if allowShadow { - strs = key.StringsWithShadows(delim) - } else { - strs = key.Strings(delim) - } - - numVals := len(strs) - if numVals == 0 { - return nil - } - - var vals interface{} - var err error - - sliceOf := field.Type().Elem().Kind() - switch sliceOf { - case reflect.String: - vals = strs - case reflect.Int: - vals, err = key.parseInts(strs, true, false) - case reflect.Int64: - vals, err = key.parseInt64s(strs, true, false) - case reflect.Uint: - vals, err = key.parseUints(strs, true, false) - case reflect.Uint64: - vals, err = key.parseUint64s(strs, true, false) - case reflect.Float64: - vals, err = key.parseFloat64s(strs, true, false) - case reflect.Bool: - vals, err = key.parseBools(strs, true, false) - case reflectTime: - vals, err = key.parseTimesFormat(time.RFC3339, strs, true, false) - default: - return fmt.Errorf("unsupported type '[]%s'", sliceOf) - } - if err != nil && isStrict { - return err - } - - slice := reflect.MakeSlice(field.Type(), numVals, numVals) - for i := 0; i < numVals; i++ { - switch sliceOf { - case reflect.String: - slice.Index(i).Set(reflect.ValueOf(vals.([]string)[i])) - case reflect.Int: - slice.Index(i).Set(reflect.ValueOf(vals.([]int)[i])) - case reflect.Int64: - slice.Index(i).Set(reflect.ValueOf(vals.([]int64)[i])) - case reflect.Uint: - slice.Index(i).Set(reflect.ValueOf(vals.([]uint)[i])) - case reflect.Uint64: - slice.Index(i).Set(reflect.ValueOf(vals.([]uint64)[i])) - case reflect.Float64: - slice.Index(i).Set(reflect.ValueOf(vals.([]float64)[i])) - case reflect.Bool: - slice.Index(i).Set(reflect.ValueOf(vals.([]bool)[i])) - case reflectTime: - slice.Index(i).Set(reflect.ValueOf(vals.([]time.Time)[i])) - } - } - field.Set(slice) - return nil -} - -func wrapStrictError(err error, isStrict bool) error { - if isStrict { - return err - } - return nil -} - -// setWithProperType sets proper value to field based on its type, -// but it does not return error for failing parsing, -// because we want to use default value that is already assigned to struct. 
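// A minimal usage sketch (hedged) of the ini.v1 struct-mapping API removed in
// this hunk; the MapToWithMapper signature and the TitleUnderscore mapper are
// taken from the deleted code itself, while the type, key, and value names
// below are illustrative only.
package main

import (
    "fmt"

    "gopkg.in/ini.v1"
)

type Config struct {
    MaxRetries int    // TitleUnderscore maps this field to key "max_retries"
    LogLevel   string // ...and this one to "log_level"
}

func main() {
    src := []byte("max_retries = 3\nlog_level = debug\n")
    var c Config
    // MapToWithMapper loads the source, then applies the NameMapper when
    // matching struct fields to INI keys.
    if err := ini.MapToWithMapper(&c, ini.TitleUnderscore, src); err != nil {
        panic(err)
    }
    fmt.Printf("%+v\n", c) // {MaxRetries:3 LogLevel:debug}
}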
-func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error { - vt := t - isPtr := t.Kind() == reflect.Ptr - if isPtr { - vt = t.Elem() - } - switch vt.Kind() { - case reflect.String: - stringVal := key.String() - if isPtr { - field.Set(reflect.ValueOf(&stringVal)) - } else if len(stringVal) > 0 { - field.SetString(key.String()) - } - case reflect.Bool: - boolVal, err := key.Bool() - if err != nil { - return wrapStrictError(err, isStrict) - } - if isPtr { - field.Set(reflect.ValueOf(&boolVal)) - } else { - field.SetBool(boolVal) - } - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - // ParseDuration will not return err for `0`, so check the type name - if vt.Name() == "Duration" { - durationVal, err := key.Duration() - if err != nil { - if intVal, err := key.Int64(); err == nil { - field.SetInt(intVal) - return nil - } - return wrapStrictError(err, isStrict) - } - if isPtr { - field.Set(reflect.ValueOf(&durationVal)) - } else if int64(durationVal) > 0 { - field.Set(reflect.ValueOf(durationVal)) - } - return nil - } - - intVal, err := key.Int64() - if err != nil { - return wrapStrictError(err, isStrict) - } - if isPtr { - pv := reflect.New(t.Elem()) - pv.Elem().SetInt(intVal) - field.Set(pv) - } else { - field.SetInt(intVal) - } - // byte is an alias for uint8, so supporting uint8 breaks support for byte - case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: - durationVal, err := key.Duration() - // Skip zero value - if err == nil && uint64(durationVal) > 0 { - if isPtr { - field.Set(reflect.ValueOf(&durationVal)) - } else { - field.Set(reflect.ValueOf(durationVal)) - } - return nil - } - - uintVal, err := key.Uint64() - if err != nil { - return wrapStrictError(err, isStrict) - } - if isPtr { - pv := reflect.New(t.Elem()) - pv.Elem().SetUint(uintVal) - field.Set(pv) - } else { - field.SetUint(uintVal) - } - - case reflect.Float32, reflect.Float64: - floatVal, err := key.Float64() - if err != nil { - return wrapStrictError(err, isStrict) - } - if isPtr { - pv := reflect.New(t.Elem()) - pv.Elem().SetFloat(floatVal) - field.Set(pv) - } else { - field.SetFloat(floatVal) - } - case reflectTime: - timeVal, err := key.Time() - if err != nil { - return wrapStrictError(err, isStrict) - } - if isPtr { - field.Set(reflect.ValueOf(&timeVal)) - } else { - field.Set(reflect.ValueOf(timeVal)) - } - case reflect.Slice: - return setSliceWithProperType(key, field, delim, allowShadow, isStrict) - default: - return fmt.Errorf("unsupported type %q", t) - } - return nil -} - -func parseTagOptions(tag string) (rawName string, omitEmpty bool, allowShadow bool, allowNonUnique bool, extends bool) { - opts := strings.SplitN(tag, ",", 5) - rawName = opts[0] - for _, opt := range opts[1:] { - omitEmpty = omitEmpty || (opt == "omitempty") - allowShadow = allowShadow || (opt == "allowshadow") - allowNonUnique = allowNonUnique || (opt == "nonunique") - extends = extends || (opt == "extends") - } - return rawName, omitEmpty, allowShadow, allowNonUnique, extends -} - -// mapToField maps the given value to the matching field of the given section. -// The sectionIndex is the index (if non unique sections are enabled) to which the value should be added. 
-func (s *Section) mapToField(val reflect.Value, isStrict bool, sectionIndex int, sectionName string) error { - if val.Kind() == reflect.Ptr { - val = val.Elem() - } - typ := val.Type() - - for i := 0; i < typ.NumField(); i++ { - field := val.Field(i) - tpField := typ.Field(i) - - tag := tpField.Tag.Get("ini") - if tag == "-" { - continue - } - - rawName, _, allowShadow, allowNonUnique, extends := parseTagOptions(tag) - fieldName := s.parseFieldName(tpField.Name, rawName) - if len(fieldName) == 0 || !field.CanSet() { - continue - } - - isStruct := tpField.Type.Kind() == reflect.Struct - isStructPtr := tpField.Type.Kind() == reflect.Ptr && tpField.Type.Elem().Kind() == reflect.Struct - isAnonymousPtr := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous - if isAnonymousPtr { - field.Set(reflect.New(tpField.Type.Elem())) - } - - if extends && (isAnonymousPtr || (isStruct && tpField.Anonymous)) { - if isStructPtr && field.IsNil() { - field.Set(reflect.New(tpField.Type.Elem())) - } - fieldSection := s - if rawName != "" { - sectionName = s.name + s.f.options.ChildSectionDelimiter + rawName - if secs, err := s.f.SectionsByName(sectionName); err == nil && sectionIndex < len(secs) { - fieldSection = secs[sectionIndex] - } - } - if err := fieldSection.mapToField(field, isStrict, sectionIndex, sectionName); err != nil { - return fmt.Errorf("map to field %q: %v", fieldName, err) - } - } else if isAnonymousPtr || isStruct || isStructPtr { - if secs, err := s.f.SectionsByName(fieldName); err == nil { - if len(secs) <= sectionIndex { - return fmt.Errorf("there are not enough sections (%d <= %d) for the field %q", len(secs), sectionIndex, fieldName) - } - // Only set the field to non-nil struct value if we have a section for it. - // Otherwise, we end up with a non-nil struct ptr even though there is no data. - if isStructPtr && field.IsNil() { - field.Set(reflect.New(tpField.Type.Elem())) - } - if err = secs[sectionIndex].mapToField(field, isStrict, sectionIndex, fieldName); err != nil { - return fmt.Errorf("map to field %q: %v", fieldName, err) - } - continue - } - } - - // Map non-unique sections - if allowNonUnique && tpField.Type.Kind() == reflect.Slice { - newField, err := s.mapToSlice(fieldName, field, isStrict) - if err != nil { - return fmt.Errorf("map to slice %q: %v", fieldName, err) - } - - field.Set(newField) - continue - } - - if key, err := s.GetKey(fieldName); err == nil { - delim := parseDelim(tpField.Tag.Get("delim")) - if err = setWithProperType(tpField.Type, key, field, delim, allowShadow, isStrict); err != nil { - return fmt.Errorf("set field %q: %v", fieldName, err) - } - } - } - return nil -} - -// mapToSlice maps all sections with the same name and returns the new value. -// The type of the Value must be a slice. -func (s *Section) mapToSlice(secName string, val reflect.Value, isStrict bool) (reflect.Value, error) { - secs, err := s.f.SectionsByName(secName) - if err != nil { - return reflect.Value{}, err - } - - typ := val.Type().Elem() - for i, sec := range secs { - elem := reflect.New(typ) - if err = sec.mapToField(elem, isStrict, i, sec.name); err != nil { - return reflect.Value{}, fmt.Errorf("map to field from section %q: %v", secName, err) - } - - val = reflect.Append(val, elem.Elem()) - } - return val, nil -} - -// mapTo maps a section to object v. 
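// The mapToSlice path above maps repeated sections onto a slice field when
// non-unique sections are enabled. A hedged sketch of that usage; the
// "nonunique" tag option and AllowNonUniqueSections appear in the deleted
// code, while section and field names here are illustrative.
package main

import (
    "fmt"

    "gopkg.in/ini.v1"
)

type Peer struct {
    Addr string `ini:"addr"`
}

type Config struct {
    Peers []Peer `ini:"peer,nonunique"` // one element per [peer] section
}

func main() {
    src := []byte("[peer]\naddr = 10.0.0.1\n[peer]\naddr = 10.0.0.2\n")
    f, err := ini.LoadSources(ini.LoadOptions{AllowNonUniqueSections: true}, src)
    if err != nil {
        panic(err)
    }
    var c Config
    if err := f.MapTo(&c); err != nil {
        panic(err)
    }
    fmt.Println(len(c.Peers)) // 2
}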
-func (s *Section) mapTo(v interface{}, isStrict bool) error { - typ := reflect.TypeOf(v) - val := reflect.ValueOf(v) - if typ.Kind() == reflect.Ptr { - typ = typ.Elem() - val = val.Elem() - } else { - return errors.New("not a pointer to a struct") - } - - if typ.Kind() == reflect.Slice { - newField, err := s.mapToSlice(s.name, val, isStrict) - if err != nil { - return err - } - - val.Set(newField) - return nil - } - - return s.mapToField(val, isStrict, 0, s.name) -} - -// MapTo maps section to given struct. -func (s *Section) MapTo(v interface{}) error { - return s.mapTo(v, false) -} - -// StrictMapTo maps section to given struct in strict mode, -// which returns all possible error including value parsing error. -func (s *Section) StrictMapTo(v interface{}) error { - return s.mapTo(v, true) -} - -// MapTo maps file to given struct. -func (f *File) MapTo(v interface{}) error { - return f.Section("").MapTo(v) -} - -// StrictMapTo maps file to given struct in strict mode, -// which returns all possible error including value parsing error. -func (f *File) StrictMapTo(v interface{}) error { - return f.Section("").StrictMapTo(v) -} - -// MapToWithMapper maps data sources to given struct with name mapper. -func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error { - cfg, err := Load(source, others...) - if err != nil { - return err - } - cfg.NameMapper = mapper - return cfg.MapTo(v) -} - -// StrictMapToWithMapper maps data sources to given struct with name mapper in strict mode, -// which returns all possible error including value parsing error. -func StrictMapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error { - cfg, err := Load(source, others...) - if err != nil { - return err - } - cfg.NameMapper = mapper - return cfg.StrictMapTo(v) -} - -// MapTo maps data sources to given struct. -func MapTo(v, source interface{}, others ...interface{}) error { - return MapToWithMapper(v, nil, source, others...) -} - -// StrictMapTo maps data sources to given struct in strict mode, -// which returns all possible error including value parsing error. -func StrictMapTo(v, source interface{}, others ...interface{}) error { - return StrictMapToWithMapper(v, nil, source, others...) -} - -// reflectSliceWithProperType does the opposite thing as setSliceWithProperType. 
-func reflectSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow bool) error { - slice := field.Slice(0, field.Len()) - if field.Len() == 0 { - return nil - } - sliceOf := field.Type().Elem().Kind() - - if allowShadow { - var keyWithShadows *Key - for i := 0; i < field.Len(); i++ { - var val string - switch sliceOf { - case reflect.String: - val = slice.Index(i).String() - case reflect.Int, reflect.Int64: - val = fmt.Sprint(slice.Index(i).Int()) - case reflect.Uint, reflect.Uint64: - val = fmt.Sprint(slice.Index(i).Uint()) - case reflect.Float64: - val = fmt.Sprint(slice.Index(i).Float()) - case reflect.Bool: - val = fmt.Sprint(slice.Index(i).Bool()) - case reflectTime: - val = slice.Index(i).Interface().(time.Time).Format(time.RFC3339) - default: - return fmt.Errorf("unsupported type '[]%s'", sliceOf) - } - - if i == 0 { - keyWithShadows = newKey(key.s, key.name, val) - } else { - _ = keyWithShadows.AddShadow(val) - } - } - *key = *keyWithShadows - return nil - } - - var buf bytes.Buffer - for i := 0; i < field.Len(); i++ { - switch sliceOf { - case reflect.String: - buf.WriteString(slice.Index(i).String()) - case reflect.Int, reflect.Int64: - buf.WriteString(fmt.Sprint(slice.Index(i).Int())) - case reflect.Uint, reflect.Uint64: - buf.WriteString(fmt.Sprint(slice.Index(i).Uint())) - case reflect.Float64: - buf.WriteString(fmt.Sprint(slice.Index(i).Float())) - case reflect.Bool: - buf.WriteString(fmt.Sprint(slice.Index(i).Bool())) - case reflectTime: - buf.WriteString(slice.Index(i).Interface().(time.Time).Format(time.RFC3339)) - default: - return fmt.Errorf("unsupported type '[]%s'", sliceOf) - } - buf.WriteString(delim) - } - key.SetValue(buf.String()[:buf.Len()-len(delim)]) - return nil -} - -// reflectWithProperType does the opposite thing as setWithProperType. -func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow bool) error { - switch t.Kind() { - case reflect.String: - key.SetValue(field.String()) - case reflect.Bool: - key.SetValue(fmt.Sprint(field.Bool())) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - key.SetValue(fmt.Sprint(field.Int())) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - key.SetValue(fmt.Sprint(field.Uint())) - case reflect.Float32, reflect.Float64: - key.SetValue(fmt.Sprint(field.Float())) - case reflectTime: - key.SetValue(fmt.Sprint(field.Interface().(time.Time).Format(time.RFC3339))) - case reflect.Slice: - return reflectSliceWithProperType(key, field, delim, allowShadow) - case reflect.Ptr: - if !field.IsNil() { - return reflectWithProperType(t.Elem(), key, field.Elem(), delim, allowShadow) - } - default: - return fmt.Errorf("unsupported type %q", t) - } - return nil -} - -// CR: copied from encoding/json/encode.go with modifications of time.Time support. -// TODO: add more test coverage. 
-func isEmptyValue(v reflect.Value) bool { - switch v.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() - case reflectTime: - t, ok := v.Interface().(time.Time) - return ok && t.IsZero() - } - return false -} - -// StructReflector is the interface implemented by struct types that can extract themselves into INI objects. -type StructReflector interface { - ReflectINIStruct(*File) error -} - -func (s *Section) reflectFrom(val reflect.Value) error { - if val.Kind() == reflect.Ptr { - val = val.Elem() - } - typ := val.Type() - - for i := 0; i < typ.NumField(); i++ { - if !val.Field(i).CanInterface() { - continue - } - - field := val.Field(i) - tpField := typ.Field(i) - - tag := tpField.Tag.Get("ini") - if tag == "-" { - continue - } - - rawName, omitEmpty, allowShadow, allowNonUnique, extends := parseTagOptions(tag) - if omitEmpty && isEmptyValue(field) { - continue - } - - if r, ok := field.Interface().(StructReflector); ok { - return r.ReflectINIStruct(s.f) - } - - fieldName := s.parseFieldName(tpField.Name, rawName) - if len(fieldName) == 0 || !field.CanSet() { - continue - } - - if extends && tpField.Anonymous && (tpField.Type.Kind() == reflect.Ptr || tpField.Type.Kind() == reflect.Struct) { - if err := s.reflectFrom(field); err != nil { - return fmt.Errorf("reflect from field %q: %v", fieldName, err) - } - continue - } - - if (tpField.Type.Kind() == reflect.Ptr && tpField.Type.Elem().Kind() == reflect.Struct) || - (tpField.Type.Kind() == reflect.Struct && tpField.Type.Name() != "Time") { - // Note: The only error here is section doesn't exist. - sec, err := s.f.GetSection(fieldName) - if err != nil { - // Note: fieldName can never be empty here, ignore error. - sec, _ = s.f.NewSection(fieldName) - } - - // Add comment from comment tag - if len(sec.Comment) == 0 { - sec.Comment = tpField.Tag.Get("comment") - } - - if err = sec.reflectFrom(field); err != nil { - return fmt.Errorf("reflect from field %q: %v", fieldName, err) - } - continue - } - - if allowNonUnique && tpField.Type.Kind() == reflect.Slice { - slice := field.Slice(0, field.Len()) - if field.Len() == 0 { - return nil - } - sliceOf := field.Type().Elem().Kind() - - for i := 0; i < field.Len(); i++ { - if sliceOf != reflect.Struct && sliceOf != reflect.Ptr { - return fmt.Errorf("field %q is not a slice of pointer or struct", fieldName) - } - - sec, err := s.f.NewSection(fieldName) - if err != nil { - return err - } - - // Add comment from comment tag - if len(sec.Comment) == 0 { - sec.Comment = tpField.Tag.Get("comment") - } - - if err := sec.reflectFrom(slice.Index(i)); err != nil { - return fmt.Errorf("reflect from field %q: %v", fieldName, err) - } - } - continue - } - - // Note: Same reason as section. 
- key, err := s.GetKey(fieldName) - if err != nil { - key, _ = s.NewKey(fieldName, "") - } - - // Add comment from comment tag - if len(key.Comment) == 0 { - key.Comment = tpField.Tag.Get("comment") - } - - delim := parseDelim(tpField.Tag.Get("delim")) - if err = reflectWithProperType(tpField.Type, key, field, delim, allowShadow); err != nil { - return fmt.Errorf("reflect field %q: %v", fieldName, err) - } - - } - return nil -} - -// ReflectFrom reflects section from given struct. It overwrites existing ones. -func (s *Section) ReflectFrom(v interface{}) error { - typ := reflect.TypeOf(v) - val := reflect.ValueOf(v) - - if s.name != DefaultSection && s.f.options.AllowNonUniqueSections && - (typ.Kind() == reflect.Slice || typ.Kind() == reflect.Ptr) { - // Clear sections to make sure none exists before adding the new ones - s.f.DeleteSection(s.name) - - if typ.Kind() == reflect.Ptr { - sec, err := s.f.NewSection(s.name) - if err != nil { - return err - } - return sec.reflectFrom(val.Elem()) - } - - slice := val.Slice(0, val.Len()) - sliceOf := val.Type().Elem().Kind() - if sliceOf != reflect.Ptr { - return fmt.Errorf("not a slice of pointers") - } - - for i := 0; i < slice.Len(); i++ { - sec, err := s.f.NewSection(s.name) - if err != nil { - return err - } - - err = sec.reflectFrom(slice.Index(i)) - if err != nil { - return fmt.Errorf("reflect from %dth field: %v", i, err) - } - } - - return nil - } - - if typ.Kind() == reflect.Ptr { - val = val.Elem() - } else { - return errors.New("not a pointer to a struct") - } - - return s.reflectFrom(val) -} - -// ReflectFrom reflects file from given struct. -func (f *File) ReflectFrom(v interface{}) error { - return f.Section("").ReflectFrom(v) -} - -// ReflectFromWithMapper reflects data sources from given struct with name mapper. -func ReflectFromWithMapper(cfg *File, v interface{}, mapper NameMapper) error { - cfg.NameMapper = mapper - return cfg.ReflectFrom(v) -} - -// ReflectFrom reflects data sources from given struct. 
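// ReflectFrom, shown above, is the inverse of MapTo: it serializes a struct
// back into INI sections and keys, honoring the "comment" tag. Minimal
// sketch of that (now unvendored) API; field names are illustrative.
package main

import (
    "os"

    "gopkg.in/ini.v1"
)

type Config struct {
    Name    string `ini:"name" comment:"Service name"`
    Retries int    `ini:"retries"`
}

func main() {
    cfg := ini.Empty()
    if err := ini.ReflectFrom(cfg, &Config{Name: "svc", Retries: 3}); err != nil {
        panic(err)
    }
    // Writes roughly:
    //   ; Service name
    //   name    = svc
    //   retries = 3
    _, _ = cfg.WriteTo(os.Stdout)
}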
-func ReflectFrom(cfg *File, v interface{}) error { - return ReflectFromWithMapper(cfg, v, nil) -} diff --git a/tools/vendor/modules.txt b/tools/vendor/modules.txt index 1c0db20fd..0eb1b3df2 100644 --- a/tools/vendor/modules.txt +++ b/tools/vendor/modules.txt @@ -1,46 +1,46 @@ -# 4d63.com/gocheckcompilerdirectives v1.2.1 -## explicit; go 1.19 +# 4d63.com/gocheckcompilerdirectives v1.3.0 +## explicit; go 1.22.0 4d63.com/gocheckcompilerdirectives/checkcompilerdirectives # 4d63.com/gochecknoglobals v0.2.2 ## explicit; go 1.18 4d63.com/gochecknoglobals/checknoglobals -# github.com/4meepo/tagalign v1.4.1 -## explicit; go 1.21.0 +# github.com/4meepo/tagalign v1.4.2 +## explicit; go 1.22.0 github.com/4meepo/tagalign # github.com/Abirdcfly/dupword v0.1.3 ## explicit; go 1.22.0 github.com/Abirdcfly/dupword -# github.com/Antonboom/errname v1.0.0 -## explicit; go 1.22.1 +# github.com/Antonboom/errname v1.1.0 +## explicit; go 1.23.0 github.com/Antonboom/errname/pkg/analyzer -# github.com/Antonboom/nilnil v1.0.1 -## explicit; go 1.22.0 +# github.com/Antonboom/nilnil v1.1.0 +## explicit; go 1.23.0 github.com/Antonboom/nilnil/pkg/analyzer -# github.com/Antonboom/testifylint v1.5.2 -## explicit; go 1.22.1 +# github.com/Antonboom/testifylint v1.6.0 +## explicit; go 1.23.0 github.com/Antonboom/testifylint/analyzer github.com/Antonboom/testifylint/internal/analysisutil github.com/Antonboom/testifylint/internal/checkers github.com/Antonboom/testifylint/internal/checkers/printf github.com/Antonboom/testifylint/internal/config github.com/Antonboom/testifylint/internal/testify -# github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c +# github.com/BurntSushi/toml v1.5.0 ## explicit; go 1.18 github.com/BurntSushi/toml github.com/BurntSushi/toml/internal # github.com/Crocmagnon/fatcontext v0.7.1 ## explicit; go 1.22.0 github.com/Crocmagnon/fatcontext/pkg/analyzer -# github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 +# github.com/Djarvur/go-err113 v0.1.0 ## explicit; go 1.13 github.com/Djarvur/go-err113 -# github.com/GaijinEntertainment/go-exhaustruct/v3 v3.3.0 -## explicit; go 1.21 +# github.com/GaijinEntertainment/go-exhaustruct/v3 v3.3.1 +## explicit; go 1.23.0 github.com/GaijinEntertainment/go-exhaustruct/v3/analyzer github.com/GaijinEntertainment/go-exhaustruct/v3/internal/comment github.com/GaijinEntertainment/go-exhaustruct/v3/internal/pattern github.com/GaijinEntertainment/go-exhaustruct/v3/internal/structure -# github.com/Masterminds/semver/v3 v3.3.0 +# github.com/Masterminds/semver/v3 v3.3.1 ## explicit; go 1.21 github.com/Masterminds/semver/v3 # github.com/OpenPeeDeeP/depguard/v2 v2.2.1 @@ -78,8 +78,8 @@ github.com/bkielbasa/cyclop/pkg/analyzer # github.com/blizzy78/varnamelen v0.8.0 ## explicit; go 1.16 github.com/blizzy78/varnamelen -# github.com/bombsimon/wsl/v4 v4.5.0 -## explicit; go 1.22 +# github.com/bombsimon/wsl/v4 v4.6.0 +## explicit; go 1.23 github.com/bombsimon/wsl/v4 # github.com/breml/bidichk v0.3.3 ## explicit; go 1.23.0 @@ -96,8 +96,8 @@ github.com/butuzov/ireturn/analyzer/internal/types ## explicit; go 1.19 github.com/butuzov/mirror github.com/butuzov/mirror/internal/checker -# github.com/catenacyber/perfsprint v0.8.1 -## explicit; go 1.20 +# github.com/catenacyber/perfsprint v0.9.1 +## explicit; go 1.22.0 github.com/catenacyber/perfsprint/analyzer # github.com/ccojocar/zxcvbn-go v1.0.2 ## explicit; go 1.20 @@ -155,17 +155,18 @@ github.com/fatih/structtag # github.com/firefart/nonamedreturns v1.0.5 ## explicit; go 1.18 github.com/firefart/nonamedreturns/analyzer -# 
github.com/fsnotify/fsnotify v1.5.4 -## explicit; go 1.16 +# github.com/fsnotify/fsnotify v1.8.0 +## explicit; go 1.17 github.com/fsnotify/fsnotify +github.com/fsnotify/fsnotify/internal # github.com/fzipp/gocyclo v0.6.0 ## explicit; go 1.18 github.com/fzipp/gocyclo # github.com/ghostiam/protogetter v0.3.12 ## explicit; go 1.22.0 github.com/ghostiam/protogetter -# github.com/go-critic/go-critic v0.12.0 -## explicit; go 1.22.0 +# github.com/go-critic/go-critic v0.13.0 +## explicit; go 1.23.0 github.com/go-critic/go-critic/checkers github.com/go-critic/go-critic/checkers/internal/astwalk github.com/go-critic/go-critic/checkers/internal/lintutil @@ -212,13 +213,6 @@ github.com/gobwas/glob/util/strings # github.com/gofrs/flock v0.12.1 ## explicit; go 1.21.0 github.com/gofrs/flock -# github.com/golang/protobuf v1.5.4 -## explicit; go 1.17 -github.com/golang/protobuf/proto -github.com/golang/protobuf/ptypes -github.com/golang/protobuf/ptypes/any -github.com/golang/protobuf/ptypes/duration -github.com/golang/protobuf/ptypes/timestamp # github.com/golangci/dupl v0.0.0-20250308024227-f665c8d69b32 ## explicit; go 1.22.0 github.com/golangci/dupl/job @@ -233,11 +227,7 @@ github.com/golangci/go-printf-func-name/pkg/analyzer # github.com/golangci/gofmt v0.0.0-20250106114630-d62b90e6713d ## explicit; go 1.22.0 github.com/golangci/gofmt/gofmt -<<<<<<< HEAD -# github.com/golangci/golangci-lint v1.64.5 -======= # github.com/golangci/golangci-lint v1.64.8 ->>>>>>> 916eb401 (chore(deps): update module golang.org/x/net to v0.36.0 [security]) ## explicit; go 1.23.0 github.com/golangci/golangci-lint/cmd/golangci-lint github.com/golangci/golangci-lint/internal/cache @@ -401,8 +391,8 @@ github.com/golangci/revgrep # github.com/golangci/unconvert v0.0.0-20240309020433-c5143eacb3ed ## explicit; go 1.20 github.com/golangci/unconvert -# github.com/google/go-cmp v0.6.0 -## explicit; go 1.13 +# github.com/google/go-cmp v0.7.0 +## explicit; go 1.21 github.com/google/go-cmp/cmp github.com/google/go-cmp/cmp/internal/diff github.com/google/go-cmp/cmp/internal/flags @@ -414,8 +404,8 @@ github.com/gordonklaus/ineffassign/pkg/ineffassign # github.com/gostaticanalysis/analysisutil v0.7.1 ## explicit; go 1.16 github.com/gostaticanalysis/analysisutil -# github.com/gostaticanalysis/comment v1.4.2 -## explicit; go 1.15 +# github.com/gostaticanalysis/comment v1.5.0 +## explicit; go 1.22.9 github.com/gostaticanalysis/comment github.com/gostaticanalysis/comment/passes/commentmap # github.com/gostaticanalysis/forcetypeassert v0.2.0 @@ -434,18 +424,6 @@ github.com/hashicorp/go-version ## explicit; go 1.18 github.com/hashicorp/golang-lru/v2/internal github.com/hashicorp/golang-lru/v2/simplelru -# github.com/hashicorp/hcl v1.0.0 -## explicit -github.com/hashicorp/hcl -github.com/hashicorp/hcl/hcl/ast -github.com/hashicorp/hcl/hcl/parser -github.com/hashicorp/hcl/hcl/printer -github.com/hashicorp/hcl/hcl/scanner -github.com/hashicorp/hcl/hcl/strconv -github.com/hashicorp/hcl/hcl/token -github.com/hashicorp/hcl/json/parser -github.com/hashicorp/hcl/json/scanner -github.com/hashicorp/hcl/json/token # github.com/hexops/gotextdiff v1.0.3 ## explicit; go 1.16 github.com/hexops/gotextdiff @@ -469,11 +447,11 @@ github.com/julz/importas # github.com/karamaru-alpha/copyloopvar v1.2.1 ## explicit; go 1.21 github.com/karamaru-alpha/copyloopvar -# github.com/kisielk/errcheck v1.8.0 -## explicit; go 1.18 +# github.com/kisielk/errcheck v1.9.0 +## explicit; go 1.22.0 github.com/kisielk/errcheck/errcheck -# github.com/kkHAIKE/contextcheck v1.1.5 -## 
explicit; go 1.20 +# github.com/kkHAIKE/contextcheck v1.1.6 +## explicit; go 1.23.0 github.com/kkHAIKE/contextcheck # github.com/kulti/thelper v0.6.3 ## explicit; go 1.18 @@ -484,7 +462,7 @@ github.com/kunwardeep/paralleltest/pkg/paralleltest # github.com/lasiar/canonicalheader v1.1.2 ## explicit; go 1.22.0 github.com/lasiar/canonicalheader -# github.com/ldez/exptostd v0.4.1 +# github.com/ldez/exptostd v0.4.2 ## explicit; go 1.22.0 github.com/ldez/exptostd # github.com/ldez/gomoddirectives v0.6.1 @@ -508,12 +486,9 @@ github.com/leonklingele/grouper/pkg/analyzer/globals github.com/leonklingele/grouper/pkg/analyzer/imports github.com/leonklingele/grouper/pkg/analyzer/types github.com/leonklingele/grouper/pkg/analyzer/vars -# github.com/macabu/inamedparam v0.1.3 -## explicit; go 1.20 +# github.com/macabu/inamedparam v0.2.0 +## explicit; go 1.23.0 github.com/macabu/inamedparam -# github.com/magiconair/properties v1.8.7 -## explicit; go 1.19 -github.com/magiconair/properties # github.com/maratori/testableexamples v1.0.0 ## explicit; go 1.19 github.com/maratori/testableexamples/pkg/testableexamples @@ -532,10 +507,7 @@ github.com/mattn/go-isatty # github.com/mattn/go-runewidth v0.0.16 ## explicit; go 1.9 github.com/mattn/go-runewidth -# github.com/matttproud/golang_protobuf_extensions v1.0.1 -## explicit -github.com/matttproud/golang_protobuf_extensions/pbutil -# github.com/mgechev/revive v1.6.1 +# github.com/mgechev/revive v1.7.0 ## explicit; go 1.22.1 github.com/mgechev/revive/config github.com/mgechev/revive/formatter @@ -547,14 +519,14 @@ github.com/mgechev/revive/rule # github.com/mitchellh/go-homedir v1.1.0 ## explicit github.com/mitchellh/go-homedir -# github.com/mitchellh/mapstructure v1.5.0 -## explicit; go 1.14 -github.com/mitchellh/mapstructure # github.com/moricho/tparallel v0.3.2 ## explicit; go 1.20 github.com/moricho/tparallel github.com/moricho/tparallel/pkg/ssafunc github.com/moricho/tparallel/pkg/ssainstr +# github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 +## explicit +github.com/munnerz/goautoneg # github.com/nakabonne/nestif v0.3.1 ## explicit; go 1.15 github.com/nakabonne/nestif @@ -564,7 +536,7 @@ github.com/nishanths/exhaustive # github.com/nishanths/predeclared v0.2.2 ## explicit; go 1.14 github.com/nishanths/predeclared/passes/predeclared -# github.com/nunnatsa/ginkgolinter v0.19.0 +# github.com/nunnatsa/ginkgolinter v0.19.1 ## explicit; go 1.23.0 github.com/nunnatsa/ginkgolinter github.com/nunnatsa/ginkgolinter/internal/expression @@ -573,6 +545,7 @@ github.com/nunnatsa/ginkgolinter/internal/expression/matcher github.com/nunnatsa/ginkgolinter/internal/expression/value github.com/nunnatsa/ginkgolinter/internal/formatter github.com/nunnatsa/ginkgolinter/internal/ginkgohandler +github.com/nunnatsa/ginkgolinter/internal/ginkgoinfo github.com/nunnatsa/ginkgolinter/internal/gomegahandler github.com/nunnatsa/ginkgolinter/internal/gomegainfo github.com/nunnatsa/ginkgolinter/internal/interfaces @@ -586,9 +559,6 @@ github.com/nunnatsa/ginkgolinter/version # github.com/olekukonko/tablewriter v0.0.5 ## explicit; go 1.12 github.com/olekukonko/tablewriter -# github.com/pelletier/go-toml v1.9.5 -## explicit; go 1.12 -github.com/pelletier/go-toml # github.com/pelletier/go-toml/v2 v2.2.3 ## explicit; go 1.21.0 github.com/pelletier/go-toml/v2 @@ -602,26 +572,26 @@ github.com/pmezard/go-difflib/difflib # github.com/polyfloyd/go-errorlint v1.7.1 ## explicit; go 1.22.0 github.com/polyfloyd/go-errorlint/errorlint -# github.com/prometheus/client_golang v1.12.1 -## explicit; go 
1.13 +# github.com/prometheus/client_golang v1.21.1 +## explicit; go 1.21 github.com/prometheus/client_golang/prometheus github.com/prometheus/client_golang/prometheus/internal github.com/prometheus/client_golang/prometheus/testutil/promlint -# github.com/prometheus/client_model v0.2.0 -## explicit; go 1.9 +github.com/prometheus/client_golang/prometheus/testutil/promlint/validations +# github.com/prometheus/client_model v0.6.1 +## explicit; go 1.19 github.com/prometheus/client_model/go -# github.com/prometheus/common v0.32.1 -## explicit; go 1.13 +# github.com/prometheus/common v0.63.0 +## explicit; go 1.21 github.com/prometheus/common/expfmt -github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg github.com/prometheus/common/model -# github.com/prometheus/procfs v0.7.3 -## explicit; go 1.13 +# github.com/prometheus/procfs v0.16.0 +## explicit; go 1.21 github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util -# github.com/quasilyte/go-ruleguard v0.4.3-0.20240823090925-0fe6f58b47b1 -## explicit; go 1.19 +# github.com/quasilyte/go-ruleguard v0.4.4 +## explicit; go 1.22.0 github.com/quasilyte/go-ruleguard/internal/goenv github.com/quasilyte/go-ruleguard/internal/golist github.com/quasilyte/go-ruleguard/internal/xsrcimporter @@ -658,19 +628,22 @@ github.com/raeperd/recvcheck # github.com/rivo/uniseg v0.4.7 ## explicit; go 1.18 github.com/rivo/uniseg -# github.com/rogpeppe/go-internal v1.13.1 -## explicit; go 1.22 +# github.com/rogpeppe/go-internal v1.14.1 +## explicit; go 1.23 github.com/rogpeppe/go-internal/diff github.com/rogpeppe/go-internal/internal/syscall/windows github.com/rogpeppe/go-internal/internal/syscall/windows/sysdll github.com/rogpeppe/go-internal/lockedfile github.com/rogpeppe/go-internal/lockedfile/internal/filelock -# github.com/ryancurrah/gomodguard v1.3.5 -## explicit; go 1.22.0 +# github.com/ryancurrah/gomodguard v1.4.1 +## explicit; go 1.23.0 github.com/ryancurrah/gomodguard # github.com/ryanrolds/sqlclosecheck v0.5.1 ## explicit; go 1.20 github.com/ryanrolds/sqlclosecheck/pkg/analyzer +# github.com/sagikazarmark/locafero v0.7.0 +## explicit; go 1.21 +github.com/sagikazarmark/locafero # github.com/sanposhiho/wastedassign/v2 v2.1.0 ## explicit; go 1.18 github.com/sanposhiho/wastedassign/v2 @@ -706,37 +679,37 @@ github.com/sivchari/tenv github.com/sonatard/noctx github.com/sonatard/noctx/ngfunc github.com/sonatard/noctx/reqwithoutctx +# github.com/sourcegraph/conc v0.3.0 +## explicit; go 1.19 +github.com/sourcegraph/conc +github.com/sourcegraph/conc/internal/multierror +github.com/sourcegraph/conc/iter +github.com/sourcegraph/conc/panics # github.com/sourcegraph/go-diff v0.7.0 ## explicit; go 1.14 github.com/sourcegraph/go-diff/diff -# github.com/spf13/afero v1.12.0 -## explicit; go 1.21 +# github.com/spf13/afero v1.14.0 +## explicit; go 1.23.0 github.com/spf13/afero github.com/spf13/afero/internal/common github.com/spf13/afero/mem -# github.com/spf13/cast v1.5.0 -## explicit; go 1.18 +# github.com/spf13/cast v1.7.1 +## explicit; go 1.19 github.com/spf13/cast -# github.com/spf13/cobra v1.8.1 +# github.com/spf13/cobra v1.9.1 ## explicit; go 1.15 github.com/spf13/cobra -# github.com/spf13/jwalterweatherman v1.1.0 -## explicit -github.com/spf13/jwalterweatherman # github.com/spf13/pflag v1.0.6 ## explicit; go 1.12 github.com/spf13/pflag -# github.com/spf13/viper v1.12.0 -## explicit; go 1.17 +# github.com/spf13/viper v1.20.0 +## explicit; go 1.21.0 github.com/spf13/viper 
-github.com/spf13/viper/internal/encoding github.com/spf13/viper/internal/encoding/dotenv -github.com/spf13/viper/internal/encoding/hcl -github.com/spf13/viper/internal/encoding/ini -github.com/spf13/viper/internal/encoding/javaproperties github.com/spf13/viper/internal/encoding/json github.com/spf13/viper/internal/encoding/toml github.com/spf13/viper/internal/encoding/yaml +github.com/spf13/viper/internal/features # github.com/ssgreg/nlreturn/v2 v2.2.1 ## explicit; go 1.13 github.com/ssgreg/nlreturn/v2/pkg/nlreturn @@ -751,16 +724,16 @@ github.com/stretchr/objx github.com/stretchr/testify/assert github.com/stretchr/testify/assert/yaml github.com/stretchr/testify/mock -# github.com/subosito/gotenv v1.4.1 +# github.com/subosito/gotenv v1.6.0 ## explicit; go 1.18 github.com/subosito/gotenv -# github.com/tdakkota/asciicheck v0.4.0 +# github.com/tdakkota/asciicheck v0.4.1 ## explicit; go 1.22.0 github.com/tdakkota/asciicheck -# github.com/tetafro/godot v1.4.20 +# github.com/tetafro/godot v1.5.0 ## explicit; go 1.20 github.com/tetafro/godot -# github.com/timakin/bodyclose v0.0.0-20241017074812-ed6a65f985e3 +# github.com/timakin/bodyclose v0.0.0-20241222091800-1db5c5ca4d67 ## explicit; go 1.12 github.com/timakin/bodyclose/passes/bodyclose # github.com/timonwong/loggercheck v0.10.1 @@ -794,8 +767,8 @@ github.com/uudashr/iface/identical github.com/uudashr/iface/internal/directive github.com/uudashr/iface/opaque github.com/uudashr/iface/unused -# github.com/xen0n/gosmopolitan v1.2.2 -## explicit; go 1.19 +# github.com/xen0n/gosmopolitan v1.3.0 +## explicit; go 1.23.0 github.com/xen0n/gosmopolitan # github.com/yagipy/maintidx v1.0.0 ## explicit; go 1.17 @@ -817,18 +790,15 @@ go-simpler.org/musttag # go-simpler.org/sloglint v0.9.0 ## explicit; go 1.22.0 go-simpler.org/sloglint -# go.uber.org/atomic v1.7.0 -## explicit; go 1.13 -go.uber.org/atomic # go.uber.org/automaxprocs v1.6.0 ## explicit; go 1.20 go.uber.org/automaxprocs/internal/cgroups go.uber.org/automaxprocs/internal/runtime go.uber.org/automaxprocs/maxprocs -# go.uber.org/multierr v1.6.0 -## explicit; go 1.12 +# go.uber.org/multierr v1.11.0 +## explicit; go 1.19 go.uber.org/multierr -# go.uber.org/zap v1.24.0 +# go.uber.org/zap v1.27.0 ## explicit; go 1.19 go.uber.org/zap go.uber.org/zap/buffer @@ -836,9 +806,11 @@ go.uber.org/zap/internal go.uber.org/zap/internal/bufferpool go.uber.org/zap/internal/color go.uber.org/zap/internal/exit +go.uber.org/zap/internal/pool +go.uber.org/zap/internal/stacktrace go.uber.org/zap/zapcore -# golang.org/x/exp/typeparams v0.0.0-20250210185358-939b2ce775ac -## explicit; go 1.18 +# golang.org/x/exp/typeparams v0.0.0-20250305212735-054e65f0b394 +## explicit; go 1.23.0 golang.org/x/exp/typeparams # golang.org/x/mod v0.24.0 ## explicit; go 1.23.0 @@ -854,8 +826,12 @@ golang.org/x/sync/semaphore ## explicit; go 1.23.0 golang.org/x/sys/unix golang.org/x/sys/windows -# golang.org/x/text v0.22.0 -## explicit; go 1.18 +# golang.org/x/text v0.23.0 +## explicit; go 1.23.0 +golang.org/x/text/encoding +golang.org/x/text/encoding/internal +golang.org/x/text/encoding/internal/identifier +golang.org/x/text/encoding/unicode golang.org/x/text/feature/plural golang.org/x/text/internal golang.org/x/text/internal/catmsg @@ -865,6 +841,7 @@ golang.org/x/text/internal/language/compact golang.org/x/text/internal/number golang.org/x/text/internal/stringset golang.org/x/text/internal/tag +golang.org/x/text/internal/utf8internal golang.org/x/text/language golang.org/x/text/message golang.org/x/text/message/catalog @@ -955,15 +932,15 
@@ golang.org/x/tools/internal/stdlib golang.org/x/tools/internal/typeparams golang.org/x/tools/internal/typesinternal golang.org/x/tools/internal/versions -# google.golang.org/protobuf v1.36.4 +# google.golang.org/protobuf v1.36.5 ## explicit; go 1.21 +google.golang.org/protobuf/encoding/protodelim google.golang.org/protobuf/encoding/prototext google.golang.org/protobuf/encoding/protowire google.golang.org/protobuf/internal/descfmt google.golang.org/protobuf/internal/descopts google.golang.org/protobuf/internal/detrand google.golang.org/protobuf/internal/editiondefaults -google.golang.org/protobuf/internal/editionssupport google.golang.org/protobuf/internal/encoding/defval google.golang.org/protobuf/internal/encoding/messageset google.golang.org/protobuf/internal/encoding/tag @@ -981,19 +958,11 @@ google.golang.org/protobuf/internal/set google.golang.org/protobuf/internal/strs google.golang.org/protobuf/internal/version google.golang.org/protobuf/proto -google.golang.org/protobuf/reflect/protodesc google.golang.org/protobuf/reflect/protoreflect google.golang.org/protobuf/reflect/protoregistry google.golang.org/protobuf/runtime/protoiface google.golang.org/protobuf/runtime/protoimpl -google.golang.org/protobuf/types/descriptorpb -google.golang.org/protobuf/types/gofeaturespb -google.golang.org/protobuf/types/known/anypb -google.golang.org/protobuf/types/known/durationpb google.golang.org/protobuf/types/known/timestamppb -# gopkg.in/ini.v1 v1.67.0 -## explicit -gopkg.in/ini.v1 # gopkg.in/yaml.v2 v2.4.0 ## explicit; go 1.15 gopkg.in/yaml.v2 @@ -1186,6 +1155,6 @@ mvdan.cc/gofumpt/internal/govendor/go/doc/comment mvdan.cc/gofumpt/internal/govendor/go/format mvdan.cc/gofumpt/internal/govendor/go/printer mvdan.cc/gofumpt/internal/version -# mvdan.cc/unparam v0.0.0-20240528143540-8a5130ca722f -## explicit; go 1.21 +# mvdan.cc/unparam v0.0.0-20250301125049-0df0534333a4 +## explicit; go 1.23 mvdan.cc/unparam/check diff --git a/tools/vendor/mvdan.cc/unparam/check/check.go b/tools/vendor/mvdan.cc/unparam/check/check.go index 2bda01305..38421a09a 100644 --- a/tools/vendor/mvdan.cc/unparam/check/check.go +++ b/tools/vendor/mvdan.cc/unparam/check/check.go @@ -411,7 +411,7 @@ func (c *Checker) addImplementing(named *types.Named, iface *types.Interface) { } func findNamed(typ types.Type) *types.Named { - switch typ := typ.(type) { + switch typ := types.Unalias(typ).(type) { case *types.Pointer: return findNamed(typ.Elem()) case *types.Named: diff --git a/vendor/github.com/BurntSushi/toml/README.md b/vendor/github.com/BurntSushi/toml/README.md index 639e6c399..235496eeb 100644 --- a/vendor/github.com/BurntSushi/toml/README.md +++ b/vendor/github.com/BurntSushi/toml/README.md @@ -3,7 +3,7 @@ reflection interface similar to Go's standard library `json` and `xml` packages. Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0). -Documentation: https://godocs.io/github.com/BurntSushi/toml +Documentation: https://pkg.go.dev/github.com/BurntSushi/toml See the [releases page](https://github.com/BurntSushi/toml/releases) for a changelog; this information is also in the git tag annotations (e.g. 
`git show diff --git a/vendor/github.com/BurntSushi/toml/decode.go b/vendor/github.com/BurntSushi/toml/decode.go index 7aaf462c9..3fa516caa 100644 --- a/vendor/github.com/BurntSushi/toml/decode.go +++ b/vendor/github.com/BurntSushi/toml/decode.go @@ -196,6 +196,19 @@ func (md *MetaData) PrimitiveDecode(primValue Primitive, v any) error { return md.unify(primValue.undecoded, rvalue(v)) } +// markDecodedRecursive is a helper to mark any key under the given tmap as +// decoded, recursing as needed +func markDecodedRecursive(md *MetaData, tmap map[string]any) { + for key := range tmap { + md.decoded[md.context.add(key).String()] = struct{}{} + if tmap, ok := tmap[key].(map[string]any); ok { + md.context = append(md.context, key) + markDecodedRecursive(md, tmap) + md.context = md.context[0 : len(md.context)-1] + } + } +} + // unify performs a sort of type unification based on the structure of `rv`, // which is the client representation. // @@ -222,6 +235,16 @@ func (md *MetaData) unify(data any, rv reflect.Value) error { if err != nil { return md.parseErr(err) } + // Assume the Unmarshaler decoded everything, so mark all keys under + // this table as decoded. + if tmap, ok := data.(map[string]any); ok { + markDecodedRecursive(md, tmap) + } + if aot, ok := data.([]map[string]any); ok { + for _, tmap := range aot { + markDecodedRecursive(md, tmap) + } + } return nil } if v, ok := rvi.(encoding.TextUnmarshaler); ok { @@ -540,12 +563,14 @@ func (md *MetaData) badtype(dst string, data any) error { func (md *MetaData) parseErr(err error) error { k := md.context.String() + d := string(md.data) return ParseError{ + Message: err.Error(), + err: err, LastKey: k, - Position: md.keyInfo[k].pos, + Position: md.keyInfo[k].pos.withCol(d), Line: md.keyInfo[k].pos.Line, - err: err, - input: string(md.data), + input: d, } } diff --git a/vendor/github.com/BurntSushi/toml/encode.go b/vendor/github.com/BurntSushi/toml/encode.go index 73366c0d9..ac196e7df 100644 --- a/vendor/github.com/BurntSushi/toml/encode.go +++ b/vendor/github.com/BurntSushi/toml/encode.go @@ -402,31 +402,30 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) { // Sort keys so that we have deterministic output. And write keys directly // underneath this key first, before writing sub-structs or sub-maps. 
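// The decode.go hunk above marks keys consumed by a custom toml.Unmarshaler
// as decoded, so they no longer surface as false positives in
// MetaData.Undecoded(). Hedged sketch of the behaviour being fixed; the type
// and key names are illustrative.
package main

import (
    "fmt"

    "github.com/BurntSushi/toml"
)

// rawTable swallows an entire TOML table via the Unmarshaler interface.
type rawTable map[string]any

func (r *rawTable) UnmarshalTOML(v any) error {
    // For this sketch, assume the value is always a table.
    *r = v.(map[string]any)
    return nil
}

func main() {
    var cfg struct{ Server rawTable }
    md, err := toml.Decode("[server]\nhost = \"a\"\nport = 1\n", &cfg)
    if err != nil {
        panic(err)
    }
    // With this patch, keys handled by UnmarshalTOML should no longer be
    // reported here; previously they were listed as undecoded.
    fmt.Println(md.Undecoded())
}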
- var mapKeysDirect, mapKeysSub []string + var mapKeysDirect, mapKeysSub []reflect.Value for _, mapKey := range rv.MapKeys() { - k := mapKey.String() if typeIsTable(tomlTypeOfGo(eindirect(rv.MapIndex(mapKey)))) { - mapKeysSub = append(mapKeysSub, k) + mapKeysSub = append(mapKeysSub, mapKey) } else { - mapKeysDirect = append(mapKeysDirect, k) + mapKeysDirect = append(mapKeysDirect, mapKey) } } - var writeMapKeys = func(mapKeys []string, trailC bool) { - sort.Strings(mapKeys) + writeMapKeys := func(mapKeys []reflect.Value, trailC bool) { + sort.Slice(mapKeys, func(i, j int) bool { return mapKeys[i].String() < mapKeys[j].String() }) for i, mapKey := range mapKeys { - val := eindirect(rv.MapIndex(reflect.ValueOf(mapKey))) + val := eindirect(rv.MapIndex(mapKey)) if isNil(val) { continue } if inline { - enc.writeKeyValue(Key{mapKey}, val, true) + enc.writeKeyValue(Key{mapKey.String()}, val, true) if trailC || i != len(mapKeys)-1 { enc.wf(", ") } } else { - enc.encode(key.add(mapKey), val) + enc.encode(key.add(mapKey.String()), val) } } } @@ -441,8 +440,6 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) { } } -const is32Bit = (32 << (^uint(0) >> 63)) == 32 - func pointerTo(t reflect.Type) reflect.Type { if t.Kind() == reflect.Ptr { return pointerTo(t.Elem()) @@ -477,15 +474,14 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) { frv := eindirect(rv.Field(i)) - if is32Bit { - // Copy so it works correct on 32bit archs; not clear why this - // is needed. See #314, and https://www.reddit.com/r/golang/comments/pnx8v4 - // This also works fine on 64bit, but 32bit archs are somewhat - // rare and this is a wee bit faster. - copyStart := make([]int, len(start)) - copy(copyStart, start) - start = copyStart - } + // Need to make a copy because ... ehm, I don't know why... I guess + // allocating a new array can cause it to fail(?) + // + // Done for: https://github.com/BurntSushi/toml/issues/430 + // Previously only on 32bit for: https://github.com/BurntSushi/toml/issues/314 + copyStart := make([]int, len(start)) + copy(copyStart, start) + start = copyStart // Treat anonymous struct fields with tag names as though they are // not anonymous, like encoding/json does. @@ -507,7 +503,7 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) { } addFields(rt, rv, nil) - writeFields := func(fields [][]int) { + writeFields := func(fields [][]int, totalFields int) { for _, fieldIndex := range fields { fieldType := rt.FieldByIndex(fieldIndex) fieldVal := rv.FieldByIndex(fieldIndex) @@ -537,7 +533,7 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) { if inline { enc.writeKeyValue(Key{keyName}, fieldVal, true) - if fieldIndex[0] != len(fields)-1 { + if fieldIndex[0] != totalFields-1 { enc.wf(", ") } } else { @@ -549,8 +545,10 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) { if inline { enc.wf("{") } - writeFields(fieldsDirect) - writeFields(fieldsSub) + + l := len(fieldsDirect) + len(fieldsSub) + writeFields(fieldsDirect, l) + writeFields(fieldsSub, l) if inline { enc.wf("}") } diff --git a/vendor/github.com/BurntSushi/toml/error.go b/vendor/github.com/BurntSushi/toml/error.go index b45a3f45f..b7077d3ae 100644 --- a/vendor/github.com/BurntSushi/toml/error.go +++ b/vendor/github.com/BurntSushi/toml/error.go @@ -67,21 +67,36 @@ type ParseError struct { // Position of an error. type Position struct { Line int // Line number, starting at 1. + Col int // Error column, starting at 1. 
Start int // Start of error, as byte offset starting at 0. - Len int // Lenght in bytes. + Len int // Length of the error in bytes. } -func (pe ParseError) Error() string { - msg := pe.Message - if msg == "" { // Error from errorf() - msg = pe.err.Error() +func (p Position) withCol(tomlFile string) Position { + var ( + pos int + lines = strings.Split(tomlFile, "\n") + ) + for i := range lines { + ll := len(lines[i]) + 1 // +1 for the removed newline + if pos+ll >= p.Start { + p.Col = p.Start - pos + 1 + if p.Col < 1 { // Should never happen, but just in case. + p.Col = 1 + } + break + } + pos += ll } + return p +} +func (pe ParseError) Error() string { if pe.LastKey == "" { - return fmt.Sprintf("toml: line %d: %s", pe.Position.Line, msg) + return fmt.Sprintf("toml: line %d: %s", pe.Position.Line, pe.Message) } return fmt.Sprintf("toml: line %d (last key %q): %s", - pe.Position.Line, pe.LastKey, msg) + pe.Position.Line, pe.LastKey, pe.Message) } // ErrorWithPosition returns the error with detailed location context. @@ -92,26 +107,19 @@ func (pe ParseError) ErrorWithPosition() string { return pe.Error() } + // TODO: don't show control characters as literals? This may not show up + // well everywhere. + var ( lines = strings.Split(pe.input, "\n") - col = pe.column(lines) b = new(strings.Builder) ) - - msg := pe.Message - if msg == "" { - msg = pe.err.Error() - } - - // TODO: don't show control characters as literals? This may not show up - // well everywhere. - if pe.Position.Len == 1 { fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d:\n\n", - msg, pe.Position.Line, col+1) + pe.Message, pe.Position.Line, pe.Position.Col) } else { fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d-%d:\n\n", - msg, pe.Position.Line, col, col+pe.Position.Len) + pe.Message, pe.Position.Line, pe.Position.Col, pe.Position.Col+pe.Position.Len-1) } if pe.Position.Line > 2 { fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-2, expandTab(lines[pe.Position.Line-3])) @@ -129,7 +137,7 @@ func (pe ParseError) ErrorWithPosition() string { diff := len(expanded) - len(lines[pe.Position.Line-1]) fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line, expanded) - fmt.Fprintf(b, "% 10s%s%s\n", "", strings.Repeat(" ", col+diff), strings.Repeat("^", pe.Position.Len)) + fmt.Fprintf(b, "% 10s%s%s\n", "", strings.Repeat(" ", pe.Position.Col-1+diff), strings.Repeat("^", pe.Position.Len)) return b.String() } @@ -151,23 +159,6 @@ func (pe ParseError) ErrorWithUsage() string { return m } -func (pe ParseError) column(lines []string) int { - var pos, col int - for i := range lines { - ll := len(lines[i]) + 1 // +1 for the removed newline - if pos+ll >= pe.Position.Start { - col = pe.Position.Start - pos - if col < 0 { // Should never happen, but just in case. 
- col = 0 - } - break - } - pos += ll - } - - return col -} - func expandTab(s string) string { var ( b strings.Builder diff --git a/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/BurntSushi/toml/lex.go index a1016d98a..1c3b47702 100644 --- a/vendor/github.com/BurntSushi/toml/lex.go +++ b/vendor/github.com/BurntSushi/toml/lex.go @@ -275,7 +275,9 @@ func (lx *lexer) errorPos(start, length int, err error) stateFn { func (lx *lexer) errorf(format string, values ...any) stateFn { if lx.atEOF { pos := lx.getPos() - pos.Line-- + if lx.pos >= 1 && lx.input[lx.pos-1] == '\n' { + pos.Line-- + } pos.Len = 1 pos.Start = lx.pos - 1 lx.items <- item{typ: itemError, pos: pos, err: fmt.Errorf(format, values...)} @@ -492,6 +494,9 @@ func lexKeyEnd(lx *lexer) stateFn { lx.emit(itemKeyEnd) return lexSkip(lx, lexValue) default: + if r == '\n' { + return lx.errorPrevLine(fmt.Errorf("expected '.' or '=', but got %q instead", r)) + } return lx.errorf("expected '.' or '=', but got %q instead", r) } } @@ -560,6 +565,9 @@ func lexValue(lx *lexer) stateFn { if r == eof { return lx.errorf("unexpected EOF; expected value") } + if r == '\n' { + return lx.errorPrevLine(fmt.Errorf("expected value but found %q instead", r)) + } return lx.errorf("expected value but found %q instead", r) } @@ -1111,7 +1119,7 @@ func lexBaseNumberOrDate(lx *lexer) stateFn { case 'x': r = lx.peek() if !isHex(r) { - lx.errorf("not a hexidecimal number: '%s%c'", lx.current(), r) + lx.errorf("not a hexadecimal number: '%s%c'", lx.current(), r) } return lexHexInteger } @@ -1259,23 +1267,6 @@ func isBinary(r rune) bool { return r == '0' || r == '1' } func isOctal(r rune) bool { return r >= '0' && r <= '7' } func isHex(r rune) bool { return (r >= '0' && r <= '9') || (r|0x20 >= 'a' && r|0x20 <= 'f') } func isBareKeyChar(r rune, tomlNext bool) bool { - if tomlNext { - return (r >= 'A' && r <= 'Z') || - (r >= 'a' && r <= 'z') || - (r >= '0' && r <= '9') || - r == '_' || r == '-' || - r == 0xb2 || r == 0xb3 || r == 0xb9 || (r >= 0xbc && r <= 0xbe) || - (r >= 0xc0 && r <= 0xd6) || (r >= 0xd8 && r <= 0xf6) || (r >= 0xf8 && r <= 0x037d) || - (r >= 0x037f && r <= 0x1fff) || - (r >= 0x200c && r <= 0x200d) || (r >= 0x203f && r <= 0x2040) || - (r >= 0x2070 && r <= 0x218f) || (r >= 0x2460 && r <= 0x24ff) || - (r >= 0x2c00 && r <= 0x2fef) || (r >= 0x3001 && r <= 0xd7ff) || - (r >= 0xf900 && r <= 0xfdcf) || (r >= 0xfdf0 && r <= 0xfffd) || - (r >= 0x10000 && r <= 0xeffff) - } - - return (r >= 'A' && r <= 'Z') || - (r >= 'a' && r <= 'z') || - (r >= '0' && r <= '9') || - r == '_' || r == '-' + return (r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') || + (r >= '0' && r <= '9') || r == '_' || r == '-' } diff --git a/vendor/github.com/BurntSushi/toml/meta.go b/vendor/github.com/BurntSushi/toml/meta.go index e61453730..0d337026c 100644 --- a/vendor/github.com/BurntSushi/toml/meta.go +++ b/vendor/github.com/BurntSushi/toml/meta.go @@ -135,9 +135,6 @@ func (k Key) maybeQuoted(i int) string { // Like append(), but only increase the cap by 1. func (k Key) add(piece string) Key { - if cap(k) > len(k) { - return append(k, piece) - } newKey := make(Key, len(k)+1) copy(newKey, k) newKey[len(k)] = piece diff --git a/vendor/github.com/BurntSushi/toml/parse.go b/vendor/github.com/BurntSushi/toml/parse.go index 11ac3108b..e3ea8a9a2 100644 --- a/vendor/github.com/BurntSushi/toml/parse.go +++ b/vendor/github.com/BurntSushi/toml/parse.go @@ -50,7 +50,6 @@ func parse(data string) (p *parser, err error) { // it anyway. 
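// The error.go hunks above add Position.Col (computed via withCol) and set
// ParseError.Message eagerly; the parse.go hunks below thread the column
// through every ParseError the parser raises. Hedged sketch of how a caller
// renders the result:
package main

import (
    "errors"
    "fmt"

    "github.com/BurntSushi/toml"
)

func main() {
    var dst map[string]any
    _, err := toml.Decode("key =\n", &dst) // invalid: no value after '='
    var perr toml.ParseError
    if errors.As(err, &perr) {
        // Prints the error with line/column context and a caret marker.
        fmt.Println(perr.ErrorWithPosition())
    }
}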
if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") { // UTF-16 data = data[2:] - //lint:ignore S1017 https://github.com/dominikh/go-tools/issues/1447 } else if strings.HasPrefix(data, "\xef\xbb\xbf") { // UTF-8 data = data[3:] } @@ -65,7 +64,7 @@ func parse(data string) (p *parser, err error) { if i := strings.IndexRune(data[:ex], 0); i > -1 { return nil, ParseError{ Message: "files cannot contain NULL bytes; probably using UTF-16; TOML files must be UTF-8", - Position: Position{Line: 1, Start: i, Len: 1}, + Position: Position{Line: 1, Col: 1, Start: i, Len: 1}, Line: 1, input: data, } @@ -92,8 +91,9 @@ func parse(data string) (p *parser, err error) { func (p *parser) panicErr(it item, err error) { panic(ParseError{ + Message: err.Error(), err: err, - Position: it.pos, + Position: it.pos.withCol(p.lx.input), Line: it.pos.Len, LastKey: p.current(), }) @@ -102,7 +102,7 @@ func (p *parser) panicErr(it item, err error) { func (p *parser) panicItemf(it item, format string, v ...any) { panic(ParseError{ Message: fmt.Sprintf(format, v...), - Position: it.pos, + Position: it.pos.withCol(p.lx.input), Line: it.pos.Len, LastKey: p.current(), }) @@ -111,7 +111,7 @@ func (p *parser) panicItemf(it item, format string, v ...any) { func (p *parser) panicf(format string, v ...any) { panic(ParseError{ Message: fmt.Sprintf(format, v...), - Position: p.pos, + Position: p.pos.withCol(p.lx.input), Line: p.pos.Line, LastKey: p.current(), }) @@ -123,10 +123,11 @@ func (p *parser) next() item { if it.typ == itemError { if it.err != nil { panic(ParseError{ - Position: it.pos, + Message: it.err.Error(), + err: it.err, + Position: it.pos.withCol(p.lx.input), Line: it.pos.Line, LastKey: p.current(), - err: it.err, }) } @@ -527,7 +528,7 @@ func numUnderscoresOK(s string) bool { } } - // isHexis a superset of all the permissable characters surrounding an + // isHex is a superset of all the permissible characters surrounding an // underscore. accept = isHex(r) } diff --git a/vendor/github.com/ProtonMail/go-crypto/internal/byteutil/byteutil.go b/vendor/github.com/ProtonMail/go-crypto/internal/byteutil/byteutil.go index affb74a76..d558b9bd8 100644 --- a/vendor/github.com/ProtonMail/go-crypto/internal/byteutil/byteutil.go +++ b/vendor/github.com/ProtonMail/go-crypto/internal/byteutil/byteutil.go @@ -49,16 +49,16 @@ func ShiftNBytesLeft(dst, x []byte, n int) { dst = append(dst, make([]byte, n/8)...) } -// XorBytesMut assumes equal input length, replaces X with X XOR Y +// XorBytesMut replaces X with X XOR Y. len(X) must be >= len(Y). func XorBytesMut(X, Y []byte) { - for i := 0; i < len(X); i++ { + for i := 0; i < len(Y); i++ { X[i] ^= Y[i] } } -// XorBytes assumes equal input length, puts X XOR Y into Z +// XorBytes puts X XOR Y into Z. len(Z) and len(X) must be >= len(Y). 
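// The byteutil doc fixes above tighten the stated contract: the loop bound is
// len(Y), so X (and Z) only need to be at least as long as Y and only their
// prefix is touched. Self-contained illustration using a local copy of the
// helper, for clarity only:
package main

import "fmt"

func xorBytesMut(x, y []byte) {
    for i := 0; i < len(y); i++ { // bounded by len(y): x may be longer
        x[i] ^= y[i]
    }
}

func main() {
    x := []byte{0xff, 0xff, 0xff, 0xff}
    y := []byte{0x0f, 0xf0}
    xorBytesMut(x, y)
    fmt.Printf("%x\n", x) // f00fffff: only the first len(y) bytes change
}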
func XorBytes(Z, X, Y []byte) { - for i := 0; i < len(X); i++ { + for i := 0; i < len(Y); i++ { Z[i] = X[i] ^ Y[i] } } diff --git a/vendor/github.com/ProtonMail/go-crypto/ocb/ocb.go b/vendor/github.com/ProtonMail/go-crypto/ocb/ocb.go index 5022285b4..24f893017 100644 --- a/vendor/github.com/ProtonMail/go-crypto/ocb/ocb.go +++ b/vendor/github.com/ProtonMail/go-crypto/ocb/ocb.go @@ -109,8 +109,10 @@ func (o *ocb) Seal(dst, nonce, plaintext, adata []byte) []byte { if len(nonce) > o.nonceSize { panic("crypto/ocb: Incorrect nonce length given to OCB") } - ret, out := byteutil.SliceForAppend(dst, len(plaintext)+o.tagSize) - o.crypt(enc, out, nonce, adata, plaintext) + sep := len(plaintext) + ret, out := byteutil.SliceForAppend(dst, sep+o.tagSize) + tag := o.crypt(enc, out[:sep], nonce, adata, plaintext) + copy(out[sep:], tag) return ret } @@ -122,12 +124,10 @@ func (o *ocb) Open(dst, nonce, ciphertext, adata []byte) ([]byte, error) { return nil, ocbError("Ciphertext shorter than tag length") } sep := len(ciphertext) - o.tagSize - ret, out := byteutil.SliceForAppend(dst, len(ciphertext)) + ret, out := byteutil.SliceForAppend(dst, sep) ciphertextData := ciphertext[:sep] - tag := ciphertext[sep:] - o.crypt(dec, out, nonce, adata, ciphertextData) - if subtle.ConstantTimeCompare(ret[sep:], tag) == 1 { - ret = ret[:sep] + tag := o.crypt(dec, out, nonce, adata, ciphertextData) + if subtle.ConstantTimeCompare(tag, ciphertext[sep:]) == 1 { return ret, nil } for i := range out { @@ -137,7 +137,8 @@ func (o *ocb) Open(dst, nonce, ciphertext, adata []byte) ([]byte, error) { } // On instruction enc (resp. dec), crypt is the encrypt (resp. decrypt) -// function. It returns the resulting plain/ciphertext with the tag appended. +// function. It writes the resulting plain/ciphertext into Y and returns +// the tag. func (o *ocb) crypt(instruction int, Y, nonce, adata, X []byte) []byte { // // Consider X as a sequence of 128-bit blocks @@ -194,13 +195,14 @@ func (o *ocb) crypt(instruction int, Y, nonce, adata, X []byte) []byte { byteutil.XorBytesMut(offset, o.mask.L[bits.TrailingZeros(uint(i+1))]) blockX := X[i*blockSize : (i+1)*blockSize] blockY := Y[i*blockSize : (i+1)*blockSize] - byteutil.XorBytes(blockY, blockX, offset) switch instruction { case enc: + byteutil.XorBytesMut(checksum, blockX) + byteutil.XorBytes(blockY, blockX, offset) o.block.Encrypt(blockY, blockY) byteutil.XorBytesMut(blockY, offset) - byteutil.XorBytesMut(checksum, blockX) case dec: + byteutil.XorBytes(blockY, blockX, offset) o.block.Decrypt(blockY, blockY) byteutil.XorBytesMut(blockY, offset) byteutil.XorBytesMut(checksum, blockY) @@ -216,31 +218,24 @@ func (o *ocb) crypt(instruction int, Y, nonce, adata, X []byte) []byte { o.block.Encrypt(pad, offset) chunkX := X[blockSize*m:] chunkY := Y[blockSize*m : len(X)] - byteutil.XorBytes(chunkY, chunkX, pad[:len(chunkX)]) - // P_* || bit(1) || zeroes(127) - len(P_*) switch instruction { case enc: - paddedY := append(chunkX, byte(128)) - paddedY = append(paddedY, make([]byte, blockSize-len(chunkX)-1)...) - byteutil.XorBytesMut(checksum, paddedY) + byteutil.XorBytesMut(checksum, chunkX) + checksum[len(chunkX)] ^= 128 + byteutil.XorBytes(chunkY, chunkX, pad[:len(chunkX)]) + // P_* || bit(1) || zeroes(127) - len(P_*) case dec: - paddedX := append(chunkY, byte(128)) - paddedX = append(paddedX, make([]byte, blockSize-len(chunkY)-1)...) 
- byteutil.XorBytesMut(checksum, paddedX) + byteutil.XorBytes(chunkY, chunkX, pad[:len(chunkX)]) + // P_* || bit(1) || zeroes(127) - len(P_*) + byteutil.XorBytesMut(checksum, chunkY) + checksum[len(chunkY)] ^= 128 } - byteutil.XorBytes(tag, checksum, offset) - byteutil.XorBytesMut(tag, o.mask.lDol) - o.block.Encrypt(tag, tag) - byteutil.XorBytesMut(tag, o.hash(adata)) - copy(Y[blockSize*m+len(chunkY):], tag[:o.tagSize]) - } else { - byteutil.XorBytes(tag, checksum, offset) - byteutil.XorBytesMut(tag, o.mask.lDol) - o.block.Encrypt(tag, tag) - byteutil.XorBytesMut(tag, o.hash(adata)) - copy(Y[blockSize*m:], tag[:o.tagSize]) } - return Y + byteutil.XorBytes(tag, checksum, offset) + byteutil.XorBytesMut(tag, o.mask.lDol) + o.block.Encrypt(tag, tag) + byteutil.XorBytesMut(tag, o.hash(adata)) + return tag[:o.tagSize] } // This hash function is used to compute the tag. Per design, on empty input it diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/encode.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/encode.go index 112f98b83..550efddf0 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/encode.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/encode.go @@ -7,6 +7,7 @@ package armor import ( "encoding/base64" "io" + "sort" ) var armorHeaderSep = []byte(": ") @@ -159,8 +160,15 @@ func encode(out io.Writer, blockType string, headers map[string]string, checksum return } - for k, v := range headers { - err = writeSlices(out, []byte(k), armorHeaderSep, []byte(v), newline) + keys := make([]string, len(headers)) + i := 0 + for k := range headers { + keys[i] = k + i++ + } + sort.Strings(keys) + for _, k := range keys { + err = writeSlices(out, []byte(k), armorHeaderSep, []byte(headers[k]), newline) if err != nil { return } diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/errors/errors.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/errors/errors.go index 0eb3937b3..e44b45734 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/errors/errors.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/errors/errors.go @@ -6,6 +6,7 @@ package errors // import "github.com/ProtonMail/go-crypto/openpgp/errors" import ( + "fmt" "strconv" ) @@ -178,3 +179,22 @@ type ErrMalformedMessage string func (dke ErrMalformedMessage) Error() string { return "openpgp: malformed message " + string(dke) } + +// ErrEncryptionKeySelection is returned if encryption key selection fails (v2 API). 
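The armor change above makes header emission deterministic: Go randomizes map iteration order, so writing headers straight from the map produced armored output that differed from run to run. A minimal sketch of the same sort-then-emit idea, independent of the armor package:

package main

import (
	"fmt"
	"sort"
)

func main() {
	headers := map[string]string{"Version": "1", "Comment": "demo", "Hash": "SHA256"}

	// Collect and sort the keys so the output order no longer depends on
	// Go's randomized map iteration order.
	keys := make([]string, 0, len(headers))
	for k := range headers {
		keys = append(keys, k)
	}
	sort.Strings(keys)

	for _, k := range keys {
		fmt.Printf("%s: %s\n", k, headers[k])
	}
}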
+type ErrEncryptionKeySelection struct { + PrimaryKeyId string + PrimaryKeyErr error + EncSelectionKeyId *string + EncSelectionErr error +} + +func (eks ErrEncryptionKeySelection) Error() string { + prefix := fmt.Sprintf("openpgp: key selection for primary key %s:", eks.PrimaryKeyId) + if eks.PrimaryKeyErr != nil { + return fmt.Sprintf("%s invalid primary key: %s", prefix, eks.PrimaryKeyErr) + } + if eks.EncSelectionKeyId != nil { + return fmt.Sprintf("%s invalid encryption key %s: %s", prefix, *eks.EncSelectionKeyId, eks.EncSelectionErr) + } + return fmt.Sprintf("%s no encryption key: %s", prefix, eks.EncSelectionErr) +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_crypter.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_crypter.go index 2eecd062f..5e4604656 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_crypter.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_crypter.go @@ -3,7 +3,6 @@ package packet import ( - "bytes" "crypto/cipher" "encoding/binary" "io" @@ -15,12 +14,11 @@ import ( type aeadCrypter struct { aead cipher.AEAD chunkSize int - initialNonce []byte + nonce []byte associatedData []byte // Chunk-independent associated data chunkIndex []byte // Chunk counter packetTag packetType // SEIP packet (v2) or AEAD Encrypted Data packet bytesProcessed int // Amount of plaintext bytes encrypted/decrypted - buffer bytes.Buffer // Buffered bytes across chunks } // computeNonce takes the incremental index and computes an eXclusive OR with @@ -28,12 +26,12 @@ type aeadCrypter struct { // 5.16.1 and 5.16.2). It returns the resulting nonce. func (wo *aeadCrypter) computeNextNonce() (nonce []byte) { if wo.packetTag == packetTypeSymmetricallyEncryptedIntegrityProtected { - return append(wo.initialNonce, wo.chunkIndex...) + return wo.nonce } - nonce = make([]byte, len(wo.initialNonce)) - copy(nonce, wo.initialNonce) - offset := len(wo.initialNonce) - 8 + nonce = make([]byte, len(wo.nonce)) + copy(nonce, wo.nonce) + offset := len(wo.nonce) - 8 for i := 0; i < 8; i++ { nonce[i+offset] ^= wo.chunkIndex[i] } @@ -62,8 +60,9 @@ func (wo *aeadCrypter) incrementIndex() error { type aeadDecrypter struct { aeadCrypter // Embedded ciphertext opener reader io.Reader // 'reader' is a partialLengthReader + chunkBytes []byte peekedBytes []byte // Used to detect last chunk - eof bool + buffer []byte // Buffered decrypted bytes } // Read decrypts bytes and reads them into dst. It decrypts when necessary and @@ -71,59 +70,44 @@ type aeadDecrypter struct { // and an error. 
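On the renamed nonce field: for the AEAD Encrypted Data packet, computeNextNonce still XORs the 8-byte chunk counter into the last 64 bits of a fixed per-message nonce (for v2 SEIPD the counter now lives inside the nonce itself, so the nonce is returned as-is). A small illustrative helper showing that XOR-into-the-tail scheme; nextNonce and the big-endian counter encoding here are stand-ins, not the vendored code:

package main

import (
	"encoding/binary"
	"fmt"
)

// nextNonce XORs an 8-byte big-endian chunk counter into the last 8 bytes
// of a fixed per-message nonce, as computeNextNonce does for the AEAD
// Encrypted Data packet.
func nextNonce(base []byte, chunk uint64) []byte {
	nonce := make([]byte, len(base))
	copy(nonce, base)
	var idx [8]byte
	binary.BigEndian.PutUint64(idx[:], chunk)
	off := len(nonce) - 8
	for i := 0; i < 8; i++ {
		nonce[off+i] ^= idx[i]
	}
	return nonce
}

func main() {
	base := make([]byte, 12) // a per-message IV; real length comes from the AEAD mode
	for chunk := uint64(0); chunk < 3; chunk++ {
		fmt.Printf("%x\n", nextNonce(base, chunk))
	}
}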
func (ar *aeadDecrypter) Read(dst []byte) (n int, err error) { // Return buffered plaintext bytes from previous calls - if ar.buffer.Len() > 0 { - return ar.buffer.Read(dst) - } - - // Return EOF if we've previously validated the final tag - if ar.eof { - return 0, io.EOF + if len(ar.buffer) > 0 { + n = copy(dst, ar.buffer) + ar.buffer = ar.buffer[n:] + return } // Read a chunk tagLen := ar.aead.Overhead() - cipherChunkBuf := new(bytes.Buffer) - _, errRead := io.CopyN(cipherChunkBuf, ar.reader, int64(ar.chunkSize+tagLen)) - cipherChunk := cipherChunkBuf.Bytes() - if errRead != nil && errRead != io.EOF { + copy(ar.chunkBytes, ar.peekedBytes) // Copy bytes peeked in previous chunk or in initialization + bytesRead, errRead := io.ReadFull(ar.reader, ar.chunkBytes[tagLen:]) + if errRead != nil && errRead != io.EOF && errRead != io.ErrUnexpectedEOF { return 0, errRead } - if len(cipherChunk) > 0 { - decrypted, errChunk := ar.openChunk(cipherChunk) + if bytesRead > 0 { + ar.peekedBytes = ar.chunkBytes[bytesRead:bytesRead+tagLen] + + decrypted, errChunk := ar.openChunk(ar.chunkBytes[:bytesRead]) if errChunk != nil { return 0, errChunk } // Return decrypted bytes, buffering if necessary - if len(dst) < len(decrypted) { - n = copy(dst, decrypted[:len(dst)]) - ar.buffer.Write(decrypted[len(dst):]) - } else { - n = copy(dst, decrypted) - } + n = copy(dst, decrypted) + ar.buffer = decrypted[n:] + return } - // Check final authentication tag - if errRead == io.EOF { - errChunk := ar.validateFinalTag(ar.peekedBytes) - if errChunk != nil { - return n, errChunk - } - ar.eof = true // Mark EOF for when we've returned all buffered data - } - return + return 0, io.EOF } -// Close is noOp. The final authentication tag of the stream was already -// checked in the last Read call. In the future, this function could be used to -// wipe the reader and peeked, decrypted bytes, if necessary. +// Close checks the final authentication tag of the stream. +// In the future, this function could also be used to wipe the reader +// and peeked & decrypted bytes, if necessary. func (ar *aeadDecrypter) Close() (err error) { - if !ar.eof { - errChunk := ar.validateFinalTag(ar.peekedBytes) - if errChunk != nil { - return errChunk - } + errChunk := ar.validateFinalTag(ar.peekedBytes) + if errChunk != nil { + return errChunk } return nil } @@ -132,20 +116,13 @@ func (ar *aeadDecrypter) Close() (err error) { // the underlying plaintext and an error. It accesses peeked bytes from next // chunk, to identify the last chunk and decrypt/validate accordingly. func (ar *aeadDecrypter) openChunk(data []byte) ([]byte, error) { - tagLen := ar.aead.Overhead() - // Restore carried bytes from last call - chunkExtra := append(ar.peekedBytes, data...) - // 'chunk' contains encrypted bytes, followed by an authentication tag. - chunk := chunkExtra[:len(chunkExtra)-tagLen] - ar.peekedBytes = chunkExtra[len(chunkExtra)-tagLen:] - adata := ar.associatedData if ar.aeadCrypter.packetTag == packetTypeAEADEncrypted { adata = append(ar.associatedData, ar.chunkIndex...) 
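The rewritten Read above replaces the per-chunk bytes.Buffer with one reusable chunkBytes buffer: the tagLen bytes peeked last time are copied to the front, io.ReadFull tops the buffer up, and the final tagLen bytes that arrived become the next peek (they are either the next chunk's prefix or the stream's final tag). A standalone sketch of just that peeking pattern, with toy framing and no crypto (each "chunk" is 4 data bytes plus a 2-byte tag, ending with a 2-byte final tag):

package main

import (
	"bytes"
	"fmt"
	"io"
)

func main() {
	const chunkSize, tagLen = 4, 2
	src := bytes.NewReader([]byte("AAAA11BBBB22TT")) // two chunks, then a final tag

	buf := make([]byte, chunkSize+2*tagLen)
	peek := make([]byte, tagLen)
	if _, err := io.ReadFull(src, peek); err != nil { // prime the peek window
		panic(err)
	}
	for {
		copy(buf, peek) // restore the bytes carried over from the last read
		n, err := io.ReadFull(src, buf[tagLen:])
		if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
			panic(err)
		}
		if n == 0 {
			break // nothing left but the already-peeked final tag
		}
		peek = buf[n : n+tagLen] // the last tagLen bytes belong to the next read
		fmt.Printf("chunk %q\n", buf[:n])
	}
	fmt.Printf("final tag %q\n", peek)
}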
} nonce := ar.computeNextNonce() - plainChunk, err := ar.aead.Open(nil, nonce, chunk, adata) + plainChunk, err := ar.aead.Open(data[:0:len(data)], nonce, data, adata) if err != nil { return nil, errors.ErrAEADTagVerification } @@ -183,27 +160,29 @@ func (ar *aeadDecrypter) validateFinalTag(tag []byte) error { type aeadEncrypter struct { aeadCrypter // Embedded plaintext sealer writer io.WriteCloser // 'writer' is a partialLengthWriter + chunkBytes []byte + offset int } // Write encrypts and writes bytes. It encrypts when necessary and buffers extra // plaintext bytes for next call. When the stream is finished, Close() MUST be // called to append the final tag. func (aw *aeadEncrypter) Write(plaintextBytes []byte) (n int, err error) { - // Append plaintextBytes to existing buffered bytes - n, err = aw.buffer.Write(plaintextBytes) - if err != nil { - return n, err - } - // Encrypt and write chunks - for aw.buffer.Len() >= aw.chunkSize { - plainChunk := aw.buffer.Next(aw.chunkSize) - encryptedChunk, err := aw.sealChunk(plainChunk) - if err != nil { - return n, err - } - _, err = aw.writer.Write(encryptedChunk) - if err != nil { - return n, err + for n != len(plaintextBytes) { + copied := copy(aw.chunkBytes[aw.offset:aw.chunkSize], plaintextBytes[n:]) + n += copied + aw.offset += copied + + if aw.offset == aw.chunkSize { + encryptedChunk, err := aw.sealChunk(aw.chunkBytes[:aw.offset]) + if err != nil { + return n, err + } + _, err = aw.writer.Write(encryptedChunk) + if err != nil { + return n, err + } + aw.offset = 0 } } return @@ -215,9 +194,8 @@ func (aw *aeadEncrypter) Write(plaintextBytes []byte) (n int, err error) { func (aw *aeadEncrypter) Close() (err error) { // Encrypt and write a chunk if there's buffered data left, or if we haven't // written any chunks yet. 
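The openChunk and sealChunk changes above lean on a documented crypto/cipher.AEAD property: passing an exactly-overlapping dst (data[:0], or data[:0:len(data)] to also cap growth) lets Seal and Open reuse the input's storage instead of allocating per chunk. A self-contained sketch of the same aliasing with AES-GCM (GCM is a stand-in here; the vendored code uses the packet's negotiated AEAD mode):

package main

import (
	"crypto/aes"
	"crypto/cipher"
	"fmt"
)

func main() {
	key := make([]byte, 16)
	block, err := aes.NewCipher(key)
	if err != nil {
		panic(err)
	}
	aead, err := cipher.NewGCM(block)
	if err != nil {
		panic(err)
	}
	nonce := make([]byte, aead.NonceSize())

	buf := make([]byte, 0, 64) // room for plaintext plus tag
	buf = append(buf, "attack at dawn"...)

	// Seal in place: buf[:0] aliases buf's storage, as sealChunk does with
	// data[:0]; the ciphertext and tag overwrite the plaintext buffer.
	ct := aead.Seal(buf[:0], nonce, buf, nil)

	// Open in place with a capacity-limited alias, as openChunk does with
	// data[:0:len(data)], so the result cannot grow past the ciphertext.
	pt, err := aead.Open(ct[:0:len(ct)], nonce, ct, nil)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", pt)
}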
- if aw.buffer.Len() > 0 || aw.bytesProcessed == 0 { - plainChunk := aw.buffer.Bytes() - lastEncryptedChunk, err := aw.sealChunk(plainChunk) + if aw.offset > 0 || aw.bytesProcessed == 0 { + lastEncryptedChunk, err := aw.sealChunk(aw.chunkBytes[:aw.offset]) if err != nil { return err } @@ -263,7 +241,7 @@ func (aw *aeadEncrypter) sealChunk(data []byte) ([]byte, error) { } nonce := aw.computeNextNonce() - encrypted := aw.aead.Seal(nil, nonce, data, adata) + encrypted := aw.aead.Seal(data[:0], nonce, data, adata) aw.bytesProcessed += len(data) if err := aw.aeadCrypter.incrementIndex(); err != nil { return nil, err diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_encrypted.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_encrypted.go index 98bd876bf..583765d87 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_encrypted.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_encrypted.go @@ -65,24 +65,28 @@ func (ae *AEADEncrypted) decrypt(key []byte) (io.ReadCloser, error) { blockCipher := ae.cipher.new(key) aead := ae.mode.new(blockCipher) // Carry the first tagLen bytes + chunkSize := decodeAEADChunkSize(ae.chunkSizeByte) tagLen := ae.mode.TagLength() - peekedBytes := make([]byte, tagLen) + chunkBytes := make([]byte, chunkSize+tagLen*2) + peekedBytes := chunkBytes[chunkSize+tagLen:] n, err := io.ReadFull(ae.Contents, peekedBytes) if n < tagLen || (err != nil && err != io.EOF) { return nil, errors.AEADError("Not enough data to decrypt:" + err.Error()) } - chunkSize := decodeAEADChunkSize(ae.chunkSizeByte) + return &aeadDecrypter{ aeadCrypter: aeadCrypter{ aead: aead, chunkSize: chunkSize, - initialNonce: ae.initialNonce, + nonce: ae.initialNonce, associatedData: ae.associatedData(), chunkIndex: make([]byte, 8), packetTag: packetTypeAEADEncrypted, }, reader: ae.Contents, - peekedBytes: peekedBytes}, nil + chunkBytes: chunkBytes, + peekedBytes: peekedBytes, + }, nil } // associatedData for chunks: tag, version, cipher, mode, chunk size byte diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/config.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/config.go index 8bf8e6e51..257398d9d 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/config.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/config.go @@ -173,6 +173,11 @@ type Config struct { // weaknesses in the hash algo, potentially hindering e.g. some chosen-prefix attacks. // The default behavior, when the config or flag is nil, is to enable the feature. NonDeterministicSignaturesViaNotation *bool + + // InsecureAllowAllKeyFlagsWhenMissing determines how a key without valid key flags is handled. + // When set to true, a key without flags is treated as if all flags are enabled. + // This behavior is consistent with GPG. + InsecureAllowAllKeyFlagsWhenMissing bool } func (c *Config) Random() io.Reader { @@ -403,6 +408,13 @@ func (c *Config) RandomizeSignaturesViaNotation() bool { return *c.NonDeterministicSignaturesViaNotation } +func (c *Config) AllowAllKeyFlagsWhenMissing() bool { + if c == nil { + return false + } + return c.InsecureAllowAllKeyFlagsWhenMissing +} + // BoolPointer is a helper function to set a boolean pointer in the Config. 
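The new config knob above is opt-in and fails safe: a nil config (or an unset field) reports false, so keys without key flags stay restricted unless the caller explicitly asks for the GPG-compatible behavior. A usage sketch, assuming the go-crypto version carried by this patch:

package main

import (
	"fmt"

	"github.com/ProtonMail/go-crypto/openpgp/packet"
)

func main() {
	// Opt in to GPG-compatible handling of keys that carry no key flags:
	// treat them as if all flags were set. The "Insecure" prefix is the
	// package's signal that this widens what such a key may be used for.
	cfg := &packet.Config{InsecureAllowAllKeyFlagsWhenMissing: true}
	fmt.Println(cfg.AllowAllKeyFlagsWhenMissing()) // true

	var nilCfg *packet.Config
	fmt.Println(nilCfg.AllowAllKeyFlagsWhenMissing()) // false: the safe default
}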
// e.g., config.CheckPacketSequence = BoolPointer(true) func BoolPointer(value bool) *bool { diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go index f8da781bb..e2813396e 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go @@ -1048,12 +1048,17 @@ func (pk *PublicKey) VerifyDirectKeySignature(sig *Signature) (err error) { // KeyIdString returns the public key's fingerprint in capital hex // (e.g. "6C7EE1B8621CC013"). func (pk *PublicKey) KeyIdString() string { - return fmt.Sprintf("%X", pk.Fingerprint[12:20]) + return fmt.Sprintf("%016X", pk.KeyId) } // KeyIdShortString returns the short form of public key's fingerprint // in capital hex, as shown by gpg --list-keys (e.g. "621CC013"). +// This function will return the full key id for v5 and v6 keys +// since the short key id is undefined for them. func (pk *PublicKey) KeyIdShortString() string { + if pk.Version >= 5 { + return pk.KeyIdString() + } return fmt.Sprintf("%X", pk.Fingerprint[16:20]) } diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go index 3a4b366d8..84dd3b86f 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go @@ -1288,7 +1288,9 @@ func (sig *Signature) buildSubpackets(issuer PublicKey) (subpackets []outputSubp if sig.IssuerKeyId != nil && sig.Version == 4 { keyId := make([]byte, 8) binary.BigEndian.PutUint64(keyId, *sig.IssuerKeyId) - subpackets = append(subpackets, outputSubpacket{true, issuerSubpacket, true, keyId}) + // Note: making this critical breaks RPM <=4.16. 
+ // See: https://github.com/ProtonMail/go-crypto/issues/263 + subpackets = append(subpackets, outputSubpacket{true, issuerSubpacket, false, keyId}) } // Notation Data for _, notation := range sig.Notations { diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_aead.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_aead.go index 3957b2d53..3ddc4fe4a 100644 --- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_aead.go +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_aead.go @@ -70,8 +70,10 @@ func (se *SymmetricallyEncrypted) decryptAead(inputKey []byte) (io.ReadCloser, e aead, nonce := getSymmetricallyEncryptedAeadInstance(se.Cipher, se.Mode, inputKey, se.Salt[:], se.associatedData()) // Carry the first tagLen bytes + chunkSize := decodeAEADChunkSize(se.ChunkSizeByte) tagLen := se.Mode.TagLength() - peekedBytes := make([]byte, tagLen) + chunkBytes := make([]byte, chunkSize+tagLen*2) + peekedBytes := chunkBytes[chunkSize+tagLen:] n, err := io.ReadFull(se.Contents, peekedBytes) if n < tagLen || (err != nil && err != io.EOF) { return nil, errors.StructuralError("not enough data to decrypt:" + err.Error()) @@ -81,12 +83,13 @@ func (se *SymmetricallyEncrypted) decryptAead(inputKey []byte) (io.ReadCloser, e aeadCrypter: aeadCrypter{ aead: aead, chunkSize: decodeAEADChunkSize(se.ChunkSizeByte), - initialNonce: nonce, + nonce: nonce, associatedData: se.associatedData(), - chunkIndex: make([]byte, 8), + chunkIndex: nonce[len(nonce)-8:], packetTag: packetTypeSymmetricallyEncryptedIntegrityProtected, }, reader: se.Contents, + chunkBytes: chunkBytes, peekedBytes: peekedBytes, }, nil } @@ -130,16 +133,20 @@ func serializeSymmetricallyEncryptedAead(ciphertext io.WriteCloser, cipherSuite aead, nonce := getSymmetricallyEncryptedAeadInstance(cipherSuite.Cipher, cipherSuite.Mode, inputKey, salt, prefix) + chunkSize := decodeAEADChunkSize(chunkSizeByte) + tagLen := aead.Overhead() + chunkBytes := make([]byte, chunkSize+tagLen) return &aeadEncrypter{ aeadCrypter: aeadCrypter{ aead: aead, - chunkSize: decodeAEADChunkSize(chunkSizeByte), + chunkSize: chunkSize, associatedData: prefix, - chunkIndex: make([]byte, 8), - initialNonce: nonce, + nonce: nonce, + chunkIndex: nonce[len(nonce)-8:], packetTag: packetTypeSymmetricallyEncryptedIntegrityProtected, }, - writer: ciphertext, + writer: ciphertext, + chunkBytes: chunkBytes, }, nil } @@ -149,10 +156,10 @@ func getSymmetricallyEncryptedAeadInstance(c CipherFunction, mode AEADMode, inpu encryptionKey := make([]byte, c.KeySize()) _, _ = readFull(hkdfReader, encryptionKey) - // Last 64 bits of nonce are the counter - nonce = make([]byte, mode.IvLength()-8) + nonce = make([]byte, mode.IvLength()) - _, _ = readFull(hkdfReader, nonce) + // Last 64 bits of nonce are the counter + _, _ = readFull(hkdfReader, nonce[:len(nonce)-8]) blockCipher := c.new(encryptionKey) aead = mode.new(blockCipher) diff --git a/vendor/github.com/aead/chacha20/chacha/chacha.go b/vendor/github.com/aead/chacha20/chacha/chacha.go deleted file mode 100644 index c2b39da4f..000000000 --- a/vendor/github.com/aead/chacha20/chacha/chacha.go +++ /dev/null @@ -1,197 +0,0 @@ -// Copyright (c) 2016 Andreas Auernhammer. All rights reserved. -// Use of this source code is governed by a license that can be -// found in the LICENSE file. - -// Package chacha implements some low-level functions of the -// ChaCha cipher family. 
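The nonce handling above now allocates the full IvLength, lets the HKDF reader fill everything except the trailing 8 bytes, and aliases chunkIndex onto that tail, so the counter lives inside the nonce rather than being appended on every chunk. A sketch of the same layout; crypto/rand stands in for the HKDF output and PutUint64 for the incremental counter:

package main

import (
	"crypto/rand"
	"encoding/binary"
	"fmt"
)

func main() {
	const ivLen = 16 // illustrative; the real length comes from mode.IvLength()

	// Fill everything but the trailing 8 bytes, as the HKDF reader does above;
	// the last 64 bits of the nonce are reserved for the chunk counter.
	nonce := make([]byte, ivLen)
	if _, err := rand.Read(nonce[:ivLen-8]); err != nil {
		panic(err)
	}
	chunkIndex := nonce[len(nonce)-8:] // aliases the counter region in place

	for i := uint64(0); i < 3; i++ {
		binary.BigEndian.PutUint64(chunkIndex, i)
		fmt.Printf("chunk %d nonce %x\n", i, nonce)
	}
}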
-package chacha // import "github.com/aead/chacha20/chacha" - -import ( - "encoding/binary" - "errors" - "math" -) - -const ( - // NonceSize is the size of the ChaCha20 nonce in bytes. - NonceSize = 8 - - // INonceSize is the size of the IETF-ChaCha20 nonce in bytes. - INonceSize = 12 - - // XNonceSize is the size of the XChaCha20 nonce in bytes. - XNonceSize = 24 - - // KeySize is the size of the key in bytes. - KeySize = 32 -) - -var ( - useSSE2 bool - useSSSE3 bool - useAVX bool - useAVX2 bool -) - -var ( - errKeySize = errors.New("chacha20/chacha: bad key length") - errInvalidNonce = errors.New("chacha20/chacha: bad nonce length") -) - -func setup(state *[64]byte, nonce, key []byte) (err error) { - if len(key) != KeySize { - err = errKeySize - return - } - var Nonce [16]byte - switch len(nonce) { - case NonceSize: - copy(Nonce[8:], nonce) - initialize(state, key, &Nonce) - case INonceSize: - copy(Nonce[4:], nonce) - initialize(state, key, &Nonce) - case XNonceSize: - var tmpKey [32]byte - var hNonce [16]byte - - copy(hNonce[:], nonce[:16]) - copy(tmpKey[:], key) - HChaCha20(&tmpKey, &hNonce, &tmpKey) - copy(Nonce[8:], nonce[16:]) - initialize(state, tmpKey[:], &Nonce) - - // BUG(aead): A "good" compiler will remove this (optimizations) - // But using the provided key instead of tmpKey, - // will change the key (-> probably confuses users) - for i := range tmpKey { - tmpKey[i] = 0 - } - default: - err = errInvalidNonce - } - return -} - -// XORKeyStream crypts bytes from src to dst using the given nonce and key. -// The length of the nonce determinds the version of ChaCha20: -// - NonceSize: ChaCha20/r with a 64 bit nonce and a 2^64 * 64 byte period. -// - INonceSize: ChaCha20/r as defined in RFC 7539 and a 2^32 * 64 byte period. -// - XNonceSize: XChaCha20/r with a 192 bit nonce and a 2^64 * 64 byte period. -// The rounds argument specifies the number of rounds performed for keystream -// generation - valid values are 8, 12 or 20. The src and dst may be the same slice -// but otherwise should not overlap. If len(dst) < len(src) this function panics. -// If the nonce is neither 64, 96 nor 192 bits long, this function panics. -func XORKeyStream(dst, src, nonce, key []byte, rounds int) { - if rounds != 20 && rounds != 12 && rounds != 8 { - panic("chacha20/chacha: bad number of rounds") - } - if len(dst) < len(src) { - panic("chacha20/chacha: dst buffer is to small") - } - if len(nonce) == INonceSize && uint64(len(src)) > (1<<38) { - panic("chacha20/chacha: src is too large") - } - - var block, state [64]byte - if err := setup(&state, nonce, key); err != nil { - panic(err) - } - xorKeyStream(dst, src, &block, &state, rounds) -} - -// Cipher implements ChaCha20/r (XChaCha20/r) for a given number of rounds r. -type Cipher struct { - state, block [64]byte - off int - rounds int // 20 for ChaCha20 - noncesize int -} - -// NewCipher returns a new *chacha.Cipher implementing the ChaCha20/r or XChaCha20/r -// (r = 8, 12 or 20) stream cipher. The nonce must be unique for one key for all time. -// The length of the nonce determinds the version of ChaCha20: -// - NonceSize: ChaCha20/r with a 64 bit nonce and a 2^64 * 64 byte period. -// - INonceSize: ChaCha20/r as defined in RFC 7539 and a 2^32 * 64 byte period. -// - XNonceSize: XChaCha20/r with a 192 bit nonce and a 2^64 * 64 byte period. -// If the nonce is neither 64, 96 nor 192 bits long, a non-nil error is returned. 
-func NewCipher(nonce, key []byte, rounds int) (*Cipher, error) { - if rounds != 20 && rounds != 12 && rounds != 8 { - panic("chacha20/chacha: bad number of rounds") - } - - c := new(Cipher) - if err := setup(&(c.state), nonce, key); err != nil { - return nil, err - } - c.rounds = rounds - - if len(nonce) == INonceSize { - c.noncesize = INonceSize - } else { - c.noncesize = NonceSize - } - - return c, nil -} - -// XORKeyStream crypts bytes from src to dst. Src and dst may be the same slice -// but otherwise should not overlap. If len(dst) < len(src) the function panics. -func (c *Cipher) XORKeyStream(dst, src []byte) { - if len(dst) < len(src) { - panic("chacha20/chacha: dst buffer is to small") - } - - if c.off > 0 { - n := len(c.block[c.off:]) - if len(src) <= n { - for i, v := range src { - dst[i] = v ^ c.block[c.off] - c.off++ - } - if c.off == 64 { - c.off = 0 - } - return - } - - for i, v := range c.block[c.off:] { - dst[i] = src[i] ^ v - } - src = src[n:] - dst = dst[n:] - c.off = 0 - } - - // check for counter overflow - blocksToXOR := len(src) / 64 - if len(src)%64 != 0 { - blocksToXOR++ - } - var overflow bool - if c.noncesize == INonceSize { - overflow = binary.LittleEndian.Uint32(c.state[48:]) > math.MaxUint32-uint32(blocksToXOR) - } else { - overflow = binary.LittleEndian.Uint64(c.state[48:]) > math.MaxUint64-uint64(blocksToXOR) - } - if overflow { - panic("chacha20/chacha: counter overflow") - } - - c.off += xorKeyStream(dst, src, &(c.block), &(c.state), c.rounds) -} - -// SetCounter skips ctr * 64 byte blocks. SetCounter(0) resets the cipher. -// This function always skips the unused keystream of the current 64 byte block. -func (c *Cipher) SetCounter(ctr uint64) { - if c.noncesize == INonceSize { - binary.LittleEndian.PutUint32(c.state[48:], uint32(ctr)) - } else { - binary.LittleEndian.PutUint64(c.state[48:], ctr) - } - c.off = 0 -} - -// HChaCha20 generates 32 pseudo-random bytes from a 128 bit nonce and a 256 bit secret key. -// It can be used as a key-derivation-function (KDF). -func HChaCha20(out *[32]byte, nonce *[16]byte, key *[32]byte) { hChaCha20(out, nonce, key) } diff --git a/vendor/github.com/aead/chacha20/chacha/chachaAVX2_amd64.s b/vendor/github.com/aead/chacha20/chacha/chachaAVX2_amd64.s deleted file mode 100644 index c2b5f52f1..000000000 --- a/vendor/github.com/aead/chacha20/chacha/chachaAVX2_amd64.s +++ /dev/null @@ -1,406 +0,0 @@ -// Copyright (c) 2016 Andreas Auernhammer. All rights reserved. -// Use of this source code is governed by a license that can be -// found in the LICENSE file. 
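For reference, the nonce-length dispatch described in the deleted package docs (64-, 96-, or 192-bit nonces selecting ChaCha20, IETF-ChaCha20, or XChaCha20) maps onto golang.org/x/crypto/chacha20, which exposes the 12- and 24-byte variants. This patch only removes the vendored aead/chacha20 tree and does not name a replacement, so the sketch below is illustrative and assumes x/crypto is available:

package main

import (
	"fmt"

	"golang.org/x/crypto/chacha20"
)

func main() {
	key := make([]byte, chacha20.KeySize)

	// A 12-byte nonce selects the IETF (RFC 8439) variant...
	c, err := chacha20.NewUnauthenticatedCipher(key, make([]byte, chacha20.NonceSize))
	if err != nil {
		panic(err)
	}
	// ...and a 24-byte nonce selects XChaCha20, mirroring the removed
	// package's dispatch on len(nonce). The 8-byte original variant is
	// not exposed by x/crypto.
	x, err := chacha20.NewUnauthenticatedCipher(key, make([]byte, chacha20.NonceSizeX))
	if err != nil {
		panic(err)
	}

	msg := []byte("keystream demo")
	out := make([]byte, len(msg))
	c.XORKeyStream(out, msg)
	x.XORKeyStream(out, out) // in-place is allowed when dst and src overlap exactly
	fmt.Printf("%x\n", out)
}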
- -// +build amd64,!gccgo,!appengine,!nacl - -#include "const.s" -#include "macro.s" - -#define TWO 0(SP) -#define C16 32(SP) -#define C8 64(SP) -#define STATE_0 96(SP) -#define STATE_1 128(SP) -#define STATE_2 160(SP) -#define STATE_3 192(SP) -#define TMP_0 224(SP) -#define TMP_1 256(SP) - -// func xorKeyStreamAVX(dst, src []byte, block, state *[64]byte, rounds int) int -TEXT ·xorKeyStreamAVX2(SB), 4, $320-80 - MOVQ dst_base+0(FP), DI - MOVQ src_base+24(FP), SI - MOVQ block+48(FP), BX - MOVQ state+56(FP), AX - MOVQ rounds+64(FP), DX - MOVQ src_len+32(FP), CX - - MOVQ SP, R8 - ADDQ $32, SP - ANDQ $-32, SP - - VMOVDQU 0(AX), Y2 - VMOVDQU 32(AX), Y3 - VPERM2I128 $0x22, Y2, Y0, Y0 - VPERM2I128 $0x33, Y2, Y1, Y1 - VPERM2I128 $0x22, Y3, Y2, Y2 - VPERM2I128 $0x33, Y3, Y3, Y3 - - TESTQ CX, CX - JZ done - - VMOVDQU ·one_AVX2<>(SB), Y4 - VPADDD Y4, Y3, Y3 - - VMOVDQA Y0, STATE_0 - VMOVDQA Y1, STATE_1 - VMOVDQA Y2, STATE_2 - VMOVDQA Y3, STATE_3 - - VMOVDQU ·rol16_AVX2<>(SB), Y4 - VMOVDQU ·rol8_AVX2<>(SB), Y5 - VMOVDQU ·two_AVX2<>(SB), Y6 - VMOVDQA Y4, Y14 - VMOVDQA Y5, Y15 - VMOVDQA Y4, C16 - VMOVDQA Y5, C8 - VMOVDQA Y6, TWO - - CMPQ CX, $64 - JBE between_0_and_64 - CMPQ CX, $192 - JBE between_64_and_192 - CMPQ CX, $320 - JBE between_192_and_320 - CMPQ CX, $448 - JBE between_320_and_448 - -at_least_512: - VMOVDQA Y0, Y4 - VMOVDQA Y1, Y5 - VMOVDQA Y2, Y6 - VPADDQ TWO, Y3, Y7 - VMOVDQA Y0, Y8 - VMOVDQA Y1, Y9 - VMOVDQA Y2, Y10 - VPADDQ TWO, Y7, Y11 - VMOVDQA Y0, Y12 - VMOVDQA Y1, Y13 - VMOVDQA Y2, Y14 - VPADDQ TWO, Y11, Y15 - - MOVQ DX, R9 - -chacha_loop_512: - VMOVDQA Y8, TMP_0 - CHACHA_QROUND_AVX(Y0, Y1, Y2, Y3, Y8, C16, C8) - CHACHA_QROUND_AVX(Y4, Y5, Y6, Y7, Y8, C16, C8) - VMOVDQA TMP_0, Y8 - VMOVDQA Y0, TMP_0 - CHACHA_QROUND_AVX(Y8, Y9, Y10, Y11, Y0, C16, C8) - CHACHA_QROUND_AVX(Y12, Y13, Y14, Y15, Y0, C16, C8) - CHACHA_SHUFFLE_AVX(Y1, Y2, Y3) - CHACHA_SHUFFLE_AVX(Y5, Y6, Y7) - CHACHA_SHUFFLE_AVX(Y9, Y10, Y11) - CHACHA_SHUFFLE_AVX(Y13, Y14, Y15) - - CHACHA_QROUND_AVX(Y12, Y13, Y14, Y15, Y0, C16, C8) - CHACHA_QROUND_AVX(Y8, Y9, Y10, Y11, Y0, C16, C8) - VMOVDQA TMP_0, Y0 - VMOVDQA Y8, TMP_0 - CHACHA_QROUND_AVX(Y4, Y5, Y6, Y7, Y8, C16, C8) - CHACHA_QROUND_AVX(Y0, Y1, Y2, Y3, Y8, C16, C8) - VMOVDQA TMP_0, Y8 - CHACHA_SHUFFLE_AVX(Y3, Y2, Y1) - CHACHA_SHUFFLE_AVX(Y7, Y6, Y5) - CHACHA_SHUFFLE_AVX(Y11, Y10, Y9) - CHACHA_SHUFFLE_AVX(Y15, Y14, Y13) - SUBQ $2, R9 - JA chacha_loop_512 - - VMOVDQA Y12, TMP_0 - VMOVDQA Y13, TMP_1 - VPADDD STATE_0, Y0, Y0 - VPADDD STATE_1, Y1, Y1 - VPADDD STATE_2, Y2, Y2 - VPADDD STATE_3, Y3, Y3 - XOR_AVX2(DI, SI, 0, Y0, Y1, Y2, Y3, Y12, Y13) - VMOVDQA STATE_0, Y0 - VMOVDQA STATE_1, Y1 - VMOVDQA STATE_2, Y2 - VMOVDQA STATE_3, Y3 - VPADDQ TWO, Y3, Y3 - - VPADDD Y0, Y4, Y4 - VPADDD Y1, Y5, Y5 - VPADDD Y2, Y6, Y6 - VPADDD Y3, Y7, Y7 - XOR_AVX2(DI, SI, 128, Y4, Y5, Y6, Y7, Y12, Y13) - VPADDQ TWO, Y3, Y3 - - VPADDD Y0, Y8, Y8 - VPADDD Y1, Y9, Y9 - VPADDD Y2, Y10, Y10 - VPADDD Y3, Y11, Y11 - XOR_AVX2(DI, SI, 256, Y8, Y9, Y10, Y11, Y12, Y13) - VPADDQ TWO, Y3, Y3 - - VPADDD TMP_0, Y0, Y12 - VPADDD TMP_1, Y1, Y13 - VPADDD Y2, Y14, Y14 - VPADDD Y3, Y15, Y15 - VPADDQ TWO, Y3, Y3 - - CMPQ CX, $512 - JB less_than_512 - - XOR_AVX2(DI, SI, 384, Y12, Y13, Y14, Y15, Y4, Y5) - VMOVDQA Y3, STATE_3 - ADDQ $512, SI - ADDQ $512, DI - SUBQ $512, CX - CMPQ CX, $448 - JA at_least_512 - - TESTQ CX, CX - JZ done - - VMOVDQA C16, Y14 - VMOVDQA C8, Y15 - - CMPQ CX, $64 - JBE between_0_and_64 - CMPQ CX, $192 - JBE between_64_and_192 - CMPQ CX, $320 - JBE between_192_and_320 - JMP between_320_and_448 - 
-less_than_512: - XOR_UPPER_AVX2(DI, SI, 384, Y12, Y13, Y14, Y15, Y4, Y5) - EXTRACT_LOWER(BX, Y12, Y13, Y14, Y15, Y4) - ADDQ $448, SI - ADDQ $448, DI - SUBQ $448, CX - JMP finalize - -between_320_and_448: - VMOVDQA Y0, Y4 - VMOVDQA Y1, Y5 - VMOVDQA Y2, Y6 - VPADDQ TWO, Y3, Y7 - VMOVDQA Y0, Y8 - VMOVDQA Y1, Y9 - VMOVDQA Y2, Y10 - VPADDQ TWO, Y7, Y11 - - MOVQ DX, R9 - -chacha_loop_384: - CHACHA_QROUND_AVX(Y0, Y1, Y2, Y3, Y13, Y14, Y15) - CHACHA_QROUND_AVX(Y4, Y5, Y6, Y7, Y13, Y14, Y15) - CHACHA_QROUND_AVX(Y8, Y9, Y10, Y11, Y13, Y14, Y15) - CHACHA_SHUFFLE_AVX(Y1, Y2, Y3) - CHACHA_SHUFFLE_AVX(Y5, Y6, Y7) - CHACHA_SHUFFLE_AVX(Y9, Y10, Y11) - CHACHA_QROUND_AVX(Y0, Y1, Y2, Y3, Y13, Y14, Y15) - CHACHA_QROUND_AVX(Y4, Y5, Y6, Y7, Y13, Y14, Y15) - CHACHA_QROUND_AVX(Y8, Y9, Y10, Y11, Y13, Y14, Y15) - CHACHA_SHUFFLE_AVX(Y3, Y2, Y1) - CHACHA_SHUFFLE_AVX(Y7, Y6, Y5) - CHACHA_SHUFFLE_AVX(Y11, Y10, Y9) - SUBQ $2, R9 - JA chacha_loop_384 - - VPADDD STATE_0, Y0, Y0 - VPADDD STATE_1, Y1, Y1 - VPADDD STATE_2, Y2, Y2 - VPADDD STATE_3, Y3, Y3 - XOR_AVX2(DI, SI, 0, Y0, Y1, Y2, Y3, Y12, Y13) - VMOVDQA STATE_0, Y0 - VMOVDQA STATE_1, Y1 - VMOVDQA STATE_2, Y2 - VMOVDQA STATE_3, Y3 - VPADDQ TWO, Y3, Y3 - - VPADDD Y0, Y4, Y4 - VPADDD Y1, Y5, Y5 - VPADDD Y2, Y6, Y6 - VPADDD Y3, Y7, Y7 - XOR_AVX2(DI, SI, 128, Y4, Y5, Y6, Y7, Y12, Y13) - VPADDQ TWO, Y3, Y3 - - VPADDD Y0, Y8, Y8 - VPADDD Y1, Y9, Y9 - VPADDD Y2, Y10, Y10 - VPADDD Y3, Y11, Y11 - VPADDQ TWO, Y3, Y3 - - CMPQ CX, $384 - JB less_than_384 - - XOR_AVX2(DI, SI, 256, Y8, Y9, Y10, Y11, Y12, Y13) - SUBQ $384, CX - TESTQ CX, CX - JE done - - ADDQ $384, SI - ADDQ $384, DI - JMP between_0_and_64 - -less_than_384: - XOR_UPPER_AVX2(DI, SI, 256, Y8, Y9, Y10, Y11, Y12, Y13) - EXTRACT_LOWER(BX, Y8, Y9, Y10, Y11, Y12) - ADDQ $320, SI - ADDQ $320, DI - SUBQ $320, CX - JMP finalize - -between_192_and_320: - VMOVDQA Y0, Y4 - VMOVDQA Y1, Y5 - VMOVDQA Y2, Y6 - VMOVDQA Y3, Y7 - VMOVDQA Y0, Y8 - VMOVDQA Y1, Y9 - VMOVDQA Y2, Y10 - VPADDQ TWO, Y3, Y11 - - MOVQ DX, R9 - -chacha_loop_256: - CHACHA_QROUND_AVX(Y4, Y5, Y6, Y7, Y13, Y14, Y15) - CHACHA_QROUND_AVX(Y8, Y9, Y10, Y11, Y13, Y14, Y15) - CHACHA_SHUFFLE_AVX(Y5, Y6, Y7) - CHACHA_SHUFFLE_AVX(Y9, Y10, Y11) - CHACHA_QROUND_AVX(Y4, Y5, Y6, Y7, Y13, Y14, Y15) - CHACHA_QROUND_AVX(Y8, Y9, Y10, Y11, Y13, Y14, Y15) - CHACHA_SHUFFLE_AVX(Y7, Y6, Y5) - CHACHA_SHUFFLE_AVX(Y11, Y10, Y9) - SUBQ $2, R9 - JA chacha_loop_256 - - VPADDD Y0, Y4, Y4 - VPADDD Y1, Y5, Y5 - VPADDD Y2, Y6, Y6 - VPADDD Y3, Y7, Y7 - VPADDQ TWO, Y3, Y3 - XOR_AVX2(DI, SI, 0, Y4, Y5, Y6, Y7, Y12, Y13) - VPADDD Y0, Y8, Y8 - VPADDD Y1, Y9, Y9 - VPADDD Y2, Y10, Y10 - VPADDD Y3, Y11, Y11 - VPADDQ TWO, Y3, Y3 - - CMPQ CX, $256 - JB less_than_256 - - XOR_AVX2(DI, SI, 128, Y8, Y9, Y10, Y11, Y12, Y13) - SUBQ $256, CX - TESTQ CX, CX - JE done - - ADDQ $256, SI - ADDQ $256, DI - JMP between_0_and_64 - -less_than_256: - XOR_UPPER_AVX2(DI, SI, 128, Y8, Y9, Y10, Y11, Y12, Y13) - EXTRACT_LOWER(BX, Y8, Y9, Y10, Y11, Y12) - ADDQ $192, SI - ADDQ $192, DI - SUBQ $192, CX - JMP finalize - -between_64_and_192: - VMOVDQA Y0, Y4 - VMOVDQA Y1, Y5 - VMOVDQA Y2, Y6 - VMOVDQA Y3, Y7 - - MOVQ DX, R9 - -chacha_loop_128: - CHACHA_QROUND_AVX(Y4, Y5, Y6, Y7, Y13, Y14, Y15) - CHACHA_SHUFFLE_AVX(Y5, Y6, Y7) - CHACHA_QROUND_AVX(Y4, Y5, Y6, Y7, Y13, Y14, Y15) - CHACHA_SHUFFLE_AVX(Y7, Y6, Y5) - SUBQ $2, R9 - JA chacha_loop_128 - - VPADDD Y0, Y4, Y4 - VPADDD Y1, Y5, Y5 - VPADDD Y2, Y6, Y6 - VPADDD Y3, Y7, Y7 - VPADDQ TWO, Y3, Y3 - - CMPQ CX, $128 - JB less_than_128 - - XOR_AVX2(DI, SI, 0, Y4, Y5, Y6, Y7, Y12, Y13) - 
SUBQ $128, CX - TESTQ CX, CX - JE done - - ADDQ $128, SI - ADDQ $128, DI - JMP between_0_and_64 - -less_than_128: - XOR_UPPER_AVX2(DI, SI, 0, Y4, Y5, Y6, Y7, Y12, Y13) - EXTRACT_LOWER(BX, Y4, Y5, Y6, Y7, Y13) - ADDQ $64, SI - ADDQ $64, DI - SUBQ $64, CX - JMP finalize - -between_0_and_64: - VMOVDQA X0, X4 - VMOVDQA X1, X5 - VMOVDQA X2, X6 - VMOVDQA X3, X7 - - MOVQ DX, R9 - -chacha_loop_64: - CHACHA_QROUND_AVX(X4, X5, X6, X7, X13, X14, X15) - CHACHA_SHUFFLE_AVX(X5, X6, X7) - CHACHA_QROUND_AVX(X4, X5, X6, X7, X13, X14, X15) - CHACHA_SHUFFLE_AVX(X7, X6, X5) - SUBQ $2, R9 - JA chacha_loop_64 - - VPADDD X0, X4, X4 - VPADDD X1, X5, X5 - VPADDD X2, X6, X6 - VPADDD X3, X7, X7 - VMOVDQU ·one<>(SB), X0 - VPADDQ X0, X3, X3 - - CMPQ CX, $64 - JB less_than_64 - - XOR_AVX(DI, SI, 0, X4, X5, X6, X7, X13) - SUBQ $64, CX - JMP done - -less_than_64: - VMOVDQU X4, 0(BX) - VMOVDQU X5, 16(BX) - VMOVDQU X6, 32(BX) - VMOVDQU X7, 48(BX) - -finalize: - XORQ R11, R11 - XORQ R12, R12 - MOVQ CX, BP - -xor_loop: - MOVB 0(SI), R11 - MOVB 0(BX), R12 - XORQ R11, R12 - MOVB R12, 0(DI) - INCQ SI - INCQ BX - INCQ DI - DECQ BP - JA xor_loop - -done: - VMOVDQU X3, 48(AX) - VZEROUPPER - MOVQ R8, SP - MOVQ CX, ret+72(FP) - RET - diff --git a/vendor/github.com/aead/chacha20/chacha/chacha_386.go b/vendor/github.com/aead/chacha20/chacha/chacha_386.go deleted file mode 100644 index 97e533d3c..000000000 --- a/vendor/github.com/aead/chacha20/chacha/chacha_386.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright (c) 2016 Andreas Auernhammer. All rights reserved. -// Use of this source code is governed by a license that can be -// found in the LICENSE file. - -// +build 386,!gccgo,!appengine,!nacl - -package chacha - -import ( - "encoding/binary" - - "golang.org/x/sys/cpu" -) - -func init() { - useSSE2 = cpu.X86.HasSSE2 - useSSSE3 = cpu.X86.HasSSSE3 - useAVX = false - useAVX2 = false -} - -func initialize(state *[64]byte, key []byte, nonce *[16]byte) { - binary.LittleEndian.PutUint32(state[0:], sigma[0]) - binary.LittleEndian.PutUint32(state[4:], sigma[1]) - binary.LittleEndian.PutUint32(state[8:], sigma[2]) - binary.LittleEndian.PutUint32(state[12:], sigma[3]) - copy(state[16:], key[:]) - copy(state[48:], nonce[:]) -} - -// This function is implemented in chacha_386.s -//go:noescape -func hChaCha20SSE2(out *[32]byte, nonce *[16]byte, key *[32]byte) - -// This function is implemented in chacha_386.s -//go:noescape -func hChaCha20SSSE3(out *[32]byte, nonce *[16]byte, key *[32]byte) - -// This function is implemented in chacha_386.s -//go:noescape -func xorKeyStreamSSE2(dst, src []byte, block, state *[64]byte, rounds int) int - -func hChaCha20(out *[32]byte, nonce *[16]byte, key *[32]byte) { - switch { - case useSSSE3: - hChaCha20SSSE3(out, nonce, key) - case useSSE2: - hChaCha20SSE2(out, nonce, key) - default: - hChaCha20Generic(out, nonce, key) - } -} - -func xorKeyStream(dst, src []byte, block, state *[64]byte, rounds int) int { - if useSSE2 { - return xorKeyStreamSSE2(dst, src, block, state, rounds) - } else { - return xorKeyStreamGeneric(dst, src, block, state, rounds) - } -} diff --git a/vendor/github.com/aead/chacha20/chacha/chacha_386.s b/vendor/github.com/aead/chacha20/chacha/chacha_386.s deleted file mode 100644 index 262fc8698..000000000 --- a/vendor/github.com/aead/chacha20/chacha/chacha_386.s +++ /dev/null @@ -1,163 +0,0 @@ -// Copyright (c) 2016 Andreas Auernhammer. All rights reserved. -// Use of this source code is governed by a license that can be -// found in the LICENSE file. 
- -// +build 386,!gccgo,!appengine,!nacl - -#include "const.s" -#include "macro.s" - -// FINALIZE xors len bytes from src and block using -// the temp. registers t0 and t1 and writes the result -// to dst. -#define FINALIZE(dst, src, block, len, t0, t1) \ - XORL t0, t0; \ - XORL t1, t1; \ - FINALIZE_LOOP:; \ - MOVB 0(src), t0; \ - MOVB 0(block), t1; \ - XORL t0, t1; \ - MOVB t1, 0(dst); \ - INCL src; \ - INCL block; \ - INCL dst; \ - DECL len; \ - JG FINALIZE_LOOP \ - -#define Dst DI -#define Nonce AX -#define Key BX -#define Rounds DX - -// func hChaCha20SSE2(out *[32]byte, nonce *[16]byte, key *[32]byte) -TEXT ·hChaCha20SSE2(SB), 4, $0-12 - MOVL out+0(FP), Dst - MOVL nonce+4(FP), Nonce - MOVL key+8(FP), Key - - MOVOU ·sigma<>(SB), X0 - MOVOU 0*16(Key), X1 - MOVOU 1*16(Key), X2 - MOVOU 0*16(Nonce), X3 - MOVL $20, Rounds - -chacha_loop: - CHACHA_QROUND_SSE2(X0, X1, X2, X3, X4) - CHACHA_SHUFFLE_SSE(X1, X2, X3) - CHACHA_QROUND_SSE2(X0, X1, X2, X3, X4) - CHACHA_SHUFFLE_SSE(X3, X2, X1) - SUBL $2, Rounds - JNZ chacha_loop - - MOVOU X0, 0*16(Dst) - MOVOU X3, 1*16(Dst) - RET - -// func hChaCha20SSSE3(out *[32]byte, nonce *[16]byte, key *[32]byte) -TEXT ·hChaCha20SSSE3(SB), 4, $0-12 - MOVL out+0(FP), Dst - MOVL nonce+4(FP), Nonce - MOVL key+8(FP), Key - - MOVOU ·sigma<>(SB), X0 - MOVOU 0*16(Key), X1 - MOVOU 1*16(Key), X2 - MOVOU 0*16(Nonce), X3 - MOVL $20, Rounds - - MOVOU ·rol16<>(SB), X5 - MOVOU ·rol8<>(SB), X6 - -chacha_loop: - CHACHA_QROUND_SSSE3(X0, X1, X2, X3, X4, X5, X6) - CHACHA_SHUFFLE_SSE(X1, X2, X3) - CHACHA_QROUND_SSSE3(X0, X1, X2, X3, X4, X5, X6) - CHACHA_SHUFFLE_SSE(X3, X2, X1) - SUBL $2, Rounds - JNZ chacha_loop - - MOVOU X0, 0*16(Dst) - MOVOU X3, 1*16(Dst) - RET - -#undef Dst -#undef Nonce -#undef Key -#undef Rounds - -#define State AX -#define Dst DI -#define Src SI -#define Len DX -#define Tmp0 BX -#define Tmp1 BP - -// func xorKeyStreamSSE2(dst, src []byte, block, state *[64]byte, rounds int) int -TEXT ·xorKeyStreamSSE2(SB), 4, $0-40 - MOVL dst_base+0(FP), Dst - MOVL src_base+12(FP), Src - MOVL state+28(FP), State - MOVL src_len+16(FP), Len - MOVL $0, ret+36(FP) // Number of bytes written to the keystream buffer - 0 iff len mod 64 == 0 - - MOVOU 0*16(State), X0 - MOVOU 1*16(State), X1 - MOVOU 2*16(State), X2 - MOVOU 3*16(State), X3 - TESTL Len, Len - JZ DONE - -GENERATE_KEYSTREAM: - MOVO X0, X4 - MOVO X1, X5 - MOVO X2, X6 - MOVO X3, X7 - MOVL rounds+32(FP), Tmp0 - -CHACHA_LOOP: - CHACHA_QROUND_SSE2(X4, X5, X6, X7, X0) - CHACHA_SHUFFLE_SSE(X5, X6, X7) - CHACHA_QROUND_SSE2(X4, X5, X6, X7, X0) - CHACHA_SHUFFLE_SSE(X7, X6, X5) - SUBL $2, Tmp0 - JA CHACHA_LOOP - - MOVOU 0*16(State), X0 // Restore X0 from state - PADDL X0, X4 - PADDL X1, X5 - PADDL X2, X6 - PADDL X3, X7 - MOVOU ·one<>(SB), X0 - PADDQ X0, X3 - - CMPL Len, $64 - JL BUFFER_KEYSTREAM - - XOR_SSE(Dst, Src, 0, X4, X5, X6, X7, X0) - MOVOU 0*16(State), X0 // Restore X0 from state - ADDL $64, Src - ADDL $64, Dst - SUBL $64, Len - JZ DONE - JMP GENERATE_KEYSTREAM // There is at least one more plaintext byte - -BUFFER_KEYSTREAM: - MOVL block+24(FP), State - MOVOU X4, 0(State) - MOVOU X5, 16(State) - MOVOU X6, 32(State) - MOVOU X7, 48(State) - MOVL Len, ret+36(FP) // Number of bytes written to the keystream buffer - 0 < Len < 64 - FINALIZE(Dst, Src, State, Len, Tmp0, Tmp1) - -DONE: - MOVL state+28(FP), State - MOVOU X3, 3*16(State) - RET - -#undef State -#undef Dst -#undef Src -#undef Len -#undef Tmp0 -#undef Tmp1 diff --git a/vendor/github.com/aead/chacha20/chacha/chacha_amd64.go 
b/vendor/github.com/aead/chacha20/chacha/chacha_amd64.go deleted file mode 100644 index 635f7de84..000000000 --- a/vendor/github.com/aead/chacha20/chacha/chacha_amd64.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright (c) 2017 Andreas Auernhammer. All rights reserved. -// Use of this source code is governed by a license that can be -// found in the LICENSE file. - -// +build go1.7,amd64,!gccgo,!appengine,!nacl - -package chacha - -import "golang.org/x/sys/cpu" - -func init() { - useSSE2 = cpu.X86.HasSSE2 - useSSSE3 = cpu.X86.HasSSSE3 - useAVX = cpu.X86.HasAVX - useAVX2 = cpu.X86.HasAVX2 -} - -// This function is implemented in chacha_amd64.s -//go:noescape -func initialize(state *[64]byte, key []byte, nonce *[16]byte) - -// This function is implemented in chacha_amd64.s -//go:noescape -func hChaCha20SSE2(out *[32]byte, nonce *[16]byte, key *[32]byte) - -// This function is implemented in chacha_amd64.s -//go:noescape -func hChaCha20SSSE3(out *[32]byte, nonce *[16]byte, key *[32]byte) - -// This function is implemented in chachaAVX2_amd64.s -//go:noescape -func hChaCha20AVX(out *[32]byte, nonce *[16]byte, key *[32]byte) - -// This function is implemented in chacha_amd64.s -//go:noescape -func xorKeyStreamSSE2(dst, src []byte, block, state *[64]byte, rounds int) int - -// This function is implemented in chacha_amd64.s -//go:noescape -func xorKeyStreamSSSE3(dst, src []byte, block, state *[64]byte, rounds int) int - -// This function is implemented in chacha_amd64.s -//go:noescape -func xorKeyStreamAVX(dst, src []byte, block, state *[64]byte, rounds int) int - -// This function is implemented in chachaAVX2_amd64.s -//go:noescape -func xorKeyStreamAVX2(dst, src []byte, block, state *[64]byte, rounds int) int - -func hChaCha20(out *[32]byte, nonce *[16]byte, key *[32]byte) { - switch { - case useAVX: - hChaCha20AVX(out, nonce, key) - case useSSSE3: - hChaCha20SSSE3(out, nonce, key) - case useSSE2: - hChaCha20SSE2(out, nonce, key) - default: - hChaCha20Generic(out, nonce, key) - } -} - -func xorKeyStream(dst, src []byte, block, state *[64]byte, rounds int) int { - switch { - case useAVX2: - return xorKeyStreamAVX2(dst, src, block, state, rounds) - case useAVX: - return xorKeyStreamAVX(dst, src, block, state, rounds) - case useSSSE3: - return xorKeyStreamSSSE3(dst, src, block, state, rounds) - case useSSE2: - return xorKeyStreamSSE2(dst, src, block, state, rounds) - default: - return xorKeyStreamGeneric(dst, src, block, state, rounds) - } -} diff --git a/vendor/github.com/aead/chacha20/chacha/chacha_amd64.s b/vendor/github.com/aead/chacha20/chacha/chacha_amd64.s deleted file mode 100644 index 26a238350..000000000 --- a/vendor/github.com/aead/chacha20/chacha/chacha_amd64.s +++ /dev/null @@ -1,1072 +0,0 @@ -// Copyright (c) 2016 Andreas Auernhammer. All rights reserved. -// Use of this source code is governed by a license that can be -// found in the LICENSE file. - -// +build amd64,!gccgo,!appengine,!nacl - -#include "const.s" -#include "macro.s" - -// FINALIZE xors len bytes from src and block using -// the temp. registers t0 and t1 and writes the result -// to dst. 
-#define FINALIZE(dst, src, block, len, t0, t1) \ - XORQ t0, t0; \ - XORQ t1, t1; \ - FINALIZE_LOOP:; \ - MOVB 0(src), t0; \ - MOVB 0(block), t1; \ - XORQ t0, t1; \ - MOVB t1, 0(dst); \ - INCQ src; \ - INCQ block; \ - INCQ dst; \ - DECQ len; \ - JG FINALIZE_LOOP \ - -#define Dst DI -#define Nonce AX -#define Key BX -#define Rounds DX - -// func initialize(state *[64]byte, key []byte, nonce *[16]byte) -TEXT ·initialize(SB), 4, $0-40 - MOVQ state+0(FP), Dst - MOVQ key+8(FP), Key - MOVQ nonce+32(FP), Nonce - - MOVOU ·sigma<>(SB), X0 - MOVOU 0*16(Key), X1 - MOVOU 1*16(Key), X2 - MOVOU 0*16(Nonce), X3 - - MOVOU X0, 0*16(Dst) - MOVOU X1, 1*16(Dst) - MOVOU X2, 2*16(Dst) - MOVOU X3, 3*16(Dst) - RET - -// func hChaCha20AVX(out *[32]byte, nonce *[16]byte, key *[32]byte) -TEXT ·hChaCha20AVX(SB), 4, $0-24 - MOVQ out+0(FP), Dst - MOVQ nonce+8(FP), Nonce - MOVQ key+16(FP), Key - - VMOVDQU ·sigma<>(SB), X0 - VMOVDQU 0*16(Key), X1 - VMOVDQU 1*16(Key), X2 - VMOVDQU 0*16(Nonce), X3 - VMOVDQU ·rol16_AVX2<>(SB), X5 - VMOVDQU ·rol8_AVX2<>(SB), X6 - MOVQ $20, Rounds - -CHACHA_LOOP: - CHACHA_QROUND_AVX(X0, X1, X2, X3, X4, X5, X6) - CHACHA_SHUFFLE_AVX(X1, X2, X3) - CHACHA_QROUND_AVX(X0, X1, X2, X3, X4, X5, X6) - CHACHA_SHUFFLE_AVX(X3, X2, X1) - SUBQ $2, Rounds - JNZ CHACHA_LOOP - - VMOVDQU X0, 0*16(Dst) - VMOVDQU X3, 1*16(Dst) - VZEROUPPER - RET - -// func hChaCha20SSE2(out *[32]byte, nonce *[16]byte, key *[32]byte) -TEXT ·hChaCha20SSE2(SB), 4, $0-24 - MOVQ out+0(FP), Dst - MOVQ nonce+8(FP), Nonce - MOVQ key+16(FP), Key - - MOVOU ·sigma<>(SB), X0 - MOVOU 0*16(Key), X1 - MOVOU 1*16(Key), X2 - MOVOU 0*16(Nonce), X3 - MOVQ $20, Rounds - -CHACHA_LOOP: - CHACHA_QROUND_SSE2(X0, X1, X2, X3, X4) - CHACHA_SHUFFLE_SSE(X1, X2, X3) - CHACHA_QROUND_SSE2(X0, X1, X2, X3, X4) - CHACHA_SHUFFLE_SSE(X3, X2, X1) - SUBQ $2, Rounds - JNZ CHACHA_LOOP - - MOVOU X0, 0*16(Dst) - MOVOU X3, 1*16(Dst) - RET - -// func hChaCha20SSSE3(out *[32]byte, nonce *[16]byte, key *[32]byte) -TEXT ·hChaCha20SSSE3(SB), 4, $0-24 - MOVQ out+0(FP), Dst - MOVQ nonce+8(FP), Nonce - MOVQ key+16(FP), Key - - MOVOU ·sigma<>(SB), X0 - MOVOU 0*16(Key), X1 - MOVOU 1*16(Key), X2 - MOVOU 0*16(Nonce), X3 - MOVOU ·rol16<>(SB), X5 - MOVOU ·rol8<>(SB), X6 - MOVQ $20, Rounds - -chacha_loop: - CHACHA_QROUND_SSSE3(X0, X1, X2, X3, X4, X5, X6) - CHACHA_SHUFFLE_SSE(X1, X2, X3) - CHACHA_QROUND_SSSE3(X0, X1, X2, X3, X4, X5, X6) - CHACHA_SHUFFLE_SSE(X3, X2, X1) - SUBQ $2, Rounds - JNZ chacha_loop - - MOVOU X0, 0*16(Dst) - MOVOU X3, 1*16(Dst) - RET - -#undef Dst -#undef Nonce -#undef Key -#undef Rounds - -#define Dst DI -#define Src SI -#define Len R12 -#define Rounds DX -#define Buffer BX -#define State AX -#define Stack SP -#define SavedSP R8 -#define Tmp0 R9 -#define Tmp1 R10 -#define Tmp2 R11 - -// func xorKeyStreamSSE2(dst, src []byte, block, state *[64]byte, rounds int) int -TEXT ·xorKeyStreamSSE2(SB), 4, $112-80 - MOVQ dst_base+0(FP), Dst - MOVQ src_base+24(FP), Src - MOVQ block+48(FP), Buffer - MOVQ state+56(FP), State - MOVQ rounds+64(FP), Rounds - MOVQ src_len+32(FP), Len - - MOVOU 0*16(State), X0 - MOVOU 1*16(State), X1 - MOVOU 2*16(State), X2 - MOVOU 3*16(State), X3 - - MOVQ Stack, SavedSP - ADDQ $16, Stack - ANDQ $-16, Stack - - TESTQ Len, Len - JZ DONE - - MOVOU ·one<>(SB), X4 - MOVO X0, 0*16(Stack) - MOVO X1, 1*16(Stack) - MOVO X2, 2*16(Stack) - MOVO X3, 3*16(Stack) - MOVO X4, 4*16(Stack) - - CMPQ Len, $64 - JLE GENERATE_KEYSTREAM_64 - CMPQ Len, $128 - JLE GENERATE_KEYSTREAM_128 - CMPQ Len, $192 - JLE GENERATE_KEYSTREAM_192 - -GENERATE_KEYSTREAM_256: - MOVO X0, X12 - 
MOVO X1, X13 - MOVO X2, X14 - MOVO X3, X15 - PADDQ 4*16(Stack), X15 - MOVO X0, X8 - MOVO X1, X9 - MOVO X2, X10 - MOVO X15, X11 - PADDQ 4*16(Stack), X11 - MOVO X0, X4 - MOVO X1, X5 - MOVO X2, X6 - MOVO X11, X7 - PADDQ 4*16(Stack), X7 - MOVQ Rounds, Tmp0 - - MOVO X3, 3*16(Stack) // Save X3 - -CHACHA_LOOP_256: - MOVO X4, 5*16(Stack) - CHACHA_QROUND_SSE2(X0, X1, X2, X3, X4) - CHACHA_QROUND_SSE2(X12, X13, X14, X15, X4) - MOVO 5*16(Stack), X4 - MOVO X0, 5*16(Stack) - CHACHA_QROUND_SSE2(X8, X9, X10, X11, X0) - CHACHA_QROUND_SSE2(X4, X5, X6, X7, X0) - MOVO 5*16(Stack), X0 - CHACHA_SHUFFLE_SSE(X1, X2, X3) - CHACHA_SHUFFLE_SSE(X13, X14, X15) - CHACHA_SHUFFLE_SSE(X9, X10, X11) - CHACHA_SHUFFLE_SSE(X5, X6, X7) - MOVO X4, 5*16(Stack) - CHACHA_QROUND_SSE2(X0, X1, X2, X3, X4) - CHACHA_QROUND_SSE2(X12, X13, X14, X15, X4) - MOVO 5*16(Stack), X4 - MOVO X0, 5*16(Stack) - CHACHA_QROUND_SSE2(X8, X9, X10, X11, X0) - CHACHA_QROUND_SSE2(X4, X5, X6, X7, X0) - MOVO 5*16(Stack), X0 - CHACHA_SHUFFLE_SSE(X3, X2, X1) - CHACHA_SHUFFLE_SSE(X15, X14, X13) - CHACHA_SHUFFLE_SSE(X11, X10, X9) - CHACHA_SHUFFLE_SSE(X7, X6, X5) - SUBQ $2, Tmp0 - JNZ CHACHA_LOOP_256 - - PADDL 0*16(Stack), X0 - PADDL 1*16(Stack), X1 - PADDL 2*16(Stack), X2 - PADDL 3*16(Stack), X3 - MOVO X4, 5*16(Stack) // Save X4 - XOR_SSE(Dst, Src, 0, X0, X1, X2, X3, X4) - MOVO 5*16(Stack), X4 // Restore X4 - - MOVO 0*16(Stack), X0 - MOVO 1*16(Stack), X1 - MOVO 2*16(Stack), X2 - MOVO 3*16(Stack), X3 - PADDQ 4*16(Stack), X3 - - PADDL X0, X12 - PADDL X1, X13 - PADDL X2, X14 - PADDL X3, X15 - PADDQ 4*16(Stack), X3 - PADDL X0, X8 - PADDL X1, X9 - PADDL X2, X10 - PADDL X3, X11 - PADDQ 4*16(Stack), X3 - PADDL X0, X4 - PADDL X1, X5 - PADDL X2, X6 - PADDL X3, X7 - PADDQ 4*16(Stack), X3 - - XOR_SSE(Dst, Src, 64, X12, X13, X14, X15, X0) - XOR_SSE(Dst, Src, 128, X8, X9, X10, X11, X0) - MOVO 0*16(Stack), X0 // Restore X0 - ADDQ $192, Dst - ADDQ $192, Src - SUBQ $192, Len - - CMPQ Len, $64 - JL BUFFER_KEYSTREAM - - XOR_SSE(Dst, Src, 0, X4, X5, X6, X7, X8) - ADDQ $64, Dst - ADDQ $64, Src - SUBQ $64, Len - JZ DONE - CMPQ Len, $64 // If Len <= 64 -> gen. only 64 byte keystream. - JLE GENERATE_KEYSTREAM_64 - CMPQ Len, $128 // If 64 < Len <= 128 -> gen. only 128 byte keystream. - JLE GENERATE_KEYSTREAM_128 - CMPQ Len, $192 // If Len > 192 -> repeat, otherwise Len > 128 && Len <= 192 -> gen. 
192 byte keystream - JG GENERATE_KEYSTREAM_256 - -GENERATE_KEYSTREAM_192: - MOVO X0, X12 - MOVO X1, X13 - MOVO X2, X14 - MOVO X3, X15 - MOVO X0, X8 - MOVO X1, X9 - MOVO X2, X10 - MOVO X3, X11 - PADDQ 4*16(Stack), X11 - MOVO X0, X4 - MOVO X1, X5 - MOVO X2, X6 - MOVO X11, X7 - PADDQ 4*16(Stack), X7 - MOVQ Rounds, Tmp0 - -CHACHA_LOOP_192: - CHACHA_QROUND_SSE2(X12, X13, X14, X15, X0) - CHACHA_QROUND_SSE2(X8, X9, X10, X11, X0) - CHACHA_QROUND_SSE2(X4, X5, X6, X7, X0) - CHACHA_SHUFFLE_SSE(X13, X14, X15) - CHACHA_SHUFFLE_SSE(X9, X10, X11) - CHACHA_SHUFFLE_SSE(X5, X6, X7) - CHACHA_QROUND_SSE2(X12, X13, X14, X15, X0) - CHACHA_QROUND_SSE2(X8, X9, X10, X11, X0) - CHACHA_QROUND_SSE2(X4, X5, X6, X7, X0) - CHACHA_SHUFFLE_SSE(X15, X14, X13) - CHACHA_SHUFFLE_SSE(X11, X10, X9) - CHACHA_SHUFFLE_SSE(X7, X6, X5) - SUBQ $2, Tmp0 - JNZ CHACHA_LOOP_192 - - MOVO 0*16(Stack), X0 // Restore X0 - PADDL X0, X12 - PADDL X1, X13 - PADDL X2, X14 - PADDL X3, X15 - PADDQ 4*16(Stack), X3 - PADDL X0, X8 - PADDL X1, X9 - PADDL X2, X10 - PADDL X3, X11 - PADDQ 4*16(Stack), X3 - PADDL X0, X4 - PADDL X1, X5 - PADDL X2, X6 - PADDL X3, X7 - PADDQ 4*16(Stack), X3 - - XOR_SSE(Dst, Src, 0, X12, X13, X14, X15, X0) - XOR_SSE(Dst, Src, 64, X8, X9, X10, X11, X0) - MOVO 0*16(Stack), X0 // Restore X0 - ADDQ $128, Dst - ADDQ $128, Src - SUBQ $128, Len - - CMPQ Len, $64 - JL BUFFER_KEYSTREAM - - XOR_SSE(Dst, Src, 0, X4, X5, X6, X7, X8) - ADDQ $64, Dst - ADDQ $64, Src - SUBQ $64, Len - JZ DONE - CMPQ Len, $64 // If Len <= 64 -> gen. only 64 byte keystream. - JLE GENERATE_KEYSTREAM_64 - -GENERATE_KEYSTREAM_128: - MOVO X0, X8 - MOVO X1, X9 - MOVO X2, X10 - MOVO X3, X11 - MOVO X0, X4 - MOVO X1, X5 - MOVO X2, X6 - MOVO X3, X7 - PADDQ 4*16(Stack), X7 - MOVQ Rounds, Tmp0 - -CHACHA_LOOP_128: - CHACHA_QROUND_SSE2(X8, X9, X10, X11, X12) - CHACHA_QROUND_SSE2(X4, X5, X6, X7, X12) - CHACHA_SHUFFLE_SSE(X9, X10, X11) - CHACHA_SHUFFLE_SSE(X5, X6, X7) - CHACHA_QROUND_SSE2(X8, X9, X10, X11, X12) - CHACHA_QROUND_SSE2(X4, X5, X6, X7, X12) - CHACHA_SHUFFLE_SSE(X11, X10, X9) - CHACHA_SHUFFLE_SSE(X7, X6, X5) - SUBQ $2, Tmp0 - JNZ CHACHA_LOOP_128 - - PADDL X0, X8 - PADDL X1, X9 - PADDL X2, X10 - PADDL X3, X11 - PADDQ 4*16(Stack), X3 - PADDL X0, X4 - PADDL X1, X5 - PADDL X2, X6 - PADDL X3, X7 - PADDQ 4*16(Stack), X3 - - XOR_SSE(Dst, Src, 0, X8, X9, X10, X11, X12) - ADDQ $64, Dst - ADDQ $64, Src - SUBQ $64, Len - - CMPQ Len, $64 - JL BUFFER_KEYSTREAM - - XOR_SSE(Dst, Src, 0, X4, X5, X6, X7, X8) - ADDQ $64, Dst - ADDQ $64, Src - SUBQ $64, Len - JZ DONE // If Len == 0 -> DONE, otherwise Len <= 64 -> gen 64 byte keystream - -GENERATE_KEYSTREAM_64: - MOVO X0, X4 - MOVO X1, X5 - MOVO X2, X6 - MOVO X3, X7 - MOVQ Rounds, Tmp0 - -CHACHA_LOOP_64: - CHACHA_QROUND_SSE2(X4, X5, X6, X7, X8) - CHACHA_SHUFFLE_SSE(X5, X6, X7) - CHACHA_QROUND_SSE2(X4, X5, X6, X7, X8) - CHACHA_SHUFFLE_SSE(X7, X6, X5) - SUBQ $2, Tmp0 - JNZ CHACHA_LOOP_64 - - PADDL X0, X4 - PADDL X1, X5 - PADDL X2, X6 - PADDL X3, X7 - PADDQ 4*16(Stack), X3 - - CMPQ Len, $64 - JL BUFFER_KEYSTREAM - - XOR_SSE(Dst, Src, 0, X4, X5, X6, X7, X8) - ADDQ $64, Src - ADDQ $64, Dst - SUBQ $64, Len - JMP DONE // jump directly to DONE - there is no keystream to buffer, Len == 0 always true. 
- -BUFFER_KEYSTREAM: - MOVOU X4, 0*16(Buffer) - MOVOU X5, 1*16(Buffer) - MOVOU X6, 2*16(Buffer) - MOVOU X7, 3*16(Buffer) - MOVQ Len, Tmp0 - FINALIZE(Dst, Src, Buffer, Tmp0, Tmp1, Tmp2) - -DONE: - MOVQ SavedSP, Stack // Restore stack pointer - MOVOU X3, 3*16(State) - MOVQ Len, ret+72(FP) - RET - -// func xorKeyStreamSSSE3(dst, src []byte, block, state *[64]byte, rounds int) int -TEXT ·xorKeyStreamSSSE3(SB), 4, $144-80 - MOVQ dst_base+0(FP), Dst - MOVQ src_base+24(FP), Src - MOVQ block+48(FP), Buffer - MOVQ state+56(FP), State - MOVQ rounds+64(FP), Rounds - MOVQ src_len+32(FP), Len - - MOVOU 0*16(State), X0 - MOVOU 1*16(State), X1 - MOVOU 2*16(State), X2 - MOVOU 3*16(State), X3 - - MOVQ Stack, SavedSP - ADDQ $16, Stack - ANDQ $-16, Stack - - TESTQ Len, Len - JZ DONE - - MOVOU ·one<>(SB), X4 - MOVOU ·rol16<>(SB), X5 - MOVOU ·rol8<>(SB), X6 - MOVO X0, 0*16(Stack) - MOVO X1, 1*16(Stack) - MOVO X2, 2*16(Stack) - MOVO X3, 3*16(Stack) - MOVO X4, 4*16(Stack) - MOVO X5, 6*16(Stack) - MOVO X6, 7*16(Stack) - - CMPQ Len, $64 - JLE GENERATE_KEYSTREAM_64 - CMPQ Len, $128 - JLE GENERATE_KEYSTREAM_128 - CMPQ Len, $192 - JLE GENERATE_KEYSTREAM_192 - -GENERATE_KEYSTREAM_256: - MOVO X0, X12 - MOVO X1, X13 - MOVO X2, X14 - MOVO X3, X15 - PADDQ 4*16(Stack), X15 - MOVO X0, X8 - MOVO X1, X9 - MOVO X2, X10 - MOVO X15, X11 - PADDQ 4*16(Stack), X11 - MOVO X0, X4 - MOVO X1, X5 - MOVO X2, X6 - MOVO X11, X7 - PADDQ 4*16(Stack), X7 - MOVQ Rounds, Tmp0 - - MOVO X3, 3*16(Stack) // Save X3 - -CHACHA_LOOP_256: - MOVO X4, 5*16(Stack) - CHACHA_QROUND_SSSE3(X0, X1, X2, X3, X4, 6*16(Stack), 7*16(Stack)) - CHACHA_QROUND_SSSE3(X12, X13, X14, X15, X4, 6*16(Stack), 7*16(Stack)) - MOVO 5*16(Stack), X4 - MOVO X0, 5*16(Stack) - CHACHA_QROUND_SSSE3(X8, X9, X10, X11, X0, 6*16(Stack), 7*16(Stack)) - CHACHA_QROUND_SSSE3(X4, X5, X6, X7, X0, 6*16(Stack), 7*16(Stack)) - MOVO 5*16(Stack), X0 - CHACHA_SHUFFLE_SSE(X1, X2, X3) - CHACHA_SHUFFLE_SSE(X13, X14, X15) - CHACHA_SHUFFLE_SSE(X9, X10, X11) - CHACHA_SHUFFLE_SSE(X5, X6, X7) - MOVO X4, 5*16(Stack) - CHACHA_QROUND_SSSE3(X0, X1, X2, X3, X4, 6*16(Stack), 7*16(Stack)) - CHACHA_QROUND_SSSE3(X12, X13, X14, X15, X4, 6*16(Stack), 7*16(Stack)) - MOVO 5*16(Stack), X4 - MOVO X0, 5*16(Stack) - CHACHA_QROUND_SSSE3(X8, X9, X10, X11, X0, 6*16(Stack), 7*16(Stack)) - CHACHA_QROUND_SSSE3(X4, X5, X6, X7, X0, 6*16(Stack), 7*16(Stack)) - MOVO 5*16(Stack), X0 - CHACHA_SHUFFLE_SSE(X3, X2, X1) - CHACHA_SHUFFLE_SSE(X15, X14, X13) - CHACHA_SHUFFLE_SSE(X11, X10, X9) - CHACHA_SHUFFLE_SSE(X7, X6, X5) - SUBQ $2, Tmp0 - JNZ CHACHA_LOOP_256 - - PADDL 0*16(Stack), X0 - PADDL 1*16(Stack), X1 - PADDL 2*16(Stack), X2 - PADDL 3*16(Stack), X3 - MOVO X4, 5*16(Stack) // Save X4 - XOR_SSE(Dst, Src, 0, X0, X1, X2, X3, X4) - MOVO 5*16(Stack), X4 // Restore X4 - - MOVO 0*16(Stack), X0 - MOVO 1*16(Stack), X1 - MOVO 2*16(Stack), X2 - MOVO 3*16(Stack), X3 - PADDQ 4*16(Stack), X3 - - PADDL X0, X12 - PADDL X1, X13 - PADDL X2, X14 - PADDL X3, X15 - PADDQ 4*16(Stack), X3 - PADDL X0, X8 - PADDL X1, X9 - PADDL X2, X10 - PADDL X3, X11 - PADDQ 4*16(Stack), X3 - PADDL X0, X4 - PADDL X1, X5 - PADDL X2, X6 - PADDL X3, X7 - PADDQ 4*16(Stack), X3 - - XOR_SSE(Dst, Src, 64, X12, X13, X14, X15, X0) - XOR_SSE(Dst, Src, 128, X8, X9, X10, X11, X0) - MOVO 0*16(Stack), X0 // Restore X0 - ADDQ $192, Dst - ADDQ $192, Src - SUBQ $192, Len - - CMPQ Len, $64 - JL BUFFER_KEYSTREAM - - XOR_SSE(Dst, Src, 0, X4, X5, X6, X7, X8) - ADDQ $64, Dst - ADDQ $64, Src - SUBQ $64, Len - JZ DONE - CMPQ Len, $64 // If Len <= 64 -> gen. only 64 byte keystream. 
- JLE GENERATE_KEYSTREAM_64 - CMPQ Len, $128 // If 64 < Len <= 128 -> gen. only 128 byte keystream. - JLE GENERATE_KEYSTREAM_128 - CMPQ Len, $192 // If Len > 192 -> repeat, otherwise Len > 128 && Len <= 192 -> gen. 192 byte keystream - JG GENERATE_KEYSTREAM_256 - -GENERATE_KEYSTREAM_192: - MOVO X0, X12 - MOVO X1, X13 - MOVO X2, X14 - MOVO X3, X15 - MOVO X0, X8 - MOVO X1, X9 - MOVO X2, X10 - MOVO X3, X11 - PADDQ 4*16(Stack), X11 - MOVO X0, X4 - MOVO X1, X5 - MOVO X2, X6 - MOVO X11, X7 - PADDQ 4*16(Stack), X7 - MOVQ Rounds, Tmp0 - - MOVO 6*16(Stack), X1 // Load 16 bit rotate-left constant - MOVO 7*16(Stack), X2 // Load 8 bit rotate-left constant - -CHACHA_LOOP_192: - CHACHA_QROUND_SSSE3(X12, X13, X14, X15, X0, X1, X2) - CHACHA_QROUND_SSSE3(X8, X9, X10, X11, X0, X1, X2) - CHACHA_QROUND_SSSE3(X4, X5, X6, X7, X0, X1, X2) - CHACHA_SHUFFLE_SSE(X13, X14, X15) - CHACHA_SHUFFLE_SSE(X9, X10, X11) - CHACHA_SHUFFLE_SSE(X5, X6, X7) - CHACHA_QROUND_SSSE3(X12, X13, X14, X15, X0, X1, X2) - CHACHA_QROUND_SSSE3(X8, X9, X10, X11, X0, X1, X2) - CHACHA_QROUND_SSSE3(X4, X5, X6, X7, X0, X1, X2) - CHACHA_SHUFFLE_SSE(X15, X14, X13) - CHACHA_SHUFFLE_SSE(X11, X10, X9) - CHACHA_SHUFFLE_SSE(X7, X6, X5) - SUBQ $2, Tmp0 - JNZ CHACHA_LOOP_192 - - MOVO 0*16(Stack), X0 // Restore X0 - MOVO 1*16(Stack), X1 // Restore X1 - MOVO 2*16(Stack), X2 // Restore X2 - PADDL X0, X12 - PADDL X1, X13 - PADDL X2, X14 - PADDL X3, X15 - PADDQ 4*16(Stack), X3 - PADDL X0, X8 - PADDL X1, X9 - PADDL X2, X10 - PADDL X3, X11 - PADDQ 4*16(Stack), X3 - PADDL X0, X4 - PADDL X1, X5 - PADDL X2, X6 - PADDL X3, X7 - PADDQ 4*16(Stack), X3 - - XOR_SSE(Dst, Src, 0, X12, X13, X14, X15, X0) - XOR_SSE(Dst, Src, 64, X8, X9, X10, X11, X0) - MOVO 0*16(Stack), X0 // Restore X0 - ADDQ $128, Dst - ADDQ $128, Src - SUBQ $128, Len - - CMPQ Len, $64 - JL BUFFER_KEYSTREAM - - XOR_SSE(Dst, Src, 0, X4, X5, X6, X7, X8) - ADDQ $64, Dst - ADDQ $64, Src - SUBQ $64, Len - JZ DONE - CMPQ Len, $64 // If Len <= 64 -> gen. only 64 byte keystream. 
- JLE GENERATE_KEYSTREAM_64 - -GENERATE_KEYSTREAM_128: - MOVO X0, X8 - MOVO X1, X9 - MOVO X2, X10 - MOVO X3, X11 - MOVO X0, X4 - MOVO X1, X5 - MOVO X2, X6 - MOVO X3, X7 - PADDQ 4*16(Stack), X7 - MOVQ Rounds, Tmp0 - - MOVO 6*16(Stack), X13 // Load 16 bit rotate-left constant - MOVO 7*16(Stack), X14 // Load 8 bit rotate-left constant - -CHACHA_LOOP_128: - CHACHA_QROUND_SSSE3(X8, X9, X10, X11, X12, X13, X14) - CHACHA_QROUND_SSSE3(X4, X5, X6, X7, X12, X13, X14) - CHACHA_SHUFFLE_SSE(X9, X10, X11) - CHACHA_SHUFFLE_SSE(X5, X6, X7) - CHACHA_QROUND_SSSE3(X8, X9, X10, X11, X12, X13, X14) - CHACHA_QROUND_SSSE3(X4, X5, X6, X7, X12, X13, X14) - CHACHA_SHUFFLE_SSE(X11, X10, X9) - CHACHA_SHUFFLE_SSE(X7, X6, X5) - SUBQ $2, Tmp0 - JNZ CHACHA_LOOP_128 - - PADDL X0, X8 - PADDL X1, X9 - PADDL X2, X10 - PADDL X3, X11 - PADDQ 4*16(Stack), X3 - PADDL X0, X4 - PADDL X1, X5 - PADDL X2, X6 - PADDL X3, X7 - PADDQ 4*16(Stack), X3 - - XOR_SSE(Dst, Src, 0, X8, X9, X10, X11, X12) - ADDQ $64, Dst - ADDQ $64, Src - SUBQ $64, Len - - CMPQ Len, $64 - JL BUFFER_KEYSTREAM - - XOR_SSE(Dst, Src, 0, X4, X5, X6, X7, X8) - ADDQ $64, Dst - ADDQ $64, Src - SUBQ $64, Len - JZ DONE // If Len == 0 -> DONE, otherwise Len <= 64 -> gen 64 byte keystream - -GENERATE_KEYSTREAM_64: - MOVO X0, X4 - MOVO X1, X5 - MOVO X2, X6 - MOVO X3, X7 - MOVQ Rounds, Tmp0 - - MOVO 6*16(Stack), X9 // Load 16 bit rotate-left constant - MOVO 7*16(Stack), X10 // Load 8 bit rotate-left constant - -CHACHA_LOOP_64: - CHACHA_QROUND_SSSE3(X4, X5, X6, X7, X8, X9, X10) - CHACHA_SHUFFLE_SSE(X5, X6, X7) - CHACHA_QROUND_SSSE3(X4, X5, X6, X7, X8, X9, X10) - CHACHA_SHUFFLE_SSE(X7, X6, X5) - SUBQ $2, Tmp0 - JNZ CHACHA_LOOP_64 - - PADDL X0, X4 - PADDL X1, X5 - PADDL X2, X6 - PADDL X3, X7 - PADDQ 4*16(Stack), X3 - - CMPQ Len, $64 - JL BUFFER_KEYSTREAM - - XOR_SSE(Dst, Src, 0, X4, X5, X6, X7, X8) - ADDQ $64, Src - ADDQ $64, Dst - SUBQ $64, Len - JMP DONE // jump directly to DONE - there is no keystream to buffer, Len == 0 always true. 
- -BUFFER_KEYSTREAM: - MOVOU X4, 0*16(Buffer) - MOVOU X5, 1*16(Buffer) - MOVOU X6, 2*16(Buffer) - MOVOU X7, 3*16(Buffer) - MOVQ Len, Tmp0 - FINALIZE(Dst, Src, Buffer, Tmp0, Tmp1, Tmp2) - -DONE: - MOVQ SavedSP, Stack // Restore stack pointer - MOVOU X3, 3*16(State) - MOVQ Len, ret+72(FP) - RET - -// func xorKeyStreamAVX(dst, src []byte, block, state *[64]byte, rounds int) int -TEXT ·xorKeyStreamAVX(SB), 4, $144-80 - MOVQ dst_base+0(FP), Dst - MOVQ src_base+24(FP), Src - MOVQ block+48(FP), Buffer - MOVQ state+56(FP), State - MOVQ rounds+64(FP), Rounds - MOVQ src_len+32(FP), Len - - VMOVDQU 0*16(State), X0 - VMOVDQU 1*16(State), X1 - VMOVDQU 2*16(State), X2 - VMOVDQU 3*16(State), X3 - - MOVQ Stack, SavedSP - ADDQ $16, Stack - ANDQ $-16, Stack - - TESTQ Len, Len - JZ DONE - - VMOVDQU ·one<>(SB), X4 - VMOVDQU ·rol16<>(SB), X5 - VMOVDQU ·rol8<>(SB), X6 - VMOVDQA X0, 0*16(Stack) - VMOVDQA X1, 1*16(Stack) - VMOVDQA X2, 2*16(Stack) - VMOVDQA X3, 3*16(Stack) - VMOVDQA X4, 4*16(Stack) - VMOVDQA X5, 6*16(Stack) - VMOVDQA X6, 7*16(Stack) - - CMPQ Len, $64 - JLE GENERATE_KEYSTREAM_64 - CMPQ Len, $128 - JLE GENERATE_KEYSTREAM_128 - CMPQ Len, $192 - JLE GENERATE_KEYSTREAM_192 - -GENERATE_KEYSTREAM_256: - VMOVDQA X0, X12 - VMOVDQA X1, X13 - VMOVDQA X2, X14 - VMOVDQA X3, X15 - VPADDQ 4*16(Stack), X15, X15 - VMOVDQA X0, X8 - VMOVDQA X1, X9 - VMOVDQA X2, X10 - VMOVDQA X15, X11 - VPADDQ 4*16(Stack), X11, X11 - VMOVDQA X0, X4 - VMOVDQA X1, X5 - VMOVDQA X2, X6 - VMOVDQA X11, X7 - VPADDQ 4*16(Stack), X7, X7 - MOVQ Rounds, Tmp0 - - VMOVDQA X3, 3*16(Stack) // Save X3 - -CHACHA_LOOP_256: - VMOVDQA X4, 5*16(Stack) - CHACHA_QROUND_AVX(X0, X1, X2, X3, X4, 6*16(Stack), 7*16(Stack)) - CHACHA_QROUND_AVX(X12, X13, X14, X15, X4, 6*16(Stack), 7*16(Stack)) - VMOVDQA 5*16(Stack), X4 - VMOVDQA X0, 5*16(Stack) - CHACHA_QROUND_AVX(X8, X9, X10, X11, X0, 6*16(Stack), 7*16(Stack)) - CHACHA_QROUND_AVX(X4, X5, X6, X7, X0, 6*16(Stack), 7*16(Stack)) - VMOVDQA 5*16(Stack), X0 - CHACHA_SHUFFLE_AVX(X1, X2, X3) - CHACHA_SHUFFLE_AVX(X13, X14, X15) - CHACHA_SHUFFLE_AVX(X9, X10, X11) - CHACHA_SHUFFLE_AVX(X5, X6, X7) - VMOVDQA X4, 5*16(Stack) - CHACHA_QROUND_AVX(X0, X1, X2, X3, X4, 6*16(Stack), 7*16(Stack)) - CHACHA_QROUND_AVX(X12, X13, X14, X15, X4, 6*16(Stack), 7*16(Stack)) - VMOVDQA 5*16(Stack), X4 - VMOVDQA X0, 5*16(Stack) - CHACHA_QROUND_AVX(X8, X9, X10, X11, X0, 6*16(Stack), 7*16(Stack)) - CHACHA_QROUND_AVX(X4, X5, X6, X7, X0, 6*16(Stack), 7*16(Stack)) - VMOVDQA 5*16(Stack), X0 - CHACHA_SHUFFLE_AVX(X3, X2, X1) - CHACHA_SHUFFLE_AVX(X15, X14, X13) - CHACHA_SHUFFLE_AVX(X11, X10, X9) - CHACHA_SHUFFLE_AVX(X7, X6, X5) - SUBQ $2, Tmp0 - JNZ CHACHA_LOOP_256 - - VPADDD 0*16(Stack), X0, X0 - VPADDD 1*16(Stack), X1, X1 - VPADDD 2*16(Stack), X2, X2 - VPADDD 3*16(Stack), X3, X3 - VMOVDQA X4, 5*16(Stack) // Save X4 - XOR_AVX(Dst, Src, 0, X0, X1, X2, X3, X4) - VMOVDQA 5*16(Stack), X4 // Restore X4 - - VMOVDQA 0*16(Stack), X0 - VMOVDQA 1*16(Stack), X1 - VMOVDQA 2*16(Stack), X2 - VMOVDQA 3*16(Stack), X3 - VPADDQ 4*16(Stack), X3, X3 - - VPADDD X0, X12, X12 - VPADDD X1, X13, X13 - VPADDD X2, X14, X14 - VPADDD X3, X15, X15 - VPADDQ 4*16(Stack), X3, X3 - VPADDD X0, X8, X8 - VPADDD X1, X9, X9 - VPADDD X2, X10, X10 - VPADDD X3, X11, X11 - VPADDQ 4*16(Stack), X3, X3 - VPADDD X0, X4, X4 - VPADDD X1, X5, X5 - VPADDD X2, X6, X6 - VPADDD X3, X7, X7 - VPADDQ 4*16(Stack), X3, X3 - - XOR_AVX(Dst, Src, 64, X12, X13, X14, X15, X0) - XOR_AVX(Dst, Src, 128, X8, X9, X10, X11, X0) - VMOVDQA 0*16(Stack), X0 // Restore X0 - ADDQ $192, Dst - ADDQ $192, Src - SUBQ $192, Len - - 
CMPQ Len, $64 - JL BUFFER_KEYSTREAM - - XOR_AVX(Dst, Src, 0, X4, X5, X6, X7, X8) - ADDQ $64, Dst - ADDQ $64, Src - SUBQ $64, Len - JZ DONE - CMPQ Len, $64 // If Len <= 64 -> gen. only 64 byte keystream. - JLE GENERATE_KEYSTREAM_64 - CMPQ Len, $128 // If 64 < Len <= 128 -> gen. only 128 byte keystream. - JLE GENERATE_KEYSTREAM_128 - CMPQ Len, $192 // If Len > 192 -> repeat, otherwise Len > 128 && Len <= 192 -> gen. 192 byte keystream - JG GENERATE_KEYSTREAM_256 - -GENERATE_KEYSTREAM_192: - VMOVDQA X0, X12 - VMOVDQA X1, X13 - VMOVDQA X2, X14 - VMOVDQA X3, X15 - VMOVDQA X0, X8 - VMOVDQA X1, X9 - VMOVDQA X2, X10 - VMOVDQA X3, X11 - VPADDQ 4*16(Stack), X11, X11 - VMOVDQA X0, X4 - VMOVDQA X1, X5 - VMOVDQA X2, X6 - VMOVDQA X11, X7 - VPADDQ 4*16(Stack), X7, X7 - MOVQ Rounds, Tmp0 - - VMOVDQA 6*16(Stack), X1 // Load 16 bit rotate-left constant - VMOVDQA 7*16(Stack), X2 // Load 8 bit rotate-left constant - -CHACHA_LOOP_192: - CHACHA_QROUND_AVX(X12, X13, X14, X15, X0, X1, X2) - CHACHA_QROUND_AVX(X8, X9, X10, X11, X0, X1, X2) - CHACHA_QROUND_AVX(X4, X5, X6, X7, X0, X1, X2) - CHACHA_SHUFFLE_AVX(X13, X14, X15) - CHACHA_SHUFFLE_AVX(X9, X10, X11) - CHACHA_SHUFFLE_AVX(X5, X6, X7) - CHACHA_QROUND_AVX(X12, X13, X14, X15, X0, X1, X2) - CHACHA_QROUND_AVX(X8, X9, X10, X11, X0, X1, X2) - CHACHA_QROUND_AVX(X4, X5, X6, X7, X0, X1, X2) - CHACHA_SHUFFLE_AVX(X15, X14, X13) - CHACHA_SHUFFLE_AVX(X11, X10, X9) - CHACHA_SHUFFLE_AVX(X7, X6, X5) - SUBQ $2, Tmp0 - JNZ CHACHA_LOOP_192 - - VMOVDQA 0*16(Stack), X0 // Restore X0 - VMOVDQA 1*16(Stack), X1 // Restore X1 - VMOVDQA 2*16(Stack), X2 // Restore X2 - VPADDD X0, X12, X12 - VPADDD X1, X13, X13 - VPADDD X2, X14, X14 - VPADDD X3, X15, X15 - VPADDQ 4*16(Stack), X3, X3 - VPADDD X0, X8, X8 - VPADDD X1, X9, X9 - VPADDD X2, X10, X10 - VPADDD X3, X11, X11 - VPADDQ 4*16(Stack), X3, X3 - VPADDD X0, X4, X4 - VPADDD X1, X5, X5 - VPADDD X2, X6, X6 - VPADDD X3, X7, X7 - VPADDQ 4*16(Stack), X3, X3 - - XOR_AVX(Dst, Src, 0, X12, X13, X14, X15, X0) - XOR_AVX(Dst, Src, 64, X8, X9, X10, X11, X0) - VMOVDQA 0*16(Stack), X0 // Restore X0 - ADDQ $128, Dst - ADDQ $128, Src - SUBQ $128, Len - - CMPQ Len, $64 - JL BUFFER_KEYSTREAM - - XOR_AVX(Dst, Src, 0, X4, X5, X6, X7, X8) - ADDQ $64, Dst - ADDQ $64, Src - SUBQ $64, Len - JZ DONE - CMPQ Len, $64 // If Len <= 64 -> gen. only 64 byte keystream. 
- JLE GENERATE_KEYSTREAM_64 - -GENERATE_KEYSTREAM_128: - VMOVDQA X0, X8 - VMOVDQA X1, X9 - VMOVDQA X2, X10 - VMOVDQA X3, X11 - VMOVDQA X0, X4 - VMOVDQA X1, X5 - VMOVDQA X2, X6 - VMOVDQA X3, X7 - VPADDQ 4*16(Stack), X7, X7 - MOVQ Rounds, Tmp0 - - VMOVDQA 6*16(Stack), X13 // Load 16 bit rotate-left constant - VMOVDQA 7*16(Stack), X14 // Load 8 bit rotate-left constant - -CHACHA_LOOP_128: - CHACHA_QROUND_AVX(X8, X9, X10, X11, X12, X13, X14) - CHACHA_QROUND_AVX(X4, X5, X6, X7, X12, X13, X14) - CHACHA_SHUFFLE_AVX(X9, X10, X11) - CHACHA_SHUFFLE_AVX(X5, X6, X7) - CHACHA_QROUND_AVX(X8, X9, X10, X11, X12, X13, X14) - CHACHA_QROUND_AVX(X4, X5, X6, X7, X12, X13, X14) - CHACHA_SHUFFLE_AVX(X11, X10, X9) - CHACHA_SHUFFLE_AVX(X7, X6, X5) - SUBQ $2, Tmp0 - JNZ CHACHA_LOOP_128 - - VPADDD X0, X8, X8 - VPADDD X1, X9, X9 - VPADDD X2, X10, X10 - VPADDD X3, X11, X11 - VPADDQ 4*16(Stack), X3, X3 - VPADDD X0, X4, X4 - VPADDD X1, X5, X5 - VPADDD X2, X6, X6 - VPADDD X3, X7, X7 - VPADDQ 4*16(Stack), X3, X3 - - XOR_AVX(Dst, Src, 0, X8, X9, X10, X11, X12) - ADDQ $64, Dst - ADDQ $64, Src - SUBQ $64, Len - - CMPQ Len, $64 - JL BUFFER_KEYSTREAM - - XOR_AVX(Dst, Src, 0, X4, X5, X6, X7, X8) - ADDQ $64, Dst - ADDQ $64, Src - SUBQ $64, Len - JZ DONE // If Len == 0 -> DONE, otherwise Len <= 64 -> gen 64 byte keystream - -GENERATE_KEYSTREAM_64: - VMOVDQA X0, X4 - VMOVDQA X1, X5 - VMOVDQA X2, X6 - VMOVDQA X3, X7 - MOVQ Rounds, Tmp0 - - VMOVDQA 6*16(Stack), X9 // Load 16 bit rotate-left constant - VMOVDQA 7*16(Stack), X10 // Load 8 bit rotate-left constant - -CHACHA_LOOP_64: - CHACHA_QROUND_AVX(X4, X5, X6, X7, X8, X9, X10) - CHACHA_SHUFFLE_AVX(X5, X6, X7) - CHACHA_QROUND_AVX(X4, X5, X6, X7, X8, X9, X10) - CHACHA_SHUFFLE_AVX(X7, X6, X5) - SUBQ $2, Tmp0 - JNZ CHACHA_LOOP_64 - - VPADDD X0, X4, X4 - VPADDD X1, X5, X5 - VPADDD X2, X6, X6 - VPADDD X3, X7, X7 - VPADDQ 4*16(Stack), X3, X3 - - CMPQ Len, $64 - JL BUFFER_KEYSTREAM - - XOR_AVX(Dst, Src, 0, X4, X5, X6, X7, X8) - ADDQ $64, Src - ADDQ $64, Dst - SUBQ $64, Len - JMP DONE // jump directly to DONE - there is no keystream to buffer, Len == 0 always true. - -BUFFER_KEYSTREAM: - VMOVDQU X4, 0*16(Buffer) - VMOVDQU X5, 1*16(Buffer) - VMOVDQU X6, 2*16(Buffer) - VMOVDQU X7, 3*16(Buffer) - MOVQ Len, Tmp0 - FINALIZE(Dst, Src, Buffer, Tmp0, Tmp1, Tmp2) - -DONE: - MOVQ SavedSP, Stack // Restore stack pointer - VMOVDQU X3, 3*16(State) - VZEROUPPER - MOVQ Len, ret+72(FP) - RET - -#undef Dst -#undef Src -#undef Len -#undef Rounds -#undef Buffer -#undef State -#undef Stack -#undef SavedSP -#undef Tmp0 -#undef Tmp1 -#undef Tmp2 diff --git a/vendor/github.com/aead/chacha20/chacha/chacha_generic.go b/vendor/github.com/aead/chacha20/chacha/chacha_generic.go deleted file mode 100644 index 8832d5bce..000000000 --- a/vendor/github.com/aead/chacha20/chacha/chacha_generic.go +++ /dev/null @@ -1,319 +0,0 @@ -// Copyright (c) 2016 Andreas Auernhammer. All rights reserved. -// Use of this source code is governed by a license that can be -// found in the LICENSE file. 
- -package chacha - -import "encoding/binary" - -var sigma = [4]uint32{0x61707865, 0x3320646e, 0x79622d32, 0x6b206574} - -func xorKeyStreamGeneric(dst, src []byte, block, state *[64]byte, rounds int) int { - for len(src) >= 64 { - chachaGeneric(block, state, rounds) - - for i, v := range block { - dst[i] = src[i] ^ v - } - src = src[64:] - dst = dst[64:] - } - - n := len(src) - if n > 0 { - chachaGeneric(block, state, rounds) - for i, v := range src { - dst[i] = v ^ block[i] - } - } - return n -} - -func chachaGeneric(dst *[64]byte, state *[64]byte, rounds int) { - v00 := binary.LittleEndian.Uint32(state[0:]) - v01 := binary.LittleEndian.Uint32(state[4:]) - v02 := binary.LittleEndian.Uint32(state[8:]) - v03 := binary.LittleEndian.Uint32(state[12:]) - v04 := binary.LittleEndian.Uint32(state[16:]) - v05 := binary.LittleEndian.Uint32(state[20:]) - v06 := binary.LittleEndian.Uint32(state[24:]) - v07 := binary.LittleEndian.Uint32(state[28:]) - v08 := binary.LittleEndian.Uint32(state[32:]) - v09 := binary.LittleEndian.Uint32(state[36:]) - v10 := binary.LittleEndian.Uint32(state[40:]) - v11 := binary.LittleEndian.Uint32(state[44:]) - v12 := binary.LittleEndian.Uint32(state[48:]) - v13 := binary.LittleEndian.Uint32(state[52:]) - v14 := binary.LittleEndian.Uint32(state[56:]) - v15 := binary.LittleEndian.Uint32(state[60:]) - - s00, s01, s02, s03, s04, s05, s06, s07 := v00, v01, v02, v03, v04, v05, v06, v07 - s08, s09, s10, s11, s12, s13, s14, s15 := v08, v09, v10, v11, v12, v13, v14, v15 - - for i := 0; i < rounds; i += 2 { - v00 += v04 - v12 ^= v00 - v12 = (v12 << 16) | (v12 >> 16) - v08 += v12 - v04 ^= v08 - v04 = (v04 << 12) | (v04 >> 20) - v00 += v04 - v12 ^= v00 - v12 = (v12 << 8) | (v12 >> 24) - v08 += v12 - v04 ^= v08 - v04 = (v04 << 7) | (v04 >> 25) - v01 += v05 - v13 ^= v01 - v13 = (v13 << 16) | (v13 >> 16) - v09 += v13 - v05 ^= v09 - v05 = (v05 << 12) | (v05 >> 20) - v01 += v05 - v13 ^= v01 - v13 = (v13 << 8) | (v13 >> 24) - v09 += v13 - v05 ^= v09 - v05 = (v05 << 7) | (v05 >> 25) - v02 += v06 - v14 ^= v02 - v14 = (v14 << 16) | (v14 >> 16) - v10 += v14 - v06 ^= v10 - v06 = (v06 << 12) | (v06 >> 20) - v02 += v06 - v14 ^= v02 - v14 = (v14 << 8) | (v14 >> 24) - v10 += v14 - v06 ^= v10 - v06 = (v06 << 7) | (v06 >> 25) - v03 += v07 - v15 ^= v03 - v15 = (v15 << 16) | (v15 >> 16) - v11 += v15 - v07 ^= v11 - v07 = (v07 << 12) | (v07 >> 20) - v03 += v07 - v15 ^= v03 - v15 = (v15 << 8) | (v15 >> 24) - v11 += v15 - v07 ^= v11 - v07 = (v07 << 7) | (v07 >> 25) - v00 += v05 - v15 ^= v00 - v15 = (v15 << 16) | (v15 >> 16) - v10 += v15 - v05 ^= v10 - v05 = (v05 << 12) | (v05 >> 20) - v00 += v05 - v15 ^= v00 - v15 = (v15 << 8) | (v15 >> 24) - v10 += v15 - v05 ^= v10 - v05 = (v05 << 7) | (v05 >> 25) - v01 += v06 - v12 ^= v01 - v12 = (v12 << 16) | (v12 >> 16) - v11 += v12 - v06 ^= v11 - v06 = (v06 << 12) | (v06 >> 20) - v01 += v06 - v12 ^= v01 - v12 = (v12 << 8) | (v12 >> 24) - v11 += v12 - v06 ^= v11 - v06 = (v06 << 7) | (v06 >> 25) - v02 += v07 - v13 ^= v02 - v13 = (v13 << 16) | (v13 >> 16) - v08 += v13 - v07 ^= v08 - v07 = (v07 << 12) | (v07 >> 20) - v02 += v07 - v13 ^= v02 - v13 = (v13 << 8) | (v13 >> 24) - v08 += v13 - v07 ^= v08 - v07 = (v07 << 7) | (v07 >> 25) - v03 += v04 - v14 ^= v03 - v14 = (v14 << 16) | (v14 >> 16) - v09 += v14 - v04 ^= v09 - v04 = (v04 << 12) | (v04 >> 20) - v03 += v04 - v14 ^= v03 - v14 = (v14 << 8) | (v14 >> 24) - v09 += v14 - v04 ^= v09 - v04 = (v04 << 7) | (v04 >> 25) - } - - v00 += s00 - v01 += s01 - v02 += s02 - v03 += s03 - v04 += s04 - v05 += s05 - v06 += s06 - v07 += s07 
- v08 += s08 - v09 += s09 - v10 += s10 - v11 += s11 - v12 += s12 - v13 += s13 - v14 += s14 - v15 += s15 - - s12++ - binary.LittleEndian.PutUint32(state[48:], s12) - if s12 == 0 { // indicates overflow - s13++ - binary.LittleEndian.PutUint32(state[52:], s13) - } - - binary.LittleEndian.PutUint32(dst[0:], v00) - binary.LittleEndian.PutUint32(dst[4:], v01) - binary.LittleEndian.PutUint32(dst[8:], v02) - binary.LittleEndian.PutUint32(dst[12:], v03) - binary.LittleEndian.PutUint32(dst[16:], v04) - binary.LittleEndian.PutUint32(dst[20:], v05) - binary.LittleEndian.PutUint32(dst[24:], v06) - binary.LittleEndian.PutUint32(dst[28:], v07) - binary.LittleEndian.PutUint32(dst[32:], v08) - binary.LittleEndian.PutUint32(dst[36:], v09) - binary.LittleEndian.PutUint32(dst[40:], v10) - binary.LittleEndian.PutUint32(dst[44:], v11) - binary.LittleEndian.PutUint32(dst[48:], v12) - binary.LittleEndian.PutUint32(dst[52:], v13) - binary.LittleEndian.PutUint32(dst[56:], v14) - binary.LittleEndian.PutUint32(dst[60:], v15) -} - -func hChaCha20Generic(out *[32]byte, nonce *[16]byte, key *[32]byte) { - v00 := sigma[0] - v01 := sigma[1] - v02 := sigma[2] - v03 := sigma[3] - v04 := binary.LittleEndian.Uint32(key[0:]) - v05 := binary.LittleEndian.Uint32(key[4:]) - v06 := binary.LittleEndian.Uint32(key[8:]) - v07 := binary.LittleEndian.Uint32(key[12:]) - v08 := binary.LittleEndian.Uint32(key[16:]) - v09 := binary.LittleEndian.Uint32(key[20:]) - v10 := binary.LittleEndian.Uint32(key[24:]) - v11 := binary.LittleEndian.Uint32(key[28:]) - v12 := binary.LittleEndian.Uint32(nonce[0:]) - v13 := binary.LittleEndian.Uint32(nonce[4:]) - v14 := binary.LittleEndian.Uint32(nonce[8:]) - v15 := binary.LittleEndian.Uint32(nonce[12:]) - - for i := 0; i < 20; i += 2 { - v00 += v04 - v12 ^= v00 - v12 = (v12 << 16) | (v12 >> 16) - v08 += v12 - v04 ^= v08 - v04 = (v04 << 12) | (v04 >> 20) - v00 += v04 - v12 ^= v00 - v12 = (v12 << 8) | (v12 >> 24) - v08 += v12 - v04 ^= v08 - v04 = (v04 << 7) | (v04 >> 25) - v01 += v05 - v13 ^= v01 - v13 = (v13 << 16) | (v13 >> 16) - v09 += v13 - v05 ^= v09 - v05 = (v05 << 12) | (v05 >> 20) - v01 += v05 - v13 ^= v01 - v13 = (v13 << 8) | (v13 >> 24) - v09 += v13 - v05 ^= v09 - v05 = (v05 << 7) | (v05 >> 25) - v02 += v06 - v14 ^= v02 - v14 = (v14 << 16) | (v14 >> 16) - v10 += v14 - v06 ^= v10 - v06 = (v06 << 12) | (v06 >> 20) - v02 += v06 - v14 ^= v02 - v14 = (v14 << 8) | (v14 >> 24) - v10 += v14 - v06 ^= v10 - v06 = (v06 << 7) | (v06 >> 25) - v03 += v07 - v15 ^= v03 - v15 = (v15 << 16) | (v15 >> 16) - v11 += v15 - v07 ^= v11 - v07 = (v07 << 12) | (v07 >> 20) - v03 += v07 - v15 ^= v03 - v15 = (v15 << 8) | (v15 >> 24) - v11 += v15 - v07 ^= v11 - v07 = (v07 << 7) | (v07 >> 25) - v00 += v05 - v15 ^= v00 - v15 = (v15 << 16) | (v15 >> 16) - v10 += v15 - v05 ^= v10 - v05 = (v05 << 12) | (v05 >> 20) - v00 += v05 - v15 ^= v00 - v15 = (v15 << 8) | (v15 >> 24) - v10 += v15 - v05 ^= v10 - v05 = (v05 << 7) | (v05 >> 25) - v01 += v06 - v12 ^= v01 - v12 = (v12 << 16) | (v12 >> 16) - v11 += v12 - v06 ^= v11 - v06 = (v06 << 12) | (v06 >> 20) - v01 += v06 - v12 ^= v01 - v12 = (v12 << 8) | (v12 >> 24) - v11 += v12 - v06 ^= v11 - v06 = (v06 << 7) | (v06 >> 25) - v02 += v07 - v13 ^= v02 - v13 = (v13 << 16) | (v13 >> 16) - v08 += v13 - v07 ^= v08 - v07 = (v07 << 12) | (v07 >> 20) - v02 += v07 - v13 ^= v02 - v13 = (v13 << 8) | (v13 >> 24) - v08 += v13 - v07 ^= v08 - v07 = (v07 << 7) | (v07 >> 25) - v03 += v04 - v14 ^= v03 - v14 = (v14 << 16) | (v14 >> 16) - v09 += v14 - v04 ^= v09 - v04 = (v04 << 12) | (v04 >> 20) - v03 += v04 - v14 
^= v03 - v14 = (v14 << 8) | (v14 >> 24) - v09 += v14 - v04 ^= v09 - v04 = (v04 << 7) | (v04 >> 25) - } - - binary.LittleEndian.PutUint32(out[0:], v00) - binary.LittleEndian.PutUint32(out[4:], v01) - binary.LittleEndian.PutUint32(out[8:], v02) - binary.LittleEndian.PutUint32(out[12:], v03) - binary.LittleEndian.PutUint32(out[16:], v12) - binary.LittleEndian.PutUint32(out[20:], v13) - binary.LittleEndian.PutUint32(out[24:], v14) - binary.LittleEndian.PutUint32(out[28:], v15) -} diff --git a/vendor/github.com/aead/chacha20/chacha/chacha_ref.go b/vendor/github.com/aead/chacha20/chacha/chacha_ref.go deleted file mode 100644 index 526877c21..000000000 --- a/vendor/github.com/aead/chacha20/chacha/chacha_ref.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright (c) 2016 Andreas Auernhammer. All rights reserved. -// Use of this source code is governed by a license that can be -// found in the LICENSE file. - -// +build !amd64,!386 gccgo appengine nacl - -package chacha - -import "encoding/binary" - -func init() { - useSSE2 = false - useSSSE3 = false - useAVX = false - useAVX2 = false -} - -func initialize(state *[64]byte, key []byte, nonce *[16]byte) { - binary.LittleEndian.PutUint32(state[0:], sigma[0]) - binary.LittleEndian.PutUint32(state[4:], sigma[1]) - binary.LittleEndian.PutUint32(state[8:], sigma[2]) - binary.LittleEndian.PutUint32(state[12:], sigma[3]) - copy(state[16:], key[:]) - copy(state[48:], nonce[:]) -} - -func xorKeyStream(dst, src []byte, block, state *[64]byte, rounds int) int { - return xorKeyStreamGeneric(dst, src, block, state, rounds) -} - -func hChaCha20(out *[32]byte, nonce *[16]byte, key *[32]byte) { - hChaCha20Generic(out, nonce, key) -} diff --git a/vendor/github.com/aead/chacha20/chacha/const.s b/vendor/github.com/aead/chacha20/chacha/const.s deleted file mode 100644 index c7a94a47e..000000000 --- a/vendor/github.com/aead/chacha20/chacha/const.s +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright (c) 2018 Andreas Auernhammer. All rights reserved. -// Use of this source code is governed by a license that can be -// found in the LICENSE file. 
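Each double round in the unrolled chachaGeneric and hChaCha20Generic bodies deleted above is eight applications of the standard ChaCha quarter-round with its 16/12/8/7 rotation schedule. A minimal runnable sketch, checked against the RFC 8439 §2.1.1 test vector (quarterRound is an illustrative name, not an identifier from the removed package):

package main

import (
	"fmt"
	"math/bits"
)

// quarterRound applies the ChaCha quarter-round to four state words.
func quarterRound(a, b, c, d uint32) (uint32, uint32, uint32, uint32) {
	a += b
	d = bits.RotateLeft32(d^a, 16)
	c += d
	b = bits.RotateLeft32(b^c, 12)
	a += b
	d = bits.RotateLeft32(d^a, 8)
	c += d
	b = bits.RotateLeft32(b^c, 7)
	return a, b, c, d
}

func main() {
	// RFC 8439 §2.1.1 test vector.
	a, b, c, d := quarterRound(0x11111111, 0x01020304, 0x9b8d6f43, 0x01234567)
	fmt.Printf("%08x %08x %08x %08x\n", a, b, c, d)
	// expected: ea2a92f4 cb1cf8ce 4581472e 5881c4bb
}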
- -// +build 386,!gccgo,!appengine,!nacl amd64,!gccgo,!appengine,!nacl - -#include "textflag.h" - -DATA ·sigma<>+0x00(SB)/4, $0x61707865 -DATA ·sigma<>+0x04(SB)/4, $0x3320646e -DATA ·sigma<>+0x08(SB)/4, $0x79622d32 -DATA ·sigma<>+0x0C(SB)/4, $0x6b206574 -GLOBL ·sigma<>(SB), (NOPTR+RODATA), $16 // The 4 ChaCha initialization constants - -// SSE2/SSE3/AVX constants - -DATA ·one<>+0x00(SB)/8, $1 -DATA ·one<>+0x08(SB)/8, $0 -GLOBL ·one<>(SB), (NOPTR+RODATA), $16 // The constant 1 as 128 bit value - -DATA ·rol16<>+0x00(SB)/8, $0x0504070601000302 -DATA ·rol16<>+0x08(SB)/8, $0x0D0C0F0E09080B0A -GLOBL ·rol16<>(SB), (NOPTR+RODATA), $16 // The PSHUFB 16 bit left rotate constant - -DATA ·rol8<>+0x00(SB)/8, $0x0605040702010003 -DATA ·rol8<>+0x08(SB)/8, $0x0E0D0C0F0A09080B -GLOBL ·rol8<>(SB), (NOPTR+RODATA), $16 // The PSHUFB 8 bit left rotate constant - -// AVX2 constants - -DATA ·one_AVX2<>+0x00(SB)/8, $0 -DATA ·one_AVX2<>+0x08(SB)/8, $0 -DATA ·one_AVX2<>+0x10(SB)/8, $1 -DATA ·one_AVX2<>+0x18(SB)/8, $0 -GLOBL ·one_AVX2<>(SB), (NOPTR+RODATA), $32 // The constant 1 as 256 bit value - -DATA ·two_AVX2<>+0x00(SB)/8, $2 -DATA ·two_AVX2<>+0x08(SB)/8, $0 -DATA ·two_AVX2<>+0x10(SB)/8, $2 -DATA ·two_AVX2<>+0x18(SB)/8, $0 -GLOBL ·two_AVX2<>(SB), (NOPTR+RODATA), $32 - -DATA ·rol16_AVX2<>+0x00(SB)/8, $0x0504070601000302 -DATA ·rol16_AVX2<>+0x08(SB)/8, $0x0D0C0F0E09080B0A -DATA ·rol16_AVX2<>+0x10(SB)/8, $0x0504070601000302 -DATA ·rol16_AVX2<>+0x18(SB)/8, $0x0D0C0F0E09080B0A -GLOBL ·rol16_AVX2<>(SB), (NOPTR+RODATA), $32 // The VPSHUFB 16 bit left rotate constant - -DATA ·rol8_AVX2<>+0x00(SB)/8, $0x0605040702010003 -DATA ·rol8_AVX2<>+0x08(SB)/8, $0x0E0D0C0F0A09080B -DATA ·rol8_AVX2<>+0x10(SB)/8, $0x0605040702010003 -DATA ·rol8_AVX2<>+0x18(SB)/8, $0x0E0D0C0F0A09080B -GLOBL ·rol8_AVX2<>(SB), (NOPTR+RODATA), $32 // The VPSHUFB 8 bit left rotate constant diff --git a/vendor/github.com/aead/chacha20/chacha/macro.s b/vendor/github.com/aead/chacha20/chacha/macro.s deleted file mode 100644 index 780108f8e..000000000 --- a/vendor/github.com/aead/chacha20/chacha/macro.s +++ /dev/null @@ -1,163 +0,0 @@ -// Copyright (c) 2018 Andreas Auernhammer. All rights reserved. -// Use of this source code is governed by a license that can be -// found in the LICENSE file. - -// +build 386,!gccgo,!appengine,!nacl amd64,!gccgo,!appengine,!nacl - -// ROTL_SSE rotates all 4 32 bit values of the XMM register v -// left by n bits using SSE2 instructions (0 <= n <= 32). -// The XMM register t is used as a temp. register. -#define ROTL_SSE(n, t, v) \ - MOVO v, t; \ - PSLLL $n, t; \ - PSRLL $(32-n), v; \ - PXOR t, v - -// ROTL_AVX rotates all 4/8 32 bit values of the AVX/AVX2 register v -// left by n bits using AVX/AVX2 instructions (0 <= n <= 32). -// The AVX/AVX2 register t is used as a temp. register. -#define ROTL_AVX(n, t, v) \ - VPSLLD $n, v, t; \ - VPSRLD $(32-n), v, v; \ - VPXOR v, t, v - -// CHACHA_QROUND_SSE2 performs a ChaCha quarter-round using the -// 4 XMM registers v0, v1, v2 and v3. It uses only ROTL_SSE2 for -// rotations. The XMM register t is used as a temp. register. -#define CHACHA_QROUND_SSE2(v0, v1, v2, v3, t) \ - PADDL v1, v0; \ - PXOR v0, v3; \ - ROTL_SSE(16, t, v3); \ - PADDL v3, v2; \ - PXOR v2, v1; \ - ROTL_SSE(12, t, v1); \ - PADDL v1, v0; \ - PXOR v0, v3; \ - ROTL_SSE(8, t, v3); \ - PADDL v3, v2; \ - PXOR v2, v1; \ - ROTL_SSE(7, t, v1) - -// CHACHA_QROUND_SSSE3 performs a ChaCha quarter-round using the -// 4 XMM registers v0, v1, v2 and v3. It uses PSHUFB for 8/16 bit -// rotations. 
The XMM register t is used as a temp. register. -// -// r16 holds the PSHUFB constant for a 16 bit left rotate. -// r8 holds the PSHUFB constant for a 8 bit left rotate. -#define CHACHA_QROUND_SSSE3(v0, v1, v2, v3, t, r16, r8) \ - PADDL v1, v0; \ - PXOR v0, v3; \ - PSHUFB r16, v3; \ - PADDL v3, v2; \ - PXOR v2, v1; \ - ROTL_SSE(12, t, v1); \ - PADDL v1, v0; \ - PXOR v0, v3; \ - PSHUFB r8, v3; \ - PADDL v3, v2; \ - PXOR v2, v1; \ - ROTL_SSE(7, t, v1) - -// CHACHA_QROUND_AVX performs a ChaCha quarter-round using the -// 4 AVX/AVX2 registers v0, v1, v2 and v3. It uses VPSHUFB for 8/16 bit -// rotations. The AVX/AVX2 register t is used as a temp. register. -// -// r16 holds the VPSHUFB constant for a 16 bit left rotate. -// r8 holds the VPSHUFB constant for a 8 bit left rotate. -#define CHACHA_QROUND_AVX(v0, v1, v2, v3, t, r16, r8) \ - VPADDD v0, v1, v0; \ - VPXOR v3, v0, v3; \ - VPSHUFB r16, v3, v3; \ - VPADDD v2, v3, v2; \ - VPXOR v1, v2, v1; \ - ROTL_AVX(12, t, v1); \ - VPADDD v0, v1, v0; \ - VPXOR v3, v0, v3; \ - VPSHUFB r8, v3, v3; \ - VPADDD v2, v3, v2; \ - VPXOR v1, v2, v1; \ - ROTL_AVX(7, t, v1) - -// CHACHA_SHUFFLE_SSE performs a ChaCha shuffle using the -// 3 XMM registers v1, v2 and v3. The inverse shuffle is -// performed by switching v1 and v3: CHACHA_SHUFFLE_SSE(v3, v2, v1). -#define CHACHA_SHUFFLE_SSE(v1, v2, v3) \ - PSHUFL $0x39, v1, v1; \ - PSHUFL $0x4E, v2, v2; \ - PSHUFL $0x93, v3, v3 - -// CHACHA_SHUFFLE_AVX performs a ChaCha shuffle using the -// 3 AVX/AVX2 registers v1, v2 and v3. The inverse shuffle is -// performed by switching v1 and v3: CHACHA_SHUFFLE_AVX(v3, v2, v1). -#define CHACHA_SHUFFLE_AVX(v1, v2, v3) \ - VPSHUFD $0x39, v1, v1; \ - VPSHUFD $0x4E, v2, v2; \ - VPSHUFD $0x93, v3, v3 - -// XOR_SSE extracts 4x16 byte vectors from src at -// off, xors all vectors with the corresponding XMM -// register (v0 - v3) and writes the result to dst -// at off. -// The XMM register t is used as a temp. register. -#define XOR_SSE(dst, src, off, v0, v1, v2, v3, t) \ - MOVOU 0+off(src), t; \ - PXOR v0, t; \ - MOVOU t, 0+off(dst); \ - MOVOU 16+off(src), t; \ - PXOR v1, t; \ - MOVOU t, 16+off(dst); \ - MOVOU 32+off(src), t; \ - PXOR v2, t; \ - MOVOU t, 32+off(dst); \ - MOVOU 48+off(src), t; \ - PXOR v3, t; \ - MOVOU t, 48+off(dst) - -// XOR_AVX extracts 4x16 byte vectors from src at -// off, xors all vectors with the corresponding AVX -// register (v0 - v3) and writes the result to dst -// at off. -// The XMM register t is used as a temp. register. 
-#define XOR_AVX(dst, src, off, v0, v1, v2, v3, t) \ - VPXOR 0+off(src), v0, t; \ - VMOVDQU t, 0+off(dst); \ - VPXOR 16+off(src), v1, t; \ - VMOVDQU t, 16+off(dst); \ - VPXOR 32+off(src), v2, t; \ - VMOVDQU t, 32+off(dst); \ - VPXOR 48+off(src), v3, t; \ - VMOVDQU t, 48+off(dst) - -#define XOR_AVX2(dst, src, off, v0, v1, v2, v3, t0, t1) \ - VMOVDQU (0+off)(src), t0; \ - VPERM2I128 $32, v1, v0, t1; \ - VPXOR t0, t1, t0; \ - VMOVDQU t0, (0+off)(dst); \ - VMOVDQU (32+off)(src), t0; \ - VPERM2I128 $32, v3, v2, t1; \ - VPXOR t0, t1, t0; \ - VMOVDQU t0, (32+off)(dst); \ - VMOVDQU (64+off)(src), t0; \ - VPERM2I128 $49, v1, v0, t1; \ - VPXOR t0, t1, t0; \ - VMOVDQU t0, (64+off)(dst); \ - VMOVDQU (96+off)(src), t0; \ - VPERM2I128 $49, v3, v2, t1; \ - VPXOR t0, t1, t0; \ - VMOVDQU t0, (96+off)(dst) - -#define XOR_UPPER_AVX2(dst, src, off, v0, v1, v2, v3, t0, t1) \ - VMOVDQU (0+off)(src), t0; \ - VPERM2I128 $32, v1, v0, t1; \ - VPXOR t0, t1, t0; \ - VMOVDQU t0, (0+off)(dst); \ - VMOVDQU (32+off)(src), t0; \ - VPERM2I128 $32, v3, v2, t1; \ - VPXOR t0, t1, t0; \ - VMOVDQU t0, (32+off)(dst); \ - -#define EXTRACT_LOWER(dst, v0, v1, v2, v3, t0) \ - VPERM2I128 $49, v1, v0, t0; \ - VMOVDQU t0, 0(dst); \ - VPERM2I128 $49, v3, v2, t0; \ - VMOVDQU t0, 32(dst) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/CHANGELOG.md index 186c48892..ddb162b36 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/CHANGELOG.md @@ -1,3 +1,35 @@ +# v1.6.10 (2025-02-18) + +* **Bug Fix**: Bump go version to 1.22 + +# v1.6.9 (2025-02-14) + +* **Bug Fix**: Remove max limit on event stream messages + +# v1.6.8 (2025-01-24) + +* **Dependency Update**: Upgrade to smithy-go v1.22.2. + +# v1.6.7 (2024-11-18) + +* **Dependency Update**: Update to smithy-go v1.22.1. + +# v1.6.6 (2024-10-04) + +* No change notes available for this release. + +# v1.6.5 (2024-09-20) + +* No change notes available for this release. + +# v1.6.4 (2024-08-15) + +* **Dependency Update**: Bump minimum Go version to 1.21. + +# v1.6.3 (2024-06-28) + +* No change notes available for this release. + # v1.6.2 (2024-03-29) * No change notes available for this release. 
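Per the v1.6.9 entry above, the SDK no longer caps event stream message sizes (the message.go hunk below drops the 16 MB payload and 128 KB header limits and keeps only the zero-length check). A consumer that still wants an upper bound can impose its own before decoding; a minimal sketch, where appMaxMsgLen and checkMsgLen are hypothetical application code, not SDK API:

package main

import (
	"errors"
	"fmt"
)

// appMaxMsgLen is an application-chosen cap, not an SDK constant; the SDK
// itself now only rejects zero-length preludes.
const appMaxMsgLen = 16 * 1024 * 1024

// checkMsgLen validates the total length field read from a message prelude.
func checkMsgLen(preludeLength uint32) error {
	if preludeLength == 0 {
		return errors.New("eventstream: zero-length message")
	}
	if preludeLength > appMaxMsgLen {
		return fmt.Errorf("eventstream: message length %d exceeds local cap %d",
			preludeLength, appMaxMsgLen)
	}
	return nil
}

func main() {
	fmt.Println(checkMsgLen(512))              // <nil>
	fmt.Println(checkMsgLen(appMaxMsgLen + 1)) // error
}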
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/go_module_metadata.go
index a0479b9be..01981f464 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/go_module_metadata.go
@@ -3,4 +3,4 @@
 package eventstream
 
 // goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.6.2"
+const goModuleVersion = "1.6.10"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/message.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/message.go
index f7427da03..1a77654f7 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/message.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/message.go
@@ -10,9 +10,6 @@ const preludeLen = 8
 const preludeCRCLen = 4
 const msgCRCLen = 4
 const minMsgLen = preludeLen + preludeCRCLen + msgCRCLen
-const maxPayloadLen = 1024 * 1024 * 16 // 16MB
-const maxHeadersLen = 1024 * 128       // 128KB
-const maxMsgLen = minMsgLen + maxHeadersLen + maxPayloadLen
 
 var crc32IEEETable = crc32.MakeTable(crc32.IEEE)
 
@@ -82,28 +79,13 @@ func (p messagePrelude) PayloadLen() uint32 {
 }
 
 func (p messagePrelude) ValidateLens() error {
-	if p.Length == 0 || p.Length > maxMsgLen {
+	if p.Length == 0 {
 		return LengthError{
 			Part: "message prelude",
-			Want: maxMsgLen,
+			Want: minMsgLen,
 			Have: int(p.Length),
 		}
 	}
-	if p.HeadersLen > maxHeadersLen {
-		return LengthError{
-			Part: "message headers",
-			Want: maxHeadersLen,
-			Have: int(p.HeadersLen),
-		}
-	}
-	if payloadLen := p.PayloadLen(); payloadLen > maxPayloadLen {
-		return LengthError{
-			Part: "message payload",
-			Want: maxPayloadLen,
-			Have: int(payloadLen),
-		}
-	}
-
 	return nil
 }
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md
index 9bcc21600..97f59c071 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md
@@ -1,3 +1,7 @@
+# v1.29.9 (2025-03-04.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
 # v1.29.8 (2025-02-27)
 
 * **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go
index 51b0c57ce..da7e89e28 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go
@@ -3,4 +3,4 @@
 package config
 
 // goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.29.8"
+const goModuleVersion = "1.29.9"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/CHANGELOG.md
index 0f10e0228..c574867d0 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/CHANGELOG.md
@@ -1,3 +1,124 @@
+# v1.3.34 (2025-02-27)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.33 (2025-02-18)
+
+* **Bug Fix**: Bump go version to 1.22
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.32 (2025-02-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.31 (2025-01-31)
+
+* **Dependency Update**: Updated to the latest
SDK module versions + +# v1.3.30 (2025-01-30) + +* **Bug Fix**: Do not sign Transfer-Encoding header in Sigv4[a]. Fixes a signer mismatch issue with S3 Accelerate. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.29 (2025-01-24) + +* **Dependency Update**: Updated to the latest SDK module versions +* **Dependency Update**: Upgrade to smithy-go v1.22.2. + +# v1.3.28 (2025-01-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.27 (2025-01-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.26 (2024-12-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.25 (2024-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.24 (2024-11-18) + +* **Dependency Update**: Update to smithy-go v1.22.1. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.23 (2024-11-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.22 (2024-10-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.21 (2024-10-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.20 (2024-10-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.19 (2024-10-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.18 (2024-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.17 (2024-09-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.16 (2024-08-15) + +* **Dependency Update**: Bump minimum Go version to 1.21. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.15 (2024-07-10.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.14 (2024-07-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.13 (2024-06-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.12 (2024-06-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.11 (2024-06-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.10 (2024-06-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.9 (2024-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.8 (2024-06-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.7 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.6 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.3.5 (2024-03-29) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/go_module_metadata.go index 51aa32cf7..b6c06c704 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/go_module_metadata.go @@ -3,4 +3,4 @@ package v4a // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.3.5" +const goModuleVersion = "1.3.34" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/headers.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/headers.go index 3487dc335..688f83474 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/headers.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4/headers.go
@@ -4,9 +4,10 @@ package v4
 var IgnoredHeaders = Rules{
 	DenyList{
 		MapRule{
-			"Authorization":   struct{}{},
-			"User-Agent":      struct{}{},
-			"X-Amzn-Trace-Id": struct{}{},
+			"Authorization":     struct{}{},
+			"User-Agent":        struct{}{},
+			"X-Amzn-Trace-Id":   struct{}{},
+			"Transfer-Encoding": struct{}{},
 		},
 	},
 }
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/smithy.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/smithy.go
index 516d459d5..af4f6abcf 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/smithy.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/smithy.go
@@ -5,6 +5,8 @@ import (
 	"fmt"
 	"time"
 
+	internalcontext "github.com/aws/aws-sdk-go-v2/internal/context"
+
 	v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
 	"github.com/aws/aws-sdk-go-v2/internal/sdk"
 	"github.com/aws/smithy-go"
@@ -72,7 +74,11 @@ func (v *SignerAdapter) SignRequest(ctx context.Context, r *smithyhttp.Request,
 	}
 
 	hash := v4.GetPayloadHash(ctx)
-	err := v.Signer.SignHTTP(ctx, ca.Credentials, r.Request, hash, name, regions, sdk.NowTime(), func(o *SignerOptions) {
+	signingTime := sdk.NowTime()
+	if skew := internalcontext.GetAttemptSkewContext(ctx); skew != 0 {
+		signingTime = signingTime.Add(skew)
+	}
+	err := v.Signer.SignHTTP(ctx, ca.Credentials, r.Request, hash, name, regions, signingTime, func(o *SignerOptions) {
 		o.DisableURIPathEscaping, _ = smithyhttp.GetDisableDoubleEncoding(&props)
 
 		o.Logger = v.Logger
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/CHANGELOG.md
index 2c1565c70..5525b4b21 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/CHANGELOG.md
@@ -1,3 +1,40 @@
+# v1.210.1 (2025-03-19)
+
+* **Documentation**: Doc-only updates for EC2 for March 2025.
+
+# v1.210.0 (2025-03-13)
+
+* **Feature**: This release changes the CreateLaunchTemplate, CreateLaunchTemplateVersion, ModifyLaunchTemplate CLI and SDKs such that if you do not specify a client token, a randomly generated token is used for the request to ensure idempotency.
+
+# v1.209.0 (2025-03-11)
+
+* **Feature**: This release adds the GroupLongName field to the response of the DescribeAvailabilityZones API.
+
+# v1.208.0 (2025-03-07)
+
+* **Feature**: Add serviceManaged field to DescribeAddresses API response.
+
+# v1.207.1 (2025-03-04.2)
+
+* **Bug Fix**: Add assurance test for operation order.
+
+# v1.207.0 (2025-03-04)
+
+* **Feature**: Update the DescribeVpcs response
+
+# v1.206.0 (2025-02-27)
+
+* **Feature**: Track credential providers via User-Agent Feature ids
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.205.0 (2025-02-26)
+
+* **Feature**: Amazon EC2 Fleet customers can now override the Block Device Mapping specified in the Launch Template when creating a new Fleet request, saving the effort of creating and associating new Launch Templates to customize the Block Device Mapping.
+
+# v1.204.0 (2025-02-25)
+
+* **Feature**: Adds support for time-based EBS-backed AMI copy operations. Time-based copy ensures that EBS-backed AMIs are copied within and across Regions in a specified timeframe.
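A detail of the smithy.go hunk above that is easy to miss: time.Time has value semantics, so Add returns a new value that must be assigned back, or the skew adjustment is silently dropped. A standalone illustration with arbitrary values:

package main

import (
	"fmt"
	"time"
)

func main() {
	now := time.Date(2025, 3, 21, 9, 38, 15, 0, time.UTC)
	skew := 90 * time.Second

	signingTime := now
	signingTime = signingTime.Add(skew) // calling Add without assignment would be a no-op
	fmt.Println(signingTime)            // 2025-03-21 09:39:45 +0000 UTC
}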
+ # v1.203.1 (2025-02-18) * **Bug Fix**: Bump go version to 1.22 diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_client.go index 7b3e92207..2a7294485 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_client.go @@ -776,6 +776,37 @@ func addUserAgentRetryMode(stack *middleware.Stack, options Options) error { return nil } +type setCredentialSourceMiddleware struct { + ua *awsmiddleware.RequestUserAgent + options Options +} + +func (m setCredentialSourceMiddleware) ID() string { return "SetCredentialSourceMiddleware" } + +func (m setCredentialSourceMiddleware) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + asProviderSource, ok := m.options.Credentials.(aws.CredentialProviderSource) + if !ok { + return next.HandleBuild(ctx, in) + } + providerSources := asProviderSource.ProviderSources() + for _, source := range providerSources { + m.ua.AddCredentialsSource(source) + } + return next.HandleBuild(ctx, in) +} + +func addCredentialSource(stack *middleware.Stack, options Options) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + mw := setCredentialSourceMiddleware{ua: ua, options: options} + return stack.Build.Insert(&mw, "UserAgent", middleware.Before) +} + func resolveTracerProvider(options *Options) { if options.TracerProvider == nil { options.TracerProvider = &tracing.NopTracerProvider{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AcceptAddressTransfer.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AcceptAddressTransfer.go index 860b8d40c..4652a6b0c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AcceptAddressTransfer.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AcceptAddressTransfer.go @@ -127,6 +127,9 @@ func (c *Client) addOperationAcceptAddressTransferMiddlewares(stack *middleware. 
if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpAcceptAddressTransferValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AcceptCapacityReservationBillingOwnership.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AcceptCapacityReservationBillingOwnership.go index 7d758a948..e6f0c2a7b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AcceptCapacityReservationBillingOwnership.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AcceptCapacityReservationBillingOwnership.go @@ -120,6 +120,9 @@ func (c *Client) addOperationAcceptCapacityReservationBillingOwnershipMiddleware if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpAcceptCapacityReservationBillingOwnershipValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AcceptReservedInstancesExchangeQuote.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AcceptReservedInstancesExchangeQuote.go index 4743ac266..8827cfe50 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AcceptReservedInstancesExchangeQuote.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AcceptReservedInstancesExchangeQuote.go @@ -125,6 +125,9 @@ func (c *Client) addOperationAcceptReservedInstancesExchangeQuoteMiddlewares(sta if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpAcceptReservedInstancesExchangeQuoteValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AcceptTransitGatewayMulticastDomainAssociations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AcceptTransitGatewayMulticastDomainAssociations.go index 09812215e..ddb8e9347 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AcceptTransitGatewayMulticastDomainAssociations.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AcceptTransitGatewayMulticastDomainAssociations.go @@ -122,6 +122,9 @@ func (c *Client) addOperationAcceptTransitGatewayMulticastDomainAssociationsMidd if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opAcceptTransitGatewayMulticastDomainAssociations(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AcceptTransitGatewayPeeringAttachment.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AcceptTransitGatewayPeeringAttachment.go index 0be0a9e59..5ea0d0147 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AcceptTransitGatewayPeeringAttachment.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AcceptTransitGatewayPeeringAttachment.go @@ -119,6 +119,9 @@ func (c *Client) addOperationAcceptTransitGatewayPeeringAttachmentMiddlewares(st if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpAcceptTransitGatewayPeeringAttachmentValidationMiddleware(stack); 
err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AcceptTransitGatewayVpcAttachment.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AcceptTransitGatewayVpcAttachment.go index 2ac259ade..6d8077c5a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AcceptTransitGatewayVpcAttachment.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AcceptTransitGatewayVpcAttachment.go @@ -121,6 +121,9 @@ func (c *Client) addOperationAcceptTransitGatewayVpcAttachmentMiddlewares(stack if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpAcceptTransitGatewayVpcAttachmentValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AcceptVpcEndpointConnections.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AcceptVpcEndpointConnections.go index 24003b549..52327eb0e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AcceptVpcEndpointConnections.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AcceptVpcEndpointConnections.go @@ -123,6 +123,9 @@ func (c *Client) addOperationAcceptVpcEndpointConnectionsMiddlewares(stack *midd if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpAcceptVpcEndpointConnectionsValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AcceptVpcPeeringConnection.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AcceptVpcPeeringConnection.go index b653004d3..4ca7541cb 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AcceptVpcPeeringConnection.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AcceptVpcPeeringConnection.go @@ -124,6 +124,9 @@ func (c *Client) addOperationAcceptVpcPeeringConnectionMiddlewares(stack *middle if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpAcceptVpcPeeringConnectionValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AdvertiseByoipCidr.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AdvertiseByoipCidr.go index d26534cb1..21a166c98 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AdvertiseByoipCidr.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AdvertiseByoipCidr.go @@ -157,6 +157,9 @@ func (c *Client) addOperationAdvertiseByoipCidrMiddlewares(stack *middleware.Sta if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpAdvertiseByoipCidrValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AllocateAddress.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AllocateAddress.go index a39af6a4b..2e5b63915 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AllocateAddress.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AllocateAddress.go @@ -190,6 +190,9 @@ func (c *Client) addOperationAllocateAddressMiddlewares(stack *middleware.Stack, if err = addUserAgentRetryMode(stack, options); err != nil { return 
err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opAllocateAddress(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AllocateHosts.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AllocateHosts.go index 30f67dce7..4c0e8bf55 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AllocateHosts.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AllocateHosts.go @@ -196,6 +196,9 @@ func (c *Client) addOperationAllocateHostsMiddlewares(stack *middleware.Stack, o if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpAllocateHostsValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AllocateIpamPoolCidr.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AllocateIpamPoolCidr.go index 3f16b4db5..933d56278 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AllocateIpamPoolCidr.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AllocateIpamPoolCidr.go @@ -175,6 +175,9 @@ func (c *Client) addOperationAllocateIpamPoolCidrMiddlewares(stack *middleware.S if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addIdempotencyToken_opAllocateIpamPoolCidrMiddleware(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ApplySecurityGroupsToClientVpnTargetNetwork.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ApplySecurityGroupsToClientVpnTargetNetwork.go index 7f395537f..93999d67a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ApplySecurityGroupsToClientVpnTargetNetwork.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ApplySecurityGroupsToClientVpnTargetNetwork.go @@ -130,6 +130,9 @@ func (c *Client) addOperationApplySecurityGroupsToClientVpnTargetNetworkMiddlewa if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpApplySecurityGroupsToClientVpnTargetNetworkValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssignIpv6Addresses.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssignIpv6Addresses.go index 099fed0d8..6323f310e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssignIpv6Addresses.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssignIpv6Addresses.go @@ -152,6 +152,9 @@ func (c *Client) addOperationAssignIpv6AddressesMiddlewares(stack *middleware.St if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpAssignIpv6AddressesValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssignPrivateIpAddresses.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssignPrivateIpAddresses.go index b9c85cf61..72df0d701 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssignPrivateIpAddresses.go +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssignPrivateIpAddresses.go @@ -169,6 +169,9 @@ func (c *Client) addOperationAssignPrivateIpAddressesMiddlewares(stack *middlewa if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpAssignPrivateIpAddressesValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssignPrivateNatGatewayAddress.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssignPrivateNatGatewayAddress.go index 1c9a592e8..08d17ad10 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssignPrivateNatGatewayAddress.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssignPrivateNatGatewayAddress.go @@ -131,6 +131,9 @@ func (c *Client) addOperationAssignPrivateNatGatewayAddressMiddlewares(stack *mi if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpAssignPrivateNatGatewayAddressValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateAddress.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateAddress.go index 88dd66173..cd8bcb78b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateAddress.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateAddress.go @@ -161,6 +161,9 @@ func (c *Client) addOperationAssociateAddressMiddlewares(stack *middleware.Stack if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opAssociateAddress(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateCapacityReservationBillingOwner.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateCapacityReservationBillingOwner.go index 4c33c8b50..d4145916e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateCapacityReservationBillingOwner.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateCapacityReservationBillingOwner.go @@ -126,6 +126,9 @@ func (c *Client) addOperationAssociateCapacityReservationBillingOwnerMiddlewares if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpAssociateCapacityReservationBillingOwnerValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateClientVpnTargetNetwork.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateClientVpnTargetNetwork.go index 5282a4a04..3edf2cca0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateClientVpnTargetNetwork.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateClientVpnTargetNetwork.go @@ -141,6 +141,9 @@ func (c *Client) addOperationAssociateClientVpnTargetNetworkMiddlewares(stack *m if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addIdempotencyToken_opAssociateClientVpnTargetNetworkMiddleware(stack, options); err != nil { 
return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateDhcpOptions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateDhcpOptions.go index 7f8a1b47c..1b04c25a9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateDhcpOptions.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateDhcpOptions.go @@ -130,6 +130,9 @@ func (c *Client) addOperationAssociateDhcpOptionsMiddlewares(stack *middleware.S if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpAssociateDhcpOptionsValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateEnclaveCertificateIamRole.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateEnclaveCertificateIamRole.go index ff0ae6f7e..c3e3e1ada 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateEnclaveCertificateIamRole.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateEnclaveCertificateIamRole.go @@ -149,6 +149,9 @@ func (c *Client) addOperationAssociateEnclaveCertificateIamRoleMiddlewares(stack if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpAssociateEnclaveCertificateIamRoleValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateIamInstanceProfile.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateIamInstanceProfile.go index 51103ac78..885ccab24 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateIamInstanceProfile.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateIamInstanceProfile.go @@ -118,6 +118,9 @@ func (c *Client) addOperationAssociateIamInstanceProfileMiddlewares(stack *middl if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpAssociateIamInstanceProfileValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateInstanceEventWindow.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateInstanceEventWindow.go index f6d370097..9fb87f544 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateInstanceEventWindow.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateInstanceEventWindow.go @@ -129,6 +129,9 @@ func (c *Client) addOperationAssociateInstanceEventWindowMiddlewares(stack *midd if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpAssociateInstanceEventWindowValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateIpamByoasn.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateIpamByoasn.go index ec61a919d..afe7a72e1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateIpamByoasn.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateIpamByoasn.go @@ -132,6 +132,9 @@ func (c *Client) addOperationAssociateIpamByoasnMiddlewares(stack *middleware.St if err = addUserAgentRetryMode(stack, 
options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpAssociateIpamByoasnValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateIpamResourceDiscovery.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateIpamResourceDiscovery.go index 373cc4089..24d328e46 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateIpamResourceDiscovery.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateIpamResourceDiscovery.go @@ -132,6 +132,9 @@ func (c *Client) addOperationAssociateIpamResourceDiscoveryMiddlewares(stack *mi if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addIdempotencyToken_opAssociateIpamResourceDiscoveryMiddleware(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateNatGatewayAddress.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateNatGatewayAddress.go index f7cbc2f87..3c87b06be 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateNatGatewayAddress.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateNatGatewayAddress.go @@ -147,6 +147,9 @@ func (c *Client) addOperationAssociateNatGatewayAddressMiddlewares(stack *middle if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpAssociateNatGatewayAddressValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateRouteTable.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateRouteTable.go index 3df6ff498..a4c5941c9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateRouteTable.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateRouteTable.go @@ -137,6 +137,9 @@ func (c *Client) addOperationAssociateRouteTableMiddlewares(stack *middleware.St if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpAssociateRouteTableValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateSecurityGroupVpc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateSecurityGroupVpc.go index 92d271231..7287e7a67 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateSecurityGroupVpc.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateSecurityGroupVpc.go @@ -137,6 +137,9 @@ func (c *Client) addOperationAssociateSecurityGroupVpcMiddlewares(stack *middlew if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpAssociateSecurityGroupVpcValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateSubnetCidrBlock.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateSubnetCidrBlock.go index 03354097d..f14b485fa 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateSubnetCidrBlock.go +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateSubnetCidrBlock.go @@ -125,6 +125,9 @@ func (c *Client) addOperationAssociateSubnetCidrBlockMiddlewares(stack *middlewa if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpAssociateSubnetCidrBlockValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateTransitGatewayMulticastDomain.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateTransitGatewayMulticastDomain.go index 55020f3b1..50f8329f9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateTransitGatewayMulticastDomain.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateTransitGatewayMulticastDomain.go @@ -135,6 +135,9 @@ func (c *Client) addOperationAssociateTransitGatewayMulticastDomainMiddlewares(s if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpAssociateTransitGatewayMulticastDomainValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateTransitGatewayPolicyTable.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateTransitGatewayPolicyTable.go index 873f75a82..1629c934e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateTransitGatewayPolicyTable.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateTransitGatewayPolicyTable.go @@ -126,6 +126,9 @@ func (c *Client) addOperationAssociateTransitGatewayPolicyTableMiddlewares(stack if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpAssociateTransitGatewayPolicyTableValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateTransitGatewayRouteTable.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateTransitGatewayRouteTable.go index 607f9a41a..e90872100 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateTransitGatewayRouteTable.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateTransitGatewayRouteTable.go @@ -124,6 +124,9 @@ func (c *Client) addOperationAssociateTransitGatewayRouteTableMiddlewares(stack if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpAssociateTransitGatewayRouteTableValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateTrunkInterface.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateTrunkInterface.go index a01aea339..cd0feeebb 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateTrunkInterface.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateTrunkInterface.go @@ -148,6 +148,9 @@ func (c *Client) addOperationAssociateTrunkInterfaceMiddlewares(stack *middlewar if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addIdempotencyToken_opAssociateTrunkInterfaceMiddleware(stack, options); err != nil { 
return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateVpcCidrBlock.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateVpcCidrBlock.go index ac21512bf..fe239aac7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateVpcCidrBlock.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AssociateVpcCidrBlock.go @@ -182,6 +182,9 @@ func (c *Client) addOperationAssociateVpcCidrBlockMiddlewares(stack *middleware. if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpAssociateVpcCidrBlockValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AttachClassicLinkVpc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AttachClassicLinkVpc.go index 286cf58a5..8ca899fc0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AttachClassicLinkVpc.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AttachClassicLinkVpc.go @@ -141,6 +141,9 @@ func (c *Client) addOperationAttachClassicLinkVpcMiddlewares(stack *middleware.S if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpAttachClassicLinkVpcValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AttachInternetGateway.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AttachInternetGateway.go index 046481510..e6ff7ecb2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AttachInternetGateway.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AttachInternetGateway.go @@ -122,6 +122,9 @@ func (c *Client) addOperationAttachInternetGatewayMiddlewares(stack *middleware. 
if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpAttachInternetGatewayValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AttachNetworkInterface.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AttachNetworkInterface.go index 45808aae7..f60168160 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AttachNetworkInterface.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AttachNetworkInterface.go @@ -142,6 +142,9 @@ func (c *Client) addOperationAttachNetworkInterfaceMiddlewares(stack *middleware if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpAttachNetworkInterfaceValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AttachVerifiedAccessTrustProvider.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AttachVerifiedAccessTrustProvider.go index 1c239bfdc..9e32f0ce6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AttachVerifiedAccessTrustProvider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AttachVerifiedAccessTrustProvider.go @@ -133,6 +133,9 @@ func (c *Client) addOperationAttachVerifiedAccessTrustProviderMiddlewares(stack if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addIdempotencyToken_opAttachVerifiedAccessTrustProviderMiddleware(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AttachVolume.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AttachVolume.go index 495070731..4cea55b0a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AttachVolume.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AttachVolume.go @@ -185,6 +185,9 @@ func (c *Client) addOperationAttachVolumeMiddlewares(stack *middleware.Stack, op if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpAttachVolumeValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AttachVpnGateway.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AttachVpnGateway.go index 2a405e0b2..12b106367 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AttachVpnGateway.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AttachVpnGateway.go @@ -131,6 +131,9 @@ func (c *Client) addOperationAttachVpnGatewayMiddlewares(stack *middleware.Stack if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpAttachVpnGatewayValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AuthorizeClientVpnIngress.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AuthorizeClientVpnIngress.go index 8dd2b799c..66d3cbf1f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AuthorizeClientVpnIngress.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AuthorizeClientVpnIngress.go @@ -146,6 
+146,9 @@ func (c *Client) addOperationAuthorizeClientVpnIngressMiddlewares(stack *middlew if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addIdempotencyToken_opAuthorizeClientVpnIngressMiddleware(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AuthorizeSecurityGroupEgress.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AuthorizeSecurityGroupEgress.go index a34d8632e..4519480d0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AuthorizeSecurityGroupEgress.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AuthorizeSecurityGroupEgress.go @@ -168,6 +168,9 @@ func (c *Client) addOperationAuthorizeSecurityGroupEgressMiddlewares(stack *midd if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpAuthorizeSecurityGroupEgressValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AuthorizeSecurityGroupIngress.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AuthorizeSecurityGroupIngress.go index 1c4a9159a..a0ebe5ede 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AuthorizeSecurityGroupIngress.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_AuthorizeSecurityGroupIngress.go @@ -216,6 +216,9 @@ func (c *Client) addOperationAuthorizeSecurityGroupIngressMiddlewares(stack *mid if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opAuthorizeSecurityGroupIngress(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_BundleInstance.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_BundleInstance.go index 0e1de4e83..532ab269f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_BundleInstance.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_BundleInstance.go @@ -135,6 +135,9 @@ func (c *Client) addOperationBundleInstanceMiddlewares(stack *middleware.Stack, if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpBundleInstanceValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelBundleTask.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelBundleTask.go index 4e39192ea..7393864c1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelBundleTask.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelBundleTask.go @@ -120,6 +120,9 @@ func (c *Client) addOperationCancelBundleTaskMiddlewares(stack *middleware.Stack if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCancelBundleTaskValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelCapacityReservation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelCapacityReservation.go index 88661628c..ae5819686 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelCapacityReservation.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelCapacityReservation.go @@ -139,6 +139,9 @@ func (c *Client) addOperationCancelCapacityReservationMiddlewares(stack *middlew if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCancelCapacityReservationValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelCapacityReservationFleets.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelCapacityReservationFleets.go index d07157369..9e9a61dc2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelCapacityReservationFleets.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelCapacityReservationFleets.go @@ -131,6 +131,9 @@ func (c *Client) addOperationCancelCapacityReservationFleetsMiddlewares(stack *m if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCancelCapacityReservationFleetsValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelConversionTask.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelConversionTask.go index 81fc8747c..663dfefff 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelConversionTask.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelConversionTask.go @@ -120,6 +120,9 @@ func (c *Client) addOperationCancelConversionTaskMiddlewares(stack *middleware.S if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCancelConversionTaskValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelDeclarativePoliciesReport.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelDeclarativePoliciesReport.go index 6913b7f31..f720038a4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelDeclarativePoliciesReport.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelDeclarativePoliciesReport.go @@ -124,6 +124,9 @@ func (c *Client) addOperationCancelDeclarativePoliciesReportMiddlewares(stack *m if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCancelDeclarativePoliciesReportValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelExportTask.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelExportTask.go index 43a5840c0..7be8de075 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelExportTask.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelExportTask.go @@ -111,6 +111,9 @@ func (c *Client) addOperationCancelExportTaskMiddlewares(stack *middleware.Stack if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCancelExportTaskValidationMiddleware(stack); err != nil { return err } diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelImageLaunchPermission.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelImageLaunchPermission.go index 88de16eef..6f1767050 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelImageLaunchPermission.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelImageLaunchPermission.go @@ -120,6 +120,9 @@ func (c *Client) addOperationCancelImageLaunchPermissionMiddlewares(stack *middl if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCancelImageLaunchPermissionValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelImportTask.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelImportTask.go index eb13efbed..935fa60e6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelImportTask.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelImportTask.go @@ -124,6 +124,9 @@ func (c *Client) addOperationCancelImportTaskMiddlewares(stack *middleware.Stack if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCancelImportTask(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelReservedInstancesListing.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelReservedInstancesListing.go index 0c16952eb..8c410d673 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelReservedInstancesListing.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelReservedInstancesListing.go @@ -119,6 +119,9 @@ func (c *Client) addOperationCancelReservedInstancesListingMiddlewares(stack *mi if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCancelReservedInstancesListingValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelSpotFleetRequests.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelSpotFleetRequests.go index 4207b62a5..c95478771 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelSpotFleetRequests.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelSpotFleetRequests.go @@ -147,6 +147,9 @@ func (c *Client) addOperationCancelSpotFleetRequestsMiddlewares(stack *middlewar if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCancelSpotFleetRequestsValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelSpotInstanceRequests.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelSpotInstanceRequests.go index d4e090a9a..4212b5803 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelSpotInstanceRequests.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CancelSpotInstanceRequests.go @@ -123,6 +123,9 @@ func (c *Client) addOperationCancelSpotInstanceRequestsMiddlewares(stack *middle if err = addUserAgentRetryMode(stack, options); err 
!= nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCancelSpotInstanceRequestsValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ConfirmProductInstance.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ConfirmProductInstance.go index 68dbe666c..059090583 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ConfirmProductInstance.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ConfirmProductInstance.go @@ -129,6 +129,9 @@ func (c *Client) addOperationConfirmProductInstanceMiddlewares(stack *middleware if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpConfirmProductInstanceValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CopyFpgaImage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CopyFpgaImage.go index 4869ec535..7d227a013 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CopyFpgaImage.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CopyFpgaImage.go @@ -134,6 +134,9 @@ func (c *Client) addOperationCopyFpgaImageMiddlewares(stack *middleware.Stack, o if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCopyFpgaImageValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CopyImage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CopyImage.go index 7f4227a85..18a9bcc05 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CopyImage.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CopyImage.go @@ -136,6 +136,19 @@ type CopyImageInput struct { // Amazon EBS does not support asymmetric KMS keys. KmsKeyId *string + // Specify a completion duration, in 15-minute increments, to initiate a + // time-based AMI copy. The specified completion duration applies to each of the + // snapshots associated with the AMI. Each snapshot associated with the AMI will be + // completed within the specified completion duration, regardless of its size. + // + // If you do not specify a value, the AMI copy operation is completed on a + // best-effort basis. + // + // For more information, see [Time-based copies]. + // + // [Time-based copies]: https://docs.aws.amazon.com/ebs/latest/userguide/time-based-copies.html + SnapshotCopyCompletionDurationMinutes *int64 + // The tags to apply to the new AMI and new snapshots. You can tag the AMI, the // snapshots, or both.
// @@ -230,6 +243,9 @@ func (c *Client) addOperationCopyImageMiddlewares(stack *middleware.Stack, optio if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCopyImageValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CopySnapshot.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CopySnapshot.go index d8f3f35b8..463203491 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CopySnapshot.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CopySnapshot.go @@ -233,6 +233,9 @@ func (c *Client) addOperationCopySnapshotMiddlewares(stack *middleware.Stack, op if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCopySnapshotValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateCapacityReservation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateCapacityReservation.go index 641f2b6e5..5c36aae99 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateCapacityReservation.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateCapacityReservation.go @@ -298,6 +298,9 @@ func (c *Client) addOperationCreateCapacityReservationMiddlewares(stack *middlew if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreateCapacityReservationValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateCapacityReservationBySplitting.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateCapacityReservationBySplitting.go index 077fb661c..8d2918f6e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateCapacityReservationBySplitting.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateCapacityReservationBySplitting.go @@ -143,6 +143,9 @@ func (c *Client) addOperationCreateCapacityReservationBySplittingMiddlewares(sta if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addIdempotencyToken_opCreateCapacityReservationBySplittingMiddleware(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateCapacityReservationFleet.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateCapacityReservationFleet.go index 6d97ae1b9..f4a9fbacf 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateCapacityReservationFleet.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateCapacityReservationFleet.go @@ -215,6 +215,9 @@ func (c *Client) addOperationCreateCapacityReservationFleetMiddlewares(stack *mi if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addIdempotencyToken_opCreateCapacityReservationFleetMiddleware(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateCarrierGateway.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateCarrierGateway.go index 
3a19dc6c0..9b4d96802 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateCarrierGateway.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateCarrierGateway.go @@ -130,6 +130,9 @@ func (c *Client) addOperationCreateCarrierGatewayMiddlewares(stack *middleware.S if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addIdempotencyToken_opCreateCarrierGatewayMiddleware(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateClientVpnEndpoint.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateClientVpnEndpoint.go index c63420b50..6679773ae 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateClientVpnEndpoint.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateClientVpnEndpoint.go @@ -232,6 +232,9 @@ func (c *Client) addOperationCreateClientVpnEndpointMiddlewares(stack *middlewar if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addIdempotencyToken_opCreateClientVpnEndpointMiddleware(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateClientVpnRoute.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateClientVpnRoute.go index ac71e1871..39ead32b0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateClientVpnRoute.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateClientVpnRoute.go @@ -152,6 +152,9 @@ func (c *Client) addOperationCreateClientVpnRouteMiddlewares(stack *middleware.S if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addIdempotencyToken_opCreateClientVpnRouteMiddleware(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateCoipCidr.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateCoipCidr.go index b6f2fdcca..bd52562b7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateCoipCidr.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateCoipCidr.go @@ -123,6 +123,9 @@ func (c *Client) addOperationCreateCoipCidrMiddlewares(stack *middleware.Stack, if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreateCoipCidrValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateCoipPool.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateCoipPool.go index 0fb53bf86..9858dd9e1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateCoipPool.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateCoipPool.go @@ -121,6 +121,9 @@ func (c *Client) addOperationCreateCoipPoolMiddlewares(stack *middleware.Stack, if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreateCoipPoolValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateCustomerGateway.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateCustomerGateway.go index 317a57725..3a4012aa4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateCustomerGateway.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateCustomerGateway.go @@ -174,6 +174,9 @@ func (c *Client) addOperationCreateCustomerGatewayMiddlewares(stack *middleware. if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreateCustomerGatewayValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateDefaultSubnet.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateDefaultSubnet.go index 9f0ccc49c..5d7ef24ec 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateDefaultSubnet.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateDefaultSubnet.go @@ -127,6 +127,9 @@ func (c *Client) addOperationCreateDefaultSubnetMiddlewares(stack *middleware.St if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreateDefaultSubnetValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateDefaultVpc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateDefaultVpc.go index 3a28f3cc8..37d297ff1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateDefaultVpc.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateDefaultVpc.go @@ -121,6 +121,9 @@ func (c *Client) addOperationCreateDefaultVpcMiddlewares(stack *middleware.Stack if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateDefaultVpc(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateDhcpOptions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateDhcpOptions.go index 1528bb28a..72229495a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateDhcpOptions.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateDhcpOptions.go @@ -165,6 +165,9 @@ func (c *Client) addOperationCreateDhcpOptionsMiddlewares(stack *middleware.Stac if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreateDhcpOptionsValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateEgressOnlyInternetGateway.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateEgressOnlyInternetGateway.go index 0fe505cd4..c2347e585 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateEgressOnlyInternetGateway.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateEgressOnlyInternetGateway.go @@ -134,6 +134,9 @@ func (c *Client) addOperationCreateEgressOnlyInternetGatewayMiddlewares(stack *m if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreateEgressOnlyInternetGatewayValidationMiddleware(stack); 
err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateFleet.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateFleet.go index addd63f7c..3301784f6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateFleet.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateFleet.go @@ -221,6 +221,9 @@ func (c *Client) addOperationCreateFleetMiddlewares(stack *middleware.Stack, opt if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addIdempotencyToken_opCreateFleetMiddleware(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateFlowLogs.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateFlowLogs.go index f2659cedb..64582576c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateFlowLogs.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateFlowLogs.go @@ -237,6 +237,9 @@ func (c *Client) addOperationCreateFlowLogsMiddlewares(stack *middleware.Stack, if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreateFlowLogsValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateFpgaImage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateFpgaImage.go index 7f80bead6..764d38bf4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateFpgaImage.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateFpgaImage.go @@ -149,6 +149,9 @@ func (c *Client) addOperationCreateFpgaImageMiddlewares(stack *middleware.Stack, if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreateFpgaImageValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateImage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateImage.go index f769dbd71..38fe4ff75 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateImage.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateImage.go @@ -190,6 +190,9 @@ func (c *Client) addOperationCreateImageMiddlewares(stack *middleware.Stack, opt if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreateImageValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateInstanceConnectEndpoint.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateInstanceConnectEndpoint.go index 6dd7a06c2..477a38ac3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateInstanceConnectEndpoint.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateInstanceConnectEndpoint.go @@ -150,6 +150,9 @@ func (c *Client) addOperationCreateInstanceConnectEndpointMiddlewares(stack *mid if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addIdempotencyToken_opCreateInstanceConnectEndpointMiddleware(stack, options); err != nil { return err } 
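Every hunk above follows the same smithy-go convention: the generated addOperationXxxMiddlewares function assembles the operation's middleware stack step by step, and this patch threads one extra helper, addCredentialSource(stack, options), into each of those functions. Below is a rough, self-contained sketch of that registration pattern, assuming only the public github.com/aws/smithy-go/middleware API; fakeInput, stubTokenProvider, and the handler body are invented for illustration, and the SDK's real addCredentialSource and addIdempotencyToken_op* helpers are internal and more involved.

package main

import (
	"context"
	"fmt"

	"github.com/aws/smithy-go/middleware"
)

// fakeInput stands in for a generated input type such as
// CreateLaunchTemplateInput (hypothetical, for illustration only).
type fakeInput struct{ ClientToken *string }

// stubTokenProvider plays the role of the SDK's IdempotencyTokenProvider.
type stubTokenProvider struct{}

func (stubTokenProvider) GetIdempotencyToken() (string, error) { return "token-123", nil }

// addAutoFillSketch mirrors the registration pattern used throughout this
// patch: it adds one more middleware to the Initialize step of an operation's
// stack. This particular handler fills ClientToken when the caller left it
// nil, in the style of the addIdempotencyToken_op* helpers.
func addAutoFillSketch(stack *middleware.Stack) error {
	return stack.Initialize.Add(middleware.InitializeMiddlewareFunc(
		"OperationIdempotencyTokenAutoFill",
		func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
			middleware.InitializeOutput, middleware.Metadata, error,
		) {
			if input, ok := in.Parameters.(*fakeInput); ok && input.ClientToken == nil {
				t, err := stubTokenProvider{}.GetIdempotencyToken()
				if err != nil {
					return middleware.InitializeOutput{}, middleware.Metadata{}, err
				}
				input.ClientToken = &t // mutate the operation input before serialization
			}
			return next.HandleInitialize(ctx, in)
		},
	), middleware.Before)
}

func main() {
	stack := middleware.NewStack("sketch", func() interface{} { return nil })
	if err := addAutoFillSketch(stack); err != nil {
		panic(err)
	}
	fmt.Println(stack.Initialize.List()) // [OperationIdempotencyTokenAutoFill]
}

The sketch's handler fills ClientToken when the caller leaves it nil, which is essentially what the addIdempotencyToken_op* middlewares introduced for CreateLaunchTemplate and CreateLaunchTemplateVersion further down do. What addCredentialSource itself records is not shown in this excerpt, so the handler body here should not be read as its implementation; only the stack-registration pattern is the point.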
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateInstanceEventWindow.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateInstanceEventWindow.go index e0722deb4..de9609807 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateInstanceEventWindow.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateInstanceEventWindow.go @@ -171,6 +171,9 @@ func (c *Client) addOperationCreateInstanceEventWindowMiddlewares(stack *middlew if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateInstanceEventWindow(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateInstanceExportTask.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateInstanceExportTask.go index f9c7df910..b3387ca02 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateInstanceExportTask.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateInstanceExportTask.go @@ -135,6 +135,9 @@ func (c *Client) addOperationCreateInstanceExportTaskMiddlewares(stack *middlewa if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreateInstanceExportTaskValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateInternetGateway.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateInternetGateway.go index fff387b51..38aa1c36c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateInternetGateway.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateInternetGateway.go @@ -121,6 +121,9 @@ func (c *Client) addOperationCreateInternetGatewayMiddlewares(stack *middleware. 
if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateInternetGateway(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateIpam.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateIpam.go index 5aff61f48..97553d116 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateIpam.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateIpam.go @@ -157,6 +157,9 @@ func (c *Client) addOperationCreateIpamMiddlewares(stack *middleware.Stack, opti if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addIdempotencyToken_opCreateIpamMiddleware(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateIpamExternalResourceVerificationToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateIpamExternalResourceVerificationToken.go index 7e52c01ce..69ccb5fd8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateIpamExternalResourceVerificationToken.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateIpamExternalResourceVerificationToken.go @@ -131,6 +131,9 @@ func (c *Client) addOperationCreateIpamExternalResourceVerificationTokenMiddlewa if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addIdempotencyToken_opCreateIpamExternalResourceVerificationTokenMiddleware(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateIpamPool.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateIpamPool.go index 38124441c..d03c7cb4f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateIpamPool.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateIpamPool.go @@ -226,6 +226,9 @@ func (c *Client) addOperationCreateIpamPoolMiddlewares(stack *middleware.Stack, if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addIdempotencyToken_opCreateIpamPoolMiddleware(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateIpamResourceDiscovery.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateIpamResourceDiscovery.go index 10e91afe3..dea05d7b6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateIpamResourceDiscovery.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateIpamResourceDiscovery.go @@ -130,6 +130,9 @@ func (c *Client) addOperationCreateIpamResourceDiscoveryMiddlewares(stack *middl if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addIdempotencyToken_opCreateIpamResourceDiscoveryMiddleware(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateIpamScope.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateIpamScope.go index 3974eac2e..d8e63b6eb 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateIpamScope.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateIpamScope.go @@ -142,6 +142,9 @@ func (c *Client) addOperationCreateIpamScopeMiddlewares(stack *middleware.Stack, if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addIdempotencyToken_opCreateIpamScopeMiddleware(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateKeyPair.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateKeyPair.go index f6450c3ed..15adb7061 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateKeyPair.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateKeyPair.go @@ -166,6 +166,9 @@ func (c *Client) addOperationCreateKeyPairMiddlewares(stack *middleware.Stack, o if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreateKeyPairValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateLaunchTemplate.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateLaunchTemplate.go index 08646ccf7..f8b630f75 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateLaunchTemplate.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateLaunchTemplate.go @@ -15,15 +15,15 @@ import ( // // A launch template contains the parameters to launch an instance. When you // launch an instance using RunInstances, you can specify a launch template instead of -// providing the launch parameters in the request. For more information, see [Launch an instance from a launch template]in +// providing the launch parameters in the request. For more information, see [Store instance launch parameters in Amazon EC2 launch templates]in // the Amazon EC2 User Guide. // // To clone an existing launch template as the basis for a new launch template, // use the Amazon EC2 console. The API, SDKs, and CLI do not support cloning a // template. For more information, see [Create a launch template from an existing launch template]in the Amazon EC2 User Guide. // -// [Create a launch template from an existing launch template]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-templates.html#create-launch-template-from-existing-launch-template -// [Launch an instance from a launch template]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-templates.html +// [Create a launch template from an existing launch template]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/create-launch-template.html#create-launch-template-from-existing-launch-template +// [Store instance launch parameters in Amazon EC2 launch templates]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-templates.html func (c *Client) CreateLaunchTemplate(ctx context.Context, params *CreateLaunchTemplateInput, optFns ...func(*Options)) (*CreateLaunchTemplateOutput, error) { if params == nil { params = &CreateLaunchTemplateInput{} @@ -52,7 +52,10 @@ type CreateLaunchTemplateInput struct { LaunchTemplateName *string // Unique, case-sensitive identifier you provide to ensure the idempotency of the - // request. For more information, see [Ensuring idempotency]. + // request. 
If a client token isn't specified, a randomly generated token is used + // in the request to ensure idempotency. + // + // For more information, see [Ensuring idempotency]. // // Constraint: Maximum 128 ASCII characters. // @@ -163,6 +166,12 @@ func (c *Client) addOperationCreateLaunchTemplateMiddlewares(stack *middleware.S if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addIdempotencyToken_opCreateLaunchTemplateMiddleware(stack, options); err != nil { + return err + } if err = addOpCreateLaunchTemplateValidationMiddleware(stack); err != nil { return err } @@ -199,6 +208,39 @@ func (c *Client) addOperationCreateLaunchTemplateMiddlewares(stack *middleware.S return nil } +type idempotencyToken_initializeOpCreateLaunchTemplate struct { + tokenProvider IdempotencyTokenProvider +} + +func (*idempotencyToken_initializeOpCreateLaunchTemplate) ID() string { + return "OperationIdempotencyTokenAutoFill" +} + +func (m *idempotencyToken_initializeOpCreateLaunchTemplate) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + if m.tokenProvider == nil { + return next.HandleInitialize(ctx, in) + } + + input, ok := in.Parameters.(*CreateLaunchTemplateInput) + if !ok { + return out, metadata, fmt.Errorf("expected middleware input to be of type *CreateLaunchTemplateInput ") + } + + if input.ClientToken == nil { + t, err := m.tokenProvider.GetIdempotencyToken() + if err != nil { + return out, metadata, err + } + input.ClientToken = &t + } + return next.HandleInitialize(ctx, in) +} +func addIdempotencyToken_opCreateLaunchTemplateMiddleware(stack *middleware.Stack, cfg Options) error { + return stack.Initialize.Add(&idempotencyToken_initializeOpCreateLaunchTemplate{tokenProvider: cfg.IdempotencyTokenProvider}, middleware.Before) +} + func newServiceMetadataMiddleware_opCreateLaunchTemplate(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateLaunchTemplateVersion.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateLaunchTemplateVersion.go index 666bd5c10..6fc79a0ac 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateLaunchTemplateVersion.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateLaunchTemplateVersion.go @@ -25,7 +25,7 @@ import ( // // For more information, see [Modify a launch template (manage launch template versions)] in the Amazon EC2 User Guide. // -// [Modify a launch template (manage launch template versions)]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-templates.html#manage-launch-template-versions +// [Modify a launch template (manage launch template versions)]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/manage-launch-template-versions.html func (c *Client) CreateLaunchTemplateVersion(ctx context.Context, params *CreateLaunchTemplateVersionInput, optFns ...func(*Options)) (*CreateLaunchTemplateVersionOutput, error) { if params == nil { params = &CreateLaunchTemplateVersionInput{} @@ -49,7 +49,10 @@ type CreateLaunchTemplateVersionInput struct { LaunchTemplateData *types.RequestLaunchTemplateData // Unique, case-sensitive identifier you provide to ensure the idempotency of the - // request. 
For more information, see [Ensuring idempotency]. + // request. If a client token isn't specified, a randomly generated token is used + // in the request to ensure idempotency. + // + // For more information, see [Ensuring idempotency]. // // Constraint: Maximum 128 ASCII characters. // @@ -80,7 +83,7 @@ type CreateLaunchTemplateVersionInput struct { // // Default: false // - // [Use a Systems Manager parameter instead of an AMI ID]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-templates.html#use-an-ssm-parameter-instead-of-an-ami-id + // [Use a Systems Manager parameter instead of an AMI ID]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/create-launch-template.html#use-an-ssm-parameter-instead-of-an-ami-id ResolveAlias *bool // The version of the launch template on which to base the new version. Snapshots @@ -182,6 +185,12 @@ func (c *Client) addOperationCreateLaunchTemplateVersionMiddlewares(stack *middl if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addIdempotencyToken_opCreateLaunchTemplateVersionMiddleware(stack, options); err != nil { + return err + } if err = addOpCreateLaunchTemplateVersionValidationMiddleware(stack); err != nil { return err } @@ -218,6 +227,39 @@ func (c *Client) addOperationCreateLaunchTemplateVersionMiddlewares(stack *middl return nil } +type idempotencyToken_initializeOpCreateLaunchTemplateVersion struct { + tokenProvider IdempotencyTokenProvider +} + +func (*idempotencyToken_initializeOpCreateLaunchTemplateVersion) ID() string { + return "OperationIdempotencyTokenAutoFill" +} + +func (m *idempotencyToken_initializeOpCreateLaunchTemplateVersion) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + if m.tokenProvider == nil { + return next.HandleInitialize(ctx, in) + } + + input, ok := in.Parameters.(*CreateLaunchTemplateVersionInput) + if !ok { + return out, metadata, fmt.Errorf("expected middleware input to be of type *CreateLaunchTemplateVersionInput ") + } + + if input.ClientToken == nil { + t, err := m.tokenProvider.GetIdempotencyToken() + if err != nil { + return out, metadata, err + } + input.ClientToken = &t + } + return next.HandleInitialize(ctx, in) +} +func addIdempotencyToken_opCreateLaunchTemplateVersionMiddleware(stack *middleware.Stack, cfg Options) error { + return stack.Initialize.Add(&idempotencyToken_initializeOpCreateLaunchTemplateVersion{tokenProvider: cfg.IdempotencyTokenProvider}, middleware.Before) +} + func newServiceMetadataMiddleware_opCreateLaunchTemplateVersion(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateLocalGatewayRoute.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateLocalGatewayRoute.go index 607c84ffc..d953f96c3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateLocalGatewayRoute.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateLocalGatewayRoute.go @@ -138,6 +138,9 @@ func (c *Client) addOperationCreateLocalGatewayRouteMiddlewares(stack *middlewar if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = 
addOpCreateLocalGatewayRouteValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateLocalGatewayRouteTable.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateLocalGatewayRouteTable.go index de563acd7..870ed9970 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateLocalGatewayRouteTable.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateLocalGatewayRouteTable.go @@ -124,6 +124,9 @@ func (c *Client) addOperationCreateLocalGatewayRouteTableMiddlewares(stack *midd if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreateLocalGatewayRouteTableValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateLocalGatewayRouteTableVirtualInterfaceGroupAssociation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateLocalGatewayRouteTableVirtualInterfaceGroupAssociation.go index e2974460e..82be8858a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateLocalGatewayRouteTableVirtualInterfaceGroupAssociation.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateLocalGatewayRouteTableVirtualInterfaceGroupAssociation.go @@ -128,6 +128,9 @@ func (c *Client) addOperationCreateLocalGatewayRouteTableVirtualInterfaceGroupAs if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreateLocalGatewayRouteTableVirtualInterfaceGroupAssociationValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateLocalGatewayRouteTableVpcAssociation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateLocalGatewayRouteTableVpcAssociation.go index 167ae2df6..31a8a3f9e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateLocalGatewayRouteTableVpcAssociation.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateLocalGatewayRouteTableVpcAssociation.go @@ -126,6 +126,9 @@ func (c *Client) addOperationCreateLocalGatewayRouteTableVpcAssociationMiddlewar if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreateLocalGatewayRouteTableVpcAssociationValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateManagedPrefixList.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateManagedPrefixList.go index 68159e0c5..5c44553f4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateManagedPrefixList.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateManagedPrefixList.go @@ -148,6 +148,9 @@ func (c *Client) addOperationCreateManagedPrefixListMiddlewares(stack *middlewar if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addIdempotencyToken_opCreateManagedPrefixListMiddleware(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateNatGateway.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateNatGateway.go index 1724226d1..7d566dbcb 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateNatGateway.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateNatGateway.go @@ -193,6 +193,9 @@ func (c *Client) addOperationCreateNatGatewayMiddlewares(stack *middleware.Stack if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addIdempotencyToken_opCreateNatGatewayMiddleware(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateNetworkAcl.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateNetworkAcl.go index c8dfe28c9..ed5637d43 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateNetworkAcl.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateNetworkAcl.go @@ -136,6 +136,9 @@ func (c *Client) addOperationCreateNetworkAclMiddlewares(stack *middleware.Stack if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addIdempotencyToken_opCreateNetworkAclMiddleware(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateNetworkAclEntry.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateNetworkAclEntry.go index 116d61c21..314441d87 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateNetworkAclEntry.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateNetworkAclEntry.go @@ -179,6 +179,9 @@ func (c *Client) addOperationCreateNetworkAclEntryMiddlewares(stack *middleware. if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreateNetworkAclEntryValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateNetworkInsightsAccessScope.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateNetworkInsightsAccessScope.go index c91efe971..4e00ec7f0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateNetworkInsightsAccessScope.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateNetworkInsightsAccessScope.go @@ -140,6 +140,9 @@ func (c *Client) addOperationCreateNetworkInsightsAccessScopeMiddlewares(stack * if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addIdempotencyToken_opCreateNetworkInsightsAccessScopeMiddleware(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateNetworkInsightsPath.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateNetworkInsightsPath.go index 382d0de3f..173e8f38f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateNetworkInsightsPath.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateNetworkInsightsPath.go @@ -164,6 +164,9 @@ func (c *Client) addOperationCreateNetworkInsightsPathMiddlewares(stack *middlew if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addIdempotencyToken_opCreateNetworkInsightsPathMiddleware(stack, options); err != nil { return err } diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateNetworkInterface.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateNetworkInterface.go index fb0940bcf..001085d14 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateNetworkInterface.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateNetworkInterface.go @@ -246,6 +246,9 @@ func (c *Client) addOperationCreateNetworkInterfaceMiddlewares(stack *middleware if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addIdempotencyToken_opCreateNetworkInterfaceMiddleware(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateNetworkInterfacePermission.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateNetworkInterfacePermission.go index 8814e97c0..75b8ea79a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateNetworkInterfacePermission.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateNetworkInterfacePermission.go @@ -135,6 +135,9 @@ func (c *Client) addOperationCreateNetworkInterfacePermissionMiddlewares(stack * if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreateNetworkInterfacePermissionValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreatePlacementGroup.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreatePlacementGroup.go index fad99996d..d8ad7048a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreatePlacementGroup.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreatePlacementGroup.go @@ -147,6 +147,9 @@ func (c *Client) addOperationCreatePlacementGroupMiddlewares(stack *middleware.S if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreatePlacementGroup(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreatePublicIpv4Pool.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreatePublicIpv4Pool.go index 6dd7f5ee0..b1dcd7534 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreatePublicIpv4Pool.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreatePublicIpv4Pool.go @@ -132,6 +132,9 @@ func (c *Client) addOperationCreatePublicIpv4PoolMiddlewares(stack *middleware.S if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreatePublicIpv4Pool(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateReplaceRootVolumeTask.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateReplaceRootVolumeTask.go index 5341adaf6..738018055 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateReplaceRootVolumeTask.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateReplaceRootVolumeTask.go @@ -159,6 +159,9 @@ func (c *Client) 
addOperationCreateReplaceRootVolumeTaskMiddlewares(stack *middl if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addIdempotencyToken_opCreateReplaceRootVolumeTaskMiddleware(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateReservedInstancesListing.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateReservedInstancesListing.go index eef8d4018..c7f958109 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateReservedInstancesListing.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateReservedInstancesListing.go @@ -159,6 +159,9 @@ func (c *Client) addOperationCreateReservedInstancesListingMiddlewares(stack *mi if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreateReservedInstancesListingValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateRestoreImageTask.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateRestoreImageTask.go index f9a697dd9..3bb8fa328 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateRestoreImageTask.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateRestoreImageTask.go @@ -147,6 +147,9 @@ func (c *Client) addOperationCreateRestoreImageTaskMiddlewares(stack *middleware if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreateRestoreImageTaskValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateRoute.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateRoute.go index b472462f1..419037bf2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateRoute.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateRoute.go @@ -186,6 +186,9 @@ func (c *Client) addOperationCreateRouteMiddlewares(stack *middleware.Stack, opt if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreateRouteValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateRouteTable.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateRouteTable.go index 294e5702e..5fa58ddda 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateRouteTable.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateRouteTable.go @@ -136,6 +136,9 @@ func (c *Client) addOperationCreateRouteTableMiddlewares(stack *middleware.Stack if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addIdempotencyToken_opCreateRouteTableMiddleware(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateSecurityGroup.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateSecurityGroup.go index f34ef98e3..5c3c98472 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateSecurityGroup.go +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateSecurityGroup.go @@ -58,9 +58,10 @@ type CreateSecurityGroupInput struct { // This member is required. Description *string - // The name of the security group. + // The name of the security group. Names are case-insensitive and must be unique + // within the VPC. // - // Constraints: Up to 255 characters in length. Cannot start with sg- . + // Constraints: Up to 255 characters in length. Can't start with sg- . // // Valid characters: a-z, A-Z, 0-9, spaces, and ._-:/()#,@[]+=&;{}!$* // @@ -163,6 +164,9 @@ func (c *Client) addOperationCreateSecurityGroupMiddlewares(stack *middleware.St if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreateSecurityGroupValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateSnapshot.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateSnapshot.go index f73e5e02b..1e25c4f7c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateSnapshot.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateSnapshot.go @@ -294,6 +294,9 @@ func (c *Client) addOperationCreateSnapshotMiddlewares(stack *middleware.Stack, if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreateSnapshotValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateSnapshots.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateSnapshots.go index db82a7174..f6277b1ff 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateSnapshots.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateSnapshots.go @@ -171,6 +171,9 @@ func (c *Client) addOperationCreateSnapshotsMiddlewares(stack *middleware.Stack, if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreateSnapshotsValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateSpotDatafeedSubscription.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateSpotDatafeedSubscription.go index c817e870d..082030ba0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateSpotDatafeedSubscription.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateSpotDatafeedSubscription.go @@ -130,6 +130,9 @@ func (c *Client) addOperationCreateSpotDatafeedSubscriptionMiddlewares(stack *mi if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreateSpotDatafeedSubscriptionValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateStoreImageTask.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateStoreImageTask.go index 8a0ae2e69..355a02112 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateStoreImageTask.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateStoreImageTask.go @@ -137,6 +137,9 @@ func (c *Client) addOperationCreateStoreImageTaskMiddlewares(stack *middleware.S if err = 
addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreateStoreImageTaskValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateSubnet.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateSubnet.go index 9c395e8ac..9ee93678d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateSubnet.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateSubnet.go @@ -196,6 +196,9 @@ func (c *Client) addOperationCreateSubnetMiddlewares(stack *middleware.Stack, op if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreateSubnetValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateSubnetCidrReservation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateSubnetCidrReservation.go index 9fe2999ac..0b7898d09 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateSubnetCidrReservation.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateSubnetCidrReservation.go @@ -144,6 +144,9 @@ func (c *Client) addOperationCreateSubnetCidrReservationMiddlewares(stack *middl if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreateSubnetCidrReservationValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTags.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTags.go index 366b3fb0f..5f0b00a71 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTags.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTags.go @@ -136,6 +136,9 @@ func (c *Client) addOperationCreateTagsMiddlewares(stack *middleware.Stack, opti if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreateTagsValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTrafficMirrorFilter.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTrafficMirrorFilter.go index 8f1e55de4..8470f9e74 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTrafficMirrorFilter.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTrafficMirrorFilter.go @@ -140,6 +140,9 @@ func (c *Client) addOperationCreateTrafficMirrorFilterMiddlewares(stack *middlew if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addIdempotencyToken_opCreateTrafficMirrorFilterMiddleware(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTrafficMirrorFilterRule.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTrafficMirrorFilterRule.go index 991a884fc..674a696de 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTrafficMirrorFilterRule.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTrafficMirrorFilterRule.go @@ -181,6 +181,9 @@ func (c *Client) 
addOperationCreateTrafficMirrorFilterRuleMiddlewares(stack *mid if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addIdempotencyToken_opCreateTrafficMirrorFilterRuleMiddleware(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTrafficMirrorSession.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTrafficMirrorSession.go index 9d8ed178c..57f827c89 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTrafficMirrorSession.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTrafficMirrorSession.go @@ -189,6 +189,9 @@ func (c *Client) addOperationCreateTrafficMirrorSessionMiddlewares(stack *middle if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addIdempotencyToken_opCreateTrafficMirrorSessionMiddleware(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTrafficMirrorTarget.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTrafficMirrorTarget.go index cfdf3c018..bb08112d3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTrafficMirrorTarget.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTrafficMirrorTarget.go @@ -153,6 +153,9 @@ func (c *Client) addOperationCreateTrafficMirrorTargetMiddlewares(stack *middlew if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addIdempotencyToken_opCreateTrafficMirrorTargetMiddleware(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGateway.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGateway.go index bbdb6dd47..f0a85021b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGateway.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGateway.go @@ -140,6 +140,9 @@ func (c *Client) addOperationCreateTransitGatewayMiddlewares(stack *middleware.S if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateTransitGateway(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayConnect.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayConnect.go index 1eb6b6daf..3fa5ae971 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayConnect.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayConnect.go @@ -132,6 +132,9 @@ func (c *Client) addOperationCreateTransitGatewayConnectMiddlewares(stack *middl if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreateTransitGatewayConnectValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayConnectPeer.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayConnectPeer.go index f416975b7..10c957d2b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayConnectPeer.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayConnectPeer.go @@ -153,6 +153,9 @@ func (c *Client) addOperationCreateTransitGatewayConnectPeerMiddlewares(stack *m if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreateTransitGatewayConnectPeerValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayMulticastDomain.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayMulticastDomain.go index 7a3faaf1b..7108a7350 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayMulticastDomain.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayMulticastDomain.go @@ -129,6 +129,9 @@ func (c *Client) addOperationCreateTransitGatewayMulticastDomainMiddlewares(stac if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreateTransitGatewayMulticastDomainValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayPeeringAttachment.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayPeeringAttachment.go index c8fa78b2a..9093d50c4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayPeeringAttachment.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayPeeringAttachment.go @@ -144,6 +144,9 @@ func (c *Client) addOperationCreateTransitGatewayPeeringAttachmentMiddlewares(st if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreateTransitGatewayPeeringAttachmentValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayPolicyTable.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayPolicyTable.go index 7e3835c69..bbb3a4f58 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayPolicyTable.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayPolicyTable.go @@ -122,6 +122,9 @@ func (c *Client) addOperationCreateTransitGatewayPolicyTableMiddlewares(stack *m if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreateTransitGatewayPolicyTableValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayPrefixListReference.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayPrefixListReference.go index f6b1d154f..b961ec8c3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayPrefixListReference.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayPrefixListReference.go @@ -130,6 +130,9 @@ func (c *Client) 
addOperationCreateTransitGatewayPrefixListReferenceMiddlewares( if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreateTransitGatewayPrefixListReferenceValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayRoute.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayRoute.go index a368d4835..ecc17331a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayRoute.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayRoute.go @@ -130,6 +130,9 @@ func (c *Client) addOperationCreateTransitGatewayRouteMiddlewares(stack *middlew if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreateTransitGatewayRouteValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayRouteTable.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayRouteTable.go index 83c49bcdd..433367ff3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayRouteTable.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayRouteTable.go @@ -121,6 +121,9 @@ func (c *Client) addOperationCreateTransitGatewayRouteTableMiddlewares(stack *mi if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreateTransitGatewayRouteTableValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayRouteTableAnnouncement.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayRouteTableAnnouncement.go index ac9275022..838a336dd 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayRouteTableAnnouncement.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayRouteTableAnnouncement.go @@ -126,6 +126,9 @@ func (c *Client) addOperationCreateTransitGatewayRouteTableAnnouncementMiddlewar if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreateTransitGatewayRouteTableAnnouncementValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayVpcAttachment.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayVpcAttachment.go index d9da4b761..29cb317c7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayVpcAttachment.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateTransitGatewayVpcAttachment.go @@ -144,6 +144,9 @@ func (c *Client) addOperationCreateTransitGatewayVpcAttachmentMiddlewares(stack if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreateTransitGatewayVpcAttachmentValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVerifiedAccessEndpoint.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVerifiedAccessEndpoint.go index 67560ed78..5125ae6c9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVerifiedAccessEndpoint.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVerifiedAccessEndpoint.go @@ -177,6 +177,9 @@ func (c *Client) addOperationCreateVerifiedAccessEndpointMiddlewares(stack *midd if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addIdempotencyToken_opCreateVerifiedAccessEndpointMiddleware(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVerifiedAccessGroup.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVerifiedAccessGroup.go index 05bab5ac0..279065e7c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVerifiedAccessGroup.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVerifiedAccessGroup.go @@ -141,6 +141,9 @@ func (c *Client) addOperationCreateVerifiedAccessGroupMiddlewares(stack *middlew if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addIdempotencyToken_opCreateVerifiedAccessGroupMiddleware(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVerifiedAccessInstance.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVerifiedAccessInstance.go index 6cc4f97d7..f1d2a7ca6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVerifiedAccessInstance.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVerifiedAccessInstance.go @@ -134,6 +134,9 @@ func (c *Client) addOperationCreateVerifiedAccessInstanceMiddlewares(stack *midd if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addIdempotencyToken_opCreateVerifiedAccessInstanceMiddleware(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVerifiedAccessTrustProvider.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVerifiedAccessTrustProvider.go index 1f882b480..0098b27ac 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVerifiedAccessTrustProvider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVerifiedAccessTrustProvider.go @@ -160,6 +160,9 @@ func (c *Client) addOperationCreateVerifiedAccessTrustProviderMiddlewares(stack if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addIdempotencyToken_opCreateVerifiedAccessTrustProviderMiddleware(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVolume.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVolume.go index 0dabbced6..64e516b51 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVolume.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVolume.go @@ -336,6 +336,9 @@ func (c *Client) addOperationCreateVolumeMiddlewares(stack *middleware.Stack, op if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = 
addCredentialSource(stack, options); err != nil { + return err + } if err = addIdempotencyToken_opCreateVolumeMiddleware(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVpc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVpc.go index 07f14c645..c346a24cb 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVpc.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVpc.go @@ -202,6 +202,9 @@ func (c *Client) addOperationCreateVpcMiddlewares(stack *middleware.Stack, optio if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateVpc(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVpcBlockPublicAccessExclusion.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVpcBlockPublicAccessExclusion.go index b0668b1d6..e9484cd77 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVpcBlockPublicAccessExclusion.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVpcBlockPublicAccessExclusion.go @@ -144,6 +144,9 @@ func (c *Client) addOperationCreateVpcBlockPublicAccessExclusionMiddlewares(stac if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreateVpcBlockPublicAccessExclusionValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVpcEndpoint.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVpcEndpoint.go index aa89b5a55..3945e92d9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVpcEndpoint.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVpcEndpoint.go @@ -197,6 +197,9 @@ func (c *Client) addOperationCreateVpcEndpointMiddlewares(stack *middleware.Stac if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreateVpcEndpointValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVpcEndpointConnectionNotification.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVpcEndpointConnectionNotification.go index 6468cc902..0d795a017 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVpcEndpointConnectionNotification.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVpcEndpointConnectionNotification.go @@ -147,6 +147,9 @@ func (c *Client) addOperationCreateVpcEndpointConnectionNotificationMiddlewares( if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreateVpcEndpointConnectionNotificationValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVpcEndpointServiceConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVpcEndpointServiceConfiguration.go index 0113e2ded..27caf777f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVpcEndpointServiceConfiguration.go +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVpcEndpointServiceConfiguration.go @@ -164,6 +164,9 @@ func (c *Client) addOperationCreateVpcEndpointServiceConfigurationMiddlewares(st if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateVpcEndpointServiceConfiguration(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVpcPeeringConnection.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVpcPeeringConnection.go index 0c354824b..cab19a1f6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVpcPeeringConnection.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVpcPeeringConnection.go @@ -152,6 +152,9 @@ func (c *Client) addOperationCreateVpcPeeringConnectionMiddlewares(stack *middle if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreateVpcPeeringConnectionValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVpnConnection.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVpnConnection.go index 8801de1b7..1661a4572 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVpnConnection.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVpnConnection.go @@ -159,6 +159,9 @@ func (c *Client) addOperationCreateVpnConnectionMiddlewares(stack *middleware.St if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreateVpnConnectionValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVpnConnectionRoute.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVpnConnectionRoute.go index ffa76ecf2..17db95b79 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVpnConnectionRoute.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVpnConnectionRoute.go @@ -121,6 +121,9 @@ func (c *Client) addOperationCreateVpnConnectionRouteMiddlewares(stack *middlewa if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreateVpnConnectionRouteValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVpnGateway.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVpnGateway.go index 689e78e3e..a5951f0a0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVpnGateway.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_CreateVpnGateway.go @@ -140,6 +140,9 @@ func (c *Client) addOperationCreateVpnGatewayMiddlewares(stack *middleware.Stack if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreateVpnGatewayValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteCarrierGateway.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteCarrierGateway.go index 475a7acf3..3a51bb375 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteCarrierGateway.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteCarrierGateway.go @@ -124,6 +124,9 @@ func (c *Client) addOperationDeleteCarrierGatewayMiddlewares(stack *middleware.S if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteCarrierGatewayValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteClientVpnEndpoint.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteClientVpnEndpoint.go index de24f3e54..7c1f54322 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteClientVpnEndpoint.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteClientVpnEndpoint.go @@ -119,6 +119,9 @@ func (c *Client) addOperationDeleteClientVpnEndpointMiddlewares(stack *middlewar if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteClientVpnEndpointValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteClientVpnRoute.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteClientVpnRoute.go index bd42e10b3..0568afee7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteClientVpnRoute.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteClientVpnRoute.go @@ -130,6 +130,9 @@ func (c *Client) addOperationDeleteClientVpnRouteMiddlewares(stack *middleware.S if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteClientVpnRouteValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteCoipCidr.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteCoipCidr.go index 8600df337..674d27637 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteCoipCidr.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteCoipCidr.go @@ -123,6 +123,9 @@ func (c *Client) addOperationDeleteCoipCidrMiddlewares(stack *middleware.Stack, if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteCoipCidrValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteCoipPool.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteCoipPool.go index 4a70498c3..6c4e01453 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteCoipPool.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteCoipPool.go @@ -118,6 +118,9 @@ func (c *Client) addOperationDeleteCoipPoolMiddlewares(stack *middleware.Stack, if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteCoipPoolValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteCustomerGateway.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteCustomerGateway.go index cb73fc762..99055b42c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteCustomerGateway.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteCustomerGateway.go @@ -115,6 +115,9 @@ func (c *Client) addOperationDeleteCustomerGatewayMiddlewares(stack *middleware. if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteCustomerGatewayValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteDhcpOptions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteDhcpOptions.go index 7abcf61e9..6268c2344 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteDhcpOptions.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteDhcpOptions.go @@ -116,6 +116,9 @@ func (c *Client) addOperationDeleteDhcpOptionsMiddlewares(stack *middleware.Stac if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteDhcpOptionsValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteEgressOnlyInternetGateway.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteEgressOnlyInternetGateway.go index 56c397fa9..fba1d447d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteEgressOnlyInternetGateway.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteEgressOnlyInternetGateway.go @@ -117,6 +117,9 @@ func (c *Client) addOperationDeleteEgressOnlyInternetGatewayMiddlewares(stack *m if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteEgressOnlyInternetGatewayValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteFleets.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteFleets.go index 5d3554f4f..127a24e89 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteFleets.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteFleets.go @@ -11,19 +11,22 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Deletes the specified EC2 Fleets. +// Deletes the specified EC2 Fleet request. // -// After you delete an EC2 Fleet, it launches no new instances. +// After you delete an EC2 Fleet request, it launches no new instances. // -// You must also specify whether a deleted EC2 Fleet should terminate its -// instances. If you choose to terminate the instances, the EC2 Fleet enters the -// deleted_terminating state. Otherwise, the EC2 Fleet enters the deleted_running +// You must also specify whether a deleted EC2 Fleet request should terminate its +// instances. If you choose to terminate the instances, the EC2 Fleet request +// enters the deleted_terminating state. Otherwise, it enters the deleted_running // state, and the instances continue to run until they are interrupted or you // terminate them manually. // -// For instant fleets, EC2 Fleet must terminate the instances when the fleet is -// deleted. Up to 1000 instances can be terminated in a single request to delete -// instant fleets. 
A deleted instant fleet with running instances is not supported. +// A deleted instant fleet with running instances is not supported. When you +// delete an instant fleet, Amazon EC2 automatically terminates all its instances. +// For fleets with more than 1000 instances, the deletion request might fail. If +// your fleet has more than 1000 instances, first terminate most of the instances +// manually, leaving 1000 or fewer. Then delete the fleet, and the remaining +// instances will be terminated automatically. // // Restrictions // @@ -38,9 +41,9 @@ import ( // - If you exceed the specified number of fleets to delete, no fleets are // deleted. // -// For more information, see [Delete an EC2 Fleet] in the Amazon EC2 User Guide. +// For more information, see [Delete an EC2 Fleet request and the instances in the fleet] in the Amazon EC2 User Guide. // -// [Delete an EC2 Fleet]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/manage-ec2-fleet.html#delete-fleet +// [Delete an EC2 Fleet request and the instances in the fleet]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/delete-fleet.html func (c *Client) DeleteFleets(ctx context.Context, params *DeleteFleetsInput, optFns ...func(*Options)) (*DeleteFleetsOutput, error) { if params == nil { params = &DeleteFleetsInput{} @@ -165,6 +168,9 @@ func (c *Client) addOperationDeleteFleetsMiddlewares(stack *middleware.Stack, op if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteFleetsValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteFlowLogs.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteFlowLogs.go index 88e3df8a3..13c315621 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteFlowLogs.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteFlowLogs.go @@ -120,6 +120,9 @@ func (c *Client) addOperationDeleteFlowLogsMiddlewares(stack *middleware.Stack, if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteFlowLogsValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteFpgaImage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteFpgaImage.go index 5f7655eca..2881935a2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteFpgaImage.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteFpgaImage.go @@ -117,6 +117,9 @@ func (c *Client) addOperationDeleteFpgaImageMiddlewares(stack *middleware.Stack, if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteFpgaImageValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteInstanceConnectEndpoint.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteInstanceConnectEndpoint.go index 85b446079..72acbf30a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteInstanceConnectEndpoint.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteInstanceConnectEndpoint.go @@ -118,6 +118,9 @@ func (c *Client) addOperationDeleteInstanceConnectEndpointMiddlewares(stack *mid if err = 
addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteInstanceConnectEndpointValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteInstanceEventWindow.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteInstanceEventWindow.go index 254a47d91..fd33b60e5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteInstanceEventWindow.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteInstanceEventWindow.go @@ -126,6 +126,9 @@ func (c *Client) addOperationDeleteInstanceEventWindowMiddlewares(stack *middlew if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteInstanceEventWindowValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteInternetGateway.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteInternetGateway.go index ce5a00b0c..5f31ceb9d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteInternetGateway.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteInternetGateway.go @@ -114,6 +114,9 @@ func (c *Client) addOperationDeleteInternetGatewayMiddlewares(stack *middleware. if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteInternetGatewayValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteIpam.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteIpam.go index e1cb2aa09..3871ff155 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteIpam.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteIpam.go @@ -144,6 +144,9 @@ func (c *Client) addOperationDeleteIpamMiddlewares(stack *middleware.Stack, opti if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteIpamValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteIpamExternalResourceVerificationToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteIpamExternalResourceVerificationToken.go index 17d859a50..acc901e9f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteIpamExternalResourceVerificationToken.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteIpamExternalResourceVerificationToken.go @@ -122,6 +122,9 @@ func (c *Client) addOperationDeleteIpamExternalResourceVerificationTokenMiddlewa if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteIpamExternalResourceVerificationTokenValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteIpamPool.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteIpamPool.go index e42947a50..5f54a9f20 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteIpamPool.go +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteIpamPool.go @@ -136,6 +136,9 @@ func (c *Client) addOperationDeleteIpamPoolMiddlewares(stack *middleware.Stack, if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteIpamPoolValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteIpamResourceDiscovery.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteIpamResourceDiscovery.go index 2e178aef4..2b1ad0006 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteIpamResourceDiscovery.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteIpamResourceDiscovery.go @@ -120,6 +120,9 @@ func (c *Client) addOperationDeleteIpamResourceDiscoveryMiddlewares(stack *middl if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteIpamResourceDiscoveryValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteIpamScope.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteIpamScope.go index 5c8705ded..4f0c79ab2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteIpamScope.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteIpamScope.go @@ -122,6 +122,9 @@ func (c *Client) addOperationDeleteIpamScopeMiddlewares(stack *middleware.Stack, if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteIpamScopeValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteKeyPair.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteKeyPair.go index e0f5d9486..e8e64e0a1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteKeyPair.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteKeyPair.go @@ -121,6 +121,9 @@ func (c *Client) addOperationDeleteKeyPairMiddlewares(stack *middleware.Stack, o if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteKeyPair(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteLaunchTemplate.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteLaunchTemplate.go index a582e6fbb..7d51783a6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteLaunchTemplate.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteLaunchTemplate.go @@ -126,6 +126,9 @@ func (c *Client) addOperationDeleteLaunchTemplateMiddlewares(stack *middleware.S if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteLaunchTemplate(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteLaunchTemplateVersions.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteLaunchTemplateVersions.go index 298b31a6b..07b579ecb 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteLaunchTemplateVersions.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteLaunchTemplateVersions.go @@ -24,7 +24,7 @@ import ( // // For more information, see [Delete a launch template version] in the Amazon EC2 User Guide. // -// [Delete a launch template version]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/manage-launch-template-versions.html#delete-launch-template-version +// [Delete a launch template version]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/delete-launch-template.html#delete-launch-template-version func (c *Client) DeleteLaunchTemplateVersions(ctx context.Context, params *DeleteLaunchTemplateVersionsInput, optFns ...func(*Options)) (*DeleteLaunchTemplateVersionsOutput, error) { if params == nil { params = &DeleteLaunchTemplateVersionsInput{} @@ -147,6 +147,9 @@ func (c *Client) addOperationDeleteLaunchTemplateVersionsMiddlewares(stack *midd if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteLaunchTemplateVersionsValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteLocalGatewayRoute.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteLocalGatewayRoute.go index 2c629365a..e8fb5add0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteLocalGatewayRoute.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteLocalGatewayRoute.go @@ -125,6 +125,9 @@ func (c *Client) addOperationDeleteLocalGatewayRouteMiddlewares(stack *middlewar if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteLocalGatewayRouteValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteLocalGatewayRouteTable.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteLocalGatewayRouteTable.go index 92bb0a88f..95c9a6a7d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteLocalGatewayRouteTable.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteLocalGatewayRouteTable.go @@ -118,6 +118,9 @@ func (c *Client) addOperationDeleteLocalGatewayRouteTableMiddlewares(stack *midd if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteLocalGatewayRouteTableValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteLocalGatewayRouteTableVirtualInterfaceGroupAssociation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteLocalGatewayRouteTableVirtualInterfaceGroupAssociation.go index d145c82d9..0418ae14c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteLocalGatewayRouteTableVirtualInterfaceGroupAssociation.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteLocalGatewayRouteTableVirtualInterfaceGroupAssociation.go @@ -118,6 +118,9 @@ func (c *Client) addOperationDeleteLocalGatewayRouteTableVirtualInterfaceGroupAs if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if 
err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpDeleteLocalGatewayRouteTableVirtualInterfaceGroupAssociationValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteLocalGatewayRouteTableVpcAssociation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteLocalGatewayRouteTableVpcAssociation.go
index 7eda02656..b64ff1d3f 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteLocalGatewayRouteTableVpcAssociation.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteLocalGatewayRouteTableVpcAssociation.go
@@ -118,6 +118,9 @@ func (c *Client) addOperationDeleteLocalGatewayRouteTableVpcAssociationMiddlewar
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpDeleteLocalGatewayRouteTableVpcAssociationValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteManagedPrefixList.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteManagedPrefixList.go
index 146e5041c..b06035076 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteManagedPrefixList.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteManagedPrefixList.go
@@ -119,6 +119,9 @@ func (c *Client) addOperationDeleteManagedPrefixListMiddlewares(stack *middlewar
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpDeleteManagedPrefixListValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteNatGateway.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteNatGateway.go
index 82345efa1..be0b89790 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteNatGateway.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteNatGateway.go
@@ -120,6 +120,9 @@ func (c *Client) addOperationDeleteNatGatewayMiddlewares(stack *middleware.Stack
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpDeleteNatGatewayValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteNetworkAcl.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteNetworkAcl.go
index 45f42be03..a73d44b09 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteNetworkAcl.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteNetworkAcl.go
@@ -114,6 +114,9 @@ func (c *Client) addOperationDeleteNetworkAclMiddlewares(stack *middleware.Stack
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpDeleteNetworkAclValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteNetworkAclEntry.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteNetworkAclEntry.go
index 892a166a5..e2818bf9a 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteNetworkAclEntry.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteNetworkAclEntry.go
@@ -124,6 +124,9 @@ func (c *Client) addOperationDeleteNetworkAclEntryMiddlewares(stack *middleware.
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpDeleteNetworkAclEntryValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteNetworkInsightsAccessScope.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteNetworkInsightsAccessScope.go
index b12de9bc4..d8de77338 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteNetworkInsightsAccessScope.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteNetworkInsightsAccessScope.go
@@ -117,6 +117,9 @@ func (c *Client) addOperationDeleteNetworkInsightsAccessScopeMiddlewares(stack *
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpDeleteNetworkInsightsAccessScopeValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteNetworkInsightsAccessScopeAnalysis.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteNetworkInsightsAccessScopeAnalysis.go
index bed952777..4ca144ef1 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteNetworkInsightsAccessScopeAnalysis.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteNetworkInsightsAccessScopeAnalysis.go
@@ -117,6 +117,9 @@ func (c *Client) addOperationDeleteNetworkInsightsAccessScopeAnalysisMiddlewares
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpDeleteNetworkInsightsAccessScopeAnalysisValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteNetworkInsightsAnalysis.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteNetworkInsightsAnalysis.go
index 28d70e686..09f89e155 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteNetworkInsightsAnalysis.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteNetworkInsightsAnalysis.go
@@ -117,6 +117,9 @@ func (c *Client) addOperationDeleteNetworkInsightsAnalysisMiddlewares(stack *mid
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpDeleteNetworkInsightsAnalysisValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteNetworkInsightsPath.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteNetworkInsightsPath.go
index 1990d7c61..a041f51ac 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteNetworkInsightsPath.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteNetworkInsightsPath.go
@@ -117,6 +117,9 @@ func (c *Client) addOperationDeleteNetworkInsightsPathMiddlewares(stack *middlew
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpDeleteNetworkInsightsPathValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteNetworkInterface.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteNetworkInterface.go
index b14f124fc..a6b72b5ad 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteNetworkInterface.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteNetworkInterface.go
@@ -115,6 +115,9 @@ func (c *Client) addOperationDeleteNetworkInterfaceMiddlewares(stack *middleware
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpDeleteNetworkInterfaceValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteNetworkInterfacePermission.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteNetworkInterfacePermission.go
index 0e8d805a2..18942c811 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteNetworkInterfacePermission.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteNetworkInterfacePermission.go
@@ -126,6 +126,9 @@ func (c *Client) addOperationDeleteNetworkInterfacePermissionMiddlewares(stack *
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpDeleteNetworkInterfacePermissionValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeletePlacementGroup.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeletePlacementGroup.go
index fab461a98..265e1078e 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeletePlacementGroup.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeletePlacementGroup.go
@@ -117,6 +117,9 @@ func (c *Client) addOperationDeletePlacementGroupMiddlewares(stack *middleware.S
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpDeletePlacementGroupValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeletePublicIpv4Pool.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeletePublicIpv4Pool.go
index 4e2ce129a..0932f9685 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeletePublicIpv4Pool.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeletePublicIpv4Pool.go
@@ -128,6 +128,9 @@ func (c *Client) addOperationDeletePublicIpv4PoolMiddlewares(stack *middleware.S
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpDeletePublicIpv4PoolValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteQueuedReservedInstances.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteQueuedReservedInstances.go
index 5b627360e..80704b3a1 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteQueuedReservedInstances.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteQueuedReservedInstances.go
@@ -121,6 +121,9 @@ func (c *Client) addOperationDeleteQueuedReservedInstancesMiddlewares(stack *mid
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpDeleteQueuedReservedInstancesValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteRoute.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteRoute.go
index 7c9868eb5..46460e9ec 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteRoute.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteRoute.go
@@ -124,6 +124,9 @@ func (c *Client) addOperationDeleteRouteMiddlewares(stack *middleware.Stack, opt
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpDeleteRouteValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteRouteTable.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteRouteTable.go
index 203f91c8b..81227066a 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteRouteTable.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteRouteTable.go
@@ -114,6 +114,9 @@ func (c *Client) addOperationDeleteRouteTableMiddlewares(stack *middleware.Stack
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpDeleteRouteTableValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteSecurityGroup.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteSecurityGroup.go
index 80d5cc783..4422b1543 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteSecurityGroup.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteSecurityGroup.go
@@ -127,6 +127,9 @@ func (c *Client) addOperationDeleteSecurityGroupMiddlewares(stack *middleware.St
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteSecurityGroup(options.Region), middleware.Before); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteSnapshot.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteSnapshot.go
index 6ab49f457..f60ed99a0 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteSnapshot.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteSnapshot.go
@@ -128,6 +128,9 @@ func (c *Client) addOperationDeleteSnapshotMiddlewares(stack *middleware.Stack,
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpDeleteSnapshotValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteSpotDatafeedSubscription.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteSpotDatafeedSubscription.go
index 1316e611c..fc1fef6f5 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteSpotDatafeedSubscription.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteSpotDatafeedSubscription.go
@@ -109,6 +109,9 @@ func (c *Client) addOperationDeleteSpotDatafeedSubscriptionMiddlewares(stack *mi
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteSpotDatafeedSubscription(options.Region), middleware.Before); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteSubnet.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteSubnet.go
index 81f3acd66..192472bb5 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteSubnet.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteSubnet.go
@@ -114,6 +114,9 @@ func (c *Client) addOperationDeleteSubnetMiddlewares(stack *middleware.Stack, op
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpDeleteSubnetValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteSubnetCidrReservation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteSubnetCidrReservation.go
index a20aac44e..928b556d9 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteSubnetCidrReservation.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteSubnetCidrReservation.go
@@ -118,6 +118,9 @@ func (c *Client) addOperationDeleteSubnetCidrReservationMiddlewares(stack *middl
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpDeleteSubnetCidrReservationValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTags.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTags.go
index bd274c050..b08c304a6 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTags.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTags.go
@@ -134,6 +134,9 @@ func (c *Client) addOperationDeleteTagsMiddlewares(stack *middleware.Stack, opti
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpDeleteTagsValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTrafficMirrorFilter.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTrafficMirrorFilter.go
index f06b7472e..1244ed6ec 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTrafficMirrorFilter.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTrafficMirrorFilter.go
@@ -120,6 +120,9 @@ func (c *Client) addOperationDeleteTrafficMirrorFilterMiddlewares(stack *middlew
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpDeleteTrafficMirrorFilterValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTrafficMirrorFilterRule.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTrafficMirrorFilterRule.go
index 78b61ec6b..37ba2eee5 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTrafficMirrorFilterRule.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTrafficMirrorFilterRule.go
@@ -117,6 +117,9 @@ func (c *Client) addOperationDeleteTrafficMirrorFilterRuleMiddlewares(stack *mid
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpDeleteTrafficMirrorFilterRuleValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTrafficMirrorSession.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTrafficMirrorSession.go
index bae017765..5da049274 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTrafficMirrorSession.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTrafficMirrorSession.go
@@ -117,6 +117,9 @@ func (c *Client) addOperationDeleteTrafficMirrorSessionMiddlewares(stack *middle
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpDeleteTrafficMirrorSessionValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTrafficMirrorTarget.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTrafficMirrorTarget.go
index ccbd646e7..8d989afe7 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTrafficMirrorTarget.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTrafficMirrorTarget.go
@@ -120,6 +120,9 @@ func (c *Client) addOperationDeleteTrafficMirrorTargetMiddlewares(stack *middlew
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpDeleteTrafficMirrorTargetValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGateway.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGateway.go
index 9f1ac2b8e..1dbab0f91 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGateway.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGateway.go
@@ -118,6 +118,9 @@ func (c *Client) addOperationDeleteTransitGatewayMiddlewares(stack *middleware.S
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpDeleteTransitGatewayValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayConnect.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayConnect.go
index 50ee05a9a..eb5e51873 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayConnect.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayConnect.go
@@ -119,6 +119,9 @@ func (c *Client) addOperationDeleteTransitGatewayConnectMiddlewares(stack *middl
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpDeleteTransitGatewayConnectValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayConnectPeer.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayConnectPeer.go
index 8ea46ede2..69bb9acec 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayConnectPeer.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayConnectPeer.go
@@ -118,6 +118,9 @@ func (c *Client) addOperationDeleteTransitGatewayConnectPeerMiddlewares(stack *m
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpDeleteTransitGatewayConnectPeerValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayMulticastDomain.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayMulticastDomain.go
index ac12d4842..4c9f83491 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayMulticastDomain.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayMulticastDomain.go
@@ -118,6 +118,9 @@ func (c *Client) addOperationDeleteTransitGatewayMulticastDomainMiddlewares(stac
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpDeleteTransitGatewayMulticastDomainValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayPeeringAttachment.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayPeeringAttachment.go
index 5aebda28a..7b93ab1fd 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayPeeringAttachment.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayPeeringAttachment.go
@@ -118,6 +118,9 @@ func (c *Client) addOperationDeleteTransitGatewayPeeringAttachmentMiddlewares(st
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpDeleteTransitGatewayPeeringAttachmentValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayPolicyTable.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayPolicyTable.go
index b518334b5..787d20fbc 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayPolicyTable.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayPolicyTable.go
@@ -118,6 +118,9 @@ func (c *Client) addOperationDeleteTransitGatewayPolicyTableMiddlewares(stack *m
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpDeleteTransitGatewayPolicyTableValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayPrefixListReference.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayPrefixListReference.go
index 9b45c6095..1757471ad 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayPrefixListReference.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayPrefixListReference.go
@@ -124,6 +124,9 @@ func (c *Client) addOperationDeleteTransitGatewayPrefixListReferenceMiddlewares(
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpDeleteTransitGatewayPrefixListReferenceValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayRoute.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayRoute.go
index 23f12530a..97c0b6355 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayRoute.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayRoute.go
@@ -123,6 +123,9 @@ func (c *Client) addOperationDeleteTransitGatewayRouteMiddlewares(stack *middlew
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpDeleteTransitGatewayRouteValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayRouteTable.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayRouteTable.go
index e23529c8f..65a48c4a3 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayRouteTable.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayRouteTable.go
@@ -121,6 +121,9 @@ func (c *Client) addOperationDeleteTransitGatewayRouteTableMiddlewares(stack *mi
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpDeleteTransitGatewayRouteTableValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayRouteTableAnnouncement.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayRouteTableAnnouncement.go
index 930abbfb3..330f164c0 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayRouteTableAnnouncement.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayRouteTableAnnouncement.go
@@ -118,6 +118,9 @@ func (c *Client) addOperationDeleteTransitGatewayRouteTableAnnouncementMiddlewar
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpDeleteTransitGatewayRouteTableAnnouncementValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayVpcAttachment.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayVpcAttachment.go
index f651b2d2a..0e7cfc290 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayVpcAttachment.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteTransitGatewayVpcAttachment.go
@@ -118,6 +118,9 @@ func (c *Client) addOperationDeleteTransitGatewayVpcAttachmentMiddlewares(stack
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpDeleteTransitGatewayVpcAttachmentValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVerifiedAccessEndpoint.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVerifiedAccessEndpoint.go
index d6a5f6838..071a31738 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVerifiedAccessEndpoint.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVerifiedAccessEndpoint.go
@@ -124,6 +124,9 @@ func (c *Client) addOperationDeleteVerifiedAccessEndpointMiddlewares(stack *midd
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addIdempotencyToken_opDeleteVerifiedAccessEndpointMiddleware(stack, options); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVerifiedAccessGroup.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVerifiedAccessGroup.go
index 7c9d51587..2e159865d 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVerifiedAccessGroup.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVerifiedAccessGroup.go
@@ -124,6 +124,9 @@ func (c *Client) addOperationDeleteVerifiedAccessGroupMiddlewares(stack *middlew
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addIdempotencyToken_opDeleteVerifiedAccessGroupMiddleware(stack, options); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVerifiedAccessInstance.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVerifiedAccessInstance.go
index c1ce43743..b60833172 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVerifiedAccessInstance.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVerifiedAccessInstance.go
@@ -124,6 +124,9 @@ func (c *Client) addOperationDeleteVerifiedAccessInstanceMiddlewares(stack *midd
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addIdempotencyToken_opDeleteVerifiedAccessInstanceMiddleware(stack, options); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVerifiedAccessTrustProvider.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVerifiedAccessTrustProvider.go
index 28b1f5164..804127497 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVerifiedAccessTrustProvider.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVerifiedAccessTrustProvider.go
@@ -124,6 +124,9 @@ func (c *Client) addOperationDeleteVerifiedAccessTrustProviderMiddlewares(stack
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addIdempotencyToken_opDeleteVerifiedAccessTrustProviderMiddleware(stack, options); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVolume.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVolume.go
index 5d92d4aa7..e6261caeb 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVolume.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVolume.go
@@ -120,6 +120,9 @@ func (c *Client) addOperationDeleteVolumeMiddlewares(stack *middleware.Stack, op
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpDeleteVolumeValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVpc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVpc.go
index 477257a76..626b1d095 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVpc.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVpc.go
@@ -122,6 +122,9 @@ func (c *Client) addOperationDeleteVpcMiddlewares(stack *middleware.Stack, optio
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpDeleteVpcValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVpcBlockPublicAccessExclusion.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVpcBlockPublicAccessExclusion.go
index a77060e37..e2b0a57e7 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVpcBlockPublicAccessExclusion.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVpcBlockPublicAccessExclusion.go
@@ -125,6 +125,9 @@ func (c *Client) addOperationDeleteVpcBlockPublicAccessExclusionMiddlewares(stac
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpDeleteVpcBlockPublicAccessExclusionValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVpcEndpointConnectionNotifications.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVpcEndpointConnectionNotifications.go
index 986432e2b..608eba0bc 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVpcEndpointConnectionNotifications.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVpcEndpointConnectionNotifications.go
@@ -118,6 +118,9 @@ func (c *Client) addOperationDeleteVpcEndpointConnectionNotificationsMiddlewares
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpDeleteVpcEndpointConnectionNotificationsValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVpcEndpointServiceConfigurations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVpcEndpointServiceConfigurations.go
index e66a53f60..0edda6b47 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVpcEndpointServiceConfigurations.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVpcEndpointServiceConfigurations.go
@@ -122,6 +122,9 @@ func (c *Client) addOperationDeleteVpcEndpointServiceConfigurationsMiddlewares(s
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpDeleteVpcEndpointServiceConfigurationsValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVpcEndpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVpcEndpoints.go
index efe8df0f8..dffbb307c 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVpcEndpoints.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVpcEndpoints.go
@@ -128,6 +128,9 @@ func (c *Client) addOperationDeleteVpcEndpointsMiddlewares(stack *middleware.Sta
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpDeleteVpcEndpointsValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVpcPeeringConnection.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVpcPeeringConnection.go
index f292d5abf..197221c62 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVpcPeeringConnection.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVpcPeeringConnection.go
@@ -121,6 +121,9 @@ func (c *Client) addOperationDeleteVpcPeeringConnectionMiddlewares(stack *middle
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpDeleteVpcPeeringConnectionValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVpnConnection.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVpnConnection.go
index 45a70f8f9..27040b521 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVpnConnection.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVpnConnection.go
@@ -127,6 +127,9 @@ func (c *Client) addOperationDeleteVpnConnectionMiddlewares(stack *middleware.St
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpDeleteVpnConnectionValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVpnConnectionRoute.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVpnConnectionRoute.go
index 771398391..2748ce0bf 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVpnConnectionRoute.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVpnConnectionRoute.go
@@ -116,6 +116,9 @@ func (c *Client) addOperationDeleteVpnConnectionRouteMiddlewares(stack *middlewa
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpDeleteVpnConnectionRouteValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVpnGateway.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVpnGateway.go
index 43d6cd2f2..c09165901 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVpnGateway.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeleteVpnGateway.go
@@ -117,6 +117,9 @@ func (c *Client) addOperationDeleteVpnGatewayMiddlewares(stack *middleware.Stack
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpDeleteVpnGatewayValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeprovisionByoipCidr.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeprovisionByoipCidr.go
index daaed94b8..0f146b8cd 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeprovisionByoipCidr.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeprovisionByoipCidr.go
@@ -124,6 +124,9 @@ func (c *Client) addOperationDeprovisionByoipCidrMiddlewares(stack *middleware.S
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpDeprovisionByoipCidrValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeprovisionIpamByoasn.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeprovisionIpamByoasn.go
index 438a1db34..bbef3e41d 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeprovisionIpamByoasn.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeprovisionIpamByoasn.go
@@ -129,6 +129,9 @@ func (c *Client) addOperationDeprovisionIpamByoasnMiddlewares(stack *middleware.
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpDeprovisionIpamByoasnValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeprovisionIpamPoolCidr.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeprovisionIpamPoolCidr.go
index 773ef2c3a..930565b74 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeprovisionIpamPoolCidr.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeprovisionIpamPoolCidr.go
@@ -125,6 +125,9 @@ func (c *Client) addOperationDeprovisionIpamPoolCidrMiddlewares(stack *middlewar
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpDeprovisionIpamPoolCidrValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeprovisionPublicIpv4PoolCidr.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeprovisionPublicIpv4PoolCidr.go
index ac8c7caa3..2170f92d2 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeprovisionPublicIpv4PoolCidr.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeprovisionPublicIpv4PoolCidr.go
@@ -128,6 +128,9 @@ func (c *Client) addOperationDeprovisionPublicIpv4PoolCidrMiddlewares(stack *mid
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpDeprovisionPublicIpv4PoolCidrValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeregisterImage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeregisterImage.go
index 0a2ea6d71..90e7c064b 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeregisterImage.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeregisterImage.go
@@ -130,6 +130,9 @@ func (c *Client) addOperationDeregisterImageMiddlewares(stack *middleware.Stack,
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpDeregisterImageValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeregisterInstanceEventNotificationAttributes.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeregisterInstanceEventNotificationAttributes.go
index 57234f3e3..398d0d65b 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeregisterInstanceEventNotificationAttributes.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeregisterInstanceEventNotificationAttributes.go
@@ -119,6 +119,9 @@ func (c *Client) addOperationDeregisterInstanceEventNotificationAttributesMiddle
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpDeregisterInstanceEventNotificationAttributesValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeregisterTransitGatewayMulticastGroupMembers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeregisterTransitGatewayMulticastGroupMembers.go
index bbc548c82..7fb4bcf17 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeregisterTransitGatewayMulticastGroupMembers.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeregisterTransitGatewayMulticastGroupMembers.go
@@ -123,6 +123,9 @@ func (c *Client) addOperationDeregisterTransitGatewayMulticastGroupMembersMiddle
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeregisterTransitGatewayMulticastGroupMembers(options.Region), middleware.Before); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeregisterTransitGatewayMulticastGroupSources.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeregisterTransitGatewayMulticastGroupSources.go
index b0a6db9a9..6430fb4c2 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeregisterTransitGatewayMulticastGroupSources.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DeregisterTransitGatewayMulticastGroupSources.go
@@ -123,6 +123,9 @@ func (c *Client) addOperationDeregisterTransitGatewayMulticastGroupSourcesMiddle
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeregisterTransitGatewayMulticastGroupSources(options.Region), middleware.Before); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeAccountAttributes.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeAccountAttributes.go
index 315aead35..c8ab04279 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeAccountAttributes.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeAccountAttributes.go
@@ -140,6 +140,9 @@ func (c *Client) addOperationDescribeAccountAttributesMiddlewares(stack *middlew
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeAccountAttributes(options.Region), middleware.Before); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeAddressTransfers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeAddressTransfers.go
index a5498c2e3..fe741ed55 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeAddressTransfers.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeAddressTransfers.go
@@ -139,6 +139,9 @@ func (c *Client) addOperationDescribeAddressTransfersMiddlewares(stack *middlewa
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeAddressTransfers(options.Region), middleware.Before); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeAddresses.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeAddresses.go
index a49e9bf58..83a3a8f6c 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeAddresses.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeAddresses.go
@@ -152,6 +152,9 @@ func (c *Client) addOperationDescribeAddressesMiddlewares(stack *middleware.Stac
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeAddresses(options.Region), middleware.Before); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeAddressesAttribute.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeAddressesAttribute.go
index fafea68c4..a9d046804 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeAddressesAttribute.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeAddressesAttribute.go
@@ -133,6 +133,9 @@ func (c *Client) addOperationDescribeAddressesAttributeMiddlewares(stack *middle
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeAddressesAttribute(options.Region), middleware.Before); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeAggregateIdFormat.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeAggregateIdFormat.go
index 8a17577ce..3db068c43 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeAggregateIdFormat.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeAggregateIdFormat.go
@@ -132,6 +132,9 @@ func (c *Client) addOperationDescribeAggregateIdFormatMiddlewares(stack *middlew
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeAggregateIdFormat(options.Region), middleware.Before); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeAvailabilityZones.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeAvailabilityZones.go
index 07f61bb21..24a09a759 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeAvailabilityZones.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeAvailabilityZones.go
@@ -12,8 +12,7 @@ import (
 )
 
 // Describes the Availability Zones, Local Zones, and Wavelength Zones that are
-// available to you. If there is an event impacting a zone, you can use this
-// request to view the state and any provided messages for that zone.
+// available to you.
 //
 // For more information about Availability Zones, Local Zones, and Wavelength
 // Zones, see [Regions and zones]in the Amazon EC2 User Guide.
@@ -55,6 +54,11 @@ type DescribeAvailabilityZonesInput struct {
 	// The filters.
 	//
+	//   - group-long-name - The long name of the zone group for the Availability Zone
+	//   (for example, US West (Oregon) 1 ), the Local Zone (for example, for Zone
+	//   group us-west-2-lax-1 , it is US West (Los Angeles) , or the Wavelength Zone
+	//   (for example, for Zone group us-east-1-wl1 , it is US East (Verizon) .
+	//
 	//   - group-name - The name of the zone group for the Availability Zone (for
 	//   example, us-east-1-zg-1 ), the Local Zone (for example, us-west-2-lax-1 ), or
 	//   the Wavelength Zone (for example, us-east-1-wl1 ).
 	//
@@ -73,7 +77,7 @@ type DescribeAvailabilityZonesInput struct {
 	//   - region-name - The name of the Region for the Zone (for example, us-east-1 ).
 	//
 	//   - state - The state of the Availability Zone, the Local Zone, or the
-	//   Wavelength Zone ( available ).
+	//   Wavelength Zone ( available | unavailable | constrained ).
 	//
 	//   - zone-id - The ID of the Availability Zone (for example, use1-az1 ), the
 	//   Local Zone (for example, usw2-lax1-az1 ), or the Wavelength Zone (for example,
@@ -171,6 +175,9 @@ func (c *Client) addOperationDescribeAvailabilityZonesMiddlewares(stack *middlew
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeAvailabilityZones(options.Region), middleware.Before); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeAwsNetworkPerformanceMetricSubscriptions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeAwsNetworkPerformanceMetricSubscriptions.go
index 8d3bbda74..b5fda910f 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeAwsNetworkPerformanceMetricSubscriptions.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeAwsNetworkPerformanceMetricSubscriptions.go
@@ -127,6 +127,9 @@ func (c *Client) addOperationDescribeAwsNetworkPerformanceMetricSubscriptionsMid
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeAwsNetworkPerformanceMetricSubscriptions(options.Region), middleware.Before); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeBundleTasks.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeBundleTasks.go
index 2b98cb24a..96cd8f1d4 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeBundleTasks.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeBundleTasks.go
@@ -155,6 +155,9 @@ func (c *Client) addOperationDescribeBundleTasksMiddlewares(stack *middleware.St
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeBundleTasks(options.Region), middleware.Before); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeByoipCidrs.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeByoipCidrs.go
index b9f4eafcb..4fb42b255 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeByoipCidrs.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeByoipCidrs.go
@@ -129,6 +129,9 @@ func (c *Client) addOperationDescribeByoipCidrsMiddlewares(stack *middleware.Sta
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpDescribeByoipCidrsValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCapacityBlockExtensionHistory.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCapacityBlockExtensionHistory.go
index 5b1b21ef2..587de5caf 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCapacityBlockExtensionHistory.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCapacityBlockExtensionHistory.go
@@ -149,6 +149,9 @@ func (c *Client) addOperationDescribeCapacityBlockExtensionHistoryMiddlewares(st
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeCapacityBlockExtensionHistory(options.Region), middleware.Before); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCapacityBlockExtensionOfferings.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCapacityBlockExtensionOfferings.go
index 401633758..e62032385 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCapacityBlockExtensionOfferings.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCapacityBlockExtensionOfferings.go
@@ -138,6 +138,9 @@ func (c *Client) addOperationDescribeCapacityBlockExtensionOfferingsMiddlewares(
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpDescribeCapacityBlockExtensionOfferingsValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCapacityBlockOfferings.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCapacityBlockOfferings.go
index 7855a8dfa..985a90748 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCapacityBlockOfferings.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCapacityBlockOfferings.go
@@ -155,6 +155,9 @@ func (c *Client) addOperationDescribeCapacityBlockOfferingsMiddlewares(stack *mi
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpDescribeCapacityBlockOfferingsValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCapacityReservationBillingRequests.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCapacityReservationBillingRequests.go
index b3cc6cdc7..4e4bff5ac 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCapacityReservationBillingRequests.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCapacityReservationBillingRequests.go
@@ -159,6 +159,9 @@ func (c *Client) addOperationDescribeCapacityReservationBillingRequestsMiddlewar
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpDescribeCapacityReservationBillingRequestsValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCapacityReservationFleets.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCapacityReservationFleets.go
index bdee6f277..1ed6c8482 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCapacityReservationFleets.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCapacityReservationFleets.go
@@ -144,6 +144,9 @@ func (c *Client) addOperationDescribeCapacityReservationFleetsMiddlewares(stack
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeCapacityReservationFleets(options.Region), middleware.Before); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCapacityReservations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCapacityReservations.go
index 787b4fc34..b07e2047a 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCapacityReservations.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCapacityReservations.go
@@ -210,6 +210,9 @@ func (c *Client) addOperationDescribeCapacityReservationsMiddlewares(stack *midd
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeCapacityReservations(options.Region), middleware.Before); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCarrierGateways.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCarrierGateways.go
index 63ec9f8fa..2adcf8c45 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCarrierGateways.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCarrierGateways.go
@@ -148,6 +148,9 @@ func (c *Client) addOperationDescribeCarrierGatewaysMiddlewares(stack *middlewar
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeCarrierGateways(options.Region), middleware.Before); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeClassicLinkInstances.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeClassicLinkInstances.go
index 6e3660e68..6d7aa7170 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeClassicLinkInstances.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeClassicLinkInstances.go
@@ -155,6 +155,9 @@ func (c *Client) addOperationDescribeClassicLinkInstancesMiddlewares(stack *midd
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeClassicLinkInstances(options.Region), middleware.Before); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeClientVpnAuthorizationRules.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeClientVpnAuthorizationRules.go
index ef30867f4..65e292c7b 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeClientVpnAuthorizationRules.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeClientVpnAuthorizationRules.go
@@ -141,6 +141,9 @@ func (c *Client) addOperationDescribeClientVpnAuthorizationRulesMiddlewares(stac
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpDescribeClientVpnAuthorizationRulesValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeClientVpnConnections.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeClientVpnConnections.go
index 989c5c136..f6029c655 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeClientVpnConnections.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeClientVpnConnections.go
@@ -139,6 +139,9 @@ func (c *Client) addOperationDescribeClientVpnConnectionsMiddlewares(stack *midd
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpDescribeClientVpnConnectionsValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeClientVpnEndpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeClientVpnEndpoints.go
index eb99baee6..7b41afd5e 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeClientVpnEndpoints.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeClientVpnEndpoints.go
@@ -135,6 +135,9 @@ func (c *Client) addOperationDescribeClientVpnEndpointsMiddlewares(stack *middle
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeClientVpnEndpoints(options.Region), middleware.Before); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeClientVpnRoutes.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeClientVpnRoutes.go
index 08d79a8e8..5216d44d4 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeClientVpnRoutes.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeClientVpnRoutes.go
@@ -140,6 +140,9 @@ func (c *Client) addOperationDescribeClientVpnRoutesMiddlewares(stack *middlewar
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpDescribeClientVpnRoutesValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeClientVpnTargetNetworks.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeClientVpnTargetNetworks.go
index c6a7549ff..4b586cfbb 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeClientVpnTargetNetworks.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeClientVpnTargetNetworks.go
@@ -142,6 +142,9 @@ func (c *Client) addOperationDescribeClientVpnTargetNetworksMiddlewares(stack *m
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpDescribeClientVpnTargetNetworksValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCoipPools.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCoipPools.go
index ee096acd1..6da91482e 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCoipPools.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCoipPools.go
@@ -136,6 +136,9 @@ func (c *Client) addOperationDescribeCoipPoolsMiddlewares(stack *middleware.Stac
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeCoipPools(options.Region), middleware.Before); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeConversionTasks.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeConversionTasks.go
index 9d961f738..17d0d5a55 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeConversionTasks.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeConversionTasks.go
@@ -125,6 +125,9 @@ func (c *Client) addOperationDescribeConversionTasksMiddlewares(stack *middlewar
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeConversionTasks(options.Region), middleware.Before); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCustomerGateways.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCustomerGateways.go
index 0c6c4c98f..af39ca3cf 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCustomerGateways.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeCustomerGateways.go
@@ -153,6 +153,9 @@ func (c *Client) addOperationDescribeCustomerGatewaysMiddlewares(stack *middlewa
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeCustomerGateways(options.Region), middleware.Before); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeDeclarativePoliciesReports.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeDeclarativePoliciesReports.go
index 83954e27b..f17fce607 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeDeclarativePoliciesReports.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeDeclarativePoliciesReports.go
@@ -143,6 +143,9 @@ func (c *Client) addOperationDescribeDeclarativePoliciesReportsMiddlewares(stack
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeDeclarativePoliciesReports(options.Region), middleware.Before); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeDhcpOptions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeDhcpOptions.go
index ffaa347b7..5d9cd01c1 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeDhcpOptions.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeDhcpOptions.go
@@ -158,6 +158,9 @@ func (c *Client) addOperationDescribeDhcpOptionsMiddlewares(stack *middleware.St
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeDhcpOptions(options.Region), middleware.Before); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeEgressOnlyInternetGateways.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeEgressOnlyInternetGateways.go
index 6bebccc42..1e3a4d09b 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeEgressOnlyInternetGateways.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeEgressOnlyInternetGateways.go
@@ -145,6 +145,9 @@ func (c *Client) addOperationDescribeEgressOnlyInternetGatewaysMiddlewares(stack
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeEgressOnlyInternetGateways(options.Region), middleware.Before); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeElasticGpus.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeElasticGpus.go
index a76e6bf49..1daa57499 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeElasticGpus.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeElasticGpus.go
@@ -153,6 +153,9 @@ func (c *Client) addOperationDescribeElasticGpusMiddlewares(stack *middleware.St
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeElasticGpus(options.Region), middleware.Before); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeExportImageTasks.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeExportImageTasks.go
index 6b67e3668..0fb2842c4 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeExportImageTasks.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeExportImageTasks.go
@@ -130,6 +130,9 @@ func (c *Client) addOperationDescribeExportImageTasksMiddlewares(stack *middlewa
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeExportImageTasks(options.Region), middleware.Before); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeExportTasks.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeExportTasks.go
index 695df9dc4..4ce00435f 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeExportTasks.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeExportTasks.go
@@ -117,6 +117,9 @@ func (c *Client) addOperationDescribeExportTasksMiddlewares(stack
*middleware.St if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeExportTasks(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeFastLaunchImages.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeFastLaunchImages.go index 905e69050..0494a6d4b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeFastLaunchImages.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeFastLaunchImages.go @@ -141,6 +141,9 @@ func (c *Client) addOperationDescribeFastLaunchImagesMiddlewares(stack *middlewa if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeFastLaunchImages(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeFastSnapshotRestores.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeFastSnapshotRestores.go index c2dd83b99..77ff4fbd1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeFastSnapshotRestores.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeFastSnapshotRestores.go @@ -141,6 +141,9 @@ func (c *Client) addOperationDescribeFastSnapshotRestoresMiddlewares(stack *midd if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeFastSnapshotRestores(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeFleetHistory.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeFleetHistory.go index e867d667a..7c6f12838 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeFleetHistory.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeFleetHistory.go @@ -164,6 +164,9 @@ func (c *Client) addOperationDescribeFleetHistoryMiddlewares(stack *middleware.S if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDescribeFleetHistoryValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeFleetInstances.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeFleetInstances.go index 018b5e1b1..a0febff4d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeFleetInstances.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeFleetInstances.go @@ -149,6 +149,9 @@ func (c *Client) addOperationDescribeFleetInstancesMiddlewares(stack *middleware if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDescribeFleetInstancesValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeFleets.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeFleets.go index 
34ac73682..a32d0218c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeFleets.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeFleets.go @@ -159,6 +159,9 @@ func (c *Client) addOperationDescribeFleetsMiddlewares(stack *middleware.Stack, if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeFleets(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeFlowLogs.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeFlowLogs.go index 1bc04dada..d4dfabd3a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeFlowLogs.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeFlowLogs.go @@ -161,6 +161,9 @@ func (c *Client) addOperationDescribeFlowLogsMiddlewares(stack *middleware.Stack if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeFlowLogs(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeFpgaImageAttribute.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeFpgaImageAttribute.go index de925a19c..5118f81c4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeFpgaImageAttribute.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeFpgaImageAttribute.go @@ -123,6 +123,9 @@ func (c *Client) addOperationDescribeFpgaImageAttributeMiddlewares(stack *middle if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDescribeFpgaImageAttributeValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeFpgaImages.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeFpgaImages.go index 327489571..0b4a9ae2c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeFpgaImages.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeFpgaImages.go @@ -163,6 +163,9 @@ func (c *Client) addOperationDescribeFpgaImagesMiddlewares(stack *middleware.Sta if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeFpgaImages(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeHostReservationOfferings.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeHostReservationOfferings.go index 9dbf98f15..618f462db 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeHostReservationOfferings.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeHostReservationOfferings.go @@ -154,6 +154,9 @@ func (c *Client) addOperationDescribeHostReservationOfferingsMiddlewares(stack * if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = 
stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeHostReservationOfferings(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeHostReservations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeHostReservations.go index 1be7d3240..691768d3a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeHostReservations.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeHostReservations.go @@ -142,6 +142,9 @@ func (c *Client) addOperationDescribeHostReservationsMiddlewares(stack *middlewa if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeHostReservations(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeHosts.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeHosts.go index 2b8b26e82..07758c6fe 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeHosts.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeHosts.go @@ -151,6 +151,9 @@ func (c *Client) addOperationDescribeHostsMiddlewares(stack *middleware.Stack, o if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeHosts(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIamInstanceProfileAssociations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIamInstanceProfileAssociations.go index 402538059..1a05ef069 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIamInstanceProfileAssociations.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIamInstanceProfileAssociations.go @@ -133,6 +133,9 @@ func (c *Client) addOperationDescribeIamInstanceProfileAssociationsMiddlewares(s if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeIamInstanceProfileAssociations(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIdFormat.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIdFormat.go index f33e54175..58dbb612f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIdFormat.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIdFormat.go @@ -136,6 +136,9 @@ func (c *Client) addOperationDescribeIdFormatMiddlewares(stack *middleware.Stack if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeIdFormat(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIdentityIdFormat.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIdentityIdFormat.go index 63d473ae7..4753f2350 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIdentityIdFormat.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIdentityIdFormat.go @@ -141,6 +141,9 @@ func (c *Client) addOperationDescribeIdentityIdFormatMiddlewares(stack *middlewa if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDescribeIdentityIdFormatValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeImageAttribute.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeImageAttribute.go index 95ec365f2..c8c98eb35 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeImageAttribute.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeImageAttribute.go @@ -192,6 +192,9 @@ func (c *Client) addOperationDescribeImageAttributeMiddlewares(stack *middleware if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDescribeImageAttributeValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeImages.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeImages.go index e1d46f827..2ba734d75 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeImages.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeImages.go @@ -298,6 +298,9 @@ func (c *Client) addOperationDescribeImagesMiddlewares(stack *middleware.Stack, if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeImages(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeImportImageTasks.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeImportImageTasks.go index 80e748f1f..39339ad12 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeImportImageTasks.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeImportImageTasks.go @@ -132,6 +132,9 @@ func (c *Client) addOperationDescribeImportImageTasksMiddlewares(stack *middlewa if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeImportImageTasks(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeImportSnapshotTasks.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeImportSnapshotTasks.go index a6331b2e4..d29680d29 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeImportSnapshotTasks.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeImportSnapshotTasks.go @@ -134,6 +134,9 @@ func (c *Client) addOperationDescribeImportSnapshotTasksMiddlewares(stack *middl if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeImportSnapshotTasks(options.Region), 
middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceAttribute.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceAttribute.go index 1e25ee5c6..4c5bba14f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceAttribute.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceAttribute.go @@ -180,6 +180,9 @@ func (c *Client) addOperationDescribeInstanceAttributeMiddlewares(stack *middlew if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDescribeInstanceAttributeValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceConnectEndpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceConnectEndpoints.go index 33711a9cb..d3c83cfe8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceConnectEndpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceConnectEndpoints.go @@ -158,6 +158,9 @@ func (c *Client) addOperationDescribeInstanceConnectEndpointsMiddlewares(stack * if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeInstanceConnectEndpoints(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceCreditSpecifications.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceCreditSpecifications.go index 2f4706636..efd0476b3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceCreditSpecifications.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceCreditSpecifications.go @@ -167,6 +167,9 @@ func (c *Client) addOperationDescribeInstanceCreditSpecificationsMiddlewares(sta if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeInstanceCreditSpecifications(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceEventNotificationAttributes.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceEventNotificationAttributes.go index 15186ba58..ff3f7f984 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceEventNotificationAttributes.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceEventNotificationAttributes.go @@ -114,6 +114,9 @@ func (c *Client) addOperationDescribeInstanceEventNotificationAttributesMiddlewa if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeInstanceEventNotificationAttributes(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceEventWindows.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceEventWindows.go index ab06eb69c..d7286441c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceEventWindows.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceEventWindows.go @@ -173,6 +173,9 @@ func (c *Client) addOperationDescribeInstanceEventWindowsMiddlewares(stack *midd if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeInstanceEventWindows(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceImageMetadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceImageMetadata.go index e07c5c7ee..4a7901896 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceImageMetadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceImageMetadata.go @@ -201,6 +201,9 @@ func (c *Client) addOperationDescribeInstanceImageMetadataMiddlewares(stack *mid if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeInstanceImageMetadata(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceStatus.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceStatus.go index 23aacd98b..69ba78f90 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceStatus.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceStatus.go @@ -226,6 +226,9 @@ func (c *Client) addOperationDescribeInstanceStatusMiddlewares(stack *middleware if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeInstanceStatus(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceTopology.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceTopology.go index af4ad0552..f986eb4b6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceTopology.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceTopology.go @@ -186,6 +186,9 @@ func (c *Client) addOperationDescribeInstanceTopologyMiddlewares(stack *middlewa if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeInstanceTopology(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceTypeOfferings.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceTypeOfferings.go index 08ba59b4c..6e09eb761 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceTypeOfferings.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceTypeOfferings.go @@ -155,6 +155,9 @@ func 
(c *Client) addOperationDescribeInstanceTypeOfferingsMiddlewares(stack *mid if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeInstanceTypeOfferings(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceTypes.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceTypes.go index 68fff0b6b..4541bf6b4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceTypes.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstanceTypes.go @@ -280,6 +280,9 @@ func (c *Client) addOperationDescribeInstanceTypesMiddlewares(stack *middleware. if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeInstanceTypes(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstances.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstances.go index d3c34a1fe..6ed5b1982 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstances.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInstances.go @@ -571,6 +571,9 @@ func (c *Client) addOperationDescribeInstancesMiddlewares(stack *middleware.Stac if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeInstances(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInternetGateways.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInternetGateways.go index 2bc1e8fa2..b8a246ec4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInternetGateways.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeInternetGateways.go @@ -162,6 +162,9 @@ func (c *Client) addOperationDescribeInternetGatewaysMiddlewares(stack *middlewa if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeInternetGateways(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIpamByoasn.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIpamByoasn.go index 1060036f2..73ff267a1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIpamByoasn.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIpamByoasn.go @@ -128,6 +128,9 @@ func (c *Client) addOperationDescribeIpamByoasnMiddlewares(stack *middleware.Sta if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeIpamByoasn(options.Region), middleware.Before); err != nil { return err } diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIpamExternalResourceVerificationTokens.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIpamExternalResourceVerificationTokens.go index d0117269b..a86e9b168 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIpamExternalResourceVerificationTokens.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIpamExternalResourceVerificationTokens.go @@ -155,6 +155,9 @@ func (c *Client) addOperationDescribeIpamExternalResourceVerificationTokensMiddl if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeIpamExternalResourceVerificationTokens(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIpamPools.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIpamPools.go index f83b6cd65..471e20f9f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIpamPools.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIpamPools.go @@ -131,6 +131,9 @@ func (c *Client) addOperationDescribeIpamPoolsMiddlewares(stack *middleware.Stac if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeIpamPools(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIpamResourceDiscoveries.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIpamResourceDiscoveries.go index cebaa7af3..a05766632 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIpamResourceDiscoveries.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIpamResourceDiscoveries.go @@ -132,6 +132,9 @@ func (c *Client) addOperationDescribeIpamResourceDiscoveriesMiddlewares(stack *m if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeIpamResourceDiscoveries(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIpamResourceDiscoveryAssociations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIpamResourceDiscoveryAssociations.go index 6e08e0c80..0eb7134e1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIpamResourceDiscoveryAssociations.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIpamResourceDiscoveryAssociations.go @@ -133,6 +133,9 @@ func (c *Client) addOperationDescribeIpamResourceDiscoveryAssociationsMiddleware if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeIpamResourceDiscoveryAssociations(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIpamScopes.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIpamScopes.go index 
f783f6f19..2dfa57ea8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIpamScopes.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIpamScopes.go @@ -131,6 +131,9 @@ func (c *Client) addOperationDescribeIpamScopesMiddlewares(stack *middleware.Sta if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeIpamScopes(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIpams.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIpams.go index d10d9171b..2e071f051 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIpams.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIpams.go @@ -135,6 +135,9 @@ func (c *Client) addOperationDescribeIpamsMiddlewares(stack *middleware.Stack, o if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeIpams(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIpv6Pools.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIpv6Pools.go index e56d05c77..efc198981 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIpv6Pools.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeIpv6Pools.go @@ -138,6 +138,9 @@ func (c *Client) addOperationDescribeIpv6PoolsMiddlewares(stack *middleware.Stac if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeIpv6Pools(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeKeyPairs.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeKeyPairs.go index 68e4e140f..13f13a7b2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeKeyPairs.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeKeyPairs.go @@ -153,6 +153,9 @@ func (c *Client) addOperationDescribeKeyPairsMiddlewares(stack *middleware.Stack if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeKeyPairs(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeLaunchTemplateVersions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeLaunchTemplateVersions.go index 430dd30e9..8ada63ca1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeLaunchTemplateVersions.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeLaunchTemplateVersions.go @@ -117,7 +117,7 @@ type DescribeLaunchTemplateVersionsInput struct { // // Default: false // - // [Use a Systems Manager parameter instead of an AMI ID]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-templates.html#use-an-ssm-parameter-instead-of-an-ami-id + // [Use a 
Systems Manager parameter instead of an AMI ID]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/create-launch-template.html#use-an-ssm-parameter-instead-of-an-ami-id ResolveAlias *bool // One or more versions of the launch template. Valid values depend on whether you @@ -216,6 +216,9 @@ func (c *Client) addOperationDescribeLaunchTemplateVersionsMiddlewares(stack *mi if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeLaunchTemplateVersions(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeLaunchTemplates.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeLaunchTemplates.go index 71e5ab428..9e972034c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeLaunchTemplates.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeLaunchTemplates.go @@ -146,6 +146,9 @@ func (c *Client) addOperationDescribeLaunchTemplatesMiddlewares(stack *middlewar if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeLaunchTemplates(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociations.go index 05b7bd0ae..2779c2a8e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociations.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociations.go @@ -149,6 +149,9 @@ func (c *Client) addOperationDescribeLocalGatewayRouteTableVirtualInterfaceGroup if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociations(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeLocalGatewayRouteTableVpcAssociations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeLocalGatewayRouteTableVpcAssociations.go index b902fd198..3bee2c82a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeLocalGatewayRouteTableVpcAssociations.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeLocalGatewayRouteTableVpcAssociations.go @@ -147,6 +147,9 @@ func (c *Client) addOperationDescribeLocalGatewayRouteTableVpcAssociationsMiddle if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeLocalGatewayRouteTableVpcAssociations(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeLocalGatewayRouteTables.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeLocalGatewayRouteTables.go index e4a0f4705..08f7a1fa2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeLocalGatewayRouteTables.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeLocalGatewayRouteTables.go @@ -145,6 +145,9 @@ func (c *Client) addOperationDescribeLocalGatewayRouteTablesMiddlewares(stack *m if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeLocalGatewayRouteTables(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeLocalGatewayVirtualInterfaceGroups.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeLocalGatewayVirtualInterfaceGroups.go index 767bea3be..0c23a9035 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeLocalGatewayVirtualInterfaceGroups.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeLocalGatewayVirtualInterfaceGroups.go @@ -140,6 +140,9 @@ func (c *Client) addOperationDescribeLocalGatewayVirtualInterfaceGroupsMiddlewar if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeLocalGatewayVirtualInterfaceGroups(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeLocalGatewayVirtualInterfaces.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeLocalGatewayVirtualInterfaces.go index 325382f40..324ccdeaf 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeLocalGatewayVirtualInterfaces.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeLocalGatewayVirtualInterfaces.go @@ -148,6 +148,9 @@ func (c *Client) addOperationDescribeLocalGatewayVirtualInterfacesMiddlewares(st if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeLocalGatewayVirtualInterfaces(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeLocalGateways.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeLocalGateways.go index 036a80d69..149c9a316 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeLocalGateways.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeLocalGateways.go @@ -140,6 +140,9 @@ func (c *Client) addOperationDescribeLocalGatewaysMiddlewares(stack *middleware. 
if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeLocalGateways(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeLockedSnapshots.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeLockedSnapshots.go index 1821a0cf0..5ad0e5f0e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeLockedSnapshots.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeLockedSnapshots.go @@ -137,6 +137,9 @@ func (c *Client) addOperationDescribeLockedSnapshotsMiddlewares(stack *middlewar if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeLockedSnapshots(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeMacHosts.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeMacHosts.go index e22534d5e..d5a8111a0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeMacHosts.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeMacHosts.go @@ -131,6 +131,9 @@ func (c *Client) addOperationDescribeMacHostsMiddlewares(stack *middleware.Stack if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeMacHosts(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeManagedPrefixLists.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeManagedPrefixLists.go index e6f04affe..ae7d45ca5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeManagedPrefixLists.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeManagedPrefixLists.go @@ -139,6 +139,9 @@ func (c *Client) addOperationDescribeManagedPrefixListsMiddlewares(stack *middle if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeManagedPrefixLists(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeMovingAddresses.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeMovingAddresses.go index a150d068f..32c78c828 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeMovingAddresses.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeMovingAddresses.go @@ -141,6 +141,9 @@ func (c *Client) addOperationDescribeMovingAddressesMiddlewares(stack *middlewar if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeMovingAddresses(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeNatGateways.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeNatGateways.go index 3e2e6683f..1a7ff3e95 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeNatGateways.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeNatGateways.go @@ -158,6 +158,9 @@ func (c *Client) addOperationDescribeNatGatewaysMiddlewares(stack *middleware.St if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeNatGateways(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeNetworkAcls.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeNetworkAcls.go index 77167aeec..20d11aa0c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeNetworkAcls.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeNetworkAcls.go @@ -187,6 +187,9 @@ func (c *Client) addOperationDescribeNetworkAclsMiddlewares(stack *middleware.St if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeNetworkAcls(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeNetworkInsightsAccessScopeAnalyses.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeNetworkInsightsAccessScopeAnalyses.go index f13ffa887..65390b5ce 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeNetworkInsightsAccessScopeAnalyses.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeNetworkInsightsAccessScopeAnalyses.go @@ -142,6 +142,9 @@ func (c *Client) addOperationDescribeNetworkInsightsAccessScopeAnalysesMiddlewar if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeNetworkInsightsAccessScopeAnalyses(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeNetworkInsightsAccessScopes.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeNetworkInsightsAccessScopes.go index 7328ecaff..f18f0f544 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeNetworkInsightsAccessScopes.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeNetworkInsightsAccessScopes.go @@ -130,6 +130,9 @@ func (c *Client) addOperationDescribeNetworkInsightsAccessScopesMiddlewares(stac if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeNetworkInsightsAccessScopes(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeNetworkInsightsAnalyses.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeNetworkInsightsAnalyses.go index 8190f319f..a53cffe6c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeNetworkInsightsAnalyses.go +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeNetworkInsightsAnalyses.go @@ -146,6 +146,9 @@ func (c *Client) addOperationDescribeNetworkInsightsAnalysesMiddlewares(stack *m if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeNetworkInsightsAnalyses(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeNetworkInsightsPaths.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeNetworkInsightsPaths.go index 114d9b62b..b0094826b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeNetworkInsightsPaths.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeNetworkInsightsPaths.go @@ -158,6 +158,9 @@ func (c *Client) addOperationDescribeNetworkInsightsPathsMiddlewares(stack *midd if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeNetworkInsightsPaths(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeNetworkInterfaceAttribute.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeNetworkInterfaceAttribute.go index e9235d3b9..2d4709a52 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeNetworkInterfaceAttribute.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeNetworkInterfaceAttribute.go @@ -141,6 +141,9 @@ func (c *Client) addOperationDescribeNetworkInterfaceAttributeMiddlewares(stack if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDescribeNetworkInterfaceAttributeValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeNetworkInterfacePermissions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeNetworkInterfacePermissions.go index 392f6a9d1..c9d088e27 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeNetworkInterfacePermissions.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeNetworkInterfacePermissions.go @@ -145,6 +145,9 @@ func (c *Client) addOperationDescribeNetworkInterfacePermissionsMiddlewares(stac if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeNetworkInterfacePermissions(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeNetworkInterfaces.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeNetworkInterfaces.go index d23cc6f8b..82b3e438d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeNetworkInterfaces.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeNetworkInterfaces.go @@ -20,8 +20,7 @@ import ( // // If you have a large number of network interfaces, the operation fails unless // you use pagination or one of the following filters: group-id , mac-address , -// 
private-dns-name , private-ip-address , private-dns-name , subnet-id , or vpc-id -// . +// private-dns-name , private-ip-address , subnet-id , or vpc-id . // // We strongly recommend using only paginated requests. Unpaginated requests are // susceptible to throttling and timeouts. @@ -259,6 +258,9 @@ func (c *Client) addOperationDescribeNetworkInterfacesMiddlewares(stack *middlew if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeNetworkInterfaces(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribePlacementGroups.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribePlacementGroups.go index 15f9e50ea..36c492b1a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribePlacementGroups.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribePlacementGroups.go @@ -158,6 +158,9 @@ func (c *Client) addOperationDescribePlacementGroupsMiddlewares(stack *middlewar if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribePlacementGroups(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribePrefixLists.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribePrefixLists.go index 77eb2c538..0deb7e144 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribePrefixLists.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribePrefixLists.go @@ -138,6 +138,9 @@ func (c *Client) addOperationDescribePrefixListsMiddlewares(stack *middleware.St if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribePrefixLists(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribePrincipalIdFormat.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribePrincipalIdFormat.go index 96d1d2a19..e4a5c9618 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribePrincipalIdFormat.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribePrincipalIdFormat.go @@ -149,6 +149,9 @@ func (c *Client) addOperationDescribePrincipalIdFormatMiddlewares(stack *middlew if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribePrincipalIdFormat(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribePublicIpv4Pools.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribePublicIpv4Pools.go index 94018dbb9..c230b91d2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribePublicIpv4Pools.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribePublicIpv4Pools.go @@ -132,6 +132,9 @@ func (c *Client) addOperationDescribePublicIpv4PoolsMiddlewares(stack *middlewar if err = 
addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribePublicIpv4Pools(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeRegions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeRegions.go index 21c5d0952..3176e0cf3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeRegions.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeRegions.go @@ -144,6 +144,9 @@ func (c *Client) addOperationDescribeRegionsMiddlewares(stack *middleware.Stack, if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeRegions(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeReplaceRootVolumeTasks.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeReplaceRootVolumeTasks.go index 4dab2641b..0209a20ab 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeReplaceRootVolumeTasks.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeReplaceRootVolumeTasks.go @@ -140,6 +140,9 @@ func (c *Client) addOperationDescribeReplaceRootVolumeTasksMiddlewares(stack *mi if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeReplaceRootVolumeTasks(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeReservedInstances.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeReservedInstances.go index a570989f1..35bad937e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeReservedInstances.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeReservedInstances.go @@ -182,6 +182,9 @@ func (c *Client) addOperationDescribeReservedInstancesMiddlewares(stack *middlew if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeReservedInstances(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeReservedInstancesListings.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeReservedInstancesListings.go index 0f884dd24..f1b25d597 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeReservedInstancesListings.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeReservedInstancesListings.go @@ -152,6 +152,9 @@ func (c *Client) addOperationDescribeReservedInstancesListingsMiddlewares(stack if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeReservedInstancesListings(options.Region), middleware.Before); err != nil { return err } diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeReservedInstancesModifications.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeReservedInstancesModifications.go index 62ba7585a..fd5c8d978 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeReservedInstancesModifications.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeReservedInstancesModifications.go @@ -163,6 +163,9 @@ func (c *Client) addOperationDescribeReservedInstancesModificationsMiddlewares(s if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeReservedInstancesModifications(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeReservedInstancesOfferings.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeReservedInstancesOfferings.go index bc3ce7b25..46bad9fbf 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeReservedInstancesOfferings.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeReservedInstancesOfferings.go @@ -229,6 +229,9 @@ func (c *Client) addOperationDescribeReservedInstancesOfferingsMiddlewares(stack if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeReservedInstancesOfferings(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeRouteTables.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeRouteTables.go index cbee884f9..03215958a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeRouteTables.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeRouteTables.go @@ -209,6 +209,9 @@ func (c *Client) addOperationDescribeRouteTablesMiddlewares(stack *middleware.St if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeRouteTables(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeScheduledInstanceAvailability.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeScheduledInstanceAvailability.go index 632cb6374..90c65d3a6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeScheduledInstanceAvailability.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeScheduledInstanceAvailability.go @@ -164,6 +164,9 @@ func (c *Client) addOperationDescribeScheduledInstanceAvailabilityMiddlewares(st if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDescribeScheduledInstanceAvailabilityValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeScheduledInstances.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeScheduledInstances.go index 22dc20ff9..815204803 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeScheduledInstances.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeScheduledInstances.go @@ -142,6 +142,9 @@ func (c *Client) addOperationDescribeScheduledInstancesMiddlewares(stack *middle if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeScheduledInstances(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSecurityGroupReferences.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSecurityGroupReferences.go index ce5ee798a..2622e7886 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSecurityGroupReferences.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSecurityGroupReferences.go @@ -120,6 +120,9 @@ func (c *Client) addOperationDescribeSecurityGroupReferencesMiddlewares(stack *m if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDescribeSecurityGroupReferencesValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSecurityGroupRules.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSecurityGroupRules.go index 74af41e2f..07b729055 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSecurityGroupRules.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSecurityGroupRules.go @@ -144,6 +144,9 @@ func (c *Client) addOperationDescribeSecurityGroupRulesMiddlewares(stack *middle if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeSecurityGroupRules(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSecurityGroupVpcAssociations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSecurityGroupVpcAssociations.go index e14bf082d..6e1f59226 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSecurityGroupVpcAssociations.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSecurityGroupVpcAssociations.go @@ -149,6 +149,9 @@ func (c *Client) addOperationDescribeSecurityGroupVpcAssociationsMiddlewares(sta if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeSecurityGroupVpcAssociations(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSecurityGroups.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSecurityGroups.go index f8ba9d203..d2df992bc 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSecurityGroups.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSecurityGroups.go @@ -224,6 +224,9 @@ func (c *Client) addOperationDescribeSecurityGroupsMiddlewares(stack *middleware if err = addUserAgentRetryMode(stack, options); err 
!= nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeSecurityGroups(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSnapshotAttribute.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSnapshotAttribute.go index 9c8055fbb..c602e5ee9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSnapshotAttribute.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSnapshotAttribute.go @@ -135,6 +135,9 @@ func (c *Client) addOperationDescribeSnapshotAttributeMiddlewares(stack *middlew if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDescribeSnapshotAttributeValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSnapshotTierStatus.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSnapshotTierStatus.go index 25e266ad9..01e185f0f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSnapshotTierStatus.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSnapshotTierStatus.go @@ -141,6 +141,9 @@ func (c *Client) addOperationDescribeSnapshotTierStatusMiddlewares(stack *middle if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeSnapshotTierStatus(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSnapshots.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSnapshots.go index cc654fa72..7616f534c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSnapshots.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSnapshots.go @@ -235,6 +235,9 @@ func (c *Client) addOperationDescribeSnapshotsMiddlewares(stack *middleware.Stac if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeSnapshots(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSpotDatafeedSubscription.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSpotDatafeedSubscription.go index 99de9e5f6..67132b123 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSpotDatafeedSubscription.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSpotDatafeedSubscription.go @@ -118,6 +118,9 @@ func (c *Client) addOperationDescribeSpotDatafeedSubscriptionMiddlewares(stack * if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeSpotDatafeedSubscription(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSpotFleetInstances.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSpotFleetInstances.go index a6fdd2a2d..35e558b2a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSpotFleetInstances.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSpotFleetInstances.go @@ -139,6 +139,9 @@ func (c *Client) addOperationDescribeSpotFleetInstancesMiddlewares(stack *middle if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDescribeSpotFleetInstancesValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSpotFleetRequestHistory.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSpotFleetRequestHistory.go index f69f8c49e..8a72bb8b2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSpotFleetRequestHistory.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSpotFleetRequestHistory.go @@ -167,6 +167,9 @@ func (c *Client) addOperationDescribeSpotFleetRequestHistoryMiddlewares(stack *m if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDescribeSpotFleetRequestHistoryValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSpotFleetRequests.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSpotFleetRequests.go index 03be894f5..b97954c79 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSpotFleetRequests.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSpotFleetRequests.go @@ -136,6 +136,9 @@ func (c *Client) addOperationDescribeSpotFleetRequestsMiddlewares(stack *middlew if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeSpotFleetRequests(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSpotInstanceRequests.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSpotInstanceRequests.go index 73cd5dc17..01c017919 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSpotInstanceRequests.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSpotInstanceRequests.go @@ -262,6 +262,9 @@ func (c *Client) addOperationDescribeSpotInstanceRequestsMiddlewares(stack *midd if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeSpotInstanceRequests(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSpotPriceHistory.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSpotPriceHistory.go index 64ed8c06c..fe8400c64 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSpotPriceHistory.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSpotPriceHistory.go @@ -175,6 +175,9 @@ func (c *Client) addOperationDescribeSpotPriceHistoryMiddlewares(stack *middlewa if err = 
addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeSpotPriceHistory(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeStaleSecurityGroups.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeStaleSecurityGroups.go index f88dab16c..20cd68344 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeStaleSecurityGroups.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeStaleSecurityGroups.go @@ -141,6 +141,9 @@ func (c *Client) addOperationDescribeStaleSecurityGroupsMiddlewares(stack *middl if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDescribeStaleSecurityGroupsValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeStoreImageTasks.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeStoreImageTasks.go index c19d4d0a8..46a28ff7e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeStoreImageTasks.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeStoreImageTasks.go @@ -166,6 +166,9 @@ func (c *Client) addOperationDescribeStoreImageTasksMiddlewares(stack *middlewar if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeStoreImageTasks(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSubnets.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSubnets.go index d33149529..7772da2b4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSubnets.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeSubnets.go @@ -225,6 +225,9 @@ func (c *Client) addOperationDescribeSubnetsMiddlewares(stack *middleware.Stack, if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeSubnets(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTags.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTags.go index 5d84de5fb..59193f8c6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTags.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTags.go @@ -157,6 +157,9 @@ func (c *Client) addOperationDescribeTagsMiddlewares(stack *middleware.Stack, op if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeTags(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTrafficMirrorFilterRules.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTrafficMirrorFilterRules.go index 7e4d18bca..3baf1a572 100644 
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTrafficMirrorFilterRules.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTrafficMirrorFilterRules.go @@ -157,6 +157,9 @@ func (c *Client) addOperationDescribeTrafficMirrorFilterRulesMiddlewares(stack * if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeTrafficMirrorFilterRules(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTrafficMirrorFilters.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTrafficMirrorFilters.go index 399e9e1a0..ce1db7e72 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTrafficMirrorFilters.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTrafficMirrorFilters.go @@ -134,6 +134,9 @@ func (c *Client) addOperationDescribeTrafficMirrorFiltersMiddlewares(stack *midd if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeTrafficMirrorFilters(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTrafficMirrorSessions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTrafficMirrorSessions.go index 8de5f4c01..5146c65fb 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTrafficMirrorSessions.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTrafficMirrorSessions.go @@ -151,6 +151,9 @@ func (c *Client) addOperationDescribeTrafficMirrorSessionsMiddlewares(stack *mid if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeTrafficMirrorSessions(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTrafficMirrorTargets.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTrafficMirrorTargets.go index 43dd9175d..a36cc59ff 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTrafficMirrorTargets.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTrafficMirrorTargets.go @@ -142,6 +142,9 @@ func (c *Client) addOperationDescribeTrafficMirrorTargetsMiddlewares(stack *midd if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeTrafficMirrorTargets(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGatewayAttachments.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGatewayAttachments.go index d717e5384..6e4406be0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGatewayAttachments.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGatewayAttachments.go @@ -157,6 +157,9 @@ func (c *Client) 
addOperationDescribeTransitGatewayAttachmentsMiddlewares(stack if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeTransitGatewayAttachments(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGatewayConnectPeers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGatewayConnectPeers.go index f173ca05d..e09b552c5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGatewayConnectPeers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGatewayConnectPeers.go @@ -137,6 +137,9 @@ func (c *Client) addOperationDescribeTransitGatewayConnectPeersMiddlewares(stack if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeTransitGatewayConnectPeers(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGatewayConnects.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGatewayConnects.go index 87013f5b7..4af640cac 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGatewayConnects.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGatewayConnects.go @@ -143,6 +143,9 @@ func (c *Client) addOperationDescribeTransitGatewayConnectsMiddlewares(stack *mi if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeTransitGatewayConnects(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGatewayMulticastDomains.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGatewayMulticastDomains.go index 0f6f23b78..60d911ac6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGatewayMulticastDomains.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGatewayMulticastDomains.go @@ -138,6 +138,9 @@ func (c *Client) addOperationDescribeTransitGatewayMulticastDomainsMiddlewares(s if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeTransitGatewayMulticastDomains(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGatewayPeeringAttachments.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGatewayPeeringAttachments.go index 7fd671df7..e15d5c6e4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGatewayPeeringAttachments.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGatewayPeeringAttachments.go @@ -151,6 +151,9 @@ func (c *Client) addOperationDescribeTransitGatewayPeeringAttachmentsMiddlewares if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = 
addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeTransitGatewayPeeringAttachments(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGatewayPolicyTables.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGatewayPolicyTables.go index 106b8dcd9..60fe0dcf7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGatewayPolicyTables.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGatewayPolicyTables.go @@ -129,6 +129,9 @@ func (c *Client) addOperationDescribeTransitGatewayPolicyTablesMiddlewares(stack if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeTransitGatewayPolicyTables(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGatewayRouteTableAnnouncements.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGatewayRouteTableAnnouncements.go index 478724c20..ce275c6e7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGatewayRouteTableAnnouncements.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGatewayRouteTableAnnouncements.go @@ -129,6 +129,9 @@ func (c *Client) addOperationDescribeTransitGatewayRouteTableAnnouncementsMiddle if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeTransitGatewayRouteTableAnnouncements(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGatewayRouteTables.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGatewayRouteTables.go index 2e334e889..93217a6e3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGatewayRouteTables.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGatewayRouteTables.go @@ -144,6 +144,9 @@ func (c *Client) addOperationDescribeTransitGatewayRouteTablesMiddlewares(stack if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeTransitGatewayRouteTables(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGatewayVpcAttachments.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGatewayVpcAttachments.go index 237b221fb..0a540fe13 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGatewayVpcAttachments.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGatewayVpcAttachments.go @@ -141,6 +141,9 @@ func (c *Client) addOperationDescribeTransitGatewayVpcAttachmentsMiddlewares(sta if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = 
stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeTransitGatewayVpcAttachments(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGateways.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGateways.go index 190bc4c45..1e2990d94 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGateways.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTransitGateways.go @@ -171,6 +171,9 @@ func (c *Client) addOperationDescribeTransitGatewaysMiddlewares(stack *middlewar if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeTransitGateways(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTrunkInterfaceAssociations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTrunkInterfaceAssociations.go index 2c0c72ae8..a97b3b82d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTrunkInterfaceAssociations.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeTrunkInterfaceAssociations.go @@ -134,6 +134,9 @@ func (c *Client) addOperationDescribeTrunkInterfaceAssociationsMiddlewares(stack if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeTrunkInterfaceAssociations(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVerifiedAccessEndpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVerifiedAccessEndpoints.go index e25bb676e..cbe9f60a2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVerifiedAccessEndpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVerifiedAccessEndpoints.go @@ -136,6 +136,9 @@ func (c *Client) addOperationDescribeVerifiedAccessEndpointsMiddlewares(stack *m if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeVerifiedAccessEndpoints(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVerifiedAccessGroups.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVerifiedAccessGroups.go index 98589f86a..e51f27b5b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVerifiedAccessGroups.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVerifiedAccessGroups.go @@ -133,6 +133,9 @@ func (c *Client) addOperationDescribeVerifiedAccessGroupsMiddlewares(stack *midd if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeVerifiedAccessGroups(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVerifiedAccessInstanceLoggingConfigurations.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVerifiedAccessInstanceLoggingConfigurations.go index c78b02181..bb555f04f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVerifiedAccessInstanceLoggingConfigurations.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVerifiedAccessInstanceLoggingConfigurations.go @@ -130,6 +130,9 @@ func (c *Client) addOperationDescribeVerifiedAccessInstanceLoggingConfigurations if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeVerifiedAccessInstanceLoggingConfigurations(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVerifiedAccessInstances.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVerifiedAccessInstances.go index 2d8e6befb..7e1114ba3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVerifiedAccessInstances.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVerifiedAccessInstances.go @@ -130,6 +130,9 @@ func (c *Client) addOperationDescribeVerifiedAccessInstancesMiddlewares(stack *m if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeVerifiedAccessInstances(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVerifiedAccessTrustProviders.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVerifiedAccessTrustProviders.go index 9697a9665..52f195203 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVerifiedAccessTrustProviders.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVerifiedAccessTrustProviders.go @@ -130,6 +130,9 @@ func (c *Client) addOperationDescribeVerifiedAccessTrustProvidersMiddlewares(sta if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeVerifiedAccessTrustProviders(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVolumeAttribute.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVolumeAttribute.go index bcbdc0cfd..5834d5dae 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVolumeAttribute.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVolumeAttribute.go @@ -134,6 +134,9 @@ func (c *Client) addOperationDescribeVolumeAttributeMiddlewares(stack *middlewar if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDescribeVolumeAttributeValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVolumeStatus.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVolumeStatus.go index b16c79435..fa6238e61 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVolumeStatus.go +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVolumeStatus.go @@ -205,6 +205,9 @@ func (c *Client) addOperationDescribeVolumeStatusMiddlewares(stack *middleware.S if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeVolumeStatus(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVolumes.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVolumes.go index 70768e150..13b6f6246 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVolumes.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVolumes.go @@ -204,6 +204,9 @@ func (c *Client) addOperationDescribeVolumesMiddlewares(stack *middleware.Stack, if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeVolumes(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVolumesModifications.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVolumesModifications.go index 3bf070224..69e2e7ad8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVolumesModifications.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVolumesModifications.go @@ -165,6 +165,9 @@ func (c *Client) addOperationDescribeVolumesModificationsMiddlewares(stack *midd if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeVolumesModifications(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcAttribute.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcAttribute.go index b5e3dc63c..8dfa29556 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcAttribute.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcAttribute.go @@ -137,6 +137,9 @@ func (c *Client) addOperationDescribeVpcAttributeMiddlewares(stack *middleware.S if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDescribeVpcAttributeValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcBlockPublicAccessExclusions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcBlockPublicAccessExclusions.go index 5a7a87536..5f7868172 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcBlockPublicAccessExclusions.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcBlockPublicAccessExclusions.go @@ -162,6 +162,9 @@ func (c *Client) addOperationDescribeVpcBlockPublicAccessExclusionsMiddlewares(s if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = 
stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeVpcBlockPublicAccessExclusions(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcBlockPublicAccessOptions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcBlockPublicAccessOptions.go index 0a6b955d4..16267e889 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcBlockPublicAccessOptions.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcBlockPublicAccessOptions.go @@ -119,6 +119,9 @@ func (c *Client) addOperationDescribeVpcBlockPublicAccessOptionsMiddlewares(stac if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeVpcBlockPublicAccessOptions(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcClassicLink.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcClassicLink.go index 68f82bcd4..3e61ed0b0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcClassicLink.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcClassicLink.go @@ -132,6 +132,9 @@ func (c *Client) addOperationDescribeVpcClassicLinkMiddlewares(stack *middleware if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeVpcClassicLink(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcClassicLinkDnsSupport.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcClassicLinkDnsSupport.go index 81d4beb7d..f68bed780 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcClassicLinkDnsSupport.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcClassicLinkDnsSupport.go @@ -131,6 +131,9 @@ func (c *Client) addOperationDescribeVpcClassicLinkDnsSupportMiddlewares(stack * if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeVpcClassicLinkDnsSupport(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcEndpointAssociations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcEndpointAssociations.go index 7b1d7f02f..e8b27f231 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcEndpointAssociations.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcEndpointAssociations.go @@ -147,6 +147,9 @@ func (c *Client) addOperationDescribeVpcEndpointAssociationsMiddlewares(stack *m if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeVpcEndpointAssociations(options.Region), middleware.Before); err != nil { return err } diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcEndpointConnectionNotifications.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcEndpointConnectionNotifications.go index 3f1ac7b20..01ab89185 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcEndpointConnectionNotifications.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcEndpointConnectionNotifications.go @@ -144,6 +144,9 @@ func (c *Client) addOperationDescribeVpcEndpointConnectionNotificationsMiddlewar if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeVpcEndpointConnectionNotifications(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcEndpointConnections.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcEndpointConnections.go index cb7553f35..cc75904c3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcEndpointConnections.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcEndpointConnections.go @@ -145,6 +145,9 @@ func (c *Client) addOperationDescribeVpcEndpointConnectionsMiddlewares(stack *mi if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeVpcEndpointConnections(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcEndpointServiceConfigurations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcEndpointServiceConfigurations.go index f8a5147f5..55f818ffe 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcEndpointServiceConfigurations.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcEndpointServiceConfigurations.go @@ -150,6 +150,9 @@ func (c *Client) addOperationDescribeVpcEndpointServiceConfigurationsMiddlewares if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeVpcEndpointServiceConfigurations(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcEndpointServicePermissions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcEndpointServicePermissions.go index 1382b07f1..00e21004e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcEndpointServicePermissions.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcEndpointServicePermissions.go @@ -140,6 +140,9 @@ func (c *Client) addOperationDescribeVpcEndpointServicePermissionsMiddlewares(st if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDescribeVpcEndpointServicePermissionsValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcEndpointServices.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcEndpointServices.go index 005f15e14..5485d0018 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcEndpointServices.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcEndpointServices.go @@ -166,6 +166,9 @@ func (c *Client) addOperationDescribeVpcEndpointServicesMiddlewares(stack *middl if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeVpcEndpointServices(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcEndpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcEndpoints.go index eab7c6377..304b7b895 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcEndpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcEndpoints.go @@ -159,6 +159,9 @@ func (c *Client) addOperationDescribeVpcEndpointsMiddlewares(stack *middleware.S if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeVpcEndpoints(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcPeeringConnections.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcPeeringConnections.go index 5c1e09e0d..29ae4ebd8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcPeeringConnections.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcPeeringConnections.go @@ -177,6 +177,9 @@ func (c *Client) addOperationDescribeVpcPeeringConnectionsMiddlewares(stack *mid if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeVpcPeeringConnections(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcs.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcs.go index ef2ec0350..09b4bdcda 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcs.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpcs.go @@ -184,6 +184,9 @@ func (c *Client) addOperationDescribeVpcsMiddlewares(stack *middleware.Stack, op if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeVpcs(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpnConnections.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpnConnections.go index b19ba019b..79cd9cfd4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpnConnections.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpnConnections.go @@ -168,6 +168,9 @@ func (c *Client) addOperationDescribeVpnConnectionsMiddlewares(stack *middleware if err = 
addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeVpnConnections(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpnGateways.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpnGateways.go index 41f769003..956fc7bf5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpnGateways.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DescribeVpnGateways.go @@ -155,6 +155,9 @@ func (c *Client) addOperationDescribeVpnGatewaysMiddlewares(stack *middleware.St if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeVpnGateways(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DetachClassicLinkVpc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DetachClassicLinkVpc.go index 9edc636b3..ac0911eaf 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DetachClassicLinkVpc.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DetachClassicLinkVpc.go @@ -126,6 +126,9 @@ func (c *Client) addOperationDetachClassicLinkVpcMiddlewares(stack *middleware.S if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDetachClassicLinkVpcValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DetachInternetGateway.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DetachInternetGateway.go index 553254c7c..f7768be5f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DetachInternetGateway.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DetachInternetGateway.go @@ -120,6 +120,9 @@ func (c *Client) addOperationDetachInternetGatewayMiddlewares(stack *middleware. 
if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDetachInternetGatewayValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DetachNetworkInterface.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DetachNetworkInterface.go index 2906182d2..02b589a58 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DetachNetworkInterface.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DetachNetworkInterface.go @@ -131,6 +131,9 @@ func (c *Client) addOperationDetachNetworkInterfaceMiddlewares(stack *middleware if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDetachNetworkInterfaceValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DetachVerifiedAccessTrustProvider.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DetachVerifiedAccessTrustProvider.go index 50623069e..76e0562bc 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DetachVerifiedAccessTrustProvider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DetachVerifiedAccessTrustProvider.go @@ -133,6 +133,9 @@ func (c *Client) addOperationDetachVerifiedAccessTrustProviderMiddlewares(stack if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addIdempotencyToken_opDetachVerifiedAccessTrustProviderMiddleware(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DetachVolume.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DetachVolume.go index be4631699..343ea9539 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DetachVolume.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DetachVolume.go @@ -182,6 +182,9 @@ func (c *Client) addOperationDetachVolumeMiddlewares(stack *middleware.Stack, op if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDetachVolumeValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DetachVpnGateway.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DetachVpnGateway.go index 21c94077f..71bfe7cc7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DetachVpnGateway.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DetachVpnGateway.go @@ -126,6 +126,9 @@ func (c *Client) addOperationDetachVpnGatewayMiddlewares(stack *middleware.Stack if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDetachVpnGatewayValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableAddressTransfer.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableAddressTransfer.go index 40735edfa..e6bb42eea 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableAddressTransfer.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableAddressTransfer.go @@ -121,6 +121,9 @@ 
func (c *Client) addOperationDisableAddressTransferMiddlewares(stack *middleware if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDisableAddressTransferValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableAllowedImagesSettings.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableAllowedImagesSettings.go index 982ae5582..48c950aac 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableAllowedImagesSettings.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableAllowedImagesSettings.go @@ -125,6 +125,9 @@ func (c *Client) addOperationDisableAllowedImagesSettingsMiddlewares(stack *midd if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDisableAllowedImagesSettings(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableAwsNetworkPerformanceMetricSubscription.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableAwsNetworkPerformanceMetricSubscription.go index 3d2066e2f..97e67a75e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableAwsNetworkPerformanceMetricSubscription.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableAwsNetworkPerformanceMetricSubscription.go @@ -127,6 +127,9 @@ func (c *Client) addOperationDisableAwsNetworkPerformanceMetricSubscriptionMiddl if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDisableAwsNetworkPerformanceMetricSubscription(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableEbsEncryptionByDefault.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableEbsEncryptionByDefault.go index 0d9dfbea2..ae1ee7b02 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableEbsEncryptionByDefault.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableEbsEncryptionByDefault.go @@ -122,6 +122,9 @@ func (c *Client) addOperationDisableEbsEncryptionByDefaultMiddlewares(stack *mid if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDisableEbsEncryptionByDefault(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableFastLaunch.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableFastLaunch.go index 1122f445c..d693db3cd 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableFastLaunch.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableFastLaunch.go @@ -158,6 +158,9 @@ func (c *Client) addOperationDisableFastLaunchMiddlewares(stack *middleware.Stac if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDisableFastLaunchValidationMiddleware(stack); err != nil { 
return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableFastSnapshotRestores.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableFastSnapshotRestores.go index 9360b20fa..284c8da6a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableFastSnapshotRestores.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableFastSnapshotRestores.go @@ -129,6 +129,9 @@ func (c *Client) addOperationDisableFastSnapshotRestoresMiddlewares(stack *middl if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDisableFastSnapshotRestoresValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableImage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableImage.go index 25b7494a2..e109e4aaf 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableImage.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableImage.go @@ -134,6 +134,9 @@ func (c *Client) addOperationDisableImageMiddlewares(stack *middleware.Stack, op if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDisableImageValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableImageBlockPublicAccess.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableImageBlockPublicAccess.go index 07a885d3f..d4dc61be3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableImageBlockPublicAccess.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableImageBlockPublicAccess.go @@ -125,6 +125,9 @@ func (c *Client) addOperationDisableImageBlockPublicAccessMiddlewares(stack *mid if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDisableImageBlockPublicAccess(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableImageDeprecation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableImageDeprecation.go index f2f76a0b3..234dfde61 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableImageDeprecation.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableImageDeprecation.go @@ -121,6 +121,9 @@ func (c *Client) addOperationDisableImageDeprecationMiddlewares(stack *middlewar if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDisableImageDeprecationValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableImageDeregistrationProtection.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableImageDeregistrationProtection.go index 04559cbc7..95068d3ca 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableImageDeregistrationProtection.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableImageDeregistrationProtection.go @@ -126,6 +126,9 @@ func (c *Client) addOperationDisableImageDeregistrationProtectionMiddlewares(sta if err = 
addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDisableImageDeregistrationProtectionValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableIpamOrganizationAdminAccount.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableIpamOrganizationAdminAccount.go index bb96a2cf6..ecb450eb3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableIpamOrganizationAdminAccount.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableIpamOrganizationAdminAccount.go @@ -120,6 +120,9 @@ func (c *Client) addOperationDisableIpamOrganizationAdminAccountMiddlewares(stac if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDisableIpamOrganizationAdminAccountValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableSerialConsoleAccess.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableSerialConsoleAccess.go index 8be4db5ae..18a80126e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableSerialConsoleAccess.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableSerialConsoleAccess.go @@ -118,6 +118,9 @@ func (c *Client) addOperationDisableSerialConsoleAccessMiddlewares(stack *middle if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDisableSerialConsoleAccess(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableSnapshotBlockPublicAccess.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableSnapshotBlockPublicAccess.go index ef375f63e..6a7134c62 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableSnapshotBlockPublicAccess.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableSnapshotBlockPublicAccess.go @@ -128,6 +128,9 @@ func (c *Client) addOperationDisableSnapshotBlockPublicAccessMiddlewares(stack * if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDisableSnapshotBlockPublicAccess(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableTransitGatewayRouteTablePropagation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableTransitGatewayRouteTablePropagation.go index c662f10bf..b09984fe6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableTransitGatewayRouteTablePropagation.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableTransitGatewayRouteTablePropagation.go @@ -125,6 +125,9 @@ func (c *Client) addOperationDisableTransitGatewayRouteTablePropagationMiddlewar if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDisableTransitGatewayRouteTablePropagationValidationMiddleware(stack); err != nil { return err } diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableVgwRoutePropagation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableVgwRoutePropagation.go index 832026429..227fe0f1d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableVgwRoutePropagation.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableVgwRoutePropagation.go @@ -120,6 +120,9 @@ func (c *Client) addOperationDisableVgwRoutePropagationMiddlewares(stack *middle if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDisableVgwRoutePropagationValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableVpcClassicLink.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableVpcClassicLink.go index e079a267a..73f51e901 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableVpcClassicLink.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableVpcClassicLink.go @@ -120,6 +120,9 @@ func (c *Client) addOperationDisableVpcClassicLinkMiddlewares(stack *middleware. if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDisableVpcClassicLinkValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableVpcClassicLinkDnsSupport.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableVpcClassicLinkDnsSupport.go index 38bd27d91..f5f2c0fbd 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableVpcClassicLinkDnsSupport.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisableVpcClassicLinkDnsSupport.go @@ -115,6 +115,9 @@ func (c *Client) addOperationDisableVpcClassicLinkDnsSupportMiddlewares(stack *m if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDisableVpcClassicLinkDnsSupport(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateAddress.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateAddress.go index a8197be45..3dfc1aab7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateAddress.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateAddress.go @@ -118,6 +118,9 @@ func (c *Client) addOperationDisassociateAddressMiddlewares(stack *middleware.St if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDisassociateAddress(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateCapacityReservationBillingOwner.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateCapacityReservationBillingOwner.go index 2d6667002..88d764f31 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateCapacityReservationBillingOwner.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateCapacityReservationBillingOwner.go @@ -126,6 +126,9 @@ 
func (c *Client) addOperationDisassociateCapacityReservationBillingOwnerMiddlewa if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDisassociateCapacityReservationBillingOwnerValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateClientVpnTargetNetwork.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateClientVpnTargetNetwork.go index bca624496..f3bb15c73 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateClientVpnTargetNetwork.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateClientVpnTargetNetwork.go @@ -135,6 +135,9 @@ func (c *Client) addOperationDisassociateClientVpnTargetNetworkMiddlewares(stack if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDisassociateClientVpnTargetNetworkValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateEnclaveCertificateIamRole.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateEnclaveCertificateIamRole.go index bc5271687..b833ea019 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateEnclaveCertificateIamRole.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateEnclaveCertificateIamRole.go @@ -127,6 +127,9 @@ func (c *Client) addOperationDisassociateEnclaveCertificateIamRoleMiddlewares(st if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDisassociateEnclaveCertificateIamRoleValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateIamInstanceProfile.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateIamInstanceProfile.go index ed00b32c5..5c17867eb 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateIamInstanceProfile.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateIamInstanceProfile.go @@ -114,6 +114,9 @@ func (c *Client) addOperationDisassociateIamInstanceProfileMiddlewares(stack *mi if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDisassociateIamInstanceProfileValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateInstanceEventWindow.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateInstanceEventWindow.go index d106a07e2..e4736d4de 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateInstanceEventWindow.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateInstanceEventWindow.go @@ -127,6 +127,9 @@ func (c *Client) addOperationDisassociateInstanceEventWindowMiddlewares(stack *m if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDisassociateInstanceEventWindowValidationMiddleware(stack); err != nil { return err } diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateIpamByoasn.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateIpamByoasn.go index 711ed063e..99237e515 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateIpamByoasn.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateIpamByoasn.go @@ -128,6 +128,9 @@ func (c *Client) addOperationDisassociateIpamByoasnMiddlewares(stack *middleware if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDisassociateIpamByoasnValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateIpamResourceDiscovery.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateIpamResourceDiscovery.go index bb2154a8f..7dd604072 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateIpamResourceDiscovery.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateIpamResourceDiscovery.go @@ -120,6 +120,9 @@ func (c *Client) addOperationDisassociateIpamResourceDiscoveryMiddlewares(stack if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDisassociateIpamResourceDiscoveryValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateNatGatewayAddress.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateNatGatewayAddress.go index c8e74930a..d084720b9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateNatGatewayAddress.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateNatGatewayAddress.go @@ -144,6 +144,9 @@ func (c *Client) addOperationDisassociateNatGatewayAddressMiddlewares(stack *mid if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDisassociateNatGatewayAddressValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateRouteTable.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateRouteTable.go index 549a1f25d..3961e9ec6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateRouteTable.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateRouteTable.go @@ -120,6 +120,9 @@ func (c *Client) addOperationDisassociateRouteTableMiddlewares(stack *middleware if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDisassociateRouteTableValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateSecurityGroupVpc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateSecurityGroupVpc.go index 2aa815e13..1f8b2088e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateSecurityGroupVpc.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateSecurityGroupVpc.go @@ -130,6 +130,9 @@ func (c *Client) addOperationDisassociateSecurityGroupVpcMiddlewares(stack *midd if err = addUserAgentRetryMode(stack, options); 
err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDisassociateSecurityGroupVpcValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateSubnetCidrBlock.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateSubnetCidrBlock.go index 3c017027f..61006bdc8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateSubnetCidrBlock.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateSubnetCidrBlock.go @@ -117,6 +117,9 @@ func (c *Client) addOperationDisassociateSubnetCidrBlockMiddlewares(stack *middl if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDisassociateSubnetCidrBlockValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateTransitGatewayMulticastDomain.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateTransitGatewayMulticastDomain.go index 0e5ed3d13..ec2ef2cc6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateTransitGatewayMulticastDomain.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateTransitGatewayMulticastDomain.go @@ -128,6 +128,9 @@ func (c *Client) addOperationDisassociateTransitGatewayMulticastDomainMiddleware if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDisassociateTransitGatewayMulticastDomainValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateTransitGatewayPolicyTable.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateTransitGatewayPolicyTable.go index ba20348e7..9f6293829 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateTransitGatewayPolicyTable.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateTransitGatewayPolicyTable.go @@ -123,6 +123,9 @@ func (c *Client) addOperationDisassociateTransitGatewayPolicyTableMiddlewares(st if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDisassociateTransitGatewayPolicyTableValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateTransitGatewayRouteTable.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateTransitGatewayRouteTable.go index ca02a05c2..328e067d1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateTransitGatewayRouteTable.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateTransitGatewayRouteTable.go @@ -123,6 +123,9 @@ func (c *Client) addOperationDisassociateTransitGatewayRouteTableMiddlewares(sta if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDisassociateTransitGatewayRouteTableValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateTrunkInterface.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateTrunkInterface.go index 7de09a924..1ad27ed39 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateTrunkInterface.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateTrunkInterface.go @@ -130,6 +130,9 @@ func (c *Client) addOperationDisassociateTrunkInterfaceMiddlewares(stack *middle if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addIdempotencyToken_opDisassociateTrunkInterfaceMiddleware(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateVpcCidrBlock.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateVpcCidrBlock.go index f7e2d7112..7373a7a2b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateVpcCidrBlock.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_DisassociateVpcCidrBlock.go @@ -124,6 +124,9 @@ func (c *Client) addOperationDisassociateVpcCidrBlockMiddlewares(stack *middlewa if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDisassociateVpcCidrBlockValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableAddressTransfer.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableAddressTransfer.go index dff886a82..2639464f5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableAddressTransfer.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableAddressTransfer.go @@ -126,6 +126,9 @@ func (c *Client) addOperationEnableAddressTransferMiddlewares(stack *middleware. 
if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpEnableAddressTransferValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableAllowedImagesSettings.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableAllowedImagesSettings.go index ce3c87e4d..ef61e08b6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableAllowedImagesSettings.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableAllowedImagesSettings.go @@ -146,6 +146,9 @@ func (c *Client) addOperationEnableAllowedImagesSettingsMiddlewares(stack *middl if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpEnableAllowedImagesSettingsValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableAwsNetworkPerformanceMetricSubscription.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableAwsNetworkPerformanceMetricSubscription.go index a9994b439..8e5ba6700 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableAwsNetworkPerformanceMetricSubscription.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableAwsNetworkPerformanceMetricSubscription.go @@ -129,6 +129,9 @@ func (c *Client) addOperationEnableAwsNetworkPerformanceMetricSubscriptionMiddle if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opEnableAwsNetworkPerformanceMetricSubscription(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableEbsEncryptionByDefault.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableEbsEncryptionByDefault.go index 0d5191d24..e9ada8f0d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableEbsEncryptionByDefault.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableEbsEncryptionByDefault.go @@ -128,6 +128,9 @@ func (c *Client) addOperationEnableEbsEncryptionByDefaultMiddlewares(stack *midd if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opEnableEbsEncryptionByDefault(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableFastLaunch.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableFastLaunch.go index aca1eb5f9..c0d164578 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableFastLaunch.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableFastLaunch.go @@ -176,6 +176,9 @@ func (c *Client) addOperationEnableFastLaunchMiddlewares(stack *middleware.Stack if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpEnableFastLaunchValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableFastSnapshotRestores.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableFastSnapshotRestores.go index e058f8143..57c77bbe1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableFastSnapshotRestores.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableFastSnapshotRestores.go @@ -139,6 +139,9 @@ func (c *Client) addOperationEnableFastSnapshotRestoresMiddlewares(stack *middle if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpEnableFastSnapshotRestoresValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableImage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableImage.go index 151564e4b..1ce5221d2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableImage.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableImage.go @@ -127,6 +127,9 @@ func (c *Client) addOperationEnableImageMiddlewares(stack *middleware.Stack, opt if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpEnableImageValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableImageBlockPublicAccess.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableImageBlockPublicAccess.go index b81df494b..7209bc43b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableImageBlockPublicAccess.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableImageBlockPublicAccess.go @@ -132,6 +132,9 @@ func (c *Client) addOperationEnableImageBlockPublicAccessMiddlewares(stack *midd if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpEnableImageBlockPublicAccessValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableImageDeprecation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableImageDeprecation.go index c20d5f141..33a3d746c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableImageDeprecation.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableImageDeprecation.go @@ -133,6 +133,9 @@ func (c *Client) addOperationEnableImageDeprecationMiddlewares(stack *middleware if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpEnableImageDeprecationValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableImageDeregistrationProtection.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableImageDeregistrationProtection.go index 0123c49aa..8e75e272b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableImageDeregistrationProtection.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableImageDeregistrationProtection.go @@ -129,6 +129,9 @@ func (c *Client) addOperationEnableImageDeregistrationProtectionMiddlewares(stac if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = 
addOpEnableImageDeregistrationProtectionValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableIpamOrganizationAdminAccount.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableIpamOrganizationAdminAccount.go index 1bc1e7df3..1d2cd5a2b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableIpamOrganizationAdminAccount.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableIpamOrganizationAdminAccount.go @@ -121,6 +121,9 @@ func (c *Client) addOperationEnableIpamOrganizationAdminAccountMiddlewares(stack if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpEnableIpamOrganizationAdminAccountValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableReachabilityAnalyzerOrganizationSharing.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableReachabilityAnalyzerOrganizationSharing.go index 90c139d64..a4ae0990f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableReachabilityAnalyzerOrganizationSharing.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableReachabilityAnalyzerOrganizationSharing.go @@ -118,6 +118,9 @@ func (c *Client) addOperationEnableReachabilityAnalyzerOrganizationSharingMiddle if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opEnableReachabilityAnalyzerOrganizationSharing(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableSerialConsoleAccess.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableSerialConsoleAccess.go index 47b3b2bc8..942421c62 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableSerialConsoleAccess.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableSerialConsoleAccess.go @@ -118,6 +118,9 @@ func (c *Client) addOperationEnableSerialConsoleAccessMiddlewares(stack *middlew if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opEnableSerialConsoleAccess(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableSnapshotBlockPublicAccess.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableSnapshotBlockPublicAccess.go index 8f99b1961..2333c0264 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableSnapshotBlockPublicAccess.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableSnapshotBlockPublicAccess.go @@ -150,6 +150,9 @@ func (c *Client) addOperationEnableSnapshotBlockPublicAccessMiddlewares(stack *m if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpEnableSnapshotBlockPublicAccessValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableTransitGatewayRouteTablePropagation.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableTransitGatewayRouteTablePropagation.go index 1814aeded..aa3159320 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableTransitGatewayRouteTablePropagation.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableTransitGatewayRouteTablePropagation.go @@ -125,6 +125,9 @@ func (c *Client) addOperationEnableTransitGatewayRouteTablePropagationMiddleware if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpEnableTransitGatewayRouteTablePropagationValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableVgwRoutePropagation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableVgwRoutePropagation.go index 4a2a82d5e..05d0131a3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableVgwRoutePropagation.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableVgwRoutePropagation.go @@ -123,6 +123,9 @@ func (c *Client) addOperationEnableVgwRoutePropagationMiddlewares(stack *middlew if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpEnableVgwRoutePropagationValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableVolumeIO.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableVolumeIO.go index 31366afbe..10c65f795 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableVolumeIO.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableVolumeIO.go @@ -114,6 +114,9 @@ func (c *Client) addOperationEnableVolumeIOMiddlewares(stack *middleware.Stack, if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpEnableVolumeIOValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableVpcClassicLink.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableVpcClassicLink.go index ed1effeac..1bc2c50b4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableVpcClassicLink.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableVpcClassicLink.go @@ -124,6 +124,9 @@ func (c *Client) addOperationEnableVpcClassicLinkMiddlewares(stack *middleware.S if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpEnableVpcClassicLinkValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableVpcClassicLinkDnsSupport.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableVpcClassicLinkDnsSupport.go index b8a65d1d0..9f7f0856b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableVpcClassicLinkDnsSupport.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_EnableVpcClassicLinkDnsSupport.go @@ -117,6 +117,9 @@ func (c *Client) addOperationEnableVpcClassicLinkDnsSupportMiddlewares(stack *mi if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = 
stack.Initialize.Add(newServiceMetadataMiddleware_opEnableVpcClassicLinkDnsSupport(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ExportClientVpnClientCertificateRevocationList.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ExportClientVpnClientCertificateRevocationList.go index e2e2a937e..c1ee512d6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ExportClientVpnClientCertificateRevocationList.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ExportClientVpnClientCertificateRevocationList.go @@ -122,6 +122,9 @@ func (c *Client) addOperationExportClientVpnClientCertificateRevocationListMiddl if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpExportClientVpnClientCertificateRevocationListValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ExportClientVpnClientConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ExportClientVpnClientConfiguration.go index 503ba338c..2a4b2c6c6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ExportClientVpnClientConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ExportClientVpnClientConfiguration.go @@ -120,6 +120,9 @@ func (c *Client) addOperationExportClientVpnClientConfigurationMiddlewares(stack if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpExportClientVpnClientConfigurationValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ExportImage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ExportImage.go index 665788765..ffd9f2e32 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ExportImage.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ExportImage.go @@ -175,6 +175,9 @@ func (c *Client) addOperationExportImageMiddlewares(stack *middleware.Stack, opt if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addIdempotencyToken_opExportImageMiddleware(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ExportTransitGatewayRoutes.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ExportTransitGatewayRoutes.go index 2109d3c46..4761de005 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ExportTransitGatewayRoutes.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ExportTransitGatewayRoutes.go @@ -158,6 +158,9 @@ func (c *Client) addOperationExportTransitGatewayRoutesMiddlewares(stack *middle if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpExportTransitGatewayRoutesValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ExportVerifiedAccessInstanceClientConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ExportVerifiedAccessInstanceClientConfiguration.go index 3367bc623..4aafbe3c6 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ExportVerifiedAccessInstanceClientConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ExportVerifiedAccessInstanceClientConfiguration.go @@ -133,6 +133,9 @@ func (c *Client) addOperationExportVerifiedAccessInstanceClientConfigurationMidd if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpExportVerifiedAccessInstanceClientConfigurationValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetAllowedImagesSettings.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetAllowedImagesSettings.go index fc37d2eb1..75a40caf2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetAllowedImagesSettings.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetAllowedImagesSettings.go @@ -146,6 +146,9 @@ func (c *Client) addOperationGetAllowedImagesSettingsMiddlewares(stack *middlewa if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetAllowedImagesSettings(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetAssociatedEnclaveCertificateIamRoles.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetAssociatedEnclaveCertificateIamRoles.go index fbb024158..6fbad3128 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetAssociatedEnclaveCertificateIamRoles.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetAssociatedEnclaveCertificateIamRoles.go @@ -123,6 +123,9 @@ func (c *Client) addOperationGetAssociatedEnclaveCertificateIamRolesMiddlewares( if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetAssociatedEnclaveCertificateIamRolesValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetAssociatedIpv6PoolCidrs.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetAssociatedIpv6PoolCidrs.go index e93c22088..d43e72399 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetAssociatedIpv6PoolCidrs.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetAssociatedIpv6PoolCidrs.go @@ -130,6 +130,9 @@ func (c *Client) addOperationGetAssociatedIpv6PoolCidrsMiddlewares(stack *middle if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetAssociatedIpv6PoolCidrsValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetAwsNetworkPerformanceData.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetAwsNetworkPerformanceData.go index d094d98e5..9274dc314 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetAwsNetworkPerformanceData.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetAwsNetworkPerformanceData.go @@ -136,6 +136,9 @@ func (c *Client) addOperationGetAwsNetworkPerformanceDataMiddlewares(stack *midd if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = 
addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetAwsNetworkPerformanceData(options.Region), middleware.Before); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetCapacityReservationUsage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetCapacityReservationUsage.go
index cb8c4f757..69b2f8605 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetCapacityReservationUsage.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetCapacityReservationUsage.go
@@ -95,22 +95,28 @@ type GetCapacityReservationUsageOutput struct {
 	// to request parameters that are not valid, capacity constraints, or instance
 	// limit constraints. You can view a failed request for 60 minutes.
 	//
-	//   - scheduled - (Future-dated Capacity Reservations only) The future-dated
-	//   Capacity Reservation request was approved and the Capacity Reservation is
-	//   scheduled for delivery on the requested start date.
+	//   - scheduled - (Future-dated Capacity Reservations) The future-dated Capacity
+	//   Reservation request was approved and the Capacity Reservation is scheduled for
+	//   delivery on the requested start date.
 	//
-	//   - assessing - (Future-dated Capacity Reservations only) Amazon EC2 is
-	//   assessing your request for a future-dated Capacity Reservation.
+	//   - payment-pending - (Capacity Blocks) The upfront payment has not been
+	//   processed yet.
 	//
-	//   - delayed - (Future-dated Capacity Reservations only) Amazon EC2 encountered a
+	//   - payment-failed - (Capacity Blocks) The upfront payment was not processed in
+	//   the 12-hour time frame. Your Capacity Block was released.
+	//
+	//   - assessing - (Future-dated Capacity Reservations) Amazon EC2 is assessing
+	//   your request for a future-dated Capacity Reservation.
+	//
+	//   - delayed - (Future-dated Capacity Reservations) Amazon EC2 encountered a
 	// delay in provisioning the requested future-dated Capacity Reservation. Amazon
 	// EC2 is unable to deliver the requested capacity by the requested start date and
 	// time.
 	//
-	//   - unsupported - (Future-dated Capacity Reservations only) Amazon EC2 can't
-	//   support the future-dated Capacity Reservation request due to capacity
-	//   constraints. You can view unsupported requests for 30 days. The Capacity
-	//   Reservation will not be delivered.
+	//   - unsupported - (Future-dated Capacity Reservations) Amazon EC2 can't support
+	//   the future-dated Capacity Reservation request due to capacity constraints. You
+	//   can view unsupported requests for 30 days. The Capacity Reservation will not be
+	//   delivered.
 	State types.CapacityReservationState

 	// The number of instances for which the Capacity Reservation reserves capacity.
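The doc-comment hunk above enumerates the string values that the output's State field can report. Since types.CapacityReservationState in the generated ec2 types package is a string-backed enum, a caller can branch on the documented raw values directly. A minimal sketch under that assumption; the helper describeReservationState and its one-line summaries are illustrative, not SDK code:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/ec2/types"
)

// describeReservationState maps the states documented in the hunk above to
// short human-readable summaries. Comparing raw string values avoids
// depending on specific generated enum constants.
func describeReservationState(s types.CapacityReservationState) string {
	switch string(s) {
	case "scheduled":
		return "future-dated request approved; delivery scheduled for the start date"
	case "payment-pending":
		return "Capacity Block upfront payment not processed yet"
	case "payment-failed":
		return "payment missed the 12-hour window; Capacity Block released"
	case "assessing":
		return "EC2 is assessing the future-dated request"
	case "delayed":
		return "EC2 cannot deliver the capacity by the requested start date"
	case "unsupported":
		return "request not supported due to capacity constraints; visible for 30 days"
	default:
		return "other state: " + string(s)
	}
}

func main() {
	fmt.Println(describeReservationState(types.CapacityReservationState("payment-pending")))
}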
@@ -186,6 +192,9 @@ func (c *Client) addOperationGetCapacityReservationUsageMiddlewares(stack *middl if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetCapacityReservationUsageValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetCoipPoolUsage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetCoipPoolUsage.go index fcc348524..0d8d7a4c6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetCoipPoolUsage.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetCoipPoolUsage.go @@ -148,6 +148,9 @@ func (c *Client) addOperationGetCoipPoolUsageMiddlewares(stack *middleware.Stack if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetCoipPoolUsageValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetConsoleOutput.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetConsoleOutput.go index 9cf47ba49..7f185b9bc 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetConsoleOutput.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetConsoleOutput.go @@ -137,6 +137,9 @@ func (c *Client) addOperationGetConsoleOutputMiddlewares(stack *middleware.Stack if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetConsoleOutputValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetConsoleScreenshot.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetConsoleScreenshot.go index 2a4f842fa..e113f64d6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetConsoleScreenshot.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetConsoleScreenshot.go @@ -131,6 +131,9 @@ func (c *Client) addOperationGetConsoleScreenshotMiddlewares(stack *middleware.S if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetConsoleScreenshotValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetDeclarativePoliciesReportSummary.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetDeclarativePoliciesReportSummary.go index 1008ff84c..d4ce0b169 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetDeclarativePoliciesReportSummary.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetDeclarativePoliciesReportSummary.go @@ -162,6 +162,9 @@ func (c *Client) addOperationGetDeclarativePoliciesReportSummaryMiddlewares(stac if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetDeclarativePoliciesReportSummaryValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetDefaultCreditSpecification.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetDefaultCreditSpecification.go index be695ff72..10cc79343 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetDefaultCreditSpecification.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetDefaultCreditSpecification.go @@ -123,6 +123,9 @@ func (c *Client) addOperationGetDefaultCreditSpecificationMiddlewares(stack *mid if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetDefaultCreditSpecificationValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetEbsDefaultKmsKeyId.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetEbsDefaultKmsKeyId.go index 914cd940e..d5b131b6b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetEbsDefaultKmsKeyId.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetEbsDefaultKmsKeyId.go @@ -118,6 +118,9 @@ func (c *Client) addOperationGetEbsDefaultKmsKeyIdMiddlewares(stack *middleware. if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetEbsDefaultKmsKeyId(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetEbsEncryptionByDefault.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetEbsEncryptionByDefault.go index ebaad273d..2a0a14552 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetEbsEncryptionByDefault.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetEbsEncryptionByDefault.go @@ -121,6 +121,9 @@ func (c *Client) addOperationGetEbsEncryptionByDefaultMiddlewares(stack *middlew if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetEbsEncryptionByDefault(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetFlowLogsIntegrationTemplate.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetFlowLogsIntegrationTemplate.go index 64e42c138..bed8eadd9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetFlowLogsIntegrationTemplate.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetFlowLogsIntegrationTemplate.go @@ -145,6 +145,9 @@ func (c *Client) addOperationGetFlowLogsIntegrationTemplateMiddlewares(stack *mi if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetFlowLogsIntegrationTemplateValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetGroupsForCapacityReservation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetGroupsForCapacityReservation.go index 298f4f263..d7075fd85 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetGroupsForCapacityReservation.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetGroupsForCapacityReservation.go @@ -135,6 +135,9 @@ func (c *Client) addOperationGetGroupsForCapacityReservationMiddlewares(stack *m if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err 
+	}
 	if err = addOpGetGroupsForCapacityReservationValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetHostReservationPurchasePreview.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetHostReservationPurchasePreview.go
index f9a4b7d0c..f4bc61b4b 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetHostReservationPurchasePreview.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetHostReservationPurchasePreview.go
@@ -133,6 +133,9 @@ func (c *Client) addOperationGetHostReservationPurchasePreviewMiddlewares(stack
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpGetHostReservationPurchasePreviewValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetImageBlockPublicAccessState.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetImageBlockPublicAccessState.go
index 1e4ba67c5..ed6bb30c3 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetImageBlockPublicAccessState.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetImageBlockPublicAccessState.go
@@ -135,6 +135,9 @@ func (c *Client) addOperationGetImageBlockPublicAccessStateMiddlewares(stack *mi
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetImageBlockPublicAccessState(options.Region), middleware.Before); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetInstanceMetadataDefaults.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetInstanceMetadataDefaults.go
index ce5552c8c..44b922e06 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetInstanceMetadataDefaults.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetInstanceMetadataDefaults.go
@@ -118,6 +118,9 @@ func (c *Client) addOperationGetInstanceMetadataDefaultsMiddlewares(stack *middl
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetInstanceMetadataDefaults(options.Region), middleware.Before); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetInstanceTpmEkPub.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetInstanceTpmEkPub.go
index fc92fbf46..ee5c2cb6f 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetInstanceTpmEkPub.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetInstanceTpmEkPub.go
@@ -139,6 +139,9 @@ func (c *Client) addOperationGetInstanceTpmEkPubMiddlewares(stack *middleware.St
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpGetInstanceTpmEkPubValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetInstanceTypesFromInstanceRequirements.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetInstanceTypesFromInstanceRequirements.go
index 6e6e630b2..f43a3095b 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetInstanceTypesFromInstanceRequirements.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetInstanceTypesFromInstanceRequirements.go
@@ -157,6 +157,9 @@ func (c *Client) addOperationGetInstanceTypesFromInstanceRequirementsMiddlewares
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpGetInstanceTypesFromInstanceRequirementsValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetInstanceUefiData.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetInstanceUefiData.go
index 2c21019fd..81726ac3b 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetInstanceUefiData.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetInstanceUefiData.go
@@ -134,6 +134,9 @@ func (c *Client) addOperationGetInstanceUefiDataMiddlewares(stack *middleware.St
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpGetInstanceUefiDataValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetIpamAddressHistory.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetIpamAddressHistory.go
index 6c2b0f819..84b547046 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetIpamAddressHistory.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetIpamAddressHistory.go
@@ -153,6 +153,9 @@ func (c *Client) addOperationGetIpamAddressHistoryMiddlewares(stack *middleware.
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpGetIpamAddressHistoryValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetIpamDiscoveredAccounts.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetIpamDiscoveredAccounts.go
index 2a8e90f64..0ca614048 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetIpamDiscoveredAccounts.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetIpamDiscoveredAccounts.go
@@ -141,6 +141,9 @@ func (c *Client) addOperationGetIpamDiscoveredAccountsMiddlewares(stack *middlew
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpGetIpamDiscoveredAccountsValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetIpamDiscoveredPublicAddresses.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetIpamDiscoveredPublicAddresses.go
index 2ca35dcbf..d9195575b 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetIpamDiscoveredPublicAddresses.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetIpamDiscoveredPublicAddresses.go
@@ -141,6 +141,9 @@ func (c *Client) addOperationGetIpamDiscoveredPublicAddressesMiddlewares(stack *
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpGetIpamDiscoveredPublicAddressesValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetIpamDiscoveredResourceCidrs.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetIpamDiscoveredResourceCidrs.go
index dce50c28d..5fe6bc08f 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetIpamDiscoveredResourceCidrs.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetIpamDiscoveredResourceCidrs.go
@@ -141,6 +141,9 @@ func (c *Client) addOperationGetIpamDiscoveredResourceCidrsMiddlewares(stack *mi
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpGetIpamDiscoveredResourceCidrsValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetIpamPoolAllocations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetIpamPoolAllocations.go
index f7d517419..ef1d1f0ca 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetIpamPoolAllocations.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetIpamPoolAllocations.go
@@ -145,6 +145,9 @@ func (c *Client) addOperationGetIpamPoolAllocationsMiddlewares(stack *middleware
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpGetIpamPoolAllocationsValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetIpamPoolCidrs.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetIpamPoolCidrs.go
index 0932bc591..b0d8904f8 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetIpamPoolCidrs.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetIpamPoolCidrs.go
@@ -133,6 +133,9 @@ func (c *Client) addOperationGetIpamPoolCidrsMiddlewares(stack *middleware.Stack
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpGetIpamPoolCidrsValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetIpamResourceCidrs.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetIpamResourceCidrs.go
index 3b01fceaa..6fcebbdd7 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetIpamResourceCidrs.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetIpamResourceCidrs.go
@@ -152,6 +152,9 @@ func (c *Client) addOperationGetIpamResourceCidrsMiddlewares(stack *middleware.S
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpGetIpamResourceCidrsValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetLaunchTemplateData.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetLaunchTemplateData.go
index b2a300e03..3cf2728f8 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetLaunchTemplateData.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetLaunchTemplateData.go
@@ -126,6 +126,9 @@ func (c *Client) addOperationGetLaunchTemplateDataMiddlewares(stack *middleware.
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpGetLaunchTemplateDataValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetManagedPrefixListAssociations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetManagedPrefixListAssociations.go
index 372a4f212..2c72a29d8 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetManagedPrefixListAssociations.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetManagedPrefixListAssociations.go
@@ -130,6 +130,9 @@ func (c *Client) addOperationGetManagedPrefixListAssociationsMiddlewares(stack *
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpGetManagedPrefixListAssociationsValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetManagedPrefixListEntries.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetManagedPrefixListEntries.go
index 6fb3a857e..b9e52a382 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetManagedPrefixListEntries.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetManagedPrefixListEntries.go
@@ -133,6 +133,9 @@ func (c *Client) addOperationGetManagedPrefixListEntriesMiddlewares(stack *middl
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpGetManagedPrefixListEntriesValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetNetworkInsightsAccessScopeAnalysisFindings.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetNetworkInsightsAccessScopeAnalysisFindings.go
index 7477f407a..28a222611 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetNetworkInsightsAccessScopeAnalysisFindings.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetNetworkInsightsAccessScopeAnalysisFindings.go
@@ -135,6 +135,9 @@ func (c *Client) addOperationGetNetworkInsightsAccessScopeAnalysisFindingsMiddle
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpGetNetworkInsightsAccessScopeAnalysisFindingsValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetNetworkInsightsAccessScopeContent.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetNetworkInsightsAccessScopeContent.go
index 086e84360..c191bd80c 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetNetworkInsightsAccessScopeContent.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetNetworkInsightsAccessScopeContent.go
@@ -118,6 +118,9 @@ func (c *Client) addOperationGetNetworkInsightsAccessScopeContentMiddlewares(sta
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpGetNetworkInsightsAccessScopeContentValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetPasswordData.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetPasswordData.go
index 605f30c1a..9941ceda8 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetPasswordData.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetPasswordData.go
@@ -147,6 +147,9 @@ func (c *Client) addOperationGetPasswordDataMiddlewares(stack *middleware.Stack,
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpGetPasswordDataValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetReservedInstancesExchangeQuote.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetReservedInstancesExchangeQuote.go
index 691786315..f259cc0db 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetReservedInstancesExchangeQuote.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetReservedInstancesExchangeQuote.go
@@ -152,6 +152,9 @@ func (c *Client) addOperationGetReservedInstancesExchangeQuoteMiddlewares(stack
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpGetReservedInstancesExchangeQuoteValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetSecurityGroupsForVpc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetSecurityGroupsForVpc.go
index 3c23c1488..75d065c35 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetSecurityGroupsForVpc.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetSecurityGroupsForVpc.go
@@ -148,6 +148,9 @@ func (c *Client) addOperationGetSecurityGroupsForVpcMiddlewares(stack *middlewar
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpGetSecurityGroupsForVpcValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetSerialConsoleAccessStatus.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetSerialConsoleAccessStatus.go
index 765a5a1ae..763d97618 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetSerialConsoleAccessStatus.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetSerialConsoleAccessStatus.go
@@ -127,6 +127,9 @@ func (c *Client) addOperationGetSerialConsoleAccessStatusMiddlewares(stack *midd
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetSerialConsoleAccessStatus(options.Region), middleware.Before); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetSnapshotBlockPublicAccessState.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetSnapshotBlockPublicAccessState.go
index bc8b02851..e30b96096 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetSnapshotBlockPublicAccessState.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetSnapshotBlockPublicAccessState.go
@@ -138,6 +138,9 @@ func (c *Client) addOperationGetSnapshotBlockPublicAccessStateMiddlewares(stack
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetSnapshotBlockPublicAccessState(options.Region), middleware.Before); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetSpotPlacementScores.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetSpotPlacementScores.go
index 089fb4862..053b67154 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetSpotPlacementScores.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetSpotPlacementScores.go
@@ -190,6 +190,9 @@ func (c *Client) addOperationGetSpotPlacementScoresMiddlewares(stack *middleware
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpGetSpotPlacementScoresValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetSubnetCidrReservations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetSubnetCidrReservations.go
index df5f70d75..3d9822ec7 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetSubnetCidrReservations.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetSubnetCidrReservations.go
@@ -147,6 +147,9 @@ func (c *Client) addOperationGetSubnetCidrReservationsMiddlewares(stack *middlew
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpGetSubnetCidrReservationsValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetTransitGatewayAttachmentPropagations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetTransitGatewayAttachmentPropagations.go
index d77a84f69..96f7ca3cd 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetTransitGatewayAttachmentPropagations.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetTransitGatewayAttachmentPropagations.go
@@ -135,6 +135,9 @@ func (c *Client) addOperationGetTransitGatewayAttachmentPropagationsMiddlewares(
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpGetTransitGatewayAttachmentPropagationsValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetTransitGatewayMulticastDomainAssociations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetTransitGatewayMulticastDomainAssociations.go
index da8a47d17..7ad4876cb 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetTransitGatewayMulticastDomainAssociations.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetTransitGatewayMulticastDomainAssociations.go
@@ -144,6 +144,9 @@ func (c *Client) addOperationGetTransitGatewayMulticastDomainAssociationsMiddlew
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpGetTransitGatewayMulticastDomainAssociationsValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetTransitGatewayPolicyTableAssociations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetTransitGatewayPolicyTableAssociations.go
index c434418d2..17f7b8790 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetTransitGatewayPolicyTableAssociations.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetTransitGatewayPolicyTableAssociations.go
@@ -131,6 +131,9 @@ func (c *Client) addOperationGetTransitGatewayPolicyTableAssociationsMiddlewares
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpGetTransitGatewayPolicyTableAssociationsValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetTransitGatewayPolicyTableEntries.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetTransitGatewayPolicyTableEntries.go
index fd6fd8a39..ae1130e8c 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetTransitGatewayPolicyTableEntries.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetTransitGatewayPolicyTableEntries.go
@@ -128,6 +128,9 @@ func (c *Client) addOperationGetTransitGatewayPolicyTableEntriesMiddlewares(stac
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpGetTransitGatewayPolicyTableEntriesValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetTransitGatewayPrefixListReferences.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetTransitGatewayPrefixListReferences.go
index a210cf04f..cbd83aacf 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetTransitGatewayPrefixListReferences.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetTransitGatewayPrefixListReferences.go
@@ -150,6 +150,9 @@ func (c *Client) addOperationGetTransitGatewayPrefixListReferencesMiddlewares(st
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpGetTransitGatewayPrefixListReferencesValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetTransitGatewayRouteTableAssociations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetTransitGatewayRouteTableAssociations.go
index 6c12d186d..d3fea7c8c 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetTransitGatewayRouteTableAssociations.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetTransitGatewayRouteTableAssociations.go
@@ -140,6 +140,9 @@ func (c *Client) addOperationGetTransitGatewayRouteTableAssociationsMiddlewares(
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpGetTransitGatewayRouteTableAssociationsValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetTransitGatewayRouteTablePropagations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetTransitGatewayRouteTablePropagations.go
index 737cab399..f8cf786a3 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetTransitGatewayRouteTablePropagations.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetTransitGatewayRouteTablePropagations.go
@@ -140,6 +140,9 @@ func (c *Client) addOperationGetTransitGatewayRouteTablePropagationsMiddlewares(
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpGetTransitGatewayRouteTablePropagationsValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetVerifiedAccessEndpointPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetVerifiedAccessEndpointPolicy.go
index 4eba6ac15..fb46192f8 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetVerifiedAccessEndpointPolicy.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetVerifiedAccessEndpointPolicy.go
@@ -120,6 +120,9 @@ func (c *Client) addOperationGetVerifiedAccessEndpointPolicyMiddlewares(stack *m
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpGetVerifiedAccessEndpointPolicyValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetVerifiedAccessEndpointTargets.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetVerifiedAccessEndpointTargets.go
index 28b3ee1f6..eabdea908 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetVerifiedAccessEndpointTargets.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetVerifiedAccessEndpointTargets.go
@@ -129,6 +129,9 @@ func (c *Client) addOperationGetVerifiedAccessEndpointTargetsMiddlewares(stack *
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpGetVerifiedAccessEndpointTargetsValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetVerifiedAccessGroupPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetVerifiedAccessGroupPolicy.go
index efc63af8d..eb2bca1d5 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetVerifiedAccessGroupPolicy.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetVerifiedAccessGroupPolicy.go
@@ -120,6 +120,9 @@ func (c *Client) addOperationGetVerifiedAccessGroupPolicyMiddlewares(stack *midd
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpGetVerifiedAccessGroupPolicyValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetVpnConnectionDeviceSampleConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetVpnConnectionDeviceSampleConfiguration.go
index 4c5dfcff9..f715998b1 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetVpnConnectionDeviceSampleConfiguration.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetVpnConnectionDeviceSampleConfiguration.go
@@ -128,6 +128,9 @@ func (c *Client) addOperationGetVpnConnectionDeviceSampleConfigurationMiddleware
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpGetVpnConnectionDeviceSampleConfigurationValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetVpnConnectionDeviceTypes.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetVpnConnectionDeviceTypes.go
index 42e011d58..8212f0c21 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetVpnConnectionDeviceTypes.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetVpnConnectionDeviceTypes.go
@@ -141,6 +141,9 @@ func (c *Client) addOperationGetVpnConnectionDeviceTypesMiddlewares(stack *middl
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetVpnConnectionDeviceTypes(options.Region), middleware.Before); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetVpnTunnelReplacementStatus.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetVpnTunnelReplacementStatus.go
index 80ec3c869..a55e3d9b4 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetVpnTunnelReplacementStatus.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_GetVpnTunnelReplacementStatus.go
@@ -138,6 +138,9 @@ func (c *Client) addOperationGetVpnTunnelReplacementStatusMiddlewares(stack *mid
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpGetVpnTunnelReplacementStatusValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ImportClientVpnClientCertificateRevocationList.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ImportClientVpnClientCertificateRevocationList.go
index 007da9f71..f5ff8033d 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ImportClientVpnClientCertificateRevocationList.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ImportClientVpnClientCertificateRevocationList.go
@@ -131,6 +131,9 @@ func (c *Client) addOperationImportClientVpnClientCertificateRevocationListMiddl
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpImportClientVpnClientCertificateRevocationListValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ImportImage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ImportImage.go
index 999dcbed7..92bb8b38a 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ImportImage.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ImportImage.go
@@ -280,6 +280,9 @@ func (c *Client) addOperationImportImageMiddlewares(stack *middleware.Stack, opt
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opImportImage(options.Region), middleware.Before); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ImportInstance.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ImportInstance.go
index 596353d40..2f95e9140 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ImportInstance.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ImportInstance.go
@@ -141,6 +141,9 @@ func (c *Client) addOperationImportInstanceMiddlewares(stack *middleware.Stack,
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpImportInstanceValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ImportKeyPair.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ImportKeyPair.go
index 36b15cb49..aa86e1013 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ImportKeyPair.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ImportKeyPair.go
@@ -148,6 +148,9 @@ func (c *Client) addOperationImportKeyPairMiddlewares(stack *middleware.Stack, o
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpImportKeyPairValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ImportSnapshot.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ImportSnapshot.go
index 5d06aed13..f2219bf88 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ImportSnapshot.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ImportSnapshot.go
@@ -183,6 +183,9 @@ func (c *Client) addOperationImportSnapshotMiddlewares(stack *middleware.Stack,
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opImportSnapshot(options.Region), middleware.Before); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ImportVolume.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ImportVolume.go
index bd688e6af..a079c4f95 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ImportVolume.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ImportVolume.go
@@ -140,6 +140,9 @@ func (c *Client) addOperationImportVolumeMiddlewares(stack *middleware.Stack, op
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpImportVolumeValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ListImagesInRecycleBin.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ListImagesInRecycleBin.go
index 521ccbc44..274d87e22 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ListImagesInRecycleBin.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ListImagesInRecycleBin.go
@@ -135,6 +135,9 @@ func (c *Client) addOperationListImagesInRecycleBinMiddlewares(stack *middleware
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListImagesInRecycleBin(options.Region), middleware.Before); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ListSnapshotsInRecycleBin.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ListSnapshotsInRecycleBin.go
index 71ad9989f..34fc82851 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ListSnapshotsInRecycleBin.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ListSnapshotsInRecycleBin.go
@@ -132,6 +132,9 @@ func (c *Client) addOperationListSnapshotsInRecycleBinMiddlewares(stack *middlew
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListSnapshotsInRecycleBin(options.Region), middleware.Before); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_LockSnapshot.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_LockSnapshot.go
index 8dad17949..682fe3e19 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_LockSnapshot.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_LockSnapshot.go
@@ -236,6 +236,9 @@ func (c *Client) addOperationLockSnapshotMiddlewares(stack *middleware.Stack, op
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpLockSnapshotValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyAddressAttribute.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyAddressAttribute.go
index 45ceba406..84f8863e1 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyAddressAttribute.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyAddressAttribute.go
@@ -124,6 +124,9 @@ func (c *Client) addOperationModifyAddressAttributeMiddlewares(stack *middleware
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpModifyAddressAttributeValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyAvailabilityZoneGroup.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyAvailabilityZoneGroup.go
index dbb97a73b..afb33c524 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyAvailabilityZoneGroup.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyAvailabilityZoneGroup.go
@@ -126,6 +126,9 @@ func (c *Client) addOperationModifyAvailabilityZoneGroupMiddlewares(stack *middl
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpModifyAvailabilityZoneGroupValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyCapacityReservation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyCapacityReservation.go
index 8a90bd9fb..b0087674b 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyCapacityReservation.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyCapacityReservation.go
@@ -186,6 +186,9 @@ func (c *Client) addOperationModifyCapacityReservationMiddlewares(stack *middlew
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpModifyCapacityReservationValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyCapacityReservationFleet.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyCapacityReservationFleet.go
index e6214acd3..76bc192a8 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyCapacityReservationFleet.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyCapacityReservationFleet.go
@@ -152,6 +152,9 @@ func (c *Client) addOperationModifyCapacityReservationFleetMiddlewares(stack *mi
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpModifyCapacityReservationFleetValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyClientVpnEndpoint.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyClientVpnEndpoint.go
index d928a0ca6..edb57da49 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyClientVpnEndpoint.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyClientVpnEndpoint.go
@@ -188,6 +188,9 @@ func (c *Client) addOperationModifyClientVpnEndpointMiddlewares(stack *middlewar
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpModifyClientVpnEndpointValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyDefaultCreditSpecification.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyDefaultCreditSpecification.go
index 08f823783..a422898f6 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyDefaultCreditSpecification.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyDefaultCreditSpecification.go
@@ -140,6 +140,9 @@ func (c *Client) addOperationModifyDefaultCreditSpecificationMiddlewares(stack *
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpModifyDefaultCreditSpecificationValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyEbsDefaultKmsKeyId.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyEbsDefaultKmsKeyId.go
index 3fdf54c95..62ef2035d 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyEbsDefaultKmsKeyId.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyEbsDefaultKmsKeyId.go
@@ -152,6 +152,9 @@ func (c *Client) addOperationModifyEbsDefaultKmsKeyIdMiddlewares(stack *middlewa
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpModifyEbsDefaultKmsKeyIdValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyFleet.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyFleet.go
index c5d52869c..04e733958 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyFleet.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyFleet.go
@@ -163,6 +163,9 @@ func (c *Client) addOperationModifyFleetMiddlewares(stack *middleware.Stack, opt
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpModifyFleetValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyFpgaImageAttribute.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyFpgaImageAttribute.go
index de44a815d..b7b7d01d0 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyFpgaImageAttribute.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyFpgaImageAttribute.go
@@ -145,6 +145,9 @@ func (c *Client) addOperationModifyFpgaImageAttributeMiddlewares(stack *middlewa
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpModifyFpgaImageAttributeValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyHosts.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyHosts.go
index 06d20ca5d..db9e7e60a 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyHosts.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyHosts.go
@@ -158,6 +158,9 @@ func (c *Client) addOperationModifyHostsMiddlewares(stack *middleware.Stack, opt
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpModifyHostsValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyIdFormat.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyIdFormat.go
index e21bc683e..671b64a91 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyIdFormat.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyIdFormat.go
@@ -146,6 +146,9 @@ func (c *Client) addOperationModifyIdFormatMiddlewares(stack *middleware.Stack,
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpModifyIdFormatValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyIdentityIdFormat.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyIdentityIdFormat.go
index 3e2ee06bc..62072ad52 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyIdentityIdFormat.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyIdentityIdFormat.go
@@ -152,6 +152,9 @@ func (c *Client) addOperationModifyIdentityIdFormatMiddlewares(stack *middleware
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpModifyIdentityIdFormatValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyImageAttribute.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyImageAttribute.go
index c032ec984..3f92bf30c 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyImageAttribute.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyImageAttribute.go
@@ -176,6 +176,9 @@ func (c *Client) addOperationModifyImageAttributeMiddlewares(stack *middleware.S
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpModifyImageAttributeValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceAttribute.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceAttribute.go
index f57cfa1ad..82ef52664 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceAttribute.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceAttribute.go
@@ -228,6 +228,9 @@ func (c *Client) addOperationModifyInstanceAttributeMiddlewares(stack *middlewar
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpModifyInstanceAttributeValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceCapacityReservationAttributes.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceCapacityReservationAttributes.go
index 03b0dd7a1..58ab3247e 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceCapacityReservationAttributes.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceCapacityReservationAttributes.go
@@ -126,6 +126,9 @@ func (c *Client) addOperationModifyInstanceCapacityReservationAttributesMiddlewa
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpModifyInstanceCapacityReservationAttributesValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceCpuOptions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceCpuOptions.go
index bd73d7904..b5344013c 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceCpuOptions.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceCpuOptions.go
@@ -147,6 +147,9 @@ func (c *Client) addOperationModifyInstanceCpuOptionsMiddlewares(stack *middlewa
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpModifyInstanceCpuOptionsValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceCreditSpecification.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceCreditSpecification.go
index 454fdbea0..37d2ef4be 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceCreditSpecification.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceCreditSpecification.go
@@ -134,6 +134,9 @@ func (c *Client) addOperationModifyInstanceCreditSpecificationMiddlewares(stack
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpModifyInstanceCreditSpecificationValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceEventStartTime.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceEventStartTime.go
index 2ee34b239..448cd409b 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceEventStartTime.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceEventStartTime.go
@@ -129,6 +129,9 @@ func (c *Client) addOperationModifyInstanceEventStartTimeMiddlewares(stack *midd
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpModifyInstanceEventStartTimeValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceEventWindow.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceEventWindow.go
index dd1c903a4..7bf3386ba 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceEventWindow.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceEventWindow.go
@@ -160,6 +160,9 @@ func (c *Client) addOperationModifyInstanceEventWindowMiddlewares(stack *middlew
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpModifyInstanceEventWindowValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceMaintenanceOptions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceMaintenanceOptions.go
index 778d6de24..907c0cd1d 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceMaintenanceOptions.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceMaintenanceOptions.go
@@ -130,6 +130,9 @@ func (c *Client) addOperationModifyInstanceMaintenanceOptionsMiddlewares(stack *
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpModifyInstanceMaintenanceOptionsValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceMetadataDefaults.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceMetadataDefaults.go
index 9ed51b509..a1b3e5b75 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceMetadataDefaults.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceMetadataDefaults.go
@@ -147,6 +147,9 @@ func (c *Client) addOperationModifyInstanceMetadataDefaultsMiddlewares(stack *mi
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opModifyInstanceMetadataDefaults(options.Region), middleware.Before); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceMetadataOptions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceMetadataOptions.go
index e0ce9b349..476a09559 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceMetadataOptions.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceMetadataOptions.go
@@ -182,6 +182,9 @@ func (c *Client) addOperationModifyInstanceMetadataOptionsMiddlewares(stack *mid
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpModifyInstanceMetadataOptionsValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceNetworkPerformanceOptions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceNetworkPerformanceOptions.go
index ac61ba9e7..91ae753ce 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceNetworkPerformanceOptions.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstanceNetworkPerformanceOptions.go
@@ -138,6 +138,9 @@ func (c *Client) addOperationModifyInstanceNetworkPerformanceOptionsMiddlewares(
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpModifyInstanceNetworkPerformanceOptionsValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstancePlacement.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstancePlacement.go
index cdcd14918..54db51f36 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstancePlacement.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyInstancePlacement.go
@@ -171,6 +171,9 @@ func (c *Client) addOperationModifyInstancePlacementMiddlewares(stack *middlewar
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpModifyInstancePlacementValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyIpam.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyIpam.go
index e3eb789b2..b03b8f4cc 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyIpam.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyIpam.go
@@ -146,6 +146,9 @@ func (c *Client) addOperationModifyIpamMiddlewares(stack *middleware.Stack, opti
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpModifyIpamValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyIpamPool.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyIpamPool.go
index 24d0d0f3a..438d0e61a 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyIpamPool.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyIpamPool.go
@@ -167,6 +167,9 @@ func (c *Client) addOperationModifyIpamPoolMiddlewares(stack *middleware.Stack,
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpModifyIpamPoolValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyIpamResourceCidr.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyIpamResourceCidr.go
index f2294e062..87d45f4fb 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyIpamResourceCidr.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyIpamResourceCidr.go
@@ -152,6 +152,9 @@ func (c *Client) addOperationModifyIpamResourceCidrMiddlewares(stack *middleware
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpModifyIpamResourceCidrValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyIpamResourceDiscovery.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyIpamResourceDiscovery.go
index ebaaaccf5..d124fb84d 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyIpamResourceDiscovery.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyIpamResourceDiscovery.go
@@ -161,6 +161,9 @@ func (c *Client) addOperationModifyIpamResourceDiscoveryMiddlewares(stack *middl
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpModifyIpamResourceDiscoveryValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyIpamScope.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyIpamScope.go
index 7932f95d1..c47c242d7 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyIpamScope.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyIpamScope.go
@@ -121,6 +121,9 @@ func (c *Client) addOperationModifyIpamScopeMiddlewares(stack *middleware.Stack,
 	if err = addUserAgentRetryMode(stack, options); err != nil {
 		return err
 	}
+	if err = addCredentialSource(stack, options); err != nil {
+		return err
+	}
 	if err = addOpModifyIpamScopeValidationMiddleware(stack); err != nil {
 		return err
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyLaunchTemplate.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyLaunchTemplate.go
index 32b37a4fd..aec93e538 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyLaunchTemplate.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyLaunchTemplate.go
@@ -32,7 +32,10 @@ func (c *Client) ModifyLaunchTemplate(ctx context.Context, params *ModifyLaunchT
 type ModifyLaunchTemplateInput struct {
 
 	// Unique, case-sensitive identifier you provide to ensure the idempotency of the
-	// request. For more information, see [Ensuring idempotency].
+	// request. If a client token isn't specified, a randomly generated token is used
+	// in the request to ensure idempotency.
+	//
+	// For more information, see [Ensuring idempotency].
 	//
 	// Constraint: Maximum 128 ASCII characters.
 	//
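The ModifyLaunchTemplate hunks that follow back that doc-comment change with code: they register an initialize middleware that auto-fills ClientToken from the client's IdempotencyTokenProvider whenever the caller leaves it nil, so a retried request reuses the same token and stays idempotent. As a rough standalone sketch of that auto-fill pattern (the toy input struct and the hex token generator below are illustrative assumptions, not the SDK's generated wiring):

package main

import (
	"crypto/rand"
	"encoding/hex"
	"fmt"
)

// tokenProvider mimics the IdempotencyTokenProvider contract:
// each call returns a fresh, unique token.
type tokenProvider struct{}

func (tokenProvider) GetIdempotencyToken() (string, error) {
	b := make([]byte, 16)
	if _, err := rand.Read(b); err != nil {
		return "", err
	}
	return hex.EncodeToString(b), nil
}

// modifyLaunchTemplateInput stands in for the generated input type.
type modifyLaunchTemplateInput struct {
	ClientToken *string
}

// fillClientToken is the core of the middleware: only fill the token
// when the caller did not supply one, so explicit tokens always win.
func fillClientToken(in *modifyLaunchTemplateInput, p tokenProvider) error {
	if in.ClientToken != nil {
		return nil
	}
	t, err := p.GetIdempotencyToken()
	if err != nil {
		return err
	}
	in.ClientToken = &t
	return nil
}

func main() {
	in := &modifyLaunchTemplateInput{}
	if err := fillClientToken(in, tokenProvider{}); err != nil {
		panic(err)
	}
	fmt.Println("auto-filled token:", *in.ClientToken) // e.g. 32 hex characters
}

In the real middleware this fill runs in the Initialize phase, before serialization, which is why the generated token is part of the signed request and survives SDK-level retries unchanged.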
// @@ -138,6 +141,12 @@ func (c *Client) addOperationModifyLaunchTemplateMiddlewares(stack *middleware.S if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addIdempotencyToken_opModifyLaunchTemplateMiddleware(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opModifyLaunchTemplate(options.Region), middleware.Before); err != nil { return err } @@ -171,6 +180,39 @@ func (c *Client) addOperationModifyLaunchTemplateMiddlewares(stack *middleware.S return nil } +type idempotencyToken_initializeOpModifyLaunchTemplate struct { + tokenProvider IdempotencyTokenProvider +} + +func (*idempotencyToken_initializeOpModifyLaunchTemplate) ID() string { + return "OperationIdempotencyTokenAutoFill" +} + +func (m *idempotencyToken_initializeOpModifyLaunchTemplate) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + if m.tokenProvider == nil { + return next.HandleInitialize(ctx, in) + } + + input, ok := in.Parameters.(*ModifyLaunchTemplateInput) + if !ok { + return out, metadata, fmt.Errorf("expected middleware input to be of type *ModifyLaunchTemplateInput ") + } + + if input.ClientToken == nil { + t, err := m.tokenProvider.GetIdempotencyToken() + if err != nil { + return out, metadata, err + } + input.ClientToken = &t + } + return next.HandleInitialize(ctx, in) +} +func addIdempotencyToken_opModifyLaunchTemplateMiddleware(stack *middleware.Stack, cfg Options) error { + return stack.Initialize.Add(&idempotencyToken_initializeOpModifyLaunchTemplate{tokenProvider: cfg.IdempotencyTokenProvider}, middleware.Before) +} + func newServiceMetadataMiddleware_opModifyLaunchTemplate(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyLocalGatewayRoute.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyLocalGatewayRoute.go index 02f1fcb96..f4c0ea688 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyLocalGatewayRoute.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyLocalGatewayRoute.go @@ -133,6 +133,9 @@ func (c *Client) addOperationModifyLocalGatewayRouteMiddlewares(stack *middlewar if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpModifyLocalGatewayRouteValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyManagedPrefixList.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyManagedPrefixList.go index 7b8c3c437..e732a83ae 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyManagedPrefixList.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyManagedPrefixList.go @@ -144,6 +144,9 @@ func (c *Client) addOperationModifyManagedPrefixListMiddlewares(stack *middlewar if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpModifyManagedPrefixListValidationMiddleware(stack); err != nil { return err } diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyNetworkInterfaceAttribute.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyNetworkInterfaceAttribute.go index 781088d44..c5567e0dd 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyNetworkInterfaceAttribute.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyNetworkInterfaceAttribute.go @@ -165,6 +165,9 @@ func (c *Client) addOperationModifyNetworkInterfaceAttributeMiddlewares(stack *m if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpModifyNetworkInterfaceAttributeValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyPrivateDnsNameOptions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyPrivateDnsNameOptions.go index 8a11ad101..db6750b86 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyPrivateDnsNameOptions.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyPrivateDnsNameOptions.go @@ -132,6 +132,9 @@ func (c *Client) addOperationModifyPrivateDnsNameOptionsMiddlewares(stack *middl if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpModifyPrivateDnsNameOptionsValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyReservedInstances.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyReservedInstances.go index 31394a949..baa6b3a3c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyReservedInstances.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyReservedInstances.go @@ -132,6 +132,9 @@ func (c *Client) addOperationModifyReservedInstancesMiddlewares(stack *middlewar if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpModifyReservedInstancesValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifySecurityGroupRules.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifySecurityGroupRules.go index 573062da6..6fdcf6bf4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifySecurityGroupRules.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifySecurityGroupRules.go @@ -123,6 +123,9 @@ func (c *Client) addOperationModifySecurityGroupRulesMiddlewares(stack *middlewa if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpModifySecurityGroupRulesValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifySnapshotAttribute.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifySnapshotAttribute.go index 2fb1b8720..7859686eb 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifySnapshotAttribute.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifySnapshotAttribute.go @@ -144,6 +144,9 @@ func (c *Client) addOperationModifySnapshotAttributeMiddlewares(stack *middlewar if err = addUserAgentRetryMode(stack, options); err != nil { return err } 
+ if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpModifySnapshotAttributeValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifySnapshotTier.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifySnapshotTier.go index 0cfab02bb..f7a5638ab 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifySnapshotTier.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifySnapshotTier.go @@ -131,6 +131,9 @@ func (c *Client) addOperationModifySnapshotTierMiddlewares(stack *middleware.Sta if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpModifySnapshotTierValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifySpotFleetRequest.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifySpotFleetRequest.go index 4fde83d96..ade6b27e1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifySpotFleetRequest.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifySpotFleetRequest.go @@ -165,6 +165,9 @@ func (c *Client) addOperationModifySpotFleetRequestMiddlewares(stack *middleware if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpModifySpotFleetRequestValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifySubnetAttribute.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifySubnetAttribute.go index 1a7fd685c..084623a55 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifySubnetAttribute.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifySubnetAttribute.go @@ -193,6 +193,9 @@ func (c *Client) addOperationModifySubnetAttributeMiddlewares(stack *middleware. 
if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpModifySubnetAttributeValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyTrafficMirrorFilterNetworkServices.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyTrafficMirrorFilterNetworkServices.go index 2e0436a2c..0b0aff75a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyTrafficMirrorFilterNetworkServices.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyTrafficMirrorFilterNetworkServices.go @@ -131,6 +131,9 @@ func (c *Client) addOperationModifyTrafficMirrorFilterNetworkServicesMiddlewares if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpModifyTrafficMirrorFilterNetworkServicesValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyTrafficMirrorFilterRule.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyTrafficMirrorFilterRule.go index 038a761ee..d53823b59 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyTrafficMirrorFilterRule.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyTrafficMirrorFilterRule.go @@ -158,6 +158,9 @@ func (c *Client) addOperationModifyTrafficMirrorFilterRuleMiddlewares(stack *mid if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpModifyTrafficMirrorFilterRuleValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyTrafficMirrorSession.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyTrafficMirrorSession.go index 9034a949d..42dc53e19 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyTrafficMirrorSession.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyTrafficMirrorSession.go @@ -155,6 +155,9 @@ func (c *Client) addOperationModifyTrafficMirrorSessionMiddlewares(stack *middle if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpModifyTrafficMirrorSessionValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyTransitGateway.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyTransitGateway.go index b8691ecc8..60e23b5cf 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyTransitGateway.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyTransitGateway.go @@ -126,6 +126,9 @@ func (c *Client) addOperationModifyTransitGatewayMiddlewares(stack *middleware.S if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpModifyTransitGatewayValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyTransitGatewayPrefixListReference.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyTransitGatewayPrefixListReference.go index b7f3d88eb..9e4ea672b 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyTransitGatewayPrefixListReference.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyTransitGatewayPrefixListReference.go @@ -130,6 +130,9 @@ func (c *Client) addOperationModifyTransitGatewayPrefixListReferenceMiddlewares( if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpModifyTransitGatewayPrefixListReferenceValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyTransitGatewayVpcAttachment.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyTransitGatewayVpcAttachment.go index 3a53e67e2..47cb7f654 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyTransitGatewayVpcAttachment.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyTransitGatewayVpcAttachment.go @@ -128,6 +128,9 @@ func (c *Client) addOperationModifyTransitGatewayVpcAttachmentMiddlewares(stack if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpModifyTransitGatewayVpcAttachmentValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVerifiedAccessEndpoint.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVerifiedAccessEndpoint.go index 4cc8409fd..5e55777e4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVerifiedAccessEndpoint.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVerifiedAccessEndpoint.go @@ -144,6 +144,9 @@ func (c *Client) addOperationModifyVerifiedAccessEndpointMiddlewares(stack *midd if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addIdempotencyToken_opModifyVerifiedAccessEndpointMiddleware(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVerifiedAccessEndpointPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVerifiedAccessEndpointPolicy.go index db9b02b71..28d75f54d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVerifiedAccessEndpointPolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVerifiedAccessEndpointPolicy.go @@ -139,6 +139,9 @@ func (c *Client) addOperationModifyVerifiedAccessEndpointPolicyMiddlewares(stack if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addIdempotencyToken_opModifyVerifiedAccessEndpointPolicyMiddleware(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVerifiedAccessGroup.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVerifiedAccessGroup.go index 5f0aea23f..36eb30823 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVerifiedAccessGroup.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVerifiedAccessGroup.go @@ -130,6 +130,9 @@ func (c *Client) addOperationModifyVerifiedAccessGroupMiddlewares(stack *middlew if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != 
nil { + return err + } if err = addIdempotencyToken_opModifyVerifiedAccessGroupMiddleware(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVerifiedAccessGroupPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVerifiedAccessGroupPolicy.go index fb4946df8..2b9352893 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVerifiedAccessGroupPolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVerifiedAccessGroupPolicy.go @@ -139,6 +139,9 @@ func (c *Client) addOperationModifyVerifiedAccessGroupPolicyMiddlewares(stack *m if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addIdempotencyToken_opModifyVerifiedAccessGroupPolicyMiddleware(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVerifiedAccessInstance.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVerifiedAccessInstance.go index dee0aec04..492d54e4e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVerifiedAccessInstance.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVerifiedAccessInstance.go @@ -131,6 +131,9 @@ func (c *Client) addOperationModifyVerifiedAccessInstanceMiddlewares(stack *midd if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addIdempotencyToken_opModifyVerifiedAccessInstanceMiddleware(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVerifiedAccessInstanceLoggingConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVerifiedAccessInstanceLoggingConfiguration.go index f7b602cb1..ca23d8f2e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVerifiedAccessInstanceLoggingConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVerifiedAccessInstanceLoggingConfiguration.go @@ -130,6 +130,9 @@ func (c *Client) addOperationModifyVerifiedAccessInstanceLoggingConfigurationMid if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addIdempotencyToken_opModifyVerifiedAccessInstanceLoggingConfigurationMiddleware(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVerifiedAccessTrustProvider.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVerifiedAccessTrustProvider.go index 097a1155e..5e8be30d6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVerifiedAccessTrustProvider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVerifiedAccessTrustProvider.go @@ -141,6 +141,9 @@ func (c *Client) addOperationModifyVerifiedAccessTrustProviderMiddlewares(stack if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addIdempotencyToken_opModifyVerifiedAccessTrustProviderMiddleware(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVolume.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVolume.go index 
ae361c532..ce905f1fa 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVolume.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVolume.go @@ -203,6 +203,9 @@ func (c *Client) addOperationModifyVolumeMiddlewares(stack *middleware.Stack, op if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpModifyVolumeValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVolumeAttribute.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVolumeAttribute.go index dc35ff31b..c15a08d12 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVolumeAttribute.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVolumeAttribute.go @@ -127,6 +127,9 @@ func (c *Client) addOperationModifyVolumeAttributeMiddlewares(stack *middleware. if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpModifyVolumeAttributeValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcAttribute.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcAttribute.go index 30be990cc..d75e7d116 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcAttribute.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcAttribute.go @@ -129,6 +129,9 @@ func (c *Client) addOperationModifyVpcAttributeMiddlewares(stack *middleware.Sta if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpModifyVpcAttributeValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcBlockPublicAccessExclusion.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcBlockPublicAccessExclusion.go index cc2a26f74..b70c3f466 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcBlockPublicAccessExclusion.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcBlockPublicAccessExclusion.go @@ -135,6 +135,9 @@ func (c *Client) addOperationModifyVpcBlockPublicAccessExclusionMiddlewares(stac if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpModifyVpcBlockPublicAccessExclusionValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcBlockPublicAccessOptions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcBlockPublicAccessOptions.go index 2202604bd..0bfd6b6f6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcBlockPublicAccessOptions.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcBlockPublicAccessOptions.go @@ -136,6 +136,9 @@ func (c *Client) addOperationModifyVpcBlockPublicAccessOptionsMiddlewares(stack if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpModifyVpcBlockPublicAccessOptionsValidationMiddleware(stack); err != nil { return err } diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcEndpoint.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcEndpoint.go index 7d7d357e2..19f783008 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcEndpoint.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcEndpoint.go @@ -166,6 +166,9 @@ func (c *Client) addOperationModifyVpcEndpointMiddlewares(stack *middleware.Stac if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpModifyVpcEndpointValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcEndpointConnectionNotification.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcEndpointConnectionNotification.go index 88442f49e..34ad32959 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcEndpointConnectionNotification.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcEndpointConnectionNotification.go @@ -126,6 +126,9 @@ func (c *Client) addOperationModifyVpcEndpointConnectionNotificationMiddlewares( if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpModifyVpcEndpointConnectionNotificationValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcEndpointServiceConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcEndpointServiceConfiguration.go index addbaadd3..f47c1d0db 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcEndpointServiceConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcEndpointServiceConfiguration.go @@ -160,6 +160,9 @@ func (c *Client) addOperationModifyVpcEndpointServiceConfigurationMiddlewares(st if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpModifyVpcEndpointServiceConfigurationValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcEndpointServicePayerResponsibility.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcEndpointServicePayerResponsibility.go index 05e587f4e..0623bbe71 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcEndpointServicePayerResponsibility.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcEndpointServicePayerResponsibility.go @@ -125,6 +125,9 @@ func (c *Client) addOperationModifyVpcEndpointServicePayerResponsibilityMiddlewa if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpModifyVpcEndpointServicePayerResponsibilityValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcEndpointServicePermissions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcEndpointServicePermissions.go index 0b0ce31c8..5245f2c88 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcEndpointServicePermissions.go +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcEndpointServicePermissions.go @@ -137,6 +137,9 @@ func (c *Client) addOperationModifyVpcEndpointServicePermissionsMiddlewares(stac if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpModifyVpcEndpointServicePermissionsValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcPeeringConnectionOptions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcPeeringConnectionOptions.go index 5902320cd..e90c06c55 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcPeeringConnectionOptions.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcPeeringConnectionOptions.go @@ -140,6 +140,9 @@ func (c *Client) addOperationModifyVpcPeeringConnectionOptionsMiddlewares(stack if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpModifyVpcPeeringConnectionOptionsValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcTenancy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcTenancy.go index 3b1fbc61e..9ff055442 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcTenancy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpcTenancy.go @@ -133,6 +133,9 @@ func (c *Client) addOperationModifyVpcTenancyMiddlewares(stack *middleware.Stack if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpModifyVpcTenancyValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpnConnection.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpnConnection.go index aaf966e14..eb47726b8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpnConnection.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpnConnection.go @@ -169,6 +169,9 @@ func (c *Client) addOperationModifyVpnConnectionMiddlewares(stack *middleware.St if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpModifyVpnConnectionValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpnConnectionOptions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpnConnectionOptions.go index 0ea2982c6..492a64585 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpnConnectionOptions.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpnConnectionOptions.go @@ -143,6 +143,9 @@ func (c *Client) addOperationModifyVpnConnectionOptionsMiddlewares(stack *middle if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpModifyVpnConnectionOptionsValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpnTunnelCertificate.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpnTunnelCertificate.go index c1008219f..41fd1b700 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpnTunnelCertificate.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpnTunnelCertificate.go @@ -123,6 +123,9 @@ func (c *Client) addOperationModifyVpnTunnelCertificateMiddlewares(stack *middle if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpModifyVpnTunnelCertificateValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpnTunnelOptions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpnTunnelOptions.go index 43252ab3b..5659a47d1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpnTunnelOptions.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ModifyVpnTunnelOptions.go @@ -139,6 +139,9 @@ func (c *Client) addOperationModifyVpnTunnelOptionsMiddlewares(stack *middleware if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpModifyVpnTunnelOptionsValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_MonitorInstances.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_MonitorInstances.go index 3d8f12500..9c1fc3efc 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_MonitorInstances.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_MonitorInstances.go @@ -124,6 +124,9 @@ func (c *Client) addOperationMonitorInstancesMiddlewares(stack *middleware.Stack if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpMonitorInstancesValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_MoveAddressToVpc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_MoveAddressToVpc.go index 16ac01b06..d48870507 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_MoveAddressToVpc.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_MoveAddressToVpc.go @@ -129,6 +129,9 @@ func (c *Client) addOperationMoveAddressToVpcMiddlewares(stack *middleware.Stack if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpMoveAddressToVpcValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_MoveByoipCidrToIpam.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_MoveByoipCidrToIpam.go index 8470843b7..9820d6159 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_MoveByoipCidrToIpam.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_MoveByoipCidrToIpam.go @@ -135,6 +135,9 @@ func (c *Client) addOperationMoveByoipCidrToIpamMiddlewares(stack *middleware.St if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpMoveByoipCidrToIpamValidationMiddleware(stack); err != nil { return err } diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_MoveCapacityReservationInstances.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_MoveCapacityReservationInstances.go index 1df53c5e9..831794af7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_MoveCapacityReservationInstances.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_MoveCapacityReservationInstances.go @@ -157,6 +157,9 @@ func (c *Client) addOperationMoveCapacityReservationInstancesMiddlewares(stack * if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addIdempotencyToken_opMoveCapacityReservationInstancesMiddleware(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ProvisionByoipCidr.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ProvisionByoipCidr.go index 887d074e1..ebb73589d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ProvisionByoipCidr.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ProvisionByoipCidr.go @@ -179,6 +179,9 @@ func (c *Client) addOperationProvisionByoipCidrMiddlewares(stack *middleware.Sta if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpProvisionByoipCidrValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ProvisionIpamByoasn.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ProvisionIpamByoasn.go index 2f09367d3..e07342273 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ProvisionIpamByoasn.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ProvisionIpamByoasn.go @@ -133,6 +133,9 @@ func (c *Client) addOperationProvisionIpamByoasnMiddlewares(stack *middleware.St if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpProvisionIpamByoasnValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ProvisionIpamPoolCidr.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ProvisionIpamPoolCidr.go index 57748d591..85e722c61 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ProvisionIpamPoolCidr.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ProvisionIpamPoolCidr.go @@ -155,6 +155,9 @@ func (c *Client) addOperationProvisionIpamPoolCidrMiddlewares(stack *middleware. 
if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addIdempotencyToken_opProvisionIpamPoolCidrMiddleware(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ProvisionPublicIpv4PoolCidr.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ProvisionPublicIpv4PoolCidr.go index b7d5144ab..03dac86a6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ProvisionPublicIpv4PoolCidr.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ProvisionPublicIpv4PoolCidr.go @@ -144,6 +144,9 @@ func (c *Client) addOperationProvisionPublicIpv4PoolCidrMiddlewares(stack *middl if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpProvisionPublicIpv4PoolCidrValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_PurchaseCapacityBlock.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_PurchaseCapacityBlock.go index d8f12962e..a935ba93f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_PurchaseCapacityBlock.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_PurchaseCapacityBlock.go @@ -128,6 +128,9 @@ func (c *Client) addOperationPurchaseCapacityBlockMiddlewares(stack *middleware. if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPurchaseCapacityBlockValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_PurchaseCapacityBlockExtension.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_PurchaseCapacityBlockExtension.go index c907bf948..36a59a061 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_PurchaseCapacityBlockExtension.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_PurchaseCapacityBlockExtension.go @@ -124,6 +124,9 @@ func (c *Client) addOperationPurchaseCapacityBlockExtensionMiddlewares(stack *mi if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPurchaseCapacityBlockExtensionValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_PurchaseHostReservation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_PurchaseHostReservation.go index 1a2452eaa..eeb6c7320 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_PurchaseHostReservation.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_PurchaseHostReservation.go @@ -157,6 +157,9 @@ func (c *Client) addOperationPurchaseHostReservationMiddlewares(stack *middlewar if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPurchaseHostReservationValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_PurchaseReservedInstancesOffering.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_PurchaseReservedInstancesOffering.go index 09172efd8..08fef06a1 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_PurchaseReservedInstancesOffering.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_PurchaseReservedInstancesOffering.go @@ -151,6 +151,9 @@ func (c *Client) addOperationPurchaseReservedInstancesOfferingMiddlewares(stack if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPurchaseReservedInstancesOfferingValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_PurchaseScheduledInstances.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_PurchaseScheduledInstances.go index be7d66659..0a7a12d9d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_PurchaseScheduledInstances.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_PurchaseScheduledInstances.go @@ -136,6 +136,9 @@ func (c *Client) addOperationPurchaseScheduledInstancesMiddlewares(stack *middle if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addIdempotencyToken_opPurchaseScheduledInstancesMiddleware(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RebootInstances.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RebootInstances.go index d18658f85..4535284b0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RebootInstances.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RebootInstances.go @@ -123,6 +123,9 @@ func (c *Client) addOperationRebootInstancesMiddlewares(stack *middleware.Stack, if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpRebootInstancesValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RegisterImage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RegisterImage.go index 8b7a3a85c..d055e7e26 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RegisterImage.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RegisterImage.go @@ -297,6 +297,9 @@ func (c *Client) addOperationRegisterImageMiddlewares(stack *middleware.Stack, o if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpRegisterImageValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RegisterInstanceEventNotificationAttributes.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RegisterInstanceEventNotificationAttributes.go index 016b965ad..8b75f8e1a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RegisterInstanceEventNotificationAttributes.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RegisterInstanceEventNotificationAttributes.go @@ -123,6 +123,9 @@ func (c *Client) addOperationRegisterInstanceEventNotificationAttributesMiddlewa if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpRegisterInstanceEventNotificationAttributesValidationMiddleware(stack); err != nil { return err } diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RegisterTransitGatewayMulticastGroupMembers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RegisterTransitGatewayMulticastGroupMembers.go index 022e3131e..e4023f878 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RegisterTransitGatewayMulticastGroupMembers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RegisterTransitGatewayMulticastGroupMembers.go @@ -136,6 +136,9 @@ func (c *Client) addOperationRegisterTransitGatewayMulticastGroupMembersMiddlewa if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpRegisterTransitGatewayMulticastGroupMembersValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RegisterTransitGatewayMulticastGroupSources.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RegisterTransitGatewayMulticastGroupSources.go index 96e321dd7..139c3f445 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RegisterTransitGatewayMulticastGroupSources.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RegisterTransitGatewayMulticastGroupSources.go @@ -138,6 +138,9 @@ func (c *Client) addOperationRegisterTransitGatewayMulticastGroupSourcesMiddlewa if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpRegisterTransitGatewayMulticastGroupSourcesValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RejectCapacityReservationBillingOwnership.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RejectCapacityReservationBillingOwnership.go index 9ebddd84a..e81596144 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RejectCapacityReservationBillingOwnership.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RejectCapacityReservationBillingOwnership.go @@ -120,6 +120,9 @@ func (c *Client) addOperationRejectCapacityReservationBillingOwnershipMiddleware if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpRejectCapacityReservationBillingOwnershipValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RejectTransitGatewayMulticastDomainAssociations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RejectTransitGatewayMulticastDomainAssociations.go index 269db1d8d..fa6546e94 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RejectTransitGatewayMulticastDomainAssociations.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RejectTransitGatewayMulticastDomainAssociations.go @@ -123,6 +123,9 @@ func (c *Client) addOperationRejectTransitGatewayMulticastDomainAssociationsMidd if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opRejectTransitGatewayMulticastDomainAssociations(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RejectTransitGatewayPeeringAttachment.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RejectTransitGatewayPeeringAttachment.go index 3297afd92..6d1f1b8d4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RejectTransitGatewayPeeringAttachment.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RejectTransitGatewayPeeringAttachment.go @@ -118,6 +118,9 @@ func (c *Client) addOperationRejectTransitGatewayPeeringAttachmentMiddlewares(st if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpRejectTransitGatewayPeeringAttachmentValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RejectTransitGatewayVpcAttachment.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RejectTransitGatewayVpcAttachment.go index 4c310589e..7bbe41ef9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RejectTransitGatewayVpcAttachment.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RejectTransitGatewayVpcAttachment.go @@ -121,6 +121,9 @@ func (c *Client) addOperationRejectTransitGatewayVpcAttachmentMiddlewares(stack if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpRejectTransitGatewayVpcAttachmentValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RejectVpcEndpointConnections.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RejectVpcEndpointConnections.go index 7b1b853e5..c869dae06 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RejectVpcEndpointConnections.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RejectVpcEndpointConnections.go @@ -123,6 +123,9 @@ func (c *Client) addOperationRejectVpcEndpointConnectionsMiddlewares(stack *midd if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpRejectVpcEndpointConnectionsValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RejectVpcPeeringConnection.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RejectVpcPeeringConnection.go index 01d13401c..16313c5ce 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RejectVpcPeeringConnection.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RejectVpcPeeringConnection.go @@ -120,6 +120,9 @@ func (c *Client) addOperationRejectVpcPeeringConnectionMiddlewares(stack *middle if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpRejectVpcPeeringConnectionValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReleaseAddress.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReleaseAddress.go index 27563095f..5adfacaac 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReleaseAddress.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReleaseAddress.go @@ -138,6 +138,9 @@ func (c *Client) addOperationReleaseAddressMiddlewares(stack *middleware.Stack, if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, 
options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opReleaseAddress(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReleaseHosts.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReleaseHosts.go index 49a909db9..212b99180 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReleaseHosts.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReleaseHosts.go @@ -126,6 +126,9 @@ func (c *Client) addOperationReleaseHostsMiddlewares(stack *middleware.Stack, op if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpReleaseHostsValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReleaseIpamPoolAllocation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReleaseIpamPoolAllocation.go index ad2602d55..a063b06a9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReleaseIpamPoolAllocation.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReleaseIpamPoolAllocation.go @@ -138,6 +138,9 @@ func (c *Client) addOperationReleaseIpamPoolAllocationMiddlewares(stack *middlew if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpReleaseIpamPoolAllocationValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReplaceIamInstanceProfileAssociation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReplaceIamInstanceProfileAssociation.go index bc1e38a11..c7e6be166 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReplaceIamInstanceProfileAssociation.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReplaceIamInstanceProfileAssociation.go @@ -121,6 +121,9 @@ func (c *Client) addOperationReplaceIamInstanceProfileAssociationMiddlewares(sta if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpReplaceIamInstanceProfileAssociationValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReplaceImageCriteriaInAllowedImagesSettings.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReplaceImageCriteriaInAllowedImagesSettings.go index 1ce40eae6..e3eb32c73 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReplaceImageCriteriaInAllowedImagesSettings.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReplaceImageCriteriaInAllowedImagesSettings.go @@ -126,6 +126,9 @@ func (c *Client) addOperationReplaceImageCriteriaInAllowedImagesSettingsMiddlewa if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opReplaceImageCriteriaInAllowedImagesSettings(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReplaceNetworkAclAssociation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReplaceNetworkAclAssociation.go index 8fe1c17cb..5b34fd45a 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReplaceNetworkAclAssociation.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReplaceNetworkAclAssociation.go @@ -129,6 +129,9 @@ func (c *Client) addOperationReplaceNetworkAclAssociationMiddlewares(stack *midd if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpReplaceNetworkAclAssociationValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReplaceNetworkAclEntry.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReplaceNetworkAclEntry.go index 44bd60d0d..0d1d256b4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReplaceNetworkAclEntry.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReplaceNetworkAclEntry.go @@ -161,6 +161,9 @@ func (c *Client) addOperationReplaceNetworkAclEntryMiddlewares(stack *middleware if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpReplaceNetworkAclEntryValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReplaceRoute.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReplaceRoute.go index b7d123c4d..84db6836c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReplaceRoute.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReplaceRoute.go @@ -168,6 +168,9 @@ func (c *Client) addOperationReplaceRouteMiddlewares(stack *middleware.Stack, op if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpReplaceRouteValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReplaceRouteTableAssociation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReplaceRouteTableAssociation.go index 91e971108..fb79fc094 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReplaceRouteTableAssociation.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReplaceRouteTableAssociation.go @@ -135,6 +135,9 @@ func (c *Client) addOperationReplaceRouteTableAssociationMiddlewares(stack *midd if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpReplaceRouteTableAssociationValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReplaceTransitGatewayRoute.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReplaceTransitGatewayRoute.go index e3566d0bb..06c369180 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReplaceTransitGatewayRoute.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReplaceTransitGatewayRoute.go @@ -130,6 +130,9 @@ func (c *Client) addOperationReplaceTransitGatewayRouteMiddlewares(stack *middle if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpReplaceTransitGatewayRouteValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReplaceVpnTunnel.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReplaceVpnTunnel.go index ff3f5a54f..84bed6dc2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReplaceVpnTunnel.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReplaceVpnTunnel.go @@ -125,6 +125,9 @@ func (c *Client) addOperationReplaceVpnTunnelMiddlewares(stack *middleware.Stack if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpReplaceVpnTunnelValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReportInstanceStatus.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReportInstanceStatus.go index e6b5217ec..2172db87f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReportInstanceStatus.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ReportInstanceStatus.go @@ -162,6 +162,9 @@ func (c *Client) addOperationReportInstanceStatusMiddlewares(stack *middleware.S if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpReportInstanceStatusValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RequestSpotFleet.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RequestSpotFleet.go index 5c8c6bc43..5db4d2505 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RequestSpotFleet.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RequestSpotFleet.go @@ -150,6 +150,9 @@ func (c *Client) addOperationRequestSpotFleetMiddlewares(stack *middleware.Stack if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpRequestSpotFleetValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RequestSpotInstances.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RequestSpotInstances.go index 25a5b77c0..f0abba7ab 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RequestSpotInstances.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RequestSpotInstances.go @@ -218,6 +218,9 @@ func (c *Client) addOperationRequestSpotInstancesMiddlewares(stack *middleware.S if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpRequestSpotInstancesValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ResetAddressAttribute.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ResetAddressAttribute.go index 2bdc5ad58..97839a172 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ResetAddressAttribute.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ResetAddressAttribute.go @@ -125,6 +125,9 @@ func (c *Client) addOperationResetAddressAttributeMiddlewares(stack *middleware. 
if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpResetAddressAttributeValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ResetEbsDefaultKmsKeyId.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ResetEbsDefaultKmsKeyId.go index 9b59eed95..70613ad59 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ResetEbsDefaultKmsKeyId.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ResetEbsDefaultKmsKeyId.go @@ -120,6 +120,9 @@ func (c *Client) addOperationResetEbsDefaultKmsKeyIdMiddlewares(stack *middlewar if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opResetEbsDefaultKmsKeyId(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ResetFpgaImageAttribute.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ResetFpgaImageAttribute.go index e73df8b6b..cce50b258 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ResetFpgaImageAttribute.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ResetFpgaImageAttribute.go @@ -122,6 +122,9 @@ func (c *Client) addOperationResetFpgaImageAttributeMiddlewares(stack *middlewar if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpResetFpgaImageAttributeValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ResetImageAttribute.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ResetImageAttribute.go index 854d2f2eb..4782499b2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ResetImageAttribute.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ResetImageAttribute.go @@ -121,6 +121,9 @@ func (c *Client) addOperationResetImageAttributeMiddlewares(stack *middleware.St if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpResetImageAttributeValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ResetInstanceAttribute.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ResetInstanceAttribute.go index e7cc49752..dfbae0e39 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ResetInstanceAttribute.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ResetInstanceAttribute.go @@ -130,6 +130,9 @@ func (c *Client) addOperationResetInstanceAttributeMiddlewares(stack *middleware if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpResetInstanceAttributeValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ResetNetworkInterfaceAttribute.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ResetNetworkInterfaceAttribute.go index e06b29088..741d64102 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ResetNetworkInterfaceAttribute.go +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ResetNetworkInterfaceAttribute.go @@ -118,6 +118,9 @@ func (c *Client) addOperationResetNetworkInterfaceAttributeMiddlewares(stack *mi if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpResetNetworkInterfaceAttributeValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ResetSnapshotAttribute.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ResetSnapshotAttribute.go index 77cd61a67..b44d4b725 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ResetSnapshotAttribute.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_ResetSnapshotAttribute.go @@ -125,6 +125,9 @@ func (c *Client) addOperationResetSnapshotAttributeMiddlewares(stack *middleware if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpResetSnapshotAttributeValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RestoreAddressToClassic.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RestoreAddressToClassic.go index 31fb29564..afdf13c84 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RestoreAddressToClassic.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RestoreAddressToClassic.go @@ -126,6 +126,9 @@ func (c *Client) addOperationRestoreAddressToClassicMiddlewares(stack *middlewar if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpRestoreAddressToClassicValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RestoreImageFromRecycleBin.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RestoreImageFromRecycleBin.go index ffac1708f..1e80d93f9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RestoreImageFromRecycleBin.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RestoreImageFromRecycleBin.go @@ -120,6 +120,9 @@ func (c *Client) addOperationRestoreImageFromRecycleBinMiddlewares(stack *middle if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpRestoreImageFromRecycleBinValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RestoreManagedPrefixListVersion.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RestoreManagedPrefixListVersion.go index b997d7d99..e712cecd9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RestoreManagedPrefixListVersion.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RestoreManagedPrefixListVersion.go @@ -129,6 +129,9 @@ func (c *Client) addOperationRestoreManagedPrefixListVersionMiddlewares(stack *m if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpRestoreManagedPrefixListVersionValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RestoreSnapshotFromRecycleBin.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RestoreSnapshotFromRecycleBin.go index 5e69c70cb..5e5e0e28c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RestoreSnapshotFromRecycleBin.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RestoreSnapshotFromRecycleBin.go @@ -155,6 +155,9 @@ func (c *Client) addOperationRestoreSnapshotFromRecycleBinMiddlewares(stack *mid if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpRestoreSnapshotFromRecycleBinValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RestoreSnapshotTier.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RestoreSnapshotTier.go index 59e35b5e8..159304187 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RestoreSnapshotTier.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RestoreSnapshotTier.go @@ -149,6 +149,9 @@ func (c *Client) addOperationRestoreSnapshotTierMiddlewares(stack *middleware.St if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpRestoreSnapshotTierValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RevokeClientVpnIngress.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RevokeClientVpnIngress.go index 24084e3fa..7a287609a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RevokeClientVpnIngress.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RevokeClientVpnIngress.go @@ -134,6 +134,9 @@ func (c *Client) addOperationRevokeClientVpnIngressMiddlewares(stack *middleware if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpRevokeClientVpnIngressValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RevokeSecurityGroupEgress.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RevokeSecurityGroupEgress.go index 49237b56a..4b95eae30 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RevokeSecurityGroupEgress.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RevokeSecurityGroupEgress.go @@ -173,6 +173,9 @@ func (c *Client) addOperationRevokeSecurityGroupEgressMiddlewares(stack *middlew if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpRevokeSecurityGroupEgressValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RevokeSecurityGroupIngress.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RevokeSecurityGroupIngress.go index 66b7d3331..0d29a58a2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RevokeSecurityGroupIngress.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RevokeSecurityGroupIngress.go @@ -187,6 +187,9 @@ func (c *Client) addOperationRevokeSecurityGroupIngressMiddlewares(stack *middle if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = 
stack.Initialize.Add(newServiceMetadataMiddleware_opRevokeSecurityGroupIngress(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RunInstances.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RunInstances.go index 49373b9c6..fb3d480ae 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RunInstances.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RunInstances.go @@ -482,6 +482,9 @@ func (c *Client) addOperationRunInstancesMiddlewares(stack *middleware.Stack, op if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addIdempotencyToken_opRunInstancesMiddleware(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RunScheduledInstances.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RunScheduledInstances.go index ab74d6e74..115a5ea02 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RunScheduledInstances.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_RunScheduledInstances.go @@ -145,6 +145,9 @@ func (c *Client) addOperationRunScheduledInstancesMiddlewares(stack *middleware. if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addIdempotencyToken_opRunScheduledInstancesMiddleware(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_SearchLocalGatewayRoutes.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_SearchLocalGatewayRoutes.go index da0f507ef..ce524019c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_SearchLocalGatewayRoutes.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_SearchLocalGatewayRoutes.go @@ -151,6 +151,9 @@ func (c *Client) addOperationSearchLocalGatewayRoutesMiddlewares(stack *middlewa if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpSearchLocalGatewayRoutesValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_SearchTransitGatewayMulticastGroups.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_SearchTransitGatewayMulticastGroups.go index 2c2cb2a07..bbb9cdef5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_SearchTransitGatewayMulticastGroups.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_SearchTransitGatewayMulticastGroups.go @@ -154,6 +154,9 @@ func (c *Client) addOperationSearchTransitGatewayMulticastGroupsMiddlewares(stac if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpSearchTransitGatewayMulticastGroupsValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_SearchTransitGatewayRoutes.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_SearchTransitGatewayRoutes.go index cfd4d5056..d74b9d0eb 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_SearchTransitGatewayRoutes.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_SearchTransitGatewayRoutes.go @@ -157,6 +157,9 @@ func (c *Client) 
addOperationSearchTransitGatewayRoutesMiddlewares(stack *middle if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpSearchTransitGatewayRoutesValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_SendDiagnosticInterrupt.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_SendDiagnosticInterrupt.go index 2e29fba8a..564dea3bd 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_SendDiagnosticInterrupt.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_SendDiagnosticInterrupt.go @@ -130,6 +130,9 @@ func (c *Client) addOperationSendDiagnosticInterruptMiddlewares(stack *middlewar if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpSendDiagnosticInterruptValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_StartDeclarativePoliciesReport.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_StartDeclarativePoliciesReport.go index f2ccf8541..e2bc45b3d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_StartDeclarativePoliciesReport.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_StartDeclarativePoliciesReport.go @@ -177,6 +177,9 @@ func (c *Client) addOperationStartDeclarativePoliciesReportMiddlewares(stack *mi if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpStartDeclarativePoliciesReportValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_StartInstances.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_StartInstances.go index 7d142fd9c..c1b55560c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_StartInstances.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_StartInstances.go @@ -144,6 +144,9 @@ func (c *Client) addOperationStartInstancesMiddlewares(stack *middleware.Stack, if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpStartInstancesValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_StartNetworkInsightsAccessScopeAnalysis.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_StartNetworkInsightsAccessScopeAnalysis.go index 68ab5c788..dab841f97 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_StartNetworkInsightsAccessScopeAnalysis.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_StartNetworkInsightsAccessScopeAnalysis.go @@ -129,6 +129,9 @@ func (c *Client) addOperationStartNetworkInsightsAccessScopeAnalysisMiddlewares( if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addIdempotencyToken_opStartNetworkInsightsAccessScopeAnalysisMiddleware(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_StartNetworkInsightsAnalysis.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_StartNetworkInsightsAnalysis.go index e50afe837..12dbd1c79 100644 
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_StartNetworkInsightsAnalysis.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_StartNetworkInsightsAnalysis.go @@ -136,6 +136,9 @@ func (c *Client) addOperationStartNetworkInsightsAnalysisMiddlewares(stack *midd if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addIdempotencyToken_opStartNetworkInsightsAnalysisMiddleware(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_StartVpcEndpointServicePrivateDnsVerification.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_StartVpcEndpointServicePrivateDnsVerification.go index 98b52746c..8ee0dff8b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_StartVpcEndpointServicePrivateDnsVerification.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_StartVpcEndpointServicePrivateDnsVerification.go @@ -124,6 +124,9 @@ func (c *Client) addOperationStartVpcEndpointServicePrivateDnsVerificationMiddle if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpStartVpcEndpointServicePrivateDnsVerificationValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_StopInstances.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_StopInstances.go index ebb45289f..e698d6167 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_StopInstances.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_StopInstances.go @@ -178,6 +178,9 @@ func (c *Client) addOperationStopInstancesMiddlewares(stack *middleware.Stack, o if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpStopInstancesValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_TerminateClientVpnConnections.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_TerminateClientVpnConnections.go index a1aeaab6c..02c9fc6c7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_TerminateClientVpnConnections.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_TerminateClientVpnConnections.go @@ -134,6 +134,9 @@ func (c *Client) addOperationTerminateClientVpnConnectionsMiddlewares(stack *mid if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpTerminateClientVpnConnectionsValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_TerminateInstances.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_TerminateInstances.go index f0a4e0e12..92386215f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_TerminateInstances.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_TerminateInstances.go @@ -175,6 +175,9 @@ func (c *Client) addOperationTerminateInstancesMiddlewares(stack *middleware.Sta if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpTerminateInstancesValidationMiddleware(stack); err != nil { return err } 
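Note on the pattern repeated above: every EC2 operation in this bump picks up the same three lines, registering addCredentialSource on the Initialize step of the operation's middleware stack, immediately after addUserAgentRetryMode. That helper is generated and unexported, so application code cannot call it directly; the sketch below shows the equivalent public hook, attaching an Initialize-step middleware through ec2.Options.APIOptions. The middleware ID and body are illustrative only, not the SDK's actual credential-source logic.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/ec2"
	"github.com/aws/smithy-go/middleware"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.Background())
	if err != nil {
		log.Fatal(err)
	}

	client := ec2.NewFromConfig(cfg, func(o *ec2.Options) {
		o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error {
			// Register on the Initialize step, the same step where the
			// generated addOperation*Middlewares functions insert
			// addCredentialSource in this patch.
			return stack.Initialize.Add(middleware.InitializeMiddlewareFunc(
				"exampleCredentialSourceProbe", // illustrative ID
				func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
					middleware.InitializeOutput, middleware.Metadata, error,
				) {
					// A real credential-source middleware would annotate the
					// request (e.g. the user agent) here; this sketch just
					// passes through to the next handler.
					return next.HandleInitialize(ctx, in)
				},
			), middleware.Before)
		})
	})

	// The middleware now runs on every operation of this client.
	if _, err := client.DescribeInstances(context.Background(), &ec2.DescribeInstancesInput{}); err != nil {
		log.Fatal(err)
	}
}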
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_UnassignIpv6Addresses.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_UnassignIpv6Addresses.go index 7eced4229..9391b4ea7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_UnassignIpv6Addresses.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_UnassignIpv6Addresses.go @@ -124,6 +124,9 @@ func (c *Client) addOperationUnassignIpv6AddressesMiddlewares(stack *middleware. if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpUnassignIpv6AddressesValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_UnassignPrivateIpAddresses.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_UnassignPrivateIpAddresses.go index a3066b551..5e0947232 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_UnassignPrivateIpAddresses.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_UnassignPrivateIpAddresses.go @@ -116,6 +116,9 @@ func (c *Client) addOperationUnassignPrivateIpAddressesMiddlewares(stack *middle if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpUnassignPrivateIpAddressesValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_UnassignPrivateNatGatewayAddress.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_UnassignPrivateNatGatewayAddress.go index 7db7b0852..bda920aef 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_UnassignPrivateNatGatewayAddress.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_UnassignPrivateNatGatewayAddress.go @@ -144,6 +144,9 @@ func (c *Client) addOperationUnassignPrivateNatGatewayAddressMiddlewares(stack * if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpUnassignPrivateNatGatewayAddressValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_UnlockSnapshot.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_UnlockSnapshot.go index 026673c28..f012a89ca 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_UnlockSnapshot.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_UnlockSnapshot.go @@ -119,6 +119,9 @@ func (c *Client) addOperationUnlockSnapshotMiddlewares(stack *middleware.Stack, if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpUnlockSnapshotValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_UnmonitorInstances.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_UnmonitorInstances.go index 339312272..35e27ca55 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_UnmonitorInstances.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_UnmonitorInstances.go @@ -121,6 +121,9 @@ func (c *Client) addOperationUnmonitorInstancesMiddlewares(stack *middleware.Sta if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return 
err + } if err = addOpUnmonitorInstancesValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_UpdateSecurityGroupRuleDescriptionsEgress.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_UpdateSecurityGroupRuleDescriptionsEgress.go index 4b61a4afa..690480f66 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_UpdateSecurityGroupRuleDescriptionsEgress.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_UpdateSecurityGroupRuleDescriptionsEgress.go @@ -133,6 +133,9 @@ func (c *Client) addOperationUpdateSecurityGroupRuleDescriptionsEgressMiddleware if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateSecurityGroupRuleDescriptionsEgress(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_UpdateSecurityGroupRuleDescriptionsIngress.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_UpdateSecurityGroupRuleDescriptionsIngress.go index f3562fe37..f2ee39e81 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_UpdateSecurityGroupRuleDescriptionsIngress.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_UpdateSecurityGroupRuleDescriptionsIngress.go @@ -134,6 +134,9 @@ func (c *Client) addOperationUpdateSecurityGroupRuleDescriptionsIngressMiddlewar if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateSecurityGroupRuleDescriptionsIngress(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_WithdrawByoipCidr.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_WithdrawByoipCidr.go index 5e33b7857..abb6d6536 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_WithdrawByoipCidr.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/api_op_WithdrawByoipCidr.go @@ -124,6 +124,9 @@ func (c *Client) addOperationWithdrawByoipCidrMiddlewares(stack *middleware.Stac if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpWithdrawByoipCidrValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/deserializers.go index 20fac8640..a9ec26df7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/deserializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/deserializers.go @@ -20769,12 +20769,35 @@ func (m *awsEc2query_deserializeOpDeregisterImage) HandleDeserialize(ctx context output := &DeregisterImageOutput{} out.Result = output - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response 
body, %w", err), + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), } } + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsEc2query_deserializeOpDocumentDeregisterImageOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + return out, metadata, err } @@ -62862,6 +62885,19 @@ func awsEc2query_deserializeDocumentAddress(v **types.Address, decoder smithyxml sv.PublicIpv4Pool = ptr.String(xtv) } + case strings.EqualFold("serviceManaged", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ServiceManaged = types.ServiceManaged(xtv) + } + case strings.EqualFold("tagSet", t.Name.Local): nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) if err := awsEc2query_deserializeDocumentTagList(&sv.Tags, nodeDecoder); err != nil { @@ -65833,6 +65869,19 @@ func awsEc2query_deserializeDocumentAvailabilityZone(v **types.AvailabilityZone, originalDecoder := decoder decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) switch { + case strings.EqualFold("groupLongName", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.GroupLongName = ptr.String(xtv) + } + case strings.EqualFold("groupName", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -66636,6 +66685,155 @@ func awsEc2query_deserializeDocumentBlockDeviceMappingListUnwrapped(v *[]types.B *v = sv return nil } +func awsEc2query_deserializeDocumentBlockDeviceMappingResponse(v **types.BlockDeviceMappingResponse, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.BlockDeviceMappingResponse + if *v == nil { + sv = &types.BlockDeviceMappingResponse{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("deviceName", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.DeviceName = ptr.String(xtv) + } + + case strings.EqualFold("ebs", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsEc2query_deserializeDocumentEbsBlockDeviceResponse(&sv.Ebs, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("noDevice", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.NoDevice = ptr.String(xtv) + } + + case strings.EqualFold("virtualName", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.VirtualName = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsEc2query_deserializeDocumentBlockDeviceMappingResponseList(v *[]types.BlockDeviceMappingResponse, decoder smithyxml.NodeDecoder) error { + if v == nil { + return 
fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.BlockDeviceMappingResponse + if *v == nil { + sv = make([]types.BlockDeviceMappingResponse, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("item", t.Name.Local): + var col types.BlockDeviceMappingResponse + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsEc2query_deserializeDocumentBlockDeviceMappingResponse(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsEc2query_deserializeDocumentBlockDeviceMappingResponseListUnwrapped(v *[]types.BlockDeviceMappingResponse, decoder smithyxml.NodeDecoder) error { + var sv []types.BlockDeviceMappingResponse + if *v == nil { + sv = make([]types.BlockDeviceMappingResponse, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.BlockDeviceMappingResponse + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsEc2query_deserializeDocumentBlockDeviceMappingResponse(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} func awsEc2query_deserializeDocumentBlockPublicAccessStates(v **types.BlockPublicAccessStates, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -78739,6 +78937,164 @@ func awsEc2query_deserializeDocumentEbsBlockDevice(v **types.EbsBlockDevice, dec return nil } +func awsEc2query_deserializeDocumentEbsBlockDeviceResponse(v **types.EbsBlockDeviceResponse, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.EbsBlockDeviceResponse + if *v == nil { + sv = &types.EbsBlockDeviceResponse{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("deleteOnTermination", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv, err := strconv.ParseBool(string(val)) + if err != nil { + return fmt.Errorf("expected Boolean to be of type *bool, got %T instead", val) + } + sv.DeleteOnTermination = ptr.Bool(xtv) + } + + case strings.EqualFold("encrypted", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv, err := strconv.ParseBool(string(val)) + if err != nil { + return fmt.Errorf("expected Boolean to be of type *bool, got %T instead", val) + } + sv.Encrypted = ptr.Bool(xtv) + } + + case strings.EqualFold("iops", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.Iops = ptr.Int32(int32(i64)) + } + + case strings.EqualFold("kmsKeyId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.KmsKeyId = ptr.String(xtv) + } + + case 
strings.EqualFold("snapshotId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.SnapshotId = ptr.String(xtv) + } + + case strings.EqualFold("throughput", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.Throughput = ptr.Int32(int32(i64)) + } + + case strings.EqualFold("volumeSize", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.VolumeSize = ptr.Int32(int32(i64)) + } + + case strings.EqualFold("volumeType", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.VolumeType = types.VolumeType(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + func awsEc2query_deserializeDocumentEbsInfo(v **types.EbsInfo, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -83847,6 +84203,12 @@ func awsEc2query_deserializeDocumentFleetLaunchTemplateOverrides(v **types.Fleet sv.AvailabilityZone = ptr.String(xtv) } + case strings.EqualFold("blockDeviceMappingSet", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsEc2query_deserializeDocumentBlockDeviceMappingResponseList(&sv.BlockDeviceMappings, nodeDecoder); err != nil { + return err + } + case strings.EqualFold("imageId", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -148692,6 +149054,12 @@ func awsEc2query_deserializeDocumentVpc(v **types.Vpc, decoder smithyxml.NodeDec sv.DhcpOptionsId = ptr.String(xtv) } + case strings.EqualFold("encryptionControl", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsEc2query_deserializeDocumentVpcEncryptionControl(&sv.EncryptionControl, nodeDecoder); err != nil { + return err + } + case strings.EqualFold("instanceTenancy", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -149623,6 +149991,247 @@ func awsEc2query_deserializeDocumentVpcClassicLinkListUnwrapped(v *[]types.VpcCl *v = sv return nil } +func awsEc2query_deserializeDocumentVpcEncryptionControl(v **types.VpcEncryptionControl, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.VpcEncryptionControl + if *v == nil { + sv = &types.VpcEncryptionControl{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("mode", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Mode = types.VpcEncryptionControlMode(xtv) + } + + case strings.EqualFold("resourceExclusions", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsEc2query_deserializeDocumentVpcEncryptionControlExclusions(&sv.ResourceExclusions, nodeDecoder); err != nil { + return err + } + + 
case strings.EqualFold("state", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.State = types.VpcEncryptionControlState(xtv) + } + + case strings.EqualFold("stateMessage", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.StateMessage = ptr.String(xtv) + } + + case strings.EqualFold("tagSet", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsEc2query_deserializeDocumentTagList(&sv.Tags, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("vpcEncryptionControlId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.VpcEncryptionControlId = ptr.String(xtv) + } + + case strings.EqualFold("vpcId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.VpcId = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsEc2query_deserializeDocumentVpcEncryptionControlExclusion(v **types.VpcEncryptionControlExclusion, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.VpcEncryptionControlExclusion + if *v == nil { + sv = &types.VpcEncryptionControlExclusion{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("state", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.State = types.VpcEncryptionControlExclusionState(xtv) + } + + case strings.EqualFold("stateMessage", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.StateMessage = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsEc2query_deserializeDocumentVpcEncryptionControlExclusions(v **types.VpcEncryptionControlExclusions, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.VpcEncryptionControlExclusions + if *v == nil { + sv = &types.VpcEncryptionControlExclusions{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("egressOnlyInternetGateway", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsEc2query_deserializeDocumentVpcEncryptionControlExclusion(&sv.EgressOnlyInternetGateway, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("internetGateway", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := 
awsEc2query_deserializeDocumentVpcEncryptionControlExclusion(&sv.InternetGateway, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("natGateway", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsEc2query_deserializeDocumentVpcEncryptionControlExclusion(&sv.NatGateway, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("virtualPrivateGateway", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsEc2query_deserializeDocumentVpcEncryptionControlExclusion(&sv.VirtualPrivateGateway, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("vpcPeering", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsEc2query_deserializeDocumentVpcEncryptionControlExclusion(&sv.VpcPeering, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + func awsEc2query_deserializeDocumentVpcEndpoint(v **types.VpcEndpoint, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -162346,6 +162955,42 @@ func awsEc2query_deserializeOpDocumentDeprovisionPublicIpv4PoolCidrOutput(v **De return nil } +func awsEc2query_deserializeOpDocumentDeregisterImageOutput(v **DeregisterImageOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *DeregisterImageOutput + if *v == nil { + sv = &DeregisterImageOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + func awsEc2query_deserializeOpDocumentDeregisterInstanceEventNotificationAttributesOutput(v **DeregisterInstanceEventNotificationAttributesOutput, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/generated.json index a10edff0b..8d95a4e82 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/generated.json +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/generated.json @@ -682,6 +682,7 @@ "protocol_test.go", "serializers.go", "snapshot_test.go", + "sra_operation_order_test.go", "types/enums.go", "types/types.go", "validators.go" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/go_module_metadata.go index 487a50ae4..c9bca4dcd 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/go_module_metadata.go @@ -3,4 +3,4 @@ package ec2 // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.203.1" +const goModuleVersion = "1.210.1" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/serializers.go index 8aa724869..d053fc4c4 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/serializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/serializers.go @@ -48306,6 +48306,97 @@ func awsEc2query_serializeDocumentFilterList(v []types.Filter, value query.Value return nil } +func awsEc2query_serializeDocumentFleetBlockDeviceMappingRequest(v *types.FleetBlockDeviceMappingRequest, value query.Value) error { + object := value.Object() + _ = object + + if v.DeviceName != nil { + objectKey := object.Key("DeviceName") + objectKey.String(*v.DeviceName) + } + + if v.Ebs != nil { + objectKey := object.Key("Ebs") + if err := awsEc2query_serializeDocumentFleetEbsBlockDeviceRequest(v.Ebs, objectKey); err != nil { + return err + } + } + + if v.NoDevice != nil { + objectKey := object.Key("NoDevice") + objectKey.String(*v.NoDevice) + } + + if v.VirtualName != nil { + objectKey := object.Key("VirtualName") + objectKey.String(*v.VirtualName) + } + + return nil +} + +func awsEc2query_serializeDocumentFleetBlockDeviceMappingRequestList(v []types.FleetBlockDeviceMappingRequest, value query.Value) error { + if len(v) == 0 { + return nil + } + array := value.Array("BlockDeviceMapping") + + for i := range v { + av := array.Value() + if err := awsEc2query_serializeDocumentFleetBlockDeviceMappingRequest(&v[i], av); err != nil { + return err + } + } + return nil +} + +func awsEc2query_serializeDocumentFleetEbsBlockDeviceRequest(v *types.FleetEbsBlockDeviceRequest, value query.Value) error { + object := value.Object() + _ = object + + if v.DeleteOnTermination != nil { + objectKey := object.Key("DeleteOnTermination") + objectKey.Boolean(*v.DeleteOnTermination) + } + + if v.Encrypted != nil { + objectKey := object.Key("Encrypted") + objectKey.Boolean(*v.Encrypted) + } + + if v.Iops != nil { + objectKey := object.Key("Iops") + objectKey.Integer(*v.Iops) + } + + if v.KmsKeyId != nil { + objectKey := object.Key("KmsKeyId") + objectKey.String(*v.KmsKeyId) + } + + if v.SnapshotId != nil { + objectKey := object.Key("SnapshotId") + objectKey.String(*v.SnapshotId) + } + + if v.Throughput != nil { + objectKey := object.Key("Throughput") + objectKey.Integer(*v.Throughput) + } + + if v.VolumeSize != nil { + objectKey := object.Key("VolumeSize") + objectKey.Integer(*v.VolumeSize) + } + + if len(v.VolumeType) > 0 { + objectKey := object.Key("VolumeType") + objectKey.String(string(v.VolumeType)) + } + + return nil +} + func awsEc2query_serializeDocumentFleetIdSet(v []string, value query.Value) error { if len(v) == 0 { return nil @@ -48379,6 +48470,13 @@ func awsEc2query_serializeDocumentFleetLaunchTemplateOverridesRequest(v *types.F objectKey.String(*v.AvailabilityZone) } + if v.BlockDeviceMappings != nil { + objectKey := object.FlatKey("BlockDeviceMapping") + if err := awsEc2query_serializeDocumentFleetBlockDeviceMappingRequestList(v.BlockDeviceMappings, objectKey); err != nil { + return err + } + } + if v.ImageId != nil { objectKey := object.Key("ImageId") objectKey.String(*v.ImageId) @@ -58203,6 +58301,11 @@ func awsEc2query_serializeOpDocumentCopyImageInput(v *CopyImageInput, value quer objectKey.String(*v.Name) } + if v.SnapshotCopyCompletionDurationMinutes != nil { + objectKey := object.Key("SnapshotCopyCompletionDurationMinutes") + objectKey.Long(*v.SnapshotCopyCompletionDurationMinutes) + } + if v.SourceImageId != nil { objectKey := object.Key("SourceImageId") objectKey.String(*v.SourceImageId) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/types/enums.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/types/enums.go index 
90c212720..78e31dafd 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/types/enums.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/types/enums.go @@ -7875,6 +7875,25 @@ func (ServiceConnectivityType) Values() []ServiceConnectivityType { } } +type ServiceManaged string + +// Enum values for ServiceManaged +const ( + ServiceManagedAlb ServiceManaged = "alb" + ServiceManagedNlb ServiceManaged = "nlb" +) + +// Values returns all known values for ServiceManaged. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (ServiceManaged) Values() []ServiceManaged { + return []ServiceManaged{ + "alb", + "nlb", + } +} + type ServiceState string // Enum values for ServiceState @@ -9690,6 +9709,78 @@ func (VpcCidrBlockStateCode) Values() []VpcCidrBlockStateCode { } } +type VpcEncryptionControlExclusionState string + +// Enum values for VpcEncryptionControlExclusionState +const ( + VpcEncryptionControlExclusionStateEnabling VpcEncryptionControlExclusionState = "enabling" + VpcEncryptionControlExclusionStateEnabled VpcEncryptionControlExclusionState = "enabled" + VpcEncryptionControlExclusionStateDisabling VpcEncryptionControlExclusionState = "disabling" + VpcEncryptionControlExclusionStateDisabled VpcEncryptionControlExclusionState = "disabled" +) + +// Values returns all known values for VpcEncryptionControlExclusionState. Note +// that this can be expanded in the future, and so it is only as up to date as the +// client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (VpcEncryptionControlExclusionState) Values() []VpcEncryptionControlExclusionState { + return []VpcEncryptionControlExclusionState{ + "enabling", + "enabled", + "disabling", + "disabled", + } +} + +type VpcEncryptionControlMode string + +// Enum values for VpcEncryptionControlMode +const ( + VpcEncryptionControlModeMonitor VpcEncryptionControlMode = "monitor" + VpcEncryptionControlModeEnforce VpcEncryptionControlMode = "enforce" +) + +// Values returns all known values for VpcEncryptionControlMode. Note that this +// can be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (VpcEncryptionControlMode) Values() []VpcEncryptionControlMode { + return []VpcEncryptionControlMode{ + "monitor", + "enforce", + } +} + +type VpcEncryptionControlState string + +// Enum values for VpcEncryptionControlState +const ( + VpcEncryptionControlStateEnforceInProgress VpcEncryptionControlState = "enforce-in-progress" + VpcEncryptionControlStateMonitorInProgress VpcEncryptionControlState = "monitor-in-progress" + VpcEncryptionControlStateEnforceFailed VpcEncryptionControlState = "enforce-failed" + VpcEncryptionControlStateMonitorFailed VpcEncryptionControlState = "monitor-failed" + VpcEncryptionControlStateDeleting VpcEncryptionControlState = "deleting" + VpcEncryptionControlStateDeleted VpcEncryptionControlState = "deleted" + VpcEncryptionControlStateAvailable VpcEncryptionControlState = "available" +) + +// Values returns all known values for VpcEncryptionControlState. Note that this +// can be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. 
+func (VpcEncryptionControlState) Values() []VpcEncryptionControlState { + return []VpcEncryptionControlState{ + "enforce-in-progress", + "monitor-in-progress", + "enforce-failed", + "monitor-failed", + "deleting", + "deleted", + "available", + } +} + type VpcEndpointType string // Enum values for VpcEndpointType diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/types/types.go index d9cfe4646..521b28341 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/types/types.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ec2/types/types.go @@ -324,6 +324,11 @@ type Address struct { // The ID of an address pool. PublicIpv4Pool *string + // The service that manages the elastic IP address. + // + // The only option supported today is alb . + ServiceManaged ServiceManaged + // Any tags assigned to the Elastic IP address. Tags []Tag @@ -814,6 +819,10 @@ type AuthorizationRule struct { // Describes Availability Zones, Local Zones, and Wavelength Zones. type AvailabilityZone struct { + // The long name of the Availability Zone group, Local Zone group, or Wavelength + // Zone group. + GroupLongName *string + // The name of the zone group. For example: // // - Availability Zones - us-east-1-zg-1 @@ -833,7 +842,7 @@ type AvailabilityZone struct { // opt-in-not-required . // // For Local Zones and Wavelength Zones, this parameter is the opt-in status. The - // possible values are opted-in , and not-opted-in . + // possible values are opted-in and not-opted-in . OptInStatus AvailabilityZoneOptInStatus // The ID of the zone that handles some of the Local Zone or Wavelength Zone @@ -847,8 +856,8 @@ type AvailabilityZone struct { // The name of the Region. RegionName *string - // The state of the Availability Zone, Local Zone, or Wavelength Zone. This value - // is always available . + // The state of the Availability Zone, Local Zone, or Wavelength Zone. The + // possible values are available , unavailable , and constrained . State AvailabilityZoneState // The ID of the Availability Zone, Local Zone, or Wavelength Zone. @@ -857,8 +866,9 @@ type AvailabilityZone struct { // The name of the Availability Zone, Local Zone, or Wavelength Zone. ZoneName *string - // The type of zone. The valid values are availability-zone , local-zone , and - // wavelength-zone . + // The type of zone. + // + // Valid values: availability-zone | local-zone | wavelength-zone ZoneType *string noSmithyDocumentSerde @@ -998,6 +1008,26 @@ type BlockDeviceMapping struct { noSmithyDocumentSerde } +// Describes a block device mapping, which defines the EBS volumes and instance +// store volumes to attach to an instance at launch. +type BlockDeviceMappingResponse struct { + + // The device name (for example, /dev/sdh or xvdh ). + DeviceName *string + + // Parameters used to automatically set up EBS volumes when the instance is + // launched. + Ebs *EbsBlockDeviceResponse + + // Suppresses the specified device included in the block device mapping. + NoDevice *string + + // The virtual device name. + VirtualName *string + + noSmithyDocumentSerde +} + // The state of VPC Block Public Access (BPA). type BlockPublicAccessStates struct { @@ -1503,22 +1533,28 @@ type CapacityReservation struct { // to request parameters that are not valid, capacity constraints, or instance // limit constraints. You can view a failed request for 60 minutes. 
// - // - scheduled - (Future-dated Capacity Reservations only) The future-dated - // Capacity Reservation request was approved and the Capacity Reservation is - // scheduled for delivery on the requested start date. + // - scheduled - (Future-dated Capacity Reservations) The future-dated Capacity + // Reservation request was approved and the Capacity Reservation is scheduled for + // delivery on the requested start date. + // + // - payment-pending - (Capacity Blocks) The upfront payment has not been + // processed yet. // - // - assessing - (Future-dated Capacity Reservations only) Amazon EC2 is - // assessing your request for a future-dated Capacity Reservation. + // - payment-failed - (Capacity Blocks) The upfront payment was not processed in + // the 12-hour time frame. Your Capacity Block was released. // - // - delayed - (Future-dated Capacity Reservations only) Amazon EC2 encountered a + // - assessing - (Future-dated Capacity Reservations) Amazon EC2 is assessing + // your request for a future-dated Capacity Reservation. + // + // - delayed - (Future-dated Capacity Reservations) Amazon EC2 encountered a // delay in provisioning the requested future-dated Capacity Reservation. Amazon // EC2 is unable to deliver the requested capacity by the requested start date and // time. // - // - unsupported - (Future-dated Capacity Reservations only) Amazon EC2 can't - // support the future-dated Capacity Reservation request due to capacity - // constraints. You can view unsupported requests for 30 days. The Capacity - // Reservation will not be delivered. + // - unsupported - (Future-dated Capacity Reservations) Amazon EC2 can't support + // the future-dated Capacity Reservation request due to capacity constraints. You + // can view unsupported requests for 30 days. The Capacity Reservation will not be + // delivered. State CapacityReservationState // Any tags assigned to the Capacity Reservation. @@ -2922,7 +2958,7 @@ type CreateVerifiedAccessEndpointLoadBalancerOptions struct { // The IP protocol. Protocol VerifiedAccessEndpointProtocol - // The IDs of the subnets. + // The IDs of the subnets. You can specify only one subnet per Availability Zone. SubnetIds []string noSmithyDocumentSerde @@ -2961,7 +2997,7 @@ type CreateVerifiedAccessEndpointRdsOptions struct { // The RDS endpoint. RdsEndpoint *string - // The IDs of the subnets. + // The IDs of the subnets. You can specify only one subnet per Availability Zone. SubnetIds []string noSmithyDocumentSerde @@ -3925,6 +3961,42 @@ type EbsBlockDevice struct { noSmithyDocumentSerde } +// Describes a block device for an EBS volume. +type EbsBlockDeviceResponse struct { + + // Indicates whether the volume is deleted on instance termination. + DeleteOnTermination *bool + + // Indicates whether the volume is encrypted. + Encrypted *bool + + // The number of I/O operations per second (IOPS). For gp3 , io1 , and io2 + // volumes, this represents the number of IOPS that are provisioned for the volume. + // For gp2 volumes, this represents the baseline performance of the volume and the + // rate at which the volume accumulates I/O credits for bursting. + Iops *int32 + + // Identifier (key ID, key alias, key ARN, or alias ARN) of the customer managed + // KMS key to use for EBS encryption. + KmsKeyId *string + + // The ID of the snapshot. + SnapshotId *string + + // The throughput that the volume supports, in MiB/s. + Throughput *int32 + + // The size of the volume, in GiBs. + VolumeSize *int32 + + // The volume type. 
For more information, see [Amazon EBS volume types] in the Amazon EBS User Guide. + // + // [Amazon EBS volume types]: https://docs.aws.amazon.com/ebs/latest/userguide/ebs-volume-types.html + VolumeType VolumeType + + noSmithyDocumentSerde +} + // Describes the Amazon EBS features supported by the instance type. type EbsInfo struct { @@ -4206,16 +4278,12 @@ type ElasticGpuSpecification struct { // Deprecated. // -// Amazon Elastic Graphics reached end of life on January 8, 2024. For workloads -// that require graphics acceleration, we recommend that you use Amazon EC2 G4ad, -// G4dn, or G5 instances. +// Amazon Elastic Graphics reached end of life on January 8, 2024. type ElasticGpuSpecificationResponse struct { // Deprecated. // - // Amazon Elastic Graphics reached end of life on January 8, 2024. For workloads - // that require graphics acceleration, we recommend that you use Amazon EC2 G4ad, - // G4dn, or G5 instances. + // Amazon Elastic Graphics reached end of life on January 8, 2024. Type *string noSmithyDocumentSerde @@ -5044,6 +5112,54 @@ type FirewallStatelessRule struct { noSmithyDocumentSerde } +// Describes a block device mapping, which defines the EBS volumes and instance +// store volumes to attach to an instance at launch. +// +// To override a block device mapping specified in the launch template: +// +// - Specify the exact same DeviceName here as specified in the launch template. +// +// - Only specify the parameters you want to change. +// +// - Any parameters you don't specify here will keep their original launch +// template values. +// +// To add a new block device mapping: +// +// - Specify a DeviceName that doesn't exist in the launch template. +// +// - Specify all desired parameters here. +type FleetBlockDeviceMappingRequest struct { + + // The device name (for example, /dev/sdh or xvdh ). + DeviceName *string + + // Parameters used to automatically set up EBS volumes when the instance is + // launched. + Ebs *FleetEbsBlockDeviceRequest + + // To omit the device from the block device mapping, specify an empty string. When + // this property is specified, the device is removed from the block device mapping + // regardless of the assigned value. + NoDevice *string + + // The virtual device name ( ephemeralN ). Instance store volumes are numbered + // starting from 0. An instance type with 2 available instance store volumes can + // specify mappings for ephemeral0 and ephemeral1 . The number of available + // instance store volumes depends on the instance type. After you connect to the + // instance, you must mount the volume. + // + // NVMe instance store volumes are automatically enumerated and assigned a device + // name. Including them in your block device mapping has no effect. + // + // Constraints: For M3 instances, you must specify instance store volumes in the + // block device mapping for the instance. When you launch an M3 instance, we ignore + // any instance store volumes specified in the block device mapping for the AMI. + VirtualName *string + + noSmithyDocumentSerde +} + // Information about a Capacity Reservation in a Capacity Reservation Fleet. type FleetCapacityReservation struct { @@ -5201,6 +5317,123 @@ type FleetData struct { noSmithyDocumentSerde } +// Describes a block device for an EBS volume. +type FleetEbsBlockDeviceRequest struct { + + // Indicates whether the EBS volume is deleted on instance termination. For more + // information, see [Preserve data when an instance is terminated]in the Amazon EC2 User Guide. 
+ // + // [Preserve data when an instance is terminated]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/preserving-volumes-on-termination.html + DeleteOnTermination *bool + + // Indicates whether the encryption state of an EBS volume is changed while being + // restored from a backing snapshot. The effect of setting the encryption state to + // true depends on the volume origin (new or from a snapshot), starting encryption + // state, ownership, and whether encryption by default is enabled. For more + // information, see [Amazon EBS encryption]in the Amazon EBS User Guide. + // + // In no case can you remove encryption from an encrypted volume. + // + // Encrypted volumes can only be attached to instances that support Amazon EBS + // encryption. For more information, see [Supported instance types]. + // + // This parameter is not returned by . + // + // For and , whether you can include this parameter, and the allowed values differ + // depending on the type of block device mapping you are creating. + // + // - If you are creating a block device mapping for a new (empty) volume, you + // can include this parameter, and specify either true for an encrypted volume, + // or false for an unencrypted volume. If you omit this parameter, it defaults to + // false (unencrypted). + // + // - If you are creating a block device mapping from an existing encrypted or + // unencrypted snapshot, you must omit this parameter. If you include this + // parameter, the request will fail, regardless of the value that you specify. + // + // - If you are creating a block device mapping from an existing unencrypted + // volume, you can include this parameter, but you must specify false . If you + // specify true , the request will fail. In this case, we recommend that you omit + // the parameter. + // + // - If you are creating a block device mapping from an existing encrypted + // volume, you can include this parameter, and specify either true or false . + // However, if you specify false , the parameter is ignored and the block device + // mapping is always encrypted. In this case, we recommend that you omit the + // parameter. + // + // [Amazon EBS encryption]: https://docs.aws.amazon.com/ebs/latest/userguide/ebs-encryption.html + // [Supported instance types]: https://docs.aws.amazon.com/ebs/latest/userguide/ebs-encryption-requirements.html#ebs-encryption_supported_instances + Encrypted *bool + + // The number of I/O operations per second (IOPS). For gp3 , io1 , and io2 + // volumes, this represents the number of IOPS that are provisioned for the volume. + // For gp2 volumes, this represents the baseline performance of the volume and the + // rate at which the volume accumulates I/O credits for bursting. + // + // The following are the supported values for each volume type: + // + // - gp3 : 3,000 - 16,000 IOPS + // + // - io1 : 100 - 64,000 IOPS + // + // - io2 : 100 - 256,000 IOPS + // + // For io2 volumes, you can achieve up to 256,000 IOPS on [instances built on the Nitro System]. On other instances, + // you can achieve performance up to 32,000 IOPS. + // + // This parameter is required for io1 and io2 volumes. The default for gp3 volumes + // is 3,000 IOPS. + // + // [instances built on the Nitro System]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#ec2-nitro-instances + Iops *int32 + + // Identifier (key ID, key alias, key ARN, or alias ARN) of the customer managed + // KMS key to use for EBS encryption. 
+ // + // This parameter is only supported on BlockDeviceMapping objects called by [CreateFleet], [RequestSpotInstances], + // and [RunInstances]. + // + // [CreateFleet]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateFleet.html + // [RequestSpotInstances]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RequestSpotInstances.html + // [RunInstances]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances.html + KmsKeyId *string + + // The ID of the snapshot. + SnapshotId *string + + // The throughput that the volume supports, in MiB/s. + // + // This parameter is valid only for gp3 volumes. + // + // Valid Range: Minimum value of 125. Maximum value of 1000. + Throughput *int32 + + // The size of the volume, in GiBs. You must specify either a snapshot ID or a + // volume size. If you specify a snapshot, the default is the snapshot size. You + // can specify a volume size that is equal to or larger than the snapshot size. + // + // The following are the supported sizes for each volume type: + // + // - gp2 and gp3 : 1 - 16,384 GiB + // + // - io1 : 4 - 16,384 GiB + // + // - io2 : 4 - 65,536 GiB + // + // - st1 and sc1 : 125 - 16,384 GiB + // + // - standard : 1 - 1024 GiB + VolumeSize *int32 + + // The volume type. For more information, see [Amazon EBS volume types] in the Amazon EBS User Guide. + // + // [Amazon EBS volume types]: https://docs.aws.amazon.com/ebs/latest/userguide/ebs-volume-types.html + VolumeType VolumeType + + noSmithyDocumentSerde +} + // Describes a launch template and overrides. type FleetLaunchTemplateConfig struct { @@ -5237,6 +5470,16 @@ type FleetLaunchTemplateOverrides struct { // The Availability Zone in which to launch the instances. AvailabilityZone *string + // The block device mappings, which define the EBS volumes and instance store + // volumes to attach to the instance at launch. + // + // Supported only for fleets of type instant . + // + // For more information, see [Block device mappings for volumes on Amazon EC2 instances] in the Amazon EC2 User Guide. + // + // [Block device mappings for volumes on Amazon EC2 instances]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html + BlockDeviceMappings []BlockDeviceMappingResponse + // The ID of the AMI in the format ami-17characters00000 . // // Alternatively, you can specify a Systems Manager parameter, using one of the @@ -5291,6 +5534,9 @@ type FleetLaunchTemplateOverrides struct { // // If you specify a maximum price, your instances will be interrupted more // frequently than if you do not specify this parameter. + // + // If you specify a maximum price, it must be more than USD $0.001. Specifying a + // value below USD $0.001 will result in an InvalidParameterValue error message. MaxPrice *string // The location where the instance launched, if applicable. @@ -5339,6 +5585,16 @@ type FleetLaunchTemplateOverridesRequest struct { // The Availability Zone in which to launch the instances. AvailabilityZone *string + // The block device mappings, which define the EBS volumes and instance store + // volumes to attach to the instance at launch. + // + // Supported only for fleets of type instant . + // + // For more information, see [Block device mappings for volumes on Amazon EC2 instances] in the Amazon EC2 User Guide. 
+ // + // [Block device mappings for volumes on Amazon EC2 instances]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html + BlockDeviceMappings []FleetBlockDeviceMappingRequest + // The ID of the AMI in the format ami-17characters00000 . // // Alternatively, you can specify a Systems Manager parameter, using one of the @@ -5393,6 +5649,9 @@ type FleetLaunchTemplateOverridesRequest struct { // // If you specify a maximum price, your instances will be interrupted more // frequently than if you do not specify this parameter. + // + // If you specify a maximum price, it must be more than USD $0.001. Specifying a + // value below USD $0.001 will result in an InvalidParameterValue error message. MaxPrice *string // The location where the instance launched, if applicable. @@ -7213,6 +7472,10 @@ type InstanceEventWindowAssociationRequest struct { // The instance tags to associate with the event window. Any instances associated // with the tags will be associated with the event window. + // + // Note that while you can't create tag keys beginning with aws: , you can specify + // existing Amazon Web Services managed tag keys (with the aws: prefix) when + // specifying them as targets to associate with the event window. InstanceTags []Tag noSmithyDocumentSerde @@ -7229,6 +7492,10 @@ type InstanceEventWindowAssociationTarget struct { // The instance tags associated with the event window. Any instances associated // with the tags will be associated with the event window. + // + // Note that while you can't create tag keys beginning with aws: , you can specify + // existing Amazon Web Services managed tag keys (with the aws: prefix) when + // specifying them as targets to associate with the event window. Tags []Tag noSmithyDocumentSerde @@ -8015,8 +8282,6 @@ type InstanceRequirements struct { // // - For instance types with GPU accelerators, specify gpu . // - // - For instance types with Inference accelerators, specify inference . - // // Default: Any accelerator type AcceleratorTypes []AcceleratorType @@ -8386,8 +8651,6 @@ type InstanceRequirementsRequest struct { // // - For instance types with GPU accelerators, specify gpu . // - // - For instance types with Inference accelerators, specify inference . - // // Default: Any accelerator type AcceleratorTypes []AcceleratorType @@ -10562,9 +10825,9 @@ type LaunchTemplateConfig struct { type LaunchTemplateCpuOptions struct { // Indicates whether the instance is enabled for AMD SEV-SNP. For more - // information, see [AMD SEV-SNP]. + // information, see [AMD SEV-SNP for Amazon EC2 instances]. // - // [AMD SEV-SNP]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/sev-snp.html + // [AMD SEV-SNP for Amazon EC2 instances]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/sev-snp.html AmdSevSnp AmdSevSnpSpecification // The number of CPU cores for the instance. @@ -10581,10 +10844,10 @@ type LaunchTemplateCpuOptions struct { type LaunchTemplateCpuOptionsRequest struct { // Indicates whether to enable the instance for AMD SEV-SNP. AMD SEV-SNP is - // supported with M6a, R6a, and C6a instance types only. For more information, see [AMD SEV-SNP] + // supported with M6a, R6a, and C6a instance types only. For more information, see [AMD SEV-SNP for Amazon EC2 instances] // . 
// - // [AMD SEV-SNP]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/sev-snp.html + // [AMD SEV-SNP for Amazon EC2 instances]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/sev-snp.html AmdSevSnp AmdSevSnpSpecification // The number of CPU cores for the instance. @@ -10657,7 +10920,7 @@ type LaunchTemplateEbsBlockDeviceRequest struct { // // This parameter is supported for io1 , io2 , and gp3 volumes only. // - // [instances built on the Nitro System]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#ec2-nitro-instances + // [instances built on the Nitro System]: https://docs.aws.amazon.com/ec2/latest/instancetypes/ec2-nitro-instances.html Iops *int32 // Identifier (key ID, key alias, key ARN, or alias ARN) of the customer managed @@ -10781,10 +11044,10 @@ type LaunchTemplateEnclaveOptions struct { } // Indicates whether the instance is enabled for Amazon Web Services Nitro -// Enclaves. For more information, see [What is Amazon Web Services Nitro Enclaves?]in the Amazon Web Services Nitro Enclaves +// Enclaves. For more information, see [What is Nitro Enclaves?]in the Amazon Web Services Nitro Enclaves // User Guide. // -// [What is Amazon Web Services Nitro Enclaves?]: https://docs.aws.amazon.com/enclaves/latest/user/nitro-enclave.html +// [What is Nitro Enclaves?]: https://docs.aws.amazon.com/enclaves/latest/user/nitro-enclave.html type LaunchTemplateEnclaveOptionsRequest struct { // To enable the instance for Amazon Web Services Nitro Enclaves, set this @@ -10887,10 +11150,10 @@ type LaunchTemplateInstanceMarketOptionsRequest struct { noSmithyDocumentSerde } -// The metadata options for the instance. For more information, see [Instance metadata and user data] in the Amazon +// The metadata options for the instance. For more information, see [Use instance metadata to manage your EC2 instance] in the Amazon // EC2 User Guide. // -// [Instance metadata and user data]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html +// [Use instance metadata to manage your EC2 instance]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html type LaunchTemplateInstanceMetadataOptions struct { // Enables or disables the HTTP metadata endpoint on your instances. If the @@ -10929,11 +11192,11 @@ type LaunchTemplateInstanceMetadataOptions struct { // Set to enabled to allow access to instance tags from the instance metadata. Set // to disabled to turn off access to instance tags from the instance metadata. For - // more information, see [Work with instance tags using the instance metadata]. + // more information, see [View tags for your EC2 instances using instance metadata]. // // Default: disabled // - // [Work with instance tags using the instance metadata]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#work-with-tags-in-IMDS + // [View tags for your EC2 instances using instance metadata]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/work-with-tags-in-IMDS.html InstanceMetadataTags LaunchTemplateInstanceMetadataTagsState // The state of the metadata option changes. @@ -10947,10 +11210,10 @@ type LaunchTemplateInstanceMetadataOptions struct { noSmithyDocumentSerde } -// The metadata options for the instance. For more information, see [Instance metadata and user data] in the Amazon +// The metadata options for the instance. For more information, see [Use instance metadata to manage your EC2 instance] in the Amazon // EC2 User Guide. 
// -// [Instance metadata and user data]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html +// [Use instance metadata to manage your EC2 instance]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html type LaunchTemplateInstanceMetadataOptionsRequest struct { // Enables or disables the HTTP metadata endpoint on your instances. If the @@ -10992,11 +11255,11 @@ type LaunchTemplateInstanceMetadataOptionsRequest struct { // Set to enabled to allow access to instance tags from the instance metadata. Set // to disabled to turn off access to instance tags from the instance metadata. For - // more information, see [Work with instance tags using the instance metadata]. + // more information, see [View tags for your EC2 instances using instance metadata]. // // Default: disabled // - // [Work with instance tags using the instance metadata]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#work-with-tags-in-IMDS + // [View tags for your EC2 instances using instance metadata]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/work-with-tags-in-IMDS.html InstanceMetadataTags LaunchTemplateInstanceMetadataTagsState noSmithyDocumentSerde @@ -11010,9 +11273,9 @@ type LaunchTemplateInstanceNetworkInterfaceSpecification struct { // // Use this option when you launch an instance in a Wavelength Zone and want to // associate a Carrier IP address with the network interface. For more information - // about Carrier IP addresses, see [Carrier IP addresses]in the Wavelength Developer Guide. + // about Carrier IP addresses, see [Carrier IP address]in the Wavelength Developer Guide. // - // [Carrier IP addresses]: https://docs.aws.amazon.com/wavelength/latest/developerguide/how-wavelengths-work.html#provider-owned-ip + // [Carrier IP address]: https://docs.aws.amazon.com/wavelength/latest/developerguide/how-wavelengths-work.html#provider-owned-ip AssociateCarrierIpAddress *bool // Indicates whether to associate a public IPv4 address with eth0 for a new @@ -11150,7 +11413,7 @@ type LaunchTemplateInstanceNetworkInterfaceSpecificationRequest struct { Groups []string // The type of network interface. To create an Elastic Fabric Adapter (EFA), - // specify efa or efa . For more information, see [Elastic Fabric Adapter] in the Amazon EC2 User Guide. + // specify efa or efa . For more information, see [Elastic Fabric Adapter for AI/ML and HPC workloads on Amazon EC2] in the Amazon EC2 User Guide. // // If you are not creating an EFA, specify interface or omit this parameter. // @@ -11159,7 +11422,7 @@ type LaunchTemplateInstanceNetworkInterfaceSpecificationRequest struct { // // Valid values: interface | efa | efa-only // - // [Elastic Fabric Adapter]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/efa.html + // [Elastic Fabric Adapter for AI/ML and HPC workloads on Amazon EC2]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/efa.html InterfaceType *string // The number of IPv4 prefixes to be automatically assigned to the network @@ -11502,13 +11765,12 @@ type LaunchTemplateSpotMarketOptions struct { // The behavior when a Spot Instance is interrupted. InstanceInterruptionBehavior InstanceInterruptionBehavior - // The maximum hourly price you're willing to pay for the Spot Instances. We do - // not recommend using this parameter because it can lead to increased - // interruptions. If you do not specify this parameter, you will pay the current - // Spot price. 
- // - // If you specify a maximum price, your Spot Instances will be interrupted more - // frequently than if you do not specify this parameter. + // The maximum hourly price you're willing to pay for a Spot Instance. We do not + // recommend using this parameter because it can lead to increased interruptions. + // If you do not specify this parameter, you will pay the current Spot price. If + // you do specify this parameter, it must be more than USD $0.001. Specifying a + // value below USD $0.001 will result in an InvalidParameterValue error message + // when the launch template is used to launch an instance. MaxPrice *string // The Spot Instance request type. @@ -11532,10 +11794,12 @@ type LaunchTemplateSpotMarketOptionsRequest struct { // The behavior when a Spot Instance is interrupted. The default is terminate . InstanceInterruptionBehavior InstanceInterruptionBehavior - // The maximum hourly price you're willing to pay for the Spot Instances. We do - // not recommend using this parameter because it can lead to increased - // interruptions. If you do not specify this parameter, you will pay the current - // Spot price. + // The maximum hourly price you're willing to pay for a Spot Instance. We do not + // recommend using this parameter because it can lead to increased interruptions. + // If you do not specify this parameter, you will pay the current Spot price. If + // you do specify this parameter, it must be more than USD $0.001. Specifying a + // value below USD $0.001 will result in an InvalidParameterValue error message + // when the launch template is used to launch an instance. // // If you specify a maximum price, your Spot Instances will be interrupted more // frequently than if you do not specify this parameter. @@ -13599,11 +13863,11 @@ type OperatorRequest struct { noSmithyDocumentSerde } -// Describes whether the resource is managed by an service provider and, if so, +// Describes whether the resource is managed by a service provider and, if so, // describes the service provider that manages it. type OperatorResponse struct { - // If true , the resource is managed by an service provider. + // If true , the resource is managed by a service provider. Managed *bool // If managed is true , then the principal is returned. The principal is the @@ -14872,19 +15136,19 @@ type RequestLaunchTemplateData struct { // attributes (instance type, platform, Availability Zone). CapacityReservationSpecification *LaunchTemplateCapacityReservationSpecificationRequest - // The CPU options for the instance. For more information, see [Optimize CPU options] in the Amazon EC2 + // The CPU options for the instance. For more information, see [CPU options for Amazon EC2 instances] in the Amazon EC2 // User Guide. // - // [Optimize CPU options]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html + // [CPU options for Amazon EC2 instances]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html CpuOptions *LaunchTemplateCpuOptionsRequest // The credit option for CPU usage of the instance. Valid only for T instances. CreditSpecification *CreditSpecificationRequest // Indicates whether to enable the instance for stop protection. For more - // information, see [Enable stop protection for your instance]in the Amazon EC2 User Guide. + // information, see [Enable stop protection for your EC2 instances]in the Amazon EC2 User Guide. 
// - // [Enable stop protection for your instance]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-stop-protection.html + // [Enable stop protection for your EC2 instances]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-stop-protection.html DisableApiStop *bool // Indicates whether termination protection is enabled for the instance. The @@ -14903,9 +15167,7 @@ type RequestLaunchTemplateData struct { // Deprecated. // - // Amazon Elastic Graphics reached end of life on January 8, 2024. For workloads - // that require graphics acceleration, we recommend that you use Amazon EC2 G4ad, - // G4dn, or G5 instances. + // Amazon Elastic Graphics reached end of life on January 8, 2024. ElasticGpuSpecifications []ElasticGpuSpecification // Amazon Elastic Inference is no longer available. @@ -14915,24 +15177,16 @@ type RequestLaunchTemplateData struct { // instances to accelerate your Deep Learning (DL) inference workloads. // // You cannot specify accelerators from different generations in the same request. - // - // Starting April 15, 2023, Amazon Web Services will not onboard new customers to - // Amazon Elastic Inference (EI), and will help current customers migrate their - // workloads to options that offer better price and performance. After April 15, - // 2023, new customers will not be able to launch instances with Amazon EI - // accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers - // who have used Amazon EI at least once during the past 30-day period are - // considered current customers and will be able to continue using the service. ElasticInferenceAccelerators []LaunchTemplateElasticInferenceAccelerator // Indicates whether the instance is enabled for Amazon Web Services Nitro - // Enclaves. For more information, see [What is Amazon Web Services Nitro Enclaves?]in the Amazon Web Services Nitro Enclaves + // Enclaves. For more information, see [What is Nitro Enclaves?]in the Amazon Web Services Nitro Enclaves // User Guide. // // You can't enable Amazon Web Services Nitro Enclaves and hibernation on the same // instance. // - // [What is Amazon Web Services Nitro Enclaves?]: https://docs.aws.amazon.com/enclaves/latest/user/nitro-enclave.html + // [What is Nitro Enclaves?]: https://docs.aws.amazon.com/enclaves/latest/user/nitro-enclave.html EnclaveOptions *LaunchTemplateEnclaveOptionsRequest // Indicates whether an instance is enabled for hibernation. This parameter is @@ -15041,9 +15295,9 @@ type RequestLaunchTemplateData struct { // The ID of the kernel. // // We recommend that you use PV-GRUB instead of kernels and RAM disks. For more - // information, see [User provided kernels]in the Amazon EC2 User Guide. + // information, see [User provided kernels]in the Amazon Linux 2 User Guide. // - // [User provided kernels]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedkernels.html + // [User provided kernels]: https://docs.aws.amazon.com/linux/al2/ug/UserProvidedKernels.html KernelId *string // The name of the key pair. You can create a key pair using [CreateKeyPair] or [ImportKeyPair]. @@ -15061,10 +15315,10 @@ type RequestLaunchTemplateData struct { // The maintenance options for the instance. MaintenanceOptions *LaunchTemplateInstanceMaintenanceOptionsRequest - // The metadata options for the instance. For more information, see [Instance metadata and user data] in the Amazon + // The metadata options for the instance. For more information, see [Configure the Instance Metadata Service options] in the Amazon // EC2 User Guide. 
// - // [Instance metadata and user data]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html + // [Configure the Instance Metadata Service options]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-options.html MetadataOptions *LaunchTemplateInstanceMetadataOptionsRequest // The monitoring for the instance. @@ -15113,15 +15367,15 @@ type RequestLaunchTemplateData struct { TagSpecifications []LaunchTemplateTagSpecificationRequest // The user data to make available to the instance. You must provide - // base64-encoded text. User data is limited to 16 KB. For more information, see [Run commands on your Amazon EC2 instance at launch] + // base64-encoded text. User data is limited to 16 KB. For more information, see [Run commands when you launch an EC2 instance with user data input] // in the Amazon EC2 User Guide. // // If you are creating the launch template for use with Batch, the user data must // be provided in the [MIME multi-part archive format]. For more information, see [Amazon EC2 user data in launch templates] in the Batch User Guide. // - // [Amazon EC2 user data in launch templates]: https://docs.aws.amazon.com/batch/latest/userguide/launch-templates.html + // [Amazon EC2 user data in launch templates]: https://docs.aws.amazon.com/batch/latest/userguide/launch-templates.html#lt-user-data + // [Run commands when you launch an EC2 instance with user data input]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html // [MIME multi-part archive format]: https://cloudinit.readthedocs.io/en/latest/topics/format.html#mime-multi-part-archive - // [Run commands on your Amazon EC2 instance at launch]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html UserData *string noSmithyDocumentSerde @@ -15599,19 +15853,19 @@ type ResponseLaunchTemplateData struct { // Information about the Capacity Reservation targeting option. CapacityReservationSpecification *LaunchTemplateCapacityReservationSpecificationResponse - // The CPU options for the instance. For more information, see [Optimize CPU options] in the Amazon EC2 + // The CPU options for the instance. For more information, see [CPU options for Amazon EC2 instances] in the Amazon EC2 // User Guide. // - // [Optimize CPU options]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html + // [CPU options for Amazon EC2 instances]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html CpuOptions *LaunchTemplateCpuOptions // The credit option for CPU usage of the instance. CreditSpecification *CreditSpecification // Indicates whether the instance is enabled for stop protection. For more - // information, see [Enable stop protection for your instance]in the Amazon EC2 User Guide. + // information, see [Enable stop protection for your EC2 instances]in the Amazon EC2 User Guide. // - // [Enable stop protection for your instance]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-stop-protection.html + // [Enable stop protection for your EC2 instances]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-stop-protection.html DisableApiStop *bool // If set to true , indicates that the instance cannot be terminated using the @@ -15623,9 +15877,7 @@ type ResponseLaunchTemplateData struct { // Deprecated. // - // Amazon Elastic Graphics reached end of life on January 8, 2024. For workloads - // that require graphics acceleration, we recommend that you use Amazon EC2 G4ad, - // G4dn, or G5 instances. 
+ // Amazon Elastic Graphics reached end of life on January 8, 2024. ElasticGpuSpecifications []ElasticGpuSpecificationResponse // Amazon Elastic Inference is no longer available. @@ -15635,14 +15887,6 @@ type ResponseLaunchTemplateData struct { // instances to accelerate your Deep Learning (DL) inference workloads. // // You cannot specify accelerators from different generations in the same request. - // - // Starting April 15, 2023, Amazon Web Services will not onboard new customers to - // Amazon Elastic Inference (EI), and will help current customers migrate their - // workloads to options that offer better price and performance. After April 15, - // 2023, new customers will not be able to launch instances with Amazon EI - // accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers - // who have used Amazon EI at least once during the past 30-day period are - // considered current customers and will be able to continue using the service. ElasticInferenceAccelerators []LaunchTemplateElasticInferenceAcceleratorResponse // Indicates whether the instance is enabled for Amazon Web Services Nitro @@ -15674,7 +15918,7 @@ type ResponseLaunchTemplateData struct { // // For more information, see [Use a Systems Manager parameter instead of an AMI ID] in the Amazon EC2 User Guide. // - // [Use a Systems Manager parameter instead of an AMI ID]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-templates.html#use-an-ssm-parameter-instead-of-an-ami-id + // [Use a Systems Manager parameter instead of an AMI ID]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/create-launch-template.html#use-an-ssm-parameter-instead-of-an-ami-id ImageId *string // Indicates whether an instance stops or terminates when you initiate shutdown @@ -15705,10 +15949,10 @@ type ResponseLaunchTemplateData struct { // The maintenance options for your instance. MaintenanceOptions *LaunchTemplateInstanceMaintenanceOptions - // The metadata options for the instance. For more information, see [Instance metadata and user data] in the Amazon + // The metadata options for the instance. For more information, see [Configure the Instance Metadata Service options] in the Amazon // EC2 User Guide. // - // [Instance metadata and user data]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html + // [Configure the Instance Metadata Service options]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-options.html MetadataOptions *LaunchTemplateInstanceMetadataOptions // The monitoring for the instance. @@ -16470,8 +16714,6 @@ type SecurityGroupReference struct { // The ID of the VPC with the referencing security group. ReferencingVpcId *string - // This parameter is in preview and may not be available for your account. - // // The ID of the transit gateway (if applicable). TransitGatewayId *string @@ -21152,6 +21394,8 @@ type Vpc struct { // The ID of the set of DHCP options you've associated with the VPC. DhcpOptionsId *string + EncryptionControl *VpcEncryptionControl + // The allowed tenancy of instances launched into the VPC. 
InstanceTenancy Tenancy @@ -21337,6 +21581,46 @@ type VpcClassicLink struct { noSmithyDocumentSerde } +type VpcEncryptionControl struct { + Mode VpcEncryptionControlMode + + ResourceExclusions *VpcEncryptionControlExclusions + + State VpcEncryptionControlState + + StateMessage *string + + Tags []Tag + + VpcEncryptionControlId *string + + VpcId *string + + noSmithyDocumentSerde +} + +type VpcEncryptionControlExclusion struct { + State VpcEncryptionControlExclusionState + + StateMessage *string + + noSmithyDocumentSerde +} + +type VpcEncryptionControlExclusions struct { + EgressOnlyInternetGateway *VpcEncryptionControlExclusion + + InternetGateway *VpcEncryptionControlExclusion + + NatGateway *VpcEncryptionControlExclusion + + VirtualPrivateGateway *VpcEncryptionControlExclusion + + VpcPeering *VpcEncryptionControlExclusion + + noSmithyDocumentSerde +} + // Describes a VPC endpoint. type VpcEndpoint struct { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/CHANGELOG.md index bf774e97b..9cb40e3de 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/CHANGELOG.md @@ -1,3 +1,83 @@ +# v1.54.2 (2025-03-11) + +* **Documentation**: This is a documentation only update for Amazon ECS to address various tickets. + +# v1.54.1 (2025-03-04.2) + +* **Bug Fix**: Add assurance test for operation order. + +# v1.54.0 (2025-02-27) + +* **Feature**: Track credential providers via User-Agent Feature ids +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.53.16 (2025-02-19) + +* **Documentation**: This is a documentation only release for Amazon ECS that supports the CPU task limit increase. + +# v1.53.15 (2025-02-18) + +* **Bug Fix**: Bump go version to 1.22 +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.53.14 (2025-02-13) + +* **Documentation**: This is a documentation only release to support migrating Amazon ECS service ARNs to the long ARN format. + +# v1.53.13 (2025-02-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.53.12 (2025-02-04) + +* No change notes available for this release. + +# v1.53.11 (2025-01-31) + +* **Dependency Update**: Switch to code-generated waiter matchers, removing the dependency on go-jmespath. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.53.10 (2025-01-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.53.9 (2025-01-24) + +* **Dependency Update**: Updated to the latest SDK module versions +* **Dependency Update**: Upgrade to smithy-go v1.22.2. + +# v1.53.8 (2025-01-17) + +* **Bug Fix**: Fix bug where credentials weren't refreshed during retry loop. + +# v1.53.7 (2025-01-16) + +* **Documentation**: The release addresses Amazon ECS documentation tickets. + +# v1.53.6 (2025-01-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.53.5 (2025-01-14) + +* **Bug Fix**: Fix issue where waiters were not failing on unmatched errors as they should. This may have breaking behavioral changes for users in fringe cases. See [this announcement](https://github.com/aws/aws-sdk-go-v2/discussions/2954) for more information. + +# v1.53.4 (2025-01-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.53.3 (2025-01-08) + +* No change notes available for this release. 
+ +# v1.53.2 (2025-01-03) + +* **Documentation**: Adding SDK reference examples for Amazon ECS operations. + +# v1.53.1 (2024-12-19) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.53.0 (2024-12-17) * **Feature**: Added support for enableFaultInjection task definition parameter which can be used to enable Fault Injection feature on ECS tasks. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_client.go index 221ba5506..f12d4017b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_client.go @@ -696,7 +696,7 @@ func addRetry(stack *middleware.Stack, o Options) error { m.LogAttempts = o.ClientLogMode.IsRetries() m.OperationMeter = o.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/ecs") }) - if err := stack.Finalize.Insert(attempt, "Signing", middleware.Before); err != nil { + if err := stack.Finalize.Insert(attempt, "ResolveAuthScheme", middleware.Before); err != nil { return err } if err := stack.Finalize.Insert(&retry.MetricsHeader{}, attempt.ID(), middleware.After); err != nil { @@ -773,6 +773,37 @@ func addUserAgentRetryMode(stack *middleware.Stack, options Options) error { return nil } +type setCredentialSourceMiddleware struct { + ua *awsmiddleware.RequestUserAgent + options Options +} + +func (m setCredentialSourceMiddleware) ID() string { return "SetCredentialSourceMiddleware" } + +func (m setCredentialSourceMiddleware) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + asProviderSource, ok := m.options.Credentials.(aws.CredentialProviderSource) + if !ok { + return next.HandleBuild(ctx, in) + } + providerSources := asProviderSource.ProviderSources() + for _, source := range providerSources { + m.ua.AddCredentialsSource(source) + } + return next.HandleBuild(ctx, in) +} + +func addCredentialSource(stack *middleware.Stack, options Options) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + mw := setCredentialSourceMiddleware{ua: ua, options: options} + return stack.Build.Insert(&mw, "UserAgent", middleware.Before) +} + func resolveTracerProvider(options *Options) { if options.TracerProvider == nil { options.TracerProvider = &tracing.NopTracerProvider{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_CreateCapacityProvider.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_CreateCapacityProvider.go index 60a9f5ef0..898aa6780 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_CreateCapacityProvider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_CreateCapacityProvider.go @@ -155,6 +155,9 @@ func (c *Client) addOperationCreateCapacityProviderMiddlewares(stack *middleware if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreateCapacityProviderValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_CreateCluster.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_CreateCluster.go index f08f5df70..2f8993a2e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_CreateCluster.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_CreateCluster.go @@ -215,6 +215,9 @@ func (c 
*Client) addOperationCreateClusterMiddlewares(stack *middleware.Stack, o if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreateClusterValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_CreateService.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_CreateService.go index 72aac5c3e..e21d6c3c6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_CreateService.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_CreateService.go @@ -142,7 +142,7 @@ type CreateServiceInput struct { // Indicates whether to use Availability Zone rebalancing for the service. // // For more information, see [Balancing an Amazon ECS service across Availability Zones] in the Amazon Elastic Container Service Developer - // Guide. + // Guide . // // [Balancing an Amazon ECS service across Availability Zones]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-rebalancing.html AvailabilityZoneRebalancing types.AvailabilityZoneRebalancing @@ -528,6 +528,9 @@ func (c *Client) addOperationCreateServiceMiddlewares(stack *middleware.Stack, o if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreateServiceValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_CreateTaskSet.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_CreateTaskSet.go index 9f7fe6f90..99d655915 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_CreateTaskSet.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_CreateTaskSet.go @@ -238,6 +238,9 @@ func (c *Client) addOperationCreateTaskSetMiddlewares(stack *middleware.Stack, o if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreateTaskSetValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DeleteAccountSetting.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DeleteAccountSetting.go index 8b41400dd..bac570ecd 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DeleteAccountSetting.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DeleteAccountSetting.go @@ -126,6 +126,9 @@ func (c *Client) addOperationDeleteAccountSettingMiddlewares(stack *middleware.S if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteAccountSettingValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DeleteAttributes.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DeleteAttributes.go index 8062c0856..2550b978e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DeleteAttributes.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DeleteAttributes.go @@ -120,6 +120,9 @@ func (c *Client) addOperationDeleteAttributesMiddlewares(stack *middleware.Stack if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = 
addOpDeleteAttributesValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DeleteCapacityProvider.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DeleteCapacityProvider.go index f026ec17d..df508c696 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DeleteCapacityProvider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DeleteCapacityProvider.go @@ -130,6 +130,9 @@ func (c *Client) addOperationDeleteCapacityProviderMiddlewares(stack *middleware if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteCapacityProviderValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DeleteCluster.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DeleteCluster.go index 7b8c15b40..a6fb3878d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DeleteCluster.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DeleteCluster.go @@ -122,6 +122,9 @@ func (c *Client) addOperationDeleteClusterMiddlewares(stack *middleware.Stack, o if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteClusterValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DeleteService.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DeleteService.go index 9625686c9..f87b8a685 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DeleteService.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DeleteService.go @@ -141,6 +141,9 @@ func (c *Client) addOperationDeleteServiceMiddlewares(stack *middleware.Stack, o if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteServiceValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DeleteTaskDefinitions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DeleteTaskDefinitions.go index 3fdc4ce09..805e4f424 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DeleteTaskDefinitions.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DeleteTaskDefinitions.go @@ -144,6 +144,9 @@ func (c *Client) addOperationDeleteTaskDefinitionsMiddlewares(stack *middleware. 
if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteTaskDefinitionsValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DeleteTaskSet.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DeleteTaskSet.go index 098197e53..506dd2218 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DeleteTaskSet.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DeleteTaskSet.go @@ -131,6 +131,9 @@ func (c *Client) addOperationDeleteTaskSetMiddlewares(stack *middleware.Stack, o if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteTaskSetValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DeregisterContainerInstance.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DeregisterContainerInstance.go index 16e3435d7..ca1350eeb 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DeregisterContainerInstance.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DeregisterContainerInstance.go @@ -150,6 +150,9 @@ func (c *Client) addOperationDeregisterContainerInstanceMiddlewares(stack *middl if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeregisterContainerInstanceValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DeregisterTaskDefinition.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DeregisterTaskDefinition.go index 0f74fa9ec..60ee21b12 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DeregisterTaskDefinition.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DeregisterTaskDefinition.go @@ -134,6 +134,9 @@ func (c *Client) addOperationDeregisterTaskDefinitionMiddlewares(stack *middlewa if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeregisterTaskDefinitionValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DescribeCapacityProviders.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DescribeCapacityProviders.go index 0af3fb64f..7294aaec8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DescribeCapacityProviders.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DescribeCapacityProviders.go @@ -144,6 +144,9 @@ func (c *Client) addOperationDescribeCapacityProvidersMiddlewares(stack *middlew if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeCapacityProviders(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DescribeClusters.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DescribeClusters.go index d8e57a2b0..c9d860fb8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DescribeClusters.go +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DescribeClusters.go @@ -135,6 +135,9 @@ func (c *Client) addOperationDescribeClustersMiddlewares(stack *middleware.Stack if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeClusters(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DescribeContainerInstances.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DescribeContainerInstances.go index 87c1b481e..d2fc0b4a1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DescribeContainerInstances.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DescribeContainerInstances.go @@ -131,6 +131,9 @@ func (c *Client) addOperationDescribeContainerInstancesMiddlewares(stack *middle if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDescribeContainerInstancesValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DescribeServiceDeployments.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DescribeServiceDeployments.go index b59b32ee4..84736010f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DescribeServiceDeployments.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DescribeServiceDeployments.go @@ -126,6 +126,9 @@ func (c *Client) addOperationDescribeServiceDeploymentsMiddlewares(stack *middle if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDescribeServiceDeploymentsValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DescribeServiceRevisions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DescribeServiceRevisions.go index 7ec557f22..c23a1310b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DescribeServiceRevisions.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DescribeServiceRevisions.go @@ -130,6 +130,9 @@ func (c *Client) addOperationDescribeServiceRevisionsMiddlewares(stack *middlewa if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDescribeServiceRevisionsValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DescribeServices.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DescribeServices.go index 3ea1d3935..16624c05a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DescribeServices.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DescribeServices.go @@ -11,7 +11,6 @@ import ( smithytime "github.com/aws/smithy-go/time" smithyhttp "github.com/aws/smithy-go/transport/http" smithywaiter "github.com/aws/smithy-go/waiter" - jmespath "github.com/jmespath/go-jmespath" "strconv" "time" ) @@ -132,6 +131,9 @@ func (c *Client) addOperationDescribeServicesMiddlewares(stack *middleware.Stack if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + 
} if err = addOpDescribeServicesValidationMiddleware(stack); err != nil { return err } @@ -328,53 +330,54 @@ func (w *ServicesInactiveWaiter) WaitForOutput(ctx context.Context, params *Desc func servicesInactiveStateRetryable(ctx context.Context, input *DescribeServicesInput, output *DescribeServicesOutput, err error) (bool, error) { if err == nil { - pathValue, err := jmespath.Search("failures[].reason", output) - if err != nil { - return false, fmt.Errorf("error evaluating waiter state: %w", err) + v1 := output.Failures + var v2 []string + for _, v := range v1 { + v3 := v.Reason + if v3 != nil { + v2 = append(v2, *v3) + } } - expectedValue := "MISSING" - listOfValues, ok := pathValue.([]interface{}) - if !ok { - return false, fmt.Errorf("waiter comparator expected list got %T", pathValue) - } - - for _, v := range listOfValues { - value, ok := v.(*string) - if !ok { - return false, fmt.Errorf("waiter comparator expected *string value, got %T", pathValue) + var match bool + for _, v := range v2 { + if string(v) == expectedValue { + match = true + break } + } - if string(*value) == expectedValue { - return false, fmt.Errorf("waiter state transitioned to Failure") - } + if match { + return false, fmt.Errorf("waiter state transitioned to Failure") } } if err == nil { - pathValue, err := jmespath.Search("services[].status", output) - if err != nil { - return false, fmt.Errorf("error evaluating waiter state: %w", err) + v1 := output.Services + var v2 []string + for _, v := range v1 { + v3 := v.Status + if v3 != nil { + v2 = append(v2, *v3) + } } - expectedValue := "INACTIVE" - listOfValues, ok := pathValue.([]interface{}) - if !ok { - return false, fmt.Errorf("waiter comparator expected list got %T", pathValue) - } - - for _, v := range listOfValues { - value, ok := v.(*string) - if !ok { - return false, fmt.Errorf("waiter comparator expected *string value, got %T", pathValue) + var match bool + for _, v := range v2 { + if string(v) == expectedValue { + match = true + break } + } - if string(*value) == expectedValue { - return false, nil - } + if match { + return false, nil } } + if err != nil { + return false, err + } return true, nil } @@ -538,98 +541,107 @@ func (w *ServicesStableWaiter) WaitForOutput(ctx context.Context, params *Descri func servicesStableStateRetryable(ctx context.Context, input *DescribeServicesInput, output *DescribeServicesOutput, err error) (bool, error) { if err == nil { - pathValue, err := jmespath.Search("failures[].reason", output) - if err != nil { - return false, fmt.Errorf("error evaluating waiter state: %w", err) + v1 := output.Failures + var v2 []string + for _, v := range v1 { + v3 := v.Reason + if v3 != nil { + v2 = append(v2, *v3) + } } - expectedValue := "MISSING" - listOfValues, ok := pathValue.([]interface{}) - if !ok { - return false, fmt.Errorf("waiter comparator expected list got %T", pathValue) - } - - for _, v := range listOfValues { - value, ok := v.(*string) - if !ok { - return false, fmt.Errorf("waiter comparator expected *string value, got %T", pathValue) + var match bool + for _, v := range v2 { + if string(v) == expectedValue { + match = true + break } + } - if string(*value) == expectedValue { - return false, fmt.Errorf("waiter state transitioned to Failure") - } + if match { + return false, fmt.Errorf("waiter state transitioned to Failure") } } if err == nil { - pathValue, err := jmespath.Search("services[].status", output) - if err != nil { - return false, fmt.Errorf("error evaluating waiter state: %w", err) + v1 := output.Services + var v2 
[]string + for _, v := range v1 { + v3 := v.Status + if v3 != nil { + v2 = append(v2, *v3) + } } - expectedValue := "DRAINING" - listOfValues, ok := pathValue.([]interface{}) - if !ok { - return false, fmt.Errorf("waiter comparator expected list got %T", pathValue) - } - - for _, v := range listOfValues { - value, ok := v.(*string) - if !ok { - return false, fmt.Errorf("waiter comparator expected *string value, got %T", pathValue) + var match bool + for _, v := range v2 { + if string(v) == expectedValue { + match = true + break } + } - if string(*value) == expectedValue { - return false, fmt.Errorf("waiter state transitioned to Failure") - } + if match { + return false, fmt.Errorf("waiter state transitioned to Failure") } } if err == nil { - pathValue, err := jmespath.Search("services[].status", output) - if err != nil { - return false, fmt.Errorf("error evaluating waiter state: %w", err) + v1 := output.Services + var v2 []string + for _, v := range v1 { + v3 := v.Status + if v3 != nil { + v2 = append(v2, *v3) + } } - expectedValue := "INACTIVE" - listOfValues, ok := pathValue.([]interface{}) - if !ok { - return false, fmt.Errorf("waiter comparator expected list got %T", pathValue) - } - - for _, v := range listOfValues { - value, ok := v.(*string) - if !ok { - return false, fmt.Errorf("waiter comparator expected *string value, got %T", pathValue) + var match bool + for _, v := range v2 { + if string(v) == expectedValue { + match = true + break } + } - if string(*value) == expectedValue { - return false, fmt.Errorf("waiter state transitioned to Failure") - } + if match { + return false, fmt.Errorf("waiter state transitioned to Failure") } } if err == nil { - pathValue, err := jmespath.Search("length(services[?!(length(deployments) == `1` && runningCount == desiredCount)]) == `0`", output) - if err != nil { - return false, fmt.Errorf("error evaluating waiter state: %w", err) + v1 := output.Services + var v2 []types.Service + for _, v := range v1 { + v3 := v.Deployments + v4 := len(v3) + v5 := 1 + v6 := int64(v4) == int64(v5) + v7 := v.RunningCount + v8 := v.DesiredCount + v9 := int64(v7) == int64(v8) + v10 := v6 && v9 + v11 := !v10 + if v11 { + v2 = append(v2, v) + } } - + v12 := len(v2) + v13 := 0 + v14 := int64(v12) == int64(v13) expectedValue := "true" bv, err := strconv.ParseBool(expectedValue) if err != nil { return false, fmt.Errorf("error parsing boolean from string %w", err) } - value, ok := pathValue.(bool) - if !ok { - return false, fmt.Errorf("waiter comparator expected bool value got %T", pathValue) - } - - if value == bv { + if v14 == bv { return false, nil } } + if err != nil { + return false, err + } return true, nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DescribeTaskDefinition.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DescribeTaskDefinition.go index 8739cee27..2d10d69c9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DescribeTaskDefinition.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DescribeTaskDefinition.go @@ -152,6 +152,9 @@ func (c *Client) addOperationDescribeTaskDefinitionMiddlewares(stack *middleware if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDescribeTaskDefinitionValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DescribeTaskSets.go 
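A note on the waiter rewrite above: the regenerated servicesInactive/servicesStable retryable functions drop the reflective jmespath.Search calls and inline each JMESPath projection ("failures[].reason", "services[].status") as typed Go. A minimal sketch of the shape the generated v1/v2/v3 temporaries implement, assuming this package's types (the helper name is hypothetical):

func anyFailureReasonEquals(failures []types.Failure, expected string) bool {
	// Project failures[].reason into a plain string slice, skipping nil pointers.
	var reasons []string
	for _, f := range failures {
		if f.Reason != nil {
			reasons = append(reasons, *f.Reason)
		}
	}
	// anyStringEquals comparator: the waiter flips state on the first match.
	for _, r := range reasons {
		if r == expected {
			return true
		}
	}
	return false
}

The same projection-then-compare pattern repeats for every matcher below, replacing the old interface{} type switches with compile-time-checked field access.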
b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DescribeTaskSets.go index ac4706ab7..2e89248eb 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DescribeTaskSets.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DescribeTaskSets.go @@ -134,6 +134,9 @@ func (c *Client) addOperationDescribeTaskSetsMiddlewares(stack *middleware.Stack if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDescribeTaskSetsValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DescribeTasks.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DescribeTasks.go index 0a7437567..fc4086abb 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DescribeTasks.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DescribeTasks.go @@ -11,7 +11,6 @@ import ( smithytime "github.com/aws/smithy-go/time" smithyhttp "github.com/aws/smithy-go/transport/http" smithywaiter "github.com/aws/smithy-go/waiter" - jmespath "github.com/jmespath/go-jmespath" "time" ) @@ -136,6 +135,9 @@ func (c *Client) addOperationDescribeTasksMiddlewares(stack *middleware.Stack, o if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDescribeTasksValidationMiddleware(stack); err != nil { return err } @@ -331,77 +333,66 @@ func (w *TasksRunningWaiter) WaitForOutput(ctx context.Context, params *Describe func tasksRunningStateRetryable(ctx context.Context, input *DescribeTasksInput, output *DescribeTasksOutput, err error) (bool, error) { if err == nil { - pathValue, err := jmespath.Search("tasks[].lastStatus", output) - if err != nil { - return false, fmt.Errorf("error evaluating waiter state: %w", err) + v1 := output.Tasks + var v2 []string + for _, v := range v1 { + v3 := v.LastStatus + if v3 != nil { + v2 = append(v2, *v3) + } } - expectedValue := "STOPPED" - listOfValues, ok := pathValue.([]interface{}) - if !ok { - return false, fmt.Errorf("waiter comparator expected list got %T", pathValue) - } - - for _, v := range listOfValues { - value, ok := v.(*string) - if !ok { - return false, fmt.Errorf("waiter comparator expected *string value, got %T", pathValue) + var match bool + for _, v := range v2 { + if string(v) == expectedValue { + match = true + break } + } - if string(*value) == expectedValue { - return false, fmt.Errorf("waiter state transitioned to Failure") - } + if match { + return false, fmt.Errorf("waiter state transitioned to Failure") } } if err == nil { - pathValue, err := jmespath.Search("failures[].reason", output) - if err != nil { - return false, fmt.Errorf("error evaluating waiter state: %w", err) + v1 := output.Failures + var v2 []string + for _, v := range v1 { + v3 := v.Reason + if v3 != nil { + v2 = append(v2, *v3) + } } - expectedValue := "MISSING" - listOfValues, ok := pathValue.([]interface{}) - if !ok { - return false, fmt.Errorf("waiter comparator expected list got %T", pathValue) - } - - for _, v := range listOfValues { - value, ok := v.(*string) - if !ok { - return false, fmt.Errorf("waiter comparator expected *string value, got %T", pathValue) + var match bool + for _, v := range v2 { + if string(v) == expectedValue { + match = true + break } + } - if string(*value) == expectedValue { - return false, fmt.Errorf("waiter state transitioned to Failure") - } + if match 
{ + return false, fmt.Errorf("waiter state transitioned to Failure") } } if err == nil { - pathValue, err := jmespath.Search("tasks[].lastStatus", output) - if err != nil { - return false, fmt.Errorf("error evaluating waiter state: %w", err) + v1 := output.Tasks + var v2 []string + for _, v := range v1 { + v3 := v.LastStatus + if v3 != nil { + v2 = append(v2, *v3) + } } - expectedValue := "RUNNING" - var match = true - listOfValues, ok := pathValue.([]interface{}) - if !ok { - return false, fmt.Errorf("waiter comparator expected list got %T", pathValue) - } - - if len(listOfValues) == 0 { - match = false - } - for _, v := range listOfValues { - value, ok := v.(*string) - if !ok { - return false, fmt.Errorf("waiter comparator expected *string value, got %T", pathValue) - } - - if string(*value) != expectedValue { + match := len(v2) > 0 + for _, v := range v2 { + if string(v) != expectedValue { match = false + break } } @@ -410,6 +401,9 @@ func tasksRunningStateRetryable(ctx context.Context, input *DescribeTasksInput, } } + if err != nil { + return false, err + } return true, nil } @@ -572,29 +566,20 @@ func (w *TasksStoppedWaiter) WaitForOutput(ctx context.Context, params *Describe func tasksStoppedStateRetryable(ctx context.Context, input *DescribeTasksInput, output *DescribeTasksOutput, err error) (bool, error) { if err == nil { - pathValue, err := jmespath.Search("tasks[].lastStatus", output) - if err != nil { - return false, fmt.Errorf("error evaluating waiter state: %w", err) + v1 := output.Tasks + var v2 []string + for _, v := range v1 { + v3 := v.LastStatus + if v3 != nil { + v2 = append(v2, *v3) + } } - expectedValue := "STOPPED" - var match = true - listOfValues, ok := pathValue.([]interface{}) - if !ok { - return false, fmt.Errorf("waiter comparator expected list got %T", pathValue) - } - - if len(listOfValues) == 0 { - match = false - } - for _, v := range listOfValues { - value, ok := v.(*string) - if !ok { - return false, fmt.Errorf("waiter comparator expected *string value, got %T", pathValue) - } - - if string(*value) != expectedValue { + match := len(v2) > 0 + for _, v := range v2 { + if string(v) != expectedValue { match = false + break } } @@ -603,6 +588,9 @@ func tasksStoppedStateRetryable(ctx context.Context, input *DescribeTasksInput, } } + if err != nil { + return false, err + } return true, nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DiscoverPollEndpoint.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DiscoverPollEndpoint.go index 1890814c1..a99895347 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DiscoverPollEndpoint.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_DiscoverPollEndpoint.go @@ -129,6 +129,9 @@ func (c *Client) addOperationDiscoverPollEndpointMiddlewares(stack *middleware.S if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDiscoverPollEndpoint(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_ExecuteCommand.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_ExecuteCommand.go index 1ad97aee6..c2b18653d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_ExecuteCommand.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_ExecuteCommand.go @@ -158,6 +158,9 @@ func (c *Client) 
addOperationExecuteCommandMiddlewares(stack *middleware.Stack, if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpExecuteCommandValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_GetTaskProtection.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_GetTaskProtection.go index cc23bfaad..784e66e56 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_GetTaskProtection.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_GetTaskProtection.go @@ -127,6 +127,9 @@ func (c *Client) addOperationGetTaskProtectionMiddlewares(stack *middleware.Stac if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetTaskProtectionValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_ListAccountSettings.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_ListAccountSettings.go index 71d5a1fe4..6ffc67ed2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_ListAccountSettings.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_ListAccountSettings.go @@ -153,6 +153,9 @@ func (c *Client) addOperationListAccountSettingsMiddlewares(stack *middleware.St if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListAccountSettings(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_ListAttributes.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_ListAttributes.go index 6eb4b0773..d28b6fa2a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_ListAttributes.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_ListAttributes.go @@ -153,6 +153,9 @@ func (c *Client) addOperationListAttributesMiddlewares(stack *middleware.Stack, if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpListAttributesValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_ListClusters.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_ListClusters.go index d712b4dce..687bfb0fa 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_ListClusters.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_ListClusters.go @@ -131,6 +131,9 @@ func (c *Client) addOperationListClustersMiddlewares(stack *middleware.Stack, op if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListClusters(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_ListContainerInstances.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_ListContainerInstances.go index 710350444..d31f5d3d9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_ListContainerInstances.go +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_ListContainerInstances.go @@ -158,6 +158,9 @@ func (c *Client) addOperationListContainerInstancesMiddlewares(stack *middleware if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListContainerInstances(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_ListServiceDeployments.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_ListServiceDeployments.go index 3e7d782a6..b6a5958a3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_ListServiceDeployments.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_ListServiceDeployments.go @@ -14,9 +14,9 @@ import ( // This operation lists all the service deployments that meet the specified filter // criteria. // -// A service deployment happens when you release a softwre update for the service. -// You route traffic from the running service revisions to the new service revison -// and control the number of running tasks. +// A service deployment happens when you release a software update for the +// service. You route traffic from the running service revisions to the new service +// revision and control the number of running tasks. // // This API returns the values that you use for the request parameters in [DescribeServiceRevisions]. // @@ -175,6 +175,9 @@ func (c *Client) addOperationListServiceDeploymentsMiddlewares(stack *middleware if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpListServiceDeploymentsValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_ListServices.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_ListServices.go index 73c5d8456..3b9303a61 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_ListServices.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_ListServices.go @@ -144,6 +144,9 @@ func (c *Client) addOperationListServicesMiddlewares(stack *middleware.Stack, op if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListServices(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_ListServicesByNamespace.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_ListServicesByNamespace.go index 6694b5b4f..c99d2bf9f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_ListServicesByNamespace.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_ListServicesByNamespace.go @@ -151,6 +151,9 @@ func (c *Client) addOperationListServicesByNamespaceMiddlewares(stack *middlewar if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpListServicesByNamespaceValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_ListTagsForResource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_ListTagsForResource.go index 9147c3d75..33b106eb2 100644 ---
a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_ListTagsForResource.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_ListTagsForResource.go @@ -114,6 +114,9 @@ func (c *Client) addOperationListTagsForResourceMiddlewares(stack *middleware.St if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpListTagsForResourceValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_ListTaskDefinitionFamilies.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_ListTaskDefinitionFamilies.go index fec541e39..a7d5f60f5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_ListTaskDefinitionFamilies.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_ListTaskDefinitionFamilies.go @@ -153,6 +153,9 @@ func (c *Client) addOperationListTaskDefinitionFamiliesMiddlewares(stack *middle if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListTaskDefinitionFamilies(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_ListTaskDefinitions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_ListTaskDefinitions.go index f1cd9e8a4..ee215a7b0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_ListTaskDefinitions.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_ListTaskDefinitions.go @@ -155,6 +155,9 @@ func (c *Client) addOperationListTaskDefinitionsMiddlewares(stack *middleware.St if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListTaskDefinitions(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_ListTasks.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_ListTasks.go index 1e2f32688..8139b5b2a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_ListTasks.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_ListTasks.go @@ -176,6 +176,9 @@ func (c *Client) addOperationListTasksMiddlewares(stack *middleware.Stack, optio if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListTasks(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_PutAccountSetting.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_PutAccountSetting.go index 02bcf1bf5..725f67ebc 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_PutAccountSetting.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_PutAccountSetting.go @@ -235,6 +235,9 @@ func (c *Client) addOperationPutAccountSettingMiddlewares(stack *middleware.Stac if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutAccountSettingValidationMiddleware(stack); err != nil { return err } diff --git 
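Context for the repeated hook: every regenerated operation in this patch adds the same three-line addCredentialSource call. The per-client definition (it appears verbatim later in this patch, in the iam client's api_client.go, and is generated identically for each service client) reduces to:

func addCredentialSource(stack *middleware.Stack, options Options) error {
	// Fetch (or lazily register) the request User-Agent build middleware.
	ua, err := getOrAddRequestUserAgent(stack)
	if err != nil {
		return err
	}
	// Run just before the UserAgent step so any credential-source feature ids
	// recorded by setCredentialSourceMiddleware end up in the emitted header.
	mw := setCredentialSourceMiddleware{ua: ua, options: options}
	return stack.Build.Insert(&mw, "UserAgent", middleware.Before)
}

This is the plumbing behind the "Track credential providers via User-Agent Feature ids" entries in the module changelogs.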
a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_PutAccountSettingDefault.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_PutAccountSettingDefault.go index 862e2b29d..49d16c28e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_PutAccountSettingDefault.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_PutAccountSettingDefault.go @@ -216,6 +216,9 @@ func (c *Client) addOperationPutAccountSettingDefaultMiddlewares(stack *middlewa if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutAccountSettingDefaultValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_PutAttributes.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_PutAttributes.go index fd884a2b7..36b51b801 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_PutAttributes.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_PutAttributes.go @@ -125,6 +125,9 @@ func (c *Client) addOperationPutAttributesMiddlewares(stack *middleware.Stack, o if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutAttributesValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_PutClusterCapacityProviders.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_PutClusterCapacityProviders.go index 8615ead0d..480e626aa 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_PutClusterCapacityProviders.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_PutClusterCapacityProviders.go @@ -172,6 +172,9 @@ func (c *Client) addOperationPutClusterCapacityProvidersMiddlewares(stack *middl if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutClusterCapacityProvidersValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_RegisterContainerInstance.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_RegisterContainerInstance.go index c94baf04a..85bfe481a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_RegisterContainerInstance.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_RegisterContainerInstance.go @@ -171,6 +171,9 @@ func (c *Client) addOperationRegisterContainerInstanceMiddlewares(stack *middlew if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpRegisterContainerInstanceValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_RegisterTaskDefinition.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_RegisterTaskDefinition.go index 650128c54..524e091a7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_RegisterTaskDefinition.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_RegisterTaskDefinition.go @@ -72,9 +72,9 @@ type RegisterTaskDefinitionInput struct { // Task-level CPU and memory parameters are ignored for Windows containers. We // recommend specifying container-level resources for Windows containers. 
// - // If you're using the EC2 launch type, this field is optional. Supported values - // are between 128 CPU units ( 0.125 vCPUs) and 10240 CPU units ( 10 vCPUs). If - // you do not specify a value, the parameter is ignored. + // If you're using the EC2 launch type or external launch type, this field is + // optional. Supported values are between 128 CPU units ( 0.125 vCPUs) and 196608 + // CPU units ( 192 vCPUs). If you do not specify a value, the parameter is ignored. // // If you're using the Fargate launch type, this field is required and you must // use one of the following values, which determines your range of supported values @@ -414,6 +414,9 @@ func (c *Client) addOperationRegisterTaskDefinitionMiddlewares(stack *middleware if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpRegisterTaskDefinitionValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_RunTask.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_RunTask.go index 8a10fd153..f6e4cb40d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_RunTask.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_RunTask.go @@ -354,6 +354,9 @@ func (c *Client) addOperationRunTaskMiddlewares(stack *middleware.Stack, options if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addIdempotencyToken_opRunTaskMiddleware(stack, options); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_StartTask.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_StartTask.go index 64e4584fe..822971357 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_StartTask.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_StartTask.go @@ -232,6 +232,9 @@ func (c *Client) addOperationStartTaskMiddlewares(stack *middleware.Stack, optio if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpStartTaskValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_StopTask.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_StopTask.go index 881579fc2..c002981fd 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_StopTask.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_StopTask.go @@ -47,7 +47,7 @@ func (c *Client) StopTask(ctx context.Context, params *StopTaskInput, optFns ... type StopTaskInput struct { - // The task ID of the task to stop. + // The full Amazon Resource Name (ARN) of the task. // // This member is required.
Task *string @@ -142,6 +142,9 @@ func (c *Client) addOperationStopTaskMiddlewares(stack *middleware.Stack, option if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpStopTaskValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_SubmitAttachmentStateChanges.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_SubmitAttachmentStateChanges.go index 90f01ca7e..0a9f25c35 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_SubmitAttachmentStateChanges.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_SubmitAttachmentStateChanges.go @@ -119,6 +119,9 @@ func (c *Client) addOperationSubmitAttachmentStateChangesMiddlewares(stack *midd if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpSubmitAttachmentStateChangesValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_SubmitContainerStateChange.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_SubmitContainerStateChange.go index 2c89d8a7c..fa31f6009 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_SubmitContainerStateChange.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_SubmitContainerStateChange.go @@ -135,6 +135,9 @@ func (c *Client) addOperationSubmitContainerStateChangeMiddlewares(stack *middle if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opSubmitContainerStateChange(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_SubmitTaskStateChange.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_SubmitTaskStateChange.go index 649e3720a..d840c520a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_SubmitTaskStateChange.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_SubmitTaskStateChange.go @@ -142,6 +142,9 @@ func (c *Client) addOperationSubmitTaskStateChangeMiddlewares(stack *middleware. if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpSubmitTaskStateChangeValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_TagResource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_TagResource.go index 7883396ed..b0863fc72 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_TagResource.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_TagResource.go @@ -36,6 +36,22 @@ type TagResourceInput struct { // supported resources are Amazon ECS capacity providers, tasks, services, task // definitions, clusters, and container instances. // + // In order to tag a service that has the following ARN format, you need to + // migrate the service to the long ARN. For more information, see [Migrate an Amazon ECS short service ARN to a long ARN] in the Amazon + // Elastic Container Service Developer Guide.
+ // + // arn:aws:ecs:region:aws_account_id:service/service-name + // + // After the migration is complete, the service has the long ARN format, as shown + // below. Use this ARN to tag the service. + // + // arn:aws:ecs:region:aws_account_id:service/cluster-name/service-name + // + // If you try to tag a service with a short ARN, you receive an + // InvalidParameterException error. + // + // [Migrate an Amazon ECS short service ARN to a long ARN]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-arn-migration.html + // // This member is required. ResourceArn *string @@ -141,6 +157,9 @@ func (c *Client) addOperationTagResourceMiddlewares(stack *middleware.Stack, opt if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpTagResourceValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_UntagResource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_UntagResource.go index 2d9120107..da1351bdb 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_UntagResource.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_UntagResource.go @@ -114,6 +114,9 @@ func (c *Client) addOperationUntagResourceMiddlewares(stack *middleware.Stack, o if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpUntagResourceValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_UpdateCapacityProvider.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_UpdateCapacityProvider.go index aa3d90841..70f079033 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_UpdateCapacityProvider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_UpdateCapacityProvider.go @@ -118,6 +118,9 @@ func (c *Client) addOperationUpdateCapacityProviderMiddlewares(stack *middleware if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpUpdateCapacityProviderValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_UpdateCluster.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_UpdateCluster.go index b4eade5fe..6fb38428d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_UpdateCluster.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_UpdateCluster.go @@ -136,6 +136,9 @@ func (c *Client) addOperationUpdateClusterMiddlewares(stack *middleware.Stack, o if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpUpdateClusterValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_UpdateClusterSettings.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_UpdateClusterSettings.go index 1b35dd40c..76c69384a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_UpdateClusterSettings.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_UpdateClusterSettings.go @@ -128,6 +128,9 @@ func (c *Client) addOperationUpdateClusterSettingsMiddlewares(stack *middleware. 
if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpUpdateClusterSettingsValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_UpdateContainerAgent.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_UpdateContainerAgent.go index f65c195fe..df6b96be7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_UpdateContainerAgent.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_UpdateContainerAgent.go @@ -139,6 +139,9 @@ func (c *Client) addOperationUpdateContainerAgentMiddlewares(stack *middleware.S if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpUpdateContainerAgentValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_UpdateContainerInstancesState.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_UpdateContainerInstancesState.go index 3b62608b7..f02f25e21 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_UpdateContainerInstancesState.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_UpdateContainerInstancesState.go @@ -180,6 +180,9 @@ func (c *Client) addOperationUpdateContainerInstancesStateMiddlewares(stack *mid if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpUpdateContainerInstancesStateValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_UpdateService.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_UpdateService.go index 4d46e2a90..d664a00c0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_UpdateService.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_UpdateService.go @@ -163,7 +163,7 @@ type UpdateServiceInput struct { // Indicates whether to use Availability Zone rebalancing for the service. // // For more information, see [Balancing an Amazon ECS service across Availability Zones] in the Amazon Elastic Container Service Developer - // Guide. + // Guide . 
// // [Balancing an Amazon ECS service across Availability Zones]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-rebalancing.html AvailabilityZoneRebalancing types.AvailabilityZoneRebalancing @@ -437,6 +437,9 @@ func (c *Client) addOperationUpdateServiceMiddlewares(stack *middleware.Stack, o if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpUpdateServiceValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_UpdateServicePrimaryTaskSet.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_UpdateServicePrimaryTaskSet.go index abfd27524..2505d76ee 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_UpdateServicePrimaryTaskSet.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_UpdateServicePrimaryTaskSet.go @@ -131,6 +131,9 @@ func (c *Client) addOperationUpdateServicePrimaryTaskSetMiddlewares(stack *middl if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpUpdateServicePrimaryTaskSetValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_UpdateTaskProtection.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_UpdateTaskProtection.go index 0be834bfd..1cf5c8a8f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_UpdateTaskProtection.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_UpdateTaskProtection.go @@ -173,6 +173,9 @@ func (c *Client) addOperationUpdateTaskProtectionMiddlewares(stack *middleware.S if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpUpdateTaskProtectionValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_UpdateTaskSet.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_UpdateTaskSet.go index 011535ede..82f37b833 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_UpdateTaskSet.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/api_op_UpdateTaskSet.go @@ -134,6 +134,9 @@ func (c *Client) addOperationUpdateTaskSetMiddlewares(stack *middleware.Stack, o if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpUpdateTaskSetValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/generated.json index 2a0af2dc6..ad02dd3d4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/generated.json +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/generated.json @@ -3,8 +3,7 @@ "github.com/aws/aws-sdk-go-v2": "v1.4.0", "github.com/aws/aws-sdk-go-v2/internal/configsources": "v0.0.0-00010101000000-000000000000", "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2": "v2.0.0-00010101000000-000000000000", - "github.com/aws/smithy-go": "v1.4.0", - "github.com/jmespath/go-jmespath": "v0.4.0" + "github.com/aws/smithy-go": "v1.4.0" }, "files": [ "api_client.go", @@ -81,12 +80,13 @@ "protocol_test.go", "serializers.go", "snapshot_test.go", + "sra_operation_order_test.go", "types/enums.go", 
"types/errors.go", "types/types.go", "validators.go" ], - "go": "1.15", + "go": "1.22", "module": "github.com/aws/aws-sdk-go-v2/service/ecs", "unstable": false } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/go_module_metadata.go index 18db02442..ea1c28200 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/go_module_metadata.go @@ -3,4 +3,4 @@ package ecs // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.53.0" +const goModuleVersion = "1.54.2" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/internal/endpoints/endpoints.go index 75eb7d212..3f37a0517 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/internal/endpoints/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/internal/endpoints/endpoints.go @@ -175,6 +175,9 @@ var defaultPartitions = endpoints.Partitions{ endpoints.EndpointKey{ Region: "ap-southeast-5", }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-southeast-7", + }: endpoints.Endpoint{}, endpoints.EndpointKey{ Region: "ca-central-1", }: endpoints.Endpoint{}, @@ -250,6 +253,9 @@ var defaultPartitions = endpoints.Partitions{ endpoints.EndpointKey{ Region: "me-south-1", }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "mx-central-1", + }: endpoints.Endpoint{}, endpoints.EndpointKey{ Region: "sa-east-1", }: endpoints.Endpoint{}, @@ -430,6 +436,14 @@ var defaultPartitions = endpoints.Partitions{ }, RegionRegex: partitionRegexp.AwsIsoF, IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "us-isof-east-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-isof-south-1", + }: endpoints.Endpoint{}, + }, }, { ID: "aws-us-gov", diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/types/errors.go index ec66bf2cf..6a2ce980a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/types/errors.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/types/errors.go @@ -64,9 +64,9 @@ func (e *AttributeLimitExceededException) ErrorCode() string { } func (e *AttributeLimitExceededException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } -// Your Amazon Web Services account was blocked. For more information, contact [Amazon Web Services Support]. +// Your Amazon Web Services account was blocked. For more information, contact [Amazon Web ServicesSupport]. // -// [Amazon Web Services Support]: http://aws.amazon.com/contact-us/ +// [Amazon Web ServicesSupport]: http://aws.amazon.com/contact-us/ type BlockedException struct { Message *string @@ -285,6 +285,10 @@ func (e *ConflictException) ErrorFault() smithy.ErrorFault { return smithy.Fault // The specified parameter isn't valid. Review the available parameters for the // API request. +// +// For more information about service event errors, see [Amazon ECS service event messages]. 
+ // + // [Amazon ECS service event messages]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-event-messages-list.html type InvalidParameterException struct { Message *string diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/types/types.go index 21fb301b5..8cdcf6df6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/types/types.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ecs/types/types.go @@ -164,21 +164,25 @@ type AutoScalingGroupProviderUpdate struct { type AwsVpcConfiguration struct { // The IDs of the subnets associated with the task or service. There's a limit of - // 16 subnets that can be specified per awsvpcConfiguration . + // 16 subnets that can be specified. // // All specified subnets must be from the same VPC. // // This member is required. Subnets []string - // Whether the task's elastic network interface receives a public IP address. The - // default value is ENABLED . + // Whether the task's elastic network interface receives a public IP address. + // + // Consider the following when you set this value: + // + // - When you use create-service or update-service , the default is DISABLED . + // + // - When the service deploymentController is ECS , the value must be DISABLED . AssignPublicIp AssignPublicIp // The IDs of the security groups associated with the task or service. If you // don't specify a security group, the default security group for the VPC is used. - // There's a limit of 5 security groups that can be specified per - // awsvpcConfiguration . + // There's a limit of 5 security groups that can be specified. // // All specified security groups must be from the same VPC. SecurityGroups []string @@ -527,8 +531,8 @@ type ClusterServiceConnectDefaultsRequest struct { // The namespace name or full Amazon Resource Name (ARN) of the Cloud Map // namespace that's used when you create a service and don't specify a Service // Connect configuration. The namespace name can include up to 1024 characters. The - // name is case-sensitive. The name can't include hyphens (-), tilde (~), greater - // than (>), less than (<), or slash (/). + // name is case-sensitive. The name can't include greater than (>), less than (<), + // double quotation marks ("), or slash (/). // // If you enter an existing namespace name or ARN, then that namespace will be // used. Any namespace type is supported. The namespace must be in this account and @@ -957,8 +961,8 @@ type ContainerDefinition struct { // and VPC settings. Links []string - // Linux-specific modifications that are applied to the container, such as Linux - // kernel capabilities. For more information see [KernelCapabilities]. + // Linux-specific modifications that are applied to the default Docker container + // configuration, such as Linux kernel capabilities. For more information see [KernelCapabilities]. // // This parameter is not supported for Windows containers. // @@ -2497,8 +2501,13 @@ type FSxWindowsFileServerVolumeConfiguration struct { // - Container health checks aren't supported for tasks that are part of a // service that's configured to use a Classic Load Balancer. // +// For an example of how to specify a task definition with multiple containers +// where container dependency is specified, see [Container dependency] in the Amazon Elastic Container +// Service Developer Guide.
// [Updating the Amazon ECS container agent]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-update.html // [Fargate platform versions]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/platform_versions.html +// [Container dependency]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/example_task_definitions.html#example_task_definition-containerdependency type HealthCheck struct { // A string array representing the command that the container runs to determine if @@ -2524,17 +2533,19 @@ type HealthCheck struct { Command []string // The time period in seconds between each health check execution. You may specify - // between 5 and 300 seconds. The default value is 30 seconds. + // between 5 and 300 seconds. The default value is 30 seconds. This value applies + // only when you specify a command . Interval *int32 // The number of times to retry a failed health check before the container is // considered unhealthy. You may specify between 1 and 10 retries. The default - // value is 3. + // value is 3. This value applies only when you specify a command . Retries *int32 // The optional grace period to provide containers time to bootstrap before failed // health checks count towards the maximum number of retries. You can specify - // between 0 and 300 seconds. By default, the startPeriod is off. + // between 0 and 300 seconds. By default, the startPeriod is off. This value + // applies only when you specify a command . // // If a health check succeeds within the startPeriod , then the container is // considered healthy and any subsequent failures count toward the maximum number @@ -2543,7 +2554,7 @@ type HealthCheck struct { // The time period in seconds to wait for a health check to succeed before it is // considered a failure. You may specify between 2 and 60 seconds. The default - // value is 5. + // value is 5. This value applies only when you specify a command . Timeout *int32 noSmithyDocumentSerde @@ -2650,6 +2661,30 @@ type InstanceHealthCheckResult struct { // for a container defined in the task definition. For more detailed information // about these Linux capabilities, see the [capabilities(7)]Linux manual page. // +// The following describes how Docker processes the Linux capabilities specified +// in the add and drop request parameters. For information about the latest +// behavior, see [Docker Compose: order of cap_drop and cap_add] in the Docker Community Forum. +// +// - When the container is a privileged container, the container capabilities are +// all of the default Docker capabilities. The capabilities specified in the add +// request parameter, and the drop request parameter are ignored. +// +// - When the add request parameter is set to ALL, the container capabilities are +// all of the default Docker capabilities, excluding those specified in the drop +// request parameter. +// +// - When the drop request parameter is set to ALL, the container capabilities +// are the capabilities specified in the add request parameter. +// +// - When the add request parameter and the drop request parameter are both +// empty, the container capabilities are all of the default Docker +// capabilities. +// +// - The default is to first drop the capabilities specified in the drop request +// parameter, and then add the capabilities specified in the add request +// parameter.
+// +// [Docker Compose: order of cap_drop and cap_add]: https://forums.docker.com/t/docker-compose-order-of-cap-drop-and-cap-add/97136/1 // [capabilities(7)]: http://man7.org/linux/man-pages/man7/capabilities.7.html type KernelCapabilities struct { @@ -3163,9 +3198,13 @@ type ManagedScaling struct { type ManagedStorageConfiguration struct { // Specify the Key Management Service key ID for the Fargate ephemeral storage. + // + // The key must be a single Region key. FargateEphemeralStorageKmsKeyId *string // Specify a Key Management Service key ID to encrypt the managed storage. + // + // The key must be a single Region key. KmsKeyId *string noSmithyDocumentSerde @@ -3792,7 +3831,7 @@ type Service struct { // Indicates whether to use Availability Zone rebalancing for the service. // // For more information, see [Balancing an Amazon ECS service across Availability Zones] in the Amazon Elastic Container Service Developer - // Guide. + // Guide . // // [Balancing an Amazon ECS service across Availability Zones]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-rebalancing.html AvailabilityZoneRebalancing AvailabilityZoneRebalancing @@ -4891,12 +4930,13 @@ type Task struct { // values are converted to an integer that indicates the CPU units when the task // definition is registered. // - // If you use the EC2 launch type, this field is optional. Supported values are - // between 128 CPU units ( 0.125 vCPUs) and 10240 CPU units ( 10 vCPUs). + // If you're using the EC2 launch type or the external launch type, this field is + // optional. Supported values are between 128 CPU units ( 0.125 vCPUs) and 196608 + // CPU units ( 192 vCPUs). If you do not specify a value, the parameter is ignored. // - // If you use the Fargate launch type, this field is required. You must use one of - // the following values. These values determine the range of supported values for - // the memory parameter: + // If you're using the Fargate launch type, this field is required. You must use + // one of the following values. These values determine the range of supported + // values for the memory parameter: // // The CPU units cannot be less than 1 vCPU when you use Windows containers on // Fargate. @@ -5138,11 +5178,10 @@ type TaskDefinition struct { // this field is required. You must use one of the following values. The value that // you choose determines your range of valid values for the memory parameter. // - // If you use the EC2 launch type, this field is optional. Supported values are - // between 128 CPU units ( 0.125 vCPUs) and 10240 CPU units ( 10 vCPUs). - // - // The CPU units cannot be less than 1 vCPU when you use Windows containers on - // Fargate. + // If you're using the EC2 launch type or the external launch type, this field is + // optional. Supported values are between 128 CPU units ( 0.125 vCPUs) and 196608 + // CPU units ( 192 vCPUs). The CPU units cannot be less than 1 vCPU when you use + // Windows containers on Fargate. // // - 256 (.25 vCPU) - Available memory values: 512 (0.5 GB), 1024 (1 GB), 2048 (2 // GB) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/CHANGELOG.md index d9416223d..1b60f5cd1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/CHANGELOG.md @@ -1,3 +1,12 @@ +# v1.40.1 (2025-03-04.2) + +* **Bug Fix**: Add assurance test for operation order. 
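Returning to the KernelCapabilities documentation added above: its five bullets describe a small resolution function. A standalone sketch of those rules (illustrative only; defaultCaps stands in for Docker's default capability set, and Docker remains authoritative for edge cases such as add and drop both naming ALL):

func effectiveCapabilities(defaultCaps, add, drop []string, privileged bool) []string {
	if privileged {
		// Privileged containers keep every default capability; add/drop are ignored.
		return defaultCaps
	}
	if contains(add, "ALL") {
		// add=ALL: all defaults, excluding anything named in drop.
		return minus(defaultCaps, drop)
	}
	if contains(drop, "ALL") {
		// drop=ALL: only the capabilities named in add survive.
		return add
	}
	// Default ordering (also covers the both-empty case): drop first, then add.
	return append(minus(defaultCaps, drop), add...)
}

func contains(caps []string, name string) bool {
	for _, c := range caps {
		if c == name {
			return true
		}
	}
	return false
}

func minus(caps, remove []string) []string {
	var kept []string
	for _, c := range caps {
		if !contains(remove, c) {
			kept = append(kept, c)
		}
	}
	return kept
}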
+ +# v1.40.0 (2025-02-27) + +* **Feature**: Track credential providers via User-Agent Feature ids +* **Dependency Update**: Updated to the latest SDK module versions + # v1.39.2 (2025-02-18) * **Bug Fix**: Bump go version to 1.22 diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_client.go index 0c18b4725..78b8db486 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_client.go @@ -762,6 +762,37 @@ func addUserAgentRetryMode(stack *middleware.Stack, options Options) error { return nil } +type setCredentialSourceMiddleware struct { + ua *awsmiddleware.RequestUserAgent + options Options +} + +func (m setCredentialSourceMiddleware) ID() string { return "SetCredentialSourceMiddleware" } + +func (m setCredentialSourceMiddleware) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + asProviderSource, ok := m.options.Credentials.(aws.CredentialProviderSource) + if !ok { + return next.HandleBuild(ctx, in) + } + providerSources := asProviderSource.ProviderSources() + for _, source := range providerSources { + m.ua.AddCredentialsSource(source) + } + return next.HandleBuild(ctx, in) +} + +func addCredentialSource(stack *middleware.Stack, options Options) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + mw := setCredentialSourceMiddleware{ua: ua, options: options} + return stack.Build.Insert(&mw, "UserAgent", middleware.Before) +} + func resolveTracerProvider(options *Options) { if options.TracerProvider == nil { options.TracerProvider = &tracing.NopTracerProvider{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_AddClientIDToOpenIDConnectProvider.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_AddClientIDToOpenIDConnectProvider.go index d0079c96a..16da371ab 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_AddClientIDToOpenIDConnectProvider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_AddClientIDToOpenIDConnectProvider.go @@ -119,6 +119,9 @@ func (c *Client) addOperationAddClientIDToOpenIDConnectProviderMiddlewares(stack if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpAddClientIDToOpenIDConnectProviderValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_AddRoleToInstanceProfile.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_AddRoleToInstanceProfile.go index d8b07ec7f..d32e33a77 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_AddRoleToInstanceProfile.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_AddRoleToInstanceProfile.go @@ -150,6 +150,9 @@ func (c *Client) addOperationAddRoleToInstanceProfileMiddlewares(stack *middlewa if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpAddRoleToInstanceProfileValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_AddUserToGroup.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_AddUserToGroup.go index 5064d1384..88304adad 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_AddUserToGroup.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_AddUserToGroup.go @@ -124,6 +124,9 @@ func (c *Client) addOperationAddUserToGroupMiddlewares(stack *middleware.Stack, if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpAddUserToGroupValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_AttachGroupPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_AttachGroupPolicy.go index 5311a28c2..fdeed038d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_AttachGroupPolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_AttachGroupPolicy.go @@ -135,6 +135,9 @@ func (c *Client) addOperationAttachGroupPolicyMiddlewares(stack *middleware.Stac if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpAttachGroupPolicyValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_AttachRolePolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_AttachRolePolicy.go index 911d43456..b94e54cc9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_AttachRolePolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_AttachRolePolicy.go @@ -142,6 +142,9 @@ func (c *Client) addOperationAttachRolePolicyMiddlewares(stack *middleware.Stack if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpAttachRolePolicyValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_AttachUserPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_AttachUserPolicy.go index 95a79d0b0..052b3e4d4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_AttachUserPolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_AttachUserPolicy.go @@ -135,6 +135,9 @@ func (c *Client) addOperationAttachUserPolicyMiddlewares(stack *middleware.Stack if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpAttachUserPolicyValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ChangePassword.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ChangePassword.go index 0aed4dc50..041eb0ccb 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ChangePassword.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ChangePassword.go @@ -134,6 +134,9 @@ func (c *Client) addOperationChangePasswordMiddlewares(stack *middleware.Stack, if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpChangePasswordValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateAccessKey.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateAccessKey.go index 939a1717f..c8aa5d7ae 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateAccessKey.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateAccessKey.go @@ -140,6 +140,9 @@ func (c *Client) addOperationCreateAccessKeyMiddlewares(stack *middleware.Stack, if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateAccessKey(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateAccountAlias.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateAccountAlias.go index 6befed397..a70980e74 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateAccountAlias.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateAccountAlias.go @@ -117,6 +117,9 @@ func (c *Client) addOperationCreateAccountAliasMiddlewares(stack *middleware.Sta if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreateAccountAliasValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateGroup.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateGroup.go index b6f1079be..3f8ca8fc8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateGroup.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateGroup.go @@ -139,6 +139,9 @@ func (c *Client) addOperationCreateGroupMiddlewares(stack *middleware.Stack, opt if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreateGroupValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateInstanceProfile.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateInstanceProfile.go index 26b50cff8..f7845751f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateInstanceProfile.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateInstanceProfile.go @@ -156,6 +156,9 @@ func (c *Client) addOperationCreateInstanceProfileMiddlewares(stack *middleware. 
if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreateInstanceProfileValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateLoginProfile.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateLoginProfile.go index 63c5ed262..c81d9436d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateLoginProfile.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateLoginProfile.go @@ -156,6 +156,9 @@ func (c *Client) addOperationCreateLoginProfileMiddlewares(stack *middleware.Sta if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateLoginProfile(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateOpenIDConnectProvider.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateOpenIDConnectProvider.go index dcb772c17..6d79fbe31 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateOpenIDConnectProvider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateOpenIDConnectProvider.go @@ -216,6 +216,9 @@ func (c *Client) addOperationCreateOpenIDConnectProviderMiddlewares(stack *middl if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreateOpenIDConnectProviderValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreatePolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreatePolicy.go index 691c64926..124a179fb 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreatePolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreatePolicy.go @@ -200,6 +200,9 @@ func (c *Client) addOperationCreatePolicyMiddlewares(stack *middleware.Stack, op if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreatePolicyValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreatePolicyVersion.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreatePolicyVersion.go index 5943f98a1..5f91e6984 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreatePolicyVersion.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreatePolicyVersion.go @@ -171,6 +171,9 @@ func (c *Client) addOperationCreatePolicyVersionMiddlewares(stack *middleware.St if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreatePolicyVersionValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateRole.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateRole.go index 2c0fcf790..8dcabed04 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateRole.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateRole.go @@ -220,6 +220,9 @@ func (c *Client) 
addOperationCreateRoleMiddlewares(stack *middleware.Stack, opti if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreateRoleValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateSAMLProvider.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateSAMLProvider.go index 775a06c48..8b9bfe121 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateSAMLProvider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateSAMLProvider.go @@ -179,6 +179,9 @@ func (c *Client) addOperationCreateSAMLProviderMiddlewares(stack *middleware.Sta if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreateSAMLProviderValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateServiceLinkedRole.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateServiceLinkedRole.go index 5d9f5041c..e13d21c84 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateServiceLinkedRole.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateServiceLinkedRole.go @@ -146,6 +146,9 @@ func (c *Client) addOperationCreateServiceLinkedRoleMiddlewares(stack *middlewar if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreateServiceLinkedRoleValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateServiceSpecificCredential.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateServiceSpecificCredential.go index 3b74e8545..10e6beacb 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateServiceSpecificCredential.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateServiceSpecificCredential.go @@ -146,6 +146,9 @@ func (c *Client) addOperationCreateServiceSpecificCredentialMiddlewares(stack *m if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreateServiceSpecificCredentialValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateUser.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateUser.go index 6c18ad2ac..eb3eb9388 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateUser.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateUser.go @@ -162,6 +162,9 @@ func (c *Client) addOperationCreateUserMiddlewares(stack *middleware.Stack, opti if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreateUserValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateVirtualMFADevice.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateVirtualMFADevice.go index 4f8233b27..bb62d0449 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateVirtualMFADevice.go +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateVirtualMFADevice.go @@ -162,6 +162,9 @@ func (c *Client) addOperationCreateVirtualMFADeviceMiddlewares(stack *middleware if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreateVirtualMFADeviceValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeactivateMFADevice.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeactivateMFADevice.go index f675e5341..4666e9712 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeactivateMFADevice.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeactivateMFADevice.go @@ -134,6 +134,9 @@ func (c *Client) addOperationDeactivateMFADeviceMiddlewares(stack *middleware.St if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeactivateMFADeviceValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteAccessKey.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteAccessKey.go index 9722f3998..911f3ba96 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteAccessKey.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteAccessKey.go @@ -129,6 +129,9 @@ func (c *Client) addOperationDeleteAccessKeyMiddlewares(stack *middleware.Stack, if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteAccessKeyValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteAccountAlias.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteAccountAlias.go index 5de74902e..e201b0fcf 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteAccountAlias.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteAccountAlias.go @@ -118,6 +118,9 @@ func (c *Client) addOperationDeleteAccountAliasMiddlewares(stack *middleware.Sta if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteAccountAliasValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteAccountPasswordPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteAccountPasswordPolicy.go index 498c88306..609c0dfd7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteAccountPasswordPolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteAccountPasswordPolicy.go @@ -102,6 +102,9 @@ func (c *Client) addOperationDeleteAccountPasswordPolicyMiddlewares(stack *middl if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteAccountPasswordPolicy(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteGroup.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteGroup.go index e133fbc5f..962f48960 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteGroup.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteGroup.go @@ -114,6 +114,9 @@ func (c *Client) addOperationDeleteGroupMiddlewares(stack *middleware.Stack, opt if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteGroupValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteGroupPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteGroupPolicy.go index d38f19b34..a9d50f7e7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteGroupPolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteGroupPolicy.go @@ -131,6 +131,9 @@ func (c *Client) addOperationDeleteGroupPolicyMiddlewares(stack *middleware.Stac if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteGroupPolicyValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteInstanceProfile.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteInstanceProfile.go index 09d974707..82102d2e2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteInstanceProfile.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteInstanceProfile.go @@ -123,6 +123,9 @@ func (c *Client) addOperationDeleteInstanceProfileMiddlewares(stack *middleware. if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteInstanceProfileValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteLoginProfile.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteLoginProfile.go index f7c45ce47..980f0d6a8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteLoginProfile.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteLoginProfile.go @@ -128,6 +128,9 @@ func (c *Client) addOperationDeleteLoginProfileMiddlewares(stack *middleware.Sta if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteLoginProfile(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteOpenIDConnectProvider.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteOpenIDConnectProvider.go index 0daf8d871..cd836637d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteOpenIDConnectProvider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteOpenIDConnectProvider.go @@ -116,6 +116,9 @@ func (c *Client) addOperationDeleteOpenIDConnectProviderMiddlewares(stack *middl if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteOpenIDConnectProviderValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeletePolicy.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeletePolicy.go index 5c0aa800f..e4f9e6424 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeletePolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeletePolicy.go @@ -133,6 +133,9 @@ func (c *Client) addOperationDeletePolicyMiddlewares(stack *middleware.Stack, op if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeletePolicyValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeletePolicyVersion.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeletePolicyVersion.go index 04767686b..ecbb8a507 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeletePolicyVersion.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeletePolicyVersion.go @@ -135,6 +135,9 @@ func (c *Client) addOperationDeletePolicyVersionMiddlewares(stack *middleware.St if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeletePolicyVersionValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteRole.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteRole.go index c8d2320b4..2dbab4de5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteRole.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteRole.go @@ -131,6 +131,9 @@ func (c *Client) addOperationDeleteRoleMiddlewares(stack *middleware.Stack, opti if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteRoleValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteRolePermissionsBoundary.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteRolePermissionsBoundary.go index 89d063183..d32c2b9ec 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteRolePermissionsBoundary.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteRolePermissionsBoundary.go @@ -114,6 +114,9 @@ func (c *Client) addOperationDeleteRolePermissionsBoundaryMiddlewares(stack *mid if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteRolePermissionsBoundaryValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteRolePolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteRolePolicy.go index 267a3fb73..40ed71ac3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteRolePolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteRolePolicy.go @@ -131,6 +131,9 @@ func (c *Client) addOperationDeleteRolePolicyMiddlewares(stack *middleware.Stack if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteRolePolicyValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteSAMLProvider.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteSAMLProvider.go index 8ad85a5c4..f99eb1788 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteSAMLProvider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteSAMLProvider.go @@ -116,6 +116,9 @@ func (c *Client) addOperationDeleteSAMLProviderMiddlewares(stack *middleware.Sta if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteSAMLProviderValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteSSHPublicKey.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteSSHPublicKey.go index 7c4f9aa24..827098a58 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteSSHPublicKey.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteSSHPublicKey.go @@ -130,6 +130,9 @@ func (c *Client) addOperationDeleteSSHPublicKeyMiddlewares(stack *middleware.Sta if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteSSHPublicKeyValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteServerCertificate.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteServerCertificate.go index bd0089550..9d4b8ccd0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteServerCertificate.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteServerCertificate.go @@ -128,6 +128,9 @@ func (c *Client) addOperationDeleteServerCertificateMiddlewares(stack *middlewar if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteServerCertificateValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteServiceLinkedRole.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteServiceLinkedRole.go index e4cac12c8..8b65a663c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteServiceLinkedRole.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteServiceLinkedRole.go @@ -133,6 +133,9 @@ func (c *Client) addOperationDeleteServiceLinkedRoleMiddlewares(stack *middlewar if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteServiceLinkedRoleValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteServiceSpecificCredential.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteServiceSpecificCredential.go index 75fc1a7c7..70d9b4697 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteServiceSpecificCredential.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteServiceSpecificCredential.go @@ -124,6 +124,9 @@ func (c *Client) addOperationDeleteServiceSpecificCredentialMiddlewares(stack *m if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteServiceSpecificCredentialValidationMiddleware(stack); err != nil 
{ return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteSigningCertificate.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteSigningCertificate.go index cd673808e..450b8bfda 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteSigningCertificate.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteSigningCertificate.go @@ -128,6 +128,9 @@ func (c *Client) addOperationDeleteSigningCertificateMiddlewares(stack *middlewa if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteSigningCertificateValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteUser.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteUser.go index 98fd67e1b..77b725816 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteUser.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteUser.go @@ -136,6 +136,9 @@ func (c *Client) addOperationDeleteUserMiddlewares(stack *middleware.Stack, opti if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteUserValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteUserPermissionsBoundary.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteUserPermissionsBoundary.go index f00eb3638..584d974a2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteUserPermissionsBoundary.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteUserPermissionsBoundary.go @@ -112,6 +112,9 @@ func (c *Client) addOperationDeleteUserPermissionsBoundaryMiddlewares(stack *mid if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteUserPermissionsBoundaryValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteUserPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteUserPolicy.go index ba27de197..c08df88d4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteUserPolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteUserPolicy.go @@ -131,6 +131,9 @@ func (c *Client) addOperationDeleteUserPolicyMiddlewares(stack *middleware.Stack if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteUserPolicyValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteVirtualMFADevice.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteVirtualMFADevice.go index e1becfbe9..3a0e77539 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteVirtualMFADevice.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteVirtualMFADevice.go @@ -117,6 +117,9 @@ func (c *Client) addOperationDeleteVirtualMFADeviceMiddlewares(stack *middleware if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = 
addOpDeleteVirtualMFADeviceValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DetachGroupPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DetachGroupPolicy.go index 6f5d9f011..d749a474c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DetachGroupPolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DetachGroupPolicy.go @@ -128,6 +128,9 @@ func (c *Client) addOperationDetachGroupPolicyMiddlewares(stack *middleware.Stac if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDetachGroupPolicyValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DetachRolePolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DetachRolePolicy.go index 32521226f..ab454d82e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DetachRolePolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DetachRolePolicy.go @@ -128,6 +128,9 @@ func (c *Client) addOperationDetachRolePolicyMiddlewares(stack *middleware.Stack if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDetachRolePolicyValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DetachUserPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DetachUserPolicy.go index f627c0450..1be22a7e3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DetachUserPolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DetachUserPolicy.go @@ -128,6 +128,9 @@ func (c *Client) addOperationDetachUserPolicyMiddlewares(stack *middleware.Stack if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDetachUserPolicyValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DisableOrganizationsRootCredentialsManagement.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DisableOrganizationsRootCredentialsManagement.go index fab6c6073..c7ea45cd8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DisableOrganizationsRootCredentialsManagement.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DisableOrganizationsRootCredentialsManagement.go @@ -113,6 +113,9 @@ func (c *Client) addOperationDisableOrganizationsRootCredentialsManagementMiddle if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDisableOrganizationsRootCredentialsManagement(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DisableOrganizationsRootSessions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DisableOrganizationsRootSessions.go index ca4181d06..537bbeae5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DisableOrganizationsRootSessions.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DisableOrganizationsRootSessions.go @@ -113,6 +113,9 @@ func (c *Client) 
addOperationDisableOrganizationsRootSessionsMiddlewares(stack * if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDisableOrganizationsRootSessions(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_EnableMFADevice.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_EnableMFADevice.go index 34916c2f1..2135b0cd8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_EnableMFADevice.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_EnableMFADevice.go @@ -157,6 +157,9 @@ func (c *Client) addOperationEnableMFADeviceMiddlewares(stack *middleware.Stack, if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpEnableMFADeviceValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_EnableOrganizationsRootCredentialsManagement.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_EnableOrganizationsRootCredentialsManagement.go index 48541af6a..afebed205 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_EnableOrganizationsRootCredentialsManagement.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_EnableOrganizationsRootCredentialsManagement.go @@ -124,6 +124,9 @@ func (c *Client) addOperationEnableOrganizationsRootCredentialsManagementMiddlew if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opEnableOrganizationsRootCredentialsManagement(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_EnableOrganizationsRootSessions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_EnableOrganizationsRootSessions.go index 07aacb0cd..3cfd5fdfc 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_EnableOrganizationsRootSessions.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_EnableOrganizationsRootSessions.go @@ -123,6 +123,9 @@ func (c *Client) addOperationEnableOrganizationsRootSessionsMiddlewares(stack *m if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opEnableOrganizationsRootSessions(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GenerateCredentialReport.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GenerateCredentialReport.go index 30e210894..a67ee4dba 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GenerateCredentialReport.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GenerateCredentialReport.go @@ -114,6 +114,9 @@ func (c *Client) addOperationGenerateCredentialReportMiddlewares(stack *middlewa if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGenerateCredentialReport(options.Region), 
middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GenerateOrganizationsAccessReport.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GenerateOrganizationsAccessReport.go index b9c2b930d..ea0d62eff 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GenerateOrganizationsAccessReport.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GenerateOrganizationsAccessReport.go @@ -234,6 +234,9 @@ func (c *Client) addOperationGenerateOrganizationsAccessReportMiddlewares(stack if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGenerateOrganizationsAccessReportValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GenerateServiceLastAccessedDetails.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GenerateServiceLastAccessedDetails.go index 1be261c21..7b5e2178b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GenerateServiceLastAccessedDetails.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GenerateServiceLastAccessedDetails.go @@ -183,6 +183,9 @@ func (c *Client) addOperationGenerateServiceLastAccessedDetailsMiddlewares(stack if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGenerateServiceLastAccessedDetailsValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetAccessKeyLastUsed.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetAccessKeyLastUsed.go index 748c5f288..81a560f7b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetAccessKeyLastUsed.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetAccessKeyLastUsed.go @@ -125,6 +125,9 @@ func (c *Client) addOperationGetAccessKeyLastUsedMiddlewares(stack *middleware.S if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetAccessKeyLastUsedValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetAccountAuthorizationDetails.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetAccountAuthorizationDetails.go index 2162fdb8a..c72bc19ab 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetAccountAuthorizationDetails.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetAccountAuthorizationDetails.go @@ -169,6 +169,9 @@ func (c *Client) addOperationGetAccountAuthorizationDetailsMiddlewares(stack *mi if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetAccountAuthorizationDetails(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetAccountPasswordPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetAccountPasswordPolicy.go index aa50f44cd..4df246360 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetAccountPasswordPolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetAccountPasswordPolicy.go 
@@ -114,6 +114,9 @@ func (c *Client) addOperationGetAccountPasswordPolicyMiddlewares(stack *middlewa if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetAccountPasswordPolicy(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetAccountSummary.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetAccountSummary.go index f37d86aef..6f7856a26 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetAccountSummary.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetAccountSummary.go @@ -112,6 +112,9 @@ func (c *Client) addOperationGetAccountSummaryMiddlewares(stack *middleware.Stac if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetAccountSummary(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetContextKeysForCustomPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetContextKeysForCustomPolicy.go index a506e9388..10a8f7372 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetContextKeysForCustomPolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetContextKeysForCustomPolicy.go @@ -138,6 +138,9 @@ func (c *Client) addOperationGetContextKeysForCustomPolicyMiddlewares(stack *mid if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetContextKeysForCustomPolicyValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetContextKeysForPrincipalPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetContextKeysForPrincipalPolicy.go index 10cd3ff5c..3e20af7d9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetContextKeysForPrincipalPolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetContextKeysForPrincipalPolicy.go @@ -157,6 +157,9 @@ func (c *Client) addOperationGetContextKeysForPrincipalPolicyMiddlewares(stack * if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetContextKeysForPrincipalPolicyValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetCredentialReport.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetCredentialReport.go index d75d50dcc..a8b232424 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetCredentialReport.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetCredentialReport.go @@ -120,6 +120,9 @@ func (c *Client) addOperationGetCredentialReportMiddlewares(stack *middleware.St if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetCredentialReport(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetGroup.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetGroup.go index f10ee89c2..7deac4d16 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetGroup.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetGroup.go @@ -157,6 +157,9 @@ func (c *Client) addOperationGetGroupMiddlewares(stack *middleware.Stack, option if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetGroupValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetGroupPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetGroupPolicy.go index 3a3db540d..9da0699be 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetGroupPolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetGroupPolicy.go @@ -161,6 +161,9 @@ func (c *Client) addOperationGetGroupPolicyMiddlewares(stack *middleware.Stack, if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetGroupPolicyValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetInstanceProfile.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetInstanceProfile.go index 919f7cb74..03c96ed01 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetInstanceProfile.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetInstanceProfile.go @@ -130,6 +130,9 @@ func (c *Client) addOperationGetInstanceProfileMiddlewares(stack *middleware.Sta if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetInstanceProfileValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetLoginProfile.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetLoginProfile.go index 4e16bb622..2a559c7fc 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetLoginProfile.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetLoginProfile.go @@ -136,6 +136,9 @@ func (c *Client) addOperationGetLoginProfileMiddlewares(stack *middleware.Stack, if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetLoginProfile(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetMFADevice.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetMFADevice.go index 92e629346..0b22fdd07 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetMFADevice.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetMFADevice.go @@ -135,6 +135,9 @@ func (c *Client) addOperationGetMFADeviceMiddlewares(stack *middleware.Stack, op if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetMFADeviceValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetOpenIDConnectProvider.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetOpenIDConnectProvider.go index aca71da5c..afff41f77 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetOpenIDConnectProvider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetOpenIDConnectProvider.go @@ -142,6 +142,9 @@ func (c *Client) addOperationGetOpenIDConnectProviderMiddlewares(stack *middlewa if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetOpenIDConnectProviderValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetOrganizationsAccessReport.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetOrganizationsAccessReport.go index adc980b59..9a1a4d450 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetOrganizationsAccessReport.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetOrganizationsAccessReport.go @@ -201,6 +201,9 @@ func (c *Client) addOperationGetOrganizationsAccessReportMiddlewares(stack *midd if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetOrganizationsAccessReportValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetPolicy.go index 4cd2c9dd2..729008bac 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetPolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetPolicy.go @@ -137,6 +137,9 @@ func (c *Client) addOperationGetPolicyMiddlewares(stack *middleware.Stack, optio if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetPolicyValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetPolicyVersion.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetPolicyVersion.go index e8ca347e3..0b7629502 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetPolicyVersion.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetPolicyVersion.go @@ -151,6 +151,9 @@ func (c *Client) addOperationGetPolicyVersionMiddlewares(stack *middleware.Stack if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetPolicyVersionValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetRole.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetRole.go index 88055052b..e824dc819 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetRole.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetRole.go @@ -137,6 +137,9 @@ func (c *Client) addOperationGetRoleMiddlewares(stack *middleware.Stack, options if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetRoleValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetRolePolicy.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetRolePolicy.go index 9830349bc..31a4fdfaf 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetRolePolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetRolePolicy.go @@ -164,6 +164,9 @@ func (c *Client) addOperationGetRolePolicyMiddlewares(stack *middleware.Stack, o if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetRolePolicyValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetSAMLProvider.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetSAMLProvider.go index 13a87ade0..df49bee05 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetSAMLProvider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetSAMLProvider.go @@ -147,6 +147,9 @@ func (c *Client) addOperationGetSAMLProviderMiddlewares(stack *middleware.Stack, if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetSAMLProviderValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetSSHPublicKey.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetSSHPublicKey.go index f9c1cb88b..b0b137105 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetSSHPublicKey.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetSSHPublicKey.go @@ -143,6 +143,9 @@ func (c *Client) addOperationGetSSHPublicKeyMiddlewares(stack *middleware.Stack, if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetSSHPublicKeyValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetServerCertificate.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetServerCertificate.go index 583841ba0..b5010dfdb 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetServerCertificate.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetServerCertificate.go @@ -127,6 +127,9 @@ func (c *Client) addOperationGetServerCertificateMiddlewares(stack *middleware.S if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetServerCertificateValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetServiceLastAccessedDetails.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetServiceLastAccessedDetails.go index da2b5e1c8..bde36d0f4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetServiceLastAccessedDetails.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetServiceLastAccessedDetails.go @@ -224,6 +224,9 @@ func (c *Client) addOperationGetServiceLastAccessedDetailsMiddlewares(stack *mid if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetServiceLastAccessedDetailsValidationMiddleware(stack); err != nil { return err } diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetServiceLastAccessedDetailsWithEntities.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetServiceLastAccessedDetailsWithEntities.go index 22266fa2d..4a9aa2ddb 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetServiceLastAccessedDetailsWithEntities.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetServiceLastAccessedDetailsWithEntities.go @@ -209,6 +209,9 @@ func (c *Client) addOperationGetServiceLastAccessedDetailsWithEntitiesMiddleware if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetServiceLastAccessedDetailsWithEntitiesValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetServiceLinkedRoleDeletionStatus.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetServiceLinkedRoleDeletionStatus.go index 0bb8fd482..813966647 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetServiceLinkedRoleDeletionStatus.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetServiceLinkedRoleDeletionStatus.go @@ -122,6 +122,9 @@ func (c *Client) addOperationGetServiceLinkedRoleDeletionStatusMiddlewares(stack if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetServiceLinkedRoleDeletionStatusValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetUser.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetUser.go index 4586e1429..1213967f1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetUser.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetUser.go @@ -149,6 +149,9 @@ func (c *Client) addOperationGetUserMiddlewares(stack *middleware.Stack, options if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetUser(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetUserPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetUserPolicy.go index 387888a15..808c6c627 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetUserPolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetUserPolicy.go @@ -161,6 +161,9 @@ func (c *Client) addOperationGetUserPolicyMiddlewares(stack *middleware.Stack, o if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetUserPolicyValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListAccessKeys.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListAccessKeys.go index de39bf7ca..904c1d084 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListAccessKeys.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListAccessKeys.go @@ -164,6 +164,9 @@ func (c *Client) addOperationListAccessKeysMiddlewares(stack *middleware.Stack, if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = 
addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListAccessKeys(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListAccountAliases.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListAccountAliases.go index a65d49f20..14109c0f5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListAccountAliases.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListAccountAliases.go @@ -143,6 +143,9 @@ func (c *Client) addOperationListAccountAliasesMiddlewares(stack *middleware.Sta if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListAccountAliases(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListAttachedGroupPolicies.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListAttachedGroupPolicies.go index fe9de8b74..5a9e0fa48 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListAttachedGroupPolicies.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListAttachedGroupPolicies.go @@ -172,6 +172,9 @@ func (c *Client) addOperationListAttachedGroupPoliciesMiddlewares(stack *middlew if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpListAttachedGroupPoliciesValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListAttachedRolePolicies.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListAttachedRolePolicies.go index f767a16d3..53327a1af 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListAttachedRolePolicies.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListAttachedRolePolicies.go @@ -172,6 +172,9 @@ func (c *Client) addOperationListAttachedRolePoliciesMiddlewares(stack *middlewa if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpListAttachedRolePoliciesValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListAttachedUserPolicies.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListAttachedUserPolicies.go index 46cfe4182..920ffd5f8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListAttachedUserPolicies.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListAttachedUserPolicies.go @@ -172,6 +172,9 @@ func (c *Client) addOperationListAttachedUserPoliciesMiddlewares(stack *middlewa if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpListAttachedUserPoliciesValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListEntitiesForPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListEntitiesForPolicy.go index 0844ac675..6130cb90d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListEntitiesForPolicy.go +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListEntitiesForPolicy.go @@ -190,6 +190,9 @@ func (c *Client) addOperationListEntitiesForPolicyMiddlewares(stack *middleware. if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpListEntitiesForPolicyValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListGroupPolicies.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListGroupPolicies.go index 5513f487a..1a3747d77 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListGroupPolicies.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListGroupPolicies.go @@ -166,6 +166,9 @@ func (c *Client) addOperationListGroupPoliciesMiddlewares(stack *middleware.Stac if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpListGroupPoliciesValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListGroups.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListGroups.go index 13be88a6b..404a28202 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListGroups.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListGroups.go @@ -155,6 +155,9 @@ func (c *Client) addOperationListGroupsMiddlewares(stack *middleware.Stack, opti if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListGroups(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListGroupsForUser.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListGroupsForUser.go index 7aa47cf88..87e19e1ff 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListGroupsForUser.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListGroupsForUser.go @@ -152,6 +152,9 @@ func (c *Client) addOperationListGroupsForUserMiddlewares(stack *middleware.Stac if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpListGroupsForUserValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListInstanceProfileTags.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListInstanceProfileTags.go index bf2efc836..3f9b65e48 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListInstanceProfileTags.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListInstanceProfileTags.go @@ -155,6 +155,9 @@ func (c *Client) addOperationListInstanceProfileTagsMiddlewares(stack *middlewar if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpListInstanceProfileTagsValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListInstanceProfiles.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListInstanceProfiles.go index 257da5997..25704aec7 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListInstanceProfiles.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListInstanceProfiles.go @@ -164,6 +164,9 @@ func (c *Client) addOperationListInstanceProfilesMiddlewares(stack *middleware.S if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListInstanceProfiles(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListInstanceProfilesForRole.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListInstanceProfilesForRole.go index 7815e5473..1e6af8733 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListInstanceProfilesForRole.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListInstanceProfilesForRole.go @@ -156,6 +156,9 @@ func (c *Client) addOperationListInstanceProfilesForRoleMiddlewares(stack *middl if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpListInstanceProfilesForRoleValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListMFADeviceTags.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListMFADeviceTags.go index bdac50b8d..1408ab4c9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListMFADeviceTags.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListMFADeviceTags.go @@ -156,6 +156,9 @@ func (c *Client) addOperationListMFADeviceTagsMiddlewares(stack *middleware.Stac if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpListMFADeviceTagsValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListMFADevices.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListMFADevices.go index 74b5b61cd..0812bb90d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListMFADevices.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListMFADevices.go @@ -154,6 +154,9 @@ func (c *Client) addOperationListMFADevicesMiddlewares(stack *middleware.Stack, if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListMFADevices(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListOpenIDConnectProviderTags.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListOpenIDConnectProviderTags.go index acad2e84d..29daed6be 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListOpenIDConnectProviderTags.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListOpenIDConnectProviderTags.go @@ -160,6 +160,9 @@ func (c *Client) addOperationListOpenIDConnectProviderTagsMiddlewares(stack *mid if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpListOpenIDConnectProviderTagsValidationMiddleware(stack); err != nil { return err } diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListOpenIDConnectProviders.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListOpenIDConnectProviders.go index bc88dac5e..030f91a56 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListOpenIDConnectProviders.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListOpenIDConnectProviders.go @@ -114,6 +114,9 @@ func (c *Client) addOperationListOpenIDConnectProvidersMiddlewares(stack *middle if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListOpenIDConnectProviders(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListOrganizationsFeatures.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListOrganizationsFeatures.go index 43d139733..df933e641 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListOrganizationsFeatures.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListOrganizationsFeatures.go @@ -112,6 +112,9 @@ func (c *Client) addOperationListOrganizationsFeaturesMiddlewares(stack *middlew if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListOrganizationsFeatures(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListPolicies.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListPolicies.go index 9c4061473..867bc9bc2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListPolicies.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListPolicies.go @@ -192,6 +192,9 @@ func (c *Client) addOperationListPoliciesMiddlewares(stack *middleware.Stack, op if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListPolicies(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListPoliciesGrantingServiceAccess.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListPoliciesGrantingServiceAccess.go index b7b5a8f2e..9a90e61f0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListPoliciesGrantingServiceAccess.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListPoliciesGrantingServiceAccess.go @@ -180,6 +180,9 @@ func (c *Client) addOperationListPoliciesGrantingServiceAccessMiddlewares(stack if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpListPoliciesGrantingServiceAccessValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListPolicyTags.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListPolicyTags.go index ddbfc061a..09bdc90c2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListPolicyTags.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListPolicyTags.go @@ -155,6 +155,9 @@ func (c *Client) addOperationListPolicyTagsMiddlewares(stack 
*middleware.Stack, if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpListPolicyTagsValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListPolicyVersions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListPolicyVersions.go index 943983f5e..69c002b0d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListPolicyVersions.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListPolicyVersions.go @@ -157,6 +157,9 @@ func (c *Client) addOperationListPolicyVersionsMiddlewares(stack *middleware.Sta if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpListPolicyVersionsValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListRolePolicies.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListRolePolicies.go index 1af132816..673d830a5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListRolePolicies.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListRolePolicies.go @@ -160,6 +160,9 @@ func (c *Client) addOperationListRolePoliciesMiddlewares(stack *middleware.Stack if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpListRolePoliciesValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListRoleTags.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListRoleTags.go index f91e03949..ede33085f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListRoleTags.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListRoleTags.go @@ -155,6 +155,9 @@ func (c *Client) addOperationListRoleTagsMiddlewares(stack *middleware.Stack, op if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpListRoleTagsValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListRoles.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListRoles.go index 9d3fc4a92..b9ab38dea 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListRoles.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListRoles.go @@ -171,6 +171,9 @@ func (c *Client) addOperationListRolesMiddlewares(stack *middleware.Stack, optio if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListRoles(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListSAMLProviderTags.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListSAMLProviderTags.go index 8c677d3ff..bca5377e7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListSAMLProviderTags.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListSAMLProviderTags.go @@ -160,6 +160,9 @@ func (c *Client) addOperationListSAMLProviderTagsMiddlewares(stack *middleware.S if err = 
addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpListSAMLProviderTagsValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListSAMLProviders.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListSAMLProviders.go index acd4d43b2..2b7039574 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListSAMLProviders.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListSAMLProviders.go @@ -116,6 +116,9 @@ func (c *Client) addOperationListSAMLProvidersMiddlewares(stack *middleware.Stac if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListSAMLProviders(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListSSHPublicKeys.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListSSHPublicKeys.go index c9a609476..192e77bf1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListSSHPublicKeys.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListSSHPublicKeys.go @@ -159,6 +159,9 @@ func (c *Client) addOperationListSSHPublicKeysMiddlewares(stack *middleware.Stac if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListSSHPublicKeys(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListServerCertificateTags.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListServerCertificateTags.go index 4683f3969..a218f2ce1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListServerCertificateTags.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListServerCertificateTags.go @@ -161,6 +161,9 @@ func (c *Client) addOperationListServerCertificateTagsMiddlewares(stack *middlew if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpListServerCertificateTagsValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListServerCertificates.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListServerCertificates.go index 638d0167c..93c94a1d9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListServerCertificates.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListServerCertificates.go @@ -167,6 +167,9 @@ func (c *Client) addOperationListServerCertificatesMiddlewares(stack *middleware if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListServerCertificates(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListServiceSpecificCredentials.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListServiceSpecificCredentials.go index d48c3c410..623c54483 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListServiceSpecificCredentials.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListServiceSpecificCredentials.go @@ -131,6 +131,9 @@ func (c *Client) addOperationListServiceSpecificCredentialsMiddlewares(stack *mi if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListServiceSpecificCredentials(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListSigningCertificates.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListSigningCertificates.go index 95a545527..829d0a9ac 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListSigningCertificates.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListSigningCertificates.go @@ -159,6 +159,9 @@ func (c *Client) addOperationListSigningCertificatesMiddlewares(stack *middlewar if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListSigningCertificates(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListUserPolicies.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListUserPolicies.go index 3f633c2df..be3ee9061 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListUserPolicies.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListUserPolicies.go @@ -159,6 +159,9 @@ func (c *Client) addOperationListUserPoliciesMiddlewares(stack *middleware.Stack if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpListUserPoliciesValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListUserTags.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListUserTags.go index a402c64dc..104b83d28 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListUserTags.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListUserTags.go @@ -155,6 +155,9 @@ func (c *Client) addOperationListUserTagsMiddlewares(stack *middleware.Stack, op if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpListUserTagsValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListUsers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListUsers.go index d4a29cf86..cf0ffcff8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListUsers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListUsers.go @@ -167,6 +167,9 @@ func (c *Client) addOperationListUsersMiddlewares(stack *middleware.Stack, optio if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListUsers(options.Region), middleware.Before); err != nil { return err } diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListVirtualMFADevices.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListVirtualMFADevices.go index 2e27beefc..72177c49e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListVirtualMFADevices.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListVirtualMFADevices.go @@ -155,6 +155,9 @@ func (c *Client) addOperationListVirtualMFADevicesMiddlewares(stack *middleware. if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListVirtualMFADevices(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_PutGroupPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_PutGroupPolicy.go index 76011d291..1420a0c15 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_PutGroupPolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_PutGroupPolicy.go @@ -166,6 +166,9 @@ func (c *Client) addOperationPutGroupPolicyMiddlewares(stack *middleware.Stack, if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutGroupPolicyValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_PutRolePermissionsBoundary.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_PutRolePermissionsBoundary.go index 7535a37cd..ef0bf72a6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_PutRolePermissionsBoundary.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_PutRolePermissionsBoundary.go @@ -137,6 +137,9 @@ func (c *Client) addOperationPutRolePermissionsBoundaryMiddlewares(stack *middle if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutRolePermissionsBoundaryValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_PutRolePolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_PutRolePolicy.go index b81b5b4dc..a4143ec73 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_PutRolePolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_PutRolePolicy.go @@ -175,6 +175,9 @@ func (c *Client) addOperationPutRolePolicyMiddlewares(stack *middleware.Stack, o if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutRolePolicyValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_PutUserPermissionsBoundary.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_PutUserPermissionsBoundary.go index b90d153f7..a6588015b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_PutUserPermissionsBoundary.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_PutUserPermissionsBoundary.go @@ -135,6 +135,9 @@ func (c *Client) addOperationPutUserPermissionsBoundaryMiddlewares(stack *middle if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = 
addOpPutUserPermissionsBoundaryValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_PutUserPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_PutUserPolicy.go index fcd23509e..ed0a91e5d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_PutUserPolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_PutUserPolicy.go @@ -166,6 +166,9 @@ func (c *Client) addOperationPutUserPolicyMiddlewares(stack *middleware.Stack, o if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutUserPolicyValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_RemoveClientIDFromOpenIDConnectProvider.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_RemoveClientIDFromOpenIDConnectProvider.go index c045b5048..c63b1efa9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_RemoveClientIDFromOpenIDConnectProvider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_RemoveClientIDFromOpenIDConnectProvider.go @@ -124,6 +124,9 @@ func (c *Client) addOperationRemoveClientIDFromOpenIDConnectProviderMiddlewares( if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpRemoveClientIDFromOpenIDConnectProviderValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_RemoveRoleFromInstanceProfile.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_RemoveRoleFromInstanceProfile.go index ee73f6187..6272b9fea 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_RemoveRoleFromInstanceProfile.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_RemoveRoleFromInstanceProfile.go @@ -135,6 +135,9 @@ func (c *Client) addOperationRemoveRoleFromInstanceProfileMiddlewares(stack *mid if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpRemoveRoleFromInstanceProfileValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_RemoveUserFromGroup.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_RemoveUserFromGroup.go index ae4a5d5d6..611e11aca 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_RemoveUserFromGroup.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_RemoveUserFromGroup.go @@ -124,6 +124,9 @@ func (c *Client) addOperationRemoveUserFromGroupMiddlewares(stack *middleware.St if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpRemoveUserFromGroupValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ResetServiceSpecificCredential.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ResetServiceSpecificCredential.go index 7e58651a3..6689f7f7a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ResetServiceSpecificCredential.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ResetServiceSpecificCredential.go @@ -135,6 +135,9 @@ func (c *Client) 
addOperationResetServiceSpecificCredentialMiddlewares(stack *mi if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpResetServiceSpecificCredentialValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ResyncMFADevice.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ResyncMFADevice.go index d9616307e..d3d0d6518 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ResyncMFADevice.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ResyncMFADevice.go @@ -144,6 +144,9 @@ func (c *Client) addOperationResyncMFADeviceMiddlewares(stack *middleware.Stack, if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpResyncMFADeviceValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_SetDefaultPolicyVersion.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_SetDefaultPolicyVersion.go index 408879a99..741b0fd48 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_SetDefaultPolicyVersion.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_SetDefaultPolicyVersion.go @@ -130,6 +130,9 @@ func (c *Client) addOperationSetDefaultPolicyVersionMiddlewares(stack *middlewar if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpSetDefaultPolicyVersionValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_SetSecurityTokenServicePreferences.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_SetSecurityTokenServicePreferences.go index 999933e85..9a8ea6b5d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_SetSecurityTokenServicePreferences.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_SetSecurityTokenServicePreferences.go @@ -137,6 +137,9 @@ func (c *Client) addOperationSetSecurityTokenServicePreferencesMiddlewares(stack if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpSetSecurityTokenServicePreferencesValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_SimulateCustomPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_SimulateCustomPolicy.go index 532d40f33..9dff863c0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_SimulateCustomPolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_SimulateCustomPolicy.go @@ -343,6 +343,9 @@ func (c *Client) addOperationSimulateCustomPolicyMiddlewares(stack *middleware.S if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpSimulateCustomPolicyValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_SimulatePrincipalPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_SimulatePrincipalPolicy.go index 13b95b4ef..c9177fa05 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_SimulatePrincipalPolicy.go 
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_SimulatePrincipalPolicy.go @@ -368,6 +368,9 @@ func (c *Client) addOperationSimulatePrincipalPolicyMiddlewares(stack *middlewar if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpSimulatePrincipalPolicyValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_TagInstanceProfile.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_TagInstanceProfile.go index 9eded37ac..b906d565a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_TagInstanceProfile.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_TagInstanceProfile.go @@ -145,6 +145,9 @@ func (c *Client) addOperationTagInstanceProfileMiddlewares(stack *middleware.Sta if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpTagInstanceProfileValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_TagMFADevice.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_TagMFADevice.go index 44ce7fea1..ec596e113 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_TagMFADevice.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_TagMFADevice.go @@ -147,6 +147,9 @@ func (c *Client) addOperationTagMFADeviceMiddlewares(stack *middleware.Stack, op if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpTagMFADeviceValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_TagOpenIDConnectProvider.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_TagOpenIDConnectProvider.go index 15f7a20af..99cb32fa6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_TagOpenIDConnectProvider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_TagOpenIDConnectProvider.go @@ -147,6 +147,9 @@ func (c *Client) addOperationTagOpenIDConnectProviderMiddlewares(stack *middlewa if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpTagOpenIDConnectProviderValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_TagPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_TagPolicy.go index 3b13ad9a6..24a70dc24 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_TagPolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_TagPolicy.go @@ -145,6 +145,9 @@ func (c *Client) addOperationTagPolicyMiddlewares(stack *middleware.Stack, optio if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpTagPolicyValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_TagRole.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_TagRole.go index fc89f5ad3..5d9961518 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_TagRole.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_TagRole.go @@ 
-153,6 +153,9 @@ func (c *Client) addOperationTagRoleMiddlewares(stack *middleware.Stack, options if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpTagRoleValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_TagSAMLProvider.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_TagSAMLProvider.go index fd16e6898..75d4316b0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_TagSAMLProvider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_TagSAMLProvider.go @@ -147,6 +147,9 @@ func (c *Client) addOperationTagSAMLProviderMiddlewares(stack *middleware.Stack, if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpTagSAMLProviderValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_TagServerCertificate.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_TagServerCertificate.go index 9b993e46a..69e4a20e1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_TagServerCertificate.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_TagServerCertificate.go @@ -154,6 +154,9 @@ func (c *Client) addOperationTagServerCertificateMiddlewares(stack *middleware.S if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpTagServerCertificateValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_TagUser.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_TagUser.go index 6f99ac6a5..b2d308064 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_TagUser.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_TagUser.go @@ -152,6 +152,9 @@ func (c *Client) addOperationTagUserMiddlewares(stack *middleware.Stack, options if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpTagUserValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UntagInstanceProfile.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UntagInstanceProfile.go index 947da742d..acfd9d361 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UntagInstanceProfile.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UntagInstanceProfile.go @@ -122,6 +122,9 @@ func (c *Client) addOperationUntagInstanceProfileMiddlewares(stack *middleware.S if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpUntagInstanceProfileValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UntagMFADevice.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UntagMFADevice.go index 5bdfcf472..c271a3d5f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UntagMFADevice.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UntagMFADevice.go @@ -123,6 +123,9 @@ func (c *Client) addOperationUntagMFADeviceMiddlewares(stack 
*middleware.Stack, if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpUntagMFADeviceValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UntagOpenIDConnectProvider.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UntagOpenIDConnectProvider.go index 66d67cbee..bb4692132 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UntagOpenIDConnectProvider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UntagOpenIDConnectProvider.go @@ -124,6 +124,9 @@ func (c *Client) addOperationUntagOpenIDConnectProviderMiddlewares(stack *middle if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpUntagOpenIDConnectProviderValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UntagPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UntagPolicy.go index 9197933cc..d4a53c073 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UntagPolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UntagPolicy.go @@ -122,6 +122,9 @@ func (c *Client) addOperationUntagPolicyMiddlewares(stack *middleware.Stack, opt if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpUntagPolicyValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UntagRole.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UntagRole.go index 3d5a54aa7..e71d8bdfe 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UntagRole.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UntagRole.go @@ -122,6 +122,9 @@ func (c *Client) addOperationUntagRoleMiddlewares(stack *middleware.Stack, optio if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpUntagRoleValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UntagSAMLProvider.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UntagSAMLProvider.go index bc82fa51d..33ab3df2b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UntagSAMLProvider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UntagSAMLProvider.go @@ -124,6 +124,9 @@ func (c *Client) addOperationUntagSAMLProviderMiddlewares(stack *middleware.Stac if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpUntagSAMLProviderValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UntagServerCertificate.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UntagServerCertificate.go index 49070c830..00201bd5e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UntagServerCertificate.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UntagServerCertificate.go @@ -128,6 +128,9 @@ func (c *Client) addOperationUntagServerCertificateMiddlewares(stack *middleware if err = 
addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpUntagServerCertificateValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UntagUser.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UntagUser.go index 24d47ca6d..1d5096e3b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UntagUser.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UntagUser.go @@ -122,6 +122,9 @@ func (c *Client) addOperationUntagUserMiddlewares(stack *middleware.Stack, optio if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpUntagUserValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateAccessKey.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateAccessKey.go index 9b4ce807c..a657096b0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateAccessKey.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateAccessKey.go @@ -143,6 +143,9 @@ func (c *Client) addOperationUpdateAccessKeyMiddlewares(stack *middleware.Stack, if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpUpdateAccessKeyValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateAccountPasswordPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateAccountPasswordPolicy.go index d501b71c3..e248107d1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateAccountPasswordPolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateAccountPasswordPolicy.go @@ -196,6 +196,9 @@ func (c *Client) addOperationUpdateAccountPasswordPolicyMiddlewares(stack *middl if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateAccountPasswordPolicy(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateAssumeRolePolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateAssumeRolePolicy.go index 26fe6cd3e..b41e4f143 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateAssumeRolePolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateAssumeRolePolicy.go @@ -141,6 +141,9 @@ func (c *Client) addOperationUpdateAssumeRolePolicyMiddlewares(stack *middleware if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpUpdateAssumeRolePolicyValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateGroup.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateGroup.go index c11cf78ab..cad2c235b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateGroup.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateGroup.go @@ -145,6 +145,9 @@ func (c *Client) addOperationUpdateGroupMiddlewares(stack 
*middleware.Stack, opt if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpUpdateGroupValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateLoginProfile.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateLoginProfile.go index b2de7ba0f..3a3c74263 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateLoginProfile.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateLoginProfile.go @@ -145,6 +145,9 @@ func (c *Client) addOperationUpdateLoginProfileMiddlewares(stack *middleware.Sta if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpUpdateLoginProfileValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateOpenIDConnectProviderThumbprint.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateOpenIDConnectProviderThumbprint.go index b8087b02e..abb2972ea 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateOpenIDConnectProviderThumbprint.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateOpenIDConnectProviderThumbprint.go @@ -139,6 +139,9 @@ func (c *Client) addOperationUpdateOpenIDConnectProviderThumbprintMiddlewares(st if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpUpdateOpenIDConnectProviderThumbprintValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateRole.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateRole.go index 85c5ee1c1..f14f43e94 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateRole.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateRole.go @@ -131,6 +131,9 @@ func (c *Client) addOperationUpdateRoleMiddlewares(stack *middleware.Stack, opti if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpUpdateRoleValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateRoleDescription.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateRoleDescription.go index 9837bbc81..c294734a0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateRoleDescription.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateRoleDescription.go @@ -120,6 +120,9 @@ func (c *Client) addOperationUpdateRoleDescriptionMiddlewares(stack *middleware. 
if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpUpdateRoleDescriptionValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateSAMLProvider.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateSAMLProvider.go index 3598faf86..12abbb29a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateSAMLProvider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateSAMLProvider.go @@ -138,6 +138,9 @@ func (c *Client) addOperationUpdateSAMLProviderMiddlewares(stack *middleware.Sta if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpUpdateSAMLProviderValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateSSHPublicKey.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateSSHPublicKey.go index 0523f3361..64c6bbbc0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateSSHPublicKey.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateSSHPublicKey.go @@ -141,6 +141,9 @@ func (c *Client) addOperationUpdateSSHPublicKeyMiddlewares(stack *middleware.Sta if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpUpdateSSHPublicKeyValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateServerCertificate.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateServerCertificate.go index 5d4b55fcb..7cb0c9b69 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateServerCertificate.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateServerCertificate.go @@ -156,6 +156,9 @@ func (c *Client) addOperationUpdateServerCertificateMiddlewares(stack *middlewar if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpUpdateServerCertificateValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateServiceSpecificCredential.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateServiceSpecificCredential.go index dfafeebe1..d7c5673c9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateServiceSpecificCredential.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateServiceSpecificCredential.go @@ -132,6 +132,9 @@ func (c *Client) addOperationUpdateServiceSpecificCredentialMiddlewares(stack *m if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpUpdateServiceSpecificCredentialValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateSigningCertificate.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateSigningCertificate.go index 5019ce6ba..82908cf5b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateSigningCertificate.go +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateSigningCertificate.go @@ -138,6 +138,9 @@ func (c *Client) addOperationUpdateSigningCertificateMiddlewares(stack *middlewa if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpUpdateSigningCertificateValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateUser.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateUser.go index 2c59d59ee..5a32a7084 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateUser.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateUser.go @@ -146,6 +146,9 @@ func (c *Client) addOperationUpdateUserMiddlewares(stack *middleware.Stack, opti if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpUpdateUserValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UploadSSHPublicKey.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UploadSSHPublicKey.go index 478275081..d2ac7e229 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UploadSSHPublicKey.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UploadSSHPublicKey.go @@ -147,6 +147,9 @@ func (c *Client) addOperationUploadSSHPublicKeyMiddlewares(stack *middleware.Sta if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpUploadSSHPublicKeyValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UploadServerCertificate.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UploadServerCertificate.go index ea45b9525..7cab7e66b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UploadServerCertificate.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UploadServerCertificate.go @@ -242,6 +242,9 @@ func (c *Client) addOperationUploadServerCertificateMiddlewares(stack *middlewar if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpUploadServerCertificateValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UploadSigningCertificate.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UploadSigningCertificate.go index a22a48f83..213037e9f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UploadSigningCertificate.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UploadSigningCertificate.go @@ -161,6 +161,9 @@ func (c *Client) addOperationUploadSigningCertificateMiddlewares(stack *middlewa if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpUploadSigningCertificateValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/generated.json index 6480a123e..0832b8bd6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/generated.json +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/generated.json @@ -185,6 +185,7 @@ "protocol_test.go", "serializers.go", "snapshot_test.go", + "sra_operation_order_test.go", "types/enums.go", "types/errors.go", "types/types.go", diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/go_module_metadata.go index 7ea342899..7bda22ae8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/iam/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/iam/go_module_metadata.go @@ -3,4 +3,4 @@ package iam // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.39.2" +const goModuleVersion = "1.40.1" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/CHANGELOG.md index 2246bd62e..af4dcdc14 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/CHANGELOG.md @@ -1,3 +1,142 @@ +# v1.7.0 (2025-03-11) + +* **Feature**: Add extra check during output checksum validation so the validation skip warning would not be logged if object is not fetched from s3 + +# v1.6.2 (2025-02-27) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.1 (2025-02-18) + +* **Bug Fix**: Bump go version to 1.22 +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.6.0 (2025-02-10) + +* **Feature**: Support CRC64NVME flex checksums. + +# v1.5.6 (2025-02-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.5 (2025-01-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.4 (2025-01-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.5.3 (2025-01-24) + +* **Bug Fix**: Enable request checksum validation mode by default +* **Dependency Update**: Updated to the latest SDK module versions +* **Dependency Update**: Upgrade to smithy-go v1.22.2. + +# v1.5.2 (2025-01-17) + +* **Bug Fix**: Fix bug where credentials weren't refreshed during retry loop. + +# v1.5.1 (2025-01-16) + +* **Bug Fix**: Fix nil dereference panic for operations that require checksums, but do not have an input setting for which algorithm to use. + +# v1.5.0 (2025-01-15) + +* **Feature**: S3 client behavior is updated to always calculate a checksum by default for operations that support it (such as PutObject or UploadPart), or require it (such as DeleteObjects). The checksum algorithm used by default now becomes CRC32. Checksum behavior can be configured using `when_supported` and `when_required` options - in code using RequestChecksumCalculation, in shared config using request_checksum_calculation, or as env variable using AWS_REQUEST_CHECKSUM_CALCULATION. The S3 client attempts to validate response checksums for all S3 API operations that support checksums. However, if the SDK has not implemented the specified checksum algorithm then this validation is skipped. Checksum validation behavior can be configured using `when_supported` and `when_required` options - in code using ResponseChecksumValidation, in shared config using response_checksum_validation, or as env variable using AWS_RESPONSE_CHECKSUM_VALIDATION. 
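The v1.5.0 entry above introduces the new checksum defaults and their opt-outs. A minimal sketch of the in-code configuration path, assuming the standard aws, config, and s3 packages from this SDK; the two WhenRequired values restore the previous behavior of only checksumming when an operation demands it:

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.Background())
	if err != nil {
		log.Fatal(err)
	}

	// Opt out of the new "when supported" defaults: only compute request
	// checksums and only validate response checksums when required.
	cfg.RequestChecksumCalculation = aws.RequestChecksumCalculationWhenRequired
	cfg.ResponseChecksumValidation = aws.ResponseChecksumValidationWhenRequired

	client := s3.NewFromConfig(cfg)
	_ = client // operations such as PutObject now skip the default CRC32 unless required
}

The same two settings can also come from shared config (request_checksum_calculation, response_checksum_validation) or the environment variables named in the entry above.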
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.8 (2025-01-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.7 (2024-12-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.6 (2024-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.5 (2024-11-18) + +* **Dependency Update**: Update to smithy-go v1.22.1. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.4 (2024-11-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.3 (2024-10-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.2 (2024-10-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.1 (2024-10-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.0 (2024-10-04) + +* **Feature**: Add support for HTTP client metrics. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.20 (2024-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.19 (2024-09-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.18 (2024-08-15) + +* **Dependency Update**: Bump minimum Go version to 1.21. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.17 (2024-07-10.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.16 (2024-07-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.15 (2024-06-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.14 (2024-06-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.13 (2024-06-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.12 (2024-06-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.11 (2024-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.10 (2024-06-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.9 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.8 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.3.7 (2024-03-29) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/algorithms.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/algorithms.go index a17041c35..dab97fb22 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/algorithms.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/algorithms.go @@ -9,6 +9,7 @@ import ( "fmt" "hash" "hash/crc32" + "hash/crc64" "io" "strings" "sync" @@ -30,13 +31,20 @@ const ( // AlgorithmSHA256 represents SHA256 hash algorithm AlgorithmSHA256 Algorithm = "SHA256" + + // AlgorithmCRC64NVME represents CRC64NVME hash algorithm + AlgorithmCRC64NVME Algorithm = "CRC64NVME" ) +// inverted NVME polynomial as required by crc64.MakeTable +const crc64NVME = 0x9a6c_9329_ac4b_c9b5 + var supportedAlgorithms = []Algorithm{ AlgorithmCRC32C, AlgorithmCRC32, AlgorithmSHA1, AlgorithmSHA256, + AlgorithmCRC64NVME, } func (a Algorithm) String() string { return string(a) } @@ -89,6 +97,8 @@ func NewAlgorithmHash(v Algorithm) (hash.Hash, error) { return crc32.NewIEEE(), nil case AlgorithmCRC32C: return 
crc32.New(crc32.MakeTable(crc32.Castagnoli)), nil + case AlgorithmCRC64NVME: + return crc64.New(crc64.MakeTable(crc64NVME)), nil default: return nil, fmt.Errorf("unknown checksum algorithm, %v", v) } @@ -106,6 +116,8 @@ func AlgorithmChecksumLength(v Algorithm) (int, error) { return crc32.Size, nil case AlgorithmCRC32C: return crc32.Size, nil + case AlgorithmCRC64NVME: + return crc64.Size, nil default: return 0, fmt.Errorf("unknown checksum algorithm, %v", v) } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/go_module_metadata.go index 6785174da..148f77de2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/go_module_metadata.go @@ -3,4 +3,4 @@ package checksum // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.3.7" +const goModuleVersion = "1.7.0" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_add.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_add.go index 1b727acbe..274d649fb 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_add.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_add.go @@ -1,6 +1,7 @@ package checksum import ( + "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/smithy-go/middleware" ) @@ -14,11 +15,16 @@ type InputMiddlewareOptions struct { // and true, or false if no algorithm is specified. GetAlgorithm func(interface{}) (string, bool) - // Forces the middleware to compute the input payload's checksum. The - // request will fail if the algorithm is not specified or unable to compute - // the checksum. + // RequireChecksum indicates whether operation model forces middleware to compute the input payload's checksum. + // If RequireChecksum is set to true, checksum will be calculated and RequestChecksumCalculation will be ignored, + // otherwise RequestChecksumCalculation will be used to indicate if checksum will be calculated RequireChecksum bool + // RequestChecksumCalculation is the user config to opt-in/out request checksum calculation. If RequireChecksum is + // set to true, checksum will be calculated and this field will be ignored, otherwise + // RequestChecksumCalculation will be used to indicate if checksum will be calculated + RequestChecksumCalculation aws.RequestChecksumCalculation + // Enables support for wrapping the serialized input payload with a // content-encoding: aws-check wrapper, and including a trailer for the // algorithm's checksum value. @@ -46,33 +52,16 @@ type InputMiddlewareOptions struct { // AddInputMiddleware adds the middleware for performing checksum computing // of request payloads, and checksum validation of response payloads. +// +// Deprecated: This internal-only runtime API is frozen. Do not call or modify +// it in new code. Checksum-enabled service operations now generate this +// middleware setup code inline per #2507. 
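The CRC64NVME support added in algorithms.go above is standard library hash/crc64 plus the inverted NVME polynomial. A self-contained sketch reproducing the digest format the SDK sends in the x-amz-checksum-crc64nvme header (the payload is an arbitrary example):

package main

import (
	"encoding/base64"
	"fmt"
	"hash/crc64"
)

// Same inverted NVME polynomial the vendored algorithms.go passes to
// crc64.MakeTable.
const crc64NVME = 0x9a6c_9329_ac4b_c9b5

func main() {
	h := crc64.New(crc64.MakeTable(crc64NVME))
	h.Write([]byte("example payload"))

	// Checksum headers carry the base64 encoding of the big-endian digest.
	fmt.Println(base64.StdEncoding.EncodeToString(h.Sum(nil)))
}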
func AddInputMiddleware(stack *middleware.Stack, options InputMiddlewareOptions) (err error) { - // TODO ensure this works correctly with presigned URLs - - // Middleware stack: - // * (OK)(Initialize) --none-- - // * (OK)(Serialize) EndpointResolver - // * (OK)(Build) ComputeContentLength - // * (AD)(Build) Header ComputeInputPayloadChecksum - // * SIGNED Payload - If HTTP && not support trailing checksum - // * UNSIGNED Payload - If HTTPS && not support trailing checksum - // * (RM)(Build) ContentChecksum - OK to remove - // * (OK)(Build) ComputePayloadHash - // * v4.dynamicPayloadSigningMiddleware - // * v4.computePayloadSHA256 - // * v4.unsignedPayload - // (OK)(Build) Set computedPayloadHash header - // * (OK)(Finalize) Retry - // * (AD)(Finalize) Trailer ComputeInputPayloadChecksum, - // * Requires HTTPS && support trailing checksum - // * UNSIGNED Payload - // * Finalize run if HTTPS && support trailing checksum - // * (OK)(Finalize) Signing - // * (OK)(Deserialize) --none-- - // Initial checksum configuration look up middleware - err = stack.Initialize.Add(&setupInputContext{ - GetAlgorithm: options.GetAlgorithm, + err = stack.Initialize.Add(&SetupInputContext{ + GetAlgorithm: options.GetAlgorithm, + RequireChecksum: options.RequireChecksum, + RequestChecksumCalculation: options.RequestChecksumCalculation, }, middleware.Before) if err != nil { return err @@ -80,8 +69,7 @@ func AddInputMiddleware(stack *middleware.Stack, options InputMiddlewareOptions) stack.Build.Remove("ContentChecksum") - inputChecksum := &computeInputPayloadChecksum{ - RequireChecksum: options.RequireChecksum, + inputChecksum := &ComputeInputPayloadChecksum{ EnableTrailingChecksum: options.EnableTrailingChecksum, EnableComputePayloadHash: options.EnableComputeSHA256PayloadHash, EnableDecodedContentLengthHeader: options.EnableDecodedContentLengthHeader, @@ -92,9 +80,8 @@ func AddInputMiddleware(stack *middleware.Stack, options InputMiddlewareOptions) // If trailing checksum is not supported no need for finalize handler to be added. if options.EnableTrailingChecksum { - trailerMiddleware := &addInputChecksumTrailer{ + trailerMiddleware := &AddInputChecksumTrailer{ EnableTrailingChecksum: inputChecksum.EnableTrailingChecksum, - RequireChecksum: inputChecksum.RequireChecksum, EnableComputePayloadHash: inputChecksum.EnableComputePayloadHash, EnableDecodedContentLengthHeader: inputChecksum.EnableDecodedContentLengthHeader, } @@ -109,10 +96,10 @@ func AddInputMiddleware(stack *middleware.Stack, options InputMiddlewareOptions) // RemoveInputMiddleware Removes the compute input payload checksum middleware // handlers from the stack. func RemoveInputMiddleware(stack *middleware.Stack) { - id := (*setupInputContext)(nil).ID() + id := (*SetupInputContext)(nil).ID() stack.Initialize.Remove(id) - id = (*computeInputPayloadChecksum)(nil).ID() + id = (*ComputeInputPayloadChecksum)(nil).ID() stack.Finalize.Remove(id) } @@ -126,6 +113,12 @@ type OutputMiddlewareOptions struct { // mode and true, or false if no mode is specified. GetValidationMode func(interface{}) (string, bool) + // SetValidationMode is a function to set the checksum validation mode of input parameters + SetValidationMode func(interface{}, string) + + // ResponseChecksumValidation is the user config to opt-in/out response checksum validation + ResponseChecksumValidation aws.ResponseChecksumValidation + // The set of checksum algorithms that should be used for response payload // checksum validation. 
The algorithm(s) used will be a union of the // output's returned algorithms and this set. @@ -134,7 +127,7 @@ type OutputMiddlewareOptions struct { ValidationAlgorithms []string // If set the middleware will ignore output multipart checksums. Otherwise - // an checksum format error will be returned by the middleware. + // a checksum format error will be returned by the middleware. IgnoreMultipartValidation bool // When set the middleware will log when output does not have checksum or @@ -150,7 +143,9 @@ type OutputMiddlewareOptions struct { // checksum. func AddOutputMiddleware(stack *middleware.Stack, options OutputMiddlewareOptions) error { err := stack.Initialize.Add(&setupOutputContext{ - GetValidationMode: options.GetValidationMode, + GetValidationMode: options.GetValidationMode, + SetValidationMode: options.SetValidationMode, + ResponseChecksumValidation: options.ResponseChecksumValidation, }, middleware.Before) if err != nil { return err diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_checksum_metrics_tracking.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_checksum_metrics_tracking.go new file mode 100644 index 000000000..861a44293 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_checksum_metrics_tracking.go @@ -0,0 +1,90 @@ +package checksum + +import ( + "context" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +var supportedChecksumFeatures = map[Algorithm]awsmiddleware.UserAgentFeature{ + AlgorithmCRC32: awsmiddleware.UserAgentFeatureRequestChecksumCRC32, + AlgorithmCRC32C: awsmiddleware.UserAgentFeatureRequestChecksumCRC32C, + AlgorithmSHA1: awsmiddleware.UserAgentFeatureRequestChecksumSHA1, + AlgorithmSHA256: awsmiddleware.UserAgentFeatureRequestChecksumSHA256, + AlgorithmCRC64NVME: awsmiddleware.UserAgentFeatureRequestChecksumCRC64, +} + +// RequestChecksumMetricsTracking is the middleware to track operation request's checksum usage +type RequestChecksumMetricsTracking struct { + RequestChecksumCalculation aws.RequestChecksumCalculation + UserAgent *awsmiddleware.RequestUserAgent +} + +// ID provides the middleware identifier +func (m *RequestChecksumMetricsTracking) ID() string { + return "AWSChecksum:RequestMetricsTracking" +} + +// HandleBuild checks request checksum config and checksum value sent +// and sends corresponding feature id to user agent +func (m *RequestChecksumMetricsTracking) HandleBuild( + ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, +) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown request type %T", req) + } + + switch m.RequestChecksumCalculation { + case aws.RequestChecksumCalculationWhenSupported: + m.UserAgent.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRequestChecksumWhenSupported) + case aws.RequestChecksumCalculationWhenRequired: + m.UserAgent.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRequestChecksumWhenRequired) + } + + for algo, feat := range supportedChecksumFeatures { + checksumHeader := AlgorithmHTTPHeader(algo) + if checksum := req.Header.Get(checksumHeader); checksum != "" { + m.UserAgent.AddUserAgentFeature(feat) + } + } + + return next.HandleBuild(ctx, in) +} + +// ResponseChecksumMetricsTracking 
is the middleware to track operation response's checksum usage +type ResponseChecksumMetricsTracking struct { + ResponseChecksumValidation aws.ResponseChecksumValidation + UserAgent *awsmiddleware.RequestUserAgent +} + +// ID provides the middleware identifier +func (m *ResponseChecksumMetricsTracking) ID() string { + return "AWSChecksum:ResponseMetricsTracking" +} + +// HandleBuild checks the response checksum config and sends corresponding feature id to user agent +func (m *ResponseChecksumMetricsTracking) HandleBuild( + ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, +) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown request type %T", req) + } + + switch m.ResponseChecksumValidation { + case aws.ResponseChecksumValidationWhenSupported: + m.UserAgent.AddUserAgentFeature(awsmiddleware.UserAgentFeatureResponseChecksumWhenSupported) + case aws.ResponseChecksumValidationWhenRequired: + m.UserAgent.AddUserAgentFeature(awsmiddleware.UserAgentFeatureResponseChecksumWhenRequired) + } + + return next.HandleBuild(ctx, in) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_compute_input_checksum.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_compute_input_checksum.go index 7ffca33f0..ee8ff5454 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_compute_input_checksum.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_compute_input_checksum.go @@ -7,6 +7,7 @@ import ( "hash" "io" "strconv" + "strings" v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4" internalcontext "github.com/aws/aws-sdk-go-v2/internal/context" @@ -16,7 +17,6 @@ import ( ) const ( - contentMD5Header = "Content-Md5" streamingUnsignedPayloadTrailerPayloadHash = "STREAMING-UNSIGNED-PAYLOAD-TRAILER" ) @@ -39,8 +39,8 @@ func SetComputedInputChecksums(m *middleware.Metadata, vs map[string]string) { m.Set(computedInputChecksumsKey{}, vs) } -// computeInputPayloadChecksum middleware computes payload checksum -type computeInputPayloadChecksum struct { +// ComputeInputPayloadChecksum middleware computes payload checksum +type ComputeInputPayloadChecksum struct { // Enables support for wrapping the serialized input payload with a // content-encoding: aws-check wrapper, and including a trailer for the // algorithm's checksum value. @@ -49,13 +49,6 @@ type computeInputPayloadChecksum struct { // the Algorithm's header is already set on the request. EnableTrailingChecksum bool - // States that a checksum is required to be included for the operation. If - // Input does not specify a checksum, fallback to built in MD5 checksum is - // used. - // - // Replaces smithy-go's ContentChecksum middleware. - RequireChecksum bool - // Enables support for computing the SHA256 checksum of input payloads // along with the algorithm specified checksum. Prevents downstream // middleware handlers (computePayloadSHA256) re-reading the payload. @@ -78,7 +71,7 @@ type computeInputPayloadChecksum struct { type useTrailer struct{} // ID provides the middleware's identifier. 
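The metrics-tracking middlewares in the new file above follow the standard smithy-go Build-step shape, an ID method plus HandleBuild, which is also the shape of the setCredentialSourceMiddleware this patch adds to each service client. A hedged sketch of that pattern with a hypothetical middleware (headerTagger and the x-example-tag header are illustrative names, not part of the SDK):

import (
	"context"

	"github.com/aws/smithy-go/middleware"
	smithyhttp "github.com/aws/smithy-go/transport/http"
)

type headerTagger struct{}

func (headerTagger) ID() string { return "headerTagger" }

func (headerTagger) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) (
	middleware.BuildOutput, middleware.Metadata, error,
) {
	// Only HTTP requests carry headers; anything else passes through untouched.
	if req, ok := in.Request.(*smithyhttp.Request); ok {
		req.Header.Set("x-example-tag", "demo")
	}
	return next.HandleBuild(ctx, in)
}

A middleware like this is typically registered per client through APIOptions, for example stack.Build.Add(headerTagger{}, middleware.After) inside an o.APIOptions function.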
-func (m *computeInputPayloadChecksum) ID() string { +func (m *ComputeInputPayloadChecksum) ID() string { return "AWSChecksum:ComputeInputPayloadChecksum" } @@ -98,18 +91,27 @@ func (e computeInputHeaderChecksumError) Error() string { } func (e computeInputHeaderChecksumError) Unwrap() error { return e.Err } -// HandleBuild handles computing the payload's checksum, in the following cases: +// HandleFinalize handles computing the payload's checksum, in the following cases: // - Is HTTP, not HTTPS // - RequireChecksum is true, and no checksums were specified via the Input // - Trailing checksums are not supported // // The build handler must be inserted in the stack before ContentPayloadHash // and after ComputeContentLength. -func (m *computeInputPayloadChecksum) HandleFinalize( +func (m *ComputeInputPayloadChecksum) HandleFinalize( ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, ) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { + var checksum string + algorithm, ok, err := getInputAlgorithm(ctx) + if err != nil { + return out, metadata, err + } + if !ok { + return next.HandleFinalize(ctx, in) + } + req, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, computeInputHeaderChecksumError{ @@ -117,8 +119,6 @@ func (m *computeInputPayloadChecksum) HandleFinalize( } } - var algorithm Algorithm - var checksum string defer func() { if algorithm == "" || checksum == "" || err != nil { return @@ -130,29 +130,14 @@ func (m *computeInputPayloadChecksum) HandleFinalize( }) }() - // If no algorithm was specified, and the operation requires a checksum, - // fallback to the legacy content MD5 checksum. - algorithm, ok, err = getInputAlgorithm(ctx) - if err != nil { - return out, metadata, err - } else if !ok { - if m.RequireChecksum { - checksum, err = setMD5Checksum(ctx, req) - if err != nil { - return out, metadata, computeInputHeaderChecksumError{ - Msg: "failed to compute stream's MD5 checksum", - Err: err, - } - } - algorithm = Algorithm("MD5") + // If any checksum header is already set nothing to do. + for header := range req.Header { + h := strings.ToUpper(header) + if strings.HasPrefix(h, "X-AMZ-CHECKSUM-") { + algorithm = Algorithm(strings.TrimPrefix(h, "X-AMZ-CHECKSUM-")) + checksum = req.Header.Get(header) + return next.HandleFinalize(ctx, in) } - return next.HandleFinalize(ctx, in) - } - - // If the checksum header is already set nothing to do. - checksumHeader := AlgorithmHTTPHeader(algorithm) - if checksum = req.Header.Get(checksumHeader); checksum != "" { - return next.HandleFinalize(ctx, in) } computePayloadHash := m.EnableComputePayloadHash @@ -217,6 +202,7 @@ func (m *computeInputPayloadChecksum) HandleFinalize( } } + checksumHeader := AlgorithmHTTPHeader(algorithm) req.Header.Set(checksumHeader, checksum) if computePayloadHash { @@ -242,28 +228,37 @@ func (e computeInputTrailingChecksumError) Error() string { } func (e computeInputTrailingChecksumError) Unwrap() error { return e.Err } -// addInputChecksumTrailer +// AddInputChecksumTrailer adds HTTP checksum when // - Is HTTPS, not HTTP // - A checksum was specified via the Input // - Trailing checksums are supported. -type addInputChecksumTrailer struct { +type AddInputChecksumTrailer struct { EnableTrailingChecksum bool - RequireChecksum bool EnableComputePayloadHash bool EnableDecodedContentLengthHeader bool } // ID identifies this middleware. 
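The rewritten HandleFinalize above treats any x-amz-checksum-* header that is already present as authoritative and skips recomputation. In practice that header is usually pre-set by supplying the checksum on the operation input, as in this sketch (assuming the s3 package from this SDK; bucket and key are placeholders):

import (
	"bytes"
	"context"
	"encoding/base64"
	"hash/crc32"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func putWithPrecomputedCRC32(ctx context.Context, client *s3.Client, bucket, key string, body []byte) error {
	// S3 checksum fields carry the base64 encoding of the big-endian digest.
	sum := crc32.ChecksumIEEE(body)
	digest := []byte{byte(sum >> 24), byte(sum >> 16), byte(sum >> 8), byte(sum)}

	_, err := client.PutObject(ctx, &s3.PutObjectInput{
		Bucket:        aws.String(bucket),
		Key:           aws.String(key),
		Body:          bytes.NewReader(body),
		ChecksumCRC32: aws.String(base64.StdEncoding.EncodeToString(digest)),
	})
	return err
}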
-func (*addInputChecksumTrailer) ID() string { +func (*AddInputChecksumTrailer) ID() string { return "addInputChecksumTrailer" } // HandleFinalize wraps the request body to write the trailing checksum. -func (m *addInputChecksumTrailer) HandleFinalize( +func (m *AddInputChecksumTrailer) HandleFinalize( ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, ) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { + algorithm, ok, err := getInputAlgorithm(ctx) + if err != nil { + return out, metadata, computeInputTrailingChecksumError{ + Msg: "failed to get algorithm", + Err: err, + } + } else if !ok { + return next.HandleFinalize(ctx, in) + } + if enabled, _ := middleware.GetStackValue(ctx, useTrailer{}).(bool); !enabled { return next.HandleFinalize(ctx, in) } @@ -281,26 +276,13 @@ func (m *addInputChecksumTrailer) HandleFinalize( } } - // If no algorithm was specified, there is nothing to do. - algorithm, ok, err := getInputAlgorithm(ctx) - if err != nil { - return out, metadata, computeInputTrailingChecksumError{ - Msg: "failed to get algorithm", - Err: err, - } - } else if !ok { - return out, metadata, computeInputTrailingChecksumError{ - Msg: "no algorithm specified", + // If any checksum header is already set nothing to do. + for header := range req.Header { + if strings.HasPrefix(strings.ToLower(header), "x-amz-checksum-") { + return next.HandleFinalize(ctx, in) } } - // If the checksum header is already set before finalize could run, there - // is nothing to do. - checksumHeader := AlgorithmHTTPHeader(algorithm) - if req.Header.Get(checksumHeader) != "" { - return next.HandleFinalize(ctx, in) - } - stream := req.GetStream() streamLength, err := getRequestStreamLength(req) if err != nil { @@ -444,39 +426,3 @@ func getRequestStreamLength(req *smithyhttp.Request) (int64, error) { return -1, nil } - -// setMD5Checksum computes the MD5 of the request payload and sets it to the -// Content-MD5 header. Returning the MD5 base64 encoded string or error. -// -// If the MD5 is already set as the Content-MD5 header, that value will be -// returned, and nothing else will be done. -// -// If the payload is empty, no MD5 will be computed. No error will be returned. -// Empty payloads do not have an MD5 value. -// -// Replaces the smithy-go middleware for httpChecksum trait. 
-func setMD5Checksum(ctx context.Context, req *smithyhttp.Request) (string, error) { - if v := req.Header.Get(contentMD5Header); len(v) != 0 { - return v, nil - } - stream := req.GetStream() - if stream == nil { - return "", nil - } - - if !req.IsStreamSeekable() { - return "", fmt.Errorf( - "unseekable stream is not supported for computing md5 checksum") - } - - v, err := computeMD5Checksum(stream) - if err != nil { - return "", err - } - if err := req.RewindStream(); err != nil { - return "", fmt.Errorf("failed to rewind stream after computing MD5 checksum, %w", err) - } - // set the 'Content-MD5' header - req.Header.Set(contentMD5Header, string(v)) - return string(v), nil -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_setup_context.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_setup_context.go index 3db73afe7..3347e88cc 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_setup_context.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_setup_context.go @@ -3,43 +3,62 @@ package checksum import ( "context" + "github.com/aws/aws-sdk-go-v2/aws" + internalcontext "github.com/aws/aws-sdk-go-v2/internal/context" "github.com/aws/smithy-go/middleware" ) -// setupChecksumContext is the initial middleware that looks up the input +const ( + checksumValidationModeEnabled = "ENABLED" +) + +// SetupInputContext is the initial middleware that looks up the input // used to configure checksum behavior. This middleware must be executed before // input validation step or any other checksum middleware. -type setupInputContext struct { +type SetupInputContext struct { // GetAlgorithm is a function to get the checksum algorithm of the // input payload from the input parameters. // // Given the input parameter value, the function must return the algorithm // and true, or false if no algorithm is specified. GetAlgorithm func(interface{}) (string, bool) + + // RequireChecksum indicates whether operation model forces middleware to compute the input payload's checksum. + // If RequireChecksum is set to true, checksum will be calculated and RequestChecksumCalculation will be ignored, + // otherwise RequestChecksumCalculation will be used to indicate if checksum will be calculated + RequireChecksum bool + + // RequestChecksumCalculation is the user config to opt-in/out request checksum calculation. If RequireChecksum is + // set to true, checksum will be calculated and this field will be ignored, otherwise + // RequestChecksumCalculation will be used to indicate if checksum will be calculated + RequestChecksumCalculation aws.RequestChecksumCalculation } // ID for the middleware -func (m *setupInputContext) ID() string { +func (m *SetupInputContext) ID() string { return "AWSChecksum:SetupInputContext" } // HandleInitialize initialization middleware that setups up the checksum // context based on the input parameters provided in the stack. -func (m *setupInputContext) HandleInitialize( +func (m *SetupInputContext) HandleInitialize( ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, ) ( out middleware.InitializeOutput, metadata middleware.Metadata, err error, ) { - // Check if validation algorithm is specified. 
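With setMD5Checksum deleted above, operations that previously fell back to Content-MD5 now flow through the CRC32 default instead. For a caller that still needs a Content-MD5 value for its own purposes (an assumption, not something this patch requires), the removed helper's computation is plain standard library code:

import (
	"crypto/md5"
	"encoding/base64"
)

// contentMD5 mirrors what the deleted setMD5Checksum produced: the
// base64-encoded MD5 digest of the payload, as used by the Content-MD5 header.
func contentMD5(payload []byte) string {
	sum := md5.Sum(payload)
	return base64.StdEncoding.EncodeToString(sum[:])
}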
+ // nil check here is for operations that require checksum but do not have input algorithm setting if m.GetAlgorithm != nil { - // check is input resource has a checksum algorithm - algorithm, ok := m.GetAlgorithm(in.Parameters) - if ok && len(algorithm) != 0 { + if algorithm, ok := m.GetAlgorithm(in.Parameters); ok { ctx = internalcontext.SetChecksumInputAlgorithm(ctx, algorithm) + return next.HandleInitialize(ctx, in) } } + if m.RequireChecksum || m.RequestChecksumCalculation == aws.RequestChecksumCalculationWhenSupported { + ctx = internalcontext.SetChecksumInputAlgorithm(ctx, string(AlgorithmCRC32)) + } + return next.HandleInitialize(ctx, in) } @@ -50,6 +69,12 @@ type setupOutputContext struct { // Given the input parameter value, the function must return the validation // mode and true, or false if no mode is specified. GetValidationMode func(interface{}) (string, bool) + + // SetValidationMode is a function to set the checksum validation mode of input parameters + SetValidationMode func(interface{}, string) + + // ResponseChecksumValidation states user config to opt-in/out checksum validation + ResponseChecksumValidation aws.ResponseChecksumValidation } // ID for the middleware @@ -64,13 +89,12 @@ func (m *setupOutputContext) HandleInitialize( ) ( out middleware.InitializeOutput, metadata middleware.Metadata, err error, ) { - // Check if validation mode is specified. - if m.GetValidationMode != nil { - // check is input resource has a checksum algorithm - mode, ok := m.GetValidationMode(in.Parameters) - if ok && len(mode) != 0 { - ctx = setContextOutputValidationMode(ctx, mode) - } + + mode, _ := m.GetValidationMode(in.Parameters) + + if m.ResponseChecksumValidation == aws.ResponseChecksumValidationWhenSupported || mode == checksumValidationModeEnabled { + m.SetValidationMode(in.Parameters, checksumValidationModeEnabled) + ctx = setContextOutputValidationMode(ctx, checksumValidationModeEnabled) } return next.HandleInitialize(ctx, in) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_validate_output.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_validate_output.go index 9fde12d86..6b56f8c6a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_validate_output.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_validate_output.go @@ -3,6 +3,7 @@ package checksum import ( "context" "fmt" + "net/http" "strings" "github.com/aws/smithy-go" @@ -55,7 +56,7 @@ func (m *validateOutputPayloadChecksum) ID() string { } // HandleDeserialize is a Deserialize middleware that wraps the HTTP response -// body with an io.ReadCloser that will validate the its checksum. +// body with an io.ReadCloser that will validate its checksum. func (m *validateOutputPayloadChecksum) HandleDeserialize( ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler, ) ( @@ -66,8 +67,7 @@ func (m *validateOutputPayloadChecksum) HandleDeserialize( return out, metadata, err } - // If there is no validation mode specified nothing is supported. - if mode := getContextOutputValidationMode(ctx); mode != "ENABLED" { + if mode := getContextOutputValidationMode(ctx); mode != checksumValidationModeEnabled { return out, metadata, err } @@ -90,13 +90,11 @@ func (m *validateOutputPayloadChecksum) HandleDeserialize( algorithmToUse = algorithm } - // TODO this must validate the validation mode is set to enabled. 
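The SetupInputContext logic earlier in this hunk encodes the new default in a few branches. A stand-alone mirror of that decision, hypothetical rather than an SDK API, for readability:

import "github.com/aws/aws-sdk-go-v2/aws"

// effectiveAlgorithm mirrors SetupInputContext.HandleInitialize above: an
// explicit input algorithm always wins; otherwise CRC32 becomes the default
// when the operation requires a checksum or the user opted into
// "when supported"; otherwise no checksum is computed.
func effectiveAlgorithm(explicit string, required bool, calc aws.RequestChecksumCalculation) (string, bool) {
	if explicit != "" {
		return explicit, true
	}
	if required || calc == aws.RequestChecksumCalculationWhenSupported {
		return "CRC32", true
	}
	return "", false
}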
- logger := middleware.GetLogger(ctx) // Skip validation if no checksum algorithm or checksum is available. if len(expectedChecksum) == 0 || len(algorithmToUse) == 0 { - if m.LogValidationSkipped { + if response.StatusCode != 404 && response.Body != http.NoBody && m.LogValidationSkipped { // TODO this probably should have more information about the // operation output that won't be validated. logger.Logf(logging.Warn, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/CHANGELOG.md index 150e26f4e..ff2188db8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/CHANGELOG.md @@ -1,3 +1,124 @@ +# v1.18.15 (2025-02-27) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.14 (2025-02-18) + +* **Bug Fix**: Bump go version to 1.22 +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.13 (2025-02-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.12 (2025-01-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.11 (2025-01-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.10 (2025-01-24) + +* **Dependency Update**: Updated to the latest SDK module versions +* **Dependency Update**: Upgrade to smithy-go v1.22.2. + +# v1.18.9 (2025-01-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.8 (2025-01-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.7 (2024-12-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.6 (2024-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.5 (2024-11-18) + +* **Dependency Update**: Update to smithy-go v1.22.1. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.4 (2024-11-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.3 (2024-10-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.2 (2024-10-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.1 (2024-10-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.0 (2024-10-04) + +* **Feature**: Add support for HTTP client metrics. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.18 (2024-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.17 (2024-09-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.16 (2024-08-15) + +* **Dependency Update**: Bump minimum Go version to 1.21. 
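From the caller's side, the ENABLED validation mode that setupOutputContext now applies by default corresponds to the existing per-request opt-in, sketched here under the assumption of the s3 package's ChecksumMode input field:

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// getValidated opts a single GetObject into response checksum validation
// explicitly, the same ENABLED mode the setup middleware above now sets
// under "when supported".
func getValidated(ctx context.Context, client *s3.Client, bucket, key string) (*s3.GetObjectOutput, error) {
	return client.GetObject(ctx, &s3.GetObjectInput{
		Bucket:       aws.String(bucket),
		Key:          aws.String(key),
		ChecksumMode: types.ChecksumModeEnabled,
	})
}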
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.15 (2024-07-10.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.14 (2024-07-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.13 (2024-06-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.12 (2024-06-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.11 (2024-06-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.10 (2024-06-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.9 (2024-06-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.8 (2024-06-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.7 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.6 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.17.5 (2024-03-29) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/go_module_metadata.go index a1f30ee06..a0129140c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/go_module_metadata.go @@ -3,4 +3,4 @@ package s3shared // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.17.5" +const goModuleVersion = "1.18.15" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/metadata_retriever.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/metadata_retriever.go index f52f2f11e..7251588b0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/metadata_retriever.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/metadata_retriever.go @@ -5,6 +5,7 @@ import ( awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -30,6 +31,8 @@ func (m *metadataRetriever) HandleDeserialize(ctx context.Context, in middleware ) { out, metadata, err = next.HandleDeserialize(ctx, in) + span, _ := tracing.GetSpan(ctx) + resp, ok := out.RawResponse.(*smithyhttp.Response) if !ok { // No raw response to wrap with. @@ -40,12 +43,14 @@ func (m *metadataRetriever) HandleDeserialize(ctx context.Context, in middleware if v := resp.Header.Get("X-Amz-Request-Id"); len(v) != 0 { // set reqID on metadata for successful responses. awsmiddleware.SetRequestIDMetadata(&metadata, v) + span.SetProperty("aws.request_id", v) } // look up host-id if v := resp.Header.Get("X-Amz-Id-2"); len(v) != 0 { // set reqID on metadata for successful responses. SetHostIDMetadata(&metadata, v) + span.SetProperty("aws.extended_request_id", v) } return out, metadata, err diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/pricing/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/pricing/CHANGELOG.md index ca13fa509..68562eab7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/pricing/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/pricing/CHANGELOG.md @@ -1,3 +1,16 @@ +# v1.34.1 (2025-03-04.2) + +* **Bug Fix**: Add assurance test for operation order. 
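The metadataRetriever change earlier in this hunk copies X-Amz-Request-Id onto the tracing span as well as the operation metadata. The metadata side is what callers already see through each output's ResultMetadata, as in this sketch (the bucket name is a placeholder):

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func printRequestID(ctx context.Context, client *s3.Client, bucket string) error {
	out, err := client.ListObjectsV2(ctx, &s3.ListObjectsV2Input{Bucket: aws.String(bucket)})
	if err != nil {
		return err
	}
	// Populated by the metadataRetriever deserialize middleware.
	if reqID, ok := awsmiddleware.GetRequestIDMetadata(out.ResultMetadata); ok {
		fmt.Println("request id:", reqID)
	}
	return nil
}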
+ +# v1.34.0 (2025-02-28) + +* **Feature**: Update GetProducts and DescribeServices API request input validations. + +# v1.33.0 (2025-02-27) + +* **Feature**: Track credential providers via User-Agent Feature ids +* **Dependency Update**: Updated to the latest SDK module versions + # v1.32.17 (2025-02-18) * **Bug Fix**: Bump go version to 1.22 diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/pricing/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/pricing/api_client.go index 253f7364a..cf0e7fcaa 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/pricing/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/pricing/api_client.go @@ -762,6 +762,37 @@ func addUserAgentRetryMode(stack *middleware.Stack, options Options) error { return nil } +type setCredentialSourceMiddleware struct { + ua *awsmiddleware.RequestUserAgent + options Options +} + +func (m setCredentialSourceMiddleware) ID() string { return "SetCredentialSourceMiddleware" } + +func (m setCredentialSourceMiddleware) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + asProviderSource, ok := m.options.Credentials.(aws.CredentialProviderSource) + if !ok { + return next.HandleBuild(ctx, in) + } + providerSources := asProviderSource.ProviderSources() + for _, source := range providerSources { + m.ua.AddCredentialsSource(source) + } + return next.HandleBuild(ctx, in) +} + +func addCredentialSource(stack *middleware.Stack, options Options) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + mw := setCredentialSourceMiddleware{ua: ua, options: options} + return stack.Build.Insert(&mw, "UserAgent", middleware.Before) +} + func resolveTracerProvider(options *Options) { if options.TracerProvider == nil { options.TracerProvider = &tracing.NopTracerProvider{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/pricing/api_op_DescribeServices.go b/vendor/github.com/aws/aws-sdk-go-v2/service/pricing/api_op_DescribeServices.go index 56a27aae2..73631b68d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/pricing/api_op_DescribeServices.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/pricing/api_op_DescribeServices.go @@ -136,6 +136,9 @@ func (c *Client) addOperationDescribeServicesMiddlewares(stack *middleware.Stack if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeServices(options.Region), middleware.Before); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/pricing/api_op_GetAttributeValues.go b/vendor/github.com/aws/aws-sdk-go-v2/service/pricing/api_op_GetAttributeValues.go index 0dca647e4..b6ab19a0c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/pricing/api_op_GetAttributeValues.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/pricing/api_op_GetAttributeValues.go @@ -134,6 +134,9 @@ func (c *Client) addOperationGetAttributeValuesMiddlewares(stack *middleware.Sta if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetAttributeValuesValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/pricing/api_op_GetPriceListFileUrl.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/pricing/api_op_GetPriceListFileUrl.go index aae3c2213..e9aed5100 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/pricing/api_op_GetPriceListFileUrl.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/pricing/api_op_GetPriceListFileUrl.go @@ -132,6 +132,9 @@ func (c *Client) addOperationGetPriceListFileUrlMiddlewares(stack *middleware.St if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetPriceListFileUrlValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/pricing/api_op_GetProducts.go b/vendor/github.com/aws/aws-sdk-go-v2/service/pricing/api_op_GetProducts.go index a18cb72c6..3881d41db 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/pricing/api_op_GetProducts.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/pricing/api_op_GetProducts.go @@ -135,6 +135,9 @@ func (c *Client) addOperationGetProductsMiddlewares(stack *middleware.Stack, opt if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetProductsValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/pricing/api_op_ListPriceLists.go b/vendor/github.com/aws/aws-sdk-go-v2/service/pricing/api_op_ListPriceLists.go index 53d4eac7e..e8e0b5b1e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/pricing/api_op_ListPriceLists.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/pricing/api_op_ListPriceLists.go @@ -167,6 +167,9 @@ func (c *Client) addOperationListPriceListsMiddlewares(stack *middleware.Stack, if err = addUserAgentRetryMode(stack, options); err != nil { return err } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpListPriceListsValidationMiddleware(stack); err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/pricing/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/pricing/deserializers.go index c3d3df7d9..9ec603864 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/pricing/deserializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/pricing/deserializers.go @@ -1649,7 +1649,7 @@ func awsAwsjson11_deserializeOpDocumentDescribeServicesOutput(v **DescribeServic if value != nil { jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected String to be of type string, got %T instead", value) + return fmt.Errorf("expected FormatVersion to be of type string, got %T instead", value) } sv.FormatVersion = ptr.String(jtv) } @@ -1788,7 +1788,7 @@ func awsAwsjson11_deserializeOpDocumentGetProductsOutput(v **GetProductsOutput, if value != nil { jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected String to be of type string, got %T instead", value) + return fmt.Errorf("expected FormatVersion to be of type string, got %T instead", value) } sv.FormatVersion = ptr.String(jtv) } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/pricing/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/pricing/generated.json index c91c84837..f615016ec 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/pricing/generated.json +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/pricing/generated.json @@ -26,6 +26,7 @@ "protocol_test.go", "serializers.go", "snapshot_test.go", + "sra_operation_order_test.go", 
"types/enums.go", "types/errors.go", "types/types.go", diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/pricing/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/pricing/go_module_metadata.go index 417b3a3bb..6fc11b0be 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/pricing/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/pricing/go_module_metadata.go @@ -3,4 +3,4 @@ package pricing // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.32.17" +const goModuleVersion = "1.34.1" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/CHANGELOG.md index ed5204688..9f27d13db 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/CHANGELOG.md @@ -1,3 +1,289 @@ +# v1.78.2 (2025-03-11) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.78.1 (2025-03-04.2) + +* **Bug Fix**: Add assurance test for operation order. + +# v1.78.0 (2025-02-27) + +* **Feature**: Track credential providers via User-Agent Feature ids +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.77.1 (2025-02-18) + +* **Bug Fix**: Bump go version to 1.22 +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.77.0 (2025-02-14) + +* **Feature**: Added support for Content-Range header in HeadObject response. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.76.1 (2025-02-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.76.0 (2025-02-06) + +* **Feature**: Updated list of the valid AWS Region values for the LocationConstraint parameter for general purpose buckets. + +# v1.75.4 (2025-02-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.75.3 (2025-02-04) + +* No change notes available for this release. + +# v1.75.2 (2025-01-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.75.1 (2025-01-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.75.0 (2025-01-29) + +* **Feature**: Change the type of MpuObjectSize in CompleteMultipartUploadRequest from int to long. + +# v1.74.1 (2025-01-24) + +* **Bug Fix**: Enable request checksum validation mode by default +* **Dependency Update**: Updated to the latest SDK module versions +* **Dependency Update**: Upgrade to smithy-go v1.22.2. + +# v1.74.0 (2025-01-22) + +* **Feature**: Add a client config option to disable logging when output checksum validation is skipped due to an unsupported algorithm. + +# v1.73.2 (2025-01-17) + +* **Bug Fix**: Fix bug where credentials weren't refreshed during retry loop. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.73.1 (2025-01-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.73.0 (2025-01-15) + +* **Feature**: S3 client behavior is updated to always calculate a checksum by default for operations that support it (such as PutObject or UploadPart), or require it (such as DeleteObjects). The checksum algorithm used by default now becomes CRC32. Checksum behavior can be configured using `when_supported` and `when_required` options - in code using RequestChecksumCalculation, in shared config using request_checksum_calculation, or as env variable using AWS_REQUEST_CHECKSUM_CALCULATION. 
The S3 client attempts to validate response checksums for all S3 API operations that support checksums. However, if the SDK has not implemented the specified checksum algorithm then this validation is skipped. Checksum validation behavior can be configured using `when_supported` and `when_required` options - in code using ResponseChecksumValidation, in shared config using response_checksum_validation, or as env variable using AWS_RESPONSE_CHECKSUM_VALIDATION. +* **Feature**: This change enhances integrity protections for new SDK requests to S3. S3 SDKs now support the CRC64NVME checksum algorithm, full object checksums for multipart S3 objects, and new default integrity protections for S3 requests. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.72.3 (2025-01-14) + +* **Bug Fix**: Fix issue where waiters were not failing on unmatched errors as they should. This may have breaking behavioral changes for users in fringe cases. See [this announcement](https://github.com/aws/aws-sdk-go-v2/discussions/2954) for more information. + +# v1.72.2 (2025-01-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.72.1 (2025-01-08) + +* No change notes available for this release. + +# v1.72.0 (2025-01-03) + +* **Feature**: This change is only for updating the model regexp of CopySource which is not for validation but only for documentation and user guide change. + +# v1.71.1 (2024-12-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.71.0 (2024-12-03.2) + +* **Feature**: Amazon S3 Metadata stores object metadata in read-only, fully managed Apache Iceberg metadata tables that you can query. You can create metadata table configurations for S3 general purpose buckets. + +# v1.70.0 (2024-12-02) + +* **Feature**: Amazon S3 introduces support for AWS Dedicated Local Zones +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.69.0 (2024-11-25) + +* **Feature**: Amazon Simple Storage Service / Features: Add support for ETag based conditional writes in PutObject and CompleteMultiPartUpload APIs to prevent unintended object modifications. + +# v1.68.0 (2024-11-21) + +* **Feature**: Add support for conditional deletes for the S3 DeleteObject and DeleteObjects APIs. Add support for write offset bytes option used to append to objects with the S3 PutObject API. + +# v1.67.1 (2024-11-18) + +* **Dependency Update**: Update to smithy-go v1.22.1. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.67.0 (2024-11-14) + +* **Feature**: This release updates the ListBuckets API Reference documentation in support of the new 10,000 general purpose bucket default quota on all AWS accounts. To increase your bucket quota from 10,000 to up to 1 million buckets, simply request a quota increase via Service Quotas. + +# v1.66.3 (2024-11-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.66.2 (2024-10-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.66.1 (2024-10-25) + +* **Bug Fix**: Update presign post URL resolution to use the exact result from EndpointResolverV2 + +# v1.66.0 (2024-10-16) + +* **Feature**: Add support for the new optional bucket-region and prefix query parameters in the ListBuckets API. For ListBuckets requests that express pagination, Amazon S3 will now return both the bucket names and associated AWS regions in the response. 
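The conditional-write entries above (v1.68.0 and v1.69.0) surface as plain input fields. A sketch of a create-only upload, assuming the IfNoneMatch field those releases added; S3 rejects the write with HTTP 412 when the key already exists:

import (
	"bytes"
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func putIfAbsent(ctx context.Context, client *s3.Client, bucket, key string, body []byte) error {
	_, err := client.PutObject(ctx, &s3.PutObjectInput{
		Bucket:      aws.String(bucket),
		Key:         aws.String(key),
		Body:        bytes.NewReader(body),
		IfNoneMatch: aws.String("*"), // write only if no object exists at this key
	})
	return err
}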
+ +# v1.65.3 (2024-10-11) + +* **Bug Fix**: **BREAKING CHANGE**: S3 ReplicationRuleFilter and LifecycleRuleFilter shapes are being changed from union to structure types + +# v1.65.2 (2024-10-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.65.1 (2024-10-07) + +* **Bug Fix**: **CHANGE IN BEHAVIOR**: Allow serialization of headers with empty string for prefix headers. We are deploying this fix because the behavior is actively preventing users from transmitting keys with empty values to the service. If you were setting metadata keys with empty values before this change, they will now actually be sent to the service. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.65.0 (2024-10-04) + +* **Feature**: Add support for HTTP client metrics. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.64.1 (2024-10-03) + +* No change notes available for this release. + +# v1.64.0 (2024-10-02) + +* **Feature**: This release introduces a header representing the minimum object size limit for Lifecycle transitions. + +# v1.63.3 (2024-09-27) + +* No change notes available for this release. + +# v1.63.2 (2024-09-25) + +* No change notes available for this release. + +# v1.63.1 (2024-09-23) + +* No change notes available for this release. + +# v1.63.0 (2024-09-20) + +* **Feature**: Add tracing and metrics support to service clients. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.62.0 (2024-09-18) + +* **Feature**: Added SSE-KMS support for directory buckets. + +# v1.61.3 (2024-09-17) + +* **Bug Fix**: **BREAKFIX**: Only generate AccountIDEndpointMode config for services that use it. This is a compiler break, but removes no actual functionality, as no services currently use the account ID in endpoint resolution. + +# v1.61.2 (2024-09-04) + +* No change notes available for this release. + +# v1.61.1 (2024-09-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.61.0 (2024-08-28) + +* **Feature**: Add presignPost for s3 PutObject + +# v1.60.1 (2024-08-22) + +* No change notes available for this release. + +# v1.60.0 (2024-08-20) + +* **Feature**: Amazon Simple Storage Service / Features : Add support for conditional writes for PutObject and CompleteMultipartUpload APIs. + +# v1.59.0 (2024-08-15) + +* **Feature**: Amazon Simple Storage Service / Features : Adds support for pagination in the S3 ListBuckets API. +* **Dependency Update**: Bump minimum Go version to 1.21. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.58.3 (2024-08-02) + +* **Bug Fix**: Add assurance tests for auth scheme selection logic. + +# v1.58.2 (2024-07-10.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.58.1 (2024-07-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.58.0 (2024-07-02) + +* **Feature**: Added response overrides to Head Object requests. + +# v1.57.1 (2024-06-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.57.0 (2024-06-26) + +* **Feature**: Support list-of-string endpoint parameter. + +# v1.56.1 (2024-06-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.56.0 (2024-06-18) + +* **Feature**: Track usage of various AWS SDK features in user-agent string. 
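The ListBuckets pagination noted in v1.59.0 above ships with a generated paginator. A sketch assuming that generated s3.NewListBucketsPaginator:

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func listAllBuckets(ctx context.Context, client *s3.Client) error {
	p := s3.NewListBucketsPaginator(client, &s3.ListBucketsInput{})
	for p.HasMorePages() {
		page, err := p.NextPage(ctx)
		if err != nil {
			return err
		}
		for _, b := range page.Buckets {
			fmt.Println(*b.Name)
		}
	}
	return nil
}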
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.55.2 (2024-06-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.55.1 (2024-06-07) + +* **Bug Fix**: Add clock skew correction on all service clients +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.55.0 (2024-06-05) + +* **Feature**: Added new params copySource and key to copyObject API for supporting S3 Access Grants plugin. These changes will not change any of the existing S3 API functionality. +* **Bug Fix**: Add S3-specific smithy protocol tests. + +# v1.54.4 (2024-06-03) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.54.3 (2024-05-23) + +* **Bug Fix**: Prevent parsing failures for nonstandard `Expires` values in responses. If the SDK cannot parse the value set in the response header for this field it will now be returned as `nil`. A new field, `ExpiresString`, has been added that will retain the unparsed value from the response (regardless of whether it came back in a format recognized by the SDK). + +# v1.54.2 (2024-05-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.54.1 (2024-05-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.54.0 (2024-05-14) + +* **Feature**: Updated a few x-id in the http uri traits + +# v1.53.2 (2024-05-08) + +* **Bug Fix**: GoDoc improvement + # v1.53.1 (2024-03-29) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_client.go index a31c2e0ac..40035a43d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_client.go @@ -4,6 +4,7 @@ package s3 import ( "context" + "errors" "fmt" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/aws/defaults" @@ -14,6 +15,7 @@ import ( internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" + internalmiddleware "github.com/aws/aws-sdk-go-v2/internal/middleware" "github.com/aws/aws-sdk-go-v2/internal/v4a" acceptencodingcust "github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding" internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" @@ -22,22 +24,156 @@ import ( s3sharedconfig "github.com/aws/aws-sdk-go-v2/service/internal/s3shared/config" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" smithy "github.com/aws/smithy-go" + smithyauth "github.com/aws/smithy-go/auth" smithydocument "github.com/aws/smithy-go/document" "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/metrics" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" "net" "net/http" + "sync/atomic" "time" ) const ServiceID = "S3" const ServiceAPIVersion = "2006-03-01" +type operationMetrics struct { + Duration metrics.Float64Histogram + SerializeDuration metrics.Float64Histogram + ResolveIdentityDuration metrics.Float64Histogram + ResolveEndpointDuration metrics.Float64Histogram + SignRequestDuration metrics.Float64Histogram + DeserializeDuration metrics.Float64Histogram +} + +func (m *operationMetrics) histogramFor(name string) metrics.Float64Histogram { + switch name { + case "client.call.duration": + return m.Duration + case 
"client.call.serialization_duration": + return m.SerializeDuration + case "client.call.resolve_identity_duration": + return m.ResolveIdentityDuration + case "client.call.resolve_endpoint_duration": + return m.ResolveEndpointDuration + case "client.call.signing_duration": + return m.SignRequestDuration + case "client.call.deserialization_duration": + return m.DeserializeDuration + default: + panic("unrecognized operation metric") + } +} + +func timeOperationMetric[T any]( + ctx context.Context, metric string, fn func() (T, error), + opts ...metrics.RecordMetricOption, +) (T, error) { + instr := getOperationMetrics(ctx).histogramFor(metric) + opts = append([]metrics.RecordMetricOption{withOperationMetadata(ctx)}, opts...) + + start := time.Now() + v, err := fn() + end := time.Now() + + elapsed := end.Sub(start) + instr.Record(ctx, float64(elapsed)/1e9, opts...) + return v, err +} + +func startMetricTimer(ctx context.Context, metric string, opts ...metrics.RecordMetricOption) func() { + instr := getOperationMetrics(ctx).histogramFor(metric) + opts = append([]metrics.RecordMetricOption{withOperationMetadata(ctx)}, opts...) + + var ended bool + start := time.Now() + return func() { + if ended { + return + } + ended = true + + end := time.Now() + + elapsed := end.Sub(start) + instr.Record(ctx, float64(elapsed)/1e9, opts...) + } +} + +func withOperationMetadata(ctx context.Context) metrics.RecordMetricOption { + return func(o *metrics.RecordMetricOptions) { + o.Properties.Set("rpc.service", middleware.GetServiceID(ctx)) + o.Properties.Set("rpc.method", middleware.GetOperationName(ctx)) + } +} + +type operationMetricsKey struct{} + +func withOperationMetrics(parent context.Context, mp metrics.MeterProvider) (context.Context, error) { + meter := mp.Meter("github.com/aws/aws-sdk-go-v2/service/s3") + om := &operationMetrics{} + + var err error + + om.Duration, err = operationMetricTimer(meter, "client.call.duration", + "Overall call duration (including retries and time to send or receive request and response body)") + if err != nil { + return nil, err + } + om.SerializeDuration, err = operationMetricTimer(meter, "client.call.serialization_duration", + "The time it takes to serialize a message body") + if err != nil { + return nil, err + } + om.ResolveIdentityDuration, err = operationMetricTimer(meter, "client.call.auth.resolve_identity_duration", + "The time taken to acquire an identity (AWS credentials, bearer token, etc) from an Identity Provider") + if err != nil { + return nil, err + } + om.ResolveEndpointDuration, err = operationMetricTimer(meter, "client.call.resolve_endpoint_duration", + "The time it takes to resolve an endpoint (endpoint resolver, not DNS) for the request") + if err != nil { + return nil, err + } + om.SignRequestDuration, err = operationMetricTimer(meter, "client.call.auth.signing_duration", + "The time it takes to sign a request") + if err != nil { + return nil, err + } + om.DeserializeDuration, err = operationMetricTimer(meter, "client.call.deserialization_duration", + "The time it takes to deserialize a message body") + if err != nil { + return nil, err + } + + return context.WithValue(parent, operationMetricsKey{}, om), nil +} + +func operationMetricTimer(m metrics.Meter, name, desc string) (metrics.Float64Histogram, error) { + return m.Float64Histogram(name, func(o *metrics.InstrumentOptions) { + o.UnitLabel = "s" + o.Description = desc + }) +} + +func getOperationMetrics(ctx context.Context) *operationMetrics { + return 
ctx.Value(operationMetricsKey{}).(*operationMetrics) +} + +func operationTracer(p tracing.TracerProvider) tracing.Tracer { + return p.Tracer("github.com/aws/aws-sdk-go-v2/service/s3") +} + // Client provides the API client to make operations call for Amazon Simple // Storage Service. type Client struct { options Options + + // Difference between the time reported by the server and the client + timeOffset *atomic.Int64 } // New returns an initialized Client based on the functional options. Provide @@ -60,6 +196,10 @@ func New(options Options, optFns ...func(*Options)) *Client { resolveHTTPSignerV4a(&options) + resolveTracerProvider(&options) + + resolveMeterProvider(&options) + resolveAuthSchemeResolver(&options) for _, fn := range optFns { @@ -82,6 +222,8 @@ func New(options Options, optFns ...func(*Options)) *Client { finalizeExpressCredentials(&options, client) + initializeTimeOffsetResolver(client) + return client } @@ -94,8 +236,15 @@ func (c *Client) Options() Options { return c.options.Copy() } -func (c *Client) invokeOperation(ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error) (result interface{}, metadata middleware.Metadata, err error) { +func (c *Client) invokeOperation( + ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error, +) ( + result interface{}, metadata middleware.Metadata, err error, +) { ctx = middleware.ClearStackValues(ctx) + ctx = middleware.WithServiceID(ctx, ServiceID) + ctx = middleware.WithOperationName(ctx, opID) + stack := middleware.NewStack(opID, smithyhttp.NewStackRequest) options := c.options.Copy() @@ -125,15 +274,56 @@ func (c *Client) invokeOperation(ctx context.Context, opID string, params interf } } - handler := middleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack) - result, metadata, err = handler.Handle(ctx, params) + ctx, err = withOperationMetrics(ctx, options.MeterProvider) if err != nil { + return nil, metadata, err + } + + tracer := operationTracer(options.TracerProvider) + spanName := fmt.Sprintf("%s.%s", ServiceID, opID) + + ctx = tracing.WithOperationTracer(ctx, tracer) + + ctx, span := tracer.StartSpan(ctx, spanName, func(o *tracing.SpanOptions) { + o.Kind = tracing.SpanKindClient + o.Properties.Set("rpc.system", "aws-api") + o.Properties.Set("rpc.method", opID) + o.Properties.Set("rpc.service", ServiceID) + }) + endTimer := startMetricTimer(ctx, "client.call.duration") + defer endTimer() + defer span.End() + + handler := smithyhttp.NewClientHandlerWithOptions(options.HTTPClient, func(o *smithyhttp.ClientHandler) { + o.Meter = options.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/s3") + }) + decorated := middleware.DecorateHandler(handler, stack) + result, metadata, err = decorated.Handle(ctx, params) + if err != nil { + span.SetProperty("exception.type", fmt.Sprintf("%T", err)) + span.SetProperty("exception.message", err.Error()) + + var aerr smithy.APIError + if errors.As(err, &aerr) { + span.SetProperty("api.error_code", aerr.ErrorCode()) + span.SetProperty("api.error_message", aerr.ErrorMessage()) + span.SetProperty("api.error_fault", aerr.ErrorFault().String()) + } + err = &smithy.OperationError{ ServiceID: ServiceID, OperationName: opID, Err: err, } } + + span.SetProperty("error", err != nil) + if err == nil { + span.SetStatus(tracing.SpanStatusOK) + } else { + span.SetStatus(tracing.SpanStatusError) + } + return result, metadata, err } @@ 
-171,7 +361,7 @@ func addProtocolFinalizerMiddlewares(stack *middleware.Stack, options Options, o if err := stack.Finalize.Insert(&resolveEndpointV2Middleware{options: options}, "GetIdentity", middleware.After); err != nil { return fmt.Errorf("add ResolveEndpointV2: %v", err) } - if err := stack.Finalize.Insert(&signRequestMiddleware{}, "ResolveEndpointV2", middleware.After); err != nil { + if err := stack.Finalize.Insert(&signRequestMiddleware{options: options}, "ResolveEndpointV2", middleware.After); err != nil { return fmt.Errorf("add Signing: %w", err) } return nil @@ -259,15 +449,17 @@ func setResolvedDefaultsMode(o *Options) { // NewFromConfig returns a new client from the provided config. func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { opts := Options{ - Region: cfg.Region, - DefaultsMode: cfg.DefaultsMode, - RuntimeEnvironment: cfg.RuntimeEnvironment, - HTTPClient: cfg.HTTPClient, - Credentials: cfg.Credentials, - APIOptions: cfg.APIOptions, - Logger: cfg.Logger, - ClientLogMode: cfg.ClientLogMode, - AppID: cfg.AppID, + Region: cfg.Region, + DefaultsMode: cfg.DefaultsMode, + RuntimeEnvironment: cfg.RuntimeEnvironment, + HTTPClient: cfg.HTTPClient, + Credentials: cfg.Credentials, + APIOptions: cfg.APIOptions, + Logger: cfg.Logger, + ClientLogMode: cfg.ClientLogMode, + AppID: cfg.AppID, + RequestChecksumCalculation: cfg.RequestChecksumCalculation, + ResponseChecksumValidation: cfg.ResponseChecksumValidation, } resolveAWSRetryerProvider(cfg, &opts) resolveAWSRetryMaxAttempts(cfg, &opts) @@ -459,6 +651,30 @@ func addRawResponseToMetadata(stack *middleware.Stack) error { func addRecordResponseTiming(stack *middleware.Stack) error { return stack.Deserialize.Add(&awsmiddleware.RecordResponseTiming{}, middleware.After) } + +func addSpanRetryLoop(stack *middleware.Stack, options Options) error { + return stack.Finalize.Insert(&spanRetryLoop{options: options}, "Retry", middleware.Before) +} + +type spanRetryLoop struct { + options Options +} + +func (*spanRetryLoop) ID() string { + return "spanRetryLoop" +} + +func (m *spanRetryLoop) HandleFinalize( + ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, +) ( + middleware.FinalizeOutput, middleware.Metadata, error, +) { + tracer := operationTracer(m.options.TracerProvider) + ctx, span := tracer.StartSpan(ctx, "RetryLoop") + defer span.End() + + return next.HandleFinalize(ctx, in) +} func addStreamingEventsPayload(stack *middleware.Stack) error { return stack.Finalize.Add(&v4.StreamingEventsPayload{}, middleware.Before) } @@ -475,11 +691,36 @@ func addContentSHA256Header(stack *middleware.Stack) error { return stack.Finalize.Insert(&v4.ContentSHA256Header{}, (*v4.ComputePayloadSHA256)(nil).ID(), middleware.After) } +func addIsWaiterUserAgent(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureWaiter) + return nil + }) +} + +func addIsPaginatorUserAgent(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeaturePaginator) + return nil + }) +} + func addRetry(stack *middleware.Stack, o Options) error { attempt := retry.NewAttemptMiddleware(o.Retryer, smithyhttp.RequestCloner, func(m *retry.Attempt) { m.LogAttempts = o.ClientLogMode.IsRetries() + 
m.OperationMeter = o.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/s3") }) - if err := stack.Finalize.Insert(attempt, "Signing", middleware.Before); err != nil { + if err := stack.Finalize.Insert(attempt, "ResolveAuthScheme", middleware.Before); err != nil { return err } if err := stack.Finalize.Insert(&retry.MetricsHeader{}, attempt.ID(), middleware.After); err != nil { @@ -548,6 +789,18 @@ func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error { return nil } +func resolveAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) *string { + if mode == aws.AccountIDEndpointModeDisabled { + return nil + } + + if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); ok && ca.Credentials.AccountID != "" { + return aws.String(ca.Credentials.AccountID) + } + + return nil +} + type httpSignerV4a interface { SignHTTP(ctx context.Context, credentials v4a.Credentials, r *http.Request, payloadHash, service string, regionSet []string, signingTime time.Time, @@ -568,6 +821,99 @@ func newDefaultV4aSigner(o Options) *v4a.Signer { }) } +func addTimeOffsetBuild(stack *middleware.Stack, c *Client) error { + mw := internalmiddleware.AddTimeOffsetMiddleware{Offset: c.timeOffset} + if err := stack.Build.Add(&mw, middleware.After); err != nil { + return err + } + return stack.Deserialize.Insert(&mw, "RecordResponseTiming", middleware.Before) +} +func initializeTimeOffsetResolver(c *Client) { + c.timeOffset = new(atomic.Int64) +} + +func addUserAgentRetryMode(stack *middleware.Stack, options Options) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + switch options.Retryer.(type) { + case *retry.Standard: + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeStandard) + case *retry.AdaptiveMode: + ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeAdaptive) + } + return nil +} + +func addRequestChecksumMetricsTracking(stack *middleware.Stack, options Options) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + return stack.Build.Insert(&internalChecksum.RequestChecksumMetricsTracking{ + RequestChecksumCalculation: options.RequestChecksumCalculation, + UserAgent: ua, + }, "UserAgent", middleware.Before) +} + +func addResponseChecksumMetricsTracking(stack *middleware.Stack, options Options) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + return stack.Build.Insert(&internalChecksum.ResponseChecksumMetricsTracking{ + ResponseChecksumValidation: options.ResponseChecksumValidation, + UserAgent: ua, + }, "UserAgent", middleware.Before) +} + +type setCredentialSourceMiddleware struct { + ua *awsmiddleware.RequestUserAgent + options Options +} + +func (m setCredentialSourceMiddleware) ID() string { return "SetCredentialSourceMiddleware" } + +func (m setCredentialSourceMiddleware) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + asProviderSource, ok := m.options.Credentials.(aws.CredentialProviderSource) + if !ok { + return next.HandleBuild(ctx, in) + } + providerSources := asProviderSource.ProviderSources() + for _, source := range providerSources { + m.ua.AddCredentialsSource(source) + } + return next.HandleBuild(ctx, in) +} + +func addCredentialSource(stack *middleware.Stack, options Options) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + mw := 
setCredentialSourceMiddleware{ua: ua, options: options} + return stack.Build.Insert(&mw, "UserAgent", middleware.Before) +} + +func resolveTracerProvider(options *Options) { + if options.TracerProvider == nil { + options.TracerProvider = &tracing.NopTracerProvider{} + } +} + +func resolveMeterProvider(options *Options) { + if options.MeterProvider == nil { + options.MeterProvider = metrics.NopMeterProvider{} + } +} + func addMetadataRetrieverMiddleware(stack *middleware.Stack) error { return s3shared.AddMetadataRetrieverMiddleware(stack) } @@ -601,6 +947,41 @@ func GetComputedInputChecksumsMetadata(m middleware.Metadata) (ComputedInputChec } +func addInputChecksumMiddleware(stack *middleware.Stack, options internalChecksum.InputMiddlewareOptions) (err error) { + err = stack.Initialize.Add(&internalChecksum.SetupInputContext{ + GetAlgorithm: options.GetAlgorithm, + RequireChecksum: options.RequireChecksum, + RequestChecksumCalculation: options.RequestChecksumCalculation, + }, middleware.Before) + if err != nil { + return err + } + + stack.Build.Remove("ContentChecksum") + + inputChecksum := &internalChecksum.ComputeInputPayloadChecksum{ + EnableTrailingChecksum: options.EnableTrailingChecksum, + EnableComputePayloadHash: options.EnableComputeSHA256PayloadHash, + EnableDecodedContentLengthHeader: options.EnableDecodedContentLengthHeader, + } + if err := stack.Finalize.Insert(inputChecksum, "ResolveEndpointV2", middleware.After); err != nil { + return err + } + + if options.EnableTrailingChecksum { + trailerMiddleware := &internalChecksum.AddInputChecksumTrailer{ + EnableTrailingChecksum: inputChecksum.EnableTrailingChecksum, + EnableComputePayloadHash: inputChecksum.EnableComputePayloadHash, + EnableDecodedContentLengthHeader: inputChecksum.EnableDecodedContentLengthHeader, + } + if err := stack.Finalize.Insert(trailerMiddleware, inputChecksum.ID(), middleware.After); err != nil { + return err + } + } + + return nil +} + // ChecksumValidationMetadata contains metadata such as the checksum algorithm // used for data integrity validation. 
type ChecksumValidationMetadata struct { @@ -859,6 +1240,10 @@ func (c presignConverter) convertToPresignMiddleware(stack *middleware.Stack, op return nil } +func withNoDefaultChecksumAPIOption(options *Options) { + options.RequestChecksumCalculation = aws.RequestChecksumCalculationWhenRequired +} + func addRequestResponseLogging(stack *middleware.Stack, o Options) error { return stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{ LogRequest: o.ClientLogMode.IsRequest(), @@ -896,3 +1281,89 @@ func addDisableHTTPSMiddleware(stack *middleware.Stack, o Options) error { DisableHTTPS: o.EndpointOptions.DisableHTTPS, }, "ResolveEndpointV2", middleware.After) } + +type spanInitializeStart struct { +} + +func (*spanInitializeStart) ID() string { + return "spanInitializeStart" +} + +func (m *spanInitializeStart) HandleInitialize( + ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, +) ( + middleware.InitializeOutput, middleware.Metadata, error, +) { + ctx, _ = tracing.StartSpan(ctx, "Initialize") + + return next.HandleInitialize(ctx, in) +} + +type spanInitializeEnd struct { +} + +func (*spanInitializeEnd) ID() string { + return "spanInitializeEnd" +} + +func (m *spanInitializeEnd) HandleInitialize( + ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler, +) ( + middleware.InitializeOutput, middleware.Metadata, error, +) { + ctx, span := tracing.PopSpan(ctx) + span.End() + + return next.HandleInitialize(ctx, in) +} + +type spanBuildRequestStart struct { +} + +func (*spanBuildRequestStart) ID() string { + return "spanBuildRequestStart" +} + +func (m *spanBuildRequestStart) HandleSerialize( + ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, +) ( + middleware.SerializeOutput, middleware.Metadata, error, +) { + ctx, _ = tracing.StartSpan(ctx, "BuildRequest") + + return next.HandleSerialize(ctx, in) +} + +type spanBuildRequestEnd struct { +} + +func (*spanBuildRequestEnd) ID() string { + return "spanBuildRequestEnd" +} + +func (m *spanBuildRequestEnd) HandleBuild( + ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, +) ( + middleware.BuildOutput, middleware.Metadata, error, +) { + ctx, span := tracing.PopSpan(ctx) + span.End() + + return next.HandleBuild(ctx, in) +} + +func addSpanInitializeStart(stack *middleware.Stack) error { + return stack.Initialize.Add(&spanInitializeStart{}, middleware.Before) +} + +func addSpanInitializeEnd(stack *middleware.Stack) error { + return stack.Initialize.Add(&spanInitializeEnd{}, middleware.After) +} + +func addSpanBuildRequestStart(stack *middleware.Stack) error { + return stack.Serialize.Add(&spanBuildRequestStart{}, middleware.Before) +} + +func addSpanBuildRequestEnd(stack *middleware.Stack) error { + return stack.Build.Add(&spanBuildRequestEnd{}, middleware.After) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_AbortMultipartUpload.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_AbortMultipartUpload.go index c71060e08..3caa7dd7b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_AbortMultipartUpload.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_AbortMultipartUpload.go @@ -11,6 +11,7 @@ import ( "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" + "time" ) // This operation aborts a multipart upload. 
After a multipart upload is aborted, @@ -18,22 +19,35 @@ import ( // by any previously uploaded parts will be freed. However, if any part uploads are // currently in progress, those part uploads might or might not succeed. As a // result, it might be necessary to abort a given multipart upload multiple times -// in order to completely free all storage consumed by all parts. To verify that -// all parts have been removed and prevent getting charged for the part storage, -// you should call the ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) -// API operation and ensure that the parts list is empty. Directory buckets - For -// directory buckets, you must make requests for this API operation to the Zonal -// endpoint. These endpoints support virtual-hosted-style requests in the format -// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style -// requests are not supported. For more information, see Regional and Zonal -// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. Permissions +// in order to completely free all storage consumed by all parts. +// +// To verify that all parts have been removed and prevent getting charged for the +// part storage, you should call the [ListParts]API operation and ensure that the parts list +// is empty. +// +// - Directory buckets - If multipart uploads in a directory bucket are in +// progress, you can't delete the bucket until all the in-progress multipart +// uploads are aborted or completed. To delete these in-progress multipart uploads, +// use the ListMultipartUploads operation to list the in-progress multipart +// uploads in the bucket and use the AbortMultipartUpload operation to abort all +// the in-progress multipart uploads. +// +// - Directory buckets - For directory buckets, you must make requests for this +// API operation to the Zonal endpoint. These endpoints support +// virtual-hosted-style requests in the format +// https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name +// . Path-style requests are not supported. For more information about endpoints +// in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information +// about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. +// +// Permissions +// // - General purpose bucket permissions - For information about permissions -// required to use the multipart upload, see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) -// in the Amazon S3 User Guide. +// required to use the multipart upload, see [Multipart Upload and Permissions]in the Amazon S3 User Guide. +// // - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// API operation for session-based authorization. Specifically, you grant the +// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation +// for session-based authorization. Specifically, you grant the // s3express:CreateSession permission to the directory bucket in a bucket policy // or an IAM identity-based policy. Then, you make the CreateSession API call on // the bucket to obtain a session token. 
With the session token in your request @@ -41,17 +55,32 @@ import ( // expires, you make another CreateSession API call to generate a new session // token for use. Amazon Web Services CLI or SDKs create session and refresh the // session token automatically to avoid service interruptions when a session -// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// . +// expires. For more information about authorization, see [CreateSession]CreateSession . +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket-name.s3express-zone-id.region-code.amazonaws.com . +// +// The following operations are related to AbortMultipartUpload : +// +// [CreateMultipartUpload] +// +// [UploadPart] +// +// [CompleteMultipartUpload] // -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket_name.s3express-az_id.region.amazonaws.com . The following operations are -// related to AbortMultipartUpload : -// - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) -// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) -// - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) -// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) -// - ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +// [ListParts] +// +// [ListMultipartUploads] +// +// [ListParts]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [UploadPart]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html +// [ListMultipartUploads]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html +// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html +// [Multipart Upload and Permissions]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html +// [CompleteMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html +// [CreateMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html func (c *Client) AbortMultipartUpload(ctx context.Context, params *AbortMultipartUploadInput, optFns ...func(*Options)) (*AbortMultipartUploadOutput, error) { if params == nil { params = &AbortMultipartUploadInput{} @@ -69,31 +98,39 @@ func (c *Client) AbortMultipartUpload(ctx context.Context, params *AbortMultipar type AbortMultipartUploadInput struct { - // The bucket name to which the upload was taking place. Directory buckets - When - // you use this operation with a directory bucket, you must use - // virtual-hosted-style requests in the format - // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not - // supported. Directory bucket names must be unique in the chosen Availability - // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for - // example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). 
For information about bucket - // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide. Access points - When you use this action with an - // access point, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When using the access point ARN, - // you must direct requests to the access point hostname. The access point hostname - // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. Access points and Object Lambda access points are - // not supported by directory buckets. S3 on Outposts - When you use this action - // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts - // hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // The bucket name to which the upload was taking place. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use virtual-hosted-style requests in the format + // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests + // are not supported. Directory bucket names must be unique in the chosen Zone + // (Availability Zone or Local Zone). Bucket names must follow the format + // bucket-base-name--zone-id--x-s3 (for example, + // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the access + // point ARN. When using the access point ARN, you must direct requests to the + // access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with S3 on Outposts, you must direct + // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the + // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When + // you use this action with S3 on Outposts, the destination bucket must be the + // Outposts access point ARN or the access point alias. 
For more information about + // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -113,20 +150,33 @@ type AbortMultipartUploadInput struct { // status code 403 Forbidden (access denied). ExpectedBucketOwner *string + // If present, this header aborts an in progress multipart upload only if it was + // initiated on the provided timestamp. If the initiated timestamp of the multipart + // upload does not match the provided value, the operation returns a 412 + // Precondition Failed error. If the initiated timestamp matches or if the + // multipart upload doesn’t exist, the operation returns a 204 Success (No Content) + // response. + // + // This functionality is only supported for directory buckets. + IfMatchInitiatedTime *time.Time + // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer noSmithyDocumentSerde } func (in *AbortMultipartUploadInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.Key = in.Key @@ -135,7 +185,9 @@ func (in *AbortMultipartUploadInput) bindEndpointParams(p *EndpointParameters) { type AbortMultipartUploadOutput struct { // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // Metadata pertaining to the operation's result. 
@@ -187,6 +239,9 @@ func (c *Client) addOperationAbortMultipartUploadMiddlewares(stack *middleware.S if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -202,6 +257,18 @@ func (c *Client) addOperationAbortMultipartUploadMiddlewares(stack *middleware.S if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpAbortMultipartUploadValidationMiddleware(stack); err != nil { return err } @@ -235,6 +302,18 @@ func (c *Client) addOperationAbortMultipartUploadMiddlewares(stack *middleware.S if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CompleteMultipartUpload.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CompleteMultipartUpload.go index 8f89d780e..ef4198eed 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CompleteMultipartUpload.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CompleteMultipartUpload.go @@ -13,88 +13,138 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Completes a multipart upload by assembling previously uploaded parts. You first -// initiate the multipart upload and then upload all parts using the UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) -// operation or the UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) -// operation. After successfully uploading all relevant parts of an upload, you -// call this CompleteMultipartUpload operation to complete the upload. Upon -// receiving this request, Amazon S3 concatenates all the parts in ascending order -// by part number to create a new object. In the CompleteMultipartUpload request, -// you must provide the parts list and ensure that the parts list is complete. The -// CompleteMultipartUpload API operation concatenates the parts that you provide in -// the list. For each part in the list, you must provide the PartNumber value and -// the ETag value that are returned after that part was uploaded. The processing -// of a CompleteMultipartUpload request could take several minutes to finalize. -// After Amazon S3 begins processing the request, it sends an HTTP response header -// that specifies a 200 OK response. While processing is in progress, Amazon S3 -// periodically sends white space characters to keep the connection from timing -// out. A request could fail after the initial 200 OK response has been sent. This -// means that a 200 OK response can contain either a success or an error. The -// error response might be embedded in the 200 OK response. 
If you call this API -// operation directly, make sure to design your application to parse the contents -// of the response and handle it appropriately. If you use Amazon Web Services -// SDKs, SDKs handle this condition. The SDKs detect the embedded error and apply -// error handling per your configuration settings (including automatically retrying -// the request as appropriate). If the condition persists, the SDKs throw an -// exception (or, for the SDKs that don't use exceptions, they return an error). +// Completes a multipart upload by assembling previously uploaded parts. +// +// You first initiate the multipart upload and then upload all parts using the [UploadPart] +// operation or the [UploadPartCopy]operation. After successfully uploading all relevant parts of +// an upload, you call this CompleteMultipartUpload operation to complete the +// upload. Upon receiving this request, Amazon S3 concatenates all the parts in +// ascending order by part number to create a new object. In the +// CompleteMultipartUpload request, you must provide the parts list and ensure that +// the parts list is complete. The CompleteMultipartUpload API operation +// concatenates the parts that you provide in the list. For each part in the list, +// you must provide the PartNumber value and the ETag value that are returned +// after that part was uploaded. +// +// The processing of a CompleteMultipartUpload request could take several minutes +// to finalize. After Amazon S3 begins processing the request, it sends an HTTP +// response header that specifies a 200 OK response. While processing is in +// progress, Amazon S3 periodically sends white space characters to keep the +// connection from timing out. A request could fail after the initial 200 OK +// response has been sent. This means that a 200 OK response can contain either a +// success or an error. The error response might be embedded in the 200 OK +// response. If you call this API operation directly, make sure to design your +// application to parse the contents of the response and handle it appropriately. +// If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect +// the embedded error and apply error handling per your configuration settings +// (including automatically retrying the request as appropriate). If the condition +// persists, the SDKs throw an exception (or, for the SDKs that don't use +// exceptions, they return an error). +// // Note that if CompleteMultipartUpload fails, applications should be prepared to // retry any failed requests (including 500 error responses). For more information, -// see Amazon S3 Error Best Practices (https://docs.aws.amazon.com/AmazonS3/latest/dev/ErrorBestPractices.html) -// . You can't use Content-Type: application/x-www-form-urlencoded for the +// see [Amazon S3 Error Best Practices]. +// +// You can't use Content-Type: application/x-www-form-urlencoded for the // CompleteMultipartUpload requests. Also, if you don't provide a Content-Type -// header, CompleteMultipartUpload can still return a 200 OK response. For more -// information about multipart uploads, see Uploading Objects Using Multipart -// Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) -// in the Amazon S3 User Guide. Directory buckets - For directory buckets, you must -// make requests for this API operation to the Zonal endpoint. These endpoints -// support virtual-hosted-style requests in the format -// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . 
Path-style -// requests are not supported. For more information, see Regional and Zonal -// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. Permissions +// header, CompleteMultipartUpload can still return a 200 OK response. +// +// For more information about multipart uploads, see [Uploading Objects Using Multipart Upload] in the Amazon S3 User Guide. +// +// Directory buckets - For directory buckets, you must make requests for this API +// operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format +// https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name +// . Path-style requests are not supported. For more information about endpoints +// in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information +// about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. +// +// Permissions // - General purpose bucket permissions - For information about permissions -// required to use the multipart upload API, see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) -// in the Amazon S3 User Guide. -// - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// API operation for session-based authorization. Specifically, you grant the -// s3express:CreateSession permission to the directory bucket in a bucket policy -// or an IAM identity-based policy. Then, you make the CreateSession API call on -// the bucket to obtain a session token. With the session token in your request -// header, you can make API requests to this operation. After the session token -// expires, you make another CreateSession API call to generate a new session -// token for use. Amazon Web Services CLI or SDKs create session and refresh the -// session token automatically to avoid service interruptions when a session -// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// . +// required to use the multipart upload API, see [Multipart Upload and Permissions]in the Amazon S3 User Guide. +// +// If you provide an [additional checksum value]in your MultipartUpload requests and the object is encrypted +// +// with Key Management Service, you must have permission to use the kms:Decrypt +// action for the CompleteMultipartUpload request to succeed. +// +// - Directory bucket permissions - To grant access to this API operation on a +// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation +// for session-based authorization. Specifically, you grant the +// s3express:CreateSession permission to the directory bucket in a bucket policy +// or an IAM identity-based policy. Then, you make the CreateSession API call on +// the bucket to obtain a session token. With the session token in your request +// header, you can make API requests to this operation. After the session token +// expires, you make another CreateSession API call to generate a new session +// token for use. 
Amazon Web Services CLI or SDKs create session and refresh the +// session token automatically to avoid service interruptions when a session +// expires. For more information about authorization, see [CreateSession]CreateSession . +// +// If the object is encrypted with SSE-KMS, you must also have the +// +// kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies +// and KMS key policies for the KMS key. // // Special errors +// // - Error Code: EntityTooSmall +// // - Description: Your proposed upload is smaller than the minimum allowed // object size. Each part must be at least 5 MB in size, except the last part. +// // - HTTP Status Code: 400 Bad Request +// // - Error Code: InvalidPart +// // - Description: One or more of the specified parts could not be found. The // part might not have been uploaded, or the specified ETag might not have matched // the uploaded part's ETag. +// // - HTTP Status Code: 400 Bad Request +// // - Error Code: InvalidPartOrder +// // - Description: The list of parts was not in ascending order. The parts list // must be specified in order by part number. +// // - HTTP Status Code: 400 Bad Request +// // - Error Code: NoSuchUpload +// // - Description: The specified multipart upload does not exist. The upload ID // might be invalid, or the multipart upload might have been aborted or completed. +// // - HTTP Status Code: 404 Not Found // -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket_name.s3express-az_id.region.amazonaws.com . The following operations are -// related to CompleteMultipartUpload : -// - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) -// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) -// - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) -// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) -// - ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket-name.s3express-zone-id.region-code.amazonaws.com . 
+// +// The following operations are related to CompleteMultipartUpload : +// +// [CreateMultipartUpload] +// +// [UploadPart] +// +// [AbortMultipartUpload] +// +// [ListParts] +// +// [ListMultipartUploads] +// +// [Uploading Objects Using Multipart Upload]: https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html +// [Amazon S3 Error Best Practices]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ErrorBestPractices.html +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [ListParts]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html +// [UploadPart]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html +// [additional checksum value]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_Checksum.html +// [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html +// [CreateMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html +// [AbortMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html +// [ListMultipartUploads]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html +// [Multipart Upload and Permissions]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html +// +// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html func (c *Client) CompleteMultipartUpload(ctx context.Context, params *CompleteMultipartUploadInput, optFns ...func(*Options)) (*CompleteMultipartUploadOutput, error) { if params == nil { params = &CompleteMultipartUploadInput{} @@ -112,31 +162,39 @@ func (c *Client) CompleteMultipartUpload(ctx context.Context, params *CompleteMu type CompleteMultipartUploadInput struct { - // Name of the bucket to which the multipart upload was initiated. Directory - // buckets - When you use this operation with a directory bucket, you must use - // virtual-hosted-style requests in the format - // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not - // supported. Directory bucket names must be unique in the chosen Availability - // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for - // example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket - // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide. Access points - When you use this action with an - // access point, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When using the access point ARN, - // you must direct requests to the access point hostname. The access point hostname - // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. 
Access points and Object Lambda access points are - // not supported by directory buckets. S3 on Outposts - When you use this action - // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts - // hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // Name of the bucket to which the multipart upload was initiated. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use virtual-hosted-style requests in the format + // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests + // are not supported. Directory bucket names must be unique in the chosen Zone + // (Availability Zone or Local Zone). Bucket names must follow the format + // bucket-base-name--zone-id--x-s3 (for example, + // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the access + // point ARN. When using the access point ARN, you must direct requests to the + // access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with S3 on Outposts, you must direct + // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the + // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When + // you use this action with S3 on Outposts, the destination bucket must be the + // Outposts access point ARN or the access point alias. For more information about + // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -153,37 +211,98 @@ type CompleteMultipartUploadInput struct { // This header can be used as a data integrity check to verify that the data // received is the same data that was originally sent. This header specifies the - // base64-encoded, 32-bit CRC32 checksum of the object. For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Base64 encoded, 32-bit CRC32 checksum of the object. 
For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumCRC32 *string // This header can be used as a data integrity check to verify that the data // received is the same data that was originally sent. This header specifies the - // base64-encoded, 32-bit CRC32C checksum of the object. For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Base64 encoded, 32-bit CRC32C checksum of the object. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumCRC32C *string // This header can be used as a data integrity check to verify that the data // received is the same data that was originally sent. This header specifies the - // base64-encoded, 160-bit SHA-1 digest of the object. For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Base64 encoded, 64-bit CRC64NVME checksum of the object. The CRC64NVME checksum + // is always a full object checksum. For more information, see [Checking object integrity in the Amazon S3 User Guide]. + // + // [Checking object integrity in the Amazon S3 User Guide]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumCRC64NVME *string + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies the + // Base64 encoded, 160-bit SHA1 digest of the object. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumSHA1 *string // This header can be used as a data integrity check to verify that the data // received is the same data that was originally sent. This header specifies the - // base64-encoded, 256-bit SHA-256 digest of the object. For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Base64 encoded, 256-bit SHA256 digest of the object. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumSHA256 *string + // This header specifies the checksum type of the object, which determines how + // part-level checksums are combined to create an object-level checksum for + // multipart objects. You can use this header as a data integrity check to verify + // that the checksum type that is received is the same checksum that was specified. + // If the checksum type doesn’t match the checksum type that was specified for the + // object during the CreateMultipartUpload request, it’ll result in a BadDigest + // error. For more information, see Checking object integrity in the Amazon S3 User + // Guide. + ChecksumType types.ChecksumType + // The account ID of the expected bucket owner. 
If the account ID that you provide // does not match the actual owner of the bucket, the request fails with the HTTP // status code 403 Forbidden (access denied). ExpectedBucketOwner *string + // Uploads the object only if the ETag (entity tag) value provided during the + // WRITE operation matches the ETag of the object in S3. If the ETag values do not + // match, the operation returns a 412 Precondition Failed error. + // + // If a conflicting operation occurs during the upload, S3 returns a 409 + // ConditionalRequestConflict response. On a 409 failure, you should fetch the + // object's ETag, re-initiate the multipart upload with CreateMultipartUpload , and + // re-upload each part. + // + // Expects the ETag value as a string. + // + // For more information about conditional requests, see [RFC 7232], or [Conditional requests] in the Amazon S3 + // User Guide. + // + // [Conditional requests]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/conditional-requests.html + // [RFC 7232]: https://tools.ietf.org/html/rfc7232 + IfMatch *string + + // Uploads the object only if the object key name does not already exist in the + // bucket specified. Otherwise, Amazon S3 returns a 412 Precondition Failed error. + // + // If a conflicting operation occurs during the upload, S3 returns a 409 + // ConditionalRequestConflict response. On a 409 failure, you should re-initiate the + // multipart upload with CreateMultipartUpload and re-upload each part. + // + // Expects the '*' (asterisk) character. + // + // For more information about conditional requests, see [RFC 7232], or [Conditional requests] in the Amazon S3 + // User Guide. + // + // [Conditional requests]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/conditional-requests.html + // [RFC 7232]: https://tools.ietf.org/html/rfc7232 + IfNoneMatch *string + + // The expected total object size of the multipart upload request. If there’s a + // mismatch between the specified object size value and the actual object size + // value, it results in an HTTP 400 InvalidRequest error. + MpuObjectSize *int64 + // The container for the multipart upload request information. MultipartUpload *types.CompletedMultipartUpload @@ -191,38 +310,47 @@ type CompleteMultipartUploadInput struct { // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer // The server-side encryption (SSE) algorithm used to encrypt the object. This // parameter is required only when the object was created using a checksum // algorithm or if your bucket policy requires the use of SSE-C.
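The new conditional-write fields above (IfMatch, IfNoneMatch) enable create-if-absent and compare-and-swap patterns when completing a multipart upload. A minimal sketch of the create-if-absent case, assuming a configured *s3.Client and already-uploaded parts; the function and variable names here are illustrative, not part of this patch:

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// completeIfAbsent finishes a multipart upload only when no object already
// exists at key. S3 answers 412 Precondition Failed if the key exists, and
// 409 ConditionalRequestConflict if a concurrent write races the completion.
func completeIfAbsent(ctx context.Context, client *s3.Client, bucket, key, uploadID string, parts []types.CompletedPart) error {
	_, err := client.CompleteMultipartUpload(ctx, &s3.CompleteMultipartUploadInput{
		Bucket:          aws.String(bucket),
		Key:             aws.String(key),
		UploadId:        aws.String(uploadID),
		MultipartUpload: &types.CompletedMultipartUpload{Parts: parts},
		IfNoneMatch:     aws.String("*"), // write only if the key does not exist yet
	})
	return err
}

On a 409 response, per the doc comment above, the caller would re-initiate the multipart upload with CreateMultipartUpload and re-upload each part before retrying.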
For more - // information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html#ssec-require-condition-key) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // information, see [Protecting data using SSE-C keys]in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + // + // [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html#ssec-require-condition-key SSECustomerAlgorithm *string // The server-side encryption (SSE) customer managed key. This parameter is needed // only when the object was created using a checksum algorithm. For more - // information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // information, see [Protecting data using SSE-C keys]in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + // + // [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html SSECustomerKey *string // The MD5 server-side encryption (SSE) customer managed key. This parameter is // needed only when the object was created using a checksum algorithm. For more - // information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // information, see [Protecting data using SSE-C keys]in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + // + // [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html SSECustomerKeyMD5 *string noSmithyDocumentSerde } func (in *CompleteMultipartUploadInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.Key = in.Key @@ -231,63 +359,90 @@ func (in *CompleteMultipartUploadInput) bindEndpointParams(p *EndpointParameters type CompleteMultipartUploadOutput struct { // The name of the bucket that contains the newly created object. Does not return - // the access point ARN or access point alias if used. Access points are not - // supported by directory buckets. + // the access point ARN or access point alias if used. + // + // Access points are not supported by directory buckets. Bucket *string // Indicates whether the multipart upload uses an S3 Bucket Key for server-side - // encryption with Key Management Service (KMS) keys (SSE-KMS). This functionality - // is not supported for directory buckets. + // encryption with Key Management Service (KMS) keys (SSE-KMS). BucketKeyEnabled *bool - // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be - // present if it was uploaded with the object. When you use an API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. 
For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 32-bit CRC32 checksum of the object. This checksum is only + // present if the checksum was uploaded with the object. When you use an API + // operation on an object that was uploaded using multipart uploads, this value may + // not be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information about + // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumCRC32 *string - // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be - // present if it was uploaded with the object. When you use an API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 32-bit CRC32C checksum of the object. This checksum is only + // present if the checksum was uploaded with the object. When you use an API + // operation on an object that was uploaded using multipart uploads, this value may + // not be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information about + // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumCRC32C *string - // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. When you use the API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies the + // Base64 encoded, 64-bit CRC64NVME checksum of the object. The CRC64NVME checksum + // is always a full object checksum. For more information, see [Checking object integrity in the Amazon S3 User Guide].
+ // + // [Checking object integrity in the Amazon S3 User Guide]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumCRC64NVME *string + + // The Base64 encoded, 160-bit SHA1 digest of the object. This will only be + // present if the checksum was uploaded with the object. When you use the API + // operation on an object that was uploaded using multipart uploads, this value may + // not be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information about + // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumSHA1 *string - // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be - // present if it was uploaded with the object. When you use an API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 256-bit SHA256 digest of the object. This will only be + // present if the checksum was uploaded with the object. When you use an API + // operation on an object that was uploaded using multipart uploads, this value may + // not be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information about + // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumSHA256 *string + // The checksum type, which determines how part-level checksums are combined to + // create an object-level checksum for multipart objects. You can use this header + // as a data integrity check to verify that the checksum type that is received is + // the same checksum type that was specified during the CreateMultipartUpload + // request. For more information, see [Checking object integrity in the Amazon S3 User Guide]. + // + // [Checking object integrity in the Amazon S3 User Guide]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumType types.ChecksumType + // Entity tag that identifies the newly created object's data. Objects with // different object data will have different entity tags. The entity tag is an // opaque string. The entity tag may or may not be an MD5 digest of the object // data. If the entity tag is not an MD5 digest of the object data, it will contain // one or more nonhexadecimal characters and/or will consist of less than 32 or // more than 32 hexadecimal digits. For more information about how the entity tag - // is calculated, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide.
+ // is calculated, see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ETag *string // If the object expiration is configured, this will contain the expiration date ( // expiry-date ) and rule ID ( rule-id ). The value of rule-id is URL-encoded. + // // This functionality is not supported for directory buckets. Expiration *string @@ -298,21 +453,22 @@ type CompleteMultipartUploadOutput struct { Location *string // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged - // If present, indicates the ID of the Key Management Service (KMS) symmetric - // encryption customer managed key that was used for the object. This functionality - // is not supported for directory buckets. + // If present, indicates the ID of the KMS key that was used for object encryption. SSEKMSKeyId *string // The server-side encryption algorithm used when storing this object in Amazon S3 - // (for example, AES256 , aws:kms ). For directory buckets, only server-side - // encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported. + // (for example, AES256 , aws:kms ). ServerSideEncryption types.ServerSideEncryption // Version ID of the newly created object, in case the bucket has versioning - // turned on. This functionality is not supported for directory buckets. + // turned on. + // + // This functionality is not supported for directory buckets. VersionId *string // Metadata pertaining to the operation's result. @@ -364,6 +520,9 @@ func (c *Client) addOperationCompleteMultipartUploadMiddlewares(stack *middlewar if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -379,6 +538,18 @@ func (c *Client) addOperationCompleteMultipartUploadMiddlewares(stack *middlewar if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCompleteMultipartUploadValidationMiddleware(stack); err != nil { return err } @@ -415,6 +586,18 @@ func (c *Client) addOperationCompleteMultipartUploadMiddlewares(stack *middlewar if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CopyObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CopyObject.go index c7990bab1..2d7d12c3e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CopyObject.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CopyObject.go @@ -15,102 +15,155 @@ import ( "time" ) -// Creates a copy of an object 
that is already stored in Amazon S3. You can store -// individual objects of up to 5 TB in Amazon S3. You create a copy of your object -// up to 5 GB in size in a single atomic action using this API. However, to copy an -// object greater than 5 GB, you must use the multipart upload Upload Part - Copy -// (UploadPartCopy) API. For more information, see Copy Object Using the REST -// Multipart Upload API (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html) -// . You can copy individual objects between general purpose buckets, between +// Creates a copy of an object that is already stored in Amazon S3. +// +// You can store individual objects of up to 5 TB in Amazon S3. You create a copy +// of your object up to 5 GB in size in a single atomic action using this API. +// However, to copy an object greater than 5 GB, you must use the multipart upload +// Upload Part - Copy (UploadPartCopy) API. For more information, see [Copy Object Using the REST Multipart Upload API]. +// +// You can copy individual objects between general purpose buckets, between // directory buckets, and between general purpose buckets and directory buckets. -// Directory buckets - For directory buckets, you must make requests for this API -// operation to the Zonal endpoint. These endpoints support virtual-hosted-style -// requests in the format -// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style -// requests are not supported. For more information, see Regional and Zonal -// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. Both the Region that you want to copy the object -// from and the Region that you want to copy the object to must be enabled for your -// account. For more information about how to enable a Region for your account, see -// Enable or disable a Region for standalone accounts (https://docs.aws.amazon.com/accounts/latest/reference/manage-acct-regions.html#manage-acct-regions-enable-standalone) -// in the Amazon Web Services Account Management Guide. Amazon S3 transfer -// acceleration does not support cross-Region copies. If you request a cross-Region -// copy using a transfer acceleration endpoint, you get a 400 Bad Request error. -// For more information, see Transfer Acceleration (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html) -// . Authentication and authorization All CopyObject requests must be -// authenticated and signed by using IAM credentials (access key ID and secret -// access key for the IAM identities). All headers with the x-amz- prefix, -// including x-amz-copy-source , must be signed. For more information, see REST -// Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html) -// . Directory buckets - You must use the IAM credentials to authenticate and +// +// - Amazon S3 supports copy operations using Multi-Region Access Points only as +// a destination when using the Multi-Region Access Point ARN. +// +// - Directory buckets - For directory buckets, you must make requests for this +// API operation to the Zonal endpoint. These endpoints support +// virtual-hosted-style requests in the format +// https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name +// . Path-style requests are not supported. 
For more information about endpoints + // in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information + // about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. + // + // - VPC endpoints don't support cross-Region requests (including copies). If + // you're using VPC endpoints, your source and destination buckets should be in the + // same Amazon Web Services Region as your VPC endpoint. + // + // Both the Region that you want to copy the object from and the Region that you + // want to copy the object to must be enabled for your account. For more + // information about how to enable a Region for your account, see [Enable or disable a Region for standalone accounts]in the Amazon + // Web Services Account Management Guide. + // + // Amazon S3 transfer acceleration does not support cross-Region copies. If you + // request a cross-Region copy using a transfer acceleration endpoint, you get a + // 400 Bad Request error. For more information, see [Transfer Acceleration]. + // + // Authentication and authorization All CopyObject requests must be authenticated + // and signed by using IAM credentials (access key ID and secret access key for the + // IAM identities). All headers with the x-amz- prefix, including x-amz-copy-source + // , must be signed. For more information, see [REST Authentication]. + // + // Directory buckets - You must use the IAM credentials to authenticate and // authorize your access to the CopyObject API operation, instead of using the -// temporary security credentials through the CreateSession API operation. Amazon -// Web Services CLI or SDKs handles authentication and authorization on your -// behalf. Permissions You must have read access to the source object and write -// access to the destination bucket. + // temporary security credentials through the CreateSession API operation. + // + // Amazon Web Services CLI or SDKs handle authentication and authorization on + // your behalf. + // + // Permissions You must have read access to the source object and write access to + // the destination bucket. + // + // - General purpose bucket permissions - You must have permissions in an IAM // policy based on the source and destination bucket types in a CopyObject // operation. + // + // - If the source object is in a general purpose bucket, you must have // s3:GetObject permission to read the source object that is being copied. + // + // - If the destination bucket is a general purpose bucket, you must have // s3:PutObject permission to write the object copy to the destination bucket. + // + // - Directory bucket permissions - You must have permissions in a bucket policy // or an IAM identity-based policy based on the source and destination bucket types // in a CopyObject operation. + // + // - If the source object that you want to copy is in a directory bucket, you // must have the s3express:CreateSession permission in the Action element of a // policy to read the object. By default, the session is in the ReadWrite mode. // If you want to restrict the access, you can explicitly set the // s3express:SessionMode condition key to ReadOnly on the copy source bucket. + // + // - If the copy destination is a directory bucket, you must have the // s3express:CreateSession permission in the Action element of a policy to write // the object to the destination. The s3express:SessionMode condition key can't -// be set to ReadOnly on the copy destination bucket.
For example policies, see - // Example bucket policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html) - // and Amazon Web Services Identity and Access Management (IAM) identity-based - // policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html) - // in the Amazon S3 User Guide. + // be set to ReadOnly on the copy destination bucket. + // + // If the object is encrypted with SSE-KMS, you must also have the + // kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies + // and KMS key policies for the KMS key. + // + // For example policies, see [Example bucket policies for S3 Express One Zone]and [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone]in the Amazon S3 User Guide. // // Response and special errors When the request is an HTTP 1.1 request, the // response is chunk encoded. When the request is not an HTTP 1.1 request, the // response would not contain the Content-Length . You always need to read the - // entire response body to check if the copy succeeds. to keep the connection alive - // while we copy the data. + // entire response body to check if the copy succeeds. + // + // - If the copy is successful, you receive a response with information about + // the copied object. + // + // - A copy request might return an error when Amazon S3 receives the copy + // request or while Amazon S3 is copying the files. A 200 OK response can contain + // either a success or an error. + // + // - If the error occurs before the copy action starts, you receive a standard + // Amazon S3 error. + // + // - If the error occurs during the copy operation, the error response is // embedded in the 200 OK response. For example, in a cross-region copy, you may - // encounter throttling and receive a 200 OK response. For more information, see - // Resolve the Error 200 response when copying objects to Amazon S3 (https://repost.aws/knowledge-center/s3-resolve-200-internalerror) + // encounter throttling and receive a 200 OK response. For more information, see [Resolve the Error 200 response when copying objects to Amazon S3] // . The 200 OK status code means the copy was accepted, but it doesn't mean the // copy is complete. Another example is when you disconnect from Amazon S3 before // the copy is complete, Amazon S3 might cancel the copy and you may receive a // 200 OK response. You must stay connected to Amazon S3 until the entire - // response is successfully received and processed. + // response is successfully received and processed. + // + // If you call this API operation directly, make sure to design your application + // to parse the content of the response and handle it appropriately. If you use
The SDKs detect the +// embedded error and apply error handling per your configuration settings +// (including automatically retrying the request as appropriate). If the condition +// persists, the SDKs throw an exception (or, for the SDKs that don't use +// exceptions, they return an error). // // Charge The copy request charge is based on the storage class and Region that // you specify for the destination object. The request can also result in a data // retrieval charge for the source if the source storage class bills for data // retrieval. If the copy source is in a different region, the data transfer is -// billed to the copy source account. For pricing information, see Amazon S3 -// pricing (http://aws.amazon.com/s3/pricing/) . HTTP Host header syntax Directory -// buckets - The HTTP Host header syntax is -// Bucket_name.s3express-az_id.region.amazonaws.com . The following operations are -// related to CopyObject : -// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) -// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// billed to the copy source account. For pricing information, see [Amazon S3 pricing]. +// +// HTTP Host header syntax +// +// - Directory buckets - The HTTP Host header syntax is +// Bucket-name.s3express-zone-id.region-code.amazonaws.com . +// +// - Amazon S3 on Outposts - When you use this action with S3 on Outposts +// through the REST API, you must direct requests to the S3 on Outposts hostname. +// The S3 on Outposts hostname takes the form +// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . The +// hostname isn't required when you use the Amazon Web Services CLI or SDKs. +// +// The following operations are related to CopyObject : +// +// [PutObject] +// +// [GetObject] +// +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html +// [Resolve the Error 200 response when copying objects to Amazon S3]: https://repost.aws/knowledge-center/s3-resolve-200-internalerror +// [Copy Object Using the REST Multipart Upload API]: https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html +// [REST Authentication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html +// [Example bucket policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html +// [Enable or disable a Region for standalone accounts]: https://docs.aws.amazon.com/accounts/latest/reference/manage-acct-regions.html#manage-acct-regions-enable-standalone +// [Transfer Acceleration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html +// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html +// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html +// [Amazon S3 pricing]: http://aws.amazon.com/s3/pricing/ func (c *Client) CopyObject(ctx context.Context, params *CopyObjectInput, optFns ...func(*Options)) (*CopyObjectOutput, error) { if params == nil { params = 
&CopyObjectInput{} @@ -128,31 +181,51 @@ func (c *Client) CopyObject(ctx context.Context, params *CopyObjectInput, optFns type CopyObjectInput struct { - // The name of the destination bucket. Directory buckets - When you use this - // operation with a directory bucket, you must use virtual-hosted-style requests in - // the format Bucket_name.s3express-az_id.region.amazonaws.com . Path-style - // requests are not supported. Directory bucket names must be unique in the chosen - // Availability Zone. Bucket names must follow the format - // bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 - // ). For information about bucket naming restrictions, see Directory bucket - // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide. Access points - When you use this action with an - // access point, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When using the access point ARN, - // you must direct requests to the access point hostname. The access point hostname - // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. Access points and Object Lambda access points are - // not supported by directory buckets. S3 on Outposts - When you use this action - // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts - // hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // The name of the destination bucket. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use virtual-hosted-style requests in the format + // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests + // are not supported. Directory bucket names must be unique in the chosen Zone + // (Availability Zone or Local Zone). Bucket names must follow the format + // bucket-base-name--zone-id--x-s3 (for example, + // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + // + // Copying objects across different Amazon Web Services Regions isn't supported + // when the source or destination bucket is in Amazon Web Services Local Zones. The + // source and destination buckets must have the same parent Amazon Web Services + // Region. Otherwise, you get an HTTP 400 Bad Request error with the error code + // InvalidRequest . + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the access + // point ARN. 
When using the access point ARN, you must direct requests to the + // access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with S3 on Outposts, you must use the + // Outpost bucket access point ARN or the access point alias for the destination + // bucket. + // + // You can only copy objects within the same Outpost bucket. It's not supported to + // copy objects across different Amazon Web Services Outposts, between buckets on + // the same Outposts, or between Outposts buckets and any other bucket types. For + // more information about S3 on Outposts, see [What is S3 on Outposts?]in the S3 on Outposts guide. When + // you use this action with S3 on Outposts through the REST API, you must direct + // requests to the S3 on Outposts hostname, in the format + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . The + // hostname isn't required when you use the Amazon Web Services CLI or SDKs. + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -160,10 +233,11 @@ type CopyObjectInput struct { // Specifies the source object for the copy operation. The source object can be up // to 5 GB. If the source object is an object that was uploaded by using a // multipart upload, the object copy will be a single part object after the source - // object is copied to the destination bucket. You specify the value of the copy - // source in one of two formats, depending on whether you want to access the source - // object through an access point (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html) - // : + // object is copied to the destination bucket. + // + // You specify the value of the copy source in one of two formats, depending on + // whether you want to access the source object through an [access point]: + // // - For objects not accessed through an access point, specify the name of the // source bucket and the key of the source object, separated by a slash (/). For // example, to copy the object reports/january.pdf from the general purpose @@ -172,6 +246,7 @@ type CopyObjectInput struct { // bucket awsexamplebucket--use1-az5--x-s3 , use // awsexamplebucket--use1-az5--x-s3/reports/january.pdf . The value must be // URL-encoded. + // // - For objects accessed through access points, specify the Amazon Resource // Name (ARN) of the object as accessed through the access point, in the format // arn:aws:s3:::accesspoint//object/ . For example, to copy the object @@ -179,15 +254,20 @@ type CopyObjectInput struct { // 123456789012 in Region us-west-2 , use the URL encoding of // arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf // . The value must be URL encoded. 
+ // // - Amazon S3 supports copy operations using Access points only when the source // and destination buckets are in the same Amazon Web Services Region. - // - Access points are not supported by directory buckets. Alternatively, for - // objects accessed through Amazon S3 on Outposts, specify the ARN of the object as - // accessed in the format arn:aws:s3-outposts:::outpost//object/ . For example, - // to copy the object reports/january.pdf through outpost my-outpost owned by - // account 123456789012 in Region us-west-2 , use the URL encoding of + // + // - Access points are not supported by directory buckets. + // + // Alternatively, for objects accessed through Amazon S3 on Outposts, specify the + // ARN of the object as accessed in the format + // arn:aws:s3-outposts:::outpost//object/ . For example, to copy the object + // reports/january.pdf through outpost my-outpost owned by account 123456789012 + // in Region us-west-2 , use the URL encoding of // arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf // . The value must be URL-encoded. + // // If your source bucket versioning is enabled, the x-amz-copy-source header by // default identifies the current version of an object to copy. If the current // version is a delete marker, Amazon S3 behaves as if the object was deleted. To @@ -195,14 +275,21 @@ type CopyObjectInput struct { // append ?versionId= to the value (for example, // awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893 // ). If you don't specify a version ID, Amazon S3 copies the latest version of the - // source object. If you enable versioning on the destination bucket, Amazon S3 - // generates a unique version ID for the copied object. This version ID is - // different from the version ID of the source object. Amazon S3 returns the - // version ID of the copied object in the x-amz-version-id response header in the - // response. If you do not enable versioning or suspend it on the destination - // bucket, the version ID that Amazon S3 generates in the x-amz-version-id - // response header is always null. Directory buckets - S3 Versioning isn't enabled - // and supported for directory buckets. + // source object. + // + // If you enable versioning on the destination bucket, Amazon S3 generates a + // unique version ID for the copied object. This version ID is different from the + // version ID of the source object. Amazon S3 returns the version ID of the copied + // object in the x-amz-version-id response header in the response. + // + // If you do not enable versioning or suspend it on the destination bucket, the + // version ID that Amazon S3 generates in the x-amz-version-id response header is + // always null. + // + // Directory buckets - S3 Versioning isn't enabled and supported for directory + // buckets. + // + // [access point]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html // // This member is required. CopySource *string @@ -212,51 +299,71 @@ type CopyObjectInput struct { // This member is required. Key *string - // The canned access control list (ACL) to apply to the object. When you copy an - // object, the ACL metadata is not preserved and is set to private by default. - // Only the owner has full access control. To override the default ACL setting, - // specify a new ACL when you generate a copy request. For more information, see - // Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html) - // . 
If the destination bucket that you're copying objects to uses the bucket owner + // The canned access control list (ACL) to apply to the object. + // + // When you copy an object, the ACL metadata is not preserved and is set to private + // by default. Only the owner has full access control. To override the default ACL + // setting, specify a new ACL when you generate a copy request. For more + // information, see [Using ACLs]. + // + // If the destination bucket that you're copying objects to uses the bucket owner // enforced setting for S3 Object Ownership, ACLs are disabled and no longer affect // permissions. Buckets that use this setting only accept PUT requests that don't // specify an ACL or PUT requests that specify bucket owner full control ACLs, // such as the bucket-owner-full-control canned ACL or an equivalent form of this - // ACL expressed in the XML format. For more information, see Controlling - // ownership of objects and disabling ACLs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) - // in the Amazon S3 User Guide. + // ACL expressed in the XML format. For more information, see [Controlling ownership of objects and disabling ACLs]in the Amazon S3 + // User Guide. + // // - If your destination bucket uses the bucket owner enforced setting for // Object Ownership, all objects written to the bucket by any account will be owned // by the bucket owner. + // // - This functionality is not supported for directory buckets. + // // - This functionality is not supported for Amazon S3 on Outposts. + // + // [Using ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html + // [Controlling ownership of objects and disabling ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html ACL types.ObjectCannedACL // Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption // with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). // If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the object. + // // Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object // encryption with SSE-KMS. Specifying this header with a COPY action doesn’t - // affect bucket-level settings for S3 Bucket Key. For more information, see - // Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) - // in the Amazon S3 User Guide. This functionality is not supported when the - // destination bucket is a directory bucket. + // affect bucket-level settings for S3 Bucket Key. + // + // For more information, see [Amazon S3 Bucket Keys] in the Amazon S3 User Guide. + // + // Directory buckets - S3 Bucket Keys aren't supported, when you copy SSE-KMS + // encrypted objects from general purpose buckets to directory buckets, from + // directory buckets to general purpose buckets, or between directory buckets, + // through [CopyObject]. In this case, Amazon S3 makes a call to KMS every time a copy request + // is made for a KMS-encrypted object. + // + // [Amazon S3 Bucket Keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html + // [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html BucketKeyEnabled *bool // Specifies the caching behavior along the request/reply chain. CacheControl *string // Indicates the algorithm that you want Amazon S3 to use to create the checksum - // for the object. 
For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. When you copy an object, if the source object has a - // checksum, that checksum value will be copied to the new object by default. If - // the CopyObject request does not include this x-amz-checksum-algorithm header, - // the checksum algorithm will be copied from the source object to the destination - // object (if it's present on the source object). You can optionally specify a - // different checksum algorithm to use with the x-amz-checksum-algorithm header. - // Unrecognized or unsupported values will respond with the HTTP status code 400 - // Bad Request . For directory buckets, when you use Amazon Web Services SDKs, - // CRC32 is the default checksum algorithm that's used for performance. + // for the object. For more information, see [Checking object integrity]in the Amazon S3 User Guide. + // + // When you copy an object, if the source object has a checksum, that checksum + // value will be copied to the new object by default. If the CopyObject request + // does not include this x-amz-checksum-algorithm header, the checksum algorithm + // will be copied from the source object to the destination object (if it's present + // on the source object). You can optionally specify a different checksum algorithm + // to use with the x-amz-checksum-algorithm header. Unrecognized or unsupported + // values will respond with the HTTP status code 400 Bad Request . + // + // For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the + // default checksum algorithm that's used for performance. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm // Specifies presentational information for the object. Indicates whether an @@ -266,8 +373,10 @@ type CopyObjectInput struct { // Specifies what content encodings have been applied to the object and thus what // decoding mechanisms must be applied to obtain the media-type referenced by the - // Content-Type header field. For directory buckets, only the aws-chunked value is - // supported in this header field. + // Content-Type header field. + // + // For directory buckets, only the aws-chunked value is supported in this header + // field. ContentEncoding *string // The language the content is in. @@ -276,62 +385,85 @@ type CopyObjectInput struct { // A standard MIME type that describes the format of the object data. ContentType *string - // Copies the object if its entity tag (ETag) matches the specified tag. If both - // the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since + // Copies the object if its entity tag (ETag) matches the specified tag. + // + // If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since // headers are present in the request and evaluate as follows, Amazon S3 returns // 200 OK and copies the data: + // // - x-amz-copy-source-if-match condition evaluates to true + // // - x-amz-copy-source-if-unmodified-since condition evaluates to false CopySourceIfMatch *string - // Copies the object if it has been modified since the specified time. 
If both the - // x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since headers - // are present in the request and evaluate as follows, Amazon S3 returns the 412 - // Precondition Failed response code: + // Copies the object if it has been modified since the specified time. + // + // If both the x-amz-copy-source-if-none-match and + // x-amz-copy-source-if-modified-since headers are present in the request and + // evaluate as follows, Amazon S3 returns the 412 Precondition Failed response + // code: + // // - x-amz-copy-source-if-none-match condition evaluates to false + // // - x-amz-copy-source-if-modified-since condition evaluates to true CopySourceIfModifiedSince *time.Time - // Copies the object if its entity tag (ETag) is different than the specified - // ETag. If both the x-amz-copy-source-if-none-match and + // Copies the object if its entity tag (ETag) is different than the specified ETag. + // + // If both the x-amz-copy-source-if-none-match and // x-amz-copy-source-if-modified-since headers are present in the request and // evaluate as follows, Amazon S3 returns the 412 Precondition Failed response // code: + // // - x-amz-copy-source-if-none-match condition evaluates to false + // // - x-amz-copy-source-if-modified-since condition evaluates to true CopySourceIfNoneMatch *string - // Copies the object if it hasn't been modified since the specified time. If both - // the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since + // Copies the object if it hasn't been modified since the specified time. + // + // If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since // headers are present in the request and evaluate as follows, Amazon S3 returns // 200 OK and copies the data: + // // - x-amz-copy-source-if-match condition evaluates to true + // // - x-amz-copy-source-if-unmodified-since condition evaluates to false CopySourceIfUnmodifiedSince *time.Time // Specifies the algorithm to use when decrypting the source object (for example, - // AES256 ). If the source object for the copy is stored in Amazon S3 using SSE-C, - // you must provide the necessary encryption information in your request so that - // Amazon S3 can decrypt the object for copying. This functionality is not - // supported when the source object is in a directory bucket. + // AES256 ). + // + // If the source object for the copy is stored in Amazon S3 using SSE-C, you must + // provide the necessary encryption information in your request so that Amazon S3 + // can decrypt the object for copying. + // + // This functionality is not supported when the source object is in a directory + // bucket. CopySourceSSECustomerAlgorithm *string // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt // the source object. The encryption key provided in this header must be the same - // one that was used when the source object was created. If the source object for - // the copy is stored in Amazon S3 using SSE-C, you must provide the necessary - // encryption information in your request so that Amazon S3 can decrypt the object - // for copying. This functionality is not supported when the source object is in a - // directory bucket. + // one that was used when the source object was created. + // + // If the source object for the copy is stored in Amazon S3 using SSE-C, you must + // provide the necessary encryption information in your request so that Amazon S3 + // can decrypt the object for copying. 
+ // + // This functionality is not supported when the source object is in a directory + // bucket. CopySourceSSECustomerKey *string // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. If the source object for the copy - // is stored in Amazon S3 using SSE-C, you must provide the necessary encryption - // information in your request so that Amazon S3 can decrypt the object for - // copying. This functionality is not supported when the source object is in a - // directory bucket. + // encryption key was transmitted without error. + // + // If the source object for the copy is stored in Amazon S3 using SSE-C, you must + // provide the necessary encryption information in your request so that Amazon S3 + // can decrypt the object for copying. + // + // This functionality is not supported when the source object is in a directory + // bucket. CopySourceSSECustomerKeyMD5 *string // The account ID of the expected destination bucket owner. If the account ID that @@ -348,22 +480,30 @@ type CopyObjectInput struct { Expires *time.Time // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. + // // - This functionality is not supported for directory buckets. + // // - This functionality is not supported for Amazon S3 on Outposts. GrantFullControl *string // Allows grantee to read the object data and its metadata. + // // - This functionality is not supported for directory buckets. + // // - This functionality is not supported for Amazon S3 on Outposts. GrantRead *string // Allows grantee to read the object ACL. + // // - This functionality is not supported for directory buckets. + // // - This functionality is not supported for Amazon S3 on Outposts. GrantReadACP *string // Allows grantee to write the ACL for the applicable object. + // // - This functionality is not supported for directory buckets. + // // - This functionality is not supported for Amazon S3 on Outposts. GrantWriteACP *string @@ -373,26 +513,32 @@ type CopyObjectInput struct { // Specifies whether the metadata is copied from the source object or replaced // with metadata that's provided in the request. When copying an object, you can // preserve all metadata (the default) or specify new metadata. If this header - // isn’t specified, COPY is the default behavior. General purpose bucket - For - // general purpose buckets, when you grant permissions, you can use the - // s3:x-amz-metadata-directive condition key to enforce certain metadata behavior - // when objects are uploaded. For more information, see Amazon S3 condition key - // examples (https://docs.aws.amazon.com/AmazonS3/latest/dev/amazon-s3-policy-keys.html) - // in the Amazon S3 User Guide. x-amz-website-redirect-location is unique to each - // object and is not copied when using the x-amz-metadata-directive header. To - // copy the value, you must specify x-amz-website-redirect-location in the request - // header. + // isn’t specified, COPY is the default behavior. + // + // General purpose bucket - For general purpose buckets, when you grant + // permissions, you can use the s3:x-amz-metadata-directive condition key to + // enforce certain metadata behavior when objects are uploaded. For more + // information, see [Amazon S3 condition key examples]in the Amazon S3 User Guide. 
+ // + // x-amz-website-redirect-location is unique to each object and is not copied when + // using the x-amz-metadata-directive header. To copy the value, you must specify + // x-amz-website-redirect-location in the request header. + // + // [Amazon S3 condition key examples]: https://docs.aws.amazon.com/AmazonS3/latest/dev/amazon-s3-policy-keys.html MetadataDirective types.MetadataDirective - // Specifies whether you want to apply a legal hold to the object copy. This - // functionality is not supported for directory buckets. + // Specifies whether you want to apply a legal hold to the object copy. + // + // This functionality is not supported for directory buckets. ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus - // The Object Lock mode that you want to apply to the object copy. This - // functionality is not supported for directory buckets. + // The Object Lock mode that you want to apply to the object copy. + // + // This functionality is not supported for directory buckets. ObjectLockMode types.ObjectLockMode // The date and time when you want the Object Lock of the object copy to expire. + // // This functionality is not supported for directory buckets. ObjectLockRetainUntilDate *time.Time @@ -400,19 +546,23 @@ type CopyObjectInput struct { // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer - // Specifies the algorithm to use when encrypting the object (for example, AES256 - // ). When you perform a CopyObject operation, if you want to use a different type - // of encryption setting for the target object, you can specify appropriate + // Specifies the algorithm to use when encrypting the object (for example, AES256 ). + // + // When you perform a CopyObject operation, if you want to use a different type of + // encryption setting for the target object, you can specify appropriate // encryption-related headers to encrypt the target object with an Amazon S3 // managed key, a KMS key, or a customer-provided key. If the encryption setting in // your request is different from the default encryption configuration of the // destination bucket, the encryption setting in your request takes precedence. + // // This functionality is not supported when the destination bucket is a directory // bucket. SSECustomerAlgorithm *string @@ -421,81 +571,154 @@ type CopyObjectInput struct { // encrypting data. This value is used to store the object and then it is // discarded. Amazon S3 does not store the encryption key. The key must be // appropriate for use with the algorithm specified in the - // x-amz-server-side-encryption-customer-algorithm header. This functionality is - // not supported when the destination bucket is a directory bucket. 
+ // x-amz-server-side-encryption-customer-algorithm header. + // + // This functionality is not supported when the destination bucket is a directory + // bucket. SSECustomerKey *string // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. This functionality is not - // supported when the destination bucket is a directory bucket. + // encryption key was transmitted without error. + // + // This functionality is not supported when the destination bucket is a directory + // bucket. SSECustomerKeyMD5 *string - // Specifies the Amazon Web Services KMS Encryption Context to use for object - // encryption. The value of this header is a base64-encoded UTF-8 string holding - // JSON with the encryption context key-value pairs. This value must be explicitly - // added to specify encryption context for CopyObject requests. This functionality - // is not supported when the destination bucket is a directory bucket. + // Specifies the Amazon Web Services KMS Encryption Context as an additional + // encryption context to use for the destination object encryption. The value of + // this header is a base64-encoded UTF-8 string holding JSON with the encryption + // context key-value pairs. + // + // General purpose buckets - This value must be explicitly added to specify + // encryption context for CopyObject requests if you want an additional encryption + // context for your destination object. The additional encryption context of the + // source object won't be copied to the destination object. For more information, + // see [Encryption context]in the Amazon S3 User Guide. + // + // Directory buckets - You can optionally provide an explicit encryption context + // value. The value must match the default encryption context - the bucket Amazon + // Resource Name (ARN). An additional encryption context value is not supported. + // + // [Encryption context]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html#encryption-context SSEKMSEncryptionContext *string - // Specifies the KMS ID (Key ID, Key ARN, or Key Alias) to use for object + // Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object // encryption. All GET and PUT requests for an object protected by KMS will fail if // they're not made via SSL or using SigV4. For information about configuring any // of the officially supported Amazon Web Services SDKs and Amazon Web Services - // CLI, see Specifying the Signature Version in Request Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) - // in the Amazon S3 User Guide. This functionality is not supported when the - // destination bucket is a directory bucket. + // CLI, see [Specifying the Signature Version in Request Authentication]in the Amazon S3 User Guide. + // + // Directory buckets - To encrypt data using SSE-KMS, it's recommended to specify + // the x-amz-server-side-encryption header to aws:kms . Then, the + // x-amz-server-side-encryption-aws-kms-key-id header implicitly uses the bucket's + // default KMS customer managed key ID. If you want to explicitly set the + // x-amz-server-side-encryption-aws-kms-key-id header, it must match the bucket's + // default customer managed key (using key ID or ARN, not alias). Your SSE-KMS + // configuration can only support 1 [customer managed key]per directory bucket's lifetime. 
The [Amazon Web Services managed key] ( aws/s3 + // ) isn't supported. + // + // Incorrect key specification results in an HTTP 400 Bad Request error. + // + // [customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk + // [Specifying the Signature Version in Request Authentication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version + // [Amazon Web Services managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk SSEKMSKeyId *string - // The server-side encryption algorithm used when storing this object in Amazon S3 - // (for example, AES256 , aws:kms , aws:kms:dsse ). Unrecognized or unsupported - // values won’t write a destination object and will receive a 400 Bad Request - // response. Amazon S3 automatically encrypts all new objects that are copied to an - // S3 bucket. When copying an object, if you don't specify encryption information - // in your copy request, the encryption setting of the target object is set to the + // The server-side encryption algorithm used when storing this object in Amazon + // S3. Unrecognized or unsupported values won’t write a destination object and will + // receive a 400 Bad Request response. + // + // Amazon S3 automatically encrypts all new objects that are copied to an S3 + // bucket. When copying an object, if you don't specify encryption information in + // your copy request, the encryption setting of the target object is set to the // default encryption configuration of the destination bucket. By default, all // buckets have a base level of encryption configuration that uses server-side // encryption with Amazon S3 managed keys (SSE-S3). If the destination bucket has a - // default encryption configuration that uses server-side encryption with Key - // Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with - // Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with - // customer-provided encryption keys (SSE-C), Amazon S3 uses the corresponding KMS - // key, or a customer-provided key to encrypt the target object copy. When you - // perform a CopyObject operation, if you want to use a different type of - // encryption setting for the target object, you can specify appropriate - // encryption-related headers to encrypt the target object with an Amazon S3 - // managed key, a KMS key, or a customer-provided key. If the encryption setting in - // your request is different from the default encryption configuration of the - // destination bucket, the encryption setting in your request takes precedence. - // With server-side encryption, Amazon S3 encrypts your data as it writes your data - // to disks in its data centers and decrypts the data when you access it. For more - // information about server-side encryption, see Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) - // in the Amazon S3 User Guide. For directory buckets, only server-side encryption - // with Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported. + // different default encryption configuration, Amazon S3 uses the corresponding + // encryption key to encrypt the target object copy. + // + // With server-side encryption, Amazon S3 encrypts your data as it writes your + // data to disks in its data centers and decrypts the data when you access it. 
For + // more information about server-side encryption, see [Using Server-Side Encryption]in the Amazon S3 User Guide. + // + // General purpose buckets + // + // - For general purpose buckets, there are the following supported options for + // server-side encryption: server-side encryption with Key Management Service (KMS) + // keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS + // keys (DSSE-KMS), and server-side encryption with customer-provided encryption + // keys (SSE-C). Amazon S3 uses the corresponding KMS key, or a customer-provided + // key to encrypt the target object copy. + // + // - When you perform a CopyObject operation, if you want to use a different type + // of encryption setting for the target object, you can specify appropriate + // encryption-related headers to encrypt the target object with an Amazon S3 + // managed key, a KMS key, or a customer-provided key. If the encryption setting in + // your request is different from the default encryption configuration of the + // destination bucket, the encryption setting in your request takes precedence. + // + // Directory buckets + // + // - For directory buckets, there are only two supported options for server-side + // encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) ( + // AES256 ) and server-side encryption with KMS keys (SSE-KMS) ( aws:kms ). We + // recommend that the bucket's default encryption uses the desired encryption + // configuration and you don't override the bucket default encryption in your + // CreateSession requests or PUT object requests. Then, new objects are + // automatically encrypted with the desired encryption settings. For more + // information, see [Protecting data with server-side encryption]in the Amazon S3 User Guide. For more information about the + // encryption overriding behaviors in directory buckets, see [Specifying server-side encryption with KMS for new object uploads]. + // + // - To encrypt new object copies to a directory bucket with SSE-KMS, we + // recommend you specify SSE-KMS as the directory bucket's default encryption + // configuration with a KMS key (specifically, a [customer managed key]). The [Amazon Web Services managed key]( aws/s3 ) isn't + // supported. Your SSE-KMS configuration can only support 1 [customer managed key]per directory bucket + // for the lifetime of the bucket. After you specify a customer managed key for + // SSE-KMS, you can't override the customer managed key for the bucket's SSE-KMS + // configuration. Then, when you perform a CopyObject operation and want to + // specify server-side encryption settings for new object copies with SSE-KMS in + // the encryption-related request headers, you must ensure the encryption key is + // the same customer managed key that you specified for the directory bucket's + // default encryption configuration. 
+ // + // [Using Server-Side Encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html + // [Specifying server-side encryption with KMS for new object uploads]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-specifying-kms-encryption.html + // [customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk + // [Protecting data with server-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-serv-side-encryption.html + // [Amazon Web Services managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk ServerSideEncryption types.ServerSideEncryption // If the x-amz-storage-class header is not used, the copied object will be stored // in the STANDARD Storage Class by default. The STANDARD storage class provides // high durability and high availability. Depending on performance needs, you can // specify a different Storage Class. + // // - Directory buckets - For directory buckets, only the S3 Express One Zone // storage class is supported to store newly created objects. Unsupported storage // class values won't write a destination object and will respond with the HTTP // status code 400 Bad Request . + // // - Amazon S3 on Outposts - S3 on Outposts only uses the OUTPOSTS Storage Class. + // // You can use the CopyObject action to change the storage class of an object that // is already stored in Amazon S3 by using the x-amz-storage-class header. For - // more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) - // in the Amazon S3 User Guide. Before using an object as a source object for the - // copy operation, you must restore a copy of it if it meets any of the following - // conditions: + // more information, see [Storage Classes]in the Amazon S3 User Guide. + // + // Before using an object as a source object for the copy operation, you must + // restore a copy of it if it meets any of the following conditions: + // // - The storage class of the source object is GLACIER or DEEP_ARCHIVE . - // - The storage class of the source object is INTELLIGENT_TIERING and it's S3 - // Intelligent-Tiering access tier (https://docs.aws.amazon.com/AmazonS3/latest/userguide/intelligent-tiering-overview.html#intel-tiering-tier-definition) - // is Archive Access or Deep Archive Access . - // For more information, see RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html) - // and Copying Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjectsExamples.html) - // in the Amazon S3 User Guide. + // + // - The storage class of the source object is INTELLIGENT_TIERING and its [S3 Intelligent-Tiering access tier] is + // Archive Access or Deep Archive Access . + // + // For more information, see [RestoreObject] and [Copying Objects] in the Amazon S3 User Guide. + // + // [Storage Classes]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html + // [RestoreObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html + // [Copying Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjectsExamples.html + // [S3 Intelligent-Tiering access tier]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/intelligent-tiering-overview.html#intel-tiering-tier-definition StorageClass types.StorageClass // The tag-set for the object copy in the destination bucket.
This value must be @@ -503,60 +726,82 @@ type CopyObjectInput struct { // the x-amz-tagging-directive . If you choose COPY for the x-amz-tagging-directive // , you don't need to set the x-amz-tagging header, because the tag-set will be // copied from the source object directly. The tag-set must be encoded as URL Query - // parameters. The default value is the empty value. Directory buckets - For - // directory buckets in a CopyObject operation, only the empty tag-set is - // supported. Any requests that attempt to write non-empty tags into directory - // buckets will receive a 501 Not Implemented status code. When the destination - // bucket is a directory bucket, you will receive a 501 Not Implemented response - // in any of the following situations: + // parameters. + // + // The default value is the empty value. + // + // Directory buckets - For directory buckets in a CopyObject operation, only the + // empty tag-set is supported. Any requests that attempt to write non-empty tags + // into directory buckets will receive a 501 Not Implemented status code. When the + // destination bucket is a directory bucket, you will receive a 501 Not Implemented + // response in any of the following situations: + // // - When you attempt to COPY the tag-set from an S3 source object that has // non-empty tags. + // // - When you attempt to REPLACE the tag-set of a source object and set a // non-empty value to x-amz-tagging . + // // - When you don't set the x-amz-tagging-directive header and the source object // has non-empty tags. This is because the default value of // x-amz-tagging-directive is COPY . + // // Because only the empty tag-set is supported for directory buckets in a // CopyObject operation, the following situations are allowed: + // // - When you attempt to COPY the tag-set from a directory bucket source object // that has no tags to a general purpose bucket. It copies an empty tag-set to the // destination object. + // // - When you attempt to REPLACE the tag-set of a directory bucket source object // and set the x-amz-tagging value of the directory bucket destination object to // empty. + // // - When you attempt to REPLACE the tag-set of a general purpose bucket source // object that has non-empty tags and set the x-amz-tagging value of the // directory bucket destination object to empty. + // // - When you attempt to REPLACE the tag-set of a directory bucket source object // and don't set the x-amz-tagging value of the directory bucket destination // object. This is because the default value of x-amz-tagging is the empty value. Tagging *string // Specifies whether the object tag-set is copied from the source object or - // replaced with the tag-set that's provided in the request. The default value is - // COPY . Directory buckets - For directory buckets in a CopyObject operation, - // only the empty tag-set is supported. Any requests that attempt to write - // non-empty tags into directory buckets will receive a 501 Not Implemented status - // code. When the destination bucket is a directory bucket, you will receive a 501 - // Not Implemented response in any of the following situations: + // replaced with the tag-set that's provided in the request. + // + // The default value is COPY . + // + // Directory buckets - For directory buckets in a CopyObject operation, only the + // empty tag-set is supported. Any requests that attempt to write non-empty tags + // into directory buckets will receive a 501 Not Implemented status code. 
When the + // destination bucket is a directory bucket, you will receive a 501 Not Implemented + // response in any of the following situations: + // // - When you attempt to COPY the tag-set from an S3 source object that has // non-empty tags. + // // - When you attempt to REPLACE the tag-set of a source object and set a // non-empty value to x-amz-tagging . + // // - When you don't set the x-amz-tagging-directive header and the source object // has non-empty tags. This is because the default value of // x-amz-tagging-directive is COPY . + // // Because only the empty tag-set is supported for directory buckets in a // CopyObject operation, the following situations are allowed: + // // - When you attempt to COPY the tag-set from a directory bucket source object // that has no tags to a general purpose bucket. It copies an empty tag-set to the // destination object. + // // - When you attempt to REPLACE the tag-set of a directory bucket source object // and set the x-amz-tagging value of the directory bucket destination object to // empty. + // // - When you attempt to REPLACE the tag-set of a general purpose bucket source // object that has non-empty tags and set the x-amz-tagging value of the // directory bucket destination object to empty. + // // - When you attempt to REPLACE the tag-set of a directory bucket source object // and don't set the x-amz-tagging value of the directory bucket destination // object. This is because the default value of x-amz-tagging is the empty value. @@ -567,71 +812,79 @@ type CopyObjectInput struct { // Amazon S3 stores the value of this header in the object metadata. This value is // unique to each object and is not copied when using the x-amz-metadata-directive // header. Instead, you may opt to provide this header in combination with the - // x-amz-metadata-directive header. This functionality is not supported for - // directory buckets. + // x-amz-metadata-directive header. + // + // This functionality is not supported for directory buckets. WebsiteRedirectLocation *string noSmithyDocumentSerde } func (in *CopyObjectInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.CopySource = in.CopySource + p.Key = in.Key p.DisableS3ExpressSessionAuth = ptr.Bool(true) } type CopyObjectOutput struct { // Indicates whether the copied object uses an S3 Bucket Key for server-side - // encryption with Key Management Service (KMS) keys (SSE-KMS). This functionality - // is not supported for directory buckets. + // encryption with Key Management Service (KMS) keys (SSE-KMS). BucketKeyEnabled *bool // Container for all response elements. CopyObjectResult *types.CopyObjectResult - // Version ID of the source object that was copied. This functionality is not - // supported when the source object is in a directory bucket. + // Version ID of the source object that was copied. + // + // This functionality is not supported when the source object is in a directory + // bucket. CopySourceVersionId *string - // If the object expiration is configured, the response includes this header. This - // functionality is not supported for directory buckets. + // If the object expiration is configured, the response includes this header. + // + // Object expiration information is not returned in directory buckets and this + // header returns the value " NotImplemented " in all responses for directory + // buckets. Expiration *string // If present, indicates that the requester was successfully charged for the - // request. 
This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // If server-side encryption with a customer-provided encryption key was // requested, the response will include this header to confirm the encryption - // algorithm that's used. This functionality is not supported for directory - // buckets. + // algorithm that's used. + // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string // If server-side encryption with a customer-provided encryption key was // requested, the response will include this header to provide the round-trip - // message integrity verification of the customer-provided encryption key. This - // functionality is not supported for directory buckets. + // message integrity verification of the customer-provided encryption key. + // + // This functionality is not supported for directory buckets. SSECustomerKeyMD5 *string // If present, indicates the Amazon Web Services KMS Encryption Context to use for - // object encryption. The value of this header is a base64-encoded UTF-8 string - // holding JSON with the encryption context key-value pairs. This functionality is - // not supported for directory buckets. + // object encryption. The value of this header is a Base64 encoded UTF-8 string + // holding JSON with the encryption context key-value pairs. SSEKMSEncryptionContext *string - // If present, indicates the ID of the Key Management Service (KMS) symmetric - // encryption customer managed key that was used for the object. This functionality - // is not supported for directory buckets. + // If present, indicates the ID of the KMS key that was used for object encryption. SSEKMSKeyId *string // The server-side encryption algorithm used when you store this object in Amazon - // S3 (for example, AES256 , aws:kms , aws:kms:dsse ). For directory buckets, only - // server-side encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is - // supported. + // S3 (for example, AES256 , aws:kms , aws:kms:dsse ). ServerSideEncryption types.ServerSideEncryption - // Version ID of the newly created copy. This functionality is not supported for - // directory buckets. + // Version ID of the newly created copy. + // + // This functionality is not supported for directory buckets. VersionId *string // Metadata pertaining to the operation's result. 
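To see the SSE-C copy headers documented above in use, here is a minimal standalone sketch against this client, not part of the patch itself. The bucket and object names are placeholders and the hard-coded key is for illustration only; as the comment text describes, CopySourceSSECustomerKeyMD5 carries the base64-encoded RFC 1321 (MD5) digest of the raw key bytes.

package main

import (
	"context"
	"crypto/md5"
	"encoding/base64"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Hypothetical 256-bit customer-provided key; load it from a secure store
	// in real code. SSE-C keys are sent base64-encoded, and the companion MD5
	// header carries the base64-encoded RFC 1321 digest of the raw key bytes.
	key := []byte("0123456789abcdef0123456789abcdef") // 32 bytes, placeholder
	digest := md5.Sum(key)

	_, err = client.CopyObject(context.TODO(), &s3.CopyObjectInput{
		Bucket:                         aws.String("amzn-s3-demo-destination-bucket"), // placeholder
		Key:                            aws.String("copied-object"),                   // placeholder
		CopySource:                     aws.String("amzn-s3-demo-source-bucket/source-object"), // placeholder
		CopySourceSSECustomerAlgorithm: aws.String("AES256"),
		CopySourceSSECustomerKey:       aws.String(base64.StdEncoding.EncodeToString(key)),
		CopySourceSSECustomerKeyMD5:    aws.String(base64.StdEncoding.EncodeToString(digest[:])),
	})
	if err != nil {
		log.Fatal(err)
	}
}

The digest is computed explicitly here to mirror the header documentation; whether a given SDK release also derives it automatically is not something this patch establishes.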
@@ -683,6 +936,9 @@ func (c *Client) addOperationCopyObjectMiddlewares(stack *middleware.Stack, opti if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -698,6 +954,18 @@ func (c *Client) addOperationCopyObjectMiddlewares(stack *middleware.Stack, opti if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCopyObjectValidationMiddleware(stack); err != nil { return err } @@ -734,6 +1002,18 @@ func (c *Client) addOperationCopyObjectMiddlewares(stack *middleware.Stack, opti if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateBucket.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateBucket.go index b39244bcf..5dfadb4f7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateBucket.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateBucket.go @@ -15,89 +15,118 @@ import ( ) // This action creates an Amazon S3 bucket. To create an Amazon S3 on Outposts -// bucket, see CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateBucket.html) -// . Creates a new S3 bucket. To create a bucket, you must set up Amazon S3 and -// have a valid Amazon Web Services Access Key ID to authenticate requests. -// Anonymous requests are never allowed to create buckets. By creating the bucket, -// you become the bucket owner. There are two types of buckets: general purpose -// buckets and directory buckets. For more information about these bucket types, -// see Creating, configuring, and working with Amazon S3 buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/creating-buckets-s3.html) -// in the Amazon S3 User Guide. +// bucket, see [CreateBucket]CreateBucket . +// +// Creates a new S3 bucket. To create a bucket, you must set up Amazon S3 and have +// a valid Amazon Web Services Access Key ID to authenticate requests. Anonymous +// requests are never allowed to create buckets. By creating the bucket, you become +// the bucket owner. +// +// There are two types of buckets: general purpose buckets and directory buckets. +// For more information about these bucket types, see [Creating, configuring, and working with Amazon S3 buckets]in the Amazon S3 User Guide. +// // - General purpose buckets - If you send your CreateBucket request to the // s3.amazonaws.com global endpoint, the request goes to the us-east-1 Region. So // the signature calculations in Signature Version 4 must use us-east-1 as the // Region, even if the location constraint in the request specifies another Region // where the bucket is to be created. If you create a bucket in a Region other than // US East (N. 
Virginia), your application must be able to handle 307 redirect. For -// more information, see Virtual hosting of buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html) -// in the Amazon S3 User Guide. +// more information, see [Virtual hosting of buckets]in the Amazon S3 User Guide. +// // - Directory buckets - For directory buckets, you must make requests for this // API operation to the Regional endpoint. These endpoints support path-style // requests in the format -// https://s3express-control.region_code.amazonaws.com/bucket-name . -// Virtual-hosted-style requests aren't supported. For more information, see -// Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. +// https://s3express-control.region-code.amazonaws.com/bucket-name . +// Virtual-hosted-style requests aren't supported. For more information about +// endpoints in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more +// information about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. // // Permissions +// // - General purpose bucket permissions - In addition to the s3:CreateBucket // permission, the following permissions are required in a policy when your // CreateBucket request includes specific headers: +// // - Access control lists (ACLs) - In your CreateBucket request, if you specify // an access control list (ACL) and set it to public-read , public-read-write , // authenticated-read , or if you explicitly specify any other custom ACLs, both // s3:CreateBucket and s3:PutBucketAcl permissions are required. In your // CreateBucket request, if you set the ACL to private , or if you don't specify // any ACLs, only the s3:CreateBucket permission is required. +// // - Object Lock - In your CreateBucket request, if you set // x-amz-bucket-object-lock-enabled to true, the // s3:PutBucketObjectLockConfiguration and s3:PutBucketVersioning permissions are // required. +// // - S3 Object Ownership - If your CreateBucket request includes the // x-amz-object-ownership header, then the s3:PutBucketOwnershipControls -// permission is required. To set an ACL on a bucket as part of a CreateBucket -// request, you must explicitly set S3 Object Ownership for the bucket to a -// different value than the default, BucketOwnerEnforced . Additionally, if your -// desired bucket ACL grants public access, you must first create the bucket -// (without the bucket ACL) and then explicitly disable Block Public Access on the -// bucket before using PutBucketAcl to set the ACL. If you try to create a bucket -// with a public ACL, the request will fail. For the majority of modern use cases -// in S3, we recommend that you keep all Block Public Access settings enabled and -// keep ACLs disabled. If you would like to share data with users outside of your -// account, you can use bucket policies as needed. For more information, see -// Controlling ownership of objects and disabling ACLs for your bucket (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) -// and Blocking public access to your Amazon S3 storage (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-control-block-public-access.html) -// in the Amazon S3 User Guide. 
-// - S3 Block Public Access - If your specific use case requires granting public -// access to your S3 resources, you can disable Block Public Access. Specifically, -// you can create a new bucket with Block Public Access enabled, then separately -// call the DeletePublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html) -// API. To use this operation, you must have the s3:PutBucketPublicAccessBlock -// permission. For more information about S3 Block Public Access, see Blocking -// public access to your Amazon S3 storage (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-control-block-public-access.html) -// in the Amazon S3 User Guide. -// - Directory bucket permissions - You must have the s3express:CreateBucket -// permission in an IAM identity-based policy instead of a bucket policy. -// Cross-account access to this API operation isn't supported. This operation can -// only be performed by the Amazon Web Services account that owns the resource. For -// more information about directory bucket policies and permissions, see Amazon -// Web Services Identity and Access Management (IAM) for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html) -// in the Amazon S3 User Guide. The permissions for ACLs, Object Lock, S3 Object -// Ownership, and S3 Block Public Access are not supported for directory buckets. -// For directory buckets, all Block Public Access settings are enabled at the -// bucket level and S3 Object Ownership is set to Bucket owner enforced (ACLs -// disabled). These settings can't be modified. For more information about -// permissions for creating and working with directory buckets, see Directory -// buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html) -// in the Amazon S3 User Guide. For more information about supported S3 features -// for directory buckets, see Features of S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-one-zone.html#s3-express-features) -// in the Amazon S3 User Guide. +// permission is required. +// +// To set an ACL on a bucket as part of a CreateBucket request, you must explicitly +// +// set S3 Object Ownership for the bucket to a different value than the default, +// BucketOwnerEnforced . Additionally, if your desired bucket ACL grants public +// access, you must first create the bucket (without the bucket ACL) and then +// explicitly disable Block Public Access on the bucket before using PutBucketAcl +// to set the ACL. If you try to create a bucket with a public ACL, the request +// will fail. +// +// For the majority of modern use cases in S3, we recommend that you keep all +// +// Block Public Access settings enabled and keep ACLs disabled. If you would like +// to share data with users outside of your account, you can use bucket policies as +// needed. For more information, see [Controlling ownership of objects and disabling ACLs for your bucket]and [Blocking public access to your Amazon S3 storage]in the Amazon S3 User Guide. +// +// - S3 Block Public Access - If your specific use case requires granting public +// access to your S3 resources, you can disable Block Public Access. Specifically, +// you can create a new bucket with Block Public Access enabled, then separately +// call the [DeletePublicAccessBlock]DeletePublicAccessBlock API. To use this operation, you must have the +// s3:PutBucketPublicAccessBlock permission. 
For more information about S3 Block +// Public Access, see [Blocking public access to your Amazon S3 storage]in the Amazon S3 User Guide. +// +// - Directory bucket permissions - You must have the s3express:CreateBucket +// permission in an IAM identity-based policy instead of a bucket policy. +// Cross-account access to this API operation isn't supported. This operation can +// only be performed by the Amazon Web Services account that owns the resource. For +// more information about directory bucket policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]in the +// Amazon S3 User Guide. +// +// The permissions for ACLs, Object Lock, S3 Object Ownership, and S3 Block Public +// +// Access are not supported for directory buckets. For directory buckets, all Block +// Public Access settings are enabled at the bucket level and S3 Object Ownership +// is set to Bucket owner enforced (ACLs disabled). These settings can't be +// modified. +// +// For more information about permissions for creating and working with directory // -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// s3express-control.region.amazonaws.com . The following operations are related to -// CreateBucket : -// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) -// - DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) +// buckets, see [Directory buckets]in the Amazon S3 User Guide. For more information about +// supported S3 features for directory buckets, see [Features of S3 Express One Zone]in the Amazon S3 User Guide. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// s3express-control.region-code.amazonaws.com . 
+// +// The following operations are related to CreateBucket : +// +// [PutObject] +// +// [DeleteBucket] +// +// [Creating, configuring, and working with Amazon S3 buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/creating-buckets-s3.html +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html +// [DeleteBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html +// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateBucket.html +// [Virtual hosting of buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html +// +// [DeletePublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html +// [Directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html +// [Features of S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-one-zone.html#s3-express-features +// [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html +// [Controlling ownership of objects and disabling ACLs for your bucket]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html +// [Blocking public access to your Amazon S3 storage]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-control-block-public-access.html func (c *Client) CreateBucket(ctx context.Context, params *CreateBucketInput, optFns ...func(*Options)) (*CreateBucketOutput, error) { if params == nil { params = &CreateBucketInput{} @@ -115,77 +144,100 @@ func (c *Client) CreateBucket(ctx context.Context, params *CreateBucketInput, op type CreateBucketInput struct { - // The name of the bucket to create. General purpose buckets - For information - // about bucket naming restrictions, see Bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html) - // in the Amazon S3 User Guide. Directory buckets - When you use this operation - // with a directory bucket, you must use path-style requests in the format - // https://s3express-control.region_code.amazonaws.com/bucket-name . + // The name of the bucket to create. + // + // General purpose buckets - For information about bucket naming restrictions, see [Bucket naming rules] + // in the Amazon S3 User Guide. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use path-style requests in the format + // https://s3express-control.region-code.amazonaws.com/bucket-name . // Virtual-hosted-style requests aren't supported. Directory bucket names must be - // unique in the chosen Availability Zone. Bucket names must also follow the format - // bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 - // ). For information about bucket naming restrictions, see Directory bucket - // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide + // unique in the chosen Zone (Availability Zone or Local Zone). 
Bucket names must + // also follow the format bucket-base-name--zone-id--x-s3 (for example, + // DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [Bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html // // This member is required. Bucket *string - // The canned ACL to apply to the bucket. This functionality is not supported for - // directory buckets. + // The canned ACL to apply to the bucket. + // + // This functionality is not supported for directory buckets. ACL types.BucketCannedACL // The configuration information for the bucket. CreateBucketConfiguration *types.CreateBucketConfiguration // Allows grantee the read, write, read ACP, and write ACP permissions on the - // bucket. This functionality is not supported for directory buckets. + // bucket. + // + // This functionality is not supported for directory buckets. GrantFullControl *string - // Allows grantee to list the objects in the bucket. This functionality is not - // supported for directory buckets. + // Allows grantee to list the objects in the bucket. + // + // This functionality is not supported for directory buckets. GrantRead *string - // Allows grantee to read the bucket ACL. This functionality is not supported for - // directory buckets. + // Allows grantee to read the bucket ACL. + // + // This functionality is not supported for directory buckets. GrantReadACP *string - // Allows grantee to create new objects in the bucket. For the bucket and object - // owners of existing objects, also allows deletions and overwrites of those - // objects. This functionality is not supported for directory buckets. + // Allows grantee to create new objects in the bucket. + // + // For the bucket and object owners of existing objects, also allows deletions and + // overwrites of those objects. + // + // This functionality is not supported for directory buckets. GrantWrite *string - // Allows grantee to write the ACL for the applicable bucket. This functionality - // is not supported for directory buckets. + // Allows grantee to write the ACL for the applicable bucket. + // + // This functionality is not supported for directory buckets. GrantWriteACP *string // Specifies whether you want S3 Object Lock to be enabled for the new bucket. + // // This functionality is not supported for directory buckets. ObjectLockEnabledForBucket *bool // The container element for object ownership for a bucket's ownership controls. + // // BucketOwnerPreferred - Objects uploaded to the bucket change ownership to the // bucket owner if the objects are uploaded with the bucket-owner-full-control - // canned ACL. ObjectWriter - The uploading account will own the object if the - // object is uploaded with the bucket-owner-full-control canned ACL. + // canned ACL. + // + // ObjectWriter - The uploading account will own the object if the object is + // uploaded with the bucket-owner-full-control canned ACL. + // // BucketOwnerEnforced - Access control lists (ACLs) are disabled and no longer // affect permissions. The bucket owner automatically owns and has full control // over every object in the bucket. 
The bucket only accepts PUT requests that don't // specify an ACL or specify bucket owner full control ACLs (such as the predefined // bucket-owner-full-control canned ACL or a custom ACL in XML format that grants - // the same permissions). By default, ObjectOwnership is set to BucketOwnerEnforced - // and ACLs are disabled. We recommend keeping ACLs disabled, except in uncommon - // use cases where you must control access for each object individually. For more - // information about S3 Object Ownership, see Controlling ownership of objects and - // disabling ACLs for your bucket (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. Directory buckets use the bucket owner enforced setting for S3 Object - // Ownership. + // the same permissions). + // + // By default, ObjectOwnership is set to BucketOwnerEnforced and ACLs are + // disabled. We recommend keeping ACLs disabled, except in uncommon use cases where + // you must control access for each object individually. For more information about + // S3 Object Ownership, see [Controlling ownership of objects and disabling ACLs for your bucket]in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. Directory buckets + // use the bucket owner enforced setting for S3 Object Ownership. + // + // [Controlling ownership of objects and disabling ACLs for your bucket]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html ObjectOwnership types.ObjectOwnership noSmithyDocumentSerde } func (in *CreateBucketInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) p.DisableAccessPoints = ptr.Bool(true) @@ -245,6 +297,9 @@ func (c *Client) addOperationCreateBucketMiddlewares(stack *middleware.Stack, op if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -260,6 +315,18 @@ func (c *Client) addOperationCreateBucketMiddlewares(stack *middleware.Stack, op if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreateBucketValidationMiddleware(stack); err != nil { return err } @@ -293,6 +360,18 @@ func (c *Client) addOperationCreateBucketMiddlewares(stack *middleware.Stack, op if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateBucketMetadataTableConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateBucketMetadataTableConfiguration.go new file mode 100644 index 000000000..fbac4458f --- /dev/null +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateBucketMetadataTableConfiguration.go @@ -0,0 +1,293 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Creates a metadata table configuration for a general purpose bucket. For more +// information, see [Accelerating data discovery with S3 Metadata]in the Amazon S3 User Guide. +// +// Permissions To use this operation, you must have the following permissions. For +// more information, see [Setting up permissions for configuring metadata tables]in the Amazon S3 User Guide. +// +// If you also want to integrate your table bucket with Amazon Web Services +// analytics services so that you can query your metadata table, you need +// additional permissions. For more information, see [Integrating Amazon S3 Tables with Amazon Web Services analytics services]in the Amazon S3 User Guide. +// +// - s3:CreateBucketMetadataTableConfiguration +// +// - s3tables:CreateNamespace +// +// - s3tables:GetTable +// +// - s3tables:CreateTable +// +// - s3tables:PutTablePolicy +// +// The following operations are related to CreateBucketMetadataTableConfiguration : +// +// [DeleteBucketMetadataTableConfiguration] +// +// [GetBucketMetadataTableConfiguration] +// +// [Setting up permissions for configuring metadata tables]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/metadata-tables-permissions.html +// [GetBucketMetadataTableConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetadataTableConfiguration.html +// [DeleteBucketMetadataTableConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetadataTableConfiguration.html +// [Accelerating data discovery with S3 Metadata]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/metadata-tables-overview.html +// [Integrating Amazon S3 Tables with Amazon Web Services analytics services]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-tables-integrating-aws.html +func (c *Client) CreateBucketMetadataTableConfiguration(ctx context.Context, params *CreateBucketMetadataTableConfigurationInput, optFns ...func(*Options)) (*CreateBucketMetadataTableConfigurationOutput, error) { + if params == nil { + params = &CreateBucketMetadataTableConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CreateBucketMetadataTableConfiguration", params, optFns, c.addOperationCreateBucketMetadataTableConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CreateBucketMetadataTableConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CreateBucketMetadataTableConfigurationInput struct { + + // The general purpose bucket that you want to create the metadata table + // configuration in. + // + // This member is required. + Bucket *string + + // The contents of your metadata table configuration. + // + // This member is required. + MetadataTableConfiguration *types.MetadataTableConfiguration + + // The checksum algorithm to use with your metadata table configuration. 
+ ChecksumAlgorithm types.ChecksumAlgorithm + + // The Content-MD5 header for the metadata table configuration. + ContentMD5 *string + + // The expected owner of the general purpose bucket that contains your metadata + // table configuration. + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +func (in *CreateBucketMetadataTableConfigurationInput) bindEndpointParams(p *EndpointParameters) { + + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + +type CreateBucketMetadataTableConfigurationOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCreateBucketMetadataTableConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpCreateBucketMetadataTableConfiguration{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpCreateBucketMetadataTableConfiguration{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "CreateBucketMetadataTableConfiguration"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addRequestChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpCreateBucketMetadataTableConfigurationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateBucketMetadataTableConfiguration(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addCreateBucketMetadataTableConfigurationInputChecksumMiddlewares(stack, options); err != nil { + return err + } + if err = 
addCreateBucketMetadataTableConfigurationUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func (v *CreateBucketMetadataTableConfigurationInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opCreateBucketMetadataTableConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "CreateBucketMetadataTableConfiguration", + } +} + +// getCreateBucketMetadataTableConfigurationRequestAlgorithmMember gets the +// request checksum algorithm value provided as input. +func getCreateBucketMetadataTableConfigurationRequestAlgorithmMember(input interface{}) (string, bool) { + in := input.(*CreateBucketMetadataTableConfigurationInput) + if len(in.ChecksumAlgorithm) == 0 { + return "", false + } + return string(in.ChecksumAlgorithm), true +} + +func addCreateBucketMetadataTableConfigurationInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { + return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + GetAlgorithm: getCreateBucketMetadataTableConfigurationRequestAlgorithmMember, + RequireChecksum: true, + RequestChecksumCalculation: options.RequestChecksumCalculation, + EnableTrailingChecksum: false, + EnableComputeSHA256PayloadHash: true, + EnableDecodedContentLengthHeader: true, + }) +} + +// getCreateBucketMetadataTableConfigurationBucketMember returns a pointer to +// string denoting a provided bucket member value and a boolean indicating if the +// input has a modeled bucket name. +func getCreateBucketMetadataTableConfigurationBucketMember(input interface{}) (*string, bool) { + in := input.(*CreateBucketMetadataTableConfigurationInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addCreateBucketMetadataTableConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getCreateBucketMetadataTableConfigurationBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git
a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateMultipartUpload.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateMultipartUpload.go index c083c32d8..8b272620a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateMultipartUpload.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateMultipartUpload.go @@ -16,51 +16,54 @@ import ( // This action initiates a multipart upload and returns an upload ID. This upload // ID is used to associate all of the parts in the specific multipart upload. You -// specify this upload ID in each of your subsequent upload part requests (see -// UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) -// ). You also include this upload ID in the final request to either complete or -// abort the multipart upload request. For more information about multipart -// uploads, see Multipart Upload Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) -// in the Amazon S3 User Guide. After you initiate a multipart upload and upload -// one or more parts, to stop being charged for storing the uploaded parts, you -// must either complete or abort the multipart upload. Amazon S3 frees up the space -// used to store the parts and stops charging you for storing them only after you -// either complete or abort a multipart upload. If you have configured a lifecycle -// rule to abort incomplete multipart uploads, the created multipart upload must be -// completed within the number of days specified in the bucket lifecycle -// configuration. Otherwise, the incomplete multipart upload becomes eligible for -// an abort action and Amazon S3 aborts the multipart upload. For more information, -// see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) -// . +// specify this upload ID in each of your subsequent upload part requests (see [UploadPart]). +// You also include this upload ID in the final request to either complete or abort +// the multipart upload request. For more information about multipart uploads, see [Multipart Upload Overview] +// in the Amazon S3 User Guide. +// +// After you initiate a multipart upload and upload one or more parts, to stop +// being charged for storing the uploaded parts, you must either complete or abort +// the multipart upload. Amazon S3 frees up the space used to store the parts and +// stops charging you for storing them only after you either complete or abort a +// multipart upload. +// +// If you have configured a lifecycle rule to abort incomplete multipart uploads, +// the created multipart upload must be completed within the number of days +// specified in the bucket lifecycle configuration. Otherwise, the incomplete +// multipart upload becomes eligible for an abort action and Amazon S3 aborts the +// multipart upload. For more information, see [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]. +// // - Directory buckets - S3 Lifecycle is not supported by directory buckets. +// // - Directory buckets - For directory buckets, you must make requests for this // API operation to the Zonal endpoint. These endpoints support // virtual-hosted-style requests in the format -// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . -// Path-style requests are not supported. 
For more information, see Regional and -// Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. +// https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name +// . Path-style requests are not supported. For more information about endpoints +// in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information +// about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. // // Request signing For request signing, multipart upload is just a series of // regular requests. You initiate a multipart upload, send one or more requests to // upload parts, and then complete the multipart upload process. You sign each // request individually. There is nothing special about signing multipart upload -// requests. For more information about signing, see Authenticating Requests -// (Amazon Web Services Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html) -// in the Amazon S3 User Guide. Permissions -// - General purpose bucket permissions - For information about the permissions -// required to use the multipart upload API, see Multipart upload and permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) -// in the Amazon S3 User Guide. To perform a multipart upload with encryption by -// using an Amazon Web Services KMS key, the requester must have permission to the -// kms:Decrypt and kms:GenerateDataKey* actions on the key. These permissions are -// required because Amazon S3 must decrypt and read data from the encrypted file -// parts before it completes the multipart upload. For more information, see -// Multipart upload API and permissions (https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions) -// and Protecting data using server-side encryption with Amazon Web Services KMS (https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html) -// in the Amazon S3 User Guide. +// requests. For more information about signing, see [Authenticating Requests (Amazon Web Services Signature Version 4)]in the Amazon S3 User Guide. +// +// Permissions +// +// - General purpose bucket permissions - To perform a multipart upload with +// encryption using a Key Management Service (KMS) key, the requester must +// have permission to the kms:Decrypt and kms:GenerateDataKey actions on the key. +// The requester must also have permissions for the kms:GenerateDataKey action +// for the CreateMultipartUpload API. Then, the requester needs permissions for +// the kms:Decrypt action on the UploadPart and UploadPartCopy APIs. These +// permissions are required because Amazon S3 must decrypt and read data from the +// encrypted file parts before it completes the multipart upload. For more +// information, see [Multipart upload API and permissions]and [Protecting data using server-side encryption with Amazon Web Services KMS]in the Amazon S3 User Guide. +// +// - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// API operation for session-based authorization.
Specifically, you grant the +// directory bucket, we recommend that you use the [CreateSession] API operation +// for session-based authorization. Specifically, you grant the // s3express:CreateSession permission to the directory bucket in a bucket policy // or an IAM identity-based policy. Then, you make the CreateSession API call on // the bucket to obtain a session token. With the session token in your request @@ -68,10 +71,10 @@ import ( // expires, you make another CreateSession API call to generate a new session // token for use. Amazon Web Services CLI or SDKs create session and refresh the // session token automatically to avoid service interruptions when a session -// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// . +// expires. For more information about authorization, see [CreateSession]. // // Encryption + // // - General purpose buckets - Server-side encryption is for data encryption at // rest. Amazon S3 encrypts your data as it writes it to disks in its data centers // and decrypts it when you access it. Amazon S3 automatically encrypts all new @@ -91,61 +94,135 @@ import ( // in your request is different from the default encryption configuration of the // destination bucket, the encryption setting in your request takes precedence. If // you choose to provide your own encryption key, the request headers you provide -// in UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) -// and UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) -// requests must match the headers you used in the CreateMultipartUpload request. +// in [UploadPart]and [UploadPartCopy]requests must match the headers you used in the CreateMultipartUpload +// request. +// // - Use KMS keys (SSE-KMS) that include the Amazon Web Services managed key ( // aws/s3 ) and KMS customer managed keys stored in Key Management Service (KMS) // – If you want Amazon Web Services to manage the keys used to encrypt data, // specify the following headers in the request. +// // - x-amz-server-side-encryption +// // - x-amz-server-side-encryption-aws-kms-key-id +// // - x-amz-server-side-encryption-context +// // - If you specify x-amz-server-side-encryption:aws:kms , but don't provide // x-amz-server-side-encryption-aws-kms-key-id , Amazon S3 uses the Amazon Web // Services managed key ( aws/s3 key) in KMS to protect the data. +// // - To perform a multipart upload with encryption by using an Amazon Web // Services KMS key, the requester must have permission to the kms:Decrypt and // kms:GenerateDataKey* actions on the key. These permissions are required // because Amazon S3 must decrypt and read data from the encrypted file parts -// before it completes the multipart upload. For more information, see Multipart -// upload API and permissions (https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions) -// and Protecting data using server-side encryption with Amazon Web Services KMS (https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html) -// in the Amazon S3 User Guide. +// before it completes the multipart upload. For more information, see [Multipart upload API and permissions]and [Protecting data using server-side encryption with Amazon Web Services KMS]in +// the Amazon S3 User Guide.
+// // - If your Identity and Access Management (IAM) user or role is in the same // Amazon Web Services account as the KMS key, then you must have these permissions // on the key policy. If your IAM user or role is in a different account from the // key, then you must have the permissions on both the key policy and your IAM user // or role. +// // - All GET and PUT requests for an object protected by KMS fail if you don't // make them by using Secure Sockets Layer (SSL), Transport Layer Security (TLS), // or Signature Version 4. For information about configuring any of the officially -// supported Amazon Web Services SDKs and Amazon Web Services CLI, see -// Specifying the Signature Version in Request Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) -// in the Amazon S3 User Guide. For more information about server-side -// encryption with KMS keys (SSE-KMS), see Protecting Data Using Server-Side -// Encryption with KMS keys (https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html) -// in the Amazon S3 User Guide. -// - Use customer-provided encryption keys (SSE-C) – If you want to manage your -// own encryption keys, provide all the following headers in the request. -// - x-amz-server-side-encryption-customer-algorithm -// - x-amz-server-side-encryption-customer-key -// - x-amz-server-side-encryption-customer-key-MD5 For more information about -// server-side encryption with customer-provided encryption keys (SSE-C), see -// Protecting data using server-side encryption with customer-provided encryption -// keys (SSE-C) (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html) -// in the Amazon S3 User Guide. -// - Directory buckets -For directory buckets, only server-side encryption with -// Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported. -// -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket_name.s3express-az_id.region.amazonaws.com . The following operations are -// related to CreateMultipartUpload : -// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) -// - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) -// - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) -// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) -// - ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +// supported Amazon Web Services SDKs and Amazon Web Services CLI, see [Specifying the Signature Version in Request Authentication]in the +// Amazon S3 User Guide. +// +// For more information about server-side encryption with KMS keys (SSE-KMS), see [Protecting Data Using Server-Side Encryption with KMS keys] +// +// in the Amazon S3 User Guide. +// +// - Use customer-provided encryption keys (SSE-C) – If you want to manage your +// own encryption keys, provide all the following headers in the request. +// +// - x-amz-server-side-encryption-customer-algorithm +// +// - x-amz-server-side-encryption-customer-key +// +// - x-amz-server-side-encryption-customer-key-MD5 +// +// For more information about server-side encryption with customer-provided +// +// encryption keys (SSE-C), see [Protecting data using server-side encryption with customer-provided encryption keys (SSE-C)]in the Amazon S3 User Guide. 
+// +// - Directory buckets - For directory buckets, there are only two supported +// options for server-side encryption: server-side encryption with Amazon S3 +// managed keys (SSE-S3) ( AES256 ) and server-side encryption with KMS keys +// (SSE-KMS) ( aws:kms ). We recommend that the bucket's default encryption uses +// the desired encryption configuration and you don't override the bucket default +// encryption in your CreateSession requests or PUT object requests. Then, new +// objects are automatically encrypted with the desired encryption settings. For +// more information, see [Protecting data with server-side encryption]in the Amazon S3 User Guide. For more information about +// the encryption overriding behaviors in directory buckets, see [Specifying server-side encryption with KMS for new object uploads]. +// +// In the Zonal endpoint API calls (except [CopyObject]and [UploadPartCopy]) using the REST API, the +// +// encryption request headers must match the encryption settings that are specified +// in the CreateSession request. You can't override the values of the encryption +// settings ( x-amz-server-side-encryption , +// x-amz-server-side-encryption-aws-kms-key-id , +// x-amz-server-side-encryption-context , and +// x-amz-server-side-encryption-bucket-key-enabled ) that are specified in the +// CreateSession request. You don't need to explicitly specify these encryption +// settings values in Zonal endpoint API calls, and Amazon S3 will use the +// encryption settings values from the CreateSession request to protect new +// objects in the directory bucket. +// +// When you use the CLI or the Amazon Web Services SDKs, for CreateSession , the +// +// session token refreshes automatically to avoid service interruptions when a +// session expires. The CLI or the Amazon Web Services SDKs use the bucket's +// default encryption configuration for the CreateSession request. It's not +// supported to override the encryption settings values in the CreateSession +// request. So in the Zonal endpoint API calls (except [CopyObject]and [UploadPartCopy]), the encryption +// request headers must match the default encryption configuration of the directory +// bucket. +// +// For directory buckets, when you perform a CreateMultipartUpload operation and an +// +// UploadPartCopy operation, the request headers you provide in the +// CreateMultipartUpload request must match the default encryption configuration +// of the destination bucket. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket-name.s3express-zone-id.region-code.amazonaws.com . 
+// +// The following operations are related to CreateMultipartUpload : +// +// [UploadPart] +// +// [CompleteMultipartUpload] +// +// [AbortMultipartUpload] +// +// [ListParts] +// +// [ListMultipartUploads] +// +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [ListParts]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html +// [UploadPart]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html +// [Protecting Data Using Server-Side Encryption with KMS keys]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html +// [Specifying the Signature Version in Request Authentication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version +// [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config +// [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html +// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html +// [Multipart upload API and permissions]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions +// [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html +// [CompleteMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html +// [Authenticating Requests (Amazon Web Services Signature Version 4)]: https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html +// [AbortMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html +// [Multipart Upload Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html +// [Protecting data using server-side encryption with Amazon Web Services KMS]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html +// [ListMultipartUploads]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html +// +// [Specifying server-side encryption with KMS for new object uploads]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-specifying-kms-encryption.html +// [Protecting data using server-side encryption with customer-provided encryption keys (SSE-C)]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html +// [Protecting data with server-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-serv-side-encryption.html func (c *Client) CreateMultipartUpload(ctx context.Context, params *CreateMultipartUploadInput, optFns ...func(*Options)) (*CreateMultipartUploadOutput, error) { if params == nil { params = &CreateMultipartUploadInput{} @@ -164,30 +241,39 @@ func (c *Client) CreateMultipartUpload(ctx context.Context, params *CreateMultip type CreateMultipartUploadInput struct { // The name of the bucket where the multipart upload is initiated and where the - // object is uploaded. Directory buckets - When you use this operation with a - // directory bucket, you must use virtual-hosted-style requests in the format - // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not - // supported. 
Directory bucket names must be unique in the chosen Availability - // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for - // example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket - // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide. Access points - When you use this action with an - // access point, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When using the access point ARN, - // you must direct requests to the access point hostname. The access point hostname - // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. Access points and Object Lambda access points are - // not supported by directory buckets. S3 on Outposts - When you use this action - // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts - // hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // object is uploaded. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use virtual-hosted-style requests in the format + // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests + // are not supported. Directory bucket names must be unique in the chosen Zone + // (Availability Zone or Local Zone). Bucket names must follow the format + // bucket-base-name--zone-id--x-s3 (for example, + // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the access + // point ARN. When using the access point ARN, you must direct requests to the + // access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with S3 on Outposts, you must direct + // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the + // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . 
When + // you use this action with S3 on Outposts, the destination bucket must be the + // Outposts access point ARN or the access point alias. For more information about + // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -199,41 +285,67 @@ type CreateMultipartUploadInput struct { // The canned ACL to apply to the object. Amazon S3 supports a set of predefined // ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and - // permissions. For more information, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL) - // in the Amazon S3 User Guide. By default, all objects are private. Only the owner - // has full access control. When uploading an object, you can grant access - // permissions to individual Amazon Web Services accounts or to predefined groups - // defined by Amazon S3. These permissions are then added to the access control - // list (ACL) on the new object. For more information, see Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html) - // . One way to grant the permissions using the request headers is to specify a - // canned ACL with the x-amz-acl request header. + // permissions. For more information, see [Canned ACL]in the Amazon S3 User Guide. + // + // By default, all objects are private. Only the owner has full access control. + // When uploading an object, you can grant access permissions to individual Amazon + // Web Services accounts or to predefined groups defined by Amazon S3. These + // permissions are then added to the access control list (ACL) on the new object. + // For more information, see [Using ACLs]. One way to grant the permissions using the request + // headers is to specify a canned ACL with the x-amz-acl request header. + // // - This functionality is not supported for directory buckets. + // // - This functionality is not supported for Amazon S3 on Outposts. + // + // [Canned ACL]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL + // [Using ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html ACL types.ObjectCannedACL // Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption // with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). - // Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object - // encryption with SSE-KMS. Specifying this header with an object action doesn’t - // affect bucket-level settings for S3 Bucket Key. This functionality is not - // supported for directory buckets. + // + // General purpose buckets - Setting this header to true causes Amazon S3 to use + // an S3 Bucket Key for object encryption with SSE-KMS. Also, specifying this + // header with a PUT action doesn't affect bucket-level settings for S3 Bucket Key. + // + // Directory buckets - S3 Bucket Keys are always enabled for GET and PUT + // operations in a directory bucket and can’t be disabled. 
S3 Bucket Keys aren't + // supported when you copy SSE-KMS encrypted objects from general purpose buckets + // to directory buckets, from directory buckets to general purpose buckets, or + // between directory buckets, through [CopyObject], [UploadPartCopy], [the Copy operation in Batch Operations], or [the import jobs]. In this case, Amazon S3 makes a + // call to KMS every time a copy request is made for a KMS-encrypted object. + // + // [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html + // [the import jobs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/create-import-job + // [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html + // [the Copy operation in Batch Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-objects-Batch-Ops BucketKeyEnabled *bool // Specifies caching behavior along the request/reply chain. CacheControl *string // Indicates the algorithm that you want Amazon S3 to use to create the checksum - // for the object. For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. + // for the object. For more information, see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm + // Indicates the checksum type that you want Amazon S3 to use to calculate the + // object’s checksum value. For more information, see [Checking object integrity in the Amazon S3 User Guide]. + // + // [Checking object integrity in the Amazon S3 User Guide]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumType types.ChecksumType + // Specifies presentational information for the object. ContentDisposition *string // Specifies what content encodings have been applied to the object and thus what // decoding mechanisms must be applied to obtain the media-type referenced by the - // Content-Type header field. For directory buckets, only the aws-chunked value is - // supported in this header field. + // Content-Type header field. + // + // For directory buckets, only the aws-chunked value is supported in this header + // field. ContentEncoding *string // The language that the content is in. @@ -251,213 +363,385 @@ type CreateMultipartUploadInput struct { Expires *time.Time // Specify access permissions explicitly to give the grantee READ, READ_ACP, and - // WRITE_ACP permissions on the object. By default, all objects are private. Only - // the owner has full access control. When uploading an object, you can use this - // header to explicitly grant access permissions to specific Amazon Web Services - // accounts or groups. This header maps to specific permissions that Amazon S3 - // supports in an ACL. For more information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) - // in the Amazon S3 User Guide. You specify each grantee as a type=value pair, - // where the type is one of the following: + // WRITE_ACP permissions on the object. + // + // By default, all objects are private. Only the owner has full access control. + // When uploading an object, you can use this header to explicitly grant access + // permissions to specific Amazon Web Services accounts or groups.
This header maps + // to specific permissions that Amazon S3 supports in an ACL. For more information, + // see [Access Control List (ACL) Overview]in the Amazon S3 User Guide. + // + // You specify each grantee as a type=value pair, where the type is one of the + // following: + // // - id – if the value specified is the canonical user ID of an Amazon Web // Services account + // // - uri – if you are granting permissions to a predefined group + // // - emailAddress – if the value specified is the email address of an Amazon Web - // Services account Using email addresses to specify a grantee is only supported in - // the following Amazon Web Services Regions: + // Services account + // + // Using email addresses to specify a grantee is only supported in the following + // Amazon Web Services Regions: + // // - US East (N. Virginia) + // // - US West (N. California) + // // - US West (Oregon) + // // - Asia Pacific (Singapore) + // // - Asia Pacific (Sydney) + // // - Asia Pacific (Tokyo) + // // - Europe (Ireland) - // - South America (São Paulo) For a list of all the Amazon S3 supported Regions - // and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) - // in the Amazon Web Services General Reference. + // + // - South America (São Paulo) + // + // For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints]in the + // Amazon Web Services General Reference. + // // For example, the following x-amz-grant-read header grants the Amazon Web // Services accounts identified by account IDs permissions to read object data and - // its metadata: x-amz-grant-read: id="11112222333", id="444455556666" + // its metadata: + // + // x-amz-grant-read: id="11112222333", id="444455556666" + // // - This functionality is not supported for directory buckets. + // // - This functionality is not supported for Amazon S3 on Outposts. + // + // [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region + // [Access Control List (ACL) Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html GrantFullControl *string // Specify access permissions explicitly to allow grantee to read the object data - // and its metadata. By default, all objects are private. Only the owner has full - // access control. When uploading an object, you can use this header to explicitly - // grant access permissions to specific Amazon Web Services accounts or groups. - // This header maps to specific permissions that Amazon S3 supports in an ACL. For - // more information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) - // in the Amazon S3 User Guide. You specify each grantee as a type=value pair, - // where the type is one of the following: + // and its metadata. + // + // By default, all objects are private. Only the owner has full access control. + // When uploading an object, you can use this header to explicitly grant access + // permissions to specific Amazon Web Services accounts or groups. This header maps + // to specific permissions that Amazon S3 supports in an ACL. For more information, + // see [Access Control List (ACL) Overview]in the Amazon S3 User Guide. 
+ // + // You specify each grantee as a type=value pair, where the type is one of the + // following: + // // - id – if the value specified is the canonical user ID of an Amazon Web // Services account + // // - uri – if you are granting permissions to a predefined group + // // - emailAddress – if the value specified is the email address of an Amazon Web - // Services account Using email addresses to specify a grantee is only supported in - // the following Amazon Web Services Regions: + // Services account + // + // Using email addresses to specify a grantee is only supported in the following + // Amazon Web Services Regions: + // // - US East (N. Virginia) + // // - US West (N. California) + // // - US West (Oregon) + // // - Asia Pacific (Singapore) + // // - Asia Pacific (Sydney) + // // - Asia Pacific (Tokyo) + // // - Europe (Ireland) - // - South America (São Paulo) For a list of all the Amazon S3 supported Regions - // and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) - // in the Amazon Web Services General Reference. + // + // - South America (São Paulo) + // + // For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints]in the + // Amazon Web Services General Reference. + // // For example, the following x-amz-grant-read header grants the Amazon Web // Services accounts identified by account IDs permissions to read object data and - // its metadata: x-amz-grant-read: id="11112222333", id="444455556666" + // its metadata: + // + // x-amz-grant-read: id="11112222333", id="444455556666" + // // - This functionality is not supported for directory buckets. + // // - This functionality is not supported for Amazon S3 on Outposts. + // + // [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region + // [Access Control List (ACL) Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html GrantRead *string // Specify access permissions explicitly to allow the grantee to read the object ACL. + // By default, all objects are private. Only the owner has full access control. // When uploading an object, you can use this header to explicitly grant access // permissions to specific Amazon Web Services accounts or groups. This header maps // to specific permissions that Amazon S3 supports in an ACL. For more information, - // see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) - // in the Amazon S3 User Guide. You specify each grantee as a type=value pair, - // where the type is one of the following: + // see [Access Control List (ACL) Overview]in the Amazon S3 User Guide. + // + // You specify each grantee as a type=value pair, where the type is one of the + // following: + // // - id – if the value specified is the canonical user ID of an Amazon Web // Services account + // // - uri – if you are granting permissions to a predefined group + // // - emailAddress – if the value specified is the email address of an Amazon Web - // Services account Using email addresses to specify a grantee is only supported in - // the following Amazon Web Services Regions: + // Services account + // + // Using email addresses to specify a grantee is only supported in the following + // Amazon Web Services Regions: + // // - US East (N. Virginia) + // // - US West (N.
California) + // // - US West (Oregon) + // // - Asia Pacific (Singapore) + // // - Asia Pacific (Sydney) + // // - Asia Pacific (Tokyo) + // // - Europe (Ireland) - // - South America (São Paulo) For a list of all the Amazon S3 supported Regions - // and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) - // in the Amazon Web Services General Reference. + // + // - South America (São Paulo) + // + // For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints]in the + // Amazon Web Services General Reference. + // // For example, the following x-amz-grant-read header grants the Amazon Web // Services accounts identified by account IDs permissions to read object data and - // its metadata: x-amz-grant-read: id="11112222333", id="444455556666" + // its metadata: + // + // x-amz-grant-read: id="11112222333", id="444455556666" + // // - This functionality is not supported for directory buckets. + // // - This functionality is not supported for Amazon S3 on Outposts. + // + // [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region + // [Access Control List (ACL) Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html GrantReadACP *string // Specify access permissions explicitly to allow the grantee to - // write the ACL for the applicable object. By default, all objects are private. - // Only the owner has full access control. When uploading an object, you can use - // this header to explicitly grant access permissions to specific Amazon Web - // Services accounts or groups. This header maps to specific permissions that - // Amazon S3 supports in an ACL. For more information, see Access Control List - // (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) - // in the Amazon S3 User Guide. You specify each grantee as a type=value pair, - // where the type is one of the following: + // write the ACL for the applicable object. + // + // By default, all objects are private. Only the owner has full access control. + // When uploading an object, you can use this header to explicitly grant access + // permissions to specific Amazon Web Services accounts or groups. This header maps + // to specific permissions that Amazon S3 supports in an ACL. For more information, + // see [Access Control List (ACL) Overview]in the Amazon S3 User Guide. + // + // You specify each grantee as a type=value pair, where the type is one of the + // following: + // // - id – if the value specified is the canonical user ID of an Amazon Web // Services account + // // - uri – if you are granting permissions to a predefined group + // // - emailAddress – if the value specified is the email address of an Amazon Web - // Services account Using email addresses to specify a grantee is only supported in - // the following Amazon Web Services Regions: + // Services account + // + // Using email addresses to specify a grantee is only supported in the following + // Amazon Web Services Regions: + // // - US East (N. Virginia) + // // - US West (N.
California) + // // - US West (Oregon) + // // - Asia Pacific (Singapore) + // // - Asia Pacific (Sydney) + // // - Asia Pacific (Tokyo) + // // - Europe (Ireland) - // - South America (São Paulo) For a list of all the Amazon S3 supported Regions - // and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) - // in the Amazon Web Services General Reference. + // + // - South America (São Paulo) + // + // For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints]in the + // Amazon Web Services General Reference. + // // For example, the following x-amz-grant-read header grants the Amazon Web // Services accounts identified by account IDs permissions to read object data and - // its metadata: x-amz-grant-read: id="11112222333", id="444455556666" + // its metadata: + // + // x-amz-grant-read: id="11112222333", id="444455556666" + // // - This functionality is not supported for directory buckets. + // // - This functionality is not supported for Amazon S3 on Outposts. + // + // [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region + // [Access Control List (ACL) Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html GrantWriteACP *string // A map of metadata to store with the object in S3. Metadata map[string]string - // Specifies whether you want to apply a legal hold to the uploaded object. This - // functionality is not supported for directory buckets. + // Specifies whether you want to apply a legal hold to the uploaded object. + // + // This functionality is not supported for directory buckets. ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus // Specifies the Object Lock mode that you want to apply to the uploaded object. + // // This functionality is not supported for directory buckets. ObjectLockMode types.ObjectLockMode - // Specifies the date and time when you want the Object Lock to expire. This - // functionality is not supported for directory buckets. + // Specifies the date and time when you want the Object Lock to expire. + // + // This functionality is not supported for directory buckets. ObjectLockRetainUntilDate *time.Time // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer - // Specifies the algorithm to use when encrypting the object (for example, - // AES256). This functionality is not supported for directory buckets. + // Specifies the algorithm to use when encrypting the object (for example, AES256). + // + // This functionality is not supported for directory buckets. 
SSECustomerAlgorithm *string // Specifies the customer-provided encryption key for Amazon S3 to use in // encrypting data. This value is used to store the object and then it is // discarded; Amazon S3 does not store the encryption key. The key must be // appropriate for use with the algorithm specified in the - // x-amz-server-side-encryption-customer-algorithm header. This functionality is - // not supported for directory buckets. + // x-amz-server-side-encryption-customer-algorithm header. + // + // This functionality is not supported for directory buckets. SSECustomerKey *string // Specifies the 128-bit MD5 digest of the customer-provided encryption key // according to RFC 1321. Amazon S3 uses this header for a message integrity check - // to ensure that the encryption key was transmitted without error. This - // functionality is not supported for directory buckets. + // to ensure that the encryption key was transmitted without error. + // + // This functionality is not supported for directory buckets. SSECustomerKeyMD5 *string // Specifies the Amazon Web Services KMS Encryption Context to use for object - // encryption. The value of this header is a base64-encoded UTF-8 string holding - // JSON with the encryption context key-value pairs. This functionality is not - // supported for directory buckets. + // encryption. The value of this header is a Base64 encoded string of a UTF-8 + // encoded JSON, which contains the encryption context as key-value pairs. + // + // Directory buckets - You can optionally provide an explicit encryption context + // value. The value must match the default encryption context - the bucket Amazon + // Resource Name (ARN). An additional encryption context value is not supported. SSEKMSEncryptionContext *string - // Specifies the ID (Key ID, Key ARN, or Key Alias) of the symmetric encryption - // customer managed key to use for object encryption. This functionality is not - // supported for directory buckets. + // Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object + // encryption. If the KMS key doesn't exist in the same account that's issuing the + // command, you must use the full Key ARN not the Key ID. + // + // General purpose buckets - If you specify x-amz-server-side-encryption with + // aws:kms or aws:kms:dsse , this header specifies the ID (Key ID, Key ARN, or Key + // Alias) of the KMS key to use. If you specify + // x-amz-server-side-encryption:aws:kms or + // x-amz-server-side-encryption:aws:kms:dsse , but do not provide + // x-amz-server-side-encryption-aws-kms-key-id , Amazon S3 uses the Amazon Web + // Services managed key ( aws/s3 ) to protect the data. + // + // Directory buckets - To encrypt data using SSE-KMS, it's recommended to specify + // the x-amz-server-side-encryption header to aws:kms . Then, the + // x-amz-server-side-encryption-aws-kms-key-id header implicitly uses the bucket's + // default KMS customer managed key ID. If you want to explicitly set the + // x-amz-server-side-encryption-aws-kms-key-id header, it must match the bucket's + // default customer managed key (using key ID or ARN, not alias). Your SSE-KMS + // configuration can only support 1 [customer managed key]per directory bucket's lifetime. The [Amazon Web Services managed key] ( aws/s3 + // ) isn't supported. + // + // Incorrect key specification results in an HTTP 400 Bad Request error. 
+ // + // [customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk + // [Amazon Web Services managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk SSEKMSKeyId *string // The server-side encryption algorithm used when you store this object in Amazon - // S3 (for example, AES256 , aws:kms ). For directory buckets, only server-side - // encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported. + // S3 (for example, AES256 , aws:kms ). + // + // - Directory buckets - For directory buckets, there are only two supported + // options for server-side encryption: server-side encryption with Amazon S3 + // managed keys (SSE-S3) ( AES256 ) and server-side encryption with KMS keys + // (SSE-KMS) ( aws:kms ). We recommend that the bucket's default encryption uses + // the desired encryption configuration and you don't override the bucket default + // encryption in your CreateSession requests or PUT object requests. Then, new + // objects are automatically encrypted with the desired encryption settings. For + // more information, see [Protecting data with server-side encryption]in the Amazon S3 User Guide. For more information about + // the encryption overriding behaviors in directory buckets, see [Specifying server-side encryption with KMS for new object uploads]. + // + // In the Zonal endpoint API calls (except [CopyObject]and [UploadPartCopy]) using the REST API, the + // encryption request headers must match the encryption settings that are specified + // in the CreateSession request. You can't override the values of the encryption + // settings ( x-amz-server-side-encryption , + // x-amz-server-side-encryption-aws-kms-key-id , + // x-amz-server-side-encryption-context , and + // x-amz-server-side-encryption-bucket-key-enabled ) that are specified in the + // CreateSession request. You don't need to explicitly specify these encryption + // settings values in Zonal endpoint API calls, and Amazon S3 will use the + // encryption settings values from the CreateSession request to protect new + // objects in the directory bucket. + // + // When you use the CLI or the Amazon Web Services SDKs, for CreateSession , the + // session token refreshes automatically to avoid service interruptions when a + // session expires. The CLI or the Amazon Web Services SDKs use the bucket's + // default encryption configuration for the CreateSession request. It's not + // supported to override the encryption settings values in the CreateSession + // request. So in the Zonal endpoint API calls (except [CopyObject]and [UploadPartCopy]), the encryption + // request headers must match the default encryption configuration of the directory + // bucket. + // + // [Specifying server-side encryption with KMS for new object uploads]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-specifying-kms-encryption.html + // [Protecting data with server-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-serv-side-encryption.html + // [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html + // [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html ServerSideEncryption types.ServerSideEncryption // By default, Amazon S3 uses the STANDARD Storage Class to store newly created // objects. The STANDARD storage class provides high durability and high // availability. 
Depending on performance needs, you can specify a different - // Storage Class. For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) - // in the Amazon S3 User Guide. + // Storage Class. For more information, see [Storage Classes]in the Amazon S3 User Guide. + // // - For directory buckets, only the S3 Express One Zone storage class is // supported to store newly created objects. + // // - Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. + // + // [Storage Classes]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html StorageClass types.StorageClass - // The tag-set for the object. The tag-set must be encoded as URL Query - // parameters. This functionality is not supported for directory buckets. + // The tag-set for the object. The tag-set must be encoded as URL Query parameters. + // + // This functionality is not supported for directory buckets. Tagging *string // If the bucket is configured as a website, redirects requests for this object to // another object in the same bucket or to an external URL. Amazon S3 stores the - // value of this header in the object metadata. This functionality is not supported - // for directory buckets. + // value of this header in the object metadata. + // + // This functionality is not supported for directory buckets. WebsiteRedirectLocation *string noSmithyDocumentSerde } func (in *CreateMultipartUploadInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.Key = in.Key @@ -469,65 +753,75 @@ type CreateMultipartUploadOutput struct { // incomplete multipart uploads and the prefix in the lifecycle rule matches the // object name in the request, the response includes this header. The header // indicates when the initiated multipart upload becomes eligible for an abort - // operation. For more information, see Aborting Incomplete Multipart Uploads - // Using a Bucket Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) - // in the Amazon S3 User Guide. The response also includes the x-amz-abort-rule-id - // header that provides the ID of the lifecycle configuration rule that defines the - // abort action. This functionality is not supported for directory buckets. + // operation. For more information, see [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]in the Amazon S3 User Guide. + // + // The response also includes the x-amz-abort-rule-id header that provides the ID + // of the lifecycle configuration rule that defines the abort action. + // + // This functionality is not supported for directory buckets. + // + // [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config AbortDate *time.Time // This header is returned along with the x-amz-abort-date header. It identifies // the applicable lifecycle configuration rule that defines the action to abort - // incomplete multipart uploads. This functionality is not supported for directory - // buckets. + // incomplete multipart uploads. + // + // This functionality is not supported for directory buckets. AbortRuleId *string // The name of the bucket to which the multipart upload was initiated. Does not - // return the access point ARN or access point alias if used. Access points are not - // supported by directory buckets. 
+ // return the access point ARN or access point alias if used. + // + // Access points are not supported by directory buckets. Bucket *string // Indicates whether the multipart upload uses an S3 Bucket Key for server-side - // encryption with Key Management Service (KMS) keys (SSE-KMS). This functionality - // is not supported for directory buckets. + // encryption with Key Management Service (KMS) keys (SSE-KMS). BucketKeyEnabled *bool // The algorithm that was used to create a checksum of the object. ChecksumAlgorithm types.ChecksumAlgorithm + // Indicates the checksum type that you want Amazon S3 to use to calculate the + // object’s checksum value. For more information, see [Checking object integrity in the Amazon S3 User Guide]. + // + // [Checking object integrity in the Amazon S3 User Guide]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumType types.ChecksumType + // Object key for which the multipart upload was initiated. Key *string // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // If server-side encryption with a customer-provided encryption key was // requested, the response will include this header to confirm the encryption - // algorithm that's used. This functionality is not supported for directory - // buckets. + // algorithm that's used. + // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string // If server-side encryption with a customer-provided encryption key was // requested, the response will include this header to provide the round-trip - // message integrity verification of the customer-provided encryption key. This - // functionality is not supported for directory buckets. + // message integrity verification of the customer-provided encryption key. + // + // This functionality is not supported for directory buckets. SSECustomerKeyMD5 *string // If present, indicates the Amazon Web Services KMS Encryption Context to use for - // object encryption. The value of this header is a base64-encoded UTF-8 string - // holding JSON with the encryption context key-value pairs. This functionality is - // not supported for directory buckets. + // object encryption. The value of this header is a Base64 encoded string of a + // UTF-8 encoded JSON, which contains the encryption context as key-value pairs. SSEKMSEncryptionContext *string - // If present, indicates the ID of the Key Management Service (KMS) symmetric - // encryption customer managed key that was used for the object. This functionality - // is not supported for directory buckets. + // If present, indicates the ID of the KMS key that was used for object encryption. SSEKMSKeyId *string // The server-side encryption algorithm used when you store this object in Amazon - // S3 (for example, AES256 , aws:kms ). For directory buckets, only server-side - // encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported. + // S3 (for example, AES256 , aws:kms ). ServerSideEncryption types.ServerSideEncryption // ID for the initiated multipart upload. @@ -582,6 +876,9 @@ func (c *Client) addOperationCreateMultipartUploadMiddlewares(stack *middleware. 
if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -597,6 +894,18 @@ func (c *Client) addOperationCreateMultipartUploadMiddlewares(stack *middleware. if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreateMultipartUploadValidationMiddleware(stack); err != nil { return err } @@ -633,6 +942,18 @@ func (c *Client) addOperationCreateMultipartUploadMiddlewares(stack *middleware. if err = addSetCreateMPUChecksumAlgorithm(stack); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateSession.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateSession.go index e2d5a007d..21db13194 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateSession.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateSession.go @@ -15,64 +15,130 @@ import ( ) // Creates a session that establishes temporary security credentials to support -// fast authentication and authorization for the Zonal endpoint APIs on directory -// buckets. For more information about Zonal endpoint APIs that include the -// Availability Zone in the request endpoint, see S3 Express One Zone APIs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-APIs.html) -// in the Amazon S3 User Guide. To make Zonal endpoint API requests on a directory -// bucket, use the CreateSession API operation. Specifically, you grant -// s3express:CreateSession permission to a bucket in a bucket policy or an IAM -// identity-based policy. Then, you use IAM credentials to make the CreateSession -// API request on the bucket, which returns temporary security credentials that -// include the access key ID, secret access key, session token, and expiration. -// These credentials have associated permissions to access the Zonal endpoint APIs. -// After the session is created, you don’t need to use other policies to grant -// permissions to each Zonal endpoint API individually. Instead, in your Zonal -// endpoint API requests, you sign your requests by applying the temporary security -// credentials of the session to the request headers and following the SigV4 -// protocol for authentication. You also apply the session token to the -// x-amz-s3session-token request header for authorization. Temporary security -// credentials are scoped to the bucket and expire after 5 minutes. After the -// expiration time, any calls that you make with those credentials will fail. You -// must use IAM credentials again to make a CreateSession API request that -// generates a new set of temporary credentials for use. Temporary credentials -// cannot be extended or refreshed beyond the original specified interval. 
If you -// use Amazon Web Services SDKs, SDKs handle the session token refreshes +// fast authentication and authorization for the Zonal endpoint API operations on +// directory buckets. For more information about Zonal endpoint API operations that +// include the Availability Zone in the request endpoint, see [S3 Express One Zone APIs]in the Amazon S3 +// User Guide. +// +// To make Zonal endpoint API requests on a directory bucket, use the CreateSession +// API operation. Specifically, you grant s3express:CreateSession permission to a +// bucket in a bucket policy or an IAM identity-based policy. Then, you use IAM +// credentials to make the CreateSession API request on the bucket, which returns +// temporary security credentials that include the access key ID, secret access +// key, session token, and expiration. These credentials have associated +// permissions to access the Zonal endpoint API operations. After the session is +// created, you don’t need to use other policies to grant permissions to each Zonal +// endpoint API individually. Instead, in your Zonal endpoint API requests, you +// sign your requests by applying the temporary security credentials of the session +// to the request headers and following the SigV4 protocol for authentication. You +// also apply the session token to the x-amz-s3session-token request header for +// authorization. Temporary security credentials are scoped to the bucket and +// expire after 5 minutes. After the expiration time, any calls that you make with +// those credentials will fail. You must use IAM credentials again to make a +// CreateSession API request that generates a new set of temporary credentials for +// use. Temporary credentials cannot be extended or refreshed beyond the original +// specified interval. +// +// If you use Amazon Web Services SDKs, SDKs handle the session token refreshes // automatically to avoid service interruptions when a session expires. We // recommend that you use the Amazon Web Services SDKs to initiate and manage -// requests to the CreateSession API. For more information, see Performance -// guidelines and design patterns (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-optimizing-performance-guidelines-design-patterns.html#s3-express-optimizing-performance-session-authentication) -// in the Amazon S3 User Guide. +// requests to the CreateSession API. For more information, see [Performance guidelines and design patterns]in the Amazon S3 +// User Guide. +// // - You must make requests for this API operation to the Zonal endpoint. These // endpoints support virtual-hosted-style requests in the format -// https://bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests -// are not supported. For more information, see Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. -// - CopyObject API operation - Unlike other Zonal endpoint APIs, the CopyObject -// API operation doesn't use the temporary security credentials returned from the -// CreateSession API operation for authentication and authorization. For -// information about authentication and authorization of the CopyObject API -// operation on directory buckets, see CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) -// . 
-// - HeadBucket API operation - Unlike other Zonal endpoint APIs, the HeadBucket -// API operation doesn't use the temporary security credentials returned from the -// CreateSession API operation for authentication and authorization. For -// information about authentication and authorization of the HeadBucket API -// operation on directory buckets, see HeadBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadBucket.html) -// . +// https://bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style +// requests are not supported. For more information about endpoints in Availability +// Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information about endpoints +// in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. +// +// - CopyObject API operation - Unlike other Zonal endpoint API operations, the +// CopyObject API operation doesn't use the temporary security credentials +// returned from the CreateSession API operation for authentication and +// authorization. For information about authentication and authorization of the +// CopyObject API operation on directory buckets, see [CopyObject]. +// +// - HeadBucket API operation - Unlike other Zonal endpoint API operations, the +// HeadBucket API operation doesn't use the temporary security credentials +// returned from the CreateSession API operation for authentication and +// authorization. For information about authentication and authorization of the +// HeadBucket API operation on directory buckets, see [HeadBucket]. // // Permissions To obtain temporary security credentials, you must create a bucket // policy or an IAM identity-based policy that grants s3express:CreateSession // permission to the bucket. In a policy, you can have the s3express:SessionMode // condition key to control who can create a ReadWrite or ReadOnly session. For -// more information about ReadWrite or ReadOnly sessions, see -// x-amz-create-session-mode (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html#API_CreateSession_RequestParameters) -// . For example policies, see Example bucket policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html) -// and Amazon Web Services Identity and Access Management (IAM) identity-based -// policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html) -// in the Amazon S3 User Guide. To grant cross-account access to Zonal endpoint -// APIs, the bucket policy should also grant both accounts the -// s3express:CreateSession permission. HTTP Host header syntax Directory buckets - -// The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com . +// more information about ReadWrite or ReadOnly sessions, see [x-amz-create-session-mode] +// x-amz-create-session-mode . For example policies, see [Example bucket policies for S3 Express One Zone] and [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone] in the Amazon S3 +// User Guide. +// +// To grant cross-account access to Zonal endpoint API operations, the bucket +// policy should also grant both accounts the s3express:CreateSession permission. 
+// +// If you want to encrypt objects with SSE-KMS, you must also have the +// kms:GenerateDataKey and the kms:Decrypt permissions in IAM identity-based +// policies and KMS key policies for the target KMS key. +// +// Encryption For directory buckets, there are only two supported options for +// server-side encryption: server-side encryption with Amazon S3 managed keys +// (SSE-S3) ( AES256 ) and server-side encryption with KMS keys (SSE-KMS) ( aws:kms +// ). We recommend that the bucket's default encryption uses the desired encryption +// configuration and you don't override the bucket default encryption in your +// CreateSession requests or PUT object requests. Then, new objects are +// automatically encrypted with the desired encryption settings. For more +// information, see [Protecting data with server-side encryption]in the Amazon S3 User Guide. For more information about the +// encryption overriding behaviors in directory buckets, see [Specifying server-side encryption with KMS for new object uploads]. +// +// For [Zonal endpoint (object-level) API operations] except [CopyObject] and [UploadPartCopy], you authenticate and authorize requests through [CreateSession] for low +// latency. To encrypt new objects in a directory bucket with SSE-KMS, you must +// specify SSE-KMS as the directory bucket's default encryption configuration with +// a KMS key (specifically, a [customer managed key]). Then, when a session is created for Zonal +// endpoint API operations, new objects are automatically encrypted and decrypted +// with SSE-KMS and S3 Bucket Keys during the session. +// +// Only 1 [customer managed key] is supported per directory bucket for the lifetime of the bucket. The [Amazon Web Services managed key] ( +// aws/s3 ) isn't supported. After you specify SSE-KMS as your bucket's default +// encryption configuration with a customer managed key, you can't change the +// customer managed key for the bucket's SSE-KMS configuration. +// +// In the Zonal endpoint API calls (except [CopyObject] and [UploadPartCopy]) using the REST API, you can't +// override the values of the encryption settings ( x-amz-server-side-encryption , +// x-amz-server-side-encryption-aws-kms-key-id , +// x-amz-server-side-encryption-context , and +// x-amz-server-side-encryption-bucket-key-enabled ) from the CreateSession +// request. You don't need to explicitly specify these encryption settings values +// in Zonal endpoint API calls, and Amazon S3 will use the encryption settings +// values from the CreateSession request to protect new objects in the directory +// bucket. +// +// When you use the CLI or the Amazon Web Services SDKs, for CreateSession , the +// session token refreshes automatically to avoid service interruptions when a +// session expires. The CLI or the Amazon Web Services SDKs use the bucket's +// default encryption configuration for the CreateSession request. It's not +// supported to override the encryption settings values in the CreateSession +// request. Also, in the Zonal endpoint API calls (except [CopyObject]and [UploadPartCopy]), it's not +// supported to override the values of the encryption settings from the +// CreateSession request. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket-name.s3express-zone-id.region-code.amazonaws.com . 
+// +// [Specifying server-side encryption with KMS for new object uploads]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-specifying-kms-encryption.html +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [Performance guidelines and design patterns]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-optimizing-performance-guidelines-design-patterns.html#s3-express-optimizing-performance-session-authentication +// [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html +// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html +// [S3 Express One Zone APIs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-APIs.html +// [HeadBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadBucket.html +// [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html +// [Amazon Web Services managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk +// [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html +// [Example bucket policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html +// [customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk +// [Protecting data with server-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-serv-side-encryption.html +// [x-amz-create-session-mode]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html#API_CreateSession_RequestParameters +// [Zonal endpoint (object-level) API operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-differences.html#s3-express-differences-api-operations +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html func (c *Client) CreateSession(ctx context.Context, params *CreateSessionInput, optFns ...func(*Options)) (*CreateSessionOutput, error) { if params == nil { params = &CreateSessionInput{} @@ -95,29 +161,111 @@ type CreateSessionInput struct { // This member is required. Bucket *string + // Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption + // with server-side encryption using KMS keys (SSE-KMS). + // + // S3 Bucket Keys are always enabled for GET and PUT operations in a directory + // bucket and can’t be disabled. S3 Bucket Keys aren't supported, when you copy + // SSE-KMS encrypted objects from general purpose buckets to directory buckets, + // from directory buckets to general purpose buckets, or between directory buckets, + // through [CopyObject], [UploadPartCopy], [the Copy operation in Batch Operations], or [the import jobs]. In this case, Amazon S3 makes a call to KMS every time a + // copy request is made for a KMS-encrypted object. 
+ //
+ // [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html
+ // [the import jobs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/create-import-job
+ // [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html
+ // [the Copy operation in Batch Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-objects-Batch-Ops
+ BucketKeyEnabled *bool
+
+ // Specifies the Amazon Web Services KMS Encryption Context as an additional
+ // encryption context to use for object encryption. The value of this header is a
+ // Base64 encoded string of a UTF-8 encoded JSON, which contains the encryption
+ // context as key-value pairs. This value is stored as object metadata and
+ // automatically gets passed on to Amazon Web Services KMS for future GetObject
+ // operations on this object.
+ //
+ // General purpose buckets - This value must be explicitly added during CopyObject
+ // operations if you want an additional encryption context for your object. For
+ // more information, see [Encryption context]in the Amazon S3 User Guide.
+ //
+ // Directory buckets - You can optionally provide an explicit encryption context
+ // value. The value must match the default encryption context - the bucket Amazon
+ // Resource Name (ARN). An additional encryption context value is not supported.
+ //
+ // [Encryption context]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html#encryption-context
+ SSEKMSEncryptionContext *string
+
+ // If you specify x-amz-server-side-encryption with aws:kms , you must specify the
+ // x-amz-server-side-encryption-aws-kms-key-id header with the ID (Key ID or Key
+ // ARN) of the KMS symmetric encryption customer managed key to use. Otherwise, you
+ // get an HTTP 400 Bad Request error. Only use the key ID or key ARN. The key
+ // alias format of the KMS key isn't supported. Also, if the KMS key doesn't exist
+ // in the same account that's issuing the command, you must use the full Key ARN,
+ // not the Key ID.
+ //
+ // Your SSE-KMS configuration can only support 1 [customer managed key] per directory bucket's lifetime.
+ // The [Amazon Web Services managed key]( aws/s3 ) isn't supported.
+ //
+ // [customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk
+ // [Amazon Web Services managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk
+ SSEKMSKeyId *string
+
+ // The server-side encryption algorithm to use when you store objects in the
+ // directory bucket.
+ //
+ // For directory buckets, there are only two supported options for server-side
+ // encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) ( AES256
+ // ) and server-side encryption with KMS keys (SSE-KMS) ( aws:kms ). By default,
+ // Amazon S3 encrypts data with SSE-S3. For more information, see [Protecting data with server-side encryption]in the Amazon S3
+ // User Guide.
+ //
+ // [Protecting data with server-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/serv-side-encryption.html
+ ServerSideEncryption types.ServerSideEncryption
+
 // Specifies the mode of the session that will be created, either ReadWrite or
 // ReadOnly . By default, a ReadWrite session is created. A ReadWrite session is
- // capable of executing all the Zonal endpoint APIs on a directory bucket.
A - // ReadOnly session is constrained to execute the following Zonal endpoint APIs: - // GetObject , HeadObject , ListObjectsV2 , GetObjectAttributes , ListParts , and - // ListMultipartUploads . + // capable of executing all the Zonal endpoint API operations on a directory + // bucket. A ReadOnly session is constrained to execute the following Zonal + // endpoint API operations: GetObject , HeadObject , ListObjectsV2 , + // GetObjectAttributes , ListParts , and ListMultipartUploads . SessionMode types.SessionMode noSmithyDocumentSerde } func (in *CreateSessionInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.DisableS3ExpressSessionAuth = ptr.Bool(true) } type CreateSessionOutput struct { - // The established temporary security credentials for the created session.. + // The established temporary security credentials for the created session. // // This member is required. Credentials *types.SessionCredentials + // Indicates whether to use an S3 Bucket Key for server-side encryption with KMS + // keys (SSE-KMS). + BucketKeyEnabled *bool + + // If present, indicates the Amazon Web Services KMS Encryption Context to use for + // object encryption. The value of this header is a Base64 encoded string of a + // UTF-8 encoded JSON, which contains the encryption context as key-value pairs. + // This value is stored as object metadata and automatically gets passed on to + // Amazon Web Services KMS for future GetObject operations on this object. + SSEKMSEncryptionContext *string + + // If you specify x-amz-server-side-encryption with aws:kms , this header indicates + // the ID of the KMS symmetric encryption customer managed key that was used for + // object encryption. + SSEKMSKeyId *string + + // The server-side encryption algorithm used when you store objects in the + // directory bucket. + ServerSideEncryption types.ServerSideEncryption + // Metadata pertaining to the operation's result. 
ResultMetadata middleware.Metadata @@ -167,6 +315,9 @@ func (c *Client) addOperationCreateSessionMiddlewares(stack *middleware.Stack, o if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -182,6 +333,18 @@ func (c *Client) addOperationCreateSessionMiddlewares(stack *middleware.Stack, o if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpCreateSessionValidationMiddleware(stack); err != nil { return err } @@ -215,6 +378,18 @@ func (c *Client) addOperationCreateSessionMiddlewares(stack *middleware.Stack, o if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucket.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucket.go index 30e1381bd..9e1edf91a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucket.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucket.go @@ -15,33 +15,45 @@ import ( // Deletes the S3 bucket. All objects (including all object versions and delete // markers) in the bucket must be deleted before the bucket itself can be deleted. +// // - Directory buckets - If multipart uploads in a directory bucket are in // progress, you can't delete the bucket until all the in-progress multipart // uploads are aborted or completed. +// // - Directory buckets - For directory buckets, you must make requests for this // API operation to the Regional endpoint. These endpoints support path-style // requests in the format -// https://s3express-control.region_code.amazonaws.com/bucket-name . -// Virtual-hosted-style requests aren't supported. For more information, see -// Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. +// https://s3express-control.region-code.amazonaws.com/bucket-name . +// Virtual-hosted-style requests aren't supported. For more information about +// endpoints in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more +// information about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. // // Permissions +// // - General purpose bucket permissions - You must have the s3:DeleteBucket // permission on the specified bucket in a policy. +// // - Directory bucket permissions - You must have the s3express:DeleteBucket // permission in an IAM identity-based policy instead of a bucket policy. // Cross-account access to this API operation isn't supported. This operation can // only be performed by the Amazon Web Services account that owns the resource. 
For -// more information about directory bucket policies and permissions, see Amazon -// Web Services Identity and Access Management (IAM) for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html) -// in the Amazon S3 User Guide. +// more information about directory bucket policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]in the +// Amazon S3 User Guide. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// s3express-control.region-code.amazonaws.com . +// +// The following operations are related to DeleteBucket : +// +// [CreateBucket] +// +// [DeleteObject] // -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// s3express-control.region.amazonaws.com . The following operations are related to -// DeleteBucket : -// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) -// - DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [DeleteObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html +// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html +// [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html func (c *Client) DeleteBucket(ctx context.Context, params *DeleteBucketInput, optFns ...func(*Options)) (*DeleteBucketOutput, error) { if params == nil { params = &DeleteBucketInput{} @@ -59,30 +71,36 @@ func (c *Client) DeleteBucket(ctx context.Context, params *DeleteBucketInput, op type DeleteBucketInput struct { - // Specifies the bucket being deleted. Directory buckets - When you use this - // operation with a directory bucket, you must use path-style requests in the - // format https://s3express-control.region_code.amazonaws.com/bucket-name . + // Specifies the bucket being deleted. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use path-style requests in the format + // https://s3express-control.region-code.amazonaws.com/bucket-name . // Virtual-hosted-style requests aren't supported. Directory bucket names must be - // unique in the chosen Availability Zone. Bucket names must also follow the format - // bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 - // ). For information about bucket naming restrictions, see Directory bucket - // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide + // unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must + // also follow the format bucket-base-name--zone-id--x-s3 (for example, + // DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html // // This member is required. 
Bucket *string // The account ID of the expected bucket owner. If the account ID that you provide // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). For directory buckets, this header - // is not supported in this API operation. If you specify this header, the request - // fails with the HTTP status code 501 Not Implemented . + // status code 403 Forbidden (access denied). + // + // For directory buckets, this header is not supported in this API operation. If + // you specify this header, the request fails with the HTTP status code 501 Not + // Implemented . ExpectedBucketOwner *string noSmithyDocumentSerde } func (in *DeleteBucketInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -137,6 +155,9 @@ func (c *Client) addOperationDeleteBucketMiddlewares(stack *middleware.Stack, op if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -152,6 +173,18 @@ func (c *Client) addOperationDeleteBucketMiddlewares(stack *middleware.Stack, op if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteBucketValidationMiddleware(stack); err != nil { return err } @@ -185,6 +218,18 @@ func (c *Client) addOperationDeleteBucketMiddlewares(stack *middleware.Stack, op if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketAnalyticsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketAnalyticsConfiguration.go index 0033825a0..a3a1873fa 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketAnalyticsConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketAnalyticsConfiguration.go @@ -13,20 +13,32 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Deletes an analytics -// configuration for the bucket (specified by the analytics configuration ID). To -// use this operation, you must have permissions to perform the +// This operation is not supported for directory buckets. +// +// Deletes an analytics configuration for the bucket (specified by the analytics +// configuration ID). +// +// To use this operation, you must have permissions to perform the // s3:PutAnalyticsConfiguration action. The bucket owner has this permission by // default. The bucket owner can grant this permission to others. 
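In client code, both this operation and the bucket delete above reduce to single calls; a sketch with hypothetical names (not part of the vendored patch):

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// deleteEmptiedBucket removes a bucket once all objects, versions, and
// in-progress multipart uploads have been deleted or aborted.
func deleteEmptiedBucket(ctx context.Context, client *s3.Client, bucket string) error {
	_, err := client.DeleteBucket(ctx, &s3.DeleteBucketInput{
		Bucket: aws.String(bucket),
	})
	return err
}

// deleteAnalyticsConfig removes one analytics configuration, addressed by
// bucket plus configuration ID.
func deleteAnalyticsConfig(ctx context.Context, client *s3.Client, bucket, id string) error {
	_, err := client.DeleteBucketAnalyticsConfiguration(ctx, &s3.DeleteBucketAnalyticsConfigurationInput{
		Bucket: aws.String(bucket),
		Id:     aws.String(id),
	})
	return err
}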
For more -// information about permissions, see Permissions Related to Bucket Subresource -// Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// . For information about the Amazon S3 analytics feature, see Amazon S3 -// Analytics – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html) -// . The following operations are related to DeleteBucketAnalyticsConfiguration : -// - GetBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html) -// - ListBucketAnalyticsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html) -// - PutBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html) +// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. +// +// For information about the Amazon S3 analytics feature, see [Amazon S3 Analytics – Storage Class Analysis]. +// +// The following operations are related to DeleteBucketAnalyticsConfiguration : +// +// [GetBucketAnalyticsConfiguration] +// +// [ListBucketAnalyticsConfigurations] +// +// [PutBucketAnalyticsConfiguration] +// +// [Amazon S3 Analytics – Storage Class Analysis]: https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [GetBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html +// [ListBucketAnalyticsConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html +// [PutBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html func (c *Client) DeleteBucketAnalyticsConfiguration(ctx context.Context, params *DeleteBucketAnalyticsConfigurationInput, optFns ...func(*Options)) (*DeleteBucketAnalyticsConfigurationOutput, error) { if params == nil { params = &DeleteBucketAnalyticsConfigurationInput{} @@ -63,6 +75,7 @@ type DeleteBucketAnalyticsConfigurationInput struct { } func (in *DeleteBucketAnalyticsConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -117,6 +130,9 @@ func (c *Client) addOperationDeleteBucketAnalyticsConfigurationMiddlewares(stack if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -132,6 +148,18 @@ func (c *Client) addOperationDeleteBucketAnalyticsConfigurationMiddlewares(stack if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = 
addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteBucketAnalyticsConfigurationValidationMiddleware(stack); err != nil { return err } @@ -165,6 +193,18 @@ func (c *Client) addOperationDeleteBucketAnalyticsConfigurationMiddlewares(stack if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketCors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketCors.go index d465826fb..ebae81c34 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketCors.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketCors.go @@ -13,14 +13,25 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Deletes the cors -// configuration information set for the bucket. To use this operation, you must -// have permission to perform the s3:PutBucketCORS action. The bucket owner has -// this permission by default and can grant this permission to others. For -// information about cors , see Enabling Cross-Origin Resource Sharing (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) -// in the Amazon S3 User Guide. Related Resources -// - PutBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html) -// - RESTOPTIONSobject (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html) +// This operation is not supported for directory buckets. +// +// Deletes the cors configuration information set for the bucket. +// +// To use this operation, you must have permission to perform the s3:PutBucketCORS +// action. The bucket owner has this permission by default and can grant this +// permission to others. +// +// For information about cors , see [Enabling Cross-Origin Resource Sharing] in the Amazon S3 User Guide. 
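As a usage sketch (hypothetical bucket name, not part of the vendored patch), the delete takes only the bucket:

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// deleteCorsConfig removes the bucket's cors configuration; subsequent
// cross-origin requests to the bucket fall back to S3's default behavior.
func deleteCorsConfig(ctx context.Context, client *s3.Client) error {
	_, err := client.DeleteBucketCors(ctx, &s3.DeleteBucketCorsInput{
		Bucket: aws.String("amzn-s3-demo-bucket"), // hypothetical
	})
	return err
}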
+// +// # Related Resources +// +// [PutBucketCors] +// +// [RESTOPTIONSobject] +// +// [PutBucketCors]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html +// [Enabling Cross-Origin Resource Sharing]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html +// [RESTOPTIONSobject]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html func (c *Client) DeleteBucketCors(ctx context.Context, params *DeleteBucketCorsInput, optFns ...func(*Options)) (*DeleteBucketCorsOutput, error) { if params == nil { params = &DeleteBucketCorsInput{} @@ -52,6 +63,7 @@ type DeleteBucketCorsInput struct { } func (in *DeleteBucketCorsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -106,6 +118,9 @@ func (c *Client) addOperationDeleteBucketCorsMiddlewares(stack *middleware.Stack if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -121,6 +136,18 @@ func (c *Client) addOperationDeleteBucketCorsMiddlewares(stack *middleware.Stack if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteBucketCorsValidationMiddleware(stack); err != nil { return err } @@ -154,6 +181,18 @@ func (c *Client) addOperationDeleteBucketCorsMiddlewares(stack *middleware.Stack if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketEncryption.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketEncryption.go index 7be8c4759..bf654025b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketEncryption.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketEncryption.go @@ -13,20 +13,46 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. This implementation of -// the DELETE action resets the default encryption for the bucket as server-side -// encryption with Amazon S3 managed keys (SSE-S3). For information about the -// bucket default encryption feature, see Amazon S3 Bucket Default Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) -// in the Amazon S3 User Guide. To use this operation, you must have permissions to -// perform the s3:PutEncryptionConfiguration action. The bucket owner has this -// permission by default. The bucket owner can grant this permission to others. 
For -// more information about permissions, see Permissions Related to Bucket -// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// in the Amazon S3 User Guide. The following operations are related to -// DeleteBucketEncryption : -// - PutBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html) -// - GetBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html) +// This implementation of the DELETE action resets the default encryption for the +// bucket as server-side encryption with Amazon S3 managed keys (SSE-S3). +// +// - General purpose buckets - For information about the bucket default +// encryption feature, see [Amazon S3 Bucket Default Encryption]in the Amazon S3 User Guide. +// +// - Directory buckets - For directory buckets, there are only two supported +// options for server-side encryption: SSE-S3 and SSE-KMS. For information about +// the default encryption configuration in directory buckets, see [Setting default server-side encryption behavior for directory buckets]. +// +// Permissions +// +// - General purpose bucket permissions - The s3:PutEncryptionConfiguration +// permission is required in a policy. The bucket owner has this permission by +// default. The bucket owner can grant this permission to others. For more +// information about permissions, see [Permissions Related to Bucket Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. +// +// - Directory bucket permissions - To grant access to this API operation, you +// must have the s3express:PutEncryptionConfiguration permission in an IAM +// identity-based policy instead of a bucket policy. Cross-account access to this +// API operation isn't supported. This operation can only be performed by the +// Amazon Web Services account that owns the resource. For more information about +// directory bucket policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]in the Amazon S3 User Guide. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// s3express-control.region-code.amazonaws.com . 
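A sketch of the call for a general purpose bucket (hypothetical names, not part of the vendored patch); ExpectedBucketOwner is an optional guard here and, per the field notes below, unsupported for directory buckets:

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// resetBucketEncryption reverts the bucket's default encryption to SSE-S3.
func resetBucketEncryption(ctx context.Context, client *s3.Client) error {
	_, err := client.DeleteBucketEncryption(ctx, &s3.DeleteBucketEncryptionInput{
		Bucket:              aws.String("amzn-s3-demo-bucket"), // hypothetical
		ExpectedBucketOwner: aws.String("111122223333"),        // hypothetical account ID
	})
	return err
}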
+// +// The following operations are related to DeleteBucketEncryption : +// +// [PutBucketEncryption] +// +// [GetBucketEncryption] +// +// [GetBucketEncryption]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html +// [PutBucketEncryption]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html +// [Setting default server-side encryption behavior for directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-bucket-encryption.html +// [Amazon S3 Bucket Default Encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html +// [Permissions Related to Bucket Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html func (c *Client) DeleteBucketEncryption(ctx context.Context, params *DeleteBucketEncryptionInput, optFns ...func(*Options)) (*DeleteBucketEncryptionOutput, error) { if params == nil { params = &DeleteBucketEncryptionInput{} @@ -47,18 +73,34 @@ type DeleteBucketEncryptionInput struct { // The name of the bucket containing the server-side encryption configuration to // delete. // + // Directory buckets - When you use this operation with a directory bucket, you + // must use path-style requests in the format + // https://s3express-control.region-code.amazonaws.com/bucket-name . + // Virtual-hosted-style requests aren't supported. Directory bucket names must be + // unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must + // also follow the format bucket-base-name--zone-id--x-s3 (for example, + // DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // // This member is required. Bucket *string // The account ID of the expected bucket owner. If the account ID that you provide // does not match the actual owner of the bucket, the request fails with the HTTP // status code 403 Forbidden (access denied). + // + // For directory buckets, this header is not supported in this API operation. If + // you specify this header, the request fails with the HTTP status code 501 Not + // Implemented . 
ExpectedBucketOwner *string noSmithyDocumentSerde } func (in *DeleteBucketEncryptionInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -113,6 +155,9 @@ func (c *Client) addOperationDeleteBucketEncryptionMiddlewares(stack *middleware if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -128,6 +173,18 @@ func (c *Client) addOperationDeleteBucketEncryptionMiddlewares(stack *middleware if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteBucketEncryptionValidationMiddleware(stack); err != nil { return err } @@ -161,6 +218,18 @@ func (c *Client) addOperationDeleteBucketEncryptionMiddlewares(stack *middleware if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketIntelligentTieringConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketIntelligentTieringConfiguration.go index 734d23b04..a62386c3e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketIntelligentTieringConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketIntelligentTieringConfiguration.go @@ -13,25 +13,38 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Deletes the S3 -// Intelligent-Tiering configuration from the specified bucket. The S3 -// Intelligent-Tiering storage class is designed to optimize storage costs by -// automatically moving data to the most cost-effective storage access tier, +// This operation is not supported for directory buckets. +// +// Deletes the S3 Intelligent-Tiering configuration from the specified bucket. +// +// The S3 Intelligent-Tiering storage class is designed to optimize storage costs +// by automatically moving data to the most cost-effective storage access tier, // without performance impact or operational overhead. S3 Intelligent-Tiering // delivers automatic cost savings in three low latency and high throughput access // tiers. To get the lowest storage cost on data that can be accessed in minutes to -// hours, you can choose to activate additional archiving capabilities. The S3 -// Intelligent-Tiering storage class is the ideal storage class for data with -// unknown, changing, or unpredictable access patterns, independent of object size -// or retention period. If the size of an object is less than 128 KB, it is not -// monitored and not eligible for auto-tiering. Smaller objects can be stored, but -// they are always charged at the Frequent Access tier rates in the S3 -// Intelligent-Tiering storage class. 
For more information, see Storage class for -// automatically optimizing frequently and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access) -// . Operations related to DeleteBucketIntelligentTieringConfiguration include: -// - GetBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html) -// - PutBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html) -// - ListBucketIntelligentTieringConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html) +// hours, you can choose to activate additional archiving capabilities. +// +// The S3 Intelligent-Tiering storage class is the ideal storage class for data +// with unknown, changing, or unpredictable access patterns, independent of object +// size or retention period. If the size of an object is less than 128 KB, it is +// not monitored and not eligible for auto-tiering. Smaller objects can be stored, +// but they are always charged at the Frequent Access tier rates in the S3 +// Intelligent-Tiering storage class. +// +// For more information, see [Storage class for automatically optimizing frequently and infrequently accessed objects]. +// +// Operations related to DeleteBucketIntelligentTieringConfiguration include: +// +// [GetBucketIntelligentTieringConfiguration] +// +// [PutBucketIntelligentTieringConfiguration] +// +// [ListBucketIntelligentTieringConfigurations] +// +// [ListBucketIntelligentTieringConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html +// [GetBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html +// [PutBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html +// [Storage class for automatically optimizing frequently and infrequently accessed objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access func (c *Client) DeleteBucketIntelligentTieringConfiguration(ctx context.Context, params *DeleteBucketIntelligentTieringConfigurationInput, optFns ...func(*Options)) (*DeleteBucketIntelligentTieringConfigurationOutput, error) { if params == nil { params = &DeleteBucketIntelligentTieringConfigurationInput{} @@ -64,6 +77,7 @@ type DeleteBucketIntelligentTieringConfigurationInput struct { } func (in *DeleteBucketIntelligentTieringConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -118,6 +132,9 @@ func (c *Client) addOperationDeleteBucketIntelligentTieringConfigurationMiddlewa if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -133,6 +150,18 @@ func (c *Client) addOperationDeleteBucketIntelligentTieringConfigurationMiddlewa if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = 
addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteBucketIntelligentTieringConfigurationValidationMiddleware(stack); err != nil { return err } @@ -166,6 +195,18 @@ func (c *Client) addOperationDeleteBucketIntelligentTieringConfigurationMiddlewa if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketInventoryConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketInventoryConfiguration.go index 3b8d81a43..d24e3f754 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketInventoryConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketInventoryConfiguration.go @@ -13,18 +13,32 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Deletes an inventory -// configuration (identified by the inventory ID) from the bucket. To use this -// operation, you must have permissions to perform the s3:PutInventoryConfiguration -// action. The bucket owner has this permission by default. The bucket owner can -// grant this permission to others. For more information about permissions, see -// Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// . For information about the Amazon S3 inventory feature, see Amazon S3 Inventory (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html) -// . Operations related to DeleteBucketInventoryConfiguration include: -// - GetBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html) -// - PutBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html) -// - ListBucketInventoryConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html) +// This operation is not supported for directory buckets. +// +// Deletes an inventory configuration (identified by the inventory ID) from the +// bucket. +// +// To use this operation, you must have permissions to perform the +// s3:PutInventoryConfiguration action. The bucket owner has this permission by +// default. The bucket owner can grant this permission to others. For more +// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. +// +// For information about the Amazon S3 inventory feature, see [Amazon S3 Inventory]. 
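Both this operation and the Intelligent-Tiering delete above share the same bucket-plus-ID call shape; a combined sketch with hypothetical identifiers (not part of the vendored patch):

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// deleteTieringAndInventory removes one S3 Intelligent-Tiering configuration
// and one inventory configuration, each identified by its configuration ID.
func deleteTieringAndInventory(ctx context.Context, client *s3.Client, bucket string) error {
	if _, err := client.DeleteBucketIntelligentTieringConfiguration(ctx,
		&s3.DeleteBucketIntelligentTieringConfigurationInput{
			Bucket: aws.String(bucket),
			Id:     aws.String("ExampleTieringConfig"), // hypothetical ID
		}); err != nil {
		return err
	}
	_, err := client.DeleteBucketInventoryConfiguration(ctx,
		&s3.DeleteBucketInventoryConfigurationInput{
			Bucket: aws.String(bucket),
			Id:     aws.String("ExampleInventoryReport"), // hypothetical ID
		})
	return err
}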
+// +// Operations related to DeleteBucketInventoryConfiguration include: +// +// [GetBucketInventoryConfiguration] +// +// [PutBucketInventoryConfiguration] +// +// [ListBucketInventoryConfigurations] +// +// [Amazon S3 Inventory]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html +// [ListBucketInventoryConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html +// [PutBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html +// [GetBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html func (c *Client) DeleteBucketInventoryConfiguration(ctx context.Context, params *DeleteBucketInventoryConfigurationInput, optFns ...func(*Options)) (*DeleteBucketInventoryConfigurationOutput, error) { if params == nil { params = &DeleteBucketInventoryConfigurationInput{} @@ -61,6 +75,7 @@ type DeleteBucketInventoryConfigurationInput struct { } func (in *DeleteBucketInventoryConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -115,6 +130,9 @@ func (c *Client) addOperationDeleteBucketInventoryConfigurationMiddlewares(stack if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -130,6 +148,18 @@ func (c *Client) addOperationDeleteBucketInventoryConfigurationMiddlewares(stack if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteBucketInventoryConfigurationValidationMiddleware(stack); err != nil { return err } @@ -163,6 +193,18 @@ func (c *Client) addOperationDeleteBucketInventoryConfigurationMiddlewares(stack if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketLifecycle.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketLifecycle.go index 88928b284..489be24c4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketLifecycle.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketLifecycle.go @@ -13,20 +13,61 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Deletes the lifecycle -// configuration from the specified bucket. 
Amazon S3 removes all the lifecycle -// configuration rules in the lifecycle subresource associated with the bucket. -// Your objects never expire, and Amazon S3 no longer automatically deletes any -// objects on the basis of rules contained in the deleted lifecycle configuration. -// To use this operation, you must have permission to perform the -// s3:PutLifecycleConfiguration action. By default, the bucket owner has this -// permission and the bucket owner can grant this permission to others. There is -// usually some time lag before lifecycle configuration deletion is fully -// propagated to all the Amazon S3 systems. For more information about the object -// expiration, see Elements to Describe Lifecycle Actions (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#intro-lifecycle-rules-actions) -// . Related actions include: -// - PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) -// - GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html) +// Deletes the lifecycle configuration from the specified bucket. Amazon S3 +// removes all the lifecycle configuration rules in the lifecycle subresource +// associated with the bucket. Your objects never expire, and Amazon S3 no longer +// automatically deletes any objects on the basis of rules contained in the deleted +// lifecycle configuration. +// +// Permissions +// - General purpose bucket permissions - By default, all Amazon S3 resources +// are private, including buckets, objects, and related subresources (for example, +// lifecycle configuration and website configuration). Only the resource owner +// (that is, the Amazon Web Services account that created it) can access the +// resource. The resource owner can optionally grant access permissions to others +// by writing an access policy. For this operation, a user must have the +// s3:PutLifecycleConfiguration permission. +// +// For more information about permissions, see [Managing Access Permissions to Your Amazon S3 Resources]. +// +// - Directory bucket permissions - You must have the +// s3express:PutLifecycleConfiguration permission in an IAM identity-based policy +// to use this operation. Cross-account access to this API operation isn't +// supported. The resource owner can optionally grant access permissions to others +// by creating a role or user for them as long as they are within the same account +// as the owner and resource. +// +// For more information about directory bucket policies and permissions, see [Authorizing Regional endpoint APIs with IAM]in +// +// the Amazon S3 User Guide. +// +// Directory buckets - For directory buckets, you must make requests for this API +// +// operation to the Regional endpoint. These endpoints support path-style requests +// in the format https://s3express-control.region-code.amazonaws.com/bucket-name +// . Virtual-hosted-style requests aren't supported. For more information about +// endpoints in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more +// information about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// s3express-control.region.amazonaws.com . 
+// +// For more information about the object expiration, see [Elements to Describe Lifecycle Actions]. +// +// Related actions include: +// +// [PutBucketLifecycleConfiguration] +// +// [GetBucketLifecycleConfiguration] +// +// [PutBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html +// [Elements to Describe Lifecycle Actions]: https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#intro-lifecycle-rules-actions +// [GetBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html +// [Authorizing Regional endpoint APIs with IAM]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html +// +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html func (c *Client) DeleteBucketLifecycle(ctx context.Context, params *DeleteBucketLifecycleInput, optFns ...func(*Options)) (*DeleteBucketLifecycleOutput, error) { if params == nil { params = &DeleteBucketLifecycleInput{} @@ -52,12 +93,16 @@ type DeleteBucketLifecycleInput struct { // The account ID of the expected bucket owner. If the account ID that you provide // does not match the actual owner of the bucket, the request fails with the HTTP // status code 403 Forbidden (access denied). + // + // This parameter applies to general purpose buckets only. It is not supported for + // directory bucket lifecycle configurations. ExpectedBucketOwner *string noSmithyDocumentSerde } func (in *DeleteBucketLifecycleInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -112,6 +157,9 @@ func (c *Client) addOperationDeleteBucketLifecycleMiddlewares(stack *middleware. if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -127,6 +175,18 @@ func (c *Client) addOperationDeleteBucketLifecycleMiddlewares(stack *middleware. if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteBucketLifecycleValidationMiddleware(stack); err != nil { return err } @@ -160,6 +220,18 @@ func (c *Client) addOperationDeleteBucketLifecycleMiddlewares(stack *middleware. 
if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketMetadataTableConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketMetadataTableConfiguration.go new file mode 100644 index 000000000..880d74d94 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketMetadataTableConfiguration.go @@ -0,0 +1,237 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes a metadata table configuration from a general purpose bucket. For more +// +// information, see [Accelerating data discovery with S3 Metadata]in the Amazon S3 User Guide. +// +// Permissions To use this operation, you must have the +// s3:DeleteBucketMetadataTableConfiguration permission. For more information, see [Setting up permissions for configuring metadata tables] +// in the Amazon S3 User Guide. +// +// The following operations are related to DeleteBucketMetadataTableConfiguration : +// +// [CreateBucketMetadataTableConfiguration] +// +// [GetBucketMetadataTableConfiguration] +// +// [Setting up permissions for configuring metadata tables]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/metadata-tables-permissions.html +// [GetBucketMetadataTableConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetadataTableConfiguration.html +// [CreateBucketMetadataTableConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucketMetadataTableConfiguration.html +// [Accelerating data discovery with S3 Metadata]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/metadata-tables-overview.html +func (c *Client) DeleteBucketMetadataTableConfiguration(ctx context.Context, params *DeleteBucketMetadataTableConfigurationInput, optFns ...func(*Options)) (*DeleteBucketMetadataTableConfigurationOutput, error) { + if params == nil { + params = &DeleteBucketMetadataTableConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteBucketMetadataTableConfiguration", params, optFns, c.addOperationDeleteBucketMetadataTableConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteBucketMetadataTableConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteBucketMetadataTableConfigurationInput struct { + + // The general purpose bucket that you want to remove the metadata table + // configuration from. + // + // This member is required. + Bucket *string + + // The expected bucket owner of the general purpose bucket that you want to + // remove the metadata table configuration from. 
+ ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +func (in *DeleteBucketMetadataTableConfigurationInput) bindEndpointParams(p *EndpointParameters) { + + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + +type DeleteBucketMetadataTableConfigurationOutput struct { + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteBucketMetadataTableConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketMetadataTableConfiguration{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpDeleteBucketMetadataTableConfiguration{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBucketMetadataTableConfiguration"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpDeleteBucketMetadataTableConfigurationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBucketMetadataTableConfiguration(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addDeleteBucketMetadataTableConfigurationUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, 
options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func (v *DeleteBucketMetadataTableConfigurationInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opDeleteBucketMetadataTableConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DeleteBucketMetadataTableConfiguration", + } +} + +// getDeleteBucketMetadataTableConfigurationBucketMember returns a pointer to +// string denoting a provided bucket member value and a boolean indicating if the +// input has a modeled bucket name. +func getDeleteBucketMetadataTableConfigurationBucketMember(input interface{}) (*string, bool) { + in := input.(*DeleteBucketMetadataTableConfigurationInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addDeleteBucketMetadataTableConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getDeleteBucketMetadataTableConfigurationBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketMetricsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketMetricsConfiguration.go index 21384351f..ed96b0253 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketMetricsConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketMetricsConfiguration.go @@ -13,22 +13,35 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Deletes a metrics -// configuration for the Amazon CloudWatch request metrics (specified by the -// metrics configuration ID) from the bucket. Note that this doesn't include the -// daily storage metrics. To use this operation, you must have permissions to -// perform the s3:PutMetricsConfiguration action. The bucket owner has this -// permission by default. The bucket owner can grant this permission to others. For -// more information about permissions, see Permissions Related to Bucket -// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// .
For information about CloudWatch request metrics for Amazon S3, see -// Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html) -// . The following operations are related to DeleteBucketMetricsConfiguration : -// - GetBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html) -// - PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html) -// - ListBucketMetricsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html) -// - Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html) +// This operation is not supported for directory buckets. +// +// Deletes a metrics configuration for the Amazon CloudWatch request metrics +// (specified by the metrics configuration ID) from the bucket. Note that this +// doesn't include the daily storage metrics. +// +// To use this operation, you must have permissions to perform the +// s3:PutMetricsConfiguration action. The bucket owner has this permission by +// default. The bucket owner can grant this permission to others. For more +// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. +// +// For information about CloudWatch request metrics for Amazon S3, see [Monitoring Metrics with Amazon CloudWatch]. +// +// The following operations are related to DeleteBucketMetricsConfiguration : +// +// [GetBucketMetricsConfiguration] +// +// [PutBucketMetricsConfiguration] +// +// [ListBucketMetricsConfigurations] +// +// [Monitoring Metrics with Amazon CloudWatch] +// +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [Monitoring Metrics with Amazon CloudWatch]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html +// [GetBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html +// [ListBucketMetricsConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html +// [PutBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html func (c *Client) DeleteBucketMetricsConfiguration(ctx context.Context, params *DeleteBucketMetricsConfigurationInput, optFns ...func(*Options)) (*DeleteBucketMetricsConfigurationOutput, error) { if params == nil { params = &DeleteBucketMetricsConfigurationInput{} @@ -66,6 +79,7 @@ type DeleteBucketMetricsConfigurationInput struct { } func (in *DeleteBucketMetricsConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -120,6 +134,9 @@ func (c *Client) addOperationDeleteBucketMetricsConfigurationMiddlewares(stack * if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -135,6 +152,18 @@ func (c *Client) 
addOperationDeleteBucketMetricsConfigurationMiddlewares(stack * if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteBucketMetricsConfigurationValidationMiddleware(stack); err != nil { return err } @@ -168,6 +197,18 @@ func (c *Client) addOperationDeleteBucketMetricsConfigurationMiddlewares(stack * if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketOwnershipControls.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketOwnershipControls.go index 4beac6b09..4775a2516 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketOwnershipControls.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketOwnershipControls.go @@ -13,14 +13,22 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Removes OwnershipControls -// for an Amazon S3 bucket. To use this operation, you must have the -// s3:PutBucketOwnershipControls permission. For more information about Amazon S3 -// permissions, see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) -// . For information about Amazon S3 Object Ownership, see Using Object Ownership (https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html) -// . The following operations are related to DeleteBucketOwnershipControls : -// - GetBucketOwnershipControls -// - PutBucketOwnershipControls +// This operation is not supported for directory buckets. +// +// Removes OwnershipControls for an Amazon S3 bucket. To use this operation, you +// must have the s3:PutBucketOwnershipControls permission. For more information +// about Amazon S3 permissions, see [Specifying Permissions in a Policy]. +// +// For information about Amazon S3 Object Ownership, see [Using Object Ownership]. 
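(Likewise, a hedged sketch of calling the DeleteBucketOwnershipControls operation documented above; client construction is as in the previous sketch, and the bucket name and account ID are placeholders.)

func deleteOwnershipControls(ctx context.Context, client *s3.Client) error {
	_, err := client.DeleteBucketOwnershipControls(ctx,
		&s3.DeleteBucketOwnershipControlsInput{
			Bucket: aws.String("example-bucket"),
			// Optional guard: if this account ID does not own the bucket,
			// the request fails with 403 Forbidden (access denied).
			ExpectedBucketOwner: aws.String("111122223333"),
		})
	return err
}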
+// +// The following operations are related to DeleteBucketOwnershipControls : +// +// # GetBucketOwnershipControls +// +// # PutBucketOwnershipControls +// +// [Using Object Ownership]: https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html +// [Specifying Permissions in a Policy]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html func (c *Client) DeleteBucketOwnershipControls(ctx context.Context, params *DeleteBucketOwnershipControlsInput, optFns ...func(*Options)) (*DeleteBucketOwnershipControlsOutput, error) { if params == nil { params = &DeleteBucketOwnershipControlsInput{} @@ -52,6 +60,7 @@ type DeleteBucketOwnershipControlsInput struct { } func (in *DeleteBucketOwnershipControlsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -106,6 +115,9 @@ func (c *Client) addOperationDeleteBucketOwnershipControlsMiddlewares(stack *mid if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -121,6 +133,18 @@ func (c *Client) addOperationDeleteBucketOwnershipControlsMiddlewares(stack *mid if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteBucketOwnershipControlsValidationMiddleware(stack); err != nil { return err } @@ -154,6 +178,18 @@ func (c *Client) addOperationDeleteBucketOwnershipControlsMiddlewares(stack *mid if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketPolicy.go index b8e1f56a1..048abe79e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketPolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketPolicy.go @@ -13,44 +13,59 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Deletes the policy of a specified bucket. Directory buckets - For directory -// buckets, you must make requests for this API operation to the Regional endpoint. -// These endpoints support path-style requests in the format -// https://s3express-control.region_code.amazonaws.com/bucket-name . -// Virtual-hosted-style requests aren't supported. For more information, see -// Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. Permissions If you are using an identity other than -// the root user of the Amazon Web Services account that owns the bucket, the -// calling identity must both have the DeleteBucketPolicy permissions on the -// specified bucket and belong to the bucket owner's account in order to use this -// operation. 
If you don't have DeleteBucketPolicy permissions, Amazon S3 returns -// a 403 Access Denied error. If you have the correct permissions, but you're not -// using an identity that belongs to the bucket owner's account, Amazon S3 returns -// a 405 Method Not Allowed error. To ensure that bucket owners don't -// inadvertently lock themselves out of their own buckets, the root principal in a -// bucket owner's Amazon Web Services account can perform the GetBucketPolicy , -// PutBucketPolicy , and DeleteBucketPolicy API actions, even if their bucket -// policy explicitly denies the root principal's access. Bucket owner root -// principals can only be blocked from performing these API actions by VPC endpoint -// policies and Amazon Web Services Organizations policies. +// Deletes the policy of a specified bucket. +// +// Directory buckets - For directory buckets, you must make requests for this API +// operation to the Regional endpoint. These endpoints support path-style requests +// in the format https://s3express-control.region-code.amazonaws.com/bucket-name . +// Virtual-hosted-style requests aren't supported. For more information about +// endpoints in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more +// information about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. +// +// Permissions If you are using an identity other than the root user of the Amazon +// Web Services account that owns the bucket, the calling identity must both have +// the DeleteBucketPolicy permissions on the specified bucket and belong to the +// bucket owner's account in order to use this operation. +// +// If you don't have DeleteBucketPolicy permissions, Amazon S3 returns a 403 +// Access Denied error. If you have the correct permissions, but you're not using +// an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 +// Method Not Allowed error. +// +// To ensure that bucket owners don't inadvertently lock themselves out of their +// own buckets, the root principal in a bucket owner's Amazon Web Services account +// can perform the GetBucketPolicy , PutBucketPolicy , and DeleteBucketPolicy API +// actions, even if their bucket policy explicitly denies the root principal's +// access. Bucket owner root principals can only be blocked from performing these +// API actions by VPC endpoint policies and Amazon Web Services Organizations +// policies. +// // - General purpose bucket permissions - The s3:DeleteBucketPolicy permission is // required in a policy. For more information about general purpose buckets bucket -// policies, see Using Bucket Policies and User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html) -// in the Amazon S3 User Guide. +// policies, see [Using Bucket Policies and User Policies]in the Amazon S3 User Guide. +// // - Directory bucket permissions - To grant access to this API operation, you // must have the s3express:DeleteBucketPolicy permission in an IAM identity-based // policy instead of a bucket policy. Cross-account access to this API operation // isn't supported. This operation can only be performed by the Amazon Web Services // account that owns the resource. 
For more information about directory bucket -// policies and permissions, see Amazon Web Services Identity and Access -// Management (IAM) for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html) -// in the Amazon S3 User Guide. +// policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]in the Amazon S3 User Guide. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// s3express-control.region-code.amazonaws.com . +// +// # The following operations are related to DeleteBucketPolicy // -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// s3express-control.region.amazonaws.com . The following operations are related to -// DeleteBucketPolicy -// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) -// - DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) +// [CreateBucket] +// +// [DeleteObject] +// +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [DeleteObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html +// [Using Bucket Policies and User Policies]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html +// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html +// [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html func (c *Client) DeleteBucketPolicy(ctx context.Context, params *DeleteBucketPolicyInput, optFns ...func(*Options)) (*DeleteBucketPolicyOutput, error) { if params == nil { params = &DeleteBucketPolicyInput{} @@ -68,30 +83,36 @@ func (c *Client) DeleteBucketPolicy(ctx context.Context, params *DeleteBucketPol type DeleteBucketPolicyInput struct { - // The bucket name. Directory buckets - When you use this operation with a - // directory bucket, you must use path-style requests in the format - // https://s3express-control.region_code.amazonaws.com/bucket-name . + // The bucket name. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use path-style requests in the format + // https://s3express-control.region-code.amazonaws.com/bucket-name . // Virtual-hosted-style requests aren't supported. Directory bucket names must be - // unique in the chosen Availability Zone. Bucket names must also follow the format - // bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 - // ). For information about bucket naming restrictions, see Directory bucket - // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide + // unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must + // also follow the format bucket-base-name--zone-id--x-s3 (for example, + // DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). 
For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html // // This member is required. Bucket *string // The account ID of the expected bucket owner. If the account ID that you provide // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). For directory buckets, this header - // is not supported in this API operation. If you specify this header, the request - // fails with the HTTP status code 501 Not Implemented . + // status code 403 Forbidden (access denied). + // + // For directory buckets, this header is not supported in this API operation. If + // you specify this header, the request fails with the HTTP status code 501 Not + // Implemented . ExpectedBucketOwner *string noSmithyDocumentSerde } func (in *DeleteBucketPolicyInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -146,6 +167,9 @@ func (c *Client) addOperationDeleteBucketPolicyMiddlewares(stack *middleware.Sta if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -161,6 +185,18 @@ func (c *Client) addOperationDeleteBucketPolicyMiddlewares(stack *middleware.Sta if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteBucketPolicyValidationMiddleware(stack); err != nil { return err } @@ -194,6 +230,18 @@ func (c *Client) addOperationDeleteBucketPolicyMiddlewares(stack *middleware.Sta if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketReplication.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketReplication.go index 9fdc6bcf3..f5c434b8d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketReplication.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketReplication.go @@ -13,18 +13,32 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Deletes the replication -// configuration from the bucket. To use this operation, you must have permissions -// to perform the s3:PutReplicationConfiguration action. The bucket owner has -// these permissions by default and can grant it to others. 
For more information -// about permissions, see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// . It can take a while for the deletion of a replication configuration to fully -// propagate. For information about replication configuration, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) -// in the Amazon S3 User Guide. The following operations are related to -// DeleteBucketReplication : -// - PutBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html) -// - GetBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketReplication.html) +// This operation is not supported for directory buckets. +// +// Deletes the replication configuration from the bucket. +// +// To use this operation, you must have permissions to perform the +// s3:PutReplicationConfiguration action. The bucket owner has these permissions by +// default and can grant it to others. For more information about permissions, see [Permissions Related to Bucket Subresource Operations] +// and [Managing Access Permissions to Your Amazon S3 Resources]. +// +// It can take a while for the deletion of a replication configuration to fully +// propagate. +// +// For information about replication configuration, see [Replication] in the Amazon S3 User +// Guide. +// +// The following operations are related to DeleteBucketReplication : +// +// [PutBucketReplication] +// +// [GetBucketReplication] +// +// [GetBucketReplication]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketReplication.html +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [PutBucketReplication]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html +// [Replication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html func (c *Client) DeleteBucketReplication(ctx context.Context, params *DeleteBucketReplicationInput, optFns ...func(*Options)) (*DeleteBucketReplicationOutput, error) { if params == nil { params = &DeleteBucketReplicationInput{} @@ -42,7 +56,7 @@ func (c *Client) DeleteBucketReplication(ctx context.Context, params *DeleteBuck type DeleteBucketReplicationInput struct { - // The bucket name. + // The bucket name. // // This member is required. 
Bucket *string @@ -56,6 +70,7 @@ type DeleteBucketReplicationInput struct { } func (in *DeleteBucketReplicationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -110,6 +125,9 @@ func (c *Client) addOperationDeleteBucketReplicationMiddlewares(stack *middlewar if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -125,6 +143,18 @@ func (c *Client) addOperationDeleteBucketReplicationMiddlewares(stack *middlewar if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteBucketReplicationValidationMiddleware(stack); err != nil { return err } @@ -158,6 +188,18 @@ func (c *Client) addOperationDeleteBucketReplicationMiddlewares(stack *middlewar if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketTagging.go index ae737d40a..ab73775a1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketTagging.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketTagging.go @@ -13,13 +13,22 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Deletes the tags from the -// bucket. To use this operation, you must have permission to perform the +// This operation is not supported for directory buckets. +// +// Deletes the tags from the bucket. +// +// To use this operation, you must have permission to perform the // s3:PutBucketTagging action. By default, the bucket owner has this permission and -// can grant this permission to others. The following operations are related to -// DeleteBucketTagging : -// - GetBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html) -// - PutBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html) +// can grant this permission to others. 
+// +// The following operations are related to DeleteBucketTagging : +// +// [GetBucketTagging] +// +// [PutBucketTagging] +// +// [GetBucketTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html +// [PutBucketTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html func (c *Client) DeleteBucketTagging(ctx context.Context, params *DeleteBucketTaggingInput, optFns ...func(*Options)) (*DeleteBucketTaggingOutput, error) { if params == nil { params = &DeleteBucketTaggingInput{} @@ -51,6 +60,7 @@ type DeleteBucketTaggingInput struct { } func (in *DeleteBucketTaggingInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -105,6 +115,9 @@ func (c *Client) addOperationDeleteBucketTaggingMiddlewares(stack *middleware.St if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -120,6 +133,18 @@ func (c *Client) addOperationDeleteBucketTaggingMiddlewares(stack *middleware.St if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteBucketTaggingValidationMiddleware(stack); err != nil { return err } @@ -153,6 +178,18 @@ func (c *Client) addOperationDeleteBucketTaggingMiddlewares(stack *middleware.St if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketWebsite.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketWebsite.go index 425936dfa..6ea3b745d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketWebsite.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketWebsite.go @@ -13,20 +13,31 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. This action removes the -// website configuration for a bucket. Amazon S3 returns a 200 OK response upon -// successfully deleting a website configuration on the specified bucket. You will -// get a 200 OK response if the website configuration you are trying to delete -// does not exist on the bucket. Amazon S3 returns a 404 response if the bucket -// specified in the request does not exist. This DELETE action requires the -// S3:DeleteBucketWebsite permission. By default, only the bucket owner can delete -// the website configuration attached to a bucket. However, bucket owners can grant -// other users permission to delete the website configuration by writing a bucket -// policy granting them the S3:DeleteBucketWebsite permission. For more -// information about hosting websites, see Hosting Websites on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html) -// . 
The following operations are related to DeleteBucketWebsite : -// - GetBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketWebsite.html) -// - PutBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html) +// This operation is not supported for directory buckets. +// +// This action removes the website configuration for a bucket. Amazon S3 returns a +// 200 OK response upon successfully deleting a website configuration on the +// specified bucket. You will get a 200 OK response if the website configuration +// you are trying to delete does not exist on the bucket. Amazon S3 returns a 404 +// response if the bucket specified in the request does not exist. +// +// This DELETE action requires the S3:DeleteBucketWebsite permission. By default, +// only the bucket owner can delete the website configuration attached to a bucket. +// However, bucket owners can grant other users permission to delete the website +// configuration by writing a bucket policy granting them the +// S3:DeleteBucketWebsite permission. +// +// For more information about hosting websites, see [Hosting Websites on Amazon S3]. +// +// The following operations are related to DeleteBucketWebsite : +// +// [GetBucketWebsite] +// +// [PutBucketWebsite] +// +// [GetBucketWebsite]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketWebsite.html +// [PutBucketWebsite]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html +// [Hosting Websites on Amazon S3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html func (c *Client) DeleteBucketWebsite(ctx context.Context, params *DeleteBucketWebsiteInput, optFns ...func(*Options)) (*DeleteBucketWebsiteOutput, error) { if params == nil { params = &DeleteBucketWebsiteInput{} @@ -58,6 +69,7 @@ type DeleteBucketWebsiteInput struct { } func (in *DeleteBucketWebsiteInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -112,6 +124,9 @@ func (c *Client) addOperationDeleteBucketWebsiteMiddlewares(stack *middleware.St if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -127,6 +142,18 @@ func (c *Client) addOperationDeleteBucketWebsiteMiddlewares(stack *middleware.St if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteBucketWebsiteValidationMiddleware(stack); err != nil { return err } @@ -160,6 +187,18 @@ func (c *Client) addOperationDeleteBucketWebsiteMiddlewares(stack *middleware.St if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObject.go index 
c1e5ff73a..f57c88ea5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObject.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObject.go @@ -11,6 +11,7 @@ import ( "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" + "time" ) // Removes an object from a bucket. The behavior depends on the bucket's @@ -22,9 +23,7 @@ import ( // - If bucket versioning is enabled, the operation inserts a delete marker, // which becomes the current version of the object. To permanently delete an object // in a versioned bucket, you must include the object’s versionId in the request. -// For more information about versioning-enabled buckets, see Deleting object -// versions from a versioning-enabled bucket (https://docs.aws.amazon.com/AmazonS3/latest/userguide/DeletingObjectVersions.html) -// . +// For more information about versioning-enabled buckets, see [Deleting object versions from a versioning-enabled bucket]. // // - If bucket versioning is suspended, the operation removes the object that // has a null versionId , if there is one, and inserts a delete marker that @@ -32,9 +31,7 @@ import ( // versionId , and all versions of the object have a versionId , Amazon S3 does // not remove the object and only inserts a delete marker. To permanently delete an // object that has a versionId , you must include the object’s versionId in the -// request. For more information about versioning-suspended buckets, see -// Deleting objects from versioning-suspended buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/DeletingObjectsfromVersioningSuspendedBuckets.html) -// . +// request. For more information about versioning-suspended buckets, see [Deleting objects from versioning-suspended buckets]. // // - Directory buckets - S3 Versioning isn't enabled and supported for directory // buckets. For this API operation, only the null value of the version ID is @@ -44,38 +41,45 @@ import ( // - Directory buckets - For directory buckets, you must make requests for this // API operation to the Zonal endpoint. These endpoints support // virtual-hosted-style requests in the format -// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . -// Path-style requests are not supported. For more information, see Regional and -// Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. +// https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name +// . Path-style requests are not supported. For more information about endpoints +// in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information +// about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. // // To remove a specific version, you must use the versionId query parameter. Using // this query parameter permanently deletes the version. If the object deleted is a // delete marker, Amazon S3 sets the response header x-amz-delete-marker to true. +// // If the object you want to delete is in a bucket where the bucket versioning // configuration is MFA Delete enabled, you must include the x-amz-mfa request // header in the DELETE versionId request. Requests that include x-amz-mfa must -// use HTTPS. 
For more information about MFA Delete, see Using MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMFADelete.html) -// in the Amazon S3 User Guide. To see sample requests that use versioning, see -// Sample Request (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html#ExampleVersionObjectDelete) -// . Directory buckets - MFA delete is not supported by directory buckets. You can -// delete objects by explicitly calling DELETE Object or calling ( -// PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html) -// ) to enable Amazon S3 to remove them for you. If you want to block users or -// accounts from removing or deleting objects from your bucket, you must deny them -// the s3:DeleteObject , s3:DeleteObjectVersion , and s3:PutLifeCycleConfiguration -// actions. Directory buckets - S3 Lifecycle is not supported by directory buckets. +// use HTTPS. For more information about MFA Delete, see [Using MFA Delete]in the Amazon S3 User +// Guide. To see sample requests that use versioning, see [Sample Request]. +// +// Directory buckets - MFA delete is not supported by directory buckets. +// +// You can delete objects by explicitly calling DELETE Object or calling ([PutBucketLifecycle] ) to +// enable Amazon S3 to remove them for you. If you want to block users or accounts +// from removing or deleting objects from your bucket, you must deny them the +// s3:DeleteObject , s3:DeleteObjectVersion , and s3:PutLifeCycleConfiguration +// actions. +// +// Directory buckets - S3 Lifecycle is not supported by directory buckets. +// // Permissions +// // - General purpose bucket permissions - The following permissions are required // in your policies when your DeleteObjects request includes specific headers. +// // - s3:DeleteObject - To delete an object from a bucket, you must always have // the s3:DeleteObject permission. +// // - s3:DeleteObjectVersion - To delete a specific version of an object from a -// versioning-enabled bucket, you must have the s3:DeleteObjectVersion -// permission. +// versioning-enabled bucket, you must have the s3:DeleteObjectVersion permission. +// // - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// API operation for session-based authorization. Specifically, you grant the +// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation +// for session-based authorization. Specifically, you grant the // s3express:CreateSession permission to the directory bucket in a bucket policy // or an IAM identity-based policy. Then, you make the CreateSession API call on // the bucket to obtain a session token. With the session token in your request @@ -83,13 +87,24 @@ import ( // expires, you make another CreateSession API call to generate a new session // token for use. Amazon Web Services CLI or SDKs create session and refresh the // session token automatically to avoid service interruptions when a session -// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// . +// expires. For more information about authorization, see [CreateSession]CreateSession . +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket-name.s3express-zone-id.region-code.amazonaws.com . 
+// +// The following action is related to DeleteObject : +// +// [PutObject] // -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket_name.s3express-az_id.region.amazonaws.com . The following action is -// related to DeleteObject : -// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// [Sample Request]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html#ExampleVersionObjectDelete +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [Deleting objects from versioning-suspended buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/DeletingObjectsfromVersioningSuspendedBuckets.html +// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html +// [PutBucketLifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html +// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html +// [Deleting object versions from a versioning-enabled bucket]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/DeletingObjectVersions.html +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html +// [Using MFA Delete]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMFADelete.html func (c *Client) DeleteObject(ctx context.Context, params *DeleteObjectInput, optFns ...func(*Options)) (*DeleteObjectOutput, error) { if params == nil { params = &DeleteObjectInput{} @@ -107,31 +122,39 @@ func (c *Client) DeleteObject(ctx context.Context, params *DeleteObjectInput, op type DeleteObjectInput struct { - // The bucket name of the bucket containing the object. Directory buckets - When - // you use this operation with a directory bucket, you must use - // virtual-hosted-style requests in the format - // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not - // supported. Directory bucket names must be unique in the chosen Availability - // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for - // example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket - // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide. Access points - When you use this action with an - // access point, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When using the access point ARN, - // you must direct requests to the access point hostname. The access point hostname - // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. Access points and Object Lambda access points are - // not supported by directory buckets. S3 on Outposts - When you use this action - // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts - // hostname. 
The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // The bucket name of the bucket containing the object. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use virtual-hosted-style requests in the format + // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests + // are not supported. Directory bucket names must be unique in the chosen Zone + // (Availability Zone or Local Zone). Bucket names must follow the format + // bucket-base-name--zone-id--x-s3 (for example, + // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the access + // point ARN. When using the access point ARN, you must direct requests to the + // access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with S3 on Outposts, you must direct + // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the + // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When + // you use this action with S3 on Outposts, the destination bucket must be the + // Outposts access point ARN or the access point alias. For more information about + // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -143,8 +166,9 @@ type DeleteObjectInput struct { // Indicates whether S3 Object Lock should bypass Governance-mode restrictions to // process this operation. To use this header, you must have the - // s3:BypassGovernanceRetention permission. This functionality is not supported for - // directory buckets. + // s3:BypassGovernanceRetention permission. + // + // This functionality is not supported for directory buckets. BypassGovernanceRetention *bool // The account ID of the expected bucket owner. If the account ID that you provide @@ -152,31 +176,69 @@ type DeleteObjectInput struct { // status code 403 Forbidden (access denied). ExpectedBucketOwner *string + // The If-Match header field makes the request method conditional on ETags. 
If the
+ // ETag value does not match, the operation returns a 412 Precondition Failed
+ // error. If the ETag matches or if the object doesn't exist, the operation will
+ // return a 204 Success (No Content) response.
+ //
+ // For more information about conditional requests, see [RFC 7232].
+ //
+ // This functionality is only supported for directory buckets.
+ //
+ // [RFC 7232]: https://tools.ietf.org/html/rfc7232
+ IfMatch *string
+
+ // If present, the object is deleted only if its modification time matches the
+ // provided Timestamp . If the Timestamp values do not match, the operation
+ // returns a 412 Precondition Failed error. If the Timestamp matches or if the
+ // object doesn’t exist, the operation returns a 204 Success (No Content) response.
+ //
+ // This functionality is only supported for directory buckets.
+ IfMatchLastModifiedTime *time.Time
+
+ // If present, the object is deleted only if its size matches the provided size in
+ // bytes. If the Size value does not match, the operation returns a 412
+ // Precondition Failed error. If the Size matches or if the object doesn’t exist,
+ // the operation returns a 204 Success (No Content) response.
+ //
+ // This functionality is only supported for directory buckets.
+ //
+ // You can use the If-Match , x-amz-if-match-last-modified-time , and
+ // x-amz-if-match-size conditional headers in conjunction with each other or
+ // individually.
+ IfMatchSize *int64
+
// The concatenation of the authentication device's serial number, a space, and
// the value that is displayed on your authentication device. Required to
// permanently delete a versioned object if versioning is configured with MFA
- // delete enabled. This functionality is not supported for directory buckets.
+ // delete enabled.
+ //
+ // This functionality is not supported for directory buckets.
MFA *string

// Confirms that the requester knows that they will be charged for the request.
// Bucket owners need not specify this parameter in their requests. If either the
// source or destination S3 bucket has Requester Pays enabled, the requester will
// pay for corresponding charges to copy the object. For information about
- // downloading objects from Requester Pays buckets, see Downloading Objects in
- // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
- // in the Amazon S3 User Guide. This functionality is not supported for directory
- // buckets.
+ // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User
+ // Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
RequestPayer types.RequestPayer

- // Version ID used to reference a specific version of the object. For directory
- // buckets in this API operation, only the null value of the version ID is
- // supported.
+ // Version ID used to reference a specific version of the object.
+ //
+ // For directory buckets in this API operation, only the null value of the version
+ // ID is supported.
VersionId *string noSmithyDocumentSerde } func (in *DeleteObjectInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.Key = in.Key @@ -187,16 +249,23 @@ type DeleteObjectOutput struct { // Indicates whether the specified object version that was permanently deleted was // (true) or was not (false) a delete marker before deletion. In a simple DELETE, // this header indicates whether (true) or not (false) the current version of the - // object is a delete marker. This functionality is not supported for directory - // buckets. + // object is a delete marker. To learn more about delete markers, see [Working with delete markers]. + // + // This functionality is not supported for directory buckets. + // + // [Working with delete markers]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/DeleteMarker.html DeleteMarker *bool // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // Returns the version ID of the delete marker created as a result of the DELETE - // operation. This functionality is not supported for directory buckets. + // operation. + // + // This functionality is not supported for directory buckets. VersionId *string // Metadata pertaining to the operation's result. @@ -248,6 +317,9 @@ func (c *Client) addOperationDeleteObjectMiddlewares(stack *middleware.Stack, op if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -263,6 +335,18 @@ func (c *Client) addOperationDeleteObjectMiddlewares(stack *middleware.Stack, op if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteObjectValidationMiddleware(stack); err != nil { return err } @@ -296,6 +380,18 @@ func (c *Client) addOperationDeleteObjectMiddlewares(stack *middleware.Stack, op if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjectTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjectTagging.go index c5f31dec6..2e69b4317 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjectTagging.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjectTagging.go @@ -12,16 +12,27 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Removes the entire tag -// set from the specified object. For more information about managing object tags, -// see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html) -// . 
To use this operation, you must have permission to perform the -// s3:DeleteObjectTagging action. To delete tags of a specific object version, add -// the versionId query parameter in the request. You will need permission for the -// s3:DeleteObjectVersionTagging action. The following operations are related to -// DeleteObjectTagging : -// - PutObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html) -// - GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) +// This operation is not supported for directory buckets. +// +// Removes the entire tag set from the specified object. For more information +// about managing object tags, see [Object Tagging]. +// +// To use this operation, you must have permission to perform the +// s3:DeleteObjectTagging action. +// +// To delete tags of a specific object version, add the versionId query parameter +// in the request. You will need permission for the s3:DeleteObjectVersionTagging +// action. +// +// The following operations are related to DeleteObjectTagging : +// +// [PutObjectTagging] +// +// [GetObjectTagging] +// +// [PutObjectTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html +// [Object Tagging]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html +// [GetObjectTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html func (c *Client) DeleteObjectTagging(ctx context.Context, params *DeleteObjectTaggingInput, optFns ...func(*Options)) (*DeleteObjectTaggingOutput, error) { if params == nil { params = &DeleteObjectTaggingInput{} @@ -39,23 +50,26 @@ func (c *Client) DeleteObjectTagging(ctx context.Context, params *DeleteObjectTa type DeleteObjectTaggingInput struct { - // The bucket name containing the objects from which to remove the tags. Access - // points - When you use this action with an access point, you must provide the - // alias of the access point in place of the bucket name or specify the access + // The bucket name containing the objects from which to remove the tags. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the access // point ARN. When using the access point ARN, you must direct requests to the // access point hostname. The access point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this // action with an access point through the Amazon Web Services SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. S3 on Outposts - When you use this action with - // Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. - // The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. 
+ // + // S3 on Outposts - When you use this action with S3 on Outposts, you must direct + // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the + // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When + // you use this action with S3 on Outposts, the destination bucket must be the + // Outposts access point ARN or the access point alias. For more information about + // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -77,6 +91,7 @@ type DeleteObjectTaggingInput struct { } func (in *DeleteObjectTaggingInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket } @@ -135,6 +150,9 @@ func (c *Client) addOperationDeleteObjectTaggingMiddlewares(stack *middleware.St if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -150,6 +168,18 @@ func (c *Client) addOperationDeleteObjectTaggingMiddlewares(stack *middleware.St if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteObjectTaggingValidationMiddleware(stack); err != nil { return err } @@ -183,6 +213,18 @@ func (c *Client) addOperationDeleteObjectTaggingMiddlewares(stack *middleware.St if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjects.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjects.go index 05f82cf75..a73f2b6d3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjects.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjects.go @@ -17,47 +17,58 @@ import ( // This operation enables you to delete multiple objects from a bucket using a // single HTTP request. If you know the object keys that you want to delete, then // this operation provides a suitable alternative to sending individual delete -// requests, reducing per-request overhead. The request can contain a list of up to -// 1000 keys that you want to delete. In the XML, you provide the object key names, -// and optionally, version IDs if you want to delete a specific version of the -// object from a versioning-enabled bucket. For each key, Amazon S3 performs a -// delete operation and returns the result of that delete, success or failure, in -// the response. Note that if the object specified in the request is not found, -// Amazon S3 returns the result as deleted. +// requests, reducing per-request overhead. 
+// +// The request can contain a list of up to 1,000 keys that you want to delete. In +// the XML, you provide the object key names, and optionally, version IDs if you +// want to delete a specific version of the object from a versioning-enabled +// bucket. For each key, Amazon S3 performs a delete operation and returns the +// result of that delete, success or failure, in the response. If the object +// specified in the request isn't found, Amazon S3 confirms the deletion by +// returning the result as deleted. +// // - Directory buckets - S3 Versioning isn't enabled and supported for directory // buckets. +// // - Directory buckets - For directory buckets, you must make requests for this // API operation to the Zonal endpoint. These endpoints support // virtual-hosted-style requests in the format -// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . -// Path-style requests are not supported. For more information, see Regional and -// Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. +// https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name +// . Path-style requests are not supported. For more information about endpoints +// in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information +// about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. // // The operation supports two modes for the response: verbose and quiet. By // default, the operation uses verbose mode in which the response includes the // result of deletion of each key in your request. In quiet mode the response // includes only keys where the delete operation encountered an error. For a // successful deletion in a quiet mode, the operation does not return any -// information about the delete in the response body. When performing this action -// on an MFA Delete enabled bucket, that attempts to delete any versioned objects, -// you must include an MFA token. If you do not provide one, the entire request -// will fail, even if there are non-versioned objects you are trying to delete. If -// you provide an invalid token, whether there are versioned keys in the request or -// not, the entire Multi-Object Delete request will fail. For information about MFA -// Delete, see MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete) -// in the Amazon S3 User Guide. Directory buckets - MFA delete is not supported by -// directory buckets. Permissions +// information about the delete in the response body. +// +// When performing this action on an MFA Delete enabled bucket, that attempts to +// delete any versioned objects, you must include an MFA token. If you do not +// provide one, the entire request will fail, even if there are non-versioned +// objects you are trying to delete. If you provide an invalid token, whether there +// are versioned keys in the request or not, the entire Multi-Object Delete request +// will fail. For information about MFA Delete, see [MFA Delete]in the Amazon S3 User Guide. +// +// Directory buckets - MFA delete is not supported by directory buckets. +// +// Permissions +// // - General purpose bucket permissions - The following permissions are required // in your policies when your DeleteObjects request includes specific headers. 
+//
// - s3:DeleteObject - To delete an object from a bucket, you must always specify
// the s3:DeleteObject permission.
+//
// - s3:DeleteObjectVersion - To delete a specific version of an object from a
-// versiong-enabled bucket, you must specify the s3:DeleteObjectVersion
+// versioning-enabled bucket, you must specify the s3:DeleteObjectVersion
// permission.
+//
// - Directory bucket permissions - To grant access to this API operation on a
-// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html)
-// API operation for session-based authorization. Specifically, you grant the
+// directory bucket, we recommend that you use the [CreateSession] API operation
+// for session-based authorization. Specifically, you grant the
// s3express:CreateSession permission to the directory bucket in a bucket policy
// or an IAM identity-based policy. Then, you make the CreateSession API call on
// the bucket to obtain a session token. With the session token in your request
@@ -65,26 +76,43 @@ import (
// expires, you make another CreateSession API call to generate a new session
// token for use. Amazon Web Services CLI or SDKs create the session and refresh the
// session token automatically to avoid service interruptions when a session
-// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html)
-// .
+// expires. For more information about authorization, see [CreateSession].
//
// Content-MD5 request header
+//
// - General purpose bucket - The Content-MD5 request header is required for all
// Multi-Object Delete requests. Amazon S3 uses the header value to ensure that
// your request body has not been altered in transit.
+//
// - Directory bucket - The Content-MD5 request header or an additional checksum
// request header (including x-amz-checksum-crc32 , x-amz-checksum-crc32c ,
// x-amz-checksum-sha1 , or x-amz-checksum-sha256 ) is required for all
// Multi-Object Delete requests.
//
-// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
-// Bucket_name.s3express-az_id.region.amazonaws.com . The following operations are
-// related to DeleteObjects :
-// - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)
-// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)
-// - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html)
-// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html)
-// - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html)
+// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
+// Bucket-name.s3express-zone-id.region-code.amazonaws.com .
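A hedged sketch of the multi-object delete described above, again assuming default configuration loading and placeholder names; quiet mode suppresses the per-key success entries so only failures come back.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Build the batch; the operation accepts at most 1,000 keys per request.
	keys := []string{"logs/2025/01.gz", "logs/2025/02.gz"} // placeholders
	ids := make([]types.ObjectIdentifier, 0, len(keys))
	for _, k := range keys {
		ids = append(ids, types.ObjectIdentifier{Key: aws.String(k)})
	}

	out, err := client.DeleteObjects(ctx, &s3.DeleteObjectsInput{
		Bucket: aws.String("amzn-s3-demo-bucket"),
		Delete: &types.Delete{
			Objects: ids,
			Quiet:   aws.Bool(true), // quiet mode: only failed keys are reported
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, e := range out.Errors {
		log.Printf("could not delete %s: %s", aws.ToString(e.Key), aws.ToString(e.Message))
	}
}

The caller does not set Content-MD5 itself: the RequireChecksum: true wiring in addDeleteObjectsInputChecksumMiddlewares further down is what supplies the mandatory integrity header.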
+// +// The following operations are related to DeleteObjects : +// +// [CreateMultipartUpload] +// +// [UploadPart] +// +// [CompleteMultipartUpload] +// +// [ListParts] +// +// [AbortMultipartUpload] +// +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [ListParts]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html +// [AbortMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html +// [UploadPart]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html +// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html +// [CompleteMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html +// [MFA Delete]: https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete +// [CreateMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html func (c *Client) DeleteObjects(ctx context.Context, params *DeleteObjectsInput, optFns ...func(*Options)) (*DeleteObjectsOutput, error) { if params == nil { params = &DeleteObjectsInput{} @@ -102,31 +130,39 @@ func (c *Client) DeleteObjects(ctx context.Context, params *DeleteObjectsInput, type DeleteObjectsInput struct { - // The bucket name containing the objects to delete. Directory buckets - When you - // use this operation with a directory bucket, you must use virtual-hosted-style - // requests in the format Bucket_name.s3express-az_id.region.amazonaws.com . - // Path-style requests are not supported. Directory bucket names must be unique in - // the chosen Availability Zone. Bucket names must follow the format - // bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 - // ). For information about bucket naming restrictions, see Directory bucket - // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide. Access points - When you use this action with an - // access point, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When using the access point ARN, - // you must direct requests to the access point hostname. The access point hostname - // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. Access points and Object Lambda access points are - // not supported by directory buckets. S3 on Outposts - When you use this action - // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts - // hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. 
For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // The bucket name containing the objects to delete. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use virtual-hosted-style requests in the format + // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests + // are not supported. Directory bucket names must be unique in the chosen Zone + // (Availability Zone or Local Zone). Bucket names must follow the format + // bucket-base-name--zone-id--x-s3 (for example, + // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the access + // point ARN. When using the access point ARN, you must direct requests to the + // access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with S3 on Outposts, you must direct + // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the + // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When + // you use this action with S3 on Outposts, the destination bucket must be the + // Outposts access point ARN or the access point alias. For more information about + // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -138,28 +174,41 @@ type DeleteObjectsInput struct { // Specifies whether you want to delete this object even if it has a // Governance-type Object Lock in place. To use this header, you must have the - // s3:BypassGovernanceRetention permission. This functionality is not supported for - // directory buckets. + // s3:BypassGovernanceRetention permission. + // + // This functionality is not supported for directory buckets. BypassGovernanceRetention *bool // Indicates the algorithm used to create the checksum for the object when you use // the SDK. This header will not provide any additional functionality if you don't // use the SDK. When you send this header, there must be a corresponding // x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon S3 - // fails the request with the HTTP status code 400 Bad Request . For the - // x-amz-checksum-algorithm header, replace algorithm with the supported - // algorithm from the following list: + // fails the request with the HTTP status code 400 Bad Request . 
+ // + // For the x-amz-checksum-algorithm header, replace algorithm with the + // supported algorithm from the following list: + // // - CRC32 + // // - CRC32C + // + // - CRC64NVME + // // - SHA1 + // // - SHA256 - // For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If the individual checksum value you provide - // through x-amz-checksum-algorithm doesn't match the checksum algorithm you set - // through x-amz-sdk-checksum-algorithm , Amazon S3 ignores any provided - // ChecksumAlgorithm parameter and uses the checksum algorithm that matches the - // provided value in x-amz-checksum-algorithm . If you provide an individual - // checksum, Amazon S3 ignores any provided ChecksumAlgorithm parameter. + // + // For more information, see [Checking object integrity] in the Amazon S3 User Guide. + // + // If the individual checksum value you provide through x-amz-checksum-algorithm + // doesn't match the checksum algorithm you set through + // x-amz-sdk-checksum-algorithm , Amazon S3 fails the request with a BadDigest + // error. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm // The account ID of the expected bucket owner. If the account ID that you provide @@ -170,31 +219,38 @@ type DeleteObjectsInput struct { // The concatenation of the authentication device's serial number, a space, and // the value that is displayed on your authentication device. Required to // permanently delete a versioned object if versioning is configured with MFA - // delete enabled. When performing the DeleteObjects operation on an MFA delete - // enabled bucket, which attempts to delete the specified versioned objects, you - // must include an MFA token. If you don't provide an MFA token, the entire request - // will fail, even if there are non-versioned objects that you are trying to - // delete. If you provide an invalid token, whether there are versioned object keys - // in the request or not, the entire Multi-Object Delete request will fail. For - // information about MFA Delete, see MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // delete enabled. + // + // When performing the DeleteObjects operation on an MFA delete enabled bucket, + // which attempts to delete the specified versioned objects, you must include an + // MFA token. If you don't provide an MFA token, the entire request will fail, even + // if there are non-versioned objects that you are trying to delete. If you provide + // an invalid token, whether there are versioned object keys in the request or not, + // the entire Multi-Object Delete request will fail. For information about MFA + // Delete, see [MFA Delete]in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + // + // [MFA Delete]: https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete MFA *string // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. 
If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer noSmithyDocumentSerde } func (in *DeleteObjectsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket } @@ -210,7 +266,9 @@ type DeleteObjectsOutput struct { Errors []types.Error // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // Metadata pertaining to the operation's result. @@ -262,6 +320,9 @@ func (c *Client) addOperationDeleteObjectsMiddlewares(stack *middleware.Stack, o if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -277,6 +338,21 @@ func (c *Client) addOperationDeleteObjectsMiddlewares(stack *middleware.Stack, o if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addRequestChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeleteObjectsValidationMiddleware(stack); err != nil { return err } @@ -316,6 +392,18 @@ func (c *Client) addOperationDeleteObjectsMiddlewares(stack *middleware.Stack, o if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } @@ -345,9 +433,10 @@ func getDeleteObjectsRequestAlgorithmMember(input interface{}) (string, bool) { } func addDeleteObjectsInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ GetAlgorithm: getDeleteObjectsRequestAlgorithmMember, RequireChecksum: true, + RequestChecksumCalculation: options.RequestChecksumCalculation, EnableTrailingChecksum: false, EnableComputeSHA256PayloadHash: true, EnableDecodedContentLengthHeader: true, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeletePublicAccessBlock.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeletePublicAccessBlock.go index 43969e2b1..87e72df27 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeletePublicAccessBlock.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeletePublicAccessBlock.go @@ -13,17 +13,28 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Removes the -// PublicAccessBlock configuration for an Amazon S3 bucket. To use this operation, -// you must have the s3:PutBucketPublicAccessBlock permission. For more -// information about permissions, see Permissions Related to Bucket Subresource -// Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// . The following operations are related to DeletePublicAccessBlock : -// - Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) -// - GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) -// - PutPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html) -// - GetBucketPolicyStatus (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html) +// This operation is not supported for directory buckets. +// +// Removes the PublicAccessBlock configuration for an Amazon S3 bucket. To use +// this operation, you must have the s3:PutBucketPublicAccessBlock permission. For +// more information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. 
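A minimal sketch of the operation documented above, assuming the caller already holds s3:PutBucketPublicAccessBlock on the (placeholder) bucket.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Removes the bucket-level PublicAccessBlock configuration entirely.
	if _, err := client.DeletePublicAccessBlock(ctx, &s3.DeletePublicAccessBlockInput{
		Bucket: aws.String("amzn-s3-demo-bucket"), // placeholder
	}); err != nil {
		log.Fatal(err)
	}
}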
+// +// The following operations are related to DeletePublicAccessBlock : +// +// [Using Amazon S3 Block Public Access] +// +// [GetPublicAccessBlock] +// +// [PutPublicAccessBlock] +// +// [GetBucketPolicyStatus] +// +// [GetPublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html +// [PutPublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [Using Amazon S3 Block Public Access]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html +// [GetBucketPolicyStatus]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html func (c *Client) DeletePublicAccessBlock(ctx context.Context, params *DeletePublicAccessBlockInput, optFns ...func(*Options)) (*DeletePublicAccessBlockOutput, error) { if params == nil { params = &DeletePublicAccessBlockInput{} @@ -55,6 +66,7 @@ type DeletePublicAccessBlockInput struct { } func (in *DeletePublicAccessBlockInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -109,6 +121,9 @@ func (c *Client) addOperationDeletePublicAccessBlockMiddlewares(stack *middlewar if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -124,6 +139,18 @@ func (c *Client) addOperationDeletePublicAccessBlockMiddlewares(stack *middlewar if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpDeletePublicAccessBlockValidationMiddleware(stack); err != nil { return err } @@ -157,6 +184,18 @@ func (c *Client) addOperationDeletePublicAccessBlockMiddlewares(stack *middlewar if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAccelerateConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAccelerateConfiguration.go index 4bb1ff71c..0ed2a8670 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAccelerateConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAccelerateConfiguration.go @@ -14,26 +14,36 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. 
This implementation of -// the GET action uses the accelerate subresource to return the Transfer -// Acceleration state of a bucket, which is either Enabled or Suspended . Amazon S3 -// Transfer Acceleration is a bucket-level feature that enables you to perform -// faster data transfers to and from Amazon S3. To use this operation, you must -// have permission to perform the s3:GetAccelerateConfiguration action. The bucket -// owner has this permission by default. The bucket owner can grant this permission -// to others. For more information about permissions, see Permissions Related to -// Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// in the Amazon S3 User Guide. You set the Transfer Acceleration state of an -// existing bucket to Enabled or Suspended by using the -// PutBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAccelerateConfiguration.html) -// operation. A GET accelerate request does not return a state value for a bucket -// that has no transfer acceleration state. A bucket has no Transfer Acceleration -// state if a state has never been set on the bucket. For more information about -// transfer acceleration, see Transfer Acceleration (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html) -// in the Amazon S3 User Guide. The following operations are related to -// GetBucketAccelerateConfiguration : -// - PutBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAccelerateConfiguration.html) +// This operation is not supported for directory buckets. +// +// This implementation of the GET action uses the accelerate subresource to return +// the Transfer Acceleration state of a bucket, which is either Enabled or +// Suspended . Amazon S3 Transfer Acceleration is a bucket-level feature that +// enables you to perform faster data transfers to and from Amazon S3. +// +// To use this operation, you must have permission to perform the +// s3:GetAccelerateConfiguration action. The bucket owner has this permission by +// default. The bucket owner can grant this permission to others. For more +// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to your Amazon S3 Resources] in the Amazon S3 User Guide. +// +// You set the Transfer Acceleration state of an existing bucket to Enabled or +// Suspended by using the [PutBucketAccelerateConfiguration] operation. +// +// A GET accelerate request does not return a state value for a bucket that has no +// transfer acceleration state. A bucket has no Transfer Acceleration state if a +// state has never been set on the bucket. +// +// For more information about transfer acceleration, see [Transfer Acceleration] in the Amazon S3 User +// Guide. 
+// +// The following operations are related to GetBucketAccelerateConfiguration : +// +// [PutBucketAccelerateConfiguration] +// +// [PutBucketAccelerateConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAccelerateConfiguration.html +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [Managing Access Permissions to your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html +// [Transfer Acceleration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html func (c *Client) GetBucketAccelerateConfiguration(ctx context.Context, params *GetBucketAccelerateConfigurationInput, optFns ...func(*Options)) (*GetBucketAccelerateConfigurationOutput, error) { if params == nil { params = &GetBucketAccelerateConfigurationInput{} @@ -65,16 +75,19 @@ type GetBucketAccelerateConfigurationInput struct { // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer noSmithyDocumentSerde } func (in *GetBucketAccelerateConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -82,7 +95,9 @@ func (in *GetBucketAccelerateConfigurationInput) bindEndpointParams(p *EndpointP type GetBucketAccelerateConfigurationOutput struct { // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // The accelerate configuration of the bucket. 
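A usage sketch of the accelerate query documented above, including the "no state has ever been set" case where the response carries no status value; names are placeholders and the client setup mirrors the earlier sketches.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	out, err := client.GetBucketAccelerateConfiguration(ctx, &s3.GetBucketAccelerateConfigurationInput{
		Bucket: aws.String("amzn-s3-demo-bucket"), // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	switch out.Status {
	case types.BucketAccelerateStatusEnabled, types.BucketAccelerateStatusSuspended:
		fmt.Println("Transfer Acceleration state:", out.Status)
	default:
		// Empty status: acceleration was never configured on this bucket.
		fmt.Println("no Transfer Acceleration state set")
	}
}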
@@ -137,6 +152,9 @@ func (c *Client) addOperationGetBucketAccelerateConfigurationMiddlewares(stack * if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -152,6 +170,18 @@ func (c *Client) addOperationGetBucketAccelerateConfigurationMiddlewares(stack * if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetBucketAccelerateConfigurationValidationMiddleware(stack); err != nil { return err } @@ -185,6 +215,18 @@ func (c *Client) addOperationGetBucketAccelerateConfigurationMiddlewares(stack * if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAcl.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAcl.go index bc7c4ea18..fed95ac01 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAcl.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAcl.go @@ -14,26 +14,35 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. This implementation of -// the GET action uses the acl subresource to return the access control list (ACL) -// of a bucket. To use GET to return the ACL of the bucket, you must have the -// READ_ACP access to the bucket. If READ_ACP permission is granted to the -// anonymous user, you can return the ACL of the bucket without using an -// authorization header. When you use this API operation with an access point, -// provide the alias of the access point in place of the bucket name. When you use -// this API operation with an Object Lambda access point, provide the alias of the -// Object Lambda access point in place of the bucket name. If the Object Lambda -// access point alias in a request is not valid, the error code +// This operation is not supported for directory buckets. +// +// This implementation of the GET action uses the acl subresource to return the +// access control list (ACL) of a bucket. To use GET to return the ACL of the +// bucket, you must have the READ_ACP access to the bucket. If READ_ACP permission +// is granted to the anonymous user, you can return the ACL of the bucket without +// using an authorization header. +// +// When you use this API operation with an access point, provide the alias of the +// access point in place of the bucket name. +// +// When you use this API operation with an Object Lambda access point, provide the +// alias of the Object Lambda access point in place of the bucket name. If the +// Object Lambda access point alias in a request is not valid, the error code // InvalidAccessPointAliasError is returned. 
For more information about -// InvalidAccessPointAliasError , see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) -// . If your bucket uses the bucket owner enforced setting for S3 Object Ownership, +// InvalidAccessPointAliasError , see [List of Error Codes]. +// +// If your bucket uses the bucket owner enforced setting for S3 Object Ownership, // requests to read ACLs are still supported and return the // bucket-owner-full-control ACL with the owner being the account that created the -// bucket. For more information, see Controlling object ownership and disabling -// ACLs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) -// in the Amazon S3 User Guide. The following operations are related to -// GetBucketAcl : -// - ListObjects (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html) +// bucket. For more information, see [Controlling object ownership and disabling ACLs]in the Amazon S3 User Guide. +// +// The following operations are related to GetBucketAcl : +// +// [ListObjects] +// +// [ListObjects]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html +// [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList +// [Controlling object ownership and disabling ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html func (c *Client) GetBucketAcl(ctx context.Context, params *GetBucketAclInput, optFns ...func(*Options)) (*GetBucketAclOutput, error) { if params == nil { params = &GetBucketAclInput{} @@ -51,14 +60,18 @@ func (c *Client) GetBucketAcl(ctx context.Context, params *GetBucketAclInput, op type GetBucketAclInput struct { - // Specifies the S3 bucket whose ACL is being requested. When you use this API - // operation with an access point, provide the alias of the access point in place - // of the bucket name. When you use this API operation with an Object Lambda access - // point, provide the alias of the Object Lambda access point in place of the - // bucket name. If the Object Lambda access point alias in a request is not valid, - // the error code InvalidAccessPointAliasError is returned. For more information - // about InvalidAccessPointAliasError , see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) - // . + // Specifies the S3 bucket whose ACL is being requested. + // + // When you use this API operation with an access point, provide the alias of the + // access point in place of the bucket name. + // + // When you use this API operation with an Object Lambda access point, provide the + // alias of the Object Lambda access point in place of the bucket name. If the + // Object Lambda access point alias in a request is not valid, the error code + // InvalidAccessPointAliasError is returned. For more information about + // InvalidAccessPointAliasError , see [List of Error Codes]. + // + // [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList // // This member is required. 
Bucket *string @@ -72,6 +85,7 @@ type GetBucketAclInput struct { } func (in *GetBucketAclInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -133,6 +147,9 @@ func (c *Client) addOperationGetBucketAclMiddlewares(stack *middleware.Stack, op if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -148,6 +165,18 @@ func (c *Client) addOperationGetBucketAclMiddlewares(stack *middleware.Stack, op if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetBucketAclValidationMiddleware(stack); err != nil { return err } @@ -181,6 +210,18 @@ func (c *Client) addOperationGetBucketAclMiddlewares(stack *middleware.Stack, op if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAnalyticsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAnalyticsConfiguration.go index 64e41d403..b0423c85e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAnalyticsConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAnalyticsConfiguration.go @@ -14,21 +14,33 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. This implementation of -// the GET action returns an analytics configuration (identified by the analytics -// configuration ID) from the bucket. To use this operation, you must have -// permissions to perform the s3:GetAnalyticsConfiguration action. The bucket -// owner has this permission by default. The bucket owner can grant this permission -// to others. For more information about permissions, see Permissions Related to -// Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// in the Amazon S3 User Guide. For information about Amazon S3 analytics feature, -// see Amazon S3 Analytics – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html) -// in the Amazon S3 User Guide. 
The following operations are related to -// GetBucketAnalyticsConfiguration : -// - DeleteBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html) -// - ListBucketAnalyticsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html) -// - PutBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html) +// This operation is not supported for directory buckets. +// +// This implementation of the GET action returns an analytics configuration +// (identified by the analytics configuration ID) from the bucket. +// +// To use this operation, you must have permissions to perform the +// s3:GetAnalyticsConfiguration action. The bucket owner has this permission by +// default. The bucket owner can grant this permission to others. For more +// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources] in the Amazon S3 User Guide. +// +// For information about Amazon S3 analytics feature, see [Amazon S3 Analytics – Storage Class Analysis] in the Amazon S3 User +// Guide. +// +// The following operations are related to GetBucketAnalyticsConfiguration : +// +// [DeleteBucketAnalyticsConfiguration] +// +// [ListBucketAnalyticsConfigurations] +// +// [PutBucketAnalyticsConfiguration] +// +// [Amazon S3 Analytics – Storage Class Analysis]: https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html +// [DeleteBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [ListBucketAnalyticsConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html +// [PutBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html func (c *Client) GetBucketAnalyticsConfiguration(ctx context.Context, params *GetBucketAnalyticsConfigurationInput, optFns ...func(*Options)) (*GetBucketAnalyticsConfigurationOutput, error) { if params == nil { params = &GetBucketAnalyticsConfigurationInput{} @@ -65,6 +77,7 @@ type GetBucketAnalyticsConfigurationInput struct { } func (in *GetBucketAnalyticsConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -123,6 +136,9 @@ func (c *Client) addOperationGetBucketAnalyticsConfigurationMiddlewares(stack *m if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -138,6 +154,18 @@ func (c *Client) addOperationGetBucketAnalyticsConfigurationMiddlewares(stack *m if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = 
addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetBucketAnalyticsConfigurationValidationMiddleware(stack); err != nil { return err } @@ -171,6 +199,18 @@ func (c *Client) addOperationGetBucketAnalyticsConfigurationMiddlewares(stack *m if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketCors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketCors.go index 0997225eb..ef6a970da 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketCors.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketCors.go @@ -14,21 +14,36 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Returns the Cross-Origin -// Resource Sharing (CORS) configuration information set for the bucket. To use -// this operation, you must have permission to perform the s3:GetBucketCORS +// This operation is not supported for directory buckets. +// +// Returns the Cross-Origin Resource Sharing (CORS) configuration information set +// for the bucket. +// +// To use this operation, you must have permission to perform the s3:GetBucketCORS // action. By default, the bucket owner has this permission and can grant it to -// others. When you use this API operation with an access point, provide the alias -// of the access point in place of the bucket name. When you use this API operation -// with an Object Lambda access point, provide the alias of the Object Lambda -// access point in place of the bucket name. If the Object Lambda access point -// alias in a request is not valid, the error code InvalidAccessPointAliasError is -// returned. For more information about InvalidAccessPointAliasError , see List of -// Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) -// . For more information about CORS, see Enabling Cross-Origin Resource Sharing (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) -// . The following operations are related to GetBucketCors : -// - PutBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html) -// - DeleteBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html) +// others. +// +// When you use this API operation with an access point, provide the alias of the +// access point in place of the bucket name. +// +// When you use this API operation with an Object Lambda access point, provide the +// alias of the Object Lambda access point in place of the bucket name. If the +// Object Lambda access point alias in a request is not valid, the error code +// InvalidAccessPointAliasError is returned. For more information about +// InvalidAccessPointAliasError , see [List of Error Codes]. +// +// For more information about CORS, see [Enabling Cross-Origin Resource Sharing]. 
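A note on the recurring hunk above: every operation touched by this patch gains the line p.Bucket = in.Bucket in its bindEndpointParams method, so the endpoint resolver now sees the bucket name alongside the existing UseS3ExpressControlEndpoint flag. Below is a minimal, self-contained sketch of that binding pattern; endpointParameters and getBucketCorsInput are simplified stand-ins for the generated types, not the real ones.

package main

import "fmt"

// Simplified stand-ins for the generated types; the real EndpointParameters
// and operation inputs live in the vendored S3 client and carry many more fields.
type endpointParameters struct {
	Bucket                      *string
	UseS3ExpressControlEndpoint *bool
}

type getBucketCorsInput struct{ Bucket *string }

// Mirrors the pattern this patch adds to each bucket-level operation: the
// input's bucket name is copied into the endpoint parameters before resolution.
func (in *getBucketCorsInput) bindEndpointParams(p *endpointParameters) {
	p.Bucket = in.Bucket // the one-line addition repeated across these hunks
	t := true
	p.UseS3ExpressControlEndpoint = &t
}

func main() {
	bucket := "amzn-s3-demo-bucket" // placeholder bucket name
	in := &getBucketCorsInput{Bucket: &bucket}
	var p endpointParameters
	in.bindEndpointParams(&p)
	fmt.Println(*p.Bucket, *p.UseS3ExpressControlEndpoint)
}

With the bucket bound into the parameters, the resolver can make bucket-specific routing decisions (for example, choosing an s3express-control endpoint) rather than seeing only the boolean flag.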
+// +// The following operations are related to GetBucketCors : +// +// [PutBucketCors] +// +// [DeleteBucketCors] +// +// [PutBucketCors]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html +// [Enabling Cross-Origin Resource Sharing]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html +// [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList +// [DeleteBucketCors]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html func (c *Client) GetBucketCors(ctx context.Context, params *GetBucketCorsInput, optFns ...func(*Options)) (*GetBucketCorsOutput, error) { if params == nil { params = &GetBucketCorsInput{} @@ -46,14 +61,18 @@ func (c *Client) GetBucketCors(ctx context.Context, params *GetBucketCorsInput, type GetBucketCorsInput struct { - // The bucket name for which to get the cors configuration. When you use this API - // operation with an access point, provide the alias of the access point in place - // of the bucket name. When you use this API operation with an Object Lambda access - // point, provide the alias of the Object Lambda access point in place of the - // bucket name. If the Object Lambda access point alias in a request is not valid, - // the error code InvalidAccessPointAliasError is returned. For more information - // about InvalidAccessPointAliasError , see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) - // . + // The bucket name for which to get the cors configuration. + // + // When you use this API operation with an access point, provide the alias of the + // access point in place of the bucket name. + // + // When you use this API operation with an Object Lambda access point, provide the + // alias of the Object Lambda access point in place of the bucket name. If the + // Object Lambda access point alias in a request is not valid, the error code + // InvalidAccessPointAliasError is returned. For more information about + // InvalidAccessPointAliasError , see [List of Error Codes]. + // + // [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList // // This member is required. 
Bucket *string @@ -67,6 +86,7 @@ type GetBucketCorsInput struct { } func (in *GetBucketCorsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -126,6 +146,9 @@ func (c *Client) addOperationGetBucketCorsMiddlewares(stack *middleware.Stack, o if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -141,6 +164,18 @@ func (c *Client) addOperationGetBucketCorsMiddlewares(stack *middleware.Stack, o if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetBucketCorsValidationMiddleware(stack); err != nil { return err } @@ -174,6 +209,18 @@ func (c *Client) addOperationGetBucketCorsMiddlewares(stack *middleware.Stack, o if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketEncryption.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketEncryption.go index 22c1f9bb5..82b7211a1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketEncryption.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketEncryption.go @@ -14,20 +14,47 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Returns the default -// encryption configuration for an Amazon S3 bucket. By default, all buckets have a -// default encryption configuration that uses server-side encryption with Amazon S3 -// managed keys (SSE-S3). For information about the bucket default encryption -// feature, see Amazon S3 Bucket Default Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) -// in the Amazon S3 User Guide. To use this operation, you must have permission to -// perform the s3:GetEncryptionConfiguration action. The bucket owner has this -// permission by default. The bucket owner can grant this permission to others. For -// more information about permissions, see Permissions Related to Bucket -// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// . The following operations are related to GetBucketEncryption : -// - PutBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html) -// - DeleteBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketEncryption.html) +// Returns the default encryption configuration for an Amazon S3 bucket. 
By +// default, all buckets have a default encryption configuration that uses +// server-side encryption with Amazon S3 managed keys (SSE-S3). +// +// - General purpose buckets - For information about the bucket default +// encryption feature, see [Amazon S3 Bucket Default Encryption]in the Amazon S3 User Guide. +// +// - Directory buckets - For directory buckets, there are only two supported +// options for server-side encryption: SSE-S3 and SSE-KMS. For information about +// the default encryption configuration in directory buckets, see [Setting default server-side encryption behavior for directory buckets]. +// +// Permissions +// +// - General purpose bucket permissions - The s3:GetEncryptionConfiguration +// permission is required in a policy. The bucket owner has this permission by +// default. The bucket owner can grant this permission to others. For more +// information about permissions, see [Permissions Related to Bucket Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. +// +// - Directory bucket permissions - To grant access to this API operation, you +// must have the s3express:GetEncryptionConfiguration permission in an IAM +// identity-based policy instead of a bucket policy. Cross-account access to this +// API operation isn't supported. This operation can only be performed by the +// Amazon Web Services account that owns the resource. For more information about +// directory bucket policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]in the Amazon S3 User Guide. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// s3express-control.region-code.amazonaws.com . +// +// The following operations are related to GetBucketEncryption : +// +// [PutBucketEncryption] +// +// [DeleteBucketEncryption] +// +// [DeleteBucketEncryption]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketEncryption.html +// [PutBucketEncryption]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html +// [Setting default server-side encryption behavior for directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-bucket-encryption.html +// [Amazon S3 Bucket Default Encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html +// [Permissions Related to Bucket Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html func (c *Client) GetBucketEncryption(ctx context.Context, params *GetBucketEncryptionInput, optFns ...func(*Options)) (*GetBucketEncryptionOutput, error) { if params == nil { params = &GetBucketEncryptionInput{} @@ -48,18 +75,34 @@ type GetBucketEncryptionInput struct { // The name of the bucket from which the server-side encryption configuration is // retrieved. // + // Directory buckets - When you use this operation with a directory bucket, you + // must use path-style requests in the format + // https://s3express-control.region-code.amazonaws.com/bucket-name . + // Virtual-hosted-style requests aren't supported. 
Directory bucket names must be + // unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must + // also follow the format bucket-base-name--zone-id--x-s3 (for example, + // DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // // This member is required. Bucket *string // The account ID of the expected bucket owner. If the account ID that you provide // does not match the actual owner of the bucket, the request fails with the HTTP // status code 403 Forbidden (access denied). + // + // For directory buckets, this header is not supported in this API operation. If + // you specify this header, the request fails with the HTTP status code 501 Not + // Implemented . ExpectedBucketOwner *string noSmithyDocumentSerde } func (in *GetBucketEncryptionInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -118,6 +161,9 @@ func (c *Client) addOperationGetBucketEncryptionMiddlewares(stack *middleware.St if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -133,6 +179,18 @@ func (c *Client) addOperationGetBucketEncryptionMiddlewares(stack *middleware.St if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetBucketEncryptionValidationMiddleware(stack); err != nil { return err } @@ -166,6 +224,18 @@ func (c *Client) addOperationGetBucketEncryptionMiddlewares(stack *middleware.St if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketIntelligentTieringConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketIntelligentTieringConfiguration.go index f3ae88a9b..5a500f0a6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketIntelligentTieringConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketIntelligentTieringConfiguration.go @@ -14,25 +14,38 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Gets the S3 -// Intelligent-Tiering configuration from the specified bucket. The S3 -// Intelligent-Tiering storage class is designed to optimize storage costs by -// automatically moving data to the most cost-effective storage access tier, +// This operation is not supported for directory buckets. +// +// Gets the S3 Intelligent-Tiering configuration from the specified bucket. 
+// +// The S3 Intelligent-Tiering storage class is designed to optimize storage costs +// by automatically moving data to the most cost-effective storage access tier, // without performance impact or operational overhead. S3 Intelligent-Tiering // delivers automatic cost savings in three low latency and high throughput access // tiers. To get the lowest storage cost on data that can be accessed in minutes to -// hours, you can choose to activate additional archiving capabilities. The S3 -// Intelligent-Tiering storage class is the ideal storage class for data with -// unknown, changing, or unpredictable access patterns, independent of object size -// or retention period. If the size of an object is less than 128 KB, it is not -// monitored and not eligible for auto-tiering. Smaller objects can be stored, but -// they are always charged at the Frequent Access tier rates in the S3 -// Intelligent-Tiering storage class. For more information, see Storage class for -// automatically optimizing frequently and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access) -// . Operations related to GetBucketIntelligentTieringConfiguration include: -// - DeleteBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html) -// - PutBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html) -// - ListBucketIntelligentTieringConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html) +// hours, you can choose to activate additional archiving capabilities. +// +// The S3 Intelligent-Tiering storage class is the ideal storage class for data +// with unknown, changing, or unpredictable access patterns, independent of object +// size or retention period. If the size of an object is less than 128 KB, it is +// not monitored and not eligible for auto-tiering. Smaller objects can be stored, +// but they are always charged at the Frequent Access tier rates in the S3 +// Intelligent-Tiering storage class. +// +// For more information, see [Storage class for automatically optimizing frequently and infrequently accessed objects]. 
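As a usage illustration for the operation documented above, here is a minimal sketch that fetches an Intelligent-Tiering configuration with this SDK; the bucket name amzn-s3-demo-bucket and the configuration ID ExampleConfig are placeholders, and default credentials in the environment are assumed.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Id is the ID chosen when the Intelligent-Tiering configuration was created.
	out, err := client.GetBucketIntelligentTieringConfiguration(ctx,
		&s3.GetBucketIntelligentTieringConfigurationInput{
			Bucket: aws.String("amzn-s3-demo-bucket"), // placeholder
			Id:     aws.String("ExampleConfig"),       // placeholder
		})
	if err != nil {
		log.Fatal(err)
	}
	if c := out.IntelligentTieringConfiguration; c != nil {
		fmt.Println("status:", c.Status)
		for _, t := range c.Tierings {
			fmt.Printf("tier %s after %d days\n", t.AccessTier, aws.ToInt32(t.Days))
		}
	}
}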
+// +// Operations related to GetBucketIntelligentTieringConfiguration include: +// +// [DeleteBucketIntelligentTieringConfiguration] +// +// [PutBucketIntelligentTieringConfiguration] +// +// [ListBucketIntelligentTieringConfigurations] +// +// [ListBucketIntelligentTieringConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html +// [PutBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html +// [Storage class for automatically optimizing frequently and infrequently accessed objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access +// [DeleteBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html func (c *Client) GetBucketIntelligentTieringConfiguration(ctx context.Context, params *GetBucketIntelligentTieringConfigurationInput, optFns ...func(*Options)) (*GetBucketIntelligentTieringConfigurationOutput, error) { if params == nil { params = &GetBucketIntelligentTieringConfigurationInput{} @@ -65,6 +78,7 @@ type GetBucketIntelligentTieringConfigurationInput struct { } func (in *GetBucketIntelligentTieringConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -123,6 +137,9 @@ func (c *Client) addOperationGetBucketIntelligentTieringConfigurationMiddlewares if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -138,6 +155,18 @@ func (c *Client) addOperationGetBucketIntelligentTieringConfigurationMiddlewares if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetBucketIntelligentTieringConfigurationValidationMiddleware(stack); err != nil { return err } @@ -171,6 +200,18 @@ func (c *Client) addOperationGetBucketIntelligentTieringConfigurationMiddlewares if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketInventoryConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketInventoryConfiguration.go index 123218e97..163c86ae7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketInventoryConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketInventoryConfiguration.go @@ -14,18 +14,32 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Returns an inventory -// configuration (identified by the inventory configuration ID) from the bucket. 
To -// use this operation, you must have permissions to perform the +// This operation is not supported for directory buckets. +// +// Returns an inventory configuration (identified by the inventory configuration +// ID) from the bucket. +// +// To use this operation, you must have permissions to perform the // s3:GetInventoryConfiguration action. The bucket owner has this permission by // default and can grant this permission to others. For more information about -// permissions, see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// . For information about the Amazon S3 inventory feature, see Amazon S3 Inventory (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html) -// . The following operations are related to GetBucketInventoryConfiguration : -// - DeleteBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html) -// - ListBucketInventoryConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html) -// - PutBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html) +// permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. +// +// For information about the Amazon S3 inventory feature, see [Amazon S3 Inventory]. +// +// The following operations are related to GetBucketInventoryConfiguration : +// +// [DeleteBucketInventoryConfiguration] +// +// [ListBucketInventoryConfigurations] +// +// [PutBucketInventoryConfiguration] +// +// [Amazon S3 Inventory]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html +// [ListBucketInventoryConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [DeleteBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html +// [PutBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html func (c *Client) GetBucketInventoryConfiguration(ctx context.Context, params *GetBucketInventoryConfigurationInput, optFns ...func(*Options)) (*GetBucketInventoryConfigurationOutput, error) { if params == nil { params = &GetBucketInventoryConfigurationInput{} @@ -62,6 +76,7 @@ type GetBucketInventoryConfigurationInput struct { } func (in *GetBucketInventoryConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -120,6 +135,9 @@ func (c *Client) addOperationGetBucketInventoryConfigurationMiddlewares(stack *m if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -135,6 +153,18 
@@ func (c *Client) addOperationGetBucketInventoryConfigurationMiddlewares(stack *m if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetBucketInventoryConfigurationValidationMiddleware(stack); err != nil { return err } @@ -168,6 +198,18 @@ func (c *Client) addOperationGetBucketInventoryConfigurationMiddlewares(stack *m if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLifecycleConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLifecycleConfiguration.go index ddcbbe096..5803ff5b5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLifecycleConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLifecycleConfiguration.go @@ -14,35 +14,81 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Bucket lifecycle -// configuration now supports specifying a lifecycle rule using an object key name -// prefix, one or more object tags, object size, or any combination of these. -// Accordingly, this section describes the latest API. The previous version of the -// API supported filtering based only on an object key name prefix, which is -// supported for backward compatibility. For the related API description, see -// GetBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html) -// . Accordingly, this section describes the latest API. The response describes the -// new filter element that you can use to specify a filter to select a subset of -// objects to which the rule applies. If you are using a previous version of the -// lifecycle configuration, it still works. For the earlier action, Returns the -// lifecycle configuration information set on the bucket. For information about -// lifecycle configuration, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) -// . To use this operation, you must have permission to perform the -// s3:GetLifecycleConfiguration action. The bucket owner has this permission, by -// default. The bucket owner can grant this permission to others. For more -// information about permissions, see Permissions Related to Bucket Subresource -// Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// . GetBucketLifecycleConfiguration has the following special error: +// Returns the lifecycle configuration information set on the bucket. For +// information about lifecycle configuration, see [Object Lifecycle Management]. 
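A minimal usage sketch for this operation, including handling of the NoSuchLifecycleConfiguration error that the comment below lists as a special error; the bucket name is a placeholder and default credentials are assumed.

package main

import (
	"context"
	"errors"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/smithy-go"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	out, err := client.GetBucketLifecycleConfiguration(ctx,
		&s3.GetBucketLifecycleConfigurationInput{
			Bucket: aws.String("amzn-s3-demo-bucket"), // placeholder
		})
	if err != nil {
		// The special error documented below surfaces as an API error code.
		var apiErr smithy.APIError
		if errors.As(err, &apiErr) && apiErr.ErrorCode() == "NoSuchLifecycleConfiguration" {
			fmt.Println("bucket has no lifecycle configuration")
			return
		}
		log.Fatal(err)
	}
	for _, rule := range out.Rules {
		fmt.Printf("rule %s: %s\n", aws.ToString(rule.ID), rule.Status)
	}
}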
+//
+// Bucket lifecycle configuration now supports specifying a lifecycle rule using
+// an object key name prefix, one or more object tags, object size, or any
+// combination of these. Accordingly, this section describes the latest API, which
+// is compatible with the new functionality. The previous version of the API
+// supported filtering based only on an object key name prefix, which is supported
+// for general purpose buckets for backward compatibility. For the related API
+// description, see [GetBucketLifecycle].
+//
+// Lifecycle configurations for directory buckets only support expiring objects
+// and cancelling multipart uploads. Expiration of versioned objects, transitions,
+// and tag filters are not supported.
+//
+// Permissions
+// - General purpose bucket permissions - By default, all Amazon S3 resources
+// are private, including buckets, objects, and related subresources (for example,
+// lifecycle configuration and website configuration). Only the resource owner
+// (that is, the Amazon Web Services account that created it) can access the
+// resource. The resource owner can optionally grant access permissions to others
+// by writing an access policy. For this operation, a user must have the
+// s3:GetLifecycleConfiguration permission.
+//
+// For more information about permissions, see [Managing Access Permissions to Your Amazon S3 Resources].
+//
+// - Directory bucket permissions - You must have the
+// s3express:GetLifecycleConfiguration permission in an IAM identity-based policy
+// to use this operation. Cross-account access to this API operation isn't
+// supported. The resource owner can optionally grant access permissions to others
+// by creating a role or user for them as long as they are within the same account
+// as the owner and resource.
+//
+// For more information about directory bucket policies and permissions, see [Authorizing Regional endpoint APIs with IAM]in
+//
+// the Amazon S3 User Guide.
+//
+// Directory buckets - For directory buckets, you must make requests for this API
+//
+// operation to the Regional endpoint. These endpoints support path-style requests
+// in the format https://s3express-control.region-code.amazonaws.com/bucket-name
+// . Virtual-hosted-style requests aren't supported. For more information about
+// endpoints in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more
+// information about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide.
+//
+// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
+// s3express-control.region.amazonaws.com .
+//
+// GetBucketLifecycleConfiguration has the following special error:
+//
+// - Error code: NoSuchLifecycleConfiguration
+//
+// - Description: The lifecycle configuration does not exist.
+// // - HTTP Status Code: 404 Not Found +// // - SOAP Fault Code Prefix: Client // // The following operations are related to GetBucketLifecycleConfiguration : -// - GetBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html) -// - PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html) -// - DeleteBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html) +// +// [GetBucketLifecycle] +// +// [PutBucketLifecycle] +// +// [DeleteBucketLifecycle] +// +// [GetBucketLifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html +// [Object Lifecycle Management]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html +// [Authorizing Regional endpoint APIs with IAM]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html +// [PutBucketLifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html +// [DeleteBucketLifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html +// +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html func (c *Client) GetBucketLifecycleConfiguration(ctx context.Context, params *GetBucketLifecycleConfigurationInput, optFns ...func(*Options)) (*GetBucketLifecycleConfigurationOutput, error) { if params == nil { params = &GetBucketLifecycleConfigurationInput{} @@ -68,12 +114,16 @@ type GetBucketLifecycleConfigurationInput struct { // The account ID of the expected bucket owner. If the account ID that you provide // does not match the actual owner of the bucket, the request fails with the HTTP // status code 403 Forbidden (access denied). + // + // This parameter applies to general purpose buckets only. It is not supported for + // directory bucket lifecycle configurations. ExpectedBucketOwner *string noSmithyDocumentSerde } func (in *GetBucketLifecycleConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -83,6 +133,25 @@ type GetBucketLifecycleConfigurationOutput struct { // Container for a lifecycle rule. Rules []types.LifecycleRule + // Indicates which default minimum object size behavior is applied to the + // lifecycle configuration. + // + // This parameter applies to general purpose buckets only. It isn't supported for + // directory bucket lifecycle configurations. + // + // - all_storage_classes_128K - Objects smaller than 128 KB will not transition + // to any storage class by default. + // + // - varies_by_storage_class - Objects smaller than 128 KB will transition to + // Glacier Flexible Retrieval or Glacier Deep Archive storage classes. By default, + // all other storage classes will prevent transitions smaller than 128 KB. + // + // To customize the minimum object size for any transition you can add a filter + // that specifies a custom ObjectSizeGreaterThan or ObjectSizeLessThan in the body + // of your transition rule. Custom filters always take precedence over the default + // transition behavior. 
+ TransitionDefaultMinimumObjectSize types.TransitionDefaultMinimumObjectSize + // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata @@ -132,6 +201,9 @@ func (c *Client) addOperationGetBucketLifecycleConfigurationMiddlewares(stack *m if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -147,6 +219,18 @@ func (c *Client) addOperationGetBucketLifecycleConfigurationMiddlewares(stack *m if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetBucketLifecycleConfigurationValidationMiddleware(stack); err != nil { return err } @@ -180,6 +264,18 @@ func (c *Client) addOperationGetBucketLifecycleConfigurationMiddlewares(stack *m if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLocation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLocation.go index aff5f3cd5..3d1397b7f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLocation.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLocation.go @@ -20,23 +20,34 @@ import ( "io" ) -// This operation is not supported by directory buckets. Returns the Region the -// bucket resides in. You set the bucket's Region using the LocationConstraint -// request parameter in a CreateBucket request. For more information, see -// CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) -// . When you use this API operation with an access point, provide the alias of the -// access point in place of the bucket name. When you use this API operation with -// an Object Lambda access point, provide the alias of the Object Lambda access -// point in place of the bucket name. If the Object Lambda access point alias in a -// request is not valid, the error code InvalidAccessPointAliasError is returned. -// For more information about InvalidAccessPointAliasError , see List of Error -// Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) -// . We recommend that you use HeadBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadBucket.html) -// to return the Region that a bucket resides in. For backward compatibility, -// Amazon S3 continues to support GetBucketLocation. The following operations are -// related to GetBucketLocation : -// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) -// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// This operation is not supported for directory buckets. +// +// Returns the Region the bucket resides in. 
You set the bucket's Region using the +// LocationConstraint request parameter in a CreateBucket request. For more +// information, see [CreateBucket]. +// +// When you use this API operation with an access point, provide the alias of the +// access point in place of the bucket name. +// +// When you use this API operation with an Object Lambda access point, provide the +// alias of the Object Lambda access point in place of the bucket name. If the +// Object Lambda access point alias in a request is not valid, the error code +// InvalidAccessPointAliasError is returned. For more information about +// InvalidAccessPointAliasError , see [List of Error Codes]. +// +// We recommend that you use [HeadBucket] to return the Region that a bucket resides in. For +// backward compatibility, Amazon S3 continues to support GetBucketLocation. +// +// The following operations are related to GetBucketLocation : +// +// [GetObject] +// +// [CreateBucket] +// +// [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList +// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html +// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html +// [HeadBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadBucket.html func (c *Client) GetBucketLocation(ctx context.Context, params *GetBucketLocationInput, optFns ...func(*Options)) (*GetBucketLocationOutput, error) { if params == nil { params = &GetBucketLocationInput{} @@ -54,14 +65,18 @@ func (c *Client) GetBucketLocation(ctx context.Context, params *GetBucketLocatio type GetBucketLocationInput struct { - // The name of the bucket for which to get the location. When you use this API - // operation with an access point, provide the alias of the access point in place - // of the bucket name. When you use this API operation with an Object Lambda access - // point, provide the alias of the Object Lambda access point in place of the - // bucket name. If the Object Lambda access point alias in a request is not valid, - // the error code InvalidAccessPointAliasError is returned. For more information - // about InvalidAccessPointAliasError , see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) - // . + // The name of the bucket for which to get the location. + // + // When you use this API operation with an access point, provide the alias of the + // access point in place of the bucket name. + // + // When you use this API operation with an Object Lambda access point, provide the + // alias of the Object Lambda access point in place of the bucket name. If the + // Object Lambda access point alias in a request is not valid, the error code + // InvalidAccessPointAliasError is returned. For more information about + // InvalidAccessPointAliasError , see [List of Error Codes]. + // + // [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList // // This member is required. Bucket *string @@ -75,6 +90,7 @@ type GetBucketLocationInput struct { } func (in *GetBucketLocationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -82,8 +98,12 @@ func (in *GetBucketLocationInput) bindEndpointParams(p *EndpointParameters) { type GetBucketLocationOutput struct { // Specifies the Region where the bucket resides. 
For a list of all the Amazon S3 - // supported location constraints by Region, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) - // . Buckets in Region us-east-1 have a LocationConstraint of null . + // supported location constraints by Region, see [Regions and Endpoints]. + // + // Buckets in Region us-east-1 have a LocationConstraint of null . Buckets with a + // LocationConstraint of EU reside in eu-west-1 . + // + // [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region LocationConstraint types.BucketLocationConstraint // Metadata pertaining to the operation's result. @@ -135,6 +155,9 @@ func (c *Client) addOperationGetBucketLocationMiddlewares(stack *middleware.Stac if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -153,6 +176,18 @@ func (c *Client) addOperationGetBucketLocationMiddlewares(stack *middleware.Stac if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetBucketLocationValidationMiddleware(stack); err != nil { return err } @@ -186,6 +221,18 @@ func (c *Client) addOperationGetBucketLocationMiddlewares(stack *middleware.Stac if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLogging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLogging.go index d1c4f8fbb..c1f315fab 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLogging.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLogging.go @@ -14,11 +14,19 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Returns the logging -// status of a bucket and the permissions users have to view and modify that -// status. The following operations are related to GetBucketLogging : -// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) -// - PutBucketLogging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLogging.html) +// This operation is not supported for directory buckets. +// +// Returns the logging status of a bucket and the permissions users have to view +// and modify that status. 
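A minimal usage sketch for this operation; the bucket name is a placeholder and default credentials are assumed. A nil LoggingEnabled in the output means server access logging is disabled for the bucket.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	out, err := client.GetBucketLogging(ctx, &s3.GetBucketLoggingInput{
		Bucket: aws.String("amzn-s3-demo-bucket"), // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	if out.LoggingEnabled == nil {
		fmt.Println("server access logging is disabled")
		return
	}
	fmt.Printf("logs go to s3://%s/%s\n",
		aws.ToString(out.LoggingEnabled.TargetBucket),
		aws.ToString(out.LoggingEnabled.TargetPrefix))
}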
+// +// The following operations are related to GetBucketLogging : +// +// [CreateBucket] +// +// [PutBucketLogging] +// +// [PutBucketLogging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLogging.html +// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html func (c *Client) GetBucketLogging(ctx context.Context, params *GetBucketLoggingInput, optFns ...func(*Options)) (*GetBucketLoggingOutput, error) { if params == nil { params = &GetBucketLoggingInput{} @@ -50,6 +58,7 @@ type GetBucketLoggingInput struct { } func (in *GetBucketLoggingInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -57,8 +66,10 @@ func (in *GetBucketLoggingInput) bindEndpointParams(p *EndpointParameters) { type GetBucketLoggingOutput struct { // Describes where logs are stored and the prefix that Amazon S3 assigns to all - // log object keys for a bucket. For more information, see PUT Bucket logging (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) - // in the Amazon S3 API Reference. + // log object keys for a bucket. For more information, see [PUT Bucket logging]in the Amazon S3 API + // Reference. + // + // [PUT Bucket logging]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html LoggingEnabled *types.LoggingEnabled // Metadata pertaining to the operation's result. @@ -110,6 +121,9 @@ func (c *Client) addOperationGetBucketLoggingMiddlewares(stack *middleware.Stack if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -125,6 +139,18 @@ func (c *Client) addOperationGetBucketLoggingMiddlewares(stack *middleware.Stack if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetBucketLoggingValidationMiddleware(stack); err != nil { return err } @@ -158,6 +184,18 @@ func (c *Client) addOperationGetBucketLoggingMiddlewares(stack *middleware.Stack if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketMetadataTableConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketMetadataTableConfiguration.go new file mode 100644 index 000000000..5d5e9685f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketMetadataTableConfiguration.go @@ -0,0 +1,242 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
+ +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Retrieves the metadata table configuration for a general purpose bucket. For +// +// more information, see [Accelerating data discovery with S3 Metadata]in the Amazon S3 User Guide. +// +// Permissions To use this operation, you must have the +// s3:GetBucketMetadataTableConfiguration permission. For more information, see [Setting up permissions for configuring metadata tables] +// in the Amazon S3 User Guide. +// +// The following operations are related to GetBucketMetadataTableConfiguration : +// +// [CreateBucketMetadataTableConfiguration] +// +// [DeleteBucketMetadataTableConfiguration] +// +// [Setting up permissions for configuring metadata tables]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/metadata-tables-permissions.html +// [CreateBucketMetadataTableConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucketMetadataTableConfiguration.html +// [DeleteBucketMetadataTableConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetadataTableConfiguration.html +// [Accelerating data discovery with S3 Metadata]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/metadata-tables-overview.html +func (c *Client) GetBucketMetadataTableConfiguration(ctx context.Context, params *GetBucketMetadataTableConfigurationInput, optFns ...func(*Options)) (*GetBucketMetadataTableConfigurationOutput, error) { + if params == nil { + params = &GetBucketMetadataTableConfigurationInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetBucketMetadataTableConfiguration", params, optFns, c.addOperationGetBucketMetadataTableConfigurationMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetBucketMetadataTableConfigurationOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetBucketMetadataTableConfigurationInput struct { + + // The general purpose bucket that contains the metadata table configuration that + // you want to retrieve. + // + // This member is required. + Bucket *string + + // The expected owner of the general purpose bucket that you want to retrieve the + // metadata table configuration from. + ExpectedBucketOwner *string + + noSmithyDocumentSerde +} + +func (in *GetBucketMetadataTableConfigurationInput) bindEndpointParams(p *EndpointParameters) { + + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + +type GetBucketMetadataTableConfigurationOutput struct { + + // The metadata table configuration for the general purpose bucket. + GetBucketMetadataTableConfigurationResult *types.GetBucketMetadataTableConfigurationResult + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetBucketMetadataTableConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketMetadataTableConfiguration{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetBucketMetadataTableConfiguration{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketMetadataTableConfiguration"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } + if err = addOpGetBucketMetadataTableConfigurationValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetBucketMetadataTableConfiguration(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addGetBucketMetadataTableConfigurationUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = 
addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func (v *GetBucketMetadataTableConfigurationInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +func newServiceMetadataMiddleware_opGetBucketMetadataTableConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetBucketMetadataTableConfiguration", + } +} + +// getGetBucketMetadataTableConfigurationBucketMember returns a pointer to string +// denoting a provided bucket member value and a boolean indicating if the input has +// a modeled bucket name. +func getGetBucketMetadataTableConfigurationBucketMember(input interface{}) (*string, bool) { + in := input.(*GetBucketMetadataTableConfigurationInput) + if in.Bucket == nil { + return nil, false + } + return in.Bucket, true +} +func addGetBucketMetadataTableConfigurationUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: getGetBucketMetadataTableConfigurationBucketMember, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketMetricsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketMetricsConfiguration.go index d7499c68c..4fdf2ef42 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketMetricsConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketMetricsConfiguration.go @@ -14,21 +14,34 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Gets a metrics -// configuration (specified by the metrics configuration ID) from the bucket. Note -// that this doesn't include the daily storage metrics. To use this operation, you -// must have permissions to perform the s3:GetMetricsConfiguration action. The -// bucket owner has this permission by default. The bucket owner can grant this -// permission to others. For more information about permissions, see Permissions -// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// . For information about CloudWatch request metrics for Amazon S3, see -// Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html) -// . 
The following operations are related to GetBucketMetricsConfiguration : -// - PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html) -// - DeleteBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html) -// - ListBucketMetricsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html) -// - Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html) +// This operation is not supported for directory buckets. +// +// Gets a metrics configuration (specified by the metrics configuration ID) from +// the bucket. Note that this doesn't include the daily storage metrics. +// +// To use this operation, you must have permissions to perform the +// s3:GetMetricsConfiguration action. The bucket owner has this permission by +// default. The bucket owner can grant this permission to others. For more +// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. +// +// For information about CloudWatch request metrics for Amazon S3, see [Monitoring Metrics with Amazon CloudWatch]. +// +// The following operations are related to GetBucketMetricsConfiguration : +// +// [PutBucketMetricsConfiguration] +// +// [DeleteBucketMetricsConfiguration] +// +// [ListBucketMetricsConfigurations] +// +// [Monitoring Metrics with Amazon CloudWatch] +// +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [Monitoring Metrics with Amazon CloudWatch]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html +// [ListBucketMetricsConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html +// [PutBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html +// [DeleteBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html func (c *Client) GetBucketMetricsConfiguration(ctx context.Context, params *GetBucketMetricsConfigurationInput, optFns ...func(*Options)) (*GetBucketMetricsConfigurationOutput, error) { if params == nil { params = &GetBucketMetricsConfigurationInput{} @@ -66,6 +79,7 @@ type GetBucketMetricsConfigurationInput struct { } func (in *GetBucketMetricsConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -124,6 +138,9 @@ func (c *Client) addOperationGetBucketMetricsConfigurationMiddlewares(stack *mid if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -139,6 +156,18 @@ func (c *Client) addOperationGetBucketMetricsConfigurationMiddlewares(stack *mid if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = 
addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetBucketMetricsConfigurationValidationMiddleware(stack); err != nil { return err } @@ -172,6 +201,18 @@ func (c *Client) addOperationGetBucketMetricsConfigurationMiddlewares(stack *mid if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketNotificationConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketNotificationConfiguration.go index 73155110d..1fb7d1643 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketNotificationConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketNotificationConfiguration.go @@ -14,24 +14,38 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Returns the notification -// configuration of a bucket. If notifications are not enabled on the bucket, the -// action returns an empty NotificationConfiguration element. By default, you must -// be the bucket owner to read the notification configuration of a bucket. However, -// the bucket owner can use a bucket policy to grant permission to other users to -// read this configuration with the s3:GetBucketNotification permission. When you -// use this API operation with an access point, provide the alias of the access -// point in place of the bucket name. When you use this API operation with an -// Object Lambda access point, provide the alias of the Object Lambda access point -// in place of the bucket name. If the Object Lambda access point alias in a -// request is not valid, the error code InvalidAccessPointAliasError is returned. -// For more information about InvalidAccessPointAliasError , see List of Error -// Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) -// . For more information about setting and reading the notification configuration -// on a bucket, see Setting Up Notification of Bucket Events (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) -// . For more information about bucket policies, see Using Bucket Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html) -// . The following action is related to GetBucketNotification : -// - PutBucketNotification (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketNotification.html) +// This operation is not supported for directory buckets. +// +// Returns the notification configuration of a bucket. +// +// If notifications are not enabled on the bucket, the action returns an empty +// NotificationConfiguration element. +// +// By default, you must be the bucket owner to read the notification configuration +// of a bucket. However, the bucket owner can use a bucket policy to grant +// permission to other users to read this configuration with the +// s3:GetBucketNotification permission. +// +// When you use this API operation with an access point, provide the alias of the +// access point in place of the bucket name. 
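
As a usage reference for the updated operation, a minimal sketch of reading a bucket's notification configuration through the v2 client; the bucket name is a placeholder, credentials are assumed to come from the default chain, and the output field names follow the generated types in this package:

    package main

    import (
        "context"
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/config"
        "github.com/aws/aws-sdk-go-v2/service/s3"
    )

    func main() {
        ctx := context.Background()
        cfg, err := config.LoadDefaultConfig(ctx)
        if err != nil {
            log.Fatal(err)
        }
        client := s3.NewFromConfig(cfg)
        // Per the doc comment, an access point alias may be passed in
        // place of the bucket name.
        out, err := client.GetBucketNotificationConfiguration(ctx, &s3.GetBucketNotificationConfigurationInput{
            Bucket: aws.String("amzn-s3-demo-bucket"), // placeholder
        })
        if err != nil {
            log.Fatal(err)
        }
        // An empty configuration comes back when notifications are not enabled.
        fmt.Println("topic configs:", len(out.TopicConfigurations))
        fmt.Println("queue configs:", len(out.QueueConfigurations))
        fmt.Println("lambda configs:", len(out.LambdaFunctionConfigurations))
    }
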
+// +// When you use this API operation with an Object Lambda access point, provide the +// alias of the Object Lambda access point in place of the bucket name. If the +// Object Lambda access point alias in a request is not valid, the error code +// InvalidAccessPointAliasError is returned. For more information about +// InvalidAccessPointAliasError , see [List of Error Codes]. +// +// For more information about setting and reading the notification configuration +// on a bucket, see [Setting Up Notification of Bucket Events]. For more information about bucket policies, see [Using Bucket Policies]. +// +// The following action is related to GetBucketNotification : +// +// [PutBucketNotification] +// +// [Using Bucket Policies]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html +// [Setting Up Notification of Bucket Events]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html +// [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList +// [PutBucketNotification]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketNotification.html func (c *Client) GetBucketNotificationConfiguration(ctx context.Context, params *GetBucketNotificationConfigurationInput, optFns ...func(*Options)) (*GetBucketNotificationConfigurationOutput, error) { if params == nil { params = &GetBucketNotificationConfigurationInput{} @@ -49,15 +63,18 @@ func (c *Client) GetBucketNotificationConfiguration(ctx context.Context, params type GetBucketNotificationConfigurationInput struct { - // The name of the bucket for which to get the notification configuration. When - // you use this API operation with an access point, provide the alias of the access - // point in place of the bucket name. When you use this API operation with an - // Object Lambda access point, provide the alias of the Object Lambda access point - // in place of the bucket name. If the Object Lambda access point alias in a - // request is not valid, the error code InvalidAccessPointAliasError is returned. - // For more information about InvalidAccessPointAliasError , see List of Error - // Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) - // . + // The name of the bucket for which to get the notification configuration. + // + // When you use this API operation with an access point, provide the alias of the + // access point in place of the bucket name. + // + // When you use this API operation with an Object Lambda access point, provide the + // alias of the Object Lambda access point in place of the bucket name. If the + // Object Lambda access point alias in a request is not valid, the error code + // InvalidAccessPointAliasError is returned. For more information about + // InvalidAccessPointAliasError , see [List of Error Codes]. + // + // [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList // // This member is required. 
Bucket *string @@ -71,6 +88,7 @@ type GetBucketNotificationConfigurationInput struct { } func (in *GetBucketNotificationConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -143,6 +161,9 @@ func (c *Client) addOperationGetBucketNotificationConfigurationMiddlewares(stack if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -158,6 +179,18 @@ func (c *Client) addOperationGetBucketNotificationConfigurationMiddlewares(stack if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetBucketNotificationConfigurationValidationMiddleware(stack); err != nil { return err } @@ -191,6 +224,18 @@ func (c *Client) addOperationGetBucketNotificationConfigurationMiddlewares(stack if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketOwnershipControls.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketOwnershipControls.go index cea151428..cdc07a580 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketOwnershipControls.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketOwnershipControls.go @@ -14,14 +14,22 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Retrieves -// OwnershipControls for an Amazon S3 bucket. To use this operation, you must have -// the s3:GetBucketOwnershipControls permission. For more information about Amazon -// S3 permissions, see Specifying permissions in a policy (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html) -// . For information about Amazon S3 Object Ownership, see Using Object Ownership (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) -// . The following operations are related to GetBucketOwnershipControls : -// - PutBucketOwnershipControls -// - DeleteBucketOwnershipControls +// This operation is not supported for directory buckets. +// +// Retrieves OwnershipControls for an Amazon S3 bucket. To use this operation, you +// must have the s3:GetBucketOwnershipControls permission. For more information +// about Amazon S3 permissions, see [Specifying permissions in a policy]. +// +// For information about Amazon S3 Object Ownership, see [Using Object Ownership]. 
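
For reference, a minimal sketch of retrieving OwnershipControls with this client; the bucket name is a placeholder, and the Rules/ObjectOwnership fields are assumed to match the generated types package:

    package main

    import (
        "context"
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/config"
        "github.com/aws/aws-sdk-go-v2/service/s3"
    )

    func main() {
        ctx := context.Background()
        cfg, err := config.LoadDefaultConfig(ctx)
        if err != nil {
            log.Fatal(err)
        }
        client := s3.NewFromConfig(cfg)
        out, err := client.GetBucketOwnershipControls(ctx, &s3.GetBucketOwnershipControlsInput{
            Bucket: aws.String("amzn-s3-demo-bucket"), // placeholder
        })
        if err != nil {
            log.Fatal(err) // requires s3:GetBucketOwnershipControls
        }
        for _, rule := range out.OwnershipControls.Rules {
            // e.g. BucketOwnerEnforced, BucketOwnerPreferred, or ObjectWriter
            fmt.Println("object ownership:", rule.ObjectOwnership)
        }
    }
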
+// +// The following operations are related to GetBucketOwnershipControls : +// +// # PutBucketOwnershipControls +// +// # DeleteBucketOwnershipControls +// +// [Using Object Ownership]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html +// [Specifying permissions in a policy]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html func (c *Client) GetBucketOwnershipControls(ctx context.Context, params *GetBucketOwnershipControlsInput, optFns ...func(*Options)) (*GetBucketOwnershipControlsOutput, error) { if params == nil { params = &GetBucketOwnershipControlsInput{} @@ -53,6 +61,7 @@ type GetBucketOwnershipControlsInput struct { } func (in *GetBucketOwnershipControlsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -112,6 +121,9 @@ func (c *Client) addOperationGetBucketOwnershipControlsMiddlewares(stack *middle if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -127,6 +139,18 @@ func (c *Client) addOperationGetBucketOwnershipControlsMiddlewares(stack *middle if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetBucketOwnershipControlsValidationMiddleware(stack); err != nil { return err } @@ -160,6 +184,18 @@ func (c *Client) addOperationGetBucketOwnershipControlsMiddlewares(stack *middle if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicy.go index c2f98f936..26bd4a775 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicy.go @@ -13,47 +13,63 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Returns the policy of a specified bucket. Directory buckets - For directory -// buckets, you must make requests for this API operation to the Regional endpoint. -// These endpoints support path-style requests in the format -// https://s3express-control.region_code.amazonaws.com/bucket-name . -// Virtual-hosted-style requests aren't supported. For more information, see -// Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. Permissions If you are using an identity other than -// the root user of the Amazon Web Services account that owns the bucket, the -// calling identity must both have the GetBucketPolicy permissions on the -// specified bucket and belong to the bucket owner's account in order to use this -// operation. 
If you don't have GetBucketPolicy permissions, Amazon S3 returns a -// 403 Access Denied error. If you have the correct permissions, but you're not -// using an identity that belongs to the bucket owner's account, Amazon S3 returns -// a 405 Method Not Allowed error. To ensure that bucket owners don't -// inadvertently lock themselves out of their own buckets, the root principal in a -// bucket owner's Amazon Web Services account can perform the GetBucketPolicy , -// PutBucketPolicy , and DeleteBucketPolicy API actions, even if their bucket -// policy explicitly denies the root principal's access. Bucket owner root -// principals can only be blocked from performing these API actions by VPC endpoint -// policies and Amazon Web Services Organizations policies. +// Returns the policy of a specified bucket. +// +// Directory buckets - For directory buckets, you must make requests for this API +// operation to the Regional endpoint. These endpoints support path-style requests +// in the format https://s3express-control.region-code.amazonaws.com/bucket-name . +// Virtual-hosted-style requests aren't supported. For more information about +// endpoints in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more +// information about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. +// +// Permissions If you are using an identity other than the root user of the Amazon +// Web Services account that owns the bucket, the calling identity must both have +// the GetBucketPolicy permissions on the specified bucket and belong to the +// bucket owner's account in order to use this operation. +// +// If you don't have GetBucketPolicy permissions, Amazon S3 returns a 403 Access +// Denied error. If you have the correct permissions, but you're not using an +// identity that belongs to the bucket owner's account, Amazon S3 returns a 405 +// Method Not Allowed error. +// +// To ensure that bucket owners don't inadvertently lock themselves out of their +// own buckets, the root principal in a bucket owner's Amazon Web Services account +// can perform the GetBucketPolicy , PutBucketPolicy , and DeleteBucketPolicy API +// actions, even if their bucket policy explicitly denies the root principal's +// access. Bucket owner root principals can only be blocked from performing these +// API actions by VPC endpoint policies and Amazon Web Services Organizations +// policies. +// // - General purpose bucket permissions - The s3:GetBucketPolicy permission is // required in a policy. For more information about general purpose buckets bucket -// policies, see Using Bucket Policies and User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html) -// in the Amazon S3 User Guide. +// policies, see [Using Bucket Policies and User Policies]in the Amazon S3 User Guide. +// // - Directory bucket permissions - To grant access to this API operation, you // must have the s3express:GetBucketPolicy permission in an IAM identity-based // policy instead of a bucket policy. Cross-account access to this API operation // isn't supported. This operation can only be performed by the Amazon Web Services // account that owns the resource. 
For more information about directory bucket -// policies and permissions, see Amazon Web Services Identity and Access -// Management (IAM) for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html) -// in the Amazon S3 User Guide. +// policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]in the Amazon S3 User Guide. +// +// Example bucket policies General purpose buckets example bucket policies - See [Bucket policy examples] +// in the Amazon S3 User Guide. +// +// Directory bucket example bucket policies - See [Example bucket policies for S3 Express One Zone] in the Amazon S3 User Guide. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// s3express-control.region-code.amazonaws.com . // -// Example bucket policies General purpose buckets example bucket policies - See -// Bucket policy examples (https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html) -// in the Amazon S3 User Guide. Directory bucket example bucket policies - See -// Example bucket policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html) -// in the Amazon S3 User Guide. HTTP Host header syntax Directory buckets - The -// HTTP Host header syntax is s3express-control.region.amazonaws.com . The -// following action is related to GetBucketPolicy : -// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// The following action is related to GetBucketPolicy : +// +// [GetObject] +// +// [Bucket policy examples]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [Example bucket policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html +// [Using Bucket Policies and User Policies]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html +// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html +// [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html func (c *Client) GetBucketPolicy(ctx context.Context, params *GetBucketPolicyInput, optFns ...func(*Options)) (*GetBucketPolicyOutput, error) { if params == nil { params = &GetBucketPolicyInput{} @@ -71,39 +87,49 @@ func (c *Client) GetBucketPolicy(ctx context.Context, params *GetBucketPolicyInp type GetBucketPolicyInput struct { - // The bucket name to get the bucket policy for. Directory buckets - When you use - // this operation with a directory bucket, you must use path-style requests in the - // format https://s3express-control.region_code.amazonaws.com/bucket-name . + // The bucket name to get the bucket policy for. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use path-style requests in the format + // https://s3express-control.region-code.amazonaws.com/bucket-name . // Virtual-hosted-style requests aren't supported. 
Directory bucket names must be - // unique in the chosen Availability Zone. Bucket names must also follow the format - // bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 - // ). For information about bucket naming restrictions, see Directory bucket - // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide Access points - When you use this API operation with - // an access point, provide the alias of the access point in place of the bucket - // name. Object Lambda access points - When you use this API operation with an - // Object Lambda access point, provide the alias of the Object Lambda access point - // in place of the bucket name. If the Object Lambda access point alias in a - // request is not valid, the error code InvalidAccessPointAliasError is returned. - // For more information about InvalidAccessPointAliasError , see List of Error - // Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) - // . Access points and Object Lambda access points are not supported by directory + // unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must + // also follow the format bucket-base-name--zone-id--x-s3 (for example, + // DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide + // + // Access points - When you use this API operation with an access point, provide + // the alias of the access point in place of the bucket name. + // + // Object Lambda access points - When you use this API operation with an Object + // Lambda access point, provide the alias of the Object Lambda access point in + // place of the bucket name. If the Object Lambda access point alias in a request + // is not valid, the error code InvalidAccessPointAliasError is returned. For more + // information about InvalidAccessPointAliasError , see [List of Error Codes]. + // + // Access points and Object Lambda access points are not supported by directory // buckets. // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList + // // This member is required. Bucket *string // The account ID of the expected bucket owner. If the account ID that you provide // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). For directory buckets, this header - // is not supported in this API operation. If you specify this header, the request - // fails with the HTTP status code 501 Not Implemented . + // status code 403 Forbidden (access denied). + // + // For directory buckets, this header is not supported in this API operation. If + // you specify this header, the request fails with the HTTP status code 501 Not + // Implemented . 
ExpectedBucketOwner *string noSmithyDocumentSerde } func (in *GetBucketPolicyInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -162,6 +188,9 @@ func (c *Client) addOperationGetBucketPolicyMiddlewares(stack *middleware.Stack, if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -177,6 +206,18 @@ func (c *Client) addOperationGetBucketPolicyMiddlewares(stack *middleware.Stack, if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetBucketPolicyValidationMiddleware(stack); err != nil { return err } @@ -210,6 +251,18 @@ func (c *Client) addOperationGetBucketPolicyMiddlewares(stack *middleware.Stack, if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicyStatus.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicyStatus.go index cb36ac504..db63caf6f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicyStatus.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicyStatus.go @@ -14,18 +14,31 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Retrieves the policy -// status for an Amazon S3 bucket, indicating whether the bucket is public. In -// order to use this operation, you must have the s3:GetBucketPolicyStatus -// permission. For more information about Amazon S3 permissions, see Specifying -// Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) -// . For more information about when Amazon S3 considers a bucket public, see The -// Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) -// . The following operations are related to GetBucketPolicyStatus : -// - Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) -// - GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) -// - PutPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html) -// - DeletePublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html) +// This operation is not supported for directory buckets. +// +// Retrieves the policy status for an Amazon S3 bucket, indicating whether the +// bucket is public. In order to use this operation, you must have the +// s3:GetBucketPolicyStatus permission. 
For more information about Amazon S3 +// permissions, see [Specifying Permissions in a Policy]. +// +// For more information about when Amazon S3 considers a bucket public, see [The Meaning of "Public"]. +// +// The following operations are related to GetBucketPolicyStatus : +// +// [Using Amazon S3 Block Public Access] +// +// [GetPublicAccessBlock] +// +// [PutPublicAccessBlock] +// +// [DeletePublicAccessBlock] +// +// [GetPublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html +// [PutPublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html +// [DeletePublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html +// [Using Amazon S3 Block Public Access]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html +// [Specifying Permissions in a Policy]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html +// [The Meaning of "Public"]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status func (c *Client) GetBucketPolicyStatus(ctx context.Context, params *GetBucketPolicyStatusInput, optFns ...func(*Options)) (*GetBucketPolicyStatusOutput, error) { if params == nil { params = &GetBucketPolicyStatusInput{} @@ -57,6 +70,7 @@ type GetBucketPolicyStatusInput struct { } func (in *GetBucketPolicyStatusInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -115,6 +129,9 @@ func (c *Client) addOperationGetBucketPolicyStatusMiddlewares(stack *middleware. if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -130,6 +147,18 @@ func (c *Client) addOperationGetBucketPolicyStatusMiddlewares(stack *middleware. if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetBucketPolicyStatusValidationMiddleware(stack); err != nil { return err } @@ -163,6 +192,18 @@ func (c *Client) addOperationGetBucketPolicyStatusMiddlewares(stack *middleware. if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketReplication.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketReplication.go index 7e44d38ee..ebc0d35cf 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketReplication.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketReplication.go @@ -14,21 +14,37 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Returns the replication -// configuration of a bucket. 
It can take a while to propagate the put or delete a -// replication configuration to all Amazon S3 systems. Therefore, a get request -// soon after put or delete can return a wrong result. For information about -// replication configuration, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) -// in the Amazon S3 User Guide. This action requires permissions for the -// s3:GetReplicationConfiguration action. For more information about permissions, -// see Using Bucket Policies and User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html) -// . If you include the Filter element in a replication configuration, you must -// also include the DeleteMarkerReplication and Priority elements. The response -// also returns those elements. For information about GetBucketReplication errors, -// see List of replication-related error codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ReplicationErrorCodeList) +// This operation is not supported for directory buckets. +// +// Returns the replication configuration of a bucket. +// +// It can take a while to propagate the put or delete a replication configuration +// to all Amazon S3 systems. Therefore, a get request soon after put or delete can +// return a wrong result. +// +// For information about replication configuration, see [Replication] in the Amazon S3 User +// Guide. +// +// This action requires permissions for the s3:GetReplicationConfiguration action. +// For more information about permissions, see [Using Bucket Policies and User Policies]. +// +// If you include the Filter element in a replication configuration, you must also +// include the DeleteMarkerReplication and Priority elements. The response also +// returns those elements. 
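
A minimal sketch of fetching and walking a replication configuration with this client; the bucket name is a placeholder, and per the doc comment above, a read shortly after a put or delete may return a stale result:

    package main

    import (
        "context"
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/config"
        "github.com/aws/aws-sdk-go-v2/service/s3"
    )

    func main() {
        ctx := context.Background()
        cfg, err := config.LoadDefaultConfig(ctx)
        if err != nil {
            log.Fatal(err)
        }
        client := s3.NewFromConfig(cfg)
        out, err := client.GetBucketReplication(ctx, &s3.GetBucketReplicationInput{
            Bucket: aws.String("amzn-s3-demo-bucket"), // placeholder
        })
        if err != nil {
            log.Fatal(err) // requires s3:GetReplicationConfiguration
        }
        for _, rule := range out.ReplicationConfiguration.Rules {
            // Priority and DeleteMarkerReplication are returned when the
            // configuration uses the Filter element.
            fmt.Printf("rule %s: status=%s priority=%d\n",
                aws.ToString(rule.ID), rule.Status, aws.ToInt32(rule.Priority))
        }
    }
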
+// +// For information about GetBucketReplication errors, see [List of replication-related error codes] +// // The following operations are related to GetBucketReplication : -// - PutBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html) -// - DeleteBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketReplication.html) +// +// [PutBucketReplication] +// +// [DeleteBucketReplication] +// +// [PutBucketReplication]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html +// [Using Bucket Policies and User Policies]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html +// [Replication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html +// [List of replication-related error codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ReplicationErrorCodeList +// [DeleteBucketReplication]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketReplication.html func (c *Client) GetBucketReplication(ctx context.Context, params *GetBucketReplicationInput, optFns ...func(*Options)) (*GetBucketReplicationOutput, error) { if params == nil { params = &GetBucketReplicationInput{} @@ -60,6 +76,7 @@ type GetBucketReplicationInput struct { } func (in *GetBucketReplicationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -119,6 +136,9 @@ func (c *Client) addOperationGetBucketReplicationMiddlewares(stack *middleware.S if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -134,6 +154,18 @@ func (c *Client) addOperationGetBucketReplicationMiddlewares(stack *middleware.S if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetBucketReplicationValidationMiddleware(stack); err != nil { return err } @@ -167,6 +199,18 @@ func (c *Client) addOperationGetBucketReplicationMiddlewares(stack *middleware.S if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketRequestPayment.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketRequestPayment.go index 16cc52822..34563cb0d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketRequestPayment.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketRequestPayment.go @@ -14,11 +14,17 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Returns the request -// payment configuration of a bucket. To use this version of the operation, you -// must be the bucket owner. 
For more information, see Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html) -// . The following operations are related to GetBucketRequestPayment : -// - ListObjects (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html) +// This operation is not supported for directory buckets. +// +// Returns the request payment configuration of a bucket. To use this version of +// the operation, you must be the bucket owner. For more information, see [Requester Pays Buckets]. +// +// The following operations are related to GetBucketRequestPayment : +// +// [ListObjects] +// +// [ListObjects]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html +// [Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html func (c *Client) GetBucketRequestPayment(ctx context.Context, params *GetBucketRequestPaymentInput, optFns ...func(*Options)) (*GetBucketRequestPaymentOutput, error) { if params == nil { params = &GetBucketRequestPaymentInput{} @@ -50,6 +56,7 @@ type GetBucketRequestPaymentInput struct { } func (in *GetBucketRequestPaymentInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -108,6 +115,9 @@ func (c *Client) addOperationGetBucketRequestPaymentMiddlewares(stack *middlewar if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -123,6 +133,18 @@ func (c *Client) addOperationGetBucketRequestPaymentMiddlewares(stack *middlewar if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetBucketRequestPaymentValidationMiddleware(stack); err != nil { return err } @@ -156,6 +178,18 @@ func (c *Client) addOperationGetBucketRequestPaymentMiddlewares(stack *middlewar if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketTagging.go index 69a6e4903..a1d8d99c4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketTagging.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketTagging.go @@ -14,17 +14,28 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Returns the tag set -// associated with the bucket. To use this operation, you must have permission to -// perform the s3:GetBucketTagging action. By default, the bucket owner has this -// permission and can grant this permission to others. GetBucketTagging has the -// following special error: +// This operation is not supported for directory buckets. 
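
A minimal sketch of reading the tag set and handling the NoSuchTagSet special error noted in this doc comment; the v2 SDK surfaces it as a generic smithy.APIError, so matching on the error code string is the usual pattern, and the bucket name is a placeholder:

    package main

    import (
        "context"
        "errors"
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/config"
        "github.com/aws/aws-sdk-go-v2/service/s3"
        "github.com/aws/smithy-go"
    )

    func main() {
        ctx := context.Background()
        cfg, err := config.LoadDefaultConfig(ctx)
        if err != nil {
            log.Fatal(err)
        }
        client := s3.NewFromConfig(cfg)
        out, err := client.GetBucketTagging(ctx, &s3.GetBucketTaggingInput{
            Bucket: aws.String("amzn-s3-demo-bucket"), // placeholder
        })
        if err != nil {
            var apiErr smithy.APIError
            if errors.As(err, &apiErr) && apiErr.ErrorCode() == "NoSuchTagSet" {
                fmt.Println("no tag set associated with the bucket")
                return
            }
            log.Fatal(err)
        }
        for _, tag := range out.TagSet {
            fmt.Printf("%s=%s\n", aws.ToString(tag.Key), aws.ToString(tag.Value))
        }
    }
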
+// +// Returns the tag set associated with the bucket. +// +// To use this operation, you must have permission to perform the +// s3:GetBucketTagging action. By default, the bucket owner has this permission and +// can grant this permission to others. +// +// GetBucketTagging has the following special error: +// // - Error code: NoSuchTagSet +// // - Description: There is no tag set associated with the bucket. // // The following operations are related to GetBucketTagging : -// - PutBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html) -// - DeleteBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html) +// +// [PutBucketTagging] +// +// [DeleteBucketTagging] +// +// [PutBucketTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html +// [DeleteBucketTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html func (c *Client) GetBucketTagging(ctx context.Context, params *GetBucketTaggingInput, optFns ...func(*Options)) (*GetBucketTaggingOutput, error) { if params == nil { params = &GetBucketTaggingInput{} @@ -56,6 +67,7 @@ type GetBucketTaggingInput struct { } func (in *GetBucketTaggingInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -116,6 +128,9 @@ func (c *Client) addOperationGetBucketTaggingMiddlewares(stack *middleware.Stack if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -131,6 +146,18 @@ func (c *Client) addOperationGetBucketTaggingMiddlewares(stack *middleware.Stack if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetBucketTaggingValidationMiddleware(stack); err != nil { return err } @@ -164,6 +191,18 @@ func (c *Client) addOperationGetBucketTaggingMiddlewares(stack *middleware.Stack if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketVersioning.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketVersioning.go index 10540b136..6e69fd467 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketVersioning.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketVersioning.go @@ -14,15 +14,27 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Returns the versioning -// state of a bucket. To retrieve the versioning state of a bucket, you must be the -// bucket owner. This implementation also returns the MFA Delete status of the -// versioning state. 
If the MFA Delete status is enabled , the bucket owner must -// use an authentication device to change the versioning state of the bucket. The -// following operations are related to GetBucketVersioning : -// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) -// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) -// - DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) +// This operation is not supported for directory buckets. +// +// Returns the versioning state of a bucket. +// +// To retrieve the versioning state of a bucket, you must be the bucket owner. +// +// This implementation also returns the MFA Delete status of the versioning state. +// If the MFA Delete status is enabled , the bucket owner must use an +// authentication device to change the versioning state of the bucket. +// +// The following operations are related to GetBucketVersioning : +// +// [GetObject] +// +// [PutObject] +// +// [DeleteObject] +// +// [DeleteObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html +// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html +// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html func (c *Client) GetBucketVersioning(ctx context.Context, params *GetBucketVersioningInput, optFns ...func(*Options)) (*GetBucketVersioningOutput, error) { if params == nil { params = &GetBucketVersioningInput{} @@ -54,6 +66,7 @@ type GetBucketVersioningInput struct { } func (in *GetBucketVersioningInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -117,6 +130,9 @@ func (c *Client) addOperationGetBucketVersioningMiddlewares(stack *middleware.St if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -132,6 +148,18 @@ func (c *Client) addOperationGetBucketVersioningMiddlewares(stack *middleware.St if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetBucketVersioningValidationMiddleware(stack); err != nil { return err } @@ -165,6 +193,18 @@ func (c *Client) addOperationGetBucketVersioningMiddlewares(stack *middleware.St if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketWebsite.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketWebsite.go index c87f6ff1a..ce0847c8c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketWebsite.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketWebsite.go @@ -14,17 +14,26 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not 
supported by directory buckets. Returns the website -// configuration for a bucket. To host website on Amazon S3, you can configure a -// bucket as website by adding a website configuration. For more information about -// hosting websites, see Hosting Websites on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html) -// . This GET action requires the S3:GetBucketWebsite permission. By default, only +// This operation is not supported for directory buckets. +// +// Returns the website configuration for a bucket. To host website on Amazon S3, +// you can configure a bucket as website by adding a website configuration. For +// more information about hosting websites, see [Hosting Websites on Amazon S3]. +// +// This GET action requires the S3:GetBucketWebsite permission. By default, only // the bucket owner can read the bucket website configuration. However, bucket // owners can allow other users to read the website configuration by writing a -// bucket policy granting them the S3:GetBucketWebsite permission. The following -// operations are related to GetBucketWebsite : -// - DeleteBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketWebsite.html) -// - PutBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html) +// bucket policy granting them the S3:GetBucketWebsite permission. +// +// The following operations are related to GetBucketWebsite : +// +// [DeleteBucketWebsite] +// +// [PutBucketWebsite] +// +// [PutBucketWebsite]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html +// [Hosting Websites on Amazon S3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html +// [DeleteBucketWebsite]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketWebsite.html func (c *Client) GetBucketWebsite(ctx context.Context, params *GetBucketWebsiteInput, optFns ...func(*Options)) (*GetBucketWebsiteOutput, error) { if params == nil { params = &GetBucketWebsiteInput{} @@ -56,6 +65,7 @@ type GetBucketWebsiteInput struct { } func (in *GetBucketWebsiteInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -124,6 +134,9 @@ func (c *Client) addOperationGetBucketWebsiteMiddlewares(stack *middleware.Stack if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -139,6 +152,18 @@ func (c *Client) addOperationGetBucketWebsiteMiddlewares(stack *middleware.Stack if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetBucketWebsiteValidationMiddleware(stack); err != nil { return err } @@ -172,6 +197,18 @@ func (c *Client) addOperationGetBucketWebsiteMiddlewares(stack *middleware.Stack if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); 
err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObject.go index a64f5964e..6fdc8ec9a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObject.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObject.go @@ -16,100 +16,153 @@ import ( "time" ) -// Retrieves an object from Amazon S3. In the GetObject request, specify the full -// key name for the object. General purpose buckets - Both the virtual-hosted-style -// requests and the path-style requests are supported. For a virtual hosted-style -// request example, if you have the object photos/2006/February/sample.jpg , -// specify the object key name as /photos/2006/February/sample.jpg . For a -// path-style request example, if you have the object -// photos/2006/February/sample.jpg in the bucket named examplebucket , specify the -// object key name as /examplebucket/photos/2006/February/sample.jpg . For more -// information about request types, see HTTP Host Header Bucket Specification (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#VirtualHostingSpecifyBucket) -// in the Amazon S3 User Guide. Directory buckets - Only virtual-hosted-style -// requests are supported. For a virtual hosted-style request example, if you have -// the object photos/2006/February/sample.jpg in the bucket named -// examplebucket--use1-az5--x-s3 , specify the object key name as +// Retrieves an object from Amazon S3. +// +// In the GetObject request, specify the full key name for the object. +// +// General purpose buckets - Both the virtual-hosted-style requests and the +// path-style requests are supported. For a virtual hosted-style request example, +// if you have the object photos/2006/February/sample.jpg , specify the object key +// name as /photos/2006/February/sample.jpg . For a path-style request example, if +// you have the object photos/2006/February/sample.jpg in the bucket named +// examplebucket , specify the object key name as +// /examplebucket/photos/2006/February/sample.jpg . For more information about +// request types, see [HTTP Host Header Bucket Specification]in the Amazon S3 User Guide. +// +// Directory buckets - Only virtual-hosted-style requests are supported. For a +// virtual hosted-style request example, if you have the object +// photos/2006/February/sample.jpg in the bucket named +// amzn-s3-demo-bucket--usw2-az1--x-s3 , specify the object key name as // /photos/2006/February/sample.jpg . Also, when you make requests to this API // operation, your requests are sent to the Zonal endpoint. These endpoints support // virtual-hosted-style requests in the format -// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style -// requests are not supported. For more information, see Regional and Zonal -// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. Permissions +// https://bucket-name.s3express-zone-id.region-code.amazonaws.com/key-name . +// Path-style requests are not supported. For more information about endpoints in +// Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information about +// endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. 
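
Before the permissions discussion below, a minimal sketch of a basic GetObject call against a general purpose bucket; the bucket name is a placeholder, the key is taken from the doc comment's example, and the response body must be closed by the caller:

    package main

    import (
        "context"
        "fmt"
        "io"
        "log"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/config"
        "github.com/aws/aws-sdk-go-v2/service/s3"
    )

    func main() {
        ctx := context.Background()
        cfg, err := config.LoadDefaultConfig(ctx)
        if err != nil {
            log.Fatal(err)
        }
        client := s3.NewFromConfig(cfg)
        out, err := client.GetObject(ctx, &s3.GetObjectInput{
            Bucket: aws.String("amzn-s3-demo-bucket"),             // placeholder
            Key:    aws.String("photos/2006/February/sample.jpg"), // key from the doc comment
        })
        if err != nil {
            log.Fatal(err) // requires READ access to the object (s3:GetObject)
        }
        defer out.Body.Close()
        data, err := io.ReadAll(out.Body)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("read %d bytes, content type %s\n", len(data), aws.ToString(out.ContentType))
    }
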
+// +// Permissions // - General purpose bucket permissions - You must have the required permissions // in a policy. To use GetObject , you must have the READ access to the object // (or version). If you grant READ access to the anonymous user, the GetObject // operation returns the object without using an authorization header. For more -// information, see Specifying permissions in a policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) -// in the Amazon S3 User Guide. If you include a versionId in your request -// header, you must have the s3:GetObjectVersion permission to access a specific -// version of an object. The s3:GetObject permission is not required in this -// scenario. If you request the current version of an object without a specific -// versionId in the request header, only the s3:GetObject permission is required. -// The s3:GetObjectVersion permission is not required in this scenario. If the -// object that you request doesn’t exist, the error that Amazon S3 returns depends -// on whether you also have the s3:ListBucket permission. -// - If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an -// HTTP status code 404 Not Found error. -// - If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP -// status code 403 Access Denied error. -// - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// API operation for session-based authorization. Specifically, you grant the -// s3express:CreateSession permission to the directory bucket in a bucket policy -// or an IAM identity-based policy. Then, you make the CreateSession API call on -// the bucket to obtain a session token. With the session token in your request -// header, you can make API requests to this operation. After the session token -// expires, you make another CreateSession API call to generate a new session -// token for use. Amazon Web Services CLI or SDKs create session and refresh the -// session token automatically to avoid service interruptions when a session -// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// . +// information, see [Specifying permissions in a policy]in the Amazon S3 User Guide. +// +// If you include a versionId in your request header, you must have the +// +// s3:GetObjectVersion permission to access a specific version of an object. The +// s3:GetObject permission is not required in this scenario. +// +// If you request the current version of an object without a specific versionId in +// +// the request header, only the s3:GetObject permission is required. The +// s3:GetObjectVersion permission is not required in this scenario. +// +// If the object that you request doesn’t exist, the error that Amazon S3 returns +// +// depends on whether you also have the s3:ListBucket permission. +// +// - If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an +// HTTP status code 404 Not Found error. +// +// - If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP +// status code 403 Access Denied error. +// +// - Directory bucket permissions - To grant access to this API operation on a +// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation +// for session-based authorization. 
Specifically, you grant the +// s3express:CreateSession permission to the directory bucket in a bucket policy +// or an IAM identity-based policy. Then, you make the CreateSession API call on +// the bucket to obtain a session token. With the session token in your request +// header, you can make API requests to this operation. After the session token +// expires, you make another CreateSession API call to generate a new session +// token for use. Amazon Web Services CLI or SDKs create session and refresh the +// session token automatically to avoid service interruptions when a session +// expires. For more information about authorization, see [CreateSession]CreateSession . +// +// If the object is encrypted using SSE-KMS, you must also have the +// +// kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies +// and KMS key policies for the KMS key. // // Storage classes If the object you are retrieving is stored in the S3 Glacier // Flexible Retrieval storage class, the S3 Glacier Deep Archive storage class, the // S3 Intelligent-Tiering Archive Access tier, or the S3 Intelligent-Tiering Deep // Archive Access tier, before you can retrieve the object you must first restore a -// copy using RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html) -// . Otherwise, this operation returns an InvalidObjectState error. For -// information about restoring archived objects, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html) -// in the Amazon S3 User Guide. Directory buckets - For directory buckets, only the -// S3 Express One Zone storage class is supported to store newly created objects. -// Unsupported storage class values won't write a destination object and will -// respond with the HTTP status code 400 Bad Request . Encryption Encryption -// request headers, like x-amz-server-side-encryption , should not be sent for the -// GetObject requests, if your object uses server-side encryption with Amazon S3 -// managed encryption keys (SSE-S3), server-side encryption with Key Management -// Service (KMS) keys (SSE-KMS), or dual-layer server-side encryption with Amazon -// Web Services KMS keys (DSSE-KMS). If you include the header in your GetObject -// requests for the object that uses these types of keys, you’ll get an HTTP 400 -// Bad Request error. Overriding response header values through the request There -// are times when you want to override certain response header values of a -// GetObject response. For example, you might override the Content-Disposition -// response header value through your GetObject request. You can override values -// for a set of response headers. These modified response header values are -// included only in a successful response, that is, when the HTTP status code 200 -// OK is returned. The headers you can override using the following query -// parameters in the request are a subset of the headers that Amazon S3 accepts -// when you create an object. The response headers that you can override for the -// GetObject response are Cache-Control , Content-Disposition , Content-Encoding , -// Content-Language , Content-Type , and Expires . To override values for a set of -// response headers in the GetObject response, you can use the following query -// parameters in the request. +// copy using [RestoreObject]. Otherwise, this operation returns an InvalidObjectState error. 
For +// information about restoring archived objects, see [Restoring Archived Objects]in the Amazon S3 User Guide. +// +// Directory buckets - For directory buckets, only the S3 Express One Zone storage +// class is supported to store newly created objects. Unsupported storage class +// values won't write a destination object and will respond with the HTTP status +// code 400 Bad Request . +// +// Encryption Encryption request headers, like x-amz-server-side-encryption , +// should not be sent for the GetObject requests, if your object uses server-side +// encryption with Amazon S3 managed encryption keys (SSE-S3), server-side +// encryption with Key Management Service (KMS) keys (SSE-KMS), or dual-layer +// server-side encryption with Amazon Web Services KMS keys (DSSE-KMS). If you +// include the header in your GetObject requests for the object that uses these +// types of keys, you’ll get an HTTP 400 Bad Request error. +// +// Directory buckets - For directory buckets, there are only two supported options +// for server-side encryption: SSE-S3 and SSE-KMS. SSE-C isn't supported. For more +// information, see [Protecting data with server-side encryption]in the Amazon S3 User Guide. +// +// Overriding response header values through the request There are times when you +// want to override certain response header values of a GetObject response. For +// example, you might override the Content-Disposition response header value +// through your GetObject request. +// +// You can override values for a set of response headers. These modified response +// header values are included only in a successful response, that is, when the HTTP +// status code 200 OK is returned. The headers you can override using the +// following query parameters in the request are a subset of the headers that +// Amazon S3 accepts when you create an object. +// +// The response headers that you can override for the GetObject response are +// Cache-Control , Content-Disposition , Content-Encoding , Content-Language , +// Content-Type , and Expires . +// +// To override values for a set of response headers in the GetObject response, you +// can use the following query parameters in the request. +// // - response-cache-control +// // - response-content-disposition +// // - response-content-encoding +// // - response-content-language +// // - response-content-type +// // - response-expires // // When you use these parameters, you must sign the request by using either an // Authorization header or a presigned URL. These parameters cannot be used with an -// unsigned (anonymous) request. HTTP Host header syntax Directory buckets - The -// HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com . +// unsigned (anonymous) request. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket-name.s3express-zone-id.region-code.amazonaws.com . 
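The Response* members of GetObjectInput are how this SDK surfaces the response-header overrides described above; they are serialized as the response-* query parameters on the signed request. A hedged fragment, reusing a client built as in the earlier sketch, with hypothetical bucket, key, and helper names:

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// downloadAsAttachment is a hypothetical helper: it overrides the stored
// Content-Disposition and Content-Type for this one response only.
func downloadAsAttachment(ctx context.Context, client *s3.Client) (*s3.GetObjectOutput, error) {
	return client.GetObject(ctx, &s3.GetObjectInput{
		Bucket: aws.String("amzn-s3-demo-bucket"), // hypothetical
		Key:    aws.String("reports/q1.pdf"),      // hypothetical
		// Serialized as response-content-disposition / response-content-type;
		// honored only on signed (non-anonymous) requests, per the docs above.
		ResponseContentDisposition: aws.String(`attachment; filename="q1.pdf"`),
		ResponseContentType:        aws.String("application/pdf"),
	})
}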
+// // The following operations are related to GetObject : -// - ListBuckets (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html) -// - GetObjectAcl (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html) +// +// [ListBuckets] +// +// [GetObjectAcl] +// +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [RestoreObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html +// [Protecting data with server-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-serv-side-encryption.html +// [ListBuckets]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html +// [HTTP Host Header Bucket Specification]: https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#VirtualHostingSpecifyBucket +// [Restoring Archived Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html +// [GetObjectAcl]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html +// [Specifying permissions in a policy]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html +// +// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html func (c *Client) GetObject(ctx context.Context, params *GetObjectInput, optFns ...func(*Options)) (*GetObjectOutput, error) { if params == nil { params = &GetObjectInput{} @@ -127,35 +180,44 @@ func (c *Client) GetObject(ctx context.Context, params *GetObjectInput, optFns . type GetObjectInput struct { - // The bucket name containing the object. Directory buckets - When you use this - // operation with a directory bucket, you must use virtual-hosted-style requests in - // the format Bucket_name.s3express-az_id.region.amazonaws.com . Path-style - // requests are not supported. Directory bucket names must be unique in the chosen - // Availability Zone. Bucket names must follow the format - // bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 - // ). For information about bucket naming restrictions, see Directory bucket - // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide. Access points - When you use this action with an - // access point, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When using the access point ARN, - // you must direct requests to the access point hostname. The access point hostname - // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. Object Lambda access points - When you use this - // action with an Object Lambda access point, you must direct requests to the - // Object Lambda access point hostname. The Object Lambda access point hostname - // takes the form AccessPointName-AccountId.s3-object-lambda.Region.amazonaws.com. + // The bucket name containing the object. 
+ // + // Directory buckets - When you use this operation with a directory bucket, you + // must use virtual-hosted-style requests in the format + // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests + // are not supported. Directory bucket names must be unique in the chosen Zone + // (Availability Zone or Local Zone). Bucket names must follow the format + // bucket-base-name--zone-id--x-s3 (for example, + // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the access + // point ARN. When using the access point ARN, you must direct requests to the + // access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // Object Lambda access points - When you use this action with an Object Lambda + // access point, you must direct requests to the Object Lambda access point + // hostname. The Object Lambda access point hostname takes the form + // AccessPointName-AccountId.s3-object-lambda.Region.amazonaws.com. + // // Access points and Object Lambda access points are not supported by directory - // buckets. S3 on Outposts - When you use this action with Amazon S3 on Outposts, - // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts - // hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // buckets. + // + // S3 on Outposts - When you use this action with S3 on Outposts, you must direct + // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the + // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When + // you use this action with S3 on Outposts, the destination bucket must be the + // Outposts access point ARN or the access point alias. For more information about + // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -174,37 +236,55 @@ type GetObjectInput struct { ExpectedBucketOwner *string // Return the object only if its entity tag (ETag) is the same as the one - // specified in this header; otherwise, return a 412 Precondition Failed error. 
If - // both of the If-Match and If-Unmodified-Since headers are present in the request - // as follows: If-Match condition evaluates to true , and; If-Unmodified-Since - // condition evaluates to false ; then, S3 returns 200 OK and the data requested. - // For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) - // . + // specified in this header; otherwise, return a 412 Precondition Failed error. + // + // If both of the If-Match and If-Unmodified-Since headers are present in the + // request as follows: If-Match condition evaluates to true , and; + // If-Unmodified-Since condition evaluates to false ; then, S3 returns 200 OK and + // the data requested. + // + // For more information about conditional requests, see [RFC 7232]. + // + // [RFC 7232]: https://tools.ietf.org/html/rfc7232 IfMatch *string // Return the object only if it has been modified since the specified time; - // otherwise, return a 304 Not Modified error. If both of the If-None-Match and - // If-Modified-Since headers are present in the request as follows: If-None-Match - // condition evaluates to false , and; If-Modified-Since condition evaluates to - // true ; then, S3 returns 304 Not Modified status code. For more information - // about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) . + // otherwise, return a 304 Not Modified error. + // + // If both of the If-None-Match and If-Modified-Since headers are present in the + // request as follows: If-None-Match condition evaluates to false , and; + // If-Modified-Since condition evaluates to true ; then, S3 returns 304 Not + // Modified status code. + // + // For more information about conditional requests, see [RFC 7232]. + // + // [RFC 7232]: https://tools.ietf.org/html/rfc7232 IfModifiedSince *time.Time // Return the object only if its entity tag (ETag) is different from the one - // specified in this header; otherwise, return a 304 Not Modified error. If both - // of the If-None-Match and If-Modified-Since headers are present in the request - // as follows: If-None-Match condition evaluates to false , and; If-Modified-Since - // condition evaluates to true ; then, S3 returns 304 Not Modified HTTP status - // code. For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) - // . + // specified in this header; otherwise, return a 304 Not Modified error. + // + // If both of the If-None-Match and If-Modified-Since headers are present in the + // request as follows: If-None-Match condition evaluates to false , and; + // If-Modified-Since condition evaluates to true ; then, S3 returns 304 Not + // Modified HTTP status code. + // + // For more information about conditional requests, see [RFC 7232]. + // + // [RFC 7232]: https://tools.ietf.org/html/rfc7232 IfNoneMatch *string // Return the object only if it has not been modified since the specified time; - // otherwise, return a 412 Precondition Failed error. If both of the If-Match and - // If-Unmodified-Since headers are present in the request as follows: If-Match - // condition evaluates to true , and; If-Unmodified-Since condition evaluates to - // false ; then, S3 returns 200 OK and the data requested. For more information - // about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) . + // otherwise, return a 412 Precondition Failed error. 
+ // + // If both of the If-Match and If-Unmodified-Since headers are present in the + // request as follows: If-Match condition evaluates to true , and; + // If-Unmodified-Since condition evaluates to false ; then, S3 returns 200 OK and + // the data requested. + // + // For more information about conditional requests, see [RFC 7232]. + // + // [RFC 7232]: https://tools.ietf.org/html/rfc7232 IfUnmodifiedSince *time.Time // Part number of the object being read. This is a positive integer between 1 and @@ -213,18 +293,23 @@ type GetObjectInput struct { PartNumber *int32 // Downloads the specified byte range of an object. For more information about the - // HTTP Range header, see https://www.rfc-editor.org/rfc/rfc9110.html#name-range (https://www.rfc-editor.org/rfc/rfc9110.html#name-range) - // . Amazon S3 doesn't support retrieving multiple ranges of data per GET request. + // HTTP Range header, see [https://www.rfc-editor.org/rfc/rfc9110.html#name-range]. + // + // Amazon S3 doesn't support retrieving multiple ranges of data per GET request. + // + // [https://www.rfc-editor.org/rfc/rfc9110.html#name-range]: https://www.rfc-editor.org/rfc/rfc9110.html#name-range Range *string // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer // Sets the Cache-Control header of the response. @@ -245,72 +330,97 @@ type GetObjectInput struct { // Sets the Expires header of the response. ResponseExpires *time.Time - // Specifies the algorithm to use when decrypting the object (for example, AES256 - // ). If you encrypt an object by using server-side encryption with - // customer-provided encryption keys (SSE-C) when you store the object in Amazon - // S3, then when you GET the object, you must use the following headers: + // Specifies the algorithm to use when decrypting the object (for example, AES256 ). + // + // If you encrypt an object by using server-side encryption with customer-provided + // encryption keys (SSE-C) when you store the object in Amazon S3, then when you + // GET the object, you must use the following headers: + // // - x-amz-server-side-encryption-customer-algorithm + // // - x-amz-server-side-encryption-customer-key + // // - x-amz-server-side-encryption-customer-key-MD5 - // For more information about SSE-C, see Server-Side Encryption (Using - // Customer-Provided Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. 
+ // + // For more information about SSE-C, see [Server-Side Encryption (Using Customer-Provided Encryption Keys)] in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + // + // [Server-Side Encryption (Using Customer-Provided Encryption Keys)]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html SSECustomerAlgorithm *string // Specifies the customer-provided encryption key that you originally provided for // Amazon S3 to encrypt the data before storing it. This value is used to decrypt // the object when recovering it and must match the one used when storing the data. // The key must be appropriate for use with the algorithm specified in the - // x-amz-server-side-encryption-customer-algorithm header. If you encrypt an object - // by using server-side encryption with customer-provided encryption keys (SSE-C) - // when you store the object in Amazon S3, then when you GET the object, you must - // use the following headers: + // x-amz-server-side-encryption-customer-algorithm header. + // + // If you encrypt an object by using server-side encryption with customer-provided + // encryption keys (SSE-C) when you store the object in Amazon S3, then when you + // GET the object, you must use the following headers: + // // - x-amz-server-side-encryption-customer-algorithm + // // - x-amz-server-side-encryption-customer-key + // // - x-amz-server-side-encryption-customer-key-MD5 - // For more information about SSE-C, see Server-Side Encryption (Using - // Customer-Provided Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // + // For more information about SSE-C, see [Server-Side Encryption (Using Customer-Provided Encryption Keys)] in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + // + // [Server-Side Encryption (Using Customer-Provided Encryption Keys)]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html SSECustomerKey *string // Specifies the 128-bit MD5 digest of the customer-provided encryption key // according to RFC 1321. Amazon S3 uses this header for a message integrity check - // to ensure that the encryption key was transmitted without error. If you encrypt - // an object by using server-side encryption with customer-provided encryption keys - // (SSE-C) when you store the object in Amazon S3, then when you GET the object, - // you must use the following headers: + // to ensure that the encryption key was transmitted without error. + // + // If you encrypt an object by using server-side encryption with customer-provided + // encryption keys (SSE-C) when you store the object in Amazon S3, then when you + // GET the object, you must use the following headers: + // // - x-amz-server-side-encryption-customer-algorithm + // // - x-amz-server-side-encryption-customer-key + // // - x-amz-server-side-encryption-customer-key-MD5 - // For more information about SSE-C, see Server-Side Encryption (Using - // Customer-Provided Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // + // For more information about SSE-C, see [Server-Side Encryption (Using Customer-Provided Encryption Keys)] in the Amazon S3 User Guide. 
+	//
+	// This functionality is not supported for directory buckets.
+	//
+	// [Server-Side Encryption (Using Customer-Provided Encryption Keys)]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html
 	SSECustomerKeyMD5 *string
 
-	// Version ID used to reference a specific version of the object. By default, the
-	// GetObject operation returns the current version of an object. To return a
-	// different version, use the versionId subresource.
+	// Version ID used to reference a specific version of the object.
+	//
+	// By default, the GetObject operation returns the current version of an object.
+	// To return a different version, use the versionId subresource.
+	//
 	//   - If you include a versionId in your request header, you must have the
 	//   s3:GetObjectVersion permission to access a specific version of an object. The
 	//   s3:GetObject permission is not required in this scenario.
+	//
 	//   - If you request the current version of an object without a specific versionId
 	//   in the request header, only the s3:GetObject permission is required. The
 	//   s3:GetObjectVersion permission is not required in this scenario.
+	//
 	//   - Directory buckets - S3 Versioning isn't enabled and supported for directory
 	//   buckets. For this API operation, only the null value of the version ID is
 	//   supported by directory buckets. You can only specify null to the versionId
 	//   query parameter in the request.
-	// For more information about versioning, see PutBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketVersioning.html)
-	// .
+	//
+	// For more information about versioning, see [PutBucketVersioning].
+	//
+	// [PutBucketVersioning]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketVersioning.html
 	VersionId *string
 
 	noSmithyDocumentSerde
 }
 
 func (in *GetObjectInput) bindEndpointParams(p *EndpointParameters) {
+	p.Bucket = in.Bucket
 	p.Key = in.Key
 
@@ -325,37 +435,55 @@ type GetObjectOutput struct {
 
 	Body io.ReadCloser
 
 	// Indicates whether the object uses an S3 Bucket Key for server-side encryption
-	// with Key Management Service (KMS) keys (SSE-KMS). This functionality is not
-	// supported for directory buckets.
+	// with Key Management Service (KMS) keys (SSE-KMS).
 	BucketKeyEnabled *bool
 
 	// Specifies caching behavior along the request/reply chain.
 	CacheControl *string
 
-	// The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
-	// present if it was uploaded with the object. For more information, see Checking
-	// object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+	// The Base64 encoded, 32-bit CRC32 checksum of the object. This checksum is only
+	// present if the checksum was uploaded with the object. For more information, see [Checking object integrity]
 	// in the Amazon S3 User Guide.
+	//
+	// [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
 	ChecksumCRC32 *string
 
-	// The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
-	// present if it was uploaded with the object. For more information, see Checking
-	// object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+	// The Base64 encoded, 32-bit CRC32C checksum of the object. This will only be
+	// present if the checksum was uploaded with the object. For more information, see [Checking object integrity]
 	// in the Amazon S3 User Guide. 
+	//
+	// [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
 	ChecksumCRC32C *string
 
-	// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
-	// present if it was uploaded with the object. For more information, see Checking
-	// object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+	// The Base64 encoded, 64-bit CRC64NVME checksum of the object. For more
+	// information, see [Checking object integrity in the Amazon S3 User Guide].
+	//
+	// [Checking object integrity in the Amazon S3 User Guide]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+	ChecksumCRC64NVME *string
+
+	// The Base64 encoded, 160-bit SHA1 digest of the object. This will only be
+	// present if the checksum was uploaded with the object. For more information, see [Checking object integrity]
 	// in the Amazon S3 User Guide.
+	//
+	// [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
 	ChecksumSHA1 *string
 
-	// The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
-	// present if it was uploaded with the object. For more information, see Checking
-	// object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+	// The Base64 encoded, 256-bit SHA256 digest of the object. This will only be
+	// present if the checksum was uploaded with the object. For more information, see [Checking object integrity]
 	// in the Amazon S3 User Guide.
+	//
+	// [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
 	ChecksumSHA256 *string
 
+	// The checksum type, which determines how part-level checksums are combined to
+	// create an object-level checksum for multipart objects. You can use this header
+	// response to verify that the checksum type that is received is the same checksum
+	// type that was specified in the CreateMultipartUpload request. For more
+	// information, see [Checking object integrity]in the Amazon S3 User Guide.
+	//
+	// [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+	ChecksumType types.ChecksumType
+
 	// Specifies presentational information for the object.
 	ContentDisposition *string
 
@@ -378,9 +506,11 @@ type GetObjectOutput struct {
 
 	// Indicates whether the object retrieved was (true) or was not (false) a Delete
 	// Marker. If false, this response header does not appear in the response.
+	//
 	//   - If the current version of the object is a delete marker, Amazon S3 behaves
 	//   as if the object was deleted and includes x-amz-delete-marker: true in the
 	//   response.
+	//
 	//   - If the specified version in the request is a delete marker, the response
 	//   returns a 405 Method Not Allowed error and the Last-Modified: timestamp
 	//   response header.
@@ -390,20 +520,35 @@ type GetObjectOutput struct {
 	// specific version of a resource found at a URL.
 	ETag *string
 
-	// If the object expiration is configured (see PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html)
-	// ), the response includes this header. It includes the expiry-date and rule-id
+	// If the object expiration is configured (see [PutBucketLifecycleConfiguration]PutBucketLifecycleConfiguration ),
+	// the response includes this header. 
It includes the expiry-date and rule-id // key-value pairs providing object expiration information. The value of the - // rule-id is URL-encoded. This functionality is not supported for directory + // rule-id is URL-encoded. + // + // Object expiration information is not returned in directory buckets and this + // header returns the value " NotImplemented " in all responses for directory // buckets. + // + // [PutBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html Expiration *string // The date and time at which the object is no longer cacheable. + // + // Deprecated: This field is handled inconsistently across AWS SDKs. Prefer using + // the ExpiresString field which contains the unparsed value from the service + // response. Expires *time.Time - // Date and time when the object was last modified. General purpose buckets - When - // you specify a versionId of the object in your request, if the specified version - // in the request is a delete marker, the response returns a 405 Method Not Allowed - // error and the Last-Modified: timestamp response header. + // The unparsed value of the Expires field from the service response. Prefer use + // of this value over the normal Expires response field where possible. + ExpiresString *string + + // Date and time when the object was last modified. + // + // General purpose buckets - When you specify a versionId of the object in your + // request, if the specified version in the request is a delete marker, the + // response returns a 405 Method Not Allowed error and the Last-Modified: timestamp + // response header. LastModified *time.Time // A map of metadata to store with the object in S3. @@ -415,20 +560,25 @@ type GetObjectOutput struct { // are prefixed with x-amz-meta- . This can happen if you create metadata using an // API like SOAP that supports more flexible metadata than the REST API. For // example, using SOAP, you can create metadata whose values are not legal HTTP - // headers. This functionality is not supported for directory buckets. + // headers. + // + // This functionality is not supported for directory buckets. MissingMeta *int32 // Indicates whether this object has an active legal hold. This field is only - // returned if you have permission to view an object's legal hold status. This - // functionality is not supported for directory buckets. + // returned if you have permission to view an object's legal hold status. + // + // This functionality is not supported for directory buckets. ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus - // The Object Lock mode that's currently in place for this object. This - // functionality is not supported for directory buckets. + // The Object Lock mode that's currently in place for this object. + // + // This functionality is not supported for directory buckets. ObjectLockMode types.ObjectLockMode - // The date and time when this object's Object Lock will expire. This - // functionality is not supported for directory buckets. + // The date and time when this object's Object Lock will expire. + // + // This functionality is not supported for directory buckets. ObjectLockRetainUntilDate *time.Time // The count of parts this object has. This value is only returned if you specify @@ -436,63 +586,72 @@ type GetObjectOutput struct { PartsCount *int32 // Amazon S3 can return this if your request involves a bucket that is either a - // source or destination in a replication rule. 
This functionality is not supported - // for directory buckets. + // source or destination in a replication rule. + // + // This functionality is not supported for directory buckets. ReplicationStatus types.ReplicationStatus // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // Provides information about object restoration action and expiration time of the - // restored object copy. This functionality is not supported for directory buckets. - // Only the S3 Express One Zone storage class is supported by directory buckets to - // store objects. + // restored object copy. + // + // This functionality is not supported for directory buckets. Only the S3 Express + // One Zone storage class is supported by directory buckets to store objects. Restore *string // If server-side encryption with a customer-provided encryption key was // requested, the response will include this header to confirm the encryption - // algorithm that's used. This functionality is not supported for directory - // buckets. + // algorithm that's used. + // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string // If server-side encryption with a customer-provided encryption key was // requested, the response will include this header to provide the round-trip - // message integrity verification of the customer-provided encryption key. This - // functionality is not supported for directory buckets. + // message integrity verification of the customer-provided encryption key. + // + // This functionality is not supported for directory buckets. SSECustomerKeyMD5 *string - // If present, indicates the ID of the Key Management Service (KMS) symmetric - // encryption customer managed key that was used for the object. This functionality - // is not supported for directory buckets. + // If present, indicates the ID of the KMS key that was used for object encryption. SSEKMSKeyId *string // The server-side encryption algorithm used when you store this object in Amazon - // S3 (for example, AES256 , aws:kms , aws:kms:dsse ). For directory buckets, only - // server-side encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is - // supported. + // S3. ServerSideEncryption types.ServerSideEncryption // Provides storage class information of the object. Amazon S3 returns this header - // for all objects except for S3 Standard storage class objects. Directory buckets - // - Only the S3 Express One Zone storage class is supported by directory buckets - // to store objects. + // for all objects except for S3 Standard storage class objects. + // + // Directory buckets - Only the S3 Express One Zone storage class is supported by + // directory buckets to store objects. StorageClass types.StorageClass // The number of tags, if any, on the object, when you have the relevant - // permission to read object tags. You can use GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) - // to retrieve the tag set associated with an object. This functionality is not - // supported for directory buckets. + // permission to read object tags. + // + // You can use [GetObjectTagging] to retrieve the tag set associated with an object. + // + // This functionality is not supported for directory buckets. 
+ // + // [GetObjectTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html TagCount *int32 - // Version ID of the object. This functionality is not supported for directory - // buckets. + // Version ID of the object. + // + // This functionality is not supported for directory buckets. VersionId *string // If the bucket is configured as a website, redirects requests for this object to // another object in the same bucket or to an external URL. Amazon S3 stores the - // value of this header in the object metadata. This functionality is not supported - // for directory buckets. + // value of this header in the object metadata. + // + // This functionality is not supported for directory buckets. WebsiteRedirectLocation *string // Metadata pertaining to the operation's result. @@ -544,6 +703,9 @@ func (c *Client) addOperationGetObjectMiddlewares(stack *middleware.Stack, optio if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -556,6 +718,21 @@ func (c *Client) addOperationGetObjectMiddlewares(stack *middleware.Stack, optio if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addResponseChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetObjectValidationMiddleware(stack); err != nil { return err } @@ -592,6 +769,18 @@ func (c *Client) addOperationGetObjectMiddlewares(stack *middleware.Stack, optio if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } @@ -620,13 +809,20 @@ func getGetObjectRequestValidationModeMember(input interface{}) (string, bool) { return string(in.ChecksumMode), true } +func setGetObjectRequestValidationModeMember(input interface{}, mode string) { + in := input.(*GetObjectInput) + in.ChecksumMode = types.ChecksumMode(mode) +} + func addGetObjectOutputChecksumMiddlewares(stack *middleware.Stack, options Options) error { return internalChecksum.AddOutputMiddleware(stack, internalChecksum.OutputMiddlewareOptions{ GetValidationMode: getGetObjectRequestValidationModeMember, - ValidationAlgorithms: []string{"CRC32", "CRC32C", "SHA256", "SHA1"}, + SetValidationMode: setGetObjectRequestValidationModeMember, + ResponseChecksumValidation: options.ResponseChecksumValidation, + ValidationAlgorithms: []string{"CRC64NVME", "CRC32", "CRC32C", "SHA256", "SHA1"}, IgnoreMultipartValidation: true, - LogValidationSkipped: true, - LogMultipartValidationSkipped: true, + LogValidationSkipped: !options.DisableLogOutputChecksumValidationSkipped, + LogMultipartValidationSkipped: !options.DisableLogOutputChecksumValidationSkipped, }) } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAcl.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAcl.go index 
fc903cb39..03db6fb83 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAcl.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAcl.go
@@ -13,24 +13,39 @@ import (
 	smithyhttp "github.com/aws/smithy-go/transport/http"
 )
 
-// This operation is not supported by directory buckets. Returns the access
-// control list (ACL) of an object. To use this operation, you must have
-// s3:GetObjectAcl permissions or READ_ACP access to the object. For more
-// information, see Mapping of ACL permissions and access policy permissions (https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#acl-access-policy-permission-mapping)
-// in the Amazon S3 User Guide This functionality is not supported for Amazon S3 on
-// Outposts. By default, GET returns ACL information about the current version of
-// an object. To return ACL information about a different version, use the
-// versionId subresource. If your bucket uses the bucket owner enforced setting for
-// S3 Object Ownership, requests to read ACLs are still supported and return the
+// This operation is not supported for directory buckets.
+//
+// Returns the access control list (ACL) of an object. To use this operation, you
+// must have s3:GetObjectAcl permissions or READ_ACP access to the object. For
+// more information, see [Mapping of ACL permissions and access policy permissions]in the Amazon S3 User Guide.
+//
+// This functionality is not supported for Amazon S3 on Outposts.
+//
+// By default, GET returns ACL information about the current version of an object.
+// To return ACL information about a different version, use the versionId
+// subresource.
+//
+// If your bucket uses the bucket owner enforced setting for S3 Object Ownership,
+// requests to read ACLs are still supported and return the
 // bucket-owner-full-control ACL with the owner being the account that created the
-// bucket. For more information, see Controlling object ownership and disabling
-// ACLs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html)
-// in the Amazon S3 User Guide. The following operations are related to
-// GetObjectAcl :
-//   - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)
-//   - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html)
-//   - DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html)
-//   - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html)
+// bucket. For more information, see [Controlling object ownership and disabling ACLs]in the Amazon S3 User Guide. 
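As a quick illustration of the response shape documented here — with hypothetical names, a hypothetical helper, and an *s3.Client built as in the earlier sketches — reading an object's grants might look like:

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// printObjectACL is a hypothetical helper that dumps an object's grants.
// Under the bucket owner enforced setting it should print a single
// bucket-owner-full-control style grant, as described above.
func printObjectACL(ctx context.Context, client *s3.Client) error {
	out, err := client.GetObjectAcl(ctx, &s3.GetObjectAclInput{
		Bucket: aws.String("amzn-s3-demo-bucket"), // hypothetical
		Key:    aws.String("photos/sample.jpg"),   // hypothetical
	})
	if err != nil {
		return err
	}
	if out.Owner != nil {
		fmt.Println("owner:", aws.ToString(out.Owner.ID))
	}
	for _, g := range out.Grants {
		if g.Grantee != nil {
			fmt.Printf("grantee type=%v permission=%v\n", g.Grantee.Type, g.Permission)
		}
	}
	return nil
}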
+// +// The following operations are related to GetObjectAcl : +// +// [GetObject] +// +// [GetObjectAttributes] +// +// [DeleteObject] +// +// [PutObject] +// +// [DeleteObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html +// [Mapping of ACL permissions and access policy permissions]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#acl-access-policy-permission-mapping +// [GetObjectAttributes]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html +// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html +// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html +// [Controlling object ownership and disabling ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html func (c *Client) GetObjectAcl(ctx context.Context, params *GetObjectAclInput, optFns ...func(*Options)) (*GetObjectAclOutput, error) { if params == nil { params = &GetObjectAclInput{} @@ -49,6 +64,7 @@ func (c *Client) GetObjectAcl(ctx context.Context, params *GetObjectAclInput, op type GetObjectAclInput struct { // The bucket name that contains the object for which to get the ACL information. + // // Access points - When you use this action with an access point, you must provide // the alias of the access point in place of the bucket name or specify the access // point ARN. When using the access point ARN, you must direct requests to the @@ -56,8 +72,9 @@ type GetObjectAclInput struct { // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this // action with an access point through the Amazon Web Services SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -76,20 +93,24 @@ type GetObjectAclInput struct { // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer - // Version ID used to reference a specific version of the object. This - // functionality is not supported for directory buckets. + // Version ID used to reference a specific version of the object. + // + // This functionality is not supported for directory buckets. 
VersionId *string noSmithyDocumentSerde } func (in *GetObjectAclInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.Key = in.Key @@ -100,11 +121,13 @@ type GetObjectAclOutput struct { // A list of grants. Grants []types.Grant - // Container for the bucket owner's display name and ID. + // Container for the bucket owner's display name and ID. Owner *types.Owner // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // Metadata pertaining to the operation's result. @@ -156,6 +179,9 @@ func (c *Client) addOperationGetObjectAclMiddlewares(stack *middleware.Stack, op if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -171,6 +197,18 @@ func (c *Client) addOperationGetObjectAclMiddlewares(stack *middleware.Stack, op if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetObjectAclValidationMiddleware(stack); err != nil { return err } @@ -204,6 +242,18 @@ func (c *Client) addOperationGetObjectAclMiddlewares(stack *middleware.Stack, op if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAttributes.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAttributes.go index dd1b9257c..03c7a3929 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAttributes.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAttributes.go @@ -16,32 +16,40 @@ import ( // Retrieves all the metadata from an object without returning the object itself. // This operation is useful if you're interested only in an object's metadata. +// // GetObjectAttributes combines the functionality of HeadObject and ListParts . All // of the data returned with each of those individual calls can be returned with a -// single call to GetObjectAttributes . Directory buckets - For directory buckets, -// you must make requests for this API operation to the Zonal endpoint. These -// endpoints support virtual-hosted-style requests in the format -// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style -// requests are not supported. For more information, see Regional and Zonal -// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. Permissions +// single call to GetObjectAttributes . +// +// Directory buckets - For directory buckets, you must make requests for this API +// operation to the Zonal endpoint. 
These endpoints support virtual-hosted-style +// requests in the format +// https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name +// . Path-style requests are not supported. For more information about endpoints +// in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information +// about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. +// +// Permissions +// // - General purpose bucket permissions - To use GetObjectAttributes , you must // have READ access to the object. The permissions that you need to use this -// operation with depend on whether the bucket is versioned. If the bucket is -// versioned, you need both the s3:GetObjectVersion and -// s3:GetObjectVersionAttributes permissions for this operation. If the bucket is -// not versioned, you need the s3:GetObject and s3:GetObjectAttributes -// permissions. For more information, see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) +// operation depend on whether the bucket is versioned. If the bucket is versioned, +// you need both the s3:GetObjectVersion and s3:GetObjectVersionAttributes +// permissions for this operation. If the bucket is not versioned, you need the +// s3:GetObject and s3:GetObjectAttributes permissions. For more information, see [Specifying Permissions in a Policy] // in the Amazon S3 User Guide. If the object that you request does not exist, the // error Amazon S3 returns depends on whether you also have the s3:ListBucket // permission. +// // - If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an // HTTP status code 404 Not Found ("no such key") error. +// // - If you don't have the s3:ListBucket permission, Amazon S3 returns an HTTP // status code 403 Forbidden ("access denied") error. +// // - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// API operation for session-based authorization. Specifically, you grant the +// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation +// for session-based authorization. Specifically, you grant the // s3express:CreateSession permission to the directory bucket in a bucket policy // or an IAM identity-based policy. Then, you make the CreateSession API call on // the bucket to obtain a session token. With the session token in your request @@ -49,8 +57,12 @@ import ( // expires, you make another CreateSession API call to generate a new session // token for use. Amazon Web Services CLI or SDKs create session and refresh the // session token automatically to avoid service interruptions when a session -// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// . +// expires. For more information about authorization, see [CreateSession]CreateSession . +// +// If the object is encrypted with SSE-KMS, you must also have the +// +// kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies +// and KMS key policies for the KMS key. 
// // Encryption Encryption request headers, like x-amz-server-side-encryption , // should not be sent for HEAD requests if your object uses server-side encryption @@ -61,49 +73,96 @@ import ( // want to specify the encryption method. If you include this header in a GET // request for an object that uses these types of keys, you’ll get an HTTP 400 Bad // Request error. It's because the encryption method can't be changed when you -// retrieve the object. If you encrypt an object by using server-side encryption -// with customer-provided encryption keys (SSE-C) when you store the object in -// Amazon S3, then when you retrieve the metadata from the object, you must use the -// following headers to provide the encryption key for the server to be able to -// retrieve the object's metadata. The headers are: +// retrieve the object. +// +// If you encrypt an object by using server-side encryption with customer-provided +// encryption keys (SSE-C) when you store the object in Amazon S3, then when you +// retrieve the metadata from the object, you must use the following headers to +// provide the encryption key for the server to be able to retrieve the object's +// metadata. The headers are: +// // - x-amz-server-side-encryption-customer-algorithm +// // - x-amz-server-side-encryption-customer-key +// // - x-amz-server-side-encryption-customer-key-MD5 // -// For more information about SSE-C, see Server-Side Encryption (Using -// Customer-Provided Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) -// in the Amazon S3 User Guide. Directory bucket permissions - For directory -// buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) ( -// AES256 ) is supported. Versioning Directory buckets - S3 Versioning isn't -// enabled and supported for directory buckets. For this API operation, only the -// null value of the version ID is supported by directory buckets. You can only -// specify null to the versionId query parameter in the request. Conditional -// request headers Consider the following when using request headers: +// For more information about SSE-C, see [Server-Side Encryption (Using Customer-Provided Encryption Keys)] in the Amazon S3 User Guide. +// +// Directory bucket permissions - For directory buckets, there are only two +// supported options for server-side encryption: server-side encryption with Amazon +// S3 managed keys (SSE-S3) ( AES256 ) and server-side encryption with KMS keys +// (SSE-KMS) ( aws:kms ). We recommend that the bucket's default encryption uses +// the desired encryption configuration and you don't override the bucket default +// encryption in your CreateSession requests or PUT object requests. Then, new +// objects are automatically encrypted with the desired encryption settings. For +// more information, see [Protecting data with server-side encryption]in the Amazon S3 User Guide. For more information about +// the encryption overriding behaviors in directory buckets, see [Specifying server-side encryption with KMS for new object uploads]. +// +// Versioning Directory buckets - S3 Versioning isn't enabled and supported for +// directory buckets. For this API operation, only the null value of the version +// ID is supported by directory buckets. You can only specify null to the versionId +// query parameter in the request. 
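As a concrete illustration of the versioning note above, this hedged Go sketch calls GetObjectAttributes against a directory bucket and passes the only version ID such buckets accept, the literal string "null". The bucket, key, and requested attributes are hypothetical placeholders, assuming a default-configured aws-sdk-go-v2 client:

package main

import (
    "context"
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/config"
    "github.com/aws/aws-sdk-go-v2/service/s3"
    "github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
    ctx := context.Background()
    cfg, err := config.LoadDefaultConfig(ctx)
    if err != nil {
        log.Fatal(err)
    }
    client := s3.NewFromConfig(cfg)

    out, err := client.GetObjectAttributes(ctx, &s3.GetObjectAttributesInput{
        Bucket:    aws.String("amzn-s3-demo-bucket--usw2-az1--x-s3"), // hypothetical directory bucket
        Key:       aws.String("key-name"),                           // hypothetical key
        VersionId: aws.String("null"),                               // directory buckets accept only "null"
        ObjectAttributes: []types.ObjectAttributes{
            types.ObjectAttributesEtag,
            types.ObjectAttributesObjectSize,
        },
    })
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(aws.ToString(out.ETag), aws.ToInt64(out.ObjectSize))
}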
+// +// Conditional request headers Consider the following when using request headers: +// // - If both of the If-Match and If-Unmodified-Since headers are present in the // request as follows, then Amazon S3 returns the HTTP status code 200 OK and the // data requested: +// // - If-Match condition evaluates to true . -// - If-Unmodified-Since condition evaluates to false . For more information -// about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) -// . +// +// - If-Unmodified-Since condition evaluates to false . +// +// For more information about conditional requests, see [RFC 7232]. +// // - If both of the If-None-Match and If-Modified-Since headers are present in // the request as follows, then Amazon S3 returns the HTTP status code 304 Not // Modified : +// // - If-None-Match condition evaluates to false . -// - If-Modified-Since condition evaluates to true . For more information about -// conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) . -// -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket_name.s3express-az_id.region.amazonaws.com . The following actions are -// related to GetObjectAttributes : -// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) -// - GetObjectAcl (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html) -// - GetObjectLegalHold (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectLegalHold.html) -// - GetObjectLockConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectLockConfiguration.html) -// - GetObjectRetention (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectRetention.html) -// - GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) -// - HeadObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadObject.html) -// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// +// - If-Modified-Since condition evaluates to true . +// +// For more information about conditional requests, see [RFC 7232]. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket-name.s3express-zone-id.region-code.amazonaws.com . 
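The Host header format above can be spelled out in a tiny sketch. The SDK derives this hostname automatically, so this is purely illustrative, with hypothetical bucket, zone-id, and region-code values:

package main

import "fmt"

func main() {
    bucket := "amzn-s3-demo-bucket--usw2-az1--x-s3" // hypothetical; the zone ID is embedded in the name
    zoneID := "usw2-az1"
    region := "us-west-2"
    fmt.Printf("%s.s3express-%s.%s.amazonaws.com\n", bucket, zoneID, region)
}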
+// +// The following actions are related to GetObjectAttributes : +// +// [GetObject] +// +// [GetObjectAcl] +// +// [GetObjectLegalHold] +// +// [GetObjectLockConfiguration] +// +// [GetObjectRetention] +// +// [GetObjectTagging] +// +// [HeadObject] +// +// [ListParts] +// +// [Specifying server-side encryption with KMS for new object uploads]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-specifying-kms-encryption.html +// [GetObjectLegalHold]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectLegalHold.html +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [ListParts]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html +// [Server-Side Encryption (Using Customer-Provided Encryption Keys)]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html +// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html +// [GetObjectTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html +// [Specifying Permissions in a Policy]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html +// [RFC 7232]: https://tools.ietf.org/html/rfc7232 +// [HeadObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadObject.html +// [GetObjectLockConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectLockConfiguration.html +// [Protecting data with server-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-serv-side-encryption.html +// [GetObjectAcl]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html +// [GetObjectRetention]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectRetention.html +// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html func (c *Client) GetObjectAttributes(ctx context.Context, params *GetObjectAttributesInput, optFns ...func(*Options)) (*GetObjectAttributesOutput, error) { if params == nil { params = &GetObjectAttributesInput{} @@ -121,31 +180,39 @@ func (c *Client) GetObjectAttributes(ctx context.Context, params *GetObjectAttri type GetObjectAttributesInput struct { - // The name of the bucket that contains the object. Directory buckets - When you - // use this operation with a directory bucket, you must use virtual-hosted-style - // requests in the format Bucket_name.s3express-az_id.region.amazonaws.com . - // Path-style requests are not supported. Directory bucket names must be unique in - // the chosen Availability Zone. Bucket names must follow the format - // bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 - // ). For information about bucket naming restrictions, see Directory bucket - // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide. Access points - When you use this action with an - // access point, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When using the access point ARN, - // you must direct requests to the access point hostname. The access point hostname - // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. 
- // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. Access points and Object Lambda access points are - // not supported by directory buckets. S3 on Outposts - When you use this action - // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts - // hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // The name of the bucket that contains the object. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use virtual-hosted-style requests in the format + // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests + // are not supported. Directory bucket names must be unique in the chosen Zone + // (Availability Zone or Local Zone). Bucket names must follow the format + // bucket-base-name--zone-id--x-s3 (for example, + // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the access + // point ARN. When using the access point ARN, you must direct requests to the + // access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with S3 on Outposts, you must direct + // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the + // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When + // you use this action with S3 on Outposts, the destination bucket must be the + // Outposts access point ARN or the access point alias. For more information about + // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -177,32 +244,38 @@ type GetObjectAttributesInput struct { // Bucket owners need not specify this parameter in their requests. 
If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer - // Specifies the algorithm to use when encrypting the object (for example, - // AES256). This functionality is not supported for directory buckets. + // Specifies the algorithm to use when encrypting the object (for example, AES256). + // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string // Specifies the customer-provided encryption key for Amazon S3 to use in // encrypting data. This value is used to store the object and then it is // discarded; Amazon S3 does not store the encryption key. The key must be // appropriate for use with the algorithm specified in the - // x-amz-server-side-encryption-customer-algorithm header. This functionality is - // not supported for directory buckets. + // x-amz-server-side-encryption-customer-algorithm header. + // + // This functionality is not supported for directory buckets. SSECustomerKey *string // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. This functionality is not - // supported for directory buckets. + // encryption key was transmitted without error. + // + // This functionality is not supported for directory buckets. SSECustomerKeyMD5 *string - // The version ID used to reference a specific version of the object. S3 - // Versioning isn't enabled and supported for directory buckets. For this API + // The version ID used to reference a specific version of the object. + // + // S3 Versioning isn't enabled and supported for directory buckets. For this API // operation, only the null value of the version ID is supported by directory // buckets. You can only specify null to the versionId query parameter in the // request. @@ -212,6 +285,7 @@ type GetObjectAttributesInput struct { } func (in *GetObjectAttributesInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket } @@ -223,14 +297,18 @@ type GetObjectAttributesOutput struct { // Specifies whether the object retrieved was ( true ) or was not ( false ) a // delete marker. If false , this response header does not appear in the response. + // To learn more about delete markers, see [Working with delete markers]. + // // This functionality is not supported for directory buckets. + // + // [Working with delete markers]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/DeleteMarker.html DeleteMarker *bool // An ETag is an opaque identifier assigned by a web server to a specific version // of a resource found at a URL. ETag *string - // The creation date of the object. + // Date and time when the object was last modified. 
LastModified *time.Time // A collection of parts associated with a multipart upload. @@ -240,18 +318,25 @@ type GetObjectAttributesOutput struct { ObjectSize *int64 // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // Provides the storage class information of the object. Amazon S3 returns this - // header for all objects except for S3 Standard storage class objects. For more - // information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) - // . Directory buckets - Only the S3 Express One Zone storage class is supported by + // header for all objects except for S3 Standard storage class objects. + // + // For more information, see [Storage Classes]. + // + // Directory buckets - Only the S3 Express One Zone storage class is supported by // directory buckets to store objects. + // + // [Storage Classes]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html StorageClass types.StorageClass - // The version ID of the object. This functionality is not supported for directory - // buckets. + // The version ID of the object. + // + // This functionality is not supported for directory buckets. VersionId *string // Metadata pertaining to the operation's result. @@ -303,6 +388,9 @@ func (c *Client) addOperationGetObjectAttributesMiddlewares(stack *middleware.St if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -318,6 +406,18 @@ func (c *Client) addOperationGetObjectAttributesMiddlewares(stack *middleware.St if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetObjectAttributesValidationMiddleware(stack); err != nil { return err } @@ -351,6 +451,18 @@ func (c *Client) addOperationGetObjectAttributesMiddlewares(stack *middleware.St if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLegalHold.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLegalHold.go index 548f5e1cc..459702e8d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLegalHold.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLegalHold.go @@ -13,11 +13,18 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Gets an object's current -// legal hold status. For more information, see Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) -// . 
This functionality is not supported for Amazon S3 on Outposts. The following -// action is related to GetObjectLegalHold : -// - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) +// This operation is not supported for directory buckets. +// +// Gets an object's current legal hold status. For more information, see [Locking Objects]. +// +// This functionality is not supported for Amazon S3 on Outposts. +// +// The following action is related to GetObjectLegalHold : +// +// [GetObjectAttributes] +// +// [GetObjectAttributes]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html +// [Locking Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html func (c *Client) GetObjectLegalHold(ctx context.Context, params *GetObjectLegalHoldInput, optFns ...func(*Options)) (*GetObjectLegalHoldOutput, error) { if params == nil { params = &GetObjectLegalHoldInput{} @@ -36,15 +43,18 @@ func (c *Client) GetObjectLegalHold(ctx context.Context, params *GetObjectLegalH type GetObjectLegalHoldInput struct { // The bucket name containing the object whose legal hold status you want to - // retrieve. Access points - When you use this action with an access point, you - // must provide the alias of the access point in place of the bucket name or - // specify the access point ARN. When using the access point ARN, you must direct - // requests to the access point hostname. The access point hostname takes the form + // retrieve. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the access + // point ARN. When using the access point ARN, you must direct requests to the + // access point hostname. The access point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this // action with an access point through the Amazon Web Services SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -63,10 +73,12 @@ type GetObjectLegalHoldInput struct { // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer // The version ID of the object whose legal hold status you want to retrieve. 
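A minimal usage sketch for GetObjectLegalHold, assuming a default-configured aws-sdk-go-v2 client; the bucket and key are hypothetical placeholders:

package main

import (
    "context"
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/config"
    "github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
    ctx := context.Background()
    cfg, err := config.LoadDefaultConfig(ctx)
    if err != nil {
        log.Fatal(err)
    }
    client := s3.NewFromConfig(cfg)

    out, err := client.GetObjectLegalHold(ctx, &s3.GetObjectLegalHoldInput{
        Bucket: aws.String("amzn-s3-demo-bucket"), // hypothetical
        Key:    aws.String("key-name"),            // hypothetical
    })
    if err != nil {
        log.Fatal(err)
    }
    if out.LegalHold != nil {
        fmt.Println("legal hold status:", out.LegalHold.Status) // ON or OFF
    }
}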
@@ -76,6 +88,7 @@ type GetObjectLegalHoldInput struct { } func (in *GetObjectLegalHoldInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket } @@ -134,6 +147,9 @@ func (c *Client) addOperationGetObjectLegalHoldMiddlewares(stack *middleware.Sta if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -149,6 +165,18 @@ func (c *Client) addOperationGetObjectLegalHoldMiddlewares(stack *middleware.Sta if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetObjectLegalHoldValidationMiddleware(stack); err != nil { return err } @@ -182,6 +210,18 @@ func (c *Client) addOperationGetObjectLegalHoldMiddlewares(stack *middleware.Sta if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLockConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLockConfiguration.go index e8e2fbd9f..f49a4458c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLockConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLockConfiguration.go @@ -13,12 +13,18 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Gets the Object Lock -// configuration for a bucket. The rule specified in the Object Lock configuration -// will be applied by default to every new object placed in the specified bucket. -// For more information, see Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) -// . The following action is related to GetObjectLockConfiguration : -// - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) +// This operation is not supported for directory buckets. +// +// Gets the Object Lock configuration for a bucket. The rule specified in the +// Object Lock configuration will be applied by default to every new object placed +// in the specified bucket. For more information, see [Locking Objects]. 
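For GetObjectLockConfiguration as described above, a minimal sketch that reads a bucket's default retention rule, assuming a default-configured client and a hypothetical bucket name:

package main

import (
    "context"
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/config"
    "github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
    ctx := context.Background()
    cfg, err := config.LoadDefaultConfig(ctx)
    if err != nil {
        log.Fatal(err)
    }
    client := s3.NewFromConfig(cfg)

    out, err := client.GetObjectLockConfiguration(ctx, &s3.GetObjectLockConfigurationInput{
        Bucket: aws.String("amzn-s3-demo-bucket"), // hypothetical
    })
    if err != nil {
        log.Fatal(err)
    }
    if lc := out.ObjectLockConfiguration; lc != nil && lc.Rule != nil && lc.Rule.DefaultRetention != nil {
        dr := lc.Rule.DefaultRetention
        fmt.Println("default retention:", dr.Mode, aws.ToInt32(dr.Days), "days")
    }
}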
+// +// The following action is related to GetObjectLockConfiguration : +// +// [GetObjectAttributes] +// +// [GetObjectAttributes]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html +// [Locking Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html func (c *Client) GetObjectLockConfiguration(ctx context.Context, params *GetObjectLockConfigurationInput, optFns ...func(*Options)) (*GetObjectLockConfigurationOutput, error) { if params == nil { params = &GetObjectLockConfigurationInput{} @@ -36,16 +42,18 @@ func (c *Client) GetObjectLockConfiguration(ctx context.Context, params *GetObje type GetObjectLockConfigurationInput struct { - // The bucket whose Object Lock configuration you want to retrieve. Access points - // - When you use this action with an access point, you must provide the alias of - // the access point in place of the bucket name or specify the access point ARN. - // When using the access point ARN, you must direct requests to the access point - // hostname. The access point hostname takes the form + // The bucket whose Object Lock configuration you want to retrieve. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the access + // point ARN. When using the access point ARN, you must direct requests to the + // access point hostname. The access point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this // action with an access point through the Amazon Web Services SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. 
Bucket *string @@ -59,6 +67,7 @@ type GetObjectLockConfigurationInput struct { } func (in *GetObjectLockConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket } @@ -117,6 +126,9 @@ func (c *Client) addOperationGetObjectLockConfigurationMiddlewares(stack *middle if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -132,6 +144,18 @@ func (c *Client) addOperationGetObjectLockConfigurationMiddlewares(stack *middle if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetObjectLockConfigurationValidationMiddleware(stack); err != nil { return err } @@ -165,6 +189,18 @@ func (c *Client) addOperationGetObjectLockConfigurationMiddlewares(stack *middle if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectRetention.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectRetention.go index b4daabf16..bd2d0aaa1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectRetention.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectRetention.go @@ -13,11 +13,18 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Retrieves an object's -// retention settings. For more information, see Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) -// . This functionality is not supported for Amazon S3 on Outposts. The following -// action is related to GetObjectRetention : -// - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) +// This operation is not supported for directory buckets. +// +// Retrieves an object's retention settings. For more information, see [Locking Objects]. +// +// This functionality is not supported for Amazon S3 on Outposts. +// +// The following action is related to GetObjectRetention : +// +// [GetObjectAttributes] +// +// [GetObjectAttributes]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html +// [Locking Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html func (c *Client) GetObjectRetention(ctx context.Context, params *GetObjectRetentionInput, optFns ...func(*Options)) (*GetObjectRetentionOutput, error) { if params == nil { params = &GetObjectRetentionInput{} @@ -36,15 +43,18 @@ func (c *Client) GetObjectRetention(ctx context.Context, params *GetObjectRetent type GetObjectRetentionInput struct { // The bucket name containing the object whose retention settings you want to - // retrieve. 
Access points - When you use this action with an access point, you - // must provide the alias of the access point in place of the bucket name or - // specify the access point ARN. When using the access point ARN, you must direct - // requests to the access point hostname. The access point hostname takes the form + // retrieve. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the access + // point ARN. When using the access point ARN, you must direct requests to the + // access point hostname. The access point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this // action with an access point through the Amazon Web Services SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -63,10 +73,12 @@ type GetObjectRetentionInput struct { // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer // The version ID for the object whose retention settings you want to retrieve. 
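A minimal usage sketch for GetObjectRetention, assuming a default-configured client; the bucket and key are hypothetical placeholders:

package main

import (
    "context"
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/config"
    "github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
    ctx := context.Background()
    cfg, err := config.LoadDefaultConfig(ctx)
    if err != nil {
        log.Fatal(err)
    }
    client := s3.NewFromConfig(cfg)

    out, err := client.GetObjectRetention(ctx, &s3.GetObjectRetentionInput{
        Bucket: aws.String("amzn-s3-demo-bucket"), // hypothetical
        Key:    aws.String("key-name"),            // hypothetical
    })
    if err != nil {
        log.Fatal(err)
    }
    if out.Retention != nil {
        fmt.Println("mode:", out.Retention.Mode, "retain until:", aws.ToTime(out.Retention.RetainUntilDate))
    }
}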
@@ -76,6 +88,7 @@ type GetObjectRetentionInput struct { } func (in *GetObjectRetentionInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket } @@ -134,6 +147,9 @@ func (c *Client) addOperationGetObjectRetentionMiddlewares(stack *middleware.Sta if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -149,6 +165,18 @@ func (c *Client) addOperationGetObjectRetentionMiddlewares(stack *middleware.Sta if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetObjectRetentionValidationMiddleware(stack); err != nil { return err } @@ -182,6 +210,18 @@ func (c *Client) addOperationGetObjectRetentionMiddlewares(stack *middleware.Sta if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTagging.go index dc15914a0..204a15793 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTagging.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTagging.go @@ -13,20 +13,35 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Returns the tag-set of an -// object. You send the GET request against the tagging subresource associated with -// the object. To use this operation, you must have permission to perform the +// This operation is not supported for directory buckets. +// +// Returns the tag-set of an object. You send the GET request against the tagging +// subresource associated with the object. +// +// To use this operation, you must have permission to perform the // s3:GetObjectTagging action. By default, the GET action returns information about // current version of an object. For a versioned bucket, you can have multiple // versions of an object in your bucket. To retrieve tags of any other version, use // the versionId query parameter. You also need permission for the -// s3:GetObjectVersionTagging action. By default, the bucket owner has this -// permission and can grant this permission to others. For information about the -// Amazon S3 object tagging feature, see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html) -// . The following actions are related to GetObjectTagging : -// - DeleteObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html) -// - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) -// - PutObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html) +// s3:GetObjectVersionTagging action. 
+// +// By default, the bucket owner has this permission and can grant this permission +// to others. +// +// For information about the Amazon S3 object tagging feature, see [Object Tagging]. +// +// The following actions are related to GetObjectTagging : +// +// [DeleteObjectTagging] +// +// [GetObjectAttributes] +// +// [PutObjectTagging] +// +// [DeleteObjectTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html +// [PutObjectTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html +// [GetObjectAttributes]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html +// [Object Tagging]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html func (c *Client) GetObjectTagging(ctx context.Context, params *GetObjectTaggingInput, optFns ...func(*Options)) (*GetObjectTaggingOutput, error) { if params == nil { params = &GetObjectTaggingInput{} @@ -45,6 +60,7 @@ func (c *Client) GetObjectTagging(ctx context.Context, params *GetObjectTaggingI type GetObjectTaggingInput struct { // The bucket name containing the object for which to get the tagging information. + // // Access points - When you use this action with an access point, you must provide // the alias of the access point in place of the bucket name or specify the access // point ARN. When using the access point ARN, you must direct requests to the @@ -52,15 +68,17 @@ type GetObjectTaggingInput struct { // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this // action with an access point through the Amazon Web Services SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. S3 on Outposts - When you use this action with - // Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. - // The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // S3 on Outposts - When you use this action with S3 on Outposts, you must direct + // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the + // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When + // you use this action with S3 on Outposts, the destination bucket must be the + // Outposts access point ARN or the access point alias. For more information about + // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -79,10 +97,12 @@ type GetObjectTaggingInput struct { // Bucket owners need not specify this parameter in their requests. 
If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer // The versionId of the object for which to get the tagging information. @@ -92,6 +112,7 @@ type GetObjectTaggingInput struct { } func (in *GetObjectTaggingInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket } @@ -155,6 +176,9 @@ func (c *Client) addOperationGetObjectTaggingMiddlewares(stack *middleware.Stack if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -170,6 +194,18 @@ func (c *Client) addOperationGetObjectTaggingMiddlewares(stack *middleware.Stack if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetObjectTaggingValidationMiddleware(stack); err != nil { return err } @@ -203,6 +239,18 @@ func (c *Client) addOperationGetObjectTaggingMiddlewares(stack *middleware.Stack if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTorrent.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTorrent.go index 9fc83178e..5959ce734 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTorrent.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTorrent.go @@ -14,14 +14,24 @@ import ( "io" ) -// This operation is not supported by directory buckets. Returns torrent files -// from a bucket. BitTorrent can save you bandwidth when you're distributing large -// files. You can get torrent only for objects that are less than 5 GB in size, and -// that are not encrypted using server-side encryption with a customer-provided -// encryption key. To use GET, you must have READ access to the object. This -// functionality is not supported for Amazon S3 on Outposts. The following action -// is related to GetObjectTorrent : -// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// This operation is not supported for directory buckets. 
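For the GetObjectTagging operation above, a minimal sketch that lists an object's tag set, assuming a default-configured client and hypothetical bucket and key names:

package main

import (
    "context"
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/config"
    "github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
    ctx := context.Background()
    cfg, err := config.LoadDefaultConfig(ctx)
    if err != nil {
        log.Fatal(err)
    }
    client := s3.NewFromConfig(cfg)

    out, err := client.GetObjectTagging(ctx, &s3.GetObjectTaggingInput{
        Bucket: aws.String("amzn-s3-demo-bucket"), // hypothetical
        Key:    aws.String("key-name"),            // hypothetical
    })
    if err != nil {
        log.Fatal(err)
    }
    for _, t := range out.TagSet {
        fmt.Printf("%s=%s\n", aws.ToString(t.Key), aws.ToString(t.Value))
    }
}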
+// +// Returns torrent files from a bucket. BitTorrent can save you bandwidth when +// you're distributing large files. +// +// You can get torrent only for objects that are less than 5 GB in size, and that +// are not encrypted using server-side encryption with a customer-provided +// encryption key. +// +// To use GET, you must have READ access to the object. +// +// This functionality is not supported for Amazon S3 on Outposts. +// +// The following action is related to GetObjectTorrent : +// +// [GetObject] +// +// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html func (c *Client) GetObjectTorrent(ctx context.Context, params *GetObjectTorrentInput, optFns ...func(*Options)) (*GetObjectTorrentOutput, error) { if params == nil { params = &GetObjectTorrentInput{} @@ -58,16 +68,19 @@ type GetObjectTorrentInput struct { // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer noSmithyDocumentSerde } func (in *GetObjectTorrentInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket } @@ -78,7 +91,9 @@ type GetObjectTorrentOutput struct { Body io.ReadCloser // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // Metadata pertaining to the operation's result. 
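A minimal usage sketch for GetObjectTorrent that streams the returned torrent body to a local file; the bucket, key, and output filename are hypothetical placeholders:

package main

import (
    "context"
    "io"
    "log"
    "os"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/config"
    "github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
    ctx := context.Background()
    cfg, err := config.LoadDefaultConfig(ctx)
    if err != nil {
        log.Fatal(err)
    }
    client := s3.NewFromConfig(cfg)

    out, err := client.GetObjectTorrent(ctx, &s3.GetObjectTorrentInput{
        Bucket: aws.String("amzn-s3-demo-bucket"), // hypothetical
        Key:    aws.String("key-name"),            // hypothetical
    })
    if err != nil {
        log.Fatal(err)
    }
    defer out.Body.Close()

    f, err := os.Create("key-name.torrent") // hypothetical output path
    if err != nil {
        log.Fatal(err)
    }
    defer f.Close()
    if _, err := io.Copy(f, out.Body); err != nil {
        log.Fatal(err)
    }
}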
@@ -130,6 +145,9 @@ func (c *Client) addOperationGetObjectTorrentMiddlewares(stack *middleware.Stack if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -142,6 +160,18 @@ func (c *Client) addOperationGetObjectTorrentMiddlewares(stack *middleware.Stack if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetObjectTorrentValidationMiddleware(stack); err != nil { return err } @@ -175,6 +205,18 @@ func (c *Client) addOperationGetObjectTorrentMiddlewares(stack *middleware.Stack if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetPublicAccessBlock.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetPublicAccessBlock.go index 3689a4e16..97005c108 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetPublicAccessBlock.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetPublicAccessBlock.go @@ -14,22 +14,38 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Retrieves the -// PublicAccessBlock configuration for an Amazon S3 bucket. To use this operation, -// you must have the s3:GetBucketPublicAccessBlock permission. For more -// information about Amazon S3 permissions, see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) -// . When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket or -// an object, it checks the PublicAccessBlock configuration for both the bucket -// (or the bucket that contains the object) and the bucket owner's account. If the +// This operation is not supported for directory buckets. +// +// Retrieves the PublicAccessBlock configuration for an Amazon S3 bucket. To use +// this operation, you must have the s3:GetBucketPublicAccessBlock permission. For +// more information about Amazon S3 permissions, see [Specifying Permissions in a Policy]. +// +// When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket or an +// object, it checks the PublicAccessBlock configuration for both the bucket (or +// the bucket that contains the object) and the bucket owner's account. If the // PublicAccessBlock settings are different between the bucket and the account, // Amazon S3 uses the most restrictive combination of the bucket-level and -// account-level settings. For more information about when Amazon S3 considers a -// bucket or an object public, see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) -// . 
The following operations are related to GetPublicAccessBlock : -// - Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) -// - PutPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html) -// - GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) -// - DeletePublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html) +// account-level settings. +// +// For more information about when Amazon S3 considers a bucket or an object +// public, see [The Meaning of "Public"]. +// +// The following operations are related to GetPublicAccessBlock : +// +// [Using Amazon S3 Block Public Access] +// +// [PutPublicAccessBlock] +// +// [GetPublicAccessBlock] +// +// [DeletePublicAccessBlock] +// +// [GetPublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html +// [PutPublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html +// [DeletePublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html +// [Using Amazon S3 Block Public Access]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html +// [Specifying Permissions in a Policy]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html +// [The Meaning of "Public"]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status func (c *Client) GetPublicAccessBlock(ctx context.Context, params *GetPublicAccessBlockInput, optFns ...func(*Options)) (*GetPublicAccessBlockOutput, error) { if params == nil { params = &GetPublicAccessBlockInput{} @@ -62,6 +78,7 @@ type GetPublicAccessBlockInput struct { } func (in *GetPublicAccessBlockInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -121,6 +138,9 @@ func (c *Client) addOperationGetPublicAccessBlockMiddlewares(stack *middleware.S if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -136,6 +156,18 @@ func (c *Client) addOperationGetPublicAccessBlockMiddlewares(stack *middleware.S if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpGetPublicAccessBlockValidationMiddleware(stack); err != nil { return err } @@ -169,6 +201,18 @@ func (c *Client) addOperationGetPublicAccessBlockMiddlewares(stack *middleware.S if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadBucket.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadBucket.go index 5f5958916..21470dc11 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadBucket.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadBucket.go @@ -19,42 +19,59 @@ import ( // You can use this operation to determine if a bucket exists and if you have // permission to access it. The action returns a 200 OK if the bucket exists and -// you have permission to access it. If the bucket does not exist or you do not -// have permission to access it, the HEAD request returns a generic 400 Bad Request -// , 403 Forbidden or 404 Not Found code. A message body is not included, so you -// cannot determine the exception beyond these HTTP response codes. Directory -// buckets - You must make requests for this API operation to the Zonal endpoint. -// These endpoints support virtual-hosted-style requests in the format -// https://bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests -// are not supported. For more information, see Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. Authentication and authorization All HeadBucket -// requests must be authenticated and signed by using IAM credentials (access key -// ID and secret access key for the IAM identities). All headers with the x-amz- -// prefix, including x-amz-copy-source , must be signed. For more information, see -// REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html) -// . Directory bucket - You must use IAM credentials to authenticate and authorize +// you have permission to access it. +// +// If the bucket does not exist or you do not have permission to access it, the +// HEAD request returns a generic 400 Bad Request , 403 Forbidden or 404 Not Found +// code. A message body is not included, so you cannot determine the exception +// beyond these HTTP response codes. +// +// Authentication and authorization General purpose buckets - Request to public +// buckets that grant the s3:ListBucket permission publicly do not need to be +// signed. All other HeadBucket requests must be authenticated and signed by using +// IAM credentials (access key ID and secret access key for the IAM identities). +// All headers with the x-amz- prefix, including x-amz-copy-source , must be +// signed. For more information, see [REST Authentication]. +// +// Directory buckets - You must use IAM credentials to authenticate and authorize // your access to the HeadBucket API operation, instead of using the temporary -// security credentials through the CreateSession API operation. Amazon Web -// Services CLI or SDKs handles authentication and authorization on your behalf. +// security credentials through the CreateSession API operation. +// +// Amazon Web Services CLI or SDKs handles authentication and authorization on +// your behalf. +// // Permissions +// // - General purpose bucket permissions - To use this operation, you must have // permissions to perform the s3:ListBucket action. The bucket owner has this // permission by default and can grant this permission to others. For more -// information about permissions, see Managing access permissions to your Amazon -// S3 resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// in the Amazon S3 User Guide. 
+// information about permissions, see [Managing access permissions to your Amazon S3 resources]in the Amazon S3 User Guide. +// // - Directory bucket permissions - You must have the s3express:CreateSession // permission in the Action element of a policy. By default, the session is in // the ReadWrite mode. If you want to restrict the access, you can explicitly set -// the s3express:SessionMode condition key to ReadOnly on the bucket. For more -// information about example bucket policies, see Example bucket policies for S3 -// Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html) -// and Amazon Web Services Identity and Access Management (IAM) identity-based -// policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html) -// in the Amazon S3 User Guide. +// the s3express:SessionMode condition key to ReadOnly on the bucket. +// +// For more information about example bucket policies, see [Example bucket policies for S3 Express One Zone]and [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone]in the Amazon S3 +// +// User Guide. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket-name.s3express-zone-id.region-code.amazonaws.com . // -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket_name.s3express-az_id.region.amazonaws.com . +// You must make requests for this API operation to the Zonal endpoint. These +// endpoints support virtual-hosted-style requests in the format +// https://bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style +// requests are not supported. For more information about endpoints in Availability +// Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information about endpoints in +// Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. +// +// [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [REST Authentication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html +// [Example bucket policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html +// [Managing access permissions to your Amazon S3 resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html func (c *Client) HeadBucket(ctx context.Context, params *HeadBucketInput, optFns ...func(*Options)) (*HeadBucketOutput, error) { if params == nil { params = &HeadBucketInput{} @@ -72,36 +89,46 @@ func (c *Client) HeadBucket(ctx context.Context, params *HeadBucketInput, optFns type HeadBucketInput struct { - // The bucket name. Directory buckets - When you use this operation with a - // directory bucket, you must use virtual-hosted-style requests in the format - // Bucket_name.s3express-az_id.region.amazonaws.com . 
Path-style requests are not - // supported. Directory bucket names must be unique in the chosen Availability - // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for - // example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket - // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide. Access points - When you use this action with an - // access point, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When using the access point ARN, - // you must direct requests to the access point hostname. The access point hostname - // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. Object Lambda access points - When you use this API - // operation with an Object Lambda access point, provide the alias of the Object - // Lambda access point in place of the bucket name. If the Object Lambda access - // point alias in a request is not valid, the error code - // InvalidAccessPointAliasError is returned. For more information about - // InvalidAccessPointAliasError , see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) - // . Access points and Object Lambda access points are not supported by directory - // buckets. S3 on Outposts - When you use this action with Amazon S3 on Outposts, - // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts - // hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // The bucket name. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use virtual-hosted-style requests in the format + // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests + // are not supported. Directory bucket names must be unique in the chosen Zone + // (Availability Zone or Local Zone). Bucket names must follow the format + // bucket-base-name--zone-id--x-s3 (for example, + // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the access + // point ARN. When using the access point ARN, you must direct requests to the + // access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. 
For more information about + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // Object Lambda access points - When you use this API operation with an Object + // Lambda access point, provide the alias of the Object Lambda access point in + // place of the bucket name. If the Object Lambda access point alias in a request + // is not valid, the error code InvalidAccessPointAliasError is returned. For more + // information about InvalidAccessPointAliasError , see [List of Error Codes]. + // + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with S3 on Outposts, you must direct + // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the + // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When + // you use this action with S3 on Outposts, the destination bucket must be the + // Outposts access point ARN or the access point alias. For more information about + // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html + // [List of Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList // // This member is required. Bucket *string @@ -115,6 +142,7 @@ type HeadBucketInput struct { } func (in *HeadBucketInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket } @@ -122,21 +150,25 @@ func (in *HeadBucketInput) bindEndpointParams(p *EndpointParameters) { type HeadBucketOutput struct { // Indicates whether the bucket name used in the request is an access point alias. - // This functionality is not supported for directory buckets. + // + // For directory buckets, the value of this field is false . AccessPointAlias *bool - // The name of the location where the bucket will be created. For directory - // buckets, the AZ ID of the Availability Zone where the bucket is created. An - // example AZ ID value is usw2-az1 . This functionality is only supported by - // directory buckets. + // The name of the location where the bucket will be created. + // + // For directory buckets, the Zone ID of the Availability Zone or the Local Zone + // where the bucket is created. An example Zone ID value for an Availability Zone + // is usw2-az1 . + // + // This functionality is only supported by directory buckets. BucketLocationName *string - // The type of location where the bucket is created. This functionality is only - // supported by directory buckets. + // The type of location where the bucket is created. + // + // This functionality is only supported by directory buckets. BucketLocationType types.LocationType - // The Region that the bucket is located. This functionality is not supported for - // directory buckets. + // The Region that the bucket is located. BucketRegion *string // Metadata pertaining to the operation's result. 
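For reference, exercising the updated HeadBucket operation from application code looks roughly like the following sketch; the bucket name is a placeholder, the default credential chain is assumed, and none of this is part of the vendored change:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()
	// Resolve region and credentials from the default chain.
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// HeadBucket succeeds only if the bucket exists and is accessible;
	// otherwise the SDK surfaces a generic 400/403/404 operation error,
	// as the doc comment above describes.
	out, err := client.HeadBucket(ctx, &s3.HeadBucketInput{
		Bucket: aws.String("amzn-s3-demo-bucket"), // placeholder name
	})
	if err != nil {
		log.Fatalf("HeadBucket failed: %v", err)
	}
	fmt.Println("bucket region:", aws.ToString(out.BucketRegion))
}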
@@ -188,6 +220,9 @@ func (c *Client) addOperationHeadBucketMiddlewares(stack *middleware.Stack, opti if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -203,6 +238,18 @@ func (c *Client) addOperationHeadBucketMiddlewares(stack *middleware.Stack, opti if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpHeadBucketValidationMiddleware(stack); err != nil { return err } @@ -236,23 +283,21 @@ func (c *Client) addOperationHeadBucketMiddlewares(stack *middleware.Stack, opti if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } - return nil -} - -func (v *HeadBucketInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false + if err = addSpanInitializeStart(stack); err != nil { + return err } - return *v.Bucket, true -} - -// HeadBucketAPIClient is a client that implements the HeadBucket operation. -type HeadBucketAPIClient interface { - HeadBucket(context.Context, *HeadBucketInput, ...func(*Options)) (*HeadBucketOutput, error) + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil } -var _ HeadBucketAPIClient = (*Client)(nil) - // BucketExistsWaiterOptions are waiter options for BucketExistsWaiter type BucketExistsWaiterOptions struct { @@ -285,12 +330,13 @@ type BucketExistsWaiterOptions struct { // Retryable is function that can be used to override the service defined // waiter-behavior based on operation output, or returned error. This function is - // used by the waiter to decide if a state is retryable or a terminal state. By - // default service-modeled logic will populate this option. This option can thus be - // used to define a custom waiter state with fall-back to service-modeled waiter - // state mutators.The function returns an error in case of a failure state. In case - // of retry state, this function returns a bool value of true and nil error, while - // in case of success it returns a bool value of false and nil error. + // used by the waiter to decide if a state is retryable or a terminal state. + // + // By default service-modeled logic will populate this option. This option can + // thus be used to define a custom waiter state with fall-back to service-modeled + // waiter state mutators.The function returns an error in case of a failure state. + // In case of retry state, this function returns a bool value of true and nil + // error, while in case of success it returns a bool value of false and nil error. Retryable func(context.Context, *HeadBucketInput, *HeadBucketOutput, error) (bool, error) } @@ -366,7 +412,13 @@ func (w *BucketExistsWaiter) WaitForOutput(ctx context.Context, params *HeadBuck } out, err := w.client.HeadBucket(ctx, params, func(o *Options) { + baseOpts := []func(*Options){ + addIsWaiterUserAgent, + } o.APIOptions = append(o.APIOptions, apiOptions...) 
+ for _, opt := range baseOpts { + opt(o) + } for _, opt := range options.ClientOptions { opt(o) } @@ -415,6 +467,9 @@ func bucketExistsStateRetryable(ctx context.Context, input *HeadBucketInput, out } } + if err != nil { + return false, err + } return true, nil } @@ -450,12 +505,13 @@ type BucketNotExistsWaiterOptions struct { // Retryable is function that can be used to override the service defined // waiter-behavior based on operation output, or returned error. This function is - // used by the waiter to decide if a state is retryable or a terminal state. By - // default service-modeled logic will populate this option. This option can thus be - // used to define a custom waiter state with fall-back to service-modeled waiter - // state mutators.The function returns an error in case of a failure state. In case - // of retry state, this function returns a bool value of true and nil error, while - // in case of success it returns a bool value of false and nil error. + // used by the waiter to decide if a state is retryable or a terminal state. + // + // By default service-modeled logic will populate this option. This option can + // thus be used to define a custom waiter state with fall-back to service-modeled + // waiter state mutators.The function returns an error in case of a failure state. + // In case of retry state, this function returns a bool value of true and nil + // error, while in case of success it returns a bool value of false and nil error. Retryable func(context.Context, *HeadBucketInput, *HeadBucketOutput, error) (bool, error) } @@ -532,7 +588,13 @@ func (w *BucketNotExistsWaiter) WaitForOutput(ctx context.Context, params *HeadB } out, err := w.client.HeadBucket(ctx, params, func(o *Options) { + baseOpts := []func(*Options){ + addIsWaiterUserAgent, + } o.APIOptions = append(o.APIOptions, apiOptions...) + for _, opt := range baseOpts { + opt(o) + } for _, opt := range options.ClientOptions { opt(o) } @@ -577,9 +639,26 @@ func bucketNotExistsStateRetryable(ctx context.Context, input *HeadBucketInput, } } + if err != nil { + return false, err + } return true, nil } +func (v *HeadBucketInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +// HeadBucketAPIClient is a client that implements the HeadBucket operation. +type HeadBucketAPIClient interface { + HeadBucket(context.Context, *HeadBucketInput, ...func(*Options)) (*HeadBucketOutput, error) +} + +var _ HeadBucketAPIClient = (*Client)(nil) + func newServiceMetadataMiddleware_opHeadBucket(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadObject.go index 5b7e9b6c3..ce7a68477 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadObject.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadObject.go @@ -19,43 +19,53 @@ import ( // The HEAD operation retrieves metadata from an object without returning the // object itself. This operation is useful if you're interested only in an object's -// metadata. A HEAD request has the same options as a GET operation on an object. -// The response is identical to the GET response except that there is no response +// metadata. +// +// A HEAD request has the same options as a GET operation on an object. The +// response is identical to the GET response except that there is no response // body. 
Because of this, if the HEAD request generates an error, it returns a // generic code, such as 400 Bad Request , 403 Forbidden , 404 Not Found , 405 // Method Not Allowed , 412 Precondition Failed , or 304 Not Modified . It's not -// possible to retrieve the exact exception of these error codes. Request headers -// are limited to 8 KB in size. For more information, see Common Request Headers (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonRequestHeaders.html) -// . Directory buckets - For directory buckets, you must make requests for this API -// operation to the Zonal endpoint. These endpoints support virtual-hosted-style -// requests in the format -// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style -// requests are not supported. For more information, see Regional and Zonal -// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. Permissions +// possible to retrieve the exact exception of these error codes. +// +// Request headers are limited to 8 KB in size. For more information, see [Common Request Headers]. +// +// Permissions +// // - General purpose bucket permissions - To use HEAD , you must have the // s3:GetObject permission. You need the relevant read object (or version) -// permission for this operation. For more information, see Actions, resources, -// and condition keys for Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html) -// in the Amazon S3 User Guide. If the object you request doesn't exist, the error -// that Amazon S3 returns depends on whether you also have the s3:ListBucket -// permission. -// - If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an -// HTTP status code 404 Not Found error. -// - If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP -// status code 403 Forbidden error. -// - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// API operation for session-based authorization. Specifically, you grant the -// s3express:CreateSession permission to the directory bucket in a bucket policy -// or an IAM identity-based policy. Then, you make the CreateSession API call on -// the bucket to obtain a session token. With the session token in your request -// header, you can make API requests to this operation. After the session token -// expires, you make another CreateSession API call to generate a new session -// token for use. Amazon Web Services CLI or SDKs create session and refresh the -// session token automatically to avoid service interruptions when a session -// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// . +// permission for this operation. For more information, see [Actions, resources, and condition keys for Amazon S3]in the Amazon S3 +// User Guide. For more information about the permissions to S3 API operations by +// S3 resource types, see Required permissions for Amazon S3 API operations in the Amazon S3 User Guide. +// +// If the object you request doesn't exist, the error that Amazon S3 +// returns depends on whether you also have the s3:ListBucket +// permission. +// +// - If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an +// HTTP status code 404 Not Found error.
+// +// - If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP +// status code 403 Forbidden error. +// +// - Directory bucket permissions - To grant access to this API operation on a +// directory bucket, we recommend that you use the [CreateSession] API operation +// for session-based authorization. Specifically, you grant the +// s3express:CreateSession permission to the directory bucket in a bucket policy +// or an IAM identity-based policy. Then, you make the CreateSession API call on +// the bucket to obtain a session token. With the session token in your request +// header, you can make API requests to this operation. After the session token +// expires, you make another CreateSession API call to generate a new session +// token for use. Amazon Web Services CLI or SDKs create a session and refresh the +// session token automatically to avoid service interruptions when a session +// expires. For more information about authorization, see [CreateSession]. +// +// If you enable x-amz-checksum-mode in the request and the object is +// encrypted with Amazon Web Services Key Management Service (Amazon Web +// Services KMS), you +// must also have the kms:GenerateDataKey and kms:Decrypt permissions in IAM +// identity-based policies and KMS key policies for the KMS key to retrieve the +// checksum of the object. // // Encryption Encryption request headers, like x-amz-server-side-encryption , // should not be sent for HEAD requests if your object uses server-side encryption @@ -66,20 +76,27 @@ import ( // want to specify the encryption method. If you include this header in a HEAD // request for an object that uses these types of keys, you’ll get an HTTP 400 Bad // Request error. It's because the encryption method can't be changed when you -// retrieve the object. If you encrypt an object by using server-side encryption -// with customer-provided encryption keys (SSE-C) when you store the object in -// Amazon S3, then when you retrieve the metadata from the object, you must use the -// following headers to provide the encryption key for the server to be able to -// retrieve the object's metadata. The headers are: +// retrieve the object. +// +// If you encrypt an object by using server-side encryption with customer-provided +// encryption keys (SSE-C) when you store the object in Amazon S3, then when you +// retrieve the metadata from the object, you must use the following headers to +// provide the encryption key for the server to be able to retrieve the object's +// metadata. The headers are: +// // - x-amz-server-side-encryption-customer-algorithm +// // - x-amz-server-side-encryption-customer-key +// // - x-amz-server-side-encryption-customer-key-MD5 // -// For more information about SSE-C, see Server-Side Encryption (Using -// Customer-Provided Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) -// in the Amazon S3 User Guide. Directory bucket permissions - For directory -// buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) ( -// AES256 ) is supported. Versioning +// For more information about SSE-C, see [Server-Side Encryption (Using Customer-Provided Encryption Keys)] in the Amazon S3 User Guide. +// +// Directory bucket - For directory buckets, there are only two supported options +// for server-side encryption: SSE-S3 and SSE-KMS. SSE-C isn't supported. For more +// information, see [Protecting data with server-side encryption]in the Amazon S3 User Guide.
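The three SSE-C headers listed above map to the SSECustomerAlgorithm, SSECustomerKey, and SSECustomerKeyMD5 input members further down in this file. A sketch of supplying them, assuming the v2 client forwards these values as-is and therefore expects the Base64-encoded key and key digest, as the raw x-amz-* headers do; the helper name and key handling are illustrative only:

import (
	"context"
	"crypto/md5"
	"encoding/base64"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// headWithSSEC retrieves metadata for an SSE-C encrypted object. key must be
// the exact 32-byte key the object was originally uploaded with.
func headWithSSEC(ctx context.Context, client *s3.Client, bucket, objKey string, key []byte) (*s3.HeadObjectOutput, error) {
	sum := md5.Sum(key) // integrity-check digest of the key itself
	return client.HeadObject(ctx, &s3.HeadObjectInput{
		Bucket:               aws.String(bucket),
		Key:                  aws.String(objKey),
		SSECustomerAlgorithm: aws.String("AES256"),
		SSECustomerKey:       aws.String(base64.StdEncoding.EncodeToString(key)),
		SSECustomerKeyMD5:    aws.String(base64.StdEncoding.EncodeToString(sum[:])),
	})
}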
+// +// Versioning // // - If the current version of the object is a delete marker, Amazon S3 behaves // as if the object was deleted and includes x-amz-delete-marker: true in the @@ -88,18 +105,40 @@ import ( // - If the specified version is a delete marker, the response returns a 405 // Method Not Allowed error and the Last-Modified: timestamp response header. // -// - Directory buckets - Delete marker is not supported by directory buckets. +// - Directory buckets - Delete marker is not supported for directory buckets. // // - Directory buckets - S3 Versioning isn't enabled and supported for directory // buckets. For this API operation, only the null value of the version ID is // supported by directory buckets. You can only specify null to the versionId // query parameter in the request. // -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket_name.s3express-az_id.region.amazonaws.com . The following actions are -// related to HeadObject : -// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) -// - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket-name.s3express-zone-id.region-code.amazonaws.com . +// +// For directory buckets, you must make requests for this API operation to the +// Zonal endpoint. These endpoints support virtual-hosted-style requests in the +// format +// https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name +// . Path-style requests are not supported. For more information about endpoints +// in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information +// about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. +// +// The following actions are related to HeadObject : +// +// [GetObject] +// +// [GetObjectAttributes] +// +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [Server-Side Encryption (Using Customer-Provided Encryption Keys)]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html +// [GetObjectAttributes]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html +// [Protecting data with server-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-serv-side-encryption.html +// [Actions, resources, and condition keys for Amazon S3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html +// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html +// [Common Request Headers]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonRequestHeaders.html +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html +// +// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html func (c *Client) HeadObject(ctx context.Context, params *HeadObjectInput, optFns ...func(*Options)) (*HeadObjectOutput, error) { if params == nil { params = &HeadObjectInput{} @@ -117,31 +156,39 @@ func (c *Client) HeadObject(ctx context.Context, params *HeadObjectInput, optFns type HeadObjectInput struct { - // The name of the bucket that contains the object. 
Directory buckets - When you - // use this operation with a directory bucket, you must use virtual-hosted-style - // requests in the format Bucket_name.s3express-az_id.region.amazonaws.com . - // Path-style requests are not supported. Directory bucket names must be unique in - // the chosen Availability Zone. Bucket names must follow the format - // bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 - // ). For information about bucket naming restrictions, see Directory bucket - // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide. Access points - When you use this action with an - // access point, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When using the access point ARN, - // you must direct requests to the access point hostname. The access point hostname - // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. Access points and Object Lambda access points are - // not supported by directory buckets. S3 on Outposts - When you use this action - // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts - // hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // The name of the bucket that contains the object. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use virtual-hosted-style requests in the format + // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests + // are not supported. Directory bucket names must be unique in the chosen Zone + // (Availability Zone or Local Zone). Bucket names must follow the format + // bucket-base-name--zone-id--x-s3 (for example, + // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the access + // point ARN. When using the access point ARN, you must direct requests to the + // access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // Access points and Object Lambda access points are not supported by directory + // buckets. 
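Per the Bucket documentation above, an access point ARN (or alias) goes in place of the bucket name; the SDK then routes the request to the access point hostname. A sketch with a made-up ARN and helper name:

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// headViaAccessPoint looks up object metadata through an access point.
func headViaAccessPoint(ctx context.Context, client *s3.Client) (*s3.HeadObjectOutput, error) {
	// Placeholder access point ARN; substitute your own account and name.
	return client.HeadObject(ctx, &s3.HeadObjectInput{
		Bucket: aws.String("arn:aws:s3:us-west-2:111122223333:accesspoint/my-access-point"),
		Key:    aws.String("object-key"),
	})
}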
+ // + // S3 on Outposts - When you use this action with S3 on Outposts, you must direct + // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the + // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When + // you use this action with S3 on Outposts, the destination bucket must be the + // Outposts access point ARN or the access point alias. For more information about + // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -151,10 +198,19 @@ type HeadObjectInput struct { // This member is required. Key *string - // To retrieve the checksum, this parameter must be enabled. In addition, if you - // enable ChecksumMode and the object is encrypted with Amazon Web Services Key - // Management Service (Amazon Web Services KMS), you must have permission to use - // the kms:Decrypt action for the request to succeed. + // To retrieve the checksum, this parameter must be enabled. + // + // General purpose buckets - If you enable checksum mode and the object is + // uploaded with a [checksum]and encrypted with an Key Management Service (KMS) key, you + // must have permission to use the kms:Decrypt action to retrieve the checksum. + // + // Directory buckets - If you enable ChecksumMode and the object is encrypted with + // Amazon Web Services Key Management Service (Amazon Web Services KMS), you must + // also have the kms:GenerateDataKey and kms:Decrypt permissions in IAM + // identity-based policies and KMS key policies for the KMS key to retrieve the + // checksum of the object. + // + // [checksum]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_Checksum.html ChecksumMode types.ChecksumMode // The account ID of the expected bucket owner. If the account ID that you provide @@ -163,40 +219,71 @@ type HeadObjectInput struct { ExpectedBucketOwner *string // Return the object only if its entity tag (ETag) is the same as the one - // specified; otherwise, return a 412 (precondition failed) error. If both of the - // If-Match and If-Unmodified-Since headers are present in the request as follows: + // specified; otherwise, return a 412 (precondition failed) error. + // + // If both of the If-Match and If-Unmodified-Since headers are present in the + // request as follows: + // // - If-Match condition evaluates to true , and; + // // - If-Unmodified-Since condition evaluates to false ; - // Then Amazon S3 returns 200 OK and the data requested. For more information - // about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) . + // + // Then Amazon S3 returns 200 OK and the data requested. + // + // For more information about conditional requests, see [RFC 7232]. + // + // [RFC 7232]: https://tools.ietf.org/html/rfc7232 IfMatch *string // Return the object only if it has been modified since the specified time; - // otherwise, return a 304 (not modified) error. If both of the If-None-Match and - // If-Modified-Since headers are present in the request as follows: + // otherwise, return a 304 (not modified) error. 
+ // + // If both of the If-None-Match and If-Modified-Since headers are present in the + // request as follows: + // // - If-None-Match condition evaluates to false , and; + // // - If-Modified-Since condition evaluates to true ; - // Then Amazon S3 returns the 304 Not Modified response code. For more information - // about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) . + // + // Then Amazon S3 returns the 304 Not Modified response code. + // + // For more information about conditional requests, see [RFC 7232]. + // + // [RFC 7232]: https://tools.ietf.org/html/rfc7232 IfModifiedSince *time.Time // Return the object only if its entity tag (ETag) is different from the one - // specified; otherwise, return a 304 (not modified) error. If both of the - // If-None-Match and If-Modified-Since headers are present in the request as - // follows: + // specified; otherwise, return a 304 (not modified) error. + // + // If both of the If-None-Match and If-Modified-Since headers are present in the + // request as follows: + // // - If-None-Match condition evaluates to false , and; + // // - If-Modified-Since condition evaluates to true ; - // Then Amazon S3 returns the 304 Not Modified response code. For more information - // about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) . + // + // Then Amazon S3 returns the 304 Not Modified response code. + // + // For more information about conditional requests, see [RFC 7232]. + // + // [RFC 7232]: https://tools.ietf.org/html/rfc7232 IfNoneMatch *string // Return the object only if it has not been modified since the specified time; - // otherwise, return a 412 (precondition failed) error. If both of the If-Match - // and If-Unmodified-Since headers are present in the request as follows: + // otherwise, return a 412 (precondition failed) error. + // + // If both of the If-Match and If-Unmodified-Since headers are present in the + // request as follows: + // // - If-Match condition evaluates to true , and; + // // - If-Unmodified-Since condition evaluates to false ; - // Then Amazon S3 returns 200 OK and the data requested. For more information - // about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) . + // + // Then Amazon S3 returns 200 OK and the data requested. + // + // For more information about conditional requests, see [RFC 7232]. + // + // [RFC 7232]: https://tools.ietf.org/html/rfc7232 IfUnmodifiedSince *time.Time // Part number of the object being read. This is a positive integer between 1 and @@ -214,39 +301,64 @@ type HeadObjectInput struct { // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. 
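The If-Match, If-None-Match, If-Modified-Since, and If-Unmodified-Since members documented above implement the RFC 7232 conditionals. One way to consume them from application code is sketched below, assuming the SDK surfaces the 304 as a transport-level response error; the helper name is illustrative:

import (
	"context"
	"errors"
	"net/http"

	"github.com/aws/aws-sdk-go-v2/aws"
	awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// headIfChanged re-fetches metadata only when the stored ETag is stale.
// The bool result reports whether fresh metadata was returned.
func headIfChanged(ctx context.Context, client *s3.Client, bucket, key, etag string) (*s3.HeadObjectOutput, bool, error) {
	out, err := client.HeadObject(ctx, &s3.HeadObjectInput{
		Bucket:      aws.String(bucket),
		Key:         aws.String(key),
		IfNoneMatch: aws.String(etag),
	})
	var respErr *awshttp.ResponseError
	if errors.As(err, &respErr) && respErr.HTTPStatusCode() == http.StatusNotModified {
		return nil, false, nil // 304: cached metadata is still current
	}
	if err != nil {
		return nil, false, err
	}
	return out, true, nil
}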
+ // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer - // Specifies the algorithm to use when encrypting the object (for example, - // AES256). This functionality is not supported for directory buckets. + // Sets the Cache-Control header of the response. + ResponseCacheControl *string + + // Sets the Content-Disposition header of the response. + ResponseContentDisposition *string + + // Sets the Content-Encoding header of the response. + ResponseContentEncoding *string + + // Sets the Content-Language header of the response. + ResponseContentLanguage *string + + // Sets the Content-Type header of the response. + ResponseContentType *string + + // Sets the Expires header of the response. + ResponseExpires *time.Time + + // Specifies the algorithm to use when encrypting the object (for example, AES256). + // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string // Specifies the customer-provided encryption key for Amazon S3 to use in // encrypting data. This value is used to store the object and then it is // discarded; Amazon S3 does not store the encryption key. The key must be // appropriate for use with the algorithm specified in the - // x-amz-server-side-encryption-customer-algorithm header. This functionality is - // not supported for directory buckets. + // x-amz-server-side-encryption-customer-algorithm header. + // + // This functionality is not supported for directory buckets. SSECustomerKey *string // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. This functionality is not - // supported for directory buckets. + // encryption key was transmitted without error. + // + // This functionality is not supported for directory buckets. SSECustomerKeyMD5 *string - // Version ID used to reference a specific version of the object. For directory - // buckets in this API operation, only the null value of the version ID is - // supported. + // Version ID used to reference a specific version of the object. + // + // For directory buckets in this API operation, only the null value of the version + // ID is supported. VersionId *string noSmithyDocumentSerde } func (in *HeadObjectInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.Key = in.Key @@ -257,54 +369,77 @@ type HeadObjectOutput struct { // Indicates that a range of bytes was specified. AcceptRanges *string - // The archive state of the head object. This functionality is not supported for - // directory buckets. + // The archive state of the head object. + // + // This functionality is not supported for directory buckets. ArchiveStatus types.ArchiveStatus // Indicates whether the object uses an S3 Bucket Key for server-side encryption - // with Key Management Service (KMS) keys (SSE-KMS). This functionality is not - // supported for directory buckets. + // with Key Management Service (KMS) keys (SSE-KMS). BucketKeyEnabled *bool // Specifies caching behavior along the request/reply chain. CacheControl *string - // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be - // present if it was uploaded with the object. When you use an API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. 
Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 32-bit CRC32 checksum of the object. This checksum is only + // present if the checksum was uploaded with the object. When you use an API + // operation on an object that was uploaded using multipart uploads, this value may + // not be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information about + // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumCRC32 *string - // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be - // present if it was uploaded with the object. When you use an API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 32-bit CRC32C checksum of the object. This checksum is only + // present if the checksum was uploaded with the object. When you use an API + // operation on an object that was uploaded using multipart uploads, this value may + // not be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information about + // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumCRC32C *string - // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. When you use the API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 64-bit CRC64NVME checksum of the object. For more + // information, see [Checking object integrity in the Amazon S3 User Guide]. + // + // [Checking object integrity in the Amazon S3 User Guide]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumCRC64NVME *string + + // The Base64 encoded, 160-bit SHA1 digest of the object. This will only be + // present if the checksum was uploaded with the object.
When you use the API + // operation on an object that was uploaded using multipart uploads, this value may + // not be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information about + // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumSHA1 *string - // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be - // present if it was uploaded with the object. When you use an API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 256-bit SHA256 digest of the object. This will only be + // present if the checksum was uploaded with the object. When you use an API + // operation on an object that was uploaded using multipart uploads, this value may + // not be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information about + // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumSHA256 *string + // The checksum type, which determines how part-level checksums are combined to + // create an object-level checksum for multipart objects. You can use this header + // response to verify that the checksum type that is received is the same checksum + // type that was specified in the CreateMultipartUpload request. For more information, + // see [Checking object integrity in the Amazon S3 User Guide]. + // + // [Checking object integrity in the Amazon S3 User Guide]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumType types.ChecksumType + // Specifies presentational information for the object. ContentDisposition *string @@ -319,28 +454,45 @@ type HeadObjectOutput struct { // Size of the body in bytes. ContentLength *int64 + // The portion of the object returned in the response for a GET request. + ContentRange *string + // A standard MIME type describing the format of the object data. ContentType *string // Specifies whether the object retrieved was (true) or was not (false) a Delete - // Marker. If false, this response header does not appear in the response. This - // functionality is not supported for directory buckets. + // Marker. If false, this response header does not appear in the response. + // + // This functionality is not supported for directory buckets. DeleteMarker *bool // An entity tag (ETag) is an opaque identifier assigned by a web server to a // specific version of a resource found at a URL.
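The checksum members above are only populated when the request opts in via ChecksumMode. A sketch of reading them back, with an illustrative helper name; the KMS permission caveats from the doc text apply to encrypted objects:

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// printChecksums asks S3 to return stored checksums with the metadata.
func printChecksums(ctx context.Context, client *s3.Client, bucket, key string) error {
	out, err := client.HeadObject(ctx, &s3.HeadObjectInput{
		Bucket:       aws.String(bucket),
		Key:          aws.String(key),
		ChecksumMode: types.ChecksumModeEnabled,
	})
	if err != nil {
		return err
	}
	// Each value is nil unless that checksum was stored with the object.
	fmt.Println("crc32:", aws.ToString(out.ChecksumCRC32))
	fmt.Println("sha256:", aws.ToString(out.ChecksumSHA256))
	return nil
}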
ETag *string - // If the object expiration is configured (see PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) - // ), the response includes this header. It includes the expiry-date and rule-id + // If the object expiration is configured (see [PutBucketLifecycleConfiguration] ), + // the response includes this header. It includes the expiry-date and rule-id // key-value pairs providing object expiration information. The value of the - // rule-id is URL-encoded. This functionality is not supported for directory + // rule-id is URL-encoded. + // + // Object expiration information is not returned in directory buckets and this + // header returns the value " NotImplemented " in all responses for directory // buckets. + // + // [PutBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html Expiration *string // The date and time at which the object is no longer cacheable. + // + // Deprecated: This field is handled inconsistently across AWS SDKs. Prefer using + // the ExpiresString field which contains the unparsed value from the service + // response. Expires *time.Time + // The unparsed value of the Expires field from the service response. Prefer use + // of this value over the normal Expires response field where possible. + ExpiresString *string + // Date and time when the object was last modified. LastModified *time.Time @@ -352,26 +504,34 @@ type HeadObjectOutput struct { // This is set to the number of metadata entries not returned in x-amz-meta // headers. This can happen if you create metadata using an API like SOAP that // supports more flexible metadata than the REST API. For example, using SOAP, you - // can create metadata whose values are not legal HTTP headers. This functionality - // is not supported for directory buckets. + // can create metadata whose values are not legal HTTP headers. + // + // This functionality is not supported for directory buckets. MissingMeta *int32 // Specifies whether a legal hold is in effect for this object. This header is // only returned if the requester has the s3:GetObjectLegalHold permission. This // header is not returned if the specified version of this object has never had a - // legal hold applied. For more information about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) - // . This functionality is not supported for directory buckets. + // legal hold applied. For more information about S3 Object Lock, see [Object Lock]. + // + // This functionality is not supported for directory buckets. + // + // [Object Lock]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus // The Object Lock mode, if any, that's in effect for this object. This header is // only returned if the requester has the s3:GetObjectRetention permission. For - // more information about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) - // . This functionality is not supported for directory buckets. + // more information about S3 Object Lock, see [Object Lock]. + // + // This functionality is not supported for directory buckets. + // + // [Object Lock]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html ObjectLockMode types.ObjectLockMode // The date and time when the Object Lock retention period expires.
This header is - // only returned if the requester has the s3:GetObjectRetention permission. This - // functionality is not supported for directory buckets. + // only returned if the requester has the s3:GetObjectRetention permission. + // + // This functionality is not supported for directory buckets. ObjectLockRetainUntilDate *time.Time // The count of parts this object has. This value is only returned if you specify @@ -379,89 +539,115 @@ type HeadObjectOutput struct { PartsCount *int32 // Amazon S3 can return this header if your request involves a bucket that is - // either a source or a destination in a replication rule. In replication, you have - // a source bucket on which you configure replication and destination bucket or - // buckets where Amazon S3 stores object replicas. When you request an object ( - // GetObject ) or object metadata ( HeadObject ) from these buckets, Amazon S3 will - // return the x-amz-replication-status header in the response as follows: + // either a source or a destination in a replication rule. + // + // In replication, you have a source bucket on which you configure replication and + // destination bucket or buckets where Amazon S3 stores object replicas. When you + // request an object ( GetObject ) or object metadata ( HeadObject ) from these + // buckets, Amazon S3 will return the x-amz-replication-status header in the + // response as follows: + // // - If requesting an object from the source bucket, Amazon S3 will return the // x-amz-replication-status header if the object in your request is eligible for - // replication. For example, suppose that in your replication configuration, you - // specify object prefix TaxDocs requesting Amazon S3 to replicate objects with - // key prefix TaxDocs . Any objects you upload with this key name prefix, for - // example TaxDocs/document1.pdf , are eligible for replication. For any object - // request with this key name prefix, Amazon S3 will return the - // x-amz-replication-status header with value PENDING, COMPLETED or FAILED - // indicating object replication status. + // replication. + // + // For example, suppose that in your replication configuration, you specify object + // prefix TaxDocs requesting Amazon S3 to replicate objects with key prefix + // TaxDocs . Any objects you upload with this key name prefix, for example + // TaxDocs/document1.pdf , are eligible for replication. For any object request + // with this key name prefix, Amazon S3 will return the x-amz-replication-status + // header with value PENDING, COMPLETED or FAILED indicating object replication + // status. + // // - If requesting an object from a destination bucket, Amazon S3 will return // the x-amz-replication-status header with value REPLICA if the object in your // request is a replica that Amazon S3 created and there is no replica modification // replication in progress. + // // - When replicating objects to multiple destination buckets, the // x-amz-replication-status header acts differently. The header of the source // object will only return a value of COMPLETED when replication is successful to // all destinations. The header will remain at value PENDING until replication has // completed for all destinations. If one or more destinations fails replication // the header will return FAILED. - // For more information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) - // . This functionality is not supported for directory buckets. + // + // For more information, see [Replication]. 
+ // + // This functionality is not supported for directory buckets. + // + // [Replication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html ReplicationStatus types.ReplicationStatus // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // If the object is an archived object (an object whose storage class is GLACIER), // the response includes this header if either the archive restoration is in - // progress (see RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html) - // or an archive copy is already restored. If an archive copy is already restored, - // the header value indicates when Amazon S3 is scheduled to delete the object - // copy. For example: x-amz-restore: ongoing-request="false", expiry-date="Fri, 21 - // Dec 2012 00:00:00 GMT" If the object restoration is in progress, the header - // returns the value ongoing-request="true" . For more information about archiving - // objects, see Transitioning Objects: General Considerations (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-transition-general-considerations) - // . This functionality is not supported for directory buckets. Only the S3 Express + // progress (see [RestoreObject]) or an archive copy is already restored. + // + // If an archive copy is already restored, the header value indicates when Amazon + // S3 is scheduled to delete the object copy. For example: + // + // x-amz-restore: ongoing-request="false", expiry-date="Fri, 21 Dec 2012 00:00:00 + // GMT" + // + // If the object restoration is in progress, the header returns the value + // ongoing-request="true" . + // + // For more information about archiving objects, see [Transitioning Objects: General Considerations]. + // + // This functionality is not supported for directory buckets. Only the S3 Express + // One Zone storage class is supported by directory buckets to store objects. + // + // [Transitioning Objects: General Considerations]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-transition-general-considerations + // [RestoreObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html Restore *string // If server-side encryption with a customer-provided encryption key was // requested, the response will include this header to confirm the encryption - // algorithm that's used. This functionality is not supported for directory - // buckets. + // algorithm that's used. + // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string // If server-side encryption with a customer-provided encryption key was // requested, the response will include this header to provide the round-trip - // message integrity verification of the customer-provided encryption key. This - // functionality is not supported for directory buckets. + // message integrity verification of the customer-provided encryption key. + // + // This functionality is not supported for directory buckets. SSECustomerKeyMD5 *string - // If present, indicates the ID of the Key Management Service (KMS) symmetric - // encryption customer managed key that was used for the object. This functionality - // is not supported for directory buckets.
+ // If present, indicates the ID of the KMS key that was used for object encryption. SSEKMSKeyId *string // The server-side encryption algorithm used when you store this object in Amazon - // S3 (for example, AES256 , aws:kms , aws:kms:dsse ). For directory buckets, only - // server-side encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is - // supported. + // S3 (for example, AES256 , aws:kms , aws:kms:dsse ). ServerSideEncryption types.ServerSideEncryption // Provides storage class information of the object. Amazon S3 returns this header - // for all objects except for S3 Standard storage class objects. For more - // information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) - // . Directory buckets - Only the S3 Express One Zone storage class is supported by + // for all objects except for S3 Standard storage class objects. + // + // For more information, see [Storage Classes]. + // + // Directory buckets - Only the S3 Express One Zone storage class is supported by // directory buckets to store objects. + // + // [Storage Classes]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html StorageClass types.StorageClass - // Version ID of the object. This functionality is not supported for directory - // buckets. + // Version ID of the object. + // + // This functionality is not supported for directory buckets. VersionId *string // If the bucket is configured as a website, redirects requests for this object to // another object in the same bucket or to an external URL. Amazon S3 stores the - // value of this header in the object metadata. This functionality is not supported - // for directory buckets. + // value of this header in the object metadata. + // + // This functionality is not supported for directory buckets. WebsiteRedirectLocation *string // Metadata pertaining to the operation's result. @@ -513,6 +699,9 @@ func (c *Client) addOperationHeadObjectMiddlewares(stack *middleware.Stack, opti if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -528,6 +717,18 @@ func (c *Client) addOperationHeadObjectMiddlewares(stack *middleware.Stack, opti if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpHeadObjectValidationMiddleware(stack); err != nil { return err } @@ -561,23 +762,21 @@ func (c *Client) addOperationHeadObjectMiddlewares(stack *middleware.Stack, opti if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } - return nil -} - -func (v *HeadObjectInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false + if err = addSpanInitializeStart(stack); err != nil { + return err } - return *v.Bucket, true -} - -// HeadObjectAPIClient is a client that implements the HeadObject operation. 
-type HeadObjectAPIClient interface {
- HeadObject(context.Context, *HeadObjectInput, ...func(*Options)) (*HeadObjectOutput, error)
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
}

-var _ HeadObjectAPIClient = (*Client)(nil)
-
// ObjectExistsWaiterOptions are waiter options for ObjectExistsWaiter
type ObjectExistsWaiterOptions struct {
@@ -610,12 +809,13 @@ type ObjectExistsWaiterOptions struct {

// Retryable is a function that can be used to override the service defined
// waiter-behavior based on operation output, or returned error. This function is
- // used by the waiter to decide if a state is retryable or a terminal state. By
- // default service-modeled logic will populate this option. This option can thus be
- // used to define a custom waiter state with fall-back to service-modeled waiter
- // state mutators.The function returns an error in case of a failure state. In case
- // of retry state, this function returns a bool value of true and nil error, while
- // in case of success it returns a bool value of false and nil error.
+ // used by the waiter to decide if a state is retryable or a terminal state.
+ //
+ // By default service-modeled logic will populate this option. This option can
+ // thus be used to define a custom waiter state with fall-back to service-modeled
+ // waiter state mutators. The function returns an error in case of a failure state.
+ // In case of retry state, this function returns a bool value of true and nil
+ // error, while in case of success it returns a bool value of false and nil error.
Retryable func(context.Context, *HeadObjectInput, *HeadObjectOutput, error) (bool, error)
}

@@ -691,7 +891,13 @@ func (w *ObjectExistsWaiter) WaitForOutput(ctx context.Context, params *HeadObje
}

out, err := w.client.HeadObject(ctx, params, func(o *Options) {
+ baseOpts := []func(*Options){
+ addIsWaiterUserAgent,
+ }
o.APIOptions = append(o.APIOptions, apiOptions...)
+ for _, opt := range baseOpts {
+ opt(o)
+ }
for _, opt := range options.ClientOptions {
opt(o)
}
@@ -740,6 +946,9 @@ func objectExistsStateRetryable(ctx context.Context, input *HeadObjectInput, out
}
}

+ if err != nil {
+ return false, err
+ }
return true, nil
}

@@ -775,12 +984,13 @@ type ObjectNotExistsWaiterOptions struct {

// Retryable is a function that can be used to override the service defined
// waiter-behavior based on operation output, or returned error. This function is
- // used by the waiter to decide if a state is retryable or a terminal state. By
- // default service-modeled logic will populate this option. This option can thus be
- // used to define a custom waiter state with fall-back to service-modeled waiter
- // state mutators.The function returns an error in case of a failure state. In case
- // of retry state, this function returns a bool value of true and nil error, while
- // in case of success it returns a bool value of false and nil error.
+ // used by the waiter to decide if a state is retryable or a terminal state.
+ //
+ // By default service-modeled logic will populate this option. This option can
+ // thus be used to define a custom waiter state with fall-back to service-modeled
+ // waiter state mutators. The function returns an error in case of a failure state.
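The Retryable contract rewritten above can be exercised as in this minimal sketch; the bucket, key, and two-minute budget are placeholders, and the override shown (retry on any error) is deliberately naive rather than a recommended policy.

    package main

    import (
        "context"
        "time"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/service/s3"
    )

    // waitForObject polls HeadObject until it succeeds or the budget expires.
    func waitForObject(ctx context.Context, client *s3.Client) error {
        waiter := s3.NewObjectExistsWaiter(client, func(o *s3.ObjectExistsWaiterOptions) {
            // Same contract as the doc comment: non-nil error = terminal failure,
            // (true, nil) = retry, (false, nil) = success.
            o.Retryable = func(ctx context.Context, in *s3.HeadObjectInput, out *s3.HeadObjectOutput, err error) (bool, error) {
                if err != nil {
                    return true, nil // naive: keep polling on any error, 404 included
                }
                return false, nil // HeadObject answered, so the object exists
            }
        })
        return waiter.Wait(ctx, &s3.HeadObjectInput{
            Bucket: aws.String("amzn-s3-demo-bucket"), // placeholder
            Key:    aws.String("TaxDocs/document1.pdf"),
        }, 2*time.Minute)
    }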
+ // In case of retry state, this function returns a bool value of true and nil + // error, while in case of success it returns a bool value of false and nil error. Retryable func(context.Context, *HeadObjectInput, *HeadObjectOutput, error) (bool, error) } @@ -857,7 +1067,13 @@ func (w *ObjectNotExistsWaiter) WaitForOutput(ctx context.Context, params *HeadO } out, err := w.client.HeadObject(ctx, params, func(o *Options) { + baseOpts := []func(*Options){ + addIsWaiterUserAgent, + } o.APIOptions = append(o.APIOptions, apiOptions...) + for _, opt := range baseOpts { + opt(o) + } for _, opt := range options.ClientOptions { opt(o) } @@ -902,9 +1118,26 @@ func objectNotExistsStateRetryable(ctx context.Context, input *HeadObjectInput, } } + if err != nil { + return false, err + } return true, nil } +func (v *HeadObjectInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +// HeadObjectAPIClient is a client that implements the HeadObject operation. +type HeadObjectAPIClient interface { + HeadObject(context.Context, *HeadObjectInput, ...func(*Options)) (*HeadObjectOutput, error) +} + +var _ HeadObjectAPIClient = (*Client)(nil) + func newServiceMetadataMiddleware_opHeadObject(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketAnalyticsConfigurations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketAnalyticsConfigurations.go index 67b7571c1..2580b44aa 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketAnalyticsConfigurations.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketAnalyticsConfigurations.go @@ -14,27 +14,40 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Lists the analytics -// configurations for the bucket. You can have up to 1,000 analytics configurations -// per bucket. This action supports list pagination and does not return more than -// 100 configurations at a time. You should always check the IsTruncated element -// in the response. If there are no more configurations to list, IsTruncated is -// set to false. If there are more configurations to list, IsTruncated is set to -// true, and there will be a value in NextContinuationToken . You use the +// This operation is not supported for directory buckets. +// +// Lists the analytics configurations for the bucket. You can have up to 1,000 +// analytics configurations per bucket. +// +// This action supports list pagination and does not return more than 100 +// configurations at a time. You should always check the IsTruncated element in +// the response. If there are no more configurations to list, IsTruncated is set +// to false. If there are more configurations to list, IsTruncated is set to true, +// and there will be a value in NextContinuationToken . You use the // NextContinuationToken value to continue the pagination of the list by passing -// the value in continuation-token in the request to GET the next page. To use -// this operation, you must have permissions to perform the +// the value in continuation-token in the request to GET the next page. +// +// To use this operation, you must have permissions to perform the // s3:GetAnalyticsConfiguration action. The bucket owner has this permission by // default. The bucket owner can grant this permission to others. 
For more -// information about permissions, see Permissions Related to Bucket Subresource -// Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// . For information about Amazon S3 analytics feature, see Amazon S3 Analytics – -// Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html) -// . The following operations are related to ListBucketAnalyticsConfigurations : -// - GetBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html) -// - DeleteBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html) -// - PutBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html) +// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. +// +// For information about Amazon S3 analytics feature, see [Amazon S3 Analytics – Storage Class Analysis]. +// +// The following operations are related to ListBucketAnalyticsConfigurations : +// +// [GetBucketAnalyticsConfiguration] +// +// [DeleteBucketAnalyticsConfiguration] +// +// [PutBucketAnalyticsConfiguration] +// +// [Amazon S3 Analytics – Storage Class Analysis]: https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html +// [DeleteBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [GetBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html +// [PutBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html func (c *Client) ListBucketAnalyticsConfigurations(ctx context.Context, params *ListBucketAnalyticsConfigurationsInput, optFns ...func(*Options)) (*ListBucketAnalyticsConfigurationsOutput, error) { if params == nil { params = &ListBucketAnalyticsConfigurationsInput{} @@ -70,6 +83,7 @@ type ListBucketAnalyticsConfigurationsInput struct { } func (in *ListBucketAnalyticsConfigurationsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -142,6 +156,9 @@ func (c *Client) addOperationListBucketAnalyticsConfigurationsMiddlewares(stack if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -157,6 +174,18 @@ func (c *Client) addOperationListBucketAnalyticsConfigurationsMiddlewares(stack if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = 
addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpListBucketAnalyticsConfigurationsValidationMiddleware(stack); err != nil { return err } @@ -190,6 +219,18 @@ func (c *Client) addOperationListBucketAnalyticsConfigurationsMiddlewares(stack if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketIntelligentTieringConfigurations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketIntelligentTieringConfigurations.go index 729f87856..2a77b6804 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketIntelligentTieringConfigurations.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketIntelligentTieringConfigurations.go @@ -14,25 +14,38 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Lists the S3 -// Intelligent-Tiering configuration from the specified bucket. The S3 -// Intelligent-Tiering storage class is designed to optimize storage costs by -// automatically moving data to the most cost-effective storage access tier, +// This operation is not supported for directory buckets. +// +// Lists the S3 Intelligent-Tiering configuration from the specified bucket. +// +// The S3 Intelligent-Tiering storage class is designed to optimize storage costs +// by automatically moving data to the most cost-effective storage access tier, // without performance impact or operational overhead. S3 Intelligent-Tiering // delivers automatic cost savings in three low latency and high throughput access // tiers. To get the lowest storage cost on data that can be accessed in minutes to -// hours, you can choose to activate additional archiving capabilities. The S3 -// Intelligent-Tiering storage class is the ideal storage class for data with -// unknown, changing, or unpredictable access patterns, independent of object size -// or retention period. If the size of an object is less than 128 KB, it is not -// monitored and not eligible for auto-tiering. Smaller objects can be stored, but -// they are always charged at the Frequent Access tier rates in the S3 -// Intelligent-Tiering storage class. For more information, see Storage class for -// automatically optimizing frequently and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access) -// . Operations related to ListBucketIntelligentTieringConfigurations include: -// - DeleteBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html) -// - PutBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html) -// - GetBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html) +// hours, you can choose to activate additional archiving capabilities. 
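Stepping out of the diff briefly: the IsTruncated / NextContinuationToken handshake that the ListBucketAnalyticsConfigurations doc comment above prescribes looks roughly like the loop below in application code; the bucket name is a placeholder and the client is assumed to be already configured.

    package main

    import (
        "context"
        "fmt"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/service/s3"
    )

    func listAnalyticsIDs(ctx context.Context, client *s3.Client) error {
        in := &s3.ListBucketAnalyticsConfigurationsInput{
            Bucket: aws.String("amzn-s3-demo-bucket"), // placeholder
        }
        for {
            page, err := client.ListBucketAnalyticsConfigurations(ctx, in)
            if err != nil {
                return err
            }
            for _, cfg := range page.AnalyticsConfigurationList {
                fmt.Println(aws.ToString(cfg.Id))
            }
            // At most 100 configurations per page; follow the token as described.
            if !aws.ToBool(page.IsTruncated) {
                return nil
            }
            in.ContinuationToken = page.NextContinuationToken
        }
    }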
+// +// The S3 Intelligent-Tiering storage class is the ideal storage class for data +// with unknown, changing, or unpredictable access patterns, independent of object +// size or retention period. If the size of an object is less than 128 KB, it is +// not monitored and not eligible for auto-tiering. Smaller objects can be stored, +// but they are always charged at the Frequent Access tier rates in the S3 +// Intelligent-Tiering storage class. +// +// For more information, see [Storage class for automatically optimizing frequently and infrequently accessed objects]. +// +// Operations related to ListBucketIntelligentTieringConfigurations include: +// +// [DeleteBucketIntelligentTieringConfiguration] +// +// [PutBucketIntelligentTieringConfiguration] +// +// [GetBucketIntelligentTieringConfiguration] +// +// [GetBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html +// [PutBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html +// [Storage class for automatically optimizing frequently and infrequently accessed objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access +// [DeleteBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html func (c *Client) ListBucketIntelligentTieringConfigurations(ctx context.Context, params *ListBucketIntelligentTieringConfigurationsInput, optFns ...func(*Options)) (*ListBucketIntelligentTieringConfigurationsOutput, error) { if params == nil { params = &ListBucketIntelligentTieringConfigurationsInput{} @@ -64,6 +77,7 @@ type ListBucketIntelligentTieringConfigurationsInput struct { } func (in *ListBucketIntelligentTieringConfigurationsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -136,6 +150,9 @@ func (c *Client) addOperationListBucketIntelligentTieringConfigurationsMiddlewar if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -151,6 +168,18 @@ func (c *Client) addOperationListBucketIntelligentTieringConfigurationsMiddlewar if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpListBucketIntelligentTieringConfigurationsValidationMiddleware(stack); err != nil { return err } @@ -184,6 +213,18 @@ func (c *Client) addOperationListBucketIntelligentTieringConfigurationsMiddlewar if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketInventoryConfigurations.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketInventoryConfigurations.go index 6c879048c..0bd202161 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketInventoryConfigurations.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketInventoryConfigurations.go @@ -14,26 +14,40 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Returns a list of -// inventory configurations for the bucket. You can have up to 1,000 analytics -// configurations per bucket. This action supports list pagination and does not -// return more than 100 configurations at a time. Always check the IsTruncated -// element in the response. If there are no more configurations to list, -// IsTruncated is set to false. If there are more configurations to list, -// IsTruncated is set to true, and there is a value in NextContinuationToken . You -// use the NextContinuationToken value to continue the pagination of the list by -// passing the value in continuation-token in the request to GET the next page. To -// use this operation, you must have permissions to perform the +// This operation is not supported for directory buckets. +// +// Returns a list of inventory configurations for the bucket. You can have up to +// 1,000 analytics configurations per bucket. +// +// This action supports list pagination and does not return more than 100 +// configurations at a time. Always check the IsTruncated element in the response. +// If there are no more configurations to list, IsTruncated is set to false. If +// there are more configurations to list, IsTruncated is set to true, and there is +// a value in NextContinuationToken . You use the NextContinuationToken value to +// continue the pagination of the list by passing the value in continuation-token +// in the request to GET the next page. +// +// To use this operation, you must have permissions to perform the // s3:GetInventoryConfiguration action. The bucket owner has this permission by // default. The bucket owner can grant this permission to others. For more -// information about permissions, see Permissions Related to Bucket Subresource -// Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// . For information about the Amazon S3 inventory feature, see Amazon S3 Inventory (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html) +// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. 
+// +// For information about the Amazon S3 inventory feature, see [Amazon S3 Inventory] +// // The following operations are related to ListBucketInventoryConfigurations : -// - GetBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html) -// - DeleteBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html) -// - PutBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html) +// +// [GetBucketInventoryConfiguration] +// +// [DeleteBucketInventoryConfiguration] +// +// [PutBucketInventoryConfiguration] +// +// [Amazon S3 Inventory]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [DeleteBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html +// [PutBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html +// [GetBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html func (c *Client) ListBucketInventoryConfigurations(ctx context.Context, params *ListBucketInventoryConfigurationsInput, optFns ...func(*Options)) (*ListBucketInventoryConfigurationsOutput, error) { if params == nil { params = &ListBucketInventoryConfigurationsInput{} @@ -71,6 +85,7 @@ type ListBucketInventoryConfigurationsInput struct { } func (in *ListBucketInventoryConfigurationsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -143,6 +158,9 @@ func (c *Client) addOperationListBucketInventoryConfigurationsMiddlewares(stack if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -158,6 +176,18 @@ func (c *Client) addOperationListBucketInventoryConfigurationsMiddlewares(stack if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpListBucketInventoryConfigurationsValidationMiddleware(stack); err != nil { return err } @@ -191,6 +221,18 @@ func (c *Client) addOperationListBucketInventoryConfigurationsMiddlewares(stack if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketMetricsConfigurations.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketMetricsConfigurations.go index 0b6ca9473..7f99075af 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketMetricsConfigurations.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketMetricsConfigurations.go @@ -13,28 +13,42 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Lists the metrics -// configurations for the bucket. The metrics configurations are only for the -// request metrics of the bucket and do not provide information on daily storage -// metrics. You can have up to 1,000 configurations per bucket. This action -// supports list pagination and does not return more than 100 configurations at a -// time. Always check the IsTruncated element in the response. If there are no -// more configurations to list, IsTruncated is set to false. If there are more -// configurations to list, IsTruncated is set to true, and there is a value in -// NextContinuationToken . You use the NextContinuationToken value to continue the -// pagination of the list by passing the value in continuation-token in the -// request to GET the next page. To use this operation, you must have permissions -// to perform the s3:GetMetricsConfiguration action. The bucket owner has this -// permission by default. The bucket owner can grant this permission to others. For -// more information about permissions, see Permissions Related to Bucket -// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// . For more information about metrics configurations and CloudWatch request -// metrics, see Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html) -// . The following operations are related to ListBucketMetricsConfigurations : -// - PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html) -// - GetBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html) -// - DeleteBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html) +// This operation is not supported for directory buckets. +// +// Lists the metrics configurations for the bucket. The metrics configurations are +// only for the request metrics of the bucket and do not provide information on +// daily storage metrics. You can have up to 1,000 configurations per bucket. +// +// This action supports list pagination and does not return more than 100 +// configurations at a time. Always check the IsTruncated element in the response. +// If there are no more configurations to list, IsTruncated is set to false. If +// there are more configurations to list, IsTruncated is set to true, and there is +// a value in NextContinuationToken . You use the NextContinuationToken value to +// continue the pagination of the list by passing the value in continuation-token +// in the request to GET the next page. +// +// To use this operation, you must have permissions to perform the +// s3:GetMetricsConfiguration action. The bucket owner has this permission by +// default. 
The bucket owner can grant this permission to others. For more +// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. +// +// For more information about metrics configurations and CloudWatch request +// metrics, see [Monitoring Metrics with Amazon CloudWatch]. +// +// The following operations are related to ListBucketMetricsConfigurations : +// +// [PutBucketMetricsConfiguration] +// +// [GetBucketMetricsConfiguration] +// +// [DeleteBucketMetricsConfiguration] +// +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [Monitoring Metrics with Amazon CloudWatch]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html +// [GetBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html +// [PutBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html +// [DeleteBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html func (c *Client) ListBucketMetricsConfigurations(ctx context.Context, params *ListBucketMetricsConfigurationsInput, optFns ...func(*Options)) (*ListBucketMetricsConfigurationsOutput, error) { if params == nil { params = &ListBucketMetricsConfigurationsInput{} @@ -72,6 +86,7 @@ type ListBucketMetricsConfigurationsInput struct { } func (in *ListBucketMetricsConfigurationsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket } @@ -145,6 +160,9 @@ func (c *Client) addOperationListBucketMetricsConfigurationsMiddlewares(stack *m if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -160,6 +178,18 @@ func (c *Client) addOperationListBucketMetricsConfigurationsMiddlewares(stack *m if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpListBucketMetricsConfigurationsValidationMiddleware(stack); err != nil { return err } @@ -193,6 +223,18 @@ func (c *Client) addOperationListBucketMetricsConfigurationsMiddlewares(stack *m if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBuckets.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBuckets.go index 086d9d290..aec9715d6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBuckets.go +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBuckets.go @@ -13,11 +13,23 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Returns a list of all -// buckets owned by the authenticated sender of the request. To use this operation, -// you must have the s3:ListAllMyBuckets permission. For information about Amazon -// S3 buckets, see Creating, configuring, and working with Amazon S3 buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/creating-buckets-s3.html) -// . +// This operation is not supported for directory buckets. +// +// Returns a list of all buckets owned by the authenticated sender of the request. +// To grant IAM permission to use this operation, you must add the +// s3:ListAllMyBuckets policy action. +// +// For information about Amazon S3 buckets, see [Creating, configuring, and working with Amazon S3 buckets]. +// +// We strongly recommend using only paginated ListBuckets requests. Unpaginated +// ListBuckets requests are only supported for Amazon Web Services accounts set to +// the default general purpose bucket quota of 10,000. If you have an approved +// general purpose bucket quota above 10,000, you must send paginated ListBuckets +// requests to list your account’s buckets. All unpaginated ListBuckets requests +// will be rejected for Amazon Web Services accounts with a general purpose bucket +// quota greater than 10,000. +// +// [Creating, configuring, and working with Amazon S3 buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/creating-buckets-s3.html func (c *Client) ListBuckets(ctx context.Context, params *ListBucketsInput, optFns ...func(*Options)) (*ListBucketsOutput, error) { if params == nil { params = &ListBucketsInput{} @@ -34,6 +46,44 @@ func (c *Client) ListBuckets(ctx context.Context, params *ListBucketsInput, optF } type ListBucketsInput struct { + + // Limits the response to buckets that are located in the specified Amazon Web + // Services Region. The Amazon Web Services Region must be expressed according to + // the Amazon Web Services Region code, such as us-west-2 for the US West (Oregon) + // Region. For a list of the valid values for all of the Amazon Web Services + // Regions, see [Regions and Endpoints]. + // + // Requests made to a Regional endpoint that is different from the bucket-region + // parameter are not supported. For example, if you want to limit the response to + // your buckets in Region us-west-2 , the request must be made to an endpoint in + // Region us-west-2 . + // + // [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region + BucketRegion *string + + // ContinuationToken indicates to Amazon S3 that the list is being continued on + // this bucket with a token. ContinuationToken is obfuscated and is not a real + // key. You can use this ContinuationToken for pagination of the list results. + // + // Length Constraints: Minimum length of 0. Maximum length of 1024. + // + // Required: No. + // + // If you specify the bucket-region , prefix , or continuation-token query + // parameters without using max-buckets to set the maximum number of buckets + // returned in the response, Amazon S3 applies a default page size of 10,000 and + // provides a continuation token if there are more buckets. + ContinuationToken *string + + // Maximum number of buckets to be returned in response. 
When the number is more + // than the count of buckets that are owned by an Amazon Web Services account, + // return all the buckets in response. + MaxBuckets *int32 + + // Limits the response to bucket names that begin with the specified bucket name + // prefix. + Prefix *string + noSmithyDocumentSerde } @@ -42,9 +92,20 @@ type ListBucketsOutput struct { // The list of buckets owned by the requester. Buckets []types.Bucket + // ContinuationToken is included in the response when there are more buckets that + // can be listed with pagination. The next ListBuckets request to Amazon S3 can be + // continued with this ContinuationToken . ContinuationToken is obfuscated and is + // not a real bucket. + ContinuationToken *string + // The owner of the buckets listed. Owner *types.Owner + // If Prefix was sent with the request, it is included in the response. + // + // All bucket names in the response begin with the specified bucket name prefix. + Prefix *string + // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata @@ -94,6 +155,9 @@ func (c *Client) addOperationListBucketsMiddlewares(stack *middleware.Stack, opt if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -109,6 +173,18 @@ func (c *Client) addOperationListBucketsMiddlewares(stack *middleware.Stack, opt if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListBuckets(options.Region), middleware.Before); err != nil { return err } @@ -139,9 +215,115 @@ func (c *Client) addOperationListBucketsMiddlewares(stack *middleware.Stack, opt if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } +// ListBucketsPaginatorOptions is the paginator options for ListBuckets +type ListBucketsPaginatorOptions struct { + // Maximum number of buckets to be returned in response. When the number is more + // than the count of buckets that are owned by an Amazon Web Services account, + // return all the buckets in response. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. 
+ StopOnDuplicateToken bool +} + +// ListBucketsPaginator is a paginator for ListBuckets +type ListBucketsPaginator struct { + options ListBucketsPaginatorOptions + client ListBucketsAPIClient + params *ListBucketsInput + nextToken *string + firstPage bool +} + +// NewListBucketsPaginator returns a new ListBucketsPaginator +func NewListBucketsPaginator(client ListBucketsAPIClient, params *ListBucketsInput, optFns ...func(*ListBucketsPaginatorOptions)) *ListBucketsPaginator { + if params == nil { + params = &ListBucketsInput{} + } + + options := ListBucketsPaginatorOptions{} + if params.MaxBuckets != nil { + options.Limit = *params.MaxBuckets + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListBucketsPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.ContinuationToken, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListBucketsPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next ListBuckets page. +func (p *ListBucketsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListBucketsOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.ContinuationToken = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxBuckets = limit + + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) + result, err := p.client.ListBuckets(ctx, ¶ms, optFns...) + if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.ContinuationToken + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +// ListBucketsAPIClient is a client that implements the ListBuckets operation. +type ListBucketsAPIClient interface { + ListBuckets(context.Context, *ListBucketsInput, ...func(*Options)) (*ListBucketsOutput, error) +} + +var _ ListBucketsAPIClient = (*Client)(nil) + func newServiceMetadataMiddleware_opListBuckets(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListDirectoryBuckets.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListDirectoryBuckets.go index 3ebf78af1..d8d85e9d6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListDirectoryBuckets.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListDirectoryBuckets.go @@ -15,23 +15,32 @@ import ( ) // Returns a list of all Amazon S3 directory buckets owned by the authenticated -// sender of the request. For more information about directory buckets, see -// Directory buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html) -// in the Amazon S3 User Guide. Directory buckets - For directory buckets, you must -// make requests for this API operation to the Regional endpoint. These endpoints -// support path-style requests in the format -// https://s3express-control.region_code.amazonaws.com/bucket-name . -// Virtual-hosted-style requests aren't supported. 
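The ListBucketsPaginator added above is driven like the SDK's other paginators; a minimal sketch, assuming an already-configured client. The paginator threads ContinuationToken between pages itself, which is what makes the paginated form safe for accounts above the default 10,000-bucket quota mentioned earlier.

    package main

    import (
        "context"
        "fmt"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/service/s3"
    )

    func printAllBuckets(ctx context.Context, client *s3.Client) error {
        p := s3.NewListBucketsPaginator(client, &s3.ListBucketsInput{})
        for p.HasMorePages() {
            page, err := p.NextPage(ctx)
            if err != nil {
                return err
            }
            for _, b := range page.Buckets {
                fmt.Println(aws.ToString(b.Name))
            }
        }
        return nil
    }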
For more information, see -// Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. Permissions You must have the -// s3express:ListAllMyDirectoryBuckets permission in an IAM identity-based policy -// instead of a bucket policy. Cross-account access to this API operation isn't -// supported. This operation can only be performed by the Amazon Web Services -// account that owns the resource. For more information about directory bucket -// policies and permissions, see Amazon Web Services Identity and Access -// Management (IAM) for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html) -// in the Amazon S3 User Guide. HTTP Host header syntax Directory buckets - The -// HTTP Host header syntax is s3express-control.region.amazonaws.com . +// sender of the request. For more information about directory buckets, see [Directory buckets]in the +// Amazon S3 User Guide. +// +// Directory buckets - For directory buckets, you must make requests for this API +// operation to the Regional endpoint. These endpoints support path-style requests +// in the format https://s3express-control.region-code.amazonaws.com/bucket-name . +// Virtual-hosted-style requests aren't supported. For more information about +// endpoints in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more +// information about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. +// +// Permissions You must have the s3express:ListAllMyDirectoryBuckets permission in +// an IAM identity-based policy instead of a bucket policy. Cross-account access to +// this API operation isn't supported. This operation can only be performed by the +// Amazon Web Services account that owns the resource. For more information about +// directory bucket policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]in the Amazon S3 User Guide. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// s3express-control.region.amazonaws.com . +// +// The BucketRegion response element is not part of the ListDirectoryBuckets +// Response Syntax. +// +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [Directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html +// [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html func (c *Client) ListDirectoryBuckets(ctx context.Context, params *ListDirectoryBucketsInput, optFns ...func(*Options)) (*ListDirectoryBucketsOutput, error) { if params == nil { params = &ListDirectoryBucketsInput{} @@ -50,8 +59,9 @@ func (c *Client) ListDirectoryBuckets(ctx context.Context, params *ListDirectory type ListDirectoryBucketsInput struct { // ContinuationToken indicates to Amazon S3 that the list is being continued on - // this bucket with a token. ContinuationToken is obfuscated and is not a real - // key. 
You can use this ContinuationToken for pagination of the list results. + // buckets in this account with a token. ContinuationToken is obfuscated and is + // not a real bucket name. You can use this ContinuationToken for the pagination + // of the list results. ContinuationToken *string // Maximum number of buckets to be returned in response. When the number is more @@ -125,6 +135,9 @@ func (c *Client) addOperationListDirectoryBucketsMiddlewares(stack *middleware.S if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -140,6 +153,18 @@ func (c *Client) addOperationListDirectoryBucketsMiddlewares(stack *middleware.S if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListDirectoryBuckets(options.Region), middleware.Before); err != nil { return err } @@ -170,17 +195,21 @@ func (c *Client) addOperationListDirectoryBucketsMiddlewares(stack *middleware.S if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } -// ListDirectoryBucketsAPIClient is a client that implements the -// ListDirectoryBuckets operation. -type ListDirectoryBucketsAPIClient interface { - ListDirectoryBuckets(context.Context, *ListDirectoryBucketsInput, ...func(*Options)) (*ListDirectoryBucketsOutput, error) -} - -var _ ListDirectoryBucketsAPIClient = (*Client)(nil) - // ListDirectoryBucketsPaginatorOptions is the paginator options for // ListDirectoryBuckets type ListDirectoryBucketsPaginatorOptions struct { @@ -247,6 +276,9 @@ func (p *ListDirectoryBucketsPaginator) NextPage(ctx context.Context, optFns ... } params.MaxDirectoryBuckets = limit + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) result, err := p.client.ListDirectoryBuckets(ctx, ¶ms, optFns...) if err != nil { return nil, err @@ -266,6 +298,14 @@ func (p *ListDirectoryBucketsPaginator) NextPage(ctx context.Context, optFns ... return result, nil } +// ListDirectoryBucketsAPIClient is a client that implements the +// ListDirectoryBuckets operation. 
+type ListDirectoryBucketsAPIClient interface { + ListDirectoryBuckets(context.Context, *ListDirectoryBucketsInput, ...func(*Options)) (*ListDirectoryBucketsOutput, error) +} + +var _ ListDirectoryBucketsAPIClient = (*Client)(nil) + func newServiceMetadataMiddleware_opListDirectoryBuckets(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListMultipartUploads.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListMultipartUploads.go index 183773651..7d5dd343e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListMultipartUploads.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListMultipartUploads.go @@ -16,38 +16,49 @@ import ( // This operation lists in-progress multipart uploads in a bucket. An in-progress // multipart upload is a multipart upload that has been initiated by the // CreateMultipartUpload request, but has not yet been completed or aborted. +// // Directory buckets - If multipart uploads in a directory bucket are in progress, // you can't delete the bucket until all the in-progress multipart uploads are -// aborted or completed. The ListMultipartUploads operation returns a maximum of -// 1,000 multipart uploads in the response. The limit of 1,000 multipart uploads is -// also the default value. You can further limit the number of uploads in a -// response by specifying the max-uploads request parameter. If there are more -// than 1,000 multipart uploads that satisfy your ListMultipartUploads request, -// the response returns an IsTruncated element with the value of true , a -// NextKeyMarker element, and a NextUploadIdMarker element. To list the remaining -// multipart uploads, you need to make subsequent ListMultipartUploads requests. -// In these requests, include two query parameters: key-marker and upload-id-marker -// . Set the value of key-marker to the NextKeyMarker value from the previous -// response. Similarly, set the value of upload-id-marker to the NextUploadIdMarker -// value from the previous response. Directory buckets - The upload-id-marker -// element and the NextUploadIdMarker element aren't supported by directory -// buckets. To list the additional multipart uploads, you only need to set the -// value of key-marker to the NextKeyMarker value from the previous response. For -// more information about multipart uploads, see Uploading Objects Using Multipart -// Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) -// in the Amazon S3 User Guide. Directory buckets - For directory buckets, you must -// make requests for this API operation to the Zonal endpoint. These endpoints -// support virtual-hosted-style requests in the format -// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style -// requests are not supported. For more information, see Regional and Zonal -// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. Permissions +// aborted or completed. To delete these in-progress multipart uploads, use the +// ListMultipartUploads operation to list the in-progress multipart uploads in the +// bucket and use the AbortMultipartUpload operation to abort all the in-progress +// multipart uploads. +// +// The ListMultipartUploads operation returns a maximum of 1,000 multipart uploads +// in the response. 
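Usage of the relocated ListDirectoryBucketsAPIClient and its paginator mirrors the ListBuckets sketch earlier; the page size below is an arbitrary placeholder.

    package main

    import (
        "context"
        "fmt"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/service/s3"
    )

    func printDirectoryBuckets(ctx context.Context, client *s3.Client) error {
        p := s3.NewListDirectoryBucketsPaginator(client, &s3.ListDirectoryBucketsInput{
            MaxDirectoryBuckets: aws.Int32(100), // placeholder page size
        })
        for p.HasMorePages() {
            page, err := p.NextPage(ctx)
            if err != nil {
                return err
            }
            for _, b := range page.Buckets {
                fmt.Println(aws.ToString(b.Name))
            }
        }
        return nil
    }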
The limit of 1,000 multipart uploads is also the default value.
+// You can further limit the number of uploads in a response by specifying the
+// max-uploads request parameter. If there are more than 1,000 multipart uploads
+// that satisfy your ListMultipartUploads request, the response returns an
+// IsTruncated element with the value of true , a NextKeyMarker element, and a
+// NextUploadIdMarker element. To list the remaining multipart uploads, you need to
+// make subsequent ListMultipartUploads requests. In these requests, include two
+// query parameters: key-marker and upload-id-marker . Set the value of key-marker
+// to the NextKeyMarker value from the previous response. Similarly, set the value
+// of upload-id-marker to the NextUploadIdMarker value from the previous response.
+//
+// Directory buckets - The upload-id-marker element and the NextUploadIdMarker
+// element aren't supported by directory buckets. To list the additional multipart
+// uploads, you only need to set the value of key-marker to the NextKeyMarker
+// value from the previous response.
+//
+// For more information about multipart uploads, see [Uploading Objects Using Multipart Upload] in the Amazon S3 User Guide.
+//
+// Directory buckets - For directory buckets, you must make requests for this API
+// operation to the Zonal endpoint. These endpoints support virtual-hosted-style
+// requests in the format
+// https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name
+// . Path-style requests are not supported. For more information about endpoints
+// in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information
+// about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide.
+//
+// Permissions
+//
// - General purpose bucket permissions - For information about permissions
-// required to use the multipart upload API, see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html)
-// in the Amazon S3 User Guide.
+// required to use the multipart upload API, see [Multipart Upload and Permissions]in the Amazon S3 User Guide.
+//
// - Directory bucket permissions - To grant access to this API operation on a
-// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html)
-// API operation for session-based authorization. Specifically, you grant the
+// directory bucket, we recommend that you use the [CreateSession] API operation
+// for session-based authorization. Specifically, you grant the
// s3express:CreateSession permission to the directory bucket in a bucket policy
// or an IAM identity-based policy. Then, you make the CreateSession API call on
// the bucket to obtain a session token. With the session token in your request
@@ -55,29 +66,49 @@ import (
// expires, you make another CreateSession API call to generate a new session
// token for use. Amazon Web Services CLI or SDKs create session and refresh the
// session token automatically to avoid service interruptions when a session
-// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html)
-// .
+// expires. For more information about authorization, see [CreateSession].
// // Sorting of multipart uploads in response +// // - General purpose bucket - In the ListMultipartUploads response, the multipart // uploads are sorted based on two criteria: +// // - Key-based sorting - Multipart uploads are initially sorted in ascending // order based on their object keys. +// // - Time-based sorting - For uploads that share the same object key, they are // further sorted in ascending order based on the upload initiation time. Among // uploads with the same key, the one that was initiated first will appear before // the ones that were initiated later. +// // - Directory bucket - In the ListMultipartUploads response, the multipart // uploads aren't sorted lexicographically based on the object keys. // -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket_name.s3express-az_id.region.amazonaws.com . The following operations are -// related to ListMultipartUploads : -// - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) -// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) -// - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) -// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) -// - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket-name.s3express-zone-id.region-code.amazonaws.com . +// +// The following operations are related to ListMultipartUploads : +// +// [CreateMultipartUpload] +// +// [UploadPart] +// +// [CompleteMultipartUpload] +// +// [ListParts] +// +// [AbortMultipartUpload] +// +// [Uploading Objects Using Multipart Upload]: https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [ListParts]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html +// [AbortMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html +// [UploadPart]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html +// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html +// [Multipart Upload and Permissions]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html +// [CompleteMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html +// [CreateMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html func (c *Client) ListMultipartUploads(ctx context.Context, params *ListMultipartUploadsInput, optFns ...func(*Options)) (*ListMultipartUploadsOutput, error) { if params == nil { params = &ListMultipartUploadsInput{} @@ -95,50 +126,68 @@ func (c *Client) ListMultipartUploads(ctx context.Context, params *ListMultipart type ListMultipartUploadsInput struct { - // The name of the bucket to which the multipart upload was initiated. Directory - // buckets - When you use this operation with a directory bucket, you must use - // virtual-hosted-style requests in the format - // Bucket_name.s3express-az_id.region.amazonaws.com . 
Path-style requests are not - // supported. Directory bucket names must be unique in the chosen Availability - // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for - // example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket - // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide. Access points - When you use this action with an - // access point, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When using the access point ARN, - // you must direct requests to the access point hostname. The access point hostname - // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. Access points and Object Lambda access points are - // not supported by directory buckets. S3 on Outposts - When you use this action - // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts - // hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // The name of the bucket to which the multipart upload was initiated. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use virtual-hosted-style requests in the format + // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests + // are not supported. Directory bucket names must be unique in the chosen Zone + // (Availability Zone or Local Zone). Bucket names must follow the format + // bucket-base-name--zone-id--x-s3 (for example, + // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the access + // point ARN. When using the access point ARN, you must direct requests to the + // access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with S3 on Outposts, you must direct + // requests to the S3 on Outposts hostname. 
The S3 on Outposts hostname takes the + // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When + // you use this action with S3 on Outposts, the destination bucket must be the + // Outposts access point ARN or the access point alias. For more information about + // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string - // Character you use to group keys. All keys that contain the same string between - // the prefix, if specified, and the first occurrence of the delimiter after the - // prefix are grouped under a single result element, CommonPrefixes . If you don't - // specify the prefix parameter, then the substring starts at the beginning of the - // key. The keys that are grouped under CommonPrefixes result element are not - // returned elsewhere in the response. Directory buckets - For directory buckets, / - // is the only supported delimiter. + // Character you use to group keys. + // + // All keys that contain the same string between the prefix, if specified, and the + // first occurrence of the delimiter after the prefix are grouped under a single + // result element, CommonPrefixes . If you don't specify the prefix parameter, then + // the substring starts at the beginning of the key. The keys that are grouped + // under CommonPrefixes result element are not returned elsewhere in the response. + // + // Directory buckets - For directory buckets, / is the only supported delimiter. Delimiter *string - // Requests Amazon S3 to encode the object keys in the response and specifies the - // encoding method to use. An object key can contain any Unicode character; - // however, the XML 1.0 parser cannot parse some characters, such as characters - // with an ASCII value from 0 to 10. For characters that are not supported in XML - // 1.0, you can add this parameter to request that Amazon S3 encode the keys in the - // response. + // Encoding type used by Amazon S3 to encode the [object keys] in the response. Responses are + // encoded only in UTF-8. An object key can contain any Unicode character. However, + // the XML 1.0 parser can't parse certain characters, such as characters with an + // ASCII value from 0 to 10. For characters that aren't supported in XML 1.0, you + // can add this parameter to request that Amazon S3 encode the keys in the + // response. For more information about characters to avoid in object key names, + // see [Object key naming guidelines]. + // + // When using the URL encoding type, non-ASCII characters that are used in an + // object's key name will be percent-encoded according to UTF-8 code values. For + // example, the object test_file(3).png will appear as test_file%283%29.png . + // + // [Object key naming guidelines]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-guidelines + // [object keys]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html EncodingType types.EncodingType // The account ID of the expected bucket owner. 
If the account ID that you provide @@ -147,20 +196,26 @@ type ListMultipartUploadsInput struct { ExpectedBucketOwner *string // Specifies the multipart upload after which listing should begin. + // // - General purpose buckets - For general purpose buckets, key-marker is an // object key. Together with upload-id-marker , this parameter specifies the - // multipart upload after which listing should begin. If upload-id-marker is not - // specified, only the keys lexicographically greater than the specified - // key-marker will be included in the list. If upload-id-marker is specified, any - // multipart uploads for a key equal to the key-marker might also be included, - // provided those multipart uploads have upload IDs lexicographically greater than - // the specified upload-id-marker . + // multipart upload after which listing should begin. + // + // If upload-id-marker is not specified, only the keys lexicographically greater + // than the specified key-marker will be included in the list. + // + // If upload-id-marker is specified, any multipart uploads for a key equal to the + // key-marker might also be included, provided those multipart uploads have + // upload IDs lexicographically greater than the specified upload-id-marker . + // // - Directory buckets - For directory buckets, key-marker is obfuscated and // isn't a real object key. The upload-id-marker parameter isn't supported by // directory buckets. To list the additional multipart uploads, you only need to // set the value of key-marker to the NextKeyMarker value from the previous - // response. In the ListMultipartUploads response, the multipart uploads aren't - // sorted lexicographically based on the object keys. + // response. + // + // In the ListMultipartUploads response, the multipart uploads aren't sorted + // lexicographically based on the object keys. KeyMarker *string // Sets the maximum number of multipart uploads, from 1 to 1,000, to return in the @@ -171,32 +226,38 @@ type ListMultipartUploadsInput struct { // Lists in-progress uploads only for those keys that begin with the specified // prefix. You can use prefixes to separate a bucket into different grouping of // keys. (You can think of using prefix to make groups in the same way that you'd - // use a folder in a file system.) Directory buckets - For directory buckets, only - // prefixes that end in a delimiter ( / ) are supported. + // use a folder in a file system.) + // + // Directory buckets - For directory buckets, only prefixes that end in a + // delimiter ( / ) are supported. Prefix *string // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. 
+ // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer // Together with key-marker, specifies the multipart upload after which listing // should begin. If key-marker is not specified, the upload-id-marker parameter is // ignored. Otherwise, any multipart uploads for a key equal to the key-marker // might be included in the list only if they have an upload ID lexicographically - // greater than the specified upload-id-marker . This functionality is not - // supported for directory buckets. + // greater than the specified upload-id-marker . + // + // This functionality is not supported for directory buckets. UploadIdMarker *string noSmithyDocumentSerde } func (in *ListMultipartUploadsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.Prefix = in.Prefix @@ -210,20 +271,25 @@ type ListMultipartUploadsOutput struct { // If you specify a delimiter in the request, then the result returns each // distinct key prefix containing the delimiter in a CommonPrefixes element. The - // distinct key prefixes are returned in the Prefix child element. Directory - // buckets - For directory buckets, only prefixes that end in a delimiter ( / ) are - // supported. + // distinct key prefixes are returned in the Prefix child element. + // + // Directory buckets - For directory buckets, only prefixes that end in a + // delimiter ( / ) are supported. CommonPrefixes []types.CommonPrefix // Contains the delimiter you specified in the request. If you don't specify a - // delimiter in your request, this element is absent from the response. Directory - // buckets - For directory buckets, / is the only supported delimiter. + // delimiter in your request, this element is absent from the response. + // + // Directory buckets - For directory buckets, / is the only supported delimiter. Delimiter *string - // Encoding type used by Amazon S3 to encode object keys in the response. If you - // specify the encoding-type request parameter, Amazon S3 includes this element in - // the response, and returns encoded key name values in the following response - // elements: Delimiter , KeyMarker , Prefix , NextKeyMarker , Key . + // Encoding type used by Amazon S3 to encode object keys in the response. + // + // If you specify the encoding-type request parameter, Amazon S3 includes this + // element in the response, and returns encoded key name values in the following + // response elements: + // + // Delimiter , KeyMarker , Prefix , NextKeyMarker , Key . EncodingType types.EncodingType // Indicates whether the returned list of multipart uploads is truncated. A value @@ -244,26 +310,31 @@ type ListMultipartUploadsOutput struct { NextKeyMarker *string // When a list is truncated, this element specifies the value that should be used - // for the upload-id-marker request parameter in a subsequent request. This - // functionality is not supported for directory buckets. + // for the upload-id-marker request parameter in a subsequent request. + // + // This functionality is not supported for directory buckets. NextUploadIdMarker *string // When a prefix is provided in the request, this field contains the specified // prefix. The result contains only keys starting with the specified prefix. - // Directory buckets - For directory buckets, only prefixes that end in a delimiter - // ( / ) are supported. 
+ // + // Directory buckets - For directory buckets, only prefixes that end in a + // delimiter ( / ) are supported. Prefix *string // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // Together with key-marker, specifies the multipart upload after which listing // should begin. If key-marker is not specified, the upload-id-marker parameter is // ignored. Otherwise, any multipart uploads for a key equal to the key-marker // might be included in the list only if they have an upload ID lexicographically - // greater than the specified upload-id-marker . This functionality is not - // supported for directory buckets. + // greater than the specified upload-id-marker . + // + // This functionality is not supported for directory buckets. UploadIdMarker *string // Container for elements related to a particular multipart upload. A response can @@ -319,6 +390,9 @@ func (c *Client) addOperationListMultipartUploadsMiddlewares(stack *middleware.S if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -334,6 +408,18 @@ func (c *Client) addOperationListMultipartUploadsMiddlewares(stack *middleware.S if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpListMultipartUploadsValidationMiddleware(stack); err != nil { return err } @@ -367,6 +453,18 @@ func (c *Client) addOperationListMultipartUploadsMiddlewares(stack *middleware.S if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectVersions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectVersions.go index bcb90eb2d..6cfc98628 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectVersions.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectVersions.go @@ -13,19 +13,34 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Returns metadata about -// all versions of the objects in a bucket. You can also use request parameters as -// selection criteria to return metadata about a subset of all the object versions. +// This operation is not supported for directory buckets. +// +// Returns metadata about all versions of the objects in a bucket. You can also +// use request parameters as selection criteria to return metadata about a subset +// of all the object versions. +// // To use this operation, you must have permission to perform the -// s3:ListBucketVersions action. Be aware of the name difference. 
A 200 OK -// response can contain valid or invalid XML. Make sure to design your application -// to parse the contents of the response and handle it appropriately. To use this -// operation, you must have READ access to the bucket. The following operations are -// related to ListObjectVersions : -// - ListObjectsV2 (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html) -// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) -// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) -// - DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) +// s3:ListBucketVersions action. Be aware of the name difference. +// +// A 200 OK response can contain valid or invalid XML. Make sure to design your +// application to parse the contents of the response and handle it appropriately. +// +// To use this operation, you must have READ access to the bucket. +// +// The following operations are related to ListObjectVersions : +// +// [ListObjectsV2] +// +// [GetObject] +// +// [PutObject] +// +// [DeleteObject] +// +// [DeleteObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html +// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html +// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html +// [ListObjectsV2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html func (c *Client) ListObjectVersions(ctx context.Context, params *ListObjectVersionsInput, optFns ...func(*Options)) (*ListObjectVersionsOutput, error) { if params == nil { params = &ListObjectVersionsInput{} @@ -55,12 +70,20 @@ type ListObjectVersionsInput struct { // are not returned elsewhere in the response. Delimiter *string - // Requests Amazon S3 to encode the object keys in the response and specifies the - // encoding method to use. An object key can contain any Unicode character; - // however, the XML 1.0 parser cannot parse some characters, such as characters - // with an ASCII value from 0 to 10. For characters that are not supported in XML - // 1.0, you can add this parameter to request that Amazon S3 encode the keys in the - // response. + // Encoding type used by Amazon S3 to encode the [object keys] in the response. Responses are + // encoded only in UTF-8. An object key can contain any Unicode character. However, + // the XML 1.0 parser can't parse certain characters, such as characters with an + // ASCII value from 0 to 10. For characters that aren't supported in XML 1.0, you + // can add this parameter to request that Amazon S3 encode the keys in the + // response. For more information about characters to avoid in object key names, + // see [Object key naming guidelines]. + // + // When using the URL encoding type, non-ASCII characters that are used in an + // object's key name will be percent-encoded according to UTF-8 code values. For + // example, the object test_file(3).png will appear as test_file%283%29.png . + // + // [Object key naming guidelines]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-guidelines + // [object keys]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html EncodingType types.EncodingType // The account ID of the expected bucket owner. If the account ID that you provide @@ -93,10 +116,12 @@ type ListObjectVersionsInput struct { // Bucket owners need not specify this parameter in their requests. 
If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer // Specifies the object version you want to start listing from. @@ -106,6 +131,7 @@ type ListObjectVersionsInput struct { } func (in *ListObjectVersionsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.Prefix = in.Prefix @@ -117,7 +143,10 @@ type ListObjectVersionsOutput struct { // calculating the number of returns. CommonPrefixes []types.CommonPrefix - // Container for an object that is a delete marker. + // Container for an object that is a delete marker. To learn more about delete + // markers, see [Working with delete markers]. + // + // [Working with delete markers]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/DeleteMarker.html DeleteMarkers []types.DeleteMarkerEntry // The delimiter grouping the included keys. A delimiter is a character that you @@ -127,10 +156,13 @@ type ListObjectVersionsOutput struct { // max-keys limitation. These keys are not returned elsewhere in the response. Delimiter *string - // Encoding type used by Amazon S3 to encode object key names in the XML response. + // Encoding type used by Amazon S3 to encode object key names in the XML response. + // // If you specify the encoding-type request parameter, Amazon S3 includes this // element in the response, and returns encoded key name values in the following - // response elements: KeyMarker, NextKeyMarker, Prefix, Key , and Delimiter . + // response elements: + // + // KeyMarker, NextKeyMarker, Prefix, Key , and Delimiter . EncodingType types.EncodingType // A flag that indicates whether Amazon S3 returned all of the results that @@ -164,7 +196,9 @@ type ListObjectVersionsOutput struct { Prefix *string // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // Marks the last version of the key returned in a truncated response. 
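The version-listing semantics described here pair two markers in the same way: a truncated ListObjectVersions response carries NextKeyMarker and NextVersionIdMarker, which feed the next request's key-marker and version-id-marker. A sketch (arbitrary package name, printing only):

package s3util

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// listAllVersions prints every object version and delete marker in a bucket,
// following NextKeyMarker/NextVersionIdMarker while the listing is truncated.
func listAllVersions(ctx context.Context, client *s3.Client, bucket string) error {
	in := &s3.ListObjectVersionsInput{Bucket: aws.String(bucket)}
	for {
		out, err := client.ListObjectVersions(ctx, in)
		if err != nil {
			return err
		}
		for _, v := range out.Versions {
			fmt.Printf("version       %s %s\n", aws.ToString(v.Key), aws.ToString(v.VersionId))
		}
		for _, dm := range out.DeleteMarkers {
			fmt.Printf("delete marker %s %s\n", aws.ToString(dm.Key), aws.ToString(dm.VersionId))
		}
		if !aws.ToBool(out.IsTruncated) {
			return nil
		}
		in.KeyMarker = out.NextKeyMarker
		in.VersionIdMarker = out.NextVersionIdMarker
	}
}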
@@ -222,6 +256,9 @@ func (c *Client) addOperationListObjectVersionsMiddlewares(stack *middleware.Sta if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -237,6 +274,18 @@ func (c *Client) addOperationListObjectVersionsMiddlewares(stack *middleware.Sta if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpListObjectVersionsValidationMiddleware(stack); err != nil { return err } @@ -270,6 +319,18 @@ func (c *Client) addOperationListObjectVersionsMiddlewares(stack *middleware.Sta if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjects.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjects.go index 9a3bf3e02..f56445886 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjects.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjects.go @@ -13,19 +13,35 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Returns some or all (up -// to 1,000) of the objects in a bucket. You can use the request parameters as -// selection criteria to return a subset of the objects in a bucket. A 200 OK -// response can contain valid or invalid XML. Be sure to design your application to -// parse the contents of the response and handle it appropriately. This action has -// been revised. We recommend that you use the newer version, ListObjectsV2 (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html) -// , when developing applications. For backward compatibility, Amazon S3 continues -// to support ListObjects . The following operations are related to ListObjects : -// - ListObjectsV2 (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html) -// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) -// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) -// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) -// - ListBuckets (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html) +// This operation is not supported for directory buckets. +// +// Returns some or all (up to 1,000) of the objects in a bucket. You can use the +// request parameters as selection criteria to return a subset of the objects in a +// bucket. A 200 OK response can contain valid or invalid XML. Be sure to design +// your application to parse the contents of the response and handle it +// appropriately. +// +// This action has been revised. We recommend that you use the newer version, [ListObjectsV2], +// when developing applications. 
For backward compatibility, Amazon S3 continues to +// support ListObjects . +// +// The following operations are related to ListObjects : +// +// [ListObjectsV2] +// +// [GetObject] +// +// [PutObject] +// +// [CreateBucket] +// +// [ListBuckets] +// +// [ListBuckets]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html +// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html +// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html +// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html +// [ListObjectsV2]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html func (c *Client) ListObjects(ctx context.Context, params *ListObjectsInput, optFns ...func(*Options)) (*ListObjectsOutput, error) { if params == nil { params = &ListObjectsInput{} @@ -43,31 +59,39 @@ func (c *Client) ListObjects(ctx context.Context, params *ListObjectsInput, optF type ListObjectsInput struct { - // The name of the bucket containing the objects. Directory buckets - When you use - // this operation with a directory bucket, you must use virtual-hosted-style - // requests in the format Bucket_name.s3express-az_id.region.amazonaws.com . - // Path-style requests are not supported. Directory bucket names must be unique in - // the chosen Availability Zone. Bucket names must follow the format - // bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 - // ). For information about bucket naming restrictions, see Directory bucket - // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide. Access points - When you use this action with an - // access point, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When using the access point ARN, - // you must direct requests to the access point hostname. The access point hostname - // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. Access points and Object Lambda access points are - // not supported by directory buckets. S3 on Outposts - When you use this action - // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts - // hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // The name of the bucket containing the objects. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use virtual-hosted-style requests in the format + // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests + // are not supported. Directory bucket names must be unique in the chosen Zone + // (Availability Zone or Local Zone). 
Bucket names must follow the format + // bucket-base-name--zone-id--x-s3 (for example, + // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the access + // point ARN. When using the access point ARN, you must direct requests to the + // access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with S3 on Outposts, you must direct + // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the + // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When + // you use this action with S3 on Outposts, the destination bucket must be the + // Outposts access point ARN or the access point alias. For more information about + // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -75,12 +99,20 @@ type ListObjectsInput struct { // A delimiter is a character that you use to group keys. Delimiter *string - // Requests Amazon S3 to encode the object keys in the response and specifies the - // encoding method to use. An object key can contain any Unicode character; - // however, the XML 1.0 parser cannot parse some characters, such as characters - // with an ASCII value from 0 to 10. For characters that are not supported in XML - // 1.0, you can add this parameter to request that Amazon S3 encode the keys in the - // response. + // Encoding type used by Amazon S3 to encode the [object keys] in the response. Responses are + // encoded only in UTF-8. An object key can contain any Unicode character. However, + // the XML 1.0 parser can't parse certain characters, such as characters with an + // ASCII value from 0 to 10. For characters that aren't supported in XML 1.0, you + // can add this parameter to request that Amazon S3 encode the keys in the + // response. For more information about characters to avoid in object key names, + // see [Object key naming guidelines]. + // + // When using the URL encoding type, non-ASCII characters that are used in an + // object's key name will be percent-encoded according to UTF-8 code values. For + // example, the object test_file(3).png will appear as test_file%283%29.png . 
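The EncodingType behavior above, together with the legacy marker scheme this operation uses (NextMarker appears only when a delimiter is set; otherwise the last Key of a truncated page serves as the next marker), can be exercised as below. A sketch only: the bucket name is a placeholder, url.QueryUnescape is one reasonable way to undo the percent-encoding, and error handling is abbreviated.

package main

import (
	"context"
	"fmt"
	"log"
	"net/url"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	in := &s3.ListObjectsInput{
		Bucket:       aws.String("amzn-s3-demo-bucket"), // placeholder name
		EncodingType: types.EncodingTypeUrl,
	}
	for {
		out, err := client.ListObjects(ctx, in)
		if err != nil {
			log.Fatal(err)
		}
		var lastKey string
		for _, obj := range out.Contents {
			// Keys arrive percent-encoded (test_file%283%29.png); decode locally.
			key, err := url.QueryUnescape(aws.ToString(obj.Key))
			if err != nil {
				key = aws.ToString(obj.Key)
			}
			lastKey = key
			fmt.Println(key)
		}
		if !aws.ToBool(out.IsTruncated) {
			break
		}
		// No delimiter was set, so no NextMarker is returned: the last Key of
		// the truncated page becomes the marker for the next request.
		if out.NextMarker != nil {
			in.Marker = out.NextMarker
		} else if lastKey != "" {
			in.Marker = aws.String(lastKey)
		} else {
			break // defensive: nothing to resume from
		}
	}
}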
+ // + // [Object key naming guidelines]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-guidelines + // [object keys]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html EncodingType types.EncodingType // The account ID of the expected bucket owner. If the account ID that you provide @@ -113,6 +145,7 @@ type ListObjectsInput struct { } func (in *ListObjectsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.Prefix = in.Prefix @@ -121,14 +154,20 @@ func (in *ListObjectsInput) bindEndpointParams(p *EndpointParameters) { type ListObjectsOutput struct { // All of the keys (up to 1,000) rolled up in a common prefix count as a single - // return when calculating the number of returns. A response can contain - // CommonPrefixes only if you specify a delimiter. CommonPrefixes contains all (if - // there are any) keys between Prefix and the next occurrence of the string - // specified by the delimiter. CommonPrefixes lists keys that act like - // subdirectories in the directory specified by Prefix . For example, if the prefix - // is notes/ and the delimiter is a slash ( / ), as in notes/summer/july , the - // common prefix is notes/summer/ . All of the keys that roll up into a common - // prefix count as a single return when calculating the number of returns. + // return when calculating the number of returns. + // + // A response can contain CommonPrefixes only if you specify a delimiter. + // + // CommonPrefixes contains all (if there are any) keys between Prefix and the next + // occurrence of the string specified by the delimiter. + // + // CommonPrefixes lists keys that act like subdirectories in the directory + // specified by Prefix . + // + // For example, if the prefix is notes/ and the delimiter is a slash ( / ), as in + // notes/summer/july , the common prefix is notes/summer/ . All of the keys that + // roll up into a common prefix count as a single return when calculating the + // number of returns. CommonPrefixes []types.CommonPrefix // Metadata about each object returned. @@ -141,9 +180,20 @@ type ListObjectsOutput struct { // MaxKeys value. Delimiter *string - // Encoding type used by Amazon S3 to encode object keys in the response. If using - // url , non-ASCII characters used in an object's key name will be URL encoded. For - // example, the object test_file(3).png will appear as test_file%283%29.png. + // Encoding type used by Amazon S3 to encode the [object keys] in the response. Responses are + // encoded only in UTF-8. An object key can contain any Unicode character. However, + // the XML 1.0 parser can't parse certain characters, such as characters with an + // ASCII value from 0 to 10. For characters that aren't supported in XML 1.0, you + // can add this parameter to request that Amazon S3 encode the keys in the + // response. For more information about characters to avoid in object key names, + // see [Object key naming guidelines]. + // + // When using the URL encoding type, non-ASCII characters that are used in an + // object's key name will be percent-encoded according to UTF-8 code values. For + // example, the object test_file(3).png will appear as test_file%283%29.png . 
+ // + // [Object key naming guidelines]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-guidelines + // [object keys]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html EncodingType types.EncodingType // A flag that indicates whether Amazon S3 returned all of the results that @@ -163,18 +213,21 @@ type ListObjectsOutput struct { // When the response is truncated (the IsTruncated element value in the response // is true ), you can use the key name in this field as the marker parameter in // the subsequent request to get the next set of objects. Amazon S3 lists objects - // in alphabetical order. This element is returned only if you have the delimiter - // request parameter specified. If the response does not include the NextMarker - // element and it is truncated, you can use the value of the last Key element in - // the response as the marker parameter in the subsequent request to get the next - // set of object keys. + // in alphabetical order. + // + // This element is returned only if you have the delimiter request parameter + // specified. If the response does not include the NextMarker element and it is + // truncated, you can use the value of the last Key element in the response as the + // marker parameter in the subsequent request to get the next set of object keys. NextMarker *string // Keys that begin with the indicated prefix. Prefix *string // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // Metadata pertaining to the operation's result. @@ -226,6 +279,9 @@ func (c *Client) addOperationListObjectsMiddlewares(stack *middleware.Stack, opt if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -241,6 +297,18 @@ func (c *Client) addOperationListObjectsMiddlewares(stack *middleware.Stack, opt if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpListObjectsValidationMiddleware(stack); err != nil { return err } @@ -274,6 +342,18 @@ func (c *Client) addOperationListObjectsMiddlewares(stack *middleware.Stack, opt if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectsV2.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectsV2.go index ee09d3cbd..f10ae67d5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectsV2.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectsV2.go @@ -17,26 +17,34 @@ import ( // You can use the request parameters as selection criteria 
to return a subset of // the objects in a bucket. A 200 OK response can contain valid or invalid XML. // Make sure to design your application to parse the contents of the response and -// handle it appropriately. For more information about listing objects, see -// Listing object keys programmatically (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ListingKeysUsingAPIs.html) -// in the Amazon S3 User Guide. To get a list of your buckets, see ListBuckets (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html) -// . Directory buckets - For directory buckets, you must make requests for this API -// operation to the Zonal endpoint. These endpoints support virtual-hosted-style -// requests in the format -// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style -// requests are not supported. For more information, see Regional and Zonal -// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. Permissions +// handle it appropriately. For more information about listing objects, see [Listing object keys programmatically]in the +// Amazon S3 User Guide. To get a list of your buckets, see [ListBuckets]. +// +// - General purpose bucket - For general purpose buckets, ListObjectsV2 doesn't +// return prefixes that are related only to in-progress multipart uploads. +// +// - Directory buckets - For directory buckets, ListObjectsV2 response includes +// the prefixes that are related only to in-progress multipart uploads. +// +// - Directory buckets - For directory buckets, you must make requests for this +// API operation to the Zonal endpoint. These endpoints support +// virtual-hosted-style requests in the format +// https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name +// . Path-style requests are not supported. For more information about endpoints +// in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information +// about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. +// +// Permissions +// // - General purpose bucket permissions - To use this operation, you must have // READ access to the bucket. You must have permission to perform the // s3:ListBucket action. The bucket owner has this permission by default and can -// grant this permission to others. For more information about permissions, see -// Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// in the Amazon S3 User Guide. +// grant this permission to others. For more information about permissions, see [Permissions Related to Bucket Subresource Operations] +// and [Managing Access Permissions to Your Amazon S3 Resources]in the Amazon S3 User Guide. +// // - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// API operation for session-based authorization. 
Specifically, you grant the +// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation +// for session-based authorization. Specifically, you grant the // s3express:CreateSession permission to the directory bucket in a bucket policy // or an IAM identity-based policy. Then, you make the CreateSession API call on // the bucket to obtain a session token. With the session token in your request @@ -44,24 +52,43 @@ import ( // expires, you make another CreateSession API call to generate a new session // token for use. Amazon Web Services CLI or SDKs create session and refresh the // session token automatically to avoid service interruptions when a session -// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// . +// expires. For more information about authorization, see [CreateSession]CreateSession . // // Sorting order of returned objects +// // - General purpose bucket - For general purpose buckets, ListObjectsV2 returns // objects in lexicographical order based on their key names. +// // - Directory bucket - For directory buckets, ListObjectsV2 does not return // objects in lexicographical order. // -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket_name.s3express-az_id.region.amazonaws.com . This section describes the -// latest revision of this action. We recommend that you use this revised API -// operation for application development. For backward compatibility, Amazon S3 -// continues to support the prior version of this API operation, ListObjects (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html) -// . The following operations are related to ListObjectsV2 : -// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) -// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) -// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket-name.s3express-zone-id.region-code.amazonaws.com . +// +// This section describes the latest revision of this action. We recommend that +// you use this revised API operation for application development. For backward +// compatibility, Amazon S3 continues to support the prior version of this API +// operation, [ListObjects]. 
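Since the text above recommends ListObjectsV2 for new development, and the continuation-token handshake it describes is mechanical, the generated paginator in this package can drive it. A short sketch with a placeholder bucket name:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// The paginator threads NextContinuationToken into ContinuationToken
	// across requests, so no manual marker handling is needed.
	p := s3.NewListObjectsV2Paginator(client, &s3.ListObjectsV2Input{
		Bucket: aws.String("amzn-s3-demo-bucket"), // placeholder name
	})
	for p.HasMorePages() {
		page, err := p.NextPage(ctx)
		if err != nil {
			log.Fatal(err)
		}
		for _, obj := range page.Contents {
			fmt.Println(aws.ToString(obj.Key), aws.ToInt64(obj.Size), "bytes")
		}
	}
}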
+// +// The following operations are related to ListObjectsV2 : +// +// [GetObject] +// +// [PutObject] +// +// [CreateBucket] +// +// [ListObjects]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [Listing object keys programmatically]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/ListingKeysUsingAPIs.html +// [ListBuckets]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html +// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html +// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html +// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html +// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html func (c *Client) ListObjectsV2(ctx context.Context, params *ListObjectsV2Input, optFns ...func(*Options)) (*ListObjectsV2Output, error) { if params == nil { params = &ListObjectsV2Input{} @@ -79,30 +106,37 @@ func (c *Client) ListObjectsV2(ctx context.Context, params *ListObjectsV2Input, type ListObjectsV2Input struct { - // Directory buckets - When you use this operation with a directory bucket, you + // Directory buckets - When you use this operation with a directory bucket, you // must use virtual-hosted-style requests in the format - // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not - // supported. Directory bucket names must be unique in the chosen Availability - // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for - // example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket - // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide. Access points - When you use this action with an - // access point, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When using the access point ARN, - // you must direct requests to the access point hostname. The access point hostname - // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. Access points and Object Lambda access points are - // not supported by directory buckets. S3 on Outposts - When you use this action - // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts - // hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . 
When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests + // are not supported. Directory bucket names must be unique in the chosen Zone + // (Availability Zone or Local Zone). Bucket names must follow the format + // bucket-base-name--zone-id--x-s3 (for example, + // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the access + // point ARN. When using the access point ARN, you must direct requests to the + // access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with S3 on Outposts, you must direct + // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the + // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When + // you use this action with S3 on Outposts, the destination bucket must be the + // Outposts access point ARN or the access point alias. For more information about + // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -113,18 +147,31 @@ type ListObjectsV2Input struct { ContinuationToken *string // A delimiter is a character that you use to group keys. - // - Directory buckets - For directory buckets, / is the only supported - // delimiter. + // + // - Directory buckets - For directory buckets, / is the only supported delimiter. + // // - Directory buckets - When you query ListObjectsV2 with a delimiter during // in-progress multipart uploads, the CommonPrefixes response parameter contains // the prefixes that are associated with the in-progress multipart uploads. For - // more information about multipart uploads, see Multipart Upload Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) - // in the Amazon S3 User Guide. + // more information about multipart uploads, see [Multipart Upload Overview]in the Amazon S3 User Guide. + // + // [Multipart Upload Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html Delimiter *string - // Encoding type used by Amazon S3 to encode object keys in the response. 
If using - // url , non-ASCII characters used in an object's key name will be URL encoded. For - // example, the object test_file(3).png will appear as test_file%283%29.png. + // Encoding type used by Amazon S3 to encode the [object keys] in the response. Responses are + // encoded only in UTF-8. An object key can contain any Unicode character. However, + // the XML 1.0 parser can't parse certain characters, such as characters with an + // ASCII value from 0 to 10. For characters that aren't supported in XML 1.0, you + // can add this parameter to request that Amazon S3 encode the keys in the + // response. For more information about characters to avoid in object key names, + // see [Object key naming guidelines]. + // + // When using the URL encoding type, non-ASCII characters that are used in an + // object's key name will be percent-encoded according to UTF-8 code values. For + // example, the object test_file(3).png will appear as test_file%283%29.png . + // + // [Object key naming guidelines]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-guidelines + // [object keys]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html EncodingType types.EncodingType // The account ID of the expected bucket owner. If the account ID that you provide @@ -134,8 +181,10 @@ type ListObjectsV2Input struct { // The owner field is not present in ListObjectsV2 by default. If you want to // return the owner field with each key in the result, then set the FetchOwner - // field to true . Directory buckets - For directory buckets, the bucket owner is - // returned as the object owner for all objects. + // field to true . + // + // Directory buckets - For directory buckets, the bucket owner is returned as the + // object owner for all objects. FetchOwner *bool // Sets the maximum number of keys returned in the response. By default, the @@ -144,29 +193,35 @@ type ListObjectsV2Input struct { MaxKeys *int32 // Specifies the optional fields that you want returned in the response. Fields - // that you do not specify are not returned. This functionality is not supported - // for directory buckets. + // that you do not specify are not returned. + // + // This functionality is not supported for directory buckets. OptionalObjectAttributes []types.OptionalObjectAttributes - // Limits the response to keys that begin with the specified prefix. Directory - // buckets - For directory buckets, only prefixes that end in a delimiter ( / ) are - // supported. + // Limits the response to keys that begin with the specified prefix. + // + // Directory buckets - For directory buckets, only prefixes that end in a + // delimiter ( / ) are supported. Prefix *string // Confirms that the requester knows that she or he will be charged for the list // objects request in V2 style. Bucket owners need not specify this parameter in - // their requests. This functionality is not supported for directory buckets. + // their requests. + // + // This functionality is not supported for directory buckets. RequestPayer types.RequestPayer // StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts - // listing after this specified key. StartAfter can be any key in the bucket. This - // functionality is not supported for directory buckets. + // listing after this specified key. StartAfter can be any key in the bucket. + // + // This functionality is not supported for directory buckets. 
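 + //
 + // For example, a minimal sketch of resuming a listing from a known key (here
 + // client is an assumed *s3.Client, ctx a context.Context, and the bucket and
 + // key names are illustrative placeholders):
 + //
 + //	out, err := client.ListObjectsV2(ctx, &s3.ListObjectsV2Input{
 + //		Bucket:     aws.String("amzn-s3-demo-bucket"),
 + //		Prefix:     aws.String("photos/2024/"),
 + //		StartAfter: aws.String("photos/2024/03/last-seen.png"),
 + //	})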
StartAfter *string noSmithyDocumentSerde } func (in *ListObjectsV2Input) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.Prefix = in.Prefix @@ -176,43 +231,57 @@ type ListObjectsV2Output struct { // All of the keys (up to 1,000) that share the same prefix are grouped together. // When counting the total numbers of returns by this API operation, this group of - // keys is considered as one item. A response can contain CommonPrefixes only if - // you specify a delimiter. CommonPrefixes contains all (if there are any) keys - // between Prefix and the next occurrence of the string specified by a delimiter. + // keys is considered as one item. + // + // A response can contain CommonPrefixes only if you specify a delimiter. + // + // CommonPrefixes contains all (if there are any) keys between Prefix and the next + // occurrence of the string specified by a delimiter. + // // CommonPrefixes lists keys that act like subdirectories in the directory - // specified by Prefix . For example, if the prefix is notes/ and the delimiter is - // a slash ( / ) as in notes/summer/july , the common prefix is notes/summer/ . All - // of the keys that roll up into a common prefix count as a single return when - // calculating the number of returns. + // specified by Prefix . + // + // For example, if the prefix is notes/ and the delimiter is a slash ( / ) as in + // notes/summer/july , the common prefix is notes/summer/ . All of the keys that + // roll up into a common prefix count as a single return when calculating the + // number of returns. + // // - Directory buckets - For directory buckets, only prefixes that end in a // delimiter ( / ) are supported. + // // - Directory buckets - When you query ListObjectsV2 with a delimiter during // in-progress multipart uploads, the CommonPrefixes response parameter contains // the prefixes that are associated with the in-progress multipart uploads. For - // more information about multipart uploads, see Multipart Upload Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) - // in the Amazon S3 User Guide. + // more information about multipart uploads, see [Multipart Upload Overview]in the Amazon S3 User Guide. + // + // [Multipart Upload Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html CommonPrefixes []types.CommonPrefix // Metadata about each object returned. Contents []types.Object - // If ContinuationToken was sent with the request, it is included in the response. - // You can use the returned ContinuationToken for pagination of the list response. - // You can use this ContinuationToken for pagination of the list results. + // If ContinuationToken was sent with the request, it is included in the + // response. You can use the returned ContinuationToken for pagination of the list + // results. ContinuationToken *string // Causes keys that contain the same string between the prefix and the first // occurrence of the delimiter to be rolled up into a single result element in the // CommonPrefixes collection. These rolled-up keys are not returned elsewhere in // the response. Each rolled-up result counts as only one return against the - // MaxKeys value. Directory buckets - For directory buckets, / is the only - // supported delimiter. + // MaxKeys value. + // + // Directory buckets - For directory buckets, / is the only supported delimiter.
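 + //
 + // For example, a delimiter-grouped listing is typically consumed through the
 + // paginator defined later in this file (a sketch; client, ctx, and the bucket
 + // name are assumed placeholders):
 + //
 + //	p := s3.NewListObjectsV2Paginator(client, &s3.ListObjectsV2Input{
 + //		Bucket:    aws.String("amzn-s3-demo-bucket"),
 + //		Delimiter: aws.String("/"),
 + //	})
 + //	for p.HasMorePages() {
 + //		page, err := p.NextPage(ctx)
 + //		if err != nil {
 + //			return err
 + //		}
 + //		for _, cp := range page.CommonPrefixes {
 + //			fmt.Println("common prefix:", *cp.Prefix)
 + //		}
 + //	}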
Delimiter *string // Encoding type used by Amazon S3 to encode object key names in the XML response. + // // If you specify the encoding-type request parameter, Amazon S3 includes this // element in the response, and returns encoded key name values in the following - // response elements: Delimiter, Prefix, Key, and StartAfter . + // response elements: + // + // Delimiter, Prefix, Key, and StartAfter . EncodingType types.EncodingType // Set to false if all of the results were returned. Set to true if more keys are @@ -239,16 +308,21 @@ type ListObjectsV2Output struct { // obfuscated and is not a real key NextContinuationToken *string - // Keys that begin with the indicated prefix. Directory buckets - For directory - // buckets, only prefixes that end in a delimiter ( / ) are supported. + // Keys that begin with the indicated prefix. + // + // Directory buckets - For directory buckets, only prefixes that end in a + // delimiter ( / ) are supported. Prefix *string // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged - // If StartAfter was sent with the request, it is included in the response. This - // functionality is not supported for directory buckets. + // If StartAfter was sent with the request, it is included in the response. + // + // This functionality is not supported for directory buckets. StartAfter *string // Metadata pertaining to the operation's result. @@ -300,6 +374,9 @@ func (c *Client) addOperationListObjectsV2Middlewares(stack *middleware.Stack, o if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -315,6 +392,18 @@ func (c *Client) addOperationListObjectsV2Middlewares(stack *middleware.Stack, o if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpListObjectsV2ValidationMiddleware(stack); err != nil { return err } @@ -348,23 +437,21 @@ func (c *Client) addOperationListObjectsV2Middlewares(stack *middleware.Stack, o if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } - return nil -} - -func (v *ListObjectsV2Input) bucket() (string, bool) { - if v.Bucket == nil { - return "", false + if err = addSpanInitializeStart(stack); err != nil { + return err } - return *v.Bucket, true -} - -// ListObjectsV2APIClient is a client that implements the ListObjectsV2 operation. 
-type ListObjectsV2APIClient interface { - ListObjectsV2(context.Context, *ListObjectsV2Input, ...func(*Options)) (*ListObjectsV2Output, error) + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil } -var _ ListObjectsV2APIClient = (*Client)(nil) - // ListObjectsV2PaginatorOptions is the paginator options for ListObjectsV2 type ListObjectsV2PaginatorOptions struct { // Sets the maximum number of keys returned in the response. By default, the @@ -430,6 +517,9 @@ func (p *ListObjectsV2Paginator) NextPage(ctx context.Context, optFns ...func(*O } params.MaxKeys = limit + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) result, err := p.client.ListObjectsV2(ctx, ¶ms, optFns...) if err != nil { return nil, err @@ -452,6 +542,20 @@ func (p *ListObjectsV2Paginator) NextPage(ctx context.Context, optFns ...func(*O return result, nil } +func (v *ListObjectsV2Input) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +// ListObjectsV2APIClient is a client that implements the ListObjectsV2 operation. +type ListObjectsV2APIClient interface { + ListObjectsV2(context.Context, *ListObjectsV2Input, ...func(*Options)) (*ListObjectsV2Output, error) +} + +var _ ListObjectsV2APIClient = (*Client)(nil) + func newServiceMetadataMiddleware_opListObjectsV2(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListParts.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListParts.go index 3f3946d93..d30650e42 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListParts.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListParts.go @@ -14,55 +14,81 @@ import ( "time" ) -// Lists the parts that have been uploaded for a specific multipart upload. To use -// this operation, you must provide the upload ID in the request. You obtain this -// uploadID by sending the initiate multipart upload request through -// CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) -// . The ListParts request returns a maximum of 1,000 uploaded parts. The limit of +// Lists the parts that have been uploaded for a specific multipart upload. +// +// To use this operation, you must provide the upload ID in the request. You +// obtain this uploadID by sending the initiate multipart upload request through [CreateMultipartUpload]. +// +// The ListParts request returns a maximum of 1,000 uploaded parts. The limit of // 1,000 parts is also the default value. You can restrict the number of parts in a // response by specifying the max-parts request parameter. If your multipart // upload consists of more than 1,000 parts, the response returns an IsTruncated // field with the value of true , and a NextPartNumberMarker element. To list // remaining uploaded parts, in subsequent ListParts requests, include the // part-number-marker query string parameter and set its value to the -// NextPartNumberMarker field value from the previous response. For more -// information on multipart uploads, see Uploading Objects Using Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) -// in the Amazon S3 User Guide. 
Directory buckets - For directory buckets, you must -// make requests for this API operation to the Zonal endpoint. These endpoints -// support virtual-hosted-style requests in the format -// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style -// requests are not supported. For more information, see Regional and Zonal -// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. Permissions +// NextPartNumberMarker field value from the previous response. +// +// For more information on multipart uploads, see [Uploading Objects Using Multipart Upload] in the Amazon S3 User Guide. +// +// Directory buckets - For directory buckets, you must make requests for this API +// operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format +// https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name +// . Path-style requests are not supported. For more information about endpoints +// in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information +// about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. +// +// Permissions // - General purpose bucket permissions - For information about permissions -// required to use the multipart upload API, see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) -// in the Amazon S3 User Guide. If the upload was created using server-side -// encryption with Key Management Service (KMS) keys (SSE-KMS) or dual-layer -// server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), you must -// have permission to the kms:Decrypt action for the ListParts request to -// succeed. -// - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// API operation for session-based authorization. Specifically, you grant the -// s3express:CreateSession permission to the directory bucket in a bucket policy -// or an IAM identity-based policy. Then, you make the CreateSession API call on -// the bucket to obtain a session token. With the session token in your request -// header, you can make API requests to this operation. After the session token -// expires, you make another CreateSession API call to generate a new session -// token for use. Amazon Web Services CLI or SDKs create session and refresh the -// session token automatically to avoid service interruptions when a session -// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// . +// required to use the multipart upload API, see [Multipart Upload and Permissions]in the Amazon S3 User Guide. +// +// If the upload was created using server-side encryption with Key Management +// +// Service (KMS) keys (SSE-KMS) or dual-layer server-side encryption with Amazon +// Web Services KMS keys (DSSE-KMS), you must have permission to the kms:Decrypt +// action for the ListParts request to succeed. +// +// - Directory bucket permissions - To grant access to this API operation on a +// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation +// for session-based authorization. 
Specifically, you grant the +// s3express:CreateSession permission to the directory bucket in a bucket policy +// or an IAM identity-based policy. Then, you make the CreateSession API call on +// the bucket to obtain a session token. With the session token in your request +// header, you can make API requests to this operation. After the session token +// expires, you make another CreateSession API call to generate a new session +// token for use. Amazon Web Services CLI or SDKs create session and refresh the +// session token automatically to avoid service interruptions when a session +// expires. For more information about authorization, see [CreateSession]CreateSession . +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket-name.s3express-zone-id.region-code.amazonaws.com . +// +// The following operations are related to ListParts : +// +// [CreateMultipartUpload] +// +// [UploadPart] +// +// [CompleteMultipartUpload] // -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket_name.s3express-az_id.region.amazonaws.com . The following operations are -// related to ListParts : -// - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) -// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) -// - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) -// - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) -// - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) -// - ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +// [AbortMultipartUpload] +// +// [GetObjectAttributes] +// +// [ListMultipartUploads] +// +// [Uploading Objects Using Multipart Upload]: https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [AbortMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html +// [UploadPart]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html +// [GetObjectAttributes]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html +// [ListMultipartUploads]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html +// [Multipart Upload and Permissions]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html +// [CompleteMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html +// [CreateMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html +// +// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html func (c *Client) ListParts(ctx context.Context, params *ListPartsInput, optFns ...func(*Options)) (*ListPartsOutput, error) { if params == nil { params = &ListPartsInput{} @@ -80,31 +106,39 @@ func (c *Client) ListParts(ctx context.Context, params *ListPartsInput, optFns . type ListPartsInput struct { - // The name of the bucket to which the parts are being uploaded. 
Directory buckets - // - When you use this operation with a directory bucket, you must use - // virtual-hosted-style requests in the format - // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not - // supported. Directory bucket names must be unique in the chosen Availability - // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for - // example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket - // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide. Access points - When you use this action with an - // access point, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When using the access point ARN, - // you must direct requests to the access point hostname. The access point hostname - // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. Access points and Object Lambda access points are - // not supported by directory buckets. S3 on Outposts - When you use this action - // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts - // hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // The name of the bucket to which the parts are being uploaded. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use virtual-hosted-style requests in the format + // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests + // are not supported. Directory bucket names must be unique in the chosen Zone + // (Availability Zone or Local Zone). Bucket names must follow the format + // bucket-base-name--zone-id--x-s3 (for example, + // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the access + // point ARN. When using the access point ARN, you must direct requests to the + // access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // Access points and Object Lambda access points are not supported by directory + // buckets. 
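 + //
 + // For example, a minimal sketch of listing parts through an access point (the
 + // ARN, object key, and upload ID are illustrative placeholders; client is an
 + // assumed *s3.Client):
 + //
 + //	out, err := client.ListParts(ctx, &s3.ListPartsInput{
 + //		Bucket:   aws.String("arn:aws:s3:us-west-2:111122223333:accesspoint/example-ap"),
 + //		Key:      aws.String("large-object.bin"),
 + //		UploadId: aws.String("example-upload-id"),
 + //	})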
+ // + // S3 on Outposts - When you use this action with S3 on Outposts, you must direct + // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the + // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When + // you use this action with S3 on Outposts, the destination bucket must be the + // Outposts access point ARN or the access point alias. For more information about + // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -135,37 +169,46 @@ type ListPartsInput struct { // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer // The server-side encryption (SSE) algorithm used to encrypt the object. This // parameter is needed only when the object was created using a checksum algorithm. - // For more information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // For more information, see [Protecting data using SSE-C keys]in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + // + // [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html SSECustomerAlgorithm *string // The server-side encryption (SSE) customer managed key. This parameter is needed // only when the object was created using a checksum algorithm. For more - // information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // information, see [Protecting data using SSE-C keys]in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + // + // [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html SSECustomerKey *string // The MD5 server-side encryption (SSE) customer managed key. This parameter is // needed only when the object was created using a checksum algorithm. 
For more - // information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // information, see [Protecting data using SSE-C keys]in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + // + // [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html SSECustomerKeyMD5 *string noSmithyDocumentSerde } func (in *ListPartsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.Key = in.Key @@ -177,17 +220,21 @@ type ListPartsOutput struct { // incomplete multipart uploads and the prefix in the lifecycle rule matches the // object name in the request, then the response includes this header indicating // when the initiated multipart upload will become eligible for abort operation. - // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket - // Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) - // . The response will also include the x-amz-abort-rule-id header that will - // provide the ID of the lifecycle configuration rule that defines this action. + // For more information, see [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]. + // + // The response will also include the x-amz-abort-rule-id header that will provide + // the ID of the lifecycle configuration rule that defines this action. + // // This functionality is not supported for directory buckets. + // + // [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config AbortDate *time.Time // This header is returned along with the x-amz-abort-date header. It identifies // applicable lifecycle configuration rule that defines the action to abort - // incomplete multipart uploads. This functionality is not supported for directory - // buckets. + // incomplete multipart uploads. + // + // This functionality is not supported for directory buckets. AbortRuleId *string // The name of the bucket to which the multipart upload was initiated. Does not @@ -197,13 +244,22 @@ type ListPartsOutput struct { // The algorithm that was used to create a checksum of the object. ChecksumAlgorithm types.ChecksumAlgorithm + // The checksum type, which determines how part-level checksums are combined to + // create an object-level checksum for multipart objects. You can use this header + // response to verify that the checksum type that is received is the same checksum + // type that was specified in CreateMultipartUpload request. For more information, + // see [Checking object integrity in the Amazon S3 User Guide]. + // + // [Checking object integrity in the Amazon S3 User Guide]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumType types.ChecksumType + // Container element that identifies who initiated the multipart upload. If the // initiator is an Amazon Web Services account, this element provides the same // information as the Owner element. If the initiator is an IAM User, this element // provides the user ARN and display name. Initiator *types.Initiator - // Indicates whether the returned list of parts is truncated. 
A true value + // Indicates whether the returned list of parts is truncated. A true value // indicates that the list was truncated. A list can be truncated if the number of // parts exceeds the limit returned in the MaxParts element. IsTruncated *bool @@ -221,8 +277,10 @@ type ListPartsOutput struct { // Container element that identifies the object owner, after the object is // created. If multipart upload is initiated by an IAM user, this element provides - // the parent account ID and display name. Directory buckets - The bucket owner is - // returned as the object owner for all the parts. + // the parent account ID and display name. + // + // Directory buckets - The bucket owner is returned as the object owner for all + // the parts. Owner *types.Owner // Specifies the part after which listing should begin. Only parts with higher @@ -234,12 +292,15 @@ type ListPartsOutput struct { Parts []types.Part // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged - // The class of storage used to store the uploaded object. Directory buckets - - // Only the S3 Express One Zone storage class is supported by directory buckets to - // store objects. + // The class of storage used to store the uploaded object. + // + // Directory buckets - Only the S3 Express One Zone storage class is supported by + // directory buckets to store objects. StorageClass types.StorageClass // Upload ID identifying the multipart upload whose parts are being listed. @@ -294,6 +355,9 @@ func (c *Client) addOperationListPartsMiddlewares(stack *middleware.Stack, optio if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -309,6 +373,18 @@ func (c *Client) addOperationListPartsMiddlewares(stack *middleware.Stack, optio if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpListPartsValidationMiddleware(stack); err != nil { return err } @@ -342,23 +418,21 @@ func (c *Client) addOperationListPartsMiddlewares(stack *middleware.Stack, optio if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } - return nil -} - -func (v *ListPartsInput) bucket() (string, bool) { - if v.Bucket == nil { - return "", false + if err = addSpanInitializeStart(stack); err != nil { + return err } - return *v.Bucket, true -} - -// ListPartsAPIClient is a client that implements the ListParts operation. 
-type ListPartsAPIClient interface { - ListParts(context.Context, *ListPartsInput, ...func(*Options)) (*ListPartsOutput, error) + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil } -var _ ListPartsAPIClient = (*Client)(nil) - // ListPartsPaginatorOptions is the paginator options for ListParts type ListPartsPaginatorOptions struct { // Sets the maximum number of parts to return. @@ -422,6 +496,9 @@ func (p *ListPartsPaginator) NextPage(ctx context.Context, optFns ...func(*Optio } params.MaxParts = limit + optFns = append([]func(*Options){ + addIsPaginatorUserAgent, + }, optFns...) result, err := p.client.ListParts(ctx, ¶ms, optFns...) if err != nil { return nil, err @@ -444,6 +521,20 @@ func (p *ListPartsPaginator) NextPage(ctx context.Context, optFns ...func(*Optio return result, nil } +func (v *ListPartsInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + +// ListPartsAPIClient is a client that implements the ListParts operation. +type ListPartsAPIClient interface { + ListParts(context.Context, *ListPartsInput, ...func(*Options)) (*ListPartsOutput, error) +} + +var _ ListPartsAPIClient = (*Client)(nil) + func newServiceMetadataMiddleware_opListParts(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAccelerateConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAccelerateConfiguration.go index c15d55f1f..51de19c80 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAccelerateConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAccelerateConfiguration.go @@ -15,30 +15,45 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Sets the accelerate -// configuration of an existing bucket. Amazon S3 Transfer Acceleration is a -// bucket-level feature that enables you to perform faster data transfers to Amazon -// S3. To use this operation, you must have permission to perform the +// This operation is not supported for directory buckets. +// +// Sets the accelerate configuration of an existing bucket. Amazon S3 Transfer +// Acceleration is a bucket-level feature that enables you to perform faster data +// transfers to Amazon S3. +// +// To use this operation, you must have permission to perform the // s3:PutAccelerateConfiguration action. The bucket owner has this permission by // default. The bucket owner can grant this permission to others. For more -// information about permissions, see Permissions Related to Bucket Subresource -// Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// . The Transfer Acceleration state of a bucket can be set to one of the following +// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. 
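 +//
 +// For example, a minimal sketch of enabling acceleration (the client value and
 +// bucket name are assumed placeholders; types is the service's types package):
 +//
 +//	_, err := client.PutBucketAccelerateConfiguration(ctx, &s3.PutBucketAccelerateConfigurationInput{
 +//		Bucket: aws.String("amzn-s3-demo-bucket"),
 +//		AccelerateConfiguration: &types.AccelerateConfiguration{
 +//			Status: types.BucketAccelerateStatusEnabled,
 +//		},
 +//	})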
+// +// The Transfer Acceleration state of a bucket can be set to one of the following // two values: +// // - Enabled – Enables accelerated data transfers to the bucket. +// // - Suspended – Disables accelerated data transfers to the bucket. // -// The GetBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAccelerateConfiguration.html) -// action returns the transfer acceleration state of a bucket. After setting the -// Transfer Acceleration state of a bucket to Enabled, it might take up to thirty -// minutes before the data transfer rates to the bucket increase. The name of the -// bucket used for Transfer Acceleration must be DNS-compliant and must not contain -// periods ("."). For more information about transfer acceleration, see Transfer -// Acceleration (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html) -// . The following operations are related to PutBucketAccelerateConfiguration : -// - GetBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAccelerateConfiguration.html) -// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// The [GetBucketAccelerateConfiguration] action returns the transfer acceleration state of a bucket. +// +// After setting the Transfer Acceleration state of a bucket to Enabled, it might +// take up to thirty minutes before the data transfer rates to the bucket increase. +// +// The name of the bucket used for Transfer Acceleration must be DNS-compliant and +// must not contain periods ("."). +// +// For more information about transfer acceleration, see [Transfer Acceleration]. +// +// The following operations are related to PutBucketAccelerateConfiguration : +// +// [GetBucketAccelerateConfiguration] +// +// [CreateBucket] +// +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [Transfer Acceleration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html +// [GetBucketAccelerateConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAccelerateConfiguration.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html +// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html func (c *Client) PutBucketAccelerateConfiguration(ctx context.Context, params *PutBucketAccelerateConfigurationInput, optFns ...func(*Options)) (*PutBucketAccelerateConfigurationOutput, error) { if params == nil { params = &PutBucketAccelerateConfigurationInput{} @@ -66,14 +81,17 @@ type PutBucketAccelerateConfigurationInput struct { // This member is required. Bucket *string - // Indicates the algorithm used to create the checksum for the object when you use - // the SDK. This header will not provide any additional functionality if you don't - // use the SDK. When you send this header, there must be a corresponding + // Indicates the algorithm used to create the checksum for the request when you + // use the SDK. This header will not provide any additional functionality if you + // don't use the SDK. When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . 
For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm // The account ID of the expected bucket owner. If the account ID that you provide @@ -85,6 +103,7 @@ type PutBucketAccelerateConfigurationInput struct { } func (in *PutBucketAccelerateConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -139,6 +158,9 @@ func (c *Client) addOperationPutBucketAccelerateConfigurationMiddlewares(stack * if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -154,6 +176,21 @@ func (c *Client) addOperationPutBucketAccelerateConfigurationMiddlewares(stack * if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addRequestChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutBucketAccelerateConfigurationValidationMiddleware(stack); err != nil { return err } @@ -190,6 +227,18 @@ func (c *Client) addOperationPutBucketAccelerateConfigurationMiddlewares(stack * if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } @@ -219,9 +268,10 @@ func getPutBucketAccelerateConfigurationRequestAlgorithmMember(input interface{} } func addPutBucketAccelerateConfigurationInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ GetAlgorithm: getPutBucketAccelerateConfigurationRequestAlgorithmMember, RequireChecksum: false, + RequestChecksumCalculation: options.RequestChecksumCalculation, EnableTrailingChecksum: false, EnableComputeSHA256PayloadHash: true, EnableDecodedContentLengthHeader: true, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAcl.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAcl.go index f88bb4af2..a7d37a86b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAcl.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAcl.go @@ -15,89 +15,159 @@ import ( 
smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Sets the permissions on -// an existing bucket using access control lists (ACL). For more information, see -// Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html) -// . To set the ACL of a bucket, you must have the WRITE_ACP permission. You can -// use one of the following two ways to set a bucket's permissions: +// This operation is not supported for directory buckets. +// +// Sets the permissions on an existing bucket using access control lists (ACL). +// For more information, see [Using ACLs]. To set the ACL of a bucket, you must have the +// WRITE_ACP permission. +// +// You can use one of the following two ways to set a bucket's permissions: +// // - Specify the ACL in the request body +// // - Specify permissions using request headers // // You cannot specify access permission using both the body and the request -// headers. Depending on your application needs, you may choose to set the ACL on a -// bucket using either the request body or the headers. For example, if you have an +// headers. +// +// Depending on your application needs, you may choose to set the ACL on a bucket +// using either the request body or the headers. For example, if you have an // existing application that updates a bucket ACL using the request body, then you -// can continue to use that approach. If your bucket uses the bucket owner enforced -// setting for S3 Object Ownership, ACLs are disabled and no longer affect -// permissions. You must use policies to grant access to your bucket and the -// objects in it. Requests to set ACLs or update ACLs fail and return the -// AccessControlListNotSupported error code. Requests to read ACLs are still -// supported. For more information, see Controlling object ownership (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) -// in the Amazon S3 User Guide. Permissions You can set access permissions by using -// one of the following methods: +// can continue to use that approach. +// +// If your bucket uses the bucket owner enforced setting for S3 Object Ownership, +// ACLs are disabled and no longer affect permissions. You must use policies to +// grant access to your bucket and the objects in it. Requests to set ACLs or +// update ACLs fail and return the AccessControlListNotSupported error code. +// Requests to read ACLs are still supported. For more information, see [Controlling object ownership]in the +// Amazon S3 User Guide. +// +// Permissions You can set access permissions by using one of the following +// methods: +// // - Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports a // set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined // set of grantees and permissions. Specify the canned ACL name as the value of // x-amz-acl . If you use this header, you cannot use other access -// control-specific headers in your request. For more information, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL) -// . +// control-specific headers in your request. For more information, see [Canned ACL]. +// // - Specify access permissions explicitly with the x-amz-grant-read , // x-amz-grant-read-acp , x-amz-grant-write-acp , and x-amz-grant-full-control // headers. 
When using these headers, you specify explicit access permissions and // grantees (Amazon Web Services accounts or Amazon S3 groups) who will receive the // permission. If you use these ACL-specific headers, you cannot use the // x-amz-acl header to set a canned ACL. These parameters map to the set of -// permissions that Amazon S3 supports in an ACL. For more information, see -// Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) -// . You specify each grantee as a type=value pair, where the type is one of the -// following: -// - id – if the value specified is the canonical user ID of an Amazon Web -// Services account -// - uri – if you are granting permissions to a predefined group -// - emailAddress – if the value specified is the email address of an Amazon Web -// Services account Using email addresses to specify a grantee is only supported in -// the following Amazon Web Services Regions: -// - US East (N. Virginia) -// - US West (N. California) -// - US West (Oregon) -// - Asia Pacific (Singapore) -// - Asia Pacific (Sydney) -// - Asia Pacific (Tokyo) -// - Europe (Ireland) -// - South America (São Paulo) For a list of all the Amazon S3 supported Regions -// and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) -// in the Amazon Web Services General Reference. For example, the following -// x-amz-grant-write header grants create, overwrite, and delete objects -// permission to LogDelivery group predefined by Amazon S3 and two Amazon Web -// Services accounts identified by their email addresses. x-amz-grant-write: -// uri="http://acs.amazonaws.com/groups/s3/LogDelivery", id="111122223333", -// id="555566667777" +// permissions that Amazon S3 supports in an ACL. For more information, see [Access Control List (ACL) Overview]. +// +// You specify each grantee as a type=value pair, where the type is one of the +// +// following: +// +// - id – if the value specified is the canonical user ID of an Amazon Web +// Services account +// +// - uri – if you are granting permissions to a predefined group +// +// - emailAddress – if the value specified is the email address of an Amazon Web +// Services account +// +// Using email addresses to specify a grantee is only supported in the following +// +// Amazon Web Services Regions: +// +// - US East (N. Virginia) +// +// - US West (N. California) +// +// - US West (Oregon) +// +// - Asia Pacific (Singapore) +// +// - Asia Pacific (Sydney) +// +// - Asia Pacific (Tokyo) +// +// - Europe (Ireland) +// +// - South America (São Paulo) +// +// For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints]in the +// +// Amazon Web Services General Reference. +// +// For example, the following x-amz-grant-write header grants create, overwrite, +// +// and delete objects permission to LogDelivery group predefined by Amazon S3 and +// two Amazon Web Services accounts identified by their email addresses. +// +// x-amz-grant-write: uri="http://acs.amazonaws.com/groups/s3/LogDelivery", +// +// id="111122223333", id="555566667777" // // You can use either a canned ACL or specify access permissions explicitly. You -// cannot do both. 
Grantee Values You can specify the person (grantee) to whom -// you're assigning access rights (using request elements) in the following ways: -// - By the person's ID: <>ID<><>GranteesEmail<> DisplayName is optional and -// ignored in the request -// - By URI: <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<> -// - By Email address: <>Grantees@email.com<>& The grantee is resolved to the -// CanonicalUser and, in a response to a GET Object acl request, appears as the -// CanonicalUser. Using email addresses to specify a grantee is only supported in -// the following Amazon Web Services Regions: -// - US East (N. Virginia) -// - US West (N. California) -// - US West (Oregon) -// - Asia Pacific (Singapore) -// - Asia Pacific (Sydney) -// - Asia Pacific (Tokyo) -// - Europe (Ireland) -// - South America (São Paulo) For a list of all the Amazon S3 supported Regions -// and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) -// in the Amazon Web Services General Reference. +// cannot do both. +// +// Grantee Values You can specify the person (grantee) to whom you're assigning +// access rights (using request elements) in the following ways: +// +// - By the person's ID: +// +// <>ID<><>GranteesEmail<> +// +// DisplayName is optional and ignored in the request +// +// - By URI: +// +// <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<> +// +// - By Email address: +// +// <>Grantees@email.com<>& +// +// The grantee is resolved to the CanonicalUser and, in a response to a GET Object +// +// acl request, appears as the CanonicalUser. +// +// Using email addresses to specify a grantee is only supported in the following +// +// Amazon Web Services Regions: +// +// - US East (N. Virginia) +// +// - US West (N. California) +// +// - US West (Oregon) +// +// - Asia Pacific (Singapore) +// +// - Asia Pacific (Sydney) +// +// - Asia Pacific (Tokyo) +// +// - Europe (Ireland) +// +// - South America (São Paulo) +// +// For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints]in the +// +// Amazon Web Services General Reference. 
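 +//
 +// For example, a minimal sketch that applies a canned ACL rather than explicit
 +// grants (the client value and bucket name are assumed placeholders):
 +//
 +//	_, err := client.PutBucketAcl(ctx, &s3.PutBucketAclInput{
 +//		Bucket: aws.String("amzn-s3-demo-bucket"),
 +//		ACL:    types.BucketCannedACLPrivate,
 +//	})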
// // The following operations are related to PutBucketAcl : -// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) -// - DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) -// - GetObjectAcl (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html) +// +// [CreateBucket] +// +// [DeleteBucket] +// +// [GetObjectAcl] +// +// [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region +// [Access Control List (ACL) Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html +// [Controlling object ownership]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html +// [DeleteBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html +// [Using ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html +// [Canned ACL]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL +// [GetObjectAcl]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html +// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html func (c *Client) PutBucketAcl(ctx context.Context, params *PutBucketAclInput, optFns ...func(*Options)) (*PutBucketAclOutput, error) { if params == nil { params = &PutBucketAclInput{} @@ -126,21 +196,27 @@ type PutBucketAclInput struct { // Contains the elements that set the ACL permissions for an object per grantee. AccessControlPolicy *types.AccessControlPolicy - // Indicates the algorithm used to create the checksum for the object when you use - // the SDK. This header will not provide any additional functionality if you don't - // use the SDK. When you send this header, there must be a corresponding + // Indicates the algorithm used to create the checksum for the request when you + // use the SDK. This header will not provide any additional functionality if you + // don't use the SDK. When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm - // The base64-encoded 128-bit MD5 digest of the data. This header must be used as + // The Base64 encoded 128-bit MD5 digest of the data. This header must be used as // a message integrity check to verify that the request body was not corrupted in - // transit. For more information, go to RFC 1864. (http://www.ietf.org/rfc/rfc1864.txt) + // transit. For more information, go to [RFC 1864.] + // // For requests made using the Amazon Web Services Command Line Interface (CLI) or // Amazon Web Services SDKs, this field is calculated automatically. 
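 + //
 + // When building the request manually, the value can be computed with the
 + // standard library's crypto/md5 and encoding/base64 packages (a sketch; body is
 + // the raw request payload as a []byte):
 + //
 + //	sum := md5.Sum(body)
 + //	contentMD5 := aws.String(base64.StdEncoding.EncodeToString(sum[:]))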
+ // + // [RFC 1864.]: http://www.ietf.org/rfc/rfc1864.txt ContentMD5 *string // The account ID of the expected bucket owner. If the account ID that you provide @@ -158,9 +234,10 @@ type PutBucketAclInput struct { // Allows grantee to read the bucket ACL. GrantReadACP *string - // Allows grantee to create new objects in the bucket. For the bucket and object - // owners of existing objects, also allows deletions and overwrites of those - // objects. + // Allows grantee to create new objects in the bucket. + // + // For the bucket and object owners of existing objects, also allows deletions and + // overwrites of those objects. GrantWrite *string // Allows grantee to write the ACL for the applicable bucket. @@ -170,6 +247,7 @@ type PutBucketAclInput struct { } func (in *PutBucketAclInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -224,6 +302,9 @@ func (c *Client) addOperationPutBucketAclMiddlewares(stack *middleware.Stack, op if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -239,6 +320,21 @@ func (c *Client) addOperationPutBucketAclMiddlewares(stack *middleware.Stack, op if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addRequestChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutBucketAclValidationMiddleware(stack); err != nil { return err } @@ -278,6 +374,18 @@ func (c *Client) addOperationPutBucketAclMiddlewares(stack *middleware.Stack, op if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } @@ -307,9 +415,10 @@ func getPutBucketAclRequestAlgorithmMember(input interface{}) (string, bool) { } func addPutBucketAclInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ GetAlgorithm: getPutBucketAclRequestAlgorithmMember, RequireChecksum: true, + RequestChecksumCalculation: options.RequestChecksumCalculation, EnableTrailingChecksum: false, EnableComputeSHA256PayloadHash: true, EnableDecodedContentLengthHeader: true, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAnalyticsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAnalyticsConfiguration.go index 0604fb930..8e13fbcda 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAnalyticsConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAnalyticsConfiguration.go @@ -14,45 +14,67 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. 
Sets an analytics -// configuration for the bucket (specified by the analytics configuration ID). You -// can have up to 1,000 analytics configurations per bucket. You can choose to have -// storage class analysis export analysis reports sent to a comma-separated values -// (CSV) flat file. See the DataExport request element. Reports are updated daily -// and are based on the object filters that you configure. When selecting data -// export, you specify a destination bucket and an optional destination prefix -// where the file is written. You can export the data to a destination bucket in a -// different account. However, the destination bucket must be in the same Region as -// the bucket that you are making the PUT analytics configuration to. For more -// information, see Amazon S3 Analytics – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html) -// . You must create a bucket policy on the destination bucket where the exported +// This operation is not supported for directory buckets. +// +// Sets an analytics configuration for the bucket (specified by the analytics +// configuration ID). You can have up to 1,000 analytics configurations per bucket. +// +// You can choose to have storage class analysis export analysis reports sent to a +// comma-separated values (CSV) flat file. See the DataExport request element. +// Reports are updated daily and are based on the object filters that you +// configure. When selecting data export, you specify a destination bucket and an +// optional destination prefix where the file is written. You can export the data +// to a destination bucket in a different account. However, the destination bucket +// must be in the same Region as the bucket that you are making the PUT analytics +// configuration to. For more information, see [Amazon S3 Analytics – Storage Class Analysis]. +// +// You must create a bucket policy on the destination bucket where the exported // file is written to grant permissions to Amazon S3 to write objects to the -// bucket. For an example policy, see Granting Permissions for Amazon S3 Inventory -// and Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9) -// . To use this operation, you must have permissions to perform the +// bucket. For an example policy, see [Granting Permissions for Amazon S3 Inventory and Storage Class Analysis]. +// +// To use this operation, you must have permissions to perform the // s3:PutAnalyticsConfiguration action. The bucket owner has this permission by // default. The bucket owner can grant this permission to others. For more -// information about permissions, see Permissions Related to Bucket Subresource -// Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// . PutBucketAnalyticsConfiguration has the following special errors: +// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. +// +// PutBucketAnalyticsConfiguration has the following special errors: +// // - HTTP Error: HTTP 400 Bad Request +// // - Code: InvalidArgument +// // - Cause: Invalid argument. 
+// // - HTTP Error: HTTP 400 Bad Request +// // - Code: TooManyConfigurations +// // - Cause: You are attempting to create a new configuration but have already // reached the 1,000-configuration limit. +// // - HTTP Error: HTTP 403 Forbidden +// // - Code: AccessDenied +// // - Cause: You are not the owner of the specified bucket, or you do not have // the s3:PutAnalyticsConfiguration bucket permission to set the configuration on // the bucket. // // The following operations are related to PutBucketAnalyticsConfiguration : -// - GetBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html) -// - DeleteBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html) -// - ListBucketAnalyticsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html) +// +// [GetBucketAnalyticsConfiguration] +// +// [DeleteBucketAnalyticsConfiguration] +// +// [ListBucketAnalyticsConfigurations] +// +// [Amazon S3 Analytics – Storage Class Analysis]: https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html +// [Granting Permissions for Amazon S3 Inventory and Storage Class Analysis]: https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9 +// [DeleteBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [GetBucketAnalyticsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html +// [ListBucketAnalyticsConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html func (c *Client) PutBucketAnalyticsConfiguration(ctx context.Context, params *PutBucketAnalyticsConfigurationInput, optFns ...func(*Options)) (*PutBucketAnalyticsConfigurationOutput, error) { if params == nil { params = &PutBucketAnalyticsConfigurationInput{} @@ -94,6 +116,7 @@ type PutBucketAnalyticsConfigurationInput struct { } func (in *PutBucketAnalyticsConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -148,6 +171,9 @@ func (c *Client) addOperationPutBucketAnalyticsConfigurationMiddlewares(stack *m if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -163,6 +189,18 @@ func (c *Client) addOperationPutBucketAnalyticsConfigurationMiddlewares(stack *m if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutBucketAnalyticsConfigurationValidationMiddleware(stack); err != nil { return err } @@ -196,6 +234,18 @@ func (c *Client) 
addOperationPutBucketAnalyticsConfigurationMiddlewares(stack *m if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketCors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketCors.go index 3e6604ef6..653a1598a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketCors.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketCors.go @@ -15,35 +15,54 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Sets the cors -// configuration for your bucket. If the configuration exists, Amazon S3 replaces -// it. To use this operation, you must be allowed to perform the s3:PutBucketCORS +// This operation is not supported for directory buckets. +// +// Sets the cors configuration for your bucket. If the configuration exists, +// Amazon S3 replaces it. +// +// To use this operation, you must be allowed to perform the s3:PutBucketCORS // action. By default, the bucket owner has this permission and can grant it to -// others. You set this configuration on a bucket so that the bucket can service +// others. +// +// You set this configuration on a bucket so that the bucket can service // cross-origin requests. For example, you might want to enable a request whose // origin is http://www.example.com to access your Amazon S3 bucket at -// my.example.bucket.com by using the browser's XMLHttpRequest capability. To -// enable cross-origin resource sharing (CORS) on a bucket, you add the cors +// my.example.bucket.com by using the browser's XMLHttpRequest capability. +// +// To enable cross-origin resource sharing (CORS) on a bucket, you add the cors // subresource to the bucket. The cors subresource is an XML document in which you // configure rules that identify origins and the HTTP methods that can be executed -// on your bucket. The document is limited to 64 KB in size. When Amazon S3 -// receives a cross-origin request (or a pre-flight OPTIONS request) against a -// bucket, it evaluates the cors configuration on the bucket and uses the first -// CORSRule rule that matches the incoming browser request to enable a cross-origin -// request. For a rule to match, the following conditions must be met: +// on your bucket. The document is limited to 64 KB in size. +// +// When Amazon S3 receives a cross-origin request (or a pre-flight OPTIONS +// request) against a bucket, it evaluates the cors configuration on the bucket +// and uses the first CORSRule rule that matches the incoming browser request to +// enable a cross-origin request. For a rule to match, the following conditions +// must be met: +// // - The request's Origin header must match AllowedOrigin elements. +// // - The request method (for example, GET, PUT, HEAD, and so on) or the // Access-Control-Request-Method header in case of a pre-flight OPTIONS request // must be one of the AllowedMethod elements. +// // - Every header specified in the Access-Control-Request-Headers request header // of a pre-flight request must match an AllowedHeader element. 
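A minimal sketch of a cors configuration that exercises the matching conditions listed above; the bucket and origin are placeholders, and the imports are the same as in the PutBucketAcl sketch earlier:

func putExampleCors(ctx context.Context, client *s3.Client) error {
	// One rule: allow GET and PUT from a single origin. AllowedOrigins,
	// AllowedMethods, and AllowedHeaders correspond to the AllowedOrigin,
	// AllowedMethod, and AllowedHeader matching conditions above.
	_, err := client.PutBucketCors(ctx, &s3.PutBucketCorsInput{
		Bucket: aws.String("example-bucket"), // placeholder
		CORSConfiguration: &types.CORSConfiguration{
			CORSRules: []types.CORSRule{{
				AllowedOrigins: []string{"http://www.example.com"},
				AllowedMethods: []string{"GET", "PUT"},
				AllowedHeaders: []string{"*"},
				MaxAgeSeconds:  aws.Int32(3000),
			}},
		},
	})
	return err
}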
// -// For more information about CORS, go to Enabling Cross-Origin Resource Sharing (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) -// in the Amazon S3 User Guide. The following operations are related to -// PutBucketCors : -// - GetBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketCors.html) -// - DeleteBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html) -// - RESTOPTIONSobject (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html) +// For more information about CORS, go to [Enabling Cross-Origin Resource Sharing] in the Amazon S3 User Guide. +// +// The following operations are related to PutBucketCors : +// +// [GetBucketCors] +// +// [DeleteBucketCors] +// +// [RESTOPTIONSobject] +// +// [GetBucketCors]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketCors.html +// [Enabling Cross-Origin Resource Sharing]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html +// [RESTOPTIONSobject]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html +// [DeleteBucketCors]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html func (c *Client) PutBucketCors(ctx context.Context, params *PutBucketCorsInput, optFns ...func(*Options)) (*PutBucketCorsOutput, error) { if params == nil { params = &PutBucketCorsInput{} @@ -67,27 +86,34 @@ type PutBucketCorsInput struct { Bucket *string // Describes the cross-origin access configuration for objects in an Amazon S3 - // bucket. For more information, see Enabling Cross-Origin Resource Sharing (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) - // in the Amazon S3 User Guide. + // bucket. For more information, see [Enabling Cross-Origin Resource Sharing]in the Amazon S3 User Guide. + // + // [Enabling Cross-Origin Resource Sharing]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html // // This member is required. CORSConfiguration *types.CORSConfiguration - // Indicates the algorithm used to create the checksum for the object when you use - // the SDK. This header will not provide any additional functionality if you don't - // use the SDK. When you send this header, there must be a corresponding + // Indicates the algorithm used to create the checksum for the request when you + // use the SDK. This header will not provide any additional functionality if you + // don't use the SDK. When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm - // The base64-encoded 128-bit MD5 digest of the data. This header must be used as + // The Base64 encoded 128-bit MD5 digest of the data. 
This header must be used as // a message integrity check to verify that the request body was not corrupted in - // transit. For more information, go to RFC 1864. (http://www.ietf.org/rfc/rfc1864.txt) + // transit. For more information, go to [RFC 1864.] + // // For requests made using the Amazon Web Services Command Line Interface (CLI) or // Amazon Web Services SDKs, this field is calculated automatically. + // + // [RFC 1864.]: http://www.ietf.org/rfc/rfc1864.txt ContentMD5 *string // The account ID of the expected bucket owner. If the account ID that you provide @@ -99,6 +125,7 @@ type PutBucketCorsInput struct { } func (in *PutBucketCorsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -153,6 +180,9 @@ func (c *Client) addOperationPutBucketCorsMiddlewares(stack *middleware.Stack, o if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -168,6 +198,21 @@ func (c *Client) addOperationPutBucketCorsMiddlewares(stack *middleware.Stack, o if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addRequestChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutBucketCorsValidationMiddleware(stack); err != nil { return err } @@ -207,6 +252,18 @@ func (c *Client) addOperationPutBucketCorsMiddlewares(stack *middleware.Stack, o if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } @@ -236,9 +293,10 @@ func getPutBucketCorsRequestAlgorithmMember(input interface{}) (string, bool) { } func addPutBucketCorsInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ GetAlgorithm: getPutBucketCorsRequestAlgorithmMember, RequireChecksum: true, + RequestChecksumCalculation: options.RequestChecksumCalculation, EnableTrailingChecksum: false, EnableComputeSHA256PayloadHash: true, EnableDecodedContentLengthHeader: true, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketEncryption.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketEncryption.go index dfc71dc5c..87d25702c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketEncryption.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketEncryption.go @@ -15,30 +15,116 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. This action uses the -// encryption subresource to configure default encryption and Amazon S3 Bucket Keys -// for an existing bucket. 
By default, all buckets have a default encryption -// configuration that uses server-side encryption with Amazon S3 managed keys -// (SSE-S3). You can optionally configure default encryption for a bucket by using -// server-side encryption with Key Management Service (KMS) keys (SSE-KMS) or -// dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS). -// If you specify default encryption by using SSE-KMS, you can also configure -// Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) -// . If you use PutBucketEncryption to set your default bucket encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) -// to SSE-KMS, you should verify that your KMS key ID is correct. Amazon S3 does -// not validate the KMS key ID provided in PutBucketEncryption requests. This -// action requires Amazon Web Services Signature Version 4. For more information, -// see Authenticating Requests (Amazon Web Services Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html) -// . To use this operation, you must have permission to perform the -// s3:PutEncryptionConfiguration action. The bucket owner has this permission by -// default. The bucket owner can grant this permission to others. For more -// information about permissions, see Permissions Related to Bucket Subresource -// Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// in the Amazon S3 User Guide. The following operations are related to -// PutBucketEncryption : -// - GetBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html) -// - DeleteBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketEncryption.html) +// This operation configures default encryption and Amazon S3 Bucket Keys for an +// existing bucket. +// +// Directory buckets - For directory buckets, you must make requests for this API +// operation to the Regional endpoint. These endpoints support path-style requests +// in the format https://s3express-control.region-code.amazonaws.com/bucket-name . +// Virtual-hosted-style requests aren't supported. For more information about +// endpoints in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more +// information about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. +// +// By default, all buckets have a default encryption configuration that uses +// server-side encryption with Amazon S3 managed keys (SSE-S3). +// +// - General purpose buckets +// +// - You can optionally configure default encryption for a bucket by using +// server-side encryption with Key Management Service (KMS) keys (SSE-KMS) or +// dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS). +// If you specify default encryption by using SSE-KMS, you can also configure [Amazon S3 Bucket Keys]. +// For information about the bucket default encryption feature, see [Amazon S3 Bucket Default Encryption]in the +// Amazon S3 User Guide. +// +// - If you use PutBucketEncryption to set your [default bucket encryption]to SSE-KMS, you should verify +// that your KMS key ID is correct. 
Amazon S3 doesn't validate the KMS key ID +// provided in PutBucketEncryption requests. +// +// - Directory buckets - You can optionally configure default encryption for a +// bucket by using server-side encryption with Key Management Service (KMS) keys +// (SSE-KMS). +// +// - We recommend that the bucket's default encryption uses the desired +// encryption configuration and you don't override the bucket default encryption in +// your CreateSession requests or PUT object requests. Then, new objects are +// automatically encrypted with the desired encryption settings. For more +// information about the encryption overriding behaviors in directory buckets, see [Specifying server-side encryption with KMS for new object uploads] +// . +// +// - Your SSE-KMS configuration can only support 1 [customer managed key]per directory bucket's +// lifetime. The [Amazon Web Services managed key]( aws/s3 ) isn't supported. +// +// - S3 Bucket Keys are always enabled for GET and PUT operations in a directory +// bucket and can’t be disabled. S3 Bucket Keys aren't supported, when you copy +// SSE-KMS encrypted objects from general purpose buckets to directory buckets, +// from directory buckets to general purpose buckets, or between directory buckets, +// through [CopyObject], [UploadPartCopy], [the Copy operation in Batch Operations], or [the import jobs]. In this case, Amazon S3 makes a call to KMS every time a +// copy request is made for a KMS-encrypted object. +// +// - When you specify an [KMS customer managed key]for encryption in your directory bucket, only use the +// key ID or key ARN. The key alias format of the KMS key isn't supported. +// +// - For directory buckets, if you use PutBucketEncryption to set your [default bucket encryption]to +// SSE-KMS, Amazon S3 validates the KMS key ID provided in PutBucketEncryption +// requests. +// +// If you're specifying a customer managed KMS key, we recommend using a fully +// qualified KMS key ARN. If you use a KMS key alias instead, then KMS resolves the +// key within the requester’s account. This behavior can result in data that's +// encrypted with a KMS key that belongs to the requester, and not the bucket +// owner. +// +// Also, this action requires Amazon Web Services Signature Version 4. For more +// information, see [Authenticating Requests (Amazon Web Services Signature Version 4)]. +// +// Permissions +// +// - General purpose bucket permissions - The s3:PutEncryptionConfiguration +// permission is required in a policy. The bucket owner has this permission by +// default. The bucket owner can grant this permission to others. For more +// information about permissions, see [Permissions Related to Bucket Operations]and [Managing Access Permissions to Your Amazon S3 Resources]in the Amazon S3 User Guide. +// +// - Directory bucket permissions - To grant access to this API operation, you +// must have the s3express:PutEncryptionConfiguration permission in an IAM +// identity-based policy instead of a bucket policy. Cross-account access to this +// API operation isn't supported. This operation can only be performed by the +// Amazon Web Services account that owns the resource. For more information about +// directory bucket policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]in the Amazon S3 User Guide. 
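A minimal sketch of setting a general purpose bucket's default encryption to SSE-KMS with a Bucket Key, following the recommendation above to use a fully qualified key ARN; the bucket name and ARN are placeholders, and imports are as in the PutBucketAcl sketch earlier:

func putExampleEncryption(ctx context.Context, client *s3.Client) error {
	_, err := client.PutBucketEncryption(ctx, &s3.PutBucketEncryptionInput{
		Bucket: aws.String("example-bucket"), // placeholder
		ServerSideEncryptionConfiguration: &types.ServerSideEncryptionConfiguration{
			Rules: []types.ServerSideEncryptionRule{{
				ApplyServerSideEncryptionByDefault: &types.ServerSideEncryptionByDefault{
					SSEAlgorithm: types.ServerSideEncryptionAwsKms,
					// Placeholder ARN for a customer managed KMS key.
					KMSMasterKeyID: aws.String("arn:aws:kms:us-east-1:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"),
				},
				// Reduce KMS calls for SSE-KMS objects in general purpose buckets.
				BucketKeyEnabled: aws.Bool(true),
			}},
		},
	})
	return err
}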
+// +// To set a directory bucket default encryption with SSE-KMS, you must also have +// +// the kms:GenerateDataKey and the kms:Decrypt permissions in IAM identity-based +// policies and KMS key policies for the target KMS key. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// s3express-control.region-code.amazonaws.com . +// +// The following operations are related to PutBucketEncryption : +// +// [GetBucketEncryption] +// +// [DeleteBucketEncryption] +// +// [Specifying server-side encryption with KMS for new object uploads]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-specifying-kms-encryption.html +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [KMS customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk +// [Amazon S3 Bucket Default Encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html +// [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html +// [Permissions Related to Bucket Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html +// [Amazon Web Services managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk +// [Authenticating Requests (Amazon Web Services Signature Version 4)]: https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html +// [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html +// [Amazon S3 Bucket Keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html +// [GetBucketEncryption]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html +// [DeleteBucketEncryption]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketEncryption.html +// [customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk +// [default bucket encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html +// [the import jobs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/create-import-job +// [the Copy operation in Batch Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-objects-Batch-Ops +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html func (c *Client) PutBucketEncryption(ctx context.Context, params *PutBucketEncryptionInput, optFns ...func(*Options)) (*PutBucketEncryptionOutput, error) { if params == nil { params = &PutBucketEncryptionInput{} @@ -57,13 +143,18 @@ func (c *Client) PutBucketEncryption(ctx context.Context, params *PutBucketEncry type PutBucketEncryptionInput struct { // Specifies default encryption for a bucket using server-side encryption with - // different key options. By default, all buckets have a default encryption - // configuration that uses server-side encryption with Amazon S3 managed keys - // (SSE-S3). 
You can optionally configure default encryption for a bucket by using - // server-side encryption with an Amazon Web Services KMS key (SSE-KMS) or a - // customer-provided key (SSE-C). For information about the bucket default - // encryption feature, see Amazon S3 Bucket Default Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) - // in the Amazon S3 User Guide. + // different key options. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use path-style requests in the format + // https://s3express-control.region-code.amazonaws.com/bucket-name . + // Virtual-hosted-style requests aren't supported. Directory bucket names must be + // unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must + // also follow the format bucket-base-name--zone-id--x-s3 (for example, + // DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html // // This member is required. Bucket *string @@ -73,31 +164,45 @@ type PutBucketEncryptionInput struct { // This member is required. ServerSideEncryptionConfiguration *types.ServerSideEncryptionConfiguration - // Indicates the algorithm used to create the checksum for the object when you use - // the SDK. This header will not provide any additional functionality if you don't - // use the SDK. When you send this header, there must be a corresponding + // Indicates the algorithm used to create the checksum for the request when you + // use the SDK. This header will not provide any additional functionality if you + // don't use the SDK. When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the + // default checksum algorithm that's used for performance. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm - // The base64-encoded 128-bit MD5 digest of the server-side encryption - // configuration. For requests made using the Amazon Web Services Command Line - // Interface (CLI) or Amazon Web Services SDKs, this field is calculated - // automatically. + // The Base64 encoded 128-bit MD5 digest of the server-side encryption + // configuration. + // + // For requests made using the Amazon Web Services Command Line Interface (CLI) or + // Amazon Web Services SDKs, this field is calculated automatically. + // + // This functionality is not supported for directory buckets. ContentMD5 *string // The account ID of the expected bucket owner. 
If the account ID that you provide // does not match the actual owner of the bucket, the request fails with the HTTP // status code 403 Forbidden (access denied). + // + // For directory buckets, this header is not supported in this API operation. If + // you specify this header, the request fails with the HTTP status code 501 Not + // Implemented . ExpectedBucketOwner *string noSmithyDocumentSerde } func (in *PutBucketEncryptionInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -152,6 +257,9 @@ func (c *Client) addOperationPutBucketEncryptionMiddlewares(stack *middleware.St if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -167,6 +275,21 @@ func (c *Client) addOperationPutBucketEncryptionMiddlewares(stack *middleware.St if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addRequestChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutBucketEncryptionValidationMiddleware(stack); err != nil { return err } @@ -206,6 +329,18 @@ func (c *Client) addOperationPutBucketEncryptionMiddlewares(stack *middleware.St if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } @@ -235,9 +370,10 @@ func getPutBucketEncryptionRequestAlgorithmMember(input interface{}) (string, bo } func addPutBucketEncryptionInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ GetAlgorithm: getPutBucketEncryptionRequestAlgorithmMember, RequireChecksum: true, + RequestChecksumCalculation: options.RequestChecksumCalculation, EnableTrailingChecksum: false, EnableComputeSHA256PayloadHash: true, EnableDecodedContentLengthHeader: true, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketIntelligentTieringConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketIntelligentTieringConfiguration.go index 61d73da56..c032b4039 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketIntelligentTieringConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketIntelligentTieringConfiguration.go @@ -14,37 +14,58 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Puts a S3 -// Intelligent-Tiering configuration to the specified bucket. You can have up to -// 1,000 S3 Intelligent-Tiering configurations per bucket. 
The S3 -// Intelligent-Tiering storage class is designed to optimize storage costs by -// automatically moving data to the most cost-effective storage access tier, +// This operation is not supported for directory buckets. +// +// Puts an S3 Intelligent-Tiering configuration to the specified bucket. You can +// have up to 1,000 S3 Intelligent-Tiering configurations per bucket. +// +// The S3 Intelligent-Tiering storage class is designed to optimize storage costs +// by automatically moving data to the most cost-effective storage access tier, // without performance impact or operational overhead. S3 Intelligent-Tiering // delivers automatic cost savings in three low latency and high throughput access // tiers. To get the lowest storage cost on data that can be accessed in minutes to -// hours, you can choose to activate additional archiving capabilities. The S3 -// Intelligent-Tiering storage class is the ideal storage class for data with -// unknown, changing, or unpredictable access patterns, independent of object size -// or retention period. If the size of an object is less than 128 KB, it is not -// monitored and not eligible for auto-tiering. Smaller objects can be stored, but -// they are always charged at the Frequent Access tier rates in the S3 -// Intelligent-Tiering storage class. For more information, see Storage class for -// automatically optimizing frequently and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access) -// . Operations related to PutBucketIntelligentTieringConfiguration include: -// - DeleteBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html) -// - GetBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html) -// - ListBucketIntelligentTieringConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html) +// hours, you can choose to activate additional archiving capabilities. +// +// The S3 Intelligent-Tiering storage class is the ideal storage class for data +// with unknown, changing, or unpredictable access patterns, independent of object +// size or retention period. If the size of an object is less than 128 KB, it is +// not monitored and not eligible for auto-tiering. Smaller objects can be stored, +// but they are always charged at the Frequent Access tier rates in the S3 +// Intelligent-Tiering storage class. +// +// For more information, see [Storage class for automatically optimizing frequently and infrequently accessed objects]. +// +// Operations related to PutBucketIntelligentTieringConfiguration include: +// +// [DeleteBucketIntelligentTieringConfiguration] +// +// [GetBucketIntelligentTieringConfiguration] +// +// [ListBucketIntelligentTieringConfigurations] // // You only need S3 Intelligent-Tiering enabled on a bucket if you want to // automatically move objects stored in the S3 Intelligent-Tiering storage class to // the Archive Access or Deep Archive Access tier. -// PutBucketIntelligentTieringConfiguration has the following special errors: HTTP -// 400 Bad Request Error Code: InvalidArgument Cause: Invalid Argument HTTP 400 Bad -// Request Error Code: TooManyConfigurations Cause: You are attempting to create a -// new configuration but have already reached the 1,000-configuration limit.
HTTP -// 403 Forbidden Error Cause: You are not the owner of the specified bucket, or you -// do not have the s3:PutIntelligentTieringConfiguration bucket permission to set -// the configuration on the bucket. +// +// PutBucketIntelligentTieringConfiguration has the following special errors: +// +// HTTP 400 Bad Request Error Code: InvalidArgument +// +// Cause: Invalid Argument +// +// HTTP 400 Bad Request Error Code: TooManyConfigurations +// +// Cause: You are attempting to create a new configuration but have already +// reached the 1,000-configuration limit. +// +// HTTP 403 Forbidden Error Cause: You are not the owner of the specified bucket, +// or you do not have the s3:PutIntelligentTieringConfiguration bucket permission +// to set the configuration on the bucket. +// +// [ListBucketIntelligentTieringConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html +// [GetBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html +// [Storage class for automatically optimizing frequently and infrequently accessed objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access +// [DeleteBucketIntelligentTieringConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html func (c *Client) PutBucketIntelligentTieringConfiguration(ctx context.Context, params *PutBucketIntelligentTieringConfigurationInput, optFns ...func(*Options)) (*PutBucketIntelligentTieringConfigurationOutput, error) { if params == nil { params = &PutBucketIntelligentTieringConfigurationInput{} @@ -82,6 +103,7 @@ type PutBucketIntelligentTieringConfigurationInput struct { } func (in *PutBucketIntelligentTieringConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -136,6 +158,9 @@ func (c *Client) addOperationPutBucketIntelligentTieringConfigurationMiddlewares if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -151,6 +176,18 @@ func (c *Client) addOperationPutBucketIntelligentTieringConfigurationMiddlewares if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutBucketIntelligentTieringConfigurationValidationMiddleware(stack); err != nil { return err } @@ -184,6 +221,18 @@ func (c *Client) addOperationPutBucketIntelligentTieringConfigurationMiddlewares if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketInventoryConfiguration.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketInventoryConfiguration.go index 03d79a0d8..773ba76e5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketInventoryConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketInventoryConfiguration.go @@ -14,48 +14,76 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. This implementation of -// the PUT action adds an inventory configuration (identified by the inventory ID) -// to the bucket. You can have up to 1,000 inventory configurations per bucket. +// This operation is not supported for directory buckets. +// +// This implementation of the PUT action adds an inventory configuration +// (identified by the inventory ID) to the bucket. You can have up to 1,000 +// inventory configurations per bucket. +// // Amazon S3 inventory generates inventories of the objects in the bucket on a // daily or weekly basis, and the results are published to a flat file. The bucket // that is inventoried is called the source bucket, and the bucket where the // inventory flat file is stored is called the destination bucket. The destination -// bucket must be in the same Amazon Web Services Region as the source bucket. When -// you configure an inventory for a source bucket, you specify the destination -// bucket where you want the inventory to be stored, and whether to generate the -// inventory daily or weekly. You can also configure what object metadata to -// include and whether to inventory all object versions or only current versions. -// For more information, see Amazon S3 Inventory (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html) -// in the Amazon S3 User Guide. You must create a bucket policy on the destination -// bucket to grant permissions to Amazon S3 to write objects to the bucket in the -// defined location. For an example policy, see Granting Permissions for Amazon S3 -// Inventory and Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9) -// . Permissions To use this operation, you must have permission to perform the +// bucket must be in the same Amazon Web Services Region as the source bucket. +// +// When you configure an inventory for a source bucket, you specify the +// destination bucket where you want the inventory to be stored, and whether to +// generate the inventory daily or weekly. You can also configure what object +// metadata to include and whether to inventory all object versions or only current +// versions. For more information, see [Amazon S3 Inventory]in the Amazon S3 User Guide. +// +// You must create a bucket policy on the destination bucket to grant permissions +// to Amazon S3 to write objects to the bucket in the defined location. For an +// example policy, see [Granting Permissions for Amazon S3 Inventory and Storage Class Analysis]. +// +// Permissions To use this operation, you must have permission to perform the // s3:PutInventoryConfiguration action. The bucket owner has this permission by -// default and can grant this permission to others. The -// s3:PutInventoryConfiguration permission allows a user to create an S3 Inventory (https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-inventory.html) -// report that includes all object metadata fields available and to specify the +// default and can grant this permission to others. 
+// +// The s3:PutInventoryConfiguration permission allows a user to create an [S3 Inventory] report +// that includes all object metadata fields available and to specify the // destination bucket to store the inventory. A user with read access to objects in // the destination bucket can also access all object metadata fields that are -// available in the inventory report. To restrict access to an inventory report, -// see Restricting access to an Amazon S3 Inventory report (https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html#example-bucket-policies-use-case-10) -// in the Amazon S3 User Guide. For more information about the metadata fields -// available in S3 Inventory, see Amazon S3 Inventory lists (https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-inventory.html#storage-inventory-contents) -// in the Amazon S3 User Guide. For more information about permissions, see -// Permissions related to bucket subresource operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Identity and access management in Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// in the Amazon S3 User Guide. PutBucketInventoryConfiguration has the following -// special errors: HTTP 400 Bad Request Error Code: InvalidArgument Cause: Invalid -// Argument HTTP 400 Bad Request Error Code: TooManyConfigurations Cause: You are -// attempting to create a new configuration but have already reached the -// 1,000-configuration limit. HTTP 403 Forbidden Error Cause: You are not the owner -// of the specified bucket, or you do not have the s3:PutInventoryConfiguration -// bucket permission to set the configuration on the bucket. The following -// operations are related to PutBucketInventoryConfiguration : -// - GetBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html) -// - DeleteBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html) -// - ListBucketInventoryConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html) +// available in the inventory report. +// +// To restrict access to an inventory report, see [Restricting access to an Amazon S3 Inventory report] in the Amazon S3 User Guide. +// For more information about the metadata fields available in S3 Inventory, see [Amazon S3 Inventory lists] +// in the Amazon S3 User Guide. For more information about permissions, see [Permissions related to bucket subresource operations]and [Identity and access management in Amazon S3] +// in the Amazon S3 User Guide. +// +// PutBucketInventoryConfiguration has the following special errors: +// +// HTTP 400 Bad Request Error Code: InvalidArgument +// +// Cause: Invalid Argument +// +// HTTP 400 Bad Request Error Code: TooManyConfigurations +// +// Cause: You are attempting to create a new configuration but have already +// reached the 1,000-configuration limit. +// +// HTTP 403 Forbidden Error Cause: You are not the owner of the specified bucket, +// or you do not have the s3:PutInventoryConfiguration bucket permission to set +// the configuration on the bucket. 
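A minimal sketch of the daily CSV inventory described above; the source bucket, inventory ID, and destination bucket ARN are placeholders, the destination must carry the bucket policy mentioned earlier, and imports are as in the PutBucketAcl sketch:

func putExampleInventory(ctx context.Context, client *s3.Client) error {
	_, err := client.PutBucketInventoryConfiguration(ctx, &s3.PutBucketInventoryConfigurationInput{
		Bucket: aws.String("example-source-bucket"), // placeholder
		Id:     aws.String("example-inventory"),     // placeholder
		InventoryConfiguration: &types.InventoryConfiguration{
			Id:                     aws.String("example-inventory"),
			IsEnabled:              aws.Bool(true),
			IncludedObjectVersions: types.InventoryIncludedObjectVersionsCurrent,
			Schedule: &types.InventorySchedule{
				Frequency: types.InventoryFrequencyDaily,
			},
			Destination: &types.InventoryDestination{
				S3BucketDestination: &types.InventoryS3BucketDestination{
					// The destination is addressed by ARN and must be in
					// the same Region as the source bucket.
					Bucket: aws.String("arn:aws:s3:::example-destination-bucket"), // placeholder
					Format: types.InventoryFormatCsv,
				},
			},
		},
	})
	return err
}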
+// +// The following operations are related to PutBucketInventoryConfiguration : +// +// [GetBucketInventoryConfiguration] +// +// [DeleteBucketInventoryConfiguration] +// +// [ListBucketInventoryConfigurations] +// +// [Granting Permissions for Amazon S3 Inventory and Storage Class Analysis]: https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9 +// [Amazon S3 Inventory]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html +// [ListBucketInventoryConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html +// [S3 Inventory]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-inventory.html +// [Permissions related to bucket subresource operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [DeleteBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html +// [Identity and access management in Amazon S3]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html +// [Restricting access to an Amazon S3 Inventory report]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html#example-bucket-policies-use-case-10 +// [Amazon S3 Inventory lists]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-inventory.html#storage-inventory-contents +// [GetBucketInventoryConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html func (c *Client) PutBucketInventoryConfiguration(ctx context.Context, params *PutBucketInventoryConfigurationInput, optFns ...func(*Options)) (*PutBucketInventoryConfigurationOutput, error) { if params == nil { params = &PutBucketInventoryConfigurationInput{} @@ -97,6 +125,7 @@ type PutBucketInventoryConfigurationInput struct { } func (in *PutBucketInventoryConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -151,6 +180,9 @@ func (c *Client) addOperationPutBucketInventoryConfigurationMiddlewares(stack *m if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -166,6 +198,18 @@ func (c *Client) addOperationPutBucketInventoryConfigurationMiddlewares(stack *m if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutBucketInventoryConfigurationValidationMiddleware(stack); err != nil { return err } @@ -199,6 +243,18 @@ func (c *Client) addOperationPutBucketInventoryConfigurationMiddlewares(stack *m if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLifecycleConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLifecycleConfiguration.go index 88096fdd1..567b80773 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLifecycleConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLifecycleConfiguration.go @@ -15,26 +15,43 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Creates a new lifecycle -// configuration for the bucket or replaces an existing lifecycle configuration. -// Keep in mind that this will overwrite an existing lifecycle configuration, so if -// you want to retain any configuration details, they must be included in the new -// lifecycle configuration. For information about lifecycle configuration, see -// Managing your storage lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html) -// . Bucket lifecycle configuration now supports specifying a lifecycle rule using +// Creates a new lifecycle configuration for the bucket or replaces an existing +// lifecycle configuration. Keep in mind that this will overwrite an existing +// lifecycle configuration, so if you want to retain any configuration details, +// they must be included in the new lifecycle configuration. For information about +// lifecycle configuration, see [Managing your storage lifecycle]. +// +// Bucket lifecycle configuration now supports specifying a lifecycle rule using // an object key name prefix, one or more object tags, object size, or any // combination of these. Accordingly, this section describes the latest API. The // previous version of the API supported filtering based only on an object key name // prefix, which is supported for backward compatibility. For the related API -// description, see PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html) -// . Rules You specify the lifecycle configuration in your request body. The -// lifecycle configuration is specified as XML consisting of one or more rules. An -// Amazon S3 Lifecycle configuration can have up to 1,000 rules. This limit is not -// adjustable. Each rule consists of the following: +// description, see [PutBucketLifecycle]. +// +// Rules You specify the lifecycle +// configuration in your request body. The lifecycle configuration is specified as +// XML consisting of one or more rules. An Amazon S3 Lifecycle configuration can +// have up to 1,000 rules. This limit is not adjustable. +// +// Bucket lifecycle configuration supports specifying a lifecycle rule using an +// object key name prefix, one or more object tags, object size, or any combination +// of these. Accordingly, this section describes the latest API. The previous +// version of the API supported filtering based only on an object key name prefix, +// which is supported for backward compatibility for general purpose buckets. For +// the related API description, see [PutBucketLifecycle]. +// +// Lifecycle configurations for directory buckets only support expiring objects and +// cancelling multipart uploads. Expiration of versioned objects, transitions, and +// tag filters are not supported. +// +// A lifecycle rule consists of the following: +// // - A filter identifying a subset of objects to which the rule applies.
The // filter can be based on a key name prefix, object tags, object size, or any // combination of these. +// // - A status indicating whether the rule is in effect. +// // - One or more lifecycle transition and expiration actions that you want // Amazon S3 to perform on the objects identified by the filter. If the state of // your bucket is versioning-enabled or versioning-suspended, you can have many @@ -42,28 +59,69 @@ import ( // versions). Amazon S3 provides predefined actions that you can specify for // current and noncurrent object versions. // -// For more information, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) -// and Lifecycle Configuration Elements (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html) -// . Permissions By default, all Amazon S3 resources are private, including -// buckets, objects, and related subresources (for example, lifecycle configuration -// and website configuration). Only the resource owner (that is, the Amazon Web -// Services account that created it) can access the resource. The resource owner -// can optionally grant access permissions to others by writing an access policy. -// For this operation, a user must get the s3:PutLifecycleConfiguration -// permission. You can also explicitly deny permissions. An explicit deny also -// supersedes any other permissions. If you want to block users or accounts from -// removing or deleting objects from your bucket, you must deny them permissions -// for the following actions: -// - s3:DeleteObject -// - s3:DeleteObjectVersion -// - s3:PutLifecycleConfiguration +// For more information, see [Object Lifecycle Management] and [Lifecycle Configuration Elements]. +// +// - General purpose bucket permissions - By default, all Amazon S3 resources +// are private, including buckets, objects, and related subresources (for example, +// lifecycle configuration and website configuration). Only the resource owner +// (that is, the Amazon Web Services account that created it) can access the +// resource. The resource owner can optionally grant access permissions to others +// by writing an access policy. For this operation, a user must have the +// s3:PutLifecycleConfiguration permission. +// +// You can also explicitly deny permissions. An explicit deny also supersedes any +// +// other permissions. If you want to block users or accounts from removing or +// deleting objects from your bucket, you must deny them permissions for the +// following actions: +// +// - s3:DeleteObject +// +// - s3:DeleteObjectVersion +// +// - s3:PutLifecycleConfiguration +// +// For more information about permissions, see [Managing Access Permissions to Your Amazon S3 Resources]. // -// For more information about permissions, see Managing Access Permissions to Your -// Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// . 
The following operations are related to PutBucketLifecycleConfiguration : -// - Examples of Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-configuration-examples.html) -// - GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html) -// - DeleteBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html) +// - Directory bucket permissions - You must have the +// s3express:PutLifecycleConfiguration permission in an IAM identity-based policy +// to use this operation. Cross-account access to this API operation isn't +// supported. The resource owner can optionally grant access permissions to others +// by creating a role or user for them as long as they are within the same account +// as the owner and resource. +// +// For more information about directory bucket policies and permissions, see [Authorizing Regional endpoint APIs with IAM]in +// +// the Amazon S3 User Guide. +// +// Directory buckets - For directory buckets, you must make requests for this API +// +// operation to the Regional endpoint. These endpoints support path-style requests +// in the format https://s3express-control.region-code.amazonaws.com/bucket-name +// . Virtual-hosted-style requests aren't supported. For more information about +// endpoints in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more +// information about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. +// +// Directory buckets - The HTTP Host header syntax is +// s3express-control.region.amazonaws.com . +// +// The following operations are related to PutBucketLifecycleConfiguration : +// +// [GetBucketLifecycleConfiguration] +// +// [DeleteBucketLifecycle] +// +// [Object Lifecycle Management]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html +// [Lifecycle Configuration Elements]: https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html +// [GetBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html +// [Authorizing Regional endpoint APIs with IAM]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html +// [PutBucketLifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html +// [DeleteBucketLifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html +// [Managing your storage lifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html +// +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html func (c *Client) PutBucketLifecycleConfiguration(ctx context.Context, params *PutBucketLifecycleConfigurationInput, optFns ...func(*Options)) (*PutBucketLifecycleConfigurationOutput, error) { if params == nil { params = &PutBucketLifecycleConfigurationInput{} @@ -86,33 +144,79 @@ type PutBucketLifecycleConfigurationInput struct { // This member is required. 
Bucket *string - // Indicates the algorithm used to create the checksum for the object when you use - // the SDK. This header will not provide any additional functionality if you don't - // use the SDK. When you send this header, there must be a corresponding + // Indicates the algorithm used to create the checksum for the request when you + // use the SDK. This header will not provide any additional functionality if you + // don't use the SDK. When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm // The account ID of the expected bucket owner. If the account ID that you provide // does not match the actual owner of the bucket, the request fails with the HTTP // status code 403 Forbidden (access denied). + // + // This parameter applies to general purpose buckets only. It is not supported for + // directory bucket lifecycle configurations. ExpectedBucketOwner *string // Container for lifecycle rules. You can add as many as 1,000 rules. LifecycleConfiguration *types.BucketLifecycleConfiguration + // Indicates which default minimum object size behavior is applied to the + // lifecycle configuration. + // + // This parameter applies to general purpose buckets only. It is not supported for + // directory bucket lifecycle configurations. + // + // - all_storage_classes_128K - Objects smaller than 128 KB will not transition + // to any storage class by default. + // + // - varies_by_storage_class - Objects smaller than 128 KB will transition to + // Glacier Flexible Retrieval or Glacier Deep Archive storage classes. By default, + // all other storage classes will prevent transitions smaller than 128 KB. + // + // To customize the minimum object size for any transition you can add a filter + // that specifies a custom ObjectSizeGreaterThan or ObjectSizeLessThan in the body + // of your transition rule. Custom filters always take precedence over the default + // transition behavior. + TransitionDefaultMinimumObjectSize types.TransitionDefaultMinimumObjectSize + noSmithyDocumentSerde } func (in *PutBucketLifecycleConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } type PutBucketLifecycleConfigurationOutput struct { + + // Indicates which default minimum object size behavior is applied to the + // lifecycle configuration. + // + // This parameter applies to general purpose buckets only. It is not supported for + // directory bucket lifecycle configurations. + // + // - all_storage_classes_128K - Objects smaller than 128 KB will not transition + // to any storage class by default. 
+ // + // - varies_by_storage_class - Objects smaller than 128 KB will transition to + // Glacier Flexible Retrieval or Glacier Deep Archive storage classes. By default, + // all other storage classes will prevent transitions smaller than 128 KB. + // + // To customize the minimum object size for any transition you can add a filter + // that specifies a custom ObjectSizeGreaterThan or ObjectSizeLessThan in the body + // of your transition rule. Custom filters always take precedence over the default + // transition behavior. + TransitionDefaultMinimumObjectSize types.TransitionDefaultMinimumObjectSize + // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata @@ -162,6 +266,9 @@ func (c *Client) addOperationPutBucketLifecycleConfigurationMiddlewares(stack *m if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -177,6 +284,21 @@ func (c *Client) addOperationPutBucketLifecycleConfigurationMiddlewares(stack *m if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addRequestChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutBucketLifecycleConfigurationValidationMiddleware(stack); err != nil { return err } @@ -216,6 +338,18 @@ func (c *Client) addOperationPutBucketLifecycleConfigurationMiddlewares(stack *m if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } @@ -245,9 +379,10 @@ func getPutBucketLifecycleConfigurationRequestAlgorithmMember(input interface{}) } func addPutBucketLifecycleConfigurationInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ GetAlgorithm: getPutBucketLifecycleConfigurationRequestAlgorithmMember, RequireChecksum: true, + RequestChecksumCalculation: options.RequestChecksumCalculation, EnableTrailingChecksum: false, EnableComputeSHA256PayloadHash: true, EnableDecodedContentLengthHeader: true, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLogging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLogging.go index fb80d2ee1..53342aca4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLogging.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLogging.go @@ -15,39 +15,68 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Set the logging -// parameters for a bucket and to specify permissions for who can view and modify -// the logging parameters. All logs are saved to buckets in the same Amazon Web -// Services Region as the source bucket. 
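Beyond the doc-comment churn, the middleware hunk above (repeated for every operation in this file set) threads a new options.RequestChecksumCalculation value into addInputChecksumMiddleware. A hedged sketch of the client-level knob behind it follows; the aws.RequestChecksumCalculationWhenRequired constant name comes from the SDK's flexible-checksums update and should be verified against the pinned version.

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	// Compute request checksums only for operations that require one,
	// rather than the default of "whenever the operation supports it".
	client := s3.NewFromConfig(cfg, func(o *s3.Options) {
		o.RequestChecksumCalculation = aws.RequestChecksumCalculationWhenRequired
	})
	_ = client
}
```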
To set the logging status of a bucket, you -// must be the bucket owner. The bucket owner is automatically granted FULL_CONTROL -// to all logs. You use the Grantee request element to grant access to other -// people. The Permissions request element specifies the kind of access the -// grantee has to the logs. If the target bucket for log delivery uses the bucket -// owner enforced setting for S3 Object Ownership, you can't use the Grantee -// request element to grant access to others. Permissions can only be granted using -// policies. For more information, see Permissions for server access log delivery (https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general) -// in the Amazon S3 User Guide. Grantee Values You can specify the person (grantee) -// to whom you're assigning access rights (by using request elements) in the -// following ways: -// - By the person's ID: <>ID<><>GranteesEmail<> DisplayName is optional and -// ignored in the request. -// - By Email address: <>Grantees@email.com<> The grantee is resolved to the -// CanonicalUser and, in a response to a GETObjectAcl request, appears as the -// CanonicalUser. -// - By URI: <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<> +// This operation is not supported for directory buckets. +// +// Set the logging parameters for a bucket and to specify permissions for who can +// view and modify the logging parameters. All logs are saved to buckets in the +// same Amazon Web Services Region as the source bucket. To set the logging status +// of a bucket, you must be the bucket owner. +// +// The bucket owner is automatically granted FULL_CONTROL to all logs. You use the +// Grantee request element to grant access to other people. The Permissions +// request element specifies the kind of access the grantee has to the logs. +// +// If the target bucket for log delivery uses the bucket owner enforced setting +// for S3 Object Ownership, you can't use the Grantee request element to grant +// access to others. Permissions can only be granted using policies. For more +// information, see [Permissions for server access log delivery]in the Amazon S3 User Guide. +// +// Grantee Values You can specify the person (grantee) to whom you're assigning +// access rights (by using request elements) in the following ways: +// +// - By the person's ID: +// +// <>ID<><>GranteesEmail<> +// +// DisplayName is optional and ignored in the request. +// +// - By Email address: +// +// <>Grantees@email.com<> +// +// The grantee is resolved to the CanonicalUser and, in a response to a +// +// GETObjectAcl request, appears as the CanonicalUser. +// +// - By URI: +// +// <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<> // // To enable logging, you use LoggingEnabled and its children request elements. To -// disable logging, you use an empty BucketLoggingStatus request element: For -// more information about server access logging, see Server Access Logging (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerLogs.html) -// in the Amazon S3 User Guide. For more information about creating a bucket, see -// CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) -// . For more information about returning the logging status of a bucket, see -// GetBucketLogging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLogging.html) -// . 
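As a companion to the rewritten PutBucketLogging documentation, a minimal sketch of both states the doc describes: a populated LoggingEnabled element to turn server access logging on, and the empty BucketLoggingStatus element that turns it off. Bucket names and the prefix are parameters supplied by the caller; the target bucket must live in the same Region as the source.

```go
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// enableAccessLogging delivers server access logs for srcBucket to logBucket
// under logPrefix.
func enableAccessLogging(ctx context.Context, client *s3.Client, srcBucket, logBucket, logPrefix string) error {
	_, err := client.PutBucketLogging(ctx, &s3.PutBucketLoggingInput{
		Bucket: aws.String(srcBucket),
		BucketLoggingStatus: &types.BucketLoggingStatus{
			LoggingEnabled: &types.LoggingEnabled{
				TargetBucket: aws.String(logBucket),
				TargetPrefix: aws.String(logPrefix),
			},
		},
	})
	return err
}

// disableAccessLogging sends the empty BucketLoggingStatus element, which is
// how the API models "logging off".
func disableAccessLogging(ctx context.Context, client *s3.Client, srcBucket string) error {
	_, err := client.PutBucketLogging(ctx, &s3.PutBucketLoggingInput{
		Bucket:              aws.String(srcBucket),
		BucketLoggingStatus: &types.BucketLoggingStatus{},
	})
	return err
}
```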
The following operations are related to PutBucketLogging : -// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) -// - DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) -// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) -// - GetBucketLogging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLogging.html) +// disable logging, you use an empty BucketLoggingStatus request element: +// +// For more information about server access logging, see [Server Access Logging] in the Amazon S3 User +// Guide. +// +// For more information about creating a bucket, see [CreateBucket]. For more information about +// returning the logging status of a bucket, see [GetBucketLogging]. +// +// The following operations are related to PutBucketLogging : +// +// [PutObject] +// +// [DeleteBucket] +// +// [CreateBucket] +// +// [GetBucketLogging] +// +// [Permissions for server access log delivery]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general +// [DeleteBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html +// [GetBucketLogging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLogging.html +// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html +// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html +// [Server Access Logging]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerLogs.html func (c *Client) PutBucketLogging(ctx context.Context, params *PutBucketLoggingInput, optFns ...func(*Options)) (*PutBucketLoggingOutput, error) { if params == nil { params = &PutBucketLoggingInput{} @@ -75,19 +104,23 @@ type PutBucketLoggingInput struct { // This member is required. BucketLoggingStatus *types.BucketLoggingStatus - // Indicates the algorithm used to create the checksum for the object when you use - // the SDK. This header will not provide any additional functionality if you don't - // use the SDK. When you send this header, there must be a corresponding + // Indicates the algorithm used to create the checksum for the request when you + // use the SDK. This header will not provide any additional functionality if you + // don't use the SDK. When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm - // The MD5 hash of the PutBucketLogging request body. For requests made using the - // Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, - // this field is calculated automatically. + // The MD5 hash of the PutBucketLogging request body. 
+ // + // For requests made using the Amazon Web Services Command Line Interface (CLI) or + // Amazon Web Services SDKs, this field is calculated automatically. ContentMD5 *string // The account ID of the expected bucket owner. If the account ID that you provide @@ -99,6 +132,7 @@ type PutBucketLoggingInput struct { } func (in *PutBucketLoggingInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -153,6 +187,9 @@ func (c *Client) addOperationPutBucketLoggingMiddlewares(stack *middleware.Stack if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -168,6 +205,21 @@ func (c *Client) addOperationPutBucketLoggingMiddlewares(stack *middleware.Stack if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addRequestChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutBucketLoggingValidationMiddleware(stack); err != nil { return err } @@ -207,6 +259,18 @@ func (c *Client) addOperationPutBucketLoggingMiddlewares(stack *middleware.Stack if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } @@ -236,9 +300,10 @@ func getPutBucketLoggingRequestAlgorithmMember(input interface{}) (string, bool) } func addPutBucketLoggingInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ GetAlgorithm: getPutBucketLoggingRequestAlgorithmMember, RequireChecksum: true, + RequestChecksumCalculation: options.RequestChecksumCalculation, EnableTrailingChecksum: false, EnableComputeSHA256PayloadHash: true, EnableDecodedContentLengthHeader: true, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketMetricsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketMetricsConfiguration.go index bff1452b9..e386b1e9a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketMetricsConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketMetricsConfiguration.go @@ -14,29 +14,44 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Sets a metrics -// configuration (specified by the metrics configuration ID) for the bucket. You -// can have up to 1,000 metrics configurations per bucket. If you're updating an -// existing metrics configuration, note that this is a full replacement of the -// existing metrics configuration. If you don't include the elements you want to -// keep, they are erased. 
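The PutBucketMetricsConfiguration rewrite continues below; as a companion, a sketch of the simplest call the doc describes, with bucket and configuration ID supplied by the caller. Because the operation is a full replacement, any previously configured filter that is omitted here is erased.

```go
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// putBucketMetrics replaces (never merges) the metrics configuration stored
// under id, so any filter set earlier must be restated in this request.
func putBucketMetrics(ctx context.Context, client *s3.Client, bucket, id string) error {
	_, err := client.PutBucketMetricsConfiguration(ctx, &s3.PutBucketMetricsConfigurationInput{
		Bucket: aws.String(bucket),
		Id:     aws.String(id),
		MetricsConfiguration: &types.MetricsConfiguration{
			Id: aws.String(id),
			// No Filter set: CloudWatch request metrics cover the whole bucket.
		},
	})
	return err
}
```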
To use this operation, you must have permissions to -// perform the s3:PutMetricsConfiguration action. The bucket owner has this -// permission by default. The bucket owner can grant this permission to others. For -// more information about permissions, see Permissions Related to Bucket -// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// . For information about CloudWatch request metrics for Amazon S3, see -// Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html) -// . The following operations are related to PutBucketMetricsConfiguration : -// - DeleteBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html) -// - GetBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html) -// - ListBucketMetricsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html) +// This operation is not supported for directory buckets. +// +// Sets a metrics configuration (specified by the metrics configuration ID) for +// the bucket. You can have up to 1,000 metrics configurations per bucket. If +// you're updating an existing metrics configuration, note that this is a full +// replacement of the existing metrics configuration. If you don't include the +// elements you want to keep, they are erased. +// +// To use this operation, you must have permissions to perform the +// s3:PutMetricsConfiguration action. The bucket owner has this permission by +// default. The bucket owner can grant this permission to others. For more +// information about permissions, see [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. +// +// For information about CloudWatch request metrics for Amazon S3, see [Monitoring Metrics with Amazon CloudWatch]. +// +// The following operations are related to PutBucketMetricsConfiguration : +// +// [DeleteBucketMetricsConfiguration] +// +// [GetBucketMetricsConfiguration] +// +// [ListBucketMetricsConfigurations] // // PutBucketMetricsConfiguration has the following special error: +// // - Error code: TooManyConfigurations +// // - Description: You are attempting to create a new configuration but have // already reached the 1,000-configuration limit. 
+// // - HTTP Status Code: HTTP 400 Bad Request +// +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [Monitoring Metrics with Amazon CloudWatch]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html +// [GetBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html +// [ListBucketMetricsConfigurations]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html +// [DeleteBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html func (c *Client) PutBucketMetricsConfiguration(ctx context.Context, params *PutBucketMetricsConfigurationInput, optFns ...func(*Options)) (*PutBucketMetricsConfigurationOutput, error) { if params == nil { params = &PutBucketMetricsConfigurationInput{} @@ -79,6 +94,7 @@ type PutBucketMetricsConfigurationInput struct { } func (in *PutBucketMetricsConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -133,6 +149,9 @@ func (c *Client) addOperationPutBucketMetricsConfigurationMiddlewares(stack *mid if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -148,6 +167,18 @@ func (c *Client) addOperationPutBucketMetricsConfigurationMiddlewares(stack *mid if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutBucketMetricsConfigurationValidationMiddleware(stack); err != nil { return err } @@ -181,6 +212,18 @@ func (c *Client) addOperationPutBucketMetricsConfigurationMiddlewares(stack *mid if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketNotificationConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketNotificationConfiguration.go index e937b5c59..9100fb197 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketNotificationConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketNotificationConfiguration.go @@ -14,41 +14,59 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Enables notifications of -// specified events for a bucket. 
For more information about event notifications, -// see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) -// . Using this API, you can replace an existing notification configuration. The +// This operation is not supported for directory buckets. +// +// Enables notifications of specified events for a bucket. For more information +// about event notifications, see [Configuring Event Notifications]. +// +// Using this API, you can replace an existing notification configuration. The // configuration is an XML file that defines the event types that you want Amazon // S3 to publish and the destination where you want Amazon S3 to publish an event -// notification when it detects an event of the specified type. By default, your -// bucket has no event notifications configured. That is, the notification -// configuration will be an empty NotificationConfiguration . This action -// replaces the existing notification configuration with the configuration you -// include in the request body. After Amazon S3 receives this request, it first -// verifies that any Amazon Simple Notification Service (Amazon SNS) or Amazon -// Simple Queue Service (Amazon SQS) destination exists, and that the bucket owner -// has permission to publish to it by sending a test notification. In the case of -// Lambda destinations, Amazon S3 verifies that the Lambda function permissions -// grant Amazon S3 permission to invoke the function from the Amazon S3 bucket. For -// more information, see Configuring Notifications for Amazon S3 Events (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) -// . You can disable notifications by adding the empty NotificationConfiguration -// element. For more information about the number of event notification -// configurations that you can create per bucket, see Amazon S3 service quotas (https://docs.aws.amazon.com/general/latest/gr/s3.html#limits_s3) -// in Amazon Web Services General Reference. By default, only the bucket owner can -// configure notifications on a bucket. However, bucket owners can use a bucket -// policy to grant permission to other users to set this configuration with the -// required s3:PutBucketNotification permission. The PUT notification is an atomic -// operation. For example, suppose your notification configuration includes SNS -// topic, SQS queue, and Lambda function configurations. When you send a PUT -// request with this configuration, Amazon S3 sends test messages to your SNS -// topic. If the message fails, the entire PUT action will fail, and Amazon S3 will -// not add the configuration to your bucket. If the configuration in the request -// body includes only one TopicConfiguration specifying only the -// s3:ReducedRedundancyLostObject event type, the response will also include the -// x-amz-sns-test-message-id header containing the message ID of the test -// notification sent to the topic. The following action is related to -// PutBucketNotificationConfiguration : -// - GetBucketNotificationConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html) +// notification when it detects an event of the specified type. +// +// By default, your bucket has no event notifications configured. That is, the +// notification configuration will be an empty NotificationConfiguration . +// +// This action replaces the existing notification configuration with the +// configuration you include in the request body. 
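Since the new doc text stresses that this call replaces the entire notification configuration, and that S3 test-publishes to each destination before accepting it, a sketch may help. The queue ARN is a caller-supplied, hypothetical value, and the SQS queue policy must already allow S3 to send messages to it.

```go
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// notifyObjectCreated replaces the bucket's whole notification configuration
// with a single SQS rule for all object-creation events. S3 sends a test
// message to the queue first; if that fails, nothing is stored.
func notifyObjectCreated(ctx context.Context, client *s3.Client, bucket, queueARN string) error {
	_, err := client.PutBucketNotificationConfiguration(ctx, &s3.PutBucketNotificationConfigurationInput{
		Bucket: aws.String(bucket),
		NotificationConfiguration: &types.NotificationConfiguration{
			QueueConfigurations: []types.QueueConfiguration{{
				QueueArn: aws.String(queueARN),
				Events:   []types.Event{types.EventS3ObjectCreated}, // "s3:ObjectCreated:*"
			}},
		},
	})
	return err
}
```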
+// +// After Amazon S3 receives this request, it first verifies that any Amazon Simple +// Notification Service (Amazon SNS) or Amazon Simple Queue Service (Amazon SQS) +// destination exists, and that the bucket owner has permission to publish to it by +// sending a test notification. In the case of Lambda destinations, Amazon S3 +// verifies that the Lambda function permissions grant Amazon S3 permission to +// invoke the function from the Amazon S3 bucket. For more information, see [Configuring Notifications for Amazon S3 Events]. +// +// You can disable notifications by adding the empty NotificationConfiguration +// element. +// +// For more information about the number of event notification configurations that +// you can create per bucket, see [Amazon S3 service quotas]in Amazon Web Services General Reference. +// +// By default, only the bucket owner can configure notifications on a bucket. +// However, bucket owners can use a bucket policy to grant permission to other +// users to set this configuration with the required s3:PutBucketNotification +// permission. +// +// The PUT notification is an atomic operation. For example, suppose your +// notification configuration includes SNS topic, SQS queue, and Lambda function +// configurations. When you send a PUT request with this configuration, Amazon S3 +// sends test messages to your SNS topic. If the message fails, the entire PUT +// action will fail, and Amazon S3 will not add the configuration to your bucket. +// +// If the configuration in the request body includes only one TopicConfiguration +// specifying only the s3:ReducedRedundancyLostObject event type, the response +// will also include the x-amz-sns-test-message-id header containing the message +// ID of the test notification sent to the topic. 
+// +// The following action is related to PutBucketNotificationConfiguration : +// +// [GetBucketNotificationConfiguration] +// +// [Configuring Notifications for Amazon S3 Events]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html +// [Amazon S3 service quotas]: https://docs.aws.amazon.com/general/latest/gr/s3.html#limits_s3 +// [GetBucketNotificationConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html +// [Configuring Event Notifications]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html func (c *Client) PutBucketNotificationConfiguration(ctx context.Context, params *PutBucketNotificationConfigurationInput, optFns ...func(*Options)) (*PutBucketNotificationConfigurationOutput, error) { if params == nil { params = &PutBucketNotificationConfigurationInput{} @@ -90,6 +108,7 @@ type PutBucketNotificationConfigurationInput struct { } func (in *PutBucketNotificationConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -144,6 +163,9 @@ func (c *Client) addOperationPutBucketNotificationConfigurationMiddlewares(stack if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -159,6 +181,18 @@ func (c *Client) addOperationPutBucketNotificationConfigurationMiddlewares(stack if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutBucketNotificationConfigurationValidationMiddleware(stack); err != nil { return err } @@ -192,6 +226,18 @@ func (c *Client) addOperationPutBucketNotificationConfigurationMiddlewares(stack if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketOwnershipControls.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketOwnershipControls.go index 94875b755..96f27ccd0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketOwnershipControls.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketOwnershipControls.go @@ -15,14 +15,22 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Creates or modifies -// OwnershipControls for an Amazon S3 bucket. To use this operation, you must have -// the s3:PutBucketOwnershipControls permission. For more information about Amazon -// S3 permissions, see Specifying permissions in a policy (https://docs.aws.amazon.com/AmazonS3/latest/user-guide/using-with-s3-actions.html) -// . For information about Amazon S3 Object Ownership, see Using object ownership (https://docs.aws.amazon.com/AmazonS3/latest/user-guide/about-object-ownership.html) -// . 
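For the PutBucketOwnershipControls operation introduced just above, a short sketch of the call with one of the three ObjectOwnership settings; BucketOwnerEnforced is the one that disables ACLs entirely. The bucket name is a caller-supplied parameter.

```go
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// enforceBucketOwner applies the BucketOwnerEnforced setting, which disables
// ACLs and makes the bucket owner own every object in the bucket.
func enforceBucketOwner(ctx context.Context, client *s3.Client, bucket string) error {
	_, err := client.PutBucketOwnershipControls(ctx, &s3.PutBucketOwnershipControlsInput{
		Bucket: aws.String(bucket),
		OwnershipControls: &types.OwnershipControls{
			Rules: []types.OwnershipControlsRule{{
				ObjectOwnership: types.ObjectOwnershipBucketOwnerEnforced,
			}},
		},
	})
	return err
}
```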
The following operations are related to PutBucketOwnershipControls : -// - GetBucketOwnershipControls -// - DeleteBucketOwnershipControls +// This operation is not supported for directory buckets. +// +// Creates or modifies OwnershipControls for an Amazon S3 bucket. To use this +// operation, you must have the s3:PutBucketOwnershipControls permission. For more +// information about Amazon S3 permissions, see [Specifying permissions in a policy]. +// +// For information about Amazon S3 Object Ownership, see [Using object ownership]. +// +// The following operations are related to PutBucketOwnershipControls : +// +// # GetBucketOwnershipControls +// +// # DeleteBucketOwnershipControls +// +// [Specifying permissions in a policy]: https://docs.aws.amazon.com/AmazonS3/latest/user-guide/using-with-s3-actions.html +// [Using object ownership]: https://docs.aws.amazon.com/AmazonS3/latest/user-guide/about-object-ownership.html func (c *Client) PutBucketOwnershipControls(ctx context.Context, params *PutBucketOwnershipControlsInput, optFns ...func(*Options)) (*PutBucketOwnershipControlsOutput, error) { if params == nil { params = &PutBucketOwnershipControlsInput{} @@ -51,9 +59,10 @@ type PutBucketOwnershipControlsInput struct { // This member is required. OwnershipControls *types.OwnershipControls - // The MD5 hash of the OwnershipControls request body. For requests made using the - // Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, - // this field is calculated automatically. + // The MD5 hash of the OwnershipControls request body. + // + // For requests made using the Amazon Web Services Command Line Interface (CLI) or + // Amazon Web Services SDKs, this field is calculated automatically. ContentMD5 *string // The account ID of the expected bucket owner. 
If the account ID that you provide @@ -65,6 +74,7 @@ type PutBucketOwnershipControlsInput struct { } func (in *PutBucketOwnershipControlsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -119,6 +129,9 @@ func (c *Client) addOperationPutBucketOwnershipControlsMiddlewares(stack *middle if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -134,6 +147,21 @@ func (c *Client) addOperationPutBucketOwnershipControlsMiddlewares(stack *middle if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addRequestChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutBucketOwnershipControlsValidationMiddleware(stack); err != nil { return err } @@ -173,6 +201,18 @@ func (c *Client) addOperationPutBucketOwnershipControlsMiddlewares(stack *middle if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } @@ -192,9 +232,10 @@ func newServiceMetadataMiddleware_opPutBucketOwnershipControls(region string) *a } func addPutBucketOwnershipControlsInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ GetAlgorithm: nil, RequireChecksum: true, + RequestChecksumCalculation: options.RequestChecksumCalculation, EnableTrailingChecksum: false, EnableComputeSHA256PayloadHash: true, EnableDecodedContentLengthHeader: true, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketPolicy.go index 88e3f2633..1764f4928 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketPolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketPolicy.go @@ -15,48 +15,66 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Applies an Amazon S3 bucket policy to an Amazon S3 bucket. Directory buckets - -// For directory buckets, you must make requests for this API operation to the -// Regional endpoint. These endpoints support path-style requests in the format -// https://s3express-control.region_code.amazonaws.com/bucket-name . -// Virtual-hosted-style requests aren't supported. For more information, see -// Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. 
Permissions If you are using an identity other than -// the root user of the Amazon Web Services account that owns the bucket, the -// calling identity must both have the PutBucketPolicy permissions on the -// specified bucket and belong to the bucket owner's account in order to use this -// operation. If you don't have PutBucketPolicy permissions, Amazon S3 returns a -// 403 Access Denied error. If you have the correct permissions, but you're not -// using an identity that belongs to the bucket owner's account, Amazon S3 returns -// a 405 Method Not Allowed error. To ensure that bucket owners don't -// inadvertently lock themselves out of their own buckets, the root principal in a -// bucket owner's Amazon Web Services account can perform the GetBucketPolicy , -// PutBucketPolicy , and DeleteBucketPolicy API actions, even if their bucket -// policy explicitly denies the root principal's access. Bucket owner root -// principals can only be blocked from performing these API actions by VPC endpoint -// policies and Amazon Web Services Organizations policies. +// Applies an Amazon S3 bucket policy to an Amazon S3 bucket. +// +// Directory buckets - For directory buckets, you must make requests for this API +// operation to the Regional endpoint. These endpoints support path-style requests +// in the format https://s3express-control.region-code.amazonaws.com/bucket-name . +// Virtual-hosted-style requests aren't supported. For more information about +// endpoints in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more +// information about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. +// +// Permissions If you are using an identity other than the root user of the Amazon +// Web Services account that owns the bucket, the calling identity must both have +// the PutBucketPolicy permissions on the specified bucket and belong to the +// bucket owner's account in order to use this operation. +// +// If you don't have PutBucketPolicy permissions, Amazon S3 returns a 403 Access +// Denied error. If you have the correct permissions, but you're not using an +// identity that belongs to the bucket owner's account, Amazon S3 returns a 405 +// Method Not Allowed error. +// +// To ensure that bucket owners don't inadvertently lock themselves out of their +// own buckets, the root principal in a bucket owner's Amazon Web Services account +// can perform the GetBucketPolicy , PutBucketPolicy , and DeleteBucketPolicy API +// actions, even if their bucket policy explicitly denies the root principal's +// access. Bucket owner root principals can only be blocked from performing these +// API actions by VPC endpoint policies and Amazon Web Services Organizations +// policies. +// // - General purpose bucket permissions - The s3:PutBucketPolicy permission is // required in a policy. For more information about general purpose buckets bucket -// policies, see Using Bucket Policies and User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html) -// in the Amazon S3 User Guide. +// policies, see [Using Bucket Policies and User Policies]in the Amazon S3 User Guide. +// // - Directory bucket permissions - To grant access to this API operation, you // must have the s3express:PutBucketPolicy permission in an IAM identity-based // policy instead of a bucket policy. Cross-account access to this API operation // isn't supported. 
This operation can only be performed by the Amazon Web Services // account that owns the resource. For more information about directory bucket -// policies and permissions, see Amazon Web Services Identity and Access -// Management (IAM) for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html) -// in the Amazon S3 User Guide. +// policies and permissions, see [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]in the Amazon S3 User Guide. +// +// Example bucket policies General purpose buckets example bucket policies - See [Bucket policy examples] +// in the Amazon S3 User Guide. // -// Example bucket policies General purpose buckets example bucket policies - See -// Bucket policy examples (https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html) -// in the Amazon S3 User Guide. Directory bucket example bucket policies - See -// Example bucket policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html) -// in the Amazon S3 User Guide. HTTP Host header syntax Directory buckets - The -// HTTP Host header syntax is s3express-control.region.amazonaws.com . The -// following operations are related to PutBucketPolicy : -// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) -// - DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) +// Directory bucket example bucket policies - See [Example bucket policies for S3 Express One Zone] in the Amazon S3 User Guide. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// s3express-control.region-code.amazonaws.com . +// +// The following operations are related to PutBucketPolicy : +// +// [CreateBucket] +// +// [DeleteBucket] +// +// [Bucket policy examples]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [Example bucket policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html +// [DeleteBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html +// [Using Bucket Policies and User Policies]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html +// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html +// [Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html func (c *Client) PutBucketPolicy(ctx context.Context, params *PutBucketPolicyInput, optFns ...func(*Options)) (*PutBucketPolicyOutput, error) { if params == nil { params = &PutBucketPolicyInput{} @@ -74,68 +92,90 @@ func (c *Client) PutBucketPolicy(ctx context.Context, params *PutBucketPolicyInp type PutBucketPolicyInput struct { - // The name of the bucket. Directory buckets - When you use this operation with a - // directory bucket, you must use path-style requests in the format - // https://s3express-control.region_code.amazonaws.com/bucket-name . + // The name of the bucket. 
+ // + // Directory buckets - When you use this operation with a directory bucket, you + // must use path-style requests in the format + // https://s3express-control.region-code.amazonaws.com/bucket-name . // Virtual-hosted-style requests aren't supported. Directory bucket names must be - // unique in the chosen Availability Zone. Bucket names must also follow the format - // bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 - // ). For information about bucket naming restrictions, see Directory bucket - // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide + // unique in the chosen Zone (Availability Zone or Local Zone). Bucket names must + // also follow the format bucket-base-name--zone-id--x-s3 (for example, + // DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html // // This member is required. Bucket *string - // The bucket policy as a JSON document. For directory buckets, the only IAM - // action supported in the bucket policy is s3express:CreateSession . + // The bucket policy as a JSON document. + // + // For directory buckets, the only IAM action supported in the bucket policy is + // s3express:CreateSession . // // This member is required. Policy *string - // Indicates the algorithm used to create the checksum for the object when you use - // the SDK. This header will not provide any additional functionality if you don't - // use the SDK. When you send this header, there must be a corresponding + // Indicates the algorithm used to create the checksum for the request when you + // use the SDK. This header will not provide any additional functionality if you + // don't use the SDK. When you send this header, there must be a corresponding // x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon S3 - // fails the request with the HTTP status code 400 Bad Request . For the - // x-amz-checksum-algorithm header, replace algorithm with the supported - // algorithm from the following list: + // fails the request with the HTTP status code 400 Bad Request . + // + // For the x-amz-checksum-algorithm header, replace algorithm with the + // supported algorithm from the following list: + // // - CRC32 + // // - CRC32C + // + // - CRC64NVME + // // - SHA1 + // // - SHA256 - // For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If the individual checksum value you provide - // through x-amz-checksum-algorithm doesn't match the checksum algorithm you set - // through x-amz-sdk-checksum-algorithm , Amazon S3 ignores any provided - // ChecksumAlgorithm parameter and uses the checksum algorithm that matches the - // provided value in x-amz-checksum-algorithm . For directory buckets, when you - // use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's - // used for performance. + // + // For more information, see [Checking object integrity] in the Amazon S3 User Guide. 
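Putting the PutBucketPolicyInput fields above together, a sketch of applying a deny-insecure-transport policy. The statement is a common illustrative example, not taken from this SDK; the bucket name is a caller-supplied parameter.

```go
package example

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// requireTLS attaches a policy that denies any access to the bucket over
// plain HTTP. The JSON document is the Policy input field described above.
func requireTLS(ctx context.Context, client *s3.Client, bucket string) error {
	policy := fmt.Sprintf(`{
	  "Version": "2012-10-17",
	  "Statement": [{
	    "Sid": "DenyInsecureTransport",
	    "Effect": "Deny",
	    "Principal": "*",
	    "Action": "s3:*",
	    "Resource": ["arn:aws:s3:::%[1]s", "arn:aws:s3:::%[1]s/*"],
	    "Condition": {"Bool": {"aws:SecureTransport": "false"}}
	  }]
	}`, bucket)

	_, err := client.PutBucketPolicy(ctx, &s3.PutBucketPolicyInput{
		Bucket: aws.String(bucket),
		Policy: aws.String(policy),
	})
	return err
}
```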
+ // + // If the individual checksum value you provide through x-amz-checksum-algorithm + // doesn't match the checksum algorithm you set through + // x-amz-sdk-checksum-algorithm , Amazon S3 fails the request with a BadDigest + // error. + // + // For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the + // default checksum algorithm that's used for performance. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm // Set this parameter to true to confirm that you want to remove your permissions - // to change this bucket policy in the future. This functionality is not supported - // for directory buckets. + // to change this bucket policy in the future. + // + // This functionality is not supported for directory buckets. ConfirmRemoveSelfBucketAccess *bool - // The MD5 hash of the request body. For requests made using the Amazon Web - // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is - // calculated automatically. This functionality is not supported for directory - // buckets. + // The MD5 hash of the request body. + // + // For requests made using the Amazon Web Services Command Line Interface (CLI) or + // Amazon Web Services SDKs, this field is calculated automatically. + // + // This functionality is not supported for directory buckets. ContentMD5 *string // The account ID of the expected bucket owner. If the account ID that you provide // does not match the actual owner of the bucket, the request fails with the HTTP - // status code 403 Forbidden (access denied). For directory buckets, this header - // is not supported in this API operation. If you specify this header, the request - // fails with the HTTP status code 501 Not Implemented . + // status code 403 Forbidden (access denied). + // + // For directory buckets, this header is not supported in this API operation. If + // you specify this header, the request fails with the HTTP status code 501 Not + // Implemented . 
ExpectedBucketOwner *string noSmithyDocumentSerde } func (in *PutBucketPolicyInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -190,6 +230,9 @@ func (c *Client) addOperationPutBucketPolicyMiddlewares(stack *middleware.Stack, if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -205,6 +248,21 @@ func (c *Client) addOperationPutBucketPolicyMiddlewares(stack *middleware.Stack, if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addRequestChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutBucketPolicyValidationMiddleware(stack); err != nil { return err } @@ -244,6 +302,18 @@ func (c *Client) addOperationPutBucketPolicyMiddlewares(stack *middleware.Stack, if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } @@ -273,9 +343,10 @@ func getPutBucketPolicyRequestAlgorithmMember(input interface{}) (string, bool) } func addPutBucketPolicyInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ GetAlgorithm: getPutBucketPolicyRequestAlgorithmMember, RequireChecksum: true, + RequestChecksumCalculation: options.RequestChecksumCalculation, EnableTrailingChecksum: false, EnableComputeSHA256PayloadHash: true, EnableDecodedContentLengthHeader: true, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketReplication.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketReplication.go index bf59164c0..1b11ef399 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketReplication.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketReplication.go @@ -15,47 +15,71 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Creates a replication -// configuration or replaces an existing one. For more information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) -// in the Amazon S3 User Guide. Specify the replication configuration in the -// request body. In the replication configuration, you provide the name of the -// destination bucket or buckets where you want Amazon S3 to replicate objects, the -// IAM role that Amazon S3 can assume to replicate objects on your behalf, and -// other relevant information. 
You can invoke this request for a specific Amazon -// Web Services Region by using the aws:RequestedRegion (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_condition-keys.html#condition-keys-requestedregion) -// condition key. A replication configuration must include at least one rule, and -// can contain a maximum of 1,000. Each rule identifies a subset of objects to -// replicate by filtering the objects in the source bucket. To choose additional -// subsets of objects to replicate, add a rule for each subset. To specify a subset -// of the objects in the source bucket to apply a replication rule to, add the -// Filter element as a child of the Rule element. You can filter objects based on -// an object key prefix, one or more object tags, or both. When you add the Filter -// element in the configuration, you must also add the following elements: -// DeleteMarkerReplication , Status , and Priority . If you are using an earlier -// version of the replication configuration, Amazon S3 handles replication of -// delete markers differently. For more information, see Backward Compatibility (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations) -// . For information about enabling versioning on a bucket, see Using Versioning (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html) -// . Handling Replication of Encrypted Objects By default, Amazon S3 doesn't +// This operation is not supported for directory buckets. +// +// Creates a replication configuration or replaces an existing one. For more +// information, see [Replication]in the Amazon S3 User Guide. +// +// Specify the replication configuration in the request body. In the replication +// configuration, you provide the name of the destination bucket or buckets where +// you want Amazon S3 to replicate objects, the IAM role that Amazon S3 can assume +// to replicate objects on your behalf, and other relevant information. You can +// invoke this request for a specific Amazon Web Services Region by using the [aws:RequestedRegion] +// aws:RequestedRegion condition key. +// +// A replication configuration must include at least one rule, and can contain a +// maximum of 1,000. Each rule identifies a subset of objects to replicate by +// filtering the objects in the source bucket. To choose additional subsets of +// objects to replicate, add a rule for each subset. +// +// To specify a subset of the objects in the source bucket to apply a replication +// rule to, add the Filter element as a child of the Rule element. You can filter +// objects based on an object key prefix, one or more object tags, or both. When +// you add the Filter element in the configuration, you must also add the following +// elements: DeleteMarkerReplication , Status , and Priority . +// +// If you are using an earlier version of the replication configuration, Amazon S3 +// handles replication of delete markers differently. For more information, see [Backward Compatibility]. +// +// For information about enabling versioning on a bucket, see [Using Versioning]. +// +// Handling Replication of Encrypted Objects By default, Amazon S3 doesn't // replicate objects that are stored at rest using server-side encryption with KMS // keys. To replicate Amazon Web Services KMS-encrypted objects, add the following: // SourceSelectionCriteria , SseKmsEncryptedObjects , Status , // EncryptionConfiguration , and ReplicaKmsKeyID . 
For information about -// replication configuration, see Replicating Objects Created with SSE Using KMS -// keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-config-for-kms-objects.html) -// . For information on PutBucketReplication errors, see List of -// replication-related error codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ReplicationErrorCodeList) +// replication configuration, see [Replicating Objects Created with SSE Using KMS keys]. +// +// For information on PutBucketReplication errors, see [List of replication-related error codes] +// // Permissions To create a PutBucketReplication request, you must have -// s3:PutReplicationConfiguration permissions for the bucket. By default, a -// resource owner, in this case the Amazon Web Services account that created the -// bucket, can perform this operation. The resource owner can also grant others -// permissions to perform the operation. For more information about permissions, -// see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// . To perform this operation, the user or role performing the action must have -// the iam:PassRole (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_passrole.html) -// permission. The following operations are related to PutBucketReplication : -// - GetBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketReplication.html) -// - DeleteBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketReplication.html) +// s3:PutReplicationConfiguration permissions for the bucket. +// +// By default, a resource owner, in this case the Amazon Web Services account that +// created the bucket, can perform this operation. The resource owner can also +// grant others permissions to perform the operation. For more information about +// permissions, see [Specifying Permissions in a Policy]and [Managing Access Permissions to Your Amazon S3 Resources]. +// +// To perform this operation, the user or role performing the action must have the [iam:PassRole] +// permission. 
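For reference, a minimal sketch (not taken from this patch) of driving PutBucketReplication through the v2 client, wiring up the rule elements the documentation above names (Filter, DeleteMarkerReplication, Status, Priority, Destination). Bucket names and the role ARN are placeholders, and the Filter shape is shown as the struct used by recent SDK releases:

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)
	_, err = client.PutBucketReplication(ctx, &s3.PutBucketReplicationInput{
		Bucket: aws.String("amzn-s3-demo-source-bucket"), // placeholder
		ReplicationConfiguration: &types.ReplicationConfiguration{
			// The IAM role S3 assumes to replicate on your behalf; the
			// caller needs iam:PassRole for it, per the permissions note.
			Role: aws.String("arn:aws:iam::111122223333:role/replication-role"),
			Rules: []types.ReplicationRule{{
				Status:   types.ReplicationRuleStatusEnabled,
				Priority: aws.Int32(1),
				// Filter selects the subset of objects the rule applies to.
				Filter: &types.ReplicationRuleFilter{Prefix: aws.String("logs/")},
				DeleteMarkerReplication: &types.DeleteMarkerReplication{
					Status: types.DeleteMarkerReplicationStatusDisabled,
				},
				Destination: &types.Destination{
					Bucket: aws.String("arn:aws:s3:::amzn-s3-demo-destination-bucket"),
				},
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}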
+// +// The following operations are related to PutBucketReplication : +// +// [GetBucketReplication] +// +// [DeleteBucketReplication] +// +// [iam:PassRole]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_passrole.html +// [GetBucketReplication]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketReplication.html +// [aws:RequestedRegion]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_condition-keys.html#condition-keys-requestedregion +// [Replicating Objects Created with SSE Using KMS keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-config-for-kms-objects.html +// [Using Versioning]: https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html +// [Replication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html +// [List of replication-related error codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ReplicationErrorCodeList +// [Backward Compatibility]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations +// [DeleteBucketReplication]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketReplication.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html +// [Specifying Permissions in a Policy]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html func (c *Client) PutBucketReplication(ctx context.Context, params *PutBucketReplicationInput, optFns ...func(*Options)) (*PutBucketReplicationOutput, error) { if params == nil { params = &PutBucketReplicationInput{} @@ -84,21 +108,27 @@ type PutBucketReplicationInput struct { // This member is required. ReplicationConfiguration *types.ReplicationConfiguration - // Indicates the algorithm used to create the checksum for the object when you use - // the SDK. This header will not provide any additional functionality if you don't - // use the SDK. When you send this header, there must be a corresponding + // Indicates the algorithm used to create the checksum for the request when you + // use the SDK. This header will not provide any additional functionality if you + // don't use the SDK. When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm - // The base64-encoded 128-bit MD5 digest of the data. You must use this header as + // The Base64 encoded 128-bit MD5 digest of the data. You must use this header as // a message integrity check to verify that the request body was not corrupted in - // transit. For more information, see RFC 1864 (http://www.ietf.org/rfc/rfc1864.txt) - // . 
For requests made using the Amazon Web Services Command Line Interface (CLI) - // or Amazon Web Services SDKs, this field is calculated automatically. + // transit. For more information, see [RFC 1864]. + // + // For requests made using the Amazon Web Services Command Line Interface (CLI) or + // Amazon Web Services SDKs, this field is calculated automatically. + // + // [RFC 1864]: http://www.ietf.org/rfc/rfc1864.txt ContentMD5 *string // The account ID of the expected bucket owner. If the account ID that you provide @@ -113,6 +143,7 @@ type PutBucketReplicationInput struct { } func (in *PutBucketReplicationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -167,6 +198,9 @@ func (c *Client) addOperationPutBucketReplicationMiddlewares(stack *middleware.S if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -182,6 +216,21 @@ func (c *Client) addOperationPutBucketReplicationMiddlewares(stack *middleware.S if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addRequestChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutBucketReplicationValidationMiddleware(stack); err != nil { return err } @@ -221,6 +270,18 @@ func (c *Client) addOperationPutBucketReplicationMiddlewares(stack *middleware.S if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } @@ -250,9 +311,10 @@ func getPutBucketReplicationRequestAlgorithmMember(input interface{}) (string, b } func addPutBucketReplicationInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ GetAlgorithm: getPutBucketReplicationRequestAlgorithmMember, RequireChecksum: true, + RequestChecksumCalculation: options.RequestChecksumCalculation, EnableTrailingChecksum: false, EnableComputeSHA256PayloadHash: true, EnableDecodedContentLengthHeader: true, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketRequestPayment.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketRequestPayment.go index 07e0f1639..a6d19c609 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketRequestPayment.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketRequestPayment.go @@ -15,14 +15,22 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Sets the request payment -// configuration for a bucket. By default, the bucket owner pays for downloads from -// the bucket. 
This configuration parameter enables the bucket owner (only) to -// specify that the person requesting the download will be charged for the -// download. For more information, see Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html) -// . The following operations are related to PutBucketRequestPayment : -// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) -// - GetBucketRequestPayment (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketRequestPayment.html) +// This operation is not supported for directory buckets. +// +// Sets the request payment configuration for a bucket. By default, the bucket +// owner pays for downloads from the bucket. This configuration parameter enables +// the bucket owner (only) to specify that the person requesting the download will +// be charged for the download. For more information, see [Requester Pays Buckets]. +// +// The following operations are related to PutBucketRequestPayment : +// +// [CreateBucket] +// +// [GetBucketRequestPayment] +// +// [GetBucketRequestPayment]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketRequestPayment.html +// [Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html +// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html func (c *Client) PutBucketRequestPayment(ctx context.Context, params *PutBucketRequestPaymentInput, optFns ...func(*Options)) (*PutBucketRequestPaymentOutput, error) { if params == nil { params = &PutBucketRequestPaymentInput{} @@ -50,21 +58,27 @@ type PutBucketRequestPaymentInput struct { // This member is required. RequestPaymentConfiguration *types.RequestPaymentConfiguration - // Indicates the algorithm used to create the checksum for the object when you use - // the SDK. This header will not provide any additional functionality if you don't - // use the SDK. When you send this header, there must be a corresponding + // Indicates the algorithm used to create the checksum for the request when you + // use the SDK. This header will not provide any additional functionality if you + // don't use the SDK. When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm - // The base64-encoded 128-bit MD5 digest of the data. You must use this header as + // The Base64 encoded 128-bit MD5 digest of the data. You must use this header as // a message integrity check to verify that the request body was not corrupted in - // transit. For more information, see RFC 1864 (http://www.ietf.org/rfc/rfc1864.txt) - // . 
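A companion sketch for the request payment configuration described above, switching the payer from the default BucketOwner to Requester (assumes the same imports and client setup as the replication sketch earlier):

func enableRequesterPays(ctx context.Context, client *s3.Client, bucket string) error {
	// By default the bucket owner pays for downloads; Requester shifts
	// the download cost to the requesting account.
	_, err := client.PutBucketRequestPayment(ctx, &s3.PutBucketRequestPaymentInput{
		Bucket: aws.String(bucket),
		RequestPaymentConfiguration: &types.RequestPaymentConfiguration{
			Payer: types.PayerRequester,
		},
	})
	return err
}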
For requests made using the Amazon Web Services Command Line Interface (CLI) - // or Amazon Web Services SDKs, this field is calculated automatically. + // transit. For more information, see [RFC 1864]. + // + // For requests made using the Amazon Web Services Command Line Interface (CLI) or + // Amazon Web Services SDKs, this field is calculated automatically. + // + // [RFC 1864]: http://www.ietf.org/rfc/rfc1864.txt ContentMD5 *string // The account ID of the expected bucket owner. If the account ID that you provide @@ -76,6 +90,7 @@ type PutBucketRequestPaymentInput struct { } func (in *PutBucketRequestPaymentInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -130,6 +145,9 @@ func (c *Client) addOperationPutBucketRequestPaymentMiddlewares(stack *middlewar if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -145,6 +163,21 @@ func (c *Client) addOperationPutBucketRequestPaymentMiddlewares(stack *middlewar if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addRequestChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutBucketRequestPaymentValidationMiddleware(stack); err != nil { return err } @@ -184,6 +217,18 @@ func (c *Client) addOperationPutBucketRequestPaymentMiddlewares(stack *middlewar if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } @@ -213,9 +258,10 @@ func getPutBucketRequestPaymentRequestAlgorithmMember(input interface{}) (string } func addPutBucketRequestPaymentInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ GetAlgorithm: getPutBucketRequestPaymentRequestAlgorithmMember, RequireChecksum: true, + RequestChecksumCalculation: options.RequestChecksumCalculation, EnableTrailingChecksum: false, EnableComputeSHA256PayloadHash: true, EnableDecodedContentLengthHeader: true, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketTagging.go index 0f0a6fd40..db5e62dfd 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketTagging.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketTagging.go @@ -15,39 +15,54 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Sets the tags for a -// bucket. Use tags to organize your Amazon Web Services bill to reflect your own -// cost structure. 
To do this, sign up to get your Amazon Web Services account bill -// with tag key values included. Then, to see the cost of combined resources, -// organize your billing information according to resources with the same tag key -// values. For example, you can tag several resources with a specific application -// name, and then organize your billing information to see the total cost of that -// application across several services. For more information, see Cost Allocation -// and Tagging (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html) -// and Using Cost Allocation in Amazon S3 Bucket Tags (https://docs.aws.amazon.com/AmazonS3/latest/userguide/CostAllocTagging.html) -// . When this operation sets the tags for a bucket, it will overwrite any current +// This operation is not supported for directory buckets. +// +// Sets the tags for a bucket. +// +// Use tags to organize your Amazon Web Services bill to reflect your own cost +// structure. To do this, sign up to get your Amazon Web Services account bill with +// tag key values included. Then, to see the cost of combined resources, organize +// your billing information according to resources with the same tag key values. +// For example, you can tag several resources with a specific application name, and +// then organize your billing information to see the total cost of that application +// across several services. For more information, see [Cost Allocation and Tagging]and [Using Cost Allocation in Amazon S3 Bucket Tags]. +// +// When this operation sets the tags for a bucket, it will overwrite any current // tags the bucket already has. You cannot use this operation to add tags to an -// existing list of tags. To use this operation, you must have permissions to -// perform the s3:PutBucketTagging action. The bucket owner has this permission by -// default and can grant this permission to others. For more information about -// permissions, see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// . PutBucketTagging has the following special errors. For more Amazon S3 errors -// see, Error Responses (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html) -// . +// existing list of tags. +// +// To use this operation, you must have permissions to perform the +// s3:PutBucketTagging action. The bucket owner has this permission by default and +// can grant this permission to others. For more information about permissions, see +// [Permissions Related to Bucket Subresource Operations]and [Managing Access Permissions to Your Amazon S3 Resources]. +// +// PutBucketTagging has the following special errors. For more Amazon S3 errors +// see, [Error Responses]. +// // - InvalidTag - The tag provided was not a valid tag. This error can occur if -// the tag did not pass input validation. For more information, see Using Cost -// Allocation in Amazon S3 Bucket Tags (https://docs.aws.amazon.com/AmazonS3/latest/userguide/CostAllocTagging.html) -// . +// the tag did not pass input validation. For more information, see [Using Cost Allocation in Amazon S3 Bucket Tags]. +// // - MalformedXML - The XML provided does not match the schema. 
+// // - OperationAborted - A conflicting conditional action is currently in progress // against this resource. Please try again. +// // - InternalError - The service was unable to apply the provided tag to the // bucket. // // The following operations are related to PutBucketTagging : -// - GetBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html) -// - DeleteBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html) +// +// [GetBucketTagging] +// +// [DeleteBucketTagging] +// +// [Error Responses]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html +// [GetBucketTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html +// [Cost Allocation and Tagging]: https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [DeleteBucketTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html +// [Using Cost Allocation in Amazon S3 Bucket Tags]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/CostAllocTagging.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html func (c *Client) PutBucketTagging(ctx context.Context, params *PutBucketTaggingInput, optFns ...func(*Options)) (*PutBucketTaggingOutput, error) { if params == nil { params = &PutBucketTaggingInput{} @@ -75,21 +90,27 @@ type PutBucketTaggingInput struct { // This member is required. Tagging *types.Tagging - // Indicates the algorithm used to create the checksum for the object when you use - // the SDK. This header will not provide any additional functionality if you don't - // use the SDK. When you send this header, there must be a corresponding + // Indicates the algorithm used to create the checksum for the request when you + // use the SDK. This header will not provide any additional functionality if you + // don't use the SDK. When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm - // The base64-encoded 128-bit MD5 digest of the data. You must use this header as + // The Base64 encoded 128-bit MD5 digest of the data. You must use this header as // a message integrity check to verify that the request body was not corrupted in - // transit. For more information, see RFC 1864 (http://www.ietf.org/rfc/rfc1864.txt) - // . 
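A sketch of setting cost-allocation tags as described above; because the operation overwrites the bucket's current tag set, the full set of tags to keep must be sent. Tag keys and values are placeholders, and the helper assumes the imports from the earlier sketch:

func tagBucket(ctx context.Context, client *s3.Client, bucket string) error {
	_, err := client.PutBucketTagging(ctx, &s3.PutBucketTaggingInput{
		Bucket: aws.String(bucket),
		// This is a full replacement: include every tag the bucket
		// should keep, not just the new ones.
		Tagging: &types.Tagging{
			TagSet: []types.Tag{
				{Key: aws.String("application"), Value: aws.String("billing-demo")},
				{Key: aws.String("cost-center"), Value: aws.String("12345")},
			},
		},
	})
	return err
}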
For requests made using the Amazon Web Services Command Line Interface (CLI) - // or Amazon Web Services SDKs, this field is calculated automatically. + // transit. For more information, see [RFC 1864]. + // + // For requests made using the Amazon Web Services Command Line Interface (CLI) or + // Amazon Web Services SDKs, this field is calculated automatically. + // + // [RFC 1864]: http://www.ietf.org/rfc/rfc1864.txt ContentMD5 *string // The account ID of the expected bucket owner. If the account ID that you provide @@ -101,6 +122,7 @@ type PutBucketTaggingInput struct { } func (in *PutBucketTaggingInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -155,6 +177,9 @@ func (c *Client) addOperationPutBucketTaggingMiddlewares(stack *middleware.Stack if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -170,6 +195,21 @@ func (c *Client) addOperationPutBucketTaggingMiddlewares(stack *middleware.Stack if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addRequestChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutBucketTaggingValidationMiddleware(stack); err != nil { return err } @@ -209,6 +249,18 @@ func (c *Client) addOperationPutBucketTaggingMiddlewares(stack *middleware.Stack if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } @@ -238,9 +290,10 @@ func getPutBucketTaggingRequestAlgorithmMember(input interface{}) (string, bool) } func addPutBucketTaggingInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ GetAlgorithm: getPutBucketTaggingRequestAlgorithmMember, RequireChecksum: true, + RequestChecksumCalculation: options.RequestChecksumCalculation, EnableTrailingChecksum: false, EnableComputeSHA256PayloadHash: true, EnableDecodedContentLengthHeader: true, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketVersioning.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketVersioning.go index 495725cea..aa30d5fad 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketVersioning.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketVersioning.go @@ -15,28 +15,54 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Sets the versioning state -// of an existing bucket. You can set the versioning state with one of the -// following values: Enabled—Enables versioning for the objects in the bucket. 
All -// objects added to the bucket receive a unique version ID. Suspended—Disables -// versioning for the objects in the bucket. All objects added to the bucket -// receive the version ID null. If the versioning state has never been set on a -// bucket, it has no versioning state; a GetBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html) -// request does not return a versioning state value. In order to enable MFA Delete, -// you must be the bucket owner. If you are the bucket owner and want to enable MFA -// Delete in the bucket versioning configuration, you must include the x-amz-mfa -// request header and the Status and the MfaDelete request elements in a request -// to set the versioning state of the bucket. If you have an object expiration -// lifecycle configuration in your non-versioned bucket and you want to maintain -// the same permanent delete behavior when you enable versioning, you must add a -// noncurrent expiration policy. The noncurrent expiration lifecycle configuration -// will manage the deletes of the noncurrent object versions in the version-enabled -// bucket. (A version-enabled bucket maintains one current and zero or more -// noncurrent object versions.) For more information, see Lifecycle and Versioning (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-and-other-bucket-config) -// . The following operations are related to PutBucketVersioning : -// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) -// - DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) -// - GetBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html) +// This operation is not supported for directory buckets. +// +// When you enable versioning on a bucket for the first time, it might take a +// short amount of time for the change to be fully propagated. While this change is +// propagating, you might encounter intermittent HTTP 404 NoSuchKey errors for +// requests to objects created or updated after enabling versioning. We recommend +// that you wait for 15 minutes after enabling versioning before issuing write +// operations ( PUT or DELETE ) on objects in the bucket. +// +// Sets the versioning state of an existing bucket. +// +// You can set the versioning state with one of the following values: +// +// Enabled—Enables versioning for the objects in the bucket. All objects added to +// the bucket receive a unique version ID. +// +// Suspended—Disables versioning for the objects in the bucket. All objects added +// to the bucket receive the version ID null. +// +// If the versioning state has never been set on a bucket, it has no versioning +// state; a [GetBucketVersioning]request does not return a versioning state value. +// +// In order to enable MFA Delete, you must be the bucket owner. If you are the +// bucket owner and want to enable MFA Delete in the bucket versioning +// configuration, you must include the x-amz-mfa request header and the Status and +// the MfaDelete request elements in a request to set the versioning state of the +// bucket. +// +// If you have an object expiration lifecycle configuration in your non-versioned +// bucket and you want to maintain the same permanent delete behavior when you +// enable versioning, you must add a noncurrent expiration policy. 
The noncurrent +// expiration lifecycle configuration will manage the deletes of the noncurrent +// object versions in the version-enabled bucket. (A version-enabled bucket +// maintains one current and zero or more noncurrent object versions.) For more +// information, see [Lifecycle and Versioning]. +// +// The following operations are related to PutBucketVersioning : +// +// [CreateBucket] +// +// [DeleteBucket] +// +// [GetBucketVersioning] +// +// [DeleteBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html +// [CreateBucket]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html +// [Lifecycle and Versioning]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-and-other-bucket-config +// [GetBucketVersioning]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html func (c *Client) PutBucketVersioning(ctx context.Context, params *PutBucketVersioningInput, optFns ...func(*Options)) (*PutBucketVersioningOutput, error) { if params == nil { params = &PutBucketVersioningInput{} @@ -64,21 +90,27 @@ type PutBucketVersioningInput struct { // This member is required. VersioningConfiguration *types.VersioningConfiguration - // Indicates the algorithm used to create the checksum for the object when you use - // the SDK. This header will not provide any additional functionality if you don't - // use the SDK. When you send this header, there must be a corresponding + // Indicates the algorithm used to create the checksum for the request when you + // use the SDK. This header will not provide any additional functionality if you + // don't use the SDK. When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm - // >The base64-encoded 128-bit MD5 digest of the data. You must use this header as + // >The Base64 encoded 128-bit MD5 digest of the data. You must use this header as // a message integrity check to verify that the request body was not corrupted in - // transit. For more information, see RFC 1864 (http://www.ietf.org/rfc/rfc1864.txt) - // . For requests made using the Amazon Web Services Command Line Interface (CLI) - // or Amazon Web Services SDKs, this field is calculated automatically. + // transit. For more information, see [RFC 1864]. + // + // For requests made using the Amazon Web Services Command Line Interface (CLI) or + // Amazon Web Services SDKs, this field is calculated automatically. + // + // [RFC 1864]: http://www.ietf.org/rfc/rfc1864.txt ContentMD5 *string // The account ID of the expected bucket owner. 
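A sketch of enabling versioning per the states described above (Enabled or Suspended); MFA delete is left unset here, since supplying it also requires the x-amz-mfa header via the input's MFA field. Assumes the imports from the earlier sketch:

func enableVersioning(ctx context.Context, client *s3.Client, bucket string) error {
	_, err := client.PutBucketVersioning(ctx, &s3.PutBucketVersioningInput{
		Bucket: aws.String(bucket),
		VersioningConfiguration: &types.VersioningConfiguration{
			// Enabled gives every new object a unique version ID;
			// Suspended makes new objects use the version ID null.
			Status: types.BucketVersioningStatusEnabled,
		},
	})
	return err
}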
If the account ID that you provide @@ -94,6 +126,7 @@ type PutBucketVersioningInput struct { } func (in *PutBucketVersioningInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -148,6 +181,9 @@ func (c *Client) addOperationPutBucketVersioningMiddlewares(stack *middleware.St if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -163,6 +199,21 @@ func (c *Client) addOperationPutBucketVersioningMiddlewares(stack *middleware.St if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addRequestChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutBucketVersioningValidationMiddleware(stack); err != nil { return err } @@ -202,6 +253,18 @@ func (c *Client) addOperationPutBucketVersioningMiddlewares(stack *middleware.St if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } @@ -231,9 +294,10 @@ func getPutBucketVersioningRequestAlgorithmMember(input interface{}) (string, bo } func addPutBucketVersioningInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ GetAlgorithm: getPutBucketVersioningRequestAlgorithmMember, RequireChecksum: true, + RequestChecksumCalculation: options.RequestChecksumCalculation, EnableTrailingChecksum: false, EnableComputeSHA256PayloadHash: true, EnableDecodedContentLengthHeader: true, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketWebsite.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketWebsite.go index 08c8a582f..169b24bcc 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketWebsite.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketWebsite.go @@ -15,21 +15,29 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Sets the configuration of -// the website that is specified in the website subresource. To configure a bucket -// as a website, you can add this subresource on the bucket with website -// configuration information such as the file name of the index document and any -// redirect rules. For more information, see Hosting Websites on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html) -// . This PUT action requires the S3:PutBucketWebsite permission. By default, only +// This operation is not supported for directory buckets. +// +// Sets the configuration of the website that is specified in the website +// subresource. 
To configure a bucket as a website, you can add this subresource on +// the bucket with website configuration information such as the file name of the +// index document and any redirect rules. For more information, see [Hosting Websites on Amazon S3]. +// +// This PUT action requires the S3:PutBucketWebsite permission. By default, only // the bucket owner can configure the website attached to a bucket; however, bucket // owners can allow other users to set the website configuration by writing a -// bucket policy that grants them the S3:PutBucketWebsite permission. To redirect -// all website requests sent to the bucket's website endpoint, you add a website -// configuration with the following elements. Because all requests are sent to -// another website, you don't need to provide index document name for the bucket. +// bucket policy that grants them the S3:PutBucketWebsite permission. +// +// To redirect all website requests sent to the bucket's website endpoint, you add +// a website configuration with the following elements. Because all requests are +// sent to another website, you don't need to provide index document name for the +// bucket. +// // - WebsiteConfiguration +// // - RedirectAllRequestsTo +// // - HostName +// // - Protocol // // If you want granular control over redirects, you can use the following elements @@ -37,27 +45,47 @@ import ( // information about the redirect destination. In this case, the website // configuration must provide an index document for the bucket, because some // requests might not be redirected. +// // - WebsiteConfiguration +// // - IndexDocument +// // - Suffix +// // - ErrorDocument +// // - Key +// // - RoutingRules +// // - RoutingRule +// // - Condition +// // - HttpErrorCodeReturnedEquals +// // - KeyPrefixEquals +// // - Redirect +// // - Protocol +// // - HostName +// // - ReplaceKeyPrefixWith +// // - ReplaceKeyWith +// // - HttpRedirectCode // // Amazon S3 has a limitation of 50 routing rules per website configuration. If // you require more than 50 routing rules, you can use object redirect. For more -// information, see Configuring an Object Redirect (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html) -// in the Amazon S3 User Guide. The maximum request length is limited to 128 KB. +// information, see [Configuring an Object Redirect]in the Amazon S3 User Guide. +// +// The maximum request length is limited to 128 KB. +// +// [Hosting Websites on Amazon S3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html +// [Configuring an Object Redirect]: https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html func (c *Client) PutBucketWebsite(ctx context.Context, params *PutBucketWebsiteInput, optFns ...func(*Options)) (*PutBucketWebsiteOutput, error) { if params == nil { params = &PutBucketWebsiteInput{} @@ -85,21 +113,27 @@ type PutBucketWebsiteInput struct { // This member is required. WebsiteConfiguration *types.WebsiteConfiguration - // Indicates the algorithm used to create the checksum for the object when you use - // the SDK. This header will not provide any additional functionality if you don't - // use the SDK. When you send this header, there must be a corresponding + // Indicates the algorithm used to create the checksum for the request when you + // use the SDK. This header will not provide any additional functionality if you + // don't use the SDK. When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. 
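A sketch of the redirect-all form described above: because every request is forwarded to another host, no index document is needed. The host name is a placeholder; imports as in the earlier sketch:

func redirectAllRequests(ctx context.Context, client *s3.Client, bucket string) error {
	_, err := client.PutBucketWebsite(ctx, &s3.PutBucketWebsiteInput{
		Bucket: aws.String(bucket),
		// WebsiteConfiguration > RedirectAllRequestsTo > HostName/Protocol,
		// mirroring the element list in the documentation above.
		WebsiteConfiguration: &types.WebsiteConfiguration{
			RedirectAllRequestsTo: &types.RedirectAllRequestsTo{
				HostName: aws.String("example.com"),
				Protocol: types.ProtocolHttps,
			},
		},
	})
	return err
}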
Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm - // The base64-encoded 128-bit MD5 digest of the data. You must use this header as + // The Base64 encoded 128-bit MD5 digest of the data. You must use this header as // a message integrity check to verify that the request body was not corrupted in - // transit. For more information, see RFC 1864 (http://www.ietf.org/rfc/rfc1864.txt) - // . For requests made using the Amazon Web Services Command Line Interface (CLI) - // or Amazon Web Services SDKs, this field is calculated automatically. + // transit. For more information, see [RFC 1864]. + // + // For requests made using the Amazon Web Services Command Line Interface (CLI) or + // Amazon Web Services SDKs, this field is calculated automatically. + // + // [RFC 1864]: http://www.ietf.org/rfc/rfc1864.txt ContentMD5 *string // The account ID of the expected bucket owner. If the account ID that you provide @@ -111,6 +145,7 @@ type PutBucketWebsiteInput struct { } func (in *PutBucketWebsiteInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -165,6 +200,9 @@ func (c *Client) addOperationPutBucketWebsiteMiddlewares(stack *middleware.Stack if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -180,6 +218,21 @@ func (c *Client) addOperationPutBucketWebsiteMiddlewares(stack *middleware.Stack if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addRequestChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutBucketWebsiteValidationMiddleware(stack); err != nil { return err } @@ -219,6 +272,18 @@ func (c *Client) addOperationPutBucketWebsiteMiddlewares(stack *middleware.Stack if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } @@ -248,9 +313,10 @@ func getPutBucketWebsiteRequestAlgorithmMember(input interface{}) (string, bool) } func addPutBucketWebsiteInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return 
internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ GetAlgorithm: getPutBucketWebsiteRequestAlgorithmMember, RequireChecksum: true, + RequestChecksumCalculation: options.RequestChecksumCalculation, EnableTrailingChecksum: false, EnableComputeSHA256PayloadHash: true, EnableDecodedContentLengthHeader: true, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObject.go index d57e0026e..cc0fb9e5f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObject.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObject.go @@ -18,51 +18,73 @@ import ( ) // Adds an object to a bucket. +// // - Amazon S3 never adds partial objects; if you receive a success response, // Amazon S3 added the entire object to the bucket. You cannot use PutObject to // only update a single piece of metadata for an existing object. You must put the // entire object with updated metadata if you want to update some values. +// // - If your bucket uses the bucket owner enforced setting for Object Ownership, // ACLs are disabled and no longer affect permissions. All objects written to the // bucket by any account will be owned by the bucket owner. +// // - Directory buckets - For directory buckets, you must make requests for this // API operation to the Zonal endpoint. These endpoints support // virtual-hosted-style requests in the format -// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . -// Path-style requests are not supported. For more information, see Regional and -// Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. +// https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name +// . Path-style requests are not supported. For more information about endpoints +// in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information +// about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. // // Amazon S3 is a distributed system. If it receives multiple write requests for // the same object simultaneously, it overwrites all but the last object written. // However, Amazon S3 provides features that can modify this behavior: +// // - S3 Object Lock - To prevent objects from being deleted or overwritten, you -// can use Amazon S3 Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html) -// in the Amazon S3 User Guide. This functionality is not supported for directory -// buckets. +// can use [Amazon S3 Object Lock]in the Amazon S3 User Guide. +// +// This functionality is not supported for directory buckets. +// +// - If-None-Match - Uploads the object only if the object key name does not +// already exist in the specified bucket. Otherwise, Amazon S3 returns a 412 +// Precondition Failed error. If a conflicting operation occurs during the +// upload, S3 returns a 409 ConditionalRequestConflict response. On a 409 +// failure, retry the upload. +// +// Expects the * character (asterisk). +// +// For more information, see [Add preconditions to S3 operations with conditional requests]in the Amazon S3 User Guide or [RFC 7232]. +// +// This functionality is not supported for S3 on Outposts. 
+// // - S3 Versioning - When you enable versioning for a bucket, if Amazon S3 // receives multiple write requests for the same object simultaneously, it stores // all versions of the objects. For each write request that is made to the same // object, Amazon S3 automatically generates a unique version ID of that object // being stored in Amazon S3. You can retrieve, replace, or delete any version of -// the object. For more information about versioning, see Adding Objects to -// Versioning-Enabled Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html) -// in the Amazon S3 User Guide. For information about returning the versioning -// state of a bucket, see GetBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html) -// . This functionality is not supported for directory buckets. +// the object. For more information about versioning, see [Adding Objects to Versioning-Enabled Buckets]in the Amazon S3 User +// Guide. For information about returning the versioning state of a bucket, see [GetBucketVersioning] +// . +// +// This functionality is not supported for directory buckets. // // Permissions +// // - General purpose bucket permissions - The following permissions are required // in your policies when your PutObject request includes specific headers. +// // - s3:PutObject - To successfully complete the PutObject request, you must // always have the s3:PutObject permission on a bucket to add an object to it. +// // - s3:PutObjectAcl - To successfully change the objects ACL of your PutObject // request, you must have the s3:PutObjectAcl . +// // - s3:PutObjectTagging - To successfully set the tag-set with your PutObject // request, you must have the s3:PutObjectTagging . +// // - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// API operation for session-based authorization. Specifically, you grant the +// directory bucket, we recommend that you use the [CreateSession]CreateSession API operation +// for session-based authorization. Specifically, you grant the // s3express:CreateSession permission to the directory bucket in a bucket policy // or an IAM identity-based policy. Then, you make the CreateSession API call on // the bucket to obtain a session token. With the session token in your request @@ -70,24 +92,44 @@ import ( // expires, you make another CreateSession API call to generate a new session // token for use. Amazon Web Services CLI or SDKs create session and refresh the // session token automatically to avoid service interruptions when a session -// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// . +// expires. For more information about authorization, see [CreateSession]CreateSession . +// +// If the object is encrypted with SSE-KMS, you must also have the +// +// kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies +// and KMS key policies for the KMS key. // // Data integrity with Content-MD5 +// // - General purpose bucket - To ensure that data is not corrupted traversing // the network, use the Content-MD5 header. When you use this header, Amazon S3 // checks the object against the provided MD5 value and, if they do not match, // Amazon S3 returns an error. 
Alternatively, when the object's ETag is its MD5 // digest, you can calculate the MD5 while putting the object to Amazon S3 and // compare the returned ETag to the calculated MD5 value. +// // - Directory bucket - This functionality is not supported for directory // buckets. // -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket_name.s3express-az_id.region.amazonaws.com . For more information about -// related Amazon S3 APIs, see the following: -// - CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) -// - DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket-name.s3express-zone-id.region-code.amazonaws.com . +// +// For more information about related Amazon S3 APIs, see the following: +// +// [CopyObject] +// +// [DeleteObject] +// +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [Amazon S3 Object Lock]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html +// [DeleteObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html +// [Adding Objects to Versioning-Enabled Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html +// [Add preconditions to S3 operations with conditional requests]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/conditional-requests.html +// [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html +// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html +// [RFC 7232]: https://datatracker.ietf.org/doc/rfc7232/ +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html +// [GetBucketVersioning]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html func (c *Client) PutObject(ctx context.Context, params *PutObjectInput, optFns ...func(*Options)) (*PutObjectOutput, error) { if params == nil { params = &PutObjectInput{} @@ -105,31 +147,39 @@ func (c *Client) PutObject(ctx context.Context, params *PutObjectInput, optFns . type PutObjectInput struct { - // The bucket name to which the PUT action was initiated. Directory buckets - When - // you use this operation with a directory bucket, you must use - // virtual-hosted-style requests in the format - // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not - // supported. Directory bucket names must be unique in the chosen Availability - // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for - // example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket - // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide. Access points - When you use this action with an - // access point, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When using the access point ARN, - // you must direct requests to the access point hostname. The access point hostname - // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. 
- // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. Access points and Object Lambda access points are - // not supported by directory buckets. S3 on Outposts - When you use this action - // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts - // hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // The bucket name to which the PUT action was initiated. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use virtual-hosted-style requests in the format + // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests + // are not supported. Directory bucket names must be unique in the chosen Zone + // (Availability Zone or Local Zone). Bucket names must follow the format + // bucket-base-name--zone-id--x-s3 (for example, + // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the access + // point ARN. When using the access point ARN, you must direct requests to the + // access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with S3 on Outposts, you must direct + // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the + // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When + // you use this action with S3 on Outposts, the destination bucket must be the + // Outposts access point ARN or the access point alias. For more information about + // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -139,26 +189,33 @@ type PutObjectInput struct { // This member is required. Key *string - // The canned ACL to apply to the object. 
For more information, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL) - // in the Amazon S3 User Guide. When adding a new object, you can use headers to - // grant ACL-based permissions to individual Amazon Web Services accounts or to - // predefined groups defined by Amazon S3. These permissions are then added to the - // ACL on the object. By default, all objects are private. Only the owner has full - // access control. For more information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) - // and Managing ACLs Using the REST API (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html) - // in the Amazon S3 User Guide. If the bucket that you're uploading objects to uses - // the bucket owner enforced setting for S3 Object Ownership, ACLs are disabled and - // no longer affect permissions. Buckets that use this setting only accept PUT - // requests that don't specify an ACL or PUT requests that specify bucket owner - // full control ACLs, such as the bucket-owner-full-control canned ACL or an - // equivalent form of this ACL expressed in the XML format. PUT requests that - // contain other ACLs (for example, custom grants to certain Amazon Web Services - // accounts) fail and return a 400 error with the error code - // AccessControlListNotSupported . For more information, see Controlling - // ownership of objects and disabling ACLs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) - // in the Amazon S3 User Guide. + // The canned ACL to apply to the object. For more information, see [Canned ACL] in the Amazon + // S3 User Guide. + // + // When adding a new object, you can use headers to grant ACL-based permissions to + // individual Amazon Web Services accounts or to predefined groups defined by + // Amazon S3. These permissions are then added to the ACL on the object. By + // default, all objects are private. Only the owner has full access control. For + // more information, see [Access Control List (ACL) Overview]and [Managing ACLs Using the REST API] in the Amazon S3 User Guide. + // + // If the bucket that you're uploading objects to uses the bucket owner enforced + // setting for S3 Object Ownership, ACLs are disabled and no longer affect + // permissions. Buckets that use this setting only accept PUT requests that don't + // specify an ACL or PUT requests that specify bucket owner full control ACLs, such + // as the bucket-owner-full-control canned ACL or an equivalent form of this ACL + // expressed in the XML format. PUT requests that contain other ACLs (for example, + // custom grants to certain Amazon Web Services accounts) fail and return a 400 + // error with the error code AccessControlListNotSupported . For more information, + // see [Controlling ownership of objects and disabling ACLs]in the Amazon S3 User Guide. + // // - This functionality is not supported for directory buckets. + // // - This functionality is not supported for Amazon S3 on Outposts. 
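For illustration only, a minimal sketch of the ACL member documented here; the bucket and key names are placeholders, credentials are assumed to come from the default provider chain, and bucket-owner-full-control is one of the canned ACLs that buckets with owner-enforced ownership still accept:

package main

import (
    "context"
    "log"
    "strings"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/config"
    "github.com/aws/aws-sdk-go-v2/service/s3"
    "github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
    cfg, err := config.LoadDefaultConfig(context.TODO())
    if err != nil {
        log.Fatal(err)
    }
    client := s3.NewFromConfig(cfg)

    // Grant the bucket owner full control via a canned ACL; this is one of
    // the PUT request forms that owner-enforced buckets still accept.
    _, err = client.PutObject(context.TODO(), &s3.PutObjectInput{
        Bucket: aws.String("amzn-s3-demo-bucket"), // placeholder
        Key:    aws.String("example-object"),      // placeholder
        Body:   strings.NewReader("hello"),
        ACL:    types.ObjectCannedACLBucketOwnerFullControl,
    })
    if err != nil {
        log.Fatal(err)
    }
}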
+ //
+ // [Managing ACLs Using the REST API]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html
+ // [Access Control List (ACL) Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html
+ // [Canned ACL]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL
+ // [Controlling ownership of objects and disabling ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html
ACL types.ObjectCannedACL

// Object data.
@@ -166,103 +223,148 @@ type PutObjectInput struct {

// Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption
// with server-side encryption using Key Management Service (KMS) keys (SSE-KMS).
- // Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object
- // encryption with SSE-KMS. Specifying this header with a PUT action doesn’t affect
- // bucket-level settings for S3 Bucket Key. This functionality is not supported for
- // directory buckets.
+ //
+ // General purpose buckets - Setting this header to true causes Amazon S3 to use
+ // an S3 Bucket Key for object encryption with SSE-KMS. Also, specifying this
+ // header with a PUT action doesn't affect bucket-level settings for S3 Bucket Key.
+ //
+ // Directory buckets - S3 Bucket Keys are always enabled for GET and PUT
+ // operations in a directory bucket and can’t be disabled. S3 Bucket Keys aren't
+ // supported when you copy SSE-KMS encrypted objects from general purpose buckets
+ // to directory buckets, from directory buckets to general purpose buckets, or
+ // between directory buckets, through [CopyObject], [UploadPartCopy], [the Copy operation in Batch Operations], or [the import jobs]. In this case, Amazon S3 makes a
+ // call to KMS every time a copy request is made for a KMS-encrypted object.
+ //
+ // [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html
+ // [the import jobs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/create-import-job
+ // [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html
+ // [the Copy operation in Batch Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-objects-Batch-Ops
BucketKeyEnabled *bool

// Can be used to specify caching behavior along the request/reply chain. For more
- // information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9)
- // .
+ // information, see [http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9].
+ //
+ // [http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9]: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9
CacheControl *string

// Indicates the algorithm used to create the checksum for the object when you use
// the SDK. This header will not provide any additional functionality if you don't
// use the SDK. When you send this header, there must be a corresponding
// x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon S3
- // fails the request with the HTTP status code 400 Bad Request . For the
- // x-amz-checksum-algorithm header, replace algorithm with the supported
- // algorithm from the following list:
+ // fails the request with the HTTP status code 400 Bad Request .
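One of the supported algorithms listed just below can be supplied through the ChecksumAlgorithm member. For illustration, a hedged sketch with CRC32; names are placeholders and the SDK then computes and sends the matching x-amz-checksum-crc32 header:

package main

import (
    "context"
    "log"
    "strings"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/config"
    "github.com/aws/aws-sdk-go-v2/service/s3"
    "github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
    cfg, err := config.LoadDefaultConfig(context.TODO())
    if err != nil {
        log.Fatal(err)
    }
    client := s3.NewFromConfig(cfg)

    // Opt in to SDK-computed checksums; S3 rejects the upload if the body is
    // corrupted in transit.
    out, err := client.PutObject(context.TODO(), &s3.PutObjectInput{
        Bucket:            aws.String("amzn-s3-demo-bucket"), // placeholder
        Key:               aws.String("checksummed-object"),  // placeholder
        Body:              strings.NewReader("payload"),
        ChecksumAlgorithm: types.ChecksumAlgorithmCrc32,
    })
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("stored CRC32: %s", aws.ToString(out.ChecksumCRC32))
}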
+ // + // For the x-amz-checksum-algorithm header, replace algorithm with the + // supported algorithm from the following list: + // // - CRC32 + // // - CRC32C + // + // - CRC64NVME + // // - SHA1 + // // - SHA256 - // For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If the individual checksum value you provide - // through x-amz-checksum-algorithm doesn't match the checksum algorithm you set - // through x-amz-sdk-checksum-algorithm , Amazon S3 ignores any provided - // ChecksumAlgorithm parameter and uses the checksum algorithm that matches the - // provided value in x-amz-checksum-algorithm . For directory buckets, when you - // use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's - // used for performance. + // + // For more information, see [Checking object integrity] in the Amazon S3 User Guide. + // + // If the individual checksum value you provide through x-amz-checksum-algorithm + // doesn't match the checksum algorithm you set through + // x-amz-sdk-checksum-algorithm , Amazon S3 fails the request with a BadDigest + // error. + // + // The Content-MD5 or x-amz-sdk-checksum-algorithm header is required for any + // request to upload an object with a retention period configured using Amazon S3 + // Object Lock. For more information, see [Uploading objects to an Object Lock enabled bucket]in the Amazon S3 User Guide. + // + // For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the + // default checksum algorithm that's used for performance. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + // [Uploading objects to an Object Lock enabled bucket]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock-managing.html#object-lock-put-object ChecksumAlgorithm types.ChecksumAlgorithm // This header can be used as a data integrity check to verify that the data // received is the same data that was originally sent. This header specifies the - // base64-encoded, 32-bit CRC32 checksum of the object. For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Base64 encoded, 32-bit CRC32 checksum of the object. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumCRC32 *string // This header can be used as a data integrity check to verify that the data // received is the same data that was originally sent. This header specifies the - // base64-encoded, 32-bit CRC32C checksum of the object. For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Base64 encoded, 32-bit CRC32C checksum of the object. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumCRC32C *string // This header can be used as a data integrity check to verify that the data // received is the same data that was originally sent. This header specifies the - // base64-encoded, 160-bit SHA-1 digest of the object. 
For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Base64 encoded, 64-bit CRC64NVME checksum of the object. The CRC64NVME checksum + // is always a full object checksum. For more information, see [Checking object integrity in the Amazon S3 User Guide]. + // + // [Checking object integrity in the Amazon S3 User Guide]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumCRC64NVME *string + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies the + // Base64 encoded, 160-bit SHA1 digest of the object. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumSHA1 *string // This header can be used as a data integrity check to verify that the data // received is the same data that was originally sent. This header specifies the - // base64-encoded, 256-bit SHA-256 digest of the object. For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Base64 encoded, 256-bit SHA256 digest of the object. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumSHA256 *string - // Specifies presentational information for the object. For more information, see - // https://www.rfc-editor.org/rfc/rfc6266#section-4 (https://www.rfc-editor.org/rfc/rfc6266#section-4) - // . + // Specifies presentational information for the object. For more information, see [https://www.rfc-editor.org/rfc/rfc6266#section-4]. + // + // [https://www.rfc-editor.org/rfc/rfc6266#section-4]: https://www.rfc-editor.org/rfc/rfc6266#section-4 ContentDisposition *string // Specifies what content encodings have been applied to the object and thus what // decoding mechanisms must be applied to obtain the media-type referenced by the - // Content-Type header field. For more information, see - // https://www.rfc-editor.org/rfc/rfc9110.html#field.content-encoding (https://www.rfc-editor.org/rfc/rfc9110.html#field.content-encoding) - // . + // Content-Type header field. For more information, see [https://www.rfc-editor.org/rfc/rfc9110.html#field.content-encoding]. + // + // [https://www.rfc-editor.org/rfc/rfc9110.html#field.content-encoding]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-encoding ContentEncoding *string // The language the content is in. ContentLanguage *string // Size of the body in bytes. This parameter is useful when the size of the body - // cannot be determined automatically. For more information, see - // https://www.rfc-editor.org/rfc/rfc9110.html#name-content-length (https://www.rfc-editor.org/rfc/rfc9110.html#name-content-length) - // . + // cannot be determined automatically. For more information, see [https://www.rfc-editor.org/rfc/rfc9110.html#name-content-length]. 
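The checksum members above also accept a caller-computed value, complementing the SDK-computed approach shown earlier. A hedged sketch, assuming the documented encoding (the Base64 of the big-endian, 32-bit IEEE CRC32 of the body); names are placeholders:

package main

import (
    "bytes"
    "context"
    "encoding/base64"
    "encoding/binary"
    "hash/crc32"
    "log"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/config"
    "github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
    data := []byte("payload")

    // Encode the CRC32 as four big-endian bytes, then Base64.
    sum := make([]byte, 4)
    binary.BigEndian.PutUint32(sum, crc32.ChecksumIEEE(data))
    encoded := base64.StdEncoding.EncodeToString(sum)

    cfg, err := config.LoadDefaultConfig(context.TODO())
    if err != nil {
        log.Fatal(err)
    }
    client := s3.NewFromConfig(cfg)

    // S3 verifies the supplied value against its own calculation and fails
    // the request on a mismatch.
    _, err = client.PutObject(context.TODO(), &s3.PutObjectInput{
        Bucket:        aws.String("amzn-s3-demo-bucket"), // placeholder
        Key:           aws.String("verified-object"),     // placeholder
        Body:          bytes.NewReader(data),
        ChecksumCRC32: aws.String(encoded),
    })
    if err != nil {
        log.Fatal(err)
    }
}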
+ // + // [https://www.rfc-editor.org/rfc/rfc9110.html#name-content-length]: https://www.rfc-editor.org/rfc/rfc9110.html#name-content-length ContentLength *int64 - // The base64-encoded 128-bit MD5 digest of the message (without the headers) + // The Base64 encoded 128-bit MD5 digest of the message (without the headers) // according to RFC 1864. This header can be used as a message integrity check to // verify that the data is the same data that was originally sent. Although it is // optional, we recommend using the Content-MD5 mechanism as an end-to-end - // integrity check. For more information about REST request authentication, see - // REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html) - // . The Content-MD5 header is required for any request to upload an object with a - // retention period configured using Amazon S3 Object Lock. For more information - // about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // integrity check. For more information about REST request authentication, see [REST Authentication]. + // + // The Content-MD5 or x-amz-sdk-checksum-algorithm header is required for any + // request to upload an object with a retention period configured using Amazon S3 + // Object Lock. For more information, see [Uploading objects to an Object Lock enabled bucket]in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + // + // [REST Authentication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html + // [Uploading objects to an Object Lock enabled bucket]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock-managing.html#object-lock-put-object ContentMD5 *string // A standard MIME type describing the format of the contents. For more - // information, see https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type (https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type) - // . + // information, see [https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type]. + // + // [https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type]: https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type ContentType *string // The account ID of the expected bucket owner. If the account ID that you provide @@ -271,146 +373,280 @@ type PutObjectInput struct { ExpectedBucketOwner *string // The date and time at which the object is no longer cacheable. For more - // information, see https://www.rfc-editor.org/rfc/rfc7234#section-5.3 (https://www.rfc-editor.org/rfc/rfc7234#section-5.3) - // . + // information, see [https://www.rfc-editor.org/rfc/rfc7234#section-5.3]. + // + // [https://www.rfc-editor.org/rfc/rfc7234#section-5.3]: https://www.rfc-editor.org/rfc/rfc7234#section-5.3 Expires *time.Time // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. + // // - This functionality is not supported for directory buckets. + // // - This functionality is not supported for Amazon S3 on Outposts. GrantFullControl *string // Allows grantee to read the object data and its metadata. + // // - This functionality is not supported for directory buckets. + // // - This functionality is not supported for Amazon S3 on Outposts. GrantRead *string // Allows grantee to read the object ACL. + // // - This functionality is not supported for directory buckets. 
+ //
// - This functionality is not supported for Amazon S3 on Outposts.
GrantReadACP *string

// Allows grantee to write the ACL for the applicable object.
+ //
// - This functionality is not supported for directory buckets.
+ //
// - This functionality is not supported for Amazon S3 on Outposts.
GrantWriteACP *string

+ // Uploads the object only if the ETag (entity tag) value provided during the
+ // WRITE operation matches the ETag of the object in S3. If the ETag values do not
+ // match, the operation returns a 412 Precondition Failed error.
+ //
+ // If a conflicting operation occurs during the upload, S3 returns a 409
+ // ConditionalRequestConflict response. On a 409 failure, you should fetch the
+ // object's ETag and retry the upload.
+ //
+ // Expects the ETag value as a string.
+ //
+ // For more information about conditional requests, see [RFC 7232], or [Conditional requests] in the Amazon S3
+ // User Guide.
+ //
+ // [Conditional requests]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/conditional-requests.html
+ // [RFC 7232]: https://tools.ietf.org/html/rfc7232
+ IfMatch *string
+
+ // Uploads the object only if the object key name does not already exist in the
+ // bucket specified. Otherwise, Amazon S3 returns a 412 Precondition Failed error.
+ //
+ // If a conflicting operation occurs during the upload, S3 returns a 409
+ // ConditionalRequestConflict response. On a 409 failure, you should retry the
+ // upload.
+ //
+ // Expects the '*' (asterisk) character.
+ //
+ // For more information about conditional requests, see [RFC 7232], or [Conditional requests] in the Amazon S3
+ // User Guide.
+ //
+ // [Conditional requests]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/conditional-requests.html
+ // [RFC 7232]: https://tools.ietf.org/html/rfc7232
+ IfNoneMatch *string
+
// A map of metadata to store with the object in S3.
Metadata map[string]string

// Specifies whether a legal hold will be applied to this object. For more
- // information about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html)
- // in the Amazon S3 User Guide. This functionality is not supported for directory
- // buckets.
+ // information about S3 Object Lock, see [Object Lock]in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [Object Lock]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html
ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus

- // The Object Lock mode that you want to apply to this object. This functionality
- // is not supported for directory buckets.
+ // The Object Lock mode that you want to apply to this object.
+ //
+ // This functionality is not supported for directory buckets.
ObjectLockMode types.ObjectLockMode

// The date and time when you want this object's Object Lock to expire. Must be
- // formatted as a timestamp parameter. This functionality is not supported for
- // directory buckets.
+ // formatted as a timestamp parameter.
+ //
+ // This functionality is not supported for directory buckets.
ObjectLockRetainUntilDate *time.Time

// Confirms that the requester knows that they will be charged for the request.
// Bucket owners need not specify this parameter in their requests. If either the
// source or destination S3 bucket has Requester Pays enabled, the requester will
// pay for corresponding charges to copy the object.
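The IfMatch and IfNoneMatch members above enable conditional writes. A hedged sketch of a create-if-absent flow follows; the PreconditionFailed code string mirrors the documented 412 behavior but should be verified against the SDK version in use, and names are placeholders:

package main

import (
    "context"
    "errors"
    "log"
    "strings"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/config"
    "github.com/aws/aws-sdk-go-v2/service/s3"
    "github.com/aws/smithy-go"
)

func main() {
    cfg, err := config.LoadDefaultConfig(context.TODO())
    if err != nil {
        log.Fatal(err)
    }
    client := s3.NewFromConfig(cfg)

    // The asterisk makes the PUT succeed only when no object with this key
    // exists yet.
    _, err = client.PutObject(context.TODO(), &s3.PutObjectInput{
        Bucket:      aws.String("amzn-s3-demo-bucket"), // placeholder
        Key:         aws.String("write-once-object"),   // placeholder
        Body:        strings.NewReader("first write wins"),
        IfNoneMatch: aws.String("*"),
    })
    var apiErr smithy.APIError
    if errors.As(err, &apiErr) && apiErr.ErrorCode() == "PreconditionFailed" {
        log.Println("object already exists; skipping") // assumed 412 error code
        return
    }
    if err != nil {
        log.Fatal(err)
    }
}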
For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer - // Specifies the algorithm to use when encrypting the object (for example, AES256 - // ). This functionality is not supported for directory buckets. + // Specifies the algorithm to use when encrypting the object (for example, AES256 ). + // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string // Specifies the customer-provided encryption key for Amazon S3 to use in // encrypting data. This value is used to store the object and then it is // discarded; Amazon S3 does not store the encryption key. The key must be // appropriate for use with the algorithm specified in the - // x-amz-server-side-encryption-customer-algorithm header. This functionality is - // not supported for directory buckets. + // x-amz-server-side-encryption-customer-algorithm header. + // + // This functionality is not supported for directory buckets. SSECustomerKey *string // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. This functionality is not - // supported for directory buckets. + // encryption key was transmitted without error. + // + // This functionality is not supported for directory buckets. SSECustomerKeyMD5 *string - // Specifies the Amazon Web Services KMS Encryption Context to use for object - // encryption. The value of this header is a base64-encoded UTF-8 string holding - // JSON with the encryption context key-value pairs. This value is stored as object - // metadata and automatically gets passed on to Amazon Web Services KMS for future - // GetObject or CopyObject operations on this object. This value must be - // explicitly added during CopyObject operations. This functionality is not - // supported for directory buckets. + // Specifies the Amazon Web Services KMS Encryption Context as an additional + // encryption context to use for object encryption. The value of this header is a + // Base64 encoded string of a UTF-8 encoded JSON, which contains the encryption + // context as key-value pairs. This value is stored as object metadata and + // automatically gets passed on to Amazon Web Services KMS for future GetObject + // operations on this object. + // + // General purpose buckets - This value must be explicitly added during CopyObject + // operations if you want an additional encryption context for your object. For + // more information, see [Encryption context]in the Amazon S3 User Guide. + // + // Directory buckets - You can optionally provide an explicit encryption context + // value. The value must match the default encryption context - the bucket Amazon + // Resource Name (ARN). An additional encryption context value is not supported. 
+ // + // [Encryption context]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html#encryption-context SSEKMSEncryptionContext *string - // If x-amz-server-side-encryption has a valid value of aws:kms or aws:kms:dsse , - // this header specifies the ID (Key ID, Key ARN, or Key Alias) of the Key - // Management Service (KMS) symmetric encryption customer managed key that was used - // for the object. If you specify x-amz-server-side-encryption:aws:kms or + // Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object + // encryption. If the KMS key doesn't exist in the same account that's issuing the + // command, you must use the full Key ARN not the Key ID. + // + // General purpose buckets - If you specify x-amz-server-side-encryption with + // aws:kms or aws:kms:dsse , this header specifies the ID (Key ID, Key ARN, or Key + // Alias) of the KMS key to use. If you specify + // x-amz-server-side-encryption:aws:kms or // x-amz-server-side-encryption:aws:kms:dsse , but do not provide // x-amz-server-side-encryption-aws-kms-key-id , Amazon S3 uses the Amazon Web - // Services managed key ( aws/s3 ) to protect the data. If the KMS key does not - // exist in the same account that's issuing the command, you must use the full ARN - // and not just the ID. This functionality is not supported for directory buckets. + // Services managed key ( aws/s3 ) to protect the data. + // + // Directory buckets - To encrypt data using SSE-KMS, it's recommended to specify + // the x-amz-server-side-encryption header to aws:kms . Then, the + // x-amz-server-side-encryption-aws-kms-key-id header implicitly uses the bucket's + // default KMS customer managed key ID. If you want to explicitly set the + // x-amz-server-side-encryption-aws-kms-key-id header, it must match the bucket's + // default customer managed key (using key ID or ARN, not alias). Your SSE-KMS + // configuration can only support 1 [customer managed key]per directory bucket's lifetime. The [Amazon Web Services managed key] ( aws/s3 + // ) isn't supported. + // + // Incorrect key specification results in an HTTP 400 Bad Request error. + // + // [customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk + // [Amazon Web Services managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk SSEKMSKeyId *string // The server-side encryption algorithm that was used when you store this object - // in Amazon S3 (for example, AES256 , aws:kms , aws:kms:dsse ). General purpose - // buckets - You have four mutually exclusive options to protect data using - // server-side encryption in Amazon S3, depending on how you choose to manage the - // encryption keys. Specifically, the encryption key options are Amazon S3 managed - // keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS or DSSE-KMS), and - // customer-provided keys (SSE-C). Amazon S3 encrypts data with server-side - // encryption by using Amazon S3 managed keys (SSE-S3) by default. You can - // optionally tell Amazon S3 to encrypt data at rest by using server-side - // encryption with other key options. For more information, see Using Server-Side - // Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) - // in the Amazon S3 User Guide. Directory buckets - For directory buckets, only the - // server-side encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) value is - // supported. 
+ // in Amazon S3 (for example, AES256 , aws:kms , aws:kms:dsse ). + // + // - General purpose buckets - You have four mutually exclusive options to + // protect data using server-side encryption in Amazon S3, depending on how you + // choose to manage the encryption keys. Specifically, the encryption key options + // are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS or + // DSSE-KMS), and customer-provided keys (SSE-C). Amazon S3 encrypts data with + // server-side encryption by using Amazon S3 managed keys (SSE-S3) by default. You + // can optionally tell Amazon S3 to encrypt data at rest by using server-side + // encryption with other key options. For more information, see [Using Server-Side Encryption]in the Amazon S3 + // User Guide. + // + // - Directory buckets - For directory buckets, there are only two supported + // options for server-side encryption: server-side encryption with Amazon S3 + // managed keys (SSE-S3) ( AES256 ) and server-side encryption with KMS keys + // (SSE-KMS) ( aws:kms ). We recommend that the bucket's default encryption uses + // the desired encryption configuration and you don't override the bucket default + // encryption in your CreateSession requests or PUT object requests. Then, new + // objects are automatically encrypted with the desired encryption settings. For + // more information, see [Protecting data with server-side encryption]in the Amazon S3 User Guide. For more information about + // the encryption overriding behaviors in directory buckets, see [Specifying server-side encryption with KMS for new object uploads]. + // + // In the Zonal endpoint API calls (except [CopyObject]and [UploadPartCopy]) using the REST API, the + // encryption request headers must match the encryption settings that are specified + // in the CreateSession request. You can't override the values of the encryption + // settings ( x-amz-server-side-encryption , + // x-amz-server-side-encryption-aws-kms-key-id , + // x-amz-server-side-encryption-context , and + // x-amz-server-side-encryption-bucket-key-enabled ) that are specified in the + // CreateSession request. You don't need to explicitly specify these encryption + // settings values in Zonal endpoint API calls, and Amazon S3 will use the + // encryption settings values from the CreateSession request to protect new + // objects in the directory bucket. + // + // When you use the CLI or the Amazon Web Services SDKs, for CreateSession , the + // session token refreshes automatically to avoid service interruptions when a + // session expires. The CLI or the Amazon Web Services SDKs use the bucket's + // default encryption configuration for the CreateSession request. It's not + // supported to override the encryption settings values in the CreateSession + // request. So in the Zonal endpoint API calls (except [CopyObject]and [UploadPartCopy]), the encryption + // request headers must match the default encryption configuration of the directory + // bucket. 
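A hedged sketch of the encryption members described above, for a general purpose bucket; the key ARN is a placeholder for a customer managed key the caller is authorized to use, and the bucket and key names are placeholders:

package main

import (
    "context"
    "log"
    "strings"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/config"
    "github.com/aws/aws-sdk-go-v2/service/s3"
    "github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
    cfg, err := config.LoadDefaultConfig(context.TODO())
    if err != nil {
        log.Fatal(err)
    }
    client := s3.NewFromConfig(cfg)

    // Encrypt with SSE-KMS under a specific key and enable the S3 Bucket Key
    // to reduce the volume of calls to KMS.
    _, err = client.PutObject(context.TODO(), &s3.PutObjectInput{
        Bucket:               aws.String("amzn-s3-demo-bucket"), // placeholder
        Key:                  aws.String("encrypted-object"),    // placeholder
        Body:                 strings.NewReader("secret"),
        ServerSideEncryption: types.ServerSideEncryptionAwsKms,
        SSEKMSKeyId:          aws.String("arn:aws:kms:us-west-2:111122223333:key/EXAMPLE-ID"), // placeholder
        BucketKeyEnabled:     aws.Bool(true),
    })
    if err != nil {
        log.Fatal(err)
    }
}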
+ // + // [Using Server-Side Encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html + // [Specifying server-side encryption with KMS for new object uploads]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-specifying-kms-encryption.html + // [Protecting data with server-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-serv-side-encryption.html + // [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html + // [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html ServerSideEncryption types.ServerSideEncryption // By default, Amazon S3 uses the STANDARD Storage Class to store newly created // objects. The STANDARD storage class provides high durability and high // availability. Depending on performance needs, you can specify a different - // Storage Class. For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) - // in the Amazon S3 User Guide. + // Storage Class. For more information, see [Storage Classes]in the Amazon S3 User Guide. + // // - For directory buckets, only the S3 Express One Zone storage class is // supported to store newly created objects. + // // - Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. + // + // [Storage Classes]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html StorageClass types.StorageClass // The tag-set for the object. The tag-set must be encoded as URL Query - // parameters. (For example, "Key1=Value1") This functionality is not supported for - // directory buckets. + // parameters. (For example, "Key1=Value1") + // + // This functionality is not supported for directory buckets. Tagging *string // If the bucket is configured as a website, redirects requests for this object to // another object in the same bucket or to an external URL. Amazon S3 stores the // value of this header in the object metadata. For information about object - // metadata, see Object Key and Metadata (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html) - // in the Amazon S3 User Guide. In the following example, the request header sets - // the redirect to an object (anotherPage.html) in the same bucket: - // x-amz-website-redirect-location: /anotherPage.html In the following example, the - // request header sets the object redirect to another website: - // x-amz-website-redirect-location: http://www.example.com/ For more information - // about website hosting in Amazon S3, see Hosting Websites on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html) - // and How to Configure Website Page Redirects (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // metadata, see [Object Key and Metadata]in the Amazon S3 User Guide. + // + // In the following example, the request header sets the redirect to an object + // (anotherPage.html) in the same bucket: + // + // x-amz-website-redirect-location: /anotherPage.html + // + // In the following example, the request header sets the object redirect to + // another website: + // + // x-amz-website-redirect-location: http://www.example.com/ + // + // For more information about website hosting in Amazon S3, see [Hosting Websites on Amazon S3] and [How to Configure Website Page Redirects] in the + // Amazon S3 User Guide. 
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // [How to Configure Website Page Redirects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html
+ // [Hosting Websites on Amazon S3]: https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html
+ // [Object Key and Metadata]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html
WebsiteRedirectLocation *string

+ // Specifies the offset for appending data to existing objects in bytes. The
+ // offset must be equal to the size of the existing object being appended to. If no
+ // object exists, setting this header to 0 will create a new object.
+ //
+ // This functionality is only supported for objects in the Amazon S3 Express One
+ // Zone storage class in directory buckets.
+ WriteOffsetBytes *int64
+
noSmithyDocumentSerde
}

func (in *PutObjectInput) bindEndpointParams(p *EndpointParameters) {
+
p.Bucket = in.Bucket
p.Key = in.Key
@@ -419,107 +655,149 @@ func (in *PutObjectInput) bindEndpointParams(p *EndpointParameters) {
type PutObjectOutput struct {

// Indicates whether the uploaded object uses an S3 Bucket Key for server-side
- // encryption with Key Management Service (KMS) keys (SSE-KMS). This functionality
- // is not supported for directory buckets.
+ // encryption with Key Management Service (KMS) keys (SSE-KMS).
BucketKeyEnabled *bool

- // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
- // present if it was uploaded with the object. When you use an API operation on an
- // object that was uploaded using multipart uploads, this value may not be a direct
- // checksum value of the full object. Instead, it's a calculation based on the
- // checksum values of each individual part. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
- // in the Amazon S3 User Guide.
+ // The Base64 encoded, 32-bit CRC32 checksum of the object. This checksum is only
+ // present if the checksum was uploaded with the object. When you use an API
+ // operation on an object that was uploaded using multipart uploads, this value may
+ // not be a direct checksum value of the full object. Instead, it's a calculation
+ // based on the checksum values of each individual part. For more information about
+ // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User
+ // Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums
ChecksumCRC32 *string

- // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
- // present if it was uploaded with the object. When you use an API operation on an
- // object that was uploaded using multipart uploads, this value may not be a direct
- // checksum value of the full object. Instead, it's a calculation based on the
- // checksum values of each individual part. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
- // in the Amazon S3 User Guide.
+ // The Base64 encoded, 32-bit CRC32C checksum of the object. This checksum is only
+ // present if the checksum was uploaded with the object.
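A hedged sketch of the WriteOffsetBytes member above; the directory bucket name is a placeholder and existingSize stands in for the current object size, which the caller must already know (for example, from a prior HeadObject):

package main

import (
    "context"
    "log"
    "strings"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/config"
    "github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
    cfg, err := config.LoadDefaultConfig(context.TODO())
    if err != nil {
        log.Fatal(err)
    }
    client := s3.NewFromConfig(cfg)

    const existingSize int64 = 1024 // assumed size of the object being appended to

    // Append to an S3 Express One Zone object: the offset must equal the
    // current size of the object in the directory bucket.
    out, err := client.PutObject(context.TODO(), &s3.PutObjectInput{
        Bucket:           aws.String("amzn-s3-demo-bucket--usw2-az1--x-s3"), // placeholder
        Key:              aws.String("append-target"),                       // placeholder
        Body:             strings.NewReader("appended bytes"),
        WriteOffsetBytes: aws.Int64(existingSize),
    })
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("size after append: %d", aws.ToInt64(out.Size))
}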
When you use an API
+ // operation on an object that was uploaded using multipart uploads, this value may
+ // not be a direct checksum value of the full object. Instead, it's a calculation
+ // based on the checksum values of each individual part. For more information about
+ // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User
+ // Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums
ChecksumCRC32C *string

- // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
- // present if it was uploaded with the object. When you use the API operation on an
- // object that was uploaded using multipart uploads, this value may not be a direct
- // checksum value of the full object. Instead, it's a calculation based on the
- // checksum values of each individual part. For more information about how
- // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
- // in the Amazon S3 User Guide.
+ // The Base64 encoded, 64-bit CRC64NVME checksum of the object. This header is
+ // present if the object was uploaded with the CRC64NVME checksum algorithm, or if
+ // it was uploaded without a checksum (and Amazon S3 added the default checksum,
+ // CRC64NVME , to the uploaded object). For more information about how checksums
+ // are calculated with multipart uploads, see [Checking object integrity in the Amazon S3 User Guide].
+ //
+ // [Checking object integrity in the Amazon S3 User Guide]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
+ ChecksumCRC64NVME *string
+
+ // The Base64 encoded, 160-bit SHA1 digest of the object. This will only be
+ // present if the checksum was uploaded with the object. When you use the API
+ // operation on an object that was uploaded using multipart uploads, this value may
+ // not be a direct checksum value of the full object. Instead, it's a calculation
+ // based on the checksum values of each individual part. For more information about
+ // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User
+ // Guide.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums
ChecksumSHA1 *string

- // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
- // present if it was uploaded with the object. When you use an API operation on an
- // object that was uploaded using multipart uploads, this value may not be a direct
- // checksum value of the full object. Instead, it's a calculation based on the
- // checksum values of each individual part.
+ // The Base64 encoded, 256-bit SHA256 digest of the object. This will only be
+ // present if the checksum was uploaded with the object. When you use an API
+ // operation on an object that was uploaded using multipart uploads, this value may
+ // not be a direct checksum value of the full object. Instead, it's a calculation
+ // based on the checksum values of each individual part.
For more information about + // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumSHA256 *string - // Entity tag for the uploaded object. General purpose buckets - To ensure that - // data is not corrupted traversing the network, for objects where the ETag is the - // MD5 digest of the object, you can calculate the MD5 while putting an object to - // Amazon S3 and compare the returned ETag to the calculated MD5 value. Directory - // buckets - The ETag for the object in a directory bucket isn't the MD5 digest of - // the object. + // This header specifies the checksum type of the object, which determines how + // part-level checksums are combined to create an object-level checksum for + // multipart objects. For PutObject uploads, the checksum type is always + // FULL_OBJECT . You can use this header as a data integrity check to verify that + // the checksum type that is received is the same checksum that was specified. For + // more information, see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumType types.ChecksumType + + // Entity tag for the uploaded object. + // + // General purpose buckets - To ensure that data is not corrupted traversing the + // network, for objects where the ETag is the MD5 digest of the object, you can + // calculate the MD5 while putting an object to Amazon S3 and compare the returned + // ETag to the calculated MD5 value. + // + // Directory buckets - The ETag for the object in a directory bucket isn't the MD5 + // digest of the object. ETag *string - // If the expiration is configured for the object (see - // PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) - // ) in the Amazon S3 User Guide, the response includes this header. It includes - // the expiry-date and rule-id key-value pairs that provide information about - // object expiration. The value of the rule-id is URL-encoded. This functionality - // is not supported for directory buckets. + // If the expiration is configured for the object (see [PutBucketLifecycleConfiguration]) in the Amazon S3 User + // Guide, the response includes this header. It includes the expiry-date and + // rule-id key-value pairs that provide information about object expiration. The + // value of the rule-id is URL-encoded. + // + // Object expiration information is not returned in directory buckets and this + // header returns the value " NotImplemented " in all responses for directory + // buckets. + // + // [PutBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html Expiration *string // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // If server-side encryption with a customer-provided encryption key was // requested, the response will include this header to confirm the encryption - // algorithm that's used. This functionality is not supported for directory - // buckets. + // algorithm that's used. 
+ //
+ // This functionality is not supported for directory buckets.
SSECustomerAlgorithm *string

// If server-side encryption with a customer-provided encryption key was
// requested, the response will include this header to provide the round-trip
- // message integrity verification of the customer-provided encryption key. This
- // functionality is not supported for directory buckets.
+ // message integrity verification of the customer-provided encryption key.
+ //
+ // This functionality is not supported for directory buckets.
SSECustomerKeyMD5 *string

// If present, indicates the Amazon Web Services KMS Encryption Context to use for
- // object encryption. The value of this header is a base64-encoded UTF-8 string
- // holding JSON with the encryption context key-value pairs. This value is stored
- // as object metadata and automatically gets passed on to Amazon Web Services KMS
- // for future GetObject or CopyObject operations on this object. This
- // functionality is not supported for directory buckets.
+ // object encryption. The value of this header is a Base64 encoded string of a
+ // UTF-8 encoded JSON, which contains the encryption context as key-value pairs.
+ // This value is stored as object metadata and automatically gets passed on to
+ // Amazon Web Services KMS for future GetObject operations on this object.
SSEKMSEncryptionContext *string

- // If x-amz-server-side-encryption has a valid value of aws:kms or aws:kms:dsse ,
- // this header indicates the ID of the Key Management Service (KMS) symmetric
- // encryption customer managed key that was used for the object. This functionality
- // is not supported for directory buckets.
+ // If present, indicates the ID of the KMS key that was used for object encryption.
SSEKMSKeyId *string

// The server-side encryption algorithm used when you store this object in Amazon
- // S3 (for example, AES256 , aws:kms , aws:kms:dsse ). For directory buckets, only
- // server-side encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is
- // supported.
+ // S3.
ServerSideEncryption types.ServerSideEncryption

- // Version ID of the object. If you enable versioning for a bucket, Amazon S3
- // automatically generates a unique version ID for the object being stored. Amazon
- // S3 returns this ID in the response. When you enable versioning for a bucket, if
- // Amazon S3 receives multiple write requests for the same object simultaneously,
- // it stores all of the objects. For more information about versioning, see Adding
- // Objects to Versioning-Enabled Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html)
- // in the Amazon S3 User Guide. For information about returning the versioning
- // state of a bucket, see GetBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html)
- // . This functionality is not supported for directory buckets.
+ // The size of the object in bytes. This value is only present if you append
+ // to an object.
+ //
+ // This functionality is only supported for objects in the Amazon S3 Express One
+ // Zone storage class in directory buckets.
+ Size *int64
+
+ // Version ID of the object.
+ //
+ // If you enable versioning for a bucket, Amazon S3 automatically generates a
+ // unique version ID for the object being stored. Amazon S3 returns this ID in the
+ // response.
When you enable versioning for a bucket, if Amazon S3 receives + // multiple write requests for the same object simultaneously, it stores all of the + // objects. For more information about versioning, see [Adding Objects to Versioning-Enabled Buckets]in the Amazon S3 User + // Guide. For information about returning the versioning state of a bucket, see [GetBucketVersioning]. + // + // This functionality is not supported for directory buckets. + // + // [Adding Objects to Versioning-Enabled Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html + // [GetBucketVersioning]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html VersionId *string // Metadata pertaining to the operation's result. @@ -571,6 +849,9 @@ func (c *Client) addOperationPutObjectMiddlewares(stack *middleware.Stack, optio if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -586,6 +867,21 @@ func (c *Client) addOperationPutObjectMiddlewares(stack *middleware.Stack, optio if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addRequestChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutObjectValidationMiddleware(stack); err != nil { return err } @@ -628,6 +924,18 @@ func (c *Client) addOperationPutObjectMiddlewares(stack *middleware.Stack, optio if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } @@ -657,9 +965,10 @@ func getPutObjectRequestAlgorithmMember(input interface{}) (string, bool) { } func addPutObjectInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ GetAlgorithm: getPutObjectRequestAlgorithmMember, RequireChecksum: false, + RequestChecksumCalculation: options.RequestChecksumCalculation, EnableTrailingChecksum: true, EnableComputeSHA256PayloadHash: true, EnableDecodedContentLengthHeader: true, @@ -703,6 +1012,8 @@ func (c *PresignClient) PresignPutObject(ctx context.Context, params *PutObjectI } clientOptFns := append(options.ClientOptions, withNopHTTPClientAPIOption) + clientOptFns = append(options.ClientOptions, withNoDefaultChecksumAPIOption) + result, _, err := c.client.invokeOperation(ctx, "PutObject", params, clientOptFns, c.client.addOperationPutObjectMiddlewares, presignConverter(options).convertToPresignMiddleware, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectAcl.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectAcl.go index 08fea12c1..898d6ce7f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectAcl.go 
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectAcl.go
@@ -14,87 +14,152 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)

-// This operation is not supported by directory buckets. Uses the acl subresource
-// to set the access control list (ACL) permissions for a new or existing object in
-// an S3 bucket. You must have the WRITE_ACP permission to set the ACL of an
-// object. For more information, see What permissions can I grant? (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#permissions)
-// in the Amazon S3 User Guide. This functionality is not supported for Amazon S3
-// on Outposts. Depending on your application needs, you can choose to set the ACL
-// on an object using either the request body or the headers. For example, if you
-// have an existing application that updates a bucket ACL using the request body,
-// you can continue to use that approach. For more information, see Access Control
-// List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html)
-// in the Amazon S3 User Guide. If your bucket uses the bucket owner enforced
-// setting for S3 Object Ownership, ACLs are disabled and no longer affect
-// permissions. You must use policies to grant access to your bucket and the
-// objects in it. Requests to set ACLs or update ACLs fail and return the
-// AccessControlListNotSupported error code. Requests to read ACLs are still
-// supported. For more information, see Controlling object ownership (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html)
-// in the Amazon S3 User Guide. Permissions You can set access permissions using
-// one of the following methods:
+// This operation is not supported for directory buckets.
+//
+// Uses the acl subresource to set the access control list (ACL) permissions for a
+// new or existing object in an S3 bucket. You must have the WRITE_ACP permission
+// to set the ACL of an object. For more information, see [What permissions can I grant?]in the Amazon S3 User
+// Guide.
+//
+// This functionality is not supported for Amazon S3 on Outposts.
+//
+// Depending on your application needs, you can choose to set the ACL on an object
+// using either the request body or the headers. For example, if you have an
+// existing application that updates a bucket ACL using the request body, you can
+// continue to use that approach. For more information, see [Access Control List (ACL) Overview]in the Amazon S3 User
+// Guide.
+//
+// If your bucket uses the bucket owner enforced setting for S3 Object Ownership,
+// ACLs are disabled and no longer affect permissions. You must use policies to
+// grant access to your bucket and the objects in it. Requests to set ACLs or
+// update ACLs fail and return the AccessControlListNotSupported error code.
+// Requests to read ACLs are still supported. For more information, see [Controlling object ownership]in the
+// Amazon S3 User Guide.
+//
+// Permissions You can set access permissions using one of the following methods:
+//
// - Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports a
// set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined
// set of grantees and permissions. Specify the canned ACL name as the value of
// x-amz-acl. If you use this header, you cannot use other access
-// control-specific headers in your request. For more information, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL)
-// .
+// control-specific headers in your request. For more information, see [Canned ACL]. +// // - Specify access permissions explicitly with the x-amz-grant-read , // x-amz-grant-read-acp , x-amz-grant-write-acp , and x-amz-grant-full-control // headers. When using these headers, you specify explicit access permissions and // grantees (Amazon Web Services accounts or Amazon S3 groups) who will receive the // permission. If you use these ACL-specific headers, you cannot use x-amz-acl // header to set a canned ACL. These parameters map to the set of permissions that -// Amazon S3 supports in an ACL. For more information, see Access Control List -// (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) -// . You specify each grantee as a type=value pair, where the type is one of the -// following: -// - id – if the value specified is the canonical user ID of an Amazon Web -// Services account -// - uri – if you are granting permissions to a predefined group -// - emailAddress – if the value specified is the email address of an Amazon Web -// Services account Using email addresses to specify a grantee is only supported in -// the following Amazon Web Services Regions: -// - US East (N. Virginia) -// - US West (N. California) -// - US West (Oregon) -// - Asia Pacific (Singapore) -// - Asia Pacific (Sydney) -// - Asia Pacific (Tokyo) -// - Europe (Ireland) -// - South America (São Paulo) For a list of all the Amazon S3 supported Regions -// and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) -// in the Amazon Web Services General Reference. For example, the following -// x-amz-grant-read header grants list objects permission to the two Amazon Web -// Services accounts identified by their email addresses. x-amz-grant-read: -// emailAddress="xyz@amazon.com", emailAddress="abc@amazon.com" +// Amazon S3 supports in an ACL. For more information, see [Access Control List (ACL) Overview]. +// +// You specify each grantee as a type=value pair, where the type is one of the +// +// following: +// +// - id – if the value specified is the canonical user ID of an Amazon Web +// Services account +// +// - uri – if you are granting permissions to a predefined group +// +// - emailAddress – if the value specified is the email address of an Amazon Web +// Services account +// +// Using email addresses to specify a grantee is only supported in the following +// +// Amazon Web Services Regions: +// +// - US East (N. Virginia) +// +// - US West (N. California) +// +// - US West (Oregon) +// +// - Asia Pacific (Singapore) +// +// - Asia Pacific (Sydney) +// +// - Asia Pacific (Tokyo) +// +// - Europe (Ireland) +// +// - South America (São Paulo) +// +// For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints]in the +// +// Amazon Web Services General Reference. +// +// For example, the following x-amz-grant-read header grants list objects +// +// permission to the two Amazon Web Services accounts identified by their email +// addresses. +// +// x-amz-grant-read: emailAddress="xyz@amazon.com", emailAddress="abc@amazon.com" // // You can use either a canned ACL or specify access permissions explicitly. You -// cannot do both. Grantee Values You can specify the person (grantee) to whom -// you're assigning access rights (using request elements) in the following ways: -// - By the person's ID: <>ID<><>GranteesEmail<> DisplayName is optional and -// ignored in the request. 
-// - By URI: <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<>
-// - By Email address: <>Grantees@email.com<>lt;/Grantee> The grantee is resolved
-// to the CanonicalUser and, in a response to a GET Object acl request, appears as
-// the CanonicalUser. Using email addresses to specify a grantee is only supported
-// in the following Amazon Web Services Regions:
-// - US East (N. Virginia)
-// - US West (N. California)
-// - US West (Oregon)
-// - Asia Pacific (Singapore)
-// - Asia Pacific (Sydney)
-// - Asia Pacific (Tokyo)
-// - Europe (Ireland)
-// - South America (São Paulo) For a list of all the Amazon S3 supported Regions
-// and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region)
-// in the Amazon Web Services General Reference.
+// cannot do both.
+//
+// Grantee Values You can specify the person (grantee) to whom you're assigning
+// access rights (using request elements) in the following ways:
+//
+// - By the person's ID:
+//
+// <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="CanonicalUser"><ID>ID</ID><DisplayName>GranteesEmail</DisplayName></Grantee>
+//
+// DisplayName is optional and ignored in the request.
+//
+// - By URI:
+//
+// <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="Group"><URI>http://acs.amazonaws.com/groups/global/AuthenticatedUsers</URI></Grantee>
+//
+// - By Email address:
+//
+// <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="AmazonCustomerByEmail"><EmailAddress>Grantees@email.com</EmailAddress></Grantee>
+//
+// The grantee is resolved to the CanonicalUser and, in a response to a GET Object
+// acl request, appears as the CanonicalUser.
+//
+// Using email addresses to specify a grantee is only supported in the following
+// Amazon Web Services Regions:
+//
+// - US East (N. Virginia)
+//
+// - US West (N. California)
+//
+// - US West (Oregon)
+//
+// - Asia Pacific (Singapore)
+//
+// - Asia Pacific (Sydney)
+//
+// - Asia Pacific (Tokyo)
+//
+// - Europe (Ireland)
+//
+// - South America (São Paulo)
+//
+// For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints]in the
+// Amazon Web Services General Reference.
//
// Versioning The ACL of an object is set at the object version level. By default,
// PUT sets the ACL of the current version of an object. To set the ACL of a
-// different version, use the versionId subresource. The following operations are
-// related to PutObjectAcl :
-// - CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html)
-// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)
+// different version, use the versionId subresource.
+// +// The following operations are related to PutObjectAcl : +// +// [CopyObject] +// +// [GetObject] +// +// [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region +// [Access Control List (ACL) Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html +// [Controlling object ownership]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html +// [Canned ACL]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL +// [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html +// [What permissions can I grant?]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#permissions +// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html func (c *Client) PutObjectAcl(ctx context.Context, params *PutObjectAclInput, optFns ...func(*Options)) (*PutObjectAclOutput, error) { if params == nil { params = &PutObjectAclInput{} @@ -113,6 +178,7 @@ func (c *Client) PutObjectAcl(ctx context.Context, params *PutObjectAclInput, op type PutObjectAclInput struct { // The bucket name that contains the object to which you want to attach the ACL. + // // Access points - When you use this action with an access point, you must provide // the alias of the access point in place of the bucket name or specify the access // point ARN. When using the access point ARN, you must direct requests to the @@ -120,15 +186,17 @@ type PutObjectAclInput struct { // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this // action with an access point through the Amazon Web Services SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. S3 on Outposts - When you use this action with - // Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. - // The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // S3 on Outposts - When you use this action with S3 on Outposts, you must direct + // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the + // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When + // you use this action with S3 on Outposts, the destination bucket must be the + // Outposts access point ARN or the access point alias. For more information about + // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -138,8 +206,9 @@ type PutObjectAclInput struct { // This member is required. Key *string - // The canned ACL to apply to the object. 
For more information, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL)
- // .
+ // The canned ACL to apply to the object. For more information, see [Canned ACL].
+ //
+ // [Canned ACL]: https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL
ACL types.ObjectCannedACL

// Contains the elements that set the ACL permissions for an object per grantee.
@@ -149,17 +218,23 @@ type PutObjectAclInput struct {
// the SDK. This header will not provide any additional functionality if you don't
// use the SDK. When you send this header, there must be a corresponding
// x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
- // request with the HTTP status code 400 Bad Request . For more information, see
- // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
- // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3
- // ignores any provided ChecksumAlgorithm parameter.
+ // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity]
+ // in the Amazon S3 User Guide.
+ //
+ // If you provide an individual checksum, Amazon S3 ignores any provided
+ // ChecksumAlgorithm parameter.
+ //
+ // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
ChecksumAlgorithm types.ChecksumAlgorithm

- // The base64-encoded 128-bit MD5 digest of the data. This header must be used as
+ // The Base64 encoded 128-bit MD5 digest of the data. This header must be used as
// a message integrity check to verify that the request body was not corrupted in
- // transit. For more information, go to RFC 1864.> (http://www.ietf.org/rfc/rfc1864.txt)
+ // transit. For more information, go to [RFC 1864].
+ //
// For requests made using the Amazon Web Services Command Line Interface (CLI) or
// Amazon Web Services SDKs, this field is calculated automatically.
+ //
+ // [RFC 1864]: http://www.ietf.org/rfc/rfc1864.txt
ContentMD5 *string

// The account ID of the expected bucket owner. If the account ID that you provide
@@ -168,44 +243,54 @@ type PutObjectAclInput struct {
ExpectedBucketOwner *string

// Allows grantee the read, write, read ACP, and write ACP permissions on the
- // bucket. This functionality is not supported for Amazon S3 on Outposts.
+ // bucket.
+ //
+ // This functionality is not supported for Amazon S3 on Outposts.
GrantFullControl *string

- // Allows grantee to list the objects in the bucket. This functionality is not
- // supported for Amazon S3 on Outposts.
+ // Allows grantee to list the objects in the bucket.
+ //
+ // This functionality is not supported for Amazon S3 on Outposts.
GrantRead *string

- // Allows grantee to read the bucket ACL. This functionality is not supported for
- // Amazon S3 on Outposts.
+ // Allows grantee to read the bucket ACL.
+ //
+ // This functionality is not supported for Amazon S3 on Outposts.
GrantReadACP *string

- // Allows grantee to create new objects in the bucket. For the bucket and object
- // owners of existing objects, also allows deletions and overwrites of those
- // objects.
+ // Allows grantee to create new objects in the bucket.
+ //
+ // For the bucket and object owners of existing objects, also allows deletions and
+ // overwrites of those objects.
GrantWrite *string

- // Allows grantee to write the ACL for the applicable bucket. This functionality
- // is not supported for Amazon S3 on Outposts.
+ // Allows grantee to write the ACL for the applicable bucket. + // + // This functionality is not supported for Amazon S3 on Outposts. GrantWriteACP *string // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer - // Version ID used to reference a specific version of the object. This - // functionality is not supported for directory buckets. + // Version ID used to reference a specific version of the object. + // + // This functionality is not supported for directory buckets. VersionId *string noSmithyDocumentSerde } func (in *PutObjectAclInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.Key = in.Key @@ -214,7 +299,9 @@ func (in *PutObjectAclInput) bindEndpointParams(p *EndpointParameters) { type PutObjectAclOutput struct { // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // Metadata pertaining to the operation's result. 
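The canned-ACL and explicit-grant paths described in the PutObjectAcl documentation above map directly onto PutObjectAclInput. A minimal sketch against this SDK version, assuming hypothetical bucket, key, and canonical-ID values and a bucket whose Object Ownership setting still permits ACLs:

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Path 1: a canned ACL (the x-amz-acl header). Cannot be combined with
	// the explicit x-amz-grant-* headers in the same request.
	if _, err := client.PutObjectAcl(ctx, &s3.PutObjectAclInput{
		Bucket: aws.String("example-bucket"), // hypothetical
		Key:    aws.String("example-key"),    // hypothetical
		ACL:    types.ObjectCannedACLPublicRead,
	}); err != nil {
		log.Fatal(err)
	}

	// Path 2: explicit grants as type=value pairs, as documented above.
	if _, err := client.PutObjectAcl(ctx, &s3.PutObjectAclInput{
		Bucket:    aws.String("example-bucket"),
		Key:       aws.String("example-key"),
		GrantRead: aws.String(`id="ExampleCanonicalUserID"`), // hypothetical canonical ID
	}); err != nil {
		log.Fatal(err)
	}
}

On buckets using the bucket owner enforced setting, both calls fail with AccessControlListNotSupported, as the documentation above notes.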
@@ -266,6 +353,9 @@ func (c *Client) addOperationPutObjectAclMiddlewares(stack *middleware.Stack, op if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -281,6 +371,21 @@ func (c *Client) addOperationPutObjectAclMiddlewares(stack *middleware.Stack, op if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addRequestChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutObjectAclValidationMiddleware(stack); err != nil { return err } @@ -320,6 +425,18 @@ func (c *Client) addOperationPutObjectAclMiddlewares(stack *middleware.Stack, op if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } @@ -349,9 +466,10 @@ func getPutObjectAclRequestAlgorithmMember(input interface{}) (string, bool) { } func addPutObjectAclInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ GetAlgorithm: getPutObjectAclRequestAlgorithmMember, RequireChecksum: true, + RequestChecksumCalculation: options.RequestChecksumCalculation, EnableTrailingChecksum: false, EnableComputeSHA256PayloadHash: true, EnableDecodedContentLengthHeader: true, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLegalHold.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLegalHold.go index cc23509f8..a72024876 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLegalHold.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLegalHold.go @@ -14,9 +14,14 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Applies a legal hold -// configuration to the specified object. For more information, see Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) -// . This functionality is not supported for Amazon S3 on Outposts. +// This operation is not supported for directory buckets. +// +// Applies a legal hold configuration to the specified object. For more +// information, see [Locking Objects]. +// +// This functionality is not supported for Amazon S3 on Outposts. 
+// +// [Locking Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html func (c *Client) PutObjectLegalHold(ctx context.Context, params *PutObjectLegalHoldInput, optFns ...func(*Options)) (*PutObjectLegalHoldOutput, error) { if params == nil { params = &PutObjectLegalHoldInput{} @@ -35,6 +40,7 @@ func (c *Client) PutObjectLegalHold(ctx context.Context, params *PutObjectLegalH type PutObjectLegalHoldInput struct { // The bucket name containing the object that you want to place a legal hold on. + // // Access points - When you use this action with an access point, you must provide // the alias of the access point in place of the bucket name or specify the access // point ARN. When using the access point ARN, you must direct requests to the @@ -42,8 +48,9 @@ type PutObjectLegalHoldInput struct { // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this // action with an access point through the Amazon Web Services SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -57,15 +64,19 @@ type PutObjectLegalHoldInput struct { // the SDK. This header will not provide any additional functionality if you don't // use the SDK. When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm - // The MD5 hash for the request body. For requests made using the Amazon Web - // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is - // calculated automatically. + // The MD5 hash for the request body. + // + // For requests made using the Amazon Web Services Command Line Interface (CLI) or + // Amazon Web Services SDKs, this field is calculated automatically. ContentMD5 *string // The account ID of the expected bucket owner. If the account ID that you provide @@ -81,10 +92,12 @@ type PutObjectLegalHoldInput struct { // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. 
This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer // The version ID of the object that you want to place a legal hold on. @@ -94,6 +107,7 @@ type PutObjectLegalHoldInput struct { } func (in *PutObjectLegalHoldInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket } @@ -101,7 +115,9 @@ func (in *PutObjectLegalHoldInput) bindEndpointParams(p *EndpointParameters) { type PutObjectLegalHoldOutput struct { // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // Metadata pertaining to the operation's result. @@ -153,6 +169,9 @@ func (c *Client) addOperationPutObjectLegalHoldMiddlewares(stack *middleware.Sta if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -168,6 +187,21 @@ func (c *Client) addOperationPutObjectLegalHoldMiddlewares(stack *middleware.Sta if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addRequestChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutObjectLegalHoldValidationMiddleware(stack); err != nil { return err } @@ -207,6 +241,18 @@ func (c *Client) addOperationPutObjectLegalHoldMiddlewares(stack *middleware.Sta if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } @@ -236,9 +282,10 @@ func getPutObjectLegalHoldRequestAlgorithmMember(input interface{}) (string, boo } func addPutObjectLegalHoldInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ GetAlgorithm: getPutObjectLegalHoldRequestAlgorithmMember, RequireChecksum: true, + RequestChecksumCalculation: options.RequestChecksumCalculation, EnableTrailingChecksum: false, EnableComputeSHA256PayloadHash: true, EnableDecodedContentLengthHeader: true, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLockConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLockConfiguration.go index 358ececc6..e1d6a1888 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLockConfiguration.go +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLockConfiguration.go @@ -14,17 +14,22 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Places an Object Lock -// configuration on the specified bucket. The rule specified in the Object Lock -// configuration will be applied by default to every new object placed in the -// specified bucket. For more information, see Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) -// . +// This operation is not supported for directory buckets. +// +// Places an Object Lock configuration on the specified bucket. The rule specified +// in the Object Lock configuration will be applied by default to every new object +// placed in the specified bucket. For more information, see [Locking Objects]. +// // - The DefaultRetention settings require both a mode and a period. +// // - The DefaultRetention period can be either Days or Years but you must select // one. You cannot specify Days and Years at the same time. +// // - You can enable Object Lock for new or existing buckets. For more -// information, see Configuring Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock-configure.html) -// . +// information, see [Configuring Object Lock]. +// +// [Configuring Object Lock]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock-configure.html +// [Locking Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html func (c *Client) PutObjectLockConfiguration(ctx context.Context, params *PutObjectLockConfigurationInput, optFns ...func(*Options)) (*PutObjectLockConfigurationOutput, error) { if params == nil { params = &PutObjectLockConfigurationInput{} @@ -51,15 +56,19 @@ type PutObjectLockConfigurationInput struct { // the SDK. This header will not provide any additional functionality if you don't // use the SDK. When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm - // The MD5 hash for the request body. For requests made using the Amazon Web - // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is - // calculated automatically. + // The MD5 hash for the request body. + // + // For requests made using the Amazon Web Services Command Line Interface (CLI) or + // Amazon Web Services SDKs, this field is calculated automatically. ContentMD5 *string // The account ID of the expected bucket owner. If the account ID that you provide @@ -74,10 +83,12 @@ type PutObjectLockConfigurationInput struct { // Bucket owners need not specify this parameter in their requests. 
If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer // A token to allow Object Lock to be enabled for an existing bucket. @@ -87,6 +98,7 @@ type PutObjectLockConfigurationInput struct { } func (in *PutObjectLockConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket } @@ -94,7 +106,9 @@ func (in *PutObjectLockConfigurationInput) bindEndpointParams(p *EndpointParamet type PutObjectLockConfigurationOutput struct { // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // Metadata pertaining to the operation's result. @@ -146,6 +160,9 @@ func (c *Client) addOperationPutObjectLockConfigurationMiddlewares(stack *middle if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -161,6 +178,21 @@ func (c *Client) addOperationPutObjectLockConfigurationMiddlewares(stack *middle if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addRequestChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutObjectLockConfigurationValidationMiddleware(stack); err != nil { return err } @@ -200,6 +232,18 @@ func (c *Client) addOperationPutObjectLockConfigurationMiddlewares(stack *middle if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } @@ -229,9 +273,10 @@ func getPutObjectLockConfigurationRequestAlgorithmMember(input interface{}) (str } func addPutObjectLockConfigurationInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ GetAlgorithm: getPutObjectLockConfigurationRequestAlgorithmMember, RequireChecksum: true, + RequestChecksumCalculation: 
options.RequestChecksumCalculation, EnableTrailingChecksum: false, EnableComputeSHA256PayloadHash: true, EnableDecodedContentLengthHeader: true, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectRetention.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectRetention.go index eb787de48..3c7cde265 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectRetention.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectRetention.go @@ -14,12 +14,16 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Places an Object -// Retention configuration on an object. For more information, see Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) -// . Users or accounts require the s3:PutObjectRetention permission in order to -// place an Object Retention configuration on objects. Bypassing a Governance +// This operation is not supported for directory buckets. +// +// Places an Object Retention configuration on an object. For more information, +// see [Locking Objects]. Users or accounts require the s3:PutObjectRetention permission in order +// to place an Object Retention configuration on objects. Bypassing a Governance // Retention configuration requires the s3:BypassGovernanceRetention permission. +// // This functionality is not supported for Amazon S3 on Outposts. +// +// [Locking Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html func (c *Client) PutObjectRetention(ctx context.Context, params *PutObjectRetentionInput, optFns ...func(*Options)) (*PutObjectRetentionOutput, error) { if params == nil { params = &PutObjectRetentionInput{} @@ -38,15 +42,18 @@ func (c *Client) PutObjectRetention(ctx context.Context, params *PutObjectRetent type PutObjectRetentionInput struct { // The bucket name that contains the object you want to apply this Object - // Retention configuration to. Access points - When you use this action with an - // access point, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When using the access point ARN, - // you must direct requests to the access point hostname. The access point hostname - // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. + // Retention configuration to. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the access + // point ARN. When using the access point ARN, you must direct requests to the + // access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. 
+ // + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -64,15 +71,19 @@ type PutObjectRetentionInput struct { // the SDK. This header will not provide any additional functionality if you don't // use the SDK. When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm - // The MD5 hash for the request body. For requests made using the Amazon Web - // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is - // calculated automatically. + // The MD5 hash for the request body. + // + // For requests made using the Amazon Web Services Command Line Interface (CLI) or + // Amazon Web Services SDKs, this field is calculated automatically. ContentMD5 *string // The account ID of the expected bucket owner. If the account ID that you provide @@ -84,10 +95,12 @@ type PutObjectRetentionInput struct { // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer // The container element for the Object Retention configuration. @@ -101,6 +114,7 @@ type PutObjectRetentionInput struct { } func (in *PutObjectRetentionInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket } @@ -108,7 +122,9 @@ func (in *PutObjectRetentionInput) bindEndpointParams(p *EndpointParameters) { type PutObjectRetentionOutput struct { // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // Metadata pertaining to the operation's result. 
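The retention semantics documented in this hunk (a mode plus a retain-until date, applied to the current version unless versionId is given) reduce to a single Retention field on PutObjectRetentionInput. A minimal sketch, assuming a hypothetical Object Lock-enabled bucket and a 30-day window:

package main

import (
	"context"
	"log"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Place a GOVERNANCE-mode retention period on the current object version.
	// Shortening or removing it later requires s3:BypassGovernanceRetention.
	_, err = client.PutObjectRetention(ctx, &s3.PutObjectRetentionInput{
		Bucket: aws.String("example-lock-bucket"), // hypothetical; Object Lock must be enabled
		Key:    aws.String("example-key"),         // hypothetical
		Retention: &types.ObjectLockRetention{
			Mode:            types.ObjectLockRetentionModeGovernance,
			RetainUntilDate: aws.Time(time.Now().AddDate(0, 0, 30)),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}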
@@ -160,6 +176,9 @@ func (c *Client) addOperationPutObjectRetentionMiddlewares(stack *middleware.Sta if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -175,6 +194,21 @@ func (c *Client) addOperationPutObjectRetentionMiddlewares(stack *middleware.Sta if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addRequestChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutObjectRetentionValidationMiddleware(stack); err != nil { return err } @@ -214,6 +248,18 @@ func (c *Client) addOperationPutObjectRetentionMiddlewares(stack *middleware.Sta if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } @@ -243,9 +289,10 @@ func getPutObjectRetentionRequestAlgorithmMember(input interface{}) (string, boo } func addPutObjectRetentionInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ GetAlgorithm: getPutObjectRetentionRequestAlgorithmMember, RequireChecksum: true, + RequestChecksumCalculation: options.RequestChecksumCalculation, EnableTrailingChecksum: false, EnableComputeSHA256PayloadHash: true, EnableDecodedContentLengthHeader: true, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectTagging.go index 2768db502..6de5386c1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectTagging.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectTagging.go @@ -14,35 +14,50 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Sets the supplied tag-set -// to an object that already exists in a bucket. A tag is a key-value pair. For -// more information, see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-tagging.html) -// . You can associate tags with an object by sending a PUT request against the +// This operation is not supported for directory buckets. +// +// Sets the supplied tag-set to an object that already exists in a bucket. A tag +// is a key-value pair. For more information, see [Object Tagging]. +// +// You can associate tags with an object by sending a PUT request against the // tagging subresource that is associated with the object. You can retrieve tags by -// sending a GET request. For more information, see GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) -// . 
For tagging-related restrictions related to characters and encodings, see Tag
-// Restrictions (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html)
-// . Note that Amazon S3 limits the maximum number of tags to 10 tags per object.
+// sending a GET request. For more information, see [GetObjectTagging].
+//
+// For tagging-related restrictions related to characters and encodings, see [Tag Restrictions].
+// Note that Amazon S3 limits the maximum number of tags to 10 tags per object.
+//
// To use this operation, you must have permission to perform the
// s3:PutObjectTagging action. By default, the bucket owner has this permission and
-// can grant this permission to others. To put tags of any other version, use the
-// versionId query parameter. You also need permission for the
-// s3:PutObjectVersionTagging action. PutObjectTagging has the following special
-// errors. For more Amazon S3 errors see, Error Responses (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html)
-// .
+// can grant this permission to others.
+//
+// To put tags of any other version, use the versionId query parameter. You also
+// need permission for the s3:PutObjectVersionTagging action.
+//
+// PutObjectTagging has the following special errors. For more Amazon S3 errors,
+// see [Error Responses].
+//
// - InvalidTag - The tag provided was not a valid tag. This error can occur if
-// the tag did not pass input validation. For more information, see Object
-// Tagging (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-tagging.html)
-// .
+// the tag did not pass input validation. For more information, see [Object Tagging].
+//
// - MalformedXML - The XML provided does not match the schema.
+//
// - OperationAborted - A conflicting conditional action is currently in progress
// against this resource. Please try again.
+//
// - InternalError - The service was unable to apply the provided tag to the
// object.
//
// The following operations are related to PutObjectTagging :
-// - GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html)
-// - DeleteObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html)
+//
+// [GetObjectTagging]
+//
+// [DeleteObjectTagging]
+//
+// [Error Responses]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
+// [DeleteObjectTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html
+// [Object Tagging]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-tagging.html
+// [Tag Restrictions]: https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html
+// [GetObjectTagging]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html
func (c *Client) PutObjectTagging(ctx context.Context, params *PutObjectTaggingInput, optFns ...func(*Options)) (*PutObjectTaggingOutput, error) {
if params == nil {
params = &PutObjectTaggingInput{}
@@ -60,23 +75,26 @@ func (c *Client) PutObjectTagging(ctx context.Context, params *PutObjectTaggingI
type PutObjectTaggingInput struct {
- // The bucket name containing the object. Access points - When you use this action
- // with an access point, you must provide the alias of the access point in place of
- // the bucket name or specify the access point ARN. When using the access point
- // ARN, you must direct requests to the access point hostname.
The access point - // hostname takes the form + // The bucket name containing the object. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the access + // point ARN. When using the access point ARN, you must direct requests to the + // access point hostname. The access point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this // action with an access point through the Amazon Web Services SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. S3 on Outposts - When you use this action with - // Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. - // The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // S3 on Outposts - When you use this action with S3 on Outposts, you must direct + // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the + // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When + // you use this action with S3 on Outposts, the destination bucket must be the + // Outposts access point ARN or the access point alias. For more information about + // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -95,15 +113,19 @@ type PutObjectTaggingInput struct { // the SDK. This header will not provide any additional functionality if you don't // use the SDK. When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm - // The MD5 hash for the request body. For requests made using the Amazon Web - // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is - // calculated automatically. 
+ // The MD5 hash for the request body. + // + // For requests made using the Amazon Web Services Command Line Interface (CLI) or + // Amazon Web Services SDKs, this field is calculated automatically. ContentMD5 *string // The account ID of the expected bucket owner. If the account ID that you provide @@ -115,10 +137,12 @@ type PutObjectTaggingInput struct { // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer // The versionId of the object that the tag-set will be added to. @@ -128,6 +152,7 @@ type PutObjectTaggingInput struct { } func (in *PutObjectTaggingInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket } @@ -186,6 +211,9 @@ func (c *Client) addOperationPutObjectTaggingMiddlewares(stack *middleware.Stack if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -201,6 +229,21 @@ func (c *Client) addOperationPutObjectTaggingMiddlewares(stack *middleware.Stack if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addRequestChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutObjectTaggingValidationMiddleware(stack); err != nil { return err } @@ -240,6 +283,18 @@ func (c *Client) addOperationPutObjectTaggingMiddlewares(stack *middleware.Stack if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } @@ -269,9 +324,10 @@ func getPutObjectTaggingRequestAlgorithmMember(input interface{}) (string, bool) } func addPutObjectTaggingInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ GetAlgorithm: getPutObjectTaggingRequestAlgorithmMember, RequireChecksum: true, + RequestChecksumCalculation: options.RequestChecksumCalculation, EnableTrailingChecksum: false, EnableComputeSHA256PayloadHash: true, 
EnableDecodedContentLengthHeader: true, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutPublicAccessBlock.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutPublicAccessBlock.go index 7e6d0788e..114bcc149 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutPublicAccessBlock.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutPublicAccessBlock.go @@ -15,22 +15,38 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Creates or modifies the -// PublicAccessBlock configuration for an Amazon S3 bucket. To use this operation, -// you must have the s3:PutBucketPublicAccessBlock permission. For more -// information about Amazon S3 permissions, see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) -// . When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket or -// an object, it checks the PublicAccessBlock configuration for both the bucket -// (or the bucket that contains the object) and the bucket owner's account. If the +// This operation is not supported for directory buckets. +// +// Creates or modifies the PublicAccessBlock configuration for an Amazon S3 +// bucket. To use this operation, you must have the s3:PutBucketPublicAccessBlock +// permission. For more information about Amazon S3 permissions, see [Specifying Permissions in a Policy]. +// +// When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket or an +// object, it checks the PublicAccessBlock configuration for both the bucket (or +// the bucket that contains the object) and the bucket owner's account. If the // PublicAccessBlock configurations are different between the bucket and the // account, Amazon S3 uses the most restrictive combination of the bucket-level and -// account-level settings. For more information about when Amazon S3 considers a -// bucket or an object public, see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) -// . The following operations are related to PutPublicAccessBlock : -// - GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) -// - DeletePublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html) -// - GetBucketPolicyStatus (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html) -// - Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) +// account-level settings. +// +// For more information about when Amazon S3 considers a bucket or an object +// public, see [The Meaning of "Public"]. 
+// +// The following operations are related to PutPublicAccessBlock : +// +// [GetPublicAccessBlock] +// +// [DeletePublicAccessBlock] +// +// [GetBucketPolicyStatus] +// +// [Using Amazon S3 Block Public Access] +// +// [GetPublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html +// [DeletePublicAccessBlock]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html +// [Using Amazon S3 Block Public Access]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html +// [GetBucketPolicyStatus]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html +// [Specifying Permissions in a Policy]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html +// [The Meaning of "Public"]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status func (c *Client) PutPublicAccessBlock(ctx context.Context, params *PutPublicAccessBlockInput, optFns ...func(*Options)) (*PutPublicAccessBlockOutput, error) { if params == nil { params = &PutPublicAccessBlockInput{} @@ -56,9 +72,10 @@ type PutPublicAccessBlockInput struct { // The PublicAccessBlock configuration that you want to apply to this Amazon S3 // bucket. You can enable the configuration options in any combination. For more - // information about when Amazon S3 considers a bucket or object public, see The - // Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) - // in the Amazon S3 User Guide. + // information about when Amazon S3 considers a bucket or object public, see [The Meaning of "Public"]in + // the Amazon S3 User Guide. + // + // [The Meaning of "Public"]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status // // This member is required. PublicAccessBlockConfiguration *types.PublicAccessBlockConfiguration @@ -67,15 +84,19 @@ type PutPublicAccessBlockInput struct { // the SDK. This header will not provide any additional functionality if you don't // use the SDK. When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm - // The MD5 hash of the PutPublicAccessBlock request body. For requests made using - // the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services - // SDKs, this field is calculated automatically. + // The MD5 hash of the PutPublicAccessBlock request body. 
+ // + // For requests made using the Amazon Web Services Command Line Interface (CLI) or + // Amazon Web Services SDKs, this field is calculated automatically. ContentMD5 *string // The account ID of the expected bucket owner. If the account ID that you provide @@ -87,6 +108,7 @@ type PutPublicAccessBlockInput struct { } func (in *PutPublicAccessBlockInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.UseS3ExpressControlEndpoint = ptr.Bool(true) } @@ -141,6 +163,9 @@ func (c *Client) addOperationPutPublicAccessBlockMiddlewares(stack *middleware.S if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -156,6 +181,21 @@ func (c *Client) addOperationPutPublicAccessBlockMiddlewares(stack *middleware.S if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addRequestChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpPutPublicAccessBlockValidationMiddleware(stack); err != nil { return err } @@ -195,6 +235,18 @@ func (c *Client) addOperationPutPublicAccessBlockMiddlewares(stack *middleware.S if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } @@ -224,9 +276,10 @@ func getPutPublicAccessBlockRequestAlgorithmMember(input interface{}) (string, b } func addPutPublicAccessBlockInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ GetAlgorithm: getPutPublicAccessBlockRequestAlgorithmMember, RequireChecksum: true, + RequestChecksumCalculation: options.RequestChecksumCalculation, EnableTrailingChecksum: false, EnableComputeSHA256PayloadHash: true, EnableDecodedContentLengthHeader: true, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_RestoreObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_RestoreObject.go index 3b6aad85b..69a7f4ffb 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_RestoreObject.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_RestoreObject.go @@ -14,40 +14,51 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This operation is not supported by directory buckets. Restores an archived copy -// of an object back into Amazon S3 This functionality is not supported for Amazon -// S3 on Outposts. This action performs the following types of requests: +// This operation is not supported for directory buckets. +// +// # Restores an archived copy of an object back into Amazon S3 +// +// This functionality is not supported for Amazon S3 on Outposts. 
+// +// This action performs the following types of requests: +// // - restore an archive - Restore an archived object // // For more information about the S3 structure in the request body, see the // following: -// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) -// - Managing Access with ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html) -// in the Amazon S3 User Guide -// - Protecting Data Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) -// in the Amazon S3 User Guide +// +// [PutObject] +// +// [Managing Access with ACLs] +// - in the Amazon S3 User Guide +// +// [Protecting Data Using Server-Side Encryption] +// - in the Amazon S3 User Guide // // Permissions To use this operation, you must have permissions to perform the // s3:RestoreObject action. The bucket owner has this permission by default and can -// grant this permission to others. For more information about permissions, see -// Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// in the Amazon S3 User Guide. Restoring objects Objects that you archive to the -// S3 Glacier Flexible Retrieval Flexible Retrieval or S3 Glacier Deep Archive -// storage class, and S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep -// Archive tiers, are not accessible in real time. For objects in the S3 Glacier -// Flexible Retrieval Flexible Retrieval or S3 Glacier Deep Archive storage -// classes, you must first initiate a restore request, and then wait until a -// temporary copy of the object is available. If you want a permanent copy of the -// object, create a copy of it in the Amazon S3 Standard storage class in your S3 -// bucket. To access an archived object, you must restore the object for the -// duration (number of days) that you specify. For objects in the Archive Access or -// Deep Archive Access tiers of S3 Intelligent-Tiering, you must first initiate a -// restore request, and then wait until the object is moved into the Frequent -// Access tier. To restore a specific object version, you can provide a version ID. -// If you don't provide a version ID, Amazon S3 restores the current version. When -// restoring an archived object, you can specify one of the following data access -// tier options in the Tier element of the request body: +// grant this permission to others. For more information about permissions, see [Permissions Related to Bucket Subresource Operations] +// and [Managing Access Permissions to Your Amazon S3 Resources]in the Amazon S3 User Guide. +// +// Restoring objects Objects that you archive to the S3 Glacier Flexible Retrieval +// Flexible Retrieval or S3 Glacier Deep Archive storage class, and S3 +// Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep Archive tiers, are +// not accessible in real time. For objects in the S3 Glacier Flexible Retrieval +// Flexible Retrieval or S3 Glacier Deep Archive storage classes, you must first +// initiate a restore request, and then wait until a temporary copy of the object +// is available. If you want a permanent copy of the object, create a copy of it in +// the Amazon S3 Standard storage class in your S3 bucket. 
To access an archived +// object, you must restore the object for the duration (number of days) that you +// specify. For objects in the Archive Access or Deep Archive Access tiers of S3 +// Intelligent-Tiering, you must first initiate a restore request, and then wait +// until the object is moved into the Frequent Access tier. +// +// To restore a specific object version, you can provide a version ID. If you +// don't provide a version ID, Amazon S3 restores the current version. +// +// When restoring an archived object, you can specify one of the following data +// access tier options in the Tier element of the request body: +// // - Expedited - Expedited retrievals allow you to quickly access your data // stored in the S3 Glacier Flexible Retrieval Flexible Retrieval storage class or // S3 Intelligent-Tiering Archive tier when occasional urgent requests for @@ -57,6 +68,7 @@ import ( // Expedited retrievals is available when you need it. Expedited retrievals and // provisioned capacity are not available for objects stored in the S3 Glacier Deep // Archive storage class or S3 Intelligent-Tiering Deep Archive tier. +// // - Standard - Standard retrievals allow you to access any of your archived // objects within several hours. This is the default option for retrieval requests // that do not specify the retrieval option. Standard retrievals typically finish @@ -65,6 +77,7 @@ import ( // typically finish within 12 hours for objects stored in the S3 Glacier Deep // Archive storage class or S3 Intelligent-Tiering Deep Archive tier. Standard // retrievals are free for objects stored in S3 Intelligent-Tiering. +// // - Bulk - Bulk retrievals free for objects stored in the S3 Glacier Flexible // Retrieval and S3 Intelligent-Tiering storage classes, enabling you to retrieve // large amounts, even petabytes, of data at no cost. Bulk retrievals typically @@ -76,29 +89,33 @@ import ( // Deep Archive tier. // // For more information about archive retrieval options and provisioned capacity -// for Expedited data access, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html) -// in the Amazon S3 User Guide. You can use Amazon S3 restore speed upgrade to -// change the restore speed to a faster speed while it is in progress. For more -// information, see Upgrading the speed of an in-progress restore (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html#restoring-objects-upgrade-tier.title.html) -// in the Amazon S3 User Guide. To get the status of object restoration, you can -// send a HEAD request. Operations return the x-amz-restore header, which provides -// information about the restoration status, in the response. You can use Amazon S3 -// event notifications to notify you when a restore is initiated or completed. For -// more information, see Configuring Amazon S3 Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) -// in the Amazon S3 User Guide. After restoring an archived object, you can update -// the restoration period by reissuing the request with a new period. Amazon S3 -// updates the restoration period relative to the current time and charges only for -// the request-there are no data transfer charges. You cannot update the -// restoration period when Amazon S3 is actively processing your current restore -// request for the object. 
If your bucket has a lifecycle configuration with a rule -// that includes an expiration action, the object expiration overrides the life -// span that you specify in a restore request. For example, if you restore an -// object copy for 10 days, but the object is scheduled to expire in 3 days, Amazon -// S3 deletes the object in 3 days. For more information about lifecycle -// configuration, see PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) -// and Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) -// in Amazon S3 User Guide. Responses A successful action returns either the 200 OK -// or 202 Accepted status code. +// for Expedited data access, see [Restoring Archived Objects] in the Amazon S3 User Guide. +// +// You can use Amazon S3 restore speed upgrade to change the restore speed to a +// faster speed while it is in progress. For more information, see [Upgrading the speed of an in-progress restore]in the Amazon +// S3 User Guide. +// +// To get the status of object restoration, you can send a HEAD request. +// Operations return the x-amz-restore header, which provides information about +// the restoration status, in the response. You can use Amazon S3 event +// notifications to notify you when a restore is initiated or completed. For more +// information, see [Configuring Amazon S3 Event Notifications]in the Amazon S3 User Guide. +// +// After restoring an archived object, you can update the restoration period by +// reissuing the request with a new period. Amazon S3 updates the restoration +// period relative to the current time and charges only for the request; there are +// no data transfer charges. You cannot update the restoration period when Amazon +// S3 is actively processing your current restore request for the object. +// +// If your bucket has a lifecycle configuration with a rule that includes an +// expiration action, the object expiration overrides the life span that you +// specify in a restore request. For example, if you restore an object copy for 10 +// days, but the object is scheduled to expire in 3 days, Amazon S3 deletes the +// object in 3 days. For more information about lifecycle configuration, see [PutBucketLifecycleConfiguration]and [Object Lifecycle Management] +// in Amazon S3 User Guide. +// +// Responses A successful action returns either the 200 OK or 202 Accepted status +// code. // // - If the object is not previously restored, then Amazon S3 returns 202 // Accepted in the response.
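The paragraph above notes that restore status is reported through the x-amz-restore header of a HEAD request. A small hedged sketch of polling that status with the v2 client, assuming a hypothetical bucket and key:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// HeadObject surfaces the x-amz-restore header as Restore, for example
	// `ongoing-request="true"` while the restore is in progress, or
	// `ongoing-request="false", expiry-date="..."` once the copy is available.
	out, err := client.HeadObject(ctx, &s3.HeadObjectInput{
		Bucket: aws.String("example-bucket"), // hypothetical
		Key:    aws.String("archive/data.bin"),
	})
	if err != nil {
		log.Fatal(err)
	}
	if out.Restore != nil {
		fmt.Println("restore status:", *out.Restore)
	} else {
		fmt.Println("no restore in progress or pending for this object")
	}
}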
@@ -128,8 +145,22 @@ import ( // - SOAP Fault Code Prefix: N/A // // The following operations are related to RestoreObject : -// - PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) -// - GetBucketNotificationConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html) +// +// [PutBucketLifecycleConfiguration] +// +// [GetBucketNotificationConfiguration] +// +// [PutBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html +// [Object Lifecycle Management]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html +// [Permissions Related to Bucket Subresource Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources +// [Configuring Amazon S3 Event Notifications]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html +// [Managing Access with ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html +// [Protecting Data Using Server-Side Encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html +// [GetBucketNotificationConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html +// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html +// [Restoring Archived Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html +// [Managing Access Permissions to Your Amazon S3 Resources]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html +// [Upgrading the speed of an in-progress restore]: https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html#restoring-objects-upgrade-tier.title.html func (c *Client) RestoreObject(ctx context.Context, params *RestoreObjectInput, optFns ...func(*Options)) (*RestoreObjectOutput, error) { if params == nil { params = &RestoreObjectInput{} @@ -147,23 +178,26 @@ func (c *Client) RestoreObject(ctx context.Context, params *RestoreObjectInput, type RestoreObjectInput struct { - // The bucket name containing the object to restore. Access points - When you use - // this action with an access point, you must provide the alias of the access point - // in place of the bucket name or specify the access point ARN. When using the - // access point ARN, you must direct requests to the access point hostname. The - // access point hostname takes the form + // The bucket name containing the object to restore. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the access + // point ARN. When using the access point ARN, you must direct requests to the + // access point hostname. The access point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this // action with an access point through the Amazon Web Services SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. S3 on Outposts - When you use this action with - // Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. 
- // The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // S3 on Outposts - When you use this action with S3 on Outposts, you must direct + // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the + // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When + // you use this action with S3 on Outposts, the destination bucket must be the + // Outposts access point ARN or the access point alias. For more information about + // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -177,10 +211,13 @@ type RestoreObjectInput struct { // the SDK. This header will not provide any additional functionality if you don't // use the SDK. When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm // The account ID of the expected bucket owner. If the account ID that you provide @@ -192,10 +229,12 @@ type RestoreObjectInput struct { // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer // Container for restore job parameters. 
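The RestoreRequest field above is documented only as a container for restore job parameters. A minimal sketch of what callers typically put in it, here a 10-day restore at the Standard tier (Expedited and Bulk are the other tiers discussed earlier); the bucket and key are placeholders:

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Restore an archived object for 10 days using the Standard retrieval
	// tier; if no version ID is given, the current version is restored.
	_, err = client.RestoreObject(ctx, &s3.RestoreObjectInput{
		Bucket: aws.String("example-bucket"), // hypothetical
		Key:    aws.String("archive/data.bin"),
		RestoreRequest: &types.RestoreRequest{
			Days: aws.Int32(10),
			GlacierJobParameters: &types.GlacierJobParameters{
				Tier: types.TierStandard,
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}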
@@ -208,6 +247,7 @@ type RestoreObjectInput struct { } func (in *RestoreObjectInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket } @@ -215,7 +255,9 @@ func (in *RestoreObjectInput) bindEndpointParams(p *EndpointParameters) { type RestoreObjectOutput struct { // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // Indicates the path in the provided S3 output location where Select results will @@ -271,6 +313,9 @@ func (c *Client) addOperationRestoreObjectMiddlewares(stack *middleware.Stack, o if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -286,6 +331,21 @@ func (c *Client) addOperationRestoreObjectMiddlewares(stack *middleware.Stack, o if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addRequestChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpRestoreObjectValidationMiddleware(stack); err != nil { return err } @@ -322,6 +382,18 @@ func (c *Client) addOperationRestoreObjectMiddlewares(stack *middleware.Stack, o if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } @@ -351,9 +423,10 @@ func getRestoreObjectRequestAlgorithmMember(input interface{}) (string, bool) { } func addRestoreObjectInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ GetAlgorithm: getRestoreObjectRequestAlgorithmMember, RequireChecksum: false, + RequestChecksumCalculation: options.RequestChecksumCalculation, EnableTrailingChecksum: false, EnableComputeSHA256PayloadHash: true, EnableDecodedContentLengthHeader: true, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_SelectObjectContent.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_SelectObjectContent.go index f69db696a..2dfa63c6e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_SelectObjectContent.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_SelectObjectContent.go @@ -14,67 +14,94 @@ import ( "sync" ) -// This operation is not supported by directory buckets. This action filters the -// contents of an Amazon S3 object based on a simple structured query language -// (SQL) statement. In the request, along with the SQL expression, you must also -// specify a data serialization format (JSON, CSV, or Apache Parquet) of the -// object. 
Amazon S3 uses this format to parse object data into records, and -// returns only records that match the specified SQL expression. You must also -// specify the data serialization format for the response. This functionality is -// not supported for Amazon S3 on Outposts. For more information about Amazon S3 -// Select, see Selecting Content from Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/selecting-content-from-objects.html) -// and SELECT Command (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-glacier-select-sql-reference-select.html) -// in the Amazon S3 User Guide. Permissions You must have the s3:GetObject -// permission for this operation. Amazon S3 Select does not support anonymous -// access. For more information about permissions, see Specifying Permissions in a -// Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) -// in the Amazon S3 User Guide. Object Data Formats You can use Amazon S3 Select to -// query objects that have the following format properties: +// This operation is not supported for directory buckets. +// +// This action filters the contents of an Amazon S3 object based on a simple +// structured query language (SQL) statement. In the request, along with the SQL +// expression, you must also specify a data serialization format (JSON, CSV, or +// Apache Parquet) of the object. Amazon S3 uses this format to parse object data +// into records, and returns only records that match the specified SQL expression. +// You must also specify the data serialization format for the response. +// +// This functionality is not supported for Amazon S3 on Outposts. +// +// For more information about Amazon S3 Select, see [Selecting Content from Objects] and [SELECT Command] in the Amazon S3 User +// Guide. +// +// Permissions You must have the s3:GetObject permission for this operation. +// Amazon S3 Select does not support anonymous access. For more information about +// permissions, see [Specifying Permissions in a Policy]in the Amazon S3 User Guide. +// +// Object Data Formats You can use Amazon S3 Select to query objects that have the +// following format properties: +// // - CSV, JSON, and Parquet - Objects must be in CSV, JSON, or Parquet format. +// // - UTF-8 - UTF-8 is the only encoding type Amazon S3 Select supports. +// // - GZIP or BZIP2 - CSV and JSON files can be compressed using GZIP or BZIP2. // GZIP and BZIP2 are the only compression formats that Amazon S3 Select supports // for CSV and JSON files. Amazon S3 Select supports columnar compression for // Parquet using GZIP or Snappy. Amazon S3 Select does not support whole-object // compression for Parquet objects. +// // - Server-side encryption - Amazon S3 Select supports querying objects that -// are protected with server-side encryption. For objects that are encrypted with -// customer-provided encryption keys (SSE-C), you must use HTTPS, and you must use -// the headers that are documented in the GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) -// . For more information about SSE-C, see Server-Side Encryption (Using -// Customer-Provided Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) -// in the Amazon S3 User Guide. For objects that are encrypted with Amazon S3 -// managed keys (SSE-S3) and Amazon Web Services KMS keys (SSE-KMS), server-side -// encryption is handled transparently, so you don't need to specify anything. 
For -// more information about server-side encryption, including SSE-S3 and SSE-KMS, see -// Protecting Data Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) -// in the Amazon S3 User Guide. +// are protected with server-side encryption. +// +// For objects that are encrypted with customer-provided encryption keys (SSE-C), +// +// you must use HTTPS, and you must use the headers that are documented in the [GetObject]. +// For more information about SSE-C, see [Server-Side Encryption (Using Customer-Provided Encryption Keys)]in the Amazon S3 User Guide. +// +// For objects that are encrypted with Amazon S3 managed keys (SSE-S3) and Amazon +// +// Web Services KMS keys (SSE-KMS), server-side encryption is handled +// transparently, so you don't need to specify anything. For more information about +// server-side encryption, including SSE-S3 and SSE-KMS, see [Protecting Data Using Server-Side Encryption]in the Amazon S3 +// User Guide. // // Working with the Response Body Given the response size is unknown, Amazon S3 // Select streams the response as a series of messages and includes a // Transfer-Encoding header with chunked as its value in the response. For more -// information, see Appendix: SelectObjectContent Response (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTSelectObjectAppendix.html) -// . GetObject Support The SelectObjectContent action does not support the -// following GetObject functionality. For more information, see GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) -// . +// information, see [Appendix: SelectObjectContent Response]. +// +// GetObject Support The SelectObjectContent action does not support the following +// GetObject functionality. For more information, see [GetObject]. +// // - Range : Although you can specify a scan range for an Amazon S3 Select -// request (see SelectObjectContentRequest - ScanRange (https://docs.aws.amazon.com/AmazonS3/latest/API/API_SelectObjectContent.html#AmazonS3-SelectObjectContent-request-ScanRange) -// in the request parameters), you cannot specify the range of bytes of an object -// to return. +// request (see [SelectObjectContentRequest - ScanRange]in the request parameters), you cannot specify the range of +// bytes of an object to return. +// // - The GLACIER , DEEP_ARCHIVE , and REDUCED_REDUNDANCY storage classes, or the // ARCHIVE_ACCESS and DEEP_ARCHIVE_ACCESS access tiers of the INTELLIGENT_TIERING // storage class: You cannot query objects in the GLACIER , DEEP_ARCHIVE , or // REDUCED_REDUNDANCY storage classes, nor objects in the ARCHIVE_ACCESS or // DEEP_ARCHIVE_ACCESS access tiers of the INTELLIGENT_TIERING storage class. For -// more information about storage classes, see Using Amazon S3 storage classes (https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-class-intro.html) -// in the Amazon S3 User Guide. +// more information about storage classes, see [Using Amazon S3 storage classes]in the Amazon S3 User Guide. 
+// +// Special Errors For a list of special errors for this operation, see [List of SELECT Object Content Error Codes] // -// Special Errors For a list of special errors for this operation, see List of -// SELECT Object Content Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#SelectObjectContentErrorCodeList) // The following operations are related to SelectObjectContent : -// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) -// - GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html) -// - PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) +// +// [GetObject] +// +// [GetBucketLifecycleConfiguration] +// +// [PutBucketLifecycleConfiguration] +// +// [Appendix: SelectObjectContent Response]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTSelectObjectAppendix.html +// [Selecting Content from Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/selecting-content-from-objects.html +// [PutBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html +// [SelectObjectContentRequest - ScanRange]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_SelectObjectContent.html#AmazonS3-SelectObjectContent-request-ScanRange +// [List of SELECT Object Content Error Codes]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#SelectObjectContentErrorCodeList +// [GetBucketLifecycleConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html +// [Using Amazon S3 storage classes]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-class-intro.html +// [SELECT Command]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-glacier-select-sql-reference-select.html +// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html +// [Specifying Permissions in a Policy]: https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html +// +// [Server-Side Encryption (Using Customer-Provided Encryption Keys)]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html +// [Protecting Data Using Server-Side Encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html func (c *Client) SelectObjectContent(ctx context.Context, params *SelectObjectContentInput, optFns ...func(*Options)) (*SelectObjectContentOutput, error) { if params == nil { params = &SelectObjectContentInput{} @@ -90,14 +117,18 @@ func (c *Client) SelectObjectContent(ctx context.Context, params *SelectObjectCo return out, nil } +// Learn Amazon S3 Select is no longer available to new customers. Existing +// customers of Amazon S3 Select can continue to use the feature as usual. [Learn more] +// // Request to filter the contents of an Amazon S3 object based on a simple // Structured Query Language (SQL) statement. In the request, along with the SQL // expression, you must specify a data serialization format (JSON or CSV) of the // object. Amazon S3 uses this to parse object data into records. It returns only // records that match the specified SQL expression. You must also specify the data -// serialization format for the response. For more information, see S3Select API -// Documentation (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html) -// . +// serialization format for the response. 
For more information, see [S3Select API Documentation]. +// +// [Learn more]: http://aws.amazon.com/blogs/storage/how-to-optimize-querying-your-data-in-amazon-s3/ +// [S3Select API Documentation]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html type SelectObjectContentInput struct { // The S3 bucket. @@ -140,30 +171,37 @@ type SelectObjectContentInput struct { // The server-side encryption (SSE) algorithm used to encrypt the object. This // parameter is needed only when the object was created using a checksum algorithm. - // For more information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) - // in the Amazon S3 User Guide. + // For more information, see [Protecting data using SSE-C keys]in the Amazon S3 User Guide. + // + // [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html SSECustomerAlgorithm *string // The server-side encryption (SSE) customer managed key. This parameter is needed // only when the object was created using a checksum algorithm. For more - // information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) - // in the Amazon S3 User Guide. + // information, see [Protecting data using SSE-C keys]in the Amazon S3 User Guide. + // + // [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html SSECustomerKey *string // The MD5 server-side encryption (SSE) customer managed key. This parameter is // needed only when the object was created using a checksum algorithm. For more - // information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) - // in the Amazon S3 User Guide. + // information, see [Protecting data using SSE-C keys]in the Amazon S3 User Guide. + // + // [Protecting data using SSE-C keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html SSECustomerKeyMD5 *string // Specifies the byte range of the object to get the records from. A record is // processed when its first byte is contained by the range. This parameter is // optional, but when specified, it must not be empty. See RFC 2616, Section - // 14.35.1 about how to specify the start and end of the range. ScanRange may be - // used in the following ways: + // 14.35.1 about how to specify the start and end of the range. + // + // ScanRange may be used in the following ways: + // // - 50100 - process only the records starting between the bytes 50 and 100 // (inclusive, counting from zero) + // // - 50 - process only the records starting after the byte 50 + // // - 50 - process only the records within the last 50 bytes of the file. 
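To tie the input fields above together, here is a hedged sketch that runs a SQL expression over a CSV object and drains the chunked event stream described earlier; the bucket, key, query, and 1 KiB scan range are assumptions for illustration:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	out, err := client.SelectObjectContent(ctx, &s3.SelectObjectContentInput{
		Bucket:         aws.String("example-bucket"), // hypothetical
		Key:            aws.String("data.csv"),
		Expression:     aws.String("SELECT s.name FROM S3Object s"),
		ExpressionType: types.ExpressionTypeSql,
		InputSerialization: &types.InputSerialization{
			CSV: &types.CSVInput{FileHeaderInfo: types.FileHeaderInfoUse},
		},
		OutputSerialization: &types.OutputSerialization{
			CSV: &types.CSVOutput{},
		},
		// Optional: only process records whose first byte falls in 0..1023.
		ScanRange: &types.ScanRange{Start: aws.Int64(0), End: aws.Int64(1023)},
	})
	if err != nil {
		log.Fatal(err)
	}

	// The response arrives as a chunked event stream; Records events carry
	// the matching rows as raw bytes.
	stream := out.GetStream()
	defer stream.Close()
	for event := range stream.Events() {
		if records, ok := event.(*types.SelectObjectContentEventStreamMemberRecords); ok {
			fmt.Print(string(records.Value.Payload))
		}
	}
	if err := stream.Err(); err != nil {
		log.Fatal(err)
	}
}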
ScanRange *types.ScanRange @@ -171,6 +209,7 @@ type SelectObjectContentInput struct { } func (in *SelectObjectContentInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket } @@ -235,6 +274,9 @@ func (c *Client) addOperationSelectObjectContentMiddlewares(stack *middleware.St if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -244,6 +286,18 @@ func (c *Client) addOperationSelectObjectContentMiddlewares(stack *middleware.St if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpSelectObjectContentValidationMiddleware(stack); err != nil { return err } @@ -277,6 +331,18 @@ func (c *Client) addOperationSelectObjectContentMiddlewares(stack *middleware.St if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPart.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPart.go index ff7319794..624a67b64 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPart.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPart.go @@ -16,58 +16,84 @@ import ( "io" ) -// Uploads a part in a multipart upload. In this operation, you provide new data -// as a part of an object in your request. However, you have an option to specify -// your existing Amazon S3 object as a data source for the part you are uploading. -// To upload a part from an existing object, you use the UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) -// operation. You must initiate a multipart upload (see CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) -// ) before you can upload any part. In response to your initiate request, Amazon -// S3 returns an upload ID, a unique identifier that you must include in your -// upload part request. Part numbers can be any number from 1 to 10,000, inclusive. -// A part number uniquely identifies a part and also defines its position within -// the object being created. If you upload a new part using the same part number -// that was used with a previous part, the previously uploaded part is overwritten. +// Uploads a part in a multipart upload. +// +// In this operation, you provide new data as a part of an object in your request. +// However, you have an option to specify your existing Amazon S3 object as a data +// source for the part you are uploading. To upload a part from an existing object, +// you use the [UploadPartCopy]operation. +// +// You must initiate a multipart upload (see [CreateMultipartUpload]) before you can upload any part. 
In +// response to your initiate request, Amazon S3 returns an upload ID, a unique +// identifier that you must include in your upload part request. +// +// Part numbers can be any number from 1 to 10,000, inclusive. A part number +// uniquely identifies a part and also defines its position within the object being +// created. If you upload a new part using the same part number that was used with +// a previous part, the previously uploaded part is overwritten. +// // For information about maximum and minimum part sizes and other multipart upload -// specifications, see Multipart upload limits (https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html) -// in the Amazon S3 User Guide. After you initiate multipart upload and upload one -// or more parts, you must either complete or abort multipart upload in order to -// stop getting charged for storage of the uploaded parts. Only after you either -// complete or abort multipart upload, Amazon S3 frees up the parts storage and -// stops charging you for the parts storage. For more information on multipart -// uploads, go to Multipart Upload Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) -// in the Amazon S3 User Guide . Directory buckets - For directory buckets, you -// must make requests for this API operation to the Zonal endpoint. These endpoints -// support virtual-hosted-style requests in the format -// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style -// requests are not supported. For more information, see Regional and Zonal -// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. Permissions -// - General purpose bucket permissions - For information on the permissions -// required to use the multipart upload API, see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) -// in the Amazon S3 User Guide. -// - Directory bucket permissions - To grant access to this API operation on a -// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// API operation for session-based authorization. Specifically, you grant the -// s3express:CreateSession permission to the directory bucket in a bucket policy -// or an IAM identity-based policy. Then, you make the CreateSession API call on -// the bucket to obtain a session token. With the session token in your request -// header, you can make API requests to this operation. After the session token -// expires, you make another CreateSession API call to generate a new session -// token for use. Amazon Web Services CLI or SDKs create session and refresh the -// session token automatically to avoid service interruptions when a session -// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) -// . -// -// Data integrity General purpose bucket - To ensure that data is not corrupted +// specifications, see [Multipart upload limits]in the Amazon S3 User Guide. +// +// After you initiate multipart upload and upload one or more parts, you must +// either complete or abort multipart upload in order to stop getting charged for +// storage of the uploaded parts. Only after you either complete or abort multipart +// upload, Amazon S3 frees up the parts storage and stops charging you for the +// parts storage. 
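The initiate, upload, and complete-or-abort lifecycle described above, as a hedged end-to-end sketch with a single small part and placeholder names (in real uploads every part except the last must be at least 5 MiB):

package main

import (
	"bytes"
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	bucket, key := aws.String("example-bucket"), aws.String("big-object") // hypothetical

	// 1. Initiate: Amazon S3 hands back the upload ID that every
	//    subsequent UploadPart call must carry.
	mpu, err := client.CreateMultipartUpload(ctx, &s3.CreateMultipartUploadInput{
		Bucket: bucket,
		Key:    key,
	})
	if err != nil {
		log.Fatal(err)
	}

	// 2. Upload part 1. Re-uploading with the same part number would
	//    overwrite this part, per the doc comment above.
	part, err := client.UploadPart(ctx, &s3.UploadPartInput{
		Bucket:     bucket,
		Key:        key,
		UploadId:   mpu.UploadId,
		PartNumber: aws.Int32(1),
		Body:       bytes.NewReader([]byte("part one payload")),
	})
	if err != nil {
		log.Fatal(err)
	}

	// 3. Complete (or abort), so the parts stop accruing storage charges.
	_, err = client.CompleteMultipartUpload(ctx, &s3.CompleteMultipartUploadInput{
		Bucket:   bucket,
		Key:      key,
		UploadId: mpu.UploadId,
		MultipartUpload: &types.CompletedMultipartUpload{
			Parts: []types.CompletedPart{
				{ETag: part.ETag, PartNumber: aws.Int32(1)},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}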
+// +// For more information on multipart uploads, go to [Multipart Upload Overview] in the Amazon S3 User Guide . +// +// Directory buckets - For directory buckets, you must make requests for this API +// operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format +// https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name +// . Path-style requests are not supported. For more information about endpoints +// in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information +// about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. +// +// Permissions +// - General purpose bucket permissions - To perform a multipart upload with +// encryption using a Key Management Service key, the requester must have +// permission to the kms:Decrypt and kms:GenerateDataKey actions on the key. The +// requester must also have permissions for the kms:GenerateDataKey action for +// the CreateMultipartUpload API. Then, the requester needs permissions for the +// kms:Decrypt action on the UploadPart and UploadPartCopy APIs. +// +// These permissions are required because Amazon S3 must decrypt and read data +// +// from the encrypted file parts before it completes the multipart upload. For more +// information about KMS permissions, see [Protecting data using server-side encryption with KMS]in the Amazon S3 User Guide. For +// information about the permissions required to use the multipart upload API, see [Multipart upload and permissions] +// and [Multipart upload API and permissions]in the Amazon S3 User Guide. +// +// - Directory bucket permissions - To grant access to this API operation on a +// directory bucket, we recommend that you use the [CreateSession] API operation +// for session-based authorization. Specifically, you grant the +// s3express:CreateSession permission to the directory bucket in a bucket policy +// or an IAM identity-based policy. Then, you make the CreateSession API call on +// the bucket to obtain a session token. With the session token in your request +// header, you can make API requests to this operation. After the session token +// expires, you make another CreateSession API call to generate a new session +// token for use. Amazon Web Services CLI or SDKs create session and refresh the +// session token automatically to avoid service interruptions when a session +// expires. For more information about authorization, see [CreateSession]. +// +// If the object is encrypted with SSE-KMS, you must also have the +// +// kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies +// and KMS key policies for the KMS key. +// +// Data integrity General purpose bucket - To ensure that data is not corrupted // traversing the network, specify the Content-MD5 header in the upload part // request. Amazon S3 checks the part data against the provided MD5 value. If they // do not match, Amazon S3 returns an error. If the upload request is signed with // Signature Version 4, then Amazon Web Services S3 uses the x-amz-content-sha256 -// header as a checksum instead of Content-MD5 . For more information see -// Authenticating Requests: Using the Authorization Header (Amazon Web Services -// Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using-authorization-header.html) -// .
Directory buckets - MD5 is not supported by directory buckets. You can use -// checksum algorithms to check object integrity. Encryption +// header as a checksum instead of Content-MD5 . For more information see [Authenticating Requests: Using the Authorization Header (Amazon Web Services Signature Version 4)]. +// +// Directory buckets - MD5 is not supported by directory buckets. You can use +// checksum algorithms to check object integrity. +// +// Encryption // - General purpose bucket - Server-side encryption is for data encryption at // rest. Amazon S3 encrypts your data as it writes it to disks in its data centers // and decrypts it when you access it. You have mutually exclusive options to @@ -78,37 +104,76 @@ import ( // encryption using Amazon S3 managed keys (SSE-S3) by default. You can optionally // tell Amazon S3 to encrypt data at rest using server-side encryption with other // key options. The option you use depends on whether you want to use KMS keys -// (SSE-KMS) or provide your own encryption key (SSE-C). Server-side encryption is -// supported by the S3 Multipart Upload operations. Unless you are using a -// customer-provided encryption key (SSE-C), you don't need to specify the -// encryption parameters in each UploadPart request. Instead, you only need to -// specify the server-side encryption parameters in the initial Initiate Multipart -// request. For more information, see CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) -// . If you request server-side encryption using a customer-provided encryption key -// (SSE-C) in your initiate multipart upload request, you must provide identical -// encryption information in each part upload using the following request headers. -// - x-amz-server-side-encryption-customer-algorithm -// - x-amz-server-side-encryption-customer-key -// - x-amz-server-side-encryption-customer-key-MD5 -// - Directory bucket - For directory buckets, only server-side encryption with -// Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported. -// -// For more information, see Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) -// in the Amazon S3 User Guide. Special errors +// (SSE-KMS) or provide your own encryption key (SSE-C). +// +// Server-side encryption is supported by the S3 Multipart Upload operations. +// +// Unless you are using a customer-provided encryption key (SSE-C), you don't need +// to specify the encryption parameters in each UploadPart request. Instead, you +// only need to specify the server-side encryption parameters in the initial +// Initiate Multipart request. For more information, see [CreateMultipartUpload]. +// +// If you request server-side encryption using a customer-provided encryption key +// +// (SSE-C) in your initiate multipart upload request, you must provide identical +// encryption information in each part upload using the following request headers. +// +// - x-amz-server-side-encryption-customer-algorithm +// +// - x-amz-server-side-encryption-customer-key +// +// - x-amz-server-side-encryption-customer-key-MD5 +// +// For more information, see [Using Server-Side Encryption]in the Amazon S3 User Guide. +// +// - Directory buckets - For directory buckets, there are only two supported +// options for server-side encryption: server-side encryption with Amazon S3 +// managed keys (SSE-S3) ( AES256 ) and server-side encryption with KMS keys +// (SSE-KMS) ( aws:kms ). 
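The SSE-C bullets above require the same three customer-key headers on the initiate request and on every part upload. A small stdlib-only sketch of deriving those header values once so they can be reused on each UploadPartInput; the fixed demo key is an assumption, and real code would generate it with crypto/rand:

package main

import (
	"crypto/md5"
	"encoding/base64"
	"fmt"
)

// sseCHeaders derives the three x-amz-server-side-encryption-customer-*
// values from a raw 256-bit key: the algorithm name, the Base64 encoded
// key, and the Base64 encoded MD5 digest of the raw key. The same trio
// must accompany CreateMultipartUpload and every UploadPart request.
func sseCHeaders(rawKey []byte) (algorithm, key, keyMD5 string) {
	sum := md5.Sum(rawKey) // MD5 of the raw key, then Base64, per the docs
	return "AES256",
		base64.StdEncoding.EncodeToString(rawKey),
		base64.StdEncoding.EncodeToString(sum[:])
}

func main() {
	// Fixed 32-byte demo key; a placeholder, not a secure key.
	rawKey := make([]byte, 32)
	for i := range rawKey {
		rawKey[i] = byte(i)
	}

	alg, key, keyMD5 := sseCHeaders(rawKey)
	fmt.Println(alg, key, keyMD5)
	// These three strings would populate SSECustomerAlgorithm,
	// SSECustomerKey, and SSECustomerKeyMD5 on each UploadPartInput.
}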
+// +// Special errors +// // - Error Code: NoSuchUpload +// // - Description: The specified multipart upload does not exist. The upload ID // might be invalid, or the multipart upload might have been aborted or completed. +// // - HTTP Status Code: 404 Not Found +// // - SOAP Fault Code Prefix: Client // -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket_name.s3express-az_id.region.amazonaws.com . The following operations are -// related to UploadPart : -// - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) -// - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) -// - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) -// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) -// - ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket-name.s3express-zone-id.region-code.amazonaws.com . +// +// The following operations are related to UploadPart : +// +// [CreateMultipartUpload] +// +// [CompleteMultipartUpload] +// +// [AbortMultipartUpload] +// +// [ListParts] +// +// [ListMultipartUploads] +// +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [ListParts]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html +// [Authenticating Requests: Using the Authorization Header (Amazon Web Services Signature Version 4)]: https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using-authorization-header.html +// [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html +// [CompleteMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html +// [CreateMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html +// [Using Server-Side Encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html +// [Multipart upload limits]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html +// [AbortMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html +// [Multipart Upload Overview]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html +// [ListMultipartUploads]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html +// +// [Protecting data using server-side encryption with KMS]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html +// [Multipart upload and permissions]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html +// [CreateSession]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html +// [Multipart upload API and permissions]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions func (c *Client) UploadPart(ctx context.Context, params *UploadPartInput, optFns ...func(*Options)) (*UploadPartOutput, error) { if params == nil { params = &UploadPartInput{} @@ -126,31 +191,39 @@ func (c *Client) UploadPart(ctx context.Context, params *UploadPartInput, 
optFns type UploadPartInput struct { - // The name of the bucket to which the multipart upload was initiated. Directory - // buckets - When you use this operation with a directory bucket, you must use - // virtual-hosted-style requests in the format - // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not - // supported. Directory bucket names must be unique in the chosen Availability - // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for - // example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket - // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide. Access points - When you use this action with an - // access point, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When using the access point ARN, - // you must direct requests to the access point hostname. The access point hostname - // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. Access points and Object Lambda access points are - // not supported by directory buckets. S3 on Outposts - When you use this action - // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts - // hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // The name of the bucket to which the multipart upload was initiated. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use virtual-hosted-style requests in the format + // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests + // are not supported. Directory bucket names must be unique in the chosen Zone + // (Availability Zone or Local Zone). Bucket names must follow the format + // bucket-base-name--zone-id--x-s3 (for example, + // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the access + // point ARN. When using the access point ARN, you must direct requests to the + // access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. 
+ // + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with S3 on Outposts, you must direct + // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the + // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When + // you use this action with S3 on Outposts, the destination bucket must be the + // Outposts access point ARN or the access point alias. For more information about + // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string @@ -178,50 +251,67 @@ type UploadPartInput struct { // the SDK. This header will not provide any additional functionality if you don't // use the SDK. When you send this header, there must be a corresponding // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the - // request with the HTTP status code 400 Bad Request . For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. This checksum algorithm must - // be the same for all parts and it match the checksum value supplied in the - // CreateMultipartUpload request. + // request with the HTTP status code 400 Bad Request . For more information, see [Checking object integrity] + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided + // ChecksumAlgorithm parameter. + // + // This checksum algorithm must be the same for all parts and it must match the + // checksum value supplied in the CreateMultipartUpload request. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumAlgorithm types.ChecksumAlgorithm // This header can be used as a data integrity check to verify that the data // received is the same data that was originally sent. This header specifies the - // base64-encoded, 32-bit CRC32 checksum of the object. For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Base64 encoded, 32-bit CRC32 checksum of the object. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumCRC32 *string // This header can be used as a data integrity check to verify that the data // received is the same data that was originally sent. This header specifies the - // base64-encoded, 32-bit CRC32C checksum of the object. For more information, see [Checking object integrity] // in the Amazon S3 User Guide.
+ // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumCRC32C *string // This header can be used as a data integrity check to verify that the data // received is the same data that was originally sent. This header specifies the - // base64-encoded, 160-bit SHA-1 digest of the object. For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Base64 encoded, 64-bit CRC64NVME checksum of the part. For more information, + // see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumCRC64NVME *string + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies the + // Base64 encoded, 160-bit SHA1 digest of the object. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumSHA1 *string // This header can be used as a data integrity check to verify that the data // received is the same data that was originally sent. This header specifies the - // base64-encoded, 256-bit SHA-256 digest of the object. For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Base64 encoded, 256-bit SHA256 digest of the object. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumSHA256 *string // Size of the body in bytes. This parameter is useful when the size of the body // cannot be determined automatically. ContentLength *int64 - // The base64-encoded 128-bit MD5 digest of the part data. This parameter is + // The Base64 encoded 128-bit MD5 digest of the part data. This parameter is // auto-populated when using the command from the CLI. This parameter is required - // if object lock parameters are specified. This functionality is not supported for - // directory buckets. + // if object lock parameters are specified. + // + // This functionality is not supported for directory buckets. ContentMD5 *string // The account ID of the expected bucket owner. If the account ID that you provide @@ -233,14 +323,17 @@ type UploadPartInput struct { // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. 
+ // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer - // Specifies the algorithm to use when encrypting the object (for example, - // AES256). This functionality is not supported for directory buckets. + // Specifies the algorithm to use when encrypting the object (for example, AES256). + // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string // Specifies the customer-provided encryption key for Amazon S3 to use in @@ -248,20 +341,23 @@ type UploadPartInput struct { // discarded; Amazon S3 does not store the encryption key. The key must be // appropriate for use with the algorithm specified in the // x-amz-server-side-encryption-customer-algorithm header. This must be the same - // encryption key specified in the initiate multipart upload request. This - // functionality is not supported for directory buckets. + // encryption key specified in the initiate multipart upload request. + // + // This functionality is not supported for directory buckets. SSECustomerKey *string // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. This functionality is not - // supported for directory buckets. + // encryption key was transmitted without error. + // + // This functionality is not supported for directory buckets. SSECustomerKeyMD5 *string noSmithyDocumentSerde } func (in *UploadPartInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.Key = in.Key @@ -270,73 +366,89 @@ func (in *UploadPartInput) bindEndpointParams(p *EndpointParameters) { type UploadPartOutput struct { // Indicates whether the multipart upload uses an S3 Bucket Key for server-side - // encryption with Key Management Service (KMS) keys (SSE-KMS). This functionality - // is not supported for directory buckets. + // encryption with Key Management Service (KMS) keys (SSE-KMS). BucketKeyEnabled *bool - // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be - // present if it was uploaded with the object. When you use an API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 32-bit CRC32 checksum of the object. This checksum is only + // present if the checksum was uploaded with the object. When you use an API + // operation on an object that was uploaded using multipart uploads, this value may + // not be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information about + // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumCRC32 *string - // The base64-encoded, 32-bit CRC32C checksum of the object.
This will only be - // present if it was uploaded with the object. When you use an API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 32-bit CRC32C checksum of the object. This checksum is only + // present if the checksum was uploaded with the object. When you use an API + // operation on an object that was uploaded using multipart uploads, this value may + // not be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information about + // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumCRC32C *string - // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. When you use the API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies the + // Base64 encoded, 64-bit CRC64NVME checksum of the part. For more information, + // see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumCRC64NVME *string + + // The Base64 encoded, 160-bit SHA1 digest of the object. This will only be + // present if the checksum was uploaded with the object. When you use the API + // operation on an object that was uploaded using multipart uploads, this value may + // not be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information about + // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumSHA1 *string - // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be - // present if it was uploaded with the object. When you use an API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part.
For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 256-bit SHA256 digest of the object. This will only be + // present if the checksum was uploaded with the object. When you use an API + // operation on an object that was uploaded using multipart uploads, this value may + // not be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information about + // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumSHA256 *string // Entity tag for the uploaded object. ETag *string // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // If server-side encryption with a customer-provided encryption key was // requested, the response will include this header to confirm the encryption - // algorithm that's used. This functionality is not supported for directory - // buckets. + // algorithm that's used. + // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string // If server-side encryption with a customer-provided encryption key was // requested, the response will include this header to provide the round-trip - // message integrity verification of the customer-provided encryption key. This - // functionality is not supported for directory buckets. + // message integrity verification of the customer-provided encryption key. + // + // This functionality is not supported for directory buckets. SSECustomerKeyMD5 *string - // If present, indicates the ID of the Key Management Service (KMS) symmetric - // encryption customer managed key that was used for the object. This functionality - // is not supported for directory buckets. + // If present, indicates the ID of the KMS key that was used for object encryption. SSEKMSKeyId *string // The server-side encryption algorithm used when you store this object in Amazon - // S3 (for example, AES256 , aws:kms ). For directory buckets, only server-side - // encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported. + // S3 (for example, AES256 , aws:kms ). ServerSideEncryption types.ServerSideEncryption // Metadata pertaining to the operation's result.
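The UploadPartInput and UploadPartOutput fields documented above pair each request checksum with an echoed response value. As an illustration only (not part of the vendored patch), here is a minimal caller-side sketch against the aws-sdk-go-v2 packages this PR vendors; the bucket, key, and upload ID are hypothetical placeholders, and the CRC32 value is the Base64 encoding of the big-endian checksum bytes, as the docs above describe:

package main

import (
	"bytes"
	"context"
	"encoding/base64"
	"encoding/binary"
	"hash/crc32"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Parts other than the last must be at least 5 MiB.
	part := bytes.Repeat([]byte("x"), 5*1024*1024)

	// S3 expects the Base64 encoding of the big-endian CRC32 value.
	sum := make([]byte, 4)
	binary.BigEndian.PutUint32(sum, crc32.ChecksumIEEE(part))

	out, err := client.UploadPart(ctx, &s3.UploadPartInput{
		Bucket:        aws.String("amzn-s3-demo-bucket"), // placeholder
		Key:           aws.String("example-object"),      // placeholder
		UploadId:      aws.String("example-upload-id"),   // from CreateMultipartUpload
		PartNumber:    aws.Int32(1),
		Body:          bytes.NewReader(part),
		ChecksumCRC32: aws.String(base64.StdEncoding.EncodeToString(sum)),
	})
	if err != nil {
		log.Fatal(err)
	}
	// The ETag and echoed checksum feed the parts list for CompleteMultipartUpload.
	log.Printf("etag=%s crc32=%s", aws.ToString(out.ETag), aws.ToString(out.ChecksumCRC32))
}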
@@ -388,6 +500,9 @@ func (c *Client) addOperationUploadPartMiddlewares(stack *middleware.Stack, opti if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -403,6 +518,21 @@ func (c *Client) addOperationUploadPartMiddlewares(stack *middleware.Stack, opti if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addRequestChecksumMetricsTracking(stack, options); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpUploadPartValidationMiddleware(stack); err != nil { return err } @@ -445,6 +575,18 @@ func (c *Client) addOperationUploadPartMiddlewares(stack *middleware.Stack, opti if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } @@ -474,9 +616,10 @@ func getUploadPartRequestAlgorithmMember(input interface{}) (string, bool) { } func addUploadPartInputChecksumMiddlewares(stack *middleware.Stack, options Options) error { - return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{ + return addInputChecksumMiddleware(stack, internalChecksum.InputMiddlewareOptions{ GetAlgorithm: getUploadPartRequestAlgorithmMember, RequireChecksum: false, + RequestChecksumCalculation: options.RequestChecksumCalculation, EnableTrailingChecksum: true, EnableComputeSHA256PayloadHash: true, EnableDecodedContentLengthHeader: true, @@ -521,6 +664,8 @@ func (c *PresignClient) PresignUploadPart(ctx context.Context, params *UploadPar } clientOptFns := append(options.ClientOptions, withNopHTTPClientAPIOption) + clientOptFns = append(clientOptFns, withNoDefaultChecksumAPIOption) + result, _, err := c.client.invokeOperation(ctx, "UploadPart", params, clientOptFns, c.client.addOperationUploadPartMiddlewares, presignConverter(options).convertToPresignMiddleware, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPartCopy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPartCopy.go index d42dc60cd..b5be77dff 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPartCopy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPartCopy.go @@ -18,90 +18,166 @@ import ( // Uploads a part by copying data from an existing object as data source. To // specify the data source, you add the request header x-amz-copy-source in your // request. To specify a byte range, you add the request header -// x-amz-copy-source-range in your request. For information about maximum and -// minimum part sizes and other multipart upload specifications, see Multipart -// upload limits (https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html) -// in the Amazon S3 User Guide.
Instead of copying data from an existing object as -// part data, you might use the UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) -// action to upload new data as a part of an object in your request. You must -// initiate a multipart upload before you can upload any part. In response to your -// initiate request, Amazon S3 returns the upload ID, a unique identifier that you -// must include in your upload part request. For conceptual information about -// multipart uploads, see Uploading Objects Using Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) -// in the Amazon S3 User Guide. For information about copying objects using a -// single atomic action vs. a multipart upload, see Operations on Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectOperations.html) -// in the Amazon S3 User Guide. Directory buckets - For directory buckets, you must -// make requests for this API operation to the Zonal endpoint. These endpoints -// support virtual-hosted-style requests in the format -// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style -// requests are not supported. For more information, see Regional and Zonal -// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) -// in the Amazon S3 User Guide. Authentication and authorization All UploadPartCopy -// requests must be authenticated and signed by using IAM credentials (access key -// ID and secret access key for the IAM identities). All headers with the x-amz- -// prefix, including x-amz-copy-source , must be signed. For more information, see -// REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html) -// . Directory buckets - You must use IAM credentials to authenticate and authorize +// x-amz-copy-source-range in your request. +// +// For information about maximum and minimum part sizes and other multipart upload +// specifications, see [Multipart upload limits]in the Amazon S3 User Guide. +// +// Instead of copying data from an existing object as part data, you might use the [UploadPart] +// action to upload new data as a part of an object in your request. +// +// You must initiate a multipart upload before you can upload any part. In +// response to your initiate request, Amazon S3 returns the upload ID, a unique +// identifier that you must include in your upload part request. +// +// For conceptual information about multipart uploads, see [Uploading Objects Using Multipart Upload] in the Amazon S3 User +// Guide. For information about copying objects using a single atomic action vs. a +// multipart upload, see [Operations on Objects]in the Amazon S3 User Guide. +// +// Directory buckets - For directory buckets, you must make requests for this API +// operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format +// https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name +// . Path-style requests are not supported. For more information about endpoints +// in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones]in the Amazon S3 User Guide. For more information +// about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones]in the Amazon S3 User Guide. 
+// +// Authentication and authorization All UploadPartCopy requests must be +// authenticated and signed by using IAM credentials (access key ID and secret +// access key for the IAM identities). All headers with the x-amz- prefix, +// including x-amz-copy-source , must be signed. For more information, see [REST Authentication]. +// +// Directory buckets - You must use IAM credentials to authenticate and authorize // your access to the UploadPartCopy API operation, instead of using the temporary -// security credentials through the CreateSession API operation. Amazon Web -// Services CLI or SDKs handles authentication and authorization on your behalf. +// security credentials through the CreateSession API operation. +// +// Amazon Web Services CLI or SDKs handle authentication and authorization on +// your behalf. +// // Permissions You must have READ access to the source object and WRITE access to // the destination bucket. +// // - General purpose bucket permissions - You must have the permissions in a // policy based on the bucket types of your source bucket and destination bucket in // an UploadPartCopy operation. +// // - If the source object is in a general purpose bucket, you must have the // s3:GetObject permission to read the source object that is being copied. +// // - If the destination bucket is a general purpose bucket, you must have the // s3:PutObject permission to write the object copy to the destination bucket. -// For information about permissions required to use the multipart upload API, see -// Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) -// in the Amazon S3 User Guide. +// +// - To perform a multipart upload with encryption using a Key Management +// Service key, the requester must have permission to the kms:Decrypt and +// kms:GenerateDataKey actions on the key. The requester must also have +// permissions for the kms:GenerateDataKey action for the CreateMultipartUpload +// API. Then, the requester needs permissions for the kms:Decrypt action on the +// UploadPart and UploadPartCopy APIs. These permissions are required because +// Amazon S3 must decrypt and read data from the encrypted file parts before it +// completes the multipart upload. For more information about KMS permissions, see [Protecting data using server-side encryption with KMS] +// in the Amazon S3 User Guide. For information about the permissions required to +// use the multipart upload API, see [Multipart upload and permissions]and [Multipart upload API and permissions]in the Amazon S3 User Guide. +// // - Directory bucket permissions - You must have permissions in a bucket policy // or an IAM identity-based policy based on the source and destination bucket types // in an UploadPartCopy operation. +// // - If the source object that you want to copy is in a directory bucket, you // must have the s3express:CreateSession permission in the Action element of a -// policy to read the object . By default, the session is in the ReadWrite mode. +// policy to read the object. By default, the session is in the ReadWrite mode. // If you want to restrict the access, you can explicitly set the // s3express:SessionMode condition key to ReadOnly on the copy source bucket. +// // - If the copy destination is a directory bucket, you must have the // s3express:CreateSession permission in the Action element of a policy to write // the object to the destination. The s3express:SessionMode condition key cannot -// be set to ReadOnly on the copy destination.
For example policies, see Example -// bucket policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html) -// and Amazon Web Services Identity and Access Management (IAM) identity-based -// policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html) -// in the Amazon S3 User Guide. +// be set to ReadOnly on the copy destination. +// +// If the object is encrypted with SSE-KMS, you must also have the +// +// kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies +// and KMS key policies for the KMS key. +// +// For example policies, see [Example bucket policies for S3 Express One Zone]and [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone]in the Amazon S3 User Guide. // // Encryption +// // - General purpose buckets - For information about using server-side // encryption with customer-provided encryption keys with the UploadPartCopy -// operation, see CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) -// and UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) -// . -// - Directory buckets - For directory buckets, only server-side encryption with -// Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported. +// operation, see [CopyObject]and [UploadPart]. +// +// - Directory buckets - For directory buckets, there are only two supported +// options for server-side encryption: server-side encryption with Amazon S3 +// managed keys (SSE-S3) ( AES256 ) and server-side encryption with KMS keys +// (SSE-KMS) ( aws:kms ). For more information, see [Protecting data with server-side encryption]in the Amazon S3 User Guide. +// +// For directory buckets, when you perform a CreateMultipartUpload operation and an +// +// UploadPartCopy operation, the request headers you provide in the +// CreateMultipartUpload request must match the default encryption configuration +// of the destination bucket. +// +// S3 Bucket Keys aren't supported when you copy SSE-KMS encrypted objects from +// +// general purpose buckets to directory buckets, from directory buckets to general +// purpose buckets, or between directory buckets, through [UploadPartCopy]. In this case, Amazon +// S3 makes a call to KMS every time a copy request is made for a KMS-encrypted +// object. // // Special errors +// // - Error Code: NoSuchUpload +// // - Description: The specified multipart upload does not exist. The upload ID // might be invalid, or the multipart upload might have been aborted or completed. +// // - HTTP Status Code: 404 Not Found +// // - Error Code: InvalidRequest +// // - Description: The specified copy source is not supported as a byte-range // copy source. +// // - HTTP Status Code: 400 Bad Request // -// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -// Bucket_name.s3express-az_id.region.amazonaws.com .
The following operations are -// related to UploadPartCopy : -// - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) -// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) -// - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) -// - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) -// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) -// - ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket-name.s3express-zone-id.region-code.amazonaws.com . +// +// The following operations are related to UploadPartCopy : +// +// [CreateMultipartUpload] +// +// [UploadPart] +// +// [CompleteMultipartUpload] +// +// [AbortMultipartUpload] +// +// [ListParts] +// +// [ListMultipartUploads] +// +// [Uploading Objects Using Multipart Upload]: https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html +// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html +// [ListParts]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html +// [UploadPart]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html +// [Protecting data using server-side encryption with KMS]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html +// [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html +// [Multipart upload and permissions]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html +// [Multipart upload API and permissions]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions +// [CompleteMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html +// [CreateMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html +// [Multipart upload limits]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html +// [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html +// [AbortMultipartUpload]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html +// [REST Authentication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html +// [Example bucket policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html +// [Operations on Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectOperations.html +// [Protecting data with server-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-serv-side-encryption.html +// [ListMultipartUploads]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html +// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html +// +// [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html func (c *Client) UploadPartCopy(ctx context.Context, params *UploadPartCopyInput, optFns 
...func(*Options)) (*UploadPartCopyOutput, error) { if params == nil { params = &UploadPartCopyInput{} @@ -119,43 +195,59 @@ func (c *Client) UploadPartCopy(ctx context.Context, params *UploadPartCopyInput type UploadPartCopyInput struct { - // The bucket name. Directory buckets - When you use this operation with a - // directory bucket, you must use virtual-hosted-style requests in the format - // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not - // supported. Directory bucket names must be unique in the chosen Availability - // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for - // example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3 ). For information about bucket - // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) - // in the Amazon S3 User Guide. Access points - When you use this action with an - // access point, you must provide the alias of the access point in place of the - // bucket name or specify the access point ARN. When using the access point ARN, - // you must direct requests to the access point hostname. The access point hostname - // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this action with an access point through the Amazon Web Services - // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. Access points and Object Lambda access points are - // not supported by directory buckets. S3 on Outposts - When you use this action - // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts - // hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you - // use this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts access point ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) - // in the Amazon S3 User Guide. + // The bucket name. + // + // Directory buckets - When you use this operation with a directory bucket, you + // must use virtual-hosted-style requests in the format + // Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests + // are not supported. Directory bucket names must be unique in the chosen Zone + // (Availability Zone or Local Zone). Bucket names must follow the format + // bucket-base-name--zone-id--x-s3 (for example, + // amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming + // restrictions, see [Directory bucket naming rules]in the Amazon S3 User Guide. + // + // Copying objects across different Amazon Web Services Regions isn't supported + // when the source or destination bucket is in Amazon Web Services Local Zones. The + // source and destination buckets must have the same parent Amazon Web Services + // Region. Otherwise, you get an HTTP 400 Bad Request error with the error code + // InvalidRequest . + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the access + // point ARN. 
When using the access point ARN, you must direct requests to the + // access point hostname. The access point hostname takes the form + // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this + // action with an access point through the Amazon Web Services SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see [Using access points]in the Amazon S3 User Guide. + // + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with S3 on Outposts, you must direct + // requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the + // form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When + // you use this action with S3 on Outposts, the destination bucket must be the + // Outposts access point ARN or the access point alias. For more information about + // S3 on Outposts, see [What is S3 on Outposts?]in the Amazon S3 User Guide. + // + // [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html + // [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html + // [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html // // This member is required. Bucket *string // Specifies the source object for the copy operation. You specify the value in // one of two formats, depending on whether you want to access the source object - // through an access point (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html) - // : + // through an [access point]: + // // - For objects not accessed through an access point, specify the name of the // source bucket and key of the source object, separated by a slash (/). For // example, to copy the object reports/january.pdf from the bucket // awsexamplebucket , use awsexamplebucket/reports/january.pdf . The value must // be URL-encoded. + // // - For objects accessed through access points, specify the Amazon Resource // Name (ARN) of the object as accessed through the access point, in the format // arn:aws:s3:::accesspoint//object/ . For example, to copy the object @@ -163,28 +255,39 @@ type UploadPartCopyInput struct { // 123456789012 in Region us-west-2 , use the URL encoding of // arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf // . The value must be URL encoded. + // // - Amazon S3 supports copy operations using Access points only when the source // and destination buckets are in the same Amazon Web Services Region. - // - Access points are not supported by directory buckets. Alternatively, for - // objects accessed through Amazon S3 on Outposts, specify the ARN of the object as - // accessed in the format arn:aws:s3-outposts:::outpost//object/ . For example, - // to copy the object reports/january.pdf through outpost my-outpost owned by - // account 123456789012 in Region us-west-2 , use the URL encoding of + // + // - Access points are not supported by directory buckets. + // + // Alternatively, for objects accessed through Amazon S3 on Outposts, specify the + // ARN of the object as accessed in the format + // arn:aws:s3-outposts:::outpost//object/ . 
For example, to copy the object + // reports/january.pdf through outpost my-outpost owned by account 123456789012 + // in Region us-west-2 , use the URL encoding of // arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf // . The value must be URL-encoded. + // // If your bucket has versioning enabled, you could have multiple versions of the // same object. By default, x-amz-copy-source identifies the current version of // the source object to copy. To copy a specific version of the source object to // copy, append ?versionId= to the x-amz-copy-source request header (for example, // x-amz-copy-source: // /awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893 - // ). If the current version is a delete marker and you don't specify a versionId - // in the x-amz-copy-source request header, Amazon S3 returns a 404 Not Found - // error, because the object does not exist. If you specify versionId in the + // ). + // + // If the current version is a delete marker and you don't specify a versionId in + // the x-amz-copy-source request header, Amazon S3 returns a 404 Not Found error, + // because the object does not exist. If you specify versionId in the // x-amz-copy-source and the versionId is a delete marker, Amazon S3 returns an // HTTP 400 Bad Request error, because you are not allowed to specify a delete - // marker as a version for the x-amz-copy-source . Directory buckets - S3 - // Versioning isn't enabled and supported for directory buckets. + // marker as a version for the x-amz-copy-source . + // + // Directory buckets - S3 Versioning isn't enabled and supported for directory + // buckets. + // + // [access point]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html // // This member is required. CopySource *string @@ -205,34 +308,56 @@ type UploadPartCopyInput struct { // This member is required. UploadId *string - // Copies the object if its entity tag (ETag) matches the specified tag. If both - // of the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since - // headers are present in the request as follows: x-amz-copy-source-if-match - // condition evaluates to true , and; x-amz-copy-source-if-unmodified-since - // condition evaluates to false ; Amazon S3 returns 200 OK and copies the data. + // Copies the object if its entity tag (ETag) matches the specified tag. + // + // If both of the x-amz-copy-source-if-match and + // x-amz-copy-source-if-unmodified-since headers are present in the request as + // follows: + // + // x-amz-copy-source-if-match condition evaluates to true , and; + // + // x-amz-copy-source-if-unmodified-since condition evaluates to false ; + // + // Amazon S3 returns 200 OK and copies the data. CopySourceIfMatch *string - // Copies the object if it has been modified since the specified time. If both of - // the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since - // headers are present in the request as follows: x-amz-copy-source-if-none-match - // condition evaluates to false , and; x-amz-copy-source-if-modified-since - // condition evaluates to true ; Amazon S3 returns 412 Precondition Failed - // response code. + // Copies the object if it has been modified since the specified time. 
+ // + // If both of the x-amz-copy-source-if-none-match and + // x-amz-copy-source-if-modified-since headers are present in the request as + // follows: + // + // x-amz-copy-source-if-none-match condition evaluates to false , and; + // + // x-amz-copy-source-if-modified-since condition evaluates to true ; + // + // Amazon S3 returns 412 Precondition Failed response code. CopySourceIfModifiedSince *time.Time - // Copies the object if its entity tag (ETag) is different than the specified - // ETag. If both of the x-amz-copy-source-if-none-match and + // Copies the object if its entity tag (ETag) is different than the specified ETag. + // + // If both of the x-amz-copy-source-if-none-match and // x-amz-copy-source-if-modified-since headers are present in the request as - // follows: x-amz-copy-source-if-none-match condition evaluates to false , and; - // x-amz-copy-source-if-modified-since condition evaluates to true ; Amazon S3 - // returns 412 Precondition Failed response code. + // follows: + // + // x-amz-copy-source-if-none-match condition evaluates to false , and; + // + // x-amz-copy-source-if-modified-since condition evaluates to true ; + // + // Amazon S3 returns 412 Precondition Failed response code. CopySourceIfNoneMatch *string - // Copies the object if it hasn't been modified since the specified time. If both - // of the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since - // headers are present in the request as follows: x-amz-copy-source-if-match - // condition evaluates to true , and; x-amz-copy-source-if-unmodified-since - // condition evaluates to false ; Amazon S3 returns 200 OK and copies the data. + // Copies the object if it hasn't been modified since the specified time. + // + // If both of the x-amz-copy-source-if-match and + // x-amz-copy-source-if-unmodified-since headers are present in the request as + // follows: + // + // x-amz-copy-source-if-match condition evaluates to true , and; + // + // x-amz-copy-source-if-unmodified-since condition evaluates to false ; + // + // Amazon S3 returns 200 OK and copies the data. CopySourceIfUnmodifiedSince *time.Time // The range of bytes to copy from the source object. The range value must use the @@ -243,20 +368,26 @@ type UploadPartCopyInput struct { CopySourceRange *string // Specifies the algorithm to use when decrypting the source object (for example, - // AES256 ). This functionality is not supported when the source object is in a - // directory bucket. + // AES256 ). + // + // This functionality is not supported when the source object is in a directory + // bucket. CopySourceSSECustomerAlgorithm *string // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt // the source object. The encryption key provided in this header must be one that - // was used when the source object was created. This functionality is not supported - // when the source object is in a directory bucket. + // was used when the source object was created. + // + // This functionality is not supported when the source object is in a directory + // bucket. CopySourceSSECustomerKey *string // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. This functionality is not - // supported when the source object is in a directory bucket. + // encryption key was transmitted without error. 
+ // + // This functionality is not supported when the source object is in a directory + // bucket. CopySourceSSECustomerKeyMD5 *string // The account ID of the expected destination bucket owner. If the account ID that @@ -273,15 +404,18 @@ type UploadPartCopyInput struct { // Bucket owners need not specify this parameter in their requests. If either the // source or destination S3 bucket has Requester Pays enabled, the requester will // pay for corresponding charges to copy the object. For information about - // downloading objects from Requester Pays buckets, see Downloading Objects in - // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. + // downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets]in the Amazon S3 User + // Guide. + // + // This functionality is not supported for directory buckets. + // + // [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer types.RequestPayer - // Specifies the algorithm to use when encrypting the object (for example, - // AES256). This functionality is not supported when the destination bucket is a - // directory bucket. + // Specifies the algorithm to use when encrypting the object (for example, AES256). + // + // This functionality is not supported when the destination bucket is a directory + // bucket. SSECustomerAlgorithm *string // Specifies the customer-provided encryption key for Amazon S3 to use in @@ -289,21 +423,25 @@ type UploadPartCopyInput struct { // discarded; Amazon S3 does not store the encryption key. The key must be // appropriate for use with the algorithm specified in the // x-amz-server-side-encryption-customer-algorithm header. This must be the same - // encryption key specified in the initiate multipart upload request. This - // functionality is not supported when the destination bucket is a directory + // encryption key specified in the initiate multipart upload request. + // + // This functionality is not supported when the destination bucket is a directory // bucket. SSECustomerKey *string // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. This functionality is not - // supported when the destination bucket is a directory bucket. + // encryption key was transmitted without error. + // + // This functionality is not supported when the destination bucket is a directory + // bucket. SSECustomerKeyMD5 *string noSmithyDocumentSerde } func (in *UploadPartCopyInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket p.DisableS3ExpressSessionAuth = ptr.Bool(true) } @@ -311,42 +449,44 @@ func (in *UploadPartCopyInput) bindEndpointParams(p *EndpointParameters) { type UploadPartCopyOutput struct { // Indicates whether the multipart upload uses an S3 Bucket Key for server-side - // encryption with Key Management Service (KMS) keys (SSE-KMS). This functionality - // is not supported for directory buckets. + // encryption with Key Management Service (KMS) keys (SSE-KMS). BucketKeyEnabled *bool // Container for all response elements. 
CopyPartResult *types.CopyPartResult // The version of the source object that was copied, if you have enabled - // versioning on the source bucket. This functionality is not supported when the - // source object is in a directory bucket. + // versioning on the source bucket. + // + // This functionality is not supported when the source object is in a directory + // bucket. CopySourceVersionId *string // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // If server-side encryption with a customer-provided encryption key was // requested, the response will include this header to confirm the encryption - // algorithm that's used. This functionality is not supported for directory - // buckets. + // algorithm that's used. + // + // This functionality is not supported for directory buckets. SSECustomerAlgorithm *string // If server-side encryption with a customer-provided encryption key was // requested, the response will include this header to provide the round-trip - // message integrity verification of the customer-provided encryption key. This - // functionality is not supported for directory buckets. + // message integrity verification of the customer-provided encryption key. + // + // This functionality is not supported for directory buckets. SSECustomerKeyMD5 *string - // If present, indicates the ID of the Key Management Service (KMS) symmetric - // encryption customer managed key that was used for the object. This functionality - // is not supported for directory buckets. + // If present, indicates the ID of the KMS key that was used for object encryption. SSEKMSKeyId *string // The server-side encryption algorithm used when you store this object in Amazon - // S3 (for example, AES256 , aws:kms ). For directory buckets, only server-side - // encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported. + // S3 (for example, AES256 , aws:kms ). ServerSideEncryption types.ServerSideEncryption // Metadata pertaining to the operation's result. 
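UploadPartCopyInput above is driven by the x-amz-copy-source and x-amz-copy-source-range headers it documents. A hedged sketch of a ranged copy, reusing the client and imports from the UploadPart sketch earlier; the bucket, key, and upload ID names are again placeholders, and the inclusive 5 MiB range satisfies the minimum size for non-final parts:

// copyRangeAsPart is a hypothetical helper: it copies the first 5 MiB of an
// existing object into part 1 of an in-progress multipart upload.
func copyRangeAsPart(ctx context.Context, client *s3.Client) error {
	out, err := client.UploadPartCopy(ctx, &s3.UploadPartCopyInput{
		Bucket:     aws.String("amzn-s3-demo-bucket"), // destination bucket (placeholder)
		Key:        aws.String("example-object"),
		UploadId:   aws.String("example-upload-id"), // from CreateMultipartUpload
		PartNumber: aws.Int32(1),
		// URL-encoded "source-bucket/source-key", per the CopySource docs above.
		CopySource:      aws.String("amzn-s3-demo-source-bucket/reports/january.pdf"),
		CopySourceRange: aws.String("bytes=0-5242879"), // inclusive byte range
	})
	if err != nil {
		return err
	}
	// Unlike UploadPart, the part's ETag comes back nested in CopyPartResult.
	log.Println(aws.ToString(out.CopyPartResult.ETag))
	return nil
}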
@@ -398,6 +538,9 @@ func (c *Client) addOperationUploadPartCopyMiddlewares(stack *middleware.Stack, if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -413,6 +556,18 @@ func (c *Client) addOperationUploadPartCopyMiddlewares(stack *middleware.Stack, if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addOpUploadPartCopyValidationMiddleware(stack); err != nil { return err } @@ -449,6 +604,18 @@ func (c *Client) addOperationUploadPartCopyMiddlewares(stack *middleware.Stack, if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_WriteGetObjectResponse.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_WriteGetObjectResponse.go index e181ab711..9a9bbcede 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_WriteGetObjectResponse.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_WriteGetObjectResponse.go @@ -18,42 +18,54 @@ import ( "time" ) -// This operation is not supported by directory buckets. Passes transformed -// objects to a GetObject operation when using Object Lambda access points. For -// information about Object Lambda access points, see Transforming objects with -// Object Lambda access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/transforming-objects.html) -// in the Amazon S3 User Guide. This operation supports metadata that can be -// returned by GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) -// , in addition to RequestRoute , RequestToken , StatusCode , ErrorCode , and -// ErrorMessage . The GetObject response metadata is supported so that the -// WriteGetObjectResponse caller, typically an Lambda function, can provide the -// same metadata when it internally invokes GetObject . When WriteGetObjectResponse -// is called by a customer-owned Lambda function, the metadata returned to the end -// user GetObject call might differ from what Amazon S3 would normally return. You -// can include any number of metadata headers. When including a metadata header, it -// should be prefaced with x-amz-meta . For example, x-amz-meta-my-custom-header: -// MyCustomValue . The primary use case for this is to forward GetObject metadata. +// This operation is not supported for directory buckets. +// +// Passes transformed objects to a GetObject operation when using Object Lambda +// access points. For information about Object Lambda access points, see [Transforming objects with Object Lambda access points]in the +// Amazon S3 User Guide. +// +// This operation supports metadata that can be returned by [GetObject], in addition to +// RequestRoute , RequestToken , StatusCode , ErrorCode , and ErrorMessage . 
The +// GetObject response metadata is supported so that the WriteGetObjectResponse +// caller, typically a Lambda function, can provide the same metadata when it +// internally invokes GetObject . When WriteGetObjectResponse is called by a +// customer-owned Lambda function, the metadata returned to the end user GetObject +// call might differ from what Amazon S3 would normally return. +// +// You can include any number of metadata headers. When including a metadata +// header, it should be prefaced with x-amz-meta . For example, +// x-amz-meta-my-custom-header: MyCustomValue . The primary use case for this is to +// forward GetObject metadata. +// // Amazon Web Services provides some prebuilt Lambda functions that you can use // with S3 Object Lambda to detect and redact personally identifiable information // (PII) and decompress S3 objects. These Lambda functions are available in the // Amazon Web Services Serverless Application Repository, and can be selected // through the Amazon Web Services Management Console when you create your Object -// Lambda access point. Example 1: PII Access Control - This Lambda function uses -// Amazon Comprehend, a natural language processing (NLP) service using machine -// learning to find insights and relationships in text. It automatically detects -// personally identifiable information (PII) such as names, addresses, dates, -// credit card numbers, and social security numbers from documents in your Amazon -// S3 bucket. Example 2: PII Redaction - This Lambda function uses Amazon -// Comprehend, a natural language processing (NLP) service using machine learning -// to find insights and relationships in text. It automatically redacts personally +// Lambda access point. +// +// Example 1: PII Access Control - This Lambda function uses Amazon Comprehend, a +// natural language processing (NLP) service using machine learning to find +// insights and relationships in text. It automatically detects personally // identifiable information (PII) such as names, addresses, dates, credit card // numbers, and social security numbers from documents in your Amazon S3 bucket. +// +// Example 2: PII Redaction - This Lambda function uses Amazon Comprehend, a +// natural language processing (NLP) service using machine learning to find +// insights and relationships in text. It automatically redacts personally +// identifiable information (PII) such as names, addresses, dates, credit card +// numbers, and social security numbers from documents in your Amazon S3 bucket. +// // Example 3: Decompression - The Lambda function S3ObjectLambdaDecompression is // equipped to decompress objects stored in S3 in one of six compressed file -// formats including bzip2, gzip, snappy, zlib, zstandard and ZIP. For information -// on how to view and use these functions, see Using Amazon Web Services built -// Lambda functions (https://docs.aws.amazon.com/AmazonS3/latest/userguide/olap-examples.html) -// in the Amazon S3 User Guide. +// formats including bzip2, gzip, snappy, zlib, zstandard and ZIP. +// +// For information on how to view and use these functions, see [Using Amazon Web Services built Lambda functions] in the Amazon S3 +// User Guide.
+// +// [Transforming objects with Object Lambda access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/transforming-objects.html +// [Using Amazon Web Services built Lambda functions]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/olap-examples.html +// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html func (c *Client) WriteGetObjectResponse(ctx context.Context, params *WriteGetObjectResponseInput, optFns ...func(*Options)) (*WriteGetObjectResponseOutput, error) { if params == nil { params = &WriteGetObjectResponseInput{} @@ -88,7 +100,7 @@ type WriteGetObjectResponseInput struct { // The object data. Body io.Reader - // Indicates whether the object stored in Amazon S3 uses an S3 bucket key for + // Indicates whether the object stored in Amazon S3 uses an S3 bucket key for // server-side encryption with Amazon Web Services KMS (SSE-KMS). BucketKeyEnabled *bool @@ -96,47 +108,67 @@ type WriteGetObjectResponseInput struct { CacheControl *string // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This specifies the - // base64-encoded, 32-bit CRC32 checksum of the object returned by the Object - // Lambda function. This may not match the checksum for the object stored in Amazon - // S3. Amazon S3 will perform validation of the checksum values only when the - // original GetObject request required checksum validation. For more information - // about checksums, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. Only one checksum header can be specified at a - // time. If you supply multiple checksum headers, this request will fail. + // received is the same data that was originally sent. This specifies the Base64 + // encoded, 32-bit CRC32 checksum of the object returned by the Object Lambda + // function. This may not match the checksum for the object stored in Amazon S3. + // Amazon S3 will perform validation of the checksum values only when the original + // GetObject request required checksum validation. For more information about + // checksums, see [Checking object integrity]in the Amazon S3 User Guide. + // + // Only one checksum header can be specified at a time. If you supply multiple + // checksum headers, this request will fail. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumCRC32 *string // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This specifies the - // base64-encoded, 32-bit CRC32C checksum of the object returned by the Object - // Lambda function. This may not match the checksum for the object stored in Amazon - // S3. Amazon S3 will perform validation of the checksum values only when the - // original GetObject request required checksum validation. For more information - // about checksums, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. Only one checksum header can be specified at a - // time. If you supply multiple checksum headers, this request will fail. + // received is the same data that was originally sent. This specifies the Base64 + // encoded, 32-bit CRC32C checksum of the object returned by the Object Lambda + // function. 
This may not match the checksum for the object stored in Amazon S3. + // Amazon S3 will perform validation of the checksum values only when the original + // GetObject request required checksum validation. For more information about + // checksums, see [Checking object integrity] in the Amazon S3 User Guide. + // + // Only one checksum header can be specified at a time. If you supply multiple + // checksum headers, this request will fail. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumCRC32C *string // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This specifies the - // base64-encoded, 160-bit SHA-1 digest of the object returned by the Object Lambda + // received is the same data that was originally sent. This header specifies the + // Base64 encoded, 64-bit CRC64NVME checksum of the part. For more information, + // see [Checking object integrity] in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumCRC64NVME *string + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This specifies the Base64 + // encoded, 160-bit SHA1 digest of the object returned by the Object Lambda // function. This may not match the checksum for the object stored in Amazon S3. // Amazon S3 will perform validation of the checksum values only when the original // GetObject request required checksum validation. For more information about - // checksums, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. Only one checksum header can be specified at a - // time. If you supply multiple checksum headers, this request will fail. + // checksums, see [Checking object integrity] in the Amazon S3 User Guide. + // + // Only one checksum header can be specified at a time. If you supply multiple + // checksum headers, this request will fail. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumSHA1 *string // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This specifies the - // base64-encoded, 256-bit SHA-256 digest of the object returned by the Object - // Lambda function. This may not match the checksum for the object stored in Amazon - // S3. Amazon S3 will perform validation of the checksum values only when the - // original GetObject request required checksum validation. For more information - // about checksums, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. Only one checksum header can be specified at a - // time. If you supply multiple checksum headers, this request will fail. + // received is the same data that was originally sent. This specifies the Base64 + // encoded, 256-bit SHA256 digest of the object returned by the Object Lambda + // function. This may not match the checksum for the object stored in Amazon S3. + // Amazon S3 will perform validation of the checksum values only when the original + // GetObject request required checksum validation.
For more information about + // checksums, see [Checking object integrity] in the Amazon S3 User Guide. + // + // Only one checksum header can be specified at a time. If you supply multiple + // checksum headers, this request will fail. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumSHA256 *string // Specifies presentational information for the object. @@ -160,7 +192,9 @@ type WriteGetObjectResponseInput struct { ContentType *string // Specifies whether an object stored in Amazon S3 is ( true ) or is not ( false ) - // a delete marker. + // a delete marker. To learn more about delete markers, see [Working with delete markers]. + // + // [Working with delete markers]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/DeleteMarker.html DeleteMarker *bool // An opaque identifier assigned by a web server to a specific version of a @@ -205,8 +239,9 @@ type WriteGetObjectResponseInput struct { ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus // Indicates whether an object stored in Amazon S3 has Object Lock enabled. For - // more information about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html) - // . + // more information about S3 Object Lock, see [Object Lock]. + // + // [Object Lock]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html ObjectLockMode types.ObjectLockMode // The date and time when Object Lock is configured to expire. @@ -216,12 +251,15 @@ type WriteGetObjectResponseInput struct { PartsCount *int32 // Indicates if request involves bucket that is either a source or destination in - // a Replication rule. For more information about S3 Replication, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication.html) - // . + // a Replication rule. For more information about S3 Replication, see [Replication]. + // + // [Replication]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication.html ReplicationStatus types.ReplicationStatus // If present, indicates that the requester was successfully charged for the - // request. This functionality is not supported for directory buckets. + // request. + // + // This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // Provides information about object restoration operation and expiration time of @@ -232,43 +270,59 @@ type WriteGetObjectResponseInput struct { // encryption key was specified for object stored in Amazon S3. SSECustomerAlgorithm *string - // 128-bit MD5 digest of customer-provided encryption key used in Amazon S3 to - // encrypt data stored in S3. For more information, see Protecting data using - // server-side encryption with customer-provided encryption keys (SSE-C) (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html) - // . + // 128-bit MD5 digest of customer-provided encryption key used in Amazon S3 to + // encrypt data stored in S3. For more information, see [Protecting data using server-side encryption with customer-provided encryption keys (SSE-C)].
+ // + // [Protecting data using server-side encryption with customer-provided encryption keys (SSE-C)]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html SSECustomerKeyMD5 *string - // If present, specifies the ID (Key ID, Key ARN, or Key Alias) of the Amazon Web + // If present, specifies the ID (Key ID, Key ARN, or Key Alias) of the Amazon Web // Services Key Management Service (Amazon Web Services KMS) symmetric encryption // customer managed key that was used for the object stored in Amazon S3. SSEKMSKeyId *string - // The server-side encryption algorithm used when storing requested object in + // The server-side encryption algorithm used when storing the requested object in // Amazon S3 (for example, AES256, aws:kms ). ServerSideEncryption types.ServerSideEncryption // The integer status code for an HTTP response of a corresponding GetObject // request. The following is a list of status codes. + // // - 200 - OK + // // - 206 - Partial Content + // // - 304 - Not Modified + // // - 400 - Bad Request + // // - 401 - Unauthorized + // // - 403 - Forbidden + // // - 404 - Not Found + // // - 405 - Method Not Allowed + // // - 409 - Conflict + // // - 411 - Length Required + // // - 412 - Precondition Failed + // // - 416 - Range Not Satisfiable + // // - 500 - Internal Server Error + // // - 503 - Service Unavailable StatusCode *int32 // Provides storage class information of the object. Amazon S3 returns this header - // for all objects except for S3 Standard storage class objects. For more - // information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) - // . + // for all objects except for S3 Standard storage class objects. + // + // For more information, see [Storage Classes]. + // + // [Storage Classes]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html StorageClass types.StorageClass // The number of tags, if any, on the object.
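Taken together, the WriteGetObjectResponseInput fields documented above are populated from inside an S3 Object Lambda handler. The following is a minimal, hypothetical sketch of that call pattern, not part of this patch: it assumes the documented Object Lambda event JSON shape (getObjectContext.inputS3Url, outputRoute, outputToken), the github.com/aws/aws-lambda-go runtime (which is not vendored in this module), and a stand-in uppercasing transform where a real function would redact PII or decompress.

// Illustrative sketch only; not part of the vendored SDK changes above.
package main

import (
	"context"
	"io"
	"net/http"
	"strings"

	"github.com/aws/aws-lambda-go/lambda" // assumed dependency, not vendored here
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// objectLambdaEvent mirrors the documented S3 Object Lambda event JSON
// (getObjectContext.inputS3Url, outputRoute, outputToken); defined locally
// here for illustration.
type objectLambdaEvent struct {
	GetObjectContext struct {
		InputS3URL  string `json:"inputS3Url"`
		OutputRoute string `json:"outputRoute"`
		OutputToken string `json:"outputToken"`
	} `json:"getObjectContext"`
}

func handler(ctx context.Context, evt objectLambdaEvent) error {
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		return err
	}
	client := s3.NewFromConfig(cfg)

	// Fetch the original object through the presigned URL supplied in the event.
	resp, err := http.Get(evt.GetObjectContext.InputS3URL)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	data, err := io.ReadAll(resp.Body)
	if err != nil {
		return err
	}

	// Stand-in transformation; a real handler might redact PII or decompress.
	transformed := strings.ToUpper(string(data))

	// Relay the transformed object to the end user blocked in GetObject. The
	// route and token tie this response to that pending request.
	_, err = client.WriteGetObjectResponse(ctx, &s3.WriteGetObjectResponseInput{
		RequestRoute: aws.String(evt.GetObjectContext.OutputRoute),
		RequestToken: aws.String(evt.GetObjectContext.OutputToken),
		StatusCode:   aws.Int32(http.StatusOK),
		Body:         strings.NewReader(transformed),
	})
	return err
}

func main() { lambda.Start(handler) }

RequestRoute and RequestToken echo the event's outputRoute and outputToken; the addEndpointPrefix_opWriteGetObjectResponseMiddleware registered in the middleware stack below uses the route to prefix the request endpoint, which is why RequestRoute is required.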
@@ -338,6 +392,9 @@ func (c *Client) addOperationWriteGetObjectResponseMiddlewares(stack *middleware if err = addRecordResponseTiming(stack); err != nil { return err } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } if err = addClientUserAgent(stack, options); err != nil { return err } @@ -353,6 +410,18 @@ func (c *Client) addOperationWriteGetObjectResponseMiddlewares(stack *middleware if err = addPutBucketContextMiddleware(stack); err != nil { return err } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addIsExpressUserAgent(stack); err != nil { + return err + } + if err = addCredentialSource(stack, options); err != nil { + return err + } if err = addEndpointPrefix_opWriteGetObjectResponseMiddleware(stack); err != nil { return err } @@ -389,6 +458,18 @@ func (c *Client) addOperationWriteGetObjectResponseMiddlewares(stack *middleware if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { return err } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/auth.go index 6ef631bd3..6faedcc0b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/auth.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/auth.go @@ -8,16 +8,18 @@ import ( awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" smithy "github.com/aws/smithy-go" smithyauth "github.com/aws/smithy-go/auth" + "github.com/aws/smithy-go/metrics" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" ) -func bindAuthParamsRegion(params *AuthResolverParameters, _ interface{}, options Options) { +func bindAuthParamsRegion(_ interface{}, params *AuthResolverParameters, _ interface{}, options Options) { params.Region = options.Region } -func bindAuthEndpointParams(params *AuthResolverParameters, input interface{}, options Options) { - params.endpointParams = bindEndpointParams(input, options) +func bindAuthEndpointParams(ctx context.Context, params *AuthResolverParameters, input interface{}, options Options) { + params.endpointParams = bindEndpointParams(ctx, input, options) } type setLegacyContextSigningOptionsMiddleware struct { @@ -98,13 +100,13 @@ type AuthResolverParameters struct { Region string } -func bindAuthResolverParams(operation string, input interface{}, options Options) *AuthResolverParameters { +func bindAuthResolverParams(ctx context.Context, operation string, input interface{}, options Options) *AuthResolverParameters { params := &AuthResolverParameters{ Operation: operation, } - bindAuthEndpointParams(params, input, options) - bindAuthParamsRegion(params, input, options) + bindAuthEndpointParams(ctx, params, input, options) + bindAuthParamsRegion(ctx, params, input, options) return params } @@ -179,7 +181,10 @@ func (*resolveAuthSchemeMiddleware) ID() string { func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { - params := 
bindAuthResolverParams(m.operation, getOperationInput(ctx), m.options) + _, span := tracing.StartSpan(ctx, "ResolveAuthScheme") + defer span.End() + + params := bindAuthResolverParams(ctx, m.operation, getOperationInput(ctx), m.options) options, err := m.options.AuthSchemeResolver.ResolveAuthSchemes(ctx, params) if err != nil { return out, metadata, fmt.Errorf("resolve auth scheme: %w", err) @@ -191,6 +196,9 @@ func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in mid } ctx = setResolvedAuthScheme(ctx, scheme) + + span.SetProperty("auth.scheme_id", scheme.Scheme.SchemeID()) + span.End() return next.HandleFinalize(ctx, in) } @@ -250,7 +258,10 @@ func (*getIdentityMiddleware) ID() string { func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { - rscheme := getResolvedAuthScheme(ctx) + innerCtx, span := tracing.StartSpan(ctx, "GetIdentity") + defer span.End() + + rscheme := getResolvedAuthScheme(innerCtx) if rscheme == nil { return out, metadata, fmt.Errorf("no resolved auth scheme") } @@ -260,12 +271,20 @@ func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middlewar return out, metadata, fmt.Errorf("no identity resolver") } - identity, err := resolver.GetIdentity(ctx, rscheme.IdentityProperties) + identity, err := timeOperationMetric(ctx, "client.call.resolve_identity_duration", + func() (smithyauth.Identity, error) { + return resolver.GetIdentity(innerCtx, rscheme.IdentityProperties) + }, + func(o *metrics.RecordMetricOptions) { + o.Properties.Set("auth.scheme_id", rscheme.Scheme.SchemeID()) + }) if err != nil { return out, metadata, fmt.Errorf("get identity: %w", err) } ctx = setIdentity(ctx, identity) + + span.End() return next.HandleFinalize(ctx, in) } @@ -281,6 +300,7 @@ func getIdentity(ctx context.Context) smithyauth.Identity { } type signRequestMiddleware struct { + options Options } func (*signRequestMiddleware) ID() string { @@ -290,6 +310,9 @@ func (*signRequestMiddleware) ID() string { func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "SignRequest") + defer span.End() + req, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, fmt.Errorf("unexpected transport type %T", in.Request) @@ -310,9 +333,15 @@ func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middlewar return out, metadata, fmt.Errorf("no signer") } - if err := signer.SignRequest(ctx, req, identity, rscheme.SignerProperties); err != nil { + _, err = timeOperationMetric(ctx, "client.call.signing_duration", func() (any, error) { + return nil, signer.SignRequest(ctx, req, identity, rscheme.SignerProperties) + }, func(o *metrics.RecordMetricOptions) { + o.Properties.Set("auth.scheme_id", rscheme.Scheme.SchemeID()) + }) + if err != nil { return out, metadata, fmt.Errorf("sign request: %w", err) } + span.End() return next.HandleFinalize(ctx, in) } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/deserializers.go index 2be5df30f..f1e5ac029 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/deserializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/deserializers.go @@ -20,13 +20,23 @@ import ( 
"github.com/aws/smithy-go/middleware" "github.com/aws/smithy-go/ptr" smithytime "github.com/aws/smithy-go/time" + "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" "io" "io/ioutil" "strconv" "strings" + "time" ) +func deserializeS3Expires(v string) (*time.Time, error) { + t, err := smithytime.ParseHTTPDate(v) + if err != nil { + return nil, nil + } + return &t, nil +} + type awsRestxml_deserializeOpAbortMultipartUpload struct { } @@ -42,6 +52,10 @@ func (m *awsRestxml_deserializeOpAbortMultipartUpload) HandleDeserialize(ctx con return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -58,6 +72,7 @@ func (m *awsRestxml_deserializeOpAbortMultipartUpload) HandleDeserialize(ctx con return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} } + span.End() return out, metadata, err } @@ -132,6 +147,10 @@ func (m *awsRestxml_deserializeOpCompleteMultipartUpload) HandleDeserialize(ctx return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -176,6 +195,7 @@ func (m *awsRestxml_deserializeOpCompleteMultipartUpload) HandleDeserialize(ctx } } + span.End() return out, metadata, err } @@ -321,6 +341,19 @@ func awsRestxml_deserializeOpDocumentCompleteMultipartUploadOutput(v **CompleteM sv.ChecksumCRC32C = ptr.String(xtv) } + case strings.EqualFold("ChecksumCRC64NVME", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumCRC64NVME = ptr.String(xtv) + } + case strings.EqualFold("ChecksumSHA1", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -347,6 +380,19 @@ func awsRestxml_deserializeOpDocumentCompleteMultipartUploadOutput(v **CompleteM sv.ChecksumSHA256 = ptr.String(xtv) } + case strings.EqualFold("ChecksumType", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumType = types.ChecksumType(xtv) + } + case strings.EqualFold("ETag", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -415,6 +461,10 @@ func (m *awsRestxml_deserializeOpCopyObject) HandleDeserialize(ctx context.Conte return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -459,6 +509,7 @@ func (m *awsRestxml_deserializeOpCopyObject) HandleDeserialize(ctx context.Conte } } + span.End() return out, metadata, err } @@ -623,6 +674,10 @@ func (m *awsRestxml_deserializeOpCreateBucket) HandleDeserialize(ctx context.Con return out, metadata, err } + _, span := 
tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -639,6 +694,7 @@ func (m *awsRestxml_deserializeOpCreateBucket) HandleDeserialize(ctx context.Con return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} } + span.End() return out, metadata, err } @@ -701,6 +757,86 @@ func awsRestxml_deserializeOpHttpBindingsCreateBucketOutput(v *CreateBucketOutpu return nil } +type awsRestxml_deserializeOpCreateBucketMetadataTableConfiguration struct { +} + +func (*awsRestxml_deserializeOpCreateBucketMetadataTableConfiguration) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpCreateBucketMetadataTableConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorCreateBucketMetadataTableConfiguration(response, &metadata) + } + output := &CreateBucketMetadataTableConfigurationOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + span.End() + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorCreateBucketMetadataTableConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + type awsRestxml_deserializeOpCreateMultipartUpload struct { } @@ -716,6 +852,10 @@ func (m 
*awsRestxml_deserializeOpCreateMultipartUpload) HandleDeserialize(ctx co return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -760,6 +900,7 @@ func (m *awsRestxml_deserializeOpCreateMultipartUpload) HandleDeserialize(ctx co } } + span.End() return out, metadata, err } @@ -836,6 +977,11 @@ func awsRestxml_deserializeOpHttpBindingsCreateMultipartUploadOutput(v *CreateMu v.ChecksumAlgorithm = types.ChecksumAlgorithm(headerValues[0]) } + if headerValues := response.Header.Values("x-amz-checksum-type"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ChecksumType = types.ChecksumType(headerValues[0]) + } + if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { headerValues[0] = strings.TrimSpace(headerValues[0]) v.RequestCharged = types.RequestCharged(headerValues[0]) @@ -958,6 +1104,10 @@ func (m *awsRestxml_deserializeOpCreateSession) HandleDeserialize(ctx context.Co return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -969,6 +1119,11 @@ func (m *awsRestxml_deserializeOpCreateSession) HandleDeserialize(ctx context.Co output := &CreateSessionOutput{} out.Result = output + err = awsRestxml_deserializeOpHttpBindingsCreateSessionOutput(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} + } + var buff [1024]byte ringBuffer := smithyio.NewRingBuffer(buff[:]) body := io.TeeReader(response.Body, ringBuffer) @@ -997,6 +1152,7 @@ func (m *awsRestxml_deserializeOpCreateSession) HandleDeserialize(ctx context.Co } } + span.End() return out, metadata, err } @@ -1043,6 +1199,37 @@ func awsRestxml_deserializeOpErrorCreateSession(response *smithyhttp.Response, m } } +func awsRestxml_deserializeOpHttpBindingsCreateSessionOutput(v *CreateSessionOutput, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-bucket-key-enabled"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + vv, err := strconv.ParseBool(headerValues[0]) + if err != nil { + return err + } + v.BucketKeyEnabled = ptr.Bool(vv) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ServerSideEncryption = types.ServerSideEncryption(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-context"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.SSEKMSEncryptionContext = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-server-side-encryption-aws-kms-key-id"); len(headerValues) != 0 { + headerValues[0] = 
strings.TrimSpace(headerValues[0]) + v.SSEKMSKeyId = ptr.String(headerValues[0]) + } + + return nil +} func awsRestxml_deserializeOpDocumentCreateSessionOutput(v **CreateSessionOutput, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -1100,6 +1287,10 @@ func (m *awsRestxml_deserializeOpDeleteBucket) HandleDeserialize(ctx context.Con return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -1117,6 +1308,7 @@ func (m *awsRestxml_deserializeOpDeleteBucket) HandleDeserialize(ctx context.Con } } + span.End() return out, metadata, err } @@ -1175,6 +1367,10 @@ func (m *awsRestxml_deserializeOpDeleteBucketAnalyticsConfiguration) HandleDeser return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -1192,6 +1388,7 @@ func (m *awsRestxml_deserializeOpDeleteBucketAnalyticsConfiguration) HandleDeser } } + span.End() return out, metadata, err } @@ -1250,6 +1447,10 @@ func (m *awsRestxml_deserializeOpDeleteBucketCors) HandleDeserialize(ctx context return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -1267,6 +1468,7 @@ func (m *awsRestxml_deserializeOpDeleteBucketCors) HandleDeserialize(ctx context } } + span.End() return out, metadata, err } @@ -1325,6 +1527,10 @@ func (m *awsRestxml_deserializeOpDeleteBucketEncryption) HandleDeserialize(ctx c return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -1342,6 +1548,7 @@ func (m *awsRestxml_deserializeOpDeleteBucketEncryption) HandleDeserialize(ctx c } } + span.End() return out, metadata, err } @@ -1400,6 +1607,10 @@ func (m *awsRestxml_deserializeOpDeleteBucketIntelligentTieringConfiguration) Ha return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -1417,6 +1628,7 @@ func (m *awsRestxml_deserializeOpDeleteBucketIntelligentTieringConfiguration) Ha } } + span.End() return out, metadata, err } @@ -1475,6 +1687,10 @@ func (m *awsRestxml_deserializeOpDeleteBucketInventoryConfiguration) 
HandleDeser return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -1492,6 +1708,7 @@ func (m *awsRestxml_deserializeOpDeleteBucketInventoryConfiguration) HandleDeser } } + span.End() return out, metadata, err } @@ -1550,6 +1767,10 @@ func (m *awsRestxml_deserializeOpDeleteBucketLifecycle) HandleDeserialize(ctx co return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -1567,6 +1788,7 @@ func (m *awsRestxml_deserializeOpDeleteBucketLifecycle) HandleDeserialize(ctx co } } + span.End() return out, metadata, err } @@ -1610,6 +1832,86 @@ func awsRestxml_deserializeOpErrorDeleteBucketLifecycle(response *smithyhttp.Res } } +type awsRestxml_deserializeOpDeleteBucketMetadataTableConfiguration struct { +} + +func (*awsRestxml_deserializeOpDeleteBucketMetadataTableConfiguration) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpDeleteBucketMetadataTableConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorDeleteBucketMetadataTableConfiguration(response, &metadata) + } + output := &DeleteBucketMetadataTableConfigurationOutput{} + out.Result = output + + if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to discard response body, %w", err), + } + } + + span.End() + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorDeleteBucketMetadataTableConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; 
len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + type awsRestxml_deserializeOpDeleteBucketMetricsConfiguration struct { } @@ -1625,6 +1927,10 @@ func (m *awsRestxml_deserializeOpDeleteBucketMetricsConfiguration) HandleDeseria return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -1642,6 +1948,7 @@ func (m *awsRestxml_deserializeOpDeleteBucketMetricsConfiguration) HandleDeseria } } + span.End() return out, metadata, err } @@ -1700,6 +2007,10 @@ func (m *awsRestxml_deserializeOpDeleteBucketOwnershipControls) HandleDeserializ return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -1717,6 +2028,7 @@ func (m *awsRestxml_deserializeOpDeleteBucketOwnershipControls) HandleDeserializ } } + span.End() return out, metadata, err } @@ -1775,6 +2087,10 @@ func (m *awsRestxml_deserializeOpDeleteBucketPolicy) HandleDeserialize(ctx conte return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -1792,6 +2108,7 @@ func (m *awsRestxml_deserializeOpDeleteBucketPolicy) HandleDeserialize(ctx conte } } + span.End() return out, metadata, err } @@ -1850,6 +2167,10 @@ func (m *awsRestxml_deserializeOpDeleteBucketReplication) HandleDeserialize(ctx return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -1867,6 +2188,7 @@ func (m *awsRestxml_deserializeOpDeleteBucketReplication) HandleDeserialize(ctx } } + span.End() return out, metadata, err } @@ -1925,6 +2247,10 @@ func (m *awsRestxml_deserializeOpDeleteBucketTagging) HandleDeserialize(ctx cont return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -1942,6 +2268,7 @@ func (m 
*awsRestxml_deserializeOpDeleteBucketTagging) HandleDeserialize(ctx cont } } + span.End() return out, metadata, err } @@ -2000,6 +2327,10 @@ func (m *awsRestxml_deserializeOpDeleteBucketWebsite) HandleDeserialize(ctx cont return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -2017,6 +2348,7 @@ func (m *awsRestxml_deserializeOpDeleteBucketWebsite) HandleDeserialize(ctx cont } } + span.End() return out, metadata, err } @@ -2075,6 +2407,10 @@ func (m *awsRestxml_deserializeOpDeleteObject) HandleDeserialize(ctx context.Con return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -2091,6 +2427,7 @@ func (m *awsRestxml_deserializeOpDeleteObject) HandleDeserialize(ctx context.Con return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} } + span.End() return out, metadata, err } @@ -2176,6 +2513,10 @@ func (m *awsRestxml_deserializeOpDeleteObjects) HandleDeserialize(ctx context.Co return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -2220,6 +2561,7 @@ func (m *awsRestxml_deserializeOpDeleteObjects) HandleDeserialize(ctx context.Co } } + span.End() return out, metadata, err } @@ -2338,6 +2680,10 @@ func (m *awsRestxml_deserializeOpDeleteObjectTagging) HandleDeserialize(ctx cont return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -2354,6 +2700,7 @@ func (m *awsRestxml_deserializeOpDeleteObjectTagging) HandleDeserialize(ctx cont return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} } + span.End() return out, metadata, err } @@ -2425,6 +2772,10 @@ func (m *awsRestxml_deserializeOpDeletePublicAccessBlock) HandleDeserialize(ctx return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -2442,6 +2793,7 @@ func (m *awsRestxml_deserializeOpDeletePublicAccessBlock) HandleDeserialize(ctx } } + span.End() return out, metadata, err } @@ -2500,6 
+2852,10 @@ func (m *awsRestxml_deserializeOpGetBucketAccelerateConfiguration) HandleDeseria return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -2544,6 +2900,7 @@ func (m *awsRestxml_deserializeOpGetBucketAccelerateConfiguration) HandleDeseria } } + span.End() return out, metadata, err } @@ -2663,6 +3020,10 @@ func (m *awsRestxml_deserializeOpGetBucketAcl) HandleDeserialize(ctx context.Con return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -2702,6 +3063,7 @@ func (m *awsRestxml_deserializeOpGetBucketAcl) HandleDeserialize(ctx context.Con } } + span.End() return out, metadata, err } @@ -2808,6 +3170,10 @@ func (m *awsRestxml_deserializeOpGetBucketAnalyticsConfiguration) HandleDeserial return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -2847,6 +3213,7 @@ func (m *awsRestxml_deserializeOpGetBucketAnalyticsConfiguration) HandleDeserial } } + span.End() return out, metadata, err } @@ -2947,6 +3314,10 @@ func (m *awsRestxml_deserializeOpGetBucketCors) HandleDeserialize(ctx context.Co return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -2986,6 +3357,7 @@ func (m *awsRestxml_deserializeOpGetBucketCors) HandleDeserialize(ctx context.Co } } + span.End() return out, metadata, err } @@ -3086,6 +3458,10 @@ func (m *awsRestxml_deserializeOpGetBucketEncryption) HandleDeserialize(ctx cont return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -3125,6 +3501,7 @@ func (m *awsRestxml_deserializeOpGetBucketEncryption) HandleDeserialize(ctx cont } } + span.End() return out, metadata, err } @@ -3225,6 +3602,10 @@ func (m *awsRestxml_deserializeOpGetBucketIntelligentTieringConfiguration) Handl return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, 
&smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -3264,6 +3645,7 @@ func (m *awsRestxml_deserializeOpGetBucketIntelligentTieringConfiguration) Handl } } + span.End() return out, metadata, err } @@ -3364,6 +3746,10 @@ func (m *awsRestxml_deserializeOpGetBucketInventoryConfiguration) HandleDeserial return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -3403,6 +3789,7 @@ func (m *awsRestxml_deserializeOpGetBucketInventoryConfiguration) HandleDeserial } } + span.End() return out, metadata, err } @@ -3503,6 +3890,10 @@ func (m *awsRestxml_deserializeOpGetBucketLifecycleConfiguration) HandleDeserial return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -3514,6 +3905,11 @@ func (m *awsRestxml_deserializeOpGetBucketLifecycleConfiguration) HandleDeserial output := &GetBucketLifecycleConfigurationOutput{} out.Result = output + err = awsRestxml_deserializeOpHttpBindingsGetBucketLifecycleConfigurationOutput(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} + } + var buff [1024]byte ringBuffer := smithyio.NewRingBuffer(buff[:]) body := io.TeeReader(response.Body, ringBuffer) @@ -3542,6 +3938,7 @@ func (m *awsRestxml_deserializeOpGetBucketLifecycleConfiguration) HandleDeserial } } + span.End() return out, metadata, err } @@ -3585,6 +3982,18 @@ func awsRestxml_deserializeOpErrorGetBucketLifecycleConfiguration(response *smit } } +func awsRestxml_deserializeOpHttpBindingsGetBucketLifecycleConfigurationOutput(v *GetBucketLifecycleConfigurationOutput, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("x-amz-transition-default-minimum-object-size"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.TransitionDefaultMinimumObjectSize = types.TransitionDefaultMinimumObjectSize(headerValues[0]) + } + + return nil +} func awsRestxml_deserializeOpDocumentGetBucketLifecycleConfigurationOutput(v **GetBucketLifecycleConfigurationOutput, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -3642,6 +4051,10 @@ func (m *awsRestxml_deserializeOpGetBucketLocation) HandleDeserialize(ctx contex return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -3681,6 +4094,7 @@ func (m *awsRestxml_deserializeOpGetBucketLocation) HandleDeserialize(ctx contex } } + span.End() return out, metadata, 
err } @@ -3788,6 +4202,10 @@ func (m *awsRestxml_deserializeOpGetBucketLogging) HandleDeserialize(ctx context return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -3827,6 +4245,7 @@ func (m *awsRestxml_deserializeOpGetBucketLogging) HandleDeserialize(ctx context } } + span.End() return out, metadata, err } @@ -3912,14 +4331,14 @@ func awsRestxml_deserializeOpDocumentGetBucketLoggingOutput(v **GetBucketLogging return nil } -type awsRestxml_deserializeOpGetBucketMetricsConfiguration struct { +type awsRestxml_deserializeOpGetBucketMetadataTableConfiguration struct { } -func (*awsRestxml_deserializeOpGetBucketMetricsConfiguration) ID() string { +func (*awsRestxml_deserializeOpGetBucketMetadataTableConfiguration) ID() string { return "OperationDeserializer" } -func (m *awsRestxml_deserializeOpGetBucketMetricsConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestxml_deserializeOpGetBucketMetadataTableConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -3927,15 +4346,19 @@ func (m *awsRestxml_deserializeOpGetBucketMetricsConfiguration) HandleDeserializ return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorGetBucketMetricsConfiguration(response, &metadata) + return out, metadata, awsRestxml_deserializeOpErrorGetBucketMetadataTableConfiguration(response, &metadata) } - output := &GetBucketMetricsConfigurationOutput{} + output := &GetBucketMetadataTableConfigurationOutput{} out.Result = output var buff [1024]byte @@ -3956,7 +4379,7 @@ func (m *awsRestxml_deserializeOpGetBucketMetricsConfiguration) HandleDeserializ } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeDocumentMetricsConfiguration(&output.MetricsConfiguration, decoder) + err = awsRestxml_deserializeDocumentGetBucketMetadataTableConfigurationResult(&output.GetBucketMetadataTableConfigurationResult, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -3966,10 +4389,11 @@ func (m *awsRestxml_deserializeOpGetBucketMetricsConfiguration) HandleDeserializ } } + span.End() return out, metadata, err } -func awsRestxml_deserializeOpErrorGetBucketMetricsConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestxml_deserializeOpErrorGetBucketMetadataTableConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy 
error response body, %w", err)} @@ -4009,13 +4433,13 @@ func awsRestxml_deserializeOpErrorGetBucketMetricsConfiguration(response *smithy } } -func awsRestxml_deserializeOpDocumentGetBucketMetricsConfigurationOutput(v **GetBucketMetricsConfigurationOutput, decoder smithyxml.NodeDecoder) error { +func awsRestxml_deserializeOpDocumentGetBucketMetadataTableConfigurationOutput(v **GetBucketMetadataTableConfigurationOutput, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } - var sv *GetBucketMetricsConfigurationOutput + var sv *GetBucketMetadataTableConfigurationOutput if *v == nil { - sv = &GetBucketMetricsConfigurationOutput{} + sv = &GetBucketMetadataTableConfigurationOutput{} } else { sv = *v } @@ -4031,9 +4455,9 @@ func awsRestxml_deserializeOpDocumentGetBucketMetricsConfigurationOutput(v **Get originalDecoder := decoder decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) switch { - case strings.EqualFold("MetricsConfiguration", t.Name.Local): + case strings.EqualFold("GetBucketMetadataTableConfigurationResult", t.Name.Local): nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentMetricsConfiguration(&sv.MetricsConfiguration, nodeDecoder); err != nil { + if err := awsRestxml_deserializeDocumentGetBucketMetadataTableConfigurationResult(&sv.GetBucketMetadataTableConfigurationResult, nodeDecoder); err != nil { return err } @@ -4051,14 +4475,14 @@ func awsRestxml_deserializeOpDocumentGetBucketMetricsConfigurationOutput(v **Get return nil } -type awsRestxml_deserializeOpGetBucketNotificationConfiguration struct { +type awsRestxml_deserializeOpGetBucketMetricsConfiguration struct { } -func (*awsRestxml_deserializeOpGetBucketNotificationConfiguration) ID() string { +func (*awsRestxml_deserializeOpGetBucketMetricsConfiguration) ID() string { return "OperationDeserializer" } -func (m *awsRestxml_deserializeOpGetBucketNotificationConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestxml_deserializeOpGetBucketMetricsConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -4066,15 +4490,19 @@ func (m *awsRestxml_deserializeOpGetBucketNotificationConfiguration) HandleDeser return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorGetBucketNotificationConfiguration(response, &metadata) + return out, metadata, awsRestxml_deserializeOpErrorGetBucketMetricsConfiguration(response, &metadata) } - output := &GetBucketNotificationConfigurationOutput{} + output := &GetBucketMetricsConfigurationOutput{} out.Result = output var buff [1024]byte @@ -4095,7 +4523,7 @@ func (m *awsRestxml_deserializeOpGetBucketNotificationConfiguration) HandleDeser } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = 
awsRestxml_deserializeOpDocumentGetBucketNotificationConfigurationOutput(&output, decoder) + err = awsRestxml_deserializeDocumentMetricsConfiguration(&output.MetricsConfiguration, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -4105,10 +4533,11 @@ func (m *awsRestxml_deserializeOpGetBucketNotificationConfiguration) HandleDeser } } + span.End() return out, metadata, err } -func awsRestxml_deserializeOpErrorGetBucketNotificationConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestxml_deserializeOpErrorGetBucketMetricsConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -4148,13 +4577,13 @@ func awsRestxml_deserializeOpErrorGetBucketNotificationConfiguration(response *s } } -func awsRestxml_deserializeOpDocumentGetBucketNotificationConfigurationOutput(v **GetBucketNotificationConfigurationOutput, decoder smithyxml.NodeDecoder) error { +func awsRestxml_deserializeOpDocumentGetBucketMetricsConfigurationOutput(v **GetBucketMetricsConfigurationOutput, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } - var sv *GetBucketNotificationConfigurationOutput + var sv *GetBucketMetricsConfigurationOutput if *v == nil { - sv = &GetBucketNotificationConfigurationOutput{} + sv = &GetBucketMetricsConfigurationOutput{} } else { sv = *v } @@ -4170,15 +4599,159 @@ func awsRestxml_deserializeOpDocumentGetBucketNotificationConfigurationOutput(v originalDecoder := decoder decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) switch { - case strings.EqualFold("EventBridgeConfiguration", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentEventBridgeConfiguration(&sv.EventBridgeConfiguration, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("CloudFunctionConfiguration", t.Name.Local): + case strings.EqualFold("MetricsConfiguration", t.Name.Local): nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentLambdaFunctionConfigurationListUnwrapped(&sv.LambdaFunctionConfigurations, nodeDecoder); err != nil { + if err := awsRestxml_deserializeDocumentMetricsConfiguration(&sv.MetricsConfiguration, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +type awsRestxml_deserializeOpGetBucketNotificationConfiguration struct { +} + +func (*awsRestxml_deserializeOpGetBucketNotificationConfiguration) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpGetBucketNotificationConfiguration) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if 
!ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorGetBucketNotificationConfiguration(response, &metadata) + } + output := &GetBucketNotificationConfigurationOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeOpDocumentGetBucketNotificationConfigurationOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + span.End() + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorGetBucketNotificationConfiguration(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentGetBucketNotificationConfigurationOutput(v **GetBucketNotificationConfigurationOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *GetBucketNotificationConfigurationOutput + if *v == nil { + sv = &GetBucketNotificationConfigurationOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("EventBridgeConfiguration", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentEventBridgeConfiguration(&sv.EventBridgeConfiguration, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("CloudFunctionConfiguration", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + 
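// Sketch of the body-capture idiom these deserializers use: the response
// body is teed into a fixed-size ring buffer so that a decoding failure can
// attach the last bytes read as a snapshot. All types come from the
// smithy-go runtime; this fragment illustrates existing behavior rather
// than anything new in the patch.
var buff [1024]byte
ringBuffer := smithyio.NewRingBuffer(buff[:])
body := io.TeeReader(response.Body, ringBuffer)
rootDecoder := xml.NewDecoder(body)
t, err := smithyxml.FetchRootElement(rootDecoder)
if err != nil && err != io.EOF {
	var snapshot bytes.Buffer
	io.Copy(&snapshot, ringBuffer) // drain whatever the tee captured so far
	return out, metadata, &smithy.DeserializationError{
		Err:      fmt.Errorf("failed to decode response body, %w", err),
		Snapshot: snapshot.Bytes(),
	}
}
decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
_ = decoder // handed off to the operation's document deserializer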
if err := awsRestxml_deserializeDocumentLambdaFunctionConfigurationListUnwrapped(&sv.LambdaFunctionConfigurations, nodeDecoder); err != nil { return err } @@ -4223,6 +4796,10 @@ func (m *awsRestxml_deserializeOpGetBucketOwnershipControls) HandleDeserialize(c return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -4262,6 +4839,7 @@ func (m *awsRestxml_deserializeOpGetBucketOwnershipControls) HandleDeserialize(c } } + span.End() return out, metadata, err } @@ -4362,6 +4940,10 @@ func (m *awsRestxml_deserializeOpGetBucketPolicy) HandleDeserialize(ctx context. return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -4378,6 +4960,7 @@ func (m *awsRestxml_deserializeOpGetBucketPolicy) HandleDeserialize(ctx context. return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to deserialize response payload, %w", err)} } + span.End() return out, metadata, err } @@ -4457,6 +5040,10 @@ func (m *awsRestxml_deserializeOpGetBucketPolicyStatus) HandleDeserialize(ctx co return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -4496,6 +5083,7 @@ func (m *awsRestxml_deserializeOpGetBucketPolicyStatus) HandleDeserialize(ctx co } } + span.End() return out, metadata, err } @@ -4596,6 +5184,10 @@ func (m *awsRestxml_deserializeOpGetBucketReplication) HandleDeserialize(ctx con return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -4635,6 +5227,7 @@ func (m *awsRestxml_deserializeOpGetBucketReplication) HandleDeserialize(ctx con } } + span.End() return out, metadata, err } @@ -4735,6 +5328,10 @@ func (m *awsRestxml_deserializeOpGetBucketRequestPayment) HandleDeserialize(ctx return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -4774,6 +5371,7 @@ func (m *awsRestxml_deserializeOpGetBucketRequestPayment) HandleDeserialize(ctx } } + span.End() return out, metadata, err } @@ -4881,6 +5479,10 @@ func (m *awsRestxml_deserializeOpGetBucketTagging) HandleDeserialize(ctx context return out, 
metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -4920,6 +5522,7 @@ func (m *awsRestxml_deserializeOpGetBucketTagging) HandleDeserialize(ctx context } } + span.End() return out, metadata, err } @@ -5020,6 +5623,10 @@ func (m *awsRestxml_deserializeOpGetBucketVersioning) HandleDeserialize(ctx cont return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -5059,6 +5666,7 @@ func (m *awsRestxml_deserializeOpGetBucketVersioning) HandleDeserialize(ctx cont } } + span.End() return out, metadata, err } @@ -5179,6 +5787,10 @@ func (m *awsRestxml_deserializeOpGetBucketWebsite) HandleDeserialize(ctx context return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -5218,6 +5830,7 @@ func (m *awsRestxml_deserializeOpGetBucketWebsite) HandleDeserialize(ctx context } } + span.End() return out, metadata, err } @@ -5336,6 +5949,10 @@ func (m *awsRestxml_deserializeOpGetObject) HandleDeserialize(ctx context.Contex return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -5357,6 +5974,7 @@ func (m *awsRestxml_deserializeOpGetObject) HandleDeserialize(ctx context.Contex return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to deserialize response payload, %w", err)} } + span.End() return out, metadata, err } @@ -5440,6 +6058,11 @@ func awsRestxml_deserializeOpHttpBindingsGetObjectOutput(v *GetObjectOutput, res v.ChecksumCRC32C = ptr.String(headerValues[0]) } + if headerValues := response.Header.Values("x-amz-checksum-crc64nvme"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ChecksumCRC64NVME = ptr.String(headerValues[0]) + } + if headerValues := response.Header.Values("x-amz-checksum-sha1"); len(headerValues) != 0 { headerValues[0] = strings.TrimSpace(headerValues[0]) v.ChecksumSHA1 = ptr.String(headerValues[0]) @@ -5450,6 +6073,11 @@ func awsRestxml_deserializeOpHttpBindingsGetObjectOutput(v *GetObjectOutput, res v.ChecksumSHA256 = ptr.String(headerValues[0]) } + if headerValues := response.Header.Values("x-amz-checksum-type"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ChecksumType = types.ChecksumType(headerValues[0]) + } + if headerValues := response.Header.Values("Content-Disposition"); len(headerValues) != 0 { headerValues[0] 
= strings.TrimSpace(headerValues[0]) v.ContentDisposition = ptr.String(headerValues[0]) @@ -5504,12 +6132,17 @@ func awsRestxml_deserializeOpHttpBindingsGetObjectOutput(v *GetObjectOutput, res } if headerValues := response.Header.Values("Expires"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - t, err := smithytime.ParseHTTPDate(headerValues[0]) + deserOverride, err := deserializeS3Expires(headerValues[0]) if err != nil { return err } - v.Expires = ptr.Time(t) + v.Expires = deserOverride + + } + + if headerValues := response.Header.Values("Expires"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ExpiresString = ptr.String(headerValues[0]) } if headerValues := response.Header.Values("Last-Modified"); len(headerValues) != 0 { @@ -5652,6 +6285,10 @@ func (m *awsRestxml_deserializeOpGetObjectAcl) HandleDeserialize(ctx context.Con return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -5696,6 +6333,7 @@ func (m *awsRestxml_deserializeOpGetObjectAcl) HandleDeserialize(ctx context.Con } } + span.End() return out, metadata, err } @@ -5817,6 +6455,10 @@ func (m *awsRestxml_deserializeOpGetObjectAttributes) HandleDeserialize(ctx cont return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -5861,6 +6503,7 @@ func (m *awsRestxml_deserializeOpGetObjectAttributes) HandleDeserialize(ctx cont } } + span.End() return out, metadata, err } @@ -6048,6 +6691,10 @@ func (m *awsRestxml_deserializeOpGetObjectLegalHold) HandleDeserialize(ctx conte return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -6087,6 +6734,7 @@ func (m *awsRestxml_deserializeOpGetObjectLegalHold) HandleDeserialize(ctx conte } } + span.End() return out, metadata, err } @@ -6187,6 +6835,10 @@ func (m *awsRestxml_deserializeOpGetObjectLockConfiguration) HandleDeserialize(c return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -6226,6 +6878,7 @@ func (m *awsRestxml_deserializeOpGetObjectLockConfiguration) HandleDeserialize(c } } + span.End() return out, metadata, err } @@ -6326,6 +6979,10 @@ func (m *awsRestxml_deserializeOpGetObjectRetention) HandleDeserialize(ctx conte return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := 
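// The Expires hunks in this file swap strict HTTP-date parsing for a
// lenient two-field shape: Expires is set only when the header parses,
// while the new ExpiresString always carries the raw header value.
// deserializeS3Expires is defined elsewhere in this package; the version
// below is an assumption reconstructed from how the call sites treat its
// result, not the vendored implementation.
func deserializeS3Expires(v string) (*time.Time, error) {
	t, err := smithytime.ParseHTTPDate(strings.TrimSpace(v))
	if err != nil {
		// tolerate malformed dates: leave Expires nil instead of failing the call
		return nil, nil
	}
	return &t, nil
}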
startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -6365,6 +7022,7 @@ func (m *awsRestxml_deserializeOpGetObjectRetention) HandleDeserialize(ctx conte } } + span.End() return out, metadata, err } @@ -6465,6 +7123,10 @@ func (m *awsRestxml_deserializeOpGetObjectTagging) HandleDeserialize(ctx context return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -6509,6 +7171,7 @@ func (m *awsRestxml_deserializeOpGetObjectTagging) HandleDeserialize(ctx context } } + span.End() return out, metadata, err } @@ -6621,6 +7284,10 @@ func (m *awsRestxml_deserializeOpGetObjectTorrent) HandleDeserialize(ctx context return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -6642,6 +7309,7 @@ func (m *awsRestxml_deserializeOpGetObjectTorrent) HandleDeserialize(ctx context return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to deserialize response payload, %w", err)} } + span.End() return out, metadata, err } @@ -6720,6 +7388,10 @@ func (m *awsRestxml_deserializeOpGetPublicAccessBlock) HandleDeserialize(ctx con return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -6759,6 +7431,7 @@ func (m *awsRestxml_deserializeOpGetPublicAccessBlock) HandleDeserialize(ctx con } } + span.End() return out, metadata, err } @@ -6859,6 +7532,10 @@ func (m *awsRestxml_deserializeOpHeadBucket) HandleDeserialize(ctx context.Conte return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -6875,6 +7552,7 @@ func (m *awsRestxml_deserializeOpHeadBucket) HandleDeserialize(ctx context.Conte return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} } + span.End() return out, metadata, err } @@ -6968,6 +7646,10 @@ func (m *awsRestxml_deserializeOpHeadObject) HandleDeserialize(ctx context.Conte return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := 
out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -6984,6 +7666,7 @@ func (m *awsRestxml_deserializeOpHeadObject) HandleDeserialize(ctx context.Conte return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} } + span.End() return out, metadata, err } @@ -7069,6 +7752,11 @@ func awsRestxml_deserializeOpHttpBindingsHeadObjectOutput(v *HeadObjectOutput, r v.ChecksumCRC32C = ptr.String(headerValues[0]) } + if headerValues := response.Header.Values("x-amz-checksum-crc64nvme"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ChecksumCRC64NVME = ptr.String(headerValues[0]) + } + if headerValues := response.Header.Values("x-amz-checksum-sha1"); len(headerValues) != 0 { headerValues[0] = strings.TrimSpace(headerValues[0]) v.ChecksumSHA1 = ptr.String(headerValues[0]) @@ -7079,6 +7767,11 @@ func awsRestxml_deserializeOpHttpBindingsHeadObjectOutput(v *HeadObjectOutput, r v.ChecksumSHA256 = ptr.String(headerValues[0]) } + if headerValues := response.Header.Values("x-amz-checksum-type"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ChecksumType = types.ChecksumType(headerValues[0]) + } + if headerValues := response.Header.Values("Content-Disposition"); len(headerValues) != 0 { headerValues[0] = strings.TrimSpace(headerValues[0]) v.ContentDisposition = ptr.String(headerValues[0]) @@ -7103,6 +7796,11 @@ func awsRestxml_deserializeOpHttpBindingsHeadObjectOutput(v *HeadObjectOutput, r v.ContentLength = ptr.Int64(vv) } + if headerValues := response.Header.Values("Content-Range"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ContentRange = ptr.String(headerValues[0]) + } + if headerValues := response.Header.Values("Content-Type"); len(headerValues) != 0 { headerValues[0] = strings.TrimSpace(headerValues[0]) v.ContentType = ptr.String(headerValues[0]) @@ -7128,12 +7826,17 @@ func awsRestxml_deserializeOpHttpBindingsHeadObjectOutput(v *HeadObjectOutput, r } if headerValues := response.Header.Values("Expires"); len(headerValues) != 0 { - headerValues[0] = strings.TrimSpace(headerValues[0]) - t, err := smithytime.ParseHTTPDate(headerValues[0]) + deserOverride, err := deserializeS3Expires(headerValues[0]) if err != nil { return err } - v.Expires = ptr.Time(t) + v.Expires = deserOverride + + } + + if headerValues := response.Header.Values("Expires"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ExpiresString = ptr.String(headerValues[0]) } if headerValues := response.Header.Values("Last-Modified"); len(headerValues) != 0 { @@ -7260,6 +7963,10 @@ func (m *awsRestxml_deserializeOpListBucketAnalyticsConfigurations) HandleDeseri return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -7299,6 +8006,7 @@ func (m *awsRestxml_deserializeOpListBucketAnalyticsConfigurations) HandleDeseri } } + span.End() return out, metadata, err } @@ -7441,6 +8149,10 @@ func (m *awsRestxml_deserializeOpListBucketIntelligentTieringConfigurations) Han return out, metadata, err } + _, 
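// The CRC64NVME and checksum-type bindings added for GetObject, HeadObject,
// PutObject, and UploadPart all follow this file's standard header idiom:
// take the first header value, trim it, then store it as either a string
// pointer or a string-backed enum. Condensed form (v is the operation's
// output struct):
if hv := response.Header.Values("x-amz-checksum-crc64nvme"); len(hv) != 0 {
	v.ChecksumCRC64NVME = ptr.String(strings.TrimSpace(hv[0]))
}
if hv := response.Header.Values("x-amz-checksum-type"); len(hv) != 0 {
	v.ChecksumType = types.ChecksumType(strings.TrimSpace(hv[0]))
}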
span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -7480,6 +8192,7 @@ func (m *awsRestxml_deserializeOpListBucketIntelligentTieringConfigurations) Han } } + span.End() return out, metadata, err } @@ -7622,6 +8335,10 @@ func (m *awsRestxml_deserializeOpListBucketInventoryConfigurations) HandleDeseri return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -7661,6 +8378,7 @@ func (m *awsRestxml_deserializeOpListBucketInventoryConfigurations) HandleDeseri } } + span.End() return out, metadata, err } @@ -7803,6 +8521,10 @@ func (m *awsRestxml_deserializeOpListBucketMetricsConfigurations) HandleDeserial return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -7842,6 +8564,7 @@ func (m *awsRestxml_deserializeOpListBucketMetricsConfigurations) HandleDeserial } } + span.End() return out, metadata, err } @@ -7984,6 +8707,10 @@ func (m *awsRestxml_deserializeOpListBuckets) HandleDeserialize(ctx context.Cont return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -8023,6 +8750,7 @@ func (m *awsRestxml_deserializeOpListBuckets) HandleDeserialize(ctx context.Cont } } + span.End() return out, metadata, err } @@ -8094,12 +8822,38 @@ func awsRestxml_deserializeOpDocumentListBucketsOutput(v **ListBucketsOutput, de return err } + case strings.EqualFold("ContinuationToken", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ContinuationToken = ptr.String(xtv) + } + case strings.EqualFold("Owner", t.Name.Local): nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) if err := awsRestxml_deserializeDocumentOwner(&sv.Owner, nodeDecoder); err != nil { return err } + case strings.EqualFold("Prefix", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Prefix = ptr.String(xtv) + } + default: // Do nothing and ignore the unexpected tag element err = decoder.Decoder.Skip() @@ -8129,6 +8883,10 @@ func (m *awsRestxml_deserializeOpListDirectoryBuckets) HandleDeserialize(ctx con return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() 
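// ListBucketsOutput gains ContinuationToken and Prefix above; both use the
// file's scalar-element idiom: fetch the node value, skip empty nodes, and
// store the text through ptr.String. Condensed into a standalone sketch
// (hypothetical function name; the real code also handles Buckets and Owner):
func decodeListBucketsScalarsSketch(sv *ListBucketsOutput, decoder smithyxml.NodeDecoder) error {
	for {
		t, done, err := decoder.Token()
		if err != nil {
			return err
		}
		if done {
			return nil
		}
		nested := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
		switch {
		case strings.EqualFold("ContinuationToken", t.Name.Local):
			if val, err := nested.Value(); err != nil {
				return err
			} else if val != nil {
				sv.ContinuationToken = ptr.String(string(val))
			}
		case strings.EqualFold("Prefix", t.Name.Local):
			if val, err := nested.Value(); err != nil {
				return err
			} else if val != nil {
				sv.Prefix = ptr.String(string(val))
			}
		default:
			// unknown tags are skipped so new service fields stay backward compatible
			if err := nested.Decoder.Skip(); err != nil {
				return err
			}
		}
	}
}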
response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -8168,6 +8926,7 @@ func (m *awsRestxml_deserializeOpListDirectoryBuckets) HandleDeserialize(ctx con } } + span.End() return out, metadata, err } @@ -8281,6 +9040,10 @@ func (m *awsRestxml_deserializeOpListMultipartUploads) HandleDeserialize(ctx con return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -8325,6 +9088,7 @@ func (m *awsRestxml_deserializeOpListMultipartUploads) HandleDeserialize(ctx con } } + span.End() return out, metadata, err } @@ -8580,6 +9344,10 @@ func (m *awsRestxml_deserializeOpListObjects) HandleDeserialize(ctx context.Cont return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -8624,6 +9392,7 @@ func (m *awsRestxml_deserializeOpListObjects) HandleDeserialize(ctx context.Cont } } + span.End() return out, metadata, err } @@ -8856,6 +9625,10 @@ func (m *awsRestxml_deserializeOpListObjectsV2) HandleDeserialize(ctx context.Co return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -8900,6 +9673,7 @@ func (m *awsRestxml_deserializeOpListObjectsV2) HandleDeserialize(ctx context.Co } } + span.End() return out, metadata, err } @@ -9162,6 +9936,10 @@ func (m *awsRestxml_deserializeOpListObjectVersions) HandleDeserialize(ctx conte return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -9206,6 +9984,7 @@ func (m *awsRestxml_deserializeOpListObjectVersions) HandleDeserialize(ctx conte } } + span.End() return out, metadata, err } @@ -9467,6 +10246,10 @@ func (m *awsRestxml_deserializeOpListParts) HandleDeserialize(ctx context.Contex return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -9511,6 +10294,7 @@ func (m *awsRestxml_deserializeOpListParts) HandleDeserialize(ctx context.Contex } } + span.End() return out, metadata, err } @@ -9628,6 +10412,19 @@ func 
awsRestxml_deserializeOpDocumentListPartsOutput(v **ListPartsOutput, decode sv.ChecksumAlgorithm = types.ChecksumAlgorithm(xtv) } + case strings.EqualFold("ChecksumType", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumType = types.ChecksumType(xtv) + } + case strings.EqualFold("Initiator", t.Name.Local): nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) if err := awsRestxml_deserializeDocumentInitiator(&sv.Initiator, nodeDecoder); err != nil { @@ -9773,6 +10570,10 @@ func (m *awsRestxml_deserializeOpPutBucketAccelerateConfiguration) HandleDeseria return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -9790,6 +10591,7 @@ func (m *awsRestxml_deserializeOpPutBucketAccelerateConfiguration) HandleDeseria } } + span.End() return out, metadata, err } @@ -9848,6 +10650,10 @@ func (m *awsRestxml_deserializeOpPutBucketAcl) HandleDeserialize(ctx context.Con return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -9865,6 +10671,7 @@ func (m *awsRestxml_deserializeOpPutBucketAcl) HandleDeserialize(ctx context.Con } } + span.End() return out, metadata, err } @@ -9923,6 +10730,10 @@ func (m *awsRestxml_deserializeOpPutBucketAnalyticsConfiguration) HandleDeserial return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -9940,6 +10751,7 @@ func (m *awsRestxml_deserializeOpPutBucketAnalyticsConfiguration) HandleDeserial } } + span.End() return out, metadata, err } @@ -9998,6 +10810,10 @@ func (m *awsRestxml_deserializeOpPutBucketCors) HandleDeserialize(ctx context.Co return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -10015,6 +10831,7 @@ func (m *awsRestxml_deserializeOpPutBucketCors) HandleDeserialize(ctx context.Co } } + span.End() return out, metadata, err } @@ -10073,6 +10890,10 @@ func (m *awsRestxml_deserializeOpPutBucketEncryption) HandleDeserialize(ctx cont return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown 
transport type %T", out.RawResponse)} @@ -10090,6 +10911,7 @@ func (m *awsRestxml_deserializeOpPutBucketEncryption) HandleDeserialize(ctx cont } } + span.End() return out, metadata, err } @@ -10148,6 +10970,10 @@ func (m *awsRestxml_deserializeOpPutBucketIntelligentTieringConfiguration) Handl return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -10165,6 +10991,7 @@ func (m *awsRestxml_deserializeOpPutBucketIntelligentTieringConfiguration) Handl } } + span.End() return out, metadata, err } @@ -10223,6 +11050,10 @@ func (m *awsRestxml_deserializeOpPutBucketInventoryConfiguration) HandleDeserial return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -10240,6 +11071,7 @@ func (m *awsRestxml_deserializeOpPutBucketInventoryConfiguration) HandleDeserial } } + span.End() return out, metadata, err } @@ -10298,6 +11130,10 @@ func (m *awsRestxml_deserializeOpPutBucketLifecycleConfiguration) HandleDeserial return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -10309,12 +11145,12 @@ func (m *awsRestxml_deserializeOpPutBucketLifecycleConfiguration) HandleDeserial output := &PutBucketLifecycleConfigurationOutput{} out.Result = output - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } + err = awsRestxml_deserializeOpHttpBindingsPutBucketLifecycleConfigurationOutput(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} } + span.End() return out, metadata, err } @@ -10358,6 +11194,19 @@ func awsRestxml_deserializeOpErrorPutBucketLifecycleConfiguration(response *smit } } +func awsRestxml_deserializeOpHttpBindingsPutBucketLifecycleConfigurationOutput(v *PutBucketLifecycleConfigurationOutput, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("x-amz-transition-default-minimum-object-size"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.TransitionDefaultMinimumObjectSize = types.TransitionDefaultMinimumObjectSize(headerValues[0]) + } + + return nil +} + type awsRestxml_deserializeOpPutBucketLogging struct { } @@ -10373,6 +11222,10 @@ func (m *awsRestxml_deserializeOpPutBucketLogging) HandleDeserialize(ctx context return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, 
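// Until this change PutBucketLifecycleConfiguration discarded its response
// body; the hunk above now surfaces one response header on the output.
// Shape of the new binding (TransitionDefaultMinimumObjectSize is a
// string-backed enum in this package's types):
if hv := response.Header.Values("x-amz-transition-default-minimum-object-size"); len(hv) != 0 {
	v.TransitionDefaultMinimumObjectSize = types.TransitionDefaultMinimumObjectSize(strings.TrimSpace(hv[0]))
}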
"client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -10390,6 +11243,7 @@ func (m *awsRestxml_deserializeOpPutBucketLogging) HandleDeserialize(ctx context } } + span.End() return out, metadata, err } @@ -10448,6 +11302,10 @@ func (m *awsRestxml_deserializeOpPutBucketMetricsConfiguration) HandleDeserializ return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -10465,6 +11323,7 @@ func (m *awsRestxml_deserializeOpPutBucketMetricsConfiguration) HandleDeserializ } } + span.End() return out, metadata, err } @@ -10523,6 +11382,10 @@ func (m *awsRestxml_deserializeOpPutBucketNotificationConfiguration) HandleDeser return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -10540,6 +11403,7 @@ func (m *awsRestxml_deserializeOpPutBucketNotificationConfiguration) HandleDeser } } + span.End() return out, metadata, err } @@ -10598,6 +11462,10 @@ func (m *awsRestxml_deserializeOpPutBucketOwnershipControls) HandleDeserialize(c return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -10615,6 +11483,7 @@ func (m *awsRestxml_deserializeOpPutBucketOwnershipControls) HandleDeserialize(c } } + span.End() return out, metadata, err } @@ -10673,6 +11542,10 @@ func (m *awsRestxml_deserializeOpPutBucketPolicy) HandleDeserialize(ctx context. return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -10690,6 +11563,7 @@ func (m *awsRestxml_deserializeOpPutBucketPolicy) HandleDeserialize(ctx context. 
} } + span.End() return out, metadata, err } @@ -10748,6 +11622,10 @@ func (m *awsRestxml_deserializeOpPutBucketReplication) HandleDeserialize(ctx con return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -10765,6 +11643,7 @@ func (m *awsRestxml_deserializeOpPutBucketReplication) HandleDeserialize(ctx con } } + span.End() return out, metadata, err } @@ -10823,6 +11702,10 @@ func (m *awsRestxml_deserializeOpPutBucketRequestPayment) HandleDeserialize(ctx return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -10840,6 +11723,7 @@ func (m *awsRestxml_deserializeOpPutBucketRequestPayment) HandleDeserialize(ctx } } + span.End() return out, metadata, err } @@ -10898,6 +11782,10 @@ func (m *awsRestxml_deserializeOpPutBucketTagging) HandleDeserialize(ctx context return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -10915,6 +11803,7 @@ func (m *awsRestxml_deserializeOpPutBucketTagging) HandleDeserialize(ctx context } } + span.End() return out, metadata, err } @@ -10973,6 +11862,10 @@ func (m *awsRestxml_deserializeOpPutBucketVersioning) HandleDeserialize(ctx cont return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -10990,6 +11883,7 @@ func (m *awsRestxml_deserializeOpPutBucketVersioning) HandleDeserialize(ctx cont } } + span.End() return out, metadata, err } @@ -11048,6 +11942,10 @@ func (m *awsRestxml_deserializeOpPutBucketWebsite) HandleDeserialize(ctx context return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -11065,6 +11963,7 @@ func (m *awsRestxml_deserializeOpPutBucketWebsite) HandleDeserialize(ctx context } } + span.End() return out, metadata, err } @@ -11123,6 +12022,10 @@ func (m *awsRestxml_deserializeOpPutObject) HandleDeserialize(ctx context.Contex return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := 
out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -11139,6 +12042,7 @@ func (m *awsRestxml_deserializeOpPutObject) HandleDeserialize(ctx context.Contex return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} } + span.End() return out, metadata, err } @@ -11172,6 +12076,18 @@ func awsRestxml_deserializeOpErrorPutObject(response *smithyhttp.Response, metad } errorBody.Seek(0, io.SeekStart) switch { + case strings.EqualFold("EncryptionTypeMismatch", errorCode): + return awsRestxml_deserializeErrorEncryptionTypeMismatch(response, errorBody) + + case strings.EqualFold("InvalidRequest", errorCode): + return awsRestxml_deserializeErrorInvalidRequest(response, errorBody) + + case strings.EqualFold("InvalidWriteOffset", errorCode): + return awsRestxml_deserializeErrorInvalidWriteOffset(response, errorBody) + + case strings.EqualFold("TooManyParts", errorCode): + return awsRestxml_deserializeErrorTooManyParts(response, errorBody) + default: genericError := &smithy.GenericAPIError{ Code: errorCode, @@ -11206,6 +12122,11 @@ func awsRestxml_deserializeOpHttpBindingsPutObjectOutput(v *PutObjectOutput, res v.ChecksumCRC32C = ptr.String(headerValues[0]) } + if headerValues := response.Header.Values("x-amz-checksum-crc64nvme"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ChecksumCRC64NVME = ptr.String(headerValues[0]) + } + if headerValues := response.Header.Values("x-amz-checksum-sha1"); len(headerValues) != 0 { headerValues[0] = strings.TrimSpace(headerValues[0]) v.ChecksumSHA1 = ptr.String(headerValues[0]) @@ -11216,6 +12137,11 @@ func awsRestxml_deserializeOpHttpBindingsPutObjectOutput(v *PutObjectOutput, res v.ChecksumSHA256 = ptr.String(headerValues[0]) } + if headerValues := response.Header.Values("x-amz-checksum-type"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ChecksumType = types.ChecksumType(headerValues[0]) + } + if headerValues := response.Header.Values("ETag"); len(headerValues) != 0 { headerValues[0] = strings.TrimSpace(headerValues[0]) v.ETag = ptr.String(headerValues[0]) @@ -11236,6 +12162,15 @@ func awsRestxml_deserializeOpHttpBindingsPutObjectOutput(v *PutObjectOutput, res v.ServerSideEncryption = types.ServerSideEncryption(headerValues[0]) } + if headerValues := response.Header.Values("x-amz-object-size"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + vv, err := strconv.ParseInt(headerValues[0], 0, 64) + if err != nil { + return err + } + v.Size = ptr.Int64(vv) + } + if headerValues := response.Header.Values("x-amz-server-side-encryption-customer-algorithm"); len(headerValues) != 0 { headerValues[0] = strings.TrimSpace(headerValues[0]) v.SSECustomerAlgorithm = ptr.String(headerValues[0]) @@ -11279,6 +12214,10 @@ func (m *awsRestxml_deserializeOpPutObjectAcl) HandleDeserialize(ctx context.Con return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -11295,6 +12234,7 @@ func (m *awsRestxml_deserializeOpPutObjectAcl) HandleDeserialize(ctx context.Con return out, 
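// Two PutObject additions above, condensed. First, the error switch now
// recognizes four modeled codes before the generic fallback:
//
//	case strings.EqualFold("EncryptionTypeMismatch", errorCode):
//		return awsRestxml_deserializeErrorEncryptionTypeMismatch(response, errorBody)
//	// InvalidRequest, InvalidWriteOffset, TooManyParts follow the same shape
//
// Second, the output binds the new x-amz-object-size header. ParseInt with
// base 0 mirrors the generated code; base 0 would also accept 0x/0o/0b
// prefixes, though S3 sends plain decimal:
if hv := response.Header.Values("x-amz-object-size"); len(hv) != 0 {
	n, err := strconv.ParseInt(strings.TrimSpace(hv[0]), 0, 64)
	if err != nil {
		return err
	}
	v.Size = ptr.Int64(n)
}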
metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} } + span.End() return out, metadata, err } @@ -11369,6 +12309,10 @@ func (m *awsRestxml_deserializeOpPutObjectLegalHold) HandleDeserialize(ctx conte return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -11385,6 +12329,7 @@ func (m *awsRestxml_deserializeOpPutObjectLegalHold) HandleDeserialize(ctx conte return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} } + span.End() return out, metadata, err } @@ -11456,6 +12401,10 @@ func (m *awsRestxml_deserializeOpPutObjectLockConfiguration) HandleDeserialize(c return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -11472,6 +12421,7 @@ func (m *awsRestxml_deserializeOpPutObjectLockConfiguration) HandleDeserialize(c return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} } + span.End() return out, metadata, err } @@ -11543,6 +12493,10 @@ func (m *awsRestxml_deserializeOpPutObjectRetention) HandleDeserialize(ctx conte return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -11559,6 +12513,7 @@ func (m *awsRestxml_deserializeOpPutObjectRetention) HandleDeserialize(ctx conte return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} } + span.End() return out, metadata, err } @@ -11630,6 +12585,10 @@ func (m *awsRestxml_deserializeOpPutObjectTagging) HandleDeserialize(ctx context return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -11646,6 +12605,7 @@ func (m *awsRestxml_deserializeOpPutObjectTagging) HandleDeserialize(ctx context return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} } + span.End() return out, metadata, err } @@ -11717,6 +12677,10 @@ func (m *awsRestxml_deserializeOpPutPublicAccessBlock) HandleDeserialize(ctx con return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, 
ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -11734,6 +12698,7 @@ func (m *awsRestxml_deserializeOpPutPublicAccessBlock) HandleDeserialize(ctx con } } + span.End() return out, metadata, err } @@ -11792,6 +12757,10 @@ func (m *awsRestxml_deserializeOpRestoreObject) HandleDeserialize(ctx context.Co return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -11808,6 +12777,7 @@ func (m *awsRestxml_deserializeOpRestoreObject) HandleDeserialize(ctx context.Co return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} } + span.End() return out, metadata, err } @@ -11887,6 +12857,10 @@ func (m *awsRestxml_deserializeOpSelectObjectContent) HandleDeserialize(ctx cont return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -11898,6 +12872,7 @@ func (m *awsRestxml_deserializeOpSelectObjectContent) HandleDeserialize(ctx cont output := &SelectObjectContentOutput{} out.Result = output + span.End() return out, metadata, err } @@ -11956,6 +12931,10 @@ func (m *awsRestxml_deserializeOpUploadPart) HandleDeserialize(ctx context.Conte return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -11972,6 +12951,7 @@ func (m *awsRestxml_deserializeOpUploadPart) HandleDeserialize(ctx context.Conte return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} } + span.End() return out, metadata, err } @@ -12039,6 +13019,11 @@ func awsRestxml_deserializeOpHttpBindingsUploadPartOutput(v *UploadPartOutput, r v.ChecksumCRC32C = ptr.String(headerValues[0]) } + if headerValues := response.Header.Values("x-amz-checksum-crc64nvme"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.ChecksumCRC64NVME = ptr.String(headerValues[0]) + } + if headerValues := response.Header.Values("x-amz-checksum-sha1"); len(headerValues) != 0 { headerValues[0] = strings.TrimSpace(headerValues[0]) v.ChecksumSHA1 = ptr.String(headerValues[0]) @@ -12097,6 +13082,10 @@ func (m *awsRestxml_deserializeOpUploadPartCopy) HandleDeserialize(ctx context.C return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown 
transport type %T", out.RawResponse)} @@ -12141,6 +13130,7 @@ func (m *awsRestxml_deserializeOpUploadPartCopy) HandleDeserialize(ctx context.C } } + span.End() return out, metadata, err } @@ -12287,6 +13277,10 @@ func (m *awsRestxml_deserializeOpWriteGetObjectResponse) HandleDeserialize(ctx c return out, metadata, err } + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() response, ok := out.RawResponse.(*smithyhttp.Response) if !ok { return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} @@ -12304,6 +13298,7 @@ func (m *awsRestxml_deserializeOpWriteGetObjectResponse) HandleDeserialize(ctx c } } + span.End() return out, metadata, err } @@ -12865,6 +13860,11 @@ func awsRestxml_deserializeErrorBucketAlreadyOwnedByYou(response *smithyhttp.Res return output } +func awsRestxml_deserializeErrorEncryptionTypeMismatch(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.EncryptionTypeMismatch{} + return output +} + func awsRestxml_deserializeErrorInvalidObjectState(response *smithyhttp.Response, errorBody *bytes.Reader) error { output := &types.InvalidObjectState{} var buff [1024]byte @@ -12898,13 +13898,23 @@ func awsRestxml_deserializeErrorInvalidObjectState(response *smithyhttp.Response return output } -func awsRestxml_deserializeErrorNoSuchBucket(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.NoSuchBucket{} +func awsRestxml_deserializeErrorInvalidRequest(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.InvalidRequest{} return output } -func awsRestxml_deserializeErrorNoSuchKey(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.NoSuchKey{} +func awsRestxml_deserializeErrorInvalidWriteOffset(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.InvalidWriteOffset{} + return output +} + +func awsRestxml_deserializeErrorNoSuchBucket(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.NoSuchBucket{} + return output +} + +func awsRestxml_deserializeErrorNoSuchKey(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.NoSuchKey{} return output } @@ -12928,6 +13938,11 @@ func awsRestxml_deserializeErrorObjectNotInActiveTierError(response *smithyhttp. 
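// The new modeled errors registered for PutObject (EncryptionTypeMismatch,
// InvalidRequest, InvalidWriteOffset, TooManyParts) carry no modeled
// members, so their deserializers reduce to constructing the typed value,
// as the hunks here show. Mirrored shape (hypothetical name, same body as
// the generated helpers):
func deserializeEmptyModeledErrorSketch(response *smithyhttp.Response, errorBody *bytes.Reader) error {
	return &types.TooManyParts{} // nothing in the body to decode for memberless error shapes
}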
return output } +func awsRestxml_deserializeErrorTooManyParts(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.TooManyParts{} + return output +} + func awsRestxml_deserializeDocumentAbortIncompleteMultipartUpload(v **types.AbortIncompleteMultipartUpload, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -13677,6 +14692,19 @@ func awsRestxml_deserializeDocumentBucket(v **types.Bucket, decoder smithyxml.No originalDecoder := decoder decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) switch { + case strings.EqualFold("BucketRegion", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.BucketRegion = ptr.String(xtv) + } + case strings.EqualFold("CreationDate", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -13909,6 +14937,19 @@ func awsRestxml_deserializeDocumentChecksum(v **types.Checksum, decoder smithyxm sv.ChecksumCRC32C = ptr.String(xtv) } + case strings.EqualFold("ChecksumCRC64NVME", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumCRC64NVME = ptr.String(xtv) + } + case strings.EqualFold("ChecksumSHA1", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -13935,6 +14976,19 @@ func awsRestxml_deserializeDocumentChecksum(v **types.Checksum, decoder smithyxm sv.ChecksumSHA256 = ptr.String(xtv) } + case strings.EqualFold("ChecksumType", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumType = types.ChecksumType(xtv) + } + default: // Do nothing and ignore the unexpected tag element err = decoder.Decoder.Skip() @@ -14256,6 +15310,19 @@ func awsRestxml_deserializeDocumentCopyObjectResult(v **types.CopyObjectResult, sv.ChecksumCRC32C = ptr.String(xtv) } + case strings.EqualFold("ChecksumCRC64NVME", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumCRC64NVME = ptr.String(xtv) + } + case strings.EqualFold("ChecksumSHA1", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -14282,6 +15349,19 @@ func awsRestxml_deserializeDocumentCopyObjectResult(v **types.CopyObjectResult, sv.ChecksumSHA256 = ptr.String(xtv) } + case strings.EqualFold("ChecksumType", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumType = types.ChecksumType(xtv) + } + case strings.EqualFold("ETag", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -14374,6 +15454,19 @@ func awsRestxml_deserializeDocumentCopyPartResult(v **types.CopyPartResult, deco sv.ChecksumCRC32C = ptr.String(xtv) } + case strings.EqualFold("ChecksumCRC64NVME", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumCRC64NVME = ptr.String(xtv) + } + case strings.EqualFold("ChecksumSHA1", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -15210,6 +16303,42 @@ func awsRestxml_deserializeDocumentEncryptionConfiguration(v **types.EncryptionC return nil } +func awsRestxml_deserializeDocumentEncryptionTypeMismatch(v **types.EncryptionTypeMismatch, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type 
%T", v) + } + var sv *types.EncryptionTypeMismatch + if *v == nil { + sv = &types.EncryptionTypeMismatch{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + func awsRestxml_deserializeDocumentError(v **types.Error, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -15298,6 +16427,68 @@ func awsRestxml_deserializeDocumentError(v **types.Error, decoder smithyxml.Node return nil } +func awsRestxml_deserializeDocumentErrorDetails(v **types.ErrorDetails, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.ErrorDetails + if *v == nil { + sv = &types.ErrorDetails{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("ErrorCode", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ErrorCode = ptr.String(xtv) + } + + case strings.EqualFold("ErrorMessage", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ErrorMessage = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + func awsRestxml_deserializeDocumentErrorDocument(v **types.ErrorDocument, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -15790,6 +16981,67 @@ func awsRestxml_deserializeDocumentFilterRuleListUnwrapped(v *[]types.FilterRule *v = sv return nil } +func awsRestxml_deserializeDocumentGetBucketMetadataTableConfigurationResult(v **types.GetBucketMetadataTableConfigurationResult, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.GetBucketMetadataTableConfigurationResult + if *v == nil { + sv = &types.GetBucketMetadataTableConfigurationResult{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Error", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentErrorDetails(&sv.Error, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("MetadataTableConfigurationResult", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentMetadataTableConfigurationResult(&sv.MetadataTableConfigurationResult, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Status", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Status 
= ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + func awsRestxml_deserializeDocumentGetObjectAttributesParts(v **types.GetObjectAttributesParts, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -16565,6 +17817,78 @@ func awsRestxml_deserializeDocumentInvalidObjectState(v **types.InvalidObjectSta return nil } +func awsRestxml_deserializeDocumentInvalidRequest(v **types.InvalidRequest, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.InvalidRequest + if *v == nil { + sv = &types.InvalidRequest{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentInvalidWriteOffset(v **types.InvalidWriteOffset, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.InvalidWriteOffset + if *v == nil { + sv = &types.InvalidWriteOffset{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + func awsRestxml_deserializeDocumentInventoryConfiguration(v **types.InventoryConfiguration, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -17525,12 +18849,17 @@ func awsRestxml_deserializeDocumentLifecycleRuleAndOperator(v **types.LifecycleR return nil } -func awsRestxml_deserializeDocumentLifecycleRuleFilter(v *types.LifecycleRuleFilter, decoder smithyxml.NodeDecoder) error { +func awsRestxml_deserializeDocumentLifecycleRuleFilter(v **types.LifecycleRuleFilter, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } - var uv types.LifecycleRuleFilter - var memberFound bool + var sv *types.LifecycleRuleFilter + if *v == nil { + sv = &types.LifecycleRuleFilter{} + } else { + sv = *v + } + for { t, done, err := decoder.Token() if err != nil { @@ -17539,27 +18868,16 @@ func awsRestxml_deserializeDocumentLifecycleRuleFilter(v *types.LifecycleRuleFil if done { break } - if memberFound { - if err = decoder.Decoder.Skip(); err != nil { - return err - } - } originalDecoder := decoder decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) switch { case strings.EqualFold("And", t.Name.Local): - var mv types.LifecycleRuleAndOperator nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &mv - if err := awsRestxml_deserializeDocumentLifecycleRuleAndOperator(&destAddr, nodeDecoder); err != nil { + if err := awsRestxml_deserializeDocumentLifecycleRuleAndOperator(&sv.And, nodeDecoder); err != nil { return err } - mv = *destAddr - uv = 
&types.LifecycleRuleFilterMemberAnd{Value: mv} - memberFound = true case strings.EqualFold("ObjectSizeGreaterThan", t.Name.Local): - var mv int64 val, err := decoder.Value() if err != nil { return err @@ -17573,13 +18891,10 @@ func awsRestxml_deserializeDocumentLifecycleRuleFilter(v *types.LifecycleRuleFil if err != nil { return err } - mv = i64 + sv.ObjectSizeGreaterThan = ptr.Int64(i64) } - uv = &types.LifecycleRuleFilterMemberObjectSizeGreaterThan{Value: mv} - memberFound = true case strings.EqualFold("ObjectSizeLessThan", t.Name.Local): - var mv int64 val, err := decoder.Value() if err != nil { return err @@ -17593,13 +18908,10 @@ func awsRestxml_deserializeDocumentLifecycleRuleFilter(v *types.LifecycleRuleFil if err != nil { return err } - mv = i64 + sv.ObjectSizeLessThan = ptr.Int64(i64) } - uv = &types.LifecycleRuleFilterMemberObjectSizeLessThan{Value: mv} - memberFound = true case strings.EqualFold("Prefix", t.Name.Local): - var mv string val, err := decoder.Value() if err != nil { return err @@ -17609,30 +18921,26 @@ func awsRestxml_deserializeDocumentLifecycleRuleFilter(v *types.LifecycleRuleFil } { xtv := string(val) - mv = xtv + sv.Prefix = ptr.String(xtv) } - uv = &types.LifecycleRuleFilterMemberPrefix{Value: mv} - memberFound = true case strings.EqualFold("Tag", t.Name.Local): - var mv types.Tag nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &mv - if err := awsRestxml_deserializeDocumentTag(&destAddr, nodeDecoder); err != nil { + if err := awsRestxml_deserializeDocumentTag(&sv.Tag, nodeDecoder); err != nil { return err } - mv = *destAddr - uv = &types.LifecycleRuleFilterMemberTag{Value: mv} - memberFound = true default: - uv = &types.UnknownUnionMember{Tag: t.Name.Local} - memberFound = true + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } } decoder = originalDecoder } - *v = uv + *v = sv return nil } @@ -17778,6 +19086,48 @@ func awsRestxml_deserializeDocumentLoggingEnabled(v **types.LoggingEnabled, deco return nil } +func awsRestxml_deserializeDocumentMetadataTableConfigurationResult(v **types.MetadataTableConfigurationResult, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.MetadataTableConfigurationResult + if *v == nil { + sv = &types.MetadataTableConfigurationResult{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("S3TablesDestinationResult", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentS3TablesDestinationResult(&sv.S3TablesDestinationResult, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + func awsRestxml_deserializeDocumentMetrics(v **types.Metrics, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -18146,6 +19496,19 @@ func awsRestxml_deserializeDocumentMultipartUpload(v **types.MultipartUpload, de sv.ChecksumAlgorithm = types.ChecksumAlgorithm(xtv) } + case strings.EqualFold("ChecksumType", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err 
+ } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumType = types.ChecksumType(xtv) + } + case strings.EqualFold("Initiated", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -18731,6 +20094,19 @@ func awsRestxml_deserializeDocumentObject(v **types.Object, decoder smithyxml.No return err } + case strings.EqualFold("ChecksumType", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumType = types.ChecksumType(xtv) + } + case strings.EqualFold("ETag", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -19230,6 +20606,19 @@ func awsRestxml_deserializeDocumentObjectPart(v **types.ObjectPart, decoder smit sv.ChecksumCRC32C = ptr.String(xtv) } + case strings.EqualFold("ChecksumCRC64NVME", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumCRC64NVME = ptr.String(xtv) + } + case strings.EqualFold("ChecksumSHA1", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -19332,6 +20721,19 @@ func awsRestxml_deserializeDocumentObjectVersion(v **types.ObjectVersion, decode return err } + case strings.EqualFold("ChecksumType", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumType = types.ChecksumType(xtv) + } + case strings.EqualFold("ETag", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -19797,6 +21199,19 @@ func awsRestxml_deserializeDocumentPart(v **types.Part, decoder smithyxml.NodeDe sv.ChecksumCRC32C = ptr.String(xtv) } + case strings.EqualFold("ChecksumCRC64NVME", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ChecksumCRC64NVME = ptr.String(xtv) + } + case strings.EqualFold("ChecksumSHA1", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -20824,12 +22239,17 @@ func awsRestxml_deserializeDocumentReplicationRuleAndOperator(v **types.Replicat return nil } -func awsRestxml_deserializeDocumentReplicationRuleFilter(v *types.ReplicationRuleFilter, decoder smithyxml.NodeDecoder) error { +func awsRestxml_deserializeDocumentReplicationRuleFilter(v **types.ReplicationRuleFilter, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } - var uv types.ReplicationRuleFilter - var memberFound bool + var sv *types.ReplicationRuleFilter + if *v == nil { + sv = &types.ReplicationRuleFilter{} + } else { + sv = *v + } + for { t, done, err := decoder.Token() if err != nil { @@ -20838,27 +22258,16 @@ func awsRestxml_deserializeDocumentReplicationRuleFilter(v *types.ReplicationRul if done { break } - if memberFound { - if err = decoder.Decoder.Skip(); err != nil { - return err - } - } originalDecoder := decoder decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) switch { case strings.EqualFold("And", t.Name.Local): - var mv types.ReplicationRuleAndOperator nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &mv - if err := awsRestxml_deserializeDocumentReplicationRuleAndOperator(&destAddr, nodeDecoder); err != nil { + if err := awsRestxml_deserializeDocumentReplicationRuleAndOperator(&sv.And, nodeDecoder); err != nil { return err } - mv = *destAddr - uv = &types.ReplicationRuleFilterMemberAnd{Value: mv} - memberFound = true case strings.EqualFold("Prefix", t.Name.Local): - var mv string 
val, err := decoder.Value() if err != nil { return err @@ -20868,30 +22277,26 @@ func awsRestxml_deserializeDocumentReplicationRuleFilter(v *types.ReplicationRul } { xtv := string(val) - mv = xtv + sv.Prefix = ptr.String(xtv) } - uv = &types.ReplicationRuleFilterMemberPrefix{Value: mv} - memberFound = true case strings.EqualFold("Tag", t.Name.Local): - var mv types.Tag nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &mv - if err := awsRestxml_deserializeDocumentTag(&destAddr, nodeDecoder); err != nil { + if err := awsRestxml_deserializeDocumentTag(&sv.Tag, nodeDecoder); err != nil { return err } - mv = *destAddr - uv = &types.ReplicationRuleFilterMemberTag{Value: mv} - memberFound = true default: - uv = &types.UnknownUnionMember{Tag: t.Name.Local} - memberFound = true + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } } decoder = originalDecoder } - *v = uv + *v = sv return nil } @@ -21298,6 +22703,94 @@ func awsRestxml_deserializeDocumentS3KeyFilter(v **types.S3KeyFilter, decoder sm return nil } +func awsRestxml_deserializeDocumentS3TablesDestinationResult(v **types.S3TablesDestinationResult, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.S3TablesDestinationResult + if *v == nil { + sv = &types.S3TablesDestinationResult{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("TableArn", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.TableArn = ptr.String(xtv) + } + + case strings.EqualFold("TableBucketArn", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.TableBucketArn = ptr.String(xtv) + } + + case strings.EqualFold("TableName", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.TableName = ptr.String(xtv) + } + + case strings.EqualFold("TableNamespace", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.TableNamespace = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + func awsRestxml_deserializeDocumentServerSideEncryptionByDefault(v **types.ServerSideEncryptionByDefault, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -22370,6 +23863,42 @@ func awsRestxml_deserializeDocumentTieringListUnwrapped(v *[]types.Tiering, deco *v = sv return nil } +func awsRestxml_deserializeDocumentTooManyParts(v **types.TooManyParts, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.TooManyParts + if *v == nil { + sv = &types.TooManyParts{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + 
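+			// TooManyParts carries no modeled members, so every child element falls through to the skip in the default case below.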
default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + func awsRestxml_deserializeDocumentTopicConfiguration(v **types.TopicConfiguration, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/endpoints.go index e6ae2e872..1a4af2822 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/endpoints.go @@ -20,6 +20,7 @@ import ( "github.com/aws/smithy-go/endpoints/private/rulesfn" "github.com/aws/smithy-go/middleware" "github.com/aws/smithy-go/ptr" + "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" "net/http" "net/url" @@ -326,6 +327,13 @@ type EndpointParameters struct { // is required. Prefix *string + // The Copy Source used for Copy Object request. This is an optional parameter that + // will be set automatically for operations that are scoped to Copy + // Source. + // + // Parameter is required. + CopySource *string + // Internal parameter to disable Access Point Buckets // // Parameter is required. @@ -419,6 +427,17 @@ func (p EndpointParameters) WithDefaults() EndpointParameters { return p } +type stringSlice []string + +func (s stringSlice) Get(i int) *string { + if i < 0 || i >= len(s) { + return nil + } + + v := s[i] + return &v +} + // EndpointResolverV2 provides the interface for resolving service endpoints. type EndpointResolverV2 interface { // ResolveEndpoint attempts to resolve the endpoint with the provided options, @@ -702,10 +721,810 @@ func (r *resolver) ResolveEndpoint( if _UseFIPS == true { uriString := func() string { var out strings.Builder - out.WriteString("https://s3express-control-fips.") + out.WriteString("https://s3express-control-fips.") + out.WriteString(_Region) + out.WriteString(".amazonaws.com/") + out.WriteString(_uri_encoded_bucket) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://s3express-control.") + out.WriteString(_Region) + out.WriteString(".amazonaws.com/") + out.WriteString(_uri_encoded_bucket) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() 
smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") + } + } + if awsrulesfn.IsVirtualHostableS3Bucket(_Bucket, false) { + if exprVal := params.DisableS3ExpressSessionAuth; exprVal != nil { + _DisableS3ExpressSessionAuth := *exprVal + _ = _DisableS3ExpressSessionAuth + if _DisableS3ExpressSessionAuth == true { + if exprVal := rulesfn.SubString(_Bucket, 6, 14, true); exprVal != nil { + _s3expressAvailabilityZoneId := *exprVal + _ = _s3expressAvailabilityZoneId + if exprVal := rulesfn.SubString(_Bucket, 14, 16, true); exprVal != nil { + _s3expressAvailabilityZoneDelim := *exprVal + _ = _s3expressAvailabilityZoneDelim + if _s3expressAvailabilityZoneDelim == "--" { + if _UseFIPS == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-fips-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".amazonaws.com") + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".amazonaws.com") + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + if exprVal := rulesfn.SubString(_Bucket, 6, 15, true); exprVal != nil { + _s3expressAvailabilityZoneId := *exprVal + _ = _s3expressAvailabilityZoneId + if exprVal := rulesfn.SubString(_Bucket, 15, 17, true); exprVal != nil { + _s3expressAvailabilityZoneDelim := *exprVal + _ = _s3expressAvailabilityZoneDelim + if _s3expressAvailabilityZoneDelim == "--" { + if _UseFIPS == true { + uriString 
:= func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-fips-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".amazonaws.com") + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".amazonaws.com") + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + if exprVal := rulesfn.SubString(_Bucket, 6, 19, true); exprVal != nil { + _s3expressAvailabilityZoneId := *exprVal + _ = _s3expressAvailabilityZoneId + if exprVal := rulesfn.SubString(_Bucket, 19, 21, true); exprVal != nil { + _s3expressAvailabilityZoneDelim := *exprVal + _ = _s3expressAvailabilityZoneDelim + if _s3expressAvailabilityZoneDelim == "--" { + if _UseFIPS == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-fips-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".amazonaws.com") + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string 
{ + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".amazonaws.com") + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + if exprVal := rulesfn.SubString(_Bucket, 6, 20, true); exprVal != nil { + _s3expressAvailabilityZoneId := *exprVal + _ = _s3expressAvailabilityZoneId + if exprVal := rulesfn.SubString(_Bucket, 20, 22, true); exprVal != nil { + _s3expressAvailabilityZoneDelim := *exprVal + _ = _s3expressAvailabilityZoneDelim + if _s3expressAvailabilityZoneDelim == "--" { + if _UseFIPS == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-fips-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".amazonaws.com") + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".amazonaws.com") + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + if exprVal := rulesfn.SubString(_Bucket, 
6, 26, true); exprVal != nil { + _s3expressAvailabilityZoneId := *exprVal + _ = _s3expressAvailabilityZoneId + if exprVal := rulesfn.SubString(_Bucket, 26, 28, true); exprVal != nil { + _s3expressAvailabilityZoneDelim := *exprVal + _ = _s3expressAvailabilityZoneDelim + if _s3expressAvailabilityZoneDelim == "--" { + if _UseFIPS == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-fips-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".amazonaws.com") + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".amazonaws.com") + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "Unrecognized S3Express bucket name format.") + } + } + if exprVal := rulesfn.SubString(_Bucket, 6, 14, true); exprVal != nil { + _s3expressAvailabilityZoneId := *exprVal + _ = _s3expressAvailabilityZoneId + if exprVal := rulesfn.SubString(_Bucket, 14, 16, true); exprVal != nil { + _s3expressAvailabilityZoneDelim := *exprVal + _ = _s3expressAvailabilityZoneDelim + if _s3expressAvailabilityZoneDelim == "--" { + if _UseFIPS == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-fips-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".amazonaws.com") + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", 
"S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "sigv4-s3express", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".amazonaws.com") + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "sigv4-s3express", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + if exprVal := rulesfn.SubString(_Bucket, 6, 15, true); exprVal != nil { + _s3expressAvailabilityZoneId := *exprVal + _ = _s3expressAvailabilityZoneId + if exprVal := rulesfn.SubString(_Bucket, 15, 17, true); exprVal != nil { + _s3expressAvailabilityZoneDelim := *exprVal + _ = _s3expressAvailabilityZoneDelim + if _s3expressAvailabilityZoneDelim == "--" { + if _UseFIPS == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-fips-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".amazonaws.com") + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "sigv4-s3express", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".amazonaws.com") + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + 
smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "sigv4-s3express", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + if exprVal := rulesfn.SubString(_Bucket, 6, 19, true); exprVal != nil { + _s3expressAvailabilityZoneId := *exprVal + _ = _s3expressAvailabilityZoneId + if exprVal := rulesfn.SubString(_Bucket, 19, 21, true); exprVal != nil { + _s3expressAvailabilityZoneDelim := *exprVal + _ = _s3expressAvailabilityZoneDelim + if _s3expressAvailabilityZoneDelim == "--" { + if _UseFIPS == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-fips-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".amazonaws.com") + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "sigv4-s3express", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") out.WriteString(_Region) - out.WriteString(".amazonaws.com/") - out.WriteString(_uri_encoded_bucket) + out.WriteString(".amazonaws.com") return out.String() }() @@ -722,7 +1541,7 @@ func (r *resolver) ResolveEndpoint( out.Set("backend", "S3Express") smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ { - SchemeID: "aws.auth#sigv4", + SchemeID: "sigv4-s3express", SignerProperties: func() smithy.Properties { var sp smithy.Properties smithyhttp.SetDisableDoubleEncoding(&sp, true) @@ -739,248 +1558,12 @@ func (r *resolver) ResolveEndpoint( }(), }, nil } - uriString := func() string { - var out strings.Builder - out.WriteString("https://s3express-control.") - out.WriteString(_Region) - out.WriteString(".amazonaws.com/") - out.WriteString(_uri_encoded_bucket) - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, 
_Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") - } - } - if awsrulesfn.IsVirtualHostableS3Bucket(_Bucket, false) { - if exprVal := params.DisableS3ExpressSessionAuth; exprVal != nil { - _DisableS3ExpressSessionAuth := *exprVal - _ = _DisableS3ExpressSessionAuth - if _DisableS3ExpressSessionAuth == true { - if exprVal := rulesfn.SubString(_Bucket, 6, 14, true); exprVal != nil { - _s3expressAvailabilityZoneId := *exprVal - _ = _s3expressAvailabilityZoneId - if exprVal := rulesfn.SubString(_Bucket, 14, 16, true); exprVal != nil { - _s3expressAvailabilityZoneDelim := *exprVal - _ = _s3expressAvailabilityZoneDelim - if _s3expressAvailabilityZoneDelim == "--" { - if _UseFIPS == true { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-fips-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - out.WriteString(".amazonaws.com") - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - out.WriteString(".amazonaws.com") - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - if exprVal := rulesfn.SubString(_Bucket, 6, 15, true); exprVal != nil { - _s3expressAvailabilityZoneId := *exprVal - _ = _s3expressAvailabilityZoneId - if exprVal := rulesfn.SubString(_Bucket, 15, 17, true); exprVal != nil { - _s3expressAvailabilityZoneDelim := *exprVal - _ = _s3expressAvailabilityZoneDelim - if _s3expressAvailabilityZoneDelim == "--" { - if _UseFIPS == true { - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-fips-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - 
out.WriteString(".amazonaws.com") - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - uriString := func() string { - var out strings.Builder - out.WriteString("https://") - out.WriteString(_Bucket) - out.WriteString(".s3express-") - out.WriteString(_s3expressAvailabilityZoneId) - out.WriteString(".") - out.WriteString(_Region) - out.WriteString(".amazonaws.com") - return out.String() - }() - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - Properties: func() smithy.Properties { - var out smithy.Properties - out.Set("backend", "S3Express") - smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ - { - SchemeID: "aws.auth#sigv4", - SignerProperties: func() smithy.Properties { - var sp smithy.Properties - smithyhttp.SetDisableDoubleEncoding(&sp, true) - - smithyhttp.SetSigV4SigningName(&sp, "s3express") - smithyhttp.SetSigV4ASigningName(&sp, "s3express") - - smithyhttp.SetSigV4SigningRegion(&sp, _Region) - return sp - }(), - }, - }) - return out - }(), - }, nil - } - } - } - return endpoint, fmt.Errorf("endpoint rule error, %s", "Unrecognized S3Express bucket name format.") } } - if exprVal := rulesfn.SubString(_Bucket, 6, 14, true); exprVal != nil { + if exprVal := rulesfn.SubString(_Bucket, 6, 20, true); exprVal != nil { _s3expressAvailabilityZoneId := *exprVal _ = _s3expressAvailabilityZoneId - if exprVal := rulesfn.SubString(_Bucket, 14, 16, true); exprVal != nil { + if exprVal := rulesfn.SubString(_Bucket, 20, 22, true); exprVal != nil { _s3expressAvailabilityZoneDelim := *exprVal _ = _s3expressAvailabilityZoneDelim if _s3expressAvailabilityZoneDelim == "--" { @@ -1071,10 +1654,10 @@ func (r *resolver) ResolveEndpoint( } } } - if exprVal := rulesfn.SubString(_Bucket, 6, 15, true); exprVal != nil { + if exprVal := rulesfn.SubString(_Bucket, 6, 26, true); exprVal != nil { _s3expressAvailabilityZoneId := *exprVal _ = _s3expressAvailabilityZoneId - if exprVal := rulesfn.SubString(_Bucket, 15, 17, true); exprVal != nil { + if exprVal := rulesfn.SubString(_Bucket, 26, 28, true); exprVal != nil { _s3expressAvailabilityZoneDelim := *exprVal _ = _s3expressAvailabilityZoneDelim if _s3expressAvailabilityZoneDelim == "--" { @@ -3736,8 +4319,8 @@ func (r *resolver) ResolveEndpoint( return endpoint, fmt.Errorf("endpoint rule error, %s", "S3 Outposts does not support S3 Accelerate") } if exprVal := _bucketArn.ResourceId.Get(4); exprVal != nil { - _var_275 := *exprVal - _ = _var_275 + _var_287 := *exprVal + _ = _var_287 return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Arn: Outpost Access Point ARN contains sub resources") } if exprVal := _bucketArn.ResourceId.Get(1); exprVal != nil { @@ -3980,8 +4563,8 @@ func (r *resolver) 
ResolveEndpoint( } if _ForcePathStyle == true { if exprVal := awsrulesfn.ParseARN(_Bucket); exprVal != nil { - _var_288 := *exprVal - _ = _var_288 + _var_300 := *exprVal + _ = _var_300 return endpoint, fmt.Errorf("endpoint rule error, %s", "Path-style addressing cannot be used with ARN buckets") } } @@ -5750,7 +6333,7 @@ type endpointParamsBinder interface { bindEndpointParams(*EndpointParameters) } -func bindEndpointParams(input interface{}, options Options) *EndpointParameters { +func bindEndpointParams(ctx context.Context, input interface{}, options Options) *EndpointParameters { params := &EndpointParameters{} params.Region = bindRegion(options.Region) @@ -5782,6 +6365,9 @@ func (*resolveEndpointV2Middleware) ID() string { func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "ResolveEndpoint") + defer span.End() + if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { return next.HandleFinalize(ctx, in) } @@ -5795,12 +6381,17 @@ func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in mid return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") } - params := bindEndpointParams(getOperationInput(ctx), m.options) - endpt, err := m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params) + params := bindEndpointParams(ctx, getOperationInput(ctx), m.options) + endpt, err := timeOperationMetric(ctx, "client.call.resolve_endpoint_duration", + func() (smithyendpoints.Endpoint, error) { + return m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params) + }) if err != nil { return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) } + span.SetProperty("client.call.resolved_endpoint", endpt.URI.String()) + if endpt.URI.RawPath == "" && req.URL.RawPath != "" { endpt.URI.RawPath = endpt.URI.Path } @@ -5822,8 +6413,11 @@ func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in mid rscheme.SignerProperties.SetAll(&o.SignerProperties) } + ctx = setS3ResolvedURI(ctx, endpt.URI.String()) + backend := s3cust.GetPropertiesBackend(&endpt.Properties) ctx = internalcontext.SetS3Backend(ctx, backend) + span.End() return next.HandleFinalize(ctx, in) } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express_user_agent.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express_user_agent.go new file mode 100644 index 000000000..a9b54535b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express_user_agent.go @@ -0,0 +1,43 @@ +package s3 + +import ( + "context" + "strings" + + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/smithy-go/middleware" +) + +// isExpressUserAgent tracks whether the caller is using S3 Express +// +// we can only derive this at runtime, so the middleware needs to hold a handle +// to the underlying user-agent manipulator to set the feature flag as +// necessary +type isExpressUserAgent struct { + ua *awsmiddleware.RequestUserAgent +} + +func (*isExpressUserAgent) ID() string { + return "isExpressUserAgent" +} + +func (m *isExpressUserAgent) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + const expressSuffix = "--x-s3" + + bucket, ok := bucketFromInput(in.Parameters) + if ok && strings.HasSuffix(bucket, expressSuffix) { + 
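+		// Bucket names ending in "--x-s3" are S3 Express (directory) buckets; record that feature in the User-Agent.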
m.ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureS3ExpressBucket) + } + return next.HandleSerialize(ctx, in) +} + +func addIsExpressUserAgent(stack *middleware.Stack) error { + ua, err := getOrAddRequestUserAgent(stack) + if err != nil { + return err + } + + return stack.Serialize.Add(&isExpressUserAgent{ua}, middleware.After) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/generated.json index 6e392285e..fa0119bcb 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/generated.json +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/generated.json @@ -18,6 +18,7 @@ "api_op_CompleteMultipartUpload.go", "api_op_CopyObject.go", "api_op_CreateBucket.go", + "api_op_CreateBucketMetadataTableConfiguration.go", "api_op_CreateMultipartUpload.go", "api_op_CreateSession.go", "api_op_DeleteBucket.go", @@ -27,6 +28,7 @@ "api_op_DeleteBucketIntelligentTieringConfiguration.go", "api_op_DeleteBucketInventoryConfiguration.go", "api_op_DeleteBucketLifecycle.go", + "api_op_DeleteBucketMetadataTableConfiguration.go", "api_op_DeleteBucketMetricsConfiguration.go", "api_op_DeleteBucketOwnershipControls.go", "api_op_DeleteBucketPolicy.go", @@ -47,6 +49,7 @@ "api_op_GetBucketLifecycleConfiguration.go", "api_op_GetBucketLocation.go", "api_op_GetBucketLogging.go", + "api_op_GetBucketMetadataTableConfiguration.go", "api_op_GetBucketMetricsConfiguration.go", "api_op_GetBucketNotificationConfiguration.go", "api_op_GetBucketOwnershipControls.go", @@ -123,13 +126,14 @@ "protocol_test.go", "serializers.go", "snapshot_test.go", + "sra_operation_order_test.go", "types/enums.go", "types/errors.go", "types/types.go", "types/types_exported_test.go", "validators.go" ], - "go": "1.15", + "go": "1.22", "module": "github.com/aws/aws-sdk-go-v2/service/s3", "unstable": false } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/go_module_metadata.go index 04c6fdbb3..5831a91c3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/go_module_metadata.go @@ -3,4 +3,4 @@ package s3 // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.53.1" +const goModuleVersion = "1.78.2" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints/endpoints.go index f3e6b0751..3364028a8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints/endpoints.go @@ -96,7 +96,7 @@ var partitionRegexp = struct { AwsUsGov *regexp.Regexp }{ - Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af|il)\\-\\w+\\-\\d+$"), + Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af|il|mx)\\-\\w+\\-\\d+$"), AwsCn: regexp.MustCompile("^cn\\-\\w+\\-\\d+$"), AwsIso: regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"), AwsIsoB: regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"), @@ -252,6 +252,24 @@ var defaultPartitions = endpoints.Partitions{ }: { Hostname: "s3.dualstack.ap-southeast-4.amazonaws.com", }, + endpoints.EndpointKey{ + Region: "ap-southeast-5", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-southeast-5", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.ap-southeast-5.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "ap-southeast-7", + }: 
endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-southeast-7", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.ap-southeast-7.amazonaws.com", + }, endpoints.EndpointKey{ Region: "aws-global", }: endpoints.Endpoint{ @@ -460,6 +478,15 @@ var defaultPartitions = endpoints.Partitions{ }: { Hostname: "s3.dualstack.me-south-1.amazonaws.com", }, + endpoints.EndpointKey{ + Region: "mx-central-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "mx-central-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.mx-central-1.amazonaws.com", + }, endpoints.EndpointKey{ Region: "s3-external-1", }: endpoints.Endpoint{ @@ -794,19 +821,27 @@ var defaultPartitions = endpoints.Partitions{ Variant: endpoints.FIPSVariant, }: { Hostname: "s3-fips.{region}.csp.hci.ic.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, }, { Variant: 0, }: { Hostname: "s3.{region}.csp.hci.ic.gov", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, }, }, RegionRegex: partitionRegexp.AwsIsoF, IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "us-isof-east-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-isof-south-1", + }: endpoints.Endpoint{}, + }, }, { ID: "aws-us-gov", diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/options.go index 064bcefb4..6f29b807f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/options.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/options.go @@ -12,7 +12,9 @@ import ( s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" smithyauth "github.com/aws/smithy-go/auth" "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/metrics" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/tracing" smithyhttp "github.com/aws/smithy-go/transport/http" "net/http" ) @@ -50,6 +52,10 @@ type Options struct { // clients initial default settings. DefaultsMode aws.DefaultsMode + // Disables logging when the client skips output checksum validation due to lack + // of algorithm support. + DisableLogOutputChecksumValidationSkipped bool + // Allows you to disable S3 Multi-Region access points feature. DisableMultiRegionAccessPoints bool @@ -65,8 +71,10 @@ type Options struct { // Deprecated: Deprecated: EndpointResolver and WithEndpointResolver. Providing a // value for this field will likely prevent you from using any endpoint-related // service features released after the introduction of EndpointResolverV2 and - // BaseEndpoint. To migrate an EndpointResolver implementation that uses a custom - // endpoint, set the client option BaseEndpoint instead. + // BaseEndpoint. + // + // To migrate an EndpointResolver implementation that uses a custom endpoint, set + // the client option BaseEndpoint instead. EndpointResolver EndpointResolver // Resolves the endpoint used for a particular service operation. This should be @@ -82,23 +90,35 @@ type Options struct { // The logger writer interface to write logging messages to. Logger logging.Logger + // The client meter provider. + MeterProvider metrics.MeterProvider + // The region to send requests to. 
(Required)
 	Region string
 
+	// Indicates how the user opts in/out of request checksum calculation.
+	RequestChecksumCalculation aws.RequestChecksumCalculation
+
+	// Indicates how the user opts in/out of response checksum validation.
+	ResponseChecksumValidation aws.ResponseChecksumValidation
+
 	// RetryMaxAttempts specifies the maximum number of attempts an API client will call
 	// an operation that fails with a retryable error. A value of 0 is ignored, and
 	// will not be used to configure the API client created default retryer, or modify
-	// per operation call's retry max attempts. If specified in an operation call's
-	// functional options with a value that is different than the constructed client's
-	// Options, the Client's Retryer will be wrapped to use the operation's specific
-	// RetryMaxAttempts value.
+	// per operation call's retry max attempts.
+	//
+	// If specified in an operation call's functional options with a value that is
+	// different than the constructed client's Options, the Client's Retryer will be
+	// wrapped to use the operation's specific RetryMaxAttempts value.
 	RetryMaxAttempts int
 
 	// RetryMode specifies the retry mode the API client will be created with, if
-	// Retryer option is not also specified. When creating a new API Clients this
-	// member will only be used if the Retryer Options member is nil. This value will
-	// be ignored if Retryer is not nil. Currently does not support per operation call
-	// overrides, may in the future.
+	// Retryer option is not also specified.
+	//
+	// When creating a new API client, this member will only be used if the Retryer
+	// Options member is nil. This value will be ignored if Retryer is not nil.
+	//
+	// Currently does not support per-operation call overrides; it may in the future.
 	RetryMode aws.RetryMode
 
 	// Retryer guides how HTTP requests should be retried in case of recoverable
@@ -113,6 +133,9 @@ type Options struct {
 	// within your applications.
 	RuntimeEnvironment aws.RuntimeEnvironment
 
+	// The client tracer provider.
+	TracerProvider tracing.TracerProvider
+
 	// Allows you to enable arn region support for the service.
 	UseARNRegion bool
 
@@ -141,8 +164,9 @@ type Options struct {
 	// The initial DefaultsMode used when the client options were constructed. If the
 	// DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved
-	// value was at that point in time. Currently does not support per operation call
-	// overrides, may in the future.
+	// value was at that point in time.
+	//
+	// Currently does not support per-operation call overrides; it may in the future.
 	resolvedDefaultsMode aws.DefaultsMode
 
 	// The HTTP client to invoke API calls with. Defaults to client's default HTTP
@@ -193,6 +217,7 @@ func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) {
 // Deprecated: EndpointResolver and WithEndpointResolver. Providing a value for
 // this field will likely prevent you from using any endpoint-related service
 // features released after the introduction of EndpointResolverV2 and BaseEndpoint.
+//
 // To migrate an EndpointResolver implementation that uses a custom endpoint, set
 // the client option BaseEndpoint instead.
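+// A minimal migration sketch (hedged; this example is not part of the
+// generated code, and the endpoint URL below is hypothetical):
+//
+//	client := s3.NewFromConfig(cfg, func(o *s3.Options) {
+//		// Set BaseEndpoint instead of supplying a custom EndpointResolver.
+//		o.BaseEndpoint = aws.String("https://s3.example.internal")
+//	})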
func WithEndpointResolver(v EndpointResolver) func(*Options) {
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/presign_post.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/presign_post.go
new file mode 100644
index 000000000..491ed2e5d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/presign_post.go
@@ -0,0 +1,419 @@
+package s3
+
+import (
+	"context"
+	"crypto/hmac"
+	"crypto/sha256"
+	"encoding/base64"
+	"encoding/hex"
+	"encoding/json"
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+	"github.com/aws/aws-sdk-go-v2/aws/retry"
+	v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+	awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
+	internalcontext "github.com/aws/aws-sdk-go-v2/internal/context"
+	"github.com/aws/aws-sdk-go-v2/internal/sdk"
+	acceptencodingcust "github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding"
+	presignedurlcust "github.com/aws/aws-sdk-go-v2/service/internal/presigned-url"
+	"github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+const (
+	algorithmHeader  = "X-Amz-Algorithm"
+	credentialHeader = "X-Amz-Credential"
+	dateHeader       = "X-Amz-Date"
+	tokenHeader      = "X-Amz-Security-Token"
+	signatureHeader  = "X-Amz-Signature"
+
+	algorithm        = "AWS4-HMAC-SHA256"
+	aws4Request      = "aws4_request"
+	bucketHeader     = "bucket"
+	defaultExpiresIn = 15 * time.Minute
+	shortDateLayout  = "20060102"
+)
+
+// PresignPostObject creates a special kind of [presigned request] used to send a request using
+// form data, likely from an HTML form in a browser.
+// Unlike other presigned operations, the return values of this function are not meant to be used directly
+// to make an HTTP request but rather to be used as inputs to a form. See [the docs] for more information
+// on how to use these values.
+//
+// [presigned request]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/ShareObjectPreSignedURL.html
+// [the docs]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPOST.html
+func (c *PresignClient) PresignPostObject(ctx context.Context, params *PutObjectInput, optFns ...func(*PresignPostOptions)) (*PresignedPostRequest, error) {
+	if params == nil {
+		params = &PutObjectInput{}
+	}
+	clientOptions := c.options.copy()
+	options := PresignPostOptions{
+		Expires:       clientOptions.Expires,
+		PostPresigner: &postSignAdapter{},
+	}
+	for _, fn := range optFns {
+		fn(&options)
+	}
+	clientOptFns := append(clientOptions.ClientOptions, withNopHTTPClientAPIOption)
+	cvt := presignPostConverter(options)
+	result, _, err := c.client.invokeOperation(ctx, "$type:L", params, clientOptFns,
+		c.client.addOperationPutObjectMiddlewares,
+		cvt.ConvertToPresignMiddleware,
+		func(stack *middleware.Stack, options Options) error {
+			return awshttp.RemoveContentTypeHeader(stack)
+		},
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	out := result.(*PresignedPostRequest)
+	return out, nil
+}
+
+// PresignedPostRequest represents a presigned request to be sent using the HTTP verb POST and form data.
+type PresignedPostRequest struct {
+	// URL is the base URL to make the request to.
+	URL string
+	// Values is a key-value map of values to be sent as form data;
+	// these values are not encoded.
+	Values map[string]string
+}
+
+// postSignAdapter is an adapter that implements the PresignPost interface.
+type postSignAdapter struct{}
+
+// PresignPost creates a special kind of [presigned request]
+// to be used with the HTTP verb POST.
+// It differs from a PUT request mostly in that:
+// 1. It accepts a new set of parameters, `Conditions[]`, that are used to create a policy document limiting where an object can be posted to
+// 2. The return value needs more processing, since it's meant to be sent via a form and not stand on its own
+// 3. There's no body to be signed, since that will be attached when the actual request is made
+// 4. The signature is made based on the policy document, not the whole request
+// More information can be found at https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPOST.html
+//
+// [presigned request]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-presigned-url.html
+func (s *postSignAdapter) PresignPost(
+	credentials aws.Credentials,
+	bucket string, key string,
+	region string, service string, signingTime time.Time, conditions []interface{}, expirationTime time.Time, optFns ...func(*v4.SignerOptions),
+) (fields map[string]string, err error) {
+	credentialScope := buildCredentialScope(signingTime, region, service)
+	credentialStr := credentials.AccessKeyID + "/" + credentialScope
+
+	policyDoc, err := createPolicyDocument(expirationTime, signingTime, bucket, key, credentialStr, &credentials.SessionToken, conditions)
+	if err != nil {
+		return nil, err
+	}
+
+	signature := buildSignature(policyDoc, credentials.SecretAccessKey, service, region, signingTime)
+
+	fields = getPostSignRequiredFields(signingTime, credentialStr, credentials)
+	fields[signatureHeader] = signature
+	fields["key"] = key
+	fields["policy"] = policyDoc
+
+	return fields, nil
+}
+
+func getPostSignRequiredFields(t time.Time, credentialStr string, awsCredentials aws.Credentials) map[string]string {
+	fields := map[string]string{
+		algorithmHeader:  algorithm,
+		dateHeader:       t.UTC().Format("20060102T150405Z"),
+		credentialHeader: credentialStr,
+	}
+
+	sessionToken := awsCredentials.SessionToken
+	if len(sessionToken) > 0 {
+		fields[tokenHeader] = sessionToken
+	}
+
+	return fields
+}
+
+// PresignPost defines the interface to presign a POST request.
+type PresignPost interface {
+	PresignPost(
+		credentials aws.Credentials,
+		bucket string, key string,
+		region string, service string, signingTime time.Time, conditions []interface{}, expirationTime time.Time,
+		optFns ...func(*v4.SignerOptions),
+	) (fields map[string]string, err error)
+}
+
+// PresignPostOptions represents the options to be passed to a PresignPost sign request.
+type PresignPostOptions struct {
+
+	// ClientOptions is a list of functional options to mutate the client options
+	// used by the presign client.
+	ClientOptions []func(*Options)
+
+	// PostPresigner to use. One will be created if none is provided.
+	PostPresigner PresignPost
+
+	// Expires sets the expiration duration for the generated presigned URL. This is
+	// the duration the presigned URL should be considered valid for. If not set or
+	// set to zero, the presigned URL defaults to expiring after 900 seconds (15 minutes).
+	Expires time.Duration
+
+	// Conditions is a list of extra conditions to pass to the policy document.
+	// Available conditions can be found [here].
+	//
+	// [here]: https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html#sigv4-PolicyConditions
+	Conditions []interface{}
+}
+
+type presignPostConverter PresignPostOptions
+
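+// A hedged usage sketch (not part of the generated code; the bucket and key
+// below are hypothetical). The caller presigns the POST, then submits the
+// returned Values as multipart form fields, together with the file contents,
+// to req.URL:
+//
+//	presigner := s3.NewPresignClient(client)
+//	req, err := presigner.PresignPostObject(ctx, &s3.PutObjectInput{
+//		Bucket: aws.String("example-bucket"),     // hypothetical bucket
+//		Key:    aws.String("uploads/report.txt"), // hypothetical key
+//	})
+//	if err != nil {
+//		// handle error
+//	}
+//	// req.URL is the form action; each entry in req.Values becomes a form
+//	// field, and the object bytes are sent in the trailing "file" field.
+
+// presignPostRequestMiddlewareOptions is the options for the presignPostRequestMiddleware middleware.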
+type presignPostRequestMiddlewareOptions struct {
+	CredentialsProvider aws.CredentialsProvider
+	Presigner           PresignPost
+	LogSigning          bool
+	ExpiresIn           time.Duration
+	Conditions          []interface{}
+}
+
+type presignPostRequestMiddleware struct {
+	credentialsProvider aws.CredentialsProvider
+	presigner           PresignPost
+	logSigning          bool
+	expiresIn           time.Duration
+	conditions          []interface{}
+}
+
+// newPresignPostRequestMiddleware returns a new presignPostRequestMiddleware
+// initialized with the presigner.
+func newPresignPostRequestMiddleware(options presignPostRequestMiddlewareOptions) *presignPostRequestMiddleware {
+	return &presignPostRequestMiddleware{
+		credentialsProvider: options.CredentialsProvider,
+		presigner:           options.Presigner,
+		logSigning:          options.LogSigning,
+		expiresIn:           options.ExpiresIn,
+		conditions:          options.Conditions,
+	}
+}
+
+// ID provides the middleware ID.
+func (*presignPostRequestMiddleware) ID() string { return "PresignPostRequestMiddleware" }
+
+// HandleFinalize takes the provided input and creates a presigned URL for
+// the HTTP request using the SigV4 presign authentication scheme.
+//
+// Since the signed request is not a valid HTTP request, the result is
+// returned to the caller as a PresignedPostRequest rather than being sent.
+func (s *presignPostRequestMiddleware) HandleFinalize(
+	ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+	out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+
+	input := getOperationInput(ctx)
+	asS3Put, ok := input.(*PutObjectInput)
+	if !ok {
+		return out, metadata, fmt.Errorf("expected PutObjectInput")
+	}
+	bucketName, ok := asS3Put.bucket()
+	if !ok {
+		return out, metadata, fmt.Errorf("requested input bucketName not found on request")
+	}
+	uploadKey := asS3Put.Key
+	if uploadKey == nil {
+		return out, metadata, fmt.Errorf("PutObject input does not have a key input")
+	}
+
+	uri := getS3ResolvedURI(ctx)
+
+	signingName := awsmiddleware.GetSigningName(ctx)
+	signingRegion := awsmiddleware.GetSigningRegion(ctx)
+
+	credentials, err := s.credentialsProvider.Retrieve(ctx)
+	if err != nil {
+		return out, metadata, &v4.SigningError{
+			Err: fmt.Errorf("failed to retrieve credentials: %w", err),
+		}
+	}
+	skew := internalcontext.GetAttemptSkewContext(ctx)
+	signingTime := sdk.NowTime().Add(skew)
+	expirationTime := signingTime.Add(s.expiresIn).UTC()
+
+	fields, err := s.presigner.PresignPost(
+		credentials,
+		bucketName,
+		*uploadKey,
+		signingRegion,
+		signingName,
+		signingTime,
+		s.conditions,
+		expirationTime,
+		func(o *v4.SignerOptions) {
+			o.Logger = middleware.GetLogger(ctx)
+			o.LogSigning = s.logSigning
+		})
+	if err != nil {
+		return out, metadata, &v4.SigningError{
+			Err: fmt.Errorf("failed to sign http request, %w", err),
+		}
+	}
+
+	out.Result = &PresignedPostRequest{
+		URL:    uri,
+		Values: fields,
+	}
+
+	return out, metadata, nil
+}
+
+// Adapted from the existing PresignConverter middleware.
+func (c presignPostConverter) ConvertToPresignMiddleware(stack *middleware.Stack, options Options) (err error) {
+	stack.Build.Remove("UserAgent")
+	stack.Finalize.Remove((*acceptencodingcust.DisableGzip)(nil).ID())
+	stack.Finalize.Remove((*retry.Attempt)(nil).ID())
+	stack.Finalize.Remove((*retry.MetricsHeader)(nil).ID())
+	stack.Deserialize.Clear()
+
+	if err := stack.Finalize.Insert(&presignContextPolyfillMiddleware{}, "Signing", middleware.Before); err != nil {
+		return err
+	}
+
+	// if no expiration is set, set one
+	expiresIn := c.Expires
+	if expiresIn == 0 {
+		expiresIn = defaultExpiresIn
+	}
+
+	pmw := newPresignPostRequestMiddleware(presignPostRequestMiddlewareOptions{
+		CredentialsProvider: options.Credentials,
+		Presigner:           c.PostPresigner,
+		LogSigning:          options.ClientLogMode.IsSigning(),
+		ExpiresIn:           expiresIn,
+		Conditions:          c.Conditions,
+	})
+	if _, err := stack.Finalize.Swap("Signing", pmw); err != nil {
+		return err
+	}
+	if err = smithyhttp.AddNoPayloadDefaultContentTypeRemover(stack); err != nil {
+		return err
+	}
+	err = presignedurlcust.AddAsIsPresigningMiddleware(stack)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+func createPolicyDocument(expirationTime time.Time, signingTime time.Time, bucket string, key string, credentialString string, securityToken *string, extraConditions []interface{}) (string, error) {
+	initialConditions := []interface{}{
+		map[string]string{
+			algorithmHeader: algorithm,
+		},
+		map[string]string{
+			bucketHeader: bucket,
+		},
+		map[string]string{
+			credentialHeader: credentialString,
+		},
+		map[string]string{
+			dateHeader: signingTime.UTC().Format("20060102T150405Z"),
+		},
+	}
+
+	var conditions []interface{}
+	for _, v := range initialConditions {
+		conditions = append(conditions, v)
+	}
+
+	if securityToken != nil && *securityToken != "" {
+		conditions = append(conditions, map[string]string{
+			tokenHeader: *securityToken,
+		})
+	}
+
+	// append user-defined conditions at the end
+	conditions = append(conditions, extraConditions...)
+
+	// The policy allows you to set a "key" value to specify the name of the
+	// key to add. Customers can add one by specifying one in their conditions,
+	// so we're checking if one has already been set.
+	// If none is found, restrict this to just the key name passed on the request.
+	// This can be disabled by adding a condition that explicitly allows
+	// everything.
+	if !isAlreadyCheckingForKey(conditions) {
+		conditions = append(conditions, map[string]string{"key": key})
+	}
+
+	policyDoc := map[string]interface{}{
+		"conditions": conditions,
+		"expiration": expirationTime.Format(time.RFC3339),
+	}
+
+	jsonBytes, err := json.Marshal(policyDoc)
+	if err != nil {
+		return "", err
+	}
+
+	return base64.StdEncoding.EncodeToString(jsonBytes), nil
}
+
+func isAlreadyCheckingForKey(conditions []interface{}) bool {
+	// Need to check for two conditions:
+	// 1. A condition of the form ["starts-with", "$key", "mykey"]
+	// 2. A condition of the form {"key": "mykey"}
+	for _, c := range conditions {
+		slice, ok := c.([]interface{})
+		if ok && len(slice) > 1 {
+			if slice[0] == "starts-with" && slice[1] == "$key" {
+				return true
+			}
+		}
+		m, ok := c.(map[string]interface{})
+		if ok && len(m) > 0 {
+			for k := range m {
+				if k == "key" {
+					return true
+				}
+			}
+		}
+		// Repeat this but for map[string]string due to type constraints
+		ms, ok := c.(map[string]string)
+		if ok && len(ms) > 0 {
+			for k := range ms {
+				if k == "key" {
+					return true
+				}
+			}
+		}
+	}
+	return false
+}
+
+// These methods have been copied from the v4 implementation, since they are not exported for public use.
+func hmacsha256(key []byte, data []byte) []byte {
+	hash := hmac.New(sha256.New, key)
+	hash.Write(data)
+	return hash.Sum(nil)
+}
+
+func buildSignature(strToSign, secret, service, region string, t time.Time) string {
+	key := deriveKey(secret, service, region, t)
+	return hex.EncodeToString(hmacsha256(key, []byte(strToSign)))
+}
+
+func deriveKey(secret, service, region string, t time.Time) []byte {
+	hmacDate := hmacsha256([]byte("AWS4"+secret), []byte(t.UTC().Format(shortDateLayout)))
+	hmacRegion := hmacsha256(hmacDate, []byte(region))
+	hmacService := hmacsha256(hmacRegion, []byte(service))
+	return hmacsha256(hmacService, []byte(aws4Request))
+}
+
+func buildCredentialScope(signingTime time.Time, region, service string) string {
+	return strings.Join([]string{
+		signingTime.UTC().Format(shortDateLayout),
+		region,
+		service,
+		aws4Request,
+	}, "/")
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/serializers.go
index 59524bdcb..c37fab1e1 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/serializers.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/serializers.go
@@ -12,6 +12,7 @@ import (
 	smithyxml "github.com/aws/smithy-go/encoding/xml"
 	"github.com/aws/smithy-go/middleware"
 	smithytime "github.com/aws/smithy-go/time"
+	"github.com/aws/smithy-go/tracing"
 	smithyhttp "github.com/aws/smithy-go/transport/http"
 	"net/http"
 	"strconv"
@@ -28,6 +29,10 @@ func (*awsRestxml_serializeOpAbortMultipartUpload) ID() string {
 func (m *awsRestxml_serializeOpAbortMultipartUpload) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
 	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
 ) {
+	_, span := tracing.StartSpan(ctx, "OperationSerializer")
+	endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+	defer endTimer()
+	defer span.End()
 	request, ok := in.Request.(*smithyhttp.Request)
 	if !ok {
 		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
@@ -64,6 +69,8 @@ func (m *awsRestxml_serializeOpAbortMultipartUpload) HandleSerialize(ctx context
 	}
 	in.Request = request
 
+	endTimer()
+	span.End()
 	return next.HandleSerialize(ctx, in)
 }
 func awsRestxml_serializeOpHttpBindingsAbortMultipartUploadInput(v *AbortMultipartUploadInput, encoder *httpbinding.Encoder) error {
@@ -71,11 +78,16 @@ func awsRestxml_serializeOpHttpBindingsAbortMultipa
 		return fmt.Errorf("unsupported serialization of nil %T", v)
 	}
 
-	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+	if v.ExpectedBucketOwner != nil {
 		locationName := "X-Amz-Expected-Bucket-Owner"
 		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
 	}
 
+	if v.IfMatchInitiatedTime != nil {
+		locationName := "X-Amz-If-Match-Initiated-Time"
+
encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.IfMatchInitiatedTime)) + } + if v.Key == nil || len(*v.Key) == 0 { return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} } @@ -107,6 +119,10 @@ func (*awsRestxml_serializeOpCompleteMultipartUpload) ID() string { func (m *awsRestxml_serializeOpCompleteMultipartUpload) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -118,7 +134,7 @@ func (m *awsRestxml_serializeOpCompleteMultipartUpload) HandleSerialize(ctx cont return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Key+}?x-id=CompleteMultipartUpload") + opPath, opQuery := httpbinding.SplitURI("/{Key+}") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "POST" @@ -167,6 +183,8 @@ func (m *awsRestxml_serializeOpCompleteMultipartUpload) HandleSerialize(ctx cont } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsCompleteMultipartUploadInput(v *CompleteMultipartUploadInput, encoder *httpbinding.Encoder) error { @@ -174,31 +192,51 @@ func awsRestxml_serializeOpHttpBindingsCompleteMultipartUploadInput(v *CompleteM return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ChecksumCRC32 != nil && len(*v.ChecksumCRC32) > 0 { + if v.ChecksumCRC32 != nil { locationName := "X-Amz-Checksum-Crc32" encoder.SetHeader(locationName).String(*v.ChecksumCRC32) } - if v.ChecksumCRC32C != nil && len(*v.ChecksumCRC32C) > 0 { + if v.ChecksumCRC32C != nil { locationName := "X-Amz-Checksum-Crc32c" encoder.SetHeader(locationName).String(*v.ChecksumCRC32C) } - if v.ChecksumSHA1 != nil && len(*v.ChecksumSHA1) > 0 { + if v.ChecksumCRC64NVME != nil { + locationName := "X-Amz-Checksum-Crc64nvme" + encoder.SetHeader(locationName).String(*v.ChecksumCRC64NVME) + } + + if v.ChecksumSHA1 != nil { locationName := "X-Amz-Checksum-Sha1" encoder.SetHeader(locationName).String(*v.ChecksumSHA1) } - if v.ChecksumSHA256 != nil && len(*v.ChecksumSHA256) > 0 { + if v.ChecksumSHA256 != nil { locationName := "X-Amz-Checksum-Sha256" encoder.SetHeader(locationName).String(*v.ChecksumSHA256) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if len(v.ChecksumType) > 0 { + locationName := "X-Amz-Checksum-Type" + encoder.SetHeader(locationName).String(string(v.ChecksumType)) + } + + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } + if v.IfMatch != nil { + locationName := "If-Match" + encoder.SetHeader(locationName).String(*v.IfMatch) + } + + if v.IfNoneMatch != nil { + locationName := "If-None-Match" + encoder.SetHeader(locationName).String(*v.IfNoneMatch) + } + if v.Key == nil || len(*v.Key) == 0 { return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")} } @@ -208,22 +246,27 @@ func 
awsRestxml_serializeOpHttpBindingsCompleteMultipartUploadInput(v *CompleteM } } + if v.MpuObjectSize != nil { + locationName := "X-Amz-Mp-Object-Size" + encoder.SetHeader(locationName).Long(*v.MpuObjectSize) + } + if len(v.RequestPayer) > 0 { locationName := "X-Amz-Request-Payer" encoder.SetHeader(locationName).String(string(v.RequestPayer)) } - if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 { + if v.SSECustomerAlgorithm != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) } - if v.SSECustomerKey != nil && len(*v.SSECustomerKey) > 0 { + if v.SSECustomerKey != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Key" encoder.SetHeader(locationName).String(*v.SSECustomerKey) } - if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 { + if v.SSECustomerKeyMD5 != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) } @@ -245,6 +288,10 @@ func (*awsRestxml_serializeOpCopyObject) ID() string { func (m *awsRestxml_serializeOpCopyObject) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -281,6 +328,8 @@ func (m *awsRestxml_serializeOpCopyObject) HandleSerialize(ctx context.Context, } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsCopyObjectInput(v *CopyObjectInput, encoder *httpbinding.Encoder) error { @@ -298,7 +347,7 @@ func awsRestxml_serializeOpHttpBindingsCopyObjectInput(v *CopyObjectInput, encod encoder.SetHeader(locationName).Boolean(*v.BucketKeyEnabled) } - if v.CacheControl != nil && len(*v.CacheControl) > 0 { + if v.CacheControl != nil { locationName := "Cache-Control" encoder.SetHeader(locationName).String(*v.CacheControl) } @@ -308,32 +357,32 @@ func awsRestxml_serializeOpHttpBindingsCopyObjectInput(v *CopyObjectInput, encod encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) } - if v.ContentDisposition != nil && len(*v.ContentDisposition) > 0 { + if v.ContentDisposition != nil { locationName := "Content-Disposition" encoder.SetHeader(locationName).String(*v.ContentDisposition) } - if v.ContentEncoding != nil && len(*v.ContentEncoding) > 0 { + if v.ContentEncoding != nil { locationName := "Content-Encoding" encoder.SetHeader(locationName).String(*v.ContentEncoding) } - if v.ContentLanguage != nil && len(*v.ContentLanguage) > 0 { + if v.ContentLanguage != nil { locationName := "Content-Language" encoder.SetHeader(locationName).String(*v.ContentLanguage) } - if v.ContentType != nil && len(*v.ContentType) > 0 { + if v.ContentType != nil { locationName := "Content-Type" encoder.SetHeader(locationName).String(*v.ContentType) } - if v.CopySource != nil && len(*v.CopySource) > 0 { + if v.CopySource != nil { locationName := "X-Amz-Copy-Source" encoder.SetHeader(locationName).String(*v.CopySource) } - if v.CopySourceIfMatch != nil && len(*v.CopySourceIfMatch) > 0 { + if v.CopySourceIfMatch != nil { locationName := "X-Amz-Copy-Source-If-Match" 
encoder.SetHeader(locationName).String(*v.CopySourceIfMatch) } @@ -343,7 +392,7 @@ func awsRestxml_serializeOpHttpBindingsCopyObjectInput(v *CopyObjectInput, encod encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.CopySourceIfModifiedSince)) } - if v.CopySourceIfNoneMatch != nil && len(*v.CopySourceIfNoneMatch) > 0 { + if v.CopySourceIfNoneMatch != nil { locationName := "X-Amz-Copy-Source-If-None-Match" encoder.SetHeader(locationName).String(*v.CopySourceIfNoneMatch) } @@ -353,27 +402,27 @@ func awsRestxml_serializeOpHttpBindingsCopyObjectInput(v *CopyObjectInput, encod encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.CopySourceIfUnmodifiedSince)) } - if v.CopySourceSSECustomerAlgorithm != nil && len(*v.CopySourceSSECustomerAlgorithm) > 0 { + if v.CopySourceSSECustomerAlgorithm != nil { locationName := "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm" encoder.SetHeader(locationName).String(*v.CopySourceSSECustomerAlgorithm) } - if v.CopySourceSSECustomerKey != nil && len(*v.CopySourceSSECustomerKey) > 0 { + if v.CopySourceSSECustomerKey != nil { locationName := "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key" encoder.SetHeader(locationName).String(*v.CopySourceSSECustomerKey) } - if v.CopySourceSSECustomerKeyMD5 != nil && len(*v.CopySourceSSECustomerKeyMD5) > 0 { + if v.CopySourceSSECustomerKeyMD5 != nil { locationName := "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5" encoder.SetHeader(locationName).String(*v.CopySourceSSECustomerKeyMD5) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } - if v.ExpectedSourceBucketOwner != nil && len(*v.ExpectedSourceBucketOwner) > 0 { + if v.ExpectedSourceBucketOwner != nil { locationName := "X-Amz-Source-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedSourceBucketOwner) } @@ -383,22 +432,22 @@ func awsRestxml_serializeOpHttpBindingsCopyObjectInput(v *CopyObjectInput, encod encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.Expires)) } - if v.GrantFullControl != nil && len(*v.GrantFullControl) > 0 { + if v.GrantFullControl != nil { locationName := "X-Amz-Grant-Full-Control" encoder.SetHeader(locationName).String(*v.GrantFullControl) } - if v.GrantRead != nil && len(*v.GrantRead) > 0 { + if v.GrantRead != nil { locationName := "X-Amz-Grant-Read" encoder.SetHeader(locationName).String(*v.GrantRead) } - if v.GrantReadACP != nil && len(*v.GrantReadACP) > 0 { + if v.GrantReadACP != nil { locationName := "X-Amz-Grant-Read-Acp" encoder.SetHeader(locationName).String(*v.GrantReadACP) } - if v.GrantWriteACP != nil && len(*v.GrantWriteACP) > 0 { + if v.GrantWriteACP != nil { locationName := "X-Amz-Grant-Write-Acp" encoder.SetHeader(locationName).String(*v.GrantWriteACP) } @@ -415,9 +464,7 @@ func awsRestxml_serializeOpHttpBindingsCopyObjectInput(v *CopyObjectInput, encod if v.Metadata != nil { hv := encoder.Headers("X-Amz-Meta-") for mapKey, mapVal := range v.Metadata { - if len(mapVal) > 0 { - hv.SetHeader(http.CanonicalHeaderKey(mapKey)).String(mapVal) - } + hv.SetHeader(http.CanonicalHeaderKey(mapKey)).String(mapVal) } } @@ -451,27 +498,27 @@ func awsRestxml_serializeOpHttpBindingsCopyObjectInput(v *CopyObjectInput, encod encoder.SetHeader(locationName).String(string(v.ServerSideEncryption)) } - if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 { + if 
v.SSECustomerAlgorithm != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) } - if v.SSECustomerKey != nil && len(*v.SSECustomerKey) > 0 { + if v.SSECustomerKey != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Key" encoder.SetHeader(locationName).String(*v.SSECustomerKey) } - if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 { + if v.SSECustomerKeyMD5 != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) } - if v.SSEKMSEncryptionContext != nil && len(*v.SSEKMSEncryptionContext) > 0 { + if v.SSEKMSEncryptionContext != nil { locationName := "X-Amz-Server-Side-Encryption-Context" encoder.SetHeader(locationName).String(*v.SSEKMSEncryptionContext) } - if v.SSEKMSKeyId != nil && len(*v.SSEKMSKeyId) > 0 { + if v.SSEKMSKeyId != nil { locationName := "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id" encoder.SetHeader(locationName).String(*v.SSEKMSKeyId) } @@ -481,7 +528,7 @@ func awsRestxml_serializeOpHttpBindingsCopyObjectInput(v *CopyObjectInput, encod encoder.SetHeader(locationName).String(string(v.StorageClass)) } - if v.Tagging != nil && len(*v.Tagging) > 0 { + if v.Tagging != nil { locationName := "X-Amz-Tagging" encoder.SetHeader(locationName).String(*v.Tagging) } @@ -491,7 +538,7 @@ func awsRestxml_serializeOpHttpBindingsCopyObjectInput(v *CopyObjectInput, encod encoder.SetHeader(locationName).String(string(v.TaggingDirective)) } - if v.WebsiteRedirectLocation != nil && len(*v.WebsiteRedirectLocation) > 0 { + if v.WebsiteRedirectLocation != nil { locationName := "X-Amz-Website-Redirect-Location" encoder.SetHeader(locationName).String(*v.WebsiteRedirectLocation) } @@ -509,6 +556,10 @@ func (*awsRestxml_serializeOpCreateBucket) ID() string { func (m *awsRestxml_serializeOpCreateBucket) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -569,6 +620,8 @@ func (m *awsRestxml_serializeOpCreateBucket) HandleSerialize(ctx context.Context } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsCreateBucketInput(v *CreateBucketInput, encoder *httpbinding.Encoder) error { @@ -581,27 +634,27 @@ func awsRestxml_serializeOpHttpBindingsCreateBucketInput(v *CreateBucketInput, e encoder.SetHeader(locationName).String(string(v.ACL)) } - if v.GrantFullControl != nil && len(*v.GrantFullControl) > 0 { + if v.GrantFullControl != nil { locationName := "X-Amz-Grant-Full-Control" encoder.SetHeader(locationName).String(*v.GrantFullControl) } - if v.GrantRead != nil && len(*v.GrantRead) > 0 { + if v.GrantRead != nil { locationName := "X-Amz-Grant-Read" encoder.SetHeader(locationName).String(*v.GrantRead) } - if v.GrantReadACP != nil && len(*v.GrantReadACP) > 0 { + if v.GrantReadACP != nil { locationName := "X-Amz-Grant-Read-Acp" encoder.SetHeader(locationName).String(*v.GrantReadACP) } - if v.GrantWrite != nil && len(*v.GrantWrite) > 0 { + if v.GrantWrite != nil { locationName := "X-Amz-Grant-Write" 
encoder.SetHeader(locationName).String(*v.GrantWrite) } - if v.GrantWriteACP != nil && len(*v.GrantWriteACP) > 0 { + if v.GrantWriteACP != nil { locationName := "X-Amz-Grant-Write-Acp" encoder.SetHeader(locationName).String(*v.GrantWriteACP) } @@ -619,6 +672,107 @@ func awsRestxml_serializeOpHttpBindingsCreateBucketInput(v *CreateBucketInput, e return nil } +type awsRestxml_serializeOpCreateBucketMetadataTableConfiguration struct { +} + +func (*awsRestxml_serializeOpCreateBucketMetadataTableConfiguration) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpCreateBucketMetadataTableConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CreateBucketMetadataTableConfigurationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/?metadataTable") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "POST" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsCreateBucketMetadataTableConfigurationInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if input.MetadataTableConfiguration != nil { + if !restEncoder.HasHeader("Content-Type") { + ctx = smithyhttp.SetIsContentTypeDefaultValue(ctx, true) + restEncoder.SetHeader("Content-Type").String("application/xml") + } + + xmlEncoder := smithyxml.NewEncoder(bytes.NewBuffer(nil)) + payloadRootAttr := []smithyxml.Attr{} + payloadRoot := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "MetadataTableConfiguration", + }, + Attr: payloadRootAttr, + } + payloadRoot.Attr = append(payloadRoot.Attr, smithyxml.NewNamespaceAttribute("", "http://s3.amazonaws.com/doc/2006-03-01/")) + if err := awsRestxml_serializeDocumentMetadataTableConfiguration(input.MetadataTableConfiguration, xmlEncoder.RootElement(payloadRoot)); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + payload := bytes.NewReader(xmlEncoder.Bytes()) + if request, err = request.SetStream(payload); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +func 
awsRestxml_serializeOpHttpBindingsCreateBucketMetadataTableConfigurationInput(v *CreateBucketMetadataTableConfigurationInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if len(v.ChecksumAlgorithm) > 0 { + locationName := "X-Amz-Sdk-Checksum-Algorithm" + encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) + } + + if v.ContentMD5 != nil { + locationName := "Content-Md5" + encoder.SetHeader(locationName).String(*v.ContentMD5) + } + + if v.ExpectedBucketOwner != nil { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + type awsRestxml_serializeOpCreateMultipartUpload struct { } @@ -629,6 +783,10 @@ func (*awsRestxml_serializeOpCreateMultipartUpload) ID() string { func (m *awsRestxml_serializeOpCreateMultipartUpload) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -640,7 +798,7 @@ func (m *awsRestxml_serializeOpCreateMultipartUpload) HandleSerialize(ctx contex return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Key+}?uploads&x-id=CreateMultipartUpload") + opPath, opQuery := httpbinding.SplitURI("/{Key+}?uploads") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "POST" @@ -665,6 +823,8 @@ func (m *awsRestxml_serializeOpCreateMultipartUpload) HandleSerialize(ctx contex } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsCreateMultipartUploadInput(v *CreateMultipartUploadInput, encoder *httpbinding.Encoder) error { @@ -682,7 +842,7 @@ func awsRestxml_serializeOpHttpBindingsCreateMultipartUploadInput(v *CreateMulti encoder.SetHeader(locationName).Boolean(*v.BucketKeyEnabled) } - if v.CacheControl != nil && len(*v.CacheControl) > 0 { + if v.CacheControl != nil { locationName := "Cache-Control" encoder.SetHeader(locationName).String(*v.CacheControl) } @@ -692,27 +852,32 @@ func awsRestxml_serializeOpHttpBindingsCreateMultipartUploadInput(v *CreateMulti encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) } - if v.ContentDisposition != nil && len(*v.ContentDisposition) > 0 { + if len(v.ChecksumType) > 0 { + locationName := "X-Amz-Checksum-Type" + encoder.SetHeader(locationName).String(string(v.ChecksumType)) + } + + if v.ContentDisposition != nil { locationName := "Content-Disposition" encoder.SetHeader(locationName).String(*v.ContentDisposition) } - if v.ContentEncoding != nil && len(*v.ContentEncoding) > 0 { + if v.ContentEncoding != nil { locationName := "Content-Encoding" encoder.SetHeader(locationName).String(*v.ContentEncoding) } - if v.ContentLanguage != nil && len(*v.ContentLanguage) > 0 { + if v.ContentLanguage != nil { locationName := "Content-Language" encoder.SetHeader(locationName).String(*v.ContentLanguage) } - if v.ContentType != nil && 
len(*v.ContentType) > 0 { + if v.ContentType != nil { locationName := "Content-Type" encoder.SetHeader(locationName).String(*v.ContentType) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -722,22 +887,22 @@ func awsRestxml_serializeOpHttpBindingsCreateMultipartUploadInput(v *CreateMulti encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.Expires)) } - if v.GrantFullControl != nil && len(*v.GrantFullControl) > 0 { + if v.GrantFullControl != nil { locationName := "X-Amz-Grant-Full-Control" encoder.SetHeader(locationName).String(*v.GrantFullControl) } - if v.GrantRead != nil && len(*v.GrantRead) > 0 { + if v.GrantRead != nil { locationName := "X-Amz-Grant-Read" encoder.SetHeader(locationName).String(*v.GrantRead) } - if v.GrantReadACP != nil && len(*v.GrantReadACP) > 0 { + if v.GrantReadACP != nil { locationName := "X-Amz-Grant-Read-Acp" encoder.SetHeader(locationName).String(*v.GrantReadACP) } - if v.GrantWriteACP != nil && len(*v.GrantWriteACP) > 0 { + if v.GrantWriteACP != nil { locationName := "X-Amz-Grant-Write-Acp" encoder.SetHeader(locationName).String(*v.GrantWriteACP) } @@ -754,9 +919,7 @@ func awsRestxml_serializeOpHttpBindingsCreateMultipartUploadInput(v *CreateMulti if v.Metadata != nil { hv := encoder.Headers("X-Amz-Meta-") for mapKey, mapVal := range v.Metadata { - if len(mapVal) > 0 { - hv.SetHeader(http.CanonicalHeaderKey(mapKey)).String(mapVal) - } + hv.SetHeader(http.CanonicalHeaderKey(mapKey)).String(mapVal) } } @@ -785,27 +948,27 @@ func awsRestxml_serializeOpHttpBindingsCreateMultipartUploadInput(v *CreateMulti encoder.SetHeader(locationName).String(string(v.ServerSideEncryption)) } - if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 { + if v.SSECustomerAlgorithm != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) } - if v.SSECustomerKey != nil && len(*v.SSECustomerKey) > 0 { + if v.SSECustomerKey != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Key" encoder.SetHeader(locationName).String(*v.SSECustomerKey) } - if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 { + if v.SSECustomerKeyMD5 != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) } - if v.SSEKMSEncryptionContext != nil && len(*v.SSEKMSEncryptionContext) > 0 { + if v.SSEKMSEncryptionContext != nil { locationName := "X-Amz-Server-Side-Encryption-Context" encoder.SetHeader(locationName).String(*v.SSEKMSEncryptionContext) } - if v.SSEKMSKeyId != nil && len(*v.SSEKMSKeyId) > 0 { + if v.SSEKMSKeyId != nil { locationName := "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id" encoder.SetHeader(locationName).String(*v.SSEKMSKeyId) } @@ -815,12 +978,12 @@ func awsRestxml_serializeOpHttpBindingsCreateMultipartUploadInput(v *CreateMulti encoder.SetHeader(locationName).String(string(v.StorageClass)) } - if v.Tagging != nil && len(*v.Tagging) > 0 { + if v.Tagging != nil { locationName := "X-Amz-Tagging" encoder.SetHeader(locationName).String(*v.Tagging) } - if v.WebsiteRedirectLocation != nil && len(*v.WebsiteRedirectLocation) > 0 { + if v.WebsiteRedirectLocation != nil { locationName := "X-Amz-Website-Redirect-Location" encoder.SetHeader(locationName).String(*v.WebsiteRedirectLocation) } @@ -838,6 +1001,10 @@ func 
(*awsRestxml_serializeOpCreateSession) ID() string { func (m *awsRestxml_serializeOpCreateSession) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -874,6 +1041,8 @@ func (m *awsRestxml_serializeOpCreateSession) HandleSerialize(ctx context.Contex } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsCreateSessionInput(v *CreateSessionInput, encoder *httpbinding.Encoder) error { @@ -881,11 +1050,31 @@ func awsRestxml_serializeOpHttpBindingsCreateSessionInput(v *CreateSessionInput, return fmt.Errorf("unsupported serialization of nil %T", v) } + if v.BucketKeyEnabled != nil { + locationName := "X-Amz-Server-Side-Encryption-Bucket-Key-Enabled" + encoder.SetHeader(locationName).Boolean(*v.BucketKeyEnabled) + } + + if len(v.ServerSideEncryption) > 0 { + locationName := "X-Amz-Server-Side-Encryption" + encoder.SetHeader(locationName).String(string(v.ServerSideEncryption)) + } + if len(v.SessionMode) > 0 { locationName := "X-Amz-Create-Session-Mode" encoder.SetHeader(locationName).String(string(v.SessionMode)) } + if v.SSEKMSEncryptionContext != nil { + locationName := "X-Amz-Server-Side-Encryption-Context" + encoder.SetHeader(locationName).String(*v.SSEKMSEncryptionContext) + } + + if v.SSEKMSKeyId != nil { + locationName := "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id" + encoder.SetHeader(locationName).String(*v.SSEKMSKeyId) + } + return nil } @@ -899,6 +1088,10 @@ func (*awsRestxml_serializeOpDeleteBucket) ID() string { func (m *awsRestxml_serializeOpDeleteBucket) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -935,6 +1128,8 @@ func (m *awsRestxml_serializeOpDeleteBucket) HandleSerialize(ctx context.Context } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsDeleteBucketInput(v *DeleteBucketInput, encoder *httpbinding.Encoder) error { @@ -942,7 +1137,7 @@ func awsRestxml_serializeOpHttpBindingsDeleteBucketInput(v *DeleteBucketInput, e return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -960,6 +1155,10 @@ func (*awsRestxml_serializeOpDeleteBucketAnalyticsConfiguration) ID() string { func (m *awsRestxml_serializeOpDeleteBucketAnalyticsConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata 
middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -996,6 +1195,8 @@ func (m *awsRestxml_serializeOpDeleteBucketAnalyticsConfiguration) HandleSeriali } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsDeleteBucketAnalyticsConfigurationInput(v *DeleteBucketAnalyticsConfigurationInput, encoder *httpbinding.Encoder) error { @@ -1003,7 +1204,7 @@ func awsRestxml_serializeOpHttpBindingsDeleteBucketAnalyticsConfigurationInput(v return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -1025,6 +1226,10 @@ func (*awsRestxml_serializeOpDeleteBucketCors) ID() string { func (m *awsRestxml_serializeOpDeleteBucketCors) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1061,6 +1266,8 @@ func (m *awsRestxml_serializeOpDeleteBucketCors) HandleSerialize(ctx context.Con } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsDeleteBucketCorsInput(v *DeleteBucketCorsInput, encoder *httpbinding.Encoder) error { @@ -1068,7 +1275,7 @@ func awsRestxml_serializeOpHttpBindingsDeleteBucketCorsInput(v *DeleteBucketCors return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -1086,6 +1293,10 @@ func (*awsRestxml_serializeOpDeleteBucketEncryption) ID() string { func (m *awsRestxml_serializeOpDeleteBucketEncryption) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1122,6 +1333,8 @@ func (m *awsRestxml_serializeOpDeleteBucketEncryption) HandleSerialize(ctx conte } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsDeleteBucketEncryptionInput(v *DeleteBucketEncryptionInput, encoder *httpbinding.Encoder) error { @@ -1129,7 +1342,7 @@ func awsRestxml_serializeOpHttpBindingsDeleteBucketEncryptionInput(v *DeleteBuck return 
fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -1147,6 +1360,10 @@ func (*awsRestxml_serializeOpDeleteBucketIntelligentTieringConfiguration) ID() s func (m *awsRestxml_serializeOpDeleteBucketIntelligentTieringConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1183,6 +1400,8 @@ func (m *awsRestxml_serializeOpDeleteBucketIntelligentTieringConfiguration) Hand } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsDeleteBucketIntelligentTieringConfigurationInput(v *DeleteBucketIntelligentTieringConfigurationInput, encoder *httpbinding.Encoder) error { @@ -1207,6 +1426,10 @@ func (*awsRestxml_serializeOpDeleteBucketInventoryConfiguration) ID() string { func (m *awsRestxml_serializeOpDeleteBucketInventoryConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1243,6 +1466,8 @@ func (m *awsRestxml_serializeOpDeleteBucketInventoryConfiguration) HandleSeriali } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsDeleteBucketInventoryConfigurationInput(v *DeleteBucketInventoryConfigurationInput, encoder *httpbinding.Encoder) error { @@ -1250,7 +1475,7 @@ func awsRestxml_serializeOpHttpBindingsDeleteBucketInventoryConfigurationInput(v return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -1272,6 +1497,10 @@ func (*awsRestxml_serializeOpDeleteBucketLifecycle) ID() string { func (m *awsRestxml_serializeOpDeleteBucketLifecycle) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1308,6 +1537,8 @@ func (m *awsRestxml_serializeOpDeleteBucketLifecycle) HandleSerialize(ctx contex } in.Request = request + endTimer() 
+ span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsDeleteBucketLifecycleInput(v *DeleteBucketLifecycleInput, encoder *httpbinding.Encoder) error { @@ -1315,7 +1546,74 @@ func awsRestxml_serializeOpHttpBindingsDeleteBucketLifecycleInput(v *DeleteBucke return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { + locationName := "X-Amz-Expected-Bucket-Owner" + encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) + } + + return nil +} + +type awsRestxml_serializeOpDeleteBucketMetadataTableConfiguration struct { +} + +func (*awsRestxml_serializeOpDeleteBucketMetadataTableConfiguration) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpDeleteBucketMetadataTableConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteBucketMetadataTableConfigurationInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/?metadataTable") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "DELETE" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsDeleteBucketMetadataTableConfigurationInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsDeleteBucketMetadataTableConfigurationInput(v *DeleteBucketMetadataTableConfigurationInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -1333,6 +1631,10 @@ func (*awsRestxml_serializeOpDeleteBucketMetricsConfiguration) ID() string { func (m *awsRestxml_serializeOpDeleteBucketMetricsConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, 
"OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1369,6 +1671,8 @@ func (m *awsRestxml_serializeOpDeleteBucketMetricsConfiguration) HandleSerialize } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsDeleteBucketMetricsConfigurationInput(v *DeleteBucketMetricsConfigurationInput, encoder *httpbinding.Encoder) error { @@ -1376,7 +1680,7 @@ func awsRestxml_serializeOpHttpBindingsDeleteBucketMetricsConfigurationInput(v * return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -1398,6 +1702,10 @@ func (*awsRestxml_serializeOpDeleteBucketOwnershipControls) ID() string { func (m *awsRestxml_serializeOpDeleteBucketOwnershipControls) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1434,6 +1742,8 @@ func (m *awsRestxml_serializeOpDeleteBucketOwnershipControls) HandleSerialize(ct } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsDeleteBucketOwnershipControlsInput(v *DeleteBucketOwnershipControlsInput, encoder *httpbinding.Encoder) error { @@ -1441,7 +1751,7 @@ func awsRestxml_serializeOpHttpBindingsDeleteBucketOwnershipControlsInput(v *Del return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -1459,6 +1769,10 @@ func (*awsRestxml_serializeOpDeleteBucketPolicy) ID() string { func (m *awsRestxml_serializeOpDeleteBucketPolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1495,6 +1809,8 @@ func (m *awsRestxml_serializeOpDeleteBucketPolicy) HandleSerialize(ctx context.C } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsDeleteBucketPolicyInput(v *DeleteBucketPolicyInput, encoder *httpbinding.Encoder) error { @@ -1502,7 +1818,7 @@ func awsRestxml_serializeOpHttpBindingsDeleteBucketPolicyInput(v *DeleteBucketPo return fmt.Errorf("unsupported serialization of 
nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -1520,6 +1836,10 @@ func (*awsRestxml_serializeOpDeleteBucketReplication) ID() string { func (m *awsRestxml_serializeOpDeleteBucketReplication) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1556,6 +1876,8 @@ func (m *awsRestxml_serializeOpDeleteBucketReplication) HandleSerialize(ctx cont } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsDeleteBucketReplicationInput(v *DeleteBucketReplicationInput, encoder *httpbinding.Encoder) error { @@ -1563,7 +1885,7 @@ func awsRestxml_serializeOpHttpBindingsDeleteBucketReplicationInput(v *DeleteBuc return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -1581,6 +1903,10 @@ func (*awsRestxml_serializeOpDeleteBucketTagging) ID() string { func (m *awsRestxml_serializeOpDeleteBucketTagging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -1617,6 +1943,8 @@ func (m *awsRestxml_serializeOpDeleteBucketTagging) HandleSerialize(ctx context. 
 	}
 	in.Request = request
 
+	endTimer()
+	span.End()
 	return next.HandleSerialize(ctx, in)
 }
 func awsRestxml_serializeOpHttpBindingsDeleteBucketTaggingInput(v *DeleteBucketTaggingInput, encoder *httpbinding.Encoder) error {
@@ -1624,7 +1952,7 @@ func awsRestxml_serializeOpHttpBindingsDeleteBucketTaggingInput(v *DeleteBucketT
 		return fmt.Errorf("unsupported serialization of nil %T", v)
 	}
 
-	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+	if v.ExpectedBucketOwner != nil {
 		locationName := "X-Amz-Expected-Bucket-Owner"
 		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
 	}
@@ -1642,6 +1970,10 @@ func (*awsRestxml_serializeOpDeleteBucketWebsite) ID() string {
 func (m *awsRestxml_serializeOpDeleteBucketWebsite) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
 	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
 ) {
+	_, span := tracing.StartSpan(ctx, "OperationSerializer")
+	endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+	defer endTimer()
+	defer span.End()
 	request, ok := in.Request.(*smithyhttp.Request)
 	if !ok {
 		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
@@ -1678,6 +2010,8 @@ func (m *awsRestxml_serializeOpDeleteBucketWebsite) HandleSerialize(ctx context.
 	}
 	in.Request = request
 
+	endTimer()
+	span.End()
 	return next.HandleSerialize(ctx, in)
 }
 func awsRestxml_serializeOpHttpBindingsDeleteBucketWebsiteInput(v *DeleteBucketWebsiteInput, encoder *httpbinding.Encoder) error {
@@ -1685,7 +2019,7 @@ func awsRestxml_serializeOpHttpBindingsDeleteBucketWebsiteInput(v *DeleteBucketW
 		return fmt.Errorf("unsupported serialization of nil %T", v)
 	}
 
-	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+	if v.ExpectedBucketOwner != nil {
 		locationName := "X-Amz-Expected-Bucket-Owner"
 		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
 	}
@@ -1703,6 +2037,10 @@ func (*awsRestxml_serializeOpDeleteObject) ID() string {
 func (m *awsRestxml_serializeOpDeleteObject) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
 	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
 ) {
+	_, span := tracing.StartSpan(ctx, "OperationSerializer")
+	endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+	defer endTimer()
+	defer span.End()
 	request, ok := in.Request.(*smithyhttp.Request)
 	if !ok {
 		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
@@ -1739,6 +2077,8 @@ func (m *awsRestxml_serializeOpDeleteObject) HandleSerialize(ctx context.Context
 	}
 	in.Request = request
 
+	endTimer()
+	span.End()
 	return next.HandleSerialize(ctx, in)
 }
 func awsRestxml_serializeOpHttpBindingsDeleteObjectInput(v *DeleteObjectInput, encoder *httpbinding.Encoder) error {
@@ -1751,11 +2091,26 @@ func awsRestxml_serializeOpHttpBindingsDeleteObjectInput(v *DeleteObjectInput, e
 		encoder.SetHeader(locationName).Boolean(*v.BypassGovernanceRetention)
 	}
 
-	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+	if v.ExpectedBucketOwner != nil {
 		locationName := "X-Amz-Expected-Bucket-Owner"
 		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
 	}
 
+	if v.IfMatch != nil {
+		locationName := "If-Match"
+		encoder.SetHeader(locationName).String(*v.IfMatch)
+	}
+
+	if v.IfMatchLastModifiedTime != nil {
+		locationName := "X-Amz-If-Match-Last-Modified-Time"
+		encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.IfMatchLastModifiedTime))
+	}
+
+	if v.IfMatchSize != nil {
+		locationName := "X-Amz-If-Match-Size"
+		encoder.SetHeader(locationName).Long(*v.IfMatchSize)
+	}
+
 	if v.Key == nil || len(*v.Key) == 0 {
 		return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")}
 	}
@@ -1765,7 +2120,7 @@ func awsRestxml_serializeOpHttpBindingsDeleteObjectInput(v *DeleteObjectInput, e
 		}
 	}
 
-	if v.MFA != nil && len(*v.MFA) > 0 {
+	if v.MFA != nil {
 		locationName := "X-Amz-Mfa"
 		encoder.SetHeader(locationName).String(*v.MFA)
 	}
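[Context note: the three If-Match bindings just above expose S3's conditional-delete preconditions on DeleteObjectInput. A minimal, hedged usage sketch — bucket, key, and the helper name are placeholders, not part of this patch; imports assumed are context, time, github.com/aws/aws-sdk-go-v2/aws, and github.com/aws/aws-sdk-go-v2/service/s3:]

// deleteIfUnchanged is an illustrative sketch: the delete only succeeds if
// the object's current ETag, last-modified time, and size still match.
func deleteIfUnchanged(ctx context.Context, client *s3.Client, etag string, modified time.Time, size int64) error {
	_, err := client.DeleteObject(ctx, &s3.DeleteObjectInput{
		Bucket:                  aws.String("example-bucket"), // placeholder
		Key:                     aws.String("example-key"),    // placeholder
		IfMatch:                 aws.String(etag),             // serialized as If-Match
		IfMatchLastModifiedTime: aws.Time(modified),           // X-Amz-If-Match-Last-Modified-Time
		IfMatchSize:             aws.Int64(size),              // X-Amz-If-Match-Size
	})
	return err
}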
@@ -1792,6 +2147,10 @@ func (*awsRestxml_serializeOpDeleteObjects) ID() string {
 func (m *awsRestxml_serializeOpDeleteObjects) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
 	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
 ) {
+	_, span := tracing.StartSpan(ctx, "OperationSerializer")
+	endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+	defer endTimer()
+	defer span.End()
 	request, ok := in.Request.(*smithyhttp.Request)
 	if !ok {
 		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
@@ -1803,7 +2162,7 @@ func (m *awsRestxml_serializeOpDeleteObjects) HandleSerialize(ctx context.Contex
 		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
 	}
 
-	opPath, opQuery := httpbinding.SplitURI("/?delete&x-id=DeleteObjects")
+	opPath, opQuery := httpbinding.SplitURI("/?delete")
 	request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
 	request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
 	request.Method = "POST"
@@ -1852,6 +2211,8 @@ func (m *awsRestxml_serializeOpDeleteObjects) HandleSerialize(ctx context.Contex
 	}
 	in.Request = request
 
+	endTimer()
+	span.End()
 	return next.HandleSerialize(ctx, in)
 }
 func awsRestxml_serializeOpHttpBindingsDeleteObjectsInput(v *DeleteObjectsInput, encoder *httpbinding.Encoder) error {
@@ -1869,12 +2230,12 @@ func awsRestxml_serializeOpHttpBindingsDeleteObjectsInput(v *DeleteObjectsInput,
 		encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm))
 	}
 
-	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+	if v.ExpectedBucketOwner != nil {
 		locationName := "X-Amz-Expected-Bucket-Owner"
 		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
 	}
 
-	if v.MFA != nil && len(*v.MFA) > 0 {
+	if v.MFA != nil {
 		locationName := "X-Amz-Mfa"
 		encoder.SetHeader(locationName).String(*v.MFA)
 	}
@@ -1897,6 +2258,10 @@ func (*awsRestxml_serializeOpDeleteObjectTagging) ID() string {
 func (m *awsRestxml_serializeOpDeleteObjectTagging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
 	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
 ) {
+	_, span := tracing.StartSpan(ctx, "OperationSerializer")
+	endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+	defer endTimer()
+	defer span.End()
 	request, ok := in.Request.(*smithyhttp.Request)
 	if !ok {
 		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
@@ -1933,6 +2298,8 @@ func (m *awsRestxml_serializeOpDeleteObjectTagging) HandleSerialize(ctx context.
} in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsDeleteObjectTaggingInput(v *DeleteObjectTaggingInput, encoder *httpbinding.Encoder) error { @@ -1940,7 +2307,7 @@ func awsRestxml_serializeOpHttpBindingsDeleteObjectTaggingInput(v *DeleteObjectT return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -1971,6 +2338,10 @@ func (*awsRestxml_serializeOpDeletePublicAccessBlock) ID() string { func (m *awsRestxml_serializeOpDeletePublicAccessBlock) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2007,6 +2378,8 @@ func (m *awsRestxml_serializeOpDeletePublicAccessBlock) HandleSerialize(ctx cont } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsDeletePublicAccessBlockInput(v *DeletePublicAccessBlockInput, encoder *httpbinding.Encoder) error { @@ -2014,7 +2387,7 @@ func awsRestxml_serializeOpHttpBindingsDeletePublicAccessBlockInput(v *DeletePub return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -2032,6 +2405,10 @@ func (*awsRestxml_serializeOpGetBucketAccelerateConfiguration) ID() string { func (m *awsRestxml_serializeOpGetBucketAccelerateConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2068,6 +2445,8 @@ func (m *awsRestxml_serializeOpGetBucketAccelerateConfiguration) HandleSerialize } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetBucketAccelerateConfigurationInput(v *GetBucketAccelerateConfigurationInput, encoder *httpbinding.Encoder) error { @@ -2075,7 +2454,7 @@ func awsRestxml_serializeOpHttpBindingsGetBucketAccelerateConfigurationInput(v * return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -2098,6 +2477,10 @@ func (*awsRestxml_serializeOpGetBucketAcl) ID() string { func (m *awsRestxml_serializeOpGetBucketAcl) HandleSerialize(ctx context.Context, in 
middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2134,6 +2517,8 @@ func (m *awsRestxml_serializeOpGetBucketAcl) HandleSerialize(ctx context.Context } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetBucketAclInput(v *GetBucketAclInput, encoder *httpbinding.Encoder) error { @@ -2141,7 +2526,7 @@ func awsRestxml_serializeOpHttpBindingsGetBucketAclInput(v *GetBucketAclInput, e return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -2159,6 +2544,10 @@ func (*awsRestxml_serializeOpGetBucketAnalyticsConfiguration) ID() string { func (m *awsRestxml_serializeOpGetBucketAnalyticsConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2195,6 +2584,8 @@ func (m *awsRestxml_serializeOpGetBucketAnalyticsConfiguration) HandleSerialize( } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetBucketAnalyticsConfigurationInput(v *GetBucketAnalyticsConfigurationInput, encoder *httpbinding.Encoder) error { @@ -2202,7 +2593,7 @@ func awsRestxml_serializeOpHttpBindingsGetBucketAnalyticsConfigurationInput(v *G return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -2224,6 +2615,10 @@ func (*awsRestxml_serializeOpGetBucketCors) ID() string { func (m *awsRestxml_serializeOpGetBucketCors) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2260,6 +2655,8 @@ func (m *awsRestxml_serializeOpGetBucketCors) HandleSerialize(ctx context.Contex } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetBucketCorsInput(v *GetBucketCorsInput, encoder *httpbinding.Encoder) error { @@ -2267,7 +2664,7 @@ func 
awsRestxml_serializeOpHttpBindingsGetBucketCorsInput(v *GetBucketCorsInput, return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -2285,6 +2682,10 @@ func (*awsRestxml_serializeOpGetBucketEncryption) ID() string { func (m *awsRestxml_serializeOpGetBucketEncryption) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2321,6 +2722,8 @@ func (m *awsRestxml_serializeOpGetBucketEncryption) HandleSerialize(ctx context. } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetBucketEncryptionInput(v *GetBucketEncryptionInput, encoder *httpbinding.Encoder) error { @@ -2328,7 +2731,7 @@ func awsRestxml_serializeOpHttpBindingsGetBucketEncryptionInput(v *GetBucketEncr return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -2346,6 +2749,10 @@ func (*awsRestxml_serializeOpGetBucketIntelligentTieringConfiguration) ID() stri func (m *awsRestxml_serializeOpGetBucketIntelligentTieringConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2382,6 +2789,8 @@ func (m *awsRestxml_serializeOpGetBucketIntelligentTieringConfiguration) HandleS } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetBucketIntelligentTieringConfigurationInput(v *GetBucketIntelligentTieringConfigurationInput, encoder *httpbinding.Encoder) error { @@ -2406,6 +2815,10 @@ func (*awsRestxml_serializeOpGetBucketInventoryConfiguration) ID() string { func (m *awsRestxml_serializeOpGetBucketInventoryConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2442,6 +2855,8 @@ func (m *awsRestxml_serializeOpGetBucketInventoryConfiguration) HandleSerialize( } 
in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetBucketInventoryConfigurationInput(v *GetBucketInventoryConfigurationInput, encoder *httpbinding.Encoder) error { @@ -2449,7 +2864,7 @@ func awsRestxml_serializeOpHttpBindingsGetBucketInventoryConfigurationInput(v *G return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -2471,6 +2886,10 @@ func (*awsRestxml_serializeOpGetBucketLifecycleConfiguration) ID() string { func (m *awsRestxml_serializeOpGetBucketLifecycleConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2507,6 +2926,8 @@ func (m *awsRestxml_serializeOpGetBucketLifecycleConfiguration) HandleSerialize( } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetBucketLifecycleConfigurationInput(v *GetBucketLifecycleConfigurationInput, encoder *httpbinding.Encoder) error { @@ -2514,7 +2935,7 @@ func awsRestxml_serializeOpHttpBindingsGetBucketLifecycleConfigurationInput(v *G return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -2532,6 +2953,10 @@ func (*awsRestxml_serializeOpGetBucketLocation) ID() string { func (m *awsRestxml_serializeOpGetBucketLocation) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2568,6 +2993,8 @@ func (m *awsRestxml_serializeOpGetBucketLocation) HandleSerialize(ctx context.Co } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetBucketLocationInput(v *GetBucketLocationInput, encoder *httpbinding.Encoder) error { @@ -2575,7 +3002,7 @@ func awsRestxml_serializeOpHttpBindingsGetBucketLocationInput(v *GetBucketLocati return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -2593,6 +3020,10 @@ func (*awsRestxml_serializeOpGetBucketLogging) ID() string { func (m *awsRestxml_serializeOpGetBucketLogging) HandleSerialize(ctx context.Context, in 
middleware.SerializeInput, next middleware.SerializeHandler) (
 	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
 ) {
+	_, span := tracing.StartSpan(ctx, "OperationSerializer")
+	endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+	defer endTimer()
+	defer span.End()
 	request, ok := in.Request.(*smithyhttp.Request)
 	if !ok {
 		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
@@ -2629,6 +3060,8 @@ func (m *awsRestxml_serializeOpGetBucketLogging) HandleSerialize(ctx context.Con
 	}
 	in.Request = request
 
+	endTimer()
+	span.End()
 	return next.HandleSerialize(ctx, in)
 }
 func awsRestxml_serializeOpHttpBindingsGetBucketLoggingInput(v *GetBucketLoggingInput, encoder *httpbinding.Encoder) error {
@@ -2636,7 +3069,74 @@ func awsRestxml_serializeOpHttpBindingsGetBucketLoggingInput(v *GetBucketLogging
 		return fmt.Errorf("unsupported serialization of nil %T", v)
 	}
 
-	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+	if v.ExpectedBucketOwner != nil {
+		locationName := "X-Amz-Expected-Bucket-Owner"
+		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
+	}
+
+	return nil
+}
+
+type awsRestxml_serializeOpGetBucketMetadataTableConfiguration struct {
+}
+
+func (*awsRestxml_serializeOpGetBucketMetadataTableConfiguration) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpGetBucketMetadataTableConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	_, span := tracing.StartSpan(ctx, "OperationSerializer")
+	endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+	defer endTimer()
+	defer span.End()
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*GetBucketMetadataTableConfigurationInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	opPath, opQuery := httpbinding.SplitURI("/?metadataTable")
+	request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+	request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+	request.Method = "GET"
+	var restEncoder *httpbinding.Encoder
+	if request.URL.RawPath == "" {
+		restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	} else {
+		request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+		restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+	}
+
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if err := awsRestxml_serializeOpHttpBindingsGetBucketMetadataTableConfigurationInput(input, restEncoder); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	endTimer()
+	span.End()
+	return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsGetBucketMetadataTableConfigurationInput(v *GetBucketMetadataTableConfigurationInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	if v.ExpectedBucketOwner != nil {
 		locationName := "X-Amz-Expected-Bucket-Owner"
 		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
 	}
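[Context note: this GET serializer is the read-side counterpart of the DeleteBucketMetadataTableConfiguration serializer added earlier in this file; both target the ?metadataTable subresource. A hedged usage sketch, assuming the generated client exposes the usual per-operation methods; the bucket name and helper are illustrative, not part of this patch:]

// resetMetadataTableConfig is an illustrative sketch: read, then remove, a
// bucket's S3 Metadata table configuration via the ?metadataTable subresource.
func resetMetadataTableConfig(ctx context.Context, client *s3.Client, bucket string) error {
	if _, err := client.GetBucketMetadataTableConfiguration(ctx, &s3.GetBucketMetadataTableConfigurationInput{
		Bucket: aws.String(bucket),
	}); err != nil {
		return err // e.g. no metadata table configuration exists
	}
	_, err := client.DeleteBucketMetadataTableConfiguration(ctx, &s3.DeleteBucketMetadataTableConfigurationInput{
		Bucket: aws.String(bucket),
	})
	return err
}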
fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -2654,6 +3154,10 @@ func (*awsRestxml_serializeOpGetBucketMetricsConfiguration) ID() string { func (m *awsRestxml_serializeOpGetBucketMetricsConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2690,6 +3194,8 @@ func (m *awsRestxml_serializeOpGetBucketMetricsConfiguration) HandleSerialize(ct } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetBucketMetricsConfigurationInput(v *GetBucketMetricsConfigurationInput, encoder *httpbinding.Encoder) error { @@ -2697,7 +3203,7 @@ func awsRestxml_serializeOpHttpBindingsGetBucketMetricsConfigurationInput(v *Get return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -2719,6 +3225,10 @@ func (*awsRestxml_serializeOpGetBucketNotificationConfiguration) ID() string { func (m *awsRestxml_serializeOpGetBucketNotificationConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2755,6 +3265,8 @@ func (m *awsRestxml_serializeOpGetBucketNotificationConfiguration) HandleSeriali } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetBucketNotificationConfigurationInput(v *GetBucketNotificationConfigurationInput, encoder *httpbinding.Encoder) error { @@ -2762,7 +3274,7 @@ func awsRestxml_serializeOpHttpBindingsGetBucketNotificationConfigurationInput(v return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -2780,6 +3292,10 @@ func (*awsRestxml_serializeOpGetBucketOwnershipControls) ID() string { func (m *awsRestxml_serializeOpGetBucketOwnershipControls) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := 
in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2816,6 +3332,8 @@ func (m *awsRestxml_serializeOpGetBucketOwnershipControls) HandleSerialize(ctx c } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetBucketOwnershipControlsInput(v *GetBucketOwnershipControlsInput, encoder *httpbinding.Encoder) error { @@ -2823,7 +3341,7 @@ func awsRestxml_serializeOpHttpBindingsGetBucketOwnershipControlsInput(v *GetBuc return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -2841,6 +3359,10 @@ func (*awsRestxml_serializeOpGetBucketPolicy) ID() string { func (m *awsRestxml_serializeOpGetBucketPolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2877,6 +3399,8 @@ func (m *awsRestxml_serializeOpGetBucketPolicy) HandleSerialize(ctx context.Cont } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetBucketPolicyInput(v *GetBucketPolicyInput, encoder *httpbinding.Encoder) error { @@ -2884,7 +3408,7 @@ func awsRestxml_serializeOpHttpBindingsGetBucketPolicyInput(v *GetBucketPolicyIn return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -2902,6 +3426,10 @@ func (*awsRestxml_serializeOpGetBucketPolicyStatus) ID() string { func (m *awsRestxml_serializeOpGetBucketPolicyStatus) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2938,6 +3466,8 @@ func (m *awsRestxml_serializeOpGetBucketPolicyStatus) HandleSerialize(ctx contex } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetBucketPolicyStatusInput(v *GetBucketPolicyStatusInput, encoder *httpbinding.Encoder) error { @@ -2945,7 +3475,7 @@ func awsRestxml_serializeOpHttpBindingsGetBucketPolicyStatusInput(v *GetBucketPo return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" 
encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -2963,6 +3493,10 @@ func (*awsRestxml_serializeOpGetBucketReplication) ID() string { func (m *awsRestxml_serializeOpGetBucketReplication) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -2999,6 +3533,8 @@ func (m *awsRestxml_serializeOpGetBucketReplication) HandleSerialize(ctx context } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetBucketReplicationInput(v *GetBucketReplicationInput, encoder *httpbinding.Encoder) error { @@ -3006,7 +3542,7 @@ func awsRestxml_serializeOpHttpBindingsGetBucketReplicationInput(v *GetBucketRep return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -3024,6 +3560,10 @@ func (*awsRestxml_serializeOpGetBucketRequestPayment) ID() string { func (m *awsRestxml_serializeOpGetBucketRequestPayment) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -3060,6 +3600,8 @@ func (m *awsRestxml_serializeOpGetBucketRequestPayment) HandleSerialize(ctx cont } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetBucketRequestPaymentInput(v *GetBucketRequestPaymentInput, encoder *httpbinding.Encoder) error { @@ -3067,7 +3609,7 @@ func awsRestxml_serializeOpHttpBindingsGetBucketRequestPaymentInput(v *GetBucket return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -3085,6 +3627,10 @@ func (*awsRestxml_serializeOpGetBucketTagging) ID() string { func (m *awsRestxml_serializeOpGetBucketTagging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -3121,6 +3667,8 @@ func (m *awsRestxml_serializeOpGetBucketTagging) HandleSerialize(ctx context.Con 
} in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetBucketTaggingInput(v *GetBucketTaggingInput, encoder *httpbinding.Encoder) error { @@ -3128,7 +3676,7 @@ func awsRestxml_serializeOpHttpBindingsGetBucketTaggingInput(v *GetBucketTagging return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -3146,6 +3694,10 @@ func (*awsRestxml_serializeOpGetBucketVersioning) ID() string { func (m *awsRestxml_serializeOpGetBucketVersioning) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -3182,6 +3734,8 @@ func (m *awsRestxml_serializeOpGetBucketVersioning) HandleSerialize(ctx context. } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetBucketVersioningInput(v *GetBucketVersioningInput, encoder *httpbinding.Encoder) error { @@ -3189,7 +3743,7 @@ func awsRestxml_serializeOpHttpBindingsGetBucketVersioningInput(v *GetBucketVers return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -3207,6 +3761,10 @@ func (*awsRestxml_serializeOpGetBucketWebsite) ID() string { func (m *awsRestxml_serializeOpGetBucketWebsite) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -3243,6 +3801,8 @@ func (m *awsRestxml_serializeOpGetBucketWebsite) HandleSerialize(ctx context.Con } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsGetBucketWebsiteInput(v *GetBucketWebsiteInput, encoder *httpbinding.Encoder) error { @@ -3250,7 +3810,7 @@ func awsRestxml_serializeOpHttpBindingsGetBucketWebsiteInput(v *GetBucketWebsite return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -3268,6 +3828,10 @@ func (*awsRestxml_serializeOpGetObject) ID() string { func (m *awsRestxml_serializeOpGetObject) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out 
middleware.SerializeOutput, metadata middleware.Metadata, err error,
 ) {
+	_, span := tracing.StartSpan(ctx, "OperationSerializer")
+	endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+	defer endTimer()
+	defer span.End()
 	request, ok := in.Request.(*smithyhttp.Request)
 	if !ok {
 		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
@@ -3304,6 +3868,8 @@ func (m *awsRestxml_serializeOpGetObject) HandleSerialize(ctx context.Context, i
 	}
 	in.Request = request
 
+	endTimer()
+	span.End()
 	return next.HandleSerialize(ctx, in)
 }
 func awsRestxml_serializeOpHttpBindingsGetObjectInput(v *GetObjectInput, encoder *httpbinding.Encoder) error {
@@ -3316,12 +3882,12 @@ func awsRestxml_serializeOpHttpBindingsGetObjectInput(v *GetObjectInput, encoder
 		encoder.SetHeader(locationName).String(string(v.ChecksumMode))
 	}
 
-	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+	if v.ExpectedBucketOwner != nil {
 		locationName := "X-Amz-Expected-Bucket-Owner"
 		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
 	}
 
-	if v.IfMatch != nil && len(*v.IfMatch) > 0 {
+	if v.IfMatch != nil {
 		locationName := "If-Match"
 		encoder.SetHeader(locationName).String(*v.IfMatch)
 	}
@@ -3331,7 +3897,7 @@ func awsRestxml_serializeOpHttpBindingsGetObjectInput(v *GetObjectInput, encoder
 		encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.IfModifiedSince))
 	}
 
-	if v.IfNoneMatch != nil && len(*v.IfNoneMatch) > 0 {
+	if v.IfNoneMatch != nil {
 		locationName := "If-None-Match"
 		encoder.SetHeader(locationName).String(*v.IfNoneMatch)
 	}
@@ -3354,7 +3920,7 @@ func awsRestxml_serializeOpHttpBindingsGetObjectInput(v *GetObjectInput, encoder
 		encoder.SetQuery("partNumber").Integer(*v.PartNumber)
 	}
 
-	if v.Range != nil && len(*v.Range) > 0 {
+	if v.Range != nil {
 		locationName := "Range"
 		encoder.SetHeader(locationName).String(*v.Range)
 	}
@@ -3388,17 +3954,17 @@ func awsRestxml_serializeOpHttpBindingsGetObjectInput(v *GetObjectInput, encoder
 		encoder.SetQuery("response-expires").String(smithytime.FormatHTTPDate(*v.ResponseExpires))
 	}
 
-	if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 {
+	if v.SSECustomerAlgorithm != nil {
 		locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm"
 		encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm)
 	}
 
-	if v.SSECustomerKey != nil && len(*v.SSECustomerKey) > 0 {
+	if v.SSECustomerKey != nil {
 		locationName := "X-Amz-Server-Side-Encryption-Customer-Key"
 		encoder.SetHeader(locationName).String(*v.SSECustomerKey)
 	}
 
-	if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 {
+	if v.SSECustomerKeyMD5 != nil {
 		locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5"
 		encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5)
 	}
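[Context note on the recurring guard change throughout these bindings: `!= nil && len(*v.X) > 0` becomes a plain nil check, so an explicitly set empty string is now serialized as an empty header instead of being silently dropped. Illustrative only; bucket and key are placeholders:]

// Before this change an empty ExpectedBucketOwner was omitted from the wire;
// with the nil-only guard it is sent as "X-Amz-Expected-Bucket-Owner: ".
input := &s3.GetObjectInput{
	Bucket:              aws.String("example-bucket"), // placeholder
	Key:                 aws.String("example-key"),    // placeholder
	ExpectedBucketOwner: aws.String(""),               // now serialized, previously dropped
}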
@@ -3420,6 +3986,10 @@ func (*awsRestxml_serializeOpGetObjectAcl) ID() string {
 func (m *awsRestxml_serializeOpGetObjectAcl) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
 	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
 ) {
+	_, span := tracing.StartSpan(ctx, "OperationSerializer")
+	endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+	defer endTimer()
+	defer span.End()
 	request, ok := in.Request.(*smithyhttp.Request)
 	if !ok {
 		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
@@ -3456,6 +4026,8 @@ func (m *awsRestxml_serializeOpGetObjectAcl) HandleSerialize(ctx context.Context
 	}
 	in.Request = request
 
+	endTimer()
+	span.End()
 	return next.HandleSerialize(ctx, in)
 }
 func awsRestxml_serializeOpHttpBindingsGetObjectAclInput(v *GetObjectAclInput, encoder *httpbinding.Encoder) error {
@@ -3463,7 +4035,7 @@ func awsRestxml_serializeOpHttpBindingsGetObjectAclInput(v *GetObjectAclInput, e
 		return fmt.Errorf("unsupported serialization of nil %T", v)
 	}
 
-	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+	if v.ExpectedBucketOwner != nil {
 		locationName := "X-Amz-Expected-Bucket-Owner"
 		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
 	}
@@ -3499,6 +4071,10 @@ func (*awsRestxml_serializeOpGetObjectAttributes) ID() string {
 func (m *awsRestxml_serializeOpGetObjectAttributes) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
 	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
 ) {
+	_, span := tracing.StartSpan(ctx, "OperationSerializer")
+	endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+	defer endTimer()
+	defer span.End()
 	request, ok := in.Request.(*smithyhttp.Request)
 	if !ok {
 		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
@@ -3535,6 +4111,8 @@ func (m *awsRestxml_serializeOpGetObjectAttributes) HandleSerialize(ctx context.
 	}
 	in.Request = request
 
+	endTimer()
+	span.End()
 	return next.HandleSerialize(ctx, in)
 }
 func awsRestxml_serializeOpHttpBindingsGetObjectAttributesInput(v *GetObjectAttributesInput, encoder *httpbinding.Encoder) error {
@@ -3542,7 +4120,7 @@ func awsRestxml_serializeOpHttpBindingsGetObjectAttributesInput(v *GetObjectAttr
 		return fmt.Errorf("unsupported serialization of nil %T", v)
 	}
 
-	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+	if v.ExpectedBucketOwner != nil {
 		locationName := "X-Amz-Expected-Bucket-Owner"
 		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
 	}
@@ -3563,6 +4141,9 @@ func awsRestxml_serializeOpHttpBindingsGetObjectAttributesInput(v *GetObjectAttr
 
 	if v.ObjectAttributes != nil {
 		locationName := "X-Amz-Object-Attributes"
+		if len(v.ObjectAttributes) == 0 {
+			encoder.AddHeader(locationName).String("")
+		}
 		for i := range v.ObjectAttributes {
 			if len(v.ObjectAttributes[i]) > 0 {
 				escaped := string(v.ObjectAttributes[i])
@@ -3575,7 +4156,7 @@ func awsRestxml_serializeOpHttpBindingsGetObjectAttributesInput(v *GetObjectAttr
 		}
 	}
 
-	if v.PartNumberMarker != nil && len(*v.PartNumberMarker) > 0 {
+	if v.PartNumberMarker != nil {
 		locationName := "X-Amz-Part-Number-Marker"
 		encoder.SetHeader(locationName).String(*v.PartNumberMarker)
 	}
@@ -3585,17 +4166,17 @@ func awsRestxml_serializeOpHttpBindingsGetObjectAttributesInput(v *GetObjectAttr
 		encoder.SetHeader(locationName).String(string(v.RequestPayer))
 	}
 
-	if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 {
+	if v.SSECustomerAlgorithm != nil {
 		locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm"
 		encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm)
 	}
 
-	if v.SSECustomerKey != nil && len(*v.SSECustomerKey) > 0 {
+	if v.SSECustomerKey != nil {
 		locationName := "X-Amz-Server-Side-Encryption-Customer-Key"
 		encoder.SetHeader(locationName).String(*v.SSECustomerKey)
 	}
 
-	if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 {
+	if v.SSECustomerKeyMD5 != nil {
 		locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5"
 		encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5)
 	}
@@ -3617,6 +4198,10 @@ func (*awsRestxml_serializeOpGetObjectLegalHold) ID() string {
 func (m *awsRestxml_serializeOpGetObjectLegalHold) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
     out middleware.SerializeOutput, metadata middleware.Metadata, err error,
 ) {
+    _, span := tracing.StartSpan(ctx, "OperationSerializer")
+    endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+    defer endTimer()
+    defer span.End()
     request, ok := in.Request.(*smithyhttp.Request)
     if !ok {
         return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
@@ -3653,6 +4238,8 @@ func (m *awsRestxml_serializeOpGetObjectLegalHold) HandleSerialize(ctx context.C
     }
     in.Request = request
 
+    endTimer()
+    span.End()
     return next.HandleSerialize(ctx, in)
 }
 func awsRestxml_serializeOpHttpBindingsGetObjectLegalHoldInput(v *GetObjectLegalHoldInput, encoder *httpbinding.Encoder) error {
@@ -3660,7 +4247,7 @@ func awsRestxml_serializeOpHttpBindingsGetObjectLegal
         return fmt.Errorf("unsupported serialization of nil %T", v)
     }
 
-    if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+    if v.ExpectedBucketOwner != nil {
         locationName := "X-Amz-Expected-Bucket-Owner"
         encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
     }
@@ -3696,6 +4283,10 @@ func (*awsRestxml_serializeOpGetObjectLockConfiguration) ID() string {
 func (m *awsRestxml_serializeOpGetObjectLockConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
     out middleware.SerializeOutput, metadata middleware.Metadata, err error,
 ) {
+    _, span := tracing.StartSpan(ctx, "OperationSerializer")
+    endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+    defer endTimer()
+    defer span.End()
     request, ok := in.Request.(*smithyhttp.Request)
     if !ok {
         return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
@@ -3732,6 +4323,8 @@ func (m *awsRestxml_serializeOpGetObjectLockConfiguration) HandleSerialize(ctx c
     }
     in.Request = request
 
+    endTimer()
+    span.End()
     return next.HandleSerialize(ctx, in)
 }
 func awsRestxml_serializeOpHttpBindingsGetObjectLockConfigurationInput(v *GetObjectLockConfigurationInput, encoder *httpbinding.Encoder) error {
@@ -3739,7 +4332,7 @@ func awsRestxml_serializeOpHttpBindingsGetObjectLockConfigurationInput(v *GetObj
         return fmt.Errorf("unsupported serialization of nil %T", v)
     }
 
-    if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+    if v.ExpectedBucketOwner != nil {
         locationName := "X-Amz-Expected-Bucket-Owner"
         encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
     }
@@ -3757,6 +4350,10 @@ func (*awsRestxml_serializeOpGetObjectRetention) ID() string {
 func (m *awsRestxml_serializeOpGetObjectRetention) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
     out middleware.SerializeOutput, metadata middleware.Metadata, err error,
 ) {
+    _, span := tracing.StartSpan(ctx, "OperationSerializer")
+    endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+    defer endTimer()
+    defer span.End()
     request, ok := in.Request.(*smithyhttp.Request)
     if !ok {
         return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
@@ -3793,6 +4390,8 @@ func (m *awsRestxml_serializeOpGetObjectRetention) HandleSerialize(ctx context.C
     }
     in.Request = request
 
+    endTimer()
+    span.End()
     return next.HandleSerialize(ctx, in)
 }
 func awsRestxml_serializeOpHttpBindingsGetObjectRetentionInput(v *GetObjectRetentionInput, encoder *httpbinding.Encoder) error {
@@ -3800,7 +4399,7 @@ func awsRestxml_serializeOpHttpBindingsGetObjectReten
         return fmt.Errorf("unsupported serialization of nil %T", v)
     }
 
-    if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+    if v.ExpectedBucketOwner != nil {
         locationName := "X-Amz-Expected-Bucket-Owner"
         encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
     }
@@ -3836,6 +4435,10 @@ func (*awsRestxml_serializeOpGetObjectTagging) ID() string {
 func (m *awsRestxml_serializeOpGetObjectTagging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
     out middleware.SerializeOutput, metadata middleware.Metadata, err error,
 ) {
+    _, span := tracing.StartSpan(ctx, "OperationSerializer")
+    endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+    defer endTimer()
+    defer span.End()
     request, ok := in.Request.(*smithyhttp.Request)
     if !ok {
         return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
@@ -3872,6 +4475,8 @@ func (m *awsRestxml_serializeOpGetObjectTagging) HandleSerialize(ctx context.Con
     }
     in.Request = request
 
+    endTimer()
+    span.End()
     return next.HandleSerialize(ctx, in)
 }
 func awsRestxml_serializeOpHttpBindingsGetObjectTaggingInput(v *GetObjectTaggingInput, encoder *httpbinding.Encoder) error {
@@ -3879,7 +4484,7 @@ func awsRestxml_serializeOpHttpBindingsGetObjectTagging
         return fmt.Errorf("unsupported serialization of nil %T", v)
     }
 
-    if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+    if v.ExpectedBucketOwner != nil {
         locationName := "X-Amz-Expected-Bucket-Owner"
         encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
     }
@@ -3915,6 +4520,10 @@ func (*awsRestxml_serializeOpGetObjectTorrent) ID() string {
 func (m *awsRestxml_serializeOpGetObjectTorrent) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
     out middleware.SerializeOutput, metadata middleware.Metadata, err error,
 ) {
+    _, span := tracing.StartSpan(ctx, "OperationSerializer")
+    endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+    defer endTimer()
+    defer span.End()
     request, ok := in.Request.(*smithyhttp.Request)
     if !ok {
         return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
@@ -3951,6 +4560,8 @@ func (m *awsRestxml_serializeOpGetObjectTorrent) HandleSerialize(ctx context.Con
     }
     in.Request = request
 
+    endTimer()
+    span.End()
     return next.HandleSerialize(ctx, in)
 }
 func awsRestxml_serializeOpHttpBindingsGetObjectTorrentInput(v *GetObjectTorrentInput, encoder *httpbinding.Encoder) error {
@@ -3958,7 +4569,7 @@ func awsRestxml_serializeOpHttpBindingsGetObjectTorrent
         return fmt.Errorf("unsupported serialization of nil %T", v)
     }
 
-    if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+    if v.ExpectedBucketOwner != nil {
         locationName := "X-Amz-Expected-Bucket-Owner"
         encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
     }
@@ -3990,6 +4601,10 @@ func (*awsRestxml_serializeOpGetPublicAccessBlock) ID() string {
 func (m *awsRestxml_serializeOpGetPublicAccessBlock) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
     out middleware.SerializeOutput, metadata middleware.Metadata, err error,
 ) {
+    _, span := tracing.StartSpan(ctx, "OperationSerializer")
+    endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+    defer endTimer()
+    defer span.End()
     request, ok := in.Request.(*smithyhttp.Request)
     if !ok {
         return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
@@ -4026,6 +4641,8 @@ func (m *awsRestxml_serializeOpGetPublicAccessBlock) HandleSerialize(ctx context
     }
     in.Request = request
 
+    endTimer()
+    span.End()
     return next.HandleSerialize(ctx, in)
 }
 func awsRestxml_serializeOpHttpBindingsGetPublicAccessBlockInput(v *GetPublicAccessBlockInput, encoder *httpbinding.Encoder) error {
@@ -4033,7 +4650,7 @@ func awsRestxml_serializeOpHttpBindingsGetPublicAcc
         return fmt.Errorf("unsupported serialization of nil %T", v)
     }
 
-    if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+    if v.ExpectedBucketOwner != nil {
         locationName := "X-Amz-Expected-Bucket-Owner"
         encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
     }
@@ -4051,6 +4668,10 @@ func (*awsRestxml_serializeOpHeadBucket) ID() string {
 func (m *awsRestxml_serializeOpHeadBucket) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
     out middleware.SerializeOutput, metadata middleware.Metadata, err error,
 ) {
+    _, span := tracing.StartSpan(ctx, "OperationSerializer")
+    endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+    defer endTimer()
+    defer span.End()
     request, ok := in.Request.(*smithyhttp.Request)
     if !ok {
         return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
@@ -4087,6 +4708,8 @@ func (m *awsRestxml_serializeOpHeadBucket) HandleSerialize(ctx context.Context,
     }
     in.Request = request
 
+    endTimer()
+    span.End()
     return next.HandleSerialize(ctx, in)
 }
 func awsRestxml_serializeOpHttpBindingsHeadBucketInput(v *HeadBucketInput, encoder *httpbinding.Encoder) error {
@@ -4094,7 +4717,7 @@ func awsRestxml_serializeOpHttpBindingsHeadBucketInput(v *HeadBucketInput, encod
         return fmt.Errorf("unsupported serialization of nil %T", v)
     }
 
-    if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+    if v.ExpectedBucketOwner != nil {
         locationName := "X-Amz-Expected-Bucket-Owner"
         encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
     }
@@ -4112,6 +4735,10 @@ func (*awsRestxml_serializeOpHeadObject) ID() string {
 func (m *awsRestxml_serializeOpHeadObject) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
     out middleware.SerializeOutput, metadata middleware.Metadata, err error,
 ) {
+    _, span := tracing.StartSpan(ctx, "OperationSerializer")
+    endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+    defer endTimer()
+    defer span.End()
     request, ok := in.Request.(*smithyhttp.Request)
     if !ok {
         return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
@@ -4148,6 +4775,8 @@ func (m *awsRestxml_serializeOpHeadObject) HandleSerialize(ctx context.Context,
     }
     in.Request = request
 
+    endTimer()
+    span.End()
     return next.HandleSerialize(ctx, in)
 }
 func awsRestxml_serializeOpHttpBindingsHeadObjectInput(v *HeadObjectInput, encoder *httpbinding.Encoder) error {
@@ -4160,12 +4789,12 @@ func awsRestxml_serializeOpHttpBindingsHeadObjectInput(v *HeadObjectInput, encod
         encoder.SetHeader(locationName).String(string(v.ChecksumMode))
     }
 
-    if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+    if v.ExpectedBucketOwner != nil {
         locationName := "X-Amz-Expected-Bucket-Owner"
         encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
     }
 
-    if v.IfMatch != nil && len(*v.IfMatch) > 0 {
+    if v.IfMatch != nil {
         locationName := "If-Match"
         encoder.SetHeader(locationName).String(*v.IfMatch)
     }
@@ -4175,7 +4804,7 @@ func awsRestxml_serializeOpHttpBindingsHeadObjectInput(v *HeadObjectInput, encod
         encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.IfModifiedSince))
     }
 
-    if v.IfNoneMatch != nil && len(*v.IfNoneMatch) > 0 {
+    if v.IfNoneMatch != nil {
         locationName := "If-None-Match"
         encoder.SetHeader(locationName).String(*v.IfNoneMatch)
     }
@@ -4198,7 +4827,7 @@ func awsRestxml_serializeOpHttpBindingsHeadObjectInput(v *HeadObjectInput, encod
         encoder.SetQuery("partNumber").Integer(*v.PartNumber)
     }
 
-    if v.Range != nil && len(*v.Range) > 0 {
+    if v.Range != nil {
         locationName := "Range"
         encoder.SetHeader(locationName).String(*v.Range)
     }
@@ -4208,17 +4837,41 @@ func awsRestxml_serializeOpHttpBindingsHeadObjectInput(v *HeadObjectInput, encod
         encoder.SetHeader(locationName).String(string(v.RequestPayer))
     }
 
-    if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 {
+    if v.ResponseCacheControl != nil {
+        encoder.SetQuery("response-cache-control").String(*v.ResponseCacheControl)
+    }
+
+    if v.ResponseContentDisposition != nil {
+        encoder.SetQuery("response-content-disposition").String(*v.ResponseContentDisposition)
+    }
+
+    if v.ResponseContentEncoding != nil {
+        encoder.SetQuery("response-content-encoding").String(*v.ResponseContentEncoding)
+    }
+
+    if v.ResponseContentLanguage != nil {
+        encoder.SetQuery("response-content-language").String(*v.ResponseContentLanguage)
+    }
+
+    if v.ResponseContentType != nil {
+        encoder.SetQuery("response-content-type").String(*v.ResponseContentType)
+    }
+
+    if v.ResponseExpires != nil {
+        encoder.SetQuery("response-expires").String(smithytime.FormatHTTPDate(*v.ResponseExpires))
+    }
+
+    if v.SSECustomerAlgorithm != nil {
         locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm"
         encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm)
     }
 
-    if v.SSECustomerKey != nil && len(*v.SSECustomerKey) > 0 {
+    if v.SSECustomerKey != nil {
         locationName := "X-Amz-Server-Side-Encryption-Customer-Key"
         encoder.SetHeader(locationName).String(*v.SSECustomerKey)
     }
 
-    if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 {
+    if v.SSECustomerKeyMD5 != nil {
         locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5"
         encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5)
     }
@@ -4240,6 +4893,10 @@ func (*awsRestxml_serializeOpListBucketAnalyticsConfigurations) ID() string {
 func (m *awsRestxml_serializeOpListBucketAnalyticsConfigurations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
     out middleware.SerializeOutput, metadata middleware.Metadata, err error,
 ) {
+    _, span := tracing.StartSpan(ctx, "OperationSerializer")
+    endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+    defer endTimer()
+    defer span.End()
     request, ok := in.Request.(*smithyhttp.Request)
     if !ok {
         return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
@@ -4276,6 +4933,8 @@ func (m *awsRestxml_serializeOpListBucketAnalyticsConfigurations) HandleSerializ
     }
     in.Request = request
 
+    endTimer()
+    span.End()
     return next.HandleSerialize(ctx, in)
 }
 func awsRestxml_serializeOpHttpBindingsListBucketAnalyticsConfigurationsInput(v *ListBucketAnalyticsConfigurationsInput, encoder *httpbinding.Encoder) error {
@@ -4287,7 +4946,7 @@ func awsRestxml_serializeOpHttpBindingsListBucketAnalyticsConfigurationsInput(v
         encoder.SetQuery("continuation-token").String(*v.ContinuationToken)
     }
 
-    if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+    if v.ExpectedBucketOwner != nil {
         locationName := "X-Amz-Expected-Bucket-Owner"
         encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
     }
@@ -4305,6 +4964,10 @@ func (*awsRestxml_serializeOpListBucketIntelligentTieringConfigurations) ID() st
 func (m *awsRestxml_serializeOpListBucketIntelligentTieringConfigurations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
     out middleware.SerializeOutput, metadata middleware.Metadata, err error,
 ) {
+    _, span := tracing.StartSpan(ctx, "OperationSerializer")
+    endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+    defer endTimer()
+    defer span.End()
     request, ok := in.Request.(*smithyhttp.Request)
     if !ok {
         return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
@@ -4341,6 +5004,8 @@ func (m *awsRestxml_serializeOpListBucketIntelligentTieringConfigurations) Handl
     }
     in.Request = request
 
+    endTimer()
+    span.End()
     return next.HandleSerialize(ctx, in)
 }
 func awsRestxml_serializeOpHttpBindingsListBucketIntelligentTieringConfigurationsInput(v *ListBucketIntelligentTieringConfigurationsInput, encoder *httpbinding.Encoder) error {
@@ -4365,6 +5030,10 @@ func (*awsRestxml_serializeOpListBucketInventoryConfigurations) ID() string {
 func (m *awsRestxml_serializeOpListBucketInventoryConfigurations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
     out middleware.SerializeOutput, metadata middleware.Metadata, err error,
 ) {
+    _, span := tracing.StartSpan(ctx, "OperationSerializer")
+    endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+    defer endTimer()
+    defer span.End()
     request, ok := in.Request.(*smithyhttp.Request)
     if !ok {
         return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
@@ -4401,6 +5070,8 @@ func (m *awsRestxml_serializeOpListBucketInventoryConfigurations) HandleSerializ
     }
     in.Request = request
 
+    endTimer()
+    span.End()
     return next.HandleSerialize(ctx, in)
 }
 func awsRestxml_serializeOpHttpBindingsListBucketInventoryConfigurationsInput(v *ListBucketInventoryConfigurationsInput, encoder *httpbinding.Encoder) error {
@@ -4412,7 +5083,7 @@ func awsRestxml_serializeOpHttpBindingsListBucketInventoryConfigurationsInput(v
         encoder.SetQuery("continuation-token").String(*v.ContinuationToken)
     }
 
-    if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+    if v.ExpectedBucketOwner != nil {
         locationName := "X-Amz-Expected-Bucket-Owner"
         encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
     }
@@ -4430,6 +5101,10 @@ func (*awsRestxml_serializeOpListBucketMetricsConfigurations) ID() string {
 func (m *awsRestxml_serializeOpListBucketMetricsConfigurations) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
     out middleware.SerializeOutput, metadata middleware.Metadata, err error,
 ) {
+    _, span := tracing.StartSpan(ctx, "OperationSerializer")
+    endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+    defer endTimer()
+    defer span.End()
     request, ok := in.Request.(*smithyhttp.Request)
     if !ok {
         return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
@@ -4466,6 +5141,8 @@ func (m *awsRestxml_serializeOpListBucketMetricsConfigurations) HandleSerialize(
     }
     in.Request = request
 
+    endTimer()
+    span.End()
     return next.HandleSerialize(ctx, in)
 }
 func awsRestxml_serializeOpHttpBindingsListBucketMetricsConfigurationsInput(v *ListBucketMetricsConfigurationsInput, encoder *httpbinding.Encoder) error {
@@ -4477,7 +5154,7 @@ func awsRestxml_serializeOpHttpBindingsListBucketMetricsConfigurationsInput(v *L
         encoder.SetQuery("continuation-token").String(*v.ContinuationToken)
     }
 
-    if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+    if v.ExpectedBucketOwner != nil {
         locationName := "X-Amz-Expected-Bucket-Owner"
         encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
     }
@@ -4495,6 +5172,10 @@ func (*awsRestxml_serializeOpListBuckets) ID() string {
 func (m *awsRestxml_serializeOpListBuckets) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
     out middleware.SerializeOutput, metadata middleware.Metadata, err error,
 ) {
+    _, span := tracing.StartSpan(ctx, "OperationSerializer")
+    endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+    defer endTimer()
+    defer span.End()
     request, ok := in.Request.(*smithyhttp.Request)
     if !ok {
         return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
@@ -4522,11 +5203,17 @@ func (m *awsRestxml_serializeOpListBuckets) HandleSerialize(ctx context.Context,
         return out, metadata, &smithy.SerializationError{Err: err}
     }
 
+    if err := awsRestxml_serializeOpHttpBindingsListBucketsInput(input, restEncoder); err != nil {
+        return out, metadata, &smithy.SerializationError{Err: err}
+    }
+
     if request.Request, err = restEncoder.Encode(request.Request); err != nil {
         return out, metadata, &smithy.SerializationError{Err: err}
     }
     in.Request = request
 
+    endTimer()
+    span.End()
     return next.HandleSerialize(ctx, in)
 }
 func awsRestxml_serializeOpHttpBindingsListBucketsInput(v *ListBucketsInput, encoder *httpbinding.Encoder) error {
@@ -4534,6 +5221,22 @@ func awsRestxml_serializeOpHttpBindingsListBucketsInput(v *ListBucketsInput, enc
         return fmt.Errorf("unsupported serialization of nil %T", v)
     }
 
+    if v.BucketRegion != nil {
+        encoder.SetQuery("bucket-region").String(*v.BucketRegion)
+    }
+
+    if v.ContinuationToken != nil {
+        encoder.SetQuery("continuation-token").String(*v.ContinuationToken)
+    }
+
+    if v.MaxBuckets != nil {
+        encoder.SetQuery("max-buckets").Integer(*v.MaxBuckets)
+    }
+
+    if v.Prefix != nil {
+        encoder.SetQuery("prefix").String(*v.Prefix)
+    }
+
     return nil
 }
 
@@ -4547,6 +5250,10 @@ func (*awsRestxml_serializeOpListDirectoryBuckets) ID() string {
 func (m *awsRestxml_serializeOpListDirectoryBuckets) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
     out middleware.SerializeOutput, metadata middleware.Metadata, err error,
 ) {
+    _, span := tracing.StartSpan(ctx, "OperationSerializer")
+    endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+    defer endTimer()
+    defer span.End()
     request, ok := in.Request.(*smithyhttp.Request)
     if !ok {
         return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
@@ -4583,6 +5290,8 @@ func (m *awsRestxml_serializeOpListDirectoryBuckets) HandleSerialize(ctx context
     }
     in.Request = request
 
+    endTimer()
+    span.End()
     return next.HandleSerialize(ctx, in)
 }
 func awsRestxml_serializeOpHttpBindingsListDirectoryBucketsInput(v *ListDirectoryBucketsInput, encoder *httpbinding.Encoder) error {
@@ -4611,6 +5320,10 @@ func (*awsRestxml_serializeOpListMultipartUploads) ID() string {
 func (m *awsRestxml_serializeOpListMultipartUploads) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
     out middleware.SerializeOutput, metadata middleware.Metadata, err error,
 ) {
+    _, span := tracing.StartSpan(ctx, "OperationSerializer")
+    endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+    defer endTimer()
+    defer span.End()
     request, ok := in.Request.(*smithyhttp.Request)
     if !ok {
         return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
@@ -4647,6 +5360,8 @@ func (m *awsRestxml_serializeOpListMultipartUploads) HandleSerialize(ctx context
     }
     in.Request = request
 
+    endTimer()
+    span.End()
     return next.HandleSerialize(ctx, in)
 }
 func awsRestxml_serializeOpHttpBindingsListMultipartUploadsInput(v *ListMultipartUploadsInput, encoder *httpbinding.Encoder) error {
@@ -4662,7 +5377,7 @@ func awsRestxml_serializeOpHttpBindingsListMultipartUploadsInput(v *ListMultipar
         encoder.SetQuery("encoding-type").String(string(v.EncodingType))
     }
 
-    if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+    if v.ExpectedBucketOwner != nil {
         locationName := "X-Amz-Expected-Bucket-Owner"
         encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
     }
@@ -4701,6 +5416,10 @@ func (*awsRestxml_serializeOpListObjects) ID() string {
 func (m *awsRestxml_serializeOpListObjects) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
     out middleware.SerializeOutput, metadata middleware.Metadata, err error,
 ) {
+    _, span := tracing.StartSpan(ctx, "OperationSerializer")
+    endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+    defer endTimer()
+    defer span.End()
     request, ok := in.Request.(*smithyhttp.Request)
     if !ok {
         return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
@@ -4737,6 +5456,8 @@ func (m *awsRestxml_serializeOpListObjects) HandleSerialize(ctx context.Context,
     }
     in.Request = request
 
+    endTimer()
+    span.End()
     return next.HandleSerialize(ctx, in)
 }
 func awsRestxml_serializeOpHttpBindingsListObjectsInput(v *ListObjectsInput, encoder *httpbinding.Encoder) error {
@@ -4752,7 +5473,7 @@ func awsRestxml_serializeOpHttpBindingsListObjectsInput(v *ListObjectsInput, enc
         encoder.SetQuery("encoding-type").String(string(v.EncodingType))
     }
 
-    if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+    if v.ExpectedBucketOwner != nil {
         locationName := "X-Amz-Expected-Bucket-Owner"
         encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
     }
@@ -4767,6 +5488,9 @@ func awsRestxml_serializeOpHttpBindingsListObjectsInput(v *ListObjectsInput, enc
 
     if v.OptionalObjectAttributes != nil {
         locationName := "X-Amz-Optional-Object-Attributes"
+        if len(v.OptionalObjectAttributes) == 0 {
+            encoder.AddHeader(locationName).String("")
+        }
         for i := range v.OptionalObjectAttributes {
             if len(v.OptionalObjectAttributes[i]) > 0 {
                 escaped := string(v.OptionalObjectAttributes[i])
@@ -4801,6 +5525,10 @@ func (*awsRestxml_serializeOpListObjectsV2) ID() string {
 func (m *awsRestxml_serializeOpListObjectsV2) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
     out middleware.SerializeOutput, metadata middleware.Metadata, err error,
 ) {
+    _, span := tracing.StartSpan(ctx, "OperationSerializer")
+    endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+    defer endTimer()
+    defer span.End()
     request, ok := in.Request.(*smithyhttp.Request)
     if !ok {
         return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
@@ -4837,6 +5565,8 @@ func (m *awsRestxml_serializeOpListObjectsV2) HandleSerialize(ctx context.Contex
     }
     in.Request = request
 
+    endTimer()
+    span.End()
     return next.HandleSerialize(ctx, in)
 }
 func awsRestxml_serializeOpHttpBindingsListObjectsV2Input(v *ListObjectsV2Input, encoder *httpbinding.Encoder) error {
@@ -4856,7 +5586,7 @@ func awsRestxml_serializeOpHttpBindingsListObjectsV2Input(v *ListObjectsV2Input,
         encoder.SetQuery("encoding-type").String(string(v.EncodingType))
     }
 
-    if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+    if v.ExpectedBucketOwner != nil {
         locationName := "X-Amz-Expected-Bucket-Owner"
         encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
     }
@@ -4871,6 +5601,9 @@ func awsRestxml_serializeOpHttpBindingsListObjectsV2Input(v *ListObjectsV2Input,
 
     if v.OptionalObjectAttributes != nil {
         locationName := "X-Amz-Optional-Object-Attributes"
+        if len(v.OptionalObjectAttributes) == 0 {
+            encoder.AddHeader(locationName).String("")
+        }
         for i := range v.OptionalObjectAttributes {
             if len(v.OptionalObjectAttributes[i]) > 0 {
                 escaped := string(v.OptionalObjectAttributes[i])
@@ -4909,6 +5642,10 @@ func (*awsRestxml_serializeOpListObjectVersions) ID() string {
 func (m *awsRestxml_serializeOpListObjectVersions) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
     out middleware.SerializeOutput, metadata middleware.Metadata, err error,
 ) {
+    _, span := tracing.StartSpan(ctx, "OperationSerializer")
+    endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+    defer endTimer()
+    defer span.End()
     request, ok := in.Request.(*smithyhttp.Request)
     if !ok {
         return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
@@ -4945,6 +5682,8 @@ func (m *awsRestxml_serializeOpListObjectVersions) HandleSerialize(ctx context.C
     }
     in.Request = request
 
+    endTimer()
+    span.End()
     return next.HandleSerialize(ctx, in)
 }
 func awsRestxml_serializeOpHttpBindingsListObjectVersionsInput(v *ListObjectVersionsInput, encoder *httpbinding.Encoder) error {
@@ -4960,7 +5699,7 @@ func awsRestxml_serializeOpHttpBindingsListObjectVers
         encoder.SetQuery("encoding-type").String(string(v.EncodingType))
     }
 
-    if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+    if v.ExpectedBucketOwner != nil {
         locationName := "X-Amz-Expected-Bucket-Owner"
         encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
     }
@@ -4975,6 +5714,9 @@ func awsRestxml_serializeOpHttpBindingsListObjectVers
 
     if v.OptionalObjectAttributes != nil {
         locationName := "X-Amz-Optional-Object-Attributes"
+        if len(v.OptionalObjectAttributes) == 0 {
+            encoder.AddHeader(locationName).String("")
+        }
         for i := range v.OptionalObjectAttributes {
             if len(v.OptionalObjectAttributes[i]) > 0 {
                 escaped := string(v.OptionalObjectAttributes[i])
@@ -5013,6 +5755,10 @@ func (*awsRestxml_serializeOpListParts) ID() string {
 func (m *awsRestxml_serializeOpListParts) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
     out middleware.SerializeOutput, metadata middleware.Metadata, err error,
 ) {
+    _, span := tracing.StartSpan(ctx, "OperationSerializer")
+    endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+    defer endTimer()
+    defer span.End()
     request, ok := in.Request.(*smithyhttp.Request)
     if !ok {
         return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
@@ -5049,6 +5795,8 @@ func (m *awsRestxml_serializeOpListParts) HandleSerialize(ctx context.Context, i
     }
     in.Request = request
 
+    endTimer()
+    span.End()
     return next.HandleSerialize(ctx, in)
 }
 func awsRestxml_serializeOpHttpBindingsListPartsInput(v *ListPartsInput, encoder *httpbinding.Encoder) error {
@@ -5056,7 +5804,7 @@ func awsRestxml_serializeOpHttpBindingsListPartsInput(v *ListPartsInput, encoder
         return fmt.Errorf("unsupported serialization of nil %T", v)
     }
 
-    if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+    if v.ExpectedBucketOwner != nil {
         locationName := "X-Amz-Expected-Bucket-Owner"
         encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
     }
@@ -5083,17 +5831,17 @@ func awsRestxml_serializeOpHttpBindingsListPartsInput(v *ListPartsInput, encoder
         encoder.SetHeader(locationName).String(string(v.RequestPayer))
     }
 
-    if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 {
+    if v.SSECustomerAlgorithm != nil {
         locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm"
         encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm)
     }
 
-    if v.SSECustomerKey != nil && len(*v.SSECustomerKey) > 0 {
+    if v.SSECustomerKey != nil {
         locationName := "X-Amz-Server-Side-Encryption-Customer-Key"
         encoder.SetHeader(locationName).String(*v.SSECustomerKey)
     }
 
-    if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 {
+    if v.SSECustomerKeyMD5 != nil {
         locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5"
         encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5)
     }
@@ -5115,6 +5863,10 @@ func (*awsRestxml_serializeOpPutBucketAccelerateConfiguration) ID() string {
 func (m *awsRestxml_serializeOpPutBucketAccelerateConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
     out middleware.SerializeOutput, metadata middleware.Metadata, err error,
 ) {
+    _, span := tracing.StartSpan(ctx, "OperationSerializer")
+    endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+    defer endTimer()
+    defer span.End()
     request, ok := in.Request.(*smithyhttp.Request)
     if !ok {
         return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
@@ -5175,6 +5927,8 @@ func (m *awsRestxml_serializeOpPutBucketAccelerateConfiguration) HandleSerialize
     }
     in.Request = request
 
+    endTimer()
+    span.End()
     return next.HandleSerialize(ctx, in)
 }
 func awsRestxml_serializeOpHttpBindingsPutBucketAccelerateConfigurationInput(v *PutBucketAccelerateConfigurationInput, encoder *httpbinding.Encoder) error {
@@ -5187,7 +5941,7 @@ func awsRestxml_serializeOpHttpBindingsPutBucketAccelerateConfigurationInput(v *
         encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm))
     }
 
-    if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+    if v.ExpectedBucketOwner != nil {
         locationName := "X-Amz-Expected-Bucket-Owner"
         encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
     }
@@ -5205,6 +5959,10 @@ func (*awsRestxml_serializeOpPutBucketAcl) ID() string {
 func (m *awsRestxml_serializeOpPutBucketAcl) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
     out middleware.SerializeOutput, metadata middleware.Metadata, err error,
 ) {
+    _, span := tracing.StartSpan(ctx, "OperationSerializer")
+    endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+    defer endTimer()
+    defer span.End()
     request, ok := in.Request.(*smithyhttp.Request)
     if !ok {
         return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
@@ -5265,6 +6023,8 @@ func (m *awsRestxml_serializeOpPutBucketAcl) HandleSerialize(ctx context.Context
     }
     in.Request = request
 
+    endTimer()
+    span.End()
     return next.HandleSerialize(ctx, in)
 }
 func awsRestxml_serializeOpHttpBindingsPutBucketAclInput(v *PutBucketAclInput, encoder *httpbinding.Encoder) error {
@@ -5282,37 +6042,37 @@ func awsRestxml_serializeOpHttpBindingsPutBucketAclInput(v *PutBucketAclInput, e
         encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm))
     }
 
-    if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 {
+    if v.ContentMD5 != nil {
         locationName := "Content-Md5"
         encoder.SetHeader(locationName).String(*v.ContentMD5)
     }
 
-    if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+    if v.ExpectedBucketOwner != nil {
         locationName := "X-Amz-Expected-Bucket-Owner"
         encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
     }
 
-    if v.GrantFullControl != nil && len(*v.GrantFullControl) > 0 {
+    if v.GrantFullControl != nil {
         locationName := "X-Amz-Grant-Full-Control"
         encoder.SetHeader(locationName).String(*v.GrantFullControl)
     }
 
-    if v.GrantRead != nil && len(*v.GrantRead) > 0 {
+    if v.GrantRead != nil {
         locationName := "X-Amz-Grant-Read"
         encoder.SetHeader(locationName).String(*v.GrantRead)
     }
 
-    if v.GrantReadACP != nil && len(*v.GrantReadACP) > 0 {
+    if v.GrantReadACP != nil {
         locationName := "X-Amz-Grant-Read-Acp"
         encoder.SetHeader(locationName).String(*v.GrantReadACP)
     }
 
-    if v.GrantWrite != nil && len(*v.GrantWrite) > 0 {
+    if v.GrantWrite != nil {
         locationName := "X-Amz-Grant-Write"
         encoder.SetHeader(locationName).String(*v.GrantWrite)
     }
 
-    if v.GrantWriteACP != nil && len(*v.GrantWriteACP) > 0 {
+    if v.GrantWriteACP != nil {
         locationName := "X-Amz-Grant-Write-Acp"
         encoder.SetHeader(locationName).String(*v.GrantWriteACP)
     }
@@ -5330,6 +6090,10 @@ func (*awsRestxml_serializeOpPutBucketAnalyticsConfiguration) ID() string {
 func (m *awsRestxml_serializeOpPutBucketAnalyticsConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
     out middleware.SerializeOutput, metadata middleware.Metadata, err error,
 ) {
+    _, span := tracing.StartSpan(ctx, "OperationSerializer")
+    endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+    defer endTimer()
+    defer span.End()
     request, ok := in.Request.(*smithyhttp.Request)
     if !ok {
         return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
@@ -5390,6 +6154,8 @@ func (m *awsRestxml_serializeOpPutBucketAnalyticsConfiguration) HandleSerialize(
     }
     in.Request = request
 
+    endTimer()
+    span.End()
     return next.HandleSerialize(ctx, in)
 }
 func awsRestxml_serializeOpHttpBindingsPutBucketAnalyticsConfigurationInput(v *PutBucketAnalyticsConfigurationInput, encoder *httpbinding.Encoder) error {
@@ -5397,7 +6163,7 @@ func awsRestxml_serializeOpHttpBindingsPutBucketAnalyticsConfigurationInput(v *P
         return fmt.Errorf("unsupported serialization of nil %T", v)
     }
 
-    if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+    if v.ExpectedBucketOwner != nil {
         locationName := "X-Amz-Expected-Bucket-Owner"
         encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
     }
@@ -5419,6 +6185,10 @@ func (*awsRestxml_serializeOpPutBucketCors) ID() string {
 func (m *awsRestxml_serializeOpPutBucketCors) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
     out middleware.SerializeOutput, metadata middleware.Metadata, err error,
 ) {
+    _, span := tracing.StartSpan(ctx, "OperationSerializer")
+    endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+    defer endTimer()
+    defer span.End()
     request, ok := in.Request.(*smithyhttp.Request)
     if !ok {
         return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
@@ -5479,6 +6249,8 @@ func (m *awsRestxml_serializeOpPutBucketCors) HandleSerialize(ctx context.Contex
     }
     in.Request = request
 
+    endTimer()
+    span.End()
     return next.HandleSerialize(ctx, in)
 }
 func awsRestxml_serializeOpHttpBindingsPutBucketCorsInput(v *PutBucketCorsInput, encoder *httpbinding.Encoder) error {
@@ -5491,12 +6263,12 @@ func awsRestxml_serializeOpHttpBindingsPutBucketCorsInput(v *PutBucketCorsInput,
         encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm))
     }
 
-    if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 {
+    if v.ContentMD5 != nil {
         locationName := "Content-Md5"
         encoder.SetHeader(locationName).String(*v.ContentMD5)
     }
 
-    if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+    if v.ExpectedBucketOwner != nil {
         locationName := "X-Amz-Expected-Bucket-Owner"
         encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
     }
@@ -5514,6 +6286,10 @@ func (*awsRestxml_serializeOpPutBucketEncryption) ID() string {
 func (m *awsRestxml_serializeOpPutBucketEncryption) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
     out middleware.SerializeOutput, metadata middleware.Metadata, err error,
 ) {
+    _, span := tracing.StartSpan(ctx, "OperationSerializer")
+    endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+    defer endTimer()
+    defer span.End()
     request, ok := in.Request.(*smithyhttp.Request)
     if !ok {
         return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
@@ -5574,6 +6350,8 @@ func (m *awsRestxml_serializeOpPutBucketEncryption) HandleSerialize(ctx context.
     }
     in.Request = request
 
+    endTimer()
+    span.End()
     return next.HandleSerialize(ctx, in)
 }
 func awsRestxml_serializeOpHttpBindingsPutBucketEncryptionInput(v *PutBucketEncryptionInput, encoder *httpbinding.Encoder) error {
@@ -5586,12 +6364,12 @@ func awsRestxml_serializeOpHttpBindingsPutBucketEncryptionInput(v *PutBucketEncr
         encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm))
     }
 
-    if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 {
+    if v.ContentMD5 != nil {
         locationName := "Content-Md5"
         encoder.SetHeader(locationName).String(*v.ContentMD5)
     }
 
-    if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+    if v.ExpectedBucketOwner != nil {
         locationName := "X-Amz-Expected-Bucket-Owner"
         encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
     }
@@ -5609,6 +6387,10 @@ func (*awsRestxml_serializeOpPutBucketIntelligentTieringConfiguration) ID() stri
 func (m *awsRestxml_serializeOpPutBucketIntelligentTieringConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
     out middleware.SerializeOutput, metadata middleware.Metadata, err error,
 ) {
+    _, span := tracing.StartSpan(ctx, "OperationSerializer")
+    endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+    defer endTimer()
+    defer span.End()
     request, ok := in.Request.(*smithyhttp.Request)
     if !ok {
         return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
@@ -5669,6 +6451,8 @@ func (m *awsRestxml_serializeOpPutBucketIntelligentTieringConfiguration) HandleS
     }
     in.Request = request
 
+    endTimer()
+    span.End()
     return next.HandleSerialize(ctx, in)
 }
 func awsRestxml_serializeOpHttpBindingsPutBucketIntelligentTieringConfigurationInput(v *PutBucketIntelligentTieringConfigurationInput, encoder *httpbinding.Encoder) error {
@@ -5693,6 +6477,10 @@ func (*awsRestxml_serializeOpPutBucketInventoryConfiguration) ID() string {
 func (m *awsRestxml_serializeOpPutBucketInventoryConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
     out middleware.SerializeOutput, metadata middleware.Metadata, err error,
 ) {
+    _, span := tracing.StartSpan(ctx, "OperationSerializer")
+    endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+    defer endTimer()
+    defer span.End()
     request, ok := in.Request.(*smithyhttp.Request)
     if !ok {
         return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
@@ -5753,6 +6541,8 @@ func (m *awsRestxml_serializeOpPutBucketInventoryConfiguration) HandleSerialize(
     }
     in.Request = request
 
+    endTimer()
+    span.End()
     return next.HandleSerialize(ctx, in)
 }
 func awsRestxml_serializeOpHttpBindingsPutBucketInventoryConfigurationInput(v *PutBucketInventoryConfigurationInput, encoder *httpbinding.Encoder) error {
@@ -5760,7 +6550,7 @@ func awsRestxml_serializeOpHttpBindingsPutBucketInventoryConfigurationInput(v *P
         return fmt.Errorf("unsupported serialization of nil %T", v)
     }
 
-    if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+    if v.ExpectedBucketOwner != nil {
         locationName := "X-Amz-Expected-Bucket-Owner"
         encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
     }
@@ -5782,6 +6572,10 @@ func (*awsRestxml_serializeOpPutBucketLifecycleConfiguration) ID() string {
 func (m *awsRestxml_serializeOpPutBucketLifecycleConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
     out middleware.SerializeOutput, metadata middleware.Metadata, err error,
 ) {
+    _, span := tracing.StartSpan(ctx, "OperationSerializer")
+    endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+    defer endTimer()
+    defer span.End()
     request, ok := in.Request.(*smithyhttp.Request)
     if !ok {
         return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
@@ -5842,6 +6636,8 @@ func (m *awsRestxml_serializeOpPutBucketLifecycleConfiguration) HandleSerialize(
     }
     in.Request = request
 
+    endTimer()
+    span.End()
     return next.HandleSerialize(ctx, in)
 }
 func awsRestxml_serializeOpHttpBindingsPutBucketLifecycleConfigurationInput(v *PutBucketLifecycleConfigurationInput, encoder *httpbinding.Encoder) error {
@@ -5854,11 +6650,16 @@ func awsRestxml_serializeOpHttpBindingsPutBucketLifecycleConfigurationInput(v *P
         encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm))
     }
 
-    if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+    if v.ExpectedBucketOwner != nil {
         locationName := "X-Amz-Expected-Bucket-Owner"
         encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
     }
 
+    if len(v.TransitionDefaultMinimumObjectSize) > 0 {
+        locationName := "X-Amz-Transition-Default-Minimum-Object-Size"
+        encoder.SetHeader(locationName).String(string(v.TransitionDefaultMinimumObjectSize))
+    }
+
     return nil
 }
 
@@ -5872,6 +6673,10 @@ func (*awsRestxml_serializeOpPutBucketLogging) ID() string {
 func (m *awsRestxml_serializeOpPutBucketLogging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
     out middleware.SerializeOutput, metadata middleware.Metadata, err error,
 ) {
+    _, span := tracing.StartSpan(ctx, "OperationSerializer")
+    endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+    defer endTimer()
+    defer span.End()
     request, ok := in.Request.(*smithyhttp.Request)
     if !ok {
         return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
@@ -5932,6 +6737,8 @@ func (m *awsRestxml_serializeOpPutBucketLogging) HandleSerialize(ctx context.Con
     }
     in.Request = request
 
+    endTimer()
+    span.End()
     return next.HandleSerialize(ctx, in)
 }
 func awsRestxml_serializeOpHttpBindingsPutBucketLoggingInput(v *PutBucketLoggingInput, encoder *httpbinding.Encoder) error {
@@ -5944,12 +6751,12 @@ func awsRestxml_serializeOpHttpBindingsPutBucketLoggingInput(v *PutBucketLogging
         encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm))
     }
 
-    if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 {
+    if v.ContentMD5 != nil {
         locationName := "Content-Md5"
         encoder.SetHeader(locationName).String(*v.ContentMD5)
     }
 
-    if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+    if v.ExpectedBucketOwner != nil {
         locationName := "X-Amz-Expected-Bucket-Owner"
         encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
     }
@@ -5967,6 +6774,10 @@ func (*awsRestxml_serializeOpPutBucketMetricsConfiguration) ID() string {
 func (m *awsRestxml_serializeOpPutBucketMetricsConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
     out middleware.SerializeOutput, metadata middleware.Metadata, err error,
 ) {
+    _, span := tracing.StartSpan(ctx, "OperationSerializer")
+    endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+    defer endTimer()
+    defer span.End()
     request, ok := in.Request.(*smithyhttp.Request)
     if !ok {
         return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
@@ -6027,6 +6838,8 @@ func (m *awsRestxml_serializeOpPutBucketMetricsConfiguration) HandleSerialize(ct
     }
     in.Request = request
 
+    endTimer()
+    span.End()
     return next.HandleSerialize(ctx, in)
 }
 func awsRestxml_serializeOpHttpBindingsPutBucketMetricsConfigurationInput(v *PutBucketMetricsConfigurationInput, encoder *httpbinding.Encoder) error {
@@ -6034,7 +6847,7 @@ func awsRestxml_serializeOpHttpBindingsPutBucketMetricsConfigurationInput(v *Put
         return fmt.Errorf("unsupported serialization of nil %T", v)
     }
 
-    if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+    if v.ExpectedBucketOwner != nil {
         locationName := "X-Amz-Expected-Bucket-Owner"
         encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
     }
@@ -6056,6 +6869,10 @@ func (*awsRestxml_serializeOpPutBucketNotificationConfiguration) ID() string {
 func (m *awsRestxml_serializeOpPutBucketNotificationConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
     out middleware.SerializeOutput, metadata middleware.Metadata, err error,
 ) {
+    _, span := tracing.StartSpan(ctx, "OperationSerializer")
+    endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+    defer endTimer()
+    defer span.End()
     request, ok := in.Request.(*smithyhttp.Request)
     if !ok {
         return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
@@ -6116,6 +6933,8 @@ func (m *awsRestxml_serializeOpPutBucketNotificationConfiguration) HandleSeriali
     }
     in.Request = request
 
+    endTimer()
+    span.End()
     return next.HandleSerialize(ctx, in)
 }
 func awsRestxml_serializeOpHttpBindingsPutBucketNotificationConfigurationInput(v *PutBucketNotificationConfigurationInput, encoder *httpbinding.Encoder) error {
@@ -6123,7 +6942,7 @@ func awsRestxml_serializeOpHttpBindingsPutBucketNotificationConfigurationInput(v
         return fmt.Errorf("unsupported serialization of nil %T", v)
     }
 
-    if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+    if v.ExpectedBucketOwner != nil {
         locationName := "X-Amz-Expected-Bucket-Owner"
         encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
     }
@@ -6146,6 +6965,10 @@ func (*awsRestxml_serializeOpPutBucketOwnershipControls) ID() string {
 func (m *awsRestxml_serializeOpPutBucketOwnershipControls) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
     out middleware.SerializeOutput, metadata middleware.Metadata, err error,
 ) {
+    _, span := tracing.StartSpan(ctx, "OperationSerializer")
+    endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+    defer endTimer()
+    defer span.End()
     request, ok := in.Request.(*smithyhttp.Request)
     if !ok {
         return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
@@ -6206,6 +7029,8 @@ func (m *awsRestxml_serializeOpPutBucketOwnershipControls) HandleSerialize(ctx c
     }
     in.Request = request
 
+    endTimer()
+    span.End()
     return next.HandleSerialize(ctx, in)
 }
 func awsRestxml_serializeOpHttpBindingsPutBucketOwnershipControlsInput(v *PutBucketOwnershipControlsInput, encoder *httpbinding.Encoder) error {
@@ -6213,12 +7038,12 @@ func awsRestxml_serializeOpHttpBindingsPutBucketOwnershipControlsInput(v *PutBuc
         return fmt.Errorf("unsupported serialization of nil %T", v)
     }
 
-    if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 {
+    if v.ContentMD5 != nil {
         locationName := "Content-Md5"
         encoder.SetHeader(locationName).String(*v.ContentMD5)
     }
 
-    if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+    if v.ExpectedBucketOwner != nil {
         locationName := "X-Amz-Expected-Bucket-Owner"
         encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
     }
@@ -6236,6 +7061,10 @@ func (*awsRestxml_serializeOpPutBucketPolicy) ID() string {
 func (m *awsRestxml_serializeOpPutBucketPolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
     out middleware.SerializeOutput, metadata middleware.Metadata, err error,
 ) {
+    _, span := tracing.StartSpan(ctx, "OperationSerializer")
+    endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+    defer endTimer()
+    defer span.End()
     request, ok := in.Request.(*smithyhttp.Request)
     if !ok {
         return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
@@ -6284,6 +7113,8 @@ func (m *awsRestxml_serializeOpPutBucketPolicy) HandleSerialize(ctx context.Cont
     }
     in.Request = request
 
+    endTimer()
+    span.End()
     return next.HandleSerialize(ctx, in)
 }
 func awsRestxml_serializeOpHttpBindingsPutBucketPolicyInput(v *PutBucketPolicyInput, encoder *httpbinding.Encoder) error {
@@ -6301,12 +7132,12 @@ func awsRestxml_serializeOpHttpBindingsPutBucketPolicyInput(v *PutBucketPolicyIn
         encoder.SetHeader(locationName).Boolean(*v.ConfirmRemoveSelfBucketAccess)
     }
 
-    if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 {
+    if v.ContentMD5 != nil {
         locationName := "Content-Md5"
         encoder.SetHeader(locationName).String(*v.ContentMD5)
     }
 
-    if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+    if v.ExpectedBucketOwner != nil {
         locationName := "X-Amz-Expected-Bucket-Owner"
         encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
     }
@@ -6324,6 +7155,10 @@ func (*awsRestxml_serializeOpPutBucketReplication) ID() string {
 func (m *awsRestxml_serializeOpPutBucketReplication) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
     out middleware.SerializeOutput, metadata middleware.Metadata, err error,
 ) {
+    _, span := tracing.StartSpan(ctx, "OperationSerializer")
+    endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+    defer endTimer()
+    defer span.End()
     request, ok := in.Request.(*smithyhttp.Request)
     if !ok {
         return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
@@ -6384,6 +7219,8 @@ func (m *awsRestxml_serializeOpPutBucketReplication) HandleSerialize(ctx context
     }
     in.Request = request
 
+    endTimer()
+    span.End()
     return next.HandleSerialize(ctx, in)
 }
 func awsRestxml_serializeOpHttpBindingsPutBucketReplicationInput(v *PutBucketReplicationInput, encoder *httpbinding.Encoder) error {
@@ -6396,17 +7233,17 @@ func awsRestxml_serializeOpHttpBindingsPutBucketReplicationInput(v *PutBucketRep
         encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm))
     }
 
-    if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 {
+    if v.ContentMD5 != nil {
         locationName := "Content-Md5"
         encoder.SetHeader(locationName).String(*v.ContentMD5)
     }
 
-    if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+    if v.ExpectedBucketOwner != nil {
         locationName := "X-Amz-Expected-Bucket-Owner"
         encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
     }
 
-    if v.Token != nil && len(*v.Token) > 0 {
+    if v.Token != nil {
         locationName := "X-Amz-Bucket-Object-Lock-Token"
         encoder.SetHeader(locationName).String(*v.Token)
     }
@@ -6424,6 +7261,10 @@ func (*awsRestxml_serializeOpPutBucketRequestPayment) ID() string {
 func (m *awsRestxml_serializeOpPutBucketRequestPayment) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
     out middleware.SerializeOutput, metadata middleware.Metadata, err error,
 ) {
+    _, span := tracing.StartSpan(ctx, "OperationSerializer")
+    endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+    defer endTimer()
+    defer span.End()
     request, ok := in.Request.(*smithyhttp.Request)
     if !ok {
         return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
@@ -6484,6 +7325,8 @@ func (m *awsRestxml_serializeOpPutBucketRequestPayment) HandleSerialize(ctx cont
     }
     in.Request = request
 
+    endTimer()
+    span.End()
     return next.HandleSerialize(ctx, in)
 }
 func awsRestxml_serializeOpHttpBindingsPutBucketRequestPaymentInput(v *PutBucketRequestPaymentInput, encoder *httpbinding.Encoder) error {
@@ -6496,12 +7339,12 @@ func awsRestxml_serializeOpHttpBindingsPutBucketRequestPaymentInput(v *PutBucket
         encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm))
     }
 
-    if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 {
+    if v.ContentMD5 != nil {
         locationName := "Content-Md5"
         encoder.SetHeader(locationName).String(*v.ContentMD5)
     }
 
-    if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+    if v.ExpectedBucketOwner != nil {
         locationName := "X-Amz-Expected-Bucket-Owner"
         encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
     }
@@ -6519,6 +7362,10 @@ func (*awsRestxml_serializeOpPutBucketTagging) ID() string {
 func (m *awsRestxml_serializeOpPutBucketTagging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
     out middleware.SerializeOutput, metadata middleware.Metadata, err error,
 ) {
+    _, span := tracing.StartSpan(ctx, "OperationSerializer")
+    endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+    defer endTimer()
+    defer span.End()
     request, ok := in.Request.(*smithyhttp.Request)
     if !ok {
         return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
@@ -6579,6 +7426,8 @@ func (m *awsRestxml_serializeOpPutBucketTagging) HandleSerialize(ctx context.Con
     }
     in.Request = request
 
+    endTimer()
+    span.End()
     return next.HandleSerialize(ctx, in)
 }
 func awsRestxml_serializeOpHttpBindingsPutBucketTaggingInput(v *PutBucketTaggingInput, encoder *httpbinding.Encoder) error {
@@ -6591,12 +7440,12 @@ func awsRestxml_serializeOpHttpBindingsPutBucketTaggingInput(v *PutBucketTagging
         encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm))
     }
 
-    if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 {
+    if v.ContentMD5 != nil {
         locationName := "Content-Md5"
         encoder.SetHeader(locationName).String(*v.ContentMD5)
     }
 
-    if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+    if v.ExpectedBucketOwner != nil {
         locationName := "X-Amz-Expected-Bucket-Owner"
         encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
     }
@@ -6614,6 +7463,10 @@ func (*awsRestxml_serializeOpPutBucketVersioning) ID() string {
 func (m *awsRestxml_serializeOpPutBucketVersioning) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
     out middleware.SerializeOutput, metadata middleware.Metadata, err error,
 ) {
+    _, span := tracing.StartSpan(ctx, "OperationSerializer")
+    endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+    defer endTimer()
+    defer span.End()
     request, ok := in.Request.(*smithyhttp.Request)
     if !ok {
         return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
@@ -6674,6 +7527,8 @@ func (m *awsRestxml_serializeOpPutBucketVersioning) HandleSerialize(ctx context.
     }
     in.Request = request
 
+    endTimer()
+    span.End()
     return next.HandleSerialize(ctx, in)
 }
 func awsRestxml_serializeOpHttpBindingsPutBucketVersioningInput(v *PutBucketVersioningInput, encoder *httpbinding.Encoder) error {
@@ -6686,17 +7541,17 @@ func awsRestxml_serializeOpHttpBindingsPutBucketVers
         encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm))
     }
 
-    if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 {
+    if v.ContentMD5 != nil {
         locationName := "Content-Md5"
         encoder.SetHeader(locationName).String(*v.ContentMD5)
     }
 
-    if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+    if v.ExpectedBucketOwner != nil {
         locationName := "X-Amz-Expected-Bucket-Owner"
         encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
     }
 
-    if v.MFA != nil && len(*v.MFA) > 0 {
+    if v.MFA != nil {
         locationName := "X-Amz-Mfa"
         encoder.SetHeader(locationName).String(*v.MFA)
     }
@@ -6714,6 +7569,10 @@ func (*awsRestxml_serializeOpPutBucketWebsite) ID() string {
 func (m *awsRestxml_serializeOpPutBucketWebsite) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
     out middleware.SerializeOutput, metadata middleware.Metadata, err error,
 ) {
+    _, span := tracing.StartSpan(ctx, "OperationSerializer")
+    endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+    defer endTimer()
+    defer span.End()
     request, ok := in.Request.(*smithyhttp.Request)
     if !ok {
         return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
@@ -6774,6 +7633,8 @@ func (m *awsRestxml_serializeOpPutBucketWebsite) HandleSerialize(ctx context.Con
     }
     in.Request = request
 
+    endTimer()
+    span.End()
     return next.HandleSerialize(ctx, in)
 }
 func awsRestxml_serializeOpHttpBindingsPutBucketWebsiteInput(v *PutBucketWebsiteInput, encoder *httpbinding.Encoder) error {
@@ -6786,12 +7647,12 @@ func awsRestxml_serializeOpHttpBindingsPutBucketWebsite
         encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm))
     }
 
-    if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 {
+    if v.ContentMD5 != nil {
         locationName := "Content-Md5"
         encoder.SetHeader(locationName).String(*v.ContentMD5)
     }
 
-    if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+    if v.ExpectedBucketOwner != nil {
         locationName := "X-Amz-Expected-Bucket-Owner"
         encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
     }
@@ -6809,6 +7670,10 @@ func (*awsRestxml_serializeOpPutObject) ID() string {
 func (m *awsRestxml_serializeOpPutObject) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
     out middleware.SerializeOutput, metadata middleware.Metadata, err error,
 ) {
+    _, span := tracing.StartSpan(ctx, "OperationSerializer")
+    endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+    defer endTimer()
+    defer span.End()
     request, ok := in.Request.(*smithyhttp.Request)
     if !ok {
         return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
@@ -6857,6 +7722,8 @@ func (m *awsRestxml_serializeOpPutObject) HandleSerialize(ctx context.Context, i
     }
     in.Request = request
 
+    endTimer()
+    span.End()
     return next.HandleSerialize(ctx, in)
 }
 func awsRestxml_serializeOpHttpBindingsPutObjectInput(v *PutObjectInput, encoder *httpbinding.Encoder) error {
@@ -6874,7 +7741,7 @@ func awsRestxml_serializeOpHttpBindingsPutObjectInput(v *PutObjectInput, encoder
         encoder.SetHeader(locationName).Boolean(*v.BucketKeyEnabled)
     }
 
-    if v.CacheControl != nil && len(*v.CacheControl) > 0 {
+    if v.CacheControl != nil {
         locationName := "Cache-Control"
         encoder.SetHeader(locationName).String(*v.CacheControl)
     }
@@ -6884,37 +7751,42 @@ func awsRestxml_serializeOpHttpBindingsPutObjectInput(v *PutObjectInput, encoder
         encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm))
     }
 
-    if v.ChecksumCRC32 != nil && len(*v.ChecksumCRC32) > 0 {
+    if v.ChecksumCRC32 != nil {
         locationName := "X-Amz-Checksum-Crc32"
         encoder.SetHeader(locationName).String(*v.ChecksumCRC32)
     }
 
-    if v.ChecksumCRC32C != nil && len(*v.ChecksumCRC32C) > 0 {
+    if v.ChecksumCRC32C != nil {
         locationName := "X-Amz-Checksum-Crc32c"
         encoder.SetHeader(locationName).String(*v.ChecksumCRC32C)
     }
 
-    if v.ChecksumSHA1 != nil && len(*v.ChecksumSHA1) > 0 {
+    if v.ChecksumCRC64NVME != nil {
+        locationName := "X-Amz-Checksum-Crc64nvme"
+        encoder.SetHeader(locationName).String(*v.ChecksumCRC64NVME)
+    }
+
+    if v.ChecksumSHA1 != nil {
         locationName := "X-Amz-Checksum-Sha1"
         encoder.SetHeader(locationName).String(*v.ChecksumSHA1)
     }
 
-    if v.ChecksumSHA256 != nil && len(*v.ChecksumSHA256) > 0 {
+    if v.ChecksumSHA256 != nil {
         locationName := "X-Amz-Checksum-Sha256"
         encoder.SetHeader(locationName).String(*v.ChecksumSHA256)
     }
 
-    if v.ContentDisposition != nil && len(*v.ContentDisposition) > 0 {
+    if v.ContentDisposition != nil {
         locationName := "Content-Disposition"
        encoder.SetHeader(locationName).String(*v.ContentDisposition)
     }
 
-    if v.ContentEncoding != nil && len(*v.ContentEncoding) > 0 {
+    if v.ContentEncoding != nil {
        locationName := "Content-Encoding"
        encoder.SetHeader(locationName).String(*v.ContentEncoding)
     }
 
-    if v.ContentLanguage != nil && len(*v.ContentLanguage) > 0 {
+    if v.ContentLanguage != nil {
        locationName := "Content-Language"
        encoder.SetHeader(locationName).String(*v.ContentLanguage)
     }
@@ -6924,17 +7796,17 @@ func awsRestxml_serializeOpHttpBindingsPutObjectInput(v *PutObjectInput, encoder
        encoder.SetHeader(locationName).Long(*v.ContentLength)
     }
 
-    if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 {
+    if v.ContentMD5 != nil {
        locationName := "Content-Md5"
        encoder.SetHeader(locationName).String(*v.ContentMD5)
     }
 
-    if v.ContentType != nil && len(*v.ContentType) > 0 {
+    if v.ContentType != nil {
        locationName := "Content-Type"
        encoder.SetHeader(locationName).String(*v.ContentType)
     }
 
-    if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+    if v.ExpectedBucketOwner != nil {
        locationName := "X-Amz-Expected-Bucket-Owner"
        encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
     }
@@ -6944,26 +7816,36 @@ func awsRestxml_serializeOpHttpBindingsPutObjectInput(v *PutObjectInput, encoder
        encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.Expires))
     }
 
-    if v.GrantFullControl != nil && len(*v.GrantFullControl) > 0 {
+    if v.GrantFullControl != nil {
        locationName := "X-Amz-Grant-Full-Control"
        encoder.SetHeader(locationName).String(*v.GrantFullControl)
     }
 
-    if v.GrantRead != nil && len(*v.GrantRead) > 0 {
+    if v.GrantRead != nil {
        locationName := "X-Amz-Grant-Read"
        encoder.SetHeader(locationName).String(*v.GrantRead)
     }
 
-    if v.GrantReadACP != nil && len(*v.GrantReadACP) > 0 {
+    if v.GrantReadACP != nil {
        locationName := "X-Amz-Grant-Read-Acp"
        encoder.SetHeader(locationName).String(*v.GrantReadACP)
     }
 
-    if v.GrantWriteACP != nil && len(*v.GrantWriteACP) > 0 {
+    if v.GrantWriteACP != nil {
        locationName := "X-Amz-Grant-Write-Acp"
        encoder.SetHeader(locationName).String(*v.GrantWriteACP)
     }
 
+    if v.IfMatch != nil {
+        locationName := "If-Match"
+        encoder.SetHeader(locationName).String(*v.IfMatch)
+    }
+
+    if v.IfNoneMatch != nil {
+        locationName := "If-None-Match"
+        encoder.SetHeader(locationName).String(*v.IfNoneMatch)
+    }
+
     if v.Key == nil || len(*v.Key) == 0 {
         return &smithy.SerializationError{Err: fmt.Errorf("input member Key must not be empty")}
     }
@@ -6976,9 +7858,7 @@ func awsRestxml_serializeOpHttpBindingsPutObjectInput(v *PutObjectInput, encoder
     if v.Metadata != nil {
         hv := encoder.Headers("X-Amz-Meta-")
         for mapKey, mapVal := range v.Metadata {
-            if len(mapVal) > 0 {
-                hv.SetHeader(http.CanonicalHeaderKey(mapKey)).String(mapVal)
-            }
+            hv.SetHeader(http.CanonicalHeaderKey(mapKey)).String(mapVal)
         }
     }
 
@@ -7007,27 +7887,27 @@ func awsRestxml_serializeOpHttpBindingsPutObjectInput(v *PutObjectInput, encoder
         encoder.SetHeader(locationName).String(string(v.ServerSideEncryption))
     }
 
-    if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 {
+    if v.SSECustomerAlgorithm != nil {
         locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm"
         encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm)
     }
 
-    if v.SSECustomerKey != nil && len(*v.SSECustomerKey) > 0 {
+    if v.SSECustomerKey != nil {
         locationName := "X-Amz-Server-Side-Encryption-Customer-Key"
         encoder.SetHeader(locationName).String(*v.SSECustomerKey)
     }
 
-    if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 {
+    if v.SSECustomerKeyMD5 != nil {
         locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5"
         encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5)
     }
 
-    if v.SSEKMSEncryptionContext != nil && len(*v.SSEKMSEncryptionContext) > 0 {
+    if v.SSEKMSEncryptionContext != nil {
         locationName := "X-Amz-Server-Side-Encryption-Context"
         encoder.SetHeader(locationName).String(*v.SSEKMSEncryptionContext)
     }
 
-    if v.SSEKMSKeyId != nil && len(*v.SSEKMSKeyId) > 0 {
+    if v.SSEKMSKeyId != nil {
         locationName := "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id"
         encoder.SetHeader(locationName).String(*v.SSEKMSKeyId)
     }
@@ -7037,16 +7917,21 @@ func awsRestxml_serializeOpHttpBindingsPutObjectInput(v *PutObjectInput, encoder
         encoder.SetHeader(locationName).String(string(v.StorageClass))
     }
 
-    if v.Tagging != nil && len(*v.Tagging) > 0 {
+    if v.Tagging != nil {
         locationName := "X-Amz-Tagging"
         encoder.SetHeader(locationName).String(*v.Tagging)
     }
 
-    if v.WebsiteRedirectLocation != nil && len(*v.WebsiteRedirectLocation) > 0 {
+    if v.WebsiteRedirectLocation != nil {
         locationName := "X-Amz-Website-Redirect-Location"
         encoder.SetHeader(locationName).String(*v.WebsiteRedirectLocation)
     }
 
+    if v.WriteOffsetBytes != nil {
+        locationName := "X-Amz-Write-Offset-Bytes"
+        encoder.SetHeader(locationName).Long(*v.WriteOffsetBytes)
+    }
+
     return nil
 }
 
@@ -7060,6 +7945,10 @@ func (*awsRestxml_serializeOpPutObjectAcl) ID() string {
 func (m *awsRestxml_serializeOpPutObjectAcl) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
     out middleware.SerializeOutput, metadata middleware.Metadata, err error,
 ) {
+    _, span := tracing.StartSpan(ctx, "OperationSerializer")
+    endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+    defer endTimer()
+    defer span.End()
     request, ok := in.Request.(*smithyhttp.Request)
     if !ok {
         return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
@@ -7120,6 +8009,8 @@ func (m *awsRestxml_serializeOpPutObjectAcl) HandleSerialize(ctx context.Context
     }
     in.Request = request
 
+    endTimer()
+    span.End()
     return next.HandleSerialize(ctx, in)
 }
 func awsRestxml_serializeOpHttpBindingsPutObjectAclInput(v *PutObjectAclInput, encoder *httpbinding.Encoder) error {
@@ -7137,37 +8028,37 @@ func awsRestxml_serializeOpHttpBindingsPutObjectAclInput(v *PutObjectAclInput, e
         encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm))
     }
 
-    if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 {
+    if v.ContentMD5 != nil {
         locationName := "Content-Md5"
         encoder.SetHeader(locationName).String(*v.ContentMD5)
     }
 
-    if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+    if v.ExpectedBucketOwner != nil {
         locationName := "X-Amz-Expected-Bucket-Owner"
         encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
     }
 
-    if v.GrantFullControl != nil && len(*v.GrantFullControl) > 0 {
+    if v.GrantFullControl != nil {
         locationName := "X-Amz-Grant-Full-Control"
         encoder.SetHeader(locationName).String(*v.GrantFullControl)
     }
 
-    if v.GrantRead != nil && len(*v.GrantRead) > 0 {
+    if v.GrantRead != nil {
         locationName := "X-Amz-Grant-Read"
         encoder.SetHeader(locationName).String(*v.GrantRead)
     }
 
-    if v.GrantReadACP != nil && len(*v.GrantReadACP) > 0 {
+    if v.GrantReadACP != nil {
         locationName := "X-Amz-Grant-Read-Acp"
         encoder.SetHeader(locationName).String(*v.GrantReadACP)
     }
 
-    if v.GrantWrite != nil && len(*v.GrantWrite) > 0 {
+    if v.GrantWrite != nil {
         locationName := "X-Amz-Grant-Write"
         encoder.SetHeader(locationName).String(*v.GrantWrite)
     }
 
-    if v.GrantWriteACP != nil && len(*v.GrantWriteACP) > 0 {
+    if v.GrantWriteACP != nil {
         locationName := "X-Amz-Grant-Write-Acp"
         encoder.SetHeader(locationName).String(*v.GrantWriteACP)
     }
@@ -7203,6 +8094,10 @@ func (*awsRestxml_serializeOpPutObjectLegalHold) ID() string {
 func (m *awsRestxml_serializeOpPutObjectLegalHold) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
     out middleware.SerializeOutput, metadata middleware.Metadata, err error,
 ) {
+    _, span := tracing.StartSpan(ctx, "OperationSerializer")
+    endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+    defer endTimer()
+    defer span.End()
     request, ok := in.Request.(*smithyhttp.Request)
     if !ok {
         return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
@@ -7263,6 +8158,8 @@ func (m *awsRestxml_serializeOpPutObjectLegalHold) HandleSerialize(ctx context.C
     }
     in.Request = request
 
+    endTimer()
+    span.End()
     return next.HandleSerialize(ctx, in)
 }
 func awsRestxml_serializeOpHttpBindingsPutObjectLegalHoldInput(v *PutObjectLegalHoldInput, encoder *httpbinding.Encoder) error {
@@ -7275,12 +8172,12 @@ func awsRestxml_serializeOpHttpBindingsPutObjectLegalHoldInput(v *PutObjectLegal
         encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm))
     }
 
-    if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 {
+    if v.ContentMD5 != nil {
         locationName := "Content-Md5"
         encoder.SetHeader(locationName).String(*v.ContentMD5)
     }
 
-    if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
+    if v.ExpectedBucketOwner != nil {
         locationName := "X-Amz-Expected-Bucket-Owner"
         encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
     }
@@ -7316,6 +8213,10 @@ func (*awsRestxml_serializeOpPutObjectLockConfiguration) ID() string {
 func (m *awsRestxml_serializeOpPutObjectLockConfiguration) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
     out middleware.SerializeOutput, metadata middleware.Metadata, err error,
 ) {
+    _, span :=
tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -7376,6 +8277,8 @@ func (m *awsRestxml_serializeOpPutObjectLockConfiguration) HandleSerialize(ctx c } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsPutObjectLockConfigurationInput(v *PutObjectLockConfigurationInput, encoder *httpbinding.Encoder) error { @@ -7388,12 +8291,12 @@ func awsRestxml_serializeOpHttpBindingsPutObjectLockConfigurationInput(v *PutObj encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) } - if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + if v.ContentMD5 != nil { locationName := "Content-Md5" encoder.SetHeader(locationName).String(*v.ContentMD5) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -7403,7 +8306,7 @@ func awsRestxml_serializeOpHttpBindingsPutObjectLockConfigurationInput(v *PutObj encoder.SetHeader(locationName).String(string(v.RequestPayer)) } - if v.Token != nil && len(*v.Token) > 0 { + if v.Token != nil { locationName := "X-Amz-Bucket-Object-Lock-Token" encoder.SetHeader(locationName).String(*v.Token) } @@ -7421,6 +8324,10 @@ func (*awsRestxml_serializeOpPutObjectRetention) ID() string { func (m *awsRestxml_serializeOpPutObjectRetention) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -7481,6 +8388,8 @@ func (m *awsRestxml_serializeOpPutObjectRetention) HandleSerialize(ctx context.C } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsPutObjectRetentionInput(v *PutObjectRetentionInput, encoder *httpbinding.Encoder) error { @@ -7498,12 +8407,12 @@ func awsRestxml_serializeOpHttpBindingsPutObjectRetentionInput(v *PutObjectReten encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) } - if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + if v.ContentMD5 != nil { locationName := "Content-Md5" encoder.SetHeader(locationName).String(*v.ContentMD5) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -7539,6 +8448,10 @@ func (*awsRestxml_serializeOpPutObjectTagging) ID() string { func (m *awsRestxml_serializeOpPutObjectTagging) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer 
span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -7599,6 +8512,8 @@ func (m *awsRestxml_serializeOpPutObjectTagging) HandleSerialize(ctx context.Con } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsPutObjectTaggingInput(v *PutObjectTaggingInput, encoder *httpbinding.Encoder) error { @@ -7611,12 +8526,12 @@ func awsRestxml_serializeOpHttpBindingsPutObjectTaggingInput(v *PutObjectTagging encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) } - if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + if v.ContentMD5 != nil { locationName := "Content-Md5" encoder.SetHeader(locationName).String(*v.ContentMD5) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -7652,6 +8567,10 @@ func (*awsRestxml_serializeOpPutPublicAccessBlock) ID() string { func (m *awsRestxml_serializeOpPutPublicAccessBlock) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -7712,6 +8631,8 @@ func (m *awsRestxml_serializeOpPutPublicAccessBlock) HandleSerialize(ctx context } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsPutPublicAccessBlockInput(v *PutPublicAccessBlockInput, encoder *httpbinding.Encoder) error { @@ -7724,12 +8645,12 @@ func awsRestxml_serializeOpHttpBindingsPutPublicAccessBlockInput(v *PutPublicAcc encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) } - if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + if v.ContentMD5 != nil { locationName := "Content-Md5" encoder.SetHeader(locationName).String(*v.ContentMD5) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -7747,6 +8668,10 @@ func (*awsRestxml_serializeOpRestoreObject) ID() string { func (m *awsRestxml_serializeOpRestoreObject) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -7758,7 +8683,7 @@ func (m *awsRestxml_serializeOpRestoreObject) HandleSerialize(ctx context.Contex return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Key+}?restore&x-id=RestoreObject") + opPath, opQuery := 
httpbinding.SplitURI("/{Key+}?restore") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "POST" @@ -7807,6 +8732,8 @@ func (m *awsRestxml_serializeOpRestoreObject) HandleSerialize(ctx context.Contex } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsRestoreObjectInput(v *RestoreObjectInput, encoder *httpbinding.Encoder) error { @@ -7819,7 +8746,7 @@ func awsRestxml_serializeOpHttpBindingsRestoreObjectInput(v *RestoreObjectInput, encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -7855,6 +8782,10 @@ func (*awsRestxml_serializeOpSelectObjectContent) ID() string { func (m *awsRestxml_serializeOpSelectObjectContent) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -7866,7 +8797,7 @@ func (m *awsRestxml_serializeOpSelectObjectContent) HandleSerialize(ctx context. return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Key+}?select&select-type=2&x-id=SelectObjectContent") + opPath, opQuery := httpbinding.SplitURI("/{Key+}?select&select-type=2") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "POST" @@ -7909,6 +8840,8 @@ func (m *awsRestxml_serializeOpSelectObjectContent) HandleSerialize(ctx context. 
} in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsSelectObjectContentInput(v *SelectObjectContentInput, encoder *httpbinding.Encoder) error { @@ -7916,7 +8849,7 @@ func awsRestxml_serializeOpHttpBindingsSelectObjectContentInput(v *SelectObjectC return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -7930,17 +8863,17 @@ func awsRestxml_serializeOpHttpBindingsSelectObjectContentInput(v *SelectObjectC } } - if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 { + if v.SSECustomerAlgorithm != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) } - if v.SSECustomerKey != nil && len(*v.SSECustomerKey) > 0 { + if v.SSECustomerKey != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Key" encoder.SetHeader(locationName).String(*v.SSECustomerKey) } - if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 { + if v.SSECustomerKeyMD5 != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) } @@ -8037,6 +8970,10 @@ func (*awsRestxml_serializeOpUploadPart) ID() string { func (m *awsRestxml_serializeOpUploadPart) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -8085,6 +9022,8 @@ func (m *awsRestxml_serializeOpUploadPart) HandleSerialize(ctx context.Context, } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsUploadPartInput(v *UploadPartInput, encoder *httpbinding.Encoder) error { @@ -8097,22 +9036,27 @@ func awsRestxml_serializeOpHttpBindingsUploadPartInput(v *UploadPartInput, encod encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) } - if v.ChecksumCRC32 != nil && len(*v.ChecksumCRC32) > 0 { + if v.ChecksumCRC32 != nil { locationName := "X-Amz-Checksum-Crc32" encoder.SetHeader(locationName).String(*v.ChecksumCRC32) } - if v.ChecksumCRC32C != nil && len(*v.ChecksumCRC32C) > 0 { + if v.ChecksumCRC32C != nil { locationName := "X-Amz-Checksum-Crc32c" encoder.SetHeader(locationName).String(*v.ChecksumCRC32C) } - if v.ChecksumSHA1 != nil && len(*v.ChecksumSHA1) > 0 { + if v.ChecksumCRC64NVME != nil { + locationName := "X-Amz-Checksum-Crc64nvme" + encoder.SetHeader(locationName).String(*v.ChecksumCRC64NVME) + } + + if v.ChecksumSHA1 != nil { locationName := "X-Amz-Checksum-Sha1" encoder.SetHeader(locationName).String(*v.ChecksumSHA1) } - if v.ChecksumSHA256 != nil && len(*v.ChecksumSHA256) > 0 { + if v.ChecksumSHA256 != nil { locationName := "X-Amz-Checksum-Sha256" encoder.SetHeader(locationName).String(*v.ChecksumSHA256) } @@ -8122,12 +9066,12 @@ func awsRestxml_serializeOpHttpBindingsUploadPartInput(v *UploadPartInput, encod 
encoder.SetHeader(locationName).Long(*v.ContentLength) } - if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { + if v.ContentMD5 != nil { locationName := "Content-Md5" encoder.SetHeader(locationName).String(*v.ContentMD5) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } @@ -8150,17 +9094,17 @@ func awsRestxml_serializeOpHttpBindingsUploadPartInput(v *UploadPartInput, encod encoder.SetHeader(locationName).String(string(v.RequestPayer)) } - if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 { + if v.SSECustomerAlgorithm != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) } - if v.SSECustomerKey != nil && len(*v.SSECustomerKey) > 0 { + if v.SSECustomerKey != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Key" encoder.SetHeader(locationName).String(*v.SSECustomerKey) } - if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 { + if v.SSECustomerKeyMD5 != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) } @@ -8182,6 +9126,10 @@ func (*awsRestxml_serializeOpUploadPartCopy) ID() string { func (m *awsRestxml_serializeOpUploadPartCopy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -8218,6 +9166,8 @@ func (m *awsRestxml_serializeOpUploadPartCopy) HandleSerialize(ctx context.Conte } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsUploadPartCopyInput(v *UploadPartCopyInput, encoder *httpbinding.Encoder) error { @@ -8225,12 +9175,12 @@ func awsRestxml_serializeOpHttpBindingsUploadPartCopyInput(v *UploadPartCopyInpu return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.CopySource != nil && len(*v.CopySource) > 0 { + if v.CopySource != nil { locationName := "X-Amz-Copy-Source" encoder.SetHeader(locationName).String(*v.CopySource) } - if v.CopySourceIfMatch != nil && len(*v.CopySourceIfMatch) > 0 { + if v.CopySourceIfMatch != nil { locationName := "X-Amz-Copy-Source-If-Match" encoder.SetHeader(locationName).String(*v.CopySourceIfMatch) } @@ -8240,7 +9190,7 @@ func awsRestxml_serializeOpHttpBindingsUploadPartCopyInput(v *UploadPartCopyInpu encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.CopySourceIfModifiedSince)) } - if v.CopySourceIfNoneMatch != nil && len(*v.CopySourceIfNoneMatch) > 0 { + if v.CopySourceIfNoneMatch != nil { locationName := "X-Amz-Copy-Source-If-None-Match" encoder.SetHeader(locationName).String(*v.CopySourceIfNoneMatch) } @@ -8250,32 +9200,32 @@ func awsRestxml_serializeOpHttpBindingsUploadPartCopyInput(v *UploadPartCopyInpu encoder.SetHeader(locationName).String(smithytime.FormatHTTPDate(*v.CopySourceIfUnmodifiedSince)) } - if v.CopySourceRange != nil && len(*v.CopySourceRange) > 0 { + if v.CopySourceRange != nil { locationName := 
"X-Amz-Copy-Source-Range" encoder.SetHeader(locationName).String(*v.CopySourceRange) } - if v.CopySourceSSECustomerAlgorithm != nil && len(*v.CopySourceSSECustomerAlgorithm) > 0 { + if v.CopySourceSSECustomerAlgorithm != nil { locationName := "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm" encoder.SetHeader(locationName).String(*v.CopySourceSSECustomerAlgorithm) } - if v.CopySourceSSECustomerKey != nil && len(*v.CopySourceSSECustomerKey) > 0 { + if v.CopySourceSSECustomerKey != nil { locationName := "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key" encoder.SetHeader(locationName).String(*v.CopySourceSSECustomerKey) } - if v.CopySourceSSECustomerKeyMD5 != nil && len(*v.CopySourceSSECustomerKeyMD5) > 0 { + if v.CopySourceSSECustomerKeyMD5 != nil { locationName := "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5" encoder.SetHeader(locationName).String(*v.CopySourceSSECustomerKeyMD5) } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { + if v.ExpectedBucketOwner != nil { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } - if v.ExpectedSourceBucketOwner != nil && len(*v.ExpectedSourceBucketOwner) > 0 { + if v.ExpectedSourceBucketOwner != nil { locationName := "X-Amz-Source-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedSourceBucketOwner) } @@ -8298,17 +9248,17 @@ func awsRestxml_serializeOpHttpBindingsUploadPartCopyInput(v *UploadPartCopyInpu encoder.SetHeader(locationName).String(string(v.RequestPayer)) } - if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 { + if v.SSECustomerAlgorithm != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Algorithm" encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) } - if v.SSECustomerKey != nil && len(*v.SSECustomerKey) > 0 { + if v.SSECustomerKey != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Key" encoder.SetHeader(locationName).String(*v.SSECustomerKey) } - if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 { + if v.SSECustomerKeyMD5 != nil { locationName := "X-Amz-Server-Side-Encryption-Customer-Key-Md5" encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) } @@ -8330,6 +9280,10 @@ func (*awsRestxml_serializeOpWriteGetObjectResponse) ID() string { func (m *awsRestxml_serializeOpWriteGetObjectResponse) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() request, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} @@ -8341,7 +9295,7 @@ func (m *awsRestxml_serializeOpWriteGetObjectResponse) HandleSerialize(ctx conte return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/WriteGetObjectResponse?x-id=WriteGetObjectResponse") + opPath, opQuery := httpbinding.SplitURI("/WriteGetObjectResponse") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "POST" @@ -8378,6 +9332,8 @@ func (m *awsRestxml_serializeOpWriteGetObjectResponse) 
HandleSerialize(ctx conte } in.Request = request + endTimer() + span.End() return next.HandleSerialize(ctx, in) } func awsRestxml_serializeOpHttpBindingsWriteGetObjectResponseInput(v *WriteGetObjectResponseInput, encoder *httpbinding.Encoder) error { @@ -8385,7 +9341,7 @@ func awsRestxml_serializeOpHttpBindingsWriteGetObjectResponseInput(v *WriteGetOb return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.AcceptRanges != nil && len(*v.AcceptRanges) > 0 { + if v.AcceptRanges != nil { locationName := "X-Amz-Fwd-Header-Accept-Ranges" encoder.SetHeader(locationName).String(*v.AcceptRanges) } @@ -8395,42 +9351,47 @@ func awsRestxml_serializeOpHttpBindingsWriteGetObjectResponseInput(v *WriteGetOb encoder.SetHeader(locationName).Boolean(*v.BucketKeyEnabled) } - if v.CacheControl != nil && len(*v.CacheControl) > 0 { + if v.CacheControl != nil { locationName := "X-Amz-Fwd-Header-Cache-Control" encoder.SetHeader(locationName).String(*v.CacheControl) } - if v.ChecksumCRC32 != nil && len(*v.ChecksumCRC32) > 0 { + if v.ChecksumCRC32 != nil { locationName := "X-Amz-Fwd-Header-X-Amz-Checksum-Crc32" encoder.SetHeader(locationName).String(*v.ChecksumCRC32) } - if v.ChecksumCRC32C != nil && len(*v.ChecksumCRC32C) > 0 { + if v.ChecksumCRC32C != nil { locationName := "X-Amz-Fwd-Header-X-Amz-Checksum-Crc32c" encoder.SetHeader(locationName).String(*v.ChecksumCRC32C) } - if v.ChecksumSHA1 != nil && len(*v.ChecksumSHA1) > 0 { + if v.ChecksumCRC64NVME != nil { + locationName := "X-Amz-Fwd-Header-X-Amz-Checksum-Crc64nvme" + encoder.SetHeader(locationName).String(*v.ChecksumCRC64NVME) + } + + if v.ChecksumSHA1 != nil { locationName := "X-Amz-Fwd-Header-X-Amz-Checksum-Sha1" encoder.SetHeader(locationName).String(*v.ChecksumSHA1) } - if v.ChecksumSHA256 != nil && len(*v.ChecksumSHA256) > 0 { + if v.ChecksumSHA256 != nil { locationName := "X-Amz-Fwd-Header-X-Amz-Checksum-Sha256" encoder.SetHeader(locationName).String(*v.ChecksumSHA256) } - if v.ContentDisposition != nil && len(*v.ContentDisposition) > 0 { + if v.ContentDisposition != nil { locationName := "X-Amz-Fwd-Header-Content-Disposition" encoder.SetHeader(locationName).String(*v.ContentDisposition) } - if v.ContentEncoding != nil && len(*v.ContentEncoding) > 0 { + if v.ContentEncoding != nil { locationName := "X-Amz-Fwd-Header-Content-Encoding" encoder.SetHeader(locationName).String(*v.ContentEncoding) } - if v.ContentLanguage != nil && len(*v.ContentLanguage) > 0 { + if v.ContentLanguage != nil { locationName := "X-Amz-Fwd-Header-Content-Language" encoder.SetHeader(locationName).String(*v.ContentLanguage) } @@ -8440,12 +9401,12 @@ func awsRestxml_serializeOpHttpBindingsWriteGetObjectResponseInput(v *WriteGetOb encoder.SetHeader(locationName).Long(*v.ContentLength) } - if v.ContentRange != nil && len(*v.ContentRange) > 0 { + if v.ContentRange != nil { locationName := "X-Amz-Fwd-Header-Content-Range" encoder.SetHeader(locationName).String(*v.ContentRange) } - if v.ContentType != nil && len(*v.ContentType) > 0 { + if v.ContentType != nil { locationName := "X-Amz-Fwd-Header-Content-Type" encoder.SetHeader(locationName).String(*v.ContentType) } @@ -8455,22 +9416,22 @@ func awsRestxml_serializeOpHttpBindingsWriteGetObjectResponseInput(v *WriteGetOb encoder.SetHeader(locationName).Boolean(*v.DeleteMarker) } - if v.ErrorCode != nil && len(*v.ErrorCode) > 0 { + if v.ErrorCode != nil { locationName := "X-Amz-Fwd-Error-Code" encoder.SetHeader(locationName).String(*v.ErrorCode) } - if v.ErrorMessage != nil && len(*v.ErrorMessage) > 0 { + if v.ErrorMessage != 
nil { locationName := "X-Amz-Fwd-Error-Message" encoder.SetHeader(locationName).String(*v.ErrorMessage) } - if v.ETag != nil && len(*v.ETag) > 0 { + if v.ETag != nil { locationName := "X-Amz-Fwd-Header-Etag" encoder.SetHeader(locationName).String(*v.ETag) } - if v.Expiration != nil && len(*v.Expiration) > 0 { + if v.Expiration != nil { locationName := "X-Amz-Fwd-Header-X-Amz-Expiration" encoder.SetHeader(locationName).String(*v.Expiration) } @@ -8488,9 +9449,7 @@ func awsRestxml_serializeOpHttpBindingsWriteGetObjectResponseInput(v *WriteGetOb if v.Metadata != nil { hv := encoder.Headers("X-Amz-Meta-") for mapKey, mapVal := range v.Metadata { - if len(mapVal) > 0 { - hv.SetHeader(http.CanonicalHeaderKey(mapKey)).String(mapVal) - } + hv.SetHeader(http.CanonicalHeaderKey(mapKey)).String(mapVal) } } @@ -8529,17 +9488,17 @@ func awsRestxml_serializeOpHttpBindingsWriteGetObjectResponseInput(v *WriteGetOb encoder.SetHeader(locationName).String(string(v.RequestCharged)) } - if v.RequestRoute != nil && len(*v.RequestRoute) > 0 { + if v.RequestRoute != nil { locationName := "X-Amz-Request-Route" encoder.SetHeader(locationName).String(*v.RequestRoute) } - if v.RequestToken != nil && len(*v.RequestToken) > 0 { + if v.RequestToken != nil { locationName := "X-Amz-Request-Token" encoder.SetHeader(locationName).String(*v.RequestToken) } - if v.Restore != nil && len(*v.Restore) > 0 { + if v.Restore != nil { locationName := "X-Amz-Fwd-Header-X-Amz-Restore" encoder.SetHeader(locationName).String(*v.Restore) } @@ -8549,17 +9508,17 @@ func awsRestxml_serializeOpHttpBindingsWriteGetObjectResponseInput(v *WriteGetOb encoder.SetHeader(locationName).String(string(v.ServerSideEncryption)) } - if v.SSECustomerAlgorithm != nil && len(*v.SSECustomerAlgorithm) > 0 { + if v.SSECustomerAlgorithm != nil { locationName := "X-Amz-Fwd-Header-X-Amz-Server-Side-Encryption-Customer-Algorithm" encoder.SetHeader(locationName).String(*v.SSECustomerAlgorithm) } - if v.SSECustomerKeyMD5 != nil && len(*v.SSECustomerKeyMD5) > 0 { + if v.SSECustomerKeyMD5 != nil { locationName := "X-Amz-Fwd-Header-X-Amz-Server-Side-Encryption-Customer-Key-Md5" encoder.SetHeader(locationName).String(*v.SSECustomerKeyMD5) } - if v.SSEKMSKeyId != nil && len(*v.SSEKMSKeyId) > 0 { + if v.SSEKMSKeyId != nil { locationName := "X-Amz-Fwd-Header-X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id" encoder.SetHeader(locationName).String(*v.SSEKMSKeyId) } @@ -8579,7 +9538,7 @@ func awsRestxml_serializeOpHttpBindingsWriteGetObjectResponseInput(v *WriteGetOb encoder.SetHeader(locationName).Integer(*v.TagCount) } - if v.VersionId != nil && len(*v.VersionId) > 0 { + if v.VersionId != nil { locationName := "X-Amz-Fwd-Header-X-Amz-Version-Id" encoder.SetHeader(locationName).String(*v.VersionId) } @@ -8995,6 +9954,17 @@ func awsRestxml_serializeDocumentCompletedPart(v *types.CompletedPart, value smi el := value.MemberElement(root) el.String(*v.ChecksumCRC32C) } + if v.ChecksumCRC64NVME != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ChecksumCRC64NVME", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.ChecksumCRC64NVME) + } if v.ChecksumSHA1 != nil { rootAttr := []smithyxml.Attr{} root := smithyxml.StartElement{ @@ -10587,71 +11557,66 @@ func awsRestxml_serializeDocumentLifecycleRuleAndOperator(v *types.LifecycleRule return nil } -func awsRestxml_serializeDocumentLifecycleRuleFilter(v types.LifecycleRuleFilter, value smithyxml.Value) error { +func 
awsRestxml_serializeDocumentLifecycleRuleFilter(v *types.LifecycleRuleFilter, value smithyxml.Value) error { defer value.Close() - switch uv := v.(type) { - case *types.LifecycleRuleFilterMemberAnd: - customMemberNameAttr := []smithyxml.Attr{} - customMemberName := smithyxml.StartElement{ + if v.And != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ Name: smithyxml.Name{ Local: "And", }, - Attr: customMemberNameAttr, + Attr: rootAttr, } - av := value.MemberElement(customMemberName) - if err := awsRestxml_serializeDocumentLifecycleRuleAndOperator(&uv.Value, av); err != nil { + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentLifecycleRuleAndOperator(v.And, el); err != nil { return err } - - case *types.LifecycleRuleFilterMemberObjectSizeGreaterThan: - customMemberNameAttr := []smithyxml.Attr{} - customMemberName := smithyxml.StartElement{ + } + if v.ObjectSizeGreaterThan != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ Name: smithyxml.Name{ Local: "ObjectSizeGreaterThan", }, - Attr: customMemberNameAttr, + Attr: rootAttr, } - av := value.MemberElement(customMemberName) - av.Long(uv.Value) - - case *types.LifecycleRuleFilterMemberObjectSizeLessThan: - customMemberNameAttr := []smithyxml.Attr{} - customMemberName := smithyxml.StartElement{ + el := value.MemberElement(root) + el.Long(*v.ObjectSizeGreaterThan) + } + if v.ObjectSizeLessThan != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ Name: smithyxml.Name{ Local: "ObjectSizeLessThan", }, - Attr: customMemberNameAttr, + Attr: rootAttr, } - av := value.MemberElement(customMemberName) - av.Long(uv.Value) - - case *types.LifecycleRuleFilterMemberPrefix: - customMemberNameAttr := []smithyxml.Attr{} - customMemberName := smithyxml.StartElement{ + el := value.MemberElement(root) + el.Long(*v.ObjectSizeLessThan) + } + if v.Prefix != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ Name: smithyxml.Name{ Local: "Prefix", }, - Attr: customMemberNameAttr, + Attr: rootAttr, } - av := value.MemberElement(customMemberName) - av.String(uv.Value) - - case *types.LifecycleRuleFilterMemberTag: - customMemberNameAttr := []smithyxml.Attr{} - customMemberName := smithyxml.StartElement{ + el := value.MemberElement(root) + el.String(*v.Prefix) + } + if v.Tag != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ Name: smithyxml.Name{ Local: "Tag", }, - Attr: customMemberNameAttr, + Attr: rootAttr, } - av := value.MemberElement(customMemberName) - if err := awsRestxml_serializeDocumentTag(&uv.Value, av); err != nil { + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentTag(v.Tag, el); err != nil { return err } - - default: - return fmt.Errorf("attempted to serialize unknown member type %T for union %T", uv, v) - } return nil } @@ -10778,6 +11743,24 @@ func awsRestxml_serializeDocumentMetadataEntry(v *types.MetadataEntry, value smi return nil } +func awsRestxml_serializeDocumentMetadataTableConfiguration(v *types.MetadataTableConfiguration, value smithyxml.Value) error { + defer value.Close() + if v.S3TablesDestination != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "S3TablesDestination", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentS3TablesDestination(v.S3TablesDestination, el); err != nil { + return err + } + } + return nil +} + func awsRestxml_serializeDocumentMetrics(v 
*types.Metrics, value smithyxml.Value) error { defer value.Close() if v.EventThreshold != nil { @@ -11091,6 +12074,17 @@ func awsRestxml_serializeDocumentNotificationConfigurationFilter(v *types.Notifi func awsRestxml_serializeDocumentObjectIdentifier(v *types.ObjectIdentifier, value smithyxml.Value) error { defer value.Close() + if v.ETag != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "ETag", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.ETag) + } if v.Key != nil { rootAttr := []smithyxml.Attr{} root := smithyxml.StartElement{ @@ -11102,6 +12096,28 @@ func awsRestxml_serializeDocumentObjectIdentifier(v *types.ObjectIdentifier, val el := value.MemberElement(root) el.String(*v.Key) } + if v.LastModifiedTime != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "LastModifiedTime", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(smithytime.FormatHTTPDate(*v.LastModifiedTime)) + } + if v.Size != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Size", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.Long(*v.Size) + } if v.VersionId != nil { rootAttr := []smithyxml.Attr{} root := smithyxml.StartElement{ @@ -11759,49 +12775,44 @@ func awsRestxml_serializeDocumentReplicationRuleAndOperator(v *types.Replication return nil } -func awsRestxml_serializeDocumentReplicationRuleFilter(v types.ReplicationRuleFilter, value smithyxml.Value) error { +func awsRestxml_serializeDocumentReplicationRuleFilter(v *types.ReplicationRuleFilter, value smithyxml.Value) error { defer value.Close() - switch uv := v.(type) { - case *types.ReplicationRuleFilterMemberAnd: - customMemberNameAttr := []smithyxml.Attr{} - customMemberName := smithyxml.StartElement{ + if v.And != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ Name: smithyxml.Name{ Local: "And", }, - Attr: customMemberNameAttr, + Attr: rootAttr, } - av := value.MemberElement(customMemberName) - if err := awsRestxml_serializeDocumentReplicationRuleAndOperator(&uv.Value, av); err != nil { + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentReplicationRuleAndOperator(v.And, el); err != nil { return err } - - case *types.ReplicationRuleFilterMemberPrefix: - customMemberNameAttr := []smithyxml.Attr{} - customMemberName := smithyxml.StartElement{ + } + if v.Prefix != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ Name: smithyxml.Name{ Local: "Prefix", }, - Attr: customMemberNameAttr, + Attr: rootAttr, } - av := value.MemberElement(customMemberName) - av.String(uv.Value) - - case *types.ReplicationRuleFilterMemberTag: - customMemberNameAttr := []smithyxml.Attr{} - customMemberName := smithyxml.StartElement{ + el := value.MemberElement(root) + el.String(*v.Prefix) + } + if v.Tag != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ Name: smithyxml.Name{ Local: "Tag", }, - Attr: customMemberNameAttr, + Attr: rootAttr, } - av := value.MemberElement(customMemberName) - if err := awsRestxml_serializeDocumentTag(&uv.Value, av); err != nil { + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentTag(v.Tag, el); err != nil { return err } - - default: - return fmt.Errorf("attempted to serialize unknown member type %T for union %T", uv, v) - } return nil } @@ -12158,6 +13169,33 @@ func 
awsRestxml_serializeDocumentS3Location(v *types.S3Location, value smithyxml return nil } +func awsRestxml_serializeDocumentS3TablesDestination(v *types.S3TablesDestination, value smithyxml.Value) error { + defer value.Close() + if v.TableBucketArn != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "TableBucketArn", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.TableBucketArn) + } + if v.TableName != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "TableName", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.TableName) + } + return nil +} + func awsRestxml_serializeDocumentScanRange(v *types.ScanRange, value smithyxml.Value) error { defer value.Close() if v.End != nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/enums.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/enums.go index ea3b9c82a..ac5ab0bef 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/enums.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/enums.go @@ -11,6 +11,7 @@ const ( // Values returns all known values for AnalyticsS3ExportFileFormat. Note that this // can be expanded in the future, and so it is only as up to date as the client. +// // The ordering of this slice is not guaranteed to be stable across updates. func (AnalyticsS3ExportFileFormat) Values() []AnalyticsS3ExportFileFormat { return []AnalyticsS3ExportFileFormat{ @@ -27,8 +28,9 @@ const ( ) // Values returns all known values for ArchiveStatus. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ArchiveStatus) Values() []ArchiveStatus { return []ArchiveStatus{ "ARCHIVE_ACCESS", @@ -45,8 +47,9 @@ const ( ) // Values returns all known values for BucketAccelerateStatus. Note that this can -// be expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (BucketAccelerateStatus) Values() []BucketAccelerateStatus { return []BucketAccelerateStatus{ "Enabled", @@ -65,8 +68,9 @@ const ( ) // Values returns all known values for BucketCannedACL. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. 
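The LifecycleRuleFilter and ReplicationRuleFilter serializers rewritten above track an upstream model change: both filters are no longer Go unions with one Member wrapper type per variant, but plain structs whose nil pointer fields are simply omitted from the XML. Code that previously selected a single union member migrates roughly as follows; a sketch using only the field names visible in the serializers:

    package lifecycle // illustrative package name

    import (
        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/service/s3/types"
    )

    // Before, the filter was an interface satisfied by one member, e.g.
    //   filter := &types.LifecycleRuleFilterMemberPrefix{Value: "logs/"}
    // Now it is a struct; And, Prefix, Tag, and the object-size bounds
    // can be combined, and unset fields are not serialized.
    func prefixFilter() *types.LifecycleRuleFilter {
        return &types.LifecycleRuleFilter{
            Prefix: aws.String("logs/"), // illustrative prefix
        }
    }
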
func (BucketCannedACL) Values() []BucketCannedACL { return []BucketCannedACL{ "private", @@ -90,17 +94,22 @@ const ( BucketLocationConstraintApSoutheast1 BucketLocationConstraint = "ap-southeast-1" BucketLocationConstraintApSoutheast2 BucketLocationConstraint = "ap-southeast-2" BucketLocationConstraintApSoutheast3 BucketLocationConstraint = "ap-southeast-3" + BucketLocationConstraintApSoutheast4 BucketLocationConstraint = "ap-southeast-4" + BucketLocationConstraintApSoutheast5 BucketLocationConstraint = "ap-southeast-5" BucketLocationConstraintCaCentral1 BucketLocationConstraint = "ca-central-1" BucketLocationConstraintCnNorth1 BucketLocationConstraint = "cn-north-1" BucketLocationConstraintCnNorthwest1 BucketLocationConstraint = "cn-northwest-1" BucketLocationConstraintEu BucketLocationConstraint = "EU" BucketLocationConstraintEuCentral1 BucketLocationConstraint = "eu-central-1" + BucketLocationConstraintEuCentral2 BucketLocationConstraint = "eu-central-2" BucketLocationConstraintEuNorth1 BucketLocationConstraint = "eu-north-1" BucketLocationConstraintEuSouth1 BucketLocationConstraint = "eu-south-1" BucketLocationConstraintEuSouth2 BucketLocationConstraint = "eu-south-2" BucketLocationConstraintEuWest1 BucketLocationConstraint = "eu-west-1" BucketLocationConstraintEuWest2 BucketLocationConstraint = "eu-west-2" BucketLocationConstraintEuWest3 BucketLocationConstraint = "eu-west-3" + BucketLocationConstraintIlCentral1 BucketLocationConstraint = "il-central-1" + BucketLocationConstraintMeCentral1 BucketLocationConstraint = "me-central-1" BucketLocationConstraintMeSouth1 BucketLocationConstraint = "me-south-1" BucketLocationConstraintSaEast1 BucketLocationConstraint = "sa-east-1" BucketLocationConstraintUsEast2 BucketLocationConstraint = "us-east-2" @@ -112,6 +121,7 @@ const ( // Values returns all known values for BucketLocationConstraint. Note that this // can be expanded in the future, and so it is only as up to date as the client. +// // The ordering of this slice is not guaranteed to be stable across updates. func (BucketLocationConstraint) Values() []BucketLocationConstraint { return []BucketLocationConstraint{ @@ -125,17 +135,22 @@ func (BucketLocationConstraint) Values() []BucketLocationConstraint { "ap-southeast-1", "ap-southeast-2", "ap-southeast-3", + "ap-southeast-4", + "ap-southeast-5", "ca-central-1", "cn-north-1", "cn-northwest-1", "EU", "eu-central-1", + "eu-central-2", "eu-north-1", "eu-south-1", "eu-south-2", "eu-west-1", "eu-west-2", "eu-west-3", + "il-central-1", + "me-central-1", "me-south-1", "sa-east-1", "us-east-2", @@ -156,8 +171,9 @@ const ( ) // Values returns all known values for BucketLogsPermission. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (BucketLogsPermission) Values() []BucketLogsPermission { return []BucketLogsPermission{ "FULL_CONTROL", @@ -174,8 +190,9 @@ const ( ) // Values returns all known values for BucketType. Note that this can be expanded -// in the future, and so it is only as up to date as the client. The ordering of -// this slice is not guaranteed to be stable across updates. +// in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. 
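The BucketLocationConstraint list above gains ap-southeast-4, ap-southeast-5, eu-central-2, il-central-1, and me-central-1; as the doc comments note, the enum is open and Values() is only as current as the client, so this mainly refreshes the documented set. A sketch of creating a bucket with one of the new constraints, assuming an already-constructed client; the bucket name is illustrative:

    package buckets // illustrative package name

    import (
        "context"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/service/s3"
        "github.com/aws/aws-sdk-go-v2/service/s3/types"
    )

    // createRegionalBucket pins the bucket to one of the newly listed
    // Regions via CreateBucketConfiguration.
    func createRegionalBucket(ctx context.Context, client *s3.Client) error {
        _, err := client.CreateBucket(ctx, &s3.CreateBucketInput{
            Bucket: aws.String("example-bucket"),
            CreateBucketConfiguration: &types.CreateBucketConfiguration{
                LocationConstraint: types.BucketLocationConstraintIlCentral1,
            },
        })
        return err
    }
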
func (BucketType) Values() []BucketType { return []BucketType{ "Directory", @@ -191,8 +208,9 @@ const ( ) // Values returns all known values for BucketVersioningStatus. Note that this can -// be expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (BucketVersioningStatus) Values() []BucketVersioningStatus { return []BucketVersioningStatus{ "Enabled", @@ -204,21 +222,24 @@ type ChecksumAlgorithm string // Enum values for ChecksumAlgorithm const ( - ChecksumAlgorithmCrc32 ChecksumAlgorithm = "CRC32" - ChecksumAlgorithmCrc32c ChecksumAlgorithm = "CRC32C" - ChecksumAlgorithmSha1 ChecksumAlgorithm = "SHA1" - ChecksumAlgorithmSha256 ChecksumAlgorithm = "SHA256" + ChecksumAlgorithmCrc32 ChecksumAlgorithm = "CRC32" + ChecksumAlgorithmCrc32c ChecksumAlgorithm = "CRC32C" + ChecksumAlgorithmSha1 ChecksumAlgorithm = "SHA1" + ChecksumAlgorithmSha256 ChecksumAlgorithm = "SHA256" + ChecksumAlgorithmCrc64nvme ChecksumAlgorithm = "CRC64NVME" ) // Values returns all known values for ChecksumAlgorithm. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ChecksumAlgorithm) Values() []ChecksumAlgorithm { return []ChecksumAlgorithm{ "CRC32", "CRC32C", "SHA1", "SHA256", + "CRC64NVME", } } @@ -230,14 +251,34 @@ const ( ) // Values returns all known values for ChecksumMode. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ChecksumMode) Values() []ChecksumMode { return []ChecksumMode{ "ENABLED", } } +type ChecksumType string + +// Enum values for ChecksumType +const ( + ChecksumTypeComposite ChecksumType = "COMPOSITE" + ChecksumTypeFullObject ChecksumType = "FULL_OBJECT" +) + +// Values returns all known values for ChecksumType. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (ChecksumType) Values() []ChecksumType { + return []ChecksumType{ + "COMPOSITE", + "FULL_OBJECT", + } +} + type CompressionType string // Enum values for CompressionType @@ -248,8 +289,9 @@ const ( ) // Values returns all known values for CompressionType. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. 
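Besides the reflowed doc comments, ChecksumAlgorithm gains CRC64NVME above, and the new ChecksumType enum distinguishes COMPOSITE (combined per-part) checksums from FULL_OBJECT ones. Opting an upload into the new algorithm is a single field; a sketch assuming an existing client, with the SDK's checksum middleware expected to derive the x-amz-checksum-crc64nvme value from the body:

    package checksums // illustrative package name

    import (
        "context"
        "io"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/service/s3"
        "github.com/aws/aws-sdk-go-v2/service/s3/types"
    )

    // putWithCRC64NVME uploads body and requests a CRC64NVME checksum;
    // bucket and key are illustrative.
    func putWithCRC64NVME(ctx context.Context, client *s3.Client, body io.Reader) error {
        _, err := client.PutObject(ctx, &s3.PutObjectInput{
            Bucket:            aws.String("example-bucket"),
            Key:               aws.String("example-key"),
            Body:              body,
            ChecksumAlgorithm: types.ChecksumAlgorithmCrc64nvme,
        })
        return err
    }
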
func (CompressionType) Values() []CompressionType { return []CompressionType{ "NONE", @@ -263,14 +305,17 @@ type DataRedundancy string // Enum values for DataRedundancy const ( DataRedundancySingleAvailabilityZone DataRedundancy = "SingleAvailabilityZone" + DataRedundancySingleLocalZone DataRedundancy = "SingleLocalZone" ) // Values returns all known values for DataRedundancy. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (DataRedundancy) Values() []DataRedundancy { return []DataRedundancy{ "SingleAvailabilityZone", + "SingleLocalZone", } } @@ -284,8 +329,9 @@ const ( // Values returns all known values for DeleteMarkerReplicationStatus. Note that // this can be expanded in the future, and so it is only as up to date as the -// client. The ordering of this slice is not guaranteed to be stable across -// updates. +// client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (DeleteMarkerReplicationStatus) Values() []DeleteMarkerReplicationStatus { return []DeleteMarkerReplicationStatus{ "Enabled", @@ -301,8 +347,9 @@ const ( ) // Values returns all known values for EncodingType. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (EncodingType) Values() []EncodingType { return []EncodingType{ "url", @@ -343,8 +390,9 @@ const ( ) // Values returns all known values for Event. Note that this can be expanded in -// the future, and so it is only as up to date as the client. The ordering of this -// slice is not guaranteed to be stable across updates. +// the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (Event) Values() []Event { return []Event{ "s3:ReducedRedundancyLostObject", @@ -387,8 +435,9 @@ const ( // Values returns all known values for ExistingObjectReplicationStatus. Note that // this can be expanded in the future, and so it is only as up to date as the -// client. The ordering of this slice is not guaranteed to be stable across -// updates. +// client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ExistingObjectReplicationStatus) Values() []ExistingObjectReplicationStatus { return []ExistingObjectReplicationStatus{ "Enabled", @@ -405,8 +454,9 @@ const ( ) // Values returns all known values for ExpirationStatus. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ExpirationStatus) Values() []ExpirationStatus { return []ExpirationStatus{ "Enabled", @@ -422,8 +472,9 @@ const ( ) // Values returns all known values for ExpressionType. Note that this can be -// expanded in the future, and so it is only as up to date as the client. 
The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ExpressionType) Values() []ExpressionType { return []ExpressionType{ "SQL", @@ -440,8 +491,9 @@ const ( ) // Values returns all known values for FileHeaderInfo. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (FileHeaderInfo) Values() []FileHeaderInfo { return []FileHeaderInfo{ "USE", @@ -459,8 +511,9 @@ const ( ) // Values returns all known values for FilterRuleName. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (FilterRuleName) Values() []FilterRuleName { return []FilterRuleName{ "prefix", @@ -478,8 +531,9 @@ const ( // Values returns all known values for IntelligentTieringAccessTier. Note that // this can be expanded in the future, and so it is only as up to date as the -// client. The ordering of this slice is not guaranteed to be stable across -// updates. +// client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (IntelligentTieringAccessTier) Values() []IntelligentTieringAccessTier { return []IntelligentTieringAccessTier{ "ARCHIVE_ACCESS", @@ -497,6 +551,7 @@ const ( // Values returns all known values for IntelligentTieringStatus. Note that this // can be expanded in the future, and so it is only as up to date as the client. +// // The ordering of this slice is not guaranteed to be stable across updates. func (IntelligentTieringStatus) Values() []IntelligentTieringStatus { return []IntelligentTieringStatus{ @@ -515,8 +570,9 @@ const ( ) // Values returns all known values for InventoryFormat. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (InventoryFormat) Values() []InventoryFormat { return []InventoryFormat{ "CSV", @@ -534,8 +590,9 @@ const ( ) // Values returns all known values for InventoryFrequency. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (InventoryFrequency) Values() []InventoryFrequency { return []InventoryFrequency{ "Daily", @@ -553,8 +610,9 @@ const ( // Values returns all known values for InventoryIncludedObjectVersions. Note that // this can be expanded in the future, and so it is only as up to date as the -// client. The ordering of this slice is not guaranteed to be stable across -// updates. +// client. 
+// +// The ordering of this slice is not guaranteed to be stable across updates. func (InventoryIncludedObjectVersions) Values() []InventoryIncludedObjectVersions { return []InventoryIncludedObjectVersions{ "All", @@ -584,8 +642,9 @@ const ( ) // Values returns all known values for InventoryOptionalField. Note that this can -// be expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (InventoryOptionalField) Values() []InventoryOptionalField { return []InventoryOptionalField{ "Size", @@ -615,8 +674,9 @@ const ( ) // Values returns all known values for JSONType. Note that this can be expanded in -// the future, and so it is only as up to date as the client. The ordering of this -// slice is not guaranteed to be stable across updates. +// the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (JSONType) Values() []JSONType { return []JSONType{ "DOCUMENT", @@ -629,14 +689,17 @@ type LocationType string // Enum values for LocationType const ( LocationTypeAvailabilityZone LocationType = "AvailabilityZone" + LocationTypeLocalZone LocationType = "LocalZone" ) // Values returns all known values for LocationType. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (LocationType) Values() []LocationType { return []LocationType{ "AvailabilityZone", + "LocalZone", } } @@ -649,8 +712,9 @@ const ( ) // Values returns all known values for MetadataDirective. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (MetadataDirective) Values() []MetadataDirective { return []MetadataDirective{ "COPY", @@ -667,8 +731,9 @@ const ( ) // Values returns all known values for MetricsStatus. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (MetricsStatus) Values() []MetricsStatus { return []MetricsStatus{ "Enabled", @@ -685,8 +750,9 @@ const ( ) // Values returns all known values for MFADelete. Note that this can be expanded -// in the future, and so it is only as up to date as the client. The ordering of -// this slice is not guaranteed to be stable across updates. +// in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (MFADelete) Values() []MFADelete { return []MFADelete{ "Enabled", @@ -703,8 +769,9 @@ const ( ) // Values returns all known values for MFADeleteStatus. 
Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (MFADeleteStatus) Values() []MFADeleteStatus { return []MFADeleteStatus{ "Enabled", @@ -724,8 +791,9 @@ const ( ) // Values returns all known values for ObjectAttributes. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ObjectAttributes) Values() []ObjectAttributes { return []ObjectAttributes{ "ETag", @@ -750,8 +818,9 @@ const ( ) // Values returns all known values for ObjectCannedACL. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ObjectCannedACL) Values() []ObjectCannedACL { return []ObjectCannedACL{ "private", @@ -772,8 +841,9 @@ const ( ) // Values returns all known values for ObjectLockEnabled. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ObjectLockEnabled) Values() []ObjectLockEnabled { return []ObjectLockEnabled{ "Enabled", @@ -790,6 +860,7 @@ const ( // Values returns all known values for ObjectLockLegalHoldStatus. Note that this // can be expanded in the future, and so it is only as up to date as the client. +// // The ordering of this slice is not guaranteed to be stable across updates. func (ObjectLockLegalHoldStatus) Values() []ObjectLockLegalHoldStatus { return []ObjectLockLegalHoldStatus{ @@ -807,8 +878,9 @@ const ( ) // Values returns all known values for ObjectLockMode. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ObjectLockMode) Values() []ObjectLockMode { return []ObjectLockMode{ "GOVERNANCE", @@ -825,8 +897,9 @@ const ( ) // Values returns all known values for ObjectLockRetentionMode. Note that this can -// be expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ObjectLockRetentionMode) Values() []ObjectLockRetentionMode { return []ObjectLockRetentionMode{ "GOVERNANCE", @@ -844,8 +917,9 @@ const ( ) // Values returns all known values for ObjectOwnership. 
Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ObjectOwnership) Values() []ObjectOwnership { return []ObjectOwnership{ "BucketOwnerPreferred", @@ -872,8 +946,9 @@ const ( ) // Values returns all known values for ObjectStorageClass. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ObjectStorageClass) Values() []ObjectStorageClass { return []ObjectStorageClass{ "STANDARD", @@ -899,6 +974,7 @@ const ( // Values returns all known values for ObjectVersionStorageClass. Note that this // can be expanded in the future, and so it is only as up to date as the client. +// // The ordering of this slice is not guaranteed to be stable across updates. func (ObjectVersionStorageClass) Values() []ObjectVersionStorageClass { return []ObjectVersionStorageClass{ @@ -915,6 +991,7 @@ const ( // Values returns all known values for OptionalObjectAttributes. Note that this // can be expanded in the future, and so it is only as up to date as the client. +// // The ordering of this slice is not guaranteed to be stable across updates. func (OptionalObjectAttributes) Values() []OptionalObjectAttributes { return []OptionalObjectAttributes{ @@ -930,8 +1007,9 @@ const ( ) // Values returns all known values for OwnerOverride. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (OwnerOverride) Values() []OwnerOverride { return []OwnerOverride{ "Destination", @@ -947,8 +1025,9 @@ const ( ) // Values returns all known values for PartitionDateSource. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (PartitionDateSource) Values() []PartitionDateSource { return []PartitionDateSource{ "EventTime", @@ -965,8 +1044,9 @@ const ( ) // Values returns all known values for Payer. Note that this can be expanded in -// the future, and so it is only as up to date as the client. The ordering of this -// slice is not guaranteed to be stable across updates. +// the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (Payer) Values() []Payer { return []Payer{ "Requester", @@ -986,8 +1066,9 @@ const ( ) // Values returns all known values for Permission. Note that this can be expanded -// in the future, and so it is only as up to date as the client. The ordering of -// this slice is not guaranteed to be stable across updates. +// in the future, and so it is only as up to date as the client. 
+// +// The ordering of this slice is not guaranteed to be stable across updates. func (Permission) Values() []Permission { return []Permission{ "FULL_CONTROL", @@ -1007,8 +1088,9 @@ const ( ) // Values returns all known values for Protocol. Note that this can be expanded in -// the future, and so it is only as up to date as the client. The ordering of this -// slice is not guaranteed to be stable across updates. +// the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (Protocol) Values() []Protocol { return []Protocol{ "http", @@ -1025,8 +1107,9 @@ const ( ) // Values returns all known values for QuoteFields. Note that this can be expanded -// in the future, and so it is only as up to date as the client. The ordering of -// this slice is not guaranteed to be stable across updates. +// in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (QuoteFields) Values() []QuoteFields { return []QuoteFields{ "ALWAYS", @@ -1044,6 +1127,7 @@ const ( // Values returns all known values for ReplicaModificationsStatus. Note that this // can be expanded in the future, and so it is only as up to date as the client. +// // The ordering of this slice is not guaranteed to be stable across updates. func (ReplicaModificationsStatus) Values() []ReplicaModificationsStatus { return []ReplicaModificationsStatus{ @@ -1061,8 +1145,9 @@ const ( ) // Values returns all known values for ReplicationRuleStatus. Note that this can -// be expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ReplicationRuleStatus) Values() []ReplicationRuleStatus { return []ReplicationRuleStatus{ "Enabled", @@ -1082,8 +1167,9 @@ const ( ) // Values returns all known values for ReplicationStatus. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ReplicationStatus) Values() []ReplicationStatus { return []ReplicationStatus{ "COMPLETE", @@ -1103,8 +1189,9 @@ const ( ) // Values returns all known values for ReplicationTimeStatus. Note that this can -// be expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ReplicationTimeStatus) Values() []ReplicationTimeStatus { return []ReplicationTimeStatus{ "Enabled", @@ -1120,8 +1207,9 @@ const ( ) // Values returns all known values for RequestCharged. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. 
func (RequestCharged) Values() []RequestCharged { return []RequestCharged{ "requester", @@ -1136,8 +1224,9 @@ const ( ) // Values returns all known values for RequestPayer. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (RequestPayer) Values() []RequestPayer { return []RequestPayer{ "requester", @@ -1152,8 +1241,9 @@ const ( ) // Values returns all known values for RestoreRequestType. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (RestoreRequestType) Values() []RestoreRequestType { return []RestoreRequestType{ "SELECT", @@ -1170,8 +1260,9 @@ const ( ) // Values returns all known values for ServerSideEncryption. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (ServerSideEncryption) Values() []ServerSideEncryption { return []ServerSideEncryption{ "AES256", @@ -1189,8 +1280,9 @@ const ( ) // Values returns all known values for SessionMode. Note that this can be expanded -// in the future, and so it is only as up to date as the client. The ordering of -// this slice is not guaranteed to be stable across updates. +// in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (SessionMode) Values() []SessionMode { return []SessionMode{ "ReadOnly", @@ -1208,8 +1300,9 @@ const ( // Values returns all known values for SseKmsEncryptedObjectsStatus. Note that // this can be expanded in the future, and so it is only as up to date as the -// client. The ordering of this slice is not guaranteed to be stable across -// updates. +// client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (SseKmsEncryptedObjectsStatus) Values() []SseKmsEncryptedObjectsStatus { return []SseKmsEncryptedObjectsStatus{ "Enabled", @@ -1235,8 +1328,9 @@ const ( ) // Values returns all known values for StorageClass. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (StorageClass) Values() []StorageClass { return []StorageClass{ "STANDARD", @@ -1262,8 +1356,9 @@ const ( // Values returns all known values for StorageClassAnalysisSchemaVersion. Note // that this can be expanded in the future, and so it is only as up to date as the -// client. The ordering of this slice is not guaranteed to be stable across -// updates. +// client. +// +// The ordering of this slice is not guaranteed to be stable across updates. 
func (StorageClassAnalysisSchemaVersion) Values() []StorageClassAnalysisSchemaVersion { return []StorageClassAnalysisSchemaVersion{ "V_1", @@ -1279,8 +1374,9 @@ const ( ) // Values returns all known values for TaggingDirective. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (TaggingDirective) Values() []TaggingDirective { return []TaggingDirective{ "COPY", @@ -1298,8 +1394,9 @@ const ( ) // Values returns all known values for Tier. Note that this can be expanded in the -// future, and so it is only as up to date as the client. The ordering of this -// slice is not guaranteed to be stable across updates. +// future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (Tier) Values() []Tier { return []Tier{ "Standard", @@ -1308,6 +1405,26 @@ func (Tier) Values() []Tier { } } +type TransitionDefaultMinimumObjectSize string + +// Enum values for TransitionDefaultMinimumObjectSize +const ( + TransitionDefaultMinimumObjectSizeVariesByStorageClass TransitionDefaultMinimumObjectSize = "varies_by_storage_class" + TransitionDefaultMinimumObjectSizeAllStorageClasses128k TransitionDefaultMinimumObjectSize = "all_storage_classes_128K" +) + +// Values returns all known values for TransitionDefaultMinimumObjectSize. Note +// that this can be expanded in the future, and so it is only as up to date as the +// client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (TransitionDefaultMinimumObjectSize) Values() []TransitionDefaultMinimumObjectSize { + return []TransitionDefaultMinimumObjectSize{ + "varies_by_storage_class", + "all_storage_classes_128K", + } +} + type TransitionStorageClass string // Enum values for TransitionStorageClass @@ -1321,8 +1438,9 @@ const ( ) // Values returns all known values for TransitionStorageClass. Note that this can -// be expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. func (TransitionStorageClass) Values() []TransitionStorageClass { return []TransitionStorageClass{ "GLACIER", @@ -1344,8 +1462,9 @@ const ( ) // Values returns all known values for Type. Note that this can be expanded in the -// future, and so it is only as up to date as the client. The ordering of this -// slice is not guaranteed to be stable across updates. +// future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. 
func (Type) Values() []Type { return []Type{ "CanonicalUser", diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/errors.go index 166484f4e..1070573a5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/errors.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/errors.go @@ -64,14 +64,46 @@ func (e *BucketAlreadyOwnedByYou) ErrorCode() string { } func (e *BucketAlreadyOwnedByYou) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } -// Object is archived and inaccessible until restored. If the object you are -// retrieving is stored in the S3 Glacier Flexible Retrieval storage class, the S3 -// Glacier Deep Archive storage class, the S3 Intelligent-Tiering Archive Access -// tier, or the S3 Intelligent-Tiering Deep Archive Access tier, before you can -// retrieve the object you must first restore a copy using RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html) -// . Otherwise, this operation returns an InvalidObjectState error. For -// information about restoring archived objects, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html) -// in the Amazon S3 User Guide. +// The existing object was created with a different encryption type. Subsequent +// write requests must include the appropriate encryption parameters in the request +// or while creating the session. +type EncryptionTypeMismatch struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *EncryptionTypeMismatch) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *EncryptionTypeMismatch) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *EncryptionTypeMismatch) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "EncryptionTypeMismatch" + } + return *e.ErrorCodeOverride +} +func (e *EncryptionTypeMismatch) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// Object is archived and inaccessible until restored. +// +// If the object you are retrieving is stored in the S3 Glacier Flexible Retrieval +// storage class, the S3 Glacier Deep Archive storage class, the S3 +// Intelligent-Tiering Archive Access tier, or the S3 Intelligent-Tiering Deep +// Archive Access tier, before you can retrieve the object you must first restore a +// copy using [RestoreObject]. Otherwise, this operation returns an InvalidObjectState error. For +// information about restoring archived objects, see [Restoring Archived Objects]in the Amazon S3 User Guide. +// +// [RestoreObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html +// [Restoring Archived Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html type InvalidObjectState struct { Message *string @@ -100,6 +132,69 @@ func (e *InvalidObjectState) ErrorCode() string { } func (e *InvalidObjectState) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } +// You may receive this error in multiple cases. Depending on the reason for the +// error, you may receive one of the messages below: +// +// - Cannot specify both a write offset value and user-defined object metadata +// for existing objects. +// +// - Checksum Type mismatch occurred, expected checksum Type: sha1, actual +// checksum Type: crc32c. +// +// - Request body cannot be empty when 'write offset' is specified.
+type InvalidRequest struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *InvalidRequest) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidRequest) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidRequest) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InvalidRequest" + } + return *e.ErrorCodeOverride +} +func (e *InvalidRequest) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The write offset value that you specified does not match the current object +// size. +type InvalidWriteOffset struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *InvalidWriteOffset) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidWriteOffset) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidWriteOffset) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InvalidWriteOffset" + } + return *e.ErrorCodeOverride +} +func (e *InvalidWriteOffset) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + // The specified bucket does not exist. type NoSuchBucket struct { Message *string @@ -256,3 +351,32 @@ func (e *ObjectNotInActiveTierError) ErrorCode() string { return *e.ErrorCodeOverride } func (e *ObjectNotInActiveTierError) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// You have attempted to add more parts than the maximum of 10000 that are +// allowed for this object. You can use the CopyObject operation to copy this +// object to another and then add more data to the newly copied object. +type TooManyParts struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *TooManyParts) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *TooManyParts) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *TooManyParts) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "TooManyParts" + } + return *e.ErrorCodeOverride +} +func (e *TooManyParts) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/types.go index 4299b57cc..2d5a284a2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/types.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/types.go @@ -9,9 +9,9 @@ import ( // Specifies the days since the initiation of an incomplete multipart upload that // Amazon S3 will wait before permanently removing all parts of the upload. For -// more information, see Aborting Incomplete Multipart Uploads Using a Bucket -// Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) -// in the Amazon S3 User Guide. +// more information, see [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]in the Amazon S3 User Guide.
+// +// [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config type AbortIncompleteMultipartUpload struct { // Specifies the number of days after which Amazon S3 aborts an incomplete @@ -22,8 +22,9 @@ type AbortIncompleteMultipartUpload struct { } // Configures the transfer acceleration state for an Amazon S3 bucket. For more -// information, see Amazon S3 Transfer Acceleration (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html) -// in the Amazon S3 User Guide. +// information, see [Amazon S3 Transfer Acceleration]in the Amazon S3 User Guide. +// +// [Amazon S3 Transfer Acceleration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html type AccelerateConfiguration struct { // Specifies the transfer acceleration status of the bucket. @@ -47,9 +48,10 @@ type AccessControlPolicy struct { // A container for information about access control for replicas. type AccessControlTranslation struct { - // Specifies the replica ownership. For default and valid values, see PUT bucket - // replication (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html) - // in the Amazon S3 API Reference. + // Specifies the replica ownership. For default and valid values, see [PUT bucket replication] in the + // Amazon S3 API Reference. + // + // [PUT bucket replication]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html // // This member is required. Owner OwnerOverride @@ -82,7 +84,7 @@ type AnalyticsConfiguration struct { // This member is required. Id *string - // Contains data related to access patterns to be collected and made available to + // Contains data related to access patterns to be collected and made available to // analyze the tradeoffs between different storage classes. // // This member is required. @@ -162,9 +164,10 @@ type AnalyticsS3BucketDestination struct { Format AnalyticsS3ExportFileFormat // The account ID that owns the destination S3 bucket. If no account ID is - // provided, the owner is not validated before exporting data. Although this value - // is optional, we strongly recommend that you set it to help prevent problems if - // the destination bucket ownership changes. + // provided, the owner is not validated before exporting data. + // + // Although this value is optional, we strongly recommend that you set it to help + // prevent problems if the destination bucket ownership changes. BucketAccountId *string // The prefix to use when exporting data. The prefix is prepended to all results. @@ -176,6 +179,11 @@ type AnalyticsS3BucketDestination struct { // In terms of implementation, a Bucket is a resource. type Bucket struct { + // BucketRegion indicates the Amazon Web Services region where the bucket is + // located. If the request contains at least one valid parameter, it is included in + // the response. + BucketRegion *string + // Date the bucket was created. This date can change when making changes to your // bucket, such as editing its bucket policy. CreationDate *time.Time @@ -187,12 +195,15 @@ type Bucket struct { } // Specifies the information about the bucket that will be created. For more -// information about directory buckets, see Directory buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html) -// in the Amazon S3 User Guide. This functionality is only supported by directory -// buckets. 
+// information about directory buckets, see [Directory buckets]in the Amazon S3 User Guide. +// +// This functionality is only supported by directory buckets. +// +// [Directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html type BucketInfo struct { - // The number of Availability Zone that's used for redundancy for the bucket. + // The number of Zones (Availability Zone or Local Zone) that are used for redundancy + // for the bucket. DataRedundancy DataRedundancy // The type of bucket. @@ -202,8 +213,9 @@ type BucketInfo struct { } // Specifies the lifecycle configuration for objects in an Amazon S3 bucket. For -// more information, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) -// in the Amazon S3 User Guide. +// more information, see [Object Lifecycle Management]in the Amazon S3 User Guide. +// +// [Object Lifecycle Management]: https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html type BucketLifecycleConfiguration struct { // A lifecycle rule for individual objects in an Amazon S3 bucket. @@ -218,8 +230,10 @@ type BucketLoggingStatus struct { // Describes where logs are stored and the prefix that Amazon S3 assigns to all - // log object keys for a bucket. For more information, see PUT Bucket logging (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) - // in the Amazon S3 API Reference. + // log object keys for a bucket. For more information, see [PUT Bucket logging]in the Amazon S3 API + // Reference. + // + // [PUT Bucket logging]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html LoggingEnabled *LoggingEnabled noSmithyDocumentSerde @@ -228,42 +242,65 @@ // Contains all the possible checksum or digest values for an object. type Checksum struct { - // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be - // present if it was uploaded with the object. When you use an API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 32-bit CRC32 checksum of the object. This checksum is only + // present if the checksum was uploaded with the object. When you use an API + // operation on an object that was uploaded using multipart uploads, this value may + // not be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information about + // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumCRC32 *string - // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be - // present if it was uploaded with the object.
When you use an API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 32-bit CRC32C checksum of the object. This checksum is only + // present if the checksum was uploaded with the object. When you use an API + // operation on an object that was uploaded using multipart uploads, this value may + // not be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information about + // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumCRC32C *string - // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. When you use the API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 64-bit CRC64NVME checksum of the object. This checksum is + // present if the object was uploaded with the CRC64NVME checksum algorithm, or if + // the object was uploaded without a checksum (and Amazon S3 added the default + // checksum, CRC64NVME , to the uploaded object). For more information, see [Checking object integrity] in + // the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumCRC64NVME *string + + // The Base64 encoded, 160-bit SHA1 digest of the object. This will only be + // present if the checksum was uploaded with the object. When you use the API + // operation on an object that was uploaded using multipart uploads, this value may + // not be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information about + // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumSHA1 *string - // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be - // present if it was uploaded with the object. When you use an API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part.
For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 256-bit SHA256 digest of the object. This will only be + // present if the checksum was uploaded with the object. When you use an API + // operation on an object that was uploaded using multipart uploads, this value may + // not be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information about + // how checksums are calculated with multipart uploads, see [Checking object integrity]in the Amazon S3 User + // Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums ChecksumSHA256 *string + // The checksum type that is used to calculate the object’s checksum value. For + // more information, see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumType ChecksumType + noSmithyDocumentSerde } @@ -283,8 +320,10 @@ type CommonPrefix struct { // The container for the completed multipart upload details. type CompletedMultipartUpload struct { - // Array of CompletedPart data types. If you do not supply a valid Part with your - // request, the service sends back an HTTP 400 response. + // Array of CompletedPart data types. + // + // If you do not supply a valid Part with your request, the service sends back an + // HTTP 400 response. Parts []CompletedPart noSmithyDocumentSerde @@ -293,40 +332,40 @@ // Details of the parts that were uploaded. type CompletedPart struct { - // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be - // present if it was uploaded with the object. When you use an API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 32-bit CRC32 checksum of the part. This checksum is present + // if the multipart upload request was created with the CRC32 checksum algorithm. + // For more information, see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumCRC32 *string - // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be - // present if it was uploaded with the object. When you use an API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part.
For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 32-bit CRC32C checksum of the part. This checksum is + // present if the multipart upload request was created with the CRC32C checksum + // algorithm. For more information, see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumCRC32C *string - // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. When you use the API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 64-bit CRC64NVME checksum of the part. This checksum is + // present if the multipart upload request was created with the CRC64NVME checksum + // algorithm. For more information, see [Checking object integrity]in the Amazon S3 + // User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumCRC64NVME *string + + // The Base64 encoded, 160-bit SHA1 checksum of the part. This checksum is present + // if the multipart upload request was created with the SHA1 checksum algorithm. + // For more information, see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumSHA1 *string - // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be - // present if it was uploaded with the object. When you use an API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 256-bit SHA256 checksum of the part. This checksum is + // present if the multipart upload request was created with the SHA256 checksum + // algorithm. For more information, see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumSHA256 *string // Entity tag returned when the part was uploaded. @@ -334,12 +373,14 @@ type CompletedPart struct { // Part number that identifies the part. This is a positive integer between 1 and // 10,000.
+ // // - General purpose buckets - In CompleteMultipartUpload , when an additional // checksum (including x-amz-checksum-crc32 , x-amz-checksum-crc32c , // x-amz-checksum-sha1 , or x-amz-checksum-sha256 ) is applied to each part, the // PartNumber must start at 1 and the part numbers must be consecutive. // Otherwise, Amazon S3 generates an HTTP 400 Bad Request status code and an // InvalidPartOrder error code. + // // - Directory buckets - In CompleteMultipartUpload , the PartNumber must start // at 1 and the part numbers must be consecutive. PartNumber *int32 @@ -366,10 +407,12 @@ type Condition struct { // be /docs , which identifies all objects in the docs/ folder. Required when the // parent element Condition is specified and sibling HttpErrorCodeReturnedEquals // is not specified. If both conditions are specified, both must be true for the - // redirect to be applied. Replacement must be made for object keys containing - // special characters (such as carriage returns) when using XML requests. For more - // information, see XML related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints) - // . + // redirect to be applied. + // + // Replacement must be made for object keys containing special characters (such as + // carriage returns) when using XML requests. For more information, see [XML related object key constraints]. + // + // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints KeyPrefixEquals *string noSmithyDocumentSerde @@ -382,30 +425,49 @@ type ContinuationEvent struct { // Container for all response elements. type CopyObjectResult struct { - // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be - // present if it was uploaded with the object. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // The Base64 encoded, 32-bit CRC32 checksum of the object. This checksum is only + // present if the checksum was uploaded with the object. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumCRC32 *string - // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be - // present if it was uploaded with the object. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // The Base64 encoded, 32-bit CRC32C checksum of the object. This will only be + // present if the checksum was uploaded with the object. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumCRC32C *string - // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // The Base64 encoded, 64-bit CRC64NVME checksum of the object.
This checksum is + // present if the object being copied was uploaded with the CRC64NVME checksum + // algorithm, or if the object was uploaded without a checksum (and Amazon S3 added + // the default checksum, CRC64NVME , to the uploaded object). For more information, + // see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumCRC64NVME *string + + // The Base64 encoded, 160-bit SHA1 digest of the object. This will only be + // present if the checksum was uploaded with the object. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumSHA1 *string - // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be - // present if it was uploaded with the object. For more information, see Checking - // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // The Base64 encoded, 256-bit SHA256 digest of the object. This will only be + // present if the checksum was uploaded with the object. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumSHA256 *string + // The checksum type that is used to calculate the object’s checksum value. For + // more information, see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumType ChecksumType + // Returns the ETag of the new object. The ETag reflects only changes to the // contents of an object, not its metadata. ETag *string @@ -419,40 +481,44 @@ // Container for all response elements. type CopyPartResult struct { - // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be - // present if it was uploaded with the object. When you use an API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies the + // Base64 encoded, 32-bit CRC32 checksum of the part. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumCRC32 *string - // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be - // present if it was uploaded with the object. When you use an API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part.
For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies the + // Base64 encoded, 32-bit CRC32C checksum of the part. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumCRC32C *string - // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. When you use the API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // The Base64 encoded, 64-bit CRC64NVME checksum of the part. This checksum is + // present if the multipart upload request was created with the CRC64NVME checksum + // algorithm. For more information, see [Checking object integrity]in the Amazon S3 + // User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumCRC64NVME *string + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies the + // Base64 encoded, 160-bit SHA1 checksum of the part. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumSHA1 *string - // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be - // present if it was uploaded with the object. When you use an API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies the + // Base64 encoded, 256-bit SHA256 checksum of the part. For more information, see [Checking object integrity] // in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumSHA256 *string // Entity tag of the object. @@ -465,8 +531,9 @@ type CopyPartResult struct { } // Describes the cross-origin access configuration for objects in an Amazon S3 -// bucket.
For more information, see Enabling Cross-Origin Resource Sharing (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) -// in the Amazon S3 User Guide. +// bucket. For more information, see [Enabling Cross-Origin Resource Sharing]in the Amazon S3 User Guide. +// +// [Enabling Cross-Origin Resource Sharing]: https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html type CORSConfiguration struct { // A set of origins and methods (cross-origin access that you want to allow). You @@ -515,23 +582,37 @@ type CORSRule struct { // The configuration information for the bucket. type CreateBucketConfiguration struct { - // Specifies the information about the bucket that will be created. This - // functionality is only supported by directory buckets. + // Specifies the information about the bucket that will be created. + // + // This functionality is only supported by directory buckets. Bucket *BucketInfo - // Specifies the location where the bucket will be created. For directory buckets, - // the location type is Availability Zone. This functionality is only supported by - // directory buckets. + // Specifies the location where the bucket will be created. + // + // Directory buckets - The location type is Availability Zone or Local Zone. To + // use the Local Zone location type, your account must be enabled for Dedicated + // Local Zones. Otherwise, you get an HTTP 403 Forbidden error with the error code + // AccessDenied . To learn more, see [Enable accounts for Dedicated Local Zones] in the Amazon S3 User Guide. + // + // This functionality is only supported by directory buckets. + // + // [Enable accounts for Dedicated Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/opt-in-directory-bucket-lz.html Location *LocationInfo // Specifies the Region where the bucket will be created. You might choose a // Region to optimize latency, minimize costs, or address regulatory requirements. // For example, if you reside in Europe, you will probably find it advantageous to - // create buckets in the Europe (Ireland) Region. For more information, see - // Accessing a bucket (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro) - // in the Amazon S3 User Guide. If you don't specify a Region, the bucket is - // created in the US East (N. Virginia) Region (us-east-1) by default. This - // functionality is not supported for directory buckets. + // create buckets in the Europe (Ireland) Region. + // + // If you don't specify a Region, the bucket is created in the US East (N. + // Virginia) Region (us-east-1) by default. Configurations using the value EU will + // create a bucket in eu-west-1 . + // + // For a list of the valid values for all of the Amazon Web Services Regions, see [Regions and Endpoints]. + // + // This functionality is not supported for directory buckets. + // + // [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region LocationConstraint BucketLocationConstraint noSmithyDocumentSerde @@ -548,7 +629,9 @@ type CSVInput struct { // A single character used to indicate that a row should be ignored when the // character is present at the start of that row. You can specify any character to - // indicate a comment line. The default character is # . Default: # + // indicate a comment line. The default character is # . + // + // Default: # Comments *string // A single character used to separate individual fields in a record. 
You can @@ -556,17 +639,26 @@ type CSVInput struct { FieldDelimiter *string // Describes the first line of input. Valid values are: + // // - NONE : First line is not a header. + // // - IGNORE : First line is a header, but you can't use the header values to // indicate the column in an expression. You can use column position (such as _1, // _2, …) to indicate the column ( SELECT s._1 FROM OBJECT s ). + // // - Use : First line is a header, and you can use the header value to identify a // column in an expression ( SELECT "name" FROM OBJECT ). FileHeaderInfo FileHeaderInfo // A single character used for escaping when the field delimiter is part of the // value. For example, if the value is a, b , Amazon S3 wraps this field value in - // quotation marks, as follows: " a , b " . Type: String Default: " Ancestors: CSV + // quotation marks, as follows: " a , b " . + // + // Type: String + // + // Default: " + // + // Ancestors: CSV QuoteCharacter *string // A single character used for escaping the quotation mark character inside an @@ -599,7 +691,9 @@ type CSVOutput struct { QuoteEscapeCharacter *string // Indicates whether to use quotation marks around output fields. + // // - ALWAYS : Always use quotation marks for output fields. + // // - ASNEEDED : Use quotation marks for output fields when needed. QuoteFields QuoteFields @@ -610,9 +704,11 @@ type CSVOutput struct { noSmithyDocumentSerde } -// The container element for specifying the default Object Lock retention settings -// for new objects placed in the specified bucket. +// The container element for optionally specifying the default Object Lock +// retention settings for new objects placed in the specified bucket. +// // - The DefaultRetention settings require both a mode and a period. +// // - The DefaultRetention period can be either Days or Years but you must select // one. You cannot specify Days and Years at the same time. type DefaultRetention struct { @@ -635,10 +731,12 @@ type DefaultRetention struct { // Container for the objects to delete. type Delete struct { - // The object to delete. Directory buckets - For directory buckets, an object - // that's composed entirely of whitespace characters is not supported by the - // DeleteObjects API operation. The request will receive a 400 Bad Request error - // and none of the objects in the request will be deleted. + // The object to delete. + // + // Directory buckets - For directory buckets, an object that's composed entirely + // of whitespace characters is not supported by the DeleteObjects API operation. + // The request will receive a 400 Bad Request error and none of the objects in the + // request will be deleted. // // This member is required. Objects []ObjectIdentifier @@ -656,21 +754,26 @@ type DeletedObject struct { // Indicates whether the specified object version that was permanently deleted was // (true) or was not (false) a delete marker before deletion. In a simple DELETE, // this header indicates whether (true) or not (false) the current version of the - // object is a delete marker. This functionality is not supported for directory - // buckets. + // object is a delete marker. To learn more about delete markers, see [Working with delete markers]. + // + // This functionality is not supported for directory buckets. + // + // [Working with delete markers]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/DeleteMarker.html DeleteMarker *bool // The version ID of the delete marker created as a result of the DELETE // operation. 
If you delete a specific object version, the value returned by this - // header is the version ID of the object version deleted. This functionality is - // not supported for directory buckets. + // header is the version ID of the object version deleted. + // + // This functionality is not supported for directory buckets. DeleteMarkerVersionId *string // The name of the deleted object. Key *string - // The version ID of the deleted object. This functionality is not supported for - // directory buckets. + // The version ID of the deleted object. + // + // This functionality is not supported for directory buckets. VersionId *string noSmithyDocumentSerde @@ -689,7 +792,7 @@ type DeleteMarkerEntry struct { // Date and time when the object was last modified. LastModified *time.Time - // The account that created the delete marker.> + // The account that created the delete marker. Owner *Owner // Version ID of an object. @@ -703,17 +806,20 @@ type DeleteMarkerEntry struct { // DeleteMarkerReplication element. If your Filter includes a Tag element, the // DeleteMarkerReplication Status must be set to Disabled, because Amazon S3 does // not support replicating delete markers for tag-based rules. For an example -// configuration, see Basic Rule Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-config-min-rule-config) -// . For more information about delete marker replication, see Basic Rule -// Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/delete-marker-replication.html) -// . If you are using an earlier version of the replication configuration, Amazon -// S3 handles replication of delete markers differently. For more information, see -// Backward Compatibility (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations) -// . +// configuration, see [Basic Rule Configuration]. +// +// For more information about delete marker replication, see [Basic Rule Configuration]. +// +// If you are using an earlier version of the replication configuration, Amazon S3 +// handles replication of delete markers differently. For more information, see [Backward Compatibility]. +// +// [Basic Rule Configuration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/delete-marker-replication.html +// [Backward Compatibility]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations type DeleteMarkerReplication struct { - // Indicates whether to replicate delete markers. Indicates whether to replicate - // delete markers. + // Indicates whether to replicate delete markers. + // + // Indicates whether to replicate delete markers. Status DeleteMarkerReplicationStatus noSmithyDocumentSerde @@ -723,7 +829,7 @@ type DeleteMarkerReplication struct { // for an Amazon S3 bucket and S3 Replication Time Control (S3 RTC). type Destination struct { - // The Amazon Resource Name (ARN) of the bucket where you want Amazon S3 to store + // The Amazon Resource Name (ARN) of the bucket where you want Amazon S3 to store // the results. // // This member is required. @@ -740,29 +846,32 @@ type Destination struct { // Amazon S3 to change replica ownership to the Amazon Web Services account that // owns the destination bucket by specifying the AccessControlTranslation // property, this is the account ID of the destination bucket owner. 
For more - // information, see Replication Additional Configuration: Changing the Replica - // Owner (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-change-owner.html) - // in the Amazon S3 User Guide. + // information, see [Replication Additional Configuration: Changing the Replica Owner]in the Amazon S3 User Guide. + // + // [Replication Additional Configuration: Changing the Replica Owner]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-change-owner.html Account *string // A container that provides information about encryption. If // SourceSelectionCriteria is specified, you must specify this element. EncryptionConfiguration *EncryptionConfiguration - // A container specifying replication metrics-related settings enabling + // A container specifying replication metrics-related settings enabling // replication metrics and events. Metrics *Metrics - // A container specifying S3 Replication Time Control (S3 RTC), including whether + // A container specifying S3 Replication Time Control (S3 RTC), including whether // S3 RTC is enabled and the time when all objects and operations on objects must // be replicated. Must be specified together with a Metrics block. ReplicationTime *ReplicationTime - // The storage class to use when replicating objects, such as S3 Standard or + // The storage class to use when replicating objects, such as S3 Standard or // reduced redundancy. By default, Amazon S3 uses the storage class of the source - // object to create the object replica. For valid values, see the StorageClass - // element of the PUT Bucket replication (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html) - // action in the Amazon S3 API Reference. + // object to create the object replica. + // + // For valid values, see the StorageClass element of the [PUT Bucket replication] action in the Amazon S3 + // API Reference. + // + // [PUT Bucket replication]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html StorageClass StorageClass noSmithyDocumentSerde @@ -784,8 +893,9 @@ type Encryption struct { // If the encryption type is aws:kms , this optional value specifies the ID of the // symmetric encryption customer managed key to use for encryption of job results. // Amazon S3 only supports symmetric encryption KMS keys. For more information, see - // Asymmetric keys in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) - // in the Amazon Web Services Key Management Service Developer Guide. + // [Asymmetric keys in KMS]in the Amazon Web Services Key Management Service Developer Guide. + // + // [Asymmetric keys in KMS]: https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html KMSKeyId *string noSmithyDocumentSerde @@ -793,14 +903,21 @@ type Encryption struct { // Specifies encryption-related information for an Amazon S3 bucket that is a // destination for replicated objects. +// +// If you're specifying a customer managed KMS key, we recommend using a fully +// qualified KMS key ARN. If you use a KMS key alias instead, then KMS resolves the +// key within the requester’s account. This behavior can result in data that's +// encrypted with a KMS key that belongs to the requester, and not the bucket +// owner. type EncryptionConfiguration struct { // Specifies the ID (Key ARN or Alias ARN) of the customer managed Amazon Web // Services KMS key stored in Amazon Web Services Key Management Service (KMS) for // the destination bucket. 
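// --- Editor's sketch (not part of the vendored diff) ---
// Building the replication Destination documented above, using the fully
// qualified KMS key ARN that the EncryptionConfiguration docs recommend over an
// alias. The ARNs are hypothetical placeholders; the resulting value would slot
// into a ReplicationRule in a bucket replication configuration.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	dest := types.Destination{
		Bucket: aws.String("arn:aws:s3:::example-replica-bucket"), // hypothetical
		EncryptionConfiguration: &types.EncryptionConfiguration{
			// A full key ARN avoids resolving an alias in the requester's account.
			ReplicaKmsKeyID: aws.String("arn:aws:kms:us-west-2:111122223333:key/11111111-2222-3333-4444-555555555555"),
		},
		StorageClass: types.StorageClassStandard,
	}
	fmt.Printf("%+v\n", dest)
}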
Amazon S3 uses this key to encrypt replica objects. // Amazon S3 only supports symmetric encryption KMS keys. For more information, see - // Asymmetric keys in Amazon Web Services KMS (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) - // in the Amazon Web Services Key Management Service Developer Guide. + // [Asymmetric keys in Amazon Web Services KMS]in the Amazon Web Services Key Management Service Developer Guide. + // + // [Asymmetric keys in Amazon Web Services KMS]: https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html ReplicaKmsKeyID *string noSmithyDocumentSerde @@ -819,414 +936,766 @@ type Error struct { // The error code is a string that uniquely identifies an error condition. It is // meant to be read and understood by programs that detect and handle errors by // type. The following is a list of Amazon S3 error codes. For more information, - // see Error responses (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html) - // . + // see [Error responses]. + // // - Code: AccessDenied + // // - Description: Access Denied + // // - HTTP Status Code: 403 Forbidden + // // - SOAP Fault Code Prefix: Client + // // - Code: AccountProblem + // // - Description: There is a problem with your Amazon Web Services account that // prevents the action from completing successfully. Contact Amazon Web Services // Support for further assistance. + // // - HTTP Status Code: 403 Forbidden + // // - SOAP Fault Code Prefix: Client + // // - Code: AllAccessDisabled + // // - Description: All access to this Amazon S3 resource has been disabled. // Contact Amazon Web Services Support for further assistance. + // // - HTTP Status Code: 403 Forbidden + // // - SOAP Fault Code Prefix: Client + // // - Code: AmbiguousGrantByEmailAddress + // // - Description: The email address you provided is associated with more than // one account. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: AuthorizationHeaderMalformed + // // - Description: The authorization header you provided is invalid. + // // - HTTP Status Code: 400 Bad Request + // // - HTTP Status Code: N/A + // // - Code: BadDigest + // // - Description: The Content-MD5 you specified did not match what we received. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: BucketAlreadyExists + // // - Description: The requested bucket name is not available. The bucket // namespace is shared by all users of the system. Please select a different name // and try again. + // // - HTTP Status Code: 409 Conflict + // // - SOAP Fault Code Prefix: Client + // // - Code: BucketAlreadyOwnedByYou + // // - Description: The bucket you tried to create already exists, and you own it. // Amazon S3 returns this error in all Amazon Web Services Regions except in the // North Virginia Region. For legacy compatibility, if you re-create an existing // bucket that you already own in the North Virginia Region, Amazon S3 returns 200 // OK and resets the bucket access control lists (ACLs). + // // - Code: 409 Conflict (in all Regions except the North Virginia Region) + // // - SOAP Fault Code Prefix: Client + // // - Code: BucketNotEmpty + // // - Description: The bucket you tried to delete is not empty. + // // - HTTP Status Code: 409 Conflict + // // - SOAP Fault Code Prefix: Client + // // - Code: CredentialsNotSupported + // // - Description: This request does not support credentials. 
+ // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: CrossLocationLoggingProhibited + // // - Description: Cross-location logging not allowed. Buckets in one geographic // location cannot log information to a bucket in another location. + // // - HTTP Status Code: 403 Forbidden + // // - SOAP Fault Code Prefix: Client + // // - Code: EntityTooSmall + // // - Description: Your proposed upload is smaller than the minimum allowed // object size. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: EntityTooLarge + // // - Description: Your proposed upload exceeds the maximum allowed object size. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: ExpiredToken + // // - Description: The provided token has expired. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: IllegalVersioningConfigurationException + // // - Description: Indicates that the versioning configuration specified in the // request is invalid. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: IncompleteBody + // // - Description: You did not provide the number of bytes specified by the // Content-Length HTTP header + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: IncorrectNumberOfFilesInPostRequest + // // - Description: POST requires exactly one file upload per request. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: InlineDataTooLarge + // // - Description: Inline data exceeds the maximum allowed size. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: InternalError + // // - Description: We encountered an internal error. Please try again. + // // - HTTP Status Code: 500 Internal Server Error + // // - SOAP Fault Code Prefix: Server + // // - Code: InvalidAccessKeyId + // // - Description: The Amazon Web Services access key ID you provided does not // exist in our records. + // // - HTTP Status Code: 403 Forbidden + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidAddressingHeader + // // - Description: You must specify the Anonymous role. + // // - HTTP Status Code: N/A + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidArgument + // // - Description: Invalid Argument + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidBucketName + // // - Description: The specified bucket is not valid. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidBucketState + // // - Description: The request is not valid with the current state of the bucket. + // // - HTTP Status Code: 409 Conflict + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidDigest + // // - Description: The Content-MD5 you specified is not valid. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidEncryptionAlgorithmError + // // - Description: The encryption request you specified is not valid. The valid // value is AES256. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidLocationConstraint + // // - Description: The specified location constraint is not valid. 
For more - // information about Regions, see How to Select a Region for Your Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro) - // . + // information about Regions, see [How to Select a Region for Your Buckets]. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidObjectState + // // - Description: The action is not valid for the current state of the object. + // // - HTTP Status Code: 403 Forbidden + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidPart + // // - Description: One or more of the specified parts could not be found. The // part might not have been uploaded, or the specified entity tag might not have // matched the part's entity tag. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidPartOrder + // // - Description: The list of parts was not in ascending order. Parts list must // be specified in order by part number. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidPayer + // // - Description: All access to this object has been disabled. Please contact // Amazon Web Services Support for further assistance. + // // - HTTP Status Code: 403 Forbidden + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidPolicyDocument + // // - Description: The content of the form does not meet the conditions specified // in the policy document. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidRange + // // - Description: The requested range cannot be satisfied. + // // - HTTP Status Code: 416 Requested Range Not Satisfiable + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidRequest + // // - Description: Please use AWS4-HMAC-SHA256 . + // // - HTTP Status Code: 400 Bad Request + // // - Code: N/A + // // - Code: InvalidRequest + // // - Description: SOAP requests must be made over an HTTPS connection. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidRequest + // // - Description: Amazon S3 Transfer Acceleration is not supported for buckets // with non-DNS compliant names. + // // - HTTP Status Code: 400 Bad Request + // // - Code: N/A + // // - Code: InvalidRequest + // // - Description: Amazon S3 Transfer Acceleration is not supported for buckets // with periods (.) in their names. + // // - HTTP Status Code: 400 Bad Request + // // - Code: N/A + // // - Code: InvalidRequest + // // - Description: Amazon S3 Transfer Accelerate endpoint only supports virtual // style requests. + // // - HTTP Status Code: 400 Bad Request + // // - Code: N/A + // // - Code: InvalidRequest - // - Description: Amazon S3 Transfer Accelerate is not configured on this - // bucket. + // + // - Description: Amazon S3 Transfer Accelerate is not configured on this bucket. + // // - HTTP Status Code: 400 Bad Request + // // - Code: N/A + // // - Code: InvalidRequest + // // - Description: Amazon S3 Transfer Accelerate is disabled on this bucket. + // // - HTTP Status Code: 400 Bad Request + // // - Code: N/A + // // - Code: InvalidRequest + // // - Description: Amazon S3 Transfer Acceleration is not supported on this // bucket. Contact Amazon Web Services Support for more information. 
+ // // - HTTP Status Code: 400 Bad Request + // // - Code: N/A + // // - Code: InvalidRequest + // // - Description: Amazon S3 Transfer Acceleration cannot be enabled on this // bucket. Contact Amazon Web Services Support for more information. + // // - HTTP Status Code: 400 Bad Request + // // - Code: N/A + // // - Code: InvalidSecurity + // // - Description: The provided security credentials are not valid. + // // - HTTP Status Code: 403 Forbidden + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidSOAPRequest + // // - Description: The SOAP request body is invalid. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidStorageClass + // // - Description: The storage class you specified is not valid. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidTargetBucketForLogging + // // - Description: The target bucket for logging does not exist, is not owned by // you, or does not have the appropriate grants for the log-delivery group. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidToken + // // - Description: The provided token is malformed or otherwise invalid. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: InvalidURI + // // - Description: Couldn't parse the specified URI. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: KeyTooLongError + // // - Description: Your key is too long. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: MalformedACLError + // // - Description: The XML you provided was not well-formed or did not validate // against our published schema. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: MalformedPOSTRequest + // // - Description: The body of your POST request is not well-formed // multipart/form-data. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: MalformedXML + // // - Description: This happens when the user sends malformed XML (XML that // doesn't conform to the published XSD) for the configuration. The error message // is, "The XML you provided was not well-formed or did not validate against our // published schema." + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: MaxMessageLengthExceeded + // // - Description: Your request was too big. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: MaxPostPreDataLengthExceededError + // // - Description: Your POST request fields preceding the upload file were too // large. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: MetadataTooLarge - // - Description: Your metadata headers exceed the maximum allowed metadata - // size. + // + // - Description: Your metadata headers exceed the maximum allowed metadata size. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: MethodNotAllowed + // // - Description: The specified method is not allowed against this resource. + // // - HTTP Status Code: 405 Method Not Allowed + // // - SOAP Fault Code Prefix: Client + // // - Code: MissingAttachment + // // - Description: A SOAP attachment was expected, but none were found. 
+ // // - HTTP Status Code: N/A + // // - SOAP Fault Code Prefix: Client + // // - Code: MissingContentLength + // // - Description: You must provide the Content-Length HTTP header. + // // - HTTP Status Code: 411 Length Required + // // - SOAP Fault Code Prefix: Client + // // - Code: MissingRequestBodyError + // // - Description: This happens when the user sends an empty XML document as a // request. The error message is, "Request body is empty." + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: MissingSecurityElement + // // - Description: The SOAP 1.1 request is missing a security element. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: MissingSecurityHeader + // // - Description: Your request is missing a required header. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: NoLoggingStatusForKey + // // - Description: There is no such thing as a logging status subresource for a // key. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: NoSuchBucket + // // - Description: The specified bucket does not exist. + // // - HTTP Status Code: 404 Not Found + // // - SOAP Fault Code Prefix: Client + // // - Code: NoSuchBucketPolicy + // // - Description: The specified bucket does not have a bucket policy. + // // - HTTP Status Code: 404 Not Found + // // - SOAP Fault Code Prefix: Client + // // - Code: NoSuchKey + // // - Description: The specified key does not exist. + // // - HTTP Status Code: 404 Not Found + // // - SOAP Fault Code Prefix: Client + // // - Code: NoSuchLifecycleConfiguration + // // - Description: The lifecycle configuration does not exist. + // // - HTTP Status Code: 404 Not Found + // // - SOAP Fault Code Prefix: Client + // // - Code: NoSuchUpload + // // - Description: The specified multipart upload does not exist. The upload ID // might be invalid, or the multipart upload might have been aborted or completed. + // // - HTTP Status Code: 404 Not Found + // // - SOAP Fault Code Prefix: Client + // // - Code: NoSuchVersion + // // - Description: Indicates that the version ID specified in the request does // not match an existing version. + // // - HTTP Status Code: 404 Not Found + // // - SOAP Fault Code Prefix: Client + // // - Code: NotImplemented + // // - Description: A header you provided implies functionality that is not // implemented. + // // - HTTP Status Code: 501 Not Implemented + // // - SOAP Fault Code Prefix: Server + // // - Code: NotSignedUp + // // - Description: Your account is not signed up for the Amazon S3 service. You // must sign up before you can use Amazon S3. You can sign up at the following URL: - // Amazon S3 (http://aws.amazon.com/s3) + // [Amazon S3] + // // - HTTP Status Code: 403 Forbidden + // // - SOAP Fault Code Prefix: Client + // // - Code: OperationAborted + // // - Description: A conflicting conditional action is currently in progress // against this resource. Try again. + // // - HTTP Status Code: 409 Conflict + // // - SOAP Fault Code Prefix: Client + // // - Code: PermanentRedirect + // // - Description: The bucket you are attempting to access must be addressed // using the specified endpoint. Send all future requests to this endpoint. 
+ // // - HTTP Status Code: 301 Moved Permanently + // // - SOAP Fault Code Prefix: Client + // // - Code: PreconditionFailed + // // - Description: At least one of the preconditions you specified did not hold. + // // - HTTP Status Code: 412 Precondition Failed + // // - SOAP Fault Code Prefix: Client + // // - Code: Redirect + // // - Description: Temporary redirect. + // // - HTTP Status Code: 307 Moved Temporarily + // // - SOAP Fault Code Prefix: Client + // // - Code: RestoreAlreadyInProgress + // // - Description: Object restore is already in progress. + // // - HTTP Status Code: 409 Conflict + // // - SOAP Fault Code Prefix: Client + // // - Code: RequestIsNotMultiPartContent + // // - Description: Bucket POST must be of the enclosure-type multipart/form-data. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: RequestTimeout + // // - Description: Your socket connection to the server was not read from or // written to within the timeout period. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: RequestTimeTooSkewed + // // - Description: The difference between the request time and the server's time // is too large. + // // - HTTP Status Code: 403 Forbidden + // // - SOAP Fault Code Prefix: Client + // // - Code: RequestTorrentOfBucketError + // // - Description: Requesting the torrent file of a bucket is not permitted. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: SignatureDoesNotMatch + // // - Description: The request signature we calculated does not match the // signature you provided. Check your Amazon Web Services secret access key and - // signing method. For more information, see REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html) - // and SOAP Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/SOAPAuthentication.html) - // for details. + // signing method. For more information, see [REST Authentication]and [SOAP Authentication]for details. + // // - HTTP Status Code: 403 Forbidden + // // - SOAP Fault Code Prefix: Client + // // - Code: ServiceUnavailable + // // - Description: Service is unable to handle request. + // // - HTTP Status Code: 503 Service Unavailable + // // - SOAP Fault Code Prefix: Server + // // - Code: SlowDown + // // - Description: Reduce your request rate. + // // - HTTP Status Code: 503 Slow Down + // // - SOAP Fault Code Prefix: Server + // // - Code: TemporaryRedirect + // // - Description: You are being redirected to the bucket while DNS updates. + // // - HTTP Status Code: 307 Moved Temporarily + // // - SOAP Fault Code Prefix: Client + // // - Code: TokenRefreshRequired + // // - Description: The provided token must be refreshed. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: TooManyBuckets + // // - Description: You have attempted to create more buckets than allowed. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: UnexpectedContent + // // - Description: This request does not support content. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: UnresolvableGrantByEmailAddress + // // - Description: The email address you provided does not match any account on // record. 
+ // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // // - Code: UserKeyMustBeSpecified + // // - Description: The bucket POST must contain the specified field name. If it // is specified, check the order of the fields. + // // - HTTP Status Code: 400 Bad Request + // // - SOAP Fault Code Prefix: Client + // + // [How to Select a Region for Your Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro + // [Error responses]: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html + // [REST Authentication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html + // [Amazon S3]: http://aws.amazon.com/s3 + // [SOAP Authentication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/SOAPAuthentication.html Code *string // The error key. @@ -1240,21 +1709,104 @@ type Error struct { // error message. Message *string - // The version ID of the error. This functionality is not supported for directory - // buckets. + // The version ID of the error. + // + // This functionality is not supported for directory buckets. VersionId *string noSmithyDocumentSerde } +// If the CreateBucketMetadataTableConfiguration request succeeds, but S3 +// +// Metadata was unable to create the table, this structure contains the error code +// and error message. +type ErrorDetails struct { + + // If the CreateBucketMetadataTableConfiguration request succeeds, but S3 + // Metadata was unable to create the table, this structure contains the error code. + // The possible error codes and error messages are as follows: + // + // - AccessDeniedCreatingResources - You don't have sufficient permissions to + // create the required resources. Make sure that you have + // s3tables:CreateNamespace , s3tables:CreateTable , s3tables:GetTable and + // s3tables:PutTablePolicy permissions, and then try again. To create a new + // metadata table, you must delete the metadata configuration for this bucket, and + // then create a new metadata configuration. + // + // - AccessDeniedWritingToTable - Unable to write to the metadata table because + // of missing resource permissions. To fix the resource policy, Amazon S3 needs to + // create a new metadata table. To create a new metadata table, you must delete the + // metadata configuration for this bucket, and then create a new metadata + // configuration. + // + // - DestinationTableNotFound - The destination table doesn't exist. To create a + // new metadata table, you must delete the metadata configuration for this bucket, + // and then create a new metadata configuration. + // + // - ServerInternalError - An internal error has occurred. To create a new + // metadata table, you must delete the metadata configuration for this bucket, and + // then create a new metadata configuration. + // + // - TableAlreadyExists - The table that you specified already exists in the + // table bucket's namespace. Specify a different table name. To create a new + // metadata table, you must delete the metadata configuration for this bucket, and + // then create a new metadata configuration. + // + // - TableBucketNotFound - The table bucket that you specified doesn't exist in + // this Amazon Web Services Region and account. Create or choose a different table + // bucket. To create a new metadata table, you must delete the metadata + // configuration for this bucket, and then create a new metadata configuration. 
+ ErrorCode *string + + // If the CreateBucketMetadataTableConfiguration request succeeds, but S3 + // Metadata was unable to create the table, this structure contains the error + // message. The possible error codes and error messages are as follows: + // + // - AccessDeniedCreatingResources - You don't have sufficient permissions to + // create the required resources. Make sure that you have + // s3tables:CreateNamespace , s3tables:CreateTable , s3tables:GetTable and + // s3tables:PutTablePolicy permissions, and then try again. To create a new + // metadata table, you must delete the metadata configuration for this bucket, and + // then create a new metadata configuration. + // + // - AccessDeniedWritingToTable - Unable to write to the metadata table because + // of missing resource permissions. To fix the resource policy, Amazon S3 needs to + // create a new metadata table. To create a new metadata table, you must delete the + // metadata configuration for this bucket, and then create a new metadata + // configuration. + // + // - DestinationTableNotFound - The destination table doesn't exist. To create a + // new metadata table, you must delete the metadata configuration for this bucket, + // and then create a new metadata configuration. + // + // - ServerInternalError - An internal error has occurred. To create a new + // metadata table, you must delete the metadata configuration for this bucket, and + // then create a new metadata configuration. + // + // - TableAlreadyExists - The table that you specified already exists in the + // table bucket's namespace. Specify a different table name. To create a new + // metadata table, you must delete the metadata configuration for this bucket, and + // then create a new metadata configuration. + // + // - TableBucketNotFound - The table bucket that you specified doesn't exist in + // this Amazon Web Services Region and account. Create or choose a different table + // bucket. To create a new metadata table, you must delete the metadata + // configuration for this bucket, and then create a new metadata configuration. + ErrorMessage *string + + noSmithyDocumentSerde +} + // The error information. type ErrorDocument struct { - // The object key name to use when a 4XX class error occurs. Replacement must be - // made for object keys containing special characters (such as carriage returns) - // when using XML requests. For more information, see XML related object key - // constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints) - // . + // The object key name to use when a 4XX class error occurs. + // + // Replacement must be made for object keys containing special characters (such as + // carriage returns) when using XML requests. For more information, see [XML related object key constraints]. + // + // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints // // This member is required. Key *string @@ -1267,9 +1819,12 @@ type EventBridgeConfiguration struct { noSmithyDocumentSerde } -// Optional configuration to replicate existing source bucket objects. For more -// information, see Replicating Existing Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-what-is-isnot-replicated.html#existing-object-replication) -// in the Amazon S3 User Guide. +// Optional configuration to replicate existing source bucket objects. +// +// This parameter is no longer supported. 
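// --- Editor's sketch (not part of the vendored diff) ---
// One hedged way to consume the error codes catalogued above from Go: modeled
// errors (e.g. *types.NoSuchKey) via errors.As, and the generic smithy-go
// APIError for everything else. Bucket and key names are hypothetical.
package main

import (
	"context"
	"errors"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
	"github.com/aws/smithy-go"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)
	_, err = client.GetObject(ctx, &s3.GetObjectInput{
		Bucket: aws.String("example-bucket"),
		Key:    aws.String("missing-key"),
	})
	var nsk *types.NoSuchKey
	var apiErr smithy.APIError
	switch {
	case errors.As(err, &nsk):
		fmt.Println("NoSuchKey: the specified key does not exist")
	case errors.As(err, &apiErr):
		// Code/Message correspond to the catalogue above (AccessDenied, etc.).
		fmt.Println(apiErr.ErrorCode(), apiErr.ErrorMessage())
	}
}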
To replicate existing objects, see [Replicating existing objects with S3 Batch Replication] in +// the Amazon S3 User Guide. +// +// [Replicating existing objects with S3 Batch Replication]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-batch-replication-batch.html type ExistingObjectReplication struct { // Specifies whether Amazon S3 replicates existing source bucket objects. @@ -1293,9 +1848,10 @@ type FilterRule struct { // The object key name prefix or suffix identifying one or more objects to which // the filtering rule applies. The maximum length is 1,024 characters. Overlapping - // prefixes and suffixes are not supported. For more information, see Configuring - // Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) - // in the Amazon S3 User Guide. + // prefixes and suffixes are not supported. For more information, see [Configuring Event Notifications]in the + // Amazon S3 User Guide. + // + // [Configuring Event Notifications]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html Name FilterRuleName // The value that the filter searches for in object key names. @@ -1304,6 +1860,36 @@ type FilterRule struct { noSmithyDocumentSerde } +// The metadata table configuration for a general purpose bucket. +type GetBucketMetadataTableConfigurationResult struct { + + // The metadata table configuration for a general purpose bucket. + // + // This member is required. + MetadataTableConfigurationResult *MetadataTableConfigurationResult + + // The status of the metadata table. The status values are: + // + // - CREATING - The metadata table is in the process of being created in the + // specified table bucket. + // + // - ACTIVE - The metadata table has been created successfully and records are + // being delivered to the table. + // + // - FAILED - Amazon S3 is unable to create the metadata table, or Amazon S3 is + // unable to deliver records. See ErrorDetails for details. + // + // This member is required. + Status *string + + // If the CreateBucketMetadataTableConfiguration request succeeds, but S3 + // Metadata was unable to create the table, this structure contains the error code + // and error message. + Error *ErrorDetails + + noSmithyDocumentSerde +} + // A collection of parts associated with a multipart upload. type GetObjectAttributesParts struct { @@ -1325,10 +1911,12 @@ type GetObjectAttributesParts struct { // A container for elements related to a particular part. A response can contain // zero or more Parts elements. + // // - General purpose buckets - For GetObjectAttributes , if a additional checksum // (including x-amz-checksum-crc32 , x-amz-checksum-crc32c , x-amz-checksum-sha1 // , or x-amz-checksum-sha256 ) isn't applied to the object specified in the // request, the response doesn't return Part . + // // - Directory buckets - For GetObjectAttributes , no matter whether a additional // checksum is applied to the object specified in the request, the response returns // Part . @@ -1374,19 +1962,31 @@ type Grantee struct { // Screen name of the grantee. DisplayName *string - // Email address of the grantee. Using email addresses to specify a grantee is - // only supported in the following Amazon Web Services Regions: + // Email address of the grantee. + // + // Using email addresses to specify a grantee is only supported in the following + // Amazon Web Services Regions: + // // - US East (N. Virginia) + // // - US West (N. 
California) + // // - US West (Oregon) + // // - Asia Pacific (Singapore) + // // - Asia Pacific (Sydney) + // // - Asia Pacific (Tokyo) + // // - Europe (Ireland) + // // - South America (São Paulo) - // For a list of all the Amazon S3 supported Regions and endpoints, see Regions - // and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) - // in the Amazon Web Services General Reference. + // + // For a list of all the Amazon S3 supported Regions and endpoints, see [Regions and Endpoints] in the + // Amazon Web Services General Reference. + // + // [Regions and Endpoints]: https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region EmailAddress *string // The canonical user ID of the grantee. @@ -1402,13 +2002,15 @@ type Grantee struct { type IndexDocument struct { // A suffix that is appended to a request that is for a directory on the website - // endpoint (for example,if the suffix is index.html and you make a request to - // samplebucket/images/ the data that is returned will be for the object with the - // key name images/index.html) The suffix must not be empty and must not include a - // slash character. Replacement must be made for object keys containing special - // characters (such as carriage returns) when using XML requests. For more - // information, see XML related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints) - // . + // endpoint. (For example, if the suffix is index.html and you make a request to + // samplebucket/images/ , the data that is returned will be for the object with the + // key name images/index.html .) The suffix must not be empty and must not include + // a slash character. + // + // Replacement must be made for object keys containing special characters (such as + // carriage returns) when using XML requests. For more information, see [XML related object key constraints]. + // + // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints // // This member is required. Suffix *string @@ -1419,12 +2021,14 @@ type IndexDocument struct { // Container element that identifies who initiated the multipart upload. type Initiator struct { - // Name of the Principal. This functionality is not supported for directory - // buckets. + // Name of the Principal. + // + // This functionality is not supported for directory buckets. DisplayName *string // If the principal is an Amazon Web Services account, it provides the Canonical // User ID. If the principal is an IAM User, it provides a user ARN value. + // // Directory buckets - If the principal is an Amazon Web Services account, it // provides the Amazon Web Services account ID. If the principal is an IAM User, it // provides a user ARN value. @@ -1467,10 +2071,11 @@ type IntelligentTieringAndOperator struct { noSmithyDocumentSerde } -// Specifies the S3 Intelligent-Tiering configuration for an Amazon S3 bucket. For -// information about the S3 Intelligent-Tiering storage class, see Storage class -// for automatically optimizing frequently and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access) -// . +// Specifies the S3 Intelligent-Tiering configuration for an Amazon S3 bucket. +// +// For information about the S3 Intelligent-Tiering storage class, see [Storage class for automatically optimizing frequently and infrequently accessed objects]. 
+// +// [Storage class for automatically optimizing frequently and infrequently accessed objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access type IntelligentTieringConfiguration struct { // The ID used to identify the S3 Intelligent-Tiering configuration. @@ -1505,10 +2110,12 @@ type IntelligentTieringFilter struct { And *IntelligentTieringAndOperator // An object key name prefix that identifies the subset of objects to which the - // rule applies. Replacement must be made for object keys containing special - // characters (such as carriage returns) when using XML requests. For more - // information, see XML related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints) - // . + // rule applies. + // + // Replacement must be made for object keys containing special characters (such as + // carriage returns) when using XML requests. For more information, see [XML related object key constraints]. + // + // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints Prefix *string // A container of a key value name pair. @@ -1518,8 +2125,9 @@ type IntelligentTieringFilter struct { } // Specifies the inventory configuration for an Amazon S3 bucket. For more -// information, see GET Bucket inventory (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETInventoryConfig.html) -// in the Amazon S3 API Reference. +// information, see [GET Bucket inventory]in the Amazon S3 API Reference. +// +// [GET Bucket inventory]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETInventoryConfig.html type InventoryConfiguration struct { // Contains information about where to publish the inventory results. @@ -1614,9 +2222,10 @@ type InventoryS3BucketDestination struct { Format InventoryFormat // The account ID that owns the destination S3 bucket. If no account ID is - // provided, the owner is not validated before exporting data. Although this value - // is optional, we strongly recommend that you set it to help prevent problems if - // the destination bucket ownership changes. + // provided, the owner is not validated before exporting data. + // + // Although this value is optional, we strongly recommend that you set it to help + // prevent problems if the destination bucket ownership changes. AccountId *string // Contains the type of server-side encryption used to encrypt the inventory @@ -1663,8 +2272,9 @@ type JSONOutput struct { type LambdaFunctionConfiguration struct { // The Amazon S3 bucket event for which to invoke the Lambda function. For more - // information, see Supported Event Types (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) - // in the Amazon S3 User Guide. + // information, see [Supported Event Types]in the Amazon S3 User Guide. + // + // [Supported Event Types]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html // // This member is required. Events []Event @@ -1676,8 +2286,9 @@ type LambdaFunctionConfiguration struct { LambdaFunctionArn *string // Specifies object key name filtering rules. For information about key name - // filtering, see Configuring event notifications using object key name filtering (https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html) - // in the Amazon S3 User Guide. 
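// --- Editor's sketch (not part of the vendored diff) ---
// A possible use of the IntelligentTieringConfiguration type documented above;
// the bucket name, configuration ID, and 90-day archive tiering are hypothetical.
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)
	_, err = client.PutBucketIntelligentTieringConfiguration(ctx,
		&s3.PutBucketIntelligentTieringConfigurationInput{
			Bucket: aws.String("example-bucket"),
			Id:     aws.String("archive-after-90d"),
			IntelligentTieringConfiguration: &types.IntelligentTieringConfiguration{
				Id:     aws.String("archive-after-90d"),
				Status: types.IntelligentTieringStatusEnabled,
				Tierings: []types.Tiering{{
					AccessTier: types.IntelligentTieringAccessTierArchiveAccess,
					Days:       aws.Int32(90),
				}},
			},
		})
	if err != nil {
		log.Fatal(err)
	}
}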
+ // filtering, see [Configuring event notifications using object key name filtering]in the Amazon S3 User Guide. + // + // [Configuring event notifications using object key name filtering]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html Filter *NotificationConfigurationFilter // An optional unique identifier for configurations in a notification @@ -1687,13 +2298,18 @@ type LambdaFunctionConfiguration struct { noSmithyDocumentSerde } -// Container for the expiration for the lifecycle of the object. For more -// information see, Managing your storage lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html) -// in the Amazon S3 User Guide. +// Container for the expiration for the lifecycle of the object. +// +// For more information see, [Managing your storage lifecycle] in the Amazon S3 User Guide. +// +// [Managing your storage lifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html type LifecycleExpiration struct { // Indicates at what date the object is to be moved or deleted. The date value // must conform to the ISO 8601 format. The time is always midnight UTC. + // + // This parameter applies to general purpose buckets only. It is not supported for + // directory bucket lifecycle configurations. Date *time.Time // Indicates the lifetime, in days, of the objects that are subject to the rule. @@ -1704,14 +2320,19 @@ type LifecycleExpiration struct { // versions. If set to true, the delete marker will be expired; if set to false the // policy takes no action. This cannot be specified with Days or Date in a // Lifecycle Expiration Policy. + // + // This parameter applies to general purpose buckets only. It is not supported for + // directory bucket lifecycle configurations. ExpiredObjectDeleteMarker *bool noSmithyDocumentSerde } -// A lifecycle rule for individual objects in an Amazon S3 bucket. For more -// information see, Managing your storage lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html) -// in the Amazon S3 User Guide. +// A lifecycle rule for individual objects in an Amazon S3 bucket. +// +// For more information see, [Managing your storage lifecycle] in the Amazon S3 User Guide. +// +// [Managing your storage lifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html type LifecycleRule struct { // If 'Enabled', the rule is currently being applied. If 'Disabled', the rule is @@ -1722,9 +2343,9 @@ type LifecycleRule struct { // Specifies the days since the initiation of an incomplete multipart upload that // Amazon S3 will wait before permanently removing all parts of the upload. For - // more information, see Aborting Incomplete Multipart Uploads Using a Bucket - // Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) - // in the Amazon S3 User Guide. + // more information, see [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]in the Amazon S3 User Guide. 
+ // + // [Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload // Specifies the expiration for the lifecycle of the object in the form of date, @@ -1734,7 +2355,9 @@ type LifecycleRule struct { // The Filter is used to identify objects that a Lifecycle Rule applies to. A // Filter must have exactly one of Prefix , Tag , or And specified. Filter is // required if the LifecycleRule does not contain a Prefix element. - Filter LifecycleRuleFilter + // + // Tag filters are not supported for directory buckets. + Filter *LifecycleRuleFilter // Unique identifier for the rule. The value cannot be longer than 255 characters. ID *string @@ -1744,6 +2367,9 @@ type LifecycleRule struct { // configuration action on a bucket that has versioning enabled (or suspended) to // request that Amazon S3 delete noncurrent object versions at a specific period in // the object's lifetime. + // + // This parameter applies to general purpose buckets only. It is not supported for + // directory bucket lifecycle configurations. NoncurrentVersionExpiration *NoncurrentVersionExpiration // Specifies the transition rule for the lifecycle rule that describes when @@ -1751,18 +2377,26 @@ type LifecycleRule struct { // versioning-enabled (or versioning is suspended), you can set this action to // request that Amazon S3 transition noncurrent object versions to a specific // storage class at a set period in the object's lifetime. + // + // This parameter applies to general purpose buckets only. It is not supported for + // directory bucket lifecycle configurations. NoncurrentVersionTransitions []NoncurrentVersionTransition // Prefix identifying one or more objects to which the rule applies. This is no - // longer used; use Filter instead. Replacement must be made for object keys - // containing special characters (such as carriage returns) when using XML - // requests. For more information, see XML related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints) - // . + // longer used; use Filter instead. + // + // Replacement must be made for object keys containing special characters (such as + // carriage returns) when using XML requests. For more information, see [XML related object key constraints]. + // + // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints // // Deprecated: This member has been deprecated. Prefix *string // Specifies when an Amazon S3 object transitions to a specified storage class. + // + // This parameter applies to general purpose buckets only. It is not supported for + // directory bucket lifecycle configurations. Transitions []Transition noSmithyDocumentSerde @@ -1793,79 +2427,51 @@ type LifecycleRuleAndOperator struct { // Filter can have exactly one of Prefix , Tag , ObjectSizeGreaterThan , // ObjectSizeLessThan , or And specified. If the Filter element is left empty, the // Lifecycle Rule applies to all objects in the bucket. 
-// -// The following types satisfy this interface: -// -// LifecycleRuleFilterMemberAnd -// LifecycleRuleFilterMemberObjectSizeGreaterThan -// LifecycleRuleFilterMemberObjectSizeLessThan -// LifecycleRuleFilterMemberPrefix -// LifecycleRuleFilterMemberTag -type LifecycleRuleFilter interface { - isLifecycleRuleFilter() -} - -// This is used in a Lifecycle Rule Filter to apply a logical AND to two or more -// predicates. The Lifecycle Rule will apply to any object matching all of the -// predicates configured inside the And operator. -type LifecycleRuleFilterMemberAnd struct { - Value LifecycleRuleAndOperator - - noSmithyDocumentSerde -} - -func (*LifecycleRuleFilterMemberAnd) isLifecycleRuleFilter() {} - -// Minimum object size to which the rule applies. -type LifecycleRuleFilterMemberObjectSizeGreaterThan struct { - Value int64 - - noSmithyDocumentSerde -} - -func (*LifecycleRuleFilterMemberObjectSizeGreaterThan) isLifecycleRuleFilter() {} - -// Maximum object size to which the rule applies. -type LifecycleRuleFilterMemberObjectSizeLessThan struct { - Value int64 - - noSmithyDocumentSerde -} +type LifecycleRuleFilter struct { -func (*LifecycleRuleFilterMemberObjectSizeLessThan) isLifecycleRuleFilter() {} + // This is used in a Lifecycle Rule Filter to apply a logical AND to two or more + // predicates. The Lifecycle Rule will apply to any object matching all of the + // predicates configured inside the And operator. + And *LifecycleRuleAndOperator -// Prefix identifying one or more objects to which the rule applies. Replacement -// must be made for object keys containing special characters (such as carriage -// returns) when using XML requests. For more information, see XML related object -// key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints) -// . -type LifecycleRuleFilterMemberPrefix struct { - Value string + // Minimum object size to which the rule applies. + ObjectSizeGreaterThan *int64 - noSmithyDocumentSerde -} + // Maximum object size to which the rule applies. + ObjectSizeLessThan *int64 -func (*LifecycleRuleFilterMemberPrefix) isLifecycleRuleFilter() {} + // Prefix identifying one or more objects to which the rule applies. + // + // Replacement must be made for object keys containing special characters (such as + // carriage returns) when using XML requests. For more information, see [XML related object key constraints]. + // + // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints + Prefix *string -// This tag must exist in the object's tag set in order for the rule to apply. -type LifecycleRuleFilterMemberTag struct { - Value Tag + // This tag must exist in the object's tag set in order for the rule to apply. + // + // This parameter applies to general purpose buckets only. It is not supported for + // directory bucket lifecycle configurations. + Tag *Tag noSmithyDocumentSerde } -func (*LifecycleRuleFilterMemberTag) isLifecycleRuleFilter() {} - -// Specifies the location where the bucket will be created. For directory buckets, -// the location type is Availability Zone. For more information about directory -// buckets, see Directory buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html) -// in the Amazon S3 User Guide. This functionality is only supported by directory -// buckets. +// Specifies the location where the bucket will be created. 
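// --- Editor's note (not part of the vendored diff) ---
// The hunk above is a breaking change in this major update: LifecycleRuleFilter
// goes from a union interface (LifecycleRuleFilterMemberPrefix, ...MemberTag,
// and so on) to a plain struct, and LifecycleRule.Filter becomes a
// *LifecycleRuleFilter. A migration sketch; the rule ID, prefix, and expiration
// values are hypothetical.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	// Old (union): Filter: &types.LifecycleRuleFilterMemberPrefix{Value: "logs/"}
	// New (struct): set the one predicate you need as a field.
	rule := types.LifecycleRule{
		ID:         aws.String("expire-logs"),
		Status:     types.ExpirationStatusEnabled,
		Filter:     &types.LifecycleRuleFilter{Prefix: aws.String("logs/")},
		Expiration: &types.LifecycleExpiration{Days: aws.Int32(30)},
	}
	fmt.Printf("%+v\n", rule)
}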
+// +// For directory buckets, the location type is Availability Zone or Local Zone. +// For more information about directory buckets, see [Working with directory buckets]in the Amazon S3 User Guide. +// +// This functionality is only supported by directory buckets. +// +// [Working with directory buckets]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html type LocationInfo struct { - // The name of the location where the bucket will be created. For directory - // buckets, the name of the location is the AZ ID of the Availability Zone where - // the bucket will be created. An example AZ ID value is usw2-az1 . + // The name of the location where the bucket will be created. + // + // For directory buckets, the name of the location is the Zone ID of the + // Availability Zone (AZ) or Local Zone (LZ) where the bucket will be created. An + // example AZ ID value is usw2-az1 . Name *string // The type of location where the bucket will be created. @@ -1875,8 +2481,10 @@ type LocationInfo struct { } // Describes where logs are stored and the prefix that Amazon S3 assigns to all -// log object keys for a bucket. For more information, see PUT Bucket logging (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) -// in the Amazon S3 API Reference. +// log object keys for a bucket. For more information, see [PUT Bucket logging]in the Amazon S3 API +// Reference. +// +// [PUT Bucket logging]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html type LoggingEnabled struct { // Specifies the bucket where you want Amazon S3 to store server access logs. You @@ -1896,10 +2504,12 @@ type LoggingEnabled struct { // This member is required. TargetPrefix *string - // Container for granting information. Buckets that use the bucket owner enforced - // setting for Object Ownership don't support target grants. For more information, - // see Permissions for server access log delivery (https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general) - // in the Amazon S3 User Guide. + // Container for granting information. + // + // Buckets that use the bucket owner enforced setting for Object Ownership don't + // support target grants. For more information, see [Permissions for server access log delivery]in the Amazon S3 User Guide. + // + // [Permissions for server access log delivery]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general TargetGrants []TargetGrant // Amazon S3 key format for log objects. @@ -1920,16 +2530,49 @@ type MetadataEntry struct { noSmithyDocumentSerde } -// A container specifying replication metrics-related settings enabling +// The metadata table configuration for a general purpose bucket. +type MetadataTableConfiguration struct { + + // The destination information for the metadata table configuration. The + // destination table bucket must be in the same Region and Amazon Web Services + // account as the general purpose bucket. The specified metadata table name must be + // unique within the aws_s3_metadata namespace in the destination table bucket. + // + // This member is required. + S3TablesDestination *S3TablesDestination + + noSmithyDocumentSerde +} + +// The metadata table configuration for a general purpose bucket. The destination +// +// table bucket must be in the same Region and Amazon Web Services account as the +// general purpose bucket. 
The specified metadata table name must be unique within +// the aws_s3_metadata namespace in the destination table bucket. +type MetadataTableConfigurationResult struct { + + // The destination information for the metadata table configuration. The + // destination table bucket must be in the same Region and Amazon Web Services + // account as the general purpose bucket. The specified metadata table name must be + // unique within the aws_s3_metadata namespace in the destination table bucket. + // + // This member is required. + S3TablesDestinationResult *S3TablesDestinationResult + + noSmithyDocumentSerde +} + +// A container specifying replication metrics-related settings enabling +// // replication metrics and events. type Metrics struct { - // Specifies whether the replication metrics are enabled. + // Specifies whether the replication metrics are enabled. // // This member is required. Status MetricsStatus - // A container specifying the time threshold for emitting the + // A container specifying the time threshold for emitting the // s3:Replication:OperationMissedThreshold event. EventThreshold *ReplicationTimeValue @@ -1957,8 +2600,9 @@ type MetricsAndOperator struct { // by the metrics configuration ID) from an Amazon S3 bucket. If you're updating an // existing metrics configuration, note that this is a full replacement of the // existing metrics configuration. If you don't include the elements you want to -// keep, they are erased. For more information, see PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTMetricConfiguration.html) -// . +// keep, they are erased. For more information, see [PutBucketMetricsConfiguration]. +// +// [PutBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTMetricConfiguration.html type MetricsConfiguration struct { // The ID used to identify the metrics configuration. The ID has a 64 character @@ -1978,8 +2622,7 @@ type MetricsConfiguration struct { // Specifies a metrics configuration filter. The metrics configuration only // includes objects that meet the filter's criteria. A filter must be a prefix, an // object tag, an access point ARN, or a conjunction (MetricsAndOperator). For more -// information, see PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html) -// . +// information, see [PutBucketMetricsConfiguration]. // // The following types satisfy this interface: // @@ -1987,6 +2630,8 @@ type MetricsConfiguration struct { // MetricsFilterMemberAnd // MetricsFilterMemberPrefix // MetricsFilterMemberTag +// +// [PutBucketMetricsConfiguration]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html type MetricsFilter interface { isMetricsFilter() } @@ -2035,6 +2680,12 @@ type MultipartUpload struct { // The algorithm that was used to create a checksum of the object. ChecksumAlgorithm ChecksumAlgorithm + // The checksum type that is used to calculate the object’s checksum value. For + // more information, see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumType ChecksumType + // Date and time at which the multipart upload was initiated. Initiated *time.Time @@ -2045,13 +2696,15 @@ type MultipartUpload struct { Key *string // Specifies the owner of the object that is part of the multipart upload. 
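Note for downstream callers: the MetadataTableConfiguration type above (together with its Result variant and the S3TablesDestination types later in this file) backs the S3 Metadata feature picked up by this bump. A hedged sketch of wiring it up, assuming this SDK version exposes CreateBucketMetadataTableConfiguration and reusing the imports and client from the lifecycle sketch above; every ARN and name is illustrative:

// client is the *s3.Client from the lifecycle sketch above.
_, err := client.CreateBucketMetadataTableConfiguration(context.TODO(),
	&s3.CreateBucketMetadataTableConfigurationInput{
		Bucket: aws.String("example-bucket"),
		MetadataTableConfiguration: &types.MetadataTableConfiguration{
			S3TablesDestination: &types.S3TablesDestination{
				// Must be in the same Region and account as the bucket above.
				TableBucketArn: aws.String("arn:aws:s3tables:us-east-1:111122223333:bucket/example-table-bucket"),
				// Must be unique within the aws_s3_metadata namespace.
				TableName: aws.String("example-metadata-table"),
			},
		},
	})
if err != nil {
	log.Fatal(err)
}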
- // Directory buckets - The bucket owner is returned as the object owner for all the - // objects. + // + // Directory buckets - The bucket owner is returned as the object owner for all + // the objects. Owner *Owner - // The class of storage used to store the object. Directory buckets - Only the S3 - // Express One Zone storage class is supported by directory buckets to store - // objects. + // The class of storage used to store the object. + // + // Directory buckets - Only the S3 Express One Zone storage class is supported by + // directory buckets to store objects. StorageClass StorageClass // Upload ID that identifies the multipart upload. @@ -2065,20 +2718,31 @@ type MultipartUpload struct { // configuration action on a bucket that has versioning enabled (or suspended) to // request that Amazon S3 delete noncurrent object versions at a specific period in // the object's lifetime. +// +// This parameter applies to general purpose buckets only. It is not supported for +// directory bucket lifecycle configurations. type NoncurrentVersionExpiration struct { - // Specifies how many newer noncurrent versions must exist before Amazon S3 can - // perform the associated action on a given version. If there are this many more - // recent noncurrent versions, Amazon S3 will take the associated action. For more - // information about noncurrent versions, see Lifecycle configuration elements (https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html) - // in the Amazon S3 User Guide. + // Specifies how many noncurrent versions Amazon S3 will retain. You can specify + // up to 100 noncurrent versions to retain. Amazon S3 will permanently delete any + // additional noncurrent versions beyond the specified number to retain. For more + // information about noncurrent versions, see [Lifecycle configuration elements]in the Amazon S3 User Guide. + // + // This parameter applies to general purpose buckets only. It is not supported for + // directory bucket lifecycle configurations. + // + // [Lifecycle configuration elements]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html NewerNoncurrentVersions *int32 // Specifies the number of days an object is noncurrent before Amazon S3 can // perform the associated action. The value must be a non-zero positive integer. - // For information about the noncurrent days calculations, see How Amazon S3 - // Calculates When an Object Became Noncurrent (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations) - // in the Amazon S3 User Guide. + // For information about the noncurrent days calculations, see [How Amazon S3 Calculates When an Object Became Noncurrent]in the Amazon S3 + // User Guide. + // + // This parameter applies to general purpose buckets only. It is not supported for + // directory bucket lifecycle configurations. + // + // [How Amazon S3 Calculates When an Object Became Noncurrent]: https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations NoncurrentDays *int32 noSmithyDocumentSerde @@ -2093,18 +2757,20 @@ type NoncurrentVersionExpiration struct { // specific period in the object's lifetime. type NoncurrentVersionTransition struct { - // Specifies how many newer noncurrent versions must exist before Amazon S3 can - // perform the associated action on a given version. If there are this many more - // recent noncurrent versions, Amazon S3 will take the associated action. 
For more - // information about noncurrent versions, see Lifecycle configuration elements (https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html) - // in the Amazon S3 User Guide. + // Specifies how many noncurrent versions Amazon S3 will retain in the same + // storage class before transitioning objects. You can specify up to 100 noncurrent + // versions to retain. Amazon S3 will transition any additional noncurrent versions + // beyond the specified number to retain. For more information about noncurrent + // versions, see [Lifecycle configuration elements]in the Amazon S3 User Guide. + // + // [Lifecycle configuration elements]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html NewerNoncurrentVersions *int32 // Specifies the number of days an object is noncurrent before Amazon S3 can // perform the associated action. For information about the noncurrent days - // calculations, see How Amazon S3 Calculates How Long an Object Has Been - // Noncurrent (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations) - // in the Amazon S3 User Guide. + // calculations, see [How Amazon S3 Calculates How Long an Object Has Been Noncurrent]in the Amazon S3 User Guide. + // + // [How Amazon S3 Calculates How Long an Object Has Been Noncurrent]: https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations NoncurrentDays *int32 // The class of storage used to store the object. @@ -2136,8 +2802,9 @@ type NotificationConfiguration struct { } // Specifies object key name filtering rules. For information about key name -// filtering, see Configuring event notifications using object key name filtering (https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html) -// in the Amazon S3 User Guide. +// filtering, see [Configuring event notifications using object key name filtering]in the Amazon S3 User Guide. +// +// [Configuring event notifications using object key name filtering]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html type NotificationConfigurationFilter struct { // A container for object key name prefix and suffix filtering rules. @@ -2152,21 +2819,31 @@ type Object struct { // The algorithm that was used to create a checksum of the object. ChecksumAlgorithm []ChecksumAlgorithm + // The checksum type that is used to calculate the object’s checksum value. For + // more information, see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumType ChecksumType + // The entity tag is a hash of the object. The ETag reflects changes only to the // contents of an object, not its metadata. The ETag may or may not be an MD5 // digest of the object data. Whether or not it is depends on how the object was // created and how it is encrypted as described below: + // // - Objects created by the PUT Object, POST Object, or Copy operation, or // through the Amazon Web Services Management Console, and are encrypted by SSE-S3 // or plaintext, have ETags that are an MD5 digest of their object data. + // // - Objects created by the PUT Object, POST Object, or Copy operation, or // through the Amazon Web Services Management Console, and are encrypted by SSE-C // or SSE-KMS, have ETags that are not an MD5 digest of their object data. 
+ // // - If an object is created by either the Multipart Upload or Part Copy // operation, the ETag is not an MD5 digest, regardless of the method of // encryption. If an object is larger than 16 MB, the Amazon Web Services // Management Console will upload or copy that object as a Multipart Upload, and // therefore the ETag will not be an MD5 digest. + // // Directory buckets - MD5 is not supported by directory buckets. ETag *string @@ -2177,25 +2854,29 @@ type Object struct { // Creation date of the object. LastModified *time.Time - // The owner of the object Directory buckets - The bucket owner is returned as the - // object owner. + // The owner of the object + // + // Directory buckets - The bucket owner is returned as the object owner. Owner *Owner // Specifies the restoration status of an object. Objects in certain storage // classes must be restored before they can be retrieved. For more information - // about these storage classes and how to work with archived objects, see Working - // with archived objects (https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. Only the S3 Express One Zone storage class is supported by directory - // buckets to store objects. + // about these storage classes and how to work with archived objects, see [Working with archived objects]in the + // Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. Only the S3 Express + // One Zone storage class is supported by directory buckets to store objects. + // + // [Working with archived objects]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html RestoreStatus *RestoreStatus // Size in bytes of the object Size *int64 - // The class of storage used to store the object. Directory buckets - Only the S3 - // Express One Zone storage class is supported by directory buckets to store - // objects. + // The class of storage used to store the object. + // + // Directory buckets - Only the S3 Express One Zone storage class is supported by + // directory buckets to store objects. StorageClass ObjectStorageClass noSmithyDocumentSerde @@ -2204,16 +2885,39 @@ type Object struct { // Object Identifier is unique value to identify objects. type ObjectIdentifier struct { - // Key name of the object. Replacement must be made for object keys containing - // special characters (such as carriage returns) when using XML requests. For more - // information, see XML related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints) - // . + // Key name of the object. + // + // Replacement must be made for object keys containing special characters (such as + // carriage returns) when using XML requests. For more information, see [XML related object key constraints]. + // + // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints // // This member is required. Key *string - // Version ID for the specific version of the object to delete. This functionality - // is not supported for directory buckets. + // An entity tag (ETag) is an identifier assigned by a web server to a specific + // version of a resource found at a URL. This header field makes the request method + // conditional on ETags . 
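Note for downstream callers: this hunk extends ObjectIdentifier with conditional-delete fields, the ETag documented above plus the LastModifiedTime and Size fields just below. A hedged sketch of a conditional DeleteObjects call using the ETag match, under the same import and client assumptions as the lifecycle sketch; the directory bucket, key, and ETag value are illustrative:

_, err := client.DeleteObjects(context.TODO(), &s3.DeleteObjectsInput{
	Bucket: aws.String("example--usw2-az1--x-s3"), // illustrative directory bucket
	Delete: &types.Delete{
		Objects: []types.ObjectIdentifier{{
			Key: aws.String("reports/latest.csv"),
			// Delete only if the stored object still carries this ETag.
			ETag: aws.String(`"0123456789abcdef0123456789abcdef"`),
		}},
	},
})
if err != nil {
	log.Fatal(err)
}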
+	//
+	// Entity tags (ETags) for S3 Express One Zone are random alphanumeric strings
+	// unique to the object.
+	ETag *string
+
+	// If present, the object is deleted only if its modification time matches the
+	// provided Timestamp .
+	//
+	// This functionality is only supported for directory buckets.
+	LastModifiedTime *time.Time
+
+	// If present, the object is deleted only if its size matches the provided size
+	// in bytes.
+	//
+	// This functionality is only supported for directory buckets.
+	Size *int64
+
+	// Version ID for the specific version of the object to delete.
+	//
+	// This functionality is not supported for directory buckets.
	VersionId *string

	noSmithyDocumentSerde
@@ -2271,38 +2975,41 @@ type ObjectLockRule struct {
 // A container for elements related to an individual part.
 type ObjectPart struct {

-	// This header can be used as a data integrity check to verify that the data
-	// received is the same data that was originally sent. This header specifies the
-	// base64-encoded, 32-bit CRC32 checksum of the object. For more information, see
-	// Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
-	// in the Amazon S3 User Guide.
+	// The Base64 encoded, 32-bit CRC32 checksum of the part. This checksum is present
+	// if the multipart upload request was created with the CRC32 checksum algorithm.
+	// For more information, see [Checking object integrity]in the Amazon S3 User Guide.
+	//
+	// [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
	ChecksumCRC32 *string

-	// The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
-	// present if it was uploaded with the object. When you use an API operation on an
-	// object that was uploaded using multipart uploads, this value may not be a direct
-	// checksum value of the full object. Instead, it's a calculation based on the
-	// checksum values of each individual part. For more information about how
-	// checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
-	// in the Amazon S3 User Guide.
+	// The Base64 encoded, 32-bit CRC32C checksum of the part. This checksum is
+	// present if the multipart upload request was created with the CRC32C checksum
+	// algorithm. For more information, see [Checking object integrity]in the Amazon S3 User Guide.
+	//
+	// [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
	ChecksumCRC32C *string

-	// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
-	// present if it was uploaded with the object. When you use the API operation on an
-	// object that was uploaded using multipart uploads, this value may not be a direct
-	// checksum value of the full object. Instead, it's a calculation based on the
-	// checksum values of each individual part. For more information about how
-	// checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
-	// in the Amazon S3 User Guide.
+	// The Base64 encoded, 64-bit CRC64NVME checksum of the part. 
This checksum is + // present if the multipart upload request was created with the CRC64NVME checksum + // algorithm, or if the object was uploaded without a checksum (and Amazon S3 added + // the default checksum, CRC64NVME , to the uploaded object). For more information, + // see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumCRC64NVME *string + + // The Base64 encoded, 160-bit SHA1 checksum of the part. This checksum is present + // if the multipart upload request was created with the SHA1 checksum algorithm. + // For more information, see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumSHA1 *string - // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be - // present if it was uploaded with the object. When you use an API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 256-bit SHA256 checksum of the part. This checksum is + // present if the multipart upload request was created with the SHA256 checksum + // algorithm. For more information, see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumSHA256 *string // The part number identifying the part. This value is a positive integer between @@ -2321,6 +3028,12 @@ type ObjectVersion struct { // The algorithm that was used to create a checksum of the object. ChecksumAlgorithm []ChecksumAlgorithm + // The checksum type that is used to calculate the object’s checksum value. For + // more information, see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumType ChecksumType + // The entity tag is an MD5 hash of that version of the object. ETag *string @@ -2339,9 +3052,10 @@ type ObjectVersion struct { // Specifies the restoration status of an object. Objects in certain storage // classes must be restored before they can be retrieved. For more information - // about these storage classes and how to work with archived objects, see Working - // with archived objects (https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html) - // in the Amazon S3 User Guide. + // about these storage classes and how to work with archived objects, see [Working with archived objects]in the + // Amazon S3 User Guide. + // + // [Working with archived objects]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html RestoreStatus *RestoreStatus // Size in bytes of the object. @@ -2382,14 +3096,23 @@ type Owner struct { // Container for the display name of the owner. This value is only supported in // the following Amazon Web Services Regions: + // // - US East (N. Virginia) + // // - US West (N. 
California) + // // - US West (Oregon) + // // - Asia Pacific (Singapore) + // // - Asia Pacific (Sydney) + // // - Asia Pacific (Tokyo) + // // - Europe (Ireland) + // // - South America (São Paulo) + // // This functionality is not supported for directory buckets. DisplayName *string @@ -2414,23 +3137,30 @@ type OwnershipControls struct { type OwnershipControlsRule struct { // The container element for object ownership for a bucket's ownership controls. + // // BucketOwnerPreferred - Objects uploaded to the bucket change ownership to the // bucket owner if the objects are uploaded with the bucket-owner-full-control - // canned ACL. ObjectWriter - The uploading account will own the object if the - // object is uploaded with the bucket-owner-full-control canned ACL. + // canned ACL. + // + // ObjectWriter - The uploading account will own the object if the object is + // uploaded with the bucket-owner-full-control canned ACL. + // // BucketOwnerEnforced - Access control lists (ACLs) are disabled and no longer // affect permissions. The bucket owner automatically owns and has full control // over every object in the bucket. The bucket only accepts PUT requests that don't // specify an ACL or specify bucket owner full control ACLs (such as the predefined // bucket-owner-full-control canned ACL or a custom ACL in XML format that grants - // the same permissions). By default, ObjectOwnership is set to BucketOwnerEnforced - // and ACLs are disabled. We recommend keeping ACLs disabled, except in uncommon - // use cases where you must control access for each object individually. For more - // information about S3 Object Ownership, see Controlling ownership of objects and - // disabling ACLs for your bucket (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) - // in the Amazon S3 User Guide. This functionality is not supported for directory - // buckets. Directory buckets use the bucket owner enforced setting for S3 Object - // Ownership. + // the same permissions). + // + // By default, ObjectOwnership is set to BucketOwnerEnforced and ACLs are + // disabled. We recommend keeping ACLs disabled, except in uncommon use cases where + // you must control access for each object individually. For more information about + // S3 Object Ownership, see [Controlling ownership of objects and disabling ACLs for your bucket]in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. Directory buckets + // use the bucket owner enforced setting for S3 Object Ownership. + // + // [Controlling ownership of objects and disabling ACLs for your bucket]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html // // This member is required. ObjectOwnership ObjectOwnership @@ -2446,36 +3176,41 @@ type ParquetInput struct { // Container for elements related to a part. type Part struct { - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This header specifies the - // base64-encoded, 32-bit CRC32 checksum of the object. For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. + // The Base64 encoded, 32-bit CRC32 checksum of the part. This checksum is present + // if the object was uploaded with the CRC32 checksum algorithm. For more + // information, see [Checking object integrity]in the Amazon S3 User Guide. 
+ // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumCRC32 *string - // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be - // present if it was uploaded with the object. When you use an API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 32-bit CRC32C checksum of the part. This checksum is + // present if the object was uploaded with the CRC32C checksum algorithm. For more + // information, see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumCRC32C *string - // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. When you use the API operation on an - // object that was uploaded using multipart uploads, this value may not be a direct - // checksum value of the full object. Instead, it's a calculation based on the - // checksum values of each individual part. For more information about how - // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) - // in the Amazon S3 User Guide. + // The Base64 encoded, 64-bit CRC64NVME checksum of the part. This checksum is + // present if the multipart upload request was created with the CRC64NVME checksum + // algorithm, or if the object was uploaded without a checksum (and Amazon S3 added + // the default checksum, CRC64NVME , to the uploaded object). For more information, + // see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html + ChecksumCRC64NVME *string + + // The Base64 encoded, 160-bit SHA1 checksum of the part. This checksum is present + // if the object was uploaded with the SHA1 checksum algorithm. For more + // information, see [Checking object integrity]in the Amazon S3 User Guide. + // + // [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html ChecksumSHA1 *string - // This header can be used as a data integrity check to verify that the data - // received is the same data that was originally sent. This header specifies the - // base64-encoded, 256-bit SHA-256 digest of the object. For more information, see - // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. + // The Base64 encoded, 256-bit SHA256 checksum of the part. This checksum is + // present if the object was uploaded with the SHA256 checksum algorithm. For more + // information, see [Checking object integrity]in the Amazon S3 User Guide. 
+	//
+	// [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
	ChecksumSHA256 *string

	// Entity tag returned when the part was uploaded.
@@ -2495,13 +3230,22 @@ type Part struct {
 }

 // Amazon S3 keys for log objects are partitioned in the following format:
-// [DestinationPrefix][SourceAccountId]/[SourceRegion]/[SourceBucket]/[YYYY]/[MM]/[DD]/[YYYY]-[MM]-[DD]-[hh]-[mm]-[ss]-[UniqueString]
+//
+// [DestinationPrefix][SourceAccountId]/[SourceRegion]/[SourceBucket]/[YYYY]/[MM]/[DD]/[YYYY]-[MM]-[DD]-[hh]-[mm]-[ss]-[UniqueString]
+//
 // PartitionedPrefix defaults to EventTime delivery when server access logs are
 // delivered.
 type PartitionedPrefix struct {

 	// Specifies the partition date source for the partitioned prefix.
-	// PartitionDateSource can be EventTime or DeliveryTime.
+	// PartitionDateSource can be EventTime or DeliveryTime .
+	//
+	// For DeliveryTime , the time in the log file names corresponds to the delivery
+	// time for the log files.
+	//
+	// For EventTime , the logs delivered are for a specific day only. The year, month,
+	// and day correspond to the day on which the event occurred, and the hour, minutes,
+	// and seconds are set to 00 in the key.
 	PartitionDateSource PartitionDateSource

 	noSmithyDocumentSerde
@@ -2543,41 +3287,48 @@ type ProgressEvent struct {

 // The PublicAccessBlock configuration that you want to apply to this Amazon S3
 // bucket. You can enable the configuration options in any combination. For more
-// information about when Amazon S3 considers a bucket or object public, see The
-// Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status)
-// in the Amazon S3 User Guide.
+// information about when Amazon S3 considers a bucket or object public, see [The Meaning of "Public"]in
+// the Amazon S3 User Guide.
+//
+// [The Meaning of "Public"]: https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status
 type PublicAccessBlockConfiguration struct {

 	// Specifies whether Amazon S3 should block public access control lists (ACLs) for
 	// this bucket and objects in this bucket. Setting this element to TRUE causes the
 	// following behavior:
-	//   - PUT Bucket ACL and PUT Object ACL calls fail if the specified ACL is
-	//   public.
+	//
+	//   - PUT Bucket ACL and PUT Object ACL calls fail if the specified ACL is public.
+	//
 	//   - PUT Object calls fail if the request includes a public ACL.
+	//
 	//   - PUT Bucket calls fail if the request includes a public ACL.
+	//
 	// Enabling this setting doesn't affect existing policies or ACLs.
 	BlockPublicAcls *bool

 	// Specifies whether Amazon S3 should block public bucket policies for this
 	// bucket. Setting this element to TRUE causes Amazon S3 to reject calls to PUT
-	// Bucket policy if the specified bucket policy allows public access. Enabling this
-	// setting doesn't affect existing bucket policies.
+	// Bucket policy if the specified bucket policy allows public access.
+	//
+	// Enabling this setting doesn't affect existing bucket policies.
 	BlockPublicPolicy *bool

 	// Specifies whether Amazon S3 should ignore public ACLs for this bucket and
 	// objects in this bucket. Setting this element to TRUE causes Amazon S3 to ignore
-	// all public ACLs on this bucket and objects in this bucket. 
Enabling this setting - // doesn't affect the persistence of any existing ACLs and doesn't prevent new - // public ACLs from being set. + // all public ACLs on this bucket and objects in this bucket. + // + // Enabling this setting doesn't affect the persistence of any existing ACLs and + // doesn't prevent new public ACLs from being set. IgnorePublicAcls *bool // Specifies whether Amazon S3 should restrict public bucket policies for this // bucket. Setting this element to TRUE restricts access to this bucket to only - // Amazon Web Service principals and authorized users within this account if the - // bucket has a public policy. Enabling this setting doesn't affect previously - // stored bucket policies, except that public and cross-account access within any - // public bucket policy, including non-public delegation to specific accounts, is - // blocked. + // Amazon Web Services service principals and authorized users within this account + // if the bucket has a public policy. + // + // Enabling this setting doesn't affect previously stored bucket policies, except + // that public and cross-account access within any public bucket policy, including + // non-public delegation to specific accounts, is blocked. RestrictPublicBuckets *bool noSmithyDocumentSerde @@ -2599,8 +3350,9 @@ type QueueConfiguration struct { QueueArn *string // Specifies object key name filtering rules. For information about key name - // filtering, see Configuring event notifications using object key name filtering (https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html) - // in the Amazon S3 User Guide. + // filtering, see [Configuring event notifications using object key name filtering]in the Amazon S3 User Guide. + // + // [Configuring event notifications using object key name filtering]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html Filter *NotificationConfigurationFilter // An optional unique identifier for configurations in a notification @@ -2613,7 +3365,14 @@ type QueueConfiguration struct { // The container for the records event. type RecordsEvent struct { - // The byte array of partial, one or more result records. + // The byte array of partial, one or more result records. S3 Select doesn't + // guarantee that a record will be self-contained in one record frame. To ensure + // continuous streaming of data, S3 Select might split the same record across + // multiple record frames instead of aggregating the results in memory. Some S3 + // clients (for example, the SDK for Java) handle this behavior by creating a + // ByteStream out of the response by default. Other clients might not handle this + // behavior by default. In those cases, you must aggregate the results on the + // client side and parse the response. Payload []byte noSmithyDocumentSerde @@ -2639,18 +3398,22 @@ type Redirect struct { // documents/ , you can set a condition block with KeyPrefixEquals set to docs/ // and in the Redirect set ReplaceKeyPrefixWith to /documents . Not required if one // of the siblings is present. Can be present only if ReplaceKeyWith is not - // provided. Replacement must be made for object keys containing special characters - // (such as carriage returns) when using XML requests. For more information, see - // XML related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints) - // . + // provided. 
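Note for downstream callers: the ReplaceKeyPrefixWith example in the comment above (redirecting keys under docs/ to /documents) is easiest to read as caller code. A minimal sketch under the same import and client assumptions as the earlier snippets; all names are illustrative:

_, err := client.PutBucketWebsite(context.TODO(), &s3.PutBucketWebsiteInput{
	Bucket: aws.String("example-website-bucket"),
	WebsiteConfiguration: &types.WebsiteConfiguration{
		IndexDocument: &types.IndexDocument{Suffix: aws.String("index.html")},
		RoutingRules: []types.RoutingRule{{
			// Mirrors the docs/ to /documents example documented above.
			Condition: &types.Condition{KeyPrefixEquals: aws.String("docs/")},
			Redirect:  &types.Redirect{ReplaceKeyPrefixWith: aws.String("/documents")},
		}},
	},
})
if err != nil {
	log.Fatal(err)
}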
+ // + // Replacement must be made for object keys containing special characters (such as + // carriage returns) when using XML requests. For more information, see [XML related object key constraints]. + // + // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints ReplaceKeyPrefixWith *string // The specific object key to use in the redirect request. For example, redirect // request to error.html . Not required if one of the siblings is present. Can be - // present only if ReplaceKeyPrefixWith is not provided. Replacement must be made - // for object keys containing special characters (such as carriage returns) when - // using XML requests. For more information, see XML related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints) - // . + // present only if ReplaceKeyPrefixWith is not provided. + // + // Replacement must be made for object keys containing special characters (such as + // carriage returns) when using XML requests. For more information, see [XML related object key constraints]. + // + // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints ReplaceKeyWith *string noSmithyDocumentSerde @@ -2676,9 +3439,11 @@ type RedirectAllRequestsTo struct { // Amazon S3 doesn't replicate replica modifications by default. In the latest // version of replication configuration (when Filter is specified), you can // specify this element and set the status to Enabled to replicate modifications -// on replicas. If you don't specify the Filter element, Amazon S3 assumes that -// the replication configuration is the earlier version, V1. In the earlier -// version, this element is not allowed. +// on replicas. +// +// If you don't specify the Filter element, Amazon S3 assumes that the replication +// configuration is the earlier version, V1. In the earlier version, this element +// is not allowed. type ReplicaModifications struct { // Specifies whether Amazon S3 replicates modifications on replicas. @@ -2694,9 +3459,10 @@ type ReplicaModifications struct { type ReplicationConfiguration struct { // The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role - // that Amazon S3 assumes when replicating objects. For more information, see How - // to Set Up Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-how-setup.html) - // in the Amazon S3 User Guide. + // that Amazon S3 assumes when replicating objects. For more information, see [How to Set Up Replication]in + // the Amazon S3 User Guide. + // + // [How to Set Up Replication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-how-setup.html // // This member is required. Role *string @@ -2729,34 +3495,41 @@ type ReplicationRule struct { // DeleteMarkerReplication element. If your Filter includes a Tag element, the // DeleteMarkerReplication Status must be set to Disabled, because Amazon S3 does // not support replicating delete markers for tag-based rules. For an example - // configuration, see Basic Rule Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-config-min-rule-config) - // . For more information about delete marker replication, see Basic Rule - // Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/delete-marker-replication.html) - // . 
If you are using an earlier version of the replication configuration, Amazon - // S3 handles replication of delete markers differently. For more information, see - // Backward Compatibility (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations) - // . + // configuration, see [Basic Rule Configuration]. + // + // For more information about delete marker replication, see [Basic Rule Configuration]. + // + // If you are using an earlier version of the replication configuration, Amazon S3 + // handles replication of delete markers differently. For more information, see [Backward Compatibility]. + // + // [Basic Rule Configuration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/delete-marker-replication.html + // [Backward Compatibility]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations DeleteMarkerReplication *DeleteMarkerReplication - // Optional configuration to replicate existing source bucket objects. For more - // information, see Replicating Existing Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-what-is-isnot-replicated.html#existing-object-replication) - // in the Amazon S3 User Guide. + // Optional configuration to replicate existing source bucket objects. + // + // This parameter is no longer supported. To replicate existing objects, see [Replicating existing objects with S3 Batch Replication] in + // the Amazon S3 User Guide. + // + // [Replicating existing objects with S3 Batch Replication]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-batch-replication-batch.html ExistingObjectReplication *ExistingObjectReplication // A filter that identifies the subset of objects to which the replication rule // applies. A Filter must specify exactly one Prefix , Tag , or an And child // element. - Filter ReplicationRuleFilter + Filter *ReplicationRuleFilter // A unique identifier for the rule. The maximum value is 255 characters. ID *string // An object key name prefix that identifies the object or objects to which the // rule applies. The maximum prefix length is 1,024 characters. To include all - // objects in a bucket, specify an empty string. Replacement must be made for - // object keys containing special characters (such as carriage returns) when using - // XML requests. For more information, see XML related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints) - // . + // objects in a bucket, specify an empty string. + // + // Replacement must be made for object keys containing special characters (such as + // carriage returns) when using XML requests. For more information, see [XML related object key constraints]. + // + // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints // // Deprecated: This member has been deprecated. Prefix *string @@ -2766,8 +3539,10 @@ type ReplicationRule struct { // according to all replication rules. However, if there are two or more rules with // the same destination bucket, then objects will be replicated according to the // rule with the highest priority. The higher the number, the higher the priority. - // For more information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) - // in the Amazon S3 User Guide. + // + // For more information, see [Replication] in the Amazon S3 User Guide. 
+ // + // [Replication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html Priority *int32 // A container that describes additional filters for identifying the source @@ -2782,9 +3557,13 @@ type ReplicationRule struct { // A container for specifying rule filters. The filters determine the subset of // objects to which the rule applies. This element is required only if you specify -// more than one filter. For example: +// more than one filter. +// +// For example: +// // - If you specify both a Prefix and a Tag filter, wrap these filters in an And // tag. +// // - If you specify a filter based on multiple tags, wrap the Tag elements in an // And tag. type ReplicationRuleAndOperator struct { @@ -2802,67 +3581,50 @@ type ReplicationRuleAndOperator struct { // A filter that identifies the subset of objects to which the replication rule // applies. A Filter must specify exactly one Prefix , Tag , or an And child // element. -// -// The following types satisfy this interface: -// -// ReplicationRuleFilterMemberAnd -// ReplicationRuleFilterMemberPrefix -// ReplicationRuleFilterMemberTag -type ReplicationRuleFilter interface { - isReplicationRuleFilter() -} - -// A container for specifying rule filters. The filters determine the subset of -// objects to which the rule applies. This element is required only if you specify -// more than one filter. For example: -// - If you specify both a Prefix and a Tag filter, wrap these filters in an And -// tag. -// - If you specify a filter based on multiple tags, wrap the Tag elements in an -// And tag. -type ReplicationRuleFilterMemberAnd struct { - Value ReplicationRuleAndOperator +type ReplicationRuleFilter struct { - noSmithyDocumentSerde -} - -func (*ReplicationRuleFilterMemberAnd) isReplicationRuleFilter() {} - -// An object key name prefix that identifies the subset of objects to which the -// rule applies. Replacement must be made for object keys containing special -// characters (such as carriage returns) when using XML requests. For more -// information, see XML related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints) -// . -type ReplicationRuleFilterMemberPrefix struct { - Value string - - noSmithyDocumentSerde -} + // A container for specifying rule filters. The filters determine the subset of + // objects to which the rule applies. This element is required only if you specify + // more than one filter. For example: + // + // - If you specify both a Prefix and a Tag filter, wrap these filters in an And + // tag. + // + // - If you specify a filter based on multiple tags, wrap the Tag elements in an + // And tag. + And *ReplicationRuleAndOperator -func (*ReplicationRuleFilterMemberPrefix) isReplicationRuleFilter() {} + // An object key name prefix that identifies the subset of objects to which the + // rule applies. + // + // Replacement must be made for object keys containing special characters (such as + // carriage returns) when using XML requests. For more information, see [XML related object key constraints]. + // + // [XML related object key constraints]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints + Prefix *string -// A container for specifying a tag key and value. The rule applies only to -// objects that have the tag in their tag set. -type ReplicationRuleFilterMemberTag struct { - Value Tag + // A container for specifying a tag key and value. 
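Note for downstream callers: ReplicationRuleFilter undergoes the same union-to-struct conversion as LifecycleRuleFilter, and the Filter field on ReplicationRule becomes a pointer (the change from "Filter ReplicationRuleFilter" to "Filter *ReplicationRuleFilter" above). A hedged migration sketch under the same import and client assumptions as the lifecycle sketch; the role and bucket ARNs are illustrative:

rule := types.ReplicationRule{
	Status:   types.ReplicationRuleStatusEnabled,
	Priority: aws.Int32(1),
	// Old union style (removed): &types.ReplicationRuleFilterMemberPrefix{Value: "logs/"}
	Filter: &types.ReplicationRuleFilter{Prefix: aws.String("logs/")},
	DeleteMarkerReplication: &types.DeleteMarkerReplication{
		Status: types.DeleteMarkerReplicationStatusDisabled,
	},
	Destination: &types.Destination{
		Bucket: aws.String("arn:aws:s3:::example-replica-bucket"),
	},
}
_, err := client.PutBucketReplication(context.TODO(), &s3.PutBucketReplicationInput{
	Bucket: aws.String("example-source-bucket"),
	ReplicationConfiguration: &types.ReplicationConfiguration{
		Role:  aws.String("arn:aws:iam::111122223333:role/example-replication-role"),
		Rules: []types.ReplicationRule{rule},
	},
})
if err != nil {
	log.Fatal(err)
}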
+ // + // The rule applies only to objects that have the tag in their tag set. + Tag *Tag noSmithyDocumentSerde } -func (*ReplicationRuleFilterMemberTag) isReplicationRuleFilter() {} - -// A container specifying S3 Replication Time Control (S3 RTC) related +// A container specifying S3 Replication Time Control (S3 RTC) related +// // information, including whether S3 RTC is enabled and the time when all objects // and operations on objects must be replicated. Must be specified together with a // Metrics block. type ReplicationTime struct { - // Specifies whether the replication time is enabled. + // Specifies whether the replication time is enabled. // // This member is required. Status ReplicationTimeStatus - // A container specifying the time by which replication should be complete for all - // objects and operations on objects. + // A container specifying the time by which replication should be complete for + // all objects and operations on objects. // // This member is required. Time *ReplicationTimeValue @@ -2870,11 +3632,14 @@ type ReplicationTime struct { noSmithyDocumentSerde } -// A container specifying the time value for S3 Replication Time Control (S3 RTC) +// A container specifying the time value for S3 Replication Time Control (S3 RTC) +// // and replication metrics EventThreshold . type ReplicationTimeValue struct { - // Contains an integer specifying time in minutes. Valid value: 15 + // Contains an integer specifying time in minutes. + // + // Valid value: 15 Minutes *int32 noSmithyDocumentSerde @@ -2905,8 +3670,10 @@ type RequestProgress struct { type RestoreRequest struct { // Lifetime of the active copy in days. Do not use with restores that specify - // OutputLocation . The Days element is required for regular restores, and must not - // be provided for select requests. + // OutputLocation . + // + // The Days element is required for regular restores, and must not be provided for + // select requests. Days *int32 // The optional description for the job. @@ -2919,13 +3686,23 @@ type RestoreRequest struct { // Describes the location where the restore job's output is stored. OutputLocation *OutputLocation + // Amazon S3 Select is no longer available to new customers. Existing customers of + // Amazon S3 Select can continue to use the feature as usual. [Learn more] + // // Describes the parameters for Select job types. + // + // [Learn more]: http://aws.amazon.com/blogs/storage/how-to-optimize-querying-your-data-in-amazon-s3/ SelectParameters *SelectParameters // Retrieval tier at which the restore will be processed. Tier Tier + // Amazon S3 Select is no longer available to new customers. Existing customers of + // Amazon S3 Select can continue to use the feature as usual. [Learn more] + // // Type of restore request. + // + // [Learn more]: http://aws.amazon.com/blogs/storage/how-to-optimize-querying-your-data-in-amazon-s3/ Type RestoreRequestType noSmithyDocumentSerde @@ -2933,34 +3710,43 @@ type RestoreRequest struct { // Specifies the restoration status of an object. Objects in certain storage // classes must be restored before they can be retrieved. For more information -// about these storage classes and how to work with archived objects, see Working -// with archived objects (https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html) -// in the Amazon S3 User Guide. This functionality is not supported for directory -// buckets. Only the S3 Express One Zone storage class is supported by directory -// buckets to store objects. 
+// about these storage classes and how to work with archived objects, see [Working with archived objects]in the +// Amazon S3 User Guide. +// +// This functionality is not supported for directory buckets. Only the S3 Express +// One Zone storage class is supported by directory buckets to store objects. +// +// [Working with archived objects]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html type RestoreStatus struct { // Specifies whether the object is currently being restored. If the object // restoration is in progress, the header returns the value TRUE . For example: - // x-amz-optional-object-attributes: IsRestoreInProgress="true" If the object - // restoration has completed, the header returns the value FALSE . For example: - // x-amz-optional-object-attributes: IsRestoreInProgress="false", - // RestoreExpiryDate="2012-12-21T00:00:00.000Z" If the object hasn't been restored, - // there is no header response. + // + // x-amz-optional-object-attributes: IsRestoreInProgress="true" + // + // If the object restoration has completed, the header returns the value FALSE . + // For example: + // + // x-amz-optional-object-attributes: IsRestoreInProgress="false", + // RestoreExpiryDate="2012-12-21T00:00:00.000Z" + // + // If the object hasn't been restored, there is no header response. IsRestoreInProgress *bool // Indicates when the restored copy will expire. This value is populated only if // the object has already been restored. For example: - // x-amz-optional-object-attributes: IsRestoreInProgress="false", - // RestoreExpiryDate="2012-12-21T00:00:00.000Z" + // + // x-amz-optional-object-attributes: IsRestoreInProgress="false", + // RestoreExpiryDate="2012-12-21T00:00:00.000Z" RestoreExpiryDate *time.Time noSmithyDocumentSerde } // Specifies the redirect behavior and when a redirect is applied. For more -// information about routing rules, see Configuring advanced conditional redirects (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html#advanced-conditional-redirects) -// in the Amazon S3 User Guide. +// information about routing rules, see [Configuring advanced conditional redirects]in the Amazon S3 User Guide. +// +// [Configuring advanced conditional redirects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html#advanced-conditional-redirects type RoutingRule struct { // Container for redirect information. You can redirect requests to another host, @@ -3024,6 +3810,69 @@ type S3Location struct { noSmithyDocumentSerde } +// The destination information for the metadata table configuration. The +// +// destination table bucket must be in the same Region and Amazon Web Services +// account as the general purpose bucket. The specified metadata table name must be +// unique within the aws_s3_metadata namespace in the destination table bucket. +type S3TablesDestination struct { + + // The Amazon Resource Name (ARN) for the table bucket that's specified as the + // destination in the metadata table configuration. The destination table bucket + // must be in the same Region and Amazon Web Services account as the general + // purpose bucket. + // + // This member is required. + TableBucketArn *string + + // The name for the metadata table in your metadata table configuration. The + // specified metadata table name must be unique within the aws_s3_metadata + // namespace in the destination table bucket. + // + // This member is required. 
+ TableName *string + + noSmithyDocumentSerde +} + +// The destination information for the metadata table configuration. The +// +// destination table bucket must be in the same Region and Amazon Web Services +// account as the general purpose bucket. The specified metadata table name must be +// unique within the aws_s3_metadata namespace in the destination table bucket. +type S3TablesDestinationResult struct { + + // The Amazon Resource Name (ARN) for the metadata table in the metadata table + // configuration. The specified metadata table name must be unique within the + // aws_s3_metadata namespace in the destination table bucket. + // + // This member is required. + TableArn *string + + // The Amazon Resource Name (ARN) for the table bucket that's specified as the + // destination in the metadata table configuration. The destination table bucket + // must be in the same Region and Amazon Web Services account as the general + // purpose bucket. + // + // This member is required. + TableBucketArn *string + + // The name for the metadata table in your metadata table configuration. The + // specified metadata table name must be unique within the aws_s3_metadata + // namespace in the destination table bucket. + // + // This member is required. + TableName *string + + // The table bucket namespace for the metadata table in your metadata table + // configuration. This value is always aws_s3_metadata . + // + // This member is required. + TableNamespace *string + + noSmithyDocumentSerde +} + // Specifies the byte range of the object to get the records from. A record is // processed when its first byte is contained by the range. This parameter is // optional, but when specified, it must not be empty. See RFC 2616, Section @@ -3103,11 +3952,26 @@ type SelectObjectContentEventStreamMemberStats struct { func (*SelectObjectContentEventStreamMemberStats) isSelectObjectContentEventStream() {} +// Amazon S3 Select is no longer available to new customers. Existing customers of +// Amazon S3 Select can continue to use the feature as usual. [Learn more] +// // Describes the parameters for Select job types. +// +// Learn [How to optimize querying your data in Amazon S3] using [Amazon Athena], [S3 Object Lambda], or client-side filtering. +// +// [Learn more]: http://aws.amazon.com/blogs/storage/how-to-optimize-querying-your-data-in-amazon-s3/ +// [How to optimize querying your data in Amazon S3]: http://aws.amazon.com/blogs/storage/how-to-optimize-querying-your-data-in-amazon-s3/ +// [Amazon Athena]: https://docs.aws.amazon.com/athena/latest/ug/what-is.html +// [S3 Object Lambda]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/transforming-objects.html type SelectParameters struct { + // Amazon S3 Select is no longer available to new customers. Existing customers of + // Amazon S3 Select can continue to use the feature as usual. [Learn more] + // // The expression that is used to query the object. // + // [Learn more]: http://aws.amazon.com/blogs/storage/how-to-optimize-querying-your-data-in-amazon-s3/ + // // This member is required. Expression *string @@ -3131,34 +3995,73 @@ type SelectParameters struct { // Describes the default server-side encryption to apply to new objects in the // bucket. If a PUT Object request doesn't specify any server-side encryption, this -// default encryption will be applied. 
If you don't specify a customer managed key -// at configuration, Amazon S3 automatically creates an Amazon Web Services KMS key -// in your Amazon Web Services account the first time that you add an object -// encrypted with SSE-KMS to a bucket. By default, Amazon S3 uses this KMS key for -// SSE-KMS. For more information, see PUT Bucket encryption (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTencryption.html) -// in the Amazon S3 API Reference. +// default encryption will be applied. For more information, see [PutBucketEncryption]. +// +// - General purpose buckets - If you don't specify a customer managed key at +// configuration, Amazon S3 automatically creates an Amazon Web Services KMS key ( +// aws/s3 ) in your Amazon Web Services account the first time that you add an +// object encrypted with SSE-KMS to a bucket. By default, Amazon S3 uses this KMS +// key for SSE-KMS. +// +// - Directory buckets - Your SSE-KMS configuration can only support 1 [customer managed key]per +// directory bucket's lifetime. The [Amazon Web Services managed key]( aws/s3 ) isn't supported. +// +// - Directory buckets - For directory buckets, there are only two supported +// options for server-side encryption: SSE-S3 and SSE-KMS. +// +// [PutBucketEncryption]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTencryption.html +// [customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk +// [Amazon Web Services managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk type ServerSideEncryptionByDefault struct { // Server-side encryption algorithm to use for the default encryption. // + // For directory buckets, there are only two supported values for server-side + // encryption: AES256 and aws:kms . + // // This member is required. SSEAlgorithm ServerSideEncryption - // Amazon Web Services Key Management Service (KMS) customer Amazon Web Services - // KMS key ID to use for the default encryption. This parameter is allowed if and - // only if SSEAlgorithm is set to aws:kms or aws:kms:dsse . You can specify the key - // ID, key alias, or the Amazon Resource Name (ARN) of the KMS key. + // Amazon Web Services Key Management Service (KMS) customer managed key ID to use + // for the default encryption. + // + // - General purpose buckets - This parameter is allowed if and only if + // SSEAlgorithm is set to aws:kms or aws:kms:dsse . + // + // - Directory buckets - This parameter is allowed if and only if SSEAlgorithm is + // set to aws:kms . + // + // You can specify the key ID, key alias, or the Amazon Resource Name (ARN) of the + // KMS key. + // // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // // - Key ARN: // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // // - Key Alias: alias/alias-name - // If you use a key ID, you can run into a LogDestination undeliverable error when - // creating a VPC flow log. If you are using encryption with cross-account or - // Amazon Web Services service operations you must use a fully qualified KMS key - // ARN. For more information, see Using encryption for cross-account operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html#bucket-encryption-update-bucket-policy) - // . Amazon S3 only supports symmetric encryption KMS keys. 
For more information, - // see Asymmetric keys in Amazon Web Services KMS (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) - // in the Amazon Web Services Key Management Service Developer Guide. + // + // If you are using encryption with cross-account or Amazon Web Services service + // operations, you must use a fully qualified KMS key ARN. For more information, + // see [Using encryption for cross-account operations]. + // + // - General purpose buckets - If you're specifying a customer managed KMS key, + // we recommend using a fully qualified KMS key ARN. If you use a KMS key alias + // instead, then KMS resolves the key within the requester’s account. This behavior + // can result in data that's encrypted with a KMS key that belongs to the + // requester, and not the bucket owner. Also, if you use a key ID, you can run into + // a LogDestination undeliverable error when creating a VPC flow log. + // + // - Directory buckets - When you specify an [KMS customer managed key]for encryption in your directory + // bucket, only use the key ID or key ARN. The key alias format of the KMS key + // isn't supported. + // + // Amazon S3 only supports symmetric encryption KMS keys. For more information, + // see [Asymmetric keys in Amazon Web Services KMS]in the Amazon Web Services Key Management Service Developer Guide. + // + // [Using encryption for cross-account operations]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html#bucket-encryption-update-bucket-policy + // [KMS customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk + // [Asymmetric keys in Amazon Web Services KMS]: https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html KMSMasterKeyID *string noSmithyDocumentSerde @@ -3177,6 +4080,18 @@ type ServerSideEncryptionConfiguration struct { } // Specifies the default server-side encryption configuration. +// +// - General purpose buckets - If you're specifying a customer managed KMS key, +// we recommend using a fully qualified KMS key ARN. If you use a KMS key alias +// instead, then KMS resolves the key within the requester’s account. This behavior +// can result in data that's encrypted with a KMS key that belongs to the +// requester, and not the bucket owner. +// +// - Directory buckets - When you specify an [KMS customer managed key]for encryption in your directory +// bucket, only use the key ID or key ARN. The key alias format of the KMS key +// isn't supported. +// +// [KMS customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk type ServerSideEncryptionRule struct { // Specifies the default server-side encryption to apply to new objects in the @@ -3187,17 +4102,33 @@ type ServerSideEncryptionRule struct { // Specifies whether Amazon S3 should use an S3 Bucket Key with server-side // encryption using KMS (SSE-KMS) for new objects in the bucket. Existing objects // are not affected. Setting the BucketKeyEnabled element to true causes Amazon S3 - // to use an S3 Bucket Key. By default, S3 Bucket Key is not enabled. For more - // information, see Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) - // in the Amazon S3 User Guide. + // to use an S3 Bucket Key. + // + // - General purpose buckets - By default, S3 Bucket Key is not enabled. For + // more information, see [Amazon S3 Bucket Keys]in the Amazon S3 User Guide. 
+ //
+ // - Directory buckets - S3 Bucket Keys are always enabled for GET and PUT
+ // operations in a directory bucket and can’t be disabled. S3 Bucket Keys aren't
+ // supported when you copy SSE-KMS encrypted objects from general purpose buckets
+ // to directory buckets, from directory buckets to general purpose buckets, or
+ // between directory buckets, through [CopyObject], [UploadPartCopy], [the Copy operation in Batch Operations], or [the import jobs]. In this case, Amazon S3 makes a
+ // call to KMS every time a copy request is made for a KMS-encrypted object.
+ //
+ // [Amazon S3 Bucket Keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html
+ // [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html
+ // [the import jobs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/create-import-job
+ // [UploadPartCopy]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html
+ // [the Copy operation in Batch Operations]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-objects-Batch-Ops
 BucketKeyEnabled *bool

 noSmithyDocumentSerde
 }

-// The established temporary security credentials of the session. Directory
-// buckets - These session credentials are only supported for the authentication
-// and authorization of Zonal endpoint APIs on directory buckets.
+// The established temporary security credentials of the session.
+//
+// Directory buckets - These session credentials are only supported for the
+// authentication and authorization of Zonal endpoint API operations on directory
+// buckets.
 type SessionCredentials struct {

 // A unique identifier that's associated with a secret access key. The access key
@@ -3233,7 +4164,9 @@ type SessionCredentials struct {
 }

 // To use simple format for S3 keys for log objects, set SimplePrefix to an empty
-// object. [DestinationPrefix][YYYY]-[MM]-[DD]-[hh]-[mm]-[ss]-[UniqueString]
+// object.
+//
+// [DestinationPrefix][YYYY]-[MM]-[DD]-[hh]-[mm]-[ss]-[UniqueString]
 type SimplePrefix struct {
 noSmithyDocumentSerde
 }
@@ -3249,12 +4182,14 @@ type SourceSelectionCriteria struct {
 // Amazon S3 doesn't replicate replica modifications by default. In the latest
 // version of replication configuration (when Filter is specified), you can
 // specify this element and set the status to Enabled to replicate modifications
- // on replicas. If you don't specify the Filter element, Amazon S3 assumes that
- // the replication configuration is the earlier version, V1. In the earlier
- // version, this element is not allowed
+ // on replicas.
+ //
+ // If you don't specify the Filter element, Amazon S3 assumes that the replication
+ // configuration is the earlier version, V1. In the earlier version, this element
+ // is not allowed.
 ReplicaModifications *ReplicaModifications

- // A container for filter information for the selection of Amazon S3 objects
+ // A container for filter information for the selection of Amazon S3 objects
 // encrypted with Amazon Web Services KMS. If you include SourceSelectionCriteria
 // in the replication configuration, this element is required.
 SseKmsEncryptedObjects *SseKmsEncryptedObjects
@@ -3372,10 +4307,12 @@ type Tagging struct {
 noSmithyDocumentSerde
 }

-// Container for granting information. Buckets that use the bucket owner enforced
-// setting for Object Ownership don't support target grants.
For more information, -// see Permissions server access log delivery (https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general) -// in the Amazon S3 User Guide. +// Container for granting information. +// +// Buckets that use the bucket owner enforced setting for Object Ownership don't +// support target grants. For more information, see [Permissions server access log delivery]in the Amazon S3 User Guide. +// +// [Permissions server access log delivery]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general type TargetGrant struct { // Container for the person being granted permissions. @@ -3406,9 +4343,10 @@ type TargetObjectKeyFormat struct { // without additional operational overhead. type Tiering struct { - // S3 Intelligent-Tiering access tier. See Storage class for automatically - // optimizing frequently and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access) - // for a list of access tiers in the S3 Intelligent-Tiering storage class. + // S3 Intelligent-Tiering access tier. See [Storage class for automatically optimizing frequently and infrequently accessed objects] for a list of access tiers in the S3 + // Intelligent-Tiering storage class. + // + // [Storage class for automatically optimizing frequently and infrequently accessed objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access // // This member is required. AccessTier IntelligentTieringAccessTier @@ -3431,8 +4369,9 @@ type Tiering struct { type TopicConfiguration struct { // The Amazon S3 bucket event about which to send notifications. For more - // information, see Supported Event Types (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) - // in the Amazon S3 User Guide. + // information, see [Supported Event Types]in the Amazon S3 User Guide. + // + // [Supported Event Types]: https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html // // This member is required. Events []Event @@ -3444,8 +4383,9 @@ type TopicConfiguration struct { TopicArn *string // Specifies object key name filtering rules. For information about key name - // filtering, see Configuring event notifications using object key name filtering (https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html) - // in the Amazon S3 User Guide. + // filtering, see [Configuring event notifications using object key name filtering]in the Amazon S3 User Guide. + // + // [Configuring event notifications using object key name filtering]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html Filter *NotificationConfigurationFilter // An optional unique identifier for configurations in a notification @@ -3456,9 +4396,10 @@ type TopicConfiguration struct { } // Specifies when an object transitions to a specified storage class. For more -// information about Amazon S3 lifecycle configuration rules, see Transitioning -// Objects Using Amazon S3 Lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-transition-general-considerations.html) -// in the Amazon S3 User Guide. +// information about Amazon S3 lifecycle configuration rules, see [Transitioning Objects Using Amazon S3 Lifecycle]in the Amazon S3 +// User Guide. 
+// +// [Transitioning Objects Using Amazon S3 Lifecycle]: https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-transition-general-considerations.html type Transition struct { // Indicates when objects are transitioned to the specified storage class. The @@ -3466,7 +4407,15 @@ type Transition struct { Date *time.Time // Indicates the number of days after creation when objects are transitioned to - // the specified storage class. The value must be a positive integer. + // the specified storage class. If the specified storage class is + // INTELLIGENT_TIERING , GLACIER_IR , GLACIER , or DEEP_ARCHIVE , valid values are + // 0 or positive integers. If the specified storage class is STANDARD_IA or + // ONEZONE_IA , valid values are positive integers greater than 30 . Be aware that + // some storage classes have a minimum storage duration and that you're charged for + // transitioning objects before their minimum storage duration. For more + // information, see [Constraints and considerations for transitions]in the Amazon S3 User Guide. + // + // [Constraints and considerations for transitions]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/lifecycle-transition-general-considerations.html#lifecycle-configuration-constraints Days *int32 // The storage class to which you want the object to transition. @@ -3476,8 +4425,9 @@ type Transition struct { } // Describes the versioning state of an Amazon S3 bucket. For more information, -// see PUT Bucket versioning (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTVersioningStatus.html) -// in the Amazon S3 API Reference. +// see [PUT Bucket versioning]in the Amazon S3 API Reference. +// +// [PUT Bucket versioning]: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTVersioningStatus.html type VersioningConfiguration struct { // Specifies whether MFA delete is enabled in the bucket versioning configuration. @@ -3500,8 +4450,9 @@ type WebsiteConfiguration struct { // The name of the index document for the website. IndexDocument *IndexDocument - // The redirect behavior for every request to this bucket's website endpoint. If - // you specify this property, you can't specify any other property. + // The redirect behavior for every request to this bucket's website endpoint. + // + // If you specify this property, you can't specify any other property. RedirectAllRequestsTo *RedirectAllRequestsTo // Rules that define when a redirect is applied and the redirect behavior. @@ -3522,7 +4473,5 @@ type UnknownUnionMember struct { } func (*UnknownUnionMember) isAnalyticsFilter() {} -func (*UnknownUnionMember) isLifecycleRuleFilter() {} func (*UnknownUnionMember) isMetricsFilter() {} -func (*UnknownUnionMember) isReplicationRuleFilter() {} func (*UnknownUnionMember) isSelectObjectContentEventStream() {} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/uri_context.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/uri_context.go new file mode 100644 index 000000000..0e664c59c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/uri_context.go @@ -0,0 +1,23 @@ +package s3 + +// This contains helper methods to set resolver URI into the context object. 
If they are ever used for +// something other than S3, they should be moved to internal/context/context.go + +import ( + "context" + + "github.com/aws/smithy-go/middleware" +) + +type s3resolvedURI struct{} + +// setS3ResolvedURI sets the URI as resolved by the EndpointResolverV2 +func setS3ResolvedURI(ctx context.Context, value string) context.Context { + return middleware.WithStackValue(ctx, s3resolvedURI{}, value) +} + +// getS3ResolvedURI gets the URI as resolved by EndpointResolverV2 +func getS3ResolvedURI(ctx context.Context) string { + v, _ := middleware.GetStackValue(ctx, s3resolvedURI{}).(string) + return v +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/validators.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/validators.go index e954b302d..97a56bd31 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/validators.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/validators.go @@ -90,6 +90,26 @@ func (m *validateOpCreateBucket) HandleInitialize(ctx context.Context, in middle return next.HandleInitialize(ctx, in) } +type validateOpCreateBucketMetadataTableConfiguration struct { +} + +func (*validateOpCreateBucketMetadataTableConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCreateBucketMetadataTableConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreateBucketMetadataTableConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCreateBucketMetadataTableConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpCreateMultipartUpload struct { } @@ -270,6 +290,26 @@ func (m *validateOpDeleteBucketLifecycle) HandleInitialize(ctx context.Context, return next.HandleInitialize(ctx, in) } +type validateOpDeleteBucketMetadataTableConfiguration struct { +} + +func (*validateOpDeleteBucketMetadataTableConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteBucketMetadataTableConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteBucketMetadataTableConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteBucketMetadataTableConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpDeleteBucketMetricsConfiguration struct { } @@ -670,6 +710,26 @@ func (m *validateOpGetBucketLogging) HandleInitialize(ctx context.Context, in mi return next.HandleInitialize(ctx, in) } +type validateOpGetBucketMetadataTableConfiguration struct { +} + +func (*validateOpGetBucketMetadataTableConfiguration) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetBucketMetadataTableConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetBucketMetadataTableConfigurationInput) + if !ok { + return out, metadata, fmt.Errorf("unknown 
input parameters type %T", in.Parameters) + } + if err := validateOpGetBucketMetadataTableConfigurationInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpGetBucketMetricsConfiguration struct { } @@ -1886,6 +1946,10 @@ func addOpCreateBucketValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpCreateBucket{}, middleware.After) } +func addOpCreateBucketMetadataTableConfigurationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCreateBucketMetadataTableConfiguration{}, middleware.After) +} + func addOpCreateMultipartUploadValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpCreateMultipartUpload{}, middleware.After) } @@ -1922,6 +1986,10 @@ func addOpDeleteBucketLifecycleValidationMiddleware(stack *middleware.Stack) err return stack.Initialize.Add(&validateOpDeleteBucketLifecycle{}, middleware.After) } +func addOpDeleteBucketMetadataTableConfigurationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteBucketMetadataTableConfiguration{}, middleware.After) +} + func addOpDeleteBucketMetricsConfigurationValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpDeleteBucketMetricsConfiguration{}, middleware.After) } @@ -2002,6 +2070,10 @@ func addOpGetBucketLoggingValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpGetBucketLogging{}, middleware.After) } +func addOpGetBucketMetadataTableConfigurationValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetBucketMetadataTableConfiguration{}, middleware.After) +} + func addOpGetBucketMetricsConfigurationValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpGetBucketMetricsConfiguration{}, middleware.After) } @@ -2912,22 +2984,20 @@ func validateLifecycleRuleAndOperator(v *types.LifecycleRuleAndOperator) error { } } -func validateLifecycleRuleFilter(v types.LifecycleRuleFilter) error { +func validateLifecycleRuleFilter(v *types.LifecycleRuleFilter) error { if v == nil { return nil } invalidParams := smithy.InvalidParamsError{Context: "LifecycleRuleFilter"} - switch uv := v.(type) { - case *types.LifecycleRuleFilterMemberAnd: - if err := validateLifecycleRuleAndOperator(&uv.Value); err != nil { - invalidParams.AddNested("[And]", err.(smithy.InvalidParamsError)) + if v.Tag != nil { + if err := validateTag(v.Tag); err != nil { + invalidParams.AddNested("Tag", err.(smithy.InvalidParamsError)) } - - case *types.LifecycleRuleFilterMemberTag: - if err := validateTag(&uv.Value); err != nil { - invalidParams.AddNested("[Tag]", err.(smithy.InvalidParamsError)) + } + if v.And != nil { + if err := validateLifecycleRuleAndOperator(v.And); err != nil { + invalidParams.AddNested("And", err.(smithy.InvalidParamsError)) } - } if invalidParams.Len() > 0 { return invalidParams @@ -2976,6 +3046,25 @@ func validateLoggingEnabled(v *types.LoggingEnabled) error { } } +func validateMetadataTableConfiguration(v *types.MetadataTableConfiguration) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "MetadataTableConfiguration"} + if v.S3TablesDestination == nil { + invalidParams.Add(smithy.NewErrParamRequired("S3TablesDestination")) + } else if v.S3TablesDestination != nil { + if err := validateS3TablesDestination(v.S3TablesDestination); err != 
nil { + invalidParams.AddNested("S3TablesDestination", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateMetrics(v *types.Metrics) error { if v == nil { return nil @@ -3320,22 +3409,20 @@ func validateReplicationRuleAndOperator(v *types.ReplicationRuleAndOperator) err } } -func validateReplicationRuleFilter(v types.ReplicationRuleFilter) error { +func validateReplicationRuleFilter(v *types.ReplicationRuleFilter) error { if v == nil { return nil } invalidParams := smithy.InvalidParamsError{Context: "ReplicationRuleFilter"} - switch uv := v.(type) { - case *types.ReplicationRuleFilterMemberAnd: - if err := validateReplicationRuleAndOperator(&uv.Value); err != nil { - invalidParams.AddNested("[And]", err.(smithy.InvalidParamsError)) + if v.Tag != nil { + if err := validateTag(v.Tag); err != nil { + invalidParams.AddNested("Tag", err.(smithy.InvalidParamsError)) } - - case *types.ReplicationRuleFilterMemberTag: - if err := validateTag(&uv.Value); err != nil { - invalidParams.AddNested("[Tag]", err.(smithy.InvalidParamsError)) + } + if v.And != nil { + if err := validateReplicationRuleAndOperator(v.And); err != nil { + invalidParams.AddNested("And", err.(smithy.InvalidParamsError)) } - } if invalidParams.Len() > 0 { return invalidParams @@ -3486,6 +3573,24 @@ func validateS3Location(v *types.S3Location) error { } } +func validateS3TablesDestination(v *types.S3TablesDestination) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "S3TablesDestination"} + if v.TableBucketArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("TableBucketArn")) + } + if v.TableName == nil { + invalidParams.Add(smithy.NewErrParamRequired("TableName")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateSelectParameters(v *types.SelectParameters) error { if v == nil { return nil @@ -3937,6 +4042,28 @@ func validateOpCreateBucketInput(v *CreateBucketInput) error { } } +func validateOpCreateBucketMetadataTableConfigurationInput(v *CreateBucketMetadataTableConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreateBucketMetadataTableConfigurationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if v.MetadataTableConfiguration == nil { + invalidParams.Add(smithy.NewErrParamRequired("MetadataTableConfiguration")) + } else if v.MetadataTableConfiguration != nil { + if err := validateMetadataTableConfiguration(v.MetadataTableConfiguration); err != nil { + invalidParams.AddNested("MetadataTableConfiguration", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpCreateMultipartUploadInput(v *CreateMultipartUploadInput) error { if v == nil { return nil @@ -4084,6 +4211,21 @@ func validateOpDeleteBucketLifecycleInput(v *DeleteBucketLifecycleInput) error { } } +func validateOpDeleteBucketMetadataTableConfigurationInput(v *DeleteBucketMetadataTableConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteBucketMetadataTableConfigurationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpDeleteBucketMetricsConfigurationInput(v 
*DeleteBucketMetricsConfigurationInput) error { if v == nil { return nil @@ -4409,6 +4551,21 @@ func validateOpGetBucketLoggingInput(v *GetBucketLoggingInput) error { } } +func validateOpGetBucketMetadataTableConfigurationInput(v *GetBucketMetadataTableConfigurationInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetBucketMetadataTableConfigurationInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpGetBucketMetricsConfigurationInput(v *GetBucketMetricsConfigurationInput) error { if v == nil { return nil diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/common.go b/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/common.go index 120008db1..df01890b2 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/common.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/common.go @@ -17,6 +17,7 @@ import ( "github.com/go-git/go-git/v5/plumbing" "github.com/go-git/go-git/v5/plumbing/protocol/packp" + "github.com/go-git/go-git/v5/plumbing/protocol/packp/capability" "github.com/go-git/go-git/v5/plumbing/transport" "github.com/go-git/go-git/v5/utils/ioutil" "github.com/golang/groupcache/lru" @@ -24,7 +25,7 @@ import ( // it requires a bytes.Buffer, because we need to know the length func applyHeadersToRequest(req *http.Request, content *bytes.Buffer, host string, requestType string) { - req.Header.Add("User-Agent", "git/1.0") + req.Header.Add("User-Agent", capability.DefaultAgent()) req.Header.Add("Host", host) // host:port if content == nil { diff --git a/vendor/github.com/go-git/go-git/v5/worktree.go b/vendor/github.com/go-git/go-git/v5/worktree.go index 8dfa50b1b..dded08e99 100644 --- a/vendor/github.com/go-git/go-git/v5/worktree.go +++ b/vendor/github.com/go-git/go-git/v5/worktree.go @@ -425,8 +425,9 @@ func (w *Worktree) resetIndex(t *object.Tree, dirs []string, files []string) err } func inFiles(files []string, v string) bool { + v = filepath.Clean(v) for _, s := range files { - if s == v { + if filepath.Clean(s) == v { return true } } diff --git a/vendor/github.com/go-git/go-git/v5/worktree_status.go b/vendor/github.com/go-git/go-git/v5/worktree_status.go index 6e72db974..7870d138d 100644 --- a/vendor/github.com/go-git/go-git/v5/worktree_status.go +++ b/vendor/github.com/go-git/go-git/v5/worktree_status.go @@ -370,6 +370,8 @@ func (w *Worktree) doAdd(path string, ignorePattern []gitignore.Pattern, skipSta } } + path = filepath.Clean(path) + if err != nil || !fi.IsDir() { added, h, err = w.doAddFile(idx, s, path, ignorePattern) } else { diff --git a/vendor/github.com/sagikazarmark/slog-shim/.editorconfig b/vendor/github.com/go-viper/mapstructure/v2/.editorconfig similarity index 86% rename from vendor/github.com/sagikazarmark/slog-shim/.editorconfig rename to vendor/github.com/go-viper/mapstructure/v2/.editorconfig index 1fb0e1bec..1f664d13a 100644 --- a/vendor/github.com/sagikazarmark/slog-shim/.editorconfig +++ b/vendor/github.com/go-viper/mapstructure/v2/.editorconfig @@ -8,11 +8,11 @@ indent_style = space insert_final_newline = true trim_trailing_whitespace = true -[*.nix] -indent_size = 2 +[*.go] +indent_style = tab [{Makefile,*.mk}] indent_style = tab -[Taskfile.yaml] +[*.nix] indent_size = 2 diff --git a/vendor/github.com/go-viper/mapstructure/v2/.envrc b/vendor/github.com/go-viper/mapstructure/v2/.envrc new file mode 100644 index 
000000000..2e0f9f5f7
--- /dev/null
+++ b/vendor/github.com/go-viper/mapstructure/v2/.envrc
@@ -0,0 +1,4 @@
+if ! has nix_direnv_version || ! nix_direnv_version 3.0.4; then
+  source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/3.0.4/direnvrc" "sha256-DzlYZ33mWF/Gs8DDeyjr8mnVmQGx7ASYqA5WlxwvBG4="
+fi
+use flake . --impure
diff --git a/vendor/github.com/go-viper/mapstructure/v2/.gitignore b/vendor/github.com/go-viper/mapstructure/v2/.gitignore
new file mode 100644
index 000000000..470e7ca2b
--- /dev/null
+++ b/vendor/github.com/go-viper/mapstructure/v2/.gitignore
@@ -0,0 +1,6 @@
+/.devenv/
+/.direnv/
+/.pre-commit-config.yaml
+/bin/
+/build/
+/var/
diff --git a/vendor/github.com/go-viper/mapstructure/v2/.golangci.yaml b/vendor/github.com/go-viper/mapstructure/v2/.golangci.yaml
new file mode 100644
index 000000000..763143aa7
--- /dev/null
+++ b/vendor/github.com/go-viper/mapstructure/v2/.golangci.yaml
@@ -0,0 +1,23 @@
+run:
+  timeout: 5m
+
+linters-settings:
+  gci:
+    sections:
+      - standard
+      - default
+      - prefix(github.com/go-viper/mapstructure)
+  golint:
+    min-confidence: 0
+  goimports:
+    local-prefixes: github.com/go-viper/mapstructure
+
+linters:
+  disable-all: true
+  enable:
+    - gci
+    - gofmt
+    - gofumpt
+    - goimports
+    - staticcheck
+    # - stylecheck
diff --git a/tools/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md b/vendor/github.com/go-viper/mapstructure/v2/CHANGELOG.md
similarity index 92%
rename from tools/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md
rename to vendor/github.com/go-viper/mapstructure/v2/CHANGELOG.md
index c75823490..afd44e5f5 100644
--- a/tools/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md
+++ b/vendor/github.com/go-viper/mapstructure/v2/CHANGELOG.md
@@ -1,3 +1,11 @@
+> [!WARNING]
+> As of v2 of this library, the change log can be found in GitHub releases.
+
+## 1.5.1
+
+* Wrap errors so they're compatible with `errors.Is` and `errors.As` [GH-282]
+* Fix map of slices not decoding properly in certain cases. [GH-266]
+
 ## 1.5.0

 * New option `IgnoreUntaggedFields` to ignore decoding to any fields
diff --git a/tools/vendor/github.com/mitchellh/mapstructure/LICENSE b/vendor/github.com/go-viper/mapstructure/v2/LICENSE
similarity index 100%
rename from tools/vendor/github.com/mitchellh/mapstructure/LICENSE
rename to vendor/github.com/go-viper/mapstructure/v2/LICENSE
diff --git a/vendor/github.com/go-viper/mapstructure/v2/README.md b/vendor/github.com/go-viper/mapstructure/v2/README.md
new file mode 100644
index 000000000..dd5ec69dd
--- /dev/null
+++ b/vendor/github.com/go-viper/mapstructure/v2/README.md
@@ -0,0 +1,80 @@
+# mapstructure
+
+[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/go-viper/mapstructure/ci.yaml?branch=main&style=flat-square)](https://github.com/go-viper/mapstructure/actions?query=workflow%3ACI)
+[![go.dev reference](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/mod/github.com/go-viper/mapstructure/v2)
+![Go Version](https://img.shields.io/badge/go%20version-%3E=1.18-61CFDD.svg?style=flat-square)
+
+mapstructure is a Go library for decoding generic map values to structures
+and vice versa, while providing helpful error handling.
+
+This library is most useful when decoding values from some data stream (JSON,
+Gob, etc.) where you don't _quite_ know the structure of the underlying data
+until you read a part of it.
You can therefore read a `map[string]interface{}`
+and use this library to decode it into the proper underlying native Go
+structure.
+
+## Installation
+
+```shell
+go get github.com/go-viper/mapstructure/v2
+```
+
+## Migrating from `github.com/mitchellh/mapstructure`
+
+[@mitchellh](https://github.com/mitchellh) announced his intent to archive some of his unmaintained projects (see [here](https://gist.github.com/mitchellh/90029601268e59a29e64e55bab1c5bdc) and [here](https://github.com/mitchellh/mapstructure/issues/349)). This repository has achieved the "blessed fork" status.
+
+You can migrate to this package by changing your import paths in your Go files to `github.com/go-viper/mapstructure/v2`.
+The API is the same, so you don't need to change anything else.
+
+Here is a script that can help you with the migration:
+
+```shell
+sed -i 's/github.com\/mitchellh\/mapstructure/github.com\/go-viper\/mapstructure\/v2/g' $(find . -type f -name '*.go')
+```
+
+If you need more time to migrate your code, that is absolutely fine.
+
+Some of the latest fixes are backported to the v1 release branch of this package, so you can add the following `replace` directive to your `go.mod` until you are ready to migrate:
+
+```
+replace github.com/mitchellh/mapstructure => github.com/go-viper/mapstructure v1.6.0
+```
+
+## Usage & Example
+
+For usage and examples see the [documentation](https://pkg.go.dev/mod/github.com/go-viper/mapstructure/v2).
+
+The `Decode` function has examples associated with it there.
+
+## But Why?!
+
+Go offers fantastic standard libraries for decoding formats such as JSON.
+The standard method is to have a struct pre-created, and populate that struct
+from the bytes of the encoded format. This is great, but the problem is if
+you have configuration or an encoding that changes slightly depending on
+specific fields. For example, consider this JSON:
+
+```json
+{
+  "type": "person",
+  "name": "Mitchell"
+}
+```
+
+Perhaps we can't populate a specific structure without first reading
+the "type" field from the JSON. We could always do two passes over the
+decoding of the JSON (reading the "type" first, and the rest later).
+However, it is much simpler to just decode this into a `map[string]interface{}`
+structure, read the "type" key, then use something like this library
+to decode it into the proper structure. A minimal sketch of this two-pass
+pattern is shown below.
+
+## Credits
+
+Mapstructure was originally created by [@mitchellh](https://github.com/mitchellh).
+This is a maintained fork of the original library.
+
+Read more about the reasons for the fork [here](https://github.com/mitchellh/mapstructure/issues/349).
+
+## License
+
+The project is licensed under the [MIT License](LICENSE).
diff --git a/vendor/github.com/go-viper/mapstructure/v2/decode_hooks.go b/vendor/github.com/go-viper/mapstructure/v2/decode_hooks.go
new file mode 100644
index 000000000..1f3c69d4b
--- /dev/null
+++ b/vendor/github.com/go-viper/mapstructure/v2/decode_hooks.go
@@ -0,0 +1,630 @@
+package mapstructure
+
+import (
+	"encoding"
+	"errors"
+	"fmt"
+	"net"
+	"net/netip"
+	"net/url"
+	"reflect"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns
+// it into the proper DecodeHookFunc type, such as DecodeHookFuncType.
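To make the README's two-pass decoding story concrete before the hook implementations below, here is a minimal sketch of that pattern. It is illustrative only: the `Person` type and the JSON payload are assumptions, not part of the vendored code; `mapstructure.Decode` is the library's actual entry point.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-viper/mapstructure/v2"
)

// Person is a hypothetical target type for the "person" payload.
type Person struct {
	Name string
}

func main() {
	payload := []byte(`{"type": "person", "name": "Mitchell"}`)

	// First pass: decode into a generic map so we can inspect "type".
	var raw map[string]interface{}
	if err := json.Unmarshal(payload, &raw); err != nil {
		panic(err)
	}

	// Second pass: decode the map into the matching concrete struct.
	if raw["type"] == "person" {
		var p Person
		if err := mapstructure.Decode(raw, &p); err != nil {
			panic(err)
		}
		fmt.Println(p.Name) // Mitchell
	}
}
```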
+func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc { + // Create variables here so we can reference them with the reflect pkg + var f1 DecodeHookFuncType + var f2 DecodeHookFuncKind + var f3 DecodeHookFuncValue + + // Fill in the variables into this interface and the rest is done + // automatically using the reflect package. + potential := []interface{}{f1, f2, f3} + + v := reflect.ValueOf(h) + vt := v.Type() + for _, raw := range potential { + pt := reflect.ValueOf(raw).Type() + if vt.ConvertibleTo(pt) { + return v.Convert(pt).Interface() + } + } + + return nil +} + +// cachedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns +// it into a closure to be used directly +// if the type fails to convert we return a closure always erroring to keep the previous behaviour +func cachedDecodeHook(raw DecodeHookFunc) func(from reflect.Value, to reflect.Value) (interface{}, error) { + switch f := typedDecodeHook(raw).(type) { + case DecodeHookFuncType: + return func(from reflect.Value, to reflect.Value) (interface{}, error) { + return f(from.Type(), to.Type(), from.Interface()) + } + case DecodeHookFuncKind: + return func(from reflect.Value, to reflect.Value) (interface{}, error) { + return f(from.Kind(), to.Kind(), from.Interface()) + } + case DecodeHookFuncValue: + return func(from reflect.Value, to reflect.Value) (interface{}, error) { + return f(from, to) + } + default: + return func(from reflect.Value, to reflect.Value) (interface{}, error) { + return nil, errors.New("invalid decode hook signature") + } + } +} + +// DecodeHookExec executes the given decode hook. This should be used +// since it'll naturally degrade to the older backwards compatible DecodeHookFunc +// that took reflect.Kind instead of reflect.Type. +func DecodeHookExec( + raw DecodeHookFunc, + from reflect.Value, to reflect.Value, +) (interface{}, error) { + switch f := typedDecodeHook(raw).(type) { + case DecodeHookFuncType: + return f(from.Type(), to.Type(), from.Interface()) + case DecodeHookFuncKind: + return f(from.Kind(), to.Kind(), from.Interface()) + case DecodeHookFuncValue: + return f(from, to) + default: + return nil, errors.New("invalid decode hook signature") + } +} + +// ComposeDecodeHookFunc creates a single DecodeHookFunc that +// automatically composes multiple DecodeHookFuncs. +// +// The composed funcs are called in order, with the result of the +// previous transformation. +func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc { + cached := make([]func(from reflect.Value, to reflect.Value) (interface{}, error), 0, len(fs)) + for _, f := range fs { + cached = append(cached, cachedDecodeHook(f)) + } + return func(f reflect.Value, t reflect.Value) (interface{}, error) { + var err error + data := f.Interface() + + newFrom := f + for _, c := range cached { + data, err = c(newFrom, t) + if err != nil { + return nil, err + } + newFrom = reflect.ValueOf(data) + } + + return data, nil + } +} + +// OrComposeDecodeHookFunc executes all input hook functions until one of them returns no error. In that case its value is returned. +// If all hooks return an error, OrComposeDecodeHookFunc returns an error concatenating all error messages. 
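The compose helpers above are normally wired into a `DecoderConfig`. A hedged sketch follows; the `Config` struct and its field names are assumptions for illustration, while `NewDecoder`, `DecoderConfig`, and the two hook constructors are the library's real API.

```go
package main

import (
	"fmt"
	"time"

	"github.com/go-viper/mapstructure/v2"
)

// Config is a hypothetical target with one duration and one slice field.
type Config struct {
	Timeout time.Duration
	Tags    []string
}

func main() {
	var cfg Config
	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		// Hooks run in order; each one receives the previous hook's output.
		DecodeHook: mapstructure.ComposeDecodeHookFunc(
			mapstructure.StringToTimeDurationHookFunc(),
			mapstructure.StringToSliceHookFunc(","),
		),
		Result: &cfg,
	})
	if err != nil {
		panic(err)
	}
	input := map[string]interface{}{"timeout": "5s", "tags": "a,b"}
	if err := dec.Decode(input); err != nil {
		panic(err)
	}
	fmt.Println(cfg.Timeout, cfg.Tags) // 5s [a b]
}
```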
+func OrComposeDecodeHookFunc(ff ...DecodeHookFunc) DecodeHookFunc { + cached := make([]func(from reflect.Value, to reflect.Value) (interface{}, error), 0, len(ff)) + for _, f := range ff { + cached = append(cached, cachedDecodeHook(f)) + } + return func(a, b reflect.Value) (interface{}, error) { + var allErrs string + var out interface{} + var err error + + for _, c := range cached { + out, err = c(a, b) + if err != nil { + allErrs += err.Error() + "\n" + continue + } + + return out, nil + } + + return nil, errors.New(allErrs) + } +} + +// StringToSliceHookFunc returns a DecodeHookFunc that converts +// string to []string by splitting on the given sep. +func StringToSliceHookFunc(sep string) DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}, + ) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.SliceOf(f) { + return data, nil + } + + raw := data.(string) + if raw == "" { + return []string{}, nil + } + + return strings.Split(raw, sep), nil + } +} + +// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts +// strings to time.Duration. +func StringToTimeDurationHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}, + ) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(time.Duration(5)) { + return data, nil + } + + // Convert it by parsing + return time.ParseDuration(data.(string)) + } +} + +// StringToURLHookFunc returns a DecodeHookFunc that converts +// strings to *url.URL. +func StringToURLHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}, + ) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(&url.URL{}) { + return data, nil + } + + // Convert it by parsing + return url.Parse(data.(string)) + } +} + +// StringToIPHookFunc returns a DecodeHookFunc that converts +// strings to net.IP +func StringToIPHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}, + ) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(net.IP{}) { + return data, nil + } + + // Convert it by parsing + ip := net.ParseIP(data.(string)) + if ip == nil { + return net.IP{}, fmt.Errorf("failed parsing ip %v", data) + } + + return ip, nil + } +} + +// StringToIPNetHookFunc returns a DecodeHookFunc that converts +// strings to net.IPNet +func StringToIPNetHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}, + ) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(net.IPNet{}) { + return data, nil + } + + // Convert it by parsing + _, net, err := net.ParseCIDR(data.(string)) + return net, err + } +} + +// StringToTimeHookFunc returns a DecodeHookFunc that converts +// strings to time.Time. +func StringToTimeHookFunc(layout string) DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}, + ) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(time.Time{}) { + return data, nil + } + + // Convert it by parsing + return time.Parse(layout, data.(string)) + } +} + +// WeaklyTypedHook is a DecodeHookFunc which adds support for weak typing to +// the decoder. +// +// Note that this is significantly different from the WeaklyTypedInput option +// of the DecoderConfig. 
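The string-parsing hooks above all share the same shape: check the source and target types, then delegate to the standard library. A small usage sketch for `StringToTimeHookFunc`, assuming a hypothetical `Event` struct:

```go
package main

import (
	"fmt"
	"time"

	"github.com/go-viper/mapstructure/v2"
)

// Event is an illustrative target type; only the hook and decoder are real API.
type Event struct {
	At time.Time
}

func main() {
	var ev Event
	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		// Parse RFC 3339 strings into time.Time fields.
		DecodeHook: mapstructure.StringToTimeHookFunc(time.RFC3339),
		Result:     &ev,
	})
	if err != nil {
		panic(err)
	}
	if err := dec.Decode(map[string]interface{}{"at": "2024-01-02T15:04:05Z"}); err != nil {
		panic(err)
	}
	fmt.Println(ev.At.UTC()) // 2024-01-02 15:04:05 +0000 UTC
}
```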
+func WeaklyTypedHook( + f reflect.Kind, + t reflect.Kind, + data interface{}, +) (interface{}, error) { + dataVal := reflect.ValueOf(data) + switch t { + case reflect.String: + switch f { + case reflect.Bool: + if dataVal.Bool() { + return "1", nil + } + return "0", nil + case reflect.Float32: + return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil + case reflect.Int: + return strconv.FormatInt(dataVal.Int(), 10), nil + case reflect.Slice: + dataType := dataVal.Type() + elemKind := dataType.Elem().Kind() + if elemKind == reflect.Uint8 { + return string(dataVal.Interface().([]uint8)), nil + } + case reflect.Uint: + return strconv.FormatUint(dataVal.Uint(), 10), nil + } + } + + return data, nil +} + +func RecursiveStructToMapHookFunc() DecodeHookFunc { + return func(f reflect.Value, t reflect.Value) (interface{}, error) { + if f.Kind() != reflect.Struct { + return f.Interface(), nil + } + + var i interface{} = struct{}{} + if t.Type() != reflect.TypeOf(&i).Elem() { + return f.Interface(), nil + } + + m := make(map[string]interface{}) + t.Set(reflect.ValueOf(m)) + + return f.Interface(), nil + } +} + +// TextUnmarshallerHookFunc returns a DecodeHookFunc that applies +// strings to the UnmarshalText function, when the target type +// implements the encoding.TextUnmarshaler interface +func TextUnmarshallerHookFunc() DecodeHookFuncType { + return func( + f reflect.Type, + t reflect.Type, + data interface{}, + ) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + result := reflect.New(t).Interface() + unmarshaller, ok := result.(encoding.TextUnmarshaler) + if !ok { + return data, nil + } + str, ok := data.(string) + if !ok { + str = reflect.Indirect(reflect.ValueOf(&data)).Elem().String() + } + if err := unmarshaller.UnmarshalText([]byte(str)); err != nil { + return nil, err + } + return result, nil + } +} + +// StringToNetIPAddrHookFunc returns a DecodeHookFunc that converts +// strings to netip.Addr. +func StringToNetIPAddrHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}, + ) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(netip.Addr{}) { + return data, nil + } + + // Convert it by parsing + return netip.ParseAddr(data.(string)) + } +} + +// StringToNetIPAddrPortHookFunc returns a DecodeHookFunc that converts +// strings to netip.AddrPort. +func StringToNetIPAddrPortHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}, + ) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(netip.AddrPort{}) { + return data, nil + } + + // Convert it by parsing + return netip.ParseAddrPort(data.(string)) + } +} + +// StringToBasicTypeHookFunc returns a DecodeHookFunc that converts +// strings to basic types. 
+// int8, uint8, int16, uint16, int32, uint32, int64, uint64, int, uint, float32, float64, bool, byte, rune, complex64, complex128 +func StringToBasicTypeHookFunc() DecodeHookFunc { + return ComposeDecodeHookFunc( + StringToInt8HookFunc(), + StringToUint8HookFunc(), + StringToInt16HookFunc(), + StringToUint16HookFunc(), + StringToInt32HookFunc(), + StringToUint32HookFunc(), + StringToInt64HookFunc(), + StringToUint64HookFunc(), + StringToIntHookFunc(), + StringToUintHookFunc(), + StringToFloat32HookFunc(), + StringToFloat64HookFunc(), + StringToBoolHookFunc(), + // byte and rune are aliases for uint8 and int32 respectively + // StringToByteHookFunc(), + // StringToRuneHookFunc(), + StringToComplex64HookFunc(), + StringToComplex128HookFunc(), + ) +} + +// StringToInt8HookFunc returns a DecodeHookFunc that converts +// strings to int8. +func StringToInt8HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Int8 { + return data, nil + } + + // Convert it by parsing + i64, err := strconv.ParseInt(data.(string), 0, 8) + return int8(i64), err + } +} + +// StringToUint8HookFunc returns a DecodeHookFunc that converts +// strings to uint8. +func StringToUint8HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Uint8 { + return data, nil + } + + // Convert it by parsing + u64, err := strconv.ParseUint(data.(string), 0, 8) + return uint8(u64), err + } +} + +// StringToInt16HookFunc returns a DecodeHookFunc that converts +// strings to int16. +func StringToInt16HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Int16 { + return data, nil + } + + // Convert it by parsing + i64, err := strconv.ParseInt(data.(string), 0, 16) + return int16(i64), err + } +} + +// StringToUint16HookFunc returns a DecodeHookFunc that converts +// strings to uint16. +func StringToUint16HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Uint16 { + return data, nil + } + + // Convert it by parsing + u64, err := strconv.ParseUint(data.(string), 0, 16) + return uint16(u64), err + } +} + +// StringToInt32HookFunc returns a DecodeHookFunc that converts +// strings to int32. +func StringToInt32HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Int32 { + return data, nil + } + + // Convert it by parsing + i64, err := strconv.ParseInt(data.(string), 0, 32) + return int32(i64), err + } +} + +// StringToUint32HookFunc returns a DecodeHookFunc that converts +// strings to uint32. +func StringToUint32HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Uint32 { + return data, nil + } + + // Convert it by parsing + u64, err := strconv.ParseUint(data.(string), 0, 32) + return uint32(u64), err + } +} + +// StringToInt64HookFunc returns a DecodeHookFunc that converts +// strings to int64. 
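Note that the integer hooks above, and those that follow, parse with base 0, so the string's prefix selects the base. The sketch below demonstrates that with plain `strconv`; it is standard-library behavior rather than anything specific to this package:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	// Base 0 lets the prefix pick the base, exactly as the hooks above do:
	// decimal, hex, octal, and binary spellings of 42 all parse successfully.
	for _, s := range []string{"42", "0x2a", "0o52", "0b101010"} {
		n, err := strconv.ParseInt(s, 0, 8) // 8-bit target, like StringToInt8HookFunc
		fmt.Println(s, "->", n, err)
	}
}
```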
+func StringToInt64HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Int64 { + return data, nil + } + + // Convert it by parsing + return strconv.ParseInt(data.(string), 0, 64) + } +} + +// StringToUint64HookFunc returns a DecodeHookFunc that converts +// strings to uint64. +func StringToUint64HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Uint64 { + return data, nil + } + + // Convert it by parsing + return strconv.ParseUint(data.(string), 0, 64) + } +} + +// StringToIntHookFunc returns a DecodeHookFunc that converts +// strings to int. +func StringToIntHookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Int { + return data, nil + } + + // Convert it by parsing + i64, err := strconv.ParseInt(data.(string), 0, 0) + return int(i64), err + } +} + +// StringToUintHookFunc returns a DecodeHookFunc that converts +// strings to uint. +func StringToUintHookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Uint { + return data, nil + } + + // Convert it by parsing + u64, err := strconv.ParseUint(data.(string), 0, 0) + return uint(u64), err + } +} + +// StringToFloat32HookFunc returns a DecodeHookFunc that converts +// strings to float32. +func StringToFloat32HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Float32 { + return data, nil + } + + // Convert it by parsing + f64, err := strconv.ParseFloat(data.(string), 32) + return float32(f64), err + } +} + +// StringToFloat64HookFunc returns a DecodeHookFunc that converts +// strings to float64. +func StringToFloat64HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Float64 { + return data, nil + } + + // Convert it by parsing + return strconv.ParseFloat(data.(string), 64) + } +} + +// StringToBoolHookFunc returns a DecodeHookFunc that converts +// strings to bool. +func StringToBoolHookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Bool { + return data, nil + } + + // Convert it by parsing + return strconv.ParseBool(data.(string)) + } +} + +// StringToByteHookFunc returns a DecodeHookFunc that converts +// strings to byte. +func StringToByteHookFunc() DecodeHookFunc { + return StringToUint8HookFunc() +} + +// StringToRuneHookFunc returns a DecodeHookFunc that converts +// strings to rune. +func StringToRuneHookFunc() DecodeHookFunc { + return StringToInt32HookFunc() +} + +// StringToComplex64HookFunc returns a DecodeHookFunc that converts +// strings to complex64. 
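The two complex-number hooks that follow delegate to `strconv.ParseComplex`. A minimal sketch of the accepted input form, again plain standard-library behavior:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	// ParseComplex accepts "N+Mi" forms; bitSize 64 range-checks for complex64.
	c, err := strconv.ParseComplex("3+4i", 64)
	fmt.Println(complex64(c), err) // (3+4i) <nil>
}
```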
+func StringToComplex64HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Complex64 { + return data, nil + } + + // Convert it by parsing + c128, err := strconv.ParseComplex(data.(string), 64) + return complex64(c128), err + } +} + +// StringToComplex128HookFunc returns a DecodeHookFunc that converts +// strings to complex128. +func StringToComplex128HookFunc() DecodeHookFunc { + return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Complex128 { + return data, nil + } + + // Convert it by parsing + return strconv.ParseComplex(data.(string), 128) + } +} diff --git a/vendor/github.com/go-viper/mapstructure/v2/flake.lock b/vendor/github.com/go-viper/mapstructure/v2/flake.lock new file mode 100644 index 000000000..4bea8154e --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/flake.lock @@ -0,0 +1,472 @@ +{ + "nodes": { + "cachix": { + "inputs": { + "devenv": "devenv_2", + "flake-compat": [ + "devenv", + "flake-compat" + ], + "nixpkgs": [ + "devenv", + "nixpkgs" + ], + "pre-commit-hooks": [ + "devenv", + "pre-commit-hooks" + ] + }, + "locked": { + "lastModified": 1712055811, + "narHash": "sha256-7FcfMm5A/f02yyzuavJe06zLa9hcMHsagE28ADcmQvk=", + "owner": "cachix", + "repo": "cachix", + "rev": "02e38da89851ec7fec3356a5c04bc8349cae0e30", + "type": "github" + }, + "original": { + "owner": "cachix", + "repo": "cachix", + "type": "github" + } + }, + "devenv": { + "inputs": { + "cachix": "cachix", + "flake-compat": "flake-compat_2", + "nix": "nix_2", + "nixpkgs": "nixpkgs_2", + "pre-commit-hooks": "pre-commit-hooks" + }, + "locked": { + "lastModified": 1717245169, + "narHash": "sha256-+mW3rTBjGU8p1THJN0lX/Dd/8FbnF+3dB+mJuSaxewE=", + "owner": "cachix", + "repo": "devenv", + "rev": "c3f9f053c077c6f88a3de5276d9178c62baa3fc3", + "type": "github" + }, + "original": { + "owner": "cachix", + "repo": "devenv", + "type": "github" + } + }, + "devenv_2": { + "inputs": { + "flake-compat": [ + "devenv", + "cachix", + "flake-compat" + ], + "nix": "nix", + "nixpkgs": "nixpkgs", + "poetry2nix": "poetry2nix", + "pre-commit-hooks": [ + "devenv", + "cachix", + "pre-commit-hooks" + ] + }, + "locked": { + "lastModified": 1708704632, + "narHash": "sha256-w+dOIW60FKMaHI1q5714CSibk99JfYxm0CzTinYWr+Q=", + "owner": "cachix", + "repo": "devenv", + "rev": "2ee4450b0f4b95a1b90f2eb5ffea98b90e48c196", + "type": "github" + }, + "original": { + "owner": "cachix", + "ref": "python-rewrite", + "repo": "devenv", + "type": "github" + } + }, + "flake-compat": { + "flake": false, + "locked": { + "lastModified": 1673956053, + "narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=", + "owner": "edolstra", + "repo": "flake-compat", + "rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9", + "type": "github" + }, + "original": { + "owner": "edolstra", + "repo": "flake-compat", + "type": "github" + } + }, + "flake-compat_2": { + "flake": false, + "locked": { + "lastModified": 1696426674, + "narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=", + "owner": "edolstra", + "repo": "flake-compat", + "rev": "0f9255e01c2351cc7d116c072cb317785dd33b33", + "type": "github" + }, + "original": { + "owner": "edolstra", + "repo": "flake-compat", + "type": "github" + } + }, + "flake-parts": { + "inputs": { + "nixpkgs-lib": "nixpkgs-lib" + }, + "locked": { + "lastModified": 1717285511, + "narHash": 
"sha256-iKzJcpdXih14qYVcZ9QC9XuZYnPc6T8YImb6dX166kw=", + "owner": "hercules-ci", + "repo": "flake-parts", + "rev": "2a55567fcf15b1b1c7ed712a2c6fadaec7412ea8", + "type": "github" + }, + "original": { + "owner": "hercules-ci", + "repo": "flake-parts", + "type": "github" + } + }, + "flake-utils": { + "inputs": { + "systems": "systems" + }, + "locked": { + "lastModified": 1689068808, + "narHash": "sha256-6ixXo3wt24N/melDWjq70UuHQLxGV8jZvooRanIHXw0=", + "owner": "numtide", + "repo": "flake-utils", + "rev": "919d646de7be200f3bf08cb76ae1f09402b6f9b4", + "type": "github" + }, + "original": { + "owner": "numtide", + "repo": "flake-utils", + "type": "github" + } + }, + "flake-utils_2": { + "inputs": { + "systems": "systems_2" + }, + "locked": { + "lastModified": 1710146030, + "narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=", + "owner": "numtide", + "repo": "flake-utils", + "rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a", + "type": "github" + }, + "original": { + "owner": "numtide", + "repo": "flake-utils", + "type": "github" + } + }, + "gitignore": { + "inputs": { + "nixpkgs": [ + "devenv", + "pre-commit-hooks", + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1709087332, + "narHash": "sha256-HG2cCnktfHsKV0s4XW83gU3F57gaTljL9KNSuG6bnQs=", + "owner": "hercules-ci", + "repo": "gitignore.nix", + "rev": "637db329424fd7e46cf4185293b9cc8c88c95394", + "type": "github" + }, + "original": { + "owner": "hercules-ci", + "repo": "gitignore.nix", + "type": "github" + } + }, + "nix": { + "inputs": { + "flake-compat": "flake-compat", + "nixpkgs": [ + "devenv", + "cachix", + "devenv", + "nixpkgs" + ], + "nixpkgs-regression": "nixpkgs-regression" + }, + "locked": { + "lastModified": 1712911606, + "narHash": "sha256-BGvBhepCufsjcUkXnEEXhEVjwdJAwPglCC2+bInc794=", + "owner": "domenkozar", + "repo": "nix", + "rev": "b24a9318ea3f3600c1e24b4a00691ee912d4de12", + "type": "github" + }, + "original": { + "owner": "domenkozar", + "ref": "devenv-2.21", + "repo": "nix", + "type": "github" + } + }, + "nix-github-actions": { + "inputs": { + "nixpkgs": [ + "devenv", + "cachix", + "devenv", + "poetry2nix", + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1688870561, + "narHash": "sha256-4UYkifnPEw1nAzqqPOTL2MvWtm3sNGw1UTYTalkTcGY=", + "owner": "nix-community", + "repo": "nix-github-actions", + "rev": "165b1650b753316aa7f1787f3005a8d2da0f5301", + "type": "github" + }, + "original": { + "owner": "nix-community", + "repo": "nix-github-actions", + "type": "github" + } + }, + "nix_2": { + "inputs": { + "flake-compat": [ + "devenv", + "flake-compat" + ], + "nixpkgs": [ + "devenv", + "nixpkgs" + ], + "nixpkgs-regression": "nixpkgs-regression_2" + }, + "locked": { + "lastModified": 1712911606, + "narHash": "sha256-BGvBhepCufsjcUkXnEEXhEVjwdJAwPglCC2+bInc794=", + "owner": "domenkozar", + "repo": "nix", + "rev": "b24a9318ea3f3600c1e24b4a00691ee912d4de12", + "type": "github" + }, + "original": { + "owner": "domenkozar", + "ref": "devenv-2.21", + "repo": "nix", + "type": "github" + } + }, + "nixpkgs": { + "locked": { + "lastModified": 1692808169, + "narHash": "sha256-x9Opq06rIiwdwGeK2Ykj69dNc2IvUH1fY55Wm7atwrE=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "9201b5ff357e781bf014d0330d18555695df7ba8", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "nixpkgs-unstable", + "repo": "nixpkgs", + "type": "github" + } + }, + "nixpkgs-lib": { + "locked": { + "lastModified": 1717284937, + "narHash": "sha256-lIbdfCsf8LMFloheeE6N31+BMIeixqyQWbSr2vk79EQ=", + "type": "tarball", + "url": 
"https://github.com/NixOS/nixpkgs/archive/eb9ceca17df2ea50a250b6b27f7bf6ab0186f198.tar.gz" + }, + "original": { + "type": "tarball", + "url": "https://github.com/NixOS/nixpkgs/archive/eb9ceca17df2ea50a250b6b27f7bf6ab0186f198.tar.gz" + } + }, + "nixpkgs-regression": { + "locked": { + "lastModified": 1643052045, + "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", + "type": "github" + }, + "original": { + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", + "type": "github" + } + }, + "nixpkgs-regression_2": { + "locked": { + "lastModified": 1643052045, + "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", + "type": "github" + }, + "original": { + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", + "type": "github" + } + }, + "nixpkgs-stable": { + "locked": { + "lastModified": 1710695816, + "narHash": "sha256-3Eh7fhEID17pv9ZxrPwCLfqXnYP006RKzSs0JptsN84=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "614b4613980a522ba49f0d194531beddbb7220d3", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "nixos-23.11", + "repo": "nixpkgs", + "type": "github" + } + }, + "nixpkgs_2": { + "locked": { + "lastModified": 1713361204, + "narHash": "sha256-TA6EDunWTkc5FvDCqU3W2T3SFn0gRZqh6D/hJnM02MM=", + "owner": "cachix", + "repo": "devenv-nixpkgs", + "rev": "285676e87ad9f0ca23d8714a6ab61e7e027020c6", + "type": "github" + }, + "original": { + "owner": "cachix", + "ref": "rolling", + "repo": "devenv-nixpkgs", + "type": "github" + } + }, + "nixpkgs_3": { + "locked": { + "lastModified": 1717112898, + "narHash": "sha256-7R2ZvOnvd9h8fDd65p0JnB7wXfUvreox3xFdYWd1BnY=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "6132b0f6e344ce2fe34fc051b72fb46e34f668e0", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "nixpkgs-unstable", + "repo": "nixpkgs", + "type": "github" + } + }, + "poetry2nix": { + "inputs": { + "flake-utils": "flake-utils", + "nix-github-actions": "nix-github-actions", + "nixpkgs": [ + "devenv", + "cachix", + "devenv", + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1692876271, + "narHash": "sha256-IXfZEkI0Mal5y1jr6IRWMqK8GW2/f28xJenZIPQqkY0=", + "owner": "nix-community", + "repo": "poetry2nix", + "rev": "d5006be9c2c2417dafb2e2e5034d83fabd207ee3", + "type": "github" + }, + "original": { + "owner": "nix-community", + "repo": "poetry2nix", + "type": "github" + } + }, + "pre-commit-hooks": { + "inputs": { + "flake-compat": [ + "devenv", + "flake-compat" + ], + "flake-utils": "flake-utils_2", + "gitignore": "gitignore", + "nixpkgs": [ + "devenv", + "nixpkgs" + ], + "nixpkgs-stable": "nixpkgs-stable" + }, + "locked": { + "lastModified": 1713775815, + "narHash": "sha256-Wu9cdYTnGQQwtT20QQMg7jzkANKQjwBD9iccfGKkfls=", + "owner": "cachix", + "repo": "pre-commit-hooks.nix", + "rev": "2ac4dcbf55ed43f3be0bae15e181f08a57af24a4", + "type": "github" + }, + "original": { + "owner": "cachix", + "repo": "pre-commit-hooks.nix", + "type": "github" + } + }, + "root": { + "inputs": { + "devenv": "devenv", + "flake-parts": "flake-parts", + "nixpkgs": "nixpkgs_3" + } + }, + "systems": { + "locked": { + "lastModified": 1681028828, + "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", + "owner": "nix-systems", + "repo": "default", + "rev": 
"da67096a3b9bf56a91d16901293e51ba5b49a27e", + "type": "github" + }, + "original": { + "owner": "nix-systems", + "repo": "default", + "type": "github" + } + }, + "systems_2": { + "locked": { + "lastModified": 1681028828, + "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", + "owner": "nix-systems", + "repo": "default", + "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", + "type": "github" + }, + "original": { + "owner": "nix-systems", + "repo": "default", + "type": "github" + } + } + }, + "root": "root", + "version": 7 +} diff --git a/vendor/github.com/go-viper/mapstructure/v2/flake.nix b/vendor/github.com/go-viper/mapstructure/v2/flake.nix new file mode 100644 index 000000000..4ed0f5331 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/flake.nix @@ -0,0 +1,39 @@ +{ + inputs = { + nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable"; + flake-parts.url = "github:hercules-ci/flake-parts"; + devenv.url = "github:cachix/devenv"; + }; + + outputs = inputs@{ flake-parts, ... }: + flake-parts.lib.mkFlake { inherit inputs; } { + imports = [ + inputs.devenv.flakeModule + ]; + + systems = [ "x86_64-linux" "x86_64-darwin" "aarch64-darwin" ]; + + perSystem = { config, self', inputs', pkgs, system, ... }: rec { + devenv.shells = { + default = { + languages = { + go.enable = true; + }; + + pre-commit.hooks = { + nixpkgs-fmt.enable = true; + }; + + packages = with pkgs; [ + golangci-lint + ]; + + # https://github.com/cachix/devenv/issues/528#issuecomment-1556108767 + containers = pkgs.lib.mkForce { }; + }; + + ci = devenv.shells.default; + }; + }; + }; +} diff --git a/vendor/github.com/go-viper/mapstructure/v2/internal/errors/errors.go b/vendor/github.com/go-viper/mapstructure/v2/internal/errors/errors.go new file mode 100644 index 000000000..d1c15e474 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/internal/errors/errors.go @@ -0,0 +1,11 @@ +package errors + +import "errors" + +func New(text string) error { + return errors.New(text) +} + +func As(err error, target interface{}) bool { + return errors.As(err, target) +} diff --git a/vendor/github.com/go-viper/mapstructure/v2/internal/errors/join.go b/vendor/github.com/go-viper/mapstructure/v2/internal/errors/join.go new file mode 100644 index 000000000..d74e3a0b5 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/internal/errors/join.go @@ -0,0 +1,9 @@ +//go:build go1.20 + +package errors + +import "errors" + +func Join(errs ...error) error { + return errors.Join(errs...) +} diff --git a/vendor/github.com/go-viper/mapstructure/v2/internal/errors/join_go1_19.go b/vendor/github.com/go-viper/mapstructure/v2/internal/errors/join_go1_19.go new file mode 100644 index 000000000..700b40229 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/internal/errors/join_go1_19.go @@ -0,0 +1,61 @@ +//go:build !go1.20 + +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package errors + +// Join returns an error that wraps the given errors. +// Any nil error values are discarded. +// Join returns nil if every value in errs is nil. +// The error formats as the concatenation of the strings obtained +// by calling the Error method of each element of errs, with a newline +// between each string. +// +// A non-nil error returned by Join implements the Unwrap() []error method. 
+func Join(errs ...error) error { + n := 0 + for _, err := range errs { + if err != nil { + n++ + } + } + if n == 0 { + return nil + } + e := &joinError{ + errs: make([]error, 0, n), + } + for _, err := range errs { + if err != nil { + e.errs = append(e.errs, err) + } + } + return e +} + +type joinError struct { + errs []error +} + +func (e *joinError) Error() string { + // Since Join returns nil if every value in errs is nil, + // e.errs cannot be empty. + if len(e.errs) == 1 { + return e.errs[0].Error() + } + + b := []byte(e.errs[0].Error()) + for _, err := range e.errs[1:] { + b = append(b, '\n') + b = append(b, err.Error()...) + } + // At this point, b has at least one byte '\n'. + // return unsafe.String(&b[0], len(b)) + return string(b) +} + +func (e *joinError) Unwrap() []error { + return e.errs +} diff --git a/tools/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/go-viper/mapstructure/v2/mapstructure.go similarity index 87% rename from tools/vendor/github.com/mitchellh/mapstructure/mapstructure.go rename to vendor/github.com/go-viper/mapstructure/v2/mapstructure.go index 1efb22ac3..e77e63ba3 100644 --- a/tools/vendor/github.com/mitchellh/mapstructure/mapstructure.go +++ b/vendor/github.com/go-viper/mapstructure/v2/mapstructure.go @@ -9,84 +9,84 @@ // // The simplest function to start with is Decode. // -// Field Tags +// # Field Tags // // When decoding to a struct, mapstructure will use the field name by // default to perform the mapping. For example, if a struct has a field // "Username" then mapstructure will look for a key in the source value // of "username" (case insensitive). // -// type User struct { -// Username string -// } +// type User struct { +// Username string +// } // // You can change the behavior of mapstructure by using struct tags. // The default struct tag that mapstructure looks for is "mapstructure" // but you can customize it using DecoderConfig. // -// Renaming Fields +// # Renaming Fields // // To rename the key that mapstructure looks for, use the "mapstructure" // tag and set a value directly. For example, to change the "username" example // above to "user": // -// type User struct { -// Username string `mapstructure:"user"` -// } +// type User struct { +// Username string `mapstructure:"user"` +// } // -// Embedded Structs and Squashing +// # Embedded Structs and Squashing // // Embedded structs are treated as if they're another field with that name. // By default, the two structs below are equivalent when decoding with // mapstructure: // -// type Person struct { -// Name string -// } +// type Person struct { +// Name string +// } // -// type Friend struct { -// Person -// } +// type Friend struct { +// Person +// } // -// type Friend struct { -// Person Person -// } +// type Friend struct { +// Person Person +// } // // This would require an input that looks like below: // -// map[string]interface{}{ -// "person": map[string]interface{}{"name": "alice"}, -// } +// map[string]interface{}{ +// "person": map[string]interface{}{"name": "alice"}, +// } // // If your "person" value is NOT nested, then you can append ",squash" to // your tag value and mapstructure will treat it as if the embedded struct // were part of the struct directly. 
Example: // -// type Friend struct { -// Person `mapstructure:",squash"` -// } +// type Friend struct { +// Person `mapstructure:",squash"` +// } // // Now the following input would be accepted: // -// map[string]interface{}{ -// "name": "alice", -// } +// map[string]interface{}{ +// "name": "alice", +// } // // When decoding from a struct to a map, the squash tag squashes the struct // fields into a single map. Using the example structs from above: // -// Friend{Person: Person{Name: "alice"}} +// Friend{Person: Person{Name: "alice"}} // // Will be decoded into a map: // -// map[string]interface{}{ -// "name": "alice", -// } +// map[string]interface{}{ +// "name": "alice", +// } // // DecoderConfig has a field that changes the behavior of mapstructure // to always squash embedded structs. // -// Remainder Values +// # Remainder Values // // If there are any unmapped keys in the source value, mapstructure by // default will silently ignore them. You can error by setting ErrorUnused @@ -98,20 +98,20 @@ // probably be a "map[string]interface{}" or "map[interface{}]interface{}". // See example below: // -// type Friend struct { -// Name string -// Other map[string]interface{} `mapstructure:",remain"` -// } +// type Friend struct { +// Name string +// Other map[string]interface{} `mapstructure:",remain"` +// } // // Given the input below, Other would be populated with the other // values that weren't used (everything but "name"): // -// map[string]interface{}{ -// "name": "bob", -// "address": "123 Maple St.", -// } +// map[string]interface{}{ +// "name": "bob", +// "address": "123 Maple St.", +// } // -// Omit Empty Values +// # Omit Empty Values // // When decoding from a struct to any other value, you may use the // ",omitempty" suffix on your tag to omit that value if it equates to @@ -122,37 +122,37 @@ // field value is zero and a numeric type, the field is empty, and it won't // be encoded into the destination type. // -// type Source struct { -// Age int `mapstructure:",omitempty"` -// } +// type Source struct { +// Age int `mapstructure:",omitempty"` +// } // -// Unexported fields +// # Unexported fields // // Since unexported (private) struct fields cannot be set outside the package // where they are defined, the decoder will simply skip them. // // For this output type definition: // -// type Exported struct { -// private string // this unexported field will be skipped -// Public string -// } +// type Exported struct { +// private string // this unexported field will be skipped +// Public string +// } // // Using this map as input: // -// map[string]interface{}{ -// "private": "I will be ignored", -// "Public": "I made it through!", -// } +// map[string]interface{}{ +// "private": "I will be ignored", +// "Public": "I made it through!", +// } // // The following struct will be decoded: // -// type Exported struct { -// private: "" // field is left with an empty string (zero value) -// Public: "I made it through!" -// } +// type Exported struct { +// private: "" // field is left with an empty string (zero value) +// Public: "I made it through!" +// } // -// Other Configuration +// # Other Configuration // // mapstructure is highly configurable. See the DecoderConfig struct // for other features and options that are supported. 
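[Editor's note: the tag behaviours documented in the hunk above can be hard to picture from the diff alone. The following minimal sketch is not part of the patch; the Person/Friend types and the input map are invented for illustration, and it uses the github.com/go-viper/mapstructure/v2 import path that this update vendors in. It shows ",squash" and ",remain" working together.]

    package main

    import (
    	"fmt"

    	"github.com/go-viper/mapstructure/v2"
    )

    type Person struct {
    	Name string `mapstructure:"name"`
    }

    type Friend struct {
    	// Person's fields are matched at the top level of the input map.
    	Person `mapstructure:",squash"`
    	// Keys that match no field are collected here instead of being dropped.
    	Other map[string]interface{} `mapstructure:",remain"`
    }

    func main() {
    	input := map[string]interface{}{
    		"name":    "alice",
    		"address": "123 Maple St.",
    	}

    	var f Friend
    	if err := mapstructure.Decode(input, &f); err != nil {
    		panic(err)
    	}
    	fmt.Println(f.Name)  // alice
    	fmt.Println(f.Other) // map[address:123 Maple St.]
    }

[The "name" key is consumed by the squashed Person field, so only "address" lands in Other.]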
@@ -160,12 +160,13 @@ package mapstructure

 import (
 	"encoding/json"
-	"errors"
 	"fmt"
 	"reflect"
 	"sort"
 	"strconv"
 	"strings"
+
+	"github.com/go-viper/mapstructure/v2/internal/errors"
 )

 // DecodeHookFunc is the callback function that can be used for
@@ -265,6 +266,10 @@ type DecoderConfig struct {
 	// defaults to "mapstructure"
 	TagName string

+	// The option of the value in the tag that indicates a field should
+	// be squashed. This defaults to "squash".
+	SquashTagOption string
+
 	// IgnoreUntaggedFields ignores all struct fields without explicit
 	// TagName, comparable to `mapstructure:"-"` as default behaviour.
 	IgnoreUntaggedFields bool
@@ -273,6 +278,10 @@ type DecoderConfig struct {
 	// field name or tag. Defaults to `strings.EqualFold`. This can be used
 	// to implement case-sensitive tag values, support snake casing, etc.
 	MatchName func(mapKey, fieldName string) bool
+
+	// DecodeNil, if set to true, will cause the DecodeHook (if present) to run
+	// even if the input is nil. This can be used to provide default values.
+	DecodeNil bool
 }

 // A Decoder takes a raw interface value and turns it into structured
@@ -282,7 +291,8 @@ type DecoderConfig struct {
 // structure. The top-level Decode method is just a convenience that sets
 // up the most basic Decoder.
 type Decoder struct {
-	config *DecoderConfig
+	config           *DecoderConfig
+	cachedDecodeHook func(from reflect.Value, to reflect.Value) (interface{}, error)
 }

 // Metadata contains information about decoding a structure that
@@ -400,6 +410,10 @@ func NewDecoder(config *DecoderConfig) (*Decoder, error) {
 		config.TagName = "mapstructure"
 	}

+	if config.SquashTagOption == "" {
+		config.SquashTagOption = "squash"
+	}
+
 	if config.MatchName == nil {
 		config.MatchName = strings.EqualFold
 	}
@@ -407,6 +421,9 @@ func NewDecoder(config *DecoderConfig) (*Decoder, error) {
 	result := &Decoder{
 		config: config,
 	}
+	if config.DecodeHook != nil {
+		result.cachedDecodeHook = cachedDecodeHook(config.DecodeHook)
+	}

 	return result, nil
 }
@@ -414,22 +431,37 @@ func NewDecoder(config *DecoderConfig) (*Decoder, error) {
 // Decode decodes the given raw interface to the target pointer specified
 // by the configuration.
 func (d *Decoder) Decode(input interface{}) error {
-	return d.decode("", input, reflect.ValueOf(d.config.Result).Elem())
+	err := d.decode("", input, reflect.ValueOf(d.config.Result).Elem())
+
+	// Retain some of the original behavior when multiple errors occur
+	var joinedErr interface{ Unwrap() []error }
+	if errors.As(err, &joinedErr) {
+		return fmt.Errorf("decoding failed due to the following error(s):\n\n%w", err)
+	}
+
+	return err
+}
+
+// isNil returns true if the input is nil or a typed nil pointer.
+func isNil(input interface{}) bool {
+	if input == nil {
+		return true
+	}
+	val := reflect.ValueOf(input)
+	return val.Kind() == reflect.Ptr && val.IsNil()
 }

 // Decodes an unknown data type into a specific reflection value.
 func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) error {
-	var inputVal reflect.Value
-	if input != nil {
-		inputVal = reflect.ValueOf(input)
-
-		// We need to check here if input is a typed nil. Typed nils won't
-		// match the "input == nil" below so we check that here.
-		if inputVal.Kind() == reflect.Ptr && inputVal.IsNil() {
-			input = nil
-		}
+	var (
+		inputVal   = reflect.ValueOf(input)
+		outputKind = getKind(outVal)
+		decodeNil  = d.config.DecodeNil && d.cachedDecodeHook != nil
+	)
+	if isNil(input) {
+		// Typed nils won't match the "input == nil" below, so reset input.
+ input = nil } - if input == nil { // If the data is nil, then we don't set anything, unless ZeroFields is set // to true. @@ -440,30 +472,46 @@ func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) e d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) } } - return nil + if !decodeNil { + return nil + } } - if !inputVal.IsValid() { - // If the input value is invalid, then we just set the value - // to be the zero value. - outVal.Set(reflect.Zero(outVal.Type())) - if d.config.Metadata != nil && name != "" { - d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) + if !decodeNil { + // If the input value is invalid, then we just set the value + // to be the zero value. + outVal.Set(reflect.Zero(outVal.Type())) + if d.config.Metadata != nil && name != "" { + d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) + } + return nil + } + // Hooks need a valid inputVal, so reset it to zero value of outVal type. + switch outputKind { + case reflect.Struct, reflect.Map: + var mapVal map[string]interface{} + inputVal = reflect.ValueOf(mapVal) // create nil map pointer + case reflect.Slice, reflect.Array: + var sliceVal []interface{} + inputVal = reflect.ValueOf(sliceVal) // create nil slice pointer + default: + inputVal = reflect.Zero(outVal.Type()) } - return nil } - if d.config.DecodeHook != nil { + if d.cachedDecodeHook != nil { // We have a DecodeHook, so let's pre-process the input. var err error - input, err = DecodeHookExec(d.config.DecodeHook, inputVal, outVal) + input, err = d.cachedDecodeHook(inputVal, outVal) if err != nil { - return fmt.Errorf("error decoding '%s': %s", name, err) + return fmt.Errorf("error decoding '%s': %w", name, err) } } + if isNil(input) { + return nil + } var err error - outputKind := getKind(outVal) addMetaKey := true switch outputKind { case reflect.Bool: @@ -478,6 +526,8 @@ func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) e err = d.decodeUint(name, input, outVal) case reflect.Float32: err = d.decodeFloat(name, input, outVal) + case reflect.Complex64: + err = d.decodeComplex(name, input, outVal) case reflect.Struct: err = d.decodeStruct(name, input, outVal) case reflect.Map: @@ -742,8 +792,8 @@ func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) e } default: return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", - name, val.Type(), dataVal.Type(), data) + "'%s' expected type '%s', got unconvertible type '%#v', value: '%#v'", + name, val, dataVal, data) } return nil @@ -796,6 +846,22 @@ func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) return nil } +func (d *Decoder) decodeComplex(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + + switch { + case dataKind == reflect.Complex64: + val.SetComplex(dataVal.Complex()) + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) + } + + return nil +} + func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) error { valType := val.Type() valKeyType := valType.Key() @@ -811,8 +877,14 @@ func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) er valMap = reflect.MakeMap(mapType) } + dataVal := reflect.ValueOf(data) + + // Resolve any levels of indirection + for dataVal.Kind() == reflect.Pointer { + dataVal = reflect.Indirect(dataVal) + 
} + // Check input type and based on the input type jump to the proper func - dataVal := reflect.Indirect(reflect.ValueOf(data)) switch dataVal.Kind() { case reflect.Map: return d.decodeMapFromMap(name, dataVal, val, valMap) @@ -857,7 +929,7 @@ func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val refle valElemType := valType.Elem() // Accumulate errors - errors := make([]string, 0) + var errs []error // If the input data is empty, then we just match what the input data is. if dataVal.Len() == 0 { @@ -879,7 +951,7 @@ func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val refle // First decode the key into the proper type currentKey := reflect.Indirect(reflect.New(valKeyType)) if err := d.decode(fieldName, k.Interface(), currentKey); err != nil { - errors = appendErrors(errors, err) + errs = append(errs, err) continue } @@ -887,7 +959,7 @@ func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val refle v := dataVal.MapIndex(k).Interface() currentVal := reflect.Indirect(reflect.New(valElemType)) if err := d.decode(fieldName, v, currentVal); err != nil { - errors = appendErrors(errors, err) + errs = append(errs, err) continue } @@ -897,12 +969,7 @@ func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val refle // Set the built up map to the value val.Set(valMap) - // If we had errors, return those - if len(errors) > 0 { - return &Error{errors} - } - - return nil + return errors.Join(errs...) } func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { @@ -945,7 +1012,7 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re } // If "squash" is specified in the tag, we squash the field down. - squash = squash || strings.Index(tagValue[index+1:], "squash") != -1 + squash = squash || strings.Contains(tagValue[index+1:], d.config.SquashTagOption) if squash { // When squashing, the embedded type can be a pointer to a struct. if v.Kind() == reflect.Ptr && v.Elem().Kind() == reflect.Struct { @@ -956,6 +1023,18 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re if v.Kind() != reflect.Struct { return fmt.Errorf("cannot squash non-struct type '%s'", v.Type()) } + } else { + if strings.Index(tagValue[index+1:], "remain") != -1 { + if v.Kind() != reflect.Map { + return fmt.Errorf("error remain-tag field with invalid type: '%s'", v.Type()) + } + + ptr := v.MapRange() + for ptr.Next() { + valMap.SetMapIndex(ptr.Key(), ptr.Value()) + } + continue + } } if keyNameTagValue := tagValue[:index]; keyNameTagValue != "" { keyName = keyNameTagValue @@ -1123,10 +1202,12 @@ func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) if valSlice.IsNil() || d.config.ZeroFields { // Make a new slice to hold our result, same size as the original data. 
valSlice = reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len()) + } else if valSlice.Len() > dataVal.Len() { + valSlice = valSlice.Slice(0, dataVal.Len()) } // Accumulate any errors - errors := make([]string, 0) + var errs []error for i := 0; i < dataVal.Len(); i++ { currentData := dataVal.Index(i).Interface() @@ -1137,19 +1218,14 @@ func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) fieldName := name + "[" + strconv.Itoa(i) + "]" if err := d.decode(fieldName, currentData, currentField); err != nil { - errors = appendErrors(errors, err) + errs = append(errs, err) } } // Finally, set the value to the slice we built up val.Set(valSlice) - // If there were errors, we return those - if len(errors) > 0 { - return &Error{errors} - } - - return nil + return errors.Join(errs...) } func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value) error { @@ -1161,7 +1237,7 @@ func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value) valArray := val - if valArray.Interface() == reflect.Zero(valArray.Type()).Interface() || d.config.ZeroFields { + if isComparable(valArray) && valArray.Interface() == reflect.Zero(valArray.Type()).Interface() || d.config.ZeroFields { // Check input type if dataValKind != reflect.Array && dataValKind != reflect.Slice { if d.config.WeaklyTypedInput { @@ -1188,7 +1264,6 @@ func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value) if dataVal.Len() > arrayType.Len() { return fmt.Errorf( "'%s': expected source data to have length less or equal to %d, got %d", name, arrayType.Len(), dataVal.Len()) - } // Make a new array to hold our result, same size as the original data. @@ -1196,7 +1271,7 @@ func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value) } // Accumulate any errors - errors := make([]string, 0) + var errs []error for i := 0; i < dataVal.Len(); i++ { currentData := dataVal.Index(i).Interface() @@ -1204,19 +1279,14 @@ func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value) fieldName := name + "[" + strconv.Itoa(i) + "]" if err := d.decode(fieldName, currentData, currentField); err != nil { - errors = appendErrors(errors, err) + errs = append(errs, err) } } // Finally, set the value to the array we built up val.Set(valArray) - // If there were errors, we return those - if len(errors) > 0 { - return &Error{errors} - } - - return nil + return errors.Join(errs...) } func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error { @@ -1278,7 +1348,8 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e } targetValKeysUnused := make(map[interface{}]struct{}) - errors := make([]string, 0) + + var errs []error // This slice will keep track of all the structs we'll be decoding. 
// There can be more than one struct if there are embedded structs @@ -1319,7 +1390,7 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e // We always parse the tags cause we're looking for other tags too tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",") for _, tag := range tagParts[1:] { - if tag == "squash" { + if tag == d.config.SquashTagOption { squash = true break } @@ -1331,11 +1402,15 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e } if squash { - if fieldVal.Kind() != reflect.Struct { - errors = appendErrors(errors, - fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldVal.Kind())) - } else { + switch fieldVal.Kind() { + case reflect.Struct: structs = append(structs, fieldVal) + case reflect.Interface: + if !fieldVal.IsNil() { + structs = append(structs, fieldVal.Elem().Elem()) + } + default: + errs = append(errs, fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldVal.Kind())) } continue } @@ -1356,6 +1431,9 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e fieldName := field.Name tagValue := field.Tag.Get(d.config.TagName) + if tagValue == "" && d.config.IgnoreUntaggedFields { + continue + } tagValue = strings.SplitN(tagValue, ",", 2)[0] if tagValue != "" { fieldName = tagValue @@ -1409,7 +1487,7 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e } if err := d.decode(fieldName, rawMapVal.Interface(), fieldValue); err != nil { - errors = appendErrors(errors, err) + errs = append(errs, err) } } @@ -1424,7 +1502,7 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e // Decode it as-if we were just decoding this map onto our map. 
if err := d.decodeMap(name, remain, remainField.val); err != nil { - errors = appendErrors(errors, err) + errs = append(errs, err) } // Set the map to nil so we have none so that the next check will @@ -1440,7 +1518,7 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e sort.Strings(keys) err := fmt.Errorf("'%s' has invalid keys: %s", name, strings.Join(keys, ", ")) - errors = appendErrors(errors, err) + errs = append(errs, err) } if d.config.ErrorUnset && len(targetValKeysUnused) > 0 { @@ -1451,11 +1529,11 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e sort.Strings(keys) err := fmt.Errorf("'%s' has unset fields: %s", name, strings.Join(keys, ", ")) - errors = appendErrors(errors, err) + errs = append(errs, err) } - if len(errors) > 0 { - return &Error{errors} + if err := errors.Join(errs...); err != nil { + return err } // Add the unused keys to the list of unused keys if we're tracking metadata @@ -1509,6 +1587,8 @@ func getKind(val reflect.Value) reflect.Kind { return reflect.Uint case kind >= reflect.Float32 && kind <= reflect.Float64: return reflect.Float32 + case kind >= reflect.Complex64 && kind <= reflect.Complex128: + return reflect.Complex64 default: return kind } diff --git a/vendor/github.com/go-viper/mapstructure/v2/reflect_go1_19.go b/vendor/github.com/go-viper/mapstructure/v2/reflect_go1_19.go new file mode 100644 index 000000000..d0913fff6 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/reflect_go1_19.go @@ -0,0 +1,44 @@ +//go:build !go1.20 + +package mapstructure + +import "reflect" + +func isComparable(v reflect.Value) bool { + k := v.Kind() + switch k { + case reflect.Invalid: + return false + + case reflect.Array: + switch v.Type().Elem().Kind() { + case reflect.Interface, reflect.Array, reflect.Struct: + for i := 0; i < v.Type().Len(); i++ { + // if !v.Index(i).Comparable() { + if !isComparable(v.Index(i)) { + return false + } + } + return true + } + return v.Type().Comparable() + + case reflect.Interface: + // return v.Elem().Comparable() + return isComparable(v.Elem()) + + case reflect.Struct: + for i := 0; i < v.NumField(); i++ { + return false + + // if !v.Field(i).Comparable() { + if !isComparable(v.Field(i)) { + return false + } + } + return true + + default: + return v.Type().Comparable() + } +} diff --git a/vendor/github.com/go-viper/mapstructure/v2/reflect_go1_20.go b/vendor/github.com/go-viper/mapstructure/v2/reflect_go1_20.go new file mode 100644 index 000000000..f8255a1b1 --- /dev/null +++ b/vendor/github.com/go-viper/mapstructure/v2/reflect_go1_20.go @@ -0,0 +1,10 @@ +//go:build go1.20 + +package mapstructure + +import "reflect" + +// TODO: remove once we drop support for Go <1.20 +func isComparable(v reflect.Value) bool { + return v.Comparable() +} diff --git a/vendor/github.com/hashicorp/hcl/.gitignore b/vendor/github.com/hashicorp/hcl/.gitignore deleted file mode 100644 index 15586a2b5..000000000 --- a/vendor/github.com/hashicorp/hcl/.gitignore +++ /dev/null @@ -1,9 +0,0 @@ -y.output - -# ignore intellij files -.idea -*.iml -*.ipr -*.iws - -*.test diff --git a/vendor/github.com/hashicorp/hcl/.travis.yml b/vendor/github.com/hashicorp/hcl/.travis.yml deleted file mode 100644 index cb63a3216..000000000 --- a/vendor/github.com/hashicorp/hcl/.travis.yml +++ /dev/null @@ -1,13 +0,0 @@ -sudo: false - -language: go - -go: - - 1.x - - tip - -branches: - only: - - master - -script: make test diff --git a/vendor/github.com/hashicorp/hcl/LICENSE 
b/vendor/github.com/hashicorp/hcl/LICENSE deleted file mode 100644 index c33dcc7c9..000000000 --- a/vendor/github.com/hashicorp/hcl/LICENSE +++ /dev/null @@ -1,354 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. “Contributor” - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. “Contributor Version” - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor’s Contribution. - -1.3. “Contribution” - - means Covered Software of a particular Contributor. - -1.4. “Covered Software” - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. “Incompatible With Secondary Licenses” - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. - -1.6. “Executable Form” - - means any form of the work other than Source Code Form. - -1.7. “Larger Work” - - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. - -1.8. “License” - - means this document. - -1.9. “Licensable” - - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. - -1.10. “Modifications” - - means any of the following: - - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. “Patent Claims” of a Contributor - - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. - -1.12. “Secondary License” - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. “Source Code Form” - - means the form of the work preferred for making modifications. - -1.14. “You” (or “Your”) - - means an individual or a legal entity exercising rights under this - License. For legal entities, “You” includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. 
under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party’s - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. - - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. 
You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. 
- Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. 
Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses - If You choose to distribute Source Code Form that is Incompatible With - Secondary Licenses under the terms of this version of the License, the - notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, then -You may include the notice in a location (such as a LICENSE file in a relevant -directory) where a recipient would be likely to look for such a notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - “Incompatible With Secondary Licenses” Notice - - This Source Code Form is “Incompatible - With Secondary Licenses”, as defined by - the Mozilla Public License, v. 2.0. - diff --git a/vendor/github.com/hashicorp/hcl/Makefile b/vendor/github.com/hashicorp/hcl/Makefile deleted file mode 100644 index 84fd743f5..000000000 --- a/vendor/github.com/hashicorp/hcl/Makefile +++ /dev/null @@ -1,18 +0,0 @@ -TEST?=./... - -default: test - -fmt: generate - go fmt ./... - -test: generate - go get -t ./... - go test $(TEST) $(TESTARGS) - -generate: - go generate ./... - -updatedeps: - go get -u golang.org/x/tools/cmd/stringer - -.PHONY: default generate test updatedeps diff --git a/vendor/github.com/hashicorp/hcl/README.md b/vendor/github.com/hashicorp/hcl/README.md deleted file mode 100644 index c8223326d..000000000 --- a/vendor/github.com/hashicorp/hcl/README.md +++ /dev/null @@ -1,125 +0,0 @@ -# HCL - -[![GoDoc](https://godoc.org/github.com/hashicorp/hcl?status.png)](https://godoc.org/github.com/hashicorp/hcl) [![Build Status](https://travis-ci.org/hashicorp/hcl.svg?branch=master)](https://travis-ci.org/hashicorp/hcl) - -HCL (HashiCorp Configuration Language) is a configuration language built -by HashiCorp. The goal of HCL is to build a structured configuration language -that is both human and machine friendly for use with command-line tools, but -specifically targeted towards DevOps tools, servers, etc. - -HCL is also fully JSON compatible. That is, JSON can be used as completely -valid input to a system expecting HCL. 
This helps makes systems -interoperable with other systems. - -HCL is heavily inspired by -[libucl](https://github.com/vstakhov/libucl), -nginx configuration, and others similar. - -## Why? - -A common question when viewing HCL is to ask the question: why not -JSON, YAML, etc.? - -Prior to HCL, the tools we built at [HashiCorp](http://www.hashicorp.com) -used a variety of configuration languages from full programming languages -such as Ruby to complete data structure languages such as JSON. What we -learned is that some people wanted human-friendly configuration languages -and some people wanted machine-friendly languages. - -JSON fits a nice balance in this, but is fairly verbose and most -importantly doesn't support comments. With YAML, we found that beginners -had a really hard time determining what the actual structure was, and -ended up guessing more often than not whether to use a hyphen, colon, etc. -in order to represent some configuration key. - -Full programming languages such as Ruby enable complex behavior -a configuration language shouldn't usually allow, and also forces -people to learn some set of Ruby. - -Because of this, we decided to create our own configuration language -that is JSON-compatible. Our configuration language (HCL) is designed -to be written and modified by humans. The API for HCL allows JSON -as an input so that it is also machine-friendly (machines can generate -JSON instead of trying to generate HCL). - -Our goal with HCL is not to alienate other configuration languages. -It is instead to provide HCL as a specialized language for our tools, -and JSON as the interoperability layer. - -## Syntax - -For a complete grammar, please see the parser itself. A high-level overview -of the syntax and grammar is listed here. - - * Single line comments start with `#` or `//` - - * Multi-line comments are wrapped in `/*` and `*/`. Nested block comments - are not allowed. A multi-line comment (also known as a block comment) - terminates at the first `*/` found. - - * Values are assigned with the syntax `key = value` (whitespace doesn't - matter). The value can be any primitive: a string, number, boolean, - object, or list. - - * Strings are double-quoted and can contain any UTF-8 characters. - Example: `"Hello, World"` - - * Multi-line strings start with `<- - echo %Path% - - go version - - go env - - go get -t ./... - -build_script: -- cmd: go test -v ./... diff --git a/vendor/github.com/hashicorp/hcl/decoder.go b/vendor/github.com/hashicorp/hcl/decoder.go deleted file mode 100644 index bed9ebbe1..000000000 --- a/vendor/github.com/hashicorp/hcl/decoder.go +++ /dev/null @@ -1,729 +0,0 @@ -package hcl - -import ( - "errors" - "fmt" - "reflect" - "sort" - "strconv" - "strings" - - "github.com/hashicorp/hcl/hcl/ast" - "github.com/hashicorp/hcl/hcl/parser" - "github.com/hashicorp/hcl/hcl/token" -) - -// This is the tag to use with structures to have settings for HCL -const tagName = "hcl" - -var ( - // nodeType holds a reference to the type of ast.Node - nodeType reflect.Type = findNodeType() -) - -// Unmarshal accepts a byte slice as input and writes the -// data to the value pointed to by v. -func Unmarshal(bs []byte, v interface{}) error { - root, err := parse(bs) - if err != nil { - return err - } - - return DecodeObject(v, root) -} - -// Decode reads the given input and decodes it into the structure -// given by `out`. 
-func Decode(out interface{}, in string) error { - obj, err := Parse(in) - if err != nil { - return err - } - - return DecodeObject(out, obj) -} - -// DecodeObject is a lower-level version of Decode. It decodes a -// raw Object into the given output. -func DecodeObject(out interface{}, n ast.Node) error { - val := reflect.ValueOf(out) - if val.Kind() != reflect.Ptr { - return errors.New("result must be a pointer") - } - - // If we have the file, we really decode the root node - if f, ok := n.(*ast.File); ok { - n = f.Node - } - - var d decoder - return d.decode("root", n, val.Elem()) -} - -type decoder struct { - stack []reflect.Kind -} - -func (d *decoder) decode(name string, node ast.Node, result reflect.Value) error { - k := result - - // If we have an interface with a valid value, we use that - // for the check. - if result.Kind() == reflect.Interface { - elem := result.Elem() - if elem.IsValid() { - k = elem - } - } - - // Push current onto stack unless it is an interface. - if k.Kind() != reflect.Interface { - d.stack = append(d.stack, k.Kind()) - - // Schedule a pop - defer func() { - d.stack = d.stack[:len(d.stack)-1] - }() - } - - switch k.Kind() { - case reflect.Bool: - return d.decodeBool(name, node, result) - case reflect.Float32, reflect.Float64: - return d.decodeFloat(name, node, result) - case reflect.Int, reflect.Int32, reflect.Int64: - return d.decodeInt(name, node, result) - case reflect.Interface: - // When we see an interface, we make our own thing - return d.decodeInterface(name, node, result) - case reflect.Map: - return d.decodeMap(name, node, result) - case reflect.Ptr: - return d.decodePtr(name, node, result) - case reflect.Slice: - return d.decodeSlice(name, node, result) - case reflect.String: - return d.decodeString(name, node, result) - case reflect.Struct: - return d.decodeStruct(name, node, result) - default: - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: unknown kind to decode into: %s", name, k.Kind()), - } - } -} - -func (d *decoder) decodeBool(name string, node ast.Node, result reflect.Value) error { - switch n := node.(type) { - case *ast.LiteralType: - if n.Token.Type == token.BOOL { - v, err := strconv.ParseBool(n.Token.Text) - if err != nil { - return err - } - - result.Set(reflect.ValueOf(v)) - return nil - } - } - - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: unknown type %T", name, node), - } -} - -func (d *decoder) decodeFloat(name string, node ast.Node, result reflect.Value) error { - switch n := node.(type) { - case *ast.LiteralType: - if n.Token.Type == token.FLOAT || n.Token.Type == token.NUMBER { - v, err := strconv.ParseFloat(n.Token.Text, 64) - if err != nil { - return err - } - - result.Set(reflect.ValueOf(v).Convert(result.Type())) - return nil - } - } - - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: unknown type %T", name, node), - } -} - -func (d *decoder) decodeInt(name string, node ast.Node, result reflect.Value) error { - switch n := node.(type) { - case *ast.LiteralType: - switch n.Token.Type { - case token.NUMBER: - v, err := strconv.ParseInt(n.Token.Text, 0, 0) - if err != nil { - return err - } - - if result.Kind() == reflect.Interface { - result.Set(reflect.ValueOf(int(v))) - } else { - result.SetInt(v) - } - return nil - case token.STRING: - v, err := strconv.ParseInt(n.Token.Value().(string), 0, 0) - if err != nil { - return err - } - - if result.Kind() == reflect.Interface { - result.Set(reflect.ValueOf(int(v))) - } else { - result.SetInt(v) - } - return nil - 
} - } - - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: unknown type %T", name, node), - } -} - -func (d *decoder) decodeInterface(name string, node ast.Node, result reflect.Value) error { - // When we see an ast.Node, we retain the value to enable deferred decoding. - // Very useful in situations where we want to preserve ast.Node information - // like Pos - if result.Type() == nodeType && result.CanSet() { - result.Set(reflect.ValueOf(node)) - return nil - } - - var set reflect.Value - redecode := true - - // For testing types, ObjectType should just be treated as a list. We - // set this to a temporary var because we want to pass in the real node. - testNode := node - if ot, ok := node.(*ast.ObjectType); ok { - testNode = ot.List - } - - switch n := testNode.(type) { - case *ast.ObjectList: - // If we're at the root or we're directly within a slice, then we - // decode objects into map[string]interface{}, otherwise we decode - // them into lists. - if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice { - var temp map[string]interface{} - tempVal := reflect.ValueOf(temp) - result := reflect.MakeMap( - reflect.MapOf( - reflect.TypeOf(""), - tempVal.Type().Elem())) - - set = result - } else { - var temp []map[string]interface{} - tempVal := reflect.ValueOf(temp) - result := reflect.MakeSlice( - reflect.SliceOf(tempVal.Type().Elem()), 0, len(n.Items)) - set = result - } - case *ast.ObjectType: - // If we're at the root or we're directly within a slice, then we - // decode objects into map[string]interface{}, otherwise we decode - // them into lists. - if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice { - var temp map[string]interface{} - tempVal := reflect.ValueOf(temp) - result := reflect.MakeMap( - reflect.MapOf( - reflect.TypeOf(""), - tempVal.Type().Elem())) - - set = result - } else { - var temp []map[string]interface{} - tempVal := reflect.ValueOf(temp) - result := reflect.MakeSlice( - reflect.SliceOf(tempVal.Type().Elem()), 0, 1) - set = result - } - case *ast.ListType: - var temp []interface{} - tempVal := reflect.ValueOf(temp) - result := reflect.MakeSlice( - reflect.SliceOf(tempVal.Type().Elem()), 0, 0) - set = result - case *ast.LiteralType: - switch n.Token.Type { - case token.BOOL: - var result bool - set = reflect.Indirect(reflect.New(reflect.TypeOf(result))) - case token.FLOAT: - var result float64 - set = reflect.Indirect(reflect.New(reflect.TypeOf(result))) - case token.NUMBER: - var result int - set = reflect.Indirect(reflect.New(reflect.TypeOf(result))) - case token.STRING, token.HEREDOC: - set = reflect.Indirect(reflect.New(reflect.TypeOf(""))) - default: - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: cannot decode into interface: %T", name, node), - } - } - default: - return fmt.Errorf( - "%s: cannot decode into interface: %T", - name, node) - } - - // Set the result to what its supposed to be, then reset - // result so we don't reflect into this method anymore. - result.Set(set) - - if redecode { - // Revisit the node so that we can use the newly instantiated - // thing and populate it. 
- if err := d.decode(name, node, result); err != nil { - return err - } - } - - return nil -} - -func (d *decoder) decodeMap(name string, node ast.Node, result reflect.Value) error { - if item, ok := node.(*ast.ObjectItem); ok { - node = &ast.ObjectList{Items: []*ast.ObjectItem{item}} - } - - if ot, ok := node.(*ast.ObjectType); ok { - node = ot.List - } - - n, ok := node.(*ast.ObjectList) - if !ok { - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: not an object type for map (%T)", name, node), - } - } - - // If we have an interface, then we can address the interface, - // but not the slice itself, so get the element but set the interface - set := result - if result.Kind() == reflect.Interface { - result = result.Elem() - } - - resultType := result.Type() - resultElemType := resultType.Elem() - resultKeyType := resultType.Key() - if resultKeyType.Kind() != reflect.String { - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: map must have string keys", name), - } - } - - // Make a map if it is nil - resultMap := result - if result.IsNil() { - resultMap = reflect.MakeMap( - reflect.MapOf(resultKeyType, resultElemType)) - } - - // Go through each element and decode it. - done := make(map[string]struct{}) - for _, item := range n.Items { - if item.Val == nil { - continue - } - - // github.com/hashicorp/terraform/issue/5740 - if len(item.Keys) == 0 { - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: map must have string keys", name), - } - } - - // Get the key we're dealing with, which is the first item - keyStr := item.Keys[0].Token.Value().(string) - - // If we've already processed this key, then ignore it - if _, ok := done[keyStr]; ok { - continue - } - - // Determine the value. If we have more than one key, then we - // get the objectlist of only these keys. - itemVal := item.Val - if len(item.Keys) > 1 { - itemVal = n.Filter(keyStr) - done[keyStr] = struct{}{} - } - - // Make the field name - fieldName := fmt.Sprintf("%s.%s", name, keyStr) - - // Get the key/value as reflection values - key := reflect.ValueOf(keyStr) - val := reflect.Indirect(reflect.New(resultElemType)) - - // If we have a pre-existing value in the map, use that - oldVal := resultMap.MapIndex(key) - if oldVal.IsValid() { - val.Set(oldVal) - } - - // Decode! - if err := d.decode(fieldName, itemVal, val); err != nil { - return err - } - - // Set the value on the map - resultMap.SetMapIndex(key, val) - } - - // Set the final map if we can - set.Set(resultMap) - return nil -} - -func (d *decoder) decodePtr(name string, node ast.Node, result reflect.Value) error { - // Create an element of the concrete (non pointer) type and decode - // into that. Then set the value of the pointer to this type. 
- resultType := result.Type() - resultElemType := resultType.Elem() - val := reflect.New(resultElemType) - if err := d.decode(name, node, reflect.Indirect(val)); err != nil { - return err - } - - result.Set(val) - return nil -} - -func (d *decoder) decodeSlice(name string, node ast.Node, result reflect.Value) error { - // If we have an interface, then we can address the interface, - // but not the slice itself, so get the element but set the interface - set := result - if result.Kind() == reflect.Interface { - result = result.Elem() - } - // Create the slice if it isn't nil - resultType := result.Type() - resultElemType := resultType.Elem() - if result.IsNil() { - resultSliceType := reflect.SliceOf(resultElemType) - result = reflect.MakeSlice( - resultSliceType, 0, 0) - } - - // Figure out the items we'll be copying into the slice - var items []ast.Node - switch n := node.(type) { - case *ast.ObjectList: - items = make([]ast.Node, len(n.Items)) - for i, item := range n.Items { - items[i] = item - } - case *ast.ObjectType: - items = []ast.Node{n} - case *ast.ListType: - items = n.List - default: - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("unknown slice type: %T", node), - } - } - - for i, item := range items { - fieldName := fmt.Sprintf("%s[%d]", name, i) - - // Decode - val := reflect.Indirect(reflect.New(resultElemType)) - - // if item is an object that was decoded from ambiguous JSON and - // flattened, make sure it's expanded if it needs to decode into a - // defined structure. - item := expandObject(item, val) - - if err := d.decode(fieldName, item, val); err != nil { - return err - } - - // Append it onto the slice - result = reflect.Append(result, val) - } - - set.Set(result) - return nil -} - -// expandObject detects if an ambiguous JSON object was flattened to a List which -// should be decoded into a struct, and expands the ast to properly deocode. -func expandObject(node ast.Node, result reflect.Value) ast.Node { - item, ok := node.(*ast.ObjectItem) - if !ok { - return node - } - - elemType := result.Type() - - // our target type must be a struct - switch elemType.Kind() { - case reflect.Ptr: - switch elemType.Elem().Kind() { - case reflect.Struct: - //OK - default: - return node - } - case reflect.Struct: - //OK - default: - return node - } - - // A list value will have a key and field name. If it had more fields, - // it wouldn't have been flattened. 
- if len(item.Keys) != 2 { - return node - } - - keyToken := item.Keys[0].Token - item.Keys = item.Keys[1:] - - // we need to un-flatten the ast enough to decode - newNode := &ast.ObjectItem{ - Keys: []*ast.ObjectKey{ - &ast.ObjectKey{ - Token: keyToken, - }, - }, - Val: &ast.ObjectType{ - List: &ast.ObjectList{ - Items: []*ast.ObjectItem{item}, - }, - }, - } - - return newNode -} - -func (d *decoder) decodeString(name string, node ast.Node, result reflect.Value) error { - switch n := node.(type) { - case *ast.LiteralType: - switch n.Token.Type { - case token.NUMBER: - result.Set(reflect.ValueOf(n.Token.Text).Convert(result.Type())) - return nil - case token.STRING, token.HEREDOC: - result.Set(reflect.ValueOf(n.Token.Value()).Convert(result.Type())) - return nil - } - } - - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: unknown type for string %T", name, node), - } -} - -func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value) error { - var item *ast.ObjectItem - if it, ok := node.(*ast.ObjectItem); ok { - item = it - node = it.Val - } - - if ot, ok := node.(*ast.ObjectType); ok { - node = ot.List - } - - // Handle the special case where the object itself is a literal. Previously - // the yacc parser would always ensure top-level elements were arrays. The new - // parser does not make the same guarantees, thus we need to convert any - // top-level literal elements into a list. - if _, ok := node.(*ast.LiteralType); ok && item != nil { - node = &ast.ObjectList{Items: []*ast.ObjectItem{item}} - } - - list, ok := node.(*ast.ObjectList) - if !ok { - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: not an object type for struct (%T)", name, node), - } - } - - // This slice will keep track of all the structs we'll be decoding. - // There can be more than one struct if there are embedded structs - // that are squashed. - structs := make([]reflect.Value, 1, 5) - structs[0] = result - - // Compile the list of all the fields that we're going to be decoding - // from all the structs. - type field struct { - field reflect.StructField - val reflect.Value - } - fields := []field{} - for len(structs) > 0 { - structVal := structs[0] - structs = structs[1:] - - structType := structVal.Type() - for i := 0; i < structType.NumField(); i++ { - fieldType := structType.Field(i) - tagParts := strings.Split(fieldType.Tag.Get(tagName), ",") - - // Ignore fields with tag name "-" - if tagParts[0] == "-" { - continue - } - - if fieldType.Anonymous { - fieldKind := fieldType.Type.Kind() - if fieldKind != reflect.Struct { - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: unsupported type to struct: %s", - fieldType.Name, fieldKind), - } - } - - // We have an embedded field. We "squash" the fields down - // if specified in the tag. 
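// A minimal sketch of the ",squash" behavior described above: the fields of
// an embedded struct are read from the same object as the parent's own
// fields. Type and field names here are hypothetical.
package main

import (
	"fmt"

	"github.com/hashicorp/hcl"
)

type Base struct {
	Region string `hcl:"region"`
}

type Config struct {
	Base `hcl:",squash"`
	Name string `hcl:"name"`
}

func main() {
	src := `
region = "eu"
name   = "demo"
`
	var cfg Config
	if err := hcl.Decode(&cfg, src); err != nil {
		panic(err)
	}
	fmt.Println(cfg.Region, cfg.Name) // eu demo
}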
- squash := false - for _, tag := range tagParts[1:] { - if tag == "squash" { - squash = true - break - } - } - - if squash { - structs = append( - structs, result.FieldByName(fieldType.Name)) - continue - } - } - - // Normal struct field, store it away - fields = append(fields, field{fieldType, structVal.Field(i)}) - } - } - - usedKeys := make(map[string]struct{}) - decodedFields := make([]string, 0, len(fields)) - decodedFieldsVal := make([]reflect.Value, 0) - unusedKeysVal := make([]reflect.Value, 0) - for _, f := range fields { - field, fieldValue := f.field, f.val - if !fieldValue.IsValid() { - // This should never happen - panic("field is not valid") - } - - // If we can't set the field, then it is unexported or something, - // and we just continue onwards. - if !fieldValue.CanSet() { - continue - } - - fieldName := field.Name - - tagValue := field.Tag.Get(tagName) - tagParts := strings.SplitN(tagValue, ",", 2) - if len(tagParts) >= 2 { - switch tagParts[1] { - case "decodedFields": - decodedFieldsVal = append(decodedFieldsVal, fieldValue) - continue - case "key": - if item == nil { - return &parser.PosError{ - Pos: node.Pos(), - Err: fmt.Errorf("%s: %s asked for 'key', impossible", - name, fieldName), - } - } - - fieldValue.SetString(item.Keys[0].Token.Value().(string)) - continue - case "unusedKeys": - unusedKeysVal = append(unusedKeysVal, fieldValue) - continue - } - } - - if tagParts[0] != "" { - fieldName = tagParts[0] - } - - // Determine the element we'll use to decode. If it is a single - // match (only object with the field), then we decode it exactly. - // If it is a prefix match, then we decode the matches. - filter := list.Filter(fieldName) - - prefixMatches := filter.Children() - matches := filter.Elem() - if len(matches.Items) == 0 && len(prefixMatches.Items) == 0 { - continue - } - - // Track the used key - usedKeys[fieldName] = struct{}{} - - // Create the field name and decode. We range over the elements - // because we actually want the value. - fieldName = fmt.Sprintf("%s.%s", name, fieldName) - if len(prefixMatches.Items) > 0 { - if err := d.decode(fieldName, prefixMatches, fieldValue); err != nil { - return err - } - } - for _, match := range matches.Items { - var decodeNode ast.Node = match.Val - if ot, ok := decodeNode.(*ast.ObjectType); ok { - decodeNode = &ast.ObjectList{Items: ot.List.Items} - } - - if err := d.decode(fieldName, decodeNode, fieldValue); err != nil { - return err - } - } - - decodedFields = append(decodedFields, field.Name) - } - - if len(decodedFieldsVal) > 0 { - // Sort it so that it is deterministic - sort.Strings(decodedFields) - - for _, v := range decodedFieldsVal { - v.Set(reflect.ValueOf(decodedFields)) - } - } - - return nil -} - -// findNodeType returns the type of ast.Node -func findNodeType() reflect.Type { - var nodeContainer struct { - Node ast.Node - } - value := reflect.ValueOf(nodeContainer).FieldByName("Node") - return value.Type() -} diff --git a/vendor/github.com/hashicorp/hcl/hcl.go b/vendor/github.com/hashicorp/hcl/hcl.go deleted file mode 100644 index 575a20b50..000000000 --- a/vendor/github.com/hashicorp/hcl/hcl.go +++ /dev/null @@ -1,11 +0,0 @@ -// Package hcl decodes HCL into usable Go structures. -// -// hcl input can come in either pure HCL format or JSON format. -// It can be parsed into an AST, and then decoded into a structure, -// or it can be decoded directly from a string into a structure. 
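// A minimal sketch of decoding "directly from a string into a structure"
// with the legacy hcl.Decode API deleted in this hunk; the ",key" tag and
// the repeated "rule" blocks exercise the struct and slice decoders above.
// All type and field names are hypothetical.
package main

import (
	"fmt"

	"github.com/hashicorp/hcl"
)

type Rule struct {
	Name   string `hcl:",key"`
	Action string `hcl:"action"`
}

type Firewall struct {
	Rules []Rule `hcl:"rule"`
}

func main() {
	src := `
rule "ssh" { action = "allow" }
rule "all" { action = "deny" }
`
	var fw Firewall
	if err := hcl.Decode(&fw, src); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", fw) // {Rules:[{Name:ssh Action:allow} {Name:all Action:deny}]}
}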
-// -// If you choose to parse HCL into a raw AST, the benefit is that you -// can write custom visitor implementations to implement custom -// semantic checks. By default, HCL does not perform any semantic -// checks. -package hcl diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go b/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go deleted file mode 100644 index 6e5ef654b..000000000 --- a/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go +++ /dev/null @@ -1,219 +0,0 @@ -// Package ast declares the types used to represent syntax trees for HCL -// (HashiCorp Configuration Language) -package ast - -import ( - "fmt" - "strings" - - "github.com/hashicorp/hcl/hcl/token" -) - -// Node is an element in the abstract syntax tree. -type Node interface { - node() - Pos() token.Pos -} - -func (File) node() {} -func (ObjectList) node() {} -func (ObjectKey) node() {} -func (ObjectItem) node() {} -func (Comment) node() {} -func (CommentGroup) node() {} -func (ObjectType) node() {} -func (LiteralType) node() {} -func (ListType) node() {} - -// File represents a single HCL file -type File struct { - Node Node // usually a *ObjectList - Comments []*CommentGroup // list of all comments in the source -} - -func (f *File) Pos() token.Pos { - return f.Node.Pos() -} - -// ObjectList represents a list of ObjectItems. An HCL file itself is an -// ObjectList. -type ObjectList struct { - Items []*ObjectItem -} - -func (o *ObjectList) Add(item *ObjectItem) { - o.Items = append(o.Items, item) -} - -// Filter filters out the objects with the given key list as a prefix. -// -// The returned list of objects contain ObjectItems where the keys have -// this prefix already stripped off. This might result in objects with -// zero-length key lists if they have no children. -// -// If no matches are found, an empty ObjectList (non-nil) is returned. -func (o *ObjectList) Filter(keys ...string) *ObjectList { - var result ObjectList - for _, item := range o.Items { - // If there aren't enough keys, then ignore this - if len(item.Keys) < len(keys) { - continue - } - - match := true - for i, key := range item.Keys[:len(keys)] { - key := key.Token.Value().(string) - if key != keys[i] && !strings.EqualFold(key, keys[i]) { - match = false - break - } - } - if !match { - continue - } - - // Strip off the prefix from the children - newItem := *item - newItem.Keys = newItem.Keys[len(keys):] - result.Add(&newItem) - } - - return &result -} - -// Children returns further nested objects (key length > 0) within this -// ObjectList. This should be used with Filter to get at child items. -func (o *ObjectList) Children() *ObjectList { - var result ObjectList - for _, item := range o.Items { - if len(item.Keys) > 0 { - result.Add(item) - } - } - - return &result -} - -// Elem returns items in the list that are direct element assignments -// (key length == 0). This should be used with Filter to get at elements. -func (o *ObjectList) Elem() *ObjectList { - var result ObjectList - for _, item := range o.Items { - if len(item.Keys) == 0 { - result.Add(item) - } - } - - return &result -} - -func (o *ObjectList) Pos() token.Pos { - // always returns the uninitiliazed position - return o.Items[0].Pos() -} - -// ObjectItem represents a HCL Object Item. An item is represented with a key -// (or keys). It can be an assignment or an object (both normal and nested) -type ObjectItem struct { - // keys is only one length long if it's of type assignment. If it's a - // nested object it can be larger than one. 
In that case "assign" is - // invalid as there is no assignments for a nested object. - Keys []*ObjectKey - - // assign contains the position of "=", if any - Assign token.Pos - - // val is the item itself. It can be an object,list, number, bool or a - // string. If key length is larger than one, val can be only of type - // Object. - Val Node - - LeadComment *CommentGroup // associated lead comment - LineComment *CommentGroup // associated line comment -} - -func (o *ObjectItem) Pos() token.Pos { - // I'm not entirely sure what causes this, but removing this causes - // a test failure. We should investigate at some point. - if len(o.Keys) == 0 { - return token.Pos{} - } - - return o.Keys[0].Pos() -} - -// ObjectKeys are either an identifier or of type string. -type ObjectKey struct { - Token token.Token -} - -func (o *ObjectKey) Pos() token.Pos { - return o.Token.Pos -} - -// LiteralType represents a literal of basic type. Valid types are: -// token.NUMBER, token.FLOAT, token.BOOL and token.STRING -type LiteralType struct { - Token token.Token - - // comment types, only used when in a list - LeadComment *CommentGroup - LineComment *CommentGroup -} - -func (l *LiteralType) Pos() token.Pos { - return l.Token.Pos -} - -// ListStatement represents a HCL List type -type ListType struct { - Lbrack token.Pos // position of "[" - Rbrack token.Pos // position of "]" - List []Node // the elements in lexical order -} - -func (l *ListType) Pos() token.Pos { - return l.Lbrack -} - -func (l *ListType) Add(node Node) { - l.List = append(l.List, node) -} - -// ObjectType represents a HCL Object Type -type ObjectType struct { - Lbrace token.Pos // position of "{" - Rbrace token.Pos // position of "}" - List *ObjectList // the nodes in lexical order -} - -func (o *ObjectType) Pos() token.Pos { - return o.Lbrace -} - -// Comment node represents a single //, # style or /*- style commment -type Comment struct { - Start token.Pos // position of / or # - Text string -} - -func (c *Comment) Pos() token.Pos { - return c.Start -} - -// CommentGroup node represents a sequence of comments with no other tokens and -// no empty lines between. -type CommentGroup struct { - List []*Comment // len(List) > 0 -} - -func (c *CommentGroup) Pos() token.Pos { - return c.List[0].Pos() -} - -//------------------------------------------------------------------- -// GoStringer -//------------------------------------------------------------------- - -func (o *ObjectKey) GoString() string { return fmt.Sprintf("*%#v", *o) } -func (o *ObjectList) GoString() string { return fmt.Sprintf("*%#v", *o) } diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go b/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go deleted file mode 100644 index ba07ad42b..000000000 --- a/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go +++ /dev/null @@ -1,52 +0,0 @@ -package ast - -import "fmt" - -// WalkFunc describes a function to be called for each node during a Walk. The -// returned node can be used to rewrite the AST. Walking stops the returned -// bool is false. -type WalkFunc func(Node) (Node, bool) - -// Walk traverses an AST in depth-first order: It starts by calling fn(node); -// node must not be nil. If fn returns true, Walk invokes fn recursively for -// each of the non-nil children of node, followed by a call of fn(nil). The -// returned node of fn can be used to rewrite the passed node to fn. 
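// A minimal sketch of the Walk contract documented above (the implementation
// follows just below): rewrite every string literal in a parsed file and
// stop descending beneath each rewritten node. The input is illustrative.
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/hcl/ast"
	"github.com/hashicorp/hcl/hcl/parser"
	"github.com/hashicorp/hcl/hcl/token"
)

func main() {
	file, err := parser.Parse([]byte(`greeting = "hello"`))
	if err != nil {
		panic(err)
	}
	ast.Walk(file, func(n ast.Node) (ast.Node, bool) {
		if lit, ok := n.(*ast.LiteralType); ok && lit.Token.Type == token.STRING {
			lit.Token.Text = `"HELLO"` // the returned node replaces n
			return lit, false          // false stops the walk below this node
		}
		return n, true // keep walking
	})
	fmt.Println(file.Node.(*ast.ObjectList).Items[0].Val.(*ast.LiteralType).Token.Text)
}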
-func Walk(node Node, fn WalkFunc) Node { - rewritten, ok := fn(node) - if !ok { - return rewritten - } - - switch n := node.(type) { - case *File: - n.Node = Walk(n.Node, fn) - case *ObjectList: - for i, item := range n.Items { - n.Items[i] = Walk(item, fn).(*ObjectItem) - } - case *ObjectKey: - // nothing to do - case *ObjectItem: - for i, k := range n.Keys { - n.Keys[i] = Walk(k, fn).(*ObjectKey) - } - - if n.Val != nil { - n.Val = Walk(n.Val, fn) - } - case *LiteralType: - // nothing to do - case *ListType: - for i, l := range n.List { - n.List[i] = Walk(l, fn) - } - case *ObjectType: - n.List = Walk(n.List, fn).(*ObjectList) - default: - // should we panic here? - fmt.Printf("unknown type: %T\n", n) - } - - fn(nil) - return rewritten -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/error.go b/vendor/github.com/hashicorp/hcl/hcl/parser/error.go deleted file mode 100644 index 5c99381df..000000000 --- a/vendor/github.com/hashicorp/hcl/hcl/parser/error.go +++ /dev/null @@ -1,17 +0,0 @@ -package parser - -import ( - "fmt" - - "github.com/hashicorp/hcl/hcl/token" -) - -// PosError is a parse error that contains a position. -type PosError struct { - Pos token.Pos - Err error -} - -func (e *PosError) Error() string { - return fmt.Sprintf("At %s: %s", e.Pos, e.Err) -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go deleted file mode 100644 index 64c83bcfb..000000000 --- a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go +++ /dev/null @@ -1,532 +0,0 @@ -// Package parser implements a parser for HCL (HashiCorp Configuration -// Language) -package parser - -import ( - "bytes" - "errors" - "fmt" - "strings" - - "github.com/hashicorp/hcl/hcl/ast" - "github.com/hashicorp/hcl/hcl/scanner" - "github.com/hashicorp/hcl/hcl/token" -) - -type Parser struct { - sc *scanner.Scanner - - // Last read token - tok token.Token - commaPrev token.Token - - comments []*ast.CommentGroup - leadComment *ast.CommentGroup // last lead comment - lineComment *ast.CommentGroup // last line comment - - enableTrace bool - indent int - n int // buffer size (max = 1) -} - -func newParser(src []byte) *Parser { - return &Parser{ - sc: scanner.New(src), - } -} - -// Parse returns the fully parsed source and returns the abstract syntax tree. -func Parse(src []byte) (*ast.File, error) { - // normalize all line endings - // since the scanner and output only work with "\n" line endings, we may - // end up with dangling "\r" characters in the parsed data. - src = bytes.Replace(src, []byte("\r\n"), []byte("\n"), -1) - - p := newParser(src) - return p.Parse() -} - -var errEofToken = errors.New("EOF token found") - -// Parse returns the fully parsed source and returns the abstract syntax tree. -func (p *Parser) Parse() (*ast.File, error) { - f := &ast.File{} - var err, scerr error - p.sc.Error = func(pos token.Pos, msg string) { - scerr = &PosError{Pos: pos, Err: errors.New(msg)} - } - - f.Node, err = p.objectList(false) - if scerr != nil { - return nil, scerr - } - if err != nil { - return nil, err - } - - f.Comments = p.comments - return f, nil -} - -// objectList parses a list of items within an object (generally k/v pairs). -// The parameter" obj" tells this whether to we are within an object (braces: -// '{', '}') or just at the top level. If we're within an object, we end -// at an RBRACE. 
-func (p *Parser) objectList(obj bool) (*ast.ObjectList, error) { - defer un(trace(p, "ParseObjectList")) - node := &ast.ObjectList{} - - for { - if obj { - tok := p.scan() - p.unscan() - if tok.Type == token.RBRACE { - break - } - } - - n, err := p.objectItem() - if err == errEofToken { - break // we are finished - } - - // we don't return a nil node, because might want to use already - // collected items. - if err != nil { - return node, err - } - - node.Add(n) - - // object lists can be optionally comma-delimited e.g. when a list of maps - // is being expressed, so a comma is allowed here - it's simply consumed - tok := p.scan() - if tok.Type != token.COMMA { - p.unscan() - } - } - return node, nil -} - -func (p *Parser) consumeComment() (comment *ast.Comment, endline int) { - endline = p.tok.Pos.Line - - // count the endline if it's multiline comment, ie starting with /* - if len(p.tok.Text) > 1 && p.tok.Text[1] == '*' { - // don't use range here - no need to decode Unicode code points - for i := 0; i < len(p.tok.Text); i++ { - if p.tok.Text[i] == '\n' { - endline++ - } - } - } - - comment = &ast.Comment{Start: p.tok.Pos, Text: p.tok.Text} - p.tok = p.sc.Scan() - return -} - -func (p *Parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) { - var list []*ast.Comment - endline = p.tok.Pos.Line - - for p.tok.Type == token.COMMENT && p.tok.Pos.Line <= endline+n { - var comment *ast.Comment - comment, endline = p.consumeComment() - list = append(list, comment) - } - - // add comment group to the comments list - comments = &ast.CommentGroup{List: list} - p.comments = append(p.comments, comments) - - return -} - -// objectItem parses a single object item -func (p *Parser) objectItem() (*ast.ObjectItem, error) { - defer un(trace(p, "ParseObjectItem")) - - keys, err := p.objectKey() - if len(keys) > 0 && err == errEofToken { - // We ignore eof token here since it is an error if we didn't - // receive a value (but we did receive a key) for the item. - err = nil - } - if len(keys) > 0 && err != nil && p.tok.Type == token.RBRACE { - // This is a strange boolean statement, but what it means is: - // We have keys with no value, and we're likely in an object - // (since RBrace ends an object). For this, we set err to nil so - // we continue and get the error below of having the wrong value - // type. - err = nil - - // Reset the token type so we don't think it completed fine. See - // objectType which uses p.tok.Type to check if we're done with - // the object. 
- p.tok.Type = token.EOF - } - if err != nil { - return nil, err - } - - o := &ast.ObjectItem{ - Keys: keys, - } - - if p.leadComment != nil { - o.LeadComment = p.leadComment - p.leadComment = nil - } - - switch p.tok.Type { - case token.ASSIGN: - o.Assign = p.tok.Pos - o.Val, err = p.object() - if err != nil { - return nil, err - } - case token.LBRACE: - o.Val, err = p.objectType() - if err != nil { - return nil, err - } - default: - keyStr := make([]string, 0, len(keys)) - for _, k := range keys { - keyStr = append(keyStr, k.Token.Text) - } - - return nil, &PosError{ - Pos: p.tok.Pos, - Err: fmt.Errorf( - "key '%s' expected start of object ('{') or assignment ('=')", - strings.Join(keyStr, " ")), - } - } - - // key=#comment - // val - if p.lineComment != nil { - o.LineComment, p.lineComment = p.lineComment, nil - } - - // do a look-ahead for line comment - p.scan() - if len(keys) > 0 && o.Val.Pos().Line == keys[0].Pos().Line && p.lineComment != nil { - o.LineComment = p.lineComment - p.lineComment = nil - } - p.unscan() - return o, nil -} - -// objectKey parses an object key and returns a ObjectKey AST -func (p *Parser) objectKey() ([]*ast.ObjectKey, error) { - keyCount := 0 - keys := make([]*ast.ObjectKey, 0) - - for { - tok := p.scan() - switch tok.Type { - case token.EOF: - // It is very important to also return the keys here as well as - // the error. This is because we need to be able to tell if we - // did parse keys prior to finding the EOF, or if we just found - // a bare EOF. - return keys, errEofToken - case token.ASSIGN: - // assignment or object only, but not nested objects. this is not - // allowed: `foo bar = {}` - if keyCount > 1 { - return nil, &PosError{ - Pos: p.tok.Pos, - Err: fmt.Errorf("nested object expected: LBRACE got: %s", p.tok.Type), - } - } - - if keyCount == 0 { - return nil, &PosError{ - Pos: p.tok.Pos, - Err: errors.New("no object keys found!"), - } - } - - return keys, nil - case token.LBRACE: - var err error - - // If we have no keys, then it is a syntax error. i.e. {{}} is not - // allowed. - if len(keys) == 0 { - err = &PosError{ - Pos: p.tok.Pos, - Err: fmt.Errorf("expected: IDENT | STRING got: %s", p.tok.Type), - } - } - - // object - return keys, err - case token.IDENT, token.STRING: - keyCount++ - keys = append(keys, &ast.ObjectKey{Token: p.tok}) - case token.ILLEGAL: - return keys, &PosError{ - Pos: p.tok.Pos, - Err: fmt.Errorf("illegal character"), - } - default: - return keys, &PosError{ - Pos: p.tok.Pos, - Err: fmt.Errorf("expected: IDENT | STRING | ASSIGN | LBRACE got: %s", p.tok.Type), - } - } - } -} - -// object parses any type of object, such as number, bool, string, object or -// list. 
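// A minimal sketch of how callers observe the error paths above: the deleted
// parser surfaces syntax errors as *parser.PosError values carrying a
// token.Pos, so line and column can be reported. The input is illustrative.
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/hcl/parser"
)

func main() {
	// Missing '=' or '{' after the key, so objectItem fails.
	_, err := parser.Parse([]byte(`port 8080`))
	if perr, ok := err.(*parser.PosError); ok {
		fmt.Println(perr.Pos.Line, perr.Pos.Column, perr.Err)
	} else if err != nil {
		fmt.Println(err)
	}
}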
-func (p *Parser) object() (ast.Node, error) { - defer un(trace(p, "ParseType")) - tok := p.scan() - - switch tok.Type { - case token.NUMBER, token.FLOAT, token.BOOL, token.STRING, token.HEREDOC: - return p.literalType() - case token.LBRACE: - return p.objectType() - case token.LBRACK: - return p.listType() - case token.COMMENT: - // implement comment - case token.EOF: - return nil, errEofToken - } - - return nil, &PosError{ - Pos: tok.Pos, - Err: fmt.Errorf("Unknown token: %+v", tok), - } -} - -// objectType parses an object type and returns a ObjectType AST -func (p *Parser) objectType() (*ast.ObjectType, error) { - defer un(trace(p, "ParseObjectType")) - - // we assume that the currently scanned token is a LBRACE - o := &ast.ObjectType{ - Lbrace: p.tok.Pos, - } - - l, err := p.objectList(true) - - // if we hit RBRACE, we are good to go (means we parsed all Items), if it's - // not a RBRACE, it's an syntax error and we just return it. - if err != nil && p.tok.Type != token.RBRACE { - return nil, err - } - - // No error, scan and expect the ending to be a brace - if tok := p.scan(); tok.Type != token.RBRACE { - return nil, &PosError{ - Pos: tok.Pos, - Err: fmt.Errorf("object expected closing RBRACE got: %s", tok.Type), - } - } - - o.List = l - o.Rbrace = p.tok.Pos // advanced via parseObjectList - return o, nil -} - -// listType parses a list type and returns a ListType AST -func (p *Parser) listType() (*ast.ListType, error) { - defer un(trace(p, "ParseListType")) - - // we assume that the currently scanned token is a LBRACK - l := &ast.ListType{ - Lbrack: p.tok.Pos, - } - - needComma := false - for { - tok := p.scan() - if needComma { - switch tok.Type { - case token.COMMA, token.RBRACK: - default: - return nil, &PosError{ - Pos: tok.Pos, - Err: fmt.Errorf( - "error parsing list, expected comma or list end, got: %s", - tok.Type), - } - } - } - switch tok.Type { - case token.BOOL, token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC: - node, err := p.literalType() - if err != nil { - return nil, err - } - - // If there is a lead comment, apply it - if p.leadComment != nil { - node.LeadComment = p.leadComment - p.leadComment = nil - } - - l.Add(node) - needComma = true - case token.COMMA: - // get next list item or we are at the end - // do a look-ahead for line comment - p.scan() - if p.lineComment != nil && len(l.List) > 0 { - lit, ok := l.List[len(l.List)-1].(*ast.LiteralType) - if ok { - lit.LineComment = p.lineComment - l.List[len(l.List)-1] = lit - p.lineComment = nil - } - } - p.unscan() - - needComma = false - continue - case token.LBRACE: - // Looks like a nested object, so parse it out - node, err := p.objectType() - if err != nil { - return nil, &PosError{ - Pos: tok.Pos, - Err: fmt.Errorf( - "error while trying to parse object within list: %s", err), - } - } - l.Add(node) - needComma = true - case token.LBRACK: - node, err := p.listType() - if err != nil { - return nil, &PosError{ - Pos: tok.Pos, - Err: fmt.Errorf( - "error while trying to parse list within list: %s", err), - } - } - l.Add(node) - case token.RBRACK: - // finished - l.Rbrack = p.tok.Pos - return l, nil - default: - return nil, &PosError{ - Pos: tok.Pos, - Err: fmt.Errorf("unexpected token while parsing list: %s", tok.Type), - } - } - } -} - -// literalType parses a literal type and returns a LiteralType AST -func (p *Parser) literalType() (*ast.LiteralType, error) { - defer un(trace(p, "ParseLiteral")) - - return &ast.LiteralType{ - Token: p.tok, - }, nil -} - -// scan returns the next token from the 
underlying scanner. If a token has -// been unscanned then read that instead. In the process, it collects any -// comment groups encountered, and remembers the last lead and line comments. -func (p *Parser) scan() token.Token { - // If we have a token on the buffer, then return it. - if p.n != 0 { - p.n = 0 - return p.tok - } - - // Otherwise read the next token from the scanner and Save it to the buffer - // in case we unscan later. - prev := p.tok - p.tok = p.sc.Scan() - - if p.tok.Type == token.COMMENT { - var comment *ast.CommentGroup - var endline int - - // fmt.Printf("p.tok.Pos.Line = %+v prev: %d endline %d \n", - // p.tok.Pos.Line, prev.Pos.Line, endline) - if p.tok.Pos.Line == prev.Pos.Line { - // The comment is on same line as the previous token; it - // cannot be a lead comment but may be a line comment. - comment, endline = p.consumeCommentGroup(0) - if p.tok.Pos.Line != endline { - // The next token is on a different line, thus - // the last comment group is a line comment. - p.lineComment = comment - } - } - - // consume successor comments, if any - endline = -1 - for p.tok.Type == token.COMMENT { - comment, endline = p.consumeCommentGroup(1) - } - - if endline+1 == p.tok.Pos.Line && p.tok.Type != token.RBRACE { - switch p.tok.Type { - case token.RBRACE, token.RBRACK: - // Do not count for these cases - default: - // The next token is following on the line immediately after the - // comment group, thus the last comment group is a lead comment. - p.leadComment = comment - } - } - - } - - return p.tok -} - -// unscan pushes the previously read token back onto the buffer. -func (p *Parser) unscan() { - p.n = 1 -} - -// ---------------------------------------------------------------------------- -// Parsing support - -func (p *Parser) printTrace(a ...interface{}) { - if !p.enableTrace { - return - } - - const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " - const n = len(dots) - fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column) - - i := 2 * p.indent - for i > n { - fmt.Print(dots) - i -= n - } - // i <= n - fmt.Print(dots[0:i]) - fmt.Println(a...) 
-} - -func trace(p *Parser, msg string) *Parser { - p.printTrace(msg, "(") - p.indent++ - return p -} - -// Usage pattern: defer un(trace(p, "...")) -func un(p *Parser) { - p.indent-- - p.printTrace(")") -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go b/vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go deleted file mode 100644 index 7c038d12a..000000000 --- a/vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go +++ /dev/null @@ -1,789 +0,0 @@ -package printer - -import ( - "bytes" - "fmt" - "sort" - - "github.com/hashicorp/hcl/hcl/ast" - "github.com/hashicorp/hcl/hcl/token" -) - -const ( - blank = byte(' ') - newline = byte('\n') - tab = byte('\t') - infinity = 1 << 30 // offset or line -) - -var ( - unindent = []byte("\uE123") // in the private use space -) - -type printer struct { - cfg Config - prev token.Pos - - comments []*ast.CommentGroup // may be nil, contains all comments - standaloneComments []*ast.CommentGroup // contains all standalone comments (not assigned to any node) - - enableTrace bool - indentTrace int -} - -type ByPosition []*ast.CommentGroup - -func (b ByPosition) Len() int { return len(b) } -func (b ByPosition) Swap(i, j int) { b[i], b[j] = b[j], b[i] } -func (b ByPosition) Less(i, j int) bool { return b[i].Pos().Before(b[j].Pos()) } - -// collectComments comments all standalone comments which are not lead or line -// comment -func (p *printer) collectComments(node ast.Node) { - // first collect all comments. This is already stored in - // ast.File.(comments) - ast.Walk(node, func(nn ast.Node) (ast.Node, bool) { - switch t := nn.(type) { - case *ast.File: - p.comments = t.Comments - return nn, false - } - return nn, true - }) - - standaloneComments := make(map[token.Pos]*ast.CommentGroup, 0) - for _, c := range p.comments { - standaloneComments[c.Pos()] = c - } - - // next remove all lead and line comments from the overall comment map. - // This will give us comments which are standalone, comments which are not - // assigned to any kind of node. - ast.Walk(node, func(nn ast.Node) (ast.Node, bool) { - switch t := nn.(type) { - case *ast.LiteralType: - if t.LeadComment != nil { - for _, comment := range t.LeadComment.List { - if _, ok := standaloneComments[comment.Pos()]; ok { - delete(standaloneComments, comment.Pos()) - } - } - } - - if t.LineComment != nil { - for _, comment := range t.LineComment.List { - if _, ok := standaloneComments[comment.Pos()]; ok { - delete(standaloneComments, comment.Pos()) - } - } - } - case *ast.ObjectItem: - if t.LeadComment != nil { - for _, comment := range t.LeadComment.List { - if _, ok := standaloneComments[comment.Pos()]; ok { - delete(standaloneComments, comment.Pos()) - } - } - } - - if t.LineComment != nil { - for _, comment := range t.LineComment.List { - if _, ok := standaloneComments[comment.Pos()]; ok { - delete(standaloneComments, comment.Pos()) - } - } - } - } - - return nn, true - }) - - for _, c := range standaloneComments { - p.standaloneComments = append(p.standaloneComments, c) - } - - sort.Sort(ByPosition(p.standaloneComments)) -} - -// output prints creates b printable HCL output and returns it. -func (p *printer) output(n interface{}) []byte { - var buf bytes.Buffer - - switch t := n.(type) { - case *ast.File: - // File doesn't trace so we add the tracing here - defer un(trace(p, "File")) - return p.output(t.Node) - case *ast.ObjectList: - defer un(trace(p, "ObjectList")) - - var index int - for { - // Determine the location of the next actual non-comment - // item. 
If we're at the end, the next item is at "infinity" - var nextItem token.Pos - if index != len(t.Items) { - nextItem = t.Items[index].Pos() - } else { - nextItem = token.Pos{Offset: infinity, Line: infinity} - } - - // Go through the standalone comments in the file and print out - // the comments that we should be for this object item. - for _, c := range p.standaloneComments { - // Go through all the comments in the group. The group - // should be printed together, not separated by double newlines. - printed := false - newlinePrinted := false - for _, comment := range c.List { - // We only care about comments after the previous item - // we've printed so that comments are printed in the - // correct locations (between two objects for example). - // And before the next item. - if comment.Pos().After(p.prev) && comment.Pos().Before(nextItem) { - // if we hit the end add newlines so we can print the comment - // we don't do this if prev is invalid which means the - // beginning of the file since the first comment should - // be at the first line. - if !newlinePrinted && p.prev.IsValid() && index == len(t.Items) { - buf.Write([]byte{newline, newline}) - newlinePrinted = true - } - - // Write the actual comment. - buf.WriteString(comment.Text) - buf.WriteByte(newline) - - // Set printed to true to note that we printed something - printed = true - } - } - - // If we're not at the last item, write a new line so - // that there is a newline separating this comment from - // the next object. - if printed && index != len(t.Items) { - buf.WriteByte(newline) - } - } - - if index == len(t.Items) { - break - } - - buf.Write(p.output(t.Items[index])) - if index != len(t.Items)-1 { - // Always write a newline to separate us from the next item - buf.WriteByte(newline) - - // Need to determine if we're going to separate the next item - // with a blank line. The logic here is simple, though there - // are a few conditions: - // - // 1. The next object is more than one line away anyways, - // so we need an empty line. - // - // 2. The next object is not a "single line" object, so - // we need an empty line. - // - // 3. This current object is not a single line object, - // so we need an empty line. - current := t.Items[index] - next := t.Items[index+1] - if next.Pos().Line != t.Items[index].Pos().Line+1 || - !p.isSingleLineObject(next) || - !p.isSingleLineObject(current) { - buf.WriteByte(newline) - } - } - index++ - } - case *ast.ObjectKey: - buf.WriteString(t.Token.Text) - case *ast.ObjectItem: - p.prev = t.Pos() - buf.Write(p.objectItem(t)) - case *ast.LiteralType: - buf.Write(p.literalType(t)) - case *ast.ListType: - buf.Write(p.list(t)) - case *ast.ObjectType: - buf.Write(p.objectType(t)) - default: - fmt.Printf(" unknown type: %T\n", n) - } - - return buf.Bytes() -} - -func (p *printer) literalType(lit *ast.LiteralType) []byte { - result := []byte(lit.Token.Text) - switch lit.Token.Type { - case token.HEREDOC: - // Clear the trailing newline from heredocs - if result[len(result)-1] == '\n' { - result = result[:len(result)-1] - } - - // Poison lines 2+ so that we don't indent them - result = p.heredocIndent(result) - case token.STRING: - // If this is a multiline string, poison lines 2+ so we don't - // indent them. - if bytes.IndexRune(result, '\n') >= 0 { - result = p.heredocIndent(result) - } - } - - return result -} - -// objectItem returns the printable HCL form of an object item. An object type -// starts with one/multiple keys and has a value. The value might be of any -// type. 
-func (p *printer) objectItem(o *ast.ObjectItem) []byte { - defer un(trace(p, fmt.Sprintf("ObjectItem: %s", o.Keys[0].Token.Text))) - var buf bytes.Buffer - - if o.LeadComment != nil { - for _, comment := range o.LeadComment.List { - buf.WriteString(comment.Text) - buf.WriteByte(newline) - } - } - - // If key and val are on different lines, treat line comments like lead comments. - if o.LineComment != nil && o.Val.Pos().Line != o.Keys[0].Pos().Line { - for _, comment := range o.LineComment.List { - buf.WriteString(comment.Text) - buf.WriteByte(newline) - } - } - - for i, k := range o.Keys { - buf.WriteString(k.Token.Text) - buf.WriteByte(blank) - - // reach end of key - if o.Assign.IsValid() && i == len(o.Keys)-1 && len(o.Keys) == 1 { - buf.WriteString("=") - buf.WriteByte(blank) - } - } - - buf.Write(p.output(o.Val)) - - if o.LineComment != nil && o.Val.Pos().Line == o.Keys[0].Pos().Line { - buf.WriteByte(blank) - for _, comment := range o.LineComment.List { - buf.WriteString(comment.Text) - } - } - - return buf.Bytes() -} - -// objectType returns the printable HCL form of an object type. An object type -// begins with a brace and ends with a brace. -func (p *printer) objectType(o *ast.ObjectType) []byte { - defer un(trace(p, "ObjectType")) - var buf bytes.Buffer - buf.WriteString("{") - - var index int - var nextItem token.Pos - var commented, newlinePrinted bool - for { - // Determine the location of the next actual non-comment - // item. If we're at the end, the next item is the closing brace - if index != len(o.List.Items) { - nextItem = o.List.Items[index].Pos() - } else { - nextItem = o.Rbrace - } - - // Go through the standalone comments in the file and print out - // the comments that we should be for this object item. - for _, c := range p.standaloneComments { - printed := false - var lastCommentPos token.Pos - for _, comment := range c.List { - // We only care about comments after the previous item - // we've printed so that comments are printed in the - // correct locations (between two objects for example). - // And before the next item. - if comment.Pos().After(p.prev) && comment.Pos().Before(nextItem) { - // If there are standalone comments and the initial newline has not - // been printed yet, do it now. - if !newlinePrinted { - newlinePrinted = true - buf.WriteByte(newline) - } - - // add newline if it's between other printed nodes - if index > 0 { - commented = true - buf.WriteByte(newline) - } - - // Store this position - lastCommentPos = comment.Pos() - - // output the comment itself - buf.Write(p.indent(p.heredocIndent([]byte(comment.Text)))) - - // Set printed to true to note that we printed something - printed = true - - /* - if index != len(o.List.Items) { - buf.WriteByte(newline) // do not print on the end - } - */ - } - } - - // Stuff to do if we had comments - if printed { - // Always write a newline - buf.WriteByte(newline) - - // If there is another item in the object and our comment - // didn't hug it directly, then make sure there is a blank - // line separating them. - if nextItem != o.Rbrace && nextItem.Line != lastCommentPos.Line+1 { - buf.WriteByte(newline) - } - } - } - - if index == len(o.List.Items) { - p.prev = o.Rbrace - break - } - - // At this point we are sure that it's not a totally empty block: print - // the initial newline if it hasn't been printed yet by the previous - // block about standalone comments. - if !newlinePrinted { - buf.WriteByte(newline) - newlinePrinted = true - } - - // check if we have adjacent one liner items. 
If yes we'll going to align - // the comments. - var aligned []*ast.ObjectItem - for _, item := range o.List.Items[index:] { - // we don't group one line lists - if len(o.List.Items) == 1 { - break - } - - // one means a oneliner with out any lead comment - // two means a oneliner with lead comment - // anything else might be something else - cur := lines(string(p.objectItem(item))) - if cur > 2 { - break - } - - curPos := item.Pos() - - nextPos := token.Pos{} - if index != len(o.List.Items)-1 { - nextPos = o.List.Items[index+1].Pos() - } - - prevPos := token.Pos{} - if index != 0 { - prevPos = o.List.Items[index-1].Pos() - } - - // fmt.Println("DEBUG ----------------") - // fmt.Printf("prev = %+v prevPos: %s\n", prev, prevPos) - // fmt.Printf("cur = %+v curPos: %s\n", cur, curPos) - // fmt.Printf("next = %+v nextPos: %s\n", next, nextPos) - - if curPos.Line+1 == nextPos.Line { - aligned = append(aligned, item) - index++ - continue - } - - if curPos.Line-1 == prevPos.Line { - aligned = append(aligned, item) - index++ - - // finish if we have a new line or comment next. This happens - // if the next item is not adjacent - if curPos.Line+1 != nextPos.Line { - break - } - continue - } - - break - } - - // put newlines if the items are between other non aligned items. - // newlines are also added if there is a standalone comment already, so - // check it too - if !commented && index != len(aligned) { - buf.WriteByte(newline) - } - - if len(aligned) >= 1 { - p.prev = aligned[len(aligned)-1].Pos() - - items := p.alignedItems(aligned) - buf.Write(p.indent(items)) - } else { - p.prev = o.List.Items[index].Pos() - - buf.Write(p.indent(p.objectItem(o.List.Items[index]))) - index++ - } - - buf.WriteByte(newline) - } - - buf.WriteString("}") - return buf.Bytes() -} - -func (p *printer) alignedItems(items []*ast.ObjectItem) []byte { - var buf bytes.Buffer - - // find the longest key and value length, needed for alignment - var longestKeyLen int // longest key length - var longestValLen int // longest value length - for _, item := range items { - key := len(item.Keys[0].Token.Text) - val := len(p.output(item.Val)) - - if key > longestKeyLen { - longestKeyLen = key - } - - if val > longestValLen { - longestValLen = val - } - } - - for i, item := range items { - if item.LeadComment != nil { - for _, comment := range item.LeadComment.List { - buf.WriteString(comment.Text) - buf.WriteByte(newline) - } - } - - for i, k := range item.Keys { - keyLen := len(k.Token.Text) - buf.WriteString(k.Token.Text) - for i := 0; i < longestKeyLen-keyLen+1; i++ { - buf.WriteByte(blank) - } - - // reach end of key - if i == len(item.Keys)-1 && len(item.Keys) == 1 { - buf.WriteString("=") - buf.WriteByte(blank) - } - } - - val := p.output(item.Val) - valLen := len(val) - buf.Write(val) - - if item.Val.Pos().Line == item.Keys[0].Pos().Line && item.LineComment != nil { - for i := 0; i < longestValLen-valLen+1; i++ { - buf.WriteByte(blank) - } - - for _, comment := range item.LineComment.List { - buf.WriteString(comment.Text) - } - } - - // do not print for the last item - if i != len(items)-1 { - buf.WriteByte(newline) - } - } - - return buf.Bytes() -} - -// list returns the printable HCL form of an list type. 
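// A minimal sketch of the list-printing behavior implemented below: a list
// stays on one line only when isSingleLineList (further down) holds, i.e.
// all elements are literals originally on one line, with no line comments
// and at most one heredoc. Round-tripping through the package's Format
// helper (deleted later in this patch) is expected to preserve the layout:
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/hcl/printer"
)

func main() {
	out, err := printer.Format([]byte(`tags = ["a", "b"]`))
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s", out) // stays single-line: tags = ["a", "b"]
}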
-func (p *printer) list(l *ast.ListType) []byte { - if p.isSingleLineList(l) { - return p.singleLineList(l) - } - - var buf bytes.Buffer - buf.WriteString("[") - buf.WriteByte(newline) - - var longestLine int - for _, item := range l.List { - // for now we assume that the list only contains literal types - if lit, ok := item.(*ast.LiteralType); ok { - lineLen := len(lit.Token.Text) - if lineLen > longestLine { - longestLine = lineLen - } - } - } - - haveEmptyLine := false - for i, item := range l.List { - // If we have a lead comment, then we want to write that first - leadComment := false - if lit, ok := item.(*ast.LiteralType); ok && lit.LeadComment != nil { - leadComment = true - - // Ensure an empty line before every element with a - // lead comment (except the first item in a list). - if !haveEmptyLine && i != 0 { - buf.WriteByte(newline) - } - - for _, comment := range lit.LeadComment.List { - buf.Write(p.indent([]byte(comment.Text))) - buf.WriteByte(newline) - } - } - - // also indent each line - val := p.output(item) - curLen := len(val) - buf.Write(p.indent(val)) - - // if this item is a heredoc, then we output the comma on - // the next line. This is the only case this happens. - comma := []byte{','} - if lit, ok := item.(*ast.LiteralType); ok && lit.Token.Type == token.HEREDOC { - buf.WriteByte(newline) - comma = p.indent(comma) - } - - buf.Write(comma) - - if lit, ok := item.(*ast.LiteralType); ok && lit.LineComment != nil { - // if the next item doesn't have any comments, do not align - buf.WriteByte(blank) // align one space - for i := 0; i < longestLine-curLen; i++ { - buf.WriteByte(blank) - } - - for _, comment := range lit.LineComment.List { - buf.WriteString(comment.Text) - } - } - - buf.WriteByte(newline) - - // Ensure an empty line after every element with a - // lead comment (except the first item in a list). - haveEmptyLine = leadComment && i != len(l.List)-1 - if haveEmptyLine { - buf.WriteByte(newline) - } - } - - buf.WriteString("]") - return buf.Bytes() -} - -// isSingleLineList returns true if: -// * they were previously formatted entirely on one line -// * they consist entirely of literals -// * there are either no heredoc strings or the list has exactly one element -// * there are no line comments -func (printer) isSingleLineList(l *ast.ListType) bool { - for _, item := range l.List { - if item.Pos().Line != l.Lbrack.Line { - return false - } - - lit, ok := item.(*ast.LiteralType) - if !ok { - return false - } - - if lit.Token.Type == token.HEREDOC && len(l.List) != 1 { - return false - } - - if lit.LineComment != nil { - return false - } - } - - return true -} - -// singleLineList prints a simple single line list. -// For a definition of "simple", see isSingleLineList above. -func (p *printer) singleLineList(l *ast.ListType) []byte { - buf := &bytes.Buffer{} - - buf.WriteString("[") - for i, item := range l.List { - if i != 0 { - buf.WriteString(", ") - } - - // Output the item itself - buf.Write(p.output(item)) - - // The heredoc marker needs to be at the end of line. 
- if lit, ok := item.(*ast.LiteralType); ok && lit.Token.Type == token.HEREDOC { - buf.WriteByte(newline) - } - } - - buf.WriteString("]") - return buf.Bytes() -} - -// indent indents the lines of the given buffer for each non-empty line -func (p *printer) indent(buf []byte) []byte { - var prefix []byte - if p.cfg.SpacesWidth != 0 { - for i := 0; i < p.cfg.SpacesWidth; i++ { - prefix = append(prefix, blank) - } - } else { - prefix = []byte{tab} - } - - var res []byte - bol := true - for _, c := range buf { - if bol && c != '\n' { - res = append(res, prefix...) - } - - res = append(res, c) - bol = c == '\n' - } - return res -} - -// unindent removes all the indentation from the tombstoned lines -func (p *printer) unindent(buf []byte) []byte { - var res []byte - for i := 0; i < len(buf); i++ { - skip := len(buf)-i <= len(unindent) - if !skip { - skip = !bytes.Equal(unindent, buf[i:i+len(unindent)]) - } - if skip { - res = append(res, buf[i]) - continue - } - - // We have a marker. we have to backtrace here and clean out - // any whitespace ahead of our tombstone up to a \n - for j := len(res) - 1; j >= 0; j-- { - if res[j] == '\n' { - break - } - - res = res[:j] - } - - // Skip the entire unindent marker - i += len(unindent) - 1 - } - - return res -} - -// heredocIndent marks all the 2nd and further lines as unindentable -func (p *printer) heredocIndent(buf []byte) []byte { - var res []byte - bol := false - for _, c := range buf { - if bol && c != '\n' { - res = append(res, unindent...) - } - res = append(res, c) - bol = c == '\n' - } - return res -} - -// isSingleLineObject tells whether the given object item is a single -// line object such as "obj {}". -// -// A single line object: -// -// * has no lead comments (hence multi-line) -// * has no assignment -// * has no values in the stanza (within {}) -// -func (p *printer) isSingleLineObject(val *ast.ObjectItem) bool { - // If there is a lead comment, can't be one line - if val.LeadComment != nil { - return false - } - - // If there is assignment, we always break by line - if val.Assign.IsValid() { - return false - } - - // If it isn't an object type, then its not a single line object - ot, ok := val.Val.(*ast.ObjectType) - if !ok { - return false - } - - // If the object has no items, it is single line! - return len(ot.List.Items) == 0 -} - -func lines(txt string) int { - endline := 1 - for i := 0; i < len(txt); i++ { - if txt[i] == '\n' { - endline++ - } - } - return endline -} - -// ---------------------------------------------------------------------------- -// Tracing support - -func (p *printer) printTrace(a ...interface{}) { - if !p.enableTrace { - return - } - - const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " - const n = len(dots) - i := 2 * p.indentTrace - for i > n { - fmt.Print(dots) - i -= n - } - // i <= n - fmt.Print(dots[0:i]) - fmt.Println(a...) -} - -func trace(p *printer, msg string) *printer { - p.printTrace(msg, "(") - p.indentTrace++ - return p -} - -// Usage pattern: defer un(trace(p, "...")) -func un(p *printer) { - p.indentTrace-- - p.printTrace(")") -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go b/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go deleted file mode 100644 index 6617ab8e7..000000000 --- a/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go +++ /dev/null @@ -1,66 +0,0 @@ -// Package printer implements printing of AST nodes to HCL format. 
-package printer - -import ( - "bytes" - "io" - "text/tabwriter" - - "github.com/hashicorp/hcl/hcl/ast" - "github.com/hashicorp/hcl/hcl/parser" -) - -var DefaultConfig = Config{ - SpacesWidth: 2, -} - -// A Config node controls the output of Fprint. -type Config struct { - SpacesWidth int // if set, it will use spaces instead of tabs for alignment -} - -func (c *Config) Fprint(output io.Writer, node ast.Node) error { - p := &printer{ - cfg: *c, - comments: make([]*ast.CommentGroup, 0), - standaloneComments: make([]*ast.CommentGroup, 0), - // enableTrace: true, - } - - p.collectComments(node) - - if _, err := output.Write(p.unindent(p.output(node))); err != nil { - return err - } - - // flush tabwriter, if any - var err error - if tw, _ := output.(*tabwriter.Writer); tw != nil { - err = tw.Flush() - } - - return err -} - -// Fprint "pretty-prints" an HCL node to output -// It calls Config.Fprint with default settings. -func Fprint(output io.Writer, node ast.Node) error { - return DefaultConfig.Fprint(output, node) -} - -// Format formats src HCL and returns the result. -func Format(src []byte) ([]byte, error) { - node, err := parser.Parse(src) - if err != nil { - return nil, err - } - - var buf bytes.Buffer - if err := DefaultConfig.Fprint(&buf, node); err != nil { - return nil, err - } - - // Add trailing newline to result - buf.WriteString("\n") - return buf.Bytes(), nil -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go deleted file mode 100644 index 624a18fe3..000000000 --- a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go +++ /dev/null @@ -1,652 +0,0 @@ -// Package scanner implements a scanner for HCL (HashiCorp Configuration -// Language) source text. -package scanner - -import ( - "bytes" - "fmt" - "os" - "regexp" - "unicode" - "unicode/utf8" - - "github.com/hashicorp/hcl/hcl/token" -) - -// eof represents a marker rune for the end of the reader. -const eof = rune(0) - -// Scanner defines a lexical scanner -type Scanner struct { - buf *bytes.Buffer // Source buffer for advancing and scanning - src []byte // Source buffer for immutable access - - // Source Position - srcPos token.Pos // current position - prevPos token.Pos // previous position, used for peek() method - - lastCharLen int // length of last character in bytes - lastLineLen int // length of last line in characters (for correct column reporting) - - tokStart int // token text start position - tokEnd int // token text end position - - // Error is called for each error encountered. If no Error - // function is set, the error is reported to os.Stderr. - Error func(pos token.Pos, msg string) - - // ErrorCount is incremented by one for each error encountered. - ErrorCount int - - // tokPos is the start position of most recently scanned token; set by - // Scan. The Filename field is always left untouched by the Scanner. If - // an error is reported (via Error) and Position is invalid, the scanner is - // not inside a token. - tokPos token.Pos -} - -// New creates and initializes a new instance of Scanner using src as -// its source content. -func New(src []byte) *Scanner { - // even though we accept a src, we read from a io.Reader compatible type - // (*bytes.Buffer). So in the future we might easily change it to streaming - // read. - b := bytes.NewBuffer(src) - s := &Scanner{ - buf: b, - src: src, - } - - // srcPosition always starts with 1 - s.srcPos.Line = 1 - return s -} - -// next reads the next rune from the bufferred reader. 
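// A minimal sketch of the printer Config shown above: Fprint with a non-zero
// SpacesWidth indents with spaces instead of the default tab. The input and
// width are illustrative.
package main

import (
	"os"

	"github.com/hashicorp/hcl/hcl/parser"
	"github.com/hashicorp/hcl/hcl/printer"
)

func main() {
	node, err := parser.Parse([]byte(`server { host = "a" }`))
	if err != nil {
		panic(err)
	}
	cfg := &printer.Config{SpacesWidth: 4}
	if err := cfg.Fprint(os.Stdout, node); err != nil {
		panic(err)
	}
}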
Returns the rune(0) if -// an error occurs (or io.EOF is returned). -func (s *Scanner) next() rune { - ch, size, err := s.buf.ReadRune() - if err != nil { - // advance for error reporting - s.srcPos.Column++ - s.srcPos.Offset += size - s.lastCharLen = size - return eof - } - - // remember last position - s.prevPos = s.srcPos - - s.srcPos.Column++ - s.lastCharLen = size - s.srcPos.Offset += size - - if ch == utf8.RuneError && size == 1 { - s.err("illegal UTF-8 encoding") - return ch - } - - if ch == '\n' { - s.srcPos.Line++ - s.lastLineLen = s.srcPos.Column - s.srcPos.Column = 0 - } - - if ch == '\x00' { - s.err("unexpected null character (0x00)") - return eof - } - - if ch == '\uE123' { - s.err("unicode code point U+E123 reserved for internal use") - return utf8.RuneError - } - - // debug - // fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column) - return ch -} - -// unread unreads the previous read Rune and updates the source position -func (s *Scanner) unread() { - if err := s.buf.UnreadRune(); err != nil { - panic(err) // this is user fault, we should catch it - } - s.srcPos = s.prevPos // put back last position -} - -// peek returns the next rune without advancing the reader. -func (s *Scanner) peek() rune { - peek, _, err := s.buf.ReadRune() - if err != nil { - return eof - } - - s.buf.UnreadRune() - return peek -} - -// Scan scans the next token and returns the token. -func (s *Scanner) Scan() token.Token { - ch := s.next() - - // skip white space - for isWhitespace(ch) { - ch = s.next() - } - - var tok token.Type - - // token text markings - s.tokStart = s.srcPos.Offset - s.lastCharLen - - // token position, initial next() is moving the offset by one(size of rune - // actually), though we are interested with the starting point - s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen - if s.srcPos.Column > 0 { - // common case: last character was not a '\n' - s.tokPos.Line = s.srcPos.Line - s.tokPos.Column = s.srcPos.Column - } else { - // last character was a '\n' - // (we cannot be at the beginning of the source - // since we have called next() at least once) - s.tokPos.Line = s.srcPos.Line - 1 - s.tokPos.Column = s.lastLineLen - } - - switch { - case isLetter(ch): - tok = token.IDENT - lit := s.scanIdentifier() - if lit == "true" || lit == "false" { - tok = token.BOOL - } - case isDecimal(ch): - tok = s.scanNumber(ch) - default: - switch ch { - case eof: - tok = token.EOF - case '"': - tok = token.STRING - s.scanString() - case '#', '/': - tok = token.COMMENT - s.scanComment(ch) - case '.': - tok = token.PERIOD - ch = s.peek() - if isDecimal(ch) { - tok = token.FLOAT - ch = s.scanMantissa(ch) - ch = s.scanExponent(ch) - } - case '<': - tok = token.HEREDOC - s.scanHeredoc() - case '[': - tok = token.LBRACK - case ']': - tok = token.RBRACK - case '{': - tok = token.LBRACE - case '}': - tok = token.RBRACE - case ',': - tok = token.COMMA - case '=': - tok = token.ASSIGN - case '+': - tok = token.ADD - case '-': - if isDecimal(s.peek()) { - ch := s.next() - tok = s.scanNumber(ch) - } else { - tok = token.SUB - } - default: - s.err("illegal char") - } - } - - // finish token ending - s.tokEnd = s.srcPos.Offset - - // create token literal - var tokenText string - if s.tokStart >= 0 { - tokenText = string(s.src[s.tokStart:s.tokEnd]) - } - s.tokStart = s.tokEnd // ensure idempotency of tokenText() call - - return token.Token{ - Type: tok, - Pos: s.tokPos, - Text: tokenText, - } -} - -func (s *Scanner) scanComment(ch rune) { - // single line comments - if ch == '#' 
|| (ch == '/' && s.peek() != '*') { - if ch == '/' && s.peek() != '/' { - s.err("expected '/' for comment") - return - } - - ch = s.next() - for ch != '\n' && ch >= 0 && ch != eof { - ch = s.next() - } - if ch != eof && ch >= 0 { - s.unread() - } - return - } - - // be sure we get the character after /* This allows us to find comment's - // that are not erminated - if ch == '/' { - s.next() - ch = s.next() // read character after "/*" - } - - // look for /* - style comments - for { - if ch < 0 || ch == eof { - s.err("comment not terminated") - break - } - - ch0 := ch - ch = s.next() - if ch0 == '*' && ch == '/' { - break - } - } -} - -// scanNumber scans a HCL number definition starting with the given rune -func (s *Scanner) scanNumber(ch rune) token.Type { - if ch == '0' { - // check for hexadecimal, octal or float - ch = s.next() - if ch == 'x' || ch == 'X' { - // hexadecimal - ch = s.next() - found := false - for isHexadecimal(ch) { - ch = s.next() - found = true - } - - if !found { - s.err("illegal hexadecimal number") - } - - if ch != eof { - s.unread() - } - - return token.NUMBER - } - - // now it's either something like: 0421(octal) or 0.1231(float) - illegalOctal := false - for isDecimal(ch) { - ch = s.next() - if ch == '8' || ch == '9' { - // this is just a possibility. For example 0159 is illegal, but - // 0159.23 is valid. So we mark a possible illegal octal. If - // the next character is not a period, we'll print the error. - illegalOctal = true - } - } - - if ch == 'e' || ch == 'E' { - ch = s.scanExponent(ch) - return token.FLOAT - } - - if ch == '.' { - ch = s.scanFraction(ch) - - if ch == 'e' || ch == 'E' { - ch = s.next() - ch = s.scanExponent(ch) - } - return token.FLOAT - } - - if illegalOctal { - s.err("illegal octal number") - } - - if ch != eof { - s.unread() - } - return token.NUMBER - } - - s.scanMantissa(ch) - ch = s.next() // seek forward - if ch == 'e' || ch == 'E' { - ch = s.scanExponent(ch) - return token.FLOAT - } - - if ch == '.' { - ch = s.scanFraction(ch) - if ch == 'e' || ch == 'E' { - ch = s.next() - ch = s.scanExponent(ch) - } - return token.FLOAT - } - - if ch != eof { - s.unread() - } - return token.NUMBER -} - -// scanMantissa scans the mantissa beginning from the rune. It returns the next -// non decimal rune. It's used to determine wheter it's a fraction or exponent. -func (s *Scanner) scanMantissa(ch rune) rune { - scanned := false - for isDecimal(ch) { - ch = s.next() - scanned = true - } - - if scanned && ch != eof { - s.unread() - } - return ch -} - -// scanFraction scans the fraction after the '.' rune -func (s *Scanner) scanFraction(ch rune) rune { - if ch == '.' { - ch = s.peek() // we peek just to see if we can move forward - ch = s.scanMantissa(ch) - } - return ch -} - -// scanExponent scans the remaining parts of an exponent after the 'e' or 'E' -// rune. 
-func (s *Scanner) scanExponent(ch rune) rune {
-	if ch == 'e' || ch == 'E' {
-		ch = s.next()
-		if ch == '-' || ch == '+' {
-			ch = s.next()
-		}
-		ch = s.scanMantissa(ch)
-	}
-	return ch
-}
-
-// scanHeredoc scans a heredoc string
-func (s *Scanner) scanHeredoc() {
-	// Scan the second '<' in example: '<<EOF'
-	if s.next() != '<' {
-		s.err("heredoc expected second '<', didn't see it")
-		return
-	}
-
-	// Get the original offset so we can read just the heredoc ident
-	offs := s.srcPos.Offset
-
-	// Scan the identifier
-	ch := s.next()
-
-	// Indented heredoc syntax
-	if ch == '-' {
-		ch = s.next()
-	}
-
-	for isLetter(ch) || isDigit(ch) {
-		ch = s.next()
-	}
-
-	// If we reached an EOF then that is not good
-	if ch == eof {
-		s.err("heredoc not terminated")
-		return
-	}
-
-	// Ignore the '\r' in Windows line endings
-	if ch == '\r' {
-		if s.peek() == '\n' {
-			ch = s.next()
-		}
-	}
-
-	// If we didn't reach a newline then that is also not good
-	if ch != '\n' {
-		s.err("invalid characters in heredoc anchor")
-		return
-	}
-
-	// Read the identifier
-	identBytes := s.src[offs : s.srcPos.Offset-s.lastCharLen]
-	if len(identBytes) == 0 || (len(identBytes) == 1 && identBytes[0] == '-') {
-		s.err("zero-length heredoc anchor")
-		return
-	}
-
-	var identRegexp *regexp.Regexp
-	if identBytes[0] == '-' {
-		identRegexp = regexp.MustCompile(fmt.Sprintf(`^[[:space:]]*%s\r*\z`, identBytes[1:]))
-	} else {
-		identRegexp = regexp.MustCompile(fmt.Sprintf(`^[[:space:]]*%s\r*\z`, identBytes))
-	}
-
-	// Read the actual string value
-	lineStart := s.srcPos.Offset
-	for {
-		ch := s.next()
-
-		// Special newline handling.
-		if ch == '\n' {
-			// Math is fast, so we first compare the byte counts to see if we have
-			// a chance of seeing the same identifier - if the length is less than
-			// the number of bytes in the identifier, this cannot be a valid terminator.
-			lineBytesLen := s.srcPos.Offset - s.lastCharLen - lineStart
-			if lineBytesLen >= len(identBytes) && identRegexp.Match(s.src[lineStart:s.srcPos.Offset-s.lastCharLen]) {
-				break
-			}
-
-			// Not an anchor match, record the start of a new line
-			lineStart = s.srcPos.Offset
-		}
-
-		if ch == eof {
-			s.err("heredoc not terminated")
-			return
-		}
-	}
-
-	return
-}
-
-// scanString scans a quoted string
-func (s *Scanner) scanString() {
-	braces := 0
-	for {
-		// '"' opening already consumed
-		// read character after quote
-		ch := s.next()
-
-		if (ch == '\n' && braces == 0) || ch < 0 || ch == eof {
-			s.err("literal not terminated")
-			return
-		}
-
-		if ch == '"' && braces == 0 {
-			break
-		}
-
-		// If we're going into a ${} then we can ignore quotes for awhile
-		if braces == 0 && ch == '$' && s.peek() == '{' {
-			braces++
-			s.next()
-		} else if braces > 0 && ch == '{' {
-			braces++
-		}
-		if braces > 0 && ch == '}' {
-			braces--
-		}
-
-		if ch == '\\' {
-			s.scanEscape()
-		}
-	}
-
-	return
-}
-
-// scanEscape scans an escape sequence
-func (s *Scanner) scanEscape() rune {
-	// http://en.cppreference.com/w/cpp/language/escape
-	ch := s.next() // read character after '/'
-	switch ch {
-	case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"':
-		// nothing to do
-	case '0', '1', '2', '3', '4', '5', '6', '7':
-		// octal notation
-		ch = s.scanDigits(ch, 8, 3)
-	case 'x':
-		// hexademical notation
-		ch = s.scanDigits(s.next(), 16, 2)
-	case 'u':
-		// universal character name
-		ch = s.scanDigits(s.next(), 16, 4)
-	case 'U':
-		// universal character name
-		ch = s.scanDigits(s.next(), 16, 8)
-	default:
-		s.err("illegal char escape")
-	}
-	return ch
-}
-
-// scanDigits scans a rune with the given base for n times. For example an
-// octal notation \184 would yield in scanDigits(ch, 8, 3)
-func (s *Scanner) scanDigits(ch rune, base, n int) rune {
-	start := n
-	for n > 0 && digitVal(ch) < base {
-		ch = s.next()
-		if ch == eof {
-			// If we see an EOF, we halt any more scanning of digits
-			// immediately.
-			break
-		}
-
-		n--
-	}
-	if n > 0 {
-		s.err("illegal char escape")
-	}
-
-	if n != start && ch != eof {
-		// we scanned all digits, put the last non digit char back,
-		// only if we read anything at all
-		s.unread()
-	}
-
-	return ch
-}
-
-// scanIdentifier scans an identifier and returns the literal string
-func (s *Scanner) scanIdentifier() string {
-	offs := s.srcPos.Offset - s.lastCharLen
-	ch := s.next()
-	for isLetter(ch) || isDigit(ch) || ch == '-' || ch == '.' {
-		ch = s.next()
-	}
-
-	if ch != eof {
-		s.unread() // we got identifier, put back latest char
-	}
-
-	return string(s.src[offs:s.srcPos.Offset])
-}
-
-// recentPosition returns the position of the character immediately after the
-// character or token returned by the last call to Scan.
-func (s *Scanner) recentPosition() (pos token.Pos) { - pos.Offset = s.srcPos.Offset - s.lastCharLen - switch { - case s.srcPos.Column > 0: - // common case: last character was not a '\n' - pos.Line = s.srcPos.Line - pos.Column = s.srcPos.Column - case s.lastLineLen > 0: - // last character was a '\n' - // (we cannot be at the beginning of the source - // since we have called next() at least once) - pos.Line = s.srcPos.Line - 1 - pos.Column = s.lastLineLen - default: - // at the beginning of the source - pos.Line = 1 - pos.Column = 1 - } - return -} - -// err prints the error of any scanning to s.Error function. If the function is -// not defined, by default it prints them to os.Stderr -func (s *Scanner) err(msg string) { - s.ErrorCount++ - pos := s.recentPosition() - - if s.Error != nil { - s.Error(pos, msg) - return - } - - fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg) -} - -// isHexadecimal returns true if the given rune is a letter -func isLetter(ch rune) bool { - return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch) -} - -// isDigit returns true if the given rune is a decimal digit -func isDigit(ch rune) bool { - return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch) -} - -// isDecimal returns true if the given rune is a decimal number -func isDecimal(ch rune) bool { - return '0' <= ch && ch <= '9' -} - -// isHexadecimal returns true if the given rune is an hexadecimal number -func isHexadecimal(ch rune) bool { - return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F' -} - -// isWhitespace returns true if the rune is a space, tab, newline or carriage return -func isWhitespace(ch rune) bool { - return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' -} - -// digitVal returns the integer value of a given octal,decimal or hexadecimal rune -func digitVal(ch rune) int { - switch { - case '0' <= ch && ch <= '9': - return int(ch - '0') - case 'a' <= ch && ch <= 'f': - return int(ch - 'a' + 10) - case 'A' <= ch && ch <= 'F': - return int(ch - 'A' + 10) - } - return 16 // larger than any legal digit val -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go deleted file mode 100644 index 5f981eaa2..000000000 --- a/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go +++ /dev/null @@ -1,241 +0,0 @@ -package strconv - -import ( - "errors" - "unicode/utf8" -) - -// ErrSyntax indicates that a value does not have the right syntax for the target type. -var ErrSyntax = errors.New("invalid syntax") - -// Unquote interprets s as a single-quoted, double-quoted, -// or backquoted Go string literal, returning the string value -// that s quotes. (If s is single-quoted, it would be a Go -// character literal; Unquote returns the corresponding -// one-character string.) -func Unquote(s string) (t string, err error) { - n := len(s) - if n < 2 { - return "", ErrSyntax - } - quote := s[0] - if quote != s[n-1] { - return "", ErrSyntax - } - s = s[1 : n-1] - - if quote != '"' { - return "", ErrSyntax - } - if !contains(s, '$') && !contains(s, '{') && contains(s, '\n') { - return "", ErrSyntax - } - - // Is it trivial? Avoid allocation. 
- if !contains(s, '\\') && !contains(s, quote) && !contains(s, '$') { - switch quote { - case '"': - return s, nil - case '\'': - r, size := utf8.DecodeRuneInString(s) - if size == len(s) && (r != utf8.RuneError || size != 1) { - return s, nil - } - } - } - - var runeTmp [utf8.UTFMax]byte - buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations. - for len(s) > 0 { - // If we're starting a '${}' then let it through un-unquoted. - // Specifically: we don't unquote any characters within the `${}` - // section. - if s[0] == '$' && len(s) > 1 && s[1] == '{' { - buf = append(buf, '$', '{') - s = s[2:] - - // Continue reading until we find the closing brace, copying as-is - braces := 1 - for len(s) > 0 && braces > 0 { - r, size := utf8.DecodeRuneInString(s) - if r == utf8.RuneError { - return "", ErrSyntax - } - - s = s[size:] - - n := utf8.EncodeRune(runeTmp[:], r) - buf = append(buf, runeTmp[:n]...) - - switch r { - case '{': - braces++ - case '}': - braces-- - } - } - if braces != 0 { - return "", ErrSyntax - } - if len(s) == 0 { - // If there's no string left, we're done! - break - } else { - // If there's more left, we need to pop back up to the top of the loop - // in case there's another interpolation in this string. - continue - } - } - - if s[0] == '\n' { - return "", ErrSyntax - } - - c, multibyte, ss, err := unquoteChar(s, quote) - if err != nil { - return "", err - } - s = ss - if c < utf8.RuneSelf || !multibyte { - buf = append(buf, byte(c)) - } else { - n := utf8.EncodeRune(runeTmp[:], c) - buf = append(buf, runeTmp[:n]...) - } - if quote == '\'' && len(s) != 0 { - // single-quoted must be single character - return "", ErrSyntax - } - } - return string(buf), nil -} - -// contains reports whether the string contains the byte c. -func contains(s string, c byte) bool { - for i := 0; i < len(s); i++ { - if s[i] == c { - return true - } - } - return false -} - -func unhex(b byte) (v rune, ok bool) { - c := rune(b) - switch { - case '0' <= c && c <= '9': - return c - '0', true - case 'a' <= c && c <= 'f': - return c - 'a' + 10, true - case 'A' <= c && c <= 'F': - return c - 'A' + 10, true - } - return -} - -func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) { - // easy cases - switch c := s[0]; { - case c == quote && (quote == '\'' || quote == '"'): - err = ErrSyntax - return - case c >= utf8.RuneSelf: - r, size := utf8.DecodeRuneInString(s) - return r, true, s[size:], nil - case c != '\\': - return rune(s[0]), false, s[1:], nil - } - - // hard case: c is backslash - if len(s) <= 1 { - err = ErrSyntax - return - } - c := s[1] - s = s[2:] - - switch c { - case 'a': - value = '\a' - case 'b': - value = '\b' - case 'f': - value = '\f' - case 'n': - value = '\n' - case 'r': - value = '\r' - case 't': - value = '\t' - case 'v': - value = '\v' - case 'x', 'u', 'U': - n := 0 - switch c { - case 'x': - n = 2 - case 'u': - n = 4 - case 'U': - n = 8 - } - var v rune - if len(s) < n { - err = ErrSyntax - return - } - for j := 0; j < n; j++ { - x, ok := unhex(s[j]) - if !ok { - err = ErrSyntax - return - } - v = v<<4 | x - } - s = s[n:] - if c == 'x' { - // single-byte string, possibly not UTF-8 - value = v - break - } - if v > utf8.MaxRune { - err = ErrSyntax - return - } - value = v - multibyte = true - case '0', '1', '2', '3', '4', '5', '6', '7': - v := rune(c) - '0' - if len(s) < 2 { - err = ErrSyntax - return - } - for j := 0; j < 2; j++ { // one digit already; two more - x := rune(s[j]) - '0' - if x < 0 || x > 7 { - err = ErrSyntax - return 
- } - v = (v << 3) | x - } - s = s[2:] - if v > 255 { - err = ErrSyntax - return - } - value = v - case '\\': - value = '\\' - case '\'', '"': - if c != quote { - err = ErrSyntax - return - } - value = rune(c) - default: - err = ErrSyntax - return - } - tail = s - return -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/token/position.go b/vendor/github.com/hashicorp/hcl/hcl/token/position.go deleted file mode 100644 index 59c1bb72d..000000000 --- a/vendor/github.com/hashicorp/hcl/hcl/token/position.go +++ /dev/null @@ -1,46 +0,0 @@ -package token - -import "fmt" - -// Pos describes an arbitrary source position -// including the file, line, and column location. -// A Position is valid if the line number is > 0. -type Pos struct { - Filename string // filename, if any - Offset int // offset, starting at 0 - Line int // line number, starting at 1 - Column int // column number, starting at 1 (character count) -} - -// IsValid returns true if the position is valid. -func (p *Pos) IsValid() bool { return p.Line > 0 } - -// String returns a string in one of several forms: -// -// file:line:column valid position with file name -// line:column valid position without file name -// file invalid position with file name -// - invalid position without file name -func (p Pos) String() string { - s := p.Filename - if p.IsValid() { - if s != "" { - s += ":" - } - s += fmt.Sprintf("%d:%d", p.Line, p.Column) - } - if s == "" { - s = "-" - } - return s -} - -// Before reports whether the position p is before u. -func (p Pos) Before(u Pos) bool { - return u.Offset > p.Offset || u.Line > p.Line -} - -// After reports whether the position p is after u. -func (p Pos) After(u Pos) bool { - return u.Offset < p.Offset || u.Line < p.Line -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/token/token.go b/vendor/github.com/hashicorp/hcl/hcl/token/token.go deleted file mode 100644 index e37c0664e..000000000 --- a/vendor/github.com/hashicorp/hcl/hcl/token/token.go +++ /dev/null @@ -1,219 +0,0 @@ -// Package token defines constants representing the lexical tokens for HCL -// (HashiCorp Configuration Language) -package token - -import ( - "fmt" - "strconv" - "strings" - - hclstrconv "github.com/hashicorp/hcl/hcl/strconv" -) - -// Token defines a single HCL token which can be obtained via the Scanner -type Token struct { - Type Type - Pos Pos - Text string - JSON bool -} - -// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language) -type Type int - -const ( - // Special tokens - ILLEGAL Type = iota - EOF - COMMENT - - identifier_beg - IDENT // literals - literal_beg - NUMBER // 12345 - FLOAT // 123.45 - BOOL // true,false - STRING // "abc" - HEREDOC // < 0 { - // Pop the current item - n := len(frontier) - item := frontier[n-1] - frontier = frontier[:n-1] - - switch v := item.Val.(type) { - case *ast.ObjectType: - items, frontier = flattenObjectType(v, item, items, frontier) - case *ast.ListType: - items, frontier = flattenListType(v, item, items, frontier) - default: - items = append(items, item) - } - } - - // Reverse the list since the frontier model runs things backwards - for i := len(items)/2 - 1; i >= 0; i-- { - opp := len(items) - 1 - i - items[i], items[opp] = items[opp], items[i] - } - - // Done! 
Set the original items - list.Items = items - return n, true - }) -} - -func flattenListType( - ot *ast.ListType, - item *ast.ObjectItem, - items []*ast.ObjectItem, - frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) { - // If the list is empty, keep the original list - if len(ot.List) == 0 { - items = append(items, item) - return items, frontier - } - - // All the elements of this object must also be objects! - for _, subitem := range ot.List { - if _, ok := subitem.(*ast.ObjectType); !ok { - items = append(items, item) - return items, frontier - } - } - - // Great! We have a match go through all the items and flatten - for _, elem := range ot.List { - // Add it to the frontier so that we can recurse - frontier = append(frontier, &ast.ObjectItem{ - Keys: item.Keys, - Assign: item.Assign, - Val: elem, - LeadComment: item.LeadComment, - LineComment: item.LineComment, - }) - } - - return items, frontier -} - -func flattenObjectType( - ot *ast.ObjectType, - item *ast.ObjectItem, - items []*ast.ObjectItem, - frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) { - // If the list has no items we do not have to flatten anything - if ot.List.Items == nil { - items = append(items, item) - return items, frontier - } - - // All the elements of this object must also be objects! - for _, subitem := range ot.List.Items { - if _, ok := subitem.Val.(*ast.ObjectType); !ok { - items = append(items, item) - return items, frontier - } - } - - // Great! We have a match go through all the items and flatten - for _, subitem := range ot.List.Items { - // Copy the new key - keys := make([]*ast.ObjectKey, len(item.Keys)+len(subitem.Keys)) - copy(keys, item.Keys) - copy(keys[len(item.Keys):], subitem.Keys) - - // Add it to the frontier so that we can recurse - frontier = append(frontier, &ast.ObjectItem{ - Keys: keys, - Assign: item.Assign, - Val: subitem.Val, - LeadComment: item.LeadComment, - LineComment: item.LineComment, - }) - } - - return items, frontier -} diff --git a/vendor/github.com/hashicorp/hcl/json/parser/parser.go b/vendor/github.com/hashicorp/hcl/json/parser/parser.go deleted file mode 100644 index 125a5f072..000000000 --- a/vendor/github.com/hashicorp/hcl/json/parser/parser.go +++ /dev/null @@ -1,313 +0,0 @@ -package parser - -import ( - "errors" - "fmt" - - "github.com/hashicorp/hcl/hcl/ast" - hcltoken "github.com/hashicorp/hcl/hcl/token" - "github.com/hashicorp/hcl/json/scanner" - "github.com/hashicorp/hcl/json/token" -) - -type Parser struct { - sc *scanner.Scanner - - // Last read token - tok token.Token - commaPrev token.Token - - enableTrace bool - indent int - n int // buffer size (max = 1) -} - -func newParser(src []byte) *Parser { - return &Parser{ - sc: scanner.New(src), - } -} - -// Parse returns the fully parsed source and returns the abstract syntax tree. -func Parse(src []byte) (*ast.File, error) { - p := newParser(src) - return p.Parse() -} - -var errEofToken = errors.New("EOF token found") - -// Parse returns the fully parsed source and returns the abstract syntax tree. 
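Parse (below) wires the JSON scanner to the flattening pass above, which is what makes JSON input decode like natively written HCL: a chain of nested single-key objects collapses into one ObjectItem whose Keys slice is the concatenated path. A minimal sketch of observing that from outside the package, using only APIs visible in this diff; the exact key token text (JSON keys keeping their quotes) is my assumption:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/hcl/ast"
	jsonParser "github.com/hashicorp/hcl/json/parser"
)

func main() {
	// {"foo": {"bar": {"baz": 1}}} arrives as nested single-key objects;
	// flattenObjects rewrites it as one item keyed by the path ["foo" "bar"]
	// whose value is the innermost object, mirroring HCL's `foo bar { baz = 1 }`.
	file, err := jsonParser.Parse([]byte(`{"foo": {"bar": {"baz": 1}}}`))
	if err != nil {
		panic(err)
	}

	list, ok := file.Node.(*ast.ObjectList)
	if !ok {
		panic("root is not an *ast.ObjectList")
	}
	for _, item := range list.Items {
		for _, key := range item.Keys {
			fmt.Printf("%s ", key.Token.Text) // assumed: JSON keys keep their quotes
		}
		fmt.Printf("-> %T\n", item.Val) // expected: *ast.ObjectType
	}
}
```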
-func (p *Parser) Parse() (*ast.File, error) { - f := &ast.File{} - var err, scerr error - p.sc.Error = func(pos token.Pos, msg string) { - scerr = fmt.Errorf("%s: %s", pos, msg) - } - - // The root must be an object in JSON - object, err := p.object() - if scerr != nil { - return nil, scerr - } - if err != nil { - return nil, err - } - - // We make our final node an object list so it is more HCL compatible - f.Node = object.List - - // Flatten it, which finds patterns and turns them into more HCL-like - // AST trees. - flattenObjects(f.Node) - - return f, nil -} - -func (p *Parser) objectList() (*ast.ObjectList, error) { - defer un(trace(p, "ParseObjectList")) - node := &ast.ObjectList{} - - for { - n, err := p.objectItem() - if err == errEofToken { - break // we are finished - } - - // we don't return a nil node, because might want to use already - // collected items. - if err != nil { - return node, err - } - - node.Add(n) - - // Check for a followup comma. If it isn't a comma, then we're done - if tok := p.scan(); tok.Type != token.COMMA { - break - } - } - - return node, nil -} - -// objectItem parses a single object item -func (p *Parser) objectItem() (*ast.ObjectItem, error) { - defer un(trace(p, "ParseObjectItem")) - - keys, err := p.objectKey() - if err != nil { - return nil, err - } - - o := &ast.ObjectItem{ - Keys: keys, - } - - switch p.tok.Type { - case token.COLON: - pos := p.tok.Pos - o.Assign = hcltoken.Pos{ - Filename: pos.Filename, - Offset: pos.Offset, - Line: pos.Line, - Column: pos.Column, - } - - o.Val, err = p.objectValue() - if err != nil { - return nil, err - } - } - - return o, nil -} - -// objectKey parses an object key and returns a ObjectKey AST -func (p *Parser) objectKey() ([]*ast.ObjectKey, error) { - keyCount := 0 - keys := make([]*ast.ObjectKey, 0) - - for { - tok := p.scan() - switch tok.Type { - case token.EOF: - return nil, errEofToken - case token.STRING: - keyCount++ - keys = append(keys, &ast.ObjectKey{ - Token: p.tok.HCLToken(), - }) - case token.COLON: - // If we have a zero keycount it means that we never got - // an object key, i.e. `{ :`. This is a syntax error. - if keyCount == 0 { - return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type) - } - - // Done - return keys, nil - case token.ILLEGAL: - return nil, errors.New("illegal") - default: - return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type) - } - } -} - -// object parses any type of object, such as number, bool, string, object or -// list. -func (p *Parser) objectValue() (ast.Node, error) { - defer un(trace(p, "ParseObjectValue")) - tok := p.scan() - - switch tok.Type { - case token.NUMBER, token.FLOAT, token.BOOL, token.NULL, token.STRING: - return p.literalType() - case token.LBRACE: - return p.objectType() - case token.LBRACK: - return p.listType() - case token.EOF: - return nil, errEofToken - } - - return nil, fmt.Errorf("Expected object value, got unknown token: %+v", tok) -} - -// object parses any type of object, such as number, bool, string, object or -// list. 
-func (p *Parser) object() (*ast.ObjectType, error) { - defer un(trace(p, "ParseType")) - tok := p.scan() - - switch tok.Type { - case token.LBRACE: - return p.objectType() - case token.EOF: - return nil, errEofToken - } - - return nil, fmt.Errorf("Expected object, got unknown token: %+v", tok) -} - -// objectType parses an object type and returns a ObjectType AST -func (p *Parser) objectType() (*ast.ObjectType, error) { - defer un(trace(p, "ParseObjectType")) - - // we assume that the currently scanned token is a LBRACE - o := &ast.ObjectType{} - - l, err := p.objectList() - - // if we hit RBRACE, we are good to go (means we parsed all Items), if it's - // not a RBRACE, it's an syntax error and we just return it. - if err != nil && p.tok.Type != token.RBRACE { - return nil, err - } - - o.List = l - return o, nil -} - -// listType parses a list type and returns a ListType AST -func (p *Parser) listType() (*ast.ListType, error) { - defer un(trace(p, "ParseListType")) - - // we assume that the currently scanned token is a LBRACK - l := &ast.ListType{} - - for { - tok := p.scan() - switch tok.Type { - case token.NUMBER, token.FLOAT, token.STRING: - node, err := p.literalType() - if err != nil { - return nil, err - } - - l.Add(node) - case token.COMMA: - continue - case token.LBRACE: - node, err := p.objectType() - if err != nil { - return nil, err - } - - l.Add(node) - case token.BOOL: - // TODO(arslan) should we support? not supported by HCL yet - case token.LBRACK: - // TODO(arslan) should we support nested lists? Even though it's - // written in README of HCL, it's not a part of the grammar - // (not defined in parse.y) - case token.RBRACK: - // finished - return l, nil - default: - return nil, fmt.Errorf("unexpected token while parsing list: %s", tok.Type) - } - - } -} - -// literalType parses a literal type and returns a LiteralType AST -func (p *Parser) literalType() (*ast.LiteralType, error) { - defer un(trace(p, "ParseLiteral")) - - return &ast.LiteralType{ - Token: p.tok.HCLToken(), - }, nil -} - -// scan returns the next token from the underlying scanner. If a token has -// been unscanned then read that instead. -func (p *Parser) scan() token.Token { - // If we have a token on the buffer, then return it. - if p.n != 0 { - p.n = 0 - return p.tok - } - - p.tok = p.sc.Scan() - return p.tok -} - -// unscan pushes the previously read token back onto the buffer. -func (p *Parser) unscan() { - p.n = 1 -} - -// ---------------------------------------------------------------------------- -// Parsing support - -func (p *Parser) printTrace(a ...interface{}) { - if !p.enableTrace { - return - } - - const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " - const n = len(dots) - fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column) - - i := 2 * p.indent - for i > n { - fmt.Print(dots) - i -= n - } - // i <= n - fmt.Print(dots[0:i]) - fmt.Println(a...) 
-} - -func trace(p *Parser, msg string) *Parser { - p.printTrace(msg, "(") - p.indent++ - return p -} - -// Usage pattern: defer un(trace(p, "...")) -func un(p *Parser) { - p.indent-- - p.printTrace(")") -} diff --git a/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go deleted file mode 100644 index fe3f0f095..000000000 --- a/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go +++ /dev/null @@ -1,451 +0,0 @@ -package scanner - -import ( - "bytes" - "fmt" - "os" - "unicode" - "unicode/utf8" - - "github.com/hashicorp/hcl/json/token" -) - -// eof represents a marker rune for the end of the reader. -const eof = rune(0) - -// Scanner defines a lexical scanner -type Scanner struct { - buf *bytes.Buffer // Source buffer for advancing and scanning - src []byte // Source buffer for immutable access - - // Source Position - srcPos token.Pos // current position - prevPos token.Pos // previous position, used for peek() method - - lastCharLen int // length of last character in bytes - lastLineLen int // length of last line in characters (for correct column reporting) - - tokStart int // token text start position - tokEnd int // token text end position - - // Error is called for each error encountered. If no Error - // function is set, the error is reported to os.Stderr. - Error func(pos token.Pos, msg string) - - // ErrorCount is incremented by one for each error encountered. - ErrorCount int - - // tokPos is the start position of most recently scanned token; set by - // Scan. The Filename field is always left untouched by the Scanner. If - // an error is reported (via Error) and Position is invalid, the scanner is - // not inside a token. - tokPos token.Pos -} - -// New creates and initializes a new instance of Scanner using src as -// its source content. -func New(src []byte) *Scanner { - // even though we accept a src, we read from a io.Reader compatible type - // (*bytes.Buffer). So in the future we might easily change it to streaming - // read. - b := bytes.NewBuffer(src) - s := &Scanner{ - buf: b, - src: src, - } - - // srcPosition always starts with 1 - s.srcPos.Line = 1 - return s -} - -// next reads the next rune from the bufferred reader. Returns the rune(0) if -// an error occurs (or io.EOF is returned). -func (s *Scanner) next() rune { - ch, size, err := s.buf.ReadRune() - if err != nil { - // advance for error reporting - s.srcPos.Column++ - s.srcPos.Offset += size - s.lastCharLen = size - return eof - } - - if ch == utf8.RuneError && size == 1 { - s.srcPos.Column++ - s.srcPos.Offset += size - s.lastCharLen = size - s.err("illegal UTF-8 encoding") - return ch - } - - // remember last position - s.prevPos = s.srcPos - - s.srcPos.Column++ - s.lastCharLen = size - s.srcPos.Offset += size - - if ch == '\n' { - s.srcPos.Line++ - s.lastLineLen = s.srcPos.Column - s.srcPos.Column = 0 - } - - // debug - // fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column) - return ch -} - -// unread unreads the previous read Rune and updates the source position -func (s *Scanner) unread() { - if err := s.buf.UnreadRune(); err != nil { - panic(err) // this is user fault, we should catch it - } - s.srcPos = s.prevPos // put back last position -} - -// peek returns the next rune without advancing the reader. -func (s *Scanner) peek() rune { - peek, _, err := s.buf.ReadRune() - if err != nil { - return eof - } - - s.buf.UnreadRune() - return peek -} - -// Scan scans the next token and returns the token. 
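Driving this scanner by hand makes the two-buffer design concrete: the *bytes.Buffer feeds next()/peek(), while the immutable src slice supplies the token text between tokStart and tokEnd. A small, hedged sketch using only the exported API shown in this diff (the input literal is arbitrary):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/json/scanner"
	"github.com/hashicorp/hcl/json/token"
)

func main() {
	s := scanner.New([]byte(`{"enabled": true, "count": 42}`))
	for {
		tok := s.Scan()
		if tok.Type == token.EOF {
			break
		}
		// Pos.String() renders line:column; Text is sliced from the immutable src.
		fmt.Printf("%-6s %q at %s\n", tok.Type, tok.Text, tok.Pos)
	}
	if s.ErrorCount > 0 {
		fmt.Println("scan errors:", s.ErrorCount)
	}
}
```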
-func (s *Scanner) Scan() token.Token { - ch := s.next() - - // skip white space - for isWhitespace(ch) { - ch = s.next() - } - - var tok token.Type - - // token text markings - s.tokStart = s.srcPos.Offset - s.lastCharLen - - // token position, initial next() is moving the offset by one(size of rune - // actually), though we are interested with the starting point - s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen - if s.srcPos.Column > 0 { - // common case: last character was not a '\n' - s.tokPos.Line = s.srcPos.Line - s.tokPos.Column = s.srcPos.Column - } else { - // last character was a '\n' - // (we cannot be at the beginning of the source - // since we have called next() at least once) - s.tokPos.Line = s.srcPos.Line - 1 - s.tokPos.Column = s.lastLineLen - } - - switch { - case isLetter(ch): - lit := s.scanIdentifier() - if lit == "true" || lit == "false" { - tok = token.BOOL - } else if lit == "null" { - tok = token.NULL - } else { - s.err("illegal char") - } - case isDecimal(ch): - tok = s.scanNumber(ch) - default: - switch ch { - case eof: - tok = token.EOF - case '"': - tok = token.STRING - s.scanString() - case '.': - tok = token.PERIOD - ch = s.peek() - if isDecimal(ch) { - tok = token.FLOAT - ch = s.scanMantissa(ch) - ch = s.scanExponent(ch) - } - case '[': - tok = token.LBRACK - case ']': - tok = token.RBRACK - case '{': - tok = token.LBRACE - case '}': - tok = token.RBRACE - case ',': - tok = token.COMMA - case ':': - tok = token.COLON - case '-': - if isDecimal(s.peek()) { - ch := s.next() - tok = s.scanNumber(ch) - } else { - s.err("illegal char") - } - default: - s.err("illegal char: " + string(ch)) - } - } - - // finish token ending - s.tokEnd = s.srcPos.Offset - - // create token literal - var tokenText string - if s.tokStart >= 0 { - tokenText = string(s.src[s.tokStart:s.tokEnd]) - } - s.tokStart = s.tokEnd // ensure idempotency of tokenText() call - - return token.Token{ - Type: tok, - Pos: s.tokPos, - Text: tokenText, - } -} - -// scanNumber scans a HCL number definition starting with the given rune -func (s *Scanner) scanNumber(ch rune) token.Type { - zero := ch == '0' - pos := s.srcPos - - s.scanMantissa(ch) - ch = s.next() // seek forward - if ch == 'e' || ch == 'E' { - ch = s.scanExponent(ch) - return token.FLOAT - } - - if ch == '.' { - ch = s.scanFraction(ch) - if ch == 'e' || ch == 'E' { - ch = s.next() - ch = s.scanExponent(ch) - } - return token.FLOAT - } - - if ch != eof { - s.unread() - } - - // If we have a larger number and this is zero, error - if zero && pos != s.srcPos { - s.err("numbers cannot start with 0") - } - - return token.NUMBER -} - -// scanMantissa scans the mantissa beginning from the rune. It returns the next -// non decimal rune. It's used to determine wheter it's a fraction or exponent. -func (s *Scanner) scanMantissa(ch rune) rune { - scanned := false - for isDecimal(ch) { - ch = s.next() - scanned = true - } - - if scanned && ch != eof { - s.unread() - } - return ch -} - -// scanFraction scans the fraction after the '.' rune -func (s *Scanner) scanFraction(ch rune) rune { - if ch == '.' { - ch = s.peek() // we peek just to see if we can move forward - ch = s.scanMantissa(ch) - } - return ch -} - -// scanExponent scans the remaining parts of an exponent after the 'e' or 'E' -// rune. 
-func (s *Scanner) scanExponent(ch rune) rune { - if ch == 'e' || ch == 'E' { - ch = s.next() - if ch == '-' || ch == '+' { - ch = s.next() - } - ch = s.scanMantissa(ch) - } - return ch -} - -// scanString scans a quoted string -func (s *Scanner) scanString() { - braces := 0 - for { - // '"' opening already consumed - // read character after quote - ch := s.next() - - if ch == '\n' || ch < 0 || ch == eof { - s.err("literal not terminated") - return - } - - if ch == '"' { - break - } - - // If we're going into a ${} then we can ignore quotes for awhile - if braces == 0 && ch == '$' && s.peek() == '{' { - braces++ - s.next() - } else if braces > 0 && ch == '{' { - braces++ - } - if braces > 0 && ch == '}' { - braces-- - } - - if ch == '\\' { - s.scanEscape() - } - } - - return -} - -// scanEscape scans an escape sequence -func (s *Scanner) scanEscape() rune { - // http://en.cppreference.com/w/cpp/language/escape - ch := s.next() // read character after '/' - switch ch { - case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"': - // nothing to do - case '0', '1', '2', '3', '4', '5', '6', '7': - // octal notation - ch = s.scanDigits(ch, 8, 3) - case 'x': - // hexademical notation - ch = s.scanDigits(s.next(), 16, 2) - case 'u': - // universal character name - ch = s.scanDigits(s.next(), 16, 4) - case 'U': - // universal character name - ch = s.scanDigits(s.next(), 16, 8) - default: - s.err("illegal char escape") - } - return ch -} - -// scanDigits scans a rune with the given base for n times. For example an -// octal notation \184 would yield in scanDigits(ch, 8, 3) -func (s *Scanner) scanDigits(ch rune, base, n int) rune { - for n > 0 && digitVal(ch) < base { - ch = s.next() - n-- - } - if n > 0 { - s.err("illegal char escape") - } - - // we scanned all digits, put the last non digit char back - s.unread() - return ch -} - -// scanIdentifier scans an identifier and returns the literal string -func (s *Scanner) scanIdentifier() string { - offs := s.srcPos.Offset - s.lastCharLen - ch := s.next() - for isLetter(ch) || isDigit(ch) || ch == '-' { - ch = s.next() - } - - if ch != eof { - s.unread() // we got identifier, put back latest char - } - - return string(s.src[offs:s.srcPos.Offset]) -} - -// recentPosition returns the position of the character immediately after the -// character or token returned by the last call to Scan. -func (s *Scanner) recentPosition() (pos token.Pos) { - pos.Offset = s.srcPos.Offset - s.lastCharLen - switch { - case s.srcPos.Column > 0: - // common case: last character was not a '\n' - pos.Line = s.srcPos.Line - pos.Column = s.srcPos.Column - case s.lastLineLen > 0: - // last character was a '\n' - // (we cannot be at the beginning of the source - // since we have called next() at least once) - pos.Line = s.srcPos.Line - 1 - pos.Column = s.lastLineLen - default: - // at the beginning of the source - pos.Line = 1 - pos.Column = 1 - } - return -} - -// err prints the error of any scanning to s.Error function. 
If the function is -// not defined, by default it prints them to os.Stderr -func (s *Scanner) err(msg string) { - s.ErrorCount++ - pos := s.recentPosition() - - if s.Error != nil { - s.Error(pos, msg) - return - } - - fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg) -} - -// isHexadecimal returns true if the given rune is a letter -func isLetter(ch rune) bool { - return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch) -} - -// isHexadecimal returns true if the given rune is a decimal digit -func isDigit(ch rune) bool { - return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch) -} - -// isHexadecimal returns true if the given rune is a decimal number -func isDecimal(ch rune) bool { - return '0' <= ch && ch <= '9' -} - -// isHexadecimal returns true if the given rune is an hexadecimal number -func isHexadecimal(ch rune) bool { - return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F' -} - -// isWhitespace returns true if the rune is a space, tab, newline or carriage return -func isWhitespace(ch rune) bool { - return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' -} - -// digitVal returns the integer value of a given octal,decimal or hexadecimal rune -func digitVal(ch rune) int { - switch { - case '0' <= ch && ch <= '9': - return int(ch - '0') - case 'a' <= ch && ch <= 'f': - return int(ch - 'a' + 10) - case 'A' <= ch && ch <= 'F': - return int(ch - 'A' + 10) - } - return 16 // larger than any legal digit val -} diff --git a/vendor/github.com/hashicorp/hcl/json/token/position.go b/vendor/github.com/hashicorp/hcl/json/token/position.go deleted file mode 100644 index 59c1bb72d..000000000 --- a/vendor/github.com/hashicorp/hcl/json/token/position.go +++ /dev/null @@ -1,46 +0,0 @@ -package token - -import "fmt" - -// Pos describes an arbitrary source position -// including the file, line, and column location. -// A Position is valid if the line number is > 0. -type Pos struct { - Filename string // filename, if any - Offset int // offset, starting at 0 - Line int // line number, starting at 1 - Column int // column number, starting at 1 (character count) -} - -// IsValid returns true if the position is valid. -func (p *Pos) IsValid() bool { return p.Line > 0 } - -// String returns a string in one of several forms: -// -// file:line:column valid position with file name -// line:column valid position without file name -// file invalid position with file name -// - invalid position without file name -func (p Pos) String() string { - s := p.Filename - if p.IsValid() { - if s != "" { - s += ":" - } - s += fmt.Sprintf("%d:%d", p.Line, p.Column) - } - if s == "" { - s = "-" - } - return s -} - -// Before reports whether the position p is before u. -func (p Pos) Before(u Pos) bool { - return u.Offset > p.Offset || u.Line > p.Line -} - -// After reports whether the position p is after u. 
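One subtlety worth pinning down before the last method: Before and After compare Offset or Line with a logical OR rather than a strict lexicographic order, so they are only dependable when both fields advance together, as they do for positions produced by a single scanner. A quick illustration against this Pos type (the hcl and json token packages removed in this patch declare identical copies):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/hcl/token"
)

func main() {
	a := token.Pos{Filename: "main.hcl", Offset: 10, Line: 2, Column: 3}
	b := token.Pos{Filename: "main.hcl", Offset: 42, Line: 5, Column: 1}

	fmt.Println(a.Before(b)) // true: b has both a larger offset and a larger line
	fmt.Println(b.After(a))  // true
	fmt.Println(a.String())  // "main.hcl:2:3"

	var zero token.Pos
	fmt.Println(zero.IsValid(), zero.String()) // false "-"
}
```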
-func (p Pos) After(u Pos) bool { - return u.Offset < p.Offset || u.Line < p.Line -} diff --git a/vendor/github.com/hashicorp/hcl/json/token/token.go b/vendor/github.com/hashicorp/hcl/json/token/token.go deleted file mode 100644 index 95a0c3eee..000000000 --- a/vendor/github.com/hashicorp/hcl/json/token/token.go +++ /dev/null @@ -1,118 +0,0 @@ -package token - -import ( - "fmt" - "strconv" - - hcltoken "github.com/hashicorp/hcl/hcl/token" -) - -// Token defines a single HCL token which can be obtained via the Scanner -type Token struct { - Type Type - Pos Pos - Text string -} - -// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language) -type Type int - -const ( - // Special tokens - ILLEGAL Type = iota - EOF - - identifier_beg - literal_beg - NUMBER // 12345 - FLOAT // 123.45 - BOOL // true,false - STRING // "abc" - NULL // null - literal_end - identifier_end - - operator_beg - LBRACK // [ - LBRACE // { - COMMA // , - PERIOD // . - COLON // : - - RBRACK // ] - RBRACE // } - - operator_end -) - -var tokens = [...]string{ - ILLEGAL: "ILLEGAL", - - EOF: "EOF", - - NUMBER: "NUMBER", - FLOAT: "FLOAT", - BOOL: "BOOL", - STRING: "STRING", - NULL: "NULL", - - LBRACK: "LBRACK", - LBRACE: "LBRACE", - COMMA: "COMMA", - PERIOD: "PERIOD", - COLON: "COLON", - - RBRACK: "RBRACK", - RBRACE: "RBRACE", -} - -// String returns the string corresponding to the token tok. -func (t Type) String() string { - s := "" - if 0 <= t && t < Type(len(tokens)) { - s = tokens[t] - } - if s == "" { - s = "token(" + strconv.Itoa(int(t)) + ")" - } - return s -} - -// IsIdentifier returns true for tokens corresponding to identifiers and basic -// type literals; it returns false otherwise. -func (t Type) IsIdentifier() bool { return identifier_beg < t && t < identifier_end } - -// IsLiteral returns true for tokens corresponding to basic type literals; it -// returns false otherwise. -func (t Type) IsLiteral() bool { return literal_beg < t && t < literal_end } - -// IsOperator returns true for tokens corresponding to operators and -// delimiters; it returns false otherwise. -func (t Type) IsOperator() bool { return operator_beg < t && t < operator_end } - -// String returns the token's literal text. Note that this is only -// applicable for certain token types, such as token.IDENT, -// token.STRING, etc.. -func (t Token) String() string { - return fmt.Sprintf("%s %s %s", t.Pos.String(), t.Type.String(), t.Text) -} - -// HCLToken converts this token to an HCL token. -// -// The token type must be a literal type or this will panic. 
-func (t Token) HCLToken() hcltoken.Token { - switch t.Type { - case BOOL: - return hcltoken.Token{Type: hcltoken.BOOL, Text: t.Text} - case FLOAT: - return hcltoken.Token{Type: hcltoken.FLOAT, Text: t.Text} - case NULL: - return hcltoken.Token{Type: hcltoken.STRING, Text: ""} - case NUMBER: - return hcltoken.Token{Type: hcltoken.NUMBER, Text: t.Text} - case STRING: - return hcltoken.Token{Type: hcltoken.STRING, Text: t.Text, JSON: true} - default: - panic(fmt.Sprintf("unimplemented HCLToken for type: %s", t.Type)) - } -} diff --git a/vendor/github.com/hashicorp/hcl/lex.go b/vendor/github.com/hashicorp/hcl/lex.go deleted file mode 100644 index d9993c292..000000000 --- a/vendor/github.com/hashicorp/hcl/lex.go +++ /dev/null @@ -1,38 +0,0 @@ -package hcl - -import ( - "unicode" - "unicode/utf8" -) - -type lexModeValue byte - -const ( - lexModeUnknown lexModeValue = iota - lexModeHcl - lexModeJson -) - -// lexMode returns whether we're going to be parsing in JSON -// mode or HCL mode. -func lexMode(v []byte) lexModeValue { - var ( - r rune - w int - offset int - ) - - for { - r, w = utf8.DecodeRune(v[offset:]) - offset += w - if unicode.IsSpace(r) { - continue - } - if r == '{' { - return lexModeJson - } - break - } - - return lexModeHcl -} diff --git a/vendor/github.com/hashicorp/hcl/parse.go b/vendor/github.com/hashicorp/hcl/parse.go deleted file mode 100644 index 1fca53c4c..000000000 --- a/vendor/github.com/hashicorp/hcl/parse.go +++ /dev/null @@ -1,39 +0,0 @@ -package hcl - -import ( - "fmt" - - "github.com/hashicorp/hcl/hcl/ast" - hclParser "github.com/hashicorp/hcl/hcl/parser" - jsonParser "github.com/hashicorp/hcl/json/parser" -) - -// ParseBytes accepts as input byte slice and returns ast tree. -// -// Input can be either JSON or HCL -func ParseBytes(in []byte) (*ast.File, error) { - return parse(in) -} - -// ParseString accepts input as a string and returns ast tree. -func ParseString(input string) (*ast.File, error) { - return parse([]byte(input)) -} - -func parse(in []byte) (*ast.File, error) { - switch lexMode(in) { - case lexModeHcl: - return hclParser.Parse(in) - case lexModeJson: - return jsonParser.Parse(in) - } - - return nil, fmt.Errorf("unknown config format") -} - -// Parse parses the given input and returns the root object. -// -// The input format can be either HCL or JSON. 
-func Parse(input string) (*ast.File, error) { - return parse([]byte(input)) -} diff --git a/vendor/github.com/jmespath/go-jmespath/.gitignore b/vendor/github.com/jmespath/go-jmespath/.gitignore deleted file mode 100644 index 5091fb073..000000000 --- a/vendor/github.com/jmespath/go-jmespath/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -/jpgo -jmespath-fuzz.zip -cpu.out -go-jmespath.test diff --git a/vendor/github.com/jmespath/go-jmespath/.travis.yml b/vendor/github.com/jmespath/go-jmespath/.travis.yml deleted file mode 100644 index c56f37c0c..000000000 --- a/vendor/github.com/jmespath/go-jmespath/.travis.yml +++ /dev/null @@ -1,28 +0,0 @@ -language: go - -sudo: false - -go: - - 1.5.x - - 1.6.x - - 1.7.x - - 1.8.x - - 1.9.x - - 1.10.x - - 1.11.x - - 1.12.x - - 1.13.x - - 1.14.x - - 1.15.x - - tip - -allow_failures: - - go: tip - -script: make build - -matrix: - include: - - language: go - go: 1.15.x - script: make test diff --git a/vendor/github.com/jmespath/go-jmespath/LICENSE b/vendor/github.com/jmespath/go-jmespath/LICENSE deleted file mode 100644 index b03310a91..000000000 --- a/vendor/github.com/jmespath/go-jmespath/LICENSE +++ /dev/null @@ -1,13 +0,0 @@ -Copyright 2015 James Saryerwinnie - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/vendor/github.com/jmespath/go-jmespath/Makefile b/vendor/github.com/jmespath/go-jmespath/Makefile deleted file mode 100644 index fb38ec276..000000000 --- a/vendor/github.com/jmespath/go-jmespath/Makefile +++ /dev/null @@ -1,51 +0,0 @@ - -CMD = jpgo - -SRC_PKGS=./ ./cmd/... ./fuzz/... - -help: - @echo "Please use \`make ' where is one of" - @echo " test to run all the tests" - @echo " build to build the library and jp executable" - @echo " generate to run codegen" - - -generate: - go generate ${SRC_PKGS} - -build: - rm -f $(CMD) - go build ${SRC_PKGS} - rm -f cmd/$(CMD)/$(CMD) && cd cmd/$(CMD)/ && go build ./... - mv cmd/$(CMD)/$(CMD) . - -test: test-internal-testify - echo "making tests ${SRC_PKGS}" - go test -v ${SRC_PKGS} - -check: - go vet ${SRC_PKGS} - @echo "golint ${SRC_PKGS}" - @lint=`golint ${SRC_PKGS}`; \ - lint=`echo "$$lint" | grep -v "astnodetype_string.go" | grep -v "toktype_string.go"`; \ - echo "$$lint"; \ - if [ "$$lint" != "" ]; then exit 1; fi - -htmlc: - go test -coverprofile="/tmp/jpcov" && go tool cover -html="/tmp/jpcov" && unlink /tmp/jpcov - -buildfuzz: - go-fuzz-build github.com/jmespath/go-jmespath/fuzz - -fuzz: buildfuzz - go-fuzz -bin=./jmespath-fuzz.zip -workdir=fuzz/testdata - -bench: - go test -bench . -cpuprofile cpu.out - -pprof-cpu: - go tool pprof ./go-jmespath.test ./cpu.out - -test-internal-testify: - cd internal/testify && go test ./... 
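Before moving into the go-jmespath files, the hcl entrypoints above deserve one concrete illustration: lexMode makes Parse self-dispatching, so the first non-space rune alone decides whether the HCL or the JSON parser runs. A minimal sketch, assuming the package as it existed before this removal:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl"
)

func main() {
	// lexMode skips leading whitespace; a '{' as the first rune selects the
	// JSON parser, anything else falls through to the native HCL parser.
	for _, src := range []string{
		`foo = "bar"`,     // parsed as HCL
		` {"foo": "bar"}`, // parsed as JSON despite the leading space
	} {
		file, err := hcl.Parse(src)
		if err != nil {
			fmt.Println("parse error:", err)
			continue
		}
		fmt.Printf("root node type: %T\n", file.Node)
	}
}
```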
- diff --git a/vendor/github.com/jmespath/go-jmespath/README.md b/vendor/github.com/jmespath/go-jmespath/README.md deleted file mode 100644 index 110ad7999..000000000 --- a/vendor/github.com/jmespath/go-jmespath/README.md +++ /dev/null @@ -1,87 +0,0 @@ -# go-jmespath - A JMESPath implementation in Go - -[![Build Status](https://img.shields.io/travis/jmespath/go-jmespath.svg)](https://travis-ci.org/jmespath/go-jmespath) - - - -go-jmespath is a GO implementation of JMESPath, -which is a query language for JSON. It will take a JSON -document and transform it into another JSON document -through a JMESPath expression. - -Using go-jmespath is really easy. There's a single function -you use, `jmespath.search`: - - -```go -> import "github.com/jmespath/go-jmespath" -> -> var jsondata = []byte(`{"foo": {"bar": {"baz": [0, 1, 2, 3, 4]}}}`) // your data -> var data interface{} -> err := json.Unmarshal(jsondata, &data) -> result, err := jmespath.Search("foo.bar.baz[2]", data) -result = 2 -``` - -In the example we gave the ``search`` function input data of -`{"foo": {"bar": {"baz": [0, 1, 2, 3, 4]}}}` as well as the JMESPath -expression `foo.bar.baz[2]`, and the `search` function evaluated -the expression against the input data to produce the result ``2``. - -The JMESPath language can do a lot more than select an element -from a list. Here are a few more examples: - -```go -> var jsondata = []byte(`{"foo": {"bar": {"baz": [0, 1, 2, 3, 4]}}}`) // your data -> var data interface{} -> err := json.Unmarshal(jsondata, &data) -> result, err := jmespath.search("foo.bar", data) -result = { "baz": [ 0, 1, 2, 3, 4 ] } - - -> var jsondata = []byte(`{"foo": [{"first": "a", "last": "b"}, - {"first": "c", "last": "d"}]}`) // your data -> var data interface{} -> err := json.Unmarshal(jsondata, &data) -> result, err := jmespath.search({"foo[*].first", data) -result [ 'a', 'c' ] - - -> var jsondata = []byte(`{"foo": [{"age": 20}, {"age": 25}, - {"age": 30}, {"age": 35}, - {"age": 40}]}`) // your data -> var data interface{} -> err := json.Unmarshal(jsondata, &data) -> result, err := jmespath.search("foo[?age > `30`]") -result = [ { age: 35 }, { age: 40 } ] -``` - -You can also pre-compile your query. This is usefull if -you are going to run multiple searches with it: - -```go - > var jsondata = []byte(`{"foo": "bar"}`) - > var data interface{} - > err := json.Unmarshal(jsondata, &data) - > precompiled, err := Compile("foo") - > if err != nil{ - > // ... handle the error - > } - > result, err := precompiled.Search(data) - result = "bar" -``` - -## More Resources - -The example above only show a small amount of what -a JMESPath expression can do. If you want to take a -tour of the language, the *best* place to go is the -[JMESPath Tutorial](http://jmespath.org/tutorial.html). - -One of the best things about JMESPath is that it is -implemented in many different programming languages including -python, ruby, php, lua, etc. To see a complete list of libraries, -check out the [JMESPath libraries page](http://jmespath.org/libraries.html). - -And finally, the full JMESPath specification can be found -on the [JMESPath site](http://jmespath.org/specification.html). diff --git a/vendor/github.com/jmespath/go-jmespath/api.go b/vendor/github.com/jmespath/go-jmespath/api.go deleted file mode 100644 index 010efe9bf..000000000 --- a/vendor/github.com/jmespath/go-jmespath/api.go +++ /dev/null @@ -1,49 +0,0 @@ -package jmespath - -import "strconv" - -// JMESPath is the representation of a compiled JMES path query. 
A JMESPath is -// safe for concurrent use by multiple goroutines. -type JMESPath struct { - ast ASTNode - intr *treeInterpreter -} - -// Compile parses a JMESPath expression and returns, if successful, a JMESPath -// object that can be used to match against data. -func Compile(expression string) (*JMESPath, error) { - parser := NewParser() - ast, err := parser.Parse(expression) - if err != nil { - return nil, err - } - jmespath := &JMESPath{ast: ast, intr: newInterpreter()} - return jmespath, nil -} - -// MustCompile is like Compile but panics if the expression cannot be parsed. -// It simplifies safe initialization of global variables holding compiled -// JMESPaths. -func MustCompile(expression string) *JMESPath { - jmespath, err := Compile(expression) - if err != nil { - panic(`jmespath: Compile(` + strconv.Quote(expression) + `): ` + err.Error()) - } - return jmespath -} - -// Search evaluates a JMESPath expression against input data and returns the result. -func (jp *JMESPath) Search(data interface{}) (interface{}, error) { - return jp.intr.Execute(jp.ast, data) -} - -// Search evaluates a JMESPath expression against input data and returns the result. -func Search(expression string, data interface{}) (interface{}, error) { - intr := newInterpreter() - parser := NewParser() - ast, err := parser.Parse(expression) - if err != nil { - return nil, err - } - return intr.Execute(ast, data) -} diff --git a/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go b/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go deleted file mode 100644 index 1cd2d239c..000000000 --- a/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go +++ /dev/null @@ -1,16 +0,0 @@ -// generated by stringer -type astNodeType; DO NOT EDIT - -package jmespath - -import "fmt" - -const _astNodeType_name = "ASTEmptyASTComparatorASTCurrentNodeASTExpRefASTFunctionExpressionASTFieldASTFilterProjectionASTFlattenASTIdentityASTIndexASTIndexExpressionASTKeyValPairASTLiteralASTMultiSelectHashASTMultiSelectListASTOrExpressionASTAndExpressionASTNotExpressionASTPipeASTProjectionASTSubexpressionASTSliceASTValueProjection" - -var _astNodeType_index = [...]uint16{0, 8, 21, 35, 44, 65, 73, 92, 102, 113, 121, 139, 152, 162, 180, 198, 213, 229, 245, 252, 265, 281, 289, 307} - -func (i astNodeType) String() string { - if i < 0 || i >= astNodeType(len(_astNodeType_index)-1) { - return fmt.Sprintf("astNodeType(%d)", i) - } - return _astNodeType_name[_astNodeType_index[i]:_astNodeType_index[i+1]] -} diff --git a/vendor/github.com/jmespath/go-jmespath/functions.go b/vendor/github.com/jmespath/go-jmespath/functions.go deleted file mode 100644 index 9b7cd89b4..000000000 --- a/vendor/github.com/jmespath/go-jmespath/functions.go +++ /dev/null @@ -1,842 +0,0 @@ -package jmespath - -import ( - "encoding/json" - "errors" - "fmt" - "math" - "reflect" - "sort" - "strconv" - "strings" - "unicode/utf8" -) - -type jpFunction func(arguments []interface{}) (interface{}, error) - -type jpType string - -const ( - jpUnknown jpType = "unknown" - jpNumber jpType = "number" - jpString jpType = "string" - jpArray jpType = "array" - jpObject jpType = "object" - jpArrayNumber jpType = "array[number]" - jpArrayString jpType = "array[string]" - jpExpref jpType = "expref" - jpAny jpType = "any" -) - -type functionEntry struct { - name string - arguments []argSpec - handler jpFunction - hasExpRef bool -} - -type argSpec struct { - types []jpType - variadic bool -} - -type byExprString struct { - intr *treeInterpreter - node ASTNode - items []interface{} 
- hasError bool -} - -func (a *byExprString) Len() int { - return len(a.items) -} -func (a *byExprString) Swap(i, j int) { - a.items[i], a.items[j] = a.items[j], a.items[i] -} -func (a *byExprString) Less(i, j int) bool { - first, err := a.intr.Execute(a.node, a.items[i]) - if err != nil { - a.hasError = true - // Return a dummy value. - return true - } - ith, ok := first.(string) - if !ok { - a.hasError = true - return true - } - second, err := a.intr.Execute(a.node, a.items[j]) - if err != nil { - a.hasError = true - // Return a dummy value. - return true - } - jth, ok := second.(string) - if !ok { - a.hasError = true - return true - } - return ith < jth -} - -type byExprFloat struct { - intr *treeInterpreter - node ASTNode - items []interface{} - hasError bool -} - -func (a *byExprFloat) Len() int { - return len(a.items) -} -func (a *byExprFloat) Swap(i, j int) { - a.items[i], a.items[j] = a.items[j], a.items[i] -} -func (a *byExprFloat) Less(i, j int) bool { - first, err := a.intr.Execute(a.node, a.items[i]) - if err != nil { - a.hasError = true - // Return a dummy value. - return true - } - ith, ok := first.(float64) - if !ok { - a.hasError = true - return true - } - second, err := a.intr.Execute(a.node, a.items[j]) - if err != nil { - a.hasError = true - // Return a dummy value. - return true - } - jth, ok := second.(float64) - if !ok { - a.hasError = true - return true - } - return ith < jth -} - -type functionCaller struct { - functionTable map[string]functionEntry -} - -func newFunctionCaller() *functionCaller { - caller := &functionCaller{} - caller.functionTable = map[string]functionEntry{ - "length": { - name: "length", - arguments: []argSpec{ - {types: []jpType{jpString, jpArray, jpObject}}, - }, - handler: jpfLength, - }, - "starts_with": { - name: "starts_with", - arguments: []argSpec{ - {types: []jpType{jpString}}, - {types: []jpType{jpString}}, - }, - handler: jpfStartsWith, - }, - "abs": { - name: "abs", - arguments: []argSpec{ - {types: []jpType{jpNumber}}, - }, - handler: jpfAbs, - }, - "avg": { - name: "avg", - arguments: []argSpec{ - {types: []jpType{jpArrayNumber}}, - }, - handler: jpfAvg, - }, - "ceil": { - name: "ceil", - arguments: []argSpec{ - {types: []jpType{jpNumber}}, - }, - handler: jpfCeil, - }, - "contains": { - name: "contains", - arguments: []argSpec{ - {types: []jpType{jpArray, jpString}}, - {types: []jpType{jpAny}}, - }, - handler: jpfContains, - }, - "ends_with": { - name: "ends_with", - arguments: []argSpec{ - {types: []jpType{jpString}}, - {types: []jpType{jpString}}, - }, - handler: jpfEndsWith, - }, - "floor": { - name: "floor", - arguments: []argSpec{ - {types: []jpType{jpNumber}}, - }, - handler: jpfFloor, - }, - "map": { - name: "amp", - arguments: []argSpec{ - {types: []jpType{jpExpref}}, - {types: []jpType{jpArray}}, - }, - handler: jpfMap, - hasExpRef: true, - }, - "max": { - name: "max", - arguments: []argSpec{ - {types: []jpType{jpArrayNumber, jpArrayString}}, - }, - handler: jpfMax, - }, - "merge": { - name: "merge", - arguments: []argSpec{ - {types: []jpType{jpObject}, variadic: true}, - }, - handler: jpfMerge, - }, - "max_by": { - name: "max_by", - arguments: []argSpec{ - {types: []jpType{jpArray}}, - {types: []jpType{jpExpref}}, - }, - handler: jpfMaxBy, - hasExpRef: true, - }, - "sum": { - name: "sum", - arguments: []argSpec{ - {types: []jpType{jpArrayNumber}}, - }, - handler: jpfSum, - }, - "min": { - name: "min", - arguments: []argSpec{ - {types: []jpType{jpArrayNumber, jpArrayString}}, - }, - handler: jpfMin, - }, - "min_by": { - 
name: "min_by", - arguments: []argSpec{ - {types: []jpType{jpArray}}, - {types: []jpType{jpExpref}}, - }, - handler: jpfMinBy, - hasExpRef: true, - }, - "type": { - name: "type", - arguments: []argSpec{ - {types: []jpType{jpAny}}, - }, - handler: jpfType, - }, - "keys": { - name: "keys", - arguments: []argSpec{ - {types: []jpType{jpObject}}, - }, - handler: jpfKeys, - }, - "values": { - name: "values", - arguments: []argSpec{ - {types: []jpType{jpObject}}, - }, - handler: jpfValues, - }, - "sort": { - name: "sort", - arguments: []argSpec{ - {types: []jpType{jpArrayString, jpArrayNumber}}, - }, - handler: jpfSort, - }, - "sort_by": { - name: "sort_by", - arguments: []argSpec{ - {types: []jpType{jpArray}}, - {types: []jpType{jpExpref}}, - }, - handler: jpfSortBy, - hasExpRef: true, - }, - "join": { - name: "join", - arguments: []argSpec{ - {types: []jpType{jpString}}, - {types: []jpType{jpArrayString}}, - }, - handler: jpfJoin, - }, - "reverse": { - name: "reverse", - arguments: []argSpec{ - {types: []jpType{jpArray, jpString}}, - }, - handler: jpfReverse, - }, - "to_array": { - name: "to_array", - arguments: []argSpec{ - {types: []jpType{jpAny}}, - }, - handler: jpfToArray, - }, - "to_string": { - name: "to_string", - arguments: []argSpec{ - {types: []jpType{jpAny}}, - }, - handler: jpfToString, - }, - "to_number": { - name: "to_number", - arguments: []argSpec{ - {types: []jpType{jpAny}}, - }, - handler: jpfToNumber, - }, - "not_null": { - name: "not_null", - arguments: []argSpec{ - {types: []jpType{jpAny}, variadic: true}, - }, - handler: jpfNotNull, - }, - } - return caller -} - -func (e *functionEntry) resolveArgs(arguments []interface{}) ([]interface{}, error) { - if len(e.arguments) == 0 { - return arguments, nil - } - if !e.arguments[len(e.arguments)-1].variadic { - if len(e.arguments) != len(arguments) { - return nil, errors.New("incorrect number of args") - } - for i, spec := range e.arguments { - userArg := arguments[i] - err := spec.typeCheck(userArg) - if err != nil { - return nil, err - } - } - return arguments, nil - } - if len(arguments) < len(e.arguments) { - return nil, errors.New("Invalid arity.") - } - return arguments, nil -} - -func (a *argSpec) typeCheck(arg interface{}) error { - for _, t := range a.types { - switch t { - case jpNumber: - if _, ok := arg.(float64); ok { - return nil - } - case jpString: - if _, ok := arg.(string); ok { - return nil - } - case jpArray: - if isSliceType(arg) { - return nil - } - case jpObject: - if _, ok := arg.(map[string]interface{}); ok { - return nil - } - case jpArrayNumber: - if _, ok := toArrayNum(arg); ok { - return nil - } - case jpArrayString: - if _, ok := toArrayStr(arg); ok { - return nil - } - case jpAny: - return nil - case jpExpref: - if _, ok := arg.(expRef); ok { - return nil - } - } - } - return fmt.Errorf("Invalid type for: %v, expected: %#v", arg, a.types) -} - -func (f *functionCaller) CallFunction(name string, arguments []interface{}, intr *treeInterpreter) (interface{}, error) { - entry, ok := f.functionTable[name] - if !ok { - return nil, errors.New("unknown function: " + name) - } - resolvedArgs, err := entry.resolveArgs(arguments) - if err != nil { - return nil, err - } - if entry.hasExpRef { - var extra []interface{} - extra = append(extra, intr) - resolvedArgs = append(extra, resolvedArgs...) 
- } - return entry.handler(resolvedArgs) -} - -func jpfAbs(arguments []interface{}) (interface{}, error) { - num := arguments[0].(float64) - return math.Abs(num), nil -} - -func jpfLength(arguments []interface{}) (interface{}, error) { - arg := arguments[0] - if c, ok := arg.(string); ok { - return float64(utf8.RuneCountInString(c)), nil - } else if isSliceType(arg) { - v := reflect.ValueOf(arg) - return float64(v.Len()), nil - } else if c, ok := arg.(map[string]interface{}); ok { - return float64(len(c)), nil - } - return nil, errors.New("could not compute length()") -} - -func jpfStartsWith(arguments []interface{}) (interface{}, error) { - search := arguments[0].(string) - prefix := arguments[1].(string) - return strings.HasPrefix(search, prefix), nil -} - -func jpfAvg(arguments []interface{}) (interface{}, error) { - // We've already type checked the value so we can safely use - // type assertions. - args := arguments[0].([]interface{}) - length := float64(len(args)) - numerator := 0.0 - for _, n := range args { - numerator += n.(float64) - } - return numerator / length, nil -} -func jpfCeil(arguments []interface{}) (interface{}, error) { - val := arguments[0].(float64) - return math.Ceil(val), nil -} -func jpfContains(arguments []interface{}) (interface{}, error) { - search := arguments[0] - el := arguments[1] - if searchStr, ok := search.(string); ok { - if elStr, ok := el.(string); ok { - return strings.Index(searchStr, elStr) != -1, nil - } - return false, nil - } - // Otherwise this is a generic contains for []interface{} - general := search.([]interface{}) - for _, item := range general { - if item == el { - return true, nil - } - } - return false, nil -} -func jpfEndsWith(arguments []interface{}) (interface{}, error) { - search := arguments[0].(string) - suffix := arguments[1].(string) - return strings.HasSuffix(search, suffix), nil -} -func jpfFloor(arguments []interface{}) (interface{}, error) { - val := arguments[0].(float64) - return math.Floor(val), nil -} -func jpfMap(arguments []interface{}) (interface{}, error) { - intr := arguments[0].(*treeInterpreter) - exp := arguments[1].(expRef) - node := exp.ref - arr := arguments[2].([]interface{}) - mapped := make([]interface{}, 0, len(arr)) - for _, value := range arr { - current, err := intr.Execute(node, value) - if err != nil { - return nil, err - } - mapped = append(mapped, current) - } - return mapped, nil -} -func jpfMax(arguments []interface{}) (interface{}, error) { - if items, ok := toArrayNum(arguments[0]); ok { - if len(items) == 0 { - return nil, nil - } - if len(items) == 1 { - return items[0], nil - } - best := items[0] - for _, item := range items[1:] { - if item > best { - best = item - } - } - return best, nil - } - // Otherwise we're dealing with a max() of strings. 
- items, _ := toArrayStr(arguments[0]) - if len(items) == 0 { - return nil, nil - } - if len(items) == 1 { - return items[0], nil - } - best := items[0] - for _, item := range items[1:] { - if item > best { - best = item - } - } - return best, nil -} -func jpfMerge(arguments []interface{}) (interface{}, error) { - final := make(map[string]interface{}) - for _, m := range arguments { - mapped := m.(map[string]interface{}) - for key, value := range mapped { - final[key] = value - } - } - return final, nil -} -func jpfMaxBy(arguments []interface{}) (interface{}, error) { - intr := arguments[0].(*treeInterpreter) - arr := arguments[1].([]interface{}) - exp := arguments[2].(expRef) - node := exp.ref - if len(arr) == 0 { - return nil, nil - } else if len(arr) == 1 { - return arr[0], nil - } - start, err := intr.Execute(node, arr[0]) - if err != nil { - return nil, err - } - switch t := start.(type) { - case float64: - bestVal := t - bestItem := arr[0] - for _, item := range arr[1:] { - result, err := intr.Execute(node, item) - if err != nil { - return nil, err - } - current, ok := result.(float64) - if !ok { - return nil, errors.New("invalid type, must be number") - } - if current > bestVal { - bestVal = current - bestItem = item - } - } - return bestItem, nil - case string: - bestVal := t - bestItem := arr[0] - for _, item := range arr[1:] { - result, err := intr.Execute(node, item) - if err != nil { - return nil, err - } - current, ok := result.(string) - if !ok { - return nil, errors.New("invalid type, must be string") - } - if current > bestVal { - bestVal = current - bestItem = item - } - } - return bestItem, nil - default: - return nil, errors.New("invalid type, must be number or string") - } -} -func jpfSum(arguments []interface{}) (interface{}, error) { - items, _ := toArrayNum(arguments[0]) - sum := 0.0 - for _, item := range items { - sum += item - } - return sum, nil -} - -func jpfMin(arguments []interface{}) (interface{}, error) { - if items, ok := toArrayNum(arguments[0]); ok { - if len(items) == 0 { - return nil, nil - } - if len(items) == 1 { - return items[0], nil - } - best := items[0] - for _, item := range items[1:] { - if item < best { - best = item - } - } - return best, nil - } - items, _ := toArrayStr(arguments[0]) - if len(items) == 0 { - return nil, nil - } - if len(items) == 1 { - return items[0], nil - } - best := items[0] - for _, item := range items[1:] { - if item < best { - best = item - } - } - return best, nil -} - -func jpfMinBy(arguments []interface{}) (interface{}, error) { - intr := arguments[0].(*treeInterpreter) - arr := arguments[1].([]interface{}) - exp := arguments[2].(expRef) - node := exp.ref - if len(arr) == 0 { - return nil, nil - } else if len(arr) == 1 { - return arr[0], nil - } - start, err := intr.Execute(node, arr[0]) - if err != nil { - return nil, err - } - if t, ok := start.(float64); ok { - bestVal := t - bestItem := arr[0] - for _, item := range arr[1:] { - result, err := intr.Execute(node, item) - if err != nil { - return nil, err - } - current, ok := result.(float64) - if !ok { - return nil, errors.New("invalid type, must be number") - } - if current < bestVal { - bestVal = current - bestItem = item - } - } - return bestItem, nil - } else if t, ok := start.(string); ok { - bestVal := t - bestItem := arr[0] - for _, item := range arr[1:] { - result, err := intr.Execute(node, item) - if err != nil { - return nil, err - } - current, ok := result.(string) - if !ok { - return nil, errors.New("invalid type, must be string") - } - if current <
bestVal { - bestVal = current - bestItem = item - } - } - return bestItem, nil - } else { - return nil, errors.New("invalid type, must be number or string") - } -} -func jpfType(arguments []interface{}) (interface{}, error) { - arg := arguments[0] - if _, ok := arg.(float64); ok { - return "number", nil - } - if _, ok := arg.(string); ok { - return "string", nil - } - if _, ok := arg.([]interface{}); ok { - return "array", nil - } - if _, ok := arg.(map[string]interface{}); ok { - return "object", nil - } - if arg == nil { - return "null", nil - } - if arg == true || arg == false { - return "boolean", nil - } - return nil, errors.New("unknown type") -} -func jpfKeys(arguments []interface{}) (interface{}, error) { - arg := arguments[0].(map[string]interface{}) - collected := make([]interface{}, 0, len(arg)) - for key := range arg { - collected = append(collected, key) - } - return collected, nil -} -func jpfValues(arguments []interface{}) (interface{}, error) { - arg := arguments[0].(map[string]interface{}) - collected := make([]interface{}, 0, len(arg)) - for _, value := range arg { - collected = append(collected, value) - } - return collected, nil -} -func jpfSort(arguments []interface{}) (interface{}, error) { - if items, ok := toArrayNum(arguments[0]); ok { - d := sort.Float64Slice(items) - sort.Stable(d) - final := make([]interface{}, len(d)) - for i, val := range d { - final[i] = val - } - return final, nil - } - // Otherwise we're dealing with sort()'ing strings. - items, _ := toArrayStr(arguments[0]) - d := sort.StringSlice(items) - sort.Stable(d) - final := make([]interface{}, len(d)) - for i, val := range d { - final[i] = val - } - return final, nil -} -func jpfSortBy(arguments []interface{}) (interface{}, error) { - intr := arguments[0].(*treeInterpreter) - arr := arguments[1].([]interface{}) - exp := arguments[2].(expRef) - node := exp.ref - if len(arr) == 0 { - return arr, nil - } else if len(arr) == 1 { - return arr, nil - } - start, err := intr.Execute(node, arr[0]) - if err != nil { - return nil, err - } - if _, ok := start.(float64); ok { - sortable := &byExprFloat{intr, node, arr, false} - sort.Stable(sortable) - if sortable.hasError { - return nil, errors.New("error in sort_by comparison") - } - return arr, nil - } else if _, ok := start.(string); ok { - sortable := &byExprString{intr, node, arr, false} - sort.Stable(sortable) - if sortable.hasError { - return nil, errors.New("error in sort_by comparison") - } - return arr, nil - } else { - return nil, errors.New("invalid type, must be number or string") - } -} -func jpfJoin(arguments []interface{}) (interface{}, error) { - sep := arguments[0].(string) - // We can't just do arguments[1].([]string), we have to - // manually convert each item to a string.
- arrayStr := []string{} - for _, item := range arguments[1].([]interface{}) { - arrayStr = append(arrayStr, item.(string)) - } - return strings.Join(arrayStr, sep), nil -} -func jpfReverse(arguments []interface{}) (interface{}, error) { - if s, ok := arguments[0].(string); ok { - r := []rune(s) - for i, j := 0, len(r)-1; i < len(r)/2; i, j = i+1, j-1 { - r[i], r[j] = r[j], r[i] - } - return string(r), nil - } - items := arguments[0].([]interface{}) - length := len(items) - reversed := make([]interface{}, length) - for i, item := range items { - reversed[length-(i+1)] = item - } - return reversed, nil -} -func jpfToArray(arguments []interface{}) (interface{}, error) { - if _, ok := arguments[0].([]interface{}); ok { - return arguments[0], nil - } - return arguments[:1:1], nil -} -func jpfToString(arguments []interface{}) (interface{}, error) { - if v, ok := arguments[0].(string); ok { - return v, nil - } - result, err := json.Marshal(arguments[0]) - if err != nil { - return nil, err - } - return string(result), nil -} -func jpfToNumber(arguments []interface{}) (interface{}, error) { - arg := arguments[0] - if v, ok := arg.(float64); ok { - return v, nil - } - if v, ok := arg.(string); ok { - conv, err := strconv.ParseFloat(v, 64) - if err != nil { - return nil, nil - } - return conv, nil - } - if _, ok := arg.([]interface{}); ok { - return nil, nil - } - if _, ok := arg.(map[string]interface{}); ok { - return nil, nil - } - if arg == nil { - return nil, nil - } - if arg == true || arg == false { - return nil, nil - } - return nil, errors.New("unknown type") -} -func jpfNotNull(arguments []interface{}) (interface{}, error) { - for _, arg := range arguments { - if arg != nil { - return arg, nil - } - } - return nil, nil -} diff --git a/vendor/github.com/jmespath/go-jmespath/interpreter.go b/vendor/github.com/jmespath/go-jmespath/interpreter.go deleted file mode 100644 index 13c74604c..000000000 --- a/vendor/github.com/jmespath/go-jmespath/interpreter.go +++ /dev/null @@ -1,418 +0,0 @@ -package jmespath - -import ( - "errors" - "reflect" - "unicode" - "unicode/utf8" -) - -/* This is a tree based interpreter. It walks the AST and directly - interprets the AST to search through a JSON document. -*/ - -type treeInterpreter struct { - fCall *functionCaller -} - -func newInterpreter() *treeInterpreter { - interpreter := treeInterpreter{} - interpreter.fCall = newFunctionCaller() - return &interpreter -} - -type expRef struct { - ref ASTNode -} - -// Execute takes an ASTNode and input data and interprets the AST directly. -// It will produce the result of applying the JMESPath expression associated -// with the ASTNode to the input data "value". 
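As a sketch of how callers reach this entry point from outside the package: Compile (or MustCompile) runs the lexer and parser once, and each Search call then walks the cached AST with this interpreter. The expression and document below are invented:

package main

import (
    "fmt"

    "github.com/jmespath/go-jmespath"
)

func main() {
    // Parse once, evaluate many times.
    expr := jmespath.MustCompile("foo[?bar > `1`].bar")
    doc := map[string]interface{}{
        "foo": []interface{}{
            map[string]interface{}{"bar": 1.0},
            map[string]interface{}{"bar": 2.0},
        },
    }
    out, err := expr.Search(doc)
    if err != nil {
        panic(err)
    }
    fmt.Println(out) // [2]
}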
-func (intr *treeInterpreter) Execute(node ASTNode, value interface{}) (interface{}, error) { - switch node.nodeType { - case ASTComparator: - left, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, err - } - right, err := intr.Execute(node.children[1], value) - if err != nil { - return nil, err - } - switch node.value { - case tEQ: - return objsEqual(left, right), nil - case tNE: - return !objsEqual(left, right), nil - } - leftNum, ok := left.(float64) - if !ok { - return nil, nil - } - rightNum, ok := right.(float64) - if !ok { - return nil, nil - } - switch node.value { - case tGT: - return leftNum > rightNum, nil - case tGTE: - return leftNum >= rightNum, nil - case tLT: - return leftNum < rightNum, nil - case tLTE: - return leftNum <= rightNum, nil - } - case ASTExpRef: - return expRef{ref: node.children[0]}, nil - case ASTFunctionExpression: - resolvedArgs := []interface{}{} - for _, arg := range node.children { - current, err := intr.Execute(arg, value) - if err != nil { - return nil, err - } - resolvedArgs = append(resolvedArgs, current) - } - return intr.fCall.CallFunction(node.value.(string), resolvedArgs, intr) - case ASTField: - if m, ok := value.(map[string]interface{}); ok { - key := node.value.(string) - return m[key], nil - } - return intr.fieldFromStruct(node.value.(string), value) - case ASTFilterProjection: - left, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, nil - } - sliceType, ok := left.([]interface{}) - if !ok { - if isSliceType(left) { - return intr.filterProjectionWithReflection(node, left) - } - return nil, nil - } - compareNode := node.children[2] - collected := []interface{}{} - for _, element := range sliceType { - result, err := intr.Execute(compareNode, element) - if err != nil { - return nil, err - } - if !isFalse(result) { - current, err := intr.Execute(node.children[1], element) - if err != nil { - return nil, err - } - if current != nil { - collected = append(collected, current) - } - } - } - return collected, nil - case ASTFlatten: - left, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, nil - } - sliceType, ok := left.([]interface{}) - if !ok { - // If we can't type convert to []interface{}, there's - // a chance this could still work via reflection if we're - // dealing with user provided types. - if isSliceType(left) { - return intr.flattenWithReflection(left) - } - return nil, nil - } - flattened := []interface{}{} - for _, element := range sliceType { - if elementSlice, ok := element.([]interface{}); ok { - flattened = append(flattened, elementSlice...) - } else if isSliceType(element) { - reflectFlat := []interface{}{} - v := reflect.ValueOf(element) - for i := 0; i < v.Len(); i++ { - reflectFlat = append(reflectFlat, v.Index(i).Interface()) - } - flattened = append(flattened, reflectFlat...) - } else { - flattened = append(flattened, element) - } - } - return flattened, nil - case ASTIdentity, ASTCurrentNode: - return value, nil - case ASTIndex: - if sliceType, ok := value.([]interface{}); ok { - index := node.value.(int) - if index < 0 { - index += len(sliceType) - } - if index < len(sliceType) && index >= 0 { - return sliceType[index], nil - } - return nil, nil - } - // Otherwise try via reflection. 
- rv := reflect.ValueOf(value) - if rv.Kind() == reflect.Slice { - index := node.value.(int) - if index < 0 { - index += rv.Len() - } - if index < rv.Len() && index >= 0 { - v := rv.Index(index) - return v.Interface(), nil - } - } - return nil, nil - case ASTKeyValPair: - return intr.Execute(node.children[0], value) - case ASTLiteral: - return node.value, nil - case ASTMultiSelectHash: - if value == nil { - return nil, nil - } - collected := make(map[string]interface{}) - for _, child := range node.children { - current, err := intr.Execute(child, value) - if err != nil { - return nil, err - } - key := child.value.(string) - collected[key] = current - } - return collected, nil - case ASTMultiSelectList: - if value == nil { - return nil, nil - } - collected := []interface{}{} - for _, child := range node.children { - current, err := intr.Execute(child, value) - if err != nil { - return nil, err - } - collected = append(collected, current) - } - return collected, nil - case ASTOrExpression: - matched, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, err - } - if isFalse(matched) { - matched, err = intr.Execute(node.children[1], value) - if err != nil { - return nil, err - } - } - return matched, nil - case ASTAndExpression: - matched, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, err - } - if isFalse(matched) { - return matched, nil - } - return intr.Execute(node.children[1], value) - case ASTNotExpression: - matched, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, err - } - if isFalse(matched) { - return true, nil - } - return false, nil - case ASTPipe: - result := value - var err error - for _, child := range node.children { - result, err = intr.Execute(child, result) - if err != nil { - return nil, err - } - } - return result, nil - case ASTProjection: - left, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, err - } - sliceType, ok := left.([]interface{}) - if !ok { - if isSliceType(left) { - return intr.projectWithReflection(node, left) - } - return nil, nil - } - collected := []interface{}{} - var current interface{} - for _, element := range sliceType { - current, err = intr.Execute(node.children[1], element) - if err != nil { - return nil, err - } - if current != nil { - collected = append(collected, current) - } - } - return collected, nil - case ASTSubexpression, ASTIndexExpression: - left, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, err - } - return intr.Execute(node.children[1], left) - case ASTSlice: - sliceType, ok := value.([]interface{}) - if !ok { - if isSliceType(value) { - return intr.sliceWithReflection(node, value) - } - return nil, nil - } - parts := node.value.([]*int) - sliceParams := make([]sliceParam, 3) - for i, part := range parts { - if part != nil { - sliceParams[i].Specified = true - sliceParams[i].N = *part - } - } - return slice(sliceType, sliceParams) - case ASTValueProjection: - left, err := intr.Execute(node.children[0], value) - if err != nil { - return nil, nil - } - mapType, ok := left.(map[string]interface{}) - if !ok { - return nil, nil - } - values := make([]interface{}, 0, len(mapType)) - for _, value := range mapType { - values = append(values, value) - } - collected := []interface{}{} - for _, element := range values { - current, err := intr.Execute(node.children[1], element) - if err != nil { - return nil, err - } - if current != nil { - collected = append(collected, current) - } - } - return collected,
nil - } - return nil, errors.New("Unknown AST node: " + node.nodeType.String()) -} - -func (intr *treeInterpreter) fieldFromStruct(key string, value interface{}) (interface{}, error) { - rv := reflect.ValueOf(value) - first, n := utf8.DecodeRuneInString(key) - fieldName := string(unicode.ToUpper(first)) + key[n:] - if rv.Kind() == reflect.Struct { - v := rv.FieldByName(fieldName) - if !v.IsValid() { - return nil, nil - } - return v.Interface(), nil - } else if rv.Kind() == reflect.Ptr { - // Handle multiple levels of indirection? - if rv.IsNil() { - return nil, nil - } - rv = rv.Elem() - v := rv.FieldByName(fieldName) - if !v.IsValid() { - return nil, nil - } - return v.Interface(), nil - } - return nil, nil -} - -func (intr *treeInterpreter) flattenWithReflection(value interface{}) (interface{}, error) { - v := reflect.ValueOf(value) - flattened := []interface{}{} - for i := 0; i < v.Len(); i++ { - element := v.Index(i).Interface() - if reflect.TypeOf(element).Kind() == reflect.Slice { - // Then insert the contents of the element - // slice into the flattened slice, - // i.e flattened = append(flattened, mySlice...) - elementV := reflect.ValueOf(element) - for j := 0; j < elementV.Len(); j++ { - flattened = append( - flattened, elementV.Index(j).Interface()) - } - } else { - flattened = append(flattened, element) - } - } - return flattened, nil -} - -func (intr *treeInterpreter) sliceWithReflection(node ASTNode, value interface{}) (interface{}, error) { - v := reflect.ValueOf(value) - parts := node.value.([]*int) - sliceParams := make([]sliceParam, 3) - for i, part := range parts { - if part != nil { - sliceParams[i].Specified = true - sliceParams[i].N = *part - } - } - final := []interface{}{} - for i := 0; i < v.Len(); i++ { - element := v.Index(i).Interface() - final = append(final, element) - } - return slice(final, sliceParams) -} - -func (intr *treeInterpreter) filterProjectionWithReflection(node ASTNode, value interface{}) (interface{}, error) { - compareNode := node.children[2] - collected := []interface{}{} - v := reflect.ValueOf(value) - for i := 0; i < v.Len(); i++ { - element := v.Index(i).Interface() - result, err := intr.Execute(compareNode, element) - if err != nil { - return nil, err - } - if !isFalse(result) { - current, err := intr.Execute(node.children[1], element) - if err != nil { - return nil, err - } - if current != nil { - collected = append(collected, current) - } - } - } - return collected, nil -} - -func (intr *treeInterpreter) projectWithReflection(node ASTNode, value interface{}) (interface{}, error) { - collected := []interface{}{} - v := reflect.ValueOf(value) - for i := 0; i < v.Len(); i++ { - element := v.Index(i).Interface() - result, err := intr.Execute(node.children[1], element) - if err != nil { - return nil, err - } - if result != nil { - collected = append(collected, result) - } - } - return collected, nil -} diff --git a/vendor/github.com/jmespath/go-jmespath/lexer.go b/vendor/github.com/jmespath/go-jmespath/lexer.go deleted file mode 100644 index 817900c8f..000000000 --- a/vendor/github.com/jmespath/go-jmespath/lexer.go +++ /dev/null @@ -1,420 +0,0 @@ -package jmespath - -import ( - "bytes" - "encoding/json" - "fmt" - "strconv" - "strings" - "unicode/utf8" -) - -type token struct { - tokenType tokType - value string - position int - length int -} - -type tokType int - -const eof = -1 - -// Lexer contains information about the expression being tokenized. -type Lexer struct { - expression string // The expression provided by the user. 
- currentPos int // The current position in the string. - lastWidth int // The width of the current rune. This is used by back() to rewind currentPos by one rune. - buf bytes.Buffer // Internal buffer used for building up values. -} - -// SyntaxError is the main error used whenever a lexing or parsing error occurs. -type SyntaxError struct { - msg string // Error message displayed to user - Expression string // Expression that generated a SyntaxError - Offset int // The location in the string where the error occurred -} - -func (e SyntaxError) Error() string { - // In the future, it would be good to underline the specific - // location where the error occurred. - return "SyntaxError: " + e.msg -} - -// HighlightLocation will show where the syntax error occurred. -// It will place a "^" character on a line below the expression -// at the point where the syntax error occurred. -func (e SyntaxError) HighlightLocation() string { - return e.Expression + "\n" + strings.Repeat(" ", e.Offset) + "^" -} - -//go:generate stringer -type=tokType -const ( - tUnknown tokType = iota - tStar - tDot - tFilter - tFlatten - tLparen - tRparen - tLbracket - tRbracket - tLbrace - tRbrace - tOr - tPipe - tNumber - tUnquotedIdentifier - tQuotedIdentifier - tComma - tColon - tLT - tLTE - tGT - tGTE - tEQ - tNE - tJSONLiteral - tStringLiteral - tCurrent - tExpref - tAnd - tNot - tEOF -) - -var basicTokens = map[rune]tokType{ - '.': tDot, - '*': tStar, - ',': tComma, - ':': tColon, - '{': tLbrace, - '}': tRbrace, - ']': tRbracket, // tLbracket not included because it could be "[]" - '(': tLparen, - ')': tRparen, - '@': tCurrent, -} - -// Bit mask for [a-zA-Z_] shifted down 64 bits to fit in a single uint64. -// When using this bitmask just be sure to shift the rune down 64 bits -// before checking against identifierStartBits. -const identifierStartBits uint64 = 576460745995190270 - -// Bit mask for [a-zA-Z0-9], 128 bits -> 2 uint64s. -var identifierTrailingBits = [2]uint64{287948901175001088, 576460745995190270} - -var whiteSpace = map[rune]bool{ - ' ': true, '\t': true, '\n': true, '\r': true, -} - -func (t token) String() string { - return fmt.Sprintf("Token{%+v, %s, %d, %d}", - t.tokenType, t.value, t.position, t.length) -} - -// NewLexer creates a new JMESPath lexer. -func NewLexer() *Lexer { - lexer := Lexer{} - return &lexer -} - -func (lexer *Lexer) next() rune { - if lexer.currentPos >= len(lexer.expression) { - lexer.lastWidth = 0 - return eof - } - r, w := utf8.DecodeRuneInString(lexer.expression[lexer.currentPos:]) - lexer.lastWidth = w - lexer.currentPos += w - return r -} - -func (lexer *Lexer) back() { - lexer.currentPos -= lexer.lastWidth -} - -func (lexer *Lexer) peek() rune { - t := lexer.next() - lexer.back() - return t -} - -// tokenize takes an expression and returns corresponding tokens. -func (lexer *Lexer) tokenize(expression string) ([]token, error) { - var tokens []token - lexer.expression = expression - lexer.currentPos = 0 - lexer.lastWidth = 0 -loop: - for { - r := lexer.next() - if identifierStartBits&(1<<(uint64(r)-64)) > 0 { - t := lexer.consumeUnquotedIdentifier() - tokens = append(tokens, t) - } else if val, ok := basicTokens[r]; ok { - // Basic single char token.
- t := token{ - tokenType: val, - value: string(r), - position: lexer.currentPos - lexer.lastWidth, - length: 1, - } - tokens = append(tokens, t) - } else if r == '-' || (r >= '0' && r <= '9') { - t := lexer.consumeNumber() - tokens = append(tokens, t) - } else if r == '[' { - t := lexer.consumeLBracket() - tokens = append(tokens, t) - } else if r == '"' { - t, err := lexer.consumeQuotedIdentifier() - if err != nil { - return tokens, err - } - tokens = append(tokens, t) - } else if r == '\'' { - t, err := lexer.consumeRawStringLiteral() - if err != nil { - return tokens, err - } - tokens = append(tokens, t) - } else if r == '`' { - t, err := lexer.consumeLiteral() - if err != nil { - return tokens, err - } - tokens = append(tokens, t) - } else if r == '|' { - t := lexer.matchOrElse(r, '|', tOr, tPipe) - tokens = append(tokens, t) - } else if r == '<' { - t := lexer.matchOrElse(r, '=', tLTE, tLT) - tokens = append(tokens, t) - } else if r == '>' { - t := lexer.matchOrElse(r, '=', tGTE, tGT) - tokens = append(tokens, t) - } else if r == '!' { - t := lexer.matchOrElse(r, '=', tNE, tNot) - tokens = append(tokens, t) - } else if r == '=' { - t := lexer.matchOrElse(r, '=', tEQ, tUnknown) - tokens = append(tokens, t) - } else if r == '&' { - t := lexer.matchOrElse(r, '&', tAnd, tExpref) - tokens = append(tokens, t) - } else if r == eof { - break loop - } else if _, ok := whiteSpace[r]; ok { - // Ignore whitespace - } else { - return tokens, lexer.syntaxError(fmt.Sprintf("Unknown char: %s", strconv.QuoteRuneToASCII(r))) - } - } - tokens = append(tokens, token{tEOF, "", len(lexer.expression), 0}) - return tokens, nil -} - -// Consume characters until the ending rune "r" is reached. -// If the end of the expression is reached before seeing the -// terminating rune "r", then an error is returned. -// If no error occurs then the matching substring is returned. -// The returned string will not include the ending rune. -func (lexer *Lexer) consumeUntil(end rune) (string, error) { - start := lexer.currentPos - current := lexer.next() - for current != end && current != eof { - if current == '\\' && lexer.peek() != eof { - lexer.next() - } - current = lexer.next() - } - if lexer.lastWidth == 0 { - // Then we hit an EOF so we never reached the closing - // delimiter. - return "", SyntaxError{ - msg: "Unclosed delimiter: " + string(end), - Expression: lexer.expression, - Offset: len(lexer.expression), - } - } - return lexer.expression[start : lexer.currentPos-lexer.lastWidth], nil -} - -func (lexer *Lexer) consumeLiteral() (token, error) { - start := lexer.currentPos - value, err := lexer.consumeUntil('`') - if err != nil { - return token{}, err - } - value = strings.Replace(value, "\\`", "`", -1) - return token{ - tokenType: tJSONLiteral, - value: value, - position: start, - length: len(value), - }, nil -} - -func (lexer *Lexer) consumeRawStringLiteral() (token, error) { - start := lexer.currentPos - currentIndex := start - current := lexer.next() - for current != '\'' && lexer.peek() != eof { - if current == '\\' && lexer.peek() == '\'' { - chunk := lexer.expression[currentIndex : lexer.currentPos-1] - lexer.buf.WriteString(chunk) - lexer.buf.WriteString("'") - lexer.next() - currentIndex = lexer.currentPos - } - current = lexer.next() - } - if lexer.lastWidth == 0 { - // Then we hit an EOF so we never reached the closing - // delimiter. 
- return token{}, SyntaxError{ - msg: "Unclosed delimiter: '", - Expression: lexer.expression, - Offset: len(lexer.expression), - } - } - if currentIndex < lexer.currentPos { - lexer.buf.WriteString(lexer.expression[currentIndex : lexer.currentPos-1]) - } - value := lexer.buf.String() - // Reset the buffer so it can be reused again. - lexer.buf.Reset() - return token{ - tokenType: tStringLiteral, - value: value, - position: start, - length: len(value), - }, nil -} - -func (lexer *Lexer) syntaxError(msg string) SyntaxError { - return SyntaxError{ - msg: msg, - Expression: lexer.expression, - Offset: lexer.currentPos - 1, - } -} - -// Checks for a two char token, otherwise matches a single character -// token. This is used whenever a two char token overlaps a single -// char token, e.g. "||" -> tOr, "|" -> tPipe. -func (lexer *Lexer) matchOrElse(first rune, second rune, matchedType tokType, singleCharType tokType) token { - start := lexer.currentPos - lexer.lastWidth - nextRune := lexer.next() - var t token - if nextRune == second { - t = token{ - tokenType: matchedType, - value: string(first) + string(second), - position: start, - length: 2, - } - } else { - lexer.back() - t = token{ - tokenType: singleCharType, - value: string(first), - position: start, - length: 1, - } - } - return t -} - -func (lexer *Lexer) consumeLBracket() token { - // There are three options here: - // 1. A filter expression "[?" - // 2. A flatten operator "[]" - // 3. A bare lbracket "[" - start := lexer.currentPos - lexer.lastWidth - nextRune := lexer.next() - var t token - if nextRune == '?' { - t = token{ - tokenType: tFilter, - value: "[?", - position: start, - length: 2, - } - } else if nextRune == ']' { - t = token{ - tokenType: tFlatten, - value: "[]", - position: start, - length: 2, - } - } else { - t = token{ - tokenType: tLbracket, - value: "[", - position: start, - length: 1, - } - lexer.back() - } - return t -} - -func (lexer *Lexer) consumeQuotedIdentifier() (token, error) { - start := lexer.currentPos - value, err := lexer.consumeUntil('"') - if err != nil { - return token{}, err - } - var decoded string - asJSON := []byte("\"" + value + "\"") - if err := json.Unmarshal([]byte(asJSON), &decoded); err != nil { - return token{}, err - } - return token{ - tokenType: tQuotedIdentifier, - value: decoded, - position: start - 1, - length: len(decoded), - }, nil -} - -func (lexer *Lexer) consumeUnquotedIdentifier() token { - // Consume runes until we reach the end of an unquoted - // identifier. - start := lexer.currentPos - lexer.lastWidth - for { - r := lexer.next() - if r < 0 || r > 128 || identifierTrailingBits[uint64(r)/64]&(1<<(uint64(r)%64)) == 0 { - lexer.back() - break - } - } - value := lexer.expression[start:lexer.currentPos] - return token{ - tokenType: tUnquotedIdentifier, - value: value, - position: start, - length: lexer.currentPos - start, - } -} - -func (lexer *Lexer) consumeNumber() token { - // Consume runes until we reach something that's not a number.
- start := lexer.currentPos - lexer.lastWidth - for { - r := lexer.next() - if r < '0' || r > '9' { - lexer.back() - break - } - } - value := lexer.expression[start:lexer.currentPos] - return token{ - tokenType: tNumber, - value: value, - position: start, - length: lexer.currentPos - start, - } -} diff --git a/vendor/github.com/jmespath/go-jmespath/parser.go b/vendor/github.com/jmespath/go-jmespath/parser.go deleted file mode 100644 index 4abc303ab..000000000 --- a/vendor/github.com/jmespath/go-jmespath/parser.go +++ /dev/null @@ -1,603 +0,0 @@ -package jmespath - -import ( - "encoding/json" - "fmt" - "strconv" - "strings" -) - -type astNodeType int - -//go:generate stringer -type astNodeType -const ( - ASTEmpty astNodeType = iota - ASTComparator - ASTCurrentNode - ASTExpRef - ASTFunctionExpression - ASTField - ASTFilterProjection - ASTFlatten - ASTIdentity - ASTIndex - ASTIndexExpression - ASTKeyValPair - ASTLiteral - ASTMultiSelectHash - ASTMultiSelectList - ASTOrExpression - ASTAndExpression - ASTNotExpression - ASTPipe - ASTProjection - ASTSubexpression - ASTSlice - ASTValueProjection -) - -// ASTNode represents the abstract syntax tree of a JMESPath expression. -type ASTNode struct { - nodeType astNodeType - value interface{} - children []ASTNode -} - -func (node ASTNode) String() string { - return node.PrettyPrint(0) -} - -// PrettyPrint will pretty print the parsed AST. -// The AST is an implementation detail and this pretty print -// function is provided as a convenience method to help with -// debugging. You should not rely on its output as the internal -// structure of the AST may change at any time. -func (node ASTNode) PrettyPrint(indent int) string { - spaces := strings.Repeat(" ", indent) - output := fmt.Sprintf("%s%s {\n", spaces, node.nodeType) - nextIndent := indent + 2 - if node.value != nil { - if converted, ok := node.value.(fmt.Stringer); ok { - // Account for things like comparator nodes - // that are enums with a String() method. - output += fmt.Sprintf("%svalue: %s\n", strings.Repeat(" ", nextIndent), converted.String()) - } else { - output += fmt.Sprintf("%svalue: %#v\n", strings.Repeat(" ", nextIndent), node.value) - } - } - lastIndex := len(node.children) - if lastIndex > 0 { - output += fmt.Sprintf("%schildren: {\n", strings.Repeat(" ", nextIndent)) - childIndent := nextIndent + 2 - for _, elem := range node.children { - output += elem.PrettyPrint(childIndent) - } - } - output += fmt.Sprintf("%s}\n", spaces) - return output -} - -var bindingPowers = map[tokType]int{ - tEOF: 0, - tUnquotedIdentifier: 0, - tQuotedIdentifier: 0, - tRbracket: 0, - tRparen: 0, - tComma: 0, - tRbrace: 0, - tNumber: 0, - tCurrent: 0, - tExpref: 0, - tColon: 0, - tPipe: 1, - tOr: 2, - tAnd: 3, - tEQ: 5, - tLT: 5, - tLTE: 5, - tGT: 5, - tGTE: 5, - tNE: 5, - tFlatten: 9, - tStar: 20, - tFilter: 21, - tDot: 40, - tNot: 45, - tLbrace: 50, - tLbracket: 55, - tLparen: 60, -} - -// Parser holds state about the current expression being parsed. -type Parser struct { - expression string - tokens []token - index int -} - -// NewParser creates a new JMESPath parser. -func NewParser() *Parser { - p := Parser{} - return &p -} - -// Parse will compile a JMESPath expression. 
-func (p *Parser) Parse(expression string) (ASTNode, error) { - lexer := NewLexer() - p.expression = expression - p.index = 0 - tokens, err := lexer.tokenize(expression) - if err != nil { - return ASTNode{}, err - } - p.tokens = tokens - parsed, err := p.parseExpression(0) - if err != nil { - return ASTNode{}, err - } - if p.current() != tEOF { - return ASTNode{}, p.syntaxError(fmt.Sprintf( - "Unexpected token at the end of the expression: %s", p.current())) - } - return parsed, nil -} - -func (p *Parser) parseExpression(bindingPower int) (ASTNode, error) { - var err error - leftToken := p.lookaheadToken(0) - p.advance() - leftNode, err := p.nud(leftToken) - if err != nil { - return ASTNode{}, err - } - currentToken := p.current() - for bindingPower < bindingPowers[currentToken] { - p.advance() - leftNode, err = p.led(currentToken, leftNode) - if err != nil { - return ASTNode{}, err - } - currentToken = p.current() - } - return leftNode, nil -} - -func (p *Parser) parseIndexExpression() (ASTNode, error) { - if p.lookahead(0) == tColon || p.lookahead(1) == tColon { - return p.parseSliceExpression() - } - indexStr := p.lookaheadToken(0).value - parsedInt, err := strconv.Atoi(indexStr) - if err != nil { - return ASTNode{}, err - } - indexNode := ASTNode{nodeType: ASTIndex, value: parsedInt} - p.advance() - if err := p.match(tRbracket); err != nil { - return ASTNode{}, err - } - return indexNode, nil -} - -func (p *Parser) parseSliceExpression() (ASTNode, error) { - parts := []*int{nil, nil, nil} - index := 0 - current := p.current() - for current != tRbracket && index < 3 { - if current == tColon { - index++ - p.advance() - } else if current == tNumber { - parsedInt, err := strconv.Atoi(p.lookaheadToken(0).value) - if err != nil { - return ASTNode{}, err - } - parts[index] = &parsedInt - p.advance() - } else { - return ASTNode{}, p.syntaxError( - "Expected tColon or tNumber" + ", received: " + p.current().String()) - } - current = p.current() - } - if err := p.match(tRbracket); err != nil { - return ASTNode{}, err - } - return ASTNode{ - nodeType: ASTSlice, - value: parts, - }, nil -} - -func (p *Parser) match(tokenType tokType) error { - if p.current() == tokenType { - p.advance() - return nil - } - return p.syntaxError("Expected " + tokenType.String() + ", received: " + p.current().String()) -} - -func (p *Parser) led(tokenType tokType, node ASTNode) (ASTNode, error) { - switch tokenType { - case tDot: - if p.current() != tStar { - right, err := p.parseDotRHS(bindingPowers[tDot]) - return ASTNode{ - nodeType: ASTSubexpression, - children: []ASTNode{node, right}, - }, err - } - p.advance() - right, err := p.parseProjectionRHS(bindingPowers[tDot]) - return ASTNode{ - nodeType: ASTValueProjection, - children: []ASTNode{node, right}, - }, err - case tPipe: - right, err := p.parseExpression(bindingPowers[tPipe]) - return ASTNode{nodeType: ASTPipe, children: []ASTNode{node, right}}, err - case tOr: - right, err := p.parseExpression(bindingPowers[tOr]) - return ASTNode{nodeType: ASTOrExpression, children: []ASTNode{node, right}}, err - case tAnd: - right, err := p.parseExpression(bindingPowers[tAnd]) - return ASTNode{nodeType: ASTAndExpression, children: []ASTNode{node, right}}, err - case tLparen: - name := node.value - var args []ASTNode - for p.current() != tRparen { - expression, err := p.parseExpression(0) - if err != nil { - return ASTNode{}, err - } - if p.current() == tComma { - if err := p.match(tComma); err != nil { - return ASTNode{}, err - } - } - args = append(args, expression) - } - 
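parseExpression above is the heart of a Pratt (top-down operator-precedence) parser: nud handles a token in prefix position, led folds an operator into the expression on its left, and the bindingPowers table decides how long the loop keeps consuming. The same loop shape in miniature, on a made-up single-digit grammar that is not part of this package:

package main

import "fmt"

type miniParser struct {
    toks []byte
    pos  int
}

// Binding powers: '*' binds tighter than '+', the way tDot outbinds tPipe.
var power = map[byte]int{'+': 10, '*': 20}

func (p *miniParser) next() byte { b := p.toks[p.pos]; p.pos++; return b }

func (p *miniParser) peek() byte {
    if p.pos < len(p.toks) {
        return p.toks[p.pos]
    }
    return 0 // acts like tEOF: its binding power defaults to 0
}

// parse mirrors parseExpression: evaluate a prefix token (nud), then keep
// folding operators (led) while their binding power exceeds the floor.
func (p *miniParser) parse(minPower int) int {
    left := int(p.next() - '0') // nud: a single-digit literal
    for power[p.peek()] > minPower {
        op := p.next()
        right := p.parse(power[op]) // higher floor: right side binds tighter
        if op == '+' {
            left += right
        } else {
            left *= right
        }
    }
    return left
}

func main() {
    p := &miniParser{toks: []byte("1+2*3")}
    fmt.Println(p.parse(0)) // 7, because '*' outbinds '+'
}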
if err := p.match(tRparen); err != nil { - return ASTNode{}, err - } - return ASTNode{ - nodeType: ASTFunctionExpression, - value: name, - children: args, - }, nil - case tFilter: - return p.parseFilter(node) - case tFlatten: - left := ASTNode{nodeType: ASTFlatten, children: []ASTNode{node}} - right, err := p.parseProjectionRHS(bindingPowers[tFlatten]) - return ASTNode{ - nodeType: ASTProjection, - children: []ASTNode{left, right}, - }, err - case tEQ, tNE, tGT, tGTE, tLT, tLTE: - right, err := p.parseExpression(bindingPowers[tokenType]) - if err != nil { - return ASTNode{}, err - } - return ASTNode{ - nodeType: ASTComparator, - value: tokenType, - children: []ASTNode{node, right}, - }, nil - case tLbracket: - tokenType := p.current() - var right ASTNode - var err error - if tokenType == tNumber || tokenType == tColon { - right, err = p.parseIndexExpression() - if err != nil { - return ASTNode{}, err - } - return p.projectIfSlice(node, right) - } - // Otherwise this is a projection. - if err := p.match(tStar); err != nil { - return ASTNode{}, err - } - if err := p.match(tRbracket); err != nil { - return ASTNode{}, err - } - right, err = p.parseProjectionRHS(bindingPowers[tStar]) - if err != nil { - return ASTNode{}, err - } - return ASTNode{ - nodeType: ASTProjection, - children: []ASTNode{node, right}, - }, nil - } - return ASTNode{}, p.syntaxError("Unexpected token: " + tokenType.String()) -} - -func (p *Parser) nud(token token) (ASTNode, error) { - switch token.tokenType { - case tJSONLiteral: - var parsed interface{} - err := json.Unmarshal([]byte(token.value), &parsed) - if err != nil { - return ASTNode{}, err - } - return ASTNode{nodeType: ASTLiteral, value: parsed}, nil - case tStringLiteral: - return ASTNode{nodeType: ASTLiteral, value: token.value}, nil - case tUnquotedIdentifier: - return ASTNode{ - nodeType: ASTField, - value: token.value, - }, nil - case tQuotedIdentifier: - node := ASTNode{nodeType: ASTField, value: token.value} - if p.current() == tLparen { - return ASTNode{}, p.syntaxErrorToken("Can't have quoted identifier as function name.", token) - } - return node, nil - case tStar: - left := ASTNode{nodeType: ASTIdentity} - var right ASTNode - var err error - if p.current() == tRbracket { - right = ASTNode{nodeType: ASTIdentity} - } else { - right, err = p.parseProjectionRHS(bindingPowers[tStar]) - } - return ASTNode{nodeType: ASTValueProjection, children: []ASTNode{left, right}}, err - case tFilter: - return p.parseFilter(ASTNode{nodeType: ASTIdentity}) - case tLbrace: - return p.parseMultiSelectHash() - case tFlatten: - left := ASTNode{ - nodeType: ASTFlatten, - children: []ASTNode{{nodeType: ASTIdentity}}, - } - right, err := p.parseProjectionRHS(bindingPowers[tFlatten]) - if err != nil { - return ASTNode{}, err - } - return ASTNode{nodeType: ASTProjection, children: []ASTNode{left, right}}, nil - case tLbracket: - tokenType := p.current() - if tokenType == tNumber || tokenType == tColon { - right, err := p.parseIndexExpression() - if err != nil { - return ASTNode{}, err - } - return p.projectIfSlice(ASTNode{nodeType: ASTIdentity}, right) - } else if tokenType == tStar && p.lookahead(1) == tRbracket { - p.advance() - p.advance() - right, err := p.parseProjectionRHS(bindingPowers[tStar]) - if err != nil { - return ASTNode{}, err - } - return ASTNode{ - nodeType: ASTProjection, - children: []ASTNode{{nodeType: ASTIdentity}, right}, - }, nil - } else { - return p.parseMultiSelectList() - } - case tCurrent: - return ASTNode{nodeType:
ASTCurrentNode}, nil - case tExpref: - expression, err := p.parseExpression(bindingPowers[tExpref]) - if err != nil { - return ASTNode{}, err - } - return ASTNode{nodeType: ASTExpRef, children: []ASTNode{expression}}, nil - case tNot: - expression, err := p.parseExpression(bindingPowers[tNot]) - if err != nil { - return ASTNode{}, err - } - return ASTNode{nodeType: ASTNotExpression, children: []ASTNode{expression}}, nil - case tLparen: - expression, err := p.parseExpression(0) - if err != nil { - return ASTNode{}, err - } - if err := p.match(tRparen); err != nil { - return ASTNode{}, err - } - return expression, nil - case tEOF: - return ASTNode{}, p.syntaxErrorToken("Incomplete expression", token) - } - - return ASTNode{}, p.syntaxErrorToken("Invalid token: "+token.tokenType.String(), token) -} - -func (p *Parser) parseMultiSelectList() (ASTNode, error) { - var expressions []ASTNode - for { - expression, err := p.parseExpression(0) - if err != nil { - return ASTNode{}, err - } - expressions = append(expressions, expression) - if p.current() == tRbracket { - break - } - err = p.match(tComma) - if err != nil { - return ASTNode{}, err - } - } - err := p.match(tRbracket) - if err != nil { - return ASTNode{}, err - } - return ASTNode{ - nodeType: ASTMultiSelectList, - children: expressions, - }, nil -} - -func (p *Parser) parseMultiSelectHash() (ASTNode, error) { - var children []ASTNode - for { - keyToken := p.lookaheadToken(0) - if err := p.match(tUnquotedIdentifier); err != nil { - if err := p.match(tQuotedIdentifier); err != nil { - return ASTNode{}, p.syntaxError("Expected tQuotedIdentifier or tUnquotedIdentifier") - } - } - keyName := keyToken.value - err := p.match(tColon) - if err != nil { - return ASTNode{}, err - } - value, err := p.parseExpression(0) - if err != nil { - return ASTNode{}, err - } - node := ASTNode{ - nodeType: ASTKeyValPair, - value: keyName, - children: []ASTNode{value}, - } - children = append(children, node) - if p.current() == tComma { - err := p.match(tComma) - if err != nil { - return ASTNode{}, err - } - } else if p.current() == tRbrace { - err := p.match(tRbrace) - if err != nil { - return ASTNode{}, err - } - break - } - } - return ASTNode{ - nodeType: ASTMultiSelectHash, - children: children, - }, nil -} - -func (p *Parser) projectIfSlice(left ASTNode, right ASTNode) (ASTNode, error) { - indexExpr := ASTNode{ - nodeType: ASTIndexExpression, - children: []ASTNode{left, right}, - } - if right.nodeType == ASTSlice { - right, err := p.parseProjectionRHS(bindingPowers[tStar]) - return ASTNode{ - nodeType: ASTProjection, - children: []ASTNode{indexExpr, right}, - }, err - } - return indexExpr, nil -} -func (p *Parser) parseFilter(node ASTNode) (ASTNode, error) { - var right, condition ASTNode - var err error - condition, err = p.parseExpression(0) - if err != nil { - return ASTNode{}, err - } - if err := p.match(tRbracket); err != nil { - return ASTNode{}, err - } - if p.current() == tFlatten { - right = ASTNode{nodeType: ASTIdentity} - } else { - right, err = p.parseProjectionRHS(bindingPowers[tFilter]) - if err != nil { - return ASTNode{}, err - } - } - - return ASTNode{ - nodeType: ASTFilterProjection, - children: []ASTNode{node, right, condition}, - }, nil -} - -func (p *Parser) parseDotRHS(bindingPower int) (ASTNode, error) { - lookahead := p.current() - if tokensOneOf([]tokType{tQuotedIdentifier, tUnquotedIdentifier, tStar}, lookahead) { - return p.parseExpression(bindingPower) - } else if lookahead == tLbracket { - if err := p.match(tLbracket); err != nil
{ - return ASTNode{}, err - } - return p.parseMultiSelectList() - } else if lookahead == tLbrace { - if err := p.match(tLbrace); err != nil { - return ASTNode{}, err - } - return p.parseMultiSelectHash() - } - return ASTNode{}, p.syntaxError("Expected identifier, lbracket, or lbrace") -} - -func (p *Parser) parseProjectionRHS(bindingPower int) (ASTNode, error) { - current := p.current() - if bindingPowers[current] < 10 { - return ASTNode{nodeType: ASTIdentity}, nil - } else if current == tLbracket { - return p.parseExpression(bindingPower) - } else if current == tFilter { - return p.parseExpression(bindingPower) - } else if current == tDot { - err := p.match(tDot) - if err != nil { - return ASTNode{}, err - } - return p.parseDotRHS(bindingPower) - } else { - return ASTNode{}, p.syntaxError("Error") - } -} - -func (p *Parser) lookahead(number int) tokType { - return p.lookaheadToken(number).tokenType -} - -func (p *Parser) current() tokType { - return p.lookahead(0) -} - -func (p *Parser) lookaheadToken(number int) token { - return p.tokens[p.index+number] -} - -func (p *Parser) advance() { - p.index++ -} - -func tokensOneOf(elements []tokType, token tokType) bool { - for _, elem := range elements { - if elem == token { - return true - } - } - return false -} - -func (p *Parser) syntaxError(msg string) SyntaxError { - return SyntaxError{ - msg: msg, - Expression: p.expression, - Offset: p.lookaheadToken(0).position, - } -} - -// Create a SyntaxError based on the provided token. -// This differs from syntaxError() which creates a SyntaxError -// based on the current lookahead token. -func (p *Parser) syntaxErrorToken(msg string, t token) SyntaxError { - return SyntaxError{ - msg: msg, - Expression: p.expression, - Offset: t.position, - } -} diff --git a/vendor/github.com/jmespath/go-jmespath/toktype_string.go b/vendor/github.com/jmespath/go-jmespath/toktype_string.go deleted file mode 100644 index dae79cbdf..000000000 --- a/vendor/github.com/jmespath/go-jmespath/toktype_string.go +++ /dev/null @@ -1,16 +0,0 @@ -// generated by stringer -type=tokType; DO NOT EDIT - -package jmespath - -import "fmt" - -const _tokType_name = "tUnknowntStartDottFiltertFlattentLparentRparentLbrackettRbrackettLbracetRbracetOrtPipetNumbertUnquotedIdentifiertQuotedIdentifiertCommatColontLTtLTEtGTtGTEtEQtNEtJSONLiteraltStringLiteraltCurrenttExpreftAndtNottEOF" - -var _tokType_index = [...]uint8{0, 8, 13, 17, 24, 32, 39, 46, 55, 64, 71, 78, 81, 86, 93, 112, 129, 135, 141, 144, 148, 151, 155, 158, 161, 173, 187, 195, 202, 206, 210, 214} - -func (i tokType) String() string { - if i < 0 || i >= tokType(len(_tokType_index)-1) { - return fmt.Sprintf("tokType(%d)", i) - } - return _tokType_name[_tokType_index[i]:_tokType_index[i+1]] -} diff --git a/vendor/github.com/jmespath/go-jmespath/util.go b/vendor/github.com/jmespath/go-jmespath/util.go deleted file mode 100644 index ddc1b7d7d..000000000 --- a/vendor/github.com/jmespath/go-jmespath/util.go +++ /dev/null @@ -1,185 +0,0 @@ -package jmespath - -import ( - "errors" - "reflect" -) - -// IsFalse determines if an object is false based on the JMESPath spec. -// JMESPath defines false values to be any of: -// - An empty string array, or hash. -// - The boolean value false. 
-// - nil -func isFalse(value interface{}) bool { - switch v := value.(type) { - case bool: - return !v - case []interface{}: - return len(v) == 0 - case map[string]interface{}: - return len(v) == 0 - case string: - return len(v) == 0 - case nil: - return true - } - // Try the reflection cases before returning false. - rv := reflect.ValueOf(value) - switch rv.Kind() { - case reflect.Struct: - // A struct type will never be false, even if - // all of its values are the zero type. - return false - case reflect.Slice, reflect.Map: - return rv.Len() == 0 - case reflect.Ptr: - if rv.IsNil() { - return true - } - // If it's a pointer type, we'll try to deref the pointer - // and evaluate the pointer value for isFalse. - element := rv.Elem() - return isFalse(element.Interface()) - } - return false -} - -// ObjsEqual is a generic object equality check. -// It will take two arbitrary objects and recursively determine -// if they are equal. -func objsEqual(left interface{}, right interface{}) bool { - return reflect.DeepEqual(left, right) -} - -// SliceParam refers to a single part of a slice. -// A slice consists of a start, a stop, and a step, similar to -// python slices. -type sliceParam struct { - N int - Specified bool -} - -// Slice supports [start:stop:step] style slicing that's supported in JMESPath. -func slice(slice []interface{}, parts []sliceParam) ([]interface{}, error) { - computed, err := computeSliceParams(len(slice), parts) - if err != nil { - return nil, err - } - start, stop, step := computed[0], computed[1], computed[2] - result := []interface{}{} - if step > 0 { - for i := start; i < stop; i += step { - result = append(result, slice[i]) - } - } else { - for i := start; i > stop; i += step { - result = append(result, slice[i]) - } - } - return result, nil -} - -func computeSliceParams(length int, parts []sliceParam) ([]int, error) { - var start, stop, step int - if !parts[2].Specified { - step = 1 - } else if parts[2].N == 0 { - return nil, errors.New("Invalid slice, step cannot be 0") - } else { - step = parts[2].N - } - var stepValueNegative bool - if step < 0 { - stepValueNegative = true - } else { - stepValueNegative = false - } - - if !parts[0].Specified { - if stepValueNegative { - start = length - 1 - } else { - start = 0 - } - } else { - start = capSlice(length, parts[0].N, step) - } - - if !parts[1].Specified { - if stepValueNegative { - stop = -1 - } else { - stop = length - } - } else { - stop = capSlice(length, parts[1].N, step) - } - return []int{start, stop, step}, nil -} - -func capSlice(length int, actual int, step int) int { - if actual < 0 { - actual += length - if actual < 0 { - if step < 0 { - actual = -1 - } else { - actual = 0 - } - } - } else if actual >= length { - if step < 0 { - actual = length - 1 - } else { - actual = length - } - } - return actual -} - -// ToArrayNum converts an empty interface type to a slice of float64. -// If any element in the array cannot be converted, then nil is returned -// along with a second value of false. -func toArrayNum(data interface{}) ([]float64, bool) { - // Is there a better way to do this with reflect? - if d, ok := data.([]interface{}); ok { - result := make([]float64, len(d)) - for i, el := range d { - item, ok := el.(float64) - if !ok { - return nil, false - } - result[i] = item - } - return result, true - } - return nil, false -} - -// ToArrayStr converts an empty interface type to a slice of strings. 
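The slice, computeSliceParams, and capSlice helpers above give JMESPath its Python-style [start:stop:step] semantics, including negative steps. A short sketch of the observable behavior through the public Search API, on an invented document:

package main

import (
    "fmt"

    "github.com/jmespath/go-jmespath"
)

func main() {
    data := map[string]interface{}{
        "foo": []interface{}{0.0, 1.0, 2.0, 3.0},
    }
    // With step -1, computeSliceParams defaults start to len-1 and stop
    // to -1, so the slice is walked backwards.
    out, err := jmespath.Search("foo[::-1]", data)
    if err != nil {
        panic(err)
    }
    fmt.Println(out) // [3 2 1 0]
}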
-// If any element in the array cannot be converted, then nil is returned -// along with a second value of false. If the input data could be entirely -// converted, then the converted data, along with a second value of true, -// will be returned. -func toArrayStr(data interface{}) ([]string, bool) { - // Is there a better way to do this with reflect? - if d, ok := data.([]interface{}); ok { - result := make([]string, len(d)) - for i, el := range d { - item, ok := el.(string) - if !ok { - return nil, false - } - result[i] = item - } - return result, true - } - return nil, false -} - -func isSliceType(v interface{}) bool { - if v == nil { - return false - } - return reflect.TypeOf(v).Kind() == reflect.Slice -} diff --git a/vendor/github.com/magiconair/properties/.gitignore b/vendor/github.com/magiconair/properties/.gitignore deleted file mode 100644 index e7081ff52..000000000 --- a/vendor/github.com/magiconair/properties/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -*.sublime-project -*.sublime-workspace -*.un~ -*.swp -.idea/ -*.iml diff --git a/vendor/github.com/magiconair/properties/CHANGELOG.md b/vendor/github.com/magiconair/properties/CHANGELOG.md deleted file mode 100644 index 842e8e24f..000000000 --- a/vendor/github.com/magiconair/properties/CHANGELOG.md +++ /dev/null @@ -1,205 +0,0 @@ -## Changelog - -### [1.8.7](https://github.com/magiconair/properties/tree/v1.8.7) - 08 Dec 2022 - - * [PR #65](https://github.com/magiconair/properties/pull/65): Speedup Merge - - Thanks to [@AdityaVallabh](https://github.com/AdityaVallabh) for the patch. - - * [PR #66](https://github.com/magiconair/properties/pull/66): use github actions - -### [1.8.6](https://github.com/magiconair/properties/tree/v1.8.6) - 23 Feb 2022 - - * [PR #57](https://github.com/magiconair/properties/pull/57):Fix "unreachable code" lint error - - Thanks to [@ellie](https://github.com/ellie) for the patch. - - * [PR #63](https://github.com/magiconair/properties/pull/63): Make TestMustGetParsedDuration backwards compatible - - This patch ensures that the `TestMustGetParsedDuration` still works with `go1.3` to make the - author happy until it affects real users. - - Thanks to [@maage](https://github.com/maage) for the patch. - -### [1.8.5](https://github.com/magiconair/properties/tree/v1.8.5) - 24 Mar 2021 - - * [PR #55](https://github.com/magiconair/properties/pull/55): Fix: Encoding Bug in Comments - - When reading comments \ are loaded correctly, but when writing they are then - replaced by \\. This leads to wrong comments when writing and reading multiple times. - - Thanks to [@doxsch](https://github.com/doxsch) for the patch. - -### [1.8.4](https://github.com/magiconair/properties/tree/v1.8.4) - 23 Sep 2020 - - * [PR #50](https://github.com/magiconair/properties/pull/50): enhance error message for circular references - - Thanks to [@sriv](https://github.com/sriv) for the patch. - -### [1.8.3](https://github.com/magiconair/properties/tree/v1.8.3) - 14 Sep 2020 - - * [PR #49](https://github.com/magiconair/properties/pull/49): Include the key in error message causing the circular reference - - The change is include the key in the error message which is causing the circular - reference when parsing/loading the properties files. - - Thanks to [@haroon-sheikh](https://github.com/haroon-sheikh) for the patch. 
- -### [1.8.2](https://github.com/magiconair/properties/tree/v1.8.2) - 25 Aug 2020 - - * [PR #36](https://github.com/magiconair/properties/pull/36): Escape backslash on write - - This patch ensures that backslashes are escaped on write. Existing applications which - rely on the old behavior may need to be updated. - - Thanks to [@apesternikov](https://github.com/apesternikov) for the patch. - - * [PR #42](https://github.com/magiconair/properties/pull/42): Made Content-Type check whitespace agnostic in LoadURL() - - Thanks to [@aliras1](https://github.com/aliras1) for the patch. - - * [PR #41](https://github.com/magiconair/properties/pull/41): Make key/value separator configurable on Write() - - Thanks to [@mkjor](https://github.com/mkjor) for the patch. - - * [PR #40](https://github.com/magiconair/properties/pull/40): Add method to return a sorted list of keys - - Thanks to [@mkjor](https://github.com/mkjor) for the patch. - -### [1.8.1](https://github.com/magiconair/properties/tree/v1.8.1) - 10 May 2019 - - * [PR #35](https://github.com/magiconair/properties/pull/35): Close body always after request - - This patch ensures that in `LoadURL` the response body is always closed. - - Thanks to [@liubog2008](https://github.com/liubog2008) for the patch. - -### [1.8](https://github.com/magiconair/properties/tree/v1.8) - 15 May 2018 - - * [PR #26](https://github.com/magiconair/properties/pull/26): Disable expansion during loading - - This adds the option to disable property expansion during loading. - - Thanks to [@kmala](https://github.com/kmala) for the patch. - -### [1.7.6](https://github.com/magiconair/properties/tree/v1.7.6) - 14 Feb 2018 - - * [PR #29](https://github.com/magiconair/properties/pull/29): Reworked expansion logic to handle more complex cases. - - See PR for an example. - - Thanks to [@yobert](https://github.com/yobert) for the fix. - -### [1.7.5](https://github.com/magiconair/properties/tree/v1.7.5) - 13 Feb 2018 - - * [PR #28](https://github.com/magiconair/properties/pull/28): Support duplicate expansions in the same value - - Values which expand the same key multiple times (e.g. `key=${a} ${a}`) will no longer fail - with a `circular reference error`. - - Thanks to [@yobert](https://github.com/yobert) for the fix. - -### [1.7.4](https://github.com/magiconair/properties/tree/v1.7.4) - 31 Oct 2017 - - * [Issue #23](https://github.com/magiconair/properties/issues/23): Ignore blank lines with whitespaces - - * [PR #24](https://github.com/magiconair/properties/pull/24): Update keys when DisableExpansion is enabled - - Thanks to [@mgurov](https://github.com/mgurov) for the fix. 
- -### [1.7.3](https://github.com/magiconair/properties/tree/v1.7.3) - 10 Jul 2017 - - * [Issue #17](https://github.com/magiconair/properties/issues/17): Add [SetValue()](http://godoc.org/github.com/magiconair/properties#Properties.SetValue) method to set values generically - * [Issue #22](https://github.com/magiconair/properties/issues/22): Add [LoadMap()](http://godoc.org/github.com/magiconair/properties#LoadMap) function to load properties from a string map - -### [1.7.2](https://github.com/magiconair/properties/tree/v1.7.2) - 20 Mar 2017 - - * [Issue #15](https://github.com/magiconair/properties/issues/15): Drop gocheck dependency - * [PR #21](https://github.com/magiconair/properties/pull/21): Add [Map()](http://godoc.org/github.com/magiconair/properties#Properties.Map) and [FilterFunc()](http://godoc.org/github.com/magiconair/properties#Properties.FilterFunc) - -### [1.7.1](https://github.com/magiconair/properties/tree/v1.7.1) - 13 Jan 2017 - - * [Issue #14](https://github.com/magiconair/properties/issues/14): Decouple TestLoadExpandedFile from `$USER` - * [PR #12](https://github.com/magiconair/properties/pull/12): Load from files and URLs - * [PR #16](https://github.com/magiconair/properties/pull/16): Keep gofmt happy - * [PR #18](https://github.com/magiconair/properties/pull/18): Fix Delete() function - -### [1.7.0](https://github.com/magiconair/properties/tree/v1.7.0) - 20 Mar 2016 - - * [Issue #10](https://github.com/magiconair/properties/issues/10): Add [LoadURL,LoadURLs,MustLoadURL,MustLoadURLs](http://godoc.org/github.com/magiconair/properties#LoadURL) methods to load properties from a URL. - * [Issue #11](https://github.com/magiconair/properties/issues/11): Add [LoadString,MustLoadString](http://godoc.org/github.com/magiconair/properties#LoadString) methods to load properties from a UTF-8 string. - * [PR #8](https://github.com/magiconair/properties/pull/8): Add [MustFlag](http://godoc.org/github.com/magiconair/properties#Properties.MustFlag) method to provide overrides via command line flags. (@pascaldekloe) - -### [1.6.0](https://github.com/magiconair/properties/tree/v1.6.0) - 11 Dec 2015 - - * Add [Decode](http://godoc.org/github.com/magiconair/properties#Properties.Decode) method to populate struct from properties via tags. - -### [1.5.6](https://github.com/magiconair/properties/tree/v1.5.6) - 18 Oct 2015 - - * Vendored in gopkg.in/check.v1 - -### [1.5.5](https://github.com/magiconair/properties/tree/v1.5.5) - 31 Jul 2015 - - * [PR #6](https://github.com/magiconair/properties/pull/6): Add [Delete](http://godoc.org/github.com/magiconair/properties#Properties.Delete) method to remove keys including comments. (@gerbenjacobs) - -### [1.5.4](https://github.com/magiconair/properties/tree/v1.5.4) - 23 Jun 2015 - - * [Issue #5](https://github.com/magiconair/properties/issues/5): Allow disabling of property expansion [DisableExpansion](http://godoc.org/github.com/magiconair/properties#Properties.DisableExpansion). When property expansion is disabled Properties become a simple key/value store and don't check for circular references
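As an editorial aside on the 1.5.4 entry above: `DisableExpansion` is the switch that several later fixes build on. Here is a minimal sketch of what it means in practice, based on the `NewProperties`/`Set`/`Get` API visible in the deleted files further down in this diff; the key name and value are invented for illustration:

```go
package main

import (
	"fmt"

	"github.com/magiconair/properties"
)

func main() {
	p := properties.NewProperties()
	p.DisableExpansion = true // plain key/value store: no ${...} expansion, no circular-reference check
	p.Set("self", "${self}")  // with expansion enabled this would be a circular-reference error
	v, _ := p.Get("self")
	fmt.Println(v) // prints the literal string "${self}"
}
```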
- -### [1.5.3](https://github.com/magiconair/properties/tree/v1.5.3) - 02 Jun 2015 - - * [Issue #4](https://github.com/magiconair/properties/issues/4): Maintain key order in [Filter()](http://godoc.org/github.com/magiconair/properties#Properties.Filter), [FilterPrefix()](http://godoc.org/github.com/magiconair/properties#Properties.FilterPrefix) and [FilterRegexp()](http://godoc.org/github.com/magiconair/properties#Properties.FilterRegexp) - -### [1.5.2](https://github.com/magiconair/properties/tree/v1.5.2) - 10 Apr 2015 - - * [Issue #3](https://github.com/magiconair/properties/issues/3): Don't print comments in [WriteComment()](http://godoc.org/github.com/magiconair/properties#Properties.WriteComment) if they are all empty - * Add clickable links to README - -### [1.5.1](https://github.com/magiconair/properties/tree/v1.5.1) - 08 Dec 2014 - - * Added [GetParsedDuration()](http://godoc.org/github.com/magiconair/properties#Properties.GetParsedDuration) and [MustGetParsedDuration()](http://godoc.org/github.com/magiconair/properties#Properties.MustGetParsedDuration) for values specified in a format compatible with - [time.ParseDuration()](http://golang.org/pkg/time/#ParseDuration). - -### [1.5.0](https://github.com/magiconair/properties/tree/v1.5.0) - 18 Nov 2014 - - * Added support for single and multi-line comments (reading, writing and updating) - * The order of keys is now preserved - * Calling [Set()](http://godoc.org/github.com/magiconair/properties#Properties.Set) with an empty key now silently ignores the call and does not create a new entry - * Added a [MustSet()](http://godoc.org/github.com/magiconair/properties#Properties.MustSet) method - * Migrated test library from launchpad.net/gocheck to [gopkg.in/check.v1](http://gopkg.in/check.v1) - -### [1.4.2](https://github.com/magiconair/properties/tree/v1.4.2) - 15 Nov 2014 - - * [Issue #2](https://github.com/magiconair/properties/issues/2): Fixed goroutine leak in parser which created two lexers but cleaned up only one - -### [1.4.1](https://github.com/magiconair/properties/tree/v1.4.1) - 13 Nov 2014 - - * [Issue #1](https://github.com/magiconair/properties/issues/1): Fixed bug in Keys() method which returned an empty string - -### [1.4.0](https://github.com/magiconair/properties/tree/v1.4.0) - 23 Sep 2014 - - * Added [Keys()](http://godoc.org/github.com/magiconair/properties#Properties.Keys) to get the keys - * Added [Filter()](http://godoc.org/github.com/magiconair/properties#Properties.Filter), [FilterRegexp()](http://godoc.org/github.com/magiconair/properties#Properties.FilterRegexp) and [FilterPrefix()](http://godoc.org/github.com/magiconair/properties#Properties.FilterPrefix) to get a subset of the properties - -### [1.3.0](https://github.com/magiconair/properties/tree/v1.3.0) - 18 Mar 2014 - -* Added support for time.Duration -* Made MustXXX() failure behavior configurable (log.Fatal, panic, custom) -* Changed default of MustXXX() failure from panic to log.Fatal - -### [1.2.0](https://github.com/magiconair/properties/tree/v1.2.0) - 05 Mar 2014 - -* Added MustGet...
functions -* Added support for int and uint with range checks on 32 bit platforms - -### [1.1.0](https://github.com/magiconair/properties/tree/v1.1.0) - 20 Jan 2014 - -* Renamed from goproperties to properties -* Added support for expansion of environment vars in - filenames and value expressions -* Fixed bug where value expressions were not at the - start of the string - -### [1.0.0](https://github.com/magiconair/properties/tree/v1.0.0) - 7 Jan 2014 - -* Initial release diff --git a/vendor/github.com/magiconair/properties/LICENSE.md b/vendor/github.com/magiconair/properties/LICENSE.md deleted file mode 100644 index 79c87e3e6..000000000 --- a/vendor/github.com/magiconair/properties/LICENSE.md +++ /dev/null @@ -1,24 +0,0 @@ -Copyright (c) 2013-2020, Frank Schroeder - -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/magiconair/properties/README.md b/vendor/github.com/magiconair/properties/README.md deleted file mode 100644 index e2edda025..000000000 --- a/vendor/github.com/magiconair/properties/README.md +++ /dev/null @@ -1,128 +0,0 @@ -[![](https://img.shields.io/github/tag/magiconair/properties.svg?style=flat-square&label=release)](https://github.com/magiconair/properties/releases) -[![Travis CI Status](https://img.shields.io/travis/magiconair/properties.svg?branch=master&style=flat-square&label=travis)](https://travis-ci.org/magiconair/properties) -[![License](https://img.shields.io/badge/License-BSD%202--Clause-orange.svg?style=flat-square)](https://raw.githubusercontent.com/magiconair/properties/master/LICENSE) -[![GoDoc](http://img.shields.io/badge/godoc-reference-5272B4.svg?style=flat-square)](http://godoc.org/github.com/magiconair/properties) - -# Overview - -#### Please run `git pull --tags` to update the tags. See [below](#updated-git-tags) why. - -properties is a Go library for reading and writing properties files. - -It supports reading from multiple files or URLs and Spring style recursive -property expansion of expressions like `${key}` to their corresponding value. -Value expressions can refer to other keys like in `${key}` or to environment -variables like in `${USER}`. Filenames can also contain environment variables -like in `/home/${USER}/myapp.properties`. 
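To make the expansion rules described above concrete, here is a minimal sketch using the package's documented `MustLoadString` and `Get` API (the keys and values are invented for illustration):

```go
package main

import (
	"fmt"

	"github.com/magiconair/properties"
)

func main() {
	// "greeting" expands recursively: ${first} resolves to "Hello".
	p := properties.MustLoadString("first=Hello\ngreeting=${first} world")
	v, ok := p.Get("greeting")
	fmt.Println(v, ok) // Output: Hello world true
}
```

Per the package documentation, a `${...}` reference that is not defined as a key can also resolve from an environment variable (e.g. `${USER}`).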
- -Properties can be decoded into structs, maps, arrays and values through -struct tags. - -Comments and the order of keys are preserved. Comments can be modified -and can be written to the output. - -The properties library supports both ISO-8859-1 and UTF-8 encoded data. - -Starting from version 1.3.0 the behavior of the MustXXX() functions is -configurable by providing a custom `ErrorHandler` function. The default has -changed from `panic` to `log.Fatal` but this is configurable and custom -error handling functions can be provided. See the package documentation for -details. - -Read the full documentation on [![GoDoc](http://img.shields.io/badge/godoc-reference-5272B4.svg?style=flat-square)](http://godoc.org/github.com/magiconair/properties) - -## Getting Started - -```go -import ( - "flag" - "log" - "time" - - "github.com/magiconair/properties" -) - -func main() { - // init from a file - p := properties.MustLoadFile("${HOME}/config.properties", properties.UTF8) - - // or multiple files - p = properties.MustLoadFiles([]string{ - "${HOME}/config.properties", - "${HOME}/config-${USER}.properties", - }, properties.UTF8, true) - - // or from a map - p = properties.LoadMap(map[string]string{"key": "value", "abc": "def"}) - - // or from a string - p = properties.MustLoadString("key=value\nabc=def") - - // or from a URL - p = properties.MustLoadURL("http://host/path") - - // or from multiple URLs - p = properties.MustLoadURLs([]string{ - "http://host/config", - "http://host/config-${USER}", - }, true) - - // or from flags - p.MustFlag(flag.CommandLine) - - // get values through getters - host := p.MustGetString("host") - port := p.GetInt("port", 8080) - - // or through Decode - type Config struct { - Host string `properties:"host"` - Port int `properties:"port,default=9000"` - Accept []string `properties:"accept,default=image/png;image/gif"` - Timeout time.Duration `properties:"timeout,default=5s"` - } - var cfg Config - if err := p.Decode(&cfg); err != nil { - log.Fatal(err) - } -} - -``` - -## Installation and Upgrade - -``` -$ go get -u github.com/magiconair/properties -``` - -## License - -2 clause BSD license. See [LICENSE](https://github.com/magiconair/properties/blob/master/LICENSE) file for details. - -## ToDo - -* Dump contents with passwords and secrets obscured - -## Updated Git tags - -#### 13 Feb 2018 - -I realized that all of the git tags I had pushed before v1.7.5 were lightweight tags -and I've only recently learned that this doesn't play well with `git describe` 😞 - -I have replaced all lightweight tags with signed tags using this script which should -retain the commit date, name and email address. Please run `git pull --tags` to update them. - -Worst case you have to reclone the repo. - -```shell -#!/bin/bash -tag=$1 -echo "Updating $tag" -date=$(git show ${tag}^0 --format=%aD | head -1) -email=$(git show ${tag}^0 --format=%aE | head -1) -name=$(git show ${tag}^0 --format=%aN | head -1) -GIT_COMMITTER_DATE="$date" GIT_COMMITTER_NAME="$name" GIT_COMMITTER_EMAIL="$email" git tag -s -f ${tag} ${tag}^0 -m ${tag} -``` - -I apologize for the inconvenience. - -Frank - diff --git a/vendor/github.com/magiconair/properties/decode.go b/vendor/github.com/magiconair/properties/decode.go deleted file mode 100644 index 8e6aa441d..000000000 --- a/vendor/github.com/magiconair/properties/decode.go +++ /dev/null @@ -1,289 +0,0 @@ -// Copyright 2013-2022 Frank Schroeder. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file.
- -package properties - -import ( - "fmt" - "reflect" - "strconv" - "strings" - "time" -) - -// Decode assigns property values to exported fields of a struct. -// -// Decode traverses v recursively and returns an error if a value cannot be -// converted to the field type or a required value is missing for a field. -// -// The following type-dependent decodings are used: -// -// String, boolean, numeric fields have the value of the property key assigned. -// The property key name is the name of the field. A different key and a default -// value can be set in the field's tag. Fields without default value are -// required. If the value cannot be converted to the field type an error is -// returned. -// -// time.Duration fields have the result of time.ParseDuration() assigned. -// -// time.Time fields have the value of time.Parse() assigned. The default layout -// is time.RFC3339 but can be set in the field's tag. -// -// Arrays and slices of string, boolean, numeric, time.Duration and time.Time -// fields have the value interpreted as a comma separated list of values. The -// individual values are trimmed of whitespace and empty values are ignored. A -// default value can be provided as a semicolon separated list in the field's -// tag. -// -// Struct fields are decoded recursively using the field name plus "." as -// prefix. The prefix (without dot) can be overridden in the field's tag. -// Default values are not supported in the field's tag. Specify them on the -// fields of the inner struct instead. -// -// Map fields must have a key of type string and are decoded recursively by -// using the field's name plus "." as prefix and the next element of the key -// name as map key. The prefix (without dot) can be overridden in the field's -// tag. Default values are not supported. -// -// Examples: -// -// // Field is ignored. -// Field int `properties:"-"` -// -// // Field is assigned value of 'Field'. -// Field int -// -// // Field is assigned value of 'myName'. -// Field int `properties:"myName"` -// -// // Field is assigned value of key 'myName' and has a default -// // value 15 if the key does not exist. -// Field int `properties:"myName,default=15"` -// -// // Field is assigned value of key 'Field' and has a default -// // value 15 if the key does not exist. -// Field int `properties:",default=15"` -// -// // Field is assigned value of key 'date' and the date -// // is in format 2006-01-02 -// Field time.Time `properties:"date,layout=2006-01-02"` -// -// // Field is assigned the non-empty and whitespace trimmed -// // values of key 'Field' split by commas. -// Field []string -// -// // Field is assigned the non-empty and whitespace trimmed -// // values of key 'Field' split by commas and has a default -// // value ["a", "b", "c"] if the key does not exist. -// Field []string `properties:",default=a;b;c"` -// -// // Field is decoded recursively with "Field." as key prefix. -// Field SomeStruct -// -// // Field is decoded recursively with "myName." as key prefix. -// Field SomeStruct `properties:"myName"` -// -// // Field is decoded recursively with "Field." as key prefix -// // and the next dotted element of the key as map key. -// Field map[string]string -// -// // Field is decoded recursively with "myName." as key prefix -// // and the next dotted element of the key as map key.
-// Field map[string]string `properties:"myName"` -func (p *Properties) Decode(x interface{}) error { - t, v := reflect.TypeOf(x), reflect.ValueOf(x) - if t.Kind() != reflect.Ptr || v.Elem().Type().Kind() != reflect.Struct { - return fmt.Errorf("not a pointer to struct: %s", t) - } - if err := dec(p, "", nil, nil, v); err != nil { - return err - } - return nil -} - -func dec(p *Properties, key string, def *string, opts map[string]string, v reflect.Value) error { - t := v.Type() - - // value returns the property value for key or the default if provided. - value := func() (string, error) { - if val, ok := p.Get(key); ok { - return val, nil - } - if def != nil { - return *def, nil - } - return "", fmt.Errorf("missing required key %s", key) - } - - // conv converts a string to a value of the given type. - conv := func(s string, t reflect.Type) (val reflect.Value, err error) { - var v interface{} - - switch { - case isDuration(t): - v, err = time.ParseDuration(s) - - case isTime(t): - layout := opts["layout"] - if layout == "" { - layout = time.RFC3339 - } - v, err = time.Parse(layout, s) - - case isBool(t): - v, err = boolVal(s), nil - - case isString(t): - v, err = s, nil - - case isFloat(t): - v, err = strconv.ParseFloat(s, 64) - - case isInt(t): - v, err = strconv.ParseInt(s, 10, 64) - - case isUint(t): - v, err = strconv.ParseUint(s, 10, 64) - - default: - return reflect.Zero(t), fmt.Errorf("unsupported type %s", t) - } - if err != nil { - return reflect.Zero(t), err - } - return reflect.ValueOf(v).Convert(t), nil - } - - // keydef returns the property key and the default value based on the - // name of the struct field and the options in the tag. - keydef := func(f reflect.StructField) (string, *string, map[string]string) { - _key, _opts := parseTag(f.Tag.Get("properties")) - - var _def *string - if d, ok := _opts["default"]; ok { - _def = &d - } - if _key != "" { - return _key, _def, _opts - } - return f.Name, _def, _opts - } - - switch { - case isDuration(t) || isTime(t) || isBool(t) || isString(t) || isFloat(t) || isInt(t) || isUint(t): - s, err := value() - if err != nil { - return err - } - val, err := conv(s, t) - if err != nil { - return err - } - v.Set(val) - - case isPtr(t): - return dec(p, key, def, opts, v.Elem()) - - case isStruct(t): - for i := 0; i < v.NumField(); i++ { - fv := v.Field(i) - fk, def, opts := keydef(t.Field(i)) - if !fv.CanSet() { - return fmt.Errorf("cannot set %s", t.Field(i).Name) - } - if fk == "-" { - continue - } - if key != "" { - fk = key + "." 
+ fk - } - if err := dec(p, fk, def, opts, fv); err != nil { - return err - } - } - return nil - - case isArray(t): - val, err := value() - if err != nil { - return err - } - vals := split(val, ";") - a := reflect.MakeSlice(t, 0, len(vals)) - for _, s := range vals { - val, err := conv(s, t.Elem()) - if err != nil { - return err - } - a = reflect.Append(a, val) - } - v.Set(a) - - case isMap(t): - valT := t.Elem() - m := reflect.MakeMap(t) - for postfix := range p.FilterStripPrefix(key + ".").m { - pp := strings.SplitN(postfix, ".", 2) - mk, mv := pp[0], reflect.New(valT) - if err := dec(p, key+"."+mk, nil, nil, mv); err != nil { - return err - } - m.SetMapIndex(reflect.ValueOf(mk), mv.Elem()) - } - v.Set(m) - - default: - return fmt.Errorf("unsupported type %s", t) - } - return nil -} - -// split splits a string on sep, trims whitespace of elements -// and omits empty elements -func split(s string, sep string) []string { - var a []string - for _, v := range strings.Split(s, sep) { - if v = strings.TrimSpace(v); v != "" { - a = append(a, v) - } - } - return a -} - -// parseTag parses a "key,k=v,k=v,..." -func parseTag(tag string) (key string, opts map[string]string) { - opts = map[string]string{} - for i, s := range strings.Split(tag, ",") { - if i == 0 { - key = s - continue - } - - pp := strings.SplitN(s, "=", 2) - if len(pp) == 1 { - opts[pp[0]] = "" - } else { - opts[pp[0]] = pp[1] - } - } - return key, opts -} - -func isArray(t reflect.Type) bool { return t.Kind() == reflect.Array || t.Kind() == reflect.Slice } -func isBool(t reflect.Type) bool { return t.Kind() == reflect.Bool } -func isDuration(t reflect.Type) bool { return t == reflect.TypeOf(time.Second) } -func isMap(t reflect.Type) bool { return t.Kind() == reflect.Map } -func isPtr(t reflect.Type) bool { return t.Kind() == reflect.Ptr } -func isString(t reflect.Type) bool { return t.Kind() == reflect.String } -func isStruct(t reflect.Type) bool { return t.Kind() == reflect.Struct } -func isTime(t reflect.Type) bool { return t == reflect.TypeOf(time.Time{}) } -func isFloat(t reflect.Type) bool { - return t.Kind() == reflect.Float32 || t.Kind() == reflect.Float64 -} -func isInt(t reflect.Type) bool { - return t.Kind() == reflect.Int || t.Kind() == reflect.Int8 || t.Kind() == reflect.Int16 || t.Kind() == reflect.Int32 || t.Kind() == reflect.Int64 -} -func isUint(t reflect.Type) bool { - return t.Kind() == reflect.Uint || t.Kind() == reflect.Uint8 || t.Kind() == reflect.Uint16 || t.Kind() == reflect.Uint32 || t.Kind() == reflect.Uint64 -} diff --git a/vendor/github.com/magiconair/properties/doc.go b/vendor/github.com/magiconair/properties/doc.go deleted file mode 100644 index 7c7979315..000000000 --- a/vendor/github.com/magiconair/properties/doc.go +++ /dev/null @@ -1,155 +0,0 @@ -// Copyright 2013-2022 Frank Schroeder. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package properties provides functions for reading and writing -// ISO-8859-1 and UTF-8 encoded .properties files and has -// support for recursive property expansion. -// -// Java properties files are ISO-8859-1 encoded and use Unicode -// literals for characters outside the ISO character set. Unicode -// literals can be used in UTF-8 encoded properties files but -// aren't necessary. 
-// -// To load a single properties file use MustLoadFile(): -// -// p := properties.MustLoadFile(filename, properties.UTF8) -// -// To load multiple properties files use MustLoadFiles() -// which loads the files in the given order and merges the -// result. Missing properties files can be ignored if the -// 'ignoreMissing' flag is set to true. -// -// Filenames can contain environment variables which are expanded -// before loading. -// -// f1 := "/etc/myapp/myapp.conf" -// f2 := "/home/${USER}/myapp.conf" -// p := MustLoadFiles([]string{f1, f2}, properties.UTF8, true) -// -// All of the different key/value delimiters ' ', ':' and '=' are -// supported as well as the comment characters '!' and '#' and -// multi-line values. -// -// ! this is a comment -// # and so is this -// -// # the following expressions are equal -// key value -// key=value -// key:value -// key = value -// key : value -// key = val\ -// ue -// -// Properties stores all comments preceding a key and provides -// GetComments() and SetComments() methods to retrieve and -// update them. The convenience functions GetComment() and -// SetComment() allow access to the last comment. The -// WriteComment() method writes properties files including -// the comments and with the keys in the original order. -// This can be used for sanitizing properties files. -// -// Property expansion is recursive and circular references -// and malformed expressions are not allowed and cause an -// error. Expansion of environment variables is supported. -// -// # standard property -// key = value -// -// # property expansion: key2 = value -// key2 = ${key} -// -// # recursive expansion: key3 = value -// key3 = ${key2} -// -// # circular reference (error) -// key = ${key} -// -// # malformed expression (error) -// key = ${ke -// -// # refers to the users' home dir -// home = ${HOME} -// -// # local key takes precedence over env var: u = foo -// USER = foo -// u = ${USER} -// -// The default property expansion format is ${key} but can be -// changed by setting different pre- and postfix values on the -// Properties object. -// -// p := properties.NewProperties() -// p.Prefix = "#[" -// p.Postfix = "]#" -// -// Properties provides convenience functions for getting typed -// values with default values if the key does not exist or the -// type conversion failed. -// -// # Returns true if the value is either "1", "on", "yes" or "true" -// # Returns false for every other value and the default value if -// # the key does not exist. -// v = p.GetBool("key", false) -// -// # Returns the value if the key exists and the format conversion -// # was successful. Otherwise, the default value is returned. -// v = p.GetInt64("key", 999) -// v = p.GetUint64("key", 999) -// v = p.GetFloat64("key", 123.0) -// v = p.GetString("key", "def") -// v = p.GetDuration("key", 999) -// -// As an alternative properties may be applied with the standard -// library's flag implementation at any time. -// -// # Standard configuration -// v = flag.Int("key", 999, "help message") -// flag.Parse() -// -// # Merge p into the flag set -// p.MustFlag(flag.CommandLine) -// -// Properties provides several MustXXX() convenience functions -// which will terminate the app if an error occurs. The behavior -// of the failure is configurable and the default is to call -// log.Fatal(err). To have the MustXXX() functions panic instead -// of logging the error set a different ErrorHandler before -// you use the Properties package. 
-// -// properties.ErrorHandler = properties.PanicHandler -// -// # Will panic instead of logging an error -// p := properties.MustLoadFile("config.properties") -// -// You can also provide your own ErrorHandler function. The only requirement -// is that the error handler function must exit after handling the error. -// -// properties.ErrorHandler = func(err error) { -// fmt.Println(err) -// os.Exit(1) -// } -// -// # Will write to stdout and then exit -// p := properties.MustLoadFile("config.properties") -// -// Properties can also be loaded into a struct via the `Decode` -// method, e.g. -// -// type S struct { -// A string `properties:"a,default=foo"` -// D time.Duration `properties:"timeout,default=5s"` -// E time.Time `properties:"expires,layout=2006-01-02,default=2015-01-01"` -// } -// -// See `Decode()` method for the full documentation. -// -// The following documents provide a description of the properties -// file format. -// -// http://en.wikipedia.org/wiki/.properties -// -// http://docs.oracle.com/javase/7/docs/api/java/util/Properties.html#load%28java.io.Reader%29 -package properties diff --git a/vendor/github.com/magiconair/properties/integrate.go b/vendor/github.com/magiconair/properties/integrate.go deleted file mode 100644 index 35d0ae97b..000000000 --- a/vendor/github.com/magiconair/properties/integrate.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2013-2022 Frank Schroeder. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package properties - -import "flag" - -// MustFlag sets flags that are skipped by dst.Parse when p contains -// the respective key for flag.Flag.Name. -// -// It's use is recommended with command line arguments as in: -// -// flag.Parse() -// p.MustFlag(flag.CommandLine) -func (p *Properties) MustFlag(dst *flag.FlagSet) { - m := make(map[string]*flag.Flag) - dst.VisitAll(func(f *flag.Flag) { - m[f.Name] = f - }) - dst.Visit(func(f *flag.Flag) { - delete(m, f.Name) // overridden - }) - - for name, f := range m { - v, ok := p.Get(name) - if !ok { - continue - } - - if err := f.Value.Set(v); err != nil { - ErrorHandler(err) - } - } -} diff --git a/vendor/github.com/magiconair/properties/lex.go b/vendor/github.com/magiconair/properties/lex.go deleted file mode 100644 index 3d15a1f6e..000000000 --- a/vendor/github.com/magiconair/properties/lex.go +++ /dev/null @@ -1,395 +0,0 @@ -// Copyright 2013-2022 Frank Schroeder. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// -// Parts of the lexer are from the template/text/parser package -// For these parts the following applies: -// -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file of the go 1.2 -// distribution. - -package properties - -import ( - "fmt" - "strconv" - "strings" - "unicode/utf8" -) - -// item represents a token or text string returned from the scanner. -type item struct { - typ itemType // The type of this item. - pos int // The starting position, in bytes, of this item in the input string. - val string // The value of this item. -} - -func (i item) String() string { - switch { - case i.typ == itemEOF: - return "EOF" - case i.typ == itemError: - return i.val - case len(i.val) > 10: - return fmt.Sprintf("%.10q...", i.val) - } - return fmt.Sprintf("%q", i.val) -} - -// itemType identifies the type of lex items. 
-type itemType int - -const ( - itemError itemType = iota // error occurred; value is text of error - itemEOF - itemKey // a key - itemValue // a value - itemComment // a comment -) - -// defines a constant for EOF -const eof = -1 - -// permitted whitespace characters space, FF and TAB -const whitespace = " \f\t" - -// stateFn represents the state of the scanner as a function that returns the next state. -type stateFn func(*lexer) stateFn - -// lexer holds the state of the scanner. -type lexer struct { - input string // the string being scanned - state stateFn // the next lexing function to enter - pos int // current position in the input - start int // start position of this item - width int // width of last rune read from input - lastPos int // position of most recent item returned by nextItem - runes []rune // scanned runes for this item - items chan item // channel of scanned items -} - -// next returns the next rune in the input. -func (l *lexer) next() rune { - if l.pos >= len(l.input) { - l.width = 0 - return eof - } - r, w := utf8.DecodeRuneInString(l.input[l.pos:]) - l.width = w - l.pos += l.width - return r -} - -// peek returns but does not consume the next rune in the input. -func (l *lexer) peek() rune { - r := l.next() - l.backup() - return r -} - -// backup steps back one rune. Can only be called once per call of next. -func (l *lexer) backup() { - l.pos -= l.width -} - -// emit passes an item back to the client. -func (l *lexer) emit(t itemType) { - i := item{t, l.start, string(l.runes)} - l.items <- i - l.start = l.pos - l.runes = l.runes[:0] -} - -// ignore skips over the pending input before this point. -func (l *lexer) ignore() { - l.start = l.pos -} - -// appends the rune to the current value -func (l *lexer) appendRune(r rune) { - l.runes = append(l.runes, r) -} - -// accept consumes the next rune if it's from the valid set. -func (l *lexer) accept(valid string) bool { - if strings.ContainsRune(valid, l.next()) { - return true - } - l.backup() - return false -} - -// acceptRun consumes a run of runes from the valid set. -func (l *lexer) acceptRun(valid string) { - for strings.ContainsRune(valid, l.next()) { - } - l.backup() -} - -// lineNumber reports which line we're on, based on the position of -// the previous item returned by nextItem. Doing it this way -// means we don't have to worry about peek double counting. -func (l *lexer) lineNumber() int { - return 1 + strings.Count(l.input[:l.lastPos], "\n") -} - -// errorf returns an error token and terminates the scan by passing -// back a nil pointer that will be the next state, terminating l.nextItem. -func (l *lexer) errorf(format string, args ...interface{}) stateFn { - l.items <- item{itemError, l.start, fmt.Sprintf(format, args...)} - return nil -} - -// nextItem returns the next item from the input. -func (l *lexer) nextItem() item { - i := <-l.items - l.lastPos = i.pos - return i -} - -// lex creates a new scanner for the input string. -func lex(input string) *lexer { - l := &lexer{ - input: input, - items: make(chan item), - runes: make([]rune, 0, 32), - } - go l.run() - return l -} - -// run runs the state machine for the lexer. -func (l *lexer) run() { - for l.state = lexBeforeKey(l); l.state != nil; { - l.state = l.state(l) - } -} - -// state functions - -// lexBeforeKey scans until a key begins. 
-func lexBeforeKey(l *lexer) stateFn { - switch r := l.next(); { - case isEOF(r): - l.emit(itemEOF) - return nil - - case isEOL(r): - l.ignore() - return lexBeforeKey - - case isComment(r): - return lexComment - - case isWhitespace(r): - l.ignore() - return lexBeforeKey - - default: - l.backup() - return lexKey - } -} - -// lexComment scans a comment line. The comment character has already been scanned. -func lexComment(l *lexer) stateFn { - l.acceptRun(whitespace) - l.ignore() - for { - switch r := l.next(); { - case isEOF(r): - l.ignore() - l.emit(itemEOF) - return nil - case isEOL(r): - l.emit(itemComment) - return lexBeforeKey - default: - l.appendRune(r) - } - } -} - -// lexKey scans the key up to a delimiter -func lexKey(l *lexer) stateFn { - var r rune - -Loop: - for { - switch r = l.next(); { - - case isEscape(r): - err := l.scanEscapeSequence() - if err != nil { - return l.errorf(err.Error()) - } - - case isEndOfKey(r): - l.backup() - break Loop - - case isEOF(r): - break Loop - - default: - l.appendRune(r) - } - } - - if len(l.runes) > 0 { - l.emit(itemKey) - } - - if isEOF(r) { - l.emit(itemEOF) - return nil - } - - return lexBeforeValue -} - -// lexBeforeValue scans the delimiter between key and value. -// Leading and trailing whitespace is ignored. -// We expect to be just after the key. -func lexBeforeValue(l *lexer) stateFn { - l.acceptRun(whitespace) - l.accept(":=") - l.acceptRun(whitespace) - l.ignore() - return lexValue -} - -// lexValue scans text until the end of the line. We expect to be just after the delimiter. -func lexValue(l *lexer) stateFn { - for { - switch r := l.next(); { - case isEscape(r): - if isEOL(l.peek()) { - l.next() - l.acceptRun(whitespace) - } else { - err := l.scanEscapeSequence() - if err != nil { - return l.errorf(err.Error()) - } - } - - case isEOL(r): - l.emit(itemValue) - l.ignore() - return lexBeforeKey - - case isEOF(r): - l.emit(itemValue) - l.emit(itemEOF) - return nil - - default: - l.appendRune(r) - } - } -} - -// scanEscapeSequence scans either one of the escaped characters -// or a unicode literal. We expect to be after the escape character. -func (l *lexer) scanEscapeSequence() error { - switch r := l.next(); { - - case isEscapedCharacter(r): - l.appendRune(decodeEscapedCharacter(r)) - return nil - - case atUnicodeLiteral(r): - return l.scanUnicodeLiteral() - - case isEOF(r): - return fmt.Errorf("premature EOF") - - // silently drop the escape character and append the rune as is - default: - l.appendRune(r) - return nil - } -} - -// scans a unicode literal in the form \uXXXX. We expect to be after the \u. -func (l *lexer) scanUnicodeLiteral() error { - // scan the digits - d := make([]rune, 4) - for i := 0; i < 4; i++ { - d[i] = l.next() - if d[i] == eof || !strings.ContainsRune("0123456789abcdefABCDEF", d[i]) { - return fmt.Errorf("invalid unicode literal") - } - } - - // decode the digits into a rune - r, err := strconv.ParseInt(string(d), 16, 0) - if err != nil { - return err - } - - l.appendRune(rune(r)) - return nil -} - -// decodeEscapedCharacter returns the unescaped rune. We expect to be after the escape character. -func decodeEscapedCharacter(r rune) rune { - switch r { - case 'f': - return '\f' - case 'n': - return '\n' - case 'r': - return '\r' - case 't': - return '\t' - default: - return r - } -} - -// atUnicodeLiteral reports whether we are at a unicode literal. -// The escape character has already been consumed. 
-func atUnicodeLiteral(r rune) bool { - return r == 'u' -} - -// isComment reports whether we are at the start of a comment. -func isComment(r rune) bool { - return r == '#' || r == '!' -} - -// isEndOfKey reports whether the rune terminates the current key. -func isEndOfKey(r rune) bool { - return strings.ContainsRune(" \f\t\r\n:=", r) -} - -// isEOF reports whether we are at EOF. -func isEOF(r rune) bool { - return r == eof -} - -// isEOL reports whether we are at a new line character. -func isEOL(r rune) bool { - return r == '\n' || r == '\r' -} - -// isEscape reports whether the rune is the escape character which -// prefixes unicode literals and other escaped characters. -func isEscape(r rune) bool { - return r == '\\' -} - -// isEscapedCharacter reports whether we are at one of the characters that need escaping. -// The escape character has already been consumed. -func isEscapedCharacter(r rune) bool { - return strings.ContainsRune(" :=fnrt", r) -} - -// isWhitespace reports whether the rune is a whitespace character. -func isWhitespace(r rune) bool { - return strings.ContainsRune(whitespace, r) -} diff --git a/vendor/github.com/magiconair/properties/load.go b/vendor/github.com/magiconair/properties/load.go deleted file mode 100644 index 635368dc8..000000000 --- a/vendor/github.com/magiconair/properties/load.go +++ /dev/null @@ -1,293 +0,0 @@ -// Copyright 2013-2022 Frank Schroeder. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package properties - -import ( - "fmt" - "io/ioutil" - "net/http" - "os" - "strings" -) - -// Encoding specifies encoding of the input data. -type Encoding uint - -const ( - // utf8Default is a private placeholder for the zero value of Encoding to - // ensure that it has the correct meaning. UTF8 is the default encoding but - // was assigned a non-zero value which cannot be changed without breaking - // existing code. Clients should continue to use the public constants. - utf8Default Encoding = iota - - // UTF8 interprets the input data as UTF-8. - UTF8 - - // ISO_8859_1 interprets the input data as ISO-8859-1. - ISO_8859_1 -) - -type Loader struct { - // Encoding determines how the data from files and byte buffers - // is interpreted. For URLs the Content-Type header is used - // to determine the encoding of the data. - Encoding Encoding - - // DisableExpansion configures the property expansion of the - // returned property object. When set to true, the property values - // will not be expanded and the Property object will not be checked - // for invalid expansion expressions. - DisableExpansion bool - - // IgnoreMissing configures whether missing files or URLs which return - // 404 are reported as errors. When set to true, missing files and 404 - // status codes are not reported as errors. - IgnoreMissing bool -} - -// Load reads a buffer into a Properties struct. -func (l *Loader) LoadBytes(buf []byte) (*Properties, error) { - return l.loadBytes(buf, l.Encoding) -} - -// LoadAll reads the content of multiple URLs or files in the given order into -// a Properties struct. If IgnoreMissing is true then a 404 status code or -// missing file will not be reported as error. Encoding sets the encoding for -// files. For the URLs see LoadURL for the Content-Type header and the -// encoding. 
-func (l *Loader) LoadAll(names []string) (*Properties, error) { - all := NewProperties() - for _, name := range names { - n, err := expandName(name) - if err != nil { - return nil, err - } - - var p *Properties - switch { - case strings.HasPrefix(n, "http://"): - p, err = l.LoadURL(n) - case strings.HasPrefix(n, "https://"): - p, err = l.LoadURL(n) - default: - p, err = l.LoadFile(n) - } - if err != nil { - return nil, err - } - all.Merge(p) - } - - all.DisableExpansion = l.DisableExpansion - if all.DisableExpansion { - return all, nil - } - return all, all.check() -} - -// LoadFile reads a file into a Properties struct. -// If IgnoreMissing is true then a missing file will not be -// reported as error. -func (l *Loader) LoadFile(filename string) (*Properties, error) { - data, err := ioutil.ReadFile(filename) - if err != nil { - if l.IgnoreMissing && os.IsNotExist(err) { - LogPrintf("properties: %s not found. skipping", filename) - return NewProperties(), nil - } - return nil, err - } - return l.loadBytes(data, l.Encoding) -} - -// LoadURL reads the content of the URL into a Properties struct. -// -// The encoding is determined via the Content-Type header which -// should be set to 'text/plain'. If the 'charset' parameter is -// missing, 'iso-8859-1' or 'latin1' the encoding is set to -// ISO-8859-1. If the 'charset' parameter is set to 'utf-8' the -// encoding is set to UTF-8. A missing content type header is -// interpreted as 'text/plain; charset=utf-8'. -func (l *Loader) LoadURL(url string) (*Properties, error) { - resp, err := http.Get(url) - if err != nil { - return nil, fmt.Errorf("properties: error fetching %q. %s", url, err) - } - defer resp.Body.Close() - - if resp.StatusCode == 404 && l.IgnoreMissing { - LogPrintf("properties: %s returned %d. skipping", url, resp.StatusCode) - return NewProperties(), nil - } - - if resp.StatusCode != 200 { - return nil, fmt.Errorf("properties: %s returned %d", url, resp.StatusCode) - } - - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, fmt.Errorf("properties: %s error reading response. %s", url, err) - } - - ct := resp.Header.Get("Content-Type") - ct = strings.Join(strings.Fields(ct), "") - var enc Encoding - switch strings.ToLower(ct) { - case "text/plain", "text/plain;charset=iso-8859-1", "text/plain;charset=latin1": - enc = ISO_8859_1 - case "", "text/plain;charset=utf-8": - enc = UTF8 - default: - return nil, fmt.Errorf("properties: invalid content type %s", ct) - } - - return l.loadBytes(body, enc) -} - -func (l *Loader) loadBytes(buf []byte, enc Encoding) (*Properties, error) { - p, err := parse(convert(buf, enc)) - if err != nil { - return nil, err - } - p.DisableExpansion = l.DisableExpansion - if p.DisableExpansion { - return p, nil - } - return p, p.check() -} - -// Load reads a buffer into a Properties struct. -func Load(buf []byte, enc Encoding) (*Properties, error) { - l := &Loader{Encoding: enc} - return l.LoadBytes(buf) -} - -// LoadString reads an UTF8 string into a properties struct. -func LoadString(s string) (*Properties, error) { - l := &Loader{Encoding: UTF8} - return l.LoadBytes([]byte(s)) -} - -// LoadMap creates a new Properties struct from a string map. -func LoadMap(m map[string]string) *Properties { - p := NewProperties() - for k, v := range m { - p.Set(k, v) - } - return p -} - -// LoadFile reads a file into a Properties struct. 
-func LoadFile(filename string, enc Encoding) (*Properties, error) { - l := &Loader{Encoding: enc} - return l.LoadAll([]string{filename}) -} - -// LoadFiles reads multiple files in the given order into -// a Properties struct. If 'ignoreMissing' is true then -// non-existent files will not be reported as error. -func LoadFiles(filenames []string, enc Encoding, ignoreMissing bool) (*Properties, error) { - l := &Loader{Encoding: enc, IgnoreMissing: ignoreMissing} - return l.LoadAll(filenames) -} - -// LoadURL reads the content of the URL into a Properties struct. -// See Loader#LoadURL for details. -func LoadURL(url string) (*Properties, error) { - l := &Loader{Encoding: UTF8} - return l.LoadAll([]string{url}) -} - -// LoadURLs reads the content of multiple URLs in the given order into a -// Properties struct. If IgnoreMissing is true then a 404 status code will -// not be reported as error. See Loader#LoadURL for the Content-Type header -// and the encoding. -func LoadURLs(urls []string, ignoreMissing bool) (*Properties, error) { - l := &Loader{Encoding: UTF8, IgnoreMissing: ignoreMissing} - return l.LoadAll(urls) -} - -// LoadAll reads the content of multiple URLs or files in the given order into a -// Properties struct. If 'ignoreMissing' is true then a 404 status code or missing file will -// not be reported as error. Encoding sets the encoding for files. For the URLs please see -// LoadURL for the Content-Type header and the encoding. -func LoadAll(names []string, enc Encoding, ignoreMissing bool) (*Properties, error) { - l := &Loader{Encoding: enc, IgnoreMissing: ignoreMissing} - return l.LoadAll(names) -} - -// MustLoadString reads an UTF8 string into a Properties struct and -// panics on error. -func MustLoadString(s string) *Properties { - return must(LoadString(s)) -} - -// MustLoadFile reads a file into a Properties struct and -// panics on error. -func MustLoadFile(filename string, enc Encoding) *Properties { - return must(LoadFile(filename, enc)) -} - -// MustLoadFiles reads multiple files in the given order into -// a Properties struct and panics on error. If 'ignoreMissing' -// is true then non-existent files will not be reported as error. -func MustLoadFiles(filenames []string, enc Encoding, ignoreMissing bool) *Properties { - return must(LoadFiles(filenames, enc, ignoreMissing)) -} - -// MustLoadURL reads the content of a URL into a Properties struct and -// panics on error. -func MustLoadURL(url string) *Properties { - return must(LoadURL(url)) -} - -// MustLoadURLs reads the content of multiple URLs in the given order into a -// Properties struct and panics on error. If 'ignoreMissing' is true then a 404 -// status code will not be reported as error. -func MustLoadURLs(urls []string, ignoreMissing bool) *Properties { - return must(LoadURLs(urls, ignoreMissing)) -} - -// MustLoadAll reads the content of multiple URLs or files in the given order into a -// Properties struct. If 'ignoreMissing' is true then a 404 status code or missing file will -// not be reported as error. Encoding sets the encoding for files. For the URLs please see -// LoadURL for the Content-Type header and the encoding. It panics on error. -func MustLoadAll(names []string, enc Encoding, ignoreMissing bool) *Properties { - return must(LoadAll(names, enc, ignoreMissing)) -} - -func must(p *Properties, err error) *Properties { - if err != nil { - ErrorHandler(err) - } - return p -} - -// expandName expands ${ENV_VAR} expressions in a name. 
-// If the environment variable does not exist then it will be replaced -// with an empty string. Malformed expressions like "${ENV_VAR" will -// be reported as error. -func expandName(name string) (string, error) { - return expand(name, []string{}, "${", "}", make(map[string]string)) -} - -// Interprets a byte buffer either as an ISO-8859-1 or UTF-8 encoded string. -// For ISO-8859-1 we can convert each byte straight into a rune since the -// first 256 unicode code points cover ISO-8859-1. -func convert(buf []byte, enc Encoding) string { - switch enc { - case utf8Default, UTF8: - return string(buf) - case ISO_8859_1: - runes := make([]rune, len(buf)) - for i, b := range buf { - runes[i] = rune(b) - } - return string(runes) - default: - ErrorHandler(fmt.Errorf("unsupported encoding %v", enc)) - } - panic("ErrorHandler should exit") -} diff --git a/vendor/github.com/magiconair/properties/parser.go b/vendor/github.com/magiconair/properties/parser.go deleted file mode 100644 index fccfd39f6..000000000 --- a/vendor/github.com/magiconair/properties/parser.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2013-2022 Frank Schroeder. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package properties - -import ( - "fmt" - "runtime" -) - -type parser struct { - lex *lexer -} - -func parse(input string) (properties *Properties, err error) { - p := &parser{lex: lex(input)} - defer p.recover(&err) - - properties = NewProperties() - key := "" - comments := []string{} - - for { - token := p.expectOneOf(itemComment, itemKey, itemEOF) - switch token.typ { - case itemEOF: - goto done - case itemComment: - comments = append(comments, token.val) - continue - case itemKey: - key = token.val - if _, ok := properties.m[key]; !ok { - properties.k = append(properties.k, key) - } - } - - token = p.expectOneOf(itemValue, itemEOF) - if len(comments) > 0 { - properties.c[key] = comments - comments = []string{} - } - switch token.typ { - case itemEOF: - properties.m[key] = "" - goto done - case itemValue: - properties.m[key] = token.val - } - } - -done: - return properties, nil -} - -func (p *parser) errorf(format string, args ...interface{}) { - format = fmt.Sprintf("properties: Line %d: %s", p.lex.lineNumber(), format) - panic(fmt.Errorf(format, args...)) -} - -func (p *parser) expectOneOf(expected ...itemType) (token item) { - token = p.lex.nextItem() - for _, v := range expected { - if token.typ == v { - return token - } - } - p.unexpected(token) - panic("unexpected token") -} - -func (p *parser) unexpected(token item) { - p.errorf(token.String()) -} - -// recover is the handler that turns panics into returns from the top level of Parse. -func (p *parser) recover(errp *error) { - e := recover() - if e != nil { - if _, ok := e.(runtime.Error); ok { - panic(e) - } - *errp = e.(error) - } -} diff --git a/vendor/github.com/magiconair/properties/properties.go b/vendor/github.com/magiconair/properties/properties.go deleted file mode 100644 index fb2f7b404..000000000 --- a/vendor/github.com/magiconair/properties/properties.go +++ /dev/null @@ -1,848 +0,0 @@ -// Copyright 2013-2022 Frank Schroeder. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package properties - -// BUG(frank): Set() does not check for invalid unicode literals since this is currently handled by the lexer. -// BUG(frank): Write() does not allow to configure the newline character. 
Therefore, on Windows LF is used. - -import ( - "bytes" - "fmt" - "io" - "log" - "os" - "regexp" - "sort" - "strconv" - "strings" - "time" - "unicode/utf8" -) - -const maxExpansionDepth = 64 - -// ErrorHandlerFunc defines the type of function which handles failures -// of the MustXXX() functions. An error handler function must exit -// the application after handling the error. -type ErrorHandlerFunc func(error) - -// ErrorHandler is the function which handles failures of the MustXXX() -// functions. The default is LogFatalHandler. -var ErrorHandler ErrorHandlerFunc = LogFatalHandler - -// LogHandlerFunc defines the function prototype for logging errors. -type LogHandlerFunc func(fmt string, args ...interface{}) - -// LogPrintf defines a log handler which uses log.Printf. -var LogPrintf LogHandlerFunc = log.Printf - -// LogFatalHandler handles the error by logging a fatal error and exiting. -func LogFatalHandler(err error) { - log.Fatal(err) -} - -// PanicHandler handles the error by panicking. -func PanicHandler(err error) { - panic(err) -} - -// ----------------------------------------------------------------------------- - -// A Properties contains the key/value pairs from the properties input. -// All values are stored in unexpanded form and are expanded at runtime -type Properties struct { - // Pre-/Postfix for property expansion. - Prefix string - Postfix string - - // DisableExpansion controls the expansion of properties on Get() - // and the check for circular references on Set(). When set to - // true Properties behaves like a simple key/value store and does - // not check for circular references on Get() or on Set(). - DisableExpansion bool - - // Stores the key/value pairs - m map[string]string - - // Stores the comments per key. - c map[string][]string - - // Stores the keys in order of appearance. - k []string - - // WriteSeparator specifies the separator of key and value while writing the properties. - WriteSeparator string -} - -// NewProperties creates a new Properties struct with the default -// configuration for "${key}" expressions. -func NewProperties() *Properties { - return &Properties{ - Prefix: "${", - Postfix: "}", - m: map[string]string{}, - c: map[string][]string{}, - k: []string{}, - } -} - -// Load reads a buffer into the given Properties struct. -func (p *Properties) Load(buf []byte, enc Encoding) error { - l := &Loader{Encoding: enc, DisableExpansion: p.DisableExpansion} - newProperties, err := l.LoadBytes(buf) - if err != nil { - return err - } - p.Merge(newProperties) - return nil -} - -// Get returns the expanded value for the given key if exists. -// Otherwise, ok is false. -func (p *Properties) Get(key string) (value string, ok bool) { - v, ok := p.m[key] - if p.DisableExpansion { - return v, ok - } - if !ok { - return "", false - } - - expanded, err := p.expand(key, v) - - // we guarantee that the expanded value is free of - // circular references and malformed expressions - // so we panic if we still get an error here. - if err != nil { - ErrorHandler(err) - } - - return expanded, true -} - -// MustGet returns the expanded value for the given key if exists. -// Otherwise, it panics. -func (p *Properties) MustGet(key string) string { - if v, ok := p.Get(key); ok { - return v - } - ErrorHandler(invalidKeyError(key)) - panic("ErrorHandler should exit") -} - -// ---------------------------------------------------------------------------- - -// ClearComments removes the comments for all keys. 
-func (p *Properties) ClearComments() { - p.c = map[string][]string{} -} - -// ---------------------------------------------------------------------------- - -// GetComment returns the last comment before the given key or an empty string. -func (p *Properties) GetComment(key string) string { - comments, ok := p.c[key] - if !ok || len(comments) == 0 { - return "" - } - return comments[len(comments)-1] -} - -// ---------------------------------------------------------------------------- - -// GetComments returns all comments that appeared before the given key or nil. -func (p *Properties) GetComments(key string) []string { - if comments, ok := p.c[key]; ok { - return comments - } - return nil -} - -// ---------------------------------------------------------------------------- - -// SetComment sets the comment for the key. -func (p *Properties) SetComment(key, comment string) { - p.c[key] = []string{comment} -} - -// ---------------------------------------------------------------------------- - -// SetComments sets the comments for the key. If the comments are nil then -// all comments for this key are deleted. -func (p *Properties) SetComments(key string, comments []string) { - if comments == nil { - delete(p.c, key) - return - } - p.c[key] = comments -} - -// ---------------------------------------------------------------------------- - -// GetBool checks if the expanded value is one of '1', 'yes', -// 'true' or 'on' if the key exists. The comparison is case-insensitive. -// If the key does not exist the default value is returned. -func (p *Properties) GetBool(key string, def bool) bool { - v, err := p.getBool(key) - if err != nil { - return def - } - return v -} - -// MustGetBool checks if the expanded value is one of '1', 'yes', -// 'true' or 'on' if the key exists. The comparison is case-insensitive. -// If the key does not exist the function panics. -func (p *Properties) MustGetBool(key string) bool { - v, err := p.getBool(key) - if err != nil { - ErrorHandler(err) - } - return v -} - -func (p *Properties) getBool(key string) (value bool, err error) { - if v, ok := p.Get(key); ok { - return boolVal(v), nil - } - return false, invalidKeyError(key) -} - -func boolVal(v string) bool { - v = strings.ToLower(v) - return v == "1" || v == "true" || v == "yes" || v == "on" -} - -// ---------------------------------------------------------------------------- - -// GetDuration parses the expanded value as an time.Duration (in ns) if the -// key exists. If key does not exist or the value cannot be parsed the default -// value is returned. In almost all cases you want to use GetParsedDuration(). -func (p *Properties) GetDuration(key string, def time.Duration) time.Duration { - v, err := p.getInt64(key) - if err != nil { - return def - } - return time.Duration(v) -} - -// MustGetDuration parses the expanded value as an time.Duration (in ns) if -// the key exists. If key does not exist or the value cannot be parsed the -// function panics. In almost all cases you want to use MustGetParsedDuration(). -func (p *Properties) MustGetDuration(key string) time.Duration { - v, err := p.getInt64(key) - if err != nil { - ErrorHandler(err) - } - return time.Duration(v) -} - -// ---------------------------------------------------------------------------- - -// GetParsedDuration parses the expanded value with time.ParseDuration() if the key exists. -// If key does not exist or the value cannot be parsed the default -// value is returned. 
-func (p *Properties) GetParsedDuration(key string, def time.Duration) time.Duration {
-	s, ok := p.Get(key)
-	if !ok {
-		return def
-	}
-	v, err := time.ParseDuration(s)
-	if err != nil {
-		return def
-	}
-	return v
-}
-
-// MustGetParsedDuration parses the expanded value with time.ParseDuration() if the key exists.
-// If key does not exist or the value cannot be parsed the function panics.
-func (p *Properties) MustGetParsedDuration(key string) time.Duration {
-	s, ok := p.Get(key)
-	if !ok {
-		ErrorHandler(invalidKeyError(key))
-	}
-	v, err := time.ParseDuration(s)
-	if err != nil {
-		ErrorHandler(err)
-	}
-	return v
-}
-
-// ----------------------------------------------------------------------------
-
-// GetFloat64 parses the expanded value as a float64 if the key exists.
-// If key does not exist or the value cannot be parsed the default
-// value is returned.
-func (p *Properties) GetFloat64(key string, def float64) float64 {
-	v, err := p.getFloat64(key)
-	if err != nil {
-		return def
-	}
-	return v
-}
-
-// MustGetFloat64 parses the expanded value as a float64 if the key exists.
-// If key does not exist or the value cannot be parsed the function panics.
-func (p *Properties) MustGetFloat64(key string) float64 {
-	v, err := p.getFloat64(key)
-	if err != nil {
-		ErrorHandler(err)
-	}
-	return v
-}
-
-func (p *Properties) getFloat64(key string) (value float64, err error) {
-	if v, ok := p.Get(key); ok {
-		value, err = strconv.ParseFloat(v, 64)
-		if err != nil {
-			return 0, err
-		}
-		return value, nil
-	}
-	return 0, invalidKeyError(key)
-}
-
-// ----------------------------------------------------------------------------
-
-// GetInt parses the expanded value as an int if the key exists.
-// If key does not exist or the value cannot be parsed the default
-// value is returned. If the value does not fit into an int the
-// function panics with an out of range error.
-func (p *Properties) GetInt(key string, def int) int {
-	v, err := p.getInt64(key)
-	if err != nil {
-		return def
-	}
-	return intRangeCheck(key, v)
-}
-
-// MustGetInt parses the expanded value as an int if the key exists.
-// If key does not exist or the value cannot be parsed the function panics.
-// If the value does not fit into an int the function panics with
-// an out of range error.
-func (p *Properties) MustGetInt(key string) int {
-	v, err := p.getInt64(key)
-	if err != nil {
-		ErrorHandler(err)
-	}
-	return intRangeCheck(key, v)
-}
-
-// ----------------------------------------------------------------------------
-
-// GetInt64 parses the expanded value as an int64 if the key exists.
-// If key does not exist or the value cannot be parsed the default
-// value is returned.
-func (p *Properties) GetInt64(key string, def int64) int64 {
-	v, err := p.getInt64(key)
-	if err != nil {
-		return def
-	}
-	return v
-}
-
-// MustGetInt64 parses the expanded value as an int64 if the key exists.
-// If key does not exist or the value cannot be parsed the function panics.
-func (p *Properties) MustGetInt64(key string) int64 {
-	v, err := p.getInt64(key)
-	if err != nil {
-		ErrorHandler(err)
-	}
-	return v
-}
-
-func (p *Properties) getInt64(key string) (value int64, err error) {
-	if v, ok := p.Get(key); ok {
-		value, err = strconv.ParseInt(v, 10, 64)
-		if err != nil {
-			return 0, err
-		}
-		return value, nil
-	}
-	return 0, invalidKeyError(key)
-}
-
-// ----------------------------------------------------------------------------
-
-// GetUint parses the expanded value as a uint if the key exists.
-// If key does not exist or the value cannot be parsed the default
-// value is returned. If the value does not fit into a uint the
-// function panics with an out of range error.
-func (p *Properties) GetUint(key string, def uint) uint {
-	v, err := p.getUint64(key)
-	if err != nil {
-		return def
-	}
-	return uintRangeCheck(key, v)
-}
-
-// MustGetUint parses the expanded value as a uint if the key exists.
-// If key does not exist or the value cannot be parsed the function panics.
-// If the value does not fit into a uint the function panics with
-// an out of range error.
-func (p *Properties) MustGetUint(key string) uint {
-	v, err := p.getUint64(key)
-	if err != nil {
-		ErrorHandler(err)
-	}
-	return uintRangeCheck(key, v)
-}
-
-// ----------------------------------------------------------------------------
-
-// GetUint64 parses the expanded value as a uint64 if the key exists.
-// If key does not exist or the value cannot be parsed the default
-// value is returned.
-func (p *Properties) GetUint64(key string, def uint64) uint64 {
-	v, err := p.getUint64(key)
-	if err != nil {
-		return def
-	}
-	return v
-}
-
-// MustGetUint64 parses the expanded value as a uint64 if the key exists.
-// If key does not exist or the value cannot be parsed the function panics.
-func (p *Properties) MustGetUint64(key string) uint64 {
-	v, err := p.getUint64(key)
-	if err != nil {
-		ErrorHandler(err)
-	}
-	return v
-}
-
-func (p *Properties) getUint64(key string) (value uint64, err error) {
-	if v, ok := p.Get(key); ok {
-		value, err = strconv.ParseUint(v, 10, 64)
-		if err != nil {
-			return 0, err
-		}
-		return value, nil
-	}
-	return 0, invalidKeyError(key)
-}
-
-// ----------------------------------------------------------------------------
-
-// GetString returns the expanded value for the given key if it exists or
-// the default value otherwise.
-func (p *Properties) GetString(key, def string) string {
-	if v, ok := p.Get(key); ok {
-		return v
-	}
-	return def
-}
-
-// MustGetString returns the expanded value for the given key if it exists or
-// panics otherwise.
-func (p *Properties) MustGetString(key string) string {
-	if v, ok := p.Get(key); ok {
-		return v
-	}
-	ErrorHandler(invalidKeyError(key))
-	panic("ErrorHandler should exit")
-}
-
-// ----------------------------------------------------------------------------
-
-// Filter returns a new properties object which contains all properties
-// for which the key matches the pattern.
-func (p *Properties) Filter(pattern string) (*Properties, error) {
-	re, err := regexp.Compile(pattern)
-	if err != nil {
-		return nil, err
-	}
-
-	return p.FilterRegexp(re), nil
-}
-
-// FilterRegexp returns a new properties object which contains all properties
-// for which the key matches the regular expression.
-func (p *Properties) FilterRegexp(re *regexp.Regexp) *Properties {
-	pp := NewProperties()
-	for _, k := range p.k {
-		if re.MatchString(k) {
-			// TODO(fs): we are ignoring the error which flags a circular reference.
-			// TODO(fs): since we are just copying a subset of keys this cannot happen (fingers crossed)
-			pp.Set(k, p.m[k])
-		}
-	}
-	return pp
-}
-
-// FilterPrefix returns a new properties object with a subset of all keys
-// with the given prefix.
-func (p *Properties) FilterPrefix(prefix string) *Properties {
-	pp := NewProperties()
-	for _, k := range p.k {
-		if strings.HasPrefix(k, prefix) {
-			// TODO(fs): we are ignoring the error which flags a circular reference.
-			// TODO(fs): since we are just copying a subset of keys this cannot happen (fingers crossed)
-			pp.Set(k, p.m[k])
-		}
-	}
-	return pp
-}
-
-// FilterStripPrefix returns a new properties object with a subset of all keys
-// with the given prefix and the prefix removed from the keys.
-func (p *Properties) FilterStripPrefix(prefix string) *Properties {
-	pp := NewProperties()
-	n := len(prefix)
-	for _, k := range p.k {
-		if len(k) > len(prefix) && strings.HasPrefix(k, prefix) {
-			// TODO(fs): we are ignoring the error which flags a circular reference.
-			// TODO(fs): since we are modifying keys I am not entirely sure whether we can create a circular reference
-			// TODO(fs): this function should probably return an error but the signature is fixed
-			pp.Set(k[n:], p.m[k])
-		}
-	}
-	return pp
-}
-
-// Len returns the number of keys.
-func (p *Properties) Len() int {
-	return len(p.m)
-}
-
-// Keys returns all keys in the same order as in the input.
-func (p *Properties) Keys() []string {
-	keys := make([]string, len(p.k))
-	copy(keys, p.k)
-	return keys
-}
-
-// Set sets the property key to the corresponding value.
-// If a value for key existed before then ok is true and prev
-// contains the previous value. If the value contains a
-// circular reference or a malformed expression then
-// an error is returned.
-// An empty key is silently ignored.
-func (p *Properties) Set(key, value string) (prev string, ok bool, err error) {
-	if key == "" {
-		return "", false, nil
-	}
-
-	// if expansion is disabled we allow circular references
-	if p.DisableExpansion {
-		prev, ok = p.Get(key)
-		p.m[key] = value
-		if !ok {
-			p.k = append(p.k, key)
-		}
-		return prev, ok, nil
-	}
-
-	// to check for a circular reference we temporarily need
-	// to set the new value. If there is an error then revert
-	// to the previous state. Only if all tests are successful
-	// then we add the key to the p.k list.
-	prev, ok = p.Get(key)
-	p.m[key] = value
-
-	// now check for a circular reference
-	_, err = p.expand(key, value)
-	if err != nil {
-
-		// revert to the previous state
-		if ok {
-			p.m[key] = prev
-		} else {
-			delete(p.m, key)
-		}
-
-		return "", false, err
-	}
-
-	if !ok {
-		p.k = append(p.k, key)
-	}
-
-	return prev, ok, nil
-}
-
-// SetValue sets property key to the default string value
-// as defined by fmt.Sprintf("%v").
-func (p *Properties) SetValue(key string, value interface{}) error {
-	_, _, err := p.Set(key, fmt.Sprintf("%v", value))
-	return err
-}
-
-// MustSet sets the property key to the corresponding value.
-// If a value for key existed before then ok is true and prev
-// contains the previous value. An empty key is silently ignored.
-func (p *Properties) MustSet(key, value string) (prev string, ok bool) {
-	prev, ok, err := p.Set(key, value)
-	if err != nil {
-		ErrorHandler(err)
-	}
-	return prev, ok
-}
-
-// String returns a string of all expanded 'key = value' pairs.
-func (p *Properties) String() string {
-	var s string
-	for _, key := range p.k {
-		value, _ := p.Get(key)
-		s = fmt.Sprintf("%s%s = %s\n", s, key, value)
-	}
-	return s
-}
-
-// Sort sorts the properties keys in alphabetical order.
-// This is helpful before writing the properties.
-func (p *Properties) Sort() {
-	sort.Strings(p.k)
-}
-
-// Write writes all unexpanded 'key = value' pairs to the given writer.
-// Write returns the number of bytes written and any write error encountered.
-func (p *Properties) Write(w io.Writer, enc Encoding) (n int, err error) {
-	return p.WriteComment(w, "", enc)
-}
-
-// WriteComment writes all unexpanded 'key = value' pairs to the given writer.
-// If prefix is not empty then comments are written with a blank line and the
-// given prefix. The prefix should be either "# " or "! " to be compatible with
-// the properties file format. Otherwise, the properties parser will not be
-// able to read the file back in. It returns the number of bytes written and
-// any write error encountered.
-func (p *Properties) WriteComment(w io.Writer, prefix string, enc Encoding) (n int, err error) {
-	var x int
-
-	for _, key := range p.k {
-		value := p.m[key]
-
-		if prefix != "" {
-			if comments, ok := p.c[key]; ok {
-				// don't print comments if they are all empty
-				allEmpty := true
-				for _, c := range comments {
-					if c != "" {
-						allEmpty = false
-						break
-					}
-				}
-
-				if !allEmpty {
-					// add a blank line between entries but not at the top
-					if len(comments) > 0 && n > 0 {
-						x, err = fmt.Fprintln(w)
-						if err != nil {
-							return
-						}
-						n += x
-					}
-
-					for _, c := range comments {
-						x, err = fmt.Fprintf(w, "%s%s\n", prefix, c)
-						if err != nil {
-							return
-						}
-						n += x
-					}
-				}
-			}
-		}
-		sep := " = "
-		if p.WriteSeparator != "" {
-			sep = p.WriteSeparator
-		}
-		x, err = fmt.Fprintf(w, "%s%s%s\n", encode(key, " :", enc), sep, encode(value, "", enc))
-		if err != nil {
-			return
-		}
-		n += x
-	}
-	return
-}
-
-// Map returns a copy of the properties as a map.
-func (p *Properties) Map() map[string]string {
-	m := make(map[string]string)
-	for k, v := range p.m {
-		m[k] = v
-	}
-	return m
-}
-
-// FilterFunc returns a copy of the properties which includes the values which passed all filters.
-func (p *Properties) FilterFunc(filters ...func(k, v string) bool) *Properties {
-	pp := NewProperties()
-outer:
-	for k, v := range p.m {
-		for _, f := range filters {
-			if !f(k, v) {
-				continue outer
-			}
-			pp.Set(k, v)
-		}
-	}
-	return pp
-}
-
-// ----------------------------------------------------------------------------
-
-// Delete removes the key and its comments.
-func (p *Properties) Delete(key string) {
-	delete(p.m, key)
-	delete(p.c, key)
-	newKeys := []string{}
-	for _, k := range p.k {
-		if k != key {
-			newKeys = append(newKeys, k)
-		}
-	}
-	p.k = newKeys
-}
-
-// Merge merges properties, comments and keys from other *Properties into p.
-func (p *Properties) Merge(other *Properties) {
-	for _, k := range other.k {
-		if _, ok := p.m[k]; !ok {
-			p.k = append(p.k, k)
-		}
-	}
-	for k, v := range other.m {
-		p.m[k] = v
-	}
-	for k, v := range other.c {
-		p.c[k] = v
-	}
-}
-
-// ----------------------------------------------------------------------------
-
-// check expands all values and returns an error if a circular reference or
-// a malformed expression was found.
-func (p *Properties) check() error {
-	for key, value := range p.m {
-		if _, err := p.expand(key, value); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func (p *Properties) expand(key, input string) (string, error) {
-	// no pre/postfix -> nothing to expand
-	if p.Prefix == "" && p.Postfix == "" {
-		return input, nil
-	}
-
-	return expand(input, []string{key}, p.Prefix, p.Postfix, p.m)
-}
-
-// expand recursively expands expressions of '(prefix)key(postfix)' to their corresponding values.
-// The function keeps track of the keys that were already expanded and stops if it
-// detects a circular reference or a malformed expression of the form '(prefix)key'.
-func expand(s string, keys []string, prefix, postfix string, values map[string]string) (string, error) { - if len(keys) > maxExpansionDepth { - return "", fmt.Errorf("expansion too deep") - } - - for { - start := strings.Index(s, prefix) - if start == -1 { - return s, nil - } - - keyStart := start + len(prefix) - keyLen := strings.Index(s[keyStart:], postfix) - if keyLen == -1 { - return "", fmt.Errorf("malformed expression") - } - - end := keyStart + keyLen + len(postfix) - 1 - key := s[keyStart : keyStart+keyLen] - - // fmt.Printf("s:%q pp:%q start:%d end:%d keyStart:%d keyLen:%d key:%q\n", s, prefix + "..." + postfix, start, end, keyStart, keyLen, key) - - for _, k := range keys { - if key == k { - var b bytes.Buffer - b.WriteString("circular reference in:\n") - for _, k1 := range keys { - fmt.Fprintf(&b, "%s=%s\n", k1, values[k1]) - } - return "", fmt.Errorf(b.String()) - } - } - - val, ok := values[key] - if !ok { - val = os.Getenv(key) - } - new_val, err := expand(val, append(keys, key), prefix, postfix, values) - if err != nil { - return "", err - } - s = s[:start] + new_val + s[end+1:] - } -} - -// encode encodes a UTF-8 string to ISO-8859-1 and escapes some characters. -func encode(s string, special string, enc Encoding) string { - switch enc { - case UTF8: - return encodeUtf8(s, special) - case ISO_8859_1: - return encodeIso(s, special) - default: - panic(fmt.Sprintf("unsupported encoding %v", enc)) - } -} - -func encodeUtf8(s string, special string) string { - v := "" - for pos := 0; pos < len(s); { - r, w := utf8.DecodeRuneInString(s[pos:]) - pos += w - v += escape(r, special) - } - return v -} - -func encodeIso(s string, special string) string { - var r rune - var w int - var v string - for pos := 0; pos < len(s); { - switch r, w = utf8.DecodeRuneInString(s[pos:]); { - case r < 1<<8: // single byte rune -> escape special chars only - v += escape(r, special) - case r < 1<<16: // two byte rune -> unicode literal - v += fmt.Sprintf("\\u%04x", r) - default: // more than two bytes per rune -> can't encode - v += "?" - } - pos += w - } - return v -} - -func escape(r rune, special string) string { - switch r { - case '\f': - return "\\f" - case '\n': - return "\\n" - case '\r': - return "\\r" - case '\t': - return "\\t" - case '\\': - return "\\\\" - default: - if strings.ContainsRune(special, r) { - return "\\" + string(r) - } - return string(r) - } -} - -func invalidKeyError(key string) error { - return fmt.Errorf("unknown property: %s", key) -} diff --git a/vendor/github.com/magiconair/properties/rangecheck.go b/vendor/github.com/magiconair/properties/rangecheck.go deleted file mode 100644 index dbd60b36e..000000000 --- a/vendor/github.com/magiconair/properties/rangecheck.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2013-2022 Frank Schroeder. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package properties - -import ( - "fmt" - "math" -) - -// make this a var to overwrite it in a test -var is32Bit = ^uint(0) == math.MaxUint32 - -// intRangeCheck checks if the value fits into the int type and -// panics if it does not. -func intRangeCheck(key string, v int64) int { - if is32Bit && (v < math.MinInt32 || v > math.MaxInt32) { - panic(fmt.Sprintf("Value %d for key %s out of range", v, key)) - } - return int(v) -} - -// uintRangeCheck checks if the value fits into the uint type and -// panics if it does not. 
-func uintRangeCheck(key string, v uint64) uint { - if is32Bit && v > math.MaxUint32 { - panic(fmt.Sprintf("Value %d for key %s out of range", v, key)) - } - return uint(v) -} diff --git a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md deleted file mode 100644 index c75823490..000000000 --- a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md +++ /dev/null @@ -1,96 +0,0 @@ -## 1.5.0 - -* New option `IgnoreUntaggedFields` to ignore decoding to any fields - without `mapstructure` (or the configured tag name) set [GH-277] -* New option `ErrorUnset` which makes it an error if any fields - in a target struct are not set by the decoding process. [GH-225] -* New function `OrComposeDecodeHookFunc` to help compose decode hooks. [GH-240] -* Decoding to slice from array no longer crashes [GH-265] -* Decode nested struct pointers to map [GH-271] -* Fix issue where `,squash` was ignored if `Squash` option was set. [GH-280] -* Fix issue where fields with `,omitempty` would sometimes decode - into a map with an empty string key [GH-281] - -## 1.4.3 - -* Fix cases where `json.Number` didn't decode properly [GH-261] - -## 1.4.2 - -* Custom name matchers to support any sort of casing, formatting, etc. for - field names. [GH-250] -* Fix possible panic in ComposeDecodeHookFunc [GH-251] - -## 1.4.1 - -* Fix regression where `*time.Time` value would be set to empty and not be sent - to decode hooks properly [GH-232] - -## 1.4.0 - -* A new decode hook type `DecodeHookFuncValue` has been added that has - access to the full values. [GH-183] -* Squash is now supported with embedded fields that are struct pointers [GH-205] -* Empty strings will convert to 0 for all numeric types when weakly decoding [GH-206] - -## 1.3.3 - -* Decoding maps from maps creates a settable value for decode hooks [GH-203] - -## 1.3.2 - -* Decode into interface type with a struct value is supported [GH-187] - -## 1.3.1 - -* Squash should only squash embedded structs. [GH-194] - -## 1.3.0 - -* Added `",omitempty"` support. This will ignore zero values in the source - structure when encoding. [GH-145] - -## 1.2.3 - -* Fix duplicate entries in Keys list with pointer values. [GH-185] - -## 1.2.2 - -* Do not add unsettable (unexported) values to the unused metadata key - or "remain" value. [GH-150] - -## 1.2.1 - -* Go modules checksum mismatch fix - -## 1.2.0 - -* Added support to capture unused values in a field using the `",remain"` value - in the mapstructure tag. There is an example to showcase usage. -* Added `DecoderConfig` option to always squash embedded structs -* `json.Number` can decode into `uint` types -* Empty slices are preserved and not replaced with nil slices -* Fix panic that can occur in when decoding a map into a nil slice of structs -* Improved package documentation for godoc - -## 1.1.2 - -* Fix error when decode hook decodes interface implementation into interface - type. [GH-140] - -## 1.1.1 - -* Fix panic that can happen in `decodePtr` - -## 1.1.0 - -* Added `StringToIPHookFunc` to convert `string` to `net.IP` and `net.IPNet` [GH-133] -* Support struct to struct decoding [GH-137] -* If source map value is nil, then destination map value is nil (instead of empty) -* If source slice value is nil, then destination slice value is nil (instead of empty) -* If source pointer is nil, then destination pointer is set to nil (instead of - allocated zero value of type) - -## 1.0.0 - -* Initial tagged stable release. 
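The properties.go and rangecheck.go hunks above remove magiconair/properties from the vendor tree. That package keeps values unexpanded and resolves "${key}" references on Get, while Set tentatively applies a value so it can detect circular references before committing the change. Below is a minimal usage sketch of that API, assuming the upstream `github.com/magiconair/properties` import path; the keys and values are illustrative only.

```go
package main

import (
	"fmt"
	"time"

	"github.com/magiconair/properties"
)

func main() {
	p := properties.NewProperties()

	// Values are stored unexpanded; "${key}" references resolve on Get.
	p.Set("host", "localhost")
	p.Set("port", "8080")
	p.Set("url", "http://${host}:${port}/")

	if url, ok := p.Get("url"); ok {
		fmt.Println(url) // http://localhost:8080/
	}

	// Typed getters return the default when the key is missing or unparsable.
	fmt.Println(p.GetParsedDuration("timeout", 5*time.Second)) // 5s

	// Set rejects circular references up front.
	p.Set("a", "${b}")
	if _, _, err := p.Set("b", "${a}"); err != nil {
		fmt.Println("rejected:", err)
	}
}
```

Because Set validates eagerly, a reference cycle is reported at write time instead of surfacing later through the ErrorHandler in Get.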
diff --git a/vendor/github.com/mitchellh/mapstructure/LICENSE b/vendor/github.com/mitchellh/mapstructure/LICENSE deleted file mode 100644 index f9c841a51..000000000 --- a/vendor/github.com/mitchellh/mapstructure/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013 Mitchell Hashimoto - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/mapstructure/README.md b/vendor/github.com/mitchellh/mapstructure/README.md deleted file mode 100644 index 0018dc7d9..000000000 --- a/vendor/github.com/mitchellh/mapstructure/README.md +++ /dev/null @@ -1,46 +0,0 @@ -# mapstructure [![Godoc](https://godoc.org/github.com/mitchellh/mapstructure?status.svg)](https://godoc.org/github.com/mitchellh/mapstructure) - -mapstructure is a Go library for decoding generic map values to structures -and vice versa, while providing helpful error handling. - -This library is most useful when decoding values from some data stream (JSON, -Gob, etc.) where you don't _quite_ know the structure of the underlying data -until you read a part of it. You can therefore read a `map[string]interface{}` -and use this library to decode it into the proper underlying native Go -structure. - -## Installation - -Standard `go get`: - -``` -$ go get github.com/mitchellh/mapstructure -``` - -## Usage & Example - -For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/mapstructure). - -The `Decode` function has examples associated with it there. - -## But Why?! - -Go offers fantastic standard libraries for decoding formats such as JSON. -The standard method is to have a struct pre-created, and populate that struct -from the bytes of the encoded format. This is great, but the problem is if -you have configuration or an encoding that changes slightly depending on -specific fields. For example, consider this JSON: - -```json -{ - "type": "person", - "name": "Mitchell" -} -``` - -Perhaps we can't populate a specific structure without first reading -the "type" field from the JSON. We could always do two passes over the -decoding of the JSON (reading the "type" first, and the rest later). -However, it is much simpler to just decode this into a `map[string]interface{}` -structure, read the "type" key, then use something like this library -to decode it into the proper structure. 
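The README removed above describes mapstructure's core pattern: decode the raw payload into a `map[string]interface{}` first, inspect a discriminator field such as "type", and only then map the remaining keys onto a concrete struct. A short sketch of that two-pass flow follows, reusing the README's "person" example; the struct tag and error handling are illustrative.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/mitchellh/mapstructure"
)

// Person mirrors the README's example payload.
type Person struct {
	Name string `mapstructure:"name"`
}

func main() {
	raw := []byte(`{"type": "person", "name": "Mitchell"}`)

	// Pass 1: decode into a generic map so the "type" field can be inspected.
	var m map[string]interface{}
	if err := json.Unmarshal(raw, &m); err != nil {
		panic(err)
	}

	// Pass 2: pick the concrete struct for that type and let mapstructure
	// map the remaining keys onto its fields.
	switch m["type"] {
	case "person":
		var p Person
		if err := mapstructure.Decode(m, &p); err != nil {
			panic(err)
		}
		fmt.Println(p.Name) // Mitchell
	}
}
```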
diff --git a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go deleted file mode 100644 index 3a754ca72..000000000 --- a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go +++ /dev/null @@ -1,279 +0,0 @@ -package mapstructure - -import ( - "encoding" - "errors" - "fmt" - "net" - "reflect" - "strconv" - "strings" - "time" -) - -// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns -// it into the proper DecodeHookFunc type, such as DecodeHookFuncType. -func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc { - // Create variables here so we can reference them with the reflect pkg - var f1 DecodeHookFuncType - var f2 DecodeHookFuncKind - var f3 DecodeHookFuncValue - - // Fill in the variables into this interface and the rest is done - // automatically using the reflect package. - potential := []interface{}{f1, f2, f3} - - v := reflect.ValueOf(h) - vt := v.Type() - for _, raw := range potential { - pt := reflect.ValueOf(raw).Type() - if vt.ConvertibleTo(pt) { - return v.Convert(pt).Interface() - } - } - - return nil -} - -// DecodeHookExec executes the given decode hook. This should be used -// since it'll naturally degrade to the older backwards compatible DecodeHookFunc -// that took reflect.Kind instead of reflect.Type. -func DecodeHookExec( - raw DecodeHookFunc, - from reflect.Value, to reflect.Value) (interface{}, error) { - - switch f := typedDecodeHook(raw).(type) { - case DecodeHookFuncType: - return f(from.Type(), to.Type(), from.Interface()) - case DecodeHookFuncKind: - return f(from.Kind(), to.Kind(), from.Interface()) - case DecodeHookFuncValue: - return f(from, to) - default: - return nil, errors.New("invalid decode hook signature") - } -} - -// ComposeDecodeHookFunc creates a single DecodeHookFunc that -// automatically composes multiple DecodeHookFuncs. -// -// The composed funcs are called in order, with the result of the -// previous transformation. -func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc { - return func(f reflect.Value, t reflect.Value) (interface{}, error) { - var err error - data := f.Interface() - - newFrom := f - for _, f1 := range fs { - data, err = DecodeHookExec(f1, newFrom, t) - if err != nil { - return nil, err - } - newFrom = reflect.ValueOf(data) - } - - return data, nil - } -} - -// OrComposeDecodeHookFunc executes all input hook functions until one of them returns no error. In that case its value is returned. -// If all hooks return an error, OrComposeDecodeHookFunc returns an error concatenating all error messages. -func OrComposeDecodeHookFunc(ff ...DecodeHookFunc) DecodeHookFunc { - return func(a, b reflect.Value) (interface{}, error) { - var allErrs string - var out interface{} - var err error - - for _, f := range ff { - out, err = DecodeHookExec(f, a, b) - if err != nil { - allErrs += err.Error() + "\n" - continue - } - - return out, nil - } - - return nil, errors.New(allErrs) - } -} - -// StringToSliceHookFunc returns a DecodeHookFunc that converts -// string to []string by splitting on the given sep. -func StringToSliceHookFunc(sep string) DecodeHookFunc { - return func( - f reflect.Kind, - t reflect.Kind, - data interface{}) (interface{}, error) { - if f != reflect.String || t != reflect.Slice { - return data, nil - } - - raw := data.(string) - if raw == "" { - return []string{}, nil - } - - return strings.Split(raw, sep), nil - } -} - -// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts -// strings to time.Duration. 
-func StringToTimeDurationHookFunc() DecodeHookFunc { - return func( - f reflect.Type, - t reflect.Type, - data interface{}) (interface{}, error) { - if f.Kind() != reflect.String { - return data, nil - } - if t != reflect.TypeOf(time.Duration(5)) { - return data, nil - } - - // Convert it by parsing - return time.ParseDuration(data.(string)) - } -} - -// StringToIPHookFunc returns a DecodeHookFunc that converts -// strings to net.IP -func StringToIPHookFunc() DecodeHookFunc { - return func( - f reflect.Type, - t reflect.Type, - data interface{}) (interface{}, error) { - if f.Kind() != reflect.String { - return data, nil - } - if t != reflect.TypeOf(net.IP{}) { - return data, nil - } - - // Convert it by parsing - ip := net.ParseIP(data.(string)) - if ip == nil { - return net.IP{}, fmt.Errorf("failed parsing ip %v", data) - } - - return ip, nil - } -} - -// StringToIPNetHookFunc returns a DecodeHookFunc that converts -// strings to net.IPNet -func StringToIPNetHookFunc() DecodeHookFunc { - return func( - f reflect.Type, - t reflect.Type, - data interface{}) (interface{}, error) { - if f.Kind() != reflect.String { - return data, nil - } - if t != reflect.TypeOf(net.IPNet{}) { - return data, nil - } - - // Convert it by parsing - _, net, err := net.ParseCIDR(data.(string)) - return net, err - } -} - -// StringToTimeHookFunc returns a DecodeHookFunc that converts -// strings to time.Time. -func StringToTimeHookFunc(layout string) DecodeHookFunc { - return func( - f reflect.Type, - t reflect.Type, - data interface{}) (interface{}, error) { - if f.Kind() != reflect.String { - return data, nil - } - if t != reflect.TypeOf(time.Time{}) { - return data, nil - } - - // Convert it by parsing - return time.Parse(layout, data.(string)) - } -} - -// WeaklyTypedHook is a DecodeHookFunc which adds support for weak typing to -// the decoder. -// -// Note that this is significantly different from the WeaklyTypedInput option -// of the DecoderConfig. 
-func WeaklyTypedHook(
-	f reflect.Kind,
-	t reflect.Kind,
-	data interface{}) (interface{}, error) {
-	dataVal := reflect.ValueOf(data)
-	switch t {
-	case reflect.String:
-		switch f {
-		case reflect.Bool:
-			if dataVal.Bool() {
-				return "1", nil
-			}
-			return "0", nil
-		case reflect.Float32:
-			return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil
-		case reflect.Int:
-			return strconv.FormatInt(dataVal.Int(), 10), nil
-		case reflect.Slice:
-			dataType := dataVal.Type()
-			elemKind := dataType.Elem().Kind()
-			if elemKind == reflect.Uint8 {
-				return string(dataVal.Interface().([]uint8)), nil
-			}
-		case reflect.Uint:
-			return strconv.FormatUint(dataVal.Uint(), 10), nil
-		}
-	}
-
-	return data, nil
-}
-
-func RecursiveStructToMapHookFunc() DecodeHookFunc {
-	return func(f reflect.Value, t reflect.Value) (interface{}, error) {
-		if f.Kind() != reflect.Struct {
-			return f.Interface(), nil
-		}
-
-		var i interface{} = struct{}{}
-		if t.Type() != reflect.TypeOf(&i).Elem() {
-			return f.Interface(), nil
-		}
-
-		m := make(map[string]interface{})
-		t.Set(reflect.ValueOf(m))
-
-		return f.Interface(), nil
-	}
-}
-
-// TextUnmarshallerHookFunc returns a DecodeHookFunc that applies
-// strings to the UnmarshalText function, when the target type
-// implements the encoding.TextUnmarshaler interface.
-func TextUnmarshallerHookFunc() DecodeHookFuncType {
-	return func(
-		f reflect.Type,
-		t reflect.Type,
-		data interface{}) (interface{}, error) {
-		if f.Kind() != reflect.String {
-			return data, nil
-		}
-		result := reflect.New(t).Interface()
-		unmarshaller, ok := result.(encoding.TextUnmarshaler)
-		if !ok {
-			return data, nil
-		}
-		if err := unmarshaller.UnmarshalText([]byte(data.(string))); err != nil {
-			return nil, err
-		}
-		return result, nil
-	}
-}
diff --git a/vendor/github.com/mitchellh/mapstructure/error.go b/vendor/github.com/mitchellh/mapstructure/error.go
deleted file mode 100644
index 47a99e5af..000000000
--- a/vendor/github.com/mitchellh/mapstructure/error.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package mapstructure
-
-import (
-	"errors"
-	"fmt"
-	"sort"
-	"strings"
-)
-
-// Error implements the error interface and can represent multiple
-// errors that occur in the course of a single decode.
-type Error struct {
-	Errors []string
-}
-
-func (e *Error) Error() string {
-	points := make([]string, len(e.Errors))
-	for i, err := range e.Errors {
-		points[i] = fmt.Sprintf("* %s", err)
-	}
-
-	sort.Strings(points)
-	return fmt.Sprintf(
-		"%d error(s) decoding:\n\n%s",
-		len(e.Errors), strings.Join(points, "\n"))
-}
-
-// WrappedErrors implements the errwrap.Wrapper interface to make this
-// return value more useful with the errwrap and go-multierror libraries.
-func (e *Error) WrappedErrors() []error {
-	if e == nil {
-		return nil
-	}
-
-	result := make([]error, len(e.Errors))
-	for i, e := range e.Errors {
-		result[i] = errors.New(e)
-	}
-
-	return result
-}
-
-func appendErrors(errors []string, err error) []string {
-	switch e := err.(type) {
-	case *Error:
-		return append(errors, e.Errors...)
-	default:
-		return append(errors, e.Error())
-	}
-}
diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go
deleted file mode 100644
index 1efb22ac3..000000000
--- a/vendor/github.com/mitchellh/mapstructure/mapstructure.go
+++ /dev/null
@@ -1,1540 +0,0 @@
-// Package mapstructure exposes functionality to convert one arbitrary
-// Go type into another, typically to convert a map[string]interface{}
-// into a native Go structure.
-// -// The Go structure can be arbitrarily complex, containing slices, -// other structs, etc. and the decoder will properly decode nested -// maps and so on into the proper structures in the native Go struct. -// See the examples to see what the decoder is capable of. -// -// The simplest function to start with is Decode. -// -// Field Tags -// -// When decoding to a struct, mapstructure will use the field name by -// default to perform the mapping. For example, if a struct has a field -// "Username" then mapstructure will look for a key in the source value -// of "username" (case insensitive). -// -// type User struct { -// Username string -// } -// -// You can change the behavior of mapstructure by using struct tags. -// The default struct tag that mapstructure looks for is "mapstructure" -// but you can customize it using DecoderConfig. -// -// Renaming Fields -// -// To rename the key that mapstructure looks for, use the "mapstructure" -// tag and set a value directly. For example, to change the "username" example -// above to "user": -// -// type User struct { -// Username string `mapstructure:"user"` -// } -// -// Embedded Structs and Squashing -// -// Embedded structs are treated as if they're another field with that name. -// By default, the two structs below are equivalent when decoding with -// mapstructure: -// -// type Person struct { -// Name string -// } -// -// type Friend struct { -// Person -// } -// -// type Friend struct { -// Person Person -// } -// -// This would require an input that looks like below: -// -// map[string]interface{}{ -// "person": map[string]interface{}{"name": "alice"}, -// } -// -// If your "person" value is NOT nested, then you can append ",squash" to -// your tag value and mapstructure will treat it as if the embedded struct -// were part of the struct directly. Example: -// -// type Friend struct { -// Person `mapstructure:",squash"` -// } -// -// Now the following input would be accepted: -// -// map[string]interface{}{ -// "name": "alice", -// } -// -// When decoding from a struct to a map, the squash tag squashes the struct -// fields into a single map. Using the example structs from above: -// -// Friend{Person: Person{Name: "alice"}} -// -// Will be decoded into a map: -// -// map[string]interface{}{ -// "name": "alice", -// } -// -// DecoderConfig has a field that changes the behavior of mapstructure -// to always squash embedded structs. -// -// Remainder Values -// -// If there are any unmapped keys in the source value, mapstructure by -// default will silently ignore them. You can error by setting ErrorUnused -// in DecoderConfig. If you're using Metadata you can also maintain a slice -// of the unused keys. -// -// You can also use the ",remain" suffix on your tag to collect all unused -// values in a map. The field with this tag MUST be a map type and should -// probably be a "map[string]interface{}" or "map[interface{}]interface{}". -// See example below: -// -// type Friend struct { -// Name string -// Other map[string]interface{} `mapstructure:",remain"` -// } -// -// Given the input below, Other would be populated with the other -// values that weren't used (everything but "name"): -// -// map[string]interface{}{ -// "name": "bob", -// "address": "123 Maple St.", -// } -// -// Omit Empty Values -// -// When decoding from a struct to any other value, you may use the -// ",omitempty" suffix on your tag to omit that value if it equates to -// the zero value. The zero value of all types is specified in the Go -// specification. 
-//
-// For example, the zero value of a numeric type is zero ("0"). If the struct
-// field value is zero and a numeric type, the field is empty, and it won't
-// be encoded into the destination type.
-//
-//     type Source struct {
-//         Age int `mapstructure:",omitempty"`
-//     }
-//
-// Unexported fields
-//
-// Since unexported (private) struct fields cannot be set outside the package
-// where they are defined, the decoder will simply skip them.
-//
-// For this output type definition:
-//
-//     type Exported struct {
-//         private string // this unexported field will be skipped
-//         Public string
-//     }
-//
-// Using this map as input:
-//
-//     map[string]interface{}{
-//         "private": "I will be ignored",
-//         "Public": "I made it through!",
-//     }
-//
-// The following struct will be decoded:
-//
-//     type Exported struct {
-//         private: "" // field is left with an empty string (zero value)
-//         Public: "I made it through!"
-//     }
-//
-// Other Configuration
-//
-// mapstructure is highly configurable. See the DecoderConfig struct
-// for other features and options that are supported.
-package mapstructure
-
-import (
-	"encoding/json"
-	"errors"
-	"fmt"
-	"reflect"
-	"sort"
-	"strconv"
-	"strings"
-)
-
-// DecodeHookFunc is the callback function that can be used for
-// data transformations. See "DecodeHook" in the DecoderConfig
-// struct.
-//
-// The type must be one of DecodeHookFuncType, DecodeHookFuncKind, or
-// DecodeHookFuncValue.
-// Values are a superset of Types (Values can return types), and Types are a
-// superset of Kinds (Types can return Kinds) and are generally a richer thing
-// to use, but Kinds are simpler if you only need those.
-//
-// The reason DecodeHookFunc is multi-typed is for backwards compatibility:
-// we started with Kinds and then realized Types were the better solution,
-// but have a promise to not break backwards compat so we now support
-// both.
-type DecodeHookFunc interface{}
-
-// DecodeHookFuncType is a DecodeHookFunc which has complete information about
-// the source and target types.
-type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface{}, error)
-
-// DecodeHookFuncKind is a DecodeHookFunc which knows only the Kinds of the
-// source and target types.
-type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error)
-
-// DecodeHookFuncValue is a DecodeHookFunc which has complete access to both the source and target
-// values.
-type DecodeHookFuncValue func(from reflect.Value, to reflect.Value) (interface{}, error)
-
-// DecoderConfig is the configuration that is used to create a new decoder
-// and allows customization of various aspects of decoding.
-type DecoderConfig struct {
-	// DecodeHook, if set, will be called before any decoding and any
-	// type conversion (if WeaklyTypedInput is on). This lets you modify
-	// the values before they're set down onto the resulting struct. The
-	// DecodeHook is called for every map and value in the input. This means
-	// that if a struct has embedded fields with squash tags the decode hook
-	// is called only once with all of the input data, not once for each
-	// embedded struct.
-	//
-	// If an error is returned, the entire decode will fail with that error.
-	DecodeHook DecodeHookFunc
-
-	// If ErrorUnused is true, then it is an error for there to exist
-	// keys in the original map that were unused in the decoding process
-	// (extra keys).
- ErrorUnused bool - - // If ErrorUnset is true, then it is an error for there to exist - // fields in the result that were not set in the decoding process - // (extra fields). This only applies to decoding to a struct. This - // will affect all nested structs as well. - ErrorUnset bool - - // ZeroFields, if set to true, will zero fields before writing them. - // For example, a map will be emptied before decoded values are put in - // it. If this is false, a map will be merged. - ZeroFields bool - - // If WeaklyTypedInput is true, the decoder will make the following - // "weak" conversions: - // - // - bools to string (true = "1", false = "0") - // - numbers to string (base 10) - // - bools to int/uint (true = 1, false = 0) - // - strings to int/uint (base implied by prefix) - // - int to bool (true if value != 0) - // - string to bool (accepts: 1, t, T, TRUE, true, True, 0, f, F, - // FALSE, false, False. Anything else is an error) - // - empty array = empty map and vice versa - // - negative numbers to overflowed uint values (base 10) - // - slice of maps to a merged map - // - single values are converted to slices if required. Each - // element is weakly decoded. For example: "4" can become []int{4} - // if the target type is an int slice. - // - WeaklyTypedInput bool - - // Squash will squash embedded structs. A squash tag may also be - // added to an individual struct field using a tag. For example: - // - // type Parent struct { - // Child `mapstructure:",squash"` - // } - Squash bool - - // Metadata is the struct that will contain extra metadata about - // the decoding. If this is nil, then no metadata will be tracked. - Metadata *Metadata - - // Result is a pointer to the struct that will contain the decoded - // value. - Result interface{} - - // The tag name that mapstructure reads for field names. This - // defaults to "mapstructure" - TagName string - - // IgnoreUntaggedFields ignores all struct fields without explicit - // TagName, comparable to `mapstructure:"-"` as default behaviour. - IgnoreUntaggedFields bool - - // MatchName is the function used to match the map key to the struct - // field name or tag. Defaults to `strings.EqualFold`. This can be used - // to implement case-sensitive tag values, support snake casing, etc. - MatchName func(mapKey, fieldName string) bool -} - -// A Decoder takes a raw interface value and turns it into structured -// data, keeping track of rich error information along the way in case -// anything goes wrong. Unlike the basic top-level Decode method, you can -// more finely control how the Decoder behaves using the DecoderConfig -// structure. The top-level Decode method is just a convenience that sets -// up the most basic Decoder. -type Decoder struct { - config *DecoderConfig -} - -// Metadata contains information about decoding a structure that -// is tedious or difficult to get otherwise. -type Metadata struct { - // Keys are the keys of the structure which were successfully decoded - Keys []string - - // Unused is a slice of keys that were found in the raw value but - // weren't decoded since there was no matching field in the result interface - Unused []string - - // Unset is a slice of field names that were found in the result interface - // but weren't set in the decoding process since there was no matching value - // in the input - Unset []string -} - -// Decode takes an input structure and uses reflection to translate it to -// the output structure. output must be a pointer to a map or struct. 
-func Decode(input interface{}, output interface{}) error { - config := &DecoderConfig{ - Metadata: nil, - Result: output, - } - - decoder, err := NewDecoder(config) - if err != nil { - return err - } - - return decoder.Decode(input) -} - -// WeakDecode is the same as Decode but is shorthand to enable -// WeaklyTypedInput. See DecoderConfig for more info. -func WeakDecode(input, output interface{}) error { - config := &DecoderConfig{ - Metadata: nil, - Result: output, - WeaklyTypedInput: true, - } - - decoder, err := NewDecoder(config) - if err != nil { - return err - } - - return decoder.Decode(input) -} - -// DecodeMetadata is the same as Decode, but is shorthand to -// enable metadata collection. See DecoderConfig for more info. -func DecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error { - config := &DecoderConfig{ - Metadata: metadata, - Result: output, - } - - decoder, err := NewDecoder(config) - if err != nil { - return err - } - - return decoder.Decode(input) -} - -// WeakDecodeMetadata is the same as Decode, but is shorthand to -// enable both WeaklyTypedInput and metadata collection. See -// DecoderConfig for more info. -func WeakDecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error { - config := &DecoderConfig{ - Metadata: metadata, - Result: output, - WeaklyTypedInput: true, - } - - decoder, err := NewDecoder(config) - if err != nil { - return err - } - - return decoder.Decode(input) -} - -// NewDecoder returns a new decoder for the given configuration. Once -// a decoder has been returned, the same configuration must not be used -// again. -func NewDecoder(config *DecoderConfig) (*Decoder, error) { - val := reflect.ValueOf(config.Result) - if val.Kind() != reflect.Ptr { - return nil, errors.New("result must be a pointer") - } - - val = val.Elem() - if !val.CanAddr() { - return nil, errors.New("result must be addressable (a pointer)") - } - - if config.Metadata != nil { - if config.Metadata.Keys == nil { - config.Metadata.Keys = make([]string, 0) - } - - if config.Metadata.Unused == nil { - config.Metadata.Unused = make([]string, 0) - } - - if config.Metadata.Unset == nil { - config.Metadata.Unset = make([]string, 0) - } - } - - if config.TagName == "" { - config.TagName = "mapstructure" - } - - if config.MatchName == nil { - config.MatchName = strings.EqualFold - } - - result := &Decoder{ - config: config, - } - - return result, nil -} - -// Decode decodes the given raw interface to the target pointer specified -// by the configuration. -func (d *Decoder) Decode(input interface{}) error { - return d.decode("", input, reflect.ValueOf(d.config.Result).Elem()) -} - -// Decodes an unknown data type into a specific reflection value. -func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) error { - var inputVal reflect.Value - if input != nil { - inputVal = reflect.ValueOf(input) - - // We need to check here if input is a typed nil. Typed nils won't - // match the "input == nil" below so we check that here. - if inputVal.Kind() == reflect.Ptr && inputVal.IsNil() { - input = nil - } - } - - if input == nil { - // If the data is nil, then we don't set anything, unless ZeroFields is set - // to true. 
-		if d.config.ZeroFields {
-			outVal.Set(reflect.Zero(outVal.Type()))
-
-			if d.config.Metadata != nil && name != "" {
-				d.config.Metadata.Keys = append(d.config.Metadata.Keys, name)
-			}
-		}
-		return nil
-	}
-
-	if !inputVal.IsValid() {
-		// If the input value is invalid, then we just set the value
-		// to be the zero value.
-		outVal.Set(reflect.Zero(outVal.Type()))
-		if d.config.Metadata != nil && name != "" {
-			d.config.Metadata.Keys = append(d.config.Metadata.Keys, name)
-		}
-		return nil
-	}
-
-	if d.config.DecodeHook != nil {
-		// We have a DecodeHook, so let's pre-process the input.
-		var err error
-		input, err = DecodeHookExec(d.config.DecodeHook, inputVal, outVal)
-		if err != nil {
-			return fmt.Errorf("error decoding '%s': %s", name, err)
-		}
-	}
-
-	var err error
-	outputKind := getKind(outVal)
-	addMetaKey := true
-	switch outputKind {
-	case reflect.Bool:
-		err = d.decodeBool(name, input, outVal)
-	case reflect.Interface:
-		err = d.decodeBasic(name, input, outVal)
-	case reflect.String:
-		err = d.decodeString(name, input, outVal)
-	case reflect.Int:
-		err = d.decodeInt(name, input, outVal)
-	case reflect.Uint:
-		err = d.decodeUint(name, input, outVal)
-	case reflect.Float32:
-		err = d.decodeFloat(name, input, outVal)
-	case reflect.Struct:
-		err = d.decodeStruct(name, input, outVal)
-	case reflect.Map:
-		err = d.decodeMap(name, input, outVal)
-	case reflect.Ptr:
-		addMetaKey, err = d.decodePtr(name, input, outVal)
-	case reflect.Slice:
-		err = d.decodeSlice(name, input, outVal)
-	case reflect.Array:
-		err = d.decodeArray(name, input, outVal)
-	case reflect.Func:
-		err = d.decodeFunc(name, input, outVal)
-	default:
-		// If we reached this point then we weren't able to decode it
-		return fmt.Errorf("%s: unsupported type: %s", name, outputKind)
-	}
-
-	// If we reached here, then we successfully decoded SOMETHING, so
-	// mark the key as used if we're tracking metadata.
-	if addMetaKey && d.config.Metadata != nil && name != "" {
-		d.config.Metadata.Keys = append(d.config.Metadata.Keys, name)
-	}
-
-	return err
-}
-
-// This decodes a basic type (bool, int, string, etc.) and sets the
-// value to "data" of that type.
-func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error {
-	if val.IsValid() && val.Elem().IsValid() {
-		elem := val.Elem()
-
-		// If we can't address this element, then it's not writable. Instead,
-		// we make a copy of the value (which is a pointer and therefore
-		// writable), decode into that, and replace the whole value.
-		copied := false
-		if !elem.CanAddr() {
-			copied = true
-
-			// Make *T
-			copy := reflect.New(elem.Type())
-
-			// *T = elem
-			copy.Elem().Set(elem)
-
-			// Set elem so we decode into it
-			elem = copy
-		}
-
-		// Decode. If we have an error then return. We also return right
-		// away if we're not a copy because that means we decoded directly.
-		if err := d.decode(name, data, elem); err != nil || !copied {
-			return err
-		}
-
-		// If we're a copy, we need to set the final result
-		val.Set(elem.Elem())
-		return nil
-	}
-
-	dataVal := reflect.ValueOf(data)
-
-	// If the input data is a pointer, and the assigned type is the dereference
-	// of that exact pointer, then indirect it so that we can assign it.
- // Example: *string to string - if dataVal.Kind() == reflect.Ptr && dataVal.Type().Elem() == val.Type() { - dataVal = reflect.Indirect(dataVal) - } - - if !dataVal.IsValid() { - dataVal = reflect.Zero(val.Type()) - } - - dataValType := dataVal.Type() - if !dataValType.AssignableTo(val.Type()) { - return fmt.Errorf( - "'%s' expected type '%s', got '%s'", - name, val.Type(), dataValType) - } - - val.Set(dataVal) - return nil -} - -func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - dataKind := getKind(dataVal) - - converted := true - switch { - case dataKind == reflect.String: - val.SetString(dataVal.String()) - case dataKind == reflect.Bool && d.config.WeaklyTypedInput: - if dataVal.Bool() { - val.SetString("1") - } else { - val.SetString("0") - } - case dataKind == reflect.Int && d.config.WeaklyTypedInput: - val.SetString(strconv.FormatInt(dataVal.Int(), 10)) - case dataKind == reflect.Uint && d.config.WeaklyTypedInput: - val.SetString(strconv.FormatUint(dataVal.Uint(), 10)) - case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: - val.SetString(strconv.FormatFloat(dataVal.Float(), 'f', -1, 64)) - case dataKind == reflect.Slice && d.config.WeaklyTypedInput, - dataKind == reflect.Array && d.config.WeaklyTypedInput: - dataType := dataVal.Type() - elemKind := dataType.Elem().Kind() - switch elemKind { - case reflect.Uint8: - var uints []uint8 - if dataKind == reflect.Array { - uints = make([]uint8, dataVal.Len(), dataVal.Len()) - for i := range uints { - uints[i] = dataVal.Index(i).Interface().(uint8) - } - } else { - uints = dataVal.Interface().([]uint8) - } - val.SetString(string(uints)) - default: - converted = false - } - default: - converted = false - } - - if !converted { - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", - name, val.Type(), dataVal.Type(), data) - } - - return nil -} - -func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - dataKind := getKind(dataVal) - dataType := dataVal.Type() - - switch { - case dataKind == reflect.Int: - val.SetInt(dataVal.Int()) - case dataKind == reflect.Uint: - val.SetInt(int64(dataVal.Uint())) - case dataKind == reflect.Float32: - val.SetInt(int64(dataVal.Float())) - case dataKind == reflect.Bool && d.config.WeaklyTypedInput: - if dataVal.Bool() { - val.SetInt(1) - } else { - val.SetInt(0) - } - case dataKind == reflect.String && d.config.WeaklyTypedInput: - str := dataVal.String() - if str == "" { - str = "0" - } - - i, err := strconv.ParseInt(str, 0, val.Type().Bits()) - if err == nil { - val.SetInt(i) - } else { - return fmt.Errorf("cannot parse '%s' as int: %s", name, err) - } - case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": - jn := data.(json.Number) - i, err := jn.Int64() - if err != nil { - return fmt.Errorf( - "error decoding json.Number into %s: %s", name, err) - } - val.SetInt(i) - default: - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", - name, val.Type(), dataVal.Type(), data) - } - - return nil -} - -func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - dataKind := getKind(dataVal) - dataType := dataVal.Type() - - switch { - case dataKind == reflect.Int: - i := dataVal.Int() - if i < 0 && !d.config.WeaklyTypedInput { - return fmt.Errorf("cannot 
parse '%s', %d overflows uint", - name, i) - } - val.SetUint(uint64(i)) - case dataKind == reflect.Uint: - val.SetUint(dataVal.Uint()) - case dataKind == reflect.Float32: - f := dataVal.Float() - if f < 0 && !d.config.WeaklyTypedInput { - return fmt.Errorf("cannot parse '%s', %f overflows uint", - name, f) - } - val.SetUint(uint64(f)) - case dataKind == reflect.Bool && d.config.WeaklyTypedInput: - if dataVal.Bool() { - val.SetUint(1) - } else { - val.SetUint(0) - } - case dataKind == reflect.String && d.config.WeaklyTypedInput: - str := dataVal.String() - if str == "" { - str = "0" - } - - i, err := strconv.ParseUint(str, 0, val.Type().Bits()) - if err == nil { - val.SetUint(i) - } else { - return fmt.Errorf("cannot parse '%s' as uint: %s", name, err) - } - case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": - jn := data.(json.Number) - i, err := strconv.ParseUint(string(jn), 0, 64) - if err != nil { - return fmt.Errorf( - "error decoding json.Number into %s: %s", name, err) - } - val.SetUint(i) - default: - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", - name, val.Type(), dataVal.Type(), data) - } - - return nil -} - -func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - dataKind := getKind(dataVal) - - switch { - case dataKind == reflect.Bool: - val.SetBool(dataVal.Bool()) - case dataKind == reflect.Int && d.config.WeaklyTypedInput: - val.SetBool(dataVal.Int() != 0) - case dataKind == reflect.Uint && d.config.WeaklyTypedInput: - val.SetBool(dataVal.Uint() != 0) - case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: - val.SetBool(dataVal.Float() != 0) - case dataKind == reflect.String && d.config.WeaklyTypedInput: - b, err := strconv.ParseBool(dataVal.String()) - if err == nil { - val.SetBool(b) - } else if dataVal.String() == "" { - val.SetBool(false) - } else { - return fmt.Errorf("cannot parse '%s' as bool: %s", name, err) - } - default: - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", - name, val.Type(), dataVal.Type(), data) - } - - return nil -} - -func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - dataKind := getKind(dataVal) - dataType := dataVal.Type() - - switch { - case dataKind == reflect.Int: - val.SetFloat(float64(dataVal.Int())) - case dataKind == reflect.Uint: - val.SetFloat(float64(dataVal.Uint())) - case dataKind == reflect.Float32: - val.SetFloat(dataVal.Float()) - case dataKind == reflect.Bool && d.config.WeaklyTypedInput: - if dataVal.Bool() { - val.SetFloat(1) - } else { - val.SetFloat(0) - } - case dataKind == reflect.String && d.config.WeaklyTypedInput: - str := dataVal.String() - if str == "" { - str = "0" - } - - f, err := strconv.ParseFloat(str, val.Type().Bits()) - if err == nil { - val.SetFloat(f) - } else { - return fmt.Errorf("cannot parse '%s' as float: %s", name, err) - } - case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": - jn := data.(json.Number) - i, err := jn.Float64() - if err != nil { - return fmt.Errorf( - "error decoding json.Number into %s: %s", name, err) - } - val.SetFloat(i) - default: - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", - name, val.Type(), dataVal.Type(), data) - } - - return nil -} - -func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) error 
{ - valType := val.Type() - valKeyType := valType.Key() - valElemType := valType.Elem() - - // By default we overwrite keys in the current map - valMap := val - - // If the map is nil or we're purposely zeroing fields, make a new map - if valMap.IsNil() || d.config.ZeroFields { - // Make a new map to hold our result - mapType := reflect.MapOf(valKeyType, valElemType) - valMap = reflect.MakeMap(mapType) - } - - // Check input type and based on the input type jump to the proper func - dataVal := reflect.Indirect(reflect.ValueOf(data)) - switch dataVal.Kind() { - case reflect.Map: - return d.decodeMapFromMap(name, dataVal, val, valMap) - - case reflect.Struct: - return d.decodeMapFromStruct(name, dataVal, val, valMap) - - case reflect.Array, reflect.Slice: - if d.config.WeaklyTypedInput { - return d.decodeMapFromSlice(name, dataVal, val, valMap) - } - - fallthrough - - default: - return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind()) - } -} - -func (d *Decoder) decodeMapFromSlice(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { - // Special case for BC reasons (covered by tests) - if dataVal.Len() == 0 { - val.Set(valMap) - return nil - } - - for i := 0; i < dataVal.Len(); i++ { - err := d.decode( - name+"["+strconv.Itoa(i)+"]", - dataVal.Index(i).Interface(), val) - if err != nil { - return err - } - } - - return nil -} - -func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { - valType := val.Type() - valKeyType := valType.Key() - valElemType := valType.Elem() - - // Accumulate errors - errors := make([]string, 0) - - // If the input data is empty, then we just match what the input data is. - if dataVal.Len() == 0 { - if dataVal.IsNil() { - if !val.IsNil() { - val.Set(dataVal) - } - } else { - // Set to empty allocated value - val.Set(valMap) - } - - return nil - } - - for _, k := range dataVal.MapKeys() { - fieldName := name + "[" + k.String() + "]" - - // First decode the key into the proper type - currentKey := reflect.Indirect(reflect.New(valKeyType)) - if err := d.decode(fieldName, k.Interface(), currentKey); err != nil { - errors = appendErrors(errors, err) - continue - } - - // Next decode the data into the proper type - v := dataVal.MapIndex(k).Interface() - currentVal := reflect.Indirect(reflect.New(valElemType)) - if err := d.decode(fieldName, v, currentVal); err != nil { - errors = appendErrors(errors, err) - continue - } - - valMap.SetMapIndex(currentKey, currentVal) - } - - // Set the built up map to the value - val.Set(valMap) - - // If we had errors, return those - if len(errors) > 0 { - return &Error{errors} - } - - return nil -} - -func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { - typ := dataVal.Type() - for i := 0; i < typ.NumField(); i++ { - // Get the StructField first since this is a cheap operation. If the - // field is unexported, then ignore it. - f := typ.Field(i) - if f.PkgPath != "" { - continue - } - - // Next get the actual value of this field and verify it is assignable - // to the map value. 
- v := dataVal.Field(i) - if !v.Type().AssignableTo(valMap.Type().Elem()) { - return fmt.Errorf("cannot assign type '%s' to map value field of type '%s'", v.Type(), valMap.Type().Elem()) - } - - tagValue := f.Tag.Get(d.config.TagName) - keyName := f.Name - - if tagValue == "" && d.config.IgnoreUntaggedFields { - continue - } - - // If Squash is set in the config, we squash the field down. - squash := d.config.Squash && v.Kind() == reflect.Struct && f.Anonymous - - v = dereferencePtrToStructIfNeeded(v, d.config.TagName) - - // Determine the name of the key in the map - if index := strings.Index(tagValue, ","); index != -1 { - if tagValue[:index] == "-" { - continue - } - // If "omitempty" is specified in the tag, it ignores empty values. - if strings.Index(tagValue[index+1:], "omitempty") != -1 && isEmptyValue(v) { - continue - } - - // If "squash" is specified in the tag, we squash the field down. - squash = squash || strings.Index(tagValue[index+1:], "squash") != -1 - if squash { - // When squashing, the embedded type can be a pointer to a struct. - if v.Kind() == reflect.Ptr && v.Elem().Kind() == reflect.Struct { - v = v.Elem() - } - - // The final type must be a struct - if v.Kind() != reflect.Struct { - return fmt.Errorf("cannot squash non-struct type '%s'", v.Type()) - } - } - if keyNameTagValue := tagValue[:index]; keyNameTagValue != "" { - keyName = keyNameTagValue - } - } else if len(tagValue) > 0 { - if tagValue == "-" { - continue - } - keyName = tagValue - } - - switch v.Kind() { - // this is an embedded struct, so handle it differently - case reflect.Struct: - x := reflect.New(v.Type()) - x.Elem().Set(v) - - vType := valMap.Type() - vKeyType := vType.Key() - vElemType := vType.Elem() - mType := reflect.MapOf(vKeyType, vElemType) - vMap := reflect.MakeMap(mType) - - // Creating a pointer to a map so that other methods can completely - // overwrite the map if need be (looking at you decodeMapFromMap). The - // indirection allows the underlying map to be settable (CanSet() == true) - // where as reflect.MakeMap returns an unsettable map. - addrVal := reflect.New(vMap.Type()) - reflect.Indirect(addrVal).Set(vMap) - - err := d.decode(keyName, x.Interface(), reflect.Indirect(addrVal)) - if err != nil { - return err - } - - // the underlying map may have been completely overwritten so pull - // it indirectly out of the enclosing value. - vMap = reflect.Indirect(addrVal) - - if squash { - for _, k := range vMap.MapKeys() { - valMap.SetMapIndex(k, vMap.MapIndex(k)) - } - } else { - valMap.SetMapIndex(reflect.ValueOf(keyName), vMap) - } - - default: - valMap.SetMapIndex(reflect.ValueOf(keyName), v) - } - } - - if val.CanAddr() { - val.Set(valMap) - } - - return nil -} - -func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) (bool, error) { - // If the input data is nil, then we want to just set the output - // pointer to be nil as well. - isNil := data == nil - if !isNil { - switch v := reflect.Indirect(reflect.ValueOf(data)); v.Kind() { - case reflect.Chan, - reflect.Func, - reflect.Interface, - reflect.Map, - reflect.Ptr, - reflect.Slice: - isNil = v.IsNil() - } - } - if isNil { - if !val.IsNil() && val.CanSet() { - nilValue := reflect.New(val.Type()).Elem() - val.Set(nilValue) - } - - return true, nil - } - - // Create an element of the concrete (non pointer) type and decode - // into that. Then set the value of the pointer to this type. 
- valType := val.Type() - valElemType := valType.Elem() - if val.CanSet() { - realVal := val - if realVal.IsNil() || d.config.ZeroFields { - realVal = reflect.New(valElemType) - } - - if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil { - return false, err - } - - val.Set(realVal) - } else { - if err := d.decode(name, data, reflect.Indirect(val)); err != nil { - return false, err - } - } - return false, nil -} - -func (d *Decoder) decodeFunc(name string, data interface{}, val reflect.Value) error { - // Create an element of the concrete (non pointer) type and decode - // into that. Then set the value of the pointer to this type. - dataVal := reflect.Indirect(reflect.ValueOf(data)) - if val.Type() != dataVal.Type() { - return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", - name, val.Type(), dataVal.Type(), data) - } - val.Set(dataVal) - return nil -} - -func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - dataValKind := dataVal.Kind() - valType := val.Type() - valElemType := valType.Elem() - sliceType := reflect.SliceOf(valElemType) - - // If we have a non array/slice type then we first attempt to convert. - if dataValKind != reflect.Array && dataValKind != reflect.Slice { - if d.config.WeaklyTypedInput { - switch { - // Slice and array we use the normal logic - case dataValKind == reflect.Slice, dataValKind == reflect.Array: - break - - // Empty maps turn into empty slices - case dataValKind == reflect.Map: - if dataVal.Len() == 0 { - val.Set(reflect.MakeSlice(sliceType, 0, 0)) - return nil - } - // Create slice of maps of other sizes - return d.decodeSlice(name, []interface{}{data}, val) - - case dataValKind == reflect.String && valElemType.Kind() == reflect.Uint8: - return d.decodeSlice(name, []byte(dataVal.String()), val) - - // All other types we try to convert to the slice type - // and "lift" it into it. i.e. a string becomes a string slice. - default: - // Just re-try this function with data as a slice. - return d.decodeSlice(name, []interface{}{data}, val) - } - } - - return fmt.Errorf( - "'%s': source data must be an array or slice, got %s", name, dataValKind) - } - - // If the input value is nil, then don't allocate since empty != nil - if dataValKind != reflect.Array && dataVal.IsNil() { - return nil - } - - valSlice := val - if valSlice.IsNil() || d.config.ZeroFields { - // Make a new slice to hold our result, same size as the original data. 
- valSlice = reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len()) - } - - // Accumulate any errors - errors := make([]string, 0) - - for i := 0; i < dataVal.Len(); i++ { - currentData := dataVal.Index(i).Interface() - for valSlice.Len() <= i { - valSlice = reflect.Append(valSlice, reflect.Zero(valElemType)) - } - currentField := valSlice.Index(i) - - fieldName := name + "[" + strconv.Itoa(i) + "]" - if err := d.decode(fieldName, currentData, currentField); err != nil { - errors = appendErrors(errors, err) - } - } - - // Finally, set the value to the slice we built up - val.Set(valSlice) - - // If there were errors, we return those - if len(errors) > 0 { - return &Error{errors} - } - - return nil -} - -func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - dataValKind := dataVal.Kind() - valType := val.Type() - valElemType := valType.Elem() - arrayType := reflect.ArrayOf(valType.Len(), valElemType) - - valArray := val - - if valArray.Interface() == reflect.Zero(valArray.Type()).Interface() || d.config.ZeroFields { - // Check input type - if dataValKind != reflect.Array && dataValKind != reflect.Slice { - if d.config.WeaklyTypedInput { - switch { - // Empty maps turn into empty arrays - case dataValKind == reflect.Map: - if dataVal.Len() == 0 { - val.Set(reflect.Zero(arrayType)) - return nil - } - - // All other types we try to convert to the array type - // and "lift" it into it. i.e. a string becomes a string array. - default: - // Just re-try this function with data as a slice. - return d.decodeArray(name, []interface{}{data}, val) - } - } - - return fmt.Errorf( - "'%s': source data must be an array or slice, got %s", name, dataValKind) - - } - if dataVal.Len() > arrayType.Len() { - return fmt.Errorf( - "'%s': expected source data to have length less or equal to %d, got %d", name, arrayType.Len(), dataVal.Len()) - - } - - // Make a new array to hold our result, same size as the original data. - valArray = reflect.New(arrayType).Elem() - } - - // Accumulate any errors - errors := make([]string, 0) - - for i := 0; i < dataVal.Len(); i++ { - currentData := dataVal.Index(i).Interface() - currentField := valArray.Index(i) - - fieldName := name + "[" + strconv.Itoa(i) + "]" - if err := d.decode(fieldName, currentData, currentField); err != nil { - errors = appendErrors(errors, err) - } - } - - // Finally, set the value to the array we built up - val.Set(valArray) - - // If there were errors, we return those - if len(errors) > 0 { - return &Error{errors} - } - - return nil -} - -func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error { - dataVal := reflect.Indirect(reflect.ValueOf(data)) - - // If the type of the value to write to and the data match directly, - // then we just set it directly instead of recursing into the structure. - if dataVal.Type() == val.Type() { - val.Set(dataVal) - return nil - } - - dataValKind := dataVal.Kind() - switch dataValKind { - case reflect.Map: - return d.decodeStructFromMap(name, dataVal, val) - - case reflect.Struct: - // Not the most efficient way to do this but we can optimize later if - // we want to. To convert from struct to struct we go to map first - // as an intermediary. 
- - // Make a new map to hold our result - mapType := reflect.TypeOf((map[string]interface{})(nil)) - mval := reflect.MakeMap(mapType) - - // Creating a pointer to a map so that other methods can completely - // overwrite the map if need be (looking at you decodeMapFromMap). The - // indirection allows the underlying map to be settable (CanSet() == true) - // where as reflect.MakeMap returns an unsettable map. - addrVal := reflect.New(mval.Type()) - - reflect.Indirect(addrVal).Set(mval) - if err := d.decodeMapFromStruct(name, dataVal, reflect.Indirect(addrVal), mval); err != nil { - return err - } - - result := d.decodeStructFromMap(name, reflect.Indirect(addrVal), val) - return result - - default: - return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind()) - } -} - -func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) error { - dataValType := dataVal.Type() - if kind := dataValType.Key().Kind(); kind != reflect.String && kind != reflect.Interface { - return fmt.Errorf( - "'%s' needs a map with string keys, has '%s' keys", - name, dataValType.Key().Kind()) - } - - dataValKeys := make(map[reflect.Value]struct{}) - dataValKeysUnused := make(map[interface{}]struct{}) - for _, dataValKey := range dataVal.MapKeys() { - dataValKeys[dataValKey] = struct{}{} - dataValKeysUnused[dataValKey.Interface()] = struct{}{} - } - - targetValKeysUnused := make(map[interface{}]struct{}) - errors := make([]string, 0) - - // This slice will keep track of all the structs we'll be decoding. - // There can be more than one struct if there are embedded structs - // that are squashed. - structs := make([]reflect.Value, 1, 5) - structs[0] = val - - // Compile the list of all the fields that we're going to be decoding - // from all the structs. - type field struct { - field reflect.StructField - val reflect.Value - } - - // remainField is set to a valid field set with the "remain" tag if - // we are keeping track of remaining values. - var remainField *field - - fields := []field{} - for len(structs) > 0 { - structVal := structs[0] - structs = structs[1:] - - structType := structVal.Type() - - for i := 0; i < structType.NumField(); i++ { - fieldType := structType.Field(i) - fieldVal := structVal.Field(i) - if fieldVal.Kind() == reflect.Ptr && fieldVal.Elem().Kind() == reflect.Struct { - // Handle embedded struct pointers as embedded structs. - fieldVal = fieldVal.Elem() - } - - // If "squash" is specified in the tag, we squash the field down. 
- squash := d.config.Squash && fieldVal.Kind() == reflect.Struct && fieldType.Anonymous - remain := false - - // We always parse the tags cause we're looking for other tags too - tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",") - for _, tag := range tagParts[1:] { - if tag == "squash" { - squash = true - break - } - - if tag == "remain" { - remain = true - break - } - } - - if squash { - if fieldVal.Kind() != reflect.Struct { - errors = appendErrors(errors, - fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldVal.Kind())) - } else { - structs = append(structs, fieldVal) - } - continue - } - - // Build our field - if remain { - remainField = &field{fieldType, fieldVal} - } else { - // Normal struct field, store it away - fields = append(fields, field{fieldType, fieldVal}) - } - } - } - - // for fieldType, field := range fields { - for _, f := range fields { - field, fieldValue := f.field, f.val - fieldName := field.Name - - tagValue := field.Tag.Get(d.config.TagName) - tagValue = strings.SplitN(tagValue, ",", 2)[0] - if tagValue != "" { - fieldName = tagValue - } - - rawMapKey := reflect.ValueOf(fieldName) - rawMapVal := dataVal.MapIndex(rawMapKey) - if !rawMapVal.IsValid() { - // Do a slower search by iterating over each key and - // doing case-insensitive search. - for dataValKey := range dataValKeys { - mK, ok := dataValKey.Interface().(string) - if !ok { - // Not a string key - continue - } - - if d.config.MatchName(mK, fieldName) { - rawMapKey = dataValKey - rawMapVal = dataVal.MapIndex(dataValKey) - break - } - } - - if !rawMapVal.IsValid() { - // There was no matching key in the map for the value in - // the struct. Remember it for potential errors and metadata. - targetValKeysUnused[fieldName] = struct{}{} - continue - } - } - - if !fieldValue.IsValid() { - // This should never happen - panic("field is not valid") - } - - // If we can't set the field, then it is unexported or something, - // and we just continue onwards. - if !fieldValue.CanSet() { - continue - } - - // Delete the key we're using from the unused map so we stop tracking - delete(dataValKeysUnused, rawMapKey.Interface()) - - // If the name is empty string, then we're at the root, and we - // don't dot-join the fields. - if name != "" { - fieldName = name + "." + fieldName - } - - if err := d.decode(fieldName, rawMapVal.Interface(), fieldValue); err != nil { - errors = appendErrors(errors, err) - } - } - - // If we have a "remain"-tagged field and we have unused keys then - // we put the unused keys directly into the remain field. - if remainField != nil && len(dataValKeysUnused) > 0 { - // Build a map of only the unused values - remain := map[interface{}]interface{}{} - for key := range dataValKeysUnused { - remain[key] = dataVal.MapIndex(reflect.ValueOf(key)).Interface() - } - - // Decode it as-if we were just decoding this map onto our map. 
- if err := d.decodeMap(name, remain, remainField.val); err != nil { - errors = appendErrors(errors, err) - } - - // Set the map to nil so we have none so that the next check will - // not error (ErrorUnused) - dataValKeysUnused = nil - } - - if d.config.ErrorUnused && len(dataValKeysUnused) > 0 { - keys := make([]string, 0, len(dataValKeysUnused)) - for rawKey := range dataValKeysUnused { - keys = append(keys, rawKey.(string)) - } - sort.Strings(keys) - - err := fmt.Errorf("'%s' has invalid keys: %s", name, strings.Join(keys, ", ")) - errors = appendErrors(errors, err) - } - - if d.config.ErrorUnset && len(targetValKeysUnused) > 0 { - keys := make([]string, 0, len(targetValKeysUnused)) - for rawKey := range targetValKeysUnused { - keys = append(keys, rawKey.(string)) - } - sort.Strings(keys) - - err := fmt.Errorf("'%s' has unset fields: %s", name, strings.Join(keys, ", ")) - errors = appendErrors(errors, err) - } - - if len(errors) > 0 { - return &Error{errors} - } - - // Add the unused keys to the list of unused keys if we're tracking metadata - if d.config.Metadata != nil { - for rawKey := range dataValKeysUnused { - key := rawKey.(string) - if name != "" { - key = name + "." + key - } - - d.config.Metadata.Unused = append(d.config.Metadata.Unused, key) - } - for rawKey := range targetValKeysUnused { - key := rawKey.(string) - if name != "" { - key = name + "." + key - } - - d.config.Metadata.Unset = append(d.config.Metadata.Unset, key) - } - } - - return nil -} - -func isEmptyValue(v reflect.Value) bool { - switch getKind(v) { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() - } - return false -} - -func getKind(val reflect.Value) reflect.Kind { - kind := val.Kind() - - switch { - case kind >= reflect.Int && kind <= reflect.Int64: - return reflect.Int - case kind >= reflect.Uint && kind <= reflect.Uint64: - return reflect.Uint - case kind >= reflect.Float32 && kind <= reflect.Float64: - return reflect.Float32 - default: - return kind - } -} - -func isStructTypeConvertibleToMap(typ reflect.Type, checkMapstructureTags bool, tagName string) bool { - for i := 0; i < typ.NumField(); i++ { - f := typ.Field(i) - if f.PkgPath == "" && !checkMapstructureTags { // check for unexported fields - return true - } - if checkMapstructureTags && f.Tag.Get(tagName) != "" { // check for mapstructure tags inside - return true - } - } - return false -} - -func dereferencePtrToStructIfNeeded(v reflect.Value, tagName string) reflect.Value { - if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct { - return v - } - deref := v.Elem() - derefT := deref.Type() - if isStructTypeConvertibleToMap(derefT, true, tagName) { - return deref - } - return v -} diff --git a/vendor/github.com/pjbgf/sha1cd/Dockerfile.arm b/vendor/github.com/pjbgf/sha1cd/Dockerfile.arm index 99761296f..4230fba01 100644 --- a/vendor/github.com/pjbgf/sha1cd/Dockerfile.arm +++ b/vendor/github.com/pjbgf/sha1cd/Dockerfile.arm @@ -1,4 +1,4 @@ -FROM golang:1.20@sha256:2edf6aab2d57644f3fe7407132a0d1770846867465a39c2083770cf62734b05d +FROM golang:1.23@sha256:51a6466e8dbf3e00e422eb0f7a97ac450b2d57b33617bbe8d2ee0bddcd9d0d37 
ENV GOOS=linux ENV GOARCH=arm @@ -10,7 +10,6 @@ ENV PKG_CONFIG_PATH=/usr/lib/arm-linux-gnueabihf/pkgconfig RUN dpkg --add-architecture armhf \ && apt update \ && apt install -y --no-install-recommends \ - upx \ gcc-arm-linux-gnueabihf \ libc6-dev-armhf-cross \ pkg-config \ diff --git a/vendor/github.com/pjbgf/sha1cd/Dockerfile.arm64 b/vendor/github.com/pjbgf/sha1cd/Dockerfile.arm64 index 66bd09474..59928252a 100644 --- a/vendor/github.com/pjbgf/sha1cd/Dockerfile.arm64 +++ b/vendor/github.com/pjbgf/sha1cd/Dockerfile.arm64 @@ -1,4 +1,4 @@ -FROM golang:1.20@sha256:2edf6aab2d57644f3fe7407132a0d1770846867465a39c2083770cf62734b05d +FROM golang:1.23@sha256:51a6466e8dbf3e00e422eb0f7a97ac450b2d57b33617bbe8d2ee0bddcd9d0d37 ENV GOOS=linux ENV GOARCH=arm64 diff --git a/vendor/github.com/pjbgf/sha1cd/LICENSE b/vendor/github.com/pjbgf/sha1cd/LICENSE index 261eeb9e9..c8ff622ff 100644 --- a/vendor/github.com/pjbgf/sha1cd/LICENSE +++ b/vendor/github.com/pjbgf/sha1cd/LICENSE @@ -186,7 +186,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright [yyyy] [name of copyright owner] + Copyright 2023 pjbgf Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/github.com/pjbgf/sha1cd/Makefile b/vendor/github.com/pjbgf/sha1cd/Makefile index b24f2cbad..278a109d8 100644 --- a/vendor/github.com/pjbgf/sha1cd/Makefile +++ b/vendor/github.com/pjbgf/sha1cd/Makefile @@ -32,8 +32,7 @@ build-nocgo: cross-build: build-arm build-arm64 build-nocgo generate: - go run sha1cdblock_amd64_asm.go -out sha1cdblock_amd64.s - sed -i 's;&\samd64;&\n// +build !noasm,gc,amd64;g' sha1cdblock_amd64.s + go generate -x ./... verify: generate git diff --exit-code diff --git a/vendor/github.com/pjbgf/sha1cd/sha1cd.go b/vendor/github.com/pjbgf/sha1cd/sha1cd.go index a69e480ee..509569f66 100644 --- a/vendor/github.com/pjbgf/sha1cd/sha1cd.go +++ b/vendor/github.com/pjbgf/sha1cd/sha1cd.go @@ -20,6 +20,8 @@ import ( shared "github.com/pjbgf/sha1cd/internal" ) +//go:generate go run -C asm . -out ../sha1cdblock_amd64.s -pkg $GOPACKAGE + func init() { crypto.RegisterHash(crypto.SHA1, New) } diff --git a/vendor/github.com/pjbgf/sha1cd/sha1cdblock_amd64.s b/vendor/github.com/pjbgf/sha1cd/sha1cdblock_amd64.s index 86f9821ca..e5e213a52 100644 --- a/vendor/github.com/pjbgf/sha1cd/sha1cdblock_amd64.s +++ b/vendor/github.com/pjbgf/sha1cd/sha1cdblock_amd64.s @@ -1,7 +1,6 @@ -// Code generated by command: go run sha1cdblock_amd64_asm.go -out sha1cdblock_amd64.s. DO NOT EDIT. +// Code generated by command: go run asm.go -out ../sha1cdblock_amd64.s -pkg sha1cd. DO NOT EDIT. //go:build !noasm && gc && amd64 -// +build !noasm,gc,amd64 #include "textflag.h" diff --git a/vendor/github.com/pjbgf/sha1cd/ubc/doc.go b/vendor/github.com/pjbgf/sha1cd/ubc/ubc.go similarity index 68% rename from vendor/github.com/pjbgf/sha1cd/ubc/doc.go rename to vendor/github.com/pjbgf/sha1cd/ubc/ubc.go index 0090e36b9..b0b4d76e3 100644 --- a/vendor/github.com/pjbgf/sha1cd/ubc/doc.go +++ b/vendor/github.com/pjbgf/sha1cd/ubc/ubc.go @@ -1,3 +1,5 @@ // ubc package provides ways for SHA1 blocks to be checked for // Unavoidable Bit Conditions that arise from crypto analysis attacks. package ubc + +//go:generate go run -C asm . 
-out ../ubc_amd64.s -pkg $GOPACKAGE diff --git a/vendor/github.com/pjbgf/sha1cd/ubc/ubc_amd64.go b/vendor/github.com/pjbgf/sha1cd/ubc/ubc_amd64.go new file mode 100644 index 000000000..09159bb5b --- /dev/null +++ b/vendor/github.com/pjbgf/sha1cd/ubc/ubc_amd64.go @@ -0,0 +1,14 @@ +//go:build !noasm && gc && amd64 +// +build !noasm,gc,amd64 + +package ubc + +func CalculateDvMaskAMD64(W [80]uint32) uint32 + +// Check takes as input an expanded message block and verifies the unavoidable bitconditions +// for all listed DVs. It returns a dvmask where each bit belonging to a DV is set if all +// unavoidable bitconditions for that DV have been met. +// Thus, one needs to do the recompression check for each DV that has its bit set. +func CalculateDvMask(W [80]uint32) uint32 { + return CalculateDvMaskAMD64(W) +} diff --git a/vendor/github.com/pjbgf/sha1cd/ubc/ubc_amd64.s b/vendor/github.com/pjbgf/sha1cd/ubc/ubc_amd64.s new file mode 100644 index 000000000..c77ea77ec --- /dev/null +++ b/vendor/github.com/pjbgf/sha1cd/ubc/ubc_amd64.s @@ -0,0 +1,1897 @@ +// Code generated by command: go run asm.go -out ../ubc_amd64.s -pkg ubc. DO NOT EDIT. + +//go:build !noasm && gc && amd64 + +#include "textflag.h" + +// func CalculateDvMaskAMD64(W [80]uint32) uint32 +TEXT ·CalculateDvMaskAMD64(SB), NOSPLIT, $0-324 + MOVL $0xffffffff, AX + + // (((((W[44] ^ W[45]) >> 29) & 1) - 1) | ^(DV_I_48_0_bit | DV_I_51_0_bit | DV_I_52_0_bit | DV_II_45_0_bit | DV_II_46_0_bit | DV_II_50_0_bit | DV_II_51_0_bit)) + MOVL W_44+176(FP), CX + MOVL W_45+180(FP), DX + XORL DX, CX + SHRL $0x1d, CX + ANDL $0x00000001, CX + DECL CX + ORL $0xfd7c5f7f, CX + ANDL CX, AX + + // mask &= (((((W[49] ^ W[50]) >> 29) & 1) - 1) | ^(DV_I_46_0_bit | DV_II_45_0_bit | DV_II_50_0_bit | DV_II_51_0_bit | DV_II_55_0_bit | DV_II_56_0_bit)) + MOVL W_49+196(FP), CX + MOVL W_50+200(FP), DX + XORL DX, CX + SHRL $0x1d, CX + ANDL $0x00000001, CX + DECL CX + ORL $0x3d7efff7, CX + ANDL CX, AX + + // mask &= (((((W[48] ^ W[49]) >> 29) & 1) - 1) | ^(DV_I_45_0_bit | DV_I_52_0_bit | DV_II_49_0_bit | DV_II_50_0_bit | DV_II_54_0_bit | DV_II_55_0_bit)) + MOVL W_48+192(FP), CX + MOVL W_49+196(FP), DX + XORL DX, CX + SHRL $0x1d, CX + ANDL $0x00000001, CX + DECL CX + ORL $0x9f5f7ffb, CX + ANDL CX, AX + + // mask &= ((((W[47] ^ (W[50] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_47_0_bit | DV_I_49_0_bit | DV_I_51_0_bit | DV_II_45_0_bit | DV_II_51_0_bit | DV_II_56_0_bit)) + MOVL W_47+188(FP), CX + MOVL W_50+200(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000010, CX + SUBL $0x00000010, CX + ORL $0x7dfedddf, CX + ANDL CX, AX + + // mask &= (((((W[47] ^ W[48]) >> 29) & 1) - 1) | ^(DV_I_44_0_bit | DV_I_51_0_bit | DV_II_48_0_bit | DV_II_49_0_bit | DV_II_53_0_bit | DV_II_54_0_bit)) + MOVL W_47+188(FP), CX + MOVL W_48+192(FP), DX + XORL DX, CX + SHRL $0x1d, CX + ANDL $0x00000001, CX + DECL CX + ORL $0xcfcfdffd, CX + ANDL CX, AX + + // mask &= (((((W[46] >> 4) ^ (W[49] >> 29)) & 1) - 1) | ^(DV_I_46_0_bit | DV_I_48_0_bit | DV_I_50_0_bit | DV_I_52_0_bit | DV_II_50_0_bit | DV_II_55_0_bit)) + MOVL W_46+184(FP), CX + SHRL $0x04, CX + MOVL W_49+196(FP), DX + SHRL $0x1d, DX + XORL DX, CX + ANDL $0x00000001, CX + DECL CX + ORL $0xbf7f7777, CX + ANDL CX, AX + + // mask &= (((((W[46] ^ W[47]) >> 29) & 1) - 1) | ^(DV_I_43_0_bit | DV_I_50_0_bit | DV_II_47_0_bit | DV_II_48_0_bit | DV_II_52_0_bit | DV_II_53_0_bit)) + MOVL W_46+184(FP), CX + MOVL W_47+188(FP), DX + XORL DX, CX + SHRL $0x1d, CX + ANDL $0x00000001, CX + DECL CX + ORL $0xe7e7f7fe, CX + ANDL CX, AX + + // mask &= (((((W[45] >> 4) ^ 
(W[48] >> 29)) & 1) - 1) | ^(DV_I_45_0_bit | DV_I_47_0_bit | DV_I_49_0_bit | DV_I_51_0_bit | DV_II_49_0_bit | DV_II_54_0_bit)) + MOVL W_45+180(FP), CX + SHRL $0x04, CX + MOVL W_48+192(FP), DX + SHRL $0x1d, DX + XORL DX, CX + ANDL $0x00000001, CX + DECL CX + ORL $0xdfdfdddb, CX + ANDL CX, AX + + // mask &= (((((W[45] ^ W[46]) >> 29) & 1) - 1) | ^(DV_I_49_0_bit | DV_I_52_0_bit | DV_II_46_0_bit | DV_II_47_0_bit | DV_II_51_0_bit | DV_II_52_0_bit)) + MOVL W_45+180(FP), CX + MOVL W_46+184(FP), DX + XORL DX, CX + SHRL $0x1d, CX + ANDL $0x00000001, CX + DECL CX + ORL $0xf5f57dff, CX + ANDL CX, AX + + // mask &= (((((W[44] >> 4) ^ (W[47] >> 29)) & 1) - 1) | ^(DV_I_44_0_bit | DV_I_46_0_bit | DV_I_48_0_bit | DV_I_50_0_bit | DV_II_48_0_bit | DV_II_53_0_bit)) + MOVL W_44+176(FP), CX + SHRL $0x04, CX + MOVL W_47+188(FP), DX + SHRL $0x1d, DX + XORL DX, CX + ANDL $0x00000001, CX + DECL CX + ORL $0xefeff775, CX + ANDL CX, AX + + // mask &= (((((W[43] >> 4) ^ (W[46] >> 29)) & 1) - 1) | ^(DV_I_43_0_bit | DV_I_45_0_bit | DV_I_47_0_bit | DV_I_49_0_bit | DV_II_47_0_bit | DV_II_52_0_bit)) + MOVL W_43+172(FP), CX + SHRL $0x04, CX + MOVL W_46+184(FP), DX + SHRL $0x1d, DX + XORL DX, CX + ANDL $0x00000001, CX + DECL CX + ORL $0xf7f7fdda, CX + ANDL CX, AX + + // mask &= (((((W[43] ^ W[44]) >> 29) & 1) - 1) | ^(DV_I_47_0_bit | DV_I_50_0_bit | DV_I_51_0_bit | DV_II_45_0_bit | DV_II_49_0_bit | DV_II_50_0_bit)) + MOVL W_43+172(FP), CX + MOVL W_44+176(FP), DX + XORL DX, CX + SHRL $0x1d, CX + ANDL $0x00000001, CX + DECL CX + ORL $0xff5ed7df, CX + ANDL CX, AX + + // mask &= (((((W[42] >> 4) ^ (W[45] >> 29)) & 1) - 1) | ^(DV_I_44_0_bit | DV_I_46_0_bit | DV_I_48_0_bit | DV_I_52_0_bit | DV_II_46_0_bit | DV_II_51_0_bit)) + MOVL W_42+168(FP), CX + SHRL $0x04, CX + MOVL W_45+180(FP), DX + SHRL $0x1d, DX + XORL DX, CX + ANDL $0x00000001, CX + DECL CX + ORL $0xfdfd7f75, CX + ANDL CX, AX + + // mask &= (((((W[41] >> 4) ^ (W[44] >> 29)) & 1) - 1) | ^(DV_I_43_0_bit | DV_I_45_0_bit | DV_I_47_0_bit | DV_I_51_0_bit | DV_II_45_0_bit | DV_II_50_0_bit)) + MOVL W_41+164(FP), CX + SHRL $0x04, CX + MOVL W_44+176(FP), DX + SHRL $0x1d, DX + XORL DX, CX + ANDL $0x00000001, CX + DECL CX + ORL $0xff7edfda, CX + ANDL CX, AX + + // mask &= (((((W[40] ^ W[41]) >> 29) & 1) - 1) | ^(DV_I_44_0_bit | DV_I_47_0_bit | DV_I_48_0_bit | DV_II_46_0_bit | DV_II_47_0_bit | DV_II_56_0_bit)) + MOVL W_40+160(FP), CX + MOVL W_41+164(FP), DX + XORL DX, CX + SHRL $0x1d, CX + ANDL $0x00000001, CX + DECL CX + ORL $0x7ff5ff5d, CX + ANDL CX, AX + + // mask &= (((((W[54] ^ W[55]) >> 29) & 1) - 1) | ^(DV_I_51_0_bit | DV_II_47_0_bit | DV_II_50_0_bit | DV_II_55_0_bit | DV_II_56_0_bit)) + MOVL W_54+216(FP), CX + MOVL W_55+220(FP), DX + XORL DX, CX + SHRL $0x1d, CX + ANDL $0x00000001, CX + DECL CX + ORL $0x3f77dfff, CX + ANDL CX, AX + + // mask &= (((((W[53] ^ W[54]) >> 29) & 1) - 1) | ^(DV_I_50_0_bit | DV_II_46_0_bit | DV_II_49_0_bit | DV_II_54_0_bit | DV_II_55_0_bit)) + MOVL W_53+212(FP), CX + MOVL W_54+216(FP), DX + XORL DX, CX + SHRL $0x1d, CX + ANDL $0x00000001, CX + DECL CX + ORL $0x9fddf7ff, CX + ANDL CX, AX + + // mask &= (((((W[52] ^ W[53]) >> 29) & 1) - 1) | ^(DV_I_49_0_bit | DV_II_45_0_bit | DV_II_48_0_bit | DV_II_53_0_bit | DV_II_54_0_bit)) + MOVL W_52+208(FP), CX + MOVL W_53+212(FP), DX + XORL DX, CX + SHRL $0x1d, CX + ANDL $0x00000001, CX + DECL CX + ORL $0xcfeefdff, CX + ANDL CX, AX + + // mask &= ((((W[50] ^ (W[53] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_50_0_bit | DV_I_52_0_bit | DV_II_46_0_bit | DV_II_48_0_bit | DV_II_54_0_bit)) + MOVL W_50+200(FP), CX + MOVL 
W_53+212(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000010, CX + SUBL $0x00000010, CX + ORL $0xdfed77ff, CX + ANDL CX, AX + + // mask &= (((((W[50] ^ W[51]) >> 29) & 1) - 1) | ^(DV_I_47_0_bit | DV_II_46_0_bit | DV_II_51_0_bit | DV_II_52_0_bit | DV_II_56_0_bit)) + MOVL W_50+200(FP), CX + MOVL W_51+204(FP), DX + XORL DX, CX + SHRL $0x1d, CX + ANDL $0x00000001, CX + DECL CX + ORL $0x75fdffdf, CX + ANDL CX, AX + + // mask &= ((((W[49] ^ (W[52] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_49_0_bit | DV_I_51_0_bit | DV_II_45_0_bit | DV_II_47_0_bit | DV_II_53_0_bit)) + MOVL W_49+196(FP), CX + MOVL W_52+208(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000010, CX + SUBL $0x00000010, CX + ORL $0xeff6ddff, CX + ANDL CX, AX + + // mask &= ((((W[48] ^ (W[51] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_48_0_bit | DV_I_50_0_bit | DV_I_52_0_bit | DV_II_46_0_bit | DV_II_52_0_bit)) + MOVL W_48+192(FP), CX + MOVL W_51+204(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000010, CX + SUBL $0x00000010, CX + ORL $0xf7fd777f, CX + ANDL CX, AX + + // mask &= (((((W[42] ^ W[43]) >> 29) & 1) - 1) | ^(DV_I_46_0_bit | DV_I_49_0_bit | DV_I_50_0_bit | DV_II_48_0_bit | DV_II_49_0_bit)) + MOVL W_42+168(FP), CX + MOVL W_43+172(FP), DX + XORL DX, CX + SHRL $0x1d, CX + ANDL $0x00000001, CX + DECL CX + ORL $0xffcff5f7, CX + ANDL CX, AX + + // mask &= (((((W[41] ^ W[42]) >> 29) & 1) - 1) | ^(DV_I_45_0_bit | DV_I_48_0_bit | DV_I_49_0_bit | DV_II_47_0_bit | DV_II_48_0_bit)) + MOVL W_41+164(FP), CX + MOVL W_42+168(FP), DX + XORL DX, CX + SHRL $0x1d, CX + ANDL $0x00000001, CX + DECL CX + ORL $0xffe7fd7b, CX + ANDL CX, AX + + // mask &= (((((W[40] >> 4) ^ (W[43] >> 29)) & 1) - 1) | ^(DV_I_44_0_bit | DV_I_46_0_bit | DV_I_50_0_bit | DV_II_49_0_bit | DV_II_56_0_bit)) + MOVL W_40+160(FP), CX + MOVL W_43+172(FP), DX + SHRL $0x04, CX + SHRL $0x1d, DX + XORL DX, CX + ANDL $0x00000001, CX + DECL CX + ORL $0x7fdff7f5, CX + ANDL CX, AX + + // mask &= (((((W[39] >> 4) ^ (W[42] >> 29)) & 1) - 1) | ^(DV_I_43_0_bit | DV_I_45_0_bit | DV_I_49_0_bit | DV_II_48_0_bit | DV_II_55_0_bit)) + MOVL W_39+156(FP), CX + MOVL W_42+168(FP), DX + SHRL $0x04, CX + SHRL $0x1d, DX + XORL DX, CX + ANDL $0x00000001, CX + DECL CX + ORL $0xbfeffdfa, CX + ANDL CX, AX + + // if (mask & (DV_I_44_0_bit | DV_I_48_0_bit | DV_II_47_0_bit | DV_II_54_0_bit | DV_II_56_0_bit)) != 0 { + // mask &= (((((W[38] >> 4) ^ (W[41] >> 29)) & 1) - 1) | ^(DV_I_44_0_bit | DV_I_48_0_bit | DV_II_47_0_bit | DV_II_54_0_bit | DV_II_56_0_bit)) + // } + TESTL $0xa0080082, AX + JE f1 + MOVL W_38+152(FP), CX + MOVL W_41+164(FP), DX + SHRL $0x04, CX + SHRL $0x1d, DX + XORL DX, CX + ANDL $0x00000001, CX + DECL CX + ORL $0x5ff7ff7d, CX + ANDL CX, AX + +f1: + // mask &= (((((W[37] >> 4) ^ (W[40] >> 29)) & 1) - 1) | ^(DV_I_43_0_bit | DV_I_47_0_bit | DV_II_46_0_bit | DV_II_53_0_bit | DV_II_55_0_bit)) + MOVL W_37+148(FP), CX + MOVL W_40+160(FP), DX + SHRL $0x04, CX + SHRL $0x1d, DX + XORL DX, CX + ANDL $0x00000001, CX + DECL CX + ORL $0xaffdffde, CX + ANDL CX, AX + + // if (mask & (DV_I_52_0_bit | DV_II_48_0_bit | DV_II_51_0_bit | DV_II_56_0_bit)) != 0 { + // mask &= (((((W[55] ^ W[56]) >> 29) & 1) - 1) | ^(DV_I_52_0_bit | DV_II_48_0_bit | DV_II_51_0_bit | DV_II_56_0_bit)) + // } + TESTL $0x82108000, AX + JE f2 + MOVL W_55+220(FP), CX + MOVL W_56+224(FP), DX + XORL DX, CX + SHRL $0x1d, CX + ANDL $0x00000001, CX + DECL CX + ORL $0x7def7fff, CX + ANDL CX, AX + +f2: + // if (mask & (DV_I_52_0_bit | DV_II_48_0_bit | DV_II_50_0_bit | DV_II_56_0_bit)) != 0 { + // mask &= ((((W[52] ^ (W[55] >> 25)) & (1 
<< 4)) - (1 << 4)) | ^(DV_I_52_0_bit | DV_II_48_0_bit | DV_II_50_0_bit | DV_II_56_0_bit)) + // } + TESTL $0x80908000, AX + JE f3 + MOVL W_52+208(FP), CX + MOVL W_55+220(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000010, CX + SUBL $0x00000010, CX + ORL $0x7f6f7fff, CX + ANDL CX, AX + +f3: + // if (mask & (DV_I_51_0_bit | DV_II_47_0_bit | DV_II_49_0_bit | DV_II_55_0_bit)) != 0 { + // mask &= ((((W[51] ^ (W[54] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_51_0_bit | DV_II_47_0_bit | DV_II_49_0_bit | DV_II_55_0_bit)) + // } + TESTL $0x40282000, AX + JE f4 + MOVL W_51+204(FP), CX + MOVL W_54+216(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000010, CX + SUBL $0x00000010, CX + ORL $0xbfd7dfff, CX + ANDL CX, AX + +f4: + // if (mask & (DV_I_48_0_bit | DV_II_47_0_bit | DV_II_52_0_bit | DV_II_53_0_bit)) != 0 { + // mask &= (((((W[51] ^ W[52]) >> 29) & 1) - 1) | ^(DV_I_48_0_bit | DV_II_47_0_bit | DV_II_52_0_bit | DV_II_53_0_bit)) + // } + TESTL $0x18080080, AX + JE f5 + MOVL W_51+204(FP), CX + MOVL W_52+208(FP), DX + XORL DX, CX + SHRL $0x1d, CX + ANDL $0x00000001, CX + DECL CX + ORL $0xe7f7ff7f, CX + ANDL CX, AX + +f5: + // if (mask & (DV_I_46_0_bit | DV_I_49_0_bit | DV_II_45_0_bit | DV_II_48_0_bit)) != 0 { + // mask &= (((((W[36] >> 4) ^ (W[40] >> 29)) & 1) - 1) | ^(DV_I_46_0_bit | DV_I_49_0_bit | DV_II_45_0_bit | DV_II_48_0_bit)) + // } + TESTL $0x00110208, AX + JE f6 + MOVL W_36+144(FP), CX + SHRL $0x04, CX + MOVL W_40+160(FP), DX + SHRL $0x1d, DX + XORL DX, CX + ANDL $0x00000001, CX + DECL CX + ORL $0xffeefdf7, CX + ANDL CX, AX + +f6: + // if (mask & (DV_I_52_0_bit | DV_II_48_0_bit | DV_II_49_0_bit)) != 0 { + // mask &= ((0 - (((W[53] ^ W[56]) >> 29) & 1)) | ^(DV_I_52_0_bit | DV_II_48_0_bit | DV_II_49_0_bit)) + // } + TESTL $0x00308000, AX + JE f7 + MOVL W_53+212(FP), CX + MOVL W_56+224(FP), DX + XORL DX, CX + SHRL $0x1d, CX + ANDL $0x00000001, CX + NEGL CX + ORL $0xffcf7fff, CX + ANDL CX, AX + +f7: + // if (mask & (DV_I_50_0_bit | DV_II_46_0_bit | DV_II_47_0_bit)) != 0 { + // mask &= ((0 - (((W[51] ^ W[54]) >> 29) & 1)) | ^(DV_I_50_0_bit | DV_II_46_0_bit | DV_II_47_0_bit)) + // } + TESTL $0x000a0800, AX + JE f8 + MOVL W_51+204(FP), CX + MOVL W_54+216(FP), DX + XORL DX, CX + SHRL $0x1d, CX + ANDL $0x00000001, CX + NEGL CX + ORL $0xfff5f7ff, CX + ANDL CX, AX + +f8: + // if (mask & (DV_I_49_0_bit | DV_I_51_0_bit | DV_II_45_0_bit)) != 0 { + // mask &= ((0 - (((W[50] ^ W[52]) >> 29) & 1)) | ^(DV_I_49_0_bit | DV_I_51_0_bit | DV_II_45_0_bit)) + // } + TESTL $0x00012200, AX + JE f9 + MOVL W_50+200(FP), CX + MOVL W_52+208(FP), DX + XORL DX, CX + SHRL $0x1d, CX + ANDL $0x00000001, CX + NEGL CX + ORL $0xfffeddff, CX + ANDL CX, AX + +f9: + // if (mask & (DV_I_48_0_bit | DV_I_50_0_bit | DV_I_52_0_bit)) != 0 { + // mask &= ((0 - (((W[49] ^ W[51]) >> 29) & 1)) | ^(DV_I_48_0_bit | DV_I_50_0_bit | DV_I_52_0_bit)) + // } + TESTL $0x00008880, AX + JE f10 + MOVL W_49+196(FP), CX + MOVL W_51+204(FP), DX + XORL DX, CX + SHRL $0x1d, CX + ANDL $0x00000001, CX + NEGL CX + ORL $0xffff777f, CX + ANDL CX, AX + +f10: + // if (mask & (DV_I_47_0_bit | DV_I_49_0_bit | DV_I_51_0_bit)) != 0 { + // mask &= ((0 - (((W[48] ^ W[50]) >> 29) & 1)) | ^(DV_I_47_0_bit | DV_I_49_0_bit | DV_I_51_0_bit)) + // } + TESTL $0x00002220, AX + JE f11 + MOVL W_48+192(FP), CX + MOVL W_50+200(FP), DX + XORL DX, CX + SHRL $0x1d, CX + ANDL $0x00000001, CX + NEGL CX + ORL $0xffffdddf, CX + ANDL CX, AX + +f11: + // if (mask & (DV_I_46_0_bit | DV_I_48_0_bit | DV_I_50_0_bit)) != 0 { + // mask &= ((0 - (((W[47] ^ W[49]) >> 29) & 1)) | 
^(DV_I_46_0_bit | DV_I_48_0_bit | DV_I_50_0_bit)) + // } + TESTL $0x00000888, AX + JE f12 + MOVL W_47+188(FP), CX + MOVL W_49+196(FP), DX + XORL DX, CX + SHRL $0x1d, CX + ANDL $0x00000001, CX + NEGL CX + ORL $0xfffff777, CX + ANDL CX, AX + +f12: + // if (mask & (DV_I_45_0_bit | DV_I_47_0_bit | DV_I_49_0_bit)) != 0 { + // mask &= ((0 - (((W[46] ^ W[48]) >> 29) & 1)) | ^(DV_I_45_0_bit | DV_I_47_0_bit | DV_I_49_0_bit)) + // } + TESTL $0x00000224, AX + JE f13 + MOVL W_46+184(FP), CX + MOVL W_48+192(FP), DX + XORL DX, CX + SHRL $0x1d, CX + ANDL $0x00000001, CX + NEGL CX + ORL $0xfffffddb, CX + ANDL CX, AX + +f13: + // mask &= ((((W[45] ^ W[47]) & (1 << 6)) - (1 << 6)) | ^(DV_I_47_2_bit | DV_I_49_2_bit | DV_I_51_2_bit)) + MOVL W_45+180(FP), CX + MOVL W_47+188(FP), DX + XORL DX, CX + ANDL $0x00000040, CX + SUBL $0x00000040, CX + ORL $0xffffbbbf, CX + ANDL CX, AX + + // if (mask & (DV_I_44_0_bit | DV_I_46_0_bit | DV_I_48_0_bit)) != 0 { + // mask &= ((0 - (((W[45] ^ W[47]) >> 29) & 1)) | ^(DV_I_44_0_bit | DV_I_46_0_bit | DV_I_48_0_bit)) + // } + TESTL $0x0000008a, AX + JE f14 + MOVL W_45+180(FP), CX + MOVL W_47+188(FP), DX + XORL DX, CX + SHRL $0x1d, CX + ANDL $0x00000001, CX + NEGL CX + ORL $0xffffff75, CX + ANDL CX, AX + +f14: + // mask &= (((((W[44] ^ W[46]) >> 6) & 1) - 1) | ^(DV_I_46_2_bit | DV_I_48_2_bit | DV_I_50_2_bit)) + MOVL W_44+176(FP), CX + MOVL W_46+184(FP), DX + XORL DX, CX + SHRL $0x06, CX + ANDL $0x00000001, CX + DECL CX + ORL $0xffffeeef, CX + ANDL CX, AX + + // if (mask & (DV_I_43_0_bit | DV_I_45_0_bit | DV_I_47_0_bit)) != 0 { + // mask &= ((0 - (((W[44] ^ W[46]) >> 29) & 1)) | ^(DV_I_43_0_bit | DV_I_45_0_bit | DV_I_47_0_bit)) + // } + TESTL $0x00000025, AX + JE f15 + MOVL W_44+176(FP), CX + MOVL W_46+184(FP), DX + XORL DX, CX + SHRL $0x1d, CX + ANDL $0x00000001, CX + NEGL CX + ORL $0xffffffda, CX + ANDL CX, AX + +f15: + // mask &= ((0 - ((W[41] ^ (W[42] >> 5)) & (1 << 1))) | ^(DV_I_48_2_bit | DV_II_46_2_bit | DV_II_51_2_bit)) + MOVL W_41+164(FP), CX + MOVL W_42+168(FP), DX + SHRL $0x05, DX + XORL DX, CX + ANDL $0x00000002, CX + NEGL CX + ORL $0xfbfbfeff, CX + ANDL CX, AX + + // mask &= ((0 - ((W[40] ^ (W[41] >> 5)) & (1 << 1))) | ^(DV_I_47_2_bit | DV_I_51_2_bit | DV_II_50_2_bit)) + MOVL W_40+160(FP), CX + MOVL W_41+164(FP), DX + SHRL $0x05, DX + XORL DX, CX + ANDL $0x00000002, CX + NEGL CX + ORL $0xfeffbfbf, CX + ANDL CX, AX + + // if (mask & (DV_I_44_0_bit | DV_I_46_0_bit | DV_II_56_0_bit)) != 0 { + // mask &= ((0 - (((W[40] ^ W[42]) >> 4) & 1)) | ^(DV_I_44_0_bit | DV_I_46_0_bit | DV_II_56_0_bit)) + // } + TESTL $0x8000000a, AX + JE f16 + MOVL W_40+160(FP), CX + MOVL W_42+168(FP), DX + XORL DX, CX + SHRL $0x04, CX + ANDL $0x00000001, CX + NEGL CX + ORL $0x7ffffff5, CX + ANDL CX, AX + +f16: + // mask &= ((0 - ((W[39] ^ (W[40] >> 5)) & (1 << 1))) | ^(DV_I_46_2_bit | DV_I_50_2_bit | DV_II_49_2_bit)) + MOVL W_39+156(FP), CX + MOVL W_40+160(FP), DX + SHRL $0x05, DX + XORL DX, CX + ANDL $0x00000002, CX + NEGL CX + ORL $0xffbfefef, CX + ANDL CX, AX + + // if (mask & (DV_I_43_0_bit | DV_I_45_0_bit | DV_II_55_0_bit)) != 0 { + // mask &= ((0 - (((W[39] ^ W[41]) >> 4) & 1)) | ^(DV_I_43_0_bit | DV_I_45_0_bit | DV_II_55_0_bit)) + // } + TESTL $0x40000005, AX + JE f17 + MOVL W_39+156(FP), CX + MOVL W_41+164(FP), DX + XORL DX, CX + SHRL $0x04, CX + ANDL $0x00000001, CX + NEGL CX + ORL $0xbffffffa, CX + ANDL CX, AX + +f17: + // if (mask & (DV_I_44_0_bit | DV_II_54_0_bit | DV_II_56_0_bit)) != 0 { + // mask &= ((0 - (((W[38] ^ W[40]) >> 4) & 1)) | ^(DV_I_44_0_bit | DV_II_54_0_bit | 
DV_II_56_0_bit)) + // } + TESTL $0xa0000002, AX + JE f18 + MOVL W_38+152(FP), CX + MOVL W_40+160(FP), DX + XORL DX, CX + SHRL $0x04, CX + ANDL $0x00000001, CX + NEGL CX + ORL $0x5ffffffd, CX + ANDL CX, AX + +f18: + // if (mask & (DV_I_43_0_bit | DV_II_53_0_bit | DV_II_55_0_bit)) != 0 { + // mask &= ((0 - (((W[37] ^ W[39]) >> 4) & 1)) | ^(DV_I_43_0_bit | DV_II_53_0_bit | DV_II_55_0_bit)) + // } + TESTL $0x50000001, AX + JE f19 + MOVL W_37+148(FP), CX + MOVL W_39+156(FP), DX + XORL DX, CX + SHRL $0x04, CX + ANDL $0x00000001, CX + NEGL CX + ORL $0xaffffffe, CX + ANDL CX, AX + +f19: + // mask &= ((0 - ((W[36] ^ (W[37] >> 5)) & (1 << 1))) | ^(DV_I_47_2_bit | DV_I_50_2_bit | DV_II_46_2_bit)) + MOVL W_36+144(FP), CX + MOVL W_37+148(FP), DX + SHRL $0x05, DX + XORL DX, CX + ANDL $0x00000002, CX + NEGL CX + ORL $0xfffbefbf, CX + ANDL CX, AX + + // if (mask & (DV_I_45_0_bit | DV_I_48_0_bit | DV_II_47_0_bit)) != 0 { + // mask &= (((((W[35] >> 4) ^ (W[39] >> 29)) & 1) - 1) | ^(DV_I_45_0_bit | DV_I_48_0_bit | DV_II_47_0_bit)) + // } + TESTL $0x00080084, AX + JE f20 + MOVL W_35+140(FP), CX + MOVL W_39+156(FP), DX + SHRL $0x04, CX + SHRL $0x1d, DX + XORL DX, CX + ANDL $0x00000001, CX + SUBL $0x00000001, CX + ORL $0xfff7ff7b, CX + ANDL CX, AX + +f20: + // if (mask & (DV_I_48_0_bit | DV_II_48_0_bit)) != 0 { + // mask &= ((0 - ((W[63] ^ (W[64] >> 5)) & (1 << 0))) | ^(DV_I_48_0_bit | DV_II_48_0_bit)) + // } + TESTL $0x00100080, AX + JE f21 + MOVL W_63+252(FP), CX + MOVL W_64+256(FP), DX + SHRL $0x05, DX + XORL DX, CX + ANDL $0x00000001, CX + NEGL CX + ORL $0xffefff7f, CX + ANDL CX, AX + +f21: + // if (mask & (DV_I_45_0_bit | DV_II_45_0_bit)) != 0 { + // mask &= ((0 - ((W[63] ^ (W[64] >> 5)) & (1 << 1))) | ^(DV_I_45_0_bit | DV_II_45_0_bit)) + // } + TESTL $0x00010004, AX + JE f22 + MOVL W_63+252(FP), CX + MOVL W_64+256(FP), DX + SHRL $0x05, DX + XORL DX, CX + ANDL $0x00000002, CX + NEGL CX + ORL $0xfffefffb, CX + ANDL CX, AX + +f22: + // if (mask & (DV_I_47_0_bit | DV_II_47_0_bit)) != 0 { + // mask &= ((0 - ((W[62] ^ (W[63] >> 5)) & (1 << 0))) | ^(DV_I_47_0_bit | DV_II_47_0_bit)) + // } + TESTL $0x00080020, AX + JE f23 + MOVL W_62+248(FP), CX + MOVL W_63+252(FP), DX + SHRL $0x05, DX + XORL DX, CX + ANDL $0x00000001, CX + NEGL CX + ORL $0xfff7ffdf, CX + ANDL CX, AX + +f23: + // if (mask & (DV_I_46_0_bit | DV_II_46_0_bit)) != 0 { + // mask &= ((0 - ((W[61] ^ (W[62] >> 5)) & (1 << 0))) | ^(DV_I_46_0_bit | DV_II_46_0_bit)) + // } + TESTL $0x00020008, AX + JE f24 + MOVL W_61+244(FP), CX + MOVL W_62+248(FP), DX + SHRL $0x05, DX + XORL DX, CX + ANDL $0x00000001, CX + NEGL CX + ORL $0xfffdfff7, CX + ANDL CX, AX + +f24: + // mask &= ((0 - ((W[61] ^ (W[62] >> 5)) & (1 << 2))) | ^(DV_I_46_2_bit | DV_II_46_2_bit)) + MOVL W_61+244(FP), CX + MOVL W_62+248(FP), DX + SHRL $0x05, DX + XORL DX, CX + ANDL $0x00000004, CX + NEGL CX + ORL $0xfffbffef, CX + ANDL CX, AX + + // if (mask & (DV_I_45_0_bit | DV_II_45_0_bit)) != 0 { + // mask &= ((0 - ((W[60] ^ (W[61] >> 5)) & (1 << 0))) | ^(DV_I_45_0_bit | DV_II_45_0_bit)) + // } + TESTL $0x00010004, AX + JE f25 + MOVL W_60+240(FP), CX + MOVL W_61+244(FP), DX + SHRL $0x05, DX + XORL DX, CX + ANDL $0x00000001, CX + NEGL CX + ORL $0xfffefffb, CX + ANDL CX, AX + +f25: + // if (mask & (DV_II_51_0_bit | DV_II_54_0_bit)) != 0 { + // mask &= (((((W[58] ^ W[59]) >> 29) & 1) - 1) | ^(DV_II_51_0_bit | DV_II_54_0_bit)) + // } + TESTL $0x22000000, AX + JE f26 + MOVL W_58+232(FP), CX + MOVL W_59+236(FP), DX + XORL DX, CX + SHRL $0x1d, CX + ANDL $0x00000001, CX + SUBL $0x00000001, CX + ORL 
$0xddffffff, CX + ANDL CX, AX + +f26: + // if (mask & (DV_II_50_0_bit | DV_II_53_0_bit)) != 0 { + // mask &= (((((W[57] ^ W[58]) >> 29) & 1) - 1) | ^(DV_II_50_0_bit | DV_II_53_0_bit)) + // } + TESTL $0x10800000, AX + JE f27 + MOVL W_57+228(FP), CX + MOVL W_58+232(FP), DX + XORL DX, CX + SHRL $0x1d, CX + ANDL $0x00000001, CX + SUBL $0x00000001, CX + ORL $0xef7fffff, CX + ANDL CX, AX + +f27: + // if (mask & (DV_II_52_0_bit | DV_II_54_0_bit)) != 0 { + // mask &= ((((W[56] ^ (W[59] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_II_52_0_bit | DV_II_54_0_bit)) + // } + TESTL $0x28000000, AX + JE f28 + MOVL W_56+224(FP), CX + MOVL W_59+236(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000010, CX + SUBL $0x00000010, CX + ORL $0xd7ffffff, CX + ANDL CX, AX + +f28: + // if (mask & (DV_II_51_0_bit | DV_II_52_0_bit)) != 0 { + // mask &= ((0 - (((W[56] ^ W[59]) >> 29) & 1)) | ^(DV_II_51_0_bit | DV_II_52_0_bit)) + // } + TESTL $0x0a000000, AX + JE f29 + MOVL W_56+224(FP), CX + MOVL W_59+236(FP), DX + XORL DX, CX + SHRL $0x1d, CX + ANDL $0x00000001, CX + NEGL CX + ORL $0xf5ffffff, CX + ANDL CX, AX + +f29: + // if (mask & (DV_II_49_0_bit | DV_II_52_0_bit)) != 0 { + // mask &= (((((W[56] ^ W[57]) >> 29) & 1) - 1) | ^(DV_II_49_0_bit | DV_II_52_0_bit)) + // } + TESTL $0x08200000, AX + JE f30 + MOVL W_56+224(FP), CX + MOVL W_57+228(FP), DX + XORL DX, CX + SHRL $0x1d, CX + ANDL $0x00000001, CX + SUBL $0x00000001, CX + ORL $0xf7dfffff, CX + ANDL CX, AX + +f30: + // if (mask & (DV_II_51_0_bit | DV_II_53_0_bit)) != 0 { + // mask &= ((((W[55] ^ (W[58] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_II_51_0_bit | DV_II_53_0_bit)) + // } + TESTL $0x12000000, AX + JE f31 + MOVL W_55+220(FP), CX + MOVL W_58+232(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000010, CX + SUBL $0x00000010, CX + ORL $0xedffffff, CX + ANDL CX, AX + +f31: + // if (mask & (DV_II_50_0_bit | DV_II_52_0_bit)) != 0 { + // mask &= ((((W[54] ^ (W[57] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_II_50_0_bit | DV_II_52_0_bit)) + // } + TESTL $0x08800000, AX + JE f32 + MOVL W_54+216(FP), CX + MOVL W_57+228(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000010, CX + SUBL $0x00000010, CX + ORL $0xf77fffff, CX + ANDL CX, AX + +f32: + // if (mask & (DV_II_49_0_bit | DV_II_51_0_bit)) != 0 { + // mask &= ((((W[53] ^ (W[56] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_II_49_0_bit | DV_II_51_0_bit)) + // } + TESTL $0x02200000, AX + JE f33 + MOVL W_53+212(FP), CX + MOVL W_56+224(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000010, CX + SUBL $0x00000010, CX + ORL $0xfddfffff, CX + ANDL CX, AX + +f33: + // mask &= ((((W[51] ^ (W[50] >> 5)) & (1 << 1)) - (1 << 1)) | ^(DV_I_50_2_bit | DV_II_46_2_bit)) + MOVL W_51+204(FP), CX + MOVL W_50+200(FP), DX + SHRL $0x05, DX + XORL DX, CX + ANDL $0x00000002, CX + SUBL $0x00000002, CX + ORL $0xfffbefff, CX + ANDL CX, AX + + // mask &= ((((W[48] ^ W[50]) & (1 << 6)) - (1 << 6)) | ^(DV_I_50_2_bit | DV_II_46_2_bit)) + MOVL W_48+192(FP), CX + MOVL W_50+200(FP), DX + XORL DX, CX + ANDL $0x00000040, CX + SUBL $0x00000040, CX + ORL $0xfffbefff, CX + ANDL CX, AX + + // if (mask & (DV_I_51_0_bit | DV_I_52_0_bit)) != 0 { + // mask &= ((0 - (((W[48] ^ W[55]) >> 29) & 1)) | ^(DV_I_51_0_bit | DV_I_52_0_bit)) + // } + TESTL $0x0000a000, AX + JE f34 + MOVL W_48+192(FP), CX + MOVL W_55+220(FP), DX + XORL DX, CX + SHRL $0x1d, CX + ANDL $0x00000001, CX + NEGL CX + ORL $0xffff5fff, CX + ANDL CX, AX + +f34: + // mask &= ((((W[47] ^ W[49]) & (1 << 6)) - (1 << 6)) | ^(DV_I_49_2_bit | DV_I_51_2_bit)) + MOVL W_47+188(FP), CX + MOVL W_49+196(FP), DX + XORL 
DX, CX + ANDL $0x00000040, CX + SUBL $0x00000040, CX + ORL $0xffffbbff, CX + ANDL CX, AX + + // mask &= ((((W[48] ^ (W[47] >> 5)) & (1 << 1)) - (1 << 1)) | ^(DV_I_47_2_bit | DV_II_51_2_bit)) + MOVL W_48+192(FP), CX + MOVL W_47+188(FP), DX + SHRL $0x05, DX + XORL DX, CX + ANDL $0x00000002, CX + SUBL $0x00000002, CX + ORL $0xfbffffbf, CX + ANDL CX, AX + + // mask &= ((((W[46] ^ W[48]) & (1 << 6)) - (1 << 6)) | ^(DV_I_48_2_bit | DV_I_50_2_bit)) + MOVL W_46+184(FP), CX + MOVL W_48+192(FP), DX + XORL DX, CX + ANDL $0x00000040, CX + SUBL $0x00000040, CX + ORL $0xffffeeff, CX + ANDL CX, AX + + // mask &= ((((W[47] ^ (W[46] >> 5)) & (1 << 1)) - (1 << 1)) | ^(DV_I_46_2_bit | DV_II_50_2_bit)) + MOVL W_47+188(FP), CX + MOVL W_46+184(FP), DX + SHRL $0x05, DX + XORL DX, CX + ANDL $0x00000002, CX + SUBL $0x00000002, CX + ORL $0xfeffffef, CX + ANDL CX, AX + + // mask &= ((0 - ((W[44] ^ (W[45] >> 5)) & (1 << 1))) | ^(DV_I_51_2_bit | DV_II_49_2_bit)) + MOVL W_44+176(FP), CX + MOVL W_45+180(FP), DX + SHRL $0x05, DX + XORL DX, CX + ANDL $0x00000002, CX + NEGL CX + ORL $0xffbfbfff, CX + ANDL CX, AX + + // mask &= ((((W[43] ^ W[45]) & (1 << 6)) - (1 << 6)) | ^(DV_I_47_2_bit | DV_I_49_2_bit)) + MOVL W_43+172(FP), CX + MOVL W_45+180(FP), DX + XORL DX, CX + ANDL $0x00000040, CX + SUBL $0x00000040, CX + ORL $0xfffffbbf, CX + ANDL CX, AX + + // mask &= (((((W[42] ^ W[44]) >> 6) & 1) - 1) | ^(DV_I_46_2_bit | DV_I_48_2_bit)) + MOVL W_42+168(FP), CX + MOVL W_44+176(FP), DX + XORL DX, CX + SHRL $0x06, CX + ANDL $0x00000001, CX + SUBL $0x00000001, CX + ORL $0xfffffeef, CX + ANDL CX, AX + + // mask &= ((((W[43] ^ (W[42] >> 5)) & (1 << 1)) - (1 << 1)) | ^(DV_II_46_2_bit | DV_II_51_2_bit)) + MOVL W_43+172(FP), CX + MOVL W_42+168(FP), DX + SHRL $0x05, DX + XORL DX, CX + ANDL $0x00000002, CX + SUBL $0x00000002, CX + ORL $0xfbfbffff, CX + ANDL CX, AX + + // mask &= ((((W[42] ^ (W[41] >> 5)) & (1 << 1)) - (1 << 1)) | ^(DV_I_51_2_bit | DV_II_50_2_bit)) + MOVL W_42+168(FP), CX + MOVL W_41+164(FP), DX + SHRL $0x05, DX + XORL DX, CX + ANDL $0x00000002, CX + SUBL $0x00000002, CX + ORL $0xfeffbfff, CX + ANDL CX, AX + + // mask &= ((((W[41] ^ (W[40] >> 5)) & (1 << 1)) - (1 << 1)) | ^(DV_I_50_2_bit | DV_II_49_2_bit)) + MOVL W_41+164(FP), CX + MOVL W_40+160(FP), DX + SHRL $0x05, DX + XORL DX, CX + ANDL $0x00000002, CX + SUBL $0x00000002, CX + ORL $0xffbfefff, CX + ANDL CX, AX + + // if (mask & (DV_I_52_0_bit | DV_II_51_0_bit)) != 0 { + // mask &= ((((W[39] ^ (W[43] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_52_0_bit | DV_II_51_0_bit)) + // } + TESTL $0x02008000, AX + JE f35 + MOVL W_39+156(FP), CX + MOVL W_43+172(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000010, CX + SUBL $0x00000010, CX + ORL $0xfdff7fff, CX + ANDL CX, AX + +f35: + // if (mask & (DV_I_51_0_bit | DV_II_50_0_bit)) != 0 { + // mask &= ((((W[38] ^ (W[42] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_51_0_bit | DV_II_50_0_bit)) + // } + TESTL $0x00802000, AX + JE f36 + MOVL W_38+152(FP), CX + MOVL W_42+168(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000010, CX + SUBL $0x00000010, CX + ORL $0xff7fdfff, CX + ANDL CX, AX + +f36: + // if (mask & (DV_I_48_2_bit | DV_I_51_2_bit)) != 0 { + // mask &= ((0 - ((W[37] ^ (W[38] >> 5)) & (1 << 1))) | ^(DV_I_48_2_bit | DV_I_51_2_bit)) + // } + TESTL $0x00004100, AX + JE f37 + MOVL W_37+148(FP), CX + MOVL W_38+152(FP), DX + SHRL $0x05, DX + XORL DX, CX + ANDL $0x00000002, CX + NEGL CX + ORL $0xffffbeff, CX + ANDL CX, AX + +f37: + // if (mask & (DV_I_50_0_bit | DV_II_49_0_bit)) != 0 { + // mask &= ((((W[37] ^ (W[41] >> 25)) & (1 
<< 4)) - (1 << 4)) | ^(DV_I_50_0_bit | DV_II_49_0_bit)) + // } + TESTL $0x00200800, AX + JE f38 + MOVL W_37+148(FP), CX + MOVL W_41+164(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000010, CX + SUBL $0x00000010, CX + ORL $0xffdff7ff, CX + ANDL CX, AX + +f38: + // if (mask & (DV_II_52_0_bit | DV_II_54_0_bit)) != 0 { + // mask &= ((0 - ((W[36] ^ W[38]) & (1 << 4))) | ^(DV_II_52_0_bit | DV_II_54_0_bit)) + // } + TESTL $0x28000000, AX + JE f39 + MOVL W_36+144(FP), CX + MOVL W_38+152(FP), DX + XORL DX, CX + ANDL $0x00000010, CX + NEGL CX + ORL $0xd7ffffff, CX + ANDL CX, AX + +f39: + // mask &= ((0 - ((W[35] ^ (W[36] >> 5)) & (1 << 1))) | ^(DV_I_46_2_bit | DV_I_49_2_bit)) + MOVL W_35+140(FP), CX + MOVL W_36+144(FP), DX + SHRL $0x05, DX + XORL DX, CX + ANDL $0x00000002, CX + NEGL CX + ORL $0xfffffbef, CX + ANDL CX, AX + + // if (mask & (DV_I_51_0_bit | DV_II_47_0_bit)) != 0 { + // mask &= ((((W[35] ^ (W[39] >> 25)) & (1 << 3)) - (1 << 3)) | ^(DV_I_51_0_bit | DV_II_47_0_bit)) + // } + TESTL $0x00082000, AX + JE f40 + MOVL W_35+140(FP), CX + MOVL W_39+156(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000008, CX + SUBL $0x00000008, CX + ORL $0xfff7dfff, CX + ANDL CX, AX + +f40: + // if mask != 0 + TESTL $0x00000000, AX + JNE end + + // if (mask & DV_I_43_0_bit) != 0 { + // if not((W[61]^(W[62]>>5))&(1<<1)) != 0 || + // not(not((W[59]^(W[63]>>25))&(1<<5))) != 0 || + // not((W[58]^(W[63]>>30))&(1<<0)) != 0 { + // mask &= ^DV_I_43_0_bit + // } + // } + BTL $0x00, AX + JNC f41_skip + MOVL W_61+244(FP), CX + MOVL W_62+248(FP), DX + SHRL $0x05, DX + XORL DX, CX + ANDL $0x00000002, CX + NEGL CX + CMPL CX, $0x00000000 + JE f41_in + MOVL W_59+236(FP), CX + MOVL W_63+252(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000020, CX + CMPL CX, $0x00000000 + JNE f41_in + MOVL W_58+232(FP), CX + MOVL W_63+252(FP), DX + SHRL $0x1e, DX + XORL DX, CX + ANDL $0x00000001, CX + NEGL CX + CMPL CX, $0x00000000 + JE f41_in + JMP f41_skip + +f41_in: + ANDL $0xfffffffe, AX + +f41_skip: + // if (mask & DV_I_44_0_bit) != 0 { + // if not((W[62]^(W[63]>>5))&(1<<1)) != 0 || + // not(not((W[60]^(W[64]>>25))&(1<<5))) != 0 || + // not((W[59]^(W[64]>>30))&(1<<0)) != 0 { + // mask &= ^DV_I_44_0_bit + // } + // } + BTL $0x01, AX + JNC f42_skip + MOVL W_62+248(FP), CX + MOVL W_63+252(FP), DX + SHRL $0x05, DX + XORL DX, CX + ANDL $0x00000002, CX + NEGL CX + CMPL CX, $0x00000000 + JE f42_in + MOVL W_60+240(FP), CX + MOVL W_64+256(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000020, CX + CMPL CX, $0x00000000 + JNE f42_in + MOVL W_59+236(FP), CX + MOVL W_64+256(FP), DX + SHRL $0x1e, DX + XORL DX, CX + ANDL $0x00000001, CX + NEGL CX + CMPL CX, $0x00000000 + JE f42_in + JMP f42_skip + +f42_in: + ANDL $0xfffffffd, AX + +f42_skip: + // if (mask & DV_I_46_2_bit) != 0 { + // mask &= ((^((W[40] ^ W[42]) >> 2)) | ^DV_I_46_2_bit) + // } + BTL $0x04, AX + JNC f43 + MOVL W_40+160(FP), CX + MOVL W_42+168(FP), DX + XORL DX, CX + SHRL $0x02, CX + NOTL CX + ORL $0xffffffef, CX + ANDL CX, AX + +f43: + // if (mask & DV_I_47_2_bit) != 0 { + // if not((W[62]^(W[63]>>5))&(1<<2)) != 0 || + // not(not((W[41]^W[43])&(1<<6))) != 0 { + // mask &= ^DV_I_47_2_bit + // } + // } + BTL $0x06, AX + JNC f44_skip + MOVL W_62+248(FP), CX + MOVL W_63+252(FP), DX + SHRL $0x05, DX + XORL DX, CX + ANDL $0x00000004, CX + NEGL CX + CMPL CX, $0x00000000 + JE f44_in + MOVL W_41+164(FP), CX + MOVL W_43+172(FP), DX + XORL DX, CX + ANDL $0x00000040, CX + CMPL CX, $0x00000000 + JNE f44_in + JMP f44_skip + +f44_in: + ANDL $0xffffffbf, AX + +f44_skip: + // if (mask & 
DV_I_48_2_bit) != 0 { + // if not((W[63]^(W[64]>>5))&(1<<2)) != 0 || + // not(not((W[48]^(W[49]<<5))&(1<<6))) != 0 { + // mask &= ^DV_I_48_2_bit + // } + // } + BTL $0x08, AX + JNC f45_skip + MOVL W_63+252(FP), CX + MOVL W_64+256(FP), DX + SHRL $0x05, DX + XORL DX, CX + ANDL $0x00000004, CX + NEGL CX + CMPL CX, $0x00000000 + JE f45_in + MOVL W_48+192(FP), CX + MOVL W_49+196(FP), DX + SHLL $0x05, DX + XORL DX, CX + ANDL $0x00000040, CX + CMPL CX, $0x00000000 + JNE f45_in + JMP f45_skip + +f45_in: + ANDL $0xfffffeff, AX + +f45_skip: + // if (mask & DV_I_49_2_bit) != 0 { + // if not(not((W[49]^(W[50]<<5))&(1<<6))) != 0 || + // not((W[42]^W[50])&(1<<1)) != 0 || + // not(not((W[39]^(W[40]<<5))&(1<<6))) != 0 || + // not((W[38]^W[40])&(1<<1)) != 0 { + // mask &= ^DV_I_49_2_bit + // } + // } + BTL $0x0a, AX + JNC f46_skip + MOVL W_49+196(FP), CX + MOVL W_50+200(FP), DX + SHLL $0x05, DX + XORL DX, CX + ANDL $0x00000040, CX + CMPL CX, $0x00000000 + JNE f46_in + MOVL W_42+168(FP), CX + MOVL W_50+200(FP), DX + XORL DX, CX + ANDL $0x00000002, CX + CMPL CX, $0x00000000 + JE f46_in + MOVL W_39+156(FP), CX + MOVL W_40+160(FP), DX + SHLL $0x05, DX + XORL DX, CX + ANDL $0x00000040, CX + CMPL CX, $0x00000000 + JNE f46_in + MOVL W_38+152(FP), CX + MOVL W_40+160(FP), DX + XORL DX, CX + ANDL $0x00000002, CX + CMPL CX, $0x00000000 + JE f46_in + JMP f46_skip + +f46_in: + ANDL $0xfffffbff, AX + +f46_skip: + // if (mask & DV_I_50_0_bit) != 0 { + // mask &= (((W[36] ^ W[37]) << 7) | ^DV_I_50_0_bit) + // } + BTL $0x0b, AX + JNC f47 + MOVL W_36+144(FP), CX + MOVL W_37+148(FP), DX + XORL DX, CX + SHLL $0x07, CX + ORL $0xfffff7ff, CX + ANDL CX, AX + +f47: + // if (mask & DV_I_50_2_bit) != 0 { + // mask &= (((W[43] ^ W[51]) << 11) | ^DV_I_50_2_bit) + // } + BTL $0x0c, AX + JNC f48 + MOVL W_43+172(FP), CX + MOVL W_51+204(FP), DX + XORL DX, CX + SHLL $0x0b, CX + ORL $0xffffefff, CX + ANDL CX, AX + +f48: + // if (mask & DV_I_51_0_bit) != 0 { + // mask &= (((W[37] ^ W[38]) << 9) | ^DV_I_51_0_bit) + // } + BTL $0x0d, AX + JNC f49 + MOVL W_37+148(FP), CX + MOVL W_38+152(FP), DX + XORL DX, CX + SHLL $0x09, CX + ORL $0xffffdfff, CX + ANDL CX, AX + +f49: + // if (mask & DV_I_51_2_bit) != 0 { + // if not(not((W[51]^(W[52]<<5))&(1<<6))) != 0 || + // not(not((W[49]^W[51])&(1<<6))) != 0 || + // not(not((W[37]^(W[37]>>5))&(1<<1))) != 0 || + // not(not((W[35]^(W[39]>>25))&(1<<5))) != 0 { + // mask &= ^DV_I_51_2_bit + // } + // } + BTL $0x0e, AX + JNC f50_skip + MOVL W_51+204(FP), CX + MOVL W_52+208(FP), DX + SHLL $0x05, DX + XORL DX, CX + ANDL $0x00000040, CX + CMPL CX, $0x00000000 + JNE f50_in + MOVL W_49+196(FP), CX + MOVL W_51+204(FP), DX + XORL DX, CX + ANDL $0x00000040, CX + CMPL CX, $0x00000000 + JNE f50_in + MOVL W_37+148(FP), CX + MOVL W_37+148(FP), DX + SHRL $0x05, DX + XORL DX, CX + ANDL $0x00000002, CX + CMPL CX, $0x00000000 + JNE f50_in + MOVL W_35+140(FP), CX + MOVL W_39+156(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000020, CX + CMPL CX, $0x00000000 + JNE f50_in + JMP f50_skip + +f50_in: + ANDL $0xffffbfff, AX + +f50_skip: + // if (mask & DV_I_52_0_bit) != 0 { + // mask &= (((W[38] ^ W[39]) << 11) | ^DV_I_52_0_bit) + // } + BTL $0x0f, AX + JNC f51 + MOVL W_38+152(FP), CX + MOVL W_39+156(FP), DX + XORL DX, CX + SHLL $0x0b, CX + ORL $0xffff7fff, CX + ANDL CX, AX + +f51: + // if (mask & DV_II_46_2_bit) != 0 { + // mask &= (((W[47] ^ W[51]) << 17) | ^DV_II_46_2_bit) + // } + TESTL $0x00040000, AX + BTL $0x12, AX + JNC f52 + MOVL W_47+188(FP), CX + MOVL W_51+204(FP), DX + XORL DX, CX + SHLL $0x11, CX + ORL $0xfffbffff, 
CX + ANDL CX, AX + +f52: + // if (mask & DV_II_48_0_bit) != 0 { + // if not(not((W[36]^(W[40]>>25))&(1<<3))) != 0 || + // not((W[35]^(W[40]<<2))&(1<<30)) != 0 { + // mask &= ^DV_II_48_0_bit + // } + // } + BTL $0x14, AX + JNC f53_skip + MOVL W_36+144(FP), CX + MOVL W_40+160(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000008, CX + CMPL CX, $0x00000000 + JNE f53_in + MOVL W_35+140(FP), CX + MOVL W_40+160(FP), DX + SHLL $0x02, DX + XORL DX, CX + ANDL $0x40000000, CX + CMPL CX, $0x00000000 + JE f53_in + JMP f53_skip + +f53_in: + ANDL $0xffefffff, AX + +f53_skip: + // if (mask & DV_II_49_0_bit) != 0 { + // if not(not((W[37]^(W[41]>>25))&(1<<3))) != 0 || + // not((W[36]^(W[41]<<2))&(1<<30)) != 0 { + // mask &= ^DV_II_49_0_bit + // } + // } + BTL $0x15, AX + JNC f54_skip + MOVL W_37+148(FP), CX + MOVL W_41+164(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000008, CX + CMPL CX, $0x00000000 + JNE f54_in + MOVL W_36+144(FP), CX + MOVL W_41+164(FP), DX + SHLL $0x02, DX + XORL DX, CX + ANDL $0x40000000, CX + CMPL CX, $0x00000000 + JE f54_in + JMP f54_skip + +f54_in: + ANDL $0xffdfffff, AX + +f54_skip: + // if (mask & DV_II_49_2_bit) != 0 { + // if not(not((W[53]^(W[54]<<5))&(1<<6))) != 0 || + // not(not((W[51]^W[53])&(1<<6))) != 0 || + // not((W[50]^W[54])&(1<<1)) != 0 || + // not(not((W[45]^(W[46]<<5))&(1<<6))) != 0 || + // not(not((W[37]^(W[41]>>25))&(1<<5))) != 0 || + // not((W[36]^(W[41]>>30))&(1<<0)) != 0 { + // mask &= ^DV_II_49_2_bit + // } + // } + BTL $0x16, AX + JNC f55_skip + MOVL W_53+212(FP), CX + MOVL W_54+216(FP), DX + SHLL $0x05, DX + XORL DX, CX + ANDL $0x00000040, CX + CMPL CX, $0x00000000 + JNE f55_in + MOVL W_51+204(FP), CX + MOVL W_53+212(FP), DX + XORL DX, CX + ANDL $0x00000040, CX + CMPL CX, $0x00000000 + JNE f55_in + MOVL W_50+200(FP), CX + MOVL W_54+216(FP), DX + XORL DX, CX + ANDL $0x00000002, CX + NEGL CX + CMPL CX, $0x00000000 + JE f55_in + MOVL W_45+180(FP), CX + MOVL W_46+184(FP), DX + SHLL $0x05, DX + XORL DX, CX + ANDL $0x00000040, CX + CMPL CX, $0x00000000 + JNE f55_in + MOVL W_37+148(FP), CX + MOVL W_41+164(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000020, CX + CMPL CX, $0x00000000 + JNE f55_in + MOVL W_36+144(FP), CX + MOVL W_41+164(FP), DX + SHRL $0x1e, DX + XORL DX, CX + ANDL $0x00000001, CX + NEGL CX + CMPL CX, $0x00000000 + JE f55_in + JMP f55_skip + +f55_in: + ANDL $0xffbfffff, AX + +f55_skip: + // if (mask & DV_II_50_0_bit) != 0 { + // if not((W[55]^W[58])&(1<<29)) != 0 || + // not(not((W[38]^(W[42]>>25))&(1<<3))) != 0 || + // not((W[37]^(W[42]<<2))&(1<<30)) != 0 { + // mask &= ^DV_II_50_0_bit + // } + // } + BTL $0x17, AX + JNC f56_skip + MOVL W_55+220(FP), CX + MOVL W_58+232(FP), DX + XORL DX, CX + ANDL $0x20000000, CX + NEGL CX + CMPL CX, $0x00000000 + JE f56_in + MOVL W_38+152(FP), CX + MOVL W_42+168(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000008, CX + CMPL CX, $0x00000000 + JNE f56_in + MOVL W_37+148(FP), CX + MOVL W_42+168(FP), DX + SHLL $0x02, DX + XORL DX, CX + ANDL $0x40000000, CX + NEGL CX + CMPL CX, $0x00000000 + JE f56_in + JMP f56_skip + +f56_in: + ANDL $0xff7fffff, AX + +f56_skip: + // if (mask & DV_II_50_2_bit) != 0 { + // if not(not((W[54]^(W[55]<<5))&(1<<6))) != 0 || + // not(not((W[52]^W[54])&(1<<6))) != 0 || + // not((W[51]^W[55])&(1<<1)) != 0 || + // not((W[45]^W[47])&(1<<1)) != 0 || + // not(not((W[38]^(W[42]>>25))&(1<<5))) != 0 || + // not((W[37]^(W[42]>>30))&(1<<0)) != 0 { + // mask &= ^DV_II_50_2_bit + // } + // } + BTL $0x18, AX + JNC f57_skip + MOVL W_54+216(FP), CX + MOVL W_55+220(FP), DX + SHLL
$0x05, DX + XORL DX, CX + ANDL $0x00000040, CX + CMPL CX, $0x00000000 + JNE f57_in + MOVL W_52+208(FP), CX + MOVL W_54+216(FP), DX + XORL DX, CX + ANDL $0x00000040, CX + CMPL CX, $0x00000000 + JNE f57_in + MOVL W_51+204(FP), CX + MOVL W_55+220(FP), DX + XORL DX, CX + ANDL $0x00000002, CX + NEGL CX + CMPL CX, $0x00000000 + JE f57_in + MOVL W_45+180(FP), CX + MOVL W_47+188(FP), DX + XORL DX, CX + ANDL $0x00000002, CX + NEGL CX + CMPL CX, $0x00000000 + JE f57_in + MOVL W_38+152(FP), CX + MOVL W_42+168(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000020, CX + CMPL CX, $0x00000000 + JNE f57_in + MOVL W_37+148(FP), CX + MOVL W_42+168(FP), DX + SHRL $0x1e, DX + XORL DX, CX + ANDL $0x00000001, CX + NEGL CX + CMPL CX, $0x00000000 + JE f57_in + JMP f57_skip + +f57_in: + ANDL $0xfeffffff, AX + +f57_skip: + // if (mask & DV_II_51_0_bit) != 0 { + // if not(not((W[39]^(W[43]>>25))&(1<<3))) != 0 || + // not((W[38]^(W[43]<<2))&(1<<30)) != 0 { + // mask &= ^DV_II_51_0_bit + // } + // } + BTL $0x19, AX + JNC f58_skip + MOVL W_39+156(FP), CX + MOVL W_43+172(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000008, CX + CMPL CX, $0x00000000 + JNE f58_in + MOVL W_38+152(FP), CX + MOVL W_43+172(FP), DX + SHLL $0x02, DX + XORL DX, CX + ANDL $0x40000000, CX + NEGL CX + CMPL CX, $0x00000000 + JE f58_in + JMP f58_skip + +f58_in: + ANDL $0xfdffffff, AX + +f58_skip: + // if (mask & DV_II_51_2_bit) != 0 { + // if not(not((W[55]^(W[56]<<5))&(1<<6))) != 0 || + // not(not((W[53]^W[55])&(1<<6))) != 0 || + // not((W[52]^W[56])&(1<<1)) != 0 || + // not((W[46]^W[48])&(1<<1)) != 0 || + // not(not((W[39]^(W[43]>>25))&(1<<5))) != 0 || + // not((W[38]^(W[43]>>30))&(1<<0)) != 0 { + // mask &= ^DV_II_51_2_bit + // } + // } + BTL $0x1a, AX + JNC f59_skip + MOVL W_55+220(FP), CX + MOVL W_56+224(FP), DX + SHLL $0x05, DX + XORL DX, CX + ANDL $0x00000040, CX + CMPL CX, $0x00000000 + JNE f59_in + MOVL W_53+212(FP), CX + MOVL W_55+220(FP), DX + XORL DX, CX + ANDL $0x00000040, CX + CMPL CX, $0x00000000 + JNE f59_in + MOVL W_52+208(FP), CX + MOVL W_56+224(FP), DX + XORL DX, CX + ANDL $0x00000002, CX + NEGL CX + CMPL CX, $0x00000000 + JE f59_in + MOVL W_46+184(FP), CX + MOVL W_48+192(FP), DX + XORL DX, CX + ANDL $0x00000002, CX + NEGL CX + CMPL CX, $0x00000000 + JE f59_in + MOVL W_39+156(FP), CX + MOVL W_43+172(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000020, CX + CMPL CX, $0x00000000 + JNE f59_in + MOVL W_38+152(FP), CX + MOVL W_43+172(FP), DX + SHRL $0x1e, DX + XORL DX, CX + ANDL $0x00000001, CX + NEGL CX + CMPL CX, $0x00000000 + JE f59_in + JMP f59_skip + +f59_in: + ANDL $0xfbffffff, AX + +f59_skip: + // if (mask & DV_II_52_0_bit) != 0 { + // if not(not((W[59]^W[60])&(1<<29))) != 0 || + // not(not((W[40]^(W[44]>>25))&(1<<3))) != 0 || + // not(not((W[40]^(W[44]>>25))&(1<<4))) != 0 || + // not((W[39]^(W[44]<<2))&(1<<30)) != 0 { + // mask &= ^DV_II_52_0_bit + // } + // } + BTL $0x1b, AX + JNC f60_skip + MOVL W_59+236(FP), CX + MOVL W_60+240(FP), DX + XORL DX, CX + ANDL $0x20000000, CX + CMPL CX, $0x00000000 + JNE f60_in + MOVL W_40+160(FP), CX + MOVL W_44+176(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000008, CX + CMPL CX, $0x00000000 + JNE f60_in + MOVL W_40+160(FP), CX + MOVL W_44+176(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000010, CX + CMPL CX, $0x00000000 + JNE f60_in + MOVL W_39+156(FP), CX + MOVL W_44+176(FP), DX + SHLL $0x02, DX + XORL DX, CX + ANDL $0x40000000, CX + NEGL CX + CMPL CX, $0x00000000 + JE f60_in + JMP f60_skip + +f60_in: + ANDL $0xf7ffffff, AX + +f60_skip: + // if (mask & 
DV_II_53_0_bit) != 0 { + // if not((W[58]^W[61])&(1<<29)) != 0 || + // not(not((W[57]^(W[61]>>25))&(1<<4))) != 0 || + // not(not((W[41]^(W[45]>>25))&(1<<3))) != 0 || + // not(not((W[41]^(W[45]>>25))&(1<<4))) != 0 { + // mask &= ^DV_II_53_0_bit + // } + // } + BTL $0x1c, AX + JNC f61_skip + MOVL W_58+232(FP), CX + MOVL W_61+244(FP), DX + XORL DX, CX + ANDL $0x20000000, CX + NEGL CX + CMPL CX, $0x00000000 + JE f61_in + MOVL W_57+228(FP), CX + MOVL W_61+244(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000010, CX + CMPL CX, $0x00000000 + JNE f61_in + MOVL W_41+164(FP), CX + MOVL W_45+180(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000008, CX + CMPL CX, $0x00000000 + JNE f61_in + MOVL W_41+164(FP), CX + MOVL W_45+180(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000010, CX + CMPL CX, $0x00000000 + JNE f61_in + JMP f61_skip + +f61_in: + ANDL $0xefffffff, AX + +f61_skip: + // if (mask & DV_II_54_0_bit) != 0 { + // if not(not((W[58]^(W[62]>>25))&(1<<4))) != 0 || + // not(not((W[42]^(W[46]>>25))&(1<<3))) != 0 || + // not(not((W[42]^(W[46]>>25))&(1<<4))) != 0 { + // mask &= ^DV_II_54_0_bit + // } + // } + BTL $0x1d, AX + JNC f62_skip + MOVL W_58+232(FP), CX + MOVL W_62+248(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000010, CX + CMPL CX, $0x00000000 + JNE f62_in + MOVL W_42+168(FP), CX + MOVL W_46+184(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000008, CX + CMPL CX, $0x00000000 + JNE f62_in + MOVL W_42+168(FP), CX + MOVL W_46+184(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000010, CX + CMPL CX, $0x00000000 + JNE f62_in + JMP f62_skip + +f62_in: + ANDL $0xdfffffff, AX + +f62_skip: + // if (mask & DV_II_55_0_bit) != 0 { + // if not(not((W[59]^(W[63]>>25))&(1<<4))) != 0 || + // not(not((W[57]^(W[59]>>25))&(1<<4))) != 0 || + // not(not((W[43]^(W[47]>>25))&(1<<3))) != 0 || + // not(not((W[43]^(W[47]>>25))&(1<<4))) != 0 { + // mask &= ^DV_II_55_0_bit + // } + // } + BTL $0x1e, AX + JNC f63_skip + MOVL W_59+236(FP), CX + MOVL W_63+252(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000010, CX + CMPL CX, $0x00000000 + JNE f63_in + MOVL W_57+228(FP), CX + MOVL W_59+236(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000010, CX + CMPL CX, $0x00000000 + JNE f63_in + MOVL W_43+172(FP), CX + MOVL W_47+188(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000008, CX + CMPL CX, $0x00000000 + JNE f63_in + MOVL W_43+172(FP), CX + MOVL W_47+188(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000010, CX + CMPL CX, $0x00000000 + JNE f63_in + JMP f63_skip + +f63_in: + ANDL $0xbfffffff, AX + +f63_skip: + // if (mask & DV_II_56_0_bit) != 0 { + // if not(not((W[60]^(W[64]>>25))&(1<<4))) != 0 || + // not(not((W[44]^(W[48]>>25))&(1<<3))) != 0 || + // not(not((W[44]^(W[48]>>25))&(1<<4))) != 0 { + // mask &= ^DV_II_56_0_bit + // } + // } + BTL $0x1f, AX + JNC f64_skip + MOVL W_60+240(FP), CX + MOVL W_64+256(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000010, CX + CMPL CX, $0x00000000 + JNE f64_in + MOVL W_44+176(FP), CX + MOVL W_48+192(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000008, CX + CMPL CX, $0x00000000 + JNE f64_in + MOVL W_44+176(FP), CX + MOVL W_48+192(FP), DX + SHRL $0x19, DX + XORL DX, CX + ANDL $0x00000010, CX + CMPL CX, $0x00000000 + JNE f64_in + JMP f64_skip + +f64_in: + ANDL $0x7fffffff, AX + +f64_skip: +end: + MOVL AX, ret+320(FP) + RET diff --git a/vendor/github.com/pjbgf/sha1cd/ubc/check.go b/vendor/github.com/pjbgf/sha1cd/ubc/ubc_generic.go similarity index 99% rename from vendor/github.com/pjbgf/sha1cd/ubc/check.go rename to 
vendor/github.com/pjbgf/sha1cd/ubc/ubc_generic.go index 167a5558f..ee95bd52d 100644 --- a/vendor/github.com/pjbgf/sha1cd/ubc/check.go +++ b/vendor/github.com/pjbgf/sha1cd/ubc/ubc_generic.go @@ -25,7 +25,7 @@ type DvInfo struct { // for all listed DVs. It returns a dvmask where each bit belonging to a DV is set if all // unavoidable bitconditions for that DV have been met. // Thus, one needs to do the recompression check for each DV that has its bit set. -func CalculateDvMask(W [80]uint32) uint32 { +func CalculateDvMaskGeneric(W [80]uint32) uint32 { mask := uint32(0xFFFFFFFF) mask &= (((((W[44] ^ W[45]) >> 29) & 1) - 1) | ^(DV_I_48_0_bit | DV_I_51_0_bit | DV_I_52_0_bit | DV_II_45_0_bit | DV_II_46_0_bit | DV_II_50_0_bit | DV_II_51_0_bit)) mask &= (((((W[49] ^ W[50]) >> 29) & 1) - 1) | ^(DV_I_46_0_bit | DV_II_45_0_bit | DV_II_50_0_bit | DV_II_51_0_bit | DV_II_55_0_bit | DV_II_56_0_bit)) diff --git a/vendor/github.com/pjbgf/sha1cd/ubc/ubc_noasm.go b/vendor/github.com/pjbgf/sha1cd/ubc/ubc_noasm.go new file mode 100644 index 000000000..48d6dff16 --- /dev/null +++ b/vendor/github.com/pjbgf/sha1cd/ubc/ubc_noasm.go @@ -0,0 +1,12 @@ +//go:build !amd64 || noasm || !gc +// +build !amd64 noasm !gc + +package ubc + +// Check takes as input an expanded message block and verifies the unavoidable bitconditions +// for all listed DVs. It returns a dvmask where each bit belonging to a DV is set if all +// unavoidable bitconditions for that DV have been met. +// Thus, one needs to do the recompression check for each DV that has its bit set. +func CalculateDvMask(W [80]uint32) uint32 { + return CalculateDvMaskGeneric(W) +} diff --git a/vendor/github.com/pulumi/esc/.version b/vendor/github.com/pulumi/esc/.version index ac454c6a1..54d1a4f2a 100644 --- a/vendor/github.com/pulumi/esc/.version +++ b/vendor/github.com/pulumi/esc/.version @@ -1 +1 @@ -0.12.0 +0.13.0 diff --git a/vendor/github.com/pulumi/esc/CHANGELOG.md b/vendor/github.com/pulumi/esc/CHANGELOG.md index 458a252c9..3680629ca 100644 --- a/vendor/github.com/pulumi/esc/CHANGELOG.md +++ b/vendor/github.com/pulumi/esc/CHANGELOG.md @@ -1,6 +1,20 @@ CHANGELOG ========= +## 0.13.0 + +### Improvements + +- Add `--string` flag to `env set` to treat the given value as a string. + [#467](https://github.com/pulumi/esc/pull/467) +- Add proper return code to list environments when organization doesn't exist + [#484](https://github.com/pulumi/esc/pull/484) + +### Breaking changes + +- It is now a syntax error to call a builtin function incorrectly. 
+ [449](https://github.com/pulumi/esc/pull/449) + ## 0.12.0 ### Improvements diff --git a/vendor/github.com/pulumi/esc/CHANGELOG_PENDING.md b/vendor/github.com/pulumi/esc/CHANGELOG_PENDING.md index bf5a76e22..229175b1c 100644 --- a/vendor/github.com/pulumi/esc/CHANGELOG_PENDING.md +++ b/vendor/github.com/pulumi/esc/CHANGELOG_PENDING.md @@ -1,19 +1,13 @@ ### Improvements -- Fix diagnostic messages when updating environment with invalid definition - [#422](https://github.com/pulumi/esc/pull/422) -- Introduce support for rotating static credentials via `fn::rotate` providers [432](https://github.com/pulumi/esc/pull/432) -- Add the `rotate` CLI command - [#433](https://github.com/pulumi/esc/pull/433) -- Add ability to pass specific paths to rotate with the `rotate` CLI command - [#440](https://github.com/pulumi/esc/pull/440) -- Introduce inline environment reference syntax - [#443](https://github.com/pulumi/esc/pull/443) -- Introduce rotateOnly inputs - [#444](https://github.com/pulumi/esc/pull/444) -- Release rotate environment CLI command - [#459](https://github.com/pulumi/esc/pull/459) +- Add `--string` flag to `env set` to treat the given value as a string. + [#467](https://github.com/pulumi/esc/pull/467) +- Add proper return code to list environments when organization doesn't exist + [#484](https://github.com/pulumi/esc/pull/484) ### Bug Fixes ### Breaking changes + +- It is now a syntax error to call a builtin function incorrectly. + [449](https://github.com/pulumi/esc/pull/449) diff --git a/vendor/github.com/pulumi/esc/CONTRIBUTING.md b/vendor/github.com/pulumi/esc/CONTRIBUTING.md new file mode 100644 index 000000000..8be6d2a33 --- /dev/null +++ b/vendor/github.com/pulumi/esc/CONTRIBUTING.md @@ -0,0 +1,8 @@ +## Performing a release + +We use [goreleaser](https://goreleaser.com/intro/) for automating releases. +To cut a new release, create a commit that: +- Copy the entries in [CHANGELOG_PENDING](./CHANGELOG_PENDING.md) into [CHANGELOG](./CHANGELOG.md). + CHANGELOG_PENDING is used to generate the release notes. After releasing, the following commit can clear the changes from pending. +- Bumps the version in the [.version](./.version) file, which is used to stamp the version into the binary. +- Tag the commit with a version tag in the format vX.X.X, to trigger the [release automation](./.github/workflows/publish-release.yaml). diff --git a/vendor/github.com/pulumi/esc/ast/environment.go b/vendor/github.com/pulumi/esc/ast/environment.go index 03e7b4d10..9ee598ca3 100644 --- a/vendor/github.com/pulumi/esc/ast/environment.go +++ b/vendor/github.com/pulumi/esc/ast/environment.go @@ -128,6 +128,9 @@ func (d *MapDecl[T]) parse(name string, node syntax.Node) syntax.Diagnostics { var v T vname := name + "." + kvp.Key.Value() + if strings.HasPrefix(kvp.Key.Value(), "fn::") { + diags.Extend(syntax.NodeError(kvp.Key, fmt.Sprintf("builtin function call %q not allowed at the top level", kvp.Key.Value()))) + } vdiags := parseNode(vname, &v, kvp.Value) diags.Extend(vdiags...) 
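Taken together, the environment.go hunk above and the expr.go hunk below are the mechanics behind the "syntax error to call a builtin function incorrectly" entry in the 0.13.0 changelog: the former rejects `fn::` keys at the top level of an environment, and the latter rejects a `fn::` key that shares its object with sibling keys. A minimal sketch of the combined rule, assuming only the `syntax.ObjectNode` API exactly as the hunks use it (`Len`, `Index(i).Key.Value()`); `fnKeyMisused` is an illustrative helper, not code from the patch:

```go
package sketch

import (
	"strings"

	"github.com/pulumi/esc/syntax"
)

// fnKeyMisused reports whether an object misuses a "fn::" key under the new
// rules: a key beginning with "fn::" denotes a builtin function call, and a
// call is only well-formed when it is the sole key of its containing object.
func fnKeyMisused(node *syntax.ObjectNode) bool {
	if node.Len() == 1 {
		// A lone "fn::" key is a legal builtin invocation.
		return false
	}
	for i := 0; i < node.Len(); i++ {
		if strings.HasPrefix(node.Index(i).Key.Value(), "fn::") {
			// "fn::" mixed with sibling keys now produces a diagnostic.
			return true
		}
	}
	return false
}
```

Previously such objects fell through `tryParseFunction` and were parsed as ordinary maps, so the misuse went unreported; surfacing it as a diagnostic is what makes this a breaking change.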
diff --git a/vendor/github.com/pulumi/esc/ast/expr.go b/vendor/github.com/pulumi/esc/ast/expr.go index 4dbe23c14..7eacf4c26 100644 --- a/vendor/github.com/pulumi/esc/ast/expr.go +++ b/vendor/github.com/pulumi/esc/ast/expr.go @@ -625,14 +625,20 @@ func FromBase64(value Expr) *FromBase64Expr { } func tryParseFunction(node *syntax.ObjectNode) (Expr, syntax.Diagnostics, bool) { + var diags syntax.Diagnostics if node.Len() != 1 { - return nil, nil, false + for i := 0; i < node.Len(); i++ { + if k := node.Index(i).Key.Value(); strings.HasPrefix(k, "fn::") { + diags = append(diags, syntax.NodeError(node, fmt.Sprintf("illegal call to builtin function: %q must be the only key within its containing object", k))) + + } + } + return nil, diags, false } kvp := node.Index(0) var parse func(node *syntax.ObjectNode, name *StringExpr, args Expr) (Expr, syntax.Diagnostics) - var diags syntax.Diagnostics switch kvp.Key.Value() { case "fn::fromJSON": parse = parseFromJSON diff --git a/vendor/github.com/pulumi/esc/eval/eval.go b/vendor/github.com/pulumi/esc/eval/eval.go index a1ed44b45..36ec723f4 100644 --- a/vendor/github.com/pulumi/esc/eval/eval.go +++ b/vendor/github.com/pulumi/esc/eval/eval.go @@ -402,6 +402,10 @@ func (e *evalContext) isReserveTopLevelKey(k string) bool { // evaluate drives the evaluation of the evalContext's environment. func (e *evalContext) evaluate() (*value, syntax.Diagnostics) { + mine := &imported{evaluating: true} + defer func() { mine.evaluating = false }() + e.imports[e.name] = mine + // Evaluate context. We prepare the context values to later evaluate interpolations. e.evaluateContext() // Evaluate imports. We do this prior to declaration so that we can plumb base values as part of declaration. @@ -444,10 +448,6 @@ func (e *evalContext) evaluateContext() { // evaluateImports evaluates an environment's imports. func (e *evalContext) evaluateImports() { - mine := &imported{evaluating: true} - defer func() { mine.evaluating = false }() - e.imports[e.name] = mine - myImports := map[string]*value{} for _, entry := range e.env.Imports.GetElements() { // If the import does not have a name, there's nothing we can do. 
This can happen for environments @@ -518,6 +518,9 @@ func (e *evalContext) evaluateImport(expr ast.Expr, name string) (*value, bool) e.errorf(expr, "%s", err.Error()) return nil, false } + if diags.HasErrors() { + return nil, false + } // we only want to rotate the root environment, so set rotating flag to false when evaluating imports imp := newEvalContext(e.ctx, e.validating, false, name, env, false, dec, e.providers, e.environments, e.imports, e.execContext, e.showSecrets, nil) diff --git a/vendor/github.com/pulumi/pulumi-aws-native/sdk/go/aws/internal/pulumiUtilities.go b/vendor/github.com/pulumi/pulumi-aws-native/sdk/go/aws/internal/pulumiUtilities.go index eee33ff99..11a6de86f 100644 --- a/vendor/github.com/pulumi/pulumi-aws-native/sdk/go/aws/internal/pulumiUtilities.go +++ b/vendor/github.com/pulumi/pulumi-aws-native/sdk/go/aws/internal/pulumiUtilities.go @@ -165,7 +165,7 @@ func callPlainInner( func PkgResourceDefaultOpts(opts []pulumi.ResourceOption) []pulumi.ResourceOption { defaults := []pulumi.ResourceOption{} - version := semver.MustParse("1.25.0") + version := semver.MustParse("1.26.0") if !version.Equals(semver.Version{}) { defaults = append(defaults, pulumi.Version(version.String())) } @@ -176,7 +176,7 @@ func PkgResourceDefaultOpts(opts []pulumi.ResourceOption) []pulumi.ResourceOptio func PkgInvokeDefaultOpts(opts []pulumi.InvokeOption) []pulumi.InvokeOption { defaults := []pulumi.InvokeOption{} - version := semver.MustParse("1.25.0") + version := semver.MustParse("1.26.0") if !version.Equals(semver.Version{}) { defaults = append(defaults, pulumi.Version(version.String())) } diff --git a/vendor/github.com/pulumi/pulumi-aws-native/sdk/go/aws/pulumi-plugin.json b/vendor/github.com/pulumi/pulumi-aws-native/sdk/go/aws/pulumi-plugin.json index 6e2ea9a99..3778bde09 100644 --- a/vendor/github.com/pulumi/pulumi-aws-native/sdk/go/aws/pulumi-plugin.json +++ b/vendor/github.com/pulumi/pulumi-aws-native/sdk/go/aws/pulumi-plugin.json @@ -1,5 +1,5 @@ { "resource": true, "name": "aws-native", - "version": "1.25.0" + "version": "1.26.0" } diff --git a/vendor/github.com/pulumi/pulumi-aws-native/sdk/go/aws/pulumiEnums.go b/vendor/github.com/pulumi/pulumi-aws-native/sdk/go/aws/pulumiEnums.go index 9c339150f..ea5389a38 100644 --- a/vendor/github.com/pulumi/pulumi-aws-native/sdk/go/aws/pulumiEnums.go +++ b/vendor/github.com/pulumi/pulumi-aws-native/sdk/go/aws/pulumiEnums.go @@ -83,6 +83,10 @@ const ( RegionUsIsoWest1 = Region("us-iso-west-1") // US ISOB East (Ohio) RegionUsIsobEast1 = Region("us-isob-east-1") + // US ISOF EAST + RegionUsIsofEast1 = Region("us-isof-east-1") + // US ISOF SOUTH + RegionUsIsofSouth1 = Region("us-isof-south-1") // US West (N. California) RegionUsWest1 = Region("us-west-1") // US West (Oregon) diff --git a/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/autoscaling/pulumiTypes.go b/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/autoscaling/pulumiTypes.go index 7e822e367..eab645d5f 100644 --- a/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/autoscaling/pulumiTypes.go +++ b/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/autoscaling/pulumiTypes.go @@ -8484,6 +8484,8 @@ type PolicyTargetTrackingConfigurationCustomizedMetricSpecification struct { Metrics []PolicyTargetTrackingConfigurationCustomizedMetricSpecificationMetric `pulumi:"metrics"` // Namespace of the metric. Namespace *string `pulumi:"namespace"` + // The period of the metric in seconds. + Period *int `pulumi:"period"` // Statistic of the metric. 
Statistic *string `pulumi:"statistic"` // Unit of the metric. @@ -8510,6 +8512,8 @@ type PolicyTargetTrackingConfigurationCustomizedMetricSpecificationArgs struct { Metrics PolicyTargetTrackingConfigurationCustomizedMetricSpecificationMetricArrayInput `pulumi:"metrics"` // Namespace of the metric. Namespace pulumi.StringPtrInput `pulumi:"namespace"` + // The period of the metric in seconds. + Period pulumi.IntPtrInput `pulumi:"period"` // Statistic of the metric. Statistic pulumi.StringPtrInput `pulumi:"statistic"` // Unit of the metric. @@ -8617,6 +8621,11 @@ func (o PolicyTargetTrackingConfigurationCustomizedMetricSpecificationOutput) Na return o.ApplyT(func(v PolicyTargetTrackingConfigurationCustomizedMetricSpecification) *string { return v.Namespace }).(pulumi.StringPtrOutput) } +// The period of the metric in seconds. +func (o PolicyTargetTrackingConfigurationCustomizedMetricSpecificationOutput) Period() pulumi.IntPtrOutput { + return o.ApplyT(func(v PolicyTargetTrackingConfigurationCustomizedMetricSpecification) *int { return v.Period }).(pulumi.IntPtrOutput) +} + // Statistic of the metric. func (o PolicyTargetTrackingConfigurationCustomizedMetricSpecificationOutput) Statistic() pulumi.StringPtrOutput { return o.ApplyT(func(v PolicyTargetTrackingConfigurationCustomizedMetricSpecification) *string { return v.Statistic }).(pulumi.StringPtrOutput) @@ -8691,6 +8700,16 @@ func (o PolicyTargetTrackingConfigurationCustomizedMetricSpecificationPtrOutput) }).(pulumi.StringPtrOutput) } +// The period of the metric in seconds. +func (o PolicyTargetTrackingConfigurationCustomizedMetricSpecificationPtrOutput) Period() pulumi.IntPtrOutput { + return o.ApplyT(func(v *PolicyTargetTrackingConfigurationCustomizedMetricSpecification) *int { + if v == nil { + return nil + } + return v.Period + }).(pulumi.IntPtrOutput) +} + // Statistic of the metric. func (o PolicyTargetTrackingConfigurationCustomizedMetricSpecificationPtrOutput) Statistic() pulumi.StringPtrOutput { return o.ApplyT(func(v *PolicyTargetTrackingConfigurationCustomizedMetricSpecification) *string { diff --git a/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/cloudwatch/contributorInsightRule.go b/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/cloudwatch/contributorInsightRule.go new file mode 100644 index 000000000..99e890cff --- /dev/null +++ b/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/cloudwatch/contributorInsightRule.go @@ -0,0 +1,333 @@ +// Code generated by the Pulumi Terraform Bridge (tfgen) Tool DO NOT EDIT. +// *** WARNING: Do not edit by hand unless you're certain you know what you are doing! *** + +package cloudwatch + +import ( + "context" + "reflect" + + "errors" + "github.com/pulumi/pulumi-aws/sdk/v6/go/aws/internal" + "github.com/pulumi/pulumi/sdk/v3/go/pulumi" +) + +// Resource for managing an AWS CloudWatch Contributor Insight Rule. 
+// +// ## Example Usage +// +// ### Basic Usage +// +// ```go +// package main +// +// import ( +// +// "github.com/pulumi/pulumi-aws/sdk/v6/go/aws/cloudwatch" +// "github.com/pulumi/pulumi/sdk/v3/go/pulumi" +// +// ) +// +// func main() { +// pulumi.Run(func(ctx *pulumi.Context) error { +// _, err := cloudwatch.NewContributorInsightRule(ctx, "test", &cloudwatch.ContributorInsightRuleArgs{ +// RuleName: pulumi.String("testing"), +// RuleState: pulumi.String("ENABLED"), +// RuleDefinition: pulumi.String("{\"Schema\":{\"Name\":\"CloudWatchLogRule\",\"Version\":1},\"AggregateOn\":\"Count\",\"Contribution\":{\"Filters\":[{\"In\":[\"some-keyword\"],\"Match\":\"$.message\"}],\"Keys\":[\"$.country\"]},\"LogFormat\":\"JSON\",\"LogGroupNames\":[\"/aws/lambda/api-prod\"]}"), +// }) +// if err != nil { +// return err +// } +// return nil +// }) +// } +// +// ``` +// +// ## Import +// +// Using `pulumi import`, import CloudWatch Contributor Insight Rule using the `rule_name`. For example: +// +// ```sh +// $ pulumi import aws:cloudwatch/contributorInsightRule:ContributorInsightRule example contributor_insight_rule-name +// ``` +type ContributorInsightRule struct { + pulumi.CustomResourceState + + // ARN of the Contributor Insight Rule. + ResourceArn pulumi.StringOutput `pulumi:"resourceArn"` + // Definition of the rule, as a JSON object. For details on the valid syntax, see [Contributor Insights Rule Syntax](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/ContributorInsights-RuleSyntax.html). + RuleDefinition pulumi.StringOutput `pulumi:"ruleDefinition"` + // Unique name of the rule. + // + // The following arguments are optional: + RuleName pulumi.StringOutput `pulumi:"ruleName"` + // State of the rule. Valid values are `ENABLED` and `DISABLED`. + RuleState pulumi.StringPtrOutput `pulumi:"ruleState"` + Tags pulumi.StringMapOutput `pulumi:"tags"` + // Deprecated: Please use `tags` instead. + TagsAll pulumi.StringMapOutput `pulumi:"tagsAll"` +} + +// NewContributorInsightRule registers a new resource with the given unique name, arguments, and options. +func NewContributorInsightRule(ctx *pulumi.Context, + name string, args *ContributorInsightRuleArgs, opts ...pulumi.ResourceOption) (*ContributorInsightRule, error) { + if args == nil { + return nil, errors.New("missing one or more required arguments") + } + + if args.RuleDefinition == nil { + return nil, errors.New("invalid value for required argument 'RuleDefinition'") + } + if args.RuleName == nil { + return nil, errors.New("invalid value for required argument 'RuleName'") + } + opts = internal.PkgResourceDefaultOpts(opts) + var resource ContributorInsightRule + err := ctx.RegisterResource("aws:cloudwatch/contributorInsightRule:ContributorInsightRule", name, args, &resource, opts...) + if err != nil { + return nil, err + } + return &resource, nil +} + +// GetContributorInsightRule gets an existing ContributorInsightRule resource's state with the given name, ID, and optional +// state properties that are used to uniquely qualify the lookup (nil if not required). +func GetContributorInsightRule(ctx *pulumi.Context, + name string, id pulumi.IDInput, state *ContributorInsightRuleState, opts ...pulumi.ResourceOption) (*ContributorInsightRule, error) { + var resource ContributorInsightRule + err := ctx.ReadResource("aws:cloudwatch/contributorInsightRule:ContributorInsightRule", name, id, state, &resource, opts...) 
+ if err != nil { + return nil, err + } + return &resource, nil +} + +// Input properties used for looking up and filtering ContributorInsightRule resources. +type contributorInsightRuleState struct { + // ARN of the Contributor Insight Rule. + ResourceArn *string `pulumi:"resourceArn"` + // Definition of the rule, as a JSON object. For details on the valid syntax, see [Contributor Insights Rule Syntax](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/ContributorInsights-RuleSyntax.html). + RuleDefinition *string `pulumi:"ruleDefinition"` + // Unique name of the rule. + // + // The following arguments are optional: + RuleName *string `pulumi:"ruleName"` + // State of the rule. Valid values are `ENABLED` and `DISABLED`. + RuleState *string `pulumi:"ruleState"` + Tags map[string]string `pulumi:"tags"` + // Deprecated: Please use `tags` instead. + TagsAll map[string]string `pulumi:"tagsAll"` +} + +type ContributorInsightRuleState struct { + // ARN of the Contributor Insight Rule. + ResourceArn pulumi.StringPtrInput + // Definition of the rule, as a JSON object. For details on the valid syntax, see [Contributor Insights Rule Syntax](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/ContributorInsights-RuleSyntax.html). + RuleDefinition pulumi.StringPtrInput + // Unique name of the rule. + // + // The following arguments are optional: + RuleName pulumi.StringPtrInput + // State of the rule. Valid values are `ENABLED` and `DISABLED`. + RuleState pulumi.StringPtrInput + Tags pulumi.StringMapInput + // Deprecated: Please use `tags` instead. + TagsAll pulumi.StringMapInput +} + +func (ContributorInsightRuleState) ElementType() reflect.Type { + return reflect.TypeOf((*contributorInsightRuleState)(nil)).Elem() +} + +type contributorInsightRuleArgs struct { + // Definition of the rule, as a JSON object. For details on the valid syntax, see [Contributor Insights Rule Syntax](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/ContributorInsights-RuleSyntax.html). + RuleDefinition string `pulumi:"ruleDefinition"` + // Unique name of the rule. + // + // The following arguments are optional: + RuleName string `pulumi:"ruleName"` + // State of the rule. Valid values are `ENABLED` and `DISABLED`. + RuleState *string `pulumi:"ruleState"` + Tags map[string]string `pulumi:"tags"` +} + +// The set of arguments for constructing a ContributorInsightRule resource. +type ContributorInsightRuleArgs struct { + // Definition of the rule, as a JSON object. For details on the valid syntax, see [Contributor Insights Rule Syntax](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/ContributorInsights-RuleSyntax.html). + RuleDefinition pulumi.StringInput + // Unique name of the rule. + // + // The following arguments are optional: + RuleName pulumi.StringInput + // State of the rule. Valid values are `ENABLED` and `DISABLED`. 
+ RuleState pulumi.StringPtrInput + Tags pulumi.StringMapInput +} + +func (ContributorInsightRuleArgs) ElementType() reflect.Type { + return reflect.TypeOf((*contributorInsightRuleArgs)(nil)).Elem() +} + +type ContributorInsightRuleInput interface { + pulumi.Input + + ToContributorInsightRuleOutput() ContributorInsightRuleOutput + ToContributorInsightRuleOutputWithContext(ctx context.Context) ContributorInsightRuleOutput +} + +func (*ContributorInsightRule) ElementType() reflect.Type { + return reflect.TypeOf((**ContributorInsightRule)(nil)).Elem() +} + +func (i *ContributorInsightRule) ToContributorInsightRuleOutput() ContributorInsightRuleOutput { + return i.ToContributorInsightRuleOutputWithContext(context.Background()) +} + +func (i *ContributorInsightRule) ToContributorInsightRuleOutputWithContext(ctx context.Context) ContributorInsightRuleOutput { + return pulumi.ToOutputWithContext(ctx, i).(ContributorInsightRuleOutput) +} + +// ContributorInsightRuleArrayInput is an input type that accepts ContributorInsightRuleArray and ContributorInsightRuleArrayOutput values. +// You can construct a concrete instance of `ContributorInsightRuleArrayInput` via: +// +// ContributorInsightRuleArray{ ContributorInsightRuleArgs{...} } +type ContributorInsightRuleArrayInput interface { + pulumi.Input + + ToContributorInsightRuleArrayOutput() ContributorInsightRuleArrayOutput + ToContributorInsightRuleArrayOutputWithContext(context.Context) ContributorInsightRuleArrayOutput +} + +type ContributorInsightRuleArray []ContributorInsightRuleInput + +func (ContributorInsightRuleArray) ElementType() reflect.Type { + return reflect.TypeOf((*[]*ContributorInsightRule)(nil)).Elem() +} + +func (i ContributorInsightRuleArray) ToContributorInsightRuleArrayOutput() ContributorInsightRuleArrayOutput { + return i.ToContributorInsightRuleArrayOutputWithContext(context.Background()) +} + +func (i ContributorInsightRuleArray) ToContributorInsightRuleArrayOutputWithContext(ctx context.Context) ContributorInsightRuleArrayOutput { + return pulumi.ToOutputWithContext(ctx, i).(ContributorInsightRuleArrayOutput) +} + +// ContributorInsightRuleMapInput is an input type that accepts ContributorInsightRuleMap and ContributorInsightRuleMapOutput values. 
+// You can construct a concrete instance of `ContributorInsightRuleMapInput` via: +// +// ContributorInsightRuleMap{ "key": ContributorInsightRuleArgs{...} } +type ContributorInsightRuleMapInput interface { + pulumi.Input + + ToContributorInsightRuleMapOutput() ContributorInsightRuleMapOutput + ToContributorInsightRuleMapOutputWithContext(context.Context) ContributorInsightRuleMapOutput +} + +type ContributorInsightRuleMap map[string]ContributorInsightRuleInput + +func (ContributorInsightRuleMap) ElementType() reflect.Type { + return reflect.TypeOf((*map[string]*ContributorInsightRule)(nil)).Elem() +} + +func (i ContributorInsightRuleMap) ToContributorInsightRuleMapOutput() ContributorInsightRuleMapOutput { + return i.ToContributorInsightRuleMapOutputWithContext(context.Background()) +} + +func (i ContributorInsightRuleMap) ToContributorInsightRuleMapOutputWithContext(ctx context.Context) ContributorInsightRuleMapOutput { + return pulumi.ToOutputWithContext(ctx, i).(ContributorInsightRuleMapOutput) +} + +type ContributorInsightRuleOutput struct{ *pulumi.OutputState } + +func (ContributorInsightRuleOutput) ElementType() reflect.Type { + return reflect.TypeOf((**ContributorInsightRule)(nil)).Elem() +} + +func (o ContributorInsightRuleOutput) ToContributorInsightRuleOutput() ContributorInsightRuleOutput { + return o +} + +func (o ContributorInsightRuleOutput) ToContributorInsightRuleOutputWithContext(ctx context.Context) ContributorInsightRuleOutput { + return o +} + +// ARN of the Contributor Insight Rule. +func (o ContributorInsightRuleOutput) ResourceArn() pulumi.StringOutput { + return o.ApplyT(func(v *ContributorInsightRule) pulumi.StringOutput { return v.ResourceArn }).(pulumi.StringOutput) +} + +// Definition of the rule, as a JSON object. For details on the valid syntax, see [Contributor Insights Rule Syntax](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/ContributorInsights-RuleSyntax.html). +func (o ContributorInsightRuleOutput) RuleDefinition() pulumi.StringOutput { + return o.ApplyT(func(v *ContributorInsightRule) pulumi.StringOutput { return v.RuleDefinition }).(pulumi.StringOutput) +} + +// Unique name of the rule. +// +// The following arguments are optional: +func (o ContributorInsightRuleOutput) RuleName() pulumi.StringOutput { + return o.ApplyT(func(v *ContributorInsightRule) pulumi.StringOutput { return v.RuleName }).(pulumi.StringOutput) +} + +// State of the rule. Valid values are `ENABLED` and `DISABLED`. +func (o ContributorInsightRuleOutput) RuleState() pulumi.StringPtrOutput { + return o.ApplyT(func(v *ContributorInsightRule) pulumi.StringPtrOutput { return v.RuleState }).(pulumi.StringPtrOutput) +} + +func (o ContributorInsightRuleOutput) Tags() pulumi.StringMapOutput { + return o.ApplyT(func(v *ContributorInsightRule) pulumi.StringMapOutput { return v.Tags }).(pulumi.StringMapOutput) +} + +// Deprecated: Please use `tags` instead. 
+func (o ContributorInsightRuleOutput) TagsAll() pulumi.StringMapOutput { + return o.ApplyT(func(v *ContributorInsightRule) pulumi.StringMapOutput { return v.TagsAll }).(pulumi.StringMapOutput) +} + +type ContributorInsightRuleArrayOutput struct{ *pulumi.OutputState } + +func (ContributorInsightRuleArrayOutput) ElementType() reflect.Type { + return reflect.TypeOf((*[]*ContributorInsightRule)(nil)).Elem() +} + +func (o ContributorInsightRuleArrayOutput) ToContributorInsightRuleArrayOutput() ContributorInsightRuleArrayOutput { + return o +} + +func (o ContributorInsightRuleArrayOutput) ToContributorInsightRuleArrayOutputWithContext(ctx context.Context) ContributorInsightRuleArrayOutput { + return o +} + +func (o ContributorInsightRuleArrayOutput) Index(i pulumi.IntInput) ContributorInsightRuleOutput { + return pulumi.All(o, i).ApplyT(func(vs []interface{}) *ContributorInsightRule { + return vs[0].([]*ContributorInsightRule)[vs[1].(int)] + }).(ContributorInsightRuleOutput) +} + +type ContributorInsightRuleMapOutput struct{ *pulumi.OutputState } + +func (ContributorInsightRuleMapOutput) ElementType() reflect.Type { + return reflect.TypeOf((*map[string]*ContributorInsightRule)(nil)).Elem() +} + +func (o ContributorInsightRuleMapOutput) ToContributorInsightRuleMapOutput() ContributorInsightRuleMapOutput { + return o +} + +func (o ContributorInsightRuleMapOutput) ToContributorInsightRuleMapOutputWithContext(ctx context.Context) ContributorInsightRuleMapOutput { + return o +} + +func (o ContributorInsightRuleMapOutput) MapIndex(k pulumi.StringInput) ContributorInsightRuleOutput { + return pulumi.All(o, k).ApplyT(func(vs []interface{}) *ContributorInsightRule { + return vs[0].(map[string]*ContributorInsightRule)[vs[1].(string)] + }).(ContributorInsightRuleOutput) +} + +func init() { + pulumi.RegisterInputType(reflect.TypeOf((*ContributorInsightRuleInput)(nil)).Elem(), &ContributorInsightRule{}) + pulumi.RegisterInputType(reflect.TypeOf((*ContributorInsightRuleArrayInput)(nil)).Elem(), ContributorInsightRuleArray{}) + pulumi.RegisterInputType(reflect.TypeOf((*ContributorInsightRuleMapInput)(nil)).Elem(), ContributorInsightRuleMap{}) + pulumi.RegisterOutputType(ContributorInsightRuleOutput{}) + pulumi.RegisterOutputType(ContributorInsightRuleArrayOutput{}) + pulumi.RegisterOutputType(ContributorInsightRuleMapOutput{}) +} diff --git a/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/cloudwatch/contributorManagedInsightRule.go b/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/cloudwatch/contributorManagedInsightRule.go new file mode 100644 index 000000000..c5ccf9e2f --- /dev/null +++ b/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/cloudwatch/contributorManagedInsightRule.go @@ -0,0 +1,306 @@ +// Code generated by the Pulumi Terraform Bridge (tfgen) Tool DO NOT EDIT. +// *** WARNING: Do not edit by hand unless you're certain you know what you are doing! *** + +package cloudwatch + +import ( + "context" + "reflect" + + "errors" + "github.com/pulumi/pulumi-aws/sdk/v6/go/aws/internal" + "github.com/pulumi/pulumi/sdk/v3/go/pulumi" +) + +// Resource for managing an AWS CloudWatch Contributor Managed Insight Rule. +// +// ## Example Usage +// +// ## Import +// +// Using `pulumi import`, import CloudWatch Contributor Managed Insight Rule using the `resource_arn`. 
For example: +// +// ```sh +// $ pulumi import aws:cloudwatch/contributorManagedInsightRule:ContributorManagedInsightRule example contributor_managed_insight_rule-id-12345678 +// ``` +type ContributorManagedInsightRule struct { + pulumi.CustomResourceState + + // ARN of the Contributor Managed Insight Rule. + Arn pulumi.StringOutput `pulumi:"arn"` + // ARN of an Amazon Web Services resource that has managed Contributor Insights rules. + ResourceArn pulumi.StringOutput `pulumi:"resourceArn"` + RuleName pulumi.StringOutput `pulumi:"ruleName"` + State pulumi.StringOutput `pulumi:"state"` + Tags pulumi.StringMapOutput `pulumi:"tags"` + // Deprecated: Please use `tags` instead. + TagsAll pulumi.StringMapOutput `pulumi:"tagsAll"` + // Template name for the managed Contributor Insights rule, as returned by ListManagedInsightRules. + // + // The following arguments are optional: + TemplateName pulumi.StringOutput `pulumi:"templateName"` +} + +// NewContributorManagedInsightRule registers a new resource with the given unique name, arguments, and options. +func NewContributorManagedInsightRule(ctx *pulumi.Context, + name string, args *ContributorManagedInsightRuleArgs, opts ...pulumi.ResourceOption) (*ContributorManagedInsightRule, error) { + if args == nil { + return nil, errors.New("missing one or more required arguments") + } + + if args.ResourceArn == nil { + return nil, errors.New("invalid value for required argument 'ResourceArn'") + } + if args.TemplateName == nil { + return nil, errors.New("invalid value for required argument 'TemplateName'") + } + opts = internal.PkgResourceDefaultOpts(opts) + var resource ContributorManagedInsightRule + err := ctx.RegisterResource("aws:cloudwatch/contributorManagedInsightRule:ContributorManagedInsightRule", name, args, &resource, opts...) + if err != nil { + return nil, err + } + return &resource, nil +} + +// GetContributorManagedInsightRule gets an existing ContributorManagedInsightRule resource's state with the given name, ID, and optional +// state properties that are used to uniquely qualify the lookup (nil if not required). +func GetContributorManagedInsightRule(ctx *pulumi.Context, + name string, id pulumi.IDInput, state *ContributorManagedInsightRuleState, opts ...pulumi.ResourceOption) (*ContributorManagedInsightRule, error) { + var resource ContributorManagedInsightRule + err := ctx.ReadResource("aws:cloudwatch/contributorManagedInsightRule:ContributorManagedInsightRule", name, id, state, &resource, opts...) + if err != nil { + return nil, err + } + return &resource, nil +} + +// Input properties used for looking up and filtering ContributorManagedInsightRule resources. +type contributorManagedInsightRuleState struct { + // ARN of the Contributor Managed Insight Rule. + Arn *string `pulumi:"arn"` + // ARN of an Amazon Web Services resource that has managed Contributor Insights rules. + ResourceArn *string `pulumi:"resourceArn"` + RuleName *string `pulumi:"ruleName"` + State *string `pulumi:"state"` + Tags map[string]string `pulumi:"tags"` + // Deprecated: Please use `tags` instead. + TagsAll map[string]string `pulumi:"tagsAll"` + // Template name for the managed Contributor Insights rule, as returned by ListManagedInsightRules. + // + // The following arguments are optional: + TemplateName *string `pulumi:"templateName"` +} + +type ContributorManagedInsightRuleState struct { + // ARN of the Contributor Managed Insight Rule. + Arn pulumi.StringPtrInput + // ARN of an Amazon Web Services resource that has managed Contributor Insights rules. 
+ ResourceArn pulumi.StringPtrInput + RuleName pulumi.StringPtrInput + State pulumi.StringPtrInput + Tags pulumi.StringMapInput + // Deprecated: Please use `tags` instead. + TagsAll pulumi.StringMapInput + // Template name for the managed Contributor Insights rule, as returned by ListManagedInsightRules. + // + // The following arguments are optional: + TemplateName pulumi.StringPtrInput +} + +func (ContributorManagedInsightRuleState) ElementType() reflect.Type { + return reflect.TypeOf((*contributorManagedInsightRuleState)(nil)).Elem() +} + +type contributorManagedInsightRuleArgs struct { + // ARN of an Amazon Web Services resource that has managed Contributor Insights rules. + ResourceArn string `pulumi:"resourceArn"` + State *string `pulumi:"state"` + Tags map[string]string `pulumi:"tags"` + // Template name for the managed Contributor Insights rule, as returned by ListManagedInsightRules. + // + // The following arguments are optional: + TemplateName string `pulumi:"templateName"` +} + +// The set of arguments for constructing a ContributorManagedInsightRule resource. +type ContributorManagedInsightRuleArgs struct { + // ARN of an Amazon Web Services resource that has managed Contributor Insights rules. + ResourceArn pulumi.StringInput + State pulumi.StringPtrInput + Tags pulumi.StringMapInput + // Template name for the managed Contributor Insights rule, as returned by ListManagedInsightRules. + // + // The following arguments are optional: + TemplateName pulumi.StringInput +} + +func (ContributorManagedInsightRuleArgs) ElementType() reflect.Type { + return reflect.TypeOf((*contributorManagedInsightRuleArgs)(nil)).Elem() +} + +type ContributorManagedInsightRuleInput interface { + pulumi.Input + + ToContributorManagedInsightRuleOutput() ContributorManagedInsightRuleOutput + ToContributorManagedInsightRuleOutputWithContext(ctx context.Context) ContributorManagedInsightRuleOutput +} + +func (*ContributorManagedInsightRule) ElementType() reflect.Type { + return reflect.TypeOf((**ContributorManagedInsightRule)(nil)).Elem() +} + +func (i *ContributorManagedInsightRule) ToContributorManagedInsightRuleOutput() ContributorManagedInsightRuleOutput { + return i.ToContributorManagedInsightRuleOutputWithContext(context.Background()) +} + +func (i *ContributorManagedInsightRule) ToContributorManagedInsightRuleOutputWithContext(ctx context.Context) ContributorManagedInsightRuleOutput { + return pulumi.ToOutputWithContext(ctx, i).(ContributorManagedInsightRuleOutput) +} + +// ContributorManagedInsightRuleArrayInput is an input type that accepts ContributorManagedInsightRuleArray and ContributorManagedInsightRuleArrayOutput values. 
+// You can construct a concrete instance of `ContributorManagedInsightRuleArrayInput` via: +// +// ContributorManagedInsightRuleArray{ ContributorManagedInsightRuleArgs{...} } +type ContributorManagedInsightRuleArrayInput interface { + pulumi.Input + + ToContributorManagedInsightRuleArrayOutput() ContributorManagedInsightRuleArrayOutput + ToContributorManagedInsightRuleArrayOutputWithContext(context.Context) ContributorManagedInsightRuleArrayOutput +} + +type ContributorManagedInsightRuleArray []ContributorManagedInsightRuleInput + +func (ContributorManagedInsightRuleArray) ElementType() reflect.Type { + return reflect.TypeOf((*[]*ContributorManagedInsightRule)(nil)).Elem() +} + +func (i ContributorManagedInsightRuleArray) ToContributorManagedInsightRuleArrayOutput() ContributorManagedInsightRuleArrayOutput { + return i.ToContributorManagedInsightRuleArrayOutputWithContext(context.Background()) +} + +func (i ContributorManagedInsightRuleArray) ToContributorManagedInsightRuleArrayOutputWithContext(ctx context.Context) ContributorManagedInsightRuleArrayOutput { + return pulumi.ToOutputWithContext(ctx, i).(ContributorManagedInsightRuleArrayOutput) +} + +// ContributorManagedInsightRuleMapInput is an input type that accepts ContributorManagedInsightRuleMap and ContributorManagedInsightRuleMapOutput values. +// You can construct a concrete instance of `ContributorManagedInsightRuleMapInput` via: +// +// ContributorManagedInsightRuleMap{ "key": ContributorManagedInsightRuleArgs{...} } +type ContributorManagedInsightRuleMapInput interface { + pulumi.Input + + ToContributorManagedInsightRuleMapOutput() ContributorManagedInsightRuleMapOutput + ToContributorManagedInsightRuleMapOutputWithContext(context.Context) ContributorManagedInsightRuleMapOutput +} + +type ContributorManagedInsightRuleMap map[string]ContributorManagedInsightRuleInput + +func (ContributorManagedInsightRuleMap) ElementType() reflect.Type { + return reflect.TypeOf((*map[string]*ContributorManagedInsightRule)(nil)).Elem() +} + +func (i ContributorManagedInsightRuleMap) ToContributorManagedInsightRuleMapOutput() ContributorManagedInsightRuleMapOutput { + return i.ToContributorManagedInsightRuleMapOutputWithContext(context.Background()) +} + +func (i ContributorManagedInsightRuleMap) ToContributorManagedInsightRuleMapOutputWithContext(ctx context.Context) ContributorManagedInsightRuleMapOutput { + return pulumi.ToOutputWithContext(ctx, i).(ContributorManagedInsightRuleMapOutput) +} + +type ContributorManagedInsightRuleOutput struct{ *pulumi.OutputState } + +func (ContributorManagedInsightRuleOutput) ElementType() reflect.Type { + return reflect.TypeOf((**ContributorManagedInsightRule)(nil)).Elem() +} + +func (o ContributorManagedInsightRuleOutput) ToContributorManagedInsightRuleOutput() ContributorManagedInsightRuleOutput { + return o +} + +func (o ContributorManagedInsightRuleOutput) ToContributorManagedInsightRuleOutputWithContext(ctx context.Context) ContributorManagedInsightRuleOutput { + return o +} + +// ARN of the Contributor Managed Insight Rule. +func (o ContributorManagedInsightRuleOutput) Arn() pulumi.StringOutput { + return o.ApplyT(func(v *ContributorManagedInsightRule) pulumi.StringOutput { return v.Arn }).(pulumi.StringOutput) +} + +// ARN of an Amazon Web Services resource that has managed Contributor Insights rules. 
+func (o ContributorManagedInsightRuleOutput) ResourceArn() pulumi.StringOutput { + return o.ApplyT(func(v *ContributorManagedInsightRule) pulumi.StringOutput { return v.ResourceArn }).(pulumi.StringOutput) +} + +func (o ContributorManagedInsightRuleOutput) RuleName() pulumi.StringOutput { + return o.ApplyT(func(v *ContributorManagedInsightRule) pulumi.StringOutput { return v.RuleName }).(pulumi.StringOutput) +} + +func (o ContributorManagedInsightRuleOutput) State() pulumi.StringOutput { + return o.ApplyT(func(v *ContributorManagedInsightRule) pulumi.StringOutput { return v.State }).(pulumi.StringOutput) +} + +func (o ContributorManagedInsightRuleOutput) Tags() pulumi.StringMapOutput { + return o.ApplyT(func(v *ContributorManagedInsightRule) pulumi.StringMapOutput { return v.Tags }).(pulumi.StringMapOutput) +} + +// Deprecated: Please use `tags` instead. +func (o ContributorManagedInsightRuleOutput) TagsAll() pulumi.StringMapOutput { + return o.ApplyT(func(v *ContributorManagedInsightRule) pulumi.StringMapOutput { return v.TagsAll }).(pulumi.StringMapOutput) +} + +// Template name for the managed Contributor Insights rule, as returned by ListManagedInsightRules. +// +// The following arguments are optional: +func (o ContributorManagedInsightRuleOutput) TemplateName() pulumi.StringOutput { + return o.ApplyT(func(v *ContributorManagedInsightRule) pulumi.StringOutput { return v.TemplateName }).(pulumi.StringOutput) +} + +type ContributorManagedInsightRuleArrayOutput struct{ *pulumi.OutputState } + +func (ContributorManagedInsightRuleArrayOutput) ElementType() reflect.Type { + return reflect.TypeOf((*[]*ContributorManagedInsightRule)(nil)).Elem() +} + +func (o ContributorManagedInsightRuleArrayOutput) ToContributorManagedInsightRuleArrayOutput() ContributorManagedInsightRuleArrayOutput { + return o +} + +func (o ContributorManagedInsightRuleArrayOutput) ToContributorManagedInsightRuleArrayOutputWithContext(ctx context.Context) ContributorManagedInsightRuleArrayOutput { + return o +} + +func (o ContributorManagedInsightRuleArrayOutput) Index(i pulumi.IntInput) ContributorManagedInsightRuleOutput { + return pulumi.All(o, i).ApplyT(func(vs []interface{}) *ContributorManagedInsightRule { + return vs[0].([]*ContributorManagedInsightRule)[vs[1].(int)] + }).(ContributorManagedInsightRuleOutput) +} + +type ContributorManagedInsightRuleMapOutput struct{ *pulumi.OutputState } + +func (ContributorManagedInsightRuleMapOutput) ElementType() reflect.Type { + return reflect.TypeOf((*map[string]*ContributorManagedInsightRule)(nil)).Elem() +} + +func (o ContributorManagedInsightRuleMapOutput) ToContributorManagedInsightRuleMapOutput() ContributorManagedInsightRuleMapOutput { + return o +} + +func (o ContributorManagedInsightRuleMapOutput) ToContributorManagedInsightRuleMapOutputWithContext(ctx context.Context) ContributorManagedInsightRuleMapOutput { + return o +} + +func (o ContributorManagedInsightRuleMapOutput) MapIndex(k pulumi.StringInput) ContributorManagedInsightRuleOutput { + return pulumi.All(o, k).ApplyT(func(vs []interface{}) *ContributorManagedInsightRule { + return vs[0].(map[string]*ContributorManagedInsightRule)[vs[1].(string)] + }).(ContributorManagedInsightRuleOutput) +} + +func init() { + pulumi.RegisterInputType(reflect.TypeOf((*ContributorManagedInsightRuleInput)(nil)).Elem(), &ContributorManagedInsightRule{}) + pulumi.RegisterInputType(reflect.TypeOf((*ContributorManagedInsightRuleArrayInput)(nil)).Elem(), ContributorManagedInsightRuleArray{}) + 
pulumi.RegisterInputType(reflect.TypeOf((*ContributorManagedInsightRuleMapInput)(nil)).Elem(), ContributorManagedInsightRuleMap{}) + pulumi.RegisterOutputType(ContributorManagedInsightRuleOutput{}) + pulumi.RegisterOutputType(ContributorManagedInsightRuleArrayOutput{}) + pulumi.RegisterOutputType(ContributorManagedInsightRuleMapOutput{}) +} diff --git a/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/cloudwatch/eventRule.go b/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/cloudwatch/eventRule.go index 281383d2a..69099003a 100644 --- a/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/cloudwatch/eventRule.go +++ b/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/cloudwatch/eventRule.go @@ -125,7 +125,7 @@ type EventRule struct { // Defaults to `true`. // Conflicts with `state`. // - // Deprecated: Use "state" instead + // Deprecated: is_enabled is deprecated. Use state instead. IsEnabled pulumi.BoolPtrOutput `pulumi:"isEnabled"` // The name of the rule. If omitted, this provider will assign a random, unique name. Conflicts with `namePrefix`. Name pulumi.StringOutput `pulumi:"name"` @@ -197,7 +197,7 @@ type eventRuleState struct { // Defaults to `true`. // Conflicts with `state`. // - // Deprecated: Use "state" instead + // Deprecated: is_enabled is deprecated. Use state instead. IsEnabled *bool `pulumi:"isEnabled"` // The name of the rule. If omitted, this provider will assign a random, unique name. Conflicts with `namePrefix`. Name *string `pulumi:"name"` @@ -240,7 +240,7 @@ type EventRuleState struct { // Defaults to `true`. // Conflicts with `state`. // - // Deprecated: Use "state" instead + // Deprecated: is_enabled is deprecated. Use state instead. IsEnabled pulumi.BoolPtrInput // The name of the rule. If omitted, this provider will assign a random, unique name. Conflicts with `namePrefix`. Name pulumi.StringPtrInput @@ -285,7 +285,7 @@ type eventRuleArgs struct { // Defaults to `true`. // Conflicts with `state`. // - // Deprecated: Use "state" instead + // Deprecated: is_enabled is deprecated. Use state instead. IsEnabled *bool `pulumi:"isEnabled"` // The name of the rule. If omitted, this provider will assign a random, unique name. Conflicts with `namePrefix`. Name *string `pulumi:"name"` @@ -323,7 +323,7 @@ type EventRuleArgs struct { // Defaults to `true`. // Conflicts with `state`. // - // Deprecated: Use "state" instead + // Deprecated: is_enabled is deprecated. Use state instead. IsEnabled pulumi.BoolPtrInput // The name of the rule. If omitted, this provider will assign a random, unique name. Conflicts with `namePrefix`. Name pulumi.StringPtrInput @@ -463,7 +463,7 @@ func (o EventRuleOutput) ForceDestroy() pulumi.BoolPtrOutput { // Defaults to `true`. // Conflicts with `state`. // -// Deprecated: Use "state" instead +// Deprecated: is_enabled is deprecated. Use state instead. func (o EventRuleOutput) IsEnabled() pulumi.BoolPtrOutput { return o.ApplyT(func(v *EventRule) pulumi.BoolPtrOutput { return v.IsEnabled }).(pulumi.BoolPtrOutput) } diff --git a/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/cloudwatch/eventTarget.go b/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/cloudwatch/eventTarget.go index 58d524590..5a03d872d 100644 --- a/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/cloudwatch/eventTarget.go +++ b/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/cloudwatch/eventTarget.go @@ -945,7 +945,7 @@ type EventTarget struct { Rule pulumi.StringOutput `pulumi:"rule"` // Parameters used when you are using the rule to invoke Amazon EC2 Run Command. 
Documented below. A maximum of 5 are allowed. RunCommandTargets EventTargetRunCommandTargetArrayOutput `pulumi:"runCommandTargets"` - // Parameters used when you are using the rule to invoke an Amazon SageMaker Pipeline. Documented below. A maximum of 1 are allowed. + // Parameters used when you are using the rule to invoke an Amazon SageMaker AI Pipeline. Documented below. A maximum of 1 are allowed. SagemakerPipelineTarget EventTargetSagemakerPipelineTargetPtrOutput `pulumi:"sagemakerPipelineTarget"` // Parameters used when you are using the rule to invoke an Amazon SQS Queue. Documented below. A maximum of 1 are allowed. SqsTarget EventTargetSqsTargetPtrOutput `pulumi:"sqsTarget"` @@ -1026,7 +1026,7 @@ type eventTargetState struct { Rule *string `pulumi:"rule"` // Parameters used when you are using the rule to invoke Amazon EC2 Run Command. Documented below. A maximum of 5 are allowed. RunCommandTargets []EventTargetRunCommandTarget `pulumi:"runCommandTargets"` - // Parameters used when you are using the rule to invoke an Amazon SageMaker Pipeline. Documented below. A maximum of 1 are allowed. + // Parameters used when you are using the rule to invoke an Amazon SageMaker AI Pipeline. Documented below. A maximum of 1 are allowed. SagemakerPipelineTarget *EventTargetSagemakerPipelineTarget `pulumi:"sagemakerPipelineTarget"` // Parameters used when you are using the rule to invoke an Amazon SQS Queue. Documented below. A maximum of 1 are allowed. SqsTarget *EventTargetSqsTarget `pulumi:"sqsTarget"` @@ -1072,7 +1072,7 @@ type EventTargetState struct { Rule pulumi.StringPtrInput // Parameters used when you are using the rule to invoke Amazon EC2 Run Command. Documented below. A maximum of 5 are allowed. RunCommandTargets EventTargetRunCommandTargetArrayInput - // Parameters used when you are using the rule to invoke an Amazon SageMaker Pipeline. Documented below. A maximum of 1 are allowed. + // Parameters used when you are using the rule to invoke an Amazon SageMaker AI Pipeline. Documented below. A maximum of 1 are allowed. SagemakerPipelineTarget EventTargetSagemakerPipelineTargetPtrInput // Parameters used when you are using the rule to invoke an Amazon SQS Queue. Documented below. A maximum of 1 are allowed. SqsTarget EventTargetSqsTargetPtrInput @@ -1122,7 +1122,7 @@ type eventTargetArgs struct { Rule string `pulumi:"rule"` // Parameters used when you are using the rule to invoke Amazon EC2 Run Command. Documented below. A maximum of 5 are allowed. RunCommandTargets []EventTargetRunCommandTarget `pulumi:"runCommandTargets"` - // Parameters used when you are using the rule to invoke an Amazon SageMaker Pipeline. Documented below. A maximum of 1 are allowed. + // Parameters used when you are using the rule to invoke an Amazon SageMaker AI Pipeline. Documented below. A maximum of 1 are allowed. SagemakerPipelineTarget *EventTargetSagemakerPipelineTarget `pulumi:"sagemakerPipelineTarget"` // Parameters used when you are using the rule to invoke an Amazon SQS Queue. Documented below. A maximum of 1 are allowed. SqsTarget *EventTargetSqsTarget `pulumi:"sqsTarget"` @@ -1169,7 +1169,7 @@ type EventTargetArgs struct { Rule pulumi.StringInput // Parameters used when you are using the rule to invoke Amazon EC2 Run Command. Documented below. A maximum of 5 are allowed. RunCommandTargets EventTargetRunCommandTargetArrayInput - // Parameters used when you are using the rule to invoke an Amazon SageMaker Pipeline. Documented below. A maximum of 1 are allowed. 
+ // Parameters used when you are using the rule to invoke an Amazon SageMaker AI Pipeline. Documented below. A maximum of 1 are allowed. SagemakerPipelineTarget EventTargetSagemakerPipelineTargetPtrInput // Parameters used when you are using the rule to invoke an Amazon SQS Queue. Documented below. A maximum of 1 are allowed. SqsTarget EventTargetSqsTargetPtrInput @@ -1352,7 +1352,7 @@ func (o EventTargetOutput) RunCommandTargets() EventTargetRunCommandTargetArrayO return o.ApplyT(func(v *EventTarget) EventTargetRunCommandTargetArrayOutput { return v.RunCommandTargets }).(EventTargetRunCommandTargetArrayOutput) } -// Parameters used when you are using the rule to invoke an Amazon SageMaker Pipeline. Documented below. A maximum of 1 are allowed. +// Parameters used when you are using the rule to invoke an Amazon SageMaker AI Pipeline. Documented below. A maximum of 1 are allowed. func (o EventTargetOutput) SagemakerPipelineTarget() EventTargetSagemakerPipelineTargetPtrOutput { return o.ApplyT(func(v *EventTarget) EventTargetSagemakerPipelineTargetPtrOutput { return v.SagemakerPipelineTarget }).(EventTargetSagemakerPipelineTargetPtrOutput) } diff --git a/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/cloudwatch/getContributorManagedInsightRules.go b/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/cloudwatch/getContributorManagedInsightRules.go new file mode 100644 index 000000000..d83f2334d --- /dev/null +++ b/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/cloudwatch/getContributorManagedInsightRules.go @@ -0,0 +1,130 @@ +// Code generated by the Pulumi Terraform Bridge (tfgen) Tool DO NOT EDIT. +// *** WARNING: Do not edit by hand unless you're certain you know what you are doing! *** + +package cloudwatch + +import ( + "context" + "reflect" + + "github.com/pulumi/pulumi-aws/sdk/v6/go/aws/internal" + "github.com/pulumi/pulumi/sdk/v3/go/pulumi" +) + +// Data source for managing AWS CloudWatch Contributor Managed Insight Rules. +// +// ## Example Usage +// +// ### Basic Usage +// +// ```go +// package main +// +// import ( +// +// "github.com/pulumi/pulumi-aws/sdk/v6/go/aws/cloudwatch" +// "github.com/pulumi/pulumi/sdk/v3/go/pulumi" +// +// ) +// +// func main() { +// pulumi.Run(func(ctx *pulumi.Context) error { +// _, err := cloudwatch.GetContributorManagedInsightRules(ctx, &cloudwatch.GetContributorManagedInsightRulesArgs{ +// ResourceArn: "arn:aws:ec2:us-west-2:123456789012:resource-name/resourceid", +// }, nil) +// if err != nil { +// return err +// } +// return nil +// }) +// } +// +// ``` +func GetContributorManagedInsightRules(ctx *pulumi.Context, args *GetContributorManagedInsightRulesArgs, opts ...pulumi.InvokeOption) (*GetContributorManagedInsightRulesResult, error) { + opts = internal.PkgInvokeDefaultOpts(opts) + var rv GetContributorManagedInsightRulesResult + err := ctx.Invoke("aws:cloudwatch/getContributorManagedInsightRules:getContributorManagedInsightRules", args, &rv, opts...) + if err != nil { + return nil, err + } + return &rv, nil +} + +// A collection of arguments for invoking getContributorManagedInsightRules. +type GetContributorManagedInsightRulesArgs struct { + // ARN of an Amazon Web Services resource that has managed Contributor Insights rules. + // + // The following arguments are optional: + // + // There are no optional arguments. + ResourceArn string `pulumi:"resourceArn"` +} + +// A collection of values returned by getContributorManagedInsightRules.
+type GetContributorManagedInsightRulesResult struct { + // The provider-assigned unique ID for this managed resource. + Id string `pulumi:"id"` + // Managed rules that are available for the specified Amazon Web Services resource. See `managedRules reference` below for details. + ManagedRules []GetContributorManagedInsightRulesManagedRule `pulumi:"managedRules"` + // If a managed rule is enabled, this is the ARN for the related Amazon Web Services resource. + ResourceArn string `pulumi:"resourceArn"` +} + +func GetContributorManagedInsightRulesOutput(ctx *pulumi.Context, args GetContributorManagedInsightRulesOutputArgs, opts ...pulumi.InvokeOption) GetContributorManagedInsightRulesResultOutput { + return pulumi.ToOutputWithContext(ctx.Context(), args). + ApplyT(func(v interface{}) (GetContributorManagedInsightRulesResultOutput, error) { + args := v.(GetContributorManagedInsightRulesArgs) + options := pulumi.InvokeOutputOptions{InvokeOptions: internal.PkgInvokeDefaultOpts(opts)} + return ctx.InvokeOutput("aws:cloudwatch/getContributorManagedInsightRules:getContributorManagedInsightRules", args, GetContributorManagedInsightRulesResultOutput{}, options).(GetContributorManagedInsightRulesResultOutput), nil + }).(GetContributorManagedInsightRulesResultOutput) +} + +// A collection of arguments for invoking getContributorManagedInsightRules. +type GetContributorManagedInsightRulesOutputArgs struct { + // ARN of an Amazon Web Services resource that has managed Contributor Insights rules. + // + // The following arguments are optional: + // + // There are no optional arguments. + ResourceArn pulumi.StringInput `pulumi:"resourceArn"` +} + +func (GetContributorManagedInsightRulesOutputArgs) ElementType() reflect.Type { + return reflect.TypeOf((*GetContributorManagedInsightRulesArgs)(nil)).Elem() +} + +// A collection of values returned by getContributorManagedInsightRules. +type GetContributorManagedInsightRulesResultOutput struct{ *pulumi.OutputState } + +func (GetContributorManagedInsightRulesResultOutput) ElementType() reflect.Type { + return reflect.TypeOf((*GetContributorManagedInsightRulesResult)(nil)).Elem() +} + +func (o GetContributorManagedInsightRulesResultOutput) ToGetContributorManagedInsightRulesResultOutput() GetContributorManagedInsightRulesResultOutput { + return o +} + +func (o GetContributorManagedInsightRulesResultOutput) ToGetContributorManagedInsightRulesResultOutputWithContext(ctx context.Context) GetContributorManagedInsightRulesResultOutput { + return o +} + +// The provider-assigned unique ID for this managed resource. +func (o GetContributorManagedInsightRulesResultOutput) Id() pulumi.StringOutput { + return o.ApplyT(func(v GetContributorManagedInsightRulesResult) string { return v.Id }).(pulumi.StringOutput) +} + +// Managed rules that are available for the specified Amazon Web Services resource. See `managedRules reference` below for details. +func (o GetContributorManagedInsightRulesResultOutput) ManagedRules() GetContributorManagedInsightRulesManagedRuleArrayOutput { + return o.ApplyT(func(v GetContributorManagedInsightRulesResult) []GetContributorManagedInsightRulesManagedRule { + return v.ManagedRules + }).(GetContributorManagedInsightRulesManagedRuleArrayOutput) +} + +// If a managed rule is enabled, this is the ARN for the related Amazon Web Services resource. 
+func (o GetContributorManagedInsightRulesResultOutput) ResourceArn() pulumi.StringOutput { + return o.ApplyT(func(v GetContributorManagedInsightRulesResult) string { return v.ResourceArn }).(pulumi.StringOutput) +} + +func init() { + pulumi.RegisterOutputType(GetContributorManagedInsightRulesResultOutput{}) +} diff --git a/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/cloudwatch/getLogDataProtectionPolicyDocument.go b/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/cloudwatch/getLogDataProtectionPolicyDocument.go index f288fce1e..34f849d4e 100644 --- a/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/cloudwatch/getLogDataProtectionPolicyDocument.go +++ b/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/cloudwatch/getLogDataProtectionPolicyDocument.go @@ -95,7 +95,8 @@ func GetLogDataProtectionPolicyDocument(ctx *pulumi.Context, args *GetLogDataPro // A collection of arguments for invoking getLogDataProtectionPolicyDocument. type GetLogDataProtectionPolicyDocumentArgs struct { - Description *string `pulumi:"description"` + Configuration *GetLogDataProtectionPolicyDocumentConfiguration `pulumi:"configuration"` + Description *string `pulumi:"description"` // The name of the data protection policy document. Name string `pulumi:"name"` // Configures the data protection policy. @@ -109,7 +110,8 @@ type GetLogDataProtectionPolicyDocumentArgs struct { // A collection of values returned by getLogDataProtectionPolicyDocument. type GetLogDataProtectionPolicyDocumentResult struct { - Description *string `pulumi:"description"` + Configuration *GetLogDataProtectionPolicyDocumentConfiguration `pulumi:"configuration"` + Description *string `pulumi:"description"` // The provider-assigned unique ID for this managed resource. Id string `pulumi:"id"` // Standard JSON policy document rendered based on the arguments above. @@ -130,7 +132,8 @@ func GetLogDataProtectionPolicyDocumentOutput(ctx *pulumi.Context, args GetLogDa // A collection of arguments for invoking getLogDataProtectionPolicyDocument. type GetLogDataProtectionPolicyDocumentOutputArgs struct { - Description pulumi.StringPtrInput `pulumi:"description"` + Configuration GetLogDataProtectionPolicyDocumentConfigurationPtrInput `pulumi:"configuration"` + Description pulumi.StringPtrInput `pulumi:"description"` // The name of the data protection policy document. Name pulumi.StringInput `pulumi:"name"` // Configures the data protection policy. 
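The new optional `configuration` argument threaded through the hunks above carries the custom data identifier block defined later in pulumiTypes.go. Here is a minimal sketch of the plain (non-Output) invoke form, assuming a caller supplies one custom identifier; the policy name, identifier name, and regex are hypothetical, and the required audit/de-identify statements are elided:

```go
package main

import (
	"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/cloudwatch"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		doc, err := cloudwatch.GetLogDataProtectionPolicyDocument(ctx, &cloudwatch.GetLogDataProtectionPolicyDocumentArgs{
			Name: "example-policy", // hypothetical policy document name
			// New optional block: custom regular expressions for sensitive data.
			Configuration: &cloudwatch.GetLogDataProtectionPolicyDocumentConfiguration{
				CustomDataIdentifiers: []cloudwatch.GetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifier{
					{
						Name:  "EmployeeId",       // name of the custom data identifier
						Regex: `EmployeeId-\d{9}`, // regular expression matching the sensitive data
					},
				},
			},
			// The required audit/de-identify statements are elided in this sketch.
			Statements: []cloudwatch.GetLogDataProtectionPolicyDocumentStatement{},
		}, nil)
		if err != nil {
			return err
		}
		ctx.Export("policyJson", pulumi.String(doc.Json))
		return nil
	})
}
```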
@@ -161,6 +164,12 @@ func (o GetLogDataProtectionPolicyDocumentResultOutput) ToGetLogDataProtectionPo return o } +func (o GetLogDataProtectionPolicyDocumentResultOutput) Configuration() GetLogDataProtectionPolicyDocumentConfigurationPtrOutput { + return o.ApplyT(func(v GetLogDataProtectionPolicyDocumentResult) *GetLogDataProtectionPolicyDocumentConfiguration { + return v.Configuration + }).(GetLogDataProtectionPolicyDocumentConfigurationPtrOutput) +} + func (o GetLogDataProtectionPolicyDocumentResultOutput) Description() pulumi.StringPtrOutput { return o.ApplyT(func(v GetLogDataProtectionPolicyDocumentResult) *string { return v.Description }).(pulumi.StringPtrOutput) } diff --git a/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/cloudwatch/init.go b/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/cloudwatch/init.go index ccfa33842..cec6af4e8 100644 --- a/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/cloudwatch/init.go +++ b/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/cloudwatch/init.go @@ -23,6 +23,10 @@ func (m *module) Construct(ctx *pulumi.Context, name, typ, urn string) (r pulumi switch typ { case "aws:cloudwatch/compositeAlarm:CompositeAlarm": r = &CompositeAlarm{} + case "aws:cloudwatch/contributorInsightRule:ContributorInsightRule": + r = &ContributorInsightRule{} + case "aws:cloudwatch/contributorManagedInsightRule:ContributorManagedInsightRule": + r = &ContributorManagedInsightRule{} case "aws:cloudwatch/dashboard:Dashboard": r = &Dashboard{} case "aws:cloudwatch/eventApiDestination:EventApiDestination": @@ -99,6 +103,16 @@ func init() { "cloudwatch/compositeAlarm", &module{version}, ) + pulumi.RegisterResourceModule( + "aws", + "cloudwatch/contributorInsightRule", + &module{version}, + ) + pulumi.RegisterResourceModule( + "aws", + "cloudwatch/contributorManagedInsightRule", + &module{version}, + ) pulumi.RegisterResourceModule( "aws", "cloudwatch/dashboard", diff --git a/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/cloudwatch/pulumiTypes.go b/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/cloudwatch/pulumiTypes.go index c70cd907f..babbf1c36 100644 --- a/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/cloudwatch/pulumiTypes.go +++ b/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/cloudwatch/pulumiTypes.go @@ -5699,7 +5699,7 @@ func (o EventTargetRunCommandTargetArrayOutput) Index(i pulumi.IntInput) EventTa } type EventTargetSagemakerPipelineTarget struct { - // List of Parameter names and values for SageMaker Model Building Pipeline execution. + // List of Parameter names and values for SageMaker AI Model Building Pipeline execution. PipelineParameterLists []EventTargetSagemakerPipelineTargetPipelineParameterList `pulumi:"pipelineParameterLists"` } @@ -5715,7 +5715,7 @@ type EventTargetSagemakerPipelineTargetInput interface { } type EventTargetSagemakerPipelineTargetArgs struct { - // List of Parameter names and values for SageMaker Model Building Pipeline execution. + // List of Parameter names and values for SageMaker AI Model Building Pipeline execution. PipelineParameterLists EventTargetSagemakerPipelineTargetPipelineParameterListArrayInput `pulumi:"pipelineParameterLists"` } @@ -5796,7 +5796,7 @@ func (o EventTargetSagemakerPipelineTargetOutput) ToEventTargetSagemakerPipeline }).(EventTargetSagemakerPipelineTargetPtrOutput) } -// List of Parameter names and values for SageMaker Model Building Pipeline execution. +// List of Parameter names and values for SageMaker AI Model Building Pipeline execution. 
func (o EventTargetSagemakerPipelineTargetOutput) PipelineParameterLists() EventTargetSagemakerPipelineTargetPipelineParameterListArrayOutput { return o.ApplyT(func(v EventTargetSagemakerPipelineTarget) []EventTargetSagemakerPipelineTargetPipelineParameterList { return v.PipelineParameterLists @@ -5827,7 +5827,7 @@ func (o EventTargetSagemakerPipelineTargetPtrOutput) Elem() EventTargetSagemaker }).(EventTargetSagemakerPipelineTargetOutput) } -// List of Parameter names and values for SageMaker Model Building Pipeline execution. +// List of Parameter names and values for SageMaker AI Model Building Pipeline execution. func (o EventTargetSagemakerPipelineTargetPtrOutput) PipelineParameterLists() EventTargetSagemakerPipelineTargetPipelineParameterListArrayOutput { return o.ApplyT(func(v *EventTargetSagemakerPipelineTarget) []EventTargetSagemakerPipelineTargetPipelineParameterList { if v == nil { @@ -5838,9 +5838,9 @@ func (o EventTargetSagemakerPipelineTargetPtrOutput) PipelineParameterLists() Ev } type EventTargetSagemakerPipelineTargetPipelineParameterList struct { - // Name of parameter to start execution of a SageMaker Model Building Pipeline. + // Name of parameter to start execution of a SageMaker AI Model Building Pipeline. Name string `pulumi:"name"` - // Value of parameter to start execution of a SageMaker Model Building Pipeline. + // Value of parameter to start execution of a SageMaker AI Model Building Pipeline. Value string `pulumi:"value"` } @@ -5856,9 +5856,9 @@ type EventTargetSagemakerPipelineTargetPipelineParameterListInput interface { } type EventTargetSagemakerPipelineTargetPipelineParameterListArgs struct { - // Name of parameter to start execution of a SageMaker Model Building Pipeline. + // Name of parameter to start execution of a SageMaker AI Model Building Pipeline. Name pulumi.StringInput `pulumi:"name"` - // Value of parameter to start execution of a SageMaker Model Building Pipeline. + // Value of parameter to start execution of a SageMaker AI Model Building Pipeline. Value pulumi.StringInput `pulumi:"value"` } @@ -5913,12 +5913,12 @@ func (o EventTargetSagemakerPipelineTargetPipelineParameterListOutput) ToEventTa return o } -// Name of parameter to start execution of a SageMaker Model Building Pipeline. +// Name of parameter to start execution of a SageMaker AI Model Building Pipeline. func (o EventTargetSagemakerPipelineTargetPipelineParameterListOutput) Name() pulumi.StringOutput { return o.ApplyT(func(v EventTargetSagemakerPipelineTargetPipelineParameterList) string { return v.Name }).(pulumi.StringOutput) } -// Value of parameter to start execution of a SageMaker Model Building Pipeline. +// Value of parameter to start execution of a SageMaker AI Model Building Pipeline. func (o EventTargetSagemakerPipelineTargetPipelineParameterListOutput) Value() pulumi.StringOutput { return o.ApplyT(func(v EventTargetSagemakerPipelineTargetPipelineParameterList) string { return v.Value }).(pulumi.StringOutput) } @@ -7847,6 +7847,241 @@ func (o MetricStreamStatisticsConfigurationIncludeMetricArrayOutput) Index(i pul }).(MetricStreamStatisticsConfigurationIncludeMetricOutput) } +type GetContributorManagedInsightRulesManagedRule struct { + // ARN of an Amazon Web Services resource that has managed Contributor Insights rules. + // + // The following arguments are optional: + // + // There are no optional arguments. + ResourceArn string `pulumi:"resourceArn"` + // Describes the state of a managed rule. 
If the rule is enabled, it contains information about the Contributor Insights rule that contains information about the related Amazon Web Services resource. See `ruleState reference` below for details. + RuleStates []GetContributorManagedInsightRulesManagedRuleRuleState `pulumi:"ruleStates"` + // Template name for the managed rule. Used to enable managed rules using `PutManagedInsightRules`. + TemplateName string `pulumi:"templateName"` +} + +// GetContributorManagedInsightRulesManagedRuleInput is an input type that accepts GetContributorManagedInsightRulesManagedRuleArgs and GetContributorManagedInsightRulesManagedRuleOutput values. +// You can construct a concrete instance of `GetContributorManagedInsightRulesManagedRuleInput` via: +// +// GetContributorManagedInsightRulesManagedRuleArgs{...} +type GetContributorManagedInsightRulesManagedRuleInput interface { + pulumi.Input + + ToGetContributorManagedInsightRulesManagedRuleOutput() GetContributorManagedInsightRulesManagedRuleOutput + ToGetContributorManagedInsightRulesManagedRuleOutputWithContext(context.Context) GetContributorManagedInsightRulesManagedRuleOutput +} + +type GetContributorManagedInsightRulesManagedRuleArgs struct { + // ARN of an Amazon Web Services resource that has managed Contributor Insights rules. + // + // The following arguments are optional: + // + // There are no optional arguments. + ResourceArn pulumi.StringInput `pulumi:"resourceArn"` + // Describes the state of a managed rule. If the rule is enabled, it contains information about the Contributor Insights rule that contains information about the related Amazon Web Services resource. See `ruleState reference` below for details. + RuleStates GetContributorManagedInsightRulesManagedRuleRuleStateArrayInput `pulumi:"ruleStates"` + // Template name for the managed rule. Used to enable managed rules using `PutManagedInsightRules`. + TemplateName pulumi.StringInput `pulumi:"templateName"` +} + +func (GetContributorManagedInsightRulesManagedRuleArgs) ElementType() reflect.Type { + return reflect.TypeOf((*GetContributorManagedInsightRulesManagedRule)(nil)).Elem() +} + +func (i GetContributorManagedInsightRulesManagedRuleArgs) ToGetContributorManagedInsightRulesManagedRuleOutput() GetContributorManagedInsightRulesManagedRuleOutput { + return i.ToGetContributorManagedInsightRulesManagedRuleOutputWithContext(context.Background()) +} + +func (i GetContributorManagedInsightRulesManagedRuleArgs) ToGetContributorManagedInsightRulesManagedRuleOutputWithContext(ctx context.Context) GetContributorManagedInsightRulesManagedRuleOutput { + return pulumi.ToOutputWithContext(ctx, i).(GetContributorManagedInsightRulesManagedRuleOutput) +} + +// GetContributorManagedInsightRulesManagedRuleArrayInput is an input type that accepts GetContributorManagedInsightRulesManagedRuleArray and GetContributorManagedInsightRulesManagedRuleArrayOutput values. 
+// You can construct a concrete instance of `GetContributorManagedInsightRulesManagedRuleArrayInput` via: +// +// GetContributorManagedInsightRulesManagedRuleArray{ GetContributorManagedInsightRulesManagedRuleArgs{...} } +type GetContributorManagedInsightRulesManagedRuleArrayInput interface { + pulumi.Input + + ToGetContributorManagedInsightRulesManagedRuleArrayOutput() GetContributorManagedInsightRulesManagedRuleArrayOutput + ToGetContributorManagedInsightRulesManagedRuleArrayOutputWithContext(context.Context) GetContributorManagedInsightRulesManagedRuleArrayOutput +} + +type GetContributorManagedInsightRulesManagedRuleArray []GetContributorManagedInsightRulesManagedRuleInput + +func (GetContributorManagedInsightRulesManagedRuleArray) ElementType() reflect.Type { + return reflect.TypeOf((*[]GetContributorManagedInsightRulesManagedRule)(nil)).Elem() +} + +func (i GetContributorManagedInsightRulesManagedRuleArray) ToGetContributorManagedInsightRulesManagedRuleArrayOutput() GetContributorManagedInsightRulesManagedRuleArrayOutput { + return i.ToGetContributorManagedInsightRulesManagedRuleArrayOutputWithContext(context.Background()) +} + +func (i GetContributorManagedInsightRulesManagedRuleArray) ToGetContributorManagedInsightRulesManagedRuleArrayOutputWithContext(ctx context.Context) GetContributorManagedInsightRulesManagedRuleArrayOutput { + return pulumi.ToOutputWithContext(ctx, i).(GetContributorManagedInsightRulesManagedRuleArrayOutput) +} + +type GetContributorManagedInsightRulesManagedRuleOutput struct{ *pulumi.OutputState } + +func (GetContributorManagedInsightRulesManagedRuleOutput) ElementType() reflect.Type { + return reflect.TypeOf((*GetContributorManagedInsightRulesManagedRule)(nil)).Elem() +} + +func (o GetContributorManagedInsightRulesManagedRuleOutput) ToGetContributorManagedInsightRulesManagedRuleOutput() GetContributorManagedInsightRulesManagedRuleOutput { + return o +} + +func (o GetContributorManagedInsightRulesManagedRuleOutput) ToGetContributorManagedInsightRulesManagedRuleOutputWithContext(ctx context.Context) GetContributorManagedInsightRulesManagedRuleOutput { + return o +} + +// ARN of an Amazon Web Services resource that has managed Contributor Insights rules. +// +// The following arguments are optional: +// +// There are no optional arguments. +func (o GetContributorManagedInsightRulesManagedRuleOutput) ResourceArn() pulumi.StringOutput { + return o.ApplyT(func(v GetContributorManagedInsightRulesManagedRule) string { return v.ResourceArn }).(pulumi.StringOutput) +} + +// Describes the state of a managed rule. If the rule is enabled, it contains information about the Contributor Insights rule that contains information about the related Amazon Web Services resource. See `ruleState reference` below for details. +func (o GetContributorManagedInsightRulesManagedRuleOutput) RuleStates() GetContributorManagedInsightRulesManagedRuleRuleStateArrayOutput { + return o.ApplyT(func(v GetContributorManagedInsightRulesManagedRule) []GetContributorManagedInsightRulesManagedRuleRuleState { + return v.RuleStates + }).(GetContributorManagedInsightRulesManagedRuleRuleStateArrayOutput) +} + +// Template name for the managed rule. Used to enable managed rules using `PutManagedInsightRules`. 
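Because the nested result shapes are easy to lose in the generated plumbing, here is a short sketch of consuming `managedRules` and `ruleStates` through the plain invoke form; the ARN is the placeholder from the data source docs, and the printing is purely illustrative:

```go
package main

import (
	"fmt"

	"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/cloudwatch"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		rules, err := cloudwatch.GetContributorManagedInsightRules(ctx, &cloudwatch.GetContributorManagedInsightRulesArgs{
			ResourceArn: "arn:aws:ec2:us-west-2:123456789012:resource-name/resourceid", // placeholder ARN
		}, nil)
		if err != nil {
			return err
		}
		// Each managed rule carries the template name used with PutManagedInsightRules
		// and, when enabled, the state of the associated Contributor Insights rule.
		for _, rule := range rules.ManagedRules {
			for _, rs := range rule.RuleStates {
				fmt.Printf("template=%s rule=%s state=%s\n", rule.TemplateName, rs.RuleName, rs.State)
			}
		}
		return nil
	})
}
```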
+func (o GetContributorManagedInsightRulesManagedRuleOutput) TemplateName() pulumi.StringOutput { + return o.ApplyT(func(v GetContributorManagedInsightRulesManagedRule) string { return v.TemplateName }).(pulumi.StringOutput) +} + +type GetContributorManagedInsightRulesManagedRuleArrayOutput struct{ *pulumi.OutputState } + +func (GetContributorManagedInsightRulesManagedRuleArrayOutput) ElementType() reflect.Type { + return reflect.TypeOf((*[]GetContributorManagedInsightRulesManagedRule)(nil)).Elem() +} + +func (o GetContributorManagedInsightRulesManagedRuleArrayOutput) ToGetContributorManagedInsightRulesManagedRuleArrayOutput() GetContributorManagedInsightRulesManagedRuleArrayOutput { + return o +} + +func (o GetContributorManagedInsightRulesManagedRuleArrayOutput) ToGetContributorManagedInsightRulesManagedRuleArrayOutputWithContext(ctx context.Context) GetContributorManagedInsightRulesManagedRuleArrayOutput { + return o +} + +func (o GetContributorManagedInsightRulesManagedRuleArrayOutput) Index(i pulumi.IntInput) GetContributorManagedInsightRulesManagedRuleOutput { + return pulumi.All(o, i).ApplyT(func(vs []interface{}) GetContributorManagedInsightRulesManagedRule { + return vs[0].([]GetContributorManagedInsightRulesManagedRule)[vs[1].(int)] + }).(GetContributorManagedInsightRulesManagedRuleOutput) +} + +type GetContributorManagedInsightRulesManagedRuleRuleState struct { + // Name of the Contributor Insights rule that contains data for the specified Amazon Web Services resource. + RuleName string `pulumi:"ruleName"` + // Indicates whether the rule is enabled or disabled. + State string `pulumi:"state"` +} + +// GetContributorManagedInsightRulesManagedRuleRuleStateInput is an input type that accepts GetContributorManagedInsightRulesManagedRuleRuleStateArgs and GetContributorManagedInsightRulesManagedRuleRuleStateOutput values. +// You can construct a concrete instance of `GetContributorManagedInsightRulesManagedRuleRuleStateInput` via: +// +// GetContributorManagedInsightRulesManagedRuleRuleStateArgs{...} +type GetContributorManagedInsightRulesManagedRuleRuleStateInput interface { + pulumi.Input + + ToGetContributorManagedInsightRulesManagedRuleRuleStateOutput() GetContributorManagedInsightRulesManagedRuleRuleStateOutput + ToGetContributorManagedInsightRulesManagedRuleRuleStateOutputWithContext(context.Context) GetContributorManagedInsightRulesManagedRuleRuleStateOutput +} + +type GetContributorManagedInsightRulesManagedRuleRuleStateArgs struct { + // Name of the Contributor Insights rule that contains data for the specified Amazon Web Services resource. + RuleName pulumi.StringInput `pulumi:"ruleName"` + // Indicates whether the rule is enabled or disabled. 
+ State pulumi.StringInput `pulumi:"state"` +} + +func (GetContributorManagedInsightRulesManagedRuleRuleStateArgs) ElementType() reflect.Type { + return reflect.TypeOf((*GetContributorManagedInsightRulesManagedRuleRuleState)(nil)).Elem() +} + +func (i GetContributorManagedInsightRulesManagedRuleRuleStateArgs) ToGetContributorManagedInsightRulesManagedRuleRuleStateOutput() GetContributorManagedInsightRulesManagedRuleRuleStateOutput { + return i.ToGetContributorManagedInsightRulesManagedRuleRuleStateOutputWithContext(context.Background()) +} + +func (i GetContributorManagedInsightRulesManagedRuleRuleStateArgs) ToGetContributorManagedInsightRulesManagedRuleRuleStateOutputWithContext(ctx context.Context) GetContributorManagedInsightRulesManagedRuleRuleStateOutput { + return pulumi.ToOutputWithContext(ctx, i).(GetContributorManagedInsightRulesManagedRuleRuleStateOutput) +} + +// GetContributorManagedInsightRulesManagedRuleRuleStateArrayInput is an input type that accepts GetContributorManagedInsightRulesManagedRuleRuleStateArray and GetContributorManagedInsightRulesManagedRuleRuleStateArrayOutput values. +// You can construct a concrete instance of `GetContributorManagedInsightRulesManagedRuleRuleStateArrayInput` via: +// +// GetContributorManagedInsightRulesManagedRuleRuleStateArray{ GetContributorManagedInsightRulesManagedRuleRuleStateArgs{...} } +type GetContributorManagedInsightRulesManagedRuleRuleStateArrayInput interface { + pulumi.Input + + ToGetContributorManagedInsightRulesManagedRuleRuleStateArrayOutput() GetContributorManagedInsightRulesManagedRuleRuleStateArrayOutput + ToGetContributorManagedInsightRulesManagedRuleRuleStateArrayOutputWithContext(context.Context) GetContributorManagedInsightRulesManagedRuleRuleStateArrayOutput +} + +type GetContributorManagedInsightRulesManagedRuleRuleStateArray []GetContributorManagedInsightRulesManagedRuleRuleStateInput + +func (GetContributorManagedInsightRulesManagedRuleRuleStateArray) ElementType() reflect.Type { + return reflect.TypeOf((*[]GetContributorManagedInsightRulesManagedRuleRuleState)(nil)).Elem() +} + +func (i GetContributorManagedInsightRulesManagedRuleRuleStateArray) ToGetContributorManagedInsightRulesManagedRuleRuleStateArrayOutput() GetContributorManagedInsightRulesManagedRuleRuleStateArrayOutput { + return i.ToGetContributorManagedInsightRulesManagedRuleRuleStateArrayOutputWithContext(context.Background()) +} + +func (i GetContributorManagedInsightRulesManagedRuleRuleStateArray) ToGetContributorManagedInsightRulesManagedRuleRuleStateArrayOutputWithContext(ctx context.Context) GetContributorManagedInsightRulesManagedRuleRuleStateArrayOutput { + return pulumi.ToOutputWithContext(ctx, i).(GetContributorManagedInsightRulesManagedRuleRuleStateArrayOutput) +} + +type GetContributorManagedInsightRulesManagedRuleRuleStateOutput struct{ *pulumi.OutputState } + +func (GetContributorManagedInsightRulesManagedRuleRuleStateOutput) ElementType() reflect.Type { + return reflect.TypeOf((*GetContributorManagedInsightRulesManagedRuleRuleState)(nil)).Elem() +} + +func (o GetContributorManagedInsightRulesManagedRuleRuleStateOutput) ToGetContributorManagedInsightRulesManagedRuleRuleStateOutput() GetContributorManagedInsightRulesManagedRuleRuleStateOutput { + return o +} + +func (o GetContributorManagedInsightRulesManagedRuleRuleStateOutput) ToGetContributorManagedInsightRulesManagedRuleRuleStateOutputWithContext(ctx context.Context) GetContributorManagedInsightRulesManagedRuleRuleStateOutput { + return o +} + +// Name of the Contributor Insights rule 
that contains data for the specified Amazon Web Services resource. +func (o GetContributorManagedInsightRulesManagedRuleRuleStateOutput) RuleName() pulumi.StringOutput { + return o.ApplyT(func(v GetContributorManagedInsightRulesManagedRuleRuleState) string { return v.RuleName }).(pulumi.StringOutput) +} + +// Indicates whether the rule is enabled or disabled. +func (o GetContributorManagedInsightRulesManagedRuleRuleStateOutput) State() pulumi.StringOutput { + return o.ApplyT(func(v GetContributorManagedInsightRulesManagedRuleRuleState) string { return v.State }).(pulumi.StringOutput) +} + +type GetContributorManagedInsightRulesManagedRuleRuleStateArrayOutput struct{ *pulumi.OutputState } + +func (GetContributorManagedInsightRulesManagedRuleRuleStateArrayOutput) ElementType() reflect.Type { + return reflect.TypeOf((*[]GetContributorManagedInsightRulesManagedRuleRuleState)(nil)).Elem() +} + +func (o GetContributorManagedInsightRulesManagedRuleRuleStateArrayOutput) ToGetContributorManagedInsightRulesManagedRuleRuleStateArrayOutput() GetContributorManagedInsightRulesManagedRuleRuleStateArrayOutput { + return o +} + +func (o GetContributorManagedInsightRulesManagedRuleRuleStateArrayOutput) ToGetContributorManagedInsightRulesManagedRuleRuleStateArrayOutputWithContext(ctx context.Context) GetContributorManagedInsightRulesManagedRuleRuleStateArrayOutput { + return o +} + +func (o GetContributorManagedInsightRulesManagedRuleRuleStateArrayOutput) Index(i pulumi.IntInput) GetContributorManagedInsightRulesManagedRuleRuleStateOutput { + return pulumi.All(o, i).ApplyT(func(vs []interface{}) GetContributorManagedInsightRulesManagedRuleRuleState { + return vs[0].([]GetContributorManagedInsightRulesManagedRuleRuleState)[vs[1].(int)] + }).(GetContributorManagedInsightRulesManagedRuleRuleStateOutput) +} + type GetEventBusesEventBus struct { // The ARN of the event bus. Arn string `pulumi:"arn"` @@ -7989,6 +8224,251 @@ func (o GetEventBusesEventBusArrayOutput) Index(i pulumi.IntInput) GetEventBuses }).(GetEventBusesEventBusOutput) } +type GetLogDataProtectionPolicyDocumentConfiguration struct { + // Configures custom regular expressions to detect sensitive data. Read more in [Custom data identifiers](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CWL-custom-data-identifiers.html). + CustomDataIdentifiers []GetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifier `pulumi:"customDataIdentifiers"` +} + +// GetLogDataProtectionPolicyDocumentConfigurationInput is an input type that accepts GetLogDataProtectionPolicyDocumentConfigurationArgs and GetLogDataProtectionPolicyDocumentConfigurationOutput values. +// You can construct a concrete instance of `GetLogDataProtectionPolicyDocumentConfigurationInput` via: +// +// GetLogDataProtectionPolicyDocumentConfigurationArgs{...} +type GetLogDataProtectionPolicyDocumentConfigurationInput interface { + pulumi.Input + + ToGetLogDataProtectionPolicyDocumentConfigurationOutput() GetLogDataProtectionPolicyDocumentConfigurationOutput + ToGetLogDataProtectionPolicyDocumentConfigurationOutputWithContext(context.Context) GetLogDataProtectionPolicyDocumentConfigurationOutput +} + +type GetLogDataProtectionPolicyDocumentConfigurationArgs struct { + // Configures custom regular expressions to detect sensitive data. Read more in [Custom data identifiers](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CWL-custom-data-identifiers.html). 
+ CustomDataIdentifiers GetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierArrayInput `pulumi:"customDataIdentifiers"` +} + +func (GetLogDataProtectionPolicyDocumentConfigurationArgs) ElementType() reflect.Type { + return reflect.TypeOf((*GetLogDataProtectionPolicyDocumentConfiguration)(nil)).Elem() +} + +func (i GetLogDataProtectionPolicyDocumentConfigurationArgs) ToGetLogDataProtectionPolicyDocumentConfigurationOutput() GetLogDataProtectionPolicyDocumentConfigurationOutput { + return i.ToGetLogDataProtectionPolicyDocumentConfigurationOutputWithContext(context.Background()) +} + +func (i GetLogDataProtectionPolicyDocumentConfigurationArgs) ToGetLogDataProtectionPolicyDocumentConfigurationOutputWithContext(ctx context.Context) GetLogDataProtectionPolicyDocumentConfigurationOutput { + return pulumi.ToOutputWithContext(ctx, i).(GetLogDataProtectionPolicyDocumentConfigurationOutput) +} + +func (i GetLogDataProtectionPolicyDocumentConfigurationArgs) ToGetLogDataProtectionPolicyDocumentConfigurationPtrOutput() GetLogDataProtectionPolicyDocumentConfigurationPtrOutput { + return i.ToGetLogDataProtectionPolicyDocumentConfigurationPtrOutputWithContext(context.Background()) +} + +func (i GetLogDataProtectionPolicyDocumentConfigurationArgs) ToGetLogDataProtectionPolicyDocumentConfigurationPtrOutputWithContext(ctx context.Context) GetLogDataProtectionPolicyDocumentConfigurationPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(GetLogDataProtectionPolicyDocumentConfigurationOutput).ToGetLogDataProtectionPolicyDocumentConfigurationPtrOutputWithContext(ctx) +} + +// GetLogDataProtectionPolicyDocumentConfigurationPtrInput is an input type that accepts GetLogDataProtectionPolicyDocumentConfigurationArgs, GetLogDataProtectionPolicyDocumentConfigurationPtr and GetLogDataProtectionPolicyDocumentConfigurationPtrOutput values. 
+// You can construct a concrete instance of `GetLogDataProtectionPolicyDocumentConfigurationPtrInput` via: +// +// GetLogDataProtectionPolicyDocumentConfigurationArgs{...} +// +// or: +// +// nil +type GetLogDataProtectionPolicyDocumentConfigurationPtrInput interface { + pulumi.Input + + ToGetLogDataProtectionPolicyDocumentConfigurationPtrOutput() GetLogDataProtectionPolicyDocumentConfigurationPtrOutput + ToGetLogDataProtectionPolicyDocumentConfigurationPtrOutputWithContext(context.Context) GetLogDataProtectionPolicyDocumentConfigurationPtrOutput +} + +type getLogDataProtectionPolicyDocumentConfigurationPtrType GetLogDataProtectionPolicyDocumentConfigurationArgs + +func GetLogDataProtectionPolicyDocumentConfigurationPtr(v *GetLogDataProtectionPolicyDocumentConfigurationArgs) GetLogDataProtectionPolicyDocumentConfigurationPtrInput { + return (*getLogDataProtectionPolicyDocumentConfigurationPtrType)(v) +} + +func (*getLogDataProtectionPolicyDocumentConfigurationPtrType) ElementType() reflect.Type { + return reflect.TypeOf((**GetLogDataProtectionPolicyDocumentConfiguration)(nil)).Elem() +} + +func (i *getLogDataProtectionPolicyDocumentConfigurationPtrType) ToGetLogDataProtectionPolicyDocumentConfigurationPtrOutput() GetLogDataProtectionPolicyDocumentConfigurationPtrOutput { + return i.ToGetLogDataProtectionPolicyDocumentConfigurationPtrOutputWithContext(context.Background()) +} + +func (i *getLogDataProtectionPolicyDocumentConfigurationPtrType) ToGetLogDataProtectionPolicyDocumentConfigurationPtrOutputWithContext(ctx context.Context) GetLogDataProtectionPolicyDocumentConfigurationPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(GetLogDataProtectionPolicyDocumentConfigurationPtrOutput) +} + +type GetLogDataProtectionPolicyDocumentConfigurationOutput struct{ *pulumi.OutputState } + +func (GetLogDataProtectionPolicyDocumentConfigurationOutput) ElementType() reflect.Type { + return reflect.TypeOf((*GetLogDataProtectionPolicyDocumentConfiguration)(nil)).Elem() +} + +func (o GetLogDataProtectionPolicyDocumentConfigurationOutput) ToGetLogDataProtectionPolicyDocumentConfigurationOutput() GetLogDataProtectionPolicyDocumentConfigurationOutput { + return o +} + +func (o GetLogDataProtectionPolicyDocumentConfigurationOutput) ToGetLogDataProtectionPolicyDocumentConfigurationOutputWithContext(ctx context.Context) GetLogDataProtectionPolicyDocumentConfigurationOutput { + return o +} + +func (o GetLogDataProtectionPolicyDocumentConfigurationOutput) ToGetLogDataProtectionPolicyDocumentConfigurationPtrOutput() GetLogDataProtectionPolicyDocumentConfigurationPtrOutput { + return o.ToGetLogDataProtectionPolicyDocumentConfigurationPtrOutputWithContext(context.Background()) +} + +func (o GetLogDataProtectionPolicyDocumentConfigurationOutput) ToGetLogDataProtectionPolicyDocumentConfigurationPtrOutputWithContext(ctx context.Context) GetLogDataProtectionPolicyDocumentConfigurationPtrOutput { + return o.ApplyTWithContext(ctx, func(_ context.Context, v GetLogDataProtectionPolicyDocumentConfiguration) *GetLogDataProtectionPolicyDocumentConfiguration { + return &v + }).(GetLogDataProtectionPolicyDocumentConfigurationPtrOutput) +} + +// Configures custom regular expressions to detect sensitive data. Read more in [Custom data identifiers](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CWL-custom-data-identifiers.html). 
+func (o GetLogDataProtectionPolicyDocumentConfigurationOutput) CustomDataIdentifiers() GetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierArrayOutput { + return o.ApplyT(func(v GetLogDataProtectionPolicyDocumentConfiguration) []GetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifier { + return v.CustomDataIdentifiers + }).(GetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierArrayOutput) +} + +type GetLogDataProtectionPolicyDocumentConfigurationPtrOutput struct{ *pulumi.OutputState } + +func (GetLogDataProtectionPolicyDocumentConfigurationPtrOutput) ElementType() reflect.Type { + return reflect.TypeOf((**GetLogDataProtectionPolicyDocumentConfiguration)(nil)).Elem() +} + +func (o GetLogDataProtectionPolicyDocumentConfigurationPtrOutput) ToGetLogDataProtectionPolicyDocumentConfigurationPtrOutput() GetLogDataProtectionPolicyDocumentConfigurationPtrOutput { + return o +} + +func (o GetLogDataProtectionPolicyDocumentConfigurationPtrOutput) ToGetLogDataProtectionPolicyDocumentConfigurationPtrOutputWithContext(ctx context.Context) GetLogDataProtectionPolicyDocumentConfigurationPtrOutput { + return o +} + +func (o GetLogDataProtectionPolicyDocumentConfigurationPtrOutput) Elem() GetLogDataProtectionPolicyDocumentConfigurationOutput { + return o.ApplyT(func(v *GetLogDataProtectionPolicyDocumentConfiguration) GetLogDataProtectionPolicyDocumentConfiguration { + if v != nil { + return *v + } + var ret GetLogDataProtectionPolicyDocumentConfiguration + return ret + }).(GetLogDataProtectionPolicyDocumentConfigurationOutput) +} + +// Configures custom regular expressions to detect sensitive data. Read more in [Custom data identifiers](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CWL-custom-data-identifiers.html). +func (o GetLogDataProtectionPolicyDocumentConfigurationPtrOutput) CustomDataIdentifiers() GetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierArrayOutput { + return o.ApplyT(func(v *GetLogDataProtectionPolicyDocumentConfiguration) []GetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifier { + if v == nil { + return nil + } + return v.CustomDataIdentifiers + }).(GetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierArrayOutput) +} + +type GetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifier struct { + // Name of the custom data identifier + Name string `pulumi:"name"` + // Regular expression to match sensitive data + Regex string `pulumi:"regex"` +} + +// GetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierInput is an input type that accepts GetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierArgs and GetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierOutput values.
+// You can construct a concrete instance of `GetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierInput` via: +// +// GetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierArgs{...} +type GetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierInput interface { + pulumi.Input + + ToGetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierOutput() GetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierOutput + ToGetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierOutputWithContext(context.Context) GetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierOutput +} + +type GetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierArgs struct { + // Name of the custom data identifier + Name pulumi.StringInput `pulumi:"name"` + // Regular expression to match sensitive data + Regex pulumi.StringInput `pulumi:"regex"` +} + +func (GetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierArgs) ElementType() reflect.Type { + return reflect.TypeOf((*GetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifier)(nil)).Elem() +} + +func (i GetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierArgs) ToGetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierOutput() GetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierOutput { + return i.ToGetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierOutputWithContext(context.Background()) +} + +func (i GetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierArgs) ToGetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierOutputWithContext(ctx context.Context) GetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierOutput { + return pulumi.ToOutputWithContext(ctx, i).(GetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierOutput) +} + +// GetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierArrayInput is an input type that accepts GetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierArray and GetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierArrayOutput values.
+// You can construct a concrete instance of `GetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierArrayInput` via: +// +// GetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierArray{ GetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierArgs{...} } +type GetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierArrayInput interface { + pulumi.Input + + ToGetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierArrayOutput() GetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierArrayOutput + ToGetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierArrayOutputWithContext(context.Context) GetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierArrayOutput +} + +type GetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierArray []GetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierInput + +func (GetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierArray) ElementType() reflect.Type { + return reflect.TypeOf((*[]GetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifier)(nil)).Elem() +} + +func (i GetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierArray) ToGetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierArrayOutput() GetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierArrayOutput { + return i.ToGetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierArrayOutputWithContext(context.Background()) +} + +func (i GetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierArray) ToGetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierArrayOutputWithContext(ctx context.Context) GetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierArrayOutput { + return pulumi.ToOutputWithContext(ctx, i).(GetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierArrayOutput) +} + +type GetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierOutput struct{ *pulumi.OutputState } + +func (GetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierOutput) ElementType() reflect.Type { + return reflect.TypeOf((*GetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifier)(nil)).Elem() +} + +func (o GetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierOutput) ToGetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierOutput() GetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierOutput { + return o +} + +func (o GetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierOutput) ToGetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierOutputWithContext(ctx context.Context) GetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierOutput { + return o +} + +// Name of the custom data identifier +func (o GetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierOutput) Name() pulumi.StringOutput { + return o.ApplyT(func(v GetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifier) string { return v.Name }).(pulumi.StringOutput) +} + +// Regular expression to match sensitive data +func (o GetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierOutput) Regex() pulumi.StringOutput { + return o.ApplyT(func(v GetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifier) string { return v.Regex }).(pulumi.StringOutput) +} + +type GetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierArrayOutput struct{ *pulumi.OutputState } + +func
(GetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierArrayOutput) ElementType() reflect.Type { + return reflect.TypeOf((*[]GetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifier)(nil)).Elem() +} + +func (o GetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierArrayOutput) ToGetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierArrayOutput() GetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierArrayOutput { + return o +} + +func (o GetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierArrayOutput) ToGetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierArrayOutputWithContext(ctx context.Context) GetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierArrayOutput { + return o +} + +func (o GetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierArrayOutput) Index(i pulumi.IntInput) GetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierOutput { + return pulumi.All(o, i).ApplyT(func(vs []interface{}) GetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifier { + return vs[0].([]GetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifier)[vs[1].(int)] + }).(GetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierOutput) +} + type GetLogDataProtectionPolicyDocumentStatement struct { // Set of at least 1 sensitive data identifiers that you want to mask. Read more in [Types of data that you can protect](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/protect-sensitive-log-data-types.html). DataIdentifiers []string `pulumi:"dataIdentifiers"` @@ -9276,8 +9756,16 @@ func init() { pulumi.RegisterInputType(reflect.TypeOf((*MetricStreamStatisticsConfigurationArrayInput)(nil)).Elem(), MetricStreamStatisticsConfigurationArray{}) pulumi.RegisterInputType(reflect.TypeOf((*MetricStreamStatisticsConfigurationIncludeMetricInput)(nil)).Elem(), MetricStreamStatisticsConfigurationIncludeMetricArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*MetricStreamStatisticsConfigurationIncludeMetricArrayInput)(nil)).Elem(), MetricStreamStatisticsConfigurationIncludeMetricArray{}) + pulumi.RegisterInputType(reflect.TypeOf((*GetContributorManagedInsightRulesManagedRuleInput)(nil)).Elem(), GetContributorManagedInsightRulesManagedRuleArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*GetContributorManagedInsightRulesManagedRuleArrayInput)(nil)).Elem(), GetContributorManagedInsightRulesManagedRuleArray{}) + pulumi.RegisterInputType(reflect.TypeOf((*GetContributorManagedInsightRulesManagedRuleRuleStateInput)(nil)).Elem(), GetContributorManagedInsightRulesManagedRuleRuleStateArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*GetContributorManagedInsightRulesManagedRuleRuleStateArrayInput)(nil)).Elem(), GetContributorManagedInsightRulesManagedRuleRuleStateArray{}) pulumi.RegisterInputType(reflect.TypeOf((*GetEventBusesEventBusInput)(nil)).Elem(), GetEventBusesEventBusArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*GetEventBusesEventBusArrayInput)(nil)).Elem(), GetEventBusesEventBusArray{}) + pulumi.RegisterInputType(reflect.TypeOf((*GetLogDataProtectionPolicyDocumentConfigurationInput)(nil)).Elem(), GetLogDataProtectionPolicyDocumentConfigurationArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*GetLogDataProtectionPolicyDocumentConfigurationPtrInput)(nil)).Elem(), GetLogDataProtectionPolicyDocumentConfigurationArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*GetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierInput)(nil)).Elem(), 
GetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*GetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierArrayInput)(nil)).Elem(), GetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierArray{}) pulumi.RegisterInputType(reflect.TypeOf((*GetLogDataProtectionPolicyDocumentStatementInput)(nil)).Elem(), GetLogDataProtectionPolicyDocumentStatementArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*GetLogDataProtectionPolicyDocumentStatementArrayInput)(nil)).Elem(), GetLogDataProtectionPolicyDocumentStatementArray{}) pulumi.RegisterInputType(reflect.TypeOf((*GetLogDataProtectionPolicyDocumentStatementOperationInput)(nil)).Elem(), GetLogDataProtectionPolicyDocumentStatementOperationArgs{}) @@ -9399,8 +9887,16 @@ func init() { pulumi.RegisterOutputType(MetricStreamStatisticsConfigurationArrayOutput{}) pulumi.RegisterOutputType(MetricStreamStatisticsConfigurationIncludeMetricOutput{}) pulumi.RegisterOutputType(MetricStreamStatisticsConfigurationIncludeMetricArrayOutput{}) + pulumi.RegisterOutputType(GetContributorManagedInsightRulesManagedRuleOutput{}) + pulumi.RegisterOutputType(GetContributorManagedInsightRulesManagedRuleArrayOutput{}) + pulumi.RegisterOutputType(GetContributorManagedInsightRulesManagedRuleRuleStateOutput{}) + pulumi.RegisterOutputType(GetContributorManagedInsightRulesManagedRuleRuleStateArrayOutput{}) pulumi.RegisterOutputType(GetEventBusesEventBusOutput{}) pulumi.RegisterOutputType(GetEventBusesEventBusArrayOutput{}) + pulumi.RegisterOutputType(GetLogDataProtectionPolicyDocumentConfigurationOutput{}) + pulumi.RegisterOutputType(GetLogDataProtectionPolicyDocumentConfigurationPtrOutput{}) + pulumi.RegisterOutputType(GetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierOutput{}) + pulumi.RegisterOutputType(GetLogDataProtectionPolicyDocumentConfigurationCustomDataIdentifierArrayOutput{}) pulumi.RegisterOutputType(GetLogDataProtectionPolicyDocumentStatementOutput{}) pulumi.RegisterOutputType(GetLogDataProtectionPolicyDocumentStatementArrayOutput{}) pulumi.RegisterOutputType(GetLogDataProtectionPolicyDocumentStatementOperationOutput{}) diff --git a/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/ec2/eip.go b/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/ec2/eip.go index 3a2bd368b..e823b625c 100644 --- a/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/ec2/eip.go +++ b/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/ec2/eip.go @@ -270,7 +270,7 @@ type Eip struct { // > **NOTE:** Specifying both `publicIpv4Pool` and `address` won't cause an error but `address` will be used in the // case both options are defined as the api only requires one or the other. // - // Deprecated: use domain attribute instead + // Deprecated: vpc is deprecated. Use domain instead. Vpc pulumi.BoolOutput `pulumi:"vpc"` } @@ -356,7 +356,7 @@ type eipState struct { // > **NOTE:** Specifying both `publicIpv4Pool` and `address` won't cause an error but `address` will be used in the // case both options are defined as the api only requires one or the other. // - // Deprecated: use domain attribute instead + // Deprecated: vpc is deprecated. Use domain instead. Vpc *bool `pulumi:"vpc"` } @@ -413,7 +413,7 @@ type EipState struct { // > **NOTE:** Specifying both `publicIpv4Pool` and `address` won't cause an error but `address` will be used in the // case both options are defined as the api only requires one or the other. // - // Deprecated: use domain attribute instead + // Deprecated: vpc is deprecated. 
Use domain instead. Vpc pulumi.BoolPtrInput } @@ -451,7 +451,7 @@ type eipArgs struct { // > **NOTE:** Specifying both `publicIpv4Pool` and `address` won't cause an error but `address` will be used in the // case both options are defined as the api only requires one or the other. // - // Deprecated: use domain attribute instead + // Deprecated: vpc is deprecated. Use domain instead. Vpc *bool `pulumi:"vpc"` } @@ -486,7 +486,7 @@ type EipArgs struct { // > **NOTE:** Specifying both `publicIpv4Pool` and `address` won't cause an error but `address` will be used in the // case both options are defined as the api only requires one or the other. // - // Deprecated: use domain attribute instead + // Deprecated: vpc is deprecated. Use domain instead. Vpc pulumi.BoolPtrInput } @@ -692,7 +692,7 @@ func (o EipOutput) TagsAll() pulumi.StringMapOutput { // > **NOTE:** Specifying both `publicIpv4Pool` and `address` won't cause an error but `address` will be used in the // case both options are defined as the api only requires one or the other. // -// Deprecated: use domain attribute instead +// Deprecated: vpc is deprecated. Use domain instead. func (o EipOutput) Vpc() pulumi.BoolOutput { return o.ApplyT(func(v *Eip) pulumi.BoolOutput { return v.Vpc }).(pulumi.BoolOutput) } diff --git a/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/ec2/flowLog.go b/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/ec2/flowLog.go index 42e21d3db..58f17fc79 100644 --- a/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/ec2/flowLog.go +++ b/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/ec2/flowLog.go @@ -213,7 +213,7 @@ type FlowLog struct { LogFormat pulumi.StringOutput `pulumi:"logFormat"` // **Deprecated:** Use `logDestination` instead. The name of the CloudWatch log group. Either `logGroupName` or `logDestination` must be set. // - // Deprecated: use 'log_destination' argument instead + // Deprecated: log_group_name is deprecated. Use logDestination instead. LogGroupName pulumi.StringOutput `pulumi:"logGroupName"` // The maximum interval of time // during which a flow of packets is captured and aggregated into a flow @@ -286,7 +286,7 @@ type flowLogState struct { LogFormat *string `pulumi:"logFormat"` // **Deprecated:** Use `logDestination` instead. The name of the CloudWatch log group. Either `logGroupName` or `logDestination` must be set. // - // Deprecated: use 'log_destination' argument instead + // Deprecated: log_group_name is deprecated. Use logDestination instead. LogGroupName *string `pulumi:"logGroupName"` // The maximum interval of time // during which a flow of packets is captured and aggregated into a flow @@ -330,7 +330,7 @@ type FlowLogState struct { LogFormat pulumi.StringPtrInput // **Deprecated:** Use `logDestination` instead. The name of the CloudWatch log group. Either `logGroupName` or `logDestination` must be set. // - // Deprecated: use 'log_destination' argument instead + // Deprecated: log_group_name is deprecated. Use logDestination instead. LogGroupName pulumi.StringPtrInput // The maximum interval of time // during which a flow of packets is captured and aggregated into a flow @@ -376,7 +376,7 @@ type flowLogArgs struct { LogFormat *string `pulumi:"logFormat"` // **Deprecated:** Use `logDestination` instead. The name of the CloudWatch log group. Either `logGroupName` or `logDestination` must be set. // - // Deprecated: use 'log_destination' argument instead + // Deprecated: log_group_name is deprecated. Use logDestination instead. 
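Both regenerated notices follow the same migration pattern: a legacy boolean or name argument is superseded by a more general one, `vpc` by `domain` on `ec2.Eip` and `logGroupName` by `logDestination` on `ec2.FlowLog`. A minimal sketch of the replacement arguments; every ARN and ID here is a placeholder:

```go
package main

import (
	"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/ec2"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Eip: set Domain instead of the deprecated Vpc flag.
		_, err := ec2.NewEip(ctx, "example", &ec2.EipArgs{
			Domain: pulumi.String("vpc"), // was: Vpc: pulumi.Bool(true)
		})
		if err != nil {
			return err
		}
		// FlowLog: point LogDestination at the log group ARN instead of LogGroupName.
		_, err = ec2.NewFlowLog(ctx, "example", &ec2.FlowLogArgs{
			LogDestination: pulumi.String("arn:aws:logs:us-east-1:123456789012:log-group:example"), // placeholder ARN
			IamRoleArn:     pulumi.String("arn:aws:iam::123456789012:role/flow-logs"),              // placeholder ARN
			TrafficType:    pulumi.String("ALL"),
			VpcId:          pulumi.String("vpc-0123456789abcdef0"), // placeholder VPC ID
		})
		return err
	})
}
```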
LogGroupName *string `pulumi:"logGroupName"` // The maximum interval of time // during which a flow of packets is captured and aggregated into a flow @@ -415,7 +415,7 @@ type FlowLogArgs struct { LogFormat pulumi.StringPtrInput // **Deprecated:** Use `logDestination` instead. The name of the CloudWatch log group. Either `logGroupName` or `logDestination` must be set. // - // Deprecated: use 'log_destination' argument instead + // Deprecated: log_group_name is deprecated. Use logDestination instead. LogGroupName pulumi.StringPtrInput // The maximum interval of time // during which a flow of packets is captured and aggregated into a flow @@ -565,7 +565,7 @@ func (o FlowLogOutput) LogFormat() pulumi.StringOutput { // **Deprecated:** Use `logDestination` instead. The name of the CloudWatch log group. Either `logGroupName` or `logDestination` must be set. // -// Deprecated: use 'log_destination' argument instead +// Deprecated: log_group_name is deprecated. Use logDestination instead. func (o FlowLogOutput) LogGroupName() pulumi.StringOutput { return o.ApplyT(func(v *FlowLog) pulumi.StringOutput { return v.LogGroupName }).(pulumi.StringOutput) } diff --git a/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/ec2/getLaunchTemplate.go b/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/ec2/getLaunchTemplate.go index 315a6c6c6..c1b11b796 100644 --- a/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/ec2/getLaunchTemplate.go +++ b/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/ec2/getLaunchTemplate.go @@ -105,12 +105,14 @@ type LookupLaunchTemplateResult struct { DisableApiStop bool `pulumi:"disableApiStop"` DisableApiTermination bool `pulumi:"disableApiTermination"` EbsOptimized string `pulumi:"ebsOptimized"` - ElasticGpuSpecifications []GetLaunchTemplateElasticGpuSpecification `pulumi:"elasticGpuSpecifications"` - ElasticInferenceAccelerators []GetLaunchTemplateElasticInferenceAccelerator `pulumi:"elasticInferenceAccelerators"` - EnclaveOptions []GetLaunchTemplateEnclaveOption `pulumi:"enclaveOptions"` - Filters []GetLaunchTemplateFilter `pulumi:"filters"` - HibernationOptions []GetLaunchTemplateHibernationOption `pulumi:"hibernationOptions"` - IamInstanceProfiles []GetLaunchTemplateIamInstanceProfile `pulumi:"iamInstanceProfiles"` + // Deprecated: elastic_gpu_specifications is deprecated. AWS no longer supports the Elastic Graphics service. + ElasticGpuSpecifications []GetLaunchTemplateElasticGpuSpecification `pulumi:"elasticGpuSpecifications"` + // Deprecated: elastic_inference_accelerator is deprecated. AWS no longer supports the Elastic Inference service. + ElasticInferenceAccelerators []GetLaunchTemplateElasticInferenceAccelerator `pulumi:"elasticInferenceAccelerators"` + EnclaveOptions []GetLaunchTemplateEnclaveOption `pulumi:"enclaveOptions"` + Filters []GetLaunchTemplateFilter `pulumi:"filters"` + HibernationOptions []GetLaunchTemplateHibernationOption `pulumi:"hibernationOptions"` + IamInstanceProfiles []GetLaunchTemplateIamInstanceProfile `pulumi:"iamInstanceProfiles"` // ID of the launch template. Id string `pulumi:"id"` ImageId string `pulumi:"imageId"` @@ -221,12 +223,14 @@ func (o LookupLaunchTemplateResultOutput) EbsOptimized() pulumi.StringOutput { return o.ApplyT(func(v LookupLaunchTemplateResult) string { return v.EbsOptimized }).(pulumi.StringOutput) } +// Deprecated: elastic_gpu_specifications is deprecated. AWS no longer supports the Elastic Graphics service. 
func (o LookupLaunchTemplateResultOutput) ElasticGpuSpecifications() GetLaunchTemplateElasticGpuSpecificationArrayOutput { return o.ApplyT(func(v LookupLaunchTemplateResult) []GetLaunchTemplateElasticGpuSpecification { return v.ElasticGpuSpecifications }).(GetLaunchTemplateElasticGpuSpecificationArrayOutput) } +// Deprecated: elastic_inference_accelerator is deprecated. AWS no longer supports the Elastic Inference service. func (o LookupLaunchTemplateResultOutput) ElasticInferenceAccelerators() GetLaunchTemplateElasticInferenceAcceleratorArrayOutput { return o.ApplyT(func(v LookupLaunchTemplateResult) []GetLaunchTemplateElasticInferenceAccelerator { return v.ElasticInferenceAccelerators diff --git a/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/ec2/getVpcEndpoint.go b/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/ec2/getVpcEndpoint.go index ec996ef60..db6ac313d 100644 --- a/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/ec2/getVpcEndpoint.go +++ b/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/ec2/getVpcEndpoint.go @@ -64,7 +64,7 @@ type LookupVpcEndpointArgs struct { Filters []GetVpcEndpointFilter `pulumi:"filters"` // ID of the specific VPC Endpoint to retrieve. Id *string `pulumi:"id"` - // Service name of the specific VPC Endpoint to retrieve. For AWS services the service name is usually in the form `com.amazonaws..` (the SageMaker Notebook service is an exception to this rule, the service name is in the form `aws.sagemaker..notebook`). + // Service name of the specific VPC Endpoint to retrieve. For AWS services the service name is usually in the form `com.amazonaws..` (the SageMaker AI Notebook service is an exception to this rule, the service name is in the form `aws.sagemaker..notebook`). ServiceName *string `pulumi:"serviceName"` // State of the specific VPC Endpoint to retrieve. State *string `pulumi:"state"` @@ -132,7 +132,7 @@ type LookupVpcEndpointOutputArgs struct { Filters GetVpcEndpointFilterArrayInput `pulumi:"filters"` // ID of the specific VPC Endpoint to retrieve. Id pulumi.StringPtrInput `pulumi:"id"` - // Service name of the specific VPC Endpoint to retrieve. For AWS services the service name is usually in the form `com.amazonaws..` (the SageMaker Notebook service is an exception to this rule, the service name is in the form `aws.sagemaker..notebook`). + // Service name of the specific VPC Endpoint to retrieve. For AWS services the service name is usually in the form `com.amazonaws..` (the SageMaker AI Notebook service is an exception to this rule, the service name is in the form `aws.sagemaker..notebook`). ServiceName pulumi.StringPtrInput `pulumi:"serviceName"` // State of the specific VPC Endpoint to retrieve. State pulumi.StringPtrInput `pulumi:"state"` diff --git a/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/ec2/getVpcEndpointService.go b/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/ec2/getVpcEndpointService.go index 8d98ab3d3..5d30f5c01 100644 --- a/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/ec2/getVpcEndpointService.go +++ b/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/ec2/getVpcEndpointService.go @@ -133,7 +133,7 @@ type LookupVpcEndpointServiceArgs struct { Filters []GetVpcEndpointServiceFilter `pulumi:"filters"` // Common name of an AWS service (e.g., `s3`). Service *string `pulumi:"service"` - // Service name that is specified when creating a VPC endpoint. 
For AWS services the service name is usually in the form `com.amazonaws..` (the SageMaker Notebook service is an exception to this rule, the service name is in the form `aws.sagemaker..notebook`). + // Service name that is specified when creating a VPC endpoint. For AWS services the service name is usually in the form `com.amazonaws..` (the SageMaker AI Notebook service is an exception to this rule, the service name is in the form `aws.sagemaker..notebook`). ServiceName *string `pulumi:"serviceName"` // AWS regions in which to look for services. ServiceRegions []string `pulumi:"serviceRegions"` @@ -197,7 +197,7 @@ type LookupVpcEndpointServiceOutputArgs struct { Filters GetVpcEndpointServiceFilterArrayInput `pulumi:"filters"` // Common name of an AWS service (e.g., `s3`). Service pulumi.StringPtrInput `pulumi:"service"` - // Service name that is specified when creating a VPC endpoint. For AWS services the service name is usually in the form `com.amazonaws..` (the SageMaker Notebook service is an exception to this rule, the service name is in the form `aws.sagemaker..notebook`). + // Service name that is specified when creating a VPC endpoint. For AWS services the service name is usually in the form `com.amazonaws..` (the SageMaker AI Notebook service is an exception to this rule, the service name is in the form `aws.sagemaker..notebook`). ServiceName pulumi.StringPtrInput `pulumi:"serviceName"` // AWS regions in which to look for services. ServiceRegions pulumi.StringArrayInput `pulumi:"serviceRegions"` diff --git a/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/ec2/getVpcIpam.go b/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/ec2/getVpcIpam.go index 3b7f04be1..82b1353be 100644 --- a/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/ec2/getVpcIpam.go +++ b/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/ec2/getVpcIpam.go @@ -81,9 +81,9 @@ type LookupVpcIpamResult struct { // ID of the default public scope. PublicDefaultScopeId string `pulumi:"publicDefaultScopeId"` // Number of resource discovery associations. - ResourceDiscoveryAssociationCount float64 `pulumi:"resourceDiscoveryAssociationCount"` + ResourceDiscoveryAssociationCount int `pulumi:"resourceDiscoveryAssociationCount"` // Number of scopes on this IPAM. - ScopeCount float64 `pulumi:"scopeCount"` + ScopeCount int `pulumi:"scopeCount"` // Current state of the IPAM. State string `pulumi:"state"` // State message of the IPAM. @@ -184,13 +184,13 @@ func (o LookupVpcIpamResultOutput) PublicDefaultScopeId() pulumi.StringOutput { } // Number of resource discovery associations. -func (o LookupVpcIpamResultOutput) ResourceDiscoveryAssociationCount() pulumi.Float64Output { - return o.ApplyT(func(v LookupVpcIpamResult) float64 { return v.ResourceDiscoveryAssociationCount }).(pulumi.Float64Output) +func (o LookupVpcIpamResultOutput) ResourceDiscoveryAssociationCount() pulumi.IntOutput { + return o.ApplyT(func(v LookupVpcIpamResult) int { return v.ResourceDiscoveryAssociationCount }).(pulumi.IntOutput) } // Number of scopes on this IPAM. -func (o LookupVpcIpamResultOutput) ScopeCount() pulumi.Float64Output { - return o.ApplyT(func(v LookupVpcIpamResult) float64 { return v.ScopeCount }).(pulumi.Float64Output) +func (o LookupVpcIpamResultOutput) ScopeCount() pulumi.IntOutput { + return o.ApplyT(func(v LookupVpcIpamResult) int { return v.ScopeCount }).(pulumi.IntOutput) } // Current state of the IPAM. 
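getVpcIpam.go (and the matching GetVpcIpamsIpam shapes later in pulumiTypes.go) narrow the two count attributes from float64 to int, a compile-time break for callers that handled them as floats. A sketch of the adjusted call site, assuming the generated LookupVpcIpam invoke accepts an optional Id argument; the IPAM ID is a placeholder:

package main

import (
	"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/ec2"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		ipam, err := ec2.LookupVpcIpam(ctx, &ec2.LookupVpcIpamArgs{
			Id: pulumi.StringRef("ipam-0123456789abcdef0"), // hypothetical IPAM ID
		})
		if err != nil {
			return err
		}
		// ScopeCount and ResourceDiscoveryAssociationCount are plain ints now,
		// so no float64 conversion is needed before integer comparisons.
		ctx.Export("scopeCount", pulumi.Int(ipam.ScopeCount))
		return nil
	})
}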
diff --git a/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/ec2/instance.go b/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/ec2/instance.go index 7164c2b45..afe6c463d 100644 --- a/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/ec2/instance.go +++ b/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/ec2/instance.go @@ -336,13 +336,13 @@ type Instance struct { CapacityReservationSpecification InstanceCapacityReservationSpecificationOutput `pulumi:"capacityReservationSpecification"` // Sets the number of CPU cores for an instance. This option is only supported on creation of instance type that support CPU Options [CPU Cores and Threads Per CPU Core Per Instance Type](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html#cpu-options-supported-instances-values) - specifying this option for unsupported instance types will return an error from the EC2 API. // - // Deprecated: use 'cpu_options' argument instead + // Deprecated: cpu_core_count is deprecated. Use cpuOptions instead. CpuCoreCount pulumi.IntOutput `pulumi:"cpuCoreCount"` // The CPU options for the instance. See CPU Options below for more details. CpuOptions InstanceCpuOptionsOutput `pulumi:"cpuOptions"` // If set to 1, hyperthreading is disabled on the launched instance. Defaults to 2 if not set. See [Optimizing CPU Options](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html) for more information. // - // Deprecated: use 'cpu_options' argument instead + // Deprecated: cpu_threads_per_core is deprecated. Use cpuOptions instead. CpuThreadsPerCore pulumi.IntOutput `pulumi:"cpuThreadsPerCore"` // Configuration block for customizing the credit specification of the instance. See Credit Specification below for more details. This provider will only perform drift detection of its value when present in a configuration. Removing this configuration on existing instances will only stop managing it. It will not change the configuration back to the default for the instance type. CreditSpecification InstanceCreditSpecificationPtrOutput `pulumi:"creditSpecification"` @@ -498,13 +498,13 @@ type instanceState struct { CapacityReservationSpecification *InstanceCapacityReservationSpecification `pulumi:"capacityReservationSpecification"` // Sets the number of CPU cores for an instance. This option is only supported on creation of instance type that support CPU Options [CPU Cores and Threads Per CPU Core Per Instance Type](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html#cpu-options-supported-instances-values) - specifying this option for unsupported instance types will return an error from the EC2 API. // - // Deprecated: use 'cpu_options' argument instead + // Deprecated: cpu_core_count is deprecated. Use cpuOptions instead. CpuCoreCount *int `pulumi:"cpuCoreCount"` // The CPU options for the instance. See CPU Options below for more details. CpuOptions *InstanceCpuOptions `pulumi:"cpuOptions"` // If set to 1, hyperthreading is disabled on the launched instance. Defaults to 2 if not set. See [Optimizing CPU Options](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html) for more information. // - // Deprecated: use 'cpu_options' argument instead + // Deprecated: cpu_threads_per_core is deprecated. Use cpuOptions instead. CpuThreadsPerCore *int `pulumi:"cpuThreadsPerCore"` // Configuration block for customizing the credit specification of the instance. See Credit Specification below for more details. 
This provider will only perform drift detection of its value when present in a configuration. Removing this configuration on existing instances will only stop managing it. It will not change the configuration back to the default for the instance type. CreditSpecification *InstanceCreditSpecification `pulumi:"creditSpecification"` @@ -631,13 +631,13 @@ type InstanceState struct { CapacityReservationSpecification InstanceCapacityReservationSpecificationPtrInput // Sets the number of CPU cores for an instance. This option is only supported on creation of instance type that support CPU Options [CPU Cores and Threads Per CPU Core Per Instance Type](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html#cpu-options-supported-instances-values) - specifying this option for unsupported instance types will return an error from the EC2 API. // - // Deprecated: use 'cpu_options' argument instead + // Deprecated: cpu_core_count is deprecated. Use cpuOptions instead. CpuCoreCount pulumi.IntPtrInput // The CPU options for the instance. See CPU Options below for more details. CpuOptions InstanceCpuOptionsPtrInput // If set to 1, hyperthreading is disabled on the launched instance. Defaults to 2 if not set. See [Optimizing CPU Options](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html) for more information. // - // Deprecated: use 'cpu_options' argument instead + // Deprecated: cpu_threads_per_core is deprecated. Use cpuOptions instead. CpuThreadsPerCore pulumi.IntPtrInput // Configuration block for customizing the credit specification of the instance. See Credit Specification below for more details. This provider will only perform drift detection of its value when present in a configuration. Removing this configuration on existing instances will only stop managing it. It will not change the configuration back to the default for the instance type. CreditSpecification InstanceCreditSpecificationPtrInput @@ -766,13 +766,13 @@ type instanceArgs struct { CapacityReservationSpecification *InstanceCapacityReservationSpecification `pulumi:"capacityReservationSpecification"` // Sets the number of CPU cores for an instance. This option is only supported on creation of instance type that support CPU Options [CPU Cores and Threads Per CPU Core Per Instance Type](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html#cpu-options-supported-instances-values) - specifying this option for unsupported instance types will return an error from the EC2 API. // - // Deprecated: use 'cpu_options' argument instead + // Deprecated: cpu_core_count is deprecated. Use cpuOptions instead. CpuCoreCount *int `pulumi:"cpuCoreCount"` // The CPU options for the instance. See CPU Options below for more details. CpuOptions *InstanceCpuOptions `pulumi:"cpuOptions"` // If set to 1, hyperthreading is disabled on the launched instance. Defaults to 2 if not set. See [Optimizing CPU Options](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html) for more information. // - // Deprecated: use 'cpu_options' argument instead + // Deprecated: cpu_threads_per_core is deprecated. Use cpuOptions instead. CpuThreadsPerCore *int `pulumi:"cpuThreadsPerCore"` // Configuration block for customizing the credit specification of the instance. See Credit Specification below for more details. This provider will only perform drift detection of its value when present in a configuration. Removing this configuration on existing instances will only stop managing it. 
It will not change the configuration back to the default for the instance type. CreditSpecification *InstanceCreditSpecification `pulumi:"creditSpecification"` @@ -876,13 +876,13 @@ type InstanceArgs struct { CapacityReservationSpecification InstanceCapacityReservationSpecificationPtrInput // Sets the number of CPU cores for an instance. This option is only supported on creation of instance type that support CPU Options [CPU Cores and Threads Per CPU Core Per Instance Type](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html#cpu-options-supported-instances-values) - specifying this option for unsupported instance types will return an error from the EC2 API. // - // Deprecated: use 'cpu_options' argument instead + // Deprecated: cpu_core_count is deprecated. Use cpuOptions instead. CpuCoreCount pulumi.IntPtrInput // The CPU options for the instance. See CPU Options below for more details. CpuOptions InstanceCpuOptionsPtrInput // If set to 1, hyperthreading is disabled on the launched instance. Defaults to 2 if not set. See [Optimizing CPU Options](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html) for more information. // - // Deprecated: use 'cpu_options' argument instead + // Deprecated: cpu_threads_per_core is deprecated. Use cpuOptions instead. CpuThreadsPerCore pulumi.IntPtrInput // Configuration block for customizing the credit specification of the instance. See Credit Specification below for more details. This provider will only perform drift detection of its value when present in a configuration. Removing this configuration on existing instances will only stop managing it. It will not change the configuration back to the default for the instance type. CreditSpecification InstanceCreditSpecificationPtrInput @@ -1090,7 +1090,7 @@ func (o InstanceOutput) CapacityReservationSpecification() InstanceCapacityReser // Sets the number of CPU cores for an instance. This option is only supported on creation of instance type that support CPU Options [CPU Cores and Threads Per CPU Core Per Instance Type](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html#cpu-options-supported-instances-values) - specifying this option for unsupported instance types will return an error from the EC2 API. // -// Deprecated: use 'cpu_options' argument instead +// Deprecated: cpu_core_count is deprecated. Use cpuOptions instead. func (o InstanceOutput) CpuCoreCount() pulumi.IntOutput { return o.ApplyT(func(v *Instance) pulumi.IntOutput { return v.CpuCoreCount }).(pulumi.IntOutput) } @@ -1102,7 +1102,7 @@ func (o InstanceOutput) CpuOptions() InstanceCpuOptionsOutput { // If set to 1, hyperthreading is disabled on the launched instance. Defaults to 2 if not set. See [Optimizing CPU Options](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html) for more information. // -// Deprecated: use 'cpu_options' argument instead +// Deprecated: cpu_threads_per_core is deprecated. Use cpuOptions instead. 
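Both deprecated CPU fields on aws.ec2.Instance fold into the `cpuOptions` block (aws.ec2.SpotInstanceRequest, later in this patch, gets the identical treatment). A minimal migration sketch with hypothetical AMI and instance-type values:

package main

import (
	"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/ec2"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Before: CpuCoreCount: pulumi.Int(8), CpuThreadsPerCore: pulumi.Int(1) -- both deprecated.
		// After: the same two settings move into cpuOptions.
		_, err := ec2.NewInstance(ctx, "example", &ec2.InstanceArgs{
			Ami:          pulumi.String("ami-0123456789abcdef0"), // hypothetical AMI
			InstanceType: pulumi.String("c6i.4xlarge"),           // hypothetical type
			CpuOptions: &ec2.InstanceCpuOptionsArgs{
				CoreCount:      pulumi.Int(8),
				ThreadsPerCore: pulumi.Int(1), // 1 disables hyperthreading
			},
		})
		return err
	})
}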
func (o InstanceOutput) CpuThreadsPerCore() pulumi.IntOutput { return o.ApplyT(func(v *Instance) pulumi.IntOutput { return v.CpuThreadsPerCore }).(pulumi.IntOutput) } diff --git a/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/ec2/launchTemplate.go b/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/ec2/launchTemplate.go index 97576f0e7..6e46203d4 100644 --- a/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/ec2/launchTemplate.go +++ b/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/ec2/launchTemplate.go @@ -46,10 +46,14 @@ type LaunchTemplate struct { DisableApiTermination pulumi.BoolPtrOutput `pulumi:"disableApiTermination"` // If `true`, the launched EC2 instance will be EBS-optimized. EbsOptimized pulumi.StringPtrOutput `pulumi:"ebsOptimized"` - // The elastic GPU to attach to the instance. See Elastic GPU + // **DEPRECATED** The elastic GPU to attach to the instance. See Elastic GPU // below for more details. + // + // Deprecated: elastic_gpu_specifications is deprecated. AWS no longer supports the Elastic Graphics service. ElasticGpuSpecifications LaunchTemplateElasticGpuSpecificationArrayOutput `pulumi:"elasticGpuSpecifications"` - // Configuration block containing an Elastic Inference Accelerator to attach to the instance. See Elastic Inference Accelerator below for more details. + // **DEPRECATED** Configuration block containing an Elastic Inference Accelerator to attach to the instance. See Elastic Inference Accelerator below for more details. + // + // Deprecated: elastic_inference_accelerator is deprecated. AWS no longer supports the Elastic Inference service. ElasticInferenceAccelerator LaunchTemplateElasticInferenceAcceleratorPtrOutput `pulumi:"elasticInferenceAccelerator"` // Enable Nitro Enclaves on launched instances. See Enclave Options below for more details. EnclaveOptions LaunchTemplateEnclaveOptionsPtrOutput `pulumi:"enclaveOptions"` @@ -169,10 +173,14 @@ type launchTemplateState struct { DisableApiTermination *bool `pulumi:"disableApiTermination"` // If `true`, the launched EC2 instance will be EBS-optimized. EbsOptimized *string `pulumi:"ebsOptimized"` - // The elastic GPU to attach to the instance. See Elastic GPU + // **DEPRECATED** The elastic GPU to attach to the instance. See Elastic GPU // below for more details. + // + // Deprecated: elastic_gpu_specifications is deprecated. AWS no longer supports the Elastic Graphics service. ElasticGpuSpecifications []LaunchTemplateElasticGpuSpecification `pulumi:"elasticGpuSpecifications"` - // Configuration block containing an Elastic Inference Accelerator to attach to the instance. See Elastic Inference Accelerator below for more details. + // **DEPRECATED** Configuration block containing an Elastic Inference Accelerator to attach to the instance. See Elastic Inference Accelerator below for more details. + // + // Deprecated: elastic_inference_accelerator is deprecated. AWS no longer supports the Elastic Inference service. ElasticInferenceAccelerator *LaunchTemplateElasticInferenceAccelerator `pulumi:"elasticInferenceAccelerator"` // Enable Nitro Enclaves on launched instances. See Enclave Options below for more details. EnclaveOptions *LaunchTemplateEnclaveOptions `pulumi:"enclaveOptions"` @@ -263,10 +271,14 @@ type LaunchTemplateState struct { DisableApiTermination pulumi.BoolPtrInput // If `true`, the launched EC2 instance will be EBS-optimized. EbsOptimized pulumi.StringPtrInput - // The elastic GPU to attach to the instance. See Elastic GPU + // **DEPRECATED** The elastic GPU to attach to the instance. 
See Elastic GPU // below for more details. + // + // Deprecated: elastic_gpu_specifications is deprecated. AWS no longer supports the Elastic Graphics service. ElasticGpuSpecifications LaunchTemplateElasticGpuSpecificationArrayInput - // Configuration block containing an Elastic Inference Accelerator to attach to the instance. See Elastic Inference Accelerator below for more details. + // **DEPRECATED** Configuration block containing an Elastic Inference Accelerator to attach to the instance. See Elastic Inference Accelerator below for more details. + // + // Deprecated: elastic_inference_accelerator is deprecated. AWS no longer supports the Elastic Inference service. ElasticInferenceAccelerator LaunchTemplateElasticInferenceAcceleratorPtrInput // Enable Nitro Enclaves on launched instances. See Enclave Options below for more details. EnclaveOptions LaunchTemplateEnclaveOptionsPtrInput @@ -359,10 +371,14 @@ type launchTemplateArgs struct { DisableApiTermination *bool `pulumi:"disableApiTermination"` // If `true`, the launched EC2 instance will be EBS-optimized. EbsOptimized *string `pulumi:"ebsOptimized"` - // The elastic GPU to attach to the instance. See Elastic GPU + // **DEPRECATED** The elastic GPU to attach to the instance. See Elastic GPU // below for more details. + // + // Deprecated: elastic_gpu_specifications is deprecated. AWS no longer supports the Elastic Graphics service. ElasticGpuSpecifications []LaunchTemplateElasticGpuSpecification `pulumi:"elasticGpuSpecifications"` - // Configuration block containing an Elastic Inference Accelerator to attach to the instance. See Elastic Inference Accelerator below for more details. + // **DEPRECATED** Configuration block containing an Elastic Inference Accelerator to attach to the instance. See Elastic Inference Accelerator below for more details. + // + // Deprecated: elastic_inference_accelerator is deprecated. AWS no longer supports the Elastic Inference service. ElasticInferenceAccelerator *LaunchTemplateElasticInferenceAccelerator `pulumi:"elasticInferenceAccelerator"` // Enable Nitro Enclaves on launched instances. See Enclave Options below for more details. EnclaveOptions *LaunchTemplateEnclaveOptions `pulumi:"enclaveOptions"` @@ -446,10 +462,14 @@ type LaunchTemplateArgs struct { DisableApiTermination pulumi.BoolPtrInput // If `true`, the launched EC2 instance will be EBS-optimized. EbsOptimized pulumi.StringPtrInput - // The elastic GPU to attach to the instance. See Elastic GPU + // **DEPRECATED** The elastic GPU to attach to the instance. See Elastic GPU // below for more details. + // + // Deprecated: elastic_gpu_specifications is deprecated. AWS no longer supports the Elastic Graphics service. ElasticGpuSpecifications LaunchTemplateElasticGpuSpecificationArrayInput - // Configuration block containing an Elastic Inference Accelerator to attach to the instance. See Elastic Inference Accelerator below for more details. + // **DEPRECATED** Configuration block containing an Elastic Inference Accelerator to attach to the instance. See Elastic Inference Accelerator below for more details. + // + // Deprecated: elastic_inference_accelerator is deprecated. AWS no longer supports the Elastic Inference service. ElasticInferenceAccelerator LaunchTemplateElasticInferenceAcceleratorPtrInput // Enable Nitro Enclaves on launched instances. See Enclave Options below for more details. 
EnclaveOptions LaunchTemplateEnclaveOptionsPtrInput @@ -652,15 +672,19 @@ func (o LaunchTemplateOutput) EbsOptimized() pulumi.StringPtrOutput { return o.ApplyT(func(v *LaunchTemplate) pulumi.StringPtrOutput { return v.EbsOptimized }).(pulumi.StringPtrOutput) } -// The elastic GPU to attach to the instance. See Elastic GPU +// **DEPRECATED** The elastic GPU to attach to the instance. See Elastic GPU // below for more details. +// +// Deprecated: elastic_gpu_specifications is deprecated. AWS no longer supports the Elastic Graphics service. func (o LaunchTemplateOutput) ElasticGpuSpecifications() LaunchTemplateElasticGpuSpecificationArrayOutput { return o.ApplyT(func(v *LaunchTemplate) LaunchTemplateElasticGpuSpecificationArrayOutput { return v.ElasticGpuSpecifications }).(LaunchTemplateElasticGpuSpecificationArrayOutput) } -// Configuration block containing an Elastic Inference Accelerator to attach to the instance. See Elastic Inference Accelerator below for more details. +// **DEPRECATED** Configuration block containing an Elastic Inference Accelerator to attach to the instance. See Elastic Inference Accelerator below for more details. +// +// Deprecated: elastic_inference_accelerator is deprecated. AWS no longer supports the Elastic Inference service. func (o LaunchTemplateOutput) ElasticInferenceAccelerator() LaunchTemplateElasticInferenceAcceleratorPtrOutput { return o.ApplyT(func(v *LaunchTemplate) LaunchTemplateElasticInferenceAcceleratorPtrOutput { return v.ElasticInferenceAccelerator diff --git a/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/ec2/pulumiTypes.go b/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/ec2/pulumiTypes.go index 6c466d4f0..0a1a0bf28 100644 --- a/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/ec2/pulumiTypes.go +++ b/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/ec2/pulumiTypes.go @@ -5139,6 +5139,14 @@ type FleetSpotOptions struct { InstancePoolsToUseCount *int `pulumi:"instancePoolsToUseCount"` // Nested argument containing maintenance strategies for managing your Spot Instances that are at an elevated risk of being interrupted. Defined below. MaintenanceStrategies *FleetSpotOptionsMaintenanceStrategies `pulumi:"maintenanceStrategies"` + // The maximum amount per hour for Spot Instances that you're willing to pay. + MaxTotalPrice *string `pulumi:"maxTotalPrice"` + // The minimum target capacity for Spot Instances in the fleet. If the minimum target capacity is not reached, the fleet launches no instances. Supported only for fleets of type `instant`. + MinTargetCapacity *int `pulumi:"minTargetCapacity"` + // Indicates that the fleet launches all Spot Instances into a single Availability Zone. Supported only for fleets of type `instant`. + SingleAvailabilityZone *bool `pulumi:"singleAvailabilityZone"` + // Indicates that the fleet uses a single instance type to launch all Spot Instances in the fleet. Supported only for fleets of type `instant`. + SingleInstanceType *bool `pulumi:"singleInstanceType"` } // FleetSpotOptionsInput is an input type that accepts FleetSpotOptionsArgs and FleetSpotOptionsOutput values. @@ -5161,6 +5169,14 @@ type FleetSpotOptionsArgs struct { InstancePoolsToUseCount pulumi.IntPtrInput `pulumi:"instancePoolsToUseCount"` // Nested argument containing maintenance strategies for managing your Spot Instances that are at an elevated risk of being interrupted. Defined below. 
MaintenanceStrategies FleetSpotOptionsMaintenanceStrategiesPtrInput `pulumi:"maintenanceStrategies"` + // The maximum amount per hour for Spot Instances that you're willing to pay. + MaxTotalPrice pulumi.StringPtrInput `pulumi:"maxTotalPrice"` + // The minimum target capacity for Spot Instances in the fleet. If the minimum target capacity is not reached, the fleet launches no instances. Supported only for fleets of type `instant`. + MinTargetCapacity pulumi.IntPtrInput `pulumi:"minTargetCapacity"` + // Indicates that the fleet launches all Spot Instances into a single Availability Zone. Supported only for fleets of type `instant`. + SingleAvailabilityZone pulumi.BoolPtrInput `pulumi:"singleAvailabilityZone"` + // Indicates that the fleet uses a single instance type to launch all Spot Instances in the fleet. Supported only for fleets of type `instant`. + SingleInstanceType pulumi.BoolPtrInput `pulumi:"singleInstanceType"` } func (FleetSpotOptionsArgs) ElementType() reflect.Type { @@ -5260,6 +5276,26 @@ func (o FleetSpotOptionsOutput) MaintenanceStrategies() FleetSpotOptionsMaintena return o.ApplyT(func(v FleetSpotOptions) *FleetSpotOptionsMaintenanceStrategies { return v.MaintenanceStrategies }).(FleetSpotOptionsMaintenanceStrategiesPtrOutput) } +// The maximum amount per hour for Spot Instances that you're willing to pay. +func (o FleetSpotOptionsOutput) MaxTotalPrice() pulumi.StringPtrOutput { + return o.ApplyT(func(v FleetSpotOptions) *string { return v.MaxTotalPrice }).(pulumi.StringPtrOutput) +} + +// The minimum target capacity for Spot Instances in the fleet. If the minimum target capacity is not reached, the fleet launches no instances. Supported only for fleets of type `instant`. +func (o FleetSpotOptionsOutput) MinTargetCapacity() pulumi.IntPtrOutput { + return o.ApplyT(func(v FleetSpotOptions) *int { return v.MinTargetCapacity }).(pulumi.IntPtrOutput) +} + +// Indicates that the fleet launches all Spot Instances into a single Availability Zone. Supported only for fleets of type `instant`. +func (o FleetSpotOptionsOutput) SingleAvailabilityZone() pulumi.BoolPtrOutput { + return o.ApplyT(func(v FleetSpotOptions) *bool { return v.SingleAvailabilityZone }).(pulumi.BoolPtrOutput) +} + +// Indicates that the fleet uses a single instance type to launch all Spot Instances in the fleet. Supported only for fleets of type `instant`. +func (o FleetSpotOptionsOutput) SingleInstanceType() pulumi.BoolPtrOutput { + return o.ApplyT(func(v FleetSpotOptions) *bool { return v.SingleInstanceType }).(pulumi.BoolPtrOutput) +} + type FleetSpotOptionsPtrOutput struct{ *pulumi.OutputState } func (FleetSpotOptionsPtrOutput) ElementType() reflect.Type { @@ -5324,6 +5360,46 @@ func (o FleetSpotOptionsPtrOutput) MaintenanceStrategies() FleetSpotOptionsMaint }).(FleetSpotOptionsMaintenanceStrategiesPtrOutput) } +// The maximum amount per hour for Spot Instances that you're willing to pay. +func (o FleetSpotOptionsPtrOutput) MaxTotalPrice() pulumi.StringPtrOutput { + return o.ApplyT(func(v *FleetSpotOptions) *string { + if v == nil { + return nil + } + return v.MaxTotalPrice + }).(pulumi.StringPtrOutput) +} + +// The minimum target capacity for Spot Instances in the fleet. If the minimum target capacity is not reached, the fleet launches no instances. Supported only for fleets of type `instant`. 
+func (o FleetSpotOptionsPtrOutput) MinTargetCapacity() pulumi.IntPtrOutput { + return o.ApplyT(func(v *FleetSpotOptions) *int { + if v == nil { + return nil + } + return v.MinTargetCapacity + }).(pulumi.IntPtrOutput) +} + +// Indicates that the fleet launches all Spot Instances into a single Availability Zone. Supported only for fleets of type `instant`. +func (o FleetSpotOptionsPtrOutput) SingleAvailabilityZone() pulumi.BoolPtrOutput { + return o.ApplyT(func(v *FleetSpotOptions) *bool { + if v == nil { + return nil + } + return v.SingleAvailabilityZone + }).(pulumi.BoolPtrOutput) +} + +// Indicates that the fleet uses a single instance type to launch all Spot Instances in the fleet. Supported only for fleets of type `instant`. +func (o FleetSpotOptionsPtrOutput) SingleInstanceType() pulumi.BoolPtrOutput { + return o.ApplyT(func(v *FleetSpotOptions) *bool { + if v == nil { + return nil + } + return v.SingleInstanceType + }).(pulumi.BoolPtrOutput) +} + type FleetSpotOptionsMaintenanceStrategies struct { // Nested argument containing the capacity rebalance for your fleet request. Defined below. CapacityRebalance *FleetSpotOptionsMaintenanceStrategiesCapacityRebalance `pulumi:"capacityRebalance"` @@ -14037,6 +14113,8 @@ type LaunchTemplateNetworkInterface struct { Description *string `pulumi:"description"` // The integer index of the network interface attachment. DeviceIndex *int `pulumi:"deviceIndex"` + // Configuration for Elastic Network Adapter (ENA) Express settings. Applies to network interfaces that use the [ena Express](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/enhanced-networking-ena-express.html) feature. See details below. + EnaSrdSpecification *LaunchTemplateNetworkInterfaceEnaSrdSpecification `pulumi:"enaSrdSpecification"` // The type of network interface. To create an Elastic Fabric Adapter (EFA), specify `efa`. InterfaceType *string `pulumi:"interfaceType"` // The number of secondary private IPv4 addresses to assign to a network interface. Conflicts with `ipv4Addresses` @@ -14093,6 +14171,8 @@ type LaunchTemplateNetworkInterfaceArgs struct { Description pulumi.StringPtrInput `pulumi:"description"` // The integer index of the network interface attachment. DeviceIndex pulumi.IntPtrInput `pulumi:"deviceIndex"` + // Configuration for Elastic Network Adapter (ENA) Express settings. Applies to network interfaces that use the [ena Express](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/enhanced-networking-ena-express.html) feature. See details below. + EnaSrdSpecification LaunchTemplateNetworkInterfaceEnaSrdSpecificationPtrInput `pulumi:"enaSrdSpecification"` // The type of network interface. To create an Elastic Fabric Adapter (EFA), specify `efa`. InterfaceType pulumi.StringPtrInput `pulumi:"interfaceType"` // The number of secondary private IPv4 addresses to assign to a network interface. Conflicts with `ipv4Addresses` @@ -14208,6 +14288,13 @@ func (o LaunchTemplateNetworkInterfaceOutput) DeviceIndex() pulumi.IntPtrOutput return o.ApplyT(func(v LaunchTemplateNetworkInterface) *int { return v.DeviceIndex }).(pulumi.IntPtrOutput) } +// Configuration for Elastic Network Adapter (ENA) Express settings. Applies to network interfaces that use the [ena Express](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/enhanced-networking-ena-express.html) feature. See details below. 
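The four new FleetSpotOptions fields above surface EC2 Fleet's Spot capacity-shaping controls. A sketch of populating them, assuming the pre-existing `AllocationStrategy` field on FleetSpotOptionsArgs; the price cap is a placeholder:

package main

import (
	"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/ec2"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	// minTargetCapacity, singleAvailabilityZone and singleInstanceType are
	// honored only by fleets of type "instant", per the comments above.
	spot := ec2.FleetSpotOptionsArgs{
		AllocationStrategy:     pulumi.String("price-capacity-optimized"),
		MaxTotalPrice:          pulumi.String("5.00"), // hypothetical USD/hour cap
		MinTargetCapacity:      pulumi.Int(2),
		SingleAvailabilityZone: pulumi.Bool(true),
		SingleInstanceType:     pulumi.Bool(true),
	}
	_ = spot // would be set as SpotOptions in an ec2.Fleet's FleetArgs
}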
+func (o LaunchTemplateNetworkInterfaceOutput) EnaSrdSpecification() LaunchTemplateNetworkInterfaceEnaSrdSpecificationPtrOutput { + return o.ApplyT(func(v LaunchTemplateNetworkInterface) *LaunchTemplateNetworkInterfaceEnaSrdSpecification { + return v.EnaSrdSpecification + }).(LaunchTemplateNetworkInterfaceEnaSrdSpecificationPtrOutput) +} + // The type of network interface. To create an Elastic Fabric Adapter (EFA), specify `efa`. func (o LaunchTemplateNetworkInterfaceOutput) InterfaceType() pulumi.StringPtrOutput { return o.ApplyT(func(v LaunchTemplateNetworkInterface) *string { return v.InterfaceType }).(pulumi.StringPtrOutput) @@ -14480,6 +14567,311 @@ func (o LaunchTemplateNetworkInterfaceConnectionTrackingSpecificationPtrOutput) }).(pulumi.IntPtrOutput) } +type LaunchTemplateNetworkInterfaceEnaSrdSpecification struct { + // Whether to enable ENA Express. ENA Express uses AWS Scalable Reliable Datagram (SRD) technology to improve the performance of TCP traffic. + EnaSrdEnabled *bool `pulumi:"enaSrdEnabled"` + // Configuration for ENA Express UDP optimization. See details below. + EnaSrdUdpSpecification *LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecification `pulumi:"enaSrdUdpSpecification"` +} + +// LaunchTemplateNetworkInterfaceEnaSrdSpecificationInput is an input type that accepts LaunchTemplateNetworkInterfaceEnaSrdSpecificationArgs and LaunchTemplateNetworkInterfaceEnaSrdSpecificationOutput values. +// You can construct a concrete instance of `LaunchTemplateNetworkInterfaceEnaSrdSpecificationInput` via: +// +// LaunchTemplateNetworkInterfaceEnaSrdSpecificationArgs{...} +type LaunchTemplateNetworkInterfaceEnaSrdSpecificationInput interface { + pulumi.Input + + ToLaunchTemplateNetworkInterfaceEnaSrdSpecificationOutput() LaunchTemplateNetworkInterfaceEnaSrdSpecificationOutput + ToLaunchTemplateNetworkInterfaceEnaSrdSpecificationOutputWithContext(context.Context) LaunchTemplateNetworkInterfaceEnaSrdSpecificationOutput +} + +type LaunchTemplateNetworkInterfaceEnaSrdSpecificationArgs struct { + // Whether to enable ENA Express. ENA Express uses AWS Scalable Reliable Datagram (SRD) technology to improve the performance of TCP traffic. + EnaSrdEnabled pulumi.BoolPtrInput `pulumi:"enaSrdEnabled"` + // Configuration for ENA Express UDP optimization. See details below. 
+ EnaSrdUdpSpecification LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationPtrInput `pulumi:"enaSrdUdpSpecification"` +} + +func (LaunchTemplateNetworkInterfaceEnaSrdSpecificationArgs) ElementType() reflect.Type { + return reflect.TypeOf((*LaunchTemplateNetworkInterfaceEnaSrdSpecification)(nil)).Elem() +} + +func (i LaunchTemplateNetworkInterfaceEnaSrdSpecificationArgs) ToLaunchTemplateNetworkInterfaceEnaSrdSpecificationOutput() LaunchTemplateNetworkInterfaceEnaSrdSpecificationOutput { + return i.ToLaunchTemplateNetworkInterfaceEnaSrdSpecificationOutputWithContext(context.Background()) +} + +func (i LaunchTemplateNetworkInterfaceEnaSrdSpecificationArgs) ToLaunchTemplateNetworkInterfaceEnaSrdSpecificationOutputWithContext(ctx context.Context) LaunchTemplateNetworkInterfaceEnaSrdSpecificationOutput { + return pulumi.ToOutputWithContext(ctx, i).(LaunchTemplateNetworkInterfaceEnaSrdSpecificationOutput) +} + +func (i LaunchTemplateNetworkInterfaceEnaSrdSpecificationArgs) ToLaunchTemplateNetworkInterfaceEnaSrdSpecificationPtrOutput() LaunchTemplateNetworkInterfaceEnaSrdSpecificationPtrOutput { + return i.ToLaunchTemplateNetworkInterfaceEnaSrdSpecificationPtrOutputWithContext(context.Background()) +} + +func (i LaunchTemplateNetworkInterfaceEnaSrdSpecificationArgs) ToLaunchTemplateNetworkInterfaceEnaSrdSpecificationPtrOutputWithContext(ctx context.Context) LaunchTemplateNetworkInterfaceEnaSrdSpecificationPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(LaunchTemplateNetworkInterfaceEnaSrdSpecificationOutput).ToLaunchTemplateNetworkInterfaceEnaSrdSpecificationPtrOutputWithContext(ctx) +} + +// LaunchTemplateNetworkInterfaceEnaSrdSpecificationPtrInput is an input type that accepts LaunchTemplateNetworkInterfaceEnaSrdSpecificationArgs, LaunchTemplateNetworkInterfaceEnaSrdSpecificationPtr and LaunchTemplateNetworkInterfaceEnaSrdSpecificationPtrOutput values. 
+// You can construct a concrete instance of `LaunchTemplateNetworkInterfaceEnaSrdSpecificationPtrInput` via: +// +// LaunchTemplateNetworkInterfaceEnaSrdSpecificationArgs{...} +// +// or: +// +// nil +type LaunchTemplateNetworkInterfaceEnaSrdSpecificationPtrInput interface { + pulumi.Input + + ToLaunchTemplateNetworkInterfaceEnaSrdSpecificationPtrOutput() LaunchTemplateNetworkInterfaceEnaSrdSpecificationPtrOutput + ToLaunchTemplateNetworkInterfaceEnaSrdSpecificationPtrOutputWithContext(context.Context) LaunchTemplateNetworkInterfaceEnaSrdSpecificationPtrOutput +} + +type launchTemplateNetworkInterfaceEnaSrdSpecificationPtrType LaunchTemplateNetworkInterfaceEnaSrdSpecificationArgs + +func LaunchTemplateNetworkInterfaceEnaSrdSpecificationPtr(v *LaunchTemplateNetworkInterfaceEnaSrdSpecificationArgs) LaunchTemplateNetworkInterfaceEnaSrdSpecificationPtrInput { + return (*launchTemplateNetworkInterfaceEnaSrdSpecificationPtrType)(v) +} + +func (*launchTemplateNetworkInterfaceEnaSrdSpecificationPtrType) ElementType() reflect.Type { + return reflect.TypeOf((**LaunchTemplateNetworkInterfaceEnaSrdSpecification)(nil)).Elem() +} + +func (i *launchTemplateNetworkInterfaceEnaSrdSpecificationPtrType) ToLaunchTemplateNetworkInterfaceEnaSrdSpecificationPtrOutput() LaunchTemplateNetworkInterfaceEnaSrdSpecificationPtrOutput { + return i.ToLaunchTemplateNetworkInterfaceEnaSrdSpecificationPtrOutputWithContext(context.Background()) +} + +func (i *launchTemplateNetworkInterfaceEnaSrdSpecificationPtrType) ToLaunchTemplateNetworkInterfaceEnaSrdSpecificationPtrOutputWithContext(ctx context.Context) LaunchTemplateNetworkInterfaceEnaSrdSpecificationPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(LaunchTemplateNetworkInterfaceEnaSrdSpecificationPtrOutput) +} + +type LaunchTemplateNetworkInterfaceEnaSrdSpecificationOutput struct{ *pulumi.OutputState } + +func (LaunchTemplateNetworkInterfaceEnaSrdSpecificationOutput) ElementType() reflect.Type { + return reflect.TypeOf((*LaunchTemplateNetworkInterfaceEnaSrdSpecification)(nil)).Elem() +} + +func (o LaunchTemplateNetworkInterfaceEnaSrdSpecificationOutput) ToLaunchTemplateNetworkInterfaceEnaSrdSpecificationOutput() LaunchTemplateNetworkInterfaceEnaSrdSpecificationOutput { + return o +} + +func (o LaunchTemplateNetworkInterfaceEnaSrdSpecificationOutput) ToLaunchTemplateNetworkInterfaceEnaSrdSpecificationOutputWithContext(ctx context.Context) LaunchTemplateNetworkInterfaceEnaSrdSpecificationOutput { + return o +} + +func (o LaunchTemplateNetworkInterfaceEnaSrdSpecificationOutput) ToLaunchTemplateNetworkInterfaceEnaSrdSpecificationPtrOutput() LaunchTemplateNetworkInterfaceEnaSrdSpecificationPtrOutput { + return o.ToLaunchTemplateNetworkInterfaceEnaSrdSpecificationPtrOutputWithContext(context.Background()) +} + +func (o LaunchTemplateNetworkInterfaceEnaSrdSpecificationOutput) ToLaunchTemplateNetworkInterfaceEnaSrdSpecificationPtrOutputWithContext(ctx context.Context) LaunchTemplateNetworkInterfaceEnaSrdSpecificationPtrOutput { + return o.ApplyTWithContext(ctx, func(_ context.Context, v LaunchTemplateNetworkInterfaceEnaSrdSpecification) *LaunchTemplateNetworkInterfaceEnaSrdSpecification { + return &v + }).(LaunchTemplateNetworkInterfaceEnaSrdSpecificationPtrOutput) +} + +// Whether to enable ENA Express. ENA Express uses AWS Scalable Reliable Datagram (SRD) technology to improve the performance of TCP traffic. 
+func (o LaunchTemplateNetworkInterfaceEnaSrdSpecificationOutput) EnaSrdEnabled() pulumi.BoolPtrOutput { + return o.ApplyT(func(v LaunchTemplateNetworkInterfaceEnaSrdSpecification) *bool { return v.EnaSrdEnabled }).(pulumi.BoolPtrOutput) +} + +// Configuration for ENA Express UDP optimization. See details below. +func (o LaunchTemplateNetworkInterfaceEnaSrdSpecificationOutput) EnaSrdUdpSpecification() LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationPtrOutput { + return o.ApplyT(func(v LaunchTemplateNetworkInterfaceEnaSrdSpecification) *LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecification { + return v.EnaSrdUdpSpecification + }).(LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationPtrOutput) +} + +type LaunchTemplateNetworkInterfaceEnaSrdSpecificationPtrOutput struct{ *pulumi.OutputState } + +func (LaunchTemplateNetworkInterfaceEnaSrdSpecificationPtrOutput) ElementType() reflect.Type { + return reflect.TypeOf((**LaunchTemplateNetworkInterfaceEnaSrdSpecification)(nil)).Elem() +} + +func (o LaunchTemplateNetworkInterfaceEnaSrdSpecificationPtrOutput) ToLaunchTemplateNetworkInterfaceEnaSrdSpecificationPtrOutput() LaunchTemplateNetworkInterfaceEnaSrdSpecificationPtrOutput { + return o +} + +func (o LaunchTemplateNetworkInterfaceEnaSrdSpecificationPtrOutput) ToLaunchTemplateNetworkInterfaceEnaSrdSpecificationPtrOutputWithContext(ctx context.Context) LaunchTemplateNetworkInterfaceEnaSrdSpecificationPtrOutput { + return o +} + +func (o LaunchTemplateNetworkInterfaceEnaSrdSpecificationPtrOutput) Elem() LaunchTemplateNetworkInterfaceEnaSrdSpecificationOutput { + return o.ApplyT(func(v *LaunchTemplateNetworkInterfaceEnaSrdSpecification) LaunchTemplateNetworkInterfaceEnaSrdSpecification { + if v != nil { + return *v + } + var ret LaunchTemplateNetworkInterfaceEnaSrdSpecification + return ret + }).(LaunchTemplateNetworkInterfaceEnaSrdSpecificationOutput) +} + +// Whether to enable ENA Express. ENA Express uses AWS Scalable Reliable Datagram (SRD) technology to improve the performance of TCP traffic. +func (o LaunchTemplateNetworkInterfaceEnaSrdSpecificationPtrOutput) EnaSrdEnabled() pulumi.BoolPtrOutput { + return o.ApplyT(func(v *LaunchTemplateNetworkInterfaceEnaSrdSpecification) *bool { + if v == nil { + return nil + } + return v.EnaSrdEnabled + }).(pulumi.BoolPtrOutput) +} + +// Configuration for ENA Express UDP optimization. See details below. +func (o LaunchTemplateNetworkInterfaceEnaSrdSpecificationPtrOutput) EnaSrdUdpSpecification() LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationPtrOutput { + return o.ApplyT(func(v *LaunchTemplateNetworkInterfaceEnaSrdSpecification) *LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecification { + if v == nil { + return nil + } + return v.EnaSrdUdpSpecification + }).(LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationPtrOutput) +} + +type LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecification struct { + // Whether to enable UDP traffic optimization through ENA Express. Requires `enaSrdEnabled` to be `true`. + // + // NOTE: ENA Express requires [specific instance types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/enhanced-networking-ena-express.html#ena-express-requirements) and minimum bandwidth of 25 Gbps. 
+ EnaSrdUdpEnabled *bool `pulumi:"enaSrdUdpEnabled"` +} + +// LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationInput is an input type that accepts LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationArgs and LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationOutput values. +// You can construct a concrete instance of `LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationInput` via: +// +// LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationArgs{...} +type LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationInput interface { + pulumi.Input + + ToLaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationOutput() LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationOutput + ToLaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationOutputWithContext(context.Context) LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationOutput +} + +type LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationArgs struct { + // Whether to enable UDP traffic optimization through ENA Express. Requires `enaSrdEnabled` to be `true`. + // + // NOTE: ENA Express requires [specific instance types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/enhanced-networking-ena-express.html#ena-express-requirements) and minimum bandwidth of 25 Gbps. + EnaSrdUdpEnabled pulumi.BoolPtrInput `pulumi:"enaSrdUdpEnabled"` +} + +func (LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationArgs) ElementType() reflect.Type { + return reflect.TypeOf((*LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecification)(nil)).Elem() +} + +func (i LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationArgs) ToLaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationOutput() LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationOutput { + return i.ToLaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationOutputWithContext(context.Background()) +} + +func (i LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationArgs) ToLaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationOutputWithContext(ctx context.Context) LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationOutput { + return pulumi.ToOutputWithContext(ctx, i).(LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationOutput) +} + +func (i LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationArgs) ToLaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationPtrOutput() LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationPtrOutput { + return i.ToLaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationPtrOutputWithContext(context.Background()) +} + +func (i LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationArgs) ToLaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationPtrOutputWithContext(ctx context.Context) LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationOutput).ToLaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationPtrOutputWithContext(ctx) +} + +// LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationPtrInput is an input type that accepts 
LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationArgs, LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationPtr and LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationPtrOutput values. +// You can construct a concrete instance of `LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationPtrInput` via: +// +// LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationArgs{...} +// +// or: +// +// nil +type LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationPtrInput interface { + pulumi.Input + + ToLaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationPtrOutput() LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationPtrOutput + ToLaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationPtrOutputWithContext(context.Context) LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationPtrOutput +} + +type launchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationPtrType LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationArgs + +func LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationPtr(v *LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationArgs) LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationPtrInput { + return (*launchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationPtrType)(v) +} + +func (*launchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationPtrType) ElementType() reflect.Type { + return reflect.TypeOf((**LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecification)(nil)).Elem() +} + +func (i *launchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationPtrType) ToLaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationPtrOutput() LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationPtrOutput { + return i.ToLaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationPtrOutputWithContext(context.Background()) +} + +func (i *launchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationPtrType) ToLaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationPtrOutputWithContext(ctx context.Context) LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationPtrOutput) +} + +type LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationOutput struct{ *pulumi.OutputState } + +func (LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationOutput) ElementType() reflect.Type { + return reflect.TypeOf((*LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecification)(nil)).Elem() +} + +func (o LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationOutput) ToLaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationOutput() LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationOutput { + return o +} + +func (o LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationOutput) ToLaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationOutputWithContext(ctx context.Context) LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationOutput { + return o +} + +func (o LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationOutput) 
ToLaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationPtrOutput() LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationPtrOutput { + return o.ToLaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationPtrOutputWithContext(context.Background()) +} + +func (o LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationOutput) ToLaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationPtrOutputWithContext(ctx context.Context) LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationPtrOutput { + return o.ApplyTWithContext(ctx, func(_ context.Context, v LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecification) *LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecification { + return &v + }).(LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationPtrOutput) +} + +// Whether to enable UDP traffic optimization through ENA Express. Requires `enaSrdEnabled` to be `true`. +// +// NOTE: ENA Express requires [specific instance types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/enhanced-networking-ena-express.html#ena-express-requirements) and minimum bandwidth of 25 Gbps. +func (o LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationOutput) EnaSrdUdpEnabled() pulumi.BoolPtrOutput { + return o.ApplyT(func(v LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecification) *bool { + return v.EnaSrdUdpEnabled + }).(pulumi.BoolPtrOutput) +} + +type LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationPtrOutput struct{ *pulumi.OutputState } + +func (LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationPtrOutput) ElementType() reflect.Type { + return reflect.TypeOf((**LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecification)(nil)).Elem() +} + +func (o LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationPtrOutput) ToLaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationPtrOutput() LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationPtrOutput { + return o +} + +func (o LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationPtrOutput) ToLaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationPtrOutputWithContext(ctx context.Context) LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationPtrOutput { + return o +} + +func (o LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationPtrOutput) Elem() LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationOutput { + return o.ApplyT(func(v *LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecification) LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecification { + if v != nil { + return *v + } + var ret LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecification + return ret + }).(LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationOutput) +} + +// Whether to enable UDP traffic optimization through ENA Express. Requires `enaSrdEnabled` to be `true`. +// +// NOTE: ENA Express requires [specific instance types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/enhanced-networking-ena-express.html#ena-express-requirements) and minimum bandwidth of 25 Gbps. 
+func (o LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationPtrOutput) EnaSrdUdpEnabled() pulumi.BoolPtrOutput { + return o.ApplyT(func(v *LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecification) *bool { + if v == nil { + return nil + } + return v.EnaSrdUdpEnabled + }).(pulumi.BoolPtrOutput) +} + type LaunchTemplatePlacement struct { // The affinity setting for an instance on a Dedicated Host. Affinity *string `pulumi:"affinity"` @@ -58231,9 +58623,9 @@ type GetVpcIpamsIpam struct { // ID of the default public scope. PublicDefaultScopeId string `pulumi:"publicDefaultScopeId"` // Number of resource discovery associations. - ResourceDiscoveryAssociationCount float64 `pulumi:"resourceDiscoveryAssociationCount"` + ResourceDiscoveryAssociationCount int `pulumi:"resourceDiscoveryAssociationCount"` // Number of scopes on this IPAM. - ScopeCount float64 `pulumi:"scopeCount"` + ScopeCount int `pulumi:"scopeCount"` // Current state of the IPAM. State string `pulumi:"state"` // State message of the IPAM. @@ -58277,9 +58669,9 @@ type GetVpcIpamsIpamArgs struct { // ID of the default public scope. PublicDefaultScopeId pulumi.StringInput `pulumi:"publicDefaultScopeId"` // Number of resource discovery associations. - ResourceDiscoveryAssociationCount pulumi.Float64Input `pulumi:"resourceDiscoveryAssociationCount"` + ResourceDiscoveryAssociationCount pulumi.IntInput `pulumi:"resourceDiscoveryAssociationCount"` // Number of scopes on this IPAM. - ScopeCount pulumi.Float64Input `pulumi:"scopeCount"` + ScopeCount pulumi.IntInput `pulumi:"scopeCount"` // Current state of the IPAM. State pulumi.StringInput `pulumi:"state"` // State message of the IPAM. @@ -58395,13 +58787,13 @@ func (o GetVpcIpamsIpamOutput) PublicDefaultScopeId() pulumi.StringOutput { } // Number of resource discovery associations. -func (o GetVpcIpamsIpamOutput) ResourceDiscoveryAssociationCount() pulumi.Float64Output { - return o.ApplyT(func(v GetVpcIpamsIpam) float64 { return v.ResourceDiscoveryAssociationCount }).(pulumi.Float64Output) +func (o GetVpcIpamsIpamOutput) ResourceDiscoveryAssociationCount() pulumi.IntOutput { + return o.ApplyT(func(v GetVpcIpamsIpam) int { return v.ResourceDiscoveryAssociationCount }).(pulumi.IntOutput) } // Number of scopes on this IPAM. -func (o GetVpcIpamsIpamOutput) ScopeCount() pulumi.Float64Output { - return o.ApplyT(func(v GetVpcIpamsIpam) float64 { return v.ScopeCount }).(pulumi.Float64Output) +func (o GetVpcIpamsIpamOutput) ScopeCount() pulumi.IntOutput { + return o.ApplyT(func(v GetVpcIpamsIpam) int { return v.ScopeCount }).(pulumi.IntOutput) } // Current state of the IPAM. 
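The EnaSrdSpecification plumbing added above lets launch template network interfaces opt into ENA Express, with the UDP optimization gated behind the TCP-level switch. A minimal sketch, assuming a hypothetical instance type that satisfies the ENA Express requirements cited in the comments:

package main

import (
	"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/ec2"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := ec2.NewLaunchTemplate(ctx, "example", &ec2.LaunchTemplateArgs{
			InstanceType: pulumi.String("c6gn.16xlarge"), // hypothetical ENA Express-capable type
			NetworkInterfaces: ec2.LaunchTemplateNetworkInterfaceArray{
				&ec2.LaunchTemplateNetworkInterfaceArgs{
					DeviceIndex: pulumi.Int(0),
					EnaSrdSpecification: &ec2.LaunchTemplateNetworkInterfaceEnaSrdSpecificationArgs{
						EnaSrdEnabled: pulumi.Bool(true),
						EnaSrdUdpSpecification: &ec2.LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationArgs{
							EnaSrdUdpEnabled: pulumi.Bool(true), // valid only while EnaSrdEnabled is true
						},
					},
				},
			},
		})
		return err
	})
}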
@@ -59528,6 +59920,10 @@ func init() { pulumi.RegisterInputType(reflect.TypeOf((*LaunchTemplateNetworkInterfaceArrayInput)(nil)).Elem(), LaunchTemplateNetworkInterfaceArray{}) pulumi.RegisterInputType(reflect.TypeOf((*LaunchTemplateNetworkInterfaceConnectionTrackingSpecificationInput)(nil)).Elem(), LaunchTemplateNetworkInterfaceConnectionTrackingSpecificationArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*LaunchTemplateNetworkInterfaceConnectionTrackingSpecificationPtrInput)(nil)).Elem(), LaunchTemplateNetworkInterfaceConnectionTrackingSpecificationArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*LaunchTemplateNetworkInterfaceEnaSrdSpecificationInput)(nil)).Elem(), LaunchTemplateNetworkInterfaceEnaSrdSpecificationArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*LaunchTemplateNetworkInterfaceEnaSrdSpecificationPtrInput)(nil)).Elem(), LaunchTemplateNetworkInterfaceEnaSrdSpecificationArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationInput)(nil)).Elem(), LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationArgs{}) + pulumi.RegisterInputType(reflect.TypeOf((*LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationPtrInput)(nil)).Elem(), LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*LaunchTemplatePlacementInput)(nil)).Elem(), LaunchTemplatePlacementArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*LaunchTemplatePlacementPtrInput)(nil)).Elem(), LaunchTemplatePlacementArgs{}) pulumi.RegisterInputType(reflect.TypeOf((*LaunchTemplatePrivateDnsNameOptionsInput)(nil)).Elem(), LaunchTemplatePrivateDnsNameOptionsArgs{}) @@ -60403,6 +60799,10 @@ func init() { pulumi.RegisterOutputType(LaunchTemplateNetworkInterfaceArrayOutput{}) pulumi.RegisterOutputType(LaunchTemplateNetworkInterfaceConnectionTrackingSpecificationOutput{}) pulumi.RegisterOutputType(LaunchTemplateNetworkInterfaceConnectionTrackingSpecificationPtrOutput{}) + pulumi.RegisterOutputType(LaunchTemplateNetworkInterfaceEnaSrdSpecificationOutput{}) + pulumi.RegisterOutputType(LaunchTemplateNetworkInterfaceEnaSrdSpecificationPtrOutput{}) + pulumi.RegisterOutputType(LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationOutput{}) + pulumi.RegisterOutputType(LaunchTemplateNetworkInterfaceEnaSrdSpecificationEnaSrdUdpSpecificationPtrOutput{}) pulumi.RegisterOutputType(LaunchTemplatePlacementOutput{}) pulumi.RegisterOutputType(LaunchTemplatePlacementPtrOutput{}) pulumi.RegisterOutputType(LaunchTemplatePrivateDnsNameOptionsOutput{}) diff --git a/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/ec2/spotInstanceRequest.go b/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/ec2/spotInstanceRequest.go index 0830e4729..911089023 100644 --- a/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/ec2/spotInstanceRequest.go +++ b/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/ec2/spotInstanceRequest.go @@ -88,13 +88,13 @@ type SpotInstanceRequest struct { CapacityReservationSpecification SpotInstanceRequestCapacityReservationSpecificationOutput `pulumi:"capacityReservationSpecification"` // Sets the number of CPU cores for an instance. 
This option is only supported on creation of instance types that support CPU Options [CPU Cores and Threads Per CPU Core Per Instance Type](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html#cpu-options-supported-instances-values) - specifying this option for unsupported instance types will return an error from the EC2 API. // - // Deprecated: use 'cpu_options' argument instead + // Deprecated: cpu_core_count is deprecated. Use cpuOptions instead. CpuCoreCount pulumi.IntOutput `pulumi:"cpuCoreCount"` // The CPU options for the instance. See CPU Options below for more details. CpuOptions SpotInstanceRequestCpuOptionsOutput `pulumi:"cpuOptions"` // If set to 1, hyperthreading is disabled on the launched instance. Defaults to 2 if not set. See [Optimizing CPU Options](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html) for more information. // - // Deprecated: use 'cpu_options' argument instead + // Deprecated: cpu_threads_per_core is deprecated. Use cpuOptions instead. CpuThreadsPerCore pulumi.IntOutput `pulumi:"cpuThreadsPerCore"` // Configuration block for customizing the credit specification of the instance. See Credit Specification below for more details. This provider will only perform drift detection of its value when present in a configuration. Removing this configuration on existing instances will only stop managing it. It will not change the configuration back to the default for the instance type. CreditSpecification SpotInstanceRequestCreditSpecificationPtrOutput `pulumi:"creditSpecification"` @@ -273,13 +273,13 @@ type spotInstanceRequestState struct { CapacityReservationSpecification *SpotInstanceRequestCapacityReservationSpecification `pulumi:"capacityReservationSpecification"` // Sets the number of CPU cores for an instance. This option is only supported on creation of instance types that support CPU Options [CPU Cores and Threads Per CPU Core Per Instance Type](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html#cpu-options-supported-instances-values) - specifying this option for unsupported instance types will return an error from the EC2 API. // - // Deprecated: use 'cpu_options' argument instead + // Deprecated: cpu_core_count is deprecated. Use cpuOptions instead. CpuCoreCount *int `pulumi:"cpuCoreCount"` // The CPU options for the instance. See CPU Options below for more details. CpuOptions *SpotInstanceRequestCpuOptions `pulumi:"cpuOptions"` // If set to 1, hyperthreading is disabled on the launched instance. Defaults to 2 if not set. See [Optimizing CPU Options](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html) for more information. // - // Deprecated: use 'cpu_options' argument instead + // Deprecated: cpu_threads_per_core is deprecated. Use cpuOptions instead. CpuThreadsPerCore *int `pulumi:"cpuThreadsPerCore"` // Configuration block for customizing the credit specification of the instance. See Credit Specification below for more details. This provider will only perform drift detection of its value when present in a configuration. Removing this configuration on existing instances will only stop managing it. It will not change the configuration back to the default for the instance type.
CreditSpecification *SpotInstanceRequestCreditSpecification `pulumi:"creditSpecification"` @@ -429,13 +429,13 @@ type SpotInstanceRequestState struct { CapacityReservationSpecification SpotInstanceRequestCapacityReservationSpecificationPtrInput // Sets the number of CPU cores for an instance. This option is only supported on creation of instance types that support CPU Options [CPU Cores and Threads Per CPU Core Per Instance Type](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html#cpu-options-supported-instances-values) - specifying this option for unsupported instance types will return an error from the EC2 API. // - // Deprecated: use 'cpu_options' argument instead + // Deprecated: cpu_core_count is deprecated. Use cpuOptions instead. CpuCoreCount pulumi.IntPtrInput // The CPU options for the instance. See CPU Options below for more details. CpuOptions SpotInstanceRequestCpuOptionsPtrInput // If set to 1, hyperthreading is disabled on the launched instance. Defaults to 2 if not set. See [Optimizing CPU Options](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html) for more information. // - // Deprecated: use 'cpu_options' argument instead + // Deprecated: cpu_threads_per_core is deprecated. Use cpuOptions instead. CpuThreadsPerCore pulumi.IntPtrInput // Configuration block for customizing the credit specification of the instance. See Credit Specification below for more details. This provider will only perform drift detection of its value when present in a configuration. Removing this configuration on existing instances will only stop managing it. It will not change the configuration back to the default for the instance type. CreditSpecification SpotInstanceRequestCreditSpecificationPtrInput @@ -588,13 +588,13 @@ type spotInstanceRequestArgs struct { CapacityReservationSpecification *SpotInstanceRequestCapacityReservationSpecification `pulumi:"capacityReservationSpecification"` // Sets the number of CPU cores for an instance. This option is only supported on creation of instance types that support CPU Options [CPU Cores and Threads Per CPU Core Per Instance Type](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html#cpu-options-supported-instances-values) - specifying this option for unsupported instance types will return an error from the EC2 API. // - // Deprecated: use 'cpu_options' argument instead + // Deprecated: cpu_core_count is deprecated. Use cpuOptions instead. CpuCoreCount *int `pulumi:"cpuCoreCount"` // The CPU options for the instance. See CPU Options below for more details. CpuOptions *SpotInstanceRequestCpuOptions `pulumi:"cpuOptions"` // If set to 1, hyperthreading is disabled on the launched instance. Defaults to 2 if not set. See [Optimizing CPU Options](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html) for more information. // - // Deprecated: use 'cpu_options' argument instead + // Deprecated: cpu_threads_per_core is deprecated. Use cpuOptions instead. CpuThreadsPerCore *int `pulumi:"cpuThreadsPerCore"` // Configuration block for customizing the credit specification of the instance. See Credit Specification below for more details. This provider will only perform drift detection of its value when present in a configuration. Removing this configuration on existing instances will only stop managing it. It will not change the configuration back to the default for the instance type.
CreditSpecification *SpotInstanceRequestCreditSpecification `pulumi:"creditSpecification"` @@ -716,13 +716,13 @@ type SpotInstanceRequestArgs struct { CapacityReservationSpecification SpotInstanceRequestCapacityReservationSpecificationPtrInput // Sets the number of CPU cores for an instance. This option is only supported on creation of instance types that support CPU Options [CPU Cores and Threads Per CPU Core Per Instance Type](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html#cpu-options-supported-instances-values) - specifying this option for unsupported instance types will return an error from the EC2 API. // - // Deprecated: use 'cpu_options' argument instead + // Deprecated: cpu_core_count is deprecated. Use cpuOptions instead. CpuCoreCount pulumi.IntPtrInput // The CPU options for the instance. See CPU Options below for more details. CpuOptions SpotInstanceRequestCpuOptionsPtrInput // If set to 1, hyperthreading is disabled on the launched instance. Defaults to 2 if not set. See [Optimizing CPU Options](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html) for more information. // - // Deprecated: use 'cpu_options' argument instead + // Deprecated: cpu_threads_per_core is deprecated. Use cpuOptions instead. CpuThreadsPerCore pulumi.IntPtrInput // Configuration block for customizing the credit specification of the instance. See Credit Specification below for more details. This provider will only perform drift detection of its value when present in a configuration. Removing this configuration on existing instances will only stop managing it. It will not change the configuration back to the default for the instance type. CreditSpecification SpotInstanceRequestCreditSpecificationPtrInput @@ -950,7 +950,7 @@ func (o SpotInstanceRequestOutput) CapacityReservationSpecification() SpotInstan // Sets the number of CPU cores for an instance. This option is only supported on creation of instance types that support CPU Options [CPU Cores and Threads Per CPU Core Per Instance Type](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html#cpu-options-supported-instances-values) - specifying this option for unsupported instance types will return an error from the EC2 API. // -// Deprecated: use 'cpu_options' argument instead +// Deprecated: cpu_core_count is deprecated. Use cpuOptions instead. func (o SpotInstanceRequestOutput) CpuCoreCount() pulumi.IntOutput { return o.ApplyT(func(v *SpotInstanceRequest) pulumi.IntOutput { return v.CpuCoreCount }).(pulumi.IntOutput) } @@ -962,7 +962,7 @@ func (o SpotInstanceRequestOutput) CpuOptions() SpotInstanceRequestCpuOptionsOut // If set to 1, hyperthreading is disabled on the launched instance. Defaults to 2 if not set. See [Optimizing CPU Options](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html) for more information. // -// Deprecated: use 'cpu_options' argument instead +// Deprecated: cpu_threads_per_core is deprecated. Use cpuOptions instead.
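To make the deprecation notices above concrete, a migration sketch: the same core/thread shape expressed through the `cpuOptions` block instead of the deprecated top-level arguments. The `CoreCount` and `ThreadsPerCore` field names are assumed to mirror the AWS `cpu_options` block, and the AMI ID is a placeholder:

package main

import (
	"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/ec2"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Was: CpuCoreCount: pulumi.Int(4), CpuThreadsPerCore: pulumi.Int(1)
		_, err := ec2.NewSpotInstanceRequest(ctx, "example", &ec2.SpotInstanceRequestArgs{
			Ami:          pulumi.String("ami-0123456789abcdef0"), // placeholder AMI ID
			InstanceType: pulumi.String("c6i.2xlarge"),
			CpuOptions: &ec2.SpotInstanceRequestCpuOptionsArgs{
				CoreCount:      pulumi.Int(4),
				ThreadsPerCore: pulumi.Int(1), // disables hyperthreading
			},
		})
		return err
	})
}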
func (o SpotInstanceRequestOutput) CpuThreadsPerCore() pulumi.IntOutput { return o.ApplyT(func(v *SpotInstanceRequest) pulumi.IntOutput { return v.CpuThreadsPerCore }).(pulumi.IntOutput) } diff --git a/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/ec2/vpcEndpoint.go b/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/ec2/vpcEndpoint.go index efdfba5e3..045a13996 100644 --- a/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/ec2/vpcEndpoint.go +++ b/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/ec2/vpcEndpoint.go @@ -307,7 +307,7 @@ type VpcEndpoint struct { // The ID of one or more security groups to associate with the network interface. Applicable for endpoints of type `Interface`. // If no security groups are specified, the VPC's [default security group](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html#DefaultSecurityGroup) is associated with the endpoint. SecurityGroupIds pulumi.StringArrayOutput `pulumi:"securityGroupIds"` - // The service name. For AWS services the service name is usually in the form `com.amazonaws.<region>.<service>` (the SageMaker Notebook service is an exception to this rule, the service name is in the form `aws.sagemaker.<region>.notebook`). Exactly one of `resourceConfigurationArn`, `serviceName` or `serviceNetworkArn` is required. + // The service name. For AWS services the service name is usually in the form `com.amazonaws.<region>.<service>` (the SageMaker AI Notebook service is an exception to this rule, the service name is in the form `aws.sagemaker.<region>.notebook`). Exactly one of `resourceConfigurationArn`, `serviceName` or `serviceNetworkArn` is required. ServiceName pulumi.StringPtrOutput `pulumi:"serviceName"` // The ARN of a Service Network to connect this VPC Endpoint to. Exactly one of `resourceConfigurationArn`, `serviceName` or `serviceNetworkArn` is required. ServiceNetworkArn pulumi.StringPtrOutput `pulumi:"serviceNetworkArn"` @@ -396,7 +396,7 @@ type vpcEndpointState struct { // The ID of one or more security groups to associate with the network interface. Applicable for endpoints of type `Interface`. // If no security groups are specified, the VPC's [default security group](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html#DefaultSecurityGroup) is associated with the endpoint. SecurityGroupIds []string `pulumi:"securityGroupIds"` - // The service name. For AWS services the service name is usually in the form `com.amazonaws.<region>.<service>` (the SageMaker Notebook service is an exception to this rule, the service name is in the form `aws.sagemaker.<region>.notebook`). Exactly one of `resourceConfigurationArn`, `serviceName` or `serviceNetworkArn` is required. + // The service name. For AWS services the service name is usually in the form `com.amazonaws.<region>.<service>` (the SageMaker AI Notebook service is an exception to this rule, the service name is in the form `aws.sagemaker.<region>.notebook`). Exactly one of `resourceConfigurationArn`, `serviceName` or `serviceNetworkArn` is required. ServiceName *string `pulumi:"serviceName"` // The ARN of a Service Network to connect this VPC Endpoint to. Exactly one of `resourceConfigurationArn`, `serviceName` or `serviceNetworkArn` is required. ServiceNetworkArn *string `pulumi:"serviceNetworkArn"` @@ -453,7 +453,7 @@ type VpcEndpointState struct { // The ID of one or more security groups to associate with the network interface. Applicable for endpoints of type `Interface`.
// If no security groups are specified, the VPC's [default security group](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html#DefaultSecurityGroup) is associated with the endpoint. SecurityGroupIds pulumi.StringArrayInput - // The service name. For AWS services the service name is usually in the form `com.amazonaws.<region>.<service>` (the SageMaker Notebook service is an exception to this rule, the service name is in the form `aws.sagemaker.<region>.notebook`). Exactly one of `resourceConfigurationArn`, `serviceName` or `serviceNetworkArn` is required. + // The service name. For AWS services the service name is usually in the form `com.amazonaws.<region>.<service>` (the SageMaker AI Notebook service is an exception to this rule, the service name is in the form `aws.sagemaker.<region>.notebook`). Exactly one of `resourceConfigurationArn`, `serviceName` or `serviceNetworkArn` is required. ServiceName pulumi.StringPtrInput // The ARN of a Service Network to connect this VPC Endpoint to. Exactly one of `resourceConfigurationArn`, `serviceName` or `serviceNetworkArn` is required. ServiceNetworkArn pulumi.StringPtrInput @@ -500,7 +500,7 @@ type vpcEndpointArgs struct { // The ID of one or more security groups to associate with the network interface. Applicable for endpoints of type `Interface`. // If no security groups are specified, the VPC's [default security group](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html#DefaultSecurityGroup) is associated with the endpoint. SecurityGroupIds []string `pulumi:"securityGroupIds"` - // The service name. For AWS services the service name is usually in the form `com.amazonaws.<region>.<service>` (the SageMaker Notebook service is an exception to this rule, the service name is in the form `aws.sagemaker.<region>.notebook`). Exactly one of `resourceConfigurationArn`, `serviceName` or `serviceNetworkArn` is required. + // The service name. For AWS services the service name is usually in the form `com.amazonaws.<region>.<service>` (the SageMaker AI Notebook service is an exception to this rule, the service name is in the form `aws.sagemaker.<region>.notebook`). Exactly one of `resourceConfigurationArn`, `serviceName` or `serviceNetworkArn` is required. ServiceName *string `pulumi:"serviceName"` // The ARN of a Service Network to connect this VPC Endpoint to. Exactly one of `resourceConfigurationArn`, `serviceName` or `serviceNetworkArn` is required. ServiceNetworkArn *string `pulumi:"serviceNetworkArn"` @@ -538,7 +538,7 @@ type VpcEndpointArgs struct { // The ID of one or more security groups to associate with the network interface. Applicable for endpoints of type `Interface`. // If no security groups are specified, the VPC's [default security group](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html#DefaultSecurityGroup) is associated with the endpoint. SecurityGroupIds pulumi.StringArrayInput - // The service name. For AWS services the service name is usually in the form `com.amazonaws.<region>.<service>` (the SageMaker Notebook service is an exception to this rule, the service name is in the form `aws.sagemaker.<region>.notebook`). Exactly one of `resourceConfigurationArn`, `serviceName` or `serviceNetworkArn` is required. + // The service name. For AWS services the service name is usually in the form `com.amazonaws.<region>.<service>` (the SageMaker AI Notebook service is an exception to this rule, the service name is in the form `aws.sagemaker.<region>.notebook`). Exactly one of `resourceConfigurationArn`, `serviceName` or `serviceNetworkArn` is required.
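As a concrete instance of the `com.amazonaws.<region>.<service>` naming convention described above, a hedged sketch of an Interface endpoint; the VPC ID is a placeholder:

package main

import (
	"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/ec2"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Interface endpoint for S3 in us-west-2; serviceName follows the
		// com.amazonaws.<region>.<service> pattern.
		_, err := ec2.NewVpcEndpoint(ctx, "s3Interface", &ec2.VpcEndpointArgs{
			VpcId:           pulumi.String("vpc-0123456789abcdef0"), // placeholder VPC ID
			ServiceName:     pulumi.String("com.amazonaws.us-west-2.s3"),
			VpcEndpointType: pulumi.String("Interface"),
		})
		return err
	})
}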
ServiceName pulumi.StringPtrInput // The ARN of a Service Network to connect this VPC Endpoint to. Exactly one of `resourceConfigurationArn`, `serviceName` or `serviceNetworkArn` is required. ServiceNetworkArn pulumi.StringPtrInput @@ -720,7 +720,7 @@ func (o VpcEndpointOutput) SecurityGroupIds() pulumi.StringArrayOutput { return o.ApplyT(func(v *VpcEndpoint) pulumi.StringArrayOutput { return v.SecurityGroupIds }).(pulumi.StringArrayOutput) } -// The service name. For AWS services the service name is usually in the form `com.amazonaws.<region>.<service>` (the SageMaker Notebook service is an exception to this rule, the service name is in the form `aws.sagemaker.<region>.notebook`). Exactly one of `resourceConfigurationArn`, `serviceName` or `serviceNetworkArn` is required. +// The service name. For AWS services the service name is usually in the form `com.amazonaws.<region>.<service>` (the SageMaker AI Notebook service is an exception to this rule, the service name is in the form `aws.sagemaker.<region>.notebook`). Exactly one of `resourceConfigurationArn`, `serviceName` or `serviceNetworkArn` is required. func (o VpcEndpointOutput) ServiceName() pulumi.StringPtrOutput { return o.ApplyT(func(v *VpcEndpoint) pulumi.StringPtrOutput { return v.ServiceName }).(pulumi.StringPtrOutput) } diff --git a/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/ecs/pulumiTypes.go b/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/ecs/pulumiTypes.go index f07ce468e..3a9bc18f0 100644 --- a/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/ecs/pulumiTypes.go +++ b/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/ecs/pulumiTypes.go @@ -9037,7 +9037,9 @@ type GetTaskExecutionOverrides struct { Cpu *string `pulumi:"cpu"` // Amazon Resource Name (ARN) of the task execution role override for the task. ExecutionRoleArn *string `pulumi:"executionRoleArn"` - // Elastic Inference accelerator override for the task. See below. + // **DEPRECATED** Elastic Inference accelerator override for the task. See below. + // + // Deprecated: inference_accelerator_overrides is deprecated. AWS no longer supports the Elastic Inference service. InferenceAcceleratorOverrides []GetTaskExecutionOverridesInferenceAcceleratorOverride `pulumi:"inferenceAcceleratorOverrides"` // The memory override for the task. Memory *string `pulumi:"memory"` @@ -9063,7 +9065,9 @@ type GetTaskExecutionOverridesArgs struct { Cpu pulumi.StringPtrInput `pulumi:"cpu"` // Amazon Resource Name (ARN) of the task execution role override for the task. ExecutionRoleArn pulumi.StringPtrInput `pulumi:"executionRoleArn"` - // Elastic Inference accelerator override for the task. See below. + // **DEPRECATED** Elastic Inference accelerator override for the task. See below. + // + // Deprecated: inference_accelerator_overrides is deprecated. AWS no longer supports the Elastic Inference service. InferenceAcceleratorOverrides GetTaskExecutionOverridesInferenceAcceleratorOverrideArrayInput `pulumi:"inferenceAcceleratorOverrides"` // The memory override for the task. Memory pulumi.StringPtrInput `pulumi:"memory"` @@ -9165,7 +9169,9 @@ func (o GetTaskExecutionOverridesOutput) ExecutionRoleArn() pulumi.StringPtrOutp return o.ApplyT(func(v GetTaskExecutionOverrides) *string { return v.ExecutionRoleArn }).(pulumi.StringPtrOutput) } -// Elastic Inference accelerator override for the task. See below. +// **DEPRECATED** Elastic Inference accelerator override for the task. See below. +// +// Deprecated: inference_accelerator_overrides is deprecated. AWS no longer supports the Elastic Inference service.
func (o GetTaskExecutionOverridesOutput) InferenceAcceleratorOverrides() GetTaskExecutionOverridesInferenceAcceleratorOverrideArrayOutput { return o.ApplyT(func(v GetTaskExecutionOverrides) []GetTaskExecutionOverridesInferenceAcceleratorOverride { return v.InferenceAcceleratorOverrides @@ -9236,7 +9242,9 @@ func (o GetTaskExecutionOverridesPtrOutput) ExecutionRoleArn() pulumi.StringPtrO }).(pulumi.StringPtrOutput) } -// Elastic Inference accelerator override for the task. See below. +// **DEPRECATED** Elastic Inference accelerator override for the task. See below. +// +// Deprecated: inference_accelerator_overrides is deprecated. AWS no longer supports the Elastic Inference service. func (o GetTaskExecutionOverridesPtrOutput) InferenceAcceleratorOverrides() GetTaskExecutionOverridesInferenceAcceleratorOverrideArrayOutput { return o.ApplyT(func(v *GetTaskExecutionOverrides) []GetTaskExecutionOverridesInferenceAcceleratorOverride { if v == nil { diff --git a/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/iam/getPolicyDocument.go b/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/iam/getPolicyDocument.go index e6b3805ce..89aa53fc2 100644 --- a/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/iam/getPolicyDocument.go +++ b/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/iam/getPolicyDocument.go @@ -596,13 +596,13 @@ func GetPolicyDocument(ctx *pulumi.Context, args *GetPolicyDocumentArgs, opts .. // A collection of arguments for invoking getPolicyDocument. type GetPolicyDocumentArgs struct { - // Deprecated: Not used + // Deprecated: override_json is deprecated. This argument is retained only for backward compatibility with previous versions of this data source. OverrideJson *string `pulumi:"overrideJson"` // List of IAM policy documents that are merged together into the exported document. In merging, statements with non-blank `sid`s will override statements with the same `sid` from earlier documents in the list. Statements with non-blank `sid`s will also override statements with the same `sid` from `sourcePolicyDocuments`. Non-overriding statements will be added to the exported document. OverridePolicyDocuments []string `pulumi:"overridePolicyDocuments"` // ID for the policy document. PolicyId *string `pulumi:"policyId"` - // Deprecated: Not used + // Deprecated: source_json is deprecated. This argument is retained only for backward compatibility with previous versions of this data source. SourceJson *string `pulumi:"sourceJson"` // List of IAM policy documents that are merged together into the exported document. Statements defined in `sourcePolicyDocuments` must have unique `sid`s. Statements with the same `sid` from `overridePolicyDocuments` will override source statements. SourcePolicyDocuments []string `pulumi:"sourcePolicyDocuments"` @@ -620,11 +620,11 @@ type GetPolicyDocumentResult struct { Json string `pulumi:"json"` // Minified JSON policy document rendered based on the arguments above. MinifiedJson string `pulumi:"minifiedJson"` - // Deprecated: Not used + // Deprecated: override_json is deprecated. This argument is retained only for backward compatibility with previous versions of this data source. OverrideJson *string `pulumi:"overrideJson"` OverridePolicyDocuments []string `pulumi:"overridePolicyDocuments"` PolicyId *string `pulumi:"policyId"` - // Deprecated: Not used + // Deprecated: source_json is deprecated. This argument is retained only for backward compatibility with previous versions of this data source. 
SourceJson *string `pulumi:"sourceJson"` SourcePolicyDocuments []string `pulumi:"sourcePolicyDocuments"` Statements []GetPolicyDocumentStatement `pulumi:"statements"` @@ -642,13 +642,13 @@ func GetPolicyDocumentOutput(ctx *pulumi.Context, args GetPolicyDocumentOutputAr // A collection of arguments for invoking getPolicyDocument. type GetPolicyDocumentOutputArgs struct { - // Deprecated: Not used + // Deprecated: override_json is deprecated. This argument is retained only for backward compatibility with previous versions of this data source. OverrideJson pulumi.StringPtrInput `pulumi:"overrideJson"` // List of IAM policy documents that are merged together into the exported document. In merging, statements with non-blank `sid`s will override statements with the same `sid` from earlier documents in the list. Statements with non-blank `sid`s will also override statements with the same `sid` from `sourcePolicyDocuments`. Non-overriding statements will be added to the exported document. OverridePolicyDocuments pulumi.StringArrayInput `pulumi:"overridePolicyDocuments"` // ID for the policy document. PolicyId pulumi.StringPtrInput `pulumi:"policyId"` - // Deprecated: Not used + // Deprecated: source_json is deprecated. This argument is retained only for backward compatibility with previous versions of this data source. SourceJson pulumi.StringPtrInput `pulumi:"sourceJson"` // List of IAM policy documents that are merged together into the exported document. Statements defined in `sourcePolicyDocuments` must have unique `sid`s. Statements with the same `sid` from `overridePolicyDocuments` will override source statements. SourcePolicyDocuments pulumi.StringArrayInput `pulumi:"sourcePolicyDocuments"` @@ -692,7 +692,7 @@ func (o GetPolicyDocumentResultOutput) MinifiedJson() pulumi.StringOutput { return o.ApplyT(func(v GetPolicyDocumentResult) string { return v.MinifiedJson }).(pulumi.StringOutput) } -// Deprecated: Not used +// Deprecated: override_json is deprecated. This argument is retained only for backward compatibility with previous versions of this data source. func (o GetPolicyDocumentResultOutput) OverrideJson() pulumi.StringPtrOutput { return o.ApplyT(func(v GetPolicyDocumentResult) *string { return v.OverrideJson }).(pulumi.StringPtrOutput) } @@ -705,7 +705,7 @@ func (o GetPolicyDocumentResultOutput) PolicyId() pulumi.StringPtrOutput { return o.ApplyT(func(v GetPolicyDocumentResult) *string { return v.PolicyId }).(pulumi.StringPtrOutput) } -// Deprecated: Not used +// Deprecated: source_json is deprecated. This argument is retained only for backward compatibility with previous versions of this data source. 
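Since the replacement for the deprecated `sourceJson`/`overrideJson` arguments is the document-list form, a minimal sketch of merging two policies via `sourcePolicyDocuments` and `overridePolicyDocuments`; both JSON literals are placeholders:

package main

import (
	"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/iam"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		base := `{"Version":"2012-10-17","Statement":[{"Sid":"List","Effect":"Allow","Action":"s3:ListBucket","Resource":"*"}]}`
		override := `{"Version":"2012-10-17","Statement":[{"Sid":"List","Effect":"Deny","Action":"s3:ListBucket","Resource":"*"}]}`
		// Statements in overridePolicyDocuments win over same-sid statements
		// from sourcePolicyDocuments.
		doc, err := iam.GetPolicyDocument(ctx, &iam.GetPolicyDocumentArgs{
			SourcePolicyDocuments:   []string{base},
			OverridePolicyDocuments: []string{override},
		}, nil)
		if err != nil {
			return err
		}
		ctx.Export("mergedPolicy", pulumi.String(doc.Json))
		return nil
	})
}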
func (o GetPolicyDocumentResultOutput) SourceJson() pulumi.StringPtrOutput { return o.ApplyT(func(v GetPolicyDocumentResult) *string { return v.SourceJson }).(pulumi.StringPtrOutput) } diff --git a/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/internal/pulumiUtilities.go b/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/internal/pulumiUtilities.go index 031c19d39..07090a2b2 100644 --- a/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/internal/pulumiUtilities.go +++ b/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/internal/pulumiUtilities.go @@ -165,7 +165,7 @@ func callPlainInner( func PkgResourceDefaultOpts(opts []pulumi.ResourceOption) []pulumi.ResourceOption { defaults := []pulumi.ResourceOption{} - version := semver.MustParse("6.69.0") + version := semver.MustParse("6.72.0") if !version.Equals(semver.Version{}) { defaults = append(defaults, pulumi.Version(version.String())) } @@ -176,7 +176,7 @@ func PkgInvokeDefaultOpts(opts []pulumi.InvokeOption) []pulumi.InvokeOption { defaults := []pulumi.InvokeOption{} - version := semver.MustParse("6.69.0") + version := semver.MustParse("6.72.0") if !version.Equals(semver.Version{}) { defaults = append(defaults, pulumi.Version(version.String())) } diff --git a/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/lb/getListenerRule.go b/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/lb/getListenerRule.go index f9627cf3f..d72c42aa0 100644 --- a/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/lb/getListenerRule.go +++ b/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/lb/getListenerRule.go @@ -64,7 +64,7 @@ import ( // lbRulePriority := cfg.RequireInt("lbRulePriority") // _, err := lb.LookupListenerRule(ctx, &lb.LookupListenerRuleArgs{ // ListenerArn: pulumi.StringRef(lbListenerArn), -// Priority: pulumi.Float64Ref(lbRulePriority), +// Priority: pulumi.IntRef(lbRulePriority), // }, nil) // if err != nil { // return err @@ -100,7 +100,7 @@ type LookupListenerRuleArgs struct { ListenerArn *string `pulumi:"listenerArn"` // Priority of the Listener Rule within the Listener. // Must be set if `listenerArn` is set, otherwise must not be set. - Priority *float64 `pulumi:"priority"` + Priority *int `pulumi:"priority"` } // A collection of values returned by getListenerRule. @@ -114,9 +114,9 @@ type LookupListenerRuleResult struct { // Detailed below. Conditions []GetListenerRuleCondition `pulumi:"conditions"` // The provider-assigned unique ID for this managed resource. - Id string `pulumi:"id"` - ListenerArn string `pulumi:"listenerArn"` - Priority float64 `pulumi:"priority"` + Id string `pulumi:"id"` + ListenerArn string `pulumi:"listenerArn"` + Priority int `pulumi:"priority"` // Tags assigned to the Listener Rule. Tags map[string]string `pulumi:"tags"` } @@ -146,7 +146,7 @@ type LookupListenerRuleOutputArgs struct { ListenerArn pulumi.StringPtrInput `pulumi:"listenerArn"` // Priority of the Listener Rule within the Listener. // Must be set if `listenerArn` is set, otherwise must not be set.
- Priority pulumi.Float64PtrInput `pulumi:"priority"` + Priority pulumi.IntPtrInput `pulumi:"priority"` } func (LookupListenerRuleOutputArgs) ElementType() reflect.Type { @@ -194,8 +194,8 @@ func (o LookupListenerRuleResultOutput) ListenerArn() pulumi.StringOutput { return o.ApplyT(func(v LookupListenerRuleResult) string { return v.ListenerArn }).(pulumi.StringOutput) } -func (o LookupListenerRuleResultOutput) Priority() pulumi.Float64Output { - return o.ApplyT(func(v LookupListenerRuleResult) float64 { return v.Priority }).(pulumi.Float64Output) +func (o LookupListenerRuleResultOutput) Priority() pulumi.IntOutput { + return o.ApplyT(func(v LookupListenerRuleResult) int { return v.Priority }).(pulumi.IntOutput) } // Tags assigned to the Listener Rule. diff --git a/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/lb/pulumiTypes.go b/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/lb/pulumiTypes.go index 1e75761d8..a11272e15 100644 --- a/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/lb/pulumiTypes.go +++ b/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/lb/pulumiTypes.go @@ -7208,7 +7208,7 @@ type GetListenerRuleAction struct { // Detailed below. Forward *GetListenerRuleActionForward `pulumi:"forward"` // The evaluation order of the action. - Order float64 `pulumi:"order"` + Order int `pulumi:"order"` // An action to redirect the request. // Detailed below. Redirect *GetListenerRuleActionRedirect `pulumi:"redirect"` @@ -7241,7 +7241,7 @@ type GetListenerRuleActionArgs struct { // Detailed below. Forward GetListenerRuleActionForwardPtrInput `pulumi:"forward"` // The evaluation order of the action. - Order pulumi.Float64Input `pulumi:"order"` + Order pulumi.IntInput `pulumi:"order"` // An action to redirect the request. // Detailed below. Redirect GetListenerRuleActionRedirectPtrInput `pulumi:"redirect"` @@ -7325,8 +7325,8 @@ func (o GetListenerRuleActionOutput) Forward() GetListenerRuleActionForwardPtrOu } // The evaluation order of the action. -func (o GetListenerRuleActionOutput) Order() pulumi.Float64Output { - return o.ApplyT(func(v GetListenerRuleAction) float64 { return v.Order }).(pulumi.Float64Output) +func (o GetListenerRuleActionOutput) Order() pulumi.IntOutput { + return o.ApplyT(func(v GetListenerRuleAction) int { return v.Order }).(pulumi.IntOutput) } // An action to redirect the request. @@ -8291,7 +8291,7 @@ func (o GetListenerRuleActionForwardPtrOutput) TargetGroups() GetListenerRuleAct type GetListenerRuleActionForwardStickiness struct { // The time period, in seconds, during which requests from a client should be routed to the same target group. - Duration float64 `pulumi:"duration"` + Duration int `pulumi:"duration"` // Indicates whether target group stickiness is enabled. Enabled bool `pulumi:"enabled"` } @@ -8309,7 +8309,7 @@ type GetListenerRuleActionForwardStickinessInput interface { type GetListenerRuleActionForwardStickinessArgs struct { // The time period, in seconds, during which requests from a client should be routed to the same target group. - Duration pulumi.Float64Input `pulumi:"duration"` + Duration pulumi.IntInput `pulumi:"duration"` // Indicates whether target group stickiness is enabled. Enabled pulumi.BoolInput `pulumi:"enabled"` } @@ -8392,8 +8392,8 @@ func (o GetListenerRuleActionForwardStickinessOutput) ToGetListenerRuleActionFor } // The time period, in seconds, during which requests from a client should be routed to the same target group. 
-func (o GetListenerRuleActionForwardStickinessOutput) Duration() pulumi.Float64Output { - return o.ApplyT(func(v GetListenerRuleActionForwardStickiness) float64 { return v.Duration }).(pulumi.Float64Output) +func (o GetListenerRuleActionForwardStickinessOutput) Duration() pulumi.IntOutput { + return o.ApplyT(func(v GetListenerRuleActionForwardStickiness) int { return v.Duration }).(pulumi.IntOutput) } // Indicates whether target group stickiness is enabled. @@ -8426,13 +8426,13 @@ func (o GetListenerRuleActionForwardStickinessPtrOutput) Elem() GetListenerRuleA } // The time period, in seconds, during which requests from a client should be routed to the same target group. -func (o GetListenerRuleActionForwardStickinessPtrOutput) Duration() pulumi.Float64PtrOutput { - return o.ApplyT(func(v *GetListenerRuleActionForwardStickiness) *float64 { +func (o GetListenerRuleActionForwardStickinessPtrOutput) Duration() pulumi.IntPtrOutput { + return o.ApplyT(func(v *GetListenerRuleActionForwardStickiness) *int { if v == nil { return nil } return &v.Duration - }).(pulumi.Float64PtrOutput) + }).(pulumi.IntPtrOutput) } // Indicates whether target group stickiness is enabled. @@ -8450,7 +8450,7 @@ type GetListenerRuleActionForwardTargetGroup struct { // Either `arn` or `listenerArn` must be set. Arn string `pulumi:"arn"` // Weight of the target group. - Weight float64 `pulumi:"weight"` + Weight int `pulumi:"weight"` } // GetListenerRuleActionForwardTargetGroupInput is an input type that accepts GetListenerRuleActionForwardTargetGroupArgs and GetListenerRuleActionForwardTargetGroupOutput values. @@ -8469,7 +8469,7 @@ type GetListenerRuleActionForwardTargetGroupArgs struct { // Either `arn` or `listenerArn` must be set. Arn pulumi.StringInput `pulumi:"arn"` // Weight of the target group. - Weight pulumi.Float64Input `pulumi:"weight"` + Weight pulumi.IntInput `pulumi:"weight"` } func (GetListenerRuleActionForwardTargetGroupArgs) ElementType() reflect.Type { @@ -8530,8 +8530,8 @@ func (o GetListenerRuleActionForwardTargetGroupOutput) Arn() pulumi.StringOutput } // Weight of the target group. 
-func (o GetListenerRuleActionForwardTargetGroupOutput) Weight() pulumi.Float64Output { - return o.ApplyT(func(v GetListenerRuleActionForwardTargetGroup) float64 { return v.Weight }).(pulumi.Float64Output) +func (o GetListenerRuleActionForwardTargetGroupOutput) Weight() pulumi.IntOutput { + return o.ApplyT(func(v GetListenerRuleActionForwardTargetGroup) int { return v.Weight }).(pulumi.IntOutput) } type GetListenerRuleActionForwardTargetGroupArrayOutput struct{ *pulumi.OutputState } diff --git a/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/s3/bucketLifecycleConfigurationV2.go b/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/s3/bucketLifecycleConfigurationV2.go index 1e5035598..dec106194 100644 --- a/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/s3/bucketLifecycleConfigurationV2.go +++ b/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/s3/bucketLifecycleConfigurationV2.go @@ -333,7 +333,7 @@ import ( // Status: pulumi.String("Enabled"), // Transitions: s3.BucketLifecycleConfigurationV2RuleTransitionArray{ // &s3.BucketLifecycleConfigurationV2RuleTransitionArgs{ -// Days: pulumi.Float64(365), +// Days: pulumi.Int(365), // StorageClass: pulumi.String("GLACIER_IR"), // }, // }, @@ -423,7 +423,7 @@ import ( // &s3.BucketLifecycleConfigurationV2RuleArgs{ // Id: pulumi.String("log"), // Expiration: &s3.BucketLifecycleConfigurationV2RuleExpirationArgs{ -// Days: pulumi.Float64(90), +// Days: pulumi.Int(90), // }, // Filter: &s3.BucketLifecycleConfigurationV2RuleFilterArgs{ // And: &s3.BucketLifecycleConfigurationV2RuleFilterAndArgs{ @@ -437,11 +437,11 @@ import ( // Status: pulumi.String("Enabled"), // Transitions: s3.BucketLifecycleConfigurationV2RuleTransitionArray{ // &s3.BucketLifecycleConfigurationV2RuleTransitionArgs{ -// Days: pulumi.Float64(30), +// Days: pulumi.Int(30), // StorageClass: pulumi.String("STANDARD_IA"), // }, // &s3.BucketLifecycleConfigurationV2RuleTransitionArgs{ -// Days: pulumi.Float64(60), +// Days: pulumi.Int(60), // StorageClass: pulumi.String("GLACIER"), // }, // }, @@ -492,15 +492,15 @@ import ( // Prefix: pulumi.String("config/"), // }, // NoncurrentVersionExpiration: &s3.BucketLifecycleConfigurationV2RuleNoncurrentVersionExpirationArgs{ -// NoncurrentDays: pulumi.Float64(90), +// NoncurrentDays: pulumi.Int(90), // }, // NoncurrentVersionTransitions: s3.BucketLifecycleConfigurationV2RuleNoncurrentVersionTransitionArray{ // &s3.BucketLifecycleConfigurationV2RuleNoncurrentVersionTransitionArgs{ -// NoncurrentDays: pulumi.Float64(30), +// NoncurrentDays: pulumi.Int(30), // StorageClass: pulumi.String("STANDARD_IA"), // }, // &s3.BucketLifecycleConfigurationV2RuleNoncurrentVersionTransitionArgs{ -// NoncurrentDays: pulumi.Float64(60), +// NoncurrentDays: pulumi.Int(60), // StorageClass: pulumi.String("GLACIER"), // }, // }, diff --git a/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/s3/bucketObjectv2.go b/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/s3/bucketObjectv2.go index 059238904..e94d789c6 100644 --- a/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/s3/bucketObjectv2.go +++ b/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/s3/bucketObjectv2.go @@ -319,12 +319,14 @@ type BucketObjectv2 struct { BucketKeyEnabled pulumi.BoolOutput `pulumi:"bucketKeyEnabled"` // Caching behavior along the request/reply chain. Read [w3c cacheControl](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9) for further details.
CacheControl pulumi.StringPtrOutput `pulumi:"cacheControl"` - // Indicates the algorithm used to create the checksum for the object. If a value is specified and the object is encrypted with KMS, you must have permission to use the `kms:Decrypt` action. Valid values: `CRC32`, `CRC32C`, `SHA1`, `SHA256`. + // Indicates the algorithm used to create the checksum for the object. If a value is specified and the object is encrypted with KMS, you must have permission to use the `kms:Decrypt` action. Valid values: `CRC32`, `CRC32C`, `CRC64NVME`, `SHA1`, `SHA256`. ChecksumAlgorithm pulumi.StringPtrOutput `pulumi:"checksumAlgorithm"` // The base64-encoded, 32-bit CRC32 checksum of the object. ChecksumCrc32 pulumi.StringOutput `pulumi:"checksumCrc32"` // The base64-encoded, 32-bit CRC32C checksum of the object. ChecksumCrc32c pulumi.StringOutput `pulumi:"checksumCrc32c"` + // The base64-encoded, 64-bit CRC64NVME checksum of the object. + ChecksumCrc64nvme pulumi.StringOutput `pulumi:"checksumCrc64nvme"` // The base64-encoded, 160-bit SHA-1 digest of the object. ChecksumSha1 pulumi.StringOutput `pulumi:"checksumSha1"` // The base64-encoded, 256-bit SHA-256 digest of the object. @@ -434,12 +436,14 @@ type bucketObjectv2State struct { BucketKeyEnabled *bool `pulumi:"bucketKeyEnabled"` // Caching behavior along the request/reply chain. Read [w3c cacheControl](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9) for further details. CacheControl *string `pulumi:"cacheControl"` - // Indicates the algorithm used to create the checksum for the object. If a value is specified and the object is encrypted with KMS, you must have permission to use the `kms:Decrypt` action. Valid values: `CRC32`, `CRC32C`, `SHA1`, `SHA256`. + // Indicates the algorithm used to create the checksum for the object. If a value is specified and the object is encrypted with KMS, you must have permission to use the `kms:Decrypt` action. Valid values: `CRC32`, `CRC32C`, `CRC64NVME`, `SHA1`, `SHA256`. ChecksumAlgorithm *string `pulumi:"checksumAlgorithm"` // The base64-encoded, 32-bit CRC32 checksum of the object. ChecksumCrc32 *string `pulumi:"checksumCrc32"` // The base64-encoded, 32-bit CRC32C checksum of the object. ChecksumCrc32c *string `pulumi:"checksumCrc32c"` + // The base64-encoded, 64-bit CRC64NVME checksum of the object. + ChecksumCrc64nvme *string `pulumi:"checksumCrc64nvme"` // The base64-encoded, 160-bit SHA-1 digest of the object. ChecksumSha1 *string `pulumi:"checksumSha1"` // The base64-encoded, 256-bit SHA-256 digest of the object. @@ -511,12 +515,14 @@ type BucketObjectv2State struct { BucketKeyEnabled pulumi.BoolPtrInput // Caching behavior along the request/reply chain. Read [w3c cacheControl](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9) for further details. CacheControl pulumi.StringPtrInput - // Indicates the algorithm used to create the checksum for the object. If a value is specified and the object is encrypted with KMS, you must have permission to use the `kms:Decrypt` action. Valid values: `CRC32`, `CRC32C`, `SHA1`, `SHA256`. + // Indicates the algorithm used to create the checksum for the object. If a value is specified and the object is encrypted with KMS, you must have permission to use the `kms:Decrypt` action. Valid values: `CRC32`, `CRC32C`, `CRC64NVME`, `SHA1`, `SHA256`. ChecksumAlgorithm pulumi.StringPtrInput // The base64-encoded, 32-bit CRC32 checksum of the object. ChecksumCrc32 pulumi.StringPtrInput // The base64-encoded, 32-bit CRC32C checksum of the object.
ChecksumCrc32c pulumi.StringPtrInput + // The base64-encoded, 64-bit CRC64NVME checksum of the object. + ChecksumCrc64nvme pulumi.StringPtrInput // The base64-encoded, 160-bit SHA-1 digest of the object. ChecksumSha1 pulumi.StringPtrInput // The base64-encoded, 256-bit SHA-256 digest of the object. @@ -590,7 +596,7 @@ type bucketObjectv2Args struct { BucketKeyEnabled *bool `pulumi:"bucketKeyEnabled"` // Caching behavior along the request/reply chain. Read [w3c cacheControl](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9) for further details. CacheControl *string `pulumi:"cacheControl"` - // Indicates the algorithm used to create the checksum for the object. If a value is specified and the object is encrypted with KMS, you must have permission to use the `kms:Decrypt` action. Valid values: `CRC32`, `CRC32C`, `SHA1`, `SHA256`. + // Indicates the algorithm used to create the checksum for the object. If a value is specified and the object is encrypted with KMS, you must have permission to use the `kms:Decrypt` action. Valid values: `CRC32`, `CRC32C`, `CRC64NVME`, `SHA1`, `SHA256`. ChecksumAlgorithm *string `pulumi:"checksumAlgorithm"` // Literal string value to use as the object content, which will be uploaded as UTF-8-encoded text. Content *string `pulumi:"content"` @@ -652,7 +658,7 @@ type BucketObjectv2Args struct { BucketKeyEnabled pulumi.BoolPtrInput // Caching behavior along the request/reply chain. Read [w3c cacheControl](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9) for further details. CacheControl pulumi.StringPtrInput - // Indicates the algorithm used to create the checksum for the object. If a value is specified and the object is encrypted with KMS, you must have permission to use the `kms:Decrypt` action. Valid values: `CRC32`, `CRC32C`, `SHA1`, `SHA256`. + // Indicates the algorithm used to create the checksum for the object. If a value is specified and the object is encrypted with KMS, you must have permission to use the `kms:Decrypt` action. Valid values: `CRC32`, `CRC32C`, `CRC64NVME`, `SHA1`, `SHA256`. ChecksumAlgorithm pulumi.StringPtrInput // Literal string value to use as the object content, which will be uploaded as UTF-8-encoded text. Content pulumi.StringPtrInput @@ -816,7 +822,7 @@ func (o BucketObjectv2Output) CacheControl() pulumi.StringPtrOutput { return o.ApplyT(func(v *BucketObjectv2) pulumi.StringPtrOutput { return v.CacheControl }).(pulumi.StringPtrOutput) } -// Indicates the algorithm used to create the checksum for the object. If a value is specified and the object is encrypted with KMS, you must have permission to use the `kms:Decrypt` action. Valid values: `CRC32`, `CRC32C`, `SHA1`, `SHA256`. +// Indicates the algorithm used to create the checksum for the object. If a value is specified and the object is encrypted with KMS, you must have permission to use the `kms:Decrypt` action. Valid values: `CRC32`, `CRC32C`, `CRC64NVME`, `SHA1`, `SHA256`. func (o BucketObjectv2Output) ChecksumAlgorithm() pulumi.StringPtrOutput { return o.ApplyT(func(v *BucketObjectv2) pulumi.StringPtrOutput { return v.ChecksumAlgorithm }).(pulumi.StringPtrOutput) } @@ -831,6 +837,11 @@ func (o BucketObjectv2Output) ChecksumCrc32c() pulumi.StringOutput { return o.ApplyT(func(v *BucketObjectv2) pulumi.StringOutput { return v.ChecksumCrc32c }).(pulumi.StringOutput) } +// The base64-encoded, 64-bit CRC64NVME checksum of the object.
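A short sketch of requesting the newly documented `CRC64NVME` algorithm on upload, after which the computed digest is readable from `checksumCrc64nvme`; the object key and source path are placeholders:

package main

import (
	"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/s3"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		bucket, err := s3.NewBucketV2(ctx, "example", nil)
		if err != nil {
			return err
		}
		obj, err := s3.NewBucketObjectv2(ctx, "data", &s3.BucketObjectv2Args{
			Bucket:            bucket.ID(),
			Key:               pulumi.String("data.bin"),
			Source:            pulumi.NewFileAsset("./data.bin"), // placeholder path
			ChecksumAlgorithm: pulumi.String("CRC64NVME"),
		})
		if err != nil {
			return err
		}
		// S3 computes the checksum server-side; export it for inspection.
		ctx.Export("crc64nvme", obj.ChecksumCrc64nvme)
		return nil
	})
}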
+func (o BucketObjectv2Output) ChecksumCrc64nvme() pulumi.StringOutput { + return o.ApplyT(func(v *BucketObjectv2) pulumi.StringOutput { return v.ChecksumCrc64nvme }).(pulumi.StringOutput) +} + // The base64-encoded, 160-bit SHA-1 digest of the object. func (o BucketObjectv2Output) ChecksumSha1() pulumi.StringOutput { return o.ApplyT(func(v *BucketObjectv2) pulumi.StringOutput { return v.ChecksumSha1 }).(pulumi.StringOutput) diff --git a/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/s3/bucketV2.go b/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/s3/bucketV2.go index 5b853dfc7..656ac966f 100644 --- a/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/s3/bucketV2.go +++ b/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/s3/bucketV2.go @@ -62,11 +62,11 @@ type BucketV2 struct { // Sets the accelerate configuration of an existing bucket. Can be `Enabled` or `Suspended`. Cannot be used in `cn-north-1` or `us-gov-west-1`. This provider will only perform drift detection if a configuration value is provided. // Use the resource `s3.BucketAccelerateConfigurationV2` instead. // - // Deprecated: Use the s3.BucketAccelerateConfigurationV2 resource instead + // Deprecated: acceleration_status is deprecated. Use the s3.BucketAccelerateConfigurationV2 resource instead. AccelerationStatus pulumi.StringOutput `pulumi:"accelerationStatus"` // The [canned ACL](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl) to apply. Valid values are `private`, `public-read`, `public-read-write`, `aws-exec-read`, `authenticated-read`, and `log-delivery-write`. Defaults to `private`. Conflicts with `grant`. The provider will only perform drift detection if a configuration value is provided. Use the resource `s3.BucketAclV2` instead. // - // Deprecated: Use the s3.BucketAclV2 resource instead + // Deprecated: acl is deprecated. Use the s3.BucketAclV2 resource instead. Acl pulumi.StringOutput `pulumi:"acl"` // ARN of the bucket. Will be of format `arn:aws:s3:::bucketname`. Arn pulumi.StringOutput `pulumi:"arn"` @@ -80,31 +80,31 @@ type BucketV2 struct { BucketRegionalDomainName pulumi.StringOutput `pulumi:"bucketRegionalDomainName"` // Rule of [Cross-Origin Resource Sharing](https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html). See CORS rule below for details. This provider will only perform drift detection if a configuration value is provided. Use the resource `s3.BucketCorsConfigurationV2` instead. // - // Deprecated: Use the s3.BucketCorsConfigurationV2 resource instead + // Deprecated: cors_rule is deprecated. Use the s3.BucketCorsConfigurationV2 resource instead. CorsRules BucketV2CorsRuleArrayOutput `pulumi:"corsRules"` // Boolean that indicates all objects (including any [locked objects](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html)) should be deleted from the bucket *when the bucket is destroyed* so that the bucket can be destroyed without error. These objects are *not* recoverable. This only deletes objects when the bucket is destroyed, *not* when setting this parameter to `true`. Once this parameter is set to `true`, there must be a successful `pulumi up` run before a destroy is required to update this value in the resource state. Without a successful `pulumi up` after this parameter is set, this flag will have no effect. If setting this field in the same operation that would require replacing the bucket or destroying the bucket, this flag will not work. 
Additionally when importing a bucket, a successful `pulumi up` is required to set this value in state before it will take effect on a destroy operation. ForceDestroy pulumi.BoolPtrOutput `pulumi:"forceDestroy"` // An [ACL policy grant](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#sample-acl). See Grant below for details. Conflicts with `acl`. The provider will only perform drift detection if a configuration value is provided. Use the resource `s3.BucketAclV2` instead. // - // Deprecated: Use the s3.BucketAclV2 resource instead + // Deprecated: grant is deprecated. Use the s3.BucketAclV2 resource instead. Grants BucketV2GrantArrayOutput `pulumi:"grants"` // [Route 53 Hosted Zone ID](https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_website_region_endpoints) for this bucket's region. HostedZoneId pulumi.StringOutput `pulumi:"hostedZoneId"` // Configuration of [object lifecycle management](http://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html). See Lifecycle Rule below for details. The provider will only perform drift detection if a configuration value is provided. // Use the resource `s3.BucketLifecycleConfigurationV2` instead. // - // Deprecated: Use the s3.BucketLifecycleConfigurationV2 resource instead + // Deprecated: lifecycle_rule is deprecated. Use the s3.BucketLifecycleConfigurationV2 resource instead. LifecycleRules BucketV2LifecycleRuleArrayOutput `pulumi:"lifecycleRules"` // Configuration of [S3 bucket logging](https://docs.aws.amazon.com/AmazonS3/latest/UG/ManagingBucketLogging.html) parameters. See Logging below for details. The provider will only perform drift detection if a configuration value is provided. // Use the resource `s3.BucketLoggingV2` instead. // - // Deprecated: Use the s3.BucketLoggingV2 resource instead + // Deprecated: logging is deprecated. Use the s3.BucketLoggingV2 resource instead. Loggings BucketV2LoggingArrayOutput `pulumi:"loggings"` // Configuration of [S3 object locking](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). See Object Lock Configuration below for details. // The provider will only perform drift detection if a configuration value is provided. // Use the `objectLockEnabled` parameter and the resource `s3.BucketObjectLockConfigurationV2` instead. // - // Deprecated: Use the top-level parameter objectLockEnabled and the s3.BucketObjectLockConfigurationV2 resource instead + // Deprecated: object_lock_configuration is deprecated. Use the top-level parameter objectLockEnabled and the s3.BucketObjectLockConfigurationV2 resource instead. ObjectLockConfiguration BucketV2ObjectLockConfigurationOutput `pulumi:"objectLockConfiguration"` // Indicates whether this bucket has an Object Lock configuration enabled. Valid values are `true` or `false`. This argument is not supported in all regions or partitions. ObjectLockEnabled pulumi.BoolOutput `pulumi:"objectLockEnabled"` @@ -112,14 +112,14 @@ type BucketV2 struct { // The provider will only perform drift detection if a configuration value is provided. // Use the resource `s3.BucketPolicy` instead. // - // Deprecated: Use the s3.BucketPolicy resource instead + // Deprecated: policy is deprecated. Use the s3.BucketPolicy resource instead. Policy pulumi.StringOutput `pulumi:"policy"` // AWS region this bucket resides in. Region pulumi.StringOutput `pulumi:"region"` // Configuration of [replication configuration](http://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html). See Replication Configuration below for details.
The provider will only perform drift detection if a configuration value is provided. // Use the resource `s3.BucketReplicationConfig` instead. // - // Deprecated: Use the s3.BucketReplicationConfig resource instead + // Deprecated: replication_configuration is deprecated. Use the s3.BucketReplicationConfig resource instead. ReplicationConfigurations BucketV2ReplicationConfigurationArrayOutput `pulumi:"replicationConfigurations"` // Specifies who should bear the cost of Amazon S3 data transfer. // Can be either `BucketOwner` or `Requester`. By default, the owner of the S3 bucket would incur the costs of any data transfer. @@ -127,13 +127,13 @@ type BucketV2 struct { // The provider will only perform drift detection if a configuration value is provided. // Use the resource `s3.BucketRequestPaymentConfigurationV2` instead. // - // Deprecated: Use the s3.BucketRequestPaymentConfigurationV2 resource instead + // Deprecated: request_payer is deprecated. Use the s3.BucketRequestPaymentConfigurationV2 resource instead. RequestPayer pulumi.StringOutput `pulumi:"requestPayer"` // Configuration of [server-side encryption configuration](http://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html). See Server Side Encryption Configuration below for details. // The provider will only perform drift detection if a configuration value is provided. // Use the resource `s3.BucketServerSideEncryptionConfigurationV2` instead. // - // Deprecated: Use the s3.BucketServerSideEncryptionConfigurationV2 resource instead + // Deprecated: server_side_encryption_configuration is deprecated. Use the s3.BucketServerSideEncryptionConfigurationV2 resource instead. ServerSideEncryptionConfigurations BucketV2ServerSideEncryptionConfigurationArrayOutput `pulumi:"serverSideEncryptionConfigurations"` // Map of tags to assign to the bucket. If configured with a provider `defaultTags` configuration block present, tags with matching keys will overwrite those defined at the provider-level. // @@ -145,20 +145,20 @@ type BucketV2 struct { TagsAll pulumi.StringMapOutput `pulumi:"tagsAll"` // Configuration of the [S3 bucket versioning state](https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html). See Versioning below for details. The provider will only perform drift detection if a configuration value is provided. Use the resource `s3.BucketVersioningV2` instead. // - // Deprecated: Use the s3.BucketVersioningV2 resource instead + // Deprecated: versioning is deprecated. Use the s3.BucketVersioningV2 resource instead. Versionings BucketV2VersioningArrayOutput `pulumi:"versionings"` // (**Deprecated**) Domain of the website endpoint, if the bucket is configured with a website. If not, this will be an empty string. This is used to create Route 53 alias records. Use the resource `s3.BucketWebsiteConfigurationV2` instead. // - // Deprecated: Use the s3.BucketWebsiteConfigurationV2 resource + // Deprecated: website_domain is deprecated. Use the s3.BucketWebsiteConfigurationV2 resource instead. WebsiteDomain pulumi.StringOutput `pulumi:"websiteDomain"` // (**Deprecated**) Website endpoint, if the bucket is configured with a website. If not, this will be an empty string. Use the resource `s3.BucketWebsiteConfigurationV2` instead. // - // Deprecated: Use the s3.BucketWebsiteConfigurationV2 resource + // Deprecated: website_endpoint is deprecated. Use the s3.BucketWebsiteConfigurationV2 resource instead. 
WebsiteEndpoint pulumi.StringOutput `pulumi:"websiteEndpoint"` // Configuration of the [S3 bucket website](https://docs.aws.amazon.com/AmazonS3/latest/userguide/WebsiteHosting.html). See Website below for details. The provider will only perform drift detection if a configuration value is provided. // Use the resource `s3.BucketWebsiteConfigurationV2` instead. // - // Deprecated: Use the s3.BucketWebsiteConfigurationV2 resource instead + // Deprecated: website is deprecated. Use the s3.BucketWebsiteConfigurationV2 resource instead. Websites BucketV2WebsiteArrayOutput `pulumi:"websites"` } @@ -201,11 +201,11 @@ type bucketV2State struct { // Sets the accelerate configuration of an existing bucket. Can be `Enabled` or `Suspended`. Cannot be used in `cn-north-1` or `us-gov-west-1`. This provider will only perform drift detection if a configuration value is provided. // Use the resource `s3.BucketAccelerateConfigurationV2` instead. // - // Deprecated: Use the s3.BucketAccelerateConfigurationV2 resource instead + // Deprecated: acceleration_status is deprecated. Use the s3.BucketAccelerateConfigurationV2 resource instead. AccelerationStatus *string `pulumi:"accelerationStatus"` // The [canned ACL](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl) to apply. Valid values are `private`, `public-read`, `public-read-write`, `aws-exec-read`, `authenticated-read`, and `log-delivery-write`. Defaults to `private`. Conflicts with `grant`. The provider will only perform drift detection if a configuration value is provided. Use the resource `s3.BucketAclV2` instead. // - // Deprecated: Use the s3.BucketAclV2 resource instead + // Deprecated: acl is deprecated. Use the s3.BucketAclV2 resource instead. Acl *string `pulumi:"acl"` // ARN of the bucket. Will be of format `arn:aws:s3:::bucketname`. Arn *string `pulumi:"arn"` @@ -219,31 +219,31 @@ type bucketV2State struct { BucketRegionalDomainName *string `pulumi:"bucketRegionalDomainName"` // Rule of [Cross-Origin Resource Sharing](https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html). See CORS rule below for details. This provider will only perform drift detection if a configuration value is provided. Use the resource `s3.BucketCorsConfigurationV2` instead. // - // Deprecated: Use the s3.BucketCorsConfigurationV2 resource instead + // Deprecated: cors_rule is deprecated. Use the s3.BucketCorsConfigurationV2 resource instead. CorsRules []BucketV2CorsRule `pulumi:"corsRules"` // Boolean that indicates all objects (including any [locked objects](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html)) should be deleted from the bucket *when the bucket is destroyed* so that the bucket can be destroyed without error. These objects are *not* recoverable. This only deletes objects when the bucket is destroyed, *not* when setting this parameter to `true`. Once this parameter is set to `true`, there must be a successful `pulumi up` run before a destroy is required to update this value in the resource state. Without a successful `pulumi up` after this parameter is set, this flag will have no effect. If setting this field in the same operation that would require replacing the bucket or destroying the bucket, this flag will not work. Additionally when importing a bucket, a successful `pulumi up` is required to set this value in state before it will take effect on a destroy operation. 
ForceDestroy *bool `pulumi:"forceDestroy"` // An [ACL policy grant](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#sample-acl). See Grant below for details. Conflicts with `acl`. The provider will only perform drift detection if a configuration value is provided. Use the resource `s3.BucketAclV2` instead. // - // Deprecated: Use the s3.BucketAclV2 resource instead + // Deprecated: grant is deprecated. Use the s3.BucketAclV2 resource instead. Grants []BucketV2Grant `pulumi:"grants"` // [Route 53 Hosted Zone ID](https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_website_region_endpoints) for this bucket's region. HostedZoneId *string `pulumi:"hostedZoneId"` // Configuration of [object lifecycle management](http://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html). See Lifecycle Rule below for details. The provider will only perform drift detection if a configuration value is provided. // Use the resource `s3.BucketLifecycleConfigurationV2` instead. // - // Deprecated: Use the s3.BucketLifecycleConfigurationV2 resource instead + // Deprecated: lifecycle_rule is deprecated. Use the s3.BucketLifecycleConfigurationV2 resource instead. LifecycleRules []BucketV2LifecycleRule `pulumi:"lifecycleRules"` // Configuration of [S3 bucket logging](https://docs.aws.amazon.com/AmazonS3/latest/UG/ManagingBucketLogging.html) parameters. See Logging below for details. The provider will only perform drift detection if a configuration value is provided. // Use the resource `s3.BucketLoggingV2` instead. // - // Deprecated: Use the s3.BucketLoggingV2 resource instead + // Deprecated: logging is deprecated. Use the s3.BucketLoggingV2 resource instead. Loggings []BucketV2Logging `pulumi:"loggings"` // Configuration of [S3 object locking](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). See Object Lock Configuration below for details. // The provider will only perform drift detection if a configuration value is provided. // Use the `objectLockEnabled` parameter and the resource `s3.BucketObjectLockConfigurationV2` instead. // - // Deprecated: Use the top-level parameter objectLockEnabled and the s3.BucketObjectLockConfigurationV2 resource instead + // Deprecated: object_lock_configuration is deprecated. Use the top-level parameter objectLockEnabled and the s3.BucketObjectLockConfigurationV2 resource instead. ObjectLockConfiguration *BucketV2ObjectLockConfiguration `pulumi:"objectLockConfiguration"` // Indicates whether this bucket has an Object Lock configuration enabled. Valid values are `true` or `false`. This argument is not supported in all regions or partitions. ObjectLockEnabled *bool `pulumi:"objectLockEnabled"` @@ -251,14 +251,14 @@ type bucketV2State struct { // The provider will only perform drift detection if a configuration value is provided. // Use the resource `s3.BucketPolicy` instead. // - // Deprecated: Use the s3.BucketPolicy resource instead + // Deprecated: policy is deprecated. Use the s3.BucketPolicy resource instead. Policy *string `pulumi:"policy"` // AWS region this bucket resides in. Region *string `pulumi:"region"` // Configuration of [replication configuration](http://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html). See Replication Configuration below for details. The provider will only perform drift detection if a configuration value is provided. // Use the resource `s3.BucketReplicationConfig` instead.
// - // Deprecated: Use the s3.BucketReplicationConfig resource instead + // Deprecated: replication_configuration is deprecated. Use the s3.BucketReplicationConfig resource instead. ReplicationConfigurations []BucketV2ReplicationConfiguration `pulumi:"replicationConfigurations"` // Specifies who should bear the cost of Amazon S3 data transfer. // Can be either `BucketOwner` or `Requester`. By default, the owner of the S3 bucket would incur the costs of any data transfer. @@ -266,13 +266,13 @@ type bucketV2State struct { // The provider will only perform drift detection if a configuration value is provided. // Use the resource `s3.BucketRequestPaymentConfigurationV2` instead. // - // Deprecated: Use the s3.BucketRequestPaymentConfigurationV2 resource instead + // Deprecated: request_payer is deprecated. Use the s3.BucketRequestPaymentConfigurationV2 resource instead. RequestPayer *string `pulumi:"requestPayer"` // Configuration of [server-side encryption configuration](http://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html). See Server Side Encryption Configuration below for details. // The provider will only perform drift detection if a configuration value is provided. // Use the resource `s3.BucketServerSideEncryptionConfigurationV2` instead. // - // Deprecated: Use the s3.BucketServerSideEncryptionConfigurationV2 resource instead + // Deprecated: server_side_encryption_configuration is deprecated. Use the s3.BucketServerSideEncryptionConfigurationV2 resource instead. ServerSideEncryptionConfigurations []BucketV2ServerSideEncryptionConfiguration `pulumi:"serverSideEncryptionConfigurations"` // Map of tags to assign to the bucket. If configured with a provider `defaultTags` configuration block present, tags with matching keys will overwrite those defined at the provider-level. // @@ -284,20 +284,20 @@ type bucketV2State struct { TagsAll map[string]string `pulumi:"tagsAll"` // Configuration of the [S3 bucket versioning state](https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html). See Versioning below for details. The provider will only perform drift detection if a configuration value is provided. Use the resource `s3.BucketVersioningV2` instead. // - // Deprecated: Use the s3.BucketVersioningV2 resource instead + // Deprecated: versioning is deprecated. Use the s3.BucketVersioningV2 resource instead. Versionings []BucketV2Versioning `pulumi:"versionings"` // (**Deprecated**) Domain of the website endpoint, if the bucket is configured with a website. If not, this will be an empty string. This is used to create Route 53 alias records. Use the resource `s3.BucketWebsiteConfigurationV2` instead. // - // Deprecated: Use the s3.BucketWebsiteConfigurationV2 resource + // Deprecated: website_domain is deprecated. Use the s3.BucketWebsiteConfigurationV2 resource instead. WebsiteDomain *string `pulumi:"websiteDomain"` // (**Deprecated**) Website endpoint, if the bucket is configured with a website. If not, this will be an empty string. Use the resource `s3.BucketWebsiteConfigurationV2` instead. // - // Deprecated: Use the s3.BucketWebsiteConfigurationV2 resource + // Deprecated: website_endpoint is deprecated. Use the s3.BucketWebsiteConfigurationV2 resource instead. WebsiteEndpoint *string `pulumi:"websiteEndpoint"` // Configuration of the [S3 bucket website](https://docs.aws.amazon.com/AmazonS3/latest/userguide/WebsiteHosting.html). See Website below for details. The provider will only perform drift detection if a configuration value is provided. 
// Use the resource `s3.BucketWebsiteConfigurationV2` instead. // - // Deprecated: Use the s3.BucketWebsiteConfigurationV2 resource instead + // Deprecated: website is deprecated. Use the s3.BucketWebsiteConfigurationV2 resource instead. Websites []BucketV2Website `pulumi:"websites"` } @@ -305,11 +305,11 @@ type BucketV2State struct { // Sets the accelerate configuration of an existing bucket. Can be `Enabled` or `Suspended`. Cannot be used in `cn-north-1` or `us-gov-west-1`. This provider will only perform drift detection if a configuration value is provided. // Use the resource `s3.BucketAccelerateConfigurationV2` instead. // - // Deprecated: Use the s3.BucketAccelerateConfigurationV2 resource instead + // Deprecated: acceleration_status is deprecated. Use the s3.BucketAccelerateConfigurationV2 resource instead. AccelerationStatus pulumi.StringPtrInput // The [canned ACL](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl) to apply. Valid values are `private`, `public-read`, `public-read-write`, `aws-exec-read`, `authenticated-read`, and `log-delivery-write`. Defaults to `private`. Conflicts with `grant`. The provider will only perform drift detection if a configuration value is provided. Use the resource `s3.BucketAclV2` instead. // - // Deprecated: Use the s3.BucketAclV2 resource instead + // Deprecated: acl is deprecated. Use the s3.BucketAclV2 resource instead. Acl pulumi.StringPtrInput // ARN of the bucket. Will be of format `arn:aws:s3:::bucketname`. Arn pulumi.StringPtrInput @@ -323,31 +323,31 @@ type BucketV2State struct { BucketRegionalDomainName pulumi.StringPtrInput // Rule of [Cross-Origin Resource Sharing](https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html). See CORS rule below for details. This provider will only perform drift detection if a configuration value is provided. Use the resource `s3.BucketCorsConfigurationV2` instead. // - // Deprecated: Use the s3.BucketCorsConfigurationV2 resource instead + // Deprecated: cors_rule is deprecated. Use the s3.BucketCorsConfigurationV2 resource instead. CorsRules BucketV2CorsRuleArrayInput // Boolean that indicates all objects (including any [locked objects](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html)) should be deleted from the bucket *when the bucket is destroyed* so that the bucket can be destroyed without error. These objects are *not* recoverable. This only deletes objects when the bucket is destroyed, *not* when setting this parameter to `true`. Once this parameter is set to `true`, there must be a successful `pulumi up` run before a destroy is required to update this value in the resource state. Without a successful `pulumi up` after this parameter is set, this flag will have no effect. If setting this field in the same operation that would require replacing the bucket or destroying the bucket, this flag will not work. Additionally when importing a bucket, a successful `pulumi up` is required to set this value in state before it will take effect on a destroy operation. ForceDestroy pulumi.BoolPtrInput // An [ACL policy grant](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#sample-acl). See Grant below for details. Conflicts with `acl`. The provider will only perform drift detection if a configuration value is provided. Use the resource `s3.BucketAclV2` instead. // - // Deprecated: Use the s3.BucketAclV2 resource instead + // Deprecated: grant is deprecated. Use the s3.BucketAclV2 resource instead. 
Grants BucketV2GrantArrayInput // [Route 53 Hosted Zone ID](https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_website_region_endpoints) for this bucket's region. HostedZoneId pulumi.StringPtrInput // Configuration of [object lifecycle management](http://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html). See Lifecycle Rule below for details. The provider will only perform drift detection if a configuration value is provided. // Use the resource `s3.BucketLifecycleConfigurationV2` instead. // - // Deprecated: Use the s3.BucketLifecycleConfigurationV2 resource instead + // Deprecated: lifecycle_rule is deprecated. Use the s3.BucketLifecycleConfigurationV2 resource instead. LifecycleRules BucketV2LifecycleRuleArrayInput // Configuration of [S3 bucket logging](https://docs.aws.amazon.com/AmazonS3/latest/UG/ManagingBucketLogging.html) parameters. See Logging below for details. The provider will only perform drift detection if a configuration value is provided. // Use the resource `s3.BucketLoggingV2` instead. // - // Deprecated: Use the s3.BucketLoggingV2 resource instead + // Deprecated: logging is deprecated. Use the s3.BucketLoggingV2 resource instead. Loggings BucketV2LoggingArrayInput // Configuration of [S3 object locking](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). See Object Lock Configuration below for details. // The provider will only perform drift detection if a configuration value is provided. // Use the `objectLockEnabled` parameter and the resource `s3.BucketObjectLockConfigurationV2` instead. // - // Deprecated: Use the top-level parameter objectLockEnabled and the s3.BucketObjectLockConfigurationV2 resource instead + // Deprecated: object_lock_configuration is deprecated. Use the top-level parameter objectLockEnabled and the s3.BucketObjectLockConfigurationV2 resource instead. ObjectLockConfiguration BucketV2ObjectLockConfigurationPtrInput // Indicates whether this bucket has an Object Lock configuration enabled. Valid values are `true` or `false`. This argument is not supported in all regions or partitions. ObjectLockEnabled pulumi.BoolPtrInput @@ -355,14 +355,14 @@ type BucketV2State struct { // The provider will only perform drift detection if a configuration value is provided. // Use the resource `s3.BucketPolicy` instead. // - // Deprecated: Use the s3.BucketPolicy resource instead + // Deprecated: policy is deprecated. Use the s3.BucketPolicy resource instead. Policy pulumi.StringPtrInput // AWS region this bucket resides in. Region pulumi.StringPtrInput // Configuration of [replication configuration](http://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html). See Replication Configuration below for details. The provider will only perform drift detection if a configuration value is provided. // Use the resource `s3.BucketReplicationConfig` instead. // - // Deprecated: Use the s3.BucketReplicationConfig resource instead + // Deprecated: replication_configuration is deprecated. Use the s3.BucketReplicationConfig resource instead. ReplicationConfigurations BucketV2ReplicationConfigurationArrayInput // Specifies who should bear the cost of Amazon S3 data transfer. // Can be either `BucketOwner` or `Requester`. By default, the owner of the S3 bucket would incur the costs of any data transfer. @@ -370,13 +370,13 @@ type BucketV2State struct { // The provider will only perform drift detection if a configuration value is provided. // Use the resource `s3.BucketRequestPaymentConfigurationV2` instead.
// - // Deprecated: Use the s3.BucketRequestPaymentConfigurationV2 resource instead + // Deprecated: request_payer is deprecated. Use the s3.BucketRequestPaymentConfigurationV2 resource instead. RequestPayer pulumi.StringPtrInput // Configuration of [server-side encryption configuration](http://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html). See Server Side Encryption Configuration below for details. // The provider will only perform drift detection if a configuration value is provided. // Use the resource `s3.BucketServerSideEncryptionConfigurationV2` instead. // - // Deprecated: Use the s3.BucketServerSideEncryptionConfigurationV2 resource instead + // Deprecated: server_side_encryption_configuration is deprecated. Use the s3.BucketServerSideEncryptionConfigurationV2 resource instead. ServerSideEncryptionConfigurations BucketV2ServerSideEncryptionConfigurationArrayInput // Map of tags to assign to the bucket. If configured with a provider `defaultTags` configuration block present, tags with matching keys will overwrite those defined at the provider-level. // @@ -388,20 +388,20 @@ type BucketV2State struct { TagsAll pulumi.StringMapInput // Configuration of the [S3 bucket versioning state](https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html). See Versioning below for details. The provider will only perform drift detection if a configuration value is provided. Use the resource `s3.BucketVersioningV2` instead. // - // Deprecated: Use the s3.BucketVersioningV2 resource instead + // Deprecated: versioning is deprecated. Use the s3.BucketVersioningV2 resource instead. Versionings BucketV2VersioningArrayInput // (**Deprecated**) Domain of the website endpoint, if the bucket is configured with a website. If not, this will be an empty string. This is used to create Route 53 alias records. Use the resource `s3.BucketWebsiteConfigurationV2` instead. // - // Deprecated: Use the s3.BucketWebsiteConfigurationV2 resource + // Deprecated: website_domain is deprecated. Use the s3.BucketWebsiteConfigurationV2 resource instead. WebsiteDomain pulumi.StringPtrInput // (**Deprecated**) Website endpoint, if the bucket is configured with a website. If not, this will be an empty string. Use the resource `s3.BucketWebsiteConfigurationV2` instead. // - // Deprecated: Use the s3.BucketWebsiteConfigurationV2 resource + // Deprecated: website_endpoint is deprecated. Use the s3.BucketWebsiteConfigurationV2 resource instead. WebsiteEndpoint pulumi.StringPtrInput // Configuration of the [S3 bucket website](https://docs.aws.amazon.com/AmazonS3/latest/userguide/WebsiteHosting.html). See Website below for details. The provider will only perform drift detection if a configuration value is provided. // Use the resource `s3.BucketWebsiteConfigurationV2` instead. // - // Deprecated: Use the s3.BucketWebsiteConfigurationV2 resource instead + // Deprecated: website is deprecated. Use the s3.BucketWebsiteConfigurationV2 resource instead. Websites BucketV2WebsiteArrayInput } @@ -413,11 +413,11 @@ type bucketV2Args struct { // Sets the accelerate configuration of an existing bucket. Can be `Enabled` or `Suspended`. Cannot be used in `cn-north-1` or `us-gov-west-1`. This provider will only perform drift detection if a configuration value is provided. // Use the resource `s3.BucketAccelerateConfigurationV2` instead. // - // Deprecated: Use the s3.BucketAccelerateConfigurationV2 resource instead + // Deprecated: acceleration_status is deprecated. Use the s3.BucketAccelerateConfigurationV2 resource instead. 
AccelerationStatus *string `pulumi:"accelerationStatus"` // The [canned ACL](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl) to apply. Valid values are `private`, `public-read`, `public-read-write`, `aws-exec-read`, `authenticated-read`, and `log-delivery-write`. Defaults to `private`. Conflicts with `grant`. The provider will only perform drift detection if a configuration value is provided. Use the resource `s3.BucketAclV2` instead. // - // Deprecated: Use the s3.BucketAclV2 resource instead + // Deprecated: acl is deprecated. Use the s3.BucketAclV2 resource instead. Acl *string `pulumi:"acl"` // Name of the bucket. If omitted, the provider will assign a random, unique name. Must be lowercase and less than or equal to 63 characters in length. A full list of bucket naming rules [may be found here](https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html). The name must not be in the format `[bucketName]--[azid]--x-s3`. Use the `s3.DirectoryBucket` resource to manage S3 Express buckets. Bucket *string `pulumi:"bucket"` @@ -425,29 +425,29 @@ type bucketV2Args struct { BucketPrefix *string `pulumi:"bucketPrefix"` // Rule of [Cross-Origin Resource Sharing](https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html). See CORS rule below for details. This provider will only perform drift detection if a configuration value is provided. Use the resource `s3.BucketCorsConfigurationV2` instead. // - // Deprecated: Use the s3.BucketCorsConfigurationV2 resource instead + // Deprecated: cors_rule is deprecated. Use the s3.BucketCorsConfigurationV2 resource instead. CorsRules []BucketV2CorsRule `pulumi:"corsRules"` // Boolean that indicates all objects (including any [locked objects](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html)) should be deleted from the bucket *when the bucket is destroyed* so that the bucket can be destroyed without error. These objects are *not* recoverable. This only deletes objects when the bucket is destroyed, *not* when setting this parameter to `true`. Once this parameter is set to `true`, there must be a successful `pulumi up` run before a destroy is required to update this value in the resource state. Without a successful `pulumi up` after this parameter is set, this flag will have no effect. If setting this field in the same operation that would require replacing the bucket or destroying the bucket, this flag will not work. Additionally when importing a bucket, a successful `pulumi up` is required to set this value in state before it will take effect on a destroy operation. ForceDestroy *bool `pulumi:"forceDestroy"` // An [ACL policy grant](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#sample-acl). See Grant below for details. Conflicts with `acl`. The provider will only perform drift detection if a configuration value is provided. Use the resource `s3.BucketAclV2` instead. // - // Deprecated: Use the s3.BucketAclV2 resource instead + // Deprecated: grant is deprecated. Use the s3.BucketAclV2 resource instead. Grants []BucketV2Grant `pulumi:"grants"` // Configuration of [object lifecycle management](http://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html). See Lifecycle Rule below for details. The provider will only perform drift detection if a configuration value is provided. // Use the resource `s3.BucketLifecycleConfigurationV2` instead. 
// - // Deprecated: Use the s3.BucketLifecycleConfigurationV2 resource instead + // Deprecated: lifecycle_rule is deprecated. Use the s3.BucketLifecycleConfigurationV2 resource instead. LifecycleRules []BucketV2LifecycleRule `pulumi:"lifecycleRules"` // Configuration of [S3 bucket logging](https://docs.aws.amazon.com/AmazonS3/latest/UG/ManagingBucketLogging.html) parameters. See Logging below for details. The provider will only perform drift detection if a configuration value is provided. // Use the resource `s3.BucketLoggingV2` instead. // - // Deprecated: Use the s3.BucketLoggingV2 resource instead + // Deprecated: logging is deprecated. Use the s3.BucketLoggingV2 resource instead. Loggings []BucketV2Logging `pulumi:"loggings"` // Configuration of [S3 object locking](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). See Object Lock Configuration below for details. // The provider will only perform drift detection if a configuration value is provided. // Use the `objectLockEnabled` parameter and the resource `s3.BucketObjectLockConfigurationV2` instead. // - // Deprecated: Use the top-level parameter objectLockEnabled and the s3.BucketObjectLockConfigurationV2 resource instead + // Deprecated: object_lock_configuration is deprecated. Use the top-level parameter objectLockEnabled and the s3.BucketObjectLockConfigurationV2 resource instead. ObjectLockConfiguration *BucketV2ObjectLockConfiguration `pulumi:"objectLockConfiguration"` // Indicates whether this bucket has an Object Lock configuration enabled. Valid values are `true` or `false`. This argument is not supported in all regions or partitions. ObjectLockEnabled *bool `pulumi:"objectLockEnabled"` @@ -455,12 +455,12 @@ type bucketV2Args struct { // The provider will only perform drift detection if a configuration value is provided. // Use the resource `s3.BucketPolicy` instead. // - // Deprecated: Use the s3.BucketPolicy resource instead + // Deprecated: policy is deprecated. Use the s3.BucketPolicy resource instead. Policy *string `pulumi:"policy"` // Configuration of [replication configuration](http://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html). See Replication Configuration below for details. The provider will only perform drift detection if a configuration value is provided. // Use the resource `s3.BucketReplicationConfig` instead. // - // Deprecated: Use the s3.BucketReplicationConfig resource instead + // Deprecated: replication_configuration is deprecated. Use the s3.BucketReplicationConfig resource instead. ReplicationConfigurations []BucketV2ReplicationConfiguration `pulumi:"replicationConfigurations"` // Specifies who should bear the cost of Amazon S3 data transfer. // Can be either `BucketOwner` or `Requester`. By default, the owner of the S3 bucket would incur the costs of any data transfer. @@ -468,13 +468,13 @@ type bucketV2Args struct { // The provider will only perform drift detection if a configuration value is provided. // Use the resource `s3.BucketRequestPaymentConfigurationV2` instead. // - // Deprecated: Use the s3.BucketRequestPaymentConfigurationV2 resource instead + // Deprecated: request_payer is deprecated. Use the s3.BucketRequestPaymentConfigurationV2 resource instead. RequestPayer *string `pulumi:"requestPayer"` // Configuration of [server-side encryption configuration](http://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html). See Server Side Encryption Configuration below for details. // The provider will only perform drift detection if a configuration value is provided.
// Use the resource `s3.BucketServerSideEncryptionConfigurationV2` instead. // - // Deprecated: Use the s3.BucketServerSideEncryptionConfigurationV2 resource instead + // Deprecated: server_side_encryption_configuration is deprecated. Use the s3.BucketServerSideEncryptionConfigurationV2 resource instead. ServerSideEncryptionConfigurations []BucketV2ServerSideEncryptionConfiguration `pulumi:"serverSideEncryptionConfigurations"` // Map of tags to assign to the bucket. If configured with a provider `defaultTags` configuration block present, tags with matching keys will overwrite those defined at the provider-level. // @@ -482,12 +482,12 @@ type bucketV2Args struct { Tags map[string]string `pulumi:"tags"` // Configuration of the [S3 bucket versioning state](https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html). See Versioning below for details. The provider will only perform drift detection if a configuration value is provided. Use the resource `s3.BucketVersioningV2` instead. // - // Deprecated: Use the s3.BucketVersioningV2 resource instead + // Deprecated: versioning is deprecated. Use the s3.BucketVersioningV2 resource instead. Versionings []BucketV2Versioning `pulumi:"versionings"` // Configuration of the [S3 bucket website](https://docs.aws.amazon.com/AmazonS3/latest/userguide/WebsiteHosting.html). See Website below for details. The provider will only perform drift detection if a configuration value is provided. // Use the resource `s3.BucketWebsiteConfigurationV2` instead. // - // Deprecated: Use the s3.BucketWebsiteConfigurationV2 resource instead + // Deprecated: website is deprecated. Use the s3.BucketWebsiteConfigurationV2 resource instead. Websites []BucketV2Website `pulumi:"websites"` } @@ -496,11 +496,11 @@ type BucketV2Args struct { // Sets the accelerate configuration of an existing bucket. Can be `Enabled` or `Suspended`. Cannot be used in `cn-north-1` or `us-gov-west-1`. This provider will only perform drift detection if a configuration value is provided. // Use the resource `s3.BucketAccelerateConfigurationV2` instead. // - // Deprecated: Use the s3.BucketAccelerateConfigurationV2 resource instead + // Deprecated: acceleration_status is deprecated. Use the s3.BucketAccelerateConfigurationV2 resource instead. AccelerationStatus pulumi.StringPtrInput // The [canned ACL](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl) to apply. Valid values are `private`, `public-read`, `public-read-write`, `aws-exec-read`, `authenticated-read`, and `log-delivery-write`. Defaults to `private`. Conflicts with `grant`. The provider will only perform drift detection if a configuration value is provided. Use the resource `s3.BucketAclV2` instead. // - // Deprecated: Use the s3.BucketAclV2 resource instead + // Deprecated: acl is deprecated. Use the s3.BucketAclV2 resource instead. Acl pulumi.StringPtrInput // Name of the bucket. If omitted, the provider will assign a random, unique name. Must be lowercase and less than or equal to 63 characters in length. A full list of bucket naming rules [may be found here](https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html). The name must not be in the format `[bucketName]--[azid]--x-s3`. Use the `s3.DirectoryBucket` resource to manage S3 Express buckets. Bucket pulumi.StringPtrInput @@ -508,29 +508,29 @@ type BucketV2Args struct { BucketPrefix pulumi.StringPtrInput // Rule of [Cross-Origin Resource Sharing](https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html). 
See CORS rule below for details. This provider will only perform drift detection if a configuration value is provided. Use the resource `s3.BucketCorsConfigurationV2` instead. // - // Deprecated: Use the s3.BucketCorsConfigurationV2 resource instead + // Deprecated: cors_rule is deprecated. Use the s3.BucketCorsConfigurationV2 resource instead. CorsRules BucketV2CorsRuleArrayInput // Boolean that indicates all objects (including any [locked objects](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html)) should be deleted from the bucket *when the bucket is destroyed* so that the bucket can be destroyed without error. These objects are *not* recoverable. This only deletes objects when the bucket is destroyed, *not* when setting this parameter to `true`. Once this parameter is set to `true`, there must be a successful `pulumi up` run before a destroy is required to update this value in the resource state. Without a successful `pulumi up` after this parameter is set, this flag will have no effect. If setting this field in the same operation that would require replacing the bucket or destroying the bucket, this flag will not work. Additionally when importing a bucket, a successful `pulumi up` is required to set this value in state before it will take effect on a destroy operation. ForceDestroy pulumi.BoolPtrInput // An [ACL policy grant](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#sample-acl). See Grant below for details. Conflicts with `acl`. The provider will only perform drift detection if a configuration value is provided. Use the resource `s3.BucketAclV2` instead. // - // Deprecated: Use the s3.BucketAclV2 resource instead + // Deprecated: grant is deprecated. Use the s3.BucketAclV2 resource instead. Grants BucketV2GrantArrayInput // Configuration of [object lifecycle management](http://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html). See Lifecycle Rule below for details. The provider will only perform drift detection if a configuration value is provided. // Use the resource `s3.BucketLifecycleConfigurationV2` instead. // - // Deprecated: Use the s3.BucketLifecycleConfigurationV2 resource instead + // Deprecated: lifecycle_rule is deprecated. Use the s3.BucketLifecycleConfigurationV2 resource instead. LifecycleRules BucketV2LifecycleRuleArrayInput // Configuration of [S3 bucket logging](https://docs.aws.amazon.com/AmazonS3/latest/UG/ManagingBucketLogging.html) parameters. See Logging below for details. The provider will only perform drift detection if a configuration value is provided. // Use the resource `s3.BucketLoggingV2` instead. // - // Deprecated: Use the s3.BucketLoggingV2 resource instead + // Deprecated: logging is deprecated. Use the s3.BucketLoggingV2 resource instead. Loggings BucketV2LoggingArrayInput // Configuration of [S3 object locking](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). See Object Lock Configuration below for details. // The provider will only perform drift detection if a configuration value is provided. // Use the `objectLockEnabled` parameter and the resource `s3.BucketObjectLockConfigurationV2` instead. // - // Deprecated: Use the top-level parameter objectLockEnabled and the s3.BucketObjectLockConfigurationV2 resource instead + // Deprecated: object_lock_configuration is deprecated. Use the top-level parameter objectLockEnabled and the s3.BucketObjectLockConfigurationV2 resource instead.
ObjectLockConfiguration BucketV2ObjectLockConfigurationPtrInput // Indicates whether this bucket has an Object Lock configuration enabled. Valid values are `true` or `false`. This argument is not supported in all regions or partitions. ObjectLockEnabled pulumi.BoolPtrInput @@ -538,12 +538,12 @@ type BucketV2Args struct { // The provider will only perform drift detection if a configuration value is provided. // Use the resource `s3.BucketPolicy` instead. // - // Deprecated: Use the s3.BucketPolicy resource instead + // Deprecated: policy is deprecated. Use the s3.BucketPolicy resource instead. Policy pulumi.StringPtrInput // Configuration of [replication configuration](http://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html). See Replication Configuration below for details. The provider will only perform drift detection if a configuration value is provided. // Use the resource `s3.BucketReplicationConfig` instead. // - // Deprecated: Use the s3.BucketReplicationConfig resource instead + // Deprecated: replication_configuration is deprecated. Use the s3.BucketReplicationConfig resource instead. ReplicationConfigurations BucketV2ReplicationConfigurationArrayInput // Specifies who should bear the cost of Amazon S3 data transfer. // Can be either `BucketOwner` or `Requester`. By default, the owner of the S3 bucket would incur the costs of any data transfer. @@ -551,13 +551,13 @@ type BucketV2Args struct { // The provider will only perform drift detection if a configuration value is provided. // Use the resource `s3.BucketRequestPaymentConfigurationV2` instead. // - // Deprecated: Use the s3.BucketRequestPaymentConfigurationV2 resource instead + // Deprecated: request_payer is deprecated. Use the s3.BucketRequestPaymentConfigurationV2 resource instead. RequestPayer pulumi.StringPtrInput // Configuration of [server-side encryption configuration](http://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html). See Server Side Encryption Configuration below for details. // The provider will only perform drift detection if a configuration value is provided. // Use the resource `s3.BucketServerSideEncryptionConfigurationV2` instead. // - // Deprecated: Use the s3.BucketServerSideEncryptionConfigurationV2 resource instead + // Deprecated: server_side_encryption_configuration is deprecated. Use the s3.BucketServerSideEncryptionConfigurationV2 resource instead. ServerSideEncryptionConfigurations BucketV2ServerSideEncryptionConfigurationArrayInput // Map of tags to assign to the bucket. If configured with a provider `defaultTags` configuration block present, tags with matching keys will overwrite those defined at the provider-level. // @@ -565,12 +565,12 @@ type BucketV2Args struct { Tags pulumi.StringMapInput // Configuration of the [S3 bucket versioning state](https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html). See Versioning below for details. The provider will only perform drift detection if a configuration value is provided. Use the resource `s3.BucketVersioningV2` instead. // - // Deprecated: Use the s3.BucketVersioningV2 resource instead + // Deprecated: versioning is deprecated. Use the s3.BucketVersioningV2 resource instead. Versionings BucketV2VersioningArrayInput // Configuration of the [S3 bucket website](https://docs.aws.amazon.com/AmazonS3/latest/userguide/WebsiteHosting.html). See Website below for details. The provider will only perform drift detection if a configuration value is provided. // Use the resource `s3.BucketWebsiteConfigurationV2` instead. 
// - // Deprecated: Use the s3.BucketWebsiteConfigurationV2 resource instead + // Deprecated: website is deprecated. Use the s3.BucketWebsiteConfigurationV2 resource instead. Websites BucketV2WebsiteArrayInput } @@ -664,14 +664,14 @@ func (o BucketV2Output) ToBucketV2OutputWithContext(ctx context.Context) BucketV // Sets the accelerate configuration of an existing bucket. Can be `Enabled` or `Suspended`. Cannot be used in `cn-north-1` or `us-gov-west-1`. This provider will only perform drift detection if a configuration value is provided. // Use the resource `s3.BucketAccelerateConfigurationV2` instead. // -// Deprecated: Use the s3.BucketAccelerateConfigurationV2 resource instead +// Deprecated: acceleration_status is deprecated. Use the s3.BucketAccelerateConfigurationV2 resource instead. func (o BucketV2Output) AccelerationStatus() pulumi.StringOutput { return o.ApplyT(func(v *BucketV2) pulumi.StringOutput { return v.AccelerationStatus }).(pulumi.StringOutput) } // The [canned ACL](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl) to apply. Valid values are `private`, `public-read`, `public-read-write`, `aws-exec-read`, `authenticated-read`, and `log-delivery-write`. Defaults to `private`. Conflicts with `grant`. The provider will only perform drift detection if a configuration value is provided. Use the resource `s3.BucketAclV2` instead. // -// Deprecated: Use the s3.BucketAclV2 resource instead +// Deprecated: acl is deprecated. Use the s3.BucketAclV2 resource instead. func (o BucketV2Output) Acl() pulumi.StringOutput { return o.ApplyT(func(v *BucketV2) pulumi.StringOutput { return v.Acl }).(pulumi.StringOutput) } @@ -703,7 +703,7 @@ func (o BucketV2Output) BucketRegionalDomainName() pulumi.StringOutput { // Rule of [Cross-Origin Resource Sharing](https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html). See CORS rule below for details. This provider will only perform drift detection if a configuration value is provided. Use the resource `s3.BucketCorsConfigurationV2` instead. // -// Deprecated: Use the s3.BucketCorsConfigurationV2 resource instead +// Deprecated: cors_rule is deprecated. Use the s3.BucketCorsConfigurationV2 resource instead. func (o BucketV2Output) CorsRules() BucketV2CorsRuleArrayOutput { return o.ApplyT(func(v *BucketV2) BucketV2CorsRuleArrayOutput { return v.CorsRules }).(BucketV2CorsRuleArrayOutput) } @@ -715,7 +715,7 @@ func (o BucketV2Output) ForceDestroy() pulumi.BoolPtrOutput { // An [ACL policy grant](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#sample-acl). See Grant below for details. Conflicts with `acl`. The provider will only perform drift detection if a configuration value is provided. Use the resource `s3.BucketAclV2` instead. // -// Deprecated: Use the s3.BucketAclV2 resource instead +// Deprecated: grant is deprecated. Use the s3.BucketAclV2 resource instead. func (o BucketV2Output) Grants() BucketV2GrantArrayOutput { return o.ApplyT(func(v *BucketV2) BucketV2GrantArrayOutput { return v.Grants }).(BucketV2GrantArrayOutput) } @@ -728,7 +728,7 @@ func (o BucketV2Output) HostedZoneId() pulumi.StringOutput { // Configuration of [object lifecycle management](http://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html). See Lifecycle Rule below for details. The provider will only perform drift detection if a configuration value is provided. // Use the resource `s3.BucketLifecycleConfigurationV2` instead. 
// -// Deprecated: Use the s3.BucketLifecycleConfigurationV2 resource instead +// Deprecated: lifecycle_rule is deprecated. Use the s3.BucketLifecycleConfigurationV2 resource instead. func (o BucketV2Output) LifecycleRules() BucketV2LifecycleRuleArrayOutput { return o.ApplyT(func(v *BucketV2) BucketV2LifecycleRuleArrayOutput { return v.LifecycleRules }).(BucketV2LifecycleRuleArrayOutput) } @@ -736,7 +736,7 @@ func (o BucketV2Output) LifecycleRules() BucketV2LifecycleRuleArrayOutput { // Configuration of [S3 bucket logging](https://docs.aws.amazon.com/AmazonS3/latest/UG/ManagingBucketLogging.html) parameters. See Logging below for details. The provider will only perform drift detection if a configuration value is provided. // Use the resource `s3.BucketLoggingV2` instead. // -// Deprecated: Use the s3.BucketLoggingV2 resource instead +// Deprecated: logging is deprecated. Use the s3.BucketLoggingV2 resource instead. func (o BucketV2Output) Loggings() BucketV2LoggingArrayOutput { return o.ApplyT(func(v *BucketV2) BucketV2LoggingArrayOutput { return v.Loggings }).(BucketV2LoggingArrayOutput) } @@ -745,7 +745,7 @@ func (o BucketV2Output) Loggings() BucketV2LoggingArrayOutput { // The provider will only perform drift detection if a configuration value is provided. // Use the `objectLockEnabled` parameter and the resource `s3.BucketObjectLockConfigurationV2` instead. // -// Deprecated: Use the top-level parameter objectLockEnabled and the s3.BucketObjectLockConfigurationV2 resource instead +// Deprecated: object_lock_configuration is deprecated. Use the top-level parameter objectLockEnabled and the s3.BucketObjectLockConfigurationV2 resource instead. func (o BucketV2Output) ObjectLockConfiguration() BucketV2ObjectLockConfigurationOutput { return o.ApplyT(func(v *BucketV2) BucketV2ObjectLockConfigurationOutput { return v.ObjectLockConfiguration }).(BucketV2ObjectLockConfigurationOutput) } @@ -759,7 +759,7 @@ func (o BucketV2Output) ObjectLockEnabled() pulumi.BoolOutput { // The provider will only perform drift detection if a configuration value is provided. // Use the resource `s3.BucketPolicy` instead. // -// Deprecated: Use the s3.BucketPolicy resource instead +// Deprecated: policy is deprecated. Use the s3.BucketPolicy resource instead. func (o BucketV2Output) Policy() pulumi.StringOutput { return o.ApplyT(func(v *BucketV2) pulumi.StringOutput { return v.Policy }).(pulumi.StringOutput) } @@ -772,7 +772,7 @@ func (o BucketV2Output) Region() pulumi.StringOutput { // Configuration of [replication configuration](http://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html). See Replication Configuration below for details. The provider will only perform drift detection if a configuration value is provided. // Use the resource `s3.BucketReplicationConfig` instead. // -// Deprecated: Use the s3.BucketReplicationConfig resource instead +// Deprecated: replication_configuration is deprecated. Use the s3.BucketReplicationConfig resource instead. func (o BucketV2Output) ReplicationConfigurations() BucketV2ReplicationConfigurationArrayOutput { return o.ApplyT(func(v *BucketV2) BucketV2ReplicationConfigurationArrayOutput { return v.ReplicationConfigurations }).(BucketV2ReplicationConfigurationArrayOutput) } @@ -783,7 +783,7 @@ func (o BucketV2Output) ReplicationConfigurations() BucketV2ReplicationConfigura // The provider will only perform drift detection if a configuration value is provided. // Use the resource `s3.BucketRequestPaymentConfigurationV2` instead.
// -// Deprecated: Use the s3.BucketRequestPaymentConfigurationV2 resource instead +// Deprecated: request_payer is deprecated. Use the s3.BucketRequestPaymentConfigurationV2 resource instead. func (o BucketV2Output) RequestPayer() pulumi.StringOutput { return o.ApplyT(func(v *BucketV2) pulumi.StringOutput { return v.RequestPayer }).(pulumi.StringOutput) } @@ -792,7 +792,7 @@ func (o BucketV2Output) RequestPayer() pulumi.StringOutput { // The provider will only perform drift detection if a configuration value is provided. // Use the resource `s3.BucketServerSideEncryptionConfigurationV2` instead. // -// Deprecated: Use the s3.BucketServerSideEncryptionConfigurationV2 resource instead +// Deprecated: server_side_encryption_configuration is deprecated. Use the s3.BucketServerSideEncryptionConfigurationV2 resource instead. func (o BucketV2Output) ServerSideEncryptionConfigurations() BucketV2ServerSideEncryptionConfigurationArrayOutput { return o.ApplyT(func(v *BucketV2) BucketV2ServerSideEncryptionConfigurationArrayOutput { return v.ServerSideEncryptionConfigurations @@ -815,21 +815,21 @@ func (o BucketV2Output) TagsAll() pulumi.StringMapOutput { // Configuration of the [S3 bucket versioning state](https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html). See Versioning below for details. The provider will only perform drift detection if a configuration value is provided. Use the resource `s3.BucketVersioningV2` instead. // -// Deprecated: Use the s3.BucketVersioningV2 resource instead +// Deprecated: versioning is deprecated. Use the s3.BucketVersioningV2 resource instead. func (o BucketV2Output) Versionings() BucketV2VersioningArrayOutput { return o.ApplyT(func(v *BucketV2) BucketV2VersioningArrayOutput { return v.Versionings }).(BucketV2VersioningArrayOutput) } // (**Deprecated**) Domain of the website endpoint, if the bucket is configured with a website. If not, this will be an empty string. This is used to create Route 53 alias records. Use the resource `s3.BucketWebsiteConfigurationV2` instead. // -// Deprecated: Use the s3.BucketWebsiteConfigurationV2 resource +// Deprecated: website_domain is deprecated. Use the s3.BucketWebsiteConfigurationV2 resource instead. func (o BucketV2Output) WebsiteDomain() pulumi.StringOutput { return o.ApplyT(func(v *BucketV2) pulumi.StringOutput { return v.WebsiteDomain }).(pulumi.StringOutput) } // (**Deprecated**) Website endpoint, if the bucket is configured with a website. If not, this will be an empty string. Use the resource `s3.BucketWebsiteConfigurationV2` instead. // -// Deprecated: Use the s3.BucketWebsiteConfigurationV2 resource +// Deprecated: website_endpoint is deprecated. Use the s3.BucketWebsiteConfigurationV2 resource instead. func (o BucketV2Output) WebsiteEndpoint() pulumi.StringOutput { return o.ApplyT(func(v *BucketV2) pulumi.StringOutput { return v.WebsiteEndpoint }).(pulumi.StringOutput) } @@ -837,7 +837,7 @@ func (o BucketV2Output) WebsiteEndpoint() pulumi.StringOutput { // Configuration of the [S3 bucket website](https://docs.aws.amazon.com/AmazonS3/latest/userguide/WebsiteHosting.html). See Website below for details. The provider will only perform drift detection if a configuration value is provided. // Use the resource `s3.BucketWebsiteConfigurationV2` instead. // -// Deprecated: Use the s3.BucketWebsiteConfigurationV2 resource instead +// Deprecated: website is deprecated. Use the s3.BucketWebsiteConfigurationV2 resource instead. 
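For anyone migrating off these deprecated inline arguments, the pattern the comments point to is the same in every case: create the `BucketV2` without the inline block, then attach a standalone configuration resource that references the bucket. A minimal sketch for the `versionings` case, assuming hypothetical bucket and resource names (the resource and argument types come from this SDK):

```go
package main

import (
	"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/s3"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Create the bucket without the deprecated inline `versionings` argument.
		bucket, err := s3.NewBucketV2(ctx, "example", &s3.BucketV2Args{
			Bucket: pulumi.String("example-bucket"), // hypothetical name
		})
		if err != nil {
			return err
		}
		// Attach versioning through the standalone resource the deprecation
		// notice recommends, referencing the bucket by its ID (the bucket name).
		_, err = s3.NewBucketVersioningV2(ctx, "example-versioning", &s3.BucketVersioningV2Args{
			Bucket: bucket.ID(),
			VersioningConfiguration: &s3.BucketVersioningV2VersioningConfigurationArgs{
				Status: pulumi.String("Enabled"),
			},
		})
		return err
	})
}
```

The same shape applies to `s3.BucketLoggingV2`, `s3.BucketAclV2`, `s3.BucketPolicy`, and the other replacement resources named above.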
func (o BucketV2Output) Websites() BucketV2WebsiteArrayOutput { return o.ApplyT(func(v *BucketV2) BucketV2WebsiteArrayOutput { return v.Websites }).(BucketV2WebsiteArrayOutput) } diff --git a/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/s3/directoryBucket.go b/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/s3/directoryBucket.go index a596602fc..581fdef5a 100644 --- a/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/s3/directoryBucket.go +++ b/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/s3/directoryBucket.go @@ -16,6 +16,8 @@ import ( // // ## Example Usage // +// ### Availability Zone +// // ```go // package main // @@ -43,6 +45,36 @@ import ( // // ``` // +// ### Dedicated Local Zone +// +// ```go +// package main +// +// import ( +// +// "github.com/pulumi/pulumi-aws/sdk/v6/go/aws/s3" +// "github.com/pulumi/pulumi/sdk/v3/go/pulumi" +// +// ) +// +// func main() { +// pulumi.Run(func(ctx *pulumi.Context) error { +// _, err := s3.NewDirectoryBucket(ctx, "example_local_zone", &s3.DirectoryBucketArgs{ +// Bucket: pulumi.String("example--usw2-xxx-lz1--x-s3"), +// Location: &s3.DirectoryBucketLocationArgs{ +// Name: pulumi.String("usw2-xxx-lz1"), +// Type: pulumi.String("LocalZone"), +// }, +// }) +// if err != nil { +// return err +// } +// return nil +// }) +// } +// +// ``` +// // ## Import // // Using `pulumi import`, import S3 bucket using `bucket`. For example: @@ -57,7 +89,7 @@ type DirectoryBucket struct { Arn pulumi.StringOutput `pulumi:"arn"` // Name of the bucket. The name must be in the format `[bucketName]--[azid]--x-s3`. Use the `s3.BucketV2` resource to manage general purpose buckets. Bucket pulumi.StringOutput `pulumi:"bucket"` - // Data redundancy. Valid values: `SingleAvailabilityZone`. + // Data redundancy. Valid values: `SingleAvailabilityZone`, `SingleLocalZone`. The default value depends on the value of the `location.type` attribute. DataRedundancy pulumi.StringOutput `pulumi:"dataRedundancy"` // Boolean that indicates all objects should be deleted from the bucket *when the bucket is destroyed* so that the bucket can be destroyed without error. These objects are *not* recoverable. This only deletes objects when the bucket is destroyed, *not* when setting this parameter to `true`. Once this parameter is set to `true`, there must be a successful `pulumi up` run before a destroy is required to update this value in the resource state. Without a successful `pulumi up` after this parameter is set, this flag will have no effect. If setting this field in the same operation that would require replacing the bucket or destroying the bucket, this flag will not work. Additionally when importing a bucket, a successful `pulumi up` is required to set this value in state before it will take effect on a destroy operation. ForceDestroy pulumi.BoolOutput `pulumi:"forceDestroy"` @@ -104,7 +136,7 @@ type directoryBucketState struct { Arn *string `pulumi:"arn"` // Name of the bucket. The name must be in the format `[bucketName]--[azid]--x-s3`. Use the `s3.BucketV2` resource to manage general purpose buckets. Bucket *string `pulumi:"bucket"` - // Data redundancy. Valid values: `SingleAvailabilityZone`. + // Data redundancy. Valid values: `SingleAvailabilityZone`, `SingleLocalZone`. The default value depends on the value of the `location.type` attribute. DataRedundancy *string `pulumi:"dataRedundancy"` // Boolean that indicates all objects should be deleted from the bucket *when the bucket is destroyed* so that the bucket can be destroyed without error. 
These objects are *not* recoverable. This only deletes objects when the bucket is destroyed, *not* when setting this parameter to `true`. Once this parameter is set to `true`, there must be a successful `pulumi up` run before a destroy is required to update this value in the resource state. Without a successful `pulumi up` after this parameter is set, this flag will have no effect. If setting this field in the same operation that would require replacing the bucket or destroying the bucket, this flag will not work. Additionally when importing a bucket, a successful `pulumi up` is required to set this value in state before it will take effect on a destroy operation. ForceDestroy *bool `pulumi:"forceDestroy"` @@ -119,7 +151,7 @@ type DirectoryBucketState struct { Arn pulumi.StringPtrInput // Name of the bucket. The name must be in the format `[bucketName]--[azid]--x-s3`. Use the `s3.BucketV2` resource to manage general purpose buckets. Bucket pulumi.StringPtrInput - // Data redundancy. Valid values: `SingleAvailabilityZone`. + // Data redundancy. Valid values: `SingleAvailabilityZone`, `SingleLocalZone`. The default value depends on the value of the `location.type` attribute. DataRedundancy pulumi.StringPtrInput // Boolean that indicates all objects should be deleted from the bucket *when the bucket is destroyed* so that the bucket can be destroyed without error. These objects are *not* recoverable. This only deletes objects when the bucket is destroyed, *not* when setting this parameter to `true`. Once this parameter is set to `true`, there must be a successful `pulumi up` run before a destroy is required to update this value in the resource state. Without a successful `pulumi up` after this parameter is set, this flag will have no effect. If setting this field in the same operation that would require replacing the bucket or destroying the bucket, this flag will not work. Additionally when importing a bucket, a successful `pulumi up` is required to set this value in state before it will take effect on a destroy operation. ForceDestroy pulumi.BoolPtrInput @@ -136,7 +168,7 @@ func (DirectoryBucketState) ElementType() reflect.Type { type directoryBucketArgs struct { // Name of the bucket. The name must be in the format `[bucketName]--[azid]--x-s3`. Use the `s3.BucketV2` resource to manage general purpose buckets. Bucket string `pulumi:"bucket"` - // Data redundancy. Valid values: `SingleAvailabilityZone`. + // Data redundancy. Valid values: `SingleAvailabilityZone`, `SingleLocalZone`. The default value depends on the value of the `location.type` attribute. DataRedundancy *string `pulumi:"dataRedundancy"` // Boolean that indicates all objects should be deleted from the bucket *when the bucket is destroyed* so that the bucket can be destroyed without error. These objects are *not* recoverable. This only deletes objects when the bucket is destroyed, *not* when setting this parameter to `true`. Once this parameter is set to `true`, there must be a successful `pulumi up` run before a destroy is required to update this value in the resource state. Without a successful `pulumi up` after this parameter is set, this flag will have no effect. If setting this field in the same operation that would require replacing the bucket or destroying the bucket, this flag will not work. Additionally when importing a bucket, a successful `pulumi up` is required to set this value in state before it will take effect on a destroy operation. 
ForceDestroy *bool `pulumi:"forceDestroy"` @@ -150,7 +182,7 @@ type directoryBucketArgs struct { type DirectoryBucketArgs struct { // Name of the bucket. The name must be in the format `[bucketName]--[azid]--x-s3`. Use the `s3.BucketV2` resource to manage general purpose buckets. Bucket pulumi.StringInput - // Data redundancy. Valid values: `SingleAvailabilityZone`. + // Data redundancy. Valid values: `SingleAvailabilityZone`, `SingleLocalZone`. The default value depends on the value of the `location.type` attribute. DataRedundancy pulumi.StringPtrInput // Boolean that indicates all objects should be deleted from the bucket *when the bucket is destroyed* so that the bucket can be destroyed without error. These objects are *not* recoverable. This only deletes objects when the bucket is destroyed, *not* when setting this parameter to `true`. Once this parameter is set to `true`, there must be a successful `pulumi up` run before a destroy is required to update this value in the resource state. Without a successful `pulumi up` after this parameter is set, this flag will have no effect. If setting this field in the same operation that would require replacing the bucket or destroying the bucket, this flag will not work. Additionally when importing a bucket, a successful `pulumi up` is required to set this value in state before it will take effect on a destroy operation. ForceDestroy pulumi.BoolPtrInput @@ -257,7 +289,7 @@ func (o DirectoryBucketOutput) Bucket() pulumi.StringOutput { return o.ApplyT(func(v *DirectoryBucket) pulumi.StringOutput { return v.Bucket }).(pulumi.StringOutput) } -// Data redundancy. Valid values: `SingleAvailabilityZone`. +// Data redundancy. Valid values: `SingleAvailabilityZone`, `SingleLocalZone`. The default value depends on the value of the `location.type` attribute. func (o DirectoryBucketOutput) DataRedundancy() pulumi.StringOutput { return o.ApplyT(func(v *DirectoryBucket) pulumi.StringOutput { return v.DataRedundancy }).(pulumi.StringOutput) } diff --git a/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/s3/getBucketObject.go b/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/s3/getBucketObject.go index 58e044856..61fdfaa7d 100644 --- a/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/s3/getBucketObject.go +++ b/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/s3/getBucketObject.go @@ -127,7 +127,7 @@ func LookupBucketObject(ctx *pulumi.Context, args *LookupBucketObjectArgs, opts type LookupBucketObjectArgs struct { // Name of the bucket to read the object from. Alternatively, an [S3 access point](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) ARN can be specified // - // Deprecated: Use the s3.BucketObjectv2 data source instead + // Deprecated: bucket is deprecated. Use the s3.BucketObjectv2 data source instead. Bucket string `pulumi:"bucket"` // Full path to the object inside the bucket Key string `pulumi:"key"` @@ -143,7 +143,7 @@ type LookupBucketObjectResult struct { Arn string `pulumi:"arn"` // Object data (see **limitations above** to understand cases in which this field is actually available) Body string `pulumi:"body"` - // Deprecated: Use the s3.BucketObjectv2 data source instead + // Deprecated: bucket is deprecated. Use the s3.BucketObjectv2 data source instead. Bucket string `pulumi:"bucket"` // (Optional) Whether or not to use [Amazon S3 Bucket Keys](https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) for SSE-KMS. 
BucketKeyEnabled bool `pulumi:"bucketKeyEnabled"` @@ -206,7 +206,7 @@ func LookupBucketObjectOutput(ctx *pulumi.Context, args LookupBucketObjectOutput type LookupBucketObjectOutputArgs struct { // Name of the bucket to read the object from. Alternatively, an [S3 access point](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) ARN can be specified // - // Deprecated: Use the s3.BucketObjectv2 data source instead + // Deprecated: bucket is deprecated. Use the s3.BucketObjectv2 data source instead. Bucket pulumi.StringInput `pulumi:"bucket"` // Full path to the object inside the bucket Key pulumi.StringInput `pulumi:"key"` @@ -245,7 +245,7 @@ func (o LookupBucketObjectResultOutput) Body() pulumi.StringOutput { return o.ApplyT(func(v LookupBucketObjectResult) string { return v.Body }).(pulumi.StringOutput) } -// Deprecated: Use the s3.BucketObjectv2 data source instead +// Deprecated: bucket is deprecated. Use the s3.BucketObjectv2 data source instead. func (o LookupBucketObjectResultOutput) Bucket() pulumi.StringOutput { return o.ApplyT(func(v LookupBucketObjectResult) string { return v.Bucket }).(pulumi.StringOutput) } diff --git a/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/s3/getBucketObjects.go b/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/s3/getBucketObjects.go index 5b1ee3d4e..1c8dc9d87 100644 --- a/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/s3/getBucketObjects.go +++ b/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/s3/getBucketObjects.go @@ -30,7 +30,7 @@ func GetBucketObjects(ctx *pulumi.Context, args *GetBucketObjectsArgs, opts ...p type GetBucketObjectsArgs struct { // Lists object keys in this S3 bucket. Alternatively, an [S3 access point](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) ARN can be specified // - // Deprecated: Use the s3.getObjects data source instead + // Deprecated: bucket is deprecated. Use the s3.getObjects data source instead. Bucket string `pulumi:"bucket"` // Character used to group keys (Default: none) Delimiter *string `pulumi:"delimiter"` @@ -48,7 +48,7 @@ type GetBucketObjectsArgs struct { // A collection of values returned by getBucketObjects. type GetBucketObjectsResult struct { - // Deprecated: Use the s3.getObjects data source instead + // Deprecated: bucket is deprecated. Use the s3.getObjects data source instead. Bucket string `pulumi:"bucket"` // List of any keys between `prefix` and the next occurrence of `delimiter` (i.e., similar to subdirectories of the `prefix` "directory"); the list is only returned when you specify `delimiter` CommonPrefixes []string `pulumi:"commonPrefixes"` @@ -79,7 +79,7 @@ func GetBucketObjectsOutput(ctx *pulumi.Context, args GetBucketObjectsOutputArgs type GetBucketObjectsOutputArgs struct { // Lists object keys in this S3 bucket. Alternatively, an [S3 access point](https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) ARN can be specified // - // Deprecated: Use the s3.getObjects data source instead + // Deprecated: bucket is deprecated. Use the s3.getObjects data source instead. Bucket pulumi.StringInput `pulumi:"bucket"` // Character used to group keys (Default: none) Delimiter pulumi.StringPtrInput `pulumi:"delimiter"` @@ -114,7 +114,7 @@ func (o GetBucketObjectsResultOutput) ToGetBucketObjectsResultOutputWithContext( return o } -// Deprecated: Use the s3.getObjects data source instead +// Deprecated: bucket is deprecated. Use the s3.getObjects data source instead. 
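The reworded deprecation notices above keep pointing at the same replacements: the object-based data sources instead of the bucket-object ones. A sketch of the migration inside a `pulumi.Run` callback, reusing the imports from the earlier sketch; the bucket and key are placeholders, and the `LookupObject`/`GetObjects` shapes are assumed to mirror the generated `LookupBucketObject`/`GetBucketObjects` counterparts shown in this diff:

```go
// Deprecated path: s3.LookupBucketObject / s3.GetBucketObjects.
// Suggested path: s3.LookupObject / s3.GetObjects.
obj, err := s3.LookupObject(ctx, &s3.LookupObjectArgs{
	Bucket: "my-bucket",       // placeholder
	Key:    "config/app.yaml", // placeholder
})
if err != nil {
	return err
}
ctx.Export("contentType", pulumi.String(obj.ContentType))

objects, err := s3.GetObjects(ctx, &s3.GetObjectsArgs{
	Bucket: "my-bucket", // placeholder
})
if err != nil {
	return err
}
ctx.Export("keyCount", pulumi.Int(len(objects.Keys)))
```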
func (o GetBucketObjectsResultOutput) Bucket() pulumi.StringOutput { return o.ApplyT(func(v GetBucketObjectsResult) string { return v.Bucket }).(pulumi.StringOutput) } diff --git a/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/s3/getObject.go b/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/s3/getObject.go index 402222c42..4e3fb5ca9 100644 --- a/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/s3/getObject.go +++ b/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/s3/getObject.go @@ -26,6 +26,7 @@ import ( // * `application/xml` // * `application/atom+xml` // * `application/x-sql` +// * `application/yaml` // // This is to prevent printing unsafe characters and potentially downloading a large amount of data which would be thrown away in favor of metadata. // @@ -150,8 +151,10 @@ type GetObjectResult struct { // The base64-encoded, 32-bit CRC32 checksum of the object. ChecksumCrc32 string `pulumi:"checksumCrc32"` // The base64-encoded, 32-bit CRC32C checksum of the object. - ChecksumCrc32c string `pulumi:"checksumCrc32c"` - ChecksumMode *string `pulumi:"checksumMode"` + ChecksumCrc32c string `pulumi:"checksumCrc32c"` + // The base64-encoded, 64-bit CRC64NVME checksum of the object. + ChecksumCrc64nvme string `pulumi:"checksumCrc64nvme"` + ChecksumMode *string `pulumi:"checksumMode"` // The base64-encoded, 160-bit SHA-1 digest of the object. ChecksumSha1 string `pulumi:"checksumSha1"` // The base64-encoded, 256-bit SHA-256 digest of the object. @@ -277,6 +280,11 @@ func (o GetObjectResultOutput) ChecksumCrc32c() pulumi.StringOutput { return o.ApplyT(func(v GetObjectResult) string { return v.ChecksumCrc32c }).(pulumi.StringOutput) } +// The base64-encoded, 64-bit CRC64NVME checksum of the object. +func (o GetObjectResultOutput) ChecksumCrc64nvme() pulumi.StringOutput { + return o.ApplyT(func(v GetObjectResult) string { return v.ChecksumCrc64nvme }).(pulumi.StringOutput) +} + func (o GetObjectResultOutput) ChecksumMode() pulumi.StringPtrOutput { return o.ApplyT(func(v GetObjectResult) *string { return v.ChecksumMode }).(pulumi.StringPtrOutput) } diff --git a/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/s3/objectCopy.go b/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/s3/objectCopy.go index 982610a65..e06ff995c 100644 --- a/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/s3/objectCopy.go +++ b/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/s3/objectCopy.go @@ -98,12 +98,14 @@ type ObjectCopy struct { BucketKeyEnabled pulumi.BoolOutput `pulumi:"bucketKeyEnabled"` // Specifies caching behavior along the request/reply chain. Read [w3c cacheControl](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9) for further details. CacheControl pulumi.StringOutput `pulumi:"cacheControl"` - // Indicates the algorithm used to create the checksum for the object. If a value is specified and the object is encrypted with KMS, you must have permission to use the `kms:Decrypt` action. Valid values: `CRC32`, `CRC32C`, `SHA1`, `SHA256`. + // Indicates the algorithm used to create the checksum for the object. If a value is specified and the object is encrypted with KMS, you must have permission to use the `kms:Decrypt` action. Valid values: `CRC32`, `CRC32C`, `CRC64NVME`, `SHA1`, `SHA256`. ChecksumAlgorithm pulumi.StringPtrOutput `pulumi:"checksumAlgorithm"` // The base64-encoded, 32-bit CRC32 checksum of the object. ChecksumCrc32 pulumi.StringOutput `pulumi:"checksumCrc32"` // The base64-encoded, 32-bit CRC32C checksum of the object.
ChecksumCrc32c pulumi.StringOutput `pulumi:"checksumCrc32c"` + // The base64-encoded, 64-bit CRC64NVME checksum of the object. + ChecksumCrc64nvme pulumi.StringOutput `pulumi:"checksumCrc64nvme"` // The base64-encoded, 160-bit SHA-1 digest of the object. ChecksumSha1 pulumi.StringOutput `pulumi:"checksumSha1"` // The base64-encoded, 256-bit SHA-256 digest of the object. @@ -264,12 +266,14 @@ type objectCopyState struct { BucketKeyEnabled *bool `pulumi:"bucketKeyEnabled"` // Specifies caching behavior along the request/reply chain. Read [w3c cacheControl](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9) for further details. CacheControl *string `pulumi:"cacheControl"` - // Indicates the algorithm used to create the checksum for the object. If a value is specified and the object is encrypted with KMS, you must have permission to use the `kms:Decrypt` action. Valid values: `CRC32`, `CRC32C`, `SHA1`, `SHA256`. + // Indicates the algorithm used to create the checksum for the object. If a value is specified and the object is encrypted with KMS, you must have permission to use the `kms:Decrypt` action. Valid values: `CRC32`, `CRC32C`, `CRC64NVME`, `SHA1`, `SHA256`. ChecksumAlgorithm *string `pulumi:"checksumAlgorithm"` // The base64-encoded, 32-bit CRC32 checksum of the object. ChecksumCrc32 *string `pulumi:"checksumCrc32"` // The base64-encoded, 32-bit CRC32C checksum of the object. ChecksumCrc32c *string `pulumi:"checksumCrc32c"` + // The base64-encoded, 64-bit CRC64NVME checksum of the object. + ChecksumCrc64nvme *string `pulumi:"checksumCrc64nvme"` // The base64-encoded, 160-bit SHA-1 digest of the object. ChecksumSha1 *string `pulumi:"checksumSha1"` // The base64-encoded, 256-bit SHA-256 digest of the object. @@ -373,12 +377,14 @@ type ObjectCopyState struct { BucketKeyEnabled pulumi.BoolPtrInput // Specifies caching behavior along the request/reply chain. Read [w3c cacheControl](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9) for further details. CacheControl pulumi.StringPtrInput - // Indicates the algorithm used to create the checksum for the object. If a value is specified and the object is encrypted with KMS, you must have permission to use the `kms:Decrypt` action. Valid values: `CRC32`, `CRC32C`, `SHA1`, `SHA256`. + // Indicates the algorithm used to create the checksum for the object. If a value is specified and the object is encrypted with KMS, you must have permission to use the `kms:Decrypt` action. Valid values: `CRC32`, `CRC32C`, `CRC64NVME`, `SHA1`, `SHA256`. ChecksumAlgorithm pulumi.StringPtrInput // The base64-encoded, 32-bit CRC32 checksum of the object. ChecksumCrc32 pulumi.StringPtrInput // The base64-encoded, 32-bit CRC32C checksum of the object. ChecksumCrc32c pulumi.StringPtrInput + // The base64-encoded, 64-bit CRC64NVME checksum of the object. + ChecksumCrc64nvme pulumi.StringPtrInput // The base64-encoded, 160-bit SHA-1 digest of the object. ChecksumSha1 pulumi.StringPtrInput // The base64-encoded, 256-bit SHA-256 digest of the object. @@ -484,7 +490,7 @@ type objectCopyArgs struct { BucketKeyEnabled *bool `pulumi:"bucketKeyEnabled"` // Specifies caching behavior along the request/reply chain. Read [w3c cacheControl](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9) for further details. CacheControl *string `pulumi:"cacheControl"` - // Indicates the algorithm used to create the checksum for the object. If a value is specified and the object is encrypted with KMS, you must have permission to use the `kms:Decrypt` action.
Valid values: `CRC32`, `CRC32C`, `SHA1`, `SHA256`. + // Indicates the algorithm used to create the checksum for the object. If a value is specified and the object is encrypted with KMS, you must have permission to use the `kms:Decrypt` action. Valid values: `CRC32`, `CRC32C`, `CRC64NVME`, `SHA1`, `SHA256`. ChecksumAlgorithm *string `pulumi:"checksumAlgorithm"` // Specifies presentational information for the object. Read [w3c contentDisposition](http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1) for further information. ContentDisposition *string `pulumi:"contentDisposition"` @@ -568,7 +574,7 @@ type ObjectCopyArgs struct { BucketKeyEnabled pulumi.BoolPtrInput // Specifies caching behavior along the request/reply chain. Read [w3c cacheControl](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9) for further details. CacheControl pulumi.StringPtrInput - // Indicates the algorithm used to create the checksum for the object. If a value is specified and the object is encrypted with KMS, you must have permission to use the `kms:Decrypt` action. Valid values: `CRC32`, `CRC32C`, `SHA1`, `SHA256`. + // Indicates the algorithm used to create the checksum for the object. If a value is specified and the object is encrypted with KMS, you must have permission to use the `kms:Decrypt` action. Valid values: `CRC32`, `CRC32C`, `CRC64NVME`, `SHA1`, `SHA256`. ChecksumAlgorithm pulumi.StringPtrInput // Specifies presentational information for the object. Read [w3c contentDisposition](http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1) for further information. ContentDisposition pulumi.StringPtrInput @@ -754,7 +760,7 @@ func (o ObjectCopyOutput) CacheControl() pulumi.StringOutput { return o.ApplyT(func(v *ObjectCopy) pulumi.StringOutput { return v.CacheControl }).(pulumi.StringOutput) } -// Indicates the algorithm used to create the checksum for the object. If a value is specified and the object is encrypted with KMS, you must have permission to use the `kms:Decrypt` action. Valid values: `CRC32`, `CRC32C`, `SHA1`, `SHA256`. +// Indicates the algorithm used to create the checksum for the object. If a value is specified and the object is encrypted with KMS, you must have permission to use the `kms:Decrypt` action. Valid values: `CRC32`, `CRC32C`, `CRC64NVME`, `SHA1`, `SHA256`. func (o ObjectCopyOutput) ChecksumAlgorithm() pulumi.StringPtrOutput { return o.ApplyT(func(v *ObjectCopy) pulumi.StringPtrOutput { return v.ChecksumAlgorithm }).(pulumi.StringPtrOutput) } @@ -769,6 +775,11 @@ func (o ObjectCopyOutput) ChecksumCrc32c() pulumi.StringOutput { return o.ApplyT(func(v *ObjectCopy) pulumi.StringOutput { return v.ChecksumCrc32c }).(pulumi.StringOutput) } +// The base64-encoded, 64-bit CRC64NVME checksum of the object. +func (o ObjectCopyOutput) ChecksumCrc64nvme() pulumi.StringOutput { + return o.ApplyT(func(v *ObjectCopy) pulumi.StringOutput { return v.ChecksumCrc64nvme }).(pulumi.StringOutput) +} + // The base64-encoded, 160-bit SHA-1 digest of the object.
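The getObject.go hunks above add `application/yaml` to the body-returning content types and a `ChecksumCrc64nvme` result field, and objectCopy.go adds `CRC64NVME` to the accepted `checksumAlgorithm` values plus a matching output. A sketch wiring the two together, continuing the same `pulumi.Run` body; bucket, key, and source names are placeholders, `Source` is part of the resource's existing surface rather than this hunk, and the `ChecksumMode` lookup argument is an assumption mirroring the S3 API's checksum-mode switch:

```go
// Request the new checksum on a copy...
objCopy, err := s3.NewObjectCopy(ctx, "copy", &s3.ObjectCopyArgs{
	Bucket:            pulumi.String("destination-bucket"),       // placeholder
	Key:               pulumi.String("copied/object.bin"),        // placeholder
	Source:            pulumi.String("source-bucket/object.bin"), // placeholder
	ChecksumAlgorithm: pulumi.String("CRC64NVME"),                // newly accepted value
})
if err != nil {
	return err
}
ctx.Export("copyChecksum", objCopy.ChecksumCrc64nvme) // new output property

// ...and read it back through the getObject data source, whose result
// gained the matching ChecksumCrc64nvme field.
obj, err := s3.LookupObject(ctx, &s3.LookupObjectArgs{
	Bucket:       "destination-bucket",
	Key:          "copied/object.bin",
	ChecksumMode: pulumi.StringRef("ENABLED"), // assumed argument
})
if err != nil {
	return err
}
ctx.Export("readBack", pulumi.String(obj.ChecksumCrc64nvme))
```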
func (o ObjectCopyOutput) ChecksumSha1() pulumi.StringOutput { return o.ApplyT(func(v *ObjectCopy) pulumi.StringOutput { return v.ChecksumSha1 }).(pulumi.StringOutput) diff --git a/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/s3/pulumiTypes.go b/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/s3/pulumiTypes.go index 458b84ef7..9bc6faa6a 100644 --- a/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/s3/pulumiTypes.go +++ b/vendor/github.com/pulumi/pulumi-aws/sdk/v6/go/aws/s3/pulumiTypes.go @@ -2643,7 +2643,7 @@ func (o BucketLifecycleConfigurationV2RuleArrayOutput) Index(i pulumi.IntInput) type BucketLifecycleConfigurationV2RuleAbortIncompleteMultipartUpload struct { // Number of days after which Amazon S3 aborts an incomplete multipart upload. - DaysAfterInitiation *float64 `pulumi:"daysAfterInitiation"` + DaysAfterInitiation *int `pulumi:"daysAfterInitiation"` } // BucketLifecycleConfigurationV2RuleAbortIncompleteMultipartUploadInput is an input type that accepts BucketLifecycleConfigurationV2RuleAbortIncompleteMultipartUploadArgs and BucketLifecycleConfigurationV2RuleAbortIncompleteMultipartUploadOutput values. @@ -2659,7 +2659,7 @@ type BucketLifecycleConfigurationV2RuleAbortIncompleteMultipartUploadInput inter type BucketLifecycleConfigurationV2RuleAbortIncompleteMultipartUploadArgs struct { // Number of days after which Amazon S3 aborts an incomplete multipart upload. - DaysAfterInitiation pulumi.Float64PtrInput `pulumi:"daysAfterInitiation"` + DaysAfterInitiation pulumi.IntPtrInput `pulumi:"daysAfterInitiation"` } func (BucketLifecycleConfigurationV2RuleAbortIncompleteMultipartUploadArgs) ElementType() reflect.Type { @@ -2740,10 +2740,10 @@ func (o BucketLifecycleConfigurationV2RuleAbortIncompleteMultipartUploadOutput) } // Number of days after which Amazon S3 aborts an incomplete multipart upload. -func (o BucketLifecycleConfigurationV2RuleAbortIncompleteMultipartUploadOutput) DaysAfterInitiation() pulumi.Float64PtrOutput { - return o.ApplyT(func(v BucketLifecycleConfigurationV2RuleAbortIncompleteMultipartUpload) *float64 { +func (o BucketLifecycleConfigurationV2RuleAbortIncompleteMultipartUploadOutput) DaysAfterInitiation() pulumi.IntPtrOutput { + return o.ApplyT(func(v BucketLifecycleConfigurationV2RuleAbortIncompleteMultipartUpload) *int { return v.DaysAfterInitiation - }).(pulumi.Float64PtrOutput) + }).(pulumi.IntPtrOutput) } type BucketLifecycleConfigurationV2RuleAbortIncompleteMultipartUploadPtrOutput struct{ *pulumi.OutputState } @@ -2771,20 +2771,20 @@ func (o BucketLifecycleConfigurationV2RuleAbortIncompleteMultipartUploadPtrOutpu } // Number of days after which Amazon S3 aborts an incomplete multipart upload. -func (o BucketLifecycleConfigurationV2RuleAbortIncompleteMultipartUploadPtrOutput) DaysAfterInitiation() pulumi.Float64PtrOutput { - return o.ApplyT(func(v *BucketLifecycleConfigurationV2RuleAbortIncompleteMultipartUpload) *float64 { +func (o BucketLifecycleConfigurationV2RuleAbortIncompleteMultipartUploadPtrOutput) DaysAfterInitiation() pulumi.IntPtrOutput { + return o.ApplyT(func(v *BucketLifecycleConfigurationV2RuleAbortIncompleteMultipartUpload) *int { if v == nil { return nil } return v.DaysAfterInitiation - }).(pulumi.Float64PtrOutput) + }).(pulumi.IntPtrOutput) } type BucketLifecycleConfigurationV2RuleExpiration struct { // Date the object is to be moved or deleted. The date value must be in [RFC3339 full-date format](https://datatracker.ietf.org/doc/html/rfc3339#section-5.6) e.g. `2023-08-22`. 
Date *string `pulumi:"date"` // Lifetime, in days, of the objects that are subject to the rule. The value must be a non-zero positive integer. - Days *float64 `pulumi:"days"` + Days *int `pulumi:"days"` // Indicates whether Amazon S3 will remove a delete marker with no noncurrent versions. If set to `true`, the delete marker will be expired; if set to `false` the policy takes no action. ExpiredObjectDeleteMarker *bool `pulumi:"expiredObjectDeleteMarker"` } @@ -2804,7 +2804,7 @@ type BucketLifecycleConfigurationV2RuleExpirationArgs struct { // Date the object is to be moved or deleted. The date value must be in [RFC3339 full-date format](https://datatracker.ietf.org/doc/html/rfc3339#section-5.6) e.g. `2023-08-22`. Date pulumi.StringPtrInput `pulumi:"date"` // Lifetime, in days, of the objects that are subject to the rule. The value must be a non-zero positive integer. - Days pulumi.Float64PtrInput `pulumi:"days"` + Days pulumi.IntPtrInput `pulumi:"days"` // Indicates whether Amazon S3 will remove a delete marker with no noncurrent versions. If set to `true`, the delete marker will be expired; if set to `false` the policy takes no action. ExpiredObjectDeleteMarker pulumi.BoolPtrInput `pulumi:"expiredObjectDeleteMarker"` } @@ -2892,8 +2892,8 @@ func (o BucketLifecycleConfigurationV2RuleExpirationOutput) Date() pulumi.String } // Lifetime, in days, of the objects that are subject to the rule. The value must be a non-zero positive integer. -func (o BucketLifecycleConfigurationV2RuleExpirationOutput) Days() pulumi.Float64PtrOutput { - return o.ApplyT(func(v BucketLifecycleConfigurationV2RuleExpiration) *float64 { return v.Days }).(pulumi.Float64PtrOutput) +func (o BucketLifecycleConfigurationV2RuleExpirationOutput) Days() pulumi.IntPtrOutput { + return o.ApplyT(func(v BucketLifecycleConfigurationV2RuleExpiration) *int { return v.Days }).(pulumi.IntPtrOutput) } // Indicates whether Amazon S3 will remove a delete marker with no noncurrent versions. If set to `true`, the delete marker will be expired; if set to `false` the policy takes no action. @@ -2936,13 +2936,13 @@ func (o BucketLifecycleConfigurationV2RuleExpirationPtrOutput) Date() pulumi.Str } // Lifetime, in days, of the objects that are subject to the rule. The value must be a non-zero positive integer. -func (o BucketLifecycleConfigurationV2RuleExpirationPtrOutput) Days() pulumi.Float64PtrOutput { - return o.ApplyT(func(v *BucketLifecycleConfigurationV2RuleExpiration) *float64 { +func (o BucketLifecycleConfigurationV2RuleExpirationPtrOutput) Days() pulumi.IntPtrOutput { + return o.ApplyT(func(v *BucketLifecycleConfigurationV2RuleExpiration) *int { if v == nil { return nil } return v.Days - }).(pulumi.Float64PtrOutput) + }).(pulumi.IntPtrOutput) } // Indicates whether Amazon S3 will remove a delete marker with no noncurrent versions. If set to `true`, the delete marker will be expired; if set to `false` the policy takes no action. @@ -3524,9 +3524,9 @@ func (o BucketLifecycleConfigurationV2RuleFilterTagPtrOutput) Value() pulumi.Str type BucketLifecycleConfigurationV2RuleNoncurrentVersionExpiration struct { // Number of noncurrent versions Amazon S3 will retain. Must be a non-zero positive integer. - NewerNoncurrentVersions *float64 `pulumi:"newerNoncurrentVersions"` + NewerNoncurrentVersions *int `pulumi:"newerNoncurrentVersions"` // Number of days an object is noncurrent before Amazon S3 can perform the associated action. Must be a positive integer. 
- NoncurrentDays *float64 `pulumi:"noncurrentDays"` + NoncurrentDays int `pulumi:"noncurrentDays"` } // BucketLifecycleConfigurationV2RuleNoncurrentVersionExpirationInput is an input type that accepts BucketLifecycleConfigurationV2RuleNoncurrentVersionExpirationArgs and BucketLifecycleConfigurationV2RuleNoncurrentVersionExpirationOutput values. @@ -3542,9 +3542,9 @@ type BucketLifecycleConfigurationV2RuleNoncurrentVersionExpirationInput interfac type BucketLifecycleConfigurationV2RuleNoncurrentVersionExpirationArgs struct { // Number of noncurrent versions Amazon S3 will retain. Must be a non-zero positive integer. - NewerNoncurrentVersions pulumi.Float64PtrInput `pulumi:"newerNoncurrentVersions"` + NewerNoncurrentVersions pulumi.IntPtrInput `pulumi:"newerNoncurrentVersions"` // Number of days an object is noncurrent before Amazon S3 can perform the associated action. Must be a positive integer. - NoncurrentDays pulumi.Float64PtrInput `pulumi:"noncurrentDays"` + NoncurrentDays pulumi.IntInput `pulumi:"noncurrentDays"` } func (BucketLifecycleConfigurationV2RuleNoncurrentVersionExpirationArgs) ElementType() reflect.Type { @@ -3625,17 +3625,15 @@ func (o BucketLifecycleConfigurationV2RuleNoncurrentVersionExpirationOutput) ToB } // Number of noncurrent versions Amazon S3 will retain. Must be a non-zero positive integer. -func (o BucketLifecycleConfigurationV2RuleNoncurrentVersionExpirationOutput) NewerNoncurrentVersions() pulumi.Float64PtrOutput { - return o.ApplyT(func(v BucketLifecycleConfigurationV2RuleNoncurrentVersionExpiration) *float64 { +func (o BucketLifecycleConfigurationV2RuleNoncurrentVersionExpirationOutput) NewerNoncurrentVersions() pulumi.IntPtrOutput { + return o.ApplyT(func(v BucketLifecycleConfigurationV2RuleNoncurrentVersionExpiration) *int { return v.NewerNoncurrentVersions - }).(pulumi.Float64PtrOutput) + }).(pulumi.IntPtrOutput) } // Number of days an object is noncurrent before Amazon S3 can perform the associated action. Must be a positive integer. -func (o BucketLifecycleConfigurationV2RuleNoncurrentVersionExpirationOutput) NoncurrentDays() pulumi.Float64PtrOutput { - return o.ApplyT(func(v BucketLifecycleConfigurationV2RuleNoncurrentVersionExpiration) *float64 { - return v.NoncurrentDays - }).(pulumi.Float64PtrOutput) +func (o BucketLifecycleConfigurationV2RuleNoncurrentVersionExpirationOutput) NoncurrentDays() pulumi.IntOutput { + return o.ApplyT(func(v BucketLifecycleConfigurationV2RuleNoncurrentVersionExpiration) int { return v.NoncurrentDays }).(pulumi.IntOutput) } type BucketLifecycleConfigurationV2RuleNoncurrentVersionExpirationPtrOutput struct{ *pulumi.OutputState } @@ -3663,30 +3661,30 @@ func (o BucketLifecycleConfigurationV2RuleNoncurrentVersionExpirationPtrOutput) } // Number of noncurrent versions Amazon S3 will retain. Must be a non-zero positive integer. -func (o BucketLifecycleConfigurationV2RuleNoncurrentVersionExpirationPtrOutput) NewerNoncurrentVersions() pulumi.Float64PtrOutput { - return o.ApplyT(func(v *BucketLifecycleConfigurationV2RuleNoncurrentVersionExpiration) *float64 { +func (o BucketLifecycleConfigurationV2RuleNoncurrentVersionExpirationPtrOutput) NewerNoncurrentVersions() pulumi.IntPtrOutput { + return o.ApplyT(func(v *BucketLifecycleConfigurationV2RuleNoncurrentVersionExpiration) *int { if v == nil { return nil } return v.NewerNoncurrentVersions - }).(pulumi.Float64PtrOutput) + }).(pulumi.IntPtrOutput) } // Number of days an object is noncurrent before Amazon S3 can perform the associated action. Must be a positive integer. 
-func (o BucketLifecycleConfigurationV2RuleNoncurrentVersionExpirationPtrOutput) NoncurrentDays() pulumi.Float64PtrOutput { - return o.ApplyT(func(v *BucketLifecycleConfigurationV2RuleNoncurrentVersionExpiration) *float64 { +func (o BucketLifecycleConfigurationV2RuleNoncurrentVersionExpirationPtrOutput) NoncurrentDays() pulumi.IntPtrOutput { + return o.ApplyT(func(v *BucketLifecycleConfigurationV2RuleNoncurrentVersionExpiration) *int { if v == nil { return nil } - return v.NoncurrentDays - }).(pulumi.Float64PtrOutput) + return &v.NoncurrentDays + }).(pulumi.IntPtrOutput) } type BucketLifecycleConfigurationV2RuleNoncurrentVersionTransition struct { // Number of noncurrent versions Amazon S3 will retain. Must be a non-zero positive integer. - NewerNoncurrentVersions *float64 `pulumi:"newerNoncurrentVersions"` + NewerNoncurrentVersions *int `pulumi:"newerNoncurrentVersions"` // Number of days an object is noncurrent before Amazon S3 can perform the associated action. - NoncurrentDays *float64 `pulumi:"noncurrentDays"` + NoncurrentDays int `pulumi:"noncurrentDays"` // Class of storage used to store the object. Valid Values: `GLACIER`, `STANDARD_IA`, `ONEZONE_IA`, `INTELLIGENT_TIERING`, `DEEP_ARCHIVE`, `GLACIER_IR`. StorageClass string `pulumi:"storageClass"` } @@ -3704,9 +3702,9 @@ type BucketLifecycleConfigurationV2RuleNoncurrentVersionTransitionInput interfac type BucketLifecycleConfigurationV2RuleNoncurrentVersionTransitionArgs struct { // Number of noncurrent versions Amazon S3 will retain. Must be a non-zero positive integer. - NewerNoncurrentVersions pulumi.Float64PtrInput `pulumi:"newerNoncurrentVersions"` + NewerNoncurrentVersions pulumi.IntPtrInput `pulumi:"newerNoncurrentVersions"` // Number of days an object is noncurrent before Amazon S3 can perform the associated action. - NoncurrentDays pulumi.Float64PtrInput `pulumi:"noncurrentDays"` + NoncurrentDays pulumi.IntInput `pulumi:"noncurrentDays"` // Class of storage used to store the object. Valid Values: `GLACIER`, `STANDARD_IA`, `ONEZONE_IA`, `INTELLIGENT_TIERING`, `DEEP_ARCHIVE`, `GLACIER_IR`. StorageClass pulumi.StringInput `pulumi:"storageClass"` } @@ -3763,17 +3761,15 @@ func (o BucketLifecycleConfigurationV2RuleNoncurrentVersionTransitionOutput) ToB } // Number of noncurrent versions Amazon S3 will retain. Must be a non-zero positive integer. -func (o BucketLifecycleConfigurationV2RuleNoncurrentVersionTransitionOutput) NewerNoncurrentVersions() pulumi.Float64PtrOutput { - return o.ApplyT(func(v BucketLifecycleConfigurationV2RuleNoncurrentVersionTransition) *float64 { +func (o BucketLifecycleConfigurationV2RuleNoncurrentVersionTransitionOutput) NewerNoncurrentVersions() pulumi.IntPtrOutput { + return o.ApplyT(func(v BucketLifecycleConfigurationV2RuleNoncurrentVersionTransition) *int { return v.NewerNoncurrentVersions - }).(pulumi.Float64PtrOutput) + }).(pulumi.IntPtrOutput) } // Number of days an object is noncurrent before Amazon S3 can perform the associated action. 
-func (o BucketLifecycleConfigurationV2RuleNoncurrentVersionTransitionOutput) NoncurrentDays() pulumi.Float64PtrOutput { - return o.ApplyT(func(v BucketLifecycleConfigurationV2RuleNoncurrentVersionTransition) *float64 { - return v.NoncurrentDays - }).(pulumi.Float64PtrOutput) +func (o BucketLifecycleConfigurationV2RuleNoncurrentVersionTransitionOutput) NoncurrentDays() pulumi.IntOutput { + return o.ApplyT(func(v BucketLifecycleConfigurationV2RuleNoncurrentVersionTransition) int { return v.NoncurrentDays }).(pulumi.IntOutput) } // Class of storage used to store the object. Valid Values: `GLACIER`, `STANDARD_IA`, `ONEZONE_IA`, `INTELLIGENT_TIERING`, `DEEP_ARCHIVE`, `GLACIER_IR`. @@ -3805,7 +3801,7 @@ type BucketLifecycleConfigurationV2RuleTransition struct { // Date objects are transitioned to the specified storage class. The date value must be in [RFC3339 full-date format](https://datatracker.ietf.org/doc/html/rfc3339#section-5.6) e.g. `2023-08-22`. Date *string `pulumi:"date"` // Number of days after creation when objects are transitioned to the specified storage class. The value must be a positive integer. If both `days` and `date` are not specified, defaults to `0`. Valid values depend on `storageClass`, see [Transition objects using Amazon S3 Lifecycle](https://docs.aws.amazon.com/AmazonS3/latest/userguide/lifecycle-transition-general-considerations.html) for more details. - Days *float64 `pulumi:"days"` + Days *int `pulumi:"days"` // Class of storage used to store the object. Valid Values: `GLACIER`, `STANDARD_IA`, `ONEZONE_IA`, `INTELLIGENT_TIERING`, `DEEP_ARCHIVE`, `GLACIER_IR`. StorageClass string `pulumi:"storageClass"` } @@ -3825,7 +3821,7 @@ type BucketLifecycleConfigurationV2RuleTransitionArgs struct { // Date objects are transitioned to the specified storage class. The date value must be in [RFC3339 full-date format](https://datatracker.ietf.org/doc/html/rfc3339#section-5.6) e.g. `2023-08-22`. Date pulumi.StringPtrInput `pulumi:"date"` // Number of days after creation when objects are transitioned to the specified storage class. The value must be a positive integer. If both `days` and `date` are not specified, defaults to `0`. Valid values depend on `storageClass`, see [Transition objects using Amazon S3 Lifecycle](https://docs.aws.amazon.com/AmazonS3/latest/userguide/lifecycle-transition-general-considerations.html) for more details. - Days pulumi.Float64PtrInput `pulumi:"days"` + Days pulumi.IntPtrInput `pulumi:"days"` // Class of storage used to store the object. Valid Values: `GLACIER`, `STANDARD_IA`, `ONEZONE_IA`, `INTELLIGENT_TIERING`, `DEEP_ARCHIVE`, `GLACIER_IR`. StorageClass pulumi.StringInput `pulumi:"storageClass"` } @@ -3887,8 +3883,8 @@ func (o BucketLifecycleConfigurationV2RuleTransitionOutput) Date() pulumi.String } // Number of days after creation when objects are transitioned to the specified storage class. The value must be a positive integer. If both `days` and `date` are not specified, defaults to `0`. Valid values depend on `storageClass`, see [Transition objects using Amazon S3 Lifecycle](https://docs.aws.amazon.com/AmazonS3/latest/userguide/lifecycle-transition-general-considerations.html) for more details. 
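The retyping above (which the final `Days` accessor hunk below completes) runs through all of the lifecycle day- and version-count fields: `pulumi.Float64*` inputs become `pulumi.Int*`, and `NoncurrentDays` becomes a required `pulumi.IntInput` on the noncurrent-version blocks. A sketch of a rule written against the new types; the bucket name and the particular day counts are placeholders:

```go
_, err := s3.NewBucketLifecycleConfigurationV2(ctx, "lifecycle", &s3.BucketLifecycleConfigurationV2Args{
	Bucket: pulumi.String("my-bucket"), // placeholder
	Rules: s3.BucketLifecycleConfigurationV2RuleArray{
		&s3.BucketLifecycleConfigurationV2RuleArgs{
			Id:     pulumi.String("tidy-up"),
			Status: pulumi.String("Enabled"),
			AbortIncompleteMultipartUpload: &s3.BucketLifecycleConfigurationV2RuleAbortIncompleteMultipartUploadArgs{
				DaysAfterInitiation: pulumi.Int(7), // previously pulumi.Float64(7)
			},
			Expiration: &s3.BucketLifecycleConfigurationV2RuleExpirationArgs{
				Days: pulumi.Int(365), // previously pulumi.Float64(365)
			},
			NoncurrentVersionExpiration: &s3.BucketLifecycleConfigurationV2RuleNoncurrentVersionExpirationArgs{
				NoncurrentDays:          pulumi.Int(90), // now required, plain IntInput
				NewerNoncurrentVersions: pulumi.Int(3),
			},
		},
	},
})
if err != nil {
	return err
}
```

Programs that previously passed `pulumi.Float64`/`pulumi.Float64Ptr` values to these fields will need the corresponding `pulumi.Int` forms after this update.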
-func (o BucketLifecycleConfigurationV2RuleTransitionOutput) Days() pulumi.Float64PtrOutput { - return o.ApplyT(func(v BucketLifecycleConfigurationV2RuleTransition) *float64 { return v.Days }).(pulumi.Float64PtrOutput) +func (o BucketLifecycleConfigurationV2RuleTransitionOutput) Days() pulumi.IntPtrOutput { + return o.ApplyT(func(v BucketLifecycleConfigurationV2RuleTransition) *int { return v.Days }).(pulumi.IntPtrOutput) } // Class of storage used to store the object. Valid Values: `GLACIER`, `STANDARD_IA`, `ONEZONE_IA`, `INTELLIGENT_TIERING`, `DEEP_ARCHIVE`, `GLACIER_IR`. @@ -7316,7 +7312,7 @@ type BucketReplicationConfigRule struct { Id *string `pulumi:"id"` // Object key name prefix identifying one or more objects to which the rule applies. Must be less than or equal to 1024 characters in length. Defaults to an empty string (`""`) if `filter` is not specified. // - // Deprecated: Use filter instead + // Deprecated: prefix is deprecated. Use filter instead. Prefix *string `pulumi:"prefix"` // Priority associated with the rule. Priority should only be set if `filter` is configured. If not provided, defaults to `0`. Priority must be unique between multiple rules. Priority *int `pulumi:"priority"` @@ -7350,7 +7346,7 @@ type BucketReplicationConfigRuleArgs struct { Id pulumi.StringPtrInput `pulumi:"id"` // Object key name prefix identifying one or more objects to which the rule applies. Must be less than or equal to 1024 characters in length. Defaults to an empty string (`""`) if `filter` is not specified. // - // Deprecated: Use filter instead + // Deprecated: prefix is deprecated. Use filter instead. Prefix pulumi.StringPtrInput `pulumi:"prefix"` // Priority associated with the rule. Priority should only be set if `filter` is configured. If not provided, defaults to `0`. Priority must be unique between multiple rules. Priority pulumi.IntPtrInput `pulumi:"priority"` @@ -7442,7 +7438,7 @@ func (o BucketReplicationConfigRuleOutput) Id() pulumi.StringPtrOutput { // Object key name prefix identifying one or more objects to which the rule applies. Must be less than or equal to 1024 characters in length. Defaults to an empty string (`""`) if `filter` is not specified. // -// Deprecated: Use filter instead +// Deprecated: prefix is deprecated. Use filter instead. func (o BucketReplicationConfigRuleOutput) Prefix() pulumi.StringPtrOutput { return o.ApplyT(func(v BucketReplicationConfigRule) *string { return v.Prefix }).(pulumi.StringPtrOutput) } @@ -12688,11 +12684,11 @@ func (o BucketV2LoggingArrayOutput) Index(i pulumi.IntInput) BucketV2LoggingOutp type BucketV2ObjectLockConfiguration struct { // Indicates whether this bucket has an Object Lock configuration enabled. Valid values are `true` or `false`. This argument is not supported in all regions or partitions. // - // Deprecated: Use the top-level parameter objectLockEnabled instead + // Deprecated: object_lock_enabled is deprecated. Use the top-level parameter objectLockEnabled instead. ObjectLockEnabled *string `pulumi:"objectLockEnabled"` // Object Lock rule in place for this bucket (documented below). // - // Deprecated: Use the s3.BucketObjectLockConfigurationV2 resource instead + // Deprecated: rule is deprecated. Use the s3.BucketObjectLockConfigurationV2 resource instead. Rules []BucketV2ObjectLockConfigurationRule `pulumi:"rules"` } @@ -12710,11 +12706,11 @@ type BucketV2ObjectLockConfigurationInput interface { type BucketV2ObjectLockConfigurationArgs struct { // Indicates whether this bucket has an Object Lock configuration enabled. 
Valid values are `true` or `false`. This argument is not supported in all regions or partitions. // - // Deprecated: Use the top-level parameter objectLockEnabled instead + // Deprecated: object_lock_enabled is deprecated. Use the top-level parameter objectLockEnabled instead. ObjectLockEnabled pulumi.StringPtrInput `pulumi:"objectLockEnabled"` // Object Lock rule in place for this bucket (documented below). // - // Deprecated: Use the s3.BucketObjectLockConfigurationV2 resource instead + // Deprecated: rule is deprecated. Use the s3.BucketObjectLockConfigurationV2 resource instead. Rules BucketV2ObjectLockConfigurationRuleArrayInput `pulumi:"rules"` } @@ -12797,14 +12793,14 @@ func (o BucketV2ObjectLockConfigurationOutput) ToBucketV2ObjectLockConfiguration // Indicates whether this bucket has an Object Lock configuration enabled. Valid values are `true` or `false`. This argument is not supported in all regions or partitions. // -// Deprecated: Use the top-level parameter objectLockEnabled instead +// Deprecated: object_lock_enabled is deprecated. Use the top-level parameter objectLockEnabled instead. func (o BucketV2ObjectLockConfigurationOutput) ObjectLockEnabled() pulumi.StringPtrOutput { return o.ApplyT(func(v BucketV2ObjectLockConfiguration) *string { return v.ObjectLockEnabled }).(pulumi.StringPtrOutput) } // Object Lock rule in place for this bucket (documented below). // -// Deprecated: Use the s3.BucketObjectLockConfigurationV2 resource instead +// Deprecated: rule is deprecated. Use the s3.BucketObjectLockConfigurationV2 resource instead. func (o BucketV2ObjectLockConfigurationOutput) Rules() BucketV2ObjectLockConfigurationRuleArrayOutput { return o.ApplyT(func(v BucketV2ObjectLockConfiguration) []BucketV2ObjectLockConfigurationRule { return v.Rules }).(BucketV2ObjectLockConfigurationRuleArrayOutput) } @@ -12835,7 +12831,7 @@ func (o BucketV2ObjectLockConfigurationPtrOutput) Elem() BucketV2ObjectLockConfi // Indicates whether this bucket has an Object Lock configuration enabled. Valid values are `true` or `false`. This argument is not supported in all regions or partitions. // -// Deprecated: Use the top-level parameter objectLockEnabled instead +// Deprecated: object_lock_enabled is deprecated. Use the top-level parameter objectLockEnabled instead. func (o BucketV2ObjectLockConfigurationPtrOutput) ObjectLockEnabled() pulumi.StringPtrOutput { return o.ApplyT(func(v *BucketV2ObjectLockConfiguration) *string { if v == nil { @@ -12847,7 +12843,7 @@ func (o BucketV2ObjectLockConfigurationPtrOutput) ObjectLockEnabled() pulumi.Str // Object Lock rule in place for this bucket (documented below). // -// Deprecated: Use the s3.BucketObjectLockConfigurationV2 resource instead +// Deprecated: rule is deprecated. Use the s3.BucketObjectLockConfigurationV2 resource instead. 
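The reworded notices above still steer the inline `objectLockConfiguration` block on `s3.BucketV2` toward the top-level flag plus the standalone resource. A sketch of that suggested shape, assuming the `s3.BucketObjectLockConfigurationV2` rule/default-retention surface the message names; the retention mode and day count are placeholders:

```go
bucket, err := s3.NewBucketV2(ctx, "locked", &s3.BucketV2Args{
	ObjectLockEnabled: pulumi.Bool(true), // top-level parameter replaces the inline string flag
})
if err != nil {
	return err
}
_, err = s3.NewBucketObjectLockConfigurationV2(ctx, "lock", &s3.BucketObjectLockConfigurationV2Args{
	Bucket: bucket.Bucket,
	Rule: &s3.BucketObjectLockConfigurationV2RuleArgs{
		DefaultRetention: &s3.BucketObjectLockConfigurationV2RuleDefaultRetentionArgs{
			Mode: pulumi.String("GOVERNANCE"), // placeholder
			Days: pulumi.Int(30),              // placeholder
		},
	},
})
if err != nil {
	return err
}
```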
func (o BucketV2ObjectLockConfigurationPtrOutput) Rules() BucketV2ObjectLockConfigurationRuleArrayOutput { return o.ApplyT(func(v *BucketV2ObjectLockConfiguration) []BucketV2ObjectLockConfigurationRule { if v == nil { diff --git a/vendor/github.com/pulumi/pulumi-awsx/sdk/v2/go/awsx/internal/pulumiUtilities.go b/vendor/github.com/pulumi/pulumi-awsx/sdk/v2/go/awsx/internal/pulumiUtilities.go index 99a591934..68df11139 100644 --- a/vendor/github.com/pulumi/pulumi-awsx/sdk/v2/go/awsx/internal/pulumiUtilities.go +++ b/vendor/github.com/pulumi/pulumi-awsx/sdk/v2/go/awsx/internal/pulumiUtilities.go @@ -165,7 +165,7 @@ func callPlainInner( func PkgResourceDefaultOpts(opts []pulumi.ResourceOption) []pulumi.ResourceOption { defaults := []pulumi.ResourceOption{} - version := semver.MustParse("2.21.1") + version := semver.MustParse("2.21.0") if !version.Equals(semver.Version{}) { defaults = append(defaults, pulumi.Version(version.String())) } @@ -176,7 +176,7 @@ func PkgResourceDefaultOpts(opts []pulumi.ResourceOption) []pulumi.ResourceOptio func PkgInvokeDefaultOpts(opts []pulumi.InvokeOption) []pulumi.InvokeOption { defaults := []pulumi.InvokeOption{} - version := semver.MustParse("2.21.1") + version := semver.MustParse("2.21.0") if !version.Equals(semver.Version{}) { defaults = append(defaults, pulumi.Version(version.String())) } diff --git a/vendor/github.com/pulumi/pulumi-azure-native-sdk/authorization/v2/getManagementLockAtResourceLevel.go b/vendor/github.com/pulumi/pulumi-azure-native-sdk/authorization/v2/getManagementLockAtResourceLevel.go index c2f5e871a..e2ab5af5b 100644 --- a/vendor/github.com/pulumi/pulumi-azure-native-sdk/authorization/v2/getManagementLockAtResourceLevel.go +++ b/vendor/github.com/pulumi/pulumi-azure-native-sdk/authorization/v2/getManagementLockAtResourceLevel.go @@ -24,6 +24,8 @@ func LookupManagementLockAtResourceLevel(ctx *pulumi.Context, args *LookupManage } type LookupManagementLockAtResourceLevelArgs struct { + // The API version to use for this operation. + ApiVersion string `pulumi:"apiVersion"` // The name of lock. LockName string `pulumi:"lockName"` // An extra path parameter needed in some services, like SQL Databases. @@ -66,6 +68,8 @@ func LookupManagementLockAtResourceLevelOutput(ctx *pulumi.Context, args LookupM } type LookupManagementLockAtResourceLevelOutputArgs struct { + // The API version to use for this operation. + ApiVersion pulumi.StringInput `pulumi:"apiVersion"` // The name of lock. LockName pulumi.StringInput `pulumi:"lockName"` // An extra path parameter needed in some services, like SQL Databases. diff --git a/vendor/github.com/pulumi/pulumi-azure-native-sdk/authorization/v2/getPimRoleEligibilitySchedule.go b/vendor/github.com/pulumi/pulumi-azure-native-sdk/authorization/v2/getPimRoleEligibilitySchedule.go new file mode 100644 index 000000000..baaf9d374 --- /dev/null +++ b/vendor/github.com/pulumi/pulumi-azure-native-sdk/authorization/v2/getPimRoleEligibilitySchedule.go @@ -0,0 +1,220 @@ +// Code generated by the Pulumi SDK Generator DO NOT EDIT. +// *** WARNING: Do not edit by hand unless you're certain you know what you are doing! *** + +package authorization + +import ( + "context" + "reflect" + + "github.com/pulumi/pulumi-azure-native-sdk/v2/utilities" + "github.com/pulumi/pulumi/sdk/v3/go/pulumi" +) + +// Get the specified role eligibility schedule request. +// Azure REST API version: 2020-10-01. 
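Before the new PIM file continues below, note the azure-native hunks just above: the management-lock data source (and, further down, the `ManagementLockAtResourceLevel` resource) now takes a required `ApiVersion`. A sketch of the lookup with the new argument, importing the package as `authorization "github.com/pulumi/pulumi-azure-native-sdk/authorization/v2"`; every value is a placeholder, and the argument names beyond `LockName` and `ApiVersion` are assumed from the generated args struct, which this excerpt only partially shows:

```go
lock, err := authorization.LookupManagementLockAtResourceLevel(ctx, &authorization.LookupManagementLockAtResourceLevelArgs{
	ApiVersion:                "2020-05-01", // newly required; placeholder version string
	LockName:                  "keep-vm",    // placeholder
	ResourceGroupName:         "example-rg",
	ResourceProviderNamespace: "Microsoft.Compute",
	ResourceType:              "virtualMachines",
	ResourceName:              "example-vm",
})
if err != nil {
	return err
}
ctx.Export("lockLevel", pulumi.String(lock.Level))
```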
+func LookupPimRoleEligibilitySchedule(ctx *pulumi.Context, args *LookupPimRoleEligibilityScheduleArgs, opts ...pulumi.InvokeOption) (*LookupPimRoleEligibilityScheduleResult, error) { + opts = utilities.PkgInvokeDefaultOpts(opts) + var rv LookupPimRoleEligibilityScheduleResult + err := ctx.Invoke("azure-native:authorization:getPimRoleEligibilitySchedule", args, &rv, opts...) + if err != nil { + return nil, err + } + return &rv, nil +} + +type LookupPimRoleEligibilityScheduleArgs struct { + // The name (guid) of the role eligibility schedule request to get. + RoleEligibilityScheduleRequestName string `pulumi:"roleEligibilityScheduleRequestName"` + // The scope of the role eligibility schedule request. + Scope string `pulumi:"scope"` +} + +// Role Eligibility schedule request +type LookupPimRoleEligibilityScheduleResult struct { + // The approvalId of the role eligibility schedule request. + ApprovalId string `pulumi:"approvalId"` + // The conditions on the role assignment. This limits the resources it can be assigned to. e.g.: @Resource[Microsoft.Storage/storageAccounts/blobServices/containers:ContainerName] StringEqualsIgnoreCase 'foo_storage_container' + Condition *string `pulumi:"condition"` + // Version of the condition. Currently accepted value is '2.0' + ConditionVersion *string `pulumi:"conditionVersion"` + // DateTime when role eligibility schedule request was created + CreatedOn string `pulumi:"createdOn"` + // Additional properties of principal, scope and role definition + ExpandedProperties ExpandedPropertiesResponse `pulumi:"expandedProperties"` + // The role eligibility schedule request ID. + Id string `pulumi:"id"` + // Justification for the role eligibility + Justification *string `pulumi:"justification"` + // The role eligibility schedule request name. + Name string `pulumi:"name"` + // The principal ID. + PrincipalId string `pulumi:"principalId"` + // The principal type of the assigned principal ID. + PrincipalType string `pulumi:"principalType"` + // The type of the role assignment schedule request. Eg: SelfActivate, AdminAssign etc + RequestType string `pulumi:"requestType"` + // Id of the user who created this request + RequestorId string `pulumi:"requestorId"` + // The role definition ID. + RoleDefinitionId string `pulumi:"roleDefinitionId"` + // Schedule info of the role eligibility schedule + ScheduleInfo *RoleEligibilityScheduleRequestPropertiesResponseScheduleInfo `pulumi:"scheduleInfo"` + // The role eligibility schedule request scope. + Scope string `pulumi:"scope"` + // The status of the role eligibility schedule request. + Status string `pulumi:"status"` + // The resultant role eligibility schedule id or the role eligibility schedule id being updated + TargetRoleEligibilityScheduleId *string `pulumi:"targetRoleEligibilityScheduleId"` + // The role eligibility schedule instance id being updated + TargetRoleEligibilityScheduleInstanceId *string `pulumi:"targetRoleEligibilityScheduleInstanceId"` + // Ticket Info of the role eligibility + TicketInfo *RoleEligibilityScheduleRequestPropertiesResponseTicketInfo `pulumi:"ticketInfo"` + // The role eligibility schedule request type. + Type string `pulumi:"type"` +} + +func LookupPimRoleEligibilityScheduleOutput(ctx *pulumi.Context, args LookupPimRoleEligibilityScheduleOutputArgs, opts ...pulumi.InvokeOption) LookupPimRoleEligibilityScheduleResultOutput { + return pulumi.ToOutputWithContext(ctx.Context(), args). 
+ ApplyT(func(v interface{}) (LookupPimRoleEligibilityScheduleResultOutput, error) { + args := v.(LookupPimRoleEligibilityScheduleArgs) + options := pulumi.InvokeOutputOptions{InvokeOptions: utilities.PkgInvokeDefaultOpts(opts)} + return ctx.InvokeOutput("azure-native:authorization:getPimRoleEligibilitySchedule", args, LookupPimRoleEligibilityScheduleResultOutput{}, options).(LookupPimRoleEligibilityScheduleResultOutput), nil + }).(LookupPimRoleEligibilityScheduleResultOutput) +} + +type LookupPimRoleEligibilityScheduleOutputArgs struct { + // The name (guid) of the role eligibility schedule request to get. + RoleEligibilityScheduleRequestName pulumi.StringInput `pulumi:"roleEligibilityScheduleRequestName"` + // The scope of the role eligibility schedule request. + Scope pulumi.StringInput `pulumi:"scope"` +} + +func (LookupPimRoleEligibilityScheduleOutputArgs) ElementType() reflect.Type { + return reflect.TypeOf((*LookupPimRoleEligibilityScheduleArgs)(nil)).Elem() +} + +// Role Eligibility schedule request +type LookupPimRoleEligibilityScheduleResultOutput struct{ *pulumi.OutputState } + +func (LookupPimRoleEligibilityScheduleResultOutput) ElementType() reflect.Type { + return reflect.TypeOf((*LookupPimRoleEligibilityScheduleResult)(nil)).Elem() +} + +func (o LookupPimRoleEligibilityScheduleResultOutput) ToLookupPimRoleEligibilityScheduleResultOutput() LookupPimRoleEligibilityScheduleResultOutput { + return o +} + +func (o LookupPimRoleEligibilityScheduleResultOutput) ToLookupPimRoleEligibilityScheduleResultOutputWithContext(ctx context.Context) LookupPimRoleEligibilityScheduleResultOutput { + return o +} + +// The approvalId of the role eligibility schedule request. +func (o LookupPimRoleEligibilityScheduleResultOutput) ApprovalId() pulumi.StringOutput { + return o.ApplyT(func(v LookupPimRoleEligibilityScheduleResult) string { return v.ApprovalId }).(pulumi.StringOutput) +} + +// The conditions on the role assignment. This limits the resources it can be assigned to. e.g.: @Resource[Microsoft.Storage/storageAccounts/blobServices/containers:ContainerName] StringEqualsIgnoreCase 'foo_storage_container' +func (o LookupPimRoleEligibilityScheduleResultOutput) Condition() pulumi.StringPtrOutput { + return o.ApplyT(func(v LookupPimRoleEligibilityScheduleResult) *string { return v.Condition }).(pulumi.StringPtrOutput) +} + +// Version of the condition. Currently accepted value is '2.0' +func (o LookupPimRoleEligibilityScheduleResultOutput) ConditionVersion() pulumi.StringPtrOutput { + return o.ApplyT(func(v LookupPimRoleEligibilityScheduleResult) *string { return v.ConditionVersion }).(pulumi.StringPtrOutput) +} + +// DateTime when role eligibility schedule request was created +func (o LookupPimRoleEligibilityScheduleResultOutput) CreatedOn() pulumi.StringOutput { + return o.ApplyT(func(v LookupPimRoleEligibilityScheduleResult) string { return v.CreatedOn }).(pulumi.StringOutput) +} + +// Additional properties of principal, scope and role definition +func (o LookupPimRoleEligibilityScheduleResultOutput) ExpandedProperties() ExpandedPropertiesResponseOutput { + return o.ApplyT(func(v LookupPimRoleEligibilityScheduleResult) ExpandedPropertiesResponse { return v.ExpandedProperties }).(ExpandedPropertiesResponseOutput) +} + +// The role eligibility schedule request ID. 
+func (o LookupPimRoleEligibilityScheduleResultOutput) Id() pulumi.StringOutput { + return o.ApplyT(func(v LookupPimRoleEligibilityScheduleResult) string { return v.Id }).(pulumi.StringOutput) +} + +// Justification for the role eligibility +func (o LookupPimRoleEligibilityScheduleResultOutput) Justification() pulumi.StringPtrOutput { + return o.ApplyT(func(v LookupPimRoleEligibilityScheduleResult) *string { return v.Justification }).(pulumi.StringPtrOutput) +} + +// The role eligibility schedule request name. +func (o LookupPimRoleEligibilityScheduleResultOutput) Name() pulumi.StringOutput { + return o.ApplyT(func(v LookupPimRoleEligibilityScheduleResult) string { return v.Name }).(pulumi.StringOutput) +} + +// The principal ID. +func (o LookupPimRoleEligibilityScheduleResultOutput) PrincipalId() pulumi.StringOutput { + return o.ApplyT(func(v LookupPimRoleEligibilityScheduleResult) string { return v.PrincipalId }).(pulumi.StringOutput) +} + +// The principal type of the assigned principal ID. +func (o LookupPimRoleEligibilityScheduleResultOutput) PrincipalType() pulumi.StringOutput { + return o.ApplyT(func(v LookupPimRoleEligibilityScheduleResult) string { return v.PrincipalType }).(pulumi.StringOutput) +} + +// The type of the role assignment schedule request. Eg: SelfActivate, AdminAssign etc +func (o LookupPimRoleEligibilityScheduleResultOutput) RequestType() pulumi.StringOutput { + return o.ApplyT(func(v LookupPimRoleEligibilityScheduleResult) string { return v.RequestType }).(pulumi.StringOutput) +} + +// Id of the user who created this request +func (o LookupPimRoleEligibilityScheduleResultOutput) RequestorId() pulumi.StringOutput { + return o.ApplyT(func(v LookupPimRoleEligibilityScheduleResult) string { return v.RequestorId }).(pulumi.StringOutput) +} + +// The role definition ID. +func (o LookupPimRoleEligibilityScheduleResultOutput) RoleDefinitionId() pulumi.StringOutput { + return o.ApplyT(func(v LookupPimRoleEligibilityScheduleResult) string { return v.RoleDefinitionId }).(pulumi.StringOutput) +} + +// Schedule info of the role eligibility schedule +func (o LookupPimRoleEligibilityScheduleResultOutput) ScheduleInfo() RoleEligibilityScheduleRequestPropertiesResponseScheduleInfoPtrOutput { + return o.ApplyT(func(v LookupPimRoleEligibilityScheduleResult) *RoleEligibilityScheduleRequestPropertiesResponseScheduleInfo { + return v.ScheduleInfo + }).(RoleEligibilityScheduleRequestPropertiesResponseScheduleInfoPtrOutput) +} + +// The role eligibility schedule request scope. +func (o LookupPimRoleEligibilityScheduleResultOutput) Scope() pulumi.StringOutput { + return o.ApplyT(func(v LookupPimRoleEligibilityScheduleResult) string { return v.Scope }).(pulumi.StringOutput) +} + +// The status of the role eligibility schedule request. 
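Given the args and result types defined above, the read side of the new PIM support is used like this; the request name (a GUID) and the scope are placeholders:

```go
sched, err := authorization.LookupPimRoleEligibilitySchedule(ctx, &authorization.LookupPimRoleEligibilityScheduleArgs{
	RoleEligibilityScheduleRequestName: "00000000-0000-0000-0000-000000000000", // placeholder GUID
	Scope:                              "/subscriptions/00000000-0000-0000-0000-000000000000",
})
if err != nil {
	return err
}
ctx.Export("pimStatus", pulumi.String(sched.Status))
```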
+func (o LookupPimRoleEligibilityScheduleResultOutput) Status() pulumi.StringOutput { + return o.ApplyT(func(v LookupPimRoleEligibilityScheduleResult) string { return v.Status }).(pulumi.StringOutput) +} + +// The resultant role eligibility schedule id or the role eligibility schedule id being updated +func (o LookupPimRoleEligibilityScheduleResultOutput) TargetRoleEligibilityScheduleId() pulumi.StringPtrOutput { + return o.ApplyT(func(v LookupPimRoleEligibilityScheduleResult) *string { return v.TargetRoleEligibilityScheduleId }).(pulumi.StringPtrOutput) +} + +// The role eligibility schedule instance id being updated +func (o LookupPimRoleEligibilityScheduleResultOutput) TargetRoleEligibilityScheduleInstanceId() pulumi.StringPtrOutput { + return o.ApplyT(func(v LookupPimRoleEligibilityScheduleResult) *string { + return v.TargetRoleEligibilityScheduleInstanceId + }).(pulumi.StringPtrOutput) +} + +// Ticket Info of the role eligibility +func (o LookupPimRoleEligibilityScheduleResultOutput) TicketInfo() RoleEligibilityScheduleRequestPropertiesResponseTicketInfoPtrOutput { + return o.ApplyT(func(v LookupPimRoleEligibilityScheduleResult) *RoleEligibilityScheduleRequestPropertiesResponseTicketInfo { + return v.TicketInfo + }).(RoleEligibilityScheduleRequestPropertiesResponseTicketInfoPtrOutput) +} + +// The role eligibility schedule request type. +func (o LookupPimRoleEligibilityScheduleResultOutput) Type() pulumi.StringOutput { + return o.ApplyT(func(v LookupPimRoleEligibilityScheduleResult) string { return v.Type }).(pulumi.StringOutput) +} + +func init() { + pulumi.RegisterOutputType(LookupPimRoleEligibilityScheduleResultOutput{}) +} diff --git a/vendor/github.com/pulumi/pulumi-azure-native-sdk/authorization/v2/init.go b/vendor/github.com/pulumi/pulumi-azure-native-sdk/authorization/v2/init.go index b30478cbf..0eafc1271 100644 --- a/vendor/github.com/pulumi/pulumi-azure-native-sdk/authorization/v2/init.go +++ b/vendor/github.com/pulumi/pulumi-azure-native-sdk/authorization/v2/init.go @@ -33,6 +33,8 @@ func (m *module) Construct(ctx *pulumi.Context, name, typ, urn string) (r pulumi r = &ManagementLockAtSubscriptionLevel{} case "azure-native:authorization:ManagementLockByScope": r = &ManagementLockByScope{} + case "azure-native:authorization:PimRoleEligibilitySchedule": + r = &PimRoleEligibilitySchedule{} case "azure-native:authorization:PolicyAssignment": r = &PolicyAssignment{} case "azure-native:authorization:PolicyDefinition": diff --git a/vendor/github.com/pulumi/pulumi-azure-native-sdk/authorization/v2/managementLockAtResourceLevel.go b/vendor/github.com/pulumi/pulumi-azure-native-sdk/authorization/v2/managementLockAtResourceLevel.go index 31c3eee71..f10e100eb 100644 --- a/vendor/github.com/pulumi/pulumi-azure-native-sdk/authorization/v2/managementLockAtResourceLevel.go +++ b/vendor/github.com/pulumi/pulumi-azure-native-sdk/authorization/v2/managementLockAtResourceLevel.go @@ -38,6 +38,9 @@ func NewManagementLockAtResourceLevel(ctx *pulumi.Context, return nil, errors.New("missing one or more required arguments") } + if args.ApiVersion == nil { + return nil, errors.New("invalid value for required argument 'ApiVersion'") + } if args.Level == nil { return nil, errors.New("invalid value for required argument 'Level'") } @@ -101,6 +104,8 @@ func (ManagementLockAtResourceLevelState) ElementType() reflect.Type { } type managementLockAtResourceLevelArgs struct { + // The API version to use for this operation. + ApiVersion string `pulumi:"apiVersion"` // The level of the lock. 
Possible values are: NotSpecified, CanNotDelete, ReadOnly. CanNotDelete means authorized users are able to read and modify the resources, but not delete. ReadOnly means authorized users can only read from a resource, but they can't modify or delete it. Level string `pulumi:"level"` // The name of lock. The lock name can be a maximum of 260 characters. It cannot contain <, > %, &, :, \, ?, /, or any control characters. @@ -123,6 +128,8 @@ type managementLockAtResourceLevelArgs struct { // The set of arguments for constructing a ManagementLockAtResourceLevel resource. type ManagementLockAtResourceLevelArgs struct { + // The API version to use for this operation. + ApiVersion pulumi.StringInput // The level of the lock. Possible values are: NotSpecified, CanNotDelete, ReadOnly. CanNotDelete means authorized users are able to read and modify the resources, but not delete. ReadOnly means authorized users can only read from a resource, but they can't modify or delete it. Level pulumi.StringInput // The name of lock. The lock name can be a maximum of 260 characters. It cannot contain <, > %, &, :, \, ?, /, or any control characters. diff --git a/vendor/github.com/pulumi/pulumi-azure-native-sdk/authorization/v2/pimRoleEligibilitySchedule.go b/vendor/github.com/pulumi/pulumi-azure-native-sdk/authorization/v2/pimRoleEligibilitySchedule.go new file mode 100644 index 000000000..792fac6bd --- /dev/null +++ b/vendor/github.com/pulumi/pulumi-azure-native-sdk/authorization/v2/pimRoleEligibilitySchedule.go @@ -0,0 +1,314 @@ +// Code generated by the Pulumi SDK Generator DO NOT EDIT. +// *** WARNING: Do not edit by hand unless you're certain you know what you are doing! *** + +package authorization + +import ( + "context" + "reflect" + + "errors" + "github.com/pulumi/pulumi-azure-native-sdk/v2/utilities" + "github.com/pulumi/pulumi/sdk/v3/go/pulumi" +) + +// A PIM (Privileged Identity Management) Role Eligibility Schedule. +// +// Role Eligibility Schedules are used to limit standing administrator access to privileged roles in Azure PIM. See +// [here](https://learn.microsoft.com/en-us/rest/api/authorization/privileged-role-eligibility-rest-sample) for details. +// +// A Role Eligibility Schedule is uniquely defined by scope, principal, and role. At present, only one instance of this +// resource can exist for a given scope|principal|role tuple. +// +// Note that this resource cannot be updated. Each change leads to a recreation. +// +// Internally, this resource uses the +// [Role Eligibility Schedule Requests](https://learn.microsoft.com/en-us/rest/api/authorization/role-eligibility-schedule-requests?view=rest-authorization-2020-10-01) +// API to create and delete the schedules. +// +// Azure REST API version: 2020-10-01. +type PimRoleEligibilitySchedule struct { + pulumi.CustomResourceState + + // The approvalId of the role eligibility schedule request. + ApprovalId pulumi.StringOutput `pulumi:"approvalId"` + // The conditions on the role assignment. This limits the resources it can be assigned to. e.g.: @Resource[Microsoft.Storage/storageAccounts/blobServices/containers:ContainerName] StringEqualsIgnoreCase 'foo_storage_container' + Condition pulumi.StringPtrOutput `pulumi:"condition"` + // Version of the condition. 
Currently accepted value is '2.0' + ConditionVersion pulumi.StringPtrOutput `pulumi:"conditionVersion"` + // DateTime when role eligibility schedule request was created + CreatedOn pulumi.StringOutput `pulumi:"createdOn"` + // Additional properties of principal, scope and role definition + ExpandedProperties ExpandedPropertiesResponseOutput `pulumi:"expandedProperties"` + // Justification for the role eligibility + Justification pulumi.StringPtrOutput `pulumi:"justification"` + // The role eligibility schedule request name. + Name pulumi.StringOutput `pulumi:"name"` + // The principal ID. + PrincipalId pulumi.StringOutput `pulumi:"principalId"` + // The principal type of the assigned principal ID. + PrincipalType pulumi.StringOutput `pulumi:"principalType"` + // The type of the role assignment schedule request. Eg: SelfActivate, AdminAssign etc + RequestType pulumi.StringPtrOutput `pulumi:"requestType"` + // Id of the user who created this request + RequestorId pulumi.StringOutput `pulumi:"requestorId"` + // The role definition ID. + RoleDefinitionId pulumi.StringOutput `pulumi:"roleDefinitionId"` + // Schedule info of the role eligibility schedule + ScheduleInfo RoleEligibilityScheduleRequestPropertiesResponseScheduleInfoPtrOutput `pulumi:"scheduleInfo"` + // The role eligibility schedule request scope. + Scope pulumi.StringOutput `pulumi:"scope"` + // The status of the role eligibility schedule request. + Status pulumi.StringOutput `pulumi:"status"` + // The resultant role eligibility schedule id or the role eligibility schedule id being updated + TargetRoleEligibilityScheduleId pulumi.StringPtrOutput `pulumi:"targetRoleEligibilityScheduleId"` + // The role eligibility schedule instance id being updated + TargetRoleEligibilityScheduleInstanceId pulumi.StringPtrOutput `pulumi:"targetRoleEligibilityScheduleInstanceId"` + // Ticket Info of the role eligibility + TicketInfo RoleEligibilityScheduleRequestPropertiesResponseTicketInfoPtrOutput `pulumi:"ticketInfo"` + // The role eligibility schedule request type. + Type pulumi.StringOutput `pulumi:"type"` +} + +// NewPimRoleEligibilitySchedule registers a new resource with the given unique name, arguments, and options. +func NewPimRoleEligibilitySchedule(ctx *pulumi.Context, + name string, args *PimRoleEligibilityScheduleArgs, opts ...pulumi.ResourceOption) (*PimRoleEligibilitySchedule, error) { + if args == nil { + return nil, errors.New("missing one or more required arguments") + } + + if args.PrincipalId == nil { + return nil, errors.New("invalid value for required argument 'PrincipalId'") + } + if args.RoleDefinitionId == nil { + return nil, errors.New("invalid value for required argument 'RoleDefinitionId'") + } + if args.Scope == nil { + return nil, errors.New("invalid value for required argument 'Scope'") + } + aliases := pulumi.Aliases([]pulumi.Alias{ + { + Type: pulumi.String("azure-native:authorization/v20201001:PimRoleEligibilitySchedule"), + }, + }) + opts = append(opts, aliases) + opts = utilities.PkgResourceDefaultOpts(opts) + var resource PimRoleEligibilitySchedule + err := ctx.RegisterResource("azure-native:authorization:PimRoleEligibilitySchedule", name, args, &resource, opts...) + if err != nil { + return nil, err + } + return &resource, nil +} + +// GetPimRoleEligibilitySchedule gets an existing PimRoleEligibilitySchedule resource's state with the given name, ID, and optional +// state properties that are used to uniquely qualify the lookup (nil if not required). 
+func GetPimRoleEligibilitySchedule(ctx *pulumi.Context, + name string, id pulumi.IDInput, state *PimRoleEligibilityScheduleState, opts ...pulumi.ResourceOption) (*PimRoleEligibilitySchedule, error) { + var resource PimRoleEligibilitySchedule + err := ctx.ReadResource("azure-native:authorization:PimRoleEligibilitySchedule", name, id, state, &resource, opts...) + if err != nil { + return nil, err + } + return &resource, nil +} + +// Input properties used for looking up and filtering PimRoleEligibilitySchedule resources. +type pimRoleEligibilityScheduleState struct { +} + +type PimRoleEligibilityScheduleState struct { +} + +func (PimRoleEligibilityScheduleState) ElementType() reflect.Type { + return reflect.TypeOf((*pimRoleEligibilityScheduleState)(nil)).Elem() +} + +type pimRoleEligibilityScheduleArgs struct { + // The conditions on the role assignment. This limits the resources it can be assigned to. e.g.: @Resource[Microsoft.Storage/storageAccounts/blobServices/containers:ContainerName] StringEqualsIgnoreCase 'foo_storage_container' + Condition *string `pulumi:"condition"` + // Version of the condition. Currently accepted value is '2.0' + ConditionVersion *string `pulumi:"conditionVersion"` + // Justification for the role eligibility + Justification *string `pulumi:"justification"` + // The principal ID. + PrincipalId string `pulumi:"principalId"` + // The role definition ID. + RoleDefinitionId string `pulumi:"roleDefinitionId"` + // Schedule info of the role eligibility schedule + ScheduleInfo *RoleEligibilityScheduleRequestPropertiesScheduleInfo `pulumi:"scheduleInfo"` + // The scope of the role eligibility schedule request to create. The scope can be any REST resource instance. For example, use '/subscriptions/{subscription-id}/' for a subscription, '/subscriptions/{subscription-id}/resourceGroups/{resource-group-name}' for a resource group, and '/subscriptions/{subscription-id}/resourceGroups/{resource-group-name}/providers/{resource-provider}/{resource-type}/{resource-name}' for a resource. + Scope string `pulumi:"scope"` + // The resultant role eligibility schedule id or the role eligibility schedule id being updated + TargetRoleEligibilityScheduleId *string `pulumi:"targetRoleEligibilityScheduleId"` + // The role eligibility schedule instance id being updated + TargetRoleEligibilityScheduleInstanceId *string `pulumi:"targetRoleEligibilityScheduleInstanceId"` + // Ticket Info of the role eligibility + TicketInfo *RoleEligibilityScheduleRequestPropertiesTicketInfo `pulumi:"ticketInfo"` +} + +// The set of arguments for constructing a PimRoleEligibilitySchedule resource. +type PimRoleEligibilityScheduleArgs struct { + // The conditions on the role assignment. This limits the resources it can be assigned to. e.g.: @Resource[Microsoft.Storage/storageAccounts/blobServices/containers:ContainerName] StringEqualsIgnoreCase 'foo_storage_container' + Condition pulumi.StringPtrInput + // Version of the condition. Currently accepted value is '2.0' + ConditionVersion pulumi.StringPtrInput + // Justification for the role eligibility + Justification pulumi.StringPtrInput + // The principal ID. + PrincipalId pulumi.StringInput + // The role definition ID. + RoleDefinitionId pulumi.StringInput + // Schedule info of the role eligibility schedule + ScheduleInfo RoleEligibilityScheduleRequestPropertiesScheduleInfoPtrInput + // The scope of the role eligibility schedule request to create. The scope can be any REST resource instance. 
For example, use '/subscriptions/{subscription-id}/' for a subscription, '/subscriptions/{subscription-id}/resourceGroups/{resource-group-name}' for a resource group, and '/subscriptions/{subscription-id}/resourceGroups/{resource-group-name}/providers/{resource-provider}/{resource-type}/{resource-name}' for a resource. + Scope pulumi.StringInput + // The resultant role eligibility schedule id or the role eligibility schedule id being updated + TargetRoleEligibilityScheduleId pulumi.StringPtrInput + // The role eligibility schedule instance id being updated + TargetRoleEligibilityScheduleInstanceId pulumi.StringPtrInput + // Ticket Info of the role eligibility + TicketInfo RoleEligibilityScheduleRequestPropertiesTicketInfoPtrInput +} + +func (PimRoleEligibilityScheduleArgs) ElementType() reflect.Type { + return reflect.TypeOf((*pimRoleEligibilityScheduleArgs)(nil)).Elem() +} + +type PimRoleEligibilityScheduleInput interface { + pulumi.Input + + ToPimRoleEligibilityScheduleOutput() PimRoleEligibilityScheduleOutput + ToPimRoleEligibilityScheduleOutputWithContext(ctx context.Context) PimRoleEligibilityScheduleOutput +} + +func (*PimRoleEligibilitySchedule) ElementType() reflect.Type { + return reflect.TypeOf((**PimRoleEligibilitySchedule)(nil)).Elem() +} + +func (i *PimRoleEligibilitySchedule) ToPimRoleEligibilityScheduleOutput() PimRoleEligibilityScheduleOutput { + return i.ToPimRoleEligibilityScheduleOutputWithContext(context.Background()) +} + +func (i *PimRoleEligibilitySchedule) ToPimRoleEligibilityScheduleOutputWithContext(ctx context.Context) PimRoleEligibilityScheduleOutput { + return pulumi.ToOutputWithContext(ctx, i).(PimRoleEligibilityScheduleOutput) +} + +type PimRoleEligibilityScheduleOutput struct{ *pulumi.OutputState } + +func (PimRoleEligibilityScheduleOutput) ElementType() reflect.Type { + return reflect.TypeOf((**PimRoleEligibilitySchedule)(nil)).Elem() +} + +func (o PimRoleEligibilityScheduleOutput) ToPimRoleEligibilityScheduleOutput() PimRoleEligibilityScheduleOutput { + return o +} + +func (o PimRoleEligibilityScheduleOutput) ToPimRoleEligibilityScheduleOutputWithContext(ctx context.Context) PimRoleEligibilityScheduleOutput { + return o +} + +// The approvalId of the role eligibility schedule request. +func (o PimRoleEligibilityScheduleOutput) ApprovalId() pulumi.StringOutput { + return o.ApplyT(func(v *PimRoleEligibilitySchedule) pulumi.StringOutput { return v.ApprovalId }).(pulumi.StringOutput) +} + +// The conditions on the role assignment. This limits the resources it can be assigned to. e.g.: @Resource[Microsoft.Storage/storageAccounts/blobServices/containers:ContainerName] StringEqualsIgnoreCase 'foo_storage_container' +func (o PimRoleEligibilityScheduleOutput) Condition() pulumi.StringPtrOutput { + return o.ApplyT(func(v *PimRoleEligibilitySchedule) pulumi.StringPtrOutput { return v.Condition }).(pulumi.StringPtrOutput) +} + +// Version of the condition. 
Currently accepted value is '2.0' +func (o PimRoleEligibilityScheduleOutput) ConditionVersion() pulumi.StringPtrOutput { + return o.ApplyT(func(v *PimRoleEligibilitySchedule) pulumi.StringPtrOutput { return v.ConditionVersion }).(pulumi.StringPtrOutput) +} + +// DateTime when role eligibility schedule request was created +func (o PimRoleEligibilityScheduleOutput) CreatedOn() pulumi.StringOutput { + return o.ApplyT(func(v *PimRoleEligibilitySchedule) pulumi.StringOutput { return v.CreatedOn }).(pulumi.StringOutput) +} + +// Additional properties of principal, scope and role definition +func (o PimRoleEligibilityScheduleOutput) ExpandedProperties() ExpandedPropertiesResponseOutput { + return o.ApplyT(func(v *PimRoleEligibilitySchedule) ExpandedPropertiesResponseOutput { return v.ExpandedProperties }).(ExpandedPropertiesResponseOutput) +} + +// Justification for the role eligibility +func (o PimRoleEligibilityScheduleOutput) Justification() pulumi.StringPtrOutput { + return o.ApplyT(func(v *PimRoleEligibilitySchedule) pulumi.StringPtrOutput { return v.Justification }).(pulumi.StringPtrOutput) +} + +// The role eligibility schedule request name. +func (o PimRoleEligibilityScheduleOutput) Name() pulumi.StringOutput { + return o.ApplyT(func(v *PimRoleEligibilitySchedule) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput) +} + +// The principal ID. +func (o PimRoleEligibilityScheduleOutput) PrincipalId() pulumi.StringOutput { + return o.ApplyT(func(v *PimRoleEligibilitySchedule) pulumi.StringOutput { return v.PrincipalId }).(pulumi.StringOutput) +} + +// The principal type of the assigned principal ID. +func (o PimRoleEligibilityScheduleOutput) PrincipalType() pulumi.StringOutput { + return o.ApplyT(func(v *PimRoleEligibilitySchedule) pulumi.StringOutput { return v.PrincipalType }).(pulumi.StringOutput) +} + +// The type of the role assignment schedule request. Eg: SelfActivate, AdminAssign etc +func (o PimRoleEligibilityScheduleOutput) RequestType() pulumi.StringPtrOutput { + return o.ApplyT(func(v *PimRoleEligibilitySchedule) pulumi.StringPtrOutput { return v.RequestType }).(pulumi.StringPtrOutput) +} + +// Id of the user who created this request +func (o PimRoleEligibilityScheduleOutput) RequestorId() pulumi.StringOutput { + return o.ApplyT(func(v *PimRoleEligibilitySchedule) pulumi.StringOutput { return v.RequestorId }).(pulumi.StringOutput) +} + +// The role definition ID. +func (o PimRoleEligibilityScheduleOutput) RoleDefinitionId() pulumi.StringOutput { + return o.ApplyT(func(v *PimRoleEligibilitySchedule) pulumi.StringOutput { return v.RoleDefinitionId }).(pulumi.StringOutput) +} + +// Schedule info of the role eligibility schedule +func (o PimRoleEligibilityScheduleOutput) ScheduleInfo() RoleEligibilityScheduleRequestPropertiesResponseScheduleInfoPtrOutput { + return o.ApplyT(func(v *PimRoleEligibilitySchedule) RoleEligibilityScheduleRequestPropertiesResponseScheduleInfoPtrOutput { + return v.ScheduleInfo + }).(RoleEligibilityScheduleRequestPropertiesResponseScheduleInfoPtrOutput) +} + +// The role eligibility schedule request scope. +func (o PimRoleEligibilityScheduleOutput) Scope() pulumi.StringOutput { + return o.ApplyT(func(v *PimRoleEligibilitySchedule) pulumi.StringOutput { return v.Scope }).(pulumi.StringOutput) +} + +// The status of the role eligibility schedule request. 
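+// For illustration (editor-added, not generated): like any Pulumi output this can be
+// exported from the stack, e.g. ctx.Export("status", sched.Status) for a resource
+// instance sched returned by NewPimRoleEligibilitySchedule.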
+func (o PimRoleEligibilityScheduleOutput) Status() pulumi.StringOutput { + return o.ApplyT(func(v *PimRoleEligibilitySchedule) pulumi.StringOutput { return v.Status }).(pulumi.StringOutput) +} + +// The resultant role eligibility schedule id or the role eligibility schedule id being updated +func (o PimRoleEligibilityScheduleOutput) TargetRoleEligibilityScheduleId() pulumi.StringPtrOutput { + return o.ApplyT(func(v *PimRoleEligibilitySchedule) pulumi.StringPtrOutput { return v.TargetRoleEligibilityScheduleId }).(pulumi.StringPtrOutput) +} + +// The role eligibility schedule instance id being updated +func (o PimRoleEligibilityScheduleOutput) TargetRoleEligibilityScheduleInstanceId() pulumi.StringPtrOutput { + return o.ApplyT(func(v *PimRoleEligibilitySchedule) pulumi.StringPtrOutput { + return v.TargetRoleEligibilityScheduleInstanceId + }).(pulumi.StringPtrOutput) +} + +// Ticket Info of the role eligibility +func (o PimRoleEligibilityScheduleOutput) TicketInfo() RoleEligibilityScheduleRequestPropertiesResponseTicketInfoPtrOutput { + return o.ApplyT(func(v *PimRoleEligibilitySchedule) RoleEligibilityScheduleRequestPropertiesResponseTicketInfoPtrOutput { + return v.TicketInfo + }).(RoleEligibilityScheduleRequestPropertiesResponseTicketInfoPtrOutput) +} + +// The role eligibility schedule request type. +func (o PimRoleEligibilityScheduleOutput) Type() pulumi.StringOutput { + return o.ApplyT(func(v *PimRoleEligibilitySchedule) pulumi.StringOutput { return v.Type }).(pulumi.StringOutput) +} + +func init() { + pulumi.RegisterOutputType(PimRoleEligibilityScheduleOutput{}) +} diff --git a/vendor/github.com/pulumi/pulumi-azure-native-sdk/authorization/v2/pulumi-plugin.json b/vendor/github.com/pulumi/pulumi-azure-native-sdk/authorization/v2/pulumi-plugin.json index c96467358..c4a5a451b 100644 --- a/vendor/github.com/pulumi/pulumi-azure-native-sdk/authorization/v2/pulumi-plugin.json +++ b/vendor/github.com/pulumi/pulumi-azure-native-sdk/authorization/v2/pulumi-plugin.json @@ -1,5 +1,5 @@ { "resource": true, "name": "azure-native", - "version": "2.88.0" + "version": "2.89.3" } diff --git a/vendor/github.com/pulumi/pulumi-azure-native-sdk/authorization/v2/pulumiEnums.go b/vendor/github.com/pulumi/pulumi-azure-native-sdk/authorization/v2/pulumiEnums.go index 82dd87021..f86b4ea48 100644 --- a/vendor/github.com/pulumi/pulumi-azure-native-sdk/authorization/v2/pulumiEnums.go +++ b/vendor/github.com/pulumi/pulumi-azure-native-sdk/authorization/v2/pulumiEnums.go @@ -3217,6 +3217,21 @@ func (in *recipientTypePtr) ToRecipientTypePtrOutputWithContext(ctx context.Cont return pulumi.ToOutputWithContext(ctx, in).(RecipientTypePtrOutput) } +// The type of the role assignment schedule request. Eg: SelfActivate, AdminAssign etc +type RequestType string + +const ( + RequestTypeAdminAssign = RequestType("AdminAssign") + RequestTypeAdminRemove = RequestType("AdminRemove") + RequestTypeAdminUpdate = RequestType("AdminUpdate") + RequestTypeAdminExtend = RequestType("AdminExtend") + RequestTypeAdminRenew = RequestType("AdminRenew") + RequestTypeSelfActivate = RequestType("SelfActivate") + RequestTypeSelfDeactivate = RequestType("SelfDeactivate") + RequestTypeSelfExtend = RequestType("SelfExtend") + RequestTypeSelfRenew = RequestType("SelfRenew") +) + // The identity type. This is the only required field when adding a system or user assigned identity to a resource. 
type ResourceIdentityType string @@ -3574,6 +3589,174 @@ func (in *selectorKindPtr) ToSelectorKindPtrOutputWithContext(ctx context.Contex return pulumi.ToOutputWithContext(ctx, in).(SelectorKindPtrOutput) } +// Type of the role eligibility schedule expiration +type Type string + +const ( + TypeAfterDuration = Type("AfterDuration") + TypeAfterDateTime = Type("AfterDateTime") + TypeNoExpiration = Type("NoExpiration") +) + +func (Type) ElementType() reflect.Type { + return reflect.TypeOf((*Type)(nil)).Elem() +} + +func (e Type) ToTypeOutput() TypeOutput { + return pulumi.ToOutput(e).(TypeOutput) +} + +func (e Type) ToTypeOutputWithContext(ctx context.Context) TypeOutput { + return pulumi.ToOutputWithContext(ctx, e).(TypeOutput) +} + +func (e Type) ToTypePtrOutput() TypePtrOutput { + return e.ToTypePtrOutputWithContext(context.Background()) +} + +func (e Type) ToTypePtrOutputWithContext(ctx context.Context) TypePtrOutput { + return Type(e).ToTypeOutputWithContext(ctx).ToTypePtrOutputWithContext(ctx) +} + +func (e Type) ToStringOutput() pulumi.StringOutput { + return pulumi.ToOutput(pulumi.String(e)).(pulumi.StringOutput) +} + +func (e Type) ToStringOutputWithContext(ctx context.Context) pulumi.StringOutput { + return pulumi.ToOutputWithContext(ctx, pulumi.String(e)).(pulumi.StringOutput) +} + +func (e Type) ToStringPtrOutput() pulumi.StringPtrOutput { + return pulumi.String(e).ToStringPtrOutputWithContext(context.Background()) +} + +func (e Type) ToStringPtrOutputWithContext(ctx context.Context) pulumi.StringPtrOutput { + return pulumi.String(e).ToStringOutputWithContext(ctx).ToStringPtrOutputWithContext(ctx) +} + +type TypeOutput struct{ *pulumi.OutputState } + +func (TypeOutput) ElementType() reflect.Type { + return reflect.TypeOf((*Type)(nil)).Elem() +} + +func (o TypeOutput) ToTypeOutput() TypeOutput { + return o +} + +func (o TypeOutput) ToTypeOutputWithContext(ctx context.Context) TypeOutput { + return o +} + +func (o TypeOutput) ToTypePtrOutput() TypePtrOutput { + return o.ToTypePtrOutputWithContext(context.Background()) +} + +func (o TypeOutput) ToTypePtrOutputWithContext(ctx context.Context) TypePtrOutput { + return o.ApplyTWithContext(ctx, func(_ context.Context, v Type) *Type { + return &v + }).(TypePtrOutput) +} + +func (o TypeOutput) ToStringOutput() pulumi.StringOutput { + return o.ToStringOutputWithContext(context.Background()) +} + +func (o TypeOutput) ToStringOutputWithContext(ctx context.Context) pulumi.StringOutput { + return o.ApplyTWithContext(ctx, func(_ context.Context, e Type) string { + return string(e) + }).(pulumi.StringOutput) +} + +func (o TypeOutput) ToStringPtrOutput() pulumi.StringPtrOutput { + return o.ToStringPtrOutputWithContext(context.Background()) +} + +func (o TypeOutput) ToStringPtrOutputWithContext(ctx context.Context) pulumi.StringPtrOutput { + return o.ApplyTWithContext(ctx, func(_ context.Context, e Type) *string { + v := string(e) + return &v + }).(pulumi.StringPtrOutput) +} + +type TypePtrOutput struct{ *pulumi.OutputState } + +func (TypePtrOutput) ElementType() reflect.Type { + return reflect.TypeOf((**Type)(nil)).Elem() +} + +func (o TypePtrOutput) ToTypePtrOutput() TypePtrOutput { + return o +} + +func (o TypePtrOutput) ToTypePtrOutputWithContext(ctx context.Context) TypePtrOutput { + return o +} + +func (o TypePtrOutput) Elem() TypeOutput { + return o.ApplyT(func(v *Type) Type { + if v != nil { + return *v + } + var ret Type + return ret + }).(TypeOutput) +} + +func (o TypePtrOutput) ToStringPtrOutput() pulumi.StringPtrOutput { + return 
o.ToStringPtrOutputWithContext(context.Background()) +} + +func (o TypePtrOutput) ToStringPtrOutputWithContext(ctx context.Context) pulumi.StringPtrOutput { + return o.ApplyTWithContext(ctx, func(_ context.Context, e *Type) *string { + if e == nil { + return nil + } + v := string(*e) + return &v + }).(pulumi.StringPtrOutput) +} + +// TypeInput is an input type that accepts values of the Type enum +// A concrete instance of `TypeInput` can be one of the following: +// +// TypeAfterDuration +// TypeAfterDateTime +// TypeNoExpiration +type TypeInput interface { + pulumi.Input + + ToTypeOutput() TypeOutput + ToTypeOutputWithContext(context.Context) TypeOutput +} + +var typePtrType = reflect.TypeOf((**Type)(nil)).Elem() + +type TypePtrInput interface { + pulumi.Input + + ToTypePtrOutput() TypePtrOutput + ToTypePtrOutputWithContext(context.Context) TypePtrOutput +} + +type typePtr string + +func TypePtr(v string) TypePtrInput { + return (*typePtr)(&v) +} + +func (*typePtr) ElementType() reflect.Type { + return typePtrType +} + +func (in *typePtr) ToTypePtrOutput() TypePtrOutput { + return pulumi.ToOutput(in).(TypePtrOutput) +} + +func (in *typePtr) ToTypePtrOutputWithContext(ctx context.Context) TypePtrOutput { + return pulumi.ToOutputWithContext(ctx, in).(TypePtrOutput) +} + // The type of user. type UserType string @@ -3785,6 +3968,8 @@ func init() { pulumi.RegisterOutputType(ResourceIdentityTypePtrOutput{}) pulumi.RegisterOutputType(SelectorKindOutput{}) pulumi.RegisterOutputType(SelectorKindPtrOutput{}) + pulumi.RegisterOutputType(TypeOutput{}) + pulumi.RegisterOutputType(TypePtrOutput{}) pulumi.RegisterOutputType(UserTypeOutput{}) pulumi.RegisterOutputType(UserTypePtrOutput{}) } diff --git a/vendor/github.com/pulumi/pulumi-azure-native-sdk/authorization/v2/pulumiTypes.go b/vendor/github.com/pulumi/pulumi-azure-native-sdk/authorization/v2/pulumiTypes.go index ce47c2583..60ca7b884 100644 --- a/vendor/github.com/pulumi/pulumi-azure-native-sdk/authorization/v2/pulumiTypes.go +++ b/vendor/github.com/pulumi/pulumi-azure-native-sdk/authorization/v2/pulumiTypes.go @@ -1480,6 +1480,345 @@ func (o ApprovalStageResponseArrayOutput) Index(i pulumi.IntInput) ApprovalStage }).(ApprovalStageResponseOutput) } +// Expanded info of resource, role and principal +type ExpandedPropertiesResponse struct { + // Details of the principal + Principal *ExpandedPropertiesResponsePrincipal `pulumi:"principal"` + // Details of role definition + RoleDefinition *ExpandedPropertiesResponseRoleDefinition `pulumi:"roleDefinition"` + // Details of the resource scope + Scope *ExpandedPropertiesResponseScope `pulumi:"scope"` +} + +// Expanded info of resource, role and principal +type ExpandedPropertiesResponseOutput struct{ *pulumi.OutputState } + +func (ExpandedPropertiesResponseOutput) ElementType() reflect.Type { + return reflect.TypeOf((*ExpandedPropertiesResponse)(nil)).Elem() +} + +func (o ExpandedPropertiesResponseOutput) ToExpandedPropertiesResponseOutput() ExpandedPropertiesResponseOutput { + return o +} + +func (o ExpandedPropertiesResponseOutput) ToExpandedPropertiesResponseOutputWithContext(ctx context.Context) ExpandedPropertiesResponseOutput { + return o +} + +// Details of the principal +func (o ExpandedPropertiesResponseOutput) Principal() ExpandedPropertiesResponsePrincipalPtrOutput { + return o.ApplyT(func(v ExpandedPropertiesResponse) *ExpandedPropertiesResponsePrincipal { return v.Principal }).(ExpandedPropertiesResponsePrincipalPtrOutput) +} + +// Details of role definition +func (o 
ExpandedPropertiesResponseOutput) RoleDefinition() ExpandedPropertiesResponseRoleDefinitionPtrOutput { + return o.ApplyT(func(v ExpandedPropertiesResponse) *ExpandedPropertiesResponseRoleDefinition { return v.RoleDefinition }).(ExpandedPropertiesResponseRoleDefinitionPtrOutput) +} + +// Details of the resource scope +func (o ExpandedPropertiesResponseOutput) Scope() ExpandedPropertiesResponseScopePtrOutput { + return o.ApplyT(func(v ExpandedPropertiesResponse) *ExpandedPropertiesResponseScope { return v.Scope }).(ExpandedPropertiesResponseScopePtrOutput) +} + +// Details of the principal +type ExpandedPropertiesResponsePrincipal struct { + // Display name of the principal + DisplayName *string `pulumi:"displayName"` + // Email id of the principal + Email *string `pulumi:"email"` + // Id of the principal + Id *string `pulumi:"id"` + // Type of the principal + Type *string `pulumi:"type"` +} + +// Details of the principal +type ExpandedPropertiesResponsePrincipalOutput struct{ *pulumi.OutputState } + +func (ExpandedPropertiesResponsePrincipalOutput) ElementType() reflect.Type { + return reflect.TypeOf((*ExpandedPropertiesResponsePrincipal)(nil)).Elem() +} + +func (o ExpandedPropertiesResponsePrincipalOutput) ToExpandedPropertiesResponsePrincipalOutput() ExpandedPropertiesResponsePrincipalOutput { + return o +} + +func (o ExpandedPropertiesResponsePrincipalOutput) ToExpandedPropertiesResponsePrincipalOutputWithContext(ctx context.Context) ExpandedPropertiesResponsePrincipalOutput { + return o +} + +// Display name of the principal +func (o ExpandedPropertiesResponsePrincipalOutput) DisplayName() pulumi.StringPtrOutput { + return o.ApplyT(func(v ExpandedPropertiesResponsePrincipal) *string { return v.DisplayName }).(pulumi.StringPtrOutput) +} + +// Email id of the principal +func (o ExpandedPropertiesResponsePrincipalOutput) Email() pulumi.StringPtrOutput { + return o.ApplyT(func(v ExpandedPropertiesResponsePrincipal) *string { return v.Email }).(pulumi.StringPtrOutput) +} + +// Id of the principal +func (o ExpandedPropertiesResponsePrincipalOutput) Id() pulumi.StringPtrOutput { + return o.ApplyT(func(v ExpandedPropertiesResponsePrincipal) *string { return v.Id }).(pulumi.StringPtrOutput) +} + +// Type of the principal +func (o ExpandedPropertiesResponsePrincipalOutput) Type() pulumi.StringPtrOutput { + return o.ApplyT(func(v ExpandedPropertiesResponsePrincipal) *string { return v.Type }).(pulumi.StringPtrOutput) +} + +type ExpandedPropertiesResponsePrincipalPtrOutput struct{ *pulumi.OutputState } + +func (ExpandedPropertiesResponsePrincipalPtrOutput) ElementType() reflect.Type { + return reflect.TypeOf((**ExpandedPropertiesResponsePrincipal)(nil)).Elem() +} + +func (o ExpandedPropertiesResponsePrincipalPtrOutput) ToExpandedPropertiesResponsePrincipalPtrOutput() ExpandedPropertiesResponsePrincipalPtrOutput { + return o +} + +func (o ExpandedPropertiesResponsePrincipalPtrOutput) ToExpandedPropertiesResponsePrincipalPtrOutputWithContext(ctx context.Context) ExpandedPropertiesResponsePrincipalPtrOutput { + return o +} + +func (o ExpandedPropertiesResponsePrincipalPtrOutput) Elem() ExpandedPropertiesResponsePrincipalOutput { + return o.ApplyT(func(v *ExpandedPropertiesResponsePrincipal) ExpandedPropertiesResponsePrincipal { + if v != nil { + return *v + } + var ret ExpandedPropertiesResponsePrincipal + return ret + }).(ExpandedPropertiesResponsePrincipalOutput) +} + +// Display name of the principal +func (o ExpandedPropertiesResponsePrincipalPtrOutput) DisplayName() pulumi.StringPtrOutput { + 
return o.ApplyT(func(v *ExpandedPropertiesResponsePrincipal) *string { + if v == nil { + return nil + } + return v.DisplayName + }).(pulumi.StringPtrOutput) +} + +// Email id of the principal +func (o ExpandedPropertiesResponsePrincipalPtrOutput) Email() pulumi.StringPtrOutput { + return o.ApplyT(func(v *ExpandedPropertiesResponsePrincipal) *string { + if v == nil { + return nil + } + return v.Email + }).(pulumi.StringPtrOutput) +} + +// Id of the principal +func (o ExpandedPropertiesResponsePrincipalPtrOutput) Id() pulumi.StringPtrOutput { + return o.ApplyT(func(v *ExpandedPropertiesResponsePrincipal) *string { + if v == nil { + return nil + } + return v.Id + }).(pulumi.StringPtrOutput) +} + +// Type of the principal +func (o ExpandedPropertiesResponsePrincipalPtrOutput) Type() pulumi.StringPtrOutput { + return o.ApplyT(func(v *ExpandedPropertiesResponsePrincipal) *string { + if v == nil { + return nil + } + return v.Type + }).(pulumi.StringPtrOutput) +} + +// Details of role definition +type ExpandedPropertiesResponseRoleDefinition struct { + // Display name of the role definition + DisplayName *string `pulumi:"displayName"` + // Id of the role definition + Id *string `pulumi:"id"` + // Type of the role definition + Type *string `pulumi:"type"` +} + +// Details of role definition +type ExpandedPropertiesResponseRoleDefinitionOutput struct{ *pulumi.OutputState } + +func (ExpandedPropertiesResponseRoleDefinitionOutput) ElementType() reflect.Type { + return reflect.TypeOf((*ExpandedPropertiesResponseRoleDefinition)(nil)).Elem() +} + +func (o ExpandedPropertiesResponseRoleDefinitionOutput) ToExpandedPropertiesResponseRoleDefinitionOutput() ExpandedPropertiesResponseRoleDefinitionOutput { + return o +} + +func (o ExpandedPropertiesResponseRoleDefinitionOutput) ToExpandedPropertiesResponseRoleDefinitionOutputWithContext(ctx context.Context) ExpandedPropertiesResponseRoleDefinitionOutput { + return o +} + +// Display name of the role definition +func (o ExpandedPropertiesResponseRoleDefinitionOutput) DisplayName() pulumi.StringPtrOutput { + return o.ApplyT(func(v ExpandedPropertiesResponseRoleDefinition) *string { return v.DisplayName }).(pulumi.StringPtrOutput) +} + +// Id of the role definition +func (o ExpandedPropertiesResponseRoleDefinitionOutput) Id() pulumi.StringPtrOutput { + return o.ApplyT(func(v ExpandedPropertiesResponseRoleDefinition) *string { return v.Id }).(pulumi.StringPtrOutput) +} + +// Type of the role definition +func (o ExpandedPropertiesResponseRoleDefinitionOutput) Type() pulumi.StringPtrOutput { + return o.ApplyT(func(v ExpandedPropertiesResponseRoleDefinition) *string { return v.Type }).(pulumi.StringPtrOutput) +} + +type ExpandedPropertiesResponseRoleDefinitionPtrOutput struct{ *pulumi.OutputState } + +func (ExpandedPropertiesResponseRoleDefinitionPtrOutput) ElementType() reflect.Type { + return reflect.TypeOf((**ExpandedPropertiesResponseRoleDefinition)(nil)).Elem() +} + +func (o ExpandedPropertiesResponseRoleDefinitionPtrOutput) ToExpandedPropertiesResponseRoleDefinitionPtrOutput() ExpandedPropertiesResponseRoleDefinitionPtrOutput { + return o +} + +func (o ExpandedPropertiesResponseRoleDefinitionPtrOutput) ToExpandedPropertiesResponseRoleDefinitionPtrOutputWithContext(ctx context.Context) ExpandedPropertiesResponseRoleDefinitionPtrOutput { + return o +} + +func (o ExpandedPropertiesResponseRoleDefinitionPtrOutput) Elem() ExpandedPropertiesResponseRoleDefinitionOutput { + return o.ApplyT(func(v *ExpandedPropertiesResponseRoleDefinition) 
ExpandedPropertiesResponseRoleDefinition { + if v != nil { + return *v + } + var ret ExpandedPropertiesResponseRoleDefinition + return ret + }).(ExpandedPropertiesResponseRoleDefinitionOutput) +} + +// Display name of the role definition +func (o ExpandedPropertiesResponseRoleDefinitionPtrOutput) DisplayName() pulumi.StringPtrOutput { + return o.ApplyT(func(v *ExpandedPropertiesResponseRoleDefinition) *string { + if v == nil { + return nil + } + return v.DisplayName + }).(pulumi.StringPtrOutput) +} + +// Id of the role definition +func (o ExpandedPropertiesResponseRoleDefinitionPtrOutput) Id() pulumi.StringPtrOutput { + return o.ApplyT(func(v *ExpandedPropertiesResponseRoleDefinition) *string { + if v == nil { + return nil + } + return v.Id + }).(pulumi.StringPtrOutput) +} + +// Type of the role definition +func (o ExpandedPropertiesResponseRoleDefinitionPtrOutput) Type() pulumi.StringPtrOutput { + return o.ApplyT(func(v *ExpandedPropertiesResponseRoleDefinition) *string { + if v == nil { + return nil + } + return v.Type + }).(pulumi.StringPtrOutput) +} + +// Details of the resource scope +type ExpandedPropertiesResponseScope struct { + // Display name of the resource + DisplayName *string `pulumi:"displayName"` + // Scope id of the resource + Id *string `pulumi:"id"` + // Type of the resource + Type *string `pulumi:"type"` +} + +// Details of the resource scope +type ExpandedPropertiesResponseScopeOutput struct{ *pulumi.OutputState } + +func (ExpandedPropertiesResponseScopeOutput) ElementType() reflect.Type { + return reflect.TypeOf((*ExpandedPropertiesResponseScope)(nil)).Elem() +} + +func (o ExpandedPropertiesResponseScopeOutput) ToExpandedPropertiesResponseScopeOutput() ExpandedPropertiesResponseScopeOutput { + return o +} + +func (o ExpandedPropertiesResponseScopeOutput) ToExpandedPropertiesResponseScopeOutputWithContext(ctx context.Context) ExpandedPropertiesResponseScopeOutput { + return o +} + +// Display name of the resource +func (o ExpandedPropertiesResponseScopeOutput) DisplayName() pulumi.StringPtrOutput { + return o.ApplyT(func(v ExpandedPropertiesResponseScope) *string { return v.DisplayName }).(pulumi.StringPtrOutput) +} + +// Scope id of the resource +func (o ExpandedPropertiesResponseScopeOutput) Id() pulumi.StringPtrOutput { + return o.ApplyT(func(v ExpandedPropertiesResponseScope) *string { return v.Id }).(pulumi.StringPtrOutput) +} + +// Type of the resource +func (o ExpandedPropertiesResponseScopeOutput) Type() pulumi.StringPtrOutput { + return o.ApplyT(func(v ExpandedPropertiesResponseScope) *string { return v.Type }).(pulumi.StringPtrOutput) +} + +type ExpandedPropertiesResponseScopePtrOutput struct{ *pulumi.OutputState } + +func (ExpandedPropertiesResponseScopePtrOutput) ElementType() reflect.Type { + return reflect.TypeOf((**ExpandedPropertiesResponseScope)(nil)).Elem() +} + +func (o ExpandedPropertiesResponseScopePtrOutput) ToExpandedPropertiesResponseScopePtrOutput() ExpandedPropertiesResponseScopePtrOutput { + return o +} + +func (o ExpandedPropertiesResponseScopePtrOutput) ToExpandedPropertiesResponseScopePtrOutputWithContext(ctx context.Context) ExpandedPropertiesResponseScopePtrOutput { + return o +} + +func (o ExpandedPropertiesResponseScopePtrOutput) Elem() ExpandedPropertiesResponseScopeOutput { + return o.ApplyT(func(v *ExpandedPropertiesResponseScope) ExpandedPropertiesResponseScope { + if v != nil { + return *v + } + var ret ExpandedPropertiesResponseScope + return ret + }).(ExpandedPropertiesResponseScopeOutput) +} + +// Display name of the resource 
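+// Editor's note (illustrative, not generated): the nested response properties chain
+// through these typed outputs, e.g.
+//
+//	ctx.Export("scopeDisplayName", sched.ExpandedProperties.Scope().DisplayName())
+//
+// where sched is a hypothetical *PimRoleEligibilitySchedule instance.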
+func (o ExpandedPropertiesResponseScopePtrOutput) DisplayName() pulumi.StringPtrOutput { + return o.ApplyT(func(v *ExpandedPropertiesResponseScope) *string { + if v == nil { + return nil + } + return v.DisplayName + }).(pulumi.StringPtrOutput) +} + +// Scope id of the resource +func (o ExpandedPropertiesResponseScopePtrOutput) Id() pulumi.StringPtrOutput { + return o.ApplyT(func(v *ExpandedPropertiesResponseScope) *string { + if v == nil { + return nil + } + return v.Id + }).(pulumi.StringPtrOutput) +} + +// Type of the resource +func (o ExpandedPropertiesResponseScopePtrOutput) Type() pulumi.StringPtrOutput { + return o.ApplyT(func(v *ExpandedPropertiesResponseScope) *string { + if v == nil { + return nil + } + return v.Type + }).(pulumi.StringPtrOutput) +} + // Identity for the resource. Policy assignments support a maximum of one identity. That is either a system assigned identity or a single user assigned identity. type Identity struct { // The identity type. This is the only required field when adding a system or user assigned identity to a resource. @@ -5330,6 +5669,754 @@ func (o ResourceSelectorResponseArrayOutput) Index(i pulumi.IntInput) ResourceSe }).(ResourceSelectorResponseOutput) } +// Expiration of the role eligibility schedule +type RoleEligibilityScheduleRequestPropertiesExpiration struct { + // Duration of the role eligibility schedule in TimeSpan. + Duration *string `pulumi:"duration"` + // End DateTime of the role eligibility schedule. + EndDateTime *string `pulumi:"endDateTime"` + // Type of the role eligibility schedule expiration + Type *string `pulumi:"type"` +} + +// RoleEligibilityScheduleRequestPropertiesExpirationInput is an input type that accepts RoleEligibilityScheduleRequestPropertiesExpirationArgs and RoleEligibilityScheduleRequestPropertiesExpirationOutput values. +// You can construct a concrete instance of `RoleEligibilityScheduleRequestPropertiesExpirationInput` via: +// +// RoleEligibilityScheduleRequestPropertiesExpirationArgs{...} +type RoleEligibilityScheduleRequestPropertiesExpirationInput interface { + pulumi.Input + + ToRoleEligibilityScheduleRequestPropertiesExpirationOutput() RoleEligibilityScheduleRequestPropertiesExpirationOutput + ToRoleEligibilityScheduleRequestPropertiesExpirationOutputWithContext(context.Context) RoleEligibilityScheduleRequestPropertiesExpirationOutput +} + +// Expiration of the role eligibility schedule +type RoleEligibilityScheduleRequestPropertiesExpirationArgs struct { + // Duration of the role eligibility schedule in TimeSpan. + Duration pulumi.StringPtrInput `pulumi:"duration"` + // End DateTime of the role eligibility schedule. 
+ EndDateTime pulumi.StringPtrInput `pulumi:"endDateTime"` + // Type of the role eligibility schedule expiration + Type pulumi.StringPtrInput `pulumi:"type"` +} + +func (RoleEligibilityScheduleRequestPropertiesExpirationArgs) ElementType() reflect.Type { + return reflect.TypeOf((*RoleEligibilityScheduleRequestPropertiesExpiration)(nil)).Elem() +} + +func (i RoleEligibilityScheduleRequestPropertiesExpirationArgs) ToRoleEligibilityScheduleRequestPropertiesExpirationOutput() RoleEligibilityScheduleRequestPropertiesExpirationOutput { + return i.ToRoleEligibilityScheduleRequestPropertiesExpirationOutputWithContext(context.Background()) +} + +func (i RoleEligibilityScheduleRequestPropertiesExpirationArgs) ToRoleEligibilityScheduleRequestPropertiesExpirationOutputWithContext(ctx context.Context) RoleEligibilityScheduleRequestPropertiesExpirationOutput { + return pulumi.ToOutputWithContext(ctx, i).(RoleEligibilityScheduleRequestPropertiesExpirationOutput) +} + +func (i RoleEligibilityScheduleRequestPropertiesExpirationArgs) ToRoleEligibilityScheduleRequestPropertiesExpirationPtrOutput() RoleEligibilityScheduleRequestPropertiesExpirationPtrOutput { + return i.ToRoleEligibilityScheduleRequestPropertiesExpirationPtrOutputWithContext(context.Background()) +} + +func (i RoleEligibilityScheduleRequestPropertiesExpirationArgs) ToRoleEligibilityScheduleRequestPropertiesExpirationPtrOutputWithContext(ctx context.Context) RoleEligibilityScheduleRequestPropertiesExpirationPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(RoleEligibilityScheduleRequestPropertiesExpirationOutput).ToRoleEligibilityScheduleRequestPropertiesExpirationPtrOutputWithContext(ctx) +} + +// RoleEligibilityScheduleRequestPropertiesExpirationPtrInput is an input type that accepts RoleEligibilityScheduleRequestPropertiesExpirationArgs, RoleEligibilityScheduleRequestPropertiesExpirationPtr and RoleEligibilityScheduleRequestPropertiesExpirationPtrOutput values. 
+// You can construct a concrete instance of `RoleEligibilityScheduleRequestPropertiesExpirationPtrInput` via: +// +// RoleEligibilityScheduleRequestPropertiesExpirationArgs{...} +// +// or: +// +// nil +type RoleEligibilityScheduleRequestPropertiesExpirationPtrInput interface { + pulumi.Input + + ToRoleEligibilityScheduleRequestPropertiesExpirationPtrOutput() RoleEligibilityScheduleRequestPropertiesExpirationPtrOutput + ToRoleEligibilityScheduleRequestPropertiesExpirationPtrOutputWithContext(context.Context) RoleEligibilityScheduleRequestPropertiesExpirationPtrOutput +} + +type roleEligibilityScheduleRequestPropertiesExpirationPtrType RoleEligibilityScheduleRequestPropertiesExpirationArgs + +func RoleEligibilityScheduleRequestPropertiesExpirationPtr(v *RoleEligibilityScheduleRequestPropertiesExpirationArgs) RoleEligibilityScheduleRequestPropertiesExpirationPtrInput { + return (*roleEligibilityScheduleRequestPropertiesExpirationPtrType)(v) +} + +func (*roleEligibilityScheduleRequestPropertiesExpirationPtrType) ElementType() reflect.Type { + return reflect.TypeOf((**RoleEligibilityScheduleRequestPropertiesExpiration)(nil)).Elem() +} + +func (i *roleEligibilityScheduleRequestPropertiesExpirationPtrType) ToRoleEligibilityScheduleRequestPropertiesExpirationPtrOutput() RoleEligibilityScheduleRequestPropertiesExpirationPtrOutput { + return i.ToRoleEligibilityScheduleRequestPropertiesExpirationPtrOutputWithContext(context.Background()) +} + +func (i *roleEligibilityScheduleRequestPropertiesExpirationPtrType) ToRoleEligibilityScheduleRequestPropertiesExpirationPtrOutputWithContext(ctx context.Context) RoleEligibilityScheduleRequestPropertiesExpirationPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(RoleEligibilityScheduleRequestPropertiesExpirationPtrOutput) +} + +// Expiration of the role eligibility schedule +type RoleEligibilityScheduleRequestPropertiesExpirationOutput struct{ *pulumi.OutputState } + +func (RoleEligibilityScheduleRequestPropertiesExpirationOutput) ElementType() reflect.Type { + return reflect.TypeOf((*RoleEligibilityScheduleRequestPropertiesExpiration)(nil)).Elem() +} + +func (o RoleEligibilityScheduleRequestPropertiesExpirationOutput) ToRoleEligibilityScheduleRequestPropertiesExpirationOutput() RoleEligibilityScheduleRequestPropertiesExpirationOutput { + return o +} + +func (o RoleEligibilityScheduleRequestPropertiesExpirationOutput) ToRoleEligibilityScheduleRequestPropertiesExpirationOutputWithContext(ctx context.Context) RoleEligibilityScheduleRequestPropertiesExpirationOutput { + return o +} + +func (o RoleEligibilityScheduleRequestPropertiesExpirationOutput) ToRoleEligibilityScheduleRequestPropertiesExpirationPtrOutput() RoleEligibilityScheduleRequestPropertiesExpirationPtrOutput { + return o.ToRoleEligibilityScheduleRequestPropertiesExpirationPtrOutputWithContext(context.Background()) +} + +func (o RoleEligibilityScheduleRequestPropertiesExpirationOutput) ToRoleEligibilityScheduleRequestPropertiesExpirationPtrOutputWithContext(ctx context.Context) RoleEligibilityScheduleRequestPropertiesExpirationPtrOutput { + return o.ApplyTWithContext(ctx, func(_ context.Context, v RoleEligibilityScheduleRequestPropertiesExpiration) *RoleEligibilityScheduleRequestPropertiesExpiration { + return &v + }).(RoleEligibilityScheduleRequestPropertiesExpirationPtrOutput) +} + +// Duration of the role eligibility schedule in TimeSpan. 
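+// Editor's note: the service appears to expect an ISO 8601 duration string here
+// (e.g. "P180D" for 180 days); verify against the Role Eligibility Schedule
+// Requests REST API documentation before relying on this.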
+func (o RoleEligibilityScheduleRequestPropertiesExpirationOutput) Duration() pulumi.StringPtrOutput { + return o.ApplyT(func(v RoleEligibilityScheduleRequestPropertiesExpiration) *string { return v.Duration }).(pulumi.StringPtrOutput) +} + +// End DateTime of the role eligibility schedule. +func (o RoleEligibilityScheduleRequestPropertiesExpirationOutput) EndDateTime() pulumi.StringPtrOutput { + return o.ApplyT(func(v RoleEligibilityScheduleRequestPropertiesExpiration) *string { return v.EndDateTime }).(pulumi.StringPtrOutput) +} + +// Type of the role eligibility schedule expiration +func (o RoleEligibilityScheduleRequestPropertiesExpirationOutput) Type() pulumi.StringPtrOutput { + return o.ApplyT(func(v RoleEligibilityScheduleRequestPropertiesExpiration) *string { return v.Type }).(pulumi.StringPtrOutput) +} + +type RoleEligibilityScheduleRequestPropertiesExpirationPtrOutput struct{ *pulumi.OutputState } + +func (RoleEligibilityScheduleRequestPropertiesExpirationPtrOutput) ElementType() reflect.Type { + return reflect.TypeOf((**RoleEligibilityScheduleRequestPropertiesExpiration)(nil)).Elem() +} + +func (o RoleEligibilityScheduleRequestPropertiesExpirationPtrOutput) ToRoleEligibilityScheduleRequestPropertiesExpirationPtrOutput() RoleEligibilityScheduleRequestPropertiesExpirationPtrOutput { + return o +} + +func (o RoleEligibilityScheduleRequestPropertiesExpirationPtrOutput) ToRoleEligibilityScheduleRequestPropertiesExpirationPtrOutputWithContext(ctx context.Context) RoleEligibilityScheduleRequestPropertiesExpirationPtrOutput { + return o +} + +func (o RoleEligibilityScheduleRequestPropertiesExpirationPtrOutput) Elem() RoleEligibilityScheduleRequestPropertiesExpirationOutput { + return o.ApplyT(func(v *RoleEligibilityScheduleRequestPropertiesExpiration) RoleEligibilityScheduleRequestPropertiesExpiration { + if v != nil { + return *v + } + var ret RoleEligibilityScheduleRequestPropertiesExpiration + return ret + }).(RoleEligibilityScheduleRequestPropertiesExpirationOutput) +} + +// Duration of the role eligibility schedule in TimeSpan. +func (o RoleEligibilityScheduleRequestPropertiesExpirationPtrOutput) Duration() pulumi.StringPtrOutput { + return o.ApplyT(func(v *RoleEligibilityScheduleRequestPropertiesExpiration) *string { + if v == nil { + return nil + } + return v.Duration + }).(pulumi.StringPtrOutput) +} + +// End DateTime of the role eligibility schedule. +func (o RoleEligibilityScheduleRequestPropertiesExpirationPtrOutput) EndDateTime() pulumi.StringPtrOutput { + return o.ApplyT(func(v *RoleEligibilityScheduleRequestPropertiesExpiration) *string { + if v == nil { + return nil + } + return v.EndDateTime + }).(pulumi.StringPtrOutput) +} + +// Type of the role eligibility schedule expiration +func (o RoleEligibilityScheduleRequestPropertiesExpirationPtrOutput) Type() pulumi.StringPtrOutput { + return o.ApplyT(func(v *RoleEligibilityScheduleRequestPropertiesExpiration) *string { + if v == nil { + return nil + } + return v.Type + }).(pulumi.StringPtrOutput) +} + +// Expiration of the role eligibility schedule +type RoleEligibilityScheduleRequestPropertiesResponseExpiration struct { + // Duration of the role eligibility schedule in TimeSpan. + Duration *string `pulumi:"duration"` + // End DateTime of the role eligibility schedule. 
+ EndDateTime *string `pulumi:"endDateTime"` + // Type of the role eligibility schedule expiration + Type *string `pulumi:"type"` +} + +// Expiration of the role eligibility schedule +type RoleEligibilityScheduleRequestPropertiesResponseExpirationOutput struct{ *pulumi.OutputState } + +func (RoleEligibilityScheduleRequestPropertiesResponseExpirationOutput) ElementType() reflect.Type { + return reflect.TypeOf((*RoleEligibilityScheduleRequestPropertiesResponseExpiration)(nil)).Elem() +} + +func (o RoleEligibilityScheduleRequestPropertiesResponseExpirationOutput) ToRoleEligibilityScheduleRequestPropertiesResponseExpirationOutput() RoleEligibilityScheduleRequestPropertiesResponseExpirationOutput { + return o +} + +func (o RoleEligibilityScheduleRequestPropertiesResponseExpirationOutput) ToRoleEligibilityScheduleRequestPropertiesResponseExpirationOutputWithContext(ctx context.Context) RoleEligibilityScheduleRequestPropertiesResponseExpirationOutput { + return o +} + +// Duration of the role eligibility schedule in TimeSpan. +func (o RoleEligibilityScheduleRequestPropertiesResponseExpirationOutput) Duration() pulumi.StringPtrOutput { + return o.ApplyT(func(v RoleEligibilityScheduleRequestPropertiesResponseExpiration) *string { return v.Duration }).(pulumi.StringPtrOutput) +} + +// End DateTime of the role eligibility schedule. +func (o RoleEligibilityScheduleRequestPropertiesResponseExpirationOutput) EndDateTime() pulumi.StringPtrOutput { + return o.ApplyT(func(v RoleEligibilityScheduleRequestPropertiesResponseExpiration) *string { return v.EndDateTime }).(pulumi.StringPtrOutput) +} + +// Type of the role eligibility schedule expiration +func (o RoleEligibilityScheduleRequestPropertiesResponseExpirationOutput) Type() pulumi.StringPtrOutput { + return o.ApplyT(func(v RoleEligibilityScheduleRequestPropertiesResponseExpiration) *string { return v.Type }).(pulumi.StringPtrOutput) +} + +type RoleEligibilityScheduleRequestPropertiesResponseExpirationPtrOutput struct{ *pulumi.OutputState } + +func (RoleEligibilityScheduleRequestPropertiesResponseExpirationPtrOutput) ElementType() reflect.Type { + return reflect.TypeOf((**RoleEligibilityScheduleRequestPropertiesResponseExpiration)(nil)).Elem() +} + +func (o RoleEligibilityScheduleRequestPropertiesResponseExpirationPtrOutput) ToRoleEligibilityScheduleRequestPropertiesResponseExpirationPtrOutput() RoleEligibilityScheduleRequestPropertiesResponseExpirationPtrOutput { + return o +} + +func (o RoleEligibilityScheduleRequestPropertiesResponseExpirationPtrOutput) ToRoleEligibilityScheduleRequestPropertiesResponseExpirationPtrOutputWithContext(ctx context.Context) RoleEligibilityScheduleRequestPropertiesResponseExpirationPtrOutput { + return o +} + +func (o RoleEligibilityScheduleRequestPropertiesResponseExpirationPtrOutput) Elem() RoleEligibilityScheduleRequestPropertiesResponseExpirationOutput { + return o.ApplyT(func(v *RoleEligibilityScheduleRequestPropertiesResponseExpiration) RoleEligibilityScheduleRequestPropertiesResponseExpiration { + if v != nil { + return *v + } + var ret RoleEligibilityScheduleRequestPropertiesResponseExpiration + return ret + }).(RoleEligibilityScheduleRequestPropertiesResponseExpirationOutput) +} + +// Duration of the role eligibility schedule in TimeSpan. 
+func (o RoleEligibilityScheduleRequestPropertiesResponseExpirationPtrOutput) Duration() pulumi.StringPtrOutput { + return o.ApplyT(func(v *RoleEligibilityScheduleRequestPropertiesResponseExpiration) *string { + if v == nil { + return nil + } + return v.Duration + }).(pulumi.StringPtrOutput) +} + +// End DateTime of the role eligibility schedule. +func (o RoleEligibilityScheduleRequestPropertiesResponseExpirationPtrOutput) EndDateTime() pulumi.StringPtrOutput { + return o.ApplyT(func(v *RoleEligibilityScheduleRequestPropertiesResponseExpiration) *string { + if v == nil { + return nil + } + return v.EndDateTime + }).(pulumi.StringPtrOutput) +} + +// Type of the role eligibility schedule expiration +func (o RoleEligibilityScheduleRequestPropertiesResponseExpirationPtrOutput) Type() pulumi.StringPtrOutput { + return o.ApplyT(func(v *RoleEligibilityScheduleRequestPropertiesResponseExpiration) *string { + if v == nil { + return nil + } + return v.Type + }).(pulumi.StringPtrOutput) +} + +// Schedule info of the role eligibility schedule +type RoleEligibilityScheduleRequestPropertiesResponseScheduleInfo struct { + // Expiration of the role eligibility schedule + Expiration *RoleEligibilityScheduleRequestPropertiesResponseExpiration `pulumi:"expiration"` + // Start DateTime of the role eligibility schedule. + StartDateTime *string `pulumi:"startDateTime"` +} + +// Schedule info of the role eligibility schedule +type RoleEligibilityScheduleRequestPropertiesResponseScheduleInfoOutput struct{ *pulumi.OutputState } + +func (RoleEligibilityScheduleRequestPropertiesResponseScheduleInfoOutput) ElementType() reflect.Type { + return reflect.TypeOf((*RoleEligibilityScheduleRequestPropertiesResponseScheduleInfo)(nil)).Elem() +} + +func (o RoleEligibilityScheduleRequestPropertiesResponseScheduleInfoOutput) ToRoleEligibilityScheduleRequestPropertiesResponseScheduleInfoOutput() RoleEligibilityScheduleRequestPropertiesResponseScheduleInfoOutput { + return o +} + +func (o RoleEligibilityScheduleRequestPropertiesResponseScheduleInfoOutput) ToRoleEligibilityScheduleRequestPropertiesResponseScheduleInfoOutputWithContext(ctx context.Context) RoleEligibilityScheduleRequestPropertiesResponseScheduleInfoOutput { + return o +} + +// Expiration of the role eligibility schedule +func (o RoleEligibilityScheduleRequestPropertiesResponseScheduleInfoOutput) Expiration() RoleEligibilityScheduleRequestPropertiesResponseExpirationPtrOutput { + return o.ApplyT(func(v RoleEligibilityScheduleRequestPropertiesResponseScheduleInfo) *RoleEligibilityScheduleRequestPropertiesResponseExpiration { + return v.Expiration + }).(RoleEligibilityScheduleRequestPropertiesResponseExpirationPtrOutput) +} + +// Start DateTime of the role eligibility schedule. 
+func (o RoleEligibilityScheduleRequestPropertiesResponseScheduleInfoOutput) StartDateTime() pulumi.StringPtrOutput { + return o.ApplyT(func(v RoleEligibilityScheduleRequestPropertiesResponseScheduleInfo) *string { return v.StartDateTime }).(pulumi.StringPtrOutput) +} + +type RoleEligibilityScheduleRequestPropertiesResponseScheduleInfoPtrOutput struct{ *pulumi.OutputState } + +func (RoleEligibilityScheduleRequestPropertiesResponseScheduleInfoPtrOutput) ElementType() reflect.Type { + return reflect.TypeOf((**RoleEligibilityScheduleRequestPropertiesResponseScheduleInfo)(nil)).Elem() +} + +func (o RoleEligibilityScheduleRequestPropertiesResponseScheduleInfoPtrOutput) ToRoleEligibilityScheduleRequestPropertiesResponseScheduleInfoPtrOutput() RoleEligibilityScheduleRequestPropertiesResponseScheduleInfoPtrOutput { + return o +} + +func (o RoleEligibilityScheduleRequestPropertiesResponseScheduleInfoPtrOutput) ToRoleEligibilityScheduleRequestPropertiesResponseScheduleInfoPtrOutputWithContext(ctx context.Context) RoleEligibilityScheduleRequestPropertiesResponseScheduleInfoPtrOutput { + return o +} + +func (o RoleEligibilityScheduleRequestPropertiesResponseScheduleInfoPtrOutput) Elem() RoleEligibilityScheduleRequestPropertiesResponseScheduleInfoOutput { + return o.ApplyT(func(v *RoleEligibilityScheduleRequestPropertiesResponseScheduleInfo) RoleEligibilityScheduleRequestPropertiesResponseScheduleInfo { + if v != nil { + return *v + } + var ret RoleEligibilityScheduleRequestPropertiesResponseScheduleInfo + return ret + }).(RoleEligibilityScheduleRequestPropertiesResponseScheduleInfoOutput) +} + +// Expiration of the role eligibility schedule +func (o RoleEligibilityScheduleRequestPropertiesResponseScheduleInfoPtrOutput) Expiration() RoleEligibilityScheduleRequestPropertiesResponseExpirationPtrOutput { + return o.ApplyT(func(v *RoleEligibilityScheduleRequestPropertiesResponseScheduleInfo) *RoleEligibilityScheduleRequestPropertiesResponseExpiration { + if v == nil { + return nil + } + return v.Expiration + }).(RoleEligibilityScheduleRequestPropertiesResponseExpirationPtrOutput) +} + +// Start DateTime of the role eligibility schedule. 
+func (o RoleEligibilityScheduleRequestPropertiesResponseScheduleInfoPtrOutput) StartDateTime() pulumi.StringPtrOutput { + return o.ApplyT(func(v *RoleEligibilityScheduleRequestPropertiesResponseScheduleInfo) *string { + if v == nil { + return nil + } + return v.StartDateTime + }).(pulumi.StringPtrOutput) +} + +// Ticket Info of the role eligibility +type RoleEligibilityScheduleRequestPropertiesResponseTicketInfo struct { + // Ticket number for the role eligibility + TicketNumber *string `pulumi:"ticketNumber"` + // Ticket system name for the role eligibility + TicketSystem *string `pulumi:"ticketSystem"` +} + +// Ticket Info of the role eligibility +type RoleEligibilityScheduleRequestPropertiesResponseTicketInfoOutput struct{ *pulumi.OutputState } + +func (RoleEligibilityScheduleRequestPropertiesResponseTicketInfoOutput) ElementType() reflect.Type { + return reflect.TypeOf((*RoleEligibilityScheduleRequestPropertiesResponseTicketInfo)(nil)).Elem() +} + +func (o RoleEligibilityScheduleRequestPropertiesResponseTicketInfoOutput) ToRoleEligibilityScheduleRequestPropertiesResponseTicketInfoOutput() RoleEligibilityScheduleRequestPropertiesResponseTicketInfoOutput { + return o +} + +func (o RoleEligibilityScheduleRequestPropertiesResponseTicketInfoOutput) ToRoleEligibilityScheduleRequestPropertiesResponseTicketInfoOutputWithContext(ctx context.Context) RoleEligibilityScheduleRequestPropertiesResponseTicketInfoOutput { + return o +} + +// Ticket number for the role eligibility +func (o RoleEligibilityScheduleRequestPropertiesResponseTicketInfoOutput) TicketNumber() pulumi.StringPtrOutput { + return o.ApplyT(func(v RoleEligibilityScheduleRequestPropertiesResponseTicketInfo) *string { return v.TicketNumber }).(pulumi.StringPtrOutput) +} + +// Ticket system name for the role eligibility +func (o RoleEligibilityScheduleRequestPropertiesResponseTicketInfoOutput) TicketSystem() pulumi.StringPtrOutput { + return o.ApplyT(func(v RoleEligibilityScheduleRequestPropertiesResponseTicketInfo) *string { return v.TicketSystem }).(pulumi.StringPtrOutput) +} + +type RoleEligibilityScheduleRequestPropertiesResponseTicketInfoPtrOutput struct{ *pulumi.OutputState } + +func (RoleEligibilityScheduleRequestPropertiesResponseTicketInfoPtrOutput) ElementType() reflect.Type { + return reflect.TypeOf((**RoleEligibilityScheduleRequestPropertiesResponseTicketInfo)(nil)).Elem() +} + +func (o RoleEligibilityScheduleRequestPropertiesResponseTicketInfoPtrOutput) ToRoleEligibilityScheduleRequestPropertiesResponseTicketInfoPtrOutput() RoleEligibilityScheduleRequestPropertiesResponseTicketInfoPtrOutput { + return o +} + +func (o RoleEligibilityScheduleRequestPropertiesResponseTicketInfoPtrOutput) ToRoleEligibilityScheduleRequestPropertiesResponseTicketInfoPtrOutputWithContext(ctx context.Context) RoleEligibilityScheduleRequestPropertiesResponseTicketInfoPtrOutput { + return o +} + +func (o RoleEligibilityScheduleRequestPropertiesResponseTicketInfoPtrOutput) Elem() RoleEligibilityScheduleRequestPropertiesResponseTicketInfoOutput { + return o.ApplyT(func(v *RoleEligibilityScheduleRequestPropertiesResponseTicketInfo) RoleEligibilityScheduleRequestPropertiesResponseTicketInfo { + if v != nil { + return *v + } + var ret RoleEligibilityScheduleRequestPropertiesResponseTicketInfo + return ret + }).(RoleEligibilityScheduleRequestPropertiesResponseTicketInfoOutput) +} + +// Ticket number for the role eligibility +func (o RoleEligibilityScheduleRequestPropertiesResponseTicketInfoPtrOutput) TicketNumber() pulumi.StringPtrOutput { + return 
o.ApplyT(func(v *RoleEligibilityScheduleRequestPropertiesResponseTicketInfo) *string { + if v == nil { + return nil + } + return v.TicketNumber + }).(pulumi.StringPtrOutput) +} + +// Ticket system name for the role eligibility +func (o RoleEligibilityScheduleRequestPropertiesResponseTicketInfoPtrOutput) TicketSystem() pulumi.StringPtrOutput { + return o.ApplyT(func(v *RoleEligibilityScheduleRequestPropertiesResponseTicketInfo) *string { + if v == nil { + return nil + } + return v.TicketSystem + }).(pulumi.StringPtrOutput) +} + +// Schedule info of the role eligibility schedule +type RoleEligibilityScheduleRequestPropertiesScheduleInfo struct { + // Expiration of the role eligibility schedule + Expiration *RoleEligibilityScheduleRequestPropertiesExpiration `pulumi:"expiration"` + // Start DateTime of the role eligibility schedule. + StartDateTime *string `pulumi:"startDateTime"` +} + +// RoleEligibilityScheduleRequestPropertiesScheduleInfoInput is an input type that accepts RoleEligibilityScheduleRequestPropertiesScheduleInfoArgs and RoleEligibilityScheduleRequestPropertiesScheduleInfoOutput values. +// You can construct a concrete instance of `RoleEligibilityScheduleRequestPropertiesScheduleInfoInput` via: +// +// RoleEligibilityScheduleRequestPropertiesScheduleInfoArgs{...} +type RoleEligibilityScheduleRequestPropertiesScheduleInfoInput interface { + pulumi.Input + + ToRoleEligibilityScheduleRequestPropertiesScheduleInfoOutput() RoleEligibilityScheduleRequestPropertiesScheduleInfoOutput + ToRoleEligibilityScheduleRequestPropertiesScheduleInfoOutputWithContext(context.Context) RoleEligibilityScheduleRequestPropertiesScheduleInfoOutput +} + +// Schedule info of the role eligibility schedule +type RoleEligibilityScheduleRequestPropertiesScheduleInfoArgs struct { + // Expiration of the role eligibility schedule + Expiration RoleEligibilityScheduleRequestPropertiesExpirationPtrInput `pulumi:"expiration"` + // Start DateTime of the role eligibility schedule. 
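+	// Editor's sketch (not generated code): a complete ScheduleInfo input, assuming a
+	// hypothetical start time and a 180-day expiration, might be built as:
+	//
+	//	ScheduleInfo: &authorization.RoleEligibilityScheduleRequestPropertiesScheduleInfoArgs{
+	//		StartDateTime: pulumi.String("2025-01-01T00:00:00Z"),
+	//		Expiration: &authorization.RoleEligibilityScheduleRequestPropertiesExpirationArgs{
+	//			Type:     pulumi.String("AfterDuration"),
+	//			Duration: pulumi.String("P180D"),
+	//		},
+	//	},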
+ StartDateTime pulumi.StringPtrInput `pulumi:"startDateTime"` +} + +func (RoleEligibilityScheduleRequestPropertiesScheduleInfoArgs) ElementType() reflect.Type { + return reflect.TypeOf((*RoleEligibilityScheduleRequestPropertiesScheduleInfo)(nil)).Elem() +} + +func (i RoleEligibilityScheduleRequestPropertiesScheduleInfoArgs) ToRoleEligibilityScheduleRequestPropertiesScheduleInfoOutput() RoleEligibilityScheduleRequestPropertiesScheduleInfoOutput { + return i.ToRoleEligibilityScheduleRequestPropertiesScheduleInfoOutputWithContext(context.Background()) +} + +func (i RoleEligibilityScheduleRequestPropertiesScheduleInfoArgs) ToRoleEligibilityScheduleRequestPropertiesScheduleInfoOutputWithContext(ctx context.Context) RoleEligibilityScheduleRequestPropertiesScheduleInfoOutput { + return pulumi.ToOutputWithContext(ctx, i).(RoleEligibilityScheduleRequestPropertiesScheduleInfoOutput) +} + +func (i RoleEligibilityScheduleRequestPropertiesScheduleInfoArgs) ToRoleEligibilityScheduleRequestPropertiesScheduleInfoPtrOutput() RoleEligibilityScheduleRequestPropertiesScheduleInfoPtrOutput { + return i.ToRoleEligibilityScheduleRequestPropertiesScheduleInfoPtrOutputWithContext(context.Background()) +} + +func (i RoleEligibilityScheduleRequestPropertiesScheduleInfoArgs) ToRoleEligibilityScheduleRequestPropertiesScheduleInfoPtrOutputWithContext(ctx context.Context) RoleEligibilityScheduleRequestPropertiesScheduleInfoPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(RoleEligibilityScheduleRequestPropertiesScheduleInfoOutput).ToRoleEligibilityScheduleRequestPropertiesScheduleInfoPtrOutputWithContext(ctx) +} + +// RoleEligibilityScheduleRequestPropertiesScheduleInfoPtrInput is an input type that accepts RoleEligibilityScheduleRequestPropertiesScheduleInfoArgs, RoleEligibilityScheduleRequestPropertiesScheduleInfoPtr and RoleEligibilityScheduleRequestPropertiesScheduleInfoPtrOutput values. 
+// You can construct a concrete instance of `RoleEligibilityScheduleRequestPropertiesScheduleInfoPtrInput` via: +// +// RoleEligibilityScheduleRequestPropertiesScheduleInfoArgs{...} +// +// or: +// +// nil +type RoleEligibilityScheduleRequestPropertiesScheduleInfoPtrInput interface { + pulumi.Input + + ToRoleEligibilityScheduleRequestPropertiesScheduleInfoPtrOutput() RoleEligibilityScheduleRequestPropertiesScheduleInfoPtrOutput + ToRoleEligibilityScheduleRequestPropertiesScheduleInfoPtrOutputWithContext(context.Context) RoleEligibilityScheduleRequestPropertiesScheduleInfoPtrOutput +} + +type roleEligibilityScheduleRequestPropertiesScheduleInfoPtrType RoleEligibilityScheduleRequestPropertiesScheduleInfoArgs + +func RoleEligibilityScheduleRequestPropertiesScheduleInfoPtr(v *RoleEligibilityScheduleRequestPropertiesScheduleInfoArgs) RoleEligibilityScheduleRequestPropertiesScheduleInfoPtrInput { + return (*roleEligibilityScheduleRequestPropertiesScheduleInfoPtrType)(v) +} + +func (*roleEligibilityScheduleRequestPropertiesScheduleInfoPtrType) ElementType() reflect.Type { + return reflect.TypeOf((**RoleEligibilityScheduleRequestPropertiesScheduleInfo)(nil)).Elem() +} + +func (i *roleEligibilityScheduleRequestPropertiesScheduleInfoPtrType) ToRoleEligibilityScheduleRequestPropertiesScheduleInfoPtrOutput() RoleEligibilityScheduleRequestPropertiesScheduleInfoPtrOutput { + return i.ToRoleEligibilityScheduleRequestPropertiesScheduleInfoPtrOutputWithContext(context.Background()) +} + +func (i *roleEligibilityScheduleRequestPropertiesScheduleInfoPtrType) ToRoleEligibilityScheduleRequestPropertiesScheduleInfoPtrOutputWithContext(ctx context.Context) RoleEligibilityScheduleRequestPropertiesScheduleInfoPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(RoleEligibilityScheduleRequestPropertiesScheduleInfoPtrOutput) +} + +// Schedule info of the role eligibility schedule +type RoleEligibilityScheduleRequestPropertiesScheduleInfoOutput struct{ *pulumi.OutputState } + +func (RoleEligibilityScheduleRequestPropertiesScheduleInfoOutput) ElementType() reflect.Type { + return reflect.TypeOf((*RoleEligibilityScheduleRequestPropertiesScheduleInfo)(nil)).Elem() +} + +func (o RoleEligibilityScheduleRequestPropertiesScheduleInfoOutput) ToRoleEligibilityScheduleRequestPropertiesScheduleInfoOutput() RoleEligibilityScheduleRequestPropertiesScheduleInfoOutput { + return o +} + +func (o RoleEligibilityScheduleRequestPropertiesScheduleInfoOutput) ToRoleEligibilityScheduleRequestPropertiesScheduleInfoOutputWithContext(ctx context.Context) RoleEligibilityScheduleRequestPropertiesScheduleInfoOutput { + return o +} + +func (o RoleEligibilityScheduleRequestPropertiesScheduleInfoOutput) ToRoleEligibilityScheduleRequestPropertiesScheduleInfoPtrOutput() RoleEligibilityScheduleRequestPropertiesScheduleInfoPtrOutput { + return o.ToRoleEligibilityScheduleRequestPropertiesScheduleInfoPtrOutputWithContext(context.Background()) +} + +func (o RoleEligibilityScheduleRequestPropertiesScheduleInfoOutput) ToRoleEligibilityScheduleRequestPropertiesScheduleInfoPtrOutputWithContext(ctx context.Context) RoleEligibilityScheduleRequestPropertiesScheduleInfoPtrOutput { + return o.ApplyTWithContext(ctx, func(_ context.Context, v RoleEligibilityScheduleRequestPropertiesScheduleInfo) *RoleEligibilityScheduleRequestPropertiesScheduleInfo { + return &v + }).(RoleEligibilityScheduleRequestPropertiesScheduleInfoPtrOutput) +} + +// Expiration of the role eligibility schedule +func (o RoleEligibilityScheduleRequestPropertiesScheduleInfoOutput) 
Expiration() RoleEligibilityScheduleRequestPropertiesExpirationPtrOutput { + return o.ApplyT(func(v RoleEligibilityScheduleRequestPropertiesScheduleInfo) *RoleEligibilityScheduleRequestPropertiesExpiration { + return v.Expiration + }).(RoleEligibilityScheduleRequestPropertiesExpirationPtrOutput) +} + +// Start DateTime of the role eligibility schedule. +func (o RoleEligibilityScheduleRequestPropertiesScheduleInfoOutput) StartDateTime() pulumi.StringPtrOutput { + return o.ApplyT(func(v RoleEligibilityScheduleRequestPropertiesScheduleInfo) *string { return v.StartDateTime }).(pulumi.StringPtrOutput) +} + +type RoleEligibilityScheduleRequestPropertiesScheduleInfoPtrOutput struct{ *pulumi.OutputState } + +func (RoleEligibilityScheduleRequestPropertiesScheduleInfoPtrOutput) ElementType() reflect.Type { + return reflect.TypeOf((**RoleEligibilityScheduleRequestPropertiesScheduleInfo)(nil)).Elem() +} + +func (o RoleEligibilityScheduleRequestPropertiesScheduleInfoPtrOutput) ToRoleEligibilityScheduleRequestPropertiesScheduleInfoPtrOutput() RoleEligibilityScheduleRequestPropertiesScheduleInfoPtrOutput { + return o +} + +func (o RoleEligibilityScheduleRequestPropertiesScheduleInfoPtrOutput) ToRoleEligibilityScheduleRequestPropertiesScheduleInfoPtrOutputWithContext(ctx context.Context) RoleEligibilityScheduleRequestPropertiesScheduleInfoPtrOutput { + return o +} + +func (o RoleEligibilityScheduleRequestPropertiesScheduleInfoPtrOutput) Elem() RoleEligibilityScheduleRequestPropertiesScheduleInfoOutput { + return o.ApplyT(func(v *RoleEligibilityScheduleRequestPropertiesScheduleInfo) RoleEligibilityScheduleRequestPropertiesScheduleInfo { + if v != nil { + return *v + } + var ret RoleEligibilityScheduleRequestPropertiesScheduleInfo + return ret + }).(RoleEligibilityScheduleRequestPropertiesScheduleInfoOutput) +} + +// Expiration of the role eligibility schedule +func (o RoleEligibilityScheduleRequestPropertiesScheduleInfoPtrOutput) Expiration() RoleEligibilityScheduleRequestPropertiesExpirationPtrOutput { + return o.ApplyT(func(v *RoleEligibilityScheduleRequestPropertiesScheduleInfo) *RoleEligibilityScheduleRequestPropertiesExpiration { + if v == nil { + return nil + } + return v.Expiration + }).(RoleEligibilityScheduleRequestPropertiesExpirationPtrOutput) +} + +// Start DateTime of the role eligibility schedule. +func (o RoleEligibilityScheduleRequestPropertiesScheduleInfoPtrOutput) StartDateTime() pulumi.StringPtrOutput { + return o.ApplyT(func(v *RoleEligibilityScheduleRequestPropertiesScheduleInfo) *string { + if v == nil { + return nil + } + return v.StartDateTime + }).(pulumi.StringPtrOutput) +} + +// Ticket Info of the role eligibility +type RoleEligibilityScheduleRequestPropertiesTicketInfo struct { + // Ticket number for the role eligibility + TicketNumber *string `pulumi:"ticketNumber"` + // Ticket system name for the role eligibility + TicketSystem *string `pulumi:"ticketSystem"` +} + +// RoleEligibilityScheduleRequestPropertiesTicketInfoInput is an input type that accepts RoleEligibilityScheduleRequestPropertiesTicketInfoArgs and RoleEligibilityScheduleRequestPropertiesTicketInfoOutput values. 
+// You can construct a concrete instance of `RoleEligibilityScheduleRequestPropertiesTicketInfoInput` via: +// +// RoleEligibilityScheduleRequestPropertiesTicketInfoArgs{...} +type RoleEligibilityScheduleRequestPropertiesTicketInfoInput interface { + pulumi.Input + + ToRoleEligibilityScheduleRequestPropertiesTicketInfoOutput() RoleEligibilityScheduleRequestPropertiesTicketInfoOutput + ToRoleEligibilityScheduleRequestPropertiesTicketInfoOutputWithContext(context.Context) RoleEligibilityScheduleRequestPropertiesTicketInfoOutput +} + +// Ticket Info of the role eligibility +type RoleEligibilityScheduleRequestPropertiesTicketInfoArgs struct { + // Ticket number for the role eligibility + TicketNumber pulumi.StringPtrInput `pulumi:"ticketNumber"` + // Ticket system name for the role eligibility + TicketSystem pulumi.StringPtrInput `pulumi:"ticketSystem"` +} + +func (RoleEligibilityScheduleRequestPropertiesTicketInfoArgs) ElementType() reflect.Type { + return reflect.TypeOf((*RoleEligibilityScheduleRequestPropertiesTicketInfo)(nil)).Elem() +} + +func (i RoleEligibilityScheduleRequestPropertiesTicketInfoArgs) ToRoleEligibilityScheduleRequestPropertiesTicketInfoOutput() RoleEligibilityScheduleRequestPropertiesTicketInfoOutput { + return i.ToRoleEligibilityScheduleRequestPropertiesTicketInfoOutputWithContext(context.Background()) +} + +func (i RoleEligibilityScheduleRequestPropertiesTicketInfoArgs) ToRoleEligibilityScheduleRequestPropertiesTicketInfoOutputWithContext(ctx context.Context) RoleEligibilityScheduleRequestPropertiesTicketInfoOutput { + return pulumi.ToOutputWithContext(ctx, i).(RoleEligibilityScheduleRequestPropertiesTicketInfoOutput) +} + +func (i RoleEligibilityScheduleRequestPropertiesTicketInfoArgs) ToRoleEligibilityScheduleRequestPropertiesTicketInfoPtrOutput() RoleEligibilityScheduleRequestPropertiesTicketInfoPtrOutput { + return i.ToRoleEligibilityScheduleRequestPropertiesTicketInfoPtrOutputWithContext(context.Background()) +} + +func (i RoleEligibilityScheduleRequestPropertiesTicketInfoArgs) ToRoleEligibilityScheduleRequestPropertiesTicketInfoPtrOutputWithContext(ctx context.Context) RoleEligibilityScheduleRequestPropertiesTicketInfoPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(RoleEligibilityScheduleRequestPropertiesTicketInfoOutput).ToRoleEligibilityScheduleRequestPropertiesTicketInfoPtrOutputWithContext(ctx) +} + +// RoleEligibilityScheduleRequestPropertiesTicketInfoPtrInput is an input type that accepts RoleEligibilityScheduleRequestPropertiesTicketInfoArgs, RoleEligibilityScheduleRequestPropertiesTicketInfoPtr and RoleEligibilityScheduleRequestPropertiesTicketInfoPtrOutput values. 
+// You can construct a concrete instance of `RoleEligibilityScheduleRequestPropertiesTicketInfoPtrInput` via: +// +// RoleEligibilityScheduleRequestPropertiesTicketInfoArgs{...} +// +// or: +// +// nil +type RoleEligibilityScheduleRequestPropertiesTicketInfoPtrInput interface { + pulumi.Input + + ToRoleEligibilityScheduleRequestPropertiesTicketInfoPtrOutput() RoleEligibilityScheduleRequestPropertiesTicketInfoPtrOutput + ToRoleEligibilityScheduleRequestPropertiesTicketInfoPtrOutputWithContext(context.Context) RoleEligibilityScheduleRequestPropertiesTicketInfoPtrOutput +} + +type roleEligibilityScheduleRequestPropertiesTicketInfoPtrType RoleEligibilityScheduleRequestPropertiesTicketInfoArgs + +func RoleEligibilityScheduleRequestPropertiesTicketInfoPtr(v *RoleEligibilityScheduleRequestPropertiesTicketInfoArgs) RoleEligibilityScheduleRequestPropertiesTicketInfoPtrInput { + return (*roleEligibilityScheduleRequestPropertiesTicketInfoPtrType)(v) +} + +func (*roleEligibilityScheduleRequestPropertiesTicketInfoPtrType) ElementType() reflect.Type { + return reflect.TypeOf((**RoleEligibilityScheduleRequestPropertiesTicketInfo)(nil)).Elem() +} + +func (i *roleEligibilityScheduleRequestPropertiesTicketInfoPtrType) ToRoleEligibilityScheduleRequestPropertiesTicketInfoPtrOutput() RoleEligibilityScheduleRequestPropertiesTicketInfoPtrOutput { + return i.ToRoleEligibilityScheduleRequestPropertiesTicketInfoPtrOutputWithContext(context.Background()) +} + +func (i *roleEligibilityScheduleRequestPropertiesTicketInfoPtrType) ToRoleEligibilityScheduleRequestPropertiesTicketInfoPtrOutputWithContext(ctx context.Context) RoleEligibilityScheduleRequestPropertiesTicketInfoPtrOutput { + return pulumi.ToOutputWithContext(ctx, i).(RoleEligibilityScheduleRequestPropertiesTicketInfoPtrOutput) +} + +// Ticket Info of the role eligibility +type RoleEligibilityScheduleRequestPropertiesTicketInfoOutput struct{ *pulumi.OutputState } + +func (RoleEligibilityScheduleRequestPropertiesTicketInfoOutput) ElementType() reflect.Type { + return reflect.TypeOf((*RoleEligibilityScheduleRequestPropertiesTicketInfo)(nil)).Elem() +} + +func (o RoleEligibilityScheduleRequestPropertiesTicketInfoOutput) ToRoleEligibilityScheduleRequestPropertiesTicketInfoOutput() RoleEligibilityScheduleRequestPropertiesTicketInfoOutput { + return o +} + +func (o RoleEligibilityScheduleRequestPropertiesTicketInfoOutput) ToRoleEligibilityScheduleRequestPropertiesTicketInfoOutputWithContext(ctx context.Context) RoleEligibilityScheduleRequestPropertiesTicketInfoOutput { + return o +} + +func (o RoleEligibilityScheduleRequestPropertiesTicketInfoOutput) ToRoleEligibilityScheduleRequestPropertiesTicketInfoPtrOutput() RoleEligibilityScheduleRequestPropertiesTicketInfoPtrOutput { + return o.ToRoleEligibilityScheduleRequestPropertiesTicketInfoPtrOutputWithContext(context.Background()) +} + +func (o RoleEligibilityScheduleRequestPropertiesTicketInfoOutput) ToRoleEligibilityScheduleRequestPropertiesTicketInfoPtrOutputWithContext(ctx context.Context) RoleEligibilityScheduleRequestPropertiesTicketInfoPtrOutput { + return o.ApplyTWithContext(ctx, func(_ context.Context, v RoleEligibilityScheduleRequestPropertiesTicketInfo) *RoleEligibilityScheduleRequestPropertiesTicketInfo { + return &v + }).(RoleEligibilityScheduleRequestPropertiesTicketInfoPtrOutput) +} + +// Ticket number for the role eligibility +func (o RoleEligibilityScheduleRequestPropertiesTicketInfoOutput) TicketNumber() pulumi.StringPtrOutput { + return o.ApplyT(func(v 
RoleEligibilityScheduleRequestPropertiesTicketInfo) *string { return v.TicketNumber }).(pulumi.StringPtrOutput) +} + +// Ticket system name for the role eligibility +func (o RoleEligibilityScheduleRequestPropertiesTicketInfoOutput) TicketSystem() pulumi.StringPtrOutput { + return o.ApplyT(func(v RoleEligibilityScheduleRequestPropertiesTicketInfo) *string { return v.TicketSystem }).(pulumi.StringPtrOutput) +} + +type RoleEligibilityScheduleRequestPropertiesTicketInfoPtrOutput struct{ *pulumi.OutputState } + +func (RoleEligibilityScheduleRequestPropertiesTicketInfoPtrOutput) ElementType() reflect.Type { + return reflect.TypeOf((**RoleEligibilityScheduleRequestPropertiesTicketInfo)(nil)).Elem() +} + +func (o RoleEligibilityScheduleRequestPropertiesTicketInfoPtrOutput) ToRoleEligibilityScheduleRequestPropertiesTicketInfoPtrOutput() RoleEligibilityScheduleRequestPropertiesTicketInfoPtrOutput { + return o +} + +func (o RoleEligibilityScheduleRequestPropertiesTicketInfoPtrOutput) ToRoleEligibilityScheduleRequestPropertiesTicketInfoPtrOutputWithContext(ctx context.Context) RoleEligibilityScheduleRequestPropertiesTicketInfoPtrOutput { + return o +} + +func (o RoleEligibilityScheduleRequestPropertiesTicketInfoPtrOutput) Elem() RoleEligibilityScheduleRequestPropertiesTicketInfoOutput { + return o.ApplyT(func(v *RoleEligibilityScheduleRequestPropertiesTicketInfo) RoleEligibilityScheduleRequestPropertiesTicketInfo { + if v != nil { + return *v + } + var ret RoleEligibilityScheduleRequestPropertiesTicketInfo + return ret + }).(RoleEligibilityScheduleRequestPropertiesTicketInfoOutput) +} + +// Ticket number for the role eligibility +func (o RoleEligibilityScheduleRequestPropertiesTicketInfoPtrOutput) TicketNumber() pulumi.StringPtrOutput { + return o.ApplyT(func(v *RoleEligibilityScheduleRequestPropertiesTicketInfo) *string { + if v == nil { + return nil + } + return v.TicketNumber + }).(pulumi.StringPtrOutput) +} + +// Ticket system name for the role eligibility +func (o RoleEligibilityScheduleRequestPropertiesTicketInfoPtrOutput) TicketSystem() pulumi.StringPtrOutput { + return o.ApplyT(func(v *RoleEligibilityScheduleRequestPropertiesTicketInfo) *string { + if v == nil { + return nil + } + return v.TicketSystem + }).(pulumi.StringPtrOutput) +} + // The role management policy approval rule. type RoleManagementPolicyApprovalRule struct { // The id of the rule. 
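For orientation, the block above is the standard Pulumi codegen pattern: a plain struct, an Args mirror whose fields are pulumi inputs, and Output/PtrOutput wrappers with ApplyT-based accessors. A minimal sketch of how a consumer would construct these values, assuming the types live in the azure-native "authorization" module (the import path and the Expiration field names are assumptions; neither appears in this hunk):

package main

import (
	authorization "github.com/pulumi/pulumi-azure-native-sdk/authorization/v2"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

// newScheduleInfo returns a value satisfying
// RoleEligibilityScheduleRequestPropertiesScheduleInfoPtrInput; the Args
// struct qualifies via its generated To...PtrOutput methods shown above.
func newScheduleInfo() authorization.RoleEligibilityScheduleRequestPropertiesScheduleInfoPtrInput {
	return authorization.RoleEligibilityScheduleRequestPropertiesScheduleInfoArgs{
		// Expiration field names (Type, Duration) are assumptions based on the
		// Azure PIM API; the Expiration type itself is defined earlier in this file.
		Expiration: authorization.RoleEligibilityScheduleRequestPropertiesExpirationArgs{
			Type:     pulumi.String("AfterDuration"),
			Duration: pulumi.String("P180D"),
		},
		StartDateTime: pulumi.String("2025-07-01T00:00:00Z"),
	}
}

The same pattern applies to the TicketInfo types: a concrete Args value can be passed anywhere the generated resource expects the corresponding Input or PtrInput interface.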
@@ -7276,6 +8363,13 @@ func init() { pulumi.RegisterOutputType(ApprovalStageArrayOutput{}) pulumi.RegisterOutputType(ApprovalStageResponseOutput{}) pulumi.RegisterOutputType(ApprovalStageResponseArrayOutput{}) + pulumi.RegisterOutputType(ExpandedPropertiesResponseOutput{}) + pulumi.RegisterOutputType(ExpandedPropertiesResponsePrincipalOutput{}) + pulumi.RegisterOutputType(ExpandedPropertiesResponsePrincipalPtrOutput{}) + pulumi.RegisterOutputType(ExpandedPropertiesResponseRoleDefinitionOutput{}) + pulumi.RegisterOutputType(ExpandedPropertiesResponseRoleDefinitionPtrOutput{}) + pulumi.RegisterOutputType(ExpandedPropertiesResponseScopeOutput{}) + pulumi.RegisterOutputType(ExpandedPropertiesResponseScopePtrOutput{}) pulumi.RegisterOutputType(IdentityOutput{}) pulumi.RegisterOutputType(IdentityPtrOutput{}) pulumi.RegisterOutputType(IdentityResponseOutput{}) @@ -7353,6 +8447,18 @@ func init() { pulumi.RegisterOutputType(ResourceSelectorArrayOutput{}) pulumi.RegisterOutputType(ResourceSelectorResponseOutput{}) pulumi.RegisterOutputType(ResourceSelectorResponseArrayOutput{}) + pulumi.RegisterOutputType(RoleEligibilityScheduleRequestPropertiesExpirationOutput{}) + pulumi.RegisterOutputType(RoleEligibilityScheduleRequestPropertiesExpirationPtrOutput{}) + pulumi.RegisterOutputType(RoleEligibilityScheduleRequestPropertiesResponseExpirationOutput{}) + pulumi.RegisterOutputType(RoleEligibilityScheduleRequestPropertiesResponseExpirationPtrOutput{}) + pulumi.RegisterOutputType(RoleEligibilityScheduleRequestPropertiesResponseScheduleInfoOutput{}) + pulumi.RegisterOutputType(RoleEligibilityScheduleRequestPropertiesResponseScheduleInfoPtrOutput{}) + pulumi.RegisterOutputType(RoleEligibilityScheduleRequestPropertiesResponseTicketInfoOutput{}) + pulumi.RegisterOutputType(RoleEligibilityScheduleRequestPropertiesResponseTicketInfoPtrOutput{}) + pulumi.RegisterOutputType(RoleEligibilityScheduleRequestPropertiesScheduleInfoOutput{}) + pulumi.RegisterOutputType(RoleEligibilityScheduleRequestPropertiesScheduleInfoPtrOutput{}) + pulumi.RegisterOutputType(RoleEligibilityScheduleRequestPropertiesTicketInfoOutput{}) + pulumi.RegisterOutputType(RoleEligibilityScheduleRequestPropertiesTicketInfoPtrOutput{}) pulumi.RegisterOutputType(RoleManagementPolicyApprovalRuleOutput{}) pulumi.RegisterOutputType(RoleManagementPolicyApprovalRuleResponseOutput{}) pulumi.RegisterOutputType(RoleManagementPolicyAuthenticationContextRuleOutput{}) diff --git a/vendor/github.com/pulumi/pulumi-azure-native-sdk/compute/v2/pulumi-plugin.json b/vendor/github.com/pulumi/pulumi-azure-native-sdk/compute/v2/pulumi-plugin.json index c96467358..c4a5a451b 100644 --- a/vendor/github.com/pulumi/pulumi-azure-native-sdk/compute/v2/pulumi-plugin.json +++ b/vendor/github.com/pulumi/pulumi-azure-native-sdk/compute/v2/pulumi-plugin.json @@ -1,5 +1,5 @@ { "resource": true, "name": "azure-native", - "version": "2.88.0" + "version": "2.89.3" } diff --git a/vendor/github.com/pulumi/pulumi-azure-native-sdk/managedidentity/v2/federatedIdentityCredential.go b/vendor/github.com/pulumi/pulumi-azure-native-sdk/managedidentity/v2/federatedIdentityCredential.go index 3ad682883..674bb67e1 100644 --- a/vendor/github.com/pulumi/pulumi-azure-native-sdk/managedidentity/v2/federatedIdentityCredential.go +++ b/vendor/github.com/pulumi/pulumi-azure-native-sdk/managedidentity/v2/federatedIdentityCredential.go @@ -15,7 +15,7 @@ import ( // Describes a federated identity credential. // Azure REST API version: 2023-01-31. Prior API version in Azure Native 1.x: 2022-01-31-preview. 
// -// Other available API versions: 2023-07-31-preview. +// Other available API versions: 2023-07-31-preview, 2024-11-30. type FederatedIdentityCredential struct { pulumi.CustomResourceState @@ -65,6 +65,9 @@ func NewFederatedIdentityCredential(ctx *pulumi.Context, { Type: pulumi.String("azure-native:managedidentity/v20230731preview:FederatedIdentityCredential"), }, + { + Type: pulumi.String("azure-native:managedidentity/v20241130:FederatedIdentityCredential"), + }, }) opts = append(opts, aliases) opts = utilities.PkgResourceDefaultOpts(opts) diff --git a/vendor/github.com/pulumi/pulumi-azure-native-sdk/managedidentity/v2/getFederatedIdentityCredential.go b/vendor/github.com/pulumi/pulumi-azure-native-sdk/managedidentity/v2/getFederatedIdentityCredential.go index dd4c9c3c3..928544ef2 100644 --- a/vendor/github.com/pulumi/pulumi-azure-native-sdk/managedidentity/v2/getFederatedIdentityCredential.go +++ b/vendor/github.com/pulumi/pulumi-azure-native-sdk/managedidentity/v2/getFederatedIdentityCredential.go @@ -14,7 +14,7 @@ import ( // Gets the federated identity credential. // Azure REST API version: 2023-01-31. // -// Other available API versions: 2023-07-31-preview. +// Other available API versions: 2023-07-31-preview, 2024-11-30. func LookupFederatedIdentityCredential(ctx *pulumi.Context, args *LookupFederatedIdentityCredentialArgs, opts ...pulumi.InvokeOption) (*LookupFederatedIdentityCredentialResult, error) { opts = utilities.PkgInvokeDefaultOpts(opts) var rv LookupFederatedIdentityCredentialResult @@ -53,15 +53,11 @@ type LookupFederatedIdentityCredentialResult struct { } func LookupFederatedIdentityCredentialOutput(ctx *pulumi.Context, args LookupFederatedIdentityCredentialOutputArgs, opts ...pulumi.InvokeOption) LookupFederatedIdentityCredentialResultOutput { - return pulumi.ToOutputWithContext(context.Background(), args). - ApplyT(func(v interface{}) (LookupFederatedIdentityCredentialResult, error) { + return pulumi.ToOutputWithContext(ctx.Context(), args). + ApplyT(func(v interface{}) (LookupFederatedIdentityCredentialResultOutput, error) { args := v.(LookupFederatedIdentityCredentialArgs) - r, err := LookupFederatedIdentityCredential(ctx, &args, opts...) - var s LookupFederatedIdentityCredentialResult - if r != nil { - s = *r - } - return s, err + options := pulumi.InvokeOutputOptions{InvokeOptions: utilities.PkgInvokeDefaultOpts(opts)} + return ctx.InvokeOutput("azure-native:managedidentity:getFederatedIdentityCredential", args, LookupFederatedIdentityCredentialResultOutput{}, options).(LookupFederatedIdentityCredentialResultOutput), nil }).(LookupFederatedIdentityCredentialResultOutput) } diff --git a/vendor/github.com/pulumi/pulumi-azure-native-sdk/managedidentity/v2/getUserAssignedIdentity.go b/vendor/github.com/pulumi/pulumi-azure-native-sdk/managedidentity/v2/getUserAssignedIdentity.go index 2d2358291..7b05aa040 100644 --- a/vendor/github.com/pulumi/pulumi-azure-native-sdk/managedidentity/v2/getUserAssignedIdentity.go +++ b/vendor/github.com/pulumi/pulumi-azure-native-sdk/managedidentity/v2/getUserAssignedIdentity.go @@ -14,7 +14,7 @@ import ( // Gets the identity. // Azure REST API version: 2023-01-31. // -// Other available API versions: 2015-08-31-preview, 2023-07-31-preview. +// Other available API versions: 2023-07-31-preview, 2024-11-30. 
func LookupUserAssignedIdentity(ctx *pulumi.Context, args *LookupUserAssignedIdentityArgs, opts ...pulumi.InvokeOption) (*LookupUserAssignedIdentityResult, error) { opts = utilities.PkgInvokeDefaultOpts(opts) var rv LookupUserAssignedIdentityResult @@ -55,15 +55,11 @@ type LookupUserAssignedIdentityResult struct { } func LookupUserAssignedIdentityOutput(ctx *pulumi.Context, args LookupUserAssignedIdentityOutputArgs, opts ...pulumi.InvokeOption) LookupUserAssignedIdentityResultOutput { - return pulumi.ToOutputWithContext(context.Background(), args). - ApplyT(func(v interface{}) (LookupUserAssignedIdentityResult, error) { + return pulumi.ToOutputWithContext(ctx.Context(), args). + ApplyT(func(v interface{}) (LookupUserAssignedIdentityResultOutput, error) { args := v.(LookupUserAssignedIdentityArgs) - r, err := LookupUserAssignedIdentity(ctx, &args, opts...) - var s LookupUserAssignedIdentityResult - if r != nil { - s = *r - } - return s, err + options := pulumi.InvokeOutputOptions{InvokeOptions: utilities.PkgInvokeDefaultOpts(opts)} + return ctx.InvokeOutput("azure-native:managedidentity:getUserAssignedIdentity", args, LookupUserAssignedIdentityResultOutput{}, options).(LookupUserAssignedIdentityResultOutput), nil }).(LookupUserAssignedIdentityResultOutput) } diff --git a/vendor/github.com/pulumi/pulumi-azure-native-sdk/managedidentity/v2/listUserAssignedIdentityAssociatedResources.go b/vendor/github.com/pulumi/pulumi-azure-native-sdk/managedidentity/v2/listUserAssignedIdentityAssociatedResources.go index cdfde9fc7..4ed4f604c 100644 --- a/vendor/github.com/pulumi/pulumi-azure-native-sdk/managedidentity/v2/listUserAssignedIdentityAssociatedResources.go +++ b/vendor/github.com/pulumi/pulumi-azure-native-sdk/managedidentity/v2/listUserAssignedIdentityAssociatedResources.go @@ -51,15 +51,11 @@ type ListUserAssignedIdentityAssociatedResourcesResult struct { } func ListUserAssignedIdentityAssociatedResourcesOutput(ctx *pulumi.Context, args ListUserAssignedIdentityAssociatedResourcesOutputArgs, opts ...pulumi.InvokeOption) ListUserAssignedIdentityAssociatedResourcesResultOutput { - return pulumi.ToOutputWithContext(context.Background(), args). - ApplyT(func(v interface{}) (ListUserAssignedIdentityAssociatedResourcesResult, error) { + return pulumi.ToOutputWithContext(ctx.Context(), args). + ApplyT(func(v interface{}) (ListUserAssignedIdentityAssociatedResourcesResultOutput, error) { args := v.(ListUserAssignedIdentityAssociatedResourcesArgs) - r, err := ListUserAssignedIdentityAssociatedResources(ctx, &args, opts...) 
- var s ListUserAssignedIdentityAssociatedResourcesResult - if r != nil { - s = *r - } - return s, err + options := pulumi.InvokeOutputOptions{InvokeOptions: utilities.PkgInvokeDefaultOpts(opts)} + return ctx.InvokeOutput("azure-native:managedidentity:listUserAssignedIdentityAssociatedResources", args, ListUserAssignedIdentityAssociatedResourcesResultOutput{}, options).(ListUserAssignedIdentityAssociatedResourcesResultOutput), nil }).(ListUserAssignedIdentityAssociatedResourcesResultOutput) } diff --git a/vendor/github.com/pulumi/pulumi-azure-native-sdk/managedidentity/v2/pulumi-plugin.json b/vendor/github.com/pulumi/pulumi-azure-native-sdk/managedidentity/v2/pulumi-plugin.json index 5e48be47c..c4a5a451b 100644 --- a/vendor/github.com/pulumi/pulumi-azure-native-sdk/managedidentity/v2/pulumi-plugin.json +++ b/vendor/github.com/pulumi/pulumi-azure-native-sdk/managedidentity/v2/pulumi-plugin.json @@ -1,5 +1,5 @@ { "resource": true, "name": "azure-native", - "version": "2.53.0" + "version": "2.89.3" } diff --git a/vendor/github.com/pulumi/pulumi-azure-native-sdk/managedidentity/v2/userAssignedIdentity.go b/vendor/github.com/pulumi/pulumi-azure-native-sdk/managedidentity/v2/userAssignedIdentity.go index 0f11e4abe..15b0ae5f8 100644 --- a/vendor/github.com/pulumi/pulumi-azure-native-sdk/managedidentity/v2/userAssignedIdentity.go +++ b/vendor/github.com/pulumi/pulumi-azure-native-sdk/managedidentity/v2/userAssignedIdentity.go @@ -15,7 +15,7 @@ import ( // Describes an identity resource. // Azure REST API version: 2023-01-31. Prior API version in Azure Native 1.x: 2018-11-30. // -// Other available API versions: 2015-08-31-preview, 2023-07-31-preview. +// Other available API versions: 2023-07-31-preview, 2024-11-30. type UserAssignedIdentity struct { pulumi.CustomResourceState @@ -66,6 +66,9 @@ func NewUserAssignedIdentity(ctx *pulumi.Context, { Type: pulumi.String("azure-native:managedidentity/v20230731preview:UserAssignedIdentity"), }, + { + Type: pulumi.String("azure-native:managedidentity/v20241130:UserAssignedIdentity"), + }, }) opts = append(opts, aliases) opts = utilities.PkgResourceDefaultOpts(opts) diff --git a/vendor/github.com/pulumi/pulumi-azure-native-sdk/network/v2/pulumi-plugin.json b/vendor/github.com/pulumi/pulumi-azure-native-sdk/network/v2/pulumi-plugin.json index c96467358..c4a5a451b 100644 --- a/vendor/github.com/pulumi/pulumi-azure-native-sdk/network/v2/pulumi-plugin.json +++ b/vendor/github.com/pulumi/pulumi-azure-native-sdk/network/v2/pulumi-plugin.json @@ -1,5 +1,5 @@ { "resource": true, "name": "azure-native", - "version": "2.88.0" + "version": "2.89.3" } diff --git a/vendor/github.com/pulumi/pulumi-azure-native-sdk/resources/v2/getResource.go b/vendor/github.com/pulumi/pulumi-azure-native-sdk/resources/v2/getResource.go index 3c4ffcc5b..9ce8e9136 100644 --- a/vendor/github.com/pulumi/pulumi-azure-native-sdk/resources/v2/getResource.go +++ b/vendor/github.com/pulumi/pulumi-azure-native-sdk/resources/v2/getResource.go @@ -26,6 +26,8 @@ func LookupResource(ctx *pulumi.Context, args *LookupResourceArgs, opts ...pulum } type LookupResourceArgs struct { + // The API version to use for the operation. + ApiVersion string `pulumi:"apiVersion"` // The parent resource identity. ParentResourcePath string `pulumi:"parentResourcePath"` // The name of the resource group containing the resource to get. The name is case insensitive. 
@@ -76,6 +78,8 @@ func LookupResourceOutput(ctx *pulumi.Context, args LookupResourceOutputArgs, op } type LookupResourceOutputArgs struct { + // The API version to use for the operation. + ApiVersion pulumi.StringInput `pulumi:"apiVersion"` // The parent resource identity. ParentResourcePath pulumi.StringInput `pulumi:"parentResourcePath"` // The name of the resource group containing the resource to get. The name is case insensitive. diff --git a/vendor/github.com/pulumi/pulumi-azure-native-sdk/resources/v2/pulumi-plugin.json b/vendor/github.com/pulumi/pulumi-azure-native-sdk/resources/v2/pulumi-plugin.json index c96467358..c4a5a451b 100644 --- a/vendor/github.com/pulumi/pulumi-azure-native-sdk/resources/v2/pulumi-plugin.json +++ b/vendor/github.com/pulumi/pulumi-azure-native-sdk/resources/v2/pulumi-plugin.json @@ -1,5 +1,5 @@ { "resource": true, "name": "azure-native", - "version": "2.88.0" + "version": "2.89.3" } diff --git a/vendor/github.com/pulumi/pulumi-azure-native-sdk/resources/v2/resource.go b/vendor/github.com/pulumi/pulumi-azure-native-sdk/resources/v2/resource.go index 77460735a..0927bc1b6 100644 --- a/vendor/github.com/pulumi/pulumi-azure-native-sdk/resources/v2/resource.go +++ b/vendor/github.com/pulumi/pulumi-azure-native-sdk/resources/v2/resource.go @@ -50,6 +50,9 @@ func NewResource(ctx *pulumi.Context, return nil, errors.New("missing one or more required arguments") } + if args.ApiVersion == nil { + return nil, errors.New("invalid value for required argument 'ApiVersion'") + } if args.ParentResourcePath == nil { return nil, errors.New("invalid value for required argument 'ParentResourcePath'") } @@ -167,6 +170,8 @@ func (ResourceState) ElementType() reflect.Type { } type resourceArgs struct { + // The API version to use for the operation. + ApiVersion string `pulumi:"apiVersion"` // Resource extended location. ExtendedLocation *ExtendedLocation `pulumi:"extendedLocation"` // The identity of the resource. @@ -199,6 +204,8 @@ type resourceArgs struct { // The set of arguments for constructing a Resource resource. type ResourceArgs struct { + // The API version to use for the operation. + ApiVersion pulumi.StringInput // Resource extended location. ExtendedLocation ExtendedLocationPtrInput // The identity of the resource. 
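The getResource.go and resource.go hunks above make ApiVersion a required argument for both lookups and resource construction. A hedged sketch of a lookup call under that change; field names other than ApiVersion, ParentResourcePath, and ResourceGroupName follow the usual generated shape but are not shown in this diff, and all identifier values are placeholders:

package main

import (
	resources "github.com/pulumi/pulumi-azure-native-sdk/resources/v2"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func lookupExample(ctx *pulumi.Context) error {
	// ApiVersion is the argument this patch makes required; omitting it now
	// fails with "invalid value for required argument 'ApiVersion'".
	res, err := resources.LookupResource(ctx, &resources.LookupResourceArgs{
		ApiVersion:                "2023-01-01",
		ParentResourcePath:        "",
		ResourceGroupName:         "example-rg",
		ResourceProviderNamespace: "Microsoft.Storage",
		ResourceType:              "storageAccounts",
		ResourceName:              "examplesa",
	})
	if err != nil {
		return err
	}
	// The result's Id field is assumed from the usual generated result shape.
	ctx.Export("resourceId", pulumi.String(res.Id))
	return nil
}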
diff --git a/vendor/github.com/pulumi/pulumi-azure-native-sdk/storage/v2/pulumi-plugin.json b/vendor/github.com/pulumi/pulumi-azure-native-sdk/storage/v2/pulumi-plugin.json index c96467358..c4a5a451b 100644 --- a/vendor/github.com/pulumi/pulumi-azure-native-sdk/storage/v2/pulumi-plugin.json +++ b/vendor/github.com/pulumi/pulumi-azure-native-sdk/storage/v2/pulumi-plugin.json @@ -1,5 +1,5 @@ { "resource": true, "name": "azure-native", - "version": "2.88.0" + "version": "2.89.3" } diff --git a/vendor/github.com/pulumi/pulumi-azure-native-sdk/v2/utilities/pulumiUtilities.go b/vendor/github.com/pulumi/pulumi-azure-native-sdk/v2/utilities/pulumiUtilities.go index b56f3e965..27fae4e72 100644 --- a/vendor/github.com/pulumi/pulumi-azure-native-sdk/v2/utilities/pulumiUtilities.go +++ b/vendor/github.com/pulumi/pulumi-azure-native-sdk/v2/utilities/pulumiUtilities.go @@ -165,7 +165,7 @@ func callPlainInner( func PkgResourceDefaultOpts(opts []pulumi.ResourceOption) []pulumi.ResourceOption { defaults := []pulumi.ResourceOption{} - version := semver.MustParse("2.88.0") + version := semver.MustParse("2.89.3") if !version.Equals(semver.Version{}) { defaults = append(defaults, pulumi.Version(version.String())) } @@ -176,7 +176,7 @@ func PkgResourceDefaultOpts(opts []pulumi.ResourceOption) []pulumi.ResourceOptio func PkgInvokeDefaultOpts(opts []pulumi.InvokeOption) []pulumi.InvokeOption { defaults := []pulumi.InvokeOption{} - version := semver.MustParse("2.88.0") + version := semver.MustParse("2.89.3") if !version.Equals(semver.Version{}) { defaults = append(defaults, pulumi.Version(version.String())) } diff --git a/vendor/github.com/pulumi/pulumi-command/sdk/go/command/internal/pulumiUtilities.go b/vendor/github.com/pulumi/pulumi-command/sdk/go/command/internal/pulumiUtilities.go index 4c3c69fc0..2e6c9e8dd 100644 --- a/vendor/github.com/pulumi/pulumi-command/sdk/go/command/internal/pulumiUtilities.go +++ b/vendor/github.com/pulumi/pulumi-command/sdk/go/command/internal/pulumiUtilities.go @@ -165,7 +165,7 @@ func callPlainInner( func PkgResourceDefaultOpts(opts []pulumi.ResourceOption) []pulumi.ResourceOption { defaults := []pulumi.ResourceOption{} - version := semver.MustParse("1.0.1") + version := semver.MustParse("1.0.2") if !version.Equals(semver.Version{}) { defaults = append(defaults, pulumi.Version(version.String())) } @@ -176,7 +176,7 @@ func PkgResourceDefaultOpts(opts []pulumi.ResourceOption) []pulumi.ResourceOptio func PkgInvokeDefaultOpts(opts []pulumi.InvokeOption) []pulumi.InvokeOption { defaults := []pulumi.InvokeOption{} - version := semver.MustParse("1.0.1") + version := semver.MustParse("1.0.2") if !version.Equals(semver.Version{}) { defaults = append(defaults, pulumi.Version(version.String())) } diff --git a/vendor/github.com/pulumi/pulumi-docker-build/sdk/go/dockerbuild/internal/pulumiUtilities.go b/vendor/github.com/pulumi/pulumi-docker-build/sdk/go/dockerbuild/internal/pulumiUtilities.go index 6ba99d405..b1f56336b 100644 --- a/vendor/github.com/pulumi/pulumi-docker-build/sdk/go/dockerbuild/internal/pulumiUtilities.go +++ b/vendor/github.com/pulumi/pulumi-docker-build/sdk/go/dockerbuild/internal/pulumiUtilities.go @@ -165,7 +165,7 @@ func callPlainInner( func PkgResourceDefaultOpts(opts []pulumi.ResourceOption) []pulumi.ResourceOption { defaults := []pulumi.ResourceOption{} - version := semver.MustParse("0.0.3") + version := semver.MustParse("0.0.10") if !version.Equals(semver.Version{}) { defaults = append(defaults, pulumi.Version(version.String())) } @@ -176,7 +176,7 @@ func 
PkgResourceDefaultOpts(opts []pulumi.ResourceOption) []pulumi.ResourceOptio func PkgInvokeDefaultOpts(opts []pulumi.InvokeOption) []pulumi.InvokeOption { defaults := []pulumi.InvokeOption{} - version := semver.MustParse("0.0.3") + version := semver.MustParse("0.0.10") if !version.Equals(semver.Version{}) { defaults = append(defaults, pulumi.Version(version.String())) } diff --git a/vendor/github.com/pulumi/pulumi-docker-build/sdk/go/dockerbuild/pulumi-plugin.json b/vendor/github.com/pulumi/pulumi-docker-build/sdk/go/dockerbuild/pulumi-plugin.json index 4690519f0..6cde38168 100644 --- a/vendor/github.com/pulumi/pulumi-docker-build/sdk/go/dockerbuild/pulumi-plugin.json +++ b/vendor/github.com/pulumi/pulumi-docker-build/sdk/go/dockerbuild/pulumi-plugin.json @@ -1,5 +1,5 @@ { "resource": true, "name": "docker-build", - "version": "0.0.3" + "version": "0.0.10" } diff --git a/vendor/github.com/pulumi/pulumi-docker/sdk/v4/go/docker/internal/pulumiUtilities.go b/vendor/github.com/pulumi/pulumi-docker/sdk/v4/go/docker/internal/pulumiUtilities.go index 4bf4ee93b..564b8ab94 100644 --- a/vendor/github.com/pulumi/pulumi-docker/sdk/v4/go/docker/internal/pulumiUtilities.go +++ b/vendor/github.com/pulumi/pulumi-docker/sdk/v4/go/docker/internal/pulumiUtilities.go @@ -165,7 +165,7 @@ func callPlainInner( func PkgResourceDefaultOpts(opts []pulumi.ResourceOption) []pulumi.ResourceOption { defaults := []pulumi.ResourceOption{} - version := semver.MustParse("4.5.8") + version := semver.MustParse("4.6.2") if !version.Equals(semver.Version{}) { defaults = append(defaults, pulumi.Version(version.String())) } @@ -176,7 +176,7 @@ func PkgResourceDefaultOpts(opts []pulumi.ResourceOption) []pulumi.ResourceOptio func PkgInvokeDefaultOpts(opts []pulumi.InvokeOption) []pulumi.InvokeOption { defaults := []pulumi.InvokeOption{} - version := semver.MustParse("4.5.8") + version := semver.MustParse("4.6.2") if !version.Equals(semver.Version{}) { defaults = append(defaults, pulumi.Version(version.String())) } diff --git a/vendor/github.com/pulumi/pulumi-docker/sdk/v4/go/docker/pulumi-plugin.json b/vendor/github.com/pulumi/pulumi-docker/sdk/v4/go/docker/pulumi-plugin.json index c65fe7168..a50ef775b 100644 --- a/vendor/github.com/pulumi/pulumi-docker/sdk/v4/go/docker/pulumi-plugin.json +++ b/vendor/github.com/pulumi/pulumi-docker/sdk/v4/go/docker/pulumi-plugin.json @@ -1,5 +1,5 @@ { "resource": true, "name": "docker", - "version": "4.5.8" + "version": "4.6.2" } diff --git a/vendor/github.com/pulumi/pulumi-random/sdk/v4/go/random/internal/pulumiUtilities.go b/vendor/github.com/pulumi/pulumi-random/sdk/v4/go/random/internal/pulumiUtilities.go index c7ba262e1..fa64e54fd 100644 --- a/vendor/github.com/pulumi/pulumi-random/sdk/v4/go/random/internal/pulumiUtilities.go +++ b/vendor/github.com/pulumi/pulumi-random/sdk/v4/go/random/internal/pulumiUtilities.go @@ -165,7 +165,7 @@ func callPlainInner( func PkgResourceDefaultOpts(opts []pulumi.ResourceOption) []pulumi.ResourceOption { defaults := []pulumi.ResourceOption{} - version := semver.MustParse("4.16.7") + version := semver.MustParse("4.18.0") if !version.Equals(semver.Version{}) { defaults = append(defaults, pulumi.Version(version.String())) } @@ -176,7 +176,7 @@ func PkgResourceDefaultOpts(opts []pulumi.ResourceOption) []pulumi.ResourceOptio func PkgInvokeDefaultOpts(opts []pulumi.InvokeOption) []pulumi.InvokeOption { defaults := []pulumi.InvokeOption{} - version := semver.MustParse("4.16.7") + version := semver.MustParse("4.18.0") if !version.Equals(semver.Version{}) { defaults = 
append(defaults, pulumi.Version(version.String())) } diff --git a/vendor/github.com/pulumi/pulumi-random/sdk/v4/go/random/pulumi-plugin.json b/vendor/github.com/pulumi/pulumi-random/sdk/v4/go/random/pulumi-plugin.json index 76c6cda74..188dcfe50 100644 --- a/vendor/github.com/pulumi/pulumi-random/sdk/v4/go/random/pulumi-plugin.json +++ b/vendor/github.com/pulumi/pulumi-random/sdk/v4/go/random/pulumi-plugin.json @@ -1,5 +1,5 @@ { "resource": true, "name": "random", - "version": "4.16.7" + "version": "4.18.0" } diff --git a/vendor/github.com/pulumi/pulumi-tls/sdk/v5/go/tls/getCertificate.go b/vendor/github.com/pulumi/pulumi-tls/sdk/v5/go/tls/getCertificate.go index d711552db..a5287493d 100644 --- a/vendor/github.com/pulumi/pulumi-tls/sdk/v5/go/tls/getCertificate.go +++ b/vendor/github.com/pulumi/pulumi-tls/sdk/v5/go/tls/getCertificate.go @@ -46,21 +46,11 @@ type GetCertificateResult struct { } func GetCertificateOutput(ctx *pulumi.Context, args GetCertificateOutputArgs, opts ...pulumi.InvokeOption) GetCertificateResultOutput { - return pulumi.ToOutputWithContext(context.Background(), args). + return pulumi.ToOutputWithContext(ctx.Context(), args). ApplyT(func(v interface{}) (GetCertificateResultOutput, error) { args := v.(GetCertificateArgs) - opts = internal.PkgInvokeDefaultOpts(opts) - var rv GetCertificateResult - secret, err := ctx.InvokePackageRaw("tls:index/getCertificate:getCertificate", args, &rv, "", opts...) - if err != nil { - return GetCertificateResultOutput{}, err - } - - output := pulumi.ToOutput(rv).(GetCertificateResultOutput) - if secret { - return pulumi.ToSecret(output).(GetCertificateResultOutput), nil - } - return output, nil + options := pulumi.InvokeOutputOptions{InvokeOptions: internal.PkgInvokeDefaultOpts(opts)} + return ctx.InvokeOutput("tls:index/getCertificate:getCertificate", args, GetCertificateResultOutput{}, options).(GetCertificateResultOutput), nil }).(GetCertificateResultOutput) } diff --git a/vendor/github.com/pulumi/pulumi-tls/sdk/v5/go/tls/getPublicKey.go b/vendor/github.com/pulumi/pulumi-tls/sdk/v5/go/tls/getPublicKey.go index 6bf17931d..cda6e7e4d 100644 --- a/vendor/github.com/pulumi/pulumi-tls/sdk/v5/go/tls/getPublicKey.go +++ b/vendor/github.com/pulumi/pulumi-tls/sdk/v5/go/tls/getPublicKey.go @@ -30,7 +30,7 @@ import ( // // func main() { // pulumi.Run(func(ctx *pulumi.Context) error { -// _, err := tls.NewPrivateKey(ctx, "ed25519-example", &tls.PrivateKeyArgs{ +// ed25519_example, err := tls.NewPrivateKey(ctx, "ed25519-example", &tls.PrivateKeyArgs{ // Algorithm: pulumi.String("ED25519"), // }) // if err != nil { @@ -93,21 +93,11 @@ type GetPublicKeyResult struct { } func GetPublicKeyOutput(ctx *pulumi.Context, args GetPublicKeyOutputArgs, opts ...pulumi.InvokeOption) GetPublicKeyResultOutput { - return pulumi.ToOutputWithContext(context.Background(), args). + return pulumi.ToOutputWithContext(ctx.Context(), args). ApplyT(func(v interface{}) (GetPublicKeyResultOutput, error) { args := v.(GetPublicKeyArgs) - opts = internal.PkgInvokeDefaultOpts(opts) - var rv GetPublicKeyResult - secret, err := ctx.InvokePackageRaw("tls:index/getPublicKey:getPublicKey", args, &rv, "", opts...) 
- if err != nil { - return GetPublicKeyResultOutput{}, err - } - - output := pulumi.ToOutput(rv).(GetPublicKeyResultOutput) - if secret { - return pulumi.ToSecret(output).(GetPublicKeyResultOutput), nil - } - return output, nil + options := pulumi.InvokeOutputOptions{InvokeOptions: internal.PkgInvokeDefaultOpts(opts)} + return ctx.InvokeOutput("tls:index/getPublicKey:getPublicKey", args, GetPublicKeyResultOutput{}, options).(GetPublicKeyResultOutput), nil }).(GetPublicKeyResultOutput) } diff --git a/vendor/github.com/pulumi/pulumi-tls/sdk/v5/go/tls/internal/pulumiUtilities.go b/vendor/github.com/pulumi/pulumi-tls/sdk/v5/go/tls/internal/pulumiUtilities.go index 2ec7b2e91..dfc63118e 100644 --- a/vendor/github.com/pulumi/pulumi-tls/sdk/v5/go/tls/internal/pulumiUtilities.go +++ b/vendor/github.com/pulumi/pulumi-tls/sdk/v5/go/tls/internal/pulumiUtilities.go @@ -165,7 +165,7 @@ func callPlainInner( func PkgResourceDefaultOpts(opts []pulumi.ResourceOption) []pulumi.ResourceOption { defaults := []pulumi.ResourceOption{} - version := semver.MustParse("5.0.9") + version := semver.MustParse("5.1.1") if !version.Equals(semver.Version{}) { defaults = append(defaults, pulumi.Version(version.String())) } @@ -176,7 +176,7 @@ func PkgResourceDefaultOpts(opts []pulumi.ResourceOption) []pulumi.ResourceOptio func PkgInvokeDefaultOpts(opts []pulumi.InvokeOption) []pulumi.InvokeOption { defaults := []pulumi.InvokeOption{} - version := semver.MustParse("5.0.9") + version := semver.MustParse("5.1.1") if !version.Equals(semver.Version{}) { defaults = append(defaults, pulumi.Version(version.String())) } diff --git a/vendor/github.com/pulumi/pulumi-tls/sdk/v5/go/tls/pulumi-plugin.json b/vendor/github.com/pulumi/pulumi-tls/sdk/v5/go/tls/pulumi-plugin.json index 8da8f2bc7..7295f1bf2 100644 --- a/vendor/github.com/pulumi/pulumi-tls/sdk/v5/go/tls/pulumi-plugin.json +++ b/vendor/github.com/pulumi/pulumi-tls/sdk/v5/go/tls/pulumi-plugin.json @@ -1,5 +1,5 @@ { "resource": true, "name": "tls", - "version": "5.0.9" + "version": "5.1.1" } diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/.version b/vendor/github.com/pulumi/pulumi/sdk/v3/.version index 4c6305b6e..8f9c06cd6 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/.version +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/.version @@ -1 +1 @@ -3.154.0 +3.157.0 diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/apitype/organizations.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/apitype/organizations.go new file mode 100644 index 000000000..a6c83b9fa --- /dev/null +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/apitype/organizations.go @@ -0,0 +1,30 @@ +// Copyright 2016-2025, Pulumi Corporation. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package apitype + +// GetDefaultOrganizationResponse returns the backend's opinion of which organization +// to default to for a given user, if a default organization has not been configured. +type GetDefaultOrganizationResponse struct { + // Returns the organization name. 
+ // Can be an empty string if the user is not a member of any organization. + GitHubLogin string + + // Messages is a list of messages that should be displayed to the user that contextualize + // the default org; for example: warning new users if their default org as returned by the + // service is on an expiring trial and not free tier, with possible recommendations + // on how to configure their default org locally. + // May be empty. + Messages []Message +} diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/apitype/stacks.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/apitype/stacks.go index 8a1f3486c..1508de6c5 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/apitype/stacks.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/apitype/stacks.go @@ -108,12 +108,12 @@ type Log3rdPartyDecryptionEvent struct { CommandName string `json:"commandName,omitempty"` } -// BatchDecryptRequest defines the request body for bulk decrypting secret values. +// BatchDecryptRequest defines the request body for batch decrypting secret values. type BatchDecryptRequest struct { Ciphertexts [][]byte `json:"ciphertexts"` } -// BatchDecryptResponse defines the response body for bulk decrypted secret values. The key in +// BatchDecryptResponse defines the response body for batch decrypted secret values. The key in // the map is the base64 encoding of the ciphertext. type BatchDecryptResponse struct { Plaintexts map[string][]byte `json:"plaintexts"` } diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/apitype/templates.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/apitype/templates.go new file mode 100644 index 000000000..810fef6de --- /dev/null +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/apitype/templates.go @@ -0,0 +1,65 @@ +// Copyright 2016-2025, Pulumi Corporation. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package apitype + +// A Pulumi template remote where the source URL contains +// a valid Pulumi.yaml file. +type PulumiTemplateRemote struct { + ProjectTemplate + Name string `json:"name"` // The name of the template + SourceName string `json:"sourceName"` // The name of the template source - for display purposes + TemplateURL string `json:"sourceURL"` // The unique URL that identifies the template to the service + Runtime string `json:"runtime"` // The runtime of the template +} + +type ListOrgTemplatesResponse struct { + Templates map[string][]*PulumiTemplateRemote `json:"templates"` + // OrgHasTemplates is true if len(Templates) > 0 + OrgHasTemplates bool `json:"orgHasTemplates"` + // HasAccessError indicates that the Pulumi service was not able to access a + // template source in the org. + HasAccessError bool `json:"hasAccessError"` + // HasUpstreamError indicates that there was an unspecified error fetching a + // template. + HasUpstreamError bool `json:"hasUpstreamError"` +} + +// ProjectTemplate is a Pulumi project template manifest.
+type ProjectTemplate struct { + // DisplayName is an optional user friendly name of the template. + DisplayName string `json:"displayName,omitempty" yaml:"displayName,omitempty"` + // Description is an optional description of the template. + Description string `json:"description,omitempty" yaml:"description,omitempty"` + // Quickstart contains optional text to be displayed after template creation. + Quickstart string `json:"quickstart,omitempty" yaml:"quickstart,omitempty"` + // Config is an optional template config. + Config map[string]ProjectTemplateConfigValue `json:"config,omitempty" yaml:"config,omitempty"` + // Important indicates the template is important. + // + // Deprecated: We don't use this field any more. + Important bool `json:"important,omitempty" yaml:"important,omitempty"` + // Metadata are key/value pairs used to attach additional metadata to a template. + Metadata map[string]string `json:"metadata,omitempty" yaml:"metadata,omitempty"` +} + +// ProjectTemplateConfigValue is a config value included in the project template manifest. +type ProjectTemplateConfigValue struct { + // Description is an optional description for the config value. + Description string `json:"description,omitempty" yaml:"description,omitempty"` + // Default is an optional default value for the config value. + Default string `json:"default,omitempty" yaml:"default,omitempty"` + // Secret may be set to true to indicate that the config value should be encrypted. + Secret bool `json:"secret,omitempty" yaml:"secret,omitempty"` +} diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/env/env.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/env/env.go index 500385a91..ce57a47e0 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/env/env.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/env/env.go @@ -108,6 +108,12 @@ var Parallel = env.Int("PARALLEL", var AccessToken = env.String("ACCESS_TOKEN", "The access token used to authenticate with the Pulumi Service.") +var DisableSecretCache = env.Bool("DISABLE_SECRET_CACHE", + "Disable caching encryption operations for unchanged stack secrets.") + +var ParallelDiff = env.Bool("PARALLEL_DIFF", + "Enable running diff calculations in parallel.") + // List of overrides for Plugin Download URLs. The expected format is `regexp=URL`, and multiple pairs can // be specified separated by commas, e.g. `regexp1=URL1,regexp2=URL2` // @@ -141,6 +147,9 @@ var ( DIYBackendDisableCheckpointBackups = env.Bool("DIY_BACKEND_DISABLE_CHECKPOINT_BACKUPS", "If set, checkpoint backups will not be written to the backup folder.", env.Alternative("DISABLE_CHECKPOINT_BACKUPS")) + + DIYBackendParallel = env.Int("DIY_BACKEND_PARALLEL", + "Number of parallel operations when fetching stacks and resources from the DIY backend.") ) // Environment variables which affect Pulumi AI integrations diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/config/crypt.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/config/crypt.go index f1eb8e937..39413a743 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/config/crypt.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/config/crypt.go @@ -32,6 +32,11 @@ import ( // Encrypter encrypts plaintext into its encrypted ciphertext. type Encrypter interface { EncryptValue(ctx context.Context, plaintext string) (string, error) + + // BatchEncrypt supports encryption of multiple secrets in a single batch request, if supported by the implementation.
+ // Returns a list of encrypted values in the same order as the input secrets. + // Each secret is encrypted individually and duplicate secret values will result in different ciphertexts. + BatchEncrypt(ctx context.Context, secrets []string) ([]string, error) } // Decrypter decrypts encrypted ciphertext to its plaintext representation. @@ -69,6 +74,10 @@ func (nopCrypter) EncryptValue(ctx context.Context, plaintext string) (string, e return plaintext, nil } +func (nopCrypter) BatchEncrypt(ctx context.Context, secrets []string) ([]string, error) { + return DefaultBatchEncrypt(ctx, NopEncrypter, secrets) +} + // BlindingCrypter returns a Crypter that instead of decrypting or encrypting data, just returns "[secret]", it can // be used when you want to display configuration information to a user but don't want to prompt for a password // so secrets will not be decrypted or encrypted. @@ -89,6 +98,10 @@ func (b blindingCrypter) EncryptValue(ctx context.Context, plaintext string) (st return "[secret]", nil } +func (b blindingCrypter) BatchEncrypt(ctx context.Context, secrets []string) ([]string, error) { + return DefaultBatchEncrypt(ctx, b, secrets) +} + func (b blindingCrypter) BatchDecrypt(ctx context.Context, ciphertexts []string) ([]string, error) { return DefaultBatchDecrypt(ctx, b, ciphertexts) } @@ -104,12 +117,40 @@ func (p panicCrypter) EncryptValue(ctx context.Context, _ string) (string, error panic("attempt to encrypt value") } +func (p panicCrypter) BatchEncrypt(ctx context.Context, _ []string) ([]string, error) { + panic("attempt to batch encrypt values") +} + func (p panicCrypter) DecryptValue(ctx context.Context, _ string) (string, error) { panic("attempt to decrypt value") } func (p panicCrypter) BatchDecrypt(ctx context.Context, ciphertexts []string) ([]string, error) { - panic("attempt to bulk decrypt values") + panic("attempt to batch decrypt values") +} + +type errorCrypter struct { + err string +} + +func NewErrorCrypter(err string) Crypter { + return &errorCrypter{err} +} + +func (e errorCrypter) EncryptValue(ctx context.Context, _ string) (string, error) { + return "", fmt.Errorf("failed to encrypt: %s", e.err) +} + +func (e errorCrypter) BatchEncrypt(ctx context.Context, _ []string) ([]string, error) { + return nil, fmt.Errorf("failed to batch encrypt: %s", e.err) +} + +func (e errorCrypter) DecryptValue(ctx context.Context, _ string) (string, error) { + return "", fmt.Errorf("failed to decrypt: %s", e.err) +} + +func (e errorCrypter) BatchDecrypt(ctx context.Context, _ []string) ([]string, error) { + return nil, fmt.Errorf("failed to batch decrypt: %s", e.err) } // NewSymmetricCrypter creates a crypter that encrypts and decrypts values using AES-256-GCM. 
The nonce is stored with @@ -140,6 +181,10 @@ func (s symmetricCrypter) EncryptValue(ctx context.Context, value string) (strin base64.StdEncoding.EncodeToString(nonce), base64.StdEncoding.EncodeToString(secret)), nil } +func (s symmetricCrypter) BatchEncrypt(ctx context.Context, secrets []string) ([]string, error) { + return DefaultBatchEncrypt(ctx, s, secrets) +} + func (s symmetricCrypter) DecryptValue(ctx context.Context, value string) (string, error) { vals := strings.Split(value, ":") @@ -220,10 +265,33 @@ func (c prefixCrypter) EncryptValue(ctx context.Context, plaintext string) (stri return c.prefix + plaintext, nil } +func (c prefixCrypter) BatchEncrypt(ctx context.Context, secrets []string) ([]string, error) { + return DefaultBatchEncrypt(ctx, c, secrets) +} + func (c prefixCrypter) BatchDecrypt(ctx context.Context, ciphertexts []string) ([]string, error) { return DefaultBatchDecrypt(ctx, c, ciphertexts) } +// DefaultBatchEncrypt encrypts a list of plaintexts. Each plaintext is encrypted sequentially. The returned +// list of ciphertexts is in the same order as the input list. This should only be used by implementers of Encrypter +// to implement their BatchEncrypt method in cases where they can't do anything more efficient than just individual operations. +func DefaultBatchEncrypt(ctx context.Context, encrypter Encrypter, plaintexts []string) ([]string, error) { + if len(plaintexts) == 0 { + return nil, nil + } + + encrypted := make([]string, len(plaintexts)) + for i, secret := range plaintexts { + enc, err := encrypter.EncryptValue(ctx, secret) + if err != nil { + return nil, err + } + encrypted[i] = enc + } + return encrypted, nil +} + // DefaultBatchDecrypt decrypts a list of ciphertexts. Each ciphertext is decrypted individually. The returned // list of plaintexts is in the same order as the input list. This should only be used by implementers of Decrypter to implement // their BatchDecrypt method in cases where they can't do anything more efficient than just individual decryptions. @@ -254,6 +322,10 @@ func (c *base64Crypter) EncryptValue(ctx context.Context, s string) (string, err return base64.StdEncoding.EncodeToString([]byte(s)), nil } +func (c *base64Crypter) BatchEncrypt(ctx context.Context, secrets []string) ([]string, error) { + return nil, errors.New("BatchEncrypt not supported for base64Crypter") +} + func (c *base64Crypter) DecryptValue(ctx context.Context, s string) (string, error) { b, err := base64.StdEncoding.DecodeString(s) if err != nil { diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/analyzer.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/analyzer.go index 406f038d4..4ba3d30aa 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/analyzer.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/analyzer.go @@ -74,6 +74,7 @@ type AnalyzerResourceOptions struct { AliasURNs []resource.URN // additional URNs that should be aliased to this resource. Aliases []resource.Alias // additional URNs that should be aliased to this resource. CustomTimeouts resource.CustomTimeouts // an optional config object for resource options + Parent resource.URN // an optional parent URN for this resource. } // AnalyzerProviderResource mirrors a resource's provider sent to the analyzer.
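The crypt.go hunks above add BatchEncrypt to the Encrypter interface and give every built-in crypter a sequential fallback via the new DefaultBatchEncrypt helper. A custom Encrypter outside this patch can satisfy the new method the same way; a minimal sketch with a deliberately toy cipher:

package main

import (
	"context"
	"encoding/hex"

	"github.com/pulumi/pulumi/sdk/v3/go/common/resource/config"
)

// hexCrypter is a toy Encrypter; hex encoding is not encryption and is used
// purely to keep the example self-contained.
type hexCrypter struct{}

func (hexCrypter) EncryptValue(_ context.Context, plaintext string) (string, error) {
	return hex.EncodeToString([]byte(plaintext)), nil
}

// BatchEncrypt delegates to the sequential fallback added by this patch,
// mirroring how the built-in crypters implement the new interface method.
func (h hexCrypter) BatchEncrypt(ctx context.Context, secrets []string) ([]string, error) {
	return config.DefaultBatchEncrypt(ctx, h, secrets)
}

// Compile-time check that hexCrypter satisfies config.Encrypter (assuming the
// interface carries only EncryptValue and BatchEncrypt, as in the hunk above).
var _ config.Encrypter = hexCrypter{}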
diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/analyzer_plugin.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/analyzer_plugin.go index 7be0665bc..b3134b957 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/analyzer_plugin.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/analyzer_plugin.go @@ -532,6 +532,7 @@ func marshalResourceOptions(opts AnalyzerResourceOptions) *pulumirpc.AnalyzerRes Update: opts.CustomTimeouts.Update, Delete: opts.CustomTimeouts.Delete, }, + Parent: string(opts.Parent), } return result } diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/context.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/context.go index 3d9bbc34d..6127f2139 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/context.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/context.go @@ -62,31 +62,33 @@ func NewContext(d, statusD diag.Sink, host Host, _ ConfigSource, pwd string, runtimeOptions map[string]interface{}, disableProviderPreview bool, parentSpan opentracing.Span, ) (*Context, error) { - // TODO: I think really this ought to just take plugins *workspace.Plugins as an arg, but yaml depends on - // this function so *sigh*. For now just see if there's a project we should be using, and use it if there - // is. + // TODO: really this ought to just take plugins *workspace.Plugins and packages map[string]workspace.PackageSpec + // as args, but yaml depends on this function so *sigh*. For now just see if there's a project we should be using, + // and use it if there is. projPath, err := workspace.DetectProjectPath() var plugins *workspace.Plugins + var packages map[string]workspace.PackageSpec if err == nil && projPath != "" { project, err := workspace.LoadProject(projPath) if err == nil { plugins = project.Plugins + packages = project.GetPackageSpecs() } } return NewContextWithRoot(d, statusD, host, pwd, pwd, runtimeOptions, - disableProviderPreview, parentSpan, plugins, nil, nil) + disableProviderPreview, parentSpan, plugins, packages, nil, nil) } // NewContextWithRoot is a variation of NewContext that also sets known project Root. Additionally accepts Plugins func NewContextWithRoot(d, statusD diag.Sink, host Host, pwd, root string, runtimeOptions map[string]interface{}, disableProviderPreview bool, - parentSpan opentracing.Span, plugins *workspace.Plugins, config map[config.Key]string, - debugging DebugEventEmitter, + parentSpan opentracing.Span, plugins *workspace.Plugins, packages map[string]workspace.PackageSpec, + config map[config.Key]string, debugging DebugEventEmitter, ) (*Context, error) { return NewContextWithContext( context.Background(), d, statusD, host, pwd, root, - runtimeOptions, disableProviderPreview, parentSpan, plugins, config, debugging) + runtimeOptions, disableProviderPreview, parentSpan, plugins, packages, config, debugging) } // NewContextWithContext is a variation of NewContextWithRoot that also sets the base context. 
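For code that calls these constructors directly, the migration is mechanical: one extra packages argument slots in between plugins and config. Below is a sketch of the updated call shape, assuming only the signatures shown in this hunk; newEngineContext and the nil placeholders are illustrative, not part of the SDK.

package main

import (
	"fmt"

	"github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin"
	"github.com/pulumi/pulumi/sdk/v3/go/common/workspace"
)

// newEngineContext loads a project and threads its package specs through the
// widened NewContextWithRoot signature.
func newEngineContext(projPath, pwd, root string) (*plugin.Context, error) {
	proj, err := workspace.LoadProject(projPath)
	if err != nil {
		return nil, err
	}
	return plugin.NewContextWithRoot(
		nil, nil, // d, statusD: nil sinks fall back to the defaults
		nil,      // host: nil selects NewDefaultHost
		pwd, root,
		nil,                    // runtimeOptions
		false,                  // disableProviderPreview
		nil,                    // parentSpan
		proj.Plugins,           // plugins, as before
		proj.GetPackageSpecs(), // new: packages declared in Pulumi.yaml
		nil,                    // config
		nil,                    // debugging
	)
}

func main() {
	if _, err := newEngineContext("Pulumi.yaml", ".", "."); err != nil {
		fmt.Println(err)
	}
}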
@@ -94,8 +96,8 @@ func NewContextWithContext( ctx context.Context, d, statusD diag.Sink, host Host, pwd, root string, runtimeOptions map[string]interface{}, disableProviderPreview bool, - parentSpan opentracing.Span, plugins *workspace.Plugins, config map[config.Key]string, - debugging DebugEventEmitter, + parentSpan opentracing.Span, plugins *workspace.Plugins, packages map[string]workspace.PackageSpec, + config map[config.Key]string, debugging DebugEventEmitter, ) (*Context, error) { if d == nil { d = diag.DefaultSink(io.Discard, io.Discard, diag.FormatOptions{Color: colors.Never}) @@ -125,7 +127,9 @@ func NewContextWithContext( baseContext: ctx, } if host == nil { - h, err := NewDefaultHost(pctx, runtimeOptions, disableProviderPreview, plugins, config, debugging, projectName) + h, err := NewDefaultHost( + pctx, runtimeOptions, disableProviderPreview, plugins, packages, config, debugging, projectName, + ) if err != nil { return nil, err } diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/host.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/host.go index 049dea659..b4e0242f4 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/host.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/host.go @@ -20,6 +20,7 @@ import ( "fmt" "os" "path/filepath" + "strings" "sync" "github.com/blang/semver" @@ -96,10 +97,35 @@ type Host interface { Close() error } +// IsLocalPluginPath determines if a plugin source refers to a local path rather than a downloadable plugin. +// A plugin is considered local if it doesn't match the plugin name regexp and doesn't have a download URL. +func IsLocalPluginPath(source string) bool { + // If the source starts with ./ or ../ or / it's definitely a local path + if strings.HasPrefix(source, "./") || strings.HasPrefix(source, "..") || strings.HasPrefix(source, "/") { + return true + } + + // If the source starts with git://, it's definitely not a local path + if strings.HasPrefix(source, "git://") { + return false + } + + // For other cases, we need to be careful about how we interpret the source, so let's parse the spec + // and check if it has a download URL. + pluginSpec, err := workspace.NewPluginSpec(source, apitype.ResourcePlugin, nil, "", nil) + if err != nil { + // If we can't parse it as a plugin spec, assume it's a local path + return true + } + + // If there is a download URL or the name matches the plugin name regexp after parsing, it's not a local path + return pluginSpec.PluginDownloadURL == "" && !workspace.PluginNameRegexp.MatchString(pluginSpec.Name) +} + // NewDefaultHost implements the standard plugin logic, using the standard installation root to find them. func NewDefaultHost(ctx *Context, runtimeOptions map[string]interface{}, - disableProviderPreview bool, plugins *workspace.Plugins, config map[config.Key]string, - debugging DebugEventEmitter, projectName tokens.PackageName, + disableProviderPreview bool, plugins *workspace.Plugins, packages map[string]workspace.PackageSpec, + config map[config.Key]string, debugging DebugEventEmitter, projectName tokens.PackageName, ) (Host, error) { // Create plugin info from providers projectPlugins := make([]workspace.ProjectPlugin, 0) @@ -127,6 +153,26 @@ func NewDefaultHost(ctx *Context, runtimeOptions map[string]interface{}, } } + for name, pkg := range packages { + // Skip downloadable plugins, so that only local folder paths remain. 
+		if !IsLocalPluginPath(pkg.Source) {
+			continue
+		}
+
+		// Resolve the absolute path to the plugin folder.
+		path, err := resolvePluginPath(ctx.Root, pkg.Source)
+		if err != nil {
+			return nil, err
+		}
+
+		// Add the plugin to the project plugins list.
+		projectPlugins = append(projectPlugins, workspace.ProjectPlugin{
+			Kind: apitype.ResourcePlugin,
+			Name: name,
+			Path: path,
+		})
+	}
+
 	host := &defaultHost{
 		ctx:            ctx,
 		runtimeOptions: runtimeOptions,
@@ -170,6 +216,28 @@ func NewDefaultHost(ctx *Context, runtimeOptions map[string]interface{},
 	return host, nil
 }
 
+func resolvePluginPath(root string, path string) (string, error) {
+	// The path is relative to the project root. Make it absolute here so we don't need to track that everywhere it's used.
+	var err error
+	if !filepath.IsAbs(path) {
+		path, err = filepath.Abs(filepath.Join(root, path))
+		if err != nil {
+			return "", fmt.Errorf("getting absolute path for plugin path %s: %w", path, err)
+		}
+	}
+
+	stat, err := os.Stat(path)
+	if os.IsNotExist(err) {
+		return "", fmt.Errorf("no folder at path '%s'", path)
+	} else if err != nil {
+		return "", fmt.Errorf("checking provider folder: %w", err)
+	} else if !stat.IsDir() {
+		return "", fmt.Errorf("provider folder '%s' is not a directory", path)
+	}
+
+	return path, nil
+}
+
 func parsePluginOpts(
 	root string, providerOpts workspace.PluginOptions, k apitype.PluginKind,
 ) (workspace.ProjectPlugin, error) {
@@ -189,22 +257,9 @@ func parsePluginOpts(
 		v = &ver
 	}
 
-	stat, err := os.Stat(providerOpts.Path)
-	if os.IsNotExist(err) {
-		return handleErr("no folder at path '%s'", providerOpts.Path)
-	} else if err != nil {
-		return handleErr("checking provider folder: %w", err)
-	} else if !stat.IsDir() {
-		return handleErr("provider folder '%s' is not a directory", providerOpts.Path)
-	}
-
-	// The path is relative to the project root. Make it absolute here so we don't need to track that everywhere its used.
-	path := providerOpts.Path
-	if !filepath.IsAbs(path) {
-		path, err = filepath.Abs(filepath.Join(root, path))
-		if err != nil {
-			return handleErr("getting absolute path for plugin path %s: %w", providerOpts.Path, err)
-		}
+	path, err := resolvePluginPath(root, providerOpts.Path)
+	if err != nil {
+		return handleErr(err.Error())
 	}
 
 	pluginInfo := workspace.ProjectPlugin{
diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/provider.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/provider.go
index 1fe6e8870..ab515fb2b 100644
--- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/provider.go
+++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/provider.go
@@ -76,35 +76,58 @@ type ProviderHandshakeResponse struct {
 	SupportsAutonamingConfiguration bool
 }
 
+// ParameterizeParameters can either be of concrete type ParameterizeArgs or ParameterizeValue, for when parameterizing
+// a provider from either the command line or from within a Pulumi program, respectively.
 type ParameterizeParameters interface {
 	isParameterizeParameters()
 }
 
 type (
+	// ParameterizeArgs is used when parameterizing a provider from command line arguments.
 	ParameterizeArgs struct {
+		// The arguments passed on the command line following the provider name.
 		Args []string
 	}
 
+	// ParameterizeValue is used when parameterizing a provider from within a Pulumi program.
 	ParameterizeValue struct {
-		Name string
+		// The name of the parameterization.
+		Name string
+		// The version of the parameterization.
Version semver.Version - Value []byte + // The binary parameterization value as returned from a previous parameterization call. + // This can be any data the provider needs to parameterize itself - such as the data needed to turn resource + // requests into API calls. + Value []byte } ) -func (*ParameterizeArgs) isParameterizeParameters() {} +// isParameterizeParameters is a no-op method that marks ParameterizeArgs as implementing ParameterizeParameters. +func (*ParameterizeArgs) isParameterizeParameters() {} + +// isParameterizeParameters is a no-op method that marks ParameterizeValue as implementing ParameterizeParameters. func (*ParameterizeValue) isParameterizeParameters() {} +// The type of requests sent as part of a Parameterize call. type ParameterizeRequest struct { + // The parameters to use when parameterizing the provider instance. Parameters ParameterizeParameters } +// The type of responses sent as part of a Parameterize call. type ParameterizeResponse struct { - Name string + // The name of the parameterization. This must be unique in the context of a program. + // If the request parameter was a ParameterizeValue, then this field must match the input name. + Name string + // The version of the parameterization. This is required to be set by the provider. It does not have to match the + // version of the provider itself, but can be used however the provider sees fit. If the request parameter was a + // ParameterizeValue, then this field must match the input version. Version semver.Version } +// GetSchemaResponse is the response to a GetSchema call. type GetSchemaResponse struct { + // The bytes of the JSON serialized Pulumi schema for generating the provider's SDK. Schema []byte } @@ -723,7 +746,7 @@ type ConstructOptions struct { Dependencies []resource.URN // Protect is true if the component is protected. - Protect bool + Protect *bool // Providers is a map from package name to provider reference. Providers map[string]string diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/provider_server.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/provider_server.go index 4aeed6856..6b9a60c82 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/provider_server.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin/provider_server.go @@ -782,7 +782,7 @@ func (p *providerServer) Construct(ctx context.Context, options := ConstructOptions{ Aliases: aliases, Dependencies: dependencies, - Protect: req.GetProtect(), + Protect: req.Protect, Providers: req.GetProviders(), PropertyDependencies: propertyDependencies, } diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/resource_goal.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/resource_goal.go index 77b077396..9e4b38ba5 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/resource_goal.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/resource/resource_goal.go @@ -26,7 +26,7 @@ type Goal struct { Custom bool // true if this resource is custom, managed by a plugin. Properties PropertyMap // the resource's property state. Parent URN // an optional parent URN for this resource. - Protect bool // true to protect this resource from deletion. + Protect *bool // true to protect this resource from deletion. Dependencies []URN // dependencies of this resource object. Provider string // the provider to use for this resource. 
InitErrors []string // errors encountered as we attempted to initialize the resource. @@ -48,7 +48,7 @@ type Goal struct { // NewGoal allocates a new resource goal state. func NewGoal(t tokens.Type, name string, custom bool, props PropertyMap, - parent URN, protect bool, dependencies []URN, provider string, initErrors []string, + parent URN, protect *bool, dependencies []URN, provider string, initErrors []string, propertyDependencies map[PropertyKey][]URN, deleteBeforeReplace *bool, ignoreChanges []string, additionalSecretOutputs []PropertyKey, aliases []Alias, id ID, customTimeouts *CustomTimeouts, replaceOnChanges []string, retainOnDelete bool, deletedWith URN, sourcePosition string, diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/util/cmdutil/trace.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/util/cmdutil/trace.go index 8e2b0f022..a8453d656 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/util/cmdutil/trace.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/util/cmdutil/trace.go @@ -214,6 +214,10 @@ func rootSpanTags() []opentracing.Tag { Key: "runtime.GOARCH", Value: runtime.GOARCH, }, + { + Key: "runtime.GOMAXPROCS", + Value: runtime.GOMAXPROCS(0), + }, { Key: "runtime.NumCPU", Value: runtime.NumCPU(), diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/workspace/project.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/workspace/project.go index 876fcf35a..3030432ab 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/workspace/project.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/workspace/project.go @@ -73,32 +73,10 @@ func init() { type Analyzers []tokens.QName // ProjectTemplate is a Pulumi project template manifest. -type ProjectTemplate struct { - // DisplayName is an optional user friendly name of the template. - DisplayName string `json:"displayName,omitempty" yaml:"displayName,omitempty"` - // Description is an optional description of the template. - Description string `json:"description,omitempty" yaml:"description,omitempty"` - // Quickstart contains optional text to be displayed after template creation. - Quickstart string `json:"quickstart,omitempty" yaml:"quickstart,omitempty"` - // Config is an optional template config. - Config map[string]ProjectTemplateConfigValue `json:"config,omitempty" yaml:"config,omitempty"` - // Important indicates the template is important. - // - // Deprecated: We don't use this field any more. - Important bool `json:"important,omitempty" yaml:"important,omitempty"` - // Metadata are key/value pairs used to attach additional metadata to a template. - Metadata map[string]string `json:"metadata,omitempty" yaml:"metadata,omitempty"` -} +type ProjectTemplate = apitype.ProjectTemplate // ProjectTemplateConfigValue is a config value included in the project template manifest. -type ProjectTemplateConfigValue struct { - // Description is an optional description for the config value. - Description string `json:"description,omitempty" yaml:"description,omitempty"` - // Default is an optional default value for the config value. - Default string `json:"default,omitempty" yaml:"default,omitempty"` - // Secret may be set to true to indicate that the config value should be encrypted. - Secret bool `json:"secret,omitempty" yaml:"secret,omitempty"` -} +type ProjectTemplateConfigValue = apitype.ProjectTemplateConfigValue // ProjectBackend is the configuration for where the backend state is stored. If unset, will use the // system's currently logged-in backend. 
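Looking back at the host.go changes above: before NewDefaultHost registers a package as a local project plugin, IsLocalPluginPath classifies its source. Prefix rules decide the easy cases, and everything else falls through to a workspace.PluginSpec parse. A sketch of just those fast paths follows; looksLocal is a hypothetical stand-in, since the real helper's fallback needs the workspace package.

package main

import (
	"fmt"
	"strings"
)

// looksLocal applies the prefix-based fast paths from IsLocalPluginPath.
// decided=false means the real helper would parse a PluginSpec to decide.
func looksLocal(source string) (local, decided bool) {
	switch {
	case strings.HasPrefix(source, "./"),
		strings.HasPrefix(source, ".."),
		strings.HasPrefix(source, "/"):
		return true, true // explicit relative or absolute path
	case strings.HasPrefix(source, "git://"):
		return false, true // definitely a downloadable source
	default:
		return false, false // ambiguous: needs the PluginSpec parse
	}
}

func main() {
	sources := []string{
		"./providers/foo",
		"../shared/bar",
		"/opt/plugins/baz",
		"git://github.com/org/repo",
		"terraform-provider",
	}
	for _, s := range sources {
		local, decided := looksLocal(s)
		fmt.Printf("%-28s local=%-5v decided=%v\n", s, local, decided)
	}
}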
@@ -123,6 +101,89 @@ type PluginOptions struct { Path string `json:"path" yaml:"path"` } +// PackageSpec defines the structured format for a package dependency +type PackageSpec struct { + Source string `json:"source" yaml:"source"` + Version string `json:"version,omitempty" yaml:"version,omitempty"` + Parameters []string `json:"parameters,omitempty" yaml:"parameters,omitempty"` +} + +// packageValue can be either a string or a PackageSpec +// This is a private implementation type that handles the dual-format parsing +type packageValue struct { + // The underlying value, either a string or a PackageSpec + value interface{} +} + +// Spec returns the package as a PackageSpec if it's a PackageSpec, +// or creates a simple PackageSpec from its string representation. +func (pv *packageValue) Spec() PackageSpec { + switch v := pv.value.(type) { + case PackageSpec: + return v + case string: + // Check if the string contains a version specifier in the format "source@version" + parts := strings.Split(v, "@") + if len(parts) == 2 { + return PackageSpec{ + Source: parts[0], + Version: parts[1], + } + } + return PackageSpec{Source: v} + default: + panic("unexpected type in PackageValue") + } +} + +// UnmarshalJSON implements the json.Unmarshaler interface +func (pv *packageValue) UnmarshalJSON(data []byte) error { + // First try to unmarshal as a string + var s string + if err := json.Unmarshal(data, &s); err == nil { + pv.value = s + return nil + } + + // If that fails, try to unmarshal as a PackageSpec + var spec PackageSpec + if err := json.Unmarshal(data, &spec); err == nil { + pv.value = spec + return nil + } + + return errors.New("package must be either a string or a package specification object") +} + +// MarshalJSON implements the json.Marshaler interface +func (pv packageValue) MarshalJSON() ([]byte, error) { + return json.Marshal(pv.value) +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface +func (pv *packageValue) UnmarshalYAML(unmarshal func(interface{}) error) error { + // First try to unmarshal as a string + var s string + if err := unmarshal(&s); err == nil { + pv.value = s + return nil + } + + // If that fails, try to unmarshal as a PackageSpec + var spec PackageSpec + if err := unmarshal(&spec); err == nil { + pv.value = spec + return nil + } + + return errors.New("package must be either a string or a package specification object") +} + +// MarshalYAML implements the yaml.Marshaler interface +func (pv packageValue) MarshalYAML() (interface{}, error) { + return pv.value, nil +} + type Plugins struct { Providers []PluginOptions `json:"providers,omitempty" yaml:"providers,omitempty"` Languages []PluginOptions `json:"languages,omitempty" yaml:"languages,omitempty"` @@ -198,6 +259,9 @@ type Project struct { // Options is an optional set of project options Options *ProjectOptions `json:"options,omitempty" yaml:"options,omitempty"` + // Packages is a map of package dependencies that can be either strings or PackageSpecs + Packages map[string]packageValue `json:"packages,omitempty" yaml:"packages,omitempty"` + Plugins *Plugins `json:"plugins,omitempty" yaml:"plugins,omitempty"` // Handle additional keys, albeit in a way that will remove comments and trivia. 
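The packageValue machinery above is what lets a packages entry be written either as a plain string (optionally with a "source@version" suffix) or as a full object. Here is a self-contained sketch using local copies of the two types, JSON only; the vendored type additionally implements the YAML hooks.

package main

import (
	"encoding/json"
	"errors"
	"fmt"
	"strings"
)

// PackageSpec is a local copy of the vendored type, trimmed to the JSON path.
type PackageSpec struct {
	Source  string `json:"source"`
	Version string `json:"version,omitempty"`
}

type packageValue struct{ value interface{} }

// UnmarshalJSON tries the string form first, then the object form,
// mirroring the dual-format parsing above.
func (pv *packageValue) UnmarshalJSON(data []byte) error {
	var s string
	if err := json.Unmarshal(data, &s); err == nil {
		pv.value = s
		return nil
	}
	var spec PackageSpec
	if err := json.Unmarshal(data, &spec); err == nil {
		pv.value = spec
		return nil
	}
	return errors.New("package must be either a string or a package specification object")
}

// Spec normalizes both forms to a PackageSpec, splitting "source@version".
func (pv *packageValue) Spec() PackageSpec {
	switch v := pv.value.(type) {
	case PackageSpec:
		return v
	case string:
		if parts := strings.Split(v, "@"); len(parts) == 2 {
			return PackageSpec{Source: parts[0], Version: parts[1]}
		}
		return PackageSpec{Source: v}
	default:
		panic("unexpected type in packageValue")
	}
}

func main() {
	raw := []byte(`{"aws":"terraform-provider@1.2.3","local":{"source":"./providers/local"}}`)
	var pkgs map[string]packageValue
	if err := json.Unmarshal(raw, &pkgs); err != nil {
		panic(err)
	}
	for _, name := range []string{"aws", "local"} {
		pv := pkgs[name]
		fmt.Printf("%s => %+v\n", name, pv.Spec())
	}
}

Note that the split only recognizes exactly one '@': a string source containing a second '@' falls through to the plain-source case, and object entries bypass the split entirely.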
@@ -211,6 +275,19 @@ func (proj Project) RawValue() []byte { return proj.raw } +// GetPackageSpecs returns a map of package names to their corresponding PackageSpecs +func (proj *Project) GetPackageSpecs() map[string]PackageSpec { + if proj.Packages == nil { + return nil + } + + result := make(map[string]PackageSpec) + for name, packageValue := range proj.Packages { + result[name] = packageValue.Spec() + } + return result +} + func isPrimitiveValue(value interface{}) bool { switch value.(type) { case string, int, bool: diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/workspace/project.json b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/workspace/project.json index 758e6058b..7804009f9 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/workspace/project.json +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/common/workspace/project.json @@ -218,6 +218,21 @@ }, "additionalProperties":false }, + "packages":{ + "description":"Package dependencies for this Pulumi project.", + "type":"object", + "additionalProperties":{ + "oneOf": [ + { + "type": "string", + "description": "Package source in a string format, optionally with `@version` suffix" + }, + { + "$ref":"#/$defs/packageSpec" + } + ] + } + }, "plugins":{ "description":"Override for the plugin selection. Intended for use in developing pulumi plugins.", "type":"object", @@ -252,6 +267,31 @@ ], "additionalProperties":true, "$defs":{ + "packageSpec":{ + "title":"PackageSpec", + "type":"object", + "additionalProperties":false, + "required":[ + "source" + ], + "properties":{ + "source":{ + "type":"string", + "description":"Source of the package, which can be a URL scheme like 'github://', 'git://', a file path './local/path', or a name like 'terraform-provider'" + }, + "version":{ + "type":"string", + "description":"Version of the package to use" + }, + "parameters":{ + "type":"array", + "items": { + "type": "string" + }, + "description":"Additional parameters for parameterized providers" + } + } + }, "pluginOptions":{ "title":"PluginOptions", "type":"object", diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/pulumi/context.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/pulumi/context.go index 9ac347f33..399a21fcd 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/pulumi/context.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/pulumi/context.go @@ -388,7 +388,15 @@ func (ctx *Context) registerTransform(t ResourceTransform) (*pulumirpc.Callback, opts.IgnoreChanges = rpcReq.Options.IgnoreChanges opts.Parent = parent opts.PluginDownloadURL = rpcReq.Options.PluginDownloadUrl - opts.Protect = rpcReq.Options.Protect + + // TODO(https://github.com/pulumi/pulumi/issues/18935): The Go public API can't express the + // optionality that the wire protocol can + var protect bool + if rpcReq.Options.Protect != nil { + protect = *rpcReq.Options.Protect + } + opts.Protect = protect + if rpcReq.Options.Provider != "" { opts.Provider = ctx.newDependencyProviderResourceFromRef(rpcReq.Options.Provider) } @@ -490,7 +498,7 @@ func (ctx *Context) registerTransform(t ResourceTransform) (*pulumirpc.Callback, } rpcRes.Options.IgnoreChanges = opts.IgnoreChanges rpcRes.Options.PluginDownloadUrl = opts.PluginDownloadURL - rpcRes.Options.Protect = opts.Protect + rpcRes.Options.Protect = &opts.Protect if opts.Provider != nil { rpcRes.Options.Provider, err = ctx.resolveProviderReference(opts.Provider) @@ -1255,8 +1263,8 @@ func (ctx *Context) readPackageResource( // Get the provider for the resource. 
provider := getProvider(t, options.Provider, providers) protect := options.Protect - if parent != nil { - protect = protect || parent.getProtect() + if parent != nil && protect == nil { + protect = parent.getProtect() } // Create resolvers for the resource's outputs. @@ -1456,8 +1464,8 @@ func (ctx *Context) registerResource( // Get the provider for the resource. provider := getProvider(t, options.Provider, providers) protect := options.Protect - if parent != nil { - protect = protect || parent.getProtect() + if parent != nil && protect == nil { + protect = parent.getProtect() } // Create resolvers for the resource's outputs. @@ -1651,7 +1659,7 @@ type resourceState struct { outputs map[string]Output providers map[string]ProviderResource provider ProviderResource - protect bool + protect *bool version string pluginDownloadURL string name string @@ -1799,7 +1807,7 @@ var mapOutputType = reflect.TypeOf((*MapOutput)(nil)).Elem() // makeResourceState creates a set of resolvers that we'll use to finalize state, for URNs, IDs, and output // properties. func (ctx *Context) makeResourceState(t, name string, resourceV Resource, providers map[string]ProviderResource, - provider ProviderResource, protect bool, version, pluginDownloadURL string, aliases []URNOutput, + provider ProviderResource, protect *bool, version, pluginDownloadURL string, aliases []URNOutput, transformations []ResourceTransformation, ) *resourceState { // Ensure that the input res is a pointer to a struct. Note that we don't fail if it is not, and we probably @@ -1997,7 +2005,7 @@ func (state *resourceState) resolve(ctx *Context, err error, inputs *resourceInp type resourceInputs struct { parent string deps []string - protect bool + protect *bool provider string providers map[string]string resolvedProps resource.PropertyMap diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/pulumi/provider.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/pulumi/provider.go index 45d8eba3c..dec3871cc 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/pulumi/provider.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/pulumi/provider.go @@ -117,7 +117,7 @@ func construct(ctx context.Context, req *pulumirpc.ConstructRequest, engineConn if len(dependencies) > 0 { ro.DependsOn = append(ro.DependsOn, resourceDependencySet(dependencies)) } - ro.Protect = req.GetProtect() + ro.Protect = req.Protect ro.Providers = providers ro.Parent = parent diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/go/pulumi/resource.go b/vendor/github.com/pulumi/pulumi/sdk/v3/go/pulumi/resource.go index e28997979..a95550fb7 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/go/pulumi/resource.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/go/pulumi/resource.go @@ -61,7 +61,7 @@ type ResourceState struct { children resourceSet providers map[string]ProviderResource provider ProviderResource - protect bool + protect *bool version string pluginDownloadURL string aliases []URNOutput @@ -122,7 +122,7 @@ func (s *ResourceState) getProvider() ProviderResource { return s.provider } -func (s *ResourceState) getProtect() bool { +func (s *ResourceState) getProtect() *bool { return s.protect } @@ -236,7 +236,7 @@ type Resource interface { getProvider() ProviderResource // getProtect returns the protect flag for the resource. - getProtect() bool + getProtect() *bool // getVersion returns the version for the resource. 
getVersion() string @@ -435,7 +435,7 @@ type resourceOptions struct { IgnoreChanges []string Import IDInput Parent Resource - Protect bool + Protect *bool Provider ProviderResource Providers map[string]ProviderResource ReplaceOnChanges []string @@ -482,6 +482,11 @@ func resourceOptionsSnapshot(ro *resourceOptions) *ResourceOptions { }) } + var protect bool + if ro.Protect != nil { + protect = *ro.Protect + } + return &ResourceOptions{ AdditionalSecretOutputs: ro.AdditionalSecretOutputs, Aliases: ro.Aliases, @@ -492,7 +497,7 @@ func resourceOptionsSnapshot(ro *resourceOptions) *ResourceOptions { IgnoreChanges: ro.IgnoreChanges, Import: ro.Import, Parent: ro.Parent, - Protect: ro.Protect, + Protect: protect, Provider: ro.Provider, Providers: providers, ReplaceOnChanges: ro.ReplaceOnChanges, @@ -803,7 +808,7 @@ func Parent(r Resource) ResourceOrInvokeOption { // Protect, when set to true, ensures that this resource cannot be deleted (without first setting it to false). func Protect(o bool) ResourceOption { return resourceOption(func(ro *resourceOptions) { - ro.Protect = o + ro.Protect = &o }) } diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/proto/go/analyzer.pb.go b/vendor/github.com/pulumi/pulumi/sdk/v3/proto/go/analyzer.pb.go index cad869242..3797af588 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/proto/go/analyzer.pb.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/proto/go/analyzer.pb.go @@ -303,6 +303,7 @@ type AnalyzerResourceOptions struct { AdditionalSecretOutputs []string `protobuf:"bytes,5,rep,name=additionalSecretOutputs,proto3" json:"additionalSecretOutputs,omitempty"` // a list of output properties that should also be treated as secret, in addition to ones we detect. Aliases []string `protobuf:"bytes,6,rep,name=aliases,proto3" json:"aliases,omitempty"` // a list of additional URNs that shoud be considered the same. CustomTimeouts *AnalyzerResourceOptions_CustomTimeouts `protobuf:"bytes,7,opt,name=customTimeouts,proto3" json:"customTimeouts,omitempty"` // a config block that will be used to configure timeouts for CRUD operations. + Parent string `protobuf:"bytes,8,opt,name=parent,proto3" json:"parent,omitempty"` // an optional parent URN that this child resource belongs to. } func (x *AnalyzerResourceOptions) Reset() { @@ -386,6 +387,13 @@ func (x *AnalyzerResourceOptions) GetCustomTimeouts() *AnalyzerResourceOptions_C return nil } +func (x *AnalyzerResourceOptions) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + // AnalyzerProviderResource provides information about a resource's provider. 
type AnalyzerProviderResource struct { state protoimpl.MessageState @@ -1299,7 +1307,7 @@ var file_pulumi_analyzer_proto_rawDesc = []byte{ 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x41, 0x6e, 0x61, 0x6c, 0x79, 0x7a, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x52, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xd4, 0x03, 0x0a, 0x17, 0x41, 0x6e, 0x61, 0x6c, + 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xec, 0x03, 0x0a, 0x17, 0x41, 0x6e, 0x61, 0x6c, 0x79, 0x7a, 0x65, 0x72, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x12, 0x24, 0x0a, @@ -1323,180 +1331,182 @@ var file_pulumi_analyzer_proto_rawDesc = []byte{ 0x61, 0x6c, 0x79, 0x7a, 0x65, 0x72, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x73, 0x52, 0x0e, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, - 0x6f, 0x75, 0x74, 0x73, 0x1a, 0x58, 0x0a, 0x0e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, - 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x06, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x16, - 0x0a, 0x06, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x06, - 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x01, 0x52, 0x06, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x22, 0x8d, - 0x01, 0x0a, 0x18, 0x41, 0x6e, 0x61, 0x6c, 0x79, 0x7a, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x76, 0x69, - 0x64, 0x65, 0x72, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, - 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, - 0x37, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x0a, 0x70, 0x72, - 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6e, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x32, - 0x0a, 0x1c, 0x41, 0x6e, 0x61, 0x6c, 0x79, 0x7a, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, - 0x74, 0x79, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x12, 0x12, - 0x0a, 0x04, 0x75, 0x72, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x75, 0x72, - 0x6e, 0x73, 0x22, 0x50, 0x0a, 0x13, 0x41, 0x6e, 0x61, 0x6c, 0x79, 0x7a, 0x65, 0x53, 0x74, 0x61, - 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x39, 0x0a, 0x09, 0x72, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x70, - 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x41, 0x6e, 0x61, 0x6c, 0x79, 0x7a, 0x65, - 0x72, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x73, 
0x22, 0x51, 0x0a, 0x0f, 0x41, 0x6e, 0x61, 0x6c, 0x79, 0x7a, 0x65, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, - 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x70, - 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x41, 0x6e, 0x61, 0x6c, 0x79, 0x7a, 0x65, - 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, - 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0xb4, 0x02, 0x0a, 0x11, 0x41, 0x6e, 0x61, 0x6c, - 0x79, 0x7a, 0x65, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x12, 0x1e, 0x0a, - 0x0a, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0a, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x26, 0x0a, - 0x0e, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x50, 0x61, 0x63, 0x6b, 0x4e, 0x61, 0x6d, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x50, 0x61, 0x63, - 0x6b, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2c, 0x0a, 0x11, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x50, - 0x61, 0x63, 0x6b, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x11, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x50, 0x61, 0x63, 0x6b, 0x56, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, - 0x12, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x74, - 0x61, 0x67, 0x73, 0x12, 0x47, 0x0a, 0x10, 0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1b, 0x2e, - 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x66, 0x6f, 0x72, 0x63, - 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x10, 0x65, 0x6e, 0x66, 0x6f, - 0x72, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x10, 0x0a, 0x03, - 0x75, 0x72, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6e, 0x22, 0xfe, - 0x01, 0x0a, 0x0b, 0x52, 0x65, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, - 0x0a, 0x0a, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0a, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x26, - 0x0a, 0x0e, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x50, 0x61, 0x63, 0x6b, 0x4e, 0x61, 0x6d, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x50, 0x61, - 0x63, 0x6b, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2c, 0x0a, 0x11, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x50, 0x61, 0x63, 0x6b, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x11, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x50, 0x61, 0x63, 0x6b, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, - 0x74, 0x69, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 
0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, - 0x75, 0x63, 0x74, 0x52, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, - 0x1e, 0x0a, 0x0a, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x22, - 0x4f, 0x0a, 0x11, 0x52, 0x65, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x0c, 0x72, 0x65, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x70, 0x75, 0x6c, - 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x0c, 0x72, 0x65, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x22, 0xe6, 0x02, 0x0a, 0x0c, 0x41, 0x6e, 0x61, 0x6c, 0x79, 0x7a, 0x65, 0x72, 0x49, 0x6e, 0x66, - 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, - 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, - 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x31, 0x0a, 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, - 0x69, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x70, 0x75, 0x6c, 0x75, - 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x49, 0x6e, 0x66, 0x6f, - 0x52, 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x26, 0x0a, 0x0e, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x73, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x73, 0x75, - 0x70, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x50, 0x0a, 0x0d, - 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x06, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, - 0x41, 0x6e, 0x61, 0x6c, 0x79, 0x7a, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x49, 0x6e, 0x69, - 0x74, 0x69, 0x61, 0x6c, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, - 0x0d, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x59, - 0x0a, 0x12, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, - 0x63, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x8a, 0x02, 0x0a, 0x0a, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, - 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 
0x6d, 0x65, 0x12, 0x20, - 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x47, 0x0a, 0x10, 0x65, 0x6e, - 0x66, 0x6f, 0x72, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x05, + 0x6f, 0x75, 0x74, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x1a, 0x58, 0x0a, 0x0e, + 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x73, 0x12, 0x16, + 0x0a, 0x06, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x06, + 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x06, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x16, + 0x0a, 0x06, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x01, 0x52, 0x06, + 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x22, 0x8d, 0x01, 0x0a, 0x18, 0x41, 0x6e, 0x61, 0x6c, 0x79, + 0x7a, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x37, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, + 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, + 0x72, 0x75, 0x63, 0x74, 0x52, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, + 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, + 0x72, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x32, 0x0a, 0x1c, 0x41, 0x6e, 0x61, 0x6c, 0x79, 0x7a, + 0x65, 0x72, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, + 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x72, 0x6e, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x75, 0x72, 0x6e, 0x73, 0x22, 0x50, 0x0a, 0x13, 0x41, 0x6e, + 0x61, 0x6c, 0x79, 0x7a, 0x65, 0x53, 0x74, 0x61, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x39, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, + 0x2e, 0x41, 0x6e, 0x61, 0x6c, 0x79, 0x7a, 0x65, 0x72, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x22, 0x51, 0x0a, 0x0f, + 0x41, 0x6e, 0x61, 0x6c, 0x79, 0x7a, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x3e, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, + 0x2e, 0x41, 0x6e, 0x61, 0x6c, 0x79, 0x7a, 0x65, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, + 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, + 0xb4, 0x02, 0x0a, 0x11, 0x41, 0x6e, 0x61, 0x6c, 0x79, 0x7a, 0x65, 0x44, 0x69, 0x61, 0x67, 0x6e, + 
0x6f, 0x73, 0x74, 0x69, 0x63, 0x12, 0x1e, 0x0a, 0x0a, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x26, 0x0a, 0x0e, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x50, + 0x61, 0x63, 0x6b, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x50, 0x61, 0x63, 0x6b, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2c, 0x0a, + 0x11, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x50, 0x61, 0x63, 0x6b, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x50, 0x61, 0x63, 0x6b, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x64, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, + 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, 0x18, + 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, 0x47, 0x0a, 0x10, 0x65, + 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1b, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, + 0x63, 0x2e, 0x45, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x4c, 0x65, 0x76, + 0x65, 0x6c, 0x52, 0x10, 0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x4c, + 0x65, 0x76, 0x65, 0x6c, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x75, 0x72, 0x6e, 0x22, 0xfe, 0x01, 0x0a, 0x0b, 0x52, 0x65, 0x6d, 0x65, 0x64, + 0x69, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x26, 0x0a, 0x0e, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x50, 0x61, 0x63, 0x6b, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, + 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x50, 0x61, 0x63, 0x6b, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2c, + 0x0a, 0x11, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x50, 0x61, 0x63, 0x6b, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x50, 0x61, 0x63, 0x6b, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, + 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x37, + 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x0a, 0x70, 0x72, 0x6f, + 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x69, 0x61, 0x67, 0x6e, + 0x6f, 0x73, 0x74, 0x69, 0x63, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x69, 0x61, + 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x22, 0x4f, 0x0a, 0x11, 0x52, 0x65, 0x6d, 0x65, 0x64, + 0x69, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x0c, + 0x72, 0x65, 0x6d, 0x65, 
0x64, 0x69, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x52, + 0x65, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x72, 0x65, 0x6d, 0x65, + 0x64, 0x69, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xe6, 0x02, 0x0a, 0x0c, 0x41, 0x6e, 0x61, + 0x6c, 0x79, 0x7a, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, + 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, + 0x31, 0x0a, 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x15, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, + 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x26, 0x0a, 0x0e, + 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x12, 0x50, 0x0a, 0x0d, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x70, 0x75, + 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x41, 0x6e, 0x61, 0x6c, 0x79, 0x7a, 0x65, 0x72, + 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0d, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x59, 0x0a, 0x12, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, + 0x6c, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2d, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, + 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x22, 0x8a, 0x02, 0x0a, 0x0a, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x49, 0x6e, 0x66, 0x6f, + 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, + 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, + 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x12, 0x47, 0x0a, 0x10, 0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x6d, 0x65, 0x6e, + 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1b, 0x2e, 0x70, + 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 
0x2e, 0x45, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x10, 0x65, 0x6e, 0x66, 0x6f, 0x72, + 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x41, 0x0a, 0x0c, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1d, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x69, + 0x0a, 0x12, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x12, 0x37, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, + 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, + 0x74, 0x52, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x1a, 0x0a, + 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x22, 0x90, 0x01, 0x0a, 0x0c, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x47, 0x0a, 0x10, 0x65, 0x6e, + 0x66, 0x6f, 0x72, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1b, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x10, 0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x4c, 0x65, - 0x76, 0x65, 0x6c, 0x12, 0x41, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x63, 0x68, - 0x65, 0x6d, 0x61, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x70, 0x75, 0x6c, 0x75, - 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x69, 0x0a, 0x12, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x37, 0x0a, 0x0a, - 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, - 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, - 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, - 0x64, 0x22, 0x90, 0x01, 0x0a, 0x0c, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x12, 0x47, 0x0a, 0x10, 0x65, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x6d, 0x65, 0x6e, - 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1b, 0x2e, 0x70, - 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x65, - 0x6d, 0x65, 0x6e, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x52, 0x10, 0x65, 0x6e, 0x66, 0x6f, 0x72, - 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x37, 0x0a, 0x0a, 0x70, - 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 
0x28, 0x0b, 0x32, - 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, - 0x74, 0x69, 0x65, 0x73, 0x22, 0xcf, 0x01, 0x0a, 0x18, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, - 0x72, 0x65, 0x41, 0x6e, 0x61, 0x6c, 0x79, 0x7a, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x59, 0x0a, 0x0c, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, - 0x72, 0x70, 0x63, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x41, 0x6e, 0x61, - 0x6c, 0x79, 0x7a, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, - 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x58, 0x0a, 0x11, - 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, - 0x6b, 0x65, 0x79, 0x12, 0x2d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x2a, 0x4c, 0x0a, 0x10, 0x45, 0x6e, 0x66, 0x6f, 0x72, 0x63, - 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x0c, 0x0a, 0x08, 0x41, 0x44, - 0x56, 0x49, 0x53, 0x4f, 0x52, 0x59, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x4d, 0x41, 0x4e, 0x44, - 0x41, 0x54, 0x4f, 0x52, 0x59, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x49, 0x53, 0x41, 0x42, - 0x4c, 0x45, 0x44, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x52, 0x45, 0x4d, 0x45, 0x44, 0x49, 0x41, - 0x54, 0x45, 0x10, 0x03, 0x32, 0xb8, 0x03, 0x0a, 0x08, 0x41, 0x6e, 0x61, 0x6c, 0x79, 0x7a, 0x65, - 0x72, 0x12, 0x42, 0x0a, 0x07, 0x41, 0x6e, 0x61, 0x6c, 0x79, 0x7a, 0x65, 0x12, 0x19, 0x2e, 0x70, - 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x41, 0x6e, 0x61, 0x6c, 0x79, 0x7a, 0x65, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, - 0x72, 0x70, 0x63, 0x2e, 0x41, 0x6e, 0x61, 0x6c, 0x79, 0x7a, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4c, 0x0a, 0x0c, 0x41, 0x6e, 0x61, 0x6c, 0x79, 0x7a, 0x65, - 0x53, 0x74, 0x61, 0x63, 0x6b, 0x12, 0x1e, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, - 0x63, 0x2e, 0x41, 0x6e, 0x61, 0x6c, 0x79, 0x7a, 0x65, 0x53, 0x74, 0x61, 0x63, 0x6b, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, - 0x63, 0x2e, 0x41, 0x6e, 0x61, 0x6c, 0x79, 0x7a, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x12, 0x46, 0x0a, 0x09, 0x52, 0x65, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, - 0x12, 0x19, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x41, 0x6e, 0x61, - 0x6c, 0x79, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x70, 0x75, - 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, - 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x44, 0x0a, 0x0f, 0x47, - 0x65, 0x74, 0x41, 0x6e, 0x61, 0x6c, 0x79, 0x7a, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x16, - 0x2e, 
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x17, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, - 0x70, 0x63, 0x2e, 0x41, 0x6e, 0x61, 0x6c, 0x79, 0x7a, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x22, - 0x00, 0x12, 0x40, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x49, 0x6e, - 0x66, 0x6f, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x15, 0x2e, 0x70, 0x75, 0x6c, - 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x49, 0x6e, 0x66, - 0x6f, 0x22, 0x00, 0x12, 0x4a, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, - 0x12, 0x23, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x41, 0x6e, 0x61, 0x6c, 0x79, 0x7a, 0x65, 0x72, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x42, - 0x34, 0x5a, 0x32, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x75, - 0x6c, 0x75, 0x6d, 0x69, 0x2f, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x2f, 0x73, 0x64, 0x6b, 0x2f, - 0x76, 0x33, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x3b, 0x70, 0x75, 0x6c, 0x75, - 0x6d, 0x69, 0x72, 0x70, 0x63, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x76, 0x65, 0x6c, 0x12, 0x37, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, + 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, + 0x52, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0xcf, 0x01, 0x0a, + 0x18, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x41, 0x6e, 0x61, 0x6c, 0x79, 0x7a, + 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x59, 0x0a, 0x0c, 0x70, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x35, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x75, 0x72, 0x65, 0x41, 0x6e, 0x61, 0x6c, 0x79, 0x7a, 0x65, 0x72, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x58, 0x0a, 0x11, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2d, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x70, 0x75, 0x6c, + 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x2a, 0x4c, + 0x0a, 0x10, 0x45, 0x6e, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x4c, 0x65, 0x76, + 0x65, 0x6c, 0x12, 0x0c, 0x0a, 0x08, 0x41, 0x44, 0x56, 0x49, 0x53, 0x4f, 0x52, 0x59, 0x10, 0x00, + 0x12, 0x0d, 0x0a, 0x09, 0x4d, 0x41, 0x4e, 0x44, 0x41, 0x54, 0x4f, 0x52, 0x59, 0x10, 0x01, 0x12, + 0x0c, 0x0a, 0x08, 0x44, 0x49, 0x53, 0x41, 0x42, 
0x4c, 0x45, 0x44, 0x10, 0x02, 0x12, 0x0d, 0x0a, + 0x09, 0x52, 0x45, 0x4d, 0x45, 0x44, 0x49, 0x41, 0x54, 0x45, 0x10, 0x03, 0x32, 0xb8, 0x03, 0x0a, + 0x08, 0x41, 0x6e, 0x61, 0x6c, 0x79, 0x7a, 0x65, 0x72, 0x12, 0x42, 0x0a, 0x07, 0x41, 0x6e, 0x61, + 0x6c, 0x79, 0x7a, 0x65, 0x12, 0x19, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, + 0x2e, 0x41, 0x6e, 0x61, 0x6c, 0x79, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x1a, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x41, 0x6e, 0x61, 0x6c, + 0x79, 0x7a, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4c, 0x0a, + 0x0c, 0x41, 0x6e, 0x61, 0x6c, 0x79, 0x7a, 0x65, 0x53, 0x74, 0x61, 0x63, 0x6b, 0x12, 0x1e, 0x2e, + 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x41, 0x6e, 0x61, 0x6c, 0x79, 0x7a, + 0x65, 0x53, 0x74, 0x61, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, + 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x41, 0x6e, 0x61, 0x6c, 0x79, 0x7a, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x46, 0x0a, 0x09, 0x52, + 0x65, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x12, 0x19, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, + 0x69, 0x72, 0x70, 0x63, 0x2e, 0x41, 0x6e, 0x61, 0x6c, 0x79, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, + 0x52, 0x65, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x44, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x41, 0x6e, 0x61, 0x6c, 0x79, 0x7a, + 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x17, + 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x41, 0x6e, 0x61, 0x6c, 0x79, + 0x7a, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x00, 0x12, 0x40, 0x0a, 0x0d, 0x47, 0x65, 0x74, + 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, + 0x74, 0x79, 0x1a, 0x15, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x50, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x00, 0x12, 0x4a, 0x0a, 0x09, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x12, 0x23, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, + 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x41, 0x6e, + 0x61, 0x6c, 0x79, 0x7a, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x42, 0x34, 0x5a, 0x32, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x2f, 0x70, 0x75, 0x6c, + 0x75, 0x6d, 0x69, 0x2f, 0x73, 0x64, 0x6b, 0x2f, 0x76, 0x33, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2f, 0x67, 0x6f, 0x3b, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/proto/go/provider.pb.go b/vendor/github.com/pulumi/pulumi/sdk/v3/proto/go/provider.pb.go index 5531e7f9c..a5a8db2c0 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/proto/go/provider.pb.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/proto/go/provider.pb.go @@ -2498,7 
+2498,7 @@ type ConstructRequest struct { Organization string `protobuf:"bytes,17,opt,name=organization,proto3" json:"organization,omitempty"` // True if and only if the resource (and by extension, its nested resources) should be marked as protected. // Protected resources cannot be deleted without first being unprotected. - Protect bool `protobuf:"varint,12,opt,name=protect,proto3" json:"protect,omitempty"` + Protect *bool `protobuf:"varint,12,opt,name=protect,proto3,oneof" json:"protect,omitempty"` // A list of additional URNs that should be considered the same as this component's URN (and which will therefore be // used to build aliases for its nested resource URNs). These may be URNs that previously referred to this component // e.g. if it had its parent (and consequently URN) changed. @@ -2666,8 +2666,8 @@ func (x *ConstructRequest) GetOrganization() string { } func (x *ConstructRequest) GetProtect() bool { - if x != nil { - return x.Protect + if x != nil && x.Protect != nil { + return *x.Protect } return false } @@ -4015,7 +4015,7 @@ var file_pulumi_provider_proto_rawDesc = []byte{ 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x09, 0x6f, 0x6c, 0x64, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0xad, 0x0b, 0x0a, 0x10, + 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0xbe, 0x0b, 0x0a, 0x10, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, @@ -4055,201 +4055,202 @@ var file_pulumi_provider_proto_rawDesc = []byte{ 0x69, 0x67, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x22, 0x0a, 0x0c, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x18, 0x0c, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x6c, - 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x61, 0x6c, 0x69, - 0x61, 0x73, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x17, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x61, 0x6c, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x73, 0x18, - 0x12, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, - 0x6c, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x73, 0x12, 0x52, - 0x0a, 0x0e, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x73, - 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, - 0x70, 0x63, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, - 0x74, 0x73, 0x52, 0x0e, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, - 0x74, 0x73, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x57, 0x69, 0x74, - 0x68, 0x18, 0x14, 0x20, 0x01, 0x28, 
0x09, 0x52, 0x0b, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, - 0x57, 0x69, 0x74, 0x68, 0x12, 0x30, 0x0a, 0x13, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x65, - 0x66, 0x6f, 0x72, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x18, 0x15, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x13, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x52, - 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x12, 0x24, 0x0a, 0x0d, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, - 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x16, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x69, - 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x2a, 0x0a, 0x10, - 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x4f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, - 0x18, 0x17, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x4f, - 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0e, 0x72, 0x65, 0x74, 0x61, - 0x69, 0x6e, 0x4f, 0x6e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x18, 0x18, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x0e, 0x72, 0x65, 0x74, 0x61, 0x69, 0x6e, 0x4f, 0x6e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x12, 0x32, 0x0a, 0x15, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x73, 0x5f, 0x6f, 0x75, 0x74, 0x70, - 0x75, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x13, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x73, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x73, 0x1a, 0x2a, 0x0a, 0x14, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, - 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x12, 0x12, 0x0a, 0x04, - 0x75, 0x72, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x75, 0x72, 0x6e, 0x73, - 0x1a, 0x58, 0x0a, 0x0e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, - 0x74, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x06, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x75, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x75, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x06, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x1a, 0x39, 0x0a, 0x0b, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x76, 0x0a, 0x16, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x44, 0x65, - 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, - 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, - 0x79, 0x12, 0x46, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x30, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x6f, 0x6e, - 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x50, 0x72, - 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, - 0x65, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3c, 0x0a, - 0x0e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, - 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 
0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, - 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xdc, 0x02, 0x0a, 0x11, - 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, - 0x75, 0x72, 0x6e, 0x12, 0x2d, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x05, 0x73, 0x74, 0x61, - 0x74, 0x65, 0x12, 0x61, 0x0a, 0x11, 0x73, 0x74, 0x61, 0x74, 0x65, 0x44, 0x65, 0x70, 0x65, 0x6e, - 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, + 0x12, 0x1d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x18, 0x0c, 0x20, 0x01, 0x28, + 0x08, 0x48, 0x00, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x88, 0x01, 0x01, 0x12, + 0x18, 0x0a, 0x07, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x07, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x17, 0x61, 0x64, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4f, 0x75, 0x74, + 0x70, 0x75, 0x74, 0x73, 0x18, 0x12, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x61, 0x64, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4f, 0x75, 0x74, 0x70, + 0x75, 0x74, 0x73, 0x12, 0x52, 0x0a, 0x0e, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, + 0x65, 0x6f, 0x75, 0x74, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x70, 0x75, + 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x63, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, + 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x73, 0x52, 0x0e, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, + 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x73, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x64, 0x57, 0x69, 0x74, 0x68, 0x18, 0x14, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x64, 0x57, 0x69, 0x74, 0x68, 0x12, 0x30, 0x0a, 0x13, 0x64, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, + 0x18, 0x15, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x65, + 0x66, 0x6f, 0x72, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x12, 0x24, 0x0a, 0x0d, 0x69, + 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x16, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x0d, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, + 0x73, 0x12, 0x2a, 0x0a, 0x10, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x4f, 0x6e, 0x43, 0x68, + 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x17, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x72, 0x65, 0x70, + 0x6c, 0x61, 0x63, 0x65, 0x4f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x26, 0x0a, + 0x0e, 0x72, 0x65, 0x74, 0x61, 0x69, 0x6e, 0x4f, 0x6e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x18, + 0x18, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x72, 0x65, 0x74, 0x61, 0x69, 0x6e, 0x4f, 0x6e, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x32, 0x0a, 0x15, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x73, + 0x5f, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 
0x18, 0x19, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x73, 0x4f, 0x75, 0x74, + 0x70, 0x75, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x1a, 0x2a, 0x0a, 0x14, 0x50, 0x72, 0x6f, + 0x70, 0x65, 0x72, 0x74, 0x79, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, + 0x73, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x72, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x04, 0x75, 0x72, 0x6e, 0x73, 0x1a, 0x58, 0x0a, 0x0e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, + 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, + 0x16, 0x0a, 0x06, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x1a, + 0x39, 0x0a, 0x0b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x76, 0x0a, 0x16, 0x49, 0x6e, + 0x70, 0x75, 0x74, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x46, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, + 0x63, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x2e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x44, 0x65, 0x70, 0x65, 0x6e, + 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x1a, 0x3c, 0x0a, 0x0e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x22, 0xdc, 0x02, 0x0a, + 0x11, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x75, 0x72, 0x6e, 0x12, 0x2d, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x05, 0x73, 0x74, + 0x61, 0x74, 0x65, 0x12, 0x61, 0x0a, 0x11, 0x73, 0x74, 0x61, 0x74, 0x65, 0x44, 0x65, 0x70, 0x65, + 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, + 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x74, + 0x72, 0x75, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x45, 0x6e, + 0x74, 0x72, 
0x79, 0x52, 0x11, 0x73, 0x74, 0x61, 0x74, 0x65, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, + 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x1a, 0x2a, 0x0a, 0x14, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, + 0x74, 0x79, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x12, 0x12, + 0x0a, 0x04, 0x75, 0x72, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x75, 0x72, + 0x6e, 0x73, 0x1a, 0x77, 0x0a, 0x16, 0x53, 0x74, 0x61, 0x74, 0x65, 0x44, 0x65, 0x70, 0x65, 0x6e, + 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x47, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, - 0x75, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x52, 0x11, 0x73, 0x74, 0x61, 0x74, 0x65, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, - 0x6e, 0x63, 0x69, 0x65, 0x73, 0x1a, 0x2a, 0x0a, 0x14, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, - 0x79, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x12, 0x12, 0x0a, - 0x04, 0x75, 0x72, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x75, 0x72, 0x6e, - 0x73, 0x1a, 0x77, 0x0a, 0x16, 0x53, 0x74, 0x61, 0x74, 0x65, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, - 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, - 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x47, 0x0a, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x70, - 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x75, - 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x70, 0x65, - 0x72, 0x74, 0x79, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xad, 0x01, 0x0a, 0x17, 0x45, - 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x69, 0x74, - 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x37, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, - 0x74, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, + 0x75, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x70, + 0x65, 0x72, 0x74, 0x79, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xad, 0x01, 0x0a, 0x17, + 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x69, + 0x74, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x37, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, + 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, + 0x72, 0x75, 0x63, 0x74, 0x52, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, + 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 
0x61, 0x73, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x07, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x73, 0x12, 0x2f, 0x0a, 0x06, 0x69, 0x6e, + 0x70, 0x75, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, - 0x75, 0x63, 0x74, 0x52, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, - 0x18, 0x0a, 0x07, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x07, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x73, 0x12, 0x2f, 0x0a, 0x06, 0x69, 0x6e, 0x70, - 0x75, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, - 0x63, 0x74, 0x52, 0x06, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x22, 0x41, 0x0a, 0x11, 0x47, 0x65, - 0x74, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, - 0x79, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x22, 0x44, 0x0a, - 0x12, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, - 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, - 0x61, 0x74, 0x61, 0x22, 0x26, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, - 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x33, 0x0a, 0x13, 0x47, - 0x65, 0x74, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x73, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x73, - 0x32, 0xb3, 0x0b, 0x0a, 0x10, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x72, 0x6f, - 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x58, 0x0a, 0x09, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, - 0x6b, 0x65, 0x12, 0x23, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x50, - 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, - 0x72, 0x70, 0x63, 0x2e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, - 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, - 0x51, 0x0a, 0x0c, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x69, 0x7a, 0x65, 0x12, - 0x1e, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x50, 0x61, 0x72, 0x61, - 0x6d, 0x65, 0x74, 0x65, 0x72, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x1f, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x50, 0x61, 0x72, 0x61, - 0x6d, 0x65, 0x74, 0x65, 0x72, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x00, 0x12, 0x48, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x53, 
0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, - 0x1b, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x47, 0x65, 0x74, 0x53, - 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x70, - 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, - 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x42, 0x0a, 0x0b, - 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x17, 0x2e, 0x70, 0x75, - 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, - 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, - 0x12, 0x3f, 0x0a, 0x0a, 0x44, 0x69, 0x66, 0x66, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x16, - 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x44, 0x69, 0x66, 0x66, 0x52, + 0x75, 0x63, 0x74, 0x52, 0x06, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x22, 0x41, 0x0a, 0x11, 0x47, + 0x65, 0x74, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x22, 0x44, + 0x0a, 0x12, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, + 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, + 0x64, 0x61, 0x74, 0x61, 0x22, 0x26, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x70, 0x69, + 0x6e, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x33, 0x0a, 0x13, + 0x47, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, + 0x73, 0x32, 0xb3, 0x0b, 0x0a, 0x10, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x72, + 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x58, 0x0a, 0x09, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, + 0x61, 0x6b, 0x65, 0x12, 0x23, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, + 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, + 0x69, 0x72, 0x70, 0x63, 0x2e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x48, 0x61, 0x6e, + 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x51, 0x0a, 0x0c, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x69, 0x7a, 0x65, + 0x12, 0x1e, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x50, 0x61, 0x72, + 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x1f, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x50, 
0x61, 0x72, + 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x48, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x12, 0x1b, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x47, 0x65, 0x74, + 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, + 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x42, 0x0a, + 0x0b, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x17, 0x2e, 0x70, + 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, + 0x63, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x3f, 0x0a, 0x0a, 0x44, 0x69, 0x66, 0x66, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x16, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x44, 0x69, 0x66, 0x66, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, + 0x72, 0x70, 0x63, 0x2e, 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x00, 0x12, 0x48, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x12, + 0x1b, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x70, + 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, + 0x72, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3f, 0x0a, 0x06, + 0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x12, 0x18, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, + 0x70, 0x63, 0x2e, 0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x19, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x49, 0x6e, 0x76, + 0x6f, 0x6b, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x47, 0x0a, + 0x0c, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x12, 0x18, 0x2e, + 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, + 0x72, 0x70, 0x63, 0x2e, 0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x39, 0x0a, 0x04, 0x43, 0x61, 0x6c, 0x6c, 0x12, 0x16, + 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, - 0x70, 0x63, 0x2e, 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x00, 0x12, 0x48, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x12, 0x1b, - 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x70, 0x75, - 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, - 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3f, 0x0a, 0x06, 0x49, - 0x6e, 0x76, 
0x6f, 0x6b, 0x65, 0x12, 0x18, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, - 0x63, 0x2e, 0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x19, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x49, 0x6e, 0x76, 0x6f, - 0x6b, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x47, 0x0a, 0x0c, - 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x12, 0x18, 0x2e, 0x70, - 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, - 0x70, 0x63, 0x2e, 0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x39, 0x0a, 0x04, 0x43, 0x61, 0x6c, 0x6c, 0x12, 0x16, 0x2e, - 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, - 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, - 0x12, 0x3c, 0x0a, 0x05, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x17, 0x2e, 0x70, 0x75, 0x6c, 0x75, - 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, - 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x39, - 0x0a, 0x04, 0x44, 0x69, 0x66, 0x66, 0x12, 0x16, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, - 0x70, 0x63, 0x2e, 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, - 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x44, 0x69, 0x66, 0x66, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3f, 0x0a, 0x06, 0x43, 0x72, 0x65, - 0x61, 0x74, 0x65, 0x12, 0x18, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, - 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, - 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x39, 0x0a, 0x04, 0x52, 0x65, - 0x61, 0x64, 0x12, 0x16, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x52, - 0x65, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x70, 0x75, 0x6c, - 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3f, 0x0a, 0x06, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, - 0x18, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x55, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x70, 0x75, 0x6c, 0x75, - 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3c, 0x0a, 0x06, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x12, 0x18, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x44, 0x65, 0x6c, - 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, - 0x74, 0x79, 0x22, 0x00, 0x12, 0x48, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x63, - 0x74, 0x12, 0x1b, 0x2e, 0x70, 0x75, 
0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x6f, - 0x6e, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, - 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x74, - 0x72, 0x75, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3a, - 0x0a, 0x06, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, - 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x40, 0x0a, 0x0d, 0x47, 0x65, - 0x74, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x16, 0x2e, 0x67, 0x6f, + 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x3c, 0x0a, 0x05, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x17, 0x2e, 0x70, 0x75, 0x6c, + 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, + 0x39, 0x0a, 0x04, 0x44, 0x69, 0x66, 0x66, 0x12, 0x16, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, + 0x72, 0x70, 0x63, 0x2e, 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x17, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x44, 0x69, 0x66, 0x66, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3f, 0x0a, 0x06, 0x43, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x12, 0x18, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, + 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, + 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x39, 0x0a, 0x04, 0x52, + 0x65, 0x61, 0x64, 0x12, 0x16, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, + 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x70, 0x75, + 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3f, 0x0a, 0x06, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x12, 0x18, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x70, 0x75, 0x6c, + 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x3c, 0x0a, 0x06, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x12, 0x18, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, - 0x70, 0x74, 0x79, 0x1a, 0x15, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, - 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x00, 0x12, 0x3b, 0x0a, 0x06, - 0x41, 0x74, 0x74, 0x61, 0x63, 0x68, 0x12, 0x17, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, - 0x70, 0x63, 0x2e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x41, 
0x74, 0x74, 0x61, 0x63, 0x68, 0x1a, - 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0a, 0x47, 0x65, 0x74, - 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x12, 0x1c, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, - 0x72, 0x70, 0x63, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, - 0x63, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4e, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x70, - 0x70, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x1d, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, - 0x63, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, - 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x34, 0x5a, 0x32, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x2f, 0x70, 0x75, 0x6c, 0x75, - 0x6d, 0x69, 0x2f, 0x73, 0x64, 0x6b, 0x2f, 0x76, 0x33, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, - 0x67, 0x6f, 0x3b, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, + 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x48, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x75, + 0x63, 0x74, 0x12, 0x1b, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, + 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x1c, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x6f, 0x6e, 0x73, + 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, + 0x3a, 0x0a, 0x06, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, + 0x79, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x40, 0x0a, 0x0d, 0x47, + 0x65, 0x74, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x16, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, + 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x15, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, + 0x2e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x00, 0x12, 0x3b, 0x0a, + 0x06, 0x41, 0x74, 0x74, 0x61, 0x63, 0x68, 0x12, 0x17, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, + 0x72, 0x70, 0x63, 0x2e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x41, 0x74, 0x74, 0x61, 0x63, 0x68, + 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0a, 0x47, 0x65, + 0x74, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x12, 0x1c, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, + 0x69, 0x72, 0x70, 0x63, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, + 0x70, 0x63, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x70, 
0x69, 0x6e, 0x67, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4e, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x4d, 0x61, + 0x70, 0x70, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x1d, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, + 0x70, 0x63, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, + 0x63, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x34, 0x5a, 0x32, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x2f, 0x70, 0x75, 0x6c, + 0x75, 0x6d, 0x69, 0x2f, 0x73, 0x64, 0x6b, 0x2f, 0x76, 0x33, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2f, 0x67, 0x6f, 0x3b, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -4946,6 +4947,7 @@ func file_pulumi_provider_proto_init() { (*ParameterizeRequest_Value)(nil), } file_pulumi_provider_proto_msgTypes[6].OneofWrappers = []interface{}{} + file_pulumi_provider_proto_msgTypes[26].OneofWrappers = []interface{}{} type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/github.com/pulumi/pulumi/sdk/v3/proto/go/resource.pb.go b/vendor/github.com/pulumi/pulumi/sdk/v3/proto/go/resource.pb.go index b29fef910..1163a1eeb 100644 --- a/vendor/github.com/pulumi/pulumi/sdk/v3/proto/go/resource.pb.go +++ b/vendor/github.com/pulumi/pulumi/sdk/v3/proto/go/resource.pb.go @@ -408,7 +408,7 @@ type RegisterResourceRequest struct { Parent string `protobuf:"bytes,3,opt,name=parent,proto3" json:"parent,omitempty"` // an optional parent URN that this child resource belongs to. Custom bool `protobuf:"varint,4,opt,name=custom,proto3" json:"custom,omitempty"` // true if the resource is a custom, managed by a plugin's CRUD operations. Object *structpb.Struct `protobuf:"bytes,5,opt,name=object,proto3" json:"object,omitempty"` // an object produced by the interpreter/source. - Protect bool `protobuf:"varint,6,opt,name=protect,proto3" json:"protect,omitempty"` // true if the resource should be marked protected. + Protect *bool `protobuf:"varint,6,opt,name=protect,proto3,oneof" json:"protect,omitempty"` // true if the resource should be marked protected. Dependencies []string `protobuf:"bytes,7,rep,name=dependencies,proto3" json:"dependencies,omitempty"` // a list of URNs that this resource depends on, as observed by the language host. Provider string `protobuf:"bytes,8,opt,name=provider,proto3" json:"provider,omitempty"` // an optional reference to the provider to manage this resource's CRUD operations. PropertyDependencies map[string]*RegisterResourceRequest_PropertyDependencies `protobuf:"bytes,9,rep,name=propertyDependencies,proto3" json:"propertyDependencies,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // a map from property keys to the dependencies of the property. 
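[Editorial aside, not part of the vendored patch.] The `protect` hunks above all follow from one schema change: the field became proto3 `optional`, so protoc-gen-go now emits a `*bool` with a synthetic `_protect` oneof for presence tracking. That is what the raw-descriptor deltas encode: `0x48, 0x00` sets `oneof_index: 0` and `0x88, 0x01, 0x01` sets `proto3_optional: true` on the field, the added `0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74` bytes declare the `_protect` oneof, and the `OneofWrappers` line in `file_pulumi_provider_proto_init` registers it. The sketch below mirrors that generated shape with a local stand-in type; `registerResourceRequest` and `hasProtect` are illustrative names only, not the real pulumirpc API (generated code exposes presence through the pointer field itself):

```go
package main

import "fmt"

// Stand-in for the generated RegisterResourceRequest after this patch:
// a proto3 optional field becomes a pointer, where nil means "not set".
type registerResourceRequest struct {
	Protect *bool
}

// GetProtect mirrors the patched getter: nil-check both receiver and field,
// dereference only when the field was populated, else return the zero value.
func (x *registerResourceRequest) GetProtect() bool {
	if x != nil && x.Protect != nil {
		return *x.Protect
	}
	return false
}

// hasProtect is a hypothetical convenience for this sketch; the generated
// code has no such method, and callers test the pointer field directly.
func (x *registerResourceRequest) hasProtect() bool {
	return x != nil && x.Protect != nil
}

func main() {
	f := false
	unset := &registerResourceRequest{}               // field never set
	explicit := &registerResourceRequest{Protect: &f} // field set to false

	// Both getters report false, but presence now distinguishes the two.
	fmt.Println(unset.GetProtect(), unset.hasProtect())       // false false
	fmt.Println(explicit.GetProtect(), explicit.hasProtect()) // false true
}
```

The practical consequence of this major bump is that `GetProtect() == false` no longer implies the caller asked for an unprotected resource; code that cares about the distinction checks `req.Protect != nil` before trusting the getter's zero value.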
@@ -513,8 +513,8 @@ func (x *RegisterResourceRequest) GetObject() *structpb.Struct { } func (x *RegisterResourceRequest) GetProtect() bool { - if x != nil { - return x.Protect + if x != nil && x.Protect != nil { + return *x.Protect } return false } @@ -1090,7 +1090,7 @@ type TransformResourceOptions struct { unknownFields protoimpl.UnknownFields DependsOn []string `protobuf:"bytes,1,rep,name=depends_on,json=dependsOn,proto3" json:"depends_on,omitempty"` - Protect bool `protobuf:"varint,2,opt,name=protect,proto3" json:"protect,omitempty"` + Protect *bool `protobuf:"varint,2,opt,name=protect,proto3,oneof" json:"protect,omitempty"` IgnoreChanges []string `protobuf:"bytes,3,rep,name=ignore_changes,json=ignoreChanges,proto3" json:"ignore_changes,omitempty"` ReplaceOnChanges []string `protobuf:"bytes,4,rep,name=replace_on_changes,json=replaceOnChanges,proto3" json:"replace_on_changes,omitempty"` Version string `protobuf:"bytes,5,opt,name=version,proto3" json:"version,omitempty"` @@ -1146,8 +1146,8 @@ func (x *TransformResourceOptions) GetDependsOn() []string { } func (x *TransformResourceOptions) GetProtect() bool { - if x != nil { - return x.Protect + if x != nil && x.Protect != nil { + return *x.Protect } return false } @@ -2048,7 +2048,7 @@ var file_pulumi_resource_proto_rawDesc = []byte{ 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, - 0x74, 0x69, 0x65, 0x73, 0x22, 0x8e, 0x0f, 0x0a, 0x17, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, + 0x74, 0x69, 0x65, 0x73, 0x22, 0x9f, 0x0f, 0x0a, 0x17, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, @@ -2058,110 +2058,290 @@ var file_pulumi_resource_proto_rawDesc = []byte{ 0x52, 0x06, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x12, 0x2f, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, - 0x74, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x6f, - 0x74, 0x65, 0x63, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x74, - 0x65, 0x63, 0x74, 0x12, 0x22, 0x0a, 0x0c, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, - 0x69, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x70, 0x65, 0x6e, - 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, - 0x64, 0x65, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, - 0x64, 0x65, 0x72, 0x12, 0x70, 0x0a, 0x14, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x44, - 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x3c, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, + 0x74, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1d, 0x0a, 0x07, 0x70, 0x72, 0x6f, + 0x74, 0x65, 0x63, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x07, 0x70, 0x72, + 0x6f, 0x74, 0x65, 0x63, 0x74, 
0x88, 0x01, 0x01, 0x12, 0x22, 0x0a, 0x0c, 0x64, 0x65, 0x70, 0x65, + 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, + 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, + 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x70, 0x0a, 0x14, 0x70, 0x72, 0x6f, 0x70, + 0x65, 0x72, 0x74, 0x79, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, + 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, + 0x70, 0x63, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x50, 0x72, 0x6f, 0x70, 0x65, + 0x72, 0x74, 0x79, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x14, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x44, 0x65, + 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x12, 0x30, 0x0a, 0x13, 0x64, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x63, + 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, + 0x65, 0x66, 0x6f, 0x72, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x12, 0x18, 0x0a, 0x07, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x24, 0x0a, 0x0d, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, + 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x69, + 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x24, 0x0a, 0x0d, + 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x18, 0x0d, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x0d, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x53, 0x65, 0x63, 0x72, 0x65, + 0x74, 0x73, 0x12, 0x38, 0x0a, 0x17, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, + 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x73, 0x18, 0x0e, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x17, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x53, + 0x65, 0x63, 0x72, 0x65, 0x74, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x73, 0x12, 0x1c, 0x0a, 0x09, + 0x61, 0x6c, 0x69, 0x61, 0x73, 0x55, 0x52, 0x4e, 0x73, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x09, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x55, 0x52, 0x4e, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x69, 0x6d, + 0x70, 0x6f, 0x72, 0x74, 0x49, 0x64, 0x18, 0x10, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x69, 0x6d, + 0x70, 0x6f, 0x72, 0x74, 0x49, 0x64, 0x12, 0x59, 0x0a, 0x0e, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, + 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, + 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, + 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x73, 0x52, 0x0e, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x73, 0x12, 0x3e, 0x0a, 0x1a, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x65, 0x66, 0x6f, 0x72, + 0x65, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x18, + 0x12, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1a, 0x64, 0x65, 
0x6c, 0x65, 0x74, 0x65, 0x42, 0x65, 0x66, + 0x6f, 0x72, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x65, + 0x64, 0x12, 0x34, 0x0a, 0x15, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x50, 0x61, 0x72, + 0x74, 0x69, 0x61, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x15, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, + 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x6d, 0x6f, 0x74, + 0x65, 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x12, + 0x28, 0x0a, 0x0f, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x4f, 0x0a, 0x09, 0x70, 0x72, 0x6f, + 0x76, 0x69, 0x64, 0x65, 0x72, 0x73, 0x18, 0x16, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x70, + 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, + 0x72, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x2e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x09, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x73, 0x12, 0x2a, 0x0a, 0x10, 0x72, 0x65, + 0x70, 0x6c, 0x61, 0x63, 0x65, 0x4f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x17, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x4f, 0x6e, 0x43, + 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x2c, 0x0a, 0x11, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x55, 0x52, 0x4c, 0x18, 0x18, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x11, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, + 0x64, 0x55, 0x52, 0x4c, 0x12, 0x61, 0x0a, 0x0f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x43, 0x68, + 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x1e, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x37, 0x2e, + 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, + 0x65, 0x72, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x2e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x43, 0x68, + 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x26, 0x0a, 0x0e, 0x72, 0x65, 0x74, 0x61, 0x69, + 0x6e, 0x4f, 0x6e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x18, 0x19, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0e, 0x72, 0x65, 0x74, 0x61, 0x69, 0x6e, 0x4f, 0x6e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, + 0x2a, 0x0a, 0x07, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x1a, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x10, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x41, 0x6c, 0x69, + 0x61, 0x73, 0x52, 0x07, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x20, 0x0a, 0x0b, 0x64, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x57, 0x69, 0x74, 0x68, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0b, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x57, 0x69, 0x74, 0x68, 0x12, 0x1e, 0x0a, + 0x0a, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x53, 0x70, 0x65, 0x63, 0x73, 0x18, 0x1c, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0a, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x53, 0x70, 0x65, 0x63, 0x73, 0x12, 0x41, 0x0a, + 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 
0x6f, 0x6e, 0x18, + 0x1d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, + 0x63, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x33, 0x0a, 0x0a, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x73, 0x18, 0x1f, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, + 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x52, 0x0a, 0x74, 0x72, 0x61, 0x6e, 0x73, + 0x66, 0x6f, 0x72, 0x6d, 0x73, 0x12, 0x38, 0x0a, 0x17, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, + 0x73, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, + 0x18, 0x20, 0x20, 0x01, 0x28, 0x08, 0x52, 0x17, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x73, + 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x12, + 0x1e, 0x0a, 0x0a, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x52, 0x65, 0x66, 0x18, 0x21, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x52, 0x65, 0x66, 0x1a, + 0x2a, 0x0a, 0x14, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x44, 0x65, 0x70, 0x65, 0x6e, + 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x72, 0x6e, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x75, 0x72, 0x6e, 0x73, 0x1a, 0x58, 0x0a, 0x0e, 0x43, + 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x73, 0x12, 0x16, 0x0a, + 0x06, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x63, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x16, 0x0a, + 0x06, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x1a, 0x80, 0x01, 0x0a, 0x19, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, + 0x74, 0x79, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x4d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, + 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, + 0x79, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3c, 0x0a, 0x0e, 0x50, 0x72, 0x6f, 0x76, + 0x69, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x42, 0x0a, 0x14, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x05, 
0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x70, + 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x22, 0xed, 0x03, 0x0a, 0x18, 0x52, 0x65, 0x67, 0x69, 0x73, + 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x75, 0x72, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x2f, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x06, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x73, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x18, + 0x0a, 0x07, 0x73, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x07, 0x73, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x71, 0x0a, 0x14, 0x70, 0x72, 0x6f, 0x70, + 0x65, 0x72, 0x74, 0x79, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, + 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3d, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, + 0x70, 0x63, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x70, + 0x65, 0x72, 0x74, 0x79, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x14, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x44, + 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x12, 0x29, 0x0a, 0x06, 0x72, + 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x11, 0x2e, 0x70, 0x75, + 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, + 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x1a, 0x2a, 0x0a, 0x14, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, + 0x74, 0x79, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x12, 0x12, + 0x0a, 0x04, 0x75, 0x72, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x75, 0x72, + 0x6e, 0x73, 0x1a, 0x81, 0x01, 0x0a, 0x19, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x44, + 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x4e, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x38, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x44, 0x65, - 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, - 0x14, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, - 0x6e, 0x63, 0x69, 0x65, 0x73, 0x12, 0x30, 0x0a, 0x13, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, - 0x65, 0x66, 0x6f, 0x72, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x18, 0x0a, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x13, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, - 0x52, 0x65, 0x70, 0x6c, 0x61, 
0x63, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x12, 0x24, 0x0a, 0x0d, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, - 0x65, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, - 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x24, 0x0a, 0x0d, 0x61, 0x63, 0x63, 0x65, 0x70, - 0x74, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, - 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x12, 0x38, 0x0a, - 0x17, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x53, 0x65, 0x63, 0x72, 0x65, - 0x74, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x73, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, - 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, - 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x61, 0x6c, 0x69, 0x61, 0x73, - 0x55, 0x52, 0x4e, 0x73, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x61, 0x6c, 0x69, 0x61, - 0x73, 0x55, 0x52, 0x4e, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x49, - 0x64, 0x18, 0x10, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x49, - 0x64, 0x12, 0x59, 0x0a, 0x0e, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x6f, - 0x75, 0x74, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x70, 0x75, 0x6c, 0x75, - 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x43, 0x75, - 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x73, 0x52, 0x0e, 0x63, 0x75, - 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x73, 0x12, 0x3e, 0x0a, 0x1a, - 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x52, 0x65, 0x70, 0x6c, - 0x61, 0x63, 0x65, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x1a, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x52, 0x65, - 0x70, 0x6c, 0x61, 0x63, 0x65, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x12, 0x34, 0x0a, 0x15, - 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x73, 0x75, 0x70, - 0x70, 0x6f, 0x72, 0x74, 0x73, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x18, 0x14, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x06, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x12, 0x28, 0x0a, 0x0f, 0x61, 0x63, - 0x63, 0x65, 0x70, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x15, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x0f, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x73, 0x12, 0x4f, 0x0a, 0x09, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, - 0x73, 0x18, 0x16, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x44, + 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x65, 0x0a, 0x1e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, + 0x65, 0x72, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 
0x65, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6e, 0x12, 0x31, 0x0a, 0x07, 0x6f, 0x75, + 0x74, 0x70, 0x75, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, + 0x72, 0x75, 0x63, 0x74, 0x52, 0x07, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x73, 0x22, 0xec, 0x03, + 0x0a, 0x15, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x6f, 0x6b, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x74, 0x6f, 0x6b, 0x12, 0x2b, 0x0a, 0x04, 0x61, 0x72, 0x67, + 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, + 0x52, 0x04, 0x61, 0x72, 0x67, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, + 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, + 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x0f, + 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x52, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x2c, 0x0a, 0x11, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x55, 0x52, 0x4c, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x11, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, + 0x64, 0x55, 0x52, 0x4c, 0x12, 0x5f, 0x0a, 0x0f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x43, 0x68, + 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x35, 0x2e, + 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, + 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x41, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, + 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, + 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x70, 0x61, 0x63, 0x6b, + 0x61, 0x67, 0x65, 0x52, 0x65, 0x66, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x61, + 0x63, 0x6b, 0x61, 0x67, 0x65, 0x52, 0x65, 0x66, 0x1a, 0x42, 0x0a, 0x14, 0x50, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 
0xcd, 0x06, 0x0a, + 0x13, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x6f, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x74, 0x6f, 0x6b, 0x12, 0x2b, 0x0a, 0x04, 0x61, 0x72, 0x67, 0x73, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x04, 0x61, + 0x72, 0x67, 0x73, 0x12, 0x5d, 0x0a, 0x0f, 0x61, 0x72, 0x67, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, + 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x70, + 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x41, 0x72, 0x67, + 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x0f, 0x61, 0x72, 0x67, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, + 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x18, + 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2c, 0x0a, 0x11, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x55, 0x52, 0x4c, 0x18, 0x0d, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x11, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x44, 0x6f, 0x77, 0x6e, 0x6c, + 0x6f, 0x61, 0x64, 0x55, 0x52, 0x4c, 0x12, 0x5d, 0x0a, 0x0f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x10, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x33, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, + 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x41, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, + 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, + 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x70, 0x61, 0x63, 0x6b, + 0x61, 0x67, 0x65, 0x52, 0x65, 0x66, 0x18, 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x61, + 0x63, 0x6b, 0x61, 0x67, 0x65, 0x52, 0x65, 0x66, 0x1a, 0x2a, 0x0a, 0x14, 0x41, 0x72, 0x67, 0x75, + 0x6d, 0x65, 0x6e, 0x74, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, + 0x12, 0x12, 0x0a, 0x04, 0x75, 0x72, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, + 0x75, 0x72, 0x6e, 0x73, 0x1a, 0x77, 0x0a, 0x14, 0x41, 0x72, 0x67, 0x44, 0x65, 0x70, 0x65, 0x6e, + 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x49, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, + 0x70, 
0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x41, 0x72, + 0x67, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, + 0x65, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x42, 0x0a, + 0x14, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08, 0x4a, 0x04, 0x08, + 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x4a, 0x04, 0x08, 0x0a, 0x10, 0x0b, 0x4a, + 0x04, 0x08, 0x0b, 0x10, 0x0c, 0x4a, 0x04, 0x08, 0x0c, 0x10, 0x0d, 0x4a, 0x04, 0x08, 0x0e, 0x10, + 0x0f, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x05, 0x73, 0x74, 0x61, 0x63, + 0x6b, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x10, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x06, 0x64, 0x72, 0x79, + 0x52, 0x75, 0x6e, 0x52, 0x08, 0x70, 0x61, 0x72, 0x61, 0x6c, 0x6c, 0x65, 0x6c, 0x52, 0x0f, 0x6d, + 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x0c, + 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xbc, 0x07, 0x0a, + 0x18, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x64, 0x65, 0x70, + 0x65, 0x6e, 0x64, 0x73, 0x5f, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x64, + 0x65, 0x70, 0x65, 0x6e, 0x64, 0x73, 0x4f, 0x6e, 0x12, 0x1d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x74, + 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x07, 0x70, 0x72, 0x6f, + 0x74, 0x65, 0x63, 0x74, 0x88, 0x01, 0x01, 0x12, 0x25, 0x0a, 0x0e, 0x69, 0x67, 0x6e, 0x6f, 0x72, + 0x65, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x0d, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x2c, + 0x0a, 0x12, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x5f, 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, + 0x6e, 0x67, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x72, 0x65, 0x70, 0x6c, + 0x61, 0x63, 0x65, 0x4f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x07, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, + 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, + 0x72, 0x70, 0x63, 0x2e, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x07, 0x61, 0x6c, 0x69, 0x61, 0x73, + 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x5a, + 0x0a, 0x0f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x52, 
0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x50, 0x72, 0x6f, 0x76, - 0x69, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x76, - 0x69, 0x64, 0x65, 0x72, 0x73, 0x12, 0x2a, 0x0a, 0x10, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, - 0x4f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x17, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x10, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x4f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, - 0x73, 0x12, 0x2c, 0x0a, 0x11, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x44, 0x6f, 0x77, 0x6e, 0x6c, - 0x6f, 0x61, 0x64, 0x55, 0x52, 0x4c, 0x18, 0x18, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x70, 0x6c, - 0x75, 0x67, 0x69, 0x6e, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x55, 0x52, 0x4c, 0x12, - 0x61, 0x0a, 0x0f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, - 0x6d, 0x73, 0x18, 0x1e, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, - 0x69, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x50, 0x6c, 0x75, - 0x67, 0x69, 0x6e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x0f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, - 0x6d, 0x73, 0x12, 0x26, 0x0a, 0x0e, 0x72, 0x65, 0x74, 0x61, 0x69, 0x6e, 0x4f, 0x6e, 0x44, 0x65, - 0x6c, 0x65, 0x74, 0x65, 0x18, 0x19, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x72, 0x65, 0x74, 0x61, - 0x69, 0x6e, 0x4f, 0x6e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x2a, 0x0a, 0x07, 0x61, 0x6c, - 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x1a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x70, 0x75, - 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x52, 0x07, 0x61, - 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x64, 0x57, 0x69, 0x74, 0x68, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x6c, - 0x65, 0x74, 0x65, 0x64, 0x57, 0x69, 0x74, 0x68, 0x12, 0x1e, 0x0a, 0x0a, 0x61, 0x6c, 0x69, 0x61, - 0x73, 0x53, 0x70, 0x65, 0x63, 0x73, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x6c, - 0x69, 0x61, 0x73, 0x53, 0x70, 0x65, 0x63, 0x73, 0x12, 0x41, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x19, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x33, 0x0a, 0x0a, 0x74, - 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x73, 0x18, 0x1f, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x13, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, - 0x62, 0x61, 0x63, 0x6b, 0x52, 0x0a, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x73, - 0x12, 0x38, 0x0a, 0x17, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x52, 0x65, 0x73, 0x75, - 0x6c, 0x74, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x18, 0x20, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x17, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x52, 0x65, 0x73, 0x75, 0x6c, - 0x74, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x12, 0x1e, 0x0a, 0x0a, 0x70, 0x61, - 0x63, 0x6b, 0x61, 0x67, 0x65, 0x52, 0x65, 0x66, 0x18, 
0x21, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, - 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x52, 0x65, 0x66, 0x1a, 0x2a, 0x0a, 0x14, 0x50, 0x72, - 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, - 0x65, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x72, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x04, 0x75, 0x72, 0x6e, 0x73, 0x1a, 0x58, 0x0a, 0x0e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, - 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x72, 0x65, 0x61, - 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, - 0x12, 0x16, 0x0a, 0x06, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x06, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x65, 0x6c, 0x65, - 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x1a, 0x80, 0x01, 0x0a, 0x19, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x44, 0x65, 0x70, - 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, - 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, - 0x12, 0x4d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x37, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x67, 0x69, - 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x2e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x44, 0x65, 0x70, 0x65, - 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, - 0x02, 0x38, 0x01, 0x1a, 0x3c, 0x0a, 0x0e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x73, + 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x43, 0x75, 0x73, 0x74, + 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x73, 0x52, 0x0e, 0x63, 0x75, 0x73, 0x74, + 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x5f, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x75, 0x72, + 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x44, + 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x55, 0x72, 0x6c, 0x12, 0x28, 0x0a, 0x10, 0x72, 0x65, + 0x74, 0x61, 0x69, 0x6e, 0x5f, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x18, 0x0a, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x72, 0x65, 0x74, 0x61, 0x69, 0x6e, 0x4f, 0x6e, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x5f, + 0x77, 0x69, 0x74, 0x68, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x64, 0x57, 0x69, 0x74, 0x68, 0x12, 0x37, 0x0a, 0x15, 0x64, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x5f, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, + 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x48, 0x01, 0x52, 0x13, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x88, 0x01, 0x01, + 0x12, 0x3a, 0x0a, 0x19, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x73, + 0x65, 0x63, 0x72, 0x65, 0x74, 0x5f, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x73, 0x18, 0x0d, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x17, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x53, + 0x65, 0x63, 0x72, 0x65, 0x74, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x73, 0x12, 
0x50, 0x0a, 0x09, + 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x73, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x32, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x54, 0x72, 0x61, 0x6e, + 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x73, 0x12, 0x63, + 0x0a, 0x10, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, + 0x6d, 0x73, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, + 0x69, 0x72, 0x70, 0x63, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x50, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x0f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, + 0x75, 0x6d, 0x73, 0x1a, 0x3c, 0x0a, 0x0e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, @@ -2169,335 +2349,157 @@ var file_pulumi_resource_proto_rawDesc = []byte{ 0x73, 0x75, 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xed, 0x03, 0x0a, 0x18, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, - 0x65, 0x72, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x03, 0x75, 0x72, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x02, 0x69, 0x64, 0x12, 0x2f, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x06, 0x6f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x73, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x18, 0x0a, - 0x07, 0x73, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, - 0x73, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x71, 0x0a, 0x14, 0x70, 0x72, 0x6f, 0x70, 0x65, - 0x72, 0x74, 0x79, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x18, - 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3d, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, - 0x63, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x70, 0x65, - 0x72, 0x74, 0x79, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x52, 0x14, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x44, 0x65, - 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 
0x69, 0x65, 0x73, 0x12, 0x29, 0x0a, 0x06, 0x72, 0x65, - 0x73, 0x75, 0x6c, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x11, 0x2e, 0x70, 0x75, 0x6c, - 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, - 0x65, 0x73, 0x75, 0x6c, 0x74, 0x1a, 0x2a, 0x0a, 0x14, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, - 0x79, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x12, 0x12, 0x0a, - 0x04, 0x75, 0x72, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x75, 0x72, 0x6e, - 0x73, 0x1a, 0x81, 0x01, 0x0a, 0x19, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x44, 0x65, - 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, - 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, - 0x79, 0x12, 0x4e, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x38, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x67, - 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x44, 0x65, - 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x65, 0x0a, 0x1e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, - 0x72, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6e, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6e, 0x12, 0x31, 0x0a, 0x07, 0x6f, 0x75, 0x74, - 0x70, 0x75, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, - 0x75, 0x63, 0x74, 0x52, 0x07, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x73, 0x22, 0xec, 0x03, 0x0a, - 0x15, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x6f, 0x6b, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x03, 0x74, 0x6f, 0x6b, 0x12, 0x2b, 0x0a, 0x04, 0x61, 0x72, 0x67, 0x73, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, - 0x04, 0x61, 0x72, 0x67, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, - 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, - 0x72, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x0f, 0x61, - 0x63, 0x63, 0x65, 0x70, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x52, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x2c, 0x0a, 0x11, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x44, - 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x55, 0x52, 0x4c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x11, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, - 0x55, 0x52, 0x4c, 0x12, 0x5f, 0x0a, 0x0f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x43, 0x68, 0x65, - 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 
0x0b, 0x32, 0x35, 0x2e, 0x70, - 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x50, - 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x52, 0x0f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x43, 0x68, 0x65, 0x63, 0x6b, - 0x73, 0x75, 0x6d, 0x73, 0x12, 0x41, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x6f, - 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, - 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, - 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, - 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x70, 0x61, 0x63, 0x6b, 0x61, - 0x67, 0x65, 0x52, 0x65, 0x66, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x61, 0x63, - 0x6b, 0x61, 0x67, 0x65, 0x52, 0x65, 0x66, 0x1a, 0x42, 0x0a, 0x14, 0x50, 0x6c, 0x75, 0x67, 0x69, - 0x6e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, - 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, - 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xcd, 0x06, 0x0a, 0x13, - 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x6f, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x03, 0x74, 0x6f, 0x6b, 0x12, 0x2b, 0x0a, 0x04, 0x61, 0x72, 0x67, 0x73, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x04, 0x61, 0x72, - 0x67, 0x73, 0x12, 0x5d, 0x0a, 0x0f, 0x61, 0x72, 0x67, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, - 0x6e, 0x63, 0x69, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x70, 0x75, - 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x43, 0x61, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x41, 0x72, 0x67, 0x44, - 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x52, 0x0f, 0x61, 0x72, 0x67, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, - 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x18, 0x0a, - 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, - 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2c, 0x0a, 0x11, 0x70, 0x6c, 0x75, 0x67, 0x69, - 0x6e, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x55, 0x52, 0x4c, 0x18, 0x0d, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x11, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, - 0x61, 0x64, 0x55, 0x52, 0x4c, 0x12, 0x5d, 0x0a, 0x0f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x43, - 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x10, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, - 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 
0x50, - 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x52, 0x0f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x43, 0x68, 0x65, 0x63, 0x6b, - 0x73, 0x75, 0x6d, 0x73, 0x12, 0x41, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x6f, - 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, - 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, - 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, - 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x70, 0x61, 0x63, 0x6b, 0x61, - 0x67, 0x65, 0x52, 0x65, 0x66, 0x18, 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x61, 0x63, - 0x6b, 0x61, 0x67, 0x65, 0x52, 0x65, 0x66, 0x1a, 0x2a, 0x0a, 0x14, 0x41, 0x72, 0x67, 0x75, 0x6d, - 0x65, 0x6e, 0x74, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x12, - 0x12, 0x0a, 0x04, 0x75, 0x72, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x75, - 0x72, 0x6e, 0x73, 0x1a, 0x77, 0x0a, 0x14, 0x41, 0x72, 0x67, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, - 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, - 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x49, 0x0a, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x70, - 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x41, 0x72, 0x67, - 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, - 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x42, 0x0a, 0x14, - 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, - 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08, 0x4a, 0x04, 0x08, 0x08, - 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x4a, 0x04, 0x08, 0x0a, 0x10, 0x0b, 0x4a, 0x04, - 0x08, 0x0b, 0x10, 0x0c, 0x4a, 0x04, 0x08, 0x0c, 0x10, 0x0d, 0x4a, 0x04, 0x08, 0x0e, 0x10, 0x0f, - 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x05, 0x73, 0x74, 0x61, 0x63, 0x6b, - 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x10, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x06, 0x64, 0x72, 0x79, 0x52, - 0x75, 0x6e, 0x52, 0x08, 0x70, 0x61, 0x72, 0x61, 0x6c, 0x6c, 0x65, 0x6c, 0x52, 0x0f, 0x6d, 0x6f, - 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x0c, 0x6f, - 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xab, 0x07, 0x0a, 0x18, - 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x65, - 0x6e, 0x64, 0x73, 0x5f, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x64, 0x65, - 0x70, 0x65, 0x6e, 0x64, 0x73, 0x4f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x74, 0x65, - 0x63, 0x74, 0x18, 
0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, - 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x6e, - 0x67, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x69, 0x67, 0x6e, 0x6f, 0x72, - 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x72, 0x65, 0x70, 0x6c, - 0x61, 0x63, 0x65, 0x5f, 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x04, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x4f, 0x6e, 0x43, - 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x12, 0x2a, 0x0a, 0x07, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x10, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x41, 0x6c, - 0x69, 0x61, 0x73, 0x52, 0x07, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, - 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x5a, 0x0a, 0x0f, 0x63, 0x75, 0x73, 0x74, - 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x31, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, - 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, - 0x6f, 0x75, 0x74, 0x73, 0x52, 0x0e, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, - 0x6f, 0x75, 0x74, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x5f, 0x64, - 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x11, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, - 0x64, 0x55, 0x72, 0x6c, 0x12, 0x28, 0x0a, 0x10, 0x72, 0x65, 0x74, 0x61, 0x69, 0x6e, 0x5f, 0x6f, - 0x6e, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, - 0x72, 0x65, 0x74, 0x61, 0x69, 0x6e, 0x4f, 0x6e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x21, - 0x0a, 0x0c, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x77, 0x69, 0x74, 0x68, 0x18, 0x0b, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x57, 0x69, 0x74, - 0x68, 0x12, 0x37, 0x0a, 0x15, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x62, 0x65, 0x66, 0x6f, - 0x72, 0x65, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, - 0x48, 0x00, 0x52, 0x13, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, - 0x52, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x19, 0x61, 0x64, - 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x5f, - 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x61, - 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4f, - 0x75, 0x74, 0x70, 0x75, 0x74, 0x73, 0x12, 0x50, 0x0a, 0x09, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, - 0x65, 0x72, 0x73, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x70, 0x75, 0x6c, 0x75, - 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x52, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 
0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x50, - 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x70, - 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x73, 0x12, 0x63, 0x0a, 0x10, 0x70, 0x6c, 0x75, 0x67, - 0x69, 0x6e, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x0f, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x54, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, + 0x74, 0x42, 0x18, 0x0a, 0x16, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x62, 0x65, 0x66, + 0x6f, 0x72, 0x65, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x22, 0xe2, 0x01, 0x0a, 0x10, + 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x74, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x75, 0x73, 0x74, + 0x6f, 0x6d, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, + 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x37, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x70, + 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, + 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, + 0x73, 0x12, 0x3d, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x43, 0x68, - 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x70, 0x6c, - 0x75, 0x67, 0x69, 0x6e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x1a, 0x3c, 0x0a, - 0x0e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, - 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, - 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x42, 0x0a, 0x14, 0x50, - 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, - 0x18, 0x0a, 0x16, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x62, 0x65, 0x66, 0x6f, 0x72, - 0x65, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x22, 0xe2, 0x01, 0x0a, 0x10, 0x54, 0x72, - 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, - 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, - 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 
0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x12, 0x16, - 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, - 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x37, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, - 0x74, 0x69, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x22, 0x8b, 0x01, 0x0a, 0x11, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, + 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, - 0x3d, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, + 0x3d, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x8b, - 0x01, 0x0a, 0x11, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, - 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, - 0x74, 0x52, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x3d, 0x0a, - 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, - 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, - 0x66, 0x6f, 0x72, 0x6d, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x98, 0x01, 0x0a, - 0x16, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x2b, 0x0a, - 0x04, 0x61, 0x72, 0x67, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, - 0x72, 0x75, 0x63, 0x74, 0x52, 0x04, 0x61, 0x72, 0x67, 0x73, 0x12, 0x3b, 0x0a, 0x07, 0x6f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x70, 0x75, - 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, - 0x6d, 0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x83, 0x01, 0x0a, 0x17, 0x54, 0x72, 0x61, 0x6e, - 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, - 
0x6e, 0x73, 0x65, 0x12, 0x2b, 0x0a, 0x04, 0x61, 0x72, 0x67, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x04, 0x61, 0x72, 0x67, 0x73, - 0x12, 0x3b, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x21, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x54, 0x72, - 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa5, 0x02, - 0x0a, 0x16, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x49, 0x6e, 0x76, 0x6f, 0x6b, - 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x76, - 0x69, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x76, - 0x69, 0x64, 0x65, 0x72, 0x12, 0x2e, 0x0a, 0x13, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x5f, 0x64, - 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x11, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, - 0x64, 0x55, 0x72, 0x6c, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x61, - 0x0a, 0x10, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, - 0x6d, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, - 0x69, 0x72, 0x70, 0x63, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x49, 0x6e, - 0x76, 0x6f, 0x6b, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x50, 0x6c, 0x75, 0x67, - 0x69, 0x6e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x52, 0x0f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, - 0x73, 0x1a, 0x42, 0x0a, 0x14, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x43, 0x68, 0x65, 0x63, 0x6b, - 0x73, 0x75, 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xc0, 0x02, 0x0a, 0x16, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, - 0x65, 0x72, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x21, - 0x0a, 0x0c, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x55, 0x72, - 0x6c, 0x12, 0x4e, 0x0a, 0x09, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x04, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, - 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, - 0x73, 0x45, 0x6e, 0x74, 
0x72, 0x79, 0x52, 0x09, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, - 0x73, 0x12, 0x47, 0x0a, 0x10, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x69, 0x7a, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x70, 0x75, - 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, - 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, - 0x74, 0x65, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x3c, 0x0a, 0x0e, 0x43, 0x68, - 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, - 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x2b, 0x0a, 0x17, 0x52, 0x65, 0x67, 0x69, - 0x73, 0x74, 0x65, 0x72, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x72, 0x65, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x03, 0x72, 0x65, 0x66, 0x22, 0x56, 0x0a, 0x10, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, - 0x65, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, - 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, - 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2a, 0x29, 0x0a, - 0x06, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x43, 0x43, 0x45, - 0x53, 0x53, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x46, 0x41, 0x49, 0x4c, 0x10, 0x01, 0x12, 0x08, - 0x0a, 0x04, 0x53, 0x4b, 0x49, 0x50, 0x10, 0x02, 0x32, 0xd0, 0x06, 0x0a, 0x0f, 0x52, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x12, 0x5a, 0x0a, 0x0f, - 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, - 0x21, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x75, 0x70, 0x70, - 0x6f, 0x72, 0x74, 0x73, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x53, - 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x47, 0x0a, 0x06, 0x49, 0x6e, 0x76, 0x6f, - 0x6b, 0x65, 0x12, 0x20, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x52, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, - 0x2e, 0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x00, 0x12, 0x4f, 0x0a, 0x0c, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x49, 0x6e, 0x76, 0x6f, 0x6b, - 0x65, 0x12, 0x20, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, - 0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x52, 0x65, 
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, - 0x30, 0x01, 0x12, 0x41, 0x0a, 0x04, 0x43, 0x61, 0x6c, 0x6c, 0x12, 0x1e, 0x2e, 0x70, 0x75, 0x6c, - 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, - 0x61, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x70, 0x75, 0x6c, - 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a, 0x0c, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1e, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, - 0x63, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, - 0x63, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x10, 0x52, 0x65, 0x67, 0x69, - 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x22, 0x2e, 0x70, - 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, - 0x72, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x23, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x67, - 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5e, 0x0a, 0x17, 0x52, 0x65, 0x67, 0x69, 0x73, - 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x75, 0x74, 0x70, 0x75, - 0x74, 0x73, 0x12, 0x29, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x52, - 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, - 0x75, 0x74, 0x70, 0x75, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x98, + 0x01, 0x0a, 0x16, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x49, 0x6e, 0x76, 0x6f, + 0x6b, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x6b, + 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x12, + 0x2b, 0x0a, 0x04, 0x61, 0x72, 0x67, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x47, 0x0a, 0x16, 0x52, 0x65, 0x67, 0x69, 0x73, - 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x63, 0x6b, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, - 0x6d, 0x12, 0x13, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, - 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, - 0x12, 0x4d, 0x0a, 0x1c, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x63, - 0x6b, 0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, - 0x12, 0x13, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, - 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 
0x79, 0x22, 0x00, 0x12, - 0x5a, 0x0a, 0x0f, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x50, 0x61, 0x63, 0x6b, 0x61, - 0x67, 0x65, 0x12, 0x21, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x52, - 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, - 0x63, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, - 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x34, 0x5a, 0x32, 0x67, - 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, - 0x2f, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x2f, 0x73, 0x64, 0x6b, 0x2f, 0x76, 0x33, 0x2f, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x3b, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, - 0x63, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x04, 0x61, 0x72, 0x67, 0x73, 0x12, 0x3b, 0x0a, 0x07, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, + 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, + 0x6f, 0x72, 0x6d, 0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x83, 0x01, 0x0a, 0x17, 0x54, 0x72, + 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2b, 0x0a, 0x04, 0x61, 0x72, 0x67, 0x73, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x04, 0x61, 0x72, + 0x67, 0x73, 0x12, 0x3b, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, + 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, + 0xa5, 0x02, 0x0a, 0x16, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x49, 0x6e, 0x76, + 0x6f, 0x6b, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, + 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, + 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x2e, 0x0a, 0x13, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x5f, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x11, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x44, 0x6f, 0x77, 0x6e, 0x6c, + 0x6f, 0x61, 0x64, 0x55, 0x72, 0x6c, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x61, 0x0a, 0x10, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, + 0x73, 0x75, 0x6d, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x70, 0x75, 0x6c, + 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, + 0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x50, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x0f, 0x70, 0x6c, 0x75, 
0x67, 0x69, 0x6e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, + 0x75, 0x6d, 0x73, 0x1a, 0x42, 0x0a, 0x14, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xc0, 0x02, 0x0a, 0x16, 0x52, 0x65, 0x67, 0x69, + 0x73, 0x74, 0x65, 0x72, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x75, 0x72, 0x6c, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, + 0x55, 0x72, 0x6c, 0x12, 0x4e, 0x0a, 0x09, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, + 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, + 0x70, 0x63, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x50, 0x61, 0x63, 0x6b, 0x61, + 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, + 0x75, 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, + 0x75, 0x6d, 0x73, 0x12, 0x47, 0x0a, 0x10, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, + 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, + 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, + 0x74, 0x65, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x70, 0x61, 0x72, 0x61, + 0x6d, 0x65, 0x74, 0x65, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x3c, 0x0a, 0x0e, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x2b, 0x0a, 0x17, 0x52, 0x65, + 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x72, 0x65, 0x66, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x72, 0x65, 0x66, 0x22, 0x56, 0x0a, 0x10, 0x50, 0x61, 0x72, 0x61, 0x6d, + 0x65, 0x74, 0x65, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2a, + 0x29, 0x0a, 0x06, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x43, + 0x43, 0x45, 0x53, 0x53, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x46, 0x41, 0x49, 0x4c, 0x10, 0x01, + 0x12, 0x08, 0x0a, 0x04, 0x53, 0x4b, 0x49, 0x50, 0x10, 0x02, 0x32, 
0xd0, 0x06, 0x0a, 0x0f, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x12, 0x5a, + 0x0a, 0x0f, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x12, 0x21, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x75, + 0x70, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, + 0x2e, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x47, 0x0a, 0x06, 0x49, 0x6e, + 0x76, 0x6f, 0x6b, 0x65, 0x12, 0x20, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, + 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, + 0x70, 0x63, 0x2e, 0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x4f, 0x0a, 0x0c, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x49, 0x6e, 0x76, + 0x6f, 0x6b, 0x65, 0x12, 0x20, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, + 0x63, 0x2e, 0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x00, 0x30, 0x01, 0x12, 0x41, 0x0a, 0x04, 0x43, 0x61, 0x6c, 0x6c, 0x12, 0x1e, 0x2e, 0x70, + 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x70, + 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x51, 0x0a, 0x0c, 0x52, 0x65, 0x61, 0x64, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1e, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, + 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, + 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x10, 0x52, 0x65, + 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x22, + 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, + 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x52, + 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5e, 0x0a, 0x17, 0x52, 0x65, 0x67, + 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x75, 0x74, + 0x70, 0x75, 0x74, 0x73, 0x12, 0x29, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, + 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 
0x1a, + 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x00, 0x12, 0x47, 0x0a, 0x16, 0x52, 0x65, 0x67, + 0x69, 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x63, 0x6b, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, + 0x6f, 0x72, 0x6d, 0x12, 0x13, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, + 0x43, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, + 0x22, 0x00, 0x12, 0x4d, 0x0a, 0x1c, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, + 0x61, 0x63, 0x6b, 0x49, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, + 0x72, 0x6d, 0x12, 0x13, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, 0x2e, 0x43, + 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, + 0x00, 0x12, 0x5a, 0x0a, 0x0f, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x50, 0x61, 0x63, + 0x6b, 0x61, 0x67, 0x65, 0x12, 0x21, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x72, 0x70, 0x63, + 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, + 0x72, 0x70, 0x63, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x50, 0x61, 0x63, 0x6b, + 0x61, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x34, 0x5a, + 0x32, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x75, 0x6c, 0x75, + 0x6d, 0x69, 0x2f, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, 0x2f, 0x73, 0x64, 0x6b, 0x2f, 0x76, 0x33, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x3b, 0x70, 0x75, 0x6c, 0x75, 0x6d, 0x69, + 0x72, 0x70, 0x63, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -2902,6 +2904,7 @@ func file_pulumi_resource_proto_init() { } } } + file_pulumi_resource_proto_msgTypes[4].OneofWrappers = []interface{}{} file_pulumi_resource_proto_msgTypes[9].OneofWrappers = []interface{}{} type x struct{} out := protoimpl.TypeBuilder{ diff --git a/vendor/github.com/sagikazarmark/locafero/.envrc b/vendor/github.com/sagikazarmark/locafero/.envrc index 3ce7171a3..2e0f9f5f7 100644 --- a/vendor/github.com/sagikazarmark/locafero/.envrc +++ b/vendor/github.com/sagikazarmark/locafero/.envrc @@ -1,4 +1,4 @@ -if ! has nix_direnv_version || ! nix_direnv_version 2.3.0; then - source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/2.3.0/direnvrc" "sha256-Dmd+j63L84wuzgyjITIfSxSD57Tx7v51DMxVZOsiUD8=" +if ! has nix_direnv_version || ! nix_direnv_version 3.0.4; then + source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/3.0.4/direnvrc" "sha256-DzlYZ33mWF/Gs8DDeyjr8mnVmQGx7ASYqA5WlxwvBG4=" fi use flake . 
--impure diff --git a/vendor/github.com/sagikazarmark/locafero/README.md b/vendor/github.com/sagikazarmark/locafero/README.md index a48e8e978..1b74c70c5 100644 --- a/vendor/github.com/sagikazarmark/locafero/README.md +++ b/vendor/github.com/sagikazarmark/locafero/README.md @@ -2,7 +2,7 @@ [![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/sagikazarmark/locafero/ci.yaml?style=flat-square)](https://github.com/sagikazarmark/locafero/actions/workflows/ci.yaml) [![go.dev reference](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/mod/github.com/sagikazarmark/locafero) -![Go Version](https://img.shields.io/badge/go%20version-%3E=1.20-61CFDD.svg?style=flat-square) +![Go Version](https://img.shields.io/badge/go%20version-%3E=1.23-61CFDD.svg?style=flat-square) [![built with nix](https://img.shields.io/badge/builtwith-nix-7d81f7?style=flat-square)](https://builtwithnix.org) **Finder library for [Afero](https://github.com/spf13/afero) ported from [go-finder](https://github.com/sagikazarmark/go-finder).** diff --git a/vendor/github.com/sagikazarmark/locafero/finder.go b/vendor/github.com/sagikazarmark/locafero/finder.go index 754c8b260..ef8d54712 100644 --- a/vendor/github.com/sagikazarmark/locafero/finder.go +++ b/vendor/github.com/sagikazarmark/locafero/finder.go @@ -27,7 +27,7 @@ type Finder struct { // It provides the capability to search for entries with depth, // meaning it can target deeper locations within the directory structure. // - // It also supports glob syntax (as defined by [filepat.Match]), offering greater flexibility in search patterns. + // It also supports glob syntax (as defined by [filepath.Match]), offering greater flexibility in search patterns. 
// // Examples: // - config.yaml @@ -63,7 +63,7 @@ func (f Finder) Find(fsys afero.Fs) ([]string, error) { // pool.Go(func() ([]string, error) { // // If the name contains any glob character, perform a glob match - // if strings.ContainsAny(searchName, "*?[]\\^") { + // if strings.ContainsAny(searchName, globMatch) { // return globWalkSearch(fsys, searchPath, searchName, f.Type) // } // @@ -79,7 +79,7 @@ func (f Finder) Find(fsys afero.Fs) ([]string, error) { allResults, err := iter.MapErr(searchItems, func(item *searchItem) ([]string, error) { // If the name contains any glob character, perform a glob match - if strings.ContainsAny(item.name, "*?[]\\^") { + if strings.ContainsAny(item.name, globMatch) { return globWalkSearch(fsys, item.path, item.name, f.Type) } diff --git a/vendor/github.com/sagikazarmark/locafero/flake.lock b/vendor/github.com/sagikazarmark/locafero/flake.lock index 46d28f805..58b9e3280 100644 --- a/vendor/github.com/sagikazarmark/locafero/flake.lock +++ b/vendor/github.com/sagikazarmark/locafero/flake.lock @@ -1,18 +1,47 @@ { "nodes": { + "cachix": { + "inputs": { + "devenv": [ + "devenv" + ], + "flake-compat": [ + "devenv" + ], + "git-hooks": [ + "devenv" + ], + "nixpkgs": "nixpkgs" + }, + "locked": { + "lastModified": 1737621947, + "narHash": "sha256-8HFvG7fvIFbgtaYAY2628Tb89fA55nPm2jSiNs0/Cws=", + "owner": "cachix", + "repo": "cachix", + "rev": "f65a3cd5e339c223471e64c051434616e18cc4f5", + "type": "github" + }, + "original": { + "owner": "cachix", + "ref": "latest", + "repo": "cachix", + "type": "github" + } + }, "devenv": { "inputs": { + "cachix": "cachix", "flake-compat": "flake-compat", + "git-hooks": "git-hooks", "nix": "nix", - "nixpkgs": "nixpkgs", - "pre-commit-hooks": "pre-commit-hooks" + "nixpkgs": "nixpkgs_3" }, "locked": { - "lastModified": 1694097209, - "narHash": "sha256-gQmBjjxeSyySjbh0yQVBKApo2KWIFqqbRUvG+Fa+QpM=", + "lastModified": 1742042793, + "narHash": "sha256-SU5v9uPXvg+Tl5GbUEKliIBE7YV7aVbZlhBx28Dax18=", "owner": "cachix", "repo": "devenv", - "rev": "7a8e6a91510efe89d8dcb8e43233f93e86f6b189", + "rev": "7e999fc6ba55e72776a4f7b97be55642b393201a", "type": "github" }, "original": { @@ -24,11 +53,11 @@ "flake-compat": { "flake": false, "locked": { - "lastModified": 1673956053, - "narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=", + "lastModified": 1733328505, + "narHash": "sha256-NeCCThCEP3eCl2l/+27kNNK7QrwZB1IJCrXfrbv5oqU=", "owner": "edolstra", "repo": "flake-compat", - "rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9", + "rev": "ff81ac966bb2cae68946d5ed5fc4994f96d0ffec", "type": "github" }, "original": { @@ -38,15 +67,37 @@ } }, "flake-parts": { + "inputs": { + "nixpkgs-lib": [ + "devenv", + "nix", + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1712014858, + "narHash": "sha256-sB4SWl2lX95bExY2gMFG5HIzvva5AVMJd4Igm+GpZNw=", + "owner": "hercules-ci", + "repo": "flake-parts", + "rev": "9126214d0a59633752a136528f5f3b9aa8565b7d", + "type": "github" + }, + "original": { + "owner": "hercules-ci", + "repo": "flake-parts", + "type": "github" + } + }, + "flake-parts_2": { "inputs": { "nixpkgs-lib": "nixpkgs-lib" }, "locked": { - "lastModified": 1693611461, - "narHash": "sha256-aPODl8vAgGQ0ZYFIRisxYG5MOGSkIczvu2Cd8Gb9+1Y=", + "lastModified": 1741352980, + "narHash": "sha256-+u2UunDA4Cl5Fci3m7S643HzKmIDAe+fiXrLqYsR2fs=", "owner": "hercules-ci", "repo": "flake-parts", - "rev": "7f53fdb7bdc5bb237da7fefef12d099e4fd611ca", + "rev": "f4330d22f1c5d2ba72d3d22df5597d123fdb60a9", "type": "github" }, "original": { @@ -55,21 +106,28 @@ "type": 
"github" } }, - "flake-utils": { + "git-hooks": { "inputs": { - "systems": "systems" + "flake-compat": [ + "devenv" + ], + "gitignore": "gitignore", + "nixpkgs": [ + "devenv", + "nixpkgs" + ] }, "locked": { - "lastModified": 1685518550, - "narHash": "sha256-o2d0KcvaXzTrPRIo0kOLV0/QXHhDQ5DTi+OxcjO8xqY=", - "owner": "numtide", - "repo": "flake-utils", - "rev": "a1720a10a6cfe8234c0e93907ffe81be440f4cef", + "lastModified": 1740849354, + "narHash": "sha256-oy33+t09FraucSZ2rZ6qnD1Y1c8azKKmQuCvF2ytUko=", + "owner": "cachix", + "repo": "git-hooks.nix", + "rev": "4a709a8ce9f8c08fa7ddb86761fe488ff7858a07", "type": "github" }, "original": { - "owner": "numtide", - "repo": "flake-utils", + "owner": "cachix", + "repo": "git-hooks.nix", "type": "github" } }, @@ -77,16 +135,16 @@ "inputs": { "nixpkgs": [ "devenv", - "pre-commit-hooks", + "git-hooks", "nixpkgs" ] }, "locked": { - "lastModified": 1660459072, - "narHash": "sha256-8DFJjXG8zqoONA1vXtgeKXy68KdJL5UaXR8NtVMUbx8=", + "lastModified": 1709087332, + "narHash": "sha256-HG2cCnktfHsKV0s4XW83gU3F57gaTljL9KNSuG6bnQs=", "owner": "hercules-ci", "repo": "gitignore.nix", - "rev": "a20de23b925fd8264fd7fad6454652e142fd7f73", + "rev": "637db329424fd7e46cf4185293b9cc8c88c95394", "type": "github" }, "original": { @@ -95,119 +153,125 @@ "type": "github" } }, - "lowdown-src": { + "libgit2": { "flake": false, "locked": { - "lastModified": 1633514407, - "narHash": "sha256-Dw32tiMjdK9t3ETl5fzGrutQTzh2rufgZV4A/BbxuD4=", - "owner": "kristapsdz", - "repo": "lowdown", - "rev": "d2c2b44ff6c27b936ec27358a2653caaef8f73b8", + "lastModified": 1697646580, + "narHash": "sha256-oX4Z3S9WtJlwvj0uH9HlYcWv+x1hqp8mhXl7HsLu2f0=", + "owner": "libgit2", + "repo": "libgit2", + "rev": "45fd9ed7ae1a9b74b957ef4f337bc3c8b3df01b5", "type": "github" }, "original": { - "owner": "kristapsdz", - "repo": "lowdown", + "owner": "libgit2", + "repo": "libgit2", "type": "github" } }, "nix": { "inputs": { - "lowdown-src": "lowdown-src", - "nixpkgs": [ - "devenv", - "nixpkgs" + "flake-compat": [ + "devenv" + ], + "flake-parts": "flake-parts", + "libgit2": "libgit2", + "nixpkgs": "nixpkgs_2", + "nixpkgs-23-11": [ + "devenv" + ], + "nixpkgs-regression": [ + "devenv" ], - "nixpkgs-regression": "nixpkgs-regression" + "pre-commit-hooks": [ + "devenv" + ] }, "locked": { - "lastModified": 1676545802, - "narHash": "sha256-EK4rZ+Hd5hsvXnzSzk2ikhStJnD63odF7SzsQ8CuSPU=", + "lastModified": 1734114420, + "narHash": "sha256-n52PUzub5jZWc8nI/sR7UICOheU8rNA+YZ73YaHeCBg=", "owner": "domenkozar", "repo": "nix", - "rev": "7c91803598ffbcfe4a55c44ac6d49b2cf07a527f", + "rev": "bde6a1a0d1f2af86caa4d20d23eca019f3d57eee", "type": "github" }, "original": { "owner": "domenkozar", - "ref": "relaxed-flakes", + "ref": "devenv-2.24", "repo": "nix", "type": "github" } }, "nixpkgs": { "locked": { - "lastModified": 1678875422, - "narHash": "sha256-T3o6NcQPwXjxJMn2shz86Chch4ljXgZn746c2caGxd8=", + "lastModified": 1733212471, + "narHash": "sha256-M1+uCoV5igihRfcUKrr1riygbe73/dzNnzPsmaLCmpo=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "126f49a01de5b7e35a43fd43f891ecf6d3a51459", + "rev": "55d15ad12a74eb7d4646254e13638ad0c4128776", "type": "github" }, "original": { "owner": "NixOS", - "ref": "nixpkgs-unstable", + "ref": "nixos-unstable", "repo": "nixpkgs", "type": "github" } }, "nixpkgs-lib": { "locked": { - "dir": "lib", - "lastModified": 1693471703, - "narHash": "sha256-0l03ZBL8P1P6z8MaSDS/MvuU8E75rVxe5eE1N6gxeTo=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "3e52e76b70d5508f3cec70b882a29199f4d1ee85", + "lastModified": 1740877520, 
+ "narHash": "sha256-oiwv/ZK/2FhGxrCkQkB83i7GnWXPPLzoqFHpDD3uYpk=", + "owner": "nix-community", + "repo": "nixpkgs.lib", + "rev": "147dee35aab2193b174e4c0868bd80ead5ce755c", "type": "github" }, "original": { - "dir": "lib", - "owner": "NixOS", - "ref": "nixos-unstable", - "repo": "nixpkgs", + "owner": "nix-community", + "repo": "nixpkgs.lib", "type": "github" } }, - "nixpkgs-regression": { + "nixpkgs_2": { "locked": { - "lastModified": 1643052045, - "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=", + "lastModified": 1717432640, + "narHash": "sha256-+f9c4/ZX5MWDOuB1rKoWj+lBNm0z0rs4CK47HBLxy1o=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", + "rev": "88269ab3044128b7c2f4c7d68448b2fb50456870", "type": "github" }, "original": { "owner": "NixOS", + "ref": "release-24.05", "repo": "nixpkgs", - "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", "type": "github" } }, - "nixpkgs-stable": { + "nixpkgs_3": { "locked": { - "lastModified": 1685801374, - "narHash": "sha256-otaSUoFEMM+LjBI1XL/xGB5ao6IwnZOXc47qhIgJe8U=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "c37ca420157f4abc31e26f436c1145f8951ff373", + "lastModified": 1733477122, + "narHash": "sha256-qamMCz5mNpQmgBwc8SB5tVMlD5sbwVIToVZtSxMph9s=", + "owner": "cachix", + "repo": "devenv-nixpkgs", + "rev": "7bd9e84d0452f6d2e63b6e6da29fe73fac951857", "type": "github" }, "original": { - "owner": "NixOS", - "ref": "nixos-23.05", - "repo": "nixpkgs", + "owner": "cachix", + "ref": "rolling", + "repo": "devenv-nixpkgs", "type": "github" } }, - "nixpkgs_2": { + "nixpkgs_4": { "locked": { - "lastModified": 1694343207, - "narHash": "sha256-jWi7OwFxU5Owi4k2JmiL1sa/OuBCQtpaAesuj5LXC8w=", + "lastModified": 1741865919, + "narHash": "sha256-4thdbnP6dlbdq+qZWTsm4ffAwoS8Tiq1YResB+RP6WE=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "78058d810644f5ed276804ce7ea9e82d92bee293", + "rev": "573c650e8a14b2faa0041645ab18aed7e60f0c9a", "type": "github" }, "original": { @@ -217,54 +281,11 @@ "type": "github" } }, - "pre-commit-hooks": { - "inputs": { - "flake-compat": [ - "devenv", - "flake-compat" - ], - "flake-utils": "flake-utils", - "gitignore": "gitignore", - "nixpkgs": [ - "devenv", - "nixpkgs" - ], - "nixpkgs-stable": "nixpkgs-stable" - }, - "locked": { - "lastModified": 1688056373, - "narHash": "sha256-2+SDlNRTKsgo3LBRiMUcoEUb6sDViRNQhzJquZ4koOI=", - "owner": "cachix", - "repo": "pre-commit-hooks.nix", - "rev": "5843cf069272d92b60c3ed9e55b7a8989c01d4c7", - "type": "github" - }, - "original": { - "owner": "cachix", - "repo": "pre-commit-hooks.nix", - "type": "github" - } - }, "root": { "inputs": { "devenv": "devenv", - "flake-parts": "flake-parts", - "nixpkgs": "nixpkgs_2" - } - }, - "systems": { - "locked": { - "lastModified": 1681028828, - "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", - "owner": "nix-systems", - "repo": "default", - "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", - "type": "github" - }, - "original": { - "owner": "nix-systems", - "repo": "default", - "type": "github" + "flake-parts": "flake-parts_2", + "nixpkgs": "nixpkgs_4" } } }, diff --git a/vendor/github.com/sagikazarmark/locafero/flake.nix b/vendor/github.com/sagikazarmark/locafero/flake.nix index 209ecf286..7656f81b0 100644 --- a/vendor/github.com/sagikazarmark/locafero/flake.nix +++ b/vendor/github.com/sagikazarmark/locafero/flake.nix @@ -1,47 +1,63 @@ { - description = "Finder library for Afero"; - inputs = { nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable"; flake-parts.url = 
"github:hercules-ci/flake-parts"; devenv.url = "github:cachix/devenv"; }; - outputs = inputs@{ flake-parts, ... }: + outputs = + inputs@{ flake-parts, ... }: flake-parts.lib.mkFlake { inherit inputs; } { imports = [ inputs.devenv.flakeModule ]; - systems = [ "x86_64-linux" "aarch64-darwin" ]; + systems = [ + "x86_64-linux" + "aarch64-darwin" + ]; - perSystem = { config, self', inputs', pkgs, system, ... }: rec { - devenv.shells = { - default = { - languages = { - go.enable = true; + perSystem = + { + pkgs, + ... + }: + rec { + devenv.shells = { + default = { + languages = { + go.enable = true; + go.package = pkgs.lib.mkDefault pkgs.go_1_23; + }; + + packages = with pkgs; [ + just + + golangci-lint + ]; + + # https://github.com/cachix/devenv/issues/528#issuecomment-1556108767 + containers = pkgs.lib.mkForce { }; }; - packages = with pkgs; [ - just + ci = devenv.shells.default; - golangci-lint - ]; + ci_1_23 = { + imports = [ devenv.shells.ci ]; - # https://github.com/cachix/devenv/issues/528#issuecomment-1556108767 - containers = pkgs.lib.mkForce { }; - }; - - ci = devenv.shells.default; + languages = { + go.package = pkgs.go_1_23; + }; + }; - ci_1_20 = { - imports = [ devenv.shells.ci ]; + ci_1_24 = { + imports = [ devenv.shells.ci ]; - languages = { - go.package = pkgs.go_1_20; + languages = { + go.package = pkgs.go_1_24; + }; }; }; }; - }; }; } diff --git a/vendor/github.com/sagikazarmark/locafero/glob.go b/vendor/github.com/sagikazarmark/locafero/glob.go new file mode 100644 index 000000000..00f833e99 --- /dev/null +++ b/vendor/github.com/sagikazarmark/locafero/glob.go @@ -0,0 +1,5 @@ +//go:build !windows + +package locafero + +const globMatch = "*?[]\\^" diff --git a/vendor/github.com/sagikazarmark/locafero/glob_windows.go b/vendor/github.com/sagikazarmark/locafero/glob_windows.go new file mode 100644 index 000000000..7aec2b247 --- /dev/null +++ b/vendor/github.com/sagikazarmark/locafero/glob_windows.go @@ -0,0 +1,8 @@ +//go:build windows + +package locafero + +// See [filepath.Match]: +// +// On Windows, escaping is disabled. Instead, '\\' is treated as path separator. +const globMatch = "*?[]^" diff --git a/vendor/github.com/sagikazarmark/slog-shim/.envrc b/vendor/github.com/sagikazarmark/slog-shim/.envrc deleted file mode 100644 index 3ce7171a3..000000000 --- a/vendor/github.com/sagikazarmark/slog-shim/.envrc +++ /dev/null @@ -1,4 +0,0 @@ -if ! has nix_direnv_version || ! nix_direnv_version 2.3.0; then - source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/2.3.0/direnvrc" "sha256-Dmd+j63L84wuzgyjITIfSxSD57Tx7v51DMxVZOsiUD8=" -fi -use flake . --impure diff --git a/vendor/github.com/sagikazarmark/slog-shim/LICENSE b/vendor/github.com/sagikazarmark/slog-shim/LICENSE deleted file mode 100644 index 6a66aea5e..000000000 --- a/vendor/github.com/sagikazarmark/slog-shim/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. 
nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/sagikazarmark/slog-shim/README.md b/vendor/github.com/sagikazarmark/slog-shim/README.md deleted file mode 100644 index 1f5be85e1..000000000 --- a/vendor/github.com/sagikazarmark/slog-shim/README.md +++ /dev/null @@ -1,81 +0,0 @@ -# [slog](https://pkg.go.dev/log/slog) shim - -[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/sagikazarmark/slog-shim/ci.yaml?style=flat-square)](https://github.com/sagikazarmark/slog-shim/actions/workflows/ci.yaml) -[![go.dev reference](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/mod/github.com/sagikazarmark/slog-shim) -![Go Version](https://img.shields.io/badge/go%20version-%3E=1.20-61CFDD.svg?style=flat-square) -[![built with nix](https://img.shields.io/badge/builtwith-nix-7d81f7?style=flat-square)](https://builtwithnix.org) - -Go 1.21 introduced a [new structured logging package](https://golang.org/doc/go1.21#slog), `log/slog`, to the standard library. -Although it's been eagerly anticipated by many, widespread adoption isn't expected to occur immediately, -especially since updating to Go 1.21 is a decision that most libraries won't make overnight. - -Before this package was added to the standard library, there was an _experimental_ version available at [golang.org/x/exp/slog](https://pkg.go.dev/golang.org/x/exp/slog). -While it's generally advised against using experimental packages in production, -this one served as a sort of backport package for the last few years, -incorporating new features before they were added to the standard library (like `slices`, `maps` or `errors`). - -This package serves as a bridge, helping libraries integrate slog in a backward-compatible way without having to immediately update their Go version requirement to 1.21. On Go 1.21 (and above), it acts as a drop-in replacement for `log/slog`, while below 1.21 it falls back to `golang.org/x/exp/slog`. - -**How does it achieve backwards compatibility?** - -Although there's no consensus on whether dropping support for older Go versions is considered backward compatible, a majority seems to believe it is. -(I don't have scientific proof for this, but it's based on conversations with various individuals across different channels.) - -This package adheres to that interpretation of backward compatibility. On Go 1.21, the shim uses type aliases to offer the same API as `slog/log`. -Once a library upgrades its version requirement to Go 1.21, it should be able to discard this shim and use `log/slog` directly. 
- -For older Go versions, the library might become unstable after removing the shim. -However, since those older versions are no longer supported, the promise of backward compatibility remains intact. - -## Installation - -```shell -go get github.com/sagikazarmark/slog-shim -``` - -## Usage - -Import this package into your library and use it in your public API: - -```go -package mylib - -import slog "github.com/sagikazarmark/slog-shim" - -func New(logger *slog.Logger) MyLib { - // ... -} -``` - -When using the library, clients can either use `log/slog` (when on Go 1.21) or `golang.org/x/exp/slog` (below Go 1.21): - -```go -package main - -import "log/slog" - -// OR - -import "golang.org/x/exp/slog" - -mylib.New(slog.Default()) -``` - -**Make sure consumers are aware that your API behaves differently on different Go versions.** - -Once you bump your Go version requirement to Go 1.21, you can drop the shim entirely from your code: - -```diff -package mylib - -- import slog "github.com/sagikazarmark/slog-shim" -+ import "log/slog" - -func New(logger *slog.Logger) MyLib { - // ... -} -``` - -## License - -The project is licensed under a [BSD-style license](LICENSE). diff --git a/vendor/github.com/sagikazarmark/slog-shim/attr.go b/vendor/github.com/sagikazarmark/slog-shim/attr.go deleted file mode 100644 index 89608bf3a..000000000 --- a/vendor/github.com/sagikazarmark/slog-shim/attr.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.21 - -package slog - -import ( - "log/slog" - "time" -) - -// An Attr is a key-value pair. -type Attr = slog.Attr - -// String returns an Attr for a string value. -func String(key, value string) Attr { - return slog.String(key, value) -} - -// Int64 returns an Attr for an int64. -func Int64(key string, value int64) Attr { - return slog.Int64(key, value) -} - -// Int converts an int to an int64 and returns -// an Attr with that value. -func Int(key string, value int) Attr { - return slog.Int(key, value) -} - -// Uint64 returns an Attr for a uint64. -func Uint64(key string, v uint64) Attr { - return slog.Uint64(key, v) -} - -// Float64 returns an Attr for a floating-point number. -func Float64(key string, v float64) Attr { - return slog.Float64(key, v) -} - -// Bool returns an Attr for a bool. -func Bool(key string, v bool) Attr { - return slog.Bool(key, v) -} - -// Time returns an Attr for a time.Time. -// It discards the monotonic portion. -func Time(key string, v time.Time) Attr { - return slog.Time(key, v) -} - -// Duration returns an Attr for a time.Duration. -func Duration(key string, v time.Duration) Attr { - return slog.Duration(key, v) -} - -// Group returns an Attr for a Group Value. -// The first argument is the key; the remaining arguments -// are converted to Attrs as in [Logger.Log]. -// -// Use Group to collect several key-value pairs under a single -// key on a log line, or as the result of LogValue -// in order to log a single value as multiple Attrs. -func Group(key string, args ...any) Attr { - return slog.Group(key, args...) -} - -// Any returns an Attr for the supplied value. -// See [Value.AnyValue] for how values are treated. 
-func Any(key string, value any) Attr { - return slog.Any(key, value) -} diff --git a/vendor/github.com/sagikazarmark/slog-shim/attr_120.go b/vendor/github.com/sagikazarmark/slog-shim/attr_120.go deleted file mode 100644 index b66481333..000000000 --- a/vendor/github.com/sagikazarmark/slog-shim/attr_120.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.21 - -package slog - -import ( - "time" - - "golang.org/x/exp/slog" -) - -// An Attr is a key-value pair. -type Attr = slog.Attr - -// String returns an Attr for a string value. -func String(key, value string) Attr { - return slog.String(key, value) -} - -// Int64 returns an Attr for an int64. -func Int64(key string, value int64) Attr { - return slog.Int64(key, value) -} - -// Int converts an int to an int64 and returns -// an Attr with that value. -func Int(key string, value int) Attr { - return slog.Int(key, value) -} - -// Uint64 returns an Attr for a uint64. -func Uint64(key string, v uint64) Attr { - return slog.Uint64(key, v) -} - -// Float64 returns an Attr for a floating-point number. -func Float64(key string, v float64) Attr { - return slog.Float64(key, v) -} - -// Bool returns an Attr for a bool. -func Bool(key string, v bool) Attr { - return slog.Bool(key, v) -} - -// Time returns an Attr for a time.Time. -// It discards the monotonic portion. -func Time(key string, v time.Time) Attr { - return slog.Time(key, v) -} - -// Duration returns an Attr for a time.Duration. -func Duration(key string, v time.Duration) Attr { - return slog.Duration(key, v) -} - -// Group returns an Attr for a Group Value. -// The first argument is the key; the remaining arguments -// are converted to Attrs as in [Logger.Log]. -// -// Use Group to collect several key-value pairs under a single -// key on a log line, or as the result of LogValue -// in order to log a single value as multiple Attrs. -func Group(key string, args ...any) Attr { - return slog.Group(key, args...) -} - -// Any returns an Attr for the supplied value. -// See [Value.AnyValue] for how values are treated. 
-func Any(key string, value any) Attr { - return slog.Any(key, value) -} diff --git a/vendor/github.com/sagikazarmark/slog-shim/flake.lock b/vendor/github.com/sagikazarmark/slog-shim/flake.lock deleted file mode 100644 index 7e8898e9e..000000000 --- a/vendor/github.com/sagikazarmark/slog-shim/flake.lock +++ /dev/null @@ -1,273 +0,0 @@ -{ - "nodes": { - "devenv": { - "inputs": { - "flake-compat": "flake-compat", - "nix": "nix", - "nixpkgs": "nixpkgs", - "pre-commit-hooks": "pre-commit-hooks" - }, - "locked": { - "lastModified": 1694097209, - "narHash": "sha256-gQmBjjxeSyySjbh0yQVBKApo2KWIFqqbRUvG+Fa+QpM=", - "owner": "cachix", - "repo": "devenv", - "rev": "7a8e6a91510efe89d8dcb8e43233f93e86f6b189", - "type": "github" - }, - "original": { - "owner": "cachix", - "repo": "devenv", - "type": "github" - } - }, - "flake-compat": { - "flake": false, - "locked": { - "lastModified": 1673956053, - "narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=", - "owner": "edolstra", - "repo": "flake-compat", - "rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9", - "type": "github" - }, - "original": { - "owner": "edolstra", - "repo": "flake-compat", - "type": "github" - } - }, - "flake-parts": { - "inputs": { - "nixpkgs-lib": "nixpkgs-lib" - }, - "locked": { - "lastModified": 1693611461, - "narHash": "sha256-aPODl8vAgGQ0ZYFIRisxYG5MOGSkIczvu2Cd8Gb9+1Y=", - "owner": "hercules-ci", - "repo": "flake-parts", - "rev": "7f53fdb7bdc5bb237da7fefef12d099e4fd611ca", - "type": "github" - }, - "original": { - "owner": "hercules-ci", - "repo": "flake-parts", - "type": "github" - } - }, - "flake-utils": { - "inputs": { - "systems": "systems" - }, - "locked": { - "lastModified": 1685518550, - "narHash": "sha256-o2d0KcvaXzTrPRIo0kOLV0/QXHhDQ5DTi+OxcjO8xqY=", - "owner": "numtide", - "repo": "flake-utils", - "rev": "a1720a10a6cfe8234c0e93907ffe81be440f4cef", - "type": "github" - }, - "original": { - "owner": "numtide", - "repo": "flake-utils", - "type": "github" - } - }, - "gitignore": { - "inputs": { - "nixpkgs": [ - "devenv", - "pre-commit-hooks", - "nixpkgs" - ] - }, - "locked": { - "lastModified": 1660459072, - "narHash": "sha256-8DFJjXG8zqoONA1vXtgeKXy68KdJL5UaXR8NtVMUbx8=", - "owner": "hercules-ci", - "repo": "gitignore.nix", - "rev": "a20de23b925fd8264fd7fad6454652e142fd7f73", - "type": "github" - }, - "original": { - "owner": "hercules-ci", - "repo": "gitignore.nix", - "type": "github" - } - }, - "lowdown-src": { - "flake": false, - "locked": { - "lastModified": 1633514407, - "narHash": "sha256-Dw32tiMjdK9t3ETl5fzGrutQTzh2rufgZV4A/BbxuD4=", - "owner": "kristapsdz", - "repo": "lowdown", - "rev": "d2c2b44ff6c27b936ec27358a2653caaef8f73b8", - "type": "github" - }, - "original": { - "owner": "kristapsdz", - "repo": "lowdown", - "type": "github" - } - }, - "nix": { - "inputs": { - "lowdown-src": "lowdown-src", - "nixpkgs": [ - "devenv", - "nixpkgs" - ], - "nixpkgs-regression": "nixpkgs-regression" - }, - "locked": { - "lastModified": 1676545802, - "narHash": "sha256-EK4rZ+Hd5hsvXnzSzk2ikhStJnD63odF7SzsQ8CuSPU=", - "owner": "domenkozar", - "repo": "nix", - "rev": "7c91803598ffbcfe4a55c44ac6d49b2cf07a527f", - "type": "github" - }, - "original": { - "owner": "domenkozar", - "ref": "relaxed-flakes", - "repo": "nix", - "type": "github" - } - }, - "nixpkgs": { - "locked": { - "lastModified": 1678875422, - "narHash": "sha256-T3o6NcQPwXjxJMn2shz86Chch4ljXgZn746c2caGxd8=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "126f49a01de5b7e35a43fd43f891ecf6d3a51459", - "type": "github" - }, - "original": { - "owner": "NixOS", 
- "ref": "nixpkgs-unstable", - "repo": "nixpkgs", - "type": "github" - } - }, - "nixpkgs-lib": { - "locked": { - "dir": "lib", - "lastModified": 1693471703, - "narHash": "sha256-0l03ZBL8P1P6z8MaSDS/MvuU8E75rVxe5eE1N6gxeTo=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "3e52e76b70d5508f3cec70b882a29199f4d1ee85", - "type": "github" - }, - "original": { - "dir": "lib", - "owner": "NixOS", - "ref": "nixos-unstable", - "repo": "nixpkgs", - "type": "github" - } - }, - "nixpkgs-regression": { - "locked": { - "lastModified": 1643052045, - "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", - "type": "github" - }, - "original": { - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", - "type": "github" - } - }, - "nixpkgs-stable": { - "locked": { - "lastModified": 1685801374, - "narHash": "sha256-otaSUoFEMM+LjBI1XL/xGB5ao6IwnZOXc47qhIgJe8U=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "c37ca420157f4abc31e26f436c1145f8951ff373", - "type": "github" - }, - "original": { - "owner": "NixOS", - "ref": "nixos-23.05", - "repo": "nixpkgs", - "type": "github" - } - }, - "nixpkgs_2": { - "locked": { - "lastModified": 1694345580, - "narHash": "sha256-BbG0NUxQTz1dN/Y87yPWZc/0Kp/coJ0vM3+7sNa5kUM=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "f002de6834fdde9c864f33c1ec51da7df19cd832", - "type": "github" - }, - "original": { - "owner": "NixOS", - "ref": "master", - "repo": "nixpkgs", - "type": "github" - } - }, - "pre-commit-hooks": { - "inputs": { - "flake-compat": [ - "devenv", - "flake-compat" - ], - "flake-utils": "flake-utils", - "gitignore": "gitignore", - "nixpkgs": [ - "devenv", - "nixpkgs" - ], - "nixpkgs-stable": "nixpkgs-stable" - }, - "locked": { - "lastModified": 1688056373, - "narHash": "sha256-2+SDlNRTKsgo3LBRiMUcoEUb6sDViRNQhzJquZ4koOI=", - "owner": "cachix", - "repo": "pre-commit-hooks.nix", - "rev": "5843cf069272d92b60c3ed9e55b7a8989c01d4c7", - "type": "github" - }, - "original": { - "owner": "cachix", - "repo": "pre-commit-hooks.nix", - "type": "github" - } - }, - "root": { - "inputs": { - "devenv": "devenv", - "flake-parts": "flake-parts", - "nixpkgs": "nixpkgs_2" - } - }, - "systems": { - "locked": { - "lastModified": 1681028828, - "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", - "owner": "nix-systems", - "repo": "default", - "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", - "type": "github" - }, - "original": { - "owner": "nix-systems", - "repo": "default", - "type": "github" - } - } - }, - "root": "root", - "version": 7 -} diff --git a/vendor/github.com/sagikazarmark/slog-shim/handler.go b/vendor/github.com/sagikazarmark/slog-shim/handler.go deleted file mode 100644 index f55556ae1..000000000 --- a/vendor/github.com/sagikazarmark/slog-shim/handler.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.21 - -package slog - -import ( - "log/slog" -) - -// A Handler handles log records produced by a Logger.. -// -// A typical handler may print log records to standard error, -// or write them to a file or database, or perhaps augment them -// with additional attributes and pass them on to another handler. -// -// Any of the Handler's methods may be called concurrently with itself -// or with other methods. 
It is the responsibility of the Handler to -// manage this concurrency. -// -// Users of the slog package should not invoke Handler methods directly. -// They should use the methods of [Logger] instead. -type Handler = slog.Handler - -// HandlerOptions are options for a TextHandler or JSONHandler. -// A zero HandlerOptions consists entirely of default values. -type HandlerOptions = slog.HandlerOptions - -// Keys for "built-in" attributes. -const ( - // TimeKey is the key used by the built-in handlers for the time - // when the log method is called. The associated Value is a [time.Time]. - TimeKey = slog.TimeKey - // LevelKey is the key used by the built-in handlers for the level - // of the log call. The associated value is a [Level]. - LevelKey = slog.LevelKey - // MessageKey is the key used by the built-in handlers for the - // message of the log call. The associated value is a string. - MessageKey = slog.MessageKey - // SourceKey is the key used by the built-in handlers for the source file - // and line of the log call. The associated value is a string. - SourceKey = slog.SourceKey -) diff --git a/vendor/github.com/sagikazarmark/slog-shim/handler_120.go b/vendor/github.com/sagikazarmark/slog-shim/handler_120.go deleted file mode 100644 index 670057573..000000000 --- a/vendor/github.com/sagikazarmark/slog-shim/handler_120.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.21 - -package slog - -import ( - "golang.org/x/exp/slog" -) - -// A Handler handles log records produced by a Logger.. -// -// A typical handler may print log records to standard error, -// or write them to a file or database, or perhaps augment them -// with additional attributes and pass them on to another handler. -// -// Any of the Handler's methods may be called concurrently with itself -// or with other methods. It is the responsibility of the Handler to -// manage this concurrency. -// -// Users of the slog package should not invoke Handler methods directly. -// They should use the methods of [Logger] instead. -type Handler = slog.Handler - -// HandlerOptions are options for a TextHandler or JSONHandler. -// A zero HandlerOptions consists entirely of default values. -type HandlerOptions = slog.HandlerOptions - -// Keys for "built-in" attributes. -const ( - // TimeKey is the key used by the built-in handlers for the time - // when the log method is called. The associated Value is a [time.Time]. - TimeKey = slog.TimeKey - // LevelKey is the key used by the built-in handlers for the level - // of the log call. The associated value is a [Level]. - LevelKey = slog.LevelKey - // MessageKey is the key used by the built-in handlers for the - // message of the log call. The associated value is a string. - MessageKey = slog.MessageKey - // SourceKey is the key used by the built-in handlers for the source file - // and line of the log call. The associated value is a string. - SourceKey = slog.SourceKey -) diff --git a/vendor/github.com/sagikazarmark/slog-shim/json_handler.go b/vendor/github.com/sagikazarmark/slog-shim/json_handler.go deleted file mode 100644 index 7c22bd81e..000000000 --- a/vendor/github.com/sagikazarmark/slog-shim/json_handler.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build go1.21 - -package slog - -import ( - "io" - "log/slog" -) - -// JSONHandler is a Handler that writes Records to an io.Writer as -// line-delimited JSON objects. -type JSONHandler = slog.JSONHandler - -// NewJSONHandler creates a JSONHandler that writes to w, -// using the given options. -// If opts is nil, the default options are used. -func NewJSONHandler(w io.Writer, opts *HandlerOptions) *JSONHandler { - return slog.NewJSONHandler(w, opts) -} diff --git a/vendor/github.com/sagikazarmark/slog-shim/json_handler_120.go b/vendor/github.com/sagikazarmark/slog-shim/json_handler_120.go deleted file mode 100644 index 7b14f10ba..000000000 --- a/vendor/github.com/sagikazarmark/slog-shim/json_handler_120.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.21 - -package slog - -import ( - "io" - - "golang.org/x/exp/slog" -) - -// JSONHandler is a Handler that writes Records to an io.Writer as -// line-delimited JSON objects. -type JSONHandler = slog.JSONHandler - -// NewJSONHandler creates a JSONHandler that writes to w, -// using the given options. -// If opts is nil, the default options are used. -func NewJSONHandler(w io.Writer, opts *HandlerOptions) *JSONHandler { - return slog.NewJSONHandler(w, opts) -} diff --git a/vendor/github.com/sagikazarmark/slog-shim/level.go b/vendor/github.com/sagikazarmark/slog-shim/level.go deleted file mode 100644 index 07288cf89..000000000 --- a/vendor/github.com/sagikazarmark/slog-shim/level.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.21 - -package slog - -import ( - "log/slog" -) - -// A Level is the importance or severity of a log event. -// The higher the level, the more important or severe the event. -type Level = slog.Level - -// Level numbers are inherently arbitrary, -// but we picked them to satisfy three constraints. -// Any system can map them to another numbering scheme if it wishes. -// -// First, we wanted the default level to be Info, Since Levels are ints, Info is -// the default value for int, zero. -// -// Second, we wanted to make it easy to use levels to specify logger verbosity. -// Since a larger level means a more severe event, a logger that accepts events -// with smaller (or more negative) level means a more verbose logger. Logger -// verbosity is thus the negation of event severity, and the default verbosity -// of 0 accepts all events at least as severe as INFO. -// -// Third, we wanted some room between levels to accommodate schemes with named -// levels between ours. For example, Google Cloud Logging defines a Notice level -// between Info and Warn. Since there are only a few of these intermediate -// levels, the gap between the numbers need not be large. Our gap of 4 matches -// OpenTelemetry's mapping. Subtracting 9 from an OpenTelemetry level in the -// DEBUG, INFO, WARN and ERROR ranges converts it to the corresponding slog -// Level range. OpenTelemetry also has the names TRACE and FATAL, which slog -// does not. But those OpenTelemetry levels can still be represented as slog -// Levels by using the appropriate integers. -// -// Names for common levels. 
-const ( - LevelDebug Level = slog.LevelDebug - LevelInfo Level = slog.LevelInfo - LevelWarn Level = slog.LevelWarn - LevelError Level = slog.LevelError -) - -// A LevelVar is a Level variable, to allow a Handler level to change -// dynamically. -// It implements Leveler as well as a Set method, -// and it is safe for use by multiple goroutines. -// The zero LevelVar corresponds to LevelInfo. -type LevelVar = slog.LevelVar - -// A Leveler provides a Level value. -// -// As Level itself implements Leveler, clients typically supply -// a Level value wherever a Leveler is needed, such as in HandlerOptions. -// Clients who need to vary the level dynamically can provide a more complex -// Leveler implementation such as *LevelVar. -type Leveler = slog.Leveler diff --git a/vendor/github.com/sagikazarmark/slog-shim/level_120.go b/vendor/github.com/sagikazarmark/slog-shim/level_120.go deleted file mode 100644 index d3feb9420..000000000 --- a/vendor/github.com/sagikazarmark/slog-shim/level_120.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.21 - -package slog - -import ( - "golang.org/x/exp/slog" -) - -// A Level is the importance or severity of a log event. -// The higher the level, the more important or severe the event. -type Level = slog.Level - -// Level numbers are inherently arbitrary, -// but we picked them to satisfy three constraints. -// Any system can map them to another numbering scheme if it wishes. -// -// First, we wanted the default level to be Info, Since Levels are ints, Info is -// the default value for int, zero. -// -// Second, we wanted to make it easy to use levels to specify logger verbosity. -// Since a larger level means a more severe event, a logger that accepts events -// with smaller (or more negative) level means a more verbose logger. Logger -// verbosity is thus the negation of event severity, and the default verbosity -// of 0 accepts all events at least as severe as INFO. -// -// Third, we wanted some room between levels to accommodate schemes with named -// levels between ours. For example, Google Cloud Logging defines a Notice level -// between Info and Warn. Since there are only a few of these intermediate -// levels, the gap between the numbers need not be large. Our gap of 4 matches -// OpenTelemetry's mapping. Subtracting 9 from an OpenTelemetry level in the -// DEBUG, INFO, WARN and ERROR ranges converts it to the corresponding slog -// Level range. OpenTelemetry also has the names TRACE and FATAL, which slog -// does not. But those OpenTelemetry levels can still be represented as slog -// Levels by using the appropriate integers. -// -// Names for common levels. -const ( - LevelDebug Level = slog.LevelDebug - LevelInfo Level = slog.LevelInfo - LevelWarn Level = slog.LevelWarn - LevelError Level = slog.LevelError -) - -// A LevelVar is a Level variable, to allow a Handler level to change -// dynamically. -// It implements Leveler as well as a Set method, -// and it is safe for use by multiple goroutines. -// The zero LevelVar corresponds to LevelInfo. -type LevelVar = slog.LevelVar - -// A Leveler provides a Level value. -// -// As Level itself implements Leveler, clients typically supply -// a Level value wherever a Leveler is needed, such as in HandlerOptions. -// Clients who need to vary the level dynamically can provide a more complex -// Leveler implementation such as *LevelVar. 
-type Leveler = slog.Leveler diff --git a/vendor/github.com/sagikazarmark/slog-shim/logger.go b/vendor/github.com/sagikazarmark/slog-shim/logger.go deleted file mode 100644 index e80036bec..000000000 --- a/vendor/github.com/sagikazarmark/slog-shim/logger.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.21 - -package slog - -import ( - "context" - "log" - "log/slog" -) - -// Default returns the default Logger. -func Default() *Logger { return slog.Default() } - -// SetDefault makes l the default Logger. -// After this call, output from the log package's default Logger -// (as with [log.Print], etc.) will be logged at LevelInfo using l's Handler. -func SetDefault(l *Logger) { - slog.SetDefault(l) -} - -// A Logger records structured information about each call to its -// Log, Debug, Info, Warn, and Error methods. -// For each call, it creates a Record and passes it to a Handler. -// -// To create a new Logger, call [New] or a Logger method -// that begins "With". -type Logger = slog.Logger - -// New creates a new Logger with the given non-nil Handler. -func New(h Handler) *Logger { - return slog.New(h) -} - -// With calls Logger.With on the default logger. -func With(args ...any) *Logger { - return slog.With(args...) -} - -// NewLogLogger returns a new log.Logger such that each call to its Output method -// dispatches a Record to the specified handler. The logger acts as a bridge from -// the older log API to newer structured logging handlers. -func NewLogLogger(h Handler, level Level) *log.Logger { - return slog.NewLogLogger(h, level) -} - -// Debug calls Logger.Debug on the default logger. -func Debug(msg string, args ...any) { - slog.Debug(msg, args...) -} - -// DebugContext calls Logger.DebugContext on the default logger. -func DebugContext(ctx context.Context, msg string, args ...any) { - slog.DebugContext(ctx, msg, args...) -} - -// Info calls Logger.Info on the default logger. -func Info(msg string, args ...any) { - slog.Info(msg, args...) -} - -// InfoContext calls Logger.InfoContext on the default logger. -func InfoContext(ctx context.Context, msg string, args ...any) { - slog.InfoContext(ctx, msg, args...) -} - -// Warn calls Logger.Warn on the default logger. -func Warn(msg string, args ...any) { - slog.Warn(msg, args...) -} - -// WarnContext calls Logger.WarnContext on the default logger. -func WarnContext(ctx context.Context, msg string, args ...any) { - slog.WarnContext(ctx, msg, args...) -} - -// Error calls Logger.Error on the default logger. -func Error(msg string, args ...any) { - slog.Error(msg, args...) -} - -// ErrorContext calls Logger.ErrorContext on the default logger. -func ErrorContext(ctx context.Context, msg string, args ...any) { - slog.ErrorContext(ctx, msg, args...) -} - -// Log calls Logger.Log on the default logger. -func Log(ctx context.Context, level Level, msg string, args ...any) { - slog.Log(ctx, level, msg, args...) -} - -// LogAttrs calls Logger.LogAttrs on the default logger. -func LogAttrs(ctx context.Context, level Level, msg string, attrs ...Attr) { - slog.LogAttrs(ctx, level, msg, attrs...) 
-} diff --git a/vendor/github.com/sagikazarmark/slog-shim/logger_120.go b/vendor/github.com/sagikazarmark/slog-shim/logger_120.go deleted file mode 100644 index 97ebdd5e1..000000000 --- a/vendor/github.com/sagikazarmark/slog-shim/logger_120.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.21 - -package slog - -import ( - "context" - "log" - - "golang.org/x/exp/slog" -) - -// Default returns the default Logger. -func Default() *Logger { return slog.Default() } - -// SetDefault makes l the default Logger. -// After this call, output from the log package's default Logger -// (as with [log.Print], etc.) will be logged at LevelInfo using l's Handler. -func SetDefault(l *Logger) { - slog.SetDefault(l) -} - -// A Logger records structured information about each call to its -// Log, Debug, Info, Warn, and Error methods. -// For each call, it creates a Record and passes it to a Handler. -// -// To create a new Logger, call [New] or a Logger method -// that begins "With". -type Logger = slog.Logger - -// New creates a new Logger with the given non-nil Handler. -func New(h Handler) *Logger { - return slog.New(h) -} - -// With calls Logger.With on the default logger. -func With(args ...any) *Logger { - return slog.With(args...) -} - -// NewLogLogger returns a new log.Logger such that each call to its Output method -// dispatches a Record to the specified handler. The logger acts as a bridge from -// the older log API to newer structured logging handlers. -func NewLogLogger(h Handler, level Level) *log.Logger { - return slog.NewLogLogger(h, level) -} - -// Debug calls Logger.Debug on the default logger. -func Debug(msg string, args ...any) { - slog.Debug(msg, args...) -} - -// DebugContext calls Logger.DebugContext on the default logger. -func DebugContext(ctx context.Context, msg string, args ...any) { - slog.DebugContext(ctx, msg, args...) -} - -// Info calls Logger.Info on the default logger. -func Info(msg string, args ...any) { - slog.Info(msg, args...) -} - -// InfoContext calls Logger.InfoContext on the default logger. -func InfoContext(ctx context.Context, msg string, args ...any) { - slog.InfoContext(ctx, msg, args...) -} - -// Warn calls Logger.Warn on the default logger. -func Warn(msg string, args ...any) { - slog.Warn(msg, args...) -} - -// WarnContext calls Logger.WarnContext on the default logger. -func WarnContext(ctx context.Context, msg string, args ...any) { - slog.WarnContext(ctx, msg, args...) -} - -// Error calls Logger.Error on the default logger. -func Error(msg string, args ...any) { - slog.Error(msg, args...) -} - -// ErrorContext calls Logger.ErrorContext on the default logger. -func ErrorContext(ctx context.Context, msg string, args ...any) { - slog.ErrorContext(ctx, msg, args...) -} - -// Log calls Logger.Log on the default logger. -func Log(ctx context.Context, level Level, msg string, args ...any) { - slog.Log(ctx, level, msg, args...) -} - -// LogAttrs calls Logger.LogAttrs on the default logger. -func LogAttrs(ctx context.Context, level Level, msg string, attrs ...Attr) { - slog.LogAttrs(ctx, level, msg, attrs...) -} diff --git a/vendor/github.com/sagikazarmark/slog-shim/record.go b/vendor/github.com/sagikazarmark/slog-shim/record.go deleted file mode 100644 index 85ad1f784..000000000 --- a/vendor/github.com/sagikazarmark/slog-shim/record.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2022 The Go Authors. 
All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.21 - -package slog - -import ( - "log/slog" - "time" -) - -// A Record holds information about a log event. -// Copies of a Record share state. -// Do not modify a Record after handing out a copy to it. -// Call [NewRecord] to create a new Record. -// Use [Record.Clone] to create a copy with no shared state. -type Record = slog.Record - -// NewRecord creates a Record from the given arguments. -// Use [Record.AddAttrs] to add attributes to the Record. -// -// NewRecord is intended for logging APIs that want to support a [Handler] as -// a backend. -func NewRecord(t time.Time, level Level, msg string, pc uintptr) Record { - return slog.NewRecord(t, level, msg, pc) -} - -// Source describes the location of a line of source code. -type Source = slog.Source diff --git a/vendor/github.com/sagikazarmark/slog-shim/record_120.go b/vendor/github.com/sagikazarmark/slog-shim/record_120.go deleted file mode 100644 index c2eaf4e79..000000000 --- a/vendor/github.com/sagikazarmark/slog-shim/record_120.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.21 - -package slog - -import ( - "time" - - "golang.org/x/exp/slog" -) - -// A Record holds information about a log event. -// Copies of a Record share state. -// Do not modify a Record after handing out a copy to it. -// Call [NewRecord] to create a new Record. -// Use [Record.Clone] to create a copy with no shared state. -type Record = slog.Record - -// NewRecord creates a Record from the given arguments. -// Use [Record.AddAttrs] to add attributes to the Record. -// -// NewRecord is intended for logging APIs that want to support a [Handler] as -// a backend. -func NewRecord(t time.Time, level Level, msg string, pc uintptr) Record { - return slog.NewRecord(t, level, msg, pc) -} - -// Source describes the location of a line of source code. -type Source = slog.Source diff --git a/vendor/github.com/sagikazarmark/slog-shim/text_handler.go b/vendor/github.com/sagikazarmark/slog-shim/text_handler.go deleted file mode 100644 index 45f6cfcba..000000000 --- a/vendor/github.com/sagikazarmark/slog-shim/text_handler.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.21 - -package slog - -import ( - "io" - "log/slog" -) - -// TextHandler is a Handler that writes Records to an io.Writer as a -// sequence of key=value pairs separated by spaces and followed by a newline. -type TextHandler = slog.TextHandler - -// NewTextHandler creates a TextHandler that writes to w, -// using the given options. -// If opts is nil, the default options are used. -func NewTextHandler(w io.Writer, opts *HandlerOptions) *TextHandler { - return slog.NewTextHandler(w, opts) -} diff --git a/vendor/github.com/sagikazarmark/slog-shim/text_handler_120.go b/vendor/github.com/sagikazarmark/slog-shim/text_handler_120.go deleted file mode 100644 index a69d63cce..000000000 --- a/vendor/github.com/sagikazarmark/slog-shim/text_handler_120.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build !go1.21 - -package slog - -import ( - "io" - - "golang.org/x/exp/slog" -) - -// TextHandler is a Handler that writes Records to an io.Writer as a -// sequence of key=value pairs separated by spaces and followed by a newline. -type TextHandler = slog.TextHandler - -// NewTextHandler creates a TextHandler that writes to w, -// using the given options. -// If opts is nil, the default options are used. -func NewTextHandler(w io.Writer, opts *HandlerOptions) *TextHandler { - return slog.NewTextHandler(w, opts) -} diff --git a/vendor/github.com/sagikazarmark/slog-shim/value.go b/vendor/github.com/sagikazarmark/slog-shim/value.go deleted file mode 100644 index 61173eb94..000000000 --- a/vendor/github.com/sagikazarmark/slog-shim/value.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.21 - -package slog - -import ( - "log/slog" - "time" -) - -// A Value can represent any Go value, but unlike type any, -// it can represent most small values without an allocation. -// The zero Value corresponds to nil. -type Value = slog.Value - -// Kind is the kind of a Value. -type Kind = slog.Kind - -// The following list is sorted alphabetically, but it's also important that -// KindAny is 0 so that a zero Value represents nil. -const ( - KindAny = slog.KindAny - KindBool = slog.KindBool - KindDuration = slog.KindDuration - KindFloat64 = slog.KindFloat64 - KindInt64 = slog.KindInt64 - KindString = slog.KindString - KindTime = slog.KindTime - KindUint64 = slog.KindUint64 - KindGroup = slog.KindGroup - KindLogValuer = slog.KindLogValuer -) - -//////////////// Constructors - -// StringValue returns a new Value for a string. -func StringValue(value string) Value { - return slog.StringValue(value) -} - -// IntValue returns a Value for an int. -func IntValue(v int) Value { - return slog.IntValue(v) -} - -// Int64Value returns a Value for an int64. -func Int64Value(v int64) Value { - return slog.Int64Value(v) -} - -// Uint64Value returns a Value for a uint64. -func Uint64Value(v uint64) Value { - return slog.Uint64Value(v) -} - -// Float64Value returns a Value for a floating-point number. -func Float64Value(v float64) Value { - return slog.Float64Value(v) -} - -// BoolValue returns a Value for a bool. -func BoolValue(v bool) Value { - return slog.BoolValue(v) -} - -// TimeValue returns a Value for a time.Time. -// It discards the monotonic portion. -func TimeValue(v time.Time) Value { - return slog.TimeValue(v) -} - -// DurationValue returns a Value for a time.Duration. -func DurationValue(v time.Duration) Value { - return slog.DurationValue(v) -} - -// GroupValue returns a new Value for a list of Attrs. -// The caller must not subsequently mutate the argument slice. -func GroupValue(as ...Attr) Value { - return slog.GroupValue(as...) -} - -// AnyValue returns a Value for the supplied value. -// -// If the supplied value is of type Value, it is returned -// unmodified. -// -// Given a value of one of Go's predeclared string, bool, or -// (non-complex) numeric types, AnyValue returns a Value of kind -// String, Bool, Uint64, Int64, or Float64. The width of the -// original numeric type is not preserved. -// -// Given a time.Time or time.Duration value, AnyValue returns a Value of kind -// KindTime or KindDuration. The monotonic time is not preserved. 
-// -// For nil, or values of all other types, including named types whose -// underlying type is numeric, AnyValue returns a value of kind KindAny. -func AnyValue(v any) Value { - return slog.AnyValue(v) -} - -// A LogValuer is any Go value that can convert itself into a Value for logging. -// -// This mechanism may be used to defer expensive operations until they are -// needed, or to expand a single value into a sequence of components. -type LogValuer = slog.LogValuer diff --git a/vendor/github.com/sagikazarmark/slog-shim/value_120.go b/vendor/github.com/sagikazarmark/slog-shim/value_120.go deleted file mode 100644 index 0f9f871ee..000000000 --- a/vendor/github.com/sagikazarmark/slog-shim/value_120.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.21 - -package slog - -import ( - "time" - - "golang.org/x/exp/slog" -) - -// A Value can represent any Go value, but unlike type any, -// it can represent most small values without an allocation. -// The zero Value corresponds to nil. -type Value = slog.Value - -// Kind is the kind of a Value. -type Kind = slog.Kind - -// The following list is sorted alphabetically, but it's also important that -// KindAny is 0 so that a zero Value represents nil. -const ( - KindAny = slog.KindAny - KindBool = slog.KindBool - KindDuration = slog.KindDuration - KindFloat64 = slog.KindFloat64 - KindInt64 = slog.KindInt64 - KindString = slog.KindString - KindTime = slog.KindTime - KindUint64 = slog.KindUint64 - KindGroup = slog.KindGroup - KindLogValuer = slog.KindLogValuer -) - -//////////////// Constructors - -// StringValue returns a new Value for a string. -func StringValue(value string) Value { - return slog.StringValue(value) -} - -// IntValue returns a Value for an int. -func IntValue(v int) Value { - return slog.IntValue(v) -} - -// Int64Value returns a Value for an int64. -func Int64Value(v int64) Value { - return slog.Int64Value(v) -} - -// Uint64Value returns a Value for a uint64. -func Uint64Value(v uint64) Value { - return slog.Uint64Value(v) -} - -// Float64Value returns a Value for a floating-point number. -func Float64Value(v float64) Value { - return slog.Float64Value(v) -} - -// BoolValue returns a Value for a bool. -func BoolValue(v bool) Value { - return slog.BoolValue(v) -} - -// TimeValue returns a Value for a time.Time. -// It discards the monotonic portion. -func TimeValue(v time.Time) Value { - return slog.TimeValue(v) -} - -// DurationValue returns a Value for a time.Duration. -func DurationValue(v time.Duration) Value { - return slog.DurationValue(v) -} - -// GroupValue returns a new Value for a list of Attrs. -// The caller must not subsequently mutate the argument slice. -func GroupValue(as ...Attr) Value { - return slog.GroupValue(as...) -} - -// AnyValue returns a Value for the supplied value. -// -// If the supplied value is of type Value, it is returned -// unmodified. -// -// Given a value of one of Go's predeclared string, bool, or -// (non-complex) numeric types, AnyValue returns a Value of kind -// String, Bool, Uint64, Int64, or Float64. The width of the -// original numeric type is not preserved. -// -// Given a time.Time or time.Duration value, AnyValue returns a Value of kind -// KindTime or KindDuration. The monotonic time is not preserved. 
-// -// For nil, or values of all other types, including named types whose -// underlying type is numeric, AnyValue returns a value of kind KindAny. -func AnyValue(v any) Value { - return slog.AnyValue(v) -} - -// A LogValuer is any Go value that can convert itself into a Value for logging. -// -// This mechanism may be used to defer expensive operations until they are -// needed, or to expand a single value into a sequence of components. -type LogValuer = slog.LogValuer diff --git a/vendor/github.com/skeema/knownhosts/NOTICE b/vendor/github.com/skeema/knownhosts/NOTICE index a92cb34d6..224b20248 100644 --- a/vendor/github.com/skeema/knownhosts/NOTICE +++ b/vendor/github.com/skeema/knownhosts/NOTICE @@ -1,4 +1,4 @@ -Copyright 2024 Skeema LLC and the Skeema Knownhosts authors +Copyright 2025 Skeema LLC and the Skeema Knownhosts authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/github.com/skeema/knownhosts/README.md b/vendor/github.com/skeema/knownhosts/README.md index 046bc0edc..170e04d14 100644 --- a/vendor/github.com/skeema/knownhosts/README.md +++ b/vendor/github.com/skeema/knownhosts/README.md @@ -116,7 +116,7 @@ config := &ssh.ClientConfig{ ## License -**Source code copyright 2024 Skeema LLC and the Skeema Knownhosts authors** +**Source code copyright 2025 Skeema LLC and the Skeema Knownhosts authors** ```text Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tools/vendor/github.com/fsnotify/fsnotify/.editorconfig b/vendor/github.com/spf13/afero/.editorconfig similarity index 69% rename from tools/vendor/github.com/fsnotify/fsnotify/.editorconfig rename to vendor/github.com/spf13/afero/.editorconfig index fad895851..4492e9f9f 100644 --- a/tools/vendor/github.com/fsnotify/fsnotify/.editorconfig +++ b/vendor/github.com/spf13/afero/.editorconfig @@ -1,12 +1,12 @@ root = true -[*.go] -indent_style = tab +[*] +charset = utf-8 +end_of_line = lf indent_size = 4 -insert_final_newline = true - -[*.{yml,yaml}] indent_style = space -indent_size = 2 insert_final_newline = true trim_trailing_whitespace = true + +[*.go] +indent_style = tab diff --git a/vendor/github.com/spf13/afero/.golangci.yaml b/vendor/github.com/spf13/afero/.golangci.yaml new file mode 100644 index 000000000..806289a25 --- /dev/null +++ b/vendor/github.com/spf13/afero/.golangci.yaml @@ -0,0 +1,18 @@ +linters-settings: + gci: + sections: + - standard + - default + - prefix(github.com/spf13/afero) + +linters: + disable-all: true + enable: + - gci + - gofmt + - gofumpt + - staticcheck + +issues: + exclude-dirs: + - gcsfs/internal/stiface diff --git a/vendor/github.com/spf13/afero/README.md b/vendor/github.com/spf13/afero/README.md index 3bafbfdfc..86f154554 100644 --- a/vendor/github.com/spf13/afero/README.md +++ b/vendor/github.com/spf13/afero/README.md @@ -2,7 +2,11 @@ A FileSystem Abstraction System for Go -[![Test](https://github.com/spf13/afero/actions/workflows/test.yml/badge.svg)](https://github.com/spf13/afero/actions/workflows/test.yml) [![GoDoc](https://godoc.org/github.com/spf13/afero?status.svg)](https://godoc.org/github.com/spf13/afero) [![Join the chat at https://gitter.im/spf13/afero](https://badges.gitter.im/Dev%20Chat.svg)](https://gitter.im/spf13/afero?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) +[![GitHub Workflow 
Status](https://img.shields.io/github/actions/workflow/status/spf13/afero/ci.yaml?branch=master&style=flat-square)](https://github.com/spf13/afero/actions?query=workflow%3ACI) +[![Join the chat at https://gitter.im/spf13/afero](https://badges.gitter.im/Dev%20Chat.svg)](https://gitter.im/spf13/afero?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) +[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/afero?style=flat-square)](https://goreportcard.com/report/github.com/spf13/afero) +![Go Version](https://img.shields.io/badge/go%20version-%3E=1.23-61CFDD.svg?style=flat-square) +[![PkgGoDev](https://pkg.go.dev/badge/mod/github.com/spf13/afero)](https://pkg.go.dev/mod/github.com/spf13/afero) # Overview @@ -12,7 +16,7 @@ types and methods. Afero has an exceptionally clean interface and simple design without needless constructors or initialization methods. Afero is also a library providing a base set of interoperable backend -filesystems that make it easy to work with afero while retaining all the power +filesystems that make it easy to work with, while retaining all the power and benefit of the os and ioutil packages. Afero provides significant improvements over using the os package alone, most @@ -427,6 +431,39 @@ See the [Releases Page](https://github.com/spf13/afero/releases). 4. Push to the branch (`git push origin my-new-feature`) 5. Create new Pull Request +## Releasing + +As of version 1.14.0, Afero moved implementations with third-party libraries to +their own submodules. + +Releasing a new version now requires a few steps: + +``` +VERSION=X.Y.Z +git tag -a v$VERSION -m "Release $VERSION" +git push origin v$VERSION + +cd gcsfs +go get github.com/spf13/afero@v$VERSION +go mod tidy +git commit -am "Update afero to v$VERSION" +git tag -a gcsfs/v$VERSION -m "Release gcsfs $VERSION" +git push origin gcsfs/v$VERSION +cd .. + +cd sftpfs +go get github.com/spf13/afero@v$VERSION +go mod tidy +git commit -am "Update afero to v$VERSION" +git tag -a sftpfs/v$VERSION -m "Release sftpfs $VERSION" +git push origin sftpfs/v$VERSION +cd .. 
+ +git push +``` + +TODO: move these instructions to a Makefile or something + ## Contributors Names in no particular order: diff --git a/vendor/github.com/spf13/afero/iofs.go b/vendor/github.com/spf13/afero/iofs.go index 938b9316e..b13155ca4 100644 --- a/vendor/github.com/spf13/afero/iofs.go +++ b/vendor/github.com/spf13/afero/iofs.go @@ -255,7 +255,6 @@ func (f fromIOFSFile) Readdir(count int) ([]os.FileInfo, error) { ret := make([]os.FileInfo, len(entries)) for i := range entries { ret[i], err = entries[i].Info() - if err != nil { return nil, err } diff --git a/vendor/github.com/spf13/afero/memmap.go b/vendor/github.com/spf13/afero/memmap.go index d6c744e8d..ed92f5649 100644 --- a/vendor/github.com/spf13/afero/memmap.go +++ b/vendor/github.com/spf13/afero/memmap.go @@ -16,11 +16,9 @@ package afero import ( "fmt" "io" - "log" "os" "path/filepath" - "sort" "strings" "sync" diff --git a/vendor/github.com/spf13/cast/README.md b/vendor/github.com/spf13/cast/README.md index 0e9e14593..1be666a45 100644 --- a/vendor/github.com/spf13/cast/README.md +++ b/vendor/github.com/spf13/cast/README.md @@ -1,6 +1,6 @@ # cast -[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/spf13/cast/ci.yaml?branch=master&style=flat-square)](https://github.com/spf13/cast/actions/workflows/ci.yaml) +[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/spf13/cast/test.yaml?branch=master&style=flat-square)](https://github.com/spf13/cast/actions/workflows/test.yaml) [![PkgGoDev](https://pkg.go.dev/badge/mod/github.com/spf13/cast)](https://pkg.go.dev/mod/github.com/spf13/cast) ![Go Version](https://img.shields.io/badge/go%20version-%3E=1.16-61CFDD.svg?style=flat-square) [![Go Report Card](https://goreportcard.com/badge/github.com/spf13/cast?style=flat-square)](https://goreportcard.com/report/github.com/spf13/cast) diff --git a/vendor/github.com/spf13/cast/caste.go b/vendor/github.com/spf13/cast/caste.go index d49bbf83e..4181a2e75 100644 --- a/vendor/github.com/spf13/cast/caste.go +++ b/vendor/github.com/spf13/cast/caste.go @@ -18,6 +18,14 @@ import ( var errNegativeNotAllowed = errors.New("unable to cast negative value") +type float64EProvider interface { + Float64() (float64, error) +} + +type float64Provider interface { + Float64() float64 +} + // ToTimeE casts an interface to a time.Time type. 
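The float64EProvider and float64Provider interfaces just introduced replace the hard-coded json.Number cases in the hunks that follow: any dynamic type carrying a `Float64() (float64, error)` or `Float64() float64` method is now accepted. A minimal sketch of the effect under the cast version vendored here (`celsius` is a made-up type, not part of cast):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/spf13/cast"
)

// celsius is a hypothetical caller-defined type; its Float64() float64
// method makes it match the new float64Provider case.
type celsius float64

func (c celsius) Float64() float64 { return float64(c) }

func main() {
	// json.Number still works: its Float64() (float64, error) method
	// now matches float64EProvider rather than a dedicated case.
	f, err := cast.ToFloat64E(json.Number("3.14"))
	fmt.Println(f, err) // 3.14 <nil>

	// Any other Float64 provider is accepted too.
	f, err = cast.ToFloat64E(celsius(21.5))
	fmt.Println(f, err) // 21.5 <nil>
}
```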
func ToTimeE(i interface{}) (tim time.Time, err error) { return ToTimeInDefaultLocationE(i, time.UTC) @@ -77,11 +85,14 @@ func ToDurationE(i interface{}) (d time.Duration, err error) { d, err = time.ParseDuration(s + "ns") } return - case json.Number: + case float64EProvider: var v float64 v, err = s.Float64() d = time.Duration(v) return + case float64Provider: + d = time.Duration(s.Float64()) + return default: err = fmt.Errorf("unable to cast %#v of type %T to Duration", i, i) return @@ -174,12 +185,14 @@ func ToFloat64E(i interface{}) (float64, error) { return v, nil } return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i) - case json.Number: + case float64EProvider: v, err := s.Float64() if err == nil { return v, nil } return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i) + case float64Provider: + return s.Float64(), nil case bool: if s { return 1, nil @@ -230,12 +243,14 @@ func ToFloat32E(i interface{}) (float32, error) { return float32(v), nil } return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i) - case json.Number: + case float64EProvider: v, err := s.Float64() if err == nil { return float32(v), nil } return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i) + case float64Provider: + return float32(s.Float64()), nil case bool: if s { return 1, nil @@ -598,12 +613,12 @@ func ToUint64E(i interface{}) (uint64, error) { switch s := i.(type) { case string: - v, err := strconv.ParseInt(trimZeroDecimal(s), 0, 0) + v, err := strconv.ParseUint(trimZeroDecimal(s), 0, 0) if err == nil { if v < 0 { return 0, errNegativeNotAllowed } - return uint64(v), nil + return v, nil } return 0, fmt.Errorf("unable to cast %#v of type %T to uint64", i, i) case json.Number: @@ -917,8 +932,8 @@ func indirectToStringerOrError(a interface{}) interface{} { return nil } - var errorType = reflect.TypeOf((*error)(nil)).Elem() - var fmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem() + errorType := reflect.TypeOf((*error)(nil)).Elem() + fmtStringerType := reflect.TypeOf((*fmt.Stringer)(nil)).Elem() v := reflect.ValueOf(a) for !v.Type().Implements(fmtStringerType) && !v.Type().Implements(errorType) && v.Kind() == reflect.Ptr && !v.IsNil() { @@ -987,7 +1002,7 @@ func ToStringE(i interface{}) (string, error) { // ToStringMapStringE casts an interface to a map[string]string type. func ToStringMapStringE(i interface{}) (map[string]string, error) { - var m = map[string]string{} + m := map[string]string{} switch v := i.(type) { case map[string]string: @@ -1017,7 +1032,7 @@ func ToStringMapStringE(i interface{}) (map[string]string, error) { // ToStringMapStringSliceE casts an interface to a map[string][]string type. func ToStringMapStringSliceE(i interface{}) (map[string][]string, error) { - var m = map[string][]string{} + m := map[string][]string{} switch v := i.(type) { case map[string][]string: @@ -1081,7 +1096,7 @@ func ToStringMapStringSliceE(i interface{}) (map[string][]string, error) { // ToStringMapBoolE casts an interface to a map[string]bool type. func ToStringMapBoolE(i interface{}) (map[string]bool, error) { - var m = map[string]bool{} + m := map[string]bool{} switch v := i.(type) { case map[interface{}]interface{}: @@ -1106,7 +1121,7 @@ func ToStringMapBoolE(i interface{}) (map[string]bool, error) { // ToStringMapE casts an interface to a map[string]interface{} type. 
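The `ToUint64E` change above is a genuine bug fix, not a cleanup: `strconv.ParseInt` rejects strings above `math.MaxInt64`, so the top half of the uint64 range failed to cast, and `ParseUint` rejects negative input on its own (which makes the retained `v < 0` check dead code). A quick sketch, assuming the cast version vendored here:

```go
package main

import (
	"fmt"

	"github.com/spf13/cast"
)

func main() {
	// math.MaxUint64 as a string: the old ParseInt-based path overflowed
	// int64 and returned a cast error; ParseUint parses it fine.
	u, err := cast.ToUint64E("18446744073709551615")
	fmt.Println(u, err) // 18446744073709551615 <nil>

	// Negative input still fails, now via ParseUint itself.
	_, err = cast.ToUint64E("-1")
	fmt.Println(err != nil) // true
}
```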
func ToStringMapE(i interface{}) (map[string]interface{}, error) { - var m = map[string]interface{}{} + m := map[string]interface{}{} switch v := i.(type) { case map[interface{}]interface{}: @@ -1126,7 +1141,7 @@ func ToStringMapE(i interface{}) (map[string]interface{}, error) { // ToStringMapIntE casts an interface to a map[string]int{} type. func ToStringMapIntE(i interface{}) (map[string]int, error) { - var m = map[string]int{} + m := map[string]int{} if i == nil { return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int", i, i) } @@ -1167,7 +1182,7 @@ func ToStringMapIntE(i interface{}) (map[string]int, error) { // ToStringMapInt64E casts an interface to a map[string]int64{} type. func ToStringMapInt64E(i interface{}) (map[string]int64, error) { - var m = map[string]int64{} + m := map[string]int64{} if i == nil { return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int64", i, i) } @@ -1404,38 +1419,35 @@ func (f timeFormat) hasTimezone() bool { return f.typ >= timeFormatNumericTimezone && f.typ <= timeFormatNumericAndNamedTimezone } -var ( - timeFormats = []timeFormat{ - // Keep common formats at the top. - {"2006-01-02", timeFormatNoTimezone}, - {time.RFC3339, timeFormatNumericTimezone}, - {"2006-01-02T15:04:05", timeFormatNoTimezone}, // iso8601 without timezone - {time.RFC1123Z, timeFormatNumericTimezone}, - {time.RFC1123, timeFormatNamedTimezone}, - {time.RFC822Z, timeFormatNumericTimezone}, - {time.RFC822, timeFormatNamedTimezone}, - {time.RFC850, timeFormatNamedTimezone}, - {"2006-01-02 15:04:05.999999999 -0700 MST", timeFormatNumericAndNamedTimezone}, // Time.String() - {"2006-01-02T15:04:05-0700", timeFormatNumericTimezone}, // RFC3339 without timezone hh:mm colon - {"2006-01-02 15:04:05Z0700", timeFormatNumericTimezone}, // RFC3339 without T or timezone hh:mm colon - {"2006-01-02 15:04:05", timeFormatNoTimezone}, - {time.ANSIC, timeFormatNoTimezone}, - {time.UnixDate, timeFormatNamedTimezone}, - {time.RubyDate, timeFormatNumericTimezone}, - {"2006-01-02 15:04:05Z07:00", timeFormatNumericTimezone}, - {"02 Jan 2006", timeFormatNoTimezone}, - {"2006-01-02 15:04:05 -07:00", timeFormatNumericTimezone}, - {"2006-01-02 15:04:05 -0700", timeFormatNumericTimezone}, - {time.Kitchen, timeFormatTimeOnly}, - {time.Stamp, timeFormatTimeOnly}, - {time.StampMilli, timeFormatTimeOnly}, - {time.StampMicro, timeFormatTimeOnly}, - {time.StampNano, timeFormatTimeOnly}, - } -) +var timeFormats = []timeFormat{ + // Keep common formats at the top. 
+ {"2006-01-02", timeFormatNoTimezone}, + {time.RFC3339, timeFormatNumericTimezone}, + {"2006-01-02T15:04:05", timeFormatNoTimezone}, // iso8601 without timezone + {time.RFC1123Z, timeFormatNumericTimezone}, + {time.RFC1123, timeFormatNamedTimezone}, + {time.RFC822Z, timeFormatNumericTimezone}, + {time.RFC822, timeFormatNamedTimezone}, + {time.RFC850, timeFormatNamedTimezone}, + {"2006-01-02 15:04:05.999999999 -0700 MST", timeFormatNumericAndNamedTimezone}, // Time.String() + {"2006-01-02T15:04:05-0700", timeFormatNumericTimezone}, // RFC3339 without timezone hh:mm colon + {"2006-01-02 15:04:05Z0700", timeFormatNumericTimezone}, // RFC3339 without T or timezone hh:mm colon + {"2006-01-02 15:04:05", timeFormatNoTimezone}, + {time.ANSIC, timeFormatNoTimezone}, + {time.UnixDate, timeFormatNamedTimezone}, + {time.RubyDate, timeFormatNumericTimezone}, + {"2006-01-02 15:04:05Z07:00", timeFormatNumericTimezone}, + {"02 Jan 2006", timeFormatNoTimezone}, + {"2006-01-02 15:04:05 -07:00", timeFormatNumericTimezone}, + {"2006-01-02 15:04:05 -0700", timeFormatNumericTimezone}, + {time.Kitchen, timeFormatTimeOnly}, + {time.Stamp, timeFormatTimeOnly}, + {time.StampMilli, timeFormatTimeOnly}, + {time.StampMicro, timeFormatTimeOnly}, + {time.StampNano, timeFormatTimeOnly}, +} func parseDateWith(s string, location *time.Location, formats []timeFormat) (d time.Time, e error) { - for _, format := range formats { if d, e = time.Parse(format.format, s); e == nil { diff --git a/vendor/github.com/spf13/viper/.envrc b/vendor/github.com/spf13/viper/.envrc index 3ce7171a3..2e0f9f5f7 100644 --- a/vendor/github.com/spf13/viper/.envrc +++ b/vendor/github.com/spf13/viper/.envrc @@ -1,4 +1,4 @@ -if ! has nix_direnv_version || ! nix_direnv_version 2.3.0; then - source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/2.3.0/direnvrc" "sha256-Dmd+j63L84wuzgyjITIfSxSD57Tx7v51DMxVZOsiUD8=" +if ! has nix_direnv_version || ! nix_direnv_version 3.0.4; then + source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/3.0.4/direnvrc" "sha256-DzlYZ33mWF/Gs8DDeyjr8mnVmQGx7ASYqA5WlxwvBG4=" fi use flake . 
--impure diff --git a/vendor/github.com/spf13/viper/.golangci.yaml b/vendor/github.com/spf13/viper/.golangci.yaml index 1faeae42c..474f41633 100644 --- a/vendor/github.com/spf13/viper/.golangci.yaml +++ b/vendor/github.com/spf13/viper/.golangci.yaml @@ -17,8 +17,6 @@ linters-settings: disabled-checks: - importShadow - unnamedResult - golint: - min-confidence: 0 goimports: local-prefixes: github.com/spf13/viper @@ -30,7 +28,6 @@ linters: - dupl - durationcheck - exhaustive - - exportloopref - gci - gocritic - godot diff --git a/vendor/github.com/spf13/viper/README.md b/vendor/github.com/spf13/viper/README.md index 3fc7d84f1..769a5d900 100644 --- a/vendor/github.com/spf13/viper/README.md +++ b/vendor/github.com/spf13/viper/README.md @@ -3,7 +3,8 @@ > > **Thank you!** -![Viper](.github/logo.png?raw=true) +![viper logo](https://github.com/user-attachments/assets/acae9193-2974-41f3-808d-2d433f5ada5e) + [![Mentioned in Awesome Go](https://awesome.re/mentioned-badge-flat.svg)](https://github.com/avelino/awesome-go#configuration) [![run on repl.it](https://repl.it/badge/github/sagikazarmark/Viper-example)](https://repl.it/@sagikazarmark/Viper-example#main.go) @@ -11,7 +12,7 @@ [![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/spf13/viper/ci.yaml?branch=master&style=flat-square)](https://github.com/spf13/viper/actions?query=workflow%3ACI) [![Join the chat at https://gitter.im/spf13/viper](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/spf13/viper?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) [![Go Report Card](https://goreportcard.com/badge/github.com/spf13/viper?style=flat-square)](https://goreportcard.com/report/github.com/spf13/viper) -![Go Version](https://img.shields.io/badge/go%20version-%3E=1.20-61CFDD.svg?style=flat-square) +![Go Version](https://img.shields.io/badge/go%20version-%3E=1.21-61CFDD.svg?style=flat-square) [![PkgGoDev](https://pkg.go.dev/badge/mod/github.com/spf13/viper)](https://pkg.go.dev/mod/github.com/spf13/viper) **Go configuration with fangs!** @@ -802,7 +803,7 @@ if err != nil { } ``` -Viper uses [github.com/mitchellh/mapstructure](https://github.com/mitchellh/mapstructure) under the hood for unmarshaling values which uses `mapstructure` tags by default. +Viper uses [github.com/go-viper/mapstructure](https://github.com/go-viper/mapstructure) under the hood for unmarshaling values which uses `mapstructure` tags by default. ### Decoding custom formats @@ -836,13 +837,15 @@ func yamlStringSettings() string { ## Viper or Vipers? -Viper comes ready to use out of the box. There is no configuration or -initialization needed to begin using Viper. Since most applications will want -to use a single central repository for their configuration, the viper package -provides this. It is similar to a singleton. +Viper comes with a global instance (singleton) out of the box. + +Although it makes setting up configuration easy, +using it is generally discouraged as it makes testing harder and can lead to unexpected behavior. + +The best practice is to initialize a Viper instance and pass that around when necessary. -In all of the examples above, they demonstrate using viper in its singleton -style approach. +The global instance _MAY_ be deprecated in the future. +See [#1855](https://github.com/spf13/viper/issues/1855) for more details. 
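A minimal sketch of the instance-based style the README now recommends; the config key and the consuming function are invented for illustration:

```go
package main

import (
	"log"

	"github.com/spf13/viper"
)

func main() {
	// Build a dedicated Viper instance instead of relying on the
	// package-level singleton, then pass it (or values read from it) on.
	v := viper.New()
	v.SetConfigName("config") // base name; extension resolved by Viper
	v.AddConfigPath(".")

	if err := v.ReadInConfig(); err != nil {
		log.Fatal(err)
	}

	startServer(v.GetString("addr")) // hypothetical consumer
}

func startServer(addr string) {
	log.Printf("listening on %s", addr)
}
```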
### Working with multiple vipers diff --git a/vendor/github.com/spf13/viper/UPDATES.md b/vendor/github.com/spf13/viper/UPDATES.md new file mode 100644 index 000000000..ccf413ed7 --- /dev/null +++ b/vendor/github.com/spf13/viper/UPDATES.md @@ -0,0 +1,126 @@ +# Update Log + +**This document details any major updates required to use new features or improvements in Viper.** + +## v1.20.x + +### New file searching API + +Viper now includes a new file searching API that allows users to customize how Viper looks for config files. + +Viper accepts a custom [`Finder`](https://pkg.go.dev/github.com/spf13/viper#Finder) interface implementation: + +```go +// Finder looks for files and directories in an [afero.Fs] filesystem. +type Finder interface { + Find(fsys afero.Fs) ([]string, error) +} +``` + +It is supposed to return a list of paths to config files. + +The default implementation uses [github.com/sagikazarmark/locafero](https://github.com/sagikazarmark/locafero) under the hood. + +You can supply your own implementation using `WithFinder`: + +```go +v := viper.NewWithOptions( + viper.WithFinder(&MyFinder{}), +) +``` + +For more information, check out the [Finder examples](https://pkg.go.dev/github.com/spf13/viper#Finder) +and the [documentation](https://pkg.go.dev/github.com/sagikazarmark/locafero) for the locafero package. + +### New encoding API + +Viper now allows customizing the encoding layer by providing an API for encoding and decoding configuration data: + +```go +// Encoder encodes Viper's internal data structures into a byte representation. +// It's primarily used for encoding a map[string]any into a file format. +type Encoder interface { + Encode(v map[string]any) ([]byte, error) +} + +// Decoder decodes the contents of a byte slice into Viper's internal data structures. +// It's primarily used for decoding contents of a file into a map[string]any. +type Decoder interface { + Decode(b []byte, v map[string]any) error +} + +// Codec combines [Encoder] and [Decoder] interfaces. 
+type Codec interface { + Encoder + Decoder +} +``` + +By default, Viper includes the following codecs: + +- JSON +- TOML +- YAML +- Dotenv + +The rest of the codecs are moved to [github.com/go-viper/encoding](https://github.com/go-viper/encoding) + +Customizing the encoding layer is possible by providing a custom registry of codecs: + +- [Encoder](https://pkg.go.dev/github.com/spf13/viper#Encoder) -> [EncoderRegistry](https://pkg.go.dev/github.com/spf13/viper#EncoderRegistry) +- [Decoder](https://pkg.go.dev/github.com/spf13/viper#Decoder) -> [DecoderRegistry](https://pkg.go.dev/github.com/spf13/viper#DecoderRegistry) +- [Codec](https://pkg.go.dev/github.com/spf13/viper#Codec) -> [CodecRegistry](https://pkg.go.dev/github.com/spf13/viper#CodecRegistry) + +You can supply the registry of codecs to Viper using the appropriate `With*Registry` function: + +```go +codecRegistry := viper.NewCodecRegistry() + +codecRegistry.RegisterCodec("myformat", &MyCodec{}) + +v := viper.NewWithOptions( + viper.WithCodecRegistry(codecRegistry), +) +``` + +### BREAKING: HCL, Java properties, INI removed from core + +In order to reduce third-party dependencies, Viper dropped support for the following formats from the core: + +- HCL +- Java properties +- INI + +You can still use these formats though by importing them from [github.com/go-viper/encoding](https://github.com/go-viper/encoding): + +```go +import ( + "github.com/go-viper/encoding/hcl" + "github.com/go-viper/encoding/javaproperties" + "github.com/go-viper/encoding/ini" +) + +codecRegistry := viper.NewCodecRegistry() + +{ + codec := hcl.Codec{} + + codecRegistry.RegisterCodec("hcl", codec) + codecRegistry.RegisterCodec("tfvars", codec) + +} + +{ + codec := &javaproperties.Codec{} + + codecRegistry.RegisterCodec("properties", codec) + codecRegistry.RegisterCodec("props", codec) + codecRegistry.RegisterCodec("prop", codec) +} + +codecRegistry.RegisterCodec("ini", ini.Codec{}) + +v := viper.NewWithOptions( + viper.WithCodecRegistry(codecRegistry), +) +``` diff --git a/vendor/github.com/spf13/viper/encoding.go b/vendor/github.com/spf13/viper/encoding.go new file mode 100644 index 000000000..a7da55860 --- /dev/null +++ b/vendor/github.com/spf13/viper/encoding.go @@ -0,0 +1,181 @@ +package viper + +import ( + "errors" + "strings" + "sync" + + "github.com/spf13/viper/internal/encoding/dotenv" + "github.com/spf13/viper/internal/encoding/json" + "github.com/spf13/viper/internal/encoding/toml" + "github.com/spf13/viper/internal/encoding/yaml" +) + +// Encoder encodes Viper's internal data structures into a byte representation. +// It's primarily used for encoding a map[string]any into a file format. +type Encoder interface { + Encode(v map[string]any) ([]byte, error) +} + +// Decoder decodes the contents of a byte slice into Viper's internal data structures. +// It's primarily used for decoding contents of a file into a map[string]any. +type Decoder interface { + Decode(b []byte, v map[string]any) error +} + +// Codec combines [Encoder] and [Decoder] interfaces. +type Codec interface { + Encoder + Decoder +} + +// TODO: consider adding specific errors for not found scenarios + +// EncoderRegistry returns an [Encoder] for a given format. +// +// Format is case-insensitive. +// +// [EncoderRegistry] returns an error if no [Encoder] is registered for the format. +type EncoderRegistry interface { + Encoder(format string) (Encoder, error) +} + +// DecoderRegistry returns an [Decoder] for a given format. +// +// Format is case-insensitive. 
+// +// [DecoderRegistry] returns an error if no [Decoder] is registered for the format. +type DecoderRegistry interface { + Decoder(format string) (Decoder, error) +} + +// [CodecRegistry] combines [EncoderRegistry] and [DecoderRegistry] interfaces. +type CodecRegistry interface { + EncoderRegistry + DecoderRegistry +} + +// WithEncoderRegistry sets a custom [EncoderRegistry]. +func WithEncoderRegistry(r EncoderRegistry) Option { + return optionFunc(func(v *Viper) { + if r == nil { + return + } + + v.encoderRegistry = r + }) +} + +// WithDecoderRegistry sets a custom [DecoderRegistry]. +func WithDecoderRegistry(r DecoderRegistry) Option { + return optionFunc(func(v *Viper) { + if r == nil { + return + } + + v.decoderRegistry = r + }) +} + +// WithCodecRegistry sets a custom [EncoderRegistry] and [DecoderRegistry]. +func WithCodecRegistry(r CodecRegistry) Option { + return optionFunc(func(v *Viper) { + if r == nil { + return + } + + v.encoderRegistry = r + v.decoderRegistry = r + }) +} + +// DefaultCodecRegistry is a simple implementation of [CodecRegistry] that allows registering custom [Codec]s. +type DefaultCodecRegistry struct { + codecs map[string]Codec + + mu sync.RWMutex + once sync.Once +} + +// NewCodecRegistry returns a new [CodecRegistry], ready to accept custom [Codec]s. +func NewCodecRegistry() *DefaultCodecRegistry { + r := &DefaultCodecRegistry{} + + r.init() + + return r +} + +func (r *DefaultCodecRegistry) init() { + r.once.Do(func() { + r.codecs = map[string]Codec{} + }) +} + +// RegisterCodec registers a custom [Codec]. +// +// Format is case-insensitive. +func (r *DefaultCodecRegistry) RegisterCodec(format string, codec Codec) error { + r.init() + + r.mu.Lock() + defer r.mu.Unlock() + + r.codecs[strings.ToLower(format)] = codec + + return nil +} + +// Encoder implements the [EncoderRegistry] interface. +// +// Format is case-insensitive. +func (r *DefaultCodecRegistry) Encoder(format string) (Encoder, error) { + encoder, ok := r.codec(format) + if !ok { + return nil, errors.New("encoder not found for this format") + } + + return encoder, nil +} + +// Decoder implements the [DecoderRegistry] interface. +// +// Format is case-insensitive. +func (r *DefaultCodecRegistry) Decoder(format string) (Decoder, error) { + decoder, ok := r.codec(format) + if !ok { + return nil, errors.New("decoder not found for this format") + } + + return decoder, nil +} + +func (r *DefaultCodecRegistry) codec(format string) (Codec, bool) { + r.mu.Lock() + defer r.mu.Unlock() + + format = strings.ToLower(format) + + if r.codecs != nil { + codec, ok := r.codecs[format] + if ok { + return codec, true + } + } + + switch format { + case "yaml", "yml": + return yaml.Codec{}, true + + case "json": + return json.Codec{}, true + + case "toml": + return toml.Codec{}, true + + case "dotenv", "env": + return &dotenv.Codec{}, true + } + + return nil, false +} diff --git a/vendor/github.com/spf13/viper/experimental.go b/vendor/github.com/spf13/viper/experimental.go new file mode 100644 index 000000000..6e19e8a10 --- /dev/null +++ b/vendor/github.com/spf13/viper/experimental.go @@ -0,0 +1,8 @@ +package viper + +// ExperimentalBindStruct tells Viper to use the new bind struct feature. 
+func ExperimentalBindStruct() Option { + return optionFunc(func(v *Viper) { + v.experimentalBindStruct = true + }) +} diff --git a/vendor/github.com/spf13/viper/file.go b/vendor/github.com/spf13/viper/file.go index a54fe5a7a..50a40581d 100644 --- a/vendor/github.com/spf13/viper/file.go +++ b/vendor/github.com/spf13/viper/file.go @@ -1,5 +1,3 @@ -//go:build !finder - package viper import ( @@ -7,12 +5,62 @@ import ( "os" "path/filepath" + "github.com/sagikazarmark/locafero" "github.com/spf13/afero" ) +// ExperimentalFinder tells Viper to use the new Finder interface for finding configuration files. +func ExperimentalFinder() Option { + return optionFunc(func(v *Viper) { + v.experimentalFinder = true + }) +} + +// Search for a config file. +func (v *Viper) findConfigFile() (string, error) { + finder := v.finder + + if finder == nil && v.experimentalFinder { + var names []string + + if v.configType != "" { + names = locafero.NameWithOptionalExtensions(v.configName, SupportedExts...) + } else { + names = locafero.NameWithExtensions(v.configName, SupportedExts...) + } + + finder = locafero.Finder{ + Paths: v.configPaths, + Names: names, + Type: locafero.FileTypeFile, + } + } + + if finder != nil { + return v.findConfigFileWithFinder(finder) + } + + return v.findConfigFileOld() +} + +func (v *Viper) findConfigFileWithFinder(finder Finder) (string, error) { + results, err := finder.Find(v.fs) + if err != nil { + return "", err + } + + if len(results) == 0 { + return "", ConfigFileNotFoundError{v.configName, fmt.Sprintf("%s", v.configPaths)} + } + + // We call clean on the final result to ensure that the path is in its canonical form. + // This is mostly for consistent path handling and to make sure tests pass. + return results[0], nil +} + // Search all configPaths for any config file. // Returns the first path that exists (and is a config file). -func (v *Viper) findConfigFile() (string, error) { +func (v *Viper) findConfigFileOld() (string, error) { v.logger.Info("searching for config in paths", "paths", v.configPaths) for _, cp := range v.configPaths { diff --git a/vendor/github.com/spf13/viper/file_finder.go b/vendor/github.com/spf13/viper/file_finder.go deleted file mode 100644 index d96a1bd22..000000000 --- a/vendor/github.com/spf13/viper/file_finder.go +++ /dev/null @@ -1,38 +0,0 @@ -//go:build finder - -package viper - -import ( - "fmt" - - "github.com/sagikazarmark/locafero" -) - -// Search all configPaths for any config file. -// Returns the first path that exists (and is a config file). -func (v *Viper) findConfigFile() (string, error) { - var names []string - - if v.configType != "" { - names = locafero.NameWithOptionalExtensions(v.configName, SupportedExts...) - } else { - names = locafero.NameWithExtensions(v.configName, SupportedExts...) - } - - finder := locafero.Finder{ - Paths: v.configPaths, - Names: names, - Type: locafero.FileTypeFile, - } - - results, err := finder.Find(v.fs) - if err != nil { - return "", err - } - - if len(results) == 0 { - return "", ConfigFileNotFoundError{v.configName, fmt.Sprintf("%s", v.configPaths)} - } - - return results[0], nil -} diff --git a/vendor/github.com/spf13/viper/finder.go b/vendor/github.com/spf13/viper/finder.go new file mode 100644 index 000000000..9b203ea69 --- /dev/null +++ b/vendor/github.com/spf13/viper/finder.go @@ -0,0 +1,55 @@ +package viper + +import ( + "errors" + + "github.com/spf13/afero" +) + +// WithFinder sets a custom [Finder]. 
+func WithFinder(f Finder) Option { + return optionFunc(func(v *Viper) { + if f == nil { + return + } + + v.finder = f + }) +} + +// Finder looks for files and directories in an [afero.Fs] filesystem. +type Finder interface { + Find(fsys afero.Fs) ([]string, error) +} + +// Finders combines multiple finders into one. +func Finders(finders ...Finder) Finder { + return &combinedFinder{finders: finders} +} + +// combinedFinder is a Finder that combines multiple finders. +type combinedFinder struct { + finders []Finder +} + +// Find implements the [Finder] interface. +func (c *combinedFinder) Find(fsys afero.Fs) ([]string, error) { + var results []string + var errs []error + + for _, finder := range c.finders { + if finder == nil { + continue + } + + r, err := finder.Find(fsys) + if err != nil { + errs = append(errs, err) + continue + } + + results = append(results, r...) + } + + return results, errors.Join(errs...) +} diff --git a/vendor/github.com/spf13/viper/flake.lock b/vendor/github.com/spf13/viper/flake.lock index 3840614fa..d76dfbddd 100644 --- a/vendor/github.com/spf13/viper/flake.lock +++ b/vendor/github.com/spf13/viper/flake.lock @@ -1,22 +1,84 @@ { "nodes": { + "cachix": { + "inputs": { + "devenv": "devenv_2", + "flake-compat": [ + "devenv", + "flake-compat" + ], + "nixpkgs": [ + "devenv", + "nixpkgs" + ], + "pre-commit-hooks": [ + "devenv", + "pre-commit-hooks" + ] + }, + "locked": { + "lastModified": 1712055811, + "narHash": "sha256-7FcfMm5A/f02yyzuavJe06zLa9hcMHsagE28ADcmQvk=", + "owner": "cachix", + "repo": "cachix", + "rev": "02e38da89851ec7fec3356a5c04bc8349cae0e30", + "type": "github" + }, + "original": { + "owner": "cachix", + "repo": "cachix", + "type": "github" + } + }, "devenv": { "inputs": { - "flake-compat": "flake-compat", + "cachix": "cachix", + "flake-compat": "flake-compat_2", + "nix": "nix_2", + "nixpkgs": "nixpkgs_2", + "pre-commit-hooks": "pre-commit-hooks" + }, + "locked": { + "lastModified": 1724763216, + "narHash": "sha256-oW2bwCrJpIzibCNK6zfIDaIQw765yMAuMSG2gyZfGv0=", + "owner": "cachix", + "repo": "devenv", + "rev": "1e4ef61205b9aa20fe04bf1c468b6a316281c4f1", + "type": "github" + }, + "original": { + "owner": "cachix", + "repo": "devenv", + "type": "github" + } + }, + "devenv_2": { + "inputs": { + "flake-compat": [ + "devenv", + "cachix", + "flake-compat" + ], "nix": "nix", "nixpkgs": "nixpkgs", - "pre-commit-hooks": "pre-commit-hooks" + "poetry2nix": "poetry2nix", + "pre-commit-hooks": [ + "devenv", + "cachix", + "pre-commit-hooks" + ] }, "locked": { - "lastModified": 1707817777, - "narHash": "sha256-vHyIs1OULQ3/91wD6xOiuayfI71JXALGA5KLnDKAcy0=", + "lastModified": 1708704632, + "narHash": "sha256-w+dOIW60FKMaHI1q5714CSibk99JfYxm0CzTinYWr+Q=", "owner": "cachix", "repo": "devenv", - "rev": "5a30b9e5ac7c6167e61b1f4193d5130bb9f8defa", + "rev": "2ee4450b0f4b95a1b90f2eb5ffea98b90e48c196", "type": "github" }, "original": { "owner": "cachix", + "ref": "python-rewrite", "repo": "devenv", "type": "github" } @@ -37,16 +99,32 @@ "type": "github" } }, + "flake-compat_2": { + "flake": false, + "locked": { + "lastModified": 1696426674, + "narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=", + "owner": "edolstra", + "repo": "flake-compat", + "rev": "0f9255e01c2351cc7d116c072cb317785dd33b33", + "type": "github" + }, + "original": { + "owner": "edolstra", + "repo": "flake-compat", + "type": "github" + } + }, "flake-parts": { "inputs": { "nixpkgs-lib": "nixpkgs-lib" }, "locked": { - "lastModified": 1706830856, - "narHash": 
"sha256-a0NYyp+h9hlb7ddVz4LUn1vT/PLwqfrWYcHMvFB1xYg=", + "lastModified": 1722555600, + "narHash": "sha256-XOQkdLafnb/p9ij77byFQjDf5m5QYl9b2REiVClC+x4=", "owner": "hercules-ci", "repo": "flake-parts", - "rev": "b253292d9c0a5ead9bc98c4e9a26c6312e27d69f", + "rev": "8471fe90ad337a8074e957b69ca4d0089218391d", "type": "github" }, "original": { @@ -60,11 +138,29 @@ "systems": "systems" }, "locked": { - "lastModified": 1685518550, - "narHash": "sha256-o2d0KcvaXzTrPRIo0kOLV0/QXHhDQ5DTi+OxcjO8xqY=", + "lastModified": 1689068808, + "narHash": "sha256-6ixXo3wt24N/melDWjq70UuHQLxGV8jZvooRanIHXw0=", + "owner": "numtide", + "repo": "flake-utils", + "rev": "919d646de7be200f3bf08cb76ae1f09402b6f9b4", + "type": "github" + }, + "original": { + "owner": "numtide", + "repo": "flake-utils", + "type": "github" + } + }, + "flake-utils_2": { + "inputs": { + "systems": "systems_2" + }, + "locked": { + "lastModified": 1710146030, + "narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=", "owner": "numtide", "repo": "flake-utils", - "rev": "a1720a10a6cfe8234c0e93907ffe81be440f4cef", + "rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a", "type": "github" }, "original": { @@ -82,11 +178,11 @@ ] }, "locked": { - "lastModified": 1660459072, - "narHash": "sha256-8DFJjXG8zqoONA1vXtgeKXy68KdJL5UaXR8NtVMUbx8=", + "lastModified": 1709087332, + "narHash": "sha256-HG2cCnktfHsKV0s4XW83gU3F57gaTljL9KNSuG6bnQs=", "owner": "hercules-ci", "repo": "gitignore.nix", - "rev": "a20de23b925fd8264fd7fad6454652e142fd7f73", + "rev": "637db329424fd7e46cf4185293b9cc8c88c95394", "type": "github" }, "original": { @@ -95,53 +191,90 @@ "type": "github" } }, - "lowdown-src": { - "flake": false, + "nix": { + "inputs": { + "flake-compat": "flake-compat", + "nixpkgs": [ + "devenv", + "cachix", + "devenv", + "nixpkgs" + ], + "nixpkgs-regression": "nixpkgs-regression" + }, "locked": { - "lastModified": 1633514407, - "narHash": "sha256-Dw32tiMjdK9t3ETl5fzGrutQTzh2rufgZV4A/BbxuD4=", - "owner": "kristapsdz", - "repo": "lowdown", - "rev": "d2c2b44ff6c27b936ec27358a2653caaef8f73b8", + "lastModified": 1712911606, + "narHash": "sha256-BGvBhepCufsjcUkXnEEXhEVjwdJAwPglCC2+bInc794=", + "owner": "domenkozar", + "repo": "nix", + "rev": "b24a9318ea3f3600c1e24b4a00691ee912d4de12", "type": "github" }, "original": { - "owner": "kristapsdz", - "repo": "lowdown", + "owner": "domenkozar", + "ref": "devenv-2.21", + "repo": "nix", "type": "github" } }, - "nix": { + "nix-github-actions": { + "inputs": { + "nixpkgs": [ + "devenv", + "cachix", + "devenv", + "poetry2nix", + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1688870561, + "narHash": "sha256-4UYkifnPEw1nAzqqPOTL2MvWtm3sNGw1UTYTalkTcGY=", + "owner": "nix-community", + "repo": "nix-github-actions", + "rev": "165b1650b753316aa7f1787f3005a8d2da0f5301", + "type": "github" + }, + "original": { + "owner": "nix-community", + "repo": "nix-github-actions", + "type": "github" + } + }, + "nix_2": { "inputs": { - "lowdown-src": "lowdown-src", + "flake-compat": [ + "devenv", + "flake-compat" + ], "nixpkgs": [ "devenv", "nixpkgs" ], - "nixpkgs-regression": "nixpkgs-regression" + "nixpkgs-regression": "nixpkgs-regression_2" }, "locked": { - "lastModified": 1676545802, - "narHash": "sha256-EK4rZ+Hd5hsvXnzSzk2ikhStJnD63odF7SzsQ8CuSPU=", + "lastModified": 1712911606, + "narHash": "sha256-BGvBhepCufsjcUkXnEEXhEVjwdJAwPglCC2+bInc794=", "owner": "domenkozar", "repo": "nix", - "rev": "7c91803598ffbcfe4a55c44ac6d49b2cf07a527f", + "rev": "b24a9318ea3f3600c1e24b4a00691ee912d4de12", "type": "github" }, "original": { "owner": 
"domenkozar", - "ref": "relaxed-flakes", + "ref": "devenv-2.21", "repo": "nix", "type": "github" } }, "nixpkgs": { "locked": { - "lastModified": 1678875422, - "narHash": "sha256-T3o6NcQPwXjxJMn2shz86Chch4ljXgZn746c2caGxd8=", + "lastModified": 1692808169, + "narHash": "sha256-x9Opq06rIiwdwGeK2Ykj69dNc2IvUH1fY55Wm7atwrE=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "126f49a01de5b7e35a43fd43f891ecf6d3a51459", + "rev": "9201b5ff357e781bf014d0330d18555695df7ba8", "type": "github" }, "original": { @@ -153,23 +286,33 @@ }, "nixpkgs-lib": { "locked": { - "dir": "lib", - "lastModified": 1706550542, - "narHash": "sha256-UcsnCG6wx++23yeER4Hg18CXWbgNpqNXcHIo5/1Y+hc=", + "lastModified": 1722555339, + "narHash": "sha256-uFf2QeW7eAHlYXuDktm9c25OxOyCoUOQmh5SZ9amE5Q=", + "type": "tarball", + "url": "https://github.com/NixOS/nixpkgs/archive/a5d394176e64ab29c852d03346c1fc9b0b7d33eb.tar.gz" + }, + "original": { + "type": "tarball", + "url": "https://github.com/NixOS/nixpkgs/archive/a5d394176e64ab29c852d03346c1fc9b0b7d33eb.tar.gz" + } + }, + "nixpkgs-regression": { + "locked": { + "lastModified": 1643052045, + "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "97b17f32362e475016f942bbdfda4a4a72a8a652", + "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", "type": "github" }, "original": { - "dir": "lib", "owner": "NixOS", - "ref": "nixos-unstable", "repo": "nixpkgs", + "rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2", "type": "github" } }, - "nixpkgs-regression": { + "nixpkgs-regression_2": { "locked": { "lastModified": 1643052045, "narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=", @@ -187,27 +330,43 @@ }, "nixpkgs-stable": { "locked": { - "lastModified": 1685801374, - "narHash": "sha256-otaSUoFEMM+LjBI1XL/xGB5ao6IwnZOXc47qhIgJe8U=", + "lastModified": 1710695816, + "narHash": "sha256-3Eh7fhEID17pv9ZxrPwCLfqXnYP006RKzSs0JptsN84=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "c37ca420157f4abc31e26f436c1145f8951ff373", + "rev": "614b4613980a522ba49f0d194531beddbb7220d3", "type": "github" }, "original": { "owner": "NixOS", - "ref": "nixos-23.05", + "ref": "nixos-23.11", "repo": "nixpkgs", "type": "github" } }, "nixpkgs_2": { "locked": { - "lastModified": 1707939175, - "narHash": "sha256-D1xan0lgxbmXDyzVqXTiSYHLmAMrMRdD+alKzEO/p3w=", + "lastModified": 1713361204, + "narHash": "sha256-TA6EDunWTkc5FvDCqU3W2T3SFn0gRZqh6D/hJnM02MM=", + "owner": "cachix", + "repo": "devenv-nixpkgs", + "rev": "285676e87ad9f0ca23d8714a6ab61e7e027020c6", + "type": "github" + }, + "original": { + "owner": "cachix", + "ref": "rolling", + "repo": "devenv-nixpkgs", + "type": "github" + } + }, + "nixpkgs_3": { + "locked": { + "lastModified": 1724748588, + "narHash": "sha256-NlpGA4+AIf1dKNq76ps90rxowlFXUsV9x7vK/mN37JM=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "f7e8132daca31b1e3859ac0fb49741754375ac3d", + "rev": "a6292e34000dc93d43bccf78338770c1c5ec8a99", "type": "github" }, "original": { @@ -217,13 +376,38 @@ "type": "github" } }, + "poetry2nix": { + "inputs": { + "flake-utils": "flake-utils", + "nix-github-actions": "nix-github-actions", + "nixpkgs": [ + "devenv", + "cachix", + "devenv", + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1692876271, + "narHash": "sha256-IXfZEkI0Mal5y1jr6IRWMqK8GW2/f28xJenZIPQqkY0=", + "owner": "nix-community", + "repo": "poetry2nix", + "rev": "d5006be9c2c2417dafb2e2e5034d83fabd207ee3", + "type": "github" + }, + "original": { + "owner": "nix-community", + "repo": "poetry2nix", + "type": "github" + } + }, 
"pre-commit-hooks": { "inputs": { "flake-compat": [ "devenv", "flake-compat" ], - "flake-utils": "flake-utils", + "flake-utils": "flake-utils_2", "gitignore": "gitignore", "nixpkgs": [ "devenv", @@ -232,11 +416,11 @@ "nixpkgs-stable": "nixpkgs-stable" }, "locked": { - "lastModified": 1704725188, - "narHash": "sha256-qq8NbkhRZF1vVYQFt1s8Mbgo8knj+83+QlL5LBnYGpI=", + "lastModified": 1713775815, + "narHash": "sha256-Wu9cdYTnGQQwtT20QQMg7jzkANKQjwBD9iccfGKkfls=", "owner": "cachix", "repo": "pre-commit-hooks.nix", - "rev": "ea96f0c05924341c551a797aaba8126334c505d2", + "rev": "2ac4dcbf55ed43f3be0bae15e181f08a57af24a4", "type": "github" }, "original": { @@ -249,7 +433,7 @@ "inputs": { "devenv": "devenv", "flake-parts": "flake-parts", - "nixpkgs": "nixpkgs_2" + "nixpkgs": "nixpkgs_3" } }, "systems": { @@ -266,6 +450,21 @@ "repo": "default", "type": "github" } + }, + "systems_2": { + "locked": { + "lastModified": 1681028828, + "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", + "owner": "nix-systems", + "repo": "default", + "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", + "type": "github" + }, + "original": { + "owner": "nix-systems", + "repo": "default", + "type": "github" + } } }, "root": "root", diff --git a/vendor/github.com/spf13/viper/flake.nix b/vendor/github.com/spf13/viper/flake.nix index 0230668cf..52ad7d581 100644 --- a/vendor/github.com/spf13/viper/flake.nix +++ b/vendor/github.com/spf13/viper/flake.nix @@ -20,7 +20,7 @@ default = { languages = { go.enable = true; - go.package = pkgs.go_1_22; + go.package = pkgs.go_1_23; }; pre-commit.hooks = { diff --git a/vendor/github.com/spf13/viper/internal/encoding/decoder.go b/vendor/github.com/spf13/viper/internal/encoding/decoder.go deleted file mode 100644 index 8a7b1dbc9..000000000 --- a/vendor/github.com/spf13/viper/internal/encoding/decoder.go +++ /dev/null @@ -1,61 +0,0 @@ -package encoding - -import ( - "sync" -) - -// Decoder decodes the contents of b into v. -// It's primarily used for decoding contents of a file into a map[string]any. -type Decoder interface { - Decode(b []byte, v map[string]any) error -} - -const ( - // ErrDecoderNotFound is returned when there is no decoder registered for a format. - ErrDecoderNotFound = encodingError("decoder not found for this format") - - // ErrDecoderFormatAlreadyRegistered is returned when an decoder is already registered for a format. - ErrDecoderFormatAlreadyRegistered = encodingError("decoder already registered for this format") -) - -// DecoderRegistry can choose an appropriate Decoder based on the provided format. -type DecoderRegistry struct { - decoders map[string]Decoder - - mu sync.RWMutex -} - -// NewDecoderRegistry returns a new, initialized DecoderRegistry. -func NewDecoderRegistry() *DecoderRegistry { - return &DecoderRegistry{ - decoders: make(map[string]Decoder), - } -} - -// RegisterDecoder registers a Decoder for a format. -// Registering a Decoder for an already existing format is not supported. -func (e *DecoderRegistry) RegisterDecoder(format string, enc Decoder) error { - e.mu.Lock() - defer e.mu.Unlock() - - if _, ok := e.decoders[format]; ok { - return ErrDecoderFormatAlreadyRegistered - } - - e.decoders[format] = enc - - return nil -} - -// Decode calls the underlying Decoder based on the format. 
-func (e *DecoderRegistry) Decode(format string, b []byte, v map[string]any) error { - e.mu.RLock() - decoder, ok := e.decoders[format] - e.mu.RUnlock() - - if !ok { - return ErrDecoderNotFound - } - - return decoder.Decode(b, v) -} diff --git a/vendor/github.com/spf13/viper/internal/encoding/encoder.go b/vendor/github.com/spf13/viper/internal/encoding/encoder.go deleted file mode 100644 index 659585962..000000000 --- a/vendor/github.com/spf13/viper/internal/encoding/encoder.go +++ /dev/null @@ -1,60 +0,0 @@ -package encoding - -import ( - "sync" -) - -// Encoder encodes the contents of v into a byte representation. -// It's primarily used for encoding a map[string]any into a file format. -type Encoder interface { - Encode(v map[string]any) ([]byte, error) -} - -const ( - // ErrEncoderNotFound is returned when there is no encoder registered for a format. - ErrEncoderNotFound = encodingError("encoder not found for this format") - - // ErrEncoderFormatAlreadyRegistered is returned when an encoder is already registered for a format. - ErrEncoderFormatAlreadyRegistered = encodingError("encoder already registered for this format") -) - -// EncoderRegistry can choose an appropriate Encoder based on the provided format. -type EncoderRegistry struct { - encoders map[string]Encoder - - mu sync.RWMutex -} - -// NewEncoderRegistry returns a new, initialized EncoderRegistry. -func NewEncoderRegistry() *EncoderRegistry { - return &EncoderRegistry{ - encoders: make(map[string]Encoder), - } -} - -// RegisterEncoder registers an Encoder for a format. -// Registering a Encoder for an already existing format is not supported. -func (e *EncoderRegistry) RegisterEncoder(format string, enc Encoder) error { - e.mu.Lock() - defer e.mu.Unlock() - - if _, ok := e.encoders[format]; ok { - return ErrEncoderFormatAlreadyRegistered - } - - e.encoders[format] = enc - - return nil -} - -func (e *EncoderRegistry) Encode(format string, v map[string]any) ([]byte, error) { - e.mu.RLock() - encoder, ok := e.encoders[format] - e.mu.RUnlock() - - if !ok { - return nil, ErrEncoderNotFound - } - - return encoder.Encode(v) -} diff --git a/vendor/github.com/spf13/viper/internal/encoding/error.go b/vendor/github.com/spf13/viper/internal/encoding/error.go deleted file mode 100644 index e4cde02d7..000000000 --- a/vendor/github.com/spf13/viper/internal/encoding/error.go +++ /dev/null @@ -1,7 +0,0 @@ -package encoding - -type encodingError string - -func (e encodingError) Error() string { - return string(e) -} diff --git a/vendor/github.com/spf13/viper/internal/encoding/hcl/codec.go b/vendor/github.com/spf13/viper/internal/encoding/hcl/codec.go deleted file mode 100644 index d7fa8a1b7..000000000 --- a/vendor/github.com/spf13/viper/internal/encoding/hcl/codec.go +++ /dev/null @@ -1,40 +0,0 @@ -package hcl - -import ( - "bytes" - "encoding/json" - - "github.com/hashicorp/hcl" - "github.com/hashicorp/hcl/hcl/printer" -) - -// Codec implements the encoding.Encoder and encoding.Decoder interfaces for HCL encoding. -// TODO: add printer config to the codec? -type Codec struct{} - -func (Codec) Encode(v map[string]any) ([]byte, error) { - b, err := json.Marshal(v) - if err != nil { - return nil, err - } - - // TODO: use printer.Format? Is the trailing newline an issue? 
- - ast, err := hcl.Parse(string(b)) - if err != nil { - return nil, err - } - - var buf bytes.Buffer - - err = printer.Fprint(&buf, ast.Node) - if err != nil { - return nil, err - } - - return buf.Bytes(), nil -} - -func (Codec) Decode(b []byte, v map[string]any) error { - return hcl.Unmarshal(b, &v) -} diff --git a/vendor/github.com/spf13/viper/internal/encoding/ini/codec.go b/vendor/github.com/spf13/viper/internal/encoding/ini/codec.go deleted file mode 100644 index d91cf59d2..000000000 --- a/vendor/github.com/spf13/viper/internal/encoding/ini/codec.go +++ /dev/null @@ -1,99 +0,0 @@ -package ini - -import ( - "bytes" - "sort" - "strings" - - "github.com/spf13/cast" - "gopkg.in/ini.v1" -) - -// LoadOptions contains all customized options used for load data source(s). -// This type is added here for convenience: this way consumers can import a single package called "ini". -type LoadOptions = ini.LoadOptions - -// Codec implements the encoding.Encoder and encoding.Decoder interfaces for INI encoding. -type Codec struct { - KeyDelimiter string - LoadOptions LoadOptions -} - -func (c Codec) Encode(v map[string]any) ([]byte, error) { - cfg := ini.Empty() - ini.PrettyFormat = false - - flattened := map[string]any{} - - flattened = flattenAndMergeMap(flattened, v, "", c.keyDelimiter()) - - keys := make([]string, 0, len(flattened)) - - for key := range flattened { - keys = append(keys, key) - } - - sort.Strings(keys) - - for _, key := range keys { - sectionName, keyName := "", key - - lastSep := strings.LastIndex(key, ".") - if lastSep != -1 { - sectionName = key[:(lastSep)] - keyName = key[(lastSep + 1):] - } - - // TODO: is this a good idea? - if sectionName == "default" { - sectionName = "" - } - - cfg.Section(sectionName).Key(keyName).SetValue(cast.ToString(flattened[key])) - } - - var buf bytes.Buffer - - _, err := cfg.WriteTo(&buf) - if err != nil { - return nil, err - } - - return buf.Bytes(), nil -} - -func (c Codec) Decode(b []byte, v map[string]any) error { - cfg := ini.Empty(c.LoadOptions) - - err := cfg.Append(b) - if err != nil { - return err - } - - sections := cfg.Sections() - - for i := 0; i < len(sections); i++ { - section := sections[i] - keys := section.Keys() - - for j := 0; j < len(keys); j++ { - key := keys[j] - value := cfg.Section(section.Name()).Key(key.Name()).String() - - deepestMap := deepSearch(v, strings.Split(section.Name(), c.keyDelimiter())) - - // set innermost value - deepestMap[key.Name()] = value - } - } - - return nil -} - -func (c Codec) keyDelimiter() string { - if c.KeyDelimiter == "" { - return "." - } - - return c.KeyDelimiter -} diff --git a/vendor/github.com/spf13/viper/internal/encoding/ini/map_utils.go b/vendor/github.com/spf13/viper/internal/encoding/ini/map_utils.go deleted file mode 100644 index 490ab594e..000000000 --- a/vendor/github.com/spf13/viper/internal/encoding/ini/map_utils.go +++ /dev/null @@ -1,74 +0,0 @@ -package ini - -import ( - "strings" - - "github.com/spf13/cast" -) - -// THIS CODE IS COPIED HERE: IT SHOULD NOT BE MODIFIED -// AT SOME POINT IT WILL BE MOVED TO A COMMON PLACE -// deepSearch scans deep maps, following the key indexes listed in the -// sequence "path". -// The last value is expected to be another map, and is returned. -// -// In case intermediate keys do not exist, or map to a non-map value, -// a new map is created and inserted, and the search continues from there: -// the initial map "m" may be modified! 
-func deepSearch(m map[string]any, path []string) map[string]any { - for _, k := range path { - m2, ok := m[k] - if !ok { - // intermediate key does not exist - // => create it and continue from there - m3 := make(map[string]any) - m[k] = m3 - m = m3 - continue - } - m3, ok := m2.(map[string]any) - if !ok { - // intermediate key is a value - // => replace with a new map - m3 = make(map[string]any) - m[k] = m3 - } - // continue search from here - m = m3 - } - return m -} - -// flattenAndMergeMap recursively flattens the given map into a new map -// Code is based on the function with the same name in the main package. -// TODO: move it to a common place. -func flattenAndMergeMap(shadow, m map[string]any, prefix, delimiter string) map[string]any { - if shadow != nil && prefix != "" && shadow[prefix] != nil { - // prefix is shadowed => nothing more to flatten - return shadow - } - if shadow == nil { - shadow = make(map[string]any) - } - - var m2 map[string]any - if prefix != "" { - prefix += delimiter - } - for k, val := range m { - fullKey := prefix + k - switch val := val.(type) { - case map[string]any: - m2 = val - case map[any]any: - m2 = cast.ToStringMap(val) - default: - // immediate value - shadow[strings.ToLower(fullKey)] = val - continue - } - // recursively merge to shadow map - shadow = flattenAndMergeMap(shadow, m2, fullKey, delimiter) - } - return shadow -} diff --git a/vendor/github.com/spf13/viper/internal/encoding/javaproperties/codec.go b/vendor/github.com/spf13/viper/internal/encoding/javaproperties/codec.go deleted file mode 100644 index e92e5172c..000000000 --- a/vendor/github.com/spf13/viper/internal/encoding/javaproperties/codec.go +++ /dev/null @@ -1,86 +0,0 @@ -package javaproperties - -import ( - "bytes" - "sort" - "strings" - - "github.com/magiconair/properties" - "github.com/spf13/cast" -) - -// Codec implements the encoding.Encoder and encoding.Decoder interfaces for Java properties encoding. -type Codec struct { - KeyDelimiter string - - // Store read properties on the object so that we can write back in order with comments. - // This will only be used if the configuration read is a properties file. 
- // TODO: drop this feature in v2 - // TODO: make use of the global properties object optional - Properties *properties.Properties -} - -func (c *Codec) Encode(v map[string]any) ([]byte, error) { - if c.Properties == nil { - c.Properties = properties.NewProperties() - } - - flattened := map[string]any{} - - flattened = flattenAndMergeMap(flattened, v, "", c.keyDelimiter()) - - keys := make([]string, 0, len(flattened)) - - for key := range flattened { - keys = append(keys, key) - } - - sort.Strings(keys) - - for _, key := range keys { - _, _, err := c.Properties.Set(key, cast.ToString(flattened[key])) - if err != nil { - return nil, err - } - } - - var buf bytes.Buffer - - _, err := c.Properties.WriteComment(&buf, "#", properties.UTF8) - if err != nil { - return nil, err - } - - return buf.Bytes(), nil -} - -func (c *Codec) Decode(b []byte, v map[string]any) error { - var err error - c.Properties, err = properties.Load(b, properties.UTF8) - if err != nil { - return err - } - - for _, key := range c.Properties.Keys() { - // ignore existence check: we know it's there - value, _ := c.Properties.Get(key) - - // recursively build nested maps - path := strings.Split(key, c.keyDelimiter()) - lastKey := strings.ToLower(path[len(path)-1]) - deepestMap := deepSearch(v, path[0:len(path)-1]) - - // set innermost value - deepestMap[lastKey] = value - } - - return nil -} - -func (c Codec) keyDelimiter() string { - if c.KeyDelimiter == "" { - return "." - } - - return c.KeyDelimiter -} diff --git a/vendor/github.com/spf13/viper/internal/encoding/javaproperties/map_utils.go b/vendor/github.com/spf13/viper/internal/encoding/javaproperties/map_utils.go deleted file mode 100644 index 6e1aff223..000000000 --- a/vendor/github.com/spf13/viper/internal/encoding/javaproperties/map_utils.go +++ /dev/null @@ -1,74 +0,0 @@ -package javaproperties - -import ( - "strings" - - "github.com/spf13/cast" -) - -// THIS CODE IS COPIED HERE: IT SHOULD NOT BE MODIFIED -// AT SOME POINT IT WILL BE MOVED TO A COMMON PLACE -// deepSearch scans deep maps, following the key indexes listed in the -// sequence "path". -// The last value is expected to be another map, and is returned. -// -// In case intermediate keys do not exist, or map to a non-map value, -// a new map is created and inserted, and the search continues from there: -// the initial map "m" may be modified! -func deepSearch(m map[string]any, path []string) map[string]any { - for _, k := range path { - m2, ok := m[k] - if !ok { - // intermediate key does not exist - // => create it and continue from there - m3 := make(map[string]any) - m[k] = m3 - m = m3 - continue - } - m3, ok := m2.(map[string]any) - if !ok { - // intermediate key is a value - // => replace with a new map - m3 = make(map[string]any) - m[k] = m3 - } - // continue search from here - m = m3 - } - return m -} - -// flattenAndMergeMap recursively flattens the given map into a new map -// Code is based on the function with the same name in the main package. -// TODO: move it to a common place. 
-func flattenAndMergeMap(shadow, m map[string]any, prefix, delimiter string) map[string]any { - if shadow != nil && prefix != "" && shadow[prefix] != nil { - // prefix is shadowed => nothing more to flatten - return shadow - } - if shadow == nil { - shadow = make(map[string]any) - } - - var m2 map[string]any - if prefix != "" { - prefix += delimiter - } - for k, val := range m { - fullKey := prefix + k - switch val := val.(type) { - case map[string]any: - m2 = val - case map[any]any: - m2 = cast.ToStringMap(val) - default: - // immediate value - shadow[strings.ToLower(fullKey)] = val - continue - } - // recursively merge to shadow map - shadow = flattenAndMergeMap(shadow, m2, fullKey, delimiter) - } - return shadow -} diff --git a/vendor/github.com/spf13/viper/internal/features/finder.go b/vendor/github.com/spf13/viper/internal/features/finder.go new file mode 100644 index 000000000..983ea3a9d --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/features/finder.go @@ -0,0 +1,5 @@ +//go:build viper_finder + +package features + +const Finder = true diff --git a/vendor/github.com/spf13/viper/internal/features/finder_default.go b/vendor/github.com/spf13/viper/internal/features/finder_default.go new file mode 100644 index 000000000..89bcb06ee --- /dev/null +++ b/vendor/github.com/spf13/viper/internal/features/finder_default.go @@ -0,0 +1,5 @@ +//go:build !viper_finder + +package features + +const Finder = false diff --git a/vendor/github.com/spf13/viper/logger.go b/vendor/github.com/spf13/viper/logger.go index 8938053b3..828042f29 100644 --- a/vendor/github.com/spf13/viper/logger.go +++ b/vendor/github.com/spf13/viper/logger.go @@ -2,46 +2,9 @@ package viper import ( "context" - - slog "github.com/sagikazarmark/slog-shim" + "log/slog" ) -// Logger is a unified interface for various logging use cases and practices, including: -// - leveled logging -// - structured logging -// -// Deprecated: use `log/slog` instead. -type Logger interface { - // Trace logs a Trace event. - // - // Even more fine-grained information than Debug events. - // Loggers not supporting this level should fall back to Debug. - Trace(msg string, keyvals ...any) - - // Debug logs a Debug event. - // - // A verbose series of information events. - // They are useful when debugging the system. - Debug(msg string, keyvals ...any) - - // Info logs an Info event. - // - // General information about what's happening inside the system. - Info(msg string, keyvals ...any) - - // Warn logs a Warn(ing) event. - // - // Non-critical events that should be looked at. - Warn(msg string, keyvals ...any) - - // Error logs an Error event. - // - // Critical events that require immediate attention. - // Loggers commonly provide Fatal and Panic levels above Error level, - // but exiting and panicking is out of scope for a logging library. - Error(msg string, keyvals ...any) -} - // WithLogger sets a custom logger. func WithLogger(l *slog.Logger) Option { return optionFunc(func(v *Viper) { diff --git a/vendor/github.com/spf13/viper/remote.go b/vendor/github.com/spf13/viper/remote.go new file mode 100644 index 000000000..bdde7de26 --- /dev/null +++ b/vendor/github.com/spf13/viper/remote.go @@ -0,0 +1,256 @@ +package viper + +import ( + "bytes" + "fmt" + "io" + "reflect" + "slices" +) + +// SupportedRemoteProviders are universally supported remote providers. 
+var SupportedRemoteProviders = []string{"etcd", "etcd3", "consul", "firestore", "nats"} + +func resetRemote() { + SupportedRemoteProviders = []string{"etcd", "etcd3", "consul", "firestore", "nats"} +} + +type remoteConfigFactory interface { + Get(rp RemoteProvider) (io.Reader, error) + Watch(rp RemoteProvider) (io.Reader, error) + WatchChannel(rp RemoteProvider) (<-chan *RemoteResponse, chan bool) +} + +type RemoteResponse struct { + Value []byte + Error error +} + +// RemoteConfig is optional, see the remote package. +var RemoteConfig remoteConfigFactory + +// UnsupportedRemoteProviderError denotes encountering an unsupported remote +// provider. Currently only etcd and Consul are supported. +type UnsupportedRemoteProviderError string + +// Error returns the formatted remote provider error. +func (str UnsupportedRemoteProviderError) Error() string { + return fmt.Sprintf("Unsupported Remote Provider Type %q", string(str)) +} + +// RemoteConfigError denotes encountering an error while trying to +// pull the configuration from the remote provider. +type RemoteConfigError string + +// Error returns the formatted remote provider error. +func (rce RemoteConfigError) Error() string { + return fmt.Sprintf("Remote Configurations Error: %s", string(rce)) +} + +type defaultRemoteProvider struct { + provider string + endpoint string + path string + secretKeyring string +} + +func (rp defaultRemoteProvider) Provider() string { + return rp.provider +} + +func (rp defaultRemoteProvider) Endpoint() string { + return rp.endpoint +} + +func (rp defaultRemoteProvider) Path() string { + return rp.path +} + +func (rp defaultRemoteProvider) SecretKeyring() string { + return rp.secretKeyring +} + +// RemoteProvider stores the configuration necessary +// to connect to a remote key/value store. +// Optional secretKeyring to unencrypt encrypted values +// can be provided. +type RemoteProvider interface { + Provider() string + Endpoint() string + Path() string + SecretKeyring() string +} + +// AddRemoteProvider adds a remote configuration source. +// Remote Providers are searched in the order they are added. +// provider is a string value: "etcd", "etcd3", "consul", "firestore" or "nats" are currently supported. +// endpoint is the url. etcd requires http://ip:port, consul requires ip:port, nats requires nats://ip:port +// path is the path in the k/v store to retrieve configuration +// To retrieve a config file called myapp.json from /configs/myapp.json +// you should set path to /configs and set config name (SetConfigName()) to +// "myapp". +func AddRemoteProvider(provider, endpoint, path string) error { + return v.AddRemoteProvider(provider, endpoint, path) +} + +func (v *Viper) AddRemoteProvider(provider, endpoint, path string) error { + if !slices.Contains(SupportedRemoteProviders, provider) { + return UnsupportedRemoteProviderError(provider) + } + if provider != "" && endpoint != "" { + v.logger.Info("adding remote provider", "provider", provider, "endpoint", endpoint) + + rp := &defaultRemoteProvider{ + endpoint: endpoint, + provider: provider, + path: path, + } + if !v.providerPathExists(rp) { + v.remoteProviders = append(v.remoteProviders, rp) + } + } + return nil +} + +// AddSecureRemoteProvider adds a remote configuration source. +// Secure Remote Providers are searched in the order they are added. +// provider is a string value: "etcd", "etcd3", "consul", "firestore" or "nats" are currently supported. +// endpoint is the url. 
etcd requires http://ip:port consul requires ip:port +// secretkeyring is the filepath to your openpgp secret keyring. e.g. /etc/secrets/myring.gpg +// path is the path in the k/v store to retrieve configuration +// To retrieve a config file called myapp.json from /configs/myapp.json +// you should set path to /configs and set config name (SetConfigName()) to +// "myapp". +// Secure Remote Providers are implemented with github.com/sagikazarmark/crypt. +func AddSecureRemoteProvider(provider, endpoint, path, secretkeyring string) error { + return v.AddSecureRemoteProvider(provider, endpoint, path, secretkeyring) +} + +func (v *Viper) AddSecureRemoteProvider(provider, endpoint, path, secretkeyring string) error { + if !slices.Contains(SupportedRemoteProviders, provider) { + return UnsupportedRemoteProviderError(provider) + } + if provider != "" && endpoint != "" { + v.logger.Info("adding remote provider", "provider", provider, "endpoint", endpoint) + + rp := &defaultRemoteProvider{ + endpoint: endpoint, + provider: provider, + path: path, + secretKeyring: secretkeyring, + } + if !v.providerPathExists(rp) { + v.remoteProviders = append(v.remoteProviders, rp) + } + } + return nil +} + +func (v *Viper) providerPathExists(p *defaultRemoteProvider) bool { + for _, y := range v.remoteProviders { + if reflect.DeepEqual(y, p) { + return true + } + } + return false +} + +// ReadRemoteConfig attempts to get configuration from a remote source +// and read it in the remote configuration registry. +func ReadRemoteConfig() error { return v.ReadRemoteConfig() } + +func (v *Viper) ReadRemoteConfig() error { + return v.getKeyValueConfig() +} + +func WatchRemoteConfig() error { return v.WatchRemoteConfig() } +func (v *Viper) WatchRemoteConfig() error { + return v.watchKeyValueConfig() +} + +func (v *Viper) WatchRemoteConfigOnChannel() error { + return v.watchKeyValueConfigOnChannel() +} + +// Retrieve the first found remote configuration. +func (v *Viper) getKeyValueConfig() error { + if RemoteConfig == nil { + return RemoteConfigError("Enable the remote features by doing a blank import of the viper/remote package: '_ github.com/spf13/viper/remote'") + } + + if len(v.remoteProviders) == 0 { + return RemoteConfigError("No Remote Providers") + } + + for _, rp := range v.remoteProviders { + val, err := v.getRemoteConfig(rp) + if err != nil { + v.logger.Error(fmt.Errorf("get remote config: %w", err).Error()) + + continue + } + + v.kvstore = val + + return nil + } + return RemoteConfigError("No Files Found") +} + +func (v *Viper) getRemoteConfig(provider RemoteProvider) (map[string]any, error) { + reader, err := RemoteConfig.Get(provider) + if err != nil { + return nil, err + } + err = v.unmarshalReader(reader, v.kvstore) + return v.kvstore, err +} + +// Retrieve the first found remote configuration. +func (v *Viper) watchKeyValueConfigOnChannel() error { + if len(v.remoteProviders) == 0 { + return RemoteConfigError("No Remote Providers") + } + + for _, rp := range v.remoteProviders { + respc, _ := RemoteConfig.WatchChannel(rp) + // Todo: Add quit channel + go func(rc <-chan *RemoteResponse) { + for { + b := <-rc + reader := bytes.NewReader(b.Value) + v.unmarshalReader(reader, v.kvstore) + } + }(respc) + return nil + } + return RemoteConfigError("No Files Found") +} + +// Retrieve the first found remote configuration. 
+func (v *Viper) watchKeyValueConfig() error { + if len(v.remoteProviders) == 0 { + return RemoteConfigError("No Remote Providers") + } + + for _, rp := range v.remoteProviders { + val, err := v.watchRemoteConfig(rp) + if err != nil { + v.logger.Error(fmt.Errorf("watch remote config: %w", err).Error()) + + continue + } + v.kvstore = val + return nil + } + return RemoteConfigError("No Files Found") +} + +func (v *Viper) watchRemoteConfig(provider RemoteProvider) (map[string]any, error) { + reader, err := RemoteConfig.Watch(provider) + if err != nil { + return nil, err + } + err = v.unmarshalReader(reader, v.kvstore) + return v.kvstore, err +} diff --git a/vendor/github.com/spf13/viper/util.go b/vendor/github.com/spf13/viper/util.go index 117c6ac31..2a08074bc 100644 --- a/vendor/github.com/spf13/viper/util.go +++ b/vendor/github.com/spf13/viper/util.go @@ -12,13 +12,13 @@ package viper import ( "fmt" + "log/slog" "os" "path/filepath" "runtime" "strings" "unicode" - slog "github.com/sagikazarmark/slog-shim" "github.com/spf13/cast" ) @@ -128,15 +128,6 @@ func absPathify(logger *slog.Logger, inPath string) string { return "" } -func stringInSlice(a string, list []string) bool { - for _, b := range list { - if b == a { - return true - } - } - return false -} - func userHomeDir() string { if runtime.GOOS == "windows" { home := os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH") diff --git a/vendor/github.com/spf13/viper/viper.go b/vendor/github.com/spf13/viper/viper.go index da68d9944..f900e58b1 100644 --- a/vendor/github.com/spf13/viper/viper.go +++ b/vendor/github.com/spf13/viper/viper.go @@ -25,29 +25,22 @@ import ( "errors" "fmt" "io" + "log/slog" "os" "path/filepath" "reflect" + "slices" "strconv" "strings" "sync" "time" "github.com/fsnotify/fsnotify" - "github.com/mitchellh/mapstructure" - slog "github.com/sagikazarmark/slog-shim" + "github.com/go-viper/mapstructure/v2" "github.com/spf13/afero" "github.com/spf13/cast" "github.com/spf13/pflag" - "github.com/spf13/viper/internal/encoding" - "github.com/spf13/viper/internal/encoding/dotenv" - "github.com/spf13/viper/internal/encoding/hcl" - "github.com/spf13/viper/internal/encoding/ini" - "github.com/spf13/viper/internal/encoding/javaproperties" - "github.com/spf13/viper/internal/encoding/json" - "github.com/spf13/viper/internal/encoding/toml" - "github.com/spf13/viper/internal/encoding/yaml" "github.com/spf13/viper/internal/features" ) @@ -63,24 +56,10 @@ func (e ConfigMarshalError) Error() string { var v *Viper -type RemoteResponse struct { - Value []byte - Error error -} - func init() { v = New() } -type remoteConfigFactory interface { - Get(rp RemoteProvider) (io.Reader, error) - Watch(rp RemoteProvider) (io.Reader, error) - WatchChannel(rp RemoteProvider) (<-chan *RemoteResponse, chan bool) -} - -// RemoteConfig is optional, see the remote package. -var RemoteConfig remoteConfigFactory - // UnsupportedConfigError denotes encountering an unsupported // configuration filetype. type UnsupportedConfigError string @@ -90,24 +69,6 @@ func (str UnsupportedConfigError) Error() string { return fmt.Sprintf("Unsupported Config Type %q", string(str)) } -// UnsupportedRemoteProviderError denotes encountering an unsupported remote -// provider. Currently only etcd and Consul are supported. -type UnsupportedRemoteProviderError string - -// Error returns the formatted remote provider error. 
-func (str UnsupportedRemoteProviderError) Error() string { - return fmt.Sprintf("Unsupported Remote Provider Type %q", string(str)) -} - -// RemoteConfigError denotes encountering an error while trying to -// pull the configuration from the remote provider. -type RemoteConfigError string - -// Error returns the formatted remote provider error. -func (rce RemoteConfigError) Error() string { - return fmt.Sprintf("Remote Configurations Error: %s", string(rce)) -} - // ConfigFileNotFoundError denotes failing to find configuration file. type ConfigFileNotFoundError struct { name, locations string @@ -190,6 +151,8 @@ type Viper struct { // The filesystem to read config from. fs afero.Fs + finder Finder + // A set of remote providers to search for the configuration remoteProviders []*defaultRemoteProvider @@ -200,9 +163,6 @@ type Viper struct { configPermissions os.FileMode envPrefix string - // Specific commands for ini parsing - iniLoadOptions ini.LoadOptions - automaticEnvApplied bool envKeyReplacer StringReplacer allowEmptyEnv bool @@ -221,9 +181,13 @@ type Viper struct { logger *slog.Logger - // TODO: should probably be protected with a mutex - encoderRegistry *encoding.EncoderRegistry - decoderRegistry *encoding.DecoderRegistry + encoderRegistry EncoderRegistry + decoderRegistry DecoderRegistry + + decodeHook mapstructure.DecodeHookFunc + + experimentalFinder bool + experimentalBindStruct bool } // New returns an initialized Viper instance. @@ -244,7 +208,13 @@ func New() *Viper { v.typeByDefValue = false v.logger = slog.New(&discardHandler{}) - v.resetEncoding() + codecRegistry := NewCodecRegistry() + + v.encoderRegistry = codecRegistry + v.decoderRegistry = codecRegistry + + v.experimentalFinder = features.Finder + v.experimentalBindStruct = features.BindStruct return v } @@ -280,10 +250,25 @@ type StringReplacer interface { // EnvKeyReplacer sets a replacer used for mapping environment variables to internal keys. func EnvKeyReplacer(r StringReplacer) Option { return optionFunc(func(v *Viper) { + if r == nil { + return + } + v.envKeyReplacer = r }) } +// WithDecodeHook sets a default decode hook for mapstructure. +func WithDecodeHook(h mapstructure.DecodeHookFunc) Option { + return optionFunc(func(v *Viper) { + if h == nil { + return + } + + v.decodeHook = h + }) +} + // NewWithOptions creates a new Viper instance. func NewWithOptions(opts ...Option) *Viper { v := New() @@ -292,138 +277,32 @@ func NewWithOptions(opts ...Option) *Viper { opt.apply(v) } - v.resetEncoding() - return v } +// SetOptions sets the options on the global Viper instance. +// +// Be careful when using this function: subsequent calls may override options you set. +// It's always better to use a local Viper instance. +func SetOptions(opts ...Option) { + for _, opt := range opts { + opt.apply(v) + } +} + // Reset is intended for testing, will reset all to default settings. // In the public interface for the viper package so applications // can use it in their testing as well. func Reset() { v = New() SupportedExts = []string{"json", "toml", "yaml", "yml", "properties", "props", "prop", "hcl", "tfvars", "dotenv", "env", "ini"} - SupportedRemoteProviders = []string{"etcd", "etcd3", "consul", "firestore", "nats"} -} - -// TODO: make this lazy initialization instead. 
-func (v *Viper) resetEncoding() { - encoderRegistry := encoding.NewEncoderRegistry() - decoderRegistry := encoding.NewDecoderRegistry() - - { - codec := yaml.Codec{} - - encoderRegistry.RegisterEncoder("yaml", codec) - decoderRegistry.RegisterDecoder("yaml", codec) - - encoderRegistry.RegisterEncoder("yml", codec) - decoderRegistry.RegisterDecoder("yml", codec) - } - - { - codec := json.Codec{} - - encoderRegistry.RegisterEncoder("json", codec) - decoderRegistry.RegisterDecoder("json", codec) - } - - { - codec := toml.Codec{} - - encoderRegistry.RegisterEncoder("toml", codec) - decoderRegistry.RegisterDecoder("toml", codec) - } - - { - codec := hcl.Codec{} - - encoderRegistry.RegisterEncoder("hcl", codec) - decoderRegistry.RegisterDecoder("hcl", codec) - - encoderRegistry.RegisterEncoder("tfvars", codec) - decoderRegistry.RegisterDecoder("tfvars", codec) - } - - { - codec := ini.Codec{ - KeyDelimiter: v.keyDelim, - LoadOptions: v.iniLoadOptions, - } - - encoderRegistry.RegisterEncoder("ini", codec) - decoderRegistry.RegisterDecoder("ini", codec) - } - - { - codec := &javaproperties.Codec{ - KeyDelimiter: v.keyDelim, - } - - encoderRegistry.RegisterEncoder("properties", codec) - decoderRegistry.RegisterDecoder("properties", codec) - - encoderRegistry.RegisterEncoder("props", codec) - decoderRegistry.RegisterDecoder("props", codec) - - encoderRegistry.RegisterEncoder("prop", codec) - decoderRegistry.RegisterDecoder("prop", codec) - } - - { - codec := &dotenv.Codec{} - - encoderRegistry.RegisterEncoder("dotenv", codec) - decoderRegistry.RegisterDecoder("dotenv", codec) - - encoderRegistry.RegisterEncoder("env", codec) - decoderRegistry.RegisterDecoder("env", codec) - } - v.encoderRegistry = encoderRegistry - v.decoderRegistry = decoderRegistry -} - -type defaultRemoteProvider struct { - provider string - endpoint string - path string - secretKeyring string -} - -func (rp defaultRemoteProvider) Provider() string { - return rp.provider -} - -func (rp defaultRemoteProvider) Endpoint() string { - return rp.endpoint -} - -func (rp defaultRemoteProvider) Path() string { - return rp.path -} - -func (rp defaultRemoteProvider) SecretKeyring() string { - return rp.secretKeyring -} - -// RemoteProvider stores the configuration necessary -// to connect to a remote key/value store. -// Optional secretKeyring to unencrypt encrypted values -// can be provided. -type RemoteProvider interface { - Provider() string - Endpoint() string - Path() string - SecretKeyring() string + resetRemote() } // SupportedExts are universally supported extensions. var SupportedExts = []string{"json", "toml", "yaml", "yml", "properties", "props", "prop", "hcl", "tfvars", "dotenv", "env", "ini"} -// SupportedRemoteProviders are universally supported remote providers. -var SupportedRemoteProviders = []string{"etcd", "etcd3", "consul", "firestore", "nats"} - // OnConfigChange sets the event handler that is called when a config file changes. 
func OnConfigChange(run func(in fsnotify.Event)) { v.OnConfigChange(run) } @@ -574,90 +453,20 @@ func (v *Viper) ConfigFileUsed() string { return v.configFile } func AddConfigPath(in string) { v.AddConfigPath(in) } func (v *Viper) AddConfigPath(in string) { + if v.finder != nil { + v.logger.Warn("ineffective call to function: custom finder takes precedence", slog.String("function", "AddConfigPath")) + } + if in != "" { absin := absPathify(v.logger, in) v.logger.Info("adding path to search paths", "path", absin) - if !stringInSlice(absin, v.configPaths) { + if !slices.Contains(v.configPaths, absin) { v.configPaths = append(v.configPaths, absin) } } } -// AddRemoteProvider adds a remote configuration source. -// Remote Providers are searched in the order they are added. -// provider is a string value: "etcd", "etcd3", "consul", "firestore" or "nats" are currently supported. -// endpoint is the url. etcd requires http://ip:port, consul requires ip:port, nats requires nats://ip:port -// path is the path in the k/v store to retrieve configuration -// To retrieve a config file called myapp.json from /configs/myapp.json -// you should set path to /configs and set config name (SetConfigName()) to -// "myapp". -func AddRemoteProvider(provider, endpoint, path string) error { - return v.AddRemoteProvider(provider, endpoint, path) -} - -func (v *Viper) AddRemoteProvider(provider, endpoint, path string) error { - if !stringInSlice(provider, SupportedRemoteProviders) { - return UnsupportedRemoteProviderError(provider) - } - if provider != "" && endpoint != "" { - v.logger.Info("adding remote provider", "provider", provider, "endpoint", endpoint) - - rp := &defaultRemoteProvider{ - endpoint: endpoint, - provider: provider, - path: path, - } - if !v.providerPathExists(rp) { - v.remoteProviders = append(v.remoteProviders, rp) - } - } - return nil -} - -// AddSecureRemoteProvider adds a remote configuration source. -// Secure Remote Providers are searched in the order they are added. -// provider is a string value: "etcd", "etcd3", "consul", "firestore" or "nats" are currently supported. -// endpoint is the url. etcd requires http://ip:port consul requires ip:port -// secretkeyring is the filepath to your openpgp secret keyring. e.g. /etc/secrets/myring.gpg -// path is the path in the k/v store to retrieve configuration -// To retrieve a config file called myapp.json from /configs/myapp.json -// you should set path to /configs and set config name (SetConfigName()) to -// "myapp". -// Secure Remote Providers are implemented with github.com/sagikazarmark/crypt. 
-func AddSecureRemoteProvider(provider, endpoint, path, secretkeyring string) error { - return v.AddSecureRemoteProvider(provider, endpoint, path, secretkeyring) -} - -func (v *Viper) AddSecureRemoteProvider(provider, endpoint, path, secretkeyring string) error { - if !stringInSlice(provider, SupportedRemoteProviders) { - return UnsupportedRemoteProviderError(provider) - } - if provider != "" && endpoint != "" { - v.logger.Info("adding remote provider", "provider", provider, "endpoint", endpoint) - - rp := &defaultRemoteProvider{ - endpoint: endpoint, - provider: provider, - path: path, - secretKeyring: secretkeyring, - } - if !v.providerPathExists(rp) { - v.remoteProviders = append(v.remoteProviders, rp) - } - } - return nil -} - -func (v *Viper) providerPathExists(p *defaultRemoteProvider) bool { - for _, y := range v.remoteProviders { - if reflect.DeepEqual(y, p) { - return true - } - } - return false -} - // searchMap recursively searches for a value for path in source map. // Returns nil if not found. // Note: This assumes that the path entries and map keys are lower cased. @@ -965,6 +774,7 @@ func (v *Viper) Sub(key string) *Viper { subv.automaticEnvApplied = v.automaticEnvApplied subv.envPrefix = v.envPrefix subv.envKeyReplacer = v.envKeyReplacer + subv.keyDelim = v.keyDelim subv.config = cast.ToStringMap(data) return subv } @@ -1006,6 +816,13 @@ func (v *Viper) GetInt64(key string) int64 { return cast.ToInt64(v.Get(key)) } +// GetUint8 returns the value associated with the key as an unsigned integer. +func GetUint8(key string) uint8 { return v.GetUint8(key) } + +func (v *Viper) GetUint8(key string) uint8 { + return cast.ToUint8(v.Get(key)) +} + // GetUint returns the value associated with the key as an unsigned integer. func GetUint(key string) uint { return v.GetUint(key) } @@ -1105,7 +922,7 @@ func UnmarshalKey(key string, rawVal any, opts ...DecoderConfigOption) error { } func (v *Viper) UnmarshalKey(key string, rawVal any, opts ...DecoderConfigOption) error { - return decode(v.Get(key), defaultDecoderConfig(rawVal, opts...)) + return decode(v.Get(key), v.defaultDecoderConfig(rawVal, opts...)) } // Unmarshal unmarshals the config into a Struct. Make sure that the tags @@ -1117,7 +934,7 @@ func Unmarshal(rawVal any, opts ...DecoderConfigOption) error { func (v *Viper) Unmarshal(rawVal any, opts ...DecoderConfigOption) error { keys := v.AllKeys() - if features.BindStruct { + if v.experimentalBindStruct { // TODO: make this optional? structKeys, err := v.decodeStructKeys(rawVal, opts...) if err != nil { @@ -1128,13 +945,13 @@ func (v *Viper) Unmarshal(rawVal any, opts ...DecoderConfigOption) error { } // TODO: struct keys should be enough? - return decode(v.getSettings(keys), defaultDecoderConfig(rawVal, opts...)) + return decode(v.getSettings(keys), v.defaultDecoderConfig(rawVal, opts...)) } func (v *Viper) decodeStructKeys(input any, opts ...DecoderConfigOption) ([]string, error) { var structKeyMap map[string]any - err := decode(input, defaultDecoderConfig(&structKeyMap, opts...)) + err := decode(input, v.defaultDecoderConfig(&structKeyMap, opts...)) if err != nil { return nil, err } @@ -1151,22 +968,54 @@ func (v *Viper) decodeStructKeys(input any, opts ...DecoderConfigOption) ([]stri // defaultDecoderConfig returns default mapstructure.DecoderConfig with support // of time.Duration values & string slices. 
-func defaultDecoderConfig(output any, opts ...DecoderConfigOption) *mapstructure.DecoderConfig { +func (v *Viper) defaultDecoderConfig(output any, opts ...DecoderConfigOption) *mapstructure.DecoderConfig { + decodeHook := v.decodeHook + if decodeHook == nil { + decodeHook = mapstructure.ComposeDecodeHookFunc( + mapstructure.StringToTimeDurationHookFunc(), + // mapstructure.StringToSliceHookFunc(","), + stringToWeakSliceHookFunc(","), + ) + } + c := &mapstructure.DecoderConfig{ Metadata: nil, - Result: output, WeaklyTypedInput: true, - DecodeHook: mapstructure.ComposeDecodeHookFunc( - mapstructure.StringToTimeDurationHookFunc(), - mapstructure.StringToSliceHookFunc(","), - ), + DecodeHook: decodeHook, } + for _, opt := range opts { opt(c) } + + // Do not allow overwriting the output + c.Result = output + return c } +// As of mapstructure v2.0.0 StringToSliceHookFunc checks if the return type is a string slice. +// This function removes that check. +// TODO: implement a function that checks if the value can be converted to the return type and use it instead. +func stringToWeakSliceHookFunc(sep string) mapstructure.DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}, + ) (interface{}, error) { + if f.Kind() != reflect.String || t.Kind() != reflect.Slice { + return data, nil + } + + raw := data.(string) + if raw == "" { + return []string{}, nil + } + + return strings.Split(raw, sep), nil + } +} + // decode is a wrapper around mapstructure.Decode that mimics the WeakDecode functionality. func decode(input any, config *mapstructure.DecoderConfig) error { decoder, err := mapstructure.NewDecoder(config) @@ -1183,12 +1032,12 @@ func UnmarshalExact(rawVal any, opts ...DecoderConfigOption) error { } func (v *Viper) UnmarshalExact(rawVal any, opts ...DecoderConfigOption) error { - config := defaultDecoderConfig(rawVal, opts...) + config := v.defaultDecoderConfig(rawVal, opts...) config.ErrorUnused = true keys := v.AllKeys() - if features.BindStruct { + if v.experimentalBindStruct { // TODO: make this optional? structKeys, err := v.decodeStructKeys(rawVal, opts...) if err != nil { @@ -1638,7 +1487,7 @@ func (v *Viper) ReadInConfig() error { return err } - if !stringInSlice(v.getConfigType(), SupportedExts) { + if !slices.Contains(SupportedExts, v.getConfigType()) { return UnsupportedConfigError(v.getConfigType()) } @@ -1669,7 +1518,7 @@ func (v *Viper) MergeInConfig() error { return err } - if !stringInSlice(v.getConfigType(), SupportedExts) { + if !slices.Contains(SupportedExts, v.getConfigType()) { return UnsupportedConfigError(v.getConfigType()) } @@ -1686,6 +1535,10 @@ func (v *Viper) MergeInConfig() error { func ReadConfig(in io.Reader) error { return v.ReadConfig(in) } func (v *Viper) ReadConfig(in io.Reader) error { + if v.configType == "" { + return errors.New("cannot decode configuration: config type is not set") + } + v.config = make(map[string]any) return v.unmarshalReader(in, v.config) } @@ -1694,6 +1547,10 @@ func (v *Viper) ReadConfig(in io.Reader) error { func MergeConfig(in io.Reader) error { return v.MergeConfig(in) } func (v *Viper) MergeConfig(in io.Reader) error { + if v.configType == "" { + return errors.New("cannot decode configuration: config type is not set") + } + cfg := make(map[string]any) if err := v.unmarshalReader(in, cfg); err != nil { return err @@ -1742,6 +1599,19 @@ func (v *Viper) WriteConfigAs(filename string) error { return v.writeConfig(filename, true) } +// WriteConfigTo writes current configuration to an [io.Writer]. 
+func WriteConfigTo(w io.Writer) error { return v.WriteConfigTo(w) } + +func (v *Viper) WriteConfigTo(w io.Writer) error { + format := strings.ToLower(v.getConfigType()) + + if !slices.Contains(SupportedExts, format) { + return UnsupportedConfigError(format) + } + + return v.marshalWriter(w, format) +} + // SafeWriteConfigAs writes current configuration to a given filename if it does not exist. func SafeWriteConfigAs(filename string) error { return v.SafeWriteConfigAs(filename) } @@ -1768,7 +1638,7 @@ func (v *Viper) writeConfig(filename string, force bool) error { return fmt.Errorf("config type could not be determined for %s", filename) } - if !stringInSlice(configType, SupportedExts) { + if !slices.Contains(SupportedExts, configType) { return UnsupportedConfigError(configType) } if v.config == nil { @@ -1795,12 +1665,20 @@ func (v *Viper) unmarshalReader(in io.Reader, c map[string]any) error { buf := new(bytes.Buffer) buf.ReadFrom(in) - switch format := strings.ToLower(v.getConfigType()); format { - case "yaml", "yml", "json", "toml", "hcl", "tfvars", "ini", "properties", "props", "prop", "dotenv", "env": - err := v.decoderRegistry.Decode(format, buf.Bytes(), c) - if err != nil { - return ConfigParseError{err} - } + format := strings.ToLower(v.getConfigType()) + + if !slices.Contains(SupportedExts, format) { + return UnsupportedConfigError(format) + } + + decoder, err := v.decoderRegistry.Decoder(format) + if err != nil { + return ConfigParseError{err} + } + + err = decoder.Decode(buf.Bytes(), c) + if err != nil { + return ConfigParseError{err} } insensitiviseMap(c) @@ -1808,20 +1686,24 @@ func (v *Viper) unmarshalReader(in io.Reader, c map[string]any) error { } // Marshal a map into Writer. -func (v *Viper) marshalWriter(f afero.File, configType string) error { +func (v *Viper) marshalWriter(w io.Writer, configType string) error { c := v.AllSettings() - switch configType { - case "yaml", "yml", "json", "toml", "hcl", "tfvars", "ini", "prop", "props", "properties", "dotenv", "env": - b, err := v.encoderRegistry.Encode(configType, c) - if err != nil { - return ConfigMarshalError{err} - } - _, err = f.WriteString(string(b)) - if err != nil { - return ConfigMarshalError{err} - } + encoder, err := v.encoderRegistry.Encoder(configType) + if err != nil { + return ConfigMarshalError{err} } + + b, err := encoder.Encode(c) + if err != nil { + return ConfigMarshalError{err} + } + + _, err = w.Write(b) + if err != nil { + return ConfigMarshalError{err} + } + return nil } @@ -1953,106 +1835,6 @@ func mergeMaps(src, tgt map[string]any, itgt map[any]any) { } } -// ReadRemoteConfig attempts to get configuration from a remote source -// and read it in the remote configuration registry. -func ReadRemoteConfig() error { return v.ReadRemoteConfig() } - -func (v *Viper) ReadRemoteConfig() error { - return v.getKeyValueConfig() -} - -func WatchRemoteConfig() error { return v.WatchRemoteConfig() } -func (v *Viper) WatchRemoteConfig() error { - return v.watchKeyValueConfig() -} - -func (v *Viper) WatchRemoteConfigOnChannel() error { - return v.watchKeyValueConfigOnChannel() -} - -// Retrieve the first found remote configuration. 
-func (v *Viper) getKeyValueConfig() error { - if RemoteConfig == nil { - return RemoteConfigError("Enable the remote features by doing a blank import of the viper/remote package: '_ github.com/spf13/viper/remote'") - } - - if len(v.remoteProviders) == 0 { - return RemoteConfigError("No Remote Providers") - } - - for _, rp := range v.remoteProviders { - val, err := v.getRemoteConfig(rp) - if err != nil { - v.logger.Error(fmt.Errorf("get remote config: %w", err).Error()) - - continue - } - - v.kvstore = val - - return nil - } - return RemoteConfigError("No Files Found") -} - -func (v *Viper) getRemoteConfig(provider RemoteProvider) (map[string]any, error) { - reader, err := RemoteConfig.Get(provider) - if err != nil { - return nil, err - } - err = v.unmarshalReader(reader, v.kvstore) - return v.kvstore, err -} - -// Retrieve the first found remote configuration. -func (v *Viper) watchKeyValueConfigOnChannel() error { - if len(v.remoteProviders) == 0 { - return RemoteConfigError("No Remote Providers") - } - - for _, rp := range v.remoteProviders { - respc, _ := RemoteConfig.WatchChannel(rp) - // Todo: Add quit channel - go func(rc <-chan *RemoteResponse) { - for { - b := <-rc - reader := bytes.NewReader(b.Value) - v.unmarshalReader(reader, v.kvstore) - } - }(respc) - return nil - } - return RemoteConfigError("No Files Found") -} - -// Retrieve the first found remote configuration. -func (v *Viper) watchKeyValueConfig() error { - if len(v.remoteProviders) == 0 { - return RemoteConfigError("No Remote Providers") - } - - for _, rp := range v.remoteProviders { - val, err := v.watchRemoteConfig(rp) - if err != nil { - v.logger.Error(fmt.Errorf("watch remote config: %w", err).Error()) - - continue - } - v.kvstore = val - return nil - } - return RemoteConfigError("No Files Found") -} - -func (v *Viper) watchRemoteConfig(provider RemoteProvider) (map[string]any, error) { - reader, err := RemoteConfig.Watch(provider) - if err != nil { - return nil, err - } - err = v.unmarshalReader(reader, v.kvstore) - return v.kvstore, err -} - // AllKeys returns all keys holding a value, regardless of where they are set. // Nested keys are returned with a v.keyDelim separator. func AllKeys() []string { return v.AllKeys() } @@ -2174,6 +1956,10 @@ func (v *Viper) SetFs(fs afero.Fs) { func SetConfigName(in string) { v.SetConfigName(in) } func (v *Viper) SetConfigName(in string) { + if v.finder != nil { + v.logger.Warn("ineffective call to function: custom finder takes precedence", slog.String("function", "SetConfigName")) + } + if in != "" { v.configName = in v.configFile = "" @@ -2197,13 +1983,6 @@ func (v *Viper) SetConfigPermissions(perm os.FileMode) { v.configPermissions = perm.Perm() } -// IniLoadOptions sets the load options for ini parsing. 
-func IniLoadOptions(in ini.LoadOptions) Option { - return optionFunc(func(v *Viper) { - v.iniLoadOptions = in - }) -} - func (v *Viper) getConfigType() string { if v.configType != "" { return v.configType diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/conversion.go b/vendor/github.com/zclconf/go-cty/cty/convert/conversion.go index bc79df8cf..423b62fc2 100644 --- a/vendor/github.com/zclconf/go-cty/cty/convert/conversion.go +++ b/vendor/github.com/zclconf/go-cty/cty/convert/conversion.go @@ -171,12 +171,16 @@ func getConversionKnown(in cty.Type, out cty.Type, unsafe bool) conversion { } if out.IsCapsuleType() { if fn := out.CapsuleOps().ConversionTo; fn != nil { - return conversionToCapsule(in, out, fn) + if conv := conversionToCapsule(in, out, fn); conv != nil { + return conv + } } } if in.IsCapsuleType() { if fn := in.CapsuleOps().ConversionFrom; fn != nil { - return conversionFromCapsule(in, out, fn) + if conv := conversionFromCapsule(in, out, fn); conv != nil { + return conv + } } } // No conversion operation is available, then. diff --git a/vendor/github.com/zclconf/go-cty/cty/convert/conversion_collection.go b/vendor/github.com/zclconf/go-cty/cty/convert/conversion_collection.go index 05399c9a6..451367c7d 100644 --- a/vendor/github.com/zclconf/go-cty/cty/convert/conversion_collection.go +++ b/vendor/github.com/zclconf/go-cty/cty/convert/conversion_collection.go @@ -162,7 +162,7 @@ func conversionCollectionToMap(ety cty.Type, conv conversion) conversion { if ety == cty.DynamicPseudoType { return cty.MapValEmpty(val.Type().ElementType()), nil } - return cty.MapValEmpty(ety), nil + return cty.MapValEmpty(ety.WithoutOptionalAttributesDeep()), nil } if ety.IsCollectionType() || ety.IsObjectType() { diff --git a/vendor/github.com/zclconf/go-cty/cty/function/function.go b/vendor/github.com/zclconf/go-cty/cty/function/function.go index 6fc968282..4d7c61d01 100644 --- a/vendor/github.com/zclconf/go-cty/cty/function/function.go +++ b/vendor/github.com/zclconf/go-cty/cty/function/function.go @@ -251,15 +251,25 @@ func (f Function) Call(args []cty.Value) (val cty.Value, err error) { if err != nil { return cty.NilVal, err } + + var resultMarks []cty.ValueMarks + // If we are returning an unknown early due to some unknown in the + // arguments, we first need to complete the iteration over all the args + // to ensure we collect as many marks as possible for resultMarks. + returnUnknown := false + if dynTypeArgs { // returnTypeForValues sets this if any argument was inexactly typed // and the corresponding parameter did not indicate it could deal with // that. In that case we also avoid calling the implementation function // because it will also typically not be ready to deal with that case. - return cty.UnknownVal(expectedType), nil + returnUnknown = true } - if refineResult := f.spec.RefineResult; refineResult != nil { + // If returnUnknown is set already, it means we don't have a refinement + // because of dynTypeArgs, but we may still need to collect marks from the + // rest of the arguments. + if refineResult := f.spec.RefineResult; refineResult != nil && !returnUnknown { // If this function has a refinement callback then we'll refine // our result value in the same way regardless of how we return. // It's the function author's responsibility to ensure that the @@ -280,15 +290,10 @@ func (f Function) Call(args []cty.Value) (val cty.Value, err error) { // values and marked values. 
posArgs := args[:len(f.spec.Params)] varArgs := args[len(f.spec.Params):] - var resultMarks []cty.ValueMarks for i, spec := range f.spec.Params { val := posArgs[i] - if !val.IsKnown() && !spec.AllowUnknown { - return cty.UnknownVal(expectedType), nil - } - if !spec.AllowMarked { unwrappedVal, marks := val.UnmarkDeep() if len(marks) > 0 { @@ -305,14 +310,15 @@ func (f Function) Call(args []cty.Value) (val cty.Value, err error) { args = newArgs } } + + if !val.IsKnown() && !spec.AllowUnknown { + returnUnknown = true + } } if f.spec.VarParam != nil { spec := f.spec.VarParam for i, val := range varArgs { - if !val.IsKnown() && !spec.AllowUnknown { - return cty.UnknownVal(expectedType), nil - } if !spec.AllowMarked { unwrappedVal, marks := val.UnmarkDeep() if len(marks) > 0 { @@ -323,9 +329,16 @@ func (f Function) Call(args []cty.Value) (val cty.Value, err error) { args = newArgs } } + if !val.IsKnown() && !spec.AllowUnknown { + returnUnknown = true + } } } + if returnUnknown { + return cty.UnknownVal(expectedType).WithMarks(resultMarks...), nil + } + var retVal cty.Value { // Intercept any panics from the function and return them as normal errors, diff --git a/vendor/github.com/zclconf/go-cty/cty/value_ops.go b/vendor/github.com/zclconf/go-cty/cty/value_ops.go index c4584bd93..246cc3d74 100644 --- a/vendor/github.com/zclconf/go-cty/cty/value_ops.go +++ b/vendor/github.com/zclconf/go-cty/cty/value_ops.go @@ -1028,22 +1028,45 @@ func (val Value) HasElement(elem Value) Value { } ty := val.Type() + unknownResult := UnknownVal(Bool).RefineNotNull() + if val.IsNull() { + panic("cannot HasElement on null value") + } + if !val.IsKnown() { + return unknownResult + } + if elem.Type() != DynamicPseudoType && val.Type().IsSetType() && val.Type().ElementType() != DynamicPseudoType { + // If we know the type of the given element and the element type of + // the set then they must match for the element to be present, because + // a set can't contain elements of any other type than its element type. + if !elem.Type().Equals(val.ty.ElementType()) { + return False + } + } if !ty.IsSetType() { panic("not a set type") } - if !val.IsKnown() || !elem.IsKnown() { - return UnknownVal(Bool).RefineNotNull() + if !elem.IsKnown() { + return unknownResult } - if val.IsNull() { - panic("can't call HasElement on a null value") + noMatchResult := False + if !val.IsWhollyKnown() { + // If the set has any unknown elements then a failure to find a + // known-value elem in it means that we don't know whether the + // element is present, rather than that it definitely isn't. + noMatchResult = unknownResult } if !ty.ElementType().Equals(elem.Type()) { + // A set can only contain an element of its own element type return False } s := val.v.(set.Set[interface{}]) - return BoolVal(s.Has(elem.v)) + if !s.Has(elem.v) { + return noMatchResult + } + return True } // Length returns the length of the receiver, which must be a collection type diff --git a/vendor/golang.org/x/exp/constraints/constraints.go b/vendor/golang.org/x/exp/constraints/constraints.go deleted file mode 100644 index 2c033dff4..000000000 --- a/vendor/golang.org/x/exp/constraints/constraints.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package constraints defines a set of useful constraints to be used -// with type parameters. -package constraints - -// Signed is a constraint that permits any signed integer type. 
-// If future releases of Go add new predeclared signed integer types, -// this constraint will be modified to include them. -type Signed interface { - ~int | ~int8 | ~int16 | ~int32 | ~int64 -} - -// Unsigned is a constraint that permits any unsigned integer type. -// If future releases of Go add new predeclared unsigned integer types, -// this constraint will be modified to include them. -type Unsigned interface { - ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr -} - -// Integer is a constraint that permits any integer type. -// If future releases of Go add new predeclared integer types, -// this constraint will be modified to include them. -type Integer interface { - Signed | Unsigned -} - -// Float is a constraint that permits any floating-point type. -// If future releases of Go add new predeclared floating-point types, -// this constraint will be modified to include them. -type Float interface { - ~float32 | ~float64 -} - -// Complex is a constraint that permits any complex numeric type. -// If future releases of Go add new predeclared complex numeric types, -// this constraint will be modified to include them. -type Complex interface { - ~complex64 | ~complex128 -} - -// Ordered is a constraint that permits any ordered type: any type -// that supports the operators < <= >= >. -// If future releases of Go add new ordered types, -// this constraint will be modified to include them. -type Ordered interface { - Integer | Float | ~string -} diff --git a/vendor/golang.org/x/exp/maps/maps.go b/vendor/golang.org/x/exp/maps/maps.go index ecc0dabb7..4a9747ef4 100644 --- a/vendor/golang.org/x/exp/maps/maps.go +++ b/vendor/golang.org/x/exp/maps/maps.go @@ -5,9 +5,16 @@ // Package maps defines various functions useful with maps of any type. package maps +import "maps" + // Keys returns the keys of the map m. // The keys will be in an indeterminate order. +// +// The simplest true equivalent using the standard library is: +// +// slices.AppendSeq(make([]K, 0, len(m)), maps.Keys(m)) func Keys[M ~map[K]V, K comparable, V any](m M) []K { + r := make([]K, 0, len(m)) for k := range m { r = append(r, k) @@ -17,7 +24,12 @@ func Keys[M ~map[K]V, K comparable, V any](m M) []K { // Values returns the values of the map m. // The values will be in an indeterminate order. +// +// The simplest true equivalent using the standard library is: +// +// slices.AppendSeq(make([]V, 0, len(m)), maps.Values(m)) func Values[M ~map[K]V, K comparable, V any](m M) []V { + r := make([]V, 0, len(m)) for _, v := range m { r = append(r, v) @@ -27,68 +39,48 @@ func Values[M ~map[K]V, K comparable, V any](m M) []V { // Equal reports whether two maps contain the same key/value pairs. // Values are compared using ==. +// +//go:fix inline func Equal[M1, M2 ~map[K]V, K, V comparable](m1 M1, m2 M2) bool { - if len(m1) != len(m2) { - return false - } - for k, v1 := range m1 { - if v2, ok := m2[k]; !ok || v1 != v2 { - return false - } - } - return true + return maps.Equal(m1, m2) } // EqualFunc is like Equal, but compares values using eq. // Keys are still compared with ==. +// +//go:fix inline func EqualFunc[M1 ~map[K]V1, M2 ~map[K]V2, K comparable, V1, V2 any](m1 M1, m2 M2, eq func(V1, V2) bool) bool { - if len(m1) != len(m2) { - return false - } - for k, v1 := range m1 { - if v2, ok := m2[k]; !ok || !eq(v1, v2) { - return false - } - } - return true + return maps.EqualFunc(m1, m2, eq) } // Clear removes all entries from m, leaving it empty. 
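This maps.go hunk (continued below for Clear, Clone, Copy, and DeleteFunc) rewrites each exp/maps helper as a thin wrapper over the standard library's maps package, with //go:fix inline directives so tooling can inline the wrappers at call sites. A sketch of the stdlib equivalent that the Keys doc comment cites, assuming Go 1.23+ for maps.Keys and slices.AppendSeq:

    package main

    import (
        "fmt"
        "maps"
        "slices"
    )

    func main() {
        m := map[string]int{"a": 1, "b": 2, "c": 3}

        // Collect the stdlib key iterator into a pre-sized slice; this is
        // the documented replacement for exp/maps.Keys.
        keys := slices.AppendSeq(make([]string, 0, len(m)), maps.Keys(m))

        slices.Sort(keys) // map iteration order is indeterminate
        fmt.Println(keys) // [a b c]
    }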
+// +//go:fix inline func Clear[M ~map[K]V, K comparable, V any](m M) { - for k := range m { - delete(m, k) - } + clear(m) } // Clone returns a copy of m. This is a shallow clone: // the new keys and values are set using ordinary assignment. +// +//go:fix inline func Clone[M ~map[K]V, K comparable, V any](m M) M { - // Preserve nil in case it matters. - if m == nil { - return nil - } - r := make(M, len(m)) - for k, v := range m { - r[k] = v - } - return r + return maps.Clone(m) } // Copy copies all key/value pairs in src adding them to dst. // When a key in src is already present in dst, // the value in dst will be overwritten by the value associated // with the key in src. +// +//go:fix inline func Copy[M1 ~map[K]V, M2 ~map[K]V, K comparable, V any](dst M1, src M2) { - for k, v := range src { - dst[k] = v - } + maps.Copy(dst, src) } // DeleteFunc deletes any key/value pairs from m for which del returns true. +// +//go:fix inline func DeleteFunc[M ~map[K]V, K comparable, V any](m M, del func(K, V) bool) { - for k, v := range m { - if del(k, v) { - delete(m, k) - } - } + maps.DeleteFunc(m, del) } diff --git a/vendor/golang.org/x/exp/slices/cmp.go b/vendor/golang.org/x/exp/slices/cmp.go deleted file mode 100644 index fbf1934a0..000000000 --- a/vendor/golang.org/x/exp/slices/cmp.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package slices - -import "golang.org/x/exp/constraints" - -// min is a version of the predeclared function from the Go 1.21 release. -func min[T constraints.Ordered](a, b T) T { - if a < b || isNaN(a) { - return a - } - return b -} - -// max is a version of the predeclared function from the Go 1.21 release. -func max[T constraints.Ordered](a, b T) T { - if a > b || isNaN(a) { - return a - } - return b -} - -// cmpLess is a copy of cmp.Less from the Go 1.21 release. -func cmpLess[T constraints.Ordered](x, y T) bool { - return (isNaN(x) && !isNaN(y)) || x < y -} - -// cmpCompare is a copy of cmp.Compare from the Go 1.21 release. -func cmpCompare[T constraints.Ordered](x, y T) int { - xNaN := isNaN(x) - yNaN := isNaN(y) - if xNaN && yNaN { - return 0 - } - if xNaN || x < y { - return -1 - } - if yNaN || x > y { - return +1 - } - return 0 -} diff --git a/vendor/golang.org/x/exp/slices/slices.go b/vendor/golang.org/x/exp/slices/slices.go index 46ceac343..da0df370d 100644 --- a/vendor/golang.org/x/exp/slices/slices.go +++ b/vendor/golang.org/x/exp/slices/slices.go @@ -6,9 +6,8 @@ package slices import ( - "unsafe" - - "golang.org/x/exp/constraints" + "cmp" + "slices" ) // Equal reports whether two slices are equal: the same length and all @@ -16,16 +15,10 @@ import ( // Otherwise, the elements are compared in increasing index order, and the // comparison stops at the first unequal pair. // Floating point NaNs are not considered equal. +// +//go:fix inline func Equal[S ~[]E, E comparable](s1, s2 S) bool { - if len(s1) != len(s2) { - return false - } - for i := range s1 { - if s1[i] != s2[i] { - return false - } - } - return true + return slices.Equal(s1, s2) } // EqualFunc reports whether two slices are equal using an equality @@ -33,17 +26,10 @@ func Equal[S ~[]E, E comparable](s1, s2 S) bool { // EqualFunc returns false. Otherwise, the elements are compared in // increasing index order, and the comparison stops at the first index // for which eq returns false. 
+// +//go:fix inline func EqualFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, eq func(E1, E2) bool) bool { - if len(s1) != len(s2) { - return false - } - for i, v1 := range s1 { - v2 := s2[i] - if !eq(v1, v2) { - return false - } - } - return true + return slices.EqualFunc(s1, s2, eq) } // Compare compares the elements of s1 and s2, using [cmp.Compare] on each pair @@ -53,20 +39,10 @@ func EqualFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, eq func(E1, E2) boo // If both slices are equal until one of them ends, the shorter slice is // considered less than the longer one. // The result is 0 if s1 == s2, -1 if s1 < s2, and +1 if s1 > s2. -func Compare[S ~[]E, E constraints.Ordered](s1, s2 S) int { - for i, v1 := range s1 { - if i >= len(s2) { - return +1 - } - v2 := s2[i] - if c := cmpCompare(v1, v2); c != 0 { - return c - } - } - if len(s1) < len(s2) { - return -1 - } - return 0 +// +//go:fix inline +func Compare[S ~[]E, E cmp.Ordered](s1, s2 S) int { + return slices.Compare(s1, s2) } // CompareFunc is like [Compare] but uses a custom comparison function on each @@ -74,53 +50,41 @@ func Compare[S ~[]E, E constraints.Ordered](s1, s2 S) int { // The result is the first non-zero result of cmp; if cmp always // returns 0 the result is 0 if len(s1) == len(s2), -1 if len(s1) < len(s2), // and +1 if len(s1) > len(s2). +// +//go:fix inline func CompareFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, cmp func(E1, E2) int) int { - for i, v1 := range s1 { - if i >= len(s2) { - return +1 - } - v2 := s2[i] - if c := cmp(v1, v2); c != 0 { - return c - } - } - if len(s1) < len(s2) { - return -1 - } - return 0 + return slices.CompareFunc(s1, s2, cmp) } // Index returns the index of the first occurrence of v in s, // or -1 if not present. +// +//go:fix inline func Index[S ~[]E, E comparable](s S, v E) int { - for i := range s { - if v == s[i] { - return i - } - } - return -1 + return slices.Index(s, v) } // IndexFunc returns the first index i satisfying f(s[i]), // or -1 if none do. +// +//go:fix inline func IndexFunc[S ~[]E, E any](s S, f func(E) bool) int { - for i := range s { - if f(s[i]) { - return i - } - } - return -1 + return slices.IndexFunc(s, f) } // Contains reports whether v is present in s. +// +//go:fix inline func Contains[S ~[]E, E comparable](s S, v E) bool { - return Index(s, v) >= 0 + return slices.Contains(s, v) } // ContainsFunc reports whether at least one // element e of s satisfies f(e). +// +//go:fix inline func ContainsFunc[S ~[]E, E any](s S, f func(E) bool) bool { - return IndexFunc(s, f) >= 0 + return slices.ContainsFunc(s, f) } // Insert inserts the values v... into s at index i, @@ -130,93 +94,10 @@ func ContainsFunc[S ~[]E, E any](s S, f func(E) bool) bool { // and r[i+len(v)] == value originally at r[i]. // Insert panics if i is out of range. // This function is O(len(s) + len(v)). +// +//go:fix inline func Insert[S ~[]E, E any](s S, i int, v ...E) S { - m := len(v) - if m == 0 { - return s - } - n := len(s) - if i == n { - return append(s, v...) - } - if n+m > cap(s) { - // Use append rather than make so that we bump the size of - // the slice up to the next storage class. - // This is what Grow does but we don't call Grow because - // that might copy the values twice. - s2 := append(s[:i], make(S, n+m-i)...) 
- copy(s2[i:], v) - copy(s2[i+m:], s[i:]) - return s2 - } - s = s[:n+m] - - // before: - // s: aaaaaaaabbbbccccccccdddd - // ^ ^ ^ ^ - // i i+m n n+m - // after: - // s: aaaaaaaavvvvbbbbcccccccc - // ^ ^ ^ ^ - // i i+m n n+m - // - // a are the values that don't move in s. - // v are the values copied in from v. - // b and c are the values from s that are shifted up in index. - // d are the values that get overwritten, never to be seen again. - - if !overlaps(v, s[i+m:]) { - // Easy case - v does not overlap either the c or d regions. - // (It might be in some of a or b, or elsewhere entirely.) - // The data we copy up doesn't write to v at all, so just do it. - - copy(s[i+m:], s[i:]) - - // Now we have - // s: aaaaaaaabbbbbbbbcccccccc - // ^ ^ ^ ^ - // i i+m n n+m - // Note the b values are duplicated. - - copy(s[i:], v) - - // Now we have - // s: aaaaaaaavvvvbbbbcccccccc - // ^ ^ ^ ^ - // i i+m n n+m - // That's the result we want. - return s - } - - // The hard case - v overlaps c or d. We can't just shift up - // the data because we'd move or clobber the values we're trying - // to insert. - // So instead, write v on top of d, then rotate. - copy(s[n:], v) - - // Now we have - // s: aaaaaaaabbbbccccccccvvvv - // ^ ^ ^ ^ - // i i+m n n+m - - rotateRight(s[i:], m) - - // Now we have - // s: aaaaaaaavvvvbbbbcccccccc - // ^ ^ ^ ^ - // i i+m n n+m - // That's the result we want. - return s -} - -// clearSlice sets all elements up to the length of s to the zero value of E. -// We may use the builtin clear func instead, and remove clearSlice, when upgrading -// to Go 1.21+. -func clearSlice[S ~[]E, E any](s S) { - var zero E - for i := range s { - s[i] = zero - } + return slices.Insert(s, i, v...) } // Delete removes the elements s[i:j] from s, returning the modified slice. @@ -224,136 +105,36 @@ func clearSlice[S ~[]E, E any](s S) { // Delete is O(len(s)-i), so if many items must be deleted, it is better to // make a single call deleting them all together than to delete one at a time. // Delete zeroes the elements s[len(s)-(j-i):len(s)]. +// +//go:fix inline func Delete[S ~[]E, E any](s S, i, j int) S { - _ = s[i:j:len(s)] // bounds check - - if i == j { - return s - } - - oldlen := len(s) - s = append(s[:i], s[j:]...) - clearSlice(s[len(s):oldlen]) // zero/nil out the obsolete elements, for GC - return s + return slices.Delete(s, i, j) } // DeleteFunc removes any elements from s for which del returns true, // returning the modified slice. // DeleteFunc zeroes the elements between the new length and the original length. +// +//go:fix inline func DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S { - i := IndexFunc(s, del) - if i == -1 { - return s - } - // Don't start copying elements until we find one to delete. - for j := i + 1; j < len(s); j++ { - if v := s[j]; !del(v) { - s[i] = v - i++ - } - } - clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC - return s[:i] + return slices.DeleteFunc(s, del) } // Replace replaces the elements s[i:j] by the given v, and returns the // modified slice. Replace panics if s[i:j] is not a valid slice of s. // When len(v) < (j-i), Replace zeroes the elements between the new length and the original length. +// +//go:fix inline func Replace[S ~[]E, E any](s S, i, j int, v ...E) S { - _ = s[i:j] // verify that i:j is a valid subslice - - if i == j { - return Insert(s, i, v...) - } - if j == len(s) { - return append(s[:i], v...) - } - - tot := len(s[:i]) + len(v) + len(s[j:]) - if tot > cap(s) { - // Too big to fit, allocate and copy over. 
- s2 := append(s[:i], make(S, tot-i)...) // See Insert - copy(s2[i:], v) - copy(s2[i+len(v):], s[j:]) - return s2 - } - - r := s[:tot] - - if i+len(v) <= j { - // Easy, as v fits in the deleted portion. - copy(r[i:], v) - if i+len(v) != j { - copy(r[i+len(v):], s[j:]) - } - clearSlice(s[tot:]) // zero/nil out the obsolete elements, for GC - return r - } - - // We are expanding (v is bigger than j-i). - // The situation is something like this: - // (example has i=4,j=8,len(s)=16,len(v)=6) - // s: aaaaxxxxbbbbbbbbyy - // ^ ^ ^ ^ - // i j len(s) tot - // a: prefix of s - // x: deleted range - // b: more of s - // y: area to expand into - - if !overlaps(r[i+len(v):], v) { - // Easy, as v is not clobbered by the first copy. - copy(r[i+len(v):], s[j:]) - copy(r[i:], v) - return r - } - - // This is a situation where we don't have a single place to which - // we can copy v. Parts of it need to go to two different places. - // We want to copy the prefix of v into y and the suffix into x, then - // rotate |y| spots to the right. - // - // v[2:] v[:2] - // | | - // s: aaaavvvvbbbbbbbbvv - // ^ ^ ^ ^ - // i j len(s) tot - // - // If either of those two destinations don't alias v, then we're good. - y := len(v) - (j - i) // length of y portion - - if !overlaps(r[i:j], v) { - copy(r[i:j], v[y:]) - copy(r[len(s):], v[:y]) - rotateRight(r[i:], y) - return r - } - if !overlaps(r[len(s):], v) { - copy(r[len(s):], v[:y]) - copy(r[i:j], v[y:]) - rotateRight(r[i:], y) - return r - } - - // Now we know that v overlaps both x and y. - // That means that the entirety of b is *inside* v. - // So we don't need to preserve b at all; instead we - // can copy v first, then copy the b part of v out of - // v to the right destination. - k := startIdx(v, s[j:]) - copy(r[i:], v) - copy(r[i+len(v):], r[i+k:]) - return r + return slices.Replace(s, i, j, v...) } // Clone returns a copy of the slice. // The elements are copied using assignment, so this is a shallow clone. +// +//go:fix inline func Clone[S ~[]E, E any](s S) S { - // Preserve nil in case it matters. - if s == nil { - return nil - } - return append(S([]E{}), s...) + return slices.Clone(s) } // Compact replaces consecutive runs of equal elements with a single copy. @@ -361,155 +142,41 @@ func Clone[S ~[]E, E any](s S) S { // Compact modifies the contents of the slice s and returns the modified slice, // which may have a smaller length. // Compact zeroes the elements between the new length and the original length. +// +//go:fix inline func Compact[S ~[]E, E comparable](s S) S { - if len(s) < 2 { - return s - } - i := 1 - for k := 1; k < len(s); k++ { - if s[k] != s[k-1] { - if i != k { - s[i] = s[k] - } - i++ - } - } - clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC - return s[:i] + return slices.Compact(s) } // CompactFunc is like [Compact] but uses an equality function to compare elements. // For runs of elements that compare equal, CompactFunc keeps the first one. // CompactFunc zeroes the elements between the new length and the original length. +// +//go:fix inline func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S { - if len(s) < 2 { - return s - } - i := 1 - for k := 1; k < len(s); k++ { - if !eq(s[k], s[k-1]) { - if i != k { - s[i] = s[k] - } - i++ - } - } - clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC - return s[:i] + return slices.CompactFunc(s, eq) } // Grow increases the slice's capacity, if necessary, to guarantee space for // another n elements. 
After Grow(n), at least n elements can be appended // to the slice without another allocation. If n is negative or too large to // allocate the memory, Grow panics. +// +//go:fix inline func Grow[S ~[]E, E any](s S, n int) S { - if n < 0 { - panic("cannot be negative") - } - if n -= cap(s) - len(s); n > 0 { - // TODO(https://go.dev/issue/53888): Make using []E instead of S - // to workaround a compiler bug where the runtime.growslice optimization - // does not take effect. Revert when the compiler is fixed. - s = append([]E(s)[:cap(s)], make([]E, n)...)[:len(s)] - } - return s + return slices.Grow(s, n) } // Clip removes unused capacity from the slice, returning s[:len(s):len(s)]. -func Clip[S ~[]E, E any](s S) S { - return s[:len(s):len(s)] -} - -// Rotation algorithm explanation: -// -// rotate left by 2 -// start with -// 0123456789 -// split up like this -// 01 234567 89 -// swap first 2 and last 2 -// 89 234567 01 -// join first parts -// 89234567 01 -// recursively rotate first left part by 2 -// 23456789 01 -// join at the end -// 2345678901 // -// rotate left by 8 -// start with -// 0123456789 -// split up like this -// 01 234567 89 -// swap first 2 and last 2 -// 89 234567 01 -// join last parts -// 89 23456701 -// recursively rotate second part left by 6 -// 89 01234567 -// join at the end -// 8901234567 - -// TODO: There are other rotate algorithms. -// This algorithm has the desirable property that it moves each element exactly twice. -// The triple-reverse algorithm is simpler and more cache friendly, but takes more writes. -// The follow-cycles algorithm can be 1-write but it is not very cache friendly. - -// rotateLeft rotates b left by n spaces. -// s_final[i] = s_orig[i+r], wrapping around. -func rotateLeft[E any](s []E, r int) { - for r != 0 && r != len(s) { - if r*2 <= len(s) { - swap(s[:r], s[len(s)-r:]) - s = s[:len(s)-r] - } else { - swap(s[:len(s)-r], s[r:]) - s, r = s[len(s)-r:], r*2-len(s) - } - } -} -func rotateRight[E any](s []E, r int) { - rotateLeft(s, len(s)-r) -} - -// swap swaps the contents of x and y. x and y must be equal length and disjoint. -func swap[E any](x, y []E) { - for i := 0; i < len(x); i++ { - x[i], y[i] = y[i], x[i] - } -} - -// overlaps reports whether the memory ranges a[0:len(a)] and b[0:len(b)] overlap. -func overlaps[E any](a, b []E) bool { - if len(a) == 0 || len(b) == 0 { - return false - } - elemSize := unsafe.Sizeof(a[0]) - if elemSize == 0 { - return false - } - // TODO: use a runtime/unsafe facility once one becomes available. See issue 12445. - // Also see crypto/internal/alias/alias.go:AnyOverlap - return uintptr(unsafe.Pointer(&a[0])) <= uintptr(unsafe.Pointer(&b[len(b)-1]))+(elemSize-1) && - uintptr(unsafe.Pointer(&b[0])) <= uintptr(unsafe.Pointer(&a[len(a)-1]))+(elemSize-1) -} - -// startIdx returns the index in haystack where the needle starts. -// prerequisite: the needle must be aliased entirely inside the haystack. -func startIdx[E any](haystack, needle []E) int { - p := &needle[0] - for i := range haystack { - if p == &haystack[i] { - return i - } - } - // TODO: what if the overlap is by a non-integral number of Es? - panic("needle not found") +//go:fix inline +func Clip[S ~[]E, E any](s S) S { + return slices.Clip(s) } // Reverse reverses the elements of the slice in place. 
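The deleted rotation notes above mention that a triple-reverse rotation is simpler and more cache-friendly than the swap-based rotateLeft, at the cost of extra writes. A sketch of that variant (rotateLeft2 is an illustrative name, not part of the vendored code):

    package main

    import (
        "fmt"
        "slices"
    )

    // rotateLeft2 rotates s left by r places via the triple-reverse scheme:
    // reverse each half, then reverse the whole slice.
    func rotateLeft2[E any](s []E, r int) {
        slices.Reverse(s[:r])
        slices.Reverse(s[r:])
        slices.Reverse(s)
    }

    func main() {
        s := []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
        rotateLeft2(s, 2)
        fmt.Println(s) // [2 3 4 5 6 7 8 9 0 1]
    }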
+// +//go:fix inline func Reverse[S ~[]E, E any](s S) { - for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 { - s[i], s[j] = s[j], s[i] - } + slices.Reverse(s) } diff --git a/vendor/golang.org/x/exp/slices/sort.go b/vendor/golang.org/x/exp/slices/sort.go index f58bbc7ba..bd91a8d40 100644 --- a/vendor/golang.org/x/exp/slices/sort.go +++ b/vendor/golang.org/x/exp/slices/sort.go @@ -2,21 +2,19 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:generate go run $GOROOT/src/sort/gen_sort_variants.go -exp - package slices import ( - "math/bits" - - "golang.org/x/exp/constraints" + "cmp" + "slices" ) // Sort sorts a slice of any ordered type in ascending order. // When sorting floating-point numbers, NaNs are ordered before other values. -func Sort[S ~[]E, E constraints.Ordered](x S) { - n := len(x) - pdqsortOrdered(x, 0, n, bits.Len(uint(n))) +// +//go:fix inline +func Sort[S ~[]E, E cmp.Ordered](x S) { + slices.Sort(x) } // SortFunc sorts the slice x in ascending order as determined by the cmp @@ -28,119 +26,79 @@ func Sort[S ~[]E, E constraints.Ordered](x S) { // SortFunc requires that cmp is a strict weak ordering. // See https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings. // To indicate 'uncomparable', return 0 from the function. +// +//go:fix inline func SortFunc[S ~[]E, E any](x S, cmp func(a, b E) int) { - n := len(x) - pdqsortCmpFunc(x, 0, n, bits.Len(uint(n)), cmp) + slices.SortFunc(x, cmp) } // SortStableFunc sorts the slice x while keeping the original order of equal // elements, using cmp to compare elements in the same way as [SortFunc]. +// +//go:fix inline func SortStableFunc[S ~[]E, E any](x S, cmp func(a, b E) int) { - stableCmpFunc(x, len(x), cmp) + slices.SortStableFunc(x, cmp) } // IsSorted reports whether x is sorted in ascending order. -func IsSorted[S ~[]E, E constraints.Ordered](x S) bool { - for i := len(x) - 1; i > 0; i-- { - if cmpLess(x[i], x[i-1]) { - return false - } - } - return true +// +//go:fix inline +func IsSorted[S ~[]E, E cmp.Ordered](x S) bool { + return slices.IsSorted(x) } // IsSortedFunc reports whether x is sorted in ascending order, with cmp as the // comparison function as defined by [SortFunc]. +// +//go:fix inline func IsSortedFunc[S ~[]E, E any](x S, cmp func(a, b E) int) bool { - for i := len(x) - 1; i > 0; i-- { - if cmp(x[i], x[i-1]) < 0 { - return false - } - } - return true + return slices.IsSortedFunc(x, cmp) } // Min returns the minimal value in x. It panics if x is empty. // For floating-point numbers, Min propagates NaNs (any NaN value in x // forces the output to be NaN). -func Min[S ~[]E, E constraints.Ordered](x S) E { - if len(x) < 1 { - panic("slices.Min: empty list") - } - m := x[0] - for i := 1; i < len(x); i++ { - m = min(m, x[i]) - } - return m +// +//go:fix inline +func Min[S ~[]E, E cmp.Ordered](x S) E { + return slices.Min(x) } // MinFunc returns the minimal value in x, using cmp to compare elements. // It panics if x is empty. If there is more than one minimal element // according to the cmp function, MinFunc returns the first one. +// +//go:fix inline func MinFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E { - if len(x) < 1 { - panic("slices.MinFunc: empty list") - } - m := x[0] - for i := 1; i < len(x); i++ { - if cmp(x[i], m) < 0 { - m = x[i] - } - } - return m + return slices.MinFunc(x, cmp) } // Max returns the maximal value in x. It panics if x is empty. 
// For floating-point E, Max propagates NaNs (any NaN value in x // forces the output to be NaN). -func Max[S ~[]E, E constraints.Ordered](x S) E { - if len(x) < 1 { - panic("slices.Max: empty list") - } - m := x[0] - for i := 1; i < len(x); i++ { - m = max(m, x[i]) - } - return m +// +//go:fix inline +func Max[S ~[]E, E cmp.Ordered](x S) E { + return slices.Max(x) } // MaxFunc returns the maximal value in x, using cmp to compare elements. // It panics if x is empty. If there is more than one maximal element // according to the cmp function, MaxFunc returns the first one. +// +//go:fix inline func MaxFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E { - if len(x) < 1 { - panic("slices.MaxFunc: empty list") - } - m := x[0] - for i := 1; i < len(x); i++ { - if cmp(x[i], m) > 0 { - m = x[i] - } - } - return m + return slices.MaxFunc(x, cmp) } // BinarySearch searches for target in a sorted slice and returns the position // where target is found, or the position where target would appear in the // sort order; it also returns a bool saying whether the target is really found // in the slice. The slice must be sorted in increasing order. -func BinarySearch[S ~[]E, E constraints.Ordered](x S, target E) (int, bool) { - // Inlining is faster than calling BinarySearchFunc with a lambda. - n := len(x) - // Define x[-1] < target and x[n] >= target. - // Invariant: x[i-1] < target, x[j] >= target. - i, j := 0, n - for i < j { - h := int(uint(i+j) >> 1) // avoid overflow when computing h - // i ≤ h < j - if cmpLess(x[h], target) { - i = h + 1 // preserves x[i-1] < target - } else { - j = h // preserves x[j] >= target - } - } - // i == j, x[i-1] < target, and x[j] (= x[i]) >= target => answer is i. - return i, i < n && (x[i] == target || (isNaN(x[i]) && isNaN(target))) +// +//go:fix inline +func BinarySearch[S ~[]E, E cmp.Ordered](x S, target E) (int, bool) { + return slices.BinarySearch(x, target) } // BinarySearchFunc works like [BinarySearch], but uses a custom comparison @@ -150,48 +108,8 @@ func BinarySearch[S ~[]E, E constraints.Ordered](x S, target E) (int, bool) { // or a positive number if the slice element follows the target. // cmp must implement the same ordering as the slice, such that if // cmp(a, t) < 0 and cmp(b, t) >= 0, then a must precede b in the slice. +// +//go:fix inline func BinarySearchFunc[S ~[]E, E, T any](x S, target T, cmp func(E, T) int) (int, bool) { - n := len(x) - // Define cmp(x[-1], target) < 0 and cmp(x[n], target) >= 0 . - // Invariant: cmp(x[i - 1], target) < 0, cmp(x[j], target) >= 0. - i, j := 0, n - for i < j { - h := int(uint(i+j) >> 1) // avoid overflow when computing h - // i ≤ h < j - if cmp(x[h], target) < 0 { - i = h + 1 // preserves cmp(x[i - 1], target) < 0 - } else { - j = h // preserves cmp(x[j], target) >= 0 - } - } - // i == j, cmp(x[i-1], target) < 0, and cmp(x[j], target) (= cmp(x[i], target)) >= 0 => answer is i. - return i, i < n && cmp(x[i], target) == 0 -} - -type sortedHint int // hint for pdqsort when choosing the pivot - -const ( - unknownHint sortedHint = iota - increasingHint - decreasingHint -) - -// xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf -type xorshift uint64 - -func (r *xorshift) Next() uint64 { - *r ^= *r << 13 - *r ^= *r >> 17 - *r ^= *r << 5 - return uint64(*r) -} - -func nextPowerOfTwo(length int) uint { - return 1 << bits.Len(uint(length)) -} - -// isNaN reports whether x is a NaN without requiring the math package. -// This will always return false if T is not floating-point. 
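With the sort.go hunks above, the exp/slices sorting and searching entry points forward to the standard library as well, so their behavior now matches stdlib slices exactly. A quick sketch, assuming a module on Go 1.21+ with golang.org/x/exp vendored:

    package main

    import (
        "fmt"

        "golang.org/x/exp/slices" // each call below forwards to the stdlib
    )

    func main() {
        xs := []int{3, 1, 2, 2}
        slices.Sort(xs)

        // BinarySearch reports the insertion position and whether the
        // target is actually present.
        i, ok := slices.BinarySearch(xs, 2)
        fmt.Println(xs, i, ok) // [1 2 2 3] 1 true
    }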
-func isNaN[T constraints.Ordered](x T) bool { - return x != x + return slices.BinarySearchFunc(x, target, cmp) } diff --git a/vendor/golang.org/x/exp/slices/zsortanyfunc.go b/vendor/golang.org/x/exp/slices/zsortanyfunc.go deleted file mode 100644 index 06f2c7a24..000000000 --- a/vendor/golang.org/x/exp/slices/zsortanyfunc.go +++ /dev/null @@ -1,479 +0,0 @@ -// Code generated by gen_sort_variants.go; DO NOT EDIT. - -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package slices - -// insertionSortCmpFunc sorts data[a:b] using insertion sort. -func insertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) { - for i := a + 1; i < b; i++ { - for j := i; j > a && (cmp(data[j], data[j-1]) < 0); j-- { - data[j], data[j-1] = data[j-1], data[j] - } - } -} - -// siftDownCmpFunc implements the heap property on data[lo:hi]. -// first is an offset into the array where the root of the heap lies. -func siftDownCmpFunc[E any](data []E, lo, hi, first int, cmp func(a, b E) int) { - root := lo - for { - child := 2*root + 1 - if child >= hi { - break - } - if child+1 < hi && (cmp(data[first+child], data[first+child+1]) < 0) { - child++ - } - if !(cmp(data[first+root], data[first+child]) < 0) { - return - } - data[first+root], data[first+child] = data[first+child], data[first+root] - root = child - } -} - -func heapSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) { - first := a - lo := 0 - hi := b - a - - // Build heap with greatest element at top. - for i := (hi - 1) / 2; i >= 0; i-- { - siftDownCmpFunc(data, i, hi, first, cmp) - } - - // Pop elements, largest first, into end of data. - for i := hi - 1; i >= 0; i-- { - data[first], data[first+i] = data[first+i], data[first] - siftDownCmpFunc(data, lo, i, first, cmp) - } -} - -// pdqsortCmpFunc sorts data[a:b]. -// The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort. -// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf -// C++ implementation: https://github.com/orlp/pdqsort -// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/ -// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort. -func pdqsortCmpFunc[E any](data []E, a, b, limit int, cmp func(a, b E) int) { - const maxInsertion = 12 - - var ( - wasBalanced = true // whether the last partitioning was reasonably balanced - wasPartitioned = true // whether the slice was already partitioned - ) - - for { - length := b - a - - if length <= maxInsertion { - insertionSortCmpFunc(data, a, b, cmp) - return - } - - // Fall back to heapsort if too many bad choices were made. - if limit == 0 { - heapSortCmpFunc(data, a, b, cmp) - return - } - - // If the last partitioning was imbalanced, we need to breaking patterns. - if !wasBalanced { - breakPatternsCmpFunc(data, a, b, cmp) - limit-- - } - - pivot, hint := choosePivotCmpFunc(data, a, b, cmp) - if hint == decreasingHint { - reverseRangeCmpFunc(data, a, b, cmp) - // The chosen pivot was pivot-a elements after the start of the array. - // After reversing it is pivot-a elements before the end of the array. - // The idea came from Rust's implementation. - pivot = (b - 1) - (pivot - a) - hint = increasingHint - } - - // The slice is likely already sorted. 
- if wasBalanced && wasPartitioned && hint == increasingHint { - if partialInsertionSortCmpFunc(data, a, b, cmp) { - return - } - } - - // Probably the slice contains many duplicate elements, partition the slice into - // elements equal to and elements greater than the pivot. - if a > 0 && !(cmp(data[a-1], data[pivot]) < 0) { - mid := partitionEqualCmpFunc(data, a, b, pivot, cmp) - a = mid - continue - } - - mid, alreadyPartitioned := partitionCmpFunc(data, a, b, pivot, cmp) - wasPartitioned = alreadyPartitioned - - leftLen, rightLen := mid-a, b-mid - balanceThreshold := length / 8 - if leftLen < rightLen { - wasBalanced = leftLen >= balanceThreshold - pdqsortCmpFunc(data, a, mid, limit, cmp) - a = mid + 1 - } else { - wasBalanced = rightLen >= balanceThreshold - pdqsortCmpFunc(data, mid+1, b, limit, cmp) - b = mid - } - } -} - -// partitionCmpFunc does one quicksort partition. -// Let p = data[pivot] -// Moves elements in data[a:b] around, so that data[i]
<p and data[j]>
=p for inewpivot. -// On return, data[newpivot] = p -func partitionCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int, alreadyPartitioned bool) { - data[a], data[pivot] = data[pivot], data[a] - i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned - - for i <= j && (cmp(data[i], data[a]) < 0) { - i++ - } - for i <= j && !(cmp(data[j], data[a]) < 0) { - j-- - } - if i > j { - data[j], data[a] = data[a], data[j] - return j, true - } - data[i], data[j] = data[j], data[i] - i++ - j-- - - for { - for i <= j && (cmp(data[i], data[a]) < 0) { - i++ - } - for i <= j && !(cmp(data[j], data[a]) < 0) { - j-- - } - if i > j { - break - } - data[i], data[j] = data[j], data[i] - i++ - j-- - } - data[j], data[a] = data[a], data[j] - return j, false -} - -// partitionEqualCmpFunc partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot]. -// It assumed that data[a:b] does not contain elements smaller than the data[pivot]. -func partitionEqualCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int) { - data[a], data[pivot] = data[pivot], data[a] - i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned - - for { - for i <= j && !(cmp(data[a], data[i]) < 0) { - i++ - } - for i <= j && (cmp(data[a], data[j]) < 0) { - j-- - } - if i > j { - break - } - data[i], data[j] = data[j], data[i] - i++ - j-- - } - return i -} - -// partialInsertionSortCmpFunc partially sorts a slice, returns true if the slice is sorted at the end. -func partialInsertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) bool { - const ( - maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted - shortestShifting = 50 // don't shift any elements on short arrays - ) - i := a + 1 - for j := 0; j < maxSteps; j++ { - for i < b && !(cmp(data[i], data[i-1]) < 0) { - i++ - } - - if i == b { - return true - } - - if b-a < shortestShifting { - return false - } - - data[i], data[i-1] = data[i-1], data[i] - - // Shift the smaller one to the left. - if i-a >= 2 { - for j := i - 1; j >= 1; j-- { - if !(cmp(data[j], data[j-1]) < 0) { - break - } - data[j], data[j-1] = data[j-1], data[j] - } - } - // Shift the greater one to the right. - if b-i >= 2 { - for j := i + 1; j < b; j++ { - if !(cmp(data[j], data[j-1]) < 0) { - break - } - data[j], data[j-1] = data[j-1], data[j] - } - } - } - return false -} - -// breakPatternsCmpFunc scatters some elements around in an attempt to break some patterns -// that might cause imbalanced partitions in quicksort. -func breakPatternsCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) { - length := b - a - if length >= 8 { - random := xorshift(length) - modulus := nextPowerOfTwo(length) - - for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ { - other := int(uint(random.Next()) & (modulus - 1)) - if other >= length { - other -= length - } - data[idx], data[a+other] = data[a+other], data[idx] - } - } -} - -// choosePivotCmpFunc chooses a pivot in data[a:b]. -// -// [0,8): chooses a static pivot. -// [8,shortestNinther): uses the simple median-of-three method. -// [shortestNinther,∞): uses the Tukey ninther method. 
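The pivot-selection comment above refers to Tukey's ninther: the median of three medians-of-three, a cheap approximation of the true median for large ranges. A toy illustration (median3 is an illustrative helper, not the deleted medianCmpFunc):

    package main

    import "fmt"

    // median3 returns the median of its three arguments.
    func median3(a, b, c int) int {
        if a > b {
            a, b = b, a
        }
        if b > c {
            b = c
        }
        if a > b {
            b = a
        }
        return b
    }

    func main() {
        x := []int{9, 1, 5, 3, 8, 2, 7, 4, 6}
        m1 := median3(x[0], x[1], x[2]) // 5
        m2 := median3(x[3], x[4], x[5]) // 3
        m3 := median3(x[6], x[7], x[8]) // 6
        fmt.Println(median3(m1, m2, m3)) // 5, here exactly the true median
    }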
-func choosePivotCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) (pivot int, hint sortedHint) { - const ( - shortestNinther = 50 - maxSwaps = 4 * 3 - ) - - l := b - a - - var ( - swaps int - i = a + l/4*1 - j = a + l/4*2 - k = a + l/4*3 - ) - - if l >= 8 { - if l >= shortestNinther { - // Tukey ninther method, the idea came from Rust's implementation. - i = medianAdjacentCmpFunc(data, i, &swaps, cmp) - j = medianAdjacentCmpFunc(data, j, &swaps, cmp) - k = medianAdjacentCmpFunc(data, k, &swaps, cmp) - } - // Find the median among i, j, k and stores it into j. - j = medianCmpFunc(data, i, j, k, &swaps, cmp) - } - - switch swaps { - case 0: - return j, increasingHint - case maxSwaps: - return j, decreasingHint - default: - return j, unknownHint - } -} - -// order2CmpFunc returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a. -func order2CmpFunc[E any](data []E, a, b int, swaps *int, cmp func(a, b E) int) (int, int) { - if cmp(data[b], data[a]) < 0 { - *swaps++ - return b, a - } - return a, b -} - -// medianCmpFunc returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c. -func medianCmpFunc[E any](data []E, a, b, c int, swaps *int, cmp func(a, b E) int) int { - a, b = order2CmpFunc(data, a, b, swaps, cmp) - b, c = order2CmpFunc(data, b, c, swaps, cmp) - a, b = order2CmpFunc(data, a, b, swaps, cmp) - return b -} - -// medianAdjacentCmpFunc finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a. -func medianAdjacentCmpFunc[E any](data []E, a int, swaps *int, cmp func(a, b E) int) int { - return medianCmpFunc(data, a-1, a, a+1, swaps, cmp) -} - -func reverseRangeCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) { - i := a - j := b - 1 - for i < j { - data[i], data[j] = data[j], data[i] - i++ - j-- - } -} - -func swapRangeCmpFunc[E any](data []E, a, b, n int, cmp func(a, b E) int) { - for i := 0; i < n; i++ { - data[a+i], data[b+i] = data[b+i], data[a+i] - } -} - -func stableCmpFunc[E any](data []E, n int, cmp func(a, b E) int) { - blockSize := 20 // must be > 0 - a, b := 0, blockSize - for b <= n { - insertionSortCmpFunc(data, a, b, cmp) - a = b - b += blockSize - } - insertionSortCmpFunc(data, a, n, cmp) - - for blockSize < n { - a, b = 0, 2*blockSize - for b <= n { - symMergeCmpFunc(data, a, a+blockSize, b, cmp) - a = b - b += 2 * blockSize - } - if m := a + blockSize; m < n { - symMergeCmpFunc(data, a, m, n, cmp) - } - blockSize *= 2 - } -} - -// symMergeCmpFunc merges the two sorted subsequences data[a:m] and data[m:b] using -// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum -// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz -// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in -// Computer Science, pages 714-723. Springer, 2004. -// -// Let M = m-a and N = b-n. Wolog M < N. -// The recursion depth is bound by ceil(log(N+M)). -// The algorithm needs O(M*log(N/M + 1)) calls to data.Less. -// The algorithm needs O((M+N)*log(M)) calls to data.Swap. -// -// The paper gives O((M+N)*log(M)) as the number of assignments assuming a -// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation -// in the paper carries through for Swap operations, especially as the block -// swapping rotate uses only O(M+N) Swaps. -// -// symMerge assumes non-degenerate arguments: a < m && m < b. -// Having the caller check this condition eliminates many leaf recursion calls, -// which improves performance. 
-func symMergeCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) { - // Avoid unnecessary recursions of symMerge - // by direct insertion of data[a] into data[m:b] - // if data[a:m] only contains one element. - if m-a == 1 { - // Use binary search to find the lowest index i - // such that data[i] >= data[a] for m <= i < b. - // Exit the search loop with i == b in case no such index exists. - i := m - j := b - for i < j { - h := int(uint(i+j) >> 1) - if cmp(data[h], data[a]) < 0 { - i = h + 1 - } else { - j = h - } - } - // Swap values until data[a] reaches the position before i. - for k := a; k < i-1; k++ { - data[k], data[k+1] = data[k+1], data[k] - } - return - } - - // Avoid unnecessary recursions of symMerge - // by direct insertion of data[m] into data[a:m] - // if data[m:b] only contains one element. - if b-m == 1 { - // Use binary search to find the lowest index i - // such that data[i] > data[m] for a <= i < m. - // Exit the search loop with i == m in case no such index exists. - i := a - j := m - for i < j { - h := int(uint(i+j) >> 1) - if !(cmp(data[m], data[h]) < 0) { - i = h + 1 - } else { - j = h - } - } - // Swap values until data[m] reaches the position i. - for k := m; k > i; k-- { - data[k], data[k-1] = data[k-1], data[k] - } - return - } - - mid := int(uint(a+b) >> 1) - n := mid + m - var start, r int - if m > mid { - start = n - b - r = mid - } else { - start = a - r = m - } - p := n - 1 - - for start < r { - c := int(uint(start+r) >> 1) - if !(cmp(data[p-c], data[c]) < 0) { - start = c + 1 - } else { - r = c - } - } - - end := n - start - if start < m && m < end { - rotateCmpFunc(data, start, m, end, cmp) - } - if a < start && start < mid { - symMergeCmpFunc(data, a, start, mid, cmp) - } - if mid < end && end < b { - symMergeCmpFunc(data, mid, end, b, cmp) - } -} - -// rotateCmpFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data: -// Data of the form 'x u v y' is changed to 'x v u y'. -// rotate performs at most b-a many calls to data.Swap, -// and it assumes non-degenerate arguments: a < m && m < b. -func rotateCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) { - i := m - a - j := b - m - - for i != j { - if i > j { - swapRangeCmpFunc(data, m-i, m, j, cmp) - i -= j - } else { - swapRangeCmpFunc(data, m-i, m+j-i, i, cmp) - j -= i - } - } - // i == j - swapRangeCmpFunc(data, m-i, m, i, cmp) -} diff --git a/vendor/golang.org/x/exp/slices/zsortordered.go b/vendor/golang.org/x/exp/slices/zsortordered.go deleted file mode 100644 index 99b47c398..000000000 --- a/vendor/golang.org/x/exp/slices/zsortordered.go +++ /dev/null @@ -1,481 +0,0 @@ -// Code generated by gen_sort_variants.go; DO NOT EDIT. - -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package slices - -import "golang.org/x/exp/constraints" - -// insertionSortOrdered sorts data[a:b] using insertion sort. -func insertionSortOrdered[E constraints.Ordered](data []E, a, b int) { - for i := a + 1; i < b; i++ { - for j := i; j > a && cmpLess(data[j], data[j-1]); j-- { - data[j], data[j-1] = data[j-1], data[j] - } - } -} - -// siftDownOrdered implements the heap property on data[lo:hi]. -// first is an offset into the array where the root of the heap lies. 
-func siftDownOrdered[E constraints.Ordered](data []E, lo, hi, first int) { - root := lo - for { - child := 2*root + 1 - if child >= hi { - break - } - if child+1 < hi && cmpLess(data[first+child], data[first+child+1]) { - child++ - } - if !cmpLess(data[first+root], data[first+child]) { - return - } - data[first+root], data[first+child] = data[first+child], data[first+root] - root = child - } -} - -func heapSortOrdered[E constraints.Ordered](data []E, a, b int) { - first := a - lo := 0 - hi := b - a - - // Build heap with greatest element at top. - for i := (hi - 1) / 2; i >= 0; i-- { - siftDownOrdered(data, i, hi, first) - } - - // Pop elements, largest first, into end of data. - for i := hi - 1; i >= 0; i-- { - data[first], data[first+i] = data[first+i], data[first] - siftDownOrdered(data, lo, i, first) - } -} - -// pdqsortOrdered sorts data[a:b]. -// The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort. -// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf -// C++ implementation: https://github.com/orlp/pdqsort -// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/ -// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort. -func pdqsortOrdered[E constraints.Ordered](data []E, a, b, limit int) { - const maxInsertion = 12 - - var ( - wasBalanced = true // whether the last partitioning was reasonably balanced - wasPartitioned = true // whether the slice was already partitioned - ) - - for { - length := b - a - - if length <= maxInsertion { - insertionSortOrdered(data, a, b) - return - } - - // Fall back to heapsort if too many bad choices were made. - if limit == 0 { - heapSortOrdered(data, a, b) - return - } - - // If the last partitioning was imbalanced, we need to breaking patterns. - if !wasBalanced { - breakPatternsOrdered(data, a, b) - limit-- - } - - pivot, hint := choosePivotOrdered(data, a, b) - if hint == decreasingHint { - reverseRangeOrdered(data, a, b) - // The chosen pivot was pivot-a elements after the start of the array. - // After reversing it is pivot-a elements before the end of the array. - // The idea came from Rust's implementation. - pivot = (b - 1) - (pivot - a) - hint = increasingHint - } - - // The slice is likely already sorted. - if wasBalanced && wasPartitioned && hint == increasingHint { - if partialInsertionSortOrdered(data, a, b) { - return - } - } - - // Probably the slice contains many duplicate elements, partition the slice into - // elements equal to and elements greater than the pivot. - if a > 0 && !cmpLess(data[a-1], data[pivot]) { - mid := partitionEqualOrdered(data, a, b, pivot) - a = mid - continue - } - - mid, alreadyPartitioned := partitionOrdered(data, a, b, pivot) - wasPartitioned = alreadyPartitioned - - leftLen, rightLen := mid-a, b-mid - balanceThreshold := length / 8 - if leftLen < rightLen { - wasBalanced = leftLen >= balanceThreshold - pdqsortOrdered(data, a, mid, limit) - a = mid + 1 - } else { - wasBalanced = rightLen >= balanceThreshold - pdqsortOrdered(data, mid+1, b, limit) - b = mid - } - } -} - -// partitionOrdered does one quicksort partition. -// Let p = data[pivot] -// Moves elements in data[a:b] around, so that data[i]
<p and data[j]>
=p for inewpivot. -// On return, data[newpivot] = p -func partitionOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int, alreadyPartitioned bool) { - data[a], data[pivot] = data[pivot], data[a] - i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned - - for i <= j && cmpLess(data[i], data[a]) { - i++ - } - for i <= j && !cmpLess(data[j], data[a]) { - j-- - } - if i > j { - data[j], data[a] = data[a], data[j] - return j, true - } - data[i], data[j] = data[j], data[i] - i++ - j-- - - for { - for i <= j && cmpLess(data[i], data[a]) { - i++ - } - for i <= j && !cmpLess(data[j], data[a]) { - j-- - } - if i > j { - break - } - data[i], data[j] = data[j], data[i] - i++ - j-- - } - data[j], data[a] = data[a], data[j] - return j, false -} - -// partitionEqualOrdered partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot]. -// It assumed that data[a:b] does not contain elements smaller than the data[pivot]. -func partitionEqualOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int) { - data[a], data[pivot] = data[pivot], data[a] - i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned - - for { - for i <= j && !cmpLess(data[a], data[i]) { - i++ - } - for i <= j && cmpLess(data[a], data[j]) { - j-- - } - if i > j { - break - } - data[i], data[j] = data[j], data[i] - i++ - j-- - } - return i -} - -// partialInsertionSortOrdered partially sorts a slice, returns true if the slice is sorted at the end. -func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool { - const ( - maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted - shortestShifting = 50 // don't shift any elements on short arrays - ) - i := a + 1 - for j := 0; j < maxSteps; j++ { - for i < b && !cmpLess(data[i], data[i-1]) { - i++ - } - - if i == b { - return true - } - - if b-a < shortestShifting { - return false - } - - data[i], data[i-1] = data[i-1], data[i] - - // Shift the smaller one to the left. - if i-a >= 2 { - for j := i - 1; j >= 1; j-- { - if !cmpLess(data[j], data[j-1]) { - break - } - data[j], data[j-1] = data[j-1], data[j] - } - } - // Shift the greater one to the right. - if b-i >= 2 { - for j := i + 1; j < b; j++ { - if !cmpLess(data[j], data[j-1]) { - break - } - data[j], data[j-1] = data[j-1], data[j] - } - } - } - return false -} - -// breakPatternsOrdered scatters some elements around in an attempt to break some patterns -// that might cause imbalanced partitions in quicksort. -func breakPatternsOrdered[E constraints.Ordered](data []E, a, b int) { - length := b - a - if length >= 8 { - random := xorshift(length) - modulus := nextPowerOfTwo(length) - - for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ { - other := int(uint(random.Next()) & (modulus - 1)) - if other >= length { - other -= length - } - data[idx], data[a+other] = data[a+other], data[idx] - } - } -} - -// choosePivotOrdered chooses a pivot in data[a:b]. -// -// [0,8): chooses a static pivot. -// [8,shortestNinther): uses the simple median-of-three method. -// [shortestNinther,∞): uses the Tukey ninther method. 
-func choosePivotOrdered[E constraints.Ordered](data []E, a, b int) (pivot int, hint sortedHint) { - const ( - shortestNinther = 50 - maxSwaps = 4 * 3 - ) - - l := b - a - - var ( - swaps int - i = a + l/4*1 - j = a + l/4*2 - k = a + l/4*3 - ) - - if l >= 8 { - if l >= shortestNinther { - // Tukey ninther method, the idea came from Rust's implementation. - i = medianAdjacentOrdered(data, i, &swaps) - j = medianAdjacentOrdered(data, j, &swaps) - k = medianAdjacentOrdered(data, k, &swaps) - } - // Find the median among i, j, k and stores it into j. - j = medianOrdered(data, i, j, k, &swaps) - } - - switch swaps { - case 0: - return j, increasingHint - case maxSwaps: - return j, decreasingHint - default: - return j, unknownHint - } -} - -// order2Ordered returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a. -func order2Ordered[E constraints.Ordered](data []E, a, b int, swaps *int) (int, int) { - if cmpLess(data[b], data[a]) { - *swaps++ - return b, a - } - return a, b -} - -// medianOrdered returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c. -func medianOrdered[E constraints.Ordered](data []E, a, b, c int, swaps *int) int { - a, b = order2Ordered(data, a, b, swaps) - b, c = order2Ordered(data, b, c, swaps) - a, b = order2Ordered(data, a, b, swaps) - return b -} - -// medianAdjacentOrdered finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a. -func medianAdjacentOrdered[E constraints.Ordered](data []E, a int, swaps *int) int { - return medianOrdered(data, a-1, a, a+1, swaps) -} - -func reverseRangeOrdered[E constraints.Ordered](data []E, a, b int) { - i := a - j := b - 1 - for i < j { - data[i], data[j] = data[j], data[i] - i++ - j-- - } -} - -func swapRangeOrdered[E constraints.Ordered](data []E, a, b, n int) { - for i := 0; i < n; i++ { - data[a+i], data[b+i] = data[b+i], data[a+i] - } -} - -func stableOrdered[E constraints.Ordered](data []E, n int) { - blockSize := 20 // must be > 0 - a, b := 0, blockSize - for b <= n { - insertionSortOrdered(data, a, b) - a = b - b += blockSize - } - insertionSortOrdered(data, a, n) - - for blockSize < n { - a, b = 0, 2*blockSize - for b <= n { - symMergeOrdered(data, a, a+blockSize, b) - a = b - b += 2 * blockSize - } - if m := a + blockSize; m < n { - symMergeOrdered(data, a, m, n) - } - blockSize *= 2 - } -} - -// symMergeOrdered merges the two sorted subsequences data[a:m] and data[m:b] using -// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum -// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz -// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in -// Computer Science, pages 714-723. Springer, 2004. -// -// Let M = m-a and N = b-n. Wolog M < N. -// The recursion depth is bound by ceil(log(N+M)). -// The algorithm needs O(M*log(N/M + 1)) calls to data.Less. -// The algorithm needs O((M+N)*log(M)) calls to data.Swap. -// -// The paper gives O((M+N)*log(M)) as the number of assignments assuming a -// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation -// in the paper carries through for Swap operations, especially as the block -// swapping rotate uses only O(M+N) Swaps. -// -// symMerge assumes non-degenerate arguments: a < m && m < b. -// Having the caller check this condition eliminates many leaf recursion calls, -// which improves performance. 
-func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) { - // Avoid unnecessary recursions of symMerge - // by direct insertion of data[a] into data[m:b] - // if data[a:m] only contains one element. - if m-a == 1 { - // Use binary search to find the lowest index i - // such that data[i] >= data[a] for m <= i < b. - // Exit the search loop with i == b in case no such index exists. - i := m - j := b - for i < j { - h := int(uint(i+j) >> 1) - if cmpLess(data[h], data[a]) { - i = h + 1 - } else { - j = h - } - } - // Swap values until data[a] reaches the position before i. - for k := a; k < i-1; k++ { - data[k], data[k+1] = data[k+1], data[k] - } - return - } - - // Avoid unnecessary recursions of symMerge - // by direct insertion of data[m] into data[a:m] - // if data[m:b] only contains one element. - if b-m == 1 { - // Use binary search to find the lowest index i - // such that data[i] > data[m] for a <= i < m. - // Exit the search loop with i == m in case no such index exists. - i := a - j := m - for i < j { - h := int(uint(i+j) >> 1) - if !cmpLess(data[m], data[h]) { - i = h + 1 - } else { - j = h - } - } - // Swap values until data[m] reaches the position i. - for k := m; k > i; k-- { - data[k], data[k-1] = data[k-1], data[k] - } - return - } - - mid := int(uint(a+b) >> 1) - n := mid + m - var start, r int - if m > mid { - start = n - b - r = mid - } else { - start = a - r = m - } - p := n - 1 - - for start < r { - c := int(uint(start+r) >> 1) - if !cmpLess(data[p-c], data[c]) { - start = c + 1 - } else { - r = c - } - } - - end := n - start - if start < m && m < end { - rotateOrdered(data, start, m, end) - } - if a < start && start < mid { - symMergeOrdered(data, a, start, mid) - } - if mid < end && end < b { - symMergeOrdered(data, mid, end, b) - } -} - -// rotateOrdered rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data: -// Data of the form 'x u v y' is changed to 'x v u y'. -// rotate performs at most b-a many calls to data.Swap, -// and it assumes non-degenerate arguments: a < m && m < b. -func rotateOrdered[E constraints.Ordered](data []E, a, m, b int) { - i := m - a - j := b - m - - for i != j { - if i > j { - swapRangeOrdered(data, m-i, m, j) - i -= j - } else { - swapRangeOrdered(data, m-i, m+j-i, i) - j -= i - } - } - // i == j - swapRangeOrdered(data, m-i, m, i) -} diff --git a/vendor/golang.org/x/exp/slog/attr.go b/vendor/golang.org/x/exp/slog/attr.go deleted file mode 100644 index a180d0e1d..000000000 --- a/vendor/golang.org/x/exp/slog/attr.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package slog - -import ( - "fmt" - "time" -) - -// An Attr is a key-value pair. -type Attr struct { - Key string - Value Value -} - -// String returns an Attr for a string value. -func String(key, value string) Attr { - return Attr{key, StringValue(value)} -} - -// Int64 returns an Attr for an int64. -func Int64(key string, value int64) Attr { - return Attr{key, Int64Value(value)} -} - -// Int converts an int to an int64 and returns -// an Attr with that value. -func Int(key string, value int) Attr { - return Int64(key, int64(value)) -} - -// Uint64 returns an Attr for a uint64. -func Uint64(key string, v uint64) Attr { - return Attr{key, Uint64Value(v)} -} - -// Float64 returns an Attr for a floating-point number. 
-func Float64(key string, v float64) Attr { - return Attr{key, Float64Value(v)} -} - -// Bool returns an Attr for a bool. -func Bool(key string, v bool) Attr { - return Attr{key, BoolValue(v)} -} - -// Time returns an Attr for a time.Time. -// It discards the monotonic portion. -func Time(key string, v time.Time) Attr { - return Attr{key, TimeValue(v)} -} - -// Duration returns an Attr for a time.Duration. -func Duration(key string, v time.Duration) Attr { - return Attr{key, DurationValue(v)} -} - -// Group returns an Attr for a Group Value. -// The first argument is the key; the remaining arguments -// are converted to Attrs as in [Logger.Log]. -// -// Use Group to collect several key-value pairs under a single -// key on a log line, or as the result of LogValue -// in order to log a single value as multiple Attrs. -func Group(key string, args ...any) Attr { - return Attr{key, GroupValue(argsToAttrSlice(args)...)} -} - -func argsToAttrSlice(args []any) []Attr { - var ( - attr Attr - attrs []Attr - ) - for len(args) > 0 { - attr, args = argsToAttr(args) - attrs = append(attrs, attr) - } - return attrs -} - -// Any returns an Attr for the supplied value. -// See [Value.AnyValue] for how values are treated. -func Any(key string, value any) Attr { - return Attr{key, AnyValue(value)} -} - -// Equal reports whether a and b have equal keys and values. -func (a Attr) Equal(b Attr) bool { - return a.Key == b.Key && a.Value.Equal(b.Value) -} - -func (a Attr) String() string { - return fmt.Sprintf("%s=%s", a.Key, a.Value) -} - -// isEmpty reports whether a has an empty key and a nil value. -// That can be written as Attr{} or Any("", nil). -func (a Attr) isEmpty() bool { - return a.Key == "" && a.Value.num == 0 && a.Value.any == nil -} diff --git a/vendor/golang.org/x/exp/slog/doc.go b/vendor/golang.org/x/exp/slog/doc.go deleted file mode 100644 index 4beaf8674..000000000 --- a/vendor/golang.org/x/exp/slog/doc.go +++ /dev/null @@ -1,316 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package slog provides structured logging, -in which log records include a message, -a severity level, and various other attributes -expressed as key-value pairs. - -It defines a type, [Logger], -which provides several methods (such as [Logger.Info] and [Logger.Error]) -for reporting events of interest. - -Each Logger is associated with a [Handler]. -A Logger output method creates a [Record] from the method arguments -and passes it to the Handler, which decides how to handle it. -There is a default Logger accessible through top-level functions -(such as [Info] and [Error]) that call the corresponding Logger methods. - -A log record consists of a time, a level, a message, and a set of key-value -pairs, where the keys are strings and the values may be of any type. -As an example, - - slog.Info("hello", "count", 3) - -creates a record containing the time of the call, -a level of Info, the message "hello", and a single -pair with key "count" and value 3. - -The [Info] top-level function calls the [Logger.Info] method on the default Logger. -In addition to [Logger.Info], there are methods for Debug, Warn and Error levels. -Besides these convenience methods for common levels, -there is also a [Logger.Log] method which takes the level as an argument. -Each of these methods has a corresponding top-level function that uses the -default logger. 
- -The default handler formats the log record's message, time, level, and attributes -as a string and passes it to the [log] package. - - 2022/11/08 15:28:26 INFO hello count=3 - -For more control over the output format, create a logger with a different handler. -This statement uses [New] to create a new logger with a TextHandler -that writes structured records in text form to standard error: - - logger := slog.New(slog.NewTextHandler(os.Stderr, nil)) - -[TextHandler] output is a sequence of key=value pairs, easily and unambiguously -parsed by machine. This statement: - - logger.Info("hello", "count", 3) - -produces this output: - - time=2022-11-08T15:28:26.000-05:00 level=INFO msg=hello count=3 - -The package also provides [JSONHandler], whose output is line-delimited JSON: - - logger := slog.New(slog.NewJSONHandler(os.Stdout, nil)) - logger.Info("hello", "count", 3) - -produces this output: - - {"time":"2022-11-08T15:28:26.000000000-05:00","level":"INFO","msg":"hello","count":3} - -Both [TextHandler] and [JSONHandler] can be configured with [HandlerOptions]. -There are options for setting the minimum level (see Levels, below), -displaying the source file and line of the log call, and -modifying attributes before they are logged. - -Setting a logger as the default with - - slog.SetDefault(logger) - -will cause the top-level functions like [Info] to use it. -[SetDefault] also updates the default logger used by the [log] package, -so that existing applications that use [log.Printf] and related functions -will send log records to the logger's handler without needing to be rewritten. - -Some attributes are common to many log calls. -For example, you may wish to include the URL or trace identifier of a server request -with all log events arising from the request. -Rather than repeat the attribute with every log call, you can use [Logger.With] -to construct a new Logger containing the attributes: - - logger2 := logger.With("url", r.URL) - -The arguments to With are the same key-value pairs used in [Logger.Info]. -The result is a new Logger with the same handler as the original, but additional -attributes that will appear in the output of every call. - -# Levels - -A [Level] is an integer representing the importance or severity of a log event. -The higher the level, the more severe the event. -This package defines constants for the most common levels, -but any int can be used as a level. - -In an application, you may wish to log messages only at a certain level or greater. -One common configuration is to log messages at Info or higher levels, -suppressing debug logging until it is needed. -The built-in handlers can be configured with the minimum level to output by -setting [HandlerOptions.Level]. -The program's `main` function typically does this. -The default value is LevelInfo. - -Setting the [HandlerOptions.Level] field to a [Level] value -fixes the handler's minimum level throughout its lifetime. -Setting it to a [LevelVar] allows the level to be varied dynamically. -A LevelVar holds a Level and is safe to read or write from multiple -goroutines. 
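For a fixed minimum level, a plain Level value suffices. A minimal sketch, assuming output to standard error (the chosen level is illustrative):

	// Emit only records at Warn level or above.
	h := slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelWarn})
	slog.SetDefault(slog.New(h))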
-To vary the level dynamically for an entire program, first initialize -a global LevelVar: - - var programLevel = new(slog.LevelVar) // Info by default - -Then use the LevelVar to construct a handler, and make it the default: - - h := slog.NewJSONHandler(os.Stderr, &slog.HandlerOptions{Level: programLevel}) - slog.SetDefault(slog.New(h)) - -Now the program can change its logging level with a single statement: - - programLevel.Set(slog.LevelDebug) - -# Groups - -Attributes can be collected into groups. -A group has a name that is used to qualify the names of its attributes. -How this qualification is displayed depends on the handler. -[TextHandler] separates the group and attribute names with a dot. -[JSONHandler] treats each group as a separate JSON object, with the group name as the key. - -Use [Group] to create a Group attribute from a name and a list of key-value pairs: - - slog.Group("request", - "method", r.Method, - "url", r.URL) - -TextHandler would display this group as - - request.method=GET request.url=http://example.com - -JSONHandler would display it as - - "request":{"method":"GET","url":"http://example.com"} - -Use [Logger.WithGroup] to qualify all of a Logger's output -with a group name. Calling WithGroup on a Logger results in a -new Logger with the same Handler as the original, but with all -its attributes qualified by the group name. - -This can help prevent duplicate attribute keys in large systems, -where subsystems might use the same keys. -Pass each subsystem a different Logger with its own group name so that -potential duplicates are qualified: - - logger := slog.Default().With("id", systemID) - parserLogger := logger.WithGroup("parser") - parseInput(input, parserLogger) - -When parseInput logs with parserLogger, its keys will be qualified with "parser", -so even if it uses the common key "id", the log line will have distinct keys. - -# Contexts - -Some handlers may wish to include information from the [context.Context] that is -available at the call site. One example of such information -is the identifier for the current span when tracing is enabled. - -The [Logger.Log] and [Logger.LogAttrs] methods take a context as a first -argument, as do their corresponding top-level functions. - -Although the convenience methods on Logger (Info and so on) and the -corresponding top-level functions do not take a context, the alternatives ending -in "Context" do. For example, - - slog.InfoContext(ctx, "message") - -It is recommended to pass a context to an output method if one is available. - -# Attrs and Values - -An [Attr] is a key-value pair. The Logger output methods accept Attrs as well as -alternating keys and values. The statement - - slog.Info("hello", slog.Int("count", 3)) - -behaves the same as - - slog.Info("hello", "count", 3) - -There are convenience constructors for [Attr] such as [Int], [String], and [Bool] -for common types, as well as the function [Any] for constructing Attrs of any -type. - -The value part of an Attr is a type called [Value]. -Like an [any], a Value can hold any Go value, -but it can represent typical values, including all numbers and strings, -without an allocation. - -For the most efficient log output, use [Logger.LogAttrs]. -It is similar to [Logger.Log] but accepts only Attrs, not alternating -keys and values; this allows it, too, to avoid allocation. 
-
-The call
-
-	logger.LogAttrs(nil, slog.LevelInfo, "hello", slog.Int("count", 3))
-
-is the most efficient way to achieve the same output as
-
-	slog.Info("hello", "count", 3)
-
-# Customizing a type's logging behavior
-
-If a type implements the [LogValuer] interface, the [Value] returned from its LogValue
-method is used for logging. You can use this to control how values of the type
-appear in logs. For example, you can redact secret information like passwords,
-or gather a struct's fields in a Group. See the examples under [LogValuer] for
-details.
-
-A LogValue method may return a Value that itself implements [LogValuer]. The [Value.Resolve]
-method handles these cases carefully, avoiding infinite loops and unbounded recursion.
-Handler authors and others may wish to use Value.Resolve instead of calling LogValue directly.
-
-# Wrapping output methods
-
-The logger functions use reflection over the call stack to find the file name
-and line number of the logging call within the application. This can produce
-incorrect source information for functions that wrap slog. For instance, if you
-define this function in file mylog.go:
-
-	func Infof(format string, args ...any) {
-		slog.Default().Info(fmt.Sprintf(format, args...))
-	}
-
-and you call it like this in main.go:
-
-	Infof("hello, %s", "world")
-
-then slog will report the source file as mylog.go, not main.go.
-
-A correct implementation of Infof will obtain the source location
-(pc) and pass it to NewRecord.
-The Infof function in the package-level example called "wrapping"
-demonstrates how to do this.
-
-# Working with Records
-
-Sometimes a Handler will need to modify a Record
-before passing it on to another Handler or backend.
-A Record contains a mixture of simple public fields (e.g. Time, Level, Message)
-and hidden fields that refer to state (such as attributes) indirectly. This
-means that modifying a simple copy of a Record (e.g. by calling
-[Record.Add] or [Record.AddAttrs] to add attributes)
-may have unexpected effects on the original.
-Before modifying a Record, use [Clone] to
-create a copy that shares no state with the original,
-or create a new Record with [NewRecord]
-and build up its Attrs by traversing the old ones with [Record.Attrs].
-
-# Performance considerations
-
-If profiling your application demonstrates that logging is taking significant time,
-the following suggestions may help.
-
-If many log lines have a common attribute, use [Logger.With] to create a Logger with
-that attribute. The built-in handlers will format that attribute only once, at the
-call to [Logger.With]. The [Handler] interface is designed to allow that optimization,
-and a well-written Handler should take advantage of it.
-
-The arguments to a log call are always evaluated, even if the log event is discarded.
-If possible, defer computation so that it happens only if the value is actually logged.
-For example, consider the call
-
-	slog.Info("starting request", "url", r.URL.String()) // may compute String unnecessarily
-
-The URL.String method will be called even if the logger discards Info-level events.
-Instead, pass the URL directly:
-
-	slog.Info("starting request", "url", &r.URL) // calls URL.String only if needed
-
-The built-in [TextHandler] will call its String method, but only
-if the log event is enabled.
-Avoiding the call to String also preserves the structure of the underlying value.
-For example [JSONHandler] emits the components of the parsed URL as a JSON object.
-
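To make that concrete, the two variants above might emit JSON along these lines (hypothetical, heavily abridged output; the object's exact fields follow encoding/json's marshaling of url.URL):

	{"msg":"starting request","url":"https://example.com/items"}                             // r.URL.String()
	{"msg":"starting request","url":{"Scheme":"https","Host":"example.com","Path":"/items"}} // &r.URL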
-If you want to avoid eagerly paying the cost of the String call
-without causing the handler to potentially inspect the structure of the value,
-wrap the value in a fmt.Stringer implementation that hides its Marshal methods.
-
-You can also use the [LogValuer] interface to avoid unnecessary work in disabled log
-calls. Say you need to log some expensive value:
-
-	slog.Debug("frobbing", "value", computeExpensiveValue(arg))
-
-Even if this line is disabled, computeExpensiveValue will be called.
-To avoid that, define a type implementing LogValuer:
-
-	type expensive struct { arg int }
-
-	func (e expensive) LogValue() slog.Value {
-		return slog.AnyValue(computeExpensiveValue(e.arg))
-	}
-
-Then use a value of that type in log calls:
-
-	slog.Debug("frobbing", "value", expensive{arg})
-
-Now computeExpensiveValue will only be called when the line is enabled.
-
-The built-in handlers acquire a lock before calling [io.Writer.Write]
-to ensure that each record is written in one piece. User-defined
-handlers are responsible for their own locking.
-*/
-package slog
diff --git a/vendor/golang.org/x/exp/slog/handler.go b/vendor/golang.org/x/exp/slog/handler.go
deleted file mode 100644
index bd635cb81..000000000
--- a/vendor/golang.org/x/exp/slog/handler.go
+++ /dev/null
@@ -1,577 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package slog
-
-import (
-	"context"
-	"fmt"
-	"io"
-	"reflect"
-	"strconv"
-	"sync"
-	"time"
-
-	"golang.org/x/exp/slices"
-	"golang.org/x/exp/slog/internal/buffer"
-)
-
-// A Handler handles log records produced by a Logger.
-//
-// A typical handler may print log records to standard error,
-// or write them to a file or database, or perhaps augment them
-// with additional attributes and pass them on to another handler.
-//
-// Any of the Handler's methods may be called concurrently with itself
-// or with other methods. It is the responsibility of the Handler to
-// manage this concurrency.
-//
-// Users of the slog package should not invoke Handler methods directly.
-// They should use the methods of [Logger] instead.
-type Handler interface {
-	// Enabled reports whether the handler handles records at the given level.
-	// The handler ignores records whose level is lower.
-	// It is called early, before any arguments are processed,
-	// to save effort if the log event should be discarded.
-	// If called from a Logger method, the first argument is the context
-	// passed to that method, or context.Background() if nil was passed
-	// or the method does not take a context.
-	// The context is passed so Enabled can use its values
-	// to make a decision.
-	Enabled(context.Context, Level) bool
-
-	// Handle handles the Record.
-	// It will only be called when Enabled returns true.
-	// The Context argument is as for Enabled.
-	// It is present solely to provide Handlers access to the context's values.
-	// Canceling the context should not affect record processing.
-	// (Among other things, log messages may be necessary to debug a
-	// cancellation-related problem.)
-	//
-	// Handle methods that produce output should observe the following rules:
-	//   - If r.Time is the zero time, ignore the time.
-	//   - If r.PC is zero, ignore it.
-	//   - Attr's values should be resolved.
-	//   - If an Attr's key and value are both the zero value, ignore the Attr.
-	//     This can be tested with attr.Equal(Attr{}).
- // - If a group's key is empty, inline the group's Attrs. - // - If a group has no Attrs (even if it has a non-empty key), - // ignore it. - Handle(context.Context, Record) error - - // WithAttrs returns a new Handler whose attributes consist of - // both the receiver's attributes and the arguments. - // The Handler owns the slice: it may retain, modify or discard it. - WithAttrs(attrs []Attr) Handler - - // WithGroup returns a new Handler with the given group appended to - // the receiver's existing groups. - // The keys of all subsequent attributes, whether added by With or in a - // Record, should be qualified by the sequence of group names. - // - // How this qualification happens is up to the Handler, so long as - // this Handler's attribute keys differ from those of another Handler - // with a different sequence of group names. - // - // A Handler should treat WithGroup as starting a Group of Attrs that ends - // at the end of the log event. That is, - // - // logger.WithGroup("s").LogAttrs(level, msg, slog.Int("a", 1), slog.Int("b", 2)) - // - // should behave like - // - // logger.LogAttrs(level, msg, slog.Group("s", slog.Int("a", 1), slog.Int("b", 2))) - // - // If the name is empty, WithGroup returns the receiver. - WithGroup(name string) Handler -} - -type defaultHandler struct { - ch *commonHandler - // log.Output, except for testing - output func(calldepth int, message string) error -} - -func newDefaultHandler(output func(int, string) error) *defaultHandler { - return &defaultHandler{ - ch: &commonHandler{json: false}, - output: output, - } -} - -func (*defaultHandler) Enabled(_ context.Context, l Level) bool { - return l >= LevelInfo -} - -// Collect the level, attributes and message in a string and -// write it with the default log.Logger. -// Let the log.Logger handle time and file/line. -func (h *defaultHandler) Handle(ctx context.Context, r Record) error { - buf := buffer.New() - buf.WriteString(r.Level.String()) - buf.WriteByte(' ') - buf.WriteString(r.Message) - state := h.ch.newHandleState(buf, true, " ", nil) - defer state.free() - state.appendNonBuiltIns(r) - - // skip [h.output, defaultHandler.Handle, handlerWriter.Write, log.Output] - return h.output(4, buf.String()) -} - -func (h *defaultHandler) WithAttrs(as []Attr) Handler { - return &defaultHandler{h.ch.withAttrs(as), h.output} -} - -func (h *defaultHandler) WithGroup(name string) Handler { - return &defaultHandler{h.ch.withGroup(name), h.output} -} - -// HandlerOptions are options for a TextHandler or JSONHandler. -// A zero HandlerOptions consists entirely of default values. -type HandlerOptions struct { - // AddSource causes the handler to compute the source code position - // of the log statement and add a SourceKey attribute to the output. - AddSource bool - - // Level reports the minimum record level that will be logged. - // The handler discards records with lower levels. - // If Level is nil, the handler assumes LevelInfo. - // The handler calls Level.Level for each record processed; - // to adjust the minimum level dynamically, use a LevelVar. - Level Leveler - - // ReplaceAttr is called to rewrite each non-group attribute before it is logged. - // The attribute's value has been resolved (see [Value.Resolve]). - // If ReplaceAttr returns an Attr with Key == "", the attribute is discarded. - // - // The built-in attributes with keys "time", "level", "source", and "msg" - // are passed to this function, except that time is omitted - // if zero, and source is omitted if AddSource is false. 
- // - // The first argument is a list of currently open groups that contain the - // Attr. It must not be retained or modified. ReplaceAttr is never called - // for Group attributes, only their contents. For example, the attribute - // list - // - // Int("a", 1), Group("g", Int("b", 2)), Int("c", 3) - // - // results in consecutive calls to ReplaceAttr with the following arguments: - // - // nil, Int("a", 1) - // []string{"g"}, Int("b", 2) - // nil, Int("c", 3) - // - // ReplaceAttr can be used to change the default keys of the built-in - // attributes, convert types (for example, to replace a `time.Time` with the - // integer seconds since the Unix epoch), sanitize personal information, or - // remove attributes from the output. - ReplaceAttr func(groups []string, a Attr) Attr -} - -// Keys for "built-in" attributes. -const ( - // TimeKey is the key used by the built-in handlers for the time - // when the log method is called. The associated Value is a [time.Time]. - TimeKey = "time" - // LevelKey is the key used by the built-in handlers for the level - // of the log call. The associated value is a [Level]. - LevelKey = "level" - // MessageKey is the key used by the built-in handlers for the - // message of the log call. The associated value is a string. - MessageKey = "msg" - // SourceKey is the key used by the built-in handlers for the source file - // and line of the log call. The associated value is a string. - SourceKey = "source" -) - -type commonHandler struct { - json bool // true => output JSON; false => output text - opts HandlerOptions - preformattedAttrs []byte - groupPrefix string // for text: prefix of groups opened in preformatting - groups []string // all groups started from WithGroup - nOpenGroups int // the number of groups opened in preformattedAttrs - mu sync.Mutex - w io.Writer -} - -func (h *commonHandler) clone() *commonHandler { - // We can't use assignment because we can't copy the mutex. - return &commonHandler{ - json: h.json, - opts: h.opts, - preformattedAttrs: slices.Clip(h.preformattedAttrs), - groupPrefix: h.groupPrefix, - groups: slices.Clip(h.groups), - nOpenGroups: h.nOpenGroups, - w: h.w, - } -} - -// enabled reports whether l is greater than or equal to the -// minimum level. -func (h *commonHandler) enabled(l Level) bool { - minLevel := LevelInfo - if h.opts.Level != nil { - minLevel = h.opts.Level.Level() - } - return l >= minLevel -} - -func (h *commonHandler) withAttrs(as []Attr) *commonHandler { - h2 := h.clone() - // Pre-format the attributes as an optimization. - prefix := buffer.New() - defer prefix.Free() - prefix.WriteString(h.groupPrefix) - state := h2.newHandleState((*buffer.Buffer)(&h2.preformattedAttrs), false, "", prefix) - defer state.free() - if len(h2.preformattedAttrs) > 0 { - state.sep = h.attrSep() - } - state.openGroups() - for _, a := range as { - state.appendAttr(a) - } - // Remember the new prefix for later keys. - h2.groupPrefix = state.prefix.String() - // Remember how many opened groups are in preformattedAttrs, - // so we don't open them again when we handle a Record. - h2.nOpenGroups = len(h2.groups) - return h2 -} - -func (h *commonHandler) withGroup(name string) *commonHandler { - if name == "" { - return h - } - h2 := h.clone() - h2.groups = append(h2.groups, name) - return h2 -} - -func (h *commonHandler) handle(r Record) error { - state := h.newHandleState(buffer.New(), true, "", nil) - defer state.free() - if h.json { - state.buf.WriteByte('{') - } - // Built-in attributes. They are not in a group. 
- stateGroups := state.groups - state.groups = nil // So ReplaceAttrs sees no groups instead of the pre groups. - rep := h.opts.ReplaceAttr - // time - if !r.Time.IsZero() { - key := TimeKey - val := r.Time.Round(0) // strip monotonic to match Attr behavior - if rep == nil { - state.appendKey(key) - state.appendTime(val) - } else { - state.appendAttr(Time(key, val)) - } - } - // level - key := LevelKey - val := r.Level - if rep == nil { - state.appendKey(key) - state.appendString(val.String()) - } else { - state.appendAttr(Any(key, val)) - } - // source - if h.opts.AddSource { - state.appendAttr(Any(SourceKey, r.source())) - } - key = MessageKey - msg := r.Message - if rep == nil { - state.appendKey(key) - state.appendString(msg) - } else { - state.appendAttr(String(key, msg)) - } - state.groups = stateGroups // Restore groups passed to ReplaceAttrs. - state.appendNonBuiltIns(r) - state.buf.WriteByte('\n') - - h.mu.Lock() - defer h.mu.Unlock() - _, err := h.w.Write(*state.buf) - return err -} - -func (s *handleState) appendNonBuiltIns(r Record) { - // preformatted Attrs - if len(s.h.preformattedAttrs) > 0 { - s.buf.WriteString(s.sep) - s.buf.Write(s.h.preformattedAttrs) - s.sep = s.h.attrSep() - } - // Attrs in Record -- unlike the built-in ones, they are in groups started - // from WithGroup. - s.prefix = buffer.New() - defer s.prefix.Free() - s.prefix.WriteString(s.h.groupPrefix) - s.openGroups() - r.Attrs(func(a Attr) bool { - s.appendAttr(a) - return true - }) - if s.h.json { - // Close all open groups. - for range s.h.groups { - s.buf.WriteByte('}') - } - // Close the top-level object. - s.buf.WriteByte('}') - } -} - -// attrSep returns the separator between attributes. -func (h *commonHandler) attrSep() string { - if h.json { - return "," - } - return " " -} - -// handleState holds state for a single call to commonHandler.handle. -// The initial value of sep determines whether to emit a separator -// before the next key, after which it stays true. -type handleState struct { - h *commonHandler - buf *buffer.Buffer - freeBuf bool // should buf be freed? - sep string // separator to write before next key - prefix *buffer.Buffer // for text: key prefix - groups *[]string // pool-allocated slice of active groups, for ReplaceAttr -} - -var groupPool = sync.Pool{New: func() any { - s := make([]string, 0, 10) - return &s -}} - -func (h *commonHandler) newHandleState(buf *buffer.Buffer, freeBuf bool, sep string, prefix *buffer.Buffer) handleState { - s := handleState{ - h: h, - buf: buf, - freeBuf: freeBuf, - sep: sep, - prefix: prefix, - } - if h.opts.ReplaceAttr != nil { - s.groups = groupPool.Get().(*[]string) - *s.groups = append(*s.groups, h.groups[:h.nOpenGroups]...) - } - return s -} - -func (s *handleState) free() { - if s.freeBuf { - s.buf.Free() - } - if gs := s.groups; gs != nil { - *gs = (*gs)[:0] - groupPool.Put(gs) - } -} - -func (s *handleState) openGroups() { - for _, n := range s.h.groups[s.h.nOpenGroups:] { - s.openGroup(n) - } -} - -// Separator for group names and keys. -const keyComponentSep = '.' - -// openGroup starts a new group of attributes -// with the given name. -func (s *handleState) openGroup(name string) { - if s.h.json { - s.appendKey(name) - s.buf.WriteByte('{') - s.sep = "" - } else { - s.prefix.WriteString(name) - s.prefix.WriteByte(keyComponentSep) - } - // Collect group names for ReplaceAttr. - if s.groups != nil { - *s.groups = append(*s.groups, name) - } -} - -// closeGroup ends the group with the given name. 
-func (s *handleState) closeGroup(name string) {
-	if s.h.json {
-		s.buf.WriteByte('}')
-	} else {
-		(*s.prefix) = (*s.prefix)[:len(*s.prefix)-len(name)-1 /* for keyComponentSep */]
-	}
-	s.sep = s.h.attrSep()
-	if s.groups != nil {
-		*s.groups = (*s.groups)[:len(*s.groups)-1]
-	}
-}
-
-// appendAttr appends the Attr's key and value.
-// It handles replacement and checking for an empty key
-// (after replacement).
-func (s *handleState) appendAttr(a Attr) {
-	if rep := s.h.opts.ReplaceAttr; rep != nil && a.Value.Kind() != KindGroup {
-		var gs []string
-		if s.groups != nil {
-			gs = *s.groups
-		}
-		// Resolve before calling ReplaceAttr, so the user doesn't have to.
-		a.Value = a.Value.Resolve()
-		a = rep(gs, a)
-	}
-	a.Value = a.Value.Resolve()
-	// Elide empty Attrs.
-	if a.isEmpty() {
-		return
-	}
-	// Special case: Source.
-	if v := a.Value; v.Kind() == KindAny {
-		if src, ok := v.Any().(*Source); ok {
-			if s.h.json {
-				a.Value = src.group()
-			} else {
-				a.Value = StringValue(fmt.Sprintf("%s:%d", src.File, src.Line))
-			}
-		}
-	}
-	if a.Value.Kind() == KindGroup {
-		attrs := a.Value.Group()
-		// Output only non-empty groups.
-		if len(attrs) > 0 {
-			// Inline a group with an empty key.
-			if a.Key != "" {
-				s.openGroup(a.Key)
-			}
-			for _, aa := range attrs {
-				s.appendAttr(aa)
-			}
-			if a.Key != "" {
-				s.closeGroup(a.Key)
-			}
-		}
-	} else {
-		s.appendKey(a.Key)
-		s.appendValue(a.Value)
-	}
-}
-
-func (s *handleState) appendError(err error) {
-	s.appendString(fmt.Sprintf("!ERROR:%v", err))
-}
-
-func (s *handleState) appendKey(key string) {
-	s.buf.WriteString(s.sep)
-	if s.prefix != nil {
-		// TODO: optimize by avoiding allocation.
-		s.appendString(string(*s.prefix) + key)
-	} else {
-		s.appendString(key)
-	}
-	if s.h.json {
-		s.buf.WriteByte(':')
-	} else {
-		s.buf.WriteByte('=')
-	}
-	s.sep = s.h.attrSep()
-}
-
-func (s *handleState) appendString(str string) {
-	if s.h.json {
-		s.buf.WriteByte('"')
-		*s.buf = appendEscapedJSONString(*s.buf, str)
-		s.buf.WriteByte('"')
-	} else {
-		// text
-		if needsQuoting(str) {
-			*s.buf = strconv.AppendQuote(*s.buf, str)
-		} else {
-			s.buf.WriteString(str)
-		}
-	}
-}
-
-func (s *handleState) appendValue(v Value) {
-	defer func() {
-		if r := recover(); r != nil {
-			// If it panics with a nil pointer, the most likely cases are
-			// an encoding.TextMarshaler or error fails to guard against nil,
-			// in which case "<nil>" seems to be the feasible choice.
-			//
-			// Adapted from the code in fmt/print.go.
-			if v := reflect.ValueOf(v.any); v.Kind() == reflect.Pointer && v.IsNil() {
-				s.appendString("<nil>")
-				return
-			}
-
-			// Otherwise just print the original panic message.
-			s.appendString(fmt.Sprintf("!PANIC: %v", r))
-		}
-	}()
-
-	var err error
-	if s.h.json {
-		err = appendJSONValue(s, v)
-	} else {
-		err = appendTextValue(s, v)
-	}
-	if err != nil {
-		s.appendError(err)
-	}
-}
-
-func (s *handleState) appendTime(t time.Time) {
-	if s.h.json {
-		appendJSONTime(s, t)
-	} else {
-		writeTimeRFC3339Millis(s.buf, t)
-	}
-}
-
-// This takes half the time of Time.AppendFormat.
-func writeTimeRFC3339Millis(buf *buffer.Buffer, t time.Time) { - year, month, day := t.Date() - buf.WritePosIntWidth(year, 4) - buf.WriteByte('-') - buf.WritePosIntWidth(int(month), 2) - buf.WriteByte('-') - buf.WritePosIntWidth(day, 2) - buf.WriteByte('T') - hour, min, sec := t.Clock() - buf.WritePosIntWidth(hour, 2) - buf.WriteByte(':') - buf.WritePosIntWidth(min, 2) - buf.WriteByte(':') - buf.WritePosIntWidth(sec, 2) - ns := t.Nanosecond() - buf.WriteByte('.') - buf.WritePosIntWidth(ns/1e6, 3) - _, offsetSeconds := t.Zone() - if offsetSeconds == 0 { - buf.WriteByte('Z') - } else { - offsetMinutes := offsetSeconds / 60 - if offsetMinutes < 0 { - buf.WriteByte('-') - offsetMinutes = -offsetMinutes - } else { - buf.WriteByte('+') - } - buf.WritePosIntWidth(offsetMinutes/60, 2) - buf.WriteByte(':') - buf.WritePosIntWidth(offsetMinutes%60, 2) - } -} diff --git a/vendor/golang.org/x/exp/slog/internal/buffer/buffer.go b/vendor/golang.org/x/exp/slog/internal/buffer/buffer.go deleted file mode 100644 index 7786c166e..000000000 --- a/vendor/golang.org/x/exp/slog/internal/buffer/buffer.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package buffer provides a pool-allocated byte buffer. -package buffer - -import ( - "sync" -) - -// Buffer adapted from go/src/fmt/print.go -type Buffer []byte - -// Having an initial size gives a dramatic speedup. -var bufPool = sync.Pool{ - New: func() any { - b := make([]byte, 0, 1024) - return (*Buffer)(&b) - }, -} - -func New() *Buffer { - return bufPool.Get().(*Buffer) -} - -func (b *Buffer) Free() { - // To reduce peak allocation, return only smaller buffers to the pool. - const maxBufferSize = 16 << 10 - if cap(*b) <= maxBufferSize { - *b = (*b)[:0] - bufPool.Put(b) - } -} - -func (b *Buffer) Reset() { - *b = (*b)[:0] -} - -func (b *Buffer) Write(p []byte) (int, error) { - *b = append(*b, p...) - return len(p), nil -} - -func (b *Buffer) WriteString(s string) { - *b = append(*b, s...) -} - -func (b *Buffer) WriteByte(c byte) { - *b = append(*b, c) -} - -func (b *Buffer) WritePosInt(i int) { - b.WritePosIntWidth(i, 0) -} - -// WritePosIntWidth writes non-negative integer i to the buffer, padded on the left -// by zeroes to the given width. Use a width of 0 to omit padding. -func (b *Buffer) WritePosIntWidth(i, width int) { - // Cheap integer to fixed-width decimal ASCII. - // Copied from log/log.go. - - if i < 0 { - panic("negative int") - } - - // Assemble decimal in reverse order. - var bb [20]byte - bp := len(bb) - 1 - for i >= 10 || width > 1 { - width-- - q := i / 10 - bb[bp] = byte('0' + i - q*10) - bp-- - i = q - } - // i < 10 - bb[bp] = byte('0' + i) - b.Write(bb[bp:]) -} - -func (b *Buffer) String() string { - return string(*b) -} diff --git a/vendor/golang.org/x/exp/slog/internal/ignorepc.go b/vendor/golang.org/x/exp/slog/internal/ignorepc.go deleted file mode 100644 index d1256426f..000000000 --- a/vendor/golang.org/x/exp/slog/internal/ignorepc.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package internal - -// If IgnorePC is true, do not invoke runtime.Callers to get the pc. -// This is solely for benchmarking the slowdown from runtime.Callers. 
-var IgnorePC = false diff --git a/vendor/golang.org/x/exp/slog/json_handler.go b/vendor/golang.org/x/exp/slog/json_handler.go deleted file mode 100644 index 157ada869..000000000 --- a/vendor/golang.org/x/exp/slog/json_handler.go +++ /dev/null @@ -1,336 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package slog - -import ( - "bytes" - "context" - "encoding/json" - "errors" - "fmt" - "io" - "strconv" - "time" - "unicode/utf8" - - "golang.org/x/exp/slog/internal/buffer" -) - -// JSONHandler is a Handler that writes Records to an io.Writer as -// line-delimited JSON objects. -type JSONHandler struct { - *commonHandler -} - -// NewJSONHandler creates a JSONHandler that writes to w, -// using the given options. -// If opts is nil, the default options are used. -func NewJSONHandler(w io.Writer, opts *HandlerOptions) *JSONHandler { - if opts == nil { - opts = &HandlerOptions{} - } - return &JSONHandler{ - &commonHandler{ - json: true, - w: w, - opts: *opts, - }, - } -} - -// Enabled reports whether the handler handles records at the given level. -// The handler ignores records whose level is lower. -func (h *JSONHandler) Enabled(_ context.Context, level Level) bool { - return h.commonHandler.enabled(level) -} - -// WithAttrs returns a new JSONHandler whose attributes consists -// of h's attributes followed by attrs. -func (h *JSONHandler) WithAttrs(attrs []Attr) Handler { - return &JSONHandler{commonHandler: h.commonHandler.withAttrs(attrs)} -} - -func (h *JSONHandler) WithGroup(name string) Handler { - return &JSONHandler{commonHandler: h.commonHandler.withGroup(name)} -} - -// Handle formats its argument Record as a JSON object on a single line. -// -// If the Record's time is zero, the time is omitted. -// Otherwise, the key is "time" -// and the value is output as with json.Marshal. -// -// If the Record's level is zero, the level is omitted. -// Otherwise, the key is "level" -// and the value of [Level.String] is output. -// -// If the AddSource option is set and source information is available, -// the key is "source" -// and the value is output as "FILE:LINE". -// -// The message's key is "msg". -// -// To modify these or other attributes, or remove them from the output, use -// [HandlerOptions.ReplaceAttr]. -// -// Values are formatted as with an [encoding/json.Encoder] with SetEscapeHTML(false), -// with two exceptions. -// -// First, an Attr whose Value is of type error is formatted as a string, by -// calling its Error method. Only errors in Attrs receive this special treatment, -// not errors embedded in structs, slices, maps or other data structures that -// are processed by the encoding/json package. -// -// Second, an encoding failure does not cause Handle to return an error. -// Instead, the error message is formatted as a string. -// -// Each call to Handle results in a single serialized call to io.Writer.Write. -func (h *JSONHandler) Handle(_ context.Context, r Record) error { - return h.commonHandler.handle(r) -} - -// Adapted from time.Time.MarshalJSON to avoid allocation. -func appendJSONTime(s *handleState, t time.Time) { - if y := t.Year(); y < 0 || y >= 10000 { - // RFC 3339 is clear that years are 4 digits exactly. - // See golang.org/issue/4556#c15 for more discussion. 
- s.appendError(errors.New("time.Time year outside of range [0,9999]")) - } - s.buf.WriteByte('"') - *s.buf = t.AppendFormat(*s.buf, time.RFC3339Nano) - s.buf.WriteByte('"') -} - -func appendJSONValue(s *handleState, v Value) error { - switch v.Kind() { - case KindString: - s.appendString(v.str()) - case KindInt64: - *s.buf = strconv.AppendInt(*s.buf, v.Int64(), 10) - case KindUint64: - *s.buf = strconv.AppendUint(*s.buf, v.Uint64(), 10) - case KindFloat64: - // json.Marshal is funny about floats; it doesn't - // always match strconv.AppendFloat. So just call it. - // That's expensive, but floats are rare. - if err := appendJSONMarshal(s.buf, v.Float64()); err != nil { - return err - } - case KindBool: - *s.buf = strconv.AppendBool(*s.buf, v.Bool()) - case KindDuration: - // Do what json.Marshal does. - *s.buf = strconv.AppendInt(*s.buf, int64(v.Duration()), 10) - case KindTime: - s.appendTime(v.Time()) - case KindAny: - a := v.Any() - _, jm := a.(json.Marshaler) - if err, ok := a.(error); ok && !jm { - s.appendString(err.Error()) - } else { - return appendJSONMarshal(s.buf, a) - } - default: - panic(fmt.Sprintf("bad kind: %s", v.Kind())) - } - return nil -} - -func appendJSONMarshal(buf *buffer.Buffer, v any) error { - // Use a json.Encoder to avoid escaping HTML. - var bb bytes.Buffer - enc := json.NewEncoder(&bb) - enc.SetEscapeHTML(false) - if err := enc.Encode(v); err != nil { - return err - } - bs := bb.Bytes() - buf.Write(bs[:len(bs)-1]) // remove final newline - return nil -} - -// appendEscapedJSONString escapes s for JSON and appends it to buf. -// It does not surround the string in quotation marks. -// -// Modified from encoding/json/encode.go:encodeState.string, -// with escapeHTML set to false. -func appendEscapedJSONString(buf []byte, s string) []byte { - char := func(b byte) { buf = append(buf, b) } - str := func(s string) { buf = append(buf, s...) } - - start := 0 - for i := 0; i < len(s); { - if b := s[i]; b < utf8.RuneSelf { - if safeSet[b] { - i++ - continue - } - if start < i { - str(s[start:i]) - } - char('\\') - switch b { - case '\\', '"': - char(b) - case '\n': - char('n') - case '\r': - char('r') - case '\t': - char('t') - default: - // This encodes bytes < 0x20 except for \t, \n and \r. - str(`u00`) - char(hex[b>>4]) - char(hex[b&0xF]) - } - i++ - start = i - continue - } - c, size := utf8.DecodeRuneInString(s[i:]) - if c == utf8.RuneError && size == 1 { - if start < i { - str(s[start:i]) - } - str(`\ufffd`) - i += size - start = i - continue - } - // U+2028 is LINE SEPARATOR. - // U+2029 is PARAGRAPH SEPARATOR. - // They are both technically valid characters in JSON strings, - // but don't work in JSONP, which has to be evaluated as JavaScript, - // and can lead to security holes there. It is valid JSON to - // escape them, so we do so unconditionally. - // See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion. - if c == '\u2028' || c == '\u2029' { - if start < i { - str(s[start:i]) - } - str(`\u202`) - char(hex[c&0xF]) - i += size - start = i - continue - } - i += size - } - if start < len(s) { - str(s[start:]) - } - return buf -} - -var hex = "0123456789abcdef" - -// Copied from encoding/json/tables.go. -// -// safeSet holds the value true if the ASCII character with the given array -// position can be represented inside a JSON string without any further -// escaping. -// -// All values are true except for the ASCII control characters (0-31), the -// double quote ("), and the backslash character ("\"). 
-var safeSet = [utf8.RuneSelf]bool{
-	' ':      true,
-	'!':      true,
-	'"':      false,
-	'#':      true,
-	'$':      true,
-	'%':      true,
-	'&':      true,
-	'\'':     true,
-	'(':      true,
-	')':      true,
-	'*':      true,
-	'+':      true,
-	',':      true,
-	'-':      true,
-	'.':      true,
-	'/':      true,
-	'0':      true,
-	'1':      true,
-	'2':      true,
-	'3':      true,
-	'4':      true,
-	'5':      true,
-	'6':      true,
-	'7':      true,
-	'8':      true,
-	'9':      true,
-	':':      true,
-	';':      true,
-	'<':      true,
-	'=':      true,
-	'>':      true,
-	'?':      true,
-	'@':      true,
-	'A':      true,
-	'B':      true,
-	'C':      true,
-	'D':      true,
-	'E':      true,
-	'F':      true,
-	'G':      true,
-	'H':      true,
-	'I':      true,
-	'J':      true,
-	'K':      true,
-	'L':      true,
-	'M':      true,
-	'N':      true,
-	'O':      true,
-	'P':      true,
-	'Q':      true,
-	'R':      true,
-	'S':      true,
-	'T':      true,
-	'U':      true,
-	'V':      true,
-	'W':      true,
-	'X':      true,
-	'Y':      true,
-	'Z':      true,
-	'[':      true,
-	'\\':     false,
-	']':      true,
-	'^':      true,
-	'_':      true,
-	'`':      true,
-	'a':      true,
-	'b':      true,
-	'c':      true,
-	'd':      true,
-	'e':      true,
-	'f':      true,
-	'g':      true,
-	'h':      true,
-	'i':      true,
-	'j':      true,
-	'k':      true,
-	'l':      true,
-	'm':      true,
-	'n':      true,
-	'o':      true,
-	'p':      true,
-	'q':      true,
-	'r':      true,
-	's':      true,
-	't':      true,
-	'u':      true,
-	'v':      true,
-	'w':      true,
-	'x':      true,
-	'y':      true,
-	'z':      true,
-	'{':      true,
-	'|':      true,
-	'}':      true,
-	'~':      true,
-	'\u007f': true,
-}
diff --git a/vendor/golang.org/x/exp/slog/level.go b/vendor/golang.org/x/exp/slog/level.go
deleted file mode 100644
index b2365f0aa..000000000
--- a/vendor/golang.org/x/exp/slog/level.go
+++ /dev/null
@@ -1,201 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package slog
-
-import (
-	"errors"
-	"fmt"
-	"strconv"
-	"strings"
-	"sync/atomic"
-)
-
-// A Level is the importance or severity of a log event.
-// The higher the level, the more important or severe the event.
-type Level int
-
-// Level numbers are inherently arbitrary,
-// but we picked them to satisfy three constraints.
-// Any system can map them to another numbering scheme if it wishes.
-//
-// First, we wanted the default level to be Info. Since Levels are ints, Info is
-// the default value for int, zero.
-//
-// Second, we wanted to make it easy to use levels to specify logger verbosity.
-// Since a larger level means a more severe event, a logger that accepts events
-// with smaller (or more negative) level means a more verbose logger. Logger
-// verbosity is thus the negation of event severity, and the default verbosity
-// of 0 accepts all events at least as severe as INFO.
-//
-// Third, we wanted some room between levels to accommodate schemes with named
-// levels between ours. For example, Google Cloud Logging defines a Notice level
-// between Info and Warn. Since there are only a few of these intermediate
-// levels, the gap between the numbers need not be large. Our gap of 4 matches
-// OpenTelemetry's mapping. Subtracting 9 from an OpenTelemetry level in the
-// DEBUG, INFO, WARN and ERROR ranges converts it to the corresponding slog
-// Level range. OpenTelemetry also has the names TRACE and FATAL, which slog
-// does not. But those OpenTelemetry levels can still be represented as slog
-// Levels by using the appropriate integers.
-//
-// Names for common levels.
-const (
-	LevelDebug Level = -4
-	LevelInfo  Level = 0
-	LevelWarn  Level = 4
-	LevelError Level = 8
-)
-
-// String returns a name for the level.
-// If the level has a name, then that name
-// in uppercase is returned.
-// If the level is between named values, then -// an integer is appended to the uppercased name. -// Examples: -// -// LevelWarn.String() => "WARN" -// (LevelInfo+2).String() => "INFO+2" -func (l Level) String() string { - str := func(base string, val Level) string { - if val == 0 { - return base - } - return fmt.Sprintf("%s%+d", base, val) - } - - switch { - case l < LevelInfo: - return str("DEBUG", l-LevelDebug) - case l < LevelWarn: - return str("INFO", l-LevelInfo) - case l < LevelError: - return str("WARN", l-LevelWarn) - default: - return str("ERROR", l-LevelError) - } -} - -// MarshalJSON implements [encoding/json.Marshaler] -// by quoting the output of [Level.String]. -func (l Level) MarshalJSON() ([]byte, error) { - // AppendQuote is sufficient for JSON-encoding all Level strings. - // They don't contain any runes that would produce invalid JSON - // when escaped. - return strconv.AppendQuote(nil, l.String()), nil -} - -// UnmarshalJSON implements [encoding/json.Unmarshaler] -// It accepts any string produced by [Level.MarshalJSON], -// ignoring case. -// It also accepts numeric offsets that would result in a different string on -// output. For example, "Error-8" would marshal as "INFO". -func (l *Level) UnmarshalJSON(data []byte) error { - s, err := strconv.Unquote(string(data)) - if err != nil { - return err - } - return l.parse(s) -} - -// MarshalText implements [encoding.TextMarshaler] -// by calling [Level.String]. -func (l Level) MarshalText() ([]byte, error) { - return []byte(l.String()), nil -} - -// UnmarshalText implements [encoding.TextUnmarshaler]. -// It accepts any string produced by [Level.MarshalText], -// ignoring case. -// It also accepts numeric offsets that would result in a different string on -// output. For example, "Error-8" would marshal as "INFO". -func (l *Level) UnmarshalText(data []byte) error { - return l.parse(string(data)) -} - -func (l *Level) parse(s string) (err error) { - defer func() { - if err != nil { - err = fmt.Errorf("slog: level string %q: %w", s, err) - } - }() - - name := s - offset := 0 - if i := strings.IndexAny(s, "+-"); i >= 0 { - name = s[:i] - offset, err = strconv.Atoi(s[i:]) - if err != nil { - return err - } - } - switch strings.ToUpper(name) { - case "DEBUG": - *l = LevelDebug - case "INFO": - *l = LevelInfo - case "WARN": - *l = LevelWarn - case "ERROR": - *l = LevelError - default: - return errors.New("unknown name") - } - *l += Level(offset) - return nil -} - -// Level returns the receiver. -// It implements Leveler. -func (l Level) Level() Level { return l } - -// A LevelVar is a Level variable, to allow a Handler level to change -// dynamically. -// It implements Leveler as well as a Set method, -// and it is safe for use by multiple goroutines. -// The zero LevelVar corresponds to LevelInfo. -type LevelVar struct { - val atomic.Int64 -} - -// Level returns v's level. -func (v *LevelVar) Level() Level { - return Level(int(v.val.Load())) -} - -// Set sets v's level to l. -func (v *LevelVar) Set(l Level) { - v.val.Store(int64(l)) -} - -func (v *LevelVar) String() string { - return fmt.Sprintf("LevelVar(%s)", v.Level()) -} - -// MarshalText implements [encoding.TextMarshaler] -// by calling [Level.MarshalText]. -func (v *LevelVar) MarshalText() ([]byte, error) { - return v.Level().MarshalText() -} - -// UnmarshalText implements [encoding.TextUnmarshaler] -// by calling [Level.UnmarshalText]. 
-func (v *LevelVar) UnmarshalText(data []byte) error { - var l Level - if err := l.UnmarshalText(data); err != nil { - return err - } - v.Set(l) - return nil -} - -// A Leveler provides a Level value. -// -// As Level itself implements Leveler, clients typically supply -// a Level value wherever a Leveler is needed, such as in HandlerOptions. -// Clients who need to vary the level dynamically can provide a more complex -// Leveler implementation such as *LevelVar. -type Leveler interface { - Level() Level -} diff --git a/vendor/golang.org/x/exp/slog/logger.go b/vendor/golang.org/x/exp/slog/logger.go deleted file mode 100644 index e87ec9936..000000000 --- a/vendor/golang.org/x/exp/slog/logger.go +++ /dev/null @@ -1,343 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package slog - -import ( - "context" - "log" - "runtime" - "sync/atomic" - "time" - - "golang.org/x/exp/slog/internal" -) - -var defaultLogger atomic.Value - -func init() { - defaultLogger.Store(New(newDefaultHandler(log.Output))) -} - -// Default returns the default Logger. -func Default() *Logger { return defaultLogger.Load().(*Logger) } - -// SetDefault makes l the default Logger. -// After this call, output from the log package's default Logger -// (as with [log.Print], etc.) will be logged at LevelInfo using l's Handler. -func SetDefault(l *Logger) { - defaultLogger.Store(l) - // If the default's handler is a defaultHandler, then don't use a handleWriter, - // or we'll deadlock as they both try to acquire the log default mutex. - // The defaultHandler will use whatever the log default writer is currently - // set to, which is correct. - // This can occur with SetDefault(Default()). - // See TestSetDefault. - if _, ok := l.Handler().(*defaultHandler); !ok { - capturePC := log.Flags()&(log.Lshortfile|log.Llongfile) != 0 - log.SetOutput(&handlerWriter{l.Handler(), LevelInfo, capturePC}) - log.SetFlags(0) // we want just the log message, no time or location - } -} - -// handlerWriter is an io.Writer that calls a Handler. -// It is used to link the default log.Logger to the default slog.Logger. -type handlerWriter struct { - h Handler - level Level - capturePC bool -} - -func (w *handlerWriter) Write(buf []byte) (int, error) { - if !w.h.Enabled(context.Background(), w.level) { - return 0, nil - } - var pc uintptr - if !internal.IgnorePC && w.capturePC { - // skip [runtime.Callers, w.Write, Logger.Output, log.Print] - var pcs [1]uintptr - runtime.Callers(4, pcs[:]) - pc = pcs[0] - } - - // Remove final newline. - origLen := len(buf) // Report that the entire buf was written. - if len(buf) > 0 && buf[len(buf)-1] == '\n' { - buf = buf[:len(buf)-1] - } - r := NewRecord(time.Now(), w.level, string(buf), pc) - return origLen, w.h.Handle(context.Background(), r) -} - -// A Logger records structured information about each call to its -// Log, Debug, Info, Warn, and Error methods. -// For each call, it creates a Record and passes it to a Handler. -// -// To create a new Logger, call [New] or a Logger method -// that begins "With". -type Logger struct { - handler Handler // for structured logging -} - -func (l *Logger) clone() *Logger { - c := *l - return &c -} - -// Handler returns l's Handler. -func (l *Logger) Handler() Handler { return l.handler } - -// With returns a new Logger that includes the given arguments, converted to -// Attrs as in [Logger.Log]. 
-// The Attrs will be added to each output from the Logger. -// The new Logger shares the old Logger's context. -// The new Logger's handler is the result of calling WithAttrs on the receiver's -// handler. -func (l *Logger) With(args ...any) *Logger { - c := l.clone() - c.handler = l.handler.WithAttrs(argsToAttrSlice(args)) - return c -} - -// WithGroup returns a new Logger that starts a group. The keys of all -// attributes added to the Logger will be qualified by the given name. -// (How that qualification happens depends on the [Handler.WithGroup] -// method of the Logger's Handler.) -// The new Logger shares the old Logger's context. -// -// The new Logger's handler is the result of calling WithGroup on the receiver's -// handler. -func (l *Logger) WithGroup(name string) *Logger { - c := l.clone() - c.handler = l.handler.WithGroup(name) - return c - -} - -// New creates a new Logger with the given non-nil Handler and a nil context. -func New(h Handler) *Logger { - if h == nil { - panic("nil Handler") - } - return &Logger{handler: h} -} - -// With calls Logger.With on the default logger. -func With(args ...any) *Logger { - return Default().With(args...) -} - -// Enabled reports whether l emits log records at the given context and level. -func (l *Logger) Enabled(ctx context.Context, level Level) bool { - if ctx == nil { - ctx = context.Background() - } - return l.Handler().Enabled(ctx, level) -} - -// NewLogLogger returns a new log.Logger such that each call to its Output method -// dispatches a Record to the specified handler. The logger acts as a bridge from -// the older log API to newer structured logging handlers. -func NewLogLogger(h Handler, level Level) *log.Logger { - return log.New(&handlerWriter{h, level, true}, "", 0) -} - -// Log emits a log record with the current time and the given level and message. -// The Record's Attrs consist of the Logger's attributes followed by -// the Attrs specified by args. -// -// The attribute arguments are processed as follows: -// - If an argument is an Attr, it is used as is. -// - If an argument is a string and this is not the last argument, -// the following argument is treated as the value and the two are combined -// into an Attr. -// - Otherwise, the argument is treated as a value with key "!BADKEY". -func (l *Logger) Log(ctx context.Context, level Level, msg string, args ...any) { - l.log(ctx, level, msg, args...) -} - -// LogAttrs is a more efficient version of [Logger.Log] that accepts only Attrs. -func (l *Logger) LogAttrs(ctx context.Context, level Level, msg string, attrs ...Attr) { - l.logAttrs(ctx, level, msg, attrs...) -} - -// Debug logs at LevelDebug. -func (l *Logger) Debug(msg string, args ...any) { - l.log(nil, LevelDebug, msg, args...) -} - -// DebugContext logs at LevelDebug with the given context. -func (l *Logger) DebugContext(ctx context.Context, msg string, args ...any) { - l.log(ctx, LevelDebug, msg, args...) -} - -// DebugCtx logs at LevelDebug with the given context. -// Deprecated: Use Logger.DebugContext. -func (l *Logger) DebugCtx(ctx context.Context, msg string, args ...any) { - l.log(ctx, LevelDebug, msg, args...) -} - -// Info logs at LevelInfo. -func (l *Logger) Info(msg string, args ...any) { - l.log(nil, LevelInfo, msg, args...) -} - -// InfoContext logs at LevelInfo with the given context. -func (l *Logger) InfoContext(ctx context.Context, msg string, args ...any) { - l.log(ctx, LevelInfo, msg, args...) -} - -// InfoCtx logs at LevelInfo with the given context. -// Deprecated: Use Logger.InfoContext. 
-func (l *Logger) InfoCtx(ctx context.Context, msg string, args ...any) { - l.log(ctx, LevelInfo, msg, args...) -} - -// Warn logs at LevelWarn. -func (l *Logger) Warn(msg string, args ...any) { - l.log(nil, LevelWarn, msg, args...) -} - -// WarnContext logs at LevelWarn with the given context. -func (l *Logger) WarnContext(ctx context.Context, msg string, args ...any) { - l.log(ctx, LevelWarn, msg, args...) -} - -// WarnCtx logs at LevelWarn with the given context. -// Deprecated: Use Logger.WarnContext. -func (l *Logger) WarnCtx(ctx context.Context, msg string, args ...any) { - l.log(ctx, LevelWarn, msg, args...) -} - -// Error logs at LevelError. -func (l *Logger) Error(msg string, args ...any) { - l.log(nil, LevelError, msg, args...) -} - -// ErrorContext logs at LevelError with the given context. -func (l *Logger) ErrorContext(ctx context.Context, msg string, args ...any) { - l.log(ctx, LevelError, msg, args...) -} - -// ErrorCtx logs at LevelError with the given context. -// Deprecated: Use Logger.ErrorContext. -func (l *Logger) ErrorCtx(ctx context.Context, msg string, args ...any) { - l.log(ctx, LevelError, msg, args...) -} - -// log is the low-level logging method for methods that take ...any. -// It must always be called directly by an exported logging method -// or function, because it uses a fixed call depth to obtain the pc. -func (l *Logger) log(ctx context.Context, level Level, msg string, args ...any) { - if !l.Enabled(ctx, level) { - return - } - var pc uintptr - if !internal.IgnorePC { - var pcs [1]uintptr - // skip [runtime.Callers, this function, this function's caller] - runtime.Callers(3, pcs[:]) - pc = pcs[0] - } - r := NewRecord(time.Now(), level, msg, pc) - r.Add(args...) - if ctx == nil { - ctx = context.Background() - } - _ = l.Handler().Handle(ctx, r) -} - -// logAttrs is like [Logger.log], but for methods that take ...Attr. -func (l *Logger) logAttrs(ctx context.Context, level Level, msg string, attrs ...Attr) { - if !l.Enabled(ctx, level) { - return - } - var pc uintptr - if !internal.IgnorePC { - var pcs [1]uintptr - // skip [runtime.Callers, this function, this function's caller] - runtime.Callers(3, pcs[:]) - pc = pcs[0] - } - r := NewRecord(time.Now(), level, msg, pc) - r.AddAttrs(attrs...) - if ctx == nil { - ctx = context.Background() - } - _ = l.Handler().Handle(ctx, r) -} - -// Debug calls Logger.Debug on the default logger. -func Debug(msg string, args ...any) { - Default().log(nil, LevelDebug, msg, args...) -} - -// DebugContext calls Logger.DebugContext on the default logger. -func DebugContext(ctx context.Context, msg string, args ...any) { - Default().log(ctx, LevelDebug, msg, args...) -} - -// Info calls Logger.Info on the default logger. -func Info(msg string, args ...any) { - Default().log(nil, LevelInfo, msg, args...) -} - -// InfoContext calls Logger.InfoContext on the default logger. -func InfoContext(ctx context.Context, msg string, args ...any) { - Default().log(ctx, LevelInfo, msg, args...) -} - -// Warn calls Logger.Warn on the default logger. -func Warn(msg string, args ...any) { - Default().log(nil, LevelWarn, msg, args...) -} - -// WarnContext calls Logger.WarnContext on the default logger. -func WarnContext(ctx context.Context, msg string, args ...any) { - Default().log(ctx, LevelWarn, msg, args...) -} - -// Error calls Logger.Error on the default logger. -func Error(msg string, args ...any) { - Default().log(nil, LevelError, msg, args...) -} - -// ErrorContext calls Logger.ErrorContext on the default logger. 
-func ErrorContext(ctx context.Context, msg string, args ...any) {
-	Default().log(ctx, LevelError, msg, args...)
-}
-
-// DebugCtx calls Logger.DebugContext on the default logger.
-// Deprecated: call DebugContext.
-func DebugCtx(ctx context.Context, msg string, args ...any) {
-	Default().log(ctx, LevelDebug, msg, args...)
-}
-
-// InfoCtx calls Logger.InfoContext on the default logger.
-// Deprecated: call InfoContext.
-func InfoCtx(ctx context.Context, msg string, args ...any) {
-	Default().log(ctx, LevelInfo, msg, args...)
-}
-
-// WarnCtx calls Logger.WarnContext on the default logger.
-// Deprecated: call WarnContext.
-func WarnCtx(ctx context.Context, msg string, args ...any) {
-	Default().log(ctx, LevelWarn, msg, args...)
-}
-
-// ErrorCtx calls Logger.ErrorContext on the default logger.
-// Deprecated: call ErrorContext.
-func ErrorCtx(ctx context.Context, msg string, args ...any) {
-	Default().log(ctx, LevelError, msg, args...)
-}
-
-// Log calls Logger.Log on the default logger.
-func Log(ctx context.Context, level Level, msg string, args ...any) {
-	Default().log(ctx, level, msg, args...)
-}
-
-// LogAttrs calls Logger.LogAttrs on the default logger.
-func LogAttrs(ctx context.Context, level Level, msg string, attrs ...Attr) {
-	Default().logAttrs(ctx, level, msg, attrs...)
-}
diff --git a/vendor/golang.org/x/exp/slog/noplog.bench b/vendor/golang.org/x/exp/slog/noplog.bench
deleted file mode 100644
index ed9296ff6..000000000
--- a/vendor/golang.org/x/exp/slog/noplog.bench
+++ /dev/null
@@ -1,36 +0,0 @@
-goos: linux
-goarch: amd64
-pkg: golang.org/x/exp/slog
-cpu: Intel(R) Xeon(R) CPU @ 2.20GHz
-BenchmarkNopLog/attrs-8	1000000	1090 ns/op	0 B/op	0 allocs/op
-BenchmarkNopLog/attrs-8	1000000	1097 ns/op	0 B/op	0 allocs/op
-BenchmarkNopLog/attrs-8	1000000	1078 ns/op	0 B/op	0 allocs/op
-BenchmarkNopLog/attrs-8	1000000	1095 ns/op	0 B/op	0 allocs/op
-BenchmarkNopLog/attrs-8	1000000	1096 ns/op	0 B/op	0 allocs/op
-BenchmarkNopLog/attrs-parallel-8	4007268	308.2 ns/op	0 B/op	0 allocs/op
-BenchmarkNopLog/attrs-parallel-8	4016138	299.7 ns/op	0 B/op	0 allocs/op
-BenchmarkNopLog/attrs-parallel-8	4020529	305.9 ns/op	0 B/op	0 allocs/op
-BenchmarkNopLog/attrs-parallel-8	3977829	303.4 ns/op	0 B/op	0 allocs/op
-BenchmarkNopLog/attrs-parallel-8	3225438	318.5 ns/op	0 B/op	0 allocs/op
-BenchmarkNopLog/keys-values-8	1179256	994.2 ns/op	0 B/op	0 allocs/op
-BenchmarkNopLog/keys-values-8	1000000	1002 ns/op	0 B/op	0 allocs/op
-BenchmarkNopLog/keys-values-8	1216710	993.2 ns/op	0 B/op	0 allocs/op
-BenchmarkNopLog/keys-values-8	1000000	1013 ns/op	0 B/op	0 allocs/op
-BenchmarkNopLog/keys-values-8	1000000	1016 ns/op	0 B/op	0 allocs/op
-BenchmarkNopLog/WithContext-8	989066	1163 ns/op	0 B/op	0 allocs/op
-BenchmarkNopLog/WithContext-8	994116	1163 ns/op	0 B/op	0 allocs/op
-BenchmarkNopLog/WithContext-8	1000000	1152 ns/op	0 B/op	0 allocs/op
-BenchmarkNopLog/WithContext-8	991675	1165 ns/op	0 B/op	0 allocs/op
-BenchmarkNopLog/WithContext-8	965268	1166 ns/op	0 B/op	0 allocs/op
-BenchmarkNopLog/WithContext-parallel-8	3955503	303.3 ns/op	0 B/op	0 allocs/op
-BenchmarkNopLog/WithContext-parallel-8	3861188	307.8 ns/op	0 B/op	0 allocs/op
-BenchmarkNopLog/WithContext-parallel-8	3967752	303.9 ns/op	0 B/op	0 allocs/op
-BenchmarkNopLog/WithContext-parallel-8	3955203	302.7 ns/op	0 B/op	0 allocs/op
-BenchmarkNopLog/WithContext-parallel-8	3948278	301.1 ns/op	0 B/op	0 allocs/op
-BenchmarkNopLog/Ctx-8	940622	1247 ns/op	0 B/op	0 allocs/op
-BenchmarkNopLog/Ctx-8	936381	1257 ns/op	0 B/op	0 allocs/op
-BenchmarkNopLog/Ctx-8	959730	1266 ns/op	0 B/op	0 allocs/op
-BenchmarkNopLog/Ctx-8	943473	1290 ns/op	0 B/op	0 allocs/op
-BenchmarkNopLog/Ctx-8	919414	1259 ns/op	0 B/op	0 allocs/op
-PASS
-ok	golang.org/x/exp/slog	40.566s
diff --git a/vendor/golang.org/x/exp/slog/record.go b/vendor/golang.org/x/exp/slog/record.go
deleted file mode 100644
index 38b3440f7..000000000
--- a/vendor/golang.org/x/exp/slog/record.go
+++ /dev/null
@@ -1,207 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package slog
-
-import (
-	"runtime"
-	"time"
-
-	"golang.org/x/exp/slices"
-)
-
-const nAttrsInline = 5
-
-// A Record holds information about a log event.
-// Copies of a Record share state.
-// Do not modify a Record after handing out a copy to it.
-// Use [Record.Clone] to create a copy with no shared state.
-type Record struct {
-	// The time at which the output method (Log, Info, etc.) was called.
-	Time time.Time
-
-	// The log message.
-	Message string
-
-	// The level of the event.
-	Level Level
-
-	// The program counter at the time the record was constructed, as determined
-	// by runtime.Callers. If zero, no program counter is available.
-	//
-	// The only valid use for this value is as an argument to
-	// [runtime.CallersFrames]. In particular, it must not be passed to
-	// [runtime.FuncForPC].
-	PC uintptr
-
-	// Allocation optimization: an inline array sized to hold
-	// the majority of log calls (based on examination of open-source
-	// code). It holds the start of the list of Attrs.
-	front [nAttrsInline]Attr
-
-	// The number of Attrs in front.
-	nFront int
-
-	// The list of Attrs except for those in front.
-	// Invariants:
-	//   - len(back) > 0 iff nFront == len(front)
-	//   - Unused array elements are zero. Used to detect mistakes.
-	back []Attr
-}
-
-// NewRecord creates a Record from the given arguments.
-// Use [Record.AddAttrs] to add attributes to the Record.
-//
-// NewRecord is intended for logging APIs that want to support a [Handler] as
-// a backend.
-func NewRecord(t time.Time, level Level, msg string, pc uintptr) Record {
-	return Record{
-		Time:    t,
-		Message: msg,
-		Level:   level,
-		PC:      pc,
-	}
-}
-
-// Clone returns a copy of the record with no shared state.
-// The original record and the clone can both be modified
-// without interfering with each other.
-func (r Record) Clone() Record {
-	r.back = slices.Clip(r.back) // prevent append from mutating shared array
-	return r
-}
-
-// NumAttrs returns the number of attributes in the Record.
-func (r Record) NumAttrs() int {
-	return r.nFront + len(r.back)
-}
-
-// Attrs calls f on each Attr in the Record.
-// Iteration stops if f returns false.
-func (r Record) Attrs(f func(Attr) bool) {
-	for i := 0; i < r.nFront; i++ {
-		if !f(r.front[i]) {
-			return
-		}
-	}
-	for _, a := range r.back {
-		if !f(a) {
-			return
-		}
-	}
-}
-
-// AddAttrs appends the given Attrs to the Record's list of Attrs.
-func (r *Record) AddAttrs(attrs ...Attr) {
-	n := copy(r.front[r.nFront:], attrs)
-	r.nFront += n
-	// Check if a copy was modified by slicing past the end
-	// and seeing if the Attr there is non-zero.
-	if cap(r.back) > len(r.back) {
-		end := r.back[:len(r.back)+1][len(r.back)]
-		if !end.isEmpty() {
-			panic("copies of a slog.Record were both modified")
-		}
-	}
-	r.back = append(r.back, attrs[n:]...)
-}
-
-// Add converts the args to Attrs as described in [Logger.Log],
-// then appends the Attrs to the Record's list of Attrs.
-func (r *Record) Add(args ...any) {
-	var a Attr
-	for len(args) > 0 {
-		a, args = argsToAttr(args)
-		if r.nFront < len(r.front) {
-			r.front[r.nFront] = a
-			r.nFront++
-		} else {
-			if r.back == nil {
-				r.back = make([]Attr, 0, countAttrs(args))
-			}
-			r.back = append(r.back, a)
-		}
-	}
-
-}
-
-// countAttrs returns the number of Attrs that would be created from args.
-func countAttrs(args []any) int {
-	n := 0
-	for i := 0; i < len(args); i++ {
-		n++
-		if _, ok := args[i].(string); ok {
-			i++
-		}
-	}
-	return n
-}
-
-const badKey = "!BADKEY"
-
-// argsToAttr turns a prefix of the nonempty args slice into an Attr
-// and returns the unconsumed portion of the slice.
-// If args[0] is an Attr, it returns it.
-// If args[0] is a string, it treats the first two elements as
-// a key-value pair.
-// Otherwise, it treats args[0] as a value with a missing key.
-func argsToAttr(args []any) (Attr, []any) {
-	switch x := args[0].(type) {
-	case string:
-		if len(args) == 1 {
-			return String(badKey, x), nil
-		}
-		return Any(x, args[1]), args[2:]
-
-	case Attr:
-		return x, args[1:]
-
-	default:
-		return Any(badKey, x), args[1:]
-	}
-}
-
-// Source describes the location of a line of source code.
-type Source struct {
-	// Function is the package path-qualified function name containing the
-	// source line. If non-empty, this string uniquely identifies a single
-	// function in the program. This may be the empty string if not known.
-	Function string `json:"function"`
-	// File and Line are the file name and line number (1-based) of the source
-	// line. These may be the empty string and zero, respectively, if not known.
-	File string `json:"file"`
-	Line int    `json:"line"`
-}
-
-// attrs returns the non-zero fields of s as a slice of attrs.
-// It is similar to a LogValue method, but we don't want Source
-// to implement LogValuer because it would be resolved before
-// the ReplaceAttr function was called.
-func (s *Source) group() Value {
-	var as []Attr
-	if s.Function != "" {
-		as = append(as, String("function", s.Function))
-	}
-	if s.File != "" {
-		as = append(as, String("file", s.File))
-	}
-	if s.Line != 0 {
-		as = append(as, Int("line", s.Line))
-	}
-	return GroupValue(as...)
-}
-
-// source returns a Source for the log event.
-// If the Record was created without the necessary information,
-// or if the location is unavailable, it returns a non-nil *Source
-// with zero fields.
-func (r Record) source() *Source {
-	fs := runtime.CallersFrames([]uintptr{r.PC})
-	f, _ := fs.Next()
-	return &Source{
-		Function: f.Function,
-		File:     f.File,
-		Line:     f.Line,
-	}
-}
diff --git a/vendor/golang.org/x/exp/slog/text_handler.go b/vendor/golang.org/x/exp/slog/text_handler.go
deleted file mode 100644
index 75b66b716..000000000
--- a/vendor/golang.org/x/exp/slog/text_handler.go
+++ /dev/null
@@ -1,161 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package slog
-
-import (
-	"context"
-	"encoding"
-	"fmt"
-	"io"
-	"reflect"
-	"strconv"
-	"unicode"
-	"unicode/utf8"
-)
-
-// TextHandler is a Handler that writes Records to an io.Writer as a
-// sequence of key=value pairs separated by spaces and followed by a newline.
-type TextHandler struct {
-	*commonHandler
-}
-
-// NewTextHandler creates a TextHandler that writes to w,
-// using the given options.
-// If opts is nil, the default options are used.
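The files removed above are the pre-Go-1.21 preview of the standard library's log/slog, and the alternating key-value convention implemented by Record.Add and argsToAttr survives there unchanged. A minimal sketch against stdlib log/slog (the handler choice and attribute names are illustrative only):

	package main

	import (
		"log/slog"
		"os"
	)

	func main() {
		logger := slog.New(slog.NewTextHandler(os.Stderr, nil))

		// "status" starts a key-value pair, a ready-made Attr passes
		// through unchanged, and a keyless value would get !BADKEY.
		logger.Info("request handled",
			"status", 200,
			slog.String("method", "GET"),
		)
	}
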
-func NewTextHandler(w io.Writer, opts *HandlerOptions) *TextHandler {
-	if opts == nil {
-		opts = &HandlerOptions{}
-	}
-	return &TextHandler{
-		&commonHandler{
-			json: false,
-			w:    w,
-			opts: *opts,
-		},
-	}
-}
-
-// Enabled reports whether the handler handles records at the given level.
-// The handler ignores records whose level is lower.
-func (h *TextHandler) Enabled(_ context.Context, level Level) bool {
-	return h.commonHandler.enabled(level)
-}
-
-// WithAttrs returns a new TextHandler whose attributes consist
-// of h's attributes followed by attrs.
-func (h *TextHandler) WithAttrs(attrs []Attr) Handler {
-	return &TextHandler{commonHandler: h.commonHandler.withAttrs(attrs)}
-}
-
-func (h *TextHandler) WithGroup(name string) Handler {
-	return &TextHandler{commonHandler: h.commonHandler.withGroup(name)}
-}
-
-// Handle formats its argument Record as a single line of space-separated
-// key=value items.
-//
-// If the Record's time is zero, the time is omitted.
-// Otherwise, the key is "time"
-// and the value is output in RFC3339 format with millisecond precision.
-//
-// If the Record's level is zero, the level is omitted.
-// Otherwise, the key is "level"
-// and the value of [Level.String] is output.
-//
-// If the AddSource option is set and source information is available,
-// the key is "source" and the value is output as FILE:LINE.
-//
-// The message's key is "msg".
-//
-// To modify these or other attributes, or remove them from the output, use
-// [HandlerOptions.ReplaceAttr].
-//
-// If a value implements [encoding.TextMarshaler], the result of MarshalText is
-// written. Otherwise, the result of fmt.Sprint is written.
-//
-// Keys and values are quoted with [strconv.Quote] if they contain Unicode space
-// characters, non-printing characters, '"' or '='.
-//
-// Keys inside groups consist of components (keys or group names) separated by
-// dots. No further escaping is performed.
-// Thus there is no way to determine from the key "a.b.c" whether there
-// are two groups "a" and "b" and a key "c", or a single group "a.b" and a key "c",
-// or single group "a" and a key "b.c".
-// If it is necessary to reconstruct the group structure of a key
-// even in the presence of dots inside components, use
-// [HandlerOptions.ReplaceAttr] to encode that information in the key.
-//
-// Each call to Handle results in a single serialized call to
-// io.Writer.Write.
-func (h *TextHandler) Handle(_ context.Context, r Record) error {
-	return h.commonHandler.handle(r)
-}
-
-func appendTextValue(s *handleState, v Value) error {
-	switch v.Kind() {
-	case KindString:
-		s.appendString(v.str())
-	case KindTime:
-		s.appendTime(v.time())
-	case KindAny:
-		if tm, ok := v.any.(encoding.TextMarshaler); ok {
-			data, err := tm.MarshalText()
-			if err != nil {
-				return err
-			}
-			// TODO: avoid the conversion to string.
-			s.appendString(string(data))
-			return nil
-		}
-		if bs, ok := byteSlice(v.any); ok {
-			// As of Go 1.19, this only allocates for strings longer than 32 bytes.
-			s.buf.WriteString(strconv.Quote(string(bs)))
-			return nil
-		}
-		s.appendString(fmt.Sprintf("%+v", v.Any()))
-	default:
-		*s.buf = v.append(*s.buf)
-	}
-	return nil
-}
-
-// byteSlice returns its argument as a []byte if the argument's
-// underlying type is []byte, along with a second return value of true.
-// Otherwise it returns nil, false.
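As the Handle documentation above describes, TextHandler emits one key=value line per record and quotes any token that needs it. A small sketch of the observable format, again assuming stdlib log/slog (the time value shown in the comment is a placeholder):

	package main

	import (
		"bytes"
		"fmt"
		"log/slog"
	)

	func main() {
		var buf bytes.Buffer
		logger := slog.New(slog.NewTextHandler(&buf, nil))
		logger.Info("user logged in", "user id", 42) // key contains a space

		// Typical output:
		//   time=2024-01-02T15:04:05.000Z level=INFO msg="user logged in" "user id"=42
		fmt.Print(buf.String())
	}
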
-func byteSlice(a any) ([]byte, bool) {
-	if bs, ok := a.([]byte); ok {
-		return bs, true
-	}
-	// Like Printf's %s, we allow both the slice type and the byte element type to be named.
-	t := reflect.TypeOf(a)
-	if t != nil && t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8 {
-		return reflect.ValueOf(a).Bytes(), true
-	}
-	return nil, false
-}
-
-func needsQuoting(s string) bool {
-	if len(s) == 0 {
-		return true
-	}
-	for i := 0; i < len(s); {
-		b := s[i]
-		if b < utf8.RuneSelf {
-			// Quote anything except a backslash that would need quoting in a
-			// JSON string, as well as space and '='
-			if b != '\\' && (b == ' ' || b == '=' || !safeSet[b]) {
-				return true
-			}
-			i++
-			continue
-		}
-		r, size := utf8.DecodeRuneInString(s[i:])
-		if r == utf8.RuneError || unicode.IsSpace(r) || !unicode.IsPrint(r) {
-			return true
-		}
-		i += size
-	}
-	return false
-}
diff --git a/vendor/golang.org/x/exp/slog/value.go b/vendor/golang.org/x/exp/slog/value.go
deleted file mode 100644
index 3550c46fc..000000000
--- a/vendor/golang.org/x/exp/slog/value.go
+++ /dev/null
@@ -1,456 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package slog
-
-import (
-	"fmt"
-	"math"
-	"runtime"
-	"strconv"
-	"strings"
-	"time"
-	"unsafe"
-
-	"golang.org/x/exp/slices"
-)
-
-// A Value can represent any Go value, but unlike type any,
-// it can represent most small values without an allocation.
-// The zero Value corresponds to nil.
-type Value struct {
-	_ [0]func() // disallow ==
-	// num holds the value for Kinds Int64, Uint64, Float64, Bool and Duration,
-	// the string length for KindString, and nanoseconds since the epoch for KindTime.
-	num uint64
-	// If any is of type Kind, then the value is in num as described above.
-	// If any is of type *time.Location, then the Kind is Time and time.Time value
-	// can be constructed from the Unix nanos in num and the location (monotonic time
-	// is not preserved).
-	// If any is of type stringptr, then the Kind is String and the string value
-	// consists of the length in num and the pointer in any.
-	// Otherwise, the Kind is Any and any is the value.
-	// (This implies that Attrs cannot store values of type Kind, *time.Location
-	// or stringptr.)
-	any any
-}
-
-// Kind is the kind of a Value.
-type Kind int
-
-// The following list is sorted alphabetically, but it's also important that
-// KindAny is 0 so that a zero Value represents nil.
-
-const (
-	KindAny Kind = iota
-	KindBool
-	KindDuration
-	KindFloat64
-	KindInt64
-	KindString
-	KindTime
-	KindUint64
-	KindGroup
-	KindLogValuer
-)
-
-var kindStrings = []string{
-	"Any",
-	"Bool",
-	"Duration",
-	"Float64",
-	"Int64",
-	"String",
-	"Time",
-	"Uint64",
-	"Group",
-	"LogValuer",
-}
-
-func (k Kind) String() string {
-	if k >= 0 && int(k) < len(kindStrings) {
-		return kindStrings[k]
-	}
-	return "<unknown slog.Kind>"
-}
-
-// Unexported version of Kind, just so we can store Kinds in Values.
-// (No user-provided value has this type.)
-type kind Kind
-
-// Kind returns v's Kind.
-func (v Value) Kind() Kind {
-	switch x := v.any.(type) {
-	case Kind:
-		return x
-	case stringptr:
-		return KindString
-	case timeLocation:
-		return KindTime
-	case groupptr:
-		return KindGroup
-	case LogValuer:
-		return KindLogValuer
-	case kind: // a kind is just a wrapper for a Kind
-		return KindAny
-	default:
-		return KindAny
-	}
-}
-
-//////////////// Constructors
-
-// IntValue returns a Value for an int.
-func IntValue(v int) Value {
-	return Int64Value(int64(v))
-}
-
-// Int64Value returns a Value for an int64.
-func Int64Value(v int64) Value {
-	return Value{num: uint64(v), any: KindInt64}
-}
-
-// Uint64Value returns a Value for a uint64.
-func Uint64Value(v uint64) Value {
-	return Value{num: v, any: KindUint64}
-}
-
-// Float64Value returns a Value for a floating-point number.
-func Float64Value(v float64) Value {
-	return Value{num: math.Float64bits(v), any: KindFloat64}
-}
-
-// BoolValue returns a Value for a bool.
-func BoolValue(v bool) Value {
-	u := uint64(0)
-	if v {
-		u = 1
-	}
-	return Value{num: u, any: KindBool}
-}
-
-// Unexported version of *time.Location, just so we can store *time.Locations in
-// Values. (No user-provided value has this type.)
-type timeLocation *time.Location
-
-// TimeValue returns a Value for a time.Time.
-// It discards the monotonic portion.
-func TimeValue(v time.Time) Value {
-	if v.IsZero() {
-		// UnixNano on the zero time is undefined, so represent the zero time
-		// with a nil *time.Location instead. time.Time.Location method never
-		// returns nil, so a Value with any == timeLocation(nil) cannot be
-		// mistaken for any other Value, time.Time or otherwise.
-		return Value{any: timeLocation(nil)}
-	}
-	return Value{num: uint64(v.UnixNano()), any: timeLocation(v.Location())}
-}
-
-// DurationValue returns a Value for a time.Duration.
-func DurationValue(v time.Duration) Value {
-	return Value{num: uint64(v.Nanoseconds()), any: KindDuration}
-}
-
-// AnyValue returns a Value for the supplied value.
-//
-// If the supplied value is of type Value, it is returned
-// unmodified.
-//
-// Given a value of one of Go's predeclared string, bool, or
-// (non-complex) numeric types, AnyValue returns a Value of kind
-// String, Bool, Uint64, Int64, or Float64. The width of the
-// original numeric type is not preserved.
-//
-// Given a time.Time or time.Duration value, AnyValue returns a Value of kind
-// KindTime or KindDuration. The monotonic time is not preserved.
-//
-// For nil, or values of all other types, including named types whose
-// underlying type is numeric, AnyValue returns a value of kind KindAny.
-func AnyValue(v any) Value {
-	switch v := v.(type) {
-	case string:
-		return StringValue(v)
-	case int:
-		return Int64Value(int64(v))
-	case uint:
-		return Uint64Value(uint64(v))
-	case int64:
-		return Int64Value(v)
-	case uint64:
-		return Uint64Value(v)
-	case bool:
-		return BoolValue(v)
-	case time.Duration:
-		return DurationValue(v)
-	case time.Time:
-		return TimeValue(v)
-	case uint8:
-		return Uint64Value(uint64(v))
-	case uint16:
-		return Uint64Value(uint64(v))
-	case uint32:
-		return Uint64Value(uint64(v))
-	case uintptr:
-		return Uint64Value(uint64(v))
-	case int8:
-		return Int64Value(int64(v))
-	case int16:
-		return Int64Value(int64(v))
-	case int32:
-		return Int64Value(int64(v))
-	case float64:
-		return Float64Value(v)
-	case float32:
-		return Float64Value(float64(v))
-	case []Attr:
-		return GroupValue(v...)
-	case Kind:
-		return Value{any: kind(v)}
-	case Value:
-		return v
-	default:
-		return Value{any: v}
-	}
-}
-
-//////////////// Accessors
-
-// Any returns v's value as an any.
-func (v Value) Any() any {
-	switch v.Kind() {
-	case KindAny:
-		if k, ok := v.any.(kind); ok {
-			return Kind(k)
-		}
-		return v.any
-	case KindLogValuer:
-		return v.any
-	case KindGroup:
-		return v.group()
-	case KindInt64:
-		return int64(v.num)
-	case KindUint64:
-		return v.num
-	case KindFloat64:
-		return v.float()
-	case KindString:
-		return v.str()
-	case KindBool:
-		return v.bool()
-	case KindDuration:
-		return v.duration()
-	case KindTime:
-		return v.time()
-	default:
-		panic(fmt.Sprintf("bad kind: %s", v.Kind()))
-	}
-}
-
-// Int64 returns v's value as an int64. It panics
-// if v is not a signed integer.
-func (v Value) Int64() int64 {
-	if g, w := v.Kind(), KindInt64; g != w {
-		panic(fmt.Sprintf("Value kind is %s, not %s", g, w))
-	}
-	return int64(v.num)
-}
-
-// Uint64 returns v's value as a uint64. It panics
-// if v is not an unsigned integer.
-func (v Value) Uint64() uint64 {
-	if g, w := v.Kind(), KindUint64; g != w {
-		panic(fmt.Sprintf("Value kind is %s, not %s", g, w))
-	}
-	return v.num
-}
-
-// Bool returns v's value as a bool. It panics
-// if v is not a bool.
-func (v Value) Bool() bool {
-	if g, w := v.Kind(), KindBool; g != w {
-		panic(fmt.Sprintf("Value kind is %s, not %s", g, w))
-	}
-	return v.bool()
-}
-
-func (v Value) bool() bool {
-	return v.num == 1
-}
-
-// Duration returns v's value as a time.Duration. It panics
-// if v is not a time.Duration.
-func (v Value) Duration() time.Duration {
-	if g, w := v.Kind(), KindDuration; g != w {
-		panic(fmt.Sprintf("Value kind is %s, not %s", g, w))
-	}
-
-	return v.duration()
-}
-
-func (v Value) duration() time.Duration {
-	return time.Duration(int64(v.num))
-}
-
-// Float64 returns v's value as a float64. It panics
-// if v is not a float64.
-func (v Value) Float64() float64 {
-	if g, w := v.Kind(), KindFloat64; g != w {
-		panic(fmt.Sprintf("Value kind is %s, not %s", g, w))
-	}
-
-	return v.float()
-}
-
-func (v Value) float() float64 {
-	return math.Float64frombits(v.num)
-}
-
-// Time returns v's value as a time.Time. It panics
-// if v is not a time.Time.
-func (v Value) Time() time.Time {
-	if g, w := v.Kind(), KindTime; g != w {
-		panic(fmt.Sprintf("Value kind is %s, not %s", g, w))
-	}
-	return v.time()
-}
-
-func (v Value) time() time.Time {
-	loc := v.any.(timeLocation)
-	if loc == nil {
-		return time.Time{}
-	}
-	return time.Unix(0, int64(v.num)).In(loc)
-}
-
-// LogValuer returns v's value as a LogValuer. It panics
-// if v is not a LogValuer.
-func (v Value) LogValuer() LogValuer {
-	return v.any.(LogValuer)
-}
-
-// Group returns v's value as a []Attr.
-// It panics if v's Kind is not KindGroup.
-func (v Value) Group() []Attr {
-	if sp, ok := v.any.(groupptr); ok {
-		return unsafe.Slice((*Attr)(sp), v.num)
-	}
-	panic("Group: bad kind")
-}
-
-func (v Value) group() []Attr {
-	return unsafe.Slice((*Attr)(v.any.(groupptr)), v.num)
-}
-
-//////////////// Other
-
-// Equal reports whether v and w represent the same Go value.
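The constructor and accessor pairs above are kind-checked: an accessor panics unless the Value holds the matching Kind, and AnyValue folds narrow numeric types into the wider kinds. A short sketch, once more using the stdlib log/slog that supersedes this vendored copy:

	package main

	import (
		"fmt"
		"log/slog"
	)

	func main() {
		// AnyValue does not preserve the width of the original numeric type.
		v := slog.AnyValue(int32(7))
		fmt.Println(v.Kind())  // Int64
		fmt.Println(v.Int64()) // 7

		// Check Kind before calling a panicking accessor.
		if v.Kind() == slog.KindString {
			fmt.Println(v.String())
		}
	}
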
-func (v Value) Equal(w Value) bool {
-	k1 := v.Kind()
-	k2 := w.Kind()
-	if k1 != k2 {
-		return false
-	}
-	switch k1 {
-	case KindInt64, KindUint64, KindBool, KindDuration:
-		return v.num == w.num
-	case KindString:
-		return v.str() == w.str()
-	case KindFloat64:
-		return v.float() == w.float()
-	case KindTime:
-		return v.time().Equal(w.time())
-	case KindAny, KindLogValuer:
-		return v.any == w.any // may panic if non-comparable
-	case KindGroup:
-		return slices.EqualFunc(v.group(), w.group(), Attr.Equal)
-	default:
-		panic(fmt.Sprintf("bad kind: %s", k1))
-	}
-}
-
-// append appends a text representation of v to dst.
-// v is formatted as with fmt.Sprint.
-func (v Value) append(dst []byte) []byte {
-	switch v.Kind() {
-	case KindString:
-		return append(dst, v.str()...)
-	case KindInt64:
-		return strconv.AppendInt(dst, int64(v.num), 10)
-	case KindUint64:
-		return strconv.AppendUint(dst, v.num, 10)
-	case KindFloat64:
-		return strconv.AppendFloat(dst, v.float(), 'g', -1, 64)
-	case KindBool:
-		return strconv.AppendBool(dst, v.bool())
-	case KindDuration:
-		return append(dst, v.duration().String()...)
-	case KindTime:
-		return append(dst, v.time().String()...)
-	case KindGroup:
-		return fmt.Append(dst, v.group())
-	case KindAny, KindLogValuer:
-		return fmt.Append(dst, v.any)
-	default:
-		panic(fmt.Sprintf("bad kind: %s", v.Kind()))
-	}
-}
-
-// A LogValuer is any Go value that can convert itself into a Value for logging.
-//
-// This mechanism may be used to defer expensive operations until they are
-// needed, or to expand a single value into a sequence of components.
-type LogValuer interface {
-	LogValue() Value
-}
-
-const maxLogValues = 100
-
-// Resolve repeatedly calls LogValue on v while it implements LogValuer,
-// and returns the result.
-// If v resolves to a group, the group's attributes' values are not recursively
-// resolved.
-// If the number of LogValue calls exceeds a threshold, a Value containing an
-// error is returned.
-// Resolve's return value is guaranteed not to be of Kind KindLogValuer.
-func (v Value) Resolve() (rv Value) {
-	orig := v
-	defer func() {
-		if r := recover(); r != nil {
-			rv = AnyValue(fmt.Errorf("LogValue panicked\n%s", stack(3, 5)))
-		}
-	}()
-
-	for i := 0; i < maxLogValues; i++ {
-		if v.Kind() != KindLogValuer {
-			return v
-		}
-		v = v.LogValuer().LogValue()
-	}
-	err := fmt.Errorf("LogValue called too many times on Value of type %T", orig.Any())
-	return AnyValue(err)
-}
-
-func stack(skip, nFrames int) string {
-	pcs := make([]uintptr, nFrames+1)
-	n := runtime.Callers(skip+1, pcs)
-	if n == 0 {
-		return "(no stack)"
-	}
-	frames := runtime.CallersFrames(pcs[:n])
-	var b strings.Builder
-	i := 0
-	for {
-		frame, more := frames.Next()
-		fmt.Fprintf(&b, "called from %s (%s:%d)\n", frame.Function, frame.File, frame.Line)
-		if !more {
-			break
-		}
-		i++
-		if i >= nFrames {
-			fmt.Fprintf(&b, "(rest of stack elided)\n")
-			break
-		}
-	}
-	return b.String()
-}
diff --git a/vendor/golang.org/x/exp/slog/value_119.go b/vendor/golang.org/x/exp/slog/value_119.go
deleted file mode 100644
index 29b0d7329..000000000
--- a/vendor/golang.org/x/exp/slog/value_119.go
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.19 && !go1.20
-
-package slog
-
-import (
-	"reflect"
-	"unsafe"
-)
-
-type (
-	stringptr unsafe.Pointer // used in Value.any when the Value is a string
-	groupptr  unsafe.Pointer // used in Value.any when the Value is a []Attr
-)
-
-// StringValue returns a new Value for a string.
-func StringValue(value string) Value {
-	hdr := (*reflect.StringHeader)(unsafe.Pointer(&value))
-	return Value{num: uint64(hdr.Len), any: stringptr(hdr.Data)}
-}
-
-func (v Value) str() string {
-	var s string
-	hdr := (*reflect.StringHeader)(unsafe.Pointer(&s))
-	hdr.Data = uintptr(v.any.(stringptr))
-	hdr.Len = int(v.num)
-	return s
-}
-
-// String returns Value's value as a string, formatted like fmt.Sprint. Unlike
-// the methods Int64, Float64, and so on, which panic if v is of the
-// wrong kind, String never panics.
-func (v Value) String() string {
-	if sp, ok := v.any.(stringptr); ok {
-		// Inlining this code makes a huge difference.
-		var s string
-		hdr := (*reflect.StringHeader)(unsafe.Pointer(&s))
-		hdr.Data = uintptr(sp)
-		hdr.Len = int(v.num)
-		return s
-	}
-	return string(v.append(nil))
-}
-
-// GroupValue returns a new Value for a list of Attrs.
-// The caller must not subsequently mutate the argument slice.
-func GroupValue(as ...Attr) Value {
-	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&as))
-	return Value{num: uint64(hdr.Len), any: groupptr(hdr.Data)}
-}
diff --git a/vendor/golang.org/x/exp/slog/value_120.go b/vendor/golang.org/x/exp/slog/value_120.go
deleted file mode 100644
index f7d4c0932..000000000
--- a/vendor/golang.org/x/exp/slog/value_120.go
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.20
-
-package slog
-
-import "unsafe"
-
-type (
-	stringptr *byte // used in Value.any when the Value is a string
-	groupptr  *Attr // used in Value.any when the Value is a []Attr
-)
-
-// StringValue returns a new Value for a string.
-func StringValue(value string) Value {
-	return Value{num: uint64(len(value)), any: stringptr(unsafe.StringData(value))}
-}
-
-// GroupValue returns a new Value for a list of Attrs.
-// The caller must not subsequently mutate the argument slice.
-func GroupValue(as ...Attr) Value {
-	return Value{num: uint64(len(as)), any: groupptr(unsafe.SliceData(as))}
-}
-
-// String returns Value's value as a string, formatted like fmt.Sprint. Unlike
-// the methods Int64, Float64, and so on, which panic if v is of the
-// wrong kind, String never panics.
-func (v Value) String() string {
-	if sp, ok := v.any.(stringptr); ok {
-		return unsafe.String(sp, v.num)
-	}
-	return string(v.append(nil))
-}
-
-func (v Value) str() string {
-	return unsafe.String(v.any.(stringptr), v.num)
-}
diff --git a/vendor/golang.org/x/tools/cmd/stringer/stringer.go b/vendor/golang.org/x/tools/cmd/stringer/stringer.go
index 2b19c93e8..038e8e831 100644
--- a/vendor/golang.org/x/tools/cmd/stringer/stringer.go
+++ b/vendor/golang.org/x/tools/cmd/stringer/stringer.go
@@ -58,6 +58,11 @@
 // where t is the lower-cased name of the first type listed. It can be overridden
 // with the -output flag.
 //
+// Types can also be declared in tests, in which case type declarations in the
+// non-test package or its test variant are preferred over types defined in the
+// package with suffix "_test".
+// The default output file for type declarations in tests is t_string_test.go with t picked as above.
+//
 // The -linecomment flag tells stringer to generate the text of any line comment, trimmed
 // of leading spaces, instead of the constant name. For instance, if the constants above had a
 // Pill prefix, one could write
@@ -128,10 +133,6 @@ func main() {
 
 	// Parse the package once.
 	var dir string
-	g := Generator{
-		trimPrefix:  *trimprefix,
-		lineComment: *linecomment,
-	}
 	// TODO(suzmue): accept other patterns for packages (directories, list of files, import paths, etc).
 	if len(args) == 1 && isDirectory(args[0]) {
 		dir = args[0]
@@ -142,33 +143,90 @@ func main() {
 		dir = filepath.Dir(args[0])
 	}
 
-	g.parsePackage(args, tags)
+	// For each type, generate code in the first package where the type is declared.
+	// The order of packages is as follows:
+	// package x
+	// package x compiled for tests
+	// package x_test
+	//
+	// Each package pass could result in a separate generated file.
+	// These files must have the same package and test/not-test nature as the types
+	// from which they were generated.
+	//
+	// Types will be excluded when generated, to avoid repetitions.
+	pkgs := loadPackages(args, tags, *trimprefix, *linecomment, nil /* logf */)
+	sort.Slice(pkgs, func(i, j int) bool {
+		// Put x_test packages last.
+		iTest := strings.HasSuffix(pkgs[i].name, "_test")
+		jTest := strings.HasSuffix(pkgs[j].name, "_test")
+		if iTest != jTest {
+			return !iTest
+		}
 
-	// Print the header and package clause.
-	g.Printf("// Code generated by \"stringer %s\"; DO NOT EDIT.\n", strings.Join(os.Args[1:], " "))
-	g.Printf("\n")
-	g.Printf("package %s", g.pkg.name)
-	g.Printf("\n")
-	g.Printf("import \"strconv\"\n") // Used by all methods.
+		return len(pkgs[i].files) < len(pkgs[j].files)
+	})
+	for _, pkg := range pkgs {
+		g := Generator{
+			pkg: pkg,
+		}
 
-	// Run generate for each type.
-	for _, typeName := range types {
-		g.generate(typeName)
+		// Print the header and package clause.
+		g.Printf("// Code generated by \"stringer %s\"; DO NOT EDIT.\n", strings.Join(os.Args[1:], " "))
+		g.Printf("\n")
+		g.Printf("package %s", g.pkg.name)
+		g.Printf("\n")
+		g.Printf("import \"strconv\"\n") // Used by all methods.
+
+		// Run generate for types that can be found. Keep the rest for the remainingTypes iteration.
+		var foundTypes, remainingTypes []string
+		for _, typeName := range types {
+			values := findValues(typeName, pkg)
+			if len(values) > 0 {
+				g.generate(typeName, values)
+				foundTypes = append(foundTypes, typeName)
+			} else {
+				remainingTypes = append(remainingTypes, typeName)
+			}
+		}
+		if len(foundTypes) == 0 {
+			// This package didn't have any of the relevant types, skip writing a file.
+			continue
+		}
+		if len(remainingTypes) > 0 && output != nil && *output != "" {
+			log.Fatalf("cannot write to single file (-output=%q) when matching types are found in multiple packages", *output)
+		}
+		types = remainingTypes
+
+		// Format the output.
+		src := g.format()
+
+		// Write to file.
+		outputName := *output
+		if outputName == "" {
+			// Type names will be unique across packages since only the first
+			// match is picked.
+			// So there won't be collisions between a package compiled for tests
+			// and the separate package of tests (package foo_test).
+			outputName = filepath.Join(dir, baseName(pkg, foundTypes[0]))
+		}
+		err := os.WriteFile(outputName, src, 0644)
+		if err != nil {
+			log.Fatalf("writing output: %s", err)
+		}
 	}
 
-	// Format the output.
-	src := g.format()
-
-	// Write to file.
-	outputName := *output
-	if outputName == "" {
-		baseName := fmt.Sprintf("%s_string.go", types[0])
-		outputName = filepath.Join(dir, strings.ToLower(baseName))
+	if len(types) > 0 {
+		log.Fatalf("no values defined for types: %s", strings.Join(types, ","))
 	}
-	err := os.WriteFile(outputName, src, 0644)
-	if err != nil {
-		log.Fatalf("writing output: %s", err)
+}
+
+// baseName that will put the generated code together with pkg.
+func baseName(pkg *Package, typename string) string {
+	suffix := "string.go"
+	if pkg.hasTestFiles {
+		suffix = "string_test.go"
 	}
+	return fmt.Sprintf("%s_%s", strings.ToLower(typename), suffix)
 }
 
 // isDirectory reports whether the named file is a directory.
@@ -186,13 +244,10 @@ type Generator struct {
 	buf bytes.Buffer // Accumulated output.
 	pkg *Package     // Package we are scanning.
 
-	trimPrefix  string
-	lineComment bool
-
-	logf func(format string, args ...interface{}) // test logging hook; nil when not testing
+	logf func(format string, args ...any) // test logging hook; nil when not testing
 }
 
-func (g *Generator) Printf(format string, args ...interface{}) {
+func (g *Generator) Printf(format string, args ...any) {
 	fmt.Fprintf(&g.buf, format, args...)
 }
 
@@ -209,54 +264,74 @@ type File struct {
 }
 
 type Package struct {
-	name  string
-	defs  map[*ast.Ident]types.Object
-	files []*File
+	name         string
+	defs         map[*ast.Ident]types.Object
+	files        []*File
+	hasTestFiles bool
 }
 
-// parsePackage analyzes the single package constructed from the patterns and tags.
-// parsePackage exits if there is an error.
-func (g *Generator) parsePackage(patterns []string, tags []string) {
+// loadPackages analyzes the single package constructed from the patterns and tags.
+// loadPackages exits if there is an error.
+//
+// Returns all variants (such as tests) of the package.
+//
+// logf is a test logging hook. It can be nil when not testing.
+func loadPackages(
+	patterns, tags []string,
+	trimPrefix string, lineComment bool,
+	logf func(format string, args ...any),
+) []*Package {
 	cfg := &packages.Config{
-		Mode: packages.NeedName | packages.NeedTypes | packages.NeedTypesInfo | packages.NeedSyntax,
-		// TODO: Need to think about constants in test files. Maybe write type_string_test.go
-		// in a separate pass? For later.
-		Tests:      false,
+		Mode: packages.NeedName | packages.NeedTypes | packages.NeedTypesInfo | packages.NeedSyntax | packages.NeedFiles,
+		// Tests are included, let the caller decide how to fold them in.
+		Tests:      true,
 		BuildFlags: []string{fmt.Sprintf("-tags=%s", strings.Join(tags, " "))},
-		Logf:       g.logf,
+		Logf:       logf,
 	}
 	pkgs, err := packages.Load(cfg, patterns...)
 	if err != nil {
 		log.Fatal(err)
 	}
-	if len(pkgs) != 1 {
-		log.Fatalf("error: %d packages matching %v", len(pkgs), strings.Join(patterns, " "))
+	if len(pkgs) == 0 {
+		log.Fatalf("error: no packages matching %v", strings.Join(patterns, " "))
 	}
-	g.addPackage(pkgs[0])
-}
 
-// addPackage adds a type checked Package and its syntax files to the generator.
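To see what the reworked generator does end to end, here is the canonical setup from stringer's own documentation (the painkiller/Pill names are the documentation's example, not part of this change):

	package painkiller

	//go:generate stringer -type=Pill

	type Pill int

	const (
		Placebo Pill = iota
		Aspirin
		Ibuprofen
	)

Running go generate writes pill_string.go declaring func (i Pill) String() string; with the change above, a Pill declared in a _test.go file would instead be written to pill_string_test.go in the matching test package.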
-func (g *Generator) addPackage(pkg *packages.Package) {
-	g.pkg = &Package{
-		name:  pkg.Name,
-		defs:  pkg.TypesInfo.Defs,
-		files: make([]*File, len(pkg.Syntax)),
-	}
+	out := make([]*Package, len(pkgs))
+	for i, pkg := range pkgs {
+		p := &Package{
+			name:  pkg.Name,
+			defs:  pkg.TypesInfo.Defs,
+			files: make([]*File, len(pkg.Syntax)),
+		}
+
+		for j, file := range pkg.Syntax {
+			p.files[j] = &File{
+				file: file,
+				pkg:  p,
 
-	for i, file := range pkg.Syntax {
-		g.pkg.files[i] = &File{
-			file:        file,
-			pkg:         g.pkg,
-			trimPrefix:  g.trimPrefix,
-			lineComment: g.lineComment,
+				trimPrefix:  trimPrefix,
+				lineComment: lineComment,
+			}
 		}
+
+		// Keep track of test files, since we might want to generate
+		// code that ends up in that kind of package.
+		// Can be replaced once https://go.dev/issue/38445 lands.
+		for _, f := range pkg.GoFiles {
+			if strings.HasSuffix(f, "_test.go") {
+				p.hasTestFiles = true
+				break
+			}
+		}
+
+		out[i] = p
 	}
+	return out
 }
 
-// generate produces the String method for the named type.
-func (g *Generator) generate(typeName string) {
+func findValues(typeName string, pkg *Package) []Value {
 	values := make([]Value, 0, 100)
-	for _, file := range g.pkg.files {
+	for _, file := range pkg.files {
 		// Set the state for this run of the walker.
 		file.typeName = typeName
 		file.values = nil
@@ -265,10 +340,11 @@ func (g *Generator) generate(typeName string) {
 			values = append(values, file.values...)
 		}
 	}
+	return values
+}
 
-	if len(values) == 0 {
-		log.Fatalf("no values defined for type %s", typeName)
-	}
+// generate produces the String method for the named type.
+func (g *Generator) generate(typeName string, values []Value) {
 	// Generate code that will fail if the constants change value.
 	g.Printf("func _() {\n")
 	g.Printf("\t// An \"invalid array index\" compiler error signifies that the constant values have changed.\n")
diff --git a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go
index 137cc8df1..65fe2628e 100644
--- a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go
+++ b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go
@@ -2,22 +2,64 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// Package gcexportdata provides functions for locating, reading, and
-// writing export data files containing type information produced by the
-// gc compiler. This package supports go1.7 export data format and all
-// later versions.
-//
-// Although it might seem convenient for this package to live alongside
-// go/types in the standard library, this would cause version skew
-// problems for developer tools that use it, since they must be able to
-// consume the outputs of the gc compiler both before and after a Go
-// update such as from Go 1.7 to Go 1.8. Because this package lives in
-// golang.org/x/tools, sites can update their version of this repo some
-// time before the Go 1.8 release and rebuild and redeploy their
-// developer tools, which will then be able to consume both Go 1.7 and
-// Go 1.8 export data files, so they will work before and after the
-// Go update. (See discussion at https://golang.org/issue/15651.)
-package gcexportdata // import "golang.org/x/tools/go/gcexportdata"
+// Package gcexportdata provides functions for reading and writing
+// export data, which is a serialized description of the API of a Go
+// package including the names, kinds, types, and locations of all
+// exported declarations.
+//
+// The standard Go compiler (cmd/compile) writes an export data file
+// for each package it compiles, which it later reads when compiling
+// packages that import the earlier one. The compiler must thus
+// contain logic to both write and read export data.
+// (See the "Export" section in the cmd/compile/README file.)
+//
+// The [Read] function in this package can read files produced by the
+// compiler, producing [go/types] data structures. As a matter of
+// policy, Read supports export data files produced by only the last
+// two Go releases plus tip; see https://go.dev/issue/68898. The
+// export data files produced by the compiler contain additional
+// details related to generics, inlining, and other optimizations that
+// cannot be decoded by the [Read] function.
+//
+// In files written by the compiler, the export data is not at the
+// start of the file. Before calling Read, use [NewReader] to locate
+// the desired portion of the file.
+//
+// The [Write] function in this package encodes the exported API of a
+// Go package ([types.Package]) as a file. Such files can be later
+// decoded by Read, but cannot be consumed by the compiler.
+//
+// # Future changes
+//
+// Although Read supports the formats written by both Write and the
+// compiler, the two are quite different, and there is an open
+// proposal (https://go.dev/issue/69491) to separate these APIs.
+//
+// Under that proposal, this package would ultimately provide only the
+// Read operation for compiler export data, which must be defined in
+// this module (golang.org/x/tools), not in the standard library, to
+// avoid version skew for developer tools that need to read compiler
+// export data both before and after a Go release, such as from Go
+// 1.23 to Go 1.24. Because this package lives in the tools module,
+// clients can update their version of the module some time before the
+// Go 1.24 release and rebuild and redeploy their tools, which will
+// then be able to consume both Go 1.23 and Go 1.24 export data files,
+// so they will work before and after the Go update. (See discussion
+// at https://go.dev/issue/15651.)
+//
+// The operations to import and export [go/types] data structures
+// would be defined in the go/types package as Import and Export.
+// [Write] would (eventually) delegate to Export,
+// and [Read], when it detects a file produced by Export,
+// would delegate to Import.
+//
+// # Deprecations
+//
+// The [NewImporter] and [Find] functions are deprecated and should
+// not be used in new code. The [WriteBundle] and [ReadBundle]
+// functions are experimental, and there is an open proposal to
+// deprecate them (https://go.dev/issue/69573).
+package gcexportdata
 
 import (
 	"bufio"
@@ -64,24 +106,18 @@ func Find(importPath, srcDir string) (filename, path string) {
 // additional trailing data beyond the end of the export data.
 func NewReader(r io.Reader) (io.Reader, error) {
 	buf := bufio.NewReader(r)
-	_, size, err := gcimporter.FindExportData(buf)
+	size, err := gcimporter.FindExportData(buf)
 	if err != nil {
 		return nil, err
 	}
 
-	if size >= 0 {
-		// We were given an archive and found the __.PKGDEF in it.
-		// This tells us the size of the export data, and we don't
-		// need to return the entire file.
-		return &io.LimitedReader{
-			R: buf,
-			N: size,
-		}, nil
-	} else {
-		// We were given an object file. As such, we don't know how large
-		// the export data is and must return the entire file.
-		return buf, nil
-	}
+	// We were given an archive and found the __.PKGDEF in it.
+	// This tells us the size of the export data, and we don't
+	// need to return the entire file.
+	return &io.LimitedReader{
+		R: buf,
+		N: size,
+	}, nil
 }
 
 // readAll works the same way as io.ReadAll, but avoids allocations and copies
@@ -100,6 +136,11 @@ func readAll(r io.Reader) ([]byte, error) {
 // Read reads export data from in, decodes it, and returns type
 // information for the package.
 //
+// Read is capable of reading export data produced by [Write] at the
+// same source code version, or by the last two Go releases (plus tip)
+// of the standard Go compiler. Reading files from older compilers may
+// produce an error.
+//
 // The package path (effectively its linker symbol prefix) is
 // specified by path, since unlike the package name, this information
 // may not be recorded in the export data.
@@ -128,14 +169,26 @@ func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package,
 	// (from "version"). Select appropriate importer.
 	if len(data) > 0 {
 		switch data[0] {
-		case 'v', 'c', 'd': // binary, till go1.10
+		case 'v', 'c', 'd':
+			// binary, produced by cmd/compile till go1.10
 			return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0])
 
-		case 'i': // indexed, till go1.19
+		case 'i':
+			// indexed, produced by cmd/compile till go1.19,
+			// and also by [Write].
+			//
+			// If proposal #69491 is accepted, go/types
+			// serialization will be implemented by
+			// types.Export, to which Write would eventually
+			// delegate (explicitly dropping any pretence at
+			// inter-version Write-Read compatibility).
+			// This [Read] function would delegate to types.Import
+			// when it detects that the file was produced by Export.
 			_, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path)
 			return pkg, err
 
-		case 'u': // unified, from go1.20
+		case 'u':
+			// unified, produced by cmd/compile since go1.20
 			_, pkg, err := gcimporter.UImportData(fset, imports, data[1:], path)
 			return pkg, err
 
diff --git a/vendor/golang.org/x/tools/go/packages/doc.go b/vendor/golang.org/x/tools/go/packages/doc.go
index 3531ac8f5..f1931d10e 100644
--- a/vendor/golang.org/x/tools/go/packages/doc.go
+++ b/vendor/golang.org/x/tools/go/packages/doc.go
@@ -64,7 +64,7 @@ graph using the Imports fields.
 
 The Load function can be configured by passing a pointer to a Config as
 the first argument. A nil Config is equivalent to the zero Config, which
-causes Load to run in LoadFiles mode, collecting minimal information.
+causes Load to run in [LoadFiles] mode, collecting minimal information.
 See the documentation for type Config for details.
 
 As noted earlier, the Config.Mode controls the amount of detail
@@ -72,14 +72,14 @@ reported about the loaded packages.
 See the documentation for type LoadMode for details.
 
 Most tools should pass their command-line arguments (after any flags)
-uninterpreted to [Load], so that it can interpret them
+uninterpreted to Load, so that it can interpret them
 according to the conventions of the underlying build system.
 
 See the Example function for typical usage.
 
 # The driver protocol
 
-[Load] may be used to load Go packages even in Go projects that use
+Load may be used to load Go packages even in Go projects that use
 alternative build systems, by installing an appropriate "driver"
 program for the build system and specifying its location in the
 GOPACKAGESDRIVER environment variable.
@@ -97,6 +97,15 @@ JSON-encoded [DriverRequest] message providing additional information
 is written to the driver's standard input.
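A sketch of the reading flow the new gcexportdata docs describe: locate export data, skip to it with NewReader, then decode with Read. Find is deprecated per those docs and may come up empty on recent toolchains; it is used here only to keep the example self-contained:

	package main

	import (
		"fmt"
		"go/token"
		"go/types"
		"log"
		"os"

		"golang.org/x/tools/go/gcexportdata"
	)

	func main() {
		filename, path := gcexportdata.Find("fmt", "")
		if filename == "" {
			log.Fatal("no export data found")
		}
		f, err := os.Open(filename)
		if err != nil {
			log.Fatal(err)
		}
		defer f.Close()

		// NewReader skips to the export data inside the compiler's output file.
		r, err := gcexportdata.NewReader(f)
		if err != nil {
			log.Fatal(err)
		}

		fset := token.NewFileSet()
		imports := make(map[string]*types.Package)
		pkg, err := gcexportdata.Read(r, fset, imports, path)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(pkg.Path(), pkg.Scope().Len(), "objects")
	}
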
 The driver must write a JSON-encoded [DriverResponse] message to
 its standard output. (This message differs from the JSON schema produced
 by 'go list'.)
+
+The value of the PWD environment variable seen by the driver process
+is the preferred name of its working directory. (The working directory
+may have other aliases due to symbolic links; see the comment on the
+Dir field of [exec.Cmd] for related information.)
+When the driver process emits in its response the name of a file
+that is a descendant of this directory, it must use an absolute path
+that has the value of PWD as a prefix, to ensure that the returned
+filenames satisfy the original query.
 */
 package packages // import "golang.org/x/tools/go/packages"
diff --git a/vendor/golang.org/x/tools/go/packages/external.go b/vendor/golang.org/x/tools/go/packages/external.go
index 8f7afcb5d..91bd62e83 100644
--- a/vendor/golang.org/x/tools/go/packages/external.go
+++ b/vendor/golang.org/x/tools/go/packages/external.go
@@ -13,6 +13,7 @@ import (
 	"fmt"
 	"os"
 	"os/exec"
+	"slices"
 	"strings"
 )
 
@@ -79,7 +80,7 @@ type DriverResponse struct {
 
 // driver is the type for functions that query the build system for the
 // packages named by the patterns.
-type driver func(cfg *Config, patterns ...string) (*DriverResponse, error)
+type driver func(cfg *Config, patterns []string) (*DriverResponse, error)
 
 // findExternalDriver returns the file path of a tool that supplies
 // the build system package structure, or "" if not found.
@@ -103,7 +104,7 @@ func findExternalDriver(cfg *Config) driver {
 			return nil
 		}
 	}
-	return func(cfg *Config, words ...string) (*DriverResponse, error) {
+	return func(cfg *Config, patterns []string) (*DriverResponse, error) {
 		req, err := json.Marshal(DriverRequest{
 			Mode:       cfg.Mode,
 			Env:        cfg.Env,
@@ -117,7 +118,7 @@ func findExternalDriver(cfg *Config) driver {
 
 		buf := new(bytes.Buffer)
 		stderr := new(bytes.Buffer)
-		cmd := exec.CommandContext(cfg.Context, tool, words...)
+		cmd := exec.CommandContext(cfg.Context, tool, patterns...)
 		cmd.Dir = cfg.Dir
 		// The cwd gets resolved to the real path. On Darwin, where
 		// /tmp is a symlink, this breaks anything that expects the
@@ -131,7 +132,7 @@ func findExternalDriver(cfg *Config) driver {
 		// command.
 		//
 		// (See similar trick in Invocation.run in ../../internal/gocommand/invoke.go)
-		cmd.Env = append(slicesClip(cfg.Env), "PWD="+cfg.Dir)
+		cmd.Env = append(slices.Clip(cfg.Env), "PWD="+cfg.Dir)
 		cmd.Stdin = bytes.NewReader(req)
 		cmd.Stdout = buf
 		cmd.Stderr = stderr
@@ -150,7 +151,3 @@ func findExternalDriver(cfg *Config) driver {
 		return &response, nil
 	}
 }
-
-// slicesClip removes unused capacity from the slice, returning s[:len(s):len(s)].
-// TODO(adonovan): use go1.21 slices.Clip.
-func slicesClip[S ~[]E, E any](s S) S { return s[:len(s):len(s)] }
diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go
index 1a3a5b44f..0458b4f9c 100644
--- a/vendor/golang.org/x/tools/go/packages/golist.go
+++ b/vendor/golang.org/x/tools/go/packages/golist.go
@@ -80,6 +80,12 @@ type golistState struct {
 	cfg *Config
 	ctx context.Context
 
+	runner *gocommand.Runner
+
+	// overlay is the JSON file that encodes the Config.Overlay
+	// mapping, used by 'go list -overlay=...'.
+	overlay string
+
 	envOnce    sync.Once
 	goEnvError error
 	goEnv      map[string]string
@@ -127,7 +133,10 @@ func (state *golistState) mustGetEnv() map[string]string {
 
 // goListDriver uses the go list command to interpret the patterns and produce
 // the build system package structure.
 // See driver for more details.
-func goListDriver(cfg *Config, patterns ...string) (_ *DriverResponse, err error) {
+//
+// overlay is the JSON file that encodes the cfg.Overlay
+// mapping, used by 'go list -overlay=...'
+func goListDriver(cfg *Config, runner *gocommand.Runner, overlay string, patterns []string) (_ *DriverResponse, err error) {
 	// Make sure that any asynchronous go commands are killed when we return.
 	parentCtx := cfg.Context
 	if parentCtx == nil {
@@ -142,13 +151,15 @@ func goListDriver(cfg *Config, patterns ...string) (_ *DriverResponse, err error
 		cfg:        cfg,
 		ctx:        ctx,
 		vendorDirs: map[string]bool{},
+		overlay:    overlay,
+		runner:     runner,
 	}
 
 	// Fill in response.Sizes asynchronously if necessary.
-	if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&NeedTypes != 0 {
+	if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&(NeedTypes|NeedTypesInfo) != 0 {
 		errCh := make(chan error)
 		go func() {
-			compiler, arch, err := getSizesForArgs(ctx, state.cfgInvocation(), cfg.gocmdRunner)
+			compiler, arch, err := getSizesForArgs(ctx, state.cfgInvocation(), runner)
 			response.dr.Compiler = compiler
 			response.dr.Arch = arch
 			errCh <- err
@@ -311,6 +322,7 @@ type jsonPackage struct {
 	ImportPath      string
 	Dir             string
 	Name            string
+	Target          string
 	Export          string
 	GoFiles         []string
 	CompiledGoFiles []string
@@ -494,13 +506,15 @@ func (state *golistState) createDriverResponse(words ...string) (*DriverResponse
 		pkg := &Package{
 			Name:            p.Name,
 			ID:              p.ImportPath,
+			Dir:             p.Dir,
+			Target:          p.Target,
 			GoFiles:         absJoin(p.Dir, p.GoFiles, p.CgoFiles),
 			CompiledGoFiles: absJoin(p.Dir, p.CompiledGoFiles),
 			OtherFiles:      absJoin(p.Dir, otherFiles(p)...),
 			EmbedFiles:      absJoin(p.Dir, p.EmbedFiles),
 			EmbedPatterns:   absJoin(p.Dir, p.EmbedPatterns),
 			IgnoredFiles:    absJoin(p.Dir, p.IgnoredGoFiles, p.IgnoredOtherFiles),
-			forTest:         p.ForTest,
+			ForTest:         p.ForTest,
 			depsErrors:      p.DepsErrors,
 			Module:          p.Module,
 		}
@@ -681,7 +695,7 @@ func (state *golistState) shouldAddFilenameFromError(p *jsonPackage) bool {
 // getGoVersion returns the effective minor version of the go command.
 func (state *golistState) getGoVersion() (int, error) {
 	state.goVersionOnce.Do(func() {
-		state.goVersion, state.goVersionError = gocommand.GoVersion(state.ctx, state.cfgInvocation(), state.cfg.gocmdRunner)
+		state.goVersion, state.goVersionError = gocommand.GoVersion(state.ctx, state.cfgInvocation(), state.runner)
 	})
 	return state.goVersion, state.goVersionError
 }
@@ -751,7 +765,7 @@ func jsonFlag(cfg *Config, goVersion int) string {
 		}
 	}
 	addFields("Name", "ImportPath", "Error") // These fields are always needed
-	if cfg.Mode&NeedFiles != 0 || cfg.Mode&NeedTypes != 0 {
+	if cfg.Mode&NeedFiles != 0 || cfg.Mode&(NeedTypes|NeedTypesInfo) != 0 {
 		addFields("Dir", "GoFiles", "IgnoredGoFiles", "IgnoredOtherFiles", "CFiles",
 			"CgoFiles", "CXXFiles", "MFiles", "HFiles", "FFiles", "SFiles",
 			"SwigFiles", "SwigCXXFiles", "SysoFiles")
@@ -759,7 +773,7 @@ func jsonFlag(cfg *Config, goVersion int) string {
 			addFields("TestGoFiles", "XTestGoFiles")
 		}
 	}
-	if cfg.Mode&NeedTypes != 0 {
+	if cfg.Mode&(NeedTypes|NeedTypesInfo) != 0 {
 		// CompiledGoFiles seems to be required for the test case TestCgoNoSyntax,
 		// even when -compiled isn't passed in.
 		// TODO(#52435): Should we make the test ask for -compiled, or automatically
@@ -784,7 +798,7 @@ func jsonFlag(cfg *Config, goVersion int) string {
 		// Request Dir in the unlikely case Export is not absolute.
 		addFields("Dir", "Export")
 	}
-	if cfg.Mode&needInternalForTest != 0 {
+	if cfg.Mode&NeedForTest != 0 {
 		addFields("ForTest")
 	}
 	if cfg.Mode&needInternalDepsErrors != 0 {
@@ -799,6 +813,9 @@ func jsonFlag(cfg *Config, goVersion int) string {
 	if cfg.Mode&NeedEmbedPatterns != 0 {
 		addFields("EmbedPatterns")
 	}
+	if cfg.Mode&NeedTarget != 0 {
+		addFields("Target")
+	}
 	return "-json=" + strings.Join(fields, ",")
 }
 
@@ -840,7 +857,7 @@ func (state *golistState) cfgInvocation() gocommand.Invocation {
 		Env:        cfg.Env,
 		Logf:       cfg.Logf,
 		WorkingDir: cfg.Dir,
-		Overlay:    cfg.goListOverlayFile,
+		Overlay:    state.overlay,
 	}
 }
 
@@ -851,11 +868,8 @@ func (state *golistState) invokeGo(verb string, args ...string) (*bytes.Buffer,
 	inv := state.cfgInvocation()
 	inv.Verb = verb
 	inv.Args = args
-	gocmdRunner := cfg.gocmdRunner
-	if gocmdRunner == nil {
-		gocmdRunner = &gocommand.Runner{}
-	}
-	stdout, stderr, friendlyErr, err := gocmdRunner.RunRaw(cfg.Context, inv)
+
+	stdout, stderr, friendlyErr, err := state.runner.RunRaw(cfg.Context, inv)
 	if err != nil {
 		// Check for 'go' executable not being found.
 		if ee, ok := err.(*exec.Error); ok && ee.Err == exec.ErrNotFound {
@@ -879,6 +893,12 @@ func (state *golistState) invokeGo(verb string, args ...string) (*bytes.Buffer,
 		return nil, friendlyErr
 	}
 
+	// Return an error if 'go list' failed due to missing tools in
+	// $GOROOT/pkg/tool/$GOOS_$GOARCH (#69606).
+	if len(stderr.String()) > 0 && strings.Contains(stderr.String(), `go: no such tool`) {
+		return nil, friendlyErr
+	}
+
 	// Is there an error running the C compiler in cgo? This will be reported in the "Error" field
 	// and should be suppressed by go list -e.
 	//
diff --git a/vendor/golang.org/x/tools/go/packages/loadmode_string.go b/vendor/golang.org/x/tools/go/packages/loadmode_string.go
index 5c080d21b..69eec9f44 100644
--- a/vendor/golang.org/x/tools/go/packages/loadmode_string.go
+++ b/vendor/golang.org/x/tools/go/packages/loadmode_string.go
@@ -9,49 +9,48 @@ import (
 	"strings"
 )
 
-var allModes = []LoadMode{
-	NeedName,
-	NeedFiles,
-	NeedCompiledGoFiles,
-	NeedImports,
-	NeedDeps,
-	NeedExportFile,
-	NeedTypes,
-	NeedSyntax,
-	NeedTypesInfo,
-	NeedTypesSizes,
+var modes = [...]struct {
+	mode LoadMode
+	name string
+}{
+	{NeedName, "NeedName"},
+	{NeedFiles, "NeedFiles"},
+	{NeedCompiledGoFiles, "NeedCompiledGoFiles"},
+	{NeedImports, "NeedImports"},
+	{NeedDeps, "NeedDeps"},
+	{NeedExportFile, "NeedExportFile"},
+	{NeedTypes, "NeedTypes"},
+	{NeedSyntax, "NeedSyntax"},
+	{NeedTypesInfo, "NeedTypesInfo"},
+	{NeedTypesSizes, "NeedTypesSizes"},
+	{NeedForTest, "NeedForTest"},
+	{NeedModule, "NeedModule"},
+	{NeedEmbedFiles, "NeedEmbedFiles"},
+	{NeedEmbedPatterns, "NeedEmbedPatterns"},
+	{NeedTarget, "NeedTarget"},
 }
 
-var modeStrings = []string{
-	"NeedName",
-	"NeedFiles",
-	"NeedCompiledGoFiles",
-	"NeedImports",
-	"NeedDeps",
-	"NeedExportFile",
-	"NeedTypes",
-	"NeedSyntax",
-	"NeedTypesInfo",
-	"NeedTypesSizes",
-}
-
-func (mod LoadMode) String() string {
-	m := mod
-	if m == 0 {
+func (mode LoadMode) String() string {
+	if mode == 0 {
 		return "LoadMode(0)"
 	}
 	var out []string
-	for i, x := range allModes {
-		if x > m {
-			break
+	// named bits
+	for _, item := range modes {
+		if (mode & item.mode) != 0 {
+			mode ^= item.mode
+			out = append(out, item.name)
 		}
-		if (m & x) != 0 {
-			out = append(out, modeStrings[i])
-			m = m ^ x
+	}
+	// unnamed residue
+	if mode != 0 {
+		if out == nil {
+			return fmt.Sprintf("LoadMode(%#x)", int(mode))
 		}
+		out = append(out, fmt.Sprintf("%#x", int(mode)))
 	}
-	if m != 0 {
-		out = append(out, "Unknown")
+	if len(out) == 1 {
+		return out[0]
 	}
-	return fmt.Sprintf("LoadMode(%s)", strings.Join(out, "|"))
+	return "(" + strings.Join(out, "|") + ")"
 }
diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go
index 0b6bfaff8..6665a04c1 100644
--- a/vendor/golang.org/x/tools/go/packages/packages.go
+++ b/vendor/golang.org/x/tools/go/packages/packages.go
@@ -16,13 +16,13 @@ import (
 	"go/scanner"
 	"go/token"
 	"go/types"
-	"io"
 	"log"
 	"os"
 	"path/filepath"
 	"runtime"
 	"strings"
 	"sync"
+	"sync/atomic"
 	"time"
 
 	"golang.org/x/sync/errgroup"
@@ -31,7 +31,6 @@ import (
 	"golang.org/x/tools/internal/gocommand"
 	"golang.org/x/tools/internal/packagesinternal"
 	"golang.org/x/tools/internal/typesinternal"
-	"golang.org/x/tools/internal/versions"
 )
 
 // A LoadMode controls the amount of detail to return when loading.
@@ -44,19 +43,33 @@ import (
 // ID and Errors (if present) will always be filled.
 // [Load] may return more information than requested.
 //
+// The Mode flag is a union of several bits named NeedName,
+// NeedFiles, and so on, each of which determines whether
+// a given field of Package (Name, Files, etc) should be
+// populated.
+//
+// For convenience, we provide named constants for the most
+// common combinations of Need flags:
+//
+//	[LoadFiles]	lists of files in each package
+//	[LoadImports]	... plus imports
+//	[LoadTypes]	... plus type information
+//	[LoadSyntax]	... plus type-annotated syntax
+//	[LoadAllSyntax]	... for all dependencies
+//
 // Unfortunately there are a number of open bugs related to
 // interactions among the LoadMode bits:
-//   - https://github.com/golang/go/issues/56633
-//   - https://github.com/golang/go/issues/56677
-//   - https://github.com/golang/go/issues/58726
-//   - https://github.com/golang/go/issues/63517
+//   - https://go.dev/issue/56633
+//   - https://go.dev/issue/56677
+//   - https://go.dev/issue/58726
+//   - https://go.dev/issue/63517
 type LoadMode int
 
 const (
 	// NeedName adds Name and PkgPath.
 	NeedName LoadMode = 1 << iota
 
-	// NeedFiles adds GoFiles and OtherFiles.
+	// NeedFiles adds Dir, GoFiles, OtherFiles, and IgnoredFiles
 	NeedFiles
 
 	// NeedCompiledGoFiles adds CompiledGoFiles.
@@ -78,7 +91,7 @@ const (
 	// NeedSyntax adds Syntax and Fset.
 	NeedSyntax
 
-	// NeedTypesInfo adds TypesInfo.
+	// NeedTypesInfo adds TypesInfo and Fset.
 	NeedTypesInfo
 
 	// NeedTypesSizes adds TypesSizes.
@@ -87,9 +100,10 @@ const (
 	// needInternalDepsErrors adds the internal deps errors field for use by gopls.
 	needInternalDepsErrors
 
-	// needInternalForTest adds the internal forTest field.
+	// NeedForTest adds ForTest.
+	//
 	// Tests must also be set on the context for this field to be populated.
-	needInternalForTest
+	NeedForTest
 
 	// typecheckCgo enables full support for type checking cgo. Requires Go 1.15+.
 	// Modifies CompiledGoFiles and Types, and has no effect on its own.
@@ -103,43 +117,39 @@ const (
 
 	// NeedEmbedPatterns adds EmbedPatterns.
 	NeedEmbedPatterns
+
+	// NeedTarget adds Target.
+	NeedTarget
+
+	// Be sure to update loadmode_string.go when adding new items!
 )
 
 const (
-	// Deprecated: LoadFiles exists for historical compatibility
-	// and should not be used. Please directly specify the needed fields using the Need values.
+	// LoadFiles loads the name and file names for the initial packages.
 	LoadFiles = NeedName | NeedFiles | NeedCompiledGoFiles
 
-	// Deprecated: LoadImports exists for historical compatibility
-	// and should not be used. Please directly specify the needed fields using the Need values.
+ // LoadImports loads the name, file names, and import mapping for the initial packages. LoadImports = LoadFiles | NeedImports - // Deprecated: LoadTypes exists for historical compatibility - // and should not be used. Please directly specify the needed fields using the Need values. + // LoadTypes loads exported type information for the initial packages. LoadTypes = LoadImports | NeedTypes | NeedTypesSizes - // Deprecated: LoadSyntax exists for historical compatibility - // and should not be used. Please directly specify the needed fields using the Need values. + // LoadSyntax loads typed syntax for the initial packages. LoadSyntax = LoadTypes | NeedSyntax | NeedTypesInfo - // Deprecated: LoadAllSyntax exists for historical compatibility - // and should not be used. Please directly specify the needed fields using the Need values. + // LoadAllSyntax loads typed syntax for the initial packages and all dependencies. LoadAllSyntax = LoadSyntax | NeedDeps // Deprecated: NeedExportsFile is a historical misspelling of NeedExportFile. + // + //go:fix inline NeedExportsFile = NeedExportFile ) // A Config specifies details about how packages should be loaded. // The zero value is a valid configuration. // -// Calls to Load do not modify this struct. -// -// TODO(adonovan): #67702: this is currently false: in fact, -// calls to [Load] do not modify the public fields of this struct, but -// may modify hidden fields, so concurrent calls to [Load] must not -// use the same Config. But perhaps we should reestablish the -// documented invariant. +// Calls to [Load] do not modify this struct. type Config struct { // Mode controls the level of information returned for each package. Mode LoadMode @@ -153,7 +163,7 @@ type Config struct { // If the user provides a logger, debug logging is enabled. // If the GOPACKAGESDEBUG environment variable is set to true, // but the logger is nil, default to log.Printf. - Logf func(format string, args ...interface{}) + Logf func(format string, args ...any) // Dir is the directory in which to run the build system's query tool // that provides information about the packages. @@ -170,19 +180,10 @@ type Config struct { // Env []string - // gocmdRunner guards go command calls from concurrency errors. - gocmdRunner *gocommand.Runner - // BuildFlags is a list of command-line flags to be passed through to // the build system's query tool. BuildFlags []string - // modFile will be used for -modfile in go command invocations. - modFile string - - // modFlag will be used for -modfile in go command invocations. - modFlag string - // Fset provides source position information for syntax trees and types. // If Fset is nil, Load will use a new fileset, but preserve Fset's value. Fset *token.FileSet @@ -229,21 +230,24 @@ type Config struct { // drivers may vary in their level of support for overlays. Overlay map[string][]byte - // goListOverlayFile is the JSON file that encodes the Overlay - // mapping, used by 'go list -overlay=...' - goListOverlayFile string + // -- Hidden configuration fields only for use in x/tools -- + + // modFile will be used for -modfile in go command invocations. + modFile string + + // modFlag will be used for -modfile in go command invocations. + modFlag string } // Load loads and returns the Go packages named by the given patterns. // -// Config specifies loading options; -// nil behaves the same as an empty Config. +// The cfg parameter specifies loading options; nil behaves the same as an empty [Config]. 
// // The [Config.Mode] field is a set of bits that determine what kinds // of information should be computed and returned. Modes that require // more information tend to be slower. See [LoadMode] for details // and important caveats. Its zero value is equivalent to -// NeedName | NeedFiles | NeedCompiledGoFiles. +// [NeedName] | [NeedFiles] | [NeedCompiledGoFiles]. // // Each call to Load returns a new set of [Package] instances. // The Packages and their Imports form a directed acyclic graph. @@ -260,7 +264,7 @@ type Config struct { // Errors associated with a particular package are recorded in the // corresponding Package's Errors list, and do not cause Load to // return an error. Clients may need to handle such errors before -// proceeding with further analysis. The PrintErrors function is +// proceeding with further analysis. The [PrintErrors] function is // provided for convenient display of all errors. func Load(cfg *Config, patterns ...string) ([]*Package, error) { ld := newLoader(cfg) @@ -323,21 +327,24 @@ func defaultDriver(cfg *Config, patterns ...string) (*DriverResponse, bool, erro } else if !response.NotHandled { return response, true, nil } - // (fall through) + // not handled: fall through } // go list fallback - // + // Write overlays once, as there are many calls // to 'go list' (one per chunk plus others too). - overlay, cleanupOverlay, err := gocommand.WriteOverlays(cfg.Overlay) + overlayFile, cleanupOverlay, err := gocommand.WriteOverlays(cfg.Overlay) if err != nil { return nil, false, err } defer cleanupOverlay() - cfg.goListOverlayFile = overlay - response, err := callDriverOnChunks(goListDriver, cfg, chunks) + var runner gocommand.Runner // (shared across many 'go list' calls) + driver := func(cfg *Config, patterns []string) (*DriverResponse, error) { + return goListDriver(cfg, &runner, overlayFile, patterns) + } + response, err := callDriverOnChunks(driver, cfg, chunks) if err != nil { return nil, false, err } @@ -375,16 +382,14 @@ func splitIntoChunks(patterns []string, argMax int) ([][]string, error) { func callDriverOnChunks(driver driver, cfg *Config, chunks [][]string) (*DriverResponse, error) { if len(chunks) == 0 { - return driver(cfg) + return driver(cfg, nil) } responses := make([]*DriverResponse, len(chunks)) errNotHandled := errors.New("driver returned NotHandled") var g errgroup.Group for i, chunk := range chunks { - i := i - chunk := chunk g.Go(func() (err error) { - responses[i], err = driver(cfg, chunk...) + responses[i], err = driver(cfg, chunk) if responses[i] != nil && responses[i].NotHandled { err = errNotHandled } @@ -434,6 +439,12 @@ type Package struct { // PkgPath is the package path as used by the go/types package. PkgPath string + // Dir is the directory associated with the package, if it exists. + // + // For packages listed by the go command, this is the directory containing + // the package files. + Dir string + // Errors contains any errors encountered querying the metadata // of the package, or while parsing or type-checking its files. Errors []Error @@ -473,6 +484,10 @@ type Package struct { // information for the package as provided by the build system. ExportFile string + // Target is the absolute install path of the .a file, for libraries, + // and of the executable file, for binaries. + Target string + // Imports maps import paths appearing in the package's Go source files // to corresponding loaded Packages. 
Imports map[string]*Package @@ -521,8 +536,8 @@ type Package struct { // -- internal -- - // forTest is the package under test, if any. - forTest string + // ForTest is the package under test, if any. + ForTest string // depsErrors is the DepsErrors field from the go list response, if any. depsErrors []*packagesinternal.PackageError @@ -551,21 +566,17 @@ type ModuleError struct { } func init() { - packagesinternal.GetForTest = func(p interface{}) string { - return p.(*Package).forTest - } - packagesinternal.GetDepsErrors = func(p interface{}) []*packagesinternal.PackageError { + packagesinternal.GetDepsErrors = func(p any) []*packagesinternal.PackageError { return p.(*Package).depsErrors } - packagesinternal.SetModFile = func(config interface{}, value string) { + packagesinternal.SetModFile = func(config any, value string) { config.(*Config).modFile = value } - packagesinternal.SetModFlag = func(config interface{}, value string) { + packagesinternal.SetModFlag = func(config any, value string) { config.(*Config).modFlag = value } packagesinternal.TypecheckCgo = int(typecheckCgo) packagesinternal.DepsErrors = int(needInternalDepsErrors) - packagesinternal.ForTest = int(needInternalForTest) } // An Error describes a problem with a package's metadata, syntax, or types. @@ -681,18 +692,19 @@ func (p *Package) String() string { return p.ID } // loaderPackage augments Package with state used during the loading phase type loaderPackage struct { *Package - importErrors map[string]error // maps each bad import to its error - loadOnce sync.Once - color uint8 // for cycle detection - needsrc bool // load from source (Mode >= LoadTypes) - needtypes bool // type information is either requested or depended on - initial bool // package was matched by a pattern - goVersion int // minor version number of go command on PATH + importErrors map[string]error // maps each bad import to its error + preds []*loaderPackage // packages that import this one + unfinishedSuccs atomic.Int32 // number of direct imports not yet loaded + color uint8 // for cycle detection + needsrc bool // load from source (Mode >= LoadTypes) + needtypes bool // type information is either requested or depended on + initial bool // package was matched by a pattern + goVersion int // minor version number of go command on PATH } // loader holds the working state of a single call to load. type loader struct { - pkgs map[string]*loaderPackage + pkgs map[string]*loaderPackage // keyed by Package.ID Config sizes types.Sizes // non-nil if needed by mode parseCache map[string]*parseValue @@ -729,7 +741,7 @@ func newLoader(cfg *Config) *loader { if debug { ld.Config.Logf = log.Printf } else { - ld.Config.Logf = func(format string, args ...interface{}) {} + ld.Config.Logf = func(format string, args ...any) {} } } if ld.Config.Mode == 0 { @@ -738,9 +750,6 @@ func newLoader(cfg *Config) *loader { if ld.Config.Env == nil { ld.Config.Env = os.Environ() } - if ld.Config.gocmdRunner == nil { - ld.Config.gocmdRunner = &gocommand.Runner{} - } if ld.Context == nil { ld.Context = context.Background() } @@ -754,7 +763,7 @@ func newLoader(cfg *Config) *loader { ld.requestedMode = ld.Mode ld.Mode = impliedLoadMode(ld.Mode) - if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 { + if ld.Mode&(NeedSyntax|NeedTypes|NeedTypesInfo) != 0 { if ld.Fset == nil { ld.Fset = token.NewFileSet() } @@ -763,6 +772,7 @@ func newLoader(cfg *Config) *loader { // because we load source if export data is missing. 
if ld.ParseFile == nil { ld.ParseFile = func(fset *token.FileSet, filename string, src []byte) (*ast.File, error) { + // We implicitly promise to keep doing ast.Object resolution. :( const mode = parser.AllErrors | parser.ParseComments return parser.ParseFile(fset, filename, src, mode) } @@ -794,7 +804,7 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) { exportDataInvalid := len(ld.Overlay) > 0 || pkg.ExportFile == "" && pkg.PkgPath != "unsafe" // This package needs type information if the caller requested types and the package is // either a root, or it's a non-root and the user requested dependencies ... - needtypes := (ld.Mode&NeedTypes|NeedTypesInfo != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0)) + needtypes := (ld.Mode&(NeedTypes|NeedTypesInfo) != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0)) // This package needs source if the call requested source (or types info, which implies source) // and the package is either a root, or it's a non-root and the user requested dependencies... needsrc := ((ld.Mode&(NeedSyntax|NeedTypesInfo) != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0)) || @@ -819,9 +829,10 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) { } } - if ld.Mode&NeedImports != 0 { - // Materialize the import graph. - + // Materialize the import graph if it is needed (NeedImports), + // or if we'll be using loadPackages (Need{Syntax|Types|TypesInfo}). + var leaves []*loaderPackage // packages with no unfinished successors + if ld.Mode&(NeedImports|NeedSyntax|NeedTypes|NeedTypesInfo) != 0 { const ( white = 0 // new grey = 1 // in progress @@ -840,63 +851,76 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) { // dependency on a package that does. These are the only packages // for which we load source code.
var stack []*loaderPackage - var visit func(lpkg *loaderPackage) bool - visit = func(lpkg *loaderPackage) bool { - switch lpkg.color { - case black: - return lpkg.needsrc - case grey: + var visit func(from, lpkg *loaderPackage) bool + visit = func(from, lpkg *loaderPackage) bool { + if lpkg.color == grey { panic("internal error: grey node") } - lpkg.color = grey - stack = append(stack, lpkg) // push - stubs := lpkg.Imports // the structure form has only stubs with the ID in the Imports - lpkg.Imports = make(map[string]*Package, len(stubs)) - for importPath, ipkg := range stubs { - var importErr error - imp := ld.pkgs[ipkg.ID] - if imp == nil { - // (includes package "C" when DisableCgo) - importErr = fmt.Errorf("missing package: %q", ipkg.ID) - } else if imp.color == grey { - importErr = fmt.Errorf("import cycle: %s", stack) + if lpkg.color == white { + lpkg.color = grey + stack = append(stack, lpkg) // push + stubs := lpkg.Imports // the structure form has only stubs with the ID in the Imports + lpkg.Imports = make(map[string]*Package, len(stubs)) + for importPath, ipkg := range stubs { + var importErr error + imp := ld.pkgs[ipkg.ID] + if imp == nil { + // (includes package "C" when DisableCgo) + importErr = fmt.Errorf("missing package: %q", ipkg.ID) + } else if imp.color == grey { + importErr = fmt.Errorf("import cycle: %s", stack) + } + if importErr != nil { + if lpkg.importErrors == nil { + lpkg.importErrors = make(map[string]error) + } + lpkg.importErrors[importPath] = importErr + continue + } + + if visit(lpkg, imp) { + lpkg.needsrc = true + } + lpkg.Imports[importPath] = imp.Package } - if importErr != nil { - if lpkg.importErrors == nil { - lpkg.importErrors = make(map[string]error) + + // -- postorder -- + + // Complete type information is required for the + // immediate dependencies of each source package. + if lpkg.needsrc && ld.Mode&NeedTypes != 0 { + for _, ipkg := range lpkg.Imports { + ld.pkgs[ipkg.ID].needtypes = true } - lpkg.importErrors[importPath] = importErr - continue } - if visit(imp) { - lpkg.needsrc = true + // NeedTypeSizes causes TypeSizes to be set even + // on packages for which types aren't needed. + if ld.Mode&NeedTypesSizes != 0 { + lpkg.TypesSizes = ld.sizes } - lpkg.Imports[importPath] = imp.Package - } - // Complete type information is required for the - // immediate dependencies of each source package. - if lpkg.needsrc && ld.Mode&NeedTypes != 0 { - for _, ipkg := range lpkg.Imports { - ld.pkgs[ipkg.ID].needtypes = true + // Add packages with no imports directly to the queue of leaves. + if len(lpkg.Imports) == 0 { + leaves = append(leaves, lpkg) } + + stack = stack[:len(stack)-1] // pop + lpkg.color = black } - // NeedTypeSizes causes TypeSizes to be set even - // on packages for which types aren't needed. - if ld.Mode&NeedTypesSizes != 0 { - lpkg.TypesSizes = ld.sizes + // Add edge from predecessor. + if from != nil { + from.unfinishedSuccs.Add(+1) // incref + lpkg.preds = append(lpkg.preds, from) } - stack = stack[:len(stack)-1] // pop - lpkg.color = black return lpkg.needsrc } // For each initial package, create its import DAG. for _, lpkg := range initial { - visit(lpkg) + visit(nil, lpkg) } } else { @@ -909,16 +933,45 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) { // Load type data and syntax if needed, starting at // the initial packages (roots of the import DAG). 
- if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 { - var wg sync.WaitGroup - for _, lpkg := range initial { - wg.Add(1) - go func(lpkg *loaderPackage) { - ld.loadRecursive(lpkg) - wg.Done() - }(lpkg) + if ld.Mode&(NeedSyntax|NeedTypes|NeedTypesInfo) != 0 { + + // We avoid using g.SetLimit to limit concurrency as + // it makes g.Go stop accepting work, which prevents + // workers from enqueuing, and thus finishing, and thus + // allowing the group to make progress: deadlock. + // + // Instead we use the ioLimit and cpuLimit semaphores. + g, _ := errgroup.WithContext(ld.Context) + + // enqueue adds a package to the type-checking queue. + // It must have no unfinished successors. + var enqueue func(*loaderPackage) + enqueue = func(lpkg *loaderPackage) { + g.Go(func() error { + // Parse and type-check. + ld.loadPackage(lpkg) + + // Notify each waiting predecessor, + // and enqueue it when it becomes a leaf. + for _, pred := range lpkg.preds { + if pred.unfinishedSuccs.Add(-1) == 0 { // decref + enqueue(pred) + } + } + + return nil + }) + } + + // Load leaves first, adding new packages + // to the queue as they become leaves. + for _, leaf := range leaves { + enqueue(leaf) + } + + if err := g.Wait(); err != nil { + return nil, err // cancelled } - wg.Wait() } // If the context is done, return its error and @@ -965,7 +1018,7 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) { if ld.requestedMode&NeedSyntax == 0 { ld.pkgs[i].Syntax = nil } - if ld.requestedMode&NeedTypes == 0 && ld.requestedMode&NeedSyntax == 0 { + if ld.requestedMode&(NeedSyntax|NeedTypes|NeedTypesInfo) == 0 { ld.pkgs[i].Fset = nil } if ld.requestedMode&NeedTypesInfo == 0 { @@ -982,31 +1035,10 @@ func (ld *loader) refine(response *DriverResponse) ([]*Package, error) { return result, nil } -// loadRecursive loads the specified package and its dependencies, -// recursively, in parallel, in topological order. -// It is atomic and idempotent. -// Precondition: ld.Mode&NeedTypes. -func (ld *loader) loadRecursive(lpkg *loaderPackage) { - lpkg.loadOnce.Do(func() { - // Load the direct dependencies, in parallel. - var wg sync.WaitGroup - for _, ipkg := range lpkg.Imports { - imp := ld.pkgs[ipkg.ID] - wg.Add(1) - go func(imp *loaderPackage) { - ld.loadRecursive(imp) - wg.Done() - }(imp) - } - wg.Wait() - ld.loadPackage(lpkg) - }) -} - -// loadPackage loads the specified package. +// loadPackage loads/parses/typechecks the specified package. // It must be called only once per Package, // after immediate dependencies are loaded. -// Precondition: ld.Mode & NeedTypes. +// Precondition: ld.Mode&(NeedSyntax|NeedTypes|NeedTypesInfo) != 0. func (ld *loader) loadPackage(lpkg *loaderPackage) { if lpkg.PkgPath == "unsafe" { // Fill in the blanks to avoid surprises. @@ -1042,6 +1074,10 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { if !lpkg.needtypes && !lpkg.needsrc { return } + + // TODO(adonovan): this condition looks wrong: + // I think it should be lpkg.needtypes && !lpkg.needsrc, + // so that NeedSyntax without NeedTypes can be satisfied by export data.
if !lpkg.needsrc { if err := ld.loadFromExportData(lpkg); err != nil { lpkg.Errors = append(lpkg.Errors, Error{ @@ -1147,7 +1183,7 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { } lpkg.Syntax = files - if ld.Config.Mode&NeedTypes == 0 { + if ld.Config.Mode&(NeedTypes|NeedTypesInfo) == 0 { return } @@ -1158,16 +1194,20 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { return } - lpkg.TypesInfo = &types.Info{ - Types: make(map[ast.Expr]types.TypeAndValue), - Defs: make(map[*ast.Ident]types.Object), - Uses: make(map[*ast.Ident]types.Object), - Implicits: make(map[ast.Node]types.Object), - Instances: make(map[*ast.Ident]types.Instance), - Scopes: make(map[ast.Node]*types.Scope), - Selections: make(map[*ast.SelectorExpr]*types.Selection), + // Populate TypesInfo only if needed, as it + // causes the type checker to work much harder. + if ld.Config.Mode&NeedTypesInfo != 0 { + lpkg.TypesInfo = &types.Info{ + Types: make(map[ast.Expr]types.TypeAndValue), + Defs: make(map[*ast.Ident]types.Object), + Uses: make(map[*ast.Ident]types.Object), + Implicits: make(map[ast.Node]types.Object), + Instances: make(map[*ast.Ident]types.Instance), + Scopes: make(map[ast.Node]*types.Scope), + Selections: make(map[*ast.SelectorExpr]*types.Selection), + FileVersions: make(map[*ast.File]string), + } } - versions.InitFileVersions(lpkg.TypesInfo) lpkg.TypesSizes = ld.sizes importer := importerFunc(func(path string) (*types.Package, error) { @@ -1220,6 +1260,10 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { } } + // Type-checking is CPU intensive. + cpuLimit <- unit{} // acquire a token + defer func() { <-cpuLimit }() // release a token + typErr := types.NewChecker(tc, ld.Fset, lpkg.Types, lpkg.TypesInfo).Files(lpkg.Syntax) lpkg.importErrors = nil // no longer needed @@ -1284,8 +1328,11 @@ type importerFunc func(path string) (*types.Package, error) func (f importerFunc) Import(path string) (*types.Package, error) { return f(path) } // We use a counting semaphore to limit -// the number of parallel I/O calls per process. -var ioLimit = make(chan bool, 20) +// the number of parallel I/O calls or CPU threads per process. +var ( + ioLimit = make(chan unit, 20) + cpuLimit = make(chan unit, runtime.GOMAXPROCS(0)) +) func (ld *loader) parseFile(filename string) (*ast.File, error) { ld.parseCacheMu.Lock() @@ -1302,20 +1349,28 @@ func (ld *loader) parseFile(filename string) (*ast.File, error) { var src []byte for f, contents := range ld.Config.Overlay { + // TODO(adonovan): Inefficient for large overlays. + // Do an exact name-based map lookup + // (for nonexistent files) followed by a + // FileID-based map lookup (for existing ones). if sameFile(f, filename) { src = contents + break } } var err error if src == nil { - ioLimit <- true // wait + ioLimit <- unit{} // acquire a token src, err = os.ReadFile(filename) - <-ioLimit // signal + <-ioLimit // release a token } if err != nil { v.err = err } else { + // Parsing is CPU intensive. + cpuLimit <- unit{} // acquire a token v.f, v.err = ld.ParseFile(ld.Fset, filename, src) + <-cpuLimit // release a token } close(v.ready) @@ -1330,18 +1385,21 @@ func (ld *loader) parseFile(filename string) (*ast.File, error) { // Because files are scanned in parallel, the token.Pos // positions of the resulting ast.Files are not ordered. 
func (ld *loader) parseFiles(filenames []string) ([]*ast.File, []error) { - var wg sync.WaitGroup - n := len(filenames) - parsed := make([]*ast.File, n) - errors := make([]error, n) - for i, file := range filenames { - wg.Add(1) - go func(i int, filename string) { + var ( + n = len(filenames) + parsed = make([]*ast.File, n) + errors = make([]error, n) + ) + var g errgroup.Group + for i, filename := range filenames { + // This creates goroutines unnecessarily in the + // cache-hit case, but that case is uncommon. + g.Go(func() error { parsed[i], errors[i] = ld.parseFile(filename) - wg.Done() - }(i, file) + return nil + }) } - wg.Wait() + g.Wait() // Eliminate nils, preserving order. var o int @@ -1512,4 +1570,4 @@ func usesExportData(cfg *Config) bool { return cfg.Mode&NeedExportFile != 0 || cfg.Mode&NeedTypes != 0 && cfg.Mode&NeedDeps == 0 } -var _ interface{} = io.Discard // assert build toolchain is go1.16 or later +type unit struct{} diff --git a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go index 9ada17775..16ed3c178 100644 --- a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go +++ b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go @@ -228,7 +228,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { // Reject obviously non-viable cases. switch obj := obj.(type) { case *types.TypeName: - if _, ok := aliases.Unalias(obj.Type()).(*types.TypeParam); !ok { + if _, ok := types.Unalias(obj.Type()).(*types.TypeParam); !ok { // With the exception of type parameters, only package-level type names // have a path. return "", fmt.Errorf("no path for %v", obj) @@ -280,26 +280,26 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { path = append(path, opType) T := o.Type() - if alias, ok := T.(*aliases.Alias); ok { - if r := findTypeParam(obj, aliases.TypeParams(alias), path, opTypeParam, nil); r != nil { + if alias, ok := T.(*types.Alias); ok { + if r := findTypeParam(obj, aliases.TypeParams(alias), path, opTypeParam); r != nil { return Path(r), nil } - if r := find(obj, aliases.Rhs(alias), append(path, opRhs), nil); r != nil { + if r := find(obj, aliases.Rhs(alias), append(path, opRhs)); r != nil { return Path(r), nil } } else if tname.IsAlias() { // legacy alias - if r := find(obj, T, path, nil); r != nil { + if r := find(obj, T, path); r != nil { return Path(r), nil } } else if named, ok := T.(*types.Named); ok { // defined (named) type - if r := findTypeParam(obj, named.TypeParams(), path, opTypeParam, nil); r != nil { + if r := findTypeParam(obj, named.TypeParams(), path, opTypeParam); r != nil { return Path(r), nil } - if r := find(obj, named.Underlying(), append(path, opUnderlying), nil); r != nil { + if r := find(obj, named.Underlying(), append(path, opUnderlying)); r != nil { return Path(r), nil } } @@ -312,7 +312,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { if _, ok := o.(*types.TypeName); !ok { if o.Exported() { // exported non-type (const, var, func) - if r := find(obj, o.Type(), append(path, opType), nil); r != nil { + if r := find(obj, o.Type(), append(path, opType)); r != nil { return Path(r), nil } } @@ -320,7 +320,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { } // Inspect declared methods of defined types. 
- if T, ok := aliases.Unalias(o.Type()).(*types.Named); ok { + if T, ok := types.Unalias(o.Type()).(*types.Named); ok { path = append(path, opType) // The method index here is always with respect // to the underlying go/types data structures, @@ -332,7 +332,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { if m == obj { return Path(path2), nil // found declared method } - if r := find(obj, m.Type(), append(path2, opType), nil); r != nil { + if r := find(obj, m.Type(), append(path2, opType)); r != nil { return Path(r), nil } } @@ -447,46 +447,64 @@ func (enc *Encoder) concreteMethod(meth *types.Func) (Path, bool) { // // The seen map is used to short circuit cycles through type parameters. If // nil, it will be allocated as necessary. -func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName]bool) []byte { +// +// The seenMethods map is used internally to short circuit cycles through +// interface methods, such as occur in the following example: +// +// type I interface { f() interface{I} } +// +// See golang/go#68046 for details. +func find(obj types.Object, T types.Type, path []byte) []byte { + return (&finder{obj: obj}).find(T, path) +} + +// finder closes over search state for a call to find. +type finder struct { + obj types.Object // the sought object + seenTParamNames map[*types.TypeName]bool // for cycle breaking through type parameters + seenMethods map[*types.Func]bool // for cycle breaking through recursive interfaces +} + +func (f *finder) find(T types.Type, path []byte) []byte { switch T := T.(type) { - case *aliases.Alias: - return find(obj, aliases.Unalias(T), path, seen) + case *types.Alias: + return f.find(types.Unalias(T), path) case *types.Basic, *types.Named: // Named types belonging to pkg were handled already, // so T must belong to another package. No path. 
return nil case *types.Pointer: - return find(obj, T.Elem(), append(path, opElem), seen) + return f.find(T.Elem(), append(path, opElem)) case *types.Slice: - return find(obj, T.Elem(), append(path, opElem), seen) + return f.find(T.Elem(), append(path, opElem)) case *types.Array: - return find(obj, T.Elem(), append(path, opElem), seen) + return f.find(T.Elem(), append(path, opElem)) case *types.Chan: - return find(obj, T.Elem(), append(path, opElem), seen) + return f.find(T.Elem(), append(path, opElem)) case *types.Map: - if r := find(obj, T.Key(), append(path, opKey), seen); r != nil { + if r := f.find(T.Key(), append(path, opKey)); r != nil { return r } - return find(obj, T.Elem(), append(path, opElem), seen) + return f.find(T.Elem(), append(path, opElem)) case *types.Signature: - if r := findTypeParam(obj, T.RecvTypeParams(), path, opRecvTypeParam, nil); r != nil { + if r := f.findTypeParam(T.RecvTypeParams(), path, opRecvTypeParam); r != nil { return r } - if r := findTypeParam(obj, T.TypeParams(), path, opTypeParam, seen); r != nil { + if r := f.findTypeParam(T.TypeParams(), path, opTypeParam); r != nil { return r } - if r := find(obj, T.Params(), append(path, opParams), seen); r != nil { + if r := f.find(T.Params(), append(path, opParams)); r != nil { return r } - return find(obj, T.Results(), append(path, opResults), seen) + return f.find(T.Results(), append(path, opResults)) case *types.Struct: for i := 0; i < T.NumFields(); i++ { fld := T.Field(i) path2 := appendOpArg(path, opField, i) - if fld == obj { + if fld == f.obj { return path2 // found field var } - if r := find(obj, fld.Type(), append(path2, opType), seen); r != nil { + if r := f.find(fld.Type(), append(path2, opType)); r != nil { return r } } @@ -495,10 +513,10 @@ func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName] for i := 0; i < T.Len(); i++ { v := T.At(i) path2 := appendOpArg(path, opAt, i) - if v == obj { + if v == f.obj { return path2 // found param/result var } - if r := find(obj, v.Type(), append(path2, opType), seen); r != nil { + if r := f.find(v.Type(), append(path2, opType)); r != nil { return r } } @@ -506,28 +524,35 @@ func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName] case *types.Interface: for i := 0; i < T.NumMethods(); i++ { m := T.Method(i) + if f.seenMethods[m] { + return nil + } path2 := appendOpArg(path, opMethod, i) - if m == obj { + if m == f.obj { return path2 // found interface method } - if r := find(obj, m.Type(), append(path2, opType), seen); r != nil { + if f.seenMethods == nil { + f.seenMethods = make(map[*types.Func]bool) + } + f.seenMethods[m] = true + if r := f.find(m.Type(), append(path2, opType)); r != nil { return r } } return nil case *types.TypeParam: name := T.Obj() - if name == obj { - return append(path, opObj) - } - if seen[name] { + if f.seenTParamNames[name] { return nil } - if seen == nil { - seen = make(map[*types.TypeName]bool) + if name == f.obj { + return append(path, opObj) } - seen[name] = true - if r := find(obj, T.Constraint(), append(path, opConstraint), seen); r != nil { + if f.seenTParamNames == nil { + f.seenTParamNames = make(map[*types.TypeName]bool) + } + f.seenTParamNames[name] = true + if r := f.find(T.Constraint(), append(path, opConstraint)); r != nil { return r } return nil @@ -535,11 +560,15 @@ func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName] panic(T) } -func findTypeParam(obj types.Object, list *types.TypeParamList, path []byte, op byte, seen 
map[*types.TypeName]bool) []byte { +func findTypeParam(obj types.Object, list *types.TypeParamList, path []byte, op byte) []byte { + return (&finder{obj: obj}).findTypeParam(list, path, op) +} + +func (f *finder) findTypeParam(list *types.TypeParamList, path []byte, op byte) []byte { for i := 0; i < list.Len(); i++ { tparam := list.At(i) path2 := appendOpArg(path, op, i) - if r := find(obj, tparam, path2, seen); r != nil { + if r := f.find(tparam, path2); r != nil { return r } } @@ -626,7 +655,7 @@ func Object(pkg *types.Package, p Path) (types.Object, error) { // Inv: t != nil, obj == nil - t = aliases.Unalias(t) + t = types.Unalias(t) switch code { case opElem: hasElem, ok := t.(hasElem) // Pointer, Slice, Array, Chan, Map @@ -664,7 +693,7 @@ func Object(pkg *types.Package, p Path) (types.Object, error) { t = named.Underlying() case opRhs: - if alias, ok := t.(*aliases.Alias); ok { + if alias, ok := t.(*types.Alias); ok { t = aliases.Rhs(alias) } else if false && aliases.Enabled() { // The Enabled check is too expensive, so for now we diff --git a/vendor/golang.org/x/tools/go/types/typeutil/callee.go b/vendor/golang.org/x/tools/go/types/typeutil/callee.go new file mode 100644 index 000000000..754380351 --- /dev/null +++ b/vendor/golang.org/x/tools/go/types/typeutil/callee.go @@ -0,0 +1,68 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeutil + +import ( + "go/ast" + "go/types" + + "golang.org/x/tools/internal/typeparams" +) + +// Callee returns the named target of a function call, if any: +// a function, method, builtin, or variable. +// +// Functions and methods may potentially have type parameters. +func Callee(info *types.Info, call *ast.CallExpr) types.Object { + fun := ast.Unparen(call.Fun) + + // Look through type instantiation if necessary. + isInstance := false + switch fun.(type) { + case *ast.IndexExpr, *ast.IndexListExpr: + // When extracting the callee from an *IndexExpr, we need to check that + // it is a *types.Func and not a *types.Var. + // Example: Don't match a slice m within the expression `m[0]()`. + isInstance = true + fun, _, _, _ = typeparams.UnpackIndexExpr(fun) + } + + var obj types.Object + switch fun := fun.(type) { + case *ast.Ident: + obj = info.Uses[fun] // type, var, builtin, or declared func + case *ast.SelectorExpr: + if sel, ok := info.Selections[fun]; ok { + obj = sel.Obj() // method or field + } else { + obj = info.Uses[fun.Sel] // qualified identifier? + } + } + if _, ok := obj.(*types.TypeName); ok { + return nil // T(x) is a conversion, not a call + } + // A Func is required to match instantiations. + if _, ok := obj.(*types.Func); isInstance && !ok { + return nil // Was not a Func. + } + return obj +} + +// StaticCallee returns the target (function or method) of a static function +// call, if any. It returns nil for calls to builtins. +// +// Note: for calls of instantiated functions and methods, StaticCallee returns +// the corresponding generic function or method on the generic type. 
+func StaticCallee(info *types.Info, call *ast.CallExpr) *types.Func { + if f, ok := Callee(info, call).(*types.Func); ok && !interfaceMethod(f) { + return f + } + return nil +} + +func interfaceMethod(f *types.Func) bool { + recv := f.Type().(*types.Signature).Recv() + return recv != nil && types.IsInterface(recv.Type()) +} diff --git a/vendor/golang.org/x/tools/go/types/typeutil/imports.go b/vendor/golang.org/x/tools/go/types/typeutil/imports.go new file mode 100644 index 000000000..b81ce0c33 --- /dev/null +++ b/vendor/golang.org/x/tools/go/types/typeutil/imports.go @@ -0,0 +1,30 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeutil + +import "go/types" + +// Dependencies returns all dependencies of the specified packages. +// +// Dependent packages appear in topological order: if package P imports +// package Q, Q appears earlier than P in the result. +// The algorithm follows import statements in the order they +// appear in the source code, so the result is a total order. +func Dependencies(pkgs ...*types.Package) []*types.Package { + var result []*types.Package + seen := make(map[*types.Package]bool) + var visit func(pkgs []*types.Package) + visit = func(pkgs []*types.Package) { + for _, p := range pkgs { + if !seen[p] { + seen[p] = true + visit(p.Imports()) + result = append(result, p) + } + } + } + visit(pkgs) + return result +} diff --git a/vendor/golang.org/x/tools/go/types/typeutil/map.go b/vendor/golang.org/x/tools/go/types/typeutil/map.go new file mode 100644 index 000000000..b6d542c64 --- /dev/null +++ b/vendor/golang.org/x/tools/go/types/typeutil/map.go @@ -0,0 +1,475 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package typeutil defines various utilities for types, such as [Map], +// a hash table that maps [types.Type] to any value. +package typeutil + +import ( + "bytes" + "fmt" + "go/types" + "hash/maphash" + "unsafe" + + "golang.org/x/tools/internal/typeparams" +) + +// Map is a hash-table-based mapping from types (types.Type) to +// arbitrary values. The concrete types that implement +// the Type interface are pointers. Since they are not canonicalized, +// == cannot be used to check for equivalence, and thus we cannot +// simply use a Go map. +// +// Just as with map[K]V, a nil *Map is a valid empty map. +// +// Read-only map operations ([Map.At], [Map.Len], and so on) may +// safely be called concurrently. +// +// TODO(adonovan): deprecate in favor of https://go.dev/issues/69420 +// and 69559, if the latter proposals for a generic hash-map type and +// a types.Hash function are accepted. +type Map struct { + table map[uint32][]entry // maps hash to bucket; entry.key==nil means unused + length int // number of map entries +} + +// entry is an entry (key/value association) in a hash bucket. +type entry struct { + key types.Type + value any +} + +// SetHasher has no effect. +// +// It is a relic of an optimization that is no longer profitable. Do +// not use [Hasher], [MakeHasher], or [SetHasher] in new code. +func (m *Map) SetHasher(Hasher) {} + +// Delete removes the entry with the given key, if any. +// It returns true if the entry was found. 
+func (m *Map) Delete(key types.Type) bool { + if m != nil && m.table != nil { + hash := hash(key) + bucket := m.table[hash] + for i, e := range bucket { + if e.key != nil && types.Identical(key, e.key) { + // We can't compact the bucket as it + // would disturb iterators. + bucket[i] = entry{} + m.length-- + return true + } + } + } + return false +} + +// At returns the map entry for the given key. +// The result is nil if the entry is not present. +func (m *Map) At(key types.Type) any { + if m != nil && m.table != nil { + for _, e := range m.table[hash(key)] { + if e.key != nil && types.Identical(key, e.key) { + return e.value + } + } + } + return nil +} + +// Set sets the map entry for key to val, +// and returns the previous entry, if any. +func (m *Map) Set(key types.Type, value any) (prev any) { + if m.table != nil { + hash := hash(key) + bucket := m.table[hash] + var hole *entry + for i, e := range bucket { + if e.key == nil { + hole = &bucket[i] + } else if types.Identical(key, e.key) { + prev = e.value + bucket[i].value = value + return + } + } + + if hole != nil { + *hole = entry{key, value} // overwrite deleted entry + } else { + m.table[hash] = append(bucket, entry{key, value}) + } + } else { + hash := hash(key) + m.table = map[uint32][]entry{hash: {entry{key, value}}} + } + + m.length++ + return +} + +// Len returns the number of map entries. +func (m *Map) Len() int { + if m != nil { + return m.length + } + return 0 +} + +// Iterate calls function f on each entry in the map in unspecified order. +// +// If f should mutate the map, Iterate provides the same guarantees as +// Go maps: if f deletes a map entry that Iterate has not yet reached, +// f will not be invoked for it, but if f inserts a map entry that +// Iterate has not yet reached, whether or not f will be invoked for +// it is unspecified. +func (m *Map) Iterate(f func(key types.Type, value any)) { + if m != nil { + for _, bucket := range m.table { + for _, e := range bucket { + if e.key != nil { + f(e.key, e.value) + } + } + } + } +} + +// Keys returns a new slice containing the set of map keys. +// The order is unspecified. +func (m *Map) Keys() []types.Type { + keys := make([]types.Type, 0, m.Len()) + m.Iterate(func(key types.Type, _ any) { + keys = append(keys, key) + }) + return keys +} + +func (m *Map) toString(values bool) string { + if m == nil { + return "{}" + } + var buf bytes.Buffer + fmt.Fprint(&buf, "{") + sep := "" + m.Iterate(func(key types.Type, value any) { + fmt.Fprint(&buf, sep) + sep = ", " + fmt.Fprint(&buf, key) + if values { + fmt.Fprintf(&buf, ": %q", value) + } + }) + fmt.Fprint(&buf, "}") + return buf.String() +} + +// String returns a string representation of the map's entries. +// Values are printed using fmt.Sprintf("%v", v). +// Order is unspecified. +func (m *Map) String() string { + return m.toString(true) +} + +// KeysString returns a string representation of the map's key set. +// Order is unspecified. +func (m *Map) KeysString() string { + return m.toString(false) +} + +// -- Hasher -- + +// hash returns the hash of type t. +// TODO(adonovan): replace by types.Hash when Go proposal #69420 is accepted. +func hash(t types.Type) uint32 { + return theHasher.Hash(t) +} + +// A Hasher provides a [Hasher.Hash] method to map a type to its hash value. +// Hashers are stateless, and all are equivalent. +type Hasher struct{} + +var theHasher Hasher + +// MakeHasher returns Hasher{}. +// Hashers are stateless; all are equivalent. 
+func MakeHasher() Hasher { return theHasher } + +// Hash computes a hash value for the given type t such that +// Identical(t, t') => Hash(t) == Hash(t'). +func (h Hasher) Hash(t types.Type) uint32 { + return hasher{inGenericSig: false}.hash(t) +} + +// hasher holds the state of a single Hash traversal: whether we are +// inside the signature of a generic function; this is used to +// optimize [hasher.hashTypeParam]. +type hasher struct{ inGenericSig bool } + +// hashString computes the Fowler–Noll–Vo hash of s. +func hashString(s string) uint32 { + var h uint32 + for i := 0; i < len(s); i++ { + h ^= uint32(s[i]) + h *= 16777619 + } + return h +} + +// hash computes the hash of t. +func (h hasher) hash(t types.Type) uint32 { + // See Identical for rationale. + switch t := t.(type) { + case *types.Basic: + return uint32(t.Kind()) + + case *types.Alias: + return h.hash(types.Unalias(t)) + + case *types.Array: + return 9043 + 2*uint32(t.Len()) + 3*h.hash(t.Elem()) + + case *types.Slice: + return 9049 + 2*h.hash(t.Elem()) + + case *types.Struct: + var hash uint32 = 9059 + for i, n := 0, t.NumFields(); i < n; i++ { + f := t.Field(i) + if f.Anonymous() { + hash += 8861 + } + hash += hashString(t.Tag(i)) + hash += hashString(f.Name()) // (ignore f.Pkg) + hash += h.hash(f.Type()) + } + return hash + + case *types.Pointer: + return 9067 + 2*h.hash(t.Elem()) + + case *types.Signature: + var hash uint32 = 9091 + if t.Variadic() { + hash *= 8863 + } + + tparams := t.TypeParams() + if n := tparams.Len(); n > 0 { + h.inGenericSig = true // affects constraints, params, and results + + for i := range n { + tparam := tparams.At(i) + hash += 7 * h.hash(tparam.Constraint()) + } + } + + return hash + 3*h.hashTuple(t.Params()) + 5*h.hashTuple(t.Results()) + + case *types.Union: + return h.hashUnion(t) + + case *types.Interface: + // Interfaces are identical if they have the same set of methods, with + // identical names and types, and they have the same set of type + // restrictions. See go/types.identical for more details. + var hash uint32 = 9103 + + // Hash methods. + for i, n := 0, t.NumMethods(); i < n; i++ { + // Method order is not significant. + // Ignore m.Pkg(). + m := t.Method(i) + // Use shallow hash on method signature to + // avoid anonymous interface cycles. + hash += 3*hashString(m.Name()) + 5*h.shallowHash(m.Type()) + } + + // Hash type restrictions. + terms, err := typeparams.InterfaceTermSet(t) + // if err != nil t has invalid type restrictions. + if err == nil { + hash += h.hashTermSet(terms) + } + + return hash + + case *types.Map: + return 9109 + 2*h.hash(t.Key()) + 3*h.hash(t.Elem()) + + case *types.Chan: + return 9127 + 2*uint32(t.Dir()) + 3*h.hash(t.Elem()) + + case *types.Named: + hash := h.hashTypeName(t.Obj()) + targs := t.TypeArgs() + for i := 0; i < targs.Len(); i++ { + targ := targs.At(i) + hash += 2 * h.hash(targ) + } + return hash + + case *types.TypeParam: + return h.hashTypeParam(t) + + case *types.Tuple: + return h.hashTuple(t) + } + + panic(fmt.Sprintf("%T: %v", t, t)) +} + +func (h hasher) hashTuple(tuple *types.Tuple) uint32 { + // See go/types.identicalTypes for rationale. + n := tuple.Len() + hash := 9137 + 2*uint32(n) + for i := range n { + hash += 3 * h.hash(tuple.At(i).Type()) + } + return hash +} + +func (h hasher) hashUnion(t *types.Union) uint32 { + // Hash type restrictions. + terms, err := typeparams.UnionTermSet(t) + // if err != nil t has invalid type restrictions. Fall back on a non-zero + // hash. 
+ if err != nil { + return 9151 + } + return h.hashTermSet(terms) +} + +func (h hasher) hashTermSet(terms []*types.Term) uint32 { + hash := 9157 + 2*uint32(len(terms)) + for _, term := range terms { + // term order is not significant. + termHash := h.hash(term.Type()) + if term.Tilde() { + termHash *= 9161 + } + hash += 3 * termHash + } + return hash +} + +// hashTypeParam returns the hash of a type parameter. +func (h hasher) hashTypeParam(t *types.TypeParam) uint32 { + // Within the signature of a generic function, TypeParams are + // identical if they have the same index and constraint, so we + // hash them based on index. + // + // When we are outside a generic function, free TypeParams are + // identical iff they are the same object, so we can use a + // more discriminating hash consistent with object identity. + // This optimization saves [Map] about 4% when hashing all the + // types.Info.Types in the forward closure of net/http. + if !h.inGenericSig { + // Optimization: outside a generic function signature, + // use a more discriminating hash consistent with object identity. + return h.hashTypeName(t.Obj()) + } + return 9173 + 3*uint32(t.Index()) +} + +var theSeed = maphash.MakeSeed() + +// hashTypeName hashes the pointer of tname. +func (hasher) hashTypeName(tname *types.TypeName) uint32 { + // Since types.Identical uses == to compare TypeNames, + // the Hash function uses maphash.Comparable. + // TODO(adonovan): or will, when it becomes available in go1.24. + // In the meantime we use the pointer's numeric value. + // + // hash := maphash.Comparable(theSeed, tname) + // + // (Another approach would be to hash the name and package + // path, and whether or not it is a package-level typename. It + // is rare for a package to define multiple local types with + // the same name.) + ptr := uintptr(unsafe.Pointer(tname)) + if unsafe.Sizeof(ptr) == 8 { + hash := uint64(ptr) + return uint32(hash ^ (hash >> 32)) + } else { + return uint32(ptr) + } +} + +// shallowHash computes a hash of t without looking at any of its +// element Types, to avoid potential anonymous cycles in the types of +// interface methods. +// +// When an unnamed non-empty interface type appears anywhere among the +// arguments or results of an interface method, there is a potential +// for endless recursion. Consider: +// +// type X interface { m() []*interface { X } } +// +// The problem is that the Methods of the interface in m's result type +// include m itself; there is no mention of the named type X that +// might help us break the cycle. +// (See comment in go/types.identical, case *Interface, for more.) +func (h hasher) shallowHash(t types.Type) uint32 { + // t is the type of an interface method (Signature), + // its params or results (Tuples), or their immediate + // elements (mostly Slice, Pointer, Basic, Named), + // so there's no need to optimize anything else. + switch t := t.(type) { + case *types.Alias: + return h.shallowHash(types.Unalias(t)) + + case *types.Signature: + var hash uint32 = 604171 + if t.Variadic() { + hash *= 971767 + } + // The Signature/Tuple recursion is always finite + // and invariably shallow.
+ return hash + 1062599*h.shallowHash(t.Params()) + 1282529*h.shallowHash(t.Results()) + + case *types.Tuple: + n := t.Len() + hash := 9137 + 2*uint32(n) + for i := range n { + hash += 53471161 * h.shallowHash(t.At(i).Type()) + } + return hash + + case *types.Basic: + return 45212177 * uint32(t.Kind()) + + case *types.Array: + return 1524181 + 2*uint32(t.Len()) + + case *types.Slice: + return 2690201 + + case *types.Struct: + return 3326489 + + case *types.Pointer: + return 4393139 + + case *types.Union: + return 562448657 + + case *types.Interface: + return 2124679 // no recursion here + + case *types.Map: + return 9109 + + case *types.Chan: + return 9127 + + case *types.Named: + return h.hashTypeName(t.Obj()) + + case *types.TypeParam: + return h.hashTypeParam(t) + } + panic(fmt.Sprintf("shallowHash: %T: %v", t, t)) +} diff --git a/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go b/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go new file mode 100644 index 000000000..f7666028f --- /dev/null +++ b/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go @@ -0,0 +1,71 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file implements a cache of method sets. + +package typeutil + +import ( + "go/types" + "sync" +) + +// A MethodSetCache records the method set of each type T for which +// MethodSet(T) is called so that repeat queries are fast. +// The zero value is a ready-to-use cache instance. +type MethodSetCache struct { + mu sync.Mutex + named map[*types.Named]struct{ value, pointer *types.MethodSet } // method sets for named N and *N + others map[types.Type]*types.MethodSet // all other types +} + +// MethodSet returns the method set of type T. It is thread-safe. +// +// If cache is nil, this function is equivalent to types.NewMethodSet(T). +// Utility functions can thus expose an optional *MethodSetCache +// parameter to clients that care about performance. +func (cache *MethodSetCache) MethodSet(T types.Type) *types.MethodSet { + if cache == nil { + return types.NewMethodSet(T) + } + cache.mu.Lock() + defer cache.mu.Unlock() + + switch T := types.Unalias(T).(type) { + case *types.Named: + return cache.lookupNamed(T).value + + case *types.Pointer: + if N, ok := types.Unalias(T.Elem()).(*types.Named); ok { + return cache.lookupNamed(N).pointer + } + } + + // all other types + // (The map uses pointer equivalence, not type identity.) + mset := cache.others[T] + if mset == nil { + mset = types.NewMethodSet(T) + if cache.others == nil { + cache.others = make(map[types.Type]*types.MethodSet) + } + cache.others[T] = mset + } + return mset +} + +func (cache *MethodSetCache) lookupNamed(named *types.Named) struct{ value, pointer *types.MethodSet } { + if cache.named == nil { + cache.named = make(map[*types.Named]struct{ value, pointer *types.MethodSet }) + } + // Avoid recomputing mset(*T) for each distinct Pointer + // instance whose underlying type is a named type. 
+ msets, ok := cache.named[named] + if !ok { + msets.value = types.NewMethodSet(named) + msets.pointer = types.NewMethodSet(types.NewPointer(named)) + cache.named[named] = msets + } + return msets +} diff --git a/vendor/golang.org/x/tools/go/types/typeutil/ui.go b/vendor/golang.org/x/tools/go/types/typeutil/ui.go new file mode 100644 index 000000000..9dda6a25d --- /dev/null +++ b/vendor/golang.org/x/tools/go/types/typeutil/ui.go @@ -0,0 +1,53 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeutil + +// This file defines utilities for user interfaces that display types. + +import ( + "go/types" +) + +// IntuitiveMethodSet returns the intuitive method set of a type T, +// which is the set of methods you can call on an addressable value of +// that type. +// +// The result always contains MethodSet(T), and is exactly MethodSet(T) +// for interface types and for pointer-to-concrete types. +// For all other concrete types T, the result additionally +// contains each method belonging to *T if there is no identically +// named method on T itself. +// +// This corresponds to user intuition about method sets; +// this function is intended only for user interfaces. +// +// The order of the result is as for types.MethodSet(T). +func IntuitiveMethodSet(T types.Type, msets *MethodSetCache) []*types.Selection { + isPointerToConcrete := func(T types.Type) bool { + ptr, ok := types.Unalias(T).(*types.Pointer) + return ok && !types.IsInterface(ptr.Elem()) + } + + var result []*types.Selection + mset := msets.MethodSet(T) + if types.IsInterface(T) || isPointerToConcrete(T) { + for i, n := 0, mset.Len(); i < n; i++ { + result = append(result, mset.At(i)) + } + } else { + // T is some other concrete type. + // Report methods of T and *T, preferring those of T. + pmset := msets.MethodSet(types.NewPointer(T)) + for i, n := 0, pmset.Len(); i < n; i++ { + meth := pmset.At(i) + if m := mset.Lookup(meth.Obj().Pkg(), meth.Obj().Name()); m != nil { + meth = m + } + result = append(result, meth) + } + + } + return result +} diff --git a/vendor/golang.org/x/tools/internal/aliases/aliases.go b/vendor/golang.org/x/tools/internal/aliases/aliases.go index f7798e335..b9425f5a2 100644 --- a/vendor/golang.org/x/tools/internal/aliases/aliases.go +++ b/vendor/golang.org/x/tools/internal/aliases/aliases.go @@ -28,7 +28,7 @@ import ( func NewAlias(enabled bool, pos token.Pos, pkg *types.Package, name string, rhs types.Type, tparams []*types.TypeParam) *types.TypeName { if enabled { tname := types.NewTypeName(pos, pkg, name, nil) - newAlias(tname, rhs, tparams) + SetTypeParams(types.NewAlias(tname, rhs), tparams) return tname } if len(tparams) > 0 { diff --git a/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go b/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go deleted file mode 100644 index a775fcc4b..000000000 --- a/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.22 -// +build !go1.22 - -package aliases - -import ( - "go/types" -) - -// Alias is a placeholder for a go/types.Alias for <=1.21. -// It will never be created by go/types. 
-type Alias struct{} - -func (*Alias) String() string { panic("unreachable") } -func (*Alias) Underlying() types.Type { panic("unreachable") } -func (*Alias) Obj() *types.TypeName { panic("unreachable") } -func Rhs(alias *Alias) types.Type { panic("unreachable") } -func TypeParams(alias *Alias) *types.TypeParamList { panic("unreachable") } -func SetTypeParams(alias *Alias, tparams []*types.TypeParam) { panic("unreachable") } -func TypeArgs(alias *Alias) *types.TypeList { panic("unreachable") } -func Origin(alias *Alias) *Alias { panic("unreachable") } - -// Unalias returns the type t for go <=1.21. -func Unalias(t types.Type) types.Type { return t } - -func newAlias(name *types.TypeName, rhs types.Type, tparams []*types.TypeParam) *Alias { - panic("unreachable") -} - -// Enabled reports whether [NewAlias] should create [types.Alias] types. -// -// Before go1.22, this function always returns false. -func Enabled() bool { return false } diff --git a/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go b/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go index 31c159e42..7716a3331 100644 --- a/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go +++ b/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go @@ -2,9 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build go1.22 -// +build go1.22 - package aliases import ( @@ -14,22 +11,19 @@ import ( "go/types" ) -// Alias is an alias of types.Alias. -type Alias = types.Alias - // Rhs returns the type on the right-hand side of the alias declaration. -func Rhs(alias *Alias) types.Type { +func Rhs(alias *types.Alias) types.Type { if alias, ok := any(alias).(interface{ Rhs() types.Type }); ok { return alias.Rhs() // go1.23+ } // go1.22's Alias didn't have the Rhs method, // so Unalias is the best we can do. - return Unalias(alias) + return types.Unalias(alias) } // TypeParams returns the type parameter list of the alias. -func TypeParams(alias *Alias) *types.TypeParamList { +func TypeParams(alias *types.Alias) *types.TypeParamList { if alias, ok := any(alias).(interface{ TypeParams() *types.TypeParamList }); ok { return alias.TypeParams() // go1.23+ } @@ -37,7 +31,7 @@ func TypeParams(alias *Alias) *types.TypeParamList { } // SetTypeParams sets the type parameters of the alias type. -func SetTypeParams(alias *Alias, tparams []*types.TypeParam) { +func SetTypeParams(alias *types.Alias, tparams []*types.TypeParam) { if alias, ok := any(alias).(interface { SetTypeParams(tparams []*types.TypeParam) }); ok { @@ -48,7 +42,7 @@ func SetTypeParams(alias *Alias, tparams []*types.TypeParam) { } // TypeArgs returns the type arguments used to instantiate the Alias type. -func TypeArgs(alias *Alias) *types.TypeList { +func TypeArgs(alias *types.Alias) *types.TypeList { if alias, ok := any(alias).(interface{ TypeArgs() *types.TypeList }); ok { return alias.TypeArgs() // go1.23+ } @@ -57,25 +51,13 @@ func TypeArgs(alias *Alias) *types.TypeList { // Origin returns the generic Alias type of which alias is an instance. // If alias is not an instance of a generic alias, Origin returns alias. -func Origin(alias *Alias) *Alias { +func Origin(alias *types.Alias) *types.Alias { if alias, ok := any(alias).(interface{ Origin() *types.Alias }); ok { return alias.Origin() // go1.23+ } return alias // not an instance of a generic alias (go1.22) } -// Unalias is a wrapper of types.Unalias. 
-func Unalias(t types.Type) types.Type { return types.Unalias(t) } - -// newAlias is an internal alias around types.NewAlias. -// Direct usage is discouraged as the moment. -// Try to use NewAlias instead. -func newAlias(tname *types.TypeName, rhs types.Type, tparams []*types.TypeParam) *Alias { - a := types.NewAlias(tname, rhs) - SetTypeParams(a, tparams) - return a -} - // Enabled reports whether [NewAlias] should create [types.Alias] types. // // This function is expensive! Call it sparingly. @@ -91,7 +73,7 @@ func Enabled() bool { // many tests. Therefore any attempt to cache the result // is just incorrect. fset := token.NewFileSet() - f, _ := parser.ParseFile(fset, "a.go", "package p; type A = int", 0) + f, _ := parser.ParseFile(fset, "a.go", "package p; type A = int", parser.SkipObjectResolution) pkg, _ := new(types.Config).Check("p", fset, []*ast.File{f}, nil) _, enabled := pkg.Scope().Lookup("A").Type().(*types.Alias) return enabled diff --git a/vendor/golang.org/x/tools/internal/event/keys/keys.go b/vendor/golang.org/x/tools/internal/event/keys/keys.go index a02206e30..4cfa51b61 100644 --- a/vendor/golang.org/x/tools/internal/event/keys/keys.go +++ b/vendor/golang.org/x/tools/internal/event/keys/keys.go @@ -32,7 +32,7 @@ func (k *Value) Format(w io.Writer, buf []byte, l label.Label) { } // Get can be used to get a label for the key from a label.Map. -func (k *Value) Get(lm label.Map) interface{} { +func (k *Value) Get(lm label.Map) any { if t := lm.Find(k); t.Valid() { return k.From(t) } @@ -40,10 +40,10 @@ func (k *Value) Get(lm label.Map) interface{} { } // From can be used to get a value from a Label. -func (k *Value) From(t label.Label) interface{} { return t.UnpackValue() } +func (k *Value) From(t label.Label) any { return t.UnpackValue() } // Of creates a new Label with this key and the supplied value. -func (k *Value) Of(value interface{}) label.Label { return label.OfValue(k, value) } +func (k *Value) Of(value any) label.Label { return label.OfValue(k, value) } // Tag represents a key for tagging labels that have no value. // These are used when the existence of the label is the entire information it diff --git a/vendor/golang.org/x/tools/internal/event/label/label.go b/vendor/golang.org/x/tools/internal/event/label/label.go index 0f526e1f9..7c00ca2a6 100644 --- a/vendor/golang.org/x/tools/internal/event/label/label.go +++ b/vendor/golang.org/x/tools/internal/event/label/label.go @@ -32,7 +32,7 @@ type Key interface { type Label struct { key Key packed uint64 - untyped interface{} + untyped any } // Map is the interface to a collection of Labels indexed by key. @@ -76,13 +76,13 @@ type mapChain struct { // OfValue creates a new label from the key and value. // This method is for implementing new key types, label creation should // normally be done with the Of method of the key. -func OfValue(k Key, value interface{}) Label { return Label{key: k, untyped: value} } +func OfValue(k Key, value any) Label { return Label{key: k, untyped: value} } // UnpackValue assumes the label was built using LabelOfValue and returns the value // that was passed to that constructor. // This method is for implementing new key types, for type safety normal // access should be done with the From method of the key. -func (t Label) UnpackValue() interface{} { return t.untyped } +func (t Label) UnpackValue() any { return t.untyped } // Of64 creates a new label from a key and a uint64. This is often // used for non uint64 values that can be packed into a uint64. 
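
The IntuitiveMethodSet helper added in ui.go above is easiest to see on a concrete type with mixed receivers, where it also reports the *T methods. A minimal sketch (the package source is illustrative):

package main

import (
	"fmt"
	"go/ast"
	"go/importer"
	"go/parser"
	"go/token"
	"go/types"

	"golang.org/x/tools/go/types/typeutil"
)

func main() {
	const src = `package p
type T int
func (T) Value()    {}
func (*T) Pointer() {}
`
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	conf := types.Config{Importer: importer.Default()}
	pkg, err := conf.Check("p", fset, []*ast.File{f}, nil)
	if err != nil {
		panic(err)
	}
	T := pkg.Scope().Lookup("T").Type()

	// types.NewMethodSet(T) alone reports only Value; the intuitive set of
	// an addressable T includes Pointer as well.
	var cache typeutil.MethodSetCache
	for _, sel := range typeutil.IntuitiveMethodSet(T, &cache) {
		fmt.Println(sel.Obj().Name())
	}
}
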
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/bimport.go b/vendor/golang.org/x/tools/internal/gcimporter/bimport.go index d98b0db2a..734c46198 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/bimport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/bimport.go @@ -14,7 +14,7 @@ import ( "sync" ) -func errorf(format string, args ...interface{}) { +func errorf(format string, args ...any) { panic(fmt.Sprintf(format, args...)) } @@ -87,64 +87,3 @@ func chanDir(d int) types.ChanDir { return 0 } } - -var predeclOnce sync.Once -var predecl []types.Type // initialized lazily - -func predeclared() []types.Type { - predeclOnce.Do(func() { - // initialize lazily to be sure that all - // elements have been initialized before - predecl = []types.Type{ // basic types - types.Typ[types.Bool], - types.Typ[types.Int], - types.Typ[types.Int8], - types.Typ[types.Int16], - types.Typ[types.Int32], - types.Typ[types.Int64], - types.Typ[types.Uint], - types.Typ[types.Uint8], - types.Typ[types.Uint16], - types.Typ[types.Uint32], - types.Typ[types.Uint64], - types.Typ[types.Uintptr], - types.Typ[types.Float32], - types.Typ[types.Float64], - types.Typ[types.Complex64], - types.Typ[types.Complex128], - types.Typ[types.String], - - // basic type aliases - types.Universe.Lookup("byte").Type(), - types.Universe.Lookup("rune").Type(), - - // error - types.Universe.Lookup("error").Type(), - - // untyped types - types.Typ[types.UntypedBool], - types.Typ[types.UntypedInt], - types.Typ[types.UntypedRune], - types.Typ[types.UntypedFloat], - types.Typ[types.UntypedComplex], - types.Typ[types.UntypedString], - types.Typ[types.UntypedNil], - - // package unsafe - types.Typ[types.UnsafePointer], - - // invalid type - types.Typ[types.Invalid], // only appears in packages with errors - - // used internally by gc; never used by this package or in .a files - anyType{}, - } - predecl = append(predecl, additionalPredeclared()...) - }) - return predecl -} - -type anyType struct{} - -func (t anyType) Underlying() types.Type { return t } -func (t anyType) String() string { return "any" } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go b/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go index f6437feb1..5662a311d 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go @@ -2,49 +2,183 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// This file is a copy of $GOROOT/src/go/internal/gcimporter/exportdata.go. - -// This file implements FindExportData. +// This file should be kept in sync with $GOROOT/src/internal/exportdata/exportdata.go. +// This file also additionally implements FindExportData for gcexportdata.NewReader. package gcimporter import ( "bufio" + "bytes" + "errors" "fmt" + "go/build" "io" - "strconv" + "os" + "os/exec" + "path/filepath" "strings" + "sync" ) -func readGopackHeader(r *bufio.Reader) (name string, size int64, err error) { - // See $GOROOT/include/ar.h. - hdr := make([]byte, 16+12+6+6+8+10+2) - _, err = io.ReadFull(r, hdr) +// FindExportData positions the reader r at the beginning of the +// export data section of an underlying cmd/compile created archive +// file by reading from it. The reader must be positioned at the +// start of the file before calling this function. +// This returns the length of the export data in bytes. 
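+// (This length excludes the object headers before the export data, the
+// "$$B\n" marker itself, and the trailing end-of-section marker; the code
+// below subtracts each of them from the archive member size.)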
+//
+// This function is needed by [gcexportdata.Read], which must
+// accept inputs produced by the last two releases of cmd/compile,
+// plus tip.
+func FindExportData(r *bufio.Reader) (size int64, err error) {
+	arsize, err := FindPackageDefinition(r)
+	if err != nil {
+		return
+	}
+	size = int64(arsize)
+
+	objapi, headers, err := ReadObjectHeaders(r)
 	if err != nil {
 		return
 	}
-	// leave for debugging
-	if false {
-		fmt.Printf("header: %s", hdr)
+	size -= int64(len(objapi))
+	for _, h := range headers {
+		size -= int64(len(h))
+	}
+
+	// Check for the binary export data section header "$$B\n".
+	// TODO(taking): Unify with ReadExportDataHeader so that it stops at the 'u' instead of reading
+	line, err := r.ReadSlice('\n')
+	if err != nil {
+		return
+	}
+	hdr := string(line)
+	if hdr != "$$B\n" {
+		err = fmt.Errorf("unknown export data header: %q", hdr)
+		return
 	}
-	s := strings.TrimSpace(string(hdr[16+12+6+6+8:][:10]))
-	length, err := strconv.Atoi(s)
-	size = int64(length)
-	if err != nil || hdr[len(hdr)-2] != '`' || hdr[len(hdr)-1] != '\n' {
-		err = fmt.Errorf("invalid archive header")
+	size -= int64(len(hdr))
+
+	// For files with a binary export data header "$$B\n",
+	// these are always terminated by an end-of-section marker "\n$$\n".
+	// So the last bytes must always be this constant.
+	//
+	// The end-of-section marker is not a part of the export data itself.
+	// Do not include these in size.
+	//
+	// It would be nice to have a sanity check that the final bytes after
+	// the export data are indeed the end-of-section marker. The split
+	// of gcexportdata.NewReader and gcexportdata.Read makes checking this
+	// ugly so gcimporter gives up enforcing this. The compiler and go/types
+	// importer do enforce this, which seems good enough.
+	const endofsection = "\n$$\n"
+	size -= int64(len(endofsection))
+
+	if size < 0 {
+		err = fmt.Errorf("invalid size (%d) in the archive file: %d bytes remain without section headers (recompile package)", arsize, size)
 		return
 	}
-	name = strings.TrimSpace(string(hdr[:16]))
+
 	return
 }
 
-// FindExportData positions the reader r at the beginning of the
-// export data section of an underlying GC-created object/archive
-// file by reading from it. The reader must be positioned at the
-// start of the file before calling this function. The hdr result
-// is the string before the export data, either "$$" or "$$B".
-// The size result is the length of the export data in bytes, or -1 if not known.
-func FindExportData(r *bufio.Reader) (hdr string, size int64, err error) {
+// ReadUnified reads the contents of the unified export data from a reader r
+// that contains the contents of a GC-created archive file.
+//
+// On success, the reader will be positioned after the end-of-section marker "\n$$\n".
+//
+// Supported GC-created archive files have 4 layers of nesting:
+//   - An archive file containing a package definition file.
+//   - The package definition file contains headers followed by a data section.
+//     Headers are lines (≤ 4kb) that do not start with "$$".
+//   - The data section starts with "$$B\n" followed by export data followed
+//     by an end of section marker "\n$$\n". (The section start "$$\n" is no
+//     longer supported.)
+//   - The export data starts with a format byte ('u') followed by the data
+//     in the given format. (See ReadExportDataHeader for older formats.)
+//
+// Putting this together, the bytes in a GC-created archive file are expected
+// to look like the following.
+// See cmd/internal/archive for more details on ar file headers.
+// +// | \n | ar file signature +// | __.PKGDEF...size...\n | ar header for __.PKGDEF including size. +// | go object <...>\n | objabi header +// | \n | other headers such as build id +// | $$B\n | binary format marker +// | u\n | unified export +// | $$\n | end-of-section marker +// | [optional padding] | padding byte (0x0A) if size is odd +// | [ar file header] | other ar files +// | [ar file data] | +func ReadUnified(r *bufio.Reader) (data []byte, err error) { + // We historically guaranteed headers at the default buffer size (4096) work. + // This ensures we can use ReadSlice throughout. + const minBufferSize = 4096 + r = bufio.NewReaderSize(r, minBufferSize) + + size, err := FindPackageDefinition(r) + if err != nil { + return + } + n := size + + objapi, headers, err := ReadObjectHeaders(r) + if err != nil { + return + } + n -= len(objapi) + for _, h := range headers { + n -= len(h) + } + + hdrlen, err := ReadExportDataHeader(r) + if err != nil { + return + } + n -= hdrlen + + // size also includes the end of section marker. Remove that many bytes from the end. + const marker = "\n$$\n" + n -= len(marker) + + if n < 0 { + err = fmt.Errorf("invalid size (%d) in the archive file: %d bytes remain without section headers (recompile package)", size, n) + return + } + + // Read n bytes from buf. + data = make([]byte, n) + _, err = io.ReadFull(r, data) + if err != nil { + return + } + + // Check for marker at the end. + var suffix [len(marker)]byte + _, err = io.ReadFull(r, suffix[:]) + if err != nil { + return + } + if s := string(suffix[:]); s != marker { + err = fmt.Errorf("read %q instead of end-of-section marker (%q)", s, marker) + return + } + + return +} + +// FindPackageDefinition positions the reader r at the beginning of a package +// definition file ("__.PKGDEF") within a GC-created archive by reading +// from it, and returns the size of the package definition file in the archive. +// +// The reader must be positioned at the start of the archive file before calling +// this function, and "__.PKGDEF" is assumed to be the first file in the archive. +// +// See cmd/internal/archive for details on the archive format. +func FindPackageDefinition(r *bufio.Reader) (size int, err error) { + // Uses ReadSlice to limit risk of malformed inputs. + // Read first line to make sure this is an object file. line, err := r.ReadSlice('\n') if err != nil { @@ -52,48 +186,236 @@ func FindExportData(r *bufio.Reader) (hdr string, size int64, err error) { return } - if string(line) == "!\n" { - // Archive file. Scan to __.PKGDEF. - var name string - if name, size, err = readGopackHeader(r); err != nil { - return - } + // Is the first line an archive file signature? + if string(line) != "!\n" { + err = fmt.Errorf("not the start of an archive file (%q)", line) + return + } + + // package export block should be first + size = readArchiveHeader(r, "__.PKGDEF") + if size <= 0 { + err = fmt.Errorf("not a package file") + return + } + + return +} - // First entry should be __.PKGDEF. - if name != "__.PKGDEF" { - err = fmt.Errorf("go archive is missing __.PKGDEF") +// ReadObjectHeaders reads object headers from the reader. Object headers are +// lines that do not start with an end-of-section marker "$$". The first header +// is the objabi header. On success, the reader will be positioned at the beginning +// of the end-of-section marker. +// +// It returns an error if any header does not fit in r.Size() bytes. 
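+// (For a *bufio.Reader, r.Size() is the capacity of its underlying buffer;
+// ReadSlice returns bufio.ErrBufferFull for any longer header line.)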
+func ReadObjectHeaders(r *bufio.Reader) (objapi string, headers []string, err error) { + // line is a temporary buffer for headers. + // Use bounded reads (ReadSlice, Peek) to limit risk of malformed inputs. + var line []byte + + // objapi header should be the first line + if line, err = r.ReadSlice('\n'); err != nil { + err = fmt.Errorf("can't find export data (%v)", err) + return + } + objapi = string(line) + + // objapi header begins with "go object ". + if !strings.HasPrefix(objapi, "go object ") { + err = fmt.Errorf("not a go object file: %s", objapi) + return + } + + // process remaining object header lines + for { + // check for an end of section marker "$$" + line, err = r.Peek(2) + if err != nil { return } + if string(line) == "$$" { + return // stop + } - // Read first line of __.PKGDEF data, so that line - // is once again the first line of the input. - if line, err = r.ReadSlice('\n'); err != nil { - err = fmt.Errorf("can't find export data (%v)", err) + // read next header + line, err = r.ReadSlice('\n') + if err != nil { return } - size -= int64(len(line)) + headers = append(headers, string(line)) } +} - // Now at __.PKGDEF in archive or still at beginning of file. - // Either way, line should begin with "go object ". - if !strings.HasPrefix(string(line), "go object ") { - err = fmt.Errorf("not a Go object file") +// ReadExportDataHeader reads the export data header and format from r. +// It returns the number of bytes read, or an error if the format is no longer +// supported or it failed to read. +// +// The only currently supported format is binary export data in the +// unified export format. +func ReadExportDataHeader(r *bufio.Reader) (n int, err error) { + // Read export data header. + line, err := r.ReadSlice('\n') + if err != nil { return } - // Skip over object header to export data. - // Begins after first line starting with $$. - for line[0] != '$' { - if line, err = r.ReadSlice('\n'); err != nil { - err = fmt.Errorf("can't find export data (%v)", err) + hdr := string(line) + switch hdr { + case "$$\n": + err = fmt.Errorf("old textual export format no longer supported (recompile package)") + return + + case "$$B\n": + var format byte + format, err = r.ReadByte() + if err != nil { return } - size -= int64(len(line)) - } - hdr = string(line) - if size < 0 { - size = -1 + // The unified export format starts with a 'u'. + switch format { + case 'u': + default: + // Older no longer supported export formats include: + // indexed export format which started with an 'i'; and + // the older binary export format which started with a 'c', + // 'd', or 'v' (from "version"). + err = fmt.Errorf("binary export format %q is no longer supported (recompile package)", format) + return + } + + default: + err = fmt.Errorf("unknown export data header: %q", hdr) + return } + n = len(hdr) + 1 // + 1 is for 'u' return } + +// FindPkg returns the filename and unique package id for an import +// path based on package information provided by build.Import (using +// the build.Default build.Context). A relative srcDir is interpreted +// relative to the current working directory. +// +// FindPkg is only used in tests within x/tools. +func FindPkg(path, srcDir string) (filename, id string, err error) { + // TODO(taking): Move internal/exportdata.FindPkg into its own file, + // and then this copy into a _test package. 
+ if path == "" { + return "", "", errors.New("path is empty") + } + + var noext string + switch { + default: + // "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x" + // Don't require the source files to be present. + if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282 + srcDir = abs + } + var bp *build.Package + bp, err = build.Import(path, srcDir, build.FindOnly|build.AllowBinary) + if bp.PkgObj == "" { + if bp.Goroot && bp.Dir != "" { + filename, err = lookupGorootExport(bp.Dir) + if err == nil { + _, err = os.Stat(filename) + } + if err == nil { + return filename, bp.ImportPath, nil + } + } + goto notfound + } else { + noext = strings.TrimSuffix(bp.PkgObj, ".a") + } + id = bp.ImportPath + + case build.IsLocalImport(path): + // "./x" -> "/this/directory/x.ext", "/this/directory/x" + noext = filepath.Join(srcDir, path) + id = noext + + case filepath.IsAbs(path): + // for completeness only - go/build.Import + // does not support absolute imports + // "/x" -> "/x.ext", "/x" + noext = path + id = path + } + + if false { // for debugging + if path != id { + fmt.Printf("%s -> %s\n", path, id) + } + } + + // try extensions + for _, ext := range pkgExts { + filename = noext + ext + f, statErr := os.Stat(filename) + if statErr == nil && !f.IsDir() { + return filename, id, nil + } + if err == nil { + err = statErr + } + } + +notfound: + if err == nil { + return "", path, fmt.Errorf("can't find import: %q", path) + } + return "", path, fmt.Errorf("can't find import: %q: %w", path, err) +} + +var pkgExts = [...]string{".a", ".o"} // a file from the build cache will have no extension + +var exportMap sync.Map // package dir → func() (string, error) + +// lookupGorootExport returns the location of the export data +// (normally found in the build cache, but located in GOROOT/pkg +// in prior Go releases) for the package located in pkgDir. +// +// (We use the package's directory instead of its import path +// mainly to simplify handling of the packages in src/vendor +// and cmd/vendor.) +// +// lookupGorootExport is only used in tests within x/tools. 
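+// The result of the "go list -export" invocation is memoized per package
+// directory in exportMap, so the subprocess runs at most once for each
+// pkgDir over the life of the process.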
+func lookupGorootExport(pkgDir string) (string, error) { + f, ok := exportMap.Load(pkgDir) + if !ok { + var ( + listOnce sync.Once + exportPath string + err error + ) + f, _ = exportMap.LoadOrStore(pkgDir, func() (string, error) { + listOnce.Do(func() { + cmd := exec.Command(filepath.Join(build.Default.GOROOT, "bin", "go"), "list", "-export", "-f", "{{.Export}}", pkgDir) + cmd.Dir = build.Default.GOROOT + cmd.Env = append(os.Environ(), "PWD="+cmd.Dir, "GOROOT="+build.Default.GOROOT) + var output []byte + output, err = cmd.Output() + if err != nil { + if ee, ok := err.(*exec.ExitError); ok && len(ee.Stderr) > 0 { + err = errors.New(string(ee.Stderr)) + } + return + } + + exports := strings.Split(string(bytes.TrimSpace(output)), "\n") + if len(exports) != 1 { + err = fmt.Errorf("go list reported %d exports; expected 1", len(exports)) + return + } + + exportPath = exports[0] + }) + + return exportPath, err + }) + } + + return f.(func() (string, error))() +} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go index 39df91124..3dbd21d1b 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go @@ -23,17 +23,11 @@ package gcimporter // import "golang.org/x/tools/internal/gcimporter" import ( "bufio" - "bytes" "fmt" - "go/build" "go/token" "go/types" "io" "os" - "os/exec" - "path/filepath" - "strings" - "sync" ) const ( @@ -45,125 +39,14 @@ const ( trace = false ) -var exportMap sync.Map // package dir → func() (string, bool) - -// lookupGorootExport returns the location of the export data -// (normally found in the build cache, but located in GOROOT/pkg -// in prior Go releases) for the package located in pkgDir. -// -// (We use the package's directory instead of its import path -// mainly to simplify handling of the packages in src/vendor -// and cmd/vendor.) -func lookupGorootExport(pkgDir string) (string, bool) { - f, ok := exportMap.Load(pkgDir) - if !ok { - var ( - listOnce sync.Once - exportPath string - ) - f, _ = exportMap.LoadOrStore(pkgDir, func() (string, bool) { - listOnce.Do(func() { - cmd := exec.Command("go", "list", "-export", "-f", "{{.Export}}", pkgDir) - cmd.Dir = build.Default.GOROOT - var output []byte - output, err := cmd.Output() - if err != nil { - return - } - - exports := strings.Split(string(bytes.TrimSpace(output)), "\n") - if len(exports) != 1 { - return - } - - exportPath = exports[0] - }) - - return exportPath, exportPath != "" - }) - } - - return f.(func() (string, bool))() -} - -var pkgExts = [...]string{".a", ".o"} - -// FindPkg returns the filename and unique package id for an import -// path based on package information provided by build.Import (using -// the build.Default build.Context). A relative srcDir is interpreted -// relative to the current working directory. -// If no file was found, an empty filename is returned. -func FindPkg(path, srcDir string) (filename, id string) { - if path == "" { - return - } - - var noext string - switch { - default: - // "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x" - // Don't require the source files to be present. 
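
Stepping back from the internals for a moment: FindExportData, ReadUnified, and FindPkg above are the machinery behind the public golang.org/x/tools/go/gcexportdata package. A minimal sketch against that public API; note that Find consults go/build and may return an empty filename under module mode, which the sketch treats as fatal:

package main

import (
	"fmt"
	"go/token"
	"go/types"
	"log"
	"os"

	"golang.org/x/tools/go/gcexportdata"
)

func main() {
	// Locate the export data for a package (build cache or GOPATH).
	filename, path := gcexportdata.Find("fmt", "")
	if filename == "" {
		log.Fatal("export data for fmt not found")
	}

	f, err := os.Open(filename)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// NewReader positions the stream at the export data; internally this
	// is the FindExportData path shown above.
	r, err := gcexportdata.NewReader(f)
	if err != nil {
		log.Fatal(err)
	}

	fset := token.NewFileSet()
	imports := make(map[string]*types.Package)
	pkg, err := gcexportdata.Read(r, fset, imports, path)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(pkg.Path(), "exports", pkg.Scope().Len(), "objects")
}
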
- if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282 - srcDir = abs - } - bp, _ := build.Import(path, srcDir, build.FindOnly|build.AllowBinary) - if bp.PkgObj == "" { - var ok bool - if bp.Goroot && bp.Dir != "" { - filename, ok = lookupGorootExport(bp.Dir) - } - if !ok { - id = path // make sure we have an id to print in error message - return - } - } else { - noext = strings.TrimSuffix(bp.PkgObj, ".a") - id = bp.ImportPath - } - - case build.IsLocalImport(path): - // "./x" -> "/this/directory/x.ext", "/this/directory/x" - noext = filepath.Join(srcDir, path) - id = noext - - case filepath.IsAbs(path): - // for completeness only - go/build.Import - // does not support absolute imports - // "/x" -> "/x.ext", "/x" - noext = path - id = path - } - - if false { // for debugging - if path != id { - fmt.Printf("%s -> %s\n", path, id) - } - } - - if filename != "" { - if f, err := os.Stat(filename); err == nil && !f.IsDir() { - return - } - } - - // try extensions - for _, ext := range pkgExts { - filename = noext + ext - if f, err := os.Stat(filename); err == nil && !f.IsDir() { - return - } - } - - filename = "" // not found - return -} - // Import imports a gc-generated package given its import path and srcDir, adds // the corresponding package object to the packages map, and returns the object. // The packages map must contain all packages already imported. -func Import(packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) { +// +// Import is only used in tests. +func Import(fset *token.FileSet, packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) { var rc io.ReadCloser - var filename, id string + var id string if lookup != nil { // With custom lookup specified, assume that caller has // converted path to a canonical import path for use in the map. @@ -182,12 +65,13 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func } rc = f } else { - filename, id = FindPkg(path, srcDir) + var filename string + filename, id, err = FindPkg(path, srcDir) if filename == "" { if path == "unsafe" { return types.Unsafe, nil } - return nil, fmt.Errorf("can't find import: %q", id) + return nil, err } // no need to re-import if the package was imported completely before @@ -210,57 +94,15 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func } defer rc.Close() - var hdr string - var size int64 buf := bufio.NewReader(rc) - if hdr, size, err = FindExportData(buf); err != nil { + data, err := ReadUnified(buf) + if err != nil { + err = fmt.Errorf("import %q: %v", path, err) return } - switch hdr { - case "$$B\n": - var data []byte - data, err = io.ReadAll(buf) - if err != nil { - break - } - - // TODO(gri): allow clients of go/importer to provide a FileSet. - // Or, define a new standard go/types/gcexportdata package. - fset := token.NewFileSet() - - // Select appropriate importer. 
- if len(data) > 0 { - switch data[0] { - case 'v', 'c', 'd': // binary, till go1.10 - return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0]) - - case 'i': // indexed, till go1.19 - _, pkg, err := IImportData(fset, packages, data[1:], id) - return pkg, err - - case 'u': // unified, from go1.20 - _, pkg, err := UImportData(fset, packages, data[1:size], id) - return pkg, err - - default: - l := len(data) - if l > 10 { - l = 10 - } - return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), id) - } - } - - default: - err = fmt.Errorf("unknown export data header: %q", hdr) - } + // unified: emitted by cmd/compile since go1.20. + _, pkg, err = UImportData(fset, packages, data, id) return } - -type byPath []*types.Package - -func (a byPath) Len() int { return len(a) } -func (a byPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byPath) Less(i, j int) bool { return a[i].Path() < a[j].Path() } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go index 5f283281a..253d6493c 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go @@ -242,11 +242,30 @@ import ( "golang.org/x/tools/go/types/objectpath" "golang.org/x/tools/internal/aliases" - "golang.org/x/tools/internal/tokeninternal" ) // IExportShallow encodes "shallow" export data for the specified package. // +// For types, we use "shallow" export data. Historically, the Go +// compiler always produced a summary of the types for a given package +// that included types from other packages that it indirectly +// referenced: "deep" export data. This had the advantage that the +// compiler (and analogous tools such as gopls) need only load one +// file per direct import. However, it meant that the files tended to +// get larger based on the level of the package in the import +// graph. For example, higher-level packages in the kubernetes module +// have over 1MB of "deep" export data, even when they have almost no +// content of their own, merely because they mention a major type that +// references many others. In pathological cases the export data was +// 300x larger than the source for a package due to this quadratic +// growth. +// +// "Shallow" export data means that the serialized types describe only +// a single package. If those types mention types from other packages, +// the type checker may need to request additional packages beyond +// just the direct imports. Type information for the entire transitive +// closure of imports is provided (lazily) by the DAG. +// // No promises are made about the encoding other than that it can be decoded by // the same version of IIExportShallow. If you plan to save export data in the // file system, be sure to include a cryptographic digest of the executable in @@ -269,8 +288,8 @@ func IExportShallow(fset *token.FileSet, pkg *types.Package, reportf ReportFunc) } // IImportShallow decodes "shallow" types.Package data encoded by -// IExportShallow in the same executable. This function cannot import data from -// cmd/compile or gcexportdata.Write. +// [IExportShallow] in the same executable. This function cannot import data +// from cmd/compile or gcexportdata.Write. 
// // The importer calls getPackages to obtain package symbols for all // packages mentioned in the export data, including the one being @@ -291,7 +310,7 @@ func IImportShallow(fset *token.FileSet, getPackages GetPackagesFunc, data []byt } // ReportFunc is the type of a function used to report formatted bugs. -type ReportFunc = func(string, ...interface{}) +type ReportFunc = func(string, ...any) // Current bundled export format version. Increase with each format change. // 0: initial implementation @@ -441,7 +460,7 @@ func (p *iexporter) encodeFile(w *intWriter, file *token.File, needed []uint64) // Sort the set of needed offsets. Duplicates are harmless. sort.Slice(needed, func(i, j int) bool { return needed[i] < needed[j] }) - lines := tokeninternal.GetLines(file) // byte offset of each line start + lines := file.Lines() // byte offset of each line start w.uint64(uint64(len(lines))) // Rather than record the entire array of line start offsets, @@ -578,7 +597,7 @@ type filePositions struct { needed []uint64 // unordered list of needed file offsets } -func (p *iexporter) trace(format string, args ...interface{}) { +func (p *iexporter) trace(format string, args ...any) { if !trace { // Call sites should also be guarded, but having this check here allows // easily enabling/disabling debug trace statements. @@ -725,13 +744,13 @@ func (p *iexporter) doDecl(obj types.Object) { case *types.TypeName: t := obj.Type() - if tparam, ok := aliases.Unalias(t).(*types.TypeParam); ok { + if tparam, ok := types.Unalias(t).(*types.TypeParam); ok { w.tag(typeParamTag) w.pos(obj.Pos()) constraint := tparam.Constraint() if p.version >= iexportVersionGo1_18 { implicit := false - if iface, _ := aliases.Unalias(constraint).(*types.Interface); iface != nil { + if iface, _ := types.Unalias(constraint).(*types.Interface); iface != nil { implicit = iface.IsImplicit() } w.bool(implicit) @@ -741,7 +760,7 @@ func (p *iexporter) doDecl(obj types.Object) { } if obj.IsAlias() { - alias, materialized := t.(*aliases.Alias) // may fail when aliases are not enabled + alias, materialized := t.(*types.Alias) // may fail when aliases are not enabled var tparams *types.TypeParamList if materialized { @@ -975,7 +994,7 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { }() } switch t := t.(type) { - case *aliases.Alias: + case *types.Alias: if targs := aliases.TypeArgs(t); targs.Len() > 0 { w.startType(instanceType) w.pos(t.Obj().Pos()) @@ -1091,7 +1110,7 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { for i := 0; i < n; i++ { ft := t.EmbeddedType(i) tPkg := pkg - if named, _ := aliases.Unalias(ft).(*types.Named); named != nil { + if named, _ := types.Unalias(ft).(*types.Named); named != nil { w.pos(named.Obj().Pos()) } else { w.pos(token.NoPos) @@ -1564,6 +1583,6 @@ func (e internalError) Error() string { return "gcimporter: " + string(e) } // "internalErrorf" as the former is used for bugs, whose cause is // internal inconsistency, whereas the latter is used for ordinary // situations like bad input, whose cause is external. 
-func internalErrorf(format string, args ...interface{}) error { +func internalErrorf(format string, args ...any) error { return internalError(fmt.Sprintf(format, args...)) } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go index ed2d56295..bc6c9741e 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go @@ -5,8 +5,6 @@ // Indexed package import. // See iexport.go for the export data format. -// This file is a copy of $GOROOT/src/go/internal/gcimporter/iimport.go. - package gcimporter import ( @@ -53,6 +51,7 @@ const ( iexportVersionPosCol = 1 iexportVersionGo1_18 = 2 iexportVersionGenerics = 2 + iexportVersion = iexportVersionGenerics iexportVersionCurrent = 2 ) @@ -401,7 +400,7 @@ type iimporter struct { indent int // for tracing support } -func (p *iimporter) trace(format string, args ...interface{}) { +func (p *iimporter) trace(format string, args ...any) { if !trace { // Call sites should also be guarded, but having this check here allows // easily enabling/disabling debug trace statements. @@ -540,7 +539,7 @@ func canReuse(def *types.Named, rhs types.Type) bool { if def == nil { return true } - iface, _ := aliases.Unalias(rhs).(*types.Interface) + iface, _ := types.Unalias(rhs).(*types.Interface) if iface == nil { return true } @@ -557,6 +556,14 @@ type importReader struct { prevColumn int64 } +// markBlack is redefined in iimport_go123.go, to work around golang/go#69912. +// +// If TypeNames are not marked black (in the sense of go/types cycle +// detection), they may be mutated when dot-imported. Fix this by punching a +// hole through the type, when compiling with Go 1.23. (The bug has been fixed +// for 1.24, but the fix was not worth back-porting). +var markBlack = func(name *types.TypeName) {} + func (r *importReader) obj(name string) { tag := r.byte() pos := r.pos() @@ -569,6 +576,7 @@ func (r *importReader) obj(name string) { } typ := r.typ() obj := aliases.NewAlias(r.p.aliases, pos, r.currPkg, name, typ, tparams) + markBlack(obj) // workaround for golang/go#69912 r.declare(obj) case constTag: @@ -589,6 +597,9 @@ func (r *importReader) obj(name string) { // declaration before recursing. obj := types.NewTypeName(pos, r.currPkg, name, nil) named := types.NewNamed(obj, nil, nil) + + markBlack(obj) // workaround for golang/go#69912 + // Declare obj before calling r.tparamList, so the new type name is recognized // if used in the constraint of one of its own typeparams (see #48280). 
r.declare(obj) @@ -615,7 +626,7 @@ func (r *importReader) obj(name string) { if targs.Len() > 0 { rparams = make([]*types.TypeParam, targs.Len()) for i := range rparams { - rparams[i] = aliases.Unalias(targs.At(i)).(*types.TypeParam) + rparams[i] = types.Unalias(targs.At(i)).(*types.TypeParam) } } msig := r.signature(recv, rparams, nil) @@ -645,7 +656,7 @@ func (r *importReader) obj(name string) { } constraint := r.typ() if implicit { - iface, _ := aliases.Unalias(constraint).(*types.Interface) + iface, _ := types.Unalias(constraint).(*types.Interface) if iface == nil { errorf("non-interface constraint marked implicit") } @@ -660,7 +671,9 @@ func (r *importReader) obj(name string) { case varTag: typ := r.typ() - r.declare(types.NewVar(pos, r.currPkg, name, typ)) + v := types.NewVar(pos, r.currPkg, name, typ) + typesinternal.SetVarKind(v, typesinternal.PackageVar) + r.declare(v) default: errorf("unexpected tag: %v", tag) @@ -852,7 +865,7 @@ func (r *importReader) typ() types.Type { } func isInterface(t types.Type) bool { - _, ok := aliases.Unalias(t).(*types.Interface) + _, ok := types.Unalias(t).(*types.Interface) return ok } @@ -959,7 +972,7 @@ func (r *importReader) doType(base *types.Named) (res types.Type) { methods[i] = method } - typ := newInterface(methods, embeddeds) + typ := types.NewInterfaceType(methods, embeddeds) r.p.interfaceList = append(r.p.interfaceList, typ) return typ @@ -1051,7 +1064,7 @@ func (r *importReader) tparamList() []*types.TypeParam { for i := range xs { // Note: the standard library importer is tolerant of nil types here, // though would panic in SetTypeParams. - xs[i] = aliases.Unalias(r.typ()).(*types.TypeParam) + xs[i] = types.Unalias(r.typ()).(*types.TypeParam) } return xs } @@ -1098,3 +1111,9 @@ func (r *importReader) byte() byte { } return x } + +type byPath []*types.Package + +func (a byPath) Len() int { return len(a) } +func (a byPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byPath) Less(i, j int) bool { return a[i].Path() < a[j].Path() } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport_go122.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport_go122.go new file mode 100644 index 000000000..7586bfaca --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport_go122.go @@ -0,0 +1,53 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.22 && !go1.24 + +package gcimporter + +import ( + "go/token" + "go/types" + "unsafe" +) + +// TODO(rfindley): delete this workaround once go1.24 is assured. + +func init() { + // Update markBlack so that it correctly sets the color + // of imported TypeNames. + // + // See the doc comment for markBlack for details. + + type color uint32 + const ( + white color = iota + black + grey + ) + type object struct { + _ *types.Scope + _ token.Pos + _ *types.Package + _ string + _ types.Type + _ uint32 + color_ color + _ token.Pos + } + type typeName struct { + object + } + + // If the size of types.TypeName changes, this will fail to compile. 
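+	// The assertion works because an array length must be a non-negative
+	// constant: -delta*delta is non-negative only when delta == 0, that is,
+	// only when the mirror struct above has exactly the same size.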
+ const delta = int64(unsafe.Sizeof(typeName{})) - int64(unsafe.Sizeof(types.TypeName{})) + var _ [-delta * delta]int + + markBlack = func(obj *types.TypeName) { + type uP = unsafe.Pointer + var ptr *typeName + *(*uP)(uP(&ptr)) = uP(obj) + ptr.color_ = black + } +} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go b/vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go deleted file mode 100644 index 8b163e3d0..000000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.11 -// +build !go1.11 - -package gcimporter - -import "go/types" - -func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface { - named := make([]*types.Named, len(embeddeds)) - for i, e := range embeddeds { - var ok bool - named[i], ok = e.(*types.Named) - if !ok { - panic("embedding of non-defined interfaces in interfaces is not supported before Go 1.11") - } - } - return types.NewInterface(methods, named) -} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go b/vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go deleted file mode 100644 index 49984f40f..000000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.11 -// +build go1.11 - -package gcimporter - -import "go/types" - -func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface { - return types.NewInterfaceType(methods, embeddeds) -} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/predeclared.go b/vendor/golang.org/x/tools/internal/gcimporter/predeclared.go new file mode 100644 index 000000000..907c8557a --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gcimporter/predeclared.go @@ -0,0 +1,91 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gcimporter + +import ( + "go/types" + "sync" +) + +// predecl is a cache for the predeclared types in types.Universe. +// +// Cache a distinct result based on the runtime value of any. +// The pointer value of the any type varies based on GODEBUG settings. 
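+// (Under GODEBUG gotypesalias=1, the default since Go 1.23, "any" is a
+// *types.Alias; under gotypesalias=0 it is the legacy empty interface type,
+// so the two settings must not share a cached slice.)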
+var predeclMu sync.Mutex +var predecl map[types.Type][]types.Type + +func predeclared() []types.Type { + anyt := types.Universe.Lookup("any").Type() + + predeclMu.Lock() + defer predeclMu.Unlock() + + if pre, ok := predecl[anyt]; ok { + return pre + } + + if predecl == nil { + predecl = make(map[types.Type][]types.Type) + } + + decls := []types.Type{ // basic types + types.Typ[types.Bool], + types.Typ[types.Int], + types.Typ[types.Int8], + types.Typ[types.Int16], + types.Typ[types.Int32], + types.Typ[types.Int64], + types.Typ[types.Uint], + types.Typ[types.Uint8], + types.Typ[types.Uint16], + types.Typ[types.Uint32], + types.Typ[types.Uint64], + types.Typ[types.Uintptr], + types.Typ[types.Float32], + types.Typ[types.Float64], + types.Typ[types.Complex64], + types.Typ[types.Complex128], + types.Typ[types.String], + + // basic type aliases + types.Universe.Lookup("byte").Type(), + types.Universe.Lookup("rune").Type(), + + // error + types.Universe.Lookup("error").Type(), + + // untyped types + types.Typ[types.UntypedBool], + types.Typ[types.UntypedInt], + types.Typ[types.UntypedRune], + types.Typ[types.UntypedFloat], + types.Typ[types.UntypedComplex], + types.Typ[types.UntypedString], + types.Typ[types.UntypedNil], + + // package unsafe + types.Typ[types.UnsafePointer], + + // invalid type + types.Typ[types.Invalid], // only appears in packages with errors + + // used internally by gc; never used by this package or in .a files + anyType{}, + + // comparable + types.Universe.Lookup("comparable").Type(), + + // any + anyt, + } + + predecl[anyt] = decls + return decls +} + +type anyType struct{} + +func (t anyType) Underlying() types.Type { return t } +func (t anyType) String() string { return "any" } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/support.go b/vendor/golang.org/x/tools/internal/gcimporter/support.go new file mode 100644 index 000000000..4af810dc4 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gcimporter/support.go @@ -0,0 +1,30 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gcimporter + +import ( + "bufio" + "io" + "strconv" + "strings" +) + +// Copy of $GOROOT/src/cmd/internal/archive.ReadHeader. +func readArchiveHeader(b *bufio.Reader, name string) int { + // architecture-independent object file output + const HeaderSize = 60 + + var buf [HeaderSize]byte + if _, err := io.ReadFull(b, buf[:]); err != nil { + return -1 + } + aname := strings.Trim(string(buf[0:16]), " ") + if !strings.HasPrefix(aname, name) { + return -1 + } + asize := strings.Trim(string(buf[48:58]), " ") + i, _ := strconv.Atoi(asize) + return i +} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go b/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go deleted file mode 100644 index 0cd3b91b6..000000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gcimporter - -import "go/types" - -const iexportVersion = iexportVersionGenerics - -// additionalPredeclared returns additional predeclared types in go.1.18. 
-func additionalPredeclared() []types.Type { - return []types.Type{ - // comparable - types.Universe.Lookup("comparable").Type(), - - // any - types.Universe.Lookup("any").Type(), - } -} - -// See cmd/compile/internal/types.SplitVargenSuffix. -func splitVargenSuffix(name string) (base, suffix string) { - i := len(name) - for i > 0 && name[i-1] >= '0' && name[i-1] <= '9' { - i-- - } - const dot = "·" - if i >= len(dot) && name[i-len(dot):i] == dot { - i -= len(dot) - return name[:i], name[i:] - } - return name, "" -} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go b/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go deleted file mode 100644 index 38b624cad..000000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !goexperiment.unified -// +build !goexperiment.unified - -package gcimporter - -const unifiedIR = false diff --git a/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go deleted file mode 100644 index b5118d0b3..000000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build goexperiment.unified -// +build goexperiment.unified - -package gcimporter - -const unifiedIR = true diff --git a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go index f0742f540..37b4a39e9 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go @@ -11,10 +11,10 @@ import ( "go/token" "go/types" "sort" - "strings" "golang.org/x/tools/internal/aliases" "golang.org/x/tools/internal/pkgbits" + "golang.org/x/tools/internal/typesinternal" ) // A pkgReader holds the shared state for reading a unified IR package @@ -71,7 +71,6 @@ func UImportData(fset *token.FileSet, imports map[string]*types.Package, data [] } s := string(data) - s = s[:strings.LastIndex(s, "\n$$\n")] input := pkgbits.NewPkgDecoder(path, s) pkg = readUnifiedPackage(fset, nil, imports, input) return @@ -266,7 +265,12 @@ func (pr *pkgReader) pkgIdx(idx pkgbits.Index) *types.Package { func (r *reader) doPkg() *types.Package { path := r.String() switch path { - case "": + // cmd/compile emits path="main" for main packages because + // that's the linker symbol prefix it used; but we need + // the package's path as it would be reported by go list, + // hence "main" below. + // See test at go/packages.TestMainPackagePathInModeTypes. + case "", "main": path = r.p.PkgPath() case "builtin": return nil // universe @@ -562,14 +566,15 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) { // If the underlying type is an interface, we need to // duplicate its methods so we can replace the receiver // parameter's type (#49906). 
-	if iface, ok := aliases.Unalias(underlying).(*types.Interface); ok && iface.NumExplicitMethods() != 0 {
+	if iface, ok := types.Unalias(underlying).(*types.Interface); ok && iface.NumExplicitMethods() != 0 {
 		methods := make([]*types.Func, iface.NumExplicitMethods())
 		for i := range methods {
 			fn := iface.ExplicitMethod(i)
 			sig := fn.Type().(*types.Signature)
 
 			recv := types.NewVar(fn.Pos(), fn.Pkg(), "", named)
-			methods[i] = types.NewFunc(fn.Pos(), fn.Pkg(), fn.Name(), types.NewSignature(recv, sig.Params(), sig.Results(), sig.Variadic()))
+			typesinternal.SetVarKind(recv, typesinternal.RecvVar)
+			methods[i] = types.NewFunc(fn.Pos(), fn.Pkg(), fn.Name(), types.NewSignatureType(recv, nil, nil, sig.Params(), sig.Results(), sig.Variadic()))
 		}
 
 		embeds := make([]types.Type, iface.NumEmbeddeds())
@@ -616,7 +621,9 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) {
 	case pkgbits.ObjVar:
 		pos := r.pos()
 		typ := r.typ()
-		declare(types.NewVar(pos, objPkg, objName, typ))
+		v := types.NewVar(pos, objPkg, objName, typ)
+		typesinternal.SetVarKind(v, typesinternal.PackageVar)
+		declare(v)
 	}
 }
 
@@ -738,3 +745,17 @@ func pkgScope(pkg *types.Package) *types.Scope {
 	}
 	return types.Universe
 }
+
+// See cmd/compile/internal/types.SplitVargenSuffix.
+func splitVargenSuffix(name string) (base, suffix string) {
+	i := len(name)
+	for i > 0 && name[i-1] >= '0' && name[i-1] <= '9' {
+		i--
+	}
+	const dot = "·"
+	if i >= len(dot) && name[i-len(dot):i] == dot {
+		i -= len(dot)
+		return name[:i], name[i:]
+	}
+	return name, ""
+}
diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke.go b/vendor/golang.org/x/tools/internal/gocommand/invoke.go
index 2e59ff855..7ea901344 100644
--- a/vendor/golang.org/x/tools/internal/gocommand/invoke.go
+++ b/vendor/golang.org/x/tools/internal/gocommand/invoke.go
@@ -16,7 +16,6 @@ import (
 	"os"
 	"os/exec"
 	"path/filepath"
-	"reflect"
 	"regexp"
 	"runtime"
 	"strconv"
@@ -29,7 +28,7 @@ import (
 	"golang.org/x/tools/internal/event/label"
 )
 
-// An Runner will run go command invocations and serialize
+// A Runner will run go command invocations and serialize
 // them if it sees a concurrency error.
 type Runner struct {
 	// once guards the runner initialization.
@@ -180,7 +179,7 @@ type Invocation struct {
 	CleanEnv   bool
 	Env        []string
 	WorkingDir string
-	Logf       func(format string, args ...interface{})
+	Logf       func(format string, args ...any)
 }
 
 // Postcondition: both error results have same nilness.
@@ -250,16 +249,13 @@ func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error {
 	cmd.Stdout = stdout
 	cmd.Stderr = stderr
 
-	// cmd.WaitDelay was added only in go1.20 (see #50436).
-	if waitDelay := reflect.ValueOf(cmd).Elem().FieldByName("WaitDelay"); waitDelay.IsValid() {
-		// https://go.dev/issue/59541: don't wait forever copying stderr
-		// after the command has exited.
-		// After CL 484741 we copy stdout manually, so we we'll stop reading that as
-		// soon as ctx is done. However, we also don't want to wait around forever
-		// for stderr. Give a much-longer-than-reasonable delay and then assume that
-		// something has wedged in the kernel or runtime.
-		waitDelay.Set(reflect.ValueOf(30 * time.Second))
-	}
+	// https://go.dev/issue/59541: don't wait forever copying stderr
+	// after the command has exited.
+	// After CL 484741 we copy stdout manually, so we'll stop reading that as
+	// soon as ctx is done. However, we also don't want to wait around forever
+	// for stderr. 
Give a much-longer-than-reasonable delay and then assume that + // something has wedged in the kernel or runtime. + cmd.WaitDelay = 30 * time.Second // The cwd gets resolved to the real path. On Darwin, where // /tmp is a symlink, this breaks anything that expects the @@ -392,7 +388,9 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) (err error) { case err := <-resChan: return err case <-timer.C: - HandleHangingGoCommand(startTime, cmd) + // HandleHangingGoCommand terminates this process. + // Pass off resChan in case we can collect the command error. + handleHangingGoCommand(startTime, cmd, resChan) case <-ctx.Done(): } } else { @@ -417,8 +415,6 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) (err error) { } // Didn't shut down in response to interrupt. Kill it hard. - // TODO(rfindley): per advice from bcmills@, it may be better to send SIGQUIT - // on certain platforms, such as unix. if err := cmd.Process.Kill(); err != nil && !errors.Is(err, os.ErrProcessDone) && debug { log.Printf("error killing the Go command: %v", err) } @@ -426,15 +422,17 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) (err error) { return <-resChan } -func HandleHangingGoCommand(start time.Time, cmd *exec.Cmd) { +// handleHangingGoCommand outputs debugging information to help diagnose the +// cause of a hanging Go command, and then exits with log.Fatalf. +func handleHangingGoCommand(start time.Time, cmd *exec.Cmd, resChan chan error) { switch runtime.GOOS { - case "linux", "darwin", "freebsd", "netbsd": + case "linux", "darwin", "freebsd", "netbsd", "openbsd": fmt.Fprintln(os.Stderr, `DETECTED A HANGING GO COMMAND -The gopls test runner has detected a hanging go command. In order to debug -this, the output of ps and lsof/fstat is printed below. + The gopls test runner has detected a hanging go command. In order to debug + this, the output of ps and lsof/fstat is printed below. -See golang/go#54461 for more details.`) + See golang/go#54461 for more details.`) fmt.Fprintln(os.Stderr, "\nps axo ppid,pid,command:") fmt.Fprintln(os.Stderr, "-------------------------") @@ -442,7 +440,7 @@ See golang/go#54461 for more details.`) psCmd.Stdout = os.Stderr psCmd.Stderr = os.Stderr if err := psCmd.Run(); err != nil { - panic(fmt.Sprintf("running ps: %v", err)) + log.Printf("Handling hanging Go command: running ps: %v", err) } listFiles := "lsof" @@ -456,10 +454,24 @@ See golang/go#54461 for more details.`) listFilesCmd.Stdout = os.Stderr listFilesCmd.Stderr = os.Stderr if err := listFilesCmd.Run(); err != nil { - panic(fmt.Sprintf("running %s: %v", listFiles, err)) + log.Printf("Handling hanging Go command: running %s: %v", listFiles, err) + } + // Try to extract information about the slow go process by issuing a SIGQUIT. 
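+	// (On Unix sigStuckProcess is SIGQUIT, which makes the Go runtime dump
+	// goroutine stacks before exiting; elsewhere it falls back to os.Kill.
+	// See invoke_unix.go and invoke_notunix.go below.)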
+	if err := cmd.Process.Signal(sigStuckProcess); err == nil {
+		select {
+		case err := <-resChan:
+			stderr := "not a bytes.Buffer"
+			if buf, _ := cmd.Stderr.(*bytes.Buffer); buf != nil {
+				stderr = buf.String()
+			}
+			log.Printf("Quit hanging go command:\n\terr:%v\n\tstderr:\n%v\n\n", err, stderr)
+		case <-time.After(5 * time.Second):
+		}
+	} else {
+		log.Printf("Sending signal %d to hanging go command: %v", sigStuckProcess, err)
+	}
 	}
-	panic(fmt.Sprintf("detected hanging go command (golang/go#54461); waited %s\n\tcommand:%s\n\tpid:%d", time.Since(start), cmd, cmd.Process.Pid))
+	log.Fatalf("detected hanging go command (golang/go#54461); waited %s\n\tcommand:%s\n\tpid:%d", time.Since(start), cmd, cmd.Process.Pid)
 }
 
 func cmdDebugStr(cmd *exec.Cmd) string {
diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke_notunix.go b/vendor/golang.org/x/tools/internal/gocommand/invoke_notunix.go
new file mode 100644
index 000000000..469c648e4
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/gocommand/invoke_notunix.go
@@ -0,0 +1,13 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !unix
+
+package gocommand
+
+import "os"
+
+// sigStuckProcess is the signal to send to kill a hanging subprocess.
+// On Unix we send SIGQUIT, but on non-Unix we only have os.Kill.
+var sigStuckProcess = os.Kill
diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke_unix.go b/vendor/golang.org/x/tools/internal/gocommand/invoke_unix.go
new file mode 100644
index 000000000..169d37c8e
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/gocommand/invoke_unix.go
@@ -0,0 +1,13 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix
+
+package gocommand
+
+import "syscall"
+
+// sigStuckProcess is the signal to send to kill a hanging subprocess.
+// Send SIGQUIT to get a stack trace.
+var sigStuckProcess = syscall.SIGQUIT
diff --git a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go
index 44719de17..25ebab663 100644
--- a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go
+++ b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go
@@ -5,8 +5,7 @@
 // Package packagesinternal exposes internal-only fields from go/packages.
 package packagesinternal
 
-var GetForTest = func(p interface{}) string { return "" }
-var GetDepsErrors = func(p interface{}) []*PackageError { return nil }
+var GetDepsErrors = func(p any) []*PackageError { return nil }
 
 type PackageError struct {
 	ImportStack []string // shortest path from package named on command line to this one
@@ -16,7 +15,6 @@ type PackageError struct {
 
 var TypecheckCgo int
 var DepsErrors int // must be set as a LoadMode to call GetDepsErrors
-var ForTest int // must be set as a LoadMode to call GetForTest
 
-var SetModFlag = func(config interface{}, value string) {}
-var SetModFile = func(config interface{}, value string) {}
+var SetModFlag = func(config any, value string) {}
+var SetModFile = func(config any, value string) {}
diff --git a/vendor/golang.org/x/tools/internal/stdlib/deps.go b/vendor/golang.org/x/tools/internal/stdlib/deps.go
new file mode 100644
index 000000000..7cca431cd
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/stdlib/deps.go
@@ -0,0 +1,359 @@
+// Copyright 2025 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate.go. DO NOT EDIT. + +package stdlib + +type pkginfo struct { + name string + deps string // list of indices of dependencies, as varint-encoded deltas +} + +var deps = [...]pkginfo{ + {"archive/tar", "\x03k\x03E5\x01\v\x01#\x01\x01\x02\x05\t\x02\x01\x02\x02\v"}, + {"archive/zip", "\x02\x04a\a\x16\x0205\x01+\x05\x01\x10\x03\x02\r\x04"}, + {"bufio", "\x03k}E\x13"}, + {"bytes", "n+R\x03\fG\x02\x02"}, + {"cmp", ""}, + {"compress/bzip2", "\x02\x02\xe7\x01B"}, + {"compress/flate", "\x02l\x03z\r\x024\x01\x03"}, + {"compress/gzip", "\x02\x04a\a\x03\x15eT"}, + {"compress/lzw", "\x02l\x03z"}, + {"compress/zlib", "\x02\x04a\a\x03\x13\x01f"}, + {"container/heap", "\xae\x02"}, + {"container/list", ""}, + {"container/ring", ""}, + {"context", "n\\h\x01\f"}, + {"crypto", "\x84\x01gD"}, + {"crypto/aes", "\x10\n\a\x8e\x02"}, + {"crypto/cipher", "\x03\x1e\x01\x01\x1d\x11\x1d,Q"}, + {"crypto/des", "\x10\x13\x1d.,\x95\x01\x03"}, + {"crypto/dsa", "@\x04*}\x0e"}, + {"crypto/ecdh", "\x03\v\f\x0e\x04\x14\x04\r\x1d}"}, + {"crypto/ecdsa", "\x0e\x05\x03\x04\x01\x0e\x16\x01\x04\f\x01\x1d}\x0e\x04K\x01"}, + {"crypto/ed25519", "\x0e\x1c\x16\n\a\x1d}D"}, + {"crypto/elliptic", "0>}\x0e9"}, + {"crypto/fips140", " \x05\x91\x01"}, + {"crypto/hkdf", "-\x12\x01.\x16"}, + {"crypto/hmac", "\x1a\x14\x11\x01\x113"}, + {"crypto/internal/boring", "\x0e\x02\rg"}, + {"crypto/internal/boring/bbig", "\x1a\xdf\x01L"}, + {"crypto/internal/boring/bcache", "\xb3\x02\x12"}, + {"crypto/internal/boring/sig", ""}, + {"crypto/internal/cryptotest", "\x03\r\n)\x0e\x1a\x06\x13\x12#\a\t\x11\x11\x11\x1b\x01\f\f\x05\n"}, + {"crypto/internal/entropy", "E"}, + {"crypto/internal/fips140", ">0}9\f\x15"}, + {"crypto/internal/fips140/aes", "\x03\x1d\x03\x02\x13\x04\x01\x01\x05+\x8c\x015"}, + {"crypto/internal/fips140/aes/gcm", " \x01\x02\x02\x02\x11\x04\x01\x06+\x8a\x01"}, + {"crypto/internal/fips140/alias", "\xc5\x02"}, + {"crypto/internal/fips140/bigmod", "%\x17\x01\x06+\x8c\x01"}, + {"crypto/internal/fips140/check", " \x0e\x06\b\x02\xad\x01Z"}, + {"crypto/internal/fips140/check/checktest", "%\xff\x01!"}, + {"crypto/internal/fips140/drbg", "\x03\x1c\x01\x01\x04\x13\x04\b\x01)}\x0f8"}, + {"crypto/internal/fips140/ecdh", "\x03\x1d\x05\x02\t\f2}\x0f8"}, + {"crypto/internal/fips140/ecdsa", "\x03\x1d\x04\x01\x02\a\x02\x068}G"}, + {"crypto/internal/fips140/ed25519", "\x03\x1d\x05\x02\x04\v8\xc1\x01\x03"}, + {"crypto/internal/fips140/edwards25519", "%\a\f\x042\x8c\x018"}, + {"crypto/internal/fips140/edwards25519/field", "%\x13\x042\x8c\x01"}, + {"crypto/internal/fips140/hkdf", "\x03\x1d\x05\t\x06:"}, + {"crypto/internal/fips140/hmac", "\x03\x1d\x14\x01\x018"}, + {"crypto/internal/fips140/mlkem", "\x03\x1d\x05\x02\x0e\x03\x042"}, + {"crypto/internal/fips140/nistec", "%\f\a\x042\x8c\x01*\x0e\x13"}, + {"crypto/internal/fips140/nistec/fiat", "%\x136\x8c\x01"}, + {"crypto/internal/fips140/pbkdf2", "\x03\x1d\x05\t\x06:"}, + {"crypto/internal/fips140/rsa", "\x03\x1d\x04\x01\x02\r\x01\x01\x026}G"}, + {"crypto/internal/fips140/sha256", "\x03\x1d\x1c\x01\x06+\x8c\x01"}, + {"crypto/internal/fips140/sha3", "\x03\x1d\x18\x04\x011\x8c\x01K"}, + {"crypto/internal/fips140/sha512", "\x03\x1d\x1c\x01\x06+\x8c\x01"}, + {"crypto/internal/fips140/ssh", " \x05"}, + {"crypto/internal/fips140/subtle", "#\x19\xbe\x01"}, + {"crypto/internal/fips140/tls12", "\x03\x1d\x05\t\x06\x028"}, + {"crypto/internal/fips140/tls13", 
"\x03\x1d\x05\b\a\b2"}, + {"crypto/internal/fips140deps", ""}, + {"crypto/internal/fips140deps/byteorder", "\x9a\x01"}, + {"crypto/internal/fips140deps/cpu", "\xae\x01\a"}, + {"crypto/internal/fips140deps/godebug", "\xb6\x01"}, + {"crypto/internal/fips140hash", "5\x1a5\xc1\x01"}, + {"crypto/internal/fips140only", "'\r\x01\x01N25"}, + {"crypto/internal/fips140test", ""}, + {"crypto/internal/hpke", "\x0e\x01\x01\x03\x1a\x1d$,`M"}, + {"crypto/internal/impl", "\xb0\x02"}, + {"crypto/internal/randutil", "\xeb\x01\x12"}, + {"crypto/internal/sysrand", "\xd7\x01@\x1b\x01\f\x06"}, + {"crypto/internal/sysrand/internal/seccomp", "n"}, + {"crypto/md5", "\x0e2.\x16\x16`"}, + {"crypto/mlkem", "/"}, + {"crypto/pbkdf2", "2\r\x01.\x16"}, + {"crypto/rand", "\x1a\x06\a\x19\x04\x01)}\x0eL"}, + {"crypto/rc4", "#\x1d.\xc1\x01"}, + {"crypto/rsa", "\x0e\f\x01\t\x0f\f\x01\x04\x06\a\x1d\x03\x1325\r\x01"}, + {"crypto/sha1", "\x0e\f&.\x16\x16\x14L"}, + {"crypto/sha256", "\x0e\f\x1aP"}, + {"crypto/sha3", "\x0e'O\xc1\x01"}, + {"crypto/sha512", "\x0e\f\x1cN"}, + {"crypto/subtle", "8\x98\x01T"}, + {"crypto/tls", "\x03\b\x02\x01\x01\x01\x01\x02\x01\x01\x01\x03\x01\a\x01\v\x02\n\x01\b\x05\x03\x01\x01\x01\x01\x02\x01\x02\x01\x18\x02\x03\x13\x16\x14\b5\x16\x16\r\t\x01\x01\x01\x02\x01\f\x06\x02\x01"}, + {"crypto/tls/internal/fips140tls", " \x93\x02"}, + {"crypto/x509", "\x03\v\x01\x01\x01\x01\x01\x01\x01\x011\x03\x02\x01\x01\x02\x05\x01\x0e\x06\x02\x02\x03E5\x03\t\x01\x01\x01\a\x10\x05\t\x05\v\x01\x02\r\x02\x01\x01\x02\x03\x01"}, + {"crypto/x509/internal/macos", "\x03k'\x8f\x01\v\x10\x06"}, + {"crypto/x509/pkix", "d\x06\a\x88\x01F"}, + {"database/sql", "\x03\nK\x16\x03z\f\x06\"\x05\t\x02\x03\x01\f\x02\x02\x02"}, + {"database/sql/driver", "\ra\x03\xae\x01\x10\x10"}, + {"debug/buildinfo", "\x03X\x02\x01\x01\b\a\x03`\x18\x02\x01+\x10\x1e"}, + {"debug/dwarf", "\x03d\a\x03z1\x12\x01\x01"}, + {"debug/elf", "\x03\x06Q\r\a\x03`\x19\x01,\x18\x01\x15"}, + {"debug/gosym", "\x03d\n\xbd\x01\x01\x01\x02"}, + {"debug/macho", "\x03\x06Q\r\n`\x1a,\x18\x01"}, + {"debug/pe", "\x03\x06Q\r\a\x03`\x1a,\x18\x01\x15"}, + {"debug/plan9obj", "g\a\x03`\x1a,"}, + {"embed", "n+:\x18\x01S"}, + {"embed/internal/embedtest", ""}, + {"encoding", ""}, + {"encoding/ascii85", "\xeb\x01D"}, + {"encoding/asn1", "\x03k\x03\x87\x01\x01&\x0e\x02\x01\x0f\x03\x01"}, + {"encoding/base32", "\xeb\x01B\x02"}, + {"encoding/base64", "\x9a\x01QB\x02"}, + {"encoding/binary", "n}\r'\x0e\x05"}, + {"encoding/csv", "\x02\x01k\x03zE\x11\x02"}, + {"encoding/gob", "\x02`\x05\a\x03`\x1a\f\x01\x02\x1d\b\x13\x01\x0e\x02"}, + {"encoding/hex", "n\x03zB\x03"}, + {"encoding/json", "\x03\x01^\x04\b\x03z\r'\x0e\x02\x01\x02\x0f\x01\x01\x02"}, + {"encoding/pem", "\x03c\b}B\x03"}, + {"encoding/xml", "\x02\x01_\f\x03z4\x05\v\x01\x02\x0f\x02"}, + {"errors", "\xca\x01{"}, + {"expvar", "kK9\t\n\x15\r\t\x02\x03\x01\x10"}, + {"flag", "b\f\x03z,\b\x05\t\x02\x01\x0f"}, + {"fmt", "nE8\r\x1f\b\x0e\x02\x03\x11"}, + {"go/ast", "\x03\x01m\x0f\x01j\x03)\b\x0e\x02\x01"}, + {"go/ast/internal/tests", ""}, + {"go/build", "\x02\x01k\x03\x01\x03\x02\a\x02\x01\x17\x1e\x04\x02\t\x14\x12\x01+\x01\x04\x01\a\t\x02\x01\x11\x02\x02"}, + {"go/build/constraint", "n\xc1\x01\x01\x11\x02"}, + {"go/constant", "q\x10w\x01\x015\x01\x02\x11"}, + {"go/doc", "\x04m\x01\x06\t=-1\x11\x02\x01\x11\x02"}, + {"go/doc/comment", "\x03n\xbc\x01\x01\x01\x01\x11\x02"}, + {"go/format", "\x03n\x01\f\x01\x02jE"}, + {"go/importer", "t\a\x01\x01\x04\x01i9"}, + {"go/internal/gccgoimporter", "\x02\x01X\x13\x03\x05\v\x01g\x02,\x01\x05\x12\x01\v\b"}, + 
{"go/internal/gcimporter", "\x02o\x10\x01/\x05\x0e',\x16\x03\x02"}, + {"go/internal/srcimporter", "q\x01\x02\n\x03\x01i,\x01\x05\x13\x02\x13"}, + {"go/parser", "\x03k\x03\x01\x03\v\x01j\x01+\x06\x13"}, + {"go/printer", "q\x01\x03\x03\tj\r\x1f\x16\x02\x01\x02\n\x05\x02"}, + {"go/scanner", "\x03n\x10j2\x11\x01\x12\x02"}, + {"go/token", "\x04m\xbc\x01\x02\x03\x01\x0e\x02"}, + {"go/types", "\x03\x01\x06d\x03\x01\x04\b\x03\x02\x15\x1e\x06+\x04\x03\n%\a\t\x01\x01\x01\x02\x01\x0e\x02\x02"}, + {"go/version", "\xbb\x01u"}, + {"hash", "\xeb\x01"}, + {"hash/adler32", "n\x16\x16"}, + {"hash/crc32", "n\x16\x16\x14\x84\x01\x01"}, + {"hash/crc64", "n\x16\x16\x98\x01"}, + {"hash/fnv", "n\x16\x16`"}, + {"hash/maphash", "\x95\x01\x05\x1b\x03@M"}, + {"html", "\xb0\x02\x02\x11"}, + {"html/template", "\x03h\x06\x19,5\x01\v \x05\x01\x02\x03\r\x01\x02\v\x01\x03\x02"}, + {"image", "\x02l\x1f^\x0f5\x03\x01"}, + {"image/color", ""}, + {"image/color/palette", "\x8d\x01"}, + {"image/draw", "\x8c\x01\x01\x04"}, + {"image/gif", "\x02\x01\x05f\x03\x1b\x01\x01\x01\vQ"}, + {"image/internal/imageutil", "\x8c\x01"}, + {"image/jpeg", "\x02l\x1e\x01\x04Z"}, + {"image/png", "\x02\a^\n\x13\x02\x06\x01^D"}, + {"index/suffixarray", "\x03d\a}\r*\v\x01"}, + {"internal/abi", "\xb5\x01\x90\x01"}, + {"internal/asan", "\xc5\x02"}, + {"internal/bisect", "\xa4\x02\x0e\x01"}, + {"internal/buildcfg", "qG_\x06\x02\x05\v\x01"}, + {"internal/bytealg", "\xae\x01\x97\x01"}, + {"internal/byteorder", ""}, + {"internal/cfg", ""}, + {"internal/chacha8rand", "\x9a\x01\x1b\x90\x01"}, + {"internal/copyright", ""}, + {"internal/coverage", ""}, + {"internal/coverage/calloc", ""}, + {"internal/coverage/cfile", "k\x06\x17\x16\x01\x02\x01\x01\x01\x01\x01\x01\x01$\x01\x1e,\x06\a\v\x01\x03\f\x06"}, + {"internal/coverage/cformat", "\x04m-\x04I\f6\x01\x02\f"}, + {"internal/coverage/cmerge", "q-Z"}, + {"internal/coverage/decodecounter", "g\n-\v\x02@,\x18\x16"}, + {"internal/coverage/decodemeta", "\x02e\n\x17\x16\v\x02@,"}, + {"internal/coverage/encodecounter", "\x02e\n-\f\x01\x02>\f \x16"}, + {"internal/coverage/encodemeta", "\x02\x01d\n\x13\x04\x16\r\x02>,."}, + {"internal/coverage/pods", "\x04m-y\x06\x05\v\x02\x01"}, + {"internal/coverage/rtcov", "\xc5\x02"}, + {"internal/coverage/slicereader", "g\nzZ"}, + {"internal/coverage/slicewriter", "qz"}, + {"internal/coverage/stringtab", "q8\x04>"}, + {"internal/coverage/test", ""}, + {"internal/coverage/uleb128", ""}, + {"internal/cpu", "\xc5\x02"}, + {"internal/dag", "\x04m\xbc\x01\x03"}, + {"internal/diff", "\x03n\xbd\x01\x02"}, + {"internal/exportdata", "\x02\x01k\x03\x03]\x1a,\x01\x05\x12\x01\x02"}, + {"internal/filepathlite", "n+:\x19A"}, + {"internal/fmtsort", "\x04\x9b\x02\x0e"}, + {"internal/fuzz", "\x03\nA\x19\x04\x03\x03\x01\f\x0355\r\x02\x1d\x01\x05\x02\x05\v\x01\x02\x01\x01\v\x04\x02"}, + {"internal/goarch", ""}, + {"internal/godebug", "\x97\x01 {\x01\x12"}, + {"internal/godebugs", ""}, + {"internal/goexperiment", ""}, + {"internal/goos", ""}, + {"internal/goroot", "\x97\x02\x01\x05\x13\x02"}, + {"internal/gover", "\x04"}, + {"internal/goversion", ""}, + {"internal/itoa", ""}, + {"internal/lazyregexp", "\x97\x02\v\x0e\x02"}, + {"internal/lazytemplate", "\xeb\x01,\x19\x02\v"}, + {"internal/msan", "\xc5\x02"}, + {"internal/nettrace", ""}, + {"internal/obscuretestdata", "f\x85\x01,"}, + {"internal/oserror", "n"}, + {"internal/pkgbits", "\x03K\x19\a\x03\x05\vj\x0e\x1e\r\v\x01"}, + {"internal/platform", ""}, + {"internal/poll", "nO\x1a\x149\x0e\x01\x01\v\x06"}, + {"internal/profile", 
"\x03\x04g\x03z7\f\x01\x01\x0f"}, + {"internal/profilerecord", ""}, + {"internal/race", "\x95\x01\xb0\x01"}, + {"internal/reflectlite", "\x95\x01 3\x01P\x0e\x13\x12"}, + {"unsafe", ""}, + {"vendor/golang.org/x/crypto/chacha20", "\x10W\a\x8c\x01*&"}, + {"vendor/golang.org/x/crypto/chacha20poly1305", "\x10W\a\xd8\x01\x04\x01"}, + {"vendor/golang.org/x/crypto/cryptobyte", "d\n\x03\x88\x01& \n"}, + {"vendor/golang.org/x/crypto/cryptobyte/asn1", ""}, + {"vendor/golang.org/x/crypto/internal/alias", "\xc5\x02"}, + {"vendor/golang.org/x/crypto/internal/poly1305", "Q\x16\x93\x01"}, + {"vendor/golang.org/x/net/dns/dnsmessage", "n"}, + {"vendor/golang.org/x/net/http/httpguts", "\x81\x02\x14\x1b\x13\r"}, + {"vendor/golang.org/x/net/http/httpproxy", "n\x03\x90\x01\x15\x01\x19\x13\r"}, + {"vendor/golang.org/x/net/http2/hpack", "\x03k\x03zG"}, + {"vendor/golang.org/x/net/idna", "q\x87\x018\x13\x10\x02\x01"}, + {"vendor/golang.org/x/net/nettest", "\x03d\a\x03z\x11\x05\x16\x01\f\v\x01\x02\x02\x01\n"}, + {"vendor/golang.org/x/sys/cpu", "\x97\x02\r\v\x01\x15"}, + {"vendor/golang.org/x/text/secure/bidirule", "n\xd5\x01\x11\x01"}, + {"vendor/golang.org/x/text/transform", "\x03k}X"}, + {"vendor/golang.org/x/text/unicode/bidi", "\x03\bf~?\x15"}, + {"vendor/golang.org/x/text/unicode/norm", "g\nzG\x11\x11"}, + {"weak", "\x95\x01\x8f\x01!"}, +} diff --git a/vendor/golang.org/x/tools/internal/stdlib/import.go b/vendor/golang.org/x/tools/internal/stdlib/import.go new file mode 100644 index 000000000..f6909878a --- /dev/null +++ b/vendor/golang.org/x/tools/internal/stdlib/import.go @@ -0,0 +1,89 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package stdlib + +// This file provides the API for the import graph of the standard library. +// +// Be aware that the compiler-generated code for every package +// implicitly depends on package "runtime" and a handful of others +// (see runtimePkgs in GOROOT/src/cmd/internal/objabi/pkgspecial.go). + +import ( + "encoding/binary" + "iter" + "slices" + "strings" +) + +// Imports returns the sequence of packages directly imported by the +// named standard packages, in name order. +// The imports of an unknown package are the empty set. +// +// The graph is built into the application and may differ from the +// graph in the Go source tree being analyzed by the application. +func Imports(pkgs ...string) iter.Seq[string] { + return func(yield func(string) bool) { + for _, pkg := range pkgs { + if i, ok := find(pkg); ok { + var depIndex uint64 + for data := []byte(deps[i].deps); len(data) > 0; { + delta, n := binary.Uvarint(data) + depIndex += delta + if !yield(deps[depIndex].name) { + return + } + data = data[n:] + } + } + } + } +} + +// Dependencies returns the set of all dependencies of the named +// standard packages, including the initial package, +// in a deterministic topological order. +// The dependencies of an unknown package are the empty set. +// +// The graph is built into the application and may differ from the +// graph in the Go source tree being analyzed by the application. 
+func Dependencies(pkgs ...string) iter.Seq[string] { + return func(yield func(string) bool) { + for _, pkg := range pkgs { + if i, ok := find(pkg); ok { + var seen [1 + len(deps)/8]byte // bit set of seen packages + var visit func(i int) bool + visit = func(i int) bool { + bit := byte(1) << (i % 8) + if seen[i/8]&bit == 0 { + seen[i/8] |= bit + var depIndex uint64 + for data := []byte(deps[i].deps); len(data) > 0; { + delta, n := binary.Uvarint(data) + depIndex += delta + if !visit(int(depIndex)) { + return false + } + data = data[n:] + } + if !yield(deps[i].name) { + return false + } + } + return true + } + if !visit(i) { + return + } + } + } + } +} + +// find returns the index of pkg in the deps table. +func find(pkg string) (int, bool) { + return slices.BinarySearchFunc(deps[:], pkg, func(p pkginfo, n string) int { + return strings.Compare(p.name, n) + }) +} diff --git a/vendor/golang.org/x/tools/internal/stdlib/manifest.go b/vendor/golang.org/x/tools/internal/stdlib/manifest.go index cdaac9ab3..00776a31b 100644 --- a/vendor/golang.org/x/tools/internal/stdlib/manifest.go +++ b/vendor/golang.org/x/tools/internal/stdlib/manifest.go @@ -1,4 +1,4 @@ -// Copyright 2024 The Go Authors. All rights reserved. +// Copyright 2025 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -268,6 +268,8 @@ var PackageSymbols = map[string][]Symbol{ {"ErrTooLarge", Var, 0}, {"Fields", Func, 0}, {"FieldsFunc", Func, 0}, + {"FieldsFuncSeq", Func, 24}, + {"FieldsSeq", Func, 24}, {"HasPrefix", Func, 0}, {"HasSuffix", Func, 0}, {"Index", Func, 0}, @@ -280,6 +282,7 @@ var PackageSymbols = map[string][]Symbol{ {"LastIndexAny", Func, 0}, {"LastIndexByte", Func, 5}, {"LastIndexFunc", Func, 0}, + {"Lines", Func, 24}, {"Map", Func, 0}, {"MinRead", Const, 0}, {"NewBuffer", Func, 0}, @@ -293,7 +296,9 @@ var PackageSymbols = map[string][]Symbol{ {"Split", Func, 0}, {"SplitAfter", Func, 0}, {"SplitAfterN", Func, 0}, + {"SplitAfterSeq", Func, 24}, {"SplitN", Func, 0}, + {"SplitSeq", Func, 24}, {"Title", Func, 0}, {"ToLower", Func, 0}, {"ToLowerSpecial", Func, 0}, @@ -535,6 +540,7 @@ var PackageSymbols = map[string][]Symbol{ {"NewCTR", Func, 0}, {"NewGCM", Func, 2}, {"NewGCMWithNonceSize", Func, 5}, + {"NewGCMWithRandomNonce", Func, 24}, {"NewGCMWithTagSize", Func, 11}, {"NewOFB", Func, 0}, {"Stream", Type, 0}, @@ -673,6 +679,14 @@ var PackageSymbols = map[string][]Symbol{ {"Unmarshal", Func, 0}, {"UnmarshalCompressed", Func, 15}, }, + "crypto/fips140": { + {"Enabled", Func, 24}, + }, + "crypto/hkdf": { + {"Expand", Func, 24}, + {"Extract", Func, 24}, + {"Key", Func, 24}, + }, "crypto/hmac": { {"Equal", Func, 1}, {"New", Func, 0}, @@ -683,11 +697,43 @@ var PackageSymbols = map[string][]Symbol{ {"Size", Const, 0}, {"Sum", Func, 2}, }, + "crypto/mlkem": { + {"(*DecapsulationKey1024).Bytes", Method, 24}, + {"(*DecapsulationKey1024).Decapsulate", Method, 24}, + {"(*DecapsulationKey1024).EncapsulationKey", Method, 24}, + {"(*DecapsulationKey768).Bytes", Method, 24}, + {"(*DecapsulationKey768).Decapsulate", Method, 24}, + {"(*DecapsulationKey768).EncapsulationKey", Method, 24}, + {"(*EncapsulationKey1024).Bytes", Method, 24}, + {"(*EncapsulationKey1024).Encapsulate", Method, 24}, + {"(*EncapsulationKey768).Bytes", Method, 24}, + {"(*EncapsulationKey768).Encapsulate", Method, 24}, + {"CiphertextSize1024", Const, 24}, + {"CiphertextSize768", Const, 24}, + {"DecapsulationKey1024", Type, 24}, + {"DecapsulationKey768", Type, 24}, + 
{"EncapsulationKey1024", Type, 24}, + {"EncapsulationKey768", Type, 24}, + {"EncapsulationKeySize1024", Const, 24}, + {"EncapsulationKeySize768", Const, 24}, + {"GenerateKey1024", Func, 24}, + {"GenerateKey768", Func, 24}, + {"NewDecapsulationKey1024", Func, 24}, + {"NewDecapsulationKey768", Func, 24}, + {"NewEncapsulationKey1024", Func, 24}, + {"NewEncapsulationKey768", Func, 24}, + {"SeedSize", Const, 24}, + {"SharedKeySize", Const, 24}, + }, + "crypto/pbkdf2": { + {"Key", Func, 24}, + }, "crypto/rand": { {"Int", Func, 0}, {"Prime", Func, 0}, {"Read", Func, 0}, {"Reader", Var, 0}, + {"Text", Func, 24}, }, "crypto/rc4": { {"(*Cipher).Reset", Method, 0}, @@ -766,6 +812,39 @@ var PackageSymbols = map[string][]Symbol{ {"Sum224", Func, 2}, {"Sum256", Func, 2}, }, + "crypto/sha3": { + {"(*SHA3).AppendBinary", Method, 24}, + {"(*SHA3).BlockSize", Method, 24}, + {"(*SHA3).MarshalBinary", Method, 24}, + {"(*SHA3).Reset", Method, 24}, + {"(*SHA3).Size", Method, 24}, + {"(*SHA3).Sum", Method, 24}, + {"(*SHA3).UnmarshalBinary", Method, 24}, + {"(*SHA3).Write", Method, 24}, + {"(*SHAKE).AppendBinary", Method, 24}, + {"(*SHAKE).BlockSize", Method, 24}, + {"(*SHAKE).MarshalBinary", Method, 24}, + {"(*SHAKE).Read", Method, 24}, + {"(*SHAKE).Reset", Method, 24}, + {"(*SHAKE).UnmarshalBinary", Method, 24}, + {"(*SHAKE).Write", Method, 24}, + {"New224", Func, 24}, + {"New256", Func, 24}, + {"New384", Func, 24}, + {"New512", Func, 24}, + {"NewCSHAKE128", Func, 24}, + {"NewCSHAKE256", Func, 24}, + {"NewSHAKE128", Func, 24}, + {"NewSHAKE256", Func, 24}, + {"SHA3", Type, 24}, + {"SHAKE", Type, 24}, + {"Sum224", Func, 24}, + {"Sum256", Func, 24}, + {"Sum384", Func, 24}, + {"Sum512", Func, 24}, + {"SumSHAKE128", Func, 24}, + {"SumSHAKE256", Func, 24}, + }, "crypto/sha512": { {"BlockSize", Const, 0}, {"New", Func, 0}, @@ -788,6 +867,7 @@ var PackageSymbols = map[string][]Symbol{ {"ConstantTimeEq", Func, 0}, {"ConstantTimeLessOrEq", Func, 2}, {"ConstantTimeSelect", Func, 0}, + {"WithDataIndependentTiming", Func, 24}, {"XORBytes", Func, 20}, }, "crypto/tls": { @@ -864,6 +944,7 @@ var PackageSymbols = map[string][]Symbol{ {"ClientHelloInfo", Type, 4}, {"ClientHelloInfo.CipherSuites", Field, 4}, {"ClientHelloInfo.Conn", Field, 8}, + {"ClientHelloInfo.Extensions", Field, 24}, {"ClientHelloInfo.ServerName", Field, 4}, {"ClientHelloInfo.SignatureSchemes", Field, 8}, {"ClientHelloInfo.SupportedCurves", Field, 4}, @@ -881,6 +962,7 @@ var PackageSymbols = map[string][]Symbol{ {"Config.CurvePreferences", Field, 3}, {"Config.DynamicRecordSizingDisabled", Field, 7}, {"Config.EncryptedClientHelloConfigList", Field, 23}, + {"Config.EncryptedClientHelloKeys", Field, 24}, {"Config.EncryptedClientHelloRejectionVerify", Field, 23}, {"Config.GetCertificate", Field, 4}, {"Config.GetClientCertificate", Field, 8}, @@ -934,6 +1016,10 @@ var PackageSymbols = map[string][]Symbol{ {"ECHRejectionError", Type, 23}, {"ECHRejectionError.RetryConfigList", Field, 23}, {"Ed25519", Const, 13}, + {"EncryptedClientHelloKey", Type, 24}, + {"EncryptedClientHelloKey.Config", Field, 24}, + {"EncryptedClientHelloKey.PrivateKey", Field, 24}, + {"EncryptedClientHelloKey.SendAsRetry", Field, 24}, {"InsecureCipherSuites", Func, 14}, {"Listen", Func, 0}, {"LoadX509KeyPair", Func, 0}, @@ -1032,6 +1118,7 @@ var PackageSymbols = map[string][]Symbol{ {"VersionTLS12", Const, 2}, {"VersionTLS13", Const, 12}, {"X25519", Const, 8}, + {"X25519MLKEM768", Const, 24}, {"X509KeyPair", Func, 0}, }, "crypto/x509": { @@ -1056,6 +1143,8 @@ var PackageSymbols = 
map[string][]Symbol{ {"(ConstraintViolationError).Error", Method, 0}, {"(HostnameError).Error", Method, 0}, {"(InsecureAlgorithmError).Error", Method, 6}, + {"(OID).AppendBinary", Method, 24}, + {"(OID).AppendText", Method, 24}, {"(OID).Equal", Method, 22}, {"(OID).EqualASN1OID", Method, 22}, {"(OID).MarshalBinary", Method, 23}, @@ -1084,6 +1173,10 @@ var PackageSymbols = map[string][]Symbol{ {"Certificate.Extensions", Field, 2}, {"Certificate.ExtraExtensions", Field, 2}, {"Certificate.IPAddresses", Field, 1}, + {"Certificate.InhibitAnyPolicy", Field, 24}, + {"Certificate.InhibitAnyPolicyZero", Field, 24}, + {"Certificate.InhibitPolicyMapping", Field, 24}, + {"Certificate.InhibitPolicyMappingZero", Field, 24}, {"Certificate.IsCA", Field, 0}, {"Certificate.Issuer", Field, 0}, {"Certificate.IssuingCertificateURL", Field, 2}, @@ -1100,6 +1193,7 @@ var PackageSymbols = map[string][]Symbol{ {"Certificate.PermittedURIDomains", Field, 10}, {"Certificate.Policies", Field, 22}, {"Certificate.PolicyIdentifiers", Field, 0}, + {"Certificate.PolicyMappings", Field, 24}, {"Certificate.PublicKey", Field, 0}, {"Certificate.PublicKeyAlgorithm", Field, 0}, {"Certificate.Raw", Field, 0}, @@ -1107,6 +1201,8 @@ var PackageSymbols = map[string][]Symbol{ {"Certificate.RawSubject", Field, 0}, {"Certificate.RawSubjectPublicKeyInfo", Field, 0}, {"Certificate.RawTBSCertificate", Field, 0}, + {"Certificate.RequireExplicitPolicy", Field, 24}, + {"Certificate.RequireExplicitPolicyZero", Field, 24}, {"Certificate.SerialNumber", Field, 0}, {"Certificate.Signature", Field, 0}, {"Certificate.SignatureAlgorithm", Field, 0}, @@ -1198,6 +1294,7 @@ var PackageSymbols = map[string][]Symbol{ {"NameConstraintsWithoutSANs", Const, 10}, {"NameMismatch", Const, 8}, {"NewCertPool", Func, 0}, + {"NoValidChains", Const, 24}, {"NotAuthorizedToSign", Const, 0}, {"OID", Type, 22}, {"OIDFromInts", Func, 22}, @@ -1219,6 +1316,9 @@ var PackageSymbols = map[string][]Symbol{ {"ParsePKCS8PrivateKey", Func, 0}, {"ParsePKIXPublicKey", Func, 0}, {"ParseRevocationList", Func, 19}, + {"PolicyMapping", Type, 24}, + {"PolicyMapping.IssuerDomainPolicy", Field, 24}, + {"PolicyMapping.SubjectDomainPolicy", Field, 24}, {"PublicKeyAlgorithm", Type, 0}, {"PureEd25519", Const, 13}, {"RSA", Const, 0}, @@ -1265,6 +1365,7 @@ var PackageSymbols = map[string][]Symbol{ {"UnknownPublicKeyAlgorithm", Const, 0}, {"UnknownSignatureAlgorithm", Const, 0}, {"VerifyOptions", Type, 0}, + {"VerifyOptions.CertificatePolicies", Field, 24}, {"VerifyOptions.CurrentTime", Field, 0}, {"VerifyOptions.DNSName", Field, 0}, {"VerifyOptions.Intermediates", Field, 0}, @@ -1975,6 +2076,8 @@ var PackageSymbols = map[string][]Symbol{ {"(*File).DynString", Method, 1}, {"(*File).DynValue", Method, 21}, {"(*File).DynamicSymbols", Method, 4}, + {"(*File).DynamicVersionNeeds", Method, 24}, + {"(*File).DynamicVersions", Method, 24}, {"(*File).ImportedLibraries", Method, 0}, {"(*File).ImportedSymbols", Method, 0}, {"(*File).Section", Method, 0}, @@ -2048,6 +2151,8 @@ var PackageSymbols = map[string][]Symbol{ {"(Type).String", Method, 0}, {"(Version).GoString", Method, 0}, {"(Version).String", Method, 0}, + {"(VersionIndex).Index", Method, 24}, + {"(VersionIndex).IsHidden", Method, 24}, {"ARM_MAGIC_TRAMP_NUMBER", Const, 0}, {"COMPRESS_HIOS", Const, 6}, {"COMPRESS_HIPROC", Const, 6}, @@ -2240,6 +2345,19 @@ var PackageSymbols = map[string][]Symbol{ {"DynFlag", Type, 0}, {"DynFlag1", Type, 21}, {"DynTag", Type, 0}, + {"DynamicVersion", Type, 24}, + {"DynamicVersion.Deps", Field, 24}, + 
{"DynamicVersion.Flags", Field, 24}, + {"DynamicVersion.Index", Field, 24}, + {"DynamicVersion.Name", Field, 24}, + {"DynamicVersionDep", Type, 24}, + {"DynamicVersionDep.Dep", Field, 24}, + {"DynamicVersionDep.Flags", Field, 24}, + {"DynamicVersionDep.Index", Field, 24}, + {"DynamicVersionFlag", Type, 24}, + {"DynamicVersionNeed", Type, 24}, + {"DynamicVersionNeed.Name", Field, 24}, + {"DynamicVersionNeed.Needs", Field, 24}, {"EI_ABIVERSION", Const, 0}, {"EI_CLASS", Const, 0}, {"EI_DATA", Const, 0}, @@ -3718,6 +3836,7 @@ var PackageSymbols = map[string][]Symbol{ {"SymType", Type, 0}, {"SymVis", Type, 0}, {"Symbol", Type, 0}, + {"Symbol.HasVersion", Field, 24}, {"Symbol.Info", Field, 0}, {"Symbol.Library", Field, 13}, {"Symbol.Name", Field, 0}, @@ -3726,8 +3845,13 @@ var PackageSymbols = map[string][]Symbol{ {"Symbol.Size", Field, 0}, {"Symbol.Value", Field, 0}, {"Symbol.Version", Field, 13}, + {"Symbol.VersionIndex", Field, 24}, {"Type", Type, 0}, + {"VER_FLG_BASE", Const, 24}, + {"VER_FLG_INFO", Const, 24}, + {"VER_FLG_WEAK", Const, 24}, {"Version", Type, 0}, + {"VersionIndex", Type, 24}, }, "debug/gosym": { {"(*DecodingError).Error", Method, 0}, @@ -4453,8 +4577,10 @@ var PackageSymbols = map[string][]Symbol{ {"FS", Type, 16}, }, "encoding": { + {"BinaryAppender", Type, 24}, {"BinaryMarshaler", Type, 2}, {"BinaryUnmarshaler", Type, 2}, + {"TextAppender", Type, 24}, {"TextMarshaler", Type, 2}, {"TextUnmarshaler", Type, 2}, }, @@ -5984,13 +6110,16 @@ var PackageSymbols = map[string][]Symbol{ {"(*Interface).Complete", Method, 5}, {"(*Interface).Embedded", Method, 5}, {"(*Interface).EmbeddedType", Method, 11}, + {"(*Interface).EmbeddedTypes", Method, 24}, {"(*Interface).Empty", Method, 5}, {"(*Interface).ExplicitMethod", Method, 5}, + {"(*Interface).ExplicitMethods", Method, 24}, {"(*Interface).IsComparable", Method, 18}, {"(*Interface).IsImplicit", Method, 18}, {"(*Interface).IsMethodSet", Method, 18}, {"(*Interface).MarkImplicit", Method, 18}, {"(*Interface).Method", Method, 5}, + {"(*Interface).Methods", Method, 24}, {"(*Interface).NumEmbeddeds", Method, 5}, {"(*Interface).NumExplicitMethods", Method, 5}, {"(*Interface).NumMethods", Method, 5}, @@ -6011,9 +6140,11 @@ var PackageSymbols = map[string][]Symbol{ {"(*MethodSet).At", Method, 5}, {"(*MethodSet).Len", Method, 5}, {"(*MethodSet).Lookup", Method, 5}, + {"(*MethodSet).Methods", Method, 24}, {"(*MethodSet).String", Method, 5}, {"(*Named).AddMethod", Method, 5}, {"(*Named).Method", Method, 5}, + {"(*Named).Methods", Method, 24}, {"(*Named).NumMethods", Method, 5}, {"(*Named).Obj", Method, 5}, {"(*Named).Origin", Method, 18}, @@ -6054,6 +6185,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*Pointer).String", Method, 5}, {"(*Pointer).Underlying", Method, 5}, {"(*Scope).Child", Method, 5}, + {"(*Scope).Children", Method, 24}, {"(*Scope).Contains", Method, 5}, {"(*Scope).End", Method, 5}, {"(*Scope).Innermost", Method, 5}, @@ -6089,6 +6221,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*StdSizes).Offsetsof", Method, 5}, {"(*StdSizes).Sizeof", Method, 5}, {"(*Struct).Field", Method, 5}, + {"(*Struct).Fields", Method, 24}, {"(*Struct).NumFields", Method, 5}, {"(*Struct).String", Method, 5}, {"(*Struct).Tag", Method, 5}, @@ -6100,8 +6233,10 @@ var PackageSymbols = map[string][]Symbol{ {"(*Tuple).Len", Method, 5}, {"(*Tuple).String", Method, 5}, {"(*Tuple).Underlying", Method, 5}, + {"(*Tuple).Variables", Method, 24}, {"(*TypeList).At", Method, 18}, {"(*TypeList).Len", Method, 18}, + {"(*TypeList).Types", Method, 24}, 
{"(*TypeName).Exported", Method, 5}, {"(*TypeName).Id", Method, 5}, {"(*TypeName).IsAlias", Method, 9}, @@ -6119,9 +6254,11 @@ var PackageSymbols = map[string][]Symbol{ {"(*TypeParam).Underlying", Method, 18}, {"(*TypeParamList).At", Method, 18}, {"(*TypeParamList).Len", Method, 18}, + {"(*TypeParamList).TypeParams", Method, 24}, {"(*Union).Len", Method, 18}, {"(*Union).String", Method, 18}, {"(*Union).Term", Method, 18}, + {"(*Union).Terms", Method, 24}, {"(*Union).Underlying", Method, 18}, {"(*Var).Anonymous", Method, 5}, {"(*Var).Embedded", Method, 11}, @@ -6392,10 +6529,12 @@ var PackageSymbols = map[string][]Symbol{ {"(*Hash).WriteByte", Method, 14}, {"(*Hash).WriteString", Method, 14}, {"Bytes", Func, 19}, + {"Comparable", Func, 24}, {"Hash", Type, 14}, {"MakeSeed", Func, 14}, {"Seed", Type, 14}, {"String", Func, 19}, + {"WriteComparable", Func, 24}, }, "html": { {"EscapeString", Func, 0}, @@ -6980,6 +7119,7 @@ var PackageSymbols = map[string][]Symbol{ {"FormatFileInfo", Func, 21}, {"Glob", Func, 16}, {"GlobFS", Type, 16}, + {"Lstat", Func, 25}, {"ModeAppend", Const, 16}, {"ModeCharDevice", Const, 16}, {"ModeDevice", Const, 16}, @@ -7004,6 +7144,8 @@ var PackageSymbols = map[string][]Symbol{ {"ReadDirFile", Type, 16}, {"ReadFile", Func, 16}, {"ReadFileFS", Type, 16}, + {"ReadLink", Func, 25}, + {"ReadLinkFS", Type, 25}, {"SkipAll", Var, 20}, {"SkipDir", Var, 16}, {"Stat", Func, 16}, @@ -7082,6 +7224,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*JSONHandler).WithGroup", Method, 21}, {"(*Level).UnmarshalJSON", Method, 21}, {"(*Level).UnmarshalText", Method, 21}, + {"(*LevelVar).AppendText", Method, 24}, {"(*LevelVar).Level", Method, 21}, {"(*LevelVar).MarshalText", Method, 21}, {"(*LevelVar).Set", Method, 21}, @@ -7110,6 +7253,7 @@ var PackageSymbols = map[string][]Symbol{ {"(Attr).Equal", Method, 21}, {"(Attr).String", Method, 21}, {"(Kind).String", Method, 21}, + {"(Level).AppendText", Method, 24}, {"(Level).Level", Method, 21}, {"(Level).MarshalJSON", Method, 21}, {"(Level).MarshalText", Method, 21}, @@ -7140,6 +7284,7 @@ var PackageSymbols = map[string][]Symbol{ {"Debug", Func, 21}, {"DebugContext", Func, 21}, {"Default", Func, 21}, + {"DiscardHandler", Var, 24}, {"Duration", Func, 21}, {"DurationValue", Func, 21}, {"Error", Func, 21}, @@ -7375,6 +7520,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*Float).Acc", Method, 5}, {"(*Float).Add", Method, 5}, {"(*Float).Append", Method, 5}, + {"(*Float).AppendText", Method, 24}, {"(*Float).Cmp", Method, 5}, {"(*Float).Copy", Method, 5}, {"(*Float).Float32", Method, 5}, @@ -7421,6 +7567,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*Int).And", Method, 0}, {"(*Int).AndNot", Method, 0}, {"(*Int).Append", Method, 6}, + {"(*Int).AppendText", Method, 24}, {"(*Int).Binomial", Method, 0}, {"(*Int).Bit", Method, 0}, {"(*Int).BitLen", Method, 0}, @@ -7477,6 +7624,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*Int).Xor", Method, 0}, {"(*Rat).Abs", Method, 0}, {"(*Rat).Add", Method, 0}, + {"(*Rat).AppendText", Method, 24}, {"(*Rat).Cmp", Method, 0}, {"(*Rat).Denom", Method, 0}, {"(*Rat).Float32", Method, 4}, @@ -7659,11 +7807,13 @@ var PackageSymbols = map[string][]Symbol{ {"Zipf", Type, 0}, }, "math/rand/v2": { + {"(*ChaCha8).AppendBinary", Method, 24}, {"(*ChaCha8).MarshalBinary", Method, 22}, {"(*ChaCha8).Read", Method, 23}, {"(*ChaCha8).Seed", Method, 22}, {"(*ChaCha8).Uint64", Method, 22}, {"(*ChaCha8).UnmarshalBinary", Method, 22}, + {"(*PCG).AppendBinary", Method, 24}, {"(*PCG).MarshalBinary", Method, 22}, {"(*PCG).Seed", 
Method, 22}, {"(*PCG).Uint64", Method, 22}, @@ -7931,6 +8081,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*UnixListener).SyscallConn", Method, 10}, {"(Flags).String", Method, 0}, {"(HardwareAddr).String", Method, 0}, + {"(IP).AppendText", Method, 24}, {"(IP).DefaultMask", Method, 0}, {"(IP).Equal", Method, 0}, {"(IP).IsGlobalUnicast", Method, 0}, @@ -8131,6 +8282,9 @@ var PackageSymbols = map[string][]Symbol{ {"(*MaxBytesError).Error", Method, 19}, {"(*ProtocolError).Error", Method, 0}, {"(*ProtocolError).Is", Method, 21}, + {"(*Protocols).SetHTTP1", Method, 24}, + {"(*Protocols).SetHTTP2", Method, 24}, + {"(*Protocols).SetUnencryptedHTTP2", Method, 24}, {"(*Request).AddCookie", Method, 0}, {"(*Request).BasicAuth", Method, 4}, {"(*Request).Clone", Method, 13}, @@ -8190,6 +8344,10 @@ var PackageSymbols = map[string][]Symbol{ {"(Header).Values", Method, 14}, {"(Header).Write", Method, 0}, {"(Header).WriteSubset", Method, 0}, + {"(Protocols).HTTP1", Method, 24}, + {"(Protocols).HTTP2", Method, 24}, + {"(Protocols).String", Method, 24}, + {"(Protocols).UnencryptedHTTP2", Method, 24}, {"AllowQuerySemicolons", Func, 17}, {"CanonicalHeaderKey", Func, 0}, {"Client", Type, 0}, @@ -8252,6 +8410,18 @@ var PackageSymbols = map[string][]Symbol{ {"FileSystem", Type, 0}, {"Flusher", Type, 0}, {"Get", Func, 0}, + {"HTTP2Config", Type, 24}, + {"HTTP2Config.CountError", Field, 24}, + {"HTTP2Config.MaxConcurrentStreams", Field, 24}, + {"HTTP2Config.MaxDecoderHeaderTableSize", Field, 24}, + {"HTTP2Config.MaxEncoderHeaderTableSize", Field, 24}, + {"HTTP2Config.MaxReadFrameSize", Field, 24}, + {"HTTP2Config.MaxReceiveBufferPerConnection", Field, 24}, + {"HTTP2Config.MaxReceiveBufferPerStream", Field, 24}, + {"HTTP2Config.PermitProhibitedCipherSuites", Field, 24}, + {"HTTP2Config.PingTimeout", Field, 24}, + {"HTTP2Config.SendPingTimeout", Field, 24}, + {"HTTP2Config.WriteByteTimeout", Field, 24}, {"Handle", Func, 0}, {"HandleFunc", Func, 0}, {"Handler", Type, 0}, @@ -8292,6 +8462,7 @@ var PackageSymbols = map[string][]Symbol{ {"PostForm", Func, 0}, {"ProtocolError", Type, 0}, {"ProtocolError.ErrorString", Field, 0}, + {"Protocols", Type, 24}, {"ProxyFromEnvironment", Func, 0}, {"ProxyURL", Func, 0}, {"PushOptions", Type, 8}, @@ -8361,9 +8532,11 @@ var PackageSymbols = map[string][]Symbol{ {"Server.ConnState", Field, 3}, {"Server.DisableGeneralOptionsHandler", Field, 20}, {"Server.ErrorLog", Field, 3}, + {"Server.HTTP2", Field, 24}, {"Server.Handler", Field, 0}, {"Server.IdleTimeout", Field, 8}, {"Server.MaxHeaderBytes", Field, 0}, + {"Server.Protocols", Field, 24}, {"Server.ReadHeaderTimeout", Field, 8}, {"Server.ReadTimeout", Field, 0}, {"Server.TLSConfig", Field, 0}, @@ -8453,12 +8626,14 @@ var PackageSymbols = map[string][]Symbol{ {"Transport.ExpectContinueTimeout", Field, 6}, {"Transport.ForceAttemptHTTP2", Field, 13}, {"Transport.GetProxyConnectHeader", Field, 16}, + {"Transport.HTTP2", Field, 24}, {"Transport.IdleConnTimeout", Field, 7}, {"Transport.MaxConnsPerHost", Field, 11}, {"Transport.MaxIdleConns", Field, 7}, {"Transport.MaxIdleConnsPerHost", Field, 0}, {"Transport.MaxResponseHeaderBytes", Field, 7}, {"Transport.OnProxyConnectResponse", Field, 20}, + {"Transport.Protocols", Field, 24}, {"Transport.Proxy", Field, 0}, {"Transport.ProxyConnectHeader", Field, 8}, {"Transport.ReadBufferSize", Field, 13}, @@ -8646,6 +8821,8 @@ var PackageSymbols = map[string][]Symbol{ {"(*AddrPort).UnmarshalText", Method, 18}, {"(*Prefix).UnmarshalBinary", Method, 18}, {"(*Prefix).UnmarshalText", Method, 18}, + 
{"(Addr).AppendBinary", Method, 24}, + {"(Addr).AppendText", Method, 24}, {"(Addr).AppendTo", Method, 18}, {"(Addr).As16", Method, 18}, {"(Addr).As4", Method, 18}, @@ -8676,6 +8853,8 @@ var PackageSymbols = map[string][]Symbol{ {"(Addr).WithZone", Method, 18}, {"(Addr).Zone", Method, 18}, {"(AddrPort).Addr", Method, 18}, + {"(AddrPort).AppendBinary", Method, 24}, + {"(AddrPort).AppendText", Method, 24}, {"(AddrPort).AppendTo", Method, 18}, {"(AddrPort).Compare", Method, 22}, {"(AddrPort).IsValid", Method, 18}, @@ -8684,6 +8863,8 @@ var PackageSymbols = map[string][]Symbol{ {"(AddrPort).Port", Method, 18}, {"(AddrPort).String", Method, 18}, {"(Prefix).Addr", Method, 18}, + {"(Prefix).AppendBinary", Method, 24}, + {"(Prefix).AppendText", Method, 24}, {"(Prefix).AppendTo", Method, 18}, {"(Prefix).Bits", Method, 18}, {"(Prefix).Contains", Method, 18}, @@ -8868,6 +9049,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*Error).Temporary", Method, 6}, {"(*Error).Timeout", Method, 6}, {"(*Error).Unwrap", Method, 13}, + {"(*URL).AppendBinary", Method, 24}, {"(*URL).EscapedFragment", Method, 15}, {"(*URL).EscapedPath", Method, 5}, {"(*URL).Hostname", Method, 8}, @@ -8967,6 +9149,19 @@ var PackageSymbols = map[string][]Symbol{ {"(*ProcessState).SysUsage", Method, 0}, {"(*ProcessState).SystemTime", Method, 0}, {"(*ProcessState).UserTime", Method, 0}, + {"(*Root).Chmod", Method, 25}, + {"(*Root).Chown", Method, 25}, + {"(*Root).Close", Method, 24}, + {"(*Root).Create", Method, 24}, + {"(*Root).FS", Method, 24}, + {"(*Root).Lstat", Method, 24}, + {"(*Root).Mkdir", Method, 24}, + {"(*Root).Name", Method, 24}, + {"(*Root).Open", Method, 24}, + {"(*Root).OpenFile", Method, 24}, + {"(*Root).OpenRoot", Method, 24}, + {"(*Root).Remove", Method, 24}, + {"(*Root).Stat", Method, 24}, {"(*SyscallError).Error", Method, 0}, {"(*SyscallError).Timeout", Method, 10}, {"(*SyscallError).Unwrap", Method, 13}, @@ -9060,6 +9255,8 @@ var PackageSymbols = map[string][]Symbol{ {"O_WRONLY", Const, 0}, {"Open", Func, 0}, {"OpenFile", Func, 0}, + {"OpenInRoot", Func, 24}, + {"OpenRoot", Func, 24}, {"PathError", Type, 0}, {"PathError.Err", Field, 0}, {"PathError.Op", Field, 0}, @@ -9081,6 +9278,7 @@ var PackageSymbols = map[string][]Symbol{ {"Remove", Func, 0}, {"RemoveAll", Func, 0}, {"Rename", Func, 0}, + {"Root", Type, 24}, {"SEEK_CUR", Const, 0}, {"SEEK_END", Const, 0}, {"SEEK_SET", Const, 0}, @@ -9422,6 +9620,7 @@ var PackageSymbols = map[string][]Symbol{ {"Zero", Func, 0}, }, "regexp": { + {"(*Regexp).AppendText", Method, 24}, {"(*Regexp).Copy", Method, 6}, {"(*Regexp).Expand", Method, 0}, {"(*Regexp).ExpandString", Method, 0}, @@ -9602,6 +9801,8 @@ var PackageSymbols = map[string][]Symbol{ {"(*StackRecord).Stack", Method, 0}, {"(*TypeAssertionError).Error", Method, 0}, {"(*TypeAssertionError).RuntimeError", Method, 0}, + {"(Cleanup).Stop", Method, 24}, + {"AddCleanup", Func, 24}, {"BlockProfile", Func, 1}, {"BlockProfileRecord", Type, 1}, {"BlockProfileRecord.Count", Field, 1}, @@ -9612,6 +9813,7 @@ var PackageSymbols = map[string][]Symbol{ {"Caller", Func, 0}, {"Callers", Func, 0}, {"CallersFrames", Func, 7}, + {"Cleanup", Type, 24}, {"Compiler", Const, 0}, {"Error", Type, 0}, {"Frame", Type, 7}, @@ -9974,6 +10176,8 @@ var PackageSymbols = map[string][]Symbol{ {"EqualFold", Func, 0}, {"Fields", Func, 0}, {"FieldsFunc", Func, 0}, + {"FieldsFuncSeq", Func, 24}, + {"FieldsSeq", Func, 24}, {"HasPrefix", Func, 0}, {"HasSuffix", Func, 0}, {"Index", Func, 0}, @@ -9986,6 +10190,7 @@ var PackageSymbols = map[string][]Symbol{ 
{"LastIndexAny", Func, 0}, {"LastIndexByte", Func, 5}, {"LastIndexFunc", Func, 0}, + {"Lines", Func, 24}, {"Map", Func, 0}, {"NewReader", Func, 0}, {"NewReplacer", Func, 0}, @@ -9997,7 +10202,9 @@ var PackageSymbols = map[string][]Symbol{ {"Split", Func, 0}, {"SplitAfter", Func, 0}, {"SplitAfterN", Func, 0}, + {"SplitAfterSeq", Func, 24}, {"SplitN", Func, 0}, + {"SplitSeq", Func, 24}, {"Title", Func, 0}, {"ToLower", Func, 0}, {"ToLowerSpecial", Func, 0}, @@ -16413,7 +16620,9 @@ var PackageSymbols = map[string][]Symbol{ {"ValueOf", Func, 0}, }, "testing": { + {"(*B).Chdir", Method, 24}, {"(*B).Cleanup", Method, 14}, + {"(*B).Context", Method, 24}, {"(*B).Elapsed", Method, 20}, {"(*B).Error", Method, 0}, {"(*B).Errorf", Method, 0}, @@ -16425,6 +16634,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*B).Helper", Method, 9}, {"(*B).Log", Method, 0}, {"(*B).Logf", Method, 0}, + {"(*B).Loop", Method, 24}, {"(*B).Name", Method, 8}, {"(*B).ReportAllocs", Method, 1}, {"(*B).ReportMetric", Method, 13}, @@ -16442,7 +16652,9 @@ var PackageSymbols = map[string][]Symbol{ {"(*B).StopTimer", Method, 0}, {"(*B).TempDir", Method, 15}, {"(*F).Add", Method, 18}, + {"(*F).Chdir", Method, 24}, {"(*F).Cleanup", Method, 18}, + {"(*F).Context", Method, 24}, {"(*F).Error", Method, 18}, {"(*F).Errorf", Method, 18}, {"(*F).Fail", Method, 18}, @@ -16463,7 +16675,9 @@ var PackageSymbols = map[string][]Symbol{ {"(*F).TempDir", Method, 18}, {"(*M).Run", Method, 4}, {"(*PB).Next", Method, 3}, + {"(*T).Chdir", Method, 24}, {"(*T).Cleanup", Method, 14}, + {"(*T).Context", Method, 24}, {"(*T).Deadline", Method, 15}, {"(*T).Error", Method, 0}, {"(*T).Errorf", Method, 0}, @@ -16545,9 +16759,11 @@ var PackageSymbols = map[string][]Symbol{ }, "testing/fstest": { {"(MapFS).Glob", Method, 16}, + {"(MapFS).Lstat", Method, 25}, {"(MapFS).Open", Method, 16}, {"(MapFS).ReadDir", Method, 16}, {"(MapFS).ReadFile", Method, 16}, + {"(MapFS).ReadLink", Method, 25}, {"(MapFS).Stat", Method, 16}, {"(MapFS).Sub", Method, 16}, {"MapFS", Type, 16}, @@ -16954,7 +17170,9 @@ var PackageSymbols = map[string][]Symbol{ {"(Time).Add", Method, 0}, {"(Time).AddDate", Method, 0}, {"(Time).After", Method, 0}, + {"(Time).AppendBinary", Method, 24}, {"(Time).AppendFormat", Method, 5}, + {"(Time).AppendText", Method, 24}, {"(Time).Before", Method, 0}, {"(Time).Clock", Method, 0}, {"(Time).Compare", Method, 20}, @@ -17428,4 +17646,9 @@ var PackageSymbols = map[string][]Symbol{ {"String", Func, 0}, {"StringData", Func, 0}, }, + "weak": { + {"(Pointer).Value", Method, 24}, + {"Make", Func, 24}, + {"Pointer", Type, 24}, + }, } diff --git a/vendor/golang.org/x/tools/internal/stdlib/stdlib.go b/vendor/golang.org/x/tools/internal/stdlib/stdlib.go index 98904017f..3d96d3bf6 100644 --- a/vendor/golang.org/x/tools/internal/stdlib/stdlib.go +++ b/vendor/golang.org/x/tools/internal/stdlib/stdlib.go @@ -6,7 +6,7 @@ // Package stdlib provides a table of all exported symbols in the // standard library, along with the version at which they first -// appeared. +// appeared. It also provides the import graph of std packages. package stdlib import ( diff --git a/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go b/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go deleted file mode 100644 index ff9437a36..000000000 --- a/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// package tokeninternal provides access to some internal features of the token -// package. -package tokeninternal - -import ( - "fmt" - "go/token" - "sort" - "sync" - "unsafe" -) - -// GetLines returns the table of line-start offsets from a token.File. -func GetLines(file *token.File) []int { - // token.File has a Lines method on Go 1.21 and later. - if file, ok := (interface{})(file).(interface{ Lines() []int }); ok { - return file.Lines() - } - - // This declaration must match that of token.File. - // This creates a risk of dependency skew. - // For now we check that the size of the two - // declarations is the same, on the (fragile) assumption - // that future changes would add fields. - type tokenFile119 struct { - _ string - _ int - _ int - mu sync.Mutex // we're not complete monsters - lines []int - _ []struct{} - } - - if unsafe.Sizeof(*file) != unsafe.Sizeof(tokenFile119{}) { - panic("unexpected token.File size") - } - var ptr *tokenFile119 - type uP = unsafe.Pointer - *(*uP)(uP(&ptr)) = uP(file) - ptr.mu.Lock() - defer ptr.mu.Unlock() - return ptr.lines -} - -// AddExistingFiles adds the specified files to the FileSet if they -// are not already present. It panics if any pair of files in the -// resulting FileSet would overlap. -func AddExistingFiles(fset *token.FileSet, files []*token.File) { - // Punch through the FileSet encapsulation. - type tokenFileSet struct { - // This type remained essentially consistent from go1.16 to go1.21. - mutex sync.RWMutex - base int - files []*token.File - _ *token.File // changed to atomic.Pointer[token.File] in go1.19 - } - - // If the size of token.FileSet changes, this will fail to compile. - const delta = int64(unsafe.Sizeof(tokenFileSet{})) - int64(unsafe.Sizeof(token.FileSet{})) - var _ [-delta * delta]int - - type uP = unsafe.Pointer - var ptr *tokenFileSet - *(*uP)(uP(&ptr)) = uP(fset) - ptr.mutex.Lock() - defer ptr.mutex.Unlock() - - // Merge and sort. - newFiles := append(ptr.files, files...) - sort.Slice(newFiles, func(i, j int) bool { - return newFiles[i].Base() < newFiles[j].Base() - }) - - // Reject overlapping files. - // Discard adjacent identical files. - out := newFiles[:0] - for i, file := range newFiles { - if i > 0 { - prev := newFiles[i-1] - if file == prev { - continue - } - if prev.Base()+prev.Size()+1 > file.Base() { - panic(fmt.Sprintf("file %s (%d-%d) overlaps with file %s (%d-%d)", - prev.Name(), prev.Base(), prev.Base()+prev.Size(), - file.Name(), file.Base(), file.Base()+file.Size())) - } - } - out = append(out, file) - } - newFiles = out - - ptr.files = newFiles - - // Advance FileSet.Base(). - if len(newFiles) > 0 { - last := newFiles[len(newFiles)-1] - newBase := last.Base() + last.Size() + 1 - if ptr.base < newBase { - ptr.base = newBase - } - } -} - -// FileSetFor returns a new FileSet containing a sequence of new Files with -// the same base, size, and line as the input files, for use in APIs that -// require a FileSet. -// -// Precondition: the input files must be non-overlapping, and sorted in order -// of their Base. -func FileSetFor(files ...*token.File) *token.FileSet { - fset := token.NewFileSet() - for _, f := range files { - f2 := fset.AddFile(f.Name(), f.Base(), f.Size()) - lines := GetLines(f) - f2.SetLines(lines) - } - return fset -} - -// CloneFileSet creates a new FileSet holding all files in fset. 
It does not -// create copies of the token.Files in fset: they are added to the resulting -// FileSet unmodified. -func CloneFileSet(fset *token.FileSet) *token.FileSet { - var files []*token.File - fset.Iterate(func(f *token.File) bool { - files = append(files, f) - return true - }) - newFileSet := token.NewFileSet() - AddExistingFiles(newFileSet, files) - return newFileSet -} diff --git a/vendor/golang.org/x/tools/internal/typeparams/common.go b/vendor/golang.org/x/tools/internal/typeparams/common.go new file mode 100644 index 000000000..cdae2b8e8 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typeparams/common.go @@ -0,0 +1,68 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package typeparams contains common utilities for writing tools that +// interact with generic Go code, as introduced with Go 1.18. It +// supplements the standard library APIs. Notably, the StructuralTerms +// API computes a minimal representation of the structural +// restrictions on a type parameter. +// +// An external version of these APIs is available in the +// golang.org/x/exp/typeparams module. +package typeparams + +import ( + "go/ast" + "go/token" + "go/types" +) + +// UnpackIndexExpr extracts data from AST nodes that represent index +// expressions. +// +// For an ast.IndexExpr, the resulting indices slice will contain exactly one +// index expression. For an ast.IndexListExpr (go1.18+), it may have a variable +// number of index expressions. +// +// For nodes that don't represent index expressions, the first return value of +// UnpackIndexExpr will be nil. +func UnpackIndexExpr(n ast.Node) (x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack token.Pos) { + switch e := n.(type) { + case *ast.IndexExpr: + return e.X, e.Lbrack, []ast.Expr{e.Index}, e.Rbrack + case *ast.IndexListExpr: + return e.X, e.Lbrack, e.Indices, e.Rbrack + } + return nil, token.NoPos, nil, token.NoPos +} + +// PackIndexExpr returns an *ast.IndexExpr or *ast.IndexListExpr, depending on +// the cardinality of indices. Calling PackIndexExpr with len(indices) == 0 +// will panic. +func PackIndexExpr(x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack token.Pos) ast.Expr { + switch len(indices) { + case 0: + panic("empty indices") + case 1: + return &ast.IndexExpr{ + X: x, + Lbrack: lbrack, + Index: indices[0], + Rbrack: rbrack, + } + default: + return &ast.IndexListExpr{ + X: x, + Lbrack: lbrack, + Indices: indices, + Rbrack: rbrack, + } + } +} + +// IsTypeParam reports whether t is a type parameter (or an alias of one). +func IsTypeParam(t types.Type) bool { + _, ok := types.Unalias(t).(*types.TypeParam) + return ok +} diff --git a/vendor/golang.org/x/tools/internal/typeparams/coretype.go b/vendor/golang.org/x/tools/internal/typeparams/coretype.go new file mode 100644 index 000000000..27a2b1792 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typeparams/coretype.go @@ -0,0 +1,155 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeparams + +import ( + "fmt" + "go/types" +) + +// CoreType returns the core type of T or nil if T does not have a core type. +// +// See https://go.dev/ref/spec#Core_types for the definition of a core type. 
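CoreType below, and the NormalTerms machinery later in this file, reason over the tilde terms of constraint interfaces. As a small orientation sketch, here is how such terms look through the exported go/types API, building the union ~string | int directly; this uses only exported API and is not vendored code:

	// Build and inspect the union ~string | int with the exported go/types API.
	package main

	import (
		"fmt"
		"go/types"
	)

	func main() {
		u := types.NewUnion([]*types.Term{
			types.NewTerm(true, types.Typ[types.String]), // ~string: any type whose underlying type is string
			types.NewTerm(false, types.Typ[types.Int]),   // int: exactly int
		})
		for i := 0; i < u.Len(); i++ {
			t := u.Term(i)
			fmt.Printf("term %d: tilde=%t type=%s\n", i, t.Tilde(), t.Type())
		}
	}

The tilde flag is exactly what distinguishes a structural restriction (~string) from a concrete one (int) in the normalization rules described above.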
+func CoreType(T types.Type) types.Type { + U := T.Underlying() + if _, ok := U.(*types.Interface); !ok { + return U // for non-interface types, + } + + terms, err := NormalTerms(U) + if len(terms) == 0 || err != nil { + // len(terms) -> empty type set of interface. + // err != nil => U is invalid, exceeds complexity bounds, or has an empty type set. + return nil // no core type. + } + + U = terms[0].Type().Underlying() + var identical int // i in [0,identical) => Identical(U, terms[i].Type().Underlying()) + for identical = 1; identical < len(terms); identical++ { + if !types.Identical(U, terms[identical].Type().Underlying()) { + break + } + } + + if identical == len(terms) { + // https://go.dev/ref/spec#Core_types + // "There is a single type U which is the underlying type of all types in the type set of T" + return U + } + ch, ok := U.(*types.Chan) + if !ok { + return nil // no core type as identical < len(terms) and U is not a channel. + } + // https://go.dev/ref/spec#Core_types + // "the type chan E if T contains only bidirectional channels, or the type chan<- E or + // <-chan E depending on the direction of the directional channels present." + for chans := identical; chans < len(terms); chans++ { + curr, ok := terms[chans].Type().Underlying().(*types.Chan) + if !ok { + return nil + } + if !types.Identical(ch.Elem(), curr.Elem()) { + return nil // channel elements are not identical. + } + if ch.Dir() == types.SendRecv { + // ch is bidirectional. We can safely always use curr's direction. + ch = curr + } else if curr.Dir() != types.SendRecv && ch.Dir() != curr.Dir() { + // ch and curr are not bidirectional and not the same direction. + return nil + } + } + return ch +} + +// NormalTerms returns a slice of terms representing the normalized structural +// type restrictions of a type, if any. +// +// For all types other than *types.TypeParam, *types.Interface, and +// *types.Union, this is just a single term with Tilde() == false and +// Type() == typ. For *types.TypeParam, *types.Interface, and *types.Union, see +// below. +// +// Structural type restrictions of a type parameter are created via +// non-interface types embedded in its constraint interface (directly, or via a +// chain of interface embeddings). For example, in the declaration type +// T[P interface{~int; m()}] int the structural restriction of the type +// parameter P is ~int. +// +// With interface embedding and unions, the specification of structural type +// restrictions may be arbitrarily complex. For example, consider the +// following: +// +// type A interface{ ~string|~[]byte } +// +// type B interface{ int|string } +// +// type C interface { ~string|~int } +// +// type T[P interface{ A|B; C }] int +// +// In this example, the structural type restriction of P is ~string|int: A|B +// expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int, +// which when intersected with C (~string|~int) yields ~string|int. +// +// NormalTerms computes these expansions and reductions, producing a +// "normalized" form of the embeddings. A structural restriction is normalized +// if it is a single union containing no interface terms, and is minimal in the +// sense that removing any term changes the set of types satisfying the +// constraint. It is left as a proof for the reader that, modulo sorting, there +// is exactly one such normalized form. 
+// +// Because the minimal representation always takes this form, NormalTerms +// returns a slice of tilde terms corresponding to the terms of the union in +// the normalized structural restriction. An error is returned if the type is +// invalid, exceeds complexity bounds, or has an empty type set. In the latter +// case, NormalTerms returns ErrEmptyTypeSet. +// +// NormalTerms makes no guarantees about the order of terms, except that it +// is deterministic. +func NormalTerms(T types.Type) ([]*types.Term, error) { + // typeSetOf(T) == typeSetOf(Unalias(T)) + typ := types.Unalias(T) + if named, ok := typ.(*types.Named); ok { + typ = named.Underlying() + } + switch typ := typ.(type) { + case *types.TypeParam: + return StructuralTerms(typ) + case *types.Union: + return UnionTermSet(typ) + case *types.Interface: + return InterfaceTermSet(typ) + default: + return []*types.Term{types.NewTerm(false, T)}, nil + } +} + +// Deref returns the type of the variable pointed to by t, +// if t's core type is a pointer; otherwise it returns t. +// +// Do not assume that Deref(T)==T implies T is not a pointer: +// consider "type T *T", for example. +// +// TODO(adonovan): ideally this would live in typesinternal, but that +// creates an import cycle. Move there when we melt this package down. +func Deref(t types.Type) types.Type { + if ptr, ok := CoreType(t).(*types.Pointer); ok { + return ptr.Elem() + } + return t +} + +// MustDeref returns the type of the variable pointed to by t. +// It panics if t's core type is not a pointer. +// +// TODO(adonovan): ideally this would live in typesinternal, but that +// creates an import cycle. Move there when we melt this package down. +func MustDeref(t types.Type) types.Type { + if ptr, ok := CoreType(t).(*types.Pointer); ok { + return ptr.Elem() + } + panic(fmt.Sprintf("%v is not a pointer", t)) +} diff --git a/vendor/golang.org/x/tools/internal/typeparams/free.go b/vendor/golang.org/x/tools/internal/typeparams/free.go new file mode 100644 index 000000000..0ade5c294 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typeparams/free.go @@ -0,0 +1,131 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeparams + +import ( + "go/types" + + "golang.org/x/tools/internal/aliases" +) + +// Free is a memoization of the set of free type parameters within a +// type. It makes a sequence of calls to [Free.Has] for overlapping +// types more efficient. The zero value is ready for use. +// +// NOTE: Adapted from go/types/infer.go. If it is later exported, factor. +type Free struct { + seen map[types.Type]bool +} + +// Has reports whether the specified type has a free type parameter. +func (w *Free) Has(typ types.Type) (res bool) { + // detect cycles + if x, ok := w.seen[typ]; ok { + return x + } + if w.seen == nil { + w.seen = make(map[types.Type]bool) + } + w.seen[typ] = false + defer func() { + w.seen[typ] = res + }() + + switch t := typ.(type) { + case nil, *types.Basic: // TODO(gri) should nil be handled here? + break + + case *types.Alias: + if aliases.TypeParams(t).Len() > aliases.TypeArgs(t).Len() { + return true // This is an uninstantiated Alias. + } + // The expansion of an alias can have free type parameters, + // whether or not the alias itself has type parameters: + // + // func _[K comparable]() { + // type Set = map[K]bool // free(Set) = {K} + // type MapTo[V] = map[K]V // free(Map[foo]) = {V} + // } + // + // So, we must Unalias. 
+ return w.Has(types.Unalias(t)) + + case *types.Array: + return w.Has(t.Elem()) + + case *types.Slice: + return w.Has(t.Elem()) + + case *types.Struct: + for i, n := 0, t.NumFields(); i < n; i++ { + if w.Has(t.Field(i).Type()) { + return true + } + } + + case *types.Pointer: + return w.Has(t.Elem()) + + case *types.Tuple: + n := t.Len() + for i := 0; i < n; i++ { + if w.Has(t.At(i).Type()) { + return true + } + } + + case *types.Signature: + // t.tparams may not be nil if we are looking at a signature + // of a generic function type (or an interface method) that is + // part of the type we're testing. We don't care about these type + // parameters. + // Similarly, the receiver of a method may declare (rather than + // use) type parameters, we don't care about those either. + // Thus, we only need to look at the input and result parameters. + return w.Has(t.Params()) || w.Has(t.Results()) + + case *types.Interface: + for i, n := 0, t.NumMethods(); i < n; i++ { + if w.Has(t.Method(i).Type()) { + return true + } + } + terms, err := InterfaceTermSet(t) + if err != nil { + return false // ill typed + } + for _, term := range terms { + if w.Has(term.Type()) { + return true + } + } + + case *types.Map: + return w.Has(t.Key()) || w.Has(t.Elem()) + + case *types.Chan: + return w.Has(t.Elem()) + + case *types.Named: + args := t.TypeArgs() + if params := t.TypeParams(); params.Len() > args.Len() { + return true // this is an uninstantiated named type. + } + for i, n := 0, args.Len(); i < n; i++ { + if w.Has(args.At(i)) { + return true + } + } + return w.Has(t.Underlying()) // recurse for types local to parameterized functions + + case *types.TypeParam: + return true + + default: + panic(t) // unreachable + } + + return false +} diff --git a/vendor/golang.org/x/tools/internal/typeparams/normalize.go b/vendor/golang.org/x/tools/internal/typeparams/normalize.go new file mode 100644 index 000000000..f49802b8e --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typeparams/normalize.go @@ -0,0 +1,218 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeparams + +import ( + "errors" + "fmt" + "go/types" + "os" + "strings" +) + +//go:generate go run copytermlist.go + +const debug = false + +var ErrEmptyTypeSet = errors.New("empty type set") + +// StructuralTerms returns a slice of terms representing the normalized +// structural type restrictions of a type parameter, if any. +// +// Structural type restrictions of a type parameter are created via +// non-interface types embedded in its constraint interface (directly, or via a +// chain of interface embeddings). For example, in the declaration +// +// type T[P interface{~int; m()}] int +// +// the structural restriction of the type parameter P is ~int. +// +// With interface embedding and unions, the specification of structural type +// restrictions may be arbitrarily complex. For example, consider the +// following: +// +// type A interface{ ~string|~[]byte } +// +// type B interface{ int|string } +// +// type C interface { ~string|~int } +// +// type T[P interface{ A|B; C }] int +// +// In this example, the structural type restriction of P is ~string|int: A|B +// expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int, +// which when intersected with C (~string|~int) yields ~string|int. +// +// StructuralTerms computes these expansions and reductions, producing a +// "normalized" form of the embeddings. 
A structural restriction is normalized +// if it is a single union containing no interface terms, and is minimal in the +// sense that removing any term changes the set of types satisfying the +// constraint. It is left as a proof for the reader that, modulo sorting, there +// is exactly one such normalized form. +// +// Because the minimal representation always takes this form, StructuralTerms +// returns a slice of tilde terms corresponding to the terms of the union in +// the normalized structural restriction. An error is returned if the +// constraint interface is invalid, exceeds complexity bounds, or has an empty +// type set. In the latter case, StructuralTerms returns ErrEmptyTypeSet. +// +// StructuralTerms makes no guarantees about the order of terms, except that it +// is deterministic. +func StructuralTerms(tparam *types.TypeParam) ([]*types.Term, error) { + constraint := tparam.Constraint() + if constraint == nil { + return nil, fmt.Errorf("%s has nil constraint", tparam) + } + iface, _ := constraint.Underlying().(*types.Interface) + if iface == nil { + return nil, fmt.Errorf("constraint is %T, not *types.Interface", constraint.Underlying()) + } + return InterfaceTermSet(iface) +} + +// InterfaceTermSet computes the normalized terms for a constraint interface, +// returning an error if the term set cannot be computed or is empty. In the +// latter case, the error will be ErrEmptyTypeSet. +// +// See the documentation of StructuralTerms for more information on +// normalization. +func InterfaceTermSet(iface *types.Interface) ([]*types.Term, error) { + return computeTermSet(iface) +} + +// UnionTermSet computes the normalized terms for a union, returning an error +// if the term set cannot be computed or is empty. In the latter case, the +// error will be ErrEmptyTypeSet. +// +// See the documentation of StructuralTerms for more information on +// normalization. +func UnionTermSet(union *types.Union) ([]*types.Term, error) { + return computeTermSet(union) +} + +func computeTermSet(typ types.Type) ([]*types.Term, error) { + tset, err := computeTermSetInternal(typ, make(map[types.Type]*termSet), 0) + if err != nil { + return nil, err + } + if tset.terms.isEmpty() { + return nil, ErrEmptyTypeSet + } + if tset.terms.isAll() { + return nil, nil + } + var terms []*types.Term + for _, term := range tset.terms { + terms = append(terms, types.NewTerm(term.tilde, term.typ)) + } + return terms, nil +} + +// A termSet holds the normalized set of terms for a given type. +// +// The name termSet is intentionally distinct from 'type set': a type set is +// all types that implement a type (and includes method restrictions), whereas +// a term set just represents the structural restrictions on a type. +type termSet struct { + complete bool + terms termlist +} + +func indentf(depth int, format string, args ...any) { + fmt.Fprintf(os.Stderr, strings.Repeat(".", depth)+format+"\n", args...) +} + +func computeTermSetInternal(t types.Type, seen map[types.Type]*termSet, depth int) (res *termSet, err error) { + if t == nil { + panic("nil type") + } + + if debug { + indentf(depth, "%s", t.String()) + defer func() { + if err != nil { + indentf(depth, "=> %s", err) + } else { + indentf(depth, "=> %s", res.terms.String()) + } + }() + } + + const maxTermCount = 100 + if tset, ok := seen[t]; ok { + if !tset.complete { + return nil, fmt.Errorf("cycle detected in the declaration of %s", t) + } + return tset, nil + } + + // Mark the current type as seen to avoid infinite recursion. 
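+	// (If computeTermSetInternal re-enters for t before the deferred
+	// assignment below runs, the !tset.complete check above reports a
+	// cycle rather than recursing forever.)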
+ tset := new(termSet) + defer func() { + tset.complete = true + }() + seen[t] = tset + + switch u := t.Underlying().(type) { + case *types.Interface: + // The term set of an interface is the intersection of the term sets of its + // embedded types. + tset.terms = allTermlist + for i := 0; i < u.NumEmbeddeds(); i++ { + embedded := u.EmbeddedType(i) + if _, ok := embedded.Underlying().(*types.TypeParam); ok { + return nil, fmt.Errorf("invalid embedded type %T", embedded) + } + tset2, err := computeTermSetInternal(embedded, seen, depth+1) + if err != nil { + return nil, err + } + tset.terms = tset.terms.intersect(tset2.terms) + } + case *types.Union: + // The term set of a union is the union of term sets of its terms. + tset.terms = nil + for i := 0; i < u.Len(); i++ { + t := u.Term(i) + var terms termlist + switch t.Type().Underlying().(type) { + case *types.Interface: + tset2, err := computeTermSetInternal(t.Type(), seen, depth+1) + if err != nil { + return nil, err + } + terms = tset2.terms + case *types.TypeParam, *types.Union: + // A stand-alone type parameter or union is not permitted as union + // term. + return nil, fmt.Errorf("invalid union term %T", t) + default: + if t.Type() == types.Typ[types.Invalid] { + continue + } + terms = termlist{{t.Tilde(), t.Type()}} + } + tset.terms = tset.terms.union(terms) + if len(tset.terms) > maxTermCount { + return nil, fmt.Errorf("exceeded max term count %d", maxTermCount) + } + } + case *types.TypeParam: + panic("unreachable") + default: + // For all other types, the term set is just a single non-tilde term + // holding the type itself. + if u != types.Typ[types.Invalid] { + tset.terms = termlist{{false, t}} + } + } + return tset, nil +} + +// under is a facade for the go/types internal function of the same name. It is +// used by typeterm.go. +func under(t types.Type) types.Type { + return t.Underlying() +} diff --git a/vendor/golang.org/x/tools/internal/typeparams/termlist.go b/vendor/golang.org/x/tools/internal/typeparams/termlist.go new file mode 100644 index 000000000..cbd12f801 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typeparams/termlist.go @@ -0,0 +1,163 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by copytermlist.go DO NOT EDIT. + +package typeparams + +import ( + "bytes" + "go/types" +) + +// A termlist represents the type set represented by the union +// t1 ∪ y2 ∪ ... tn of the type sets of the terms t1 to tn. +// A termlist is in normal form if all terms are disjoint. +// termlist operations don't require the operands to be in +// normal form. +type termlist []*term + +// allTermlist represents the set of all types. +// It is in normal form. +var allTermlist = termlist{new(term)} + +// String prints the termlist exactly (without normalization). +func (xl termlist) String() string { + if len(xl) == 0 { + return "∅" + } + var buf bytes.Buffer + for i, x := range xl { + if i > 0 { + buf.WriteString(" | ") + } + buf.WriteString(x.String()) + } + return buf.String() +} + +// isEmpty reports whether the termlist xl represents the empty set of types. +func (xl termlist) isEmpty() bool { + // If there's a non-nil term, the entire list is not empty. + // If the termlist is in normal form, this requires at most + // one iteration. + for _, x := range xl { + if x != nil { + return false + } + } + return true +} + +// isAll reports whether the termlist xl represents the set of all types. 
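+// For example, allTermlist (termlist{new(term)}) holds the single 𝓤
+// term, so isAll reports true; a termlist holding only nil terms
+// represents ∅ and isAll reports false.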
+func (xl termlist) isAll() bool { + // If there's a 𝓤 term, the entire list is 𝓤. + // If the termlist is in normal form, this requires at most + // one iteration. + for _, x := range xl { + if x != nil && x.typ == nil { + return true + } + } + return false +} + +// norm returns the normal form of xl. +func (xl termlist) norm() termlist { + // Quadratic algorithm, but good enough for now. + // TODO(gri) fix asymptotic performance + used := make([]bool, len(xl)) + var rl termlist + for i, xi := range xl { + if xi == nil || used[i] { + continue + } + for j := i + 1; j < len(xl); j++ { + xj := xl[j] + if xj == nil || used[j] { + continue + } + if u1, u2 := xi.union(xj); u2 == nil { + // If we encounter a 𝓤 term, the entire list is 𝓤. + // Exit early. + // (Note that this is not just an optimization; + // if we continue, we may end up with a 𝓤 term + // and other terms and the result would not be + // in normal form.) + if u1.typ == nil { + return allTermlist + } + xi = u1 + used[j] = true // xj is now unioned into xi - ignore it in future iterations + } + } + rl = append(rl, xi) + } + return rl +} + +// union returns the union xl ∪ yl. +func (xl termlist) union(yl termlist) termlist { + return append(xl, yl...).norm() +} + +// intersect returns the intersection xl ∩ yl. +func (xl termlist) intersect(yl termlist) termlist { + if xl.isEmpty() || yl.isEmpty() { + return nil + } + + // Quadratic algorithm, but good enough for now. + // TODO(gri) fix asymptotic performance + var rl termlist + for _, x := range xl { + for _, y := range yl { + if r := x.intersect(y); r != nil { + rl = append(rl, r) + } + } + } + return rl.norm() +} + +// equal reports whether xl and yl represent the same type set. +func (xl termlist) equal(yl termlist) bool { + // TODO(gri) this should be more efficient + return xl.subsetOf(yl) && yl.subsetOf(xl) +} + +// includes reports whether t ∈ xl. +func (xl termlist) includes(t types.Type) bool { + for _, x := range xl { + if x.includes(t) { + return true + } + } + return false +} + +// supersetOf reports whether y ⊆ xl. +func (xl termlist) supersetOf(y *term) bool { + for _, x := range xl { + if y.subsetOf(x) { + return true + } + } + return false +} + +// subsetOf reports whether xl ⊆ yl. +func (xl termlist) subsetOf(yl termlist) bool { + if yl.isEmpty() { + return xl.isEmpty() + } + + // each term x of xl must be a subset of yl + for _, x := range xl { + if !yl.supersetOf(x) { + return false // x is not a subset yl + } + } + return true +} diff --git a/vendor/golang.org/x/tools/internal/typeparams/typeterm.go b/vendor/golang.org/x/tools/internal/typeparams/typeterm.go new file mode 100644 index 000000000..7350bb702 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typeparams/typeterm.go @@ -0,0 +1,169 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by copytermlist.go DO NOT EDIT. 
+ +package typeparams + +import "go/types" + +// A term describes elementary type sets: +// +// ∅: (*term)(nil) == ∅ // set of no types (empty set) +// 𝓤: &term{} == 𝓤 // set of all types (𝓤niverse) +// T: &term{false, T} == {T} // set of type T +// ~t: &term{true, t} == {t' | under(t') == t} // set of types with underlying type t +type term struct { + tilde bool // valid if typ != nil + typ types.Type +} + +func (x *term) String() string { + switch { + case x == nil: + return "∅" + case x.typ == nil: + return "𝓤" + case x.tilde: + return "~" + x.typ.String() + default: + return x.typ.String() + } +} + +// equal reports whether x and y represent the same type set. +func (x *term) equal(y *term) bool { + // easy cases + switch { + case x == nil || y == nil: + return x == y + case x.typ == nil || y.typ == nil: + return x.typ == y.typ + } + // ∅ ⊂ x, y ⊂ 𝓤 + + return x.tilde == y.tilde && types.Identical(x.typ, y.typ) +} + +// union returns the union x ∪ y: zero, one, or two non-nil terms. +func (x *term) union(y *term) (_, _ *term) { + // easy cases + switch { + case x == nil && y == nil: + return nil, nil // ∅ ∪ ∅ == ∅ + case x == nil: + return y, nil // ∅ ∪ y == y + case y == nil: + return x, nil // x ∪ ∅ == x + case x.typ == nil: + return x, nil // 𝓤 ∪ y == 𝓤 + case y.typ == nil: + return y, nil // x ∪ 𝓤 == 𝓤 + } + // ∅ ⊂ x, y ⊂ 𝓤 + + if x.disjoint(y) { + return x, y // x ∪ y == (x, y) if x ∩ y == ∅ + } + // x.typ == y.typ + + // ~t ∪ ~t == ~t + // ~t ∪ T == ~t + // T ∪ ~t == ~t + // T ∪ T == T + if x.tilde || !y.tilde { + return x, nil + } + return y, nil +} + +// intersect returns the intersection x ∩ y. +func (x *term) intersect(y *term) *term { + // easy cases + switch { + case x == nil || y == nil: + return nil // ∅ ∩ y == ∅ and ∩ ∅ == ∅ + case x.typ == nil: + return y // 𝓤 ∩ y == y + case y.typ == nil: + return x // x ∩ 𝓤 == x + } + // ∅ ⊂ x, y ⊂ 𝓤 + + if x.disjoint(y) { + return nil // x ∩ y == ∅ if x ∩ y == ∅ + } + // x.typ == y.typ + + // ~t ∩ ~t == ~t + // ~t ∩ T == T + // T ∩ ~t == T + // T ∩ T == T + if !x.tilde || y.tilde { + return x + } + return y +} + +// includes reports whether t ∈ x. +func (x *term) includes(t types.Type) bool { + // easy cases + switch { + case x == nil: + return false // t ∈ ∅ == false + case x.typ == nil: + return true // t ∈ 𝓤 == true + } + // ∅ ⊂ x ⊂ 𝓤 + + u := t + if x.tilde { + u = under(u) + } + return types.Identical(x.typ, u) +} + +// subsetOf reports whether x ⊆ y. +func (x *term) subsetOf(y *term) bool { + // easy cases + switch { + case x == nil: + return true // ∅ ⊆ y == true + case y == nil: + return false // x ⊆ ∅ == false since x != ∅ + case y.typ == nil: + return true // x ⊆ 𝓤 == true + case x.typ == nil: + return false // 𝓤 ⊆ y == false since y != 𝓤 + } + // ∅ ⊂ x, y ⊂ 𝓤 + + if x.disjoint(y) { + return false // x ⊆ y == false if x ∩ y == ∅ + } + // x.typ == y.typ + + // ~t ⊆ ~t == true + // ~t ⊆ T == false + // T ⊆ ~t == true + // T ⊆ T == true + return !x.tilde || y.tilde +} + +// disjoint reports whether x ∩ y == ∅. +// x.typ and y.typ must not be nil. 
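+// For example, the terms ~int and int are not disjoint (both sets
+// contain int), while int and string are disjoint.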
+func (x *term) disjoint(y *term) bool { + if debug && (x.typ == nil || y.typ == nil) { + panic("invalid argument(s)") + } + ux := x.typ + if y.tilde { + ux = under(ux) + } + uy := y.typ + if x.tilde { + uy = under(uy) + } + return !types.Identical(ux, uy) +} diff --git a/vendor/golang.org/x/tools/internal/typesinternal/element.go b/vendor/golang.org/x/tools/internal/typesinternal/element.go new file mode 100644 index 000000000..4957f0216 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typesinternal/element.go @@ -0,0 +1,133 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typesinternal + +import ( + "fmt" + "go/types" + + "golang.org/x/tools/go/types/typeutil" +) + +// ForEachElement calls f for type T and each type reachable from its +// type through reflection. It does this by recursively stripping off +// type constructors; in addition, for each named type N, the type *N +// is added to the result as it may have additional methods. +// +// The caller must provide an initially empty set used to de-duplicate +// identical types, potentially across multiple calls to ForEachElement. +// (Its final value holds all the elements seen, matching the arguments +// passed to f.) +// +// TODO(adonovan): share/harmonize with go/callgraph/rta. +func ForEachElement(rtypes *typeutil.Map, msets *typeutil.MethodSetCache, T types.Type, f func(types.Type)) { + var visit func(T types.Type, skip bool) + visit = func(T types.Type, skip bool) { + if !skip { + if seen, _ := rtypes.Set(T, true).(bool); seen { + return // de-dup + } + + f(T) // notify caller of new element type + } + + // Recursion over signatures of each method. + tmset := msets.MethodSet(T) + for i := 0; i < tmset.Len(); i++ { + sig := tmset.At(i).Type().(*types.Signature) + // It is tempting to call visit(sig, false) + // but, as noted in golang.org/cl/65450043, + // the Signature.Recv field is ignored by + // types.Identical and typeutil.Map, which + // is confusing at best. + // + // More importantly, the true signature rtype + // reachable from a method using reflection + // has no receiver but an extra ordinary parameter. + // For the Read method of io.Reader we want: + // func(Reader, []byte) (int, error) + // but here sig is: + // func([]byte) (int, error) + // with .Recv = Reader (though it is hard to + // notice because it doesn't affect Signature.String + // or types.Identical). + // + // TODO(adonovan): construct and visit the correct + // non-method signature with an extra parameter + // (though since unnamed func types have no methods + // there is essentially no actual demand for this). + // + // TODO(adonovan): document whether or not it is + // safe to skip non-exported methods (as RTA does). + visit(sig.Params(), true) // skip the Tuple + visit(sig.Results(), true) // skip the Tuple + } + + switch T := T.(type) { + case *types.Alias: + visit(types.Unalias(T), skip) // emulates the pre-Alias behavior + + case *types.Basic: + // nop + + case *types.Interface: + // nop---handled by recursion over method set. 
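+			// (An interface's element types are reachable only through
+			// its methods, which the method-set loop above already visits.)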
+ + case *types.Pointer: + visit(T.Elem(), false) + + case *types.Slice: + visit(T.Elem(), false) + + case *types.Chan: + visit(T.Elem(), false) + + case *types.Map: + visit(T.Key(), false) + visit(T.Elem(), false) + + case *types.Signature: + if T.Recv() != nil { + panic(fmt.Sprintf("Signature %s has Recv %s", T, T.Recv())) + } + visit(T.Params(), true) // skip the Tuple + visit(T.Results(), true) // skip the Tuple + + case *types.Named: + // A pointer-to-named type can be derived from a named + // type via reflection. It may have methods too. + visit(types.NewPointer(T), false) + + // Consider 'type T struct{S}' where S has methods. + // Reflection provides no way to get from T to struct{S}, + // only to S, so the method set of struct{S} is unwanted, + // so set 'skip' flag during recursion. + visit(T.Underlying(), true) // skip the unnamed type + + case *types.Array: + visit(T.Elem(), false) + + case *types.Struct: + for i, n := 0, T.NumFields(); i < n; i++ { + // TODO(adonovan): document whether or not + // it is safe to skip non-exported fields. + visit(T.Field(i).Type(), false) + } + + case *types.Tuple: + for i, n := 0, T.Len(); i < n; i++ { + visit(T.At(i).Type(), false) + } + + case *types.TypeParam, *types.Union: + // forEachReachable must not be called on parameterized types. + panic(T) + + default: + panic(T) + } + } + visit(T, false) +} diff --git a/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go b/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go index 131caab28..235a6defc 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go @@ -966,7 +966,7 @@ const ( // var _ = string(x) InvalidConversion - // InvalidUntypedConversion occurs when an there is no valid implicit + // InvalidUntypedConversion occurs when there is no valid implicit // conversion from an untyped value satisfying the type constraints of the // context in which it is used. // diff --git a/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go b/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go new file mode 100644 index 000000000..b64f714eb --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go @@ -0,0 +1,46 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typesinternal + +import ( + "go/ast" + "go/types" + "strconv" +) + +// FileQualifier returns a [types.Qualifier] function that qualifies +// imported symbols appropriately based on the import environment of a given +// file. +// If the same package is imported multiple times, the last appearance is +// recorded. +func FileQualifier(f *ast.File, pkg *types.Package) types.Qualifier { + // Construct mapping of import paths to their defined names. + // It is only necessary to look at renaming imports. + imports := make(map[string]string) + for _, imp := range f.Imports { + if imp.Name != nil && imp.Name.Name != "_" { + path, _ := strconv.Unquote(imp.Path.Value) + imports[path] = imp.Name.Name + } + } + + // Define qualifier to replace full package paths with names of the imports. + return func(p *types.Package) string { + if p == nil || p == pkg { + return "" + } + + if name, ok := imports[p.Path()]; ok { + if name == "." { + return "" + } else { + return name + } + } + + // If there is no local renaming, fall back to the package name. 
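+		// For example, given a plain `import "example.com/foo"` (a
+		// hypothetical path), nothing was recorded in imports above, so a
+		// symbol from that package is qualified with the package name "foo".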
+ return p.Name() + } +} diff --git a/vendor/golang.org/x/tools/internal/typesinternal/recv.go b/vendor/golang.org/x/tools/internal/typesinternal/recv.go index fea7c8b75..8352ea761 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/recv.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/recv.go @@ -6,20 +6,21 @@ package typesinternal import ( "go/types" - - "golang.org/x/tools/internal/aliases" ) // ReceiverNamed returns the named type (if any) associated with the // type of recv, which may be of the form N or *N, or aliases thereof. // It also reports whether a Pointer was present. +// +// The named result may be nil if recv is from a method on an +// anonymous interface or struct types or in ill-typed code. func ReceiverNamed(recv *types.Var) (isPtr bool, named *types.Named) { t := recv.Type() - if ptr, ok := aliases.Unalias(t).(*types.Pointer); ok { + if ptr, ok := types.Unalias(t).(*types.Pointer); ok { isPtr = true t = ptr.Elem() } - named, _ = aliases.Unalias(t).(*types.Named) + named, _ = types.Unalias(t).(*types.Named) return } @@ -36,7 +37,7 @@ func ReceiverNamed(recv *types.Var) (isPtr bool, named *types.Named) { // indirection from the type, regardless of named types (analogous to // a LOAD instruction). func Unpointer(t types.Type) types.Type { - if ptr, ok := aliases.Unalias(t).(*types.Pointer); ok { + if ptr, ok := types.Unalias(t).(*types.Pointer); ok { return ptr.Elem() } return t diff --git a/vendor/golang.org/x/tools/internal/typesinternal/types.go b/vendor/golang.org/x/tools/internal/typesinternal/types.go index 839232861..edf0347ec 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/types.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/types.go @@ -11,6 +11,8 @@ import ( "go/types" "reflect" "unsafe" + + "golang.org/x/tools/internal/aliases" ) func SetUsesCgo(conf *types.Config) bool { @@ -30,12 +32,14 @@ func SetUsesCgo(conf *types.Config) bool { return true } -// ReadGo116ErrorData extracts additional information from types.Error values +// ErrorCodeStartEnd extracts additional information from types.Error values // generated by Go version 1.16 and later: the error code, start position, and // end position. If all positions are valid, start <= err.Pos <= end. // // If the data could not be read, the final result parameter will be false. -func ReadGo116ErrorData(err types.Error) (code ErrorCode, start, end token.Pos, ok bool) { +// +// TODO(adonovan): eliminate start/end when proposal #71803 is accepted. +func ErrorCodeStartEnd(err types.Error) (code ErrorCode, start, end token.Pos, ok bool) { var data [3]int // By coincidence all of these fields are ints, which simplifies things. v := reflect.ValueOf(err) @@ -63,3 +67,63 @@ func NameRelativeTo(pkg *types.Package) types.Qualifier { return other.Name() } } + +// A NamedOrAlias is a [types.Type] that is named (as +// defined by the spec) and capable of bearing type parameters: it +// abstracts aliases ([types.Alias]) and defined types +// ([types.Named]). +// +// Every type declared by an explicit "type" declaration is a +// NamedOrAlias. (Built-in type symbols may additionally +// have type [types.Basic], which is not a NamedOrAlias, +// though the spec regards them as "named".) +// +// NamedOrAlias cannot expose the Origin method, because +// [types.Alias.Origin] and [types.Named.Origin] have different +// (covariant) result types; use [Origin] instead. 
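+//
+// For example (assuming materialized aliases, i.e. Go 1.23 or
+// gotypesalias=1), given
+//
+//	type A = map[string]int
+//	type N map[string]int
+//
+// the type of A is a *types.Alias and the type of N is a *types.Named;
+// both implement NamedOrAlias.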
+type NamedOrAlias interface { + types.Type + Obj() *types.TypeName + // TODO(hxjiang): add method TypeArgs() *types.TypeList after stop supporting go1.22. +} + +// TypeParams is a light shim around t.TypeParams(). +// (go/types.Alias).TypeParams requires >= 1.23. +func TypeParams(t NamedOrAlias) *types.TypeParamList { + switch t := t.(type) { + case *types.Alias: + return aliases.TypeParams(t) + case *types.Named: + return t.TypeParams() + } + return nil +} + +// TypeArgs is a light shim around t.TypeArgs(). +// (go/types.Alias).TypeArgs requires >= 1.23. +func TypeArgs(t NamedOrAlias) *types.TypeList { + switch t := t.(type) { + case *types.Alias: + return aliases.TypeArgs(t) + case *types.Named: + return t.TypeArgs() + } + return nil +} + +// Origin returns the generic type of the Named or Alias type t if it +// is instantiated, otherwise it returns t. +func Origin(t NamedOrAlias) NamedOrAlias { + switch t := t.(type) { + case *types.Alias: + return aliases.Origin(t) + case *types.Named: + return t.Origin() + } + return t +} + +// IsPackageLevel reports whether obj is a package-level symbol. +func IsPackageLevel(obj types.Object) bool { + return obj.Pkg() != nil && obj.Parent() == obj.Pkg().Scope() +} diff --git a/vendor/golang.org/x/tools/internal/typesinternal/varkind.go b/vendor/golang.org/x/tools/internal/typesinternal/varkind.go new file mode 100644 index 000000000..e5da04951 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typesinternal/varkind.go @@ -0,0 +1,40 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typesinternal + +// TODO(adonovan): when CL 645115 lands, define the go1.25 version of +// this API that actually does something. + +import "go/types" + +type VarKind uint8 + +const ( + _ VarKind = iota // (not meaningful) + PackageVar // a package-level variable + LocalVar // a local variable + RecvVar // a method receiver variable + ParamVar // a function parameter variable + ResultVar // a function result variable + FieldVar // a struct field +) + +func (kind VarKind) String() string { + return [...]string{ + 0: "VarKind(0)", + PackageVar: "PackageVar", + LocalVar: "LocalVar", + RecvVar: "RecvVar", + ParamVar: "ParamVar", + ResultVar: "ResultVar", + FieldVar: "FieldVar", + }[kind] +} + +// GetVarKind returns an invalid VarKind. +func GetVarKind(v *types.Var) VarKind { return 0 } + +// SetVarKind has no effect. +func SetVarKind(v *types.Var, kind VarKind) {} diff --git a/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go b/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go new file mode 100644 index 000000000..d272949c1 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go @@ -0,0 +1,392 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typesinternal + +import ( + "fmt" + "go/ast" + "go/token" + "go/types" + "strings" +) + +// ZeroString returns the string representation of the zero value for any type t. +// The boolean result indicates whether the type is or contains an invalid type +// or a non-basic (constraint) interface type. +// +// Even for invalid input types, ZeroString may return a partially correct +// string representation. The caller should use the returned isValid boolean +// to determine the validity of the expression. 
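+//
+// For example:
+//
+//	ZeroString(int)  returns "0"
+//	ZeroString(*T)   returns "nil"
+//	ZeroString(S)    returns "S{}" for a struct or array type S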
+// +// When assigning to a wider type (such as 'any'), it's the caller's +// responsibility to handle any necessary type conversions. +// +// This string can be used on the right-hand side of an assignment where the +// left-hand side has that explicit type. +// References to named types are qualified by an appropriate (optional) +// qualifier function. +// Exception: This does not apply to tuples. Their string representation is +// informational only and cannot be used in an assignment. +// +// See [ZeroExpr] for a variant that returns an [ast.Expr]. +func ZeroString(t types.Type, qual types.Qualifier) (_ string, isValid bool) { + switch t := t.(type) { + case *types.Basic: + switch { + case t.Info()&types.IsBoolean != 0: + return "false", true + case t.Info()&types.IsNumeric != 0: + return "0", true + case t.Info()&types.IsString != 0: + return `""`, true + case t.Kind() == types.UnsafePointer: + fallthrough + case t.Kind() == types.UntypedNil: + return "nil", true + case t.Kind() == types.Invalid: + return "invalid", false + default: + panic(fmt.Sprintf("ZeroString for unexpected type %v", t)) + } + + case *types.Pointer, *types.Slice, *types.Chan, *types.Map, *types.Signature: + return "nil", true + + case *types.Interface: + if !t.IsMethodSet() { + return "invalid", false + } + return "nil", true + + case *types.Named: + switch under := t.Underlying().(type) { + case *types.Struct, *types.Array: + return types.TypeString(t, qual) + "{}", true + default: + return ZeroString(under, qual) + } + + case *types.Alias: + switch t.Underlying().(type) { + case *types.Struct, *types.Array: + return types.TypeString(t, qual) + "{}", true + default: + // A type parameter can have alias but alias type's underlying type + // can never be a type parameter. + // Use types.Unalias to preserve the info of type parameter instead + // of call Underlying() going right through and get the underlying + // type of the type parameter which is always an interface. + return ZeroString(types.Unalias(t), qual) + } + + case *types.Array, *types.Struct: + return types.TypeString(t, qual) + "{}", true + + case *types.TypeParam: + // Assumes func new is not shadowed. + return "*new(" + types.TypeString(t, qual) + ")", true + + case *types.Tuple: + // Tuples are not normal values. + // We are currently format as "(t[0], ..., t[n])". Could be something else. + isValid := true + components := make([]string, t.Len()) + for i := 0; i < t.Len(); i++ { + comp, ok := ZeroString(t.At(i).Type(), qual) + + components[i] = comp + isValid = isValid && ok + } + return "(" + strings.Join(components, ", ") + ")", isValid + + case *types.Union: + // Variables of these types cannot be created, so it makes + // no sense to ask for their zero value. + panic(fmt.Sprintf("invalid type for a variable: %v", t)) + + default: + panic(t) // unreachable. + } +} + +// ZeroExpr returns the ast.Expr representation of the zero value for any type t. +// The boolean result indicates whether the type is or contains an invalid type +// or a non-basic (constraint) interface type. +// +// Even for invalid input types, ZeroExpr may return a partially correct ast.Expr +// representation. The caller should use the returned isValid boolean to determine +// the validity of the expression. +// +// This function is designed for types suitable for variables and should not be +// used with Tuple or Union types.References to named types are qualified by an +// appropriate (optional) qualifier function. +// +// See [ZeroString] for a variant that returns a string. 
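+//
+// For example, for a numeric type the result is an *ast.BasicLit with
+// Value "0", and for a pointer, slice, channel, map, or function type
+// it is the identifier nil.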
+func ZeroExpr(t types.Type, qual types.Qualifier) (_ ast.Expr, isValid bool) { + switch t := t.(type) { + case *types.Basic: + switch { + case t.Info()&types.IsBoolean != 0: + return &ast.Ident{Name: "false"}, true + case t.Info()&types.IsNumeric != 0: + return &ast.BasicLit{Kind: token.INT, Value: "0"}, true + case t.Info()&types.IsString != 0: + return &ast.BasicLit{Kind: token.STRING, Value: `""`}, true + case t.Kind() == types.UnsafePointer: + fallthrough + case t.Kind() == types.UntypedNil: + return ast.NewIdent("nil"), true + case t.Kind() == types.Invalid: + return &ast.BasicLit{Kind: token.STRING, Value: `"invalid"`}, false + default: + panic(fmt.Sprintf("ZeroExpr for unexpected type %v", t)) + } + + case *types.Pointer, *types.Slice, *types.Chan, *types.Map, *types.Signature: + return ast.NewIdent("nil"), true + + case *types.Interface: + if !t.IsMethodSet() { + return &ast.BasicLit{Kind: token.STRING, Value: `"invalid"`}, false + } + return ast.NewIdent("nil"), true + + case *types.Named: + switch under := t.Underlying().(type) { + case *types.Struct, *types.Array: + return &ast.CompositeLit{ + Type: TypeExpr(t, qual), + }, true + default: + return ZeroExpr(under, qual) + } + + case *types.Alias: + switch t.Underlying().(type) { + case *types.Struct, *types.Array: + return &ast.CompositeLit{ + Type: TypeExpr(t, qual), + }, true + default: + return ZeroExpr(types.Unalias(t), qual) + } + + case *types.Array, *types.Struct: + return &ast.CompositeLit{ + Type: TypeExpr(t, qual), + }, true + + case *types.TypeParam: + return &ast.StarExpr{ // *new(T) + X: &ast.CallExpr{ + // Assumes func new is not shadowed. + Fun: ast.NewIdent("new"), + Args: []ast.Expr{ + ast.NewIdent(t.Obj().Name()), + }, + }, + }, true + + case *types.Tuple: + // Unlike ZeroString, there is no ast.Expr can express tuple by + // "(t[0], ..., t[n])". + panic(fmt.Sprintf("invalid type for a variable: %v", t)) + + case *types.Union: + // Variables of these types cannot be created, so it makes + // no sense to ask for their zero value. + panic(fmt.Sprintf("invalid type for a variable: %v", t)) + + default: + panic(t) // unreachable. + } +} + +// IsZeroExpr uses simple syntactic heuristics to report whether expr +// is a obvious zero value, such as 0, "", nil, or false. +// It cannot do better without type information. +func IsZeroExpr(expr ast.Expr) bool { + switch e := expr.(type) { + case *ast.BasicLit: + return e.Value == "0" || e.Value == `""` + case *ast.Ident: + return e.Name == "nil" || e.Name == "false" + default: + return false + } +} + +// TypeExpr returns syntax for the specified type. References to named types +// are qualified by an appropriate (optional) qualifier function. +// It may panic for types such as Tuple or Union. 
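+//
+// For example, for map[string]*ast.Ident the result is an *ast.MapType
+// whose Key and Value subtrees print as "string" and "*ast.Ident",
+// with the "ast" package name produced by qual.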
+func TypeExpr(t types.Type, qual types.Qualifier) ast.Expr { + switch t := t.(type) { + case *types.Basic: + switch t.Kind() { + case types.UnsafePointer: + return &ast.SelectorExpr{X: ast.NewIdent(qual(types.NewPackage("unsafe", "unsafe"))), Sel: ast.NewIdent("Pointer")} + default: + return ast.NewIdent(t.Name()) + } + + case *types.Pointer: + return &ast.UnaryExpr{ + Op: token.MUL, + X: TypeExpr(t.Elem(), qual), + } + + case *types.Array: + return &ast.ArrayType{ + Len: &ast.BasicLit{ + Kind: token.INT, + Value: fmt.Sprintf("%d", t.Len()), + }, + Elt: TypeExpr(t.Elem(), qual), + } + + case *types.Slice: + return &ast.ArrayType{ + Elt: TypeExpr(t.Elem(), qual), + } + + case *types.Map: + return &ast.MapType{ + Key: TypeExpr(t.Key(), qual), + Value: TypeExpr(t.Elem(), qual), + } + + case *types.Chan: + dir := ast.ChanDir(t.Dir()) + if t.Dir() == types.SendRecv { + dir = ast.SEND | ast.RECV + } + return &ast.ChanType{ + Dir: dir, + Value: TypeExpr(t.Elem(), qual), + } + + case *types.Signature: + var params []*ast.Field + for i := 0; i < t.Params().Len(); i++ { + params = append(params, &ast.Field{ + Type: TypeExpr(t.Params().At(i).Type(), qual), + Names: []*ast.Ident{ + { + Name: t.Params().At(i).Name(), + }, + }, + }) + } + if t.Variadic() { + last := params[len(params)-1] + last.Type = &ast.Ellipsis{Elt: last.Type.(*ast.ArrayType).Elt} + } + var returns []*ast.Field + for i := 0; i < t.Results().Len(); i++ { + returns = append(returns, &ast.Field{ + Type: TypeExpr(t.Results().At(i).Type(), qual), + }) + } + return &ast.FuncType{ + Params: &ast.FieldList{ + List: params, + }, + Results: &ast.FieldList{ + List: returns, + }, + } + + case *types.TypeParam: + pkgName := qual(t.Obj().Pkg()) + if pkgName == "" || t.Obj().Pkg() == nil { + return ast.NewIdent(t.Obj().Name()) + } + return &ast.SelectorExpr{ + X: ast.NewIdent(pkgName), + Sel: ast.NewIdent(t.Obj().Name()), + } + + // types.TypeParam also implements interface NamedOrAlias. To differentiate, + // case TypeParam need to be present before case NamedOrAlias. + // TODO(hxjiang): remove this comment once TypeArgs() is added to interface + // NamedOrAlias. + case NamedOrAlias: + var expr ast.Expr = ast.NewIdent(t.Obj().Name()) + if pkgName := qual(t.Obj().Pkg()); pkgName != "." && pkgName != "" { + expr = &ast.SelectorExpr{ + X: ast.NewIdent(pkgName), + Sel: expr.(*ast.Ident), + } + } + + // TODO(hxjiang): call t.TypeArgs after adding method TypeArgs() to + // typesinternal.NamedOrAlias. + if hasTypeArgs, ok := t.(interface{ TypeArgs() *types.TypeList }); ok { + if typeArgs := hasTypeArgs.TypeArgs(); typeArgs != nil && typeArgs.Len() > 0 { + var indices []ast.Expr + for i := range typeArgs.Len() { + indices = append(indices, TypeExpr(typeArgs.At(i), qual)) + } + expr = &ast.IndexListExpr{ + X: expr, + Indices: indices, + } + } + } + + return expr + + case *types.Struct: + return ast.NewIdent(t.String()) + + case *types.Interface: + return ast.NewIdent(t.String()) + + case *types.Union: + if t.Len() == 0 { + panic("Union type should have at least one term") + } + // Same as go/ast, the return expression will put last term in the + // Y field at topmost level of BinaryExpr. 
+ // For union of type "float32 | float64 | int64", the structure looks + // similar to: + // { + // X: { + // X: float32, + // Op: | + // Y: float64, + // } + // Op: |, + // Y: int64, + // } + var union ast.Expr + for i := range t.Len() { + term := t.Term(i) + termExpr := TypeExpr(term.Type(), qual) + if term.Tilde() { + termExpr = &ast.UnaryExpr{ + Op: token.TILDE, + X: termExpr, + } + } + if i == 0 { + union = termExpr + } else { + union = &ast.BinaryExpr{ + X: union, + Op: token.OR, + Y: termExpr, + } + } + } + return union + + case *types.Tuple: + panic("invalid input type types.Tuple") + + default: + panic("unreachable") + } +} diff --git a/vendor/golang.org/x/tools/internal/versions/constraint.go b/vendor/golang.org/x/tools/internal/versions/constraint.go deleted file mode 100644 index 179063d48..000000000 --- a/vendor/golang.org/x/tools/internal/versions/constraint.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package versions - -import "go/build/constraint" - -// ConstraintGoVersion is constraint.GoVersion (if built with go1.21+). -// Otherwise nil. -// -// Deprecate once x/tools is after go1.21. -var ConstraintGoVersion func(x constraint.Expr) string diff --git a/vendor/golang.org/x/tools/internal/versions/constraint_go121.go b/vendor/golang.org/x/tools/internal/versions/constraint_go121.go deleted file mode 100644 index 38011407d..000000000 --- a/vendor/golang.org/x/tools/internal/versions/constraint_go121.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.21 -// +build go1.21 - -package versions - -import "go/build/constraint" - -func init() { - ConstraintGoVersion = constraint.GoVersion -} diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain.go b/vendor/golang.org/x/tools/internal/versions/toolchain.go deleted file mode 100644 index 377bf7a53..000000000 --- a/vendor/golang.org/x/tools/internal/versions/toolchain.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package versions - -// toolchain is maximum version (<1.22) that the go toolchain used -// to build the current tool is known to support. -// -// When a tool is built with >=1.22, the value of toolchain is unused. -// -// x/tools does not support building with go <1.18. So we take this -// as the minimum possible maximum. -var toolchain string = Go1_18 diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain_go119.go b/vendor/golang.org/x/tools/internal/versions/toolchain_go119.go deleted file mode 100644 index f65beed9d..000000000 --- a/vendor/golang.org/x/tools/internal/versions/toolchain_go119.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build go1.19 -// +build go1.19 - -package versions - -func init() { - if Compare(toolchain, Go1_19) < 0 { - toolchain = Go1_19 - } -} diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain_go120.go b/vendor/golang.org/x/tools/internal/versions/toolchain_go120.go deleted file mode 100644 index 1a9efa126..000000000 --- a/vendor/golang.org/x/tools/internal/versions/toolchain_go120.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.20 -// +build go1.20 - -package versions - -func init() { - if Compare(toolchain, Go1_20) < 0 { - toolchain = Go1_20 - } -} diff --git a/vendor/golang.org/x/tools/internal/versions/types.go b/vendor/golang.org/x/tools/internal/versions/types.go index 562eef21f..0fc10ce4e 100644 --- a/vendor/golang.org/x/tools/internal/versions/types.go +++ b/vendor/golang.org/x/tools/internal/versions/types.go @@ -5,15 +5,29 @@ package versions import ( + "go/ast" "go/types" ) -// GoVersion returns the Go version of the type package. -// It returns zero if no version can be determined. -func GoVersion(pkg *types.Package) string { - // TODO(taking): x/tools can call GoVersion() [from 1.21] after 1.25. - if pkg, ok := any(pkg).(interface{ GoVersion() string }); ok { - return pkg.GoVersion() +// FileVersion returns a file's Go version. +// The reported version is an unknown Future version if a +// version cannot be determined. +func FileVersion(info *types.Info, file *ast.File) string { + // In tools built with Go >= 1.22, the Go version of a file + // follow a cascades of sources: + // 1) types.Info.FileVersion, which follows the cascade: + // 1.a) file version (ast.File.GoVersion), + // 1.b) the package version (types.Config.GoVersion), or + // 2) is some unknown Future version. + // + // File versions require a valid package version to be provided to types + // in Config.GoVersion. Config.GoVersion is either from the package's module + // or the toolchain (go run). This value should be provided by go/packages + // or unitchecker.Config.GoVersion. + if v := info.FileVersions[file]; IsValid(v) { + return v } - return "" + // Note: we could instead return runtime.Version() [if valid]. + // This would act as a max version on what a tool can support. + return Future } diff --git a/vendor/golang.org/x/tools/internal/versions/types_go121.go b/vendor/golang.org/x/tools/internal/versions/types_go121.go deleted file mode 100644 index b4345d334..000000000 --- a/vendor/golang.org/x/tools/internal/versions/types_go121.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.22 -// +build !go1.22 - -package versions - -import ( - "go/ast" - "go/types" -) - -// FileVersion returns a language version (<=1.21) derived from runtime.Version() -// or an unknown future version. -func FileVersion(info *types.Info, file *ast.File) string { - // In x/tools built with Go <= 1.21, we do not have Info.FileVersions - // available. We use a go version derived from the toolchain used to - // compile the tool by default. - // This will be <= go1.21. We take this as the maximum version that - // this tool can support. - // - // There are no features currently in x/tools that need to tell fine grained - // differences for versions <1.22. 
- return toolchain -} - -// InitFileVersions is a noop when compiled with this Go version. -func InitFileVersions(*types.Info) {} diff --git a/vendor/golang.org/x/tools/internal/versions/types_go122.go b/vendor/golang.org/x/tools/internal/versions/types_go122.go deleted file mode 100644 index aac5db62c..000000000 --- a/vendor/golang.org/x/tools/internal/versions/types_go122.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.22 -// +build go1.22 - -package versions - -import ( - "go/ast" - "go/types" -) - -// FileVersion returns a file's Go version. -// The reported version is an unknown Future version if a -// version cannot be determined. -func FileVersion(info *types.Info, file *ast.File) string { - // In tools built with Go >= 1.22, the Go version of a file - // follow a cascades of sources: - // 1) types.Info.FileVersion, which follows the cascade: - // 1.a) file version (ast.File.GoVersion), - // 1.b) the package version (types.Config.GoVersion), or - // 2) is some unknown Future version. - // - // File versions require a valid package version to be provided to types - // in Config.GoVersion. Config.GoVersion is either from the package's module - // or the toolchain (go run). This value should be provided by go/packages - // or unitchecker.Config.GoVersion. - if v := info.FileVersions[file]; IsValid(v) { - return v - } - // Note: we could instead return runtime.Version() [if valid]. - // This would act as a max version on what a tool can support. - return Future -} - -// InitFileVersions initializes info to record Go versions for Go files. -func InitFileVersions(info *types.Info) { - info.FileVersions = make(map[*ast.File]string) -} diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go index 6ad1b1c1d..06a3f7106 100644 --- a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go @@ -1,4 +1,4 @@ -// Copyright 2024 Google LLC +// Copyright 2025 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md index 0854d298e..d9bfa6e1e 100644 --- a/vendor/google.golang.org/grpc/CONTRIBUTING.md +++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md @@ -4,7 +4,7 @@ We definitely welcome your patches and contributions to gRPC! Please read the gR organization's [governance rules](https://github.com/grpc/grpc-community/blob/master/governance.md) and [contribution guidelines](https://github.com/grpc/grpc-community/blob/master/CONTRIBUTING.md) before proceeding. -If you are new to github, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/) +If you are new to GitHub, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/) ## Legal requirements @@ -25,8 +25,8 @@ How to get your contributions merged smoothly and quickly. is a great place to start. These issues are well-documented and usually can be resolved with a single pull request. -- If you are adding a new file, make sure it has the copyright message template - at the top as a comment. 
You can copy over the message from an existing file +- If you are adding a new file, make sure it has the copyright message template + at the top as a comment. You can copy over the message from an existing file and update the year. - The grpc package should only depend on standard Go packages and a small number @@ -39,12 +39,12 @@ How to get your contributions merged smoothly and quickly. proposal](https://github.com/grpc/proposal). - Provide a good **PR description** as a record of **what** change is being made - and **why** it was made. Link to a github issue if it exists. + and **why** it was made. Link to a GitHub issue if it exists. -- If you want to fix formatting or style, consider whether your changes are an - obvious improvement or might be considered a personal preference. If a style - change is based on preference, it likely will not be accepted. If it corrects - widely agreed-upon anti-patterns, then please do create a PR and explain the +- If you want to fix formatting or style, consider whether your changes are an + obvious improvement or might be considered a personal preference. If a style + change is based on preference, it likely will not be accepted. If it corrects + widely agreed-upon anti-patterns, then please do create a PR and explain the benefits of the change. - Unless your PR is trivial, you should expect there will be reviewer comments diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index b181f386a..c9b343c71 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -73,17 +73,6 @@ func unregisterForTesting(name string) { delete(m, name) } -// connectedAddress returns the connected address for a SubConnState. The -// address is only valid if the state is READY. -func connectedAddress(scs SubConnState) resolver.Address { - return scs.connectedAddress -} - -// setConnectedAddress sets the connected address for a SubConnState. -func setConnectedAddress(scs *SubConnState, addr resolver.Address) { - scs.connectedAddress = addr -} - func init() { internal.BalancerUnregister = unregisterForTesting internal.ConnectedAddress = connectedAddress @@ -106,54 +95,6 @@ func Get(name string) Builder { return nil } -// A SubConn represents a single connection to a gRPC backend service. -// -// Each SubConn contains a list of addresses. -// -// All SubConns start in IDLE, and will not try to connect. To trigger the -// connecting, Balancers must call Connect. If a connection re-enters IDLE, -// Balancers must call Connect again to trigger a new connection attempt. -// -// gRPC will try to connect to the addresses in sequence, and stop trying the -// remainder once the first connection is successful. If an attempt to connect -// to all addresses encounters an error, the SubConn will enter -// TRANSIENT_FAILURE for a backoff period, and then transition to IDLE. -// -// Once established, if a connection is lost, the SubConn will transition -// directly to IDLE. -// -// This interface is to be implemented by gRPC. Users should not need their own -// implementation of this interface. For situations like testing, any -// implementations should embed this interface. This allows gRPC to add new -// methods to this interface. -type SubConn interface { - // UpdateAddresses updates the addresses used in this SubConn. - // gRPC checks if currently-connected address is still in the new list. - // If it's in the list, the connection will be kept. 
- // If it's not in the list, the connection will gracefully closed, and - // a new connection will be created. - // - // This will trigger a state transition for the SubConn. - // - // Deprecated: this method will be removed. Create new SubConns for new - // addresses instead. - UpdateAddresses([]resolver.Address) - // Connect starts the connecting for this SubConn. - Connect() - // GetOrBuildProducer returns a reference to the existing Producer for this - // ProducerBuilder in this SubConn, or, if one does not currently exist, - // creates a new one and returns it. Returns a close function which must - // be called when the Producer is no longer needed. - GetOrBuildProducer(ProducerBuilder) (p Producer, close func()) - // Shutdown shuts down the SubConn gracefully. Any started RPCs will be - // allowed to complete. No future calls should be made on the SubConn. - // One final state update will be delivered to the StateListener (or - // UpdateSubConnState; deprecated) with ConnectivityState of Shutdown to - // indicate the shutdown operation. This may be delivered before - // in-progress RPCs are complete and the actual connection is closed. - Shutdown() -} - // NewSubConnOptions contains options to create new SubConn. type NewSubConnOptions struct { // CredsBundle is the credentials bundle that will be used in the created @@ -188,6 +129,13 @@ type State struct { // brand new implementation of this interface. For the situations like // testing, the new implementation should embed this interface. This allows // gRPC to add new methods to this interface. +// +// NOTICE: This interface is intended to be implemented by gRPC, or intercepted +// by custom load balancing polices. Users should not need their own complete +// implementation of this interface -- they should always delegate to a +// ClientConn passed to Builder.Build() by embedding it in their +// implementations. An embedded ClientConn must never be nil, or runtime panics +// will occur. type ClientConn interface { // NewSubConn is called by balancer to create a new SubConn. // It doesn't block and wait for the connections to be established. @@ -226,6 +174,17 @@ type ClientConn interface { // // Deprecated: Use the Target field in the BuildOptions instead. Target() string + + // MetricsRecorder provides the metrics recorder that balancers can use to + // record metrics. Balancer implementations which do not register metrics on + // metrics registry and record on them can ignore this method. The returned + // MetricsRecorder is guaranteed to never be nil. + MetricsRecorder() estats.MetricsRecorder + + // EnforceClientConnEmbedding is included to force implementers to embed + // another implementation of this interface, allowing gRPC to add methods + // without breaking users. + internal.EnforceClientConnEmbedding } // BuildOptions contains additional information for Build. @@ -257,10 +216,6 @@ type BuildOptions struct { // same resolver.Target as passed to the resolver. See the documentation for // the resolver.Target type for details about what it contains. Target resolver.Target - // MetricsRecorder is the metrics recorder that balancers can use to record - // metrics. Balancer implementations which do not register metrics on - // metrics registry and record on them can ignore this field. - MetricsRecorder estats.MetricsRecorder } // Builder creates a balancer. @@ -421,18 +376,6 @@ type ExitIdler interface { ExitIdle() } -// SubConnState describes the state of a SubConn. 
-type SubConnState struct { - // ConnectivityState is the connectivity state of the SubConn. - ConnectivityState connectivity.State - // ConnectionError is set if the ConnectivityState is TransientFailure, - // describing the reason the SubConn failed. Otherwise, it is nil. - ConnectionError error - // connectedAddr contains the connected address when ConnectivityState is - // Ready. Otherwise, it is indeterminate. - connectedAddress resolver.Address -} - // ClientConnState describes the state of a ClientConn relevant to the // balancer. type ClientConnState struct { @@ -445,20 +388,3 @@ type ClientConnState struct { // ErrBadResolverState may be returned by UpdateClientConnState to indicate a // problem with the provided name resolver data. var ErrBadResolverState = errors.New("bad resolver state") - -// A ProducerBuilder is a simple constructor for a Producer. It is used by the -// SubConn to create producers when needed. -type ProducerBuilder interface { - // Build creates a Producer. The first parameter is always a - // grpc.ClientConnInterface (a type to allow creating RPCs/streams on the - // associated SubConn), but is declared as `any` to avoid a dependency - // cycle. Should also return a close function that will be called when all - // references to the Producer have been given up. - Build(grpcClientConnInterface any) (p Producer, close func()) -} - -// A Producer is a type shared among potentially many consumers. It is -// associated with a SubConn, and an implementation will typically contain -// other methods to provide additional functionality, e.g. configuration or -// subscription registration. -type Producer any diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go index 2b87bd79c..d5ed172ae 100644 --- a/vendor/google.golang.org/grpc/balancer/base/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -133,7 +133,7 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { } } // If resolver state contains no addresses, return an error so ClientConn - // will trigger re-resolve. Also records this as an resolver error, so when + // will trigger re-resolve. Also records this as a resolver error, so when // the overall state turns transient failure, the error message will have // the zero address information. if len(s.ResolverState.Addresses) == 0 { diff --git a/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go b/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go new file mode 100644 index 000000000..421c4fecc --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go @@ -0,0 +1,358 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package endpointsharding implements a load balancing policy that manages +// homogeneous child policies each owning a single endpoint. 
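+//
+// For example (illustrative), a resolver update carrying three
+// endpoints yields three child balancers, one per endpoint, each built
+// by the configured ChildBuilderFunc.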
+// +// # Experimental +// +// Notice: This package is EXPERIMENTAL and may be changed or removed in a +// later release. +package endpointsharding + +import ( + "errors" + rand "math/rand/v2" + "sync" + "sync/atomic" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/resolver" +) + +// ChildState is the balancer state of a child along with the endpoint which +// identifies the child balancer. +type ChildState struct { + Endpoint resolver.Endpoint + State balancer.State + + // Balancer exposes only the ExitIdler interface of the child LB policy. + // Other methods of the child policy are called only by endpointsharding. + Balancer balancer.ExitIdler +} + +// Options are the options to configure the behaviour of the +// endpointsharding balancer. +type Options struct { + // DisableAutoReconnect allows the balancer to keep child balancer in the + // IDLE state until they are explicitly triggered to exit using the + // ChildState obtained from the endpointsharding picker. When set to false, + // the endpointsharding balancer will automatically call ExitIdle on child + // connections that report IDLE. + DisableAutoReconnect bool +} + +// ChildBuilderFunc creates a new balancer with the ClientConn. It has the same +// type as the balancer.Builder.Build method. +type ChildBuilderFunc func(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer + +// NewBalancer returns a load balancing policy that manages homogeneous child +// policies each owning a single endpoint. The endpointsharding balancer +// forwards the LoadBalancingConfig in ClientConn state updates to its children. +func NewBalancer(cc balancer.ClientConn, opts balancer.BuildOptions, childBuilder ChildBuilderFunc, esOpts Options) balancer.Balancer { + es := &endpointSharding{ + cc: cc, + bOpts: opts, + esOpts: esOpts, + childBuilder: childBuilder, + } + es.children.Store(resolver.NewEndpointMap()) + return es +} + +// endpointSharding is a balancer that wraps child balancers. It creates a child +// balancer with child config for every unique Endpoint received. It updates the +// child states on any update from parent or child. +type endpointSharding struct { + cc balancer.ClientConn + bOpts balancer.BuildOptions + esOpts Options + childBuilder ChildBuilderFunc + + // childMu synchronizes calls to any single child. It must be held for all + // calls into a child. To avoid deadlocks, do not acquire childMu while + // holding mu. + childMu sync.Mutex + children atomic.Pointer[resolver.EndpointMap] // endpoint -> *balancerWrapper + + // inhibitChildUpdates is set during UpdateClientConnState/ResolverError + // calls (calls to children will each produce an update, only want one + // update). + inhibitChildUpdates atomic.Bool + + // mu synchronizes access to the state stored in balancerWrappers in the + // children field. mu must not be held during calls into a child since + // synchronous calls back from the child may require taking mu, causing a + // deadlock. To avoid deadlocks, do not acquire childMu while holding mu. + mu sync.Mutex +} + +// UpdateClientConnState creates a child for new endpoints and deletes children +// for endpoints that are no longer present. It also updates all the children, +// and sends a single synchronous update of the childrens' aggregated state at +// the end of the UpdateClientConnState operation. If any endpoint has no +// addresses it will ignore that endpoint. 
+// Otherwise, it returns the first error found from a child, but fully
+// processes the new update.
+func (es *endpointSharding) UpdateClientConnState(state balancer.ClientConnState) error {
+	es.childMu.Lock()
+	defer es.childMu.Unlock()
+
+	es.inhibitChildUpdates.Store(true)
+	defer func() {
+		es.inhibitChildUpdates.Store(false)
+		es.updateState()
+	}()
+	var ret error
+
+	children := es.children.Load()
+	newChildren := resolver.NewEndpointMap()
+
+	// Update/Create new children.
+	for _, endpoint := range state.ResolverState.Endpoints {
+		if _, ok := newChildren.Get(endpoint); ok {
+			// Endpoint child was already created, continue to avoid duplicate
+			// update.
+			continue
+		}
+		var childBalancer *balancerWrapper
+		if val, ok := children.Get(endpoint); ok {
+			childBalancer = val.(*balancerWrapper)
+			// Endpoint attributes may have changed, update the stored endpoint.
+			es.mu.Lock()
+			childBalancer.childState.Endpoint = endpoint
+			es.mu.Unlock()
+		} else {
+			childBalancer = &balancerWrapper{
+				childState: ChildState{Endpoint: endpoint},
+				ClientConn: es.cc,
+				es:         es,
+			}
+			childBalancer.childState.Balancer = childBalancer
+			childBalancer.child = es.childBuilder(childBalancer, es.bOpts)
+		}
+		newChildren.Set(endpoint, childBalancer)
+		if err := childBalancer.updateClientConnStateLocked(balancer.ClientConnState{
+			BalancerConfig: state.BalancerConfig,
+			ResolverState: resolver.State{
+				Endpoints:  []resolver.Endpoint{endpoint},
+				Attributes: state.ResolverState.Attributes,
+			},
+		}); err != nil && ret == nil {
+			// Return the first error found, and always commit full processing
+			// of updating children. Callers that need more specific
+			// per-endpoint errors should perform those validations themselves;
+			// this is a current limitation kept for simplicity's sake.
+			ret = err
+		}
+	}
+	// Delete old children that are no longer present.
+	for _, e := range children.Keys() {
+		child, _ := children.Get(e)
+		if _, ok := newChildren.Get(e); !ok {
+			child.(*balancerWrapper).closeLocked()
+		}
+	}
+	es.children.Store(newChildren)
+	if newChildren.Len() == 0 {
+		return balancer.ErrBadResolverState
+	}
+	return ret
+}
+
+// ResolverError forwards the resolver error to all of the endpointSharding's
+// children and sends a single synchronous update of the childStates at the end
+// of the ResolverError operation.
+func (es *endpointSharding) ResolverError(err error) {
+	es.childMu.Lock()
+	defer es.childMu.Unlock()
+	es.inhibitChildUpdates.Store(true)
+	defer func() {
+		es.inhibitChildUpdates.Store(false)
+		es.updateState()
+	}()
+	children := es.children.Load()
+	for _, child := range children.Values() {
+		child.(*balancerWrapper).resolverErrorLocked(err)
+	}
+}
+
+func (es *endpointSharding) UpdateSubConnState(balancer.SubConn, balancer.SubConnState) {
+	// UpdateSubConnState is deprecated.
+}
+
+func (es *endpointSharding) Close() {
+	es.childMu.Lock()
+	defer es.childMu.Unlock()
+	children := es.children.Load()
+	for _, child := range children.Values() {
+		child.(*balancerWrapper).closeLocked()
+	}
+}
+
+// updateState updates this component's state. It sends the aggregated state,
+// and, if needed, a picker with round robin behavior over all the child states
+// present.
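+// Aggregation follows the usual precedence: READY if any child reports READY,
+// otherwise CONNECTING, otherwise IDLE, otherwise TRANSIENT_FAILURE.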
+func (es *endpointSharding) updateState() {
+	if es.inhibitChildUpdates.Load() {
+		return
+	}
+	var readyPickers, connectingPickers, idlePickers, transientFailurePickers []balancer.Picker
+
+	es.mu.Lock()
+	defer es.mu.Unlock()
+
+	children := es.children.Load()
+	childStates := make([]ChildState, 0, children.Len())
+
+	for _, child := range children.Values() {
+		bw := child.(*balancerWrapper)
+		childState := bw.childState
+		childStates = append(childStates, childState)
+		childPicker := childState.State.Picker
+		switch childState.State.ConnectivityState {
+		case connectivity.Ready:
+			readyPickers = append(readyPickers, childPicker)
+		case connectivity.Connecting:
+			connectingPickers = append(connectingPickers, childPicker)
+		case connectivity.Idle:
+			idlePickers = append(idlePickers, childPicker)
+		case connectivity.TransientFailure:
+			transientFailurePickers = append(transientFailurePickers, childPicker)
+			// connectivity.Shutdown shouldn't appear.
+		}
+	}
+
+	// Construct the round robin picker based on the aggregated state. Whatever
+	// the aggregated state is, use only the pickers of children currently in
+	// that state.
+	var aggState connectivity.State
+	var pickers []balancer.Picker
+	if len(readyPickers) >= 1 {
+		aggState = connectivity.Ready
+		pickers = readyPickers
+	} else if len(connectingPickers) >= 1 {
+		aggState = connectivity.Connecting
+		pickers = connectingPickers
+	} else if len(idlePickers) >= 1 {
+		aggState = connectivity.Idle
+		pickers = idlePickers
+	} else if len(transientFailurePickers) >= 1 {
+		aggState = connectivity.TransientFailure
+		pickers = transientFailurePickers
+	} else {
+		aggState = connectivity.TransientFailure
+		pickers = []balancer.Picker{base.NewErrPicker(errors.New("no children to pick from"))}
+	} // No children (resolver error before valid update).
+	p := &pickerWithChildStates{
+		pickers:     pickers,
+		childStates: childStates,
+		next:        uint32(rand.IntN(len(pickers))),
+	}
+	es.cc.UpdateState(balancer.State{
+		ConnectivityState: aggState,
+		Picker:            p,
+	})
+}
+
+// pickerWithChildStates delegates to the pickers it holds in a round robin
+// fashion. It also contains the childStates of all the endpointSharding's
+// children.
+type pickerWithChildStates struct {
+	pickers     []balancer.Picker
+	childStates []ChildState
+	next        uint32
+}
+
+func (p *pickerWithChildStates) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
+	nextIndex := atomic.AddUint32(&p.next, 1)
+	picker := p.pickers[nextIndex%uint32(len(p.pickers))]
+	return picker.Pick(info)
+}
+
+// ChildStatesFromPicker returns the state of all the children managed by the
+// endpoint sharding balancer that created this picker.
+func ChildStatesFromPicker(picker balancer.Picker) []ChildState {
+	p, ok := picker.(*pickerWithChildStates)
+	if !ok {
+		return nil
+	}
+	return p.childStates
+}
+
+// balancerWrapper is a wrapper of a balancer. It identifies a child balancer
+// by its endpoint, and persists the most recent child balancer state.
+type balancerWrapper struct {
+	// The following fields are initialized at build time and read-only after
+	// that and therefore do not need to be guarded by a mutex.
+
+	// child contains the wrapped balancer. Access its methods only through
+	// methods on balancerWrapper to ensure proper synchronization.
+	child               balancer.Balancer
+	balancer.ClientConn // embed to intercept UpdateState; doesn't deal with SubConns
+
+	es *endpointSharding
+
+	// childState below is guarded by es.mu; isClosed is guarded by es.childMu.
+
+	childState ChildState
+	isClosed   bool
+}
+
+func (bw *balancerWrapper) UpdateState(state balancer.State) {
+	bw.es.mu.Lock()
+	bw.childState.State = state
+	bw.es.mu.Unlock()
+	if state.ConnectivityState == connectivity.Idle && !bw.es.esOpts.DisableAutoReconnect {
+		bw.ExitIdle()
+	}
+	bw.es.updateState()
+}
+
+// ExitIdle pings an IDLE child balancer to exit idle in a new goroutine to
+// avoid deadlocks due to synchronous balancer state updates.
+func (bw *balancerWrapper) ExitIdle() {
+	if ei, ok := bw.child.(balancer.ExitIdler); ok {
+		go func() {
+			bw.es.childMu.Lock()
+			if !bw.isClosed {
+				ei.ExitIdle()
+			}
+			bw.es.childMu.Unlock()
+		}()
+	}
+}
+
+// updateClientConnStateLocked delivers the ClientConnState to the child
+// balancer. Callers must hold the child mutex of the parent endpointsharding
+// balancer.
+func (bw *balancerWrapper) updateClientConnStateLocked(ccs balancer.ClientConnState) error {
+	return bw.child.UpdateClientConnState(ccs)
+}
+
+// closeLocked closes the child balancer. Callers must hold the child mutex of
+// the parent endpointsharding balancer.
+func (bw *balancerWrapper) closeLocked() {
+	bw.child.Close()
+	bw.isClosed = true
+}
+
+func (bw *balancerWrapper) resolverErrorLocked(err error) {
+	bw.child.ResolverError(err)
+}
diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go b/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go
similarity index 55%
rename from vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go
rename to vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go
index 6635f7bca..7d66cb491 100644
--- a/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go
+++ b/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go
@@ -1,6 +1,5 @@
 /*
- *
- * Copyright 2022 gRPC authors.
+ * Copyright 2024 gRPC authors.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -16,17 +15,21 @@
  *
  */
 
-package grpcsync
+// Package internal contains code internal to the pickfirst package.
+package internal
 
 import (
-	"sync"
+	rand "math/rand/v2"
+	"time"
 )
 
-// OnceFunc returns a function wrapping f which ensures f is only executed
-// once even if the returned function is executed multiple times.
-func OnceFunc(f func()) func() {
-	var once sync.Once
-	return func() {
-		once.Do(f)
+var (
+	// RandShuffle pseudo-randomizes the order of addresses.
+	RandShuffle = rand.Shuffle
+	// TimeAfterFunc allows mocking the timer for testing connection delay
+	// related functionality.
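+	// Tests can, for instance, replace it with a stub that invokes f
+	// synchronously and returns a no-op cancel function.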
+ TimeAfterFunc = func(d time.Duration, f func()) func() { + timer := time.AfterFunc(d, f) + return func() { timer.Stop() } } -} +) diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go index 4d69b4052..ea8899818 100644 --- a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go +++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go @@ -23,21 +23,26 @@ import ( "encoding/json" "errors" "fmt" - "math/rand" + rand "math/rand/v2" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/pickfirst/internal" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/envconfig" internalgrpclog "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" + + _ "google.golang.org/grpc/balancer/pickfirst/pickfirstleaf" // For automatically registering the new pickfirst if required. ) func init() { + if envconfig.NewPickFirstEnabled { + return + } balancer.Register(pickfirstBuilder{}) - internal.ShuffleAddressListForTesting = func(n int, swap func(i, j int)) { rand.Shuffle(n, swap) } } var logger = grpclog.Component("pick-first-lb") @@ -103,10 +108,13 @@ func (b *pickfirstBalancer) ResolverError(err error) { }) } +// Shuffler is an interface for shuffling an address list. type Shuffler interface { ShuffleAddressListForTesting(n int, swap func(i, j int)) } +// ShuffleAddressListForTesting pseudo-randomizes the order of addresses. n +// is the number of elements. swap swaps the elements with indexes i and j. func ShuffleAddressListForTesting(n int, swap func(i, j int)) { rand.Shuffle(n, swap) } func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { @@ -140,7 +148,7 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState // within each endpoint. - A61 if cfg.ShuffleAddressList { endpoints = append([]resolver.Endpoint{}, endpoints...) - internal.ShuffleAddressListForTesting.(func(int, func(int, int)))(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] }) + internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] }) } // "Flatten the list by concatenating the ordered list of addresses for each diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go new file mode 100644 index 000000000..113181e6b --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go @@ -0,0 +1,932 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *
+ */
+
+// Package pickfirstleaf contains the pick_first load balancing policy which
+// will be the universal leaf policy after dualstack changes are implemented.
+//
+// # Experimental
+//
+// Notice: This package is EXPERIMENTAL and may be changed or removed in a
+// later release.
+package pickfirstleaf
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net"
+	"net/netip"
+	"sync"
+	"time"
+
+	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/balancer/pickfirst/internal"
+	"google.golang.org/grpc/connectivity"
+	expstats "google.golang.org/grpc/experimental/stats"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/internal/envconfig"
+	internalgrpclog "google.golang.org/grpc/internal/grpclog"
+	"google.golang.org/grpc/internal/pretty"
+	"google.golang.org/grpc/resolver"
+	"google.golang.org/grpc/serviceconfig"
+)
+
+func init() {
+	if envconfig.NewPickFirstEnabled {
+		// Register as the default pick_first balancer.
+		Name = "pick_first"
+	}
+	balancer.Register(pickfirstBuilder{})
+}
+
+type (
+	// enableHealthListenerKeyType is a unique key type used in resolver
+	// attributes to indicate whether the health listener usage is enabled.
+	enableHealthListenerKeyType struct{}
+	// managedByPickfirstKeyType is an attribute key type to inform Outlier
+	// Detection that the generic health listener is being used.
+	// TODO: https://github.com/grpc/grpc-go/issues/7915 - Remove this when
+	// implementing the dualstack design. This is a hack. Once Dualstack is
+	// completed, outlier detection will stop sending ejection updates through
+	// the connectivity listener.
+	managedByPickfirstKeyType struct{}
+)
+
+var (
+	logger = grpclog.Component("pick-first-leaf-lb")
+	// Name is the name of the pick_first_leaf balancer.
+	// It is changed to "pick_first" in init() if this balancer is to be
+	// registered as the default pickfirst.
+	Name                 = "pick_first_leaf"
+	disconnectionsMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{
+		Name:        "grpc.lb.pick_first.disconnections",
+		Description: "EXPERIMENTAL. Number of times the selected subchannel becomes disconnected.",
+		Unit:        "disconnection",
+		Labels:      []string{"grpc.target"},
+		Default:     false,
+	})
+	connectionAttemptsSucceededMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{
+		Name:        "grpc.lb.pick_first.connection_attempts_succeeded",
+		Description: "EXPERIMENTAL. Number of successful connection attempts.",
+		Unit:        "attempt",
+		Labels:      []string{"grpc.target"},
+		Default:     false,
+	})
+	connectionAttemptsFailedMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{
+		Name:        "grpc.lb.pick_first.connection_attempts_failed",
+		Description: "EXPERIMENTAL. Number of failed connection attempts.",
+		Unit:        "attempt",
+		Labels:      []string{"grpc.target"},
+		Default:     false,
+	})
+)
+
+const (
+	// TODO: change to pick-first when this becomes the default pick_first policy.
+	logPrefix = "[pick-first-leaf-lb %p] "
+	// connectionDelayInterval is the time to wait during the happy eyeballs
+	// pass before starting the next connection attempt.
+	connectionDelayInterval = 250 * time.Millisecond
+)
+
+type ipAddrFamily int
+
+const (
+	// ipAddrFamilyUnknown represents strings that can't be parsed as an IP
+	// address.
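+	// For example, a hostname such as "example.com:80" handed to the
+	// passthrough resolver falls into this family.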
+	ipAddrFamilyUnknown ipAddrFamily = iota
+	ipAddrFamilyV4
+	ipAddrFamilyV6
+)
+
+type pickfirstBuilder struct{}
+
+func (pickfirstBuilder) Build(cc balancer.ClientConn, bo balancer.BuildOptions) balancer.Balancer {
+	b := &pickfirstBalancer{
+		cc:              cc,
+		target:          bo.Target.String(),
+		metricsRecorder: cc.MetricsRecorder(),
+
+		subConns:              resolver.NewAddressMap(),
+		state:                 connectivity.Connecting,
+		cancelConnectionTimer: func() {},
+	}
+	b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b))
+	return b
+}
+
+func (b pickfirstBuilder) Name() string {
+	return Name
+}
+
+func (pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) {
+	var cfg pfConfig
+	if err := json.Unmarshal(js, &cfg); err != nil {
+		return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err)
+	}
+	return cfg, nil
+}
+
+// EnableHealthListener updates the state to configure pickfirst for using a
+// generic health listener.
+func EnableHealthListener(state resolver.State) resolver.State {
+	state.Attributes = state.Attributes.WithValue(enableHealthListenerKeyType{}, true)
+	return state
+}
+
+// IsManagedByPickfirst returns whether an address belongs to a SubConn
+// managed by the pickfirst LB policy.
+// TODO: https://github.com/grpc/grpc-go/issues/7915 - This is a hack to disable
+// outlier_detection via the connectivity listener when using pick_first.
+// Once Dualstack changes are complete, all SubConns will be created by
+// pick_first and outlier detection will only use the health listener for
+// ejection. This hack can then be removed.
+func IsManagedByPickfirst(addr resolver.Address) bool {
+	return addr.BalancerAttributes.Value(managedByPickfirstKeyType{}) != nil
+}
+
+type pfConfig struct {
+	serviceconfig.LoadBalancingConfig `json:"-"`
+
+	// If set to true, instructs the LB policy to shuffle the order of the list
+	// of endpoints received from the name resolver before attempting to
+	// connect to them.
+	ShuffleAddressList bool `json:"shuffleAddressList"`
+}
+
+// scData keeps track of the current state of the subConn.
+// It is not safe for concurrent access.
+type scData struct {
+	// The following fields are initialized at build time and read-only after
+	// that.
+	subConn balancer.SubConn
+	addr    resolver.Address
+
+	rawConnectivityState connectivity.State
+	// The effective connectivity state, based on the raw connectivity state
+	// and health state, after following the sticky TransientFailure behaviour
+	// defined in A62.
+	effectiveState              connectivity.State
+	lastErr                     error
+	connectionFailedInFirstPass bool
+}
+
+func (b *pickfirstBalancer) newSCData(addr resolver.Address) (*scData, error) {
+	addr.BalancerAttributes = addr.BalancerAttributes.WithValue(managedByPickfirstKeyType{}, true)
+	sd := &scData{
+		rawConnectivityState: connectivity.Idle,
+		effectiveState:       connectivity.Idle,
+		addr:                 addr,
+	}
+	sc, err := b.cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{
+		StateListener: func(state balancer.SubConnState) {
+			b.updateSubConnState(sd, state)
+		},
+	})
+	if err != nil {
+		return nil, err
+	}
+	sd.subConn = sc
+	return sd, nil
+}
+
+type pickfirstBalancer struct {
+	// The following fields are initialized at build time and read-only after
+	// that and therefore do not need to be guarded by a mutex.
+	logger          *internalgrpclog.PrefixLogger
+	cc              balancer.ClientConn
+	target          string
+	metricsRecorder expstats.MetricsRecorder // guaranteed to be non-nil
+
+	// The mutex is used to ensure synchronization of updates triggered
+	// from the idle picker and the already-serialized resolver and
+	// SubConn state updates.
+	mu sync.Mutex
+	// State reported to the channel based on SubConn states and resolver
+	// updates.
+	state connectivity.State
+	// scData for active subconns mapped by address.
+	subConns              *resolver.AddressMap
+	addressList           addressList
+	firstPass             bool
+	numTF                 int
+	cancelConnectionTimer func()
+	healthCheckingEnabled bool
+}
+
+// ResolverError is called by the ClientConn when the name resolver produces
+// an error or when pickfirst determines the resolver update to be invalid.
+func (b *pickfirstBalancer) ResolverError(err error) {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	b.resolverErrorLocked(err)
+}
+
+func (b *pickfirstBalancer) resolverErrorLocked(err error) {
+	if b.logger.V(2) {
+		b.logger.Infof("Received error from the name resolver: %v", err)
+	}
+
+	// The picker will not change since the balancer does not currently
+	// report an error. If the balancer hasn't received a single good resolver
+	// update yet, transition to TRANSIENT_FAILURE.
+	if b.state != connectivity.TransientFailure && b.addressList.size() > 0 {
+		if b.logger.V(2) {
+			b.logger.Infof("Ignoring resolver error because balancer is using a previous good update.")
+		}
+		return
+	}
+
+	b.updateBalancerState(balancer.State{
+		ConnectivityState: connectivity.TransientFailure,
+		Picker:            &picker{err: fmt.Errorf("name resolver error: %v", err)},
+	})
+}
+
+func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	b.cancelConnectionTimer()
+	if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 {
+		// Cleanup state pertaining to the previous resolver state.
+		// Treat an empty address list like an error by calling b.ResolverError.
+		b.closeSubConnsLocked()
+		b.addressList.updateAddrs(nil)
+		b.resolverErrorLocked(errors.New("produced zero addresses"))
+		return balancer.ErrBadResolverState
+	}
+	b.healthCheckingEnabled = state.ResolverState.Attributes.Value(enableHealthListenerKeyType{}) != nil
+	cfg, ok := state.BalancerConfig.(pfConfig)
+	if state.BalancerConfig != nil && !ok {
+		return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v: %w", state.BalancerConfig, state.BalancerConfig, balancer.ErrBadResolverState)
+	}
+
+	if b.logger.V(2) {
+		b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState))
+	}
+
+	var newAddrs []resolver.Address
+	if endpoints := state.ResolverState.Endpoints; len(endpoints) != 0 {
+		// Perform the optional shuffling described in gRFC A62. The shuffling
+		// will change the order of endpoints but not touch the order of the
+		// addresses within each endpoint. - A61
+		if cfg.ShuffleAddressList {
+			endpoints = append([]resolver.Endpoint{}, endpoints...)
+			internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] })
+		}
+
+		// "Flatten the list by concatenating the ordered list of addresses for
+		// each of the endpoints, in order." - A61
+		for _, endpoint := range endpoints {
+			newAddrs = append(newAddrs, endpoint.Addresses...)
+		}
+	} else {
+		// Endpoints not set, process addresses until we migrate resolver
+		// emissions fully to Endpoints.
+		// The top channel does wrap emitted addresses with endpoints; however,
+		// some balancers, such as weighted target, do not forward the correct
+		// corresponding endpoints down or split endpoints properly. Once all
+		// balancers correctly forward endpoints down, this else conditional
+		// can be deleted.
+		newAddrs = state.ResolverState.Addresses
+		if cfg.ShuffleAddressList {
+			newAddrs = append([]resolver.Address{}, newAddrs...)
+			// Shuffle the copied address list itself; the endpoints slice is
+			// empty in this branch.
+			internal.RandShuffle(len(newAddrs), func(i, j int) { newAddrs[i], newAddrs[j] = newAddrs[j], newAddrs[i] })
+		}
+	}
+
+	// If an address appears in multiple endpoints or in the same endpoint
+	// multiple times, we keep it only once. We will create only one SubConn
+	// for the address because an AddressMap is used to store SubConns.
+	// Not de-duplicating would result in attempting to connect to the same
+	// SubConn multiple times in the same pass. We don't want this.
+	newAddrs = deDupAddresses(newAddrs)
+	newAddrs = interleaveAddresses(newAddrs)
+
+	prevAddr := b.addressList.currentAddress()
+	prevSCData, found := b.subConns.Get(prevAddr)
+	prevAddrsCount := b.addressList.size()
+	isPrevRawConnectivityStateReady := found && prevSCData.(*scData).rawConnectivityState == connectivity.Ready
+	b.addressList.updateAddrs(newAddrs)
+
+	// If the previously ready SubConn exists in the new address list,
+	// keep this connection and don't create new SubConns.
+	if isPrevRawConnectivityStateReady && b.addressList.seekTo(prevAddr) {
+		return nil
+	}
+
+	b.reconcileSubConnsLocked(newAddrs)
+	// If it's the first resolver update or the balancer was already READY
+	// (but the new address list does not contain the ready SubConn) or
+	// CONNECTING, enter CONNECTING.
+	// We may be in TRANSIENT_FAILURE due to a previous empty address list;
+	// we should still enter CONNECTING because the sticky TF behaviour
+	// mentioned in A62 applies only when the TRANSIENT_FAILURE is reported
+	// due to connectivity failures.
+	if isPrevRawConnectivityStateReady || b.state == connectivity.Connecting || prevAddrsCount == 0 {
+		// Start connection attempt at first address.
+		b.forceUpdateConcludedStateLocked(balancer.State{
+			ConnectivityState: connectivity.Connecting,
+			Picker:            &picker{err: balancer.ErrNoSubConnAvailable},
+		})
+		b.startFirstPassLocked()
+	} else if b.state == connectivity.TransientFailure {
+		// If we're in TRANSIENT_FAILURE, we stay in TRANSIENT_FAILURE until
+		// we're READY. See A62.
+		b.startFirstPassLocked()
+	}
+	return nil
+}
+
+// UpdateSubConnState is unused as a StateListener is always registered when
+// creating SubConns.
+func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) {
+	b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", subConn, state)
+}
+
+func (b *pickfirstBalancer) Close() {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	b.closeSubConnsLocked()
+	b.cancelConnectionTimer()
+	b.state = connectivity.Shutdown
+}
+
+// ExitIdle moves the balancer out of the idle state. It can be called
+// concurrently by the idlePicker and the clientConn, so access to variables
+// should be synchronized.
+func (b *pickfirstBalancer) ExitIdle() {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	if b.state == connectivity.Idle {
+		b.startFirstPassLocked()
+	}
+}
+
+func (b *pickfirstBalancer) startFirstPassLocked() {
+	b.firstPass = true
+	b.numTF = 0
+	// Reset the connection attempt record for existing SubConns.
+	for _, sd := range b.subConns.Values() {
+		sd.(*scData).connectionFailedInFirstPass = false
+	}
+	b.requestConnectionLocked()
+}
+
+func (b *pickfirstBalancer) closeSubConnsLocked() {
+	for _, sd := range b.subConns.Values() {
+		sd.(*scData).subConn.Shutdown()
+	}
+	b.subConns = resolver.NewAddressMap()
+}
+
+// deDupAddresses ensures that each address appears only once in the slice.
+func deDupAddresses(addrs []resolver.Address) []resolver.Address {
+	seenAddrs := resolver.NewAddressMap()
+	retAddrs := []resolver.Address{}
+
+	for _, addr := range addrs {
+		if _, ok := seenAddrs.Get(addr); ok {
+			continue
+		}
+		// Mark the address as seen so that later duplicates are skipped.
+		seenAddrs.Set(addr, true)
+		retAddrs = append(retAddrs, addr)
+	}
+	return retAddrs
+}
+
+// interleaveAddresses interleaves addresses of both families (IPv4 and IPv6)
+// as per RFC-8305 section 4.
+// Whichever address family is first in the list is followed by an address of
+// the other address family; that is, if the first address in the list is IPv6,
+// then the first IPv4 address should be moved up in the list to be second in
+// the list. For example, [v6a, v6b, v4a, v4b] interleaves to
+// [v6a, v4a, v6b, v4b]. It doesn't support configuring "First Address Family
+// Count", i.e. there will always be a single member of the first address
+// family at the beginning of the interleaved list.
+// Addresses that are neither IPv4 nor IPv6 are treated as part of a third
+// "unknown" family for interleaving.
+// See: https://datatracker.ietf.org/doc/html/rfc8305#autoid-6
+func interleaveAddresses(addrs []resolver.Address) []resolver.Address {
+	familyAddrsMap := map[ipAddrFamily][]resolver.Address{}
+	interleavingOrder := []ipAddrFamily{}
+	for _, addr := range addrs {
+		family := addressFamily(addr.Addr)
+		if _, found := familyAddrsMap[family]; !found {
+			interleavingOrder = append(interleavingOrder, family)
+		}
+		familyAddrsMap[family] = append(familyAddrsMap[family], addr)
+	}
+
+	interleavedAddrs := make([]resolver.Address, 0, len(addrs))
+
+	for curFamilyIdx := 0; len(interleavedAddrs) < len(addrs); curFamilyIdx = (curFamilyIdx + 1) % len(interleavingOrder) {
+		// Some IP types may have fewer addresses than others, so we look for
+		// the next type that has a remaining member to add to the interleaved
+		// list.
+		family := interleavingOrder[curFamilyIdx]
+		remainingMembers := familyAddrsMap[family]
+		if len(remainingMembers) > 0 {
+			interleavedAddrs = append(interleavedAddrs, remainingMembers[0])
+			familyAddrsMap[family] = remainingMembers[1:]
+		}
+	}
+
+	return interleavedAddrs
+}
+
+// addressFamily returns the ipAddrFamily after parsing the address string.
+// If the address isn't of the format "ip-address:port", it returns
+// ipAddrFamilyUnknown. The address may be valid even if it's not an IP when
+// using a resolver like passthrough where the address may be a hostname in
+// some format that the dialer can resolve.
+func addressFamily(address string) ipAddrFamily {
+	// Parse the IP after removing the port.
+	host, _, err := net.SplitHostPort(address)
+	if err != nil {
+		return ipAddrFamilyUnknown
+	}
+	ip, err := netip.ParseAddr(host)
+	if err != nil {
+		return ipAddrFamilyUnknown
+	}
+	switch {
+	case ip.Is4() || ip.Is4In6():
+		return ipAddrFamilyV4
+	case ip.Is6():
+		return ipAddrFamilyV6
+	default:
+		return ipAddrFamilyUnknown
+	}
+}
+
+// reconcileSubConnsLocked updates the active subchannels based on a new address
+// list from the resolver. It does this by:
+// - closing subchannels: any existing subchannels associated with addresses
+//   that are no longer in the updated list are shut down.
+// - removing subchannels: entries for these closed subchannels are removed
+//   from the subchannel map.
+//
+// This ensures that the subchannel map accurately reflects the current set of
+// addresses received from the name resolver.
+func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address) {
+	newAddrsMap := resolver.NewAddressMap()
+	for _, addr := range newAddrs {
+		newAddrsMap.Set(addr, true)
+	}
+
+	for _, oldAddr := range b.subConns.Keys() {
+		if _, ok := newAddrsMap.Get(oldAddr); ok {
+			continue
+		}
+		val, _ := b.subConns.Get(oldAddr)
+		val.(*scData).subConn.Shutdown()
+		b.subConns.Delete(oldAddr)
+	}
+}
+
+// shutdownRemainingLocked shuts down remaining subConns. Called when a subConn
+// becomes ready, which means that all other subConns must be shut down.
+func (b *pickfirstBalancer) shutdownRemainingLocked(selected *scData) {
+	b.cancelConnectionTimer()
+	for _, v := range b.subConns.Values() {
+		sd := v.(*scData)
+		if sd.subConn != selected.subConn {
+			sd.subConn.Shutdown()
+		}
+	}
+	b.subConns = resolver.NewAddressMap()
+	b.subConns.Set(selected.addr, selected)
+}
+
+// requestConnectionLocked starts connecting on the subchannel corresponding to
+// the current address. If no subchannel exists, one is created. If the current
+// subchannel is in TransientFailure, connection attempts advance through
+// subsequent addresses until a usable subchannel is found.
+func (b *pickfirstBalancer) requestConnectionLocked() {
+	if !b.addressList.isValid() {
+		return
+	}
+	var lastErr error
+	for valid := true; valid; valid = b.addressList.increment() {
+		curAddr := b.addressList.currentAddress()
+		sd, ok := b.subConns.Get(curAddr)
+		if !ok {
+			var err error
+			// We want to assign the new scData to sd from the outer scope,
+			// hence we can't use := below.
+			sd, err = b.newSCData(curAddr)
+			if err != nil {
+				// This should never happen, unless the clientConn is being shut
+				// down.
+				if b.logger.V(2) {
+					b.logger.Infof("Failed to create a subConn for address %v: %v", curAddr.String(), err)
+				}
+				// Do nothing, the LB policy will be closed soon.
+				return
+			}
+			b.subConns.Set(curAddr, sd)
+		}
+
+		scd := sd.(*scData)
+		switch scd.rawConnectivityState {
+		case connectivity.Idle:
+			scd.subConn.Connect()
+			b.scheduleNextConnectionLocked()
+			return
+		case connectivity.TransientFailure:
+			// The SubConn is being re-used and failed during a previous pass
+			// over the addressList. It has not completed backoff yet.
+			// Mark it as having failed and try the next address.
+			scd.connectionFailedInFirstPass = true
+			lastErr = scd.lastErr
+			continue
+		case connectivity.Connecting:
+			// Wait for the connection attempt to complete or the timer to fire
+			// before attempting the next address.
+			b.scheduleNextConnectionLocked()
+			return
+		default:
+			b.logger.Errorf("SubConn with unexpected state %v present in SubConns map.", scd.rawConnectivityState)
+			return
+		}
+	}
+
+	// All the remaining addresses in the list are in TRANSIENT_FAILURE; end
+	// the first pass if possible.
+	b.endFirstPassIfPossibleLocked(lastErr)
+}
+
+func (b *pickfirstBalancer) scheduleNextConnectionLocked() {
+	b.cancelConnectionTimer()
+	if !b.addressList.hasNext() {
+		return
+	}
+	curAddr := b.addressList.currentAddress()
+	cancelled := false // Access to this is protected by the balancer's mutex.
+	closeFn := internal.TimeAfterFunc(connectionDelayInterval, func() {
+		b.mu.Lock()
+		defer b.mu.Unlock()
+		// If the scheduled task was cancelled while acquiring the mutex, return.
+		if cancelled {
+			return
+		}
+		if b.logger.V(2) {
+			b.logger.Infof("Happy Eyeballs timer expired while waiting for connection to %q.", curAddr.Addr)
+		}
+		if b.addressList.increment() {
+			b.requestConnectionLocked()
+		}
+	})
+	// Access to the cancellation callback held by the balancer is guarded by
+	// the balancer's mutex, so it's safe to set the boolean from the callback.
+	b.cancelConnectionTimer = sync.OnceFunc(func() {
+		cancelled = true
+		closeFn()
+	})
+}
+
+func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.SubConnState) {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	oldState := sd.rawConnectivityState
+	sd.rawConnectivityState = newState.ConnectivityState
+	// Previously relevant SubConns can still call back with state updates.
+	// To prevent pickers from returning these obsolete SubConns, this logic
+	// is included to check if the current list of active SubConns includes this
+	// SubConn.
+	if !b.isActiveSCData(sd) {
+		return
+	}
+	if newState.ConnectivityState == connectivity.Shutdown {
+		sd.effectiveState = connectivity.Shutdown
+		return
+	}
+
+	// Record a failed connection attempt when exiting CONNECTING.
+	if newState.ConnectivityState == connectivity.TransientFailure {
+		sd.connectionFailedInFirstPass = true
+		connectionAttemptsFailedMetric.Record(b.metricsRecorder, 1, b.target)
+	}
+
+	if newState.ConnectivityState == connectivity.Ready {
+		connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target)
+		b.shutdownRemainingLocked(sd)
+		if !b.addressList.seekTo(sd.addr) {
+			// This should not fail as we should have only one SubConn after
+			// entering READY. The SubConn should be present in the addressList.
+			b.logger.Errorf("Address %q not found in address list %v", sd.addr, b.addressList.addresses)
+			return
+		}
+		if !b.healthCheckingEnabled {
+			if b.logger.V(2) {
+				b.logger.Infof("SubConn %p reported connectivity state READY and the health listener is disabled. Transitioning SubConn to READY.", sd.subConn)
+			}
+
+			sd.effectiveState = connectivity.Ready
+			b.updateBalancerState(balancer.State{
+				ConnectivityState: connectivity.Ready,
+				Picker:            &picker{result: balancer.PickResult{SubConn: sd.subConn}},
+			})
+			return
+		}
+		if b.logger.V(2) {
+			b.logger.Infof("SubConn %p reported connectivity state READY. Registering health listener.", sd.subConn)
+		}
+		// Send a CONNECTING update to take the SubConn out of sticky-TF if
+		// required.
+		sd.effectiveState = connectivity.Connecting
+		b.updateBalancerState(balancer.State{
+			ConnectivityState: connectivity.Connecting,
+			Picker:            &picker{err: balancer.ErrNoSubConnAvailable},
+		})
+		sd.subConn.RegisterHealthListener(func(scs balancer.SubConnState) {
+			b.updateSubConnHealthState(sd, scs)
+		})
+		return
+	}
+
+	// If the LB policy is READY, and it receives a subchannel state change,
+	// it means that the READY subchannel has failed.
+	// A SubConn can also transition from CONNECTING directly to IDLE when
+	// a transport is successfully created, but the connection fails
+	// before the SubConn can send the notification for READY. We treat
+	// this as a successful connection and transition to IDLE.
+	// TODO: https://github.com/grpc/grpc-go/issues/7862 - Remove the second
+	// part of the if condition below once the issue is fixed.
+	if oldState == connectivity.Ready || (oldState == connectivity.Connecting && newState.ConnectivityState == connectivity.Idle) {
+		// Once a transport fails, the balancer enters IDLE and starts from
+		// the first address when the picker is used.
+		b.shutdownRemainingLocked(sd)
+		sd.effectiveState = newState.ConnectivityState
+		// A READY update was interspersed between CONNECTING and IDLE; we
+		// need to account for that.
+		if oldState == connectivity.Connecting {
+			// A known issue (https://github.com/grpc/grpc-go/issues/7862)
+			// causes a race that prevents the READY state change notification.
+			// This works around it.
+			connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target)
+		}
+		disconnectionsMetric.Record(b.metricsRecorder, 1, b.target)
+		b.addressList.reset()
+		b.updateBalancerState(balancer.State{
+			ConnectivityState: connectivity.Idle,
+			Picker:            &idlePicker{exitIdle: sync.OnceFunc(b.ExitIdle)},
+		})
+		return
+	}
+
+	if b.firstPass {
+		switch newState.ConnectivityState {
+		case connectivity.Connecting:
+			// The effective state can be in either IDLE, CONNECTING or
+			// TRANSIENT_FAILURE. If it's TRANSIENT_FAILURE, stay in
+			// TRANSIENT_FAILURE until it's READY. See A62.
+			if sd.effectiveState != connectivity.TransientFailure {
+				sd.effectiveState = connectivity.Connecting
+				b.updateBalancerState(balancer.State{
+					ConnectivityState: connectivity.Connecting,
+					Picker:            &picker{err: balancer.ErrNoSubConnAvailable},
+				})
+			}
+		case connectivity.TransientFailure:
+			sd.lastErr = newState.ConnectionError
+			sd.effectiveState = connectivity.TransientFailure
+			// Since we're re-using common SubConns while handling resolver
+			// updates, we could receive an out-of-turn TRANSIENT_FAILURE from
+			// a pass over the previous address list. Happy Eyeballs will also
+			// cause out-of-order updates to arrive.
+
+			if curAddr := b.addressList.currentAddress(); equalAddressIgnoringBalAttributes(&curAddr, &sd.addr) {
+				b.cancelConnectionTimer()
+				if b.addressList.increment() {
+					b.requestConnectionLocked()
+					return
+				}
+			}
+
+			// End the first pass if we've seen a TRANSIENT_FAILURE from all
+			// SubConns once.
+			b.endFirstPassIfPossibleLocked(newState.ConnectionError)
+		}
+		return
+	}
+
+	// We have finished the first pass; keep re-connecting failing SubConns.
+	switch newState.ConnectivityState {
+	case connectivity.TransientFailure:
+		b.numTF = (b.numTF + 1) % b.subConns.Len()
+		sd.lastErr = newState.ConnectionError
+		if b.numTF%b.subConns.Len() == 0 {
+			b.updateBalancerState(balancer.State{
+				ConnectivityState: connectivity.TransientFailure,
+				Picker:            &picker{err: newState.ConnectionError},
+			})
+		}
+		// We don't need to request re-resolution since the SubConn already
+		// does that before reporting TRANSIENT_FAILURE.
+		// TODO: #7534 - Move re-resolution requests from SubConn into
+		// pick_first.
+	case connectivity.Idle:
+		sd.subConn.Connect()
+	}
+}
+
+// endFirstPassIfPossibleLocked ends the first happy-eyeballs pass once all the
+// addresses have been tried and their SubConns have reported a failure.
+func (b *pickfirstBalancer) endFirstPassIfPossibleLocked(lastErr error) {
+	// An optimization to avoid iterating over the entire SubConn map.
+	if b.addressList.isValid() {
+		return
+	}
+	// Connect() has been called on all the SubConns. The first pass can be
+	// ended if all the SubConns have reported a failure.
+	for _, v := range b.subConns.Values() {
+		sd := v.(*scData)
+		if !sd.connectionFailedInFirstPass {
+			return
+		}
+	}
+	b.firstPass = false
+	b.updateBalancerState(balancer.State{
+		ConnectivityState: connectivity.TransientFailure,
+		Picker:            &picker{err: lastErr},
+	})
+	// Start re-connecting all the SubConns that are already in IDLE.
+	for _, v := range b.subConns.Values() {
+		sd := v.(*scData)
+		if sd.rawConnectivityState == connectivity.Idle {
+			sd.subConn.Connect()
+		}
+	}
+}
+
+func (b *pickfirstBalancer) isActiveSCData(sd *scData) bool {
+	activeSD, found := b.subConns.Get(sd.addr)
+	return found && activeSD == sd
+}
+
+func (b *pickfirstBalancer) updateSubConnHealthState(sd *scData, state balancer.SubConnState) {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	// Previously relevant SubConns can still call back with state updates.
+	// To prevent pickers from returning these obsolete SubConns, this logic
+	// is included to check if the current list of active SubConns includes
+	// this SubConn.
+	if !b.isActiveSCData(sd) {
+		return
+	}
+	sd.effectiveState = state.ConnectivityState
+	switch state.ConnectivityState {
+	case connectivity.Ready:
+		b.updateBalancerState(balancer.State{
+			ConnectivityState: connectivity.Ready,
+			Picker:            &picker{result: balancer.PickResult{SubConn: sd.subConn}},
+		})
+	case connectivity.TransientFailure:
+		b.updateBalancerState(balancer.State{
+			ConnectivityState: connectivity.TransientFailure,
+			Picker:            &picker{err: fmt.Errorf("pickfirst: health check failure: %v", state.ConnectionError)},
+		})
+	case connectivity.Connecting:
+		b.updateBalancerState(balancer.State{
+			ConnectivityState: connectivity.Connecting,
+			Picker:            &picker{err: balancer.ErrNoSubConnAvailable},
+		})
+	default:
+		b.logger.Errorf("Got unexpected health update for SubConn %p: %v", sd.subConn, state)
+	}
+}
+
+// updateBalancerState stores the state reported to the channel and calls
+// ClientConn.UpdateState(). As an optimization, it avoids sending duplicate
+// updates to the channel.
+func (b *pickfirstBalancer) updateBalancerState(newState balancer.State) {
+	// In case of TransientFailures, allow the picker to be updated so the
+	// connectivity error stays current; in all other cases, don't send
+	// duplicate state updates.
+	if newState.ConnectivityState == b.state && b.state != connectivity.TransientFailure {
+		return
+	}
+	b.forceUpdateConcludedStateLocked(newState)
+}
+
+// forceUpdateConcludedStateLocked stores the state reported to the channel and
+// calls ClientConn.UpdateState().
+// A separate function is defined to force-update the ClientConn state since the
+// channel doesn't correctly assume that LB policies start in CONNECTING and
+// relies on the LB policy to send an initial CONNECTING update.
+func (b *pickfirstBalancer) forceUpdateConcludedStateLocked(newState balancer.State) {
+	b.state = newState.ConnectivityState
+	b.cc.UpdateState(newState)
+}
+
+type picker struct {
+	result balancer.PickResult
+	err    error
+}
+
+func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
+	return p.result, p.err
+}
+
+// idlePicker is used when the SubConn is IDLE and kicks the SubConn into
+// CONNECTING when Pick is called.
+type idlePicker struct {
+	exitIdle func()
+}
+
+func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
+	i.exitIdle()
+	return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
+}
+
+// addressList manages sequentially iterating over addresses present in a list
+// of endpoints. It provides a one-dimensional view of the addresses present in
+// the endpoints.
+// This type is not safe for concurrent access.
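+//
+// A typical traversal, mirroring requestConnectionLocked above:
+//
+//	for valid := al.isValid(); valid; valid = al.increment() {
+//		addr := al.currentAddress()
+//		// ... attempt a connection to addr ...
+//	}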
+type addressList struct {
+	addresses []resolver.Address
+	idx       int
+}
+
+func (al *addressList) isValid() bool {
+	return al.idx < len(al.addresses)
+}
+
+func (al *addressList) size() int {
+	return len(al.addresses)
+}
+
+// increment moves to the next index in the address list.
+// This method returns false if it went off the list, true otherwise.
+func (al *addressList) increment() bool {
+	if !al.isValid() {
+		return false
+	}
+	al.idx++
+	return al.idx < len(al.addresses)
+}
+
+// currentAddress returns the current address pointed to in the addressList.
+// If the list is in an invalid state, it returns an empty address instead.
+func (al *addressList) currentAddress() resolver.Address {
+	if !al.isValid() {
+		return resolver.Address{}
+	}
+	return al.addresses[al.idx]
+}
+
+func (al *addressList) reset() {
+	al.idx = 0
+}
+
+func (al *addressList) updateAddrs(addrs []resolver.Address) {
+	al.addresses = addrs
+	al.reset()
+}
+
+// seekTo moves the index to the first address equal to needle. It returns
+// false, leaving the current index unchanged, if the needle was not found.
+func (al *addressList) seekTo(needle resolver.Address) bool {
+	for ai, addr := range al.addresses {
+		if !equalAddressIgnoringBalAttributes(&addr, &needle) {
+			continue
+		}
+		al.idx = ai
+		return true
+	}
+	return false
+}
+
+// hasNext returns whether there is another address after the current one in
+// the list. It returns false if the list has already moved past the end.
+func (al *addressList) hasNext() bool {
+	if !al.isValid() {
+		return false
+	}
+	return al.idx+1 < len(al.addresses)
+}
+
+// equalAddressIgnoringBalAttributes returns true if a and b are considered
+// equal. This is different from the Equal method on the resolver.Address type
+// which considers all fields to determine equality. Here, we only consider
+// fields that are meaningful to the SubConn.
+func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool {
+	return a.Addr == b.Addr && a.ServerName == b.ServerName &&
+		a.Attributes.Equal(b.Attributes) &&
+		a.Metadata == b.Metadata
+}
diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
index 260255d31..35da5d1ec 100644
--- a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
+++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
@@ -22,12 +22,13 @@ package roundrobin
 
 import (
-	"math/rand"
-	"sync/atomic"
+	"fmt"
 
 	"google.golang.org/grpc/balancer"
-	"google.golang.org/grpc/balancer/base"
+	"google.golang.org/grpc/balancer/endpointsharding"
+	"google.golang.org/grpc/balancer/pickfirst/pickfirstleaf"
 	"google.golang.org/grpc/grpclog"
+	internalgrpclog "google.golang.org/grpc/internal/grpclog"
 )
 
 // Name is the name of round_robin balancer.
@@ -35,47 +36,44 @@ const Name = "round_robin"
 
 var logger = grpclog.Component("roundrobin")
 
-// newBuilder creates a new roundrobin balancer builder.
-func newBuilder() balancer.Builder { - return base.NewBalancerBuilder(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true}) -} - func init() { - balancer.Register(newBuilder()) + balancer.Register(builder{}) } -type rrPickerBuilder struct{} +type builder struct{} -func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker { - logger.Infof("roundrobinPicker: Build called with info: %v", info) - if len(info.ReadySCs) == 0 { - return base.NewErrPicker(balancer.ErrNoSubConnAvailable) - } - scs := make([]balancer.SubConn, 0, len(info.ReadySCs)) - for sc := range info.ReadySCs { - scs = append(scs, sc) - } - return &rrPicker{ - subConns: scs, - // Start at a random index, as the same RR balancer rebuilds a new - // picker when SubConn states change, and we don't want to apply excess - // load to the first server in the list. - next: uint32(rand.Intn(len(scs))), +func (bb builder) Name() string { + return Name +} + +func (bb builder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + childBuilder := balancer.Get(pickfirstleaf.Name).Build + bal := &rrBalancer{ + cc: cc, + Balancer: endpointsharding.NewBalancer(cc, opts, childBuilder, endpointsharding.Options{}), } + bal.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[%p] ", bal)) + bal.logger.Infof("Created") + return bal } -type rrPicker struct { - // subConns is the snapshot of the roundrobin balancer when this picker was - // created. The slice is immutable. Each Get() will do a round robin - // selection from it and return the selected SubConn. - subConns []balancer.SubConn - next uint32 +type rrBalancer struct { + balancer.Balancer + cc balancer.ClientConn + logger *internalgrpclog.PrefixLogger } -func (p *rrPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { - subConnsLen := uint32(len(p.subConns)) - nextIndex := atomic.AddUint32(&p.next, 1) +func (b *rrBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error { + return b.Balancer.UpdateClientConnState(balancer.ClientConnState{ + // Enable the health listener in pickfirst children for client side health + // checks and outlier detection, if configured. + ResolverState: pickfirstleaf.EnableHealthListener(ccs.ResolverState), + }) +} - sc := p.subConns[nextIndex%subConnsLen] - return balancer.PickResult{SubConn: sc}, nil +func (b *rrBalancer) ExitIdle() { + // Should always be ok, as child is endpoint sharding. + if ei, ok := b.Balancer.(balancer.ExitIdler); ok { + ei.ExitIdle() + } } diff --git a/vendor/google.golang.org/grpc/balancer/subconn.go b/vendor/google.golang.org/grpc/balancer/subconn.go new file mode 100644 index 000000000..9ee44d4af --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/subconn.go @@ -0,0 +1,134 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *
+ */
+
+package balancer
+
+import (
+	"google.golang.org/grpc/connectivity"
+	"google.golang.org/grpc/internal"
+	"google.golang.org/grpc/resolver"
+)
+
+// A SubConn represents a single connection to a gRPC backend service.
+//
+// All SubConns start in IDLE, and will not try to connect. To trigger a
+// connection attempt, Balancers must call Connect.
+//
+// If the connection attempt fails, the SubConn will transition to
+// TRANSIENT_FAILURE for a backoff period, and then return to IDLE. If the
+// connection attempt succeeds, it will transition to READY.
+//
+// If a READY SubConn becomes disconnected, the SubConn will transition to IDLE.
+//
+// If a connection re-enters IDLE, Balancers must call Connect again to trigger
+// a new connection attempt.
+//
+// Each SubConn contains a list of addresses. gRPC will try to connect to the
+// addresses in sequence, and stop trying the remainder once the first
+// connection is successful. However, this behavior is deprecated. SubConns
+// should only use a single address.
+//
+// NOTICE: This interface is intended to be implemented by gRPC, or intercepted
+// by custom load balancing policies. Users should not need their own complete
+// implementation of this interface -- they should always delegate to a SubConn
+// returned by ClientConn.NewSubConn() by embedding it in their implementations.
+// An embedded SubConn must never be nil, or runtime panics will occur.
+type SubConn interface {
+	// UpdateAddresses updates the addresses used in this SubConn.
+	// gRPC checks if the currently-connected address is still in the new list.
+	// If it's in the list, the connection will be kept.
+	// If it's not in the list, the connection will gracefully close, and
+	// a new connection will be created.
+	//
+	// This will trigger a state transition for the SubConn.
+	//
+	// Deprecated: this method will be removed. Create new SubConns for new
+	// addresses instead.
+	UpdateAddresses([]resolver.Address)
+	// Connect starts connecting for this SubConn.
+	Connect()
+	// GetOrBuildProducer returns a reference to the existing Producer for this
+	// ProducerBuilder in this SubConn, or, if one does not currently exist,
+	// creates a new one and returns it. Returns a close function which may be
+	// called when the Producer is no longer needed. Otherwise the producer
+	// will automatically be closed upon connection loss or subchannel close.
+	// Should only be called on a SubConn in state Ready. Otherwise the
+	// producer will be unable to create streams.
+	GetOrBuildProducer(ProducerBuilder) (p Producer, close func())
+	// Shutdown shuts down the SubConn gracefully. Any started RPCs will be
+	// allowed to complete. No future calls should be made on the SubConn.
+	// One final state update will be delivered to the StateListener (or
+	// UpdateSubConnState; deprecated) with ConnectivityState of Shutdown to
+	// indicate the shutdown operation. This may be delivered before
+	// in-progress RPCs are complete and the actual connection is closed.
+	Shutdown()
+	// RegisterHealthListener registers a health listener that receives health
+	// updates for a Ready SubConn. Only one health listener can be registered
+	// at a time. A health listener should be registered each time the SubConn's
+	// connectivity state changes to READY. Registering a health listener when
+	// the connectivity state is not READY may result in undefined behaviour.
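+	// A typical pattern, as used by the pick_first leaf policy: on a READY
+	// connectivity update, register a listener that forwards health state
+	// back into the balancer's own serialized update handling.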
+	// This method must not be called synchronously while handling an update
+	// from a previously registered health listener.
+	RegisterHealthListener(func(SubConnState))
+	// EnforceSubConnEmbedding is included to force implementers to embed
+	// another implementation of this interface, allowing gRPC to add methods
+	// without breaking users.
+	internal.EnforceSubConnEmbedding
+}
+
+// A ProducerBuilder is a simple constructor for a Producer. It is used by the
+// SubConn to create producers when needed.
+type ProducerBuilder interface {
+	// Build creates a Producer. The first parameter is always a
+	// grpc.ClientConnInterface (a type to allow creating RPCs/streams on the
+	// associated SubConn), but is declared as `any` to avoid a dependency
+	// cycle. Build also returns a close function that will be called when all
+	// references to the Producer have been given up for a SubConn, or when a
+	// connectivity state change occurs on the SubConn. The close function
+	// should always block until all asynchronous cleanup work is completed.
+	Build(grpcClientConnInterface any) (p Producer, close func())
+}
+
+// SubConnState describes the state of a SubConn.
+type SubConnState struct {
+	// ConnectivityState is the connectivity state of the SubConn.
+	ConnectivityState connectivity.State
+	// ConnectionError is set if the ConnectivityState is TransientFailure,
+	// describing the reason the SubConn failed. Otherwise, it is nil.
+	ConnectionError error
+	// connectedAddress contains the connected address when ConnectivityState
+	// is Ready. Otherwise, it is indeterminate.
+	connectedAddress resolver.Address
+}
+
+// connectedAddress returns the connected address for a SubConnState. The
+// address is only valid if the state is READY.
+func connectedAddress(scs SubConnState) resolver.Address {
+	return scs.connectedAddress
+}
+
+// setConnectedAddress sets the connected address for a SubConnState.
+func setConnectedAddress(scs *SubConnState, addr resolver.Address) {
+	scs.connectedAddress = addr
+}
+
+// A Producer is a type shared among potentially many consumers. It is
+// associated with a SubConn, and an implementation will typically contain
+// other methods to provide additional functionality, e.g. configuration or
+// subscription registration.
+type Producer any
diff --git a/vendor/google.golang.org/grpc/balancer_wrapper.go b/vendor/google.golang.org/grpc/balancer_wrapper.go
index 8ad6ce2f0..948a21ef6 100644
--- a/vendor/google.golang.org/grpc/balancer_wrapper.go
+++ b/vendor/google.golang.org/grpc/balancer_wrapper.go
@@ -24,15 +24,26 @@ import (
 	"sync"
 
 	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/connectivity"
+	"google.golang.org/grpc/experimental/stats"
 	"google.golang.org/grpc/internal"
 	"google.golang.org/grpc/internal/balancer/gracefulswitch"
 	"google.golang.org/grpc/internal/channelz"
 	"google.golang.org/grpc/internal/grpcsync"
 	"google.golang.org/grpc/resolver"
+	"google.golang.org/grpc/status"
 )
 
-var setConnectedAddress = internal.SetConnectedAddress.(func(*balancer.SubConnState, resolver.Address))
+var (
+	setConnectedAddress = internal.SetConnectedAddress.(func(*balancer.SubConnState, resolver.Address))
+	// noOpRegisterHealthListenerFn is used when client side health checking is
+	// disabled. It sends a single READY update on the registered listener.
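+	// This keeps the health-listener code path uniform for LB policies
+	// whether or not client side health checking is actually configured.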
+ noOpRegisterHealthListenerFn = func(_ context.Context, listener func(balancer.SubConnState)) func() { + listener(balancer.SubConnState{ConnectivityState: connectivity.Ready}) + return func() {} + } +) // ccBalancerWrapper sits between the ClientConn and the Balancer. // @@ -49,6 +60,7 @@ var setConnectedAddress = internal.SetConnectedAddress.(func(*balancer.SubConnSt // It uses the gracefulswitch.Balancer internally to ensure that balancer // switches happen in a graceful manner. type ccBalancerWrapper struct { + internal.EnforceClientConnEmbedding // The following fields are initialized when the wrapper is created and are // read-only afterwards, and therefore can be accessed without a mutex. cc *ClientConn @@ -82,7 +94,6 @@ func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper { CustomUserAgent: cc.dopts.copts.UserAgent, ChannelzParent: cc.channelz, Target: cc.parsedTarget, - MetricsRecorder: cc.metricsRecorderList, }, serializer: grpcsync.NewCallbackSerializer(ctx), serializerCancel: cancel, @@ -91,6 +102,10 @@ func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper { return ccb } +func (ccb *ccBalancerWrapper) MetricsRecorder() stats.MetricsRecorder { + return ccb.cc.metricsRecorderList +} + // updateClientConnState is invoked by grpc to push a ClientConnState update to // the underlying balancer. This is always executed from the serializer, so // it is safe to call into the balancer here. @@ -187,6 +202,7 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer ac: ac, producers: make(map[balancer.ProducerBuilder]*refCountedProducer), stateListener: opts.StateListener, + healthData: newHealthData(connectivity.Idle), } ac.acbw = acbw return acbw, nil @@ -252,12 +268,39 @@ func (ccb *ccBalancerWrapper) Target() string { // acBalancerWrapper is a wrapper on top of ac for balancers. // It implements balancer.SubConn interface. type acBalancerWrapper struct { + internal.EnforceSubConnEmbedding ac *addrConn // read-only ccb *ccBalancerWrapper // read-only stateListener func(balancer.SubConnState) - mu sync.Mutex - producers map[balancer.ProducerBuilder]*refCountedProducer + producersMu sync.Mutex + producers map[balancer.ProducerBuilder]*refCountedProducer + + // Access to healthData is protected by healthMu. + healthMu sync.Mutex + // healthData is stored as a pointer to detect when the health listener is + // dropped or updated. This is required as closures can't be compared for + // equality. + healthData *healthData +} + +// healthData holds data related to health state reporting. +type healthData struct { + // connectivityState stores the most recent connectivity state delivered + // to the LB policy. This is stored to avoid sending updates when the + // SubConn has already exited connectivity state READY. + connectivityState connectivity.State + // closeHealthProducer stores function to close the ref counted health + // producer. The health producer is automatically closed when the SubConn + // state changes. + closeHealthProducer func() +} + +func newHealthData(s connectivity.State) *healthData { + return &healthData{ + connectivityState: s, + closeHealthProducer: func() {}, + } } // updateState is invoked by grpc to push a subConn state update to the @@ -267,6 +310,9 @@ func (acbw *acBalancerWrapper) updateState(s connectivity.State, curAddr resolve if ctx.Err() != nil || acbw.ccb.balancer == nil { return } + // Invalidate all producers on any state change. 
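+	// Producers are tied to the connection they were created on; per the
+	// balancer.ProducerBuilder contract, their close functions block until
+	// any asynchronous cleanup is complete.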
+ acbw.closeProducers() + // Even though it is optional for balancers, gracefulswitch ensures // opts.StateListener is set, so this cannot ever be nil. // TODO: delete this comment when UpdateSubConnState is removed. @@ -274,17 +320,25 @@ func (acbw *acBalancerWrapper) updateState(s connectivity.State, curAddr resolve if s == connectivity.Ready { setConnectedAddress(&scs, curAddr) } + // Invalidate the health listener by updating the healthData. + acbw.healthMu.Lock() + // A race may occur if a health listener is registered soon after the + // connectivity state is set but before the stateListener is called. + // Two cases may arise: + // 1. The new state is not READY: RegisterHealthListener has checks to + // ensure no updates are sent when the connectivity state is not + // READY. + // 2. The new state is READY: This means that the old state wasn't Ready. + // The RegisterHealthListener API mentions that a health listener + // must not be registered when a SubConn is not ready to avoid such + // races. When this happens, the LB policy would get health updates + // on the old listener. When the LB policy registers a new listener + // on receiving the connectivity update, the health updates will be + // sent to the new health listener. + acbw.healthData = newHealthData(scs.ConnectivityState) + acbw.healthMu.Unlock() + acbw.stateListener(scs) - acbw.ac.mu.Lock() - defer acbw.ac.mu.Unlock() - if s == connectivity.Ready { - // When changing states to READY, reset stateReadyChan. Wait until - // after we notify the LB policy's listener(s) in order to prevent - // ac.getTransport() from unblocking before the LB policy starts - // tracking the subchannel as READY. - close(acbw.ac.stateReadyChan) - acbw.ac.stateReadyChan = make(chan struct{}) - } }) } @@ -301,6 +355,7 @@ func (acbw *acBalancerWrapper) Connect() { } func (acbw *acBalancerWrapper) Shutdown() { + acbw.closeProducers() acbw.ccb.cc.removeAddrConn(acbw.ac, errConnDrain) } @@ -308,9 +363,10 @@ func (acbw *acBalancerWrapper) Shutdown() { // ready, blocks until it is or ctx expires. Returns an error when the context // expires or the addrConn is shut down. func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { - transport, err := acbw.ac.getTransport(ctx) - if err != nil { - return nil, err + transport := acbw.ac.getReadyTransport() + if transport == nil { + return nil, status.Errorf(codes.Unavailable, "SubConn state is not Ready") + } return newNonRetryClientStream(ctx, desc, method, transport, acbw.ac, opts...) } @@ -335,8 +391,8 @@ type refCountedProducer struct { } func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) (balancer.Producer, func()) { - acbw.mu.Lock() - defer acbw.mu.Unlock() + acbw.producersMu.Lock() + defer acbw.producersMu.Unlock() // Look up existing producer from this builder. pData := acbw.producers[pb] @@ -353,13 +409,112 @@ func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) ( // and delete the refCountedProducer from the map if the total reference // count goes to zero. unref := func() { - acbw.mu.Lock() + acbw.producersMu.Lock() + // If closeProducers has already closed this producer instance, refs is + // set to 0, so the check after decrementing will never pass, and the + // producer will not be double-closed. 
pData.refs-- if pData.refs == 0 { defer pData.close() // Run outside the acbw mutex delete(acbw.producers, pb) } - acbw.mu.Unlock() + acbw.producersMu.Unlock() + } + return pData.producer, sync.OnceFunc(unref) +} + +func (acbw *acBalancerWrapper) closeProducers() { + acbw.producersMu.Lock() + defer acbw.producersMu.Unlock() + for pb, pData := range acbw.producers { + pData.refs = 0 + pData.close() + delete(acbw.producers, pb) + } +} + +// healthProducerRegisterFn is a type alias for the health producer's function +// for registering listeners. +type healthProducerRegisterFn = func(context.Context, balancer.SubConn, string, func(balancer.SubConnState)) func() + +// healthListenerRegFn returns a function to register a listener for health +// updates. If client side health checks are disabled, the registered listener +// will get a single READY (raw connectivity state) update. +// +// Client side health checking is enabled when all the following +// conditions are satisfied: +// 1. Health checking is not disabled using the dial option. +// 2. The health package is imported. +// 3. The health check config is present in the service config. +func (acbw *acBalancerWrapper) healthListenerRegFn() func(context.Context, func(balancer.SubConnState)) func() { + if acbw.ccb.cc.dopts.disableHealthCheck { + return noOpRegisterHealthListenerFn + } + regHealthLisFn := internal.RegisterClientHealthCheckListener + if regHealthLisFn == nil { + // The health package is not imported. + return noOpRegisterHealthListenerFn + } + cfg := acbw.ac.cc.healthCheckConfig() + if cfg == nil { + return noOpRegisterHealthListenerFn + } + return func(ctx context.Context, listener func(balancer.SubConnState)) func() { + return regHealthLisFn.(healthProducerRegisterFn)(ctx, acbw, cfg.ServiceName, listener) + } +} + +// RegisterHealthListener accepts a health listener from the LB policy. It sends +// updates to the health listener as long as the SubConn's connectivity state +// doesn't change and a new health listener is not registered. To invalidate +// the currently registered health listener, acbw updates the healthData. If a +// nil listener is registered, the active health listener is dropped. +func (acbw *acBalancerWrapper) RegisterHealthListener(listener func(balancer.SubConnState)) { + acbw.healthMu.Lock() + defer acbw.healthMu.Unlock() + acbw.healthData.closeHealthProducer() + // listeners should not be registered when the connectivity state + // isn't Ready. This may happen when the balancer registers a listener + // after the connectivityState is updated, but before it is notified + // of the update. + if acbw.healthData.connectivityState != connectivity.Ready { + return + } + // Replace the health data to stop sending updates to any previously + // registered health listeners. + hd := newHealthData(connectivity.Ready) + acbw.healthData = hd + if listener == nil { + return } - return pData.producer, grpcsync.OnceFunc(unref) + + registerFn := acbw.healthListenerRegFn() + acbw.ccb.serializer.TrySchedule(func(ctx context.Context) { + if ctx.Err() != nil || acbw.ccb.balancer == nil { + return + } + // Don't send updates if a new listener is registered. + acbw.healthMu.Lock() + defer acbw.healthMu.Unlock() + if acbw.healthData != hd { + return + } + // Serialize the health updates from the health producer with + // other calls into the LB policy. 
+ listenerWrapper := func(scs balancer.SubConnState) { + acbw.ccb.serializer.TrySchedule(func(ctx context.Context) { + if ctx.Err() != nil || acbw.ccb.balancer == nil { + return + } + acbw.healthMu.Lock() + defer acbw.healthMu.Unlock() + if acbw.healthData != hd { + return + } + listener(scs) + }) + } + + hd.closeHealthProducer = registerFn(ctx, listenerWrapper) + }) } diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index 55bffaa77..b2f8fc7f4 100644 --- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.36.4 // protoc v5.27.1 // source: grpc/binlog/v1/binarylog.proto @@ -31,6 +31,7 @@ import ( timestamppb "google.golang.org/protobuf/types/known/timestamppb" reflect "reflect" sync "sync" + unsafe "unsafe" ) const ( @@ -233,10 +234,7 @@ func (Address_Type) EnumDescriptor() ([]byte, []int) { // Log entry we store in binary logs type GrpcLogEntry struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // The timestamp of the binary log message Timestamp *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` // Uniquely identifies a call. The value must not be 0 in order to disambiguate @@ -255,7 +253,7 @@ type GrpcLogEntry struct { // The logger uses one of the following fields to record the payload, // according to the type of the log entry. // - // Types that are assignable to Payload: + // Types that are valid to be assigned to Payload: // // *GrpcLogEntry_ClientHeader // *GrpcLogEntry_ServerHeader @@ -269,16 +267,16 @@ type GrpcLogEntry struct { // EVENT_TYPE_SERVER_HEADER normally or EVENT_TYPE_SERVER_TRAILER in // the case of trailers-only. On server side, peer is always // logged on EVENT_TYPE_CLIENT_HEADER. 
- Peer *Address `protobuf:"bytes,11,opt,name=peer,proto3" json:"peer,omitempty"` + Peer *Address `protobuf:"bytes,11,opt,name=peer,proto3" json:"peer,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *GrpcLogEntry) Reset() { *x = GrpcLogEntry{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GrpcLogEntry) String() string { @@ -289,7 +287,7 @@ func (*GrpcLogEntry) ProtoMessage() {} func (x *GrpcLogEntry) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -339,37 +337,45 @@ func (x *GrpcLogEntry) GetLogger() GrpcLogEntry_Logger { return GrpcLogEntry_LOGGER_UNKNOWN } -func (m *GrpcLogEntry) GetPayload() isGrpcLogEntry_Payload { - if m != nil { - return m.Payload +func (x *GrpcLogEntry) GetPayload() isGrpcLogEntry_Payload { + if x != nil { + return x.Payload } return nil } func (x *GrpcLogEntry) GetClientHeader() *ClientHeader { - if x, ok := x.GetPayload().(*GrpcLogEntry_ClientHeader); ok { - return x.ClientHeader + if x != nil { + if x, ok := x.Payload.(*GrpcLogEntry_ClientHeader); ok { + return x.ClientHeader + } } return nil } func (x *GrpcLogEntry) GetServerHeader() *ServerHeader { - if x, ok := x.GetPayload().(*GrpcLogEntry_ServerHeader); ok { - return x.ServerHeader + if x != nil { + if x, ok := x.Payload.(*GrpcLogEntry_ServerHeader); ok { + return x.ServerHeader + } } return nil } func (x *GrpcLogEntry) GetMessage() *Message { - if x, ok := x.GetPayload().(*GrpcLogEntry_Message); ok { - return x.Message + if x != nil { + if x, ok := x.Payload.(*GrpcLogEntry_Message); ok { + return x.Message + } } return nil } func (x *GrpcLogEntry) GetTrailer() *Trailer { - if x, ok := x.GetPayload().(*GrpcLogEntry_Trailer); ok { - return x.Trailer + if x != nil { + if x, ok := x.Payload.(*GrpcLogEntry_Trailer); ok { + return x.Trailer + } } return nil } @@ -418,10 +424,7 @@ func (*GrpcLogEntry_Message) isGrpcLogEntry_Payload() {} func (*GrpcLogEntry_Trailer) isGrpcLogEntry_Payload() {} type ClientHeader struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // This contains only the metadata from the application. Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` // The name of the RPC method, which looks something like: @@ -435,16 +438,16 @@ type ClientHeader struct { // or : . 
Authority string `protobuf:"bytes,3,opt,name=authority,proto3" json:"authority,omitempty"` // the RPC timeout - Timeout *durationpb.Duration `protobuf:"bytes,4,opt,name=timeout,proto3" json:"timeout,omitempty"` + Timeout *durationpb.Duration `protobuf:"bytes,4,opt,name=timeout,proto3" json:"timeout,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ClientHeader) Reset() { *x = ClientHeader{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ClientHeader) String() string { @@ -455,7 +458,7 @@ func (*ClientHeader) ProtoMessage() {} func (x *ClientHeader) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -499,21 +502,18 @@ func (x *ClientHeader) GetTimeout() *durationpb.Duration { } type ServerHeader struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // This contains only the metadata from the application. - Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` + Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ServerHeader) Reset() { *x = ServerHeader{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ServerHeader) String() string { @@ -524,7 +524,7 @@ func (*ServerHeader) ProtoMessage() {} func (x *ServerHeader) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -547,10 +547,7 @@ func (x *ServerHeader) GetMetadata() *Metadata { } type Trailer struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // This contains only the metadata from the application. Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` // The gRPC status code. @@ -561,15 +558,15 @@ type Trailer struct { // The value of the 'grpc-status-details-bin' metadata key. If // present, this is always an encoded 'google.rpc.Status' message. 
StatusDetails []byte `protobuf:"bytes,4,opt,name=status_details,json=statusDetails,proto3" json:"status_details,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Trailer) Reset() { *x = Trailer{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Trailer) String() string { @@ -580,7 +577,7 @@ func (*Trailer) ProtoMessage() {} func (x *Trailer) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -625,24 +622,21 @@ func (x *Trailer) GetStatusDetails() []byte { // Message payload, used by CLIENT_MESSAGE and SERVER_MESSAGE type Message struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Length of the message. It may not be the same as the length of the // data field, as the logging payload can be truncated or omitted. Length uint32 `protobuf:"varint,1,opt,name=length,proto3" json:"length,omitempty"` // May be truncated or omitted. - Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Message) Reset() { *x = Message{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Message) String() string { @@ -653,7 +647,7 @@ func (*Message) ProtoMessage() {} func (x *Message) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -704,20 +698,17 @@ func (x *Message) GetData() []byte { // header is just a normal metadata key. // The pair will not count towards the size limit. 
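The regenerated getters earlier in this file now guard both a nil receiver and the oneof case, so reading an entry's payload never panics. A minimal round-trip sketch using only the vendored binarylog and protobuf packages from this patch:

package main

import (
	"fmt"
	"log"

	binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
	"google.golang.org/protobuf/proto"
)

func main() {
	// Payload is a oneof: exactly one of ClientHeader, ServerHeader,
	// Message, or Trailer is populated per entry.
	entry := &binlogpb.GrpcLogEntry{
		Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE,
		Payload: &binlogpb.GrpcLogEntry_Message{
			Message: &binlogpb.Message{Length: 5, Data: []byte("hello")},
		},
	}
	wire, err := proto.Marshal(entry)
	if err != nil {
		log.Fatal(err)
	}

	var decoded binlogpb.GrpcLogEntry
	if err := proto.Unmarshal(wire, &decoded); err != nil {
		log.Fatal(err)
	}
	// Getters are nil-safe on the oneof: GetTrailer() returns nil here
	// instead of panicking, because the payload holds a Message.
	fmt.Println(decoded.GetMessage().GetLength()) // 5
	fmt.Println(decoded.GetTrailer() == nil)      // true
}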
type Metadata struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Entry []*MetadataEntry `protobuf:"bytes,1,rep,name=entry,proto3" json:"entry,omitempty"` unknownFields protoimpl.UnknownFields - - Entry []*MetadataEntry `protobuf:"bytes,1,rep,name=entry,proto3" json:"entry,omitempty"` + sizeCache protoimpl.SizeCache } func (x *Metadata) Reset() { *x = Metadata{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Metadata) String() string { @@ -728,7 +719,7 @@ func (*Metadata) ProtoMessage() {} func (x *Metadata) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -752,21 +743,18 @@ func (x *Metadata) GetEntry() []*MetadataEntry { // A metadata key value pair type MetadataEntry struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` unknownFields protoimpl.UnknownFields - - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + sizeCache protoimpl.SizeCache } func (x *MetadataEntry) Reset() { *x = MetadataEntry{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *MetadataEntry) String() string { @@ -777,7 +765,7 @@ func (*MetadataEntry) ProtoMessage() {} func (x *MetadataEntry) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -808,23 +796,20 @@ func (x *MetadataEntry) GetValue() []byte { // Address information type Address struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Type Address_Type `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.binarylog.v1.Address_Type" json:"type,omitempty"` - Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + Type Address_Type `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.binarylog.v1.Address_Type" json:"type,omitempty"` + Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` // only for TYPE_IPV4 and TYPE_IPV6 - IpPort uint32 `protobuf:"varint,3,opt,name=ip_port,json=ipPort,proto3" json:"ip_port,omitempty"` + IpPort uint32 `protobuf:"varint,3,opt,name=ip_port,json=ipPort,proto3" json:"ip_port,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Address) Reset() { *x = 
Address{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Address) String() string { @@ -835,7 +820,7 @@ func (*Address) ProtoMessage() {} func (x *Address) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -873,7 +858,7 @@ func (x *Address) GetIpPort() uint32 { var File_grpc_binlog_v1_binarylog_proto protoreflect.FileDescriptor -var file_grpc_binlog_v1_binarylog_proto_rawDesc = []byte{ +var file_grpc_binlog_v1_binarylog_proto_rawDesc = string([]byte{ 0x0a, 0x1e, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x2f, 0x76, 0x31, 0x2f, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x11, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, @@ -999,16 +984,16 @@ var file_grpc_binlog_v1_binarylog_proto_rawDesc = []byte{ 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +}) var ( file_grpc_binlog_v1_binarylog_proto_rawDescOnce sync.Once - file_grpc_binlog_v1_binarylog_proto_rawDescData = file_grpc_binlog_v1_binarylog_proto_rawDesc + file_grpc_binlog_v1_binarylog_proto_rawDescData []byte ) func file_grpc_binlog_v1_binarylog_proto_rawDescGZIP() []byte { file_grpc_binlog_v1_binarylog_proto_rawDescOnce.Do(func() { - file_grpc_binlog_v1_binarylog_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_binlog_v1_binarylog_proto_rawDescData) + file_grpc_binlog_v1_binarylog_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_grpc_binlog_v1_binarylog_proto_rawDesc), len(file_grpc_binlog_v1_binarylog_proto_rawDesc))) }) return file_grpc_binlog_v1_binarylog_proto_rawDescData } @@ -1057,104 +1042,6 @@ func file_grpc_binlog_v1_binarylog_proto_init() { if File_grpc_binlog_v1_binarylog_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_grpc_binlog_v1_binarylog_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*GrpcLogEntry); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_binlog_v1_binarylog_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*ClientHeader); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_binlog_v1_binarylog_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*ServerHeader); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_binlog_v1_binarylog_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*Trailer); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_binlog_v1_binarylog_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*Message); i { - case 0: 
- return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_binlog_v1_binarylog_proto_msgTypes[5].Exporter = func(v any, i int) any { - switch v := v.(*Metadata); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_binlog_v1_binarylog_proto_msgTypes[6].Exporter = func(v any, i int) any { - switch v := v.(*MetadataEntry); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_binlog_v1_binarylog_proto_msgTypes[7].Exporter = func(v any, i int) any { - switch v := v.(*Address); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } file_grpc_binlog_v1_binarylog_proto_msgTypes[0].OneofWrappers = []any{ (*GrpcLogEntry_ClientHeader)(nil), (*GrpcLogEntry_ServerHeader)(nil), @@ -1165,7 +1052,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_grpc_binlog_v1_binarylog_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_grpc_binlog_v1_binarylog_proto_rawDesc), len(file_grpc_binlog_v1_binarylog_proto_rawDesc)), NumEnums: 3, NumMessages: 8, NumExtensions: 0, @@ -1177,7 +1064,6 @@ func file_grpc_binlog_v1_binarylog_proto_init() { MessageInfos: file_grpc_binlog_v1_binarylog_proto_msgTypes, }.Build() File_grpc_binlog_v1_binarylog_proto = out.File - file_grpc_binlog_v1_binarylog_proto_rawDesc = nil file_grpc_binlog_v1_binarylog_proto_goTypes = nil file_grpc_binlog_v1_binarylog_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index 9c8850e3f..a319ef979 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -118,12 +118,26 @@ func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*ires // NewClient creates a new gRPC "channel" for the target URI provided. No I/O // is performed. Use of the ClientConn for RPCs will automatically cause it to -// connect. Connect may be used to manually create a connection, but for most -// users this is unnecessary. +// connect. The Connect method may be called to manually create a connection, +// but for most users this should be unnecessary. // // The target name syntax is defined in -// https://github.com/grpc/grpc/blob/master/doc/naming.md. e.g. to use dns -// resolver, a "dns:///" prefix should be applied to the target. +// https://github.com/grpc/grpc/blob/master/doc/naming.md. E.g. to use the dns +// name resolver, a "dns:///" prefix may be applied to the target. The default +// name resolver will be used if no scheme is detected, or if the parsed scheme +// is not a registered name resolver. The default resolver is "dns" but can be +// overridden using the resolver package's SetDefaultScheme. 
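Putting the naming rules above together, a minimal client sketch; foo.googleapis.com is taken from the examples that continue below, and insecure credentials are used only to keep the sketch self-contained:

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// "dns:///host:port" names the resolver explicitly; a bare "host:port"
	// target would go through the same default "dns" scheme under NewClient.
	conn, err := grpc.NewClient("dns:///foo.googleapis.com:8080",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("NewClient: %v", err)
	}
	defer conn.Close()

	// No I/O has happened yet; the first RPC (or an explicit Connect call)
	// kicks off name resolution and connection establishment.
	conn.Connect()
}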
+// +// Examples: +// +// - "foo.googleapis.com:8080" +// - "dns:///foo.googleapis.com:8080" +// - "dns:///foo.googleapis.com" +// - "dns:///10.0.0.213:8080" +// - "dns:///%5B2001:db8:85a3:8d3:1319:8a2e:370:7348%5D:443" +// - "dns://8.8.8.8/foo.googleapis.com:8080" +// - "dns://8.8.8.8/foo.googleapis.com" +// - "zookeeper://zk.example.com:9900/example_service" // // The DialOptions returned by WithBlock, WithTimeout, // WithReturnConnectionError, and FailOnNonTempDialError are ignored by this @@ -181,7 +195,7 @@ func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error) } cc.dopts.defaultServiceConfig, _ = scpr.Config.(*ServiceConfig) } - cc.mkp = cc.dopts.copts.KeepaliveParams + cc.keepaliveParams = cc.dopts.copts.KeepaliveParams if err = cc.initAuthority(); err != nil { return nil, err @@ -225,7 +239,12 @@ func Dial(target string, opts ...DialOption) (*ClientConn, error) { func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { // At the end of this method, we kick the channel out of idle, rather than // waiting for the first rpc. - opts = append([]DialOption{withDefaultScheme("passthrough")}, opts...) + // + // WithLocalDNSResolution dial option in `grpc.Dial` ensures that it + // preserves behavior: when default scheme passthrough is used, skip + // hostname resolution, when "dns" is used for resolution, perform + // resolution on the client. + opts = append([]DialOption{withDefaultScheme("passthrough"), WithLocalDNSResolution()}, opts...) cc, err := NewClient(target, opts...) if err != nil { return nil, err @@ -618,7 +637,7 @@ type ClientConn struct { balancerWrapper *ccBalancerWrapper // Always recreated whenever entering idle to simplify Close. sc *ServiceConfig // Latest service config received from the resolver. conns map[*addrConn]struct{} // Set to nil on close. - mkp keepalive.ClientParameters // May be updated upon receipt of a GoAway. + keepaliveParams keepalive.ClientParameters // May be updated upon receipt of a GoAway. // firstResolveEvent is used to track whether the name resolver sent us at // least one update. RPCs block on this event. May be accessed without mu // if we know we cannot be asked to enter idle mode while accessing it (e.g. @@ -775,10 +794,7 @@ func (cc *ClientConn) updateResolverStateAndUnlock(s resolver.State, err error) } } - var balCfg serviceconfig.LoadBalancingConfig - if cc.sc != nil && cc.sc.lbConfig != nil { - balCfg = cc.sc.lbConfig - } + balCfg := cc.sc.lbConfig bw := cc.balancerWrapper cc.mu.Unlock() @@ -825,14 +841,13 @@ func (cc *ClientConn) newAddrConnLocked(addrs []resolver.Address, opts balancer. } ac := &addrConn{ - state: connectivity.Idle, - cc: cc, - addrs: copyAddresses(addrs), - scopts: opts, - dopts: cc.dopts, - channelz: channelz.RegisterSubChannel(cc.channelz, ""), - resetBackoff: make(chan struct{}), - stateReadyChan: make(chan struct{}), + state: connectivity.Idle, + cc: cc, + addrs: copyAddresses(addrs), + scopts: opts, + dopts: cc.dopts, + channelz: channelz.RegisterSubChannel(cc.channelz, ""), + resetBackoff: make(chan struct{}), } ac.ctx, ac.cancel = context.WithCancel(cc.ctx) // Start with our address set to the first address; this may be updated if @@ -871,7 +886,13 @@ func (cc *ClientConn) Target() string { return cc.target } -// CanonicalTarget returns the canonical target string of the ClientConn. +// CanonicalTarget returns the canonical target string used when creating cc. +// +// This always has the form "://[authority]/". 
For example: +// +// - "dns:///example.com:42" +// - "dns://8.8.8.8/example.com:42" +// - "unix:///path/to/socket" func (cc *ClientConn) CanonicalTarget() string { return cc.parsedTarget.String() } @@ -1141,10 +1162,15 @@ func (cc *ClientConn) Close() error { <-cc.resolverWrapper.serializer.Done() <-cc.balancerWrapper.serializer.Done() - + var wg sync.WaitGroup for ac := range conns { - ac.tearDown(ErrClientConnClosing) + wg.Add(1) + go func(ac *addrConn) { + defer wg.Done() + ac.tearDown(ErrClientConnClosing) + }(ac) } + wg.Wait() cc.addTraceEvent("deleted") // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add // trace reference to the entity being deleted, and thus prevent it from being @@ -1179,8 +1205,7 @@ type addrConn struct { addrs []resolver.Address // All addresses that the resolver resolved to. // Use updateConnectivityState for updating addrConn's connectivity state. - state connectivity.State - stateReadyChan chan struct{} // closed and recreated on every READY state change. + state connectivity.State backoffIdx int // Needs to be stateful for resetConnectBackoff. resetBackoff chan struct{} @@ -1210,8 +1235,8 @@ func (ac *addrConn) adjustParams(r transport.GoAwayReason) { case transport.GoAwayTooManyPings: v := 2 * ac.dopts.copts.KeepaliveParams.Time ac.cc.mu.Lock() - if v > ac.cc.mkp.Time { - ac.cc.mkp.Time = v + if v > ac.cc.keepaliveParams.Time { + ac.cc.keepaliveParams.Time = v } ac.cc.mu.Unlock() } @@ -1251,6 +1276,8 @@ func (ac *addrConn) resetTransportAndUnlock() { ac.mu.Unlock() if err := ac.tryAllAddrs(acCtx, addrs, connectDeadline); err != nil { + // TODO: #7534 - Move re-resolution requests into the pick_first LB policy + // to ensure one resolution request per pass instead of per subconn failure. ac.cc.resolveNow(resolver.ResolveNowOptions{}) ac.mu.Lock() if acCtx.Err() != nil { @@ -1292,7 +1319,7 @@ func (ac *addrConn) resetTransportAndUnlock() { ac.mu.Unlock() } -// tryAllAddrs tries to creates a connection to the addresses, and stop when at +// tryAllAddrs tries to create a connection to the addresses, and stop when at // the first successful one. It returns an error if no address was successfully // connected, or updates ac appropriately with the new transport. func (ac *addrConn) tryAllAddrs(ctx context.Context, addrs []resolver.Address, connectDeadline time.Time) error { @@ -1305,7 +1332,7 @@ func (ac *addrConn) tryAllAddrs(ctx context.Context, addrs []resolver.Address, c ac.mu.Lock() ac.cc.mu.RLock() - ac.dopts.copts.KeepaliveParams = ac.cc.mkp + ac.dopts.copts.KeepaliveParams = ac.cc.keepaliveParams ac.cc.mu.RUnlock() copts := ac.dopts.copts @@ -1369,7 +1396,7 @@ func (ac *addrConn) createTransport(ctx context.Context, addr resolver.Address, defer cancel() copts.ChannelzParent = ac.channelz - newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onClose) + newTr, err := transport.NewHTTP2Client(connectCtx, ac.cc.ctx, addr, copts, onClose) if err != nil { if logger.V(2) { logger.Infof("Creating new client transport to %q: %v", addr, err) @@ -1443,7 +1470,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) { if !ac.scopts.HealthCheckEnabled { return } - healthCheckFunc := ac.cc.dopts.healthCheckFunc + healthCheckFunc := internal.HealthCheckFunc if healthCheckFunc == nil { // The health package is not imported to set health check function. // @@ -1475,7 +1502,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) { } // Start the health checking stream. 
go func() { - err := ac.cc.dopts.healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName) + err := healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName) if err != nil { if status.Code(err) == codes.Unimplemented { channelz.Error(logger, ac.channelz, "Subchannel health check is unimplemented at server side, thus health check is disabled") @@ -1504,29 +1531,6 @@ func (ac *addrConn) getReadyTransport() transport.ClientTransport { return nil } -// getTransport waits until the addrconn is ready and returns the transport. -// If the context expires first, returns an appropriate status. If the -// addrConn is stopped first, returns an Unavailable status error. -func (ac *addrConn) getTransport(ctx context.Context) (transport.ClientTransport, error) { - for ctx.Err() == nil { - ac.mu.Lock() - t, state, sc := ac.transport, ac.state, ac.stateReadyChan - ac.mu.Unlock() - if state == connectivity.Ready { - return t, nil - } - if state == connectivity.Shutdown { - return nil, status.Errorf(codes.Unavailable, "SubConn shutting down") - } - - select { - case <-ctx.Done(): - case <-sc: - } - } - return nil, status.FromContextError(ctx.Err()).Err() -} - // tearDown starts to tear down the addrConn. // // Note that tearDown doesn't remove ac from ac.cc.conns, so the addrConn struct diff --git a/vendor/google.golang.org/grpc/codec.go b/vendor/google.golang.org/grpc/codec.go index e840858b7..959c2f99d 100644 --- a/vendor/google.golang.org/grpc/codec.go +++ b/vendor/google.golang.org/grpc/codec.go @@ -71,7 +71,7 @@ func (c codecV0Bridge) Marshal(v any) (mem.BufferSlice, error) { if err != nil { return nil, err } - return mem.BufferSlice{mem.NewBuffer(&data, nil)}, nil + return mem.BufferSlice{mem.SliceBuffer(data)}, nil } func (c codecV0Bridge) Unmarshal(data mem.BufferSlice, v any) (err error) { diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go index 411435854..bd5fe22b6 100644 --- a/vendor/google.golang.org/grpc/credentials/tls.go +++ b/vendor/google.golang.org/grpc/credentials/tls.go @@ -32,6 +32,8 @@ import ( "google.golang.org/grpc/internal/envconfig" ) +const alpnFailureHelpMessage = "If you upgraded from a grpc-go version earlier than 1.67, your TLS connections may have stopped working due to ALPN enforcement. For more details, see: https://github.com/grpc/grpc-go/issues/434" + var logger = grpclog.Component("credentials") // TLSInfo contains the auth information for a TLS authenticated connection. @@ -128,7 +130,7 @@ func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawCon if np == "" { if envconfig.EnforceALPNEnabled { conn.Close() - return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property") + return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property. %s", alpnFailureHelpMessage) } logger.Warningf("Allowing TLS connection to server %q with ALPN disabled. TLS connections to servers with ALPN disabled will be disallowed in future grpc-go releases", cfg.ServerName) } @@ -158,7 +160,7 @@ func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) if cs.NegotiatedProtocol == "" { if envconfig.EnforceALPNEnabled { conn.Close() - return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property") + return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property. 
%s", alpnFailureHelpMessage) } else if logger.V(2) { logger.Info("Allowing TLS connection from client with ALPN disabled. TLS connections with ALPN disabled will be disallowed in future grpc-go releases") } @@ -200,25 +202,40 @@ var tls12ForbiddenCipherSuites = map[uint16]struct{}{ // NewTLS uses c to construct a TransportCredentials based on TLS. func NewTLS(c *tls.Config) TransportCredentials { - tc := &tlsCreds{credinternal.CloneTLSConfig(c)} - tc.config.NextProtos = credinternal.AppendH2ToNextProtos(tc.config.NextProtos) + config := applyDefaults(c) + if config.GetConfigForClient != nil { + oldFn := config.GetConfigForClient + config.GetConfigForClient = func(hello *tls.ClientHelloInfo) (*tls.Config, error) { + cfgForClient, err := oldFn(hello) + if err != nil || cfgForClient == nil { + return cfgForClient, err + } + return applyDefaults(cfgForClient), nil + } + } + return &tlsCreds{config: config} +} + +func applyDefaults(c *tls.Config) *tls.Config { + config := credinternal.CloneTLSConfig(c) + config.NextProtos = credinternal.AppendH2ToNextProtos(config.NextProtos) // If the user did not configure a MinVersion and did not configure a // MaxVersion < 1.2, use MinVersion=1.2, which is required by // https://datatracker.ietf.org/doc/html/rfc7540#section-9.2 - if tc.config.MinVersion == 0 && (tc.config.MaxVersion == 0 || tc.config.MaxVersion >= tls.VersionTLS12) { - tc.config.MinVersion = tls.VersionTLS12 + if config.MinVersion == 0 && (config.MaxVersion == 0 || config.MaxVersion >= tls.VersionTLS12) { + config.MinVersion = tls.VersionTLS12 } // If the user did not configure CipherSuites, use all "secure" cipher // suites reported by the TLS package, but remove some explicitly forbidden // by https://datatracker.ietf.org/doc/html/rfc7540#appendix-A - if tc.config.CipherSuites == nil { + if config.CipherSuites == nil { for _, cs := range tls.CipherSuites() { if _, ok := tls12ForbiddenCipherSuites[cs.ID]; !ok { - tc.config.CipherSuites = append(tc.config.CipherSuites, cs.ID) + config.CipherSuites = append(config.CipherSuites, cs.ID) } } } - return tc + return config } // NewClientTLSFromCert constructs TLS credentials from the provided root diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index 2b285beee..405a2ffeb 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -73,7 +73,7 @@ type dialOptions struct { chainUnaryInts []UnaryClientInterceptor chainStreamInts []StreamClientInterceptor - cp Compressor + compressorV0 Compressor dc Decompressor bs internalbackoff.Strategy block bool @@ -87,7 +87,6 @@ type dialOptions struct { disableServiceConfig bool disableRetry bool disableHealthCheck bool - healthCheckFunc internal.HealthChecker minConnectTimeout func() time.Duration defaultServiceConfig *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON. defaultServiceConfigRawJSON *string @@ -95,6 +94,8 @@ type dialOptions struct { idleTimeout time.Duration defaultScheme string maxCallAttempts int + enableLocalDNSResolution bool // Specifies if target hostnames should be resolved when proxying is enabled. + useProxy bool // Specifies if a server should be connected via proxy. } // DialOption configures how we set up the connection. @@ -257,7 +258,7 @@ func WithCodec(c Codec) DialOption { // Deprecated: use UseCompressor instead. Will be supported throughout 1.x. 
func WithCompressor(cp Compressor) DialOption { return newFuncDialOption(func(o *dialOptions) { - o.cp = cp + o.compressorV0 = cp }) } @@ -378,7 +379,22 @@ func WithInsecure() DialOption { // later release. func WithNoProxy() DialOption { return newFuncDialOption(func(o *dialOptions) { - o.copts.UseProxy = false + o.useProxy = false + }) +} + +// WithLocalDNSResolution forces local DNS name resolution even when a proxy is +// specified in the environment. By default, the server name is provided +// directly to the proxy as part of the CONNECT handshake. This is ignored if +// WithNoProxy is used. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithLocalDNSResolution() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.enableLocalDNSResolution = true }) } @@ -429,6 +445,11 @@ func WithTimeout(d time.Duration) DialOption { // returned by f, gRPC checks the error's Temporary() method to decide if it // should try to reconnect to the network address. // +// Note that gRPC by default performs name resolution on the target passed to +// NewClient. To bypass name resolution and cause the target string to be +// passed directly to the dialer here instead, use the "passthrough" resolver +// by specifying it in the target string, e.g. "passthrough:target". +// // Note: All supported releases of Go (as of December 2023) override the OS // defaults for TCP keepalive time and interval to 15s. To enable TCP keepalive // with OS defaults for keepalive time and interval, use a net.Dialer that sets @@ -436,7 +457,7 @@ func WithTimeout(d time.Duration) DialOption { // option to true from the Control field. For a concrete example of how to do // this, see internal.NetDialerWithTCPKeepalive(). // -// For more information, please see [issue 23459] in the Go github repo. +// For more information, please see [issue 23459] in the Go GitHub repo. // // [issue 23459]: https://github.com/golang/go/issues/23459 func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption { @@ -445,10 +466,6 @@ func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOp }) } -func init() { - internal.WithHealthCheckFunc = withHealthCheckFunc -} - // WithDialer returns a DialOption that specifies a function to use for dialing // network addresses. If FailOnNonTempDialError() is set to true, and an error // is returned by f, gRPC checks the error's Temporary() method to decide if it @@ -662,30 +679,20 @@ func WithDisableHealthCheck() DialOption { }) } -// withHealthCheckFunc replaces the default health check function with the -// provided one. It makes tests easier to change the health check function. -// -// For testing purpose only. 
-func withHealthCheckFunc(f internal.HealthChecker) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.healthCheckFunc = f - }) -} - func defaultDialOptions() dialOptions { return dialOptions{ copts: transport.ConnectOptions{ ReadBufferSize: defaultReadBufSize, WriteBufferSize: defaultWriteBufSize, - UseProxy: true, UserAgent: grpcUA, BufferPool: mem.DefaultBufferPool(), }, - bs: internalbackoff.DefaultExponential, - healthCheckFunc: internal.HealthCheckFunc, - idleTimeout: 30 * time.Minute, - defaultScheme: "dns", - maxCallAttempts: defaultMaxCallAttempts, + bs: internalbackoff.DefaultExponential, + idleTimeout: 30 * time.Minute, + defaultScheme: "dns", + maxCallAttempts: defaultMaxCallAttempts, + useProxy: true, + enableLocalDNSResolution: false, } } diff --git a/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go index 1d827dd5d..ad75313a1 100644 --- a/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go +++ b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go @@ -23,6 +23,7 @@ import ( "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" + "google.golang.org/grpc/stats" ) func init() { @@ -34,7 +35,7 @@ var logger = grpclog.Component("metrics-registry") // DefaultMetrics are the default metrics registered through global metrics // registry. This is written to at initialization time only, and is read only // after initialization. -var DefaultMetrics = NewMetrics() +var DefaultMetrics = stats.NewMetricSet() // MetricDescriptor is the data for a registered metric. type MetricDescriptor struct { @@ -42,7 +43,7 @@ type MetricDescriptor struct { // (including any per call metrics). See // https://github.com/grpc/proposal/blob/master/A79-non-per-call-metrics-architecture.md#metric-instrument-naming-conventions // for metric naming conventions. - Name Metric + Name string // The description of this metric. Description string // The unit (e.g. entries, seconds) of this metric. @@ -154,27 +155,27 @@ func (h *Int64GaugeHandle) Record(recorder MetricsRecorder, incr int64, labels . } // registeredMetrics are the registered metric descriptor names. -var registeredMetrics = make(map[Metric]bool) +var registeredMetrics = make(map[string]bool) // metricsRegistry contains all of the registered metrics. // // This is written to only at init time, and read only after that. -var metricsRegistry = make(map[Metric]*MetricDescriptor) +var metricsRegistry = make(map[string]*MetricDescriptor) // DescriptorForMetric returns the MetricDescriptor from the global registry. // // Returns nil if MetricDescriptor not present. 
-func DescriptorForMetric(metric Metric) *MetricDescriptor { - return metricsRegistry[metric] +func DescriptorForMetric(metricName string) *MetricDescriptor { + return metricsRegistry[metricName] } -func registerMetric(name Metric, def bool) { - if registeredMetrics[name] { - logger.Fatalf("metric %v already registered", name) +func registerMetric(metricName string, def bool) { + if registeredMetrics[metricName] { + logger.Fatalf("metric %v already registered", metricName) } - registeredMetrics[name] = true + registeredMetrics[metricName] = true if def { - DefaultMetrics = DefaultMetrics.Add(name) + DefaultMetrics = DefaultMetrics.Add(metricName) } } @@ -256,8 +257,8 @@ func snapshotMetricsRegistryForTesting() func() { oldRegisteredMetrics := registeredMetrics oldMetricsRegistry := metricsRegistry - registeredMetrics = make(map[Metric]bool) - metricsRegistry = make(map[Metric]*MetricDescriptor) + registeredMetrics = make(map[string]bool) + metricsRegistry = make(map[string]*MetricDescriptor) maps.Copy(registeredMetrics, registeredMetrics) maps.Copy(metricsRegistry, metricsRegistry) diff --git a/vendor/google.golang.org/grpc/experimental/stats/metrics.go b/vendor/google.golang.org/grpc/experimental/stats/metrics.go index 3221f7a63..ee1423605 100644 --- a/vendor/google.golang.org/grpc/experimental/stats/metrics.go +++ b/vendor/google.golang.org/grpc/experimental/stats/metrics.go @@ -19,7 +19,7 @@ // Package stats contains experimental metrics/stats API's. package stats -import "maps" +import "google.golang.org/grpc/stats" // MetricsRecorder records on metrics derived from metric registry. type MetricsRecorder interface { @@ -40,75 +40,15 @@ type MetricsRecorder interface { RecordInt64Gauge(handle *Int64GaugeHandle, incr int64, labels ...string) } -// Metric is an identifier for a metric. -type Metric string +// Metrics is an experimental legacy alias of the now-stable stats.MetricSet. +// Metrics will be deleted in a future release. +type Metrics = stats.MetricSet -// Metrics is a set of metrics to record. Once created, Metrics is immutable, -// however Add and Remove can make copies with specific metrics added or -// removed, respectively. -// -// Do not construct directly; use NewMetrics instead. -type Metrics struct { - // metrics are the set of metrics to initialize. - metrics map[Metric]bool -} +// Metric was replaced by direct usage of strings. +type Metric = string -// NewMetrics returns a Metrics containing Metrics. +// NewMetrics is an experimental legacy alias of the now-stable +// stats.NewMetricSet. NewMetrics will be deleted in a future release. func NewMetrics(metrics ...Metric) *Metrics { - newMetrics := make(map[Metric]bool) - for _, metric := range metrics { - newMetrics[metric] = true - } - return &Metrics{ - metrics: newMetrics, - } -} - -// Metrics returns the metrics set. The returned map is read-only and must not -// be modified. -func (m *Metrics) Metrics() map[Metric]bool { - return m.metrics -} - -// Add adds the metrics to the metrics set and returns a new copy with the -// additional metrics. -func (m *Metrics) Add(metrics ...Metric) *Metrics { - newMetrics := make(map[Metric]bool) - for metric := range m.metrics { - newMetrics[metric] = true - } - - for _, metric := range metrics { - newMetrics[metric] = true - } - return &Metrics{ - metrics: newMetrics, - } -} - -// Join joins the metrics passed in with the metrics set, and returns a new copy -// with the merged metrics. 
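The Add/Join/Remove helpers being deleted below survive as methods on the stable stats.MetricSet that the new alias points at. A minimal sketch of the replacement API; the two metric names follow gRPC's OpenTelemetry naming conventions and are illustrative, not required:

package main

import (
	"fmt"

	"google.golang.org/grpc/stats"
)

func main() {
	// Metric identifiers are plain strings now. MetricSet is immutable:
	// Add returns a copy rather than mutating the receiver.
	base := stats.NewMetricSet("grpc.client.attempt.duration")
	extended := base.Add("grpc.client.call.duration")

	fmt.Println(len(base.Metrics()))     // 1
	fmt.Println(len(extended.Metrics())) // 2
}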
-func (m *Metrics) Join(metrics *Metrics) *Metrics { - newMetrics := make(map[Metric]bool) - maps.Copy(newMetrics, m.metrics) - maps.Copy(newMetrics, metrics.metrics) - return &Metrics{ - metrics: newMetrics, - } -} - -// Remove removes the metrics from the metrics set and returns a new copy with -// the metrics removed. -func (m *Metrics) Remove(metrics ...Metric) *Metrics { - newMetrics := make(map[Metric]bool) - for metric := range m.metrics { - newMetrics[metric] = true - } - - for _, metric := range metrics { - delete(newMetrics, metric) - } - return &Metrics{ - metrics: newMetrics, - } + return stats.NewMetricSet(metrics...) } diff --git a/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go index 07df71e98..ed90060c3 100644 --- a/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go +++ b/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go @@ -101,6 +101,22 @@ var severityName = []string{ fatalLog: "FATAL", } +// sprintf is fmt.Sprintf. +// These vars exist to make it possible to test that expensive format calls aren't made unnecessarily. +var sprintf = fmt.Sprintf + +// sprint is fmt.Sprint. +// These vars exist to make it possible to test that expensive format calls aren't made unnecessarily. +var sprint = fmt.Sprint + +// sprintln is fmt.Sprintln. +// These vars exist to make it possible to test that expensive format calls aren't made unnecessarily. +var sprintln = fmt.Sprintln + +// exit is os.Exit. +// This var exists to make it possible to test functions calling os.Exit. +var exit = os.Exit + // loggerT is the default logger used by grpclog. type loggerT struct { m []*log.Logger @@ -111,7 +127,7 @@ type loggerT struct { func (g *loggerT) output(severity int, s string) { sevStr := severityName[severity] if !g.jsonFormat { - g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s)) + g.m[severity].Output(2, sevStr+": "+s) return } // TODO: we can also include the logging component, but that needs more @@ -123,55 +139,79 @@ func (g *loggerT) output(severity int, s string) { g.m[severity].Output(2, string(b)) } +func (g *loggerT) printf(severity int, format string, args ...any) { + // Note the discard check is duplicated in each print func, rather than in + // output, to avoid the expensive Sprint calls. + // De-duplicating this by moving to output would be a significant performance regression! + if lg := g.m[severity]; lg.Writer() == io.Discard { + return + } + g.output(severity, sprintf(format, args...)) +} + +func (g *loggerT) print(severity int, v ...any) { + if lg := g.m[severity]; lg.Writer() == io.Discard { + return + } + g.output(severity, sprint(v...)) +} + +func (g *loggerT) println(severity int, v ...any) { + if lg := g.m[severity]; lg.Writer() == io.Discard { + return + } + g.output(severity, sprintln(v...)) +} + func (g *loggerT) Info(args ...any) { - g.output(infoLog, fmt.Sprint(args...)) + g.print(infoLog, args...) } func (g *loggerT) Infoln(args ...any) { - g.output(infoLog, fmt.Sprintln(args...)) + g.println(infoLog, args...) } func (g *loggerT) Infof(format string, args ...any) { - g.output(infoLog, fmt.Sprintf(format, args...)) + g.printf(infoLog, format, args...) } func (g *loggerT) Warning(args ...any) { - g.output(warningLog, fmt.Sprint(args...)) + g.print(warningLog, args...) } func (g *loggerT) Warningln(args ...any) { - g.output(warningLog, fmt.Sprintln(args...)) + g.println(warningLog, args...) 
} func (g *loggerT) Warningf(format string, args ...any) { - g.output(warningLog, fmt.Sprintf(format, args...)) + g.printf(warningLog, format, args...) } func (g *loggerT) Error(args ...any) { - g.output(errorLog, fmt.Sprint(args...)) + g.print(errorLog, args...) } func (g *loggerT) Errorln(args ...any) { - g.output(errorLog, fmt.Sprintln(args...)) + g.println(errorLog, args...) } func (g *loggerT) Errorf(format string, args ...any) { - g.output(errorLog, fmt.Sprintf(format, args...)) + g.printf(errorLog, format, args...) } func (g *loggerT) Fatal(args ...any) { - g.output(fatalLog, fmt.Sprint(args...)) - os.Exit(1) + g.print(fatalLog, args...) + exit(1) } func (g *loggerT) Fatalln(args ...any) { - g.output(fatalLog, fmt.Sprintln(args...)) - os.Exit(1) + g.println(fatalLog, args...) + exit(1) } func (g *loggerT) Fatalf(format string, args ...any) { - g.output(fatalLog, fmt.Sprintf(format, args...)) - os.Exit(1) + g.printf(fatalLog, format, args...) + exit(1) } func (g *loggerT) V(l int) bool { @@ -186,19 +226,42 @@ type LoggerV2Config struct { FormatJSON bool } +// combineLoggers returns a combined logger for both higher & lower severity logs, +// or only one if the other is io.Discard. +// +// This uses io.Discard instead of io.MultiWriter when all loggers +// are set to io.Discard. Both this package and the standard log package have +// significant optimizations for io.Discard, which io.MultiWriter lacks (as of +// this writing). +func combineLoggers(lower, higher io.Writer) io.Writer { + if lower == io.Discard { + return higher + } + if higher == io.Discard { + return lower + } + return io.MultiWriter(lower, higher) +} + // NewLoggerV2 creates a new LoggerV2 instance with the provided configuration. // The infoW, warningW, and errorW writers are used to write log messages of // different severity levels. func NewLoggerV2(infoW, warningW, errorW io.Writer, c LoggerV2Config) LoggerV2 { - var m []*log.Logger flag := log.LstdFlags if c.FormatJSON { flag = 0 } - m = append(m, log.New(infoW, "", flag)) - m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag)) - ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal. - m = append(m, log.New(ew, "", flag)) - m = append(m, log.New(ew, "", flag)) + + warningW = combineLoggers(infoW, warningW) + errorW = combineLoggers(errorW, warningW) + + fatalW := errorW + + m := []*log.Logger{ + log.New(infoW, "", flag), + log.New(warningW, "", flag), + log.New(errorW, "", flag), + log.New(fatalW, "", flag), + } return &loggerT{m: m, v: c.Verbosity, jsonFormat: c.FormatJSON} } diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go index d92335445..94177b05c 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.36.4 // protoc v5.27.1 // source: grpc/health/v1/health.proto @@ -28,6 +28,7 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" + unsafe "unsafe" ) const ( @@ -90,20 +91,17 @@ func (HealthCheckResponse_ServingStatus) EnumDescriptor() ([]byte, []int) { } type HealthCheckRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"` unknownFields protoimpl.UnknownFields - - Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"` + sizeCache protoimpl.SizeCache } func (x *HealthCheckRequest) Reset() { *x = HealthCheckRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_health_v1_health_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_health_v1_health_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *HealthCheckRequest) String() string { @@ -114,7 +112,7 @@ func (*HealthCheckRequest) ProtoMessage() {} func (x *HealthCheckRequest) ProtoReflect() protoreflect.Message { mi := &file_grpc_health_v1_health_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -137,20 +135,17 @@ func (x *HealthCheckRequest) GetService() string { } type HealthCheckResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Status HealthCheckResponse_ServingStatus `protobuf:"varint,1,opt,name=status,proto3,enum=grpc.health.v1.HealthCheckResponse_ServingStatus" json:"status,omitempty"` unknownFields protoimpl.UnknownFields - - Status HealthCheckResponse_ServingStatus `protobuf:"varint,1,opt,name=status,proto3,enum=grpc.health.v1.HealthCheckResponse_ServingStatus" json:"status,omitempty"` + sizeCache protoimpl.SizeCache } func (x *HealthCheckResponse) Reset() { *x = HealthCheckResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_health_v1_health_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_health_v1_health_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *HealthCheckResponse) String() string { @@ -161,7 +156,7 @@ func (*HealthCheckResponse) ProtoMessage() {} func (x *HealthCheckResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_health_v1_health_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -185,7 +180,7 @@ func (x *HealthCheckResponse) GetStatus() HealthCheckResponse_ServingStatus { var File_grpc_health_v1_health_proto protoreflect.FileDescriptor -var file_grpc_health_v1_health_proto_rawDesc = []byte{ +var file_grpc_health_v1_health_proto_rawDesc = string([]byte{ 0x0a, 0x1b, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2f, 0x76, 0x31, 0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x22, 0x2e, 0x0a, @@ -214,23 +209,24 @@ var 
file_grpc_health_v1_health_proto_rawDesc = []byte{ 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x30, 0x01, 0x42, 0x61, 0x0a, 0x11, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, + 0x30, 0x01, 0x42, 0x70, 0x0a, 0x11, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, - 0x68, 0x5f, 0x76, 0x31, 0xaa, 0x02, 0x0e, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x48, 0x65, 0x61, 0x6c, - 0x74, 0x68, 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} + 0x68, 0x5f, 0x76, 0x31, 0xa2, 0x02, 0x0c, 0x47, 0x72, 0x70, 0x63, 0x48, 0x65, 0x61, 0x6c, 0x74, + 0x68, 0x56, 0x31, 0xaa, 0x02, 0x0e, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, + 0x68, 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +}) var ( file_grpc_health_v1_health_proto_rawDescOnce sync.Once - file_grpc_health_v1_health_proto_rawDescData = file_grpc_health_v1_health_proto_rawDesc + file_grpc_health_v1_health_proto_rawDescData []byte ) func file_grpc_health_v1_health_proto_rawDescGZIP() []byte { file_grpc_health_v1_health_proto_rawDescOnce.Do(func() { - file_grpc_health_v1_health_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_health_v1_health_proto_rawDescData) + file_grpc_health_v1_health_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_grpc_health_v1_health_proto_rawDesc), len(file_grpc_health_v1_health_proto_rawDesc))) }) return file_grpc_health_v1_health_proto_rawDescData } @@ -260,37 +256,11 @@ func file_grpc_health_v1_health_proto_init() { if File_grpc_health_v1_health_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_grpc_health_v1_health_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*HealthCheckRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_health_v1_health_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*HealthCheckResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_grpc_health_v1_health_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_grpc_health_v1_health_proto_rawDesc), len(file_grpc_health_v1_health_proto_rawDesc)), NumEnums: 1, NumMessages: 2, NumExtensions: 0, @@ -302,7 +272,6 @@ func file_grpc_health_v1_health_proto_init() { MessageInfos: file_grpc_health_v1_health_proto_msgTypes, }.Build() File_grpc_health_v1_health_proto = out.File - file_grpc_health_v1_health_proto_rawDesc = nil file_grpc_health_v1_health_proto_goTypes = nil file_grpc_health_v1_health_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/grpc/health/producer.go b/vendor/google.golang.org/grpc/health/producer.go new file 
mode 100644 index 000000000..f938e5790 --- /dev/null +++ b/vendor/google.golang.org/grpc/health/producer.go @@ -0,0 +1,106 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package health + +import ( + "context" + "sync" + + "google.golang.org/grpc" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/status" +) + +func init() { + producerBuilderSingleton = &producerBuilder{} + internal.RegisterClientHealthCheckListener = registerClientSideHealthCheckListener +} + +type producerBuilder struct{} + +var producerBuilderSingleton *producerBuilder + +// Build constructs and returns a producer and its cleanup function. +func (*producerBuilder) Build(cci any) (balancer.Producer, func()) { + p := &healthServiceProducer{ + cc: cci.(grpc.ClientConnInterface), + cancel: func() {}, + } + return p, func() { + p.mu.Lock() + defer p.mu.Unlock() + p.cancel() + } +} + +type healthServiceProducer struct { + // The following fields are initialized at build time and read-only after + // that and therefore do not need to be guarded by a mutex. + cc grpc.ClientConnInterface + + mu sync.Mutex + cancel func() +} + +// registerClientSideHealthCheckListener accepts a listener to provide server +// health state via the health service. +func registerClientSideHealthCheckListener(ctx context.Context, sc balancer.SubConn, serviceName string, listener func(balancer.SubConnState)) func() { + pr, closeFn := sc.GetOrBuildProducer(producerBuilderSingleton) + p := pr.(*healthServiceProducer) + p.mu.Lock() + defer p.mu.Unlock() + p.cancel() + if listener == nil { + return closeFn + } + + ctx, cancel := context.WithCancel(ctx) + p.cancel = cancel + + go p.startHealthCheck(ctx, sc, serviceName, listener) + return closeFn +} + +func (p *healthServiceProducer) startHealthCheck(ctx context.Context, sc balancer.SubConn, serviceName string, listener func(balancer.SubConnState)) { + newStream := func(method string) (any, error) { + return p.cc.NewStream(ctx, &grpc.StreamDesc{ServerStreams: true}, method) + } + + setConnectivityState := func(state connectivity.State, err error) { + listener(balancer.SubConnState{ + ConnectivityState: state, + ConnectionError: err, + }) + } + + // Call the function through the internal variable as tests use it for + // mocking. 
+ err := internal.HealthCheckFunc(ctx, newStream, setConnectivityState, serviceName) + if err == nil { + return + } + if status.Code(err) == codes.Unimplemented { + logger.Errorf("Subchannel health check is unimplemented at server side, thus health check is disabled for SubConn %p", sc) + } else { + logger.Errorf("Health checking failed for SubConn %p: %v", sc, err) + } +} diff --git a/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/vendor/google.golang.org/grpc/internal/backoff/backoff.go index b15cf482d..b6ae7f258 100644 --- a/vendor/google.golang.org/grpc/internal/backoff/backoff.go +++ b/vendor/google.golang.org/grpc/internal/backoff/backoff.go @@ -25,7 +25,7 @@ package backoff import ( "context" "errors" - "math/rand" + rand "math/rand/v2" "time" grpcbackoff "google.golang.org/grpc/backoff" diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go index 13821a926..85540f86a 100644 --- a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go +++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go @@ -33,6 +33,8 @@ type lbConfig struct { childConfig serviceconfig.LoadBalancingConfig } +// ChildName returns the name of the child balancer of the gracefulswitch +// Balancer. func ChildName(l serviceconfig.LoadBalancingConfig) string { return l.(*lbConfig).childBuilder.Name() } diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go index 73bb4c4ee..fbc1ca356 100644 --- a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go +++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go @@ -109,8 +109,9 @@ func (gsb *Balancer) switchTo(builder balancer.Builder) (*balancerWrapper, error return nil, errBalancerClosed } bw := &balancerWrapper{ - builder: builder, - gsb: gsb, + ClientConn: gsb.cc, + builder: builder, + gsb: gsb, lastState: balancer.State{ ConnectivityState: connectivity.Connecting, Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable), @@ -293,6 +294,7 @@ func (gsb *Balancer) Close() { // State updates from the wrapped balancer can result in invocation of the // graceful switch logic. type balancerWrapper struct { + balancer.ClientConn balancer.Balancer gsb *Balancer builder balancer.Builder @@ -413,7 +415,3 @@ func (bw *balancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver bw.gsb.mu.Unlock() bw.gsb.cc.UpdateAddresses(sc, addrs) } - -func (bw *balancerWrapper) Target() string { - return bw.gsb.cc.Target() -} diff --git a/vendor/google.golang.org/grpc/internal/channelz/channel.go b/vendor/google.golang.org/grpc/internal/channelz/channel.go index d7e9e1d54..3ec662799 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/channel.go +++ b/vendor/google.golang.org/grpc/internal/channelz/channel.go @@ -43,6 +43,8 @@ type Channel struct { // Non-zero traceRefCount means the trace of this channel cannot be deleted. traceRefCount int32 + // ChannelMetrics holds connectivity state, target and call metrics for the + // channel within channelz. ChannelMetrics ChannelMetrics } @@ -50,6 +52,8 @@ type Channel struct { // nesting. func (c *Channel) channelzIdentifier() {} +// String returns a string representation of the Channel, including its parent +// entity and ID. 
func (c *Channel) String() string { if c.Parent == nil { return fmt.Sprintf("Channel #%d", c.ID) @@ -61,24 +65,31 @@ func (c *Channel) id() int64 { return c.ID } +// SubChans returns a copy of the map of sub-channels associated with the +// Channel. func (c *Channel) SubChans() map[int64]string { db.mu.RLock() defer db.mu.RUnlock() return copyMap(c.subChans) } +// NestedChans returns a copy of the map of nested channels associated with the +// Channel. func (c *Channel) NestedChans() map[int64]string { db.mu.RLock() defer db.mu.RUnlock() return copyMap(c.nestedChans) } +// Trace returns a copy of the Channel's trace data. func (c *Channel) Trace() *ChannelTrace { db.mu.RLock() defer db.mu.RUnlock() return c.trace.copy() } +// ChannelMetrics holds connectivity state, target and call metrics for the +// channel within channelz. type ChannelMetrics struct { // The current connectivity state of the channel. State atomic.Pointer[connectivity.State] @@ -136,12 +147,16 @@ func strFromPointer(s *string) string { return *s } +// String returns a string representation of the ChannelMetrics, including its +// state, target, and call metrics. func (c *ChannelMetrics) String() string { return fmt.Sprintf("State: %v, Target: %s, CallsStarted: %v, CallsSucceeded: %v, CallsFailed: %v, LastCallStartedTimestamp: %v", c.State.Load(), strFromPointer(c.Target.Load()), c.CallsStarted.Load(), c.CallsSucceeded.Load(), c.CallsFailed.Load(), c.LastCallStartedTimestamp.Load(), ) } +// NewChannelMetricForTesting creates a new instance of ChannelMetrics with +// specified initial values for testing purposes. func NewChannelMetricForTesting(state connectivity.State, target string, started, succeeded, failed, timestamp int64) *ChannelMetrics { c := &ChannelMetrics{} c.State.Store(&state) diff --git a/vendor/google.golang.org/grpc/internal/channelz/server.go b/vendor/google.golang.org/grpc/internal/channelz/server.go index cdfc49d6e..b5a824992 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/server.go +++ b/vendor/google.golang.org/grpc/internal/channelz/server.go @@ -59,6 +59,8 @@ func NewServerMetricsForTesting(started, succeeded, failed, timestamp int64) *Se return sm } +// CopyFrom copies the metrics data from the provided ServerMetrics +// instance into the current instance. func (sm *ServerMetrics) CopyFrom(o *ServerMetrics) { sm.CallsStarted.Store(o.CallsStarted.Load()) sm.CallsSucceeded.Store(o.CallsSucceeded.Load()) diff --git a/vendor/google.golang.org/grpc/internal/channelz/socket.go b/vendor/google.golang.org/grpc/internal/channelz/socket.go index fa64834b2..90103847c 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/socket.go +++ b/vendor/google.golang.org/grpc/internal/channelz/socket.go @@ -70,13 +70,18 @@ type EphemeralSocketMetrics struct { RemoteFlowControlWindow int64 } +// SocketType represents the type of socket. type SocketType string +// SocketType can be one of these. const ( SocketTypeNormal = "NormalSocket" SocketTypeListen = "ListenSocket" ) +// Socket represents a socket within channelz which includes socket +// metrics and data related to socket activity and provides methods +// for managing and interacting with sockets. type Socket struct { Entity SocketType SocketType @@ -100,6 +105,8 @@ type Socket struct { Security credentials.ChannelzSecurityValue } +// String returns a string representation of the Socket, including its parent +// entity, socket type, and ID. 
 func (ls *Socket) String() string {
 	return fmt.Sprintf("%s %s #%d", ls.Parent, ls.SocketType, ls.ID)
 }
diff --git a/vendor/google.golang.org/grpc/internal/channelz/subchannel.go b/vendor/google.golang.org/grpc/internal/channelz/subchannel.go
index 3b88e4cba..b20802e6e 100644
--- a/vendor/google.golang.org/grpc/internal/channelz/subchannel.go
+++ b/vendor/google.golang.org/grpc/internal/channelz/subchannel.go
@@ -47,12 +47,14 @@ func (sc *SubChannel) id() int64 {
 	return sc.ID
 }
 
+// Sockets returns a copy of the sockets map associated with the SubChannel.
 func (sc *SubChannel) Sockets() map[int64]string {
 	db.mu.RLock()
 	defer db.mu.RUnlock()
 	return copyMap(sc.sockets)
 }
 
+// Trace returns a copy of the ChannelTrace associated with the SubChannel.
 func (sc *SubChannel) Trace() *ChannelTrace {
 	db.mu.RLock()
 	defer db.mu.RUnlock()
diff --git a/vendor/google.golang.org/grpc/internal/channelz/trace.go b/vendor/google.golang.org/grpc/internal/channelz/trace.go
index 36b867403..2bffe4777 100644
--- a/vendor/google.golang.org/grpc/internal/channelz/trace.go
+++ b/vendor/google.golang.org/grpc/internal/channelz/trace.go
@@ -79,13 +79,21 @@ type TraceEvent struct {
 	Parent *TraceEvent
 }
 
+// ChannelTrace provides tracing information for a channel.
+// It tracks various events and metadata related to the channel's lifecycle
+// and operations.
 type ChannelTrace struct {
-	cm          *channelMap
-	clearCalled bool
+	cm          *channelMap
+	clearCalled bool
+	// The time when the trace was created.
 	CreationTime time.Time
-	EventNum     int64
-	mu           sync.Mutex
-	Events       []*traceEvent
+	// A counter for the number of events recorded in the
+	// trace.
+	EventNum int64
+	mu       sync.Mutex
+	// A slice of traceEvent pointers representing the events recorded for
+	// this channel.
+	Events []*traceEvent
 }
 
 func (c *ChannelTrace) copy() *ChannelTrace {
@@ -175,6 +183,7 @@ var refChannelTypeToString = map[RefChannelType]string{
 	RefNormalSocket: "NormalSocket",
 }
 
+// String returns a string representation of the RefChannelType.
 func (r RefChannelType) String() string {
 	return refChannelTypeToString[r]
 }
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
index 452985f8d..1e42b6fdc 100644
--- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
+++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
@@ -49,7 +49,12 @@ var (
 	// XDSFallbackSupport is the env variable that controls whether support for
 	// xDS fallback is turned on. If this is unset or is false, only the first
 	// xDS server in the list of server configs will be used.
-	XDSFallbackSupport = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FALLBACK", false)
+	XDSFallbackSupport = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FALLBACK", true)
+	// NewPickFirstEnabled is set if the new pickfirst leaf policy is to be used
+	// instead of the existing pickfirst implementation. This can be enabled by
+	// setting the environment variable "GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST"
+	// to "true".
+	NewPickFirstEnabled = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST", false)
 )
 
 func boolFromEnv(envVar string, def bool) bool {
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go
index 29f234acb..2eb97f832 100644
--- a/vendor/google.golang.org/grpc/internal/envconfig/xds.go
+++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go
@@ -53,4 +53,14 @@ var (
 	// C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing.
C2PResolverTestOnlyTrafficDirectorURI = os.Getenv("GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI") + + // XDSDualstackEndpointsEnabled is true if gRPC should read the + // "additional addresses" in the xDS endpoint resource. + XDSDualstackEndpointsEnabled = boolFromEnv("GRPC_EXPERIMENTAL_XDS_DUALSTACK_ENDPOINTS", true) + + // XDSSystemRootCertsEnabled is true when xDS enabled gRPC clients can use + // the system's default root certificates for TLS certificate validation. + // For more details, see: + // https://github.com/grpc/proposal/blob/master/A82-xds-system-root-certs.md. + XDSSystemRootCertsEnabled = boolFromEnv("GRPC_EXPERIMENTAL_XDS_SYSTEM_ROOT_CERTS", false) ) diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go index 19b9d6392..8e8e86128 100644 --- a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go +++ b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go @@ -53,7 +53,7 @@ func NewCallbackSerializer(ctx context.Context) *CallbackSerializer { return cs } -// TrySchedule tries to schedules the provided callback function f to be +// TrySchedule tries to schedule the provided callback function f to be // executed in the order it was added. This is a best-effort operation. If the // context passed to NewCallbackSerializer was canceled before this method is // called, the callback will not be scheduled. diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/method.go b/vendor/google.golang.org/grpc/internal/grpcutil/method.go index ec62b4775..683d1955c 100644 --- a/vendor/google.golang.org/grpc/internal/grpcutil/method.go +++ b/vendor/google.golang.org/grpc/internal/grpcutil/method.go @@ -39,7 +39,7 @@ func ParseMethod(methodName string) (service, method string, _ error) { } // baseContentType is the base content-type for gRPC. This is a valid -// content-type on it's own, but can also include a content-subtype such as +// content-type on its own, but can also include a content-subtype such as // "proto" as a suffix after "+" or ";". See // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests // for more details. diff --git a/vendor/google.golang.org/grpc/internal/idle/idle.go b/vendor/google.golang.org/grpc/internal/idle/idle.go index fe49cb74c..2c13ee9da 100644 --- a/vendor/google.golang.org/grpc/internal/idle/idle.go +++ b/vendor/google.golang.org/grpc/internal/idle/idle.go @@ -182,6 +182,7 @@ func (m *Manager) tryEnterIdleMode() bool { return true } +// EnterIdleModeForTesting instructs the channel to enter idle mode. func (m *Manager) EnterIdleModeForTesting() { m.tryEnterIdleMode() } @@ -225,7 +226,7 @@ func (m *Manager) ExitIdleMode() error { // came in and OnCallBegin() noticed that the calls count is negative. // - Channel is in idle mode, and multiple new RPCs come in at the same // time, all of them notice a negative calls count in OnCallBegin and get - // here. The first one to get the lock would got the channel to exit idle. + // here. The first one to get the lock would get the channel to exit idle. // - Channel is not in idle mode, and the user calls Connect which calls // m.ExitIdleMode. // @@ -266,6 +267,7 @@ func (m *Manager) isClosed() bool { return atomic.LoadInt32(&m.closed) == 1 } +// Close stops the timer associated with the Manager, if it exists. 
func (m *Manager) Close() { atomic.StoreInt32(&m.closed, 1) diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index 7aae9240f..13e1f386b 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -29,10 +29,12 @@ import ( ) var ( - // WithHealthCheckFunc is set by dialoptions.go - WithHealthCheckFunc any // func (HealthChecker) DialOption // HealthCheckFunc is used to provide client-side LB channel health checking HealthCheckFunc HealthChecker + // RegisterClientHealthCheckListener is used to provide a listener for + // updates from the client-side health checking service. It returns a + // function that can be called to stop the health producer. + RegisterClientHealthCheckListener any // func(ctx context.Context, sc balancer.SubConn, serviceName string, listener func(balancer.SubConnState)) func() // BalancerUnregister is exported by package balancer to unregister a balancer. BalancerUnregister func(name string) // KeepaliveMinPingTime is the minimum ping interval. This must be 10s by @@ -62,6 +64,9 @@ var ( // gRPC server. An xDS-enabled server needs to know what type of credentials // is configured on the underlying gRPC server. This is set by server.go. GetServerCredentials any // func (*grpc.Server) credentials.TransportCredentials + // MetricsRecorderForServer returns the MetricsRecorderList derived from a + // server's stats handlers. + MetricsRecorderForServer any // func (*grpc.Server) estats.MetricsRecorder // CanonicalString returns the canonical string of the code defined here: // https://github.com/grpc/grpc/blob/master/doc/statuscodes.md. // @@ -149,6 +154,34 @@ var ( // other features, including the CSDS service. NewXDSResolverWithConfigForTesting any // func([]byte) (resolver.Builder, error) + // NewXDSResolverWithPoolForTesting creates a new xDS resolver builder + // using the provided xDS pool instead of creating a new one using the + // bootstrap configuration specified by the supported environment variables. + // The resolver.Builder is meant to be used in conjunction with the + // grpc.WithResolvers DialOption. The resolver.Builder does not take + // ownership of the provided xDS client and it is the responsibility of the + // caller to close the client when no longer required. + // + // Testing Only + // + // This function should ONLY be used for testing and may not work with some + // other features, including the CSDS service. + NewXDSResolverWithPoolForTesting any // func(*xdsclient.Pool) (resolver.Builder, error) + + // NewXDSResolverWithClientForTesting creates a new xDS resolver builder + // using the provided xDS client instead of creating a new one using the + // bootstrap configuration specified by the supported environment variables. + // The resolver.Builder is meant to be used in conjunction with the + // grpc.WithResolvers DialOption. The resolver.Builder does not take + // ownership of the provided xDS client and it is the responsibility of the + // caller to close the client when no longer required. + // + // Testing Only + // + // This function should ONLY be used for testing and may not work with some + // other features, including the CSDS service. + NewXDSResolverWithClientForTesting any // func(xdsclient.XDSClient) (resolver.Builder, error) + // RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster // Specifier Plugin for testing purposes, regardless of the XDSRLS environment // variable. 
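The internal.go hunk above extends the grpc internal package's hook pattern: cross-package entry points are declared as `any`, documented with their real function type in a trailing comment, assigned by the implementing package at init time, and type-asserted at the call site. A tiny standalone sketch of that convention (editorial, not gRPC code; `hook` is a hypothetical name):

package main

import "fmt"

// hook is declared as `any` so this package needs no import of the
// implementing package; the documented type doubles as the contract.
var hook any // func(name string) string

func main() {
	// The implementing package would normally do this from an init() func,
	// as producer.go does for RegisterClientHealthCheckListener above.
	hook = func(name string) string { return "hello " + name }

	// Callers assert the documented signature before invoking the hook.
	if f, ok := hook.(func(name string) string); ok {
		fmt.Println(f("gRPC"))
	}
}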
@@ -191,6 +224,8 @@ var ( // ExitIdleModeForTesting gets the ClientConn to exit IDLE mode. ExitIdleModeForTesting any // func(*grpc.ClientConn) error + // ChannelzTurnOffForTesting disables the Channelz service for testing + // purposes. ChannelzTurnOffForTesting func() // TriggerXDSResourceNotFoundForTesting causes the provided xDS Client to @@ -205,10 +240,6 @@ var ( // default resolver scheme. UserSetDefaultScheme = false - // ShuffleAddressListForTesting pseudo-randomizes the order of addresses. n - // is the number of elements. swap swaps the elements with indexes i and j. - ShuffleAddressListForTesting any // func(n int, swap func(i, j int)) - // ConnectedAddress returns the connected address for a SubConnState. The // address is only valid if the state is READY. ConnectedAddress any // func (scs SubConnState) resolver.Address @@ -235,7 +266,7 @@ var ( // // The implementation is expected to create a health checking RPC stream by // calling newStream(), watch for the health status of serviceName, and report -// it's health back by calling setConnectivityState(). +// its health back by calling setConnectivityState(). // // The health checking protocol is defined at: // https://github.com/grpc/grpc/blob/master/doc/health-checking.md @@ -257,3 +288,15 @@ const ( // It currently has an experimental suffix which would be removed once // end-to-end testing of the policy is completed. const RLSLoadBalancingPolicyName = "rls_experimental" + +// EnforceSubConnEmbedding is used to enforce proper SubConn implementation +// embedding. +type EnforceSubConnEmbedding interface { + enforceSubConnEmbedding() +} + +// EnforceClientConnEmbedding is used to enforce proper ClientConn implementation +// embedding. +type EnforceClientConnEmbedding interface { + enforceClientConnEmbedding() +} diff --git a/vendor/google.golang.org/grpc/internal/proxyattributes/proxyattributes.go b/vendor/google.golang.org/grpc/internal/proxyattributes/proxyattributes.go new file mode 100644 index 000000000..1f61f1a49 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/proxyattributes/proxyattributes.go @@ -0,0 +1,54 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package proxyattributes contains functions for getting and setting proxy +// attributes like the CONNECT address and user info. +package proxyattributes + +import ( + "net/url" + + "google.golang.org/grpc/resolver" +) + +type keyType string + +const proxyOptionsKey = keyType("grpc.resolver.delegatingresolver.proxyOptions") + +// Options holds the proxy connection details needed during the CONNECT +// handshake. +type Options struct { + User *url.Userinfo + ConnectAddr string +} + +// Set returns a copy of addr with opts set in its attributes. 
+func Set(addr resolver.Address, opts Options) resolver.Address {
+	addr.Attributes = addr.Attributes.WithValue(proxyOptionsKey, opts)
+	return addr
+}
+
+// Get returns the Options for the proxy [resolver.Address] and a boolean
+// value representing if the attribute is present or not. The returned data
+// should not be mutated.
+func Get(addr resolver.Address) (Options, bool) {
+	if a := addr.Attributes.Value(proxyOptionsKey); a != nil {
+		return a.(Options), true
+	}
+	return Options{}, false
+}
diff --git a/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go b/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go
new file mode 100644
index 000000000..a6c647013
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go
@@ -0,0 +1,329 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package delegatingresolver implements a resolver capable of resolving both
+// target URIs and proxy addresses.
+package delegatingresolver
+
+import (
+	"fmt"
+	"net/http"
+	"net/url"
+	"sync"
+
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/internal/proxyattributes"
+	"google.golang.org/grpc/resolver"
+	"google.golang.org/grpc/serviceconfig"
+)
+
+var (
+	logger = grpclog.Component("delegating-resolver")
+	// HTTPSProxyFromEnvironment will be overwritten in the tests
+	HTTPSProxyFromEnvironment = http.ProxyFromEnvironment
+)
+
+// delegatingResolver manages both target URI and proxy address resolution by
+// delegating these tasks to separate child resolvers. Essentially, it acts as
+// an intermediary between the gRPC ClientConn and the child resolvers.
+//
+// It implements the [resolver.Resolver] interface.
+type delegatingResolver struct {
+	target         resolver.Target    // parsed target URI to be resolved
+	cc             resolver.ClientConn // gRPC ClientConn
+	targetResolver resolver.Resolver  // resolver for the target URI, based on its scheme
+	proxyResolver  resolver.Resolver  // resolver for the proxy URI; nil if no proxy is configured
+	proxyURL       *url.URL           // proxy URL, derived from proxy environment and target
+
+	mu                  sync.Mutex         // protects all the fields below
+	targetResolverState *resolver.State    // state of the target resolver
+	proxyAddrs          []resolver.Address // resolved proxy addresses; empty if no proxy is configured
+}
+
+// nopResolver is a resolver that does nothing.
+type nopResolver struct{}
+
+func (nopResolver) ResolveNow(resolver.ResolveNowOptions) {}
+
+func (nopResolver) Close() {}
+
+// proxyURLForTarget determines the proxy URL for the given address based on
+// the environment. It can return the following:
+//   - nil URL, nil error: No proxy is configured or the address is excluded
+//     using the `NO_PROXY` environment variable or if req.URL.Host is
+//     "localhost" (with or without a port number)
+//   - nil URL, non-nil error: An error occurred while retrieving the proxy URL.
+// - non-nil URL, nil error: A proxy is configured, and the proxy URL was +// retrieved successfully without any errors. +func proxyURLForTarget(address string) (*url.URL, error) { + req := &http.Request{URL: &url.URL{ + Scheme: "https", + Host: address, + }} + return HTTPSProxyFromEnvironment(req) +} + +// New creates a new delegating resolver that can create up to two child +// resolvers: +// - one to resolve the proxy address specified using the supported +// environment variables. This uses the registered resolver for the "dns" +// scheme. +// - one to resolve the target URI using the resolver specified by the scheme +// in the target URI or specified by the user using the WithResolvers dial +// option. As a special case, if the target URI's scheme is "dns" and a +// proxy is specified using the supported environment variables, the target +// URI's path portion is used as the resolved address unless target +// resolution is enabled using the dial option. +func New(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions, targetResolverBuilder resolver.Builder, targetResolutionEnabled bool) (resolver.Resolver, error) { + r := &delegatingResolver{ + target: target, + cc: cc, + } + + var err error + r.proxyURL, err = proxyURLForTarget(target.Endpoint()) + if err != nil { + return nil, fmt.Errorf("delegating_resolver: failed to determine proxy URL for target %s: %v", target, err) + } + + // proxy is not configured or proxy address excluded using `NO_PROXY` env + // var, so only target resolver is used. + if r.proxyURL == nil { + return targetResolverBuilder.Build(target, cc, opts) + } + + if logger.V(2) { + logger.Infof("Proxy URL detected : %s", r.proxyURL) + } + + // When the scheme is 'dns' and target resolution on client is not enabled, + // resolution should be handled by the proxy, not the client. Therefore, we + // bypass the target resolver and store the unresolved target address. + if target.URL.Scheme == "dns" && !targetResolutionEnabled { + state := resolver.State{ + Addresses: []resolver.Address{{Addr: target.Endpoint()}}, + Endpoints: []resolver.Endpoint{{Addresses: []resolver.Address{{Addr: target.Endpoint()}}}}, + } + r.targetResolverState = &state + } else { + wcc := &wrappingClientConn{ + stateListener: r.updateTargetResolverState, + parent: r, + } + if r.targetResolver, err = targetResolverBuilder.Build(target, wcc, opts); err != nil { + return nil, fmt.Errorf("delegating_resolver: unable to build the resolver for target %s: %v", target, err) + } + } + + if r.proxyResolver, err = r.proxyURIResolver(opts); err != nil { + return nil, fmt.Errorf("delegating_resolver: failed to build resolver for proxy URL %q: %v", r.proxyURL, err) + } + + if r.targetResolver == nil { + r.targetResolver = nopResolver{} + } + if r.proxyResolver == nil { + r.proxyResolver = nopResolver{} + } + return r, nil +} + +// proxyURIResolver creates a resolver for resolving proxy URIs using the +// "dns" scheme. It adjusts the proxyURL to conform to the "dns:///" format and +// builds a resolver with a wrappingClientConn to capture resolved addresses. 
+func (r *delegatingResolver) proxyURIResolver(opts resolver.BuildOptions) (resolver.Resolver, error) {
+	proxyBuilder := resolver.Get("dns")
+	if proxyBuilder == nil {
+		panic("delegating_resolver: resolver for proxy not found for scheme dns")
+	}
+	url := *r.proxyURL
+	url.Scheme = "dns"
+	url.Path = "/" + r.proxyURL.Host
+	url.Host = "" // Clear the Host field to conform to the "dns:///" format
+
+	proxyTarget := resolver.Target{URL: url}
+	wcc := &wrappingClientConn{
+		stateListener: r.updateProxyResolverState,
+		parent:        r,
+	}
+	return proxyBuilder.Build(proxyTarget, wcc, opts)
+}
+
+func (r *delegatingResolver) ResolveNow(o resolver.ResolveNowOptions) {
+	r.targetResolver.ResolveNow(o)
+	r.proxyResolver.ResolveNow(o)
+}
+
+func (r *delegatingResolver) Close() {
+	r.targetResolver.Close()
+	r.targetResolver = nil
+
+	r.proxyResolver.Close()
+	r.proxyResolver = nil
+}
+
+// updateClientConnStateLocked creates a list of combined addresses by
+// pairing each proxy address with every target address. For each pair, it
+// generates a new [resolver.Address] using the proxy address, and adds the
+// target address as an attribute, along with user info. It returns nil if
+// either resolver has not sent an update even once, and returns the error
+// from the ClientConn update once both resolvers have sent an update at
+// least once.
+func (r *delegatingResolver) updateClientConnStateLocked() error {
+	if r.targetResolverState == nil || r.proxyAddrs == nil {
+		return nil
+	}
+
+	curState := *r.targetResolverState
+	// If multiple resolved proxy addresses are present, we send only the
+	// unresolved proxy host and let net.Dial handle the proxy host name
+	// resolution when creating the transport. Sending all resolved addresses
+	// would increase the number of addresses passed to the ClientConn and
+	// subsequently to load balancing (LB) policies like Round Robin, leading
+	// to additional TCP connections. However, if there's only one resolved
+	// proxy address, we send it directly, as it doesn't affect the address
+	// count returned by the target resolver and the address count sent to the
+	// ClientConn.
+	var proxyAddr resolver.Address
+	if len(r.proxyAddrs) == 1 {
+		proxyAddr = r.proxyAddrs[0]
+	} else {
+		proxyAddr = resolver.Address{Addr: r.proxyURL.Host}
+	}
+	var addresses []resolver.Address
+	for _, targetAddr := range (*r.targetResolverState).Addresses {
+		addresses = append(addresses, proxyattributes.Set(proxyAddr, proxyattributes.Options{
+			User:        r.proxyURL.User,
+			ConnectAddr: targetAddr.Addr,
+		}))
+	}
+
+	// Create a list of combined endpoints by pairing all proxy endpoints
+	// with every target endpoint. Each time, it constructs a new
+	// [resolver.Endpoint] using all the addresses from the proxy endpoints
+	// and the target addresses from one target endpoint. The target address
+	// and user information from the proxy URL are added as attributes to the
+	// proxy address. The resulting list of addresses is then grouped into
+	// endpoints, covering all combinations of proxy and target endpoints.
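+	// As a worked example of the pairing described above (an editorial
+	// illustration; p1, p2, t1, t2a, t2b are hypothetical addresses): with
+	// r.proxyAddrs = [p1, p2] and target endpoints E1{t1} and E2{t2a, t2b},
+	// the loop below yields
+	//   Endpoint{p1(connect:t1), p2(connect:t1)}
+	//   Endpoint{p1(connect:t2a), p1(connect:t2b), p2(connect:t2a), p2(connect:t2b)}
+	// i.e. one combined endpoint per target endpoint, containing every
+	// proxy address paired with each of that endpoint's target addresses.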
+ var endpoints []resolver.Endpoint + for _, endpt := range (*r.targetResolverState).Endpoints { + var addrs []resolver.Address + for _, proxyAddr := range r.proxyAddrs { + for _, targetAddr := range endpt.Addresses { + addrs = append(addrs, proxyattributes.Set(proxyAddr, proxyattributes.Options{ + User: r.proxyURL.User, + ConnectAddr: targetAddr.Addr, + })) + } + } + endpoints = append(endpoints, resolver.Endpoint{Addresses: addrs}) + } + // Use the targetResolverState for its service config and attributes + // contents. The state update is only sent after both the target and proxy + // resolvers have sent their updates, and curState has been updated with + // the combined addresses. + curState.Addresses = addresses + curState.Endpoints = endpoints + return r.cc.UpdateState(curState) +} + +// updateProxyResolverState updates the proxy resolver state by storing proxy +// addresses and endpoints, marking the resolver as ready, and triggering a +// state update if both proxy and target resolvers are ready. If the ClientConn +// returns a non-nil error, it calls `ResolveNow()` on the target resolver. It +// is a StateListener function of wrappingClientConn passed to the proxy resolver. +func (r *delegatingResolver) updateProxyResolverState(state resolver.State) error { + r.mu.Lock() + defer r.mu.Unlock() + if logger.V(2) { + logger.Infof("Addresses received from proxy resolver: %s", state.Addresses) + } + if len(state.Endpoints) > 0 { + // We expect exactly one address per endpoint because the proxy + // resolver uses "dns" resolution. + r.proxyAddrs = make([]resolver.Address, 0, len(state.Endpoints)) + for _, endpoint := range state.Endpoints { + r.proxyAddrs = append(r.proxyAddrs, endpoint.Addresses...) + } + } else if state.Addresses != nil { + r.proxyAddrs = state.Addresses + } else { + r.proxyAddrs = []resolver.Address{} // ensure proxyAddrs is non-nil to indicate an update has been received + } + err := r.updateClientConnStateLocked() + // Another possible approach was to block until updates are received from + // both resolvers. But this is not used because calling `New()` triggers + // `Build()` for the first resolver, which calls `UpdateState()`. And the + // second resolver hasn't sent an update yet, so it would cause `New()` to + // block indefinitely. + if err != nil { + r.targetResolver.ResolveNow(resolver.ResolveNowOptions{}) + } + return err +} + +// updateTargetResolverState updates the target resolver state by storing target +// addresses, endpoints, and service config, marking the resolver as ready, and +// triggering a state update if both resolvers are ready. If the ClientConn +// returns a non-nil error, it calls `ResolveNow()` on the proxy resolver. It +// is a StateListener function of wrappingClientConn passed to the target resolver. +func (r *delegatingResolver) updateTargetResolverState(state resolver.State) error { + r.mu.Lock() + defer r.mu.Unlock() + + if logger.V(2) { + logger.Infof("Addresses received from target resolver: %v", state.Addresses) + } + r.targetResolverState = &state + err := r.updateClientConnStateLocked() + if err != nil { + r.proxyResolver.ResolveNow(resolver.ResolveNowOptions{}) + } + return nil +} + +// wrappingClientConn serves as an intermediary between the parent ClientConn +// and the child resolvers created here. It implements the resolver.ClientConn +// interface and is passed in that capacity to the child resolvers. 
+type wrappingClientConn struct { + // Callback to deliver resolver state updates + stateListener func(state resolver.State) error + parent *delegatingResolver +} + +// UpdateState receives resolver state updates and forwards them to the +// appropriate listener function (either for the proxy or target resolver). +func (wcc *wrappingClientConn) UpdateState(state resolver.State) error { + return wcc.stateListener(state) +} + +// ReportError intercepts errors from the child resolvers and passes them to ClientConn. +func (wcc *wrappingClientConn) ReportError(err error) { + wcc.parent.cc.ReportError(err) +} + +// NewAddress intercepts the new resolved address from the child resolvers and +// passes them to ClientConn. +func (wcc *wrappingClientConn) NewAddress(addrs []resolver.Address) { + wcc.UpdateState(resolver.State{Addresses: addrs}) +} + +// ParseServiceConfig parses the provided service config and returns an +// object that provides the parsed config. +func (wcc *wrappingClientConn) ParseServiceConfig(serviceConfigJSON string) *serviceconfig.ParseResult { + return wcc.parent.cc.ParseServiceConfig(serviceConfigJSON) +} diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go index 4552db16b..ba5c5a95d 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -24,8 +24,9 @@ import ( "context" "encoding/json" "fmt" - "math/rand" + rand "math/rand/v2" "net" + "net/netip" "os" "strconv" "strings" @@ -122,7 +123,7 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts } // IP address. - if ipAddr, ok := formatIP(host); ok { + if ipAddr, err := formatIP(host); err == nil { addr := []resolver.Address{{Addr: ipAddr + ":" + port}} cc.UpdateState(resolver.State{Addresses: addr}) return deadResolver{}, nil @@ -177,7 +178,7 @@ type dnsResolver struct { // finished. Otherwise, data race will be possible. [Race Example] in // dns_resolver_test we replace the real lookup functions with mocked ones to // facilitate testing. If Close() doesn't wait for watcher() goroutine - // finishes, race detector sometimes will warns lookup (READ the lookup + // finishes, race detector sometimes will warn lookup (READ the lookup // function pointers) inside watcher() goroutine has data race with // replaceNetFunc (WRITE the lookup function pointers). wg sync.WaitGroup @@ -237,7 +238,9 @@ func (d *dnsResolver) watcher() { } func (d *dnsResolver) lookupSRV(ctx context.Context) ([]resolver.Address, error) { - if !EnableSRVLookups { + // Skip this particular host to avoid timeouts with some versions of + // systemd-resolved. + if !EnableSRVLookups || d.host == "metadata.google.internal." 
{ return nil, nil } var newAddrs []resolver.Address @@ -258,9 +261,9 @@ func (d *dnsResolver) lookupSRV(ctx context.Context) ([]resolver.Address, error) return nil, err } for _, a := range lbAddrs { - ip, ok := formatIP(a) - if !ok { - return nil, fmt.Errorf("dns: error parsing A record IP address %v", a) + ip, err := formatIP(a) + if err != nil { + return nil, fmt.Errorf("dns: error parsing A record IP address %v: %v", a, err) } addr := ip + ":" + strconv.Itoa(int(s.Port)) newAddrs = append(newAddrs, resolver.Address{Addr: addr, ServerName: s.Target}) @@ -320,9 +323,9 @@ func (d *dnsResolver) lookupHost(ctx context.Context) ([]resolver.Address, error } newAddrs := make([]resolver.Address, 0, len(addrs)) for _, a := range addrs { - ip, ok := formatIP(a) - if !ok { - return nil, fmt.Errorf("dns: error parsing A record IP address %v", a) + ip, err := formatIP(a) + if err != nil { + return nil, fmt.Errorf("dns: error parsing A record IP address %v: %v", a, err) } addr := ip + ":" + d.port newAddrs = append(newAddrs, resolver.Address{Addr: addr}) @@ -349,19 +352,19 @@ func (d *dnsResolver) lookup() (*resolver.State, error) { return &state, nil } -// formatIP returns ok = false if addr is not a valid textual representation of -// an IP address. If addr is an IPv4 address, return the addr and ok = true. +// formatIP returns an error if addr is not a valid textual representation of +// an IP address. If addr is an IPv4 address, return the addr and error = nil. // If addr is an IPv6 address, return the addr enclosed in square brackets and -// ok = true. -func formatIP(addr string) (addrIP string, ok bool) { - ip := net.ParseIP(addr) - if ip == nil { - return "", false +// error = nil. +func formatIP(addr string) (string, error) { + ip, err := netip.ParseAddr(addr) + if err != nil { + return "", err } - if ip.To4() != nil { - return addr, true + if ip.Is4() { + return addr, nil } - return "[" + addr + "]", true + return "[" + addr + "]", nil } // parseTarget takes the user input target string and default port, returns @@ -377,7 +380,7 @@ func parseTarget(target, defaultPort string) (host, port string, err error) { if target == "" { return "", "", internal.ErrMissingAddr } - if ip := net.ParseIP(target); ip != nil { + if _, err := netip.ParseAddr(target); err == nil { // target is an IPv4 or IPv6(without brackets) address return target, defaultPort, nil } @@ -425,7 +428,7 @@ func chosenByPercentage(a *int) bool { if a == nil { return true } - return rand.Intn(100)+1 <= *a + return rand.IntN(100)+1 <= *a } func canaryingSC(js string) string { diff --git a/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go b/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go index be110d41f..79044657b 100644 --- a/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go +++ b/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go @@ -54,6 +54,8 @@ func verifyLabels(desc *estats.MetricDescriptor, labelsRecv ...string) { } } +// RecordInt64Count records the measurement alongside labels on the int +// count associated with the provided handle. func (l *MetricsRecorderList) RecordInt64Count(handle *estats.Int64CountHandle, incr int64, labels ...string) { verifyLabels(handle.Descriptor(), labels...) @@ -62,6 +64,8 @@ func (l *MetricsRecorderList) RecordInt64Count(handle *estats.Int64CountHandle, } } +// RecordFloat64Count records the measurement alongside labels on the float +// count associated with the provided handle. 
 func (l *MetricsRecorderList) RecordFloat64Count(handle *estats.Float64CountHandle, incr float64, labels ...string) {
 	verifyLabels(handle.Descriptor(), labels...)
 
@@ -70,6 +74,8 @@ func (l *MetricsRecorderList) RecordFloat64Count(handle *estats.Float64CountHand
 	}
 }
 
+// RecordInt64Histo records the measurement alongside labels on the int
+// histo associated with the provided handle.
 func (l *MetricsRecorderList) RecordInt64Histo(handle *estats.Int64HistoHandle, incr int64, labels ...string) {
 	verifyLabels(handle.Descriptor(), labels...)
 
@@ -78,6 +84,8 @@ func (l *MetricsRecorderList) RecordInt64Histo(handle *estats.Int64HistoHandle,
 	}
 }
 
+// RecordFloat64Histo records the measurement alongside labels on the float
+// histo associated with the provided handle.
 func (l *MetricsRecorderList) RecordFloat64Histo(handle *estats.Float64HistoHandle, incr float64, labels ...string) {
 	verifyLabels(handle.Descriptor(), labels...)
 
@@ -86,6 +94,8 @@ func (l *MetricsRecorderList) RecordFloat64Histo(handle *estats.Float64HistoHand
 	}
 }
 
+// RecordInt64Gauge records the measurement alongside labels on the int
+// gauge associated with the provided handle.
 func (l *MetricsRecorderList) RecordInt64Gauge(handle *estats.Int64GaugeHandle, incr int64, labels ...string) {
 	verifyLabels(handle.Descriptor(), labels...)
 
diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go
index 757925381..1186f1e9a 100644
--- a/vendor/google.golang.org/grpc/internal/status/status.go
+++ b/vendor/google.golang.org/grpc/internal/status/status.go
@@ -149,6 +149,8 @@ func (s *Status) WithDetails(details ...protoadapt.MessageV1) (*Status, error) {
 
 // Details returns a slice of details messages attached to the status.
 // If a detail cannot be decoded, the error is returned in place of the detail.
+// If the detail can be decoded, the proto message returned is of the same
+// type that was given to WithDetails().
 func (s *Status) Details() []any {
 	if s == nil || s.s == nil {
 		return nil
@@ -160,7 +162,38 @@ func (s *Status) Details() []any {
 			details = append(details, err)
 			continue
 		}
-		details = append(details, detail)
+		// The call to MessageV1Of is required to unwrap the proto message if
+		// it implemented only the MessageV1 API. The proto message would have
+		// been wrapped in a V2 wrapper in Status.WithDetails. V2 messages are
+		// added to a global registry used by any.UnmarshalNew().
+		// MessageV1Of has the following behaviour:
+		// 1. If the given message is a wrapped MessageV1, it returns the
+		//    unwrapped value.
+		// 2. If the given message already implements MessageV1, it returns it
+		//    as is.
+		// 3. Else, it wraps the MessageV2 in a MessageV1 wrapper.
+		//
+		// Since the Status.WithDetails() API only accepts MessageV1, calling
+		// MessageV1Of ensures we return the same type that was given to
+		// WithDetails:
+		// * If the given type implemented only MessageV1, the unwrapping from
+		//   point 1 above will restore the type.
+		// * If the given type implemented both MessageV1 and MessageV2, point 2
+		//   above will ensure no wrapping is performed.
+		// * If the given type implemented only MessageV2 and was wrapped using
+		//   MessageV1Of before passing to WithDetails(), it would be unwrapped
+		//   in WithDetails by calling MessageV2Of(). Point 3 above will ensure
+		//   that the type is wrapped in a MessageV1 wrapper again before
+		//   returning. Note that protoc-gen-go doesn't generate code which
+		//   implements ONLY MessageV2 at the time of writing.
+		//
+		// NOTE: Status details can also be added using the FromProto method.
+		// This could theoretically allow passing a Detail message that only
+		// implements the V2 API. In such a case the message will be wrapped in
+		// a MessageV1 wrapper when fetched using Details().
+		// Since protoc-gen-go generates only code that implements both V1 and
+		// V2 APIs for backward compatibility, this is not a concern.
+		details = append(details, protoadapt.MessageV1Of(detail))
 	}
 	return details
 }
diff --git a/vendor/google.golang.org/grpc/internal/transport/client_stream.go b/vendor/google.golang.org/grpc/internal/transport/client_stream.go
new file mode 100644
index 000000000..8ed347c54
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/client_stream.go
@@ -0,0 +1,144 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transport
+
+import (
+	"sync/atomic"
+
+	"golang.org/x/net/http2"
+	"google.golang.org/grpc/mem"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/status"
+)
+
+// ClientStream implements streaming functionality for a gRPC client.
+type ClientStream struct {
+	*Stream // Embed for common stream functionality.
+
+	ct       *http2Client
+	done     chan struct{} // closed at the end of stream to unblock writers.
+	doneFunc func()        // invoked at the end of stream.
+
+	headerChan       chan struct{} // closed to indicate the end of header metadata.
+	headerChanClosed uint32        // set when headerChan is closed. Used to avoid closing headerChan multiple times.
+	// headerValid indicates whether a valid header was received.  Only
+	// meaningful after headerChan is closed (always call waitOnHeader() before
+	// reading its value).
+	headerValid bool
+	header      metadata.MD // the received header metadata
+	noHeaders   bool        // set if the client never received headers (set only after the stream is done).
+
+	bytesReceived atomic.Bool // indicates whether any bytes have been received on this stream
+	unprocessed   atomic.Bool // set if the server sends a refused stream or GOAWAY including this stream
+
+	status *status.Status // the status error received from the server
+}
+
+// Read reads an n byte message from the input stream.
+func (s *ClientStream) Read(n int) (mem.BufferSlice, error) {
+	b, err := s.Stream.read(n)
+	if err == nil {
+		s.ct.incrMsgRecv()
+	}
+	return b, err
+}
+
+// Close closes the stream and propagates err to any readers.
+func (s *ClientStream) Close(err error) {
+	var (
+		rst     bool
+		rstCode http2.ErrCode
+	)
+	if err != nil {
+		rst = true
+		rstCode = http2.ErrCodeCancel
+	}
+	s.ct.closeStream(s, err, rst, rstCode, status.Convert(err), nil, false)
+}
+
+// Write writes the hdr and data bytes to the output stream.
+func (s *ClientStream) Write(hdr []byte, data mem.BufferSlice, opts *WriteOptions) error {
+	return s.ct.write(s, hdr, data, opts)
+}
+
+// BytesReceived indicates whether any bytes have been received on this stream.
+func (s *ClientStream) BytesReceived() bool { + return s.bytesReceived.Load() +} + +// Unprocessed indicates whether the server did not process this stream -- +// i.e. it sent a refused stream or GOAWAY including this stream ID. +func (s *ClientStream) Unprocessed() bool { + return s.unprocessed.Load() +} + +func (s *ClientStream) waitOnHeader() { + select { + case <-s.ctx.Done(): + // Close the stream to prevent headers/trailers from changing after + // this function returns. + s.Close(ContextErr(s.ctx.Err())) + // headerChan could possibly not be closed yet if closeStream raced + // with operateHeaders; wait until it is closed explicitly here. + <-s.headerChan + case <-s.headerChan: + } +} + +// RecvCompress returns the compression algorithm applied to the inbound +// message. It is empty string if there is no compression applied. +func (s *ClientStream) RecvCompress() string { + s.waitOnHeader() + return s.recvCompress +} + +// Done returns a channel which is closed when it receives the final status +// from the server. +func (s *ClientStream) Done() <-chan struct{} { + return s.done +} + +// Header returns the header metadata of the stream. Acquires the key-value +// pairs of header metadata once it is available. It blocks until i) the +// metadata is ready or ii) there is no header metadata or iii) the stream is +// canceled/expired. +func (s *ClientStream) Header() (metadata.MD, error) { + s.waitOnHeader() + + if !s.headerValid || s.noHeaders { + return nil, s.status.Err() + } + + return s.header.Copy(), nil +} + +// TrailersOnly blocks until a header or trailers-only frame is received and +// then returns true if the stream was trailers-only. If the stream ends +// before headers are received, returns true. +func (s *ClientStream) TrailersOnly() bool { + s.waitOnHeader() + return s.noHeaders +} + +// Status returns the status received from the server. +// Status can be read safely only after the stream has ended, +// that is, after Done() is closed.
+func (s *ClientStream) Status() *status.Status { + return s.status +} diff --git a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go index 97198c515..dfc0f224e 100644 --- a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go +++ b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go @@ -92,14 +92,11 @@ func (f *trInFlow) newLimit(n uint32) uint32 { func (f *trInFlow) onData(n uint32) uint32 { f.unacked += n - if f.unacked >= f.limit/4 { - w := f.unacked - f.unacked = 0 + if f.unacked < f.limit/4 { f.updateEffectiveWindowSize() - return w + return 0 } - f.updateEffectiveWindowSize() - return 0 + return f.reset() } func (f *trInFlow) reset() uint32 { diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index ce878693b..3dea23573 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -225,7 +225,7 @@ func (ht *serverHandlerTransport) do(fn func()) error { } } -func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) error { +func (ht *serverHandlerTransport) writeStatus(s *ServerStream, st *status.Status) error { ht.writeStatusMu.Lock() defer ht.writeStatusMu.Unlock() @@ -289,14 +289,14 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro // writePendingHeaders sets common and custom headers on the first // write call (Write, WriteHeader, or WriteStatus) -func (ht *serverHandlerTransport) writePendingHeaders(s *Stream) { +func (ht *serverHandlerTransport) writePendingHeaders(s *ServerStream) { ht.writeCommonHeaders(s) ht.writeCustomHeaders(s) } // writeCommonHeaders sets common headers on the first write // call (Write, WriteHeader, or WriteStatus). -func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { +func (ht *serverHandlerTransport) writeCommonHeaders(s *ServerStream) { h := ht.rw.Header() h["Date"] = nil // suppress Date to make tests happy; TODO: restore h.Set("Content-Type", ht.contentType) @@ -317,7 +317,7 @@ func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { // writeCustomHeaders sets custom headers set on the stream via SetHeader // on the first write call (Write, WriteHeader, or WriteStatus) -func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) { +func (ht *serverHandlerTransport) writeCustomHeaders(s *ServerStream) { h := ht.rw.Header() s.hdrMu.Lock() @@ -333,7 +333,7 @@ func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) { s.hdrMu.Unlock() } -func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data mem.BufferSlice, _ *Options) error { +func (ht *serverHandlerTransport) write(s *ServerStream, hdr []byte, data mem.BufferSlice, _ *WriteOptions) error { // Always take a reference because otherwise there is no guarantee the data will // be available after this function returns. This is what callers to Write // expect. 
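The long MessageV1Of comment in the status.go hunk above is easiest to see in action. The following is a minimal, self-contained sketch, not part of this patch, of the round-trip it guarantees; it assumes the standard errdetails types from google.golang.org/genproto, which are generated by protoc-gen-go and therefore implement both the V1 and V2 proto APIs:

package main

import (
	"fmt"

	"google.golang.org/genproto/googleapis/rpc/errdetails"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	st := status.New(codes.InvalidArgument, "bad request")
	// WithDetails wraps each detail for transport; per the comment above,
	// Details() is expected to hand back the same concrete type that was
	// passed in here.
	st, err := st.WithDetails(&errdetails.BadRequest{
		FieldViolations: []*errdetails.BadRequest_FieldViolation{
			{Field: "name", Description: "must not be empty"},
		},
	})
	if err != nil {
		panic(err)
	}
	for _, d := range st.Details() {
		if br, ok := d.(*errdetails.BadRequest); ok {
			fmt.Println(br.GetFieldViolations()[0].GetField()) // "name"
		}
	}
}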
@@ -357,7 +357,7 @@ func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data mem.BufferSl return nil } -func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { +func (ht *serverHandlerTransport) writeHeader(s *ServerStream, md metadata.MD) error { if err := s.SetHeader(md); err != nil { return err } @@ -385,7 +385,7 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { return err } -func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream func(*Stream)) { +func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream func(*ServerStream)) { // With this transport type there will be exactly 1 stream: this HTTP request. var cancel context.CancelFunc if ht.timeoutSet { @@ -408,16 +408,18 @@ func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream ctx = metadata.NewIncomingContext(ctx, ht.headerMD) req := ht.req - s := &Stream{ - id: 0, // irrelevant - ctx: ctx, - requestRead: func(int) {}, + s := &ServerStream{ + Stream: &Stream{ + id: 0, // irrelevant + ctx: ctx, + requestRead: func(int) {}, + buf: newRecvBuffer(), + method: req.URL.Path, + recvCompress: req.Header.Get("grpc-encoding"), + contentSubtype: ht.contentSubtype, + }, cancel: cancel, - buf: newRecvBuffer(), st: ht, - method: req.URL.Path, - recvCompress: req.Header.Get("grpc-encoding"), - contentSubtype: ht.contentSubtype, headerWireLength: 0, // won't have access to header wire length until golang/go#18997. } s.trReader = &transportReader{ @@ -471,9 +473,7 @@ func (ht *serverHandlerTransport) runStream() { } } -func (ht *serverHandlerTransport) IncrMsgSent() {} - -func (ht *serverHandlerTransport) IncrMsgRecv() {} +func (ht *serverHandlerTransport) incrMsgRecv() {} func (ht *serverHandlerTransport) Drain(string) { panic("Drain() is not implemented") @@ -498,5 +498,5 @@ func mapRecvMsgError(err error) error { if strings.Contains(err.Error(), "body closed by handler") { return status.Error(codes.Canceled, err.Error()) } - return connectionErrorf(true, err, err.Error()) + return connectionErrorf(true, err, "%s", err.Error()) } diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index c769deab5..513dbb93d 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -43,6 +43,7 @@ import ( "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/grpcutil" imetadata "google.golang.org/grpc/internal/metadata" + "google.golang.org/grpc/internal/proxyattributes" istatus "google.golang.org/grpc/internal/status" isyscall "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/internal/transport/networktype" @@ -86,9 +87,9 @@ type http2Client struct { writerDone chan struct{} // sync point to enable testing. // goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor) // that the server sent GoAway on this transport. - goAway chan struct{} - - framer *framer + goAway chan struct{} + keepaliveDone chan struct{} // Closed when the keepalive goroutine exits. + framer *framer // controlBuf delivers all the control related tasks (e.g., window // updates, reset streams, and various settings) to the controller. // Do not access controlBuf with mu held. 
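The flowcontrol.go hunk above reorders trInFlow.onData without changing its rule: a connection-level WINDOW_UPDATE is withheld until at least a quarter of the window is unacknowledged. A simplified stand-in, not the real trInFlow (which also tracks an effective window size for channelz), behaves like this:

package main

import "fmt"

type inFlowSketch struct {
	limit   uint32 // connection window size
	unacked uint32 // bytes received but not yet acknowledged
}

// onData accumulates received bytes and returns the WINDOW_UPDATE
// increment to send, or 0 while the total stays below limit/4.
func (f *inFlowSketch) onData(n uint32) uint32 {
	f.unacked += n
	if f.unacked < f.limit/4 {
		return 0
	}
	w := f.unacked
	f.unacked = 0
	return w
}

func main() {
	f := &inFlowSketch{limit: 64 * 1024}
	fmt.Println(f.onData(8 * 1024))  // 0: below the 16 KiB threshold
	fmt.Println(f.onData(12 * 1024)) // 20480: threshold crossed, ack everything
}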
@@ -123,7 +124,7 @@ type http2Client struct { mu sync.Mutex // guard the following variables nextID uint32 state transportState - activeStreams map[uint32]*Stream + activeStreams map[uint32]*ClientStream // prevGoAway ID records the Last-Stream-ID in the previous GOAway frame. prevGoAwayID uint32 // goAwayReason records the http2.ErrCode and debug data received with the @@ -153,7 +154,7 @@ type http2Client struct { logger *grpclog.PrefixLogger } -func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr resolver.Address, useProxy bool, grpcUA string) (net.Conn, error) { +func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr resolver.Address, grpcUA string) (net.Conn, error) { address := addr.Addr networkType, ok := networktype.Get(addr) if fn != nil { @@ -177,8 +178,8 @@ func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error if !ok { networkType, address = parseDialTarget(address) } - if networkType == "tcp" && useProxy { - return proxyDial(ctx, address, grpcUA) + if opts, present := proxyattributes.Get(addr); present { + return proxyDial(ctx, addr, grpcUA, opts) } return internal.NetDialerWithTCPKeepalive().DialContext(ctx, networkType, address) } @@ -199,10 +200,10 @@ func isTemporary(err error) bool { return true } -// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 +// NewHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 // and starts to receive messages on it. Non-nil error returns if construction // fails. -func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (_ *http2Client, err error) { +func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (_ ClientTransport, err error) { scheme := "http" ctx, cancel := context.WithCancel(ctx) defer func() { @@ -217,7 +218,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts // address specific arbitrary data to reach custom dialers and credential handshakers. connectCtx = icredentials.NewClientHandshakeInfoContext(connectCtx, credentials.ClientHandshakeInfo{Attributes: addr.Attributes}) - conn, err := dial(connectCtx, opts.Dialer, addr, opts.UseProxy, opts.UserAgent) + conn, err := dial(connectCtx, opts.Dialer, addr, opts.UserAgent) if err != nil { if opts.FailOnNonTempDialError { return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err) @@ -335,10 +336,11 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts readerDone: make(chan struct{}), writerDone: make(chan struct{}), goAway: make(chan struct{}), + keepaliveDone: make(chan struct{}), framer: newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize), fc: &trInFlow{limit: uint32(icwz)}, scheme: scheme, - activeStreams: make(map[uint32]*Stream), + activeStreams: make(map[uint32]*ClientStream), isSecure: isSecure, perRPCCreds: perRPCCreds, kp: kp, @@ -479,17 +481,19 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts return t, nil } -func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { +func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *ClientStream { // TODO(zhaoq): Handle uint32 overflow of Stream.id. 
- s := &Stream{ - ct: t, - done: make(chan struct{}), - method: callHdr.Method, - sendCompress: callHdr.SendCompress, - buf: newRecvBuffer(), - headerChan: make(chan struct{}), - contentSubtype: callHdr.ContentSubtype, - doneFunc: callHdr.DoneFunc, + s := &ClientStream{ + Stream: &Stream{ + method: callHdr.Method, + sendCompress: callHdr.SendCompress, + buf: newRecvBuffer(), + contentSubtype: callHdr.ContentSubtype, + }, + ct: t, + done: make(chan struct{}), + headerChan: make(chan struct{}), + doneFunc: callHdr.DoneFunc, } s.wq = newWriteQuota(defaultWriteQuota, s.done) s.requestRead = func(n int) { @@ -505,7 +509,7 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { ctxDone: s.ctx.Done(), recv: s.buf, closeStream: func(err error) { - t.CloseStream(s, err) + s.Close(err) }, }, windowHandler: func(n int) { @@ -527,8 +531,9 @@ func (t *http2Client) getPeer() *peer.Peer { // to be the last frame loopy writes to the transport. func (t *http2Client) outgoingGoAwayHandler(g *goAway) (bool, error) { t.mu.Lock() - defer t.mu.Unlock() - if err := t.framer.fr.WriteGoAway(t.nextID-2, http2.ErrCodeNo, g.debugData); err != nil { + maxStreamID := t.nextID - 2 + t.mu.Unlock() + if err := t.framer.fr.WriteGoAway(maxStreamID, http2.ErrCodeNo, g.debugData); err != nil { return false, err } return false, g.closeConn @@ -595,12 +600,6 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) for k, v := range callAuthData { headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) } - if b := stats.OutgoingTags(ctx); b != nil { - headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-tags-bin", Value: encodeBinHeader(b)}) - } - if b := stats.OutgoingTrace(ctx); b != nil { - headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)}) - } if md, added, ok := metadataFromOutgoingContextRaw(ctx); ok { var k string @@ -736,7 +735,7 @@ func (e NewStreamError) Error() string { // NewStream creates a stream and registers it into the transport as "active" // streams. All non-nil errors returned will be *NewStreamError. -func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) { +func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*ClientStream, error) { ctx = peer.NewContext(ctx, t.getPeer()) // ServerName field of the resolver returned address takes precedence over @@ -761,7 +760,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, return } // The stream was unprocessed by the server. - atomic.StoreUint32(&s.unprocessed, 1) + s.unprocessed.Store(true) s.write(recvMsg{err: err}) close(s.done) // If headerChan isn't closed, then close it. @@ -906,21 +905,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, return s, nil } -// CloseStream clears the footprint of a stream when the stream is not needed any more. -// This must not be executed in reader's goroutine. 
-func (t *http2Client) CloseStream(s *Stream, err error) { - var ( - rst bool - rstCode http2.ErrCode - ) - if err != nil { - rst = true - rstCode = http2.ErrCodeCancel - } - t.closeStream(s, err, rst, rstCode, status.Convert(err), nil, false) -} - -func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.ErrCode, st *status.Status, mdata map[string][]string, eosReceived bool) { +func (t *http2Client) closeStream(s *ClientStream, err error, rst bool, rstCode http2.ErrCode, st *status.Status, mdata map[string][]string, eosReceived bool) { // Set stream status to done. if s.swapState(streamDone) == streamDone { // If it was already done, return. If multiple closeStream calls @@ -1008,6 +993,9 @@ func (t *http2Client) Close(err error) { // should unblock it so that the goroutine eventually exits. t.kpDormancyCond.Signal() } + // Append info about previous goaways if there were any, since this may be important + // for understanding the root cause for this connection to be closed. + goAwayDebugMessage := t.goAwayDebugMessage t.mu.Unlock() // Per HTTP/2 spec, a GOAWAY frame must be sent before closing the @@ -1025,11 +1013,13 @@ func (t *http2Client) Close(err error) { } t.cancel() t.conn.Close() + // Waits for the reader and keepalive goroutines to exit before returning to + // ensure all resources are cleaned up before Close can return. + <-t.readerDone + if t.keepaliveEnabled { + <-t.keepaliveDone + } channelz.RemoveEntry(t.channelz.ID) - // Append info about previous goaways if there were any, since this may be important - // for understanding the root cause for this connection to be closed. - _, goAwayDebugMessage := t.GetGoAwayReason() - var st *status.Status if len(goAwayDebugMessage) > 0 { st = status.Newf(codes.Unavailable, "closing transport due to: %v, received prior goaway: %v", err, goAwayDebugMessage) @@ -1078,7 +1068,7 @@ func (t *http2Client) GracefulClose() { // Write formats the data into HTTP2 data frame(s) and sends it out. The caller // should proceed only if Write returns nil. -func (t *http2Client) Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error { +func (t *http2Client) write(s *ClientStream, hdr []byte, data mem.BufferSlice, opts *WriteOptions) error { reader := data.Reader() if opts.Last { @@ -1107,10 +1097,11 @@ func (t *http2Client) Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *O _ = reader.Close() return err } + t.incrMsgSent() return nil } -func (t *http2Client) getStream(f http2.Frame) *Stream { +func (t *http2Client) getStream(f http2.Frame) *ClientStream { t.mu.Lock() s := t.activeStreams[f.Header().StreamID] t.mu.Unlock() @@ -1120,7 +1111,7 @@ func (t *http2Client) getStream(f http2.Frame) *Stream { // adjustWindow sends out extra window update over the initial window size // of stream if the application is requesting data larger in size than // the window. -func (t *http2Client) adjustWindow(s *Stream, n uint32) { +func (t *http2Client) adjustWindow(s *ClientStream, n uint32) { if w := s.fc.maybeAdjust(n); w > 0 { t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) } @@ -1129,7 +1120,7 @@ func (t *http2Client) adjustWindow(s *Stream, n uint32) { // updateWindow adjusts the inbound quota for the stream. // Window updates will be sent out when the cumulative quota // exceeds the corresponding threshold. 
-func (t *http2Client) updateWindow(s *Stream, n uint32) { +func (t *http2Client) updateWindow(s *ClientStream, n uint32) { if w := s.fc.onRead(n); w > 0 { t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) } @@ -1235,7 +1226,7 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { } if f.ErrCode == http2.ErrCodeRefusedStream { // The stream was unprocessed by the server. - atomic.StoreUint32(&s.unprocessed, 1) + s.unprocessed.Store(true) } statusCode, ok := http2ErrConvTab[f.ErrCode] if !ok { @@ -1316,11 +1307,11 @@ func (t *http2Client) handlePing(f *http2.PingFrame) { t.controlBuf.put(pingAck) } -func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { +func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) error { t.mu.Lock() if t.state == closing { t.mu.Unlock() - return + return nil } if f.ErrCode == http2.ErrCodeEnhanceYourCalm && string(f.DebugData()) == "too_many_pings" { // When a client receives a GOAWAY with error code ENHANCE_YOUR_CALM and debug @@ -1332,8 +1323,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { id := f.LastStreamID if id > 0 && id%2 == 0 { t.mu.Unlock() - t.Close(connectionErrorf(true, nil, "received goaway with non-zero even-numbered stream id: %v", id)) - return + return connectionErrorf(true, nil, "received goaway with non-zero even-numbered stream id: %v", id) } // A client can receive multiple GoAways from the server (see // https://github.com/grpc/grpc-go/issues/1387). The idea is that the first @@ -1350,8 +1340,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { // If there are multiple GoAways the first one should always have an ID greater than the following ones. if id > t.prevGoAwayID { t.mu.Unlock() - t.Close(connectionErrorf(true, nil, "received goaway with stream id: %v, which exceeds stream id of previous goaway: %v", id, t.prevGoAwayID)) - return + return connectionErrorf(true, nil, "received goaway with stream id: %v, which exceeds stream id of previous goaway: %v", id, t.prevGoAwayID) } default: t.setGoAwayReason(f) @@ -1375,15 +1364,14 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { t.prevGoAwayID = id if len(t.activeStreams) == 0 { t.mu.Unlock() - t.Close(connectionErrorf(true, nil, "received goaway and there are no active streams")) - return + return connectionErrorf(true, nil, "received goaway and there are no active streams") } - streamsToClose := make([]*Stream, 0) + streamsToClose := make([]*ClientStream, 0) for streamID, stream := range t.activeStreams { if streamID > id && streamID <= upperLimit { // The stream was unprocessed by the server. - atomic.StoreUint32(&stream.unprocessed, 1) + stream.unprocessed.Store(true) streamsToClose = append(streamsToClose, stream) } } @@ -1393,6 +1381,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { for _, stream := range streamsToClose { t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false) } + return nil } // setGoAwayReason sets the value of t.goAwayReason based @@ -1434,7 +1423,7 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { return } endStream := frame.StreamEnded() - atomic.StoreUint32(&s.bytesReceived, 1) + s.bytesReceived.Store(true) initialHeader := atomic.LoadUint32(&s.headerChanClosed) == 0 if !initialHeader && !endStream { @@ -1628,7 +1617,13 @@ func (t *http2Client) readServerPreface() error { // network connection. If the server preface is not read successfully, an // error is pushed to errCh; otherwise errCh is closed with no error. 
func (t *http2Client) reader(errCh chan<- error) { - defer close(t.readerDone) + var errClose error + defer func() { + close(t.readerDone) + if errClose != nil { + t.Close(errClose) + } + }() if err := t.readServerPreface(); err != nil { errCh <- err @@ -1669,7 +1664,7 @@ func (t *http2Client) reader(errCh chan<- error) { continue } // Transport error. - t.Close(connectionErrorf(true, err, "error reading from server: %v", err)) + errClose = connectionErrorf(true, err, "error reading from server: %v", err) return } switch frame := frame.(type) { @@ -1684,7 +1679,7 @@ func (t *http2Client) reader(errCh chan<- error) { case *http2.PingFrame: t.handlePing(frame) case *http2.GoAwayFrame: - t.handleGoAway(frame) + errClose = t.handleGoAway(frame) case *http2.WindowUpdateFrame: t.handleWindowUpdate(frame) default: @@ -1697,6 +1692,13 @@ func (t *http2Client) reader(errCh chan<- error) { // keepalive running in a separate goroutine makes sure the connection is alive by sending pings. func (t *http2Client) keepalive() { + var err error + defer func() { + close(t.keepaliveDone) + if err != nil { + t.Close(err) + } + }() p := &ping{data: [8]byte{}} // True iff a ping has been sent, and no data has been received since then. outstandingPing := false @@ -1720,7 +1722,7 @@ func (t *http2Client) keepalive() { continue } if outstandingPing && timeoutLeft <= 0 { - t.Close(connectionErrorf(true, nil, "keepalive ping failed to receive ACK within timeout")) + err = connectionErrorf(true, nil, "keepalive ping failed to receive ACK within timeout") return } t.mu.Lock() @@ -1791,14 +1793,18 @@ func (t *http2Client) socketMetrics() *channelz.EphemeralSocketMetrics { func (t *http2Client) RemoteAddr() net.Addr { return t.remoteAddr } -func (t *http2Client) IncrMsgSent() { - t.channelz.SocketMetrics.MessagesSent.Add(1) - t.channelz.SocketMetrics.LastMessageSentTimestamp.Store(time.Now().UnixNano()) +func (t *http2Client) incrMsgSent() { + if channelz.IsOn() { + t.channelz.SocketMetrics.MessagesSent.Add(1) + t.channelz.SocketMetrics.LastMessageSentTimestamp.Store(time.Now().UnixNano()) + } } -func (t *http2Client) IncrMsgRecv() { - t.channelz.SocketMetrics.MessagesReceived.Add(1) - t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Store(time.Now().UnixNano()) +func (t *http2Client) incrMsgRecv() { + if channelz.IsOn() { + t.channelz.SocketMetrics.MessagesReceived.Add(1) + t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Store(time.Now().UnixNano()) + } } func (t *http2Client) getOutFlowWindow() int64 { diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 584b50fe5..997b0a59b 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -25,7 +25,7 @@ import ( "fmt" "io" "math" - "math/rand" + rand "math/rand/v2" "net" "net/http" "strconv" @@ -111,7 +111,7 @@ type http2Server struct { // already initialized since draining is already underway. drainEvent *grpcsync.Event state transportState - activeStreams map[uint32]*Stream + activeStreams map[uint32]*ServerStream // idle is the time instant when the connection went idle. // This is either the beginning of the connection or when the number of // RPCs go down to 0. 
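The reader and keepalive changes above share one shutdown pattern: each goroutine records a fatal error, closes its done channel in a defer, and only then calls Close, which lets http2Client.Close block on readerDone and keepaliveDone without deadlocking. A reduced sketch of that ordering, with hypothetical names:

package main

import (
	"errors"
	"fmt"
)

// worker stands in for the reader/keepalive goroutines: the defer first
// signals exit by closing done, then propagates any fatal error. Closing
// done before invoking closeFn is what lets the transport's Close wait
// on the channel safely.
func worker(done chan struct{}, closeFn func(error)) {
	var errClose error
	defer func() {
		close(done)
		if errClose != nil {
			closeFn(errClose)
		}
	}()
	errClose = errors.New("error reading from server") // stand-in for a read failure
}

func main() {
	done := make(chan struct{})
	closed := make(chan error, 1)
	go worker(done, func(err error) { closed <- err })
	<-done // like http2Client.Close waiting on readerDone/keepaliveDone
	fmt.Println("closing transport due to:", <-closed)
}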
@@ -256,7 +256,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, inTapHandle: config.InTapHandle, fc: &trInFlow{limit: uint32(icwz)}, state: reachable, - activeStreams: make(map[uint32]*Stream), + activeStreams: make(map[uint32]*ServerStream), stats: config.StatsHandlers, kp: kp, idle: time.Now(), @@ -359,7 +359,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, // operateHeaders takes action on the decoded headers. Returns an error if fatal // error encountered and transport needs to close, otherwise returns nil. -func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeadersFrame, handle func(*Stream)) error { +func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeadersFrame, handle func(*ServerStream)) error { // Acquire max stream ID lock for entire duration t.maxStreamMu.Lock() defer t.maxStreamMu.Unlock() @@ -385,11 +385,13 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade t.maxStreamID = streamID buf := newRecvBuffer() - s := &Stream{ - id: streamID, + s := &ServerStream{ + Stream: &Stream{ + id: streamID, + buf: buf, + fc: &inFlow{limit: uint32(t.initialWindowSize)}, + }, st: t, - buf: buf, - fc: &inFlow{limit: uint32(t.initialWindowSize)}, headerWireLength: int(frame.Header().Length), } var ( @@ -537,12 +539,6 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade // Attach the received metadata to the context. if len(mdata) > 0 { s.ctx = metadata.NewIncomingContext(s.ctx, mdata) - if statsTags := mdata["grpc-tags-bin"]; len(statsTags) > 0 { - s.ctx = stats.SetIncomingTags(s.ctx, []byte(statsTags[len(statsTags)-1])) - } - if statsTrace := mdata["grpc-trace-bin"]; len(statsTrace) > 0 { - s.ctx = stats.SetIncomingTrace(s.ctx, []byte(statsTrace[len(statsTrace)-1])) - } } t.mu.Lock() if t.state != reachable { @@ -568,7 +564,7 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade t.logger.Infof("Aborting the stream early: %v", errMsg) } t.controlBuf.put(&earlyAbortStream{ - httpStatus: 405, + httpStatus: http.StatusMethodNotAllowed, streamID: streamID, contentSubtype: s.contentSubtype, status: status.New(codes.Internal, errMsg), @@ -589,7 +585,7 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade stat = status.New(codes.PermissionDenied, err.Error()) } t.controlBuf.put(&earlyAbortStream{ - httpStatus: 200, + httpStatus: http.StatusOK, streamID: s.id, contentSubtype: s.contentSubtype, status: stat, @@ -634,7 +630,7 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade // HandleStreams receives incoming streams using the given handler. This is // typically run in a separate goroutine. // traceCtx attaches trace to ctx and returns the new context. 
-func (t *http2Server) HandleStreams(ctx context.Context, handle func(*Stream)) { +func (t *http2Server) HandleStreams(ctx context.Context, handle func(*ServerStream)) { defer func() { close(t.readerDone) <-t.loopyWriterDone @@ -698,7 +694,7 @@ func (t *http2Server) HandleStreams(ctx context.Context, handle func(*Stream)) { } } -func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) { +func (t *http2Server) getStream(f http2.Frame) (*ServerStream, bool) { t.mu.Lock() defer t.mu.Unlock() if t.activeStreams == nil { @@ -716,7 +712,7 @@ func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) { // adjustWindow sends out extra window update over the initial window size // of stream if the application is requesting data larger in size than // the window. -func (t *http2Server) adjustWindow(s *Stream, n uint32) { +func (t *http2Server) adjustWindow(s *ServerStream, n uint32) { if w := s.fc.maybeAdjust(n); w > 0 { t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) } @@ -726,7 +722,7 @@ func (t *http2Server) adjustWindow(s *Stream, n uint32) { // updateWindow adjusts the inbound quota for the stream and the transport. // Window updates will deliver to the controller for sending when // the cumulative quota exceeds the corresponding threshold. -func (t *http2Server) updateWindow(s *Stream, n uint32) { +func (t *http2Server) updateWindow(s *ServerStream, n uint32) { if w := s.fc.onRead(n); w > 0 { t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w, @@ -963,7 +959,7 @@ func (t *http2Server) checkForHeaderListSize(it any) bool { return true } -func (t *http2Server) streamContextErr(s *Stream) error { +func (t *http2Server) streamContextErr(s *ServerStream) error { select { case <-t.done: return ErrConnClosing @@ -973,7 +969,7 @@ func (t *http2Server) streamContextErr(s *Stream) error { } // WriteHeader sends the header metadata md back to the client. -func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { +func (t *http2Server) writeHeader(s *ServerStream, md metadata.MD) error { s.hdrMu.Lock() defer s.hdrMu.Unlock() if s.getState() == streamDone { @@ -1006,7 +1002,7 @@ func (t *http2Server) setResetPingStrikes() { atomic.StoreUint32(&t.resetPingStrikes, 1) } -func (t *http2Server) writeHeaderLocked(s *Stream) error { +func (t *http2Server) writeHeaderLocked(s *ServerStream) error { // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields // first and create a slice of that exact size. headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else. @@ -1046,7 +1042,7 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error { // There is no further I/O operations being able to perform on this stream. // TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early // OK is adopted. -func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { +func (t *http2Server) writeStatus(s *ServerStream, st *status.Status) error { s.hdrMu.Lock() defer s.hdrMu.Unlock() @@ -1117,11 +1113,11 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { // Write converts the data into HTTP2 data frame and sends it out. Non-nil error // is returns if it fails (e.g., framing error, transport error). 
-func (t *http2Server) Write(s *Stream, hdr []byte, data mem.BufferSlice, _ *Options) error { +func (t *http2Server) write(s *ServerStream, hdr []byte, data mem.BufferSlice, _ *WriteOptions) error { reader := data.Reader() if !s.isHeaderSent() { // Headers haven't been written yet. - if err := t.WriteHeader(s, nil); err != nil { + if err := t.writeHeader(s, nil); err != nil { _ = reader.Close() return err } @@ -1147,6 +1143,7 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data mem.BufferSlice, _ *Opti _ = reader.Close() return err } + t.incrMsgSent() return nil } @@ -1276,7 +1273,7 @@ func (t *http2Server) Close(err error) { } // deleteStream deletes the stream s from transport's active streams. -func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { +func (t *http2Server) deleteStream(s *ServerStream, eosReceived bool) { t.mu.Lock() if _, ok := t.activeStreams[s.id]; ok { @@ -1297,7 +1294,7 @@ func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { } // finishStream closes the stream and puts the trailing headerFrame into controlbuf. -func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) { +func (t *http2Server) finishStream(s *ServerStream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) { // In case stream sending and receiving are invoked in separate // goroutines (e.g., bi-directional streaming), cancel needs to be // called to interrupt the potential blocking on other goroutines. @@ -1321,7 +1318,7 @@ func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, h } // closeStream clears the footprint of a stream when the stream is not needed any more. -func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eosReceived bool) { +func (t *http2Server) closeStream(s *ServerStream, rst bool, rstCode http2.ErrCode, eosReceived bool) { // In case stream sending and receiving are invoked in separate // goroutines (e.g., bi-directional streaming), cancel needs to be // called to interrupt the potential blocking on other goroutines. @@ -1415,14 +1412,18 @@ func (t *http2Server) socketMetrics() *channelz.EphemeralSocketMetrics { } } -func (t *http2Server) IncrMsgSent() { - t.channelz.SocketMetrics.MessagesSent.Add(1) - t.channelz.SocketMetrics.LastMessageSentTimestamp.Add(1) +func (t *http2Server) incrMsgSent() { + if channelz.IsOn() { + t.channelz.SocketMetrics.MessagesSent.Add(1) + t.channelz.SocketMetrics.LastMessageSentTimestamp.Add(1) + } } -func (t *http2Server) IncrMsgRecv() { - t.channelz.SocketMetrics.MessagesReceived.Add(1) - t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Add(1) +func (t *http2Server) incrMsgRecv() { + if channelz.IsOn() { + t.channelz.SocketMetrics.MessagesReceived.Add(1) + t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Add(1) + } } func (t *http2Server) getOutFlowWindow() int64 { @@ -1455,7 +1456,7 @@ func getJitter(v time.Duration) time.Duration { } // Generate a jitter between +/- 10% of the value. 
r := int64(v / 10) - j := rand.Int63n(2*r) - r + j := rand.Int64N(2*r) - r return time.Duration(j) } diff --git a/vendor/google.golang.org/grpc/internal/transport/proxy.go b/vendor/google.golang.org/grpc/internal/transport/proxy.go index 54b224436..d77384595 100644 --- a/vendor/google.golang.org/grpc/internal/transport/proxy.go +++ b/vendor/google.golang.org/grpc/internal/transport/proxy.go @@ -30,34 +30,16 @@ import ( "net/url" "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/proxyattributes" + "google.golang.org/grpc/resolver" ) const proxyAuthHeaderKey = "Proxy-Authorization" -var ( - // The following variable will be overwritten in the tests. - httpProxyFromEnvironment = http.ProxyFromEnvironment -) - -func mapAddress(address string) (*url.URL, error) { - req := &http.Request{ - URL: &url.URL{ - Scheme: "https", - Host: address, - }, - } - url, err := httpProxyFromEnvironment(req) - if err != nil { - return nil, err - } - return url, nil -} - // To read a response from a net.Conn, http.ReadResponse() takes a bufio.Reader. -// It's possible that this reader reads more than what's need for the response and stores -// those bytes in the buffer. -// bufConn wraps the original net.Conn and the bufio.Reader to make sure we don't lose the -// bytes in the buffer. +// It's possible that this reader reads more than what's needed for the response +// and stores those bytes in the buffer. bufConn wraps the original net.Conn +// and the bufio.Reader to make sure we don't lose the bytes in the buffer. type bufConn struct { net.Conn r io.Reader @@ -72,7 +54,7 @@ func basicAuth(username, password string) string { return base64.StdEncoding.EncodeToString([]byte(auth)) } -func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr string, proxyURL *url.URL, grpcUA string) (_ net.Conn, err error) { +func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, grpcUA string, opts proxyattributes.Options) (_ net.Conn, err error) { defer func() { if err != nil { conn.Close() @@ -81,15 +63,14 @@ func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr stri req := &http.Request{ Method: http.MethodConnect, - URL: &url.URL{Host: backendAddr}, + URL: &url.URL{Host: opts.ConnectAddr}, Header: map[string][]string{"User-Agent": {grpcUA}}, } - if t := proxyURL.User; t != nil { - u := t.Username() - p, _ := t.Password() + if user := opts.User; user != nil { + u := user.Username() + p, _ := user.Password() req.Header.Add(proxyAuthHeaderKey, "Basic "+basicAuth(u, p)) } - if err := sendHTTPRequest(ctx, req, conn); err != nil { return nil, fmt.Errorf("failed to write the HTTP request: %v", err) } @@ -117,28 +98,13 @@ func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr stri return conn, nil } -// proxyDial dials, connecting to a proxy first if necessary. Checks if a proxy -// is necessary, dials, does the HTTP CONNECT handshake, and returns the -// connection. -func proxyDial(ctx context.Context, addr string, grpcUA string) (net.Conn, error) { - newAddr := addr - proxyURL, err := mapAddress(addr) - if err != nil { - return nil, err - } - if proxyURL != nil { - newAddr = proxyURL.Host - } - - conn, err := internal.NetDialerWithTCPKeepalive().DialContext(ctx, "tcp", newAddr) +// proxyDial establishes a TCP connection to the specified address and performs an HTTP CONNECT handshake.
+func proxyDial(ctx context.Context, addr resolver.Address, grpcUA string, opts proxyattributes.Options) (net.Conn, error) { + conn, err := internal.NetDialerWithTCPKeepalive().DialContext(ctx, "tcp", addr.Addr) if err != nil { return nil, err } - if proxyURL == nil { - // proxy is disabled if proxyURL is nil. - return conn, err - } - return doHTTPConnectHandshake(ctx, conn, addr, proxyURL, grpcUA) + return doHTTPConnectHandshake(ctx, conn, grpcUA, opts) } func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error { diff --git a/vendor/google.golang.org/grpc/internal/transport/server_stream.go b/vendor/google.golang.org/grpc/internal/transport/server_stream.go new file mode 100644 index 000000000..a22a90151 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/server_stream.go @@ -0,0 +1,178 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "context" + "errors" + "strings" + "sync" + "sync/atomic" + + "google.golang.org/grpc/mem" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// ServerStream implements streaming functionality for a gRPC server. +type ServerStream struct { + *Stream // Embed for common stream functionality. + + st internalServerTransport + ctxDone <-chan struct{} // closed at the end of stream. Cache of ctx.Done() (for performance) + cancel context.CancelFunc // invoked at the end of stream to cancel ctx. + + // Holds compressor names passed in grpc-accept-encoding metadata from the + // client. + clientAdvertisedCompressors string + headerWireLength int + + // hdrMu protects outgoing header and trailer metadata. + hdrMu sync.Mutex + header metadata.MD // the outgoing header metadata. Updated by WriteHeader. + headerSent atomic.Bool // atomically set when the headers are sent out. +} + +// Read reads an n byte message from the input stream. +func (s *ServerStream) Read(n int) (mem.BufferSlice, error) { + b, err := s.Stream.read(n) + if err == nil { + s.st.incrMsgRecv() + } + return b, err +} + +// SendHeader sends the header metadata for the given stream. +func (s *ServerStream) SendHeader(md metadata.MD) error { + return s.st.writeHeader(s, md) +} + +// Write writes the hdr and data bytes to the output stream. +func (s *ServerStream) Write(hdr []byte, data mem.BufferSlice, opts *WriteOptions) error { + return s.st.write(s, hdr, data, opts) +} + +// WriteStatus sends the status of a stream to the client. WriteStatus is +// the final call made on a stream and always occurs. +func (s *ServerStream) WriteStatus(st *status.Status) error { + return s.st.writeStatus(s, st) +} + +// isHeaderSent indicates whether headers have been sent. +func (s *ServerStream) isHeaderSent() bool { + return s.headerSent.Load() +} + +// updateHeaderSent updates headerSent and returns true +// if it was already set. 
+func (s *ServerStream) updateHeaderSent() bool { + return s.headerSent.Swap(true) +} + +// RecvCompress returns the compression algorithm applied to the inbound +// message. It is empty string if there is no compression applied. +func (s *ServerStream) RecvCompress() string { + return s.recvCompress +} + +// SendCompress returns the send compressor name. +func (s *ServerStream) SendCompress() string { + return s.sendCompress +} + +// ContentSubtype returns the content-subtype for a request. For example, a +// content-subtype of "proto" will result in a content-type of +// "application/grpc+proto". This will always be lowercase. See +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +func (s *ServerStream) ContentSubtype() string { + return s.contentSubtype +} + +// SetSendCompress sets the compression algorithm to the stream. +func (s *ServerStream) SetSendCompress(name string) error { + if s.isHeaderSent() || s.getState() == streamDone { + return errors.New("transport: set send compressor called after headers sent or stream done") + } + + s.sendCompress = name + return nil +} + +// SetContext sets the context of the stream. This will be deleted once the +// stats handler callouts all move to gRPC layer. +func (s *ServerStream) SetContext(ctx context.Context) { + s.ctx = ctx +} + +// ClientAdvertisedCompressors returns the compressor names advertised by the +// client via grpc-accept-encoding header. +func (s *ServerStream) ClientAdvertisedCompressors() []string { + values := strings.Split(s.clientAdvertisedCompressors, ",") + for i, v := range values { + values[i] = strings.TrimSpace(v) + } + return values +} + +// Header returns the header metadata of the stream. It returns the out header +// after t.WriteHeader is called. It does not block and must not be called +// until after WriteHeader. +func (s *ServerStream) Header() (metadata.MD, error) { + // Return the header in stream. It will be the out + // header after t.WriteHeader is called. + return s.header.Copy(), nil +} + +// HeaderWireLength returns the size of the headers of the stream as received +// from the wire. +func (s *ServerStream) HeaderWireLength() int { + return s.headerWireLength +} + +// SetHeader sets the header metadata. This can be called multiple times. +// This should not be called in parallel to other data writes. +func (s *ServerStream) SetHeader(md metadata.MD) error { + if md.Len() == 0 { + return nil + } + if s.isHeaderSent() || s.getState() == streamDone { + return ErrIllegalHeaderWrite + } + s.hdrMu.Lock() + s.header = metadata.Join(s.header, md) + s.hdrMu.Unlock() + return nil +} + +// SetTrailer sets the trailer metadata which will be sent with the RPC status +// by the server. This can be called multiple times. +// This should not be called parallel to other data writes. 
+func (s *ServerStream) SetTrailer(md metadata.MD) error { + if md.Len() == 0 { + return nil + } + if s.getState() == streamDone { + return ErrIllegalHeaderWrite + } + s.hdrMu.Lock() + s.trailer = metadata.Join(s.trailer, md) + s.hdrMu.Unlock() + return nil +} diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index 924ba4f36..af4a4aeab 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -27,7 +27,6 @@ import ( "fmt" "io" "net" - "strings" "sync" "sync/atomic" "time" @@ -39,7 +38,6 @@ import ( "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" - "google.golang.org/grpc/resolver" "google.golang.org/grpc/stats" "google.golang.org/grpc/status" "google.golang.org/grpc/tap" @@ -133,7 +131,7 @@ type recvBufferReader struct { err error } -func (r *recvBufferReader) ReadHeader(header []byte) (n int, err error) { +func (r *recvBufferReader) ReadMessageHeader(header []byte) (n int, err error) { if r.err != nil { return 0, r.err } @@ -142,9 +140,9 @@ func (r *recvBufferReader) ReadHeader(header []byte) (n int, err error) { return n, nil } if r.closeStream != nil { - n, r.err = r.readHeaderClient(header) + n, r.err = r.readMessageHeaderClient(header) } else { - n, r.err = r.readHeader(header) + n, r.err = r.readMessageHeader(header) } return n, r.err } @@ -174,12 +172,12 @@ func (r *recvBufferReader) Read(n int) (buf mem.Buffer, err error) { return buf, r.err } -func (r *recvBufferReader) readHeader(header []byte) (n int, err error) { +func (r *recvBufferReader) readMessageHeader(header []byte) (n int, err error) { select { case <-r.ctxDone: return 0, ContextErr(r.ctx.Err()) case m := <-r.recv.get(): - return r.readHeaderAdditional(m, header) + return r.readMessageHeaderAdditional(m, header) } } @@ -192,7 +190,7 @@ func (r *recvBufferReader) read(n int) (buf mem.Buffer, err error) { } } -func (r *recvBufferReader) readHeaderClient(header []byte) (n int, err error) { +func (r *recvBufferReader) readMessageHeaderClient(header []byte) (n int, err error) { // If the context is canceled, then closes the stream with nil metadata. // closeStream writes its error parameter to r.recv as a recvMsg. // r.readAdditional acts on that message and returns the necessary error. @@ -213,9 +211,9 @@ func (r *recvBufferReader) readHeaderClient(header []byte) (n int, err error) { // faster. r.closeStream(ContextErr(r.ctx.Err())) m := <-r.recv.get() - return r.readHeaderAdditional(m, header) + return r.readMessageHeaderAdditional(m, header) case m := <-r.recv.get(): - return r.readHeaderAdditional(m, header) + return r.readMessageHeaderAdditional(m, header) } } @@ -246,7 +244,7 @@ func (r *recvBufferReader) readClient(n int) (buf mem.Buffer, err error) { } } -func (r *recvBufferReader) readHeaderAdditional(m recvMsg, header []byte) (n int, err error) { +func (r *recvBufferReader) readMessageHeaderAdditional(m recvMsg, header []byte) (n int, err error) { r.recv.load() if m.err != nil { if m.buffer != nil { @@ -288,14 +286,8 @@ const ( // Stream represents an RPC in the transport layer. 
type Stream struct { id uint32 - st ServerTransport // nil for client side Stream - ct ClientTransport // nil for server side Stream - ctx context.Context // the associated context of the stream - cancel context.CancelFunc // always nil for client side Stream - done chan struct{} // closed at the end of stream to unblock writers. On the client side. - doneFunc func() // invoked at the end of stream on client side. - ctxDone <-chan struct{} // same as done chan but for server side. Cache of ctx.Done() (for performance) - method string // the associated RPC method of the stream + ctx context.Context // the associated context of the stream + method string // the associated RPC method of the stream recvCompress string sendCompress string buf *recvBuffer @@ -303,58 +295,17 @@ type Stream struct { fc *inFlow wq *writeQuota - // Holds compressor names passed in grpc-accept-encoding metadata from the - // client. This is empty for the client side stream. - clientAdvertisedCompressors string // Callback to state application's intentions to read data. This // is used to adjust flow control, if needed. requestRead func(int) - headerChan chan struct{} // closed to indicate the end of header metadata. - headerChanClosed uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times. - // headerValid indicates whether a valid header was received. Only - // meaningful after headerChan is closed (always call waitOnHeader() before - // reading its value). Not valid on server side. - headerValid bool - headerWireLength int // Only set on server side. - - // hdrMu protects header and trailer metadata on the server-side. - hdrMu sync.Mutex - // On client side, header keeps the received header metadata. - // - // On server side, header keeps the header set by SetHeader(). The complete - // header will merged into this after t.WriteHeader() is called. - header metadata.MD - trailer metadata.MD // the key-value map of trailer metadata. - - noHeaders bool // set if the client never received headers (set only after the stream is done). - - // On the server-side, headerSent is atomically set to 1 when the headers are sent out. - headerSent uint32 - state streamState - // On client-side it is the status error received from the server. - // On server-side it is unused. - status *status.Status - - bytesReceived uint32 // indicates whether any bytes have been received on this stream - unprocessed uint32 // set if the server sends a refused stream or GOAWAY including this stream - // contentSubtype is the content-subtype for requests. // this must be lowercase or the behavior is undefined. contentSubtype string -} - -// isHeaderSent is only valid on the server-side. -func (s *Stream) isHeaderSent() bool { - return atomic.LoadUint32(&s.headerSent) == 1 -} -// updateHeaderSent updates headerSent and returns true -// if it was already set. It is valid only on server-side. -func (s *Stream) updateHeaderSent() bool { - return atomic.SwapUint32(&s.headerSent, 1) == 1 + trailer metadata.MD // the key-value map of trailer metadata. } func (s *Stream) swapState(st streamState) streamState { @@ -369,110 +320,12 @@ func (s *Stream) getState() streamState { return streamState(atomic.LoadUint32((*uint32)(&s.state))) } -func (s *Stream) waitOnHeader() { - if s.headerChan == nil { - // On the server headerChan is always nil since a stream originates - // only after having received headers. 
- return - } - select { - case <-s.ctx.Done(): - // Close the stream to prevent headers/trailers from changing after - // this function returns. - s.ct.CloseStream(s, ContextErr(s.ctx.Err())) - // headerChan could possibly not be closed yet if closeStream raced - // with operateHeaders; wait until it is closed explicitly here. - <-s.headerChan - case <-s.headerChan: - } -} - -// RecvCompress returns the compression algorithm applied to the inbound -// message. It is empty string if there is no compression applied. -func (s *Stream) RecvCompress() string { - s.waitOnHeader() - return s.recvCompress -} - -// SetSendCompress sets the compression algorithm to the stream. -func (s *Stream) SetSendCompress(name string) error { - if s.isHeaderSent() || s.getState() == streamDone { - return errors.New("transport: set send compressor called after headers sent or stream done") - } - - s.sendCompress = name - return nil -} - -// SendCompress returns the send compressor name. -func (s *Stream) SendCompress() string { - return s.sendCompress -} - -// ClientAdvertisedCompressors returns the compressor names advertised by the -// client via grpc-accept-encoding header. -func (s *Stream) ClientAdvertisedCompressors() []string { - values := strings.Split(s.clientAdvertisedCompressors, ",") - for i, v := range values { - values[i] = strings.TrimSpace(v) - } - return values -} - -// Done returns a channel which is closed when it receives the final status -// from the server. -func (s *Stream) Done() <-chan struct{} { - return s.done -} - -// Header returns the header metadata of the stream. -// -// On client side, it acquires the key-value pairs of header metadata once it is -// available. It blocks until i) the metadata is ready or ii) there is no header -// metadata or iii) the stream is canceled/expired. -// -// On server side, it returns the out header after t.WriteHeader is called. It -// does not block and must not be called until after WriteHeader. -func (s *Stream) Header() (metadata.MD, error) { - if s.headerChan == nil { - // On server side, return the header in stream. It will be the out - // header after t.WriteHeader is called. - return s.header.Copy(), nil - } - s.waitOnHeader() - - if !s.headerValid || s.noHeaders { - return nil, s.status.Err() - } - - return s.header.Copy(), nil -} - -// TrailersOnly blocks until a header or trailers-only frame is received and -// then returns true if the stream was trailers-only. If the stream ends -// before headers are received, returns true, nil. Client-side only. -func (s *Stream) TrailersOnly() bool { - s.waitOnHeader() - return s.noHeaders -} - // Trailer returns the cached trailer metadata. Note that if it is not called -// after the entire stream is done, it could return an empty MD. Client -// side only. +// after the entire stream is done, it could return an empty MD. // It can be safely read only after stream has ended that is either read // or write have returned io.EOF. func (s *Stream) Trailer() metadata.MD { - c := s.trailer.Copy() - return c -} - -// ContentSubtype returns the content-subtype for a request. For example, a -// content-subtype of "proto" will result in a content-type of -// "application/grpc+proto". This will always be lowercase. See -// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for -// more details. -func (s *Stream) ContentSubtype() string { - return s.contentSubtype + return s.trailer.Copy() } // Context returns the context of the stream. 
@@ -480,81 +333,31 @@ func (s *Stream) Context() context.Context { return s.ctx } -// SetContext sets the context of the stream. This will be deleted once the -// stats handler callouts all move to gRPC layer. -func (s *Stream) SetContext(ctx context.Context) { - s.ctx = ctx -} - // Method returns the method for the stream. func (s *Stream) Method() string { return s.method } -// Status returns the status received from the server. -// Status can be read safely only after the stream has ended, -// that is, after Done() is closed. -func (s *Stream) Status() *status.Status { - return s.status -} - -// HeaderWireLength returns the size of the headers of the stream as received -// from the wire. Valid only on the server. -func (s *Stream) HeaderWireLength() int { - return s.headerWireLength -} - -// SetHeader sets the header metadata. This can be called multiple times. -// Server side only. -// This should not be called in parallel to other data writes. -func (s *Stream) SetHeader(md metadata.MD) error { - if md.Len() == 0 { - return nil - } - if s.isHeaderSent() || s.getState() == streamDone { - return ErrIllegalHeaderWrite - } - s.hdrMu.Lock() - s.header = metadata.Join(s.header, md) - s.hdrMu.Unlock() - return nil -} - -// SendHeader sends the given header metadata. The given metadata is -// combined with any metadata set by previous calls to SetHeader and -// then written to the transport stream. -func (s *Stream) SendHeader(md metadata.MD) error { - return s.st.WriteHeader(s, md) -} - -// SetTrailer sets the trailer metadata which will be sent with the RPC status -// by the server. This can be called multiple times. Server side only. -// This should not be called parallel to other data writes. -func (s *Stream) SetTrailer(md metadata.MD) error { - if md.Len() == 0 { - return nil - } - if s.getState() == streamDone { - return ErrIllegalHeaderWrite - } - s.hdrMu.Lock() - s.trailer = metadata.Join(s.trailer, md) - s.hdrMu.Unlock() - return nil -} - func (s *Stream) write(m recvMsg) { s.buf.put(m) } -func (s *Stream) ReadHeader(header []byte) (err error) { +// ReadMessageHeader reads data into the provided header slice from the stream. +// It first checks if there was an error during a previous read operation and +// returns it if present. It then requests a read operation for the length of +// the header. It continues to read from the stream until the entire header +// slice is filled or an error occurs. If an `io.EOF` error is encountered with +// partially read data, it is converted to `io.ErrUnexpectedEOF` to indicate an +// unexpected end of the stream. The method returns any error encountered during +// the read process or nil if the header was successfully read. +func (s *Stream) ReadMessageHeader(header []byte) (err error) { // Don't request a read if there was an error earlier if er := s.trReader.er; er != nil { return er } s.requestRead(len(header)) for len(header) != 0 { - n, err := s.trReader.ReadHeader(header) + n, err := s.trReader.ReadMessageHeader(header) header = header[n:] if len(header) == 0 { err = nil @@ -570,7 +373,7 @@ func (s *Stream) ReadHeader(header []byte) (err error) { } // Read reads n bytes from the wire for this stream. 
-func (s *Stream) Read(n int) (data mem.BufferSlice, err error) { +func (s *Stream) read(n int) (data mem.BufferSlice, err error) { // Don't request a read if there was an error earlier if er := s.trReader.er; er != nil { return nil, er @@ -610,8 +413,8 @@ type transportReader struct { er error } -func (t *transportReader) ReadHeader(header []byte) (int, error) { - n, err := t.reader.ReadHeader(header) +func (t *transportReader) ReadMessageHeader(header []byte) (int, error) { + n, err := t.reader.ReadMessageHeader(header) if err != nil { t.er = err return 0, err @@ -630,17 +433,6 @@ func (t *transportReader) Read(n int) (mem.Buffer, error) { return buf, nil } -// BytesReceived indicates whether any bytes have been received on this stream. -func (s *Stream) BytesReceived() bool { - return atomic.LoadUint32(&s.bytesReceived) == 1 -} - -// Unprocessed indicates whether the server did not process this stream -- -// i.e. it sent a refused stream or GOAWAY including this stream ID. -func (s *Stream) Unprocessed() bool { - return atomic.LoadUint32(&s.unprocessed) == 1 -} - // GoString is implemented by Stream so context.String() won't // race when printing %#v. func (s *Stream) GoString() string { @@ -710,21 +502,13 @@ type ConnectOptions struct { ChannelzParent *channelz.SubChannel // MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received. MaxHeaderListSize *uint32 - // UseProxy specifies if a proxy should be used. - UseProxy bool // The mem.BufferPool to use when reading/writing to the wire. BufferPool mem.BufferPool } -// NewClientTransport establishes the transport with the required ConnectOptions -// and returns it to the caller. -func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (ClientTransport, error) { - return newHTTP2Client(connectCtx, ctx, addr, opts, onClose) -} - -// Options provides additional hints and information for message +// WriteOptions provides additional hints and information for message // transmission. -type Options struct { +type WriteOptions struct { // Last indicates whether this write is the last piece for // this stream. Last bool @@ -773,18 +557,8 @@ type ClientTransport interface { // It does not block. GracefulClose() - // Write sends the data for the given stream. A nil stream indicates - // the write is to be performed on the transport as a whole. - Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error - // NewStream creates a Stream for an RPC. - NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) - - // CloseStream clears the footprint of a stream when the stream is - // not needed any more. The err indicates the error incurred when - // CloseStream is called. Must be called when a stream is finished - // unless the associated transport is closing. - CloseStream(stream *Stream, err error) + NewStream(ctx context.Context, callHdr *CallHdr) (*ClientStream, error) // Error returns a channel that is closed when some I/O error // happens. Typically the caller should have a goroutine to monitor @@ -804,12 +578,6 @@ type ClientTransport interface { // RemoteAddr returns the remote network address. RemoteAddr() net.Addr - - // IncrMsgSent increments the number of message sent through this transport. - IncrMsgSent() - - // IncrMsgRecv increments the number of message received through this transport. 
- IncrMsgRecv() } // ServerTransport is the common interface for all gRPC server-side transport @@ -819,19 +587,7 @@ type ClientTransport interface { // Write methods for a given Stream will be called serially. type ServerTransport interface { // HandleStreams receives incoming streams using the given handler. - HandleStreams(context.Context, func(*Stream)) - - // WriteHeader sends the header metadata for the given stream. - // WriteHeader may not be called on all streams. - WriteHeader(s *Stream, md metadata.MD) error - - // Write sends the data for the given stream. - // Write may not be called on all streams. - Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error - - // WriteStatus sends the status of a stream to the client. WriteStatus is - // the final call made on a stream and always occurs. - WriteStatus(s *Stream, st *status.Status) error + HandleStreams(context.Context, func(*ServerStream)) // Close tears down the transport. Once it is called, the transport // should not be accessed any more. All the pending streams and their @@ -843,12 +599,14 @@ type ServerTransport interface { // Drain notifies the client this ServerTransport stops accepting new RPCs. Drain(debugData string) +} - // IncrMsgSent increments the number of message sent through this transport. - IncrMsgSent() - - // IncrMsgRecv increments the number of message received through this transport. - IncrMsgRecv() +type internalServerTransport interface { + ServerTransport + writeHeader(s *ServerStream, md metadata.MD) error + write(s *ServerStream, hdr []byte, data mem.BufferSlice, opts *WriteOptions) error + writeStatus(s *ServerStream, st *status.Status) error + incrMsgRecv() } // connectionErrorf creates an ConnectionError with the specified error description. diff --git a/vendor/google.golang.org/grpc/mem/buffer_slice.go b/vendor/google.golang.org/grpc/mem/buffer_slice.go index 228e9c2f2..65002e2cc 100644 --- a/vendor/google.golang.org/grpc/mem/buffer_slice.go +++ b/vendor/google.golang.org/grpc/mem/buffer_slice.go @@ -22,6 +22,11 @@ import ( "io" ) +const ( + // 32 KiB is what io.Copy uses. + readAllBufSize = 32 * 1024 +) + // BufferSlice offers a means to represent data that spans one or more Buffer // instances. A BufferSlice is meant to be immutable after creation, and methods // like Ref create and return copies of the slice. This is why all methods have @@ -219,8 +224,58 @@ func (w *writer) Write(p []byte) (n int, err error) { // NewWriter wraps the given BufferSlice and BufferPool to implement the // io.Writer interface. Every call to Write copies the contents of the given -// buffer into a new Buffer pulled from the given pool and the Buffer is added to -// the given BufferSlice. +// buffer into a new Buffer pulled from the given pool and the Buffer is +// added to the given BufferSlice. func NewWriter(buffers *BufferSlice, pool BufferPool) io.Writer { return &writer{buffers: buffers, pool: pool} } + +// ReadAll reads from r until an error or EOF and returns the data it read. +// A successful call returns err == nil, not err == EOF. Because ReadAll is +// defined to read from src until EOF, it does not treat an EOF from Read +// as an error to be reported. +// +// Important: A failed call returns a non-nil error and may also return +// partially read buffers. It is the responsibility of the caller to free the +// BufferSlice returned, or its memory will not be reused. 
+func ReadAll(r io.Reader, pool BufferPool) (BufferSlice, error) { + var result BufferSlice + if wt, ok := r.(io.WriterTo); ok { + // This is more optimal since wt knows the size of chunks it wants to + // write and, hence, we can allocate buffers of an optimal size to fit + // them. E.g. might be a single big chunk, and we wouldn't chop it + // into pieces. + w := NewWriter(&result, pool) + _, err := wt.WriteTo(w) + return result, err + } +nextBuffer: + for { + buf := pool.Get(readAllBufSize) + // We asked for 32KiB but may have been given a bigger buffer. + // Use all of it if that's the case. + *buf = (*buf)[:cap(*buf)] + usedCap := 0 + for { + n, err := r.Read((*buf)[usedCap:]) + usedCap += n + if err != nil { + if usedCap == 0 { + // Nothing in this buf, put it back + pool.Put(buf) + } else { + *buf = (*buf)[:usedCap] + result = append(result, NewBuffer(buf, pool)) + } + if err == io.EOF { + err = nil + } + return result, err + } + if len(*buf) == usedCap { + result = append(result, NewBuffer(buf, pool)) + continue nextBuffer + } + } + } +} diff --git a/vendor/google.golang.org/grpc/mem/buffers.go b/vendor/google.golang.org/grpc/mem/buffers.go index 4d66b2ccc..ecbf0b9a7 100644 --- a/vendor/google.golang.org/grpc/mem/buffers.go +++ b/vendor/google.golang.org/grpc/mem/buffers.go @@ -65,6 +65,9 @@ var ( refObjectPool = sync.Pool{New: func() any { return new(atomic.Int32) }} ) +// IsBelowBufferPoolingThreshold returns true if the given size is less than or +// equal to the threshold for buffer pooling. This is used to determine whether +// to pool buffers or allocate them directly. func IsBelowBufferPoolingThreshold(size int) bool { return size <= bufferPoolingThreshold } @@ -89,7 +92,11 @@ func newBuffer() *buffer { // // Note that the backing array of the given data is not copied. func NewBuffer(data *[]byte, pool BufferPool) Buffer { - if pool == nil || IsBelowBufferPoolingThreshold(len(*data)) { + // Use the buffer's capacity instead of the length, otherwise buffers may + // not be reused under certain conditions. For example, if a large buffer + // is acquired from the pool, but fewer bytes than the buffering threshold + // are written to it, the buffer will not be returned to the pool. + if pool == nil || IsBelowBufferPoolingThreshold(cap(*data)) { return (SliceBuffer)(*data) } b := newBuffer() @@ -194,19 +201,19 @@ func (b *buffer) read(buf []byte) (int, Buffer) { return n, b } -// String returns a string representation of the buffer. May be used for -// debugging purposes. func (b *buffer) String() string { return fmt.Sprintf("mem.Buffer(%p, data: %p, length: %d)", b, b.ReadOnlyData(), len(b.ReadOnlyData())) } +// ReadUnsafe reads bytes from the given Buffer into the provided slice. +// It does not perform safety checks. func ReadUnsafe(dst []byte, buf Buffer) (int, Buffer) { return buf.read(dst) } // SplitUnsafe modifies the receiver to point to the first n bytes while it -// returns a new reference to the remaining bytes. The returned Buffer functions -// just like a normal reference acquired using Ref(). +// returns a new reference to the remaining bytes. The returned Buffer +// functions just like a normal reference acquired using Ref(). func SplitUnsafe(buf Buffer, n int) (left, right Buffer) { return buf.split(n) } @@ -232,12 +239,21 @@ func (e emptyBuffer) read([]byte) (int, Buffer) { return 0, e } +// SliceBuffer is a Buffer implementation that wraps a byte slice. It provides +// methods for reading, splitting, and managing the byte slice. 
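Taken together, the mem changes in these hunks — ReadAll filling a BufferSlice from pooled 32 KiB buffers, NewBuffer pooling by capacity rather than length, and the pool-free SliceBuffer documented just below — can be exercised as in this minimal sketch. It assumes the vendored google.golang.org/grpc/mem package, with mem.DefaultBufferPool as the package's stock pool; it is not part of the patch itself.

package main

import (
	"fmt"
	"strings"

	"google.golang.org/grpc/mem"
)

func main() {
	pool := mem.DefaultBufferPool()

	// ReadAll drains the reader into pooled buffers; per its doc comment,
	// the caller must Free the returned BufferSlice so the buffers can be
	// reused by the pool.
	data, err := mem.ReadAll(strings.NewReader("hello, gRPC"), pool)
	if err != nil {
		panic(err)
	}
	defer data.Free()
	fmt.Println(data.Len()) // 11

	// SliceBuffer wraps a plain []byte with no pool bookkeeping: Ref and
	// Free are no-ops, as in the preloader change above.
	sb := mem.SliceBuffer([]byte("raw bytes"))
	fmt.Println(sb.Len(), string(sb.ReadOnlyData()))
}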
type SliceBuffer []byte +// ReadOnlyData returns the byte slice. func (s SliceBuffer) ReadOnlyData() []byte { return s } -func (s SliceBuffer) Ref() {} -func (s SliceBuffer) Free() {} -func (s SliceBuffer) Len() int { return len(s) } + +// Ref is a noop implementation of Ref. +func (s SliceBuffer) Ref() {} + +// Free is a noop implementation of Free. +func (s SliceBuffer) Free() {} + +// Len is a noop implementation of Len. +func (s SliceBuffer) Len() int { return len(s) } func (s SliceBuffer) split(n int) (left, right Buffer) { return s[:n], s[n:] diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go index bdaa2130e..a2d2a798d 100644 --- a/vendor/google.golang.org/grpc/picker_wrapper.go +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -123,7 +123,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. if lastPickErr != nil { errStr = "latest balancer error: " + lastPickErr.Error() } else { - errStr = fmt.Sprintf("received context error while waiting for new LB policy update: %s", ctx.Err().Error()) + errStr = fmt.Sprintf("%v while waiting for connections to become ready", ctx.Err()) } switch ctx.Err() { case context.DeadlineExceeded: diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go index e87a17f36..ee0ff969a 100644 --- a/vendor/google.golang.org/grpc/preloader.go +++ b/vendor/google.golang.org/grpc/preloader.go @@ -62,7 +62,7 @@ func (p *PreparedMsg) Encode(s Stream, msg any) error { materializedData := data.Materialize() data.Free() - p.encodedData = mem.BufferSlice{mem.NewBuffer(&materializedData, nil)} + p.encodedData = mem.BufferSlice{mem.SliceBuffer(materializedData)} // TODO: it should be possible to grab the bufferPool from the underlying // stream implementation with a type cast to its actual type (such as @@ -76,7 +76,7 @@ func (p *PreparedMsg) Encode(s Stream, msg any) error { if p.pf.isCompressed() { materializedCompData := compData.Materialize() compData.Free() - compData = mem.BufferSlice{mem.NewBuffer(&materializedCompData, nil)} + compData = mem.BufferSlice{mem.SliceBuffer(materializedCompData)} } p.hdr, p.payload = msgHeader(p.encodedData, compData, p.pf) diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go index e1f58104d..12f2c9337 100644 --- a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go @@ -21,7 +21,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.36.4 // protoc v5.27.1 // source: grpc/reflection/v1/reflection.proto @@ -32,6 +32,7 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" + unsafe "unsafe" ) const ( @@ -43,16 +44,13 @@ const ( // The message sent by the client when calling ServerReflectionInfo method. type ServerReflectionRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"` // To use reflection service, the client should set one of the following // fields in message_request. 
The server distinguishes requests by their // defined field and then handles them using corresponding methods. // - // Types that are assignable to MessageRequest: + // Types that are valid to be assigned to MessageRequest: // // *ServerReflectionRequest_FileByFilename // *ServerReflectionRequest_FileContainingSymbol @@ -60,15 +58,15 @@ type ServerReflectionRequest struct { // *ServerReflectionRequest_AllExtensionNumbersOfType // *ServerReflectionRequest_ListServices MessageRequest isServerReflectionRequest_MessageRequest `protobuf_oneof:"message_request"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ServerReflectionRequest) Reset() { *x = ServerReflectionRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ServerReflectionRequest) String() string { @@ -79,7 +77,7 @@ func (*ServerReflectionRequest) ProtoMessage() {} func (x *ServerReflectionRequest) ProtoReflect() protoreflect.Message { mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -101,44 +99,54 @@ func (x *ServerReflectionRequest) GetHost() string { return "" } -func (m *ServerReflectionRequest) GetMessageRequest() isServerReflectionRequest_MessageRequest { - if m != nil { - return m.MessageRequest +func (x *ServerReflectionRequest) GetMessageRequest() isServerReflectionRequest_MessageRequest { + if x != nil { + return x.MessageRequest } return nil } func (x *ServerReflectionRequest) GetFileByFilename() string { - if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileByFilename); ok { - return x.FileByFilename + if x != nil { + if x, ok := x.MessageRequest.(*ServerReflectionRequest_FileByFilename); ok { + return x.FileByFilename + } } return "" } func (x *ServerReflectionRequest) GetFileContainingSymbol() string { - if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileContainingSymbol); ok { - return x.FileContainingSymbol + if x != nil { + if x, ok := x.MessageRequest.(*ServerReflectionRequest_FileContainingSymbol); ok { + return x.FileContainingSymbol + } } return "" } func (x *ServerReflectionRequest) GetFileContainingExtension() *ExtensionRequest { - if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileContainingExtension); ok { - return x.FileContainingExtension + if x != nil { + if x, ok := x.MessageRequest.(*ServerReflectionRequest_FileContainingExtension); ok { + return x.FileContainingExtension + } } return nil } func (x *ServerReflectionRequest) GetAllExtensionNumbersOfType() string { - if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_AllExtensionNumbersOfType); ok { - return x.AllExtensionNumbersOfType + if x != nil { + if x, ok := x.MessageRequest.(*ServerReflectionRequest_AllExtensionNumbersOfType); ok { + return x.AllExtensionNumbersOfType + } } return "" } func (x *ServerReflectionRequest) GetListServices() string { - if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_ListServices); ok { - return x.ListServices + if x != nil { + if x, ok := x.MessageRequest.(*ServerReflectionRequest_ListServices); ok { + return x.ListServices + } } return "" } @@ -197,22 +205,19 
@@ func (*ServerReflectionRequest_ListServices) isServerReflectionRequest_MessageRe // The type name and extension number sent by the client when requesting // file_containing_extension. type ExtensionRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Fully-qualified type name. The format should be . ContainingType string `protobuf:"bytes,1,opt,name=containing_type,json=containingType,proto3" json:"containing_type,omitempty"` ExtensionNumber int32 `protobuf:"varint,2,opt,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ExtensionRequest) Reset() { *x = ExtensionRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ExtensionRequest) String() string { @@ -223,7 +228,7 @@ func (*ExtensionRequest) ProtoMessage() {} func (x *ExtensionRequest) ProtoReflect() protoreflect.Message { mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -254,31 +259,28 @@ func (x *ExtensionRequest) GetExtensionNumber() int32 { // The message sent by the server to answer ServerReflectionInfo method. type ServerReflectionResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` ValidHost string `protobuf:"bytes,1,opt,name=valid_host,json=validHost,proto3" json:"valid_host,omitempty"` OriginalRequest *ServerReflectionRequest `protobuf:"bytes,2,opt,name=original_request,json=originalRequest,proto3" json:"original_request,omitempty"` // The server sets one of the following fields according to the message_request // in the request. 
// - // Types that are assignable to MessageResponse: + // Types that are valid to be assigned to MessageResponse: // // *ServerReflectionResponse_FileDescriptorResponse // *ServerReflectionResponse_AllExtensionNumbersResponse // *ServerReflectionResponse_ListServicesResponse // *ServerReflectionResponse_ErrorResponse MessageResponse isServerReflectionResponse_MessageResponse `protobuf_oneof:"message_response"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ServerReflectionResponse) Reset() { *x = ServerReflectionResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ServerReflectionResponse) String() string { @@ -289,7 +291,7 @@ func (*ServerReflectionResponse) ProtoMessage() {} func (x *ServerReflectionResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -318,37 +320,45 @@ func (x *ServerReflectionResponse) GetOriginalRequest() *ServerReflectionRequest return nil } -func (m *ServerReflectionResponse) GetMessageResponse() isServerReflectionResponse_MessageResponse { - if m != nil { - return m.MessageResponse +func (x *ServerReflectionResponse) GetMessageResponse() isServerReflectionResponse_MessageResponse { + if x != nil { + return x.MessageResponse } return nil } func (x *ServerReflectionResponse) GetFileDescriptorResponse() *FileDescriptorResponse { - if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_FileDescriptorResponse); ok { - return x.FileDescriptorResponse + if x != nil { + if x, ok := x.MessageResponse.(*ServerReflectionResponse_FileDescriptorResponse); ok { + return x.FileDescriptorResponse + } } return nil } func (x *ServerReflectionResponse) GetAllExtensionNumbersResponse() *ExtensionNumberResponse { - if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_AllExtensionNumbersResponse); ok { - return x.AllExtensionNumbersResponse + if x != nil { + if x, ok := x.MessageResponse.(*ServerReflectionResponse_AllExtensionNumbersResponse); ok { + return x.AllExtensionNumbersResponse + } } return nil } func (x *ServerReflectionResponse) GetListServicesResponse() *ListServiceResponse { - if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_ListServicesResponse); ok { - return x.ListServicesResponse + if x != nil { + if x, ok := x.MessageResponse.(*ServerReflectionResponse_ListServicesResponse); ok { + return x.ListServicesResponse + } } return nil } func (x *ServerReflectionResponse) GetErrorResponse() *ErrorResponse { - if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_ErrorResponse); ok { - return x.ErrorResponse + if x != nil { + if x, ok := x.MessageResponse.(*ServerReflectionResponse_ErrorResponse); ok { + return x.ErrorResponse + } } return nil } @@ -396,23 +406,20 @@ func (*ServerReflectionResponse_ErrorResponse) isServerReflectionResponse_Messag // a file_by_filename, file_containing_symbol, or file_containing_extension // request. 
type FileDescriptorResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Serialized FileDescriptorProto messages. We avoid taking a dependency on // descriptor.proto, which uses proto2 only features, by making them opaque // bytes instead. FileDescriptorProto [][]byte `protobuf:"bytes,1,rep,name=file_descriptor_proto,json=fileDescriptorProto,proto3" json:"file_descriptor_proto,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *FileDescriptorResponse) Reset() { *x = FileDescriptorResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FileDescriptorResponse) String() string { @@ -423,7 +430,7 @@ func (*FileDescriptorResponse) ProtoMessage() {} func (x *FileDescriptorResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -448,23 +455,20 @@ func (x *FileDescriptorResponse) GetFileDescriptorProto() [][]byte { // A list of extension numbers sent by the server answering // all_extension_numbers_of_type request. type ExtensionNumberResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Full name of the base type, including the package name. The format // is . BaseTypeName string `protobuf:"bytes,1,opt,name=base_type_name,json=baseTypeName,proto3" json:"base_type_name,omitempty"` ExtensionNumber []int32 `protobuf:"varint,2,rep,packed,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ExtensionNumberResponse) Reset() { *x = ExtensionNumberResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ExtensionNumberResponse) String() string { @@ -475,7 +479,7 @@ func (*ExtensionNumberResponse) ProtoMessage() {} func (x *ExtensionNumberResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -506,22 +510,19 @@ func (x *ExtensionNumberResponse) GetExtensionNumber() []int32 { // A list of ServiceResponse sent by the server answering list_services request. type ListServiceResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // The information of each service may be expanded in the future, so we use // ServiceResponse message to encapsulate it. 
- Service []*ServiceResponse `protobuf:"bytes,1,rep,name=service,proto3" json:"service,omitempty"` + Service []*ServiceResponse `protobuf:"bytes,1,rep,name=service,proto3" json:"service,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ListServiceResponse) Reset() { *x = ListServiceResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ListServiceResponse) String() string { @@ -532,7 +533,7 @@ func (*ListServiceResponse) ProtoMessage() {} func (x *ListServiceResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -557,22 +558,19 @@ func (x *ListServiceResponse) GetService() []*ServiceResponse { // The information of a single service used by ListServiceResponse to answer // list_services request. type ServiceResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Full name of a registered service, including its package name. The format // is . - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ServiceResponse) Reset() { *x = ServiceResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ServiceResponse) String() string { @@ -583,7 +581,7 @@ func (*ServiceResponse) ProtoMessage() {} func (x *ServiceResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -607,22 +605,19 @@ func (x *ServiceResponse) GetName() string { // The error code and error message sent by the server when an error occurs. type ErrorResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // This field uses the error codes defined in grpc::StatusCode. 
- ErrorCode int32 `protobuf:"varint,1,opt,name=error_code,json=errorCode,proto3" json:"error_code,omitempty"` - ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` + ErrorCode int32 `protobuf:"varint,1,opt,name=error_code,json=errorCode,proto3" json:"error_code,omitempty"` + ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ErrorResponse) Reset() { *x = ErrorResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ErrorResponse) String() string { @@ -633,7 +628,7 @@ func (*ErrorResponse) ProtoMessage() {} func (x *ErrorResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -664,7 +659,7 @@ func (x *ErrorResponse) GetErrorMessage() string { var File_grpc_reflection_v1_reflection_proto protoreflect.FileDescriptor -var file_grpc_reflection_v1_reflection_proto_rawDesc = []byte{ +var file_grpc_reflection_v1_reflection_proto_rawDesc = string([]byte{ 0x0a, 0x23, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x72, 0x65, 0x66, 0x6c, @@ -774,16 +769,16 @@ var file_grpc_reflection_v1_reflection_proto_rawDesc = []byte{ 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +}) var ( file_grpc_reflection_v1_reflection_proto_rawDescOnce sync.Once - file_grpc_reflection_v1_reflection_proto_rawDescData = file_grpc_reflection_v1_reflection_proto_rawDesc + file_grpc_reflection_v1_reflection_proto_rawDescData []byte ) func file_grpc_reflection_v1_reflection_proto_rawDescGZIP() []byte { file_grpc_reflection_v1_reflection_proto_rawDescOnce.Do(func() { - file_grpc_reflection_v1_reflection_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_reflection_v1_reflection_proto_rawDescData) + file_grpc_reflection_v1_reflection_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_grpc_reflection_v1_reflection_proto_rawDesc), len(file_grpc_reflection_v1_reflection_proto_rawDesc))) }) return file_grpc_reflection_v1_reflection_proto_rawDescData } @@ -821,104 +816,6 @@ func file_grpc_reflection_v1_reflection_proto_init() { if File_grpc_reflection_v1_reflection_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_grpc_reflection_v1_reflection_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*ServerReflectionRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - 
file_grpc_reflection_v1_reflection_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*ExtensionRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_reflection_v1_reflection_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*ServerReflectionResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_reflection_v1_reflection_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*FileDescriptorResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_reflection_v1_reflection_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*ExtensionNumberResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_reflection_v1_reflection_proto_msgTypes[5].Exporter = func(v any, i int) any { - switch v := v.(*ListServiceResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_reflection_v1_reflection_proto_msgTypes[6].Exporter = func(v any, i int) any { - switch v := v.(*ServiceResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_reflection_v1_reflection_proto_msgTypes[7].Exporter = func(v any, i int) any { - switch v := v.(*ErrorResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } file_grpc_reflection_v1_reflection_proto_msgTypes[0].OneofWrappers = []any{ (*ServerReflectionRequest_FileByFilename)(nil), (*ServerReflectionRequest_FileContainingSymbol)(nil), @@ -936,7 +833,7 @@ func file_grpc_reflection_v1_reflection_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_grpc_reflection_v1_reflection_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_grpc_reflection_v1_reflection_proto_rawDesc), len(file_grpc_reflection_v1_reflection_proto_rawDesc)), NumEnums: 0, NumMessages: 8, NumExtensions: 0, @@ -947,7 +844,6 @@ func file_grpc_reflection_v1_reflection_proto_init() { MessageInfos: file_grpc_reflection_v1_reflection_proto_msgTypes, }.Build() File_grpc_reflection_v1_reflection_proto = out.File - file_grpc_reflection_v1_reflection_proto_rawDesc = nil file_grpc_reflection_v1_reflection_proto_goTypes = nil file_grpc_reflection_v1_reflection_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go index 0582e16af..fcf9c1aa0 100644 --- a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.36.4 // protoc v5.27.1 // grpc/reflection/v1alpha/reflection.proto is a deprecated file. 
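The regeneration pattern above — repeated for the deprecated v1alpha file below — drops the protoimpl.UnsafeEnabled exporters and makes every accessor nil-safe: getters check the receiver and the oneof wrapper before dereferencing, so calls on a zero or nil message return zero values instead of panicking. A sketch against the vendored grpc_reflection_v1 package:

package main

import (
	"fmt"

	v1 "google.golang.org/grpc/reflection/grpc_reflection_v1"
)

func main() {
	var req *v1.ServerReflectionRequest // nil message
	fmt.Println(req.GetListServices())  // "" — safe on a nil receiver

	req = &v1.ServerReflectionRequest{
		Host: "localhost:50051",
		MessageRequest: &v1.ServerReflectionRequest_ListServices{
			ListServices: "*",
		},
	}
	fmt.Println(req.GetHost(), req.GetListServices())
}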
@@ -29,6 +29,7 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" + unsafe "unsafe" ) const ( @@ -42,17 +43,14 @@ const ( // // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. type ServerReflectionRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"` // To use reflection service, the client should set one of the following // fields in message_request. The server distinguishes requests by their // defined field and then handles them using corresponding methods. // - // Types that are assignable to MessageRequest: + // Types that are valid to be assigned to MessageRequest: // // *ServerReflectionRequest_FileByFilename // *ServerReflectionRequest_FileContainingSymbol @@ -60,15 +58,15 @@ type ServerReflectionRequest struct { // *ServerReflectionRequest_AllExtensionNumbersOfType // *ServerReflectionRequest_ListServices MessageRequest isServerReflectionRequest_MessageRequest `protobuf_oneof:"message_request"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ServerReflectionRequest) Reset() { *x = ServerReflectionRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ServerReflectionRequest) String() string { @@ -79,7 +77,7 @@ func (*ServerReflectionRequest) ProtoMessage() {} func (x *ServerReflectionRequest) ProtoReflect() protoreflect.Message { mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -102,49 +100,59 @@ func (x *ServerReflectionRequest) GetHost() string { return "" } -func (m *ServerReflectionRequest) GetMessageRequest() isServerReflectionRequest_MessageRequest { - if m != nil { - return m.MessageRequest +func (x *ServerReflectionRequest) GetMessageRequest() isServerReflectionRequest_MessageRequest { + if x != nil { + return x.MessageRequest } return nil } // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ServerReflectionRequest) GetFileByFilename() string { - if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileByFilename); ok { - return x.FileByFilename + if x != nil { + if x, ok := x.MessageRequest.(*ServerReflectionRequest_FileByFilename); ok { + return x.FileByFilename + } } return "" } // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
func (x *ServerReflectionRequest) GetFileContainingSymbol() string { - if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileContainingSymbol); ok { - return x.FileContainingSymbol + if x != nil { + if x, ok := x.MessageRequest.(*ServerReflectionRequest_FileContainingSymbol); ok { + return x.FileContainingSymbol + } } return "" } // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ServerReflectionRequest) GetFileContainingExtension() *ExtensionRequest { - if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_FileContainingExtension); ok { - return x.FileContainingExtension + if x != nil { + if x, ok := x.MessageRequest.(*ServerReflectionRequest_FileContainingExtension); ok { + return x.FileContainingExtension + } } return nil } // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ServerReflectionRequest) GetAllExtensionNumbersOfType() string { - if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_AllExtensionNumbersOfType); ok { - return x.AllExtensionNumbersOfType + if x != nil { + if x, ok := x.MessageRequest.(*ServerReflectionRequest_AllExtensionNumbersOfType); ok { + return x.AllExtensionNumbersOfType + } } return "" } // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ServerReflectionRequest) GetListServices() string { - if x, ok := x.GetMessageRequest().(*ServerReflectionRequest_ListServices); ok { - return x.ListServices + if x != nil { + if x, ok := x.MessageRequest.(*ServerReflectionRequest_ListServices); ok { + return x.ListServices + } } return "" } @@ -215,25 +223,22 @@ func (*ServerReflectionRequest_ListServices) isServerReflectionRequest_MessageRe // // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. type ExtensionRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Fully-qualified type name. The format should be . // // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. ContainingType string `protobuf:"bytes,1,opt,name=containing_type,json=containingType,proto3" json:"containing_type,omitempty"` // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
ExtensionNumber int32 `protobuf:"varint,2,opt,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ExtensionRequest) Reset() { *x = ExtensionRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ExtensionRequest) String() string { @@ -244,7 +249,7 @@ func (*ExtensionRequest) ProtoMessage() {} func (x *ExtensionRequest) ProtoReflect() protoreflect.Message { mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -279,10 +284,7 @@ func (x *ExtensionRequest) GetExtensionNumber() int32 { // // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. type ServerReflectionResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. ValidHost string `protobuf:"bytes,1,opt,name=valid_host,json=validHost,proto3" json:"valid_host,omitempty"` // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. @@ -290,22 +292,22 @@ type ServerReflectionResponse struct { // The server set one of the following fields according to the message_request // in the request. 
// - // Types that are assignable to MessageResponse: + // Types that are valid to be assigned to MessageResponse: // // *ServerReflectionResponse_FileDescriptorResponse // *ServerReflectionResponse_AllExtensionNumbersResponse // *ServerReflectionResponse_ListServicesResponse // *ServerReflectionResponse_ErrorResponse MessageResponse isServerReflectionResponse_MessageResponse `protobuf_oneof:"message_response"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ServerReflectionResponse) Reset() { *x = ServerReflectionResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ServerReflectionResponse) String() string { @@ -316,7 +318,7 @@ func (*ServerReflectionResponse) ProtoMessage() {} func (x *ServerReflectionResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -347,41 +349,49 @@ func (x *ServerReflectionResponse) GetOriginalRequest() *ServerReflectionRequest return nil } -func (m *ServerReflectionResponse) GetMessageResponse() isServerReflectionResponse_MessageResponse { - if m != nil { - return m.MessageResponse +func (x *ServerReflectionResponse) GetMessageResponse() isServerReflectionResponse_MessageResponse { + if x != nil { + return x.MessageResponse } return nil } // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ServerReflectionResponse) GetFileDescriptorResponse() *FileDescriptorResponse { - if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_FileDescriptorResponse); ok { - return x.FileDescriptorResponse + if x != nil { + if x, ok := x.MessageResponse.(*ServerReflectionResponse_FileDescriptorResponse); ok { + return x.FileDescriptorResponse + } } return nil } // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ServerReflectionResponse) GetAllExtensionNumbersResponse() *ExtensionNumberResponse { - if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_AllExtensionNumbersResponse); ok { - return x.AllExtensionNumbersResponse + if x != nil { + if x, ok := x.MessageResponse.(*ServerReflectionResponse_AllExtensionNumbersResponse); ok { + return x.AllExtensionNumbersResponse + } } return nil } // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. func (x *ServerReflectionResponse) GetListServicesResponse() *ListServiceResponse { - if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_ListServicesResponse); ok { - return x.ListServicesResponse + if x != nil { + if x, ok := x.MessageResponse.(*ServerReflectionResponse_ListServicesResponse); ok { + return x.ListServicesResponse + } } return nil } // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
func (x *ServerReflectionResponse) GetErrorResponse() *ErrorResponse { - if x, ok := x.GetMessageResponse().(*ServerReflectionResponse_ErrorResponse); ok { - return x.ErrorResponse + if x != nil { + if x, ok := x.MessageResponse.(*ServerReflectionResponse_ErrorResponse); ok { + return x.ErrorResponse + } } return nil } @@ -439,25 +449,22 @@ func (*ServerReflectionResponse_ErrorResponse) isServerReflectionResponse_Messag // // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. type FileDescriptorResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Serialized FileDescriptorProto messages. We avoid taking a dependency on // descriptor.proto, which uses proto2 only features, by making them opaque // bytes instead. // // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. FileDescriptorProto [][]byte `protobuf:"bytes,1,rep,name=file_descriptor_proto,json=fileDescriptorProto,proto3" json:"file_descriptor_proto,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *FileDescriptorResponse) Reset() { *x = FileDescriptorResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FileDescriptorResponse) String() string { @@ -468,7 +475,7 @@ func (*FileDescriptorResponse) ProtoMessage() {} func (x *FileDescriptorResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -496,10 +503,7 @@ func (x *FileDescriptorResponse) GetFileDescriptorProto() [][]byte { // // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. type ExtensionNumberResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Full name of the base type, including the package name. The format // is . // @@ -507,15 +511,15 @@ type ExtensionNumberResponse struct { BaseTypeName string `protobuf:"bytes,1,opt,name=base_type_name,json=baseTypeName,proto3" json:"base_type_name,omitempty"` // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
ExtensionNumber []int32 `protobuf:"varint,2,rep,packed,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ExtensionNumberResponse) Reset() { *x = ExtensionNumberResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ExtensionNumberResponse) String() string { @@ -526,7 +530,7 @@ func (*ExtensionNumberResponse) ProtoMessage() {} func (x *ExtensionNumberResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -561,24 +565,21 @@ func (x *ExtensionNumberResponse) GetExtensionNumber() []int32 { // // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. type ListServiceResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // The information of each service may be expanded in the future, so we use // ServiceResponse message to encapsulate it. // // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. - Service []*ServiceResponse `protobuf:"bytes,1,rep,name=service,proto3" json:"service,omitempty"` + Service []*ServiceResponse `protobuf:"bytes,1,rep,name=service,proto3" json:"service,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ListServiceResponse) Reset() { *x = ListServiceResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ListServiceResponse) String() string { @@ -589,7 +590,7 @@ func (*ListServiceResponse) ProtoMessage() {} func (x *ListServiceResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -617,24 +618,21 @@ func (x *ListServiceResponse) GetService() []*ServiceResponse { // // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. type ServiceResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Full name of a registered service, including its package name. The format // is . // // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. 
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ServiceResponse) Reset() { *x = ServiceResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ServiceResponse) String() string { @@ -645,7 +643,7 @@ func (*ServiceResponse) ProtoMessage() {} func (x *ServiceResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -672,25 +670,22 @@ func (x *ServiceResponse) GetName() string { // // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. type ErrorResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // This field uses the error codes defined in grpc::StatusCode. // // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. ErrorCode int32 `protobuf:"varint,1,opt,name=error_code,json=errorCode,proto3" json:"error_code,omitempty"` // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated. - ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` + ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ErrorResponse) Reset() { *x = ErrorResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ErrorResponse) String() string { @@ -701,7 +696,7 @@ func (*ErrorResponse) ProtoMessage() {} func (x *ErrorResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -734,7 +729,7 @@ func (x *ErrorResponse) GetErrorMessage() string { var File_grpc_reflection_v1alpha_reflection_proto protoreflect.FileDescriptor -var file_grpc_reflection_v1alpha_reflection_proto_rawDesc = []byte{ +var file_grpc_reflection_v1alpha_reflection_proto_rawDesc = string([]byte{ 0x0a, 0x28, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x17, 0x67, 0x72, 0x70, 0x63, @@ -849,16 +844,16 @@ var file_grpc_reflection_v1alpha_reflection_proto_rawDesc = []byte{ 
0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0xb8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +}) var ( file_grpc_reflection_v1alpha_reflection_proto_rawDescOnce sync.Once - file_grpc_reflection_v1alpha_reflection_proto_rawDescData = file_grpc_reflection_v1alpha_reflection_proto_rawDesc + file_grpc_reflection_v1alpha_reflection_proto_rawDescData []byte ) func file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP() []byte { file_grpc_reflection_v1alpha_reflection_proto_rawDescOnce.Do(func() { - file_grpc_reflection_v1alpha_reflection_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_reflection_v1alpha_reflection_proto_rawDescData) + file_grpc_reflection_v1alpha_reflection_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_grpc_reflection_v1alpha_reflection_proto_rawDesc), len(file_grpc_reflection_v1alpha_reflection_proto_rawDesc))) }) return file_grpc_reflection_v1alpha_reflection_proto_rawDescData } @@ -896,104 +891,6 @@ func file_grpc_reflection_v1alpha_reflection_proto_init() { if File_grpc_reflection_v1alpha_reflection_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*ServerReflectionRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*ExtensionRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*ServerReflectionResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*FileDescriptorResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*ExtensionNumberResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[5].Exporter = func(v any, i int) any { - switch v := v.(*ListServiceResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[6].Exporter = func(v any, i int) any { - switch v := v.(*ServiceResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_reflection_v1alpha_reflection_proto_msgTypes[7].Exporter = func(v any, i int) any { - switch v := v.(*ErrorResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0].OneofWrappers = []any{ (*ServerReflectionRequest_FileByFilename)(nil), 
(*ServerReflectionRequest_FileContainingSymbol)(nil), @@ -1011,7 +908,7 @@ func file_grpc_reflection_v1alpha_reflection_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_grpc_reflection_v1alpha_reflection_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_grpc_reflection_v1alpha_reflection_proto_rawDesc), len(file_grpc_reflection_v1alpha_reflection_proto_rawDesc)), NumEnums: 0, NumMessages: 8, NumExtensions: 0, @@ -1022,7 +919,6 @@ func file_grpc_reflection_v1alpha_reflection_proto_init() { MessageInfos: file_grpc_reflection_v1alpha_reflection_proto_msgTypes, }.Build() File_grpc_reflection_v1alpha_reflection_proto = out.File - file_grpc_reflection_v1alpha_reflection_proto_rawDesc = nil file_grpc_reflection_v1alpha_reflection_proto_goTypes = nil file_grpc_reflection_v1alpha_reflection_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go index 202854511..b84ef26d4 100644 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -22,6 +22,7 @@ package resolver import ( "context" + "errors" "fmt" "net" "net/url" @@ -29,6 +30,7 @@ import ( "google.golang.org/grpc/attributes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/experimental/stats" "google.golang.org/grpc/internal" "google.golang.org/grpc/serviceconfig" ) @@ -174,6 +176,8 @@ type BuildOptions struct { // Authority is the effective authority of the clientconn for which the // resolver is built. Authority string + // MetricsRecorder is the metrics recorder to do recording. + MetricsRecorder stats.MetricsRecorder } // An Endpoint is one network endpoint, or server, which may have multiple @@ -237,8 +241,8 @@ type ClientConn interface { // UpdateState can be omitted. UpdateState(State) error // ReportError notifies the ClientConn that the Resolver encountered an - // error. The ClientConn will notify the load balancer and begin calling - // ResolveNow on the Resolver with exponential backoff. + // error. The ClientConn then forwards this error to the load balancing + // policy. ReportError(error) // NewAddress is called by resolver to notify ClientConn a new list // of resolved addresses. @@ -330,3 +334,20 @@ type AuthorityOverrider interface { // typically in line, and must keep it unchanged. OverrideAuthority(Target) string } + +// ValidateEndpoints validates endpoints from a petiole policy's perspective. +// Petiole policies should call this before calling into their children. See +// [gRPC A61](https://github.com/grpc/proposal/blob/master/A61-IPv4-IPv6-dualstack-backends.md) +// for details. 
+func ValidateEndpoints(endpoints []Endpoint) error { + if len(endpoints) == 0 { + return errors.New("endpoints list is empty") + } + + for _, endpoint := range endpoints { + for range endpoint.Addresses { + return nil + } + } + return errors.New("endpoints list contains no addresses") +} diff --git a/vendor/google.golang.org/grpc/resolver_wrapper.go b/vendor/google.golang.org/grpc/resolver_wrapper.go index 23bb3fb25..945e24ff8 100644 --- a/vendor/google.golang.org/grpc/resolver_wrapper.go +++ b/vendor/google.golang.org/grpc/resolver_wrapper.go @@ -26,6 +26,7 @@ import ( "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/internal/resolver/delegatingresolver" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" ) @@ -76,9 +77,19 @@ func (ccr *ccResolverWrapper) start() error { CredsBundle: ccr.cc.dopts.copts.CredsBundle, Dialer: ccr.cc.dopts.copts.Dialer, Authority: ccr.cc.authority, + MetricsRecorder: ccr.cc.metricsRecorderList, } var err error - ccr.resolver, err = ccr.cc.resolverBuilder.Build(ccr.cc.parsedTarget, ccr, opts) + // The delegating resolver is used unless: + // - A custom dialer is provided via WithContextDialer dialoption or + // - Proxy usage is disabled through WithNoProxy dialoption. + // In these cases, the resolver is built based on the scheme of target, + // using the appropriate resolver builder. + if ccr.cc.dopts.copts.Dialer != nil || !ccr.cc.dopts.useProxy { + ccr.resolver, err = ccr.cc.resolverBuilder.Build(ccr.cc.parsedTarget, ccr, opts) + } else { + ccr.resolver, err = delegatingresolver.New(ccr.cc.parsedTarget, ccr, opts, ccr.cc.resolverBuilder, ccr.cc.dopts.enableLocalDNSResolution) + } errCh <- err }) return <-errCh diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index 2d96f1405..a8ddb0af5 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -151,7 +151,7 @@ func (d *gzipDecompressor) Type() string { // callInfo contains all related configuration and information about an RPC. type callInfo struct { - compressorType string + compressorName string failFast bool maxReceiveMessageSize *int maxSendMessageSize *int @@ -222,7 +222,7 @@ type HeaderCallOption struct { func (o HeaderCallOption) before(*callInfo) error { return nil } func (o HeaderCallOption) after(_ *callInfo, attempt *csAttempt) { - *o.HeaderAddr, _ = attempt.s.Header() + *o.HeaderAddr, _ = attempt.transportStream.Header() } // Trailer returns a CallOptions that retrieves the trailer metadata @@ -244,7 +244,7 @@ type TrailerCallOption struct { func (o TrailerCallOption) before(*callInfo) error { return nil } func (o TrailerCallOption) after(_ *callInfo, attempt *csAttempt) { - *o.TrailerAddr = attempt.s.Trailer() + *o.TrailerAddr = attempt.transportStream.Trailer() } // Peer returns a CallOption that retrieves peer information for a unary RPC. 
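
The ValidateEndpoints helper added to resolver.go above accepts an endpoints list as soon as any single endpoint carries at least one address, and rejects an empty list or one whose endpoints are all address-free. A minimal sketch of how a caller might exercise it, using only the exported resolver API (the surrounding program is illustrative, not part of this patch):

package main

import (
	"fmt"

	"google.golang.org/grpc/resolver"
)

func main() {
	// Only the second endpoint has an address; ValidateEndpoints returns
	// nil because at least one address exists somewhere in the list.
	endpoints := []resolver.Endpoint{
		{}, // no addresses
		{Addresses: []resolver.Address{{Addr: "10.0.0.1:443"}}},
	}
	if err := resolver.ValidateEndpoints(endpoints); err != nil {
		fmt.Println("rejecting update:", err)
		return
	}
	fmt.Println("endpoints accepted")

	// An empty list is rejected outright ("endpoints list is empty").
	if err := resolver.ValidateEndpoints(nil); err != nil {
		fmt.Println("rejecting update:", err)
	}
}
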
@@ -266,7 +266,7 @@ type PeerCallOption struct { func (o PeerCallOption) before(*callInfo) error { return nil } func (o PeerCallOption) after(_ *callInfo, attempt *csAttempt) { - if x, ok := peer.FromContext(attempt.s.Context()); ok { + if x, ok := peer.FromContext(attempt.transportStream.Context()); ok { *o.PeerAddr = *x } } @@ -435,7 +435,7 @@ type CompressorCallOption struct { } func (o CompressorCallOption) before(c *callInfo) error { - c.compressorType = o.CompressorType + c.compressorName = o.CompressorType return nil } func (o CompressorCallOption) after(*callInfo, *csAttempt) {} @@ -622,7 +622,7 @@ func (pf payloadFormat) isCompressed() bool { } type streamReader interface { - ReadHeader(header []byte) error + ReadMessageHeader(header []byte) error Read(n int) (mem.BufferSlice, error) } @@ -656,7 +656,7 @@ type parser struct { // that the underlying streamReader must not return an incompatible // error. func (p *parser) recvMsg(maxReceiveMessageSize int) (payloadFormat, mem.BufferSlice, error) { - err := p.r.ReadHeader(p.header[:]) + err := p.r.ReadMessageHeader(p.header[:]) if err != nil { return 0, nil, err } @@ -664,9 +664,6 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (payloadFormat, mem.BufferSl pf := payloadFormat(p.header[0]) length := binary.BigEndian.Uint32(p.header[1:]) - if length == 0 { - return pf, nil, nil - } if int64(length) > int64(maxInt) { return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max length allowed on current machine (%d vs. %d)", length, maxInt) } @@ -695,9 +692,9 @@ func encode(c baseCodec, msg any) (mem.BufferSlice, error) { if err != nil { return nil, status.Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error()) } - if uint(b.Len()) > math.MaxUint32 { + if bufSize := uint(b.Len()); bufSize > math.MaxUint32 { b.Free() - return nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b)) + return nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", bufSize) } return b, nil } @@ -791,9 +788,8 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool if !haveCompressor { if isServer { return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) - } else { - return status.Newf(codes.Internal, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) } + return status.Newf(codes.Internal, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) } default: return status.Newf(codes.Internal, "grpc: received unexpected payload format %d", pf) @@ -818,7 +814,7 @@ func (p *payloadInfo) free() { // the buffer is no longer needed. // TODO: Refactor this function to reduce the number of arguments. 
// See: https://google.github.io/styleguide/go/best-practices.html#function-argument-lists -func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool, +func recvAndDecompress(p *parser, s recvCompressor, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool, ) (out mem.BufferSlice, err error) { pf, compressed, err := p.recvMsg(maxReceiveMessageSize) if err != nil { @@ -832,30 +828,13 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei return nil, st.Err() } - var size int if pf.isCompressed() { defer compressed.Free() - // To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor, // use this decompressor as the default. - if dc != nil { - var uncompressedBuf []byte - uncompressedBuf, err = dc.Do(compressed.Reader()) - if err == nil { - out = mem.BufferSlice{mem.NewBuffer(&uncompressedBuf, nil)} - } - size = len(uncompressedBuf) - } else { - out, size, err = decompress(compressor, compressed, maxReceiveMessageSize, p.bufferPool) - } + out, err = decompress(compressor, compressed, dc, maxReceiveMessageSize, p.bufferPool) if err != nil { - return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err) - } - if size > maxReceiveMessageSize { - out.Free() - // TODO: Revisit the error code. Currently keep it consistent with java - // implementation. - return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize) + return nil, err } } else { out = compressed @@ -870,49 +849,56 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei return out, nil } -// Using compressor, decompress d, returning data and size. -// Optionally, if data will be over maxReceiveMessageSize, just return the size. -func decompress(compressor encoding.Compressor, d mem.BufferSlice, maxReceiveMessageSize int, pool mem.BufferPool) (mem.BufferSlice, int, error) { - dcReader, err := compressor.Decompress(d.Reader()) - if err != nil { - return nil, 0, err +// decompress processes the given data by decompressing it using either a custom decompressor or a standard compressor. +// If a custom decompressor is provided, it takes precedence. The function validates that the decompressed data +// does not exceed the specified maximum size and returns an error if this limit is exceeded. +// On success, it returns the decompressed data. Otherwise, it returns an error if decompression fails or the data exceeds the size limit. +func decompress(compressor encoding.Compressor, d mem.BufferSlice, dc Decompressor, maxReceiveMessageSize int, pool mem.BufferPool) (mem.BufferSlice, error) { + if dc != nil { + uncompressed, err := dc.Do(d.Reader()) + if err != nil { + return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err) + } + if len(uncompressed) > maxReceiveMessageSize { + return nil, status.Errorf(codes.ResourceExhausted, "grpc: message after decompression larger than max (%d vs. 
%d)", len(uncompressed), maxReceiveMessageSize) + } + return mem.BufferSlice{mem.SliceBuffer(uncompressed)}, nil } + if compressor != nil { + dcReader, err := compressor.Decompress(d.Reader()) + if err != nil { + return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the message: %v", err) + } - // TODO: Can/should this still be preserved with the new BufferSlice API? Are - // there any actual benefits to allocating a single large buffer instead of - // multiple smaller ones? - //if sizer, ok := compressor.(interface { - // DecompressedSize(compressedBytes []byte) int - //}); ok { - // if size := sizer.DecompressedSize(d); size >= 0 { - // if size > maxReceiveMessageSize { - // return nil, size, nil - // } - // // size is used as an estimate to size the buffer, but we - // // will read more data if available. - // // +MinRead so ReadFrom will not reallocate if size is correct. - // // - // // TODO: If we ensure that the buffer size is the same as the DecompressedSize, - // // we can also utilize the recv buffer pool here. - // buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead)) - // bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) - // return buf.Bytes(), int(bytesRead), err - // } - //} + out, err := mem.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)), pool) + if err != nil { + out.Free() + return nil, status.Errorf(codes.Internal, "grpc: failed to read decompressed data: %v", err) + } - var out mem.BufferSlice - _, err = io.Copy(mem.NewWriter(&out, pool), io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) - if err != nil { - out.Free() - return nil, 0, err + if out.Len() == maxReceiveMessageSize && !atEOF(dcReader) { + out.Free() + return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max %d", maxReceiveMessageSize) + } + return out, nil } - return out, out.Len(), nil + return nil, status.Errorf(codes.Internal, "grpc: no decompressor available for compressed payload") +} + +// atEOF reads data from r and returns true if zero bytes could be read and r.Read returns EOF. +func atEOF(dcReader io.Reader) bool { + n, err := dcReader.Read(make([]byte, 1)) + return n == 0 && err == io.EOF +} + +type recvCompressor interface { + RecvCompress() string } // For the two compressor parameters, both should not be set, but if they are, // dc takes precedence over compressor. // TODO(dfawley): wrap the old compressor/decompressor using the new API? 
-func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool) error { +func recv(p *parser, c baseCodec, s recvCompressor, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool) error { data, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor, isServer) if err != nil { return err diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index d1e1415a4..976e70ae0 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -37,12 +37,14 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/encoding" "google.golang.org/grpc/encoding/proto" + estats "google.golang.org/grpc/experimental/stats" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/grpcutil" + istats "google.golang.org/grpc/internal/stats" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/mem" @@ -82,17 +84,21 @@ func init() { internal.BinaryLogger = binaryLogger internal.JoinServerOptions = newJoinServerOption internal.BufferPool = bufferPool + internal.MetricsRecorderForServer = func(srv *Server) estats.MetricsRecorder { + return istats.NewMetricsRecorderList(srv.opts.statsHandlers) + } } var statusOK = status.New(codes.OK, "") var logger = grpclog.Component("core") -type methodHandler func(srv any, ctx context.Context, dec func(any) error, interceptor UnaryServerInterceptor) (any, error) +// MethodHandler is a function type that processes a unary RPC method call. +type MethodHandler func(srv any, ctx context.Context, dec func(any) error, interceptor UnaryServerInterceptor) (any, error) // MethodDesc represents an RPC service's method specification. type MethodDesc struct { MethodName string - Handler methodHandler + Handler MethodHandler } // ServiceDesc represents an RPC service's specification. @@ -621,8 +627,8 @@ func bufferPool(bufferPool mem.BufferPool) ServerOption { // workload (assuming a QPS of a few thousand requests/sec). const serverWorkerResetThreshold = 1 << 16 -// serverWorker blocks on a *transport.Stream channel forever and waits for -// data to be fed by serveStreams. This allows multiple requests to be +// serverWorker blocks on a *transport.ServerStream channel forever and waits +// for data to be fed by serveStreams. This allows multiple requests to be // processed by the same goroutine, removing the need for expensive stack // re-allocations (see the runtime.morestack problem [1]). // @@ -642,7 +648,7 @@ func (s *Server) serverWorker() { // connections to reduce the time spent overall on runtime.morestack. 
func (s *Server) initServerWorkers() { s.serverWorkerChannel = make(chan func()) - s.serverWorkerChannelClose = grpcsync.OnceFunc(func() { + s.serverWorkerChannelClose = sync.OnceFunc(func() { close(s.serverWorkerChannel) }) for i := uint32(0); i < s.opts.numServerWorkers; i++ { @@ -1020,7 +1026,7 @@ func (s *Server) serveStreams(ctx context.Context, st transport.ServerTransport, }() streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams) - st.HandleStreams(ctx, func(stream *transport.Stream) { + st.HandleStreams(ctx, func(stream *transport.ServerStream) { s.handlersWG.Add(1) streamQuota.acquire() f := func() { @@ -1136,7 +1142,7 @@ func (s *Server) incrCallsFailed() { s.channelz.ServerMetrics.CallsFailed.Add(1) } -func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, msg any, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { +func (s *Server) sendResponse(ctx context.Context, stream *transport.ServerStream, msg any, cp Compressor, opts *transport.WriteOptions, comp encoding.Compressor) error { data, err := encode(s.getCodec(stream.ContentSubtype()), msg) if err != nil { channelz.Error(logger, s.channelz, "grpc: server failed to encode response: ", err) @@ -1165,7 +1171,7 @@ func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport, if payloadLen > s.opts.maxSendMessageSize { return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", payloadLen, s.opts.maxSendMessageSize) } - err = t.Write(stream, hdr, payload, opts) + err = stream.Write(hdr, payload, opts) if err == nil { if len(s.opts.statsHandlers) != 0 { for _, sh := range s.opts.statsHandlers { @@ -1212,7 +1218,7 @@ func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info } } -func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { +func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerStream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { shs := s.opts.statsHandlers if len(shs) != 0 || trInfo != nil || channelz.IsOn() { if channelz.IsOn() { @@ -1320,7 +1326,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor decomp = encoding.GetCompressor(rc) if decomp == nil { st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc) - t.WriteStatus(stream, st) + stream.WriteStatus(st) return st.Err() } } @@ -1354,16 +1360,21 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor d, err := recvAndDecompress(&parser{r: stream, bufferPool: s.opts.bufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp, true) if err != nil { - if e := t.WriteStatus(stream, status.Convert(err)); e != nil { + if e := stream.WriteStatus(status.Convert(err)); e != nil { channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e) } return err } - defer d.Free() - if channelz.IsOn() { - t.IncrMsgRecv() + freed := false + dataFree := func() { + if !freed { + d.Free() + freed = true + } } + defer dataFree() df := func(v any) error { + defer dataFree() if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil { return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) } @@ -1404,7 +1415,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, t 
transport.ServerTranspor trInfo.tr.LazyLog(stringer(appStatus.Message()), true) trInfo.tr.SetError() } - if e := t.WriteStatus(stream, appStatus); e != nil { + if e := stream.WriteStatus(appStatus); e != nil { channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e) } if len(binlogs) != 0 { @@ -1431,20 +1442,20 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor if trInfo != nil { trInfo.tr.LazyLog(stringer("OK"), false) } - opts := &transport.Options{Last: true} + opts := &transport.WriteOptions{Last: true} // Server handler could have set new compressor by calling SetSendCompressor. // In case it is set, we need to use it for compressing outbound message. if stream.SendCompress() != sendCompressorName { comp = encoding.GetCompressor(stream.SendCompress()) } - if err := s.sendResponse(ctx, t, stream, reply, cp, opts, comp); err != nil { + if err := s.sendResponse(ctx, stream, reply, cp, opts, comp); err != nil { if err == io.EOF { // The entire stream is done (for unary RPC only). return err } if sts, ok := status.FromError(err); ok { - if e := t.WriteStatus(stream, sts); e != nil { + if e := stream.WriteStatus(sts); e != nil { channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e) } } else { @@ -1484,9 +1495,6 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor binlog.Log(ctx, sm) } } - if channelz.IsOn() { - t.IncrMsgSent() - } if trInfo != nil { trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true) } @@ -1502,7 +1510,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor binlog.Log(ctx, st) } } - return t.WriteStatus(stream, statusOK) + return stream.WriteStatus(statusOK) } // chainStreamServerInterceptors chains all stream server interceptors into one. @@ -1541,7 +1549,7 @@ func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, inf } } -func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) { +func (s *Server) processStreamingRPC(ctx context.Context, stream *transport.ServerStream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) { if channelz.IsOn() { s.incrCallsStarted() } @@ -1561,7 +1569,6 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran ctx = NewContextWithServerTransportStream(ctx, stream) ss := &serverStream{ ctx: ctx, - t: t, s: stream, p: &parser{r: stream, bufferPool: s.opts.bufferPool}, codec: s.getCodec(stream.ContentSubtype()), @@ -1643,12 +1650,12 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran // If dc is set and matches the stream's compression, use it. Otherwise, try // to find a matching registered compressor for decomp. 
if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc { - ss.dc = s.opts.dc + ss.decompressorV0 = s.opts.dc } else if rc != "" && rc != encoding.Identity { - ss.decomp = encoding.GetCompressor(rc) - if ss.decomp == nil { + ss.decompressorV1 = encoding.GetCompressor(rc) + if ss.decompressorV1 == nil { st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc) - t.WriteStatus(ss.s, st) + ss.s.WriteStatus(st) return st.Err() } } @@ -1658,12 +1665,12 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran // // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686. if s.opts.cp != nil { - ss.cp = s.opts.cp + ss.compressorV0 = s.opts.cp ss.sendCompressorName = s.opts.cp.Type() } else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity { // Legacy compressor not specified; attempt to respond with same encoding. - ss.comp = encoding.GetCompressor(rc) - if ss.comp != nil { + ss.compressorV1 = encoding.GetCompressor(rc) + if ss.compressorV1 != nil { ss.sendCompressorName = rc } } @@ -1674,7 +1681,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran } } - ss.ctx = newContextWithRPCInfo(ss.ctx, false, ss.codec, ss.cp, ss.comp) + ss.ctx = newContextWithRPCInfo(ss.ctx, false, ss.codec, ss.compressorV0, ss.compressorV1) if trInfo != nil { trInfo.tr.LazyLog(&trInfo.firstLine, false) @@ -1717,7 +1724,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran binlog.Log(ctx, st) } } - t.WriteStatus(ss.s, appStatus) + ss.s.WriteStatus(appStatus) // TODO: Should we log an error from WriteStatus here and below? return appErr } @@ -1735,10 +1742,10 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran binlog.Log(ctx, st) } } - return t.WriteStatus(ss.s, statusOK) + return ss.s.WriteStatus(statusOK) } -func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream) { +func (s *Server) handleStream(t transport.ServerTransport, stream *transport.ServerStream) { ctx := stream.Context() ctx = contextWithServer(ctx, s) var ti *traceInfo @@ -1768,7 +1775,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str ti.tr.SetError() } errDesc := fmt.Sprintf("malformed method name: %q", stream.Method()) - if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { + if err := stream.WriteStatus(status.New(codes.Unimplemented, errDesc)); err != nil { if ti != nil { ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) ti.tr.SetError() @@ -1783,17 +1790,20 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str service := sm[:pos] method := sm[pos+1:] - md, _ := metadata.FromIncomingContext(ctx) - for _, sh := range s.opts.statsHandlers { - ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()}) - sh.HandleRPC(ctx, &stats.InHeader{ - FullMethod: stream.Method(), - RemoteAddr: t.Peer().Addr, - LocalAddr: t.Peer().LocalAddr, - Compression: stream.RecvCompress(), - WireLength: stream.HeaderWireLength(), - Header: md, - }) + // FromIncomingContext is expensive: skip if there are no statsHandlers + if len(s.opts.statsHandlers) > 0 { + md, _ := metadata.FromIncomingContext(ctx) + for _, sh := range s.opts.statsHandlers { + ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()}) + sh.HandleRPC(ctx, &stats.InHeader{ + FullMethod: stream.Method(), + RemoteAddr: 
t.Peer().Addr, + LocalAddr: t.Peer().LocalAddr, + Compression: stream.RecvCompress(), + WireLength: stream.HeaderWireLength(), + Header: md, + }) + } } // To have calls in stream callouts work. Will delete once all stats handler // calls come from the gRPC layer. @@ -1802,17 +1812,17 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str srv, knownService := s.services[service] if knownService { if md, ok := srv.methods[method]; ok { - s.processUnaryRPC(ctx, t, stream, srv, md, ti) + s.processUnaryRPC(ctx, stream, srv, md, ti) return } if sd, ok := srv.streams[method]; ok { - s.processStreamingRPC(ctx, t, stream, srv, sd, ti) + s.processStreamingRPC(ctx, stream, srv, sd, ti) return } } // Unknown service, or known server unknown method. if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil { - s.processStreamingRPC(ctx, t, stream, nil, unknownDesc, ti) + s.processStreamingRPC(ctx, stream, nil, unknownDesc, ti) return } var errDesc string @@ -1825,7 +1835,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str ti.tr.LazyPrintf("%s", errDesc) ti.tr.SetError() } - if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { + if err := stream.WriteStatus(status.New(codes.Unimplemented, errDesc)); err != nil { if ti != nil { ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) ti.tr.SetError() @@ -1925,7 +1935,7 @@ func (s *Server) stop(graceful bool) { s.conns = nil if s.opts.numServerWorkers > 0 { - // Closing the channel (only once, via grpcsync.OnceFunc) after all the + // Closing the channel (only once, via sync.OnceFunc) after all the // connections have been closed above ensures that there are no // goroutines executing the callback passed to st.HandleStreams (where // the channel is written to). @@ -2100,7 +2110,7 @@ func SendHeader(ctx context.Context, md metadata.MD) error { // Notice: This function is EXPERIMENTAL and may be changed or removed in a // later release. func SetSendCompressor(ctx context.Context, name string) error { - stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream) + stream, ok := ServerTransportStreamFromContext(ctx).(*transport.ServerStream) if !ok || stream == nil { return fmt.Errorf("failed to fetch the stream from the given context") } @@ -2122,7 +2132,7 @@ func SetSendCompressor(ctx context.Context, name string) error { // Notice: This function is EXPERIMENTAL and may be changed or removed in a // later release. 
func ClientSupportedCompressors(ctx context.Context) ([]string, error) { - stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream) + stream, ok := ServerTransportStreamFromContext(ctx).(*transport.ServerStream) if !ok || stream == nil { return nil, fmt.Errorf("failed to fetch the stream from the given context %v", ctx) } diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go index 2671c5ef6..8d451e07c 100644 --- a/vendor/google.golang.org/grpc/service_config.go +++ b/vendor/google.golang.org/grpc/service_config.go @@ -168,6 +168,7 @@ func init() { return parseServiceConfig(js, defaultMaxCallAttempts) } } + func parseServiceConfig(js string, maxAttempts int) *serviceconfig.ParseResult { if len(js) == 0 { return &serviceconfig.ParseResult{Err: fmt.Errorf("no JSON service config provided")} @@ -267,18 +268,21 @@ func parseServiceConfig(js string, maxAttempts int) *serviceconfig.ParseResult { return &serviceconfig.ParseResult{Config: &sc} } +func isValidRetryPolicy(jrp *jsonRetryPolicy) bool { + return jrp.MaxAttempts > 1 && + jrp.InitialBackoff > 0 && + jrp.MaxBackoff > 0 && + jrp.BackoffMultiplier > 0 && + len(jrp.RetryableStatusCodes) > 0 +} + func convertRetryPolicy(jrp *jsonRetryPolicy, maxAttempts int) (p *internalserviceconfig.RetryPolicy, err error) { if jrp == nil { return nil, nil } - if jrp.MaxAttempts <= 1 || - jrp.InitialBackoff <= 0 || - jrp.MaxBackoff <= 0 || - jrp.BackoffMultiplier <= 0 || - len(jrp.RetryableStatusCodes) == 0 { - logger.Warningf("grpc: ignoring retry policy %v due to illegal configuration", jrp) - return nil, nil + if !isValidRetryPolicy(jrp) { + return nil, fmt.Errorf("invalid retry policy (%+v): ", jrp) } if jrp.MaxAttempts < maxAttempts { @@ -297,7 +301,7 @@ func convertRetryPolicy(jrp *jsonRetryPolicy, maxAttempts int) (p *internalservi return rp, nil } -func min(a, b *int) *int { +func minPointers(a, b *int) *int { if *a < *b { return a } @@ -309,7 +313,7 @@ func getMaxSize(mcMax, doptMax *int, defaultVal int) *int { return &defaultVal } if mcMax != nil && doptMax != nil { - return min(mcMax, doptMax) + return minPointers(mcMax, doptMax) } if mcMax != nil { return mcMax diff --git a/vendor/google.golang.org/grpc/stats/metrics.go b/vendor/google.golang.org/grpc/stats/metrics.go new file mode 100644 index 000000000..641c8e979 --- /dev/null +++ b/vendor/google.golang.org/grpc/stats/metrics.go @@ -0,0 +1,81 @@ +/* + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package stats + +import "maps" + +// MetricSet is a set of metrics to record. Once created, MetricSet is immutable, +// however Add and Remove can make copies with specific metrics added or +// removed, respectively. +// +// Do not construct directly; use NewMetricSet instead. +type MetricSet struct { + // metrics are the set of metrics to initialize. + metrics map[string]bool +} + +// NewMetricSet returns a MetricSet containing metricNames. 
+func NewMetricSet(metricNames ...string) *MetricSet { + newMetrics := make(map[string]bool) + for _, metric := range metricNames { + newMetrics[metric] = true + } + return &MetricSet{metrics: newMetrics} +} + +// Metrics returns the metrics set. The returned map is read-only and must not +// be modified. +func (m *MetricSet) Metrics() map[string]bool { + return m.metrics +} + +// Add adds the metricNames to the metrics set and returns a new copy with the +// additional metrics. +func (m *MetricSet) Add(metricNames ...string) *MetricSet { + newMetrics := make(map[string]bool) + for metric := range m.metrics { + newMetrics[metric] = true + } + + for _, metric := range metricNames { + newMetrics[metric] = true + } + return &MetricSet{metrics: newMetrics} +} + +// Join joins the metrics passed in with the metrics set, and returns a new copy +// with the merged metrics. +func (m *MetricSet) Join(metrics *MetricSet) *MetricSet { + newMetrics := make(map[string]bool) + maps.Copy(newMetrics, m.metrics) + maps.Copy(newMetrics, metrics.metrics) + return &MetricSet{metrics: newMetrics} +} + +// Remove removes the metricNames from the metrics set and returns a new copy +// with the metrics removed. +func (m *MetricSet) Remove(metricNames ...string) *MetricSet { + newMetrics := make(map[string]bool) + for metric := range m.metrics { + newMetrics[metric] = true + } + + for _, metric := range metricNames { + delete(newMetrics, metric) + } + return &MetricSet{metrics: newMetrics} +} diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go index 71195c494..6f20d2d54 100644 --- a/vendor/google.golang.org/grpc/stats/stats.go +++ b/vendor/google.golang.org/grpc/stats/stats.go @@ -260,84 +260,42 @@ func (s *ConnEnd) IsClient() bool { return s.Client } func (s *ConnEnd) isConnStats() {} -type incomingTagsKey struct{} -type outgoingTagsKey struct{} - // SetTags attaches stats tagging data to the context, which will be sent in // the outgoing RPC with the header grpc-tags-bin. Subsequent calls to // SetTags will overwrite the values from earlier calls. // -// NOTE: this is provided only for backward compatibility with existing clients -// and will likely be removed in an upcoming release. New uses should transmit -// this type of data using metadata with a different, non-reserved (i.e. does -// not begin with "grpc-") header name. +// Deprecated: set the `grpc-tags-bin` header in the metadata instead. func SetTags(ctx context.Context, b []byte) context.Context { - return context.WithValue(ctx, outgoingTagsKey{}, b) + return metadata.AppendToOutgoingContext(ctx, "grpc-tags-bin", string(b)) } // Tags returns the tags from the context for the inbound RPC. // -// NOTE: this is provided only for backward compatibility with existing clients -// and will likely be removed in an upcoming release. New uses should transmit -// this type of data using metadata with a different, non-reserved (i.e. does -// not begin with "grpc-") header name. +// Deprecated: obtain the `grpc-tags-bin` header from metadata instead. func Tags(ctx context.Context) []byte { - b, _ := ctx.Value(incomingTagsKey{}).([]byte) - return b -} - -// SetIncomingTags attaches stats tagging data to the context, to be read by -// the application (not sent in outgoing RPCs). -// -// This is intended for gRPC-internal use ONLY. 
-func SetIncomingTags(ctx context.Context, b []byte) context.Context { - return context.WithValue(ctx, incomingTagsKey{}, b) -} - -// OutgoingTags returns the tags from the context for the outbound RPC. -// -// This is intended for gRPC-internal use ONLY. -func OutgoingTags(ctx context.Context) []byte { - b, _ := ctx.Value(outgoingTagsKey{}).([]byte) - return b + traceValues := metadata.ValueFromIncomingContext(ctx, "grpc-tags-bin") + if len(traceValues) == 0 { + return nil + } + return []byte(traceValues[len(traceValues)-1]) } -type incomingTraceKey struct{} -type outgoingTraceKey struct{} - // SetTrace attaches stats tagging data to the context, which will be sent in // the outgoing RPC with the header grpc-trace-bin. Subsequent calls to // SetTrace will overwrite the values from earlier calls. // -// NOTE: this is provided only for backward compatibility with existing clients -// and will likely be removed in an upcoming release. New uses should transmit -// this type of data using metadata with a different, non-reserved (i.e. does -// not begin with "grpc-") header name. +// Deprecated: set the `grpc-trace-bin` header in the metadata instead. func SetTrace(ctx context.Context, b []byte) context.Context { - return context.WithValue(ctx, outgoingTraceKey{}, b) + return metadata.AppendToOutgoingContext(ctx, "grpc-trace-bin", string(b)) } // Trace returns the trace from the context for the inbound RPC. // -// NOTE: this is provided only for backward compatibility with existing clients -// and will likely be removed in an upcoming release. New uses should transmit -// this type of data using metadata with a different, non-reserved (i.e. does -// not begin with "grpc-") header name. +// Deprecated: obtain the `grpc-trace-bin` header from metadata instead. func Trace(ctx context.Context) []byte { - b, _ := ctx.Value(incomingTraceKey{}).([]byte) - return b -} - -// SetIncomingTrace attaches stats tagging data to the context, to be read by -// the application (not sent in outgoing RPCs). It is intended for -// gRPC-internal use. -func SetIncomingTrace(ctx context.Context, b []byte) context.Context { - return context.WithValue(ctx, incomingTraceKey{}, b) -} - -// OutgoingTrace returns the trace from the context for the outbound RPC. It is -// intended for gRPC-internal use. -func OutgoingTrace(ctx context.Context) []byte { - b, _ := ctx.Value(outgoingTraceKey{}).([]byte) - return b + traceValues := metadata.ValueFromIncomingContext(ctx, "grpc-trace-bin") + if len(traceValues) == 0 { + return nil + } + return []byte(traceValues[len(traceValues)-1]) } diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index bb2b2a216..12163150b 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -23,7 +23,7 @@ import ( "errors" "io" "math" - "math/rand" + rand "math/rand/v2" "strconv" "sync" "time" @@ -113,7 +113,9 @@ type ClientStream interface { // SendMsg is generally called by generated code. On error, SendMsg aborts // the stream. If the error was generated by the client, the status is // returned directly; otherwise, io.EOF is returned and the status of - // the stream may be discovered using RecvMsg. + // the stream may be discovered using RecvMsg. For unary or server-streaming + // RPCs (StreamDesc.ClientStreams is false), a nil error is returned + // unconditionally. 
// // SendMsg blocks until: // - There is sufficient flow control to schedule m with the transport, or @@ -216,7 +218,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth var mc serviceconfig.MethodConfig var onCommit func() - var newStream = func(ctx context.Context, done func()) (iresolver.ClientStream, error) { + newStream := func(ctx context.Context, done func()) (iresolver.ClientStream, error) { return newClientStreamWithParams(ctx, desc, cc, method, mc, onCommit, done, opts...) } @@ -256,9 +258,9 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth } func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, mc serviceconfig.MethodConfig, onCommit, doneFunc func(), opts ...CallOption) (_ iresolver.ClientStream, err error) { - c := defaultCallInfo() + callInfo := defaultCallInfo() if mc.WaitForReady != nil { - c.failFast = !*mc.WaitForReady + callInfo.failFast = !*mc.WaitForReady } // Possible context leak: @@ -279,20 +281,20 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client }() for _, o := range opts { - if err := o.before(c); err != nil { + if err := o.before(callInfo); err != nil { return nil, toRPCErr(err) } } - c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize) - c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize) - if err := setCallInfoCodec(c); err != nil { + callInfo.maxSendMessageSize = getMaxSize(mc.MaxReqSize, callInfo.maxSendMessageSize, defaultClientMaxSendMessageSize) + callInfo.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, callInfo.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize) + if err := setCallInfoCodec(callInfo); err != nil { return nil, err } callHdr := &transport.CallHdr{ Host: cc.authority, Method: method, - ContentSubtype: c.contentSubtype, + ContentSubtype: callInfo.contentSubtype, DoneFunc: doneFunc, } @@ -300,22 +302,22 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client // set. In that case, also find the compressor from the encoding package. // Otherwise, use the compressor configured by the WithCompressor DialOption, // if set. 
- var cp Compressor - var comp encoding.Compressor - if ct := c.compressorType; ct != "" { + var compressorV0 Compressor + var compressorV1 encoding.Compressor + if ct := callInfo.compressorName; ct != "" { callHdr.SendCompress = ct if ct != encoding.Identity { - comp = encoding.GetCompressor(ct) - if comp == nil { + compressorV1 = encoding.GetCompressor(ct) + if compressorV1 == nil { return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct) } } - } else if cc.dopts.cp != nil { - callHdr.SendCompress = cc.dopts.cp.Type() - cp = cc.dopts.cp + } else if cc.dopts.compressorV0 != nil { + callHdr.SendCompress = cc.dopts.compressorV0.Type() + compressorV0 = cc.dopts.compressorV0 } - if c.creds != nil { - callHdr.Creds = c.creds + if callInfo.creds != nil { + callHdr.Creds = callInfo.creds } cs := &clientStream{ @@ -323,12 +325,12 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client ctx: ctx, methodConfig: &mc, opts: opts, - callInfo: c, + callInfo: callInfo, cc: cc, desc: desc, - codec: c.codec, - cp: cp, - comp: comp, + codec: callInfo.codec, + compressorV0: compressorV0, + compressorV1: compressorV1, cancel: cancel, firstAttempt: true, onCommit: onCommit, @@ -410,7 +412,7 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) return nil, ErrClientConnClosing } - ctx := newContextWithRPCInfo(cs.ctx, cs.callInfo.failFast, cs.callInfo.codec, cs.cp, cs.comp) + ctx := newContextWithRPCInfo(cs.ctx, cs.callInfo.failFast, cs.callInfo.codec, cs.compressorV0, cs.compressorV1) method := cs.callHdr.Method var beginTime time.Time shs := cs.cc.dopts.copts.StatsHandlers @@ -452,12 +454,12 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) } return &csAttempt{ - ctx: ctx, - beginTime: beginTime, - cs: cs, - dc: cs.cc.dopts.dc, - statsHandlers: shs, - trInfo: trInfo, + ctx: ctx, + beginTime: beginTime, + cs: cs, + decompressorV0: cs.cc.dopts.dc, + statsHandlers: shs, + trInfo: trInfo, }, nil } @@ -465,7 +467,7 @@ func (a *csAttempt) getTransport() error { cs := a.cs var err error - a.t, a.pickResult, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method) + a.transport, a.pickResult, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method) if err != nil { if de, ok := err.(dropError); ok { err = de.error @@ -474,7 +476,7 @@ func (a *csAttempt) getTransport() error { return err } if a.trInfo != nil { - a.trInfo.firstLine.SetRemoteAddr(a.t.RemoteAddr()) + a.trInfo.firstLine.SetRemoteAddr(a.transport.RemoteAddr()) } return nil } @@ -501,7 +503,7 @@ func (a *csAttempt) newStream() error { a.ctx = metadata.NewOutgoingContext(a.ctx, md) } - s, err := a.t.NewStream(a.ctx, cs.callHdr) + s, err := a.transport.NewStream(a.ctx, cs.callHdr) if err != nil { nse, ok := err.(*transport.NewStreamError) if !ok { @@ -516,9 +518,9 @@ func (a *csAttempt) newStream() error { // Unwrap and convert error. 
return toRPCErr(nse.Err) } - a.s = s + a.transportStream = s a.ctx = s.Context() - a.p = &parser{r: s, bufferPool: a.cs.cc.dopts.copts.BufferPool} + a.parser = &parser{r: s, bufferPool: a.cs.cc.dopts.copts.BufferPool} return nil } @@ -530,9 +532,9 @@ type clientStream struct { cc *ClientConn desc *StreamDesc - codec baseCodec - cp Compressor - comp encoding.Compressor + codec baseCodec + compressorV0 Compressor + compressorV1 encoding.Compressor cancel context.CancelFunc // cancels all attempts @@ -581,17 +583,17 @@ type replayOp struct { // csAttempt implements a single transport stream attempt within a // clientStream. type csAttempt struct { - ctx context.Context - cs *clientStream - t transport.ClientTransport - s *transport.Stream - p *parser - pickResult balancer.PickResult - - finished bool - dc Decompressor - decomp encoding.Compressor - decompSet bool + ctx context.Context + cs *clientStream + transport transport.ClientTransport + transportStream *transport.ClientStream + parser *parser + pickResult balancer.PickResult + + finished bool + decompressorV0 Decompressor + decompressorV1 encoding.Compressor + decompressorSet bool mu sync.Mutex // guards trInfo.tr // trInfo may be nil (if EnableTracing is false). @@ -637,14 +639,14 @@ func (a *csAttempt) shouldRetry(err error) (bool, error) { // RPC is finished or committed or was dropped by the picker; cannot retry. return false, err } - if a.s == nil && a.allowTransparentRetry { + if a.transportStream == nil && a.allowTransparentRetry { return true, nil } // Wait for the trailers. unprocessed := false - if a.s != nil { - <-a.s.Done() - unprocessed = a.s.Unprocessed() + if a.transportStream != nil { + <-a.transportStream.Done() + unprocessed = a.transportStream.Unprocessed() } if cs.firstAttempt && unprocessed { // First attempt, stream unprocessed: transparently retry. @@ -656,14 +658,14 @@ func (a *csAttempt) shouldRetry(err error) (bool, error) { pushback := 0 hasPushback := false - if a.s != nil { - if !a.s.TrailersOnly() { + if a.transportStream != nil { + if !a.transportStream.TrailersOnly() { return false, err } // TODO(retry): Move down if the spec changes to not check server pushback // before considering this a failure for throttling. - sps := a.s.Trailer()["grpc-retry-pushback-ms"] + sps := a.transportStream.Trailer()["grpc-retry-pushback-ms"] if len(sps) == 1 { var e error if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 { @@ -680,8 +682,8 @@ func (a *csAttempt) shouldRetry(err error) (bool, error) { } var code codes.Code - if a.s != nil { - code = a.s.Status().Code() + if a.transportStream != nil { + code = a.transportStream.Status().Code() } else { code = status.Code(err) } @@ -706,11 +708,10 @@ func (a *csAttempt) shouldRetry(err error) (bool, error) { cs.numRetriesSincePushback = 0 } else { fact := math.Pow(rp.BackoffMultiplier, float64(cs.numRetriesSincePushback)) - cur := float64(rp.InitialBackoff) * fact - if max := float64(rp.MaxBackoff); cur > max { - cur = max - } - dur = time.Duration(rand.Int63n(int64(cur))) + cur := min(float64(rp.InitialBackoff)*fact, float64(rp.MaxBackoff)) + // Apply jitter by multiplying with a random factor between 0.8 and 1.2 + cur *= 0.8 + 0.4*rand.Float64() + dur = time.Duration(int64(cur)) cs.numRetriesSincePushback++ } @@ -755,8 +756,8 @@ func (cs *clientStream) Context() context.Context { cs.commitAttempt() // No need to lock before using attempt, since we know it is committed and // cannot change. 
- if cs.attempt.s != nil { - return cs.attempt.s.Context() + if cs.attempt.transportStream != nil { + return cs.attempt.transportStream.Context() } return cs.ctx } @@ -793,9 +794,9 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) continue } if err == io.EOF { - <-a.s.Done() + <-a.transportStream.Done() } - if err == nil || (err == io.EOF && a.s.Status().Code() == codes.OK) { + if err == nil || (err == io.EOF && a.transportStream.Status().Code() == codes.OK) { onSuccess() cs.mu.Unlock() return err @@ -811,7 +812,7 @@ func (cs *clientStream) Header() (metadata.MD, error) { var m metadata.MD err := cs.withRetry(func(a *csAttempt) error { var err error - m, err = a.s.Header() + m, err = a.transportStream.Header() return toRPCErr(err) }, cs.commitAttemptLocked) @@ -855,10 +856,10 @@ func (cs *clientStream) Trailer() metadata.MD { // directions -- it will prevent races and should not meaningfully impact // performance. cs.commitAttempt() - if cs.attempt.s == nil { + if cs.attempt.transportStream == nil { return nil } - return cs.attempt.s.Trailer() + return cs.attempt.transportStream.Trailer() } func (cs *clientStream) replayBufferLocked(attempt *csAttempt) error { @@ -903,7 +904,7 @@ func (cs *clientStream) SendMsg(m any) (err error) { } // load hdr, payload, data - hdr, data, payload, pf, err := prepareMsg(m, cs.codec, cs.cp, cs.comp, cs.cc.dopts.copts.BufferPool) + hdr, data, payload, pf, err := prepareMsg(m, cs.codec, cs.compressorV0, cs.compressorV1, cs.cc.dopts.copts.BufferPool) if err != nil { return err } @@ -991,7 +992,7 @@ func (cs *clientStream) CloseSend() error { } cs.sentLast = true op := func(a *csAttempt) error { - a.t.Write(a.s, nil, nil, &transport.Options{Last: true}) + a.transportStream.Write(nil, nil, &transport.WriteOptions{Last: true}) // Always return nil; io.EOF is the only error that might make sense // instead, but there is no need to signal the client to call RecvMsg // as the only use left for the stream after CloseSend is to call @@ -1029,7 +1030,7 @@ func (cs *clientStream) finish(err error) { if cs.attempt != nil { cs.attempt.finish(err) // after functions all rely upon having a stream. - if cs.attempt.s != nil { + if cs.attempt.transportStream != nil { for _, o := range cs.opts { o.after(cs.callInfo, cs.attempt) } @@ -1083,7 +1084,7 @@ func (a *csAttempt) sendMsg(m any, hdr []byte, payld mem.BufferSlice, dataLength } a.mu.Unlock() } - if err := a.t.Write(a.s, hdr, payld, &transport.Options{Last: !cs.desc.ClientStreams}); err != nil { + if err := a.transportStream.Write(hdr, payld, &transport.WriteOptions{Last: !cs.desc.ClientStreams}); err != nil { if !cs.desc.ClientStreams { // For non-client-streaming RPCs, we return nil instead of EOF on error // because the generated code requires it. finish is not called; RecvMsg() @@ -1097,9 +1098,6 @@ func (a *csAttempt) sendMsg(m any, hdr []byte, payld mem.BufferSlice, dataLength sh.HandleRPC(a.ctx, outPayload(true, m, dataLength, payloadLength, time.Now())) } } - if channelz.IsOn() { - a.t.IncrMsgSent() - } return nil } @@ -1110,25 +1108,25 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { defer payInfo.free() } - if !a.decompSet { + if !a.decompressorSet { // Block until we receive headers containing received message encoding. 
- if ct := a.s.RecvCompress(); ct != "" && ct != encoding.Identity { - if a.dc == nil || a.dc.Type() != ct { + if ct := a.transportStream.RecvCompress(); ct != "" && ct != encoding.Identity { + if a.decompressorV0 == nil || a.decompressorV0.Type() != ct { // No configured decompressor, or it does not match the incoming // message encoding; attempt to find a registered compressor that does. - a.dc = nil - a.decomp = encoding.GetCompressor(ct) + a.decompressorV0 = nil + a.decompressorV1 = encoding.GetCompressor(ct) } } else { // No compression is used; disable our decompressor. - a.dc = nil + a.decompressorV0 = nil } // Only initialize this state once per stream. - a.decompSet = true + a.decompressorSet = true } - if err := recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decomp, false); err != nil { + if err := recv(a.parser, cs.codec, a.transportStream, a.decompressorV0, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decompressorV1, false); err != nil { if err == io.EOF { - if statusErr := a.s.Status().Err(); statusErr != nil { + if statusErr := a.transportStream.Status().Err(); statusErr != nil { return statusErr } return io.EOF // indicates successful end of stream. @@ -1153,17 +1151,14 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { Length: payInfo.uncompressedBytes.Len(), }) } - if channelz.IsOn() { - a.t.IncrMsgRecv() - } if cs.desc.ServerStreams { // Subsequent messages should be received by subsequent RecvMsg calls. return nil } // Special handling for non-server-stream rpcs. // This recv expects EOF or errors, so we don't collect inPayload. - if err := recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp, false); err == io.EOF { - return a.s.Status().Err() // non-server streaming Recv returns nil on success + if err := recv(a.parser, cs.codec, a.transportStream, a.decompressorV0, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decompressorV1, false); err == io.EOF { + return a.transportStream.Status().Err() // non-server streaming Recv returns nil on success } else if err != nil { return toRPCErr(err) } @@ -1182,20 +1177,20 @@ func (a *csAttempt) finish(err error) { err = nil } var tr metadata.MD - if a.s != nil { - a.t.CloseStream(a.s, err) - tr = a.s.Trailer() + if a.transportStream != nil { + a.transportStream.Close(err) + tr = a.transportStream.Trailer() } if a.pickResult.Done != nil { br := false - if a.s != nil { - br = a.s.BytesReceived() + if a.transportStream != nil { + br = a.transportStream.BytesReceived() } a.pickResult.Done(balancer.DoneInfo{ Err: err, Trailer: tr, - BytesSent: a.s != nil, + BytesSent: a.transportStream != nil, BytesReceived: br, ServerLoad: balancerload.Parse(tr), }) @@ -1277,7 +1272,7 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin // if set. 
var cp Compressor var comp encoding.Compressor - if ct := c.compressorType; ct != "" { + if ct := c.compressorName; ct != "" { callHdr.SendCompress = ct if ct != encoding.Identity { comp = encoding.GetCompressor(ct) @@ -1285,9 +1280,9 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct) } } - } else if ac.cc.dopts.cp != nil { - callHdr.SendCompress = ac.cc.dopts.cp.Type() - cp = ac.cc.dopts.cp + } else if ac.cc.dopts.compressorV0 != nil { + callHdr.SendCompress = ac.cc.dopts.compressorV0.Type() + cp = ac.cc.dopts.compressorV0 } if c.creds != nil { callHdr.Creds = c.creds @@ -1295,26 +1290,26 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin // Use a special addrConnStream to avoid retry. as := &addrConnStream{ - callHdr: callHdr, - ac: ac, - ctx: ctx, - cancel: cancel, - opts: opts, - callInfo: c, - desc: desc, - codec: c.codec, - cp: cp, - comp: comp, - t: t, - } - - s, err := as.t.NewStream(as.ctx, as.callHdr) + callHdr: callHdr, + ac: ac, + ctx: ctx, + cancel: cancel, + opts: opts, + callInfo: c, + desc: desc, + codec: c.codec, + sendCompressorV0: cp, + sendCompressorV1: comp, + transport: t, + } + + s, err := as.transport.NewStream(as.ctx, as.callHdr) if err != nil { err = toRPCErr(err) return nil, err } - as.s = s - as.p = &parser{r: s, bufferPool: ac.dopts.copts.BufferPool} + as.transportStream = s + as.parser = &parser{r: s, bufferPool: ac.dopts.copts.BufferPool} ac.incrCallsStarted() if desc != unaryStreamDesc { // Listen on stream context to cleanup when the stream context is @@ -1340,29 +1335,31 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin } type addrConnStream struct { - s *transport.Stream - ac *addrConn - callHdr *transport.CallHdr - cancel context.CancelFunc - opts []CallOption - callInfo *callInfo - t transport.ClientTransport - ctx context.Context - sentLast bool - desc *StreamDesc - codec baseCodec - cp Compressor - comp encoding.Compressor - decompSet bool - dc Decompressor - decomp encoding.Compressor - p *parser - mu sync.Mutex - finished bool + transportStream *transport.ClientStream + ac *addrConn + callHdr *transport.CallHdr + cancel context.CancelFunc + opts []CallOption + callInfo *callInfo + transport transport.ClientTransport + ctx context.Context + sentLast bool + desc *StreamDesc + codec baseCodec + sendCompressorV0 Compressor + sendCompressorV1 encoding.Compressor + decompressorSet bool + decompressorV0 Decompressor + decompressorV1 encoding.Compressor + parser *parser + + // mu guards finished and is held for the entire finish method. 
+ mu sync.Mutex + finished bool } func (as *addrConnStream) Header() (metadata.MD, error) { - m, err := as.s.Header() + m, err := as.transportStream.Header() if err != nil { as.finish(toRPCErr(err)) } @@ -1370,7 +1367,7 @@ func (as *addrConnStream) Header() (metadata.MD, error) { } func (as *addrConnStream) Trailer() metadata.MD { - return as.s.Trailer() + return as.transportStream.Trailer() } func (as *addrConnStream) CloseSend() error { @@ -1380,7 +1377,7 @@ func (as *addrConnStream) CloseSend() error { } as.sentLast = true - as.t.Write(as.s, nil, nil, &transport.Options{Last: true}) + as.transportStream.Write(nil, nil, &transport.WriteOptions{Last: true}) // Always return nil; io.EOF is the only error that might make sense // instead, but there is no need to signal the client to call RecvMsg // as the only use left for the stream after CloseSend is to call @@ -1389,7 +1386,7 @@ func (as *addrConnStream) CloseSend() error { } func (as *addrConnStream) Context() context.Context { - return as.s.Context() + return as.transportStream.Context() } func (as *addrConnStream) SendMsg(m any) (err error) { @@ -1411,7 +1408,7 @@ func (as *addrConnStream) SendMsg(m any) (err error) { } // load hdr, payload, data - hdr, data, payload, pf, err := prepareMsg(m, as.codec, as.cp, as.comp, as.ac.dopts.copts.BufferPool) + hdr, data, payload, pf, err := prepareMsg(m, as.codec, as.sendCompressorV0, as.sendCompressorV1, as.ac.dopts.copts.BufferPool) if err != nil { return err } @@ -1430,7 +1427,7 @@ func (as *addrConnStream) SendMsg(m any) (err error) { return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payload.Len(), *as.callInfo.maxSendMessageSize) } - if err := as.t.Write(as.s, hdr, payload, &transport.Options{Last: !as.desc.ClientStreams}); err != nil { + if err := as.transportStream.Write(hdr, payload, &transport.WriteOptions{Last: !as.desc.ClientStreams}); err != nil { if !as.desc.ClientStreams { // For non-client-streaming RPCs, we return nil instead of EOF on error // because the generated code requires it. finish is not called; RecvMsg() @@ -1440,9 +1437,6 @@ func (as *addrConnStream) SendMsg(m any) (err error) { return io.EOF } - if channelz.IsOn() { - as.t.IncrMsgSent() - } return nil } @@ -1454,25 +1448,25 @@ func (as *addrConnStream) RecvMsg(m any) (err error) { } }() - if !as.decompSet { + if !as.decompressorSet { // Block until we receive headers containing received message encoding. - if ct := as.s.RecvCompress(); ct != "" && ct != encoding.Identity { - if as.dc == nil || as.dc.Type() != ct { + if ct := as.transportStream.RecvCompress(); ct != "" && ct != encoding.Identity { + if as.decompressorV0 == nil || as.decompressorV0.Type() != ct { // No configured decompressor, or it does not match the incoming // message encoding; attempt to find a registered compressor that does. - as.dc = nil - as.decomp = encoding.GetCompressor(ct) + as.decompressorV0 = nil + as.decompressorV1 = encoding.GetCompressor(ct) } } else { // No compression is used; disable our decompressor. - as.dc = nil + as.decompressorV0 = nil } // Only initialize this state once per stream. 
- as.decompSet = true + as.decompressorSet = true } - if err := recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp, false); err != nil { + if err := recv(as.parser, as.codec, as.transportStream, as.decompressorV0, m, *as.callInfo.maxReceiveMessageSize, nil, as.decompressorV1, false); err != nil { if err == io.EOF { - if statusErr := as.s.Status().Err(); statusErr != nil { + if statusErr := as.transportStream.Status().Err(); statusErr != nil { return statusErr } return io.EOF // indicates successful end of stream. @@ -1480,9 +1474,6 @@ func (as *addrConnStream) RecvMsg(m any) (err error) { return toRPCErr(err) } - if channelz.IsOn() { - as.t.IncrMsgRecv() - } if as.desc.ServerStreams { // Subsequent messages should be received by subsequent RecvMsg calls. return nil @@ -1490,8 +1481,8 @@ func (as *addrConnStream) RecvMsg(m any) (err error) { // Special handling for non-server-stream rpcs. // This recv expects EOF or errors, so we don't collect inPayload. - if err := recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp, false); err == io.EOF { - return as.s.Status().Err() // non-server streaming Recv returns nil on success + if err := recv(as.parser, as.codec, as.transportStream, as.decompressorV0, m, *as.callInfo.maxReceiveMessageSize, nil, as.decompressorV1, false); err == io.EOF { + return as.transportStream.Status().Err() // non-server streaming Recv returns nil on success } else if err != nil { return toRPCErr(err) } @@ -1509,8 +1500,8 @@ func (as *addrConnStream) finish(err error) { // Ending a stream with EOF indicates a success. err = nil } - if as.s != nil { - as.t.CloseStream(as.s, err) + if as.transportStream != nil { + as.transportStream.Close(err) } if err != nil { @@ -1577,15 +1568,14 @@ type ServerStream interface { // serverStream implements a server side Stream. type serverStream struct { ctx context.Context - t transport.ServerTransport - s *transport.Stream + s *transport.ServerStream p *parser codec baseCodec - cp Compressor - dc Decompressor - comp encoding.Compressor - decomp encoding.Compressor + compressorV0 Compressor + compressorV1 encoding.Compressor + decompressorV0 Decompressor + decompressorV1 encoding.Compressor sendCompressorName string @@ -1628,7 +1618,7 @@ func (ss *serverStream) SendHeader(md metadata.MD) error { return status.Error(codes.Internal, err.Error()) } - err = ss.t.WriteHeader(ss.s, md) + err = ss.s.SendHeader(md) if len(ss.binlogs) != 0 && !ss.serverHeaderBinlogged { h, _ := ss.s.Header() sh := &binarylog.ServerHeader{ @@ -1668,7 +1658,7 @@ func (ss *serverStream) SendMsg(m any) (err error) { } if err != nil && err != io.EOF { st, _ := status.FromError(toRPCErr(err)) - ss.t.WriteStatus(ss.s, st) + ss.s.WriteStatus(st) // Non-user specified status was sent out. This should be an error // case (as a server side Cancel maybe). // @@ -1676,20 +1666,17 @@ func (ss *serverStream) SendMsg(m any) (err error) { // status from the service handler, we will log that error instead. // This behavior is similar to an interceptor. } - if channelz.IsOn() && err == nil { - ss.t.IncrMsgSent() - } }() // Server handler could have set new compressor by calling SetSendCompressor. // In case it is set, we need to use it for compressing outbound message. 
if sendCompressorsName := ss.s.SendCompress(); sendCompressorsName != ss.sendCompressorName { - ss.comp = encoding.GetCompressor(sendCompressorsName) + ss.compressorV1 = encoding.GetCompressor(sendCompressorsName) ss.sendCompressorName = sendCompressorsName } // load hdr, payload, data - hdr, data, payload, pf, err := prepareMsg(m, ss.codec, ss.cp, ss.comp, ss.p.bufferPool) + hdr, data, payload, pf, err := prepareMsg(m, ss.codec, ss.compressorV0, ss.compressorV1, ss.p.bufferPool) if err != nil { return err } @@ -1710,7 +1697,7 @@ func (ss *serverStream) SendMsg(m any) (err error) { if payloadLen > ss.maxSendMessageSize { return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payloadLen, ss.maxSendMessageSize) } - if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil { + if err := ss.s.Write(hdr, payload, &transport.WriteOptions{Last: false}); err != nil { return toRPCErr(err) } @@ -1756,7 +1743,7 @@ func (ss *serverStream) RecvMsg(m any) (err error) { } if err != nil && err != io.EOF { st, _ := status.FromError(toRPCErr(err)) - ss.t.WriteStatus(ss.s, st) + ss.s.WriteStatus(st) // Non-user specified status was sent out. This should be an error // case (as a server side Cancel maybe). // @@ -1764,16 +1751,13 @@ func (ss *serverStream) RecvMsg(m any) (err error) { // status from the service handler, we will log that error instead. // This behavior is similar to an interceptor. } - if channelz.IsOn() && err == nil { - ss.t.IncrMsgRecv() - } }() var payInfo *payloadInfo if len(ss.statsHandler) != 0 || len(ss.binlogs) != 0 { payInfo = &payloadInfo{} defer payInfo.free() } - if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp, true); err != nil { + if err := recv(ss.p, ss.codec, ss.s, ss.decompressorV0, m, ss.maxReceiveMessageSize, payInfo, ss.decompressorV1, true); err != nil { if err == io.EOF { if len(ss.binlogs) != 0 { chc := &binarylog.ClientHalfClose{} @@ -1784,7 +1768,7 @@ func (ss *serverStream) RecvMsg(m any) (err error) { return err } if err == io.ErrUnexpectedEOF { - err = status.Errorf(codes.Internal, io.ErrUnexpectedEOF.Error()) + err = status.Error(codes.Internal, io.ErrUnexpectedEOF.Error()) } return toRPCErr(err) } diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index a96b6a6bf..783c41f78 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.67.1" +const Version = "1.71.0" diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go index 8f9e592f8..737d6876d 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go @@ -192,11 +192,6 @@ func (d decoder) unmarshalMessage(m protoreflect.Message, skipTypeURL bool) erro fd = fieldDescs.ByTextName(name) } } - if flags.ProtoLegacy { - if fd != nil && fd.IsWeak() && fd.Message().IsPlaceholder() { - fd = nil // reset since the weak reference is not linked in - } - } if fd == nil { // Field is unknown. 
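Context for the serverStream.SendMsg hunk above: the send path re-reads ss.s.SendCompress() on every message because a handler may swap the outbound compressor mid-RPC via grpc.SetSendCompressor. A minimal handler sketch against that (experimental but real) API follows; the echoServer service and its request/response types are hypothetical placeholders, not part of this patch.

package server

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/encoding/gzip" // registers the "gzip" compressor
)

type echoServer struct{}

type echoRequest struct{}  // stand-in for a generated request type
type echoResponse struct{} // stand-in for a generated response type

// Echo opts this RPC into gzip-compressed responses. SetSendCompressor
// must be called before the first response message is written, which is
// why SendMsg re-checks the negotiated compressor name on every write.
func (s *echoServer) Echo(ctx context.Context, _ *echoRequest) (*echoResponse, error) {
	if err := grpc.SetSendCompressor(ctx, gzip.Name); err != nil {
		return nil, err
	}
	return &echoResponse{}, nil
}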
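The protojson decode.go hunk directly above deletes the ProtoLegacy weak-field special case, so a field name that no descriptor resolves now always reaches the generic unknown-field handling. As a sketch of how that handling surfaces to callers of the protojson API (the payload and the choice of emptypb.Empty are illustrative assumptions, not part of this patch):

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/emptypb"
)

func main() {
	// A JSON member that no field descriptor resolves.
	payload := []byte(`{"no_such_field": 1}`)

	var m emptypb.Empty

	// Default options: an unknown member is an error.
	if err := protojson.Unmarshal(payload, &m); err != nil {
		fmt.Println("strict:", err)
	}

	// DiscardUnknown: the same member is silently skipped.
	lenient := protojson.UnmarshalOptions{DiscardUnknown: true}
	if err := lenient.Unmarshal(payload, &m); err != nil {
		fmt.Println("lenient unexpectedly failed:", err)
	}
}

With DiscardUnknown set the second call returns nil; without it, protojson reports the unknown "no_such_field" member.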
diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go index 4b177c820..e9fe10394 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go @@ -348,7 +348,11 @@ func (d decoder) unmarshalAnyValue(unmarshal unmarshalFunc, m protoreflect.Messa switch tok.Kind() { case json.ObjectClose: if !found { - return d.newError(tok.Pos(), `missing "value" field`) + // We tolerate an omitted `value` field with the google.protobuf.Empty Well-Known-Type, + // for compatibility with other proto runtimes that have interpreted the spec differently. + if m.Descriptor().FullName() != genid.Empty_message_fullname { + return d.newError(tok.Pos(), `missing "value" field`) + } } return nil diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go index 24bc98ac4..b53805056 100644 --- a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go +++ b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go @@ -185,11 +185,6 @@ func (d decoder) unmarshalMessage(m protoreflect.Message, checkDelims bool) erro } else if xtErr != nil && xtErr != protoregistry.NotFound { return d.newError(tok.Pos(), "unable to resolve [%s]: %v", tok.RawString(), xtErr) } - if flags.ProtoLegacy { - if fd != nil && fd.IsWeak() && fd.Message().IsPlaceholder() { - fd = nil // reset since the weak reference is not linked in - } - } // Handle unknown fields. if fd == nil { diff --git a/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb index ff6a38360add36f53d48bb0863b701696e0d7b2d..5a57ef6f3c80a4a930b7bdb33b039ea94d1eb5f2 100644 GIT binary patch literal 138 zcmd;*muO*EV!mX@pe4$|D8MAaq`<7fXux#Ijt$6VkYMDJmv|0Wz$CyZ!KlClRKN&Q wzyMY7f?Y`%s2WL*1th1%ddZFnY{E-+C6MVz3P75fB^b3pHY+@1*LcYe04AXnGXMYp literal 93 zcmd;*mUzal#C*w)K}(Q>QGiK;Nr72|(SYfa9TNv5m$bxlxFnMRqXeS@6Ht;7B*_4j Ve8H{+(u69m1u{(G8N0>{b^xZ!4_5#H diff --git a/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go b/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go index 08dad7692..bf1aba0e8 100644 --- a/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go +++ b/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go @@ -10,4 +10,9 @@ import "google.golang.org/protobuf/types/descriptorpb" const ( Minimum = descriptorpb.Edition_EDITION_PROTO2 Maximum = descriptorpb.Edition_EDITION_2023 + + // MaximumKnown is the maximum edition that is known to Go Protobuf, but not + // declared as supported. In other words: end users cannot use it, but + // testprotos inside Go Protobuf can. + MaximumKnown = descriptorpb.Edition_EDITION_2024 ) diff --git a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go index 7e87c7604..669133d04 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go @@ -26,7 +26,7 @@ var byteType = reflect.TypeOf(byte(0)) // The type is the underlying field type (e.g., a repeated field may be // represented by []T, but the Go type passed in is just T). // A list of enum value descriptors must be provided for enum fields. 
-// This does not populate the Enum or Message (except for weak message). +// This does not populate the Enum or Message. // // This function is a best effort attempt; parsing errors are ignored. func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescriptors) protoreflect.FieldDescriptor { @@ -109,9 +109,6 @@ func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescri } case s == "packed": f.L1.EditionFeatures.IsPacked = true - case strings.HasPrefix(s, "weak="): - f.L1.IsWeak = true - f.L1.Message = filedesc.PlaceholderMessage(protoreflect.FullName(s[len("weak="):])) case strings.HasPrefix(s, "def="): // The default tag is special in that everything afterwards is the // default regardless of the presence of commas. @@ -183,9 +180,6 @@ func Marshal(fd protoreflect.FieldDescriptor, enumName string) string { // the exact same semantics from the previous generator. tag = append(tag, "json="+jsonName) } - if fd.IsWeak() { - tag = append(tag, "weak="+string(fd.Message().FullName())) - } // The previous implementation does not tag extension fields as proto3, // even when the field is defined in a proto3 file. Match that behavior // for consistency. diff --git a/vendor/google.golang.org/protobuf/internal/errors/is_go112.go b/vendor/google.golang.org/protobuf/internal/errors/is_go112.go deleted file mode 100644 index fbcd34920..000000000 --- a/vendor/google.golang.org/protobuf/internal/errors/is_go112.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.13 -// +build !go1.13 - -package errors - -import "reflect" - -// Is is a copy of Go 1.13's errors.Is for use with older Go versions. -func Is(err, target error) bool { - if target == nil { - return err == target - } - - isComparable := reflect.TypeOf(target).Comparable() - for { - if isComparable && err == target { - return true - } - if x, ok := err.(interface{ Is(error) bool }); ok && x.Is(target) { - return true - } - if err = unwrap(err); err == nil { - return false - } - } -} - -func unwrap(err error) error { - u, ok := err.(interface { - Unwrap() error - }) - if !ok { - return nil - } - return u.Unwrap() -} diff --git a/vendor/google.golang.org/protobuf/internal/errors/is_go113.go b/vendor/google.golang.org/protobuf/internal/errors/is_go113.go deleted file mode 100644 index 5e72f1cde..000000000 --- a/vendor/google.golang.org/protobuf/internal/errors/is_go113.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.13 -// +build go1.13 - -package errors - -import "errors" - -// Is is errors.Is. 
-func Is(err, target error) bool { return errors.Is(err, target) } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go index fa790e0ff..688aabe43 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go @@ -19,7 +19,6 @@ import ( "google.golang.org/protobuf/internal/pragma" "google.golang.org/protobuf/internal/strs" "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" ) // Edition is an Enum for proto2.Edition @@ -32,6 +31,7 @@ const ( EditionProto2 Edition = 998 EditionProto3 Edition = 999 Edition2023 Edition = 1000 + Edition2024 Edition = 1001 EditionUnsupported Edition = 100000 ) @@ -77,31 +77,48 @@ type ( Locations SourceLocations } + // EditionFeatures is a frequently-instantiated struct, so please take care + // to minimize padding when adding new fields to this struct (add them in + // the right place/order). EditionFeatures struct { + // StripEnumPrefix determines if the plugin generates enum value + // constants as-is, with their prefix stripped, or both variants. + StripEnumPrefix int + // IsFieldPresence is true if field_presence is EXPLICIT // https://protobuf.dev/editions/features/#field_presence IsFieldPresence bool + // IsLegacyRequired is true if field_presence is LEGACY_REQUIRED // https://protobuf.dev/editions/features/#field_presence IsLegacyRequired bool + // IsOpenEnum is true if enum_type is OPEN // https://protobuf.dev/editions/features/#enum_type IsOpenEnum bool + // IsPacked is true if repeated_field_encoding is PACKED // https://protobuf.dev/editions/features/#repeated_field_encoding IsPacked bool + // IsUTF8Validated is true if utf_validation is VERIFY // https://protobuf.dev/editions/features/#utf8_validation IsUTF8Validated bool + // IsDelimitedEncoded is true if message_encoding is DELIMITED // https://protobuf.dev/editions/features/#message_encoding IsDelimitedEncoded bool + // IsJSONCompliant is true if json_format is ALLOW // https://protobuf.dev/editions/features/#json_format IsJSONCompliant bool + // GenerateLegacyUnmarshalJSON determines if the plugin generates the // UnmarshalJSON([]byte) error method for enums. GenerateLegacyUnmarshalJSON bool + // APILevel controls which API (Open, Hybrid or Opaque) should be used + // for generated code (.pb.go files). 
+ APILevel int } ) @@ -257,7 +274,6 @@ type ( Kind protoreflect.Kind StringName stringName IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto - IsWeak bool // promoted from google.protobuf.FieldOptions IsLazy bool // promoted from google.protobuf.FieldOptions Default defaultValue ContainingOneof protoreflect.OneofDescriptor // must be consistent with Message.Oneofs.Fields @@ -351,7 +367,7 @@ func (fd *Field) IsPacked() bool { return fd.L1.EditionFeatures.IsPacked } func (fd *Field) IsExtension() bool { return false } -func (fd *Field) IsWeak() bool { return fd.L1.IsWeak } +func (fd *Field) IsWeak() bool { return false } func (fd *Field) IsLazy() bool { return fd.L1.IsLazy } func (fd *Field) IsList() bool { return fd.Cardinality() == protoreflect.Repeated && !fd.IsMap() } func (fd *Field) IsMap() bool { return fd.Message() != nil && fd.Message().IsMapEntry() } @@ -378,11 +394,6 @@ func (fd *Field) Enum() protoreflect.EnumDescriptor { return fd.L1.Enum } func (fd *Field) Message() protoreflect.MessageDescriptor { - if fd.L1.IsWeak { - if d, _ := protoregistry.GlobalFiles.FindDescriptorByName(fd.L1.Message.FullName()); d != nil { - return d.(protoreflect.MessageDescriptor) - } - } return fd.L1.Message } func (fd *Field) IsMapEntry() bool { diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go index 67a51b327..d4c94458b 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go @@ -32,11 +32,6 @@ func (file *File) resolveMessages() { for j := range md.L2.Fields.List { fd := &md.L2.Fields.List[j] - // Weak fields are resolved upon actual use. - if fd.L1.IsWeak { - continue - } - // Resolve message field dependency. 
switch fd.L1.Kind { case protoreflect.EnumKind: @@ -150,8 +145,6 @@ func (fd *File) unmarshalFull(b []byte) { switch num { case genid.FileDescriptorProto_PublicDependency_field_number: fd.L2.Imports[v].IsPublic = true - case genid.FileDescriptorProto_WeakDependency_field_number: - fd.L2.Imports[v].IsWeak = true } case protowire.BytesType: v, m := protowire.ConsumeBytes(b) @@ -502,8 +495,6 @@ func (fd *Field) unmarshalOptions(b []byte) { switch num { case genid.FieldOptions_Packed_field_number: fd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v) - case genid.FieldOptions_Weak_field_number: - fd.L1.IsWeak = protowire.DecodeBool(v) case genid.FieldOptions_Lazy_field_number: fd.L1.IsLazy = protowire.DecodeBool(v) case FieldOptions_EnforceUTF8: diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go index fd4d0c83d..10132c9b3 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go @@ -32,6 +32,14 @@ func unmarshalGoFeature(b []byte, parent EditionFeatures) EditionFeatures { v, m := protowire.ConsumeVarint(b) b = b[m:] parent.GenerateLegacyUnmarshalJSON = protowire.DecodeBool(v) + case genid.GoFeatures_ApiLevel_field_number: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + parent.APILevel = int(v) + case genid.GoFeatures_StripEnumPrefix_field_number: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + parent.StripEnumPrefix = int(v) default: panic(fmt.Sprintf("unknown field number %d while unmarshalling GoFeatures", num)) } diff --git a/vendor/google.golang.org/protobuf/internal/filetype/build.go b/vendor/google.golang.org/protobuf/internal/filetype/build.go index ba83fea44..e1b4130bd 100644 --- a/vendor/google.golang.org/protobuf/internal/filetype/build.go +++ b/vendor/google.golang.org/protobuf/internal/filetype/build.go @@ -63,7 +63,7 @@ type Builder struct { // message declarations in "flattened ordering". // // Dependencies are Go types for enums or messages referenced by - // message fields (excluding weak fields), for parent extended messages of + // message fields, for parent extended messages of // extension fields, for enums or messages referenced by extension fields, // and for input and output messages referenced by service methods. // Dependencies must come after declarations, but the ordering of diff --git a/vendor/google.golang.org/protobuf/internal/flags/flags.go b/vendor/google.golang.org/protobuf/internal/flags/flags.go index 58372dd34..a06ccabc2 100644 --- a/vendor/google.golang.org/protobuf/internal/flags/flags.go +++ b/vendor/google.golang.org/protobuf/internal/flags/flags.go @@ -6,7 +6,7 @@ package flags // ProtoLegacy specifies whether to enable support for legacy functionality -// such as MessageSets, weak fields, and various other obscure behavior +// such as MessageSets, and various other obscure behavior // that is necessary to maintain backwards compatibility with proto1 or // the pre-release variants of proto2 and proto3. // diff --git a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go index 7f67cbb6e..f5ee7f5c2 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go @@ -21,13 +21,47 @@ const ( // Field names for pb.GoFeatures. 
const ( GoFeatures_LegacyUnmarshalJsonEnum_field_name protoreflect.Name = "legacy_unmarshal_json_enum" + GoFeatures_ApiLevel_field_name protoreflect.Name = "api_level" + GoFeatures_StripEnumPrefix_field_name protoreflect.Name = "strip_enum_prefix" GoFeatures_LegacyUnmarshalJsonEnum_field_fullname protoreflect.FullName = "pb.GoFeatures.legacy_unmarshal_json_enum" + GoFeatures_ApiLevel_field_fullname protoreflect.FullName = "pb.GoFeatures.api_level" + GoFeatures_StripEnumPrefix_field_fullname protoreflect.FullName = "pb.GoFeatures.strip_enum_prefix" ) // Field numbers for pb.GoFeatures. const ( GoFeatures_LegacyUnmarshalJsonEnum_field_number protoreflect.FieldNumber = 1 + GoFeatures_ApiLevel_field_number protoreflect.FieldNumber = 2 + GoFeatures_StripEnumPrefix_field_number protoreflect.FieldNumber = 3 +) + +// Full and short names for pb.GoFeatures.APILevel. +const ( + GoFeatures_APILevel_enum_fullname = "pb.GoFeatures.APILevel" + GoFeatures_APILevel_enum_name = "APILevel" +) + +// Enum values for pb.GoFeatures.APILevel. +const ( + GoFeatures_API_LEVEL_UNSPECIFIED_enum_value = 0 + GoFeatures_API_OPEN_enum_value = 1 + GoFeatures_API_HYBRID_enum_value = 2 + GoFeatures_API_OPAQUE_enum_value = 3 +) + +// Full and short names for pb.GoFeatures.StripEnumPrefix. +const ( + GoFeatures_StripEnumPrefix_enum_fullname = "pb.GoFeatures.StripEnumPrefix" + GoFeatures_StripEnumPrefix_enum_name = "StripEnumPrefix" +) + +// Enum values for pb.GoFeatures.StripEnumPrefix. +const ( + GoFeatures_STRIP_ENUM_PREFIX_UNSPECIFIED_enum_value = 0 + GoFeatures_STRIP_ENUM_PREFIX_KEEP_enum_value = 1 + GoFeatures_STRIP_ENUM_PREFIX_GENERATE_BOTH_enum_value = 2 + GoFeatures_STRIP_ENUM_PREFIX_STRIP_enum_value = 3 ) // Extension numbers diff --git a/vendor/google.golang.org/protobuf/internal/genid/goname.go b/vendor/google.golang.org/protobuf/internal/genid/goname.go index 693d2e9e1..99bb95baf 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/goname.go +++ b/vendor/google.golang.org/protobuf/internal/genid/goname.go @@ -11,15 +11,10 @@ const ( SizeCache_goname = "sizeCache" SizeCacheA_goname = "XXX_sizecache" - WeakFields_goname = "weakFields" - WeakFieldsA_goname = "XXX_weak" - UnknownFields_goname = "unknownFields" UnknownFieldsA_goname = "XXX_unrecognized" ExtensionFields_goname = "extensionFields" ExtensionFieldsA_goname = "XXX_InternalExtensions" ExtensionFieldsB_goname = "XXX_extensions" - - WeakFieldPrefix_goname = "XXX_weak_" ) diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain_go121.go b/vendor/google.golang.org/protobuf/internal/genid/name.go similarity index 50% rename from vendor/golang.org/x/tools/internal/versions/toolchain_go121.go rename to vendor/google.golang.org/protobuf/internal/genid/name.go index b7ef216df..224f33930 100644 --- a/vendor/golang.org/x/tools/internal/versions/toolchain_go121.go +++ b/vendor/google.golang.org/protobuf/internal/genid/name.go @@ -2,13 +2,11 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-//go:build go1.21 -// +build go1.21 +package genid -package versions +const ( + NoUnkeyedLiteral_goname = "noUnkeyedLiteral" + NoUnkeyedLiteralA_goname = "XXX_NoUnkeyedLiteral" -func init() { - if Compare(toolchain, Go1_21) < 0 { - toolchain = Go1_21 - } -} + BuilderSuffix_goname = "_builder" +) diff --git a/vendor/google.golang.org/protobuf/internal/impl/api_export_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/api_export_opaque.go new file mode 100644 index 000000000..6075d6f69 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/api_export_opaque.go @@ -0,0 +1,128 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "strconv" + "sync/atomic" + "unsafe" + + "google.golang.org/protobuf/reflect/protoreflect" +) + +func (Export) UnmarshalField(msg any, fieldNum int32) { + UnmarshalField(msg.(protoreflect.ProtoMessage).ProtoReflect(), protoreflect.FieldNumber(fieldNum)) +} + +// Present checks the presence set for a certain field number (zero +// based, ordered by appearance in original proto file). part is +// a pointer to the correct element in the bitmask array, num is the +// field number unaltered. Example (field number 70 -> part = +// &m.XXX_presence[2], num = 70) +func (Export) Present(part *uint32, num uint32) bool { + // This hook will read an unprotected shadow presence set if + // we're running under the race detector + raceDetectHookPresent(part, num) + return atomic.LoadUint32(part)&(1<<(num%32)) > 0 +} + +// SetPresent adds a field to the presence set. part is a pointer to +// the relevant element in the array and num is the field number +// unaltered. size is the number of fields in the protocol +// buffer. +func (Export) SetPresent(part *uint32, num uint32, size uint32) { + // This hook will mutate an unprotected shadow presence set if + // we're running under the race detector + raceDetectHookSetPresent(part, num, presenceSize(size)) + for { + old := atomic.LoadUint32(part) + if atomic.CompareAndSwapUint32(part, old, old|(1<<(num%32))) { + return + } + } +} + +// SetPresentNonAtomic is like SetPresent, but operates non-atomically. +// It is meant for use by builder methods, where the message is known not +// to be accessible yet by other goroutines. +func (Export) SetPresentNonAtomic(part *uint32, num uint32, size uint32) { + // This hook will mutate an unprotected shadow presence set if + // we're running under the race detector + raceDetectHookSetPresent(part, num, presenceSize(size)) + *part |= 1 << (num % 32) +} + +// ClearPresent removes a field from the presence set. part is a +// pointer to the relevant element in the presence array and num is +// the field number unaltered. 
+func (Export) ClearPresent(part *uint32, num uint32) { + // This hook will mutate an unprotected shadow presence set if + // we're running under the race detector + raceDetectHookClearPresent(part, num) + for { + old := atomic.LoadUint32(part) + if atomic.CompareAndSwapUint32(part, old, old&^(1<<(num%32))) { + return + } + } +} + +// interfaceToPointer takes a pointer to an empty interface whose value is a +// pointer type, and converts it into a "pointer" that points to the same +// target +func interfaceToPointer(i *any) pointer { + return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} +} + +func (p pointer) atomicGetPointer() pointer { + return pointer{p: atomic.LoadPointer((*unsafe.Pointer)(p.p))} +} + +func (p pointer) atomicSetPointer(q pointer) { + atomic.StorePointer((*unsafe.Pointer)(p.p), q.p) +} + +// AtomicCheckPointerIsNil takes an interface (which is a pointer to a +// pointer) and returns true if the pointed-to pointer is nil (using an +// atomic load). This function is inlineable and, on x86, just becomes a +// simple load and compare. +func (Export) AtomicCheckPointerIsNil(ptr any) bool { + return interfaceToPointer(&ptr).atomicGetPointer().IsNil() +} + +// AtomicSetPointer takes two interfaces (first is a pointer to a pointer, +// second is a pointer) and atomically sets the second pointer into location +// referenced by first pointer. Unfortunately, atomicSetPointer() does not inline +// (even on x86), so this does not become a simple store on x86. +func (Export) AtomicSetPointer(dstPtr, valPtr any) { + interfaceToPointer(&dstPtr).atomicSetPointer(interfaceToPointer(&valPtr)) +} + +// AtomicLoadPointer loads the pointer at the location pointed at by src, +// and stores that pointer value into the location pointed at by dst. +func (Export) AtomicLoadPointer(ptr Pointer, dst Pointer) { + *(*unsafe.Pointer)(unsafe.Pointer(dst)) = atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(ptr))) +} + +// AtomicInitializePointer makes ptr and dst point to the same value. +// +// If *ptr is a nil pointer, it sets *ptr = *dst. +// +// If *ptr is a non-nil pointer, it sets *dst = *ptr. +func (Export) AtomicInitializePointer(ptr Pointer, dst Pointer) { + if !atomic.CompareAndSwapPointer((*unsafe.Pointer)(ptr), unsafe.Pointer(nil), *(*unsafe.Pointer)(dst)) { + *(*unsafe.Pointer)(unsafe.Pointer(dst)) = atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(ptr))) + } +} + +// MessageFieldStringOf returns the field formatted as a string, +// either as the field name if resolvable otherwise as a decimal string. +func (Export) MessageFieldStringOf(md protoreflect.MessageDescriptor, n protoreflect.FieldNumber) string { + fd := md.Fields().ByNumber(n) + if fd != nil { + return string(fd.Name()) + } + return strconv.Itoa(int(n)) +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/bitmap.go b/vendor/google.golang.org/protobuf/internal/impl/bitmap.go new file mode 100644 index 000000000..ea276547c --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/bitmap.go @@ -0,0 +1,34 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !race + +package impl + +// There is no additional data as we're not running under race detector. +type RaceDetectHookData struct{} + +// Empty stubs for when not using the race detector. Calls to these from index.go should be optimized away. 
+func (presence) raceDetectHookPresent(num uint32) {} +func (presence) raceDetectHookSetPresent(num uint32, size presenceSize) {} +func (presence) raceDetectHookClearPresent(num uint32) {} +func (presence) raceDetectHookAllocAndCopy(src presence) {} + +// raceDetectHookPresent is called by the generated file interface +// (*proto.internalFuncs) Present to optionally read an unprotected +// shadow bitmap when race detection is enabled. In regular code it is +// a noop. +func raceDetectHookPresent(field *uint32, num uint32) {} + +// raceDetectHookSetPresent is called by the generated file interface +// (*proto.internalFuncs) SetPresent to optionally write an unprotected +// shadow bitmap when race detection is enabled. In regular code it is +// a noop. +func raceDetectHookSetPresent(field *uint32, num uint32, size presenceSize) {} + +// raceDetectHookClearPresent is called by the generated file interface +// (*proto.internalFuncs) ClearPresent to optionally write an unprotected +// shadow bitmap when race detection is enabled. In regular code it is +// a noop. +func raceDetectHookClearPresent(field *uint32, num uint32) {} diff --git a/vendor/google.golang.org/protobuf/internal/impl/bitmap_race.go b/vendor/google.golang.org/protobuf/internal/impl/bitmap_race.go new file mode 100644 index 000000000..e9a27583a --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/bitmap_race.go @@ -0,0 +1,126 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build race + +package impl + +// When running under race detector, we add a presence map of bytes, that we can access +// in the hook functions so that we trigger the race detection whenever we have concurrent +// Read-Writes or Write-Writes. The race detector does not otherwise detect invalid concurrent +// access to lazy fields as all updates of bitmaps and pointers are done using atomic operations. +type RaceDetectHookData struct { + shadowPresence *[]byte +} + +// Hooks for presence bitmap operations that allocate, read and write the shadowPresence +// using non-atomic operations. +func (data *RaceDetectHookData) raceDetectHookAlloc(size presenceSize) { + sp := make([]byte, size) + atomicStoreShadowPresence(&data.shadowPresence, &sp) +} + +func (p presence) raceDetectHookPresent(num uint32) { + data := p.toRaceDetectData() + if data == nil { + return + } + sp := atomicLoadShadowPresence(&data.shadowPresence) + if sp != nil { + _ = (*sp)[num] + } +} + +func (p presence) raceDetectHookSetPresent(num uint32, size presenceSize) { + data := p.toRaceDetectData() + if data == nil { + return + } + sp := atomicLoadShadowPresence(&data.shadowPresence) + if sp == nil { + data.raceDetectHookAlloc(size) + sp = atomicLoadShadowPresence(&data.shadowPresence) + } + (*sp)[num] = 1 +} + +func (p presence) raceDetectHookClearPresent(num uint32) { + data := p.toRaceDetectData() + if data == nil { + return + } + sp := atomicLoadShadowPresence(&data.shadowPresence) + if sp != nil { + (*sp)[num] = 0 + + } +} + +// raceDetectHookAllocAndCopy allocates a new shadowPresence slice at lazy and copies +// shadowPresence bytes from src to lazy. 
+func (p presence) raceDetectHookAllocAndCopy(q presence) { + sData := q.toRaceDetectData() + dData := p.toRaceDetectData() + if sData == nil { + return + } + srcSp := atomicLoadShadowPresence(&sData.shadowPresence) + if srcSp == nil { + atomicStoreShadowPresence(&dData.shadowPresence, nil) + return + } + n := len(*srcSp) + dSlice := make([]byte, n) + atomicStoreShadowPresence(&dData.shadowPresence, &dSlice) + for i := 0; i < n; i++ { + dSlice[i] = (*srcSp)[i] + } +} + +// raceDetectHookPresent is called by the generated file interface +// (*proto.internalFuncs) Present to optionally read an unprotected +// shadow bitmap when race detection is enabled. In regular code it is +// a noop. +func raceDetectHookPresent(field *uint32, num uint32) { + data := findPointerToRaceDetectData(field, num) + if data == nil { + return + } + sp := atomicLoadShadowPresence(&data.shadowPresence) + if sp != nil { + _ = (*sp)[num] + } +} + +// raceDetectHookSetPresent is called by the generated file interface +// (*proto.internalFuncs) SetPresent to optionally write an unprotected +// shadow bitmap when race detection is enabled. In regular code it is +// a noop. +func raceDetectHookSetPresent(field *uint32, num uint32, size presenceSize) { + data := findPointerToRaceDetectData(field, num) + if data == nil { + return + } + sp := atomicLoadShadowPresence(&data.shadowPresence) + if sp == nil { + data.raceDetectHookAlloc(size) + sp = atomicLoadShadowPresence(&data.shadowPresence) + } + (*sp)[num] = 1 +} + +// raceDetectHookClearPresent is called by the generated file interface +// (*proto.internalFuncs) ClearPresent to optionally write an unprotected +// shadow bitmap when race detection is enabled. In regular code it is +// a noop. +func raceDetectHookClearPresent(field *uint32, num uint32) { + data := findPointerToRaceDetectData(field, num) + if data == nil { + return + } + sp := atomicLoadShadowPresence(&data.shadowPresence) + if sp != nil { + (*sp)[num] = 0 + } +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/checkinit.go b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go index f29e6a8fa..fe2c719ce 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/checkinit.go +++ b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go @@ -35,6 +35,12 @@ func (mi *MessageInfo) checkInitializedPointer(p pointer) error { } return nil } + + var presence presence + if mi.presenceOffset.IsValid() { + presence = p.Apply(mi.presenceOffset).PresenceInfo() + } + if mi.extensionOffset.IsValid() { e := p.Apply(mi.extensionOffset).Extensions() if err := mi.isInitExtensions(e); err != nil { @@ -45,6 +51,33 @@ func (mi *MessageInfo) checkInitializedPointer(p pointer) error { if !f.isRequired && f.funcs.isInit == nil { continue } + + if f.presenceIndex != noPresence { + if !presence.Present(f.presenceIndex) { + if f.isRequired { + return errors.RequiredNotSet(string(mi.Desc.Fields().ByNumber(f.num).FullName())) + } + continue + } + if f.funcs.isInit != nil { + f.mi.init() + if f.mi.needsInitCheck { + if f.isLazy && p.Apply(f.offset).AtomicGetPointer().IsNil() { + lazy := *p.Apply(mi.lazyOffset).LazyInfoPtr() + if !lazy.AllowedPartial() { + // Nothing to see here, it was checked on unmarshal + continue + } + mi.lazyUnmarshal(p, f.num) + } + if err := f.funcs.isInit(p.Apply(f.offset), f); err != nil { + return err + } + } + } + continue + } + fptr := p.Apply(f.offset) if f.isPointer && fptr.Elem().IsNil() { if f.isRequired { diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go 
b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go index 7c1f66c8c..d14d7d93c 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go @@ -5,15 +5,12 @@ package impl import ( - "fmt" "reflect" - "sync" "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/errors" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" "google.golang.org/protobuf/runtime/protoiface" ) @@ -121,78 +118,6 @@ func (mi *MessageInfo) initOneofFieldCoders(od protoreflect.OneofDescriptor, si } } -func makeWeakMessageFieldCoder(fd protoreflect.FieldDescriptor) pointerCoderFuncs { - var once sync.Once - var messageType protoreflect.MessageType - lazyInit := func() { - once.Do(func() { - messageName := fd.Message().FullName() - messageType, _ = protoregistry.GlobalTypes.FindMessageByName(messageName) - }) - } - - return pointerCoderFuncs{ - size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int { - m, ok := p.WeakFields().get(f.num) - if !ok { - return 0 - } - lazyInit() - if messageType == nil { - panic(fmt.Sprintf("weak message %v is not linked in", fd.Message().FullName())) - } - return sizeMessage(m, f.tagsize, opts) - }, - marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - m, ok := p.WeakFields().get(f.num) - if !ok { - return b, nil - } - lazyInit() - if messageType == nil { - panic(fmt.Sprintf("weak message %v is not linked in", fd.Message().FullName())) - } - return appendMessage(b, m, f.wiretag, opts) - }, - unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) { - fs := p.WeakFields() - m, ok := fs.get(f.num) - if !ok { - lazyInit() - if messageType == nil { - return unmarshalOutput{}, errUnknown - } - m = messageType.New().Interface() - fs.set(f.num, m) - } - return consumeMessage(b, m, wtyp, opts) - }, - isInit: func(p pointer, f *coderFieldInfo) error { - m, ok := p.WeakFields().get(f.num) - if !ok { - return nil - } - return proto.CheckInitialized(m) - }, - merge: func(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { - sm, ok := src.WeakFields().get(f.num) - if !ok { - return - } - dm, ok := dst.WeakFields().get(f.num) - if !ok { - lazyInit() - if messageType == nil { - panic(fmt.Sprintf("weak message %v is not linked in", fd.Message().FullName())) - } - dm = messageType.New().Interface() - dst.WeakFields().set(f.num, dm) - } - opts.Merge(dm, sm) - }, - } -} - func makeMessageFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { if mi := getMessageInfo(ft); mi != nil { funcs := pointerCoderFuncs{ diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_field_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/codec_field_opaque.go new file mode 100644 index 000000000..76818ea25 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_field_opaque.go @@ -0,0 +1,264 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package impl + +import ( + "fmt" + "reflect" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/reflect/protoreflect" +) + +func makeOpaqueMessageFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointerCoderFuncs) { + mi := getMessageInfo(ft) + if mi == nil { + panic(fmt.Sprintf("invalid field: %v: unsupported message type %v", fd.FullName(), ft)) + } + switch fd.Kind() { + case protoreflect.MessageKind: + return mi, pointerCoderFuncs{ + size: sizeOpaqueMessage, + marshal: appendOpaqueMessage, + unmarshal: consumeOpaqueMessage, + isInit: isInitOpaqueMessage, + merge: mergeOpaqueMessage, + } + case protoreflect.GroupKind: + return mi, pointerCoderFuncs{ + size: sizeOpaqueGroup, + marshal: appendOpaqueGroup, + unmarshal: consumeOpaqueGroup, + isInit: isInitOpaqueMessage, + merge: mergeOpaqueMessage, + } + } + panic("unexpected field kind") +} + +func sizeOpaqueMessage(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + return protowire.SizeBytes(f.mi.sizePointer(p.AtomicGetPointer(), opts)) + f.tagsize +} + +func appendOpaqueMessage(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + mp := p.AtomicGetPointer() + calculatedSize := f.mi.sizePointer(mp, opts) + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(calculatedSize)) + before := len(b) + b, err := f.mi.marshalAppendPointer(b, mp, opts) + if measuredSize := len(b) - before; calculatedSize != measuredSize && err == nil { + return nil, errors.MismatchedSizeCalculation(calculatedSize, measuredSize) + } + return b, err +} + +func consumeOpaqueMessage(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + mp := p.AtomicGetPointer() + if mp.IsNil() { + mp = p.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem()))) + } + o, err := f.mi.unmarshalPointer(v, mp, 0, opts) + if err != nil { + return out, err + } + out.n = n + out.initialized = o.initialized + return out, nil +} + +func isInitOpaqueMessage(p pointer, f *coderFieldInfo) error { + mp := p.AtomicGetPointer() + if mp.IsNil() { + return nil + } + return f.mi.checkInitializedPointer(mp) +} + +func mergeOpaqueMessage(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { + dstmp := dst.AtomicGetPointer() + if dstmp.IsNil() { + dstmp = dst.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem()))) + } + f.mi.mergePointer(dstmp, src.AtomicGetPointer(), opts) +} + +func sizeOpaqueGroup(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + return 2*f.tagsize + f.mi.sizePointer(p.AtomicGetPointer(), opts) +} + +func appendOpaqueGroup(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, f.wiretag) // start group + b, err := f.mi.marshalAppendPointer(b, p.AtomicGetPointer(), opts) + b = protowire.AppendVarint(b, f.wiretag+1) // end group + return b, err +} + +func consumeOpaqueGroup(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.StartGroupType { + return out, errUnknown + } + mp := p.AtomicGetPointer() + if mp.IsNil() { + mp = p.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem()))) + } 
+ o, e := f.mi.unmarshalPointer(b, mp, f.num, opts) + return o, e +} + +func makeOpaqueRepeatedMessageFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointerCoderFuncs) { + if ft.Kind() != reflect.Ptr || ft.Elem().Kind() != reflect.Slice { + panic(fmt.Sprintf("invalid field: %v: unsupported type for opaque repeated message: %v", fd.FullName(), ft)) + } + mt := ft.Elem().Elem() // *[]*T -> *T + mi := getMessageInfo(mt) + if mi == nil { + panic(fmt.Sprintf("invalid field: %v: unsupported message type %v", fd.FullName(), mt)) + } + switch fd.Kind() { + case protoreflect.MessageKind: + return mi, pointerCoderFuncs{ + size: sizeOpaqueMessageSlice, + marshal: appendOpaqueMessageSlice, + unmarshal: consumeOpaqueMessageSlice, + isInit: isInitOpaqueMessageSlice, + merge: mergeOpaqueMessageSlice, + } + case protoreflect.GroupKind: + return mi, pointerCoderFuncs{ + size: sizeOpaqueGroupSlice, + marshal: appendOpaqueGroupSlice, + unmarshal: consumeOpaqueGroupSlice, + isInit: isInitOpaqueMessageSlice, + merge: mergeOpaqueMessageSlice, + } + } + panic("unexpected field kind") +} + +func sizeOpaqueMessageSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := p.AtomicGetPointer().PointerSlice() + n := 0 + for _, v := range s { + n += protowire.SizeBytes(f.mi.sizePointer(v, opts)) + f.tagsize + } + return n +} + +func appendOpaqueMessageSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := p.AtomicGetPointer().PointerSlice() + var err error + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + siz := f.mi.sizePointer(v, opts) + b = protowire.AppendVarint(b, uint64(siz)) + before := len(b) + b, err = f.mi.marshalAppendPointer(b, v, opts) + if err != nil { + return b, err + } + if measuredSize := len(b) - before; siz != measuredSize { + return nil, errors.MismatchedSizeCalculation(siz, measuredSize) + } + } + return b, nil +} + +func consumeOpaqueMessageSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, errDecode + } + mp := pointerOfValue(reflect.New(f.mi.GoReflectType.Elem())) + o, err := f.mi.unmarshalPointer(v, mp, 0, opts) + if err != nil { + return out, err + } + sp := p.AtomicGetPointer() + if sp.IsNil() { + sp = p.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.ft.Elem()))) + } + sp.AppendPointerSlice(mp) + out.n = n + out.initialized = o.initialized + return out, nil +} + +func isInitOpaqueMessageSlice(p pointer, f *coderFieldInfo) error { + sp := p.AtomicGetPointer() + if sp.IsNil() { + return nil + } + s := sp.PointerSlice() + for _, v := range s { + if err := f.mi.checkInitializedPointer(v); err != nil { + return err + } + } + return nil +} + +func mergeOpaqueMessageSlice(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { + ds := dst.AtomicGetPointer() + if ds.IsNil() { + ds = dst.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.ft.Elem()))) + } + for _, sp := range src.AtomicGetPointer().PointerSlice() { + dm := pointerOfValue(reflect.New(f.mi.GoReflectType.Elem())) + f.mi.mergePointer(dm, sp, opts) + ds.AppendPointerSlice(dm) + } +} + +func sizeOpaqueGroupSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := p.AtomicGetPointer().PointerSlice() + n := 0 + for _, v := range s { + n += 2*f.tagsize + f.mi.sizePointer(v, opts) + } + return n +} + +func 
appendOpaqueGroupSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := p.AtomicGetPointer().PointerSlice() + var err error + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) // start group + b, err = f.mi.marshalAppendPointer(b, v, opts) + if err != nil { + return b, err + } + b = protowire.AppendVarint(b, f.wiretag+1) // end group + } + return b, nil +} + +func consumeOpaqueGroupSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.StartGroupType { + return out, errUnknown + } + mp := pointerOfValue(reflect.New(f.mi.GoReflectType.Elem())) + out, err = f.mi.unmarshalPointer(b, mp, f.num, opts) + if err != nil { + return out, err + } + sp := p.AtomicGetPointer() + if sp.IsNil() { + sp = p.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.ft.Elem()))) + } + sp.AppendPointerSlice(mp) + return out, err +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go index fb35f0bae..229c69801 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go @@ -94,7 +94,7 @@ func sizeMap(mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo, opts marshalO return 0 } n := 0 - iter := mapRange(mapv) + iter := mapv.MapRange() for iter.Next() { key := mapi.conv.keyConv.PBValueOf(iter.Key()).MapKey() keySize := mapi.keyFuncs.size(key.Value(), mapKeyTagSize, opts) @@ -281,7 +281,7 @@ func appendMap(b []byte, mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo, o if opts.Deterministic() { return appendMapDeterministic(b, mapv, mapi, f, opts) } - iter := mapRange(mapv) + iter := mapv.MapRange() for iter.Next() { var err error b = protowire.AppendVarint(b, f.wiretag) @@ -328,7 +328,7 @@ func isInitMap(mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo) error { if !mi.needsInitCheck { return nil } - iter := mapRange(mapv) + iter := mapv.MapRange() for iter.Next() { val := pointerOfValue(iter.Value()) if err := mi.checkInitializedPointer(val); err != nil { @@ -336,7 +336,7 @@ func isInitMap(mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo) error { } } } else { - iter := mapRange(mapv) + iter := mapv.MapRange() for iter.Next() { val := mapi.conv.valConv.PBValueOf(iter.Value()) if err := mapi.valFuncs.isInit(val); err != nil { @@ -356,7 +356,7 @@ func mergeMap(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { if dstm.IsNil() { dstm.Set(reflect.MakeMap(f.ft)) } - iter := mapRange(srcm) + iter := srcm.MapRange() for iter.Next() { dstm.SetMapIndex(iter.Key(), iter.Value()) } @@ -371,7 +371,7 @@ func mergeMapOfBytes(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { if dstm.IsNil() { dstm.Set(reflect.MakeMap(f.ft)) } - iter := mapRange(srcm) + iter := srcm.MapRange() for iter.Next() { dstm.SetMapIndex(iter.Key(), reflect.ValueOf(append(emptyBuf[:], iter.Value().Bytes()...))) } @@ -386,7 +386,7 @@ func mergeMapOfMessage(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { if dstm.IsNil() { dstm.Set(reflect.MakeMap(f.ft)) } - iter := mapRange(srcm) + iter := srcm.MapRange() for iter.Next() { val := reflect.New(f.ft.Elem().Elem()) if f.mi != nil { diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go deleted file mode 100644 index 4b15493f2..000000000 --- 
a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.12 -// +build !go1.12 - -package impl - -import "reflect" - -type mapIter struct { - v reflect.Value - keys []reflect.Value -} - -// mapRange provides a less-efficient equivalent to -// the Go 1.12 reflect.Value.MapRange method. -func mapRange(v reflect.Value) *mapIter { - return &mapIter{v: v} -} - -func (i *mapIter) Next() bool { - if i.keys == nil { - i.keys = i.v.MapKeys() - } else { - i.keys = i.keys[1:] - } - return len(i.keys) > 0 -} - -func (i *mapIter) Key() reflect.Value { - return i.keys[0] -} - -func (i *mapIter) Value() reflect.Value { - return i.v.MapIndex(i.keys[0]) -} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go deleted file mode 100644 index 0b31b66ea..000000000 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.12 -// +build go1.12 - -package impl - -import "reflect" - -func mapRange(v reflect.Value) *reflect.MapIter { return v.MapRange() } diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go index 78be9df34..f78b57b04 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go @@ -32,6 +32,10 @@ type coderMessageInfo struct { needsInitCheck bool isMessageSet bool numRequiredFields uint8 + + lazyOffset offset + presenceOffset offset + presenceSize presenceSize } type coderFieldInfo struct { @@ -45,12 +49,19 @@ type coderFieldInfo struct { tagsize int // size of the varint-encoded tag isPointer bool // true if IsNil may be called on the struct field isRequired bool // true if field is required + + isLazy bool + presenceIndex uint32 } +const noPresence = 0xffffffff + func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { mi.sizecacheOffset = invalidOffset mi.unknownOffset = invalidOffset mi.extensionOffset = invalidOffset + mi.lazyOffset = invalidOffset + mi.presenceOffset = si.presenceOffset if si.sizecacheOffset.IsValid() && si.sizecacheType == sizecacheType { mi.sizecacheOffset = si.sizecacheOffset @@ -107,12 +118,9 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { }, } case isOneof: - fieldOffset = offsetOf(fs, mi.Exporter) - case fd.IsWeak(): - fieldOffset = si.weakOffset - funcs = makeWeakMessageFieldCoder(fd) + fieldOffset = offsetOf(fs) default: - fieldOffset = offsetOf(fs, mi.Exporter) + fieldOffset = offsetOf(fs) childMessage, funcs = fieldCoder(fd, ft) } cf := &preallocFields[i] @@ -127,6 +135,8 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { validation: newFieldValidationInfo(mi, si, fd, ft), isPointer: fd.Cardinality() == protoreflect.Repeated || fd.HasPresence(), isRequired: fd.Cardinality() == protoreflect.Required, + + presenceIndex: noPresence, } mi.orderedCoderFields = append(mi.orderedCoderFields, cf) mi.coderFields[cf.num] = cf diff --git 
a/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go new file mode 100644 index 000000000..41c1f74ef --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go @@ -0,0 +1,153 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "reflect" + "sort" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/order" + "google.golang.org/protobuf/reflect/protoreflect" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +func (mi *MessageInfo) makeOpaqueCoderMethods(t reflect.Type, si opaqueStructInfo) { + mi.sizecacheOffset = si.sizecacheOffset + mi.unknownOffset = si.unknownOffset + mi.unknownPtrKind = si.unknownType.Kind() == reflect.Ptr + mi.extensionOffset = si.extensionOffset + mi.lazyOffset = si.lazyOffset + mi.presenceOffset = si.presenceOffset + + mi.coderFields = make(map[protowire.Number]*coderFieldInfo) + fields := mi.Desc.Fields() + for i := 0; i < fields.Len(); i++ { + fd := fields.Get(i) + + fs := si.fieldsByNumber[fd.Number()] + if fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic() { + fs = si.oneofsByName[fd.ContainingOneof().Name()] + } + ft := fs.Type + var wiretag uint64 + if !fd.IsPacked() { + wiretag = protowire.EncodeTag(fd.Number(), wireTypes[fd.Kind()]) + } else { + wiretag = protowire.EncodeTag(fd.Number(), protowire.BytesType) + } + var fieldOffset offset + var funcs pointerCoderFuncs + var childMessage *MessageInfo + switch { + case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic(): + fieldOffset = offsetOf(fs) + case fd.Message() != nil && !fd.IsMap(): + fieldOffset = offsetOf(fs) + if fd.IsList() { + childMessage, funcs = makeOpaqueRepeatedMessageFieldCoder(fd, ft) + } else { + childMessage, funcs = makeOpaqueMessageFieldCoder(fd, ft) + } + default: + fieldOffset = offsetOf(fs) + childMessage, funcs = fieldCoder(fd, ft) + } + cf := &coderFieldInfo{ + num: fd.Number(), + offset: fieldOffset, + wiretag: wiretag, + ft: ft, + tagsize: protowire.SizeVarint(wiretag), + funcs: funcs, + mi: childMessage, + validation: newFieldValidationInfo(mi, si.structInfo, fd, ft), + isPointer: (fd.Cardinality() == protoreflect.Repeated || + fd.Kind() == protoreflect.MessageKind || + fd.Kind() == protoreflect.GroupKind), + isRequired: fd.Cardinality() == protoreflect.Required, + presenceIndex: noPresence, + } + + // TODO: Use presence for all fields. + // + // In some cases, such as maps, presence means only "might be set" rather + // than "is definitely set", but every field should have a presence bit to + // permit us to skip over definitely-unset fields at marshal time. 
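+ //
+ // Editorial sketch of how such a bit is consulted. The backing store is
+ // the message's presence bitmap (matched as the generated XXX_presence
+ // field elsewhere in this package); treating it as uint32 words is an
+ // assumption for illustration, not a guarantee of the layout:
+ //
+ //	word := bitmap[presenceIndex/32]
+ //	isSet := word&(1<<(presenceIndex%32)) != 0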
+ + var hasPresence bool + hasPresence, cf.isLazy = usePresenceForField(si, fd) + + if hasPresence { + cf.presenceIndex, mi.presenceSize = presenceIndex(mi.Desc, fd) + } + + mi.orderedCoderFields = append(mi.orderedCoderFields, cf) + mi.coderFields[cf.num] = cf + } + for i, oneofs := 0, mi.Desc.Oneofs(); i < oneofs.Len(); i++ { + if od := oneofs.Get(i); !od.IsSynthetic() { + mi.initOneofFieldCoders(od, si.structInfo) + } + } + if messageset.IsMessageSet(mi.Desc) { + if !mi.extensionOffset.IsValid() { + panic(fmt.Sprintf("%v: MessageSet with no extensions field", mi.Desc.FullName())) + } + if !mi.unknownOffset.IsValid() { + panic(fmt.Sprintf("%v: MessageSet with no unknown field", mi.Desc.FullName())) + } + mi.isMessageSet = true + } + sort.Slice(mi.orderedCoderFields, func(i, j int) bool { + return mi.orderedCoderFields[i].num < mi.orderedCoderFields[j].num + }) + + var maxDense protoreflect.FieldNumber + for _, cf := range mi.orderedCoderFields { + if cf.num >= 16 && cf.num >= 2*maxDense { + break + } + maxDense = cf.num + } + mi.denseCoderFields = make([]*coderFieldInfo, maxDense+1) + for _, cf := range mi.orderedCoderFields { + if int(cf.num) > len(mi.denseCoderFields) { + break + } + mi.denseCoderFields[cf.num] = cf + } + + // To preserve compatibility with historic wire output, marshal oneofs last. + if mi.Desc.Oneofs().Len() > 0 { + sort.Slice(mi.orderedCoderFields, func(i, j int) bool { + fi := fields.ByNumber(mi.orderedCoderFields[i].num) + fj := fields.ByNumber(mi.orderedCoderFields[j].num) + return order.LegacyFieldOrder(fi, fj) + }) + } + + mi.needsInitCheck = needsInitCheck(mi.Desc) + if mi.methods.Marshal == nil && mi.methods.Size == nil { + mi.methods.Flags |= piface.SupportMarshalDeterministic + mi.methods.Marshal = mi.marshal + mi.methods.Size = mi.size + } + if mi.methods.Unmarshal == nil { + mi.methods.Flags |= piface.SupportUnmarshalDiscardUnknown + mi.methods.Unmarshal = mi.unmarshal + } + if mi.methods.CheckInitialized == nil { + mi.methods.CheckInitialized = mi.checkInitialized + } + if mi.methods.Merge == nil { + mi.methods.Merge = mi.merge + } + if mi.methods.Equal == nil { + mi.methods.Equal = equal + } +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go index 304244a65..e4580b3ac 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go +++ b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go @@ -101,7 +101,7 @@ func (ms *mapReflect) Mutable(k protoreflect.MapKey) protoreflect.Value { return v } func (ms *mapReflect) Range(f func(protoreflect.MapKey, protoreflect.Value) bool) { - iter := mapRange(ms.v) + iter := ms.v.MapRange() for iter.Next() { k := ms.keyConv.PBValueOf(iter.Key()).MapKey() v := ms.valConv.PBValueOf(iter.Value()) diff --git a/vendor/google.golang.org/protobuf/internal/impl/decode.go b/vendor/google.golang.org/protobuf/internal/impl/decode.go index cda0520c2..e0dd21fa5 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/decode.go +++ b/vendor/google.golang.org/protobuf/internal/impl/decode.go @@ -34,6 +34,8 @@ func (o unmarshalOptions) Options() proto.UnmarshalOptions { AllowPartial: true, DiscardUnknown: o.DiscardUnknown(), Resolver: o.resolver, + + NoLazyDecoding: o.NoLazyDecoding(), } } @@ -41,13 +43,26 @@ func (o unmarshalOptions) DiscardUnknown() bool { return o.flags&protoiface.UnmarshalDiscardUnknown != 0 } -func (o unmarshalOptions) IsDefault() bool { - return o.flags == 0 && o.resolver == 
protoregistry.GlobalTypes +func (o unmarshalOptions) AliasBuffer() bool { return o.flags&protoiface.UnmarshalAliasBuffer != 0 } +func (o unmarshalOptions) Validated() bool { return o.flags&protoiface.UnmarshalValidated != 0 } +func (o unmarshalOptions) NoLazyDecoding() bool { + return o.flags&protoiface.UnmarshalNoLazyDecoding != 0 +} + +func (o unmarshalOptions) CanBeLazy() bool { + if o.resolver != protoregistry.GlobalTypes { + return false + } + // We ignore the UnmarshalInvalidateSizeCache even though it's not in the default set + return (o.flags & ^(protoiface.UnmarshalAliasBuffer | protoiface.UnmarshalValidated | protoiface.UnmarshalCheckRequired)) == 0 } var lazyUnmarshalOptions = unmarshalOptions{ resolver: protoregistry.GlobalTypes, - depth: protowire.DefaultRecursionLimit, + + flags: protoiface.UnmarshalAliasBuffer | protoiface.UnmarshalValidated, + + depth: protowire.DefaultRecursionLimit, } type unmarshalOutput struct { @@ -94,9 +109,30 @@ func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire. if flags.ProtoLegacy && mi.isMessageSet { return unmarshalMessageSet(mi, b, p, opts) } + + lazyDecoding := LazyEnabled() // default + if opts.NoLazyDecoding() { + lazyDecoding = false // explicitly disabled + } + if mi.lazyOffset.IsValid() && lazyDecoding { + return mi.unmarshalPointerLazy(b, p, groupTag, opts) + } + return mi.unmarshalPointerEager(b, p, groupTag, opts) +} + +// unmarshalPointerEager is the message unmarshalling function for all messages that are not lazy. +// The corresponding function for Lazy is in google_lazy.go. +func (mi *MessageInfo) unmarshalPointerEager(b []byte, p pointer, groupTag protowire.Number, opts unmarshalOptions) (out unmarshalOutput, err error) { + initialized := true var requiredMask uint64 var exts *map[int32]ExtensionField + + var presence presence + if mi.presenceOffset.IsValid() { + presence = p.Apply(mi.presenceOffset).PresenceInfo() + } + start := len(b) for len(b) > 0 { // Parse the tag (field number and wire type). @@ -154,6 +190,11 @@ func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire. if f.funcs.isInit != nil && !o.initialized { initialized = false } + + if f.presenceIndex != noPresence { + presence.SetPresentUnatomic(f.presenceIndex, mi.presenceSize) + } + default: // Possible extension. 
if exts == nil && mi.extensionOffset.IsValid() { @@ -222,7 +263,7 @@ func (mi *MessageInfo) unmarshalExtension(b []byte, num protowire.Number, wtyp p return out, errUnknown } if flags.LazyUnmarshalExtensions { - if opts.IsDefault() && x.canLazy(xt) { + if opts.CanBeLazy() && x.canLazy(xt) { out, valid := skipExtension(b, xi, num, wtyp, opts) switch valid { case ValidationValid: @@ -270,6 +311,13 @@ func skipExtension(b []byte, xi *extensionFieldInfo, num protowire.Number, wtyp if n < 0 { return out, ValidationUnknown } + + if opts.Validated() { + out.initialized = true + out.n = n + return out, ValidationValid + } + out, st := xi.validation.mi.validate(v, 0, opts) out.n = n return out, st diff --git a/vendor/google.golang.org/protobuf/internal/impl/encode.go b/vendor/google.golang.org/protobuf/internal/impl/encode.go index 6254f5de4..b2e212291 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/encode.go +++ b/vendor/google.golang.org/protobuf/internal/impl/encode.go @@ -10,6 +10,7 @@ import ( "sync/atomic" "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/protolazy" "google.golang.org/protobuf/proto" piface "google.golang.org/protobuf/runtime/protoiface" ) @@ -71,11 +72,39 @@ func (mi *MessageInfo) sizePointerSlow(p pointer, opts marshalOptions) (size int e := p.Apply(mi.extensionOffset).Extensions() size += mi.sizeExtensions(e, opts) } + + var lazy **protolazy.XXX_lazyUnmarshalInfo + var presence presence + if mi.presenceOffset.IsValid() { + presence = p.Apply(mi.presenceOffset).PresenceInfo() + if mi.lazyOffset.IsValid() { + lazy = p.Apply(mi.lazyOffset).LazyInfoPtr() + } + } + for _, f := range mi.orderedCoderFields { if f.funcs.size == nil { continue } fptr := p.Apply(f.offset) + + if f.presenceIndex != noPresence { + if !presence.Present(f.presenceIndex) { + continue + } + + if f.isLazy && fptr.AtomicGetPointer().IsNil() { + if lazyFields(opts) { + size += (*lazy).SizeField(uint32(f.num)) + continue + } else { + mi.lazyUnmarshal(p, f.num) + } + } + size += f.funcs.size(fptr, f, opts) + continue + } + if f.isPointer && fptr.Elem().IsNil() { continue } @@ -134,11 +163,52 @@ func (mi *MessageInfo) marshalAppendPointer(b []byte, p pointer, opts marshalOpt return b, err } } + + var lazy **protolazy.XXX_lazyUnmarshalInfo + var presence presence + if mi.presenceOffset.IsValid() { + presence = p.Apply(mi.presenceOffset).PresenceInfo() + if mi.lazyOffset.IsValid() { + lazy = p.Apply(mi.lazyOffset).LazyInfoPtr() + } + } + for _, f := range mi.orderedCoderFields { if f.funcs.marshal == nil { continue } fptr := p.Apply(f.offset) + + if f.presenceIndex != noPresence { + if !presence.Present(f.presenceIndex) { + continue + } + if f.isLazy { + // Be careful, this field needs to be read atomically, like for a get + if f.isPointer && fptr.AtomicGetPointer().IsNil() { + if lazyFields(opts) { + b, _ = (*lazy).AppendField(b, uint32(f.num)) + continue + } else { + mi.lazyUnmarshal(p, f.num) + } + } + + b, err = f.funcs.marshal(b, fptr, f, opts) + if err != nil { + return b, err + } + continue + } else if f.isPointer && fptr.Elem().IsNil() { + continue + } + b, err = f.funcs.marshal(b, fptr, f, opts) + if err != nil { + return b, err + } + continue + } + if f.isPointer && fptr.Elem().IsNil() { continue } @@ -163,6 +233,14 @@ func fullyLazyExtensions(opts marshalOptions) bool { return opts.flags&piface.MarshalDeterministic == 0 } +// lazyFields returns true if we should attempt to keep fields lazy over size and marshal. 
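+//
+// Editorial usage sketch: proto.MarshalOptions is the public switch that
+// flips this behavior (the option is real; the lines below only
+// illustrate the consequence described above):
+//
+//	b1, _ := proto.Marshal(m)                                     // lazy bytes may be passed through
+//	b2, _ := proto.MarshalOptions{Deterministic: true}.Marshal(m) // lazy fields decoded and re-encoded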
+func lazyFields(opts marshalOptions) bool { + // When deterministic marshaling is requested, force an unmarshal for lazy + // fields to produce a deterministic result, instead of passing through + // bytes lazily that may or may not match what Go Protobuf would produce. + return opts.flags&piface.MarshalDeterministic == 0 +} + func (mi *MessageInfo) sizeExtensions(ext *map[int32]ExtensionField, opts marshalOptions) (n int) { if ext == nil { return 0 diff --git a/vendor/google.golang.org/protobuf/internal/impl/lazy.go b/vendor/google.golang.org/protobuf/internal/impl/lazy.go new file mode 100644 index 000000000..c7de31e24 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/lazy.go @@ -0,0 +1,433 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "math/bits" + "os" + "reflect" + "sort" + "sync/atomic" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/protolazy" + "google.golang.org/protobuf/reflect/protoreflect" + preg "google.golang.org/protobuf/reflect/protoregistry" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +var enableLazy int32 = func() int32 { + if os.Getenv("GOPROTODEBUG") == "nolazy" { + return 0 + } + return 1 +}() + +// EnableLazyUnmarshal enables lazy unmarshaling. +func EnableLazyUnmarshal(enable bool) { + if enable { + atomic.StoreInt32(&enableLazy, 1) + return + } + atomic.StoreInt32(&enableLazy, 0) +} + +// LazyEnabled reports whether lazy unmarshalling is currently enabled. +func LazyEnabled() bool { + return atomic.LoadInt32(&enableLazy) != 0 +} + +// UnmarshalField unmarshals a field in a message. +func UnmarshalField(m interface{}, num protowire.Number) { + switch m := m.(type) { + case *messageState: + m.messageInfo().lazyUnmarshal(m.pointer(), num) + case *messageReflectWrapper: + m.messageInfo().lazyUnmarshal(m.pointer(), num) + default: + panic(fmt.Sprintf("unsupported wrapper type %T", m)) + } +} + +func (mi *MessageInfo) lazyUnmarshal(p pointer, num protoreflect.FieldNumber) { + var f *coderFieldInfo + if int(num) < len(mi.denseCoderFields) { + f = mi.denseCoderFields[num] + } else { + f = mi.coderFields[num] + } + if f == nil { + panic(fmt.Sprintf("lazyUnmarshal: field info for %v.%v", mi.Desc.FullName(), num)) + } + lazy := *p.Apply(mi.lazyOffset).LazyInfoPtr() + start, end, found, _, multipleEntries := lazy.FindFieldInProto(uint32(num)) + if !found && multipleEntries == nil { + panic(fmt.Sprintf("lazyUnmarshal: can't find field data for %v.%v", mi.Desc.FullName(), num)) + } + // The actual pointer in the message can not be set until the whole struct is filled in, otherwise we will have races. + // Create another pointer and set it atomically, if we won the race and the pointer in the original message is still nil. 
+ fp := pointerOfValue(reflect.New(f.ft)) + if multipleEntries != nil { + for _, entry := range multipleEntries { + mi.unmarshalField(lazy.Buffer()[entry.Start:entry.End], fp, f, lazy, lazy.UnmarshalFlags()) + } + } else { + mi.unmarshalField(lazy.Buffer()[start:end], fp, f, lazy, lazy.UnmarshalFlags()) + } + p.Apply(f.offset).AtomicSetPointerIfNil(fp.Elem()) +} + +func (mi *MessageInfo) unmarshalField(b []byte, p pointer, f *coderFieldInfo, lazyInfo *protolazy.XXX_lazyUnmarshalInfo, flags piface.UnmarshalInputFlags) error { + opts := lazyUnmarshalOptions + opts.flags |= flags + for len(b) > 0 { + // Parse the tag (field number and wire type). + var tag uint64 + if b[0] < 0x80 { + tag = uint64(b[0]) + b = b[1:] + } else if len(b) >= 2 && b[1] < 128 { + tag = uint64(b[0]&0x7f) + uint64(b[1])<<7 + b = b[2:] + } else { + var n int + tag, n = protowire.ConsumeVarint(b) + if n < 0 { + return errors.New("invalid wire data") + } + b = b[n:] + } + var num protowire.Number + if n := tag >> 3; n < uint64(protowire.MinValidNumber) || n > uint64(protowire.MaxValidNumber) { + return errors.New("invalid wire data") + } else { + num = protowire.Number(n) + } + wtyp := protowire.Type(tag & 7) + if num == f.num { + o, err := f.funcs.unmarshal(b, p, wtyp, f, opts) + if err == nil { + b = b[o.n:] + continue + } + if err != errUnknown { + return err + } + } + n := protowire.ConsumeFieldValue(num, wtyp, b) + if n < 0 { + return errors.New("invalid wire data") + } + b = b[n:] + } + return nil +} + +func (mi *MessageInfo) skipField(b []byte, f *coderFieldInfo, wtyp protowire.Type, opts unmarshalOptions) (out unmarshalOutput, _ ValidationStatus) { + fmi := f.validation.mi + if fmi == nil { + fd := mi.Desc.Fields().ByNumber(f.num) + if fd == nil { + return out, ValidationUnknown + } + messageName := fd.Message().FullName() + messageType, err := preg.GlobalTypes.FindMessageByName(messageName) + if err != nil { + return out, ValidationUnknown + } + var ok bool + fmi, ok = messageType.(*MessageInfo) + if !ok { + return out, ValidationUnknown + } + } + fmi.init() + switch f.validation.typ { + case validationTypeMessage: + if wtyp != protowire.BytesType { + return out, ValidationWrongWireType + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, ValidationInvalid + } + out, st := fmi.validate(v, 0, opts) + out.n = n + return out, st + case validationTypeGroup: + if wtyp != protowire.StartGroupType { + return out, ValidationWrongWireType + } + out, st := fmi.validate(b, f.num, opts) + return out, st + default: + return out, ValidationUnknown + } +} + +// unmarshalPointerLazy is similar to unmarshalPointerEager, but it +// specifically handles lazy unmarshalling. it expects lazyOffset and +// presenceOffset to both be valid. +func (mi *MessageInfo) unmarshalPointerLazy(b []byte, p pointer, groupTag protowire.Number, opts unmarshalOptions) (out unmarshalOutput, err error) { + initialized := true + var requiredMask uint64 + var lazy **protolazy.XXX_lazyUnmarshalInfo + var presence presence + var lazyIndex []protolazy.IndexEntry + var lastNum protowire.Number + outOfOrder := false + lazyDecode := false + presence = p.Apply(mi.presenceOffset).PresenceInfo() + lazy = p.Apply(mi.lazyOffset).LazyInfoPtr() + if !presence.AnyPresent(mi.presenceSize) { + if opts.CanBeLazy() { + // If the message contains existing data, we need to merge into it. + // Lazy unmarshaling doesn't merge, so only enable it when the + // message is empty (has no presence bitmap). 
+ lazyDecode = true + if *lazy == nil { + *lazy = &protolazy.XXX_lazyUnmarshalInfo{} + } + (*lazy).SetUnmarshalFlags(opts.flags) + if !opts.AliasBuffer() { + // Make a copy of the buffer for lazy unmarshaling. + // Set the AliasBuffer flag so recursive unmarshal + // operations reuse the copy. + b = append([]byte{}, b...) + opts.flags |= piface.UnmarshalAliasBuffer + } + (*lazy).SetBuffer(b) + } + } + // Track special handling of lazy fields. + // + // In the common case, all fields are lazyValidateOnly (and lazyFields remains nil). + // In the event that validation for a field fails, this map tracks handling of the field. + type lazyAction uint8 + const ( + lazyValidateOnly lazyAction = iota // validate the field only + lazyUnmarshalNow // eagerly unmarshal the field + lazyUnmarshalLater // unmarshal the field after the message is fully processed + ) + var lazyFields map[*coderFieldInfo]lazyAction + var exts *map[int32]ExtensionField + start := len(b) + pos := 0 + for len(b) > 0 { + // Parse the tag (field number and wire type). + var tag uint64 + if b[0] < 0x80 { + tag = uint64(b[0]) + b = b[1:] + } else if len(b) >= 2 && b[1] < 128 { + tag = uint64(b[0]&0x7f) + uint64(b[1])<<7 + b = b[2:] + } else { + var n int + tag, n = protowire.ConsumeVarint(b) + if n < 0 { + return out, errDecode + } + b = b[n:] + } + var num protowire.Number + if n := tag >> 3; n < uint64(protowire.MinValidNumber) || n > uint64(protowire.MaxValidNumber) { + return out, errors.New("invalid field number") + } else { + num = protowire.Number(n) + } + wtyp := protowire.Type(tag & 7) + + if wtyp == protowire.EndGroupType { + if num != groupTag { + return out, errors.New("mismatching end group marker") + } + groupTag = 0 + break + } + + var f *coderFieldInfo + if int(num) < len(mi.denseCoderFields) { + f = mi.denseCoderFields[num] + } else { + f = mi.coderFields[num] + } + var n int + err := errUnknown + discardUnknown := false + Field: + switch { + case f != nil: + if f.funcs.unmarshal == nil { + break + } + if f.isLazy && lazyDecode { + switch { + case lazyFields == nil || lazyFields[f] == lazyValidateOnly: + // Attempt to validate this field and leave it for later lazy unmarshaling. + o, valid := mi.skipField(b, f, wtyp, opts) + switch valid { + case ValidationValid: + // Skip over the valid field and continue. + err = nil + presence.SetPresentUnatomic(f.presenceIndex, mi.presenceSize) + requiredMask |= f.validation.requiredBit + if !o.initialized { + initialized = false + } + n = o.n + break Field + case ValidationInvalid: + return out, errors.New("invalid proto wire format") + case ValidationWrongWireType: + break Field + case ValidationUnknown: + if lazyFields == nil { + lazyFields = make(map[*coderFieldInfo]lazyAction) + } + if presence.Present(f.presenceIndex) { + // We were unable to determine if the field is valid or not, + // and we've already skipped over at least one instance of this + // field. Clear the presence bit (so if we stop decoding early, + // we don't leave a partially-initialized field around) and flag + // the field for unmarshaling before we return. + presence.ClearPresent(f.presenceIndex) + lazyFields[f] = lazyUnmarshalLater + discardUnknown = true + break Field + } else { + // We were unable to determine if the field is valid or not, + // but this is the first time we've seen it. Flag it as needing + // eager unmarshaling and fall through to the eager unmarshal case below. 
+ lazyFields[f] = lazyUnmarshalNow + } + } + case lazyFields[f] == lazyUnmarshalLater: + // This field will be unmarshaled in a separate pass below. + // Skip over it here. + discardUnknown = true + break Field + default: + // Eagerly unmarshal the field. + } + } + if f.isLazy && !lazyDecode && presence.Present(f.presenceIndex) { + if p.Apply(f.offset).AtomicGetPointer().IsNil() { + mi.lazyUnmarshal(p, f.num) + } + } + var o unmarshalOutput + o, err = f.funcs.unmarshal(b, p.Apply(f.offset), wtyp, f, opts) + n = o.n + if err != nil { + break + } + requiredMask |= f.validation.requiredBit + if f.funcs.isInit != nil && !o.initialized { + initialized = false + } + if f.presenceIndex != noPresence { + presence.SetPresentUnatomic(f.presenceIndex, mi.presenceSize) + } + default: + // Possible extension. + if exts == nil && mi.extensionOffset.IsValid() { + exts = p.Apply(mi.extensionOffset).Extensions() + if *exts == nil { + *exts = make(map[int32]ExtensionField) + } + } + if exts == nil { + break + } + var o unmarshalOutput + o, err = mi.unmarshalExtension(b, num, wtyp, *exts, opts) + if err != nil { + break + } + n = o.n + if !o.initialized { + initialized = false + } + } + if err != nil { + if err != errUnknown { + return out, err + } + n = protowire.ConsumeFieldValue(num, wtyp, b) + if n < 0 { + return out, errDecode + } + if !discardUnknown && !opts.DiscardUnknown() && mi.unknownOffset.IsValid() { + u := mi.mutableUnknownBytes(p) + *u = protowire.AppendTag(*u, num, wtyp) + *u = append(*u, b[:n]...) + } + } + b = b[n:] + end := start - len(b) + if lazyDecode && f != nil && f.isLazy { + if num != lastNum { + lazyIndex = append(lazyIndex, protolazy.IndexEntry{ + FieldNum: uint32(num), + Start: uint32(pos), + End: uint32(end), + }) + } else { + i := len(lazyIndex) - 1 + lazyIndex[i].End = uint32(end) + lazyIndex[i].MultipleContiguous = true + } + } + if num < lastNum { + outOfOrder = true + } + pos = end + lastNum = num + } + if groupTag != 0 { + return out, errors.New("missing end group marker") + } + if lazyFields != nil { + // Some fields failed validation, and now need to be unmarshaled. 
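+ // Only entries flagged lazyUnmarshalLater are handled here. Their
+ // presence bits were cleared and their wire bytes were kept out of the
+ // unknown-fields set (via discardUnknown) in the loop above, so decoding
+ // them now from the retained buffer should be equivalent to having
+ // decoded them eagerly.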
+ for f, action := range lazyFields { + if action != lazyUnmarshalLater { + continue + } + initialized = false + if *lazy == nil { + *lazy = &protolazy.XXX_lazyUnmarshalInfo{} + } + if err := mi.unmarshalField((*lazy).Buffer(), p.Apply(f.offset), f, *lazy, opts.flags); err != nil { + return out, err + } + presence.SetPresentUnatomic(f.presenceIndex, mi.presenceSize) + } + } + if lazyDecode { + if outOfOrder { + sort.Slice(lazyIndex, func(i, j int) bool { + return lazyIndex[i].FieldNum < lazyIndex[j].FieldNum || + (lazyIndex[i].FieldNum == lazyIndex[j].FieldNum && + lazyIndex[i].Start < lazyIndex[j].Start) + }) + } + if *lazy == nil { + *lazy = &protolazy.XXX_lazyUnmarshalInfo{} + } + + (*lazy).SetIndex(lazyIndex) + } + if mi.numRequiredFields > 0 && bits.OnesCount64(requiredMask) != int(mi.numRequiredFields) { + initialized = false + } + if initialized { + out.initialized = true + } + out.n = start - len(b) + return out, nil +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go index bf0b6049b..a51dffbe2 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go @@ -310,12 +310,9 @@ func aberrantAppendField(md *filedesc.Message, goType reflect.Type, tag, tagKey, fd.L0.Parent = md fd.L0.Index = n - if fd.L1.IsWeak || fd.L1.EditionFeatures.IsPacked { + if fd.L1.EditionFeatures.IsPacked { fd.L1.Options = func() protoreflect.ProtoMessage { opts := descopts.Field.ProtoReflect().New() - if fd.L1.IsWeak { - opts.Set(opts.Descriptor().Fields().ByName("weak"), protoreflect.ValueOfBool(true)) - } if fd.L1.EditionFeatures.IsPacked { opts.Set(opts.Descriptor().Fields().ByName("packed"), protoreflect.ValueOfBool(fd.L1.EditionFeatures.IsPacked)) } diff --git a/vendor/google.golang.org/protobuf/internal/impl/merge.go b/vendor/google.golang.org/protobuf/internal/impl/merge.go index 7e65f64f2..8ffdce67d 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/merge.go +++ b/vendor/google.golang.org/protobuf/internal/impl/merge.go @@ -41,11 +41,38 @@ func (mi *MessageInfo) mergePointer(dst, src pointer, opts mergeOptions) { if src.IsNil() { return } + + var presenceSrc presence + var presenceDst presence + if mi.presenceOffset.IsValid() { + presenceSrc = src.Apply(mi.presenceOffset).PresenceInfo() + presenceDst = dst.Apply(mi.presenceOffset).PresenceInfo() + } + for _, f := range mi.orderedCoderFields { if f.funcs.merge == nil { continue } sfptr := src.Apply(f.offset) + + if f.presenceIndex != noPresence { + if !presenceSrc.Present(f.presenceIndex) { + continue + } + dfptr := dst.Apply(f.offset) + if f.isLazy { + if sfptr.AtomicGetPointer().IsNil() { + mi.lazyUnmarshal(src, f.num) + } + if presenceDst.Present(f.presenceIndex) && dfptr.AtomicGetPointer().IsNil() { + mi.lazyUnmarshal(dst, f.num) + } + } + f.funcs.merge(dst.Apply(f.offset), sfptr, f, opts) + presenceDst.SetPresentUnatomic(f.presenceIndex, mi.presenceSize) + continue + } + if f.isPointer && sfptr.Elem().IsNil() { continue } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go index 741b5ed29..d50423dcb 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message.go @@ -14,7 +14,6 @@ import ( "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/reflect/protoreflect" - 
"google.golang.org/protobuf/reflect/protoregistry" ) // MessageInfo provides protobuf related functionality for a given Go type @@ -79,6 +78,9 @@ func (mi *MessageInfo) initOnce() { if mi.initDone == 1 { return } + if opaqueInitHook(mi) { + return + } t := mi.GoReflectType if t.Kind() != reflect.Ptr && t.Elem().Kind() != reflect.Struct { @@ -117,7 +119,6 @@ type ( var ( sizecacheType = reflect.TypeOf(SizeCache(0)) - weakFieldsType = reflect.TypeOf(WeakFields(nil)) unknownFieldsAType = reflect.TypeOf(unknownFieldsA(nil)) unknownFieldsBType = reflect.TypeOf(unknownFieldsB(nil)) extensionFieldsType = reflect.TypeOf(ExtensionFields(nil)) @@ -126,13 +127,14 @@ var ( type structInfo struct { sizecacheOffset offset sizecacheType reflect.Type - weakOffset offset - weakType reflect.Type unknownOffset offset unknownType reflect.Type extensionOffset offset extensionType reflect.Type + lazyOffset offset + presenceOffset offset + fieldsByNumber map[protoreflect.FieldNumber]reflect.StructField oneofsByName map[protoreflect.Name]reflect.StructField oneofWrappersByType map[reflect.Type]protoreflect.FieldNumber @@ -142,9 +144,10 @@ type structInfo struct { func (mi *MessageInfo) makeStructInfo(t reflect.Type) structInfo { si := structInfo{ sizecacheOffset: invalidOffset, - weakOffset: invalidOffset, unknownOffset: invalidOffset, extensionOffset: invalidOffset, + lazyOffset: invalidOffset, + presenceOffset: invalidOffset, fieldsByNumber: map[protoreflect.FieldNumber]reflect.StructField{}, oneofsByName: map[protoreflect.Name]reflect.StructField{}, @@ -157,24 +160,23 @@ fieldLoop: switch f := t.Field(i); f.Name { case genid.SizeCache_goname, genid.SizeCacheA_goname: if f.Type == sizecacheType { - si.sizecacheOffset = offsetOf(f, mi.Exporter) + si.sizecacheOffset = offsetOf(f) si.sizecacheType = f.Type } - case genid.WeakFields_goname, genid.WeakFieldsA_goname: - if f.Type == weakFieldsType { - si.weakOffset = offsetOf(f, mi.Exporter) - si.weakType = f.Type - } case genid.UnknownFields_goname, genid.UnknownFieldsA_goname: if f.Type == unknownFieldsAType || f.Type == unknownFieldsBType { - si.unknownOffset = offsetOf(f, mi.Exporter) + si.unknownOffset = offsetOf(f) si.unknownType = f.Type } case genid.ExtensionFields_goname, genid.ExtensionFieldsA_goname, genid.ExtensionFieldsB_goname: if f.Type == extensionFieldsType { - si.extensionOffset = offsetOf(f, mi.Exporter) + si.extensionOffset = offsetOf(f) si.extensionType = f.Type } + case "lazyFields", "XXX_lazyUnmarshalInfo": + si.lazyOffset = offsetOf(f) + case "XXX_presence": + si.presenceOffset = offsetOf(f) default: for _, s := range strings.Split(f.Tag.Get("protobuf"), ",") { if len(s) > 0 && strings.Trim(s, "0123456789") == "" { @@ -244,9 +246,6 @@ func (mi *MessageInfo) Message(i int) protoreflect.MessageType { mi.init() fd := mi.Desc.Fields().Get(i) switch { - case fd.IsWeak(): - mt, _ := protoregistry.GlobalTypes.FindMessageByName(fd.Message().FullName()) - return mt case fd.IsMap(): return mapEntryType{fd.Message(), mi.fieldTypes[fd.Number()]} default: diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go new file mode 100644 index 000000000..dd55e8e00 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go @@ -0,0 +1,627 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package impl + +import ( + "fmt" + "math" + "reflect" + "strings" + "sync/atomic" + + "google.golang.org/protobuf/reflect/protoreflect" +) + +type opaqueStructInfo struct { + structInfo +} + +// isOpaque determines whether a protobuf message type is on the Opaque API. It +// checks whether the type is a Go struct that protoc-gen-go would generate. +// +// This function only detects newly generated messages from the v2 +// implementation of protoc-gen-go. It is unable to classify generated messages +// that are too old or those that are generated by a different generator +// such as protoc-gen-gogo. +func isOpaque(t reflect.Type) bool { + // The current detection mechanism is to simply check the first field + // for a struct tag with the "protogen" key. + if t.Kind() == reflect.Struct && t.NumField() > 0 { + pgt := t.Field(0).Tag.Get("protogen") + return strings.HasPrefix(pgt, "opaque.") + } + return false +} + +func opaqueInitHook(mi *MessageInfo) bool { + mt := mi.GoReflectType.Elem() + si := opaqueStructInfo{ + structInfo: mi.makeStructInfo(mt), + } + + if !isOpaque(mt) { + return false + } + + defer atomic.StoreUint32(&mi.initDone, 1) + + mi.fields = map[protoreflect.FieldNumber]*fieldInfo{} + fds := mi.Desc.Fields() + for i := 0; i < fds.Len(); i++ { + fd := fds.Get(i) + fs := si.fieldsByNumber[fd.Number()] + var fi fieldInfo + usePresence, _ := usePresenceForField(si, fd) + + switch { + case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic(): + // Oneofs are no different for opaque. + fi = fieldInfoForOneof(fd, si.oneofsByName[fd.ContainingOneof().Name()], mi.Exporter, si.oneofWrappersByNumber[fd.Number()]) + case fd.IsMap(): + fi = mi.fieldInfoForMapOpaque(si, fd, fs) + case fd.IsList() && fd.Message() == nil && usePresence: + fi = mi.fieldInfoForScalarListOpaque(si, fd, fs) + case fd.IsList() && fd.Message() == nil: + // Proto3 lists without presence can use same access methods as open + fi = fieldInfoForList(fd, fs, mi.Exporter) + case fd.IsList() && usePresence: + fi = mi.fieldInfoForMessageListOpaque(si, fd, fs) + case fd.IsList(): + // Proto3 opaque messages that does not need presence bitmap. 
+ // Different representation than open struct, but same logic + fi = mi.fieldInfoForMessageListOpaqueNoPresence(si, fd, fs) + case fd.Message() != nil && usePresence: + fi = mi.fieldInfoForMessageOpaque(si, fd, fs) + case fd.Message() != nil: + // Proto3 messages without presence can use same access methods as open + fi = fieldInfoForMessage(fd, fs, mi.Exporter) + default: + fi = mi.fieldInfoForScalarOpaque(si, fd, fs) + } + mi.fields[fd.Number()] = &fi + } + mi.oneofs = map[protoreflect.Name]*oneofInfo{} + for i := 0; i < mi.Desc.Oneofs().Len(); i++ { + od := mi.Desc.Oneofs().Get(i) + mi.oneofs[od.Name()] = makeOneofInfoOpaque(mi, od, si.structInfo, mi.Exporter) + } + + mi.denseFields = make([]*fieldInfo, fds.Len()*2) + for i := 0; i < fds.Len(); i++ { + if fd := fds.Get(i); int(fd.Number()) < len(mi.denseFields) { + mi.denseFields[fd.Number()] = mi.fields[fd.Number()] + } + } + + for i := 0; i < fds.Len(); { + fd := fds.Get(i) + if od := fd.ContainingOneof(); od != nil && !fd.ContainingOneof().IsSynthetic() { + mi.rangeInfos = append(mi.rangeInfos, mi.oneofs[od.Name()]) + i += od.Fields().Len() + } else { + mi.rangeInfos = append(mi.rangeInfos, mi.fields[fd.Number()]) + i++ + } + } + + mi.makeExtensionFieldsFunc(mt, si.structInfo) + mi.makeUnknownFieldsFunc(mt, si.structInfo) + mi.makeOpaqueCoderMethods(mt, si) + mi.makeFieldTypes(si.structInfo) + + return true +} + +func makeOneofInfoOpaque(mi *MessageInfo, od protoreflect.OneofDescriptor, si structInfo, x exporter) *oneofInfo { + oi := &oneofInfo{oneofDesc: od} + if od.IsSynthetic() { + fd := od.Fields().Get(0) + index, _ := presenceIndex(mi.Desc, fd) + oi.which = func(p pointer) protoreflect.FieldNumber { + if p.IsNil() { + return 0 + } + if !mi.present(p, index) { + return 0 + } + return od.Fields().Get(0).Number() + } + return oi + } + // Dispatch to non-opaque oneof implementation for non-synthetic oneofs. + return makeOneofInfo(od, si, x) +} + +func (mi *MessageInfo) fieldInfoForMapOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo { + ft := fs.Type + if ft.Kind() != reflect.Map { + panic(fmt.Sprintf("invalid type: got %v, want map kind", ft)) + } + fieldOffset := offsetOf(fs) + conv := NewConverter(ft, fd) + return fieldInfo{ + fieldDesc: fd, + has: func(p pointer) bool { + if p.IsNil() { + return false + } + // Don't bother checking presence bits, since we need to + // look at the map length even if the presence bit is set. 
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + return rv.Len() > 0 + }, + clear: func(p pointer) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + rv.Set(reflect.Zero(rv.Type())) + }, + get: func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.Len() == 0 { + return conv.Zero() + } + return conv.PBValueOf(rv) + }, + set: func(p pointer, v protoreflect.Value) { + pv := conv.GoValueOf(v) + if pv.IsNil() { + panic(fmt.Sprintf("invalid value: setting map field to read-only value")) + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + rv.Set(pv) + }, + mutable: func(p pointer) protoreflect.Value { + v := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if v.IsNil() { + v.Set(reflect.MakeMap(fs.Type)) + } + return conv.PBValueOf(v) + }, + newField: func() protoreflect.Value { + return conv.New() + }, + } +} + +func (mi *MessageInfo) fieldInfoForScalarListOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo { + ft := fs.Type + if ft.Kind() != reflect.Slice { + panic(fmt.Sprintf("invalid type: got %v, want slice kind", ft)) + } + conv := NewConverter(reflect.PtrTo(ft), fd) + fieldOffset := offsetOf(fs) + index, _ := presenceIndex(mi.Desc, fd) + return fieldInfo{ + fieldDesc: fd, + has: func(p pointer) bool { + if p.IsNil() { + return false + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + return rv.Len() > 0 + }, + clear: func(p pointer) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + rv.Set(reflect.Zero(rv.Type())) + }, + get: func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type) + if rv.Elem().Len() == 0 { + return conv.Zero() + } + return conv.PBValueOf(rv) + }, + set: func(p pointer, v protoreflect.Value) { + pv := conv.GoValueOf(v) + if pv.IsNil() { + panic(fmt.Sprintf("invalid value: setting repeated field to read-only value")) + } + mi.setPresent(p, index) + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + rv.Set(pv.Elem()) + }, + mutable: func(p pointer) protoreflect.Value { + mi.setPresent(p, index) + return conv.PBValueOf(p.Apply(fieldOffset).AsValueOf(fs.Type)) + }, + newField: func() protoreflect.Value { + return conv.New() + }, + } +} + +func (mi *MessageInfo) fieldInfoForMessageListOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo { + ft := fs.Type + if ft.Kind() != reflect.Ptr || ft.Elem().Kind() != reflect.Slice { + panic(fmt.Sprintf("invalid type: got %v, want slice kind", ft)) + } + conv := NewConverter(ft, fd) + fieldOffset := offsetOf(fs) + index, _ := presenceIndex(mi.Desc, fd) + fieldNumber := fd.Number() + return fieldInfo{ + fieldDesc: fd, + has: func(p pointer) bool { + if p.IsNil() { + return false + } + if !mi.present(p, index) { + return false + } + sp := p.Apply(fieldOffset).AtomicGetPointer() + if sp.IsNil() { + // Lazily unmarshal this field. 
+ mi.lazyUnmarshal(p, fieldNumber) + sp = p.Apply(fieldOffset).AtomicGetPointer() + } + rv := sp.AsValueOf(fs.Type.Elem()) + return rv.Elem().Len() > 0 + }, + clear: func(p pointer) { + fp := p.Apply(fieldOffset) + sp := fp.AtomicGetPointer() + if sp.IsNil() { + sp = fp.AtomicSetPointerIfNil(pointerOfValue(reflect.New(fs.Type.Elem()))) + mi.setPresent(p, index) + } + rv := sp.AsValueOf(fs.Type.Elem()) + rv.Elem().Set(reflect.Zero(rv.Type().Elem())) + }, + get: func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + if !mi.present(p, index) { + return conv.Zero() + } + sp := p.Apply(fieldOffset).AtomicGetPointer() + if sp.IsNil() { + // Lazily unmarshal this field. + mi.lazyUnmarshal(p, fieldNumber) + sp = p.Apply(fieldOffset).AtomicGetPointer() + } + rv := sp.AsValueOf(fs.Type.Elem()) + if rv.Elem().Len() == 0 { + return conv.Zero() + } + return conv.PBValueOf(rv) + }, + set: func(p pointer, v protoreflect.Value) { + fp := p.Apply(fieldOffset) + sp := fp.AtomicGetPointer() + if sp.IsNil() { + sp = fp.AtomicSetPointerIfNil(pointerOfValue(reflect.New(fs.Type.Elem()))) + mi.setPresent(p, index) + } + rv := sp.AsValueOf(fs.Type.Elem()) + val := conv.GoValueOf(v) + if val.IsNil() { + panic(fmt.Sprintf("invalid value: setting repeated field to read-only value")) + } else { + rv.Elem().Set(val.Elem()) + } + }, + mutable: func(p pointer) protoreflect.Value { + fp := p.Apply(fieldOffset) + sp := fp.AtomicGetPointer() + if sp.IsNil() { + if mi.present(p, index) { + // Lazily unmarshal this field. + mi.lazyUnmarshal(p, fieldNumber) + sp = p.Apply(fieldOffset).AtomicGetPointer() + } else { + sp = fp.AtomicSetPointerIfNil(pointerOfValue(reflect.New(fs.Type.Elem()))) + mi.setPresent(p, index) + } + } + rv := sp.AsValueOf(fs.Type.Elem()) + return conv.PBValueOf(rv) + }, + newField: func() protoreflect.Value { + return conv.New() + }, + } +} + +func (mi *MessageInfo) fieldInfoForMessageListOpaqueNoPresence(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo { + ft := fs.Type + if ft.Kind() != reflect.Ptr || ft.Elem().Kind() != reflect.Slice { + panic(fmt.Sprintf("invalid type: got %v, want slice kind", ft)) + } + conv := NewConverter(ft, fd) + fieldOffset := offsetOf(fs) + return fieldInfo{ + fieldDesc: fd, + has: func(p pointer) bool { + if p.IsNil() { + return false + } + sp := p.Apply(fieldOffset).AtomicGetPointer() + if sp.IsNil() { + return false + } + rv := sp.AsValueOf(fs.Type.Elem()) + return rv.Elem().Len() > 0 + }, + clear: func(p pointer) { + sp := p.Apply(fieldOffset).AtomicGetPointer() + if !sp.IsNil() { + rv := sp.AsValueOf(fs.Type.Elem()) + rv.Elem().Set(reflect.Zero(rv.Type().Elem())) + } + }, + get: func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + sp := p.Apply(fieldOffset).AtomicGetPointer() + if sp.IsNil() { + return conv.Zero() + } + rv := sp.AsValueOf(fs.Type.Elem()) + if rv.Elem().Len() == 0 { + return conv.Zero() + } + return conv.PBValueOf(rv) + }, + set: func(p pointer, v protoreflect.Value) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.IsNil() { + rv.Set(reflect.New(fs.Type.Elem())) + } + val := conv.GoValueOf(v) + if val.IsNil() { + panic(fmt.Sprintf("invalid value: setting repeated field to read-only value")) + } else { + rv.Elem().Set(val.Elem()) + } + }, + mutable: func(p pointer) protoreflect.Value { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.IsNil() { + rv.Set(reflect.New(fs.Type.Elem())) + } + return conv.PBValueOf(rv) + }, + newField: func() 
protoreflect.Value { + return conv.New() + }, + } +} + +func (mi *MessageInfo) fieldInfoForScalarOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo { + ft := fs.Type + nullable := fd.HasPresence() + if oneof := fd.ContainingOneof(); oneof != nil && oneof.IsSynthetic() { + nullable = true + } + deref := false + if nullable && ft.Kind() == reflect.Ptr { + ft = ft.Elem() + deref = true + } + conv := NewConverter(ft, fd) + fieldOffset := offsetOf(fs) + index, _ := presenceIndex(mi.Desc, fd) + var getter func(p pointer) protoreflect.Value + if !nullable { + getter = getterForDirectScalar(fd, fs, conv, fieldOffset) + } else { + getter = getterForOpaqueNullableScalar(mi, index, fd, fs, conv, fieldOffset) + } + return fieldInfo{ + fieldDesc: fd, + has: func(p pointer) bool { + if p.IsNil() { + return false + } + if nullable { + return mi.present(p, index) + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + switch rv.Kind() { + case reflect.Bool: + return rv.Bool() + case reflect.Int32, reflect.Int64: + return rv.Int() != 0 + case reflect.Uint32, reflect.Uint64: + return rv.Uint() != 0 + case reflect.Float32, reflect.Float64: + return rv.Float() != 0 || math.Signbit(rv.Float()) + case reflect.String, reflect.Slice: + return rv.Len() > 0 + default: + panic(fmt.Sprintf("invalid type: %v", rv.Type())) // should never happen + } + }, + clear: func(p pointer) { + if nullable { + mi.clearPresent(p, index) + } + // This is only valuable for bytes and strings, but we do it unconditionally. + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + rv.Set(reflect.Zero(rv.Type())) + }, + get: getter, + // TODO: Implement unsafe fast path for set? + set: func(p pointer, v protoreflect.Value) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if deref { + if rv.IsNil() { + rv.Set(reflect.New(ft)) + } + rv = rv.Elem() + } + + rv.Set(conv.GoValueOf(v)) + if nullable && rv.Kind() == reflect.Slice && rv.IsNil() { + rv.Set(emptyBytes) + } + if nullable { + mi.setPresent(p, index) + } + }, + newField: func() protoreflect.Value { + return conv.New() + }, + } +} + +func (mi *MessageInfo) fieldInfoForMessageOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo { + ft := fs.Type + conv := NewConverter(ft, fd) + fieldOffset := offsetOf(fs) + index, _ := presenceIndex(mi.Desc, fd) + fieldNumber := fd.Number() + elemType := fs.Type.Elem() + return fieldInfo{ + fieldDesc: fd, + has: func(p pointer) bool { + if p.IsNil() { + return false + } + return mi.present(p, index) + }, + clear: func(p pointer) { + mi.clearPresent(p, index) + p.Apply(fieldOffset).AtomicSetNilPointer() + }, + get: func(p pointer) protoreflect.Value { + if p.IsNil() || !mi.present(p, index) { + return conv.Zero() + } + fp := p.Apply(fieldOffset) + mp := fp.AtomicGetPointer() + if mp.IsNil() { + // Lazily unmarshal this field. + mi.lazyUnmarshal(p, fieldNumber) + mp = fp.AtomicGetPointer() + } + rv := mp.AsValueOf(elemType) + return conv.PBValueOf(rv) + }, + set: func(p pointer, v protoreflect.Value) { + val := pointerOfValue(conv.GoValueOf(v)) + if val.IsNil() { + panic("invalid nil pointer") + } + p.Apply(fieldOffset).AtomicSetPointer(val) + mi.setPresent(p, index) + }, + mutable: func(p pointer) protoreflect.Value { + fp := p.Apply(fieldOffset) + mp := fp.AtomicGetPointer() + if mp.IsNil() { + if mi.present(p, index) { + // Lazily unmarshal this field. 
+ mi.lazyUnmarshal(p, fieldNumber) + mp = fp.AtomicGetPointer() + } else { + mp = pointerOfValue(conv.GoValueOf(conv.New())) + fp.AtomicSetPointer(mp) + mi.setPresent(p, index) + } + } + return conv.PBValueOf(mp.AsValueOf(fs.Type.Elem())) + }, + newMessage: func() protoreflect.Message { + return conv.New().Message() + }, + newField: func() protoreflect.Value { + return conv.New() + }, + } +} + +// A presenceList wraps a List, updating presence bits as necessary when the +// list contents change. +type presenceList struct { + pvalueList + setPresence func(bool) +} +type pvalueList interface { + protoreflect.List + //Unwrapper +} + +func (list presenceList) Append(v protoreflect.Value) { + list.pvalueList.Append(v) + list.setPresence(true) +} +func (list presenceList) Truncate(i int) { + list.pvalueList.Truncate(i) + list.setPresence(i > 0) +} + +// presenceIndex returns the index to pass to presence functions. +// +// TODO: field.Desc.Index() would be simpler, and would give space to record the presence of oneof fields. +func presenceIndex(md protoreflect.MessageDescriptor, fd protoreflect.FieldDescriptor) (uint32, presenceSize) { + found := false + var index, numIndices uint32 + for i := 0; i < md.Fields().Len(); i++ { + f := md.Fields().Get(i) + if f == fd { + found = true + index = numIndices + } + if f.ContainingOneof() == nil || isLastOneofField(f) { + numIndices++ + } + } + if !found { + panic(fmt.Sprintf("BUG: %v not in %v", fd.Name(), md.FullName())) + } + return index, presenceSize(numIndices) +} + +func isLastOneofField(fd protoreflect.FieldDescriptor) bool { + fields := fd.ContainingOneof().Fields() + return fields.Get(fields.Len()-1) == fd +} + +func (mi *MessageInfo) setPresent(p pointer, index uint32) { + p.Apply(mi.presenceOffset).PresenceInfo().SetPresent(index, mi.presenceSize) +} + +func (mi *MessageInfo) clearPresent(p pointer, index uint32) { + p.Apply(mi.presenceOffset).PresenceInfo().ClearPresent(index) +} + +func (mi *MessageInfo) present(p pointer, index uint32) bool { + return p.Apply(mi.presenceOffset).PresenceInfo().Present(index) +} + +// usePresenceForField implements the somewhat intricate logic of when +// the presence bitmap is used for a field. The main logic is that a +// field that is optional or that can be lazy will use the presence +// bit, but for proto2, also maps have a presence bit. It also records +// if the field can ever be lazy, which is true if we have a +// lazyOffset and the field is a message or a slice of messages. A +// field that is lazy will always need a presence bit. Oneofs are not +// lazy and do not use presence, unless they are a synthetic oneof, +// which is a proto3 optional field. For proto3 optionals, we use the +// presence and they can also be lazy when applicable (a message). +func usePresenceForField(si opaqueStructInfo, fd protoreflect.FieldDescriptor) (usePresence, canBeLazy bool) { + hasLazyField := fd.(interface{ IsLazy() bool }).IsLazy() + + // Non-oneof scalar fields with explicit field presence use the presence array. 
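+ //
+ // Worked examples, derived from the switch below (editorial):
+ //	proto2 optional int32                    -> presence bit, never lazy
+ //	map field                                -> no presence bit
+ //	message field with the lazy field option -> presence bit, may be lazy
+ //	member of a real (non-synthetic) oneof   -> neither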
+ usesPresenceArray := fd.HasPresence() && fd.Message() == nil && (fd.ContainingOneof() == nil || fd.ContainingOneof().IsSynthetic()) + switch { + case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic(): + return false, false + case fd.IsMap(): + return false, false + case fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind: + return hasLazyField, hasLazyField + default: + return usesPresenceArray || (hasLazyField && fd.HasPresence()), false + } +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_opaque_gen.go b/vendor/google.golang.org/protobuf/internal/impl/message_opaque_gen.go new file mode 100644 index 000000000..a69825699 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/message_opaque_gen.go @@ -0,0 +1,132 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-types. DO NOT EDIT. + +package impl + +import ( + "reflect" + + "google.golang.org/protobuf/reflect/protoreflect" +) + +func getterForOpaqueNullableScalar(mi *MessageInfo, index uint32, fd protoreflect.FieldDescriptor, fs reflect.StructField, conv Converter, fieldOffset offset) func(p pointer) protoreflect.Value { + ft := fs.Type + if ft.Kind() == reflect.Ptr { + ft = ft.Elem() + } + if fd.Kind() == protoreflect.EnumKind { + // Enums for nullable opaque types. + return func(p pointer) protoreflect.Value { + if p.IsNil() || !mi.present(p, index) { + return conv.Zero() + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + return conv.PBValueOf(rv) + } + } + switch ft.Kind() { + case reflect.Bool: + return func(p pointer) protoreflect.Value { + if p.IsNil() || !mi.present(p, index) { + return conv.Zero() + } + x := p.Apply(fieldOffset).Bool() + return protoreflect.ValueOfBool(*x) + } + case reflect.Int32: + return func(p pointer) protoreflect.Value { + if p.IsNil() || !mi.present(p, index) { + return conv.Zero() + } + x := p.Apply(fieldOffset).Int32() + return protoreflect.ValueOfInt32(*x) + } + case reflect.Uint32: + return func(p pointer) protoreflect.Value { + if p.IsNil() || !mi.present(p, index) { + return conv.Zero() + } + x := p.Apply(fieldOffset).Uint32() + return protoreflect.ValueOfUint32(*x) + } + case reflect.Int64: + return func(p pointer) protoreflect.Value { + if p.IsNil() || !mi.present(p, index) { + return conv.Zero() + } + x := p.Apply(fieldOffset).Int64() + return protoreflect.ValueOfInt64(*x) + } + case reflect.Uint64: + return func(p pointer) protoreflect.Value { + if p.IsNil() || !mi.present(p, index) { + return conv.Zero() + } + x := p.Apply(fieldOffset).Uint64() + return protoreflect.ValueOfUint64(*x) + } + case reflect.Float32: + return func(p pointer) protoreflect.Value { + if p.IsNil() || !mi.present(p, index) { + return conv.Zero() + } + x := p.Apply(fieldOffset).Float32() + return protoreflect.ValueOfFloat32(*x) + } + case reflect.Float64: + return func(p pointer) protoreflect.Value { + if p.IsNil() || !mi.present(p, index) { + return conv.Zero() + } + x := p.Apply(fieldOffset).Float64() + return protoreflect.ValueOfFloat64(*x) + } + case reflect.String: + if fd.Kind() == protoreflect.BytesKind { + return func(p pointer) protoreflect.Value { + if p.IsNil() || !mi.present(p, index) { + return conv.Zero() + } + x := p.Apply(fieldOffset).StringPtr() + if *x == nil { + return conv.Zero() + } + if len(**x) == 0 { + return protoreflect.ValueOfBytes(nil) + } + return 
protoreflect.ValueOfBytes([]byte(**x)) + } + } + return func(p pointer) protoreflect.Value { + if p.IsNil() || !mi.present(p, index) { + return conv.Zero() + } + x := p.Apply(fieldOffset).StringPtr() + if *x == nil { + return conv.Zero() + } + return protoreflect.ValueOfString(**x) + } + case reflect.Slice: + if fd.Kind() == protoreflect.StringKind { + return func(p pointer) protoreflect.Value { + if p.IsNil() || !mi.present(p, index) { + return conv.Zero() + } + x := p.Apply(fieldOffset).Bytes() + return protoreflect.ValueOfString(string(*x)) + } + } + return func(p pointer) protoreflect.Value { + if p.IsNil() || !mi.present(p, index) { + return conv.Zero() + } + x := p.Apply(fieldOffset).Bytes() + return protoreflect.ValueOfBytes(*x) + } + } + panic("unexpected protobuf kind: " + ft.Kind().String()) +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go index ecb4623d7..0d20132fa 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go @@ -72,8 +72,6 @@ func (mi *MessageInfo) makeKnownFieldsFunc(si structInfo) { fi = fieldInfoForMap(fd, fs, mi.Exporter) case fd.IsList(): fi = fieldInfoForList(fd, fs, mi.Exporter) - case fd.IsWeak(): - fi = fieldInfoForWeakMessage(fd, si.weakOffset) case fd.Message() != nil: fi = fieldInfoForMessage(fd, fs, mi.Exporter) default: @@ -205,6 +203,11 @@ func (mi *MessageInfo) makeFieldTypes(si structInfo) { case fd.IsList(): if fd.Enum() != nil || fd.Message() != nil { ft = fs.Type.Elem() + + if ft.Kind() == reflect.Slice { + ft = ft.Elem() + } + } isMessage = fd.Message() != nil case fd.Enum() != nil: @@ -214,9 +217,6 @@ func (mi *MessageInfo) makeFieldTypes(si structInfo) { } case fd.Message() != nil: ft = fs.Type - if fd.IsWeak() { - ft = nil - } isMessage = true } if isMessage && ft != nil && ft.Kind() != reflect.Ptr { diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go index 986322b19..68d4ae32e 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go @@ -8,11 +8,8 @@ import ( "fmt" "math" "reflect" - "sync" - "google.golang.org/protobuf/internal/flags" "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" ) type fieldInfo struct { @@ -76,7 +73,7 @@ func fieldInfoForOneof(fd protoreflect.FieldDescriptor, fs reflect.StructField, isMessage := fd.Message() != nil // TODO: Implement unsafe fast path? - fieldOffset := offsetOf(fs, x) + fieldOffset := offsetOf(fs) return fieldInfo{ // NOTE: The logic below intentionally assumes that oneof fields are // well-formatted. That is, the oneof interface never contains a @@ -152,7 +149,7 @@ func fieldInfoForMap(fd protoreflect.FieldDescriptor, fs reflect.StructField, x conv := NewConverter(ft, fd) // TODO: Implement unsafe fast path? - fieldOffset := offsetOf(fs, x) + fieldOffset := offsetOf(fs) return fieldInfo{ fieldDesc: fd, has: func(p pointer) bool { @@ -205,7 +202,7 @@ func fieldInfoForList(fd protoreflect.FieldDescriptor, fs reflect.StructField, x conv := NewConverter(reflect.PtrTo(ft), fd) // TODO: Implement unsafe fast path? 
- fieldOffset := offsetOf(fs, x) + fieldOffset := offsetOf(fs) return fieldInfo{ fieldDesc: fd, has: func(p pointer) bool { @@ -256,6 +253,7 @@ func fieldInfoForScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField, ft := fs.Type nullable := fd.HasPresence() isBytes := ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 + var getter func(p pointer) protoreflect.Value if nullable { if ft.Kind() != reflect.Ptr && ft.Kind() != reflect.Slice { // This never occurs for generated message types. @@ -268,19 +266,25 @@ func fieldInfoForScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField, } } conv := NewConverter(ft, fd) + fieldOffset := offsetOf(fs) + + // Generate specialized getter functions to avoid going through reflect.Value + if nullable { + getter = getterForNullableScalar(fd, fs, conv, fieldOffset) + } else { + getter = getterForDirectScalar(fd, fs, conv, fieldOffset) + } - // TODO: Implement unsafe fast path? - fieldOffset := offsetOf(fs, x) return fieldInfo{ fieldDesc: fd, has: func(p pointer) bool { if p.IsNil() { return false } - rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() if nullable { - return !rv.IsNil() + return !p.Apply(fieldOffset).Elem().IsNil() } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() switch rv.Kind() { case reflect.Bool: return rv.Bool() @@ -300,21 +304,8 @@ func fieldInfoForScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField, rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() rv.Set(reflect.Zero(rv.Type())) }, - get: func(p pointer) protoreflect.Value { - if p.IsNil() { - return conv.Zero() - } - rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() - if nullable { - if rv.IsNil() { - return conv.Zero() - } - if rv.Kind() == reflect.Ptr { - rv = rv.Elem() - } - } - return conv.PBValueOf(rv) - }, + get: getter, + // TODO: Implement unsafe fast path for set? 
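+ // Context for the TODO above: get already avoids per-call reflection via
+ // the specialized getters, while set still goes through reflect. Editorial
+ // sketch of the two access styles (off stands in for fieldOffset):
+ //
+ //	rv := p.Apply(off).AsValueOf(fs.Type).Elem() // generic reflection path (set, below)
+ //	x := p.Apply(off).Int32()                    // specialized typed access (getters)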
set: func(p pointer, v protoreflect.Value) { rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() if nullable && rv.Kind() == reflect.Ptr { @@ -338,85 +329,12 @@ func fieldInfoForScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField, } } -func fieldInfoForWeakMessage(fd protoreflect.FieldDescriptor, weakOffset offset) fieldInfo { - if !flags.ProtoLegacy { - panic("no support for proto1 weak fields") - } - - var once sync.Once - var messageType protoreflect.MessageType - lazyInit := func() { - once.Do(func() { - messageName := fd.Message().FullName() - messageType, _ = protoregistry.GlobalTypes.FindMessageByName(messageName) - if messageType == nil { - panic(fmt.Sprintf("weak message %v for field %v is not linked in", messageName, fd.FullName())) - } - }) - } - - num := fd.Number() - return fieldInfo{ - fieldDesc: fd, - has: func(p pointer) bool { - if p.IsNil() { - return false - } - _, ok := p.Apply(weakOffset).WeakFields().get(num) - return ok - }, - clear: func(p pointer) { - p.Apply(weakOffset).WeakFields().clear(num) - }, - get: func(p pointer) protoreflect.Value { - lazyInit() - if p.IsNil() { - return protoreflect.ValueOfMessage(messageType.Zero()) - } - m, ok := p.Apply(weakOffset).WeakFields().get(num) - if !ok { - return protoreflect.ValueOfMessage(messageType.Zero()) - } - return protoreflect.ValueOfMessage(m.ProtoReflect()) - }, - set: func(p pointer, v protoreflect.Value) { - lazyInit() - m := v.Message() - if m.Descriptor() != messageType.Descriptor() { - if got, want := m.Descriptor().FullName(), messageType.Descriptor().FullName(); got != want { - panic(fmt.Sprintf("field %v has mismatching message descriptor: got %v, want %v", fd.FullName(), got, want)) - } - panic(fmt.Sprintf("field %v has mismatching message descriptor: %v", fd.FullName(), m.Descriptor().FullName())) - } - p.Apply(weakOffset).WeakFields().set(num, m.Interface()) - }, - mutable: func(p pointer) protoreflect.Value { - lazyInit() - fs := p.Apply(weakOffset).WeakFields() - m, ok := fs.get(num) - if !ok { - m = messageType.New().Interface() - fs.set(num, m) - } - return protoreflect.ValueOfMessage(m.ProtoReflect()) - }, - newMessage: func() protoreflect.Message { - lazyInit() - return messageType.New() - }, - newField: func() protoreflect.Value { - lazyInit() - return protoreflect.ValueOfMessage(messageType.New()) - }, - } -} - func fieldInfoForMessage(fd protoreflect.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { ft := fs.Type conv := NewConverter(ft, fd) // TODO: Implement unsafe fast path? 
- fieldOffset := offsetOf(fs, x) + fieldOffset := offsetOf(fs) return fieldInfo{ fieldDesc: fd, has: func(p pointer) bool { @@ -425,7 +343,7 @@ func fieldInfoForMessage(fd protoreflect.FieldDescriptor, fs reflect.StructField } rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() if fs.Type.Kind() != reflect.Ptr { - return !isZero(rv) + return !rv.IsZero() } return !rv.IsNil() }, @@ -472,7 +390,7 @@ func makeOneofInfo(od protoreflect.OneofDescriptor, si structInfo, x exporter) * oi := &oneofInfo{oneofDesc: od} if od.IsSynthetic() { fs := si.fieldsByNumber[od.Fields().Get(0).Number()] - fieldOffset := offsetOf(fs, x) + fieldOffset := offsetOf(fs) oi.which = func(p pointer) protoreflect.FieldNumber { if p.IsNil() { return 0 @@ -485,7 +403,7 @@ func makeOneofInfo(od protoreflect.OneofDescriptor, si structInfo, x exporter) * } } else { fs := si.oneofsByName[od.Name()] - fieldOffset := offsetOf(fs, x) + fieldOffset := offsetOf(fs) oi.which = func(p pointer) protoreflect.FieldNumber { if p.IsNil() { return 0 @@ -503,41 +421,3 @@ func makeOneofInfo(od protoreflect.OneofDescriptor, si structInfo, x exporter) * } return oi } - -// isZero is identical to reflect.Value.IsZero. -// TODO: Remove this when Go1.13 is the minimally supported Go version. -func isZero(v reflect.Value) bool { - switch v.Kind() { - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return math.Float64bits(v.Float()) == 0 - case reflect.Complex64, reflect.Complex128: - c := v.Complex() - return math.Float64bits(real(c)) == 0 && math.Float64bits(imag(c)) == 0 - case reflect.Array: - for i := 0; i < v.Len(); i++ { - if !isZero(v.Index(i)) { - return false - } - } - return true - case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice, reflect.UnsafePointer: - return v.IsNil() - case reflect.String: - return v.Len() == 0 - case reflect.Struct: - for i := 0; i < v.NumField(); i++ { - if !isZero(v.Field(i)) { - return false - } - } - return true - default: - panic(&reflect.ValueError{Method: "reflect.Value.IsZero", Kind: v.Kind()}) - } -} diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field_gen.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field_gen.go new file mode 100644 index 000000000..af5e063a1 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field_gen.go @@ -0,0 +1,273 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-types. DO NOT EDIT. + +package impl + +import ( + "reflect" + + "google.golang.org/protobuf/reflect/protoreflect" +) + +func getterForNullableScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField, conv Converter, fieldOffset offset) func(p pointer) protoreflect.Value { + ft := fs.Type + if ft.Kind() == reflect.Ptr { + ft = ft.Elem() + } + if fd.Kind() == protoreflect.EnumKind { + elemType := fs.Type.Elem() + // Enums for nullable types. 
+ return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + rv := p.Apply(fieldOffset).Elem().AsValueOf(elemType) + if rv.IsNil() { + return conv.Zero() + } + return conv.PBValueOf(rv.Elem()) + } + } + switch ft.Kind() { + case reflect.Bool: + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).BoolPtr() + if *x == nil { + return conv.Zero() + } + return protoreflect.ValueOfBool(**x) + } + case reflect.Int32: + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Int32Ptr() + if *x == nil { + return conv.Zero() + } + return protoreflect.ValueOfInt32(**x) + } + case reflect.Uint32: + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Uint32Ptr() + if *x == nil { + return conv.Zero() + } + return protoreflect.ValueOfUint32(**x) + } + case reflect.Int64: + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Int64Ptr() + if *x == nil { + return conv.Zero() + } + return protoreflect.ValueOfInt64(**x) + } + case reflect.Uint64: + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Uint64Ptr() + if *x == nil { + return conv.Zero() + } + return protoreflect.ValueOfUint64(**x) + } + case reflect.Float32: + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Float32Ptr() + if *x == nil { + return conv.Zero() + } + return protoreflect.ValueOfFloat32(**x) + } + case reflect.Float64: + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Float64Ptr() + if *x == nil { + return conv.Zero() + } + return protoreflect.ValueOfFloat64(**x) + } + case reflect.String: + if fd.Kind() == protoreflect.BytesKind { + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).StringPtr() + if *x == nil { + return conv.Zero() + } + if len(**x) == 0 { + return protoreflect.ValueOfBytes(nil) + } + return protoreflect.ValueOfBytes([]byte(**x)) + } + } + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).StringPtr() + if *x == nil { + return conv.Zero() + } + return protoreflect.ValueOfString(**x) + } + case reflect.Slice: + if fd.Kind() == protoreflect.StringKind { + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Bytes() + if len(*x) == 0 { + return conv.Zero() + } + return protoreflect.ValueOfString(string(*x)) + } + } + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Bytes() + if *x == nil { + return conv.Zero() + } + return protoreflect.ValueOfBytes(*x) + } + } + panic("unexpected protobuf kind: " + ft.Kind().String()) +} + +func getterForDirectScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField, conv Converter, fieldOffset offset) func(p pointer) protoreflect.Value { + ft := fs.Type + if fd.Kind() == protoreflect.EnumKind { + // Enums for non nullable types. 
+ return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + return conv.PBValueOf(rv) + } + } + switch ft.Kind() { + case reflect.Bool: + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Bool() + return protoreflect.ValueOfBool(*x) + } + case reflect.Int32: + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Int32() + return protoreflect.ValueOfInt32(*x) + } + case reflect.Uint32: + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Uint32() + return protoreflect.ValueOfUint32(*x) + } + case reflect.Int64: + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Int64() + return protoreflect.ValueOfInt64(*x) + } + case reflect.Uint64: + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Uint64() + return protoreflect.ValueOfUint64(*x) + } + case reflect.Float32: + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Float32() + return protoreflect.ValueOfFloat32(*x) + } + case reflect.Float64: + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Float64() + return protoreflect.ValueOfFloat64(*x) + } + case reflect.String: + if fd.Kind() == protoreflect.BytesKind { + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).String() + if len(*x) == 0 { + return protoreflect.ValueOfBytes(nil) + } + return protoreflect.ValueOfBytes([]byte(*x)) + } + } + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).String() + return protoreflect.ValueOfString(*x) + } + case reflect.Slice: + if fd.Kind() == protoreflect.StringKind { + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Bytes() + return protoreflect.ValueOfString(string(*x)) + } + } + return func(p pointer) protoreflect.Value { + if p.IsNil() { + return conv.Zero() + } + x := p.Apply(fieldOffset).Bytes() + return protoreflect.ValueOfBytes(*x) + } + } + panic("unexpected protobuf kind: " + ft.Kind().String()) +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go index 79e186667..62f8bf663 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go @@ -8,6 +8,8 @@ import ( "reflect" "sync/atomic" "unsafe" + + "google.golang.org/protobuf/internal/protolazy" ) const UnsafeEnabled = true @@ -20,7 +22,7 @@ type Pointer unsafe.Pointer type offset uintptr // offsetOf returns a field offset for the struct field. 
-func offsetOf(f reflect.StructField, x exporter) offset { +func offsetOf(f reflect.StructField) offset { return offset(f.Offset) } @@ -109,8 +111,14 @@ func (p pointer) StringSlice() *[]string { return (*[]string)(p.p func (p pointer) Bytes() *[]byte { return (*[]byte)(p.p) } func (p pointer) BytesPtr() **[]byte { return (**[]byte)(p.p) } func (p pointer) BytesSlice() *[][]byte { return (*[][]byte)(p.p) } -func (p pointer) WeakFields() *weakFields { return (*weakFields)(p.p) } func (p pointer) Extensions() *map[int32]ExtensionField { return (*map[int32]ExtensionField)(p.p) } +func (p pointer) LazyInfoPtr() **protolazy.XXX_lazyUnmarshalInfo { + return (**protolazy.XXX_lazyUnmarshalInfo)(p.p) +} + +func (p pointer) PresenceInfo() presence { + return presence{P: p.p} +} func (p pointer) Elem() pointer { return pointer{p: *(*unsafe.Pointer)(p.p)} diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe_opaque.go new file mode 100644 index 000000000..38aa7b7dc --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe_opaque.go @@ -0,0 +1,42 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "sync/atomic" + "unsafe" +) + +func (p pointer) AtomicGetPointer() pointer { + return pointer{p: atomic.LoadPointer((*unsafe.Pointer)(p.p))} +} + +func (p pointer) AtomicSetPointer(v pointer) { + atomic.StorePointer((*unsafe.Pointer)(p.p), v.p) +} + +func (p pointer) AtomicSetNilPointer() { + atomic.StorePointer((*unsafe.Pointer)(p.p), unsafe.Pointer(nil)) +} + +func (p pointer) AtomicSetPointerIfNil(v pointer) pointer { + if atomic.CompareAndSwapPointer((*unsafe.Pointer)(p.p), unsafe.Pointer(nil), v.p) { + return v + } + return pointer{p: atomic.LoadPointer((*unsafe.Pointer)(p.p))} +} + +type atomicV1MessageInfo struct{ p Pointer } + +func (mi *atomicV1MessageInfo) Get() Pointer { + return Pointer(atomic.LoadPointer((*unsafe.Pointer)(&mi.p))) +} + +func (mi *atomicV1MessageInfo) SetIfNil(p Pointer) Pointer { + if atomic.CompareAndSwapPointer((*unsafe.Pointer)(&mi.p), nil, unsafe.Pointer(p)) { + return p + } + return mi.Get() +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/presence.go b/vendor/google.golang.org/protobuf/internal/impl/presence.go new file mode 100644 index 000000000..914cb1ded --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/presence.go @@ -0,0 +1,142 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "sync/atomic" + "unsafe" +) + +// presenceSize represents the size of a presence set, which should be the largest index of the set+1 +type presenceSize uint32 + +// presence is the internal representation of the bitmap array in a generated protobuf +type presence struct { + // This is a pointer to the beginning of an array of uint32 + P unsafe.Pointer +} + +func (p presence) toElem(num uint32) (ret *uint32) { + const ( + bitsPerByte = 8 + siz = unsafe.Sizeof(*ret) + ) + // p.P points to an array of uint32, num is the bit in this array that the + // caller wants to check/manipulate. Calculate the index in the array that + // contains this specific bit. E.g.: 76 / 32 = 2 (integer division). 
+	offset := uintptr(num) / (siz * bitsPerByte) * siz
+	return (*uint32)(unsafe.Pointer(uintptr(p.P) + offset))
+}
+
+// Present checks for the presence of a specific field number in a presence set.
+func (p presence) Present(num uint32) bool {
+	if p.P == nil {
+		return false
+	}
+	return Export{}.Present(p.toElem(num), num)
+}
+
+// SetPresent adds presence for a specific field number in a presence set.
+func (p presence) SetPresent(num uint32, size presenceSize) {
+	Export{}.SetPresent(p.toElem(num), num, uint32(size))
+}
+
+// SetPresentUnatomic adds presence for a specific field number in a presence set without using
+// atomic operations. Only to be called during unmarshaling.
+func (p presence) SetPresentUnatomic(num uint32, size presenceSize) {
+	Export{}.SetPresentNonAtomic(p.toElem(num), num, uint32(size))
+}
+
+// ClearPresent removes presence for a specific field number in a presence set.
+func (p presence) ClearPresent(num uint32) {
+	Export{}.ClearPresent(p.toElem(num), num)
+}
+
+// LoadPresenceCache (together with PresentInCache) allows for a
+// cached version of checking for presence without re-reading the word
+// for every field. It is optimized for efficiency and assumes no
+// simultaneous mutation of the presence set (or at least does not have
+// a problem with simultaneous mutation giving inconsistent results).
+func (p presence) LoadPresenceCache() (current uint32) {
+	if p.P == nil {
+		return 0
+	}
+	return atomic.LoadUint32((*uint32)(p.P))
+}
+
+// PresentInCache reads presence from a cached word in the presence
+// bitmap. It caches a new word if the requested bit falls outside the
+// cached word. This is for really fast iteration through bitmaps in cases
+// where we either know that the bitmap will not be altered, or we
+// don't care about inconsistencies caused by simultaneous writes.
+func (p presence) PresentInCache(num uint32, cachedElement *uint32, current *uint32) bool {
+	if num/32 != *cachedElement {
+		o := uintptr(num/32) * unsafe.Sizeof(uint32(0))
+		q := (*uint32)(unsafe.Pointer(uintptr(p.P) + o))
+		*current = atomic.LoadUint32(q)
+		*cachedElement = num / 32
+	}
+	return (*current & (1 << (num % 32))) > 0
+}
+
+// AnyPresent checks if any field is marked as present in the bitmap.
+func (p presence) AnyPresent(size presenceSize) bool {
+	n := uintptr((size + 31) / 32)
+	for j := uintptr(0); j < n; j++ {
+		o := j * unsafe.Sizeof(uint32(0))
+		q := (*uint32)(unsafe.Pointer(uintptr(p.P) + o))
+		b := atomic.LoadUint32(q)
+		if b > 0 {
+			return true
+		}
+	}
+	return false
+}
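For intuition about the word/bit arithmetic used by toElem and PresentInCache above, here is a small self-contained sketch with an ordinary slice standing in for the unsafe uint32 array (names invented for illustration):

package main

import "fmt"

// wordAndMask mirrors the arithmetic above: presence bit num lives in
// 32-bit word num/32, at bit position num%32 within that word.
func wordAndMask(num uint32) (word int, mask uint32) {
	return int(num / 32), 1 << (num % 32)
}

func main() {
	bitmap := make([]uint32, 3) // room for 96 presence bits

	w, m := wordAndMask(76) // 76/32 = 2, 76%32 = 12
	bitmap[w] |= m          // mark bit 76 present

	fmt.Println(w, m)             // 2 4096
	fmt.Println(bitmap[w]&m != 0) // true: bit 76 is set
	w, m = wordAndMask(75)
	fmt.Println(bitmap[w]&m != 0) // false: bit 75 is not
}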
+// toRaceDetectData finds the preceding RaceDetectHookData in a
+// message by using pointer arithmetic. As the type of the presence
+// set (bitmap) varies with the number of fields in the protobuf, we
+// cannot have a struct type containing the array and the
+// RaceDetectHookData. Instead, the RaceDetectHookData is placed
+// immediately before the bitmap array, and we find it by walking
+// backwards in the struct.
+//
+// This method is only called from the race-detect version of the code,
+// so RaceDetectHookData is never an empty struct.
+func (p presence) toRaceDetectData() *RaceDetectHookData {
+	var template struct {
+		d RaceDetectHookData
+		a [1]uint32
+	}
+	o := (uintptr(unsafe.Pointer(&template.a)) - uintptr(unsafe.Pointer(&template.d)))
+	return (*RaceDetectHookData)(unsafe.Pointer(uintptr(p.P) - o))
+}
+
+func atomicLoadShadowPresence(p **[]byte) *[]byte {
+	return (*[]byte)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreShadowPresence(p **[]byte, v *[]byte) {
+	atomic.CompareAndSwapPointer((*unsafe.Pointer)(unsafe.Pointer(p)), nil, unsafe.Pointer(v))
+}
+
+// findPointerToRaceDetectData finds the preceding RaceDetectHookData
+// in a message by using pointer arithmetic. For the methods called
+// directly from generated code, we don't have a pointer to the
+// beginning of the presence set, but a pointer inside the array. As
+// we know the index of the bit we're manipulating (num), we can
+// calculate which element of the array ptr is pointing to. With that
+// information we find the preceding RaceDetectHookData and can
+// manipulate the shadow bitmap.
+//
+// This method is only called from the race-detect version of the
+// code, so RaceDetectHookData is never an empty struct.
+func findPointerToRaceDetectData(ptr *uint32, num uint32) *RaceDetectHookData {
+	var template struct {
+		d RaceDetectHookData
+		a [1]uint32
+	}
+	o := (uintptr(unsafe.Pointer(&template.a)) - uintptr(unsafe.Pointer(&template.d))) + uintptr(num/32)*unsafe.Sizeof(uint32(0))
+	return (*RaceDetectHookData)(unsafe.Pointer(uintptr(unsafe.Pointer(ptr)) - o))
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/validate.go b/vendor/google.golang.org/protobuf/internal/impl/validate.go
index a24e6bbd7..7b2995dde 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/validate.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/validate.go
@@ -37,6 +37,10 @@ const (
 	// ValidationValid indicates that unmarshaling the message will succeed.
 	ValidationValid
+
+	// ValidationWrongWireType indicates that a validated field does not have
+	// the expected wire type.
+	ValidationWrongWireType
 )
 
 func (v ValidationStatus) String() string {
@@ -149,11 +153,23 @@ func newValidationInfo(fd protoreflect.FieldDescriptor, ft reflect.Type) validat
 		switch fd.Kind() {
 		case protoreflect.MessageKind:
 			vi.typ = validationTypeMessage
+
+			if ft.Kind() == reflect.Ptr {
+				// Repeated opaque message fields are *[]*T.
+				ft = ft.Elem()
+			}
+
 			if ft.Kind() == reflect.Slice {
 				vi.mi = getMessageInfo(ft.Elem())
 			}
 		case protoreflect.GroupKind:
 			vi.typ = validationTypeGroup
+
+			if ft.Kind() == reflect.Ptr {
+				// Repeated opaque message fields are *[]*T.
+				ft = ft.Elem()
+			}
+
 			if ft.Kind() == reflect.Slice {
 				vi.mi = getMessageInfo(ft.Elem())
 			}
@@ -195,9 +211,7 @@ func newValidationInfo(fd protoreflect.FieldDescriptor, ft reflect.Type) validat
 		switch fd.Kind() {
 		case protoreflect.MessageKind:
 			vi.typ = validationTypeMessage
-			if !fd.IsWeak() {
-				vi.mi = getMessageInfo(ft)
-			}
+			vi.mi = getMessageInfo(ft)
 		case protoreflect.GroupKind:
 			vi.typ = validationTypeGroup
 			vi.mi = getMessageInfo(ft)
@@ -304,26 +318,6 @@ State:
 		}
 		if f != nil {
 			vi = f.validation
-			if vi.typ == validationTypeMessage && vi.mi == nil {
-				// Probable weak field.
-				//
-				// TODO: Consider storing the results of this lookup somewhere
-				// rather than recomputing it on every validation.
- fd := st.mi.Desc.Fields().ByNumber(num) - if fd == nil || !fd.IsWeak() { - break - } - messageName := fd.Message().FullName() - messageType, err := protoregistry.GlobalTypes.FindMessageByName(messageName) - switch err { - case nil: - vi.mi, _ = messageType.(*MessageInfo) - case protoregistry.NotFound: - vi.typ = validationTypeBytes - default: - return out, ValidationUnknown - } - } break } // Possible extension field. diff --git a/vendor/google.golang.org/protobuf/internal/impl/weak.go b/vendor/google.golang.org/protobuf/internal/impl/weak.go deleted file mode 100644 index eb79a7ba9..000000000 --- a/vendor/google.golang.org/protobuf/internal/impl/weak.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package impl - -import ( - "fmt" - - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" -) - -// weakFields adds methods to the exported WeakFields type for internal use. -// -// The exported type is an alias to an unnamed type, so methods can't be -// defined directly on it. -type weakFields WeakFields - -func (w weakFields) get(num protoreflect.FieldNumber) (protoreflect.ProtoMessage, bool) { - m, ok := w[int32(num)] - return m, ok -} - -func (w *weakFields) set(num protoreflect.FieldNumber, m protoreflect.ProtoMessage) { - if *w == nil { - *w = make(weakFields) - } - (*w)[int32(num)] = m -} - -func (w *weakFields) clear(num protoreflect.FieldNumber) { - delete(*w, int32(num)) -} - -func (Export) HasWeak(w WeakFields, num protoreflect.FieldNumber) bool { - _, ok := w[int32(num)] - return ok -} - -func (Export) ClearWeak(w *WeakFields, num protoreflect.FieldNumber) { - delete(*w, int32(num)) -} - -func (Export) GetWeak(w WeakFields, num protoreflect.FieldNumber, name protoreflect.FullName) protoreflect.ProtoMessage { - if m, ok := w[int32(num)]; ok { - return m - } - mt, _ := protoregistry.GlobalTypes.FindMessageByName(name) - if mt == nil { - panic(fmt.Sprintf("message %v for weak field is not linked in", name)) - } - return mt.Zero().Interface() -} - -func (Export) SetWeak(w *WeakFields, num protoreflect.FieldNumber, name protoreflect.FullName, m protoreflect.ProtoMessage) { - if m != nil { - mt, _ := protoregistry.GlobalTypes.FindMessageByName(name) - if mt == nil { - panic(fmt.Sprintf("message %v for weak field is not linked in", name)) - } - if mt != m.ProtoReflect().Type() { - panic(fmt.Sprintf("invalid message type for weak field: got %T, want %T", m, mt.Zero().Interface())) - } - } - if m == nil || !m.ProtoReflect().IsValid() { - delete(*w, int32(num)) - return - } - if *w == nil { - *w = make(weakFields) - } - (*w)[int32(num)] = m -} diff --git a/vendor/google.golang.org/protobuf/internal/protolazy/bufferreader.go b/vendor/google.golang.org/protobuf/internal/protolazy/bufferreader.go new file mode 100644 index 000000000..82e5cab4a --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/protolazy/bufferreader.go @@ -0,0 +1,364 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+// Helper code for parsing a protocol buffer
+
+package protolazy
+
+import (
+	"errors"
+	"fmt"
+	"io"
+
+	"google.golang.org/protobuf/encoding/protowire"
+)
+
+// BufferReader is a structure encapsulating a protobuf and a current position
+type BufferReader struct {
+	Buf []byte
+	Pos int
+}
+
+// NewBufferReader creates a new BufferReader from a protobuf
+func NewBufferReader(buf []byte) BufferReader {
+	return BufferReader{Buf: buf, Pos: 0}
+}
+
+var errOutOfBounds = errors.New("protobuf decoding: out of bounds")
+var errOverflow = errors.New("proto: integer overflow")
+
+func (b *BufferReader) DecodeVarintSlow() (x uint64, err error) {
+	i := b.Pos
+	l := len(b.Buf)
+
+	for shift := uint(0); shift < 64; shift += 7 {
+		if i >= l {
+			err = io.ErrUnexpectedEOF
+			return
+		}
+		v := b.Buf[i]
+		i++
+		x |= (uint64(v) & 0x7F) << shift
+		if v < 0x80 {
+			b.Pos = i
+			return
+		}
+	}
+
+	// The number is too large to represent in a 64-bit value.
+	err = errOverflow
+	return
+}
+
+// DecodeVarint decodes a varint at the current position
+func (b *BufferReader) DecodeVarint() (x uint64, err error) {
+	i := b.Pos
+	buf := b.Buf
+
+	if i >= len(buf) {
+		return 0, io.ErrUnexpectedEOF
+	} else if buf[i] < 0x80 {
+		b.Pos++
+		return uint64(buf[i]), nil
+	} else if len(buf)-i < 10 {
+		return b.DecodeVarintSlow()
+	}
+
+	var v uint64
+	// we already checked the first byte
+	x = uint64(buf[i]) & 127
+	i++
+
+	v = uint64(buf[i])
+	i++
+	x |= (v & 127) << 7
+	if v < 128 {
+		goto done
+	}
+
+	v = uint64(buf[i])
+	i++
+	x |= (v & 127) << 14
+	if v < 128 {
+		goto done
+	}
+
+	v = uint64(buf[i])
+	i++
+	x |= (v & 127) << 21
+	if v < 128 {
+		goto done
+	}
+
+	v = uint64(buf[i])
+	i++
+	x |= (v & 127) << 28
+	if v < 128 {
+		goto done
+	}
+
+	v = uint64(buf[i])
+	i++
+	x |= (v & 127) << 35
+	if v < 128 {
+		goto done
+	}
+
+	v = uint64(buf[i])
+	i++
+	x |= (v & 127) << 42
+	if v < 128 {
+		goto done
+	}
+
+	v = uint64(buf[i])
+	i++
+	x |= (v & 127) << 49
+	if v < 128 {
+		goto done
+	}
+
+	v = uint64(buf[i])
+	i++
+	x |= (v & 127) << 56
+	if v < 128 {
+		goto done
+	}
+
+	v = uint64(buf[i])
+	i++
+	x |= (v & 127) << 63
+	if v < 128 {
+		goto done
+	}
+
+	return 0, errOverflow
+
+done:
+	b.Pos = i
+	return
+}
+
+// DecodeVarint32 decodes a varint32 at the current position
+func (b *BufferReader) DecodeVarint32() (x uint32, err error) {
+	i := b.Pos
+	buf := b.Buf
+
+	if i >= len(buf) {
+		return 0, io.ErrUnexpectedEOF
+	} else if buf[i] < 0x80 {
+		b.Pos++
+		return uint32(buf[i]), nil
+	} else if len(buf)-i < 5 {
+		v, err := b.DecodeVarintSlow()
+		return uint32(v), err
+	}
+
+	var v uint32
+	// we already checked the first byte
+	x = uint32(buf[i]) & 127
+	i++
+
+	v = uint32(buf[i])
+	i++
+	x |= (v & 127) << 7
+	if v < 128 {
+		goto done
+	}
+
+	v = uint32(buf[i])
+	i++
+	x |= (v & 127) << 14
+	if v < 128 {
+		goto done
+	}
+
+	v = uint32(buf[i])
+	i++
+	x |= (v & 127) << 21
+	if v < 128 {
+		goto done
+	}
+
+	v = uint32(buf[i])
+	i++
+	x |= (v & 127) << 28
+	if v < 128 {
+		goto done
+	}
+
+	return 0, errOverflow
+
+done:
+	b.Pos = i
+	return
+}
+
+// SkipValue skips a value in the protobuf, based on the specified tag
+func (b *BufferReader) SkipValue(tag uint32) (err error) {
+	wireType := tag & 0x7
+	switch protowire.Type(wireType) {
+	case protowire.VarintType:
+		err = b.SkipVarint()
+	case protowire.Fixed64Type:
+		err = b.SkipFixed64()
+	case protowire.BytesType:
+		var n uint32
+		n, err = b.DecodeVarint32()
+		if err == nil {
+			err = b.Skip(int(n))
+		}
+	case protowire.StartGroupType:
+		err = b.SkipGroup(tag)
+	case protowire.Fixed32Type:
+		err = b.SkipFixed32()
+	default:
+		err = fmt.Errorf("Unexpected wire type (%d)", wireType)
+	}
+	return
+}
+
+// SkipGroup skips a group with the specified tag. It executes efficiently using a tag stack
+func (b *BufferReader) SkipGroup(tag uint32) (err error) {
+	tagStack := make([]uint32, 0, 16)
+	tagStack = append(tagStack, tag)
+	var n uint32
+	for len(tagStack) > 0 {
+		tag, err = b.DecodeVarint32()
+		if err != nil {
+			return err
+		}
+		switch protowire.Type(tag & 0x7) {
+		case protowire.VarintType:
+			err = b.SkipVarint()
+		case protowire.Fixed64Type:
+			err = b.Skip(8)
+		case protowire.BytesType:
+			n, err = b.DecodeVarint32()
+			if err == nil {
+				err = b.Skip(int(n))
+			}
+		case protowire.StartGroupType:
+			tagStack = append(tagStack, tag)
+		case protowire.Fixed32Type:
+			err = b.SkipFixed32()
+		case protowire.EndGroupType:
+			if protoFieldNumber(tagStack[len(tagStack)-1]) == protoFieldNumber(tag) {
+				tagStack = tagStack[:len(tagStack)-1]
+			} else {
+				err = fmt.Errorf("end group tag %d does not match begin group tag %d at pos %d",
+					protoFieldNumber(tag), protoFieldNumber(tagStack[len(tagStack)-1]), b.Pos)
+			}
+		}
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// SkipVarint efficiently skips a varint
+func (b *BufferReader) SkipVarint() (err error) {
+	i := b.Pos
+
+	if len(b.Buf)-i < 10 {
+		// Use DecodeVarintSlow() to check for buffer overflow, but ignore result
+		if _, err := b.DecodeVarintSlow(); err != nil {
+			return err
+		}
+		return nil
+	}
+
+	if b.Buf[i] < 0x80 {
+		goto out
+	}
+	i++
+
+	if b.Buf[i] < 0x80 {
+		goto out
+	}
+	i++
+
+	if b.Buf[i] < 0x80 {
+		goto out
+	}
+	i++
+
+	if b.Buf[i] < 0x80 {
+		goto out
+	}
+	i++
+
+	if b.Buf[i] < 0x80 {
+		goto out
+	}
+	i++
+
+	if b.Buf[i] < 0x80 {
+		goto out
+	}
+	i++
+
+	if b.Buf[i] < 0x80 {
+		goto out
+	}
+	i++
+
+	if b.Buf[i] < 0x80 {
+		goto out
+	}
+	i++
+
+	if b.Buf[i] < 0x80 {
+		goto out
+	}
+	i++
+
+	if b.Buf[i] < 0x80 {
+		goto out
+	}
+	return errOverflow
+
+out:
+	b.Pos = i + 1
+	return nil
+}
+
+// Skip skips the specified number of bytes
+func (b *BufferReader) Skip(n int) (err error) {
+	if len(b.Buf) < b.Pos+n {
+		return io.ErrUnexpectedEOF
+	}
+	b.Pos += n
+	return
+}
+
+// SkipFixed64 skips a fixed64
+func (b *BufferReader) SkipFixed64() (err error) {
+	return b.Skip(8)
+}
+
+// SkipFixed32 skips a fixed32
+func (b *BufferReader) SkipFixed32() (err error) {
+	return b.Skip(4)
+}
+
+// SkipBytes skips a set of bytes
+func (b *BufferReader) SkipBytes() (err error) {
+	n, err := b.DecodeVarint32()
+	if err != nil {
+		return err
+	}
+	return b.Skip(int(n))
+}
+
+// Done returns whether we are at the end of the protobuf
+func (b *BufferReader) Done() bool {
+	return b.Pos == len(b.Buf)
+}
+
+// Remaining returns how many bytes remain
+func (b *BufferReader) Remaining() int {
+	return len(b.Buf) - b.Pos
+}
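The unrolled decoders in bufferreader.go above are performance-motivated; semantically they are the ordinary base-128 varint loop. As a cross-check, here is a minimal loop-based equivalent decoding the wire bytes for field 1 = varint 150 (illustrative only; it does not use the internal package):

package main

import "fmt"

// decodeVarint is the plain loop that DecodeVarint/DecodeVarintSlow above
// implement with unrolling: 7 payload bits per byte, least-significant
// group first, high bit set on every byte except the last.
func decodeVarint(buf []byte) (x uint64, n int) {
	for shift := uint(0); shift < 64; shift += 7 {
		if n >= len(buf) {
			return 0, 0 // truncated input
		}
		b := buf[n]
		n++
		x |= uint64(b&0x7F) << shift
		if b < 0x80 {
			return x, n
		}
	}
	return 0, 0 // more than 64 bits: overflow
}

func main() {
	// Tag for field 1, varint wire type: 1<<3|0 = 0x08.
	// Value 150 encodes as 0x96 0x01.
	buf := []byte{0x08, 0x96, 0x01}
	tag, n := decodeVarint(buf)
	val, _ := decodeVarint(buf[n:])
	fmt.Println(tag>>3, tag&0x7, val) // 1 0 150
}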
diff --git a/vendor/google.golang.org/protobuf/internal/protolazy/lazy.go b/vendor/google.golang.org/protobuf/internal/protolazy/lazy.go
new file mode 100644
index 000000000..ff4d4834b
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/protolazy/lazy.go
@@ -0,0 +1,359 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package protolazy contains internal data structures for lazy message decoding.
+package protolazy
+
+import (
+	"fmt"
+	"sort"
+
+	"google.golang.org/protobuf/encoding/protowire"
+	piface "google.golang.org/protobuf/runtime/protoiface"
+)
+
+// IndexEntry is the structure for an index of the fields in a message of a
+// proto (not descending to sub-messages)
+type IndexEntry struct {
+	FieldNum uint32
+	// first byte of this tag/field
+	Start uint32
+	// first byte after a contiguous sequence of bytes for this tag/field, which could
+	// include a single encoding of the field, or multiple encodings for the field
+	End uint32
+	// True if this protobuf segment includes multiple encodings of the field
+	MultipleContiguous bool
+}
+
+// XXX_lazyUnmarshalInfo has information about a particular lazily decoded message
+//
+// Deprecated: Do not use. This will be deleted in the near future.
+type XXX_lazyUnmarshalInfo struct {
+	// Index of fields and their positions in the protobuf for this
+	// message. Make index be a pointer to a slice so it can be updated
+	// atomically. The index pointer is only set once (lazily when/if
+	// the index is first needed), and must always be SET and LOADED
+	// ATOMICALLY.
+	index *[]IndexEntry
+	// The protobuf associated with this lazily decoded message. It is
+	// only set during proto.Unmarshal(). It doesn't need to be set and
+	// loaded atomically, since any simultaneous set (Unmarshal) and read
+	// (during a get) would already be a race in the app code.
+	Protobuf []byte
+	// The flags present when Unmarshal was originally called for this particular message
+	unmarshalFlags piface.UnmarshalInputFlags
+}
+
+// The Buffer and SetBuffer methods let v2/internal/impl interact with
+// XXX_lazyUnmarshalInfo via an interface, to avoid an import cycle.
+
+// Buffer returns the lazy unmarshal buffer.
+//
+// Deprecated: Do not use. This will be deleted in the near future.
+func (lazy *XXX_lazyUnmarshalInfo) Buffer() []byte {
+	return lazy.Protobuf
+}
+
+// SetBuffer sets the lazy unmarshal buffer.
+//
+// Deprecated: Do not use. This will be deleted in the near future.
+func (lazy *XXX_lazyUnmarshalInfo) SetBuffer(b []byte) {
+	lazy.Protobuf = b
+}
+
+// SetUnmarshalFlags is called to set a copy of the original unmarshalInputFlags.
+// The flags should reflect how Unmarshal was called.
+func (lazy *XXX_lazyUnmarshalInfo) SetUnmarshalFlags(f piface.UnmarshalInputFlags) {
+	lazy.unmarshalFlags = f
+}
+
+// UnmarshalFlags returns the original unmarshalInputFlags.
+func (lazy *XXX_lazyUnmarshalInfo) UnmarshalFlags() piface.UnmarshalInputFlags {
+	return lazy.unmarshalFlags
+}
+
+// AllowedPartial returns true if the user originally unmarshalled this message with
+// AllowPartial set to true.
+func (lazy *XXX_lazyUnmarshalInfo) AllowedPartial() bool {
+	return (lazy.unmarshalFlags & piface.UnmarshalCheckRequired) == 0
+}
+
+func protoFieldNumber(tag uint32) uint32 {
+	return tag >> 3
+}
+
+// buildIndex builds an index of the specified protobuf, returning the index
+// array and an error.
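Before the buildIndex implementation that follows, it may help to see the shape of the index it produces. A rough equivalent using only the public protowire package (hand-written here for illustration; the real code below also tracks MultipleContiguous and sorts out-of-order fields):

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

// entry is a simplified stand-in for IndexEntry above.
type entry struct {
	FieldNum   uint32
	Start, End uint32
}

func main() {
	// Field 1 = varint 150, field 2 = bytes "abc".
	buf := []byte{0x08, 0x96, 0x01, 0x12, 0x03, 'a', 'b', 'c'}

	var index []entry
	for pos := 0; pos < len(buf); {
		num, _, n := protowire.ConsumeField(buf[pos:])
		if n < 0 {
			panic(protowire.ParseError(n))
		}
		index = append(index, entry{uint32(num), uint32(pos), uint32(pos + n)})
		pos += n
	}
	fmt.Println(index) // [{1 0 3} {2 3 8}]
}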
+func buildIndex(buf []byte) ([]IndexEntry, error) { + index := make([]IndexEntry, 0, 16) + var lastProtoFieldNum uint32 + var outOfOrder bool + + var r BufferReader = NewBufferReader(buf) + + for !r.Done() { + var tag uint32 + var err error + var curPos = r.Pos + // INLINED: tag, err = r.DecodeVarint32() + { + i := r.Pos + buf := r.Buf + + if i >= len(buf) { + return nil, errOutOfBounds + } else if buf[i] < 0x80 { + r.Pos++ + tag = uint32(buf[i]) + } else if r.Remaining() < 5 { + var v uint64 + v, err = r.DecodeVarintSlow() + tag = uint32(v) + } else { + var v uint32 + // we already checked the first byte + tag = uint32(buf[i]) & 127 + i++ + + v = uint32(buf[i]) + i++ + tag |= (v & 127) << 7 + if v < 128 { + goto done + } + + v = uint32(buf[i]) + i++ + tag |= (v & 127) << 14 + if v < 128 { + goto done + } + + v = uint32(buf[i]) + i++ + tag |= (v & 127) << 21 + if v < 128 { + goto done + } + + v = uint32(buf[i]) + i++ + tag |= (v & 127) << 28 + if v < 128 { + goto done + } + + return nil, errOutOfBounds + + done: + r.Pos = i + } + } + // DONE: tag, err = r.DecodeVarint32() + + fieldNum := protoFieldNumber(tag) + if fieldNum < lastProtoFieldNum { + outOfOrder = true + } + + // Skip the current value -- will skip over an entire group as well. + // INLINED: err = r.SkipValue(tag) + wireType := tag & 0x7 + switch protowire.Type(wireType) { + case protowire.VarintType: + // INLINED: err = r.SkipVarint() + i := r.Pos + + if len(r.Buf)-i < 10 { + // Use DecodeVarintSlow() to skip while + // checking for buffer overflow, but ignore result + _, err = r.DecodeVarintSlow() + goto out2 + } + if r.Buf[i] < 0x80 { + goto out + } + i++ + + if r.Buf[i] < 0x80 { + goto out + } + i++ + + if r.Buf[i] < 0x80 { + goto out + } + i++ + + if r.Buf[i] < 0x80 { + goto out + } + i++ + + if r.Buf[i] < 0x80 { + goto out + } + i++ + + if r.Buf[i] < 0x80 { + goto out + } + i++ + + if r.Buf[i] < 0x80 { + goto out + } + i++ + + if r.Buf[i] < 0x80 { + goto out + } + i++ + + if r.Buf[i] < 0x80 { + goto out + } + i++ + + if r.Buf[i] < 0x80 { + goto out + } + return nil, errOverflow + out: + r.Pos = i + 1 + // DONE: err = r.SkipVarint() + case protowire.Fixed64Type: + err = r.SkipFixed64() + case protowire.BytesType: + var n uint32 + n, err = r.DecodeVarint32() + if err == nil { + err = r.Skip(int(n)) + } + case protowire.StartGroupType: + err = r.SkipGroup(tag) + case protowire.Fixed32Type: + err = r.SkipFixed32() + default: + err = fmt.Errorf("Unexpected wire type (%d)", wireType) + } + // DONE: err = r.SkipValue(tag) + + out2: + if err != nil { + return nil, err + } + if fieldNum != lastProtoFieldNum { + index = append(index, IndexEntry{FieldNum: fieldNum, + Start: uint32(curPos), + End: uint32(r.Pos)}, + ) + } else { + index[len(index)-1].End = uint32(r.Pos) + index[len(index)-1].MultipleContiguous = true + } + lastProtoFieldNum = fieldNum + } + if outOfOrder { + sort.Slice(index, func(i, j int) bool { + return index[i].FieldNum < index[j].FieldNum || + (index[i].FieldNum == index[j].FieldNum && + index[i].Start < index[j].Start) + }) + } + return index, nil +} + +func (lazy *XXX_lazyUnmarshalInfo) SizeField(num uint32) (size int) { + start, end, found, _, multipleEntries := lazy.FindFieldInProto(num) + if multipleEntries != nil { + for _, entry := range multipleEntries { + size += int(entry.End - entry.Start) + } + return size + } + if !found { + return 0 + } + return int(end - start) +} + +func (lazy *XXX_lazyUnmarshalInfo) AppendField(b []byte, num uint32) ([]byte, bool) { + start, end, found, _, multipleEntries := 
lazy.FindFieldInProto(num)
+	if multipleEntries != nil {
+		for _, entry := range multipleEntries {
+			b = append(b, lazy.Protobuf[entry.Start:entry.End]...)
+		}
+		return b, true
+	}
+	if !found {
+		return nil, false
+	}
+	b = append(b, lazy.Protobuf[start:end]...)
+	return b, true
+}
+
+func (lazy *XXX_lazyUnmarshalInfo) SetIndex(index []IndexEntry) {
+	atomicStoreIndex(&lazy.index, &index)
+}
+
+// FindFieldInProto looks for field fieldNum in lazyUnmarshalInfo information
+// (including protobuf), returns startOffset/endOffset/found.
+func (lazy *XXX_lazyUnmarshalInfo) FindFieldInProto(fieldNum uint32) (start, end uint32, found, multipleContiguous bool, multipleEntries []IndexEntry) {
+	if lazy.Protobuf == nil {
+		// There is no backing protobuf for this message -- it was made from a builder
+		return 0, 0, false, false, nil
+	}
+	index := atomicLoadIndex(&lazy.index)
+	if index == nil {
+		r, err := buildIndex(lazy.Protobuf)
+		if err != nil {
+			panic(fmt.Sprintf("findFieldInfo: error building index when looking for field %d: %v", fieldNum, err))
+		}
+		// lazy.index is a pointer to the slice returned by buildIndex
+		index = &r
+		atomicStoreIndex(&lazy.index, index)
+	}
+	return lookupField(index, fieldNum)
+}
+
+// lookupField uses the index to return the offset at which the indicated
+// field starts, the offset immediately after the field ends (including all
+// instances of a repeated field), and bools indicating whether the field was
+// found and whether there are multiple encodings of the field in the byte range.
+//
+// To handle the uncommon case where there are repeated encodings for the same
+// field which are not consecutive in the protobuf (so we need to return
+// multiple start/end offsets), we also return a slice multipleEntries. If
+// multipleEntries is non-nil, then multiple entries were found, and the
+// values in the slice should be used, rather than start/end/found.
+func lookupField(indexp *[]IndexEntry, fieldNum uint32) (start, end uint32, found bool, multipleContiguous bool, multipleEntries []IndexEntry) {
+	// The pointer indexp to the index was already loaded atomically.
+	// The slice is uniquely associated with the pointer, so it doesn't
+	// need to be loaded atomically.
+	index := *indexp
+	for i, entry := range index {
+		if fieldNum == entry.FieldNum {
+			if i < len(index)-1 && entry.FieldNum == index[i+1].FieldNum {
+				// Handle the uncommon case where there are
+				// repeated entries for the same field which
+				// are not contiguous in the protobuf.
+				multiple := make([]IndexEntry, 1, 2)
+				multiple[0] = IndexEntry{fieldNum, entry.Start, entry.End, entry.MultipleContiguous}
+				i++
+				for i < len(index) && index[i].FieldNum == fieldNum {
+					multiple = append(multiple, IndexEntry{fieldNum, index[i].Start, index[i].End, index[i].MultipleContiguous})
+					i++
+				}
+				return 0, 0, false, false, multiple
+
+			}
+			return entry.Start, entry.End, true, entry.MultipleContiguous, nil
+		}
+		if fieldNum < entry.FieldNum {
+			return 0, 0, false, false, nil
+		}
+	}
+	return 0, 0, false, false, nil
+}
diff --git a/vendor/google.golang.org/protobuf/internal/protolazy/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/protolazy/pointer_unsafe.go
new file mode 100644
index 000000000..dc2a64ca6
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/protolazy/pointer_unsafe.go
@@ -0,0 +1,17 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package protolazy
+
+import (
+	"sync/atomic"
+	"unsafe"
+)
+
+func atomicLoadIndex(p **[]IndexEntry) *[]IndexEntry {
+	return (*[]IndexEntry)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreIndex(p **[]IndexEntry, v *[]IndexEntry) {
+	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go
index fb8e15e8d..01efc3303 100644
--- a/vendor/google.golang.org/protobuf/internal/version/version.go
+++ b/vendor/google.golang.org/protobuf/internal/version/version.go
@@ -51,8 +51,8 @@ import (
 //	10. Send out the CL for review and submit it.
 const (
 	Major      = 1
-	Minor      = 35
-	Patch      = 1
+	Minor      = 36
+	Patch      = 5
 	PreRelease = ""
 )
diff --git a/vendor/google.golang.org/protobuf/proto/decode.go b/vendor/google.golang.org/protobuf/proto/decode.go
index d75a6534c..4cbf1aeaf 100644
--- a/vendor/google.golang.org/protobuf/proto/decode.go
+++ b/vendor/google.golang.org/protobuf/proto/decode.go
@@ -8,7 +8,6 @@ import (
 	"google.golang.org/protobuf/encoding/protowire"
 	"google.golang.org/protobuf/internal/encoding/messageset"
 	"google.golang.org/protobuf/internal/errors"
-	"google.golang.org/protobuf/internal/flags"
 	"google.golang.org/protobuf/internal/genid"
 	"google.golang.org/protobuf/internal/pragma"
 	"google.golang.org/protobuf/reflect/protoreflect"
@@ -47,6 +46,12 @@ type UnmarshalOptions struct {
 	// RecursionLimit limits how deeply messages may be nested.
 	// If zero, a default limit is applied.
 	RecursionLimit int
+
+	// NoLazyDecoding turns off lazy decoding, which otherwise is enabled by
+	// default. Lazy decoding only affects submessages (annotated with [lazy =
+	// true] in the .proto file) within messages that use the Opaque API.
+	NoLazyDecoding bool
 }
 
 // Unmarshal parses the wire-format message in b and places the result in m.
@@ -104,6 +109,16 @@ func (o UnmarshalOptions) unmarshal(b []byte, m protoreflect.Message) (out proto
 		if o.DiscardUnknown {
 			in.Flags |= protoiface.UnmarshalDiscardUnknown
 		}
+
+		if !allowPartial {
+			// This does not affect how current unmarshal functions work; it just allows them
+			// to record this for the lazy decoding case.
+			in.Flags |= protoiface.UnmarshalCheckRequired
+		}
+		if o.NoLazyDecoding {
+			in.Flags |= protoiface.UnmarshalNoLazyDecoding
+		}
+
 		out, err = methods.Unmarshal(in)
 	} else {
 		o.RecursionLimit--
@@ -156,10 +171,6 @@ func (o UnmarshalOptions) unmarshalMessageSlow(b []byte, m protoreflect.Message)
 		var err error
 		if fd == nil {
 			err = errUnknown
-		} else if flags.ProtoLegacy {
-			if fd.IsWeak() && fd.Message().IsPlaceholder() {
-				err = errUnknown // weak referent is not linked in
-			}
 		}
 
 		// Parse the field value.
diff --git a/vendor/google.golang.org/protobuf/proto/encode.go b/vendor/google.golang.org/protobuf/proto/encode.go
index 1f847bcc3..f0473c586 100644
--- a/vendor/google.golang.org/protobuf/proto/encode.go
+++ b/vendor/google.golang.org/protobuf/proto/encode.go
@@ -63,7 +63,8 @@ type MarshalOptions struct {
 	//	options (except for UseCachedSize itself).
 	//
 	//	2. The message and all its submessages have not changed in any
-	//	way since the Size call.
+	//	way since the Size call. For lazily decoded messages, accessing
+	//	a message results in decoding the message, which is a change.
 	//
 	// If either of these invariants is violated,
 	// the results are undefined and may include panics or corrupted output.
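The NoLazyDecoding option added to decode.go above is the opt-out knob: under the Opaque API, submessages annotated [lazy = true] are decoded lazily by default. Forcing eager decoding looks like this (structpb is used here only as a convenient stand-in for a generated message):

package main

import (
	"fmt"
	"log"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	src, err := structpb.NewStruct(map[string]any{"k": "v"})
	if err != nil {
		log.Fatal(err)
	}
	b, err := proto.Marshal(src)
	if err != nil {
		log.Fatal(err)
	}

	// Decode everything up front, including submessages that would
	// otherwise be deferred via [lazy = true] under the Opaque API.
	dst := &structpb.Struct{}
	if err := (proto.UnmarshalOptions{NoLazyDecoding: true}).Unmarshal(b, dst); err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(dst.GetFields())) // 1
}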
diff --git a/vendor/google.golang.org/protobuf/proto/size.go b/vendor/google.golang.org/protobuf/proto/size.go index 052fb5ae3..c8675806c 100644 --- a/vendor/google.golang.org/protobuf/proto/size.go +++ b/vendor/google.golang.org/protobuf/proto/size.go @@ -12,11 +12,19 @@ import ( ) // Size returns the size in bytes of the wire-format encoding of m. +// +// Note that Size might return more bytes than Marshal will write in the case of +// lazily decoded messages that arrive in non-minimal wire format: see +// https://protobuf.dev/reference/go/size/ for more details. func Size(m Message) int { return MarshalOptions{}.Size(m) } // Size returns the size in bytes of the wire-format encoding of m. +// +// Note that Size might return more bytes than Marshal will write in the case of +// lazily decoded messages that arrive in non-minimal wire format: see +// https://protobuf.dev/reference/go/size/ for more details. func (o MarshalOptions) Size(m Message) int { // Treat a nil message interface as an empty message; nothing to output. if m == nil { diff --git a/vendor/google.golang.org/protobuf/proto/wrapperopaque.go b/vendor/google.golang.org/protobuf/proto/wrapperopaque.go new file mode 100644 index 000000000..267fd0f1f --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/wrapperopaque.go @@ -0,0 +1,80 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +// ValueOrNil returns nil if has is false, or a pointer to a new variable +// containing the value returned by the specified getter. +// +// This function is similar to the wrappers (proto.Int32(), proto.String(), +// etc.), but is generic (works for any field type) and works with the hasser +// and getter of a field, as opposed to a value. +// +// This is convenient when populating builder fields. +// +// Example: +// +// hop := attr.GetDirectHop() +// injectedRoute := ripb.InjectedRoute_builder{ +// Prefixes: route.GetPrefixes(), +// NextHop: proto.ValueOrNil(hop.HasAddress(), hop.GetAddress), +// } +func ValueOrNil[T any](has bool, getter func() T) *T { + if !has { + return nil + } + v := getter() + return &v +} + +// ValueOrDefault returns the protobuf message val if val is not nil, otherwise +// it returns a pointer to an empty val message. +// +// This function allows for translating code from the old Open Struct API to the +// new Opaque API. +// +// The old Open Struct API represented oneof fields with a wrapper struct: +// +// var signedImg *accountpb.SignedImage +// profile := &accountpb.Profile{ +// // The Avatar oneof will be set, with an empty SignedImage. +// Avatar: &accountpb.Profile_SignedImage{signedImg}, +// } +// +// The new Opaque API treats oneof fields like regular fields, there are no more +// wrapper structs: +// +// var signedImg *accountpb.SignedImage +// profile := &accountpb.Profile{} +// profile.SetSignedImage(signedImg) +// +// For convenience, the Opaque API also offers Builders, which allow for a +// direct translation of struct initialization. However, because Builders use +// nilness to represent field presence (but there is no non-nil wrapper struct +// anymore), Builders cannot distinguish between an unset oneof and a set oneof +// with nil message. 
The above code would need to be translated with help of the +// ValueOrDefault function to retain the same behavior: +// +// var signedImg *accountpb.SignedImage +// return &accountpb.Profile_builder{ +// SignedImage: proto.ValueOrDefault(signedImg), +// }.Build() +func ValueOrDefault[T interface { + *P + Message +}, P any](val T) T { + if val == nil { + return T(new(P)) + } + return val +} + +// ValueOrDefaultBytes is like ValueOrDefault but for working with fields of +// type []byte. +func ValueOrDefaultBytes(val []byte) []byte { + if val == nil { + return []byte{} + } + return val +} diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go index 8fbecb4f5..823dbf3ba 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go @@ -13,6 +13,8 @@ package protodesc import ( + "strings" + "google.golang.org/protobuf/internal/editionssupport" "google.golang.org/protobuf/internal/errors" "google.golang.org/protobuf/internal/filedesc" @@ -102,13 +104,17 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot default: return nil, errors.New("invalid syntax: %q", fd.GetSyntax()) } - if f.L1.Syntax == protoreflect.Editions && (fd.GetEdition() < editionssupport.Minimum || fd.GetEdition() > editionssupport.Maximum) { - return nil, errors.New("use of edition %v not yet supported by the Go Protobuf runtime", fd.GetEdition()) - } f.L1.Path = fd.GetName() if f.L1.Path == "" { return nil, errors.New("file path must be populated") } + if f.L1.Syntax == protoreflect.Editions && (fd.GetEdition() < editionssupport.Minimum || fd.GetEdition() > editionssupport.Maximum) { + // Allow cmd/protoc-gen-go/testdata to use any edition for easier + // testing of upcoming edition features. 
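Stepping back to the new wrapperopaque.go helpers introduced above: ValueOrNil and ValueOrDefault are generic, so they work with any Has/Get pair or message pointer. A short usage sketch, with wrapperspb types standing in for generated Opaque API messages and illustrative variable names:

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	w := wrapperspb.String("hello")

	// ValueOrNil: populate a *string only when the value is present.
	// The nil check and getter here stand in for a generated message's
	// Has/Get pair.
	s := proto.ValueOrNil(w != nil, w.GetValue)
	fmt.Println(*s) // hello

	// ValueOrDefault: treat a nil message as an empty one.
	var nilMsg *wrapperspb.StringValue
	fmt.Println(proto.ValueOrDefault(nilMsg).GetValue() == "") // true

	// ValueOrDefaultBytes: same idea for []byte fields.
	fmt.Println(proto.ValueOrDefaultBytes(nil)) // []
}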
+ if !strings.HasPrefix(fd.GetName(), "cmd/protoc-gen-go/testdata/") { + return nil, errors.New("use of edition %v not yet supported by the Go Protobuf runtime", fd.GetEdition()) + } + } f.L1.Package = protoreflect.FullName(fd.GetPackage()) if !f.L1.Package.IsValid() && f.L1.Package != "" { return nil, errors.New("invalid package: %q", f.L1.Package) @@ -126,17 +132,11 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot } f.L2.Imports[i].IsPublic = true } - for _, i := range fd.GetWeakDependency() { - if !(0 <= i && int(i) < len(f.L2.Imports)) || f.L2.Imports[i].IsWeak { - return nil, errors.New("invalid or duplicate weak import index: %d", i) - } - f.L2.Imports[i].IsWeak = true - } imps := importSet{f.Path(): true} for i, path := range fd.GetDependency() { imp := &f.L2.Imports[i] f, err := r.FindFileByPath(path) - if err == protoregistry.NotFound && (o.AllowUnresolvable || imp.IsWeak) { + if err == protoregistry.NotFound && o.AllowUnresolvable { f = filedesc.PlaceholderFile(path) } else if err != nil { return nil, errors.New("could not resolve import %q: %v", path, err) diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go index ebcb4a8ab..9da34998b 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go @@ -149,7 +149,6 @@ func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDesc if opts := fd.GetOptions(); opts != nil { opts = proto.Clone(opts).(*descriptorpb.FieldOptions) f.L1.Options = func() protoreflect.ProtoMessage { return opts } - f.L1.IsWeak = opts.GetWeak() f.L1.IsLazy = opts.GetLazy() if opts.Packed != nil { f.L1.EditionFeatures.IsPacked = opts.GetPacked() diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go index f3cebab29..ff692436e 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go @@ -43,7 +43,7 @@ func (r *resolver) resolveMessageDependencies(ms []filedesc.Message, mds []*desc o.L1.Fields.List = append(o.L1.Fields.List, f) } - if f.L1.Kind, f.L1.Enum, f.L1.Message, err = r.findTarget(f.Kind(), f.Parent().FullName(), partialName(fd.GetTypeName()), f.IsWeak()); err != nil { + if f.L1.Kind, f.L1.Enum, f.L1.Message, err = r.findTarget(f.Kind(), f.Parent().FullName(), partialName(fd.GetTypeName())); err != nil { return errors.New("message field %q cannot resolve type: %v", f.FullName(), err) } if f.L1.Kind == protoreflect.GroupKind && (f.IsMap() || f.IsMapEntry()) { @@ -73,10 +73,10 @@ func (r *resolver) resolveMessageDependencies(ms []filedesc.Message, mds []*desc func (r *resolver) resolveExtensionDependencies(xs []filedesc.Extension, xds []*descriptorpb.FieldDescriptorProto) (err error) { for i, xd := range xds { x := &xs[i] - if x.L1.Extendee, err = r.findMessageDescriptor(x.Parent().FullName(), partialName(xd.GetExtendee()), false); err != nil { + if x.L1.Extendee, err = r.findMessageDescriptor(x.Parent().FullName(), partialName(xd.GetExtendee())); err != nil { return errors.New("extension field %q cannot resolve extendee: %v", x.FullName(), err) } - if x.L1.Kind, x.L2.Enum, x.L2.Message, err = r.findTarget(x.Kind(), x.Parent().FullName(), partialName(xd.GetTypeName()), false); err != nil { + if x.L1.Kind, x.L2.Enum, x.L2.Message, err = 
r.findTarget(x.Kind(), x.Parent().FullName(), partialName(xd.GetTypeName())); err != nil {
 			return errors.New("extension field %q cannot resolve type: %v", x.FullName(), err)
 		}
 		if xd.DefaultValue != nil {
@@ -95,11 +95,11 @@ func (r *resolver) resolveServiceDependencies(ss []filedesc.Service, sds []*desc
 		s := &ss[i]
 		for j, md := range sd.GetMethod() {
 			m := &s.L2.Methods.List[j]
-			m.L1.Input, err = r.findMessageDescriptor(m.Parent().FullName(), partialName(md.GetInputType()), false)
+			m.L1.Input, err = r.findMessageDescriptor(m.Parent().FullName(), partialName(md.GetInputType()))
 			if err != nil {
 				return errors.New("service method %q cannot resolve input: %v", m.FullName(), err)
 			}
-			m.L1.Output, err = r.findMessageDescriptor(s.FullName(), partialName(md.GetOutputType()), false)
+			m.L1.Output, err = r.findMessageDescriptor(s.FullName(), partialName(md.GetOutputType()))
 			if err != nil {
 				return errors.New("service method %q cannot resolve output: %v", m.FullName(), err)
 			}
@@ -111,16 +111,16 @@ func (r *resolver) resolveServiceDependencies(ss []filedesc.Service, sds []*desc
 // findTarget finds an enum or message descriptor if k is an enum, message,
 // group, or unknown. If unknown, and the name could be resolved, the
 // returned kind is set based on the type of the resolved descriptor.
-func (r *resolver) findTarget(k protoreflect.Kind, scope protoreflect.FullName, ref partialName, isWeak bool) (protoreflect.Kind, protoreflect.EnumDescriptor, protoreflect.MessageDescriptor, error) {
+func (r *resolver) findTarget(k protoreflect.Kind, scope protoreflect.FullName, ref partialName) (protoreflect.Kind, protoreflect.EnumDescriptor, protoreflect.MessageDescriptor, error) {
 	switch k {
 	case protoreflect.EnumKind:
-		ed, err := r.findEnumDescriptor(scope, ref, isWeak)
+		ed, err := r.findEnumDescriptor(scope, ref)
 		if err != nil {
 			return 0, nil, nil, err
 		}
 		return k, ed, nil, nil
 	case protoreflect.MessageKind, protoreflect.GroupKind:
-		md, err := r.findMessageDescriptor(scope, ref, isWeak)
+		md, err := r.findMessageDescriptor(scope, ref)
 		if err != nil {
 			return 0, nil, nil, err
 		}
@@ -129,7 +129,7 @@ func (r *resolver) findTarget(k protoreflect.Kind, scope protoreflect.FullName,
 		// Handle unspecified kinds (possible with parsers that operate
 		// on a per-file basis without knowledge of dependencies).
d, err := r.findDescriptor(scope, ref) - if err == protoregistry.NotFound && (r.allowUnresolvable || isWeak) { + if err == protoregistry.NotFound && r.allowUnresolvable { return k, filedesc.PlaceholderEnum(ref.FullName()), filedesc.PlaceholderMessage(ref.FullName()), nil } else if err == protoregistry.NotFound { return 0, nil, nil, errors.New("%q not found", ref.FullName()) @@ -206,9 +206,9 @@ func (r *resolver) findDescriptor(scope protoreflect.FullName, ref partialName) } } -func (r *resolver) findEnumDescriptor(scope protoreflect.FullName, ref partialName, isWeak bool) (protoreflect.EnumDescriptor, error) { +func (r *resolver) findEnumDescriptor(scope protoreflect.FullName, ref partialName) (protoreflect.EnumDescriptor, error) { d, err := r.findDescriptor(scope, ref) - if err == protoregistry.NotFound && (r.allowUnresolvable || isWeak) { + if err == protoregistry.NotFound && r.allowUnresolvable { return filedesc.PlaceholderEnum(ref.FullName()), nil } else if err == protoregistry.NotFound { return nil, errors.New("%q not found", ref.FullName()) @@ -222,9 +222,9 @@ func (r *resolver) findEnumDescriptor(scope protoreflect.FullName, ref partialNa return ed, nil } -func (r *resolver) findMessageDescriptor(scope protoreflect.FullName, ref partialName, isWeak bool) (protoreflect.MessageDescriptor, error) { +func (r *resolver) findMessageDescriptor(scope protoreflect.FullName, ref partialName) (protoreflect.MessageDescriptor, error) { d, err := r.findDescriptor(scope, ref) - if err == protoregistry.NotFound && (r.allowUnresolvable || isWeak) { + if err == protoregistry.NotFound && r.allowUnresolvable { return filedesc.PlaceholderMessage(ref.FullName()), nil } else if err == protoregistry.NotFound { return nil, errors.New("%q not found", ref.FullName()) diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go index 6de31c2eb..c343d9227 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go @@ -149,12 +149,6 @@ func validateMessageDeclarations(file *filedesc.File, ms []filedesc.Message, mds return errors.New("message field %q under proto3 optional semantics must be within a single element oneof", f.FullName()) } } - if f.IsWeak() && !flags.ProtoLegacy { - return errors.New("message field %q is a weak field, which is a legacy proto1 feature that is no longer supported", f.FullName()) - } - if f.IsWeak() && (!f.HasPresence() || !isOptionalMessage(f) || f.ContainingOneof() != nil) { - return errors.New("message field %q may only be weak for an optional message", f.FullName()) - } if f.IsPacked() && !isPackable(f) { return errors.New("message field %q is not packable", f.FullName()) } @@ -199,9 +193,6 @@ func validateMessageDeclarations(file *filedesc.File, ms []filedesc.Message, mds if f.Cardinality() != protoreflect.Optional { return errors.New("message field %q belongs in a oneof and must be optional", f.FullName()) } - if f.IsWeak() { - return errors.New("message field %q belongs in a oneof and must not be a weak reference", f.FullName()) - } } } @@ -254,9 +245,6 @@ func validateExtensionDeclarations(f *filedesc.File, xs []filedesc.Extension, xd return errors.New("extension field %q has an invalid number: %d", x.FullName(), x.Number()) } } - if xd.GetOptions().GetWeak() { - return errors.New("extension field %q cannot be a weak reference", x.FullName()) - } if x.IsPacked() && !isPackable(x) { return 
errors.New("extension field %q is not packable", x.FullName()) } diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go index 002e0047a..697a61b29 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go @@ -11,6 +11,7 @@ import ( "google.golang.org/protobuf/internal/editiondefaults" "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/types/descriptorpb" @@ -43,6 +44,8 @@ func toEditionProto(ed filedesc.Edition) descriptorpb.Edition { return descriptorpb.Edition_EDITION_PROTO3 case filedesc.Edition2023: return descriptorpb.Edition_EDITION_2023 + case filedesc.Edition2024: + return descriptorpb.Edition_EDITION_2024 default: panic(fmt.Sprintf("unknown value for edition: %v", ed)) } @@ -123,10 +126,43 @@ func mergeEditionFeatures(parentDesc protoreflect.Descriptor, child *descriptorp parentFS.IsJSONCompliant = *jf == descriptorpb.FeatureSet_ALLOW } - if goFeatures, ok := proto.GetExtension(child, gofeaturespb.E_Go).(*gofeaturespb.GoFeatures); ok && goFeatures != nil { - if luje := goFeatures.LegacyUnmarshalJsonEnum; luje != nil { - parentFS.GenerateLegacyUnmarshalJSON = *luje - } + // We must not use proto.GetExtension(child, gofeaturespb.E_Go) + // because that only works for messages we generated, but not for + // dynamicpb messages. See golang/protobuf#1669. + // + // Further, we harden this code against adversarial inputs: a + // service which accepts descriptors from a possibly malicious + // source shouldn't crash. + goFeatures := child.ProtoReflect().Get(gofeaturespb.E_Go.TypeDescriptor()) + if !goFeatures.IsValid() { + return parentFS + } + gf, ok := goFeatures.Interface().(protoreflect.Message) + if !ok { + return parentFS + } + // gf.Interface() could be *dynamicpb.Message or *gofeaturespb.GoFeatures. 
+ fields := gf.Descriptor().Fields() + + if fd := fields.ByNumber(genid.GoFeatures_LegacyUnmarshalJsonEnum_field_number); fd != nil && + !fd.IsList() && + fd.Kind() == protoreflect.BoolKind && + gf.Has(fd) { + parentFS.GenerateLegacyUnmarshalJSON = gf.Get(fd).Bool() + } + + if fd := fields.ByNumber(genid.GoFeatures_StripEnumPrefix_field_number); fd != nil && + !fd.IsList() && + fd.Kind() == protoreflect.EnumKind && + gf.Has(fd) { + parentFS.StripEnumPrefix = int(gf.Get(fd).Enum()) + } + + if fd := fields.ByNumber(genid.GoFeatures_ApiLevel_field_number); fd != nil && + !fd.IsList() && + fd.Kind() == protoreflect.EnumKind && + gf.Has(fd) { + parentFS.APILevel = int(gf.Get(fd).Enum()) } return parentFS diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go index a5de8d400..9b880aa8c 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go @@ -32,9 +32,6 @@ func ToFileDescriptorProto(file protoreflect.FileDescriptor) *descriptorpb.FileD if imp.IsPublic { p.PublicDependency = append(p.PublicDependency, int32(i)) } - if imp.IsWeak { - p.WeakDependency = append(p.WeakDependency, int32(i)) - } } for i, locs := 0, file.SourceLocations(); i < locs.Len(); i++ { loc := locs.Get(i) diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go index cd8fadbaf..cd7fbc87a 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go @@ -68,7 +68,7 @@ type Descriptor interface { // dependency is not resolved, in which case only name information is known. // // Placeholder types may only be returned by the following accessors - // as a result of unresolved dependencies or weak imports: + // as a result of unresolved dependencies: // // ╔═══════════════════════════════════╤═════════════════════╗ // ║ Accessor │ Descriptor ║ @@ -168,11 +168,7 @@ type FileImport struct { // The current file and the imported file must be within proto package. IsPublic bool - // IsWeak reports whether this is a weak import, which does not impose - // a direct dependency on the target file. - // - // Weak imports are a legacy proto1 feature. Equivalent behavior is - // achieved using proto2 extension fields or proto3 Any messages. + // Deprecated: support for weak fields has been removed. IsWeak bool } @@ -325,9 +321,7 @@ type FieldDescriptor interface { // specified in the source .proto file. HasOptionalKeyword() bool - // IsWeak reports whether this is a weak field, which does not impose a - // direct dependency on the target type. - // If true, then Message returns a placeholder type. + // Deprecated: support for weak fields has been removed. IsWeak() bool // IsPacked reports whether repeated primitive numeric kinds should be diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go index a7b0d06ff..a4b78acef 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go @@ -152,7 +152,7 @@ type Message interface { // This method may return nil. // // The returned methods type is identical to - // google.golang.org/protobuf/runtime/protoiface.Methods. + // [google.golang.org/protobuf/runtime/protoiface.Methods]. 
// Consult the protoiface package documentation for details. ProtoMethods() *methods } diff --git a/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go index 246156561..28e9e9f03 100644 --- a/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go +++ b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go @@ -122,6 +122,22 @@ type UnmarshalInputFlags = uint8 const ( UnmarshalDiscardUnknown UnmarshalInputFlags = 1 << iota + + // UnmarshalAliasBuffer permits unmarshal operations to alias the input buffer. + // The unmarshaller must not modify the contents of the buffer. + UnmarshalAliasBuffer + + // UnmarshalValidated indicates that validation has already been + // performed on the input buffer. + UnmarshalValidated + + // UnmarshalCheckRequired is set if this unmarshal operation ultimately will care if required fields are + // initialized. + UnmarshalCheckRequired + + // UnmarshalNoLazyDecoding is set if this unmarshal operation should not use + // lazy decoding, even when otherwise available. + UnmarshalNoLazyDecoding ) // UnmarshalOutputFlags are output from the Unmarshal method. diff --git a/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go b/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go index 4a1ab7fb3..93df1b569 100644 --- a/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go +++ b/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go @@ -15,6 +15,7 @@ import ( "google.golang.org/protobuf/internal/filedesc" "google.golang.org/protobuf/internal/filetype" "google.golang.org/protobuf/internal/impl" + "google.golang.org/protobuf/internal/protolazy" ) // UnsafeEnabled specifies whether package unsafe can be used. @@ -39,6 +40,9 @@ type ( ExtensionFieldV1 = impl.ExtensionField Pointer = impl.Pointer + + LazyUnmarshalInfo = *protolazy.XXX_lazyUnmarshalInfo + RaceDetectHookData = impl.RaceDetectHookData ) var X impl.Export diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go index 6dea75cd5..a51633767 100644 --- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go +++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go @@ -46,6 +46,7 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" + unsafe "unsafe" ) // The full set of known editions. @@ -69,7 +70,7 @@ const ( Edition_EDITION_2023 Edition = 1000 Edition_EDITION_2024 Edition = 1001 // Placeholder editions for testing feature resolution. These should not be - // used or relyed on outside of tests. + // used or relied on outside of tests. Edition_EDITION_1_TEST_ONLY Edition = 1 Edition_EDITION_2_TEST_ONLY Edition = 2 Edition_EDITION_99997_TEST_ONLY Edition = 99997 @@ -577,8 +578,6 @@ func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { } // If set to RETENTION_SOURCE, the option will be omitted from the binary. -// Note: as of January 2023, support for this is in progress and does not yet -// have an effect (b/264593489). type FieldOptions_OptionRetention int32 const ( @@ -640,8 +639,7 @@ func (FieldOptions_OptionRetention) EnumDescriptor() ([]byte, []int) { // This indicates the types of entities that the field may apply to when used // as an option. If it is unset, then the field may be freely used as an -// option on any kind of entity. 
Note: as of January 2023, support for this is -// in progress and does not yet have an effect (b/264593489). +// option on any kind of entity. type FieldOptions_OptionTargetType int32 const ( @@ -1208,11 +1206,11 @@ func (GeneratedCodeInfo_Annotation_Semantic) EnumDescriptor() ([]byte, []int) { // The protocol compiler can output a FileDescriptorSet containing the .proto // files it parses. type FileDescriptorSet struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *FileDescriptorSet) Reset() { @@ -1254,12 +1252,9 @@ func (x *FileDescriptorSet) GetFile() []*FileDescriptorProto { // Describes a complete .proto file. type FileDescriptorProto struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` // file name, relative to root of source tree - Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` // e.g. "foo", "foo.bar", etc. + state protoimpl.MessageState `protogen:"open.v1"` + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` // file name, relative to root of source tree + Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` // e.g. "foo", "foo.bar", etc. // Names of files imported by this file. Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"` // Indexes of the public imported files in the dependency list above. @@ -1284,7 +1279,9 @@ type FileDescriptorProto struct { // If `edition` is present, this value must be "editions". Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` // The edition of the proto file. - Edition *Edition `protobuf:"varint,14,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` + Edition *Edition `protobuf:"varint,14,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *FileDescriptorProto) Reset() { @@ -1410,10 +1407,7 @@ func (x *FileDescriptorProto) GetEdition() Edition { // Describes a message type. type DescriptorProto struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"` Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"` @@ -1425,7 +1419,9 @@ type DescriptorProto struct { ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` // Reserved field names, which may not be used by fields in the same message. // A given name may only be reserved once. 
- ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *DescriptorProto) Reset() { @@ -1529,11 +1525,7 @@ func (x *DescriptorProto) GetReservedName() []string { } type ExtensionRangeOptions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - extensionFields protoimpl.ExtensionFields - + state protoimpl.MessageState `protogen:"open.v1"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` // For external users: DO NOT USE. We are in the process of open sourcing @@ -1545,7 +1537,10 @@ type ExtensionRangeOptions struct { // The verification state of the range. // TODO: flip the default to DECLARATION once all empty ranges // are marked as UNVERIFIED. - Verification *ExtensionRangeOptions_VerificationState `protobuf:"varint,3,opt,name=verification,enum=google.protobuf.ExtensionRangeOptions_VerificationState,def=1" json:"verification,omitempty"` + Verification *ExtensionRangeOptions_VerificationState `protobuf:"varint,3,opt,name=verification,enum=google.protobuf.ExtensionRangeOptions_VerificationState,def=1" json:"verification,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Default values for ExtensionRangeOptions fields. @@ -1613,10 +1608,7 @@ func (x *ExtensionRangeOptions) GetVerification() ExtensionRangeOptions_Verifica // Describes a field within a message. type FieldDescriptorProto struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` Number *int32 `protobuf:"varint,3,opt,name=number" json:"number,omitempty"` Label *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"` @@ -1668,6 +1660,8 @@ type FieldDescriptorProto struct { // Proto2 optional fields do not set this flag, because they already indicate // optional with `LABEL_OPTIONAL`. Proto3Optional *bool `protobuf:"varint,17,opt,name=proto3_optional,json=proto3Optional" json:"proto3_optional,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *FieldDescriptorProto) Reset() { @@ -1779,12 +1773,11 @@ func (x *FieldDescriptorProto) GetProto3Optional() bool { // Describes a oneof. type OneofDescriptorProto struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` unknownFields protoimpl.UnknownFields - - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` + sizeCache protoimpl.SizeCache } func (x *OneofDescriptorProto) Reset() { @@ -1833,10 +1826,7 @@ func (x *OneofDescriptorProto) GetOptions() *OneofOptions { // Describes an enum type. 
type EnumDescriptorProto struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` @@ -1846,7 +1836,9 @@ type EnumDescriptorProto struct { ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` // Reserved enum value names, which may not be reused. A given name may only // be reserved once. - ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *EnumDescriptorProto) Reset() { @@ -1916,13 +1908,12 @@ func (x *EnumDescriptorProto) GetReservedName() []string { // Describes a value within an enum. type EnumValueDescriptorProto struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` + Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` unknownFields protoimpl.UnknownFields - - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` - Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + sizeCache protoimpl.SizeCache } func (x *EnumValueDescriptorProto) Reset() { @@ -1978,13 +1969,12 @@ func (x *EnumValueDescriptorProto) GetOptions() *EnumValueOptions { // Describes a service. type ServiceDescriptorProto struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` + Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` unknownFields protoimpl.UnknownFields - - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` - Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + sizeCache protoimpl.SizeCache } func (x *ServiceDescriptorProto) Reset() { @@ -2040,11 +2030,8 @@ func (x *ServiceDescriptorProto) GetOptions() *ServiceOptions { // Describes a method of a service. type MethodDescriptorProto struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` // Input and output type names. These are resolved in the same way as // FieldDescriptorProto.type_name, but must refer to a message type. 
InputType *string `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"` @@ -2054,6 +2041,8 @@ type MethodDescriptorProto struct { ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"` // Identifies if server streams multiple server messages ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Default values for MethodDescriptorProto fields. @@ -2135,11 +2124,7 @@ func (x *MethodDescriptorProto) GetServerStreaming() bool { } type FileOptions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - extensionFields protoimpl.ExtensionFields - + state protoimpl.MessageState `protogen:"open.v1"` // Sets the Java package where classes generated from this .proto will be // placed. By default, the proto package is used, but this is often // inappropriate because proto packages do not normally start with backwards @@ -2231,6 +2216,9 @@ type FileOptions struct { // The parser stores options it doesn't recognize here. // See the documentation for the "Options" section above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Default values for FileOptions fields. @@ -2424,11 +2412,7 @@ func (x *FileOptions) GetUninterpretedOption() []*UninterpretedOption { } type MessageOptions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - extensionFields protoimpl.ExtensionFields - + state protoimpl.MessageState `protogen:"open.v1"` // Set true to use the old proto1 MessageSet wire format for extensions. // This is provided for backwards-compatibility with the MessageSet wire // format. You should not use this for any other reason: It's less @@ -2501,6 +2485,9 @@ type MessageOptions struct { Features *FeatureSet `protobuf:"bytes,12,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Default values for MessageOptions fields. @@ -2591,17 +2578,14 @@ func (x *MessageOptions) GetUninterpretedOption() []*UninterpretedOption { } type FieldOptions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - extensionFields protoimpl.ExtensionFields - + state protoimpl.MessageState `protogen:"open.v1"` + // NOTE: ctype is deprecated. Use `features.(pb.cpp).string_type` instead. // The ctype option instructs the C++ code generator to use a different // representation of the field than it normally would. See the specific // options below. This option is only implemented to support use of // [ctype=CORD] and [ctype=STRING] (the default) on non-repeated fields of - // type "bytes" in the open source release -- sorry, we'll try to include - // other types in a future version! + // type "bytes" in the open source release. 
+ // TODO: make ctype actually deprecated. Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"` // The packed option can be enabled for repeated primitive fields to enable // a more efficient representation on the wire. Rather than repeatedly @@ -2668,6 +2652,9 @@ type FieldOptions struct { FeatureSupport *FieldOptions_FeatureSupport `protobuf:"bytes,22,opt,name=feature_support,json=featureSupport" json:"feature_support,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Default values for FieldOptions fields. @@ -2810,15 +2797,14 @@ func (x *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { } type OneofOptions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - extensionFields protoimpl.ExtensionFields - + state protoimpl.MessageState `protogen:"open.v1"` // Any features defined in the specific edition. Features *FeatureSet `protobuf:"bytes,1,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *OneofOptions) Reset() { @@ -2866,11 +2852,7 @@ func (x *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { } type EnumOptions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - extensionFields protoimpl.ExtensionFields - + state protoimpl.MessageState `protogen:"open.v1"` // Set this option to true to allow mapping different tag names to the same // value. AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"` @@ -2892,6 +2874,9 @@ type EnumOptions struct { Features *FeatureSet `protobuf:"bytes,7,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Default values for EnumOptions fields. @@ -2966,11 +2951,7 @@ func (x *EnumOptions) GetUninterpretedOption() []*UninterpretedOption { } type EnumValueOptions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - extensionFields protoimpl.ExtensionFields - + state protoimpl.MessageState `protogen:"open.v1"` // Is this enum value deprecated? // Depending on the target platform, this can emit Deprecated annotations // for the enum value, or it will be completely ignored; in the very least, @@ -2986,6 +2967,9 @@ type EnumValueOptions struct { FeatureSupport *FieldOptions_FeatureSupport `protobuf:"bytes,4,opt,name=feature_support,json=featureSupport" json:"feature_support,omitempty"` // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Default values for EnumValueOptions fields. @@ -3060,11 +3044,7 @@ func (x *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { } type ServiceOptions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - extensionFields protoimpl.ExtensionFields - + state protoimpl.MessageState `protogen:"open.v1"` // Any features defined in the specific edition. Features *FeatureSet `protobuf:"bytes,34,opt,name=features" json:"features,omitempty"` // Is this service deprecated? @@ -3074,6 +3054,9 @@ type ServiceOptions struct { Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Default values for ServiceOptions fields. @@ -3133,11 +3116,7 @@ func (x *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption { } type MethodOptions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - extensionFields protoimpl.ExtensionFields - + state protoimpl.MessageState `protogen:"open.v1"` // Is this method deprecated? // Depending on the target platform, this can emit Deprecated annotations // for the method, or it will be completely ignored; in the very least, @@ -3148,6 +3127,9 @@ type MethodOptions struct { Features *FeatureSet `protobuf:"bytes,35,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Default values for MethodOptions fields. @@ -3221,11 +3203,8 @@ func (x *MethodOptions) GetUninterpretedOption() []*UninterpretedOption { // or produced by Descriptor::CopyTo()) will never have UninterpretedOptions // in them. type UninterpretedOption struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"` // The value of the uninterpreted option, in whatever type the tokenizer // identified it as during parsing. Exactly one of these should be set. 
IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"` @@ -3234,6 +3213,8 @@ type UninterpretedOption struct { DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"` StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"` AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *UninterpretedOption) Reset() { @@ -3322,17 +3303,16 @@ func (x *UninterpretedOption) GetAggregateValue() string { // be designed and implemented to handle this, hopefully before we ever hit a // conflict here. type FeatureSet struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - extensionFields protoimpl.ExtensionFields - + state protoimpl.MessageState `protogen:"open.v1"` FieldPresence *FeatureSet_FieldPresence `protobuf:"varint,1,opt,name=field_presence,json=fieldPresence,enum=google.protobuf.FeatureSet_FieldPresence" json:"field_presence,omitempty"` EnumType *FeatureSet_EnumType `protobuf:"varint,2,opt,name=enum_type,json=enumType,enum=google.protobuf.FeatureSet_EnumType" json:"enum_type,omitempty"` RepeatedFieldEncoding *FeatureSet_RepeatedFieldEncoding `protobuf:"varint,3,opt,name=repeated_field_encoding,json=repeatedFieldEncoding,enum=google.protobuf.FeatureSet_RepeatedFieldEncoding" json:"repeated_field_encoding,omitempty"` Utf8Validation *FeatureSet_Utf8Validation `protobuf:"varint,4,opt,name=utf8_validation,json=utf8Validation,enum=google.protobuf.FeatureSet_Utf8Validation" json:"utf8_validation,omitempty"` MessageEncoding *FeatureSet_MessageEncoding `protobuf:"varint,5,opt,name=message_encoding,json=messageEncoding,enum=google.protobuf.FeatureSet_MessageEncoding" json:"message_encoding,omitempty"` JsonFormat *FeatureSet_JsonFormat `protobuf:"varint,6,opt,name=json_format,json=jsonFormat,enum=google.protobuf.FeatureSet_JsonFormat" json:"json_format,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *FeatureSet) Reset() { @@ -3412,10 +3392,7 @@ func (x *FeatureSet) GetJsonFormat() FeatureSet_JsonFormat { // feature resolution. The resolution with this object becomes a simple search // for the closest matching edition, followed by proto merges. type FeatureSetDefaults struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` Defaults []*FeatureSetDefaults_FeatureSetEditionDefault `protobuf:"bytes,1,rep,name=defaults" json:"defaults,omitempty"` // The minimum supported edition (inclusive) when this was constructed. // Editions before this will not have defaults. @@ -3423,6 +3400,8 @@ type FeatureSetDefaults struct { // The maximum known edition (inclusive) when this was constructed. Editions // after this will not have reliable defaults. 
MaximumEdition *Edition `protobuf:"varint,5,opt,name=maximum_edition,json=maximumEdition,enum=google.protobuf.Edition" json:"maximum_edition,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *FeatureSetDefaults) Reset() { @@ -3479,10 +3458,7 @@ func (x *FeatureSetDefaults) GetMaximumEdition() Edition { // Encapsulates information about the original source file from which a // FileDescriptorProto was generated. type SourceCodeInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // A Location identifies a piece of source code in a .proto file which // corresponds to a particular definition. This information is intended // to be useful to IDEs, code indexers, documentation generators, and similar @@ -3531,7 +3507,10 @@ type SourceCodeInfo struct { // - Code which tries to interpret locations should probably be designed to // ignore those that it doesn't understand, as more types of locations could // be recorded in the future. - Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` + Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` + extensionFields protoimpl.ExtensionFields + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *SourceCodeInfo) Reset() { @@ -3575,13 +3554,12 @@ func (x *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { // file. A GeneratedCodeInfo message is associated with only one generated // source file, but may contain references to different source .proto files. type GeneratedCodeInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // An Annotation connects some span of text in generated code to an element // of its generating .proto file. - Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` + Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *GeneratedCodeInfo) Reset() { @@ -3622,13 +3600,12 @@ func (x *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { } type DescriptorProto_ExtensionRange struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive. + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` // Exclusive. + Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` unknownFields protoimpl.UnknownFields - - Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive. - End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` // Exclusive. - Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + sizeCache protoimpl.SizeCache } func (x *DescriptorProto_ExtensionRange) Reset() { @@ -3686,12 +3663,11 @@ func (x *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions { // fields or extension ranges in the same message. Reserved ranges may // not overlap. 
type DescriptorProto_ReservedRange struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive. + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` // Exclusive. unknownFields protoimpl.UnknownFields - - Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive. - End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` // Exclusive. + sizeCache protoimpl.SizeCache } func (x *DescriptorProto_ReservedRange) Reset() { @@ -3739,10 +3715,7 @@ func (x *DescriptorProto_ReservedRange) GetEnd() int32 { } type ExtensionRangeOptions_Declaration struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // The extension number declared within the extension range. Number *int32 `protobuf:"varint,1,opt,name=number" json:"number,omitempty"` // The fully-qualified name of the extension field. There must be a leading @@ -3758,7 +3731,9 @@ type ExtensionRangeOptions_Declaration struct { Reserved *bool `protobuf:"varint,5,opt,name=reserved" json:"reserved,omitempty"` // If true, indicates that the extension must be defined as repeated. // Otherwise the extension must be defined as optional. - Repeated *bool `protobuf:"varint,6,opt,name=repeated" json:"repeated,omitempty"` + Repeated *bool `protobuf:"varint,6,opt,name=repeated" json:"repeated,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ExtensionRangeOptions_Declaration) Reset() { @@ -3833,12 +3808,11 @@ func (x *ExtensionRangeOptions_Declaration) GetRepeated() bool { // is inclusive such that it can appropriately represent the entire int32 // domain. type EnumDescriptorProto_EnumReservedRange struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive. + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` // Inclusive. unknownFields protoimpl.UnknownFields - - Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive. - End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` // Inclusive. + sizeCache protoimpl.SizeCache } func (x *EnumDescriptorProto_EnumReservedRange) Reset() { @@ -3886,12 +3860,11 @@ func (x *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 { } type FieldOptions_EditionDefault struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Edition *Edition `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` + Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` // Textproto value. unknownFields protoimpl.UnknownFields - - Edition *Edition `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` - Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` // Textproto value. + sizeCache protoimpl.SizeCache } func (x *FieldOptions_EditionDefault) Reset() { @@ -3940,10 +3913,7 @@ func (x *FieldOptions_EditionDefault) GetValue() string { // Information about the support window of a feature. 
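[Reviewer sketch, not part of the patch: the descriptor.pb.go churn in this file is layout-only — the internal bookkeeping fields (sizeCache, unknownFields, extensionFields) move below the exported fields and state gains a `protogen:"open.v1"` tag — so the exported surface is unchanged. A quick illustration (mine, under that assumption) that the generated getters behave as before, including on nil receivers:]

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	// Nil receivers still return zero values rather than panicking.
	var fs *descriptorpb.FieldOptions_FeatureSupport
	fmt.Println(fs.GetEditionIntroduced()) // EDITION_UNKNOWN

	fs = &descriptorpb.FieldOptions_FeatureSupport{
		EditionIntroduced: descriptorpb.Edition_EDITION_2023.Enum(),
	}
	fmt.Println(fs.GetEditionIntroduced()) // EDITION_2023
}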
type FieldOptions_FeatureSupport struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // The edition that this feature was first available in. In editions // earlier than this one, the default assigned to EDITION_LEGACY will be // used, and proto files will not be able to override it. @@ -3958,6 +3928,8 @@ type FieldOptions_FeatureSupport struct { // this one, the last default assigned will be used, and proto files will // not be able to override it. EditionRemoved *Edition `protobuf:"varint,4,opt,name=edition_removed,json=editionRemoved,enum=google.protobuf.Edition" json:"edition_removed,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *FieldOptions_FeatureSupport) Reset() { @@ -4024,12 +3996,11 @@ func (x *FieldOptions_FeatureSupport) GetEditionRemoved() Edition { // E.g.,{ ["foo", false], ["bar.baz", true], ["moo", false] } represents // "foo.(bar.baz).moo". type UninterpretedOption_NamePart struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"` + IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"` unknownFields protoimpl.UnknownFields - - NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"` - IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"` + sizeCache protoimpl.SizeCache } func (x *UninterpretedOption_NamePart) Reset() { @@ -4081,15 +4052,14 @@ func (x *UninterpretedOption_NamePart) GetIsExtension() bool { // the defaults at the closest matching edition ordered at or before it should // be used. This field must be in strict ascending order by edition. type FeatureSetDefaults_FeatureSetEditionDefault struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Edition *Edition `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + Edition *Edition `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` // Defaults of features that can be overridden in this edition. OverridableFeatures *FeatureSet `protobuf:"bytes,4,opt,name=overridable_features,json=overridableFeatures" json:"overridable_features,omitempty"` // Defaults of features that can't be overridden in this edition. FixedFeatures *FeatureSet `protobuf:"bytes,5,opt,name=fixed_features,json=fixedFeatures" json:"fixed_features,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *FeatureSetDefaults_FeatureSetEditionDefault) Reset() { @@ -4144,10 +4114,7 @@ func (x *FeatureSetDefaults_FeatureSetEditionDefault) GetFixedFeatures() *Featur } type SourceCodeInfo_Location struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Identifies which part of the FileDescriptorProto was defined at this // location. 
// @@ -4239,6 +4206,8 @@ type SourceCodeInfo_Location struct { LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"` TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"` LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *SourceCodeInfo_Location) Reset() { @@ -4307,10 +4276,7 @@ func (x *SourceCodeInfo_Location) GetLeadingDetachedComments() []string { } type GeneratedCodeInfo_Annotation struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Identifies the element in the original source .proto file. This field // is formatted the same as SourceCodeInfo.Location.path. Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` @@ -4322,8 +4288,10 @@ type GeneratedCodeInfo_Annotation struct { // Identifies the ending offset in bytes in the generated code that // relates to the identified object. The end offset should be one past // the last relevant byte (so the length of the text = end - begin). - End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` - Semantic *GeneratedCodeInfo_Annotation_Semantic `protobuf:"varint,5,opt,name=semantic,enum=google.protobuf.GeneratedCodeInfo_Annotation_Semantic" json:"semantic,omitempty"` + End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` + Semantic *GeneratedCodeInfo_Annotation_Semantic `protobuf:"varint,5,opt,name=semantic,enum=google.protobuf.GeneratedCodeInfo_Annotation_Semantic" json:"semantic,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *GeneratedCodeInfo_Annotation) Reset() { @@ -4393,498 +4361,478 @@ func (x *GeneratedCodeInfo_Annotation) GetSemantic() GeneratedCodeInfo_Annotatio var File_google_protobuf_descriptor_proto protoreflect.FileDescriptor -var file_google_protobuf_descriptor_proto_rawDesc = []byte{ +var file_google_protobuf_descriptor_proto_rawDesc = string([]byte{ 0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x22, 0x4d, 0x0a, 0x11, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x62, 0x75, 0x66, 0x22, 0x5b, 0x0a, 0x11, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x12, 0x38, 0x0a, 0x04, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x04, 0x66, 0x69, - 0x6c, 0x65, 0x22, 0x98, 0x05, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, - 0x0a, 0x07, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 
[hunk body elided: machine-generated hex bytes of the regenerated google.protobuf raw file-descriptor blob (wire-format encodings of FileDescriptorProto, DescriptorProto, ExtensionRangeOptions, FieldDescriptorProto, OneofDescriptorProto, EnumDescriptorProto, EnumValueDescriptorProto, ServiceDescriptorProto, MethodDescriptorProto, FileOptions, MessageOptions, and FieldOptions). The `-`/`+` rows reflect regeneration of this binary data by a newer code generator and carry no hand-written changes to review.]
0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x06, 0x53, + 0x54, 0x52, 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, + 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x61, + 0x63, 0x6b, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, - 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x57, 0x0a, - 0x10, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, - 0x73, 0x18, 0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, + 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, + 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, + 0x73, 0x65, 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e, 0x76, 0x65, + 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x0f, 0x20, 0x01, 0x28, + 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, + 0x66, 0x69, 0x65, 0x64, 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, + 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, + 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, + 0x19, 0x0a, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, + 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, + 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, + 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, + 0x64, 0x61, 0x63, 0x74, 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, - 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, - 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, - 0x55, 0x0a, 0x0f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, - 0x72, 0x74, 0x18, 0x16, 0x20, 0x01, 
0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, - 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, - 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, - 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, - 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x1a, 0x5a, 0x0a, 0x0e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, - 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, - 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x96, 0x02, 0x0a, - 0x0e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12, - 0x47, 0x0a, 0x12, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x74, 0x72, 0x6f, - 0x64, 0x75, 0x63, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, - 0x74, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x12, 0x65, 0x64, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, - 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, - 0x64, 0x12, 0x2f, 0x0a, 0x13, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x77, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, - 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x61, 0x72, 0x6e, 0x69, - 0x6e, 0x67, 0x12, 0x41, 0x0a, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, - 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, - 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, - 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, - 0x52, 0x44, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, - 0x49, 0x45, 0x43, 0x45, 0x10, 0x02, 0x22, 0x35, 0x0a, 0x06, 
0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, - 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, - 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, - 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a, - 0x0f, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, - 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, - 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14, - 0x0a, 0x10, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52, - 0x43, 0x45, 0x10, 0x02, 0x22, 0x8c, 0x02, 0x0a, 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, - 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, - 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, - 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, - 0x45, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47, - 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, - 0x4e, 0x5f, 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, - 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, - 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, - 0x45, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, - 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05, - 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x06, 0x12, 0x1a, 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, - 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, - 0x10, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, - 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54, - 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, - 0x44, 0x10, 0x09, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, - 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x12, 0x10, 0x13, 0x22, 0xac, 0x01, 0x0a, 0x0c, 0x4f, - 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, + 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x48, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, 0x20, 0x03, + 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 
0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, + 0x70, 0x65, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x57, 0x0a, 0x10, 0x65, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, + 0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, + 0x75, 0x6c, 0x74, 0x52, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, + 0x75, 0x6c, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, + 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x55, 0x0a, + 0x0f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, + 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, + 0x70, 0x6f, 0x72, 0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, + 0x70, 0x6f, 0x72, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, - 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd1, 0x02, 0x0a, 0x0b, 0x45, 0x6e, - 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, - 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, - 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, - 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, - 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, - 0x64, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, - 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, - 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, - 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, + 0x65, 0x72, 0x70, 
0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x5a, + 0x0a, 0x0e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x96, 0x02, 0x0a, 0x0e, 0x46, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x47, 0x0a, + 0x12, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x74, 0x72, 0x6f, 0x64, 0x75, + 0x63, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x74, 0x72, + 0x6f, 0x64, 0x75, 0x63, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x12, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, + 0x2f, 0x0a, 0x13, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x77, + 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x64, 0x65, + 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, + 0x12, 0x41, 0x0a, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x6d, 0x6f, + 0x76, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x6d, 0x6f, + 0x76, 0x65, 0x64, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, + 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x52, 0x44, + 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, 0x49, 0x45, + 0x43, 0x45, 0x10, 0x02, 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d, + 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0d, 0x0a, + 0x09, 0x4a, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, + 0x4a, 0x53, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a, 0x0f, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x15, + 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, + 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, + 0x4f, 0x4e, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, + 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, + 0x10, 0x02, 0x22, 0x8c, 0x02, 0x0a, 0x10, 
0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, + 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, + 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, + 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x46, 0x49, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, + 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, 0x4e, 0x5f, + 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, + 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03, + 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x46, 0x49, 0x45, 0x4c, 0x44, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, + 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05, 0x12, 0x14, + 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, + 0x55, 0x4d, 0x10, 0x06, 0x12, 0x1a, 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x10, 0x07, + 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54, 0x41, 0x52, + 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x10, + 0x09, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, + 0x10, 0x05, 0x4a, 0x04, 0x08, 0x12, 0x10, 0x13, 0x22, 0xac, 0x01, 0x0a, 0x0c, 0x4f, 0x6e, 0x65, + 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, @@ -4893,284 +4841,306 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, - 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0xd8, 0x02, - 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, - 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, + 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd1, 0x02, 0x0a, 0x0b, 0x45, 0x6e, 0x75, 0x6d, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, 0x6f, 0x77, + 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 
0x6c, + 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, + 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, + 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, + 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, + 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x42, + 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, + 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, + 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, + 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, + 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, + 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0xd8, 0x02, 0x0a, 0x10, + 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, + 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, + 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, + 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x55, 0x0a, 0x0f, 0x66, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, + 0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, + 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, + 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x24, 0x2e, 
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, + 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, + 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd5, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x73, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, - 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, - 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x55, 0x0a, 0x0f, - 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, - 0x6f, 0x72, 0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, - 0x6f, 0x72, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, + 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x99, + 0x03, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, + 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, + 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, 0x69, 0x64, 0x65, 0x6d, 0x70, + 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x22, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, + 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, + 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 
0x52, 0x10, 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, + 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x23, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, - 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd5, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, + 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x50, 0x0a, + 0x10, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, + 0x6c, 0x12, 0x17, 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, + 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4e, 0x4f, + 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, 0x10, 0x01, 0x12, + 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x2a, + 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a, 0x13, 0x55, + 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, + 0x69, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, + 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x70, 0x6f, + 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, + 0x0a, 0x12, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x65, 0x67, 0x61, + 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, + 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x01, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 
0x75, 0x65, 0x12, + 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x5f, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, 0x67, 0x67, + 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a, 0x08, 0x4e, + 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x5f, + 0x70, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61, 0x6d, 0x65, + 0x50, 0x61, 0x72, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x0a, 0x0a, 0x0a, 0x46, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x12, 0x91, 0x01, 0x0a, 0x0e, 0x66, 0x69, 0x65, 0x6c, 0x64, + 0x5f, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x3f, 0x88, 0x01, 0x01, 0x98, + 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, + 0x49, 0x54, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, + 0x49, 0x54, 0x18, 0xe7, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, + 0x49, 0x54, 0x18, 0xe8, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0d, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x6c, 0x0a, 0x09, 0x65, 0x6e, + 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, - 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, - 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, 0x0a, 0x14, 0x75, - 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, - 0x22, 0x99, 0x03, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, - 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, - 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, 0x69, 0x64, 0x65, - 0x6d, 
0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x22, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, - 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, - 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, 0x64, 0x65, 0x6d, - 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x37, 0x0a, 0x08, - 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x23, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, - 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, - 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, - 0x50, 0x0a, 0x10, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, - 0x76, 0x65, 0x6c, 0x12, 0x17, 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, - 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, - 0x4e, 0x4f, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, 0x10, - 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, 0x10, - 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a, - 0x13, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, - 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, - 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, - 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, - 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, - 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x65, - 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, - 0x0a, 0x0c, 0x64, 0x6f, 0x75, 
0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, - 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, - 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a, - 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, - 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61, - 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, - 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, - 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x0a, 0x0a, 0x0a, 0x46, 0x65, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x12, 0x91, 0x01, 0x0a, 0x0e, 0x66, 0x69, 0x65, - 0x6c, 0x64, 0x5f, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x3f, 0x88, 0x01, - 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c, - 0x49, 0x43, 0x49, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x49, 0x4d, 0x50, 0x4c, - 0x49, 0x43, 0x49, 0x54, 0x18, 0xe7, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c, - 0x49, 0x43, 0x49, 0x54, 0x18, 0xe8, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0d, 0x66, - 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x6c, 0x0a, 0x09, - 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x75, - 0x6d, 0x54, 0x79, 0x70, 0x65, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, - 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, 0x01, - 0x09, 0x12, 0x04, 0x4f, 0x50, 0x45, 0x4e, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, - 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x98, 0x01, 0x0a, 0x17, 0x72, - 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x65, 0x6e, - 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, - 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, - 0x2d, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, - 0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x50, - 0x41, 0x43, 0x4b, 0x45, 0x44, 0x18, 0xe7, 0x07, 0xb2, 
0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x15, - 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, - 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x7e, 0x0a, 0x0f, 0x75, 0x74, 0x66, 0x38, 0x5f, 0x76, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x55, 0x74, 0x66, 0x38, - 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, - 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x18, 0xe6, - 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x18, 0xe7, 0x07, 0xb2, - 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0e, 0x75, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7e, 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x26, 0x88, 0x01, - 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x14, 0x12, 0x0f, 0x4c, 0x45, 0x4e, 0x47, - 0x54, 0x48, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xb2, 0x01, - 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, - 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x82, 0x01, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, - 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, - 0x6d, 0x61, 0x74, 0x42, 0x39, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01, 0x06, 0x98, 0x01, - 0x01, 0xa2, 0x01, 0x17, 0x12, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, - 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, - 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0a, - 0x6a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x5c, 0x0a, 0x0d, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x46, - 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x50, 0x52, 0x45, 0x53, 0x45, 0x4e, 0x43, 0x45, 0x5f, 0x55, 0x4e, - 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, - 0x43, 0x49, 0x54, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, - 0x54, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x52, 0x45, - 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x03, 0x22, 0x37, 0x0a, 0x08, 0x45, 0x6e, 0x75, 0x6d, - 0x54, 0x79, 0x70, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x54, 0x59, 0x50, - 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4f, - 0x50, 0x45, 0x4e, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x10, - 0x02, 0x22, 0x56, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 
0x46, 0x69, 0x65, - 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x23, 0x0a, 0x1f, 0x52, 0x45, - 0x50, 0x45, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x45, 0x4e, 0x43, - 0x4f, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, - 0x0a, 0x0a, 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x45, - 0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 0x44, 0x10, 0x02, 0x22, 0x49, 0x0a, 0x0e, 0x55, 0x74, 0x66, - 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x55, - 0x54, 0x46, 0x38, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, - 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x56, 0x45, 0x52, 0x49, - 0x46, 0x59, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x03, 0x22, 0x04, - 0x08, 0x01, 0x10, 0x01, 0x22, 0x53, 0x0a, 0x0f, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, - 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x1c, 0x0a, 0x18, 0x4d, 0x45, 0x53, 0x53, 0x41, - 0x47, 0x45, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, - 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, - 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, - 0x4c, 0x49, 0x4d, 0x49, 0x54, 0x45, 0x44, 0x10, 0x02, 0x22, 0x48, 0x0a, 0x0a, 0x4a, 0x73, 0x6f, - 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x17, 0x0a, 0x13, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, - 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, - 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10, 0x01, 0x12, 0x16, 0x0a, 0x12, 0x4c, - 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, - 0x54, 0x10, 0x02, 0x2a, 0x06, 0x08, 0xe8, 0x07, 0x10, 0x8b, 0x4e, 0x2a, 0x06, 0x08, 0x8b, 0x4e, - 0x10, 0x90, 0x4e, 0x2a, 0x06, 0x08, 0x90, 0x4e, 0x10, 0x91, 0x4e, 0x4a, 0x06, 0x08, 0xe7, 0x07, - 0x10, 0xe8, 0x07, 0x22, 0xef, 0x03, 0x0a, 0x12, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, - 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x08, 0x64, 0x65, - 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x54, + 0x79, 0x70, 0x65, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, + 0x0b, 0x12, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x09, 0x12, + 0x04, 0x4f, 0x50, 0x45, 0x4e, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x08, + 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x98, 0x01, 0x0a, 0x17, 0x72, 0x65, 0x70, + 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x65, 0x6e, 0x63, 0x6f, + 0x64, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x2d, 0x88, + 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, + 0x41, 0x4e, 0x44, 0x45, 0x44, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x50, 0x41, 0x43, + 0x4b, 
0x45, 0x44, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x15, 0x72, 0x65, + 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, + 0x69, 0x6e, 0x67, 0x12, 0x7e, 0x0a, 0x0f, 0x75, 0x74, 0x66, 0x38, 0x5f, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, - 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x08, 0x64, 0x65, 0x66, 0x61, - 0x75, 0x6c, 0x74, 0x73, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, - 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x55, 0x74, 0x66, 0x38, 0x56, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, + 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x18, 0x84, 0x07, 0xa2, + 0x01, 0x0b, 0x12, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, + 0x08, 0xe8, 0x07, 0x52, 0x0e, 0x75, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x7e, 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x65, + 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, - 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, - 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x69, - 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xf8, 0x01, 0x0a, 0x18, 0x46, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4e, 0x0a, 0x14, 0x6f, - 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x26, 0x88, 0x01, 0x01, 0x98, + 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x14, 0x12, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, + 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x18, 0x84, 0x07, 0xb2, 0x01, 0x03, 0x08, + 0xe8, 0x07, 0x52, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, + 0x69, 0x6e, 0x67, 0x12, 0x82, 0x01, 
0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x13, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x61, - 0x62, 0x6c, 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x42, 0x0a, 0x0e, 0x66, - 0x69, 0x78, 0x65, 0x64, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, - 0x52, 0x0d, 0x66, 0x69, 0x78, 0x65, 0x64, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x4a, - 0x04, 0x08, 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x08, 0x66, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0xa7, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, - 0x01, 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, - 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, - 0x61, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, - 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, - 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, - 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, - 0x6e, 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, - 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, - 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, - 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, - 0xd0, 0x02, 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, - 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, - 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, - 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 
0x68, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, - 0x62, 0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, - 0x69, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, - 0x03, 0x65, 0x6e, 0x64, 0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08, - 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, - 0x6e, 0x74, 0x69, 0x63, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, - 0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, - 0x10, 0x02, 0x2a, 0xa7, 0x02, 0x0a, 0x07, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x13, - 0x0a, 0x0f, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, - 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4c, - 0x45, 0x47, 0x41, 0x43, 0x59, 0x10, 0x84, 0x07, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, - 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x32, 0x10, 0xe6, 0x07, 0x12, 0x13, 0x0a, - 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, 0x10, - 0xe7, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30, - 0x32, 0x33, 0x10, 0xe8, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, - 0x5f, 0x32, 0x30, 0x32, 0x34, 0x10, 0xe9, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, - 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, - 0x01, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x5f, 0x54, - 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, - 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x37, 0x5f, 0x54, 0x45, 0x53, 0x54, - 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9d, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, - 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x38, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, - 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9e, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, - 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x39, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, - 0x4e, 0x4c, 0x59, 0x10, 0x9f, 0x8d, 0x06, 0x12, 0x13, 0x0a, 0x0b, 0x45, 0x44, 0x49, 0x54, 0x49, - 0x4f, 0x4e, 0x5f, 0x4d, 0x41, 0x58, 0x10, 0xff, 0xff, 0xff, 0xff, 0x07, 0x42, 0x7e, 0x0a, 0x13, - 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 
0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, - 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, -} + 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, + 0x74, 0x42, 0x39, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, + 0x01, 0x17, 0x12, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f, + 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, 0x41, 0x4c, + 0x4c, 0x4f, 0x57, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0a, 0x6a, 0x73, + 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x5c, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c, + 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x46, 0x49, 0x45, + 0x4c, 0x44, 0x5f, 0x50, 0x52, 0x45, 0x53, 0x45, 0x4e, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, + 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, + 0x54, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10, + 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x52, 0x45, 0x51, 0x55, + 0x49, 0x52, 0x45, 0x44, 0x10, 0x03, 0x22, 0x37, 0x0a, 0x08, 0x45, 0x6e, 0x75, 0x6d, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4f, 0x50, 0x45, + 0x4e, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x10, 0x02, 0x22, + 0x56, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x23, 0x0a, 0x1f, 0x52, 0x45, 0x50, 0x45, + 0x41, 0x54, 0x45, 0x44, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, + 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, + 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, + 0x41, 0x4e, 0x44, 0x45, 0x44, 0x10, 0x02, 0x22, 0x49, 0x0a, 0x0e, 0x55, 0x74, 0x66, 0x38, 0x56, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x55, 0x54, 0x46, + 0x38, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, + 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, + 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x03, 0x22, 0x04, 0x08, 0x01, + 0x10, 0x01, 0x22, 0x53, 0x0a, 0x0f, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, + 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x1c, 0x0a, 0x18, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, + 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, + 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52, + 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, 0x4c, 0x49, + 0x4d, 0x49, 0x54, 0x45, 0x44, 0x10, 0x02, 0x22, 0x48, 0x0a, 0x0a, 0x4a, 0x73, 0x6f, 0x6e, 0x46, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x17, 0x0a, 0x13, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, 0x46, 0x4f, + 0x52, 0x4d, 0x41, 0x54, 0x5f, 
0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, + 0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10, 0x01, 0x12, 0x16, 0x0a, 0x12, 0x4c, 0x45, 0x47, + 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x10, + 0x02, 0x2a, 0x06, 0x08, 0xe8, 0x07, 0x10, 0x8b, 0x4e, 0x2a, 0x06, 0x08, 0x8b, 0x4e, 0x10, 0x90, + 0x4e, 0x2a, 0x06, 0x08, 0x90, 0x4e, 0x10, 0x91, 0x4e, 0x4a, 0x06, 0x08, 0xe7, 0x07, 0x10, 0xe8, + 0x07, 0x22, 0xef, 0x03, 0x0a, 0x12, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, + 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x08, 0x64, 0x65, 0x66, 0x61, + 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x2e, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, + 0x74, 0x73, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, + 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, + 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xf8, 0x01, 0x0a, 0x18, 0x46, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4e, 0x0a, 0x14, 0x6f, 0x76, 0x65, + 0x72, 0x72, 0x69, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x53, 0x65, 0x74, 0x52, 0x13, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x61, 0x62, 0x6c, + 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x42, 0x0a, 0x0e, 0x66, 0x69, 0x78, + 0x65, 0x64, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x0d, + 0x66, 0x69, 0x78, 0x65, 0x64, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x4a, 0x04, 0x08, + 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x73, 0x22, 0xb5, 0x02, 0x0a, 0x0e, 0x53, 
0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, + 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, 0x0a, + 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, + 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, + 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x42, + 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, 0x61, + 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, + 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, + 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, + 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x74, + 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x74, + 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x2a, 0x0c, 0x08, + 0x80, 0xec, 0xca, 0xff, 0x01, 0x10, 0x81, 0xec, 0xca, 0xff, 0x01, 0x22, 0xd0, 0x02, 0x0a, 0x11, + 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, + 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, + 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, + 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x65, 0x67, 0x69, + 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x12, 0x10, + 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, + 0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, + 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 
0x74, 0x69, 0x6f, + 0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x61, + 0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, + 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x45, + 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, 0x10, 0x02, 0x2a, 0xa7, + 0x02, 0x0a, 0x07, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x13, 0x0a, 0x0f, 0x45, 0x44, + 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, + 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4c, 0x45, 0x47, 0x41, 0x43, + 0x59, 0x10, 0x84, 0x07, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, + 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x32, 0x10, 0xe6, 0x07, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, + 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, 0x10, 0xe7, 0x07, 0x12, 0x11, + 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30, 0x32, 0x33, 0x10, 0xe8, + 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30, 0x32, + 0x34, 0x10, 0xe9, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, + 0x31, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x01, 0x12, 0x17, 0x0a, + 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, + 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, + 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x37, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, + 0x59, 0x10, 0x9d, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, + 0x5f, 0x39, 0x39, 0x39, 0x39, 0x38, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, + 0x10, 0x9e, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, + 0x39, 0x39, 0x39, 0x39, 0x39, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, + 0x9f, 0x8d, 0x06, 0x12, 0x13, 0x0a, 0x0b, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, + 0x41, 0x58, 0x10, 0xff, 0xff, 0xff, 0xff, 0x07, 0x42, 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, + 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, + 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, + 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65, + 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, +}) var ( file_google_protobuf_descriptor_proto_rawDescOnce sync.Once - file_google_protobuf_descriptor_proto_rawDescData = file_google_protobuf_descriptor_proto_rawDesc + file_google_protobuf_descriptor_proto_rawDescData []byte ) func file_google_protobuf_descriptor_proto_rawDescGZIP() []byte { file_google_protobuf_descriptor_proto_rawDescOnce.Do(func() { - file_google_protobuf_descriptor_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_descriptor_proto_rawDescData) + file_google_protobuf_descriptor_proto_rawDescData = 
protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_descriptor_proto_rawDesc), len(file_google_protobuf_descriptor_proto_rawDesc))) }) return file_google_protobuf_descriptor_proto_rawDescData } @@ -5323,7 +5293,7 @@ func file_google_protobuf_descriptor_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_protobuf_descriptor_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_descriptor_proto_rawDesc), len(file_google_protobuf_descriptor_proto_rawDesc)), NumEnums: 17, NumMessages: 33, NumExtensions: 0, @@ -5335,7 +5305,6 @@ func file_google_protobuf_descriptor_proto_init() { MessageInfos: file_google_protobuf_descriptor_proto_msgTypes, }.Build() File_google_protobuf_descriptor_proto = out.File - file_google_protobuf_descriptor_proto_rawDesc = nil file_google_protobuf_descriptor_proto_goTypes = nil file_google_protobuf_descriptor_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go index c7e860fcd..28d24bad7 100644 --- a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go +++ b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go @@ -16,15 +16,146 @@ import ( descriptorpb "google.golang.org/protobuf/types/descriptorpb" reflect "reflect" sync "sync" + unsafe "unsafe" ) -type GoFeatures struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +type GoFeatures_APILevel int32 + +const ( + // API_LEVEL_UNSPECIFIED results in selecting the OPEN API, + // but needs to be a separate value to distinguish between + // an explicitly set api level or a missing api level. + GoFeatures_API_LEVEL_UNSPECIFIED GoFeatures_APILevel = 0 + GoFeatures_API_OPEN GoFeatures_APILevel = 1 + GoFeatures_API_HYBRID GoFeatures_APILevel = 2 + GoFeatures_API_OPAQUE GoFeatures_APILevel = 3 +) + +// Enum value maps for GoFeatures_APILevel. +var ( + GoFeatures_APILevel_name = map[int32]string{ + 0: "API_LEVEL_UNSPECIFIED", + 1: "API_OPEN", + 2: "API_HYBRID", + 3: "API_OPAQUE", + } + GoFeatures_APILevel_value = map[string]int32{ + "API_LEVEL_UNSPECIFIED": 0, + "API_OPEN": 1, + "API_HYBRID": 2, + "API_OPAQUE": 3, + } +) + +func (x GoFeatures_APILevel) Enum() *GoFeatures_APILevel { + p := new(GoFeatures_APILevel) + *p = x + return p +} + +func (x GoFeatures_APILevel) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (GoFeatures_APILevel) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_go_features_proto_enumTypes[0].Descriptor() +} + +func (GoFeatures_APILevel) Type() protoreflect.EnumType { + return &file_google_protobuf_go_features_proto_enumTypes[0] +} + +func (x GoFeatures_APILevel) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *GoFeatures_APILevel) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = GoFeatures_APILevel(num) + return nil +} + +// Deprecated: Use GoFeatures_APILevel.Descriptor instead. 
+func (GoFeatures_APILevel) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_go_features_proto_rawDescGZIP(), []int{0, 0} +} + +type GoFeatures_StripEnumPrefix int32 + +const ( + GoFeatures_STRIP_ENUM_PREFIX_UNSPECIFIED GoFeatures_StripEnumPrefix = 0 + GoFeatures_STRIP_ENUM_PREFIX_KEEP GoFeatures_StripEnumPrefix = 1 + GoFeatures_STRIP_ENUM_PREFIX_GENERATE_BOTH GoFeatures_StripEnumPrefix = 2 + GoFeatures_STRIP_ENUM_PREFIX_STRIP GoFeatures_StripEnumPrefix = 3 +) + +// Enum value maps for GoFeatures_StripEnumPrefix. +var ( + GoFeatures_StripEnumPrefix_name = map[int32]string{ + 0: "STRIP_ENUM_PREFIX_UNSPECIFIED", + 1: "STRIP_ENUM_PREFIX_KEEP", + 2: "STRIP_ENUM_PREFIX_GENERATE_BOTH", + 3: "STRIP_ENUM_PREFIX_STRIP", + } + GoFeatures_StripEnumPrefix_value = map[string]int32{ + "STRIP_ENUM_PREFIX_UNSPECIFIED": 0, + "STRIP_ENUM_PREFIX_KEEP": 1, + "STRIP_ENUM_PREFIX_GENERATE_BOTH": 2, + "STRIP_ENUM_PREFIX_STRIP": 3, + } +) + +func (x GoFeatures_StripEnumPrefix) Enum() *GoFeatures_StripEnumPrefix { + p := new(GoFeatures_StripEnumPrefix) + *p = x + return p +} + +func (x GoFeatures_StripEnumPrefix) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (GoFeatures_StripEnumPrefix) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_go_features_proto_enumTypes[1].Descriptor() +} + +func (GoFeatures_StripEnumPrefix) Type() protoreflect.EnumType { + return &file_google_protobuf_go_features_proto_enumTypes[1] +} + +func (x GoFeatures_StripEnumPrefix) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} +// Deprecated: Do not use. +func (x *GoFeatures_StripEnumPrefix) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = GoFeatures_StripEnumPrefix(num) + return nil +} + +// Deprecated: Use GoFeatures_StripEnumPrefix.Descriptor instead. +func (GoFeatures_StripEnumPrefix) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_go_features_proto_rawDescGZIP(), []int{0, 1} +} + +type GoFeatures struct { + state protoimpl.MessageState `protogen:"open.v1"` // Whether or not to generate the deprecated UnmarshalJSON method for enums. + // Can only be true for proto using the Open Struct api. LegacyUnmarshalJsonEnum *bool `protobuf:"varint,1,opt,name=legacy_unmarshal_json_enum,json=legacyUnmarshalJsonEnum" json:"legacy_unmarshal_json_enum,omitempty"` + // One of OPEN, HYBRID or OPAQUE. 
+ ApiLevel *GoFeatures_APILevel `protobuf:"varint,2,opt,name=api_level,json=apiLevel,enum=pb.GoFeatures_APILevel" json:"api_level,omitempty"` + StripEnumPrefix *GoFeatures_StripEnumPrefix `protobuf:"varint,3,opt,name=strip_enum_prefix,json=stripEnumPrefix,enum=pb.GoFeatures_StripEnumPrefix" json:"strip_enum_prefix,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *GoFeatures) Reset() { @@ -64,6 +195,20 @@ func (x *GoFeatures) GetLegacyUnmarshalJsonEnum() bool { return false } +func (x *GoFeatures) GetApiLevel() GoFeatures_APILevel { + if x != nil && x.ApiLevel != nil { + return *x.ApiLevel + } + return GoFeatures_API_LEVEL_UNSPECIFIED +} + +func (x *GoFeatures) GetStripEnumPrefix() GoFeatures_StripEnumPrefix { + if x != nil && x.StripEnumPrefix != nil { + return *x.StripEnumPrefix + } + return GoFeatures_STRIP_ENUM_PREFIX_UNSPECIFIED +} + var file_google_protobuf_go_features_proto_extTypes = []protoimpl.ExtensionInfo{ { ExtendedType: (*descriptorpb.FeatureSet)(nil), @@ -83,12 +228,12 @@ var ( var File_google_protobuf_go_features_proto protoreflect.FileDescriptor -var file_google_protobuf_go_features_proto_rawDesc = []byte{ +var file_google_protobuf_go_features_proto_rawDesc = string([]byte{ 0x0a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x67, 0x6f, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x70, 0x62, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xcd, 0x01, 0x0a, 0x0a, 0x47, 0x6f, + 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xab, 0x05, 0x0a, 0x0a, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0xbe, 0x01, 0x0a, 0x1a, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x75, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, 0x6c, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x42, 0x80, 0x01, @@ -101,41 +246,76 @@ var file_google_protobuf_go_features_proto_rawDesc = []byte{ 0x20, 0x62, 0x65, 0x20, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x20, 0x61, 0x20, 0x66, 0x75, 0x74, 0x75, 0x72, 0x65, 0x20, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x17, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, - 0x6c, 0x4a, 0x73, 0x6f, 0x6e, 0x45, 0x6e, 0x75, 0x6d, 0x3a, 0x3c, 0x0a, 0x02, 0x67, 0x6f, 0x12, - 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x18, 0xea, 0x07, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x73, 0x52, 0x02, 0x67, 0x6f, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x67, 0x6f, 0x66, 0x65, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x70, 0x62, -} + 0x6c, 0x4a, 0x73, 0x6f, 0x6e, 0x45, 0x6e, 0x75, 0x6d, 0x12, 0x74, 0x0a, 0x09, 0x61, 0x70, 0x69, + 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x70, + 0x62, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x41, 0x50, 0x49, + 0x4c, 0x65, 0x76, 0x65, 
0x6c, 0x42, 0x3e, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01, 0x01, + 0xa2, 0x01, 0x1a, 0x12, 0x15, 0x41, 0x50, 0x49, 0x5f, 0x4c, 0x45, 0x56, 0x45, 0x4c, 0x5f, 0x55, + 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0f, + 0x12, 0x0a, 0x41, 0x50, 0x49, 0x5f, 0x4f, 0x50, 0x41, 0x51, 0x55, 0x45, 0x18, 0xe9, 0x07, 0xb2, + 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x08, 0x61, 0x70, 0x69, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, + 0x7c, 0x0a, 0x11, 0x73, 0x74, 0x72, 0x69, 0x70, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x70, 0x72, + 0x65, 0x66, 0x69, 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1e, 0x2e, 0x70, 0x62, 0x2e, + 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x70, + 0x45, 0x6e, 0x75, 0x6d, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x42, 0x30, 0x88, 0x01, 0x01, 0x98, + 0x01, 0x06, 0x98, 0x01, 0x07, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x1b, 0x12, 0x16, 0x53, 0x54, 0x52, + 0x49, 0x50, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x4b, + 0x45, 0x45, 0x50, 0x18, 0x84, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe9, 0x07, 0x52, 0x0f, 0x73, 0x74, + 0x72, 0x69, 0x70, 0x45, 0x6e, 0x75, 0x6d, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x22, 0x53, 0x0a, + 0x08, 0x41, 0x50, 0x49, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x19, 0x0a, 0x15, 0x41, 0x50, 0x49, + 0x5f, 0x4c, 0x45, 0x56, 0x45, 0x4c, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, + 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x41, 0x50, 0x49, 0x5f, 0x4f, 0x50, 0x45, 0x4e, + 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x41, 0x50, 0x49, 0x5f, 0x48, 0x59, 0x42, 0x52, 0x49, 0x44, + 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x41, 0x50, 0x49, 0x5f, 0x4f, 0x50, 0x41, 0x51, 0x55, 0x45, + 0x10, 0x03, 0x22, 0x92, 0x01, 0x0a, 0x0f, 0x53, 0x74, 0x72, 0x69, 0x70, 0x45, 0x6e, 0x75, 0x6d, + 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x54, 0x52, 0x49, 0x50, 0x5f, + 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x55, 0x4e, 0x53, 0x50, + 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1a, 0x0a, 0x16, 0x53, 0x54, 0x52, + 0x49, 0x50, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x4b, + 0x45, 0x45, 0x50, 0x10, 0x01, 0x12, 0x23, 0x0a, 0x1f, 0x53, 0x54, 0x52, 0x49, 0x50, 0x5f, 0x45, + 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x47, 0x45, 0x4e, 0x45, 0x52, + 0x41, 0x54, 0x45, 0x5f, 0x42, 0x4f, 0x54, 0x48, 0x10, 0x02, 0x12, 0x1b, 0x0a, 0x17, 0x53, 0x54, + 0x52, 0x49, 0x50, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, + 0x53, 0x54, 0x52, 0x49, 0x50, 0x10, 0x03, 0x3a, 0x3c, 0x0a, 0x02, 0x67, 0x6f, 0x12, 0x1b, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x18, 0xea, 0x07, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x73, 0x52, 0x02, 0x67, 0x6f, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x67, 0x6f, 0x66, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x73, 0x70, 0x62, +}) var ( file_google_protobuf_go_features_proto_rawDescOnce sync.Once - file_google_protobuf_go_features_proto_rawDescData = file_google_protobuf_go_features_proto_rawDesc + 
file_google_protobuf_go_features_proto_rawDescData []byte ) func file_google_protobuf_go_features_proto_rawDescGZIP() []byte { file_google_protobuf_go_features_proto_rawDescOnce.Do(func() { - file_google_protobuf_go_features_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_go_features_proto_rawDescData) + file_google_protobuf_go_features_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_go_features_proto_rawDesc), len(file_google_protobuf_go_features_proto_rawDesc))) }) return file_google_protobuf_go_features_proto_rawDescData } +var file_google_protobuf_go_features_proto_enumTypes = make([]protoimpl.EnumInfo, 2) var file_google_protobuf_go_features_proto_msgTypes = make([]protoimpl.MessageInfo, 1) var file_google_protobuf_go_features_proto_goTypes = []any{ - (*GoFeatures)(nil), // 0: pb.GoFeatures - (*descriptorpb.FeatureSet)(nil), // 1: google.protobuf.FeatureSet + (GoFeatures_APILevel)(0), // 0: pb.GoFeatures.APILevel + (GoFeatures_StripEnumPrefix)(0), // 1: pb.GoFeatures.StripEnumPrefix + (*GoFeatures)(nil), // 2: pb.GoFeatures + (*descriptorpb.FeatureSet)(nil), // 3: google.protobuf.FeatureSet } var file_google_protobuf_go_features_proto_depIdxs = []int32{ - 1, // 0: pb.go:extendee -> google.protobuf.FeatureSet - 0, // 1: pb.go:type_name -> pb.GoFeatures - 2, // [2:2] is the sub-list for method output_type - 2, // [2:2] is the sub-list for method input_type - 1, // [1:2] is the sub-list for extension type_name - 0, // [0:1] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name + 0, // 0: pb.GoFeatures.api_level:type_name -> pb.GoFeatures.APILevel + 1, // 1: pb.GoFeatures.strip_enum_prefix:type_name -> pb.GoFeatures.StripEnumPrefix + 3, // 2: pb.go:extendee -> google.protobuf.FeatureSet + 2, // 3: pb.go:type_name -> pb.GoFeatures + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method input_type + 3, // [3:4] is the sub-list for extension type_name + 2, // [2:3] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name } func init() { file_google_protobuf_go_features_proto_init() } @@ -147,19 +327,19 @@ func file_google_protobuf_go_features_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_protobuf_go_features_proto_rawDesc, - NumEnums: 0, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_go_features_proto_rawDesc), len(file_google_protobuf_go_features_proto_rawDesc)), + NumEnums: 2, NumMessages: 1, NumExtensions: 1, NumServices: 0, }, GoTypes: file_google_protobuf_go_features_proto_goTypes, DependencyIndexes: file_google_protobuf_go_features_proto_depIdxs, + EnumInfos: file_google_protobuf_go_features_proto_enumTypes, MessageInfos: file_google_protobuf_go_features_proto_msgTypes, ExtensionInfos: file_google_protobuf_go_features_proto_extTypes, }.Build() File_google_protobuf_go_features_proto = out.File - file_google_protobuf_go_features_proto_rawDesc = nil file_google_protobuf_go_features_proto_goTypes = nil file_google_protobuf_go_features_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go index 87da199a3..497da66e9 100644 --- a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go @@ -122,6 +122,7 @@ import ( reflect 
"reflect" strings "strings" sync "sync" + unsafe "unsafe" ) // `Any` contains an arbitrary serialized protocol buffer message along with a @@ -210,10 +211,7 @@ import ( // "value": "1.212s" // } type Any struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // A URL/resource name that uniquely identifies the type of the serialized // protocol buffer message. This string must contain at least // one "/" character. The last segment of the URL's path must represent @@ -244,7 +242,9 @@ type Any struct { // used with implementation specific semantics. TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` // Must be a valid serialized protocol buffer of the above specified type. - Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // New marshals src into a new Any instance. @@ -412,7 +412,7 @@ func (x *Any) GetValue() []byte { var File_google_protobuf_any_proto protoreflect.FileDescriptor -var file_google_protobuf_any_proto_rawDesc = []byte{ +var file_google_protobuf_any_proto_rawDesc = string([]byte{ 0x0a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, 0x36, 0x0a, 0x03, @@ -428,16 +428,16 @@ var file_google_protobuf_any_proto_rawDesc = []byte{ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +}) var ( file_google_protobuf_any_proto_rawDescOnce sync.Once - file_google_protobuf_any_proto_rawDescData = file_google_protobuf_any_proto_rawDesc + file_google_protobuf_any_proto_rawDescData []byte ) func file_google_protobuf_any_proto_rawDescGZIP() []byte { file_google_protobuf_any_proto_rawDescOnce.Do(func() { - file_google_protobuf_any_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_any_proto_rawDescData) + file_google_protobuf_any_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_any_proto_rawDesc), len(file_google_protobuf_any_proto_rawDesc))) }) return file_google_protobuf_any_proto_rawDescData } @@ -463,7 +463,7 @@ func file_google_protobuf_any_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_protobuf_any_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_any_proto_rawDesc), len(file_google_protobuf_any_proto_rawDesc)), NumEnums: 0, NumMessages: 1, NumExtensions: 0, @@ -474,7 +474,6 @@ func file_google_protobuf_any_proto_init() { MessageInfos: file_google_protobuf_any_proto_msgTypes, }.Build() File_google_protobuf_any_proto = out.File - file_google_protobuf_any_proto_rawDesc = nil file_google_protobuf_any_proto_goTypes = nil file_google_protobuf_any_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go index b99d4d241..193880d18 100644 --- 
a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go @@ -80,6 +80,7 @@ import ( reflect "reflect" sync "sync" time "time" + unsafe "unsafe" ) // A Duration represents a signed, fixed-length span of time represented @@ -141,10 +142,7 @@ import ( // be expressed in JSON format as "3.000000001s", and 3 seconds and 1 // microsecond should be expressed in JSON format as "3.000001s". type Duration struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Signed seconds of the span of time. Must be from -315,576,000,000 // to +315,576,000,000 inclusive. Note: these bounds are computed from: // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years @@ -155,7 +153,9 @@ type Duration struct { // of one second or more, a non-zero value for the `nanos` field must be // of the same sign as the `seconds` field. Must be from -999,999,999 // to +999,999,999 inclusive. - Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // New constructs a new Duration from the provided time.Duration. @@ -289,7 +289,7 @@ func (x *Duration) GetNanos() int32 { var File_google_protobuf_duration_proto protoreflect.FileDescriptor -var file_google_protobuf_duration_proto_rawDesc = []byte{ +var file_google_protobuf_duration_proto_rawDesc = string([]byte{ 0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, @@ -306,16 +306,16 @@ var file_google_protobuf_duration_proto_rawDesc = []byte{ 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +}) var ( file_google_protobuf_duration_proto_rawDescOnce sync.Once - file_google_protobuf_duration_proto_rawDescData = file_google_protobuf_duration_proto_rawDesc + file_google_protobuf_duration_proto_rawDescData []byte ) func file_google_protobuf_duration_proto_rawDescGZIP() []byte { file_google_protobuf_duration_proto_rawDescOnce.Do(func() { - file_google_protobuf_duration_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_duration_proto_rawDescData) + file_google_protobuf_duration_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_duration_proto_rawDesc), len(file_google_protobuf_duration_proto_rawDesc))) }) return file_google_protobuf_duration_proto_rawDescData } @@ -341,7 +341,7 @@ func file_google_protobuf_duration_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_protobuf_duration_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_duration_proto_rawDesc), len(file_google_protobuf_duration_proto_rawDesc)), NumEnums: 0, NumMessages: 1, NumExtensions: 0, @@ -352,7 +352,6 @@ func file_google_protobuf_duration_proto_init() { MessageInfos: 
file_google_protobuf_duration_proto_msgTypes, }.Build() File_google_protobuf_duration_proto = out.File - file_google_protobuf_duration_proto_rawDesc = nil file_google_protobuf_duration_proto_goTypes = nil file_google_protobuf_duration_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go index 1761bc9c6..a5b8657c4 100644 --- a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go @@ -38,6 +38,7 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" + unsafe "unsafe" ) // A generic empty message that you can re-use to avoid defining duplicated @@ -48,9 +49,9 @@ import ( // rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); // } type Empty struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Empty) Reset() { @@ -85,7 +86,7 @@ func (*Empty) Descriptor() ([]byte, []int) { var File_google_protobuf_empty_proto protoreflect.FileDescriptor -var file_google_protobuf_empty_proto_rawDesc = []byte{ +var file_google_protobuf_empty_proto_rawDesc = string([]byte{ 0x0a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, 0x07, @@ -98,16 +99,16 @@ var file_google_protobuf_empty_proto_rawDesc = []byte{ 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +}) var ( file_google_protobuf_empty_proto_rawDescOnce sync.Once - file_google_protobuf_empty_proto_rawDescData = file_google_protobuf_empty_proto_rawDesc + file_google_protobuf_empty_proto_rawDescData []byte ) func file_google_protobuf_empty_proto_rawDescGZIP() []byte { file_google_protobuf_empty_proto_rawDescOnce.Do(func() { - file_google_protobuf_empty_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_empty_proto_rawDescData) + file_google_protobuf_empty_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_empty_proto_rawDesc), len(file_google_protobuf_empty_proto_rawDesc))) }) return file_google_protobuf_empty_proto_rawDescData } @@ -133,7 +134,7 @@ func file_google_protobuf_empty_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_protobuf_empty_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_empty_proto_rawDesc), len(file_google_protobuf_empty_proto_rawDesc)), NumEnums: 0, NumMessages: 1, NumExtensions: 0, @@ -144,7 +145,6 @@ func file_google_protobuf_empty_proto_init() { MessageInfos: file_google_protobuf_empty_proto_msgTypes, }.Build() File_google_protobuf_empty_proto = out.File - file_google_protobuf_empty_proto_rawDesc = nil file_google_protobuf_empty_proto_goTypes = nil file_google_protobuf_empty_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go 
b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go index 8f206a661..ecdd31ab5 100644 --- a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go @@ -128,6 +128,7 @@ import ( reflect "reflect" sync "sync" utf8 "unicode/utf8" + unsafe "unsafe" ) // `NullValue` is a singleton enumeration to represent the null value for the @@ -187,12 +188,11 @@ func (NullValue) EnumDescriptor() ([]byte, []int) { // // The JSON representation for `Struct` is JSON object. type Struct struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Unordered map of dynamically typed values. - Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // NewStruct constructs a Struct from a general-purpose Go map. @@ -276,13 +276,10 @@ func (x *Struct) GetFields() map[string]*Value { // // The JSON representation for `Value` is JSON value. type Value struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // The kind of value. // - // Types that are assignable to Kind: + // Types that are valid to be assigned to Kind: // // *Value_NullValue // *Value_NumberValue @@ -290,7 +287,9 @@ type Value struct { // *Value_BoolValue // *Value_StructValue // *Value_ListValue - Kind isValue_Kind `protobuf_oneof:"kind"` + Kind isValue_Kind `protobuf_oneof:"kind"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // NewValue constructs a Value from a general-purpose Go interface. 
@@ -483,51 +482,63 @@ func (*Value) Descriptor() ([]byte, []int) { return file_google_protobuf_struct_proto_rawDescGZIP(), []int{1} } -func (m *Value) GetKind() isValue_Kind { - if m != nil { - return m.Kind +func (x *Value) GetKind() isValue_Kind { + if x != nil { + return x.Kind } return nil } func (x *Value) GetNullValue() NullValue { - if x, ok := x.GetKind().(*Value_NullValue); ok { - return x.NullValue + if x != nil { + if x, ok := x.Kind.(*Value_NullValue); ok { + return x.NullValue + } } return NullValue_NULL_VALUE } func (x *Value) GetNumberValue() float64 { - if x, ok := x.GetKind().(*Value_NumberValue); ok { - return x.NumberValue + if x != nil { + if x, ok := x.Kind.(*Value_NumberValue); ok { + return x.NumberValue + } } return 0 } func (x *Value) GetStringValue() string { - if x, ok := x.GetKind().(*Value_StringValue); ok { - return x.StringValue + if x != nil { + if x, ok := x.Kind.(*Value_StringValue); ok { + return x.StringValue + } } return "" } func (x *Value) GetBoolValue() bool { - if x, ok := x.GetKind().(*Value_BoolValue); ok { - return x.BoolValue + if x != nil { + if x, ok := x.Kind.(*Value_BoolValue); ok { + return x.BoolValue + } } return false } func (x *Value) GetStructValue() *Struct { - if x, ok := x.GetKind().(*Value_StructValue); ok { - return x.StructValue + if x != nil { + if x, ok := x.Kind.(*Value_StructValue); ok { + return x.StructValue + } } return nil } func (x *Value) GetListValue() *ListValue { - if x, ok := x.GetKind().(*Value_ListValue); ok { - return x.ListValue + if x != nil { + if x, ok := x.Kind.(*Value_ListValue); ok { + return x.ListValue + } } return nil } @@ -582,12 +593,11 @@ func (*Value_ListValue) isValue_Kind() {} // // The JSON representation for `ListValue` is JSON array. type ListValue struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Repeated field of dynamically typed values. - Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` + Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // NewList constructs a ListValue from a general-purpose Go slice. 
@@ -662,7 +672,7 @@ func (x *ListValue) GetValues() []*Value { var File_google_protobuf_struct_proto protoreflect.FileDescriptor -var file_google_protobuf_struct_proto_rawDesc = []byte{ +var file_google_protobuf_struct_proto_rawDesc = string([]byte{ 0x0a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, @@ -710,16 +720,16 @@ var file_google_protobuf_struct_proto_rawDesc = []byte{ 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +}) var ( file_google_protobuf_struct_proto_rawDescOnce sync.Once - file_google_protobuf_struct_proto_rawDescData = file_google_protobuf_struct_proto_rawDesc + file_google_protobuf_struct_proto_rawDescData []byte ) func file_google_protobuf_struct_proto_rawDescGZIP() []byte { file_google_protobuf_struct_proto_rawDescOnce.Do(func() { - file_google_protobuf_struct_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_struct_proto_rawDescData) + file_google_protobuf_struct_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_struct_proto_rawDesc), len(file_google_protobuf_struct_proto_rawDesc))) }) return file_google_protobuf_struct_proto_rawDescData } @@ -764,7 +774,7 @@ func file_google_protobuf_struct_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_protobuf_struct_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_struct_proto_rawDesc), len(file_google_protobuf_struct_proto_rawDesc)), NumEnums: 1, NumMessages: 4, NumExtensions: 0, @@ -776,7 +786,6 @@ func file_google_protobuf_struct_proto_init() { MessageInfos: file_google_protobuf_struct_proto_msgTypes, }.Build() File_google_protobuf_struct_proto = out.File - file_google_protobuf_struct_proto_rawDesc = nil file_google_protobuf_struct_proto_goTypes = nil file_google_protobuf_struct_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go index 0d20722d7..00ac835c0 100644 --- a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go @@ -78,6 +78,7 @@ import ( reflect "reflect" sync "sync" time "time" + unsafe "unsafe" ) // A Timestamp represents a point in time independent of any time zone or local @@ -170,10 +171,7 @@ import ( // http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime() // ) to obtain a formatter capable of generating timestamps in this format. type Timestamp struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Represents seconds of UTC time since Unix epoch // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to // 9999-12-31T23:59:59Z inclusive. @@ -182,7 +180,9 @@ type Timestamp struct { // second values with fractions must still have non-negative nanos values // that count forward in time. Must be from 0 to 999,999,999 // inclusive. 
- Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } // Now constructs a new Timestamp from the current time. @@ -298,7 +298,7 @@ func (x *Timestamp) GetNanos() int32 { var File_google_protobuf_timestamp_proto protoreflect.FileDescriptor -var file_google_protobuf_timestamp_proto_rawDesc = []byte{ +var file_google_protobuf_timestamp_proto_rawDesc = string([]byte{ 0x0a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, @@ -315,16 +315,16 @@ var file_google_protobuf_timestamp_proto_rawDesc = []byte{ 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +}) var ( file_google_protobuf_timestamp_proto_rawDescOnce sync.Once - file_google_protobuf_timestamp_proto_rawDescData = file_google_protobuf_timestamp_proto_rawDesc + file_google_protobuf_timestamp_proto_rawDescData []byte ) func file_google_protobuf_timestamp_proto_rawDescGZIP() []byte { file_google_protobuf_timestamp_proto_rawDescOnce.Do(func() { - file_google_protobuf_timestamp_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_timestamp_proto_rawDescData) + file_google_protobuf_timestamp_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_timestamp_proto_rawDesc), len(file_google_protobuf_timestamp_proto_rawDesc))) }) return file_google_protobuf_timestamp_proto_rawDescData } @@ -350,7 +350,7 @@ func file_google_protobuf_timestamp_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_protobuf_timestamp_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_timestamp_proto_rawDesc), len(file_google_protobuf_timestamp_proto_rawDesc)), NumEnums: 0, NumMessages: 1, NumExtensions: 0, @@ -361,7 +361,6 @@ func file_google_protobuf_timestamp_proto_init() { MessageInfos: file_google_protobuf_timestamp_proto_msgTypes, }.Build() File_google_protobuf_timestamp_proto = out.File - file_google_protobuf_timestamp_proto_rawDesc = nil file_google_protobuf_timestamp_proto_goTypes = nil file_google_protobuf_timestamp_proto_depIdxs = nil } diff --git a/vendor/gopkg.in/ini.v1/.editorconfig b/vendor/gopkg.in/ini.v1/.editorconfig deleted file mode 100644 index 4a2d9180f..000000000 --- a/vendor/gopkg.in/ini.v1/.editorconfig +++ /dev/null @@ -1,12 +0,0 @@ -# http://editorconfig.org - -root = true - -[*] -charset = utf-8 -end_of_line = lf -insert_final_newline = true -trim_trailing_whitespace = true - -[*_test.go] -trim_trailing_whitespace = false diff --git a/vendor/gopkg.in/ini.v1/.gitignore b/vendor/gopkg.in/ini.v1/.gitignore deleted file mode 100644 index 588388bda..000000000 --- a/vendor/gopkg.in/ini.v1/.gitignore +++ /dev/null @@ -1,7 +0,0 @@ -testdata/conf_out.ini -ini.sublime-project -ini.sublime-workspace -testdata/conf_reflect.ini -.idea -/.vscode -.DS_Store diff --git a/vendor/gopkg.in/ini.v1/.golangci.yml 
b/vendor/gopkg.in/ini.v1/.golangci.yml deleted file mode 100644 index 631e36925..000000000 --- a/vendor/gopkg.in/ini.v1/.golangci.yml +++ /dev/null @@ -1,27 +0,0 @@ -linters-settings: - staticcheck: - checks: [ - "all", - "-SA1019" # There are valid use cases of strings.Title - ] - nakedret: - max-func-lines: 0 # Disallow any unnamed return statement - -linters: - enable: - - deadcode - - errcheck - - gosimple - - govet - - ineffassign - - staticcheck - - structcheck - - typecheck - - unused - - varcheck - - nakedret - - gofmt - - rowserrcheck - - unconvert - - goimports - - unparam diff --git a/vendor/gopkg.in/ini.v1/LICENSE b/vendor/gopkg.in/ini.v1/LICENSE deleted file mode 100644 index d361bbcdf..000000000 --- a/vendor/gopkg.in/ini.v1/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ -Apache License -Version 2.0, January 2004 -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - -"License" shall mean the terms and conditions for use, reproduction, and -distribution as defined by Sections 1 through 9 of this document. - -"Licensor" shall mean the copyright owner or entity authorized by the copyright -owner that is granting the License. - -"Legal Entity" shall mean the union of the acting entity and all other entities -that control, are controlled by, or are under common control with that entity. -For the purposes of this definition, "control" means (i) the power, direct or -indirect, to cause the direction or management of such entity, whether by -contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the -outstanding shares, or (iii) beneficial ownership of such entity. - -"You" (or "Your") shall mean an individual or Legal Entity exercising -permissions granted by this License. - -"Source" form shall mean the preferred form for making modifications, including -but not limited to software source code, documentation source, and configuration -files. - -"Object" form shall mean any form resulting from mechanical transformation or -translation of a Source form, including but not limited to compiled object code, -generated documentation, and conversions to other media types. - -"Work" shall mean the work of authorship, whether in Source or Object form, made -available under the License, as indicated by a copyright notice that is included -in or attached to the work (an example is provided in the Appendix below). - -"Derivative Works" shall mean any work, whether in Source or Object form, that -is based on (or derived from) the Work and for which the editorial revisions, -annotations, elaborations, or other modifications represent, as a whole, an -original work of authorship. For the purposes of this License, Derivative Works -shall not include works that remain separable from, or merely link (or bind by -name) to the interfaces of, the Work and Derivative Works thereof. - -"Contribution" shall mean any work of authorship, including the original version -of the Work and any modifications or additions to that Work or Derivative Works -thereof, that is intentionally submitted to Licensor for inclusion in the Work -by the copyright owner or by an individual or Legal Entity authorized to submit -on behalf of the copyright owner. 
For the purposes of this definition, -"submitted" means any form of electronic, verbal, or written communication sent -to the Licensor or its representatives, including but not limited to -communication on electronic mailing lists, source code control systems, and -issue tracking systems that are managed by, or on behalf of, the Licensor for -the purpose of discussing and improving the Work, but excluding communication -that is conspicuously marked or otherwise designated in writing by the copyright -owner as "Not a Contribution." - -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf -of whom a Contribution has been received by Licensor and subsequently -incorporated within the Work. - -2. Grant of Copyright License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable copyright license to reproduce, prepare Derivative Works of, -publicly display, publicly perform, sublicense, and distribute the Work and such -Derivative Works in Source or Object form. - -3. Grant of Patent License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable (except as stated in this section) patent license to make, have -made, use, offer to sell, sell, import, and otherwise transfer the Work, where -such license applies only to those patent claims licensable by such Contributor -that are necessarily infringed by their Contribution(s) alone or by combination -of their Contribution(s) with the Work to which such Contribution(s) was -submitted. If You institute patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Work or a -Contribution incorporated within the Work constitutes direct or contributory -patent infringement, then any patent licenses granted to You under this License -for that Work shall terminate as of the date such litigation is filed. - -4. Redistribution. - -You may reproduce and distribute copies of the Work or Derivative Works thereof -in any medium, with or without modifications, and in Source or Object form, -provided that You meet the following conditions: - -You must give any other recipients of the Work or Derivative Works a copy of -this License; and -You must cause any modified files to carry prominent notices stating that You -changed the files; and -You must retain, in the Source form of any Derivative Works that You distribute, -all copyright, patent, trademark, and attribution notices from the Source form -of the Work, excluding those notices that do not pertain to any part of the -Derivative Works; and -If the Work includes a "NOTICE" text file as part of its distribution, then any -Derivative Works that You distribute must include a readable copy of the -attribution notices contained within such NOTICE file, excluding those notices -that do not pertain to any part of the Derivative Works, in at least one of the -following places: within a NOTICE text file distributed as part of the -Derivative Works; within the Source form or documentation, if provided along -with the Derivative Works; or, within a display generated by the Derivative -Works, if and wherever such third-party notices normally appear. The contents of -the NOTICE file are for informational purposes only and do not modify the -License. 
You may add Your own attribution notices within Derivative Works that -You distribute, alongside or as an addendum to the NOTICE text from the Work, -provided that such additional attribution notices cannot be construed as -modifying the License. -You may add Your own copyright statement to Your modifications and may provide -additional or different license terms and conditions for use, reproduction, or -distribution of Your modifications, or for any such Derivative Works as a whole, -provided Your use, reproduction, and distribution of the Work otherwise complies -with the conditions stated in this License. - -5. Submission of Contributions. - -Unless You explicitly state otherwise, any Contribution intentionally submitted -for inclusion in the Work by You to the Licensor shall be under the terms and -conditions of this License, without any additional terms or conditions. -Notwithstanding the above, nothing herein shall supersede or modify the terms of -any separate license agreement you may have executed with Licensor regarding -such Contributions. - -6. Trademarks. - -This License does not grant permission to use the trade names, trademarks, -service marks, or product names of the Licensor, except as required for -reasonable and customary use in describing the origin of the Work and -reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. - -Unless required by applicable law or agreed to in writing, Licensor provides the -Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, -including, without limitation, any warranties or conditions of TITLE, -NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are -solely responsible for determining the appropriateness of using or -redistributing the Work and assume any risks associated with Your exercise of -permissions under this License. - -8. Limitation of Liability. - -In no event and under no legal theory, whether in tort (including negligence), -contract, or otherwise, unless required by applicable law (such as deliberate -and grossly negligent acts) or agreed to in writing, shall any Contributor be -liable to You for damages, including any direct, indirect, special, incidental, -or consequential damages of any character arising as a result of this License or -out of the use or inability to use the Work (including but not limited to -damages for loss of goodwill, work stoppage, computer failure or malfunction, or -any and all other commercial damages or losses), even if such Contributor has -been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. - -While redistributing the Work or Derivative Works thereof, You may choose to -offer, and charge a fee for, acceptance of support, warranty, indemnity, or -other liability obligations and/or rights consistent with this License. However, -in accepting such obligations, You may act only on Your own behalf and on Your -sole responsibility, not on behalf of any other Contributor, and only if You -agree to indemnify, defend, and hold each Contributor harmless for any liability -incurred by, or claims asserted against, such Contributor by reason of your -accepting any such warranty or additional liability. 
- -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work - -To apply the Apache License to your work, attach the following boilerplate -notice, with the fields enclosed by brackets "[]" replaced with your own -identifying information. (Don't include the brackets!) The text should be -enclosed in the appropriate comment syntax for the file format. We also -recommend that a file or class name and description of purpose be included on -the same "printed page" as the copyright notice for easier identification within -third-party archives. - - Copyright 2014 Unknwon - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/gopkg.in/ini.v1/Makefile b/vendor/gopkg.in/ini.v1/Makefile deleted file mode 100644 index f3b0dae2d..000000000 --- a/vendor/gopkg.in/ini.v1/Makefile +++ /dev/null @@ -1,15 +0,0 @@ -.PHONY: build test bench vet coverage - -build: vet bench - -test: - go test -v -cover -race - -bench: - go test -v -cover -test.bench=. -test.benchmem - -vet: - go vet - -coverage: - go test -coverprofile=c.out && go tool cover -html=c.out && rm c.out diff --git a/vendor/gopkg.in/ini.v1/README.md b/vendor/gopkg.in/ini.v1/README.md deleted file mode 100644 index 30606d970..000000000 --- a/vendor/gopkg.in/ini.v1/README.md +++ /dev/null @@ -1,43 +0,0 @@ -# INI - -[![GitHub Workflow Status](https://img.shields.io/github/checks-status/go-ini/ini/main?logo=github&style=for-the-badge)](https://github.com/go-ini/ini/actions?query=branch%3Amain) -[![codecov](https://img.shields.io/codecov/c/github/go-ini/ini/master?logo=codecov&style=for-the-badge)](https://codecov.io/gh/go-ini/ini) -[![GoDoc](https://img.shields.io/badge/GoDoc-Reference-blue?style=for-the-badge&logo=go)](https://pkg.go.dev/github.com/go-ini/ini?tab=doc) -[![Sourcegraph](https://img.shields.io/badge/view%20on-Sourcegraph-brightgreen.svg?style=for-the-badge&logo=sourcegraph)](https://sourcegraph.com/github.com/go-ini/ini) - -![](https://avatars0.githubusercontent.com/u/10216035?v=3&s=200) - -Package ini provides INI file read and write functionality in Go. - -## Features - -- Load from multiple data sources(file, `[]byte`, `io.Reader` and `io.ReadCloser`) with overwrites. -- Read with recursion values. -- Read with parent-child sections. -- Read with auto-increment key names. -- Read with multiple-line values. -- Read with tons of helper methods. -- Read and convert values to Go types. -- Read and **WRITE** comments of sections and keys. -- Manipulate sections, keys and comments with ease. -- Keep sections and keys in order as you parse and save. - -## Installation - -The minimum requirement of Go is **1.13**. - -```sh -$ go get gopkg.in/ini.v1 -``` - -Please add `-u` flag to update in the future. - -## Getting Help - -- [Getting Started](https://ini.unknwon.io/docs/intro/getting_started) -- [API Documentation](https://gowalker.org/gopkg.in/ini.v1) -- 中国大陆镜像:https://ini.unknwon.cn - -## License - -This project is under Apache v2 License. See the [LICENSE](LICENSE) file for the full license text. 
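Since the README above documents the API being vendored out, here is a minimal, self-contained sketch of how gopkg.in/ini.v1 is typically consumed (the file name app.ini and the section/key names are hypothetical), which may help when auditing callers affected by this removal:

```go
package main

import (
	"fmt"

	"gopkg.in/ini.v1"
)

func main() {
	// Load accepts file paths, []byte, io.Reader, or io.ReadCloser sources.
	cfg, err := ini.Load("app.ini")
	if err != nil {
		fmt.Println("failed to read config:", err)
		return
	}
	// Section and Key never return nil; Must* helpers supply defaults
	// when a value is missing or fails to parse.
	host := cfg.Section("server").Key("host").MustString("localhost")
	port := cfg.Section("server").Key("port").MustInt(8080)
	fmt.Printf("%s:%d\n", host, port)
}
```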
diff --git a/vendor/gopkg.in/ini.v1/codecov.yml b/vendor/gopkg.in/ini.v1/codecov.yml deleted file mode 100644 index e02ec84bc..000000000 --- a/vendor/gopkg.in/ini.v1/codecov.yml +++ /dev/null @@ -1,16 +0,0 @@ -coverage: - range: "60...95" - status: - project: - default: - threshold: 1% - informational: true - patch: - defualt: - only_pulls: true - informational: true - -comment: - layout: 'diff' - -github_checks: false diff --git a/vendor/gopkg.in/ini.v1/data_source.go b/vendor/gopkg.in/ini.v1/data_source.go deleted file mode 100644 index c3a541f1d..000000000 --- a/vendor/gopkg.in/ini.v1/data_source.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2019 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package ini - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "os" -) - -var ( - _ dataSource = (*sourceFile)(nil) - _ dataSource = (*sourceData)(nil) - _ dataSource = (*sourceReadCloser)(nil) -) - -// dataSource is an interface that returns object which can be read and closed. -type dataSource interface { - ReadCloser() (io.ReadCloser, error) -} - -// sourceFile represents an object that contains content on the local file system. -type sourceFile struct { - name string -} - -func (s sourceFile) ReadCloser() (_ io.ReadCloser, err error) { - return os.Open(s.name) -} - -// sourceData represents an object that contains content in memory. -type sourceData struct { - data []byte -} - -func (s *sourceData) ReadCloser() (io.ReadCloser, error) { - return ioutil.NopCloser(bytes.NewReader(s.data)), nil -} - -// sourceReadCloser represents an input stream with Close method. -type sourceReadCloser struct { - reader io.ReadCloser -} - -func (s *sourceReadCloser) ReadCloser() (io.ReadCloser, error) { - return s.reader, nil -} - -func parseDataSource(source interface{}) (dataSource, error) { - switch s := source.(type) { - case string: - return sourceFile{s}, nil - case []byte: - return &sourceData{s}, nil - case io.ReadCloser: - return &sourceReadCloser{s}, nil - case io.Reader: - return &sourceReadCloser{ioutil.NopCloser(s)}, nil - default: - return nil, fmt.Errorf("error parsing data source: unknown type %q", s) - } -} diff --git a/vendor/gopkg.in/ini.v1/deprecated.go b/vendor/gopkg.in/ini.v1/deprecated.go deleted file mode 100644 index 48b8e66d6..000000000 --- a/vendor/gopkg.in/ini.v1/deprecated.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2019 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package ini - -var ( - // Deprecated: Use "DefaultSection" instead. 
- DEFAULT_SECTION = DefaultSection - // Deprecated: AllCapsUnderscore converts to format ALL_CAPS_UNDERSCORE. - AllCapsUnderscore = SnackCase -) diff --git a/vendor/gopkg.in/ini.v1/error.go b/vendor/gopkg.in/ini.v1/error.go deleted file mode 100644 index f66bc94b8..000000000 --- a/vendor/gopkg.in/ini.v1/error.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2016 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package ini - -import ( - "fmt" -) - -// ErrDelimiterNotFound indicates the error type of no delimiter is found which there should be one. -type ErrDelimiterNotFound struct { - Line string -} - -// IsErrDelimiterNotFound returns true if the given error is an instance of ErrDelimiterNotFound. -func IsErrDelimiterNotFound(err error) bool { - _, ok := err.(ErrDelimiterNotFound) - return ok -} - -func (err ErrDelimiterNotFound) Error() string { - return fmt.Sprintf("key-value delimiter not found: %s", err.Line) -} - -// ErrEmptyKeyName indicates the error type of no key name is found which there should be one. -type ErrEmptyKeyName struct { - Line string -} - -// IsErrEmptyKeyName returns true if the given error is an instance of ErrEmptyKeyName. -func IsErrEmptyKeyName(err error) bool { - _, ok := err.(ErrEmptyKeyName) - return ok -} - -func (err ErrEmptyKeyName) Error() string { - return fmt.Sprintf("empty key name: %s", err.Line) -} diff --git a/vendor/gopkg.in/ini.v1/file.go b/vendor/gopkg.in/ini.v1/file.go deleted file mode 100644 index f8b22408b..000000000 --- a/vendor/gopkg.in/ini.v1/file.go +++ /dev/null @@ -1,541 +0,0 @@ -// Copyright 2017 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package ini - -import ( - "bytes" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "strings" - "sync" -) - -// File represents a combination of one or more INI files in memory. -type File struct { - options LoadOptions - dataSources []dataSource - - // Should make things safe, but sometimes doesn't matter. - BlockMode bool - lock sync.RWMutex - - // To keep data in order. - sectionList []string - // To keep track of the index of a section with same name. - // This meta list is only used with non-unique section names are allowed. - sectionIndexes []int - - // Actual data is stored here. - sections map[string][]*Section - - NameMapper - ValueMapper -} - -// newFile initializes File object with given data sources. 
-func newFile(dataSources []dataSource, opts LoadOptions) *File { - if len(opts.KeyValueDelimiters) == 0 { - opts.KeyValueDelimiters = "=:" - } - if len(opts.KeyValueDelimiterOnWrite) == 0 { - opts.KeyValueDelimiterOnWrite = "=" - } - if len(opts.ChildSectionDelimiter) == 0 { - opts.ChildSectionDelimiter = "." - } - - return &File{ - BlockMode: true, - dataSources: dataSources, - sections: make(map[string][]*Section), - options: opts, - } -} - -// Empty returns an empty file object. -func Empty(opts ...LoadOptions) *File { - var opt LoadOptions - if len(opts) > 0 { - opt = opts[0] - } - - // Ignore error here, we are sure our data is good. - f, _ := LoadSources(opt, []byte("")) - return f -} - -// NewSection creates a new section. -func (f *File) NewSection(name string) (*Section, error) { - if len(name) == 0 { - return nil, errors.New("empty section name") - } - - if (f.options.Insensitive || f.options.InsensitiveSections) && name != DefaultSection { - name = strings.ToLower(name) - } - - if f.BlockMode { - f.lock.Lock() - defer f.lock.Unlock() - } - - if !f.options.AllowNonUniqueSections && inSlice(name, f.sectionList) { - return f.sections[name][0], nil - } - - f.sectionList = append(f.sectionList, name) - - // NOTE: Append to indexes must happen before appending to sections, - // otherwise index will have off-by-one problem. - f.sectionIndexes = append(f.sectionIndexes, len(f.sections[name])) - - sec := newSection(f, name) - f.sections[name] = append(f.sections[name], sec) - - return sec, nil -} - -// NewRawSection creates a new section with an unparseable body. -func (f *File) NewRawSection(name, body string) (*Section, error) { - section, err := f.NewSection(name) - if err != nil { - return nil, err - } - - section.isRawSection = true - section.rawBody = body - return section, nil -} - -// NewSections creates a list of sections. -func (f *File) NewSections(names ...string) (err error) { - for _, name := range names { - if _, err = f.NewSection(name); err != nil { - return err - } - } - return nil -} - -// GetSection returns section by given name. -func (f *File) GetSection(name string) (*Section, error) { - secs, err := f.SectionsByName(name) - if err != nil { - return nil, err - } - - return secs[0], err -} - -// HasSection returns true if the file contains a section with given name. -func (f *File) HasSection(name string) bool { - section, _ := f.GetSection(name) - return section != nil -} - -// SectionsByName returns all sections with given name. -func (f *File) SectionsByName(name string) ([]*Section, error) { - if len(name) == 0 { - name = DefaultSection - } - if f.options.Insensitive || f.options.InsensitiveSections { - name = strings.ToLower(name) - } - - if f.BlockMode { - f.lock.RLock() - defer f.lock.RUnlock() - } - - secs := f.sections[name] - if len(secs) == 0 { - return nil, fmt.Errorf("section %q does not exist", name) - } - - return secs, nil -} - -// Section assumes named section exists and returns a zero-value when not. -func (f *File) Section(name string) *Section { - sec, err := f.GetSection(name) - if err != nil { - if name == "" { - name = DefaultSection - } - sec, _ = f.NewSection(name) - return sec - } - return sec -} - -// SectionWithIndex assumes named section exists and returns a new section when not. 
-func (f *File) SectionWithIndex(name string, index int) *Section { - secs, err := f.SectionsByName(name) - if err != nil || len(secs) <= index { - // NOTE: It's OK here because the only possible error is empty section name, - // but if it's empty, this piece of code won't be executed. - newSec, _ := f.NewSection(name) - return newSec - } - - return secs[index] -} - -// Sections returns a list of Section stored in the current instance. -func (f *File) Sections() []*Section { - if f.BlockMode { - f.lock.RLock() - defer f.lock.RUnlock() - } - - sections := make([]*Section, len(f.sectionList)) - for i, name := range f.sectionList { - sections[i] = f.sections[name][f.sectionIndexes[i]] - } - return sections -} - -// ChildSections returns a list of child sections of given section name. -func (f *File) ChildSections(name string) []*Section { - return f.Section(name).ChildSections() -} - -// SectionStrings returns list of section names. -func (f *File) SectionStrings() []string { - list := make([]string, len(f.sectionList)) - copy(list, f.sectionList) - return list -} - -// DeleteSection deletes a section or all sections with given name. -func (f *File) DeleteSection(name string) { - secs, err := f.SectionsByName(name) - if err != nil { - return - } - - for i := 0; i < len(secs); i++ { - // For non-unique sections, it is always needed to remove the first one so - // in the next iteration, the subsequent section continue having index 0. - // Ignoring the error as index 0 never returns an error. - _ = f.DeleteSectionWithIndex(name, 0) - } -} - -// DeleteSectionWithIndex deletes a section with given name and index. -func (f *File) DeleteSectionWithIndex(name string, index int) error { - if !f.options.AllowNonUniqueSections && index != 0 { - return fmt.Errorf("delete section with non-zero index is only allowed when non-unique sections is enabled") - } - - if len(name) == 0 { - name = DefaultSection - } - if f.options.Insensitive || f.options.InsensitiveSections { - name = strings.ToLower(name) - } - - if f.BlockMode { - f.lock.Lock() - defer f.lock.Unlock() - } - - // Count occurrences of the sections - occurrences := 0 - - sectionListCopy := make([]string, len(f.sectionList)) - copy(sectionListCopy, f.sectionList) - - for i, s := range sectionListCopy { - if s != name { - continue - } - - if occurrences == index { - if len(f.sections[name]) <= 1 { - delete(f.sections, name) // The last one in the map - } else { - f.sections[name] = append(f.sections[name][:index], f.sections[name][index+1:]...) - } - - // Fix section lists - f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...) - f.sectionIndexes = append(f.sectionIndexes[:i], f.sectionIndexes[i+1:]...) - - } else if occurrences > index { - // Fix the indices of all following sections with this name. - f.sectionIndexes[i-1]-- - } - - occurrences++ - } - - return nil -} - -func (f *File) reload(s dataSource) error { - r, err := s.ReadCloser() - if err != nil { - return err - } - defer r.Close() - - return f.parse(r) -} - -// Reload reloads and parses all data sources. -func (f *File) Reload() (err error) { - for _, s := range f.dataSources { - if err = f.reload(s); err != nil { - // In loose mode, we create an empty default section for nonexistent files. - if os.IsNotExist(err) && f.options.Loose { - _ = f.parse(bytes.NewBuffer(nil)) - continue - } - return err - } - if f.options.ShortCircuit { - return nil - } - } - return nil -} - -// Append appends one or more data sources and reloads automatically. 
-func (f *File) Append(source interface{}, others ...interface{}) error { - ds, err := parseDataSource(source) - if err != nil { - return err - } - f.dataSources = append(f.dataSources, ds) - for _, s := range others { - ds, err = parseDataSource(s) - if err != nil { - return err - } - f.dataSources = append(f.dataSources, ds) - } - return f.Reload() -} - -func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) { - equalSign := DefaultFormatLeft + f.options.KeyValueDelimiterOnWrite + DefaultFormatRight - - if PrettyFormat || PrettyEqual { - equalSign = fmt.Sprintf(" %s ", f.options.KeyValueDelimiterOnWrite) - } - - // Use buffer to make sure target is safe until finish encoding. - buf := bytes.NewBuffer(nil) - lastSectionIdx := len(f.sectionList) - 1 - for i, sname := range f.sectionList { - sec := f.SectionWithIndex(sname, f.sectionIndexes[i]) - if len(sec.Comment) > 0 { - // Support multiline comments - lines := strings.Split(sec.Comment, LineBreak) - for i := range lines { - if lines[i][0] != '#' && lines[i][0] != ';' { - lines[i] = "; " + lines[i] - } else { - lines[i] = lines[i][:1] + " " + strings.TrimSpace(lines[i][1:]) - } - - if _, err := buf.WriteString(lines[i] + LineBreak); err != nil { - return nil, err - } - } - } - - if i > 0 || DefaultHeader || (i == 0 && strings.ToUpper(sec.name) != DefaultSection) { - if _, err := buf.WriteString("[" + sname + "]" + LineBreak); err != nil { - return nil, err - } - } else { - // Write nothing if default section is empty - if len(sec.keyList) == 0 { - continue - } - } - - isLastSection := i == lastSectionIdx - if sec.isRawSection { - if _, err := buf.WriteString(sec.rawBody); err != nil { - return nil, err - } - - if PrettySection && !isLastSection { - // Put a line between sections - if _, err := buf.WriteString(LineBreak); err != nil { - return nil, err - } - } - continue - } - - // Count and generate alignment length and buffer spaces using the - // longest key. Keys may be modified if they contain certain characters so - // we need to take that into account in our calculation. 
- alignLength := 0 - if PrettyFormat { - for _, kname := range sec.keyList { - keyLength := len(kname) - // First case will surround key by ` and second by """ - if strings.Contains(kname, "\"") || strings.ContainsAny(kname, f.options.KeyValueDelimiters) { - keyLength += 2 - } else if strings.Contains(kname, "`") { - keyLength += 6 - } - - if keyLength > alignLength { - alignLength = keyLength - } - } - } - alignSpaces := bytes.Repeat([]byte(" "), alignLength) - - KeyList: - for _, kname := range sec.keyList { - key := sec.Key(kname) - if len(key.Comment) > 0 { - if len(indent) > 0 && sname != DefaultSection { - buf.WriteString(indent) - } - - // Support multiline comments - lines := strings.Split(key.Comment, LineBreak) - for i := range lines { - if lines[i][0] != '#' && lines[i][0] != ';' { - lines[i] = "; " + strings.TrimSpace(lines[i]) - } else { - lines[i] = lines[i][:1] + " " + strings.TrimSpace(lines[i][1:]) - } - - if _, err := buf.WriteString(lines[i] + LineBreak); err != nil { - return nil, err - } - } - } - - if len(indent) > 0 && sname != DefaultSection { - buf.WriteString(indent) - } - - switch { - case key.isAutoIncrement: - kname = "-" - case strings.Contains(kname, "\"") || strings.ContainsAny(kname, f.options.KeyValueDelimiters): - kname = "`" + kname + "`" - case strings.Contains(kname, "`"): - kname = `"""` + kname + `"""` - } - - writeKeyValue := func(val string) (bool, error) { - if _, err := buf.WriteString(kname); err != nil { - return false, err - } - - if key.isBooleanType { - buf.WriteString(LineBreak) - return true, nil - } - - // Write out alignment spaces before "=" sign - if PrettyFormat { - buf.Write(alignSpaces[:alignLength-len(kname)]) - } - - // In case key value contains "\n", "`", "\"", "#" or ";" - if strings.ContainsAny(val, "\n`") { - val = `"""` + val + `"""` - } else if !f.options.IgnoreInlineComment && strings.ContainsAny(val, "#;") { - val = "`" + val + "`" - } else if len(strings.TrimSpace(val)) != len(val) { - val = `"` + val + `"` - } - if _, err := buf.WriteString(equalSign + val + LineBreak); err != nil { - return false, err - } - return false, nil - } - - shadows := key.ValueWithShadows() - if len(shadows) == 0 { - if _, err := writeKeyValue(""); err != nil { - return nil, err - } - } - - for _, val := range shadows { - exitLoop, err := writeKeyValue(val) - if err != nil { - return nil, err - } else if exitLoop { - continue KeyList - } - } - - for _, val := range key.nestedValues { - if _, err := buf.WriteString(indent + " " + val + LineBreak); err != nil { - return nil, err - } - } - } - - if PrettySection && !isLastSection { - // Put a line between sections - if _, err := buf.WriteString(LineBreak); err != nil { - return nil, err - } - } - } - - return buf, nil -} - -// WriteToIndent writes content into io.Writer with given indention. -// If PrettyFormat has been set to be true, -// it will align "=" sign with spaces under each section. -func (f *File) WriteToIndent(w io.Writer, indent string) (int64, error) { - buf, err := f.writeToBuffer(indent) - if err != nil { - return 0, err - } - return buf.WriteTo(w) -} - -// WriteTo writes file content into io.Writer. -func (f *File) WriteTo(w io.Writer) (int64, error) { - return f.WriteToIndent(w, "") -} - -// SaveToIndent writes content to file system with given value indention. -func (f *File) SaveToIndent(filename, indent string) error { - // Note: Because we are truncating with os.Create, - // so it's safer to save to a temporary file location and rename after done. 
- buf, err := f.writeToBuffer(indent) - if err != nil { - return err - } - - return ioutil.WriteFile(filename, buf.Bytes(), 0666) -} - -// SaveTo writes content to file system. -func (f *File) SaveTo(filename string) error { - return f.SaveToIndent(filename, "") -} diff --git a/vendor/gopkg.in/ini.v1/helper.go b/vendor/gopkg.in/ini.v1/helper.go deleted file mode 100644 index f9d80a682..000000000 --- a/vendor/gopkg.in/ini.v1/helper.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2019 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package ini - -func inSlice(str string, s []string) bool { - for _, v := range s { - if str == v { - return true - } - } - return false -} diff --git a/vendor/gopkg.in/ini.v1/ini.go b/vendor/gopkg.in/ini.v1/ini.go deleted file mode 100644 index 99e7f8651..000000000 --- a/vendor/gopkg.in/ini.v1/ini.go +++ /dev/null @@ -1,176 +0,0 @@ -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -// Package ini provides INI file read and write functionality in Go. -package ini - -import ( - "os" - "regexp" - "runtime" - "strings" -) - -const ( - // Maximum allowed depth when recursively substituing variable names. - depthValues = 99 -) - -var ( - // DefaultSection is the name of default section. You can use this var or the string literal. - // In most of cases, an empty string is all you need to access the section. - DefaultSection = "DEFAULT" - - // LineBreak is the delimiter to determine or compose a new line. - // This variable will be changed to "\r\n" automatically on Windows at package init time. - LineBreak = "\n" - - // Variable regexp pattern: %(variable)s - varPattern = regexp.MustCompile(`%\(([^)]+)\)s`) - - // DefaultHeader explicitly writes default section header. - DefaultHeader = false - - // PrettySection indicates whether to put a line between sections. - PrettySection = true - // PrettyFormat indicates whether to align "=" sign with spaces to produce pretty output - // or reduce all possible spaces for compact format. - PrettyFormat = true - // PrettyEqual places spaces around "=" sign even when PrettyFormat is false. - PrettyEqual = false - // DefaultFormatLeft places custom spaces on the left when PrettyFormat and PrettyEqual are both disabled. - DefaultFormatLeft = "" - // DefaultFormatRight places custom spaces on the right when PrettyFormat and PrettyEqual are both disabled. 
- DefaultFormatRight = "" -) - -var inTest = len(os.Args) > 0 && strings.HasSuffix(strings.TrimSuffix(os.Args[0], ".exe"), ".test") - -func init() { - if runtime.GOOS == "windows" && !inTest { - LineBreak = "\r\n" - } -} - -// LoadOptions contains all customized options used for load data source(s). -type LoadOptions struct { - // Loose indicates whether the parser should ignore nonexistent files or return error. - Loose bool - // Insensitive indicates whether the parser forces all section and key names to lowercase. - Insensitive bool - // InsensitiveSections indicates whether the parser forces all section to lowercase. - InsensitiveSections bool - // InsensitiveKeys indicates whether the parser forces all key names to lowercase. - InsensitiveKeys bool - // IgnoreContinuation indicates whether to ignore continuation lines while parsing. - IgnoreContinuation bool - // IgnoreInlineComment indicates whether to ignore comments at the end of value and treat it as part of value. - IgnoreInlineComment bool - // SkipUnrecognizableLines indicates whether to skip unrecognizable lines that do not conform to key/value pairs. - SkipUnrecognizableLines bool - // ShortCircuit indicates whether to ignore other configuration sources after loaded the first available configuration source. - ShortCircuit bool - // AllowBooleanKeys indicates whether to allow boolean type keys or treat as value is missing. - // This type of keys are mostly used in my.cnf. - AllowBooleanKeys bool - // AllowShadows indicates whether to keep track of keys with same name under same section. - AllowShadows bool - // AllowNestedValues indicates whether to allow AWS-like nested values. - // Docs: http://docs.aws.amazon.com/cli/latest/topic/config-vars.html#nested-values - AllowNestedValues bool - // AllowPythonMultilineValues indicates whether to allow Python-like multi-line values. - // Docs: https://docs.python.org/3/library/configparser.html#supported-ini-file-structure - // Relevant quote: Values can also span multiple lines, as long as they are indented deeper - // than the first line of the value. - AllowPythonMultilineValues bool - // SpaceBeforeInlineComment indicates whether to allow comment symbols (\# and \;) inside value. - // Docs: https://docs.python.org/2/library/configparser.html - // Quote: Comments may appear on their own in an otherwise empty line, or may be entered in lines holding values or section names. - // In the latter case, they need to be preceded by a whitespace character to be recognized as a comment. - SpaceBeforeInlineComment bool - // UnescapeValueDoubleQuotes indicates whether to unescape double quotes inside value to regular format - // when value is surrounded by double quotes, e.g. key="a \"value\"" => key=a "value" - UnescapeValueDoubleQuotes bool - // UnescapeValueCommentSymbols indicates to unescape comment symbols (\# and \;) inside value to regular format - // when value is NOT surrounded by any quotes. - // Note: UNSTABLE, behavior might change to only unescape inside double quotes but may noy necessary at all. - UnescapeValueCommentSymbols bool - // UnparseableSections stores a list of blocks that are allowed with raw content which do not otherwise - // conform to key/value pairs. Specify the names of those blocks here. - UnparseableSections []string - // KeyValueDelimiters is the sequence of delimiters that are used to separate key and value. By default, it is "=:". - KeyValueDelimiters string - // KeyValueDelimiterOnWrite is the delimiter that are used to separate key and value output. 
By default, it is "=". - KeyValueDelimiterOnWrite string - // ChildSectionDelimiter is the delimiter that is used to separate child sections. By default, it is ".". - ChildSectionDelimiter string - // PreserveSurroundedQuote indicates whether to preserve surrounded quote (single and double quotes). - PreserveSurroundedQuote bool - // DebugFunc is called to collect debug information (currently only useful to debug parsing Python-style multiline values). - DebugFunc DebugFunc - // ReaderBufferSize is the buffer size of the reader in bytes. - ReaderBufferSize int - // AllowNonUniqueSections indicates whether to allow sections with the same name multiple times. - AllowNonUniqueSections bool - // AllowDuplicateShadowValues indicates whether values for shadowed keys should be deduplicated. - AllowDuplicateShadowValues bool -} - -// DebugFunc is the type of function called to log parse events. -type DebugFunc func(message string) - -// LoadSources allows caller to apply customized options for loading from data source(s). -func LoadSources(opts LoadOptions, source interface{}, others ...interface{}) (_ *File, err error) { - sources := make([]dataSource, len(others)+1) - sources[0], err = parseDataSource(source) - if err != nil { - return nil, err - } - for i := range others { - sources[i+1], err = parseDataSource(others[i]) - if err != nil { - return nil, err - } - } - f := newFile(sources, opts) - if err = f.Reload(); err != nil { - return nil, err - } - return f, nil -} - -// Load loads and parses from INI data sources. -// Arguments can be mixed of file name with string type, or raw data in []byte. -// It will return error if list contains nonexistent files. -func Load(source interface{}, others ...interface{}) (*File, error) { - return LoadSources(LoadOptions{}, source, others...) -} - -// LooseLoad has exactly same functionality as Load function -// except it ignores nonexistent files instead of returning error. -func LooseLoad(source interface{}, others ...interface{}) (*File, error) { - return LoadSources(LoadOptions{Loose: true}, source, others...) -} - -// InsensitiveLoad has exactly same functionality as Load function -// except it forces all section and key names to be lowercased. -func InsensitiveLoad(source interface{}, others ...interface{}) (*File, error) { - return LoadSources(LoadOptions{Insensitive: true}, source, others...) -} - -// ShadowLoad has exactly same functionality as Load function -// except it allows have shadow keys. -func ShadowLoad(source interface{}, others ...interface{}) (*File, error) { - return LoadSources(LoadOptions{AllowShadows: true}, source, others...) -} diff --git a/vendor/gopkg.in/ini.v1/key.go b/vendor/gopkg.in/ini.v1/key.go deleted file mode 100644 index a19d9f38e..000000000 --- a/vendor/gopkg.in/ini.v1/key.go +++ /dev/null @@ -1,837 +0,0 @@ -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. 
- -package ini - -import ( - "bytes" - "errors" - "fmt" - "strconv" - "strings" - "time" -) - -// Key represents a key under a section. -type Key struct { - s *Section - Comment string - name string - value string - isAutoIncrement bool - isBooleanType bool - - isShadow bool - shadows []*Key - - nestedValues []string -} - -// newKey simply return a key object with given values. -func newKey(s *Section, name, val string) *Key { - return &Key{ - s: s, - name: name, - value: val, - } -} - -func (k *Key) addShadow(val string) error { - if k.isShadow { - return errors.New("cannot add shadow to another shadow key") - } else if k.isAutoIncrement || k.isBooleanType { - return errors.New("cannot add shadow to auto-increment or boolean key") - } - - if !k.s.f.options.AllowDuplicateShadowValues { - // Deduplicate shadows based on their values. - if k.value == val { - return nil - } - for i := range k.shadows { - if k.shadows[i].value == val { - return nil - } - } - } - - shadow := newKey(k.s, k.name, val) - shadow.isShadow = true - k.shadows = append(k.shadows, shadow) - return nil -} - -// AddShadow adds a new shadow key to itself. -func (k *Key) AddShadow(val string) error { - if !k.s.f.options.AllowShadows { - return errors.New("shadow key is not allowed") - } - return k.addShadow(val) -} - -func (k *Key) addNestedValue(val string) error { - if k.isAutoIncrement || k.isBooleanType { - return errors.New("cannot add nested value to auto-increment or boolean key") - } - - k.nestedValues = append(k.nestedValues, val) - return nil -} - -// AddNestedValue adds a nested value to the key. -func (k *Key) AddNestedValue(val string) error { - if !k.s.f.options.AllowNestedValues { - return errors.New("nested value is not allowed") - } - return k.addNestedValue(val) -} - -// ValueMapper represents a mapping function for values, e.g. os.ExpandEnv -type ValueMapper func(string) string - -// Name returns name of key. -func (k *Key) Name() string { - return k.name -} - -// Value returns raw value of key for performance purpose. -func (k *Key) Value() string { - return k.value -} - -// ValueWithShadows returns raw values of key and its shadows if any. Shadow -// keys with empty values are ignored from the returned list. -func (k *Key) ValueWithShadows() []string { - if len(k.shadows) == 0 { - if k.value == "" { - return []string{} - } - return []string{k.value} - } - - vals := make([]string, 0, len(k.shadows)+1) - if k.value != "" { - vals = append(vals, k.value) - } - for _, s := range k.shadows { - if s.value != "" { - vals = append(vals, s.value) - } - } - return vals -} - -// NestedValues returns nested values stored in the key. -// It is possible returned value is nil if no nested values stored in the key. -func (k *Key) NestedValues() []string { - return k.nestedValues -} - -// transformValue takes a raw value and transforms to its final string. -func (k *Key) transformValue(val string) string { - if k.s.f.ValueMapper != nil { - val = k.s.f.ValueMapper(val) - } - - // Fail-fast if no indicate char found for recursive value - if !strings.Contains(val, "%") { - return val - } - for i := 0; i < depthValues; i++ { - vr := varPattern.FindString(val) - if len(vr) == 0 { - break - } - - // Take off leading '%(' and trailing ')s'. - noption := vr[2 : len(vr)-2] - - // Search in the same section. - // If not found or found the key itself, then search again in default section. 
- nk, err := k.s.GetKey(noption) - if err != nil || k == nk { - nk, _ = k.s.f.Section("").GetKey(noption) - if nk == nil { - // Stop when no results found in the default section, - // and returns the value as-is. - break - } - } - - // Substitute by new value and take off leading '%(' and trailing ')s'. - val = strings.Replace(val, vr, nk.value, -1) - } - return val -} - -// String returns string representation of value. -func (k *Key) String() string { - return k.transformValue(k.value) -} - -// Validate accepts a validate function which can -// return modifed result as key value. -func (k *Key) Validate(fn func(string) string) string { - return fn(k.String()) -} - -// parseBool returns the boolean value represented by the string. -// -// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On, -// 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off. -// Any other value returns an error. -func parseBool(str string) (value bool, err error) { - switch str { - case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "y", "ON", "on", "On": - return true, nil - case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "n", "OFF", "off", "Off": - return false, nil - } - return false, fmt.Errorf("parsing \"%s\": invalid syntax", str) -} - -// Bool returns bool type value. -func (k *Key) Bool() (bool, error) { - return parseBool(k.String()) -} - -// Float64 returns float64 type value. -func (k *Key) Float64() (float64, error) { - return strconv.ParseFloat(k.String(), 64) -} - -// Int returns int type value. -func (k *Key) Int() (int, error) { - v, err := strconv.ParseInt(k.String(), 0, 64) - return int(v), err -} - -// Int64 returns int64 type value. -func (k *Key) Int64() (int64, error) { - return strconv.ParseInt(k.String(), 0, 64) -} - -// Uint returns uint type valued. -func (k *Key) Uint() (uint, error) { - u, e := strconv.ParseUint(k.String(), 0, 64) - return uint(u), e -} - -// Uint64 returns uint64 type value. -func (k *Key) Uint64() (uint64, error) { - return strconv.ParseUint(k.String(), 0, 64) -} - -// Duration returns time.Duration type value. -func (k *Key) Duration() (time.Duration, error) { - return time.ParseDuration(k.String()) -} - -// TimeFormat parses with given format and returns time.Time type value. -func (k *Key) TimeFormat(format string) (time.Time, error) { - return time.Parse(format, k.String()) -} - -// Time parses with RFC3339 format and returns time.Time type value. -func (k *Key) Time() (time.Time, error) { - return k.TimeFormat(time.RFC3339) -} - -// MustString returns default value if key value is empty. -func (k *Key) MustString(defaultVal string) string { - val := k.String() - if len(val) == 0 { - k.value = defaultVal - return defaultVal - } - return val -} - -// MustBool always returns value without error, -// it returns false if error occurs. -func (k *Key) MustBool(defaultVal ...bool) bool { - val, err := k.Bool() - if len(defaultVal) > 0 && err != nil { - k.value = strconv.FormatBool(defaultVal[0]) - return defaultVal[0] - } - return val -} - -// MustFloat64 always returns value without error, -// it returns 0.0 if error occurs. -func (k *Key) MustFloat64(defaultVal ...float64) float64 { - val, err := k.Float64() - if len(defaultVal) > 0 && err != nil { - k.value = strconv.FormatFloat(defaultVal[0], 'f', -1, 64) - return defaultVal[0] - } - return val -} - -// MustInt always returns value without error, -// it returns 0 if error occurs. 
-func (k *Key) MustInt(defaultVal ...int) int { - val, err := k.Int() - if len(defaultVal) > 0 && err != nil { - k.value = strconv.FormatInt(int64(defaultVal[0]), 10) - return defaultVal[0] - } - return val -} - -// MustInt64 always returns value without error, -// it returns 0 if error occurs. -func (k *Key) MustInt64(defaultVal ...int64) int64 { - val, err := k.Int64() - if len(defaultVal) > 0 && err != nil { - k.value = strconv.FormatInt(defaultVal[0], 10) - return defaultVal[0] - } - return val -} - -// MustUint always returns value without error, -// it returns 0 if error occurs. -func (k *Key) MustUint(defaultVal ...uint) uint { - val, err := k.Uint() - if len(defaultVal) > 0 && err != nil { - k.value = strconv.FormatUint(uint64(defaultVal[0]), 10) - return defaultVal[0] - } - return val -} - -// MustUint64 always returns value without error, -// it returns 0 if error occurs. -func (k *Key) MustUint64(defaultVal ...uint64) uint64 { - val, err := k.Uint64() - if len(defaultVal) > 0 && err != nil { - k.value = strconv.FormatUint(defaultVal[0], 10) - return defaultVal[0] - } - return val -} - -// MustDuration always returns value without error, -// it returns zero value if error occurs. -func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration { - val, err := k.Duration() - if len(defaultVal) > 0 && err != nil { - k.value = defaultVal[0].String() - return defaultVal[0] - } - return val -} - -// MustTimeFormat always parses with given format and returns value without error, -// it returns zero value if error occurs. -func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time { - val, err := k.TimeFormat(format) - if len(defaultVal) > 0 && err != nil { - k.value = defaultVal[0].Format(format) - return defaultVal[0] - } - return val -} - -// MustTime always parses with RFC3339 format and returns value without error, -// it returns zero value if error occurs. -func (k *Key) MustTime(defaultVal ...time.Time) time.Time { - return k.MustTimeFormat(time.RFC3339, defaultVal...) -} - -// In always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) In(defaultVal string, candidates []string) string { - val := k.String() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InFloat64 always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 { - val := k.MustFloat64() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InInt always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InInt(defaultVal int, candidates []int) int { - val := k.MustInt() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InInt64 always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 { - val := k.MustInt64() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InUint always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. 
-func (k *Key) InUint(defaultVal uint, candidates []uint) uint { - val := k.MustUint() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InUint64 always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 { - val := k.MustUint64() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InTimeFormat always parses with given format and returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time { - val := k.MustTimeFormat(format) - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InTime always parses with RFC3339 format and returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time { - return k.InTimeFormat(time.RFC3339, defaultVal, candidates) -} - -// RangeFloat64 checks if value is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 { - val := k.MustFloat64() - if val < min || val > max { - return defaultVal - } - return val -} - -// RangeInt checks if value is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeInt(defaultVal, min, max int) int { - val := k.MustInt() - if val < min || val > max { - return defaultVal - } - return val -} - -// RangeInt64 checks if value is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeInt64(defaultVal, min, max int64) int64 { - val := k.MustInt64() - if val < min || val > max { - return defaultVal - } - return val -} - -// RangeTimeFormat checks if value with given format is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time { - val := k.MustTimeFormat(format) - if val.Unix() < min.Unix() || val.Unix() > max.Unix() { - return defaultVal - } - return val -} - -// RangeTime checks if value with RFC3339 format is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time { - return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max) -} - -// Strings returns list of string divided by given delimiter. -func (k *Key) Strings(delim string) []string { - str := k.String() - if len(str) == 0 { - return []string{} - } - - runes := []rune(str) - vals := make([]string, 0, 2) - var buf bytes.Buffer - escape := false - idx := 0 - for { - if escape { - escape = false - if runes[idx] != '\\' && !strings.HasPrefix(string(runes[idx:]), delim) { - buf.WriteRune('\\') - } - buf.WriteRune(runes[idx]) - } else { - if runes[idx] == '\\' { - escape = true - } else if strings.HasPrefix(string(runes[idx:]), delim) { - idx += len(delim) - 1 - vals = append(vals, strings.TrimSpace(buf.String())) - buf.Reset() - } else { - buf.WriteRune(runes[idx]) - } - } - idx++ - if idx == len(runes) { - break - } - } - - if buf.Len() > 0 { - vals = append(vals, strings.TrimSpace(buf.String())) - } - - return vals -} - -// StringsWithShadows returns list of string divided by given delimiter. 
-// Shadows will also be appended if any. -func (k *Key) StringsWithShadows(delim string) []string { - vals := k.ValueWithShadows() - results := make([]string, 0, len(vals)*2) - for i := range vals { - if len(vals) == 0 { - continue - } - - results = append(results, strings.Split(vals[i], delim)...) - } - - for i := range results { - results[i] = k.transformValue(strings.TrimSpace(results[i])) - } - return results -} - -// Float64s returns list of float64 divided by given delimiter. Any invalid input will be treated as zero value. -func (k *Key) Float64s(delim string) []float64 { - vals, _ := k.parseFloat64s(k.Strings(delim), true, false) - return vals -} - -// Ints returns list of int divided by given delimiter. Any invalid input will be treated as zero value. -func (k *Key) Ints(delim string) []int { - vals, _ := k.parseInts(k.Strings(delim), true, false) - return vals -} - -// Int64s returns list of int64 divided by given delimiter. Any invalid input will be treated as zero value. -func (k *Key) Int64s(delim string) []int64 { - vals, _ := k.parseInt64s(k.Strings(delim), true, false) - return vals -} - -// Uints returns list of uint divided by given delimiter. Any invalid input will be treated as zero value. -func (k *Key) Uints(delim string) []uint { - vals, _ := k.parseUints(k.Strings(delim), true, false) - return vals -} - -// Uint64s returns list of uint64 divided by given delimiter. Any invalid input will be treated as zero value. -func (k *Key) Uint64s(delim string) []uint64 { - vals, _ := k.parseUint64s(k.Strings(delim), true, false) - return vals -} - -// Bools returns list of bool divided by given delimiter. Any invalid input will be treated as zero value. -func (k *Key) Bools(delim string) []bool { - vals, _ := k.parseBools(k.Strings(delim), true, false) - return vals -} - -// TimesFormat parses with given format and returns list of time.Time divided by given delimiter. -// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC). -func (k *Key) TimesFormat(format, delim string) []time.Time { - vals, _ := k.parseTimesFormat(format, k.Strings(delim), true, false) - return vals -} - -// Times parses with RFC3339 format and returns list of time.Time divided by given delimiter. -// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC). -func (k *Key) Times(delim string) []time.Time { - return k.TimesFormat(time.RFC3339, delim) -} - -// ValidFloat64s returns list of float64 divided by given delimiter. If some value is not float, then -// it will not be included to result list. -func (k *Key) ValidFloat64s(delim string) []float64 { - vals, _ := k.parseFloat64s(k.Strings(delim), false, false) - return vals -} - -// ValidInts returns list of int divided by given delimiter. If some value is not integer, then it will -// not be included to result list. -func (k *Key) ValidInts(delim string) []int { - vals, _ := k.parseInts(k.Strings(delim), false, false) - return vals -} - -// ValidInt64s returns list of int64 divided by given delimiter. If some value is not 64-bit integer, -// then it will not be included to result list. -func (k *Key) ValidInt64s(delim string) []int64 { - vals, _ := k.parseInt64s(k.Strings(delim), false, false) - return vals -} - -// ValidUints returns list of uint divided by given delimiter. If some value is not unsigned integer, -// then it will not be included to result list. 
-func (k *Key) ValidUints(delim string) []uint {
-	vals, _ := k.parseUints(k.Strings(delim), false, false)
-	return vals
-}
-
-// ValidUint64s returns list of uint64 divided by given delimiter. If some value is not 64-bit unsigned
-// integer, then it will not be included to result list.
-func (k *Key) ValidUint64s(delim string) []uint64 {
-	vals, _ := k.parseUint64s(k.Strings(delim), false, false)
-	return vals
-}
-
-// ValidBools returns list of bool divided by given delimiter. If some value is not 64-bit unsigned
-// integer, then it will not be included to result list.
-func (k *Key) ValidBools(delim string) []bool {
-	vals, _ := k.parseBools(k.Strings(delim), false, false)
-	return vals
-}
-
-// ValidTimesFormat parses with given format and returns list of time.Time divided by given delimiter.
-func (k *Key) ValidTimesFormat(format, delim string) []time.Time {
-	vals, _ := k.parseTimesFormat(format, k.Strings(delim), false, false)
-	return vals
-}
-
-// ValidTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter.
-func (k *Key) ValidTimes(delim string) []time.Time {
-	return k.ValidTimesFormat(time.RFC3339, delim)
-}
-
-// StrictFloat64s returns list of float64 divided by given delimiter or error on first invalid input.
-func (k *Key) StrictFloat64s(delim string) ([]float64, error) {
-	return k.parseFloat64s(k.Strings(delim), false, true)
-}
-
-// StrictInts returns list of int divided by given delimiter or error on first invalid input.
-func (k *Key) StrictInts(delim string) ([]int, error) {
-	return k.parseInts(k.Strings(delim), false, true)
-}
-
-// StrictInt64s returns list of int64 divided by given delimiter or error on first invalid input.
-func (k *Key) StrictInt64s(delim string) ([]int64, error) {
-	return k.parseInt64s(k.Strings(delim), false, true)
-}
-
-// StrictUints returns list of uint divided by given delimiter or error on first invalid input.
-func (k *Key) StrictUints(delim string) ([]uint, error) {
-	return k.parseUints(k.Strings(delim), false, true)
-}
-
-// StrictUint64s returns list of uint64 divided by given delimiter or error on first invalid input.
-func (k *Key) StrictUint64s(delim string) ([]uint64, error) {
-	return k.parseUint64s(k.Strings(delim), false, true)
-}
-
-// StrictBools returns list of bool divided by given delimiter or error on first invalid input.
-func (k *Key) StrictBools(delim string) ([]bool, error) {
-	return k.parseBools(k.Strings(delim), false, true)
-}
-
-// StrictTimesFormat parses with given format and returns list of time.Time divided by given delimiter
-// or error on first invalid input.
-func (k *Key) StrictTimesFormat(format, delim string) ([]time.Time, error) {
-	return k.parseTimesFormat(format, k.Strings(delim), false, true)
-}
-
-// StrictTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter
-// or error on first invalid input.
-func (k *Key) StrictTimes(delim string) ([]time.Time, error) {
-	return k.StrictTimesFormat(time.RFC3339, delim)
-}
-
-// parseBools transforms strings to bools.
-func (k *Key) parseBools(strs []string, addInvalid, returnOnInvalid bool) ([]bool, error) {
-	vals := make([]bool, 0, len(strs))
-	parser := func(str string) (interface{}, error) {
-		val, err := parseBool(str)
-		return val, err
-	}
-	rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser)
-	if err == nil {
-		for _, val := range rawVals {
-			vals = append(vals, val.(bool))
-		}
-	}
-	return vals, err
-}
-
-// parseFloat64s transforms strings to float64s.
-func (k *Key) parseFloat64s(strs []string, addInvalid, returnOnInvalid bool) ([]float64, error) {
-	vals := make([]float64, 0, len(strs))
-	parser := func(str string) (interface{}, error) {
-		val, err := strconv.ParseFloat(str, 64)
-		return val, err
-	}
-	rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser)
-	if err == nil {
-		for _, val := range rawVals {
-			vals = append(vals, val.(float64))
-		}
-	}
-	return vals, err
-}
-
-// parseInts transforms strings to ints.
-func (k *Key) parseInts(strs []string, addInvalid, returnOnInvalid bool) ([]int, error) {
-	vals := make([]int, 0, len(strs))
-	parser := func(str string) (interface{}, error) {
-		val, err := strconv.ParseInt(str, 0, 64)
-		return val, err
-	}
-	rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser)
-	if err == nil {
-		for _, val := range rawVals {
-			vals = append(vals, int(val.(int64)))
-		}
-	}
-	return vals, err
-}
-
-// parseInt64s transforms strings to int64s.
-func (k *Key) parseInt64s(strs []string, addInvalid, returnOnInvalid bool) ([]int64, error) {
-	vals := make([]int64, 0, len(strs))
-	parser := func(str string) (interface{}, error) {
-		val, err := strconv.ParseInt(str, 0, 64)
-		return val, err
-	}
-
-	rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser)
-	if err == nil {
-		for _, val := range rawVals {
-			vals = append(vals, val.(int64))
-		}
-	}
-	return vals, err
-}
-
-// parseUints transforms strings to uints.
-func (k *Key) parseUints(strs []string, addInvalid, returnOnInvalid bool) ([]uint, error) {
-	vals := make([]uint, 0, len(strs))
-	parser := func(str string) (interface{}, error) {
-		val, err := strconv.ParseUint(str, 0, 64)
-		return val, err
-	}
-
-	rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser)
-	if err == nil {
-		for _, val := range rawVals {
-			vals = append(vals, uint(val.(uint64)))
-		}
-	}
-	return vals, err
-}
-
-// parseUint64s transforms strings to uint64s.
-func (k *Key) parseUint64s(strs []string, addInvalid, returnOnInvalid bool) ([]uint64, error) {
-	vals := make([]uint64, 0, len(strs))
-	parser := func(str string) (interface{}, error) {
-		val, err := strconv.ParseUint(str, 0, 64)
-		return val, err
-	}
-	rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser)
-	if err == nil {
-		for _, val := range rawVals {
-			vals = append(vals, val.(uint64))
-		}
-	}
-	return vals, err
-}
-
-type Parser func(str string) (interface{}, error)
-
-// parseTimesFormat transforms strings to times in given format.
-func (k *Key) parseTimesFormat(format string, strs []string, addInvalid, returnOnInvalid bool) ([]time.Time, error) {
-	vals := make([]time.Time, 0, len(strs))
-	parser := func(str string) (interface{}, error) {
-		val, err := time.Parse(format, str)
-		return val, err
-	}
-	rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser)
-	if err == nil {
-		for _, val := range rawVals {
-			vals = append(vals, val.(time.Time))
-		}
-	}
-	return vals, err
-}
-
-// doParse transforms strings to different types
-func (k *Key) doParse(strs []string, addInvalid, returnOnInvalid bool, parser Parser) ([]interface{}, error) {
-	vals := make([]interface{}, 0, len(strs))
-	for _, str := range strs {
-		val, err := parser(str)
-		if err != nil && returnOnInvalid {
-			return nil, err
-		}
-		if err == nil || addInvalid {
-			vals = append(vals, val)
-		}
-	}
-	return vals, nil
-}
-
-// SetValue changes key value.
-func (k *Key) SetValue(v string) {
-	if k.s.f.BlockMode {
-		k.s.f.lock.Lock()
-		defer k.s.f.lock.Unlock()
-	}
-
-	k.value = v
-	k.s.keysHash[k.name] = v
-}
diff --git a/vendor/gopkg.in/ini.v1/parser.go b/vendor/gopkg.in/ini.v1/parser.go
deleted file mode 100644
index 44fc526c2..000000000
--- a/vendor/gopkg.in/ini.v1/parser.go
+++ /dev/null
@@ -1,520 +0,0 @@
-// Copyright 2015 Unknwon
-//
-// Licensed under the Apache License, Version 2.0 (the "License"): you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package ini
-
-import (
-	"bufio"
-	"bytes"
-	"fmt"
-	"io"
-	"regexp"
-	"strconv"
-	"strings"
-	"unicode"
-)
-
-const minReaderBufferSize = 4096
-
-var pythonMultiline = regexp.MustCompile(`^([\t\f ]+)(.*)`)
-
-type parserOptions struct {
-	IgnoreContinuation          bool
-	IgnoreInlineComment         bool
-	AllowPythonMultilineValues  bool
-	SpaceBeforeInlineComment    bool
-	UnescapeValueDoubleQuotes   bool
-	UnescapeValueCommentSymbols bool
-	PreserveSurroundedQuote     bool
-	DebugFunc                   DebugFunc
-	ReaderBufferSize            int
-}
-
-type parser struct {
-	buf     *bufio.Reader
-	options parserOptions
-
-	isEOF   bool
-	count   int
-	comment *bytes.Buffer
-}
-
-func (p *parser) debug(format string, args ...interface{}) {
-	if p.options.DebugFunc != nil {
-		p.options.DebugFunc(fmt.Sprintf(format, args...))
-	}
-}
-
-func newParser(r io.Reader, opts parserOptions) *parser {
-	size := opts.ReaderBufferSize
-	if size < minReaderBufferSize {
-		size = minReaderBufferSize
-	}
-
-	return &parser{
-		buf:     bufio.NewReaderSize(r, size),
-		options: opts,
-		count:   1,
-		comment: &bytes.Buffer{},
-	}
-}
-
-// BOM handles header of UTF-8, UTF-16 LE and UTF-16 BE's BOM format.
-// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding
-func (p *parser) BOM() error {
-	mask, err := p.buf.Peek(2)
-	if err != nil && err != io.EOF {
-		return err
-	} else if len(mask) < 2 {
-		return nil
-	}
-
-	switch {
-	case mask[0] == 254 && mask[1] == 255:
-		fallthrough
-	case mask[0] == 255 && mask[1] == 254:
-		_, err = p.buf.Read(mask)
-		if err != nil {
-			return err
-		}
-	case mask[0] == 239 && mask[1] == 187:
-		mask, err := p.buf.Peek(3)
-		if err != nil && err != io.EOF {
-			return err
-		} else if len(mask) < 3 {
-			return nil
-		}
-		if mask[2] == 191 {
-			_, err = p.buf.Read(mask)
-			if err != nil {
-				return err
-			}
-		}
-	}
-	return nil
-}
-
-func (p *parser) readUntil(delim byte) ([]byte, error) {
-	data, err := p.buf.ReadBytes(delim)
-	if err != nil {
-		if err == io.EOF {
-			p.isEOF = true
-		} else {
-			return nil, err
-		}
-	}
-	return data, nil
-}
-
-func cleanComment(in []byte) ([]byte, bool) {
-	i := bytes.IndexAny(in, "#;")
-	if i == -1 {
-		return nil, false
-	}
-	return in[i:], true
-}
-
-func readKeyName(delimiters string, in []byte) (string, int, error) {
-	line := string(in)
-
-	// Check if key name surrounded by quotes.
-	var keyQuote string
-	if line[0] == '"' {
-		if len(line) > 6 && line[0:3] == `"""` {
-			keyQuote = `"""`
-		} else {
-			keyQuote = `"`
-		}
-	} else if line[0] == '`' {
-		keyQuote = "`"
-	}
-
-	// Get out key name
-	var endIdx int
-	if len(keyQuote) > 0 {
-		startIdx := len(keyQuote)
-		// FIXME: fail case -> """"""name"""=value
-		pos := strings.Index(line[startIdx:], keyQuote)
-		if pos == -1 {
-			return "", -1, fmt.Errorf("missing closing key quote: %s", line)
-		}
-		pos += startIdx
-
-		// Find key-value delimiter
-		i := strings.IndexAny(line[pos+startIdx:], delimiters)
-		if i < 0 {
-			return "", -1, ErrDelimiterNotFound{line}
-		}
-		endIdx = pos + i
-		return strings.TrimSpace(line[startIdx:pos]), endIdx + startIdx + 1, nil
-	}
-
-	endIdx = strings.IndexAny(line, delimiters)
-	if endIdx < 0 {
-		return "", -1, ErrDelimiterNotFound{line}
-	}
-	if endIdx == 0 {
-		return "", -1, ErrEmptyKeyName{line}
-	}
-
-	return strings.TrimSpace(line[0:endIdx]), endIdx + 1, nil
-}
-
-func (p *parser) readMultilines(line, val, valQuote string) (string, error) {
-	for {
-		data, err := p.readUntil('\n')
-		if err != nil {
-			return "", err
-		}
-		next := string(data)
-
-		pos := strings.LastIndex(next, valQuote)
-		if pos > -1 {
-			val += next[:pos]
-
-			comment, has := cleanComment([]byte(next[pos:]))
-			if has {
-				p.comment.Write(bytes.TrimSpace(comment))
-			}
-			break
-		}
-		val += next
-		if p.isEOF {
-			return "", fmt.Errorf("missing closing key quote from %q to %q", line, next)
-		}
-	}
-	return val, nil
-}
-
-func (p *parser) readContinuationLines(val string) (string, error) {
-	for {
-		data, err := p.readUntil('\n')
-		if err != nil {
-			return "", err
-		}
-		next := strings.TrimSpace(string(data))
-
-		if len(next) == 0 {
-			break
-		}
-		val += next
-		if val[len(val)-1] != '\\' {
-			break
-		}
-		val = val[:len(val)-1]
-	}
-	return val, nil
-}
-
-// hasSurroundedQuote check if and only if the first and last characters
-// are quotes \" or \'.
-// It returns false if any other parts also contain same kind of quotes.
-func hasSurroundedQuote(in string, quote byte) bool {
-	return len(in) >= 2 && in[0] == quote && in[len(in)-1] == quote &&
-		strings.IndexByte(in[1:], quote) == len(in)-2
-}
-
-func (p *parser) readValue(in []byte, bufferSize int) (string, error) {
-
-	line := strings.TrimLeftFunc(string(in), unicode.IsSpace)
-	if len(line) == 0 {
-		if p.options.AllowPythonMultilineValues && len(in) > 0 && in[len(in)-1] == '\n' {
-			return p.readPythonMultilines(line, bufferSize)
-		}
-		return "", nil
-	}
-
-	var valQuote string
-	if len(line) > 3 && line[0:3] == `"""` {
-		valQuote = `"""`
-	} else if line[0] == '`' {
-		valQuote = "`"
-	} else if p.options.UnescapeValueDoubleQuotes && line[0] == '"' {
-		valQuote = `"`
-	}
-
-	if len(valQuote) > 0 {
-		startIdx := len(valQuote)
-		pos := strings.LastIndex(line[startIdx:], valQuote)
-		// Check for multi-line value
-		if pos == -1 {
-			return p.readMultilines(line, line[startIdx:], valQuote)
-		}
-
-		if p.options.UnescapeValueDoubleQuotes && valQuote == `"` {
-			return strings.Replace(line[startIdx:pos+startIdx], `\"`, `"`, -1), nil
-		}
-		return line[startIdx : pos+startIdx], nil
-	}
-
-	lastChar := line[len(line)-1]
-	// Won't be able to reach here if value only contains whitespace
-	line = strings.TrimSpace(line)
-	trimmedLastChar := line[len(line)-1]
-
-	// Check continuation lines when desired
-	if !p.options.IgnoreContinuation && trimmedLastChar == '\\' {
-		return p.readContinuationLines(line[:len(line)-1])
-	}
-
-	// Check if ignore inline comment
-	if !p.options.IgnoreInlineComment {
-		var i int
-		if p.options.SpaceBeforeInlineComment {
-			i = strings.Index(line, " #")
-			if i == -1 {
-				i = strings.Index(line, " ;")
-			}
-
-		} else {
-			i = strings.IndexAny(line, "#;")
-		}
-
-		if i > -1 {
-			p.comment.WriteString(line[i:])
-			line = strings.TrimSpace(line[:i])
-		}
-
-	}
-
-	// Trim single and double quotes
-	if (hasSurroundedQuote(line, '\'') ||
-		hasSurroundedQuote(line, '"')) && !p.options.PreserveSurroundedQuote {
-		line = line[1 : len(line)-1]
-	} else if len(valQuote) == 0 && p.options.UnescapeValueCommentSymbols {
-		line = strings.ReplaceAll(line, `\;`, ";")
-		line = strings.ReplaceAll(line, `\#`, "#")
-	} else if p.options.AllowPythonMultilineValues && lastChar == '\n' {
-		return p.readPythonMultilines(line, bufferSize)
-	}
-
-	return line, nil
-}
-
-func (p *parser) readPythonMultilines(line string, bufferSize int) (string, error) {
-	parserBufferPeekResult, _ := p.buf.Peek(bufferSize)
-	peekBuffer := bytes.NewBuffer(parserBufferPeekResult)
-
-	for {
-		peekData, peekErr := peekBuffer.ReadBytes('\n')
-		if peekErr != nil && peekErr != io.EOF {
-			p.debug("readPythonMultilines: failed to peek with error: %v", peekErr)
-			return "", peekErr
-		}
-
-		p.debug("readPythonMultilines: parsing %q", string(peekData))
-
-		peekMatches := pythonMultiline.FindStringSubmatch(string(peekData))
-		p.debug("readPythonMultilines: matched %d parts", len(peekMatches))
-		for n, v := range peekMatches {
-			p.debug("   %d: %q", n, v)
-		}
-
-		// Return if not a Python multiline value.
-		if len(peekMatches) != 3 {
-			p.debug("readPythonMultilines: end of value, got: %q", line)
-			return line, nil
-		}
-
-		// Advance the parser reader (buffer) in-sync with the peek buffer.
-		_, err := p.buf.Discard(len(peekData))
-		if err != nil {
-			p.debug("readPythonMultilines: failed to skip to the end, returning error")
-			return "", err
-		}
-
-		line += "\n" + peekMatches[0]
-	}
-}
-
-// parse parses data through an io.Reader.
-func (f *File) parse(reader io.Reader) (err error) {
-	p := newParser(reader, parserOptions{
-		IgnoreContinuation:          f.options.IgnoreContinuation,
-		IgnoreInlineComment:         f.options.IgnoreInlineComment,
-		AllowPythonMultilineValues:  f.options.AllowPythonMultilineValues,
-		SpaceBeforeInlineComment:    f.options.SpaceBeforeInlineComment,
-		UnescapeValueDoubleQuotes:   f.options.UnescapeValueDoubleQuotes,
-		UnescapeValueCommentSymbols: f.options.UnescapeValueCommentSymbols,
-		PreserveSurroundedQuote:     f.options.PreserveSurroundedQuote,
-		DebugFunc:                   f.options.DebugFunc,
-		ReaderBufferSize:            f.options.ReaderBufferSize,
-	})
-	if err = p.BOM(); err != nil {
-		return fmt.Errorf("BOM: %v", err)
-	}
-
-	// Ignore error because default section name is never empty string.
-	name := DefaultSection
-	if f.options.Insensitive || f.options.InsensitiveSections {
-		name = strings.ToLower(DefaultSection)
-	}
-	section, _ := f.NewSection(name)
-
-	// This "last" is not strictly equivalent to "previous one" if current key is not the first nested key
-	var isLastValueEmpty bool
-	var lastRegularKey *Key
-
-	var line []byte
-	var inUnparseableSection bool
-
-	// NOTE: Iterate and increase `currentPeekSize` until
-	// the size of the parser buffer is found.
-	// TODO(unknwon): When Golang 1.10 is the lowest version supported, replace with `parserBufferSize := p.buf.Size()`.
-	parserBufferSize := 0
-	// NOTE: Peek 4kb at a time.
-	currentPeekSize := minReaderBufferSize
-
-	if f.options.AllowPythonMultilineValues {
-		for {
-			peekBytes, _ := p.buf.Peek(currentPeekSize)
-			peekBytesLength := len(peekBytes)
-
-			if parserBufferSize >= peekBytesLength {
-				break
-			}
-
-			currentPeekSize *= 2
-			parserBufferSize = peekBytesLength
-		}
-	}
-
-	for !p.isEOF {
-		line, err = p.readUntil('\n')
-		if err != nil {
-			return err
-		}
-
-		if f.options.AllowNestedValues &&
-			isLastValueEmpty && len(line) > 0 {
-			if line[0] == ' ' || line[0] == '\t' {
-				err = lastRegularKey.addNestedValue(string(bytes.TrimSpace(line)))
-				if err != nil {
-					return err
-				}
-				continue
-			}
-		}
-
-		line = bytes.TrimLeftFunc(line, unicode.IsSpace)
-		if len(line) == 0 {
-			continue
-		}
-
-		// Comments
-		if line[0] == '#' || line[0] == ';' {
-			// Note: we do not care ending line break,
-			// it is needed for adding second line,
-			// so just clean it once at the end when set to value.
-			p.comment.Write(line)
-			continue
-		}
-
-		// Section
-		if line[0] == '[' {
-			// Read to the next ']' (TODO: support quoted strings)
-			closeIdx := bytes.LastIndexByte(line, ']')
-			if closeIdx == -1 {
-				return fmt.Errorf("unclosed section: %s", line)
-			}
-
-			name := string(line[1:closeIdx])
-			section, err = f.NewSection(name)
-			if err != nil {
-				return err
-			}
-
-			comment, has := cleanComment(line[closeIdx+1:])
-			if has {
-				p.comment.Write(comment)
-			}
-
-			section.Comment = strings.TrimSpace(p.comment.String())
-
-			// Reset auto-counter and comments
-			p.comment.Reset()
-			p.count = 1
-			// Nested values can't span sections
-			isLastValueEmpty = false
-
-			inUnparseableSection = false
-			for i := range f.options.UnparseableSections {
-				if f.options.UnparseableSections[i] == name ||
-					((f.options.Insensitive || f.options.InsensitiveSections) && strings.EqualFold(f.options.UnparseableSections[i], name)) {
-					inUnparseableSection = true
-					continue
-				}
-			}
-			continue
-		}
-
-		if inUnparseableSection {
-			section.isRawSection = true
-			section.rawBody += string(line)
-			continue
-		}
-
-		kname, offset, err := readKeyName(f.options.KeyValueDelimiters, line)
-		if err != nil {
-			switch {
-			// Treat as boolean key when desired, and whole line is key name.
-			case IsErrDelimiterNotFound(err):
-				switch {
-				case f.options.AllowBooleanKeys:
-					kname, err := p.readValue(line, parserBufferSize)
-					if err != nil {
-						return err
-					}
-					key, err := section.NewBooleanKey(kname)
-					if err != nil {
-						return err
-					}
-					key.Comment = strings.TrimSpace(p.comment.String())
-					p.comment.Reset()
-					continue
-
-				case f.options.SkipUnrecognizableLines:
-					continue
-				}
-			case IsErrEmptyKeyName(err) && f.options.SkipUnrecognizableLines:
-				continue
-			}
-			return err
-		}
-
-		// Auto increment.
-		isAutoIncr := false
-		if kname == "-" {
-			isAutoIncr = true
-			kname = "#" + strconv.Itoa(p.count)
-			p.count++
-		}
-
-		value, err := p.readValue(line[offset:], parserBufferSize)
-		if err != nil {
-			return err
-		}
-		isLastValueEmpty = len(value) == 0
-
-		key, err := section.NewKey(kname, value)
-		if err != nil {
-			return err
-		}
-		key.isAutoIncrement = isAutoIncr
-		key.Comment = strings.TrimSpace(p.comment.String())
-		p.comment.Reset()
-		lastRegularKey = key
-	}
-	return nil
-}
diff --git a/vendor/gopkg.in/ini.v1/section.go b/vendor/gopkg.in/ini.v1/section.go
deleted file mode 100644
index a3615d820..000000000
--- a/vendor/gopkg.in/ini.v1/section.go
+++ /dev/null
@@ -1,256 +0,0 @@
-// Copyright 2014 Unknwon
-//
-// Licensed under the Apache License, Version 2.0 (the "License"): you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package ini
-
-import (
-	"errors"
-	"fmt"
-	"strings"
-)
-
-// Section represents a config section.
-type Section struct {
-	f        *File
-	Comment  string
-	name     string
-	keys     map[string]*Key
-	keyList  []string
-	keysHash map[string]string
-
-	isRawSection bool
-	rawBody      string
-}
-
-func newSection(f *File, name string) *Section {
-	return &Section{
-		f:        f,
-		name:     name,
-		keys:     make(map[string]*Key),
-		keyList:  make([]string, 0, 10),
-		keysHash: make(map[string]string),
-	}
-}
-
-// Name returns name of Section.
-func (s *Section) Name() string {
-	return s.name
-}
-
-// Body returns rawBody of Section if the section was marked as unparseable.
-// It still follows the other rules of the INI format surrounding leading/trailing whitespace.
-func (s *Section) Body() string {
-	return strings.TrimSpace(s.rawBody)
-}
-
-// SetBody updates body content only if section is raw.
-func (s *Section) SetBody(body string) {
-	if !s.isRawSection {
-		return
-	}
-	s.rawBody = body
-}
-
-// NewKey creates a new key to given section.
-func (s *Section) NewKey(name, val string) (*Key, error) {
-	if len(name) == 0 {
-		return nil, errors.New("error creating new key: empty key name")
-	} else if s.f.options.Insensitive || s.f.options.InsensitiveKeys {
-		name = strings.ToLower(name)
-	}
-
-	if s.f.BlockMode {
-		s.f.lock.Lock()
-		defer s.f.lock.Unlock()
-	}
-
-	if inSlice(name, s.keyList) {
-		if s.f.options.AllowShadows {
-			if err := s.keys[name].addShadow(val); err != nil {
-				return nil, err
-			}
-		} else {
-			s.keys[name].value = val
-			s.keysHash[name] = val
-		}
-		return s.keys[name], nil
-	}
-
-	s.keyList = append(s.keyList, name)
-	s.keys[name] = newKey(s, name, val)
-	s.keysHash[name] = val
-	return s.keys[name], nil
-}
-
-// NewBooleanKey creates a new boolean type key to given section.
-func (s *Section) NewBooleanKey(name string) (*Key, error) {
-	key, err := s.NewKey(name, "true")
-	if err != nil {
-		return nil, err
-	}
-
-	key.isBooleanType = true
-	return key, nil
-}
-
-// GetKey returns key in section by given name.
-func (s *Section) GetKey(name string) (*Key, error) {
-	if s.f.BlockMode {
-		s.f.lock.RLock()
-	}
-	if s.f.options.Insensitive || s.f.options.InsensitiveKeys {
-		name = strings.ToLower(name)
-	}
-	key := s.keys[name]
-	if s.f.BlockMode {
-		s.f.lock.RUnlock()
-	}
-
-	if key == nil {
-		// Check if it is a child-section.
-		sname := s.name
-		for {
-			if i := strings.LastIndex(sname, s.f.options.ChildSectionDelimiter); i > -1 {
-				sname = sname[:i]
-				sec, err := s.f.GetSection(sname)
-				if err != nil {
-					continue
-				}
-				return sec.GetKey(name)
-			}
-			break
-		}
-		return nil, fmt.Errorf("error when getting key of section %q: key %q not exists", s.name, name)
-	}
-	return key, nil
-}
-
-// HasKey returns true if section contains a key with given name.
-func (s *Section) HasKey(name string) bool {
-	key, _ := s.GetKey(name)
-	return key != nil
-}
-
-// Deprecated: Use "HasKey" instead.
-func (s *Section) Haskey(name string) bool {
-	return s.HasKey(name)
-}
-
-// HasValue returns true if section contains given raw value.
-func (s *Section) HasValue(value string) bool {
-	if s.f.BlockMode {
-		s.f.lock.RLock()
-		defer s.f.lock.RUnlock()
-	}
-
-	for _, k := range s.keys {
-		if value == k.value {
-			return true
-		}
-	}
-	return false
-}
-
-// Key assumes named Key exists in section and returns a zero-value when not.
-func (s *Section) Key(name string) *Key {
-	key, err := s.GetKey(name)
-	if err != nil {
-		// It's OK here because the only possible error is empty key name,
-		// but if it's empty, this piece of code won't be executed.
-		key, _ = s.NewKey(name, "")
-		return key
-	}
-	return key
-}
-
-// Keys returns list of keys of section.
-func (s *Section) Keys() []*Key {
-	keys := make([]*Key, len(s.keyList))
-	for i := range s.keyList {
-		keys[i] = s.Key(s.keyList[i])
-	}
-	return keys
-}
-
-// ParentKeys returns list of keys of parent section.
-func (s *Section) ParentKeys() []*Key {
-	var parentKeys []*Key
-	sname := s.name
-	for {
-		if i := strings.LastIndex(sname, s.f.options.ChildSectionDelimiter); i > -1 {
-			sname = sname[:i]
-			sec, err := s.f.GetSection(sname)
-			if err != nil {
-				continue
-			}
-			parentKeys = append(parentKeys, sec.Keys()...)
-		} else {
-			break
-		}
-
-	}
-	return parentKeys
-}
-
-// KeyStrings returns list of key names of section.
-func (s *Section) KeyStrings() []string {
-	list := make([]string, len(s.keyList))
-	copy(list, s.keyList)
-	return list
-}
-
-// KeysHash returns keys hash consisting of names and values.
-func (s *Section) KeysHash() map[string]string {
-	if s.f.BlockMode {
-		s.f.lock.RLock()
-		defer s.f.lock.RUnlock()
-	}
-
-	hash := make(map[string]string, len(s.keysHash))
-	for key, value := range s.keysHash {
-		hash[key] = value
-	}
-	return hash
-}
-
-// DeleteKey deletes a key from section.
-func (s *Section) DeleteKey(name string) {
-	if s.f.BlockMode {
-		s.f.lock.Lock()
-		defer s.f.lock.Unlock()
-	}
-
-	for i, k := range s.keyList {
-		if k == name {
-			s.keyList = append(s.keyList[:i], s.keyList[i+1:]...)
-			delete(s.keys, name)
-			delete(s.keysHash, name)
-			return
-		}
-	}
-}
-
-// ChildSections returns a list of child sections of current section.
-// For example, "[parent.child1]" and "[parent.child12]" are child sections
-// of section "[parent]".
-func (s *Section) ChildSections() []*Section {
-	prefix := s.name + s.f.options.ChildSectionDelimiter
-	children := make([]*Section, 0, 3)
-	for _, name := range s.f.sectionList {
-		if strings.HasPrefix(name, prefix) {
-			children = append(children, s.f.sections[name]...)
-		}
-	}
-	return children
-}
diff --git a/vendor/gopkg.in/ini.v1/struct.go b/vendor/gopkg.in/ini.v1/struct.go
deleted file mode 100644
index a486b2fe0..000000000
--- a/vendor/gopkg.in/ini.v1/struct.go
+++ /dev/null
@@ -1,747 +0,0 @@
-// Copyright 2014 Unknwon
-//
-// Licensed under the Apache License, Version 2.0 (the "License"): you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package ini
-
-import (
-	"bytes"
-	"errors"
-	"fmt"
-	"reflect"
-	"strings"
-	"time"
-	"unicode"
-)
-
-// NameMapper represents a ini tag name mapper.
-type NameMapper func(string) string
-
-// Built-in name getters.
-var (
-	// SnackCase converts to format SNACK_CASE.
-	SnackCase NameMapper = func(raw string) string {
-		newstr := make([]rune, 0, len(raw))
-		for i, chr := range raw {
-			if isUpper := 'A' <= chr && chr <= 'Z'; isUpper {
-				if i > 0 {
-					newstr = append(newstr, '_')
-				}
-			}
-			newstr = append(newstr, unicode.ToUpper(chr))
-		}
-		return string(newstr)
-	}
-	// TitleUnderscore converts to format title_underscore.
-	TitleUnderscore NameMapper = func(raw string) string {
-		newstr := make([]rune, 0, len(raw))
-		for i, chr := range raw {
-			if isUpper := 'A' <= chr && chr <= 'Z'; isUpper {
-				if i > 0 {
-					newstr = append(newstr, '_')
-				}
-				chr -= 'A' - 'a'
-			}
-			newstr = append(newstr, chr)
-		}
-		return string(newstr)
-	}
-)
-
-func (s *Section) parseFieldName(raw, actual string) string {
-	if len(actual) > 0 {
-		return actual
-	}
-	if s.f.NameMapper != nil {
-		return s.f.NameMapper(raw)
-	}
-	return raw
-}
-
-func parseDelim(actual string) string {
-	if len(actual) > 0 {
-		return actual
-	}
-	return ","
-}
-
-var reflectTime = reflect.TypeOf(time.Now()).Kind()
-
-// setSliceWithProperType sets proper values to slice based on its type.
-func setSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error {
-	var strs []string
-	if allowShadow {
-		strs = key.StringsWithShadows(delim)
-	} else {
-		strs = key.Strings(delim)
-	}
-
-	numVals := len(strs)
-	if numVals == 0 {
-		return nil
-	}
-
-	var vals interface{}
-	var err error
-
-	sliceOf := field.Type().Elem().Kind()
-	switch sliceOf {
-	case reflect.String:
-		vals = strs
-	case reflect.Int:
-		vals, err = key.parseInts(strs, true, false)
-	case reflect.Int64:
-		vals, err = key.parseInt64s(strs, true, false)
-	case reflect.Uint:
-		vals, err = key.parseUints(strs, true, false)
-	case reflect.Uint64:
-		vals, err = key.parseUint64s(strs, true, false)
-	case reflect.Float64:
-		vals, err = key.parseFloat64s(strs, true, false)
-	case reflect.Bool:
-		vals, err = key.parseBools(strs, true, false)
-	case reflectTime:
-		vals, err = key.parseTimesFormat(time.RFC3339, strs, true, false)
-	default:
-		return fmt.Errorf("unsupported type '[]%s'", sliceOf)
-	}
-	if err != nil && isStrict {
-		return err
-	}
-
-	slice := reflect.MakeSlice(field.Type(), numVals, numVals)
-	for i := 0; i < numVals; i++ {
-		switch sliceOf {
-		case reflect.String:
-			slice.Index(i).Set(reflect.ValueOf(vals.([]string)[i]))
-		case reflect.Int:
-			slice.Index(i).Set(reflect.ValueOf(vals.([]int)[i]))
-		case reflect.Int64:
-			slice.Index(i).Set(reflect.ValueOf(vals.([]int64)[i]))
-		case reflect.Uint:
-			slice.Index(i).Set(reflect.ValueOf(vals.([]uint)[i]))
-		case reflect.Uint64:
-			slice.Index(i).Set(reflect.ValueOf(vals.([]uint64)[i]))
-		case reflect.Float64:
-			slice.Index(i).Set(reflect.ValueOf(vals.([]float64)[i]))
-		case reflect.Bool:
-			slice.Index(i).Set(reflect.ValueOf(vals.([]bool)[i]))
-		case reflectTime:
-			slice.Index(i).Set(reflect.ValueOf(vals.([]time.Time)[i]))
-		}
-	}
-	field.Set(slice)
-	return nil
-}
-
-func wrapStrictError(err error, isStrict bool) error {
-	if isStrict {
-		return err
-	}
-	return nil
-}
-
-// setWithProperType sets proper value to field based on its type,
-// but it does not return error for failing parsing,
-// because we want to use default value that is already assigned to struct.
-func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error {
-	vt := t
-	isPtr := t.Kind() == reflect.Ptr
-	if isPtr {
-		vt = t.Elem()
-	}
-	switch vt.Kind() {
-	case reflect.String:
-		stringVal := key.String()
-		if isPtr {
-			field.Set(reflect.ValueOf(&stringVal))
-		} else if len(stringVal) > 0 {
-			field.SetString(key.String())
-		}
-	case reflect.Bool:
-		boolVal, err := key.Bool()
-		if err != nil {
-			return wrapStrictError(err, isStrict)
-		}
-		if isPtr {
-			field.Set(reflect.ValueOf(&boolVal))
-		} else {
-			field.SetBool(boolVal)
-		}
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		// ParseDuration will not return err for `0`, so check the type name
-		if vt.Name() == "Duration" {
-			durationVal, err := key.Duration()
-			if err != nil {
-				if intVal, err := key.Int64(); err == nil {
-					field.SetInt(intVal)
-					return nil
-				}
-				return wrapStrictError(err, isStrict)
-			}
-			if isPtr {
-				field.Set(reflect.ValueOf(&durationVal))
-			} else if int64(durationVal) > 0 {
-				field.Set(reflect.ValueOf(durationVal))
-			}
-			return nil
-		}
-
-		intVal, err := key.Int64()
-		if err != nil {
-			return wrapStrictError(err, isStrict)
-		}
-		if isPtr {
-			pv := reflect.New(t.Elem())
-			pv.Elem().SetInt(intVal)
-			field.Set(pv)
-		} else {
-			field.SetInt(intVal)
-		}
-	// byte is an alias for uint8, so supporting uint8 breaks support for byte
-	case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64:
-		durationVal, err := key.Duration()
-		// Skip zero value
-		if err == nil && uint64(durationVal) > 0 {
-			if isPtr {
-				field.Set(reflect.ValueOf(&durationVal))
-			} else {
-				field.Set(reflect.ValueOf(durationVal))
-			}
-			return nil
-		}
-
-		uintVal, err := key.Uint64()
-		if err != nil {
-			return wrapStrictError(err, isStrict)
-		}
-		if isPtr {
-			pv := reflect.New(t.Elem())
-			pv.Elem().SetUint(uintVal)
-			field.Set(pv)
-		} else {
-			field.SetUint(uintVal)
-		}
-
-	case reflect.Float32, reflect.Float64:
-		floatVal, err := key.Float64()
-		if err != nil {
-			return wrapStrictError(err, isStrict)
-		}
-		if isPtr {
-			pv := reflect.New(t.Elem())
-			pv.Elem().SetFloat(floatVal)
-			field.Set(pv)
-		} else {
-			field.SetFloat(floatVal)
-		}
-	case reflectTime:
-		timeVal, err := key.Time()
-		if err != nil {
-			return wrapStrictError(err, isStrict)
-		}
-		if isPtr {
-			field.Set(reflect.ValueOf(&timeVal))
-		} else {
-			field.Set(reflect.ValueOf(timeVal))
-		}
-	case reflect.Slice:
-		return setSliceWithProperType(key, field, delim, allowShadow, isStrict)
-	default:
-		return fmt.Errorf("unsupported type %q", t)
-	}
-	return nil
-}
-
-func parseTagOptions(tag string) (rawName string, omitEmpty bool, allowShadow bool, allowNonUnique bool, extends bool) {
-	opts := strings.SplitN(tag, ",", 5)
-	rawName = opts[0]
-	for _, opt := range opts[1:] {
-		omitEmpty = omitEmpty || (opt == "omitempty")
-		allowShadow = allowShadow || (opt == "allowshadow")
-		allowNonUnique = allowNonUnique || (opt == "nonunique")
-		extends = extends || (opt == "extends")
-	}
-	return rawName, omitEmpty, allowShadow, allowNonUnique, extends
-}
-
-// mapToField maps the given value to the matching field of the given section.
-// The sectionIndex is the index (if non unique sections are enabled) to which the value should be added.
-func (s *Section) mapToField(val reflect.Value, isStrict bool, sectionIndex int, sectionName string) error {
-	if val.Kind() == reflect.Ptr {
-		val = val.Elem()
-	}
-	typ := val.Type()
-
-	for i := 0; i < typ.NumField(); i++ {
-		field := val.Field(i)
-		tpField := typ.Field(i)
-
-		tag := tpField.Tag.Get("ini")
-		if tag == "-" {
-			continue
-		}
-
-		rawName, _, allowShadow, allowNonUnique, extends := parseTagOptions(tag)
-		fieldName := s.parseFieldName(tpField.Name, rawName)
-		if len(fieldName) == 0 || !field.CanSet() {
-			continue
-		}
-
-		isStruct := tpField.Type.Kind() == reflect.Struct
-		isStructPtr := tpField.Type.Kind() == reflect.Ptr && tpField.Type.Elem().Kind() == reflect.Struct
-		isAnonymousPtr := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous
-		if isAnonymousPtr {
-			field.Set(reflect.New(tpField.Type.Elem()))
-		}
-
-		if extends && (isAnonymousPtr || (isStruct && tpField.Anonymous)) {
-			if isStructPtr && field.IsNil() {
-				field.Set(reflect.New(tpField.Type.Elem()))
-			}
-			fieldSection := s
-			if rawName != "" {
-				sectionName = s.name + s.f.options.ChildSectionDelimiter + rawName
-				if secs, err := s.f.SectionsByName(sectionName); err == nil && sectionIndex < len(secs) {
-					fieldSection = secs[sectionIndex]
-				}
-			}
-			if err := fieldSection.mapToField(field, isStrict, sectionIndex, sectionName); err != nil {
-				return fmt.Errorf("map to field %q: %v", fieldName, err)
-			}
-		} else if isAnonymousPtr || isStruct || isStructPtr {
-			if secs, err := s.f.SectionsByName(fieldName); err == nil {
-				if len(secs) <= sectionIndex {
-					return fmt.Errorf("there are not enough sections (%d <= %d) for the field %q", len(secs), sectionIndex, fieldName)
-				}
-				// Only set the field to non-nil struct value if we have a section for it.
-				// Otherwise, we end up with a non-nil struct ptr even though there is no data.
-				if isStructPtr && field.IsNil() {
-					field.Set(reflect.New(tpField.Type.Elem()))
-				}
-				if err = secs[sectionIndex].mapToField(field, isStrict, sectionIndex, fieldName); err != nil {
-					return fmt.Errorf("map to field %q: %v", fieldName, err)
-				}
-				continue
-			}
-		}
-
-		// Map non-unique sections
-		if allowNonUnique && tpField.Type.Kind() == reflect.Slice {
-			newField, err := s.mapToSlice(fieldName, field, isStrict)
-			if err != nil {
-				return fmt.Errorf("map to slice %q: %v", fieldName, err)
-			}
-
-			field.Set(newField)
-			continue
-		}
-
-		if key, err := s.GetKey(fieldName); err == nil {
-			delim := parseDelim(tpField.Tag.Get("delim"))
-			if err = setWithProperType(tpField.Type, key, field, delim, allowShadow, isStrict); err != nil {
-				return fmt.Errorf("set field %q: %v", fieldName, err)
-			}
-		}
-	}
-	return nil
-}
-
-// mapToSlice maps all sections with the same name and returns the new value.
-// The type of the Value must be a slice.
-func (s *Section) mapToSlice(secName string, val reflect.Value, isStrict bool) (reflect.Value, error) {
-	secs, err := s.f.SectionsByName(secName)
-	if err != nil {
-		return reflect.Value{}, err
-	}
-
-	typ := val.Type().Elem()
-	for i, sec := range secs {
-		elem := reflect.New(typ)
-		if err = sec.mapToField(elem, isStrict, i, sec.name); err != nil {
-			return reflect.Value{}, fmt.Errorf("map to field from section %q: %v", secName, err)
-		}
-
-		val = reflect.Append(val, elem.Elem())
-	}
-	return val, nil
-}
-
-// mapTo maps a section to object v.
-func (s *Section) mapTo(v interface{}, isStrict bool) error {
-	typ := reflect.TypeOf(v)
-	val := reflect.ValueOf(v)
-	if typ.Kind() == reflect.Ptr {
-		typ = typ.Elem()
-		val = val.Elem()
-	} else {
-		return errors.New("not a pointer to a struct")
-	}
-
-	if typ.Kind() == reflect.Slice {
-		newField, err := s.mapToSlice(s.name, val, isStrict)
-		if err != nil {
-			return err
-		}
-
-		val.Set(newField)
-		return nil
-	}
-
-	return s.mapToField(val, isStrict, 0, s.name)
-}
-
-// MapTo maps section to given struct.
-func (s *Section) MapTo(v interface{}) error {
-	return s.mapTo(v, false)
-}
-
-// StrictMapTo maps section to given struct in strict mode,
-// which returns all possible error including value parsing error.
-func (s *Section) StrictMapTo(v interface{}) error {
-	return s.mapTo(v, true)
-}
-
-// MapTo maps file to given struct.
-func (f *File) MapTo(v interface{}) error {
-	return f.Section("").MapTo(v)
-}
-
-// StrictMapTo maps file to given struct in strict mode,
-// which returns all possible error including value parsing error.
-func (f *File) StrictMapTo(v interface{}) error {
-	return f.Section("").StrictMapTo(v)
-}
-
-// MapToWithMapper maps data sources to given struct with name mapper.
-func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error {
-	cfg, err := Load(source, others...)
-	if err != nil {
-		return err
-	}
-	cfg.NameMapper = mapper
-	return cfg.MapTo(v)
-}
-
-// StrictMapToWithMapper maps data sources to given struct with name mapper in strict mode,
-// which returns all possible error including value parsing error.
-func StrictMapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error {
-	cfg, err := Load(source, others...)
-	if err != nil {
-		return err
-	}
-	cfg.NameMapper = mapper
-	return cfg.StrictMapTo(v)
-}
-
-// MapTo maps data sources to given struct.
-func MapTo(v, source interface{}, others ...interface{}) error {
-	return MapToWithMapper(v, nil, source, others...)
-}
-
-// StrictMapTo maps data sources to given struct in strict mode,
-// which returns all possible error including value parsing error.
-func StrictMapTo(v, source interface{}, others ...interface{}) error {
-	return StrictMapToWithMapper(v, nil, source, others...)
-}
-
-// reflectSliceWithProperType does the opposite thing as setSliceWithProperType.
-func reflectSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow bool) error {
-	slice := field.Slice(0, field.Len())
-	if field.Len() == 0 {
-		return nil
-	}
-	sliceOf := field.Type().Elem().Kind()
-
-	if allowShadow {
-		var keyWithShadows *Key
-		for i := 0; i < field.Len(); i++ {
-			var val string
-			switch sliceOf {
-			case reflect.String:
-				val = slice.Index(i).String()
-			case reflect.Int, reflect.Int64:
-				val = fmt.Sprint(slice.Index(i).Int())
-			case reflect.Uint, reflect.Uint64:
-				val = fmt.Sprint(slice.Index(i).Uint())
-			case reflect.Float64:
-				val = fmt.Sprint(slice.Index(i).Float())
-			case reflect.Bool:
-				val = fmt.Sprint(slice.Index(i).Bool())
-			case reflectTime:
-				val = slice.Index(i).Interface().(time.Time).Format(time.RFC3339)
-			default:
-				return fmt.Errorf("unsupported type '[]%s'", sliceOf)
-			}
-
-			if i == 0 {
-				keyWithShadows = newKey(key.s, key.name, val)
-			} else {
-				_ = keyWithShadows.AddShadow(val)
-			}
-		}
-		*key = *keyWithShadows
-		return nil
-	}
-
-	var buf bytes.Buffer
-	for i := 0; i < field.Len(); i++ {
-		switch sliceOf {
-		case reflect.String:
-			buf.WriteString(slice.Index(i).String())
-		case reflect.Int, reflect.Int64:
-			buf.WriteString(fmt.Sprint(slice.Index(i).Int()))
-		case reflect.Uint, reflect.Uint64:
-			buf.WriteString(fmt.Sprint(slice.Index(i).Uint()))
-		case reflect.Float64:
-			buf.WriteString(fmt.Sprint(slice.Index(i).Float()))
-		case reflect.Bool:
-			buf.WriteString(fmt.Sprint(slice.Index(i).Bool()))
-		case reflectTime:
-			buf.WriteString(slice.Index(i).Interface().(time.Time).Format(time.RFC3339))
-		default:
-			return fmt.Errorf("unsupported type '[]%s'", sliceOf)
-		}
-		buf.WriteString(delim)
-	}
-	key.SetValue(buf.String()[:buf.Len()-len(delim)])
-	return nil
-}
-
-// reflectWithProperType does the opposite thing as setWithProperType.
-func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow bool) error {
-	switch t.Kind() {
-	case reflect.String:
-		key.SetValue(field.String())
-	case reflect.Bool:
-		key.SetValue(fmt.Sprint(field.Bool()))
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		key.SetValue(fmt.Sprint(field.Int()))
-	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
-		key.SetValue(fmt.Sprint(field.Uint()))
-	case reflect.Float32, reflect.Float64:
-		key.SetValue(fmt.Sprint(field.Float()))
-	case reflectTime:
-		key.SetValue(fmt.Sprint(field.Interface().(time.Time).Format(time.RFC3339)))
-	case reflect.Slice:
-		return reflectSliceWithProperType(key, field, delim, allowShadow)
-	case reflect.Ptr:
-		if !field.IsNil() {
-			return reflectWithProperType(t.Elem(), key, field.Elem(), delim, allowShadow)
-		}
-	default:
-		return fmt.Errorf("unsupported type %q", t)
-	}
-	return nil
-}
-
-// CR: copied from encoding/json/encode.go with modifications of time.Time support.
-// TODO: add more test coverage.
-func isEmptyValue(v reflect.Value) bool {
-	switch v.Kind() {
-	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
-		return v.Len() == 0
-	case reflect.Bool:
-		return !v.Bool()
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		return v.Int() == 0
-	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-		return v.Uint() == 0
-	case reflect.Float32, reflect.Float64:
-		return v.Float() == 0
-	case reflect.Interface, reflect.Ptr:
-		return v.IsNil()
-	case reflectTime:
-		t, ok := v.Interface().(time.Time)
-		return ok && t.IsZero()
-	}
-	return false
-}
-
-// StructReflector is the interface implemented by struct types that can extract themselves into INI objects.
-type StructReflector interface {
-	ReflectINIStruct(*File) error
-}
-
-func (s *Section) reflectFrom(val reflect.Value) error {
-	if val.Kind() == reflect.Ptr {
-		val = val.Elem()
-	}
-	typ := val.Type()
-
-	for i := 0; i < typ.NumField(); i++ {
-		if !val.Field(i).CanInterface() {
-			continue
-		}
-
-		field := val.Field(i)
-		tpField := typ.Field(i)
-
-		tag := tpField.Tag.Get("ini")
-		if tag == "-" {
-			continue
-		}
-
-		rawName, omitEmpty, allowShadow, allowNonUnique, extends := parseTagOptions(tag)
-		if omitEmpty && isEmptyValue(field) {
-			continue
-		}
-
-		if r, ok := field.Interface().(StructReflector); ok {
-			return r.ReflectINIStruct(s.f)
-		}
-
-		fieldName := s.parseFieldName(tpField.Name, rawName)
-		if len(fieldName) == 0 || !field.CanSet() {
-			continue
-		}
-
-		if extends && tpField.Anonymous && (tpField.Type.Kind() == reflect.Ptr || tpField.Type.Kind() == reflect.Struct) {
-			if err := s.reflectFrom(field); err != nil {
-				return fmt.Errorf("reflect from field %q: %v", fieldName, err)
-			}
-			continue
-		}
-
-		if (tpField.Type.Kind() == reflect.Ptr && tpField.Type.Elem().Kind() == reflect.Struct) ||
-			(tpField.Type.Kind() == reflect.Struct && tpField.Type.Name() != "Time") {
-			// Note: The only error here is section doesn't exist.
-			sec, err := s.f.GetSection(fieldName)
-			if err != nil {
-				// Note: fieldName can never be empty here, ignore error.
-				sec, _ = s.f.NewSection(fieldName)
-			}
-
-			// Add comment from comment tag
-			if len(sec.Comment) == 0 {
-				sec.Comment = tpField.Tag.Get("comment")
-			}
-
-			if err = sec.reflectFrom(field); err != nil {
-				return fmt.Errorf("reflect from field %q: %v", fieldName, err)
-			}
-			continue
-		}
-
-		if allowNonUnique && tpField.Type.Kind() == reflect.Slice {
-			slice := field.Slice(0, field.Len())
-			if field.Len() == 0 {
-				return nil
-			}
-			sliceOf := field.Type().Elem().Kind()
-
-			for i := 0; i < field.Len(); i++ {
-				if sliceOf != reflect.Struct && sliceOf != reflect.Ptr {
-					return fmt.Errorf("field %q is not a slice of pointer or struct", fieldName)
-				}
-
-				sec, err := s.f.NewSection(fieldName)
-				if err != nil {
-					return err
-				}
-
-				// Add comment from comment tag
-				if len(sec.Comment) == 0 {
-					sec.Comment = tpField.Tag.Get("comment")
-				}
-
-				if err := sec.reflectFrom(slice.Index(i)); err != nil {
-					return fmt.Errorf("reflect from field %q: %v", fieldName, err)
-				}
-			}
-			continue
-		}
-
-		// Note: Same reason as section.
-		key, err := s.GetKey(fieldName)
-		if err != nil {
-			key, _ = s.NewKey(fieldName, "")
-		}
-
-		// Add comment from comment tag
-		if len(key.Comment) == 0 {
-			key.Comment = tpField.Tag.Get("comment")
-		}
-
-		delim := parseDelim(tpField.Tag.Get("delim"))
-		if err = reflectWithProperType(tpField.Type, key, field, delim, allowShadow); err != nil {
-			return fmt.Errorf("reflect field %q: %v", fieldName, err)
-		}
-
-	}
-	return nil
-}
-
-// ReflectFrom reflects section from given struct. It overwrites existing ones.
-func (s *Section) ReflectFrom(v interface{}) error {
-	typ := reflect.TypeOf(v)
-	val := reflect.ValueOf(v)
-
-	if s.name != DefaultSection && s.f.options.AllowNonUniqueSections &&
-		(typ.Kind() == reflect.Slice || typ.Kind() == reflect.Ptr) {
-		// Clear sections to make sure none exists before adding the new ones
-		s.f.DeleteSection(s.name)
-
-		if typ.Kind() == reflect.Ptr {
-			sec, err := s.f.NewSection(s.name)
-			if err != nil {
-				return err
-			}
-			return sec.reflectFrom(val.Elem())
-		}
-
-		slice := val.Slice(0, val.Len())
-		sliceOf := val.Type().Elem().Kind()
-		if sliceOf != reflect.Ptr {
-			return fmt.Errorf("not a slice of pointers")
-		}
-
-		for i := 0; i < slice.Len(); i++ {
-			sec, err := s.f.NewSection(s.name)
-			if err != nil {
-				return err
-			}
-
-			err = sec.reflectFrom(slice.Index(i))
-			if err != nil {
-				return fmt.Errorf("reflect from %dth field: %v", i, err)
-			}
-		}
-
-		return nil
-	}
-
-	if typ.Kind() == reflect.Ptr {
-		val = val.Elem()
-	} else {
-		return errors.New("not a pointer to a struct")
-	}
-
-	return s.reflectFrom(val)
-}
-
-// ReflectFrom reflects file from given struct.
-func (f *File) ReflectFrom(v interface{}) error {
-	return f.Section("").ReflectFrom(v)
-}
-
-// ReflectFromWithMapper reflects data sources from given struct with name mapper.
-func ReflectFromWithMapper(cfg *File, v interface{}, mapper NameMapper) error {
-	cfg.NameMapper = mapper
-	return cfg.ReflectFrom(v)
-}
-
-// ReflectFrom reflects data sources from given struct.
-func ReflectFrom(cfg *File, v interface{}) error {
-	return ReflectFromWithMapper(cfg, v, nil)
-}
diff --git a/vendor/lukechampine.com/frand/README.md b/vendor/lukechampine.com/frand/README.md
index af7eab21c..a4f96c4c0 100644
--- a/vendor/lukechampine.com/frand/README.md
+++ b/vendor/lukechampine.com/frand/README.md
@@ -8,11 +8,10 @@ frand
 ```
 go get lukechampine.com/frand
 ```
 
-`frand` is a [fast-key-erasure
-CSPRNG](https://blog.cr.yp.to/20170723-random.html) in userspace. Its output is
-sourced from the keystream of a [ChaCha](https://en.wikipedia.org/wiki/Salsa20#ChaCha_variant)
-cipher, much like the CSPRNG in the Linux and BSD kernels. The initial cipher key is
-derived from the kernel CSPRNG, after which [no new entropy is ever mixed in](https://blog.cr.yp.to/20140205-entropy.html).
+`frand` is a fast CSPRNG in userspace, implemented as a lightweight wrapper
+around the [`(math/rand/v2).ChaCha8`](https://go.dev/src/math/rand/v2/chacha8.go) generator. The initial
+cipher key is derived from the kernel CSPRNG, after which [no new entropy is
+ever mixed in](https://blog.cr.yp.to/20140205-entropy.html).
 
 The primary advantage of `frand` over `crypto/rand` is speed: when generating
 large amounts of random data, `frand` is 20x faster than `crypto/rand`, and over
@@ -23,45 +22,25 @@ and convenient as `math/rand`, but far more secure.
 
 ## Design
 
-`frand` closely follows the FKE-CSPRNG design linked above. The generator
-maintains a buffer consisting of a ChaCha key followed by random data.
-When a caller requests data, the generator fills the request from its buffer, and
-immediately overwrites that portion of the buffer with zeros. If the buffer is
-exhausted, the generator "refills" the buffer by xoring it with the ChaCha key's
-keystream; thus, the key is only used once, and immediately overwrites itself.
-This provides [forward secrecy](https://en.wikipedia.org/wiki/Forward_secrecy):
-even if an attacker learns the secret key, they cannot learn what data was
-previously output from the generator.
-
-One optimization is implemented. If the caller requests a large amount of data
-(larger than the generator's buffer), the generator does not repeatedly fill and
-drain its buffer. Instead, it requests 32 bytes from itself, and uses this as a
-ChaCha key to directly generate the amount of data requested. The key is then
-overwritten with zeros.
-
-The default generator uses ChaCha12. ChaCha8 is significantly weaker without
-being much faster; ChaCha20 is significantly slower without being much stronger.
-
-At `init` time, a "master" generator is created using an initial key sourced
-from `crypto/rand`. New generators source their initial keys from this master
-generator. Following [djb's recommendation](https://blog.cr.yp.to/20140205-entropy.html),
-the generator never mixes in new entropy.
+`frand` was previously implemented as a [FKE-CSPRNG](https://blog.cr.yp.to/20170723-random.html).
+Since the addition of `ChaCha8` in Go 1.23, this is no longer necessary, as the
+stdlib generator is equally fast and secure. If you're curious about the old
+implementation, you can read about it [here](https://github.com/lukechampine/frand/blob/3f951cc7c03d029aba12f89106666489fd8c769b/README.md).
 
 Calls to global functions like `Read` and `Intn` are serviced by a `sync.Pool`
 of generators derived from the master generator. This allows `frand` to scale
 near-linearly across CPU cores, which is what makes it so much faster than
 `crypto/rand` and `math/rand` in parallel benchmarks.
 
-
 ## Security
 
-Some may object to substituting a userspace CSPRNG for the kernel's
-CSPRNG. Certain userspace CSPRNGs (e.g. [OpenSSL](https://research.swtch.com/openssl))
-have contained bugs, causing serious vulnerabilities. `frand` has some
-advantages over these CSPRNGs: it never mixes in new entropy, it doesn't
-maintain an "entropy estimate," and its implementation is extremely simple.
-Still, if you only need to generate a handful of secret keys, you should
-probably stick with `crypto/rand`.
+This is a slightly hot take, but I do not believe there is a meaningful
+difference in security between using `crypto/rand` and a `crypto/rand`-seeded
+ChaCha8 generator. There are caveats, of course: for example, since the
+generator is in userspace, it is somewhat easier to access its internal state
+and predict future outputs. But if an attacker can access the internal state
+of your program, surely you have bigger fish to fry. If `crypto/rand` makes
+you feel safer, great, use it. But I don't think it's actually any more secure.
 
 Even if you aren't comfortable using `frand` for critical secrets, you should
 still use it everywhere you would normally use an insecure PRNG like
@@ -73,24 +52,3 @@ of `math/rand` can lead to bugs or vulnerabilities. For example, using
 run-to-run if the generator is left unseeded.
 More seriously, using `math/rand` to salt a hash table may make your program
 [vulnerable to denial of service attacks](https://stackoverflow.com/questions/52184366/why-does-hashmap-need-a-cryptographically-secure-hashing-function).
 Substituting `frand` for `math/rand` would avoid both of these outcomes.
-
-
-## Benchmarks
-
-
-| Benchmark                | `frand`        |`crypto/rand`  | `math/rand` (insecure) | [`fastrand`](https://gitlab.com/NebulousLabs/fastrand) |
-|--------------------------|----------------|---------------|------------------------|------------|
-| Read (32b)               | **964 MB/s**   | 59 MB/s       | 634.21 MB/s            | 215 MB/s   |
-| Read (32b, concurrent)   | **3566 MB/s**  | 70 MB/s       | 198.97 MB/s            | 615 MB/s   |
-| Read (512kb)             | **5094 MB/s**  | 239 MB/s      | 965.85 MB/s            | 452 MB/s   |
-| Read (512kb, concurrent) | **19665 MB/s** | 191 MB/s      | 958.01 MB/s            | 1599 MB/s  |
-| Intn (n =~ 4e18)         | 45 ns/op       | 725 ns/op     | **20 ns/op**           | 210 ns/op  |
-| BigIntn (n = 2^630)      | **223 ns/op**  | 1013 ns/op    | 295 ns/op              | 468 ns/op  |
-| Perm (n = 32)            | 954 ns/op      | 17197 ns/op   | **789 ns/op**          | 5021 ns/op |
-
-Benchmark details:
-
-"Concurrent" means the `Read` function was called in parallel from 64 goroutines.
-
-`Intn` was benchmarked with n = 2^62 + 1, which maximizes the number of expected
-retries required to remove bias. The number of expected retries is 1.333.
diff --git a/vendor/lukechampine.com/frand/frand.go b/vendor/lukechampine.com/frand/frand.go
index 4393b3c85..9f9868d75 100644
--- a/vendor/lukechampine.com/frand/frand.go
+++ b/vendor/lukechampine.com/frand/frand.go
@@ -1,64 +1,24 @@
-package frand // import "lukechampine.com/frand"
+package frand
 
 import (
-	"crypto/rand"
+	crand "crypto/rand"
 	"encoding/binary"
 	"math"
 	"math/big"
+	"math/rand/v2"
 	"strconv"
 	"sync"
-
-	"github.com/aead/chacha20/chacha"
 )
 
-func erase(b []byte) {
-	// compiles to memclr
-	for i := range b {
-		b[i] = 0
-	}
-}
-
-func copyAndErase(dst, src []byte) int {
-	n := copy(dst, src)
-	erase(src[:n])
-	return n
-}
-
 // An RNG is a cryptographically-strong RNG constructed from the ChaCha stream
 // cipher.
 type RNG struct {
-	buf    []byte
-	n      int
-	rounds int
+	r *rand.ChaCha8
 }
 
 // Read fills b with random data. It always returns len(b), nil.
-//
-// For performance reasons, calling Read once on a "large" buffer (larger than
-// the RNG's internal buffer) will produce different output than calling Read
-// multiple times on smaller buffers. If deterministic output is required,
-// clients should call Read in a loop; when copying to an io.Writer, use
-// io.CopyBuffer instead of io.Copy. Callers should also be aware that b is
-// xored with random data, not directly overwritten; this means that the new
-// contents of b depend on its previous contents.
 func (r *RNG) Read(b []byte) (int, error) {
-	if len(b) <= len(r.buf[r.n:]) {
-		// can fill b entirely from buffer
-		r.n += copyAndErase(b, r.buf[r.n:])
-	} else if len(b) <= len(r.buf[r.n:])+len(r.buf[chacha.KeySize:]) {
-		// b is larger than current buffer, but can be filled after a reseed
-		n := copy(b, r.buf[r.n:])
-		chacha.XORKeyStream(r.buf, r.buf, make([]byte, chacha.NonceSize), r.buf[:chacha.KeySize], r.rounds)
-		r.n = chacha.KeySize + copyAndErase(b[n:], r.buf[chacha.KeySize:])
-	} else {
-		// filling b would require multiple reseeds; instead, generate a
-		// temporary key, then write directly into b using that key
-		tmpKey := make([]byte, chacha.KeySize)
-		r.Read(tmpKey)
-		chacha.XORKeyStream(b, b, make([]byte, chacha.NonceSize), tmpKey, r.rounds)
-		erase(tmpKey)
-	}
-	return len(b), nil
+	return r.r.Read(b)
 }
 
 // Bytes is a helper function that allocates and returns n bytes of random data.
@@ -97,10 +57,8 @@ func (r *RNG) Uint64n(n uint64) uint64 {
 	// n = math.MaxUint64/2 + 1 -> max = math.MaxUint64 - math.MaxUint64/2
 	// This gives an expected 2 tries before choosing a value < max.
 	max := math.MaxUint64 - math.MaxUint64%n
-	b := make([]byte, 8)
 again:
-	r.Read(b)
-	i := binary.LittleEndian.Uint64(b)
+	i := r.r.Uint64()
 	if i >= max {
 		goto again
 	}
@@ -120,7 +78,7 @@ func (r *RNG) Intn(n int) int {
 
 // BigIntn returns a uniform random *big.Int in [0,n). It panics if n <= 0.
 func (r *RNG) BigIntn(n *big.Int) *big.Int {
-	i, _ := rand.Int(r, n)
+	i, _ := crand.Int(r, n)
 	return i
 }
 
@@ -148,36 +106,24 @@ func (r *RNG) Shuffle(n int, swap func(i, j int)) {
 	}
 }
 
-// NewCustom returns a new RNG instance seeded with the provided entropy and
-// using the specified buffer size and number of ChaCha rounds. It panics if
-// len(seed) != 32, bufsize < 32, or rounds != 8, 12 or 20.
+// NewCustom returns a new RNG instance seeded with the provided entropy. It
+// panics if len(seed) != 32. The bufsize and rounds parameters are ignored.
 func NewCustom(seed []byte, bufsize int, rounds int) *RNG {
-	if len(seed) != chacha.KeySize {
+	if len(seed) != 32 {
 		panic("frand: invalid seed size")
-	} else if bufsize < chacha.KeySize {
-		panic("frand: bufsize must be at least 32")
-	} else if !(rounds == 8 || rounds == 12 || rounds == 20) {
-		panic("frand: rounds must be 8, 12, or 20")
-	}
-	buf := make([]byte, chacha.KeySize+bufsize)
-	chacha.XORKeyStream(buf, buf, make([]byte, chacha.NonceSize), seed, rounds)
-	return &RNG{
-		buf:    buf,
-		n:      chacha.KeySize,
-		rounds: rounds,
 	}
+	return &RNG{rand.NewChaCha8(([32]byte)(seed))}
 }
 
 // "master" RNG, seeded from crypto/rand; RNGs returned by New derive their seed
 // from this RNG. This means we only ever need to read system entropy a single
 // time, at startup.
 var masterRNG = func() *RNG {
-	seed := make([]byte, chacha.KeySize)
-	n, err := rand.Read(seed)
-	if err != nil || n != len(seed) {
+	var seed [32]byte
+	if _, err := crand.Read(seed[:]); err != nil {
 		panic("not enough system entropy to seed master RNG")
 	}
-	return NewCustom(seed, 1024, 12)
+	return &RNG{rand.NewChaCha8(seed)}
 }()
 
 var masterMu sync.Mutex
@@ -188,7 +134,7 @@ var masterMu sync.Mutex
 func New() *RNG {
 	masterMu.Lock()
 	defer masterMu.Unlock()
-	return NewCustom(masterRNG.Bytes(32), 1024, 12)
+	return &RNG{rand.NewChaCha8(masterRNG.Entropy256())}
 }
 
 // Global versions of each RNG method, leveraging a pool of RNGs.
@@ -286,7 +232,7 @@ type rngReader struct{}
 
 func (rngReader) Read(b []byte) (int, error) { return Read(b) }
 
 // A Source is a math/rand-compatible source of entropy. It is safe for
It is safe for -// concurrent use by multiple goroutines +// concurrent use by multiple goroutines. type Source struct { rng *RNG mu sync.Mutex @@ -297,13 +243,9 @@ type Source struct { func (s *Source) Seed(i int64) { s.mu.Lock() defer s.mu.Unlock() - seed := make([]byte, chacha.KeySize) - binary.LittleEndian.PutUint64(seed, uint64(i)) - for i := range s.rng.buf { - s.rng.buf[i] = 0 - } - chacha.XORKeyStream(s.rng.buf, s.rng.buf, make([]byte, chacha.NonceSize), seed, s.rng.rounds) - s.rng.n = chacha.KeySize + var seed [32]byte + binary.LittleEndian.PutUint64(seed[:], uint64(i)) + s.rng.r.Seed(seed) } // Int63 returns a non-negative random 63-bit integer as an int64. diff --git a/vendor/modules.txt b/vendor/modules.txt index ab9e5587c..e7142ab34 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -66,7 +66,7 @@ github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/options github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version github.com/AzureAD/microsoft-authentication-library-for-go/apps/public -# github.com/BurntSushi/toml v1.4.0 +# github.com/BurntSushi/toml v1.5.0 ## explicit; go 1.18 github.com/BurntSushi/toml github.com/BurntSushi/toml/internal @@ -77,7 +77,7 @@ github.com/Microsoft/go-winio/internal/fs github.com/Microsoft/go-winio/internal/socket github.com/Microsoft/go-winio/internal/stringbuffer github.com/Microsoft/go-winio/pkg/guid -# github.com/ProtonMail/go-crypto v1.1.3 +# github.com/ProtonMail/go-crypto v1.1.6 ## explicit; go 1.17 github.com/ProtonMail/go-crypto/bitcurves github.com/ProtonMail/go-crypto/brainpool @@ -101,9 +101,6 @@ github.com/ProtonMail/go-crypto/openpgp/packet github.com/ProtonMail/go-crypto/openpgp/s2k github.com/ProtonMail/go-crypto/openpgp/x25519 github.com/ProtonMail/go-crypto/openpgp/x448 -# github.com/aead/chacha20 v0.0.0-20180709150244-8b13a72661da -## explicit -github.com/aead/chacha20/chacha # github.com/agext/levenshtein v1.2.3 ## explicit github.com/agext/levenshtein @@ -150,11 +147,11 @@ github.com/aws/aws-sdk-go-v2/internal/shareddefaults github.com/aws/aws-sdk-go-v2/internal/strings github.com/aws/aws-sdk-go-v2/internal/sync/singleflight github.com/aws/aws-sdk-go-v2/internal/timeconv -# github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2 -## explicit; go 1.20 +# github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 +## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi -# github.com/aws/aws-sdk-go-v2/config v1.29.9 +# github.com/aws/aws-sdk-go-v2/config v1.29.8 ## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/config # github.com/aws/aws-sdk-go-v2/credentials v1.17.62 @@ -179,22 +176,22 @@ github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 # github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 ## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/internal/ini -# github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.5 -## explicit; go 1.20 +# github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34 +## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/internal/v4a github.com/aws/aws-sdk-go-v2/internal/v4a/internal/crypto github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4 -# github.com/aws/aws-sdk-go-v2/service/ec2 v1.203.1 +# github.com/aws/aws-sdk-go-v2/service/ec2 v1.210.1 ## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/service/ec2 github.com/aws/aws-sdk-go-v2/service/ec2/internal/endpoints 
github.com/aws/aws-sdk-go-v2/service/ec2/types -# github.com/aws/aws-sdk-go-v2/service/ecs v1.53.0 -## explicit; go 1.21 +# github.com/aws/aws-sdk-go-v2/service/ecs v1.54.2 +## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/service/ecs github.com/aws/aws-sdk-go-v2/service/ecs/internal/endpoints github.com/aws/aws-sdk-go-v2/service/ecs/types -# github.com/aws/aws-sdk-go-v2/service/iam v1.39.2 +# github.com/aws/aws-sdk-go-v2/service/iam v1.40.1 ## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/service/iam github.com/aws/aws-sdk-go-v2/service/iam/internal/endpoints @@ -202,24 +199,24 @@ github.com/aws/aws-sdk-go-v2/service/iam/types # github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 ## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding -# github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.7 -## explicit; go 1.20 +# github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.0 +## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/service/internal/checksum # github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 ## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/service/internal/presigned-url -# github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.5 -## explicit; go 1.20 +# github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15 +## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/service/internal/s3shared github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn github.com/aws/aws-sdk-go-v2/service/internal/s3shared/config -# github.com/aws/aws-sdk-go-v2/service/pricing v1.32.17 +# github.com/aws/aws-sdk-go-v2/service/pricing v1.34.1 ## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/service/pricing github.com/aws/aws-sdk-go-v2/service/pricing/internal/endpoints github.com/aws/aws-sdk-go-v2/service/pricing/types -# github.com/aws/aws-sdk-go-v2/service/s3 v1.53.1 -## explicit; go 1.20 +# github.com/aws/aws-sdk-go-v2/service/s3 v1.78.2 +## explicit; go 1.22 github.com/aws/aws-sdk-go-v2/service/s3 github.com/aws/aws-sdk-go-v2/service/s3/internal/arn github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations @@ -370,8 +367,8 @@ github.com/go-git/go-billy/v5/helper/polyfill github.com/go-git/go-billy/v5/memfs github.com/go-git/go-billy/v5/osfs github.com/go-git/go-billy/v5/util -# github.com/go-git/go-git/v5 v5.13.1 -## explicit; go 1.21 +# github.com/go-git/go-git/v5 v5.14.0 +## explicit; go 1.23.0 github.com/go-git/go-git/v5 github.com/go-git/go-git/v5/config github.com/go-git/go-git/v5/internal/path_util @@ -418,6 +415,10 @@ github.com/go-git/go-git/v5/utils/merkletrie/internal/frame github.com/go-git/go-git/v5/utils/merkletrie/noder github.com/go-git/go-git/v5/utils/sync github.com/go-git/go-git/v5/utils/trace +# github.com/go-viper/mapstructure/v2 v2.2.1 +## explicit; go 1.18 +github.com/go-viper/mapstructure/v2 +github.com/go-viper/mapstructure/v2/internal/errors # github.com/gogo/protobuf v1.3.2 ## explicit; go 1.15 github.com/gogo/protobuf/io @@ -430,8 +431,8 @@ github.com/golang-jwt/jwt/v5 github.com/golang/glog github.com/golang/glog/internal/logsink github.com/golang/glog/internal/stackdump -# github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da -## explicit +# github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 +## explicit; go 1.20 github.com/golang/groupcache/lru # github.com/google/uuid v1.6.0 ## explicit @@ -445,18 +446,6 @@ github.com/hashicorp/errwrap # github.com/hashicorp/go-multierror v1.1.1 ## explicit; go 1.13 github.com/hashicorp/go-multierror -# 
github.com/hashicorp/hcl v1.0.0 -## explicit -github.com/hashicorp/hcl -github.com/hashicorp/hcl/hcl/ast -github.com/hashicorp/hcl/hcl/parser -github.com/hashicorp/hcl/hcl/printer -github.com/hashicorp/hcl/hcl/scanner -github.com/hashicorp/hcl/hcl/strconv -github.com/hashicorp/hcl/hcl/token -github.com/hashicorp/hcl/json/parser -github.com/hashicorp/hcl/json/scanner -github.com/hashicorp/hcl/json/token # github.com/hashicorp/hcl/v2 v2.23.0 ## explicit; go 1.18 github.com/hashicorp/hcl/v2 @@ -469,9 +458,6 @@ github.com/iwdgo/sigintwindows # github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 ## explicit github.com/jbenet/go-context/io -# github.com/jmespath/go-jmespath v0.4.0 -## explicit; go 1.14 -github.com/jmespath/go-jmespath # github.com/kevinburke/ssh_config v1.2.0 ## explicit github.com/kevinburke/ssh_config @@ -482,9 +468,6 @@ github.com/kylelemons/godebug/pretty # github.com/lucasb-eyer/go-colorful v1.2.0 ## explicit; go 1.12 github.com/lucasb-eyer/go-colorful -# github.com/magiconair/properties v1.8.7 -## explicit; go 1.19 -github.com/magiconair/properties # github.com/mattn/go-isatty v0.0.20 ## explicit; go 1.15 github.com/mattn/go-isatty @@ -503,9 +486,6 @@ github.com/mitchellh/go-ps # github.com/mitchellh/go-wordwrap v1.0.1 ## explicit; go 1.14 github.com/mitchellh/go-wordwrap -# github.com/mitchellh/mapstructure v1.5.0 -## explicit; go 1.14 -github.com/mitchellh/mapstructure # github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 ## explicit; go 1.17 github.com/muesli/ansi @@ -551,8 +531,8 @@ github.com/pelletier/go-toml/v2/unstable # github.com/pgavlin/fx v0.1.6 ## explicit; go 1.20 github.com/pgavlin/fx -# github.com/pjbgf/sha1cd v0.3.0 -## explicit; go 1.19 +# github.com/pjbgf/sha1cd v0.3.2 +## explicit; go 1.21 github.com/pjbgf/sha1cd github.com/pjbgf/sha1cd/internal github.com/pjbgf/sha1cd/ubc @@ -570,7 +550,7 @@ github.com/pkg/term/termios github.com/pulumi/appdash github.com/pulumi/appdash/internal/wire github.com/pulumi/appdash/opentracing -# github.com/pulumi/esc v0.12.0 +# github.com/pulumi/esc v0.13.0 ## explicit; go 1.21 github.com/pulumi/esc github.com/pulumi/esc/ast @@ -580,12 +560,12 @@ github.com/pulumi/esc/internal/util github.com/pulumi/esc/schema github.com/pulumi/esc/syntax github.com/pulumi/esc/syntax/encoding -# github.com/pulumi/pulumi-aws-native/sdk v1.25.0 +# github.com/pulumi/pulumi-aws-native/sdk v1.26.0 ## explicit; go 1.22 github.com/pulumi/pulumi-aws-native/sdk/go/aws github.com/pulumi/pulumi-aws-native/sdk/go/aws/internal github.com/pulumi/pulumi-aws-native/sdk/go/aws/scheduler -# github.com/pulumi/pulumi-aws/sdk/v6 v6.69.0 +# github.com/pulumi/pulumi-aws/sdk/v6 v6.72.0 ## explicit; go 1.22 github.com/pulumi/pulumi-aws/sdk/v6/go/aws/autoscaling github.com/pulumi/pulumi-aws/sdk/v6/go/aws/cloudwatch @@ -595,56 +575,56 @@ github.com/pulumi/pulumi-aws/sdk/v6/go/aws/iam github.com/pulumi/pulumi-aws/sdk/v6/go/aws/internal github.com/pulumi/pulumi-aws/sdk/v6/go/aws/lb github.com/pulumi/pulumi-aws/sdk/v6/go/aws/s3 -# github.com/pulumi/pulumi-awsx/sdk/v2 v2.21.1 -## explicit; go 1.22 +# github.com/pulumi/pulumi-awsx/sdk/v2 v2.21.0 +## explicit; go 1.21.12 github.com/pulumi/pulumi-awsx/sdk/v2/go/awsx/awsx github.com/pulumi/pulumi-awsx/sdk/v2/go/awsx/ecs github.com/pulumi/pulumi-awsx/sdk/v2/go/awsx/internal -# github.com/pulumi/pulumi-azure-native-sdk/authorization/v2 v2.88.0 +# github.com/pulumi/pulumi-azure-native-sdk/authorization/v2 v2.89.3 ## explicit; go 1.22 github.com/pulumi/pulumi-azure-native-sdk/authorization/v2 -# 
github.com/pulumi/pulumi-azure-native-sdk/compute/v2 v2.88.0 +# github.com/pulumi/pulumi-azure-native-sdk/compute/v2 v2.89.3 ## explicit; go 1.22 github.com/pulumi/pulumi-azure-native-sdk/compute/v2 -# github.com/pulumi/pulumi-azure-native-sdk/containerservice/v2 v2.88.0 +# github.com/pulumi/pulumi-azure-native-sdk/containerservice/v2 v2.89.3 ## explicit; go 1.22 github.com/pulumi/pulumi-azure-native-sdk/containerservice/v2/v20240801 -# github.com/pulumi/pulumi-azure-native-sdk/managedidentity/v2 v2.53.0 -## explicit; go 1.18 +# github.com/pulumi/pulumi-azure-native-sdk/managedidentity/v2 v2.89.3 +## explicit; go 1.22 github.com/pulumi/pulumi-azure-native-sdk/managedidentity/v2 -# github.com/pulumi/pulumi-azure-native-sdk/network/v2 v2.88.0 +# github.com/pulumi/pulumi-azure-native-sdk/network/v2 v2.89.3 ## explicit; go 1.22 github.com/pulumi/pulumi-azure-native-sdk/network/v2 -# github.com/pulumi/pulumi-azure-native-sdk/resources/v2 v2.88.0 +# github.com/pulumi/pulumi-azure-native-sdk/resources/v2 v2.89.3 ## explicit; go 1.22 github.com/pulumi/pulumi-azure-native-sdk/resources/v2 -# github.com/pulumi/pulumi-azure-native-sdk/storage/v2 v2.88.0 +# github.com/pulumi/pulumi-azure-native-sdk/storage/v2 v2.89.3 ## explicit; go 1.22 github.com/pulumi/pulumi-azure-native-sdk/storage/v2 -# github.com/pulumi/pulumi-azure-native-sdk/v2 v2.88.0 +# github.com/pulumi/pulumi-azure-native-sdk/v2 v2.89.3 ## explicit; go 1.22 github.com/pulumi/pulumi-azure-native-sdk/v2/utilities -# github.com/pulumi/pulumi-command/sdk v1.0.1 -## explicit; go 1.21 +# github.com/pulumi/pulumi-command/sdk v1.0.2 +## explicit; go 1.22 github.com/pulumi/pulumi-command/sdk/go/command/internal github.com/pulumi/pulumi-command/sdk/go/command/remote -# github.com/pulumi/pulumi-docker-build/sdk/go/dockerbuild v0.0.3 -## explicit; go 1.21.7 +# github.com/pulumi/pulumi-docker-build/sdk/go/dockerbuild v0.0.10 +## explicit; go 1.22.0 github.com/pulumi/pulumi-docker-build/sdk/go/dockerbuild github.com/pulumi/pulumi-docker-build/sdk/go/dockerbuild/internal -# github.com/pulumi/pulumi-docker/sdk/v4 v4.5.8 -## explicit; go 1.21 +# github.com/pulumi/pulumi-docker/sdk/v4 v4.6.2 +## explicit; go 1.22 github.com/pulumi/pulumi-docker/sdk/v4/go/docker github.com/pulumi/pulumi-docker/sdk/v4/go/docker/internal -# github.com/pulumi/pulumi-random/sdk/v4 v4.16.7 -## explicit; go 1.21 +# github.com/pulumi/pulumi-random/sdk/v4 v4.18.0 +## explicit; go 1.22 github.com/pulumi/pulumi-random/sdk/v4/go/random github.com/pulumi/pulumi-random/sdk/v4/go/random/internal -# github.com/pulumi/pulumi-tls/sdk/v5 v5.0.9 -## explicit; go 1.21 +# github.com/pulumi/pulumi-tls/sdk/v5 v5.1.1 +## explicit; go 1.22 github.com/pulumi/pulumi-tls/sdk/v5/go/tls github.com/pulumi/pulumi-tls/sdk/v5/go/tls/internal -# github.com/pulumi/pulumi/sdk/v3 v3.154.0 +# github.com/pulumi/pulumi/sdk/v3 v3.157.0 ## explicit; go 1.22 github.com/pulumi/pulumi/sdk/v3 github.com/pulumi/pulumi/sdk/v3/go/auto @@ -711,8 +691,8 @@ github.com/pulumi/pulumi/sdk/v3/python/toolchain # github.com/rivo/uniseg v0.4.7 ## explicit; go 1.18 github.com/rivo/uniseg -# github.com/rogpeppe/go-internal v1.12.0 -## explicit; go 1.20 +# github.com/rogpeppe/go-internal v1.14.1 +## explicit; go 1.23 github.com/rogpeppe/go-internal/internal/syscall/windows github.com/rogpeppe/go-internal/internal/syscall/windows/sysdll github.com/rogpeppe/go-internal/lockedfile @@ -720,12 +700,9 @@ github.com/rogpeppe/go-internal/lockedfile/internal/filelock # github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06 ## 
explicit; go 1.13 github.com/sabhiram/go-gitignore -# github.com/sagikazarmark/locafero v0.4.0 -## explicit; go 1.20 +# github.com/sagikazarmark/locafero v0.8.0 +## explicit; go 1.23.0 github.com/sagikazarmark/locafero -# github.com/sagikazarmark/slog-shim v0.1.0 -## explicit; go 1.20 -github.com/sagikazarmark/slog-shim # github.com/sahilm/fuzzy v0.1.1 ## explicit github.com/sahilm/fuzzy @@ -738,8 +715,8 @@ github.com/sergi/go-diff/diffmatchpatch # github.com/sirupsen/logrus v1.9.3 ## explicit; go 1.13 github.com/sirupsen/logrus -# github.com/skeema/knownhosts v1.3.0 -## explicit; go 1.17 +# github.com/skeema/knownhosts v1.3.1 +## explicit; go 1.22 github.com/skeema/knownhosts # github.com/sourcegraph/conc v0.3.0 ## explicit; go 1.19 @@ -747,12 +724,12 @@ github.com/sourcegraph/conc github.com/sourcegraph/conc/internal/multierror github.com/sourcegraph/conc/iter github.com/sourcegraph/conc/panics -# github.com/spf13/afero v1.11.0 -## explicit; go 1.19 +# github.com/spf13/afero v1.14.0 +## explicit; go 1.23.0 github.com/spf13/afero github.com/spf13/afero/internal/common github.com/spf13/afero/mem -# github.com/spf13/cast v1.6.0 +# github.com/spf13/cast v1.7.1 ## explicit; go 1.19 github.com/spf13/cast # github.com/spf13/cobra v1.9.1 @@ -761,14 +738,10 @@ github.com/spf13/cobra # github.com/spf13/pflag v1.0.6 ## explicit; go 1.12 github.com/spf13/pflag -# github.com/spf13/viper v1.19.0 -## explicit; go 1.20 +# github.com/spf13/viper v1.20.0 +## explicit; go 1.21.0 github.com/spf13/viper -github.com/spf13/viper/internal/encoding github.com/spf13/viper/internal/encoding/dotenv -github.com/spf13/viper/internal/encoding/hcl -github.com/spf13/viper/internal/encoding/ini -github.com/spf13/viper/internal/encoding/javaproperties github.com/spf13/viper/internal/encoding/json github.com/spf13/viper/internal/encoding/toml github.com/spf13/viper/internal/encoding/yaml @@ -803,7 +776,7 @@ github.com/xanzy/ssh-agent # github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e ## explicit; go 1.19 github.com/xo/terminfo -# github.com/zclconf/go-cty v1.15.0 +# github.com/zclconf/go-cty v1.16.2 ## explicit; go 1.18 github.com/zclconf/go-cty/cty github.com/zclconf/go-cty/cty/convert @@ -837,16 +810,12 @@ golang.org/x/crypto/ssh golang.org/x/crypto/ssh/agent golang.org/x/crypto/ssh/internal/bcrypt_pbkdf golang.org/x/crypto/ssh/knownhosts -# golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 -## explicit; go 1.22.0 -golang.org/x/exp/constraints +# golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 +## explicit; go 1.23.0 golang.org/x/exp/maps golang.org/x/exp/slices -golang.org/x/exp/slog -golang.org/x/exp/slog/internal -golang.org/x/exp/slog/internal/buffer -# golang.org/x/mod v0.21.0 -## explicit; go 1.22.0 +# golang.org/x/mod v0.24.0 +## explicit; go 1.23.0 golang.org/x/mod/semver # golang.org/x/net v0.37.0 ## explicit; go 1.23.0 @@ -886,12 +855,13 @@ golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm -# golang.org/x/tools v0.25.0 -## explicit; go 1.22.0 +# golang.org/x/tools v0.31.0 +## explicit; go 1.23.0 golang.org/x/tools/cmd/stringer golang.org/x/tools/go/gcexportdata golang.org/x/tools/go/packages golang.org/x/tools/go/types/objectpath +golang.org/x/tools/go/types/typeutil golang.org/x/tools/internal/aliases golang.org/x/tools/internal/event golang.org/x/tools/internal/event/core @@ -902,21 +872,24 @@ golang.org/x/tools/internal/gocommand golang.org/x/tools/internal/packagesinternal golang.org/x/tools/internal/pkgbits 
golang.org/x/tools/internal/stdlib -golang.org/x/tools/internal/tokeninternal +golang.org/x/tools/internal/typeparams golang.org/x/tools/internal/typesinternal golang.org/x/tools/internal/versions -# google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 -## explicit; go 1.21 +# google.golang.org/genproto/googleapis/rpc v0.0.0-20250313205543-e70fdf4c4cb4 +## explicit; go 1.23.0 google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.67.1 -## explicit; go 1.21 +# google.golang.org/grpc v1.71.0 +## explicit; go 1.22.0 google.golang.org/grpc google.golang.org/grpc/attributes google.golang.org/grpc/backoff google.golang.org/grpc/balancer google.golang.org/grpc/balancer/base +google.golang.org/grpc/balancer/endpointsharding google.golang.org/grpc/balancer/grpclb/state google.golang.org/grpc/balancer/pickfirst +google.golang.org/grpc/balancer/pickfirst/internal +google.golang.org/grpc/balancer/pickfirst/pickfirstleaf google.golang.org/grpc/balancer/roundrobin google.golang.org/grpc/binarylog/grpc_binarylog_v1 google.golang.org/grpc/channelz @@ -946,7 +919,9 @@ google.golang.org/grpc/internal/grpcutil google.golang.org/grpc/internal/idle google.golang.org/grpc/internal/metadata google.golang.org/grpc/internal/pretty +google.golang.org/grpc/internal/proxyattributes google.golang.org/grpc/internal/resolver +google.golang.org/grpc/internal/resolver/delegatingresolver google.golang.org/grpc/internal/resolver/dns google.golang.org/grpc/internal/resolver/dns/internal google.golang.org/grpc/internal/resolver/passthrough @@ -971,7 +946,7 @@ google.golang.org/grpc/serviceconfig google.golang.org/grpc/stats google.golang.org/grpc/status google.golang.org/grpc/tap -# google.golang.org/protobuf v1.35.1 +# google.golang.org/protobuf v1.36.5 ## explicit; go 1.21 google.golang.org/protobuf/encoding/protojson google.golang.org/protobuf/encoding/prototext @@ -994,6 +969,7 @@ google.golang.org/protobuf/internal/genid google.golang.org/protobuf/internal/impl google.golang.org/protobuf/internal/order google.golang.org/protobuf/internal/pragma +google.golang.org/protobuf/internal/protolazy google.golang.org/protobuf/internal/set google.golang.org/protobuf/internal/strs google.golang.org/protobuf/internal/version @@ -1011,9 +987,6 @@ google.golang.org/protobuf/types/known/durationpb google.golang.org/protobuf/types/known/emptypb google.golang.org/protobuf/types/known/structpb google.golang.org/protobuf/types/known/timestamppb -# gopkg.in/ini.v1 v1.67.0 -## explicit -gopkg.in/ini.v1 # gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 ## explicit gopkg.in/tomb.v1 @@ -1023,6 +996,6 @@ gopkg.in/warnings.v0 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 -# lukechampine.com/frand v1.4.2 -## explicit; go 1.12 +# lukechampine.com/frand v1.5.1 +## explicit; go 1.23.1 lukechampine.com/frand From eb643b87f7adee2af86edd046755b2ea84471c6d Mon Sep 17 00:00:00 2001 From: Adrian Riobo Date: Thu, 20 Mar 2025 09:51:47 +0100 Subject: [PATCH 06/11] fix: azure non spot option, check vmsize on current location. 
Fix #432 Signed-off-by: Adrian Riobo --- cmd/mapt/cmd/azure/hosts/constants.go | 2 +- go.mod | 1 + go.sum | 6 + pkg/provider/azure/action/windows/windows.go | 12 +- pkg/provider/azure/data/vmsize.go | 57 + .../compute/armcompute/CHANGELOG.md | 7980 ++++++++++++++ .../compute/armcompute/LICENSE.txt | 21 + .../compute/armcompute/README.md | 88 + .../compute/armcompute/autorest.md | 12 + .../compute/armcompute/build.go | 7 + .../resourcemanager/compute/armcompute/ci.yml | 28 + .../zz_generated_availabilitysets_client.go | 466 + ...erated_capacityreservationgroups_client.go | 418 + ...z_generated_capacityreservations_client.go | 406 + ...ted_cloudserviceoperatingsystems_client.go | 306 + ...erated_cloudserviceroleinstances_client.go | 573 + .../zz_generated_cloudserviceroles_client.go | 183 + .../zz_generated_cloudservices_client.go | 886 ++ ...erated_cloudservicesupdatedomain_client.go | 257 + .../zz_generated_communitygalleries_client.go | 112 + ...generated_communitygalleryimages_client.go | 118 + ...ed_communitygalleryimageversions_client.go | 125 + .../armcompute/zz_generated_constants.go | 2495 +++++ ...zz_generated_dedicatedhostgroups_client.go | 407 + .../zz_generated_dedicatedhosts_client.go | 471 + .../zz_generated_diskaccesses_client.go | 774 ++ .../zz_generated_diskencryptionsets_client.go | 507 + .../zz_generated_diskrestorepoint_client.go | 348 + .../armcompute/zz_generated_disks_client.go | 564 + .../zz_generated_galleries_client.go | 433 + ...zz_generated_galleryapplications_client.go | 397 + ...rated_galleryapplicationversions_client.go | 427 + .../zz_generated_galleryimages_client.go | 396 + ...z_generated_galleryimageversions_client.go | 426 + ..._generated_gallerysharingprofile_client.go | 120 + .../armcompute/zz_generated_images_client.go | 429 + .../zz_generated_loganalytics_client.go | 181 + .../compute/armcompute/zz_generated_models.go | 9540 +++++++++++++++++ .../armcompute/zz_generated_models_serde.go | 3416 ++++++ .../zz_generated_operations_client.go | 98 + ...nerated_proximityplacementgroups_client.go | 405 + .../zz_generated_resourceskus_client.go | 121 + .../armcompute/zz_generated_response_types.go | 1403 +++ ...enerated_restorepointcollections_client.go | 425 + .../zz_generated_restorepoints_client.go | 257 + .../zz_generated_sharedgalleries_client.go | 179 + ...zz_generated_sharedgalleryimages_client.go | 190 + ...rated_sharedgalleryimageversions_client.go | 203 + .../zz_generated_snapshots_client.go | 566 + .../zz_generated_sshpublickeys_client.go | 459 + .../armcompute/zz_generated_time_rfc3339.go | 86 + .../armcompute/zz_generated_usage_client.go | 121 + ...ed_virtualmachineextensionimages_client.go | 246 + ...nerated_virtualmachineextensions_client.go | 387 + ...z_generated_virtualmachineimages_client.go | 376 + ...ted_virtualmachineimagesedgezone_client.go | 401 + ...erated_virtualmachineruncommands_client.go | 522 + .../zz_generated_virtualmachines_client.go | 1639 +++ ...virtualmachinescalesetextensions_client.go | 397 + ...almachinescalesetrollingupgrades_client.go | 310 + ...enerated_virtualmachinescalesets_client.go | 1562 +++ ...rtualmachinescalesetvmextensions_client.go | 412 + ...tualmachinescalesetvmruncommands_client.go | 425 + ...erated_virtualmachinescalesetvms_client.go | 1155 ++ ...zz_generated_virtualmachinesizes_client.go | 115 + vendor/modules.txt | 3 + 66 files changed, 45856 insertions(+), 2 deletions(-) create mode 100644 pkg/provider/azure/data/vmsize.go create mode 100644 
vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/CHANGELOG.md create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/LICENSE.txt create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/README.md create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/autorest.md create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/build.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/ci.yml create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_availabilitysets_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_capacityreservationgroups_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_capacityreservations_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_cloudserviceoperatingsystems_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_cloudserviceroleinstances_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_cloudserviceroles_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_cloudservices_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_cloudservicesupdatedomain_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_communitygalleries_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_communitygalleryimages_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_communitygalleryimageversions_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_constants.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_dedicatedhostgroups_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_dedicatedhosts_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_diskaccesses_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_diskencryptionsets_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_diskrestorepoint_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_disks_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_galleries_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_galleryapplications_client.go create mode 100644 
vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_galleryapplicationversions_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_galleryimages_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_galleryimageversions_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_gallerysharingprofile_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_images_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_loganalytics_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_models.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_models_serde.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_operations_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_proximityplacementgroups_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_resourceskus_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_response_types.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_restorepointcollections_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_restorepoints_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_sharedgalleries_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_sharedgalleryimages_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_sharedgalleryimageversions_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_snapshots_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_sshpublickeys_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_time_rfc3339.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_usage_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_virtualmachineextensionimages_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_virtualmachineextensions_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_virtualmachineimages_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_virtualmachineimagesedgezone_client.go create mode 100644 
vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_virtualmachineruncommands_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_virtualmachines_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_virtualmachinescalesetextensions_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_virtualmachinescalesetrollingupgrades_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_virtualmachinescalesets_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_virtualmachinescalesetvmextensions_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_virtualmachinescalesetvmruncommands_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_virtualmachinescalesetvms_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_virtualmachinesizes_client.go diff --git a/cmd/mapt/cmd/azure/hosts/constants.go b/cmd/mapt/cmd/azure/hosts/constants.go index cd034e930..d12be725e 100644 --- a/cmd/mapt/cmd/azure/hosts/constants.go +++ b/cmd/mapt/cmd/azure/hosts/constants.go @@ -3,7 +3,7 @@ package hosts const ( paramLocation = "location" paramLocationDesc = "If spot is passed location will be calculated based on spot results. Otherwise localtion will be used to create resources." - defaultLocation = "West US" + defaultLocation = "westeurope" paramVMSize = "vmsize" paramVMSizeDesc = "set specific size for the VM and ignore any CPUs, Memory and Arch parameters set. 
Type requires to allow nested virtualization" paramUsername = "username" diff --git a/go.mod b/go.mod index 2f350a075..664be339e 100644 --- a/go.mod +++ b/go.mod @@ -124,6 +124,7 @@ require ( ) require ( + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute v1.0.0 github.com/Microsoft/go-winio v0.6.2 // indirect github.com/ProtonMail/go-crypto v1.1.6 // indirect github.com/aws/aws-sdk-go-v2/service/ecs v1.54.2 diff --git a/go.sum b/go.sum index 1f112ef77..8be7b7118 100644 --- a/go.sum +++ b/go.sum @@ -8,10 +8,16 @@ github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2 h1:yz1bePFlP5Vws5+ github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2/go.mod h1:Pa9ZNPuoNu/GztvBSKk9J1cDJW6vk/n0zLtV4mgd8N8= github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY= github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute v1.0.0 h1:/Di3vB4sNeQ+7A8efjUVENvyB945Wruvstucqp7ZArg= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute v1.0.0/go.mod h1:gM3K25LQlsET3QR+4V74zxCsFAy0r6xMNN9n80SZn+4= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v6 v6.3.0 h1:Dc9miZr1Mhaqbb3cmJCRokkG16uk8JKkqOADf084zy4= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v6 v6.3.0/go.mod h1:CHo9QYhWEvrKVeXsEMJSl2bpmYYNu6aG12JsSaFBXlY= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal v1.0.0 h1:lMW1lD/17LUA5z1XTURo7LcVG2ICBPlyMHjIUrcFZNQ= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal v1.0.0/go.mod h1:ceIuwmxDWptoW3eCqSXlnPsZFKh4X+R38dWPv7GS9Vs= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v3 v3.1.0 h1:2qsIIvxVT+uE6yrNldntJKlLRgxGbZ85kgtz5SNBhMw= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v3 v3.1.0/go.mod h1:AW8VEadnhw9xox+VaVd9sP7NjzOAnaZBLRH6Tq3cJ38= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork v1.0.0 h1:nBy98uKOIfun5z6wx6jwWLrULcM0+cjBalBFZlEZ7CA= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork v1.0.0/go.mod h1:243D9iHbcQXoFUtgHJwL7gl2zx1aDuDMjvBZVGr2uW0= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resourcegraph/armresourcegraph v0.9.0 h1:zLzoX5+W2l95UJoVwiyNS4dX8vHyQ6x2xRLoBBL9wMk= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resourcegraph/armresourcegraph v0.9.0/go.mod h1:wVEOJfGTj0oPAUGA1JuRAvz/lxXQsWW16axmHPP47Bk= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0 h1:Dd+RhdJn0OTtVGaeDLZpcumkIVCtA/3/Fo42+eoYvVM= diff --git a/pkg/provider/azure/action/windows/windows.go b/pkg/provider/azure/action/windows/windows.go index cd324c12d..662a62af9 100644 --- a/pkg/provider/azure/action/windows/windows.go +++ b/pkg/provider/azure/action/windows/windows.go @@ -18,6 +18,7 @@ import ( "github.com/redhat-developer/mapt/pkg/manager" maptContext "github.com/redhat-developer/mapt/pkg/manager/context" "github.com/redhat-developer/mapt/pkg/provider/azure" + "github.com/redhat-developer/mapt/pkg/provider/azure/data" "github.com/redhat-developer/mapt/pkg/provider/azure/module/network" virtualmachine "github.com/redhat-developer/mapt/pkg/provider/azure/module/virtual-machine" "github.com/redhat-developer/mapt/pkg/provider/util/command" @@ -194,7 +195,16 @@ func (r *WindowsRequest) valuesCheckingSpot() (*string, string, *float64, error) } return &bsc.Location, bsc.VMType, &bsc.Price, nil } - return 
&r.Location, "", nil, nil + // TODO we need to extend this to other azure targets (refactor this function) + // plus we probably would need to check prices for vmsizes and pick the cheapest + availableVMSizes, err := data.FilterVMSizeOfferedByLocation(r.VMSizes, r.Location) + if err != nil { + return nil, "", nil, err + } + if len(availableVMSizes) == 0 { + return nil, "", nil, fmt.Errorf("no vm size matching expectations in the current region") + } + return &r.Location, availableVMSizes[0], nil, nil } // Write exported values in context to files o a selected target folder diff --git a/pkg/provider/azure/data/vmsize.go b/pkg/provider/azure/data/vmsize.go new file mode 100644 index 000000000..001e0ad52 --- /dev/null +++ b/pkg/provider/azure/data/vmsize.go @@ -0,0 +1,57 @@ +package data + +import ( + "context" + "os" + "slices" + + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v6" + "github.com/redhat-developer/mapt/pkg/util" +) + +func IsVMSizeOfferedByLocation(vmSize, location string) (bool, error) { + offerings, err := FilterVMSizeOfferedByLocation([]string{vmSize}, location) + return len(offerings) == 1, err +} + +// Get VM size offerings available in the given location +func FilterVMSizeOfferedByLocation(vmSizes []string, location string) ([]string, error) { + // Create a new Azure credential (uses environment variables or managed identity) + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + return nil, err + } + ctx := context.Background() + subscriptionId := os.Getenv("AZURE_SUBSCRIPTION_ID") + clientFactory, err := armcompute.NewClientFactory(subscriptionId, cred, nil) + if err != nil { + return nil, err + } + client := clientFactory.NewResourceSKUsClient() + p := client.NewListPager(&armcompute.ResourceSKUsClientListOptions{ + Filter: &location, + }) + var offerings []string + for p.More() { + page, err := p.NextPage(ctx) + if err != nil { + return nil, err + } + for _, sku := range page.Value { + if slices.Contains(vmSizes, *sku.Name) && + !slices.Contains(offerings, *sku.Name) { + if slices.Contains( + util.ArrayConvert( + sku.Locations, + func(l *string) string { + return *l + }), + location) { + offerings = append(offerings, *sku.Name) + } + } + } + } + return offerings, nil +}
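A hypothetical caller of the new `data.FilterVMSizeOfferedByLocation` helper added above; the VM size names and location are illustrative assumptions, and the sketch expects `AZURE_SUBSCRIPTION_ID` plus credentials discoverable by `azidentity.NewDefaultAzureCredential` to be present in the environment:

```go
package main

import (
	"fmt"
	"log"

	"github.com/redhat-developer/mapt/pkg/provider/azure/data"
)

func main() {
	// Illustrative candidates; any list of Azure VM size names works here.
	candidates := []string{"Standard_D8s_v4", "Standard_D8as_v4"}
	offered, err := data.FilterVMSizeOfferedByLocation(candidates, "westeurope")
	if err != nil {
		log.Fatal(err)
	}
	if len(offered) == 0 {
		log.Fatal("no candidate VM size is offered in westeurope")
	}
	// valuesCheckingSpot above picks the first offering; price-aware
	// selection is left as a TODO in the patch.
	fmt.Println("using VM size:", offered[0])
}
```

diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/CHANGELOG.md new file mode 100644 index 000000000..051813c3b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/CHANGELOG.md @@ -0,0 +1,7980 @@ +# Release History + +## 1.0.0 (2022-05-16) +### Breaking Changes + +- Function `*VirtualMachinesClient.BeginPowerOff` return value(s) have been changed from `(*armruntime.Poller[VirtualMachinesClientPowerOffResponse], error)` to `(*runtime.Poller[VirtualMachinesClientPowerOffResponse], error)` +- Function `*VirtualMachineScaleSetVMExtensionsClient.BeginDelete` return value(s) have been changed from `(*armruntime.Poller[VirtualMachineScaleSetVMExtensionsClientDeleteResponse], error)` to `(*runtime.Poller[VirtualMachineScaleSetVMExtensionsClientDeleteResponse], error)` +- Function `*VirtualMachineExtensionsClient.BeginUpdate` return value(s) have been changed from `(*armruntime.Poller[VirtualMachineExtensionsClientUpdateResponse], error)` to `(*runtime.Poller[VirtualMachineExtensionsClientUpdateResponse], error)` +- Function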
`*GalleryImagesClient.BeginDelete` return value(s) have been changed from `(*armruntime.Poller[GalleryImagesClientDeleteResponse], error)` to `(*runtime.Poller[GalleryImagesClientDeleteResponse], error)` +- Function `*VirtualMachinesClient.BeginPerformMaintenance` return value(s) have been changed from `(*armruntime.Poller[VirtualMachinesClientPerformMaintenanceResponse], error)` to `(*runtime.Poller[VirtualMachinesClientPerformMaintenanceResponse], error)` +- Function `*GalleryApplicationVersionsClient.BeginDelete` return value(s) have been changed from `(*armruntime.Poller[GalleryApplicationVersionsClientDeleteResponse], error)` to `(*runtime.Poller[GalleryApplicationVersionsClientDeleteResponse], error)` +- Function `*VirtualMachineExtensionsClient.BeginDelete` return value(s) have been changed from `(*armruntime.Poller[VirtualMachineExtensionsClientDeleteResponse], error)` to `(*runtime.Poller[VirtualMachineExtensionsClientDeleteResponse], error)` +- Function `*VirtualMachineScaleSetVMExtensionsClient.BeginCreateOrUpdate` return value(s) have been changed from `(*armruntime.Poller[VirtualMachineScaleSetVMExtensionsClientCreateOrUpdateResponse], error)` to `(*runtime.Poller[VirtualMachineScaleSetVMExtensionsClientCreateOrUpdateResponse], error)` +- Function `*CapacityReservationsClient.BeginDelete` return value(s) have been changed from `(*armruntime.Poller[CapacityReservationsClientDeleteResponse], error)` to `(*runtime.Poller[CapacityReservationsClientDeleteResponse], error)` +- Function `*CloudServicesClient.BeginPowerOff` return value(s) have been changed from `(*armruntime.Poller[CloudServicesClientPowerOffResponse], error)` to `(*runtime.Poller[CloudServicesClientPowerOffResponse], error)` +- Function `*CloudServicesClient.BeginDelete` return value(s) have been changed from `(*armruntime.Poller[CloudServicesClientDeleteResponse], error)` to `(*runtime.Poller[CloudServicesClientDeleteResponse], error)` +- Function `*DiskAccessesClient.BeginUpdate` return value(s) have been changed from `(*armruntime.Poller[DiskAccessesClientUpdateResponse], error)` to `(*runtime.Poller[DiskAccessesClientUpdateResponse], error)` +- Function `*GalleryImageVersionsClient.BeginUpdate` return value(s) have been changed from `(*armruntime.Poller[GalleryImageVersionsClientUpdateResponse], error)` to `(*runtime.Poller[GalleryImageVersionsClientUpdateResponse], error)` +- Function `*DiskAccessesClient.BeginUpdateAPrivateEndpointConnection` return value(s) have been changed from `(*armruntime.Poller[DiskAccessesClientUpdateAPrivateEndpointConnectionResponse], error)` to `(*runtime.Poller[DiskAccessesClientUpdateAPrivateEndpointConnectionResponse], error)` +- Function `*CloudServiceRoleInstancesClient.BeginRebuild` return value(s) have been changed from `(*armruntime.Poller[CloudServiceRoleInstancesClientRebuildResponse], error)` to `(*runtime.Poller[CloudServiceRoleInstancesClientRebuildResponse], error)` +- Function `*DiskAccessesClient.BeginDelete` return value(s) have been changed from `(*armruntime.Poller[DiskAccessesClientDeleteResponse], error)` to `(*runtime.Poller[DiskAccessesClientDeleteResponse], error)` +- Function `*VirtualMachinesClient.BeginDeallocate` return value(s) have been changed from `(*armruntime.Poller[VirtualMachinesClientDeallocateResponse], error)` to `(*runtime.Poller[VirtualMachinesClientDeallocateResponse], error)` +- Function `*RestorePointCollectionsClient.BeginDelete` return value(s) have been changed from `(*armruntime.Poller[RestorePointCollectionsClientDeleteResponse], 
error)` to `(*runtime.Poller[RestorePointCollectionsClientDeleteResponse], error)` +- Function `*DisksClient.BeginRevokeAccess` return value(s) have been changed from `(*armruntime.Poller[DisksClientRevokeAccessResponse], error)` to `(*runtime.Poller[DisksClientRevokeAccessResponse], error)` +- Function `*CapacityReservationsClient.BeginUpdate` return value(s) have been changed from `(*armruntime.Poller[CapacityReservationsClientUpdateResponse], error)` to `(*runtime.Poller[CapacityReservationsClientUpdateResponse], error)` +- Function `*DiskEncryptionSetsClient.BeginUpdate` return value(s) have been changed from `(*armruntime.Poller[DiskEncryptionSetsClientUpdateResponse], error)` to `(*runtime.Poller[DiskEncryptionSetsClientUpdateResponse], error)` +- Function `*GalleryImageVersionsClient.BeginDelete` return value(s) have been changed from `(*armruntime.Poller[GalleryImageVersionsClientDeleteResponse], error)` to `(*runtime.Poller[GalleryImageVersionsClientDeleteResponse], error)` +- Function `*DiskEncryptionSetsClient.BeginDelete` return value(s) have been changed from `(*armruntime.Poller[DiskEncryptionSetsClientDeleteResponse], error)` to `(*runtime.Poller[DiskEncryptionSetsClientDeleteResponse], error)` +- Function `*VirtualMachineScaleSetVMRunCommandsClient.BeginDelete` return value(s) have been changed from `(*armruntime.Poller[VirtualMachineScaleSetVMRunCommandsClientDeleteResponse], error)` to `(*runtime.Poller[VirtualMachineScaleSetVMRunCommandsClientDeleteResponse], error)` +- Function `*DisksClient.BeginCreateOrUpdate` return value(s) have been changed from `(*armruntime.Poller[DisksClientCreateOrUpdateResponse], error)` to `(*runtime.Poller[DisksClientCreateOrUpdateResponse], error)` +- Function `*VirtualMachineScaleSetVMRunCommandsClient.BeginUpdate` return value(s) have been changed from `(*armruntime.Poller[VirtualMachineScaleSetVMRunCommandsClientUpdateResponse], error)` to `(*runtime.Poller[VirtualMachineScaleSetVMRunCommandsClientUpdateResponse], error)` +- Function `*VirtualMachineRunCommandsClient.BeginCreateOrUpdate` return value(s) have been changed from `(*armruntime.Poller[VirtualMachineRunCommandsClientCreateOrUpdateResponse], error)` to `(*runtime.Poller[VirtualMachineRunCommandsClientCreateOrUpdateResponse], error)` +- Function `*VirtualMachineScaleSetVMsClient.BeginRedeploy` return value(s) have been changed from `(*armruntime.Poller[VirtualMachineScaleSetVMsClientRedeployResponse], error)` to `(*runtime.Poller[VirtualMachineScaleSetVMsClientRedeployResponse], error)` +- Function `*DisksClient.BeginUpdate` return value(s) have been changed from `(*armruntime.Poller[DisksClientUpdateResponse], error)` to `(*runtime.Poller[DisksClientUpdateResponse], error)` +- Function `*VirtualMachineScaleSetsClient.BeginRestart` return value(s) have been changed from `(*armruntime.Poller[VirtualMachineScaleSetsClientRestartResponse], error)` to `(*runtime.Poller[VirtualMachineScaleSetsClientRestartResponse], error)` +- Function `*GalleryImagesClient.BeginCreateOrUpdate` return value(s) have been changed from `(*armruntime.Poller[GalleryImagesClientCreateOrUpdateResponse], error)` to `(*runtime.Poller[GalleryImagesClientCreateOrUpdateResponse], error)` +- Function `*VirtualMachineScaleSetRollingUpgradesClient.BeginStartExtensionUpgrade` return value(s) have been changed from `(*armruntime.Poller[VirtualMachineScaleSetRollingUpgradesClientStartExtensionUpgradeResponse], error)` to `(*runtime.Poller[VirtualMachineScaleSetRollingUpgradesClientStartExtensionUpgradeResponse], 
error)` +- Function `*VirtualMachineScaleSetVMsClient.BeginDelete` return value(s) have been changed from `(*armruntime.Poller[VirtualMachineScaleSetVMsClientDeleteResponse], error)` to `(*runtime.Poller[VirtualMachineScaleSetVMsClientDeleteResponse], error)` +- Function `*VirtualMachineScaleSetVMsClient.BeginRunCommand` return value(s) have been changed from `(*armruntime.Poller[VirtualMachineScaleSetVMsClientRunCommandResponse], error)` to `(*runtime.Poller[VirtualMachineScaleSetVMsClientRunCommandResponse], error)` +- Function `*VirtualMachineScaleSetVMsClient.BeginReimageAll` return value(s) have been changed from `(*armruntime.Poller[VirtualMachineScaleSetVMsClientReimageAllResponse], error)` to `(*runtime.Poller[VirtualMachineScaleSetVMsClientReimageAllResponse], error)` +- Function `*GalleryImageVersionsClient.BeginCreateOrUpdate` return value(s) have been changed from `(*armruntime.Poller[GalleryImageVersionsClientCreateOrUpdateResponse], error)` to `(*runtime.Poller[GalleryImageVersionsClientCreateOrUpdateResponse], error)` +- Function `*VirtualMachineScaleSetsClient.BeginUpdate` return value(s) have been changed from `(*armruntime.Poller[VirtualMachineScaleSetsClientUpdateResponse], error)` to `(*runtime.Poller[VirtualMachineScaleSetsClientUpdateResponse], error)` +- Function `*RestorePointsClient.BeginDelete` return value(s) have been changed from `(*armruntime.Poller[RestorePointsClientDeleteResponse], error)` to `(*runtime.Poller[RestorePointsClientDeleteResponse], error)` +- Function `*VirtualMachineScaleSetVMsClient.BeginStart` return value(s) have been changed from `(*armruntime.Poller[VirtualMachineScaleSetVMsClientStartResponse], error)` to `(*runtime.Poller[VirtualMachineScaleSetVMsClientStartResponse], error)` +- Function `*GalleriesClient.BeginCreateOrUpdate` return value(s) have been changed from `(*armruntime.Poller[GalleriesClientCreateOrUpdateResponse], error)` to `(*runtime.Poller[GalleriesClientCreateOrUpdateResponse], error)` +- Function `*DiskAccessesClient.BeginCreateOrUpdate` return value(s) have been changed from `(*armruntime.Poller[DiskAccessesClientCreateOrUpdateResponse], error)` to `(*runtime.Poller[DiskAccessesClientCreateOrUpdateResponse], error)` +- Function `*GalleryApplicationVersionsClient.BeginCreateOrUpdate` return value(s) have been changed from `(*armruntime.Poller[GalleryApplicationVersionsClientCreateOrUpdateResponse], error)` to `(*runtime.Poller[GalleryApplicationVersionsClientCreateOrUpdateResponse], error)` +- Function `*DiskAccessesClient.BeginDeleteAPrivateEndpointConnection` return value(s) have been changed from `(*armruntime.Poller[DiskAccessesClientDeleteAPrivateEndpointConnectionResponse], error)` to `(*runtime.Poller[DiskAccessesClientDeleteAPrivateEndpointConnectionResponse], error)` +- Function `*CloudServicesUpdateDomainClient.BeginWalkUpdateDomain` return value(s) have been changed from `(*armruntime.Poller[CloudServicesUpdateDomainClientWalkUpdateDomainResponse], error)` to `(*runtime.Poller[CloudServicesUpdateDomainClientWalkUpdateDomainResponse], error)` +- Function `*DedicatedHostsClient.BeginRestart` return value(s) have been changed from `(*armruntime.Poller[DedicatedHostsClientRestartResponse], error)` to `(*runtime.Poller[DedicatedHostsClientRestartResponse], error)` +- Function `*GalleriesClient.BeginDelete` return value(s) have been changed from `(*armruntime.Poller[GalleriesClientDeleteResponse], error)` to `(*runtime.Poller[GalleriesClientDeleteResponse], error)` +- Function 
`*GalleryApplicationVersionsClient.BeginUpdate` return value(s) have been changed from `(*armruntime.Poller[GalleryApplicationVersionsClientUpdateResponse], error)` to `(*runtime.Poller[GalleryApplicationVersionsClientUpdateResponse], error)`
+- Function `*GalleryImagesClient.BeginUpdate` return value(s) have been changed from `(*armruntime.Poller[GalleryImagesClientUpdateResponse], error)` to `(*runtime.Poller[GalleryImagesClientUpdateResponse], error)`
+- Function `*CloudServicesClient.BeginCreateOrUpdate` return value(s) have been changed from `(*armruntime.Poller[CloudServicesClientCreateOrUpdateResponse], error)` to `(*runtime.Poller[CloudServicesClientCreateOrUpdateResponse], error)`
+- Function `*SnapshotsClient.BeginRevokeAccess` return value(s) have been changed from `(*armruntime.Poller[SnapshotsClientRevokeAccessResponse], error)` to `(*runtime.Poller[SnapshotsClientRevokeAccessResponse], error)`
+- Function `*SnapshotsClient.BeginGrantAccess` return value(s) have been changed from `(*armruntime.Poller[SnapshotsClientGrantAccessResponse], error)` to `(*runtime.Poller[SnapshotsClientGrantAccessResponse], error)`
+- Function `*VirtualMachineScaleSetsClient.BeginCreateOrUpdate` return value(s) have been changed from `(*armruntime.Poller[VirtualMachineScaleSetsClientCreateOrUpdateResponse], error)` to `(*runtime.Poller[VirtualMachineScaleSetsClientCreateOrUpdateResponse], error)`
+- Function `*LogAnalyticsClient.BeginExportThrottledRequests` return value(s) have been changed from `(*armruntime.Poller[LogAnalyticsClientExportThrottledRequestsResponse], error)` to `(*runtime.Poller[LogAnalyticsClientExportThrottledRequestsResponse], error)`
+- Function `*VirtualMachineScaleSetVMsClient.BeginReimage` return value(s) have been changed from `(*armruntime.Poller[VirtualMachineScaleSetVMsClientReimageResponse], error)` to `(*runtime.Poller[VirtualMachineScaleSetVMsClientReimageResponse], error)`
+- Function `*CapacityReservationsClient.BeginCreateOrUpdate` return value(s) have been changed from `(*armruntime.Poller[CapacityReservationsClientCreateOrUpdateResponse], error)` to `(*runtime.Poller[CapacityReservationsClientCreateOrUpdateResponse], error)`
+- Function `*CloudServicesClient.BeginReimage` return value(s) have been changed from `(*armruntime.Poller[CloudServicesClientReimageResponse], error)` to `(*runtime.Poller[CloudServicesClientReimageResponse], error)`
+- Function `*VirtualMachineScaleSetsClient.BeginDeallocate` return value(s) have been changed from `(*armruntime.Poller[VirtualMachineScaleSetsClientDeallocateResponse], error)` to `(*runtime.Poller[VirtualMachineScaleSetsClientDeallocateResponse], error)`
+- Function `*VirtualMachinesClient.BeginConvertToManagedDisks` return value(s) have been changed from `(*armruntime.Poller[VirtualMachinesClientConvertToManagedDisksResponse], error)` to `(*runtime.Poller[VirtualMachinesClientConvertToManagedDisksResponse], error)`
+- Function `*ImagesClient.BeginCreateOrUpdate` return value(s) have been changed from `(*armruntime.Poller[ImagesClientCreateOrUpdateResponse], error)` to `(*runtime.Poller[ImagesClientCreateOrUpdateResponse], error)`
+- Function `*VirtualMachineScaleSetsClient.BeginReimage` return value(s) have been changed from `(*armruntime.Poller[VirtualMachineScaleSetsClientReimageResponse], error)` to `(*runtime.Poller[VirtualMachineScaleSetsClientReimageResponse], error)`
+- Function `*VirtualMachinesClient.BeginCapture` return value(s) have been changed from `(*armruntime.Poller[VirtualMachinesClientCaptureResponse], error)` to `(*runtime.Poller[VirtualMachinesClientCaptureResponse], error)`
+- Function `*VirtualMachinesClient.BeginDelete` return value(s) have been changed from `(*armruntime.Poller[VirtualMachinesClientDeleteResponse], error)` to `(*runtime.Poller[VirtualMachinesClientDeleteResponse], error)`
+- Function `*VirtualMachineRunCommandsClient.BeginUpdate` return value(s) have been changed from `(*armruntime.Poller[VirtualMachineRunCommandsClientUpdateResponse], error)` to `(*runtime.Poller[VirtualMachineRunCommandsClientUpdateResponse], error)`
+- Function `*DisksClient.BeginDelete` return value(s) have been changed from `(*armruntime.Poller[DisksClientDeleteResponse], error)` to `(*runtime.Poller[DisksClientDeleteResponse], error)`
+- Function `*GalleryApplicationsClient.BeginCreateOrUpdate` return value(s) have been changed from `(*armruntime.Poller[GalleryApplicationsClientCreateOrUpdateResponse], error)` to `(*runtime.Poller[GalleryApplicationsClientCreateOrUpdateResponse], error)`
+- Function `*VirtualMachinesClient.BeginReimage` return value(s) have been changed from `(*armruntime.Poller[VirtualMachinesClientReimageResponse], error)` to `(*runtime.Poller[VirtualMachinesClientReimageResponse], error)`
+- Function `*DedicatedHostsClient.BeginUpdate` return value(s) have been changed from `(*armruntime.Poller[DedicatedHostsClientUpdateResponse], error)` to `(*runtime.Poller[DedicatedHostsClientUpdateResponse], error)`
+- Function `*VirtualMachinesClient.BeginRunCommand` return value(s) have been changed from `(*armruntime.Poller[VirtualMachinesClientRunCommandResponse], error)` to `(*runtime.Poller[VirtualMachinesClientRunCommandResponse], error)`
+- Function `*DisksClient.BeginGrantAccess` return value(s) have been changed from `(*armruntime.Poller[DisksClientGrantAccessResponse], error)` to `(*runtime.Poller[DisksClientGrantAccessResponse], error)`
+- Function `*VirtualMachineScaleSetsClient.BeginReimageAll` return value(s) have been changed from `(*armruntime.Poller[VirtualMachineScaleSetsClientReimageAllResponse], error)` to `(*runtime.Poller[VirtualMachineScaleSetsClientReimageAllResponse], error)`
+- Function `*VirtualMachineScaleSetsClient.BeginRedeploy` return value(s) have been changed from `(*armruntime.Poller[VirtualMachineScaleSetsClientRedeployResponse], error)` to `(*runtime.Poller[VirtualMachineScaleSetsClientRedeployResponse], error)`
+- Function `*VirtualMachinesClient.BeginUpdate` return value(s) have been changed from `(*armruntime.Poller[VirtualMachinesClientUpdateResponse], error)` to `(*runtime.Poller[VirtualMachinesClientUpdateResponse], error)`
+- Function `*CloudServicesClient.BeginUpdate` return value(s) have been changed from `(*armruntime.Poller[CloudServicesClientUpdateResponse], error)` to `(*runtime.Poller[CloudServicesClientUpdateResponse], error)`
+- Function `*VirtualMachineScaleSetVMsClient.BeginDeallocate` return value(s) have been changed from `(*armruntime.Poller[VirtualMachineScaleSetVMsClientDeallocateResponse], error)` to `(*runtime.Poller[VirtualMachineScaleSetVMsClientDeallocateResponse], error)`
+- Function `*CloudServicesClient.BeginRestart` return value(s) have been changed from `(*armruntime.Poller[CloudServicesClientRestartResponse], error)` to `(*runtime.Poller[CloudServicesClientRestartResponse], error)`
+- Function `*VirtualMachinesClient.BeginStart` return value(s) have been changed from `(*armruntime.Poller[VirtualMachinesClientStartResponse], error)` to `(*runtime.Poller[VirtualMachinesClientStartResponse], error)`
+- Function `*RestorePointsClient.BeginCreate` return value(s) have been changed from `(*armruntime.Poller[RestorePointsClientCreateResponse], error)` to `(*runtime.Poller[RestorePointsClientCreateResponse], error)`
+- Function `*VirtualMachineScaleSetVMsClient.BeginRestart` return value(s) have been changed from `(*armruntime.Poller[VirtualMachineScaleSetVMsClientRestartResponse], error)` to `(*runtime.Poller[VirtualMachineScaleSetVMsClientRestartResponse], error)`
+- Function `*GalleriesClient.BeginUpdate` return value(s) have been changed from `(*armruntime.Poller[GalleriesClientUpdateResponse], error)` to `(*runtime.Poller[GalleriesClientUpdateResponse], error)`
+- Function `*VirtualMachineScaleSetsClient.BeginDelete` return value(s) have been changed from `(*armruntime.Poller[VirtualMachineScaleSetsClientDeleteResponse], error)` to `(*runtime.Poller[VirtualMachineScaleSetsClientDeleteResponse], error)`
+- Function `*VirtualMachinesClient.BeginCreateOrUpdate` return value(s) have been changed from `(*armruntime.Poller[VirtualMachinesClientCreateOrUpdateResponse], error)` to `(*runtime.Poller[VirtualMachinesClientCreateOrUpdateResponse], error)`
+- Function `*ImagesClient.BeginDelete` return value(s) have been changed from `(*armruntime.Poller[ImagesClientDeleteResponse], error)` to `(*runtime.Poller[ImagesClientDeleteResponse], error)`
+- Function `*ImagesClient.BeginUpdate` return value(s) have been changed from `(*armruntime.Poller[ImagesClientUpdateResponse], error)` to `(*runtime.Poller[ImagesClientUpdateResponse], error)`
+- Function `*VirtualMachineScaleSetVMRunCommandsClient.BeginCreateOrUpdate` return value(s) have been changed from `(*armruntime.Poller[VirtualMachineScaleSetVMRunCommandsClientCreateOrUpdateResponse], error)` to `(*runtime.Poller[VirtualMachineScaleSetVMRunCommandsClientCreateOrUpdateResponse], error)`
+- Function `*VirtualMachinesClient.BeginReapply` return value(s) have been changed from `(*armruntime.Poller[VirtualMachinesClientReapplyResponse], error)` to `(*runtime.Poller[VirtualMachinesClientReapplyResponse], error)`
+- Function `*DiskRestorePointClient.BeginGrantAccess` return value(s) have been changed from `(*armruntime.Poller[DiskRestorePointClientGrantAccessResponse], error)` to `(*runtime.Poller[DiskRestorePointClientGrantAccessResponse], error)`
+- Function `*VirtualMachineRunCommandsClient.BeginDelete` return value(s) have been changed from `(*armruntime.Poller[VirtualMachineRunCommandsClientDeleteResponse], error)` to `(*runtime.Poller[VirtualMachineRunCommandsClientDeleteResponse], error)`
+- Function `*CloudServicesClient.BeginRebuild` return value(s) have been changed from `(*armruntime.Poller[CloudServicesClientRebuildResponse], error)` to `(*runtime.Poller[CloudServicesClientRebuildResponse], error)`
+- Function `*VirtualMachinesClient.BeginAssessPatches` return value(s) have been changed from `(*armruntime.Poller[VirtualMachinesClientAssessPatchesResponse], error)` to `(*runtime.Poller[VirtualMachinesClientAssessPatchesResponse], error)`
+- Function `*VirtualMachineScaleSetsClient.BeginPerformMaintenance` return value(s) have been changed from `(*armruntime.Poller[VirtualMachineScaleSetsClientPerformMaintenanceResponse], error)` to `(*runtime.Poller[VirtualMachineScaleSetsClientPerformMaintenanceResponse], error)`
+- Function `*VirtualMachinesClient.BeginRedeploy` return value(s) have been changed from `(*armruntime.Poller[VirtualMachinesClientRedeployResponse], error)` to `(*runtime.Poller[VirtualMachinesClientRedeployResponse], error)`
+- Function `*SnapshotsClient.BeginUpdate` return value(s) have been changed from `(*armruntime.Poller[SnapshotsClientUpdateResponse], error)` to `(*runtime.Poller[SnapshotsClientUpdateResponse], error)`
+- Function `*SnapshotsClient.BeginCreateOrUpdate` return value(s) have been changed from `(*armruntime.Poller[SnapshotsClientCreateOrUpdateResponse], error)` to `(*runtime.Poller[SnapshotsClientCreateOrUpdateResponse], error)`
+- Function `*VirtualMachinesClient.BeginInstallPatches` return value(s) have been changed from `(*armruntime.Poller[VirtualMachinesClientInstallPatchesResponse], error)` to `(*runtime.Poller[VirtualMachinesClientInstallPatchesResponse], error)`
+- Function `*VirtualMachineScaleSetVMExtensionsClient.BeginUpdate` return value(s) have been changed from `(*armruntime.Poller[VirtualMachineScaleSetVMExtensionsClientUpdateResponse], error)` to `(*runtime.Poller[VirtualMachineScaleSetVMExtensionsClientUpdateResponse], error)`
+- Function `*CloudServicesClient.BeginDeleteInstances` return value(s) have been changed from `(*armruntime.Poller[CloudServicesClientDeleteInstancesResponse], error)` to `(*runtime.Poller[CloudServicesClientDeleteInstancesResponse], error)`
+- Function `*CloudServiceRoleInstancesClient.BeginRestart` return value(s) have been changed from `(*armruntime.Poller[CloudServiceRoleInstancesClientRestartResponse], error)` to `(*runtime.Poller[CloudServiceRoleInstancesClientRestartResponse], error)`
+- Function `*VirtualMachineScaleSetVMsClient.BeginPowerOff` return value(s) have been changed from `(*armruntime.Poller[VirtualMachineScaleSetVMsClientPowerOffResponse], error)` to `(*runtime.Poller[VirtualMachineScaleSetVMsClientPowerOffResponse], error)`
+- Function `*VirtualMachineExtensionsClient.BeginCreateOrUpdate` return value(s) have been changed from `(*armruntime.Poller[VirtualMachineExtensionsClientCreateOrUpdateResponse], error)` to `(*runtime.Poller[VirtualMachineExtensionsClientCreateOrUpdateResponse], error)`
+- Function `*VirtualMachineScaleSetsClient.BeginDeleteInstances` return value(s) have been changed from `(*armruntime.Poller[VirtualMachineScaleSetsClientDeleteInstancesResponse], error)` to `(*runtime.Poller[VirtualMachineScaleSetsClientDeleteInstancesResponse], error)`
+- Function `*GalleryApplicationsClient.BeginUpdate` return value(s) have been changed from `(*armruntime.Poller[GalleryApplicationsClientUpdateResponse], error)` to `(*runtime.Poller[GalleryApplicationsClientUpdateResponse], error)`
+- Function `*VirtualMachineScaleSetsClient.BeginUpdateInstances` return value(s) have been changed from `(*armruntime.Poller[VirtualMachineScaleSetsClientUpdateInstancesResponse], error)` to `(*runtime.Poller[VirtualMachineScaleSetsClientUpdateInstancesResponse], error)`
+- Function `*CloudServiceRoleInstancesClient.BeginReimage` return value(s) have been changed from `(*armruntime.Poller[CloudServiceRoleInstancesClientReimageResponse], error)` to `(*runtime.Poller[CloudServiceRoleInstancesClientReimageResponse], error)`
+- Function `*LogAnalyticsClient.BeginExportRequestRateByInterval` return value(s) have been changed from `(*armruntime.Poller[LogAnalyticsClientExportRequestRateByIntervalResponse], error)` to `(*runtime.Poller[LogAnalyticsClientExportRequestRateByIntervalResponse], error)`
+- Function `*VirtualMachineScaleSetsClient.BeginPowerOff` return value(s) have been changed from `(*armruntime.Poller[VirtualMachineScaleSetsClientPowerOffResponse], error)` to `(*runtime.Poller[VirtualMachineScaleSetsClientPowerOffResponse], error)`
+- Function `*VirtualMachineScaleSetVMsClient.BeginPerformMaintenance` return value(s) have been changed from `(*armruntime.Poller[VirtualMachineScaleSetVMsClientPerformMaintenanceResponse], error)` to `(*runtime.Poller[VirtualMachineScaleSetVMsClientPerformMaintenanceResponse], error)`
+- Function `*DiskRestorePointClient.BeginRevokeAccess` return value(s) have been changed from `(*armruntime.Poller[DiskRestorePointClientRevokeAccessResponse], error)` to `(*runtime.Poller[DiskRestorePointClientRevokeAccessResponse], error)`
+- Function `*VirtualMachineScaleSetsClient.BeginSetOrchestrationServiceState` return value(s) have been changed from `(*armruntime.Poller[VirtualMachineScaleSetsClientSetOrchestrationServiceStateResponse], error)` to `(*runtime.Poller[VirtualMachineScaleSetsClientSetOrchestrationServiceStateResponse], error)`
+- Function `*VirtualMachineScaleSetExtensionsClient.BeginDelete` return value(s) have been changed from `(*armruntime.Poller[VirtualMachineScaleSetExtensionsClientDeleteResponse], error)` to `(*runtime.Poller[VirtualMachineScaleSetExtensionsClientDeleteResponse], error)`
+- Function `*VirtualMachineScaleSetExtensionsClient.BeginCreateOrUpdate` return value(s) have been changed from `(*armruntime.Poller[VirtualMachineScaleSetExtensionsClientCreateOrUpdateResponse], error)` to `(*runtime.Poller[VirtualMachineScaleSetExtensionsClientCreateOrUpdateResponse], error)`
+- Function `*SnapshotsClient.BeginDelete` return value(s) have been changed from `(*armruntime.Poller[SnapshotsClientDeleteResponse], error)` to `(*runtime.Poller[SnapshotsClientDeleteResponse], error)`
+- Function `*GallerySharingProfileClient.BeginUpdate` return value(s) have been changed from `(*armruntime.Poller[GallerySharingProfileClientUpdateResponse], error)` to `(*runtime.Poller[GallerySharingProfileClientUpdateResponse], error)`
+- Function `*GalleryApplicationsClient.BeginDelete` return value(s) have been changed from `(*armruntime.Poller[GalleryApplicationsClientDeleteResponse], error)` to `(*runtime.Poller[GalleryApplicationsClientDeleteResponse], error)`
+- Function `*DedicatedHostsClient.BeginCreateOrUpdate` return value(s) have been changed from `(*armruntime.Poller[DedicatedHostsClientCreateOrUpdateResponse], error)` to `(*runtime.Poller[DedicatedHostsClientCreateOrUpdateResponse], error)`
+- Function `*VirtualMachineScaleSetExtensionsClient.BeginUpdate` return value(s) have been changed from `(*armruntime.Poller[VirtualMachineScaleSetExtensionsClientUpdateResponse], error)` to `(*runtime.Poller[VirtualMachineScaleSetExtensionsClientUpdateResponse], error)`
+- Function `*DiskEncryptionSetsClient.BeginCreateOrUpdate` return value(s) have been changed from `(*armruntime.Poller[DiskEncryptionSetsClientCreateOrUpdateResponse], error)` to `(*runtime.Poller[DiskEncryptionSetsClientCreateOrUpdateResponse], error)`
+- Function `*VirtualMachinesClient.BeginRestart` return value(s) have been changed from `(*armruntime.Poller[VirtualMachinesClientRestartResponse], error)` to `(*runtime.Poller[VirtualMachinesClientRestartResponse], error)`
+- Function `*VirtualMachineScaleSetRollingUpgradesClient.BeginStartOSUpgrade` return value(s) have been changed from `(*armruntime.Poller[VirtualMachineScaleSetRollingUpgradesClientStartOSUpgradeResponse], error)` to `(*runtime.Poller[VirtualMachineScaleSetRollingUpgradesClientStartOSUpgradeResponse], error)`
+- Function `*CloudServicesClient.BeginStart` return value(s) have been changed from `(*armruntime.Poller[CloudServicesClientStartResponse], error)` to `(*runtime.Poller[CloudServicesClientStartResponse], error)`
+- Function `*VirtualMachineScaleSetRollingUpgradesClient.BeginCancel` return value(s) have been changed from `(*armruntime.Poller[VirtualMachineScaleSetRollingUpgradesClientCancelResponse], error)` to `(*runtime.Poller[VirtualMachineScaleSetRollingUpgradesClientCancelResponse], error)`
+- Function `*DedicatedHostsClient.BeginDelete` return value(s) have been changed from `(*armruntime.Poller[DedicatedHostsClientDeleteResponse], error)` to `(*runtime.Poller[DedicatedHostsClientDeleteResponse], error)`
+- Function `*VirtualMachineScaleSetVMsClient.BeginUpdate` return value(s) have been changed from `(*armruntime.Poller[VirtualMachineScaleSetVMsClientUpdateResponse], error)` to `(*runtime.Poller[VirtualMachineScaleSetVMsClientUpdateResponse], error)`
+- Function `*CloudServiceRoleInstancesClient.BeginDelete` return value(s) have been changed from `(*armruntime.Poller[CloudServiceRoleInstancesClientDeleteResponse], error)` to `(*runtime.Poller[CloudServiceRoleInstancesClientDeleteResponse], error)`
+- Function `*VirtualMachineScaleSetsClient.BeginStart` return value(s) have been changed from `(*armruntime.Poller[VirtualMachineScaleSetsClientStartResponse], error)` to `(*runtime.Poller[VirtualMachineScaleSetsClientStartResponse], error)`
+- Type of `DiskRestorePointInstanceView.ReplicationStatus` has been changed from `interface{}` to `*DiskRestorePointReplicationStatus`
+- Type of `DiskRestorePointReplicationStatus.Status` has been changed from `interface{}` to `*InstanceViewStatus`
+- Function `DedicatedHostGroupListResult.MarshalJSON` has been removed
+- Function `ResourceSKURestrictions.MarshalJSON` has been removed
+- Function `RunCommandListResult.MarshalJSON` has been removed
+- Function `GalleryApplicationVersionList.MarshalJSON` has been removed
+- Function `CommunityGalleryImageProperties.MarshalJSON` has been removed
+- Function `VirtualMachineListResult.MarshalJSON` has been removed
+- Function `VirtualMachineAssessPatchesResult.MarshalJSON` has been removed
+- Function `VirtualMachineScaleSetListResult.MarshalJSON` has been removed
+- Function `VirtualMachineExtensionsListResult.MarshalJSON` has been removed
+- Function `GalleryImageList.MarshalJSON` has been removed
+- Function `DiskRestorePointList.MarshalJSON` has been removed
+- Function `VirtualMachineRunCommandsListResult.MarshalJSON` has been removed
+- Function `DiskRestorePointProperties.MarshalJSON` has been removed
+- Function `VirtualMachineScaleSetInstanceView.MarshalJSON` has been removed
+- Function `ResourceSKUZoneDetails.MarshalJSON` has been removed
+- Function `VirtualMachineSoftwarePatchProperties.MarshalJSON` has been removed
+- Function `RoleInstance.MarshalJSON` has been removed
+- Function `GalleryApplicationList.MarshalJSON` has been removed
+- Function `GalleryList.MarshalJSON` has been removed
+- Function `AvailabilitySetListResult.MarshalJSON` has been removed
+- Function `DiskEncryptionSetList.MarshalJSON` has been removed
+- Function `PrivateLinkResourceProperties.MarshalJSON` has been removed
+- Function `RoleInstanceView.MarshalJSON` has been removed
+- Function `RoleInstanceListResult.MarshalJSON` has been removed
+- Function `SharedGalleryImageVersionProperties.MarshalJSON` has been removed
+- Function `VirtualMachineSizeListResult.MarshalJSON` has been removed
+- Function `ResourceSKUsResult.MarshalJSON` has been removed
+- Function `SnapshotList.MarshalJSON` has been removed
+- Function `RoleInstanceNetworkProfile.MarshalJSON` has been removed
+- Function `ResourceSKURestrictionInfo.MarshalJSON` has been removed
+- Function `VirtualMachineInstallPatchesResult.MarshalJSON` has been removed
+- Function `SharedGalleryImageProperties.MarshalJSON` has been removed
+- Function `CommunityGalleryImageVersionProperties.MarshalJSON` has been removed
+- Function `VirtualMachineScaleSetVMListResult.MarshalJSON` has been removed
+- Function `SharedGalleryImageVersionList.MarshalJSON` has been removed
+- Function `ResourceURIList.MarshalJSON` has been removed
+- Function `OSVersionListResult.MarshalJSON` has been removed
+- Function `ResourceInstanceViewStatus.MarshalJSON` has been removed
+- Function `RunCommandResult.MarshalJSON` has been removed
+- Function `VirtualMachineScaleSetListOSUpgradeHistory.MarshalJSON` has been removed
+- Function `VirtualMachineScaleSetListWithLinkResult.MarshalJSON` has been removed
+- Function `PatchInstallationDetail.MarshalJSON` has been removed
+- Function `UpgradeOperationHistoryStatus.MarshalJSON` has been removed
+- Function `VirtualMachineScaleSetListSKUsResult.MarshalJSON` has been removed
+- Function `CloudServiceInstanceView.MarshalJSON` has been removed
+- Function `DedicatedHostListResult.MarshalJSON` has been removed
+- Function `RestorePointCollectionListResult.MarshalJSON` has been removed
+- Function `VirtualMachineScaleSetVMExtensionsSummary.MarshalJSON` has been removed
+- Function `PrivateLinkResourceListResult.MarshalJSON` has been removed
+- Function `SharedGalleryImageList.MarshalJSON` has been removed
+- Function `OSFamilyProperties.MarshalJSON` has been removed
+- Function `CapacityReservationGroupListResult.MarshalJSON` has been removed
+- Function `ProximityPlacementGroupListResult.MarshalJSON` has been removed
+- Function `PrivateEndpointConnectionListResult.MarshalJSON` has been removed
+- Function `ResourceSKU.MarshalJSON` has been removed
+- Function `OperationListResult.MarshalJSON` has been removed
+- Function `CapacityReservationListResult.MarshalJSON` has been removed
+- Function `OSFamilyListResult.MarshalJSON` has been removed
+- Function `VirtualMachineScaleSetInstanceViewStatusesSummary.MarshalJSON` has been removed
+- Function `VirtualMachineScaleSetExtensionListResult.MarshalJSON` has been removed
+- Function `InstanceViewStatusesSummary.MarshalJSON` has been removed
+- Function `CloudServiceRoleListResult.MarshalJSON` has been removed
+- Function `ResourceSKULocationInfo.MarshalJSON` has been removed
+- Function `CloudServiceListResult.MarshalJSON` has been removed
+- Function `RunCommandDocument.MarshalJSON` has been removed
+- Function `ListUsagesResult.MarshalJSON` has been removed
+- Function `GalleryImageVersionList.MarshalJSON` has been removed
+- Function `ImageListResult.MarshalJSON` has been removed
+- Function `DiskList.MarshalJSON` has been removed
+- Function `VirtualMachineScaleSetVMExtensionsListResult.MarshalJSON` has been removed
+- Function `CommunityGalleryInfo.MarshalJSON` has been removed
+- Function `DiskAccessList.MarshalJSON` has been removed
+- Function `SharedGalleryList.MarshalJSON` has been removed
+- Function `UpdateDomainListResult.MarshalJSON` has been removed
+- Function `SSHPublicKeysGroupListResult.MarshalJSON` has been removed
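+
+For illustration, a minimal sketch of the call pattern after the `armruntime.Poller` to `runtime.Poller` move, assuming default-credential auth and a placeholder subscription ID, resource group, and VM name; `Begin*` methods now hand back the generic `*runtime.Poller`, which is typically drained with `PollUntilDone`:
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
+	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute"
+)
+
+func main() {
+	cred, err := azidentity.NewDefaultAzureCredential(nil)
+	if err != nil {
+		log.Fatal(err)
+	}
+	// Client constructors return (client, error) since the 0.6.0 release below.
+	client, err := armcompute.NewVirtualMachinesClient("<subscription-id>", cred, nil)
+	if err != nil {
+		log.Fatal(err)
+	}
+	// BeginStart returns *runtime.Poller[VirtualMachinesClientStartResponse].
+	poller, err := client.BeginStart(context.Background(), "myResourceGroup", "myVM", nil)
+	if err != nil {
+		log.Fatal(err)
+	}
+	// PollUntilDone blocks until the long-running operation reaches a terminal state.
+	if _, err = poller.PollUntilDone(context.Background(), nil); err != nil {
+		log.Fatal(err)
+	}
+}
+```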
+
+### Features Added
+
+- New const `LinuxVMGuestPatchAutomaticByPlatformRebootSettingUnknown`
+- New const `LinuxVMGuestPatchAutomaticByPlatformRebootSettingNever`
+- New const `WindowsVMGuestPatchAutomaticByPlatformRebootSettingIfRequired`
+- New const `LinuxVMGuestPatchAutomaticByPlatformRebootSettingIfRequired`
+- New const `WindowsVMGuestPatchAutomaticByPlatformRebootSettingNever`
+- New const `LinuxVMGuestPatchAutomaticByPlatformRebootSettingAlways`
+- New const `StorageAccountTypesPremiumV2LRS`
+- New const `WindowsVMGuestPatchAutomaticByPlatformRebootSettingUnknown`
+- New const `WindowsVMGuestPatchAutomaticByPlatformRebootSettingAlways`
+- New function `ProximityPlacementGroupPropertiesIntent.MarshalJSON() ([]byte, error)`
+- New function `PossibleLinuxVMGuestPatchAutomaticByPlatformRebootSettingValues() []LinuxVMGuestPatchAutomaticByPlatformRebootSetting`
+- New function `PossibleWindowsVMGuestPatchAutomaticByPlatformRebootSettingValues() []WindowsVMGuestPatchAutomaticByPlatformRebootSetting`
+- New function `ResourceWithOptionalLocation.MarshalJSON() ([]byte, error)`
+- New struct `DedicatedHostGroupPropertiesAdditionalCapabilities`
+- New struct `LinuxVMGuestPatchAutomaticByPlatformSettings`
+- New struct `ProximityPlacementGroupPropertiesIntent`
+- New struct `ResourceWithOptionalLocation`
+- New struct `WindowsVMGuestPatchAutomaticByPlatformSettings`
+- New field `Zones` in struct `ProximityPlacementGroup`
+- New field `Intent` in struct `ProximityPlacementGroupProperties`
+- New field `AutomaticByPlatformSettings` in struct `LinuxPatchSettings`
+- New field `AdditionalCapabilities` in struct `DedicatedHostGroupProperties`
+- New field `DeleteOption` in struct `VirtualMachineScaleSetDataDisk`
+- New field `DeleteOption` in struct `VirtualMachineScaleSetOSDisk`
+- New field `DeleteOption` in struct `VirtualMachineScaleSetUpdateOSDisk`
+- New field `AutomaticByPlatformSettings` in struct `PatchSettings`
+- New field `UseRollingUpgradePolicy` in struct `AutomaticOSUpgradePolicy`
+- New field `Identity` in struct `VirtualMachineScaleSetVM`
+- New field `TreatFailureAsDeploymentFailure` in struct `VMGalleryApplication`
+- New field `EnableAutomaticUpgrade` in struct `VMGalleryApplication`
+- New field `CompletionPercent` in struct `DiskRestorePointReplicationStatus`
+
+
+## 0.7.0 (2022-04-15)
+### Breaking Changes
+
+- Function `*DisksClient.ListByResourceGroup` has been removed
+- Function `*VirtualMachinesClient.ListByLocation` has been removed
+- Function `*VirtualMachinesClient.ListAvailableSizes` has been removed
+- Function `*DiskAccessesClient.ListPrivateEndpointConnections` has been removed
+- Function `*RestorePointCollectionsClient.ListAll` has been removed
+- Function `*VirtualMachineScaleSetsClient.List` has been removed
+- Function `*SharedGalleryImagesClient.List` has been removed
+- Function `*CloudServicesUpdateDomainClient.ListUpdateDomains` has been removed
+- Function `*SharedGalleriesClient.List` has been removed
+- Function `*VirtualMachineScaleSetExtensionsClient.List` has been removed
+- Function `*DiskEncryptionSetsClient.ListByResourceGroup` has been removed
+- Function `*VirtualMachineScaleSetsClient.ListAll` has been removed
+- Function `*ProximityPlacementGroupsClient.ListBySubscription` has been removed
+- Function `*DiskEncryptionSetsClient.ListAssociatedResources` has been removed
+- Function `*CloudServiceOperatingSystemsClient.ListOSVersions` has been removed
+- Function `*GalleriesClient.ListByResourceGroup` has been removed
+- Function `*UsageClient.List` has been removed
+- Function `*GalleryImagesClient.ListByGallery` has been removed
+- Function `*GalleriesClient.List` has been removed
+- Function `*CloudServicesClient.List` has been removed
+- Function `*SSHPublicKeysClient.ListByResourceGroup` has been removed
+- Function `*RestorePointCollectionsClient.List` has been removed
+- Function `*VirtualMachineRunCommandsClient.ListByVirtualMachine` has been removed
+- Function `*SnapshotsClient.ListByResourceGroup` has been removed
+- Function `*DiskEncryptionSetsClient.List` has been removed
+- Function `*ResourceSKUsClient.List` has been removed
+- Function `*CloudServiceRolesClient.List` has been removed
+- Function `*DisksClient.List` has been removed
+- Function `*DiskRestorePointClient.ListByRestorePoint` has been removed
+- Function `*ProximityPlacementGroupsClient.ListByResourceGroup` has been removed
+- Function `*CloudServiceOperatingSystemsClient.ListOSFamilies` has been removed
+- Function `*VirtualMachinesClient.ListAll` has been removed
+- Function `*VirtualMachineRunCommandsClient.List` has been removed
+- Function `*DiskAccessesClient.List` has been removed
+- Function `*GalleryApplicationVersionsClient.ListByGalleryApplication` has been removed
+- Function `*AvailabilitySetsClient.List` has been removed
+- Function `*AvailabilitySetsClient.ListBySubscription` has been removed
+- Function `*DedicatedHostGroupsClient.ListByResourceGroup` has been removed
+- Function `*VirtualMachinesClient.List` has been removed
+- Function `*CloudServicesClient.ListAll` has been removed
+- Function `*DedicatedHostsClient.ListByHostGroup` has been removed
+- Function `*ImagesClient.ListByResourceGroup` has been removed
+- Function `*VirtualMachineScaleSetsClient.ListByLocation` has been removed
+- Function `*CapacityReservationsClient.ListByCapacityReservationGroup` has been removed
+- Function `*AvailabilitySetsClient.ListAvailableSizes` has been removed
+- Function `*CloudServiceRoleInstancesClient.List` has been removed
+- Function `*DiskAccessesClient.ListByResourceGroup` has been removed
+- Function `*SharedGalleryImageVersionsClient.List` has been removed
+- Function `*GalleryImageVersionsClient.ListByGalleryImage` has been removed
+- Function `*VirtualMachineScaleSetVMRunCommandsClient.List` has been removed
+- Function `*SSHPublicKeysClient.ListBySubscription` has been removed
+- Function `*GalleryApplicationsClient.ListByGallery` has been removed
+- Function `*OperationsClient.List` has been removed
+- Function `*CapacityReservationGroupsClient.ListBySubscription` has been removed
+- Function `*CapacityReservationGroupsClient.ListByResourceGroup` has been removed
+- Function `*ImagesClient.List` has been removed
+- Function `*VirtualMachineSizesClient.List` has been removed
+- Function `*DedicatedHostGroupsClient.ListBySubscription` has been removed
+- Function `*SnapshotsClient.List` has been removed
+- Function `*VirtualMachineScaleSetVMsClient.List` has been removed
+- Function `*VirtualMachineScaleSetsClient.GetOSUpgradeHistory` has been removed
+- Function `*VirtualMachineScaleSetsClient.ListSKUs` has been removed
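+
+The removed `List*` methods are paired with the `New*Pager` replacements under Features Added below. A minimal sketch of the new pattern, assuming the client and credential setup shown earlier; the pager is iterated with `More`/`NextPage` rather than called with a context directly:
+
+```go
+// Sketch: listing VMs with the pager-based API that replaces the removed
+// List methods. Assumes client is an *armcompute.VirtualMachinesClient.
+pager := client.NewListPager("myResourceGroup", nil)
+for pager.More() {
+	page, err := pager.NextPage(context.Background())
+	if err != nil {
+		log.Fatal(err)
+	}
+	for _, vm := range page.Value {
+		fmt.Println(*vm.Name)
+	}
+}
+```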
+
+### Features Added
+
+- New function `*SharedGalleriesClient.NewListPager(string, *SharedGalleriesClientListOptions) *runtime.Pager[SharedGalleriesClientListResponse]`
+- New function `*VirtualMachinesClient.NewListAvailableSizesPager(string, string, *VirtualMachinesClientListAvailableSizesOptions) *runtime.Pager[VirtualMachinesClientListAvailableSizesResponse]`
+- New function `*VirtualMachineScaleSetsClient.NewListByLocationPager(string, *VirtualMachineScaleSetsClientListByLocationOptions) *runtime.Pager[VirtualMachineScaleSetsClientListByLocationResponse]`
+- New function `*VirtualMachineScaleSetsClient.NewListAllPager(*VirtualMachineScaleSetsClientListAllOptions) *runtime.Pager[VirtualMachineScaleSetsClientListAllResponse]`
+- New function `*VirtualMachineScaleSetsClient.NewListPager(string, *VirtualMachineScaleSetsClientListOptions) *runtime.Pager[VirtualMachineScaleSetsClientListResponse]`
+- New function `*GalleryApplicationVersionsClient.NewListByGalleryApplicationPager(string, string, string, *GalleryApplicationVersionsClientListByGalleryApplicationOptions) *runtime.Pager[GalleryApplicationVersionsClientListByGalleryApplicationResponse]`
+- New function `*GalleryImagesClient.NewListByGalleryPager(string, string, *GalleryImagesClientListByGalleryOptions) *runtime.Pager[GalleryImagesClientListByGalleryResponse]`
+- New function `*ProximityPlacementGroupsClient.NewListBySubscriptionPager(*ProximityPlacementGroupsClientListBySubscriptionOptions) *runtime.Pager[ProximityPlacementGroupsClientListBySubscriptionResponse]`
+- New function `*DiskAccessesClient.NewListPager(*DiskAccessesClientListOptions) *runtime.Pager[DiskAccessesClientListResponse]`
+- New function `*SSHPublicKeysClient.NewListBySubscriptionPager(*SSHPublicKeysClientListBySubscriptionOptions) *runtime.Pager[SSHPublicKeysClientListBySubscriptionResponse]`
+- New function `*DiskRestorePointClient.NewListByRestorePointPager(string, string, string, *DiskRestorePointClientListByRestorePointOptions) *runtime.Pager[DiskRestorePointClientListByRestorePointResponse]`
+- New function `*CloudServicesClient.NewListAllPager(*CloudServicesClientListAllOptions) *runtime.Pager[CloudServicesClientListAllResponse]`
+- New function `*DedicatedHostsClient.NewListByHostGroupPager(string, string, *DedicatedHostsClientListByHostGroupOptions) *runtime.Pager[DedicatedHostsClientListByHostGroupResponse]`
+- New function `*VirtualMachinesClient.NewListByLocationPager(string, *VirtualMachinesClientListByLocationOptions) *runtime.Pager[VirtualMachinesClientListByLocationResponse]`
+- New function `*GalleryApplicationsClient.NewListByGalleryPager(string, string, *GalleryApplicationsClientListByGalleryOptions) *runtime.Pager[GalleryApplicationsClientListByGalleryResponse]`
+- New function `*VirtualMachineScaleSetExtensionsClient.NewListPager(string, string, *VirtualMachineScaleSetExtensionsClientListOptions) *runtime.Pager[VirtualMachineScaleSetExtensionsClientListResponse]`
+- New function `*VirtualMachineSizesClient.NewListPager(string, *VirtualMachineSizesClientListOptions) *runtime.Pager[VirtualMachineSizesClientListResponse]`
+- New function `*SharedGalleryImageVersionsClient.NewListPager(string, string, string, *SharedGalleryImageVersionsClientListOptions) *runtime.Pager[SharedGalleryImageVersionsClientListResponse]`
+- New function `*ImagesClient.NewListByResourceGroupPager(string, *ImagesClientListByResourceGroupOptions) *runtime.Pager[ImagesClientListByResourceGroupResponse]`
+- New function `*VirtualMachineRunCommandsClient.NewListPager(string, *VirtualMachineRunCommandsClientListOptions) *runtime.Pager[VirtualMachineRunCommandsClientListResponse]`
+- New function `*DisksClient.NewListPager(*DisksClientListOptions) *runtime.Pager[DisksClientListResponse]`
+- New function `*VirtualMachineScaleSetVMsClient.NewListPager(string, string, *VirtualMachineScaleSetVMsClientListOptions) *runtime.Pager[VirtualMachineScaleSetVMsClientListResponse]`
+- New function `*CloudServiceRoleInstancesClient.NewListPager(string, string, *CloudServiceRoleInstancesClientListOptions) *runtime.Pager[CloudServiceRoleInstancesClientListResponse]`
+- New function `*SnapshotsClient.NewListPager(*SnapshotsClientListOptions) *runtime.Pager[SnapshotsClientListResponse]`
+- New function `*ImagesClient.NewListPager(*ImagesClientListOptions) *runtime.Pager[ImagesClientListResponse]`
+- New function `*DiskAccessesClient.NewListByResourceGroupPager(string, *DiskAccessesClientListByResourceGroupOptions) *runtime.Pager[DiskAccessesClientListByResourceGroupResponse]`
+- New function `*ResourceSKUsClient.NewListPager(*ResourceSKUsClientListOptions) *runtime.Pager[ResourceSKUsClientListResponse]`
+- New function `*DisksClient.NewListByResourceGroupPager(string, *DisksClientListByResourceGroupOptions) *runtime.Pager[DisksClientListByResourceGroupResponse]`
+- New function `*CloudServicesClient.NewListPager(string, *CloudServicesClientListOptions) *runtime.Pager[CloudServicesClientListResponse]`
+- New function `*AvailabilitySetsClient.NewListAvailableSizesPager(string, string, *AvailabilitySetsClientListAvailableSizesOptions) *runtime.Pager[AvailabilitySetsClientListAvailableSizesResponse]`
+- New function `*CloudServicesUpdateDomainClient.NewListUpdateDomainsPager(string, string, *CloudServicesUpdateDomainClientListUpdateDomainsOptions) *runtime.Pager[CloudServicesUpdateDomainClientListUpdateDomainsResponse]`
+- New function `*ProximityPlacementGroupsClient.NewListByResourceGroupPager(string, *ProximityPlacementGroupsClientListByResourceGroupOptions) *runtime.Pager[ProximityPlacementGroupsClientListByResourceGroupResponse]`
+- New function `*DiskEncryptionSetsClient.NewListPager(*DiskEncryptionSetsClientListOptions) *runtime.Pager[DiskEncryptionSetsClientListResponse]`
+- New function `*OperationsClient.NewListPager(*OperationsClientListOptions) *runtime.Pager[OperationsClientListResponse]`
+- New function `*SnapshotsClient.NewListByResourceGroupPager(string, *SnapshotsClientListByResourceGroupOptions) *runtime.Pager[SnapshotsClientListByResourceGroupResponse]`
+- New function `*DedicatedHostGroupsClient.NewListBySubscriptionPager(*DedicatedHostGroupsClientListBySubscriptionOptions) *runtime.Pager[DedicatedHostGroupsClientListBySubscriptionResponse]`
+- New function `*CapacityReservationsClient.NewListByCapacityReservationGroupPager(string, string, *CapacityReservationsClientListByCapacityReservationGroupOptions) *runtime.Pager[CapacityReservationsClientListByCapacityReservationGroupResponse]`
+- New function `*CapacityReservationGroupsClient.NewListByResourceGroupPager(string, *CapacityReservationGroupsClientListByResourceGroupOptions) *runtime.Pager[CapacityReservationGroupsClientListByResourceGroupResponse]`
+- New function `*CloudServiceOperatingSystemsClient.NewListOSVersionsPager(string, *CloudServiceOperatingSystemsClientListOSVersionsOptions) *runtime.Pager[CloudServiceOperatingSystemsClientListOSVersionsResponse]`
+- New function `*VirtualMachineScaleSetsClient.NewGetOSUpgradeHistoryPager(string, string, *VirtualMachineScaleSetsClientGetOSUpgradeHistoryOptions) *runtime.Pager[VirtualMachineScaleSetsClientGetOSUpgradeHistoryResponse]`
+- New function `*GalleriesClient.NewListByResourceGroupPager(string, *GalleriesClientListByResourceGroupOptions) *runtime.Pager[GalleriesClientListByResourceGroupResponse]`
+- New function `*CloudServiceRolesClient.NewListPager(string, string, *CloudServiceRolesClientListOptions) *runtime.Pager[CloudServiceRolesClientListResponse]`
+- New function `*VirtualMachinesClient.NewListPager(string, *VirtualMachinesClientListOptions) *runtime.Pager[VirtualMachinesClientListResponse]`
+- New function `*DedicatedHostGroupsClient.NewListByResourceGroupPager(string, *DedicatedHostGroupsClientListByResourceGroupOptions) *runtime.Pager[DedicatedHostGroupsClientListByResourceGroupResponse]`
+- New function `*RestorePointCollectionsClient.NewListPager(string, *RestorePointCollectionsClientListOptions) *runtime.Pager[RestorePointCollectionsClientListResponse]`
+- New function `*AvailabilitySetsClient.NewListBySubscriptionPager(*AvailabilitySetsClientListBySubscriptionOptions) *runtime.Pager[AvailabilitySetsClientListBySubscriptionResponse]`
+- New function `*CapacityReservationGroupsClient.NewListBySubscriptionPager(*CapacityReservationGroupsClientListBySubscriptionOptions) *runtime.Pager[CapacityReservationGroupsClientListBySubscriptionResponse]`
+- New function `*RestorePointCollectionsClient.NewListAllPager(*RestorePointCollectionsClientListAllOptions) *runtime.Pager[RestorePointCollectionsClientListAllResponse]`
+- New function `*CloudServiceOperatingSystemsClient.NewListOSFamiliesPager(string, *CloudServiceOperatingSystemsClientListOSFamiliesOptions) *runtime.Pager[CloudServiceOperatingSystemsClientListOSFamiliesResponse]`
+- New function `*SharedGalleryImagesClient.NewListPager(string, string, *SharedGalleryImagesClientListOptions) *runtime.Pager[SharedGalleryImagesClientListResponse]`
+- New function `*GalleriesClient.NewListPager(*GalleriesClientListOptions) *runtime.Pager[GalleriesClientListResponse]`
+- New function `*VirtualMachinesClient.NewListAllPager(*VirtualMachinesClientListAllOptions) *runtime.Pager[VirtualMachinesClientListAllResponse]`
+- New function `*SSHPublicKeysClient.NewListByResourceGroupPager(string, *SSHPublicKeysClientListByResourceGroupOptions) *runtime.Pager[SSHPublicKeysClientListByResourceGroupResponse]`
+- New function `*VirtualMachineScaleSetsClient.NewListSKUsPager(string, string, *VirtualMachineScaleSetsClientListSKUsOptions) *runtime.Pager[VirtualMachineScaleSetsClientListSKUsResponse]`
+- New function `*DiskAccessesClient.NewListPrivateEndpointConnectionsPager(string, string, *DiskAccessesClientListPrivateEndpointConnectionsOptions) *runtime.Pager[DiskAccessesClientListPrivateEndpointConnectionsResponse]`
+- New function `*VirtualMachineRunCommandsClient.NewListByVirtualMachinePager(string, string, *VirtualMachineRunCommandsClientListByVirtualMachineOptions) *runtime.Pager[VirtualMachineRunCommandsClientListByVirtualMachineResponse]`
+- New function `*VirtualMachineScaleSetVMRunCommandsClient.NewListPager(string, string, string, *VirtualMachineScaleSetVMRunCommandsClientListOptions) *runtime.Pager[VirtualMachineScaleSetVMRunCommandsClientListResponse]`
+- New function `*DiskEncryptionSetsClient.NewListByResourceGroupPager(string, *DiskEncryptionSetsClientListByResourceGroupOptions) *runtime.Pager[DiskEncryptionSetsClientListByResourceGroupResponse]`
+- New function `*UsageClient.NewListPager(string, *UsageClientListOptions) *runtime.Pager[UsageClientListResponse]`
+- New function `*DiskEncryptionSetsClient.NewListAssociatedResourcesPager(string, string, *DiskEncryptionSetsClientListAssociatedResourcesOptions) *runtime.Pager[DiskEncryptionSetsClientListAssociatedResourcesResponse]`
+- New function `*GalleryImageVersionsClient.NewListByGalleryImagePager(string, string, string, *GalleryImageVersionsClientListByGalleryImageOptions) *runtime.Pager[GalleryImageVersionsClientListByGalleryImageResponse]`
+- New function `*AvailabilitySetsClient.NewListPager(string, *AvailabilitySetsClientListOptions) *runtime.Pager[AvailabilitySetsClientListResponse]`
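+
+A minimal sketch against the transitional 0.6.0 surface described in the entries below, where for example `ListAvailableSizes` loses its `context.Context` parameter and returns a `*runtime.Pager` directly; the context is supplied per page via `NextPage` (0.7.0 later renames such methods to `New*Pager`). The function name `printSizes` is hypothetical:
+
+```go
+import (
+	"context"
+	"fmt"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute"
+)
+
+// printSizes iterates the available-sizes pager, passing the context to each
+// NextPage call rather than to the method that creates the pager.
+func printSizes(ctx context.Context, client *armcompute.VirtualMachinesClient) error {
+	pager := client.ListAvailableSizes("myResourceGroup", "myVM", nil)
+	for pager.More() {
+		page, err := pager.NextPage(ctx)
+		if err != nil {
+			return err
+		}
+		for _, size := range page.Value {
+			fmt.Println(*size.Name)
+		}
+	}
+	return nil
+}
+```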
+
+
+## 0.6.0 (2022-04-13)
+### Breaking Changes
+
+- Function `*GalleryImagesClient.ListByGallery` return value(s) have been changed from `(*GalleryImagesClientListByGalleryPager)` to `(*runtime.Pager[GalleryImagesClientListByGalleryResponse])`
+- Function `*CapacityReservationsClient.ListByCapacityReservationGroup` return value(s) have been changed from `(*CapacityReservationsClientListByCapacityReservationGroupPager)` to `(*runtime.Pager[CapacityReservationsClientListByCapacityReservationGroupResponse])`
+- Function `NewVirtualMachineScaleSetsClient` return value(s) have been changed from `(*VirtualMachineScaleSetsClient)` to `(*VirtualMachineScaleSetsClient, error)`
+- Function `*VirtualMachineScaleSetsClient.List` return value(s) have been changed from `(*VirtualMachineScaleSetsClientListPager)` to `(*runtime.Pager[VirtualMachineScaleSetsClientListResponse])`
+- Function `*CapacityReservationsClient.BeginCreateOrUpdate` return value(s) have been changed from `(CapacityReservationsClientCreateOrUpdatePollerResponse, error)` to `(*armruntime.Poller[CapacityReservationsClientCreateOrUpdateResponse], error)`
+- Function `*SharedGalleriesClient.List` return value(s) have been changed from `(*SharedGalleriesClientListPager)` to `(*runtime.Pager[SharedGalleriesClientListResponse])`
+- Function `*VirtualMachinesClient.BeginDeallocate` return value(s) have been changed from `(VirtualMachinesClientDeallocatePollerResponse, error)` to `(*armruntime.Poller[VirtualMachinesClientDeallocateResponse], error)`
+- Function `*LogAnalyticsClient.BeginExportThrottledRequests` return value(s) have been changed from `(LogAnalyticsClientExportThrottledRequestsPollerResponse, error)` to `(*armruntime.Poller[LogAnalyticsClientExportThrottledRequestsResponse], error)`
+- Function `*VirtualMachineScaleSetsClient.BeginRestart` return value(s) have been changed from `(VirtualMachineScaleSetsClientRestartPollerResponse, error)` to `(*armruntime.Poller[VirtualMachineScaleSetsClientRestartResponse], error)`
+- Function `*VirtualMachinesClient.BeginAssessPatches` return value(s) have been changed from `(VirtualMachinesClientAssessPatchesPollerResponse, error)` to `(*armruntime.Poller[VirtualMachinesClientAssessPatchesResponse], error)`
+- Function `*RestorePointCollectionsClient.List` return value(s) have been changed from `(*RestorePointCollectionsClientListPager)` to `(*runtime.Pager[RestorePointCollectionsClientListResponse])`
+- Function `*CloudServiceOperatingSystemsClient.ListOSVersions` return value(s) have been changed from `(*CloudServiceOperatingSystemsClientListOSVersionsPager)` to `(*runtime.Pager[CloudServiceOperatingSystemsClientListOSVersionsResponse])`
+- Function `*VirtualMachineScaleSetsClient.ListAll` return value(s) have been changed from `(*VirtualMachineScaleSetsClientListAllPager)` to `(*runtime.Pager[VirtualMachineScaleSetsClientListAllResponse])`
+- Function `NewDedicatedHostGroupsClient` return value(s) have been changed from `(*DedicatedHostGroupsClient)` to `(*DedicatedHostGroupsClient, error)`
+- Function `*DisksClient.BeginCreateOrUpdate` return value(s) have been changed from `(DisksClientCreateOrUpdatePollerResponse, error)` to `(*armruntime.Poller[DisksClientCreateOrUpdateResponse], error)`
+- Function `NewLogAnalyticsClient` return value(s) have been changed from `(*LogAnalyticsClient)` to `(*LogAnalyticsClient, error)`
+- Function `*CloudServiceRoleInstancesClient.List` return value(s) have been changed from `(*CloudServiceRoleInstancesClientListPager)` to `(*runtime.Pager[CloudServiceRoleInstancesClientListResponse])`
+- Function `*DiskEncryptionSetsClient.BeginDelete` return value(s) have been changed from `(DiskEncryptionSetsClientDeletePollerResponse, error)` to `(*armruntime.Poller[DiskEncryptionSetsClientDeleteResponse], error)`
+- Function `*GalleryImageVersionsClient.ListByGalleryImage` return value(s) have been changed from `(*GalleryImageVersionsClientListByGalleryImagePager)` to `(*runtime.Pager[GalleryImageVersionsClientListByGalleryImageResponse])`
+- Function `*DiskEncryptionSetsClient.BeginCreateOrUpdate` return value(s) have been changed from `(DiskEncryptionSetsClientCreateOrUpdatePollerResponse, error)` to `(*armruntime.Poller[DiskEncryptionSetsClientCreateOrUpdateResponse], error)`
+- Function `*CloudServiceRoleInstancesClient.BeginDelete` return value(s) have been changed from `(CloudServiceRoleInstancesClientDeletePollerResponse, error)` to `(*armruntime.Poller[CloudServiceRoleInstancesClientDeleteResponse], error)`
+- Function `*GalleryImageVersionsClient.BeginDelete` return value(s) have been changed from `(GalleryImageVersionsClientDeletePollerResponse, error)` to `(*armruntime.Poller[GalleryImageVersionsClientDeleteResponse], error)`
+- Function `*VirtualMachinesClient.ListAvailableSizes` parameter(s) have been changed from `(context.Context, string, string, *VirtualMachinesClientListAvailableSizesOptions)` to `(string, string, *VirtualMachinesClientListAvailableSizesOptions)`
+- Function `*VirtualMachinesClient.ListAvailableSizes` return value(s) have been changed from `(VirtualMachinesClientListAvailableSizesResponse, error)` to `(*runtime.Pager[VirtualMachinesClientListAvailableSizesResponse])`
+- Function `*CloudServiceRolesClient.List` return value(s) have been changed from `(*CloudServiceRolesClientListPager)` to `(*runtime.Pager[CloudServiceRolesClientListResponse])`
+- Function `*DedicatedHostGroupsClient.ListBySubscription` return value(s) have been changed from `(*DedicatedHostGroupsClientListBySubscriptionPager)` to `(*runtime.Pager[DedicatedHostGroupsClientListBySubscriptionResponse])`
+- Function `*CloudServiceRoleInstancesClient.BeginRebuild` return value(s) have been changed from `(CloudServiceRoleInstancesClientRebuildPollerResponse, error)` to `(*armruntime.Poller[CloudServiceRoleInstancesClientRebuildResponse], error)`
+- Function `NewSSHPublicKeysClient` return value(s) have been changed from `(*SSHPublicKeysClient)` to `(*SSHPublicKeysClient, error)`
+- Function `*VirtualMachinesClient.BeginRedeploy` return value(s) have been changed from `(VirtualMachinesClientRedeployPollerResponse, error)` to `(*armruntime.Poller[VirtualMachinesClientRedeployResponse], error)`
+- Function `*CloudServicesClient.BeginDelete` return value(s) have been changed from `(CloudServicesClientDeletePollerResponse, error)` to `(*armruntime.Poller[CloudServicesClientDeleteResponse], error)`
+- Function `*AvailabilitySetsClient.ListAvailableSizes` parameter(s) have been changed from `(context.Context, string, string, *AvailabilitySetsClientListAvailableSizesOptions)` to `(string, string, *AvailabilitySetsClientListAvailableSizesOptions)`
+- Function `*AvailabilitySetsClient.ListAvailableSizes` return value(s) have been changed from `(AvailabilitySetsClientListAvailableSizesResponse, error)` to `(*runtime.Pager[AvailabilitySetsClientListAvailableSizesResponse])`
+- Function `*DiskAccessesClient.BeginDelete` return value(s) have been changed from `(DiskAccessesClientDeletePollerResponse, error)` to `(*armruntime.Poller[DiskAccessesClientDeleteResponse], error)`
+- Function `*VirtualMachineScaleSetExtensionsClient.BeginCreateOrUpdate` return value(s) have been changed from `(VirtualMachineScaleSetExtensionsClientCreateOrUpdatePollerResponse, error)` to `(*armruntime.Poller[VirtualMachineScaleSetExtensionsClientCreateOrUpdateResponse], error)`
+- Function `*CloudServicesClient.BeginStart` return value(s) have been changed from `(CloudServicesClientStartPollerResponse, error)` to `(*armruntime.Poller[CloudServicesClientStartResponse], error)`
+- Function `NewGalleriesClient` return value(s) have been changed from `(*GalleriesClient)` to `(*GalleriesClient, error)`
+- Function `NewCommunityGalleryImageVersionsClient` return value(s) have been changed from `(*CommunityGalleryImageVersionsClient)` to `(*CommunityGalleryImageVersionsClient, error)`
+- Function `*RestorePointsClient.BeginCreate` return value(s) have been changed from `(RestorePointsClientCreatePollerResponse, error)` to `(*armruntime.Poller[RestorePointsClientCreateResponse], error)`
+- Function `*GalleryImagesClient.BeginDelete` return value(s) have been changed from `(GalleryImagesClientDeletePollerResponse, error)` to `(*armruntime.Poller[GalleryImagesClientDeleteResponse], error)`
+- Function `*VirtualMachinesClient.BeginRestart` return value(s) have been changed from `(VirtualMachinesClientRestartPollerResponse, error)` to `(*armruntime.Poller[VirtualMachinesClientRestartResponse], error)`
+- Function `NewImagesClient` return value(s) have been changed from `(*ImagesClient)` to `(*ImagesClient, error)`
+- Function `*VirtualMachineScaleSetRollingUpgradesClient.BeginStartExtensionUpgrade` return value(s) have been changed from `(VirtualMachineScaleSetRollingUpgradesClientStartExtensionUpgradePollerResponse, error)` to `(*armruntime.Poller[VirtualMachineScaleSetRollingUpgradesClientStartExtensionUpgradeResponse], error)`
+- Function `*VirtualMachineScaleSetVMRunCommandsClient.BeginUpdate` return value(s) have been changed from `(VirtualMachineScaleSetVMRunCommandsClientUpdatePollerResponse, error)` to `(*armruntime.Poller[VirtualMachineScaleSetVMRunCommandsClientUpdateResponse], error)`
+- Function `*VirtualMachinesClient.BeginReapply` return value(s) have been changed from `(VirtualMachinesClientReapplyPollerResponse, error)` to `(*armruntime.Poller[VirtualMachinesClientReapplyResponse], error)`
+- Function `*VirtualMachineScaleSetsClient.ListByLocation` return value(s) have been changed from `(*VirtualMachineScaleSetsClientListByLocationPager)` to `(*runtime.Pager[VirtualMachineScaleSetsClientListByLocationResponse])`
+- Function `*DiskRestorePointClient.BeginRevokeAccess` return value(s) have been changed from `(DiskRestorePointClientRevokeAccessPollerResponse, error)` to `(*armruntime.Poller[DiskRestorePointClientRevokeAccessResponse], error)`
+- Function `NewCapacityReservationGroupsClient` return value(s) have been changed from `(*CapacityReservationGroupsClient)` to `(*CapacityReservationGroupsClient, error)`
+- Function `*GalleryApplicationsClient.BeginUpdate` return value(s) have been changed from `(GalleryApplicationsClientUpdatePollerResponse, error)` to `(*armruntime.Poller[GalleryApplicationsClientUpdateResponse], error)`
+- Function `NewUsageClient` return value(s) have been changed from `(*UsageClient)` to `(*UsageClient, error)`
+- Function `*VirtualMachinesClient.BeginConvertToManagedDisks` return value(s) have been changed from `(VirtualMachinesClientConvertToManagedDisksPollerResponse, error)` to `(*armruntime.Poller[VirtualMachinesClientConvertToManagedDisksResponse], error)`
+- Function `*SharedGalleryImagesClient.List` return value(s) have been changed from `(*SharedGalleryImagesClientListPager)` to `(*runtime.Pager[SharedGalleryImagesClientListResponse])`
+- Function `*CapacityReservationGroupsClient.ListByResourceGroup` return value(s) have been changed from `(*CapacityReservationGroupsClientListByResourceGroupPager)` to `(*runtime.Pager[CapacityReservationGroupsClientListByResourceGroupResponse])`
+- Function `*UsageClient.List` return value(s) have been changed from `(*UsageClientListPager)` to `(*runtime.Pager[UsageClientListResponse])`
+- Function `NewSharedGalleryImagesClient` return value(s) have been changed from `(*SharedGalleryImagesClient)` to `(*SharedGalleryImagesClient, error)`
+- Function `*DedicatedHostsClient.BeginUpdate` return value(s) have been changed from `(DedicatedHostsClientUpdatePollerResponse, error)` to `(*armruntime.Poller[DedicatedHostsClientUpdateResponse], error)`
+- Function `*CloudServicesUpdateDomainClient.ListUpdateDomains` return value(s) have been changed from `(*CloudServicesUpdateDomainClientListUpdateDomainsPager)` to `(*runtime.Pager[CloudServicesUpdateDomainClientListUpdateDomainsResponse])`
+- Function `*AvailabilitySetsClient.List` return value(s) have been changed from `(*AvailabilitySetsClientListPager)` to `(*runtime.Pager[AvailabilitySetsClientListResponse])`
+- Function `*GalleryImageVersionsClient.BeginUpdate` return value(s) have been changed from `(GalleryImageVersionsClientUpdatePollerResponse, error)` to `(*armruntime.Poller[GalleryImageVersionsClientUpdateResponse], error)`
+- Function `NewCloudServiceRolesClient` return value(s) have been changed from `(*CloudServiceRolesClient)` to `(*CloudServiceRolesClient, error)`
+- Function `*DiskAccessesClient.BeginCreateOrUpdate` return value(s) have been changed from `(DiskAccessesClientCreateOrUpdatePollerResponse, error)` to `(*armruntime.Poller[DiskAccessesClientCreateOrUpdateResponse], error)`
+- Function `NewVirtualMachinesClient` return value(s) have been changed from `(*VirtualMachinesClient)` to `(*VirtualMachinesClient, error)`
+- Function `*RestorePointCollectionsClient.ListAll` return value(s) have been changed from `(*RestorePointCollectionsClientListAllPager)` to `(*runtime.Pager[RestorePointCollectionsClientListAllResponse])`
+- Function `*VirtualMachineScaleSetsClient.BeginPowerOff` return value(s) have been changed from `(VirtualMachineScaleSetsClientPowerOffPollerResponse, error)` to `(*armruntime.Poller[VirtualMachineScaleSetsClientPowerOffResponse], error)`
+- Function `NewDedicatedHostsClient` return value(s) have been changed from `(*DedicatedHostsClient)` to `(*DedicatedHostsClient, error)`
+- Function `*VirtualMachineExtensionsClient.BeginDelete` return value(s) have been changed from `(VirtualMachineExtensionsClientDeletePollerResponse, error)` to `(*armruntime.Poller[VirtualMachineExtensionsClientDeleteResponse], error)`
+- Function `NewVirtualMachineScaleSetExtensionsClient` return value(s) have been changed from `(*VirtualMachineScaleSetExtensionsClient)` to `(*VirtualMachineScaleSetExtensionsClient, error)`
+- Function `*VirtualMachineRunCommandsClient.BeginUpdate` return value(s) have been changed from `(VirtualMachineRunCommandsClientUpdatePollerResponse, error)` to `(*armruntime.Poller[VirtualMachineRunCommandsClientUpdateResponse], error)`
+- Function `*VirtualMachineScaleSetVMsClient.BeginRunCommand` return value(s) have been changed from `(VirtualMachineScaleSetVMsClientRunCommandPollerResponse, error)` to `(*armruntime.Poller[VirtualMachineScaleSetVMsClientRunCommandResponse], error)`
+- Function `*DedicatedHostsClient.BeginRestart` return value(s) have been changed from `(DedicatedHostsClientRestartPollerResponse, error)` to `(*armruntime.Poller[DedicatedHostsClientRestartResponse], error)`
+- Function `*VirtualMachineScaleSetVMRunCommandsClient.BeginCreateOrUpdate` return value(s) have been changed from `(VirtualMachineScaleSetVMRunCommandsClientCreateOrUpdatePollerResponse, error)` to `(*armruntime.Poller[VirtualMachineScaleSetVMRunCommandsClientCreateOrUpdateResponse], error)`
+- Function `*GalleryApplicationVersionsClient.BeginUpdate` return value(s) have been changed from `(GalleryApplicationVersionsClientUpdatePollerResponse, error)` to `(*armruntime.Poller[GalleryApplicationVersionsClientUpdateResponse], error)`
+- Function `NewDiskAccessesClient` return value(s) have been changed from `(*DiskAccessesClient)` to `(*DiskAccessesClient, error)`
+- Function `NewGalleryApplicationVersionsClient` return value(s) have been changed from `(*GalleryApplicationVersionsClient)` to `(*GalleryApplicationVersionsClient, error)`
+- Function `*DiskRestorePointClient.BeginGrantAccess` return value(s) have been changed from `(DiskRestorePointClientGrantAccessPollerResponse, error)` to `(*armruntime.Poller[DiskRestorePointClientGrantAccessResponse], error)`
+- Function `*CloudServiceRoleInstancesClient.BeginRestart` return value(s) have been changed from `(CloudServiceRoleInstancesClientRestartPollerResponse, error)` to `(*armruntime.Poller[CloudServiceRoleInstancesClientRestartResponse], error)`
+- Function `*VirtualMachineScaleSetVMsClient.BeginReimageAll` return value(s) have been changed from `(VirtualMachineScaleSetVMsClientReimageAllPollerResponse, error)` to `(*armruntime.Poller[VirtualMachineScaleSetVMsClientReimageAllResponse], error)`
+- Function `*VirtualMachinesClient.BeginStart` return value(s) have been changed from `(VirtualMachinesClientStartPollerResponse, error)` to `(*armruntime.Poller[VirtualMachinesClientStartResponse], error)`
+- Function `*VirtualMachineRunCommandsClient.BeginDelete` return value(s) have been changed from `(VirtualMachineRunCommandsClientDeletePollerResponse, error)` to `(*armruntime.Poller[VirtualMachineRunCommandsClientDeleteResponse], error)`
+- Function `NewVirtualMachineScaleSetRollingUpgradesClient` return value(s) have been changed from `(*VirtualMachineScaleSetRollingUpgradesClient)` to `(*VirtualMachineScaleSetRollingUpgradesClient, error)`
+- Function `*GallerySharingProfileClient.BeginUpdate` return value(s) have been changed from `(GallerySharingProfileClientUpdatePollerResponse, error)` to `(*armruntime.Poller[GallerySharingProfileClientUpdateResponse], error)`
+- Function `NewCloudServiceRoleInstancesClient` return value(s) have been changed from `(*CloudServiceRoleInstancesClient)` to `(*CloudServiceRoleInstancesClient, error)`
+- Function `NewSnapshotsClient` return value(s) have been changed from `(*SnapshotsClient)` to `(*SnapshotsClient, error)`
+- Function `NewResourceSKUsClient` return value(s) have been changed from `(*ResourceSKUsClient)` to `(*ResourceSKUsClient, error)`
+- Function `*VirtualMachineScaleSetVMExtensionsClient.BeginUpdate` return value(s) have been changed from `(VirtualMachineScaleSetVMExtensionsClientUpdatePollerResponse, error)` to `(*armruntime.Poller[VirtualMachineScaleSetVMExtensionsClientUpdateResponse], error)`
+- Function `*VirtualMachineScaleSetsClient.BeginReimage` return value(s) have been changed from `(VirtualMachineScaleSetsClientReimagePollerResponse, error)` to `(*armruntime.Poller[VirtualMachineScaleSetsClientReimageResponse], error)`
+- Function `*SnapshotsClient.BeginRevokeAccess` return value(s) have been changed from `(SnapshotsClientRevokeAccessPollerResponse, error)` to `(*armruntime.Poller[SnapshotsClientRevokeAccessResponse], error)`
+- Function `*CloudServicesUpdateDomainClient.BeginWalkUpdateDomain` return value(s) have been changed from `(CloudServicesUpdateDomainClientWalkUpdateDomainPollerResponse, error)` to `(*armruntime.Poller[CloudServicesUpdateDomainClientWalkUpdateDomainResponse], error)`
+- Function `*GalleryApplicationVersionsClient.BeginDelete` return value(s) have been changed from `(GalleryApplicationVersionsClientDeletePollerResponse, error)` to `(*armruntime.Poller[GalleryApplicationVersionsClientDeleteResponse], error)`
+- Function `*DedicatedHostsClient.BeginDelete` return value(s) have been changed from `(DedicatedHostsClientDeletePollerResponse, error)` to `(*armruntime.Poller[DedicatedHostsClientDeleteResponse], error)`
+- Function `*DiskEncryptionSetsClient.ListAssociatedResources` return value(s) have been changed from `(*DiskEncryptionSetsClientListAssociatedResourcesPager)` to `(*runtime.Pager[DiskEncryptionSetsClientListAssociatedResourcesResponse])`
+- Function `*SSHPublicKeysClient.ListByResourceGroup` return value(s) have been changed from `(*SSHPublicKeysClientListByResourceGroupPager)` to `(*runtime.Pager[SSHPublicKeysClientListByResourceGroupResponse])`
+- Function `*VirtualMachineScaleSetsClient.BeginReimageAll` return value(s) have been changed from `(VirtualMachineScaleSetsClientReimageAllPollerResponse, error)` to `(*armruntime.Poller[VirtualMachineScaleSetsClientReimageAllResponse], error)`
+- Function `*VirtualMachinesClient.BeginInstallPatches` return value(s) have been changed from `(VirtualMachinesClientInstallPatchesPollerResponse, error)` to `(*armruntime.Poller[VirtualMachinesClientInstallPatchesResponse], error)`
+- Function `*AvailabilitySetsClient.ListBySubscription` return value(s) have been changed from `(*AvailabilitySetsClientListBySubscriptionPager)` to `(*runtime.Pager[AvailabilitySetsClientListBySubscriptionResponse])`
+- Function `*CloudServicesClient.BeginRebuild` return value(s) have been changed from `(CloudServicesClientRebuildPollerResponse, error)` to `(*armruntime.Poller[CloudServicesClientRebuildResponse], error)`
+- Function `NewSharedGalleriesClient` return value(s) have been changed from `(*SharedGalleriesClient)` to `(*SharedGalleriesClient, error)`
+- Function `*VirtualMachineScaleSetVMsClient.List` return value(s) have been changed from `(*VirtualMachineScaleSetVMsClientListPager)` to `(*runtime.Pager[VirtualMachineScaleSetVMsClientListResponse])`
+- Function `*VirtualMachinesClient.List` return value(s) have been changed from `(*VirtualMachinesClientListPager)` to `(*runtime.Pager[VirtualMachinesClientListResponse])`
+- Function `*GalleryApplicationsClient.ListByGallery` return value(s) have been changed from `(*GalleryApplicationsClientListByGalleryPager)` to `(*runtime.Pager[GalleryApplicationsClientListByGalleryResponse])`
+- Function `*VirtualMachineExtensionsClient.BeginUpdate` return value(s) have been changed from `(VirtualMachineExtensionsClientUpdatePollerResponse, error)` to `(*armruntime.Poller[VirtualMachineExtensionsClientUpdateResponse], error)`
+- Function `*VirtualMachineScaleSetVMExtensionsClient.BeginDelete` return value(s) have been changed from `(VirtualMachineScaleSetVMExtensionsClientDeletePollerResponse, error)` to `(*armruntime.Poller[VirtualMachineScaleSetVMExtensionsClientDeleteResponse], error)`
+- Function `*VirtualMachineScaleSetVMsClient.BeginReimage` return value(s) have been changed from `(VirtualMachineScaleSetVMsClientReimagePollerResponse, error)` to `(*armruntime.Poller[VirtualMachineScaleSetVMsClientReimageResponse], error)`
+- Function `*ResourceSKUsClient.List` return value(s) have been changed from `(*ResourceSKUsClientListPager)` to `(*runtime.Pager[ResourceSKUsClientListResponse])`
+- Function `NewVirtualMachineScaleSetVMRunCommandsClient` return value(s) have been changed from `(*VirtualMachineScaleSetVMRunCommandsClient)` to `(*VirtualMachineScaleSetVMRunCommandsClient, error)`
+- Function `*VirtualMachineScaleSetsClient.BeginPerformMaintenance` return value(s) have been changed from `(VirtualMachineScaleSetsClientPerformMaintenancePollerResponse, error)` to `(*armruntime.Poller[VirtualMachineScaleSetsClientPerformMaintenanceResponse], error)`
+- Function `*VirtualMachineScaleSetsClient.BeginUpdateInstances` return value(s) have been changed from `(VirtualMachineScaleSetsClientUpdateInstancesPollerResponse, error)` to `(*armruntime.Poller[VirtualMachineScaleSetsClientUpdateInstancesResponse], error)`
+- Function `*VirtualMachinesClient.BeginCapture` return value(s) have been changed from `(VirtualMachinesClientCapturePollerResponse, error)` to `(*armruntime.Poller[VirtualMachinesClientCaptureResponse], error)`
+- Function `*DiskAccessesClient.BeginDeleteAPrivateEndpointConnection` return value(s) have been changed from `(DiskAccessesClientDeleteAPrivateEndpointConnectionPollerResponse, error)` to `(*armruntime.Poller[DiskAccessesClientDeleteAPrivateEndpointConnectionResponse], error)`
+- Function `NewDiskEncryptionSetsClient` return value(s) have been changed from `(*DiskEncryptionSetsClient)` to `(*DiskEncryptionSetsClient, error)`
+- Function `*SnapshotsClient.BeginCreateOrUpdate` return value(s) have been changed from `(SnapshotsClientCreateOrUpdatePollerResponse, error)` to `(*armruntime.Poller[SnapshotsClientCreateOrUpdateResponse], error)`
+- Function `NewVirtualMachineRunCommandsClient` return value(s) have been changed from `(*VirtualMachineRunCommandsClient)` to `(*VirtualMachineRunCommandsClient, error)`
+- Function `NewCapacityReservationsClient` return value(s) have been changed from `(*CapacityReservationsClient)` to `(*CapacityReservationsClient, error)`
+- Function `NewCommunityGalleriesClient` return value(s) have been changed from `(*CommunityGalleriesClient)` to `(*CommunityGalleriesClient, error)`
+- Function `*SnapshotsClient.ListByResourceGroup` return value(s) have been changed from `(*SnapshotsClientListByResourceGroupPager)` to `(*runtime.Pager[SnapshotsClientListByResourceGroupResponse])`
+- Function `NewRestorePointCollectionsClient` return value(s) have been changed from `(*RestorePointCollectionsClient)` to `(*RestorePointCollectionsClient, error)`
+- Function `*DiskAccessesClient.BeginUpdate` return value(s) have been changed from `(DiskAccessesClientUpdatePollerResponse, error)` to `(*armruntime.Poller[DiskAccessesClientUpdateResponse], error)`
+- Function `*DedicatedHostsClient.ListByHostGroup` return value(s) have been changed from `(*DedicatedHostsClientListByHostGroupPager)` to `(*runtime.Pager[DedicatedHostsClientListByHostGroupResponse])`
+- Function `*VirtualMachineScaleSetsClient.BeginDeleteInstances` return value(s) have been changed from `(VirtualMachineScaleSetsClientDeleteInstancesPollerResponse, error)` to `(*armruntime.Poller[VirtualMachineScaleSetsClientDeleteInstancesResponse], error)`
+- Function `*VirtualMachinesClient.BeginRunCommand` return value(s) have been changed from `(VirtualMachinesClientRunCommandPollerResponse, error)` to `(*armruntime.Poller[VirtualMachinesClientRunCommandResponse], error)`
+- Function `*GalleryApplicationVersionsClient.ListByGalleryApplication` return value(s) have been changed from `(*GalleryApplicationVersionsClientListByGalleryApplicationPager)` to `(*runtime.Pager[GalleryApplicationVersionsClientListByGalleryApplicationResponse])`
+- Function `NewCloudServiceOperatingSystemsClient` return value(s) have been changed from `(*CloudServiceOperatingSystemsClient)` to `(*CloudServiceOperatingSystemsClient, error)`
+- Function `*ImagesClient.List` return value(s) have been changed from `(*ImagesClientListPager)` to `(*runtime.Pager[ImagesClientListResponse])`
+- Function `*GalleriesClient.BeginUpdate` return value(s) have been changed from `(GalleriesClientUpdatePollerResponse, error)` to `(*armruntime.Poller[GalleriesClientUpdateResponse], error)`
+- Function `*VirtualMachineScaleSetVMsClient.BeginUpdate` return value(s) have been changed from `(VirtualMachineScaleSetVMsClientUpdatePollerResponse, error)` to `(*armruntime.Poller[VirtualMachineScaleSetVMsClientUpdateResponse], error)`
+- Function `*DiskEncryptionSetsClient.List` return value(s) have been changed from `(*DiskEncryptionSetsClientListPager)` to `(*runtime.Pager[DiskEncryptionSetsClientListResponse])`
+- Function `*DedicatedHostsClient.BeginCreateOrUpdate` return value(s) have been changed from `(DedicatedHostsClientCreateOrUpdatePollerResponse, error)` to `(*armruntime.Poller[DedicatedHostsClientCreateOrUpdateResponse], error)`
+- Function `*DiskAccessesClient.ListByResourceGroup` return value(s) have been changed from `(*DiskAccessesClientListByResourceGroupPager)` to `(*runtime.Pager[DiskAccessesClientListByResourceGroupResponse])`
+- Function `NewCloudServicesClient` return value(s) have been changed from `(*CloudServicesClient)` to `(*CloudServicesClient, error)`
+- Function `*CloudServicesClient.ListAll` return value(s) have been changed from `(*CloudServicesClientListAllPager)` to `(*runtime.Pager[CloudServicesClientListAllResponse])`
+- Function `*VirtualMachinesClient.BeginPerformMaintenance` return value(s) have been changed from `(VirtualMachinesClientPerformMaintenancePollerResponse, error)` to `(*armruntime.Poller[VirtualMachinesClientPerformMaintenanceResponse], error)`
+- Function `*GalleriesClient.BeginCreateOrUpdate` return value(s) have been changed from `(GalleriesClientCreateOrUpdatePollerResponse, error)` to `(*armruntime.Poller[GalleriesClientCreateOrUpdateResponse], error)`
+- Function `*VirtualMachinesClient.BeginReimage` return value(s) have been changed from `(VirtualMachinesClientReimagePollerResponse, error)` to `(*armruntime.Poller[VirtualMachinesClientReimageResponse], error)`
+- Function `*SnapshotsClient.BeginGrantAccess` return value(s) have been changed from `(SnapshotsClientGrantAccessPollerResponse, error)` to `(*armruntime.Poller[SnapshotsClientGrantAccessResponse], error)`
+- Function `*VirtualMachineScaleSetVMsClient.BeginDelete` return value(s) have been changed from `(VirtualMachineScaleSetVMsClientDeletePollerResponse,
error)` to `(*armruntime.Poller[VirtualMachineScaleSetVMsClientDeleteResponse], error)` +- Function `*DiskAccessesClient.BeginUpdateAPrivateEndpointConnection` return value(s) have been changed from `(DiskAccessesClientUpdateAPrivateEndpointConnectionPollerResponse, error)` to `(*armruntime.Poller[DiskAccessesClientUpdateAPrivateEndpointConnectionResponse], error)` +- Function `*VirtualMachineRunCommandsClient.BeginCreateOrUpdate` return value(s) have been changed from `(VirtualMachineRunCommandsClientCreateOrUpdatePollerResponse, error)` to `(*armruntime.Poller[VirtualMachineRunCommandsClientCreateOrUpdateResponse], error)` +- Function `*CloudServicesClient.List` return value(s) have been changed from `(*CloudServicesClientListPager)` to `(*runtime.Pager[CloudServicesClientListResponse])` +- Function `NewProximityPlacementGroupsClient` return value(s) have been changed from `(*ProximityPlacementGroupsClient)` to `(*ProximityPlacementGroupsClient, error)` +- Function `*DiskAccessesClient.ListPrivateEndpointConnections` return value(s) have been changed from `(*DiskAccessesClientListPrivateEndpointConnectionsPager)` to `(*runtime.Pager[DiskAccessesClientListPrivateEndpointConnectionsResponse])` +- Function `NewVirtualMachineScaleSetVMsClient` return value(s) have been changed from `(*VirtualMachineScaleSetVMsClient)` to `(*VirtualMachineScaleSetVMsClient, error)` +- Function `*ProximityPlacementGroupsClient.ListBySubscription` return value(s) have been changed from `(*ProximityPlacementGroupsClientListBySubscriptionPager)` to `(*runtime.Pager[ProximityPlacementGroupsClientListBySubscriptionResponse])` +- Function `*VirtualMachineScaleSetVMsClient.BeginPowerOff` return value(s) have been changed from `(VirtualMachineScaleSetVMsClientPowerOffPollerResponse, error)` to `(*armruntime.Poller[VirtualMachineScaleSetVMsClientPowerOffResponse], error)` +- Function `*VirtualMachineScaleSetVMsClient.BeginDeallocate` return value(s) have been changed from `(VirtualMachineScaleSetVMsClientDeallocatePollerResponse, error)` to `(*armruntime.Poller[VirtualMachineScaleSetVMsClientDeallocateResponse], error)` +- Function `*RestorePointCollectionsClient.BeginDelete` return value(s) have been changed from `(RestorePointCollectionsClientDeletePollerResponse, error)` to `(*armruntime.Poller[RestorePointCollectionsClientDeleteResponse], error)` +- Function `*CloudServicesClient.BeginPowerOff` return value(s) have been changed from `(CloudServicesClientPowerOffPollerResponse, error)` to `(*armruntime.Poller[CloudServicesClientPowerOffResponse], error)` +- Function `*VirtualMachineScaleSetsClient.BeginDelete` return value(s) have been changed from `(VirtualMachineScaleSetsClientDeletePollerResponse, error)` to `(*armruntime.Poller[VirtualMachineScaleSetsClientDeleteResponse], error)` +- Function `*CapacityReservationsClient.BeginDelete` return value(s) have been changed from `(CapacityReservationsClientDeletePollerResponse, error)` to `(*armruntime.Poller[CapacityReservationsClientDeleteResponse], error)` +- Function `*VirtualMachineScaleSetVMsClient.BeginRedeploy` return value(s) have been changed from `(VirtualMachineScaleSetVMsClientRedeployPollerResponse, error)` to `(*armruntime.Poller[VirtualMachineScaleSetVMsClientRedeployResponse], error)` +- Function `NewVirtualMachineSizesClient` return value(s) have been changed from `(*VirtualMachineSizesClient)` to `(*VirtualMachineSizesClient, error)` +- Function `*VirtualMachinesClient.ListAll` return value(s) have been changed from 
`(*VirtualMachinesClientListAllPager)` to `(*runtime.Pager[VirtualMachinesClientListAllResponse])` +- Function `*CloudServicesClient.BeginDeleteInstances` return value(s) have been changed from `(CloudServicesClientDeleteInstancesPollerResponse, error)` to `(*armruntime.Poller[CloudServicesClientDeleteInstancesResponse], error)` +- Function `*VirtualMachineScaleSetRollingUpgradesClient.BeginCancel` return value(s) have been changed from `(VirtualMachineScaleSetRollingUpgradesClientCancelPollerResponse, error)` to `(*armruntime.Poller[VirtualMachineScaleSetRollingUpgradesClientCancelResponse], error)` +- Function `*CloudServiceOperatingSystemsClient.ListOSFamilies` return value(s) have been changed from `(*CloudServiceOperatingSystemsClientListOSFamiliesPager)` to `(*runtime.Pager[CloudServiceOperatingSystemsClientListOSFamiliesResponse])` +- Function `*VirtualMachinesClient.BeginUpdate` return value(s) have been changed from `(VirtualMachinesClientUpdatePollerResponse, error)` to `(*armruntime.Poller[VirtualMachinesClientUpdateResponse], error)` +- Function `*DiskEncryptionSetsClient.BeginUpdate` return value(s) have been changed from `(DiskEncryptionSetsClientUpdatePollerResponse, error)` to `(*armruntime.Poller[DiskEncryptionSetsClientUpdateResponse], error)` +- Function `NewDisksClient` return value(s) have been changed from `(*DisksClient)` to `(*DisksClient, error)` +- Function `*VirtualMachineScaleSetsClient.BeginUpdate` return value(s) have been changed from `(VirtualMachineScaleSetsClientUpdatePollerResponse, error)` to `(*armruntime.Poller[VirtualMachineScaleSetsClientUpdateResponse], error)` +- Function `*VirtualMachineScaleSetVMExtensionsClient.BeginCreateOrUpdate` return value(s) have been changed from `(VirtualMachineScaleSetVMExtensionsClientCreateOrUpdatePollerResponse, error)` to `(*armruntime.Poller[VirtualMachineScaleSetVMExtensionsClientCreateOrUpdateResponse], error)` +- Function `*VirtualMachineScaleSetsClient.GetOSUpgradeHistory` return value(s) have been changed from `(*VirtualMachineScaleSetsClientGetOSUpgradeHistoryPager)` to `(*runtime.Pager[VirtualMachineScaleSetsClientGetOSUpgradeHistoryResponse])` +- Function `NewDiskRestorePointClient` return value(s) have been changed from `(*DiskRestorePointClient)` to `(*DiskRestorePointClient, error)` +- Function `*SnapshotsClient.List` return value(s) have been changed from `(*SnapshotsClientListPager)` to `(*runtime.Pager[SnapshotsClientListResponse])` +- Function `*VirtualMachineScaleSetExtensionsClient.BeginDelete` return value(s) have been changed from `(VirtualMachineScaleSetExtensionsClientDeletePollerResponse, error)` to `(*armruntime.Poller[VirtualMachineScaleSetExtensionsClientDeleteResponse], error)` +- Function `*VirtualMachineScaleSetsClient.BeginStart` return value(s) have been changed from `(VirtualMachineScaleSetsClientStartPollerResponse, error)` to `(*armruntime.Poller[VirtualMachineScaleSetsClientStartResponse], error)` +- Function `NewAvailabilitySetsClient` return value(s) have been changed from `(*AvailabilitySetsClient)` to `(*AvailabilitySetsClient, error)` +- Function `*ImagesClient.BeginUpdate` return value(s) have been changed from `(ImagesClientUpdatePollerResponse, error)` to `(*armruntime.Poller[ImagesClientUpdateResponse], error)` +- Function `*ImagesClient.ListByResourceGroup` return value(s) have been changed from `(*ImagesClientListByResourceGroupPager)` to `(*runtime.Pager[ImagesClientListByResourceGroupResponse])` +- Function `*ImagesClient.BeginCreateOrUpdate` return value(s) have been 
changed from `(ImagesClientCreateOrUpdatePollerResponse, error)` to `(*armruntime.Poller[ImagesClientCreateOrUpdateResponse], error)` +- Function `*VirtualMachinesClient.BeginPowerOff` return value(s) have been changed from `(VirtualMachinesClientPowerOffPollerResponse, error)` to `(*armruntime.Poller[VirtualMachinesClientPowerOffResponse], error)` +- Function `NewCloudServicesUpdateDomainClient` return value(s) have been changed from `(*CloudServicesUpdateDomainClient)` to `(*CloudServicesUpdateDomainClient, error)` +- Function `NewGalleryApplicationsClient` return value(s) have been changed from `(*GalleryApplicationsClient)` to `(*GalleryApplicationsClient, error)` +- Function `*VirtualMachineSizesClient.List` parameter(s) have been changed from `(context.Context, string, *VirtualMachineSizesClientListOptions)` to `(string, *VirtualMachineSizesClientListOptions)` +- Function `*VirtualMachineSizesClient.List` return value(s) have been changed from `(VirtualMachineSizesClientListResponse, error)` to `(*runtime.Pager[VirtualMachineSizesClientListResponse])` +- Function `*RestorePointsClient.BeginDelete` return value(s) have been changed from `(RestorePointsClientDeletePollerResponse, error)` to `(*armruntime.Poller[RestorePointsClientDeleteResponse], error)` +- Function `*DiskEncryptionSetsClient.ListByResourceGroup` return value(s) have been changed from `(*DiskEncryptionSetsClientListByResourceGroupPager)` to `(*runtime.Pager[DiskEncryptionSetsClientListByResourceGroupResponse])` +- Function `NewGallerySharingProfileClient` return value(s) have been changed from `(*GallerySharingProfileClient)` to `(*GallerySharingProfileClient, error)` +- Function `*CloudServicesClient.BeginReimage` return value(s) have been changed from `(CloudServicesClientReimagePollerResponse, error)` to `(*armruntime.Poller[CloudServicesClientReimageResponse], error)` +- Function `NewGalleryImagesClient` return value(s) have been changed from `(*GalleryImagesClient)` to `(*GalleryImagesClient, error)` +- Function `*DisksClient.BeginRevokeAccess` return value(s) have been changed from `(DisksClientRevokeAccessPollerResponse, error)` to `(*armruntime.Poller[DisksClientRevokeAccessResponse], error)` +- Function `NewGalleryImageVersionsClient` return value(s) have been changed from `(*GalleryImageVersionsClient)` to `(*GalleryImageVersionsClient, error)` +- Function `*GalleryApplicationVersionsClient.BeginCreateOrUpdate` return value(s) have been changed from `(GalleryApplicationVersionsClientCreateOrUpdatePollerResponse, error)` to `(*armruntime.Poller[GalleryApplicationVersionsClientCreateOrUpdateResponse], error)` +- Function `NewVirtualMachineImagesClient` return value(s) have been changed from `(*VirtualMachineImagesClient)` to `(*VirtualMachineImagesClient, error)` +- Function `*DedicatedHostGroupsClient.ListByResourceGroup` return value(s) have been changed from `(*DedicatedHostGroupsClientListByResourceGroupPager)` to `(*runtime.Pager[DedicatedHostGroupsClientListByResourceGroupResponse])` +- Function `*DisksClient.BeginGrantAccess` return value(s) have been changed from `(DisksClientGrantAccessPollerResponse, error)` to `(*armruntime.Poller[DisksClientGrantAccessResponse], error)` +- Function `*OperationsClient.List` parameter(s) have been changed from `(context.Context, *OperationsClientListOptions)` to `(*OperationsClientListOptions)` +- Function `*OperationsClient.List` return value(s) have been changed from `(OperationsClientListResponse, error)` to `(*runtime.Pager[OperationsClientListResponse])` +- 
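The pager and constructor changes above all follow the same azcore generics migration: `New*Client` constructors now return `(client, error)`, list operations return a generic `*runtime.Pager[T]` driven by `More`/`NextPage`, and the `context.Context` parameter moves from the list call to `NextPage`. A minimal sketch of the new shape, assuming the `azidentity` credential type and placeholder subscription and resource-group names (none of which appear in this diff):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	// Constructors now return (client, error) instead of just the client.
	client, err := armcompute.NewVirtualMachinesClient("<subscription-id>", cred, nil)
	if err != nil {
		log.Fatal(err)
	}
	// List now returns *runtime.Pager[VirtualMachinesClientListResponse];
	// the old pager type with NextPage/PageResponse/Err is gone.
	pager := client.List("<resource-group>", nil)
	for pager.More() {
		page, err := pager.NextPage(context.Background())
		if err != nil {
			log.Fatal(err) // NextPage surfaces the error the removed Err method used to hold
		}
		for _, vm := range page.Value {
			fmt.Println(*vm.Name)
		}
	}
}
```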
+- Function `NewVirtualMachineScaleSetVMExtensionsClient` return value(s) have been changed from `(*VirtualMachineScaleSetVMExtensionsClient)` to `(*VirtualMachineScaleSetVMExtensionsClient, error)`
+- Function `*VirtualMachineExtensionsClient.BeginCreateOrUpdate` return value(s) have been changed from `(VirtualMachineExtensionsClientCreateOrUpdatePollerResponse, error)` to `(*armruntime.Poller[VirtualMachineExtensionsClientCreateOrUpdateResponse], error)`
+- Function `*GalleriesClient.ListByResourceGroup` return value(s) have been changed from `(*GalleriesClientListByResourceGroupPager)` to `(*runtime.Pager[GalleriesClientListByResourceGroupResponse])`
+- Function `*DisksClient.BeginUpdate` return value(s) have been changed from `(DisksClientUpdatePollerResponse, error)` to `(*armruntime.Poller[DisksClientUpdateResponse], error)`
+- Function `*VirtualMachineRunCommandsClient.List` return value(s) have been changed from `(*VirtualMachineRunCommandsClientListPager)` to `(*runtime.Pager[VirtualMachineRunCommandsClientListResponse])`
+- Function `*VirtualMachineScaleSetExtensionsClient.BeginUpdate` return value(s) have been changed from `(VirtualMachineScaleSetExtensionsClientUpdatePollerResponse, error)` to `(*armruntime.Poller[VirtualMachineScaleSetExtensionsClientUpdateResponse], error)`
+- Function `*DisksClient.List` return value(s) have been changed from `(*DisksClientListPager)` to `(*runtime.Pager[DisksClientListResponse])`
+- Function `*CapacityReservationsClient.BeginUpdate` return value(s) have been changed from `(CapacityReservationsClientUpdatePollerResponse, error)` to `(*armruntime.Poller[CapacityReservationsClientUpdateResponse], error)`
+- Function `*SnapshotsClient.BeginUpdate` return value(s) have been changed from `(SnapshotsClientUpdatePollerResponse, error)` to `(*armruntime.Poller[SnapshotsClientUpdateResponse], error)`
+- Function `*VirtualMachineScaleSetExtensionsClient.List` return value(s) have been changed from `(*VirtualMachineScaleSetExtensionsClientListPager)` to `(*runtime.Pager[VirtualMachineScaleSetExtensionsClientListResponse])`
+- Function `*VirtualMachineScaleSetVMRunCommandsClient.List` return value(s) have been changed from `(*VirtualMachineScaleSetVMRunCommandsClientListPager)` to `(*runtime.Pager[VirtualMachineScaleSetVMRunCommandsClientListResponse])`
+- Function `*VirtualMachineRunCommandsClient.ListByVirtualMachine` return value(s) have been changed from `(*VirtualMachineRunCommandsClientListByVirtualMachinePager)` to `(*runtime.Pager[VirtualMachineRunCommandsClientListByVirtualMachineResponse])`
+- Function `*DiskAccessesClient.List` return value(s) have been changed from `(*DiskAccessesClientListPager)` to `(*runtime.Pager[DiskAccessesClientListResponse])`
+- Function `*CloudServicesClient.BeginRestart` return value(s) have been changed from `(CloudServicesClientRestartPollerResponse, error)` to `(*armruntime.Poller[CloudServicesClientRestartResponse], error)`
+- Function `*ProximityPlacementGroupsClient.ListByResourceGroup` return value(s) have been changed from `(*ProximityPlacementGroupsClientListByResourceGroupPager)` to `(*runtime.Pager[ProximityPlacementGroupsClientListByResourceGroupResponse])`
+- Function `*GalleriesClient.List` return value(s) have been changed from `(*GalleriesClientListPager)` to `(*runtime.Pager[GalleriesClientListResponse])`
+- Function `NewCommunityGalleryImagesClient` return value(s) have been changed from `(*CommunityGalleryImagesClient)` to `(*CommunityGalleryImagesClient, error)`
+- Function `*VirtualMachineScaleSetVMRunCommandsClient.BeginDelete` return value(s) have been changed from `(VirtualMachineScaleSetVMRunCommandsClientDeletePollerResponse, error)` to `(*armruntime.Poller[VirtualMachineScaleSetVMRunCommandsClientDeleteResponse], error)`
+- Function `*VirtualMachinesClient.BeginDelete` return value(s) have been changed from `(VirtualMachinesClientDeletePollerResponse, error)` to `(*armruntime.Poller[VirtualMachinesClientDeleteResponse], error)`
+- Function `NewRestorePointsClient` return value(s) have been changed from `(*RestorePointsClient)` to `(*RestorePointsClient, error)`
+- Function `*LogAnalyticsClient.BeginExportRequestRateByInterval` return value(s) have been changed from `(LogAnalyticsClientExportRequestRateByIntervalPollerResponse, error)` to `(*armruntime.Poller[LogAnalyticsClientExportRequestRateByIntervalResponse], error)`
+- Function `*GalleryImagesClient.BeginUpdate` return value(s) have been changed from `(GalleryImagesClientUpdatePollerResponse, error)` to `(*armruntime.Poller[GalleryImagesClientUpdateResponse], error)`
+- Function `*VirtualMachinesClient.ListByLocation` return value(s) have been changed from `(*VirtualMachinesClientListByLocationPager)` to `(*runtime.Pager[VirtualMachinesClientListByLocationResponse])`
+- Function `*VirtualMachineScaleSetVMsClient.BeginRestart` return value(s) have been changed from `(VirtualMachineScaleSetVMsClientRestartPollerResponse, error)` to `(*armruntime.Poller[VirtualMachineScaleSetVMsClientRestartResponse], error)`
+- Function `*DisksClient.ListByResourceGroup` return value(s) have been changed from `(*DisksClientListByResourceGroupPager)` to `(*runtime.Pager[DisksClientListByResourceGroupResponse])`
+- Function `*GalleryApplicationsClient.BeginCreateOrUpdate` return value(s) have been changed from `(GalleryApplicationsClientCreateOrUpdatePollerResponse, error)` to `(*armruntime.Poller[GalleryApplicationsClientCreateOrUpdateResponse], error)`
+- Function `*GalleryApplicationsClient.BeginDelete` return value(s) have been changed from `(GalleryApplicationsClientDeletePollerResponse, error)` to `(*armruntime.Poller[GalleryApplicationsClientDeleteResponse], error)`
+- Function `*VirtualMachineScaleSetsClient.BeginRedeploy` return value(s) have been changed from `(VirtualMachineScaleSetsClientRedeployPollerResponse, error)` to `(*armruntime.Poller[VirtualMachineScaleSetsClientRedeployResponse], error)`
+- Function `*GalleriesClient.BeginDelete` return value(s) have been changed from `(GalleriesClientDeletePollerResponse, error)` to `(*armruntime.Poller[GalleriesClientDeleteResponse], error)`
+- Function `*VirtualMachineScaleSetsClient.BeginCreateOrUpdate` return value(s) have been changed from `(VirtualMachineScaleSetsClientCreateOrUpdatePollerResponse, error)` to `(*armruntime.Poller[VirtualMachineScaleSetsClientCreateOrUpdateResponse], error)`
+- Function `*SharedGalleryImageVersionsClient.List` return value(s) have been changed from `(*SharedGalleryImageVersionsClientListPager)` to `(*runtime.Pager[SharedGalleryImageVersionsClientListResponse])`
+- Function `NewVirtualMachineExtensionsClient` return value(s) have been changed from `(*VirtualMachineExtensionsClient)` to `(*VirtualMachineExtensionsClient, error)`
+- Function `*VirtualMachineScaleSetsClient.ListSKUs` return value(s) have been changed from `(*VirtualMachineScaleSetsClientListSKUsPager)` to `(*runtime.Pager[VirtualMachineScaleSetsClientListSKUsResponse])`
+- Function `*SSHPublicKeysClient.ListBySubscription` return value(s) have been changed from `(*SSHPublicKeysClientListBySubscriptionPager)` to `(*runtime.Pager[SSHPublicKeysClientListBySubscriptionResponse])`
+- Function `*SnapshotsClient.BeginDelete` return value(s) have been changed from `(SnapshotsClientDeletePollerResponse, error)` to `(*armruntime.Poller[SnapshotsClientDeleteResponse], error)`
+- Function `*VirtualMachineScaleSetsClient.BeginSetOrchestrationServiceState` return value(s) have been changed from `(VirtualMachineScaleSetsClientSetOrchestrationServiceStatePollerResponse, error)` to `(*armruntime.Poller[VirtualMachineScaleSetsClientSetOrchestrationServiceStateResponse], error)`
+- Function `NewOperationsClient` return value(s) have been changed from `(*OperationsClient)` to `(*OperationsClient, error)`
+- Function `NewSharedGalleryImageVersionsClient` return value(s) have been changed from `(*SharedGalleryImageVersionsClient)` to `(*SharedGalleryImageVersionsClient, error)`
+- Function `*VirtualMachineScaleSetVMsClient.BeginStart` return value(s) have been changed from `(VirtualMachineScaleSetVMsClientStartPollerResponse, error)` to `(*armruntime.Poller[VirtualMachineScaleSetVMsClientStartResponse], error)`
+- Function `*CloudServicesClient.BeginCreateOrUpdate` return value(s) have been changed from `(CloudServicesClientCreateOrUpdatePollerResponse, error)` to `(*armruntime.Poller[CloudServicesClientCreateOrUpdateResponse], error)`
+- Function `*VirtualMachineScaleSetRollingUpgradesClient.BeginStartOSUpgrade` return value(s) have been changed from `(VirtualMachineScaleSetRollingUpgradesClientStartOSUpgradePollerResponse, error)` to `(*armruntime.Poller[VirtualMachineScaleSetRollingUpgradesClientStartOSUpgradeResponse], error)`
+- Function `*DiskRestorePointClient.ListByRestorePoint` return value(s) have been changed from `(*DiskRestorePointClientListByRestorePointPager)` to `(*runtime.Pager[DiskRestorePointClientListByRestorePointResponse])`
+- Function `*GalleryImageVersionsClient.BeginCreateOrUpdate` return value(s) have been changed from `(GalleryImageVersionsClientCreateOrUpdatePollerResponse, error)` to `(*armruntime.Poller[GalleryImageVersionsClientCreateOrUpdateResponse], error)`
+- Function `*CloudServicesClient.BeginUpdate` return value(s) have been changed from `(CloudServicesClientUpdatePollerResponse, error)` to `(*armruntime.Poller[CloudServicesClientUpdateResponse], error)`
+- Function `*DisksClient.BeginDelete` return value(s) have been changed from `(DisksClientDeletePollerResponse, error)` to `(*armruntime.Poller[DisksClientDeleteResponse], error)`
+- Function `*VirtualMachineScaleSetsClient.BeginDeallocate` return value(s) have been changed from `(VirtualMachineScaleSetsClientDeallocatePollerResponse, error)` to `(*armruntime.Poller[VirtualMachineScaleSetsClientDeallocateResponse], error)`
+- Function `*VirtualMachinesClient.BeginCreateOrUpdate` return value(s) have been changed from `(VirtualMachinesClientCreateOrUpdatePollerResponse, error)` to `(*armruntime.Poller[VirtualMachinesClientCreateOrUpdateResponse], error)`
+- Function `NewVirtualMachineExtensionImagesClient` return value(s) have been changed from `(*VirtualMachineExtensionImagesClient)` to `(*VirtualMachineExtensionImagesClient, error)`
+- Function `NewVirtualMachineImagesEdgeZoneClient` return value(s) have been changed from `(*VirtualMachineImagesEdgeZoneClient)` to `(*VirtualMachineImagesEdgeZoneClient, error)`
+- Function `*GalleryImagesClient.BeginCreateOrUpdate` return value(s) have been changed from `(GalleryImagesClientCreateOrUpdatePollerResponse, error)` to `(*armruntime.Poller[GalleryImagesClientCreateOrUpdateResponse], error)`
+- Function `*CloudServiceRoleInstancesClient.BeginReimage` return value(s) have been changed from `(CloudServiceRoleInstancesClientReimagePollerResponse, error)` to `(*armruntime.Poller[CloudServiceRoleInstancesClientReimageResponse], error)`
+- Function `*ImagesClient.BeginDelete` return value(s) have been changed from `(ImagesClientDeletePollerResponse, error)` to `(*armruntime.Poller[ImagesClientDeleteResponse], error)`
+- Function `*CapacityReservationGroupsClient.ListBySubscription` return value(s) have been changed from `(*CapacityReservationGroupsClientListBySubscriptionPager)` to `(*runtime.Pager[CapacityReservationGroupsClientListBySubscriptionResponse])`
+- Function `*VirtualMachineScaleSetVMsClient.BeginPerformMaintenance` return value(s) have been changed from `(VirtualMachineScaleSetVMsClientPerformMaintenancePollerResponse, error)` to `(*armruntime.Poller[VirtualMachineScaleSetVMsClientPerformMaintenanceResponse], error)`
+- Function `*SnapshotsClientListPager.NextPage` has been removed
+- Function `*ImagesClientDeletePoller.FinalResponse` has been removed
+- Function `*VirtualMachineExtensionsClientCreateOrUpdatePoller.Done` has been removed
+- Function `*DiskAccessesClientDeletePoller.Poll` has been removed
+- Function `*DiskEncryptionSetsClientListByResourceGroupPager.NextPage` has been removed
+- Function `DedicatedHostsClientDeletePollerResponse.PollUntilDone` has been removed
+- Function `*DedicatedHostsClientDeletePoller.FinalResponse` has been removed
+- Function `*SnapshotsClientGrantAccessPollerResponse.Resume` has been removed
+- Function `*GalleryImageVersionsClientUpdatePollerResponse.Resume` has been removed
+- Function `*ImagesClientUpdatePoller.Done` has been removed
+- Function `*VirtualMachineScaleSetVMRunCommandsClientCreateOrUpdatePollerResponse.Resume` has been removed
+- Function `SharingState.ToPtr` has been removed
+- Function `*VirtualMachineScaleSetsClientReimageAllPoller.Done` has been removed
+- Function `*VirtualMachineScaleSetRollingUpgradesClientCancelPoller.ResumeToken` has been removed
+- Function `GallerySharingPermissionTypes.ToPtr` has been removed
+- Function `*CloudServiceOperatingSystemsClientListOSFamiliesPager.NextPage` has been removed
+- Function `*VirtualMachineScaleSetsClientReimagePoller.ResumeToken` has been removed
+- Function `DiskAccessesClientDeletePollerResponse.PollUntilDone` has been removed
+- Function `*VirtualMachinesClientRestartPollerResponse.Resume` has been removed
+- Function `*VirtualMachineScaleSetVMExtensionsClientDeletePoller.ResumeToken` has been removed
+- Function `*AvailabilitySetsClientListPager.Err` has been removed
+- Function `*DiskEncryptionSetsClientCreateOrUpdatePoller.FinalResponse` has been removed
+- Function `*VirtualMachineRunCommandsClientCreateOrUpdatePoller.Poll` has been removed
+- Function `*DiskAccessesClientUpdateAPrivateEndpointConnectionPollerResponse.Resume` has been removed
+- Function `*CapacityReservationGroupsClientListBySubscriptionPager.PageResponse` has been removed
+- Function `ExpandTypesForGetCapacityReservationGroups.ToPtr` has been removed
+- Function `*GalleriesClientCreateOrUpdatePoller.ResumeToken` has been removed
+- Function `VirtualMachineExtensionsClientCreateOrUpdatePollerResponse.PollUntilDone` has been removed
+- Function `*ImagesClientCreateOrUpdatePoller.ResumeToken` has been removed
+- Function `*DiskEncryptionSetsClientUpdatePoller.Done` has been removed
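The `Begin*` long-running operations above now hand back a generic `*armruntime.Poller[T]` directly, and the hand-written `*PollerResponse`/`*Poller` helpers whose removals are listed below (`PollUntilDone`, `Resume`, `FinalResponse`, `Done`, `Poll`, `ResumeToken`) become methods on that poller. A hedged sketch of the new flow, with resource names as placeholders and the second `PollUntilDone` argument left nil (its exact options type depends on the azcore release in use):

```go
package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute"
)

// deleteVM sketches the generic poller flow for one of the Begin* methods listed above.
func deleteVM(ctx context.Context, client *armcompute.VirtualMachinesClient) error {
	// BeginDelete now returns *armruntime.Poller[VirtualMachinesClientDeleteResponse]
	// instead of a VirtualMachinesClientDeletePollerResponse wrapper.
	poller, err := client.BeginDelete(ctx, "<resource-group>", "<vm-name>", nil)
	if err != nil {
		return err
	}
	// ResumeToken on the poller replaces the removed Poller.ResumeToken/
	// PollerResponse.Resume pair; the token can be stored and later passed
	// back through the Begin* options to rehydrate the operation.
	if token, err := poller.ResumeToken(); err == nil {
		log.Printf("resume token: %s", token)
	}
	// PollUntilDone replaces the removed *PollerResponse.PollUntilDone helper.
	_, err = poller.PollUntilDone(ctx, nil)
	return err
}
```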
+- Function `*DiskEncryptionSetsClientUpdatePoller.FinalResponse` has been removed
+- Function `*ImagesClientUpdatePoller.FinalResponse` has been removed
+- Function `*CapacityReservationsClientListByCapacityReservationGroupPager.NextPage` has been removed
+- Function `*CloudServicesClientPowerOffPoller.Done` has been removed
+- Function `*GalleryImageVersionsClientDeletePoller.Done` has been removed
+- Function `*DiskAccessesClientListPager.Err` has been removed
+- Function `*DedicatedHostsClientDeletePoller.ResumeToken` has been removed
+- Function `*VirtualMachineExtensionsClientCreateOrUpdatePoller.Poll` has been removed
+- Function `*VirtualMachineScaleSetVMsClientListPager.NextPage` has been removed
+- Function `VirtualMachineScaleSetVMsClientStartPollerResponse.PollUntilDone` has been removed
+- Function `DiskEncryptionSetsClientDeletePollerResponse.PollUntilDone` has been removed
+- Function `*VirtualMachineScaleSetVMRunCommandsClientUpdatePoller.Done` has been removed
+- Function `*VirtualMachineScaleSetsClientCreateOrUpdatePollerResponse.Resume` has been removed
+- Function `*VirtualMachineScaleSetsClientGetOSUpgradeHistoryPager.PageResponse` has been removed
+- Function `*DiskAccessesClientListPrivateEndpointConnectionsPager.NextPage` has been removed
+- Function `VirtualMachineSizeTypes.ToPtr` has been removed
+- Function `*VirtualMachineScaleSetsClientCreateOrUpdatePoller.ResumeToken` has been removed
+- Function `*VirtualMachineExtensionsClientUpdatePoller.FinalResponse` has been removed
+- Function `*ProximityPlacementGroupsClientListByResourceGroupPager.NextPage` has been removed
+- Function `*SnapshotsClientListByResourceGroupPager.PageResponse` has been removed
+- Function `*LogAnalyticsClientExportThrottledRequestsPollerResponse.Resume` has been removed
+- Function `*GalleryApplicationsClientUpdatePoller.ResumeToken` has been removed
+- Function `*DiskEncryptionSetsClientDeletePollerResponse.Resume` has been removed
+- Function `EncryptionType.ToPtr` has been removed
+- Function `DiskAccessesClientUpdateAPrivateEndpointConnectionPollerResponse.PollUntilDone` has been removed
+- Function `*DiskAccessesClientUpdatePoller.Poll` has been removed
+- Function `*DisksClientUpdatePoller.FinalResponse` has been removed
+- Function `*VirtualMachinesClientStartPollerResponse.Resume` has been removed
+- Function `*VirtualMachineScaleSetsClientUpdatePoller.Poll` has been removed
+- Function `*DiskAccessesClientDeletePoller.ResumeToken` has been removed
+- Function `*CloudServiceRoleInstancesClientRestartPoller.Done` has been removed
+- Function `*VirtualMachineScaleSetsClientRestartPoller.FinalResponse` has been removed
+- Function `*CloudServicesClientRebuildPollerResponse.Resume` has been removed
+- Function `*LogAnalyticsClientExportThrottledRequestsPoller.Poll` has been removed
+- Function `*VirtualMachineRunCommandsClientListByVirtualMachinePager.Err` has been removed
+- Function `*VirtualMachinesClientPowerOffPollerResponse.Resume` has been removed
+- Function `*VirtualMachineScaleSetVMsClientRunCommandPoller.Done` has been removed
+- Function `VirtualMachineScaleSetsClientPerformMaintenancePollerResponse.PollUntilDone` has been removed
+- Function `*CloudServiceOperatingSystemsClientListOSFamiliesPager.PageResponse` has been removed
+- Function `*ImagesClientUpdatePollerResponse.Resume` has been removed
+- Function `*VirtualMachineScaleSetVMsClientPowerOffPoller.ResumeToken` has been removed
+- Function `*SnapshotsClientCreateOrUpdatePoller.ResumeToken` has been removed
+- Function `*SharedGalleryImageVersionsClientListPager.PageResponse` has been removed
+- Function `*GalleryImagesClientUpdatePoller.FinalResponse` has been removed
+- Function `*VirtualMachineRunCommandsClientListByVirtualMachinePager.PageResponse` has been removed
+- Function `*ProximityPlacementGroupsClientListBySubscriptionPager.Err` has been removed
+- Function `*DedicatedHostsClientRestartPollerResponse.Resume` has been removed
+- Function `*VirtualMachineExtensionsClientCreateOrUpdatePoller.ResumeToken` has been removed
+- Function `*SSHPublicKeysClientListBySubscriptionPager.NextPage` has been removed
+- Function `*CloudServicesClientPowerOffPollerResponse.Resume` has been removed
+- Function `CachingTypes.ToPtr` has been removed
+- Function `*ImagesClientListByResourceGroupPager.Err` has been removed
+- Function `*ImagesClientListByResourceGroupPager.PageResponse` has been removed
+- Function `*VirtualMachinesClientRunCommandPoller.ResumeToken` has been removed
+- Function `VirtualMachineScaleSetExtensionsClientDeletePollerResponse.PollUntilDone` has been removed
+- Function `*VirtualMachineScaleSetsClientListByLocationPager.Err` has been removed
+- Function `*DiskEncryptionSetsClientListByResourceGroupPager.Err` has been removed
+- Function `*GalleriesClientDeletePoller.ResumeToken` has been removed
+- Function `*VirtualMachineScaleSetsClientStartPoller.Poll` has been removed
+- Function `*VirtualMachineScaleSetVMsClientRunCommandPoller.ResumeToken` has been removed
+- Function `*DiskRestorePointClientGrantAccessPoller.FinalResponse` has been removed
+- Function `*AvailabilitySetsClientListPager.PageResponse` has been removed
+- Function `*VirtualMachinesClientRedeployPoller.FinalResponse` has been removed
+- Function `*VirtualMachineScaleSetsClientStartPoller.FinalResponse` has been removed
+- Function `*RestorePointsClientDeletePoller.FinalResponse` has been removed
+- Function `DedicatedHostsClientUpdatePollerResponse.PollUntilDone` has been removed
+- Function `ResourceSKURestrictionsType.ToPtr` has been removed
+- Function `*DiskAccessesClientListPrivateEndpointConnectionsPager.Err` has been removed
+- Function `IntervalInMins.ToPtr` has been removed
+- Function `*SnapshotsClientRevokeAccessPollerResponse.Resume` has been removed
+- Function `*VirtualMachineRunCommandsClientListPager.Err` has been removed
+- Function `*VirtualMachineScaleSetVMRunCommandsClientListPager.NextPage` has been removed
+- Function `VirtualMachineScaleSetVMExtensionsClientUpdatePollerResponse.PollUntilDone` has been removed
+- Function `*VirtualMachineRunCommandsClientCreateOrUpdatePollerResponse.Resume` has been removed
+- Function `*VirtualMachineRunCommandsClientUpdatePoller.Poll` has been removed
+- Function `*CloudServicesClientStartPoller.Done` has been removed
+- Function `*VirtualMachinesClientStartPoller.ResumeToken` has been removed
+- Function `VirtualMachineRunCommandsClientUpdatePollerResponse.PollUntilDone` has been removed
+- Function `*CapacityReservationsClientCreateOrUpdatePoller.Done` has been removed
+- Function `*VirtualMachineScaleSetExtensionsClientDeletePoller.FinalResponse` has been removed
+- Function `*VirtualMachineScaleSetsClientUpdatePoller.Done` has been removed
+- Function `ReplicationStatusTypes.ToPtr` has been removed
+- Function `*DiskEncryptionSetsClientListPager.PageResponse` has been removed
+- Function `*RestorePointsClientDeletePoller.Done` has been removed
+- Function `VirtualMachineScaleSetsClientRestartPollerResponse.PollUntilDone` has been removed
+- Function `*GalleryImagesClientCreateOrUpdatePoller.Done` has been removed
+- Function `ImagesClientDeletePollerResponse.PollUntilDone` has been removed
+- Function `PatchInstallationState.ToPtr` has been removed
+- Function `*VirtualMachinesClientListByLocationPager.PageResponse` has been removed
+- Function `*GalleryImagesClientUpdatePollerResponse.Resume` has been removed
+- Function `*GalleryApplicationsClientListByGalleryPager.PageResponse` has been removed
+- Function `*CloudServicesClientRebuildPoller.Done` has been removed
+- Function `*DiskAccessesClientDeleteAPrivateEndpointConnectionPollerResponse.Resume` has been removed
+- Function `*DiskAccessesClientCreateOrUpdatePoller.Poll` has been removed
+- Function `*GalleryApplicationsClientUpdatePoller.Poll` has been removed
+- Function `*ImagesClientListPager.NextPage` has been removed
+- Function `VirtualMachinesClientStartPollerResponse.PollUntilDone` has been removed
+- Function `*SSHPublicKeysClientListBySubscriptionPager.Err` has been removed
+- Function `*GallerySharingProfileClientUpdatePollerResponse.Resume` has been removed
+- Function `*SnapshotsClientRevokeAccessPoller.ResumeToken` has been removed
+- Function `OrchestrationMode.ToPtr` has been removed
+- Function `*GalleryImagesClientUpdatePoller.Poll` has been removed
+- Function `NetworkAccessPolicy.ToPtr` has been removed
+- Function `*VirtualMachinesClientReapplyPoller.Poll` has been removed
+- Function `*DisksClientListByResourceGroupPager.PageResponse` has been removed
+- Function `GalleryPropertiesProvisioningState.ToPtr` has been removed
+- Function `*VirtualMachineRunCommandsClientListByVirtualMachinePager.NextPage` has been removed
+- Function `VirtualMachineScaleSetVMsClientReimageAllPollerResponse.PollUntilDone` has been removed
+- Function `*ProximityPlacementGroupsClientListByResourceGroupPager.Err` has been removed
+- Function `*GalleryApplicationVersionsClientListByGalleryApplicationPager.Err` has been removed
+- Function `PrivateEndpointServiceConnectionStatus.ToPtr` has been removed
+- Function `*CloudServicesClientReimagePoller.FinalResponse` has been removed
+- Function `*CloudServicesClientDeleteInstancesPoller.Poll` has been removed
+- Function `DedicatedHostLicenseTypes.ToPtr` has been removed
+- Function `*VirtualMachineExtensionsClientCreateOrUpdatePollerResponse.Resume` has been removed
+- Function `*SharedGalleriesClientListPager.NextPage` has been removed
+- Function `*VirtualMachineScaleSetsClientDeletePoller.ResumeToken` has been removed
+- Function `CloudServicesClientReimagePollerResponse.PollUntilDone` has been removed
+- Function `*CloudServiceRoleInstancesClientReimagePoller.Done` has been removed
+- Function `*VirtualMachinesClientRestartPoller.ResumeToken` has been removed
+- Function `*DiskEncryptionSetsClientUpdatePoller.Poll` has been removed
+- Function `*VirtualMachineExtensionsClientUpdatePollerResponse.Resume` has been removed
+- Function `*VirtualMachineScaleSetsClientStartPoller.ResumeToken` has been removed
+- Function `OrchestrationServiceState.ToPtr` has been removed
+- Function `*SSHPublicKeysClientListByResourceGroupPager.Err` has been removed
+- Function `*CloudServicesUpdateDomainClientWalkUpdateDomainPollerResponse.Resume` has been removed
+- Function `*DiskAccessesClientDeletePollerResponse.Resume` has been removed
+- Function `*VirtualMachinesClientPowerOffPoller.FinalResponse` has been removed
+- Function `*VirtualMachineScaleSetsClientDeletePollerResponse.Resume` has been removed
+- Function `*VirtualMachineScaleSetVMsClientDeletePoller.Poll` has been removed
+- Function `GalleryApplicationVersionsClientUpdatePollerResponse.PollUntilDone` has been removed
+- Function `*VirtualMachineScaleSetsClientRestartPoller.Done` has been removed
+- Function `*SnapshotsClientListPager.PageResponse` has been removed
+- Function `*DiskAccessesClientCreateOrUpdatePollerResponse.Resume` has been removed
+- Function `*VirtualMachinesClientUpdatePollerResponse.Resume` has been removed
+- Function `*VirtualMachineScaleSetVMsClientStartPoller.FinalResponse` has been removed
+- Function `GalleryImagesClientCreateOrUpdatePollerResponse.PollUntilDone` has been removed
+- Function `VirtualMachineScaleSetsClientDeleteInstancesPollerResponse.PollUntilDone` has been removed
+- Function `*CloudServiceRoleInstancesClientDeletePollerResponse.Resume` has been removed
+- Function `*VirtualMachineScaleSetsClientListAllPager.NextPage` has been removed
+- Function `*SnapshotsClientDeletePoller.ResumeToken` has been removed
+- Function `*VirtualMachineScaleSetVMsClientStartPoller.Poll` has been removed
+- Function `HyperVGenerationType.ToPtr` has been removed
+- Function `OperatingSystemType.ToPtr` has been removed
+- Function `*VirtualMachineScaleSetVMsClientPerformMaintenancePoller.Poll` has been removed
+- Function `*VirtualMachineScaleSetsClientDeletePoller.Poll` has been removed
+- Function `*DedicatedHostsClientUpdatePollerResponse.Resume` has been removed
+- Function `VirtualMachinesClientAssessPatchesPollerResponse.PollUntilDone` has been removed
+- Function `*DiskAccessesClientDeletePoller.Done` has been removed
+- Function `*VirtualMachineScaleSetVMsClientReimagePoller.FinalResponse` has been removed
+- Function `VirtualMachineScaleSetVMRunCommandsClientDeletePollerResponse.PollUntilDone` has been removed
+- Function `*CapacityReservationsClientDeletePoller.Done` has been removed
+- Function `*VirtualMachinesClientCapturePoller.ResumeToken` has been removed
+- Function `*VirtualMachinesClientReapplyPoller.Done` has been removed
+- Function `*VirtualMachineScaleSetVMsClientDeallocatePoller.Poll` has been removed
+- Function `*VirtualMachineScaleSetExtensionsClientCreateOrUpdatePollerResponse.Resume` has been removed
+- Function `*GalleryImageVersionsClientUpdatePoller.Poll` has been removed
+- Function `VirtualMachinesClientCreateOrUpdatePollerResponse.PollUntilDone` has been removed
+- Function `*VirtualMachineExtensionsClientDeletePollerResponse.Resume` has been removed
+- Function `*VirtualMachinesClientCreateOrUpdatePoller.ResumeToken` has been removed
+- Function `*VirtualMachineScaleSetExtensionsClientListPager.PageResponse` has been removed
+- Function `*DiskEncryptionSetsClientUpdatePoller.ResumeToken` has been removed
+- Function `ExtendedLocationType.ToPtr` has been removed
+- Function `VirtualMachinesClientDeletePollerResponse.PollUntilDone` has been removed
+- Function `*GalleryApplicationsClientCreateOrUpdatePoller.FinalResponse` has been removed
+- Function `*VirtualMachineScaleSetsClientUpdateInstancesPoller.FinalResponse` has been removed
+- Function `VirtualMachineScaleSetsClientUpdatePollerResponse.PollUntilDone` has been removed
+- Function `*DedicatedHostGroupsClientListBySubscriptionPager.NextPage` has been removed
+- Function `*GalleryApplicationVersionsClientDeletePoller.FinalResponse` has been removed
+- Function `*VirtualMachineScaleSetVMsClientDeallocatePoller.ResumeToken` has been removed
+- Function `OperatingSystemTypes.ToPtr` has been removed
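The `ToPtr` removals in this list drop the per-enum pointer helpers that existed on every constant type. Their usual replacement is the generics-based `to.Ptr` helper from `sdk/azcore/to`; a small sketch with enum values chosen only for illustration:

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute"
)

func main() {
	// Before: caching := armcompute.CachingTypesReadOnly.ToPtr()
	// After: one generic helper covers every enum and scalar type.
	caching := to.Ptr(armcompute.CachingTypesReadOnly)
	osType := to.Ptr(armcompute.OperatingSystemTypesLinux)
	fmt.Println(*caching, *osType)
}
```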
+- Function `*DisksClientRevokeAccessPoller.ResumeToken` has been removed
+- Function `OrchestrationServiceNames.ToPtr` has been removed
+- Function `*GalleryImagesClientListByGalleryPager.Err` has been removed
+- Function `*VirtualMachineScaleSetsClientDeallocatePoller.Done` has been removed
+- Function `*VirtualMachinesClientDeletePoller.FinalResponse` has been removed
+- Function `*CloudServicesClientRestartPoller.FinalResponse` has been removed
+- Function `*VirtualMachineScaleSetsClientRedeployPoller.Poll` has been removed
+- Function `*DiskAccessesClientDeletePoller.FinalResponse` has been removed
+- Function `*VirtualMachineScaleSetsClientDeletePoller.FinalResponse` has been removed
+- Function `*VirtualMachineScaleSetVMsClientReimagePollerResponse.Resume` has been removed
+- Function `*VirtualMachinesClientReapplyPoller.ResumeToken` has been removed
+- Function `*GalleryImageVersionsClientCreateOrUpdatePoller.Poll` has been removed
+- Function `*VirtualMachineScaleSetVMRunCommandsClientDeletePoller.Poll` has been removed
+- Function `*DiskRestorePointClientRevokeAccessPoller.FinalResponse` has been removed
+- Function `*CloudServicesClientDeleteInstancesPoller.ResumeToken` has been removed
+- Function `*DedicatedHostsClientUpdatePoller.Poll` has been removed
+- Function `*VirtualMachineScaleSetVMsClientReimageAllPoller.FinalResponse` has been removed
+- Function `*DiskAccessesClientUpdatePollerResponse.Resume` has been removed
+- Function `*DiskRestorePointClientRevokeAccessPoller.Poll` has been removed
+- Function `CloudServiceRoleInstancesClientRestartPollerResponse.PollUntilDone` has been removed
+- Function `*VirtualMachineRunCommandsClientCreateOrUpdatePoller.FinalResponse` has been removed
+- Function `*VirtualMachineScaleSetVMsClientUpdatePoller.Poll` has been removed
+- Function `*VirtualMachineScaleSetVMsClientRedeployPoller.Done` has been removed
+- Function `VirtualMachineScaleSetExtensionsClientCreateOrUpdatePollerResponse.PollUntilDone` has been removed
+- Function `*VirtualMachineScaleSetExtensionsClientCreateOrUpdatePoller.FinalResponse` has been removed
+- Function `DiffDiskOptions.ToPtr` has been removed
+- Function `*VirtualMachinesClientRedeployPoller.Poll` has been removed
+- Function `*GalleryImageVersionsClientCreateOrUpdatePoller.ResumeToken` has been removed
+- Function `*CloudServicesClientUpdatePoller.Poll` has been removed
+- Function `*VirtualMachineScaleSetVMRunCommandsClientUpdatePoller.Poll` has been removed
+- Function `*VirtualMachineScaleSetsClientDeallocatePoller.Poll` has been removed
+- Function `*DiskRestorePointClientGrantAccessPollerResponse.Resume` has been removed
+- Function `StorageAccountTypes.ToPtr` has been removed
+- Function `CapacityReservationGroupInstanceViewTypes.ToPtr` has been removed
+- Function `CapacityReservationsClientDeletePollerResponse.PollUntilDone` has been removed
+- Function `HostCaching.ToPtr` has been removed
+- Function `*VirtualMachineScaleSetsClientSetOrchestrationServiceStatePollerResponse.Resume` has been removed
+- Function `VirtualMachineScaleSetVMsClientPowerOffPollerResponse.PollUntilDone` has been removed
+- Function `*VirtualMachineScaleSetVMExtensionsClientCreateOrUpdatePoller.Done` has been removed
+- Function `*GallerySharingProfileClientUpdatePoller.Poll` has been removed
+- Function `*VirtualMachineRunCommandsClientListPager.NextPage` has been removed
+- Function `*VirtualMachineRunCommandsClientCreateOrUpdatePoller.ResumeToken` has been removed
+- Function `*VirtualMachineScaleSetVMsClientUpdatePoller.ResumeToken` has been removed
+- Function `*VirtualMachineScaleSetVMRunCommandsClientUpdatePollerResponse.Resume` has been removed
+- Function `*VirtualMachineScaleSetVMsClientStartPoller.Done` has been removed
+- Function `DedicatedHostsClientCreateOrUpdatePollerResponse.PollUntilDone` has been removed
+- Function `*AvailabilitySetsClientListBySubscriptionPager.Err` has been removed
+- Function `VirtualMachineScaleSetsClientRedeployPollerResponse.PollUntilDone` has been removed
+- Function `*VirtualMachinesClientConvertToManagedDisksPoller.Poll` has been removed
+- Function `*CloudServicesUpdateDomainClientWalkUpdateDomainPoller.Done` has been removed
+- Function `*VirtualMachineScaleSetVMsClientDeallocatePoller.Done` has been removed
+- Function `*VirtualMachineScaleSetVMExtensionsClientUpdatePoller.FinalResponse` has been removed
+- Function `*GalleryImagesClientCreateOrUpdatePoller.FinalResponse` has been removed
+- Function `VirtualMachineExtensionsClientUpdatePollerResponse.PollUntilDone` has been removed
+- Function `*VirtualMachinesClientDeallocatePoller.Poll` has been removed
+- Function `CapacityReservationsClientUpdatePollerResponse.PollUntilDone` has been removed
+- Function `*VirtualMachineScaleSetsClientReimagePoller.FinalResponse` has been removed
+- Function `*SnapshotsClientRevokeAccessPoller.Done` has been removed
+- Function `*CloudServicesClientCreateOrUpdatePoller.Done` has been removed
+- Function `*CloudServiceRolesClientListPager.Err` has been removed
+- Function `*VirtualMachineScaleSetVMRunCommandsClientUpdatePoller.FinalResponse` has been removed
+- Function `*VirtualMachinesClientRestartPoller.FinalResponse` has been removed
+- Function `*GalleryApplicationVersionsClientDeletePoller.ResumeToken` has been removed
+- Function `*DedicatedHostsClientCreateOrUpdatePoller.FinalResponse` has been removed
+- Function `DiskDeleteOptionTypes.ToPtr` has been removed
+- Function `DeleteOptions.ToPtr` has been removed
+- Function `*VirtualMachineScaleSetExtensionsClientCreateOrUpdatePoller.Done` has been removed
+- Function `*VirtualMachineScaleSetsClientUpdateInstancesPoller.Poll` has been removed
+- Function `*CapacityReservationGroupsClientListByResourceGroupPager.Err` has been removed
+- Function `*VirtualMachineScaleSetRollingUpgradesClientStartExtensionUpgradePollerResponse.Resume` has been removed
+- Function `*VirtualMachineScaleSetsClientReimagePoller.Poll` has been removed
+- Function `NetworkAPIVersion.ToPtr` has been removed
+- Function `InstanceViewTypes.ToPtr` has been removed
+- Function `*CloudServiceOperatingSystemsClientListOSFamiliesPager.Err` has been removed
+- Function `*GalleryApplicationVersionsClientCreateOrUpdatePoller.Poll` has been removed
+- Function `SnapshotsClientGrantAccessPollerResponse.PollUntilDone` has been removed
+- Function `*CapacityReservationsClientCreateOrUpdatePoller.Poll` has been removed
+- Function `RestorePointsClientDeletePollerResponse.PollUntilDone` has been removed
+- Function `*DiskAccessesClientUpdateAPrivateEndpointConnectionPoller.Done` has been removed
+- Function `*VirtualMachineRunCommandsClientUpdatePoller.Done` has been removed
+- Function `GalleryApplicationVersionsClientDeletePollerResponse.PollUntilDone` has been removed
+- Function `DiskCreateOptionTypes.ToPtr` has been removed
+- Function `VirtualMachinesClientRunCommandPollerResponse.PollUntilDone` has been removed
+- Function `*CloudServicesClientRebuildPoller.ResumeToken` has been removed
+- Function `RollingUpgradeActionType.ToPtr` has been removed
+- Function `AggregatedReplicationState.ToPtr` has been removed
+- Function `*CloudServicesClientStartPoller.Poll` has been removed
+- Function `*VirtualMachineScaleSetsClientListByLocationPager.PageResponse` has been removed
+- Function `*GalleryImageVersionsClientDeletePoller.ResumeToken` has been removed
+- Function `*VirtualMachineScaleSetsClientListPager.Err` has been removed
+- Function `*VirtualMachineScaleSetsClientCreateOrUpdatePoller.FinalResponse` has been removed
+- Function `DiskEncryptionSetsClientUpdatePollerResponse.PollUntilDone` has been removed
+- Function `*CloudServicesClientDeleteInstancesPoller.Done` has been removed
+- Function `*DiskRestorePointClientGrantAccessPoller.Done` has been removed
+- Function `VirtualMachinesClientDeallocatePollerResponse.PollUntilDone` has been removed
+- Function `*VirtualMachineScaleSetVMsClientRedeployPoller.Poll` has been removed
+- Function `*CloudServicesClientCreateOrUpdatePoller.ResumeToken` has been removed
+- Function `*VirtualMachineScaleSetVMsClientRedeployPoller.FinalResponse` has been removed
+- Function `*DedicatedHostsClientRestartPoller.Done` has been removed
+- Function `*VirtualMachineScaleSetVMsClientDeletePoller.ResumeToken` has been removed
+- Function `*VirtualMachinesClientStartPoller.Poll` has been removed
+- Function `*CapacityReservationsClientCreateOrUpdatePoller.ResumeToken` has been removed
+- Function `*GalleriesClientCreateOrUpdatePoller.FinalResponse` has been removed
+- Function `VirtualMachinesClientConvertToManagedDisksPollerResponse.PollUntilDone` has been removed
+- Function `*DedicatedHostsClientUpdatePoller.Done` has been removed
+- Function `*CloudServicesClientPowerOffPoller.ResumeToken` has been removed
+- Function `*SnapshotsClientCreateOrUpdatePoller.Done` has been removed
+- Function `*VirtualMachineScaleSetRollingUpgradesClientStartOSUpgradePoller.Poll` has been removed
+- Function `*VirtualMachinesClientCreateOrUpdatePoller.Poll` has been removed
+- Function `*DiskEncryptionSetsClientListAssociatedResourcesPager.NextPage` has been removed
+- Function `*VirtualMachineScaleSetsClientPerformMaintenancePoller.Poll` has been removed
+- Function `*VirtualMachineScaleSetVMExtensionsClientUpdatePollerResponse.Resume` has been removed
+- Function `*VirtualMachineScaleSetsClientPowerOffPoller.Poll` has been removed
+- Function `VirtualMachineScaleSetRollingUpgradesClientCancelPollerResponse.PollUntilDone` has been removed
+- Function `*CapacityReservationsClientDeletePoller.Poll` has been removed
+- Function `*DisksClientListByResourceGroupPager.NextPage` has been removed
+- Function `ImagesClientCreateOrUpdatePollerResponse.PollUntilDone` has been removed
+- Function `*VirtualMachinesClientReapplyPoller.FinalResponse` has been removed
+- Function `*VirtualMachineExtensionsClientUpdatePoller.Done` has been removed
+- Function `*SnapshotsClientDeletePollerResponse.Resume` has been removed
+- Function `*GalleryImageVersionsClientUpdatePoller.FinalResponse` has been removed
+- Function `*DedicatedHostsClientListByHostGroupPager.Err` has been removed
+- Function `*DiskEncryptionSetsClientCreateOrUpdatePoller.ResumeToken` has been removed
+- Function `*VirtualMachineScaleSetRollingUpgradesClientStartExtensionUpgradePoller.ResumeToken` has been removed
+- Function `*DiskRestorePointClientRevokeAccessPollerResponse.Resume` has been removed
+- Function `*DiskRestorePointClientListByRestorePointPager.NextPage` has been removed
+- Function `*RestorePointsClientDeletePollerResponse.Resume` has been removed
+- Function `VirtualMachineScaleSetsClientStartPollerResponse.PollUntilDone` has been removed
+- Function `*DisksClientCreateOrUpdatePoller.FinalResponse` has been removed
+- Function `*VirtualMachineScaleSetsClientListPager.PageResponse` has been removed
+- Function `*SnapshotsClientListByResourceGroupPager.NextPage` has been removed
+- Function `*CloudServicesClientListAllPager.NextPage` has been removed
+- Function `*RestorePointsClientCreatePollerResponse.Resume` has been removed
+- Function `*GalleryApplicationsClientCreateOrUpdatePoller.Poll` has been removed
+- Function `GalleriesClientUpdatePollerResponse.PollUntilDone` has been removed
+- Function `*GalleriesClientCreateOrUpdatePollerResponse.Resume` has been removed
+- Function `*VirtualMachineScaleSetVMsClientRedeployPollerResponse.Resume` has been removed
+- Function `*VirtualMachineScaleSetVMsClientRestartPoller.Poll` has been removed
+- Function `*GalleryApplicationVersionsClientUpdatePollerResponse.Resume` has been removed
+- Function `*VirtualMachineExtensionsClientDeletePoller.Done` has been removed
+- Function `*VirtualMachineScaleSetVMsClientRunCommandPoller.FinalResponse` has been removed
+- Function `*SnapshotsClientCreateOrUpdatePollerResponse.Resume` has been removed
+- Function `*CloudServicesUpdateDomainClientWalkUpdateDomainPoller.FinalResponse` has been removed
+- Function `RepairAction.ToPtr` has been removed
+- Function `DiskSecurityTypes.ToPtr` has been removed
+- Function `VirtualMachinePriorityTypes.ToPtr` has been removed
+- Function `VirtualMachineScaleSetsClientDeallocatePollerResponse.PollUntilDone` has been removed
+- Function `*SnapshotsClientGrantAccessPoller.FinalResponse` has been removed
+- Function `*VirtualMachineScaleSetsClientDeallocatePoller.FinalResponse` has been removed
+- Function `*CloudServiceRoleInstancesClientListPager.PageResponse` has been removed
+- Function `*CapacityReservationsClientDeletePoller.FinalResponse` has been removed
+- Function `*DiskRestorePointClientGrantAccessPoller.ResumeToken` has been removed
+- Function `VirtualMachineScaleSetsClientCreateOrUpdatePollerResponse.PollUntilDone` has been removed
+- Function `MaintenanceOperationResultCodeTypes.ToPtr` has been removed
+- Function `*SnapshotsClientUpdatePoller.ResumeToken` has been removed
+- Function `*GalleriesClientListPager.NextPage` has been removed
+- Function `*GalleriesClientDeletePollerResponse.Resume` has been removed
+- Function `*VirtualMachinesClientDeallocatePollerResponse.Resume` has been removed
+- Function `*VirtualMachineScaleSetsClientPerformMaintenancePoller.FinalResponse` has been removed
+- Function `*DisksClientCreateOrUpdatePoller.Done` has been removed
+- Function `*SnapshotsClientListPager.Err` has been removed
+- Function `*CloudServicesClientStartPollerResponse.Resume` has been removed
+- Function `SnapshotsClientDeletePollerResponse.PollUntilDone` has been removed
+- Function `*VirtualMachineScaleSetVMExtensionsClientCreateOrUpdatePollerResponse.Resume` has been removed
+- Function `*CloudServicesClientCreateOrUpdatePoller.FinalResponse` has been removed
+- Function `*DiskRestorePointClientListByRestorePointPager.Err` has been removed
+- Function `VirtualMachineScaleSetsClientReimagePollerResponse.PollUntilDone` has been removed
+- Function `*CloudServiceOperatingSystemsClientListOSVersionsPager.PageResponse` has been removed
+- Function `CloudServicesClientDeleteInstancesPollerResponse.PollUntilDone` has been removed
+- Function `IPVersion.ToPtr` has been removed
+- Function `*VirtualMachineScaleSetsClientCreateOrUpdatePoller.Poll` has been removed
+- Function `*VirtualMachineScaleSetsClientDeleteInstancesPoller.Poll` has been removed
+- Function `CloudServicesUpdateDomainClientWalkUpdateDomainPollerResponse.PollUntilDone` has been removed
+- Function `StatusLevelTypes.ToPtr` has been removed
+- Function `*CloudServicesClientUpdatePollerResponse.Resume` has been removed
+- Function `*CloudServicesUpdateDomainClientListUpdateDomainsPager.Err` has been removed
+- Function `*DiskEncryptionSetsClientDeletePoller.ResumeToken` has been removed
+- Function `*ImagesClientDeletePoller.Poll` has been removed
+- Function `*VirtualMachineScaleSetVMsClientDeletePoller.Done` has been removed
+- Function `*VirtualMachineScaleSetVMsClientPowerOffPoller.Done` has been removed
+- Function `*VirtualMachineScaleSetRollingUpgradesClientCancelPollerResponse.Resume` has been removed
+- Function `*CloudServicesUpdateDomainClientListUpdateDomainsPager.NextPage` has been removed
+- Function `*VirtualMachinesClientAssessPatchesPoller.Done` has been removed
+- Function `*DedicatedHostsClientUpdatePoller.FinalResponse` has been removed
+- Function `*VirtualMachinesClientCreateOrUpdatePoller.Done` has been removed
+- Function `GalleryExpandParams.ToPtr` has been removed
+- Function `*VirtualMachineScaleSetVMsClientRestartPoller.ResumeToken` has been removed
+- Function `*DisksClientGrantAccessPoller.Poll` has been removed
+- Function `*VirtualMachineExtensionsClientDeletePoller.Poll` has been removed
+- Function `*VirtualMachineScaleSetVMsClientPowerOffPollerResponse.Resume` has been removed
+- Function `*VirtualMachineScaleSetVMsClientListPager.PageResponse` has been removed
+- Function `*CloudServiceRoleInstancesClientRestartPollerResponse.Resume` has been removed
+- Function `*CloudServiceRolesClientListPager.PageResponse` has been removed
+- Function `*SharedGalleryImagesClientListPager.PageResponse` has been removed
+- Function `*VirtualMachineScaleSetRollingUpgradesClientStartOSUpgradePoller.Done` has been removed
+- Function `*VirtualMachinesClientAssessPatchesPoller.FinalResponse` has been removed
+- Function `ExecutionState.ToPtr` has been removed
+- Function `*CapacityReservationsClientCreateOrUpdatePoller.FinalResponse` has been removed
+- Function `VirtualMachineScaleSetScaleInRules.ToPtr` has been removed
+- Function `*CloudServicesClientListPager.PageResponse` has been removed
+- Function `*GalleriesClientListPager.Err` has been removed
+- Function `SharingUpdateOperationTypes.ToPtr` has been removed
+- Function `*CapacityReservationsClientUpdatePoller.Done` has been removed
+- Function `*VirtualMachinesClientInstallPatchesPoller.Poll` has been removed
+- Function `*GalleryApplicationVersionsClientDeletePoller.Done` has been removed
+- Function `*VirtualMachinesClientUpdatePoller.Done` has been removed
+- Function `*VirtualMachinesClientRunCommandPoller.Done` has been removed
+- Function `*DiskAccessesClientDeleteAPrivateEndpointConnectionPoller.Done` has been removed
+- Function `*VirtualMachineScaleSetRollingUpgradesClientStartExtensionUpgradePoller.Done` has been removed
+- Function `*DiskEncryptionSetsClientListAssociatedResourcesPager.PageResponse` has been removed
+- Function `*CloudServicesClientDeleteInstancesPollerResponse.Resume` has been removed
+- Function `*GalleryApplicationVersionsClientCreateOrUpdatePollerResponse.Resume` has been removed
+- Function `*DiskRestorePointClientRevokeAccessPoller.Done` has been removed
+- Function `*RestorePointCollectionsClientListAllPager.PageResponse` has been removed
+- Function `*DisksClientDeletePoller.Done` has been removed
+- Function `*VirtualMachineScaleSetVMsClientReimagePoller.ResumeToken` has been removed
+- Function `*CloudServicesClientDeletePoller.Poll` has been removed
+- Function `*VirtualMachineScaleSetVMRunCommandsClientUpdatePoller.ResumeToken` has been removed
+- Function `GalleryApplicationsClientCreateOrUpdatePollerResponse.PollUntilDone` has been removed
+- Function `*VirtualMachineScaleSetsClientReimagePoller.Done` has been removed
+- Function `*DisksClientRevokeAccessPoller.FinalResponse` has been removed
+- Function `CloudServicesClientPowerOffPollerResponse.PollUntilDone` has been removed
+- Function `DataAccessAuthMode.ToPtr` has been removed
+- Function `*VirtualMachineScaleSetVMsClientDeletePollerResponse.Resume` has been removed
+- Function `*SnapshotsClientGrantAccessPoller.Done` has been removed
+- Function `*VirtualMachinesClientConvertToManagedDisksPollerResponse.Resume` has been removed
+- Function `*CapacityReservationsClientUpdatePoller.FinalResponse` has been removed
+- Function `*VirtualMachinesClientPerformMaintenancePoller.Poll` has been removed
+- Function `*RestorePointsClientDeletePoller.ResumeToken` has been removed
+- Function `AvailabilitySetSKUTypes.ToPtr` has been removed
+- Function `*VirtualMachinesClientAssessPatchesPoller.ResumeToken` has been removed
+- Function `*DiskAccessesClientListPrivateEndpointConnectionsPager.PageResponse` has been removed
+- Function `*GalleryApplicationsClientCreateOrUpdatePoller.ResumeToken` has been removed
+- Function `*DisksClientUpdatePoller.Done` has been removed
+- Function `VirtualMachineScaleSetVMsClientDeletePollerResponse.PollUntilDone` has been removed
+- Function `*GalleriesClientCreateOrUpdatePoller.Done` has been removed
+- Function `*VirtualMachinesClientInstallPatchesPoller.FinalResponse` has been removed
+- Function `*CapacityReservationsClientUpdatePoller.ResumeToken` has been removed
+- Function `*DisksClientListPager.NextPage` has been removed
+- Function `*CloudServiceRolesClientListPager.NextPage` has been removed
+- Function `*CloudServiceRoleInstancesClientReimagePoller.FinalResponse` has been removed
+- Function `*SnapshotsClientCreateOrUpdatePoller.Poll` has been removed
+- Function `*GalleryImagesClientListByGalleryPager.NextPage` has been removed
+- Function `*VirtualMachineScaleSetsClientPowerOffPollerResponse.Resume` has been removed
+- Function `*CloudServicesClientListPager.NextPage` has been removed
+- Function `SharedToValues.ToPtr` has been removed
+- Function `*DiskEncryptionSetsClientDeletePoller.Poll` has been removed
+- Function `GalleryApplicationsClientDeletePollerResponse.PollUntilDone` has been removed
+- Function `*VirtualMachineScaleSetsClientStartPollerResponse.Resume` has been removed
+- Function `CloudServicesClientCreateOrUpdatePollerResponse.PollUntilDone` has been removed
+- Function `VirtualMachineScaleSetVMExtensionsClientDeletePollerResponse.PollUntilDone` has been removed
+- Function `*GalleryImagesClientCreateOrUpdatePoller.ResumeToken` has been removed
+- Function `*DisksClientDeletePoller.Poll` has been removed
+- Function `*DedicatedHostGroupsClientListBySubscriptionPager.Err` has been removed
+- Function `VirtualMachineScaleSetsClientUpdateInstancesPollerResponse.PollUntilDone` has been removed
+- Function `*AvailabilitySetsClientListBySubscriptionPager.PageResponse` has been removed
+- Function `*VirtualMachineScaleSetsClientListSKUsPager.NextPage` has been removed
+- Function `*VirtualMachinesClientInstallPatchesPollerResponse.Resume` has been removed
+- Function `ExtendedLocationTypes.ToPtr` has been removed
+- Function `*DiskEncryptionSetsClientListPager.Err` has been removed
+- Function `*VirtualMachinesClientDeletePoller.Done` has been removed
+- Function `*CloudServicesClientStartPoller.ResumeToken` has been removed
+- Function `VMGuestPatchRebootSetting.ToPtr` has been removed
+- Function `*ImagesClientDeletePoller.ResumeToken` has been removed
+- Function `*DiskAccessesClientUpdatePoller.ResumeToken` has been removed
+- Function `*VirtualMachineScaleSetVMExtensionsClientUpdatePoller.Done` has been removed
+- Function `CloudServiceUpgradeMode.ToPtr` has been removed
+- Function `*CloudServiceRoleInstancesClientReimagePoller.Poll` has been removed
+- Function `*VirtualMachineScaleSetVMsClientRunCommandPollerResponse.Resume` has been removed
+- Function `*ImagesClientUpdatePoller.ResumeToken` has been removed
+- Function `VirtualMachinesClientRestartPollerResponse.PollUntilDone` has been removed
+- Function `*GalleryApplicationsClientDeletePoller.Poll` has been removed
+- Function `VirtualMachineScaleSetVMsClientRestartPollerResponse.PollUntilDone` has been removed
+- Function `DisksClientRevokeAccessPollerResponse.PollUntilDone` has been removed
+- Function `*CloudServicesClientStartPoller.FinalResponse` has been removed
+- Function `*LogAnalyticsClientExportThrottledRequestsPoller.ResumeToken` has been removed
+- Function `*VirtualMachineScaleSetVMsClientReimageAllPollerResponse.Resume` has been removed
+- Function `*GalleryApplicationVersionsClientDeletePollerResponse.Resume` has been removed
+- Function `*VirtualMachineScaleSetsClientListByLocationPager.NextPage` has been removed
+- Function `*VirtualMachineScaleSetsClientDeletePoller.Done` has been removed
+- Function `*CloudServicesClientUpdatePoller.FinalResponse` has been removed
+- Function `CloudServicesClientRestartPollerResponse.PollUntilDone` has been removed
+- Function `*DisksClientGrantAccessPoller.FinalResponse` has been removed
+- Function `DiskEncryptionSetType.ToPtr` has been removed
+- Function `*GalleryImagesClientDeletePoller.Poll` has been removed
+- Function `*VirtualMachineScaleSetsClientDeallocatePollerResponse.Resume` has been removed
+- Function `*LogAnalyticsClientExportRequestRateByIntervalPoller.FinalResponse` has been removed
+- Function `HyperVGeneration.ToPtr` has been removed
+- Function `*ResourceSKUsClientListPager.NextPage` has been removed
+- Function `*DedicatedHostsClientCreateOrUpdatePoller.Poll` has been removed
+- Function `*VirtualMachinesClientPerformMaintenancePollerResponse.Resume` has been removed
+- Function `*GalleryApplicationsClientDeletePoller.Done` has been removed
+- Function `*ImagesClientDeletePoller.Done` has been removed
+- Function `*VirtualMachineScaleSetRollingUpgradesClientCancelPoller.Done` has been removed
+- Function `*GalleryImagesClientListByGalleryPager.PageResponse` has been removed
+- Function `VirtualMachineScaleSetsClientDeletePollerResponse.PollUntilDone` has been removed
+- Function `*VirtualMachineRunCommandsClientDeletePoller.ResumeToken` has been removed
+- Function `*SnapshotsClientUpdatePollerResponse.Resume` has been removed
+- Function `*CloudServicesClientUpdatePoller.ResumeToken` has been removed
+- Function `*GalleryApplicationsClientUpdatePoller.FinalResponse` has been removed
+- Function `*ImagesClientCreateOrUpdatePollerResponse.Resume`
has been removed +- Function `*DisksClientListByResourceGroupPager.Err` has been removed +- Function `*VirtualMachinesClientReimagePollerResponse.Resume` has been removed +- Function `*CloudServicesClientRestartPollerResponse.Resume` has been removed +- Function `*CloudServicesClientListAllPager.Err` has been removed +- Function `CloudServiceRoleInstancesClientRebuildPollerResponse.PollUntilDone` has been removed +- Function `DiskRestorePointClientRevokeAccessPollerResponse.PollUntilDone` has been removed +- Function `*GalleryImageVersionsClientListByGalleryImagePager.PageResponse` has been removed +- Function `LinuxVMGuestPatchMode.ToPtr` has been removed +- Function `VMGuestPatchClassificationWindows.ToPtr` has been removed +- Function `VirtualMachinesClientUpdatePollerResponse.PollUntilDone` has been removed +- Function `DisksClientUpdatePollerResponse.PollUntilDone` has been removed +- Function `*CloudServiceRoleInstancesClientRebuildPoller.ResumeToken` has been removed +- Function `*GalleryImagesClientDeletePoller.ResumeToken` has been removed +- Function `ReplicationMode.ToPtr` has been removed +- Function `*AvailabilitySetsClientListBySubscriptionPager.NextPage` has been removed +- Function `*ProximityPlacementGroupsClientListBySubscriptionPager.PageResponse` has been removed +- Function `*CloudServiceRoleInstancesClientRebuildPoller.Done` has been removed +- Function `*RestorePointsClientCreatePoller.Done` has been removed +- Function `*VirtualMachineScaleSetsClientPerformMaintenancePoller.ResumeToken` has been removed +- Function `SnapshotsClientCreateOrUpdatePollerResponse.PollUntilDone` has been removed +- Function `RestorePointCollectionsClientDeletePollerResponse.PollUntilDone` has been removed +- Function `*VirtualMachineScaleSetExtensionsClientListPager.Err` has been removed +- Function `*RestorePointCollectionsClientListPager.NextPage` has been removed +- Function `*VirtualMachinesClientAssessPatchesPollerResponse.Resume` has been removed +- Function `*VirtualMachineScaleSetVMsClientDeallocatePollerResponse.Resume` has been removed +- Function `*DedicatedHostsClientDeletePoller.Done` has been removed +- Function `*DisksClientDeletePoller.FinalResponse` has been removed +- Function `*SnapshotsClientListByResourceGroupPager.Err` has been removed +- Function `VirtualMachineExtensionsClientDeletePollerResponse.PollUntilDone` has been removed +- Function `*GalleriesClientUpdatePollerResponse.Resume` has been removed +- Function `SettingNames.ToPtr` has been removed +- Function `UpgradeMode.ToPtr` has been removed +- Function `*DisksClientUpdatePoller.ResumeToken` has been removed +- Function `*SharedGalleriesClientListPager.PageResponse` has been removed +- Function `*VirtualMachinesClientInstallPatchesPoller.ResumeToken` has been removed +- Function `*VirtualMachineScaleSetsClientReimageAllPoller.FinalResponse` has been removed +- Function `DiskEncryptionSetIdentityType.ToPtr` has been removed +- Function `*GalleriesClientCreateOrUpdatePoller.Poll` has been removed +- Function `*SnapshotsClientRevokeAccessPoller.FinalResponse` has been removed +- Function `*VirtualMachineScaleSetVMRunCommandsClientCreateOrUpdatePoller.FinalResponse` has been removed +- Function `*VirtualMachineScaleSetVMsClientDeallocatePoller.FinalResponse` has been removed +- Function `*VirtualMachineScaleSetVMRunCommandsClientDeletePoller.FinalResponse` has been removed +- Function `ResourceIdentityType.ToPtr` has been removed +- Function `PublicIPAddressSKUTier.ToPtr` has been removed +- Function 
`*CloudServicesUpdateDomainClientWalkUpdateDomainPoller.Poll` has been removed +- Function `CapacityReservationInstanceViewTypes.ToPtr` has been removed +- Function `*VirtualMachinesClientUpdatePoller.ResumeToken` has been removed +- Function `*CapacityReservationsClientCreateOrUpdatePollerResponse.Resume` has been removed +- Function `*VirtualMachineExtensionsClientUpdatePoller.ResumeToken` has been removed +- Function `*VirtualMachineScaleSetsClientPerformMaintenancePollerResponse.Resume` has been removed +- Function `*GalleryImageVersionsClientCreateOrUpdatePoller.Done` has been removed +- Function `*VirtualMachineRunCommandsClientDeletePollerResponse.Resume` has been removed +- Function `*VirtualMachineScaleSetVMExtensionsClientCreateOrUpdatePoller.FinalResponse` has been removed +- Function `*VirtualMachinesClientCapturePoller.Poll` has been removed +- Function `*VirtualMachineScaleSetVMsClientUpdatePoller.Done` has been removed +- Function `*SSHPublicKeysClientListBySubscriptionPager.PageResponse` has been removed +- Function `*VirtualMachineScaleSetsClientUpdatePoller.ResumeToken` has been removed +- Function `*DiskAccessesClientUpdateAPrivateEndpointConnectionPoller.ResumeToken` has been removed +- Function `*DiskRestorePointClientRevokeAccessPoller.ResumeToken` has been removed +- Function `*VirtualMachineScaleSetExtensionsClientCreateOrUpdatePoller.Poll` has been removed +- Function `*DiskAccessesClientDeleteAPrivateEndpointConnectionPoller.Poll` has been removed +- Function `*VirtualMachineScaleSetsClientRestartPoller.Poll` has been removed +- Function `*CloudServiceRoleInstancesClientRebuildPoller.Poll` has been removed +- Function `*ImagesClientListPager.Err` has been removed +- Function `*VirtualMachineScaleSetsClientRestartPoller.ResumeToken` has been removed +- Function `*CloudServicesClientDeletePollerResponse.Resume` has been removed +- Function `*DisksClientCreateOrUpdatePoller.Poll` has been removed +- Function `*SnapshotsClientUpdatePoller.Poll` has been removed +- Function `*CloudServicesUpdateDomainClientListUpdateDomainsPager.PageResponse` has been removed +- Function `ResourceSKUCapacityScaleType.ToPtr` has been removed +- Function `VirtualMachineScaleSetsClientSetOrchestrationServiceStatePollerResponse.PollUntilDone` has been removed +- Function `*VirtualMachinesClientRedeployPoller.ResumeToken` has been removed +- Function `*SnapshotsClientUpdatePoller.FinalResponse` has been removed +- Function `*DedicatedHostsClientCreateOrUpdatePollerResponse.Resume` has been removed +- Function `*GalleryApplicationsClientDeletePoller.ResumeToken` has been removed +- Function `PrivateEndpointConnectionProvisioningState.ToPtr` has been removed +- Function `SecurityTypes.ToPtr` has been removed +- Function `*ImagesClientDeletePollerResponse.Resume` has been removed +- Function `*GalleryApplicationsClientCreateOrUpdatePollerResponse.Resume` has been removed +- Function `*GalleryApplicationsClientCreateOrUpdatePoller.Done` has been removed +- Function `*CloudServicesClientRebuildPoller.Poll` has been removed +- Function `*VirtualMachineScaleSetVMsClientPerformMaintenancePoller.Done` has been removed +- Function `*VirtualMachineScaleSetVMsClientListPager.Err` has been removed +- Function `*CloudServiceRoleInstancesClientRestartPoller.Poll` has been removed +- Function `*DedicatedHostsClientRestartPoller.FinalResponse` has been removed +- Function `*ImagesClientListByResourceGroupPager.NextPage` has been removed +- Function `*VirtualMachinesClientRedeployPoller.Done` has been 
removed +- Function `*VirtualMachineScaleSetExtensionsClientUpdatePoller.Done` has been removed +- Function `*VirtualMachinesClientDeallocatePoller.FinalResponse` has been removed +- Function `*VirtualMachinesClientUpdatePoller.FinalResponse` has been removed +- Function `*CloudServiceRoleInstancesClientDeletePoller.ResumeToken` has been removed +- Function `*DiskRestorePointClientGrantAccessPoller.Poll` has been removed +- Function `*GallerySharingProfileClientUpdatePoller.Done` has been removed +- Function `*VirtualMachinesClientListByLocationPager.Err` has been removed +- Function `*GalleriesClientUpdatePoller.ResumeToken` has been removed +- Function `*VirtualMachineScaleSetExtensionsClientUpdatePoller.ResumeToken` has been removed +- Function `VirtualMachinesClientCapturePollerResponse.PollUntilDone` has been removed +- Function `CloudServiceRoleInstancesClientReimagePollerResponse.PollUntilDone` has been removed +- Function `*DisksClientGrantAccessPoller.ResumeToken` has been removed +- Function `VirtualMachinesClientRedeployPollerResponse.PollUntilDone` has been removed +- Function `*VirtualMachineScaleSetsClientListSKUsPager.Err` has been removed +- Function `*VirtualMachineScaleSetVMRunCommandsClientListPager.Err` has been removed +- Function `*VirtualMachineScaleSetVMExtensionsClientCreateOrUpdatePoller.ResumeToken` has been removed +- Function `*VirtualMachineScaleSetsClientUpdatePoller.FinalResponse` has been removed +- Function `*VirtualMachineScaleSetsClientUpdateInstancesPoller.Done` has been removed +- Function `ProximityPlacementGroupType.ToPtr` has been removed +- Function `*RestorePointCollectionsClientListPager.PageResponse` has been removed +- Function `*DiskAccessesClientCreateOrUpdatePoller.Done` has been removed +- Function `*VirtualMachineRunCommandsClientUpdatePoller.ResumeToken` has been removed +- Function `*DisksClientUpdatePollerResponse.Resume` has been removed +- Function `*DiskEncryptionSetsClientListAssociatedResourcesPager.Err` has been removed +- Function `*CloudServicesClientDeletePoller.FinalResponse` has been removed +- Function `*DisksClientListPager.PageResponse` has been removed +- Function `CapacityReservationsClientCreateOrUpdatePollerResponse.PollUntilDone` has been removed +- Function `*VirtualMachineScaleSetExtensionsClientUpdatePoller.Poll` has been removed +- Function `*DiskEncryptionSetsClientCreateOrUpdatePoller.Poll` has been removed +- Function `*CloudServicesClientCreateOrUpdatePoller.Poll` has been removed +- Function `VirtualMachineScaleSetVMsClientRunCommandPollerResponse.PollUntilDone` has been removed +- Function `*VirtualMachinesClientRestartPoller.Done` has been removed +- Function `GalleryApplicationsClientUpdatePollerResponse.PollUntilDone` has been removed +- Function `*VirtualMachinesClientReapplyPollerResponse.Resume` has been removed +- Function `*VirtualMachinesClientConvertToManagedDisksPoller.ResumeToken` has been removed +- Function `*VirtualMachinesClientStartPoller.Done` has been removed +- Function `GalleryImageVersionsClientUpdatePollerResponse.PollUntilDone` has been removed +- Function `CloudServicesClientStartPollerResponse.PollUntilDone` has been removed +- Function `*VirtualMachinesClientDeletePoller.ResumeToken` has been removed +- Function `VMGuestPatchRebootStatus.ToPtr` has been removed +- Function `*VirtualMachineScaleSetVMsClientPerformMaintenancePollerResponse.Resume` has been removed +- Function `*VirtualMachinesClientDeallocatePoller.Done` has been removed +- Function 
`*VirtualMachinesClientReimagePoller.FinalResponse` has been removed +- Function `*VirtualMachineExtensionsClientDeletePoller.ResumeToken` has been removed +- Function `*VirtualMachineScaleSetsClientListSKUsPager.PageResponse` has been removed +- Function `*CloudServiceRoleInstancesClientRebuildPollerResponse.Resume` has been removed +- Function `*DiskEncryptionSetsClientCreateOrUpdatePoller.Done` has been removed +- Function `*VirtualMachinesClientInstallPatchesPoller.Done` has been removed +- Function `*GalleryImageVersionsClientDeletePollerResponse.Resume` has been removed +- Function `VirtualMachinesClientPowerOffPollerResponse.PollUntilDone` has been removed +- Function `*CloudServicesClientRestartPoller.Poll` has been removed +- Function `OrchestrationServiceStateAction.ToPtr` has been removed +- Function `*VirtualMachineScaleSetVMsClientDeletePoller.FinalResponse` has been removed +- Function `*VirtualMachineScaleSetsClientPowerOffPoller.Done` has been removed +- Function `*LogAnalyticsClientExportRequestRateByIntervalPollerResponse.Resume` has been removed +- Function `OperatingSystemStateTypes.ToPtr` has been removed +- Function `CloudServicesClientDeletePollerResponse.PollUntilDone` has been removed +- Function `*SnapshotsClientCreateOrUpdatePoller.FinalResponse` has been removed +- Function `DiskStorageAccountTypes.ToPtr` has been removed +- Function `GalleryImageVersionsClientCreateOrUpdatePollerResponse.PollUntilDone` has been removed +- Function `*VirtualMachinesClientPowerOffPoller.Poll` has been removed +- Function `*GalleryImageVersionsClientDeletePoller.Poll` has been removed +- Function `*CloudServiceRoleInstancesClientRestartPoller.FinalResponse` has been removed +- Function `*VirtualMachineScaleSetsClientReimageAllPoller.Poll` has been removed +- Function `WindowsVMGuestPatchMode.ToPtr` has been removed +- Function `*DisksClientGrantAccessPollerResponse.Resume` has been removed +- Function `ArchitectureTypes.ToPtr` has been removed +- Function `GalleryImageVersionsClientDeletePollerResponse.PollUntilDone` has been removed +- Function `*DedicatedHostsClientListByHostGroupPager.NextPage` has been removed +- Function `*CloudServicesClientReimagePoller.ResumeToken` has been removed +- Function `*DedicatedHostsClientCreateOrUpdatePoller.ResumeToken` has been removed +- Function `*DiskAccessesClientCreateOrUpdatePoller.ResumeToken` has been removed +- Function `*VirtualMachineRunCommandsClientListPager.PageResponse` has been removed +- Function `*GalleriesClientUpdatePoller.FinalResponse` has been removed +- Function `DiskAccessesClientCreateOrUpdatePollerResponse.PollUntilDone` has been removed +- Function `*DiskAccessesClientUpdateAPrivateEndpointConnectionPoller.Poll` has been removed +- Function `DiskDetachOptionTypes.ToPtr` has been removed +- Function `DiskState.ToPtr` has been removed +- Function `*DiskEncryptionSetsClientCreateOrUpdatePollerResponse.Resume` has been removed +- Function `PatchOperationStatus.ToPtr` has been removed +- Function `*CloudServiceRoleInstancesClientDeletePoller.Poll` has been removed +- Function `*VirtualMachinesClientCapturePoller.Done` has been removed +- Function `*DiskAccessesClientUpdatePoller.Done` has been removed +- Function `*CloudServiceRoleInstancesClientListPager.Err` has been removed +- Function `LinuxPatchAssessmentMode.ToPtr` has been removed +- Function `SharingProfileGroupTypes.ToPtr` has been removed +- Function `*GalleryImageVersionsClientCreateOrUpdatePollerResponse.Resume` has been removed +- Function 
`VirtualMachineScaleSetExtensionsClientUpdatePollerResponse.PollUntilDone` has been removed +- Function `VirtualMachineRunCommandsClientDeletePollerResponse.PollUntilDone` has been removed +- Function `*VirtualMachineRunCommandsClientDeletePoller.Poll` has been removed +- Function `*DedicatedHostGroupsClientListBySubscriptionPager.PageResponse` has been removed +- Function `*RestorePointCollectionsClientDeletePoller.Done` has been removed +- Function `GallerySharingProfileClientUpdatePollerResponse.PollUntilDone` has been removed +- Function `*CloudServicesClientRestartPoller.ResumeToken` has been removed +- Function `VirtualMachinesClientReapplyPollerResponse.PollUntilDone` has been removed +- Function `DiskEncryptionSetsClientCreateOrUpdatePollerResponse.PollUntilDone` has been removed +- Function `RestorePointCollectionExpandOptions.ToPtr` has been removed +- Function `*VirtualMachineScaleSetVMExtensionsClientUpdatePoller.Poll` has been removed +- Function `*CloudServiceOperatingSystemsClientListOSVersionsPager.NextPage` has been removed +- Function `*VirtualMachineScaleSetExtensionsClientDeletePoller.Done` has been removed +- Function `*VirtualMachineScaleSetsClientUpdateInstancesPoller.ResumeToken` has been removed +- Function `*DiskAccessesClientCreateOrUpdatePoller.FinalResponse` has been removed +- Function `ImagesClientUpdatePollerResponse.PollUntilDone` has been removed +- Function `*DisksClientRevokeAccessPoller.Done` has been removed +- Function `*VirtualMachineScaleSetsClientReimageAllPollerResponse.Resume` has been removed +- Function `*GalleryApplicationsClientListByGalleryPager.NextPage` has been removed +- Function `*VirtualMachineScaleSetRollingUpgradesClientCancelPoller.FinalResponse` has been removed +- Function `*GalleryImagesClientCreateOrUpdatePoller.Poll` has been removed +- Function `*VirtualMachineScaleSetVMsClientReimagePoller.Poll` has been removed +- Function `ExpandTypesForGetVMScaleSets.ToPtr` has been removed +- Function `*VirtualMachineRunCommandsClientDeletePoller.FinalResponse` has been removed +- Function `*VirtualMachineScaleSetsClientStartPoller.Done` has been removed +- Function `*VirtualMachinesClientListByLocationPager.NextPage` has been removed +- Function `*VirtualMachineScaleSetsClientPerformMaintenancePoller.Done` has been removed +- Function `*VirtualMachineScaleSetExtensionsClientListPager.NextPage` has been removed +- Function `*VirtualMachineScaleSetsClientDeleteInstancesPollerResponse.Resume` has been removed +- Function `*VirtualMachineScaleSetsClientSetOrchestrationServiceStatePoller.Done` has been removed +- Function `LogAnalyticsClientExportRequestRateByIntervalPollerResponse.PollUntilDone` has been removed +- Function `*GalleryImageVersionsClientUpdatePoller.ResumeToken` has been removed +- Function `*VirtualMachineScaleSetVMsClientReimageAllPoller.ResumeToken` has been removed +- Function `*CloudServiceRoleInstancesClientReimagePoller.ResumeToken` has been removed +- Function `*VirtualMachineScaleSetsClientSetOrchestrationServiceStatePoller.ResumeToken` has been removed +- Function `*VirtualMachineScaleSetVMsClientPerformMaintenancePoller.FinalResponse` has been removed +- Function `*VirtualMachineScaleSetsClientSetOrchestrationServiceStatePoller.Poll` has been removed +- Function `HyperVGenerationTypes.ToPtr` has been removed +- Function `VirtualMachineScaleSetRollingUpgradesClientStartExtensionUpgradePollerResponse.PollUntilDone` has been removed +- Function `Architecture.ToPtr` has been removed +- Function 
`PublicIPAddressSKUName.ToPtr` has been removed +- Function `VirtualMachineScaleSetVMsClientDeallocatePollerResponse.PollUntilDone` has been removed +- Function `*VirtualMachineScaleSetsClientCreateOrUpdatePoller.Done` has been removed +- Function `*CapacityReservationsClientListByCapacityReservationGroupPager.Err` has been removed +- Function `*VirtualMachinesClientListPager.Err` has been removed +- Function `*DedicatedHostsClientRestartPoller.Poll` has been removed +- Function `*VirtualMachineScaleSetExtensionsClientCreateOrUpdatePoller.ResumeToken` has been removed +- Function `*DisksClientDeletePoller.ResumeToken` has been removed +- Function `*VirtualMachineScaleSetExtensionsClientDeletePoller.ResumeToken` has been removed +- Function `*GalleryApplicationVersionsClientDeletePoller.Poll` has been removed +- Function `*DiskAccessesClientListPager.NextPage` has been removed +- Function `*DiskAccessesClientListByResourceGroupPager.Err` has been removed +- Function `*VirtualMachinesClientCreateOrUpdatePollerResponse.Resume` has been removed +- Function `*VirtualMachineScaleSetsClientRedeployPollerResponse.Resume` has been removed +- Function `*VirtualMachineScaleSetVMsClientPowerOffPoller.Poll` has been removed +- Function `*RestorePointsClientDeletePoller.Poll` has been removed +- Function `*ImagesClientCreateOrUpdatePoller.Poll` has been removed +- Function `*GalleryImageVersionsClientListByGalleryImagePager.Err` has been removed +- Function `*DiskEncryptionSetsClientListByResourceGroupPager.PageResponse` has been removed +- Function `*GalleryApplicationsClientDeletePollerResponse.Resume` has been removed +- Function `*GalleriesClientUpdatePoller.Done` has been removed +- Function `*CapacityReservationGroupsClientListByResourceGroupPager.NextPage` has been removed +- Function `*VirtualMachineScaleSetVMsClientReimageAllPoller.Done` has been removed +- Function `*VirtualMachineScaleSetsClientReimagePollerResponse.Resume` has been removed +- Function `*LogAnalyticsClientExportRequestRateByIntervalPoller.Poll` has been removed +- Function `*VirtualMachineScaleSetRollingUpgradesClientStartOSUpgradePoller.ResumeToken` has been removed +- Function `*DedicatedHostsClientRestartPoller.ResumeToken` has been removed +- Function `SnapshotStorageAccountTypes.ToPtr` has been removed +- Function `VirtualMachineRunCommandsClientCreateOrUpdatePollerResponse.PollUntilDone` has been removed +- Function `*RestorePointCollectionsClientDeletePoller.FinalResponse` has been removed +- Function `*CloudServiceRoleInstancesClientRestartPoller.ResumeToken` has been removed +- Function `DisksClientCreateOrUpdatePollerResponse.PollUntilDone` has been removed +- Function `*VirtualMachineScaleSetVMsClientRedeployPoller.ResumeToken` has been removed +- Function `*VirtualMachineScaleSetsClientListAllPager.Err` has been removed +- Function `*GalleryApplicationVersionsClientUpdatePoller.Done` has been removed +- Function `*LogAnalyticsClientExportThrottledRequestsPoller.Done` has been removed +- Function `DisksClientDeletePollerResponse.PollUntilDone` has been removed +- Function `*CloudServicesClientPowerOffPoller.Poll` has been removed +- Function `*GalleryApplicationVersionsClientUpdatePoller.Poll` has been removed +- Function `*ImagesClientListPager.PageResponse` has been removed +- Function `AccessLevel.ToPtr` has been removed +- Function `*VirtualMachineScaleSetVMRunCommandsClientCreateOrUpdatePoller.Poll` has been removed +- Function `DiskCreateOption.ToPtr` has been removed +- Function 
`*DedicatedHostsClientCreateOrUpdatePoller.Done` has been removed +- Function `VirtualMachineScaleSetVMsClientPerformMaintenancePollerResponse.PollUntilDone` has been removed +- Function `*VirtualMachineScaleSetsClientRedeployPoller.ResumeToken` has been removed +- Function `*LogAnalyticsClientExportRequestRateByIntervalPoller.ResumeToken` has been removed +- Function `VirtualMachineScaleSetVMRunCommandsClientUpdatePollerResponse.PollUntilDone` has been removed +- Function `*VirtualMachineScaleSetVMRunCommandsClientDeletePoller.Done` has been removed +- Function `*LogAnalyticsClientExportRequestRateByIntervalPoller.Done` has been removed +- Function `*DiskAccessesClientUpdateAPrivateEndpointConnectionPoller.FinalResponse` has been removed +- Function `*CloudServicesClientListPager.Err` has been removed +- Function `*VirtualMachineScaleSetsClientListPager.NextPage` has been removed +- Function `*VirtualMachineScaleSetsClientPowerOffPoller.FinalResponse` has been removed +- Function `*RestorePointCollectionsClientListAllPager.Err` has been removed +- Function `VirtualMachineEvictionPolicyTypes.ToPtr` has been removed +- Function `*ProximityPlacementGroupsClientListBySubscriptionPager.NextPage` has been removed +- Function `*VirtualMachineScaleSetVMExtensionsClientUpdatePoller.ResumeToken` has been removed +- Function `*DisksClientListPager.Err` has been removed +- Function `*ProximityPlacementGroupsClientListByResourceGroupPager.PageResponse` has been removed +- Function `*DisksClientRevokeAccessPollerResponse.Resume` has been removed +- Function `*CloudServiceRoleInstancesClientReimagePollerResponse.Resume` has been removed +- Function `GalleryImagesClientUpdatePollerResponse.PollUntilDone` has been removed +- Function `*GalleriesClientDeletePoller.Poll` has been removed +- Function `*DiskAccessesClientListByResourceGroupPager.PageResponse` has been removed +- Function `SnapshotsClientUpdatePollerResponse.PollUntilDone` has been removed +- Function `*VirtualMachineScaleSetsClientPowerOffPoller.ResumeToken` has been removed +- Function `*UsageClientListPager.Err` has been removed +- Function `PublicIPAllocationMethod.ToPtr` has been removed +- Function `*SharedGalleryImageVersionsClientListPager.Err` has been removed +- Function `*VirtualMachinesClientListAllPager.NextPage` has been removed +- Function `*VirtualMachinesClientRunCommandPollerResponse.Resume` has been removed +- Function `StorageAccountType.ToPtr` has been removed +- Function `*GalleryImageVersionsClientUpdatePoller.Done` has been removed +- Function `GalleryApplicationVersionPropertiesProvisioningState.ToPtr` has been removed +- Function `*RestorePointCollectionsClientListPager.Err` has been removed +- Function `*CapacityReservationGroupsClientListBySubscriptionPager.Err` has been removed +- Function `*RestorePointsClientCreatePoller.ResumeToken` has been removed +- Function `*CloudServicesClientCreateOrUpdatePollerResponse.Resume` has been removed +- Function `SnapshotsClientRevokeAccessPollerResponse.PollUntilDone` has been removed +- Function `*DedicatedHostsClientListByHostGroupPager.PageResponse` has been removed +- Function `*SnapshotsClientDeletePoller.Poll` has been removed +- Function `*VirtualMachineScaleSetsClientRestartPollerResponse.Resume` has been removed +- Function `IPVersions.ToPtr` has been removed +- Function `VirtualMachineScaleSetVMsClientReimagePollerResponse.PollUntilDone` has been removed +- Function `*DedicatedHostsClientDeletePoller.Poll` has been removed +- Function `PublicNetworkAccess.ToPtr` has 
been removed +- Function `CloudServiceRoleInstancesClientDeletePollerResponse.PollUntilDone` has been removed +- Function `VirtualMachineScaleSetVMRunCommandsClientCreateOrUpdatePollerResponse.PollUntilDone` has been removed +- Function `*VirtualMachineScaleSetVMRunCommandsClientListPager.PageResponse` has been removed +- Function `PatchAssessmentState.ToPtr` has been removed +- Function `*GalleryApplicationsClientUpdatePollerResponse.Resume` has been removed +- Function `*VirtualMachineScaleSetsClientRedeployPoller.Done` has been removed +- Function `*GallerySharingProfileClientUpdatePoller.ResumeToken` has been removed +- Function `*CloudServicesClientReimagePoller.Done` has been removed +- Function `*VirtualMachineScaleSetVMsClientReimageAllPoller.Poll` has been removed +- Function `*CloudServicesClientListAllPager.PageResponse` has been removed +- Function `*VirtualMachinesClientReimagePoller.ResumeToken` has been removed +- Function `*VirtualMachinesClientListAllPager.Err` has been removed +- Function `ResourceSKURestrictionsReasonCode.ToPtr` has been removed +- Function `*VirtualMachineScaleSetRollingUpgradesClientStartOSUpgradePollerResponse.Resume` has been removed +- Function `UpgradeState.ToPtr` has been removed +- Function `*VirtualMachinesClientConvertToManagedDisksPoller.FinalResponse` has been removed +- Function `*VirtualMachinesClientCapturePoller.FinalResponse` has been removed +- Function `*GalleryApplicationVersionsClientListByGalleryApplicationPager.PageResponse` has been removed +- Function `*DedicatedHostsClientUpdatePoller.ResumeToken` has been removed +- Function `*VirtualMachineScaleSetVMsClientRestartPollerResponse.Resume` has been removed +- Function `*VirtualMachinesClientStartPoller.FinalResponse` has been removed +- Function `*RestorePointsClientCreatePoller.FinalResponse` has been removed +- Function `*DisksClientCreateOrUpdatePollerResponse.Resume` has been removed +- Function `*VirtualMachinesClientDeallocatePoller.ResumeToken` has been removed +- Function `*VirtualMachineScaleSetVMExtensionsClientDeletePoller.FinalResponse` has been removed +- Function `*GalleriesClientDeletePoller.FinalResponse` has been removed +- Function `*VirtualMachinesClientAssessPatchesPoller.Poll` has been removed +- Function `*DisksClientUpdatePoller.Poll` has been removed +- Function `*GalleryImagesClientDeletePollerResponse.Resume` has been removed +- Function `*UsageClientListPager.PageResponse` has been removed +- Function `VMDiskTypes.ToPtr` has been removed +- Function `CloudServicesClientRebuildPollerResponse.PollUntilDone` has been removed +- Function `*VirtualMachinesClientConvertToManagedDisksPoller.Done` has been removed +- Function `*CloudServicesClientPowerOffPoller.FinalResponse` has been removed +- Function `*ImagesClientCreateOrUpdatePoller.Done` has been removed +- Function `*VirtualMachinesClientReimagePoller.Done` has been removed +- Function `*LogAnalyticsClientExportThrottledRequestsPoller.FinalResponse` has been removed +- Function `*CapacityReservationsClientUpdatePoller.Poll` has been removed +- Function `*AvailabilitySetsClientListPager.NextPage` has been removed +- Function `*VirtualMachinesClientPerformMaintenancePoller.FinalResponse` has been removed +- Function `*CapacityReservationsClientUpdatePollerResponse.Resume` has been removed +- Function `*CloudServicesClientReimagePoller.Poll` has been removed +- Function `*GalleryApplicationVersionsClientListByGalleryApplicationPager.NextPage` has been removed +- Function 
`*GalleryImagesClientUpdatePoller.Done` has been removed +- Function `*VirtualMachinesClientUpdatePoller.Poll` has been removed +- Function `DisksClientGrantAccessPollerResponse.PollUntilDone` has been removed +- Function `*GalleryApplicationVersionsClientUpdatePoller.FinalResponse` has been removed +- Function `*CloudServiceRoleInstancesClientDeletePoller.FinalResponse` has been removed +- Function `*VirtualMachinesClientPerformMaintenancePoller.Done` has been removed +- Function `*GalleriesClientListPager.PageResponse` has been removed +- Function `*VirtualMachinesClientPowerOffPoller.ResumeToken` has been removed +- Function `*GalleriesClientListByResourceGroupPager.NextPage` has been removed +- Function `*VirtualMachineScaleSetExtensionsClientDeletePoller.Poll` has been removed +- Function `*GalleryApplicationVersionsClientCreateOrUpdatePoller.ResumeToken` has been removed +- Function `RestorePointExpandOptions.ToPtr` has been removed +- Function `*DedicatedHostGroupsClientListByResourceGroupPager.PageResponse` has been removed +- Function `*VirtualMachineScaleSetExtensionsClientUpdatePoller.FinalResponse` has been removed +- Function `*UsageClientListPager.NextPage` has been removed +- Function `*VirtualMachinesClientListPager.NextPage` has been removed +- Function `*CapacityReservationGroupsClientListByResourceGroupPager.PageResponse` has been removed +- Function `*GalleryApplicationVersionsClientCreateOrUpdatePoller.Done` has been removed +- Function `*SSHPublicKeysClientListByResourceGroupPager.NextPage` has been removed +- Function `*VirtualMachineScaleSetVMsClientRestartPoller.Done` has been removed +- Function `*VirtualMachineScaleSetVMsClientStartPollerResponse.Resume` has been removed +- Function `*SnapshotsClientUpdatePoller.Done` has been removed +- Function `RestorePointsClientCreatePollerResponse.PollUntilDone` has been removed +- Function `*VirtualMachineRunCommandsClientCreateOrUpdatePoller.Done` has been removed +- Function `*DiskAccessesClientDeleteAPrivateEndpointConnectionPoller.FinalResponse` has been removed +- Function `VirtualMachineScaleSetSKUScaleType.ToPtr` has been removed +- Function `*VirtualMachineScaleSetRollingUpgradesClientStartOSUpgradePoller.FinalResponse` has been removed +- Function `CloudServicesClientUpdatePollerResponse.PollUntilDone` has been removed +- Function `*VirtualMachineExtensionsClientUpdatePoller.Poll` has been removed +- Function `*VirtualMachineScaleSetVMRunCommandsClientDeletePoller.ResumeToken` has been removed +- Function `*CloudServicesClientDeletePoller.ResumeToken` has been removed +- Function `*RestorePointCollectionsClientListAllPager.NextPage` has been removed +- Function `*ResourceSKUsClientListPager.PageResponse` has been removed +- Function `*VirtualMachineScaleSetVMsClientRunCommandPoller.Poll` has been removed +- Function `*VirtualMachineScaleSetsClientUpdateInstancesPollerResponse.Resume` has been removed +- Function `GalleryImagePropertiesProvisioningState.ToPtr` has been removed +- Function `*VirtualMachineScaleSetVMsClientStartPoller.ResumeToken` has been removed +- Function `*VirtualMachineRunCommandsClientUpdatePollerResponse.Resume` has been removed +- Function `*VirtualMachineScaleSetVMRunCommandsClientDeletePollerResponse.Resume` has been removed +- Function `*VirtualMachineScaleSetVMRunCommandsClientCreateOrUpdatePoller.Done` has been removed +- Function `GalleriesClientCreateOrUpdatePollerResponse.PollUntilDone` has been removed +- Function 
`*VirtualMachineScaleSetRollingUpgradesClientStartExtensionUpgradePoller.Poll` has been removed +- Function `*VirtualMachineScaleSetVMsClientPowerOffPoller.FinalResponse` has been removed +- Function `*VirtualMachinesClientRunCommandPoller.FinalResponse` has been removed +- Function `*VirtualMachineScaleSetsClientUpdatePollerResponse.Resume` has been removed +- Function `*VirtualMachineScaleSetsClientDeleteInstancesPoller.ResumeToken` has been removed +- Function `*SharedGalleryImagesClientListPager.NextPage` has been removed +- Function `ConfidentialVMEncryptionType.ToPtr` has been removed +- Function `VirtualMachineScaleSetVMsClientUpdatePollerResponse.PollUntilDone` has been removed +- Function `*GalleryImagesClientUpdatePoller.ResumeToken` has been removed +- Function `*VirtualMachinesClientRedeployPollerResponse.Resume` has been removed +- Function `*VirtualMachineScaleSetsClientGetOSUpgradeHistoryPager.NextPage` has been removed +- Function `*CapacityReservationsClientDeletePoller.ResumeToken` has been removed +- Function `*DedicatedHostGroupsClientListByResourceGroupPager.NextPage` has been removed +- Function `*SnapshotsClientGrantAccessPoller.Poll` has been removed +- Function `*VirtualMachineScaleSetVMsClientReimagePoller.Done` has been removed +- Function `*CapacityReservationGroupsClientListBySubscriptionPager.NextPage` has been removed +- Function `*RestorePointCollectionsClientDeletePoller.Poll` has been removed +- Function `*DedicatedHostGroupsClientListByResourceGroupPager.Err` has been removed +- Function `*DiskRestorePointClientListByRestorePointPager.PageResponse` has been removed +- Function `*GalleryApplicationsClientListByGalleryPager.Err` has been removed +- Function `*CloudServiceRoleInstancesClientDeletePoller.Done` has been removed +- Function `*CloudServiceRoleInstancesClientRebuildPoller.FinalResponse` has been removed +- Function `*VirtualMachineScaleSetVMExtensionsClientCreateOrUpdatePoller.Poll` has been removed +- Function `*DisksClientDeletePollerResponse.Resume` has been removed +- Function `*VirtualMachineRunCommandsClientDeletePoller.Done` has been removed +- Function `*VirtualMachineScaleSetRollingUpgradesClientStartExtensionUpgradePoller.FinalResponse` has been removed +- Function `*VirtualMachineScaleSetExtensionsClientUpdatePollerResponse.Resume` has been removed +- Function `*ImagesClientCreateOrUpdatePoller.FinalResponse` has been removed +- Function `VirtualMachineScaleSetsClientPowerOffPollerResponse.PollUntilDone` has been removed +- Function `*CapacityReservationsClientDeletePollerResponse.Resume` has been removed +- Function `*DiskAccessesClientDeleteAPrivateEndpointConnectionPoller.ResumeToken` has been removed +- Function `*SharedGalleryImagesClientListPager.Err` has been removed +- Function `*GallerySharingProfileClientUpdatePoller.FinalResponse` has been removed +- Function `*VirtualMachinesClientDeletePollerResponse.Resume` has been removed +- Function `*VirtualMachineScaleSetVMsClientRestartPoller.FinalResponse` has been removed +- Function `*GalleryImagesClientDeletePoller.Done` has been removed +- Function `*VirtualMachinesClientCreateOrUpdatePoller.FinalResponse` has been removed +- Function `*DedicatedHostsClientDeletePollerResponse.Resume` has been removed +- Function `VirtualMachinesClientReimagePollerResponse.PollUntilDone` has been removed +- Function `*GalleryImageVersionsClientCreateOrUpdatePoller.FinalResponse` has been removed +- Function `*GalleryImagesClientCreateOrUpdatePollerResponse.Resume` has been removed +- Function 
`VirtualMachinesClientInstallPatchesPollerResponse.PollUntilDone` has been removed +- Function `*VirtualMachineScaleSetsClientDeleteInstancesPoller.FinalResponse` has been removed +- Function `DiskAccessesClientDeleteAPrivateEndpointConnectionPollerResponse.PollUntilDone` has been removed +- Function `*SnapshotsClientDeletePoller.FinalResponse` has been removed +- Function `VMGuestPatchRebootBehavior.ToPtr` has been removed +- Function `*RestorePointCollectionsClientDeletePoller.ResumeToken` has been removed +- Function `*VirtualMachineRunCommandsClientUpdatePoller.FinalResponse` has been removed +- Function `DedicatedHostsClientRestartPollerResponse.PollUntilDone` has been removed +- Function `*DiskEncryptionSetsClientUpdatePollerResponse.Resume` has been removed +- Function `*VirtualMachinesClientListPager.PageResponse` has been removed +- Function `SelectPermissions.ToPtr` has been removed +- Function `GalleriesClientDeletePollerResponse.PollUntilDone` has been removed +- Function `*CloudServiceOperatingSystemsClientListOSVersionsPager.Err` has been removed +- Function `*ImagesClientUpdatePoller.Poll` has been removed +- Function `*RestorePointCollectionsClientDeletePollerResponse.Resume` has been removed +- Function `*CloudServicesUpdateDomainClientWalkUpdateDomainPoller.ResumeToken` has been removed +- Function `*VirtualMachinesClientCapturePollerResponse.Resume` has been removed +- Function `*DiskAccessesClientListByResourceGroupPager.NextPage` has been removed +- Function `VirtualMachineScaleSetVMExtensionsClientCreateOrUpdatePollerResponse.PollUntilDone` has been removed +- Function `GalleryApplicationVersionsClientCreateOrUpdatePollerResponse.PollUntilDone` has been removed +- Function `VMGuestPatchClassificationLinux.ToPtr` has been removed +- Function `*DiskEncryptionSetsClientDeletePoller.FinalResponse` has been removed +- Function `*VirtualMachinesClientListAllPager.PageResponse` has been removed +- Function `*VirtualMachineScaleSetExtensionsClientDeletePollerResponse.Resume` has been removed +- Function `*GalleryImageVersionsClientDeletePoller.FinalResponse` has been removed +- Function `*CloudServiceRoleInstancesClientListPager.NextPage` has been removed +- Function `*DisksClientGrantAccessPoller.Done` has been removed +- Function `*GalleryApplicationsClientUpdatePoller.Done` has been removed +- Function `*CloudServicesClientUpdatePoller.Done` has been removed +- Function `*SharedGalleriesClientListPager.Err` has been removed +- Function `UpgradeOperationInvoker.ToPtr` has been removed +- Function `SecurityEncryptionTypes.ToPtr` has been removed +- Function `*VirtualMachinesClientPowerOffPoller.Done` has been removed +- Function `*VirtualMachinesClientReimagePoller.Poll` has been removed +- Function `ReplicationState.ToPtr` has been removed +- Function `*VirtualMachineScaleSetsClientListAllPager.PageResponse` has been removed +- Function `*SnapshotsClientDeletePoller.Done` has been removed +- Function `*GalleryImageVersionsClientListByGalleryImagePager.NextPage` has been removed +- Function `*GalleriesClientListByResourceGroupPager.Err` has been removed +- Function `VirtualMachineScaleSetVMsClientRedeployPollerResponse.PollUntilDone` has been removed +- Function `*VirtualMachineScaleSetVMExtensionsClientDeletePoller.Done` has been removed +- Function `*SharedGalleryImageVersionsClientListPager.NextPage` has been removed +- Function `VirtualMachineScaleSetRollingUpgradesClientStartOSUpgradePollerResponse.PollUntilDone` has been removed +- Function 
`*VirtualMachineScaleSetsClientReimageAllPoller.ResumeToken` has been removed +- Function `*DiskEncryptionSetsClientDeletePoller.Done` has been removed +- Function `*VirtualMachinesClientDeletePoller.Poll` has been removed +- Function `*GalleryImagesClientDeletePoller.FinalResponse` has been removed +- Function `DiskAccessesClientUpdatePollerResponse.PollUntilDone` has been removed +- Function `*GalleryApplicationsClientDeletePoller.FinalResponse` has been removed +- Function `*VirtualMachineScaleSetsClientDeleteInstancesPoller.Done` has been removed +- Function `*VirtualMachineScaleSetVMExtensionsClientDeletePoller.Poll` has been removed +- Function `*GalleriesClientUpdatePoller.Poll` has been removed +- Function `DiskRestorePointClientGrantAccessPollerResponse.PollUntilDone` has been removed +- Function `*CloudServicesClientDeletePoller.Done` has been removed +- Function `*DisksClientRevokeAccessPoller.Poll` has been removed +- Function `*DisksClientCreateOrUpdatePoller.ResumeToken` has been removed +- Function `*CapacityReservationsClientListByCapacityReservationGroupPager.PageResponse` has been removed +- Function `*VirtualMachineScaleSetRollingUpgradesClientCancelPoller.Poll` has been removed +- Function `*SnapshotsClientGrantAccessPoller.ResumeToken` has been removed +- Function `ConsistencyModeTypes.ToPtr` has been removed +- Function `VirtualMachinesClientPerformMaintenancePollerResponse.PollUntilDone` has been removed +- Function `VirtualMachineScaleSetsClientReimageAllPollerResponse.PollUntilDone` has been removed +- Function `GalleryImagesClientDeletePollerResponse.PollUntilDone` has been removed +- Function `*DiskAccessesClientUpdatePoller.FinalResponse` has been removed +- Function `*VirtualMachineScaleSetsClientSetOrchestrationServiceStatePoller.FinalResponse` has been removed +- Function `*VirtualMachineScaleSetsClientDeallocatePoller.ResumeToken` has been removed +- Function `*VirtualMachineScaleSetVMsClientUpdatePoller.FinalResponse` has been removed +- Function `*RestorePointsClientCreatePoller.Poll` has been removed +- Function `DiffDiskPlacement.ToPtr` has been removed +- Function `WindowsPatchAssessmentMode.ToPtr` has been removed +- Function `*GalleriesClientDeletePoller.Done` has been removed +- Function `*ResourceSKUsClientListPager.Err` has been removed +- Function `*SSHPublicKeysClientListByResourceGroupPager.PageResponse` has been removed +- Function `*DiskEncryptionSetsClientListPager.NextPage` has been removed +- Function `*VirtualMachineScaleSetVMRunCommandsClientCreateOrUpdatePoller.ResumeToken` has been removed +- Function `*VirtualMachineExtensionsClientCreateOrUpdatePoller.FinalResponse` has been removed +- Function `*VirtualMachineScaleSetsClientGetOSUpgradeHistoryPager.Err` has been removed +- Function `GalleryImageVersionPropertiesProvisioningState.ToPtr` has been removed +- Function `GalleryExtendedLocationType.ToPtr` has been removed +- Function `*VirtualMachineScaleSetsClientRedeployPoller.FinalResponse` has been removed +- Function `*VirtualMachineScaleSetVMsClientUpdatePollerResponse.Resume` has been removed +- Function `*VirtualMachineScaleSetVMExtensionsClientDeletePollerResponse.Resume` has been removed +- Function `*VirtualMachinesClientRestartPoller.Poll` has been removed +- Function `*GalleryApplicationVersionsClientUpdatePoller.ResumeToken` has been removed +- Function `*DiskAccessesClientListPager.PageResponse` has been removed +- Function `*CloudServicesClientDeleteInstancesPoller.FinalResponse` has been removed +- Function 
`RollingUpgradeStatusCode.ToPtr` has been removed +- Function `*VirtualMachineScaleSetVMsClientPerformMaintenancePoller.ResumeToken` has been removed +- Function `ProtocolTypes.ToPtr` has been removed +- Function `*CloudServicesClientRestartPoller.Done` has been removed +- Function `*SnapshotsClientRevokeAccessPoller.Poll` has been removed +- Function `*GalleryApplicationVersionsClientCreateOrUpdatePoller.FinalResponse` has been removed +- Function `*GalleriesClientListByResourceGroupPager.PageResponse` has been removed +- Function `*CloudServicesClientReimagePollerResponse.Resume` has been removed +- Function `*VirtualMachinesClientRunCommandPoller.Poll` has been removed +- Function `*CloudServicesClientRebuildPoller.FinalResponse` has been removed +- Function `LogAnalyticsClientExportThrottledRequestsPollerResponse.PollUntilDone` has been removed +- Function `*VirtualMachinesClientPerformMaintenancePoller.ResumeToken` has been removed +- Function `*VirtualMachineExtensionsClientDeletePoller.FinalResponse` has been removed +- Struct `AvailabilitySetsClientCreateOrUpdateResult` has been removed +- Struct `AvailabilitySetsClientGetResult` has been removed +- Struct `AvailabilitySetsClientListAvailableSizesResult` has been removed +- Struct `AvailabilitySetsClientListBySubscriptionPager` has been removed +- Struct `AvailabilitySetsClientListBySubscriptionResult` has been removed +- Struct `AvailabilitySetsClientListPager` has been removed +- Struct `AvailabilitySetsClientListResult` has been removed +- Struct `AvailabilitySetsClientUpdateResult` has been removed +- Struct `CapacityReservationGroupsClientCreateOrUpdateResult` has been removed +- Struct `CapacityReservationGroupsClientGetResult` has been removed +- Struct `CapacityReservationGroupsClientListByResourceGroupPager` has been removed +- Struct `CapacityReservationGroupsClientListByResourceGroupResult` has been removed +- Struct `CapacityReservationGroupsClientListBySubscriptionPager` has been removed +- Struct `CapacityReservationGroupsClientListBySubscriptionResult` has been removed +- Struct `CapacityReservationGroupsClientUpdateResult` has been removed +- Struct `CapacityReservationsClientCreateOrUpdatePoller` has been removed +- Struct `CapacityReservationsClientCreateOrUpdatePollerResponse` has been removed +- Struct `CapacityReservationsClientCreateOrUpdateResult` has been removed +- Struct `CapacityReservationsClientDeletePoller` has been removed +- Struct `CapacityReservationsClientDeletePollerResponse` has been removed +- Struct `CapacityReservationsClientGetResult` has been removed +- Struct `CapacityReservationsClientListByCapacityReservationGroupPager` has been removed +- Struct `CapacityReservationsClientListByCapacityReservationGroupResult` has been removed +- Struct `CapacityReservationsClientUpdatePoller` has been removed +- Struct `CapacityReservationsClientUpdatePollerResponse` has been removed +- Struct `CapacityReservationsClientUpdateResult` has been removed +- Struct `CloudServiceOperatingSystemsClientGetOSFamilyResult` has been removed +- Struct `CloudServiceOperatingSystemsClientGetOSVersionResult` has been removed +- Struct `CloudServiceOperatingSystemsClientListOSFamiliesPager` has been removed +- Struct `CloudServiceOperatingSystemsClientListOSFamiliesResult` has been removed +- Struct `CloudServiceOperatingSystemsClientListOSVersionsPager` has been removed +- Struct `CloudServiceOperatingSystemsClientListOSVersionsResult` has been removed +- Struct `CloudServiceRoleInstancesClientDeletePoller` has been 
removed +- Struct `CloudServiceRoleInstancesClientDeletePollerResponse` has been removed +- Struct `CloudServiceRoleInstancesClientGetInstanceViewResult` has been removed +- Struct `CloudServiceRoleInstancesClientGetResult` has been removed +- Struct `CloudServiceRoleInstancesClientListPager` has been removed +- Struct `CloudServiceRoleInstancesClientListResult` has been removed +- Struct `CloudServiceRoleInstancesClientRebuildPoller` has been removed +- Struct `CloudServiceRoleInstancesClientRebuildPollerResponse` has been removed +- Struct `CloudServiceRoleInstancesClientReimagePoller` has been removed +- Struct `CloudServiceRoleInstancesClientReimagePollerResponse` has been removed +- Struct `CloudServiceRoleInstancesClientRestartPoller` has been removed +- Struct `CloudServiceRoleInstancesClientRestartPollerResponse` has been removed +- Struct `CloudServiceRolesClientGetResult` has been removed +- Struct `CloudServiceRolesClientListPager` has been removed +- Struct `CloudServiceRolesClientListResult` has been removed +- Struct `CloudServicesClientCreateOrUpdatePoller` has been removed +- Struct `CloudServicesClientCreateOrUpdatePollerResponse` has been removed +- Struct `CloudServicesClientCreateOrUpdateResult` has been removed +- Struct `CloudServicesClientDeleteInstancesPoller` has been removed +- Struct `CloudServicesClientDeleteInstancesPollerResponse` has been removed +- Struct `CloudServicesClientDeletePoller` has been removed +- Struct `CloudServicesClientDeletePollerResponse` has been removed +- Struct `CloudServicesClientGetInstanceViewResult` has been removed +- Struct `CloudServicesClientGetResult` has been removed +- Struct `CloudServicesClientListAllPager` has been removed +- Struct `CloudServicesClientListAllResult` has been removed +- Struct `CloudServicesClientListPager` has been removed +- Struct `CloudServicesClientListResult` has been removed +- Struct `CloudServicesClientPowerOffPoller` has been removed +- Struct `CloudServicesClientPowerOffPollerResponse` has been removed +- Struct `CloudServicesClientRebuildPoller` has been removed +- Struct `CloudServicesClientRebuildPollerResponse` has been removed +- Struct `CloudServicesClientReimagePoller` has been removed +- Struct `CloudServicesClientReimagePollerResponse` has been removed +- Struct `CloudServicesClientRestartPoller` has been removed +- Struct `CloudServicesClientRestartPollerResponse` has been removed +- Struct `CloudServicesClientStartPoller` has been removed +- Struct `CloudServicesClientStartPollerResponse` has been removed +- Struct `CloudServicesClientUpdatePoller` has been removed +- Struct `CloudServicesClientUpdatePollerResponse` has been removed +- Struct `CloudServicesClientUpdateResult` has been removed +- Struct `CloudServicesUpdateDomainClientGetUpdateDomainResult` has been removed +- Struct `CloudServicesUpdateDomainClientListUpdateDomainsPager` has been removed +- Struct `CloudServicesUpdateDomainClientListUpdateDomainsResult` has been removed +- Struct `CloudServicesUpdateDomainClientWalkUpdateDomainPoller` has been removed +- Struct `CloudServicesUpdateDomainClientWalkUpdateDomainPollerResponse` has been removed +- Struct `CommunityGalleriesClientGetResult` has been removed +- Struct `CommunityGalleryImageVersionsClientGetResult` has been removed +- Struct `CommunityGalleryImagesClientGetResult` has been removed +- Struct `DedicatedHostGroupsClientCreateOrUpdateResult` has been removed +- Struct `DedicatedHostGroupsClientGetResult` has been removed +- Struct 
`DedicatedHostGroupsClientListByResourceGroupPager` has been removed
+- Struct `DedicatedHostGroupsClientListByResourceGroupResult` has been removed
+- Struct `DedicatedHostGroupsClientListBySubscriptionPager` has been removed
+- Struct `DedicatedHostGroupsClientListBySubscriptionResult` has been removed
+- Struct `DedicatedHostGroupsClientUpdateResult` has been removed
+- Struct `DedicatedHostsClientCreateOrUpdatePoller` has been removed
+- Struct `DedicatedHostsClientCreateOrUpdatePollerResponse` has been removed
+- Struct `DedicatedHostsClientCreateOrUpdateResult` has been removed
+- Struct `DedicatedHostsClientDeletePoller` has been removed
+- Struct `DedicatedHostsClientDeletePollerResponse` has been removed
+- Struct `DedicatedHostsClientGetResult` has been removed
+- Struct `DedicatedHostsClientListByHostGroupPager` has been removed
+- Struct `DedicatedHostsClientListByHostGroupResult` has been removed
+- Struct `DedicatedHostsClientRestartPoller` has been removed
+- Struct `DedicatedHostsClientRestartPollerResponse` has been removed
+- Struct `DedicatedHostsClientUpdatePoller` has been removed
+- Struct `DedicatedHostsClientUpdatePollerResponse` has been removed
+- Struct `DedicatedHostsClientUpdateResult` has been removed
+- Struct `DiskAccessesClientCreateOrUpdatePoller` has been removed
+- Struct `DiskAccessesClientCreateOrUpdatePollerResponse` has been removed
+- Struct `DiskAccessesClientCreateOrUpdateResult` has been removed
+- Struct `DiskAccessesClientDeleteAPrivateEndpointConnectionPoller` has been removed
+- Struct `DiskAccessesClientDeleteAPrivateEndpointConnectionPollerResponse` has been removed
+- Struct `DiskAccessesClientDeletePoller` has been removed
+- Struct `DiskAccessesClientDeletePollerResponse` has been removed
+- Struct `DiskAccessesClientGetAPrivateEndpointConnectionResult` has been removed
+- Struct `DiskAccessesClientGetPrivateLinkResourcesResult` has been removed
+- Struct `DiskAccessesClientGetResult` has been removed
+- Struct `DiskAccessesClientListByResourceGroupPager` has been removed
+- Struct `DiskAccessesClientListByResourceGroupResult` has been removed
+- Struct `DiskAccessesClientListPager` has been removed
+- Struct `DiskAccessesClientListPrivateEndpointConnectionsPager` has been removed
+- Struct `DiskAccessesClientListPrivateEndpointConnectionsResult` has been removed
+- Struct `DiskAccessesClientListResult` has been removed
+- Struct `DiskAccessesClientUpdateAPrivateEndpointConnectionPoller` has been removed
+- Struct `DiskAccessesClientUpdateAPrivateEndpointConnectionPollerResponse` has been removed
+- Struct `DiskAccessesClientUpdateAPrivateEndpointConnectionResult` has been removed
+- Struct `DiskAccessesClientUpdatePoller` has been removed
+- Struct `DiskAccessesClientUpdatePollerResponse` has been removed
+- Struct `DiskAccessesClientUpdateResult` has been removed
+- Struct `DiskEncryptionSetsClientCreateOrUpdatePoller` has been removed
+- Struct `DiskEncryptionSetsClientCreateOrUpdatePollerResponse` has been removed
+- Struct `DiskEncryptionSetsClientCreateOrUpdateResult` has been removed
+- Struct `DiskEncryptionSetsClientDeletePoller` has been removed
+- Struct `DiskEncryptionSetsClientDeletePollerResponse` has been removed
+- Struct `DiskEncryptionSetsClientGetResult` has been removed
+- Struct `DiskEncryptionSetsClientListAssociatedResourcesPager` has been removed
+- Struct `DiskEncryptionSetsClientListAssociatedResourcesResult` has been removed
+- Struct `DiskEncryptionSetsClientListByResourceGroupPager` has been removed
+- Struct `DiskEncryptionSetsClientListByResourceGroupResult` has been removed
+- Struct `DiskEncryptionSetsClientListPager` has been removed
+- Struct `DiskEncryptionSetsClientListResult` has been removed
+- Struct `DiskEncryptionSetsClientUpdatePoller` has been removed
+- Struct `DiskEncryptionSetsClientUpdatePollerResponse` has been removed
+- Struct `DiskEncryptionSetsClientUpdateResult` has been removed
+- Struct `DiskRestorePointClientGetResult` has been removed
+- Struct `DiskRestorePointClientGrantAccessPoller` has been removed
+- Struct `DiskRestorePointClientGrantAccessPollerResponse` has been removed
+- Struct `DiskRestorePointClientGrantAccessResult` has been removed
+- Struct `DiskRestorePointClientListByRestorePointPager` has been removed
+- Struct `DiskRestorePointClientListByRestorePointResult` has been removed
+- Struct `DiskRestorePointClientRevokeAccessPoller` has been removed
+- Struct `DiskRestorePointClientRevokeAccessPollerResponse` has been removed
+- Struct `DisksClientCreateOrUpdatePoller` has been removed
+- Struct `DisksClientCreateOrUpdatePollerResponse` has been removed
+- Struct `DisksClientCreateOrUpdateResult` has been removed
+- Struct `DisksClientDeletePoller` has been removed
+- Struct `DisksClientDeletePollerResponse` has been removed
+- Struct `DisksClientGetResult` has been removed
+- Struct `DisksClientGrantAccessPoller` has been removed
+- Struct `DisksClientGrantAccessPollerResponse` has been removed
+- Struct `DisksClientGrantAccessResult` has been removed
+- Struct `DisksClientListByResourceGroupPager` has been removed
+- Struct `DisksClientListByResourceGroupResult` has been removed
+- Struct `DisksClientListPager` has been removed
+- Struct `DisksClientListResult` has been removed
+- Struct `DisksClientRevokeAccessPoller` has been removed
+- Struct `DisksClientRevokeAccessPollerResponse` has been removed
+- Struct `DisksClientUpdatePoller` has been removed
+- Struct `DisksClientUpdatePollerResponse` has been removed
+- Struct `DisksClientUpdateResult` has been removed
+- Struct `GalleriesClientCreateOrUpdatePoller` has been removed
+- Struct `GalleriesClientCreateOrUpdatePollerResponse` has been removed
+- Struct `GalleriesClientCreateOrUpdateResult` has been removed
+- Struct `GalleriesClientDeletePoller` has been removed
+- Struct `GalleriesClientDeletePollerResponse` has been removed
+- Struct `GalleriesClientGetResult` has been removed
+- Struct `GalleriesClientListByResourceGroupPager` has been removed
+- Struct `GalleriesClientListByResourceGroupResult` has been removed
+- Struct `GalleriesClientListPager` has been removed
+- Struct `GalleriesClientListResult` has been removed
+- Struct `GalleriesClientUpdatePoller` has been removed
+- Struct `GalleriesClientUpdatePollerResponse` has been removed
+- Struct `GalleriesClientUpdateResult` has been removed
+- Struct `GalleryApplicationVersionsClientCreateOrUpdatePoller` has been removed
+- Struct `GalleryApplicationVersionsClientCreateOrUpdatePollerResponse` has been removed
+- Struct `GalleryApplicationVersionsClientCreateOrUpdateResult` has been removed
+- Struct `GalleryApplicationVersionsClientDeletePoller` has been removed
+- Struct `GalleryApplicationVersionsClientDeletePollerResponse` has been removed
+- Struct `GalleryApplicationVersionsClientGetResult` has been removed
+- Struct `GalleryApplicationVersionsClientListByGalleryApplicationPager` has been removed
+- Struct `GalleryApplicationVersionsClientListByGalleryApplicationResult` has been removed
+- Struct `GalleryApplicationVersionsClientUpdatePoller` has been removed
+- Struct `GalleryApplicationVersionsClientUpdatePollerResponse` has been removed
+- Struct `GalleryApplicationVersionsClientUpdateResult` has been removed
+- Struct `GalleryApplicationsClientCreateOrUpdatePoller` has been removed
+- Struct `GalleryApplicationsClientCreateOrUpdatePollerResponse` has been removed
+- Struct `GalleryApplicationsClientCreateOrUpdateResult` has been removed
+- Struct `GalleryApplicationsClientDeletePoller` has been removed
+- Struct `GalleryApplicationsClientDeletePollerResponse` has been removed
+- Struct `GalleryApplicationsClientGetResult` has been removed
+- Struct `GalleryApplicationsClientListByGalleryPager` has been removed
+- Struct `GalleryApplicationsClientListByGalleryResult` has been removed
+- Struct `GalleryApplicationsClientUpdatePoller` has been removed
+- Struct `GalleryApplicationsClientUpdatePollerResponse` has been removed
+- Struct `GalleryApplicationsClientUpdateResult` has been removed
+- Struct `GalleryImageVersionsClientCreateOrUpdatePoller` has been removed
+- Struct `GalleryImageVersionsClientCreateOrUpdatePollerResponse` has been removed
+- Struct `GalleryImageVersionsClientCreateOrUpdateResult` has been removed
+- Struct `GalleryImageVersionsClientDeletePoller` has been removed
+- Struct `GalleryImageVersionsClientDeletePollerResponse` has been removed
+- Struct `GalleryImageVersionsClientGetResult` has been removed
+- Struct `GalleryImageVersionsClientListByGalleryImagePager` has been removed
+- Struct `GalleryImageVersionsClientListByGalleryImageResult` has been removed
+- Struct `GalleryImageVersionsClientUpdatePoller` has been removed
+- Struct `GalleryImageVersionsClientUpdatePollerResponse` has been removed
+- Struct `GalleryImageVersionsClientUpdateResult` has been removed
+- Struct `GalleryImagesClientCreateOrUpdatePoller` has been removed
+- Struct `GalleryImagesClientCreateOrUpdatePollerResponse` has been removed
+- Struct `GalleryImagesClientCreateOrUpdateResult` has been removed
+- Struct `GalleryImagesClientDeletePoller` has been removed
+- Struct `GalleryImagesClientDeletePollerResponse` has been removed
+- Struct `GalleryImagesClientGetResult` has been removed
+- Struct `GalleryImagesClientListByGalleryPager` has been removed
+- Struct `GalleryImagesClientListByGalleryResult` has been removed
+- Struct `GalleryImagesClientUpdatePoller` has been removed
+- Struct `GalleryImagesClientUpdatePollerResponse` has been removed
+- Struct `GalleryImagesClientUpdateResult` has been removed
+- Struct `GallerySharingProfileClientUpdatePoller` has been removed
+- Struct `GallerySharingProfileClientUpdatePollerResponse` has been removed
+- Struct `GallerySharingProfileClientUpdateResult` has been removed
+- Struct `ImagesClientCreateOrUpdatePoller` has been removed
+- Struct `ImagesClientCreateOrUpdatePollerResponse` has been removed
+- Struct `ImagesClientCreateOrUpdateResult` has been removed
+- Struct `ImagesClientDeletePoller` has been removed
+- Struct `ImagesClientDeletePollerResponse` has been removed
+- Struct `ImagesClientGetResult` has been removed
+- Struct `ImagesClientListByResourceGroupPager` has been removed
+- Struct `ImagesClientListByResourceGroupResult` has been removed
+- Struct `ImagesClientListPager` has been removed
+- Struct `ImagesClientListResult` has been removed
+- Struct `ImagesClientUpdatePoller` has been removed
+- Struct `ImagesClientUpdatePollerResponse` has been removed
+- Struct `ImagesClientUpdateResult` has been removed
+- Struct `LogAnalyticsClientExportRequestRateByIntervalPoller` has been removed
+- Struct `LogAnalyticsClientExportRequestRateByIntervalPollerResponse` has been removed
+- Struct `LogAnalyticsClientExportRequestRateByIntervalResult` has been removed
+- Struct `LogAnalyticsClientExportThrottledRequestsPoller` has been removed
+- Struct `LogAnalyticsClientExportThrottledRequestsPollerResponse` has been removed
+- Struct `LogAnalyticsClientExportThrottledRequestsResult` has been removed
+- Struct `OperationsClientListResult` has been removed
+- Struct `ProximityPlacementGroupsClientCreateOrUpdateResult` has been removed
+- Struct `ProximityPlacementGroupsClientGetResult` has been removed
+- Struct `ProximityPlacementGroupsClientListByResourceGroupPager` has been removed
+- Struct `ProximityPlacementGroupsClientListByResourceGroupResult` has been removed
+- Struct `ProximityPlacementGroupsClientListBySubscriptionPager` has been removed
+- Struct `ProximityPlacementGroupsClientListBySubscriptionResult` has been removed
+- Struct `ProximityPlacementGroupsClientUpdateResult` has been removed
+- Struct `ResourceSKUsClientListPager` has been removed
+- Struct `ResourceSKUsClientListResult` has been removed
+- Struct `RestorePointCollectionsClientCreateOrUpdateResult` has been removed
+- Struct `RestorePointCollectionsClientDeletePoller` has been removed
+- Struct `RestorePointCollectionsClientDeletePollerResponse` has been removed
+- Struct `RestorePointCollectionsClientGetResult` has been removed
+- Struct `RestorePointCollectionsClientListAllPager` has been removed
+- Struct `RestorePointCollectionsClientListAllResult` has been removed
+- Struct `RestorePointCollectionsClientListPager` has been removed
+- Struct `RestorePointCollectionsClientListResult` has been removed
+- Struct `RestorePointCollectionsClientUpdateResult` has been removed
+- Struct `RestorePointsClientCreatePoller` has been removed
+- Struct `RestorePointsClientCreatePollerResponse` has been removed
+- Struct `RestorePointsClientCreateResult` has been removed
+- Struct `RestorePointsClientDeletePoller` has been removed
+- Struct `RestorePointsClientDeletePollerResponse` has been removed
+- Struct `RestorePointsClientGetResult` has been removed
+- Struct `SSHPublicKeysClientCreateResult` has been removed
+- Struct `SSHPublicKeysClientGenerateKeyPairResult` has been removed
+- Struct `SSHPublicKeysClientGetResult` has been removed
+- Struct `SSHPublicKeysClientListByResourceGroupPager` has been removed
+- Struct `SSHPublicKeysClientListByResourceGroupResult` has been removed
+- Struct `SSHPublicKeysClientListBySubscriptionPager` has been removed
+- Struct `SSHPublicKeysClientListBySubscriptionResult` has been removed
+- Struct `SSHPublicKeysClientUpdateResult` has been removed
+- Struct `SharedGalleriesClientGetResult` has been removed
+- Struct `SharedGalleriesClientListPager` has been removed
+- Struct `SharedGalleriesClientListResult` has been removed
+- Struct `SharedGalleryImageVersionsClientGetResult` has been removed
+- Struct `SharedGalleryImageVersionsClientListPager` has been removed
+- Struct `SharedGalleryImageVersionsClientListResult` has been removed
+- Struct `SharedGalleryImagesClientGetResult` has been removed
+- Struct `SharedGalleryImagesClientListPager` has been removed
+- Struct `SharedGalleryImagesClientListResult` has been removed
+- Struct `SnapshotsClientCreateOrUpdatePoller` has been removed
+- Struct `SnapshotsClientCreateOrUpdatePollerResponse` has been removed
+- Struct `SnapshotsClientCreateOrUpdateResult` has been removed
+- Struct `SnapshotsClientDeletePoller` has been removed
+- Struct `SnapshotsClientDeletePollerResponse` has been removed
+- Struct `SnapshotsClientGetResult` has been removed
+- Struct `SnapshotsClientGrantAccessPoller` has been removed
+- Struct `SnapshotsClientGrantAccessPollerResponse` has been removed
+- Struct `SnapshotsClientGrantAccessResult` has been removed
+- Struct `SnapshotsClientListByResourceGroupPager` has been removed
+- Struct `SnapshotsClientListByResourceGroupResult` has been removed
+- Struct `SnapshotsClientListPager` has been removed
+- Struct `SnapshotsClientListResult` has been removed
+- Struct `SnapshotsClientRevokeAccessPoller` has been removed
+- Struct `SnapshotsClientRevokeAccessPollerResponse` has been removed
+- Struct `SnapshotsClientUpdatePoller` has been removed
+- Struct `SnapshotsClientUpdatePollerResponse` has been removed
+- Struct `SnapshotsClientUpdateResult` has been removed
+- Struct `UsageClientListPager` has been removed
+- Struct `UsageClientListResult` has been removed
+- Struct `VirtualMachineExtensionImagesClientGetResult` has been removed
+- Struct `VirtualMachineExtensionImagesClientListTypesResult` has been removed
+- Struct `VirtualMachineExtensionImagesClientListVersionsResult` has been removed
+- Struct `VirtualMachineExtensionsClientCreateOrUpdatePoller` has been removed
+- Struct `VirtualMachineExtensionsClientCreateOrUpdatePollerResponse` has been removed
+- Struct `VirtualMachineExtensionsClientCreateOrUpdateResult` has been removed
+- Struct `VirtualMachineExtensionsClientDeletePoller` has been removed
+- Struct `VirtualMachineExtensionsClientDeletePollerResponse` has been removed
+- Struct `VirtualMachineExtensionsClientGetResult` has been removed
+- Struct `VirtualMachineExtensionsClientListResult` has been removed
+- Struct `VirtualMachineExtensionsClientUpdatePoller` has been removed
+- Struct `VirtualMachineExtensionsClientUpdatePollerResponse` has been removed
+- Struct `VirtualMachineExtensionsClientUpdateResult` has been removed
+- Struct `VirtualMachineImagesClientGetResult` has been removed
+- Struct `VirtualMachineImagesClientListOffersResult` has been removed
+- Struct `VirtualMachineImagesClientListPublishersResult` has been removed
+- Struct `VirtualMachineImagesClientListResult` has been removed
+- Struct `VirtualMachineImagesClientListSKUsResult` has been removed
+- Struct `VirtualMachineImagesEdgeZoneClientGetResult` has been removed
+- Struct `VirtualMachineImagesEdgeZoneClientListOffersResult` has been removed
+- Struct `VirtualMachineImagesEdgeZoneClientListPublishersResult` has been removed
+- Struct `VirtualMachineImagesEdgeZoneClientListResult` has been removed
+- Struct `VirtualMachineImagesEdgeZoneClientListSKUsResult` has been removed
+- Struct `VirtualMachineRunCommandsClientCreateOrUpdatePoller` has been removed
+- Struct `VirtualMachineRunCommandsClientCreateOrUpdatePollerResponse` has been removed
+- Struct `VirtualMachineRunCommandsClientCreateOrUpdateResult` has been removed
+- Struct `VirtualMachineRunCommandsClientDeletePoller` has been removed
+- Struct `VirtualMachineRunCommandsClientDeletePollerResponse` has been removed
+- Struct `VirtualMachineRunCommandsClientGetByVirtualMachineResult` has been removed
+- Struct `VirtualMachineRunCommandsClientGetResult` has been removed
+- Struct `VirtualMachineRunCommandsClientListByVirtualMachinePager` has been removed
+- Struct `VirtualMachineRunCommandsClientListByVirtualMachineResult` has been removed
+- Struct `VirtualMachineRunCommandsClientListPager` has been removed
+- Struct `VirtualMachineRunCommandsClientListResult` has been removed
+- Struct `VirtualMachineRunCommandsClientUpdatePoller` has been removed
+- Struct `VirtualMachineRunCommandsClientUpdatePollerResponse` has been removed
+- Struct `VirtualMachineRunCommandsClientUpdateResult` has been removed
+- Struct `VirtualMachineScaleSetExtensionsClientCreateOrUpdatePoller` has been removed
+- Struct `VirtualMachineScaleSetExtensionsClientCreateOrUpdatePollerResponse` has been removed
+- Struct `VirtualMachineScaleSetExtensionsClientCreateOrUpdateResult` has been removed
+- Struct `VirtualMachineScaleSetExtensionsClientDeletePoller` has been removed
+- Struct `VirtualMachineScaleSetExtensionsClientDeletePollerResponse` has been removed
+- Struct `VirtualMachineScaleSetExtensionsClientGetResult` has been removed
+- Struct `VirtualMachineScaleSetExtensionsClientListPager` has been removed
+- Struct `VirtualMachineScaleSetExtensionsClientListResult` has been removed
+- Struct `VirtualMachineScaleSetExtensionsClientUpdatePoller` has been removed
+- Struct `VirtualMachineScaleSetExtensionsClientUpdatePollerResponse` has been removed
+- Struct `VirtualMachineScaleSetExtensionsClientUpdateResult` has been removed
+- Struct `VirtualMachineScaleSetRollingUpgradesClientCancelPoller` has been removed
+- Struct `VirtualMachineScaleSetRollingUpgradesClientCancelPollerResponse` has been removed
+- Struct `VirtualMachineScaleSetRollingUpgradesClientGetLatestResult` has been removed
+- Struct `VirtualMachineScaleSetRollingUpgradesClientStartExtensionUpgradePoller` has been removed
+- Struct `VirtualMachineScaleSetRollingUpgradesClientStartExtensionUpgradePollerResponse` has been removed
+- Struct `VirtualMachineScaleSetRollingUpgradesClientStartOSUpgradePoller` has been removed
+- Struct `VirtualMachineScaleSetRollingUpgradesClientStartOSUpgradePollerResponse` has been removed
+- Struct `VirtualMachineScaleSetVMExtensionsClientCreateOrUpdatePoller` has been removed
+- Struct `VirtualMachineScaleSetVMExtensionsClientCreateOrUpdatePollerResponse` has been removed
+- Struct `VirtualMachineScaleSetVMExtensionsClientCreateOrUpdateResult` has been removed
+- Struct `VirtualMachineScaleSetVMExtensionsClientDeletePoller` has been removed
+- Struct `VirtualMachineScaleSetVMExtensionsClientDeletePollerResponse` has been removed
+- Struct `VirtualMachineScaleSetVMExtensionsClientGetResult` has been removed
+- Struct `VirtualMachineScaleSetVMExtensionsClientListResult` has been removed
+- Struct `VirtualMachineScaleSetVMExtensionsClientUpdatePoller` has been removed
+- Struct `VirtualMachineScaleSetVMExtensionsClientUpdatePollerResponse` has been removed
+- Struct `VirtualMachineScaleSetVMExtensionsClientUpdateResult` has been removed
+- Struct `VirtualMachineScaleSetVMRunCommandsClientCreateOrUpdatePoller` has been removed
+- Struct `VirtualMachineScaleSetVMRunCommandsClientCreateOrUpdatePollerResponse` has been removed
+- Struct `VirtualMachineScaleSetVMRunCommandsClientCreateOrUpdateResult` has been removed
+- Struct `VirtualMachineScaleSetVMRunCommandsClientDeletePoller` has been removed
+- Struct `VirtualMachineScaleSetVMRunCommandsClientDeletePollerResponse` has been removed
+- Struct `VirtualMachineScaleSetVMRunCommandsClientGetResult` has been removed
+- Struct `VirtualMachineScaleSetVMRunCommandsClientListPager` has been removed
+- Struct `VirtualMachineScaleSetVMRunCommandsClientListResult` has been removed
+- Struct `VirtualMachineScaleSetVMRunCommandsClientUpdatePoller` has been removed
+- Struct `VirtualMachineScaleSetVMRunCommandsClientUpdatePollerResponse` has been removed
+- Struct `VirtualMachineScaleSetVMRunCommandsClientUpdateResult` has been removed
+- Struct `VirtualMachineScaleSetVMsClientDeallocatePoller` has been removed
+- Struct `VirtualMachineScaleSetVMsClientDeallocatePollerResponse` has been removed
+- Struct `VirtualMachineScaleSetVMsClientDeletePoller` has been removed
+- Struct `VirtualMachineScaleSetVMsClientDeletePollerResponse` has been removed
+- Struct `VirtualMachineScaleSetVMsClientGetInstanceViewResult` has been removed
+- Struct `VirtualMachineScaleSetVMsClientGetResult` has been removed
+- Struct `VirtualMachineScaleSetVMsClientListPager` has been removed
+- Struct `VirtualMachineScaleSetVMsClientListResult` has been removed
+- Struct `VirtualMachineScaleSetVMsClientPerformMaintenancePoller` has been removed
+- Struct `VirtualMachineScaleSetVMsClientPerformMaintenancePollerResponse` has been removed
+- Struct `VirtualMachineScaleSetVMsClientPowerOffPoller` has been removed
+- Struct `VirtualMachineScaleSetVMsClientPowerOffPollerResponse` has been removed
+- Struct `VirtualMachineScaleSetVMsClientRedeployPoller` has been removed
+- Struct `VirtualMachineScaleSetVMsClientRedeployPollerResponse` has been removed
+- Struct `VirtualMachineScaleSetVMsClientReimageAllPoller` has been removed
+- Struct `VirtualMachineScaleSetVMsClientReimageAllPollerResponse` has been removed
+- Struct `VirtualMachineScaleSetVMsClientReimagePoller` has been removed
+- Struct `VirtualMachineScaleSetVMsClientReimagePollerResponse` has been removed
+- Struct `VirtualMachineScaleSetVMsClientRestartPoller` has been removed
+- Struct `VirtualMachineScaleSetVMsClientRestartPollerResponse` has been removed
+- Struct `VirtualMachineScaleSetVMsClientRetrieveBootDiagnosticsDataResult` has been removed
+- Struct `VirtualMachineScaleSetVMsClientRunCommandPoller` has been removed
+- Struct `VirtualMachineScaleSetVMsClientRunCommandPollerResponse` has been removed
+- Struct `VirtualMachineScaleSetVMsClientRunCommandResult` has been removed
+- Struct `VirtualMachineScaleSetVMsClientStartPoller` has been removed
+- Struct `VirtualMachineScaleSetVMsClientStartPollerResponse` has been removed
+- Struct `VirtualMachineScaleSetVMsClientUpdatePoller` has been removed
+- Struct `VirtualMachineScaleSetVMsClientUpdatePollerResponse` has been removed
+- Struct `VirtualMachineScaleSetVMsClientUpdateResult` has been removed
+- Struct `VirtualMachineScaleSetsClientCreateOrUpdatePoller` has been removed
+- Struct `VirtualMachineScaleSetsClientCreateOrUpdatePollerResponse` has been removed
+- Struct `VirtualMachineScaleSetsClientCreateOrUpdateResult` has been removed
+- Struct `VirtualMachineScaleSetsClientDeallocatePoller` has been removed
+- Struct `VirtualMachineScaleSetsClientDeallocatePollerResponse` has been removed
+- Struct `VirtualMachineScaleSetsClientDeleteInstancesPoller` has been removed
+- Struct `VirtualMachineScaleSetsClientDeleteInstancesPollerResponse` has been removed
+- Struct `VirtualMachineScaleSetsClientDeletePoller` has been removed
+- Struct `VirtualMachineScaleSetsClientDeletePollerResponse` has been removed
+- Struct `VirtualMachineScaleSetsClientForceRecoveryServiceFabricPlatformUpdateDomainWalkResult` has been removed
+- Struct `VirtualMachineScaleSetsClientGetInstanceViewResult` has been removed
+- Struct `VirtualMachineScaleSetsClientGetOSUpgradeHistoryPager` has been removed
+- Struct `VirtualMachineScaleSetsClientGetOSUpgradeHistoryResult` has been removed
+- Struct `VirtualMachineScaleSetsClientGetResult` has been removed
+- Struct `VirtualMachineScaleSetsClientListAllPager` has been removed
+- Struct `VirtualMachineScaleSetsClientListAllResult` has been removed
+- Struct `VirtualMachineScaleSetsClientListByLocationPager` has been removed
+- Struct `VirtualMachineScaleSetsClientListByLocationResult` has been removed
+- Struct `VirtualMachineScaleSetsClientListPager` has been removed
+- Struct `VirtualMachineScaleSetsClientListResult` has been removed
+- Struct `VirtualMachineScaleSetsClientListSKUsPager` has been removed
+- Struct `VirtualMachineScaleSetsClientListSKUsResult` has been removed
+- Struct `VirtualMachineScaleSetsClientPerformMaintenancePoller` has been removed
+- Struct `VirtualMachineScaleSetsClientPerformMaintenancePollerResponse` has been removed
+- Struct `VirtualMachineScaleSetsClientPowerOffPoller` has been removed
+- Struct `VirtualMachineScaleSetsClientPowerOffPollerResponse` has been removed
+- Struct `VirtualMachineScaleSetsClientRedeployPoller` has been removed
+- Struct `VirtualMachineScaleSetsClientRedeployPollerResponse` has been removed
+- Struct `VirtualMachineScaleSetsClientReimageAllPoller` has been removed
+- Struct `VirtualMachineScaleSetsClientReimageAllPollerResponse` has been removed
+- Struct `VirtualMachineScaleSetsClientReimagePoller` has been removed
+- Struct `VirtualMachineScaleSetsClientReimagePollerResponse` has been removed
+- Struct `VirtualMachineScaleSetsClientRestartPoller` has been removed
+- Struct `VirtualMachineScaleSetsClientRestartPollerResponse` has been removed
+- Struct `VirtualMachineScaleSetsClientSetOrchestrationServiceStatePoller` has been removed
+- Struct `VirtualMachineScaleSetsClientSetOrchestrationServiceStatePollerResponse` has been removed
+- Struct `VirtualMachineScaleSetsClientStartPoller` has been removed
+- Struct `VirtualMachineScaleSetsClientStartPollerResponse` has been removed
+- Struct `VirtualMachineScaleSetsClientUpdateInstancesPoller` has been removed
+- Struct `VirtualMachineScaleSetsClientUpdateInstancesPollerResponse` has been removed
+- Struct `VirtualMachineScaleSetsClientUpdatePoller` has been removed
+- Struct `VirtualMachineScaleSetsClientUpdatePollerResponse` has been removed
+- Struct `VirtualMachineScaleSetsClientUpdateResult` has been removed
+- Struct `VirtualMachineSizesClientListResult` has been removed
+- Struct `VirtualMachinesClientAssessPatchesPoller` has been removed
+- Struct `VirtualMachinesClientAssessPatchesPollerResponse` has been removed
+- Struct `VirtualMachinesClientAssessPatchesResult` has been removed
+- Struct `VirtualMachinesClientCapturePoller` has been removed
+- Struct `VirtualMachinesClientCapturePollerResponse` has been removed
+- Struct `VirtualMachinesClientCaptureResult` has been removed
+- Struct `VirtualMachinesClientConvertToManagedDisksPoller` has been removed
+- Struct `VirtualMachinesClientConvertToManagedDisksPollerResponse` has been removed
+- Struct `VirtualMachinesClientCreateOrUpdatePoller` has been removed
+- Struct `VirtualMachinesClientCreateOrUpdatePollerResponse` has been removed
+- Struct `VirtualMachinesClientCreateOrUpdateResult` has been removed
+- Struct `VirtualMachinesClientDeallocatePoller` has been removed
+- Struct `VirtualMachinesClientDeallocatePollerResponse` has been removed
+- Struct `VirtualMachinesClientDeletePoller` has been removed
+- Struct `VirtualMachinesClientDeletePollerResponse` has been removed
+- Struct `VirtualMachinesClientGetResult` has been removed
+- Struct `VirtualMachinesClientInstallPatchesPoller` has been removed
+- Struct `VirtualMachinesClientInstallPatchesPollerResponse` has been removed
+- Struct `VirtualMachinesClientInstallPatchesResult` has been removed
+- Struct `VirtualMachinesClientInstanceViewResult` has been removed
+- Struct `VirtualMachinesClientListAllPager` has been removed
+- Struct `VirtualMachinesClientListAllResult` has been removed
+- Struct `VirtualMachinesClientListAvailableSizesResult` has been removed
+- Struct `VirtualMachinesClientListByLocationPager` has been removed
+- Struct `VirtualMachinesClientListByLocationResult` has been removed
+- Struct `VirtualMachinesClientListPager` has been removed
+- Struct `VirtualMachinesClientListResult` has been removed
+- Struct `VirtualMachinesClientPerformMaintenancePoller` has been removed
+- Struct `VirtualMachinesClientPerformMaintenancePollerResponse` has been removed
+- Struct `VirtualMachinesClientPowerOffPoller` has been removed
+- Struct `VirtualMachinesClientPowerOffPollerResponse` has been removed
+- Struct `VirtualMachinesClientReapplyPoller` has been removed
+- Struct `VirtualMachinesClientReapplyPollerResponse` has been removed
+- Struct `VirtualMachinesClientRedeployPoller` has been removed
+- Struct `VirtualMachinesClientRedeployPollerResponse` has been removed
+- Struct `VirtualMachinesClientReimagePoller` has been removed
+- Struct `VirtualMachinesClientReimagePollerResponse` has been removed
+- Struct `VirtualMachinesClientRestartPoller` has been removed
+- Struct `VirtualMachinesClientRestartPollerResponse` has been removed
+- Struct `VirtualMachinesClientRetrieveBootDiagnosticsDataResult` has been removed
+- Struct `VirtualMachinesClientRunCommandPoller` has been removed
+- Struct `VirtualMachinesClientRunCommandPollerResponse` has been removed
+- Struct `VirtualMachinesClientRunCommandResult` has been removed
+- Struct `VirtualMachinesClientStartPoller` has been removed
+- Struct `VirtualMachinesClientStartPollerResponse` has been removed
+- Struct `VirtualMachinesClientUpdatePoller` has been removed
+- Struct `VirtualMachinesClientUpdatePollerResponse` has been removed
+- Struct `VirtualMachinesClientUpdateResult` has been removed
+- Field `RawResponse` of struct `CloudServicesClientReimageResponse` has been removed
+- Field `RawResponse` of struct `CloudServiceRoleInstancesClientRestartResponse` has been removed
+- Field `VirtualMachineRunCommandsClientUpdateResult` of struct `VirtualMachineRunCommandsClientUpdateResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachineRunCommandsClientUpdateResponse` has been removed
+- Field `CommunityGalleryImageVersionsClientGetResult` of struct `CommunityGalleryImageVersionsClientGetResponse` has been removed
+- Field `RawResponse` of struct `CommunityGalleryImageVersionsClientGetResponse` has been removed
+- Field `DiskAccessesClientGetResult` of struct `DiskAccessesClientGetResponse` has been removed
+- Field `RawResponse` of struct `DiskAccessesClientGetResponse` has been removed
+- Field `GalleryApplicationVersionsClientCreateOrUpdateResult` of struct `GalleryApplicationVersionsClientCreateOrUpdateResponse` has been removed
+- Field `RawResponse` of struct `GalleryApplicationVersionsClientCreateOrUpdateResponse` has been removed
+- Field `CloudServicesUpdateDomainClientGetUpdateDomainResult` of struct `CloudServicesUpdateDomainClientGetUpdateDomainResponse` has been removed
+- Field `RawResponse` of struct `CloudServicesUpdateDomainClientGetUpdateDomainResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachineScaleSetRollingUpgradesClientStartOSUpgradeResponse` has been removed
+- Field `VirtualMachineScaleSetsClientCreateOrUpdateResult` of struct `VirtualMachineScaleSetsClientCreateOrUpdateResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachineScaleSetsClientCreateOrUpdateResponse` has been removed
+- Field `DiskRestorePointClientGrantAccessResult` of struct `DiskRestorePointClientGrantAccessResponse` has been removed
+- Field `RawResponse` of struct `DiskRestorePointClientGrantAccessResponse` has been removed
+- Field `ImagesClientListResult` of struct `ImagesClientListResponse` has been removed
+- Field `RawResponse` of struct `ImagesClientListResponse` has been removed
+- Field `SharedGalleriesClientListResult` of struct `SharedGalleriesClientListResponse` has been removed
+- Field `RawResponse` of struct `SharedGalleriesClientListResponse` has been removed
+- Field `VirtualMachineExtensionsClientUpdateResult` of struct `VirtualMachineExtensionsClientUpdateResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachineExtensionsClientUpdateResponse` has been removed
+- Field `RawResponse` of struct `SnapshotsClientDeleteResponse` has been removed
+- Field `VirtualMachineScaleSetRollingUpgradesClientGetLatestResult` of struct `VirtualMachineScaleSetRollingUpgradesClientGetLatestResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachineScaleSetRollingUpgradesClientGetLatestResponse` has been removed
+- Field `RawResponse` of struct `DisksClientDeleteResponse` has been removed
+- Field `DiskAccessesClientListResult` of struct `DiskAccessesClientListResponse` has been removed
+- Field `RawResponse` of struct `DiskAccessesClientListResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachinesClientRestartResponse` has been removed
+- Field `GallerySharingProfileClientUpdateResult` of struct `GallerySharingProfileClientUpdateResponse` has been removed
+- Field `RawResponse` of struct `GallerySharingProfileClientUpdateResponse` has been removed
+- Field `VirtualMachineExtensionsClientListResult` of struct `VirtualMachineExtensionsClientListResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachineExtensionsClientListResponse` has been removed
+- Field `GalleryImageVersionsClientListByGalleryImageResult` of struct `GalleryImageVersionsClientListByGalleryImageResponse` has been removed
+- Field `RawResponse` of struct `GalleryImageVersionsClientListByGalleryImageResponse` has been removed
+- Field `DedicatedHostGroupsClientListByResourceGroupResult` of struct `DedicatedHostGroupsClientListByResourceGroupResponse` has been removed
+- Field `RawResponse` of struct `DedicatedHostGroupsClientListByResourceGroupResponse` has been removed
+- Field `VirtualMachineRunCommandsClientGetByVirtualMachineResult` of struct `VirtualMachineRunCommandsClientGetByVirtualMachineResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachineRunCommandsClientGetByVirtualMachineResponse` has been removed
+- Field `CloudServicesClientGetInstanceViewResult` of struct `CloudServicesClientGetInstanceViewResponse` has been removed
+- Field `RawResponse` of struct `CloudServicesClientGetInstanceViewResponse` has been removed
+- Field `VirtualMachineImagesClientListPublishersResult` of struct `VirtualMachineImagesClientListPublishersResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachineImagesClientListPublishersResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachineScaleSetVMsClientPerformMaintenanceResponse` has been removed
+- Field `VirtualMachineImagesClientListSKUsResult` of struct `VirtualMachineImagesClientListSKUsResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachineImagesClientListSKUsResponse` has been removed
+- Field `SSHPublicKeysClientCreateResult` of struct `SSHPublicKeysClientCreateResponse` has been removed
+- Field `RawResponse` of struct `SSHPublicKeysClientCreateResponse` has been removed
+- Field `CloudServiceOperatingSystemsClientGetOSVersionResult` of struct `CloudServiceOperatingSystemsClientGetOSVersionResponse` has been removed
+- Field `RawResponse` of struct `CloudServiceOperatingSystemsClientGetOSVersionResponse` has been removed
+- Field `VirtualMachineImagesEdgeZoneClientListResult` of struct `VirtualMachineImagesEdgeZoneClientListResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachineImagesEdgeZoneClientListResponse` has been removed
+- Field `VirtualMachineScaleSetVMsClientGetResult` of struct `VirtualMachineScaleSetVMsClientGetResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachineScaleSetVMsClientGetResponse` has been removed
+- Field `GalleriesClientListByResourceGroupResult` of struct `GalleriesClientListByResourceGroupResponse` has been removed
+- Field `RawResponse` of struct `GalleriesClientListByResourceGroupResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachinesClientGeneralizeResponse` has been removed
+- Field `RawResponse` of struct `CloudServiceRoleInstancesClientRebuildResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachineScaleSetExtensionsClientDeleteResponse` has been removed
+- Field `VirtualMachineScaleSetVMsClientListResult` of struct `VirtualMachineScaleSetVMsClientListResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachineScaleSetVMsClientListResponse` has been removed
+- Field `CapacityReservationGroupsClientListByResourceGroupResult` of struct `CapacityReservationGroupsClientListByResourceGroupResponse` has been removed
+- Field `RawResponse` of struct `CapacityReservationGroupsClientListByResourceGroupResponse` has been removed
+- Field `CloudServicesUpdateDomainClientListUpdateDomainsResult` of struct `CloudServicesUpdateDomainClientListUpdateDomainsResponse` has been removed
+- Field `RawResponse` of struct `CloudServicesUpdateDomainClientListUpdateDomainsResponse` has been removed
+- Field `VirtualMachineScaleSetsClientGetResult` of struct `VirtualMachineScaleSetsClientGetResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachineScaleSetsClientGetResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachineScaleSetVMExtensionsClientDeleteResponse` has been removed
+- Field `CloudServiceRoleInstancesClientGetResult` of struct `CloudServiceRoleInstancesClientGetResponse` has been removed
+- Field `RawResponse` of struct `CloudServiceRoleInstancesClientGetResponse` has been removed
+- Field `AvailabilitySetsClientListResult` of struct `AvailabilitySetsClientListResponse` has been removed
+- Field `RawResponse` of struct `AvailabilitySetsClientListResponse` has been removed
+- Field `VirtualMachineExtensionsClientGetResult` of struct `VirtualMachineExtensionsClientGetResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachineExtensionsClientGetResponse` has been removed
+- Field `VirtualMachineScaleSetVMRunCommandsClientListResult` of struct `VirtualMachineScaleSetVMRunCommandsClientListResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachineScaleSetVMRunCommandsClientListResponse` has been removed
+- Field `VirtualMachinesClientAssessPatchesResult` of struct `VirtualMachinesClientAssessPatchesResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachinesClientAssessPatchesResponse` has been removed
+- Field `DiskAccessesClientGetAPrivateEndpointConnectionResult` of struct `DiskAccessesClientGetAPrivateEndpointConnectionResponse` has been removed
+- Field `RawResponse` of struct `DiskAccessesClientGetAPrivateEndpointConnectionResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachineScaleSetsClientUpdateInstancesResponse` has been removed
+- Field `CapacityReservationGroupsClientListBySubscriptionResult` of struct `CapacityReservationGroupsClientListBySubscriptionResponse` has been removed
+- Field `RawResponse` of struct `CapacityReservationGroupsClientListBySubscriptionResponse` has been removed
+- Field `SSHPublicKeysClientGenerateKeyPairResult` of struct `SSHPublicKeysClientGenerateKeyPairResponse` has been removed
+- Field `RawResponse` of struct `SSHPublicKeysClientGenerateKeyPairResponse` has been removed
+- Field `DiskEncryptionSetsClientListByResourceGroupResult` of struct `DiskEncryptionSetsClientListByResourceGroupResponse` has been removed
+- Field `RawResponse` of struct `DiskEncryptionSetsClientListByResourceGroupResponse` has been removed
+- Field `GalleryImageVersionsClientCreateOrUpdateResult` of struct `GalleryImageVersionsClientCreateOrUpdateResponse` has been removed
+- Field `RawResponse` of struct `GalleryImageVersionsClientCreateOrUpdateResponse` has been removed
+- Field `SharedGalleryImageVersionsClientListResult` of struct `SharedGalleryImageVersionsClientListResponse` has been removed
+- Field `RawResponse` of struct `SharedGalleryImageVersionsClientListResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachineScaleSetsClientPerformMaintenanceResponse` has been removed
+- Field `VirtualMachinesClientRetrieveBootDiagnosticsDataResult` of struct `VirtualMachinesClientRetrieveBootDiagnosticsDataResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachinesClientRetrieveBootDiagnosticsDataResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachineScaleSetVMsClientSimulateEvictionResponse` has been removed
+- Field `VirtualMachineImagesEdgeZoneClientGetResult` of struct `VirtualMachineImagesEdgeZoneClientGetResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachineImagesEdgeZoneClientGetResponse` has been removed
+- Field `VirtualMachineImagesEdgeZoneClientListSKUsResult` of struct `VirtualMachineImagesEdgeZoneClientListSKUsResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachineImagesEdgeZoneClientListSKUsResponse` has been removed
+- Field `CommunityGalleriesClientGetResult` of struct `CommunityGalleriesClientGetResponse` has been removed
+- Field `RawResponse` of struct `CommunityGalleriesClientGetResponse` has been removed
+- Field `GalleryApplicationsClientCreateOrUpdateResult` of struct `GalleryApplicationsClientCreateOrUpdateResponse` has been removed
+- Field `RawResponse` of struct `GalleryApplicationsClientCreateOrUpdateResponse` has been removed
+- Field `VirtualMachinesClientInstanceViewResult` of struct `VirtualMachinesClientInstanceViewResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachinesClientInstanceViewResponse` has been removed
+- Field `DiskRestorePointClientGetResult` of struct `DiskRestorePointClientGetResponse` has been removed
+- Field `RawResponse` of struct `DiskRestorePointClientGetResponse` has been removed
+- Field `SharedGalleriesClientGetResult` of struct `SharedGalleriesClientGetResponse` has been removed
+- Field `RawResponse` of struct `SharedGalleriesClientGetResponse` has been removed
+- Field `DiskAccessesClientGetPrivateLinkResourcesResult` of struct `DiskAccessesClientGetPrivateLinkResourcesResponse` has been removed
+- Field `RawResponse` of struct `DiskAccessesClientGetPrivateLinkResourcesResponse` has been removed
+- Field `CloudServiceOperatingSystemsClientListOSFamiliesResult` of struct `CloudServiceOperatingSystemsClientListOSFamiliesResponse` has been removed
+- Field `RawResponse` of struct `CloudServiceOperatingSystemsClientListOSFamiliesResponse` has been removed
+- Field `VirtualMachineScaleSetVMExtensionsClientGetResult` of struct `VirtualMachineScaleSetVMExtensionsClientGetResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachineScaleSetVMExtensionsClientGetResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachinesClientSimulateEvictionResponse` has been removed
+- Field `RawResponse` of struct `RestorePointCollectionsClientDeleteResponse` has been removed
+- Field `VirtualMachinesClientCaptureResult` of struct `VirtualMachinesClientCaptureResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachinesClientCaptureResponse` has been removed
+- Field `GalleryApplicationsClientListByGalleryResult` of struct `GalleryApplicationsClientListByGalleryResponse` has been removed
+- Field `RawResponse` of struct `GalleryApplicationsClientListByGalleryResponse` has been removed
+- Field `VirtualMachineScaleSetsClientListByLocationResult` of struct `VirtualMachineScaleSetsClientListByLocationResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachineScaleSetsClientListByLocationResponse` has been removed
+- Field `CloudServiceRolesClientGetResult` of struct `CloudServiceRolesClientGetResponse` has been removed
+- Field `RawResponse` of struct `CloudServiceRolesClientGetResponse` has been removed
+- Field `CommunityGalleryImagesClientGetResult` of struct `CommunityGalleryImagesClientGetResponse` has been removed
+- Field `RawResponse` of struct `CommunityGalleryImagesClientGetResponse` has been removed
+- Field `SharedGalleryImagesClientGetResult` of struct `SharedGalleryImagesClientGetResponse` has been removed
+- Field `RawResponse` of struct `SharedGalleryImagesClientGetResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachineScaleSetVMsClientReimageAllResponse` has been removed
+- Field `VirtualMachineScaleSetsClientForceRecoveryServiceFabricPlatformUpdateDomainWalkResult` of struct `VirtualMachineScaleSetsClientForceRecoveryServiceFabricPlatformUpdateDomainWalkResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachineScaleSetsClientForceRecoveryServiceFabricPlatformUpdateDomainWalkResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachinesClientConvertToManagedDisksResponse` has been removed
+- Field `DisksClientGrantAccessResult` of struct `DisksClientGrantAccessResponse` has been removed
+- Field `RawResponse` of struct `DisksClientGrantAccessResponse` has been removed
+- Field `RestorePointCollectionsClientGetResult` of struct `RestorePointCollectionsClientGetResponse` has been removed
+- Field `RawResponse` of struct `RestorePointCollectionsClientGetResponse` has been removed
+- Field `VirtualMachineSizesClientListResult` of struct `VirtualMachineSizesClientListResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachineSizesClientListResponse` has been removed
+- Field `SSHPublicKeysClientListByResourceGroupResult` of struct `SSHPublicKeysClientListByResourceGroupResponse` has been removed
+- Field `RawResponse` of struct `SSHPublicKeysClientListByResourceGroupResponse` has been removed
+- Field `VirtualMachinesClientInstallPatchesResult` of struct `VirtualMachinesClientInstallPatchesResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachinesClientInstallPatchesResponse` has been removed
+- Field `RawResponse` of struct `RestorePointsClientDeleteResponse` has been removed
+- Field `VirtualMachineScaleSetVMsClientRetrieveBootDiagnosticsDataResult` of struct `VirtualMachineScaleSetVMsClientRetrieveBootDiagnosticsDataResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachineScaleSetVMsClientRetrieveBootDiagnosticsDataResponse` has been removed
+- Field `GalleriesClientGetResult` of struct `GalleriesClientGetResponse` has been removed
+- Field `RawResponse` of struct `GalleriesClientGetResponse` has been removed
+- Field `CloudServiceRoleInstancesClientGetInstanceViewResult` of struct `CloudServiceRoleInstancesClientGetInstanceViewResponse` has been removed
+- Field `RawResponse` of struct `CloudServiceRoleInstancesClientGetInstanceViewResponse` has been removed
+- Field `RawResponse` of struct `DiskAccessesClientDeleteAPrivateEndpointConnectionResponse` has been removed
+- Field `DisksClientGetResult` of struct `DisksClientGetResponse` has been removed
+- Field `RawResponse` of struct `DisksClientGetResponse` has been removed
+- Field `VirtualMachineScaleSetVMsClientGetInstanceViewResult` of struct `VirtualMachineScaleSetVMsClientGetInstanceViewResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachineScaleSetVMsClientGetInstanceViewResponse` has been removed
+- Field `RawResponse` of struct `CloudServicesClientDeleteInstancesResponse` has been removed
+- Field `CapacityReservationGroupsClientUpdateResult` of struct `CapacityReservationGroupsClientUpdateResponse` has been removed
+- Field `RawResponse` of struct `CapacityReservationGroupsClientUpdateResponse` has been removed
+- Field `VirtualMachineScaleSetVMExtensionsClientUpdateResult` of struct `VirtualMachineScaleSetVMExtensionsClientUpdateResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachineScaleSetVMExtensionsClientUpdateResponse` has been removed
+- Field `DiskRestorePointClientListByRestorePointResult` of struct `DiskRestorePointClientListByRestorePointResponse` has been removed
+- Field `RawResponse` of struct `DiskRestorePointClientListByRestorePointResponse` has been removed
+- Field `RawResponse` of struct `CloudServicesClientRebuildResponse` has been removed
+- Field `VirtualMachinesClientListByLocationResult` of struct `VirtualMachinesClientListByLocationResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachinesClientListByLocationResponse` has been removed
+- Field `DedicatedHostGroupsClientUpdateResult` of struct `DedicatedHostGroupsClientUpdateResponse` has been removed
+- Field `RawResponse` of struct `DedicatedHostGroupsClientUpdateResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachineScaleSetsClientRedeployResponse` has been removed
+- Field `VirtualMachineScaleSetExtensionsClientListResult` of struct `VirtualMachineScaleSetExtensionsClientListResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachineScaleSetExtensionsClientListResponse` has been removed
+- Field `RestorePointCollectionsClientUpdateResult` of struct `RestorePointCollectionsClientUpdateResponse` has been removed
+- Field `RawResponse` of struct `RestorePointCollectionsClientUpdateResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachineScaleSetVMRunCommandsClientDeleteResponse` has been removed
+- Field `DisksClientCreateOrUpdateResult` of struct `DisksClientCreateOrUpdateResponse` has been removed
+- Field `RawResponse` of struct `DisksClientCreateOrUpdateResponse` has been removed
+- Field `AvailabilitySetsClientCreateOrUpdateResult` of struct `AvailabilitySetsClientCreateOrUpdateResponse` has been removed
+- Field `RawResponse` of struct `AvailabilitySetsClientCreateOrUpdateResponse` has been removed
+- Field `VirtualMachineScaleSetVMExtensionsClientCreateOrUpdateResult` of struct `VirtualMachineScaleSetVMExtensionsClientCreateOrUpdateResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachineScaleSetVMExtensionsClientCreateOrUpdateResponse` has been removed
+- Field `SharedGalleryImageVersionsClientGetResult` of struct `SharedGalleryImageVersionsClientGetResponse` has been removed
+- Field `RawResponse` of struct `SharedGalleryImageVersionsClientGetResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachinesClientRedeployResponse` has been removed
+- Field `CapacityReservationsClientListByCapacityReservationGroupResult` of struct `CapacityReservationsClientListByCapacityReservationGroupResponse` has been removed
+- Field `RawResponse` of struct `CapacityReservationsClientListByCapacityReservationGroupResponse` has been removed
+- Field `VirtualMachineScaleSetsClientListSKUsResult` of struct `VirtualMachineScaleSetsClientListSKUsResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachineScaleSetsClientListSKUsResponse` has been removed
+- Field `DiskEncryptionSetsClientListAssociatedResourcesResult` of struct `DiskEncryptionSetsClientListAssociatedResourcesResponse` has been removed
+- Field `RawResponse` of struct `DiskEncryptionSetsClientListAssociatedResourcesResponse` has been removed
+- Field `RawResponse` of struct `CloudServicesClientDeleteResponse` has been removed
+- Field `VirtualMachineScaleSetsClientUpdateResult` of struct `VirtualMachineScaleSetsClientUpdateResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachineScaleSetsClientUpdateResponse` has been removed
+- Field `GalleriesClientListResult` of struct `GalleriesClientListResponse` has been removed
+- Field `RawResponse` of struct `GalleriesClientListResponse` has been removed
+- Field `RawResponse` of struct `ImagesClientDeleteResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachineScaleSetsClientStartResponse` has been removed
+- Field `CloudServicesClientUpdateResult` of struct `CloudServicesClientUpdateResponse` has been removed
+- Field `RawResponse` of struct `CloudServicesClientUpdateResponse` has been removed
+- Field `VirtualMachineScaleSetExtensionsClientCreateOrUpdateResult` of struct `VirtualMachineScaleSetExtensionsClientCreateOrUpdateResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachineScaleSetExtensionsClientCreateOrUpdateResponse` has been removed
+- Field `ResourceSKUsClientListResult` of struct `ResourceSKUsClientListResponse` has been removed
+- Field `RawResponse` of struct `ResourceSKUsClientListResponse` has been removed
+- Field `RestorePointCollectionsClientListAllResult` of struct `RestorePointCollectionsClientListAllResponse` has been removed
+- Field `RawResponse` of struct `RestorePointCollectionsClientListAllResponse` has been removed
+- Field `RawResponse` of struct `DedicatedHostsClientRestartResponse` has been removed
+- Field `RestorePointCollectionsClientListResult` of struct `RestorePointCollectionsClientListResponse` has been removed
+- Field `RawResponse` of struct `RestorePointCollectionsClientListResponse` has been removed
+- Field `ImagesClientCreateOrUpdateResult` of struct `ImagesClientCreateOrUpdateResponse` has been removed
+- Field `RawResponse` of struct `ImagesClientCreateOrUpdateResponse` has been removed
+- Field `VirtualMachineRunCommandsClientListResult` of struct `VirtualMachineRunCommandsClientListResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachineRunCommandsClientListResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachineScaleSetsClientReimageResponse` has been removed
+- Field `VirtualMachineScaleSetVMExtensionsClientListResult` of struct `VirtualMachineScaleSetVMExtensionsClientListResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachineScaleSetVMExtensionsClientListResponse` has been removed
+- Field `DiskAccessesClientUpdateResult` of struct `DiskAccessesClientUpdateResponse` has been removed
+- Field `RawResponse` of struct `DiskAccessesClientUpdateResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachinesClientDeleteResponse` has been removed
+- Field `VirtualMachineScaleSetVMRunCommandsClientGetResult` of struct `VirtualMachineScaleSetVMRunCommandsClientGetResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachineScaleSetVMRunCommandsClientGetResponse` has been removed
+- Field `VirtualMachinesClientListResult` of struct `VirtualMachinesClientListResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachinesClientListResponse` has been removed
+- Field `GalleryApplicationsClientUpdateResult` of struct `GalleryApplicationsClientUpdateResponse` has been removed
+- Field `RawResponse` of struct `GalleryApplicationsClientUpdateResponse` has been removed
+- Field `RawResponse` of struct `GalleryApplicationsClientDeleteResponse` has been removed
+- Field `RawResponse` of struct `DiskRestorePointClientRevokeAccessResponse` has been removed
+- Field `GalleriesClientUpdateResult` of struct `GalleriesClientUpdateResponse` has been removed
+- Field `RawResponse` of struct `GalleriesClientUpdateResponse` has been removed
+- Field `RawResponse` of struct `DiskEncryptionSetsClientDeleteResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachineScaleSetsClientConvertToSinglePlacementGroupResponse` has been removed
+- Field `CloudServiceRolesClientListResult` of struct `CloudServiceRolesClientListResponse` has been removed
+- Field `RawResponse` of struct `CloudServiceRolesClientListResponse` has been removed
+- Field `RawResponse` of struct `GalleryImagesClientDeleteResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachinesClientReimageResponse` has been removed
+- Field `DiskEncryptionSetsClientListResult` of struct `DiskEncryptionSetsClientListResponse` has been removed
+- Field `RawResponse` of struct `DiskEncryptionSetsClientListResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachineScaleSetsClientRestartResponse` has been removed
+- Field `GalleryImagesClientUpdateResult` of struct `GalleryImagesClientUpdateResponse` has been removed
+- Field `RawResponse` of struct `GalleryImagesClientUpdateResponse` has been removed
+- Field `DiskEncryptionSetsClientCreateOrUpdateResult` of struct `DiskEncryptionSetsClientCreateOrUpdateResponse` has been removed
+- Field `RawResponse` of struct `DiskEncryptionSetsClientCreateOrUpdateResponse` has been removed
+- Field `DedicatedHostGroupsClientCreateOrUpdateResult` of struct `DedicatedHostGroupsClientCreateOrUpdateResponse` has been removed
+- Field `RawResponse` of struct `DedicatedHostGroupsClientCreateOrUpdateResponse` has been removed
+- Field `ProximityPlacementGroupsClientListByResourceGroupResult` of struct `ProximityPlacementGroupsClientListByResourceGroupResponse` has been removed
+- Field `RawResponse` of struct `ProximityPlacementGroupsClientListByResourceGroupResponse` has been removed
+- Field `VirtualMachinesClientListAllResult` of struct `VirtualMachinesClientListAllResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachinesClientListAllResponse` has been removed
+- Field `SSHPublicKeysClientGetResult` of struct `SSHPublicKeysClientGetResponse` has been removed
+- Field `RawResponse` of struct `SSHPublicKeysClientGetResponse` has been removed
+- Field `RawResponse` of struct `CloudServicesClientRestartResponse` has been removed
+- Field `VirtualMachineImagesEdgeZoneClientListOffersResult` of struct `VirtualMachineImagesEdgeZoneClientListOffersResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachineImagesEdgeZoneClientListOffersResponse` has been removed
+- Field `VirtualMachineImagesClientListResult` of struct `VirtualMachineImagesClientListResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachineImagesClientListResponse` has been removed
+- Field `RestorePointCollectionsClientCreateOrUpdateResult` of struct `RestorePointCollectionsClientCreateOrUpdateResponse` has been removed
+- Field `RawResponse` of struct `RestorePointCollectionsClientCreateOrUpdateResponse` has been removed
+- Field `CloudServicesClientListResult` of struct `CloudServicesClientListResponse` has been removed
+- Field `RawResponse` of struct `CloudServicesClientListResponse` has been removed
+- Field `VirtualMachinesClientRunCommandResult` of struct `VirtualMachinesClientRunCommandResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachinesClientRunCommandResponse` has been removed
+- Field `CapacityReservationsClientCreateOrUpdateResult` of struct `CapacityReservationsClientCreateOrUpdateResponse` has been removed
+- Field `RawResponse` of struct `CapacityReservationsClientCreateOrUpdateResponse` has been removed
+- Field `VirtualMachineScaleSetsClientListAllResult` of struct `VirtualMachineScaleSetsClientListAllResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachineScaleSetsClientListAllResponse` has been removed
+- Field `RawResponse` of struct `SnapshotsClientRevokeAccessResponse` has been removed
+- Field `VirtualMachinesClientCreateOrUpdateResult` of struct `VirtualMachinesClientCreateOrUpdateResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachinesClientCreateOrUpdateResponse` has been removed
+- Field `RawResponse` of struct `DiskAccessesClientDeleteResponse` has been removed
+- Field `CloudServiceRoleInstancesClientListResult` of struct `CloudServiceRoleInstancesClientListResponse` has been removed
+- Field `RawResponse` of struct `CloudServiceRoleInstancesClientListResponse` has been removed
+- Field `GalleryImageVersionsClientUpdateResult` of struct `GalleryImageVersionsClientUpdateResponse` has been removed
+- Field `RawResponse` of struct `GalleryImageVersionsClientUpdateResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachinesClientStartResponse` has been removed
+- Field `ImagesClientListByResourceGroupResult` of struct `ImagesClientListByResourceGroupResponse` has been removed
+- Field `RawResponse` of struct `ImagesClientListByResourceGroupResponse` has been removed
+- Field `SharedGalleryImagesClientListResult` of struct `SharedGalleryImagesClientListResponse` has been removed
+- Field `RawResponse` of struct `SharedGalleryImagesClientListResponse` has been removed
+- Field `ProximityPlacementGroupsClientGetResult` of struct `ProximityPlacementGroupsClientGetResponse` has been removed
+- Field `RawResponse` of struct `ProximityPlacementGroupsClientGetResponse` has been removed
+- Field `GalleryApplicationsClientGetResult` of struct `GalleryApplicationsClientGetResponse` has been removed
+- Field `RawResponse` of struct `GalleryApplicationsClientGetResponse` has been removed
+- Field `DiskEncryptionSetsClientGetResult` of struct `DiskEncryptionSetsClientGetResponse` has been removed
+- Field `RawResponse` of struct `DiskEncryptionSetsClientGetResponse` has been removed
+- Field `DedicatedHostsClientGetResult` of struct `DedicatedHostsClientGetResponse` has been removed
+- Field `RawResponse` of struct `DedicatedHostsClientGetResponse` has been removed
+- Field `VirtualMachinesClientUpdateResult` of struct `VirtualMachinesClientUpdateResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachinesClientUpdateResponse` has been removed
+- Field `CapacityReservationsClientGetResult` of struct `CapacityReservationsClientGetResponse` has been removed
+- Field `RawResponse` of struct `CapacityReservationsClientGetResponse` has been removed
+- Field `RawResponse` of struct `CloudServiceRoleInstancesClientReimageResponse` has been removed
+- Field `RawResponse` of struct `GalleryImageVersionsClientDeleteResponse` has been removed
+- Field `CloudServiceOperatingSystemsClientGetOSFamilyResult` of struct `CloudServiceOperatingSystemsClientGetOSFamilyResponse` has been removed
+- Field `RawResponse` of struct `CloudServiceOperatingSystemsClientGetOSFamilyResponse` has been removed
+- Field `RawResponse` of struct `CloudServicesClientStartResponse` has been removed
+- Field `SSHPublicKeysClientListBySubscriptionResult` of struct `SSHPublicKeysClientListBySubscriptionResponse` has been removed
+- Field `RawResponse` of struct `SSHPublicKeysClientListBySubscriptionResponse` has been removed
+- Field `VirtualMachinesClientGetResult` of struct `VirtualMachinesClientGetResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachinesClientGetResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachinesClientDeallocateResponse` has been removed
+- Field `VirtualMachineRunCommandsClientCreateOrUpdateResult` of struct `VirtualMachineRunCommandsClientCreateOrUpdateResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachineRunCommandsClientCreateOrUpdateResponse` has been removed
+- Field `RawResponse` of struct `GalleryApplicationVersionsClientDeleteResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachineScaleSetVMsClientDeleteResponse` has been removed
+- Field `GalleryImagesClientCreateOrUpdateResult` of struct `GalleryImagesClientCreateOrUpdateResponse` has been removed
+- Field `RawResponse` of struct `GalleryImagesClientCreateOrUpdateResponse` has been removed
+- Field `DiskAccessesClientUpdateAPrivateEndpointConnectionResult` of struct `DiskAccessesClientUpdateAPrivateEndpointConnectionResponse` has been removed
+- Field `RawResponse` of struct `DiskAccessesClientUpdateAPrivateEndpointConnectionResponse` has been removed
+- Field `UsageClientListResult` of struct `UsageClientListResponse` has been removed
+- Field `RawResponse` of struct `UsageClientListResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachineScaleSetVMsClientRestartResponse` has been removed
+- Field `AvailabilitySetsClientUpdateResult` of struct `AvailabilitySetsClientUpdateResponse` has been removed
+- Field `RawResponse` of struct `AvailabilitySetsClientUpdateResponse` has been removed
+- Field `RestorePointsClientCreateResult` of struct `RestorePointsClientCreateResponse` has been removed
+- Field `RawResponse` of struct `RestorePointsClientCreateResponse` has been removed
+- Field `ImagesClientGetResult` of struct `ImagesClientGetResponse` has been removed
+- Field `RawResponse` of struct `ImagesClientGetResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachineExtensionsClientDeleteResponse` has been removed
+- Field `RawResponse` of struct `CloudServicesUpdateDomainClientWalkUpdateDomainResponse` has been removed
+- Field `CapacityReservationGroupsClientGetResult` of struct `CapacityReservationGroupsClientGetResponse` has been removed
+- Field `RawResponse` of struct `CapacityReservationGroupsClientGetResponse` has been removed
+- Field `ProximityPlacementGroupsClientCreateOrUpdateResult` of struct `ProximityPlacementGroupsClientCreateOrUpdateResponse` has been removed
+- Field `RawResponse` of struct `ProximityPlacementGroupsClientCreateOrUpdateResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachineScaleSetRollingUpgradesClientCancelResponse` has been removed
+- Field `VirtualMachineScaleSetExtensionsClientGetResult` of struct `VirtualMachineScaleSetExtensionsClientGetResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachineScaleSetExtensionsClientGetResponse` has been removed
+- Field `RawResponse` of struct `AvailabilitySetsClientDeleteResponse` has been removed
+- Field `LogAnalyticsClientExportThrottledRequestsResult` of struct `LogAnalyticsClientExportThrottledRequestsResponse` has been removed
+- Field `RawResponse` of struct `LogAnalyticsClientExportThrottledRequestsResponse` has been removed
+- Field `OperationsClientListResult` of struct `OperationsClientListResponse` has been removed
+- Field `RawResponse` of struct `OperationsClientListResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachineScaleSetsClientSetOrchestrationServiceStateResponse` has been removed
+- Field `SnapshotsClientGetResult` of struct `SnapshotsClientGetResponse` has been removed
+- Field `RawResponse` of struct `SnapshotsClientGetResponse` has been removed
+- Field `VirtualMachinesClientListAvailableSizesResult` of struct `VirtualMachinesClientListAvailableSizesResponse` has been removed
+- Field `RawResponse` of struct `VirtualMachinesClientListAvailableSizesResponse` has been removed
+- Field `CloudServicesClientCreateOrUpdateResult` of struct `CloudServicesClientCreateOrUpdateResponse` has been removed
+- Field `RawResponse` of struct `CloudServicesClientCreateOrUpdateResponse` has been removed
+- Field `DisksClientListResult` of struct `DisksClientListResponse` has been removed
+- Field `RawResponse` of struct
`DisksClientListResponse` has been removed +- Field `RawResponse` of struct `CloudServiceRoleInstancesClientDeleteResponse` has been removed +- Field `CloudServicesClientGetResult` of struct `CloudServicesClientGetResponse` has been removed +- Field `RawResponse` of struct `CloudServicesClientGetResponse` has been removed +- Field `VirtualMachineImagesClientGetResult` of struct `VirtualMachineImagesClientGetResponse` has been removed +- Field `RawResponse` of struct `VirtualMachineImagesClientGetResponse` has been removed +- Field `DiskEncryptionSetsClientUpdateResult` of struct `DiskEncryptionSetsClientUpdateResponse` has been removed +- Field `RawResponse` of struct `DiskEncryptionSetsClientUpdateResponse` has been removed +- Field `VirtualMachineScaleSetVMRunCommandsClientUpdateResult` of struct `VirtualMachineScaleSetVMRunCommandsClientUpdateResponse` has been removed +- Field `RawResponse` of struct `VirtualMachineScaleSetVMRunCommandsClientUpdateResponse` has been removed +- Field `DisksClientListByResourceGroupResult` of struct `DisksClientListByResourceGroupResponse` has been removed +- Field `RawResponse` of struct `DisksClientListByResourceGroupResponse` has been removed +- Field `VirtualMachineExtensionImagesClientGetResult` of struct `VirtualMachineExtensionImagesClientGetResponse` has been removed +- Field `RawResponse` of struct `VirtualMachineExtensionImagesClientGetResponse` has been removed +- Field `RawResponse` of struct `VirtualMachineRunCommandsClientDeleteResponse` has been removed +- Field `VirtualMachineScaleSetsClientListResult` of struct `VirtualMachineScaleSetsClientListResponse` has been removed +- Field `RawResponse` of struct `VirtualMachineScaleSetsClientListResponse` has been removed +- Field `GalleryApplicationVersionsClientListByGalleryApplicationResult` of struct `GalleryApplicationVersionsClientListByGalleryApplicationResponse` has been removed +- Field `RawResponse` of struct `GalleryApplicationVersionsClientListByGalleryApplicationResponse` has been removed +- Field `RawResponse` of struct `ProximityPlacementGroupsClientDeleteResponse` has been removed +- Field `RawResponse` of struct `VirtualMachineScaleSetsClientDeleteInstancesResponse` has been removed +- Field `GalleryApplicationVersionsClientGetResult` of struct `GalleryApplicationVersionsClientGetResponse` has been removed +- Field `RawResponse` of struct `GalleryApplicationVersionsClientGetResponse` has been removed +- Field `DiskAccessesClientListPrivateEndpointConnectionsResult` of struct `DiskAccessesClientListPrivateEndpointConnectionsResponse` has been removed +- Field `RawResponse` of struct `DiskAccessesClientListPrivateEndpointConnectionsResponse` has been removed +- Field `VirtualMachineScaleSetVMsClientUpdateResult` of struct `VirtualMachineScaleSetVMsClientUpdateResponse` has been removed +- Field `RawResponse` of struct `VirtualMachineScaleSetVMsClientUpdateResponse` has been removed +- Field `DedicatedHostsClientUpdateResult` of struct `DedicatedHostsClientUpdateResponse` has been removed +- Field `RawResponse` of struct `DedicatedHostsClientUpdateResponse` has been removed +- Field `VirtualMachineScaleSetsClientGetInstanceViewResult` of struct `VirtualMachineScaleSetsClientGetInstanceViewResponse` has been removed +- Field `RawResponse` of struct `VirtualMachineScaleSetsClientGetInstanceViewResponse` has been removed +- Field `DedicatedHostGroupsClientListBySubscriptionResult` of struct `DedicatedHostGroupsClientListBySubscriptionResponse` has been removed +- Field `RawResponse` of 
struct `DedicatedHostGroupsClientListBySubscriptionResponse` has been removed +- Field `DiskAccessesClientCreateOrUpdateResult` of struct `DiskAccessesClientCreateOrUpdateResponse` has been removed +- Field `RawResponse` of struct `DiskAccessesClientCreateOrUpdateResponse` has been removed +- Field `DiskAccessesClientListByResourceGroupResult` of struct `DiskAccessesClientListByResourceGroupResponse` has been removed +- Field `RawResponse` of struct `DiskAccessesClientListByResourceGroupResponse` has been removed +- Field `RawResponse` of struct `VirtualMachineScaleSetVMsClientPowerOffResponse` has been removed +- Field `RawResponse` of struct `VirtualMachineScaleSetVMsClientRedeployResponse` has been removed +- Field `RestorePointsClientGetResult` of struct `RestorePointsClientGetResponse` has been removed +- Field `RawResponse` of struct `RestorePointsClientGetResponse` has been removed +- Field `ProximityPlacementGroupsClientUpdateResult` of struct `ProximityPlacementGroupsClientUpdateResponse` has been removed +- Field `RawResponse` of struct `ProximityPlacementGroupsClientUpdateResponse` has been removed +- Field `VirtualMachineScaleSetsClientGetOSUpgradeHistoryResult` of struct `VirtualMachineScaleSetsClientGetOSUpgradeHistoryResponse` has been removed +- Field `RawResponse` of struct `VirtualMachineScaleSetsClientGetOSUpgradeHistoryResponse` has been removed +- Field `RawResponse` of struct `VirtualMachinesClientPowerOffResponse` has been removed +- Field `VirtualMachineImagesClientListOffersResult` of struct `VirtualMachineImagesClientListOffersResponse` has been removed +- Field `RawResponse` of struct `VirtualMachineImagesClientListOffersResponse` has been removed +- Field `RawResponse` of struct `VirtualMachineScaleSetVMsClientDeallocateResponse` has been removed +- Field `RawResponse` of struct `CloudServiceRoleInstancesClientGetRemoteDesktopFileResponse` has been removed +- Field `SnapshotsClientListResult` of struct `SnapshotsClientListResponse` has been removed +- Field `RawResponse` of struct `SnapshotsClientListResponse` has been removed +- Field `CapacityReservationsClientUpdateResult` of struct `CapacityReservationsClientUpdateResponse` has been removed +- Field `RawResponse` of struct `CapacityReservationsClientUpdateResponse` has been removed +- Field `RawResponse` of struct `VirtualMachineScaleSetsClientReimageAllResponse` has been removed +- Field `VirtualMachineExtensionImagesClientListVersionsResult` of struct `VirtualMachineExtensionImagesClientListVersionsResponse` has been removed +- Field `RawResponse` of struct `VirtualMachineExtensionImagesClientListVersionsResponse` has been removed +- Field `VirtualMachineExtensionImagesClientListTypesResult` of struct `VirtualMachineExtensionImagesClientListTypesResponse` has been removed +- Field `RawResponse` of struct `VirtualMachineExtensionImagesClientListTypesResponse` has been removed +- Field `VirtualMachineScaleSetVMRunCommandsClientCreateOrUpdateResult` of struct `VirtualMachineScaleSetVMRunCommandsClientCreateOrUpdateResponse` has been removed +- Field `RawResponse` of struct `VirtualMachineScaleSetVMRunCommandsClientCreateOrUpdateResponse` has been removed +- Field `RawResponse` of struct `DedicatedHostsClientDeleteResponse` has been removed +- Field `RawResponse` of struct `VirtualMachineScaleSetVMsClientStartResponse` has been removed +- Field `VirtualMachineScaleSetVMsClientRunCommandResult` of struct `VirtualMachineScaleSetVMsClientRunCommandResponse` has been removed +- Field `RawResponse` of struct 
`VirtualMachineScaleSetVMsClientRunCommandResponse` has been removed +- Field `ProximityPlacementGroupsClientListBySubscriptionResult` of struct `ProximityPlacementGroupsClientListBySubscriptionResponse` has been removed +- Field `RawResponse` of struct `ProximityPlacementGroupsClientListBySubscriptionResponse` has been removed +- Field `SnapshotsClientGrantAccessResult` of struct `SnapshotsClientGrantAccessResponse` has been removed +- Field `RawResponse` of struct `SnapshotsClientGrantAccessResponse` has been removed +- Field `VirtualMachineRunCommandsClientGetResult` of struct `VirtualMachineRunCommandsClientGetResponse` has been removed +- Field `RawResponse` of struct `VirtualMachineRunCommandsClientGetResponse` has been removed +- Field `RawResponse` of struct `CloudServicesClientPowerOffResponse` has been removed +- Field `GalleriesClientCreateOrUpdateResult` of struct `GalleriesClientCreateOrUpdateResponse` has been removed +- Field `RawResponse` of struct `GalleriesClientCreateOrUpdateResponse` has been removed +- Field `SnapshotsClientUpdateResult` of struct `SnapshotsClientUpdateResponse` has been removed +- Field `RawResponse` of struct `SnapshotsClientUpdateResponse` has been removed +- Field `GalleryImagesClientGetResult` of struct `GalleryImagesClientGetResponse` has been removed +- Field `RawResponse` of struct `GalleryImagesClientGetResponse` has been removed +- Field `CloudServiceOperatingSystemsClientListOSVersionsResult` of struct `CloudServiceOperatingSystemsClientListOSVersionsResponse` has been removed +- Field `RawResponse` of struct `CloudServiceOperatingSystemsClientListOSVersionsResponse` has been removed +- Field `DedicatedHostsClientCreateOrUpdateResult` of struct `DedicatedHostsClientCreateOrUpdateResponse` has been removed +- Field `RawResponse` of struct `DedicatedHostsClientCreateOrUpdateResponse` has been removed +- Field `LogAnalyticsClientExportRequestRateByIntervalResult` of struct `LogAnalyticsClientExportRequestRateByIntervalResponse` has been removed +- Field `RawResponse` of struct `LogAnalyticsClientExportRequestRateByIntervalResponse` has been removed +- Field `RawResponse` of struct `GalleriesClientDeleteResponse` has been removed +- Field `AvailabilitySetsClientListAvailableSizesResult` of struct `AvailabilitySetsClientListAvailableSizesResponse` has been removed +- Field `RawResponse` of struct `AvailabilitySetsClientListAvailableSizesResponse` has been removed +- Field `DedicatedHostsClientListByHostGroupResult` of struct `DedicatedHostsClientListByHostGroupResponse` has been removed +- Field `RawResponse` of struct `DedicatedHostsClientListByHostGroupResponse` has been removed +- Field `CloudServicesClientListAllResult` of struct `CloudServicesClientListAllResponse` has been removed +- Field `RawResponse` of struct `CloudServicesClientListAllResponse` has been removed +- Field `AvailabilitySetsClientGetResult` of struct `AvailabilitySetsClientGetResponse` has been removed +- Field `RawResponse` of struct `AvailabilitySetsClientGetResponse` has been removed +- Field `RawResponse` of struct `VirtualMachinesClientReapplyResponse` has been removed +- Field `RawResponse` of struct `VirtualMachineScaleSetsClientDeallocateResponse` has been removed +- Field `RawResponse` of struct `CapacityReservationsClientDeleteResponse` has been removed +- Field `RawResponse` of struct `VirtualMachineScaleSetRollingUpgradesClientStartExtensionUpgradeResponse` has been removed +- Field `VirtualMachineExtensionsClientCreateOrUpdateResult` of struct 
`VirtualMachineExtensionsClientCreateOrUpdateResponse` has been removed +- Field `RawResponse` of struct `VirtualMachineExtensionsClientCreateOrUpdateResponse` has been removed +- Field `AvailabilitySetsClientListBySubscriptionResult` of struct `AvailabilitySetsClientListBySubscriptionResponse` has been removed +- Field `RawResponse` of struct `AvailabilitySetsClientListBySubscriptionResponse` has been removed +- Field `ImagesClientUpdateResult` of struct `ImagesClientUpdateResponse` has been removed +- Field `RawResponse` of struct `ImagesClientUpdateResponse` has been removed +- Field `VirtualMachineScaleSetExtensionsClientUpdateResult` of struct `VirtualMachineScaleSetExtensionsClientUpdateResponse` has been removed +- Field `RawResponse` of struct `VirtualMachineScaleSetExtensionsClientUpdateResponse` has been removed +- Field `RawResponse` of struct `CapacityReservationGroupsClientDeleteResponse` has been removed +- Field `GalleryImagesClientListByGalleryResult` of struct `GalleryImagesClientListByGalleryResponse` has been removed +- Field `RawResponse` of struct `GalleryImagesClientListByGalleryResponse` has been removed +- Field `VirtualMachineRunCommandsClientListByVirtualMachineResult` of struct `VirtualMachineRunCommandsClientListByVirtualMachineResponse` has been removed +- Field `RawResponse` of struct `VirtualMachineRunCommandsClientListByVirtualMachineResponse` has been removed +- Field `DisksClientUpdateResult` of struct `DisksClientUpdateResponse` has been removed +- Field `RawResponse` of struct `DisksClientUpdateResponse` has been removed +- Field `GalleryImageVersionsClientGetResult` of struct `GalleryImageVersionsClientGetResponse` has been removed +- Field `RawResponse` of struct `GalleryImageVersionsClientGetResponse` has been removed +- Field `RawResponse` of struct `VirtualMachineScaleSetsClientDeleteResponse` has been removed +- Field `SnapshotsClientListByResourceGroupResult` of struct `SnapshotsClientListByResourceGroupResponse` has been removed +- Field `RawResponse` of struct `SnapshotsClientListByResourceGroupResponse` has been removed +- Field `RawResponse` of struct `DedicatedHostGroupsClientDeleteResponse` has been removed +- Field `DedicatedHostGroupsClientGetResult` of struct `DedicatedHostGroupsClientGetResponse` has been removed +- Field `RawResponse` of struct `DedicatedHostGroupsClientGetResponse` has been removed +- Field `SSHPublicKeysClientUpdateResult` of struct `SSHPublicKeysClientUpdateResponse` has been removed +- Field `RawResponse` of struct `SSHPublicKeysClientUpdateResponse` has been removed +- Field `VirtualMachineImagesEdgeZoneClientListPublishersResult` of struct `VirtualMachineImagesEdgeZoneClientListPublishersResponse` has been removed +- Field `RawResponse` of struct `VirtualMachineImagesEdgeZoneClientListPublishersResponse` has been removed +- Field `RawResponse` of struct `SSHPublicKeysClientDeleteResponse` has been removed +- Field `RawResponse` of struct `VirtualMachineScaleSetsClientPowerOffResponse` has been removed +- Field `SnapshotsClientCreateOrUpdateResult` of struct `SnapshotsClientCreateOrUpdateResponse` has been removed +- Field `RawResponse` of struct `SnapshotsClientCreateOrUpdateResponse` has been removed +- Field `CapacityReservationGroupsClientCreateOrUpdateResult` of struct `CapacityReservationGroupsClientCreateOrUpdateResponse` has been removed +- Field `RawResponse` of struct `CapacityReservationGroupsClientCreateOrUpdateResponse` has been removed +- Field `RawResponse` of struct 
`VirtualMachineScaleSetVMsClientReimageResponse` has been removed +- Field `RawResponse` of struct `DisksClientRevokeAccessResponse` has been removed +- Field `RawResponse` of struct `VirtualMachinesClientPerformMaintenanceResponse` has been removed +- Field `GalleryApplicationVersionsClientUpdateResult` of struct `GalleryApplicationVersionsClientUpdateResponse` has been removed +- Field `RawResponse` of struct `GalleryApplicationVersionsClientUpdateResponse` has been removed + +### Features Added + +- New function `CommunityGalleryInfo.MarshalJSON() ([]byte, error)` +- New struct `CloudError` +- New struct `CommunityGalleryInfo` +- New struct `DiskRestorePointReplicationStatus` +- New struct `GalleryArtifactSource` +- New struct `ManagedArtifact` +- New field `ResumeToken` in struct `DedicatedHostsClientBeginUpdateOptions` +- New anonymous field `VirtualMachine` in struct `VirtualMachinesClientUpdateResponse` +- New field `ResumeToken` in struct `VirtualMachineScaleSetVMRunCommandsClientBeginCreateOrUpdateOptions` +- New anonymous field `GalleryList` in struct `GalleriesClientListResponse` +- New field `ResumeToken` in struct `VirtualMachineRunCommandsClientBeginUpdateOptions` +- New field `ResumeToken` in struct `VirtualMachineScaleSetVMsClientBeginReimageOptions` +- New field `ResumeToken` in struct `VirtualMachineScaleSetsClientBeginReimageAllOptions` +- New anonymous field `VirtualMachineScaleSetListResult` in struct `VirtualMachineScaleSetsClientListResponse` +- New anonymous field `GalleryImage` in struct `GalleryImagesClientUpdateResponse` +- New anonymous field `AvailabilitySet` in struct `AvailabilitySetsClientCreateOrUpdateResponse` +- New anonymous field `VirtualMachineScaleSetListSKUsResult` in struct `VirtualMachineScaleSetsClientListSKUsResponse` +- New anonymous field `DiskAccessList` in struct `DiskAccessesClientListResponse` +- New field `ResumeToken` in struct `VirtualMachinesClientBeginStartOptions` +- New field `VirtualMachineExtensionImageArray` in struct `VirtualMachineExtensionImagesClientListVersionsResponse` +- New anonymous field `VirtualMachineScaleSetVMInstanceView` in struct `VirtualMachineScaleSetVMsClientGetInstanceViewResponse` +- New field `Body` in struct `CloudServiceRoleInstancesClientGetRemoteDesktopFileResponse` +- New field `ResumeToken` in struct `VirtualMachineScaleSetsClientBeginPerformMaintenanceOptions` +- New anonymous field `DiskAccess` in struct `DiskAccessesClientGetResponse` +- New field `ResumeToken` in struct `CloudServiceRoleInstancesClientBeginRestartOptions` +- New field `ResumeToken` in struct `RestorePointsClientBeginCreateOptions` +- New field `ResumeToken` in struct `DiskRestorePointClientBeginRevokeAccessOptions` +- New anonymous field `SSHPublicKeyGenerateKeyPairResult` in struct `SSHPublicKeysClientGenerateKeyPairResponse` +- New anonymous field `AvailabilitySetListResult` in struct `AvailabilitySetsClientListBySubscriptionResponse` +- New field `ResumeToken` in struct `CloudServicesUpdateDomainClientBeginWalkUpdateDomainOptions` +- New anonymous field `VirtualMachineRunCommand` in struct `VirtualMachineScaleSetVMRunCommandsClientGetResponse` +- New field `ResumeToken` in struct `CloudServicesClientBeginDeleteOptions` +- New field `ResumeToken` in struct `VirtualMachinesClientBeginConvertToManagedDisksOptions` +- New field `ResumeToken` in struct `CloudServicesClientBeginStartOptions` +- New field `ResumeToken` in struct `GalleryApplicationVersionsClientBeginDeleteOptions` +- New anonymous field `AccessURI` in struct 
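The removals above all follow one pattern: the generated `*Result` wrapper and the `RawResponse` field are dropped from the listed response structs, and (per the additions below) the payload is instead embedded anonymously on the response. A minimal migration sketch, not part of the vendored changelog, assuming an `*armcompute.VirtualMachinesClient` constructed elsewhere; `printVMName`, `rg`, and `vm-name` are illustrative placeholders:

```go
package example

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute"
)

// printVMName shows the flattened response access. Before this release the
// payload sat behind a wrapper field:
//
//	resp.VirtualMachinesClientGetResult.VirtualMachine // wrapper, now removed
//	resp.RawResponse                                   // also removed
//
// Afterwards the VirtualMachine payload is an anonymous field on the response.
func printVMName(ctx context.Context, client *armcompute.VirtualMachinesClient) error {
	resp, err := client.Get(ctx, "rg", "vm-name", nil) // placeholder names
	if err != nil {
		return err
	}
	fmt.Println(*resp.VirtualMachine.Name) // or resp.Name, via field promotion
	return nil
}
```

Callers that still need the raw `*http.Response` can capture it through the pipeline instead (in current azcore releases, `runtime.WithCaptureResponse` on the context) rather than reading it off the response struct.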
+
+### Features Added
+
+- New function `CommunityGalleryInfo.MarshalJSON() ([]byte, error)`
+- New struct `CloudError`
+- New struct `CommunityGalleryInfo`
+- New struct `DiskRestorePointReplicationStatus`
+- New struct `GalleryArtifactSource`
+- New struct `ManagedArtifact`
+- New field `ResumeToken` in struct `DedicatedHostsClientBeginUpdateOptions`
+- New anonymous field `VirtualMachine` in struct `VirtualMachinesClientUpdateResponse`
+- New field `ResumeToken` in struct `VirtualMachineScaleSetVMRunCommandsClientBeginCreateOrUpdateOptions`
+- New anonymous field `GalleryList` in struct `GalleriesClientListResponse`
+- New field `ResumeToken` in struct `VirtualMachineRunCommandsClientBeginUpdateOptions`
+- New field `ResumeToken` in struct `VirtualMachineScaleSetVMsClientBeginReimageOptions`
+- New field `ResumeToken` in struct `VirtualMachineScaleSetsClientBeginReimageAllOptions`
+- New anonymous field `VirtualMachineScaleSetListResult` in struct `VirtualMachineScaleSetsClientListResponse`
+- New anonymous field `GalleryImage` in struct `GalleryImagesClientUpdateResponse`
+- New anonymous field `AvailabilitySet` in struct `AvailabilitySetsClientCreateOrUpdateResponse`
+- New anonymous field `VirtualMachineScaleSetListSKUsResult` in struct `VirtualMachineScaleSetsClientListSKUsResponse`
+- New anonymous field `DiskAccessList` in struct `DiskAccessesClientListResponse`
+- New field `ResumeToken` in struct `VirtualMachinesClientBeginStartOptions`
+- New field `VirtualMachineExtensionImageArray` in struct `VirtualMachineExtensionImagesClientListVersionsResponse`
+- New anonymous field `VirtualMachineScaleSetVMInstanceView` in struct `VirtualMachineScaleSetVMsClientGetInstanceViewResponse`
+- New field `Body` in struct `CloudServiceRoleInstancesClientGetRemoteDesktopFileResponse`
+- New field `ResumeToken` in struct `VirtualMachineScaleSetsClientBeginPerformMaintenanceOptions`
+- New anonymous field `DiskAccess` in struct `DiskAccessesClientGetResponse`
+- New field `ResumeToken` in struct `CloudServiceRoleInstancesClientBeginRestartOptions`
+- New field `ResumeToken` in struct `RestorePointsClientBeginCreateOptions`
+- New field `ResumeToken` in struct `DiskRestorePointClientBeginRevokeAccessOptions`
+- New anonymous field `SSHPublicKeyGenerateKeyPairResult` in struct `SSHPublicKeysClientGenerateKeyPairResponse`
+- New anonymous field `AvailabilitySetListResult` in struct `AvailabilitySetsClientListBySubscriptionResponse`
+- New field `ResumeToken` in struct `CloudServicesUpdateDomainClientBeginWalkUpdateDomainOptions`
+- New anonymous field `VirtualMachineRunCommand` in struct `VirtualMachineScaleSetVMRunCommandsClientGetResponse`
+- New field `ResumeToken` in struct `CloudServicesClientBeginDeleteOptions`
+- New field `ResumeToken` in struct `VirtualMachinesClientBeginConvertToManagedDisksOptions`
+- New field `ResumeToken` in struct `CloudServicesClientBeginStartOptions`
+- New field `ResumeToken` in struct `GalleryApplicationVersionsClientBeginDeleteOptions`
+- New anonymous field `AccessURI` in struct `SnapshotsClientGrantAccessResponse`
+- New anonymous field `Disk` in struct `DisksClientCreateOrUpdateResponse`
+- New anonymous field `VirtualMachineListResult` in struct `VirtualMachinesClientListResponse`
+- New field `ResumeToken` in struct `GalleryApplicationsClientBeginDeleteOptions`
+- New anonymous field `VirtualMachineCaptureResult` in struct `VirtualMachinesClientCaptureResponse`
+- New anonymous field `DiskEncryptionSetList` in struct `DiskEncryptionSetsClientListByResourceGroupResponse`
+- New field `ResumeToken` in struct `CloudServicesClientBeginRebuildOptions`
+- New anonymous field `VirtualMachineSizeListResult` in struct `VirtualMachineSizesClientListResponse`
+- New anonymous field `RestorePointCollection` in struct `RestorePointCollectionsClientCreateOrUpdateResponse`
+- New anonymous field `Snapshot` in struct `SnapshotsClientCreateOrUpdateResponse`
+- New field `ResumeToken` in struct `GalleriesClientBeginCreateOrUpdateOptions`
+- New anonymous field `ListUsagesResult` in struct `UsageClientListResponse`
+- New field `ResumeToken` in struct `VirtualMachineScaleSetsClientBeginRestartOptions`
+- New anonymous field `DiskList` in struct `DisksClientListResponse`
+- New field `ResumeToken` in struct `GalleryApplicationVersionsClientBeginCreateOrUpdateOptions`
+- New field `ResumeToken` in struct `VirtualMachinesClientBeginPerformMaintenanceOptions`
+- New anonymous field `SharingUpdate` in struct `GallerySharingProfileClientUpdateResponse`
+- New anonymous field `RetrieveBootDiagnosticsDataResult` in struct `VirtualMachinesClientRetrieveBootDiagnosticsDataResponse`
+- New field `ResumeToken` in struct `LogAnalyticsClientBeginExportRequestRateByIntervalOptions`
+- New field `ResumeToken` in struct `SnapshotsClientBeginGrantAccessOptions`
+- New anonymous field `CloudServiceInstanceView` in struct `CloudServicesClientGetInstanceViewResponse`
+- New anonymous field `CapacityReservation` in struct `CapacityReservationsClientGetResponse`
+- New anonymous field `ProximityPlacementGroup` in struct `ProximityPlacementGroupsClientCreateOrUpdateResponse`
+- New anonymous field `DedicatedHostListResult` in struct `DedicatedHostsClientListByHostGroupResponse`
+- New anonymous field `Disk` in struct `DisksClientUpdateResponse`
+- New anonymous field `OperationListResult` in struct `OperationsClientListResponse`
+- New anonymous field `VirtualMachineScaleSetVMListResult` in struct `VirtualMachineScaleSetVMsClientListResponse`
+- New anonymous field `Image` in struct `ImagesClientUpdateResponse`
+- New field `VirtualMachineExtensionImageArray` in struct `VirtualMachineExtensionImagesClientListTypesResponse`
+- New anonymous field `SharedGalleryImageVersion` in struct `SharedGalleryImageVersionsClientGetResponse`
+- New field `ResumeToken` in struct `DisksClientBeginCreateOrUpdateOptions`
+- New anonymous field `CapacityReservationGroup` in struct `CapacityReservationGroupsClientCreateOrUpdateResponse`
+- New anonymous field `VirtualMachineRunCommand` in struct `VirtualMachineScaleSetVMRunCommandsClientUpdateResponse`
+- New field `ResumeToken` in struct `DiskAccessesClientBeginUpdateAPrivateEndpointConnectionOptions`
+- New anonymous field `DiskRestorePointList` in struct `DiskRestorePointClientListByRestorePointResponse`
+- New anonymous field `VirtualMachineScaleSet` in struct `VirtualMachineScaleSetsClientCreateOrUpdateResponse`
+- New anonymous field `VirtualMachineExtensionImage` in struct `VirtualMachineExtensionImagesClientGetResponse`
+- New anonymous field `AccessURI` in struct `DisksClientGrantAccessResponse`
+- New anonymous field `DedicatedHostGroupListResult` in struct `DedicatedHostGroupsClientListByResourceGroupResponse`
+- New anonymous field `SSHPublicKeysGroupListResult` in struct `SSHPublicKeysClientListBySubscriptionResponse`
+- New anonymous field `VirtualMachineImage` in struct `VirtualMachineImagesClientGetResponse`
+- New anonymous field `RestorePointCollectionListResult` in struct `RestorePointCollectionsClientListAllResponse`
+- New anonymous field `CapacityReservation` in struct `CapacityReservationsClientCreateOrUpdateResponse`
+- New field `ResumeToken` in struct `RestorePointsClientBeginDeleteOptions`
+- New anonymous field `VirtualMachineExtensionsListResult` in struct `VirtualMachineExtensionsClientListResponse`
+- New anonymous field `RollingUpgradeStatusInfo` in struct `VirtualMachineScaleSetRollingUpgradesClientGetLatestResponse`
+- New field `ResumeToken` in struct `VirtualMachineScaleSetRollingUpgradesClientBeginStartExtensionUpgradeOptions`
+- New field `ResumeToken` in struct `GalleriesClientBeginUpdateOptions`
+- New anonymous field `VirtualMachineListResult` in struct `VirtualMachinesClientListAllResponse`
+- New anonymous field `Gallery` in struct `GalleriesClientUpdateResponse`
+- New field `ResumeToken` in struct `CloudServicesClientBeginRestartOptions`
+- New field `VirtualMachineImageResourceArray` in struct `VirtualMachineImagesEdgeZoneClientListSKUsResponse`
+- New anonymous field `VirtualMachineScaleSet` in struct `VirtualMachineScaleSetsClientGetResponse`
+- New field `ResumeToken` in struct `GalleryImagesClientBeginDeleteOptions`
+- New anonymous field `CapacityReservationGroupListResult` in struct `CapacityReservationGroupsClientListByResourceGroupResponse`
+- New anonymous field `ImageListResult` in struct `ImagesClientListResponse`
+- New anonymous field `DedicatedHost` in struct `DedicatedHostsClientGetResponse`
+- New field `ResumeToken` in struct `CloudServiceRoleInstancesClientBeginDeleteOptions`
+- New anonymous field `VirtualMachineScaleSetExtension` in struct `VirtualMachineScaleSetExtensionsClientCreateOrUpdateResponse`
+- New field `ResumeToken` in struct `CapacityReservationsClientBeginDeleteOptions`
+- New anonymous field `VirtualMachineExtension` in struct `VirtualMachineExtensionsClientUpdateResponse`
+- New field `ResumeToken` in struct `DisksClientBeginGrantAccessOptions`
+- New field `VirtualMachineImageResourceArray` in struct `VirtualMachineImagesClientListSKUsResponse`
+- New anonymous field `ResourceURIList` in struct `DiskEncryptionSetsClientListAssociatedResourcesResponse`
+- New anonymous field `OSFamily` in struct `CloudServiceOperatingSystemsClientGetOSFamilyResponse`
+- New anonymous field `GalleryApplicationVersion` in struct `GalleryApplicationVersionsClientCreateOrUpdateResponse`
+- New anonymous field `RestorePoint` in struct `RestorePointsClientGetResponse`
+- New field `ResumeToken` in struct `VirtualMachineScaleSetsClientBeginRedeployOptions`
+- New field `ResumeToken` in struct `VirtualMachinesClientBeginUpdateOptions`
+- New anonymous field `LogAnalyticsOperationResult` in struct `LogAnalyticsClientExportThrottledRequestsResponse`
+- New anonymous field `UpdateDomain` in struct `CloudServicesUpdateDomainClientGetUpdateDomainResponse`
+- New anonymous field `VirtualMachineScaleSetVMExtension` in struct `VirtualMachineScaleSetVMExtensionsClientCreateOrUpdateResponse`
+- New anonymous field `CloudServiceRoleListResult` in struct `CloudServiceRolesClientListResponse`
+- New field `ResumeToken` in struct `DiskEncryptionSetsClientBeginDeleteOptions`
+- New anonymous field `DiskEncryptionSetList` in struct `DiskEncryptionSetsClientListResponse`
+- New field `ResumeToken` in struct `VirtualMachinesClientBeginCreateOrUpdateOptions`
+- New field `ResumeToken` in struct `VirtualMachinesClientBeginDeallocateOptions`
+- New anonymous field `CommunityGalleryImage` in struct `CommunityGalleryImagesClientGetResponse`
+- New anonymous field `ImageListResult` in struct `ImagesClientListByResourceGroupResponse`
+- New field `VirtualMachineImageResourceArray` in struct `VirtualMachineImagesClientListPublishersResponse`
+- New anonymous field `GalleryImageVersion` in struct `GalleryImageVersionsClientGetResponse`
+- New anonymous field `VirtualMachineAssessPatchesResult` in struct `VirtualMachinesClientAssessPatchesResponse`
+- New anonymous field `VirtualMachineScaleSetExtension` in struct `VirtualMachineScaleSetExtensionsClientGetResponse`
+- New field `ResumeToken` in struct `VirtualMachinesClientBeginReimageOptions`
+- New field `ResumeToken` in struct `DiskEncryptionSetsClientBeginUpdateOptions`
+- New anonymous field `DiskRestorePoint` in struct `DiskRestorePointClientGetResponse`
+- New field `ResumeToken` in struct `VirtualMachineScaleSetVMsClientBeginRunCommandOptions`
+- New anonymous field `VirtualMachineSizeListResult` in struct `VirtualMachinesClientListAvailableSizesResponse`
+- New anonymous field `OSVersion` in struct `CloudServiceOperatingSystemsClientGetOSVersionResponse`
+- New anonymous field `PrivateLinkResourceListResult` in struct `DiskAccessesClientGetPrivateLinkResourcesResponse`
+- New anonymous field `GalleryApplication` in struct `GalleryApplicationsClientUpdateResponse`
+- New anonymous field `CapacityReservationGroupListResult` in struct `CapacityReservationGroupsClientListBySubscriptionResponse`
+- New field `ResumeToken` in struct `GalleryApplicationVersionsClientBeginUpdateOptions`
+- New field `ResumeToken` in struct `VirtualMachineScaleSetVMExtensionsClientBeginCreateOrUpdateOptions`
+- New anonymous field `CloudService` in struct `CloudServicesClientUpdateResponse`
+- New anonymous field `GalleryImageVersionList` in struct `GalleryImageVersionsClientListByGalleryImageResponse`
+- New field `ResumeToken` in struct `SnapshotsClientBeginCreateOrUpdateOptions`
+- New anonymous field `CloudService` in struct `CloudServicesClientCreateOrUpdateResponse`
+- New anonymous field `CapacityReservation` in struct `CapacityReservationsClientUpdateResponse`
+- New anonymous field `RestorePointCollection` in struct `RestorePointCollectionsClientGetResponse`
+- New anonymous field `DiskAccessList` in struct `DiskAccessesClientListByResourceGroupResponse`
+- New field `ResumeToken` in struct `GalleryImageVersionsClientBeginUpdateOptions`
+- New anonymous field `SharedGallery` in struct `SharedGalleriesClientGetResponse`
+- New field `ResumeToken` in struct `VirtualMachineScaleSetVMsClientBeginRestartOptions`
+- New field `ResumeToken` in struct `SnapshotsClientBeginUpdateOptions`
+- New anonymous field `RoleInstanceListResult` in struct `CloudServiceRoleInstancesClientListResponse`
+- New anonymous field `CommunityGalleryImageVersion` in struct `CommunityGalleryImageVersionsClientGetResponse`
+- New anonymous field `VirtualMachineScaleSetVMExtension` in struct `VirtualMachineScaleSetVMExtensionsClientGetResponse`
+- New anonymous field `DiskEncryptionSet` in struct `DiskEncryptionSetsClientGetResponse`
+- New anonymous field `VirtualMachineScaleSetVM` in struct `VirtualMachineScaleSetVMsClientGetResponse`
+- New field `ResumeToken` in struct `CloudServicesClientBeginReimageOptions`
+- New field `ResumeToken` in struct `GalleryApplicationsClientBeginCreateOrUpdateOptions`
+- New anonymous field `GalleryApplicationList` in struct `GalleryApplicationsClientListByGalleryResponse`
+- New field `ResumeToken` in struct `CloudServiceRoleInstancesClientBeginReimageOptions`
+- New field `ResumeToken` in struct `CloudServicesClientBeginPowerOffOptions`
+- New field `ResumeToken` in struct `VirtualMachineRunCommandsClientBeginCreateOrUpdateOptions`
+- New anonymous field `DedicatedHost` in struct `DedicatedHostsClientCreateOrUpdateResponse`
+- New anonymous field `GalleryList` in struct `GalleriesClientListByResourceGroupResponse`
+- New anonymous field `RunCommandResult` in struct `VirtualMachineScaleSetVMsClientRunCommandResponse`
+- New anonymous field `VirtualMachineRunCommandsListResult` in struct `VirtualMachineRunCommandsClientListByVirtualMachineResponse`
+- New anonymous field `UpdateDomainListResult` in struct `CloudServicesUpdateDomainClientListUpdateDomainsResponse`
+- New anonymous field `VirtualMachineScaleSet` in struct `VirtualMachineScaleSetsClientUpdateResponse`
+- New anonymous field `SSHPublicKeyResource` in struct `SSHPublicKeysClientUpdateResponse`
+- New field `ResumeToken` in struct `VirtualMachineScaleSetVMRunCommandsClientBeginDeleteOptions`
+- New field `ResumeToken` in struct `VirtualMachineScaleSetsClientBeginReimageOptions`
+- New anonymous field `RunCommandDocument` in struct `VirtualMachineRunCommandsClientGetResponse`
+- New anonymous field `VirtualMachineInstallPatchesResult` in struct `VirtualMachinesClientInstallPatchesResponse`
+- New anonymous field `SharedGalleryList` in struct `SharedGalleriesClientListResponse`
+- New anonymous field `VirtualMachineScaleSetListResult` in struct `VirtualMachineScaleSetsClientListByLocationResponse`
+- New field `ResumeToken` in struct `DiskRestorePointClientBeginGrantAccessOptions`
+- New anonymous field `DedicatedHostGroup` in struct `DedicatedHostGroupsClientUpdateResponse`
+- New anonymous field `ProximityPlacementGroup` in struct `ProximityPlacementGroupsClientGetResponse`
+- New field `ResumeToken` in struct `GalleryImagesClientBeginUpdateOptions`
+- New field `ResumeToken` in struct `DedicatedHostsClientBeginCreateOrUpdateOptions`
+- New anonymous field `GalleryApplicationVersionList` in struct `GalleryApplicationVersionsClientListByGalleryApplicationResponse`
+- New anonymous field `GalleryApplication` in struct `GalleryApplicationsClientCreateOrUpdateResponse`
+- New field `ResumeToken` in struct `VirtualMachineScaleSetVMsClientBeginPerformMaintenanceOptions`
+- New anonymous field `PrivateEndpointConnectionListResult` in struct `DiskAccessesClientListPrivateEndpointConnectionsResponse`
+- New field `ResumeToken` in struct `CloudServicesClientBeginDeleteInstancesOptions`
+- New field `ResumeToken` in struct `VirtualMachinesClientBeginAssessPatchesOptions`
+- New field `ResumeToken` in struct `DiskAccessesClientBeginDeleteOptions`
+- New anonymous field `VirtualMachineExtension` in struct `VirtualMachineExtensionsClientCreateOrUpdateResponse`
+- New anonymous field `DedicatedHost` in struct `DedicatedHostsClientUpdateResponse`
+- New field `ResumeToken` in struct `VirtualMachineScaleSetVMExtensionsClientBeginDeleteOptions`
+- New anonymous field `VirtualMachineScaleSetVMExtensionsListResult` in struct `VirtualMachineScaleSetVMExtensionsClientListResponse`
+- New field `ResumeToken` in struct `VirtualMachineScaleSetsClientBeginCreateOrUpdateOptions`
+- New field `ResumeToken` in struct `GalleriesClientBeginDeleteOptions`
+- New field `ResumeToken` in struct `ImagesClientBeginUpdateOptions`
+- New field `ResumeToken` in struct `DisksClientBeginRevokeAccessOptions`
+- New field `ResumeToken` in struct `VirtualMachineScaleSetsClientBeginStartOptions`
+- New anonymous field `SSHPublicKeysGroupListResult` in struct `SSHPublicKeysClientListByResourceGroupResponse`
+- New field `ResumeToken` in struct `SnapshotsClientBeginRevokeAccessOptions`
+- New field `ResumeToken` in struct `ImagesClientBeginDeleteOptions`
+- New anonymous field `VirtualMachineScaleSetExtension` in struct `VirtualMachineScaleSetExtensionsClientUpdateResponse`
+- New anonymous field `Disk` in struct `DisksClientGetResponse`
+- New field `ResumeToken` in struct `GalleryImageVersionsClientBeginCreateOrUpdateOptions`
+- New anonymous field `VirtualMachineSizeListResult` in struct `AvailabilitySetsClientListAvailableSizesResponse`
+- New field `ResumeToken` in struct `VirtualMachinesClientBeginDeleteOptions`
+- New anonymous field `SnapshotList` in struct `SnapshotsClientListResponse`
+- New field `ResumeToken` in struct `VirtualMachineExtensionsClientBeginDeleteOptions`
+- New field `ResumeToken` in struct `VirtualMachineExtensionsClientBeginUpdateOptions`
+- New field `ResumeToken` in struct `GalleryApplicationsClientBeginUpdateOptions`
+- New anonymous field `SharedGalleryImageList` in struct `SharedGalleryImagesClientListResponse`
+- New field `VirtualMachineImageResourceArray` in struct `VirtualMachineImagesClientListResponse`
+- New field `ResumeToken` in struct `CloudServiceRoleInstancesClientBeginRebuildOptions`
+- New anonymous field `CapacityReservationGroup` in struct `CapacityReservationGroupsClientUpdateResponse`
+- New field `ResumeToken` in struct `VirtualMachineScaleSetExtensionsClientBeginCreateOrUpdateOptions`
+- New field `ResumeToken` in struct `DisksClientBeginUpdateOptions`
+- New anonymous field `ProximityPlacementGroupListResult` in struct `ProximityPlacementGroupsClientListByResourceGroupResponse`
+- New anonymous field `VirtualMachine` in struct `VirtualMachinesClientCreateOrUpdateResponse`
+- New anonymous field `Gallery` in struct `GalleriesClientGetResponse`
+- New anonymous field `GalleryImage` in struct `GalleryImagesClientCreateOrUpdateResponse`
+- New field `VirtualMachineImageResourceArray` in struct `VirtualMachineImagesEdgeZoneClientListOffersResponse`
+- New anonymous field `VirtualMachineRunCommand` in struct `VirtualMachineRunCommandsClientGetByVirtualMachineResponse`
+- New anonymous field `VirtualMachineRunCommandsListResult` in struct `VirtualMachineScaleSetVMRunCommandsClientListResponse`
+- New anonymous field `CapacityReservationGroup` in struct `CapacityReservationGroupsClientGetResponse`
+- New field `ResumeToken` in struct `VirtualMachineScaleSetsClientBeginDeallocateOptions`
+- New anonymous field `CloudServiceListResult` in struct `CloudServicesClientListResponse`
+- New field `ResumeToken` in struct `VirtualMachinesClientBeginPowerOffOptions`
+- New anonymous field `CapacityReservationListResult` in struct `CapacityReservationsClientListByCapacityReservationGroupResponse`
+- New anonymous field `SharedGalleryImage` in struct `SharedGalleryImagesClientGetResponse`
+- New anonymous field `VirtualMachineRunCommand` in struct `VirtualMachineRunCommandsClientCreateOrUpdateResponse`
+- New anonymous field `RestorePointCollectionListResult` in struct `RestorePointCollectionsClientListResponse`
+- New anonymous field `RoleInstance` in struct `CloudServiceRoleInstancesClientGetResponse`
+- New anonymous field `VirtualMachineScaleSetExtensionListResult` in struct `VirtualMachineScaleSetExtensionsClientListResponse`
+- New anonymous field `AvailabilitySet` in struct `AvailabilitySetsClientUpdateResponse`
+- New field `ResumeToken` in struct `VirtualMachineScaleSetRollingUpgradesClientBeginCancelOptions`
+- New field `ResumeToken` in struct `LogAnalyticsClientBeginExportThrottledRequestsOptions`
+- New field `ResumeToken` in struct `DedicatedHostsClientBeginRestartOptions`
+- New field `ResumeToken` in struct `ImagesClientBeginCreateOrUpdateOptions`
+- New field `ResumeToken` in struct `VirtualMachineScaleSetVMsClientBeginDeallocateOptions`
+- New field `VirtualMachineImageResourceArray` in struct `VirtualMachineImagesEdgeZoneClientListResponse`
+- New anonymous field `GalleryImageList` in struct `GalleryImagesClientListByGalleryResponse`
+- New anonymous field `AccessURI` in struct `DiskRestorePointClientGrantAccessResponse`
+- New anonymous field `RunCommandResult` in struct `VirtualMachinesClientRunCommandResponse`
+- New field `VirtualMachineImageResourceArray` in struct `VirtualMachineImagesClientListOffersResponse`
+- New field `ResumeToken` in struct `DiskAccessesClientBeginCreateOrUpdateOptions`
+- New anonymous field `CloudServiceListResult` in struct `CloudServicesClientListAllResponse`
+- New anonymous field `RestorePoint` in struct `RestorePointsClientCreateResponse`
+- New field `ResumeToken` in struct `VirtualMachinesClientBeginRunCommandOptions`
+- New anonymous field `RunCommandListResult` in struct `VirtualMachineRunCommandsClientListResponse`
+- New anonymous field `GalleryImageVersion` in struct `GalleryImageVersionsClientUpdateResponse`
+- New anonymous field `Image` in struct `ImagesClientCreateOrUpdateResponse`
+- New field `ResumeToken` in struct `SnapshotsClientBeginDeleteOptions`
+- New anonymous field `LogAnalyticsOperationResult` in struct `LogAnalyticsClientExportRequestRateByIntervalResponse`
+- New anonymous field `VirtualMachineInstanceView` in struct `VirtualMachinesClientInstanceViewResponse`
+- New anonymous field `CloudServiceRole` in struct `CloudServiceRolesClientGetResponse`
+- New field `ResumeToken` in struct `VirtualMachineScaleSetsClientBeginDeleteInstancesOptions`
+- New field `VirtualMachineImageResourceArray` in struct `VirtualMachineImagesEdgeZoneClientListPublishersResponse`
+- New anonymous field `RecoveryWalkResponse` in struct `VirtualMachineScaleSetsClientForceRecoveryServiceFabricPlatformUpdateDomainWalkResponse`
+- New anonymous field `DedicatedHostGroup` in struct `DedicatedHostGroupsClientCreateOrUpdateResponse`
+- New field `ResumeToken` in struct `DiskEncryptionSetsClientBeginCreateOrUpdateOptions`
+- New anonymous field `Snapshot` in struct `SnapshotsClientUpdateResponse`
+- New anonymous field `ResourceSKUsResult` in struct `ResourceSKUsClientListResponse`
+- New anonymous field `SnapshotList` in struct `SnapshotsClientListByResourceGroupResponse`
+- New anonymous field `GalleryImageVersion` in struct `GalleryImageVersionsClientCreateOrUpdateResponse`
+- New anonymous field `ProximityPlacementGroup` in struct `ProximityPlacementGroupsClientUpdateResponse`
+- New field `ResumeToken` in struct `VirtualMachineScaleSetVMsClientBeginReimageAllOptions`
+- New anonymous field `Gallery` in struct `GalleriesClientCreateOrUpdateResponse`
+- New anonymous field `SharedGalleryImageVersionList` in struct `SharedGalleryImageVersionsClientListResponse`
+- New field `ResumeToken` in struct `VirtualMachinesClientBeginCaptureOptions`
+- New anonymous field `ProximityPlacementGroupListResult` in struct `ProximityPlacementGroupsClientListBySubscriptionResponse`
+- New anonymous field `OSVersionListResult` in struct `CloudServiceOperatingSystemsClientListOSVersionsResponse`
+- New anonymous field `GalleryImage` in struct `GalleryImagesClientGetResponse`
+- New field `ResumeToken` in struct `DiskAccessesClientBeginUpdateOptions`
+- New field `ResumeToken` in struct `DiskAccessesClientBeginDeleteAPrivateEndpointConnectionOptions`
+- New field `ResumeToken` in struct `CapacityReservationsClientBeginUpdateOptions`
+- New anonymous field `AvailabilitySetListResult` in struct `AvailabilitySetsClientListResponse`
+- New field `ResumeToken` in struct `DedicatedHostsClientBeginDeleteOptions`
+- New field `ResumeToken` in struct `VirtualMachineScaleSetVMRunCommandsClientBeginUpdateOptions`
+- New anonymous field `VirtualMachineScaleSetListOSUpgradeHistory` in struct `VirtualMachineScaleSetsClientGetOSUpgradeHistoryResponse`
+- New anonymous field `VirtualMachineScaleSetVM` in struct `VirtualMachineScaleSetVMsClientUpdateResponse`
+- New field `ResumeToken` in struct `VirtualMachineScaleSetsClientBeginUpdateInstancesOptions`
+- New anonymous field `GalleryApplicationVersion` in struct `GalleryApplicationVersionsClientGetResponse`
+- New field `ResumeToken` in struct `VirtualMachineScaleSetVMsClientBeginUpdateOptions`
+- New anonymous field `RetrieveBootDiagnosticsDataResult` in struct `VirtualMachineScaleSetVMsClientRetrieveBootDiagnosticsDataResponse`
+- New field `ResumeToken` in struct `CloudServicesClientBeginUpdateOptions`
+- New anonymous field `DiskList` in struct `DisksClientListByResourceGroupResponse`
+- New anonymous field `GalleryApplication` in struct `GalleryApplicationsClientGetResponse`
+- New field `ResumeToken` in struct `VirtualMachineScaleSetsClientBeginUpdateOptions`
+- New anonymous field `Snapshot` in struct `SnapshotsClientGetResponse`
+- New anonymous field `VirtualMachineListResult` in struct `VirtualMachinesClientListByLocationResponse`
+- New field `ResumeToken` in struct `VirtualMachinesClientBeginReapplyOptions`
+- New field `ResumeToken` in struct `VirtualMachineScaleSetVMsClientBeginDeleteOptions`
+- New field `ResumeToken` in struct `VirtualMachineScaleSetsClientBeginSetOrchestrationServiceStateOptions`
+- New anonymous field `RoleInstanceView` in struct `CloudServiceRoleInstancesClientGetInstanceViewResponse`
+- New field `ResumeToken` in struct `RestorePointCollectionsClientBeginDeleteOptions`
+- New field `ResumeToken` in struct `CapacityReservationsClientBeginCreateOrUpdateOptions`
+- New anonymous field `DiskEncryptionSet` in struct `DiskEncryptionSetsClientCreateOrUpdateResponse`
+- New anonymous field `SSHPublicKeyResource` in struct `SSHPublicKeysClientCreateResponse`
+- New field `ResumeToken` in struct `VirtualMachineScaleSetVMsClientBeginPowerOffOptions`
+- New anonymous field `DiskEncryptionSet` in struct `DiskEncryptionSetsClientUpdateResponse`
+- New anonymous field `DedicatedHostGroup` in struct `DedicatedHostGroupsClientGetResponse`
+- New field `ResumeToken` in struct `VirtualMachineScaleSetVMsClientBeginRedeployOptions`
+- New field `ResumeToken` in struct `VirtualMachinesClientBeginRedeployOptions`
+- New anonymous field `VirtualMachineScaleSetListWithLinkResult` in struct `VirtualMachineScaleSetsClientListAllResponse`
+- New field `ResumeToken` in struct `GalleryImagesClientBeginCreateOrUpdateOptions`
+- New anonymous field `VirtualMachineImage` in struct `VirtualMachineImagesEdgeZoneClientGetResponse`
+- New field `ResumeToken` in struct `VirtualMachinesClientBeginInstallPatchesOptions`
+- New field `ResumeToken` in struct `VirtualMachineScaleSetExtensionsClientBeginDeleteOptions`
+- New field `ResumeToken` in struct `VirtualMachineScaleSetVMExtensionsClientBeginUpdateOptions`
+- New field `ResumeToken` in struct `VirtualMachinesClientBeginRestartOptions`
+- New anonymous field `DedicatedHostGroupListResult` in struct `DedicatedHostGroupsClientListBySubscriptionResponse`
+- New field `ResumeToken` in struct `VirtualMachineScaleSetVMsClientBeginStartOptions`
+- New anonymous field `VirtualMachineRunCommand` in struct `VirtualMachineScaleSetVMRunCommandsClientCreateOrUpdateResponse`
+- New field `ResumeToken` in struct `GallerySharingProfileClientBeginUpdateOptions`
+- New anonymous field `VirtualMachineRunCommand` in struct `VirtualMachineRunCommandsClientUpdateResponse`
+- New field `ResumeToken` in struct `GalleryImageVersionsClientBeginDeleteOptions`
+- New field `ResumeToken` in struct `VirtualMachineScaleSetExtensionsClientBeginUpdateOptions`
+- New anonymous field `RestorePointCollection` in struct `RestorePointCollectionsClientUpdateResponse`
+- New anonymous field `VirtualMachineExtension` in struct `VirtualMachineExtensionsClientGetResponse`
+- New anonymous field `DiskAccess` in struct `DiskAccessesClientUpdateResponse`
+- New field `ResumeToken` in struct `VirtualMachineScaleSetsClientBeginDeleteOptions`
+- New field `ResumeToken` in struct `CloudServicesClientBeginCreateOrUpdateOptions`
+- New anonymous field `Image` in struct `ImagesClientGetResponse`
+- New anonymous field `GalleryApplicationVersion` in struct `GalleryApplicationVersionsClientUpdateResponse`
+- New anonymous field `PrivateEndpointConnection` in struct `DiskAccessesClientGetAPrivateEndpointConnectionResponse`
+- New anonymous field `VirtualMachineScaleSetInstanceView` in struct `VirtualMachineScaleSetsClientGetInstanceViewResponse`
+- New field `ResumeToken` in struct `VirtualMachineScaleSetsClientBeginPowerOffOptions`
+- New field `ResumeToken` in struct `DisksClientBeginDeleteOptions`
+- New anonymous field `VirtualMachine` in struct `VirtualMachinesClientGetResponse`
+- New anonymous field `DiskAccess` in struct `DiskAccessesClientCreateOrUpdateResponse`
+- New anonymous field `OSFamilyListResult` in struct `CloudServiceOperatingSystemsClientListOSFamiliesResponse`
+- New field `ResumeToken` in struct `VirtualMachineScaleSetRollingUpgradesClientBeginStartOSUpgradeOptions`
+- New field `ResumeToken` in struct `VirtualMachineExtensionsClientBeginCreateOrUpdateOptions`
+- New anonymous field `CommunityGallery` in struct `CommunityGalleriesClientGetResponse`
+- New anonymous field `PrivateEndpointConnection` in struct `DiskAccessesClientUpdateAPrivateEndpointConnectionResponse`
+- New anonymous field `AvailabilitySet` in struct `AvailabilitySetsClientGetResponse`
+- New anonymous field `VirtualMachineScaleSetVMExtension` in struct `VirtualMachineScaleSetVMExtensionsClientUpdateResponse`
+- New field `ResumeToken` in struct `VirtualMachineRunCommandsClientBeginDeleteOptions`
+- New anonymous field `SSHPublicKeyResource` in struct `SSHPublicKeysClientGetResponse`
+- New anonymous field `CloudService` in struct `CloudServicesClientGetResponse`
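Most of the `ResumeToken` additions above serve one purpose: a long-running operation started in one process can be rehydrated in another by feeding the saved token back through the matching `Begin*Options`. A sketch of that round-trip, written against the current generics-based `runtime.Poller` (the pre-1.0 releases below used per-operation poller types; see the `DedicatedHostsClientRestartPoller` entries under 0.4.0); `restartWithResume`, `rg`, and `vm-name` are illustrative placeholders:

```go
package example

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute"
)

// restartWithResume starts a long-running restart, saves its resume token,
// and shows how a later process picks the same operation back up via the
// ResumeToken option field added in this release.
func restartWithResume(ctx context.Context, client *armcompute.VirtualMachinesClient) error {
	poller, err := client.BeginRestart(ctx, "rg", "vm-name", nil) // placeholders
	if err != nil {
		return err
	}
	token, err := poller.ResumeToken() // persist somewhere durable
	if err != nil {
		return err
	}

	// ...process restarts; rehydrate the poller from the saved token...
	resumed, err := client.BeginRestart(ctx, "rg", "vm-name",
		&armcompute.VirtualMachinesClientBeginRestartOptions{ResumeToken: token})
	if err != nil {
		return err
	}
	_, err = resumed.PollUntilDone(ctx, nil) // nil = default polling options
	return err
}
```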
+
+
+## 0.5.0 (2022-03-07)
+### Features Added
+
+- New const `DataAccessAuthModeAzureActiveDirectory`
+- New const `DataAccessAuthModeNone`
+- New function `PossibleDataAccessAuthModeValues() []DataAccessAuthMode`
+- New function `DataAccessAuthMode.ToPtr() *DataAccessAuthMode`
+- New field `DataAccessAuthMode` in struct `SnapshotProperties`
+- New field `DataAccessAuthMode` in struct `DiskProperties`
+- New field `Architecture` in struct `SupportedCapabilities`
+- New field `DataAccessAuthMode` in struct `SnapshotUpdateProperties`
+- New field `DataAccessAuthMode` in struct `DiskUpdateProperties`
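A small sketch of the 0.5.0 surface, assuming the `DiskUpdate`/`DiskUpdateProperties` shapes of the current package; `aadOnlyDiskUpdate` is an illustrative name:

```go
package example

import "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute"

// aadOnlyDiskUpdate builds a disk update that sets the new DataAccessAuthMode
// field, requiring Azure Active Directory authentication on export/upload URLs.
func aadOnlyDiskUpdate() armcompute.DiskUpdate {
	mode := armcompute.DataAccessAuthModeAzureActiveDirectory
	return armcompute.DiskUpdate{
		Properties: &armcompute.DiskUpdateProperties{
			DataAccessAuthMode: &mode, // address of a local avoids the ToPtr helper
		},
	}
}
```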
+
+
+## 0.4.0 (2022-03-02)
+### Breaking Changes
+
+- Type of `VirtualMachineExtensionProperties.ProtectedSettings` has been changed from `map[string]interface{}` to `interface{}`
+- Type of `VirtualMachineExtensionProperties.Settings` has been changed from `map[string]interface{}` to `interface{}`
+- Type of `VirtualMachineScaleSetExtensionProperties.ProtectedSettings` has been changed from `map[string]interface{}` to `interface{}`
+- Type of `VirtualMachineScaleSetExtensionProperties.Settings` has been changed from `map[string]interface{}` to `interface{}`
+- Type of `VirtualMachineCaptureResult.Parameters` has been changed from `map[string]interface{}` to `interface{}`
+- Type of `VirtualMachineCaptureResult.Resources` has been changed from `[]map[string]interface{}` to `[]interface{}`
+- Type of `VirtualMachineExtensionUpdateProperties.ProtectedSettings` has been changed from `map[string]interface{}` to `interface{}`
+- Type of `VirtualMachineExtensionUpdateProperties.Settings` has been changed from `map[string]interface{}` to `interface{}`
+- Struct `CloudError` has been removed
+- Struct `GalleryArtifactSource` has been removed
+- Struct `ManagedArtifact` has been removed
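The type loosening above means extension settings no longer have to be untyped maps; any JSON-marshalable value is accepted. A hedged sketch under that reading (the payload keys below are illustrative placeholders, not a documented extension schema):

```go
package example

import "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute"

// extensionProps demonstrates both shapes accepted by Settings and
// ProtectedSettings after the change to interface{}.
func extensionProps() *armcompute.VirtualMachineExtensionProperties {
	// Untyped map: what callers already passed before 0.4.0.
	settings := map[string]interface{}{
		"commandToExecute": "echo hello",
	}
	// Typed struct with JSON tags: newly possible with interface{}.
	type secrets struct {
		StorageAccountKey string `json:"storageAccountKey"`
	}
	return &armcompute.VirtualMachineExtensionProperties{
		Settings:          settings,
		ProtectedSettings: secrets{StorageAccountKey: "<key>"},
	}
}
```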
+
+### Features Added
+
+- New const `RepairActionReplace`
+- New const `RestorePointExpandOptionsInstanceView`
+- New const `GalleryExtendedLocationTypeEdgeZone`
+- New const `ConfidentialVMEncryptionTypeEncryptedVMGuestStateOnlyWithPmk`
+- New const `SharingProfileGroupTypesCommunity`
+- New const `ArchitectureTypesArm64`
+- New const `RepairActionReimage`
+- New const `ArchitectureX64`
+- New const `SecurityEncryptionTypesDiskWithVMGuestState`
+- New const `SecurityTypesConfidentialVM`
+- New const `SharingStateSucceeded`
+- New const `RepairActionRestart`
+- New const `SecurityEncryptionTypesVMGuestStateOnly`
+- New const `SharingUpdateOperationTypesEnableCommunity`
+- New const `SharingStateUnknown`
+- New const `SharingStateInProgress`
+- New const `ConfidentialVMEncryptionTypeEncryptedWithPmk`
+- New const `SharingStateFailed`
+- New const `ConfidentialVMEncryptionTypeEncryptedWithCmk`
+- New const `ArchitectureTypesX64`
+- New const `GalleryExtendedLocationTypeUnknown`
+- New const `GalleryExpandParamsSharingProfileGroups`
+- New const `ArchitectureArm64`
+- New function `RestorePointExpandOptions.ToPtr() *RestorePointExpandOptions`
+- New function `PossibleGalleryExtendedLocationTypeValues() []GalleryExtendedLocationType`
+- New function `PossibleGalleryExpandParamsValues() []GalleryExpandParams`
+- New function `PossibleSharingStateValues() []SharingState`
+- New function `PossibleSecurityEncryptionTypesValues() []SecurityEncryptionTypes`
+- New function `PossibleArchitectureTypesValues() []ArchitectureTypes`
+- New function `*DedicatedHostsClientRestartPollerResponse.Resume(context.Context, *DedicatedHostsClient, string) error`
+- New function `DedicatedHostsClientRestartPollerResponse.PollUntilDone(context.Context, time.Duration) (DedicatedHostsClientRestartResponse, error)`
+- New function `ConfidentialVMEncryptionType.ToPtr() *ConfidentialVMEncryptionType`
+- New function `SharingState.ToPtr() *SharingState`
+- New function `PossibleConfidentialVMEncryptionTypeValues() []ConfidentialVMEncryptionType`
+- New function `*DedicatedHostsClient.BeginRestart(context.Context, string, string, string, *DedicatedHostsClientBeginRestartOptions) (DedicatedHostsClientRestartPollerResponse, error)`
+- New function `RestorePointInstanceView.MarshalJSON() ([]byte, error)`
+- New function `*DedicatedHostsClientRestartPoller.ResumeToken() (string, error)`
+- New function `*VirtualMachineProperties.UnmarshalJSON([]byte) error`
+- New function `SharingStatus.MarshalJSON() ([]byte, error)`
+- New function `SecurityEncryptionTypes.ToPtr() *SecurityEncryptionTypes`
+- New function `PossibleArchitectureValues() []Architecture`
+- New function `PossibleRestorePointExpandOptionsValues() []RestorePointExpandOptions`
+- New function `GalleryExtendedLocationType.ToPtr() *GalleryExtendedLocationType`
+- New function `ArchitectureTypes.ToPtr() *ArchitectureTypes`
+- New function `GalleryExpandParams.ToPtr() *GalleryExpandParams`
+- New function `*DedicatedHostsClientRestartPoller.Done() bool`
+- New function `RepairAction.ToPtr() *RepairAction`
+- New function `PossibleRepairActionValues() []RepairAction`
+- New function `VirtualMachineScaleSetProperties.MarshalJSON() ([]byte, error)`
+- New function `*VirtualMachineScaleSetProperties.UnmarshalJSON([]byte) error`
+- New function `*DedicatedHostsClientRestartPoller.FinalResponse(context.Context) (DedicatedHostsClientRestartResponse, error)`
+- New function `*DedicatedHostsClientRestartPoller.Poll(context.Context) (*http.Response, error)`
+- New function `Architecture.ToPtr() *Architecture`
+- New function `VirtualMachineProperties.MarshalJSON() ([]byte, error)`
+- New struct `DedicatedHostsClientBeginRestartOptions`
+- New struct `DedicatedHostsClientRestartPoller`
+- New struct `DedicatedHostsClientRestartPollerResponse`
+- New struct `DedicatedHostsClientRestartResponse`
+- New struct `DiskRestorePointInstanceView`
+- New struct `GalleryExtendedLocation`
+- New struct `GalleryTargetExtendedLocation`
+- New struct `OSDiskImageSecurityProfile`
+- New struct `RegionalSharingStatus`
+- New struct `RestorePointInstanceView`
+- New struct `SharingStatus`
+- New struct `VMDiskSecurityProfile`
+- New struct `VirtualMachineScaleSetHardwareProfile`
+- New field `SourceRestorePoint` in struct `RestorePointProperties`
+- New field `InstanceView` in struct `RestorePointProperties`
+- New field `SecurityProfile` in struct `VirtualMachineScaleSetManagedDiskParameters`
+- New field `TimeCreated` in struct `VirtualMachineProperties`
+- New field `ProtectedSettingsFromKeyVault` in struct `VirtualMachineExtensionUpdateProperties`
+- New field `SecurityProfile` in struct `ManagedDiskParameters`
+- New field `TimeCreated` in struct `CapacityReservationProperties`
+- New field `Expand` in struct `RestorePointsClientGetOptions`
+- New field `PlacementGroupID` in struct `VirtualMachineScaleSetsClientForceRecoveryServiceFabricPlatformUpdateDomainWalkOptions`
+- New field `Zone` in struct `VirtualMachineScaleSetsClientForceRecoveryServiceFabricPlatformUpdateDomainWalkOptions`
+- New field `TimeCreated` in struct `VirtualMachineScaleSetProperties`
+- New field `CommunityGalleryImageID` in struct `ImageReference`
+- New field `AllowExtensionOperations` in struct `VirtualMachineScaleSetOSProfile`
+- New field `ProtectedSettingsFromKeyVault` in struct `VirtualMachineScaleSetExtensionProperties`
+- New field `PublicIPPrefix` in struct `VirtualMachineScaleSetUpdatePublicIPAddressConfigurationProperties`
+- New field `Filter` in struct `VirtualMachinesClientListOptions`
+- New field `TargetExtendedLocations` in struct `GalleryImageVersionPublishingProfile`
+- New field `Architecture` in struct `VirtualMachineImageProperties`
+- New field `HardwareProfile` in struct `VirtualMachineScaleSetVMProfile`
+- New field `Architecture` in struct `GalleryImageProperties`
+- New field `SecurityProfile` in struct `OSDiskImageEncryption`
+- New field `TimeCreated` in struct `DedicatedHostProperties`
+- New field `TargetExtendedLocations` in struct `GalleryArtifactPublishingProfileBase`
+- New field `TargetExtendedLocations` in struct `GalleryApplicationVersionPublishingProfile`
+- New field `Expand` in struct `GalleriesClientGetOptions`
+- New field `Filter` in struct `VirtualMachinesClientListAllOptions`
+- New field `RepairAction` in struct `AutomaticRepairsPolicy`
+- New field `SharingStatus` in struct `GalleryProperties`
+- New field `CommunityGalleryInfo` in struct `SharingProfile`
+- New field `ProtectedSettingsFromKeyVault` in struct `VirtualMachineExtensionProperties`
+
+## 0.3.1 (2022-02-22)
+
+### Other Changes
+
+- Remove the go_mod_tidy_hack.go file.
+
+## 0.3.0 (2022-01-13)
+### Breaking Changes
+
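Nearly every entry in the list that follows is the same mechanical rename: options, response, pager, and poller-response types gain a `Client` segment after the operation-group prefix (for example `VirtualMachinesGetOptions` becomes `VirtualMachinesClientGetOptions`), while method names and call shapes are unchanged. A minimal before/after sketch of the caller-side impact, using `VirtualMachinesClient.Get`; the resource names are hypothetical and the client is assumed to be constructed elsewhere:

```go
package example

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute"
)

// getVM compiles against 0.3.0; before this release the options type was
// *armcompute.VirtualMachinesGetOptions and the result type was
// armcompute.VirtualMachinesGetResponse.
func getVM(ctx context.Context, client *armcompute.VirtualMachinesClient) {
	var opts *armcompute.VirtualMachinesClientGetOptions // renamed options type; nil is accepted
	resp, err := client.Get(ctx, "myResourceGroup", "myVM", opts)
	if err != nil {
		log.Fatal(err)
	}
	_ = resp // now of type armcompute.VirtualMachinesClientGetResponse
}
```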
+- Function `*VirtualMachineScaleSetsClient.ListAll` parameter(s) have been changed from `(*VirtualMachineScaleSetsListAllOptions)` to `(*VirtualMachineScaleSetsClientListAllOptions)`
+- Function `*VirtualMachineScaleSetsClient.ListAll` return value(s) have been changed from `(*VirtualMachineScaleSetsListAllPager)` to `(*VirtualMachineScaleSetsClientListAllPager)`
+- Function `*VirtualMachinesClient.Generalize` parameter(s) have been changed from `(context.Context, string, string, *VirtualMachinesGeneralizeOptions)` to `(context.Context, string, string, *VirtualMachinesClientGeneralizeOptions)`
+- Function `*VirtualMachinesClient.Generalize` return value(s) have been changed from `(VirtualMachinesGeneralizeResponse, error)` to `(VirtualMachinesClientGeneralizeResponse, error)`
+- Function `*DedicatedHostsClient.Get` parameter(s) have been changed from `(context.Context, string, string, string, *DedicatedHostsGetOptions)` to `(context.Context, string, string, string, *DedicatedHostsClientGetOptions)`
+- Function `*DedicatedHostsClient.Get` return value(s) have been changed from `(DedicatedHostsGetResponse, error)` to `(DedicatedHostsClientGetResponse, error)`
+- Function `*ProximityPlacementGroupsClient.Get` parameter(s) have been changed from `(context.Context, string, string, *ProximityPlacementGroupsGetOptions)` to `(context.Context, string, string, *ProximityPlacementGroupsClientGetOptions)`
+- Function `*ProximityPlacementGroupsClient.Get` return value(s) have been changed from `(ProximityPlacementGroupsGetResponse, error)` to `(ProximityPlacementGroupsClientGetResponse, error)`
+- Function `*VirtualMachinesClient.ListByLocation` parameter(s) have been changed from `(string, *VirtualMachinesListByLocationOptions)` to `(string, *VirtualMachinesClientListByLocationOptions)`
+- Function `*VirtualMachinesClient.ListByLocation` return value(s) have been changed from `(*VirtualMachinesListByLocationPager)` to `(*VirtualMachinesClientListByLocationPager)`
+- Function `*DiskEncryptionSetsClient.BeginDelete` parameter(s) have been changed from `(context.Context, string, string, *DiskEncryptionSetsBeginDeleteOptions)` to `(context.Context, string, string, *DiskEncryptionSetsClientBeginDeleteOptions)`
+- Function `*DiskEncryptionSetsClient.BeginDelete` return value(s) have been changed from `(DiskEncryptionSetsDeletePollerResponse, error)` to `(DiskEncryptionSetsClientDeletePollerResponse, error)`
+- Function `*AvailabilitySetsClient.List` parameter(s) have been changed from `(string, *AvailabilitySetsListOptions)` to `(string, *AvailabilitySetsClientListOptions)`
+- Function `*AvailabilitySetsClient.List` return value(s) have been changed from `(*AvailabilitySetsListPager)` to `(*AvailabilitySetsClientListPager)`
+- Function `*VirtualMachinesClient.SimulateEviction` parameter(s) have been changed from `(context.Context, string, string, *VirtualMachinesSimulateEvictionOptions)` to `(context.Context, string, string, *VirtualMachinesClientSimulateEvictionOptions)`
+- Function `*VirtualMachinesClient.SimulateEviction` return value(s) have been changed from `(VirtualMachinesSimulateEvictionResponse, error)` to `(VirtualMachinesClientSimulateEvictionResponse, error)`
+- Function `*VirtualMachineImagesClient.ListOffers` parameter(s) have been changed from `(context.Context, string, string, *VirtualMachineImagesListOffersOptions)` to `(context.Context, string, string, *VirtualMachineImagesClientListOffersOptions)`
+- Function `*VirtualMachineImagesClient.ListOffers` return value(s) have been changed from `(VirtualMachineImagesListOffersResponse, error)` to `(VirtualMachineImagesClientListOffersResponse, error)`
+- Function `*CloudServiceOperatingSystemsClient.GetOSVersion` parameter(s) have been changed from `(context.Context, string, string, *CloudServiceOperatingSystemsGetOSVersionOptions)` to `(context.Context, string, string, *CloudServiceOperatingSystemsClientGetOSVersionOptions)`
+- Function `*CloudServiceOperatingSystemsClient.GetOSVersion` return value(s) have been changed from `(CloudServiceOperatingSystemsGetOSVersionResponse, error)` to `(CloudServiceOperatingSystemsClientGetOSVersionResponse, error)`
+- Function `*VirtualMachinesClient.Get` parameter(s) have been changed from `(context.Context, string, string, *VirtualMachinesGetOptions)` to `(context.Context, string, string, *VirtualMachinesClientGetOptions)`
+- Function `*VirtualMachinesClient.Get` return value(s) have been changed from `(VirtualMachinesGetResponse, error)` to `(VirtualMachinesClientGetResponse, error)`
+- Function `*VirtualMachinesClient.BeginRestart` parameter(s) have been changed from `(context.Context, string, string, *VirtualMachinesBeginRestartOptions)` to `(context.Context, string, string, *VirtualMachinesClientBeginRestartOptions)`
+- Function `*VirtualMachinesClient.BeginRestart` return value(s) have been changed from `(VirtualMachinesRestartPollerResponse, error)` to `(VirtualMachinesClientRestartPollerResponse, error)`
+- Function `*DedicatedHostsClient.BeginCreateOrUpdate` parameter(s) have been changed from `(context.Context, string, string, string, DedicatedHost, *DedicatedHostsBeginCreateOrUpdateOptions)` to `(context.Context, string, string, string, DedicatedHost, *DedicatedHostsClientBeginCreateOrUpdateOptions)`
+- Function `*DedicatedHostsClient.BeginCreateOrUpdate` return value(s) have been changed from `(DedicatedHostsCreateOrUpdatePollerResponse, error)` to `(DedicatedHostsClientCreateOrUpdatePollerResponse, error)`
+- Function `*SharedGalleriesClient.List` parameter(s) have been changed from `(string, *SharedGalleriesListOptions)` to `(string, *SharedGalleriesClientListOptions)`
+- Function `*SharedGalleriesClient.List` return value(s) have been changed from `(*SharedGalleriesListPager)` to `(*SharedGalleriesClientListPager)`
+- Function `*VirtualMachinesClient.BeginRunCommand` parameter(s) have been changed from `(context.Context, string, string, RunCommandInput, *VirtualMachinesBeginRunCommandOptions)` to `(context.Context, string, string, RunCommandInput, *VirtualMachinesClientBeginRunCommandOptions)`
+- Function `*VirtualMachinesClient.BeginRunCommand` return value(s) have been changed from `(VirtualMachinesRunCommandPollerResponse, error)` to `(VirtualMachinesClientRunCommandPollerResponse, error)`
+- Function `*CloudServicesClient.BeginRestart` parameter(s) have been changed from `(context.Context, string, string, *CloudServicesBeginRestartOptions)` to `(context.Context, string, string, *CloudServicesClientBeginRestartOptions)`
+- Function `*CloudServicesClient.BeginRestart` return value(s) have been changed from `(CloudServicesRestartPollerResponse, error)` to `(CloudServicesClientRestartPollerResponse, error)`
+- Function `*DiskRestorePointClient.ListByRestorePoint` parameter(s) have been changed from `(string, string, string, *DiskRestorePointListByRestorePointOptions)` to `(string, string, string, *DiskRestorePointClientListByRestorePointOptions)`
+- Function `*DiskRestorePointClient.ListByRestorePoint` return value(s) have been changed from `(*DiskRestorePointListByRestorePointPager)` to `(*DiskRestorePointClientListByRestorePointPager)`
+- Function `*VirtualMachinesClient.BeginReapply` parameter(s) have been changed from `(context.Context, string, string, *VirtualMachinesBeginReapplyOptions)` to `(context.Context, string, string, *VirtualMachinesClientBeginReapplyOptions)`
+- Function `*VirtualMachinesClient.BeginReapply` return value(s) have been changed from `(VirtualMachinesReapplyPollerResponse, error)` to `(VirtualMachinesClientReapplyPollerResponse, error)`
+- Function `*SnapshotsClient.BeginGrantAccess` parameter(s) have been changed from `(context.Context, string, string, GrantAccessData, *SnapshotsBeginGrantAccessOptions)` to `(context.Context, string, string, GrantAccessData, *SnapshotsClientBeginGrantAccessOptions)`
+- Function `*SnapshotsClient.BeginGrantAccess` return value(s) have been changed from `(SnapshotsGrantAccessPollerResponse, error)` to `(SnapshotsClientGrantAccessPollerResponse, error)`
+- Function `*VirtualMachinesClient.ListAvailableSizes` parameter(s) have been changed from `(context.Context, string, string, *VirtualMachinesListAvailableSizesOptions)` to `(context.Context, string, string, *VirtualMachinesClientListAvailableSizesOptions)`
+- Function `*VirtualMachinesClient.ListAvailableSizes` return value(s) have been changed from `(VirtualMachinesListAvailableSizesResponse, error)` to `(VirtualMachinesClientListAvailableSizesResponse, error)`
+- Function `*CloudServicesClient.Get` parameter(s) have been changed from `(context.Context, string, string, *CloudServicesGetOptions)` to `(context.Context, string, string, *CloudServicesClientGetOptions)`
+- Function `*CloudServicesClient.Get` return value(s) have been changed from `(CloudServicesGetResponse, error)` to `(CloudServicesClientGetResponse, error)`
+- Function `*GalleryImageVersionsClient.BeginUpdate` parameter(s) have been changed from `(context.Context, string, string, string, string, GalleryImageVersionUpdate, *GalleryImageVersionsBeginUpdateOptions)` to `(context.Context, string, string, string, string, GalleryImageVersionUpdate, *GalleryImageVersionsClientBeginUpdateOptions)`
+- Function `*GalleryImageVersionsClient.BeginUpdate` return value(s) have been changed from `(GalleryImageVersionsUpdatePollerResponse, error)` to `(GalleryImageVersionsClientUpdatePollerResponse, error)`
+- Function `*CloudServicesClient.ListAll` parameter(s) have been changed from `(*CloudServicesListAllOptions)` to `(*CloudServicesClientListAllOptions)`
+- Function `*CloudServicesClient.ListAll` return value(s) have been changed from `(*CloudServicesListAllPager)` to `(*CloudServicesClientListAllPager)`
+- Function `*SharedGalleryImagesClient.Get` parameter(s) have been changed from `(context.Context, string, string, string, *SharedGalleryImagesGetOptions)` to `(context.Context, string, string, string, *SharedGalleryImagesClientGetOptions)`
+- Function `*SharedGalleryImagesClient.Get` return value(s) have been changed from `(SharedGalleryImagesGetResponse, error)` to `(SharedGalleryImagesClientGetResponse, error)`
+- Function `*CapacityReservationGroupsClient.ListBySubscription` parameter(s) have been changed from `(*CapacityReservationGroupsListBySubscriptionOptions)` to `(*CapacityReservationGroupsClientListBySubscriptionOptions)`
+- Function `*CapacityReservationGroupsClient.ListBySubscription` return value(s) have been changed from `(*CapacityReservationGroupsListBySubscriptionPager)` to `(*CapacityReservationGroupsClientListBySubscriptionPager)`
+- Function `*CloudServiceOperatingSystemsClient.ListOSFamilies` parameter(s) have been changed from `(string, *CloudServiceOperatingSystemsListOSFamiliesOptions)` to `(string, *CloudServiceOperatingSystemsClientListOSFamiliesOptions)`
+- Function `*CloudServiceOperatingSystemsClient.ListOSFamilies` return value(s) have been changed from `(*CloudServiceOperatingSystemsListOSFamiliesPager)` to `(*CloudServiceOperatingSystemsClientListOSFamiliesPager)`
+- Function `*VirtualMachinesClient.BeginCapture` parameter(s) have been changed from `(context.Context, string, string, VirtualMachineCaptureParameters, *VirtualMachinesBeginCaptureOptions)` to `(context.Context, string, string, VirtualMachineCaptureParameters, *VirtualMachinesClientBeginCaptureOptions)`
+- Function `*VirtualMachinesClient.BeginCapture` return value(s) have been changed from `(VirtualMachinesCapturePollerResponse, error)` to `(VirtualMachinesClientCapturePollerResponse, error)`
+- Function `*CapacityReservationGroupsClient.Delete` parameter(s) have been changed from `(context.Context, string, string, *CapacityReservationGroupsDeleteOptions)` to `(context.Context, string, string, *CapacityReservationGroupsClientDeleteOptions)`
+- Function `*CapacityReservationGroupsClient.Delete` return value(s) have been changed from `(CapacityReservationGroupsDeleteResponse, error)` to `(CapacityReservationGroupsClientDeleteResponse, error)`
+- Function `*VirtualMachineScaleSetVMsClient.BeginReimageAll` parameter(s) have been changed from `(context.Context, string, string, string, *VirtualMachineScaleSetVMsBeginReimageAllOptions)` to `(context.Context, string, string, string, *VirtualMachineScaleSetVMsClientBeginReimageAllOptions)`
+- Function `*VirtualMachineScaleSetVMsClient.BeginReimageAll` return value(s) have been changed from `(VirtualMachineScaleSetVMsReimageAllPollerResponse, error)` to `(VirtualMachineScaleSetVMsClientReimageAllPollerResponse, error)`
+- Function `*DisksClient.ListByResourceGroup` parameter(s) have been changed from `(string, *DisksListByResourceGroupOptions)` to `(string, *DisksClientListByResourceGroupOptions)`
+- Function `*DisksClient.ListByResourceGroup` return value(s) have been changed from `(*DisksListByResourceGroupPager)` to `(*DisksClientListByResourceGroupPager)`
+- Function `*VirtualMachinesClient.BeginUpdate` parameter(s) have been changed from `(context.Context, string, string, VirtualMachineUpdate, *VirtualMachinesBeginUpdateOptions)` to `(context.Context, string, string, VirtualMachineUpdate, *VirtualMachinesClientBeginUpdateOptions)`
+- Function `*VirtualMachinesClient.BeginUpdate` return value(s) have been changed from `(VirtualMachinesUpdatePollerResponse, error)` to `(VirtualMachinesClientUpdatePollerResponse, error)`
+- Function `*VirtualMachineScaleSetVMsClient.BeginRestart` parameter(s) have been changed from `(context.Context, string, string, string, *VirtualMachineScaleSetVMsBeginRestartOptions)` to `(context.Context, string, string, string, *VirtualMachineScaleSetVMsClientBeginRestartOptions)`
+- Function `*VirtualMachineScaleSetVMsClient.BeginRestart` return value(s) have been changed from `(VirtualMachineScaleSetVMsRestartPollerResponse, error)` to `(VirtualMachineScaleSetVMsClientRestartPollerResponse, error)`
+- Function `*VirtualMachineImagesEdgeZoneClient.List` parameter(s) have been changed from `(context.Context, string, string, string, string, string, *VirtualMachineImagesEdgeZoneListOptions)` to `(context.Context, string, string, string, string, string, *VirtualMachineImagesEdgeZoneClientListOptions)`
+- Function `*VirtualMachineImagesEdgeZoneClient.List` return value(s) have been changed from `(VirtualMachineImagesEdgeZoneListResponse, error)` to `(VirtualMachineImagesEdgeZoneClientListResponse, error)`
+- Function `*DisksClient.List` parameter(s) have been changed from `(*DisksListOptions)` to `(*DisksClientListOptions)`
+- Function `*DisksClient.List` return value(s) have been changed from `(*DisksListPager)` to `(*DisksClientListPager)`
+- Function `*VirtualMachineScaleSetsClient.GetOSUpgradeHistory` parameter(s) have been changed from `(string, string, *VirtualMachineScaleSetsGetOSUpgradeHistoryOptions)` to `(string, string, *VirtualMachineScaleSetsClientGetOSUpgradeHistoryOptions)`
+- Function `*VirtualMachineScaleSetsClient.GetOSUpgradeHistory` return value(s) have been changed from `(*VirtualMachineScaleSetsGetOSUpgradeHistoryPager)` to `(*VirtualMachineScaleSetsClientGetOSUpgradeHistoryPager)`
+- Function `*GalleryApplicationVersionsClient.Get` parameter(s) have been changed from `(context.Context, string, string, string, string, *GalleryApplicationVersionsGetOptions)` to `(context.Context, string, string, string, string, *GalleryApplicationVersionsClientGetOptions)`
+- Function `*GalleryApplicationVersionsClient.Get` return value(s) have been changed from `(GalleryApplicationVersionsGetResponse, error)` to `(GalleryApplicationVersionsClientGetResponse, error)`
+- Function `*VirtualMachineImagesEdgeZoneClient.Get` parameter(s) have been changed from `(context.Context, string, string, string, string, string, string, *VirtualMachineImagesEdgeZoneGetOptions)` to `(context.Context, string, string, string, string, string, string, *VirtualMachineImagesEdgeZoneClientGetOptions)`
+- Function `*VirtualMachineImagesEdgeZoneClient.Get` return value(s) have been changed from `(VirtualMachineImagesEdgeZoneGetResponse, error)` to `(VirtualMachineImagesEdgeZoneClientGetResponse, error)`
+- Function `*VirtualMachineScaleSetsClient.Get` parameter(s) have been changed from `(context.Context, string, string, *VirtualMachineScaleSetsGetOptions)` to `(context.Context, string, string, *VirtualMachineScaleSetsClientGetOptions)`
+- Function `*VirtualMachineScaleSetsClient.Get` return value(s) have been changed from `(VirtualMachineScaleSetsGetResponse, error)` to `(VirtualMachineScaleSetsClientGetResponse, error)`
+- Function `*LogAnalyticsClient.BeginExportThrottledRequests` parameter(s) have been changed from `(context.Context, string, ThrottledRequestsInput, *LogAnalyticsBeginExportThrottledRequestsOptions)` to `(context.Context, string, ThrottledRequestsInput, *LogAnalyticsClientBeginExportThrottledRequestsOptions)`
+- Function `*LogAnalyticsClient.BeginExportThrottledRequests` return value(s) have been changed from `(LogAnalyticsExportThrottledRequestsPollerResponse, error)` to `(LogAnalyticsClientExportThrottledRequestsPollerResponse, error)`
+- Function `*VirtualMachineImagesEdgeZoneClient.ListPublishers` parameter(s) have been changed from `(context.Context, string, string, *VirtualMachineImagesEdgeZoneListPublishersOptions)` to `(context.Context, string, string, *VirtualMachineImagesEdgeZoneClientListPublishersOptions)`
+- Function `*VirtualMachineImagesEdgeZoneClient.ListPublishers` return value(s) have been changed from `(VirtualMachineImagesEdgeZoneListPublishersResponse, error)` to `(VirtualMachineImagesEdgeZoneClientListPublishersResponse, error)`
+- Function `*CloudServicesClient.BeginCreateOrUpdate` parameter(s) have been changed from `(context.Context, string, string, *CloudServicesBeginCreateOrUpdateOptions)` to `(context.Context, string, string, *CloudServicesClientBeginCreateOrUpdateOptions)`
+- Function `*CloudServicesClient.BeginCreateOrUpdate` return value(s) have been changed from `(CloudServicesCreateOrUpdatePollerResponse, error)` to `(CloudServicesClientCreateOrUpdatePollerResponse, error)`
+- Function `*CapacityReservationGroupsClient.CreateOrUpdate` parameter(s) have been changed from `(context.Context, string, string, CapacityReservationGroup, *CapacityReservationGroupsCreateOrUpdateOptions)` to `(context.Context, string, string, CapacityReservationGroup, *CapacityReservationGroupsClientCreateOrUpdateOptions)`
+- Function `*CapacityReservationGroupsClient.CreateOrUpdate` return value(s) have been changed from `(CapacityReservationGroupsCreateOrUpdateResponse, error)` to `(CapacityReservationGroupsClientCreateOrUpdateResponse, error)`
+- Function `*VirtualMachinesClient.List` parameter(s) have been changed from `(string, *VirtualMachinesListOptions)` to `(string, *VirtualMachinesClientListOptions)`
+- Function `*VirtualMachinesClient.List` return value(s) have been changed from `(*VirtualMachinesListPager)` to `(*VirtualMachinesClientListPager)`
+- Function `*DiskAccessesClient.BeginDeleteAPrivateEndpointConnection` parameter(s) have been changed from `(context.Context, string, string, string, *DiskAccessesBeginDeleteAPrivateEndpointConnectionOptions)` to `(context.Context, string, string, string, *DiskAccessesClientBeginDeleteAPrivateEndpointConnectionOptions)`
+- Function `*DiskAccessesClient.BeginDeleteAPrivateEndpointConnection` return value(s) have been changed from `(DiskAccessesDeleteAPrivateEndpointConnectionPollerResponse, error)` to `(DiskAccessesClientDeleteAPrivateEndpointConnectionPollerResponse, error)`
+- Function `*LogAnalyticsClient.BeginExportRequestRateByInterval` parameter(s) have been changed from `(context.Context, string, RequestRateByIntervalInput, *LogAnalyticsBeginExportRequestRateByIntervalOptions)` to `(context.Context, string, RequestRateByIntervalInput, *LogAnalyticsClientBeginExportRequestRateByIntervalOptions)`
+- Function `*LogAnalyticsClient.BeginExportRequestRateByInterval` return value(s) have been changed from `(LogAnalyticsExportRequestRateByIntervalPollerResponse, error)` to `(LogAnalyticsClientExportRequestRateByIntervalPollerResponse, error)`
+- Function `*AvailabilitySetsClient.Delete` parameter(s) have been changed from `(context.Context, string, string, *AvailabilitySetsDeleteOptions)` to `(context.Context, string, string, *AvailabilitySetsClientDeleteOptions)`
+- Function `*AvailabilitySetsClient.Delete` return value(s) have been changed from `(AvailabilitySetsDeleteResponse, error)` to `(AvailabilitySetsClientDeleteResponse, error)`
+- Function `*VirtualMachineScaleSetVMExtensionsClient.BeginDelete` parameter(s) have been changed from `(context.Context, string, string, string, string, *VirtualMachineScaleSetVMExtensionsBeginDeleteOptions)` to `(context.Context, string, string, string, string, *VirtualMachineScaleSetVMExtensionsClientBeginDeleteOptions)`
+- Function `*VirtualMachineScaleSetVMExtensionsClient.BeginDelete` return value(s) have been changed from `(VirtualMachineScaleSetVMExtensionsDeletePollerResponse, error)` to `(VirtualMachineScaleSetVMExtensionsClientDeletePollerResponse, error)`
+- Function `*ImagesClient.BeginCreateOrUpdate` parameter(s) have been changed from `(context.Context, string, string, Image, *ImagesBeginCreateOrUpdateOptions)` to `(context.Context, string, string, Image, *ImagesClientBeginCreateOrUpdateOptions)`
+- Function `*ImagesClient.BeginCreateOrUpdate` return value(s) have been changed from `(ImagesCreateOrUpdatePollerResponse, error)` to `(ImagesClientCreateOrUpdatePollerResponse, error)`
+- Function `*ProximityPlacementGroupsClient.ListByResourceGroup` parameter(s) have been changed from `(string, *ProximityPlacementGroupsListByResourceGroupOptions)` to `(string, *ProximityPlacementGroupsClientListByResourceGroupOptions)`
+- Function `*ProximityPlacementGroupsClient.ListByResourceGroup` return value(s) have been changed from `(*ProximityPlacementGroupsListByResourceGroupPager)` to `(*ProximityPlacementGroupsClientListByResourceGroupPager)`
+- Function `*AvailabilitySetsClient.ListAvailableSizes` parameter(s) have been changed from `(context.Context, string, string, *AvailabilitySetsListAvailableSizesOptions)` to `(context.Context, string, string, *AvailabilitySetsClientListAvailableSizesOptions)`
+- Function `*AvailabilitySetsClient.ListAvailableSizes` return value(s) have been changed from `(AvailabilitySetsListAvailableSizesResponse, error)` to `(AvailabilitySetsClientListAvailableSizesResponse, error)`
+- Function `*VirtualMachineScaleSetsClient.BeginPowerOff` parameter(s) have been changed from `(context.Context, string, string, *VirtualMachineScaleSetsBeginPowerOffOptions)` to `(context.Context, string, string, *VirtualMachineScaleSetsClientBeginPowerOffOptions)`
+- Function `*VirtualMachineScaleSetsClient.BeginPowerOff` return value(s) have been changed from `(VirtualMachineScaleSetsPowerOffPollerResponse, error)` to `(VirtualMachineScaleSetsClientPowerOffPollerResponse, error)`
+- Function `*VirtualMachineScaleSetsClient.ConvertToSinglePlacementGroup` parameter(s) have been changed from `(context.Context, string, string, VMScaleSetConvertToSinglePlacementGroupInput, *VirtualMachineScaleSetsConvertToSinglePlacementGroupOptions)` to `(context.Context, string, string, VMScaleSetConvertToSinglePlacementGroupInput, *VirtualMachineScaleSetsClientConvertToSinglePlacementGroupOptions)`
+- Function `*VirtualMachineScaleSetsClient.ConvertToSinglePlacementGroup` return value(s) have been changed from `(VirtualMachineScaleSetsConvertToSinglePlacementGroupResponse, error)` to `(VirtualMachineScaleSetsClientConvertToSinglePlacementGroupResponse, error)`
+- Function `*CapacityReservationsClient.Get` parameter(s) have been changed from `(context.Context, string, string, string, *CapacityReservationsGetOptions)` to `(context.Context, string, string, string, *CapacityReservationsClientGetOptions)`
+- Function `*CapacityReservationsClient.Get` return value(s) have been changed from `(CapacityReservationsGetResponse, error)` to `(CapacityReservationsClientGetResponse, error)`
+- Function `*SnapshotsClient.Get` parameter(s) have been changed from `(context.Context, string, string, *SnapshotsGetOptions)` to `(context.Context, string, string, *SnapshotsClientGetOptions)`
+- Function `*SnapshotsClient.Get` return value(s) have been changed from `(SnapshotsGetResponse, error)` to `(SnapshotsClientGetResponse, error)`
+- Function `*GalleryImagesClient.BeginCreateOrUpdate` parameter(s) have been changed from `(context.Context, string, string, string, GalleryImage, *GalleryImagesBeginCreateOrUpdateOptions)` to `(context.Context, string, string, string, GalleryImage, *GalleryImagesClientBeginCreateOrUpdateOptions)`
+- Function `*GalleryImagesClient.BeginCreateOrUpdate` return value(s) have been changed from `(GalleryImagesCreateOrUpdatePollerResponse, error)` to `(GalleryImagesClientCreateOrUpdatePollerResponse, error)`
+- Function `*AvailabilitySetsClient.Get` parameter(s) have been changed from `(context.Context, string, string, *AvailabilitySetsGetOptions)` to `(context.Context, string, string, *AvailabilitySetsClientGetOptions)`
+- Function `*AvailabilitySetsClient.Get` return value(s) have been changed from `(AvailabilitySetsGetResponse, error)` to `(AvailabilitySetsClientGetResponse, error)`
+- Function `*CloudServicesClient.BeginUpdate` parameter(s) have been changed from `(context.Context, string, string, *CloudServicesBeginUpdateOptions)` to `(context.Context, string, string, *CloudServicesClientBeginUpdateOptions)`
+- Function `*CloudServicesClient.BeginUpdate` return value(s) have been changed from `(CloudServicesUpdatePollerResponse, error)` to `(CloudServicesClientUpdatePollerResponse, error)`
+- Function `*VirtualMachineScaleSetRollingUpgradesClient.BeginStartExtensionUpgrade` parameter(s) have been changed from `(context.Context, string, string, *VirtualMachineScaleSetRollingUpgradesBeginStartExtensionUpgradeOptions)` to `(context.Context, string, string, *VirtualMachineScaleSetRollingUpgradesClientBeginStartExtensionUpgradeOptions)`
+- Function `*VirtualMachineScaleSetRollingUpgradesClient.BeginStartExtensionUpgrade` return value(s) have been changed from `(VirtualMachineScaleSetRollingUpgradesStartExtensionUpgradePollerResponse, error)` to `(VirtualMachineScaleSetRollingUpgradesClientStartExtensionUpgradePollerResponse, error)`
+- Function `*VirtualMachineExtensionsClient.List` parameter(s) have been changed from `(context.Context, string, string, *VirtualMachineExtensionsListOptions)` to `(context.Context, string, string, *VirtualMachineExtensionsClientListOptions)`
+- Function `*VirtualMachineExtensionsClient.List` return value(s) have been changed from `(VirtualMachineExtensionsListResponse, error)` to `(VirtualMachineExtensionsClientListResponse, error)`
+- Function `*GalleriesClient.List` parameter(s) have been changed from `(*GalleriesListOptions)` to `(*GalleriesClientListOptions)`
+- Function `*GalleriesClient.List` return value(s) have been changed from `(*GalleriesListPager)` to `(*GalleriesClientListPager)`
+- Function `*VirtualMachineScaleSetVMsClient.BeginReimage` parameter(s) have been changed from `(context.Context, string, string, string, *VirtualMachineScaleSetVMsBeginReimageOptions)` to `(context.Context, string, string, string, *VirtualMachineScaleSetVMsClientBeginReimageOptions)`
+- Function `*VirtualMachineScaleSetVMsClient.BeginReimage` return value(s) have been changed from `(VirtualMachineScaleSetVMsReimagePollerResponse, error)` to `(VirtualMachineScaleSetVMsClientReimagePollerResponse, error)`
+- Function `*ImagesClient.BeginUpdate` parameter(s) have been changed from `(context.Context, string, string, ImageUpdate, *ImagesBeginUpdateOptions)` to `(context.Context, string, string, ImageUpdate, *ImagesClientBeginUpdateOptions)`
+- Function `*ImagesClient.BeginUpdate` return value(s) have been changed from `(ImagesUpdatePollerResponse, error)` to `(ImagesClientUpdatePollerResponse, error)`
+- Function `*VirtualMachinesClient.BeginInstallPatches` parameter(s) have been changed from `(context.Context, string, string, VirtualMachineInstallPatchesParameters, *VirtualMachinesBeginInstallPatchesOptions)` to `(context.Context, string, string, VirtualMachineInstallPatchesParameters, *VirtualMachinesClientBeginInstallPatchesOptions)`
+- Function `*VirtualMachinesClient.BeginInstallPatches` return value(s) have been changed from `(VirtualMachinesInstallPatchesPollerResponse, error)` to `(VirtualMachinesClientInstallPatchesPollerResponse, error)`
+- Function `*VirtualMachineScaleSetsClient.ForceRecoveryServiceFabricPlatformUpdateDomainWalk` parameter(s) have been changed from `(context.Context, string, string, int32, *VirtualMachineScaleSetsForceRecoveryServiceFabricPlatformUpdateDomainWalkOptions)` to `(context.Context, string, string, int32, *VirtualMachineScaleSetsClientForceRecoveryServiceFabricPlatformUpdateDomainWalkOptions)`
+- Function `*VirtualMachineScaleSetsClient.ForceRecoveryServiceFabricPlatformUpdateDomainWalk` return value(s) have been changed from `(VirtualMachineScaleSetsForceRecoveryServiceFabricPlatformUpdateDomainWalkResponse, error)` to `(VirtualMachineScaleSetsClientForceRecoveryServiceFabricPlatformUpdateDomainWalkResponse, error)`
+- Function `*VirtualMachineScaleSetsClient.ListByLocation` parameter(s) have been changed from `(string, *VirtualMachineScaleSetsListByLocationOptions)` to `(string, *VirtualMachineScaleSetsClientListByLocationOptions)`
+- Function `*VirtualMachineScaleSetsClient.ListByLocation` return value(s) have been changed from `(*VirtualMachineScaleSetsListByLocationPager)` to `(*VirtualMachineScaleSetsClientListByLocationPager)`
+- Function `*VirtualMachineScaleSetVMsClient.List` parameter(s) have been changed from `(string, string, *VirtualMachineScaleSetVMsListOptions)` to `(string, string, *VirtualMachineScaleSetVMsClientListOptions)`
+- Function `*VirtualMachineScaleSetVMsClient.List` return value(s) have been changed from `(*VirtualMachineScaleSetVMsListPager)` to `(*VirtualMachineScaleSetVMsClientListPager)`
+- Function `*GalleryImagesClient.BeginDelete` parameter(s) have been changed from `(context.Context, string, string, string, *GalleryImagesBeginDeleteOptions)` to `(context.Context, string, string, string, *GalleryImagesClientBeginDeleteOptions)`
+- Function `*GalleryImagesClient.BeginDelete` return value(s) have been changed from `(GalleryImagesDeletePollerResponse, error)` to `(GalleryImagesClientDeletePollerResponse, error)`
+- Function `*GalleryImageVersionsClient.Get` parameter(s) have been changed from `(context.Context, string, string, string, string, *GalleryImageVersionsGetOptions)` to `(context.Context, string, string, string, string, *GalleryImageVersionsClientGetOptions)`
+- Function `*GalleryImageVersionsClient.Get` return value(s) have been changed from `(GalleryImageVersionsGetResponse, error)` to `(GalleryImageVersionsClientGetResponse, error)`
+- Function `*GalleryApplicationsClient.BeginCreateOrUpdate` parameter(s) have been changed from `(context.Context, string, string, string, GalleryApplication, *GalleryApplicationsBeginCreateOrUpdateOptions)` to `(context.Context, string, string, string, GalleryApplication, *GalleryApplicationsClientBeginCreateOrUpdateOptions)`
+- Function `*GalleryApplicationsClient.BeginCreateOrUpdate` return value(s) have been changed from `(GalleryApplicationsCreateOrUpdatePollerResponse, error)` to `(GalleryApplicationsClientCreateOrUpdatePollerResponse, error)`
+- Function `*CloudServiceRoleInstancesClient.BeginDelete` parameter(s) have been changed from `(context.Context, string, string, string, *CloudServiceRoleInstancesBeginDeleteOptions)` to `(context.Context, string, string, string, *CloudServiceRoleInstancesClientBeginDeleteOptions)`
+- Function `*CloudServiceRoleInstancesClient.BeginDelete` return value(s) have been changed from `(CloudServiceRoleInstancesDeletePollerResponse, error)` to `(CloudServiceRoleInstancesClientDeletePollerResponse, error)`
+- Function `*VirtualMachineScaleSetVMsClient.Get` parameter(s) have been changed from `(context.Context, string, string, string, *VirtualMachineScaleSetVMsGetOptions)` to `(context.Context, string, string, string, *VirtualMachineScaleSetVMsClientGetOptions)`
+- Function `*VirtualMachineScaleSetVMsClient.Get` return value(s) have been changed from `(VirtualMachineScaleSetVMsGetResponse, error)` to `(VirtualMachineScaleSetVMsClientGetResponse, error)`
+- Function `*GalleriesClient.Get` parameter(s) have been changed from `(context.Context, string, string, *GalleriesGetOptions)` to `(context.Context, string, string, *GalleriesClientGetOptions)`
+- Function `*GalleriesClient.Get` return value(s) have been changed from `(GalleriesGetResponse, error)` to `(GalleriesClientGetResponse, error)`
+- Function `*VirtualMachineRunCommandsClient.BeginDelete` parameter(s) have been changed from `(context.Context, string, string, string, *VirtualMachineRunCommandsBeginDeleteOptions)` to `(context.Context, string, string, string, *VirtualMachineRunCommandsClientBeginDeleteOptions)`
+- Function `*VirtualMachineRunCommandsClient.BeginDelete` return value(s) have been changed from `(VirtualMachineRunCommandsDeletePollerResponse, error)` to `(VirtualMachineRunCommandsClientDeletePollerResponse, error)`
+- Function `*GalleryApplicationVersionsClient.ListByGalleryApplication` parameter(s) have been changed from `(string, string, string, *GalleryApplicationVersionsListByGalleryApplicationOptions)` to `(string, string, string, *GalleryApplicationVersionsClientListByGalleryApplicationOptions)`
+- Function `*GalleryApplicationVersionsClient.ListByGalleryApplication` return value(s) have been changed from `(*GalleryApplicationVersionsListByGalleryApplicationPager)` to `(*GalleryApplicationVersionsClientListByGalleryApplicationPager)`
+- Function `*DedicatedHostGroupsClient.Update` parameter(s) have been changed from `(context.Context, string, string, DedicatedHostGroupUpdate, *DedicatedHostGroupsUpdateOptions)` to `(context.Context, string, string, DedicatedHostGroupUpdate, *DedicatedHostGroupsClientUpdateOptions)`
+- Function `*DedicatedHostGroupsClient.Update` return value(s) have been changed from `(DedicatedHostGroupsUpdateResponse, error)` to `(DedicatedHostGroupsClientUpdateResponse, error)`
+- Function `*VirtualMachineScaleSetVMExtensionsClient.List` parameter(s) have been changed from `(context.Context, string, string, string, *VirtualMachineScaleSetVMExtensionsListOptions)` to `(context.Context, string, string, string, *VirtualMachineScaleSetVMExtensionsClientListOptions)`
+- Function `*VirtualMachineScaleSetVMExtensionsClient.List` return value(s) have been changed from `(VirtualMachineScaleSetVMExtensionsListResponse, error)` to `(VirtualMachineScaleSetVMExtensionsClientListResponse, error)`
+- Function `*RestorePointsClient.Get` parameter(s) have been changed from `(context.Context, string, string, string, *RestorePointsGetOptions)` to `(context.Context, string, string, string, *RestorePointsClientGetOptions)`
+- Function `*RestorePointsClient.Get` return value(s) have been changed from `(RestorePointsGetResponse, error)` to `(RestorePointsClientGetResponse, error)`
+- Function `*GalleriesClient.BeginCreateOrUpdate` parameter(s) have been changed from `(context.Context, string, string, Gallery, *GalleriesBeginCreateOrUpdateOptions)` to `(context.Context, string, string, Gallery, *GalleriesClientBeginCreateOrUpdateOptions)`
+- Function `*GalleriesClient.BeginCreateOrUpdate` return value(s) have been changed from `(GalleriesCreateOrUpdatePollerResponse, error)` to `(GalleriesClientCreateOrUpdatePollerResponse, error)`
+- Function `*VirtualMachinesClient.BeginDeallocate` parameter(s) have been changed from `(context.Context, string, string, *VirtualMachinesBeginDeallocateOptions)` to `(context.Context, string, string, *VirtualMachinesClientBeginDeallocateOptions)`
+- Function `*VirtualMachinesClient.BeginDeallocate` return value(s) have been changed from `(VirtualMachinesDeallocatePollerResponse, error)` to `(VirtualMachinesClientDeallocatePollerResponse, error)`
+- Function `*VirtualMachineScaleSetVMRunCommandsClient.BeginCreateOrUpdate` parameter(s) have been changed from `(context.Context, string, string, string, string, VirtualMachineRunCommand, *VirtualMachineScaleSetVMRunCommandsBeginCreateOrUpdateOptions)` to `(context.Context, string, string, string, string, VirtualMachineRunCommand, *VirtualMachineScaleSetVMRunCommandsClientBeginCreateOrUpdateOptions)`
+- Function `*VirtualMachineScaleSetVMRunCommandsClient.BeginCreateOrUpdate` return value(s) have been changed from `(VirtualMachineScaleSetVMRunCommandsCreateOrUpdatePollerResponse, error)` to `(VirtualMachineScaleSetVMRunCommandsClientCreateOrUpdatePollerResponse, error)`
+- Function `*VirtualMachinesClient.BeginStart` parameter(s) have been changed from `(context.Context, string, string, *VirtualMachinesBeginStartOptions)` to `(context.Context, string, string, *VirtualMachinesClientBeginStartOptions)`
+- Function `*VirtualMachinesClient.BeginStart` return value(s) have been changed from `(VirtualMachinesStartPollerResponse, error)` to `(VirtualMachinesClientStartPollerResponse, error)`
+- Function `*SharedGalleryImageVersionsClient.List` parameter(s) have been changed from `(string, string, string, *SharedGalleryImageVersionsListOptions)` to `(string, string, string, *SharedGalleryImageVersionsClientListOptions)`
+- Function `*SharedGalleryImageVersionsClient.List` return value(s) have been changed from `(*SharedGalleryImageVersionsListPager)` to `(*SharedGalleryImageVersionsClientListPager)`
+- Function `*DiskEncryptionSetsClient.BeginUpdate` parameter(s) have been changed from `(context.Context, string, string, DiskEncryptionSetUpdate, *DiskEncryptionSetsBeginUpdateOptions)` to `(context.Context, string, string, DiskEncryptionSetUpdate, *DiskEncryptionSetsClientBeginUpdateOptions)`
+- Function `*DiskEncryptionSetsClient.BeginUpdate` return value(s) have been changed from `(DiskEncryptionSetsUpdatePollerResponse, error)` to `(DiskEncryptionSetsClientUpdatePollerResponse, error)`
+- Function `*DiskEncryptionSetsClient.ListAssociatedResources` parameter(s) have been changed from `(string, string, *DiskEncryptionSetsListAssociatedResourcesOptions)` to `(string, string, *DiskEncryptionSetsClientListAssociatedResourcesOptions)`
+- Function `*DiskEncryptionSetsClient.ListAssociatedResources` return value(s) have been changed from `(*DiskEncryptionSetsListAssociatedResourcesPager)` to `(*DiskEncryptionSetsClientListAssociatedResourcesPager)`
+- Function `*VirtualMachineScaleSetVMsClient.BeginDelete` parameter(s) have been changed from `(context.Context, string, string, string, *VirtualMachineScaleSetVMsBeginDeleteOptions)` to `(context.Context, string, string, string, *VirtualMachineScaleSetVMsClientBeginDeleteOptions)`
+- Function `*VirtualMachineScaleSetVMsClient.BeginDelete` return value(s) have been changed from `(VirtualMachineScaleSetVMsDeletePollerResponse, error)` to `(VirtualMachineScaleSetVMsClientDeletePollerResponse, error)`
+- Function `*SnapshotsClient.BeginUpdate` parameter(s) have been changed from `(context.Context, string, string, SnapshotUpdate, *SnapshotsBeginUpdateOptions)` to `(context.Context, string, string, SnapshotUpdate, *SnapshotsClientBeginUpdateOptions)`
+- Function `*SnapshotsClient.BeginUpdate` return value(s) have been changed from `(SnapshotsUpdatePollerResponse, error)` to `(SnapshotsClientUpdatePollerResponse, error)`
+- Function `*DedicatedHostsClient.BeginUpdate` parameter(s) have been changed from `(context.Context, string, string, string, DedicatedHostUpdate, *DedicatedHostsBeginUpdateOptions)` to `(context.Context, string, string, string, DedicatedHostUpdate, *DedicatedHostsClientBeginUpdateOptions)`
+- Function `*DedicatedHostsClient.BeginUpdate` return value(s) have been changed from `(DedicatedHostsUpdatePollerResponse, error)` to `(DedicatedHostsClientUpdatePollerResponse, error)`
+- Function `*VirtualMachinesClient.InstanceView` parameter(s) have been changed from `(context.Context, string, string, *VirtualMachinesInstanceViewOptions)` to `(context.Context, string, string, *VirtualMachinesClientInstanceViewOptions)`
+- Function `*VirtualMachinesClient.InstanceView` return value(s) have been changed from `(VirtualMachinesInstanceViewResponse, error)` to `(VirtualMachinesClientInstanceViewResponse, error)`
+- Function `*OperationsClient.List` parameter(s) have been changed from `(context.Context, *OperationsListOptions)` to `(context.Context, *OperationsClientListOptions)`
+- Function `*OperationsClient.List` return value(s) have been changed from `(OperationsListResponse, error)` to `(OperationsClientListResponse, error)`
+- Function `*VirtualMachineScaleSetVMExtensionsClient.BeginCreateOrUpdate` parameter(s) have been changed from `(context.Context, string, string, string, string, VirtualMachineScaleSetVMExtension, *VirtualMachineScaleSetVMExtensionsBeginCreateOrUpdateOptions)` to `(context.Context, string, string, string, string, VirtualMachineScaleSetVMExtension, *VirtualMachineScaleSetVMExtensionsClientBeginCreateOrUpdateOptions)`
+- Function `*VirtualMachineScaleSetVMExtensionsClient.BeginCreateOrUpdate` return value(s) have been changed from `(VirtualMachineScaleSetVMExtensionsCreateOrUpdatePollerResponse, error)` to `(VirtualMachineScaleSetVMExtensionsClientCreateOrUpdatePollerResponse, error)`
+- Function `*GalleryImageVersionsClient.BeginCreateOrUpdate` parameter(s) have been changed from `(context.Context, string, string, string, string, GalleryImageVersion, *GalleryImageVersionsBeginCreateOrUpdateOptions)` to `(context.Context, string, string, string, string, GalleryImageVersion, *GalleryImageVersionsClientBeginCreateOrUpdateOptions)`
+- Function `*GalleryImageVersionsClient.BeginCreateOrUpdate` return value(s) have been changed from `(GalleryImageVersionsCreateOrUpdatePollerResponse, error)` to `(GalleryImageVersionsClientCreateOrUpdatePollerResponse, error)`
+- Function `*DiskAccessesClient.GetAPrivateEndpointConnection` parameter(s) have been changed from `(context.Context, string, string, string, *DiskAccessesGetAPrivateEndpointConnectionOptions)` to `(context.Context, string, string, string, *DiskAccessesClientGetAPrivateEndpointConnectionOptions)`
+- Function `*DiskAccessesClient.GetAPrivateEndpointConnection` return value(s) have been changed from `(DiskAccessesGetAPrivateEndpointConnectionResponse, error)` to `(DiskAccessesClientGetAPrivateEndpointConnectionResponse, error)`
+- Function `*VirtualMachineScaleSetVMRunCommandsClient.BeginDelete` parameter(s) have been changed from `(context.Context, string, string, string, string, *VirtualMachineScaleSetVMRunCommandsBeginDeleteOptions)` to `(context.Context, string, string, string, string, *VirtualMachineScaleSetVMRunCommandsClientBeginDeleteOptions)`
+- Function `*VirtualMachineScaleSetVMRunCommandsClient.BeginDelete` return value(s) have been changed from `(VirtualMachineScaleSetVMRunCommandsDeletePollerResponse, error)` to `(VirtualMachineScaleSetVMRunCommandsClientDeletePollerResponse, error)`
+- Function `*VirtualMachineRunCommandsClient.GetByVirtualMachine` parameter(s) have been changed from `(context.Context, string, string, string, *VirtualMachineRunCommandsGetByVirtualMachineOptions)` to `(context.Context, string, string, string, *VirtualMachineRunCommandsClientGetByVirtualMachineOptions)`
+- Function `*VirtualMachineRunCommandsClient.GetByVirtualMachine` return value(s) have been changed from `(VirtualMachineRunCommandsGetByVirtualMachineResponse, error)` to `(VirtualMachineRunCommandsClientGetByVirtualMachineResponse, error)`
+- Function `*SnapshotsClient.BeginDelete` parameter(s) have been changed from `(context.Context, string, string, *SnapshotsBeginDeleteOptions)` to `(context.Context, string, string, *SnapshotsClientBeginDeleteOptions)`
+- Function `*SnapshotsClient.BeginDelete` return value(s) have been changed from `(SnapshotsDeletePollerResponse, error)` to `(SnapshotsClientDeletePollerResponse, error)`
+- Function `*VirtualMachineScaleSetRollingUpgradesClient.BeginCancel` parameter(s) have been changed from `(context.Context, string, string, *VirtualMachineScaleSetRollingUpgradesBeginCancelOptions)` to `(context.Context, string, string, *VirtualMachineScaleSetRollingUpgradesClientBeginCancelOptions)`
+- Function `*VirtualMachineScaleSetRollingUpgradesClient.BeginCancel` return value(s) have been changed from `(VirtualMachineScaleSetRollingUpgradesCancelPollerResponse, error)` to `(VirtualMachineScaleSetRollingUpgradesClientCancelPollerResponse, error)`
+- Function `*VirtualMachinesClient.BeginPowerOff` parameter(s) have been changed from `(context.Context, string, string, *VirtualMachinesBeginPowerOffOptions)` to `(context.Context, string, string, *VirtualMachinesClientBeginPowerOffOptions)`
+- Function `*VirtualMachinesClient.BeginPowerOff` return value(s) have been changed from `(VirtualMachinesPowerOffPollerResponse, error)` to `(VirtualMachinesClientPowerOffPollerResponse, error)`
+- Function `*VirtualMachineExtensionImagesClient.ListTypes` parameter(s) have been changed from `(context.Context, string, string, *VirtualMachineExtensionImagesListTypesOptions)` to `(context.Context, string, string, *VirtualMachineExtensionImagesClientListTypesOptions)`
+- Function `*VirtualMachineExtensionImagesClient.ListTypes` return value(s) have been changed from `(VirtualMachineExtensionImagesListTypesResponse, error)` to `(VirtualMachineExtensionImagesClientListTypesResponse, error)`
+- Function `*SSHPublicKeysClient.ListBySubscription` parameter(s) have been changed from `(*SSHPublicKeysListBySubscriptionOptions)` to `(*SSHPublicKeysClientListBySubscriptionOptions)`
+- Function `*SSHPublicKeysClient.ListBySubscription` return value(s) have been changed from `(*SSHPublicKeysListBySubscriptionPager)` to `(*SSHPublicKeysClientListBySubscriptionPager)`
+- Function `*VirtualMachineImagesClient.ListSKUs` parameter(s) have been changed from `(context.Context, string, string, string, *VirtualMachineImagesListSKUsOptions)` to `(context.Context, string, string, string, *VirtualMachineImagesClientListSKUsOptions)`
+- Function `*VirtualMachineImagesClient.ListSKUs` return value(s) have been changed from `(VirtualMachineImagesListSKUsResponse, error)` to `(VirtualMachineImagesClientListSKUsResponse, error)`
+- Function `*RestorePointCollectionsClient.BeginDelete` parameter(s) have been changed from `(context.Context, string, string, *RestorePointCollectionsBeginDeleteOptions)` to `(context.Context, string, string, *RestorePointCollectionsClientBeginDeleteOptions)`
+- Function `*RestorePointCollectionsClient.BeginDelete` return value(s) have been changed from `(RestorePointCollectionsDeletePollerResponse, error)` to `(RestorePointCollectionsClientDeletePollerResponse, error)`
+- Function `*VirtualMachineExtensionsClient.BeginUpdate` parameter(s) have been changed from `(context.Context, string, string, string, VirtualMachineExtensionUpdate, *VirtualMachineExtensionsBeginUpdateOptions)` to `(context.Context, string, string, string, VirtualMachineExtensionUpdate, *VirtualMachineExtensionsClientBeginUpdateOptions)`
+- Function `*VirtualMachineExtensionsClient.BeginUpdate` return value(s) have been changed from `(VirtualMachineExtensionsUpdatePollerResponse, error)` to `(VirtualMachineExtensionsClientUpdatePollerResponse, error)`
+- Function `*DisksClient.BeginGrantAccess` parameter(s) have been changed from `(context.Context, string, string, GrantAccessData, *DisksBeginGrantAccessOptions)` to `(context.Context, string, string, GrantAccessData, *DisksClientBeginGrantAccessOptions)`
+- Function `*DisksClient.BeginGrantAccess` return value(s) have been changed from `(DisksGrantAccessPollerResponse, error)` to `(DisksClientGrantAccessPollerResponse, error)`
+- Function `*SharedGalleriesClient.Get` parameter(s) have been changed from `(context.Context, string, string, *SharedGalleriesGetOptions)` to `(context.Context, string, string, *SharedGalleriesClientGetOptions)`
+- Function `*SharedGalleriesClient.Get` return value(s) have been changed from `(SharedGalleriesGetResponse, error)` to `(SharedGalleriesClientGetResponse, error)`
+- Function `*VirtualMachineExtensionsClient.BeginDelete` parameter(s) have been changed from `(context.Context, string, string, string, *VirtualMachineExtensionsBeginDeleteOptions)` to `(context.Context, string, string, string, *VirtualMachineExtensionsClientBeginDeleteOptions)`
+- Function `*VirtualMachineExtensionsClient.BeginDelete` return value(s) have been changed from `(VirtualMachineExtensionsDeletePollerResponse, error)` to `(VirtualMachineExtensionsClientDeletePollerResponse, error)`
+- Function `*CloudServiceRoleInstancesClient.GetInstanceView` parameter(s) have been changed from `(context.Context, string, string, string, *CloudServiceRoleInstancesGetInstanceViewOptions)` to `(context.Context, string, string, string, *CloudServiceRoleInstancesClientGetInstanceViewOptions)`
+- Function `*CloudServiceRoleInstancesClient.GetInstanceView` return value(s) have been changed from `(CloudServiceRoleInstancesGetInstanceViewResponse, error)` to `(CloudServiceRoleInstancesClientGetInstanceViewResponse, error)`
+- Function `*VirtualMachineScaleSetsClient.List` parameter(s) have been changed from `(string, *VirtualMachineScaleSetsListOptions)` to `(string, *VirtualMachineScaleSetsClientListOptions)`
+- Function `*VirtualMachineScaleSetsClient.List` return value(s) have been changed from `(*VirtualMachineScaleSetsListPager)` to `(*VirtualMachineScaleSetsClientListPager)`
+- Function `*VirtualMachineScaleSetVMRunCommandsClient.BeginUpdate` parameter(s) have been changed from `(context.Context, string, string, string, string, VirtualMachineRunCommandUpdate, *VirtualMachineScaleSetVMRunCommandsBeginUpdateOptions)` to `(context.Context, string, string, string, string, VirtualMachineRunCommandUpdate, *VirtualMachineScaleSetVMRunCommandsClientBeginUpdateOptions)`
+- Function `*VirtualMachineScaleSetVMRunCommandsClient.BeginUpdate` return value(s) have been changed from `(VirtualMachineScaleSetVMRunCommandsUpdatePollerResponse, error)` to `(VirtualMachineScaleSetVMRunCommandsClientUpdatePollerResponse, error)`
+- Function `*DedicatedHostGroupsClient.ListByResourceGroup` parameter(s) have been changed from `(string, *DedicatedHostGroupsListByResourceGroupOptions)` to `(string, *DedicatedHostGroupsClientListByResourceGroupOptions)`
+- Function `*DedicatedHostGroupsClient.ListByResourceGroup` return value(s) have been changed from `(*DedicatedHostGroupsListByResourceGroupPager)` to `(*DedicatedHostGroupsClientListByResourceGroupPager)`
+- Function `*CommunityGalleriesClient.Get` parameter(s) have been changed from `(context.Context, string, string, *CommunityGalleriesGetOptions)` to `(context.Context, string, string, *CommunityGalleriesClientGetOptions)`
+- Function `*CommunityGalleriesClient.Get` return value(s) have been changed from `(CommunityGalleriesGetResponse, error)` to `(CommunityGalleriesClientGetResponse, error)`
+- Function `*VirtualMachineScaleSetVMsClient.BeginRunCommand` parameter(s) have been changed from `(context.Context, string, string, string, RunCommandInput, *VirtualMachineScaleSetVMsBeginRunCommandOptions)` to `(context.Context, string, string, string, RunCommandInput, *VirtualMachineScaleSetVMsClientBeginRunCommandOptions)`
+- Function `*VirtualMachineScaleSetVMsClient.BeginRunCommand` return value(s) have been changed from `(VirtualMachineScaleSetVMsRunCommandPollerResponse, error)` to `(VirtualMachineScaleSetVMsClientRunCommandPollerResponse, error)`
+- Function `*CapacityReservationGroupsClient.Get` parameter(s) have been changed from `(context.Context, string, string, *CapacityReservationGroupsGetOptions)` to `(context.Context, string, string, *CapacityReservationGroupsClientGetOptions)`
+- Function `*CapacityReservationGroupsClient.Get` return value(s) have been changed from `(CapacityReservationGroupsGetResponse, error)` to `(CapacityReservationGroupsClientGetResponse, error)`
+- Function `*AvailabilitySetsClient.ListBySubscription` parameter(s) have been changed from `(*AvailabilitySetsListBySubscriptionOptions)` to `(*AvailabilitySetsClientListBySubscriptionOptions)`
+- Function `*AvailabilitySetsClient.ListBySubscription` return value(s) have been changed from `(*AvailabilitySetsListBySubscriptionPager)` to `(*AvailabilitySetsClientListBySubscriptionPager)`
+- Function `*VirtualMachineScaleSetsClient.BeginReimage` parameter(s) have been changed from `(context.Context, string, string, *VirtualMachineScaleSetsBeginReimageOptions)` to `(context.Context, string, string, *VirtualMachineScaleSetsClientBeginReimageOptions)`
+- Function `*VirtualMachineScaleSetsClient.BeginReimage` return value(s) have been changed from `(VirtualMachineScaleSetsReimagePollerResponse, error)` to `(VirtualMachineScaleSetsClientReimagePollerResponse, error)`
+- Function `*VirtualMachineScaleSetVMsClient.BeginRedeploy` parameter(s) have been changed from `(context.Context, string, string, string, *VirtualMachineScaleSetVMsBeginRedeployOptions)` to `(context.Context, string, string, string, *VirtualMachineScaleSetVMsClientBeginRedeployOptions)`
+- Function `*VirtualMachineScaleSetVMsClient.BeginRedeploy` return value(s) have been changed from `(VirtualMachineScaleSetVMsRedeployPollerResponse, error)` to `(VirtualMachineScaleSetVMsClientRedeployPollerResponse, error)`
+- Function `*VirtualMachineExtensionsClient.BeginCreateOrUpdate` parameter(s) have been changed from `(context.Context, string, string, string, VirtualMachineExtension, *VirtualMachineExtensionsBeginCreateOrUpdateOptions)` to `(context.Context, string, string, string, VirtualMachineExtension, *VirtualMachineExtensionsClientBeginCreateOrUpdateOptions)`
+- Function `*VirtualMachineExtensionsClient.BeginCreateOrUpdate` return value(s) have been changed from `(VirtualMachineExtensionsCreateOrUpdatePollerResponse, error)` to `(VirtualMachineExtensionsClientCreateOrUpdatePollerResponse, error)`
+- Function `*RestorePointCollectionsClient.Get` parameter(s) have been changed from `(context.Context, string, string, *RestorePointCollectionsGetOptions)` to `(context.Context, string, string, *RestorePointCollectionsClientGetOptions)`
+- Function `*RestorePointCollectionsClient.Get` return value(s) have been changed from `(RestorePointCollectionsGetResponse, error)` to `(RestorePointCollectionsClientGetResponse, error)`
+- Function `*VirtualMachinesClient.BeginAssessPatches` parameter(s) have been changed from `(context.Context, string, string, *VirtualMachinesBeginAssessPatchesOptions)` to `(context.Context, string, string, *VirtualMachinesClientBeginAssessPatchesOptions)`
+- Function `*VirtualMachinesClient.BeginAssessPatches` return value(s) have been changed from `(VirtualMachinesAssessPatchesPollerResponse, error)` to `(VirtualMachinesClientAssessPatchesPollerResponse, error)`
+- Function `*VirtualMachineScaleSetVMRunCommandsClient.List` parameter(s) have been changed from `(string, string, string, *VirtualMachineScaleSetVMRunCommandsListOptions)` to `(string, string, string, *VirtualMachineScaleSetVMRunCommandsClientListOptions)`
+- Function `*VirtualMachineScaleSetVMRunCommandsClient.List` return value(s) have been changed from `(*VirtualMachineScaleSetVMRunCommandsListPager)` to `(*VirtualMachineScaleSetVMRunCommandsClientListPager)`
+- Function `*VirtualMachineScaleSetExtensionsClient.BeginCreateOrUpdate` parameter(s) have been changed from `(context.Context, string, string, string, VirtualMachineScaleSetExtension, *VirtualMachineScaleSetExtensionsBeginCreateOrUpdateOptions)` to `(context.Context, string, string, string, VirtualMachineScaleSetExtension, *VirtualMachineScaleSetExtensionsClientBeginCreateOrUpdateOptions)`
+- Function `*VirtualMachineScaleSetExtensionsClient.BeginCreateOrUpdate` return value(s) have been changed from `(VirtualMachineScaleSetExtensionsCreateOrUpdatePollerResponse, error)` to `(VirtualMachineScaleSetExtensionsClientCreateOrUpdatePollerResponse, error)`
+- Function `*CloudServiceOperatingSystemsClient.ListOSVersions` parameter(s) have been changed from `(string, *CloudServiceOperatingSystemsListOSVersionsOptions)` to `(string, *CloudServiceOperatingSystemsClientListOSVersionsOptions)`
+- Function `*CloudServiceOperatingSystemsClient.ListOSVersions` return value(s) have been changed from `(*CloudServiceOperatingSystemsListOSVersionsPager)` to `(*CloudServiceOperatingSystemsClientListOSVersionsPager)`
+- Function `*VirtualMachinesClient.BeginPerformMaintenance` parameter(s) have been changed from `(context.Context, string, string, *VirtualMachinesBeginPerformMaintenanceOptions)` to `(context.Context, string, string, *VirtualMachinesClientBeginPerformMaintenanceOptions)`
+- Function `*VirtualMachinesClient.BeginPerformMaintenance` return value(s) have been changed from `(VirtualMachinesPerformMaintenancePollerResponse, error)` to `(VirtualMachinesClientPerformMaintenancePollerResponse, error)`
+- Function `*VirtualMachineScaleSetExtensionsClient.Get` parameter(s) have been changed from `(context.Context, string, string, string, *VirtualMachineScaleSetExtensionsGetOptions)` to `(context.Context, string, string, string, *VirtualMachineScaleSetExtensionsClientGetOptions)`
+- Function `*VirtualMachineScaleSetExtensionsClient.Get` return value(s) have been changed from `(VirtualMachineScaleSetExtensionsGetResponse, error)` to `(VirtualMachineScaleSetExtensionsClientGetResponse, error)`
+- Function `*CapacityReservationsClient.BeginDelete` parameter(s) have been changed from `(context.Context, string, string, string, *CapacityReservationsBeginDeleteOptions)` to `(context.Context, string, string, string, *CapacityReservationsClientBeginDeleteOptions)`
+- Function `*CapacityReservationsClient.BeginDelete` return value(s) have been changed from `(CapacityReservationsDeletePollerResponse, error)` to `(CapacityReservationsClientDeletePollerResponse, error)`
+- Function `*GalleryApplicationsClient.BeginUpdate` parameter(s) have been changed from `(context.Context, string, string, string, GalleryApplicationUpdate, *GalleryApplicationsBeginUpdateOptions)` to `(context.Context, string, string, string, GalleryApplicationUpdate, *GalleryApplicationsClientBeginUpdateOptions)`
+- Function `*GalleryApplicationsClient.BeginUpdate` return value(s) have been changed from `(GalleryApplicationsUpdatePollerResponse, error)` to `(GalleryApplicationsClientUpdatePollerResponse, error)`
+- Function `*VirtualMachineExtensionsClient.Get` parameter(s) have been changed from `(context.Context, string, string, string, *VirtualMachineExtensionsGetOptions)` to `(context.Context, string, string, string, *VirtualMachineExtensionsClientGetOptions)`
+- Function `*VirtualMachineExtensionsClient.Get` return value(s) have been changed from `(VirtualMachineExtensionsGetResponse, error)` to `(VirtualMachineExtensionsClientGetResponse, error)`
+- Function `*VirtualMachineScaleSetVMRunCommandsClient.Get` parameter(s) have been changed from `(context.Context, string, string, string, string, *VirtualMachineScaleSetVMRunCommandsGetOptions)` to `(context.Context, string, string, string, string, *VirtualMachineScaleSetVMRunCommandsClientGetOptions)`
+- Function `*VirtualMachineScaleSetVMRunCommandsClient.Get` return value(s) have been changed from `(VirtualMachineScaleSetVMRunCommandsGetResponse, error)` to `(VirtualMachineScaleSetVMRunCommandsClientGetResponse, error)`
+- Function `*DiskAccessesClient.BeginCreateOrUpdate` parameter(s) have been changed from `(context.Context, string, string, DiskAccess, *DiskAccessesBeginCreateOrUpdateOptions)` to `(context.Context, string, string, DiskAccess, *DiskAccessesClientBeginCreateOrUpdateOptions)`
+- Function `*DiskAccessesClient.BeginCreateOrUpdate` return value(s) have been changed from `(DiskAccessesCreateOrUpdatePollerResponse, error)` to `(DiskAccessesClientCreateOrUpdatePollerResponse, error)`
+- Function `*SSHPublicKeysClient.GenerateKeyPair` parameter(s) have been changed from `(context.Context, string, string, *SSHPublicKeysGenerateKeyPairOptions)` to `(context.Context, string, string, *SSHPublicKeysClientGenerateKeyPairOptions)`
+- Function `*SSHPublicKeysClient.GenerateKeyPair` return value(s) have been changed from `(SSHPublicKeysGenerateKeyPairResponse, error)` to `(SSHPublicKeysClientGenerateKeyPairResponse, error)`
+- Function `*VirtualMachineRunCommandsClient.ListByVirtualMachine` parameter(s) have been changed from `(string, string, *VirtualMachineRunCommandsListByVirtualMachineOptions)` to `(string, string, *VirtualMachineRunCommandsClientListByVirtualMachineOptions)`
+- Function `*VirtualMachineRunCommandsClient.ListByVirtualMachine` return value(s) have been changed from `(*VirtualMachineRunCommandsListByVirtualMachinePager)` to `(*VirtualMachineRunCommandsClientListByVirtualMachinePager)`
+- Function `*VirtualMachinesClient.ListAll` parameter(s) have been changed from `(*VirtualMachinesListAllOptions)` to `(*VirtualMachinesClientListAllOptions)`
+- Function `*VirtualMachinesClient.ListAll` return value(s) have been changed from `(*VirtualMachinesListAllPager)` to `(*VirtualMachinesClientListAllPager)`
+- Function `*SSHPublicKeysClient.Delete` parameter(s) have been changed from `(context.Context, string, string, *SSHPublicKeysDeleteOptions)` to `(context.Context, string, string, *SSHPublicKeysClientDeleteOptions)`
+- Function `*SSHPublicKeysClient.Delete` return value(s) have been changed from `(SSHPublicKeysDeleteResponse, error)` to `(SSHPublicKeysClientDeleteResponse, error)`
+- Function `*SnapshotsClient.List` parameter(s) have been changed from `(*SnapshotsListOptions)` to `(*SnapshotsClientListOptions)`
+- Function `*SnapshotsClient.List` return value(s) have been changed from `(*SnapshotsListPager)` to `(*SnapshotsClientListPager)`
+- Function `*SnapshotsClient.BeginRevokeAccess` parameter(s) have been changed from `(context.Context, string, string, *SnapshotsBeginRevokeAccessOptions)` to `(context.Context, string, string, *SnapshotsClientBeginRevokeAccessOptions)`
+- Function `*SnapshotsClient.BeginRevokeAccess` return value(s) have been changed from `(SnapshotsRevokeAccessPollerResponse, error)` to `(SnapshotsClientRevokeAccessPollerResponse, error)`
+- Function `*CapacityReservationsClient.BeginUpdate` parameter(s) have been changed from `(context.Context, string, string, string, CapacityReservationUpdate, *CapacityReservationsBeginUpdateOptions)` to `(context.Context, string, string, string, CapacityReservationUpdate, *CapacityReservationsClientBeginUpdateOptions)`
+- Function `*CapacityReservationsClient.BeginUpdate` return value(s) have been changed from `(CapacityReservationsUpdatePollerResponse, error)` to `(CapacityReservationsClientUpdatePollerResponse, error)`
+- Function `*CloudServiceRolesClient.Get` parameter(s) have been changed from `(context.Context, string, string, string, *CloudServiceRolesGetOptions)` to `(context.Context, string, string, string, *CloudServiceRolesClientGetOptions)`
+- Function `*CloudServiceRolesClient.Get` return value(s) have been changed from `(CloudServiceRolesGetResponse, error)` to `(CloudServiceRolesClientGetResponse, error)`
+- Function `*ImagesClient.List` parameter(s) have been changed from `(*ImagesListOptions)` to `(*ImagesClientListOptions)`
+- Function `*ImagesClient.List` return value(s) have been changed from `(*ImagesListPager)` to `(*ImagesClientListPager)`
+- Function `*RestorePointCollectionsClient.CreateOrUpdate` parameter(s) have been changed from `(context.Context, string, string, RestorePointCollection, *RestorePointCollectionsCreateOrUpdateOptions)` to `(context.Context, string, string, RestorePointCollection, *RestorePointCollectionsClientCreateOrUpdateOptions)`
+- Function `*RestorePointCollectionsClient.CreateOrUpdate` return value(s) have been changed from `(RestorePointCollectionsCreateOrUpdateResponse, error)` to `(RestorePointCollectionsClientCreateOrUpdateResponse, error)`
+- Function `*ProximityPlacementGroupsClient.Update` parameter(s) have been changed from `(context.Context, string, string, ProximityPlacementGroupUpdate, *ProximityPlacementGroupsUpdateOptions)` to `(context.Context, string, string, ProximityPlacementGroupUpdate, *ProximityPlacementGroupsClientUpdateOptions)`
+- Function `*ProximityPlacementGroupsClient.Update` return value(s) have been changed from `(ProximityPlacementGroupsUpdateResponse, error)` to `(ProximityPlacementGroupsClientUpdateResponse, error)`
+- Function `*RestorePointsClient.BeginDelete` parameter(s) have been changed from `(context.Context, string, string, string, *RestorePointsBeginDeleteOptions)` to `(context.Context, string, string, string, *RestorePointsClientBeginDeleteOptions)`
+- Function `*RestorePointsClient.BeginDelete` return value(s) have been changed from `(RestorePointsDeletePollerResponse, error)` to `(RestorePointsClientDeletePollerResponse, error)`
+- Function `*DiskAccessesClient.ListByResourceGroup` parameter(s) have been changed from `(string, *DiskAccessesListByResourceGroupOptions)` to `(string, *DiskAccessesClientListByResourceGroupOptions)`
+- Function `*DiskAccessesClient.ListByResourceGroup` return value(s) have been changed from `(*DiskAccessesListByResourceGroupPager)` to `(*DiskAccessesClientListByResourceGroupPager)`
+- Function `*GallerySharingProfileClient.BeginUpdate` parameter(s) have been changed from `(context.Context, string, string, SharingUpdate, *GallerySharingProfileBeginUpdateOptions)` to `(context.Context, string, string, SharingUpdate, *GallerySharingProfileClientBeginUpdateOptions)`
+- Function `*GallerySharingProfileClient.BeginUpdate` return value(s) have been changed from `(GallerySharingProfileUpdatePollerResponse, error)` to `(GallerySharingProfileClientUpdatePollerResponse, error)`
+- Function `*GalleryApplicationsClient.BeginDelete` parameter(s) have been changed from `(context.Context, string, string, string, *GalleryApplicationsBeginDeleteOptions)` to `(context.Context, string, string, string, *GalleryApplicationsClientBeginDeleteOptions)`
+- Function `*GalleryApplicationsClient.BeginDelete` return value(s) have been changed from `(GalleryApplicationsDeletePollerResponse, error)` to `(GalleryApplicationsClientDeletePollerResponse, error)`
+- Function `*ProximityPlacementGroupsClient.Delete` parameter(s) have been changed from `(context.Context, string, string, *ProximityPlacementGroupsDeleteOptions)` to `(context.Context, string, string, *ProximityPlacementGroupsClientDeleteOptions)`
+- Function `*ProximityPlacementGroupsClient.Delete` return value(s) have been changed from `(ProximityPlacementGroupsDeleteResponse, error)` to `(ProximityPlacementGroupsClientDeleteResponse, error)`
+- Function `*DiskAccessesClient.BeginUpdate` parameter(s) have been changed from `(context.Context, string, string, DiskAccessUpdate, *DiskAccessesBeginUpdateOptions)` to `(context.Context, string, string, DiskAccessUpdate, *DiskAccessesClientBeginUpdateOptions)`
+- Function `*DiskAccessesClient.BeginUpdate` return value(s) have been changed from `(DiskAccessesUpdatePollerResponse, error)` to `(DiskAccessesClientUpdatePollerResponse, error)`
+- Function `*DedicatedHostGroupsClient.CreateOrUpdate` parameter(s) have been changed from `(context.Context, string, string, DedicatedHostGroup, *DedicatedHostGroupsCreateOrUpdateOptions)` to `(context.Context, string, string, DedicatedHostGroup, *DedicatedHostGroupsClientCreateOrUpdateOptions)`
+- Function `*DedicatedHostGroupsClient.CreateOrUpdate` return value(s) have been changed from `(DedicatedHostGroupsCreateOrUpdateResponse, error)` to `(DedicatedHostGroupsClientCreateOrUpdateResponse, error)`
+- Function `*RestorePointCollectionsClient.ListAll` parameter(s) have been changed from `(*RestorePointCollectionsListAllOptions)` to `(*RestorePointCollectionsClientListAllOptions)`
+- Function `*RestorePointCollectionsClient.ListAll` return value(s) have been changed from `(*RestorePointCollectionsListAllPager)` to `(*RestorePointCollectionsClientListAllPager)`
+- Function `*VirtualMachineScaleSetExtensionsClient.List` parameter(s) have been changed from `(string, string, *VirtualMachineScaleSetExtensionsListOptions)` to `(string, string, *VirtualMachineScaleSetExtensionsClientListOptions)`
+- Function `*VirtualMachineScaleSetExtensionsClient.List` return value(s) have been changed from `(*VirtualMachineScaleSetExtensionsListPager)` to `(*VirtualMachineScaleSetExtensionsClientListPager)`
+- Function `*CloudServicesClient.BeginReimage` parameter(s) have been changed from `(context.Context, string, string, *CloudServicesBeginReimageOptions)` to `(context.Context, string, string, *CloudServicesClientBeginReimageOptions)`
+- Function `*CloudServicesClient.BeginReimage` return value(s) have been changed from `(CloudServicesReimagePollerResponse, error)` to `(CloudServicesClientReimagePollerResponse, error)`
+- Function `*GalleryApplicationsClient.Get` parameter(s) have been changed from `(context.Context, string, string, string, *GalleryApplicationsGetOptions)` to `(context.Context, string, string, string, *GalleryApplicationsClientGetOptions)`
+- Function
`*GalleryApplicationsClient.Get` return value(s) have been changed from `(GalleryApplicationsGetResponse, error)` to `(GalleryApplicationsClientGetResponse, error)` +- Function `*RestorePointsClient.BeginCreate` parameter(s) have been changed from `(context.Context, string, string, string, RestorePoint, *RestorePointsBeginCreateOptions)` to `(context.Context, string, string, string, RestorePoint, *RestorePointsClientBeginCreateOptions)` +- Function `*RestorePointsClient.BeginCreate` return value(s) have been changed from `(RestorePointsCreatePollerResponse, error)` to `(RestorePointsClientCreatePollerResponse, error)` +- Function `*DiskRestorePointClient.Get` parameter(s) have been changed from `(context.Context, string, string, string, string, *DiskRestorePointGetOptions)` to `(context.Context, string, string, string, string, *DiskRestorePointClientGetOptions)` +- Function `*DiskRestorePointClient.Get` return value(s) have been changed from `(DiskRestorePointGetResponse, error)` to `(DiskRestorePointClientGetResponse, error)` +- Function `*GalleryApplicationsClient.ListByGallery` parameter(s) have been changed from `(string, string, *GalleryApplicationsListByGalleryOptions)` to `(string, string, *GalleryApplicationsClientListByGalleryOptions)` +- Function `*GalleryApplicationsClient.ListByGallery` return value(s) have been changed from `(*GalleryApplicationsListByGalleryPager)` to `(*GalleryApplicationsClientListByGalleryPager)` +- Function `*DedicatedHostsClient.BeginDelete` parameter(s) have been changed from `(context.Context, string, string, string, *DedicatedHostsBeginDeleteOptions)` to `(context.Context, string, string, string, *DedicatedHostsClientBeginDeleteOptions)` +- Function `*DedicatedHostsClient.BeginDelete` return value(s) have been changed from `(DedicatedHostsDeletePollerResponse, error)` to `(DedicatedHostsClientDeletePollerResponse, error)` +- Function `*VirtualMachineImagesEdgeZoneClient.ListOffers` parameter(s) have been changed from `(context.Context, string, string, string, *VirtualMachineImagesEdgeZoneListOffersOptions)` to `(context.Context, string, string, string, *VirtualMachineImagesEdgeZoneClientListOffersOptions)` +- Function `*VirtualMachineImagesEdgeZoneClient.ListOffers` return value(s) have been changed from `(VirtualMachineImagesEdgeZoneListOffersResponse, error)` to `(VirtualMachineImagesEdgeZoneClientListOffersResponse, error)` +- Function `*CloudServicesUpdateDomainClient.ListUpdateDomains` parameter(s) have been changed from `(string, string, *CloudServicesUpdateDomainListUpdateDomainsOptions)` to `(string, string, *CloudServicesUpdateDomainClientListUpdateDomainsOptions)` +- Function `*CloudServicesUpdateDomainClient.ListUpdateDomains` return value(s) have been changed from `(*CloudServicesUpdateDomainListUpdateDomainsPager)` to `(*CloudServicesUpdateDomainClientListUpdateDomainsPager)` +- Function `*ResourceSKUsClient.List` parameter(s) have been changed from `(*ResourceSKUsListOptions)` to `(*ResourceSKUsClientListOptions)` +- Function `*ResourceSKUsClient.List` return value(s) have been changed from `(*ResourceSKUsListPager)` to `(*ResourceSKUsClientListPager)` +- Function `*VirtualMachineScaleSetVMsClient.GetInstanceView` parameter(s) have been changed from `(context.Context, string, string, string, *VirtualMachineScaleSetVMsGetInstanceViewOptions)` to `(context.Context, string, string, string, *VirtualMachineScaleSetVMsClientGetInstanceViewOptions)` +- Function `*VirtualMachineScaleSetVMsClient.GetInstanceView` return value(s) have been changed 
from `(VirtualMachineScaleSetVMsGetInstanceViewResponse, error)` to `(VirtualMachineScaleSetVMsClientGetInstanceViewResponse, error)` +- Function `*VirtualMachineScaleSetsClient.ListSKUs` parameter(s) have been changed from `(string, string, *VirtualMachineScaleSetsListSKUsOptions)` to `(string, string, *VirtualMachineScaleSetsClientListSKUsOptions)` +- Function `*VirtualMachineScaleSetsClient.ListSKUs` return value(s) have been changed from `(*VirtualMachineScaleSetsListSKUsPager)` to `(*VirtualMachineScaleSetsClientListSKUsPager)` +- Function `*SnapshotsClient.ListByResourceGroup` parameter(s) have been changed from `(string, *SnapshotsListByResourceGroupOptions)` to `(string, *SnapshotsClientListByResourceGroupOptions)` +- Function `*SnapshotsClient.ListByResourceGroup` return value(s) have been changed from `(*SnapshotsListByResourceGroupPager)` to `(*SnapshotsClientListByResourceGroupPager)` +- Function `*VirtualMachineScaleSetsClient.BeginUpdateInstances` parameter(s) have been changed from `(context.Context, string, string, VirtualMachineScaleSetVMInstanceRequiredIDs, *VirtualMachineScaleSetsBeginUpdateInstancesOptions)` to `(context.Context, string, string, VirtualMachineScaleSetVMInstanceRequiredIDs, *VirtualMachineScaleSetsClientBeginUpdateInstancesOptions)` +- Function `*VirtualMachineScaleSetsClient.BeginUpdateInstances` return value(s) have been changed from `(VirtualMachineScaleSetsUpdateInstancesPollerResponse, error)` to `(VirtualMachineScaleSetsClientUpdateInstancesPollerResponse, error)` +- Function `*VirtualMachineScaleSetsClient.BeginPerformMaintenance` parameter(s) have been changed from `(context.Context, string, string, *VirtualMachineScaleSetsBeginPerformMaintenanceOptions)` to `(context.Context, string, string, *VirtualMachineScaleSetsClientBeginPerformMaintenanceOptions)` +- Function `*VirtualMachineScaleSetsClient.BeginPerformMaintenance` return value(s) have been changed from `(VirtualMachineScaleSetsPerformMaintenancePollerResponse, error)` to `(VirtualMachineScaleSetsClientPerformMaintenancePollerResponse, error)` +- Function `*DiskAccessesClient.Get` parameter(s) have been changed from `(context.Context, string, string, *DiskAccessesGetOptions)` to `(context.Context, string, string, *DiskAccessesClientGetOptions)` +- Function `*DiskAccessesClient.Get` return value(s) have been changed from `(DiskAccessesGetResponse, error)` to `(DiskAccessesClientGetResponse, error)` +- Function `*CapacityReservationGroupsClient.Update` parameter(s) have been changed from `(context.Context, string, string, CapacityReservationGroupUpdate, *CapacityReservationGroupsUpdateOptions)` to `(context.Context, string, string, CapacityReservationGroupUpdate, *CapacityReservationGroupsClientUpdateOptions)` +- Function `*CapacityReservationGroupsClient.Update` return value(s) have been changed from `(CapacityReservationGroupsUpdateResponse, error)` to `(CapacityReservationGroupsClientUpdateResponse, error)` +- Function `*CloudServicesClient.GetInstanceView` parameter(s) have been changed from `(context.Context, string, string, *CloudServicesGetInstanceViewOptions)` to `(context.Context, string, string, *CloudServicesClientGetInstanceViewOptions)` +- Function `*CloudServicesClient.GetInstanceView` return value(s) have been changed from `(CloudServicesGetInstanceViewResponse, error)` to `(CloudServicesClientGetInstanceViewResponse, error)` +- Function `*VirtualMachinesClient.BeginRedeploy` parameter(s) have been changed from `(context.Context, string, string, 
*VirtualMachinesBeginRedeployOptions)` to `(context.Context, string, string, *VirtualMachinesClientBeginRedeployOptions)` +- Function `*VirtualMachinesClient.BeginRedeploy` return value(s) have been changed from `(VirtualMachinesRedeployPollerResponse, error)` to `(VirtualMachinesClientRedeployPollerResponse, error)` +- Function `*SSHPublicKeysClient.ListByResourceGroup` parameter(s) have been changed from `(string, *SSHPublicKeysListByResourceGroupOptions)` to `(string, *SSHPublicKeysClientListByResourceGroupOptions)` +- Function `*SSHPublicKeysClient.ListByResourceGroup` return value(s) have been changed from `(*SSHPublicKeysListByResourceGroupPager)` to `(*SSHPublicKeysClientListByResourceGroupPager)` +- Function `*VirtualMachineImagesClient.Get` parameter(s) have been changed from `(context.Context, string, string, string, string, string, *VirtualMachineImagesGetOptions)` to `(context.Context, string, string, string, string, string, *VirtualMachineImagesClientGetOptions)` +- Function `*VirtualMachineImagesClient.Get` return value(s) have been changed from `(VirtualMachineImagesGetResponse, error)` to `(VirtualMachineImagesClientGetResponse, error)` +- Function `*VirtualMachineScaleSetsClient.BeginDeleteInstances` parameter(s) have been changed from `(context.Context, string, string, VirtualMachineScaleSetVMInstanceRequiredIDs, *VirtualMachineScaleSetsBeginDeleteInstancesOptions)` to `(context.Context, string, string, VirtualMachineScaleSetVMInstanceRequiredIDs, *VirtualMachineScaleSetsClientBeginDeleteInstancesOptions)` +- Function `*VirtualMachineScaleSetsClient.BeginDeleteInstances` return value(s) have been changed from `(VirtualMachineScaleSetsDeleteInstancesPollerResponse, error)` to `(VirtualMachineScaleSetsClientDeleteInstancesPollerResponse, error)` +- Function `*VirtualMachineScaleSetsClient.BeginStart` parameter(s) have been changed from `(context.Context, string, string, *VirtualMachineScaleSetsBeginStartOptions)` to `(context.Context, string, string, *VirtualMachineScaleSetsClientBeginStartOptions)` +- Function `*VirtualMachineScaleSetsClient.BeginStart` return value(s) have been changed from `(VirtualMachineScaleSetsStartPollerResponse, error)` to `(VirtualMachineScaleSetsClientStartPollerResponse, error)` +- Function `*GalleryImagesClient.BeginUpdate` parameter(s) have been changed from `(context.Context, string, string, string, GalleryImageUpdate, *GalleryImagesBeginUpdateOptions)` to `(context.Context, string, string, string, GalleryImageUpdate, *GalleryImagesClientBeginUpdateOptions)` +- Function `*GalleryImagesClient.BeginUpdate` return value(s) have been changed from `(GalleryImagesUpdatePollerResponse, error)` to `(GalleryImagesClientUpdatePollerResponse, error)` +- Function `*DiskRestorePointClient.BeginRevokeAccess` parameter(s) have been changed from `(context.Context, string, string, string, string, *DiskRestorePointBeginRevokeAccessOptions)` to `(context.Context, string, string, string, string, *DiskRestorePointClientBeginRevokeAccessOptions)` +- Function `*DiskRestorePointClient.BeginRevokeAccess` return value(s) have been changed from `(DiskRestorePointRevokeAccessPollerResponse, error)` to `(DiskRestorePointClientRevokeAccessPollerResponse, error)` +- Function `*VirtualMachineExtensionImagesClient.ListVersions` parameter(s) have been changed from `(context.Context, string, string, string, *VirtualMachineExtensionImagesListVersionsOptions)` to `(context.Context, string, string, string, *VirtualMachineExtensionImagesClientListVersionsOptions)` +- Function 
`*VirtualMachineExtensionImagesClient.ListVersions` return value(s) have been changed from `(VirtualMachineExtensionImagesListVersionsResponse, error)` to `(VirtualMachineExtensionImagesClientListVersionsResponse, error)` +- Function `*DiskEncryptionSetsClient.Get` parameter(s) have been changed from `(context.Context, string, string, *DiskEncryptionSetsGetOptions)` to `(context.Context, string, string, *DiskEncryptionSetsClientGetOptions)` +- Function `*DiskEncryptionSetsClient.Get` return value(s) have been changed from `(DiskEncryptionSetsGetResponse, error)` to `(DiskEncryptionSetsClientGetResponse, error)` +- Function `*GalleriesClient.ListByResourceGroup` parameter(s) have been changed from `(string, *GalleriesListByResourceGroupOptions)` to `(string, *GalleriesClientListByResourceGroupOptions)` +- Function `*GalleriesClient.ListByResourceGroup` return value(s) have been changed from `(*GalleriesListByResourceGroupPager)` to `(*GalleriesClientListByResourceGroupPager)` +- Function `*VirtualMachineRunCommandsClient.Get` parameter(s) have been changed from `(context.Context, string, string, *VirtualMachineRunCommandsGetOptions)` to `(context.Context, string, string, *VirtualMachineRunCommandsClientGetOptions)` +- Function `*VirtualMachineRunCommandsClient.Get` return value(s) have been changed from `(VirtualMachineRunCommandsGetResponse, error)` to `(VirtualMachineRunCommandsClientGetResponse, error)` +- Function `*GalleriesClient.BeginDelete` parameter(s) have been changed from `(context.Context, string, string, *GalleriesBeginDeleteOptions)` to `(context.Context, string, string, *GalleriesClientBeginDeleteOptions)` +- Function `*GalleriesClient.BeginDelete` return value(s) have been changed from `(GalleriesDeletePollerResponse, error)` to `(GalleriesClientDeletePollerResponse, error)` +- Function `*SnapshotsClient.BeginCreateOrUpdate` parameter(s) have been changed from `(context.Context, string, string, Snapshot, *SnapshotsBeginCreateOrUpdateOptions)` to `(context.Context, string, string, Snapshot, *SnapshotsClientBeginCreateOrUpdateOptions)` +- Function `*SnapshotsClient.BeginCreateOrUpdate` return value(s) have been changed from `(SnapshotsCreateOrUpdatePollerResponse, error)` to `(SnapshotsClientCreateOrUpdatePollerResponse, error)` +- Function `*CapacityReservationsClient.ListByCapacityReservationGroup` parameter(s) have been changed from `(string, string, *CapacityReservationsListByCapacityReservationGroupOptions)` to `(string, string, *CapacityReservationsClientListByCapacityReservationGroupOptions)` +- Function `*CapacityReservationsClient.ListByCapacityReservationGroup` return value(s) have been changed from `(*CapacityReservationsListByCapacityReservationGroupPager)` to `(*CapacityReservationsClientListByCapacityReservationGroupPager)` +- Function `*VirtualMachinesClient.BeginReimage` parameter(s) have been changed from `(context.Context, string, string, *VirtualMachinesBeginReimageOptions)` to `(context.Context, string, string, *VirtualMachinesClientBeginReimageOptions)` +- Function `*VirtualMachinesClient.BeginReimage` return value(s) have been changed from `(VirtualMachinesReimagePollerResponse, error)` to `(VirtualMachinesClientReimagePollerResponse, error)` +- Function `*VirtualMachineExtensionImagesClient.Get` parameter(s) have been changed from `(context.Context, string, string, string, string, *VirtualMachineExtensionImagesGetOptions)` to `(context.Context, string, string, string, string, *VirtualMachineExtensionImagesClientGetOptions)` +- Function 
`*VirtualMachineExtensionImagesClient.Get` return value(s) have been changed from `(VirtualMachineExtensionImagesGetResponse, error)` to `(VirtualMachineExtensionImagesClientGetResponse, error)` +- Function `*CloudServiceRoleInstancesClient.BeginReimage` parameter(s) have been changed from `(context.Context, string, string, string, *CloudServiceRoleInstancesBeginReimageOptions)` to `(context.Context, string, string, string, *CloudServiceRoleInstancesClientBeginReimageOptions)` +- Function `*CloudServiceRoleInstancesClient.BeginReimage` return value(s) have been changed from `(CloudServiceRoleInstancesReimagePollerResponse, error)` to `(CloudServiceRoleInstancesClientReimagePollerResponse, error)` +- Function `*ImagesClient.BeginDelete` parameter(s) have been changed from `(context.Context, string, string, *ImagesBeginDeleteOptions)` to `(context.Context, string, string, *ImagesClientBeginDeleteOptions)` +- Function `*ImagesClient.BeginDelete` return value(s) have been changed from `(ImagesDeletePollerResponse, error)` to `(ImagesClientDeletePollerResponse, error)` +- Function `*GalleryImagesClient.ListByGallery` parameter(s) have been changed from `(string, string, *GalleryImagesListByGalleryOptions)` to `(string, string, *GalleryImagesClientListByGalleryOptions)` +- Function `*GalleryImagesClient.ListByGallery` return value(s) have been changed from `(*GalleryImagesListByGalleryPager)` to `(*GalleryImagesClientListByGalleryPager)` +- Function `*VirtualMachineScaleSetsClient.BeginCreateOrUpdate` parameter(s) have been changed from `(context.Context, string, string, VirtualMachineScaleSet, *VirtualMachineScaleSetsBeginCreateOrUpdateOptions)` to `(context.Context, string, string, VirtualMachineScaleSet, *VirtualMachineScaleSetsClientBeginCreateOrUpdateOptions)` +- Function `*VirtualMachineScaleSetsClient.BeginCreateOrUpdate` return value(s) have been changed from `(VirtualMachineScaleSetsCreateOrUpdatePollerResponse, error)` to `(VirtualMachineScaleSetsClientCreateOrUpdatePollerResponse, error)` +- Function `*DiskEncryptionSetsClient.List` parameter(s) have been changed from `(*DiskEncryptionSetsListOptions)` to `(*DiskEncryptionSetsClientListOptions)` +- Function `*DiskEncryptionSetsClient.List` return value(s) have been changed from `(*DiskEncryptionSetsListPager)` to `(*DiskEncryptionSetsClientListPager)` +- Function `*VirtualMachineScaleSetsClient.BeginUpdate` parameter(s) have been changed from `(context.Context, string, string, VirtualMachineScaleSetUpdate, *VirtualMachineScaleSetsBeginUpdateOptions)` to `(context.Context, string, string, VirtualMachineScaleSetUpdate, *VirtualMachineScaleSetsClientBeginUpdateOptions)` +- Function `*VirtualMachineScaleSetsClient.BeginUpdate` return value(s) have been changed from `(VirtualMachineScaleSetsUpdatePollerResponse, error)` to `(VirtualMachineScaleSetsClientUpdatePollerResponse, error)` +- Function `*VirtualMachineScaleSetsClient.BeginReimageAll` parameter(s) have been changed from `(context.Context, string, string, *VirtualMachineScaleSetsBeginReimageAllOptions)` to `(context.Context, string, string, *VirtualMachineScaleSetsClientBeginReimageAllOptions)` +- Function `*VirtualMachineScaleSetsClient.BeginReimageAll` return value(s) have been changed from `(VirtualMachineScaleSetsReimageAllPollerResponse, error)` to `(VirtualMachineScaleSetsClientReimageAllPollerResponse, error)` +- Function `*VirtualMachineScaleSetVMsClient.BeginStart` parameter(s) have been changed from `(context.Context, string, string, string, 
*VirtualMachineScaleSetVMsBeginStartOptions)` to `(context.Context, string, string, string, *VirtualMachineScaleSetVMsClientBeginStartOptions)` +- Function `*VirtualMachineScaleSetVMsClient.BeginStart` return value(s) have been changed from `(VirtualMachineScaleSetVMsStartPollerResponse, error)` to `(VirtualMachineScaleSetVMsClientStartPollerResponse, error)` +- Function `*CloudServicesClient.BeginDelete` parameter(s) have been changed from `(context.Context, string, string, *CloudServicesBeginDeleteOptions)` to `(context.Context, string, string, *CloudServicesClientBeginDeleteOptions)` +- Function `*CloudServicesClient.BeginDelete` return value(s) have been changed from `(CloudServicesDeletePollerResponse, error)` to `(CloudServicesClientDeletePollerResponse, error)` +- Function `*CommunityGalleryImagesClient.Get` parameter(s) have been changed from `(context.Context, string, string, string, *CommunityGalleryImagesGetOptions)` to `(context.Context, string, string, string, *CommunityGalleryImagesClientGetOptions)` +- Function `*CommunityGalleryImagesClient.Get` return value(s) have been changed from `(CommunityGalleryImagesGetResponse, error)` to `(CommunityGalleryImagesClientGetResponse, error)` +- Function `*VirtualMachineScaleSetVMExtensionsClient.Get` parameter(s) have been changed from `(context.Context, string, string, string, string, *VirtualMachineScaleSetVMExtensionsGetOptions)` to `(context.Context, string, string, string, string, *VirtualMachineScaleSetVMExtensionsClientGetOptions)` +- Function `*VirtualMachineScaleSetVMExtensionsClient.Get` return value(s) have been changed from `(VirtualMachineScaleSetVMExtensionsGetResponse, error)` to `(VirtualMachineScaleSetVMExtensionsClientGetResponse, error)` +- Function `*VirtualMachineScaleSetVMsClient.SimulateEviction` parameter(s) have been changed from `(context.Context, string, string, string, *VirtualMachineScaleSetVMsSimulateEvictionOptions)` to `(context.Context, string, string, string, *VirtualMachineScaleSetVMsClientSimulateEvictionOptions)` +- Function `*VirtualMachineScaleSetVMsClient.SimulateEviction` return value(s) have been changed from `(VirtualMachineScaleSetVMsSimulateEvictionResponse, error)` to `(VirtualMachineScaleSetVMsClientSimulateEvictionResponse, error)` +- Function `*ProximityPlacementGroupsClient.CreateOrUpdate` parameter(s) have been changed from `(context.Context, string, string, ProximityPlacementGroup, *ProximityPlacementGroupsCreateOrUpdateOptions)` to `(context.Context, string, string, ProximityPlacementGroup, *ProximityPlacementGroupsClientCreateOrUpdateOptions)` +- Function `*ProximityPlacementGroupsClient.CreateOrUpdate` return value(s) have been changed from `(ProximityPlacementGroupsCreateOrUpdateResponse, error)` to `(ProximityPlacementGroupsClientCreateOrUpdateResponse, error)` +- Function `*VirtualMachineScaleSetsClient.GetInstanceView` parameter(s) have been changed from `(context.Context, string, string, *VirtualMachineScaleSetsGetInstanceViewOptions)` to `(context.Context, string, string, *VirtualMachineScaleSetsClientGetInstanceViewOptions)` +- Function `*VirtualMachineScaleSetsClient.GetInstanceView` return value(s) have been changed from `(VirtualMachineScaleSetsGetInstanceViewResponse, error)` to `(VirtualMachineScaleSetsClientGetInstanceViewResponse, error)` +- Function `*ProximityPlacementGroupsClient.ListBySubscription` parameter(s) have been changed from `(*ProximityPlacementGroupsListBySubscriptionOptions)` to `(*ProximityPlacementGroupsClientListBySubscriptionOptions)` +- Function 
`*ProximityPlacementGroupsClient.ListBySubscription` return value(s) have been changed from `(*ProximityPlacementGroupsListBySubscriptionPager)` to `(*ProximityPlacementGroupsClientListBySubscriptionPager)` +- Function `*DiskEncryptionSetsClient.BeginCreateOrUpdate` parameter(s) have been changed from `(context.Context, string, string, DiskEncryptionSet, *DiskEncryptionSetsBeginCreateOrUpdateOptions)` to `(context.Context, string, string, DiskEncryptionSet, *DiskEncryptionSetsClientBeginCreateOrUpdateOptions)` +- Function `*DiskEncryptionSetsClient.BeginCreateOrUpdate` return value(s) have been changed from `(DiskEncryptionSetsCreateOrUpdatePollerResponse, error)` to `(DiskEncryptionSetsClientCreateOrUpdatePollerResponse, error)` +- Function `*CloudServicesUpdateDomainClient.BeginWalkUpdateDomain` parameter(s) have been changed from `(context.Context, string, string, int32, *CloudServicesUpdateDomainBeginWalkUpdateDomainOptions)` to `(context.Context, string, string, int32, *CloudServicesUpdateDomainClientBeginWalkUpdateDomainOptions)` +- Function `*CloudServicesUpdateDomainClient.BeginWalkUpdateDomain` return value(s) have been changed from `(CloudServicesUpdateDomainWalkUpdateDomainPollerResponse, error)` to `(CloudServicesUpdateDomainClientWalkUpdateDomainPollerResponse, error)` +- Function `*VirtualMachinesClient.BeginDelete` parameter(s) have been changed from `(context.Context, string, string, *VirtualMachinesBeginDeleteOptions)` to `(context.Context, string, string, *VirtualMachinesClientBeginDeleteOptions)` +- Function `*VirtualMachinesClient.BeginDelete` return value(s) have been changed from `(VirtualMachinesDeletePollerResponse, error)` to `(VirtualMachinesClientDeletePollerResponse, error)` +- Function `*DiskAccessesClient.BeginUpdateAPrivateEndpointConnection` parameter(s) have been changed from `(context.Context, string, string, string, PrivateEndpointConnection, *DiskAccessesBeginUpdateAPrivateEndpointConnectionOptions)` to `(context.Context, string, string, string, PrivateEndpointConnection, *DiskAccessesClientBeginUpdateAPrivateEndpointConnectionOptions)` +- Function `*DiskAccessesClient.BeginUpdateAPrivateEndpointConnection` return value(s) have been changed from `(DiskAccessesUpdateAPrivateEndpointConnectionPollerResponse, error)` to `(DiskAccessesClientUpdateAPrivateEndpointConnectionPollerResponse, error)` +- Function `*CloudServiceRolesClient.List` parameter(s) have been changed from `(string, string, *CloudServiceRolesListOptions)` to `(string, string, *CloudServiceRolesClientListOptions)` +- Function `*CloudServiceRolesClient.List` return value(s) have been changed from `(*CloudServiceRolesListPager)` to `(*CloudServiceRolesClientListPager)` +- Function `*CloudServicesClient.BeginDeleteInstances` parameter(s) have been changed from `(context.Context, string, string, *CloudServicesBeginDeleteInstancesOptions)` to `(context.Context, string, string, *CloudServicesClientBeginDeleteInstancesOptions)` +- Function `*CloudServicesClient.BeginDeleteInstances` return value(s) have been changed from `(CloudServicesDeleteInstancesPollerResponse, error)` to `(CloudServicesClientDeleteInstancesPollerResponse, error)` +- Function `*VirtualMachineScaleSetVMExtensionsClient.BeginUpdate` parameter(s) have been changed from `(context.Context, string, string, string, string, VirtualMachineScaleSetVMExtensionUpdate, *VirtualMachineScaleSetVMExtensionsBeginUpdateOptions)` to `(context.Context, string, string, string, string, VirtualMachineScaleSetVMExtensionUpdate, 
*VirtualMachineScaleSetVMExtensionsClientBeginUpdateOptions)` +- Function `*VirtualMachineScaleSetVMExtensionsClient.BeginUpdate` return value(s) have been changed from `(VirtualMachineScaleSetVMExtensionsUpdatePollerResponse, error)` to `(VirtualMachineScaleSetVMExtensionsClientUpdatePollerResponse, error)` +- Function `*CommunityGalleryImageVersionsClient.Get` parameter(s) have been changed from `(context.Context, string, string, string, string, *CommunityGalleryImageVersionsGetOptions)` to `(context.Context, string, string, string, string, *CommunityGalleryImageVersionsClientGetOptions)` +- Function `*CommunityGalleryImageVersionsClient.Get` return value(s) have been changed from `(CommunityGalleryImageVersionsGetResponse, error)` to `(CommunityGalleryImageVersionsClientGetResponse, error)` +- Function `*GalleryApplicationVersionsClient.BeginCreateOrUpdate` parameter(s) have been changed from `(context.Context, string, string, string, string, GalleryApplicationVersion, *GalleryApplicationVersionsBeginCreateOrUpdateOptions)` to `(context.Context, string, string, string, string, GalleryApplicationVersion, *GalleryApplicationVersionsClientBeginCreateOrUpdateOptions)` +- Function `*GalleryApplicationVersionsClient.BeginCreateOrUpdate` return value(s) have been changed from `(GalleryApplicationVersionsCreateOrUpdatePollerResponse, error)` to `(GalleryApplicationVersionsClientCreateOrUpdatePollerResponse, error)` +- Function `*DisksClient.BeginRevokeAccess` parameter(s) have been changed from `(context.Context, string, string, *DisksBeginRevokeAccessOptions)` to `(context.Context, string, string, *DisksClientBeginRevokeAccessOptions)` +- Function `*DisksClient.BeginRevokeAccess` return value(s) have been changed from `(DisksRevokeAccessPollerResponse, error)` to `(DisksClientRevokeAccessPollerResponse, error)` +- Function `*CloudServiceOperatingSystemsClient.GetOSFamily` parameter(s) have been changed from `(context.Context, string, string, *CloudServiceOperatingSystemsGetOSFamilyOptions)` to `(context.Context, string, string, *CloudServiceOperatingSystemsClientGetOSFamilyOptions)` +- Function `*CloudServiceOperatingSystemsClient.GetOSFamily` return value(s) have been changed from `(CloudServiceOperatingSystemsGetOSFamilyResponse, error)` to `(CloudServiceOperatingSystemsClientGetOSFamilyResponse, error)` +- Function `*SSHPublicKeysClient.Create` parameter(s) have been changed from `(context.Context, string, string, SSHPublicKeyResource, *SSHPublicKeysCreateOptions)` to `(context.Context, string, string, SSHPublicKeyResource, *SSHPublicKeysClientCreateOptions)` +- Function `*SSHPublicKeysClient.Create` return value(s) have been changed from `(SSHPublicKeysCreateResponse, error)` to `(SSHPublicKeysClientCreateResponse, error)` +- Function `*DedicatedHostGroupsClient.Get` parameter(s) have been changed from `(context.Context, string, string, *DedicatedHostGroupsGetOptions)` to `(context.Context, string, string, *DedicatedHostGroupsClientGetOptions)` +- Function `*DedicatedHostGroupsClient.Get` return value(s) have been changed from `(DedicatedHostGroupsGetResponse, error)` to `(DedicatedHostGroupsClientGetResponse, error)` +- Function `*GalleryApplicationVersionsClient.BeginUpdate` parameter(s) have been changed from `(context.Context, string, string, string, string, GalleryApplicationVersionUpdate, *GalleryApplicationVersionsBeginUpdateOptions)` to `(context.Context, string, string, string, string, GalleryApplicationVersionUpdate, *GalleryApplicationVersionsClientBeginUpdateOptions)` +- 
Function `*GalleryApplicationVersionsClient.BeginUpdate` return value(s) have been changed from `(GalleryApplicationVersionsUpdatePollerResponse, error)` to `(GalleryApplicationVersionsClientUpdatePollerResponse, error)` +- Function `*VirtualMachineScaleSetVMsClient.BeginPerformMaintenance` parameter(s) have been changed from `(context.Context, string, string, string, *VirtualMachineScaleSetVMsBeginPerformMaintenanceOptions)` to `(context.Context, string, string, string, *VirtualMachineScaleSetVMsClientBeginPerformMaintenanceOptions)` +- Function `*VirtualMachineScaleSetVMsClient.BeginPerformMaintenance` return value(s) have been changed from `(VirtualMachineScaleSetVMsPerformMaintenancePollerResponse, error)` to `(VirtualMachineScaleSetVMsClientPerformMaintenancePollerResponse, error)` +- Function `*DedicatedHostsClient.ListByHostGroup` parameter(s) have been changed from `(string, string, *DedicatedHostsListByHostGroupOptions)` to `(string, string, *DedicatedHostsClientListByHostGroupOptions)` +- Function `*DedicatedHostsClient.ListByHostGroup` return value(s) have been changed from `(*DedicatedHostsListByHostGroupPager)` to `(*DedicatedHostsClientListByHostGroupPager)` +- Function `*VirtualMachineScaleSetRollingUpgradesClient.GetLatest` parameter(s) have been changed from `(context.Context, string, string, *VirtualMachineScaleSetRollingUpgradesGetLatestOptions)` to `(context.Context, string, string, *VirtualMachineScaleSetRollingUpgradesClientGetLatestOptions)` +- Function `*VirtualMachineScaleSetRollingUpgradesClient.GetLatest` return value(s) have been changed from `(VirtualMachineScaleSetRollingUpgradesGetLatestResponse, error)` to `(VirtualMachineScaleSetRollingUpgradesClientGetLatestResponse, error)` +- Function `*VirtualMachineScaleSetRollingUpgradesClient.BeginStartOSUpgrade` parameter(s) have been changed from `(context.Context, string, string, *VirtualMachineScaleSetRollingUpgradesBeginStartOSUpgradeOptions)` to `(context.Context, string, string, *VirtualMachineScaleSetRollingUpgradesClientBeginStartOSUpgradeOptions)` +- Function `*VirtualMachineScaleSetRollingUpgradesClient.BeginStartOSUpgrade` return value(s) have been changed from `(VirtualMachineScaleSetRollingUpgradesStartOSUpgradePollerResponse, error)` to `(VirtualMachineScaleSetRollingUpgradesClientStartOSUpgradePollerResponse, error)` +- Function `*VirtualMachineScaleSetVMsClient.RetrieveBootDiagnosticsData` parameter(s) have been changed from `(context.Context, string, string, string, *VirtualMachineScaleSetVMsRetrieveBootDiagnosticsDataOptions)` to `(context.Context, string, string, string, *VirtualMachineScaleSetVMsClientRetrieveBootDiagnosticsDataOptions)` +- Function `*VirtualMachineScaleSetVMsClient.RetrieveBootDiagnosticsData` return value(s) have been changed from `(VirtualMachineScaleSetVMsRetrieveBootDiagnosticsDataResponse, error)` to `(VirtualMachineScaleSetVMsClientRetrieveBootDiagnosticsDataResponse, error)` +- Function `*UsageClient.List` parameter(s) have been changed from `(string, *UsageListOptions)` to `(string, *UsageClientListOptions)` +- Function `*UsageClient.List` return value(s) have been changed from `(*UsageListPager)` to `(*UsageClientListPager)` +- Function `*VirtualMachineImagesClient.ListPublishers` parameter(s) have been changed from `(context.Context, string, *VirtualMachineImagesListPublishersOptions)` to `(context.Context, string, *VirtualMachineImagesClientListPublishersOptions)` +- Function `*VirtualMachineImagesClient.ListPublishers` return value(s) have been changed from 
`(VirtualMachineImagesListPublishersResponse, error)` to `(VirtualMachineImagesClientListPublishersResponse, error)` +- Function `*DisksClient.BeginUpdate` parameter(s) have been changed from `(context.Context, string, string, DiskUpdate, *DisksBeginUpdateOptions)` to `(context.Context, string, string, DiskUpdate, *DisksClientBeginUpdateOptions)` +- Function `*DisksClient.BeginUpdate` return value(s) have been changed from `(DisksUpdatePollerResponse, error)` to `(DisksClientUpdatePollerResponse, error)` +- Function `*CloudServiceRoleInstancesClient.BeginRestart` parameter(s) have been changed from `(context.Context, string, string, string, *CloudServiceRoleInstancesBeginRestartOptions)` to `(context.Context, string, string, string, *CloudServiceRoleInstancesClientBeginRestartOptions)` +- Function `*CloudServiceRoleInstancesClient.BeginRestart` return value(s) have been changed from `(CloudServiceRoleInstancesRestartPollerResponse, error)` to `(CloudServiceRoleInstancesClientRestartPollerResponse, error)` +- Function `*DiskEncryptionSetsClient.ListByResourceGroup` parameter(s) have been changed from `(string, *DiskEncryptionSetsListByResourceGroupOptions)` to `(string, *DiskEncryptionSetsClientListByResourceGroupOptions)` +- Function `*DiskEncryptionSetsClient.ListByResourceGroup` return value(s) have been changed from `(*DiskEncryptionSetsListByResourceGroupPager)` to `(*DiskEncryptionSetsClientListByResourceGroupPager)` +- Function `*VirtualMachineScaleSetsClient.BeginRestart` parameter(s) have been changed from `(context.Context, string, string, *VirtualMachineScaleSetsBeginRestartOptions)` to `(context.Context, string, string, *VirtualMachineScaleSetsClientBeginRestartOptions)` +- Function `*VirtualMachineScaleSetsClient.BeginRestart` return value(s) have been changed from `(VirtualMachineScaleSetsRestartPollerResponse, error)` to `(VirtualMachineScaleSetsClientRestartPollerResponse, error)` +- Function `*VirtualMachinesClient.BeginCreateOrUpdate` parameter(s) have been changed from `(context.Context, string, string, VirtualMachine, *VirtualMachinesBeginCreateOrUpdateOptions)` to `(context.Context, string, string, VirtualMachine, *VirtualMachinesClientBeginCreateOrUpdateOptions)` +- Function `*VirtualMachinesClient.BeginCreateOrUpdate` return value(s) have been changed from `(VirtualMachinesCreateOrUpdatePollerResponse, error)` to `(VirtualMachinesClientCreateOrUpdatePollerResponse, error)` +- Function `*CapacityReservationGroupsClient.ListByResourceGroup` parameter(s) have been changed from `(string, *CapacityReservationGroupsListByResourceGroupOptions)` to `(string, *CapacityReservationGroupsClientListByResourceGroupOptions)` +- Function `*CapacityReservationGroupsClient.ListByResourceGroup` return value(s) have been changed from `(*CapacityReservationGroupsListByResourceGroupPager)` to `(*CapacityReservationGroupsClientListByResourceGroupPager)` +- Function `*GalleryImagesClient.Get` parameter(s) have been changed from `(context.Context, string, string, string, *GalleryImagesGetOptions)` to `(context.Context, string, string, string, *GalleryImagesClientGetOptions)` +- Function `*GalleryImagesClient.Get` return value(s) have been changed from `(GalleryImagesGetResponse, error)` to `(GalleryImagesClientGetResponse, error)` +- Function `*CloudServicesClient.List` parameter(s) have been changed from `(string, *CloudServicesListOptions)` to `(string, *CloudServicesClientListOptions)` +- Function `*CloudServicesClient.List` return value(s) have been changed from 
`(*CloudServicesListPager)` to `(*CloudServicesClientListPager)` +- Function `*CloudServiceRoleInstancesClient.Get` parameter(s) have been changed from `(context.Context, string, string, string, *CloudServiceRoleInstancesGetOptions)` to `(context.Context, string, string, string, *CloudServiceRoleInstancesClientGetOptions)` +- Function `*CloudServiceRoleInstancesClient.Get` return value(s) have been changed from `(CloudServiceRoleInstancesGetResponse, error)` to `(CloudServiceRoleInstancesClientGetResponse, error)` +- Function `*DiskAccessesClient.GetPrivateLinkResources` parameter(s) have been changed from `(context.Context, string, string, *DiskAccessesGetPrivateLinkResourcesOptions)` to `(context.Context, string, string, *DiskAccessesClientGetPrivateLinkResourcesOptions)` +- Function `*DiskAccessesClient.GetPrivateLinkResources` return value(s) have been changed from `(DiskAccessesGetPrivateLinkResourcesResponse, error)` to `(DiskAccessesClientGetPrivateLinkResourcesResponse, error)` +- Function `*VirtualMachineScaleSetsClient.BeginRedeploy` parameter(s) have been changed from `(context.Context, string, string, *VirtualMachineScaleSetsBeginRedeployOptions)` to `(context.Context, string, string, *VirtualMachineScaleSetsClientBeginRedeployOptions)` +- Function `*VirtualMachineScaleSetsClient.BeginRedeploy` return value(s) have been changed from `(VirtualMachineScaleSetsRedeployPollerResponse, error)` to `(VirtualMachineScaleSetsClientRedeployPollerResponse, error)` +- Function `*DedicatedHostGroupsClient.ListBySubscription` parameter(s) have been changed from `(*DedicatedHostGroupsListBySubscriptionOptions)` to `(*DedicatedHostGroupsClientListBySubscriptionOptions)` +- Function `*DedicatedHostGroupsClient.ListBySubscription` return value(s) have been changed from `(*DedicatedHostGroupsListBySubscriptionPager)` to `(*DedicatedHostGroupsClientListBySubscriptionPager)` +- Function `*ImagesClient.ListByResourceGroup` parameter(s) have been changed from `(string, *ImagesListByResourceGroupOptions)` to `(string, *ImagesClientListByResourceGroupOptions)` +- Function `*ImagesClient.ListByResourceGroup` return value(s) have been changed from `(*ImagesListByResourceGroupPager)` to `(*ImagesClientListByResourceGroupPager)` +- Function `*DiskRestorePointClient.BeginGrantAccess` parameter(s) have been changed from `(context.Context, string, string, string, string, GrantAccessData, *DiskRestorePointBeginGrantAccessOptions)` to `(context.Context, string, string, string, string, GrantAccessData, *DiskRestorePointClientBeginGrantAccessOptions)` +- Function `*DiskRestorePointClient.BeginGrantAccess` return value(s) have been changed from `(DiskRestorePointGrantAccessPollerResponse, error)` to `(DiskRestorePointClientGrantAccessPollerResponse, error)` +- Function `*VirtualMachineScaleSetsClient.BeginDeallocate` parameter(s) have been changed from `(context.Context, string, string, *VirtualMachineScaleSetsBeginDeallocateOptions)` to `(context.Context, string, string, *VirtualMachineScaleSetsClientBeginDeallocateOptions)` +- Function `*VirtualMachineScaleSetsClient.BeginDeallocate` return value(s) have been changed from `(VirtualMachineScaleSetsDeallocatePollerResponse, error)` to `(VirtualMachineScaleSetsClientDeallocatePollerResponse, error)` +- Function `*ImagesClient.Get` parameter(s) have been changed from `(context.Context, string, string, *ImagesGetOptions)` to `(context.Context, string, string, *ImagesClientGetOptions)` +- Function `*ImagesClient.Get` return value(s) have been changed from 
`(ImagesGetResponse, error)` to `(ImagesClientGetResponse, error)` +- Function `*DisksClient.BeginCreateOrUpdate` parameter(s) have been changed from `(context.Context, string, string, Disk, *DisksBeginCreateOrUpdateOptions)` to `(context.Context, string, string, Disk, *DisksClientBeginCreateOrUpdateOptions)` +- Function `*DisksClient.BeginCreateOrUpdate` return value(s) have been changed from `(DisksCreateOrUpdatePollerResponse, error)` to `(DisksClientCreateOrUpdatePollerResponse, error)` +- Function `*CloudServiceRoleInstancesClient.BeginRebuild` parameter(s) have been changed from `(context.Context, string, string, string, *CloudServiceRoleInstancesBeginRebuildOptions)` to `(context.Context, string, string, string, *CloudServiceRoleInstancesClientBeginRebuildOptions)` +- Function `*CloudServiceRoleInstancesClient.BeginRebuild` return value(s) have been changed from `(CloudServiceRoleInstancesRebuildPollerResponse, error)` to `(CloudServiceRoleInstancesClientRebuildPollerResponse, error)` +- Function `*CloudServicesClient.BeginStart` parameter(s) have been changed from `(context.Context, string, string, *CloudServicesBeginStartOptions)` to `(context.Context, string, string, *CloudServicesClientBeginStartOptions)` +- Function `*CloudServicesClient.BeginStart` return value(s) have been changed from `(CloudServicesStartPollerResponse, error)` to `(CloudServicesClientStartPollerResponse, error)` +- Function `*DedicatedHostGroupsClient.Delete` parameter(s) have been changed from `(context.Context, string, string, *DedicatedHostGroupsDeleteOptions)` to `(context.Context, string, string, *DedicatedHostGroupsClientDeleteOptions)` +- Function `*DedicatedHostGroupsClient.Delete` return value(s) have been changed from `(DedicatedHostGroupsDeleteResponse, error)` to `(DedicatedHostGroupsClientDeleteResponse, error)` +- Function `*DiskAccessesClient.ListPrivateEndpointConnections` parameter(s) have been changed from `(string, string, *DiskAccessesListPrivateEndpointConnectionsOptions)` to `(string, string, *DiskAccessesClientListPrivateEndpointConnectionsOptions)` +- Function `*DiskAccessesClient.ListPrivateEndpointConnections` return value(s) have been changed from `(*DiskAccessesListPrivateEndpointConnectionsPager)` to `(*DiskAccessesClientListPrivateEndpointConnectionsPager)` +- Function `*GalleryImageVersionsClient.BeginDelete` parameter(s) have been changed from `(context.Context, string, string, string, string, *GalleryImageVersionsBeginDeleteOptions)` to `(context.Context, string, string, string, string, *GalleryImageVersionsClientBeginDeleteOptions)` +- Function `*GalleryImageVersionsClient.BeginDelete` return value(s) have been changed from `(GalleryImageVersionsDeletePollerResponse, error)` to `(GalleryImageVersionsClientDeletePollerResponse, error)` +- Function `*CloudServicesClient.BeginPowerOff` parameter(s) have been changed from `(context.Context, string, string, *CloudServicesBeginPowerOffOptions)` to `(context.Context, string, string, *CloudServicesClientBeginPowerOffOptions)` +- Function `*CloudServicesClient.BeginPowerOff` return value(s) have been changed from `(CloudServicesPowerOffPollerResponse, error)` to `(CloudServicesClientPowerOffPollerResponse, error)` +- Function `*GalleryApplicationVersionsClient.BeginDelete` parameter(s) have been changed from `(context.Context, string, string, string, string, *GalleryApplicationVersionsBeginDeleteOptions)` to `(context.Context, string, string, string, string, *GalleryApplicationVersionsClientBeginDeleteOptions)` +- Function 
`*GalleryApplicationVersionsClient.BeginDelete` return value(s) have been changed from `(GalleryApplicationVersionsDeletePollerResponse, error)` to `(GalleryApplicationVersionsClientDeletePollerResponse, error)` +- Function `*CapacityReservationsClient.BeginCreateOrUpdate` parameter(s) have been changed from `(context.Context, string, string, string, CapacityReservation, *CapacityReservationsBeginCreateOrUpdateOptions)` to `(context.Context, string, string, string, CapacityReservation, *CapacityReservationsClientBeginCreateOrUpdateOptions)` +- Function `*CapacityReservationsClient.BeginCreateOrUpdate` return value(s) have been changed from `(CapacityReservationsCreateOrUpdatePollerResponse, error)` to `(CapacityReservationsClientCreateOrUpdatePollerResponse, error)` +- Function `*DisksClient.BeginDelete` parameter(s) have been changed from `(context.Context, string, string, *DisksBeginDeleteOptions)` to `(context.Context, string, string, *DisksClientBeginDeleteOptions)` +- Function `*DisksClient.BeginDelete` return value(s) have been changed from `(DisksDeletePollerResponse, error)` to `(DisksClientDeletePollerResponse, error)` +- Function `*AvailabilitySetsClient.Update` parameter(s) have been changed from `(context.Context, string, string, AvailabilitySetUpdate, *AvailabilitySetsUpdateOptions)` to `(context.Context, string, string, AvailabilitySetUpdate, *AvailabilitySetsClientUpdateOptions)` +- Function `*AvailabilitySetsClient.Update` return value(s) have been changed from `(AvailabilitySetsUpdateResponse, error)` to `(AvailabilitySetsClientUpdateResponse, error)` +- Function `*SharedGalleryImageVersionsClient.Get` parameter(s) have been changed from `(context.Context, string, string, string, string, *SharedGalleryImageVersionsGetOptions)` to `(context.Context, string, string, string, string, *SharedGalleryImageVersionsClientGetOptions)` +- Function `*SharedGalleryImageVersionsClient.Get` return value(s) have been changed from `(SharedGalleryImageVersionsGetResponse, error)` to `(SharedGalleryImageVersionsClientGetResponse, error)` +- Function `*VirtualMachineSizesClient.List` parameter(s) have been changed from `(context.Context, string, *VirtualMachineSizesListOptions)` to `(context.Context, string, *VirtualMachineSizesClientListOptions)` +- Function `*VirtualMachineSizesClient.List` return value(s) have been changed from `(VirtualMachineSizesListResponse, error)` to `(VirtualMachineSizesClientListResponse, error)` +- Function `*VirtualMachineScaleSetsClient.BeginDelete` parameter(s) have been changed from `(context.Context, string, string, *VirtualMachineScaleSetsBeginDeleteOptions)` to `(context.Context, string, string, *VirtualMachineScaleSetsClientBeginDeleteOptions)` +- Function `*VirtualMachineScaleSetsClient.BeginDelete` return value(s) have been changed from `(VirtualMachineScaleSetsDeletePollerResponse, error)` to `(VirtualMachineScaleSetsClientDeletePollerResponse, error)` +- Function `*VirtualMachineRunCommandsClient.List` parameter(s) have been changed from `(string, *VirtualMachineRunCommandsListOptions)` to `(string, *VirtualMachineRunCommandsClientListOptions)` +- Function `*VirtualMachineRunCommandsClient.List` return value(s) have been changed from `(*VirtualMachineRunCommandsListPager)` to `(*VirtualMachineRunCommandsClientListPager)` +- Function `*VirtualMachineRunCommandsClient.BeginCreateOrUpdate` parameter(s) have been changed from `(context.Context, string, string, string, VirtualMachineRunCommand, *VirtualMachineRunCommandsBeginCreateOrUpdateOptions)` to 
`(context.Context, string, string, string, VirtualMachineRunCommand, *VirtualMachineRunCommandsClientBeginCreateOrUpdateOptions)` +- Function `*VirtualMachineRunCommandsClient.BeginCreateOrUpdate` return value(s) have been changed from `(VirtualMachineRunCommandsCreateOrUpdatePollerResponse, error)` to `(VirtualMachineRunCommandsClientCreateOrUpdatePollerResponse, error)` +- Function `*SSHPublicKeysClient.Update` parameter(s) have been changed from `(context.Context, string, string, SSHPublicKeyUpdateResource, *SSHPublicKeysUpdateOptions)` to `(context.Context, string, string, SSHPublicKeyUpdateResource, *SSHPublicKeysClientUpdateOptions)` +- Function `*SSHPublicKeysClient.Update` return value(s) have been changed from `(SSHPublicKeysUpdateResponse, error)` to `(SSHPublicKeysClientUpdateResponse, error)` +- Function `*VirtualMachineScaleSetsClient.BeginSetOrchestrationServiceState` parameter(s) have been changed from `(context.Context, string, string, OrchestrationServiceStateInput, *VirtualMachineScaleSetsBeginSetOrchestrationServiceStateOptions)` to `(context.Context, string, string, OrchestrationServiceStateInput, *VirtualMachineScaleSetsClientBeginSetOrchestrationServiceStateOptions)` +- Function `*VirtualMachineScaleSetsClient.BeginSetOrchestrationServiceState` return value(s) have been changed from `(VirtualMachineScaleSetsSetOrchestrationServiceStatePollerResponse, error)` to `(VirtualMachineScaleSetsClientSetOrchestrationServiceStatePollerResponse, error)` +- Function `*VirtualMachineScaleSetVMsClient.BeginPowerOff` parameter(s) have been changed from `(context.Context, string, string, string, *VirtualMachineScaleSetVMsBeginPowerOffOptions)` to `(context.Context, string, string, string, *VirtualMachineScaleSetVMsClientBeginPowerOffOptions)` +- Function `*VirtualMachineScaleSetVMsClient.BeginPowerOff` return value(s) have been changed from `(VirtualMachineScaleSetVMsPowerOffPollerResponse, error)` to `(VirtualMachineScaleSetVMsClientPowerOffPollerResponse, error)` +- Function `*AvailabilitySetsClient.CreateOrUpdate` parameter(s) have been changed from `(context.Context, string, string, AvailabilitySet, *AvailabilitySetsCreateOrUpdateOptions)` to `(context.Context, string, string, AvailabilitySet, *AvailabilitySetsClientCreateOrUpdateOptions)` +- Function `*AvailabilitySetsClient.CreateOrUpdate` return value(s) have been changed from `(AvailabilitySetsCreateOrUpdateResponse, error)` to `(AvailabilitySetsClientCreateOrUpdateResponse, error)` +- Function `*CloudServiceRoleInstancesClient.GetRemoteDesktopFile` parameter(s) have been changed from `(context.Context, string, string, string, *CloudServiceRoleInstancesGetRemoteDesktopFileOptions)` to `(context.Context, string, string, string, *CloudServiceRoleInstancesClientGetRemoteDesktopFileOptions)` +- Function `*CloudServiceRoleInstancesClient.GetRemoteDesktopFile` return value(s) have been changed from `(CloudServiceRoleInstancesGetRemoteDesktopFileResponse, error)` to `(CloudServiceRoleInstancesClientGetRemoteDesktopFileResponse, error)` +- Function `*CloudServicesUpdateDomainClient.GetUpdateDomain` parameter(s) have been changed from `(context.Context, string, string, int32, *CloudServicesUpdateDomainGetUpdateDomainOptions)` to `(context.Context, string, string, int32, *CloudServicesUpdateDomainClientGetUpdateDomainOptions)` +- Function `*CloudServicesUpdateDomainClient.GetUpdateDomain` return value(s) have been changed from `(CloudServicesUpdateDomainGetUpdateDomainResponse, error)` to 
`(CloudServicesUpdateDomainClientGetUpdateDomainResponse, error)` +- Function `*RestorePointCollectionsClient.List` parameter(s) have been changed from `(string, *RestorePointCollectionsListOptions)` to `(string, *RestorePointCollectionsClientListOptions)` +- Function `*RestorePointCollectionsClient.List` return value(s) have been changed from `(*RestorePointCollectionsListPager)` to `(*RestorePointCollectionsClientListPager)` +- Function `*CloudServicesClient.BeginRebuild` parameter(s) have been changed from `(context.Context, string, string, *CloudServicesBeginRebuildOptions)` to `(context.Context, string, string, *CloudServicesClientBeginRebuildOptions)` +- Function `*CloudServicesClient.BeginRebuild` return value(s) have been changed from `(CloudServicesRebuildPollerResponse, error)` to `(CloudServicesClientRebuildPollerResponse, error)` +- Function `*VirtualMachineScaleSetExtensionsClient.BeginUpdate` parameter(s) have been changed from `(context.Context, string, string, string, VirtualMachineScaleSetExtensionUpdate, *VirtualMachineScaleSetExtensionsBeginUpdateOptions)` to `(context.Context, string, string, string, VirtualMachineScaleSetExtensionUpdate, *VirtualMachineScaleSetExtensionsClientBeginUpdateOptions)` +- Function `*VirtualMachineScaleSetExtensionsClient.BeginUpdate` return value(s) have been changed from `(VirtualMachineScaleSetExtensionsUpdatePollerResponse, error)` to `(VirtualMachineScaleSetExtensionsClientUpdatePollerResponse, error)` +- Function `*VirtualMachinesClient.RetrieveBootDiagnosticsData` parameter(s) have been changed from `(context.Context, string, string, *VirtualMachinesRetrieveBootDiagnosticsDataOptions)` to `(context.Context, string, string, *VirtualMachinesClientRetrieveBootDiagnosticsDataOptions)` +- Function `*VirtualMachinesClient.RetrieveBootDiagnosticsData` return value(s) have been changed from `(VirtualMachinesRetrieveBootDiagnosticsDataResponse, error)` to `(VirtualMachinesClientRetrieveBootDiagnosticsDataResponse, error)` +- Function `*DiskAccessesClient.BeginDelete` parameter(s) have been changed from `(context.Context, string, string, *DiskAccessesBeginDeleteOptions)` to `(context.Context, string, string, *DiskAccessesClientBeginDeleteOptions)` +- Function `*DiskAccessesClient.BeginDelete` return value(s) have been changed from `(DiskAccessesDeletePollerResponse, error)` to `(DiskAccessesClientDeletePollerResponse, error)` +- Function `*VirtualMachinesClient.BeginConvertToManagedDisks` parameter(s) have been changed from `(context.Context, string, string, *VirtualMachinesBeginConvertToManagedDisksOptions)` to `(context.Context, string, string, *VirtualMachinesClientBeginConvertToManagedDisksOptions)` +- Function `*VirtualMachinesClient.BeginConvertToManagedDisks` return value(s) have been changed from `(VirtualMachinesConvertToManagedDisksPollerResponse, error)` to `(VirtualMachinesClientConvertToManagedDisksPollerResponse, error)` +- Function `*SSHPublicKeysClient.Get` parameter(s) have been changed from `(context.Context, string, string, *SSHPublicKeysGetOptions)` to `(context.Context, string, string, *SSHPublicKeysClientGetOptions)` +- Function `*SSHPublicKeysClient.Get` return value(s) have been changed from `(SSHPublicKeysGetResponse, error)` to `(SSHPublicKeysClientGetResponse, error)` +- Function `*DisksClient.Get` parameter(s) have been changed from `(context.Context, string, string, *DisksGetOptions)` to `(context.Context, string, string, *DisksClientGetOptions)` +- Function `*DisksClient.Get` return value(s) have been changed from 
`(DisksGetResponse, error)` to `(DisksClientGetResponse, error)` +- Function `*VirtualMachineImagesClient.List` parameter(s) have been changed from `(context.Context, string, string, string, string, *VirtualMachineImagesListOptions)` to `(context.Context, string, string, string, string, *VirtualMachineImagesClientListOptions)` +- Function `*VirtualMachineImagesClient.List` return value(s) have been changed from `(VirtualMachineImagesListResponse, error)` to `(VirtualMachineImagesClientListResponse, error)` +- Function `*SharedGalleryImagesClient.List` parameter(s) have been changed from `(string, string, *SharedGalleryImagesListOptions)` to `(string, string, *SharedGalleryImagesClientListOptions)` +- Function `*SharedGalleryImagesClient.List` return value(s) have been changed from `(*SharedGalleryImagesListPager)` to `(*SharedGalleryImagesClientListPager)` +- Function `*VirtualMachineRunCommandsClient.BeginUpdate` parameter(s) have been changed from `(context.Context, string, string, string, VirtualMachineRunCommandUpdate, *VirtualMachineRunCommandsBeginUpdateOptions)` to `(context.Context, string, string, string, VirtualMachineRunCommandUpdate, *VirtualMachineRunCommandsClientBeginUpdateOptions)` +- Function `*VirtualMachineRunCommandsClient.BeginUpdate` return value(s) have been changed from `(VirtualMachineRunCommandsUpdatePollerResponse, error)` to `(VirtualMachineRunCommandsClientUpdatePollerResponse, error)` +- Function `*VirtualMachineScaleSetVMsClient.BeginDeallocate` parameter(s) have been changed from `(context.Context, string, string, string, *VirtualMachineScaleSetVMsBeginDeallocateOptions)` to `(context.Context, string, string, string, *VirtualMachineScaleSetVMsClientBeginDeallocateOptions)` +- Function `*VirtualMachineScaleSetVMsClient.BeginDeallocate` return value(s) have been changed from `(VirtualMachineScaleSetVMsDeallocatePollerResponse, error)` to `(VirtualMachineScaleSetVMsClientDeallocatePollerResponse, error)` +- Function `*DiskAccessesClient.List` parameter(s) have been changed from `(*DiskAccessesListOptions)` to `(*DiskAccessesClientListOptions)` +- Function `*DiskAccessesClient.List` return value(s) have been changed from `(*DiskAccessesListPager)` to `(*DiskAccessesClientListPager)` +- Function `*CloudServiceRoleInstancesClient.List` parameter(s) have been changed from `(string, string, *CloudServiceRoleInstancesListOptions)` to `(string, string, *CloudServiceRoleInstancesClientListOptions)` +- Function `*CloudServiceRoleInstancesClient.List` return value(s) have been changed from `(*CloudServiceRoleInstancesListPager)` to `(*CloudServiceRoleInstancesClientListPager)` +- Function `*RestorePointCollectionsClient.Update` parameter(s) have been changed from `(context.Context, string, string, RestorePointCollectionUpdate, *RestorePointCollectionsUpdateOptions)` to `(context.Context, string, string, RestorePointCollectionUpdate, *RestorePointCollectionsClientUpdateOptions)` +- Function `*RestorePointCollectionsClient.Update` return value(s) have been changed from `(RestorePointCollectionsUpdateResponse, error)` to `(RestorePointCollectionsClientUpdateResponse, error)` +- Function `*GalleriesClient.BeginUpdate` parameter(s) have been changed from `(context.Context, string, string, GalleryUpdate, *GalleriesBeginUpdateOptions)` to `(context.Context, string, string, GalleryUpdate, *GalleriesClientBeginUpdateOptions)` +- Function `*GalleriesClient.BeginUpdate` return value(s) have been changed from `(GalleriesUpdatePollerResponse, error)` to 
`(GalleriesClientUpdatePollerResponse, error)` +- Function `*GalleryImageVersionsClient.ListByGalleryImage` parameter(s) have been changed from `(string, string, string, *GalleryImageVersionsListByGalleryImageOptions)` to `(string, string, string, *GalleryImageVersionsClientListByGalleryImageOptions)` +- Function `*GalleryImageVersionsClient.ListByGalleryImage` return value(s) have been changed from `(*GalleryImageVersionsListByGalleryImagePager)` to `(*GalleryImageVersionsClientListByGalleryImagePager)` +- Function `*VirtualMachineScaleSetExtensionsClient.BeginDelete` parameter(s) have been changed from `(context.Context, string, string, string, *VirtualMachineScaleSetExtensionsBeginDeleteOptions)` to `(context.Context, string, string, string, *VirtualMachineScaleSetExtensionsClientBeginDeleteOptions)` +- Function `*VirtualMachineScaleSetExtensionsClient.BeginDelete` return value(s) have been changed from `(VirtualMachineScaleSetExtensionsDeletePollerResponse, error)` to `(VirtualMachineScaleSetExtensionsClientDeletePollerResponse, error)` +- Function `*VirtualMachineScaleSetVMsClient.BeginUpdate` parameter(s) have been changed from `(context.Context, string, string, string, VirtualMachineScaleSetVM, *VirtualMachineScaleSetVMsBeginUpdateOptions)` to `(context.Context, string, string, string, VirtualMachineScaleSetVM, *VirtualMachineScaleSetVMsClientBeginUpdateOptions)` +- Function `*VirtualMachineScaleSetVMsClient.BeginUpdate` return value(s) have been changed from `(VirtualMachineScaleSetVMsUpdatePollerResponse, error)` to `(VirtualMachineScaleSetVMsClientUpdatePollerResponse, error)` +- Function `*VirtualMachineImagesEdgeZoneClient.ListSKUs` parameter(s) have been changed from `(context.Context, string, string, string, string, *VirtualMachineImagesEdgeZoneListSKUsOptions)` to `(context.Context, string, string, string, string, *VirtualMachineImagesEdgeZoneClientListSKUsOptions)` +- Function `*VirtualMachineImagesEdgeZoneClient.ListSKUs` return value(s) have been changed from `(VirtualMachineImagesEdgeZoneListSKUsResponse, error)` to `(VirtualMachineImagesEdgeZoneClientListSKUsResponse, error)` +- Function `VirtualMachinesPerformMaintenancePollerResponse.PollUntilDone` has been removed +- Function `*CapacityReservationsUpdatePoller.FinalResponse` has been removed +- Function `*VirtualMachineScaleSetVMsReimageAllPoller.Poll` has been removed +- Function `*DedicatedHostsUpdatePollerResponse.Resume` has been removed +- Function `*VirtualMachinesListPager.Err` has been removed +- Function `VirtualMachineScaleSetExtensionsCreateOrUpdatePollerResponse.PollUntilDone` has been removed +- Function `*VirtualMachinesDeletePollerResponse.Resume` has been removed +- Function `*CloudServiceRoleInstancesReimagePoller.ResumeToken` has been removed +- Function `*SnapshotsUpdatePoller.ResumeToken` has been removed +- Function `VirtualMachineScaleSetsRestartPollerResponse.PollUntilDone` has been removed +- Function `*VirtualMachinesConvertToManagedDisksPoller.Poll` has been removed +- Function `*GalleryImageVersionsListByGalleryImagePager.PageResponse` has been removed +- Function `VirtualMachineScaleSetVMExtensionsDeletePollerResponse.PollUntilDone` has been removed +- Function `*RestorePointCollectionsListPager.PageResponse` has been removed +- Function `*DedicatedHostsDeletePollerResponse.Resume` has been removed +- Function `*ImagesListPager.Err` has been removed +- Function `*VirtualMachinesReimagePoller.Poll` has been removed +- Function `*DedicatedHostsCreateOrUpdatePoller.Done` has been removed +- 
Function `VirtualMachinesReimagePollerResponse.PollUntilDone` has been removed +- Function `*CloudServicesReimagePoller.FinalResponse` has been removed +- Function `*CloudServicesRebuildPoller.Poll` has been removed +- Function `*VirtualMachineScaleSetsCreateOrUpdatePoller.ResumeToken` has been removed +- Function `*VirtualMachineScaleSetsRestartPollerResponse.Resume` has been removed +- Function `*VirtualMachineScaleSetVMsStartPoller.ResumeToken` has been removed +- Function `*VirtualMachineScaleSetsDeletePoller.Poll` has been removed +- Function `*VirtualMachinesListAllPager.PageResponse` has been removed +- Function `*VirtualMachineExtensionsCreateOrUpdatePoller.ResumeToken` has been removed +- Function `*VirtualMachineExtensionsUpdatePollerResponse.Resume` has been removed +- Function `*VirtualMachineScaleSetRollingUpgradesStartExtensionUpgradePoller.Poll` has been removed +- Function `VirtualMachinesAssessPatchesPollerResponse.PollUntilDone` has been removed +- Function `*DiskAccessesUpdateAPrivateEndpointConnectionPoller.Done` has been removed +- Function `*VirtualMachineRunCommandsDeletePollerResponse.Resume` has been removed +- Function `ImagesUpdatePollerResponse.PollUntilDone` has been removed +- Function `*AvailabilitySetsListPager.PageResponse` has been removed +- Function `*GalleryApplicationVersionsUpdatePoller.ResumeToken` has been removed +- Function `*AvailabilitySetsListBySubscriptionPager.Err` has been removed +- Function `*VirtualMachineScaleSetsListByLocationPager.NextPage` has been removed +- Function `*CloudServicesRestartPoller.FinalResponse` has been removed +- Function `*VirtualMachineScaleSetsListAllPager.NextPage` has been removed +- Function `*VirtualMachineScaleSetsDeletePoller.ResumeToken` has been removed +- Function `*GalleryImageVersionsDeletePoller.Poll` has been removed +- Function `*VirtualMachineScaleSetRollingUpgradesStartOSUpgradePollerResponse.Resume` has been removed +- Function `*GalleriesUpdatePollerResponse.Resume` has been removed +- Function `*VirtualMachineScaleSetVMRunCommandsCreateOrUpdatePoller.Done` has been removed +- Function `*VirtualMachineRunCommandsUpdatePoller.ResumeToken` has been removed +- Function `*VirtualMachineScaleSetVMExtensionsCreateOrUpdatePoller.Poll` has been removed +- Function `*VirtualMachineScaleSetVMExtensionsUpdatePollerResponse.Resume` has been removed +- Function `*CapacityReservationGroupsListBySubscriptionPager.PageResponse` has been removed +- Function `VirtualMachineScaleSetsStartPollerResponse.PollUntilDone` has been removed +- Function `*SnapshotsListPager.NextPage` has been removed +- Function `VirtualMachineScaleSetRollingUpgradesStartOSUpgradePollerResponse.PollUntilDone` has been removed +- Function `GalleryApplicationVersionsCreateOrUpdatePollerResponse.PollUntilDone` has been removed +- Function `*CloudServicesReimagePoller.Done` has been removed +- Function `*CloudServicesCreateOrUpdatePoller.FinalResponse` has been removed +- Function `*CloudServicesRebuildPoller.FinalResponse` has been removed +- Function `*VirtualMachineExtensionsCreateOrUpdatePollerResponse.Resume` has been removed +- Function `*VirtualMachineScaleSetVMRunCommandsCreateOrUpdatePoller.ResumeToken` has been removed +- Function `*VirtualMachineScaleSetExtensionsListPager.NextPage` has been removed +- Function `*RestorePointsCreatePoller.ResumeToken` has been removed +- Function `*GalleryImagesUpdatePoller.Done` has been removed +- Function `*DisksGrantAccessPollerResponse.Resume` has been removed +- Function 
`*CloudServicesUpdateDomainWalkUpdateDomainPoller.ResumeToken` has been removed +- Function `*ImagesCreateOrUpdatePoller.FinalResponse` has been removed +- Function `*VirtualMachineScaleSetVMRunCommandsUpdatePoller.ResumeToken` has been removed +- Function `*RestorePointCollectionsListAllPager.Err` has been removed +- Function `*VirtualMachineScaleSetsSetOrchestrationServiceStatePoller.ResumeToken` has been removed +- Function `*DiskAccessesUpdatePollerResponse.Resume` has been removed +- Function `*VirtualMachinesStartPoller.FinalResponse` has been removed +- Function `*ImagesUpdatePoller.FinalResponse` has been removed +- Function `*GalleriesUpdatePoller.FinalResponse` has been removed +- Function `*SharedGalleryImageVersionsListPager.NextPage` has been removed +- Function `*SnapshotsListPager.PageResponse` has been removed +- Function `*GalleryImagesUpdatePoller.Poll` has been removed +- Function `*VirtualMachineRunCommandsListByVirtualMachinePager.Err` has been removed +- Function `*VirtualMachineScaleSetsCreateOrUpdatePoller.Poll` has been removed +- Function `*CloudServicesDeletePoller.ResumeToken` has been removed +- Function `*VirtualMachineScaleSetsCreateOrUpdatePoller.FinalResponse` has been removed +- Function `*GalleryImageVersionsCreateOrUpdatePoller.Done` has been removed +- Function `*VirtualMachinesRestartPoller.ResumeToken` has been removed +- Function `*GalleryApplicationVersionsDeletePoller.FinalResponse` has been removed +- Function `*RestorePointCollectionsListAllPager.PageResponse` has been removed +- Function `*CloudServiceRoleInstancesRestartPoller.ResumeToken` has been removed +- Function `*CloudServiceRoleInstancesRestartPollerResponse.Resume` has been removed +- Function `VirtualMachineScaleSetVMsDeallocatePollerResponse.PollUntilDone` has been removed +- Function `*DedicatedHostsUpdatePoller.Done` has been removed +- Function `*VirtualMachineScaleSetsListAllPager.PageResponse` has been removed +- Function `*DiskAccessesListPrivateEndpointConnectionsPager.PageResponse` has been removed +- Function `*VirtualMachinesCreateOrUpdatePollerResponse.Resume` has been removed +- Function `*ResourceSKUsListPager.PageResponse` has been removed +- Function `*CloudServiceRoleInstancesRebuildPoller.ResumeToken` has been removed +- Function `*GalleryImagesListByGalleryPager.Err` has been removed +- Function `*DedicatedHostGroupsListByResourceGroupPager.Err` has been removed +- Function `*SnapshotsListByResourceGroupPager.NextPage` has been removed +- Function `CloudError.Error` has been removed +- Function `VirtualMachineScaleSetUpdateNetworkConfiguration.MarshalJSON` has been removed +- Function `*CloudServicesDeleteInstancesPoller.FinalResponse` has been removed +- Function `*VirtualMachineScaleSetExtensionsUpdatePoller.Done` has been removed +- Function `*ImagesDeletePoller.ResumeToken` has been removed +- Function `*DiskAccessesCreateOrUpdatePoller.Done` has been removed +- Function `*VirtualMachinesListAllPager.Err` has been removed +- Function `VirtualMachinesStartPollerResponse.PollUntilDone` has been removed +- Function `*DiskAccessesUpdateAPrivateEndpointConnectionPoller.Poll` has been removed +- Function `*CloudServicesUpdateDomainWalkUpdateDomainPoller.Poll` has been removed +- Function `*VirtualMachineScaleSetVMsDeletePoller.FinalResponse` has been removed +- Function `VirtualMachineScaleSetsPowerOffPollerResponse.PollUntilDone` has been removed +- Function `VirtualMachineScaleSetUpdateIPConfiguration.MarshalJSON` has been removed +- Function 
`*VirtualMachineExtensionsUpdatePoller.Done` has been removed +- Function `*VirtualMachineRunCommandsDeletePoller.ResumeToken` has been removed +- Function `VirtualMachinesDeletePollerResponse.PollUntilDone` has been removed +- Function `*VirtualMachineScaleSetsPowerOffPollerResponse.Resume` has been removed +- Function `*SnapshotsDeletePoller.Done` has been removed +- Function `VirtualMachineScaleSetNetworkConfiguration.MarshalJSON` has been removed +- Function `*CapacityReservationsUpdatePoller.ResumeToken` has been removed +- Function `*DedicatedHostsUpdatePoller.Poll` has been removed +- Function `*CapacityReservationsUpdatePoller.Poll` has been removed +- Function `*DiskEncryptionSetsCreateOrUpdatePoller.Done` has been removed +- Function `*DiskRestorePointListByRestorePointPager.Err` has been removed +- Function `VirtualMachineScaleSetsDeleteInstancesPollerResponse.PollUntilDone` has been removed +- Function `*GalleriesUpdatePoller.Poll` has been removed +- Function `VirtualMachineRunCommandsDeletePollerResponse.PollUntilDone` has been removed +- Function `*VirtualMachinesDeallocatePoller.FinalResponse` has been removed +- Function `*GalleriesUpdatePoller.Done` has been removed +- Function `VirtualMachineScaleSetVMExtensionsUpdatePollerResponse.PollUntilDone` has been removed +- Function `CloudServiceRoleInstancesRestartPollerResponse.PollUntilDone` has been removed +- Function `*VirtualMachineRunCommandsCreateOrUpdatePoller.Done` has been removed +- Function `*CapacityReservationsDeletePoller.Poll` has been removed +- Function `*GalleriesListPager.NextPage` has been removed +- Function `*DiskEncryptionSetsDeletePollerResponse.Resume` has been removed +- Function `*VirtualMachineRunCommandsDeletePoller.Done` has been removed +- Function `*VirtualMachinesPowerOffPoller.Poll` has been removed +- Function `*GalleryImagesCreateOrUpdatePoller.FinalResponse` has been removed +- Function `*VirtualMachineScaleSetVMRunCommandsCreateOrUpdatePoller.Poll` has been removed +- Function `*VirtualMachinesCreateOrUpdatePoller.Poll` has been removed +- Function `*ImagesDeletePoller.Poll` has been removed +- Function `*VirtualMachinesUpdatePoller.Poll` has been removed +- Function `*GalleryApplicationsDeletePoller.FinalResponse` has been removed +- Function `*VirtualMachineScaleSetsGetOSUpgradeHistoryPager.PageResponse` has been removed +- Function `*SharedGalleryImagesListPager.PageResponse` has been removed +- Function `*GalleryImagesUpdatePoller.ResumeToken` has been removed +- Function `*VirtualMachineScaleSetExtensionsUpdatePollerResponse.Resume` has been removed +- Function `*VirtualMachineScaleSetsReimageAllPollerResponse.Resume` has been removed +- Function `*CloudServiceRoleInstancesDeletePoller.Done` has been removed +- Function `CloudServicesPowerOffPollerResponse.PollUntilDone` has been removed +- Function `*CloudServicesReimagePoller.ResumeToken` has been removed +- Function `*CloudServicesUpdateDomainListUpdateDomainsPager.NextPage` has been removed +- Function `*DedicatedHostsUpdatePoller.FinalResponse` has been removed +- Function `ManagedDiskParameters.MarshalJSON` has been removed +- Function `VirtualMachineScaleSetVMsPowerOffPollerResponse.PollUntilDone` has been removed +- Function `*VirtualMachinesRedeployPollerResponse.Resume` has been removed +- Function `*DiskAccessesUpdateAPrivateEndpointConnectionPollerResponse.Resume` has been removed +- Function `VirtualMachinesInstallPatchesPollerResponse.PollUntilDone` has been removed +- Function 
`*DiskEncryptionSetsCreateOrUpdatePoller.FinalResponse` has been removed +- Function `*VirtualMachinesCapturePollerResponse.Resume` has been removed +- Function `*DiskAccessesListPrivateEndpointConnectionsPager.Err` has been removed +- Function `*DiskAccessesDeletePoller.FinalResponse` has been removed +- Function `*VirtualMachineScaleSetExtensionsCreateOrUpdatePoller.Poll` has been removed +- Function `*VirtualMachineScaleSetsListSKUsPager.PageResponse` has been removed +- Function `VirtualMachineScaleSetsPerformMaintenancePollerResponse.PollUntilDone` has been removed +- Function `*CloudServicesDeletePollerResponse.Resume` has been removed +- Function `*CloudServicesListAllPager.NextPage` has been removed +- Function `*VirtualMachinesUpdatePoller.FinalResponse` has been removed +- Function `*DisksUpdatePoller.ResumeToken` has been removed +- Function `*VirtualMachineScaleSetVMsPowerOffPoller.ResumeToken` has been removed +- Function `*DedicatedHostsDeletePoller.Poll` has been removed +- Function `CapacityReservationsUpdatePollerResponse.PollUntilDone` has been removed +- Function `*VirtualMachineScaleSetVMExtensionsUpdatePoller.ResumeToken` has been removed +- Function `*CloudServiceOperatingSystemsListOSFamiliesPager.Err` has been removed +- Function `*VirtualMachineScaleSetsRedeployPoller.ResumeToken` has been removed +- Function `*VirtualMachineScaleSetsDeletePoller.Done` has been removed +- Function `*LogAnalyticsExportThrottledRequestsPollerResponse.Resume` has been removed +- Function `*VirtualMachineScaleSetVMsRunCommandPoller.FinalResponse` has been removed +- Function `*VirtualMachinesRunCommandPollerResponse.Resume` has been removed +- Function `*ImagesListByResourceGroupPager.Err` has been removed +- Function `*DisksGrantAccessPoller.ResumeToken` has been removed +- Function `*DisksCreateOrUpdatePoller.FinalResponse` has been removed +- Function `*VirtualMachineExtensionsDeletePoller.FinalResponse` has been removed +- Function `*DiskEncryptionSetsListAssociatedResourcesPager.PageResponse` has been removed +- Function `*VirtualMachinesAssessPatchesPoller.ResumeToken` has been removed +- Function `*GalleryImageVersionsCreateOrUpdatePoller.Poll` has been removed +- Function `*DiskEncryptionSetsUpdatePollerResponse.Resume` has been removed +- Function `*VirtualMachineScaleSetVMsUpdatePollerResponse.Resume` has been removed +- Function `*DiskAccessesUpdatePoller.FinalResponse` has been removed +- Function `*DiskAccessesDeletePoller.Poll` has been removed +- Function `*VirtualMachineScaleSetVMsRestartPoller.Poll` has been removed +- Function `*CloudServicesDeleteInstancesPoller.ResumeToken` has been removed +- Function `CapacityReservationsDeletePollerResponse.PollUntilDone` has been removed +- Function `*VirtualMachineScaleSetsGetOSUpgradeHistoryPager.NextPage` has been removed +- Function `*CloudServicesCreateOrUpdatePoller.ResumeToken` has been removed +- Function `LogAnalyticsExportThrottledRequestsPollerResponse.PollUntilDone` has been removed +- Function `*GalleriesCreateOrUpdatePoller.FinalResponse` has been removed +- Function `DiskAccessesCreateOrUpdatePollerResponse.PollUntilDone` has been removed +- Function `*CloudServicesDeletePoller.Done` has been removed +- Function `*VirtualMachineScaleSetsUpdateInstancesPoller.ResumeToken` has been removed +- Function `*VirtualMachineScaleSetVMsPowerOffPoller.Poll` has been removed +- Function `*DisksCreateOrUpdatePoller.Done` has been removed +- Function `*DisksUpdatePoller.Poll` has been removed +- Function 
`*RestorePointsDeletePoller.FinalResponse` has been removed +- Function `*CloudServicesStartPoller.ResumeToken` has been removed +- Function `*VirtualMachinesReapplyPoller.Poll` has been removed +- Function `*VirtualMachineScaleSetVMsReimagePoller.ResumeToken` has been removed +- Function `*CloudServicesUpdateDomainListUpdateDomainsPager.PageResponse` has been removed +- Function `*VirtualMachineScaleSetsUpdatePoller.Done` has been removed +- Function `*VirtualMachineScaleSetsDeletePoller.FinalResponse` has been removed +- Function `*VirtualMachinesRedeployPoller.ResumeToken` has been removed +- Function `*VirtualMachinesRedeployPoller.Done` has been removed +- Function `GalleryApplicationsDeletePollerResponse.PollUntilDone` has been removed +- Function `*VirtualMachineScaleSetVMRunCommandsUpdatePoller.Poll` has been removed +- Function `DisksDeletePollerResponse.PollUntilDone` has been removed +- Function `*SnapshotsDeletePoller.ResumeToken` has been removed +- Function `*DisksListByResourceGroupPager.Err` has been removed +- Function `*VirtualMachinesConvertToManagedDisksPoller.ResumeToken` has been removed +- Function `*CloudServicesCreateOrUpdatePoller.Done` has been removed +- Function `*DisksCreateOrUpdatePoller.Poll` has been removed +- Function `*VirtualMachineScaleSetVMsPowerOffPollerResponse.Resume` has been removed +- Function `*VirtualMachineScaleSetsPowerOffPoller.FinalResponse` has been removed +- Function `GalleriesCreateOrUpdatePollerResponse.PollUntilDone` has been removed +- Function `*VirtualMachineScaleSetVMRunCommandsUpdatePoller.Done` has been removed +- Function `*ImagesListByResourceGroupPager.NextPage` has been removed +- Function `*VirtualMachinesStartPoller.ResumeToken` has been removed +- Function `*VirtualMachineRunCommandsListByVirtualMachinePager.NextPage` has been removed +- Function `*DisksDeletePoller.FinalResponse` has been removed +- Function `*VirtualMachineScaleSetExtensionsDeletePollerResponse.Resume` has been removed +- Function `*VirtualMachineScaleSetVMsDeallocatePoller.Poll` has been removed +- Function `*VirtualMachinesCapturePoller.FinalResponse` has been removed +- Function `*CloudServiceRoleInstancesDeletePollerResponse.Resume` has been removed +- Function `*VirtualMachinesListByLocationPager.Err` has been removed +- Function `*CloudServiceOperatingSystemsListOSVersionsPager.PageResponse` has been removed +- Function `*SharedGalleryImageVersionsListPager.Err` has been removed +- Function `VirtualMachineScaleSetVMExtension.MarshalJSON` has been removed +- Function `*VirtualMachineScaleSetsSetOrchestrationServiceStatePoller.Done` has been removed +- Function `*GalleryApplicationVersionsListByGalleryApplicationPager.PageResponse` has been removed +- Function `*VirtualMachineScaleSetsReimagePoller.FinalResponse` has been removed +- Function `*VirtualMachineScaleSetVMsReimagePoller.Done` has been removed +- Function `*AvailabilitySetsListPager.Err` has been removed +- Function `VirtualMachineScaleSetVMRunCommandsDeletePollerResponse.PollUntilDone` has been removed +- Function `*DisksDeletePoller.Done` has been removed +- Function `VirtualMachineScaleSetExtensionsDeletePollerResponse.PollUntilDone` has been removed +- Function `*CloudServicesRestartPoller.Poll` has been removed +- Function `*VirtualMachineScaleSetExtensionsUpdatePoller.FinalResponse` has been removed +- Function `*VirtualMachineScaleSetVMsReimageAllPoller.Done` has been removed +- Function `NetworkInterfaceReference.MarshalJSON` has been removed +- Function 
`*CloudServicesDeletePoller.FinalResponse` has been removed +- Function `*DiskEncryptionSetsUpdatePoller.Poll` has been removed +- Function `*VirtualMachinesConvertToManagedDisksPollerResponse.Resume` has been removed +- Function `*VirtualMachineScaleSetExtensionsDeletePoller.Done` has been removed +- Function `GalleryApplicationsUpdatePollerResponse.PollUntilDone` has been removed +- Function `*DisksCreateOrUpdatePollerResponse.Resume` has been removed +- Function `*AvailabilitySetsListBySubscriptionPager.PageResponse` has been removed +- Function `*DiskEncryptionSetsListPager.PageResponse` has been removed +- Function `*VirtualMachineScaleSetVMsReimagePoller.Poll` has been removed +- Function `*VirtualMachineRunCommandsDeletePoller.Poll` has been removed +- Function `*DiskRestorePointListByRestorePointPager.PageResponse` has been removed +- Function `*VirtualMachineExtensionsCreateOrUpdatePoller.Done` has been removed +- Function `*CapacityReservationsCreateOrUpdatePollerResponse.Resume` has been removed +- Function `*VirtualMachinesPerformMaintenancePoller.FinalResponse` has been removed +- Function `*GalleryApplicationsCreateOrUpdatePoller.FinalResponse` has been removed +- Function `*CloudServiceRoleInstancesDeletePoller.Poll` has been removed +- Function `VirtualMachineExtensionsCreateOrUpdatePollerResponse.PollUntilDone` has been removed +- Function `*VirtualMachineScaleSetExtensionsCreateOrUpdatePoller.ResumeToken` has been removed +- Function `*VirtualMachineScaleSetVMsRestartPoller.FinalResponse` has been removed +- Function `*CloudServicesRebuildPoller.ResumeToken` has been removed +- Function `GalleryApplicationVersionsUpdatePollerResponse.PollUntilDone` has been removed +- Function `VirtualMachineScaleSetVMRunCommandsCreateOrUpdatePollerResponse.PollUntilDone` has been removed +- Function `*VirtualMachineScaleSetVMsRunCommandPoller.ResumeToken` has been removed +- Function `*DedicatedHostsListByHostGroupPager.Err` has been removed +- Function `*CloudServicesUpdatePoller.ResumeToken` has been removed +- Function `*SSHPublicKeysListByResourceGroupPager.PageResponse` has been removed +- Function `*DisksRevokeAccessPoller.FinalResponse` has been removed +- Function `*DisksDeletePoller.ResumeToken` has been removed +- Function `*DedicatedHostGroupsListBySubscriptionPager.PageResponse` has been removed +- Function `*DisksRevokeAccessPollerResponse.Resume` has been removed +- Function `*CapacityReservationGroupsListByResourceGroupPager.PageResponse` has been removed +- Function `*VirtualMachineScaleSetsListPager.Err` has been removed +- Function `*VirtualMachineScaleSetsDeleteInstancesPoller.Poll` has been removed +- Function `*VirtualMachineScaleSetsPerformMaintenancePoller.FinalResponse` has been removed +- Function `*VirtualMachinesAssessPatchesPoller.FinalResponse` has been removed +- Function `*VirtualMachinesRunCommandPoller.Done` has been removed +- Function `DedicatedHostsCreateOrUpdatePollerResponse.PollUntilDone` has been removed +- Function `GalleriesDeletePollerResponse.PollUntilDone` has been removed +- Function `DedicatedHostsDeletePollerResponse.PollUntilDone` has been removed +- Function `*GalleryImageVersionsDeletePoller.FinalResponse` has been removed +- Function `*VirtualMachinesListPager.NextPage` has been removed +- Function `*DiskRestorePointRevokeAccessPollerResponse.Resume` has been removed +- Function `*CloudServicesUpdateDomainWalkUpdateDomainPollerResponse.Resume` has been removed +- Function `*CapacityReservationsDeletePoller.Done` has been removed +- 
Function `*CapacityReservationsListByCapacityReservationGroupPager.Err` has been removed +- Function `*VirtualMachinesCapturePoller.Done` has been removed +- Function `*CloudServiceRoleInstancesRebuildPoller.Poll` has been removed +- Function `*VirtualMachineScaleSetsListByLocationPager.Err` has been removed +- Function `*GalleriesListByResourceGroupPager.NextPage` has been removed +- Function `*CloudServicesPowerOffPoller.FinalResponse` has been removed +- Function `*CloudServicesListAllPager.PageResponse` has been removed +- Function `*SharedGalleryImageVersionsListPager.PageResponse` has been removed +- Function `*VirtualMachineScaleSetsReimagePoller.Done` has been removed +- Function `VirtualMachineScaleSetRollingUpgradesCancelPollerResponse.PollUntilDone` has been removed +- Function `*CloudServicesCreateOrUpdatePoller.Poll` has been removed +- Function `*VirtualMachinesPerformMaintenancePoller.ResumeToken` has been removed +- Function `CapacityReservationsCreateOrUpdatePollerResponse.PollUntilDone` has been removed +- Function `*VirtualMachinesConvertToManagedDisksPoller.FinalResponse` has been removed +- Function `*DisksGrantAccessPoller.FinalResponse` has been removed +- Function `*LogAnalyticsExportThrottledRequestsPoller.Done` has been removed +- Function `*VirtualMachineScaleSetsCreateOrUpdatePoller.Done` has been removed +- Function `*VirtualMachinesCapturePoller.ResumeToken` has been removed +- Function `*VirtualMachinesRunCommandPoller.FinalResponse` has been removed +- Function `*SharedGalleriesListPager.Err` has been removed +- Function `*DiskRestorePointRevokeAccessPoller.FinalResponse` has been removed +- Function `*VirtualMachineScaleSetsDeletePollerResponse.Resume` has been removed +- Function `*RestorePointCollectionsListAllPager.NextPage` has been removed +- Function `*VirtualMachineExtensionsUpdatePoller.Poll` has been removed +- Function `*LogAnalyticsExportRequestRateByIntervalPoller.ResumeToken` has been removed +- Function `*RestorePointCollectionsDeletePoller.FinalResponse` has been removed +- Function `*CapacityReservationsListByCapacityReservationGroupPager.NextPage` has been removed +- Function `CloudServiceRoleInstancesRebuildPollerResponse.PollUntilDone` has been removed +- Function `*DiskEncryptionSetsDeletePoller.ResumeToken` has been removed +- Function `*VirtualMachinesInstallPatchesPoller.FinalResponse` has been removed +- Function `*CloudServicesPowerOffPoller.Done` has been removed +- Function `*VirtualMachinesPerformMaintenancePoller.Done` has been removed +- Function `*VirtualMachinesPerformMaintenancePoller.Poll` has been removed +- Function `*ImagesDeletePoller.Done` has been removed +- Function `*CloudServicesRestartPoller.Done` has been removed +- Function `*VirtualMachineRunCommandsListPager.NextPage` has been removed +- Function `VirtualMachineScaleSetsRedeployPollerResponse.PollUntilDone` has been removed +- Function `*DedicatedHostsUpdatePoller.ResumeToken` has been removed +- Function `*CapacityReservationGroupsListByResourceGroupPager.Err` has been removed +- Function `*VirtualMachineScaleSetsPerformMaintenancePoller.Done` has been removed +- Function `*GalleryApplicationsDeletePollerResponse.Resume` has been removed +- Function `*GalleryApplicationVersionsListByGalleryApplicationPager.NextPage` has been removed +- Function `*VirtualMachineScaleSetsDeallocatePoller.Done` has been removed +- Function `*DiskEncryptionSetsUpdatePoller.FinalResponse` has been removed +- Function 
`LogAnalyticsExportRequestRateByIntervalPollerResponse.PollUntilDone` has been removed +- Function `*DiskRestorePointGrantAccessPoller.Done` has been removed +- Function `CloudServicesUpdateDomainWalkUpdateDomainPollerResponse.PollUntilDone` has been removed +- Function `DiskAccessesUpdatePollerResponse.PollUntilDone` has been removed +- Function `*DedicatedHostsCreateOrUpdatePollerResponse.Resume` has been removed +- Function `*SnapshotsRevokeAccessPoller.ResumeToken` has been removed +- Function `*VirtualMachineScaleSetVMsPerformMaintenancePoller.Done` has been removed +- Function `VirtualMachineExtensionsDeletePollerResponse.PollUntilDone` has been removed +- Function `*GalleryApplicationVersionsUpdatePollerResponse.Resume` has been removed +- Function `*AvailabilitySetsListPager.NextPage` has been removed +- Function `*VirtualMachineScaleSetsRedeployPoller.Done` has been removed +- Function `*VirtualMachineRunCommandsUpdatePoller.FinalResponse` has been removed +- Function `*GalleryApplicationVersionsDeletePollerResponse.Resume` has been removed +- Function `*CloudServicesReimagePoller.Poll` has been removed +- Function `*VirtualMachinesStartPoller.Poll` has been removed +- Function `*SSHPublicKeysListBySubscriptionPager.NextPage` has been removed +- Function `*CloudServicesRebuildPoller.Done` has been removed +- Function `*GalleryApplicationVersionsUpdatePoller.Poll` has been removed +- Function `*DedicatedHostsDeletePoller.ResumeToken` has been removed +- Function `*DiskAccessesCreateOrUpdatePoller.Poll` has been removed +- Function `*VirtualMachineScaleSetsPerformMaintenancePoller.Poll` has been removed +- Function `*GalleriesCreateOrUpdatePoller.ResumeToken` has been removed +- Function `*GalleryApplicationsListByGalleryPager.PageResponse` has been removed +- Function `*VirtualMachineScaleSetVMExtensionsUpdatePoller.Done` has been removed +- Function `*VirtualMachineScaleSetVMsStartPoller.Poll` has been removed +- Function `ImageReference.MarshalJSON` has been removed +- Function `DiskRestorePointGrantAccessPollerResponse.PollUntilDone` has been removed +- Function `*DisksRevokeAccessPoller.Poll` has been removed +- Function `*CloudServiceRoleInstancesRebuildPollerResponse.Resume` has been removed +- Function `*CloudServicesUpdateDomainWalkUpdateDomainPoller.Done` has been removed +- Function `*VirtualMachineRunCommandsCreateOrUpdatePoller.ResumeToken` has been removed +- Function `*VirtualMachinesRedeployPoller.FinalResponse` has been removed +- Function `*VirtualMachinesUpdatePoller.Done` has been removed +- Function `*SnapshotsRevokeAccessPoller.FinalResponse` has been removed +- Function `*GalleryImagesCreateOrUpdatePoller.ResumeToken` has been removed +- Function `*CapacityReservationGroupsListByResourceGroupPager.NextPage` has been removed +- Function `*VirtualMachinesUpdatePollerResponse.Resume` has been removed +- Function `*VirtualMachineScaleSetExtensionsDeletePoller.ResumeToken` has been removed +- Function `*VirtualMachinesListAllPager.NextPage` has been removed +- Function `*DiskEncryptionSetsDeletePoller.FinalResponse` has been removed +- Function `*CapacityReservationsUpdatePoller.Done` has been removed +- Function `VirtualMachinesUpdatePollerResponse.PollUntilDone` has been removed +- Function `*GalleryImageVersionsCreateOrUpdatePollerResponse.Resume` has been removed +- Function `*DiskEncryptionSetsListAssociatedResourcesPager.NextPage` has been removed +- Function `VirtualMachinesCreateOrUpdatePollerResponse.PollUntilDone` has been removed +- Function 
`*DisksUpdatePoller.Done` has been removed +- Function `*SnapshotsGrantAccessPoller.Done` has been removed +- Function `*CloudServiceRoleInstancesDeletePoller.ResumeToken` has been removed +- Function `*CloudServicesDeleteInstancesPoller.Poll` has been removed +- Function `*DiskAccessesDeletePollerResponse.Resume` has been removed +- Function `*GalleryImagesDeletePoller.ResumeToken` has been removed +- Function `*GalleryImageVersionsDeletePoller.Done` has been removed +- Function `*CapacityReservationsDeletePoller.ResumeToken` has been removed +- Function `*DiskEncryptionSetsListAssociatedResourcesPager.Err` has been removed +- Function `*CloudServiceRoleInstancesRebuildPoller.FinalResponse` has been removed +- Function `*VirtualMachinesCreateOrUpdatePoller.FinalResponse` has been removed +- Function `*VirtualMachineScaleSetExtensionsUpdatePoller.ResumeToken` has been removed +- Function `VirtualMachineScaleSetVMsRunCommandPollerResponse.PollUntilDone` has been removed +- Function `*GalleryApplicationsUpdatePoller.FinalResponse` has been removed +- Function `*DiskEncryptionSetsCreateOrUpdatePollerResponse.Resume` has been removed +- Function `*VirtualMachineScaleSetVMsReimageAllPoller.FinalResponse` has been removed +- Function `*GalleriesDeletePoller.FinalResponse` has been removed +- Function `*ImagesCreateOrUpdatePoller.Poll` has been removed +- Function `*GalleryApplicationVersionsListByGalleryApplicationPager.Err` has been removed +- Function `*CloudServicesUpdatePoller.Poll` has been removed +- Function `*GalleryImagesCreateOrUpdatePollerResponse.Resume` has been removed +- Function `*VirtualMachineScaleSetVMsRunCommandPollerResponse.Resume` has been removed +- Function `*CloudServiceRoleInstancesReimagePollerResponse.Resume` has been removed +- Function `*CapacityReservationsUpdatePollerResponse.Resume` has been removed +- Function `*VirtualMachinesPowerOffPollerResponse.Resume` has been removed +- Function `*DedicatedHostGroupsListByResourceGroupPager.NextPage` has been removed +- Function `*GalleryImagesListByGalleryPager.PageResponse` has been removed +- Function `*VirtualMachineRunCommandsCreateOrUpdatePoller.FinalResponse` has been removed +- Function `*VirtualMachineScaleSetVMExtensionsDeletePoller.Poll` has been removed +- Function `*DedicatedHostGroupsListBySubscriptionPager.Err` has been removed +- Function `*VirtualMachineScaleSetVMsRedeployPoller.Done` has been removed +- Function `*GalleriesDeletePollerResponse.Resume` has been removed +- Function `*DedicatedHostGroupsListByResourceGroupPager.PageResponse` has been removed +- Function `*DisksDeletePoller.Poll` has been removed +- Function `*DiskRestorePointRevokeAccessPoller.ResumeToken` has been removed +- Function `*ImagesDeletePoller.FinalResponse` has been removed +- Function `*VirtualMachineScaleSetVMExtensionsDeletePoller.Done` has been removed +- Function `*VirtualMachineScaleSetVMsPerformMaintenancePoller.FinalResponse` has been removed +- Function `*DiskRestorePointRevokeAccessPoller.Poll` has been removed +- Function `*DiskAccessesListPrivateEndpointConnectionsPager.NextPage` has been removed +- Function `*SnapshotsCreateOrUpdatePoller.Done` has been removed +- Function `*VirtualMachineScaleSetsCreateOrUpdatePollerResponse.Resume` has been removed +- Function `*VirtualMachineScaleSetsUpdateInstancesPoller.FinalResponse` has been removed +- Function `*RestorePointsDeletePollerResponse.Resume` has been removed +- Function `*CloudServiceOperatingSystemsListOSFamiliesPager.PageResponse` has been removed +- Function 
`*SnapshotsDeletePollerResponse.Resume` has been removed +- Function `*DedicatedHostsCreateOrUpdatePoller.FinalResponse` has been removed +- Function `*VirtualMachineScaleSetsReimageAllPoller.FinalResponse` has been removed +- Function `*CloudServiceRoleInstancesRestartPoller.Poll` has been removed +- Function `*VirtualMachinesRestartPoller.Poll` has been removed +- Function `CloudServicesDeleteInstancesPollerResponse.PollUntilDone` has been removed +- Function `*VirtualMachineScaleSetVMRunCommandsUpdatePollerResponse.Resume` has been removed +- Function `*CapacityReservationGroupsListBySubscriptionPager.Err` has been removed +- Function `*GalleryApplicationsDeletePoller.Poll` has been removed +- Function `*GalleryImagesDeletePoller.Poll` has been removed +- Function `*CloudServicesUpdateDomainWalkUpdateDomainPoller.FinalResponse` has been removed +- Function `*CloudServiceRolesListPager.PageResponse` has been removed +- Function `*DiskEncryptionSetsUpdatePoller.Done` has been removed +- Function `*ImagesListPager.PageResponse` has been removed +- Function `*VirtualMachineScaleSetVMRunCommandsListPager.NextPage` has been removed +- Function `VirtualMachineScaleSetVMRunCommandsUpdatePollerResponse.PollUntilDone` has been removed +- Function `*VirtualMachineRunCommandsDeletePoller.FinalResponse` has been removed +- Function `*VirtualMachinesReimagePoller.FinalResponse` has been removed +- Function `SnapshotsCreateOrUpdatePollerResponse.PollUntilDone` has been removed +- Function `*VirtualMachineScaleSetVMsRunCommandPoller.Done` has been removed +- Function `GalleriesUpdatePollerResponse.PollUntilDone` has been removed +- Function `*VirtualMachineRunCommandsUpdatePoller.Poll` has been removed +- Function `*ImagesCreateOrUpdatePoller.Done` has been removed +- Function `*VirtualMachinesRedeployPoller.Poll` has been removed +- Function `*GalleriesListByResourceGroupPager.Err` has been removed +- Function `*DiskEncryptionSetsDeletePoller.Poll` has been removed +- Function `*SnapshotsCreateOrUpdatePollerResponse.Resume` has been removed +- Function `*VirtualMachineScaleSetVMExtensionsUpdatePoller.FinalResponse` has been removed +- Function `*VirtualMachinesCreateOrUpdatePoller.ResumeToken` has been removed +- Function `*VirtualMachineScaleSetsUpdatePoller.FinalResponse` has been removed +- Function `*DiskEncryptionSetsListByResourceGroupPager.PageResponse` has been removed +- Function `*CloudServicesReimagePollerResponse.Resume` has been removed +- Function `VirtualMachineScaleSetVMsStartPollerResponse.PollUntilDone` has been removed +- Function `*VirtualMachineScaleSetVMsUpdatePoller.Done` has been removed +- Function `*SnapshotsListPager.Err` has been removed +- Function `*VirtualMachineScaleSetsSetOrchestrationServiceStatePoller.Poll` has been removed +- Function `*DiskAccessesListPager.Err` has been removed +- Function `*DisksUpdatePoller.FinalResponse` has been removed +- Function `*CloudServiceRolesListPager.NextPage` has been removed +- Function `*CloudServiceRoleInstancesListPager.Err` has been removed +- Function `*VirtualMachineScaleSetVMExtensionsCreateOrUpdatePoller.ResumeToken` has been removed +- Function `*DiskRestorePointRevokeAccessPoller.Done` has been removed +- Function `*VirtualMachinesReimagePollerResponse.Resume` has been removed +- Function `*VirtualMachineScaleSetsDeleteInstancesPoller.Done` has been removed +- Function `*RestorePointsCreatePollerResponse.Resume` has been removed +- Function `*VirtualMachineScaleSetsDeallocatePoller.FinalResponse` has been removed +- 
Function `CloudServicesReimagePollerResponse.PollUntilDone` has been removed +- Function `*CloudServiceOperatingSystemsListOSVersionsPager.NextPage` has been removed +- Function `*RestorePointsDeletePoller.Done` has been removed +- Function `*VirtualMachineScaleSetVMsReimageAllPollerResponse.Resume` has been removed +- Function `*DisksUpdatePollerResponse.Resume` has been removed +- Function `*GalleryApplicationVersionsCreateOrUpdatePoller.Poll` has been removed +- Function `*GalleriesCreateOrUpdatePoller.Poll` has been removed +- Function `*SnapshotsDeletePoller.FinalResponse` has been removed +- Function `*GalleryImagesDeletePollerResponse.Resume` has been removed +- Function `*GallerySharingProfileUpdatePoller.ResumeToken` has been removed +- Function `*GallerySharingProfileUpdatePollerResponse.Resume` has been removed +- Function `*VirtualMachineScaleSetVMsStartPollerResponse.Resume` has been removed +- Function `VirtualMachinesRunCommandPollerResponse.PollUntilDone` has been removed +- Function `*VirtualMachineScaleSetsStartPollerResponse.Resume` has been removed +- Function `*VirtualMachinesDeletePoller.FinalResponse` has been removed +- Function `*VirtualMachineScaleSetExtensionsListPager.PageResponse` has been removed +- Function `*DisksGrantAccessPoller.Poll` has been removed +- Function `*CloudServicesUpdatePoller.Done` has been removed +- Function `VirtualMachineScaleSetVMExtensionsCreateOrUpdatePollerResponse.PollUntilDone` has been removed +- Function `*VirtualMachineScaleSetsDeallocatePoller.ResumeToken` has been removed +- Function `*VirtualMachineScaleSetVMExtensionsDeletePoller.FinalResponse` has been removed +- Function `*VirtualMachineExtensionsDeletePoller.Poll` has been removed +- Function `*VirtualMachineScaleSetVMsRedeployPoller.Poll` has been removed +- Function `*GalleryImageVersionsDeletePoller.ResumeToken` has been removed +- Function `SnapshotsRevokeAccessPollerResponse.PollUntilDone` has been removed +- Function `*ResourceSKUsListPager.NextPage` has been removed +- Function `*VirtualMachineScaleSetVMsListPager.NextPage` has been removed +- Function `*VirtualMachineScaleSetVMsReimagePollerResponse.Resume` has been removed +- Function `*VirtualMachineScaleSetsReimagePollerResponse.Resume` has been removed +- Function `VirtualMachinesPowerOffPollerResponse.PollUntilDone` has been removed +- Function `*GalleryApplicationsCreateOrUpdatePoller.Poll` has been removed +- Function `VirtualMachineScaleSetRollingUpgradesStartExtensionUpgradePollerResponse.PollUntilDone` has been removed +- Function `*CloudServiceRoleInstancesListPager.NextPage` has been removed +- Function `*VirtualMachineScaleSetsSetOrchestrationServiceStatePoller.FinalResponse` has been removed +- Function `*VirtualMachineScaleSetsDeleteInstancesPollerResponse.Resume` has been removed +- Function `GalleryImagesDeletePollerResponse.PollUntilDone` has been removed +- Function `*SnapshotsGrantAccessPoller.Poll` has been removed +- Function `*GalleryImageVersionsListByGalleryImagePager.NextPage` has been removed +- Function `*VirtualMachineScaleSetVMsRedeployPoller.FinalResponse` has been removed +- Function `*VirtualMachineScaleSetsPerformMaintenancePoller.ResumeToken` has been removed +- Function `*RestorePointCollectionsListPager.NextPage` has been removed +- Function `*VirtualMachineScaleSetsListByLocationPager.PageResponse` has been removed +- Function `*GalleriesDeletePoller.Poll` has been removed +- Function `*SnapshotsListByResourceGroupPager.PageResponse` has been removed +- Function 
`*SharedGalleryImagesListPager.Err` has been removed +- Function `*CloudServicesListPager.Err` has been removed +- Function `*CloudServicesListPager.PageResponse` has been removed +- Function `*DiskAccessesCreateOrUpdatePollerResponse.Resume` has been removed +- Function `RunCommandDocumentBase.MarshalJSON` has been removed +- Function `*VirtualMachineScaleSetsPowerOffPoller.ResumeToken` has been removed +- Function `*VirtualMachineScaleSetsReimageAllPoller.ResumeToken` has been removed +- Function `SnapshotsDeletePollerResponse.PollUntilDone` has been removed +- Function `RestorePointsDeletePollerResponse.PollUntilDone` has been removed +- Function `VirtualMachineReimageParameters.MarshalJSON` has been removed +- Function `DiskAccessesUpdateAPrivateEndpointConnectionPollerResponse.PollUntilDone` has been removed +- Function `*CloudServiceRoleInstancesReimagePoller.Poll` has been removed +- Function `*VirtualMachineScaleSetExtensionsCreateOrUpdatePollerResponse.Resume` has been removed +- Function `*CapacityReservationsCreateOrUpdatePoller.Done` has been removed +- Function `*VirtualMachineScaleSetVMsReimagePoller.FinalResponse` has been removed +- Function `*VirtualMachineExtensionsUpdatePoller.ResumeToken` has been removed +- Function `*DisksListPager.NextPage` has been removed +- Function `*VirtualMachineScaleSetVMsDeallocatePollerResponse.Resume` has been removed +- Function `*SnapshotsCreateOrUpdatePoller.ResumeToken` has been removed +- Function `CloudServicesDeletePollerResponse.PollUntilDone` has been removed +- Function `VirtualMachineScaleSetVMsUpdatePollerResponse.PollUntilDone` has been removed +- Function `*RestorePointsCreatePoller.Poll` has been removed +- Function `*SnapshotsRevokeAccessPollerResponse.Resume` has been removed +- Function `*VirtualMachineScaleSetRollingUpgradesStartOSUpgradePoller.Poll` has been removed +- Function `*LogAnalyticsExportThrottledRequestsPoller.FinalResponse` has been removed +- Function `*VirtualMachinesReimagePoller.ResumeToken` has been removed +- Function `*VirtualMachineScaleSetsStartPoller.ResumeToken` has been removed +- Function `*CloudServiceRoleInstancesReimagePoller.Done` has been removed +- Function `*VirtualMachineRunCommandsUpdatePollerResponse.Resume` has been removed +- Function `*GalleryImageVersionsUpdatePollerResponse.Resume` has been removed +- Function `*VirtualMachinesListByLocationPager.NextPage` has been removed +- Function `*DiskAccessesListByResourceGroupPager.Err` has been removed +- Function `*GalleryApplicationVersionsUpdatePoller.Done` has been removed +- Function `*CloudServiceRoleInstancesDeletePoller.FinalResponse` has been removed +- Function `*VirtualMachineScaleSetVMExtensionsCreateOrUpdatePoller.Done` has been removed +- Function `*DiskEncryptionSetsListByResourceGroupPager.Err` has been removed +- Function `ImagesDeletePollerResponse.PollUntilDone` has been removed +- Function `*RestorePointCollectionsDeletePoller.Done` has been removed +- Function `*CloudServicesDeleteInstancesPollerResponse.Resume` has been removed +- Function `VirtualMachineScaleSetsUpdatePollerResponse.PollUntilDone` has been removed +- Function `*CapacityReservationsCreateOrUpdatePoller.FinalResponse` has been removed +- Function `*CloudServicesStartPollerResponse.Resume` has been removed +- Function `VirtualMachinesRestartPollerResponse.PollUntilDone` has been removed +- Function `*VirtualMachineScaleSetVMExtensionsDeletePoller.ResumeToken` has been removed +- Function `RestorePointCollectionsDeletePollerResponse.PollUntilDone` has 
been removed +- Function `*RestorePointsCreatePoller.Done` has been removed +- Function `DisksCreateOrUpdatePollerResponse.PollUntilDone` has been removed +- Function `*ImagesCreateOrUpdatePollerResponse.Resume` has been removed +- Function `*GalleryApplicationVersionsDeletePoller.ResumeToken` has been removed +- Function `VirtualMachineScaleSetVMsReimageAllPollerResponse.PollUntilDone` has been removed +- Function `*SnapshotsListByResourceGroupPager.Err` has been removed +- Function `*CloudServiceRoleInstancesRestartPoller.FinalResponse` has been removed +- Function `*DiskRestorePointGrantAccessPoller.Poll` has been removed +- Function `*VirtualMachineScaleSetVMRunCommandsDeletePoller.Done` has been removed +- Function `*GalleryApplicationsUpdatePoller.ResumeToken` has been removed +- Function `*UsageListPager.NextPage` has been removed +- Function `*VirtualMachineScaleSetsDeleteInstancesPoller.FinalResponse` has been removed +- Function `*VirtualMachineScaleSetExtensionsDeletePoller.FinalResponse` has been removed +- Function `*SnapshotsDeletePoller.Poll` has been removed +- Function `*VirtualMachineScaleSetVMsDeallocatePoller.ResumeToken` has been removed +- Function `*SSHPublicKeysListByResourceGroupPager.Err` has been removed +- Function `*VirtualMachinesRestartPoller.FinalResponse` has been removed +- Function `*VirtualMachineScaleSetVMRunCommandsDeletePollerResponse.Resume` has been removed +- Function `*DisksCreateOrUpdatePoller.ResumeToken` has been removed +- Function `*ImagesDeletePollerResponse.Resume` has been removed +- Function `*ProximityPlacementGroupsListBySubscriptionPager.Err` has been removed +- Function `*VirtualMachinesDeletePoller.Done` has been removed +- Function `*CloudServicesRestartPollerResponse.Resume` has been removed +- Function `*LogAnalyticsExportRequestRateByIntervalPollerResponse.Resume` has been removed +- Function `*VirtualMachineScaleSetVMsUpdatePoller.ResumeToken` has been removed +- Function `*DiskRestorePointGrantAccessPoller.ResumeToken` has been removed +- Function `CloudServicesRebuildPollerResponse.PollUntilDone` has been removed +- Function `*DisksListByResourceGroupPager.NextPage` has been removed +- Function `*GalleryImageVersionsCreateOrUpdatePoller.ResumeToken` has been removed +- Function `*SSHPublicKeysListBySubscriptionPager.PageResponse` has been removed +- Function `*CapacityReservationsListByCapacityReservationGroupPager.PageResponse` has been removed +- Function `*VirtualMachineRunCommandsListByVirtualMachinePager.PageResponse` has been removed +- Function `*DiskRestorePointListByRestorePointPager.NextPage` has been removed +- Function `*VirtualMachinesConvertToManagedDisksPoller.Done` has been removed +- Function `*DisksRevokeAccessPoller.ResumeToken` has been removed +- Function `*CloudServicesUpdateDomainListUpdateDomainsPager.Err` has been removed +- Function `*AvailabilitySetsListBySubscriptionPager.NextPage` has been removed +- Function `*VirtualMachineScaleSetsSetOrchestrationServiceStatePollerResponse.Resume` has been removed +- Function `*VirtualMachineScaleSetsPowerOffPoller.Done` has been removed +- Function `*GalleryApplicationsListByGalleryPager.Err` has been removed +- Function `*ImagesUpdatePoller.Done` has been removed +- Function `*VirtualMachineScaleSetsRestartPoller.FinalResponse` has been removed +- Function `*GalleryImagesCreateOrUpdatePoller.Poll` has been removed +- Function `*CloudServiceRoleInstancesReimagePoller.FinalResponse` has been removed +- Function 
`*DiskAccessesUpdateAPrivateEndpointConnectionPoller.ResumeToken` has been removed
+- Function `*VirtualMachineScaleSetRollingUpgradesStartOSUpgradePoller.FinalResponse` has been removed
+- Function `*VirtualMachineScaleSetsUpdateInstancesPoller.Poll` has been removed
+- Function `*DisksListPager.PageResponse` has been removed
+- Function `*ProximityPlacementGroupsListBySubscriptionPager.PageResponse` has been removed
+- Function `*VirtualMachineScaleSetExtensionsCreateOrUpdatePoller.Done` has been removed
+- Function `*GalleryApplicationVersionsDeletePoller.Done` has been removed
+- Function `*GalleryApplicationsCreateOrUpdatePoller.Done` has been removed
+- Function `*DisksListPager.Err` has been removed
+- Function `*VirtualMachineScaleSetExtensionsListPager.Err` has been removed
+- Function `VirtualMachineScaleSetVMsRedeployPollerResponse.PollUntilDone` has been removed
+- Function `DiskAccessesDeletePollerResponse.PollUntilDone` has been removed
+- Function `*GalleryImageVersionsUpdatePoller.Done` has been removed
+- Function `*DiskEncryptionSetsUpdatePoller.ResumeToken` has been removed
+- Function `*GalleryApplicationsUpdatePoller.Done` has been removed
+- Function `*SnapshotsRevokeAccessPoller.Poll` has been removed
+- Function `*VirtualMachinesUpdatePoller.ResumeToken` has been removed
+- Function `*VirtualMachineScaleSetsStartPoller.Poll` has been removed
+- Function `*VirtualMachineScaleSetVMExtensionsUpdatePoller.Poll` has been removed
+- Function `*VirtualMachinesRestartPollerResponse.Resume` has been removed
+- Function `CloudServicesStartPollerResponse.PollUntilDone` has been removed
+- Function `*VirtualMachineScaleSetsRestartPoller.Done` has been removed
+- Function `*GalleryImageVersionsDeletePollerResponse.Resume` has been removed
+- Function `*DiskAccessesDeleteAPrivateEndpointConnectionPollerResponse.Resume` has been removed
+- Function `*CloudServicesStartPoller.FinalResponse` has been removed
+- Function `*VirtualMachineScaleSetRollingUpgradesCancelPoller.Poll` has been removed
+- Function `*VirtualMachinesReimagePoller.Done` has been removed
+- Function `*DiskAccessesCreateOrUpdatePoller.ResumeToken` has been removed
+- Function `GalleryImageVersionsUpdatePollerResponse.PollUntilDone` has been removed
+- Function `*ImagesUpdatePoller.ResumeToken` has been removed
+- Function `*RestorePointCollectionsDeletePoller.Poll` has been removed
+- Function `*DiskEncryptionSetsListByResourceGroupPager.NextPage` has been removed
+- Function `DisksGrantAccessPollerResponse.PollUntilDone` has been removed
+- Function `*CapacityReservationsCreateOrUpdatePoller.ResumeToken` has been removed
+- Function `*VirtualMachineScaleSetVMsUpdatePoller.Poll` has been removed
+- Function `*DiskAccessesListPager.PageResponse` has been removed
+- Function `*CapacityReservationsDeletePoller.FinalResponse` has been removed
+- Function `*SnapshotsUpdatePollerResponse.Resume` has been removed
+- Function `*VirtualMachineScaleSetsListPager.NextPage` has been removed
+- Function `GallerySharingProfileUpdatePollerResponse.PollUntilDone` has been removed
+- Function `*VirtualMachineScaleSetExtensionsCreateOrUpdatePoller.FinalResponse` has been removed
+- Function `*GalleryImageVersionsUpdatePoller.FinalResponse` has been removed
+- Function `VirtualMachineRunCommandsCreateOrUpdatePollerResponse.PollUntilDone` has been removed
+- Function `VirtualMachineScaleSetIPConfiguration.MarshalJSON` has been removed
+- Function `GalleryApplicationsCreateOrUpdatePollerResponse.PollUntilDone` has been removed
+- Function `GalleryImageVersionsDeletePollerResponse.PollUntilDone` has been removed
+- Function `*CloudServiceRoleInstancesRebuildPoller.Done` has been removed
+- Function `VirtualMachinesRedeployPollerResponse.PollUntilDone` has been removed
+- Function `*VirtualMachinesRunCommandPoller.Poll` has been removed
+- Function `*ProximityPlacementGroupsListByResourceGroupPager.NextPage` has been removed
+- Function `*VirtualMachineScaleSetsListSKUsPager.NextPage` has been removed
+- Function `*VirtualMachineScaleSetVMsListPager.Err` has been removed
+- Function `*CloudServicesListAllPager.Err` has been removed
+- Function `*VirtualMachineScaleSetVMRunCommandsListPager.Err` has been removed
+- Function `*CloudServicesPowerOffPollerResponse.Resume` has been removed
+- Function `*GalleriesListPager.PageResponse` has been removed
+- Function `*DiskAccessesDeletePoller.ResumeToken` has been removed
+- Function `*VirtualMachinesRestartPoller.Done` has been removed
+- Function `*VirtualMachineScaleSetsUpdateInstancesPoller.Done` has been removed
+- Function `*VirtualMachineScaleSetRollingUpgradesStartExtensionUpgradePoller.ResumeToken` has been removed
+- Function `*SnapshotsUpdatePoller.Poll` has been removed
+- Function `*GalleryImagesListByGalleryPager.NextPage` has been removed
+- Function `*SnapshotsUpdatePoller.Done` has been removed
+- Function `*GalleryImagesUpdatePoller.FinalResponse` has been removed
+- Function `*VirtualMachinesDeletePoller.ResumeToken` has been removed
+- Function `*GalleryImageVersionsListByGalleryImagePager.Err` has been removed
+- Function `*VirtualMachineScaleSetVMsDeallocatePoller.Done` has been removed
+- Function `*DiskEncryptionSetsCreateOrUpdatePoller.ResumeToken` has been removed
+- Function `*CloudServicesPowerOffPoller.ResumeToken` has been removed
+- Function `*VirtualMachineScaleSetVMExtensionsCreateOrUpdatePoller.FinalResponse` has been removed
+- Function `*DedicatedHostsCreateOrUpdatePoller.Poll` has been removed
+- Function `*SSHPublicKeysListBySubscriptionPager.Err` has been removed
+- Function `*VirtualMachineScaleSetsReimageAllPoller.Done` has been removed
+- Function `DiskAccessesDeleteAPrivateEndpointConnectionPollerResponse.PollUntilDone` has been removed
+- Function `*VirtualMachineScaleSetVMsStartPoller.FinalResponse` has been removed
+- Function `*DiskAccessesUpdatePoller.Done` has been removed
+- Function `*VirtualMachinesInstallPatchesPollerResponse.Resume` has been removed
+- Function `*VirtualMachineScaleSetVMsStartPoller.Done` has been removed
+- Function `ComputeOperationListResult.MarshalJSON` has been removed
+- Function `DisksUpdatePollerResponse.PollUntilDone` has been removed
+- Function `*UsageListPager.PageResponse` has been removed
+- Function `*VirtualMachineScaleSetVMsPowerOffPoller.Done` has been removed
+- Function `*DisksGrantAccessPoller.Done` has been removed
+- Function `*GallerySharingProfileUpdatePoller.Done` has been removed
+- Function `*GalleryApplicationVersionsCreateOrUpdatePoller.Done` has been removed
+- Function `*LogAnalyticsExportThrottledRequestsPoller.Poll` has been removed
+- Function `*VirtualMachineScaleSetsStartPoller.Done` has been removed
+- Function `*VirtualMachineScaleSetVMsRestartPoller.Done` has been removed
+- Function `*DiskAccessesCreateOrUpdatePoller.FinalResponse` has been removed
+- Function `VirtualMachineScaleSetExtension.MarshalJSON` has been removed
+- Function `*CloudServicesPowerOffPoller.Poll` has been removed
+- Function `*VirtualMachineScaleSetVMRunCommandsListPager.PageResponse` has been removed
+- Function `*SnapshotsGrantAccessPoller.ResumeToken` has been removed
+- Function `DiskEncryptionSetsCreateOrUpdatePollerResponse.PollUntilDone` has been removed
+- Function `*DiskAccessesListByResourceGroupPager.NextPage` has been removed
+- Function `*VirtualMachinesReapplyPoller.ResumeToken` has been removed
+- Function `*CloudServicesDeletePoller.Poll` has been removed
+- Function `*VirtualMachinesInstallPatchesPoller.Poll` has been removed
+- Function `*LogAnalyticsExportRequestRateByIntervalPoller.FinalResponse` has been removed
+- Function `*VirtualMachineRunCommandsUpdatePoller.Done` has been removed
+- Function `VirtualMachineScaleSetsCreateOrUpdatePollerResponse.PollUntilDone` has been removed
+- Function `*VirtualMachinesPowerOffPoller.FinalResponse` has been removed
+- Function `*VirtualMachineScaleSetVMRunCommandsCreateOrUpdatePoller.FinalResponse` has been removed
+- Function `*VirtualMachineScaleSetVMsRestartPoller.ResumeToken` has been removed
+- Function `*GalleriesDeletePoller.ResumeToken` has been removed
+- Function `*VirtualMachineScaleSetsListSKUsPager.Err` has been removed
+- Function `*VirtualMachinesAssessPatchesPoller.Done` has been removed
+- Function `*DedicatedHostsDeletePoller.Done` has been removed
+- Function `*VirtualMachineScaleSetsUpdatePollerResponse.Resume` has been removed
+- Function `*LogAnalyticsExportRequestRateByIntervalPoller.Done` has been removed
+- Function `*VirtualMachineScaleSetVMExtensionsDeletePollerResponse.Resume` has been removed
+- Function `*VirtualMachineScaleSetsUpdatePoller.ResumeToken` has been removed
+- Function `*VirtualMachineScaleSetRollingUpgradesStartExtensionUpgradePollerResponse.Resume` has been removed
+- Function `*VirtualMachineScaleSetsDeleteInstancesPoller.ResumeToken` has been removed
+- Function `*VirtualMachinesReapplyPoller.Done` has been removed
+- Function `*DiskAccessesUpdateAPrivateEndpointConnectionPoller.FinalResponse` has been removed
+- Function `*GalleryApplicationVersionsCreateOrUpdatePoller.FinalResponse` has been removed
+- Function `RestorePointsCreatePollerResponse.PollUntilDone` has been removed
+- Function `*VirtualMachineScaleSetVMsReimageAllPoller.ResumeToken` has been removed
+- Function `*CapacityReservationsCreateOrUpdatePoller.Poll` has been removed
+- Function `VirtualMachineScaleSetVMsDeletePollerResponse.PollUntilDone` has been removed
+- Function `*GalleriesListPager.Err` has been removed
+- Function `*RestorePointsDeletePoller.ResumeToken` has been removed
+- Function `*SnapshotsCreateOrUpdatePoller.Poll` has been removed
+- Function `SnapshotsUpdatePollerResponse.PollUntilDone` has been removed
+- Function `*CloudServicesUpdatePollerResponse.Resume` has been removed
+- Function `*GalleryImageVersionsUpdatePoller.ResumeToken` has been removed
+- Function `*VirtualMachineScaleSetVMsRedeployPoller.ResumeToken` has been removed
+- Function `*GalleryApplicationVersionsUpdatePoller.FinalResponse` has been removed
+- Function `*VirtualMachinesRunCommandPoller.ResumeToken` has been removed
+- Function `*CloudServicesListPager.NextPage` has been removed
+- Function `*DiskAccessesListPager.NextPage` has been removed
+- Function `*VirtualMachinesPerformMaintenancePollerResponse.Resume` has been removed
+- Function `*DiskAccessesUpdatePoller.Poll` has been removed
+- Function `*VirtualMachinesReapplyPollerResponse.Resume` has been removed
+- Function `*GalleryApplicationsCreateOrUpdatePollerResponse.Resume` has been removed
+- Function `*ResourceSKUsListPager.Err` has been removed
+- Function `*VirtualMachineScaleSetVMsDeletePoller.Poll` has been removed
+- Function `*VirtualMachinesReapplyPoller.FinalResponse` has been removed
+- Function `*DedicatedHostsListByHostGroupPager.PageResponse` has been removed
+- Function `DiskEncryptionSetsDeletePollerResponse.PollUntilDone` has been removed
+- Function `*VirtualMachinesListPager.PageResponse` has been removed
+- Function `*DedicatedHostsListByHostGroupPager.NextPage` has been removed
+- Function `*VirtualMachineScaleSetsRedeployPollerResponse.Resume` has been removed
+- Function `*SnapshotsCreateOrUpdatePoller.FinalResponse` has been removed
+- Function `*VirtualMachineScaleSetExtensionsDeletePoller.Poll` has been removed
+- Function `*VirtualMachineScaleSetsPerformMaintenancePollerResponse.Resume` has been removed
+- Function `*VirtualMachineScaleSetVMExtensionsCreateOrUpdatePollerResponse.Resume` has been removed
+- Function `*VirtualMachineScaleSetsReimageAllPoller.Poll` has been removed
+- Function `*ImagesUpdatePoller.Poll` has been removed
+- Function `*RestorePointsCreatePoller.FinalResponse` has been removed
+- Function `*GalleryApplicationsDeletePoller.ResumeToken` has been removed
+- Function `*SharedGalleryImagesListPager.NextPage` has been removed
+- Function `*SnapshotsGrantAccessPoller.FinalResponse` has been removed
+- Function `*VirtualMachineScaleSetVMsRedeployPollerResponse.Resume` has been removed
+- Function `*VirtualMachineScaleSetRollingUpgradesStartOSUpgradePoller.Done` has been removed
+- Function `*VirtualMachineScaleSetRollingUpgradesCancelPollerResponse.Resume` has been removed
+- Function `*CloudServicesStartPoller.Poll` has been removed
+- Function `*VirtualMachineScaleSetsReimagePoller.ResumeToken` has been removed
+- Function `CloudServicesUpdatePollerResponse.PollUntilDone` has been removed
+- Function `*GalleryImagesDeletePoller.FinalResponse` has been removed
+- Function `VirtualMachineScaleSetsUpdateInstancesPollerResponse.PollUntilDone` has been removed
+- Function `*SSHPublicKeysListByResourceGroupPager.NextPage` has been removed
+- Function `*VirtualMachineExtensionsDeletePoller.Done` has been removed
+- Function `*VirtualMachineScaleSetsUpdateInstancesPollerResponse.Resume` has been removed
+- Function `DiskRestorePointRevokeAccessPollerResponse.PollUntilDone` has been removed
+- Function `*GalleryImagesUpdatePollerResponse.Resume` has been removed
+- Function `VirtualMachineScaleSetsSetOrchestrationServiceStatePollerResponse.PollUntilDone` has been removed
+- Function `*DiskAccessesDeleteAPrivateEndpointConnectionPoller.Poll` has been removed
+- Function `*VirtualMachinesPowerOffPoller.Done` has been removed
+- Function `CloudServicesCreateOrUpdatePollerResponse.PollUntilDone` has been removed
+- Function `GalleryImagesUpdatePollerResponse.PollUntilDone` has been removed
+- Function `DisksRevokeAccessPollerResponse.PollUntilDone` has been removed
+- Function `*VirtualMachineExtensionsCreateOrUpdatePoller.Poll` has been removed
+- Function `*VirtualMachineScaleSetsGetOSUpgradeHistoryPager.Err` has been removed
+- Function `*ProximityPlacementGroupsListByResourceGroupPager.PageResponse` has been removed
+- Function `*DiskEncryptionSetsCreateOrUpdatePoller.Poll` has been removed
+- Function `*GallerySharingProfileUpdatePoller.Poll` has been removed
+- Function `*VirtualMachineScaleSetVMsDeletePollerResponse.Resume` has been removed
+- Function `*VirtualMachineScaleSetsUpdatePoller.Poll` has been removed
+- Function `*VirtualMachineScaleSetVMsPerformMaintenancePollerResponse.Resume` has been removed
+- Function `*VirtualMachineScaleSetsRestartPoller.ResumeToken` has been removed
+- Function `*ImagesCreateOrUpdatePoller.ResumeToken` has been removed
+- Function `*CloudServiceRoleInstancesListPager.PageResponse` has been removed
+- Function `*GalleryApplicationsDeletePoller.Done` has been removed
+- Function `*DedicatedHostsCreateOrUpdatePoller.ResumeToken` has been removed
+- Function `*UsageListPager.Err` has been removed
+- Function `*VirtualMachinesDeallocatePoller.Poll` has been removed
+- Function `*LogAnalyticsExportThrottledRequestsPoller.ResumeToken` has been removed
+- Function `*DisksRevokeAccessPoller.Done` has been removed
+- Function `*VirtualMachineRunCommandsCreateOrUpdatePollerResponse.Resume` has been removed
+- Function `*DiskAccessesDeletePoller.Done` has been removed
+- Function `*CloudServiceOperatingSystemsListOSFamiliesPager.NextPage` has been removed
+- Function `*VirtualMachineScaleSetVMsPerformMaintenancePoller.ResumeToken` has been removed
+- Function `VirtualMachinesConvertToManagedDisksPollerResponse.PollUntilDone` has been removed
+- Function `*VirtualMachineScaleSetRollingUpgradesStartOSUpgradePoller.ResumeToken` has been removed
+- Function `*VirtualMachinesListByLocationPager.PageResponse` has been removed
+- Function `VirtualMachinesCapturePollerResponse.PollUntilDone` has been removed
+- Function `*VirtualMachineScaleSetRollingUpgradesStartExtensionUpgradePoller.FinalResponse` has been removed
+- Function `*GalleryApplicationsUpdatePollerResponse.Resume` has been removed
+- Function `*SnapshotsUpdatePoller.FinalResponse` has been removed
+- Function `*GalleryApplicationsCreateOrUpdatePoller.ResumeToken` has been removed
+- Function `VirtualMachineScaleSetExtensionsUpdatePollerResponse.PollUntilDone` has been removed
+- Function `*DiskAccessesDeleteAPrivateEndpointConnectionPoller.Done` has been removed
+- Function `*LogAnalyticsExportRequestRateByIntervalPoller.Poll` has been removed
+- Function `SubResource.MarshalJSON` has been removed
+- Function `*VirtualMachineScaleSetVMsDeletePoller.ResumeToken` has been removed
+- Function `VirtualMachineScaleSetsReimageAllPollerResponse.PollUntilDone` has been removed
+- Function `*GalleryApplicationVersionsCreateOrUpdatePoller.ResumeToken` has been removed
+- Function `*VirtualMachineScaleSetsRedeployPoller.FinalResponse` has been removed
+- Function `*VirtualMachineScaleSetVMRunCommandsUpdatePoller.FinalResponse` has been removed
+- Function `DedicatedHostsUpdatePollerResponse.PollUntilDone` has been removed
+- Function `*CloudServicesDeleteInstancesPoller.Done` has been removed
+- Function `*VirtualMachineExtensionsDeletePoller.ResumeToken` has been removed
+- Function `*CapacityReservationsDeletePollerResponse.Resume` has been removed
+- Function `*RestorePointCollectionsDeletePollerResponse.Resume` has been removed
+- Function `*VirtualMachinesInstallPatchesPoller.ResumeToken` has been removed
+- Function `*VirtualMachineScaleSetsDeallocatePollerResponse.Resume` has been removed
+- Function `*VirtualMachinesAssessPatchesPoller.Poll` has been removed
+- Function `*VirtualMachinesDeallocatePoller.Done` has been removed
+- Function `VirtualMachineScaleSetVMsReimagePollerResponse.PollUntilDone` has been removed
+- Function `*VirtualMachineScaleSetVMsRunCommandPoller.Poll` has been removed
+- Function `*VirtualMachinesCreateOrUpdatePoller.Done` has been removed
+- Function `*DiskEncryptionSetsListPager.Err` has been removed
+- Function `*GalleriesCreateOrUpdatePoller.Done` has been removed
+- Function `DiskEncryptionSetsUpdatePollerResponse.PollUntilDone` has been removed
+- Function `*GalleryImageVersionsUpdatePoller.Poll` has been removed
+- Function `*VirtualMachineScaleSetVMsDeallocatePoller.FinalResponse` has been removed
+- Function `VirtualMachinesDeallocatePollerResponse.PollUntilDone` has been removed
+- Function `*GalleryApplicationsListByGalleryPager.NextPage` has been removed
+- Function `*DiskAccessesDeleteAPrivateEndpointConnectionPoller.FinalResponse` has been removed
+- Function `*GalleryImageVersionsCreateOrUpdatePoller.FinalResponse` has been removed
+- Function `*VirtualMachineScaleSetVMsDeletePoller.Done` has been removed
+- Function `*DiskEncryptionSetsListPager.NextPage` has been removed
+- Function `*VirtualMachinesDeletePoller.Poll` has been removed
+- Function `*VirtualMachineExtensionsUpdatePoller.FinalResponse` has been removed
+- Function `*VirtualMachineScaleSetVMsListPager.PageResponse` has been removed
+- Function `*CloudServicesUpdatePoller.FinalResponse` has been removed
+- Function `*VirtualMachineScaleSetVMRunCommandsDeletePoller.Poll` has been removed
+- Function `*CapacityReservationGroupsListBySubscriptionPager.NextPage` has been removed
+- Function `*CloudServiceOperatingSystemsListOSVersionsPager.Err` has been removed
+- Function `*VirtualMachinesStartPoller.Done` has been removed
+- Function `*CloudServicesStartPoller.Done` has been removed
+- Function `*VirtualMachineScaleSetVMRunCommandsCreateOrUpdatePollerResponse.Resume` has been removed
+- Function `*SharedGalleriesListPager.NextPage` has been removed
+- Function `*VirtualMachineScaleSetVMRunCommandsDeletePoller.FinalResponse` has been removed
+- Function `*VirtualMachinesPowerOffPoller.ResumeToken` has been removed
+- Function `*DisksDeletePollerResponse.Resume` has been removed
+- Function `*GalleryImagesCreateOrUpdatePoller.Done` has been removed
+- Function `SubResourceWithColocationStatus.MarshalJSON` has been removed
+- Function `VirtualMachineScaleSetVMsRestartPollerResponse.PollUntilDone` has been removed
+- Function `*GalleryApplicationVersionsCreateOrUpdatePollerResponse.Resume` has been removed
+- Function `ImagesCreateOrUpdatePollerResponse.PollUntilDone` has been removed
+- Function `*VirtualMachineScaleSetsPowerOffPoller.Poll` has been removed
+- Function `*VirtualMachineScaleSetRollingUpgradesCancelPoller.ResumeToken` has been removed
+- Function `*ImagesListByResourceGroupPager.PageResponse` has been removed
+- Function `*VirtualMachinesCapturePoller.Poll` has been removed
+- Function `*ProximityPlacementGroupsListByResourceGroupPager.Err` has been removed
+- Function `*VirtualMachineScaleSetsRedeployPoller.Poll` has been removed
+- Function `*SnapshotsRevokeAccessPoller.Done` has been removed
+- Function `CloudServicesRestartPollerResponse.PollUntilDone` has been removed
+- Function `*VirtualMachineScaleSetVMsUpdatePoller.FinalResponse` has been removed
+- Function `*VirtualMachineScaleSetVMRunCommandsDeletePoller.ResumeToken` has been removed
+- Function `*VirtualMachineRunCommandsListPager.PageResponse` has been removed
+- Function `*GalleriesUpdatePoller.ResumeToken` has been removed
+- Function `*VirtualMachineScaleSetVMsPerformMaintenancePoller.Poll` has been removed
+- Function `CloudServiceRoleInstancesReimagePollerResponse.PollUntilDone` has been removed
+- Function `*VirtualMachineScaleSetExtensionsUpdatePoller.Poll` has been removed
+- Function `*VirtualMachineScaleSetsReimagePoller.Poll` has been removed
+- Function `*DedicatedHostsDeletePoller.FinalResponse` has been removed
+- Function `*VirtualMachineScaleSetsListPager.PageResponse` has been removed
+- Function `*DiskAccessesUpdatePoller.ResumeToken` has been removed
+- Function `*SharedGalleriesListPager.PageResponse` has been removed
+- Function `*VirtualMachineScaleSetRollingUpgradesCancelPoller.FinalResponse` has been removed
+- Function `*DisksListByResourceGroupPager.PageResponse` has been removed
+- Function `*VirtualMachineRunCommandsCreateOrUpdatePoller.Poll` has been removed
+- Function `*ImagesUpdatePollerResponse.Resume` has been removed
+- Function `*VirtualMachineExtensionsCreateOrUpdatePoller.FinalResponse` has been removed
+- Function `*RestorePointsDeletePoller.Poll` has been removed
+- Function `*DiskAccessesDeleteAPrivateEndpointConnectionPoller.ResumeToken` has been removed
+- Function `VirtualMachineScaleSetVMsPerformMaintenancePollerResponse.PollUntilDone` has been removed
+- Function `*VirtualMachineScaleSetVMsRestartPollerResponse.Resume` has been removed
+- Function `GalleryImagesCreateOrUpdatePollerResponse.PollUntilDone` has been removed
+- Function `*VirtualMachinesInstallPatchesPoller.Done` has been removed
+- Function `*VirtualMachinesStartPollerResponse.Resume` has been removed
+- Function `SnapshotsGrantAccessPollerResponse.PollUntilDone` has been removed
+- Function `*VirtualMachineScaleSetVMsPowerOffPoller.FinalResponse` has been removed
+- Function `*DedicatedHostGroupsListBySubscriptionPager.NextPage` has been removed
+- Function `*ImagesListPager.NextPage` has been removed
+- Function `*VirtualMachineScaleSetRollingUpgradesStartExtensionUpgradePoller.Done` has been removed
+- Function `*DiskAccessesListByResourceGroupPager.PageResponse` has been removed
+- Function `GalleryImageVersionsCreateOrUpdatePollerResponse.PollUntilDone` has been removed
+- Function `*VirtualMachinesAssessPatchesPollerResponse.Resume` has been removed
+- Function `*CloudServiceRoleInstancesRestartPoller.Done` has been removed
+- Function `*CloudServicesCreateOrUpdatePollerResponse.Resume` has been removed
+- Function `*VirtualMachineRunCommandsListPager.Err` has been removed
+- Function `*GalleriesListByResourceGroupPager.PageResponse` has been removed
+- Function `*GalleriesCreateOrUpdatePollerResponse.Resume` has been removed
+- Function `*GalleryApplicationVersionsDeletePoller.Poll` has been removed
+- Function `*CloudServicesRestartPoller.ResumeToken` has been removed
+- Function `*DiskRestorePointGrantAccessPoller.FinalResponse` has been removed
+- Function `*GalleryApplicationsUpdatePoller.Poll` has been removed
+- Function `*GalleryImagesDeletePoller.Done` has been removed
+- Function `*SnapshotsGrantAccessPollerResponse.Resume` has been removed
+- Function `*CloudServiceRolesListPager.Err` has been removed
+- Function `*VirtualMachinesDeallocatePollerResponse.Resume` has been removed
+- Function `*VirtualMachineScaleSetsRestartPoller.Poll` has been removed
+- Function `*VirtualMachinesDeallocatePoller.ResumeToken` has been removed
+- Function `*VirtualMachineScaleSetsListAllPager.Err` has been removed
+- Function `VirtualMachineScaleSetsReimagePollerResponse.PollUntilDone` has been removed
+- Function `CloudServiceRoleInstancesDeletePollerResponse.PollUntilDone` has been removed
+- Function `*DiskEncryptionSetsDeletePoller.Done` has been removed
+- Function `*RestorePointCollectionsDeletePoller.ResumeToken` has been removed
+- Function `GalleryApplicationVersionsDeletePollerResponse.PollUntilDone` has been removed
+- Function `VirtualMachineScaleSetsDeletePollerResponse.PollUntilDone` has been removed
+- Function `VirtualMachinesReapplyPollerResponse.PollUntilDone` has been removed
+- Function `VirtualMachineExtensionsUpdatePollerResponse.PollUntilDone` has been removed
+- Function `VirtualMachineScaleSetsDeallocatePollerResponse.PollUntilDone` has been removed
+- Function `*VirtualMachineExtensionsDeletePollerResponse.Resume` has been removed
+- Function `*GalleriesDeletePoller.Done` has been removed
+- Function `*CloudServicesRebuildPollerResponse.Resume` has been removed
+- Function `*RestorePointCollectionsListPager.Err` has been removed
+- Function `*VirtualMachineScaleSetsDeallocatePoller.Poll` has been removed
+- Function `*DiskRestorePointGrantAccessPollerResponse.Resume` has been removed
+- Function `*VirtualMachineScaleSetsStartPoller.FinalResponse` has been removed
+- Function `*GallerySharingProfileUpdatePoller.FinalResponse` has been removed
+- Function `*ProximityPlacementGroupsListBySubscriptionPager.NextPage` has been removed
+- Function `VirtualMachineRunCommandsUpdatePollerResponse.PollUntilDone` has been removed
+- Function `SubResourceReadOnly.MarshalJSON` has been removed
+- Function `*VirtualMachineScaleSetRollingUpgradesCancelPoller.Done` has been removed
+- Struct `AvailabilitySetsCreateOrUpdateOptions` has been removed
+- Struct `AvailabilitySetsCreateOrUpdateResponse` has been removed
+- Struct `AvailabilitySetsCreateOrUpdateResult` has been removed
+- Struct `AvailabilitySetsDeleteOptions` has been removed
+- Struct `AvailabilitySetsDeleteResponse` has been removed
+- Struct `AvailabilitySetsGetOptions` has been removed
+- Struct `AvailabilitySetsGetResponse` has been removed
+- Struct `AvailabilitySetsGetResult` has been removed
+- Struct `AvailabilitySetsListAvailableSizesOptions` has been removed
+- Struct `AvailabilitySetsListAvailableSizesResponse` has been removed
+- Struct `AvailabilitySetsListAvailableSizesResult` has been removed
+- Struct `AvailabilitySetsListBySubscriptionOptions` has been removed
+- Struct `AvailabilitySetsListBySubscriptionPager` has been removed
+- Struct `AvailabilitySetsListBySubscriptionResponse` has been removed
+- Struct `AvailabilitySetsListBySubscriptionResult` has been removed
+- Struct `AvailabilitySetsListOptions` has been removed
+- Struct `AvailabilitySetsListPager` has been removed
+- Struct `AvailabilitySetsListResponse` has been removed
+- Struct `AvailabilitySetsListResult` has been removed
+- Struct `AvailabilitySetsUpdateOptions` has been removed
+- Struct `AvailabilitySetsUpdateResponse` has been removed
+- Struct `AvailabilitySetsUpdateResult` has been removed
+- Struct `CapacityReservationGroupsCreateOrUpdateOptions` has been removed
+- Struct `CapacityReservationGroupsCreateOrUpdateResponse` has been removed
+- Struct `CapacityReservationGroupsCreateOrUpdateResult` has been removed
+- Struct `CapacityReservationGroupsDeleteOptions` has been removed
+- Struct `CapacityReservationGroupsDeleteResponse` has been removed
+- Struct `CapacityReservationGroupsGetOptions` has been removed
+- Struct `CapacityReservationGroupsGetResponse` has been removed
+- Struct `CapacityReservationGroupsGetResult` has been removed
+- Struct `CapacityReservationGroupsListByResourceGroupOptions` has been removed
+- Struct `CapacityReservationGroupsListByResourceGroupPager` has been removed
+- Struct `CapacityReservationGroupsListByResourceGroupResponse` has been removed
+- Struct `CapacityReservationGroupsListByResourceGroupResult` has been removed
+- Struct `CapacityReservationGroupsListBySubscriptionOptions` has been removed
+- Struct `CapacityReservationGroupsListBySubscriptionPager` has been removed
+- Struct `CapacityReservationGroupsListBySubscriptionResponse` has been removed
+- Struct `CapacityReservationGroupsListBySubscriptionResult` has been removed
+- Struct `CapacityReservationGroupsUpdateOptions` has been removed
+- Struct `CapacityReservationGroupsUpdateResponse` has been removed
+- Struct `CapacityReservationGroupsUpdateResult` has been removed
+- Struct `CapacityReservationsBeginCreateOrUpdateOptions` has been removed
+- Struct `CapacityReservationsBeginDeleteOptions` has been removed
+- Struct `CapacityReservationsBeginUpdateOptions` has been removed
+- Struct `CapacityReservationsCreateOrUpdatePoller` has been removed
+- Struct `CapacityReservationsCreateOrUpdatePollerResponse` has been removed
+- Struct `CapacityReservationsCreateOrUpdateResponse` has been removed
+- Struct `CapacityReservationsCreateOrUpdateResult` has been removed
+- Struct `CapacityReservationsDeletePoller` has been removed
+- Struct `CapacityReservationsDeletePollerResponse` has been removed
+- Struct `CapacityReservationsDeleteResponse` has been removed
+- Struct `CapacityReservationsGetOptions` has been removed
+- Struct `CapacityReservationsGetResponse` has been removed
+- Struct `CapacityReservationsGetResult` has been removed
+- Struct `CapacityReservationsListByCapacityReservationGroupOptions` has been removed
+- Struct `CapacityReservationsListByCapacityReservationGroupPager` has been removed
+- Struct `CapacityReservationsListByCapacityReservationGroupResponse` has been removed
+- Struct `CapacityReservationsListByCapacityReservationGroupResult` has been removed
+- Struct `CapacityReservationsUpdatePoller` has been removed
+- Struct `CapacityReservationsUpdatePollerResponse` has been removed
+- Struct `CapacityReservationsUpdateResponse` has been removed
+- Struct `CapacityReservationsUpdateResult` has been removed
+- Struct `CloudServiceOperatingSystemsGetOSFamilyOptions` has been removed
+- Struct `CloudServiceOperatingSystemsGetOSFamilyResponse` has been removed
+- Struct `CloudServiceOperatingSystemsGetOSFamilyResult` has been removed
+- Struct `CloudServiceOperatingSystemsGetOSVersionOptions` has been removed
+- Struct `CloudServiceOperatingSystemsGetOSVersionResponse` has been removed
+- Struct `CloudServiceOperatingSystemsGetOSVersionResult` has been removed
+- Struct `CloudServiceOperatingSystemsListOSFamiliesOptions` has been removed
+- Struct `CloudServiceOperatingSystemsListOSFamiliesPager` has been removed
+- Struct `CloudServiceOperatingSystemsListOSFamiliesResponse` has been removed
+- Struct `CloudServiceOperatingSystemsListOSFamiliesResult` has been removed
+- Struct `CloudServiceOperatingSystemsListOSVersionsOptions` has been removed
+- Struct `CloudServiceOperatingSystemsListOSVersionsPager` has been removed
+- Struct `CloudServiceOperatingSystemsListOSVersionsResponse` has been removed
+- Struct `CloudServiceOperatingSystemsListOSVersionsResult` has been removed
+- Struct `CloudServiceRoleInstancesBeginDeleteOptions` has been removed
+- Struct `CloudServiceRoleInstancesBeginRebuildOptions` has been removed
+- Struct `CloudServiceRoleInstancesBeginReimageOptions` has been removed
+- Struct `CloudServiceRoleInstancesBeginRestartOptions` has been removed
+- Struct `CloudServiceRoleInstancesDeletePoller` has been removed
+- Struct `CloudServiceRoleInstancesDeletePollerResponse` has been removed
+- Struct `CloudServiceRoleInstancesDeleteResponse` has been removed
+- Struct `CloudServiceRoleInstancesGetInstanceViewOptions` has been removed
+- Struct `CloudServiceRoleInstancesGetInstanceViewResponse` has been removed
+- Struct `CloudServiceRoleInstancesGetInstanceViewResult` has been removed
+- Struct `CloudServiceRoleInstancesGetOptions` has been removed
+- Struct `CloudServiceRoleInstancesGetRemoteDesktopFileOptions` has been removed
+- Struct `CloudServiceRoleInstancesGetRemoteDesktopFileResponse` has been removed
+- Struct `CloudServiceRoleInstancesGetResponse` has been removed
+- Struct `CloudServiceRoleInstancesGetResult` has been removed
+- Struct `CloudServiceRoleInstancesListOptions` has been removed
+- Struct `CloudServiceRoleInstancesListPager` has been removed
+- Struct `CloudServiceRoleInstancesListResponse` has been removed
+- Struct `CloudServiceRoleInstancesListResult` has been removed
+- Struct `CloudServiceRoleInstancesRebuildPoller` has been removed
+- Struct `CloudServiceRoleInstancesRebuildPollerResponse` has been removed
+- Struct `CloudServiceRoleInstancesRebuildResponse` has been removed
+- Struct `CloudServiceRoleInstancesReimagePoller` has been removed
+- Struct `CloudServiceRoleInstancesReimagePollerResponse` has been removed
+- Struct `CloudServiceRoleInstancesReimageResponse` has been removed
+- Struct `CloudServiceRoleInstancesRestartPoller` has been removed
+- Struct `CloudServiceRoleInstancesRestartPollerResponse` has been removed
+- Struct `CloudServiceRoleInstancesRestartResponse` has been removed
+- Struct `CloudServiceRolesGetOptions` has been removed
+- Struct `CloudServiceRolesGetResponse` has been removed
+- Struct `CloudServiceRolesGetResult` has been removed
+- Struct `CloudServiceRolesListOptions` has been removed
+- Struct `CloudServiceRolesListPager` has been removed
+- Struct `CloudServiceRolesListResponse` has been removed
+- Struct `CloudServiceRolesListResult` has been removed
+- Struct `CloudServicesBeginCreateOrUpdateOptions` has been removed
+- Struct `CloudServicesBeginDeleteInstancesOptions` has been removed
+- Struct `CloudServicesBeginDeleteOptions` has been removed
+- Struct `CloudServicesBeginPowerOffOptions` has been removed
+- Struct `CloudServicesBeginRebuildOptions` has been removed
+- Struct `CloudServicesBeginReimageOptions` has been removed
+- Struct `CloudServicesBeginRestartOptions` has been removed
+- Struct `CloudServicesBeginStartOptions` has been removed
+- Struct `CloudServicesBeginUpdateOptions` has been removed
+- Struct `CloudServicesCreateOrUpdatePoller` has been removed
+- Struct `CloudServicesCreateOrUpdatePollerResponse` has been removed
+- Struct `CloudServicesCreateOrUpdateResponse` has been removed
+- Struct `CloudServicesCreateOrUpdateResult` has been removed
+- Struct `CloudServicesDeleteInstancesPoller` has been removed
+- Struct `CloudServicesDeleteInstancesPollerResponse` has been removed
+- Struct `CloudServicesDeleteInstancesResponse` has been removed
+- Struct `CloudServicesDeletePoller` has been removed
+- Struct `CloudServicesDeletePollerResponse` has been removed
+- Struct `CloudServicesDeleteResponse` has been removed
+- Struct `CloudServicesGetInstanceViewOptions` has been removed
+- Struct `CloudServicesGetInstanceViewResponse` has been removed
+- Struct `CloudServicesGetInstanceViewResult` has been removed
+- Struct `CloudServicesGetOptions` has been removed
+- Struct `CloudServicesGetResponse` has been removed
+- Struct `CloudServicesGetResult` has been removed
+- Struct `CloudServicesListAllOptions` has been removed
+- Struct `CloudServicesListAllPager` has been removed
+- Struct `CloudServicesListAllResponse` has been removed
+- Struct `CloudServicesListAllResult` has been removed
+- Struct `CloudServicesListOptions` has been removed
+- Struct `CloudServicesListPager` has been removed
+- Struct `CloudServicesListResponse` has been removed
+- Struct `CloudServicesListResult` has been removed
+- Struct `CloudServicesPowerOffPoller` has been removed
+- Struct `CloudServicesPowerOffPollerResponse` has been removed
+- Struct `CloudServicesPowerOffResponse` has been removed
+- Struct `CloudServicesRebuildPoller` has been removed
+- Struct `CloudServicesRebuildPollerResponse` has been removed
+- Struct `CloudServicesRebuildResponse` has been removed
+- Struct `CloudServicesReimagePoller` has been removed
+- Struct `CloudServicesReimagePollerResponse` has been removed
+- Struct `CloudServicesReimageResponse` has been removed
+- Struct `CloudServicesRestartPoller` has been removed
+- Struct `CloudServicesRestartPollerResponse` has been removed
+- Struct `CloudServicesRestartResponse` has been removed
+- Struct `CloudServicesStartPoller` has been removed
+- Struct `CloudServicesStartPollerResponse` has been removed
+- Struct `CloudServicesStartResponse` has been removed
+- Struct `CloudServicesUpdateDomainBeginWalkUpdateDomainOptions` has been removed
+- Struct `CloudServicesUpdateDomainGetUpdateDomainOptions` has been removed
+- Struct `CloudServicesUpdateDomainGetUpdateDomainResponse` has been removed
+- Struct `CloudServicesUpdateDomainGetUpdateDomainResult` has been removed
+- Struct `CloudServicesUpdateDomainListUpdateDomainsOptions` has been removed
+- Struct `CloudServicesUpdateDomainListUpdateDomainsPager` has been removed
+- Struct `CloudServicesUpdateDomainListUpdateDomainsResponse` has been removed
+- Struct `CloudServicesUpdateDomainListUpdateDomainsResult` has been removed
+- Struct `CloudServicesUpdateDomainWalkUpdateDomainPoller` has been removed
+- Struct `CloudServicesUpdateDomainWalkUpdateDomainPollerResponse` has been removed
+- Struct `CloudServicesUpdateDomainWalkUpdateDomainResponse` has been removed
+- Struct `CloudServicesUpdatePoller` has been removed
+- Struct `CloudServicesUpdatePollerResponse` has been removed
+- Struct `CloudServicesUpdateResponse` has been removed
+- Struct `CloudServicesUpdateResult` has been removed
+- Struct `CommunityGalleriesGetOptions` has been removed
+- Struct `CommunityGalleriesGetResponse` has been removed
+- Struct `CommunityGalleriesGetResult` has been removed
+- Struct `CommunityGalleryImageVersionsGetOptions` has been removed
+- Struct `CommunityGalleryImageVersionsGetResponse` has been removed
+- Struct `CommunityGalleryImageVersionsGetResult` has been removed
+- Struct `CommunityGalleryImagesGetOptions` has been removed
+- Struct `CommunityGalleryImagesGetResponse` has been removed
+- Struct `CommunityGalleryImagesGetResult` has been removed
+- Struct `ComputeOperationListResult` has been removed
+- Struct `ComputeOperationValue` has been removed
+- Struct `ComputeOperationValueDisplay` has been removed
+- Struct `DedicatedHostGroupsCreateOrUpdateOptions` has been removed
+- Struct `DedicatedHostGroupsCreateOrUpdateResponse` has been removed
+- Struct `DedicatedHostGroupsCreateOrUpdateResult` has been removed
+- Struct `DedicatedHostGroupsDeleteOptions` has been removed
+- Struct `DedicatedHostGroupsDeleteResponse` has been removed
+- Struct `DedicatedHostGroupsGetOptions` has been removed
+- Struct `DedicatedHostGroupsGetResponse` has been removed
+- Struct `DedicatedHostGroupsGetResult` has been removed
+- Struct `DedicatedHostGroupsListByResourceGroupOptions` has been removed
+- Struct `DedicatedHostGroupsListByResourceGroupPager` has been removed
+- Struct `DedicatedHostGroupsListByResourceGroupResponse` has been removed
+- Struct `DedicatedHostGroupsListByResourceGroupResult` has been removed
+- Struct `DedicatedHostGroupsListBySubscriptionOptions` has been removed
+- Struct `DedicatedHostGroupsListBySubscriptionPager` has been removed
+- Struct `DedicatedHostGroupsListBySubscriptionResponse` has been removed
+- Struct `DedicatedHostGroupsListBySubscriptionResult` has been removed
+- Struct `DedicatedHostGroupsUpdateOptions` has been removed
+- Struct `DedicatedHostGroupsUpdateResponse` has been removed
+- Struct `DedicatedHostGroupsUpdateResult` has been removed
+- Struct `DedicatedHostsBeginCreateOrUpdateOptions` has been removed
+- Struct `DedicatedHostsBeginDeleteOptions` has been removed
+- Struct `DedicatedHostsBeginUpdateOptions` has been removed
+- Struct `DedicatedHostsCreateOrUpdatePoller` has been removed
+- Struct `DedicatedHostsCreateOrUpdatePollerResponse` has been removed
+- Struct `DedicatedHostsCreateOrUpdateResponse` has been removed
+- Struct `DedicatedHostsCreateOrUpdateResult` has been removed
+- Struct `DedicatedHostsDeletePoller` has been removed
+- Struct `DedicatedHostsDeletePollerResponse` has been removed
+- Struct `DedicatedHostsDeleteResponse` has been removed
+- Struct `DedicatedHostsGetOptions` has been removed
+- Struct `DedicatedHostsGetResponse` has been removed
+- Struct `DedicatedHostsGetResult` has been removed
+- Struct `DedicatedHostsListByHostGroupOptions` has been removed
+- Struct `DedicatedHostsListByHostGroupPager` has been removed
+- Struct `DedicatedHostsListByHostGroupResponse` has been removed
+- Struct `DedicatedHostsListByHostGroupResult` has been removed
+- Struct `DedicatedHostsUpdatePoller` has been removed
+- Struct `DedicatedHostsUpdatePollerResponse` has been removed
+- Struct `DedicatedHostsUpdateResponse` has been removed
+- Struct `DedicatedHostsUpdateResult` has been removed
+- Struct `DiskAccessesBeginCreateOrUpdateOptions` has been removed
+- Struct `DiskAccessesBeginDeleteAPrivateEndpointConnectionOptions` has been removed
+- Struct `DiskAccessesBeginDeleteOptions` has been removed
+- Struct `DiskAccessesBeginUpdateAPrivateEndpointConnectionOptions` has been removed
+- Struct `DiskAccessesBeginUpdateOptions` has been removed
+- Struct `DiskAccessesCreateOrUpdatePoller` has been removed
+- Struct `DiskAccessesCreateOrUpdatePollerResponse` has been removed
+- Struct `DiskAccessesCreateOrUpdateResponse` has been removed
+- Struct `DiskAccessesCreateOrUpdateResult` has been removed
+- Struct `DiskAccessesDeleteAPrivateEndpointConnectionPoller` has been removed
+- Struct `DiskAccessesDeleteAPrivateEndpointConnectionPollerResponse` has been removed
+- Struct `DiskAccessesDeleteAPrivateEndpointConnectionResponse` has been removed
+- Struct `DiskAccessesDeletePoller` has been removed
+- Struct `DiskAccessesDeletePollerResponse` has been removed
+- Struct `DiskAccessesDeleteResponse` has been removed
+- Struct `DiskAccessesGetAPrivateEndpointConnectionOptions` has been removed
+- Struct `DiskAccessesGetAPrivateEndpointConnectionResponse` has been removed
+- Struct `DiskAccessesGetAPrivateEndpointConnectionResult` has been removed
+- Struct `DiskAccessesGetOptions` has been removed
+- Struct `DiskAccessesGetPrivateLinkResourcesOptions` has been removed
+- Struct `DiskAccessesGetPrivateLinkResourcesResponse` has been removed
+- Struct `DiskAccessesGetPrivateLinkResourcesResult` has been removed
+- Struct `DiskAccessesGetResponse` has been removed
+- Struct `DiskAccessesGetResult` has been removed
+- Struct `DiskAccessesListByResourceGroupOptions` has been removed
+- Struct `DiskAccessesListByResourceGroupPager` has been removed
+- Struct `DiskAccessesListByResourceGroupResponse` has been removed
+- Struct `DiskAccessesListByResourceGroupResult` has been removed
+- Struct `DiskAccessesListOptions` has been removed
+- Struct `DiskAccessesListPager` has been removed
+- Struct `DiskAccessesListPrivateEndpointConnectionsOptions` has been removed
+- Struct `DiskAccessesListPrivateEndpointConnectionsPager` has been removed
+- Struct `DiskAccessesListPrivateEndpointConnectionsResponse` has been removed
+- Struct `DiskAccessesListPrivateEndpointConnectionsResult` has been removed
+- Struct `DiskAccessesListResponse` has been removed
+- Struct `DiskAccessesListResult` has been removed
+- Struct `DiskAccessesUpdateAPrivateEndpointConnectionPoller` has been removed
+- Struct `DiskAccessesUpdateAPrivateEndpointConnectionPollerResponse` has been removed
+- Struct `DiskAccessesUpdateAPrivateEndpointConnectionResponse` has been removed
+- Struct `DiskAccessesUpdateAPrivateEndpointConnectionResult` has been removed
+- Struct `DiskAccessesUpdatePoller` has been removed
+- Struct `DiskAccessesUpdatePollerResponse` has been removed
+- Struct `DiskAccessesUpdateResponse` has been removed
+- Struct `DiskAccessesUpdateResult` has been removed
+- Struct `DiskEncryptionSetsBeginCreateOrUpdateOptions` has been removed
+- Struct `DiskEncryptionSetsBeginDeleteOptions` has been removed
+- Struct `DiskEncryptionSetsBeginUpdateOptions` has been removed
+- Struct `DiskEncryptionSetsCreateOrUpdatePoller` has been removed
+- Struct `DiskEncryptionSetsCreateOrUpdatePollerResponse` has been removed
+- Struct `DiskEncryptionSetsCreateOrUpdateResponse` has been removed
+- Struct `DiskEncryptionSetsCreateOrUpdateResult` has been removed
+- Struct `DiskEncryptionSetsDeletePoller` has been removed
+- Struct `DiskEncryptionSetsDeletePollerResponse` has been removed
+- Struct `DiskEncryptionSetsDeleteResponse` has been removed
+- Struct `DiskEncryptionSetsGetOptions` has been removed
+- Struct `DiskEncryptionSetsGetResponse` has been removed
+- Struct `DiskEncryptionSetsGetResult` has been removed
+- Struct `DiskEncryptionSetsListAssociatedResourcesOptions` has been removed
+- Struct `DiskEncryptionSetsListAssociatedResourcesPager` has been removed
+- Struct `DiskEncryptionSetsListAssociatedResourcesResponse` has been removed
+- Struct `DiskEncryptionSetsListAssociatedResourcesResult` has been removed
+- Struct `DiskEncryptionSetsListByResourceGroupOptions` has been removed
+- Struct `DiskEncryptionSetsListByResourceGroupPager` has been removed
+- Struct `DiskEncryptionSetsListByResourceGroupResponse` has been removed
+- Struct `DiskEncryptionSetsListByResourceGroupResult` has been removed
+- Struct `DiskEncryptionSetsListOptions` has been removed
+- Struct `DiskEncryptionSetsListPager` has been removed
+- Struct `DiskEncryptionSetsListResponse` has been removed
+- Struct `DiskEncryptionSetsListResult` has been removed
+- Struct `DiskEncryptionSetsUpdatePoller` has been removed
+- Struct `DiskEncryptionSetsUpdatePollerResponse` has been removed
+- Struct `DiskEncryptionSetsUpdateResponse` has been removed
+- Struct `DiskEncryptionSetsUpdateResult` has been removed
+- Struct `DiskRestorePointBeginGrantAccessOptions` has been removed
+- Struct `DiskRestorePointBeginRevokeAccessOptions` has been removed
+- Struct `DiskRestorePointGetOptions` has been removed
+- Struct `DiskRestorePointGetResponse` has been removed
+- Struct `DiskRestorePointGetResult` has been removed
+- Struct `DiskRestorePointGrantAccessPoller` has been removed
+- Struct `DiskRestorePointGrantAccessPollerResponse` has been removed
+- Struct `DiskRestorePointGrantAccessResponse` has been removed
+- Struct `DiskRestorePointGrantAccessResult` has been removed
+- Struct `DiskRestorePointListByRestorePointOptions` has been removed
+- Struct `DiskRestorePointListByRestorePointPager` has been removed
+- Struct `DiskRestorePointListByRestorePointResponse` has been removed
+- Struct `DiskRestorePointListByRestorePointResult` has been removed
+- Struct `DiskRestorePointRevokeAccessPoller` has been removed
+- Struct `DiskRestorePointRevokeAccessPollerResponse` has been removed
+- Struct `DiskRestorePointRevokeAccessResponse` has been removed
+- Struct `DisksBeginCreateOrUpdateOptions` has been removed
+- Struct `DisksBeginDeleteOptions` has been removed
+- Struct `DisksBeginGrantAccessOptions` has been removed
+- Struct `DisksBeginRevokeAccessOptions` has been removed
+- Struct `DisksBeginUpdateOptions` has been removed
+- Struct `DisksCreateOrUpdatePoller` has been removed
+- Struct `DisksCreateOrUpdatePollerResponse` has been removed
+- Struct `DisksCreateOrUpdateResponse` has been removed
+- Struct `DisksCreateOrUpdateResult` has been removed
+- Struct `DisksDeletePoller` has been removed
+- Struct `DisksDeletePollerResponse` has been removed
+- Struct `DisksDeleteResponse` has been removed
+- Struct `DisksGetOptions` has been removed
+- Struct `DisksGetResponse` has been removed
+- Struct `DisksGetResult` has been removed
+- Struct `DisksGrantAccessPoller` has been removed
+- Struct `DisksGrantAccessPollerResponse` has been removed
+- Struct `DisksGrantAccessResponse` has been removed
+- Struct `DisksGrantAccessResult` has been removed
+- Struct `DisksListByResourceGroupOptions` has been removed
+- Struct `DisksListByResourceGroupPager` has been removed
+- Struct `DisksListByResourceGroupResponse` has been removed
+- Struct `DisksListByResourceGroupResult` has been removed
+- Struct `DisksListOptions` has been removed
+- Struct `DisksListPager` has been removed
+- Struct `DisksListResponse` has been removed
+- Struct `DisksListResult` has been removed
+- Struct `DisksRevokeAccessPoller` has been removed
+- Struct `DisksRevokeAccessPollerResponse` has been removed
+- Struct `DisksRevokeAccessResponse` has been removed
+- Struct `DisksUpdatePoller` has been removed
+- Struct `DisksUpdatePollerResponse` has been removed
+- Struct `DisksUpdateResponse` has been removed
+- Struct `DisksUpdateResult` has been removed
+- Struct `GalleriesBeginCreateOrUpdateOptions` has been removed
+- Struct `GalleriesBeginDeleteOptions` has been removed
+- Struct `GalleriesBeginUpdateOptions` has been removed
+- Struct `GalleriesCreateOrUpdatePoller` has been removed
+- Struct `GalleriesCreateOrUpdatePollerResponse` has been removed
+- Struct `GalleriesCreateOrUpdateResponse` has been removed
+- Struct `GalleriesCreateOrUpdateResult` has been removed
+- Struct `GalleriesDeletePoller` has been removed
+- Struct `GalleriesDeletePollerResponse` has been removed
+- Struct `GalleriesDeleteResponse` has been removed
+- Struct `GalleriesGetOptions` has been removed
+- Struct `GalleriesGetResponse` has been removed
+- Struct `GalleriesGetResult` has been removed
+- Struct `GalleriesListByResourceGroupOptions` has been removed
+- Struct `GalleriesListByResourceGroupPager` has been removed
+- Struct `GalleriesListByResourceGroupResponse` has been removed
+- Struct `GalleriesListByResourceGroupResult` has been removed
+- Struct `GalleriesListOptions` has been removed
+- Struct `GalleriesListPager` has been removed
+- Struct `GalleriesListResponse` has been removed
+- Struct `GalleriesListResult` has been removed
+- Struct `GalleriesUpdatePoller` has been removed
+- Struct `GalleriesUpdatePollerResponse` has been removed
+- Struct `GalleriesUpdateResponse` has been removed
+- Struct `GalleriesUpdateResult` has been removed
+- Struct `GalleryApplicationVersionsBeginCreateOrUpdateOptions` has been removed
+- Struct `GalleryApplicationVersionsBeginDeleteOptions` has been removed
+- Struct `GalleryApplicationVersionsBeginUpdateOptions` has been removed
+- Struct `GalleryApplicationVersionsCreateOrUpdatePoller` has been removed
+- Struct `GalleryApplicationVersionsCreateOrUpdatePollerResponse` has been removed
+- Struct `GalleryApplicationVersionsCreateOrUpdateResponse` has been removed
+- Struct `GalleryApplicationVersionsCreateOrUpdateResult` has been removed
+- Struct `GalleryApplicationVersionsDeletePoller` has been removed
+- Struct `GalleryApplicationVersionsDeletePollerResponse` has been removed
+- Struct `GalleryApplicationVersionsDeleteResponse` has been removed
+- Struct `GalleryApplicationVersionsGetOptions` has been removed
+- Struct `GalleryApplicationVersionsGetResponse` has been removed
+- Struct `GalleryApplicationVersionsGetResult` has been removed
+- Struct `GalleryApplicationVersionsListByGalleryApplicationOptions` has been removed
+- Struct `GalleryApplicationVersionsListByGalleryApplicationPager` has been removed
+- Struct `GalleryApplicationVersionsListByGalleryApplicationResponse` has been removed
+- Struct `GalleryApplicationVersionsListByGalleryApplicationResult` has been removed
+- Struct `GalleryApplicationVersionsUpdatePoller` has been removed
+- Struct `GalleryApplicationVersionsUpdatePollerResponse` has been removed
+- Struct `GalleryApplicationVersionsUpdateResponse` has been removed
+- Struct `GalleryApplicationVersionsUpdateResult` has been removed
+- Struct `GalleryApplicationsBeginCreateOrUpdateOptions` has been removed
+- Struct `GalleryApplicationsBeginDeleteOptions` has been removed
+- Struct `GalleryApplicationsBeginUpdateOptions` has been removed
+- Struct `GalleryApplicationsCreateOrUpdatePoller` has been removed
+- Struct `GalleryApplicationsCreateOrUpdatePollerResponse` has been removed
+- Struct `GalleryApplicationsCreateOrUpdateResponse` has been removed
+- Struct `GalleryApplicationsCreateOrUpdateResult` has been removed
+- Struct `GalleryApplicationsDeletePoller` has been removed
+- Struct `GalleryApplicationsDeletePollerResponse` has been removed
+- Struct `GalleryApplicationsDeleteResponse` has been removed
+- Struct `GalleryApplicationsGetOptions` has been removed
+- Struct `GalleryApplicationsGetResponse` has been removed
+- Struct `GalleryApplicationsGetResult` has been removed
+- Struct `GalleryApplicationsListByGalleryOptions` has been removed
+- Struct `GalleryApplicationsListByGalleryPager` has been removed
+- Struct `GalleryApplicationsListByGalleryResponse` has been removed
+- Struct `GalleryApplicationsListByGalleryResult` has been removed
+- Struct `GalleryApplicationsUpdatePoller` has been removed
+- Struct `GalleryApplicationsUpdatePollerResponse` has been removed
+- Struct `GalleryApplicationsUpdateResponse` has been removed
+- Struct `GalleryApplicationsUpdateResult` has been removed
+- Struct `GalleryImageVersionsBeginCreateOrUpdateOptions` has been removed
+- Struct `GalleryImageVersionsBeginDeleteOptions` has been removed
+- Struct `GalleryImageVersionsBeginUpdateOptions` has been removed
+- Struct `GalleryImageVersionsCreateOrUpdatePoller` has been removed
+- Struct `GalleryImageVersionsCreateOrUpdatePollerResponse` has been removed
+- Struct `GalleryImageVersionsCreateOrUpdateResponse` has been removed
+- Struct `GalleryImageVersionsCreateOrUpdateResult` has been removed
+- Struct `GalleryImageVersionsDeletePoller` has been removed
+- Struct `GalleryImageVersionsDeletePollerResponse` has been removed
+- Struct `GalleryImageVersionsDeleteResponse` has been removed
+- Struct `GalleryImageVersionsGetOptions` has been removed
+- Struct `GalleryImageVersionsGetResponse` has been removed
+- Struct `GalleryImageVersionsGetResult` has been removed
+- Struct `GalleryImageVersionsListByGalleryImageOptions` has been removed
+- Struct `GalleryImageVersionsListByGalleryImagePager` has been removed
+- Struct `GalleryImageVersionsListByGalleryImageResponse` has been removed
+- Struct `GalleryImageVersionsListByGalleryImageResult` has been removed
+- Struct `GalleryImageVersionsUpdatePoller` has been removed
+- Struct `GalleryImageVersionsUpdatePollerResponse` has been removed
+- Struct `GalleryImageVersionsUpdateResponse` has been removed
+- Struct `GalleryImageVersionsUpdateResult` has been removed
+- Struct `GalleryImagesBeginCreateOrUpdateOptions` has been removed
+- Struct `GalleryImagesBeginDeleteOptions` has been removed
+- Struct `GalleryImagesBeginUpdateOptions` has been removed
+- Struct `GalleryImagesCreateOrUpdatePoller` has been removed
+- Struct `GalleryImagesCreateOrUpdatePollerResponse` has been removed
+- Struct `GalleryImagesCreateOrUpdateResponse` has been removed
+- Struct `GalleryImagesCreateOrUpdateResult` has been removed
+- Struct `GalleryImagesDeletePoller` has been removed
+- Struct `GalleryImagesDeletePollerResponse` has been removed
+- Struct `GalleryImagesDeleteResponse` has been removed
+- Struct `GalleryImagesGetOptions` has been removed
+- Struct `GalleryImagesGetResponse` has been removed
+- Struct `GalleryImagesGetResult` has been removed
+- Struct `GalleryImagesListByGalleryOptions` has been removed
+- Struct `GalleryImagesListByGalleryPager` has been removed
+- Struct `GalleryImagesListByGalleryResponse` has been removed
+- Struct `GalleryImagesListByGalleryResult` has been removed
+- Struct `GalleryImagesUpdatePoller` has been removed
+- Struct `GalleryImagesUpdatePollerResponse` has been removed
+- Struct `GalleryImagesUpdateResponse` has been removed
+- Struct `GalleryImagesUpdateResult` has been removed
+- Struct `GallerySharingProfileBeginUpdateOptions` has been removed
+- Struct `GallerySharingProfileUpdatePoller` has been removed
+- Struct `GallerySharingProfileUpdatePollerResponse` has been removed
+- Struct `GallerySharingProfileUpdateResponse` has been removed
+- Struct `GallerySharingProfileUpdateResult` has been removed
+- Struct `ImagesBeginCreateOrUpdateOptions` has been removed
+- Struct `ImagesBeginDeleteOptions` has been removed
+- Struct `ImagesBeginUpdateOptions` has been removed
+- Struct `ImagesCreateOrUpdatePoller` has been removed
+- Struct `ImagesCreateOrUpdatePollerResponse` has been removed
+- Struct `ImagesCreateOrUpdateResponse` has been removed
+- Struct `ImagesCreateOrUpdateResult` has been removed
+- Struct `ImagesDeletePoller` has been removed
+- Struct `ImagesDeletePollerResponse` has been removed
+- Struct `ImagesDeleteResponse` has been removed
+- Struct `ImagesGetOptions` has been removed
+- Struct `ImagesGetResponse` has been removed
+- Struct `ImagesGetResult` has been removed
+- Struct `ImagesListByResourceGroupOptions` has been removed
+- Struct `ImagesListByResourceGroupPager` has been removed
+- Struct `ImagesListByResourceGroupResponse` has been removed
+- Struct `ImagesListByResourceGroupResult` has been removed
+- Struct `ImagesListOptions` has been removed
+- Struct `ImagesListPager` has been removed
+- Struct `ImagesListResponse` has been removed
+- Struct `ImagesListResult` has been removed
+- Struct `ImagesUpdatePoller` has been removed
+- Struct `ImagesUpdatePollerResponse` has been removed
+- Struct `ImagesUpdateResponse` has been removed
+- Struct `ImagesUpdateResult` has been removed
+- Struct `LogAnalyticsBeginExportRequestRateByIntervalOptions` has been removed
+- Struct `LogAnalyticsBeginExportThrottledRequestsOptions` has been removed
+- Struct `LogAnalyticsExportRequestRateByIntervalPoller` has been removed
+- Struct `LogAnalyticsExportRequestRateByIntervalPollerResponse` has been removed
+- Struct `LogAnalyticsExportRequestRateByIntervalResponse` has been removed
+- Struct `LogAnalyticsExportRequestRateByIntervalResult` has been removed
+- Struct `LogAnalyticsExportThrottledRequestsPoller` has been removed
+- Struct `LogAnalyticsExportThrottledRequestsPollerResponse` has been removed
+- Struct `LogAnalyticsExportThrottledRequestsResponse` has been removed
+- Struct `LogAnalyticsExportThrottledRequestsResult` has been removed
+- Struct `OperationsListOptions` has been removed
+- Struct `OperationsListResponse` has been removed
+- Struct `OperationsListResult` has been removed
+- Struct `ProximityPlacementGroupsCreateOrUpdateOptions` has been removed
+- Struct `ProximityPlacementGroupsCreateOrUpdateResponse` has been removed
+- Struct `ProximityPlacementGroupsCreateOrUpdateResult` has been removed
+- Struct `ProximityPlacementGroupsDeleteOptions` has been removed
+- Struct `ProximityPlacementGroupsDeleteResponse` has been removed
+- Struct `ProximityPlacementGroupsGetOptions` has been removed
+- Struct `ProximityPlacementGroupsGetResponse` has been removed
+- Struct `ProximityPlacementGroupsGetResult` has been removed
+- Struct `ProximityPlacementGroupsListByResourceGroupOptions` has been removed
+- Struct `ProximityPlacementGroupsListByResourceGroupPager` has been removed
+- Struct `ProximityPlacementGroupsListByResourceGroupResponse` has been removed
+- Struct `ProximityPlacementGroupsListByResourceGroupResult` has been removed
+- Struct `ProximityPlacementGroupsListBySubscriptionOptions` has been removed
+- Struct `ProximityPlacementGroupsListBySubscriptionPager` has been removed
+- Struct `ProximityPlacementGroupsListBySubscriptionResponse` has been removed
+- Struct `ProximityPlacementGroupsListBySubscriptionResult` has been removed
+- Struct `ProximityPlacementGroupsUpdateOptions` has been removed
+- Struct `ProximityPlacementGroupsUpdateResponse` has been removed
+- Struct `ProximityPlacementGroupsUpdateResult` has been removed
+- Struct `ResourceSKUsListOptions` has been removed
+- Struct `ResourceSKUsListPager` has been removed
+- Struct `ResourceSKUsListResponse` has been removed
+- Struct `ResourceSKUsListResult` has been removed
+- Struct `RestorePointCollectionsBeginDeleteOptions` has been removed
+- Struct `RestorePointCollectionsCreateOrUpdateOptions` has been removed
+- Struct `RestorePointCollectionsCreateOrUpdateResponse` has been removed
+- Struct `RestorePointCollectionsCreateOrUpdateResult` has been removed
+- Struct `RestorePointCollectionsDeletePoller` has been removed
+- Struct `RestorePointCollectionsDeletePollerResponse` has been removed
+- Struct `RestorePointCollectionsDeleteResponse` has been removed
+- Struct `RestorePointCollectionsGetOptions` has been removed
+- Struct `RestorePointCollectionsGetResponse` has been removed
+- Struct `RestorePointCollectionsGetResult` has been removed
+- Struct `RestorePointCollectionsListAllOptions` has been removed
+- Struct `RestorePointCollectionsListAllPager` has been removed
+- Struct `RestorePointCollectionsListAllResponse` has been removed
+- Struct `RestorePointCollectionsListAllResult` has been removed
+- Struct `RestorePointCollectionsListOptions` has been removed
+- Struct `RestorePointCollectionsListPager` has been removed
+- Struct `RestorePointCollectionsListResponse` has been removed
+- Struct `RestorePointCollectionsListResult` has been removed
+- Struct `RestorePointCollectionsUpdateOptions` has been removed
+- Struct `RestorePointCollectionsUpdateResponse` has been removed
+- Struct `RestorePointCollectionsUpdateResult` has been removed
+- Struct `RestorePointsBeginCreateOptions` has been removed
+- Struct `RestorePointsBeginDeleteOptions` has been removed
+- Struct `RestorePointsCreatePoller` has been removed
+- Struct `RestorePointsCreatePollerResponse` has been removed
+- Struct `RestorePointsCreateResponse` has been removed
+- Struct `RestorePointsCreateResult` has been removed
+- Struct `RestorePointsDeletePoller` has been removed
+- Struct `RestorePointsDeletePollerResponse` has been removed
+- Struct `RestorePointsDeleteResponse` has been removed
+- Struct `RestorePointsGetOptions` has been removed
+- Struct `RestorePointsGetResponse` has been removed
+- Struct `RestorePointsGetResult` has been removed
+- Struct `SSHPublicKeysCreateOptions` has been removed
+- Struct `SSHPublicKeysCreateResponse` has been removed
+- Struct `SSHPublicKeysCreateResult` has been removed
+- Struct `SSHPublicKeysDeleteOptions` has been removed
+- Struct `SSHPublicKeysDeleteResponse` has been removed
+- Struct `SSHPublicKeysGenerateKeyPairOptions` has been removed
+- Struct `SSHPublicKeysGenerateKeyPairResponse` has been removed
+- Struct `SSHPublicKeysGenerateKeyPairResult` has been removed
+- Struct `SSHPublicKeysGetOptions` has been removed
+- Struct `SSHPublicKeysGetResponse` has been removed
+- Struct `SSHPublicKeysGetResult` has been removed
+- Struct `SSHPublicKeysListByResourceGroupOptions` has been removed
+- Struct `SSHPublicKeysListByResourceGroupPager` has been removed
+- Struct `SSHPublicKeysListByResourceGroupResponse` has been removed
+- Struct `SSHPublicKeysListByResourceGroupResult` has been removed
+- Struct `SSHPublicKeysListBySubscriptionOptions` has been removed
+- Struct `SSHPublicKeysListBySubscriptionPager` has been removed
+- Struct `SSHPublicKeysListBySubscriptionResponse` has been removed
+- Struct `SSHPublicKeysListBySubscriptionResult` has been removed
+- Struct `SSHPublicKeysUpdateOptions` has been removed
+- Struct `SSHPublicKeysUpdateResponse` has been removed
+- Struct `SSHPublicKeysUpdateResult` has been removed
+- Struct `SharedGalleriesGetOptions` has been removed
+- Struct `SharedGalleriesGetResponse` has been removed
+- Struct `SharedGalleriesGetResult` has been removed
+- Struct `SharedGalleriesListOptions` has been removed
+- Struct `SharedGalleriesListPager` has been removed
+- Struct `SharedGalleriesListResponse` has been removed
+- Struct `SharedGalleriesListResult` has been removed
+- Struct `SharedGalleryImageVersionsGetOptions` has been removed
+- Struct `SharedGalleryImageVersionsGetResponse` has been removed
+- Struct `SharedGalleryImageVersionsGetResult` has been removed
+- Struct `SharedGalleryImageVersionsListOptions` has been removed
+- Struct `SharedGalleryImageVersionsListPager` has been removed
+- Struct `SharedGalleryImageVersionsListResponse` has been removed
+- Struct `SharedGalleryImageVersionsListResult` has been removed
+- Struct `SharedGalleryImagesGetOptions` has been removed
+- Struct `SharedGalleryImagesGetResponse` has been removed
+- Struct `SharedGalleryImagesGetResult` has been removed
+- Struct `SharedGalleryImagesListOptions` has been removed
+- Struct `SharedGalleryImagesListPager` has been removed
+- Struct `SharedGalleryImagesListResponse` has been removed
+- Struct `SharedGalleryImagesListResult` has been removed
+- Struct `SnapshotsBeginCreateOrUpdateOptions` has been removed
+- Struct `SnapshotsBeginDeleteOptions` has been removed
+- Struct `SnapshotsBeginGrantAccessOptions` has been removed
+- Struct `SnapshotsBeginRevokeAccessOptions` has been removed
+- Struct `SnapshotsBeginUpdateOptions` has been removed
+- Struct `SnapshotsCreateOrUpdatePoller` has been removed
+- Struct `SnapshotsCreateOrUpdatePollerResponse` has been removed
+- Struct `SnapshotsCreateOrUpdateResponse` has been removed
+- Struct `SnapshotsCreateOrUpdateResult` has been removed
+- Struct `SnapshotsDeletePoller` has been removed
+- Struct `SnapshotsDeletePollerResponse` has been removed
+- Struct `SnapshotsDeleteResponse` has been removed
+- Struct `SnapshotsGetOptions` has been removed
+- Struct `SnapshotsGetResponse` has been removed
+- Struct `SnapshotsGetResult` has been removed
+- Struct `SnapshotsGrantAccessPoller` has been removed
+- Struct `SnapshotsGrantAccessPollerResponse` has been removed
+- Struct `SnapshotsGrantAccessResponse` has been removed
+- Struct `SnapshotsGrantAccessResult` has been removed
+- Struct `SnapshotsListByResourceGroupOptions` has been removed
+- Struct `SnapshotsListByResourceGroupPager` has been removed
+- Struct `SnapshotsListByResourceGroupResponse` has been removed
+- Struct `SnapshotsListByResourceGroupResult` has been removed
+- Struct `SnapshotsListOptions` has been removed
+- Struct `SnapshotsListPager` has been removed
+- Struct `SnapshotsListResponse` has been removed
+- Struct `SnapshotsListResult` has been removed
+- Struct `SnapshotsRevokeAccessPoller` has been removed
+- Struct `SnapshotsRevokeAccessPollerResponse` has been removed
+- Struct `SnapshotsRevokeAccessResponse` has been removed
+- Struct `SnapshotsUpdatePoller` has been removed
+- Struct `SnapshotsUpdatePollerResponse` has been removed
+- Struct `SnapshotsUpdateResponse` has been removed
+- Struct `SnapshotsUpdateResult` has been removed
+- Struct `UsageListOptions` has been removed
+- Struct `UsageListPager` has been removed
+- Struct `UsageListResponse` has been removed
+- Struct `UsageListResult` has been removed
+- Struct `VirtualMachineExtensionImagesGetOptions` has been removed
+- Struct `VirtualMachineExtensionImagesGetResponse` has been removed
+- Struct `VirtualMachineExtensionImagesGetResult` has been removed
+- Struct `VirtualMachineExtensionImagesListTypesOptions` has been removed
+- Struct `VirtualMachineExtensionImagesListTypesResponse` has been removed
+- Struct `VirtualMachineExtensionImagesListTypesResult` has been removed
+- Struct `VirtualMachineExtensionImagesListVersionsOptions` has been removed
+- Struct `VirtualMachineExtensionImagesListVersionsResponse` has been removed
+- Struct `VirtualMachineExtensionImagesListVersionsResult` has been removed
+- Struct `VirtualMachineExtensionsBeginCreateOrUpdateOptions` has been removed
+- Struct `VirtualMachineExtensionsBeginDeleteOptions` has been removed
+- Struct `VirtualMachineExtensionsBeginUpdateOptions` has been removed
+- Struct `VirtualMachineExtensionsCreateOrUpdatePoller` has been removed
+- Struct `VirtualMachineExtensionsCreateOrUpdatePollerResponse` has been removed
+- Struct `VirtualMachineExtensionsCreateOrUpdateResponse` has been removed
+- Struct `VirtualMachineExtensionsCreateOrUpdateResult` has been removed
+- Struct `VirtualMachineExtensionsDeletePoller` has been removed
+- Struct `VirtualMachineExtensionsDeletePollerResponse` has been removed
+- Struct `VirtualMachineExtensionsDeleteResponse` has been removed
+- Struct `VirtualMachineExtensionsGetOptions` has been removed
+- Struct `VirtualMachineExtensionsGetResponse` has been removed
+- Struct `VirtualMachineExtensionsGetResult` has been removed
+- Struct `VirtualMachineExtensionsListOptions` has been removed
+- Struct `VirtualMachineExtensionsListResponse` has been removed
+- Struct `VirtualMachineExtensionsListResultEnvelope` has been removed
+- Struct `VirtualMachineExtensionsUpdatePoller` has been removed
+- Struct `VirtualMachineExtensionsUpdatePollerResponse` has been removed
+- Struct `VirtualMachineExtensionsUpdateResponse` has been removed
+- Struct `VirtualMachineExtensionsUpdateResult` has been removed
+- Struct `VirtualMachineImagesEdgeZoneGetOptions` has been removed
+- Struct `VirtualMachineImagesEdgeZoneGetResponse` has been removed
+- Struct `VirtualMachineImagesEdgeZoneGetResult` has been removed
+- Struct `VirtualMachineImagesEdgeZoneListOffersOptions` has been removed
+- Struct `VirtualMachineImagesEdgeZoneListOffersResponse` has been removed
+- Struct `VirtualMachineImagesEdgeZoneListOffersResult` has been removed
+- Struct `VirtualMachineImagesEdgeZoneListOptions` has been removed
+- Struct `VirtualMachineImagesEdgeZoneListPublishersOptions` has been removed
+- Struct `VirtualMachineImagesEdgeZoneListPublishersResponse` has been removed
+- Struct `VirtualMachineImagesEdgeZoneListPublishersResult` has been removed
+- Struct `VirtualMachineImagesEdgeZoneListResponse` has been removed
+- Struct `VirtualMachineImagesEdgeZoneListResult` has been removed
+- Struct `VirtualMachineImagesEdgeZoneListSKUsOptions` has been removed
+- Struct `VirtualMachineImagesEdgeZoneListSKUsResponse` has been removed
+- Struct `VirtualMachineImagesEdgeZoneListSKUsResult` has been removed
+- Struct `VirtualMachineImagesGetOptions` has been removed
+- Struct `VirtualMachineImagesGetResponse` has been removed
+- Struct `VirtualMachineImagesGetResult` has been removed
+- Struct `VirtualMachineImagesListOffersOptions` has been removed
+- Struct `VirtualMachineImagesListOffersResponse` has been removed
+- Struct `VirtualMachineImagesListOffersResult` has been removed
+- Struct `VirtualMachineImagesListOptions` has been removed
+- Struct `VirtualMachineImagesListPublishersOptions` has been removed
+- Struct `VirtualMachineImagesListPublishersResponse` has been removed
+- Struct `VirtualMachineImagesListPublishersResult` has been removed
+- Struct `VirtualMachineImagesListResponse` has been removed
+- Struct `VirtualMachineImagesListResult` has been removed
+- Struct `VirtualMachineImagesListSKUsOptions` has been removed
+- Struct `VirtualMachineImagesListSKUsResponse` has been removed
+- Struct `VirtualMachineImagesListSKUsResult` has been removed
+- Struct `VirtualMachineRunCommandsBeginCreateOrUpdateOptions` has been removed
+- Struct `VirtualMachineRunCommandsBeginDeleteOptions` has been removed
+- Struct `VirtualMachineRunCommandsBeginUpdateOptions` has been removed
+- Struct `VirtualMachineRunCommandsCreateOrUpdatePoller` has been removed
+- Struct `VirtualMachineRunCommandsCreateOrUpdatePollerResponse` has been removed
+- Struct `VirtualMachineRunCommandsCreateOrUpdateResponse` has been removed
+- Struct `VirtualMachineRunCommandsCreateOrUpdateResult` has been removed
+- Struct `VirtualMachineRunCommandsDeletePoller` has been removed
+- Struct `VirtualMachineRunCommandsDeletePollerResponse` has been removed
+- Struct `VirtualMachineRunCommandsDeleteResponse` has been removed
+- Struct `VirtualMachineRunCommandsGetByVirtualMachineOptions` has been removed
+- Struct `VirtualMachineRunCommandsGetByVirtualMachineResponse` has been removed
+- Struct `VirtualMachineRunCommandsGetByVirtualMachineResult` has been removed
+- Struct `VirtualMachineRunCommandsGetOptions` has been removed
+- Struct `VirtualMachineRunCommandsGetResponse` has been removed
+- Struct `VirtualMachineRunCommandsGetResult` has been removed
+- Struct `VirtualMachineRunCommandsListByVirtualMachineOptions` has been removed
+- Struct `VirtualMachineRunCommandsListByVirtualMachinePager` has been removed
+- Struct `VirtualMachineRunCommandsListByVirtualMachineResponse` has been removed
+- Struct `VirtualMachineRunCommandsListByVirtualMachineResult` has been removed
+- Struct `VirtualMachineRunCommandsListOptions` has been removed
+- Struct `VirtualMachineRunCommandsListPager` has been removed
+- Struct `VirtualMachineRunCommandsListResponse` has been removed
+- Struct `VirtualMachineRunCommandsListResultEnvelope` has been removed
+- Struct `VirtualMachineRunCommandsUpdatePoller` has been removed
+- Struct `VirtualMachineRunCommandsUpdatePollerResponse` has been removed
+- Struct `VirtualMachineRunCommandsUpdateResponse` has been removed
+- Struct `VirtualMachineRunCommandsUpdateResult` has been removed
+- Struct `VirtualMachineScaleSetExtensionsBeginCreateOrUpdateOptions` has been removed
+- Struct `VirtualMachineScaleSetExtensionsBeginDeleteOptions` has been removed
+- Struct `VirtualMachineScaleSetExtensionsBeginUpdateOptions` has been removed
+- Struct `VirtualMachineScaleSetExtensionsCreateOrUpdatePoller` has been removed
+- Struct `VirtualMachineScaleSetExtensionsCreateOrUpdatePollerResponse` has been removed
+- Struct `VirtualMachineScaleSetExtensionsCreateOrUpdateResponse` has been removed
+- Struct `VirtualMachineScaleSetExtensionsCreateOrUpdateResult` has been removed
+- Struct `VirtualMachineScaleSetExtensionsDeletePoller` has been removed
+- Struct `VirtualMachineScaleSetExtensionsDeletePollerResponse` has been removed
+- Struct `VirtualMachineScaleSetExtensionsDeleteResponse` has been removed
+- Struct `VirtualMachineScaleSetExtensionsGetOptions` has been removed
+- Struct `VirtualMachineScaleSetExtensionsGetResponse` has been removed
+- Struct `VirtualMachineScaleSetExtensionsGetResult` has been removed
+- Struct `VirtualMachineScaleSetExtensionsListOptions` has been removed
+- Struct `VirtualMachineScaleSetExtensionsListPager` has been removed
+- Struct
`VirtualMachineScaleSetExtensionsListResponse` has been removed +- Struct `VirtualMachineScaleSetExtensionsListResult` has been removed +- Struct `VirtualMachineScaleSetExtensionsUpdatePoller` has been removed +- Struct `VirtualMachineScaleSetExtensionsUpdatePollerResponse` has been removed +- Struct `VirtualMachineScaleSetExtensionsUpdateResponse` has been removed +- Struct `VirtualMachineScaleSetExtensionsUpdateResult` has been removed +- Struct `VirtualMachineScaleSetRollingUpgradesBeginCancelOptions` has been removed +- Struct `VirtualMachineScaleSetRollingUpgradesBeginStartExtensionUpgradeOptions` has been removed +- Struct `VirtualMachineScaleSetRollingUpgradesBeginStartOSUpgradeOptions` has been removed +- Struct `VirtualMachineScaleSetRollingUpgradesCancelPoller` has been removed +- Struct `VirtualMachineScaleSetRollingUpgradesCancelPollerResponse` has been removed +- Struct `VirtualMachineScaleSetRollingUpgradesCancelResponse` has been removed +- Struct `VirtualMachineScaleSetRollingUpgradesGetLatestOptions` has been removed +- Struct `VirtualMachineScaleSetRollingUpgradesGetLatestResponse` has been removed +- Struct `VirtualMachineScaleSetRollingUpgradesGetLatestResult` has been removed +- Struct `VirtualMachineScaleSetRollingUpgradesStartExtensionUpgradePoller` has been removed +- Struct `VirtualMachineScaleSetRollingUpgradesStartExtensionUpgradePollerResponse` has been removed +- Struct `VirtualMachineScaleSetRollingUpgradesStartExtensionUpgradeResponse` has been removed +- Struct `VirtualMachineScaleSetRollingUpgradesStartOSUpgradePoller` has been removed +- Struct `VirtualMachineScaleSetRollingUpgradesStartOSUpgradePollerResponse` has been removed +- Struct `VirtualMachineScaleSetRollingUpgradesStartOSUpgradeResponse` has been removed +- Struct `VirtualMachineScaleSetVMExtensionsBeginCreateOrUpdateOptions` has been removed +- Struct `VirtualMachineScaleSetVMExtensionsBeginDeleteOptions` has been removed +- Struct `VirtualMachineScaleSetVMExtensionsBeginUpdateOptions` has been removed +- Struct `VirtualMachineScaleSetVMExtensionsCreateOrUpdatePoller` has been removed +- Struct `VirtualMachineScaleSetVMExtensionsCreateOrUpdatePollerResponse` has been removed +- Struct `VirtualMachineScaleSetVMExtensionsCreateOrUpdateResponse` has been removed +- Struct `VirtualMachineScaleSetVMExtensionsCreateOrUpdateResult` has been removed +- Struct `VirtualMachineScaleSetVMExtensionsDeletePoller` has been removed +- Struct `VirtualMachineScaleSetVMExtensionsDeletePollerResponse` has been removed +- Struct `VirtualMachineScaleSetVMExtensionsDeleteResponse` has been removed +- Struct `VirtualMachineScaleSetVMExtensionsGetOptions` has been removed +- Struct `VirtualMachineScaleSetVMExtensionsGetResponse` has been removed +- Struct `VirtualMachineScaleSetVMExtensionsGetResult` has been removed +- Struct `VirtualMachineScaleSetVMExtensionsListOptions` has been removed +- Struct `VirtualMachineScaleSetVMExtensionsListResponse` has been removed +- Struct `VirtualMachineScaleSetVMExtensionsListResultEnvelope` has been removed +- Struct `VirtualMachineScaleSetVMExtensionsUpdatePoller` has been removed +- Struct `VirtualMachineScaleSetVMExtensionsUpdatePollerResponse` has been removed +- Struct `VirtualMachineScaleSetVMExtensionsUpdateResponse` has been removed +- Struct `VirtualMachineScaleSetVMExtensionsUpdateResult` has been removed +- Struct `VirtualMachineScaleSetVMRunCommandsBeginCreateOrUpdateOptions` has been removed +- Struct `VirtualMachineScaleSetVMRunCommandsBeginDeleteOptions` has been 
removed +- Struct `VirtualMachineScaleSetVMRunCommandsBeginUpdateOptions` has been removed +- Struct `VirtualMachineScaleSetVMRunCommandsCreateOrUpdatePoller` has been removed +- Struct `VirtualMachineScaleSetVMRunCommandsCreateOrUpdatePollerResponse` has been removed +- Struct `VirtualMachineScaleSetVMRunCommandsCreateOrUpdateResponse` has been removed +- Struct `VirtualMachineScaleSetVMRunCommandsCreateOrUpdateResult` has been removed +- Struct `VirtualMachineScaleSetVMRunCommandsDeletePoller` has been removed +- Struct `VirtualMachineScaleSetVMRunCommandsDeletePollerResponse` has been removed +- Struct `VirtualMachineScaleSetVMRunCommandsDeleteResponse` has been removed +- Struct `VirtualMachineScaleSetVMRunCommandsGetOptions` has been removed +- Struct `VirtualMachineScaleSetVMRunCommandsGetResponse` has been removed +- Struct `VirtualMachineScaleSetVMRunCommandsGetResult` has been removed +- Struct `VirtualMachineScaleSetVMRunCommandsListOptions` has been removed +- Struct `VirtualMachineScaleSetVMRunCommandsListPager` has been removed +- Struct `VirtualMachineScaleSetVMRunCommandsListResponse` has been removed +- Struct `VirtualMachineScaleSetVMRunCommandsListResult` has been removed +- Struct `VirtualMachineScaleSetVMRunCommandsUpdatePoller` has been removed +- Struct `VirtualMachineScaleSetVMRunCommandsUpdatePollerResponse` has been removed +- Struct `VirtualMachineScaleSetVMRunCommandsUpdateResponse` has been removed +- Struct `VirtualMachineScaleSetVMRunCommandsUpdateResult` has been removed +- Struct `VirtualMachineScaleSetVMsBeginDeallocateOptions` has been removed +- Struct `VirtualMachineScaleSetVMsBeginDeleteOptions` has been removed +- Struct `VirtualMachineScaleSetVMsBeginPerformMaintenanceOptions` has been removed +- Struct `VirtualMachineScaleSetVMsBeginPowerOffOptions` has been removed +- Struct `VirtualMachineScaleSetVMsBeginRedeployOptions` has been removed +- Struct `VirtualMachineScaleSetVMsBeginReimageAllOptions` has been removed +- Struct `VirtualMachineScaleSetVMsBeginReimageOptions` has been removed +- Struct `VirtualMachineScaleSetVMsBeginRestartOptions` has been removed +- Struct `VirtualMachineScaleSetVMsBeginRunCommandOptions` has been removed +- Struct `VirtualMachineScaleSetVMsBeginStartOptions` has been removed +- Struct `VirtualMachineScaleSetVMsBeginUpdateOptions` has been removed +- Struct `VirtualMachineScaleSetVMsDeallocatePoller` has been removed +- Struct `VirtualMachineScaleSetVMsDeallocatePollerResponse` has been removed +- Struct `VirtualMachineScaleSetVMsDeallocateResponse` has been removed +- Struct `VirtualMachineScaleSetVMsDeletePoller` has been removed +- Struct `VirtualMachineScaleSetVMsDeletePollerResponse` has been removed +- Struct `VirtualMachineScaleSetVMsDeleteResponse` has been removed +- Struct `VirtualMachineScaleSetVMsGetInstanceViewOptions` has been removed +- Struct `VirtualMachineScaleSetVMsGetInstanceViewResponse` has been removed +- Struct `VirtualMachineScaleSetVMsGetInstanceViewResult` has been removed +- Struct `VirtualMachineScaleSetVMsGetOptions` has been removed +- Struct `VirtualMachineScaleSetVMsGetResponse` has been removed +- Struct `VirtualMachineScaleSetVMsGetResult` has been removed +- Struct `VirtualMachineScaleSetVMsListOptions` has been removed +- Struct `VirtualMachineScaleSetVMsListPager` has been removed +- Struct `VirtualMachineScaleSetVMsListResponse` has been removed +- Struct `VirtualMachineScaleSetVMsListResult` has been removed +- Struct `VirtualMachineScaleSetVMsPerformMaintenancePoller` has been 
removed +- Struct `VirtualMachineScaleSetVMsPerformMaintenancePollerResponse` has been removed +- Struct `VirtualMachineScaleSetVMsPerformMaintenanceResponse` has been removed +- Struct `VirtualMachineScaleSetVMsPowerOffPoller` has been removed +- Struct `VirtualMachineScaleSetVMsPowerOffPollerResponse` has been removed +- Struct `VirtualMachineScaleSetVMsPowerOffResponse` has been removed +- Struct `VirtualMachineScaleSetVMsRedeployPoller` has been removed +- Struct `VirtualMachineScaleSetVMsRedeployPollerResponse` has been removed +- Struct `VirtualMachineScaleSetVMsRedeployResponse` has been removed +- Struct `VirtualMachineScaleSetVMsReimageAllPoller` has been removed +- Struct `VirtualMachineScaleSetVMsReimageAllPollerResponse` has been removed +- Struct `VirtualMachineScaleSetVMsReimageAllResponse` has been removed +- Struct `VirtualMachineScaleSetVMsReimagePoller` has been removed +- Struct `VirtualMachineScaleSetVMsReimagePollerResponse` has been removed +- Struct `VirtualMachineScaleSetVMsReimageResponse` has been removed +- Struct `VirtualMachineScaleSetVMsRestartPoller` has been removed +- Struct `VirtualMachineScaleSetVMsRestartPollerResponse` has been removed +- Struct `VirtualMachineScaleSetVMsRestartResponse` has been removed +- Struct `VirtualMachineScaleSetVMsRetrieveBootDiagnosticsDataOptions` has been removed +- Struct `VirtualMachineScaleSetVMsRetrieveBootDiagnosticsDataResponse` has been removed +- Struct `VirtualMachineScaleSetVMsRetrieveBootDiagnosticsDataResult` has been removed +- Struct `VirtualMachineScaleSetVMsRunCommandPoller` has been removed +- Struct `VirtualMachineScaleSetVMsRunCommandPollerResponse` has been removed +- Struct `VirtualMachineScaleSetVMsRunCommandResponse` has been removed +- Struct `VirtualMachineScaleSetVMsRunCommandResult` has been removed +- Struct `VirtualMachineScaleSetVMsSimulateEvictionOptions` has been removed +- Struct `VirtualMachineScaleSetVMsSimulateEvictionResponse` has been removed +- Struct `VirtualMachineScaleSetVMsStartPoller` has been removed +- Struct `VirtualMachineScaleSetVMsStartPollerResponse` has been removed +- Struct `VirtualMachineScaleSetVMsStartResponse` has been removed +- Struct `VirtualMachineScaleSetVMsUpdatePoller` has been removed +- Struct `VirtualMachineScaleSetVMsUpdatePollerResponse` has been removed +- Struct `VirtualMachineScaleSetVMsUpdateResponse` has been removed +- Struct `VirtualMachineScaleSetVMsUpdateResult` has been removed +- Struct `VirtualMachineScaleSetsBeginCreateOrUpdateOptions` has been removed +- Struct `VirtualMachineScaleSetsBeginDeallocateOptions` has been removed +- Struct `VirtualMachineScaleSetsBeginDeleteInstancesOptions` has been removed +- Struct `VirtualMachineScaleSetsBeginDeleteOptions` has been removed +- Struct `VirtualMachineScaleSetsBeginPerformMaintenanceOptions` has been removed +- Struct `VirtualMachineScaleSetsBeginPowerOffOptions` has been removed +- Struct `VirtualMachineScaleSetsBeginRedeployOptions` has been removed +- Struct `VirtualMachineScaleSetsBeginReimageAllOptions` has been removed +- Struct `VirtualMachineScaleSetsBeginReimageOptions` has been removed +- Struct `VirtualMachineScaleSetsBeginRestartOptions` has been removed +- Struct `VirtualMachineScaleSetsBeginSetOrchestrationServiceStateOptions` has been removed +- Struct `VirtualMachineScaleSetsBeginStartOptions` has been removed +- Struct `VirtualMachineScaleSetsBeginUpdateInstancesOptions` has been removed +- Struct `VirtualMachineScaleSetsBeginUpdateOptions` has been removed +- Struct 
`VirtualMachineScaleSetsConvertToSinglePlacementGroupOptions` has been removed +- Struct `VirtualMachineScaleSetsConvertToSinglePlacementGroupResponse` has been removed +- Struct `VirtualMachineScaleSetsCreateOrUpdatePoller` has been removed +- Struct `VirtualMachineScaleSetsCreateOrUpdatePollerResponse` has been removed +- Struct `VirtualMachineScaleSetsCreateOrUpdateResponse` has been removed +- Struct `VirtualMachineScaleSetsCreateOrUpdateResult` has been removed +- Struct `VirtualMachineScaleSetsDeallocatePoller` has been removed +- Struct `VirtualMachineScaleSetsDeallocatePollerResponse` has been removed +- Struct `VirtualMachineScaleSetsDeallocateResponse` has been removed +- Struct `VirtualMachineScaleSetsDeleteInstancesPoller` has been removed +- Struct `VirtualMachineScaleSetsDeleteInstancesPollerResponse` has been removed +- Struct `VirtualMachineScaleSetsDeleteInstancesResponse` has been removed +- Struct `VirtualMachineScaleSetsDeletePoller` has been removed +- Struct `VirtualMachineScaleSetsDeletePollerResponse` has been removed +- Struct `VirtualMachineScaleSetsDeleteResponse` has been removed +- Struct `VirtualMachineScaleSetsForceRecoveryServiceFabricPlatformUpdateDomainWalkOptions` has been removed +- Struct `VirtualMachineScaleSetsForceRecoveryServiceFabricPlatformUpdateDomainWalkResponse` has been removed +- Struct `VirtualMachineScaleSetsForceRecoveryServiceFabricPlatformUpdateDomainWalkResult` has been removed +- Struct `VirtualMachineScaleSetsGetInstanceViewOptions` has been removed +- Struct `VirtualMachineScaleSetsGetInstanceViewResponse` has been removed +- Struct `VirtualMachineScaleSetsGetInstanceViewResult` has been removed +- Struct `VirtualMachineScaleSetsGetOSUpgradeHistoryOptions` has been removed +- Struct `VirtualMachineScaleSetsGetOSUpgradeHistoryPager` has been removed +- Struct `VirtualMachineScaleSetsGetOSUpgradeHistoryResponse` has been removed +- Struct `VirtualMachineScaleSetsGetOSUpgradeHistoryResult` has been removed +- Struct `VirtualMachineScaleSetsGetOptions` has been removed +- Struct `VirtualMachineScaleSetsGetResponse` has been removed +- Struct `VirtualMachineScaleSetsGetResult` has been removed +- Struct `VirtualMachineScaleSetsListAllOptions` has been removed +- Struct `VirtualMachineScaleSetsListAllPager` has been removed +- Struct `VirtualMachineScaleSetsListAllResponse` has been removed +- Struct `VirtualMachineScaleSetsListAllResult` has been removed +- Struct `VirtualMachineScaleSetsListByLocationOptions` has been removed +- Struct `VirtualMachineScaleSetsListByLocationPager` has been removed +- Struct `VirtualMachineScaleSetsListByLocationResponse` has been removed +- Struct `VirtualMachineScaleSetsListByLocationResult` has been removed +- Struct `VirtualMachineScaleSetsListOptions` has been removed +- Struct `VirtualMachineScaleSetsListPager` has been removed +- Struct `VirtualMachineScaleSetsListResponse` has been removed +- Struct `VirtualMachineScaleSetsListResult` has been removed +- Struct `VirtualMachineScaleSetsListSKUsOptions` has been removed +- Struct `VirtualMachineScaleSetsListSKUsPager` has been removed +- Struct `VirtualMachineScaleSetsListSKUsResponse` has been removed +- Struct `VirtualMachineScaleSetsListSKUsResult` has been removed +- Struct `VirtualMachineScaleSetsPerformMaintenancePoller` has been removed +- Struct `VirtualMachineScaleSetsPerformMaintenancePollerResponse` has been removed +- Struct `VirtualMachineScaleSetsPerformMaintenanceResponse` has been removed +- Struct 
`VirtualMachineScaleSetsPowerOffPoller` has been removed +- Struct `VirtualMachineScaleSetsPowerOffPollerResponse` has been removed +- Struct `VirtualMachineScaleSetsPowerOffResponse` has been removed +- Struct `VirtualMachineScaleSetsRedeployPoller` has been removed +- Struct `VirtualMachineScaleSetsRedeployPollerResponse` has been removed +- Struct `VirtualMachineScaleSetsRedeployResponse` has been removed +- Struct `VirtualMachineScaleSetsReimageAllPoller` has been removed +- Struct `VirtualMachineScaleSetsReimageAllPollerResponse` has been removed +- Struct `VirtualMachineScaleSetsReimageAllResponse` has been removed +- Struct `VirtualMachineScaleSetsReimagePoller` has been removed +- Struct `VirtualMachineScaleSetsReimagePollerResponse` has been removed +- Struct `VirtualMachineScaleSetsReimageResponse` has been removed +- Struct `VirtualMachineScaleSetsRestartPoller` has been removed +- Struct `VirtualMachineScaleSetsRestartPollerResponse` has been removed +- Struct `VirtualMachineScaleSetsRestartResponse` has been removed +- Struct `VirtualMachineScaleSetsSetOrchestrationServiceStatePoller` has been removed +- Struct `VirtualMachineScaleSetsSetOrchestrationServiceStatePollerResponse` has been removed +- Struct `VirtualMachineScaleSetsSetOrchestrationServiceStateResponse` has been removed +- Struct `VirtualMachineScaleSetsStartPoller` has been removed +- Struct `VirtualMachineScaleSetsStartPollerResponse` has been removed +- Struct `VirtualMachineScaleSetsStartResponse` has been removed +- Struct `VirtualMachineScaleSetsUpdateInstancesPoller` has been removed +- Struct `VirtualMachineScaleSetsUpdateInstancesPollerResponse` has been removed +- Struct `VirtualMachineScaleSetsUpdateInstancesResponse` has been removed +- Struct `VirtualMachineScaleSetsUpdatePoller` has been removed +- Struct `VirtualMachineScaleSetsUpdatePollerResponse` has been removed +- Struct `VirtualMachineScaleSetsUpdateResponse` has been removed +- Struct `VirtualMachineScaleSetsUpdateResult` has been removed +- Struct `VirtualMachineSizesListOptions` has been removed +- Struct `VirtualMachineSizesListResponse` has been removed +- Struct `VirtualMachineSizesListResult` has been removed +- Struct `VirtualMachinesAssessPatchesPoller` has been removed +- Struct `VirtualMachinesAssessPatchesPollerResponse` has been removed +- Struct `VirtualMachinesAssessPatchesResponse` has been removed +- Struct `VirtualMachinesAssessPatchesResult` has been removed +- Struct `VirtualMachinesBeginAssessPatchesOptions` has been removed +- Struct `VirtualMachinesBeginCaptureOptions` has been removed +- Struct `VirtualMachinesBeginConvertToManagedDisksOptions` has been removed +- Struct `VirtualMachinesBeginCreateOrUpdateOptions` has been removed +- Struct `VirtualMachinesBeginDeallocateOptions` has been removed +- Struct `VirtualMachinesBeginDeleteOptions` has been removed +- Struct `VirtualMachinesBeginInstallPatchesOptions` has been removed +- Struct `VirtualMachinesBeginPerformMaintenanceOptions` has been removed +- Struct `VirtualMachinesBeginPowerOffOptions` has been removed +- Struct `VirtualMachinesBeginReapplyOptions` has been removed +- Struct `VirtualMachinesBeginRedeployOptions` has been removed +- Struct `VirtualMachinesBeginReimageOptions` has been removed +- Struct `VirtualMachinesBeginRestartOptions` has been removed +- Struct `VirtualMachinesBeginRunCommandOptions` has been removed +- Struct `VirtualMachinesBeginStartOptions` has been removed +- Struct `VirtualMachinesBeginUpdateOptions` has been removed +- Struct 
`VirtualMachinesCapturePoller` has been removed +- Struct `VirtualMachinesCapturePollerResponse` has been removed +- Struct `VirtualMachinesCaptureResponse` has been removed +- Struct `VirtualMachinesCaptureResult` has been removed +- Struct `VirtualMachinesConvertToManagedDisksPoller` has been removed +- Struct `VirtualMachinesConvertToManagedDisksPollerResponse` has been removed +- Struct `VirtualMachinesConvertToManagedDisksResponse` has been removed +- Struct `VirtualMachinesCreateOrUpdatePoller` has been removed +- Struct `VirtualMachinesCreateOrUpdatePollerResponse` has been removed +- Struct `VirtualMachinesCreateOrUpdateResponse` has been removed +- Struct `VirtualMachinesCreateOrUpdateResult` has been removed +- Struct `VirtualMachinesDeallocatePoller` has been removed +- Struct `VirtualMachinesDeallocatePollerResponse` has been removed +- Struct `VirtualMachinesDeallocateResponse` has been removed +- Struct `VirtualMachinesDeletePoller` has been removed +- Struct `VirtualMachinesDeletePollerResponse` has been removed +- Struct `VirtualMachinesDeleteResponse` has been removed +- Struct `VirtualMachinesGeneralizeOptions` has been removed +- Struct `VirtualMachinesGeneralizeResponse` has been removed +- Struct `VirtualMachinesGetOptions` has been removed +- Struct `VirtualMachinesGetResponse` has been removed +- Struct `VirtualMachinesGetResult` has been removed +- Struct `VirtualMachinesInstallPatchesPoller` has been removed +- Struct `VirtualMachinesInstallPatchesPollerResponse` has been removed +- Struct `VirtualMachinesInstallPatchesResponse` has been removed +- Struct `VirtualMachinesInstallPatchesResult` has been removed +- Struct `VirtualMachinesInstanceViewOptions` has been removed +- Struct `VirtualMachinesInstanceViewResponse` has been removed +- Struct `VirtualMachinesInstanceViewResult` has been removed +- Struct `VirtualMachinesListAllOptions` has been removed +- Struct `VirtualMachinesListAllPager` has been removed +- Struct `VirtualMachinesListAllResponse` has been removed +- Struct `VirtualMachinesListAllResult` has been removed +- Struct `VirtualMachinesListAvailableSizesOptions` has been removed +- Struct `VirtualMachinesListAvailableSizesResponse` has been removed +- Struct `VirtualMachinesListAvailableSizesResult` has been removed +- Struct `VirtualMachinesListByLocationOptions` has been removed +- Struct `VirtualMachinesListByLocationPager` has been removed +- Struct `VirtualMachinesListByLocationResponse` has been removed +- Struct `VirtualMachinesListByLocationResult` has been removed +- Struct `VirtualMachinesListOptions` has been removed +- Struct `VirtualMachinesListPager` has been removed +- Struct `VirtualMachinesListResponse` has been removed +- Struct `VirtualMachinesListResult` has been removed +- Struct `VirtualMachinesPerformMaintenancePoller` has been removed +- Struct `VirtualMachinesPerformMaintenancePollerResponse` has been removed +- Struct `VirtualMachinesPerformMaintenanceResponse` has been removed +- Struct `VirtualMachinesPowerOffPoller` has been removed +- Struct `VirtualMachinesPowerOffPollerResponse` has been removed +- Struct `VirtualMachinesPowerOffResponse` has been removed +- Struct `VirtualMachinesReapplyPoller` has been removed +- Struct `VirtualMachinesReapplyPollerResponse` has been removed +- Struct `VirtualMachinesReapplyResponse` has been removed +- Struct `VirtualMachinesRedeployPoller` has been removed +- Struct `VirtualMachinesRedeployPollerResponse` has been removed +- Struct `VirtualMachinesRedeployResponse` has been 
removed +- Struct `VirtualMachinesReimagePoller` has been removed +- Struct `VirtualMachinesReimagePollerResponse` has been removed +- Struct `VirtualMachinesReimageResponse` has been removed +- Struct `VirtualMachinesRestartPoller` has been removed +- Struct `VirtualMachinesRestartPollerResponse` has been removed +- Struct `VirtualMachinesRestartResponse` has been removed +- Struct `VirtualMachinesRetrieveBootDiagnosticsDataOptions` has been removed +- Struct `VirtualMachinesRetrieveBootDiagnosticsDataResponse` has been removed +- Struct `VirtualMachinesRetrieveBootDiagnosticsDataResult` has been removed +- Struct `VirtualMachinesRunCommandPoller` has been removed +- Struct `VirtualMachinesRunCommandPollerResponse` has been removed +- Struct `VirtualMachinesRunCommandResponse` has been removed +- Struct `VirtualMachinesRunCommandResult` has been removed +- Struct `VirtualMachinesSimulateEvictionOptions` has been removed +- Struct `VirtualMachinesSimulateEvictionResponse` has been removed +- Struct `VirtualMachinesStartPoller` has been removed +- Struct `VirtualMachinesStartPollerResponse` has been removed +- Struct `VirtualMachinesStartResponse` has been removed +- Struct `VirtualMachinesUpdatePoller` has been removed +- Struct `VirtualMachinesUpdatePollerResponse` has been removed +- Struct `VirtualMachinesUpdateResponse` has been removed +- Struct `VirtualMachinesUpdateResult` has been removed +- Field `GalleryDiskImage` of struct `GalleryDataDiskImage` has been removed +- Field `UpdateResource` of struct `DedicatedHostUpdate` has been removed +- Field `GalleryArtifactPublishingProfileBase` of struct `GalleryApplicationVersionPublishingProfile` has been removed +- Field `Resource` of struct `CapacityReservation` has been removed +- Field `SubResource` of struct `VirtualMachineScaleSetNetworkConfiguration` has been removed +- Field `Resource` of struct `Snapshot` has been removed +- Field `PirCommunityGalleryResource` of struct `CommunityGalleryImage` has been removed +- Field `UpdateResource` of struct `RestorePointCollectionUpdate` has been removed +- Field `DedicatedHostInstanceView` of struct `DedicatedHostInstanceViewWithName` has been removed +- Field `UpdateResource` of struct `AvailabilitySetUpdate` has been removed +- Field `Resource` of struct `SSHPublicKeyResource` has been removed +- Field `ProxyOnlyResource` of struct `DiskRestorePoint` has been removed +- Field `GalleryArtifactPublishingProfileBase` of struct `GalleryImageVersionPublishingProfile` has been removed +- Field `Resource` of struct `DedicatedHost` has been removed +- Field `PirCommunityGalleryResource` of struct `CommunityGallery` has been removed +- Field `Resource` of struct `Image` has been removed +- Field `UpdateResource` of struct `ProximityPlacementGroupUpdate` has been removed +- Field `Resource` of struct `GalleryImageVersion` has been removed +- Field `ImageDisk` of struct `ImageOSDisk` has been removed +- Field `UpdateResource` of struct `VirtualMachineRunCommandUpdate` has been removed +- Field `RunCommandDocumentBase` of struct `RunCommandDocument` has been removed +- Field `Resource` of struct `VirtualMachineRunCommand` has been removed +- Field `Resource` of struct `GalleryImage` has been removed +- Field `SubResource` of struct `SubResourceWithColocationStatus` has been removed +- Field `SubResource` of struct `VirtualMachineCaptureResult` has been removed +- Field `SubResource` of struct `ManagedDiskParameters` has been removed +- Field `UpdateResource` of struct `VirtualMachineUpdate` has been 
removed +- Field `UpdateResource` of struct `SSHPublicKeyUpdateResource` has been removed +- Field `Resource` of struct `DedicatedHostGroup` has been removed +- Field `VirtualMachineImageResource` of struct `VirtualMachineImage` has been removed +- Field `SubResource` of struct `DiskEncryptionSetParameters` has been removed +- Field `Resource` of struct `DiskEncryptionSet` has been removed +- Field `SubResourceReadOnly` of struct `VirtualMachineScaleSetExtensionUpdate` has been removed +- Field `UpdateResourceDefinition` of struct `GalleryUpdate` has been removed +- Field `Resource` of struct `VirtualMachineScaleSet` has been removed +- Field `UpdateResource` of struct `CapacityReservationGroupUpdate` has been removed +- Field `UpdateResourceDefinition` of struct `GalleryImageVersionUpdate` has been removed +- Field `ProxyResource` of struct `RestorePoint` has been removed +- Field `UpdateResourceDefinition` of struct `GalleryImageUpdate` has been removed +- Field `UpdateResource` of struct `VirtualMachineExtensionUpdate` has been removed +- Field `Resource` of struct `AvailabilitySet` has been removed +- Field `VirtualMachineReimageParameters` of struct `VirtualMachineScaleSetVMReimageParameters` has been removed +- Field `UpdateResource` of struct `VirtualMachineScaleSetUpdate` has been removed +- Field `Resource` of struct `RollingUpgradeStatusInfo` has been removed +- Field `DiskImageEncryption` of struct `OSDiskImageEncryption` has been removed +- Field `SubResource` of struct `ImageReference` has been removed +- Field `Resource` of struct `Gallery` has been removed +- Field `GalleryDiskImage` of struct `GalleryOSDiskImage` has been removed +- Field `PirResource` of struct `PirSharedGalleryResource` has been removed +- Field `Resource` of struct `GalleryApplicationVersion` has been removed +- Field `PirSharedGalleryResource` of struct `SharedGalleryImage` has been removed +- Field `UpdateResource` of struct `DedicatedHostGroupUpdate` has been removed +- Field `SubResourceReadOnly` of struct `VirtualMachineScaleSetVMExtension` has been removed +- Field `UpdateResource` of struct `ImageUpdate` has been removed +- Field `Resource` of struct `VirtualMachine` has been removed +- Field `Resource` of struct `CapacityReservationGroup` has been removed +- Field `UpdateResourceDefinition` of struct `GalleryApplicationVersionUpdate` has been removed +- Field `Resource` of struct `VirtualMachineExtensionImage` has been removed +- Field `UpdateResourceDefinition` of struct `GalleryApplicationUpdate` has been removed +- Field `SubResource` of struct `NetworkInterfaceReference` has been removed +- Field `SubResource` of struct `VirtualMachineScaleSetIPConfiguration` has been removed +- Field `InnerError` of struct `CloudError` has been removed +- Field `ImageDisk` of struct `ImageDataDisk` has been removed +- Field `Resource` of struct `RestorePointCollection` has been removed +- Field `Resource` of struct `DiskAccess` has been removed +- Field `Resource` of struct `VirtualMachineExtension` has been removed +- Field `Resource` of struct `VirtualMachineScaleSetVM` has been removed +- Field `LogAnalyticsInputBase` of struct `ThrottledRequestsInput` has been removed +- Field `SubResourceReadOnly` of struct `VirtualMachineScaleSetVMExtensionUpdate` has been removed +- Field `SubResource` of struct `VirtualMachineScaleSetUpdateNetworkConfiguration` has been removed +- Field `SubResource` of struct `VirtualMachineScaleSetUpdateIPConfiguration` has been removed +- Field `PirSharedGalleryResource` of struct 
`SharedGallery` has been removed +- Field `LogAnalyticsInputBase` of struct `RequestRateByIntervalInput` has been removed +- Field `DiskImageEncryption` of struct `DataDiskImageEncryption` has been removed +- Field `SubResource` of struct `VirtualMachineImageResource` has been removed +- Field `Resource` of struct `Disk` has been removed +- Field `PirSharedGalleryResource` of struct `SharedGalleryImageVersion` has been removed +- Field `PirCommunityGalleryResource` of struct `CommunityGalleryImageVersion` has been removed +- Field `VirtualMachineScaleSetVMReimageParameters` of struct `VirtualMachineScaleSetReimageParameters` has been removed +- Field `CapacityReservationInstanceView` of struct `CapacityReservationInstanceViewWithName` has been removed +- Field `Resource` of struct `ProximityPlacementGroup` has been removed +- Field `Resource` of struct `GalleryApplication` has been removed +- Field `SubResourceReadOnly` of struct `VirtualMachineScaleSetExtension` has been removed +- Field `UpdateResource` of struct `CapacityReservationUpdate` has been removed + +### Features Added + +- New const `DiskCreateOptionUploadPreparedSecure` +- New const `DiskSecurityTypesConfidentialVMVmguestStateOnlyEncryptedWithPlatformKey` +- New const `DiskEncryptionSetTypeConfidentialVMEncryptedWithCustomerKey` +- New const `DiskSecurityTypesConfidentialVMDiskEncryptedWithCustomerKey` +- New const `DiskSecurityTypesConfidentialVMDiskEncryptedWithPlatformKey` +- New const `DiskCreateOptionImportSecure` +- New function `*UsageClientListPager.PageResponse() UsageClientListResponse` +- New function `*VirtualMachineScaleSetRollingUpgradesClientStartExtensionUpgradePoller.FinalResponse(context.Context) (VirtualMachineScaleSetRollingUpgradesClientStartExtensionUpgradeResponse, error)` +- New function `*ProximityPlacementGroupsClientListBySubscriptionPager.NextPage(context.Context) bool` +- New function `*GalleryApplicationsClientCreateOrUpdatePoller.ResumeToken() (string, error)` +- New function `*DedicatedHostsClientListByHostGroupPager.PageResponse() DedicatedHostsClientListByHostGroupResponse` +- New function `*GallerySharingProfileClientUpdatePollerResponse.Resume(context.Context, *GallerySharingProfileClient, string) error` +- New function `*DiskEncryptionSetsClientDeletePollerResponse.Resume(context.Context, *DiskEncryptionSetsClient, string) error` +- New function `*VirtualMachineExtensionsClientDeletePoller.ResumeToken() (string, error)` +- New function `*GalleryImagesClientListByGalleryPager.NextPage(context.Context) bool` +- New function `*DisksClientGrantAccessPollerResponse.Resume(context.Context, *DisksClient, string) error` +- New function `*GalleryApplicationVersionsClientUpdatePoller.ResumeToken() (string, error)` +- New function `VirtualMachineScaleSetVMsClientPerformMaintenancePollerResponse.PollUntilDone(context.Context, time.Duration) (VirtualMachineScaleSetVMsClientPerformMaintenanceResponse, error)` +- New function `VirtualMachinesClientPowerOffPollerResponse.PollUntilDone(context.Context, time.Duration) (VirtualMachinesClientPowerOffResponse, error)` +- New function `*VirtualMachineScaleSetsClientListSKUsPager.NextPage(context.Context) bool` +- New function `*SnapshotsClientRevokeAccessPoller.Done() bool` +- New function `*VirtualMachineScaleSetExtensionsClientUpdatePollerResponse.Resume(context.Context, *VirtualMachineScaleSetExtensionsClient, string) error` +- New function `*GalleryApplicationsClientDeletePoller.ResumeToken() (string, error)` +- New function 
`*VirtualMachineScaleSetVMExtensionsClientUpdatePoller.ResumeToken() (string, error)` +- New function `*VirtualMachineRunCommandsClientListPager.NextPage(context.Context) bool` +- New function `*CloudServicesClientListAllPager.Err() error` +- New function `*UsageClientListPager.Err() error` +- New function `*ResourceSKUsClientListPager.Err() error` +- New function `*CloudServicesClientPowerOffPoller.ResumeToken() (string, error)` +- New function `*VirtualMachinesClientListByLocationPager.NextPage(context.Context) bool` +- New function `ImagesClientCreateOrUpdatePollerResponse.PollUntilDone(context.Context, time.Duration) (ImagesClientCreateOrUpdateResponse, error)` +- New function `*VirtualMachineScaleSetsClientListSKUsPager.PageResponse() VirtualMachineScaleSetsClientListSKUsResponse` +- New function `*VirtualMachineScaleSetRollingUpgradesClientStartOSUpgradePoller.FinalResponse(context.Context) (VirtualMachineScaleSetRollingUpgradesClientStartOSUpgradeResponse, error)` +- New function `*VirtualMachineScaleSetsClientUpdatePoller.ResumeToken() (string, error)` +- New function `*VirtualMachineScaleSetRollingUpgradesClientCancelPoller.Done() bool` +- New function `*UsageClientListPager.NextPage(context.Context) bool` +- New function `*VirtualMachinesClientCreateOrUpdatePoller.FinalResponse(context.Context) (VirtualMachinesClientCreateOrUpdateResponse, error)` +- New function `*CapacityReservationsClientUpdatePoller.Done() bool` +- New function `*GalleriesClientUpdatePoller.Poll(context.Context) (*http.Response, error)` +- New function `*CloudServicesClientUpdatePoller.Done() bool` +- New function `CloudServicesClientRebuildPollerResponse.PollUntilDone(context.Context, time.Duration) (CloudServicesClientRebuildResponse, error)` +- New function `*CloudServicesClientPowerOffPoller.Poll(context.Context) (*http.Response, error)` +- New function `*VirtualMachineScaleSetRollingUpgradesClientCancelPoller.ResumeToken() (string, error)` +- New function `*CloudServicesClientReimagePoller.ResumeToken() (string, error)` +- New function `*VirtualMachineScaleSetsClientPerformMaintenancePoller.Poll(context.Context) (*http.Response, error)` +- New function `*VirtualMachineScaleSetsClientListByLocationPager.Err() error` +- New function `*VirtualMachineScaleSetsClientCreateOrUpdatePoller.Done() bool` +- New function `*AvailabilitySetsClientListPager.PageResponse() AvailabilitySetsClientListResponse` +- New function `*GalleryImagesClientCreateOrUpdatePoller.FinalResponse(context.Context) (GalleryImagesClientCreateOrUpdateResponse, error)` +- New function `VirtualMachineScaleSetRollingUpgradesClientCancelPollerResponse.PollUntilDone(context.Context, time.Duration) (VirtualMachineScaleSetRollingUpgradesClientCancelResponse, error)` +- New function `*CapacityReservationsClientListByCapacityReservationGroupPager.PageResponse() CapacityReservationsClientListByCapacityReservationGroupResponse` +- New function `*CloudServiceRoleInstancesClientDeletePollerResponse.Resume(context.Context, *CloudServiceRoleInstancesClient, string) error` +- New function `CapacityReservationsClientDeletePollerResponse.PollUntilDone(context.Context, time.Duration) (CapacityReservationsClientDeleteResponse, error)` +- New function `*VirtualMachineScaleSetVMRunCommandsClientListPager.PageResponse() VirtualMachineScaleSetVMRunCommandsClientListResponse` +- New function `VirtualMachineScaleSetVMsClientRunCommandPollerResponse.PollUntilDone(context.Context, time.Duration) (VirtualMachineScaleSetVMsClientRunCommandResponse, error)` +- New 
function `*VirtualMachinesClientDeletePoller.FinalResponse(context.Context) (VirtualMachinesClientDeleteResponse, error)` +- New function `*DiskRestorePointClientGrantAccessPoller.FinalResponse(context.Context) (DiskRestorePointClientGrantAccessResponse, error)` +- New function `*CloudServicesUpdateDomainClientWalkUpdateDomainPollerResponse.Resume(context.Context, *CloudServicesUpdateDomainClient, string) error` +- New function `OperationListResult.MarshalJSON() ([]byte, error)` +- New function `*GalleriesClientUpdatePoller.ResumeToken() (string, error)` +- New function `*GallerySharingProfileClientUpdatePoller.Poll(context.Context) (*http.Response, error)` +- New function `*VirtualMachineScaleSetsClientDeleteInstancesPoller.FinalResponse(context.Context) (VirtualMachineScaleSetsClientDeleteInstancesResponse, error)` +- New function `*VirtualMachineExtensionsClientDeletePoller.Poll(context.Context) (*http.Response, error)` +- New function `*VirtualMachinesClientCapturePoller.Done() bool` +- New function `*CloudServicesClientCreateOrUpdatePoller.Poll(context.Context) (*http.Response, error)` +- New function `*VirtualMachineScaleSetVMExtensionsClientDeletePoller.Poll(context.Context) (*http.Response, error)` +- New function `*ImagesClientListByResourceGroupPager.Err() error` +- New function `*VirtualMachineScaleSetsClientSetOrchestrationServiceStatePollerResponse.Resume(context.Context, *VirtualMachineScaleSetsClient, string) error` +- New function `*DiskEncryptionSetsClientListAssociatedResourcesPager.PageResponse() DiskEncryptionSetsClientListAssociatedResourcesResponse` +- New function `*VirtualMachineScaleSetsClientGetOSUpgradeHistoryPager.PageResponse() VirtualMachineScaleSetsClientGetOSUpgradeHistoryResponse` +- New function `*ResourceSKUsClientListPager.NextPage(context.Context) bool` +- New function `*DiskAccessesClientUpdatePoller.Done() bool` +- New function `*CapacityReservationsClientDeletePoller.ResumeToken() (string, error)` +- New function `*GalleriesClientDeletePoller.Poll(context.Context) (*http.Response, error)` +- New function `DiskEncryptionSetsClientUpdatePollerResponse.PollUntilDone(context.Context, time.Duration) (DiskEncryptionSetsClientUpdateResponse, error)` +- New function `*DiskRestorePointClientRevokeAccessPoller.Done() bool` +- New function `VirtualMachinesClientRedeployPollerResponse.PollUntilDone(context.Context, time.Duration) (VirtualMachinesClientRedeployResponse, error)` +- New function `*VirtualMachinesClientRunCommandPoller.FinalResponse(context.Context) (VirtualMachinesClientRunCommandResponse, error)` +- New function `*SnapshotsClientDeletePoller.ResumeToken() (string, error)` +- New function `*DedicatedHostGroupsClientListBySubscriptionPager.PageResponse() DedicatedHostGroupsClientListBySubscriptionResponse` +- New function `*RestorePointCollectionsClientDeletePoller.Poll(context.Context) (*http.Response, error)` +- New function `VirtualMachineScaleSetVMExtensionsClientDeletePollerResponse.PollUntilDone(context.Context, time.Duration) (VirtualMachineScaleSetVMExtensionsClientDeleteResponse, error)` +- New function `*VirtualMachineScaleSetsClientStartPoller.Done() bool` +- New function `VirtualMachineScaleSetsClientReimageAllPollerResponse.PollUntilDone(context.Context, time.Duration) (VirtualMachineScaleSetsClientReimageAllResponse, error)` +- New function `*DedicatedHostsClientUpdatePoller.Poll(context.Context) (*http.Response, error)` +- New function `*VirtualMachineScaleSetsClientUpdateInstancesPollerResponse.Resume(context.Context, 
*VirtualMachineScaleSetsClient, string) error` +- New function `*RestorePointsClientCreatePoller.ResumeToken() (string, error)` +- New function `*VirtualMachineScaleSetsClientReimagePollerResponse.Resume(context.Context, *VirtualMachineScaleSetsClient, string) error` +- New function `*DiskEncryptionSetsClientUpdatePollerResponse.Resume(context.Context, *DiskEncryptionSetsClient, string) error` +- New function `*DisksClientDeletePoller.FinalResponse(context.Context) (DisksClientDeleteResponse, error)` +- New function `*GalleryApplicationsClientUpdatePoller.Done() bool` +- New function `*DedicatedHostGroupsClientListByResourceGroupPager.Err() error` +- New function `*CloudServicesClientStartPoller.Poll(context.Context) (*http.Response, error)` +- New function `*DiskEncryptionSetsClientListPager.PageResponse() DiskEncryptionSetsClientListResponse` +- New function `*DiskAccessesClientUpdatePoller.Poll(context.Context) (*http.Response, error)` +- New function `*VirtualMachineScaleSetVMsClientPowerOffPoller.FinalResponse(context.Context) (VirtualMachineScaleSetVMsClientPowerOffResponse, error)` +- New function `*VirtualMachineScaleSetsClientReimagePoller.Done() bool` +- New function `*DiskEncryptionSetsClientUpdatePoller.Poll(context.Context) (*http.Response, error)` +- New function `*DedicatedHostsClientCreateOrUpdatePoller.Done() bool` +- New function `*ImagesClientCreateOrUpdatePollerResponse.Resume(context.Context, *ImagesClient, string) error` +- New function `*VirtualMachineScaleSetExtensionsClientCreateOrUpdatePollerResponse.Resume(context.Context, *VirtualMachineScaleSetExtensionsClient, string) error` +- New function `*VirtualMachineScaleSetVMsClientRedeployPoller.FinalResponse(context.Context) (VirtualMachineScaleSetVMsClientRedeployResponse, error)` +- New function `*VirtualMachinesClientPerformMaintenancePollerResponse.Resume(context.Context, *VirtualMachinesClient, string) error` +- New function `*VirtualMachineScaleSetsClientGetOSUpgradeHistoryPager.Err() error` +- New function `*VirtualMachineScaleSetsClientUpdatePoller.Poll(context.Context) (*http.Response, error)` +- New function `*VirtualMachineScaleSetVMsClientStartPollerResponse.Resume(context.Context, *VirtualMachineScaleSetVMsClient, string) error` +- New function `VirtualMachinesClientRunCommandPollerResponse.PollUntilDone(context.Context, time.Duration) (VirtualMachinesClientRunCommandResponse, error)` +- New function `*RestorePointCollectionsClientDeletePollerResponse.Resume(context.Context, *RestorePointCollectionsClient, string) error` +- New function `VirtualMachineScaleSetVMsClientUpdatePollerResponse.PollUntilDone(context.Context, time.Duration) (VirtualMachineScaleSetVMsClientUpdateResponse, error)` +- New function `*DiskRestorePointClientGrantAccessPollerResponse.Resume(context.Context, *DiskRestorePointClient, string) error` +- New function `*GalleryApplicationsClientCreateOrUpdatePoller.Poll(context.Context) (*http.Response, error)` +- New function `VirtualMachinesClientDeallocatePollerResponse.PollUntilDone(context.Context, time.Duration) (VirtualMachinesClientDeallocateResponse, error)` +- New function `GalleryImagesClientCreateOrUpdatePollerResponse.PollUntilDone(context.Context, time.Duration) (GalleryImagesClientCreateOrUpdateResponse, error)` +- New function `*VirtualMachineRunCommandsClientDeletePoller.ResumeToken() (string, error)` +- New function `*VirtualMachinesClientReimagePollerResponse.Resume(context.Context, *VirtualMachinesClient, string) error` +- New function 
`*VirtualMachineScaleSetsClientPowerOffPoller.Poll(context.Context) (*http.Response, error)` +- New function `*DedicatedHostsClientUpdatePoller.Done() bool` +- New function `*VirtualMachineScaleSetsClientPerformMaintenancePollerResponse.Resume(context.Context, *VirtualMachineScaleSetsClient, string) error` +- New function `*CapacityReservationsClientUpdatePoller.FinalResponse(context.Context) (CapacityReservationsClientUpdateResponse, error)` +- New function `VirtualMachineRunCommandsClientDeletePollerResponse.PollUntilDone(context.Context, time.Duration) (VirtualMachineRunCommandsClientDeleteResponse, error)` +- New function `*CloudServiceRoleInstancesClientRebuildPoller.ResumeToken() (string, error)` +- New function `*VirtualMachinesClientDeletePoller.Poll(context.Context) (*http.Response, error)` +- New function `*VirtualMachineScaleSetVMsClientDeletePoller.ResumeToken() (string, error)` +- New function `*VirtualMachineScaleSetVMsClientListPager.PageResponse() VirtualMachineScaleSetVMsClientListResponse` +- New function `GalleryImageVersionPublishingProfile.MarshalJSON() ([]byte, error)` +- New function `*DedicatedHostsClientUpdatePollerResponse.Resume(context.Context, *DedicatedHostsClient, string) error` +- New function `*DiskRestorePointClientRevokeAccessPollerResponse.Resume(context.Context, *DiskRestorePointClient, string) error` +- New function `*ProximityPlacementGroupsClientListByResourceGroupPager.Err() error` +- New function `*GalleryImageVersionsClientListByGalleryImagePager.Err() error` +- New function `*DisksClientRevokeAccessPoller.ResumeToken() (string, error)` +- New function `*VirtualMachinesClientReapplyPollerResponse.Resume(context.Context, *VirtualMachinesClient, string) error` +- New function `*VirtualMachineScaleSetExtensionsClientDeletePollerResponse.Resume(context.Context, *VirtualMachineScaleSetExtensionsClient, string) error` +- New function `*VirtualMachineScaleSetVMExtensionsClientCreateOrUpdatePollerResponse.Resume(context.Context, *VirtualMachineScaleSetVMExtensionsClient, string) error` +- New function `*VirtualMachineRunCommandsClientUpdatePoller.Done() bool` +- New function `*DisksClientDeletePollerResponse.Resume(context.Context, *DisksClient, string) error` +- New function `*VirtualMachineScaleSetVMsClientUpdatePoller.ResumeToken() (string, error)` +- New function `*VirtualMachineScaleSetsClientPerformMaintenancePoller.Done() bool` +- New function `*VirtualMachineRunCommandsClientCreateOrUpdatePoller.FinalResponse(context.Context) (VirtualMachineRunCommandsClientCreateOrUpdateResponse, error)` +- New function `VirtualMachineScaleSetsClientUpdateInstancesPollerResponse.PollUntilDone(context.Context, time.Duration) (VirtualMachineScaleSetsClientUpdateInstancesResponse, error)` +- New function `*RestorePointCollectionsClientListPager.NextPage(context.Context) bool` +- New function `*CloudServiceOperatingSystemsClientListOSFamiliesPager.NextPage(context.Context) bool` +- New function `*VirtualMachineScaleSetsClientUpdatePollerResponse.Resume(context.Context, *VirtualMachineScaleSetsClient, string) error` +- New function `*VirtualMachinesClientCreateOrUpdatePoller.Done() bool` +- New function `DisksClientCreateOrUpdatePollerResponse.PollUntilDone(context.Context, time.Duration) (DisksClientCreateOrUpdateResponse, error)` +- New function `*DiskEncryptionSetsClientUpdatePoller.Done() bool` +- New function `*DiskEncryptionSetsClientUpdatePoller.ResumeToken() (string, error)` +- New function `*VirtualMachinesClientStartPoller.FinalResponse(context.Context) 
(VirtualMachinesClientStartResponse, error)`
+- New function `*VirtualMachineScaleSetVMsClientRunCommandPoller.Poll(context.Context) (*http.Response, error)`
+- New function `*RestorePointCollectionsClientListAllPager.PageResponse() RestorePointCollectionsClientListAllResponse`
+- New function `*VirtualMachinesClientStartPollerResponse.Resume(context.Context, *VirtualMachinesClient, string) error`
+- New function `*SnapshotsClientUpdatePoller.FinalResponse(context.Context) (SnapshotsClientUpdateResponse, error)`
+- New function `*VirtualMachineScaleSetRollingUpgradesClientStartExtensionUpgradePollerResponse.Resume(context.Context, *VirtualMachineScaleSetRollingUpgradesClient, string) error`
+- New function `DisksClientUpdatePollerResponse.PollUntilDone(context.Context, time.Duration) (DisksClientUpdateResponse, error)`
+- New function `*GalleryApplicationVersionsClientListByGalleryApplicationPager.NextPage(context.Context) bool`
+- New function `*VirtualMachineScaleSetVMsClientListPager.NextPage(context.Context) bool`
+- New function `*AvailabilitySetsClientListBySubscriptionPager.Err() error`
+- New function `*DiskRestorePointClientRevokeAccessPoller.ResumeToken() (string, error)`
+- New function `*VirtualMachinesClientCapturePollerResponse.Resume(context.Context, *VirtualMachinesClient, string) error`
+- New function `GalleryApplicationVersionsClientUpdatePollerResponse.PollUntilDone(context.Context, time.Duration) (GalleryApplicationVersionsClientUpdateResponse, error)`
+- New function `*VirtualMachineExtensionsClientDeletePoller.FinalResponse(context.Context) (VirtualMachineExtensionsClientDeleteResponse, error)`
+- New function `*VirtualMachineScaleSetsClientListPager.Err() error`
+- New function `CloudServicesClientStartPollerResponse.PollUntilDone(context.Context, time.Duration) (CloudServicesClientStartResponse, error)`
+- New function `*VirtualMachineScaleSetExtensionsClientUpdatePoller.Poll(context.Context) (*http.Response, error)`
+- New function `*CloudServiceRoleInstancesClientReimagePoller.Done() bool`
+- New function `*CloudServicesUpdateDomainClientWalkUpdateDomainPoller.Done() bool`
+- New function `*ImagesClientCreateOrUpdatePoller.Poll(context.Context) (*http.Response, error)`
+- New function `*VirtualMachineScaleSetsClientDeleteInstancesPoller.Done() bool`
+- New function `*CloudServicesClientDeleteInstancesPoller.ResumeToken() (string, error)`
+- New function `*GalleryApplicationVersionsClientListByGalleryApplicationPager.PageResponse() GalleryApplicationVersionsClientListByGalleryApplicationResponse`
+- New function `*DisksClientUpdatePoller.ResumeToken() (string, error)`
+- New function `*CloudServicesClientDeleteInstancesPoller.Done() bool`
+- New function `*VirtualMachineScaleSetExtensionsClientCreateOrUpdatePoller.Poll(context.Context) (*http.Response, error)`
+- New function `*CloudServicesClientListPager.PageResponse() CloudServicesClientListResponse`
+- New function `*VirtualMachineRunCommandsClientDeletePollerResponse.Resume(context.Context, *VirtualMachineRunCommandsClient, string) error`
+- New function `*CapacityReservationGroupsClientListByResourceGroupPager.PageResponse() CapacityReservationGroupsClientListByResourceGroupResponse`
+- New function `*VirtualMachinesClientReimagePoller.FinalResponse(context.Context) (VirtualMachinesClientReimageResponse, error)`
+- New function `*VirtualMachinesClientRestartPoller.Poll(context.Context) (*http.Response, error)`
+- New function `*GalleriesClientDeletePoller.FinalResponse(context.Context) (GalleriesClientDeleteResponse, error)`
+- New function `*CapacityReservationsClientCreateOrUpdatePollerResponse.Resume(context.Context, *CapacityReservationsClient, string) error`
+- New function `*DiskRestorePointClientListByRestorePointPager.NextPage(context.Context) bool`
+- New function `GalleryImagesClientDeletePollerResponse.PollUntilDone(context.Context, time.Duration) (GalleryImagesClientDeleteResponse, error)`
+- New function `*GalleriesClientCreateOrUpdatePollerResponse.Resume(context.Context, *GalleriesClient, string) error`
+- New function `*SharedGalleryImagesClientListPager.NextPage(context.Context) bool`
+- New function `*VirtualMachineScaleSetVMExtensionsClientUpdatePollerResponse.Resume(context.Context, *VirtualMachineScaleSetVMExtensionsClient, string) error`
+- New function `*ProximityPlacementGroupsClientListBySubscriptionPager.Err() error`
+- New function `*GalleryImageVersionsClientUpdatePollerResponse.Resume(context.Context, *GalleryImageVersionsClient, string) error`
+- New function `*DiskAccessesClientUpdateAPrivateEndpointConnectionPoller.ResumeToken() (string, error)`
+- New function `*DisksClientListPager.Err() error`
+- New function `*DisksClientListByResourceGroupPager.Err() error`
+- New function `*VirtualMachineScaleSetsClientUpdateInstancesPoller.Done() bool`
+- New function `*CloudServicesClientRestartPoller.Done() bool`
+- New function `*GalleriesClientListByResourceGroupPager.NextPage(context.Context) bool`
+- New function `VirtualMachineScaleSetVMsClientPowerOffPollerResponse.PollUntilDone(context.Context, time.Duration) (VirtualMachineScaleSetVMsClientPowerOffResponse, error)`
+- New function `*VirtualMachinesClientCreateOrUpdatePoller.ResumeToken() (string, error)`
+- New function `*ImagesClientDeletePollerResponse.Resume(context.Context, *ImagesClient, string) error`
+- New function `*DiskAccessesClientCreateOrUpdatePoller.FinalResponse(context.Context) (DiskAccessesClientCreateOrUpdateResponse, error)`
+- New function `*DiskAccessesClientDeleteAPrivateEndpointConnectionPoller.FinalResponse(context.Context) (DiskAccessesClientDeleteAPrivateEndpointConnectionResponse, error)`
+- New function `*VirtualMachinesClientListAllPager.PageResponse() VirtualMachinesClientListAllResponse`
+- New function `*DiskRestorePointClientGrantAccessPoller.Poll(context.Context) (*http.Response, error)`
+- New function `*GalleryImageVersionsClientCreateOrUpdatePoller.Poll(context.Context) (*http.Response, error)`
+- New function `*DisksClientGrantAccessPoller.Done() bool`
+- New function `*GalleryImageVersionsClientDeletePoller.ResumeToken() (string, error)`
+- New function `*VirtualMachineScaleSetRollingUpgradesClientStartOSUpgradePollerResponse.Resume(context.Context, *VirtualMachineScaleSetRollingUpgradesClient, string) error`
+- New function `*VirtualMachineScaleSetVMsClientPowerOffPoller.ResumeToken() (string, error)`
+- New function `*SnapshotsClientRevokeAccessPoller.FinalResponse(context.Context) (SnapshotsClientRevokeAccessResponse, error)`
+- New function `*VirtualMachinesClientRunCommandPollerResponse.Resume(context.Context, *VirtualMachinesClient, string) error`
+- New function `*CapacityReservationsClientListByCapacityReservationGroupPager.NextPage(context.Context) bool`
+- New function `*GalleryApplicationVersionsClientDeletePollerResponse.Resume(context.Context, *GalleryApplicationVersionsClient, string) error`
+- New function `*LogAnalyticsClientExportThrottledRequestsPoller.ResumeToken() (string, error)`
+- New function `*CloudServiceRoleInstancesClientListPager.Err() error`
+- New function `*CloudServicesUpdateDomainClientListUpdateDomainsPager.PageResponse() CloudServicesUpdateDomainClientListUpdateDomainsResponse`
+- New function `*CloudServiceRoleInstancesClientRestartPoller.FinalResponse(context.Context) (CloudServiceRoleInstancesClientRestartResponse, error)`
+- New function `*RestorePointsClientDeletePoller.Poll(context.Context) (*http.Response, error)`
+- New function `*VirtualMachineScaleSetsClientUpdatePoller.Done() bool`
+- New function `GalleriesClientDeletePollerResponse.PollUntilDone(context.Context, time.Duration) (GalleriesClientDeleteResponse, error)`
+- New function `*VirtualMachineScaleSetVMsClientUpdatePoller.Poll(context.Context) (*http.Response, error)`
+- New function `*CloudServicesClientListAllPager.PageResponse() CloudServicesClientListAllResponse`
+- New function `VirtualMachineScaleSetVMRunCommandsClientDeletePollerResponse.PollUntilDone(context.Context, time.Duration) (VirtualMachineScaleSetVMRunCommandsClientDeleteResponse, error)`
+- New function `*VirtualMachineScaleSetsClientStartPollerResponse.Resume(context.Context, *VirtualMachineScaleSetsClient, string) error`
+- New function `*VirtualMachineScaleSetsClientDeallocatePoller.ResumeToken() (string, error)`
+- New function `*VirtualMachineScaleSetsClientRedeployPoller.Done() bool`
+- New function `*VirtualMachineScaleSetsClientReimageAllPoller.ResumeToken() (string, error)`
+- New function `*VirtualMachineExtensionsClientUpdatePoller.Poll(context.Context) (*http.Response, error)`
+- New function `*VirtualMachineRunCommandsClientDeletePoller.Done() bool`
+- New function `*ImagesClientListByResourceGroupPager.NextPage(context.Context) bool`
+- New function `*CloudServicesClientDeleteInstancesPoller.FinalResponse(context.Context) (CloudServicesClientDeleteInstancesResponse, error)`
+- New function `GalleryImageVersionsClientDeletePollerResponse.PollUntilDone(context.Context, time.Duration) (GalleryImageVersionsClientDeleteResponse, error)`
+- New function `*GalleryApplicationVersionsClientUpdatePoller.Done() bool`
+- New function `*SSHPublicKeysClientListByResourceGroupPager.Err() error`
+- New function `*SharedGalleryImageVersionsClientListPager.PageResponse() SharedGalleryImageVersionsClientListResponse`
+- New function `*DiskAccessesClientUpdatePoller.ResumeToken() (string, error)`
+- New function `*CloudServiceOperatingSystemsClientListOSVersionsPager.PageResponse() CloudServiceOperatingSystemsClientListOSVersionsResponse`
+- New function `*VirtualMachinesClientPerformMaintenancePoller.ResumeToken() (string, error)`
+- New function `*ImagesClientCreateOrUpdatePoller.Done() bool`
+- New function `*ImagesClientListPager.NextPage(context.Context) bool`
+- New function `*VirtualMachineScaleSetsClientUpdatePoller.FinalResponse(context.Context) (VirtualMachineScaleSetsClientUpdateResponse, error)`
+- New function `*CloudServicesClientRestartPollerResponse.Resume(context.Context, *CloudServicesClient, string) error`
+- New function `*DisksClientDeletePoller.Poll(context.Context) (*http.Response, error)`
+- New function `*VirtualMachinesClientPowerOffPoller.FinalResponse(context.Context) (VirtualMachinesClientPowerOffResponse, error)`
+- New function `*VirtualMachinesClientDeletePoller.Done() bool`
+- New function `*VirtualMachineScaleSetVMRunCommandsClientDeletePollerResponse.Resume(context.Context, *VirtualMachineScaleSetVMRunCommandsClient, string) error`
+- New function `*ProximityPlacementGroupsClientListBySubscriptionPager.PageResponse() ProximityPlacementGroupsClientListBySubscriptionResponse`
+- New function `*DiskEncryptionSetsClientListByResourceGroupPager.Err() error`
+- New function `*VirtualMachinesClientPerformMaintenancePoller.Poll(context.Context) (*http.Response, error)`
+- New function `*CloudServiceRoleInstancesClientReimagePoller.Poll(context.Context) (*http.Response, error)`
+- New function `*DiskRestorePointClientListByRestorePointPager.Err() error`
+- New function `*GalleryImageVersionsClientDeletePoller.Done() bool`
+- New function `*VirtualMachinesClientCreateOrUpdatePoller.Poll(context.Context) (*http.Response, error)`
+- New function `*VirtualMachinesClientUpdatePoller.Poll(context.Context) (*http.Response, error)`
+- New function `*CloudServicesClientDeleteInstancesPoller.Poll(context.Context) (*http.Response, error)`
+- New function `*VirtualMachineScaleSetsClientRestartPoller.Done() bool`
+- New function `*DisksClientUpdatePoller.FinalResponse(context.Context) (DisksClientUpdateResponse, error)`
+- New function `*VirtualMachineScaleSetVMExtensionsClientCreateOrUpdatePoller.FinalResponse(context.Context) (VirtualMachineScaleSetVMExtensionsClientCreateOrUpdateResponse, error)`
+- New function `*CloudServiceRoleInstancesClientRebuildPollerResponse.Resume(context.Context, *CloudServiceRoleInstancesClient, string) error`
+- New function `DedicatedHostsClientDeletePollerResponse.PollUntilDone(context.Context, time.Duration) (DedicatedHostsClientDeleteResponse, error)`
+- New function `*GalleryImageVersionsClientUpdatePoller.ResumeToken() (string, error)`
+- New function `*GalleryApplicationVersionsClientUpdatePollerResponse.Resume(context.Context, *GalleryApplicationVersionsClient, string) error`
+- New function `*DiskAccessesClientCreateOrUpdatePollerResponse.Resume(context.Context, *DiskAccessesClient, string) error`
+- New function `*VirtualMachineRunCommandsClientDeletePoller.Poll(context.Context) (*http.Response, error)`
+- New function `*SharedGalleriesClientListPager.NextPage(context.Context) bool`
+- New function `*GalleryImagesClientCreateOrUpdatePoller.Poll(context.Context) (*http.Response, error)`
+- New function `*SharedGalleriesClientListPager.PageResponse() SharedGalleriesClientListResponse`
+- New function `*GalleryApplicationsClientListByGalleryPager.PageResponse() GalleryApplicationsClientListByGalleryResponse`
+- New function `VirtualMachineScaleSetsClientStartPollerResponse.PollUntilDone(context.Context, time.Duration) (VirtualMachineScaleSetsClientStartResponse, error)`
+- New function `*CapacityReservationsClientCreateOrUpdatePoller.FinalResponse(context.Context) (CapacityReservationsClientCreateOrUpdateResponse, error)`
+- New function `*VirtualMachineScaleSetVMsClientPerformMaintenancePoller.FinalResponse(context.Context) (VirtualMachineScaleSetVMsClientPerformMaintenanceResponse, error)`
+- New function `*GalleriesClientCreateOrUpdatePoller.ResumeToken() (string, error)`
+- New function `*GalleriesClientCreateOrUpdatePoller.Poll(context.Context) (*http.Response, error)`
+- New function `*DedicatedHostsClientCreateOrUpdatePoller.FinalResponse(context.Context) (DedicatedHostsClientCreateOrUpdateResponse, error)`
+- New function `*CloudServiceRolesClientListPager.PageResponse() CloudServiceRolesClientListResponse`
+- New function `VirtualMachineRunCommandsClientCreateOrUpdatePollerResponse.PollUntilDone(context.Context, time.Duration) (VirtualMachineRunCommandsClientCreateOrUpdateResponse, error)`
+- New function `LogAnalyticsClientExportThrottledRequestsPollerResponse.PollUntilDone(context.Context, time.Duration) (LogAnalyticsClientExportThrottledRequestsResponse, error)`
+- New function `*VirtualMachinesClientListPager.PageResponse() VirtualMachinesClientListResponse`
+- New function `*VirtualMachineScaleSetVMsClientRunCommandPoller.Done() bool`
+- New function `*CapacityReservationsClientUpdatePoller.ResumeToken() (string, error)`
+- New function `*VirtualMachineScaleSetVMsClientReimagePoller.Poll(context.Context) (*http.Response, error)`
+- New function `*GalleryApplicationVersionsClientDeletePoller.Poll(context.Context) (*http.Response, error)`
+- New function `*GalleriesClientListByResourceGroupPager.Err() error`
+- New function `RestorePointsClientDeletePollerResponse.PollUntilDone(context.Context, time.Duration) (RestorePointsClientDeleteResponse, error)`
+- New function `*VirtualMachinesClientListByLocationPager.PageResponse() VirtualMachinesClientListByLocationResponse`
+- New function `*SharedGalleriesClientListPager.Err() error`
+- New function `*VirtualMachinesClientPowerOffPollerResponse.Resume(context.Context, *VirtualMachinesClient, string) error`
+- New function `VirtualMachineScaleSetsClientSetOrchestrationServiceStatePollerResponse.PollUntilDone(context.Context, time.Duration) (VirtualMachineScaleSetsClientSetOrchestrationServiceStateResponse, error)`
+- New function `*VirtualMachinesClientAssessPatchesPoller.FinalResponse(context.Context) (VirtualMachinesClientAssessPatchesResponse, error)`
+- New function `*SnapshotsClientGrantAccessPoller.Done() bool`
+- New function `*SSHPublicKeysClientListBySubscriptionPager.PageResponse() SSHPublicKeysClientListBySubscriptionResponse`
+- New function `DiskAccessesClientCreateOrUpdatePollerResponse.PollUntilDone(context.Context, time.Duration) (DiskAccessesClientCreateOrUpdateResponse, error)`
+- New function `*VirtualMachineScaleSetVMRunCommandsClientDeletePoller.FinalResponse(context.Context) (VirtualMachineScaleSetVMRunCommandsClientDeleteResponse, error)`
+- New function `VirtualMachineScaleSetVMsClientDeallocatePollerResponse.PollUntilDone(context.Context, time.Duration) (VirtualMachineScaleSetVMsClientDeallocateResponse, error)`
+- New function `*SnapshotsClientCreateOrUpdatePoller.FinalResponse(context.Context) (SnapshotsClientCreateOrUpdateResponse, error)`
+- New function `*DiskAccessesClientCreateOrUpdatePoller.Poll(context.Context) (*http.Response, error)`
+- New function `*VirtualMachinesClientReapplyPoller.ResumeToken() (string, error)`
+- New function `*DisksClientRevokeAccessPoller.Done() bool`
+- New function `*VirtualMachineRunCommandsClientCreateOrUpdatePoller.ResumeToken() (string, error)`
+- New function `*VirtualMachinesClientUpdatePoller.ResumeToken() (string, error)`
+- New function `*DiskAccessesClientUpdateAPrivateEndpointConnectionPoller.Poll(context.Context) (*http.Response, error)`
+- New function `*SnapshotsClientCreateOrUpdatePoller.ResumeToken() (string, error)`
+- New function `*GalleryApplicationsClientDeletePollerResponse.Resume(context.Context, *GalleryApplicationsClient, string) error`
+- New function `*VirtualMachinesClientConvertToManagedDisksPoller.Poll(context.Context) (*http.Response, error)`
+- New function `*CloudServicesClientRebuildPoller.ResumeToken() (string, error)`
+- New function `*SnapshotsClientDeletePoller.Poll(context.Context) (*http.Response, error)`
+- New function `*VirtualMachineScaleSetExtensionsClientCreateOrUpdatePoller.Done() bool`
+- New function `*VirtualMachineRunCommandsClientUpdatePoller.FinalResponse(context.Context) (VirtualMachineRunCommandsClientUpdateResponse, error)`
+- New function `*DiskAccessesClientListPager.Err() error`
+- New function `*VirtualMachineScaleSetExtensionsClientCreateOrUpdatePoller.FinalResponse(context.Context) (VirtualMachineScaleSetExtensionsClientCreateOrUpdateResponse, error)`
+- New function `*VirtualMachineRunCommandsClientListByVirtualMachinePager.PageResponse() VirtualMachineRunCommandsClientListByVirtualMachineResponse`
+- New function `*VirtualMachinesClientCreateOrUpdatePollerResponse.Resume(context.Context, *VirtualMachinesClient, string) error`
+- New function `VirtualMachineScaleSetVMRunCommandsClientUpdatePollerResponse.PollUntilDone(context.Context, time.Duration) (VirtualMachineScaleSetVMRunCommandsClientUpdateResponse, error)`
+- New function `DedicatedHostsClientCreateOrUpdatePollerResponse.PollUntilDone(context.Context, time.Duration) (DedicatedHostsClientCreateOrUpdateResponse, error)`
+- New function `*VirtualMachineScaleSetVMsClientUpdatePoller.Done() bool`
+- New function `*GalleryImageVersionsClientCreateOrUpdatePoller.Done() bool`
+- New function `*DisksClientCreateOrUpdatePoller.ResumeToken() (string, error)`
+- New function `*VirtualMachineScaleSetExtensionsClientUpdatePoller.ResumeToken() (string, error)`
+- New function `CloudServicesClientDeletePollerResponse.PollUntilDone(context.Context, time.Duration) (CloudServicesClientDeleteResponse, error)`
+- New function `*VirtualMachineScaleSetsClientStartPoller.Poll(context.Context) (*http.Response, error)`
+- New function `CloudServiceRoleInstancesClientDeletePollerResponse.PollUntilDone(context.Context, time.Duration) (CloudServiceRoleInstancesClientDeleteResponse, error)`
+- New function `*VirtualMachineScaleSetsClientSetOrchestrationServiceStatePoller.Done() bool`
+- New function `*ImagesClientUpdatePoller.Done() bool`
+- New function `*VirtualMachinesClientListPager.Err() error`
+- New function `*GalleryApplicationsClientCreateOrUpdatePollerResponse.Resume(context.Context, *GalleryApplicationsClient, string) error`
+- New function `*CloudServicesClientRebuildPollerResponse.Resume(context.Context, *CloudServicesClient, string) error`
+- New function `*DisksClientGrantAccessPoller.ResumeToken() (string, error)`
+- New function `*VirtualMachineRunCommandsClientDeletePoller.FinalResponse(context.Context) (VirtualMachineRunCommandsClientDeleteResponse, error)`
+- New function `*SnapshotsClientGrantAccessPoller.ResumeToken() (string, error)`
+- New function `*CapacityReservationsClientListByCapacityReservationGroupPager.Err() error`
+- New function `*VirtualMachineScaleSetVMsClientRestartPoller.FinalResponse(context.Context) (VirtualMachineScaleSetVMsClientRestartResponse, error)`
+- New function `*CloudServiceRoleInstancesClientRestartPollerResponse.Resume(context.Context, *CloudServiceRoleInstancesClient, string) error`
+- New function `*VirtualMachineScaleSetsClientRestartPoller.FinalResponse(context.Context) (VirtualMachineScaleSetsClientRestartResponse, error)`
+- New function `*DiskAccessesClientDeleteAPrivateEndpointConnectionPoller.Poll(context.Context) (*http.Response, error)`
+- New function `*DiskAccessesClientCreateOrUpdatePoller.Done() bool`
+- New function `*GalleryImagesClientDeletePoller.Poll(context.Context) (*http.Response, error)`
+- New function `*DiskAccessesClientDeletePoller.Done() bool`
+- New function `*VirtualMachineScaleSetVMRunCommandsClientUpdatePoller.Done() bool`
+- New function `CloudServiceRoleInstancesClientRestartPollerResponse.PollUntilDone(context.Context, time.Duration) (CloudServiceRoleInstancesClientRestartResponse, error)`
+- New function `*VirtualMachineScaleSetsClientPerformMaintenancePoller.ResumeToken() (string, error)`
+- New function `SnapshotsClientGrantAccessPollerResponse.PollUntilDone(context.Context, time.Duration) (SnapshotsClientGrantAccessResponse, error)`
+- New function `*VirtualMachineScaleSetsClientStartPoller.FinalResponse(context.Context) (VirtualMachineScaleSetsClientStartResponse, error)`
+- New function `*ImagesClientDeletePoller.Done() bool`
+- New function `*GalleryApplicationsClientDeletePoller.FinalResponse(context.Context) (GalleryApplicationsClientDeleteResponse, error)`
+- New function `*VirtualMachinesClientPerformMaintenancePoller.Done() bool`
+- New function `*CapacityReservationGroupsClientListBySubscriptionPager.PageResponse() CapacityReservationGroupsClientListBySubscriptionResponse`
+- New function `*RestorePointCollectionsClientListAllPager.Err() error`
+- New function `*VirtualMachineExtensionsClientUpdatePollerResponse.Resume(context.Context, *VirtualMachineExtensionsClient, string) error`
+- New function `*VirtualMachinesClientRunCommandPoller.Poll(context.Context) (*http.Response, error)`
+- New function `*CapacityReservationsClientDeletePoller.Done() bool`
+- New function `*DedicatedHostGroupsClientListByResourceGroupPager.NextPage(context.Context) bool`
+- New function `*GalleryImagesClientUpdatePoller.FinalResponse(context.Context) (GalleryImagesClientUpdateResponse, error)`
+- New function `*CloudServicesClientListPager.NextPage(context.Context) bool`
+- New function `*DiskAccessesClientDeletePoller.ResumeToken() (string, error)`
+- New function `*RestorePointsClientCreatePoller.Done() bool`
+- New function `*DiskEncryptionSetsClientCreateOrUpdatePoller.Done() bool`
+- New function `*VirtualMachineScaleSetVMsClientPerformMaintenancePollerResponse.Resume(context.Context, *VirtualMachineScaleSetVMsClient, string) error`
+- New function `*VirtualMachineScaleSetVMRunCommandsClientUpdatePoller.ResumeToken() (string, error)`
+- New function `*ProximityPlacementGroupsClientListByResourceGroupPager.NextPage(context.Context) bool`
+- New function `*CloudServicesUpdateDomainClientWalkUpdateDomainPoller.FinalResponse(context.Context) (CloudServicesUpdateDomainClientWalkUpdateDomainResponse, error)`
+- New function `VirtualMachineScaleSetsClientPerformMaintenancePollerResponse.PollUntilDone(context.Context, time.Duration) (VirtualMachineScaleSetsClientPerformMaintenanceResponse, error)`
+- New function `*VirtualMachineScaleSetVMsClientRestartPollerResponse.Resume(context.Context, *VirtualMachineScaleSetVMsClient, string) error`
+- New function `*DedicatedHostsClientUpdatePoller.ResumeToken() (string, error)`
+- New function `*DiskEncryptionSetsClientDeletePoller.Done() bool`
+- New function `*VirtualMachineScaleSetVMsClientReimagePoller.Done() bool`
+- New function `VirtualMachineScaleSetsClientRedeployPollerResponse.PollUntilDone(context.Context, time.Duration) (VirtualMachineScaleSetsClientRedeployResponse, error)`
+- New function `DiskAccessesClientUpdatePollerResponse.PollUntilDone(context.Context, time.Duration) (DiskAccessesClientUpdateResponse, error)`
+- New function `*DiskAccessesClientDeletePoller.Poll(context.Context) (*http.Response, error)`
+- New function `*DiskRestorePointClientRevokeAccessPoller.FinalResponse(context.Context) (DiskRestorePointClientRevokeAccessResponse, error)`
+- New function `*VirtualMachineScaleSetVMExtensionsClientCreateOrUpdatePoller.ResumeToken() (string, error)`
+- New function `*VirtualMachineScaleSetsClientDeletePoller.Poll(context.Context) (*http.Response, error)`
+- New function `*VirtualMachineScaleSetVMsClientDeallocatePoller.Poll(context.Context) (*http.Response, error)`
+- New function `*GalleryImagesClientUpdatePoller.Poll(context.Context) (*http.Response, error)`
+- New function `*SSHPublicKeysClientListBySubscriptionPager.Err() error`
+- New function `GalleryApplicationsClientCreateOrUpdatePollerResponse.PollUntilDone(context.Context, time.Duration) (GalleryApplicationsClientCreateOrUpdateResponse, error)`
+- New function `*ImagesClientCreateOrUpdatePoller.ResumeToken() (string, error)`
+- New function `*VirtualMachinesClientRestartPoller.ResumeToken() (string, error)`
+- New function `*DisksClientRevokeAccessPollerResponse.Resume(context.Context, *DisksClient, string) error`
+- New function `*VirtualMachinesClientDeletePollerResponse.Resume(context.Context, *VirtualMachinesClient, string) error`
+- New function `VirtualMachineScaleSetsClientPowerOffPollerResponse.PollUntilDone(context.Context, time.Duration) (VirtualMachineScaleSetsClientPowerOffResponse, error)`
+- New function `*VirtualMachineScaleSetVMsClientReimageAllPollerResponse.Resume(context.Context, *VirtualMachineScaleSetVMsClient, string) error`
+- New function `*VirtualMachineScaleSetVMsClientPowerOffPollerResponse.Resume(context.Context, *VirtualMachineScaleSetVMsClient, string) error`
+- New function `*RestorePointCollectionsClientListPager.Err() error`
+- New function `*SnapshotsClientDeletePoller.FinalResponse(context.Context) (SnapshotsClientDeleteResponse, error)`
+- New function `*LogAnalyticsClientExportRequestRateByIntervalPoller.Poll(context.Context) (*http.Response, error)`
+- New function `*VirtualMachinesClientReimagePoller.Done() bool`
+- New function `GalleryApplicationVersionsClientDeletePollerResponse.PollUntilDone(context.Context, time.Duration) (GalleryApplicationVersionsClientDeleteResponse, error)`
+- New function `*GalleryImagesClientDeletePoller.ResumeToken() (string, error)`
+- New function `*GalleryImageVersionsClientListByGalleryImagePager.NextPage(context.Context) bool`
+- New function `*VirtualMachineScaleSetsClientRedeployPoller.FinalResponse(context.Context) (VirtualMachineScaleSetsClientRedeployResponse, error)`
+- New function `*VirtualMachineScaleSetsClientDeleteInstancesPoller.ResumeToken() (string, error)`
+- New function `*GalleryImageVersionsClientDeletePoller.FinalResponse(context.Context) (GalleryImageVersionsClientDeleteResponse, error)`
+- New function `*VirtualMachineScaleSetExtensionsClientDeletePoller.FinalResponse(context.Context) (VirtualMachineScaleSetExtensionsClientDeleteResponse, error)`
+- New function `*VirtualMachineScaleSetVMsClientStartPoller.Done() bool`
+- New function `*VirtualMachineScaleSetsClientDeleteInstancesPoller.Poll(context.Context) (*http.Response, error)`
+- New function `*GalleryImagesClientListByGalleryPager.PageResponse() GalleryImagesClientListByGalleryResponse`
+- New function `*VirtualMachinesClientStartPoller.Done() bool`
+- New function `VirtualMachinesClientRestartPollerResponse.PollUntilDone(context.Context, time.Duration) (VirtualMachinesClientRestartResponse, error)`
+- New function `*SnapshotsClientGrantAccessPoller.FinalResponse(context.Context) (SnapshotsClientGrantAccessResponse, error)`
+- New function `*VirtualMachineScaleSetVMRunCommandsClientListPager.Err() error`
+- New function `*VirtualMachinesClientInstallPatchesPoller.Poll(context.Context) (*http.Response, error)`
+- New function `*VirtualMachineScaleSetsClientRedeployPoller.Poll(context.Context) (*http.Response, error)`
+- New function `*CloudServicesClientDeletePoller.FinalResponse(context.Context) (CloudServicesClientDeleteResponse, error)`
+- New function `*VirtualMachineScaleSetVMExtensionsClientUpdatePoller.Poll(context.Context) (*http.Response, error)`
+- New function `*CloudServicesClientRebuildPoller.Done() bool`
+- New function `*GalleryApplicationsClientUpdatePoller.ResumeToken() (string, error)`
+- New function `*CloudServicesClientRebuildPoller.Poll(context.Context) (*http.Response, error)`
+- New function `*DiskAccessesClientDeleteAPrivateEndpointConnectionPoller.ResumeToken() (string, error)`
+- New function `*VirtualMachineScaleSetVMsClientStartPoller.Poll(context.Context) (*http.Response, error)`
+- New function `*DiskAccessesClientDeletePollerResponse.Resume(context.Context, *DiskAccessesClient, string) error`
+- New function `*GallerySharingProfileClientUpdatePoller.FinalResponse(context.Context) (GallerySharingProfileClientUpdateResponse, error)`
+- New function `*GalleryImagesClientListByGalleryPager.Err() error`
+- New function `*VirtualMachineScaleSetVMExtensionsClientDeletePoller.FinalResponse(context.Context) (VirtualMachineScaleSetVMExtensionsClientDeleteResponse, error)`
+- New function `DiskAccessesClientDeleteAPrivateEndpointConnectionPollerResponse.PollUntilDone(context.Context, time.Duration) (DiskAccessesClientDeleteAPrivateEndpointConnectionResponse, error)`
+- New function `*VirtualMachineScaleSetsClientCreateOrUpdatePoller.Poll(context.Context) (*http.Response, error)`
+- New function `*VirtualMachineScaleSetVMsClientRunCommandPoller.FinalResponse(context.Context) (VirtualMachineScaleSetVMsClientRunCommandResponse, error)`
+- New function `*VirtualMachineScaleSetVMRunCommandsClientUpdatePollerResponse.Resume(context.Context, *VirtualMachineScaleSetVMRunCommandsClient, string) error`
+- New function `*VirtualMachineScaleSetsClientDeletePoller.FinalResponse(context.Context) (VirtualMachineScaleSetsClientDeleteResponse, error)`
+- New function `*GalleryImageVersionPublishingProfile.UnmarshalJSON([]byte) error`
+- New function `*VirtualMachinesClientDeallocatePollerResponse.Resume(context.Context, *VirtualMachinesClient, string) error`
+- New function `*GalleryImageVersionsClientDeletePollerResponse.Resume(context.Context, *GalleryImageVersionsClient, string) error`
+- New function `*DedicatedHostsClientCreateOrUpdatePoller.ResumeToken() (string, error)`
+- New function `*CloudServicesClientReimagePollerResponse.Resume(context.Context, *CloudServicesClient, string) error`
+- New function `*GalleryImagesClientCreateOrUpdatePoller.ResumeToken() (string, error)`
+- New function `*VirtualMachineScaleSetsClientUpdateInstancesPoller.Poll(context.Context) (*http.Response, error)`
+- New function `*SnapshotsClientListByResourceGroupPager.NextPage(context.Context) bool`
+- New function `*VirtualMachinesClientConvertToManagedDisksPoller.FinalResponse(context.Context) (VirtualMachinesClientConvertToManagedDisksResponse, error)`
+- New function `*VirtualMachinesClientReapplyPoller.Poll(context.Context) (*http.Response, error)`
+- New function `*DiskEncryptionSetsClientListByResourceGroupPager.NextPage(context.Context) bool`
+- New function `*DiskRestorePointClientGrantAccessPoller.ResumeToken() (string, error)`
+- New function `*VirtualMachinesClientReimagePoller.ResumeToken() (string, error)`
+- New function `*DiskEncryptionSetsClientUpdatePoller.FinalResponse(context.Context) (DiskEncryptionSetsClientUpdateResponse, error)`
+- New function `*VirtualMachineRunCommandsClientListPager.PageResponse() VirtualMachineRunCommandsClientListResponse`
+- New function `*SSHPublicKeysClientListByResourceGroupPager.PageResponse() SSHPublicKeysClientListByResourceGroupResponse`
+- New function `*GalleryApplicationVersionsClientDeletePoller.ResumeToken() (string, error)`
+- New function `*DedicatedHostsClientDeletePoller.ResumeToken() (string, error)`
+- New function `*VirtualMachineScaleSetsClientCreateOrUpdatePoller.FinalResponse(context.Context) (VirtualMachineScaleSetsClientCreateOrUpdateResponse, error)`
+- New function `DiskAccessesClientDeletePollerResponse.PollUntilDone(context.Context, time.Duration) (DiskAccessesClientDeleteResponse, error)`
+- New function `*VirtualMachineExtensionsClientCreateOrUpdatePoller.FinalResponse(context.Context) (VirtualMachineExtensionsClientCreateOrUpdateResponse, error)`
+- New function `SnapshotsClientDeletePollerResponse.PollUntilDone(context.Context, time.Duration) (SnapshotsClientDeleteResponse, error)`
+- New function `*ImagesClientCreateOrUpdatePoller.FinalResponse(context.Context) (ImagesClientCreateOrUpdateResponse, error)`
+- New function `*LogAnalyticsClientExportThrottledRequestsPollerResponse.Resume(context.Context, *LogAnalyticsClient, string) error`
+- New function `*GalleryImageVersionsClientCreateOrUpdatePoller.ResumeToken() (string, error)`
+- New function `*LogAnalyticsClientExportThrottledRequestsPoller.Poll(context.Context) (*http.Response, error)`
+- New function `*CloudServicesClientDeleteInstancesPollerResponse.Resume(context.Context, *CloudServicesClient, string) error`
+- New function `*CapacityReservationGroupsClientListBySubscriptionPager.Err() error`
+- New function `*GalleryApplicationVersionsClientDeletePoller.FinalResponse(context.Context) (GalleryApplicationVersionsClientDeleteResponse, error)`
+- New function `*LogAnalyticsClientExportRequestRateByIntervalPoller.ResumeToken() (string, error)`
+- New function `VirtualMachineExtensionsClientUpdatePollerResponse.PollUntilDone(context.Context, time.Duration) (VirtualMachineExtensionsClientUpdateResponse, error)`
+- New function `*VirtualMachinesClientInstallPatchesPoller.ResumeToken() (string, error)`
+- New function `*CapacityReservationsClientUpdatePollerResponse.Resume(context.Context, *CapacityReservationsClient, string) error`
+- New function `*GalleriesClientDeletePollerResponse.Resume(context.Context, *GalleriesClient, string) error`
+- New function `*GalleryImageVersionsClientUpdatePoller.Done() bool`
+- New function `*CloudServicesClientRestartPoller.Poll(context.Context) (*http.Response, error)`
+- New function `*ImagesClientListByResourceGroupPager.PageResponse() ImagesClientListByResourceGroupResponse`
+- New function `*VirtualMachineScaleSetsClientPowerOffPollerResponse.Resume(context.Context, *VirtualMachineScaleSetsClient, string) error`
+- New function `*CloudServicesClientRebuildPoller.FinalResponse(context.Context) (CloudServicesClientRebuildResponse, error)`
+- New function `*VirtualMachinesClientDeallocatePoller.Poll(context.Context) (*http.Response, error)`
+- New function `*VirtualMachineScaleSetVMExtensionsClientDeletePoller.Done() bool`
+- New function `*VirtualMachineScaleSetVMsClientDeletePoller.Done() bool`
+- New function `*GalleryImageVersionsClientDeletePoller.Poll(context.Context) (*http.Response, error)`
+- New function `*VirtualMachineExtensionsClientDeletePoller.Done() bool`
+- New function `*CloudServicesClientListAllPager.NextPage(context.Context) bool`
+- New function `*SnapshotsClientListPager.NextPage(context.Context) bool`
+- New function `RestorePointsClientCreatePollerResponse.PollUntilDone(context.Context, time.Duration) (RestorePointsClientCreateResponse, error)`
+- New function `*DiskAccessesClientListByResourceGroupPager.NextPage(context.Context) bool`
+- New function `*DedicatedHostGroupsClientListBySubscriptionPager.Err() error`
+- New function `*DiskEncryptionSetsClientListAssociatedResourcesPager.Err() error`
+- New function `*GalleryApplicationsClientUpdatePollerResponse.Resume(context.Context, *GalleryApplicationsClient, string) error`
+- New function `*DedicatedHostsClientCreateOrUpdatePoller.Poll(context.Context) (*http.Response, error)`
+- New function `*AvailabilitySetsClientListPager.NextPage(context.Context) bool`
+- New function `*VirtualMachinesClientPowerOffPoller.Poll(context.Context) (*http.Response, error)`
+- New function `*CloudServiceRoleInstancesClientRestartPoller.Poll(context.Context) (*http.Response, error)`
+- New function `*VirtualMachineScaleSetsClientListSKUsPager.Err() error`
+- New function `*DiskAccessesClientUpdateAPrivateEndpointConnectionPoller.Done() bool`
+- New function `*GalleryApplicationVersionsClientCreateOrUpdatePoller.Done() bool`
+- New function `VirtualMachineScaleSetExtensionsClientCreateOrUpdatePollerResponse.PollUntilDone(context.Context, time.Duration) (VirtualMachineScaleSetExtensionsClientCreateOrUpdateResponse, error)`
+- New function `CapacityReservationsClientUpdatePollerResponse.PollUntilDone(context.Context, time.Duration) (CapacityReservationsClientUpdateResponse, error)`
+- New function `*VirtualMachineScaleSetVMExtensionsClientDeletePollerResponse.Resume(context.Context, *VirtualMachineScaleSetVMExtensionsClient, string) error`
+- New function `*DiskAccessesClientCreateOrUpdatePoller.ResumeToken() (string, error)`
+- New function `*VirtualMachineScaleSetVMsClientDeletePoller.Poll(context.Context) (*http.Response, error)`
+- New function `*VirtualMachinesClientPowerOffPoller.ResumeToken() (string, error)`
+- New function `*VirtualMachinesClientAssessPatchesPollerResponse.Resume(context.Context, *VirtualMachinesClient, string) error`
+- New function `*GalleryImagesClientCreateOrUpdatePollerResponse.Resume(context.Context, *GalleryImagesClient, string) error`
+- New function `ImagesClientDeletePollerResponse.PollUntilDone(context.Context, time.Duration) (ImagesClientDeleteResponse, error)`
+- New function `*VirtualMachineScaleSetVMRunCommandsClientCreateOrUpdatePoller.Done() bool`
+- New function `*CapacityReservationsClientDeletePollerResponse.Resume(context.Context, *CapacityReservationsClient, string) error`
+- New function `*VirtualMachineScaleSetsClientPowerOffPoller.ResumeToken() (string, error)`
+- New function `*VirtualMachineScaleSetExtensionsClientDeletePoller.ResumeToken() (string, error)`
+- New function `*RestorePointCollectionsClientListPager.PageResponse() RestorePointCollectionsClientListResponse`
+- New function `*VirtualMachineScaleSetVMRunCommandsClientDeletePoller.Done() bool`
+- New function `*SSHPublicKeysClientListByResourceGroupPager.NextPage(context.Context) bool`
+- New function `*SnapshotsClientRevokeAccessPoller.Poll(context.Context) (*http.Response, error)`
+- New function `*GalleryApplicationVersionsClientCreateOrUpdatePollerResponse.Resume(context.Context, *GalleryApplicationVersionsClient, string) error`
+- New function `*GalleryImageVersionsClientUpdatePoller.Poll(context.Context) (*http.Response, error)`
+- New function `*VirtualMachinesClientRedeployPoller.ResumeToken() (string, error)`
+- New function `LogAnalyticsClientExportRequestRateByIntervalPollerResponse.PollUntilDone(context.Context, time.Duration) (LogAnalyticsClientExportRequestRateByIntervalResponse, error)`
+- New function `*VirtualMachineRunCommandsClientCreateOrUpdatePoller.Done() bool`
+- New function `*VirtualMachinesClientRunCommandPoller.ResumeToken() (string, error)`
+- New function `*SnapshotsClientCreateOrUpdatePollerResponse.Resume(context.Context, *SnapshotsClient, string) error`
+- New function `CapacityReservationsClientCreateOrUpdatePollerResponse.PollUntilDone(context.Context, time.Duration) (CapacityReservationsClientCreateOrUpdateResponse, error)`
+- New function `*DisksClientRevokeAccessPoller.Poll(context.Context) (*http.Response, error)`
+- New function `*VirtualMachineScaleSetVMsClientReimageAllPoller.FinalResponse(context.Context) (VirtualMachineScaleSetVMsClientReimageAllResponse, error)`
+- New function `*GalleryApplicationsClientListByGalleryPager.Err() error`
+- New function `*VirtualMachinesClientConvertToManagedDisksPoller.ResumeToken() (string, error)`
+- New function `DisksClientGrantAccessPollerResponse.PollUntilDone(context.Context, time.Duration) (DisksClientGrantAccessResponse, error)`
+- New function `*LogAnalyticsClientExportThrottledRequestsPoller.Done() bool`
+- New function `*DedicatedHostsClientListByHostGroupPager.NextPage(context.Context) bool`
+- New function `*DedicatedHostsClientCreateOrUpdatePollerResponse.Resume(context.Context, *DedicatedHostsClient, string) error`
+- New function `*VirtualMachineScaleSetVMRunCommandsClientListPager.NextPage(context.Context) bool`
+- New function `*LogAnalyticsClientExportRequestRateByIntervalPoller.Done() bool`
+- New function `*VirtualMachineScaleSetExtensionsClientUpdatePoller.Done() bool`
+- New function `*CloudServicesClientStartPoller.Done() bool`
+- New function `*GalleriesClientCreateOrUpdatePoller.FinalResponse(context.Context) (GalleriesClientCreateOrUpdateResponse, error)`
+- New function `*CloudServiceRoleInstancesClientReimagePoller.FinalResponse(context.Context) (CloudServiceRoleInstancesClientReimageResponse, error)`
+- New function `*CloudServiceRoleInstancesClientRebuildPoller.Done() bool`
+- New function `*VirtualMachineScaleSetsClientGetOSUpgradeHistoryPager.NextPage(context.Context) bool`
+- New function `*DiskAccessesClientDeleteAPrivateEndpointConnectionPoller.Done() bool`
+- New function `*SnapshotsClientDeletePoller.Done() bool`
+- New function `*CloudServiceOperatingSystemsClientListOSFamiliesPager.Err() error`
+- New function `*VirtualMachineRunCommandsClientUpdatePollerResponse.Resume(context.Context, *VirtualMachineRunCommandsClient, string) error`
+- New function `*VirtualMachineScaleSetVMExtensionsClientUpdatePoller.Done() bool`
+- New function `VirtualMachineScaleSetsClientDeallocatePollerResponse.PollUntilDone(context.Context, time.Duration) (VirtualMachineScaleSetsClientDeallocateResponse, error)`
+- New function `*DiskEncryptionSetsClientDeletePoller.FinalResponse(context.Context) (DiskEncryptionSetsClientDeleteResponse, error)`
+- New function `*SnapshotsClientRevokeAccessPoller.ResumeToken() (string, error)`
+- New function `CloudServicesClientCreateOrUpdatePollerResponse.PollUntilDone(context.Context, time.Duration) (CloudServicesClientCreateOrUpdateResponse, error)`
+- New function `*VirtualMachineExtensionsClientCreateOrUpdatePoller.ResumeToken() (string, error)`
+- New function `*GalleryApplicationVersionsClientCreateOrUpdatePoller.FinalResponse(context.Context) (GalleryApplicationVersionsClientCreateOrUpdateResponse, error)`
+- New function `*VirtualMachineScaleSetsClientReimageAllPoller.FinalResponse(context.Context) (VirtualMachineScaleSetsClientReimageAllResponse, error)`
+- New function `*VirtualMachineScaleSetVMsClientRedeployPoller.Poll(context.Context) (*http.Response, error)`
+- New function `*VirtualMachineScaleSetVMsClientPowerOffPoller.Done() bool`
+- New function `VirtualMachineScaleSetVMExtensionsClientUpdatePollerResponse.PollUntilDone(context.Context, time.Duration) (VirtualMachineScaleSetVMExtensionsClientUpdateResponse, error)`
+- New function `*DedicatedHostsClientUpdatePoller.FinalResponse(context.Context) (DedicatedHostsClientUpdateResponse, error)`
+- New function `*VirtualMachineScaleSetsClientListByLocationPager.NextPage(context.Context) bool`
+- New function `*ImagesClientListPager.PageResponse() ImagesClientListResponse`
+- New function `*DedicatedHostsClientListByHostGroupPager.Err() error`
+- New function `*VirtualMachinesClientDeallocatePoller.ResumeToken() (string, error)`
+- New function `*CloudServicesClientUpdatePollerResponse.Resume(context.Context, *CloudServicesClient, string) error`
+- New function `*RestorePointCollectionsClientDeletePoller.Done() bool`
+- New function `*DisksClientGrantAccessPoller.Poll(context.Context) (*http.Response, error)`
+- New function `*VirtualMachineScaleSetVMRunCommandsClientCreateOrUpdatePoller.Poll(context.Context) (*http.Response, error)`
+- New function `*VirtualMachineScaleSetsClientStartPoller.ResumeToken() (string, error)`
+- New function `*ImagesClientUpdatePoller.Poll(context.Context) (*http.Response, error)`
+- New function `SnapshotsClientRevokeAccessPollerResponse.PollUntilDone(context.Context, time.Duration) (SnapshotsClientRevokeAccessResponse, error)`
+- New function `*VirtualMachineScaleSetVMsClientReimageAllPoller.Poll(context.Context) (*http.Response, error)`
+- New function `*DiskAccessesClientUpdateAPrivateEndpointConnectionPoller.FinalResponse(context.Context) (DiskAccessesClientUpdateAPrivateEndpointConnectionResponse, error)`
+- New function `*VirtualMachinesClientReimagePoller.Poll(context.Context) (*http.Response, error)`
+- New function `VirtualMachineScaleSetVMsClientRedeployPollerResponse.PollUntilDone(context.Context, time.Duration) (VirtualMachineScaleSetVMsClientRedeployResponse, error)`
+- New function `RestorePointCollectionsClientDeletePollerResponse.PollUntilDone(context.Context, time.Duration) (RestorePointCollectionsClientDeleteResponse, error)`
+- New function `VirtualMachineScaleSetExtensionsClientDeletePollerResponse.PollUntilDone(context.Context, time.Duration) (VirtualMachineScaleSetExtensionsClientDeleteResponse, error)`
+- New function `*VirtualMachineScaleSetsClientReimageAllPoller.Poll(context.Context) (*http.Response, error)`
+- New function `*VirtualMachineScaleSetsClientRestartPoller.Poll(context.Context) (*http.Response, error)`
+- New function `DisksClientRevokeAccessPollerResponse.PollUntilDone(context.Context, time.Duration) (DisksClientRevokeAccessResponse, error)`
+- New function `*VirtualMachineScaleSetsClientDeallocatePoller.Done() bool`
+- New function `*VirtualMachineScaleSetsClientDeletePollerResponse.Resume(context.Context, *VirtualMachineScaleSetsClient, string) error`
+- New function `*DisksClientCreateOrUpdatePoller.Poll(context.Context) (*http.Response, error)`
+- New function `*SnapshotsClientCreateOrUpdatePoller.Poll(context.Context) (*http.Response, error)`
+- New function `*GalleryApplicationVersionsClientListByGalleryApplicationPager.Err() error`
+- New function `*CapacityReservationGroupsClientListBySubscriptionPager.NextPage(context.Context) bool`
+- New function `*VirtualMachineScaleSetsClientReimagePoller.ResumeToken() (string, error)`
+- New function `*CloudServicesClientCreateOrUpdatePoller.Done() bool`
+- New function `*AvailabilitySetsClientListBySubscriptionPager.NextPage(context.Context) bool`
+- New function `*VirtualMachineScaleSetVMsClientDeallocatePoller.ResumeToken() (string, error)`
+- New function `*VirtualMachinesClientDeletePoller.ResumeToken() (string, error)`
+- New function `*VirtualMachineScaleSetVMsClientRedeployPoller.Done() bool`
+- New function `ProximityPlacementGroupUpdate.MarshalJSON() ([]byte, error)`
+- New function `*SnapshotsClientGrantAccessPoller.Poll(context.Context) (*http.Response, error)`
+- New function `*GallerySharingProfileClientUpdatePoller.Done() bool`
+- New function `*VirtualMachinesClientReapplyPoller.Done() bool`
+- New function `VirtualMachineScaleSetVMsClientRestartPollerResponse.PollUntilDone(context.Context, time.Duration) (VirtualMachineScaleSetVMsClientRestartResponse, error)`
+- New function `*VirtualMachineScaleSetVMExtensionsClientCreateOrUpdatePoller.Done() bool`
+- New function `*VirtualMachineScaleSetVMsClientReimagePoller.ResumeToken() (string, error)`
+- New function `VirtualMachineScaleSetVMsClientReimagePollerResponse.PollUntilDone(context.Context, time.Duration) (VirtualMachineScaleSetVMsClientReimageResponse, error)`
+- New function `*CloudServicesClientRestartPoller.FinalResponse(context.Context) (CloudServicesClientRestartResponse, error)`
+- New function `*CloudServicesClientDeletePollerResponse.Resume(context.Context, *CloudServicesClient, string) error`
+- New function `*VirtualMachineScaleSetVMsClientDeallocatePollerResponse.Resume(context.Context, *VirtualMachineScaleSetVMsClient, string) error`
+- New function `*DiskEncryptionSetsClientListAssociatedResourcesPager.NextPage(context.Context) bool`
+- New function `*VirtualMachineScaleSetVMsClientRestartPoller.Poll(context.Context) (*http.Response, error)`
+- New function `*VirtualMachinesClientDeallocatePoller.Done() bool`
+- New function `*GalleryApplicationsClientCreateOrUpdatePoller.Done() bool`
+- New function `*VirtualMachineScaleSetVMExtensionsClientUpdatePoller.FinalResponse(context.Context) (VirtualMachineScaleSetVMExtensionsClientUpdateResponse, error)`
+- New function `*SnapshotsClientListByResourceGroupPager.PageResponse() SnapshotsClientListByResourceGroupResponse`
+- New function `*SnapshotsClientListPager.Err() error`
+- New function `*VirtualMachineScaleSetVMsClientRestartPoller.ResumeToken() (string, error)`
+- New function `*VirtualMachineRunCommandsClientUpdatePoller.ResumeToken() (string, error)`
+- New function `*DiskAccessesClientUpdatePoller.FinalResponse(context.Context) (DiskAccessesClientUpdateResponse, error)`
+- New function `*CloudServiceRoleInstancesClientListPager.PageResponse() CloudServiceRoleInstancesClientListResponse`
+- New function `*VirtualMachineScaleSetVMsClientDeallocatePoller.Done() bool`
+- New function `*SnapshotsClientUpdatePoller.Done() bool`
+- New function `*GalleryApplicationsClientUpdatePoller.FinalResponse(context.Context) (GalleryApplicationsClientUpdateResponse, error)`
+- New function `*DiskEncryptionSetsClientCreateOrUpdatePoller.FinalResponse(context.Context) (DiskEncryptionSetsClientCreateOrUpdateResponse, error)`
+- New function `*SnapshotsClientListByResourceGroupPager.Err() error`
+- New function `*VirtualMachineScaleSetRollingUpgradesClientStartExtensionUpgradePoller.Done() bool`
+- New function `*CloudServicesClientDeletePoller.ResumeToken() (string, error)`
+- New function `*GallerySharingProfileClientUpdatePoller.ResumeToken() (string, error)`
+- New function `*DedicatedHostGroupsClientListBySubscriptionPager.NextPage(context.Context) bool`
+- New function `*CloudServicesClientDeletePoller.Poll(context.Context) (*http.Response, error)`
+- New function `*GalleryApplicationVersionsClientCreateOrUpdatePoller.Poll(context.Context) (*http.Response, error)`
+- New function `*VirtualMachineScaleSetsClientDeletePoller.ResumeToken() (string, error)`
+- New function `*VirtualMachinesClientPerformMaintenancePoller.FinalResponse(context.Context) (VirtualMachinesClientPerformMaintenanceResponse, error)`
+- New function `*ProximityPlacementGroupsClientListByResourceGroupPager.PageResponse() ProximityPlacementGroupsClientListByResourceGroupResponse`
+- New function `*GalleriesClientUpdatePollerResponse.Resume(context.Context, *GalleriesClient, string) error`
+- New function `*VirtualMachineScaleSetExtensionsClientCreateOrUpdatePoller.ResumeToken() (string, error)`
+- New function `VirtualMachineScaleSetsClientUpdatePollerResponse.PollUntilDone(context.Context, time.Duration) (VirtualMachineScaleSetsClientUpdateResponse, error)`
+- New function `*DisksClientUpdatePollerResponse.Resume(context.Context, *DisksClient, string) error`
+- New function `VirtualMachineScaleSetsClientDeleteInstancesPollerResponse.PollUntilDone(context.Context, time.Duration) (VirtualMachineScaleSetsClientDeleteInstancesResponse, error)`
+- New function `VirtualMachineScaleSetsClientReimagePollerResponse.PollUntilDone(context.Context, time.Duration) (VirtualMachineScaleSetsClientReimageResponse, error)`
+- New function `*ImagesClientDeletePoller.ResumeToken() (string, error)`
+- New function `*VirtualMachinesClientRedeployPoller.Poll(context.Context) (*http.Response, error)`
+- New function `*VirtualMachineScaleSetVMRunCommandsClientDeletePoller.ResumeToken() (string, error)`
+- New function `*VirtualMachineScaleSetVMsClientStartPoller.ResumeToken() (string, error)`
+- New function `*VirtualMachineScaleSetExtensionsClientDeletePoller.Done() bool`
+- New function `*SnapshotsClientUpdatePoller.Poll(context.Context) (*http.Response, error)`
+- New function `*VirtualMachineScaleSetsClientRestartPoller.ResumeToken() (string, error)`
+- New function `*DiskAccessesClientListPager.NextPage(context.Context) bool`
+- New function `*GalleriesClientListPager.Err() error`
+- New function `*CapacityReservationsClientDeletePoller.FinalResponse(context.Context) (CapacityReservationsClientDeleteResponse, error)`
+- New function `*VirtualMachineScaleSetRollingUpgradesClientStartOSUpgradePoller.ResumeToken() (string, error)`
+- New function `*GalleryImageVersionsClientListByGalleryImagePager.PageResponse() GalleryImageVersionsClientListByGalleryImageResponse`
+- New function `*VirtualMachinesClientRedeployPoller.Done() bool`
+- New function `VirtualMachinesClientDeletePollerResponse.PollUntilDone(context.Context, time.Duration) (VirtualMachinesClientDeleteResponse, error)`
+- New function `DiskRestorePointClientRevokeAccessPollerResponse.PollUntilDone(context.Context, time.Duration) (DiskRestorePointClientRevokeAccessResponse, error)`
+- New function `*VirtualMachineScaleSetVMRunCommandsClientDeletePoller.Poll(context.Context) (*http.Response, error)`
+- New function `*CloudServicesClientCreateOrUpdatePoller.FinalResponse(context.Context) (CloudServicesClientCreateOrUpdateResponse, error)`
+- New function `*DiskAccessesClientListPrivateEndpointConnectionsPager.PageResponse() DiskAccessesClientListPrivateEndpointConnectionsResponse`
+- New function `*CapacityReservationsClientCreateOrUpdatePoller.ResumeToken() (string, error)`
+- New function `*ImagesClientDeletePoller.FinalResponse(context.Context) (ImagesClientDeleteResponse, error)`
+- New function `*CapacityReservationGroupsClientListByResourceGroupPager.Err() error`
+- New function `*CloudServiceOperatingSystemsClientListOSVersionsPager.Err() error`
+- New function `*GalleryApplicationsClientUpdatePoller.Poll(context.Context) (*http.Response, error)`
+- New function `*CloudServicesClientReimagePoller.Done() bool`
+- New function `*VirtualMachinesClientRestartPollerResponse.Resume(context.Context, *VirtualMachinesClient, string) error`
+- New function `ImagesClientUpdatePollerResponse.PollUntilDone(context.Context, time.Duration) (ImagesClientUpdateResponse, error)`
+- New function `ThrottledRequestsInput.MarshalJSON() ([]byte, error)`
+- New function `*VirtualMachinesClientConvertToManagedDisksPollerResponse.Resume(context.Context, *VirtualMachinesClient, string) error`
+- New function `*SharedGalleryImagesClientListPager.PageResponse() SharedGalleryImagesClientListResponse`
+- New function `*VirtualMachineScaleSetRollingUpgradesClientCancelPoller.FinalResponse(context.Context) (VirtualMachineScaleSetRollingUpgradesClientCancelResponse, error)`
+- New function `*VirtualMachineScaleSetsClientPowerOffPoller.FinalResponse(context.Context) (VirtualMachineScaleSetsClientPowerOffResponse, error)`
+- New function `*VirtualMachineScaleSetsClientSetOrchestrationServiceStatePoller.Poll(context.Context) (*http.Response, error)`
+- New function `*VirtualMachineScaleSetsClientRedeployPoller.ResumeToken() (string, error)`
+- New function `*DiskEncryptionSetsClientCreateOrUpdatePollerResponse.Resume(context.Context, *DiskEncryptionSetsClient, string) error`
+- New function `*GalleryImageVersionsClientUpdatePoller.FinalResponse(context.Context) (GalleryImageVersionsClientUpdateResponse, error)`
+- New function `*RestorePointCollectionsClientDeletePoller.FinalResponse(context.Context) (RestorePointCollectionsClientDeleteResponse, error)`
+- New function `GalleryApplicationsClientUpdatePollerResponse.PollUntilDone(context.Context, time.Duration) (GalleryApplicationsClientUpdateResponse, error)`
+- New function `*CapacityReservationsClientDeletePoller.Poll(context.Context) (*http.Response, error)`
+- New function `*VirtualMachineScaleSetVMsClientReimagePoller.FinalResponse(context.Context) (VirtualMachineScaleSetVMsClientReimageResponse, error)`
+- New function `*CloudServicesClientUpdatePoller.Poll(context.Context) (*http.Response, error)`
+- New function `*CloudServicesUpdateDomainClientListUpdateDomainsPager.NextPage(context.Context) bool`
+- New function `*VirtualMachineScaleSetVMExtensionsClientCreateOrUpdatePoller.Poll(context.Context) (*http.Response, error)`
+- New function `*DiskRestorePointClientListByRestorePointPager.PageResponse() DiskRestorePointClientListByRestorePointResponse`
+- New function `*VirtualMachinesClientInstallPatchesPoller.Done() bool`
+- New function `*VirtualMachinesClientRedeployPollerResponse.Resume(context.Context, *VirtualMachinesClient, string) error`
+- New function `*VirtualMachineScaleSetsClientDeleteInstancesPollerResponse.Resume(context.Context, *VirtualMachineScaleSetsClient, string) error`
+- New function `*CloudServiceRoleInstancesClientRestartPoller.Done() bool`
+- New function `*VirtualMachineScaleSetsClientCreateOrUpdatePollerResponse.Resume(context.Context, *VirtualMachineScaleSetsClient, string) error`
+- New function `*SharedGalleryImageVersionsClientListPager.NextPage(context.Context) bool`
+- New function `*CloudServiceRoleInstancesClientRebuildPoller.FinalResponse(context.Context) (CloudServiceRoleInstancesClientRebuildResponse, error)`
+- New function `*GalleryImageVersionsClientCreateOrUpdatePoller.FinalResponse(context.Context) (GalleryImageVersionsClientCreateOrUpdateResponse, error)`
+- New function `*VirtualMachineScaleSetsClientSetOrchestrationServiceStatePoller.ResumeToken() (string, error)`
+- New function `*ImagesClientDeletePoller.Poll(context.Context) (*http.Response, error)`
+- New function `*VirtualMachineScaleSetsClientListAllPager.NextPage(context.Context) bool`
+- New function `*DiskEncryptionSetsClientCreateOrUpdatePoller.Poll(context.Context) (*http.Response, error)`
+- New function `*VirtualMachinesClientListPager.NextPage(context.Context) bool`
+- New function `*GalleryImagesClientUpdatePoller.ResumeToken() (string, error)`
+- New function `*VirtualMachinesClientStartPoller.ResumeToken() (string, error)`
+- New function `*CloudServiceRoleInstancesClientRestartPoller.ResumeToken() (string, error)`
+- New function `VirtualMachinesClientAssessPatchesPollerResponse.PollUntilDone(context.Context, time.Duration) (VirtualMachinesClientAssessPatchesResponse, error)`
+- New function `*RestorePointsClientCreatePollerResponse.Resume(context.Context, *RestorePointsClient, string) error`
+- New function `CloudServicesUpdateDomainClientWalkUpdateDomainPollerResponse.PollUntilDone(context.Context, time.Duration) (CloudServicesUpdateDomainClientWalkUpdateDomainResponse, error)`
+- New function `*SnapshotsClientDeletePollerResponse.Resume(context.Context, *SnapshotsClient, string) error`
+- New function `*CapacityReservationsClientCreateOrUpdatePoller.Poll(context.Context) (*http.Response, error)`
+- New function `*DedicatedHostsClientDeletePoller.Poll(context.Context) (*http.Response, error)`
+- New function `*VirtualMachineScaleSetVMsClientRunCommandPollerResponse.Resume(context.Context, *VirtualMachineScaleSetVMsClient, string) error`
+- New function `*DisksClientListByResourceGroupPager.NextPage(context.Context) bool`
+- New function `*DisksClientCreateOrUpdatePoller.FinalResponse(context.Context) (DisksClientCreateOrUpdateResponse, error)`
+- New function `*CloudServiceRoleInstancesClientDeletePoller.Poll(context.Context) (*http.Response, error)`
+- New function `*VirtualMachineScaleSetVMsClientPowerOffPoller.Poll(context.Context) (*http.Response, error)`
+- New function `*VirtualMachineRunCommandsClientCreateOrUpdatePollerResponse.Resume(context.Context, *VirtualMachineRunCommandsClient, string) error`
+- New function `*CloudServiceOperatingSystemsClientListOSFamiliesPager.PageResponse() CloudServiceOperatingSystemsClientListOSFamiliesResponse`
+- New function `SnapshotsClientUpdatePollerResponse.PollUntilDone(context.Context, time.Duration) (SnapshotsClientUpdateResponse, error)`
+- New function `*CloudServicesClientCreateOrUpdatePollerResponse.Resume(context.Context, *CloudServicesClient, string) error`
+- New function `DiskEncryptionSetsClientDeletePollerResponse.PollUntilDone(context.Context, time.Duration) (DiskEncryptionSetsClientDeleteResponse, error)`
+- New function `*VirtualMachineScaleSetExtensionsClientDeletePoller.Poll(context.Context) (*http.Response, error)`
+- New function `GalleryImageVersionsClientCreateOrUpdatePollerResponse.PollUntilDone(context.Context, time.Duration) (GalleryImageVersionsClientCreateOrUpdateResponse, error)`
+- New function `*VirtualMachineScaleSetVMsClientReimageAllPoller.ResumeToken() (string, error)`
+- New function `*VirtualMachinesClientRunCommandPoller.Done() bool`
+- New function `*RestorePointsClientDeletePollerResponse.Resume(context.Context, *RestorePointsClient, string) error`
+- New function `*DisksClientDeletePoller.Done() bool`
+- New function `*VirtualMachineScaleSetRollingUpgradesClientStartOSUpgradePoller.Poll(context.Context) (*http.Response, error)`
+- New function `*VirtualMachineScaleSetsClientDeallocatePoller.FinalResponse(context.Context) (VirtualMachineScaleSetsClientDeallocateResponse, error)`
+- New function `*LogAnalyticsClientExportThrottledRequestsPoller.FinalResponse(context.Context) (LogAnalyticsClientExportThrottledRequestsResponse, error)`
+- New function `*VirtualMachineExtensionsClientUpdatePoller.Done() bool`
+- New function `*VirtualMachineScaleSetsClientDeallocatePoller.Poll(context.Context) (*http.Response, error)`
+- New function `*DisksClientUpdatePoller.Done() bool`
+- New function `*CloudServiceRoleInstancesClientDeletePoller.Done() bool`
+- New function `*LogAnalyticsClientExportRequestRateByIntervalPoller.FinalResponse(context.Context) (LogAnalyticsClientExportRequestRateByIntervalResponse, error)`
+- New function `VirtualMachineScaleSetsClientCreateOrUpdatePollerResponse.PollUntilDone(context.Context, time.Duration) (VirtualMachineScaleSetsClientCreateOrUpdateResponse, error)`
+- New function `*CloudServicesClientReimagePoller.Poll(context.Context) (*http.Response, error)`
+- New function `VirtualMachineScaleSetVMExtensionsClientCreateOrUpdatePollerResponse.PollUntilDone(context.Context, time.Duration) (VirtualMachineScaleSetVMExtensionsClientCreateOrUpdateResponse, error)`
+- New function `*VirtualMachinesClientRestartPoller.FinalResponse(context.Context) (VirtualMachinesClientRestartResponse, error)`
+- New function `*VirtualMachinesClientStartPoller.Poll(context.Context) (*http.Response, error)`
+- New function `*CloudServiceRoleInstancesClientRebuildPoller.Poll(context.Context) (*http.Response, error)`
+- New function `*VirtualMachineScaleSetsClientDeallocatePollerResponse.Resume(context.Context, *VirtualMachineScaleSetsClient, string) error`
+- New function `*VirtualMachineScaleSetVMRunCommandsClientUpdatePoller.FinalResponse(context.Context) (VirtualMachineScaleSetVMRunCommandsClientUpdateResponse, error)`
+- New function `*CloudServiceRolesClientListPager.Err() error`
+- New function `*VirtualMachineScaleSetVMsClientRedeployPollerResponse.Resume(context.Context, *VirtualMachineScaleSetVMsClient, string) error`
+- New function `*GalleryApplicationsClientDeletePoller.Done() bool`
+- New function `*VirtualMachineRunCommandsClientListPager.Err() error`
+- New function `*ImagesClientUpdatePoller.FinalResponse(context.Context) (ImagesClientUpdateResponse, error)`
+- New function `*VirtualMachineExtensionsClientCreateOrUpdatePoller.Done() bool`
+- New function `*GalleriesClientDeletePoller.ResumeToken() (string, error)`
+- New function `*CloudServiceRolesClientListPager.NextPage(context.Context) bool`
+- New function `*CloudServicesClientPowerOffPoller.Done() bool`
+- New function `*GalleryImagesClientDeletePoller.Done() bool`
+- New function `VirtualMachinesClientReapplyPollerResponse.PollUntilDone(context.Context, time.Duration) (VirtualMachinesClientReapplyResponse, error)`
+- New function `*VirtualMachinesClientDeallocatePoller.FinalResponse(context.Context) (VirtualMachinesClientDeallocateResponse, error)`
+- New function `*GalleryImagesClientCreateOrUpdatePoller.Done() bool`
+- New function `*VirtualMachinesClientListAllPager.NextPage(context.Context) bool`
+- New function `*GalleryApplicationsClientDeletePoller.Poll(context.Context) (*http.Response, error)`
+- New function `*DedicatedHostGroupsClientListByResourceGroupPager.PageResponse() DedicatedHostGroupsClientListByResourceGroupResponse`
+- New function `*VirtualMachineScaleSetVMRunCommandsClientCreateOrUpdatePoller.ResumeToken() (string, error)`
+- New function `*VirtualMachineRunCommandsClientCreateOrUpdatePoller.Poll(context.Context) (*http.Response, error)`
+- New function `VirtualMachinesClientConvertToManagedDisksPollerResponse.PollUntilDone(context.Context, time.Duration) (VirtualMachinesClientConvertToManagedDisksResponse, error)`
+- New function `*CloudServiceRoleInstancesClientReimagePoller.ResumeToken() (string, error)`
+- New function `VirtualMachinesClientPerformMaintenancePollerResponse.PollUntilDone(context.Context, time.Duration) (VirtualMachinesClientPerformMaintenanceResponse, error)`
+- New function `*VirtualMachineScaleSetVMExtensionsClientDeletePoller.ResumeToken() (string, error)`
+- New function `*DiskAccessesClientListPrivateEndpointConnectionsPager.Err() error`
+- New function `VirtualMachinesClientReimagePollerResponse.PollUntilDone(context.Context, time.Duration) (VirtualMachinesClientReimageResponse, error)`
+- New function `VirtualMachineScaleSetVMsClientDeletePollerResponse.PollUntilDone(context.Context, time.Duration) (VirtualMachineScaleSetVMsClientDeleteResponse, error)`
+- New function `CloudServiceRoleInstancesClientRebuildPollerResponse.PollUntilDone(context.Context, time.Duration) (CloudServiceRoleInstancesClientRebuildResponse, error)`
+- New function `*GalleryImagesClientUpdatePoller.Done() bool`
+- New function `*VirtualMachineExtensionsClientUpdatePoller.ResumeToken() (string, error)`
+- New function `DiskEncryptionSetsClientCreateOrUpdatePollerResponse.PollUntilDone(context.Context, time.Duration) (DiskEncryptionSetsClientCreateOrUpdateResponse, error)`
+- New function `*DisksClientUpdatePoller.Poll(context.Context) (*http.Response, error)`
+- New function `GallerySharingProfileClientUpdatePollerResponse.PollUntilDone(context.Context, time.Duration) (GallerySharingProfileClientUpdateResponse, error)`
+- New function `*DiskAccessesClientUpdateAPrivateEndpointConnectionPollerResponse.Resume(context.Context, *DiskAccessesClient, string) error`
+- New function `*VirtualMachinesClientCapturePoller.FinalResponse(context.Context) (VirtualMachinesClientCaptureResponse, error)`
+- New function `*VirtualMachinesClientUpdatePollerResponse.Resume(context.Context, *VirtualMachinesClient, string) error`
+- New function `*CloudServiceRoleInstancesClientListPager.NextPage(context.Context) bool`
+- New function `VirtualMachineScaleSetVMsClientStartPollerResponse.PollUntilDone(context.Context, time.Duration) (VirtualMachineScaleSetVMsClientStartResponse, error)`
+- New function `DiskAccessesClientUpdateAPrivateEndpointConnectionPollerResponse.PollUntilDone(context.Context, time.Duration) (DiskAccessesClientUpdateAPrivateEndpointConnectionResponse, error)`
+- New function `*RestorePointsClientCreatePoller.Poll(context.Context) (*http.Response, error)`
+- New function `*VirtualMachinesClientListByLocationPager.Err() error`
+- New function `VirtualMachineScaleSetRollingUpgradesClientStartOSUpgradePollerResponse.PollUntilDone(context.Context, time.Duration) (VirtualMachineScaleSetRollingUpgradesClientStartOSUpgradeResponse, error)`
+- New function `CloudServicesClientUpdatePollerResponse.PollUntilDone(context.Context, time.Duration) (CloudServicesClientUpdateResponse, error)`
+- New function `*CloudServicesUpdateDomainClientWalkUpdateDomainPoller.ResumeToken() (string, error)`
+- New function `*VirtualMachinesClientRestartPoller.Done() bool`
+- New function `*DiskEncryptionSetsClientCreateOrUpdatePoller.ResumeToken() (string, error)`
+- New function `*VirtualMachineScaleSetRollingUpgradesClientCancelPoller.Poll(context.Context) (*http.Response, error)`
+- New function `*SnapshotsClientUpdatePollerResponse.Resume(context.Context, *SnapshotsClient, string) error`
+- New function `*GalleriesClientListPager.PageResponse() GalleriesClientListResponse`
+- New function `VirtualMachinesClientUpdatePollerResponse.PollUntilDone(context.Context, time.Duration) (VirtualMachinesClientUpdateResponse, error)`
+- New function `*CloudServicesUpdateDomainClientWalkUpdateDomainPoller.Poll(context.Context) (*http.Response, error)`
+- New function `VirtualMachinesClientCreateOrUpdatePollerResponse.PollUntilDone(context.Context, time.Duration) (VirtualMachinesClientCreateOrUpdateResponse, error)`
+- New function `*CloudServicesClientPowerOffPollerResponse.Resume(context.Context, *CloudServicesClient, string) error`
+- New function `*RestorePointCollectionsClientListAllPager.NextPage(context.Context) bool`
+- New function `*VirtualMachineScaleSetsClientReimageAllPoller.Done() bool`
+- New function `*VirtualMachineScaleSetVMsClientUpdatePollerResponse.Resume(context.Context, *VirtualMachineScaleSetVMsClient, string) error`
+- New function `*VirtualMachinesClientRedeployPoller.FinalResponse(context.Context) (VirtualMachinesClientRedeployResponse, error)`
+- New function `CloudServiceRoleInstancesClientReimagePollerResponse.PollUntilDone(context.Context, time.Duration) (CloudServiceRoleInstancesClientReimageResponse, error)`
+- New function `VirtualMachinesClientInstallPatchesPollerResponse.PollUntilDone(context.Context, time.Duration) (VirtualMachinesClientInstallPatchesResponse, error)`
+- New function `*GalleryImagesClientUpdatePollerResponse.Resume(context.Context, *GalleryImagesClient, string) error`
+- New function `*CloudServicesClientListPager.Err() error`
+- New function `*GalleryApplicationVersionsClientUpdatePoller.Poll(context.Context) (*http.Response, error)`
+- New function `*VirtualMachinesClientReapplyPoller.FinalResponse(context.Context) (VirtualMachinesClientReapplyResponse, error)`
+- New function `*GalleryApplicationVersionsClientDeletePoller.Done() bool`
+- New function `*VirtualMachineScaleSetExtensionsClientListPager.PageResponse() VirtualMachineScaleSetExtensionsClientListResponse`
+- New function `VirtualMachineRunCommandsClientUpdatePollerResponse.PollUntilDone(context.Context, time.Duration) (VirtualMachineRunCommandsClientUpdateResponse, error)`
+- New function `*SSHPublicKeysClientListBySubscriptionPager.NextPage(context.Context) bool`
+- New function `*VirtualMachineScaleSetsClientReimagePoller.FinalResponse(context.Context) (VirtualMachineScaleSetsClientReimageResponse, error)`
+- New function `*CloudServicesUpdateDomainClientListUpdateDomainsPager.Err() error`
+- New function `GalleryImageVersionsClientUpdatePollerResponse.PollUntilDone(context.Context, time.Duration) (GalleryImageVersionsClientUpdateResponse, error)`
+- New function `*GalleriesClientListPager.NextPage(context.Context) bool`
+- New function `*VirtualMachineScaleSetVMRunCommandsClientUpdatePoller.Poll(context.Context) (*http.Response, error)`
+- New function `CloudServicesClientRestartPollerResponse.PollUntilDone(context.Context, time.Duration) (CloudServicesClientRestartResponse, error)`
+- New function `*DiskEncryptionSetsClientListPager.NextPage(context.Context) bool`
+- New function `*CapacityReservationsClientCreateOrUpdatePoller.Done() bool`
+- New function `*VirtualMachineExtensionsClientCreateOrUpdatePollerResponse.Resume(context.Context, *VirtualMachineExtensionsClient, string) error`
+- New function `*CloudServicesClientUpdatePoller.FinalResponse(context.Context) (CloudServicesClientUpdateResponse, error)`
+- New function `*CloudServicesClientPowerOffPoller.FinalResponse(context.Context) (CloudServicesClientPowerOffResponse, error)`
+- New function `*VirtualMachineScaleSetVMRunCommandsClientCreateOrUpdatePollerResponse.Resume(context.Context, *VirtualMachineScaleSetVMRunCommandsClient, string) error`
+- New function `*DisksClientListPager.NextPage(context.Context) bool`
+- New function `*VirtualMachineExtensionsClientUpdatePoller.FinalResponse(context.Context) (VirtualMachineExtensionsClientUpdateResponse, error)`
+- New function `*VirtualMachineScaleSetVMsClientDeletePoller.FinalResponse(context.Context) (VirtualMachineScaleSetVMsClientDeleteResponse, error)`
+- New function `*VirtualMachineScaleSetsClientUpdateInstancesPoller.FinalResponse(context.Context) (VirtualMachineScaleSetsClientUpdateInstancesResponse, error)`
+- New function `*VirtualMachineScaleSetsClientSetOrchestrationServiceStatePoller.FinalResponse(context.Context) (VirtualMachineScaleSetsClientSetOrchestrationServiceStateResponse, error)`
+- New function `*VirtualMachineScaleSetVMsClientReimagePollerResponse.Resume(context.Context, *VirtualMachineScaleSetVMsClient, string) error`
+- New function `*SnapshotsClientRevokeAccessPollerResponse.Resume(context.Context, *SnapshotsClient, string) error`
+- New function `*VirtualMachineScaleSetVMsClientListPager.Err() error`
+- New function `*VirtualMachineScaleSetsClientListAllPager.Err() error`
+- New function `*CloudServicesClientRestartPoller.ResumeToken() (string, error)`
+- New function `*DisksClientListPager.PageResponse() DisksClientListResponse`
+- New function `*DisksClientRevokeAccessPoller.FinalResponse(context.Context) (DisksClientRevokeAccessResponse, error)`
+- New function `*VirtualMachineScaleSetsClientListByLocationPager.PageResponse() VirtualMachineScaleSetsClientListByLocationResponse`
+- New function `*VirtualMachinesClientConvertToManagedDisksPoller.Done() bool`
+- New function `*GalleryApplicationVersionsClientCreateOrUpdatePoller.ResumeToken() (string, error)`
+- New function `*VirtualMachineScaleSetVMsClientRedeployPoller.ResumeToken() (string, error)`
+- New function `GalleriesClientCreateOrUpdatePollerResponse.PollUntilDone(context.Context, time.Duration) (GalleriesClientCreateOrUpdateResponse, error)`
+- New function `*VirtualMachineScaleSetsClientPowerOffPoller.Done() bool`
+- New function `*GalleriesClientDeletePoller.Done() bool`
+- New function `*VirtualMachineScaleSetVMsClientDeallocatePoller.FinalResponse(context.Context)
(VirtualMachineScaleSetVMsClientDeallocateResponse, error)` +- New function `*VirtualMachineScaleSetsClientRedeployPollerResponse.Resume(context.Context, *VirtualMachineScaleSetsClient, string) error` +- New function `*VirtualMachineScaleSetVMsClientUpdatePoller.FinalResponse(context.Context) (VirtualMachineScaleSetVMsClientUpdateResponse, error)` +- New function `*GalleryImagesClientDeletePollerResponse.Resume(context.Context, *GalleryImagesClient, string) error` +- New function `*VirtualMachineScaleSetVMsClientRunCommandPoller.ResumeToken() (string, error)` +- New function `*VirtualMachineScaleSetsClientListPager.PageResponse() VirtualMachineScaleSetsClientListResponse` +- New function `*VirtualMachinesClientListAllPager.Err() error` +- New function `*VirtualMachineScaleSetVMsClientPerformMaintenancePoller.Done() bool` +- New function `GalleryImagesClientUpdatePollerResponse.PollUntilDone(context.Context, time.Duration) (GalleryImagesClientUpdateResponse, error)` +- New function `*VirtualMachinesClientInstallPatchesPoller.FinalResponse(context.Context) (VirtualMachinesClientInstallPatchesResponse, error)` +- New function `*VirtualMachineScaleSetsClientReimageAllPollerResponse.Resume(context.Context, *VirtualMachineScaleSetsClient, string) error` +- New function `*VirtualMachineScaleSetRollingUpgradesClientStartExtensionUpgradePoller.Poll(context.Context) (*http.Response, error)` +- New function `*VirtualMachineScaleSetVMsClientPerformMaintenancePoller.Poll(context.Context) (*http.Response, error)` +- New function `*DisksClientGrantAccessPoller.FinalResponse(context.Context) (DisksClientGrantAccessResponse, error)` +- New function `*VirtualMachineScaleSetRollingUpgradesClientStartExtensionUpgradePoller.ResumeToken() (string, error)` +- New function `*SnapshotsClientGrantAccessPollerResponse.Resume(context.Context, *SnapshotsClient, string) error` +- New function `*DiskEncryptionSetsClientListByResourceGroupPager.PageResponse() DiskEncryptionSetsClientListByResourceGroupResponse` +- New function `*CloudServicesClientCreateOrUpdatePoller.ResumeToken() (string, error)` +- New function `*DiskEncryptionSetsClientDeletePoller.ResumeToken() (string, error)` +- New function `*VirtualMachineScaleSetVMsClientPerformMaintenancePoller.ResumeToken() (string, error)` +- New function `*VirtualMachineScaleSetsClientRestartPollerResponse.Resume(context.Context, *VirtualMachineScaleSetsClient, string) error` +- New function `*DiskAccessesClientListPager.PageResponse() DiskAccessesClientListResponse` +- New function `*RestorePointsClientDeletePoller.Done() bool` +- New function `*DiskAccessesClientUpdatePollerResponse.Resume(context.Context, *DiskAccessesClient, string) error` +- New function `*RestorePointsClientDeletePoller.FinalResponse(context.Context) (RestorePointsClientDeleteResponse, error)` +- New function `GalleryApplicationVersionsClientCreateOrUpdatePollerResponse.PollUntilDone(context.Context, time.Duration) (GalleryApplicationVersionsClientCreateOrUpdateResponse, error)` +- New function `VirtualMachineScaleSetsClientRestartPollerResponse.PollUntilDone(context.Context, time.Duration) (VirtualMachineScaleSetsClientRestartResponse, error)` +- New function `*VirtualMachineScaleSetExtensionsClientUpdatePoller.FinalResponse(context.Context) (VirtualMachineScaleSetExtensionsClientUpdateResponse, error)` +- New function `*DiskAccessesClientDeletePoller.FinalResponse(context.Context) (DiskAccessesClientDeleteResponse, error)` +- New function `*VirtualMachinesClientAssessPatchesPoller.ResumeToken() 
(string, error)` +- New function `*VirtualMachinesClientAssessPatchesPoller.Poll(context.Context) (*http.Response, error)` +- New function `*VirtualMachineRunCommandsClientListByVirtualMachinePager.Err() error` +- New function `GalleriesClientUpdatePollerResponse.PollUntilDone(context.Context, time.Duration) (GalleriesClientUpdateResponse, error)` +- New function `*VirtualMachineScaleSetVMsClientRestartPoller.Done() bool` +- New function `*DisksClientDeletePoller.ResumeToken() (string, error)` +- New function `*GalleriesClientCreateOrUpdatePoller.Done() bool` +- New function `*DiskEncryptionSetsClientListPager.Err() error` +- New function `*ImagesClientListPager.Err() error` +- New function `*VirtualMachineScaleSetExtensionsClientListPager.NextPage(context.Context) bool` +- New function `*CloudServicesClientStartPollerResponse.Resume(context.Context, *CloudServicesClient, string) error` +- New function `*CloudServiceOperatingSystemsClientListOSVersionsPager.NextPage(context.Context) bool` +- New function `*VirtualMachineScaleSetsClientListAllPager.PageResponse() VirtualMachineScaleSetsClientListAllResponse` +- New function `*DedicatedHostsClientDeletePollerResponse.Resume(context.Context, *DedicatedHostsClient, string) error` +- New function `*VirtualMachineRunCommandsClientListByVirtualMachinePager.NextPage(context.Context) bool` +- New function `*ImagesClientUpdatePoller.ResumeToken() (string, error)` +- New function `*DiskAccessesClientListByResourceGroupPager.PageResponse() DiskAccessesClientListByResourceGroupResponse` +- New function `*VirtualMachineScaleSetVMsClientReimageAllPoller.Done() bool` +- New function `*ResourceSKUsClientListPager.PageResponse() ResourceSKUsClientListResponse` +- New function `*LogAnalyticsClientExportRequestRateByIntervalPollerResponse.Resume(context.Context, *LogAnalyticsClient, string) error` +- New function `VirtualMachineScaleSetsClientDeletePollerResponse.PollUntilDone(context.Context, time.Duration) (VirtualMachineScaleSetsClientDeleteResponse, error)` +- New function `*DiskRestorePointClientGrantAccessPoller.Done() bool` +- New function `*VirtualMachinesClientInstallPatchesPollerResponse.Resume(context.Context, *VirtualMachinesClient, string) error` +- New function `*AvailabilitySetsClientListBySubscriptionPager.PageResponse() AvailabilitySetsClientListBySubscriptionResponse` +- New function `VirtualMachineScaleSetExtensionsClientUpdatePollerResponse.PollUntilDone(context.Context, time.Duration) (VirtualMachineScaleSetExtensionsClientUpdateResponse, error)` +- New function `*VirtualMachinesClientCapturePoller.ResumeToken() (string, error)` +- New function `*VirtualMachinesClientUpdatePoller.Done() bool` +- New function `*ImagesClientUpdatePollerResponse.Resume(context.Context, *ImagesClient, string) error` +- New function `*VirtualMachineScaleSetsClientDeletePoller.Done() bool` +- New function `*DiskEncryptionSetsClientDeletePoller.Poll(context.Context) (*http.Response, error)` +- New function `*DedicatedHostsClientDeletePoller.FinalResponse(context.Context) (DedicatedHostsClientDeleteResponse, error)` +- New function `*CloudServiceRoleInstancesClientReimagePollerResponse.Resume(context.Context, *CloudServiceRoleInstancesClient, string) error` +- New function `*RestorePointsClientDeletePoller.ResumeToken() (string, error)` +- New function `*DisksClientCreateOrUpdatePollerResponse.Resume(context.Context, *DisksClient, string) error` +- New function `*VirtualMachineScaleSetVMsClientDeletePollerResponse.Resume(context.Context, 
*VirtualMachineScaleSetVMsClient, string) error` +- New function `*RestorePointsClientCreatePoller.FinalResponse(context.Context) (RestorePointsClientCreateResponse, error)` +- New function `*GalleryImageVersionsClientCreateOrUpdatePollerResponse.Resume(context.Context, *GalleryImageVersionsClient, string) error` +- New function `VirtualMachinesClientStartPollerResponse.PollUntilDone(context.Context, time.Duration) (VirtualMachinesClientStartResponse, error)` +- New function `DisksClientDeletePollerResponse.PollUntilDone(context.Context, time.Duration) (DisksClientDeleteResponse, error)` +- New function `*VirtualMachineScaleSetExtensionsClientListPager.Err() error` +- New function `*GalleriesClientUpdatePoller.FinalResponse(context.Context) (GalleriesClientUpdateResponse, error)` +- New function `*GalleriesClientListByResourceGroupPager.PageResponse() GalleriesClientListByResourceGroupResponse` +- New function `*SnapshotsClientUpdatePoller.ResumeToken() (string, error)` +- New function `*DedicatedHostsClientDeletePoller.Done() bool` +- New function `VirtualMachineExtensionsClientCreateOrUpdatePollerResponse.PollUntilDone(context.Context, time.Duration) (VirtualMachineExtensionsClientCreateOrUpdateResponse, error)` +- New function `*VirtualMachineExtensionsClientDeletePollerResponse.Resume(context.Context, *VirtualMachineExtensionsClient, string) error` +- New function `*SharedGalleryImagesClientListPager.Err() error` +- New function `*DisksClientCreateOrUpdatePoller.Done() bool` +- New function `VirtualMachineScaleSetRollingUpgradesClientStartExtensionUpgradePollerResponse.PollUntilDone(context.Context, time.Duration) (VirtualMachineScaleSetRollingUpgradesClientStartExtensionUpgradeResponse, error)` +- New function `VirtualMachineScaleSetVMRunCommandsClientCreateOrUpdatePollerResponse.PollUntilDone(context.Context, time.Duration) (VirtualMachineScaleSetVMRunCommandsClientCreateOrUpdateResponse, error)` +- New function `*VirtualMachineScaleSetsClientUpdateInstancesPoller.ResumeToken() (string, error)` +- New function `*CloudServicesClientUpdatePoller.ResumeToken() (string, error)` +- New function `DiskRestorePointClientGrantAccessPollerResponse.PollUntilDone(context.Context, time.Duration) (DiskRestorePointClientGrantAccessResponse, error)` +- New function `VirtualMachineScaleSetVMsClientReimageAllPollerResponse.PollUntilDone(context.Context, time.Duration) (VirtualMachineScaleSetVMsClientReimageAllResponse, error)` +- New function `VirtualMachinesClientCapturePollerResponse.PollUntilDone(context.Context, time.Duration) (VirtualMachinesClientCaptureResponse, error)` +- New function `*CloudServicesClientReimagePoller.FinalResponse(context.Context) (CloudServicesClientReimageResponse, error)` +- New function `SnapshotsClientCreateOrUpdatePollerResponse.PollUntilDone(context.Context, time.Duration) (SnapshotsClientCreateOrUpdateResponse, error)` +- New function `*VirtualMachinesClientCapturePoller.Poll(context.Context) (*http.Response, error)` +- New function `*VirtualMachineScaleSetsClientReimagePoller.Poll(context.Context) (*http.Response, error)` +- New function `*VirtualMachineScaleSetsClientListPager.NextPage(context.Context) bool` +- New function `*ThrottledRequestsInput.UnmarshalJSON([]byte) error` +- New function `*VirtualMachineScaleSetVMRunCommandsClientCreateOrUpdatePoller.FinalResponse(context.Context) (VirtualMachineScaleSetVMRunCommandsClientCreateOrUpdateResponse, error)` +- New function `*VirtualMachinesClientUpdatePoller.FinalResponse(context.Context) 
(VirtualMachinesClientUpdateResponse, error)` +- New function `*VirtualMachinesClientPowerOffPoller.Done() bool` +- New function `*VirtualMachineExtensionsClientCreateOrUpdatePoller.Poll(context.Context) (*http.Response, error)` +- New function `*DiskAccessesClientListPrivateEndpointConnectionsPager.NextPage(context.Context) bool` +- New function `CloudServicesClientDeleteInstancesPollerResponse.PollUntilDone(context.Context, time.Duration) (CloudServicesClientDeleteInstancesResponse, error)` +- New function `*GalleryApplicationsClientCreateOrUpdatePoller.FinalResponse(context.Context) (GalleryApplicationsClientCreateOrUpdateResponse, error)` +- New function `*SnapshotsClientCreateOrUpdatePoller.Done() bool` +- New function `*VirtualMachineScaleSetsClientCreateOrUpdatePoller.ResumeToken() (string, error)` +- New function `*GalleryApplicationVersionsClientUpdatePoller.FinalResponse(context.Context) (GalleryApplicationVersionsClientUpdateResponse, error)` +- New function `GalleryApplicationsClientDeletePollerResponse.PollUntilDone(context.Context, time.Duration) (GalleryApplicationsClientDeleteResponse, error)` +- New function `*DiskAccessesClientDeleteAPrivateEndpointConnectionPollerResponse.Resume(context.Context, *DiskAccessesClient, string) error` +- New function `*CloudServicesClientDeletePoller.Done() bool` +- New function `*VirtualMachinesClientAssessPatchesPoller.Done() bool` +- New function `*VirtualMachineScaleSetRollingUpgradesClientStartOSUpgradePoller.Done() bool` +- New function `*VirtualMachineRunCommandsClientUpdatePoller.Poll(context.Context) (*http.Response, error)` +- New function `*GalleryImagesClientDeletePoller.FinalResponse(context.Context) (GalleryImagesClientDeleteResponse, error)` +- New function `*CloudServiceRoleInstancesClientDeletePoller.FinalResponse(context.Context) (CloudServiceRoleInstancesClientDeleteResponse, error)` +- New function `CloudServicesClientPowerOffPollerResponse.PollUntilDone(context.Context, time.Duration) (CloudServicesClientPowerOffResponse, error)` +- New function `DedicatedHostsClientUpdatePollerResponse.PollUntilDone(context.Context, time.Duration) (DedicatedHostsClientUpdateResponse, error)` +- New function `*DisksClientListByResourceGroupPager.PageResponse() DisksClientListByResourceGroupResponse` +- New function `*AvailabilitySetsClientListPager.Err() error` +- New function `*CloudServicesClientStartPoller.FinalResponse(context.Context) (CloudServicesClientStartResponse, error)` +- New function `*CloudServiceRoleInstancesClientDeletePoller.ResumeToken() (string, error)` +- New function `*VirtualMachineScaleSetsClientPerformMaintenancePoller.FinalResponse(context.Context) (VirtualMachineScaleSetsClientPerformMaintenanceResponse, error)` +- New function `*VirtualMachineScaleSetVMsClientStartPoller.FinalResponse(context.Context) (VirtualMachineScaleSetVMsClientStartResponse, error)` +- New function `*DiskRestorePointClientRevokeAccessPoller.Poll(context.Context) (*http.Response, error)` +- New function `*CapacityReservationGroupsClientListByResourceGroupPager.NextPage(context.Context) bool` +- New function `*SharedGalleryImageVersionsClientListPager.Err() error` +- New function `CloudServicesClientReimagePollerResponse.PollUntilDone(context.Context, time.Duration) (CloudServicesClientReimageResponse, error)` +- New function `*GalleryApplicationsClientListByGalleryPager.NextPage(context.Context) bool` +- New function `*DiskAccessesClientListByResourceGroupPager.Err() error` +- New function `*SnapshotsClientListPager.PageResponse() 
SnapshotsClientListResponse` +- New function `VirtualMachineExtensionsClientDeletePollerResponse.PollUntilDone(context.Context, time.Duration) (VirtualMachineExtensionsClientDeleteResponse, error)` +- New function `*CapacityReservationsClientUpdatePoller.Poll(context.Context) (*http.Response, error)` +- New function `*VirtualMachineScaleSetRollingUpgradesClientCancelPollerResponse.Resume(context.Context, *VirtualMachineScaleSetRollingUpgradesClient, string) error` +- New function `*GalleriesClientUpdatePoller.Done() bool` +- New function `*CloudServicesClientStartPoller.ResumeToken() (string, error)` +- New function `*RestorePointCollectionsClientDeletePoller.ResumeToken() (string, error)` +- New struct `AvailabilitySetsClientCreateOrUpdateOptions` +- New struct `AvailabilitySetsClientCreateOrUpdateResponse` +- New struct `AvailabilitySetsClientCreateOrUpdateResult` +- New struct `AvailabilitySetsClientDeleteOptions` +- New struct `AvailabilitySetsClientDeleteResponse` +- New struct `AvailabilitySetsClientGetOptions` +- New struct `AvailabilitySetsClientGetResponse` +- New struct `AvailabilitySetsClientGetResult` +- New struct `AvailabilitySetsClientListAvailableSizesOptions` +- New struct `AvailabilitySetsClientListAvailableSizesResponse` +- New struct `AvailabilitySetsClientListAvailableSizesResult` +- New struct `AvailabilitySetsClientListBySubscriptionOptions` +- New struct `AvailabilitySetsClientListBySubscriptionPager` +- New struct `AvailabilitySetsClientListBySubscriptionResponse` +- New struct `AvailabilitySetsClientListBySubscriptionResult` +- New struct `AvailabilitySetsClientListOptions` +- New struct `AvailabilitySetsClientListPager` +- New struct `AvailabilitySetsClientListResponse` +- New struct `AvailabilitySetsClientListResult` +- New struct `AvailabilitySetsClientUpdateOptions` +- New struct `AvailabilitySetsClientUpdateResponse` +- New struct `AvailabilitySetsClientUpdateResult` +- New struct `CapacityReservationGroupsClientCreateOrUpdateOptions` +- New struct `CapacityReservationGroupsClientCreateOrUpdateResponse` +- New struct `CapacityReservationGroupsClientCreateOrUpdateResult` +- New struct `CapacityReservationGroupsClientDeleteOptions` +- New struct `CapacityReservationGroupsClientDeleteResponse` +- New struct `CapacityReservationGroupsClientGetOptions` +- New struct `CapacityReservationGroupsClientGetResponse` +- New struct `CapacityReservationGroupsClientGetResult` +- New struct `CapacityReservationGroupsClientListByResourceGroupOptions` +- New struct `CapacityReservationGroupsClientListByResourceGroupPager` +- New struct `CapacityReservationGroupsClientListByResourceGroupResponse` +- New struct `CapacityReservationGroupsClientListByResourceGroupResult` +- New struct `CapacityReservationGroupsClientListBySubscriptionOptions` +- New struct `CapacityReservationGroupsClientListBySubscriptionPager` +- New struct `CapacityReservationGroupsClientListBySubscriptionResponse` +- New struct `CapacityReservationGroupsClientListBySubscriptionResult` +- New struct `CapacityReservationGroupsClientUpdateOptions` +- New struct `CapacityReservationGroupsClientUpdateResponse` +- New struct `CapacityReservationGroupsClientUpdateResult` +- New struct `CapacityReservationsClientBeginCreateOrUpdateOptions` +- New struct `CapacityReservationsClientBeginDeleteOptions` +- New struct `CapacityReservationsClientBeginUpdateOptions` +- New struct `CapacityReservationsClientCreateOrUpdatePoller` +- New struct `CapacityReservationsClientCreateOrUpdatePollerResponse` +- New struct 
`CapacityReservationsClientCreateOrUpdateResponse` +- New struct `CapacityReservationsClientCreateOrUpdateResult` +- New struct `CapacityReservationsClientDeletePoller` +- New struct `CapacityReservationsClientDeletePollerResponse` +- New struct `CapacityReservationsClientDeleteResponse` +- New struct `CapacityReservationsClientGetOptions` +- New struct `CapacityReservationsClientGetResponse` +- New struct `CapacityReservationsClientGetResult` +- New struct `CapacityReservationsClientListByCapacityReservationGroupOptions` +- New struct `CapacityReservationsClientListByCapacityReservationGroupPager` +- New struct `CapacityReservationsClientListByCapacityReservationGroupResponse` +- New struct `CapacityReservationsClientListByCapacityReservationGroupResult` +- New struct `CapacityReservationsClientUpdatePoller` +- New struct `CapacityReservationsClientUpdatePollerResponse` +- New struct `CapacityReservationsClientUpdateResponse` +- New struct `CapacityReservationsClientUpdateResult` +- New struct `CloudServiceOperatingSystemsClientGetOSFamilyOptions` +- New struct `CloudServiceOperatingSystemsClientGetOSFamilyResponse` +- New struct `CloudServiceOperatingSystemsClientGetOSFamilyResult` +- New struct `CloudServiceOperatingSystemsClientGetOSVersionOptions` +- New struct `CloudServiceOperatingSystemsClientGetOSVersionResponse` +- New struct `CloudServiceOperatingSystemsClientGetOSVersionResult` +- New struct `CloudServiceOperatingSystemsClientListOSFamiliesOptions` +- New struct `CloudServiceOperatingSystemsClientListOSFamiliesPager` +- New struct `CloudServiceOperatingSystemsClientListOSFamiliesResponse` +- New struct `CloudServiceOperatingSystemsClientListOSFamiliesResult` +- New struct `CloudServiceOperatingSystemsClientListOSVersionsOptions` +- New struct `CloudServiceOperatingSystemsClientListOSVersionsPager` +- New struct `CloudServiceOperatingSystemsClientListOSVersionsResponse` +- New struct `CloudServiceOperatingSystemsClientListOSVersionsResult` +- New struct `CloudServiceRoleInstancesClientBeginDeleteOptions` +- New struct `CloudServiceRoleInstancesClientBeginRebuildOptions` +- New struct `CloudServiceRoleInstancesClientBeginReimageOptions` +- New struct `CloudServiceRoleInstancesClientBeginRestartOptions` +- New struct `CloudServiceRoleInstancesClientDeletePoller` +- New struct `CloudServiceRoleInstancesClientDeletePollerResponse` +- New struct `CloudServiceRoleInstancesClientDeleteResponse` +- New struct `CloudServiceRoleInstancesClientGetInstanceViewOptions` +- New struct `CloudServiceRoleInstancesClientGetInstanceViewResponse` +- New struct `CloudServiceRoleInstancesClientGetInstanceViewResult` +- New struct `CloudServiceRoleInstancesClientGetOptions` +- New struct `CloudServiceRoleInstancesClientGetRemoteDesktopFileOptions` +- New struct `CloudServiceRoleInstancesClientGetRemoteDesktopFileResponse` +- New struct `CloudServiceRoleInstancesClientGetResponse` +- New struct `CloudServiceRoleInstancesClientGetResult` +- New struct `CloudServiceRoleInstancesClientListOptions` +- New struct `CloudServiceRoleInstancesClientListPager` +- New struct `CloudServiceRoleInstancesClientListResponse` +- New struct `CloudServiceRoleInstancesClientListResult` +- New struct `CloudServiceRoleInstancesClientRebuildPoller` +- New struct `CloudServiceRoleInstancesClientRebuildPollerResponse` +- New struct `CloudServiceRoleInstancesClientRebuildResponse` +- New struct `CloudServiceRoleInstancesClientReimagePoller` +- New struct `CloudServiceRoleInstancesClientReimagePollerResponse` +- New struct 
`CloudServiceRoleInstancesClientReimageResponse` +- New struct `CloudServiceRoleInstancesClientRestartPoller` +- New struct `CloudServiceRoleInstancesClientRestartPollerResponse` +- New struct `CloudServiceRoleInstancesClientRestartResponse` +- New struct `CloudServiceRolesClientGetOptions` +- New struct `CloudServiceRolesClientGetResponse` +- New struct `CloudServiceRolesClientGetResult` +- New struct `CloudServiceRolesClientListOptions` +- New struct `CloudServiceRolesClientListPager` +- New struct `CloudServiceRolesClientListResponse` +- New struct `CloudServiceRolesClientListResult` +- New struct `CloudServicesClientBeginCreateOrUpdateOptions` +- New struct `CloudServicesClientBeginDeleteInstancesOptions` +- New struct `CloudServicesClientBeginDeleteOptions` +- New struct `CloudServicesClientBeginPowerOffOptions` +- New struct `CloudServicesClientBeginRebuildOptions` +- New struct `CloudServicesClientBeginReimageOptions` +- New struct `CloudServicesClientBeginRestartOptions` +- New struct `CloudServicesClientBeginStartOptions` +- New struct `CloudServicesClientBeginUpdateOptions` +- New struct `CloudServicesClientCreateOrUpdatePoller` +- New struct `CloudServicesClientCreateOrUpdatePollerResponse` +- New struct `CloudServicesClientCreateOrUpdateResponse` +- New struct `CloudServicesClientCreateOrUpdateResult` +- New struct `CloudServicesClientDeleteInstancesPoller` +- New struct `CloudServicesClientDeleteInstancesPollerResponse` +- New struct `CloudServicesClientDeleteInstancesResponse` +- New struct `CloudServicesClientDeletePoller` +- New struct `CloudServicesClientDeletePollerResponse` +- New struct `CloudServicesClientDeleteResponse` +- New struct `CloudServicesClientGetInstanceViewOptions` +- New struct `CloudServicesClientGetInstanceViewResponse` +- New struct `CloudServicesClientGetInstanceViewResult` +- New struct `CloudServicesClientGetOptions` +- New struct `CloudServicesClientGetResponse` +- New struct `CloudServicesClientGetResult` +- New struct `CloudServicesClientListAllOptions` +- New struct `CloudServicesClientListAllPager` +- New struct `CloudServicesClientListAllResponse` +- New struct `CloudServicesClientListAllResult` +- New struct `CloudServicesClientListOptions` +- New struct `CloudServicesClientListPager` +- New struct `CloudServicesClientListResponse` +- New struct `CloudServicesClientListResult` +- New struct `CloudServicesClientPowerOffPoller` +- New struct `CloudServicesClientPowerOffPollerResponse` +- New struct `CloudServicesClientPowerOffResponse` +- New struct `CloudServicesClientRebuildPoller` +- New struct `CloudServicesClientRebuildPollerResponse` +- New struct `CloudServicesClientRebuildResponse` +- New struct `CloudServicesClientReimagePoller` +- New struct `CloudServicesClientReimagePollerResponse` +- New struct `CloudServicesClientReimageResponse` +- New struct `CloudServicesClientRestartPoller` +- New struct `CloudServicesClientRestartPollerResponse` +- New struct `CloudServicesClientRestartResponse` +- New struct `CloudServicesClientStartPoller` +- New struct `CloudServicesClientStartPollerResponse` +- New struct `CloudServicesClientStartResponse` +- New struct `CloudServicesClientUpdatePoller` +- New struct `CloudServicesClientUpdatePollerResponse` +- New struct `CloudServicesClientUpdateResponse` +- New struct `CloudServicesClientUpdateResult` +- New struct `CloudServicesUpdateDomainClientBeginWalkUpdateDomainOptions` +- New struct `CloudServicesUpdateDomainClientGetUpdateDomainOptions` +- New struct 
`CloudServicesUpdateDomainClientGetUpdateDomainResponse` +- New struct `CloudServicesUpdateDomainClientGetUpdateDomainResult` +- New struct `CloudServicesUpdateDomainClientListUpdateDomainsOptions` +- New struct `CloudServicesUpdateDomainClientListUpdateDomainsPager` +- New struct `CloudServicesUpdateDomainClientListUpdateDomainsResponse` +- New struct `CloudServicesUpdateDomainClientListUpdateDomainsResult` +- New struct `CloudServicesUpdateDomainClientWalkUpdateDomainPoller` +- New struct `CloudServicesUpdateDomainClientWalkUpdateDomainPollerResponse` +- New struct `CloudServicesUpdateDomainClientWalkUpdateDomainResponse` +- New struct `CommunityGalleriesClientGetOptions` +- New struct `CommunityGalleriesClientGetResponse` +- New struct `CommunityGalleriesClientGetResult` +- New struct `CommunityGalleryImageVersionsClientGetOptions` +- New struct `CommunityGalleryImageVersionsClientGetResponse` +- New struct `CommunityGalleryImageVersionsClientGetResult` +- New struct `CommunityGalleryImagesClientGetOptions` +- New struct `CommunityGalleryImagesClientGetResponse` +- New struct `CommunityGalleryImagesClientGetResult` +- New struct `DedicatedHostGroupsClientCreateOrUpdateOptions` +- New struct `DedicatedHostGroupsClientCreateOrUpdateResponse` +- New struct `DedicatedHostGroupsClientCreateOrUpdateResult` +- New struct `DedicatedHostGroupsClientDeleteOptions` +- New struct `DedicatedHostGroupsClientDeleteResponse` +- New struct `DedicatedHostGroupsClientGetOptions` +- New struct `DedicatedHostGroupsClientGetResponse` +- New struct `DedicatedHostGroupsClientGetResult` +- New struct `DedicatedHostGroupsClientListByResourceGroupOptions` +- New struct `DedicatedHostGroupsClientListByResourceGroupPager` +- New struct `DedicatedHostGroupsClientListByResourceGroupResponse` +- New struct `DedicatedHostGroupsClientListByResourceGroupResult` +- New struct `DedicatedHostGroupsClientListBySubscriptionOptions` +- New struct `DedicatedHostGroupsClientListBySubscriptionPager` +- New struct `DedicatedHostGroupsClientListBySubscriptionResponse` +- New struct `DedicatedHostGroupsClientListBySubscriptionResult` +- New struct `DedicatedHostGroupsClientUpdateOptions` +- New struct `DedicatedHostGroupsClientUpdateResponse` +- New struct `DedicatedHostGroupsClientUpdateResult` +- New struct `DedicatedHostsClientBeginCreateOrUpdateOptions` +- New struct `DedicatedHostsClientBeginDeleteOptions` +- New struct `DedicatedHostsClientBeginUpdateOptions` +- New struct `DedicatedHostsClientCreateOrUpdatePoller` +- New struct `DedicatedHostsClientCreateOrUpdatePollerResponse` +- New struct `DedicatedHostsClientCreateOrUpdateResponse` +- New struct `DedicatedHostsClientCreateOrUpdateResult` +- New struct `DedicatedHostsClientDeletePoller` +- New struct `DedicatedHostsClientDeletePollerResponse` +- New struct `DedicatedHostsClientDeleteResponse` +- New struct `DedicatedHostsClientGetOptions` +- New struct `DedicatedHostsClientGetResponse` +- New struct `DedicatedHostsClientGetResult` +- New struct `DedicatedHostsClientListByHostGroupOptions` +- New struct `DedicatedHostsClientListByHostGroupPager` +- New struct `DedicatedHostsClientListByHostGroupResponse` +- New struct `DedicatedHostsClientListByHostGroupResult` +- New struct `DedicatedHostsClientUpdatePoller` +- New struct `DedicatedHostsClientUpdatePollerResponse` +- New struct `DedicatedHostsClientUpdateResponse` +- New struct `DedicatedHostsClientUpdateResult` +- New struct `DiskAccessesClientBeginCreateOrUpdateOptions` +- New struct 
`DiskAccessesClientBeginDeleteAPrivateEndpointConnectionOptions` +- New struct `DiskAccessesClientBeginDeleteOptions` +- New struct `DiskAccessesClientBeginUpdateAPrivateEndpointConnectionOptions` +- New struct `DiskAccessesClientBeginUpdateOptions` +- New struct `DiskAccessesClientCreateOrUpdatePoller` +- New struct `DiskAccessesClientCreateOrUpdatePollerResponse` +- New struct `DiskAccessesClientCreateOrUpdateResponse` +- New struct `DiskAccessesClientCreateOrUpdateResult` +- New struct `DiskAccessesClientDeleteAPrivateEndpointConnectionPoller` +- New struct `DiskAccessesClientDeleteAPrivateEndpointConnectionPollerResponse` +- New struct `DiskAccessesClientDeleteAPrivateEndpointConnectionResponse` +- New struct `DiskAccessesClientDeletePoller` +- New struct `DiskAccessesClientDeletePollerResponse` +- New struct `DiskAccessesClientDeleteResponse` +- New struct `DiskAccessesClientGetAPrivateEndpointConnectionOptions` +- New struct `DiskAccessesClientGetAPrivateEndpointConnectionResponse` +- New struct `DiskAccessesClientGetAPrivateEndpointConnectionResult` +- New struct `DiskAccessesClientGetOptions` +- New struct `DiskAccessesClientGetPrivateLinkResourcesOptions` +- New struct `DiskAccessesClientGetPrivateLinkResourcesResponse` +- New struct `DiskAccessesClientGetPrivateLinkResourcesResult` +- New struct `DiskAccessesClientGetResponse` +- New struct `DiskAccessesClientGetResult` +- New struct `DiskAccessesClientListByResourceGroupOptions` +- New struct `DiskAccessesClientListByResourceGroupPager` +- New struct `DiskAccessesClientListByResourceGroupResponse` +- New struct `DiskAccessesClientListByResourceGroupResult` +- New struct `DiskAccessesClientListOptions` +- New struct `DiskAccessesClientListPager` +- New struct `DiskAccessesClientListPrivateEndpointConnectionsOptions` +- New struct `DiskAccessesClientListPrivateEndpointConnectionsPager` +- New struct `DiskAccessesClientListPrivateEndpointConnectionsResponse` +- New struct `DiskAccessesClientListPrivateEndpointConnectionsResult` +- New struct `DiskAccessesClientListResponse` +- New struct `DiskAccessesClientListResult` +- New struct `DiskAccessesClientUpdateAPrivateEndpointConnectionPoller` +- New struct `DiskAccessesClientUpdateAPrivateEndpointConnectionPollerResponse` +- New struct `DiskAccessesClientUpdateAPrivateEndpointConnectionResponse` +- New struct `DiskAccessesClientUpdateAPrivateEndpointConnectionResult` +- New struct `DiskAccessesClientUpdatePoller` +- New struct `DiskAccessesClientUpdatePollerResponse` +- New struct `DiskAccessesClientUpdateResponse` +- New struct `DiskAccessesClientUpdateResult` +- New struct `DiskEncryptionSetsClientBeginCreateOrUpdateOptions` +- New struct `DiskEncryptionSetsClientBeginDeleteOptions` +- New struct `DiskEncryptionSetsClientBeginUpdateOptions` +- New struct `DiskEncryptionSetsClientCreateOrUpdatePoller` +- New struct `DiskEncryptionSetsClientCreateOrUpdatePollerResponse` +- New struct `DiskEncryptionSetsClientCreateOrUpdateResponse` +- New struct `DiskEncryptionSetsClientCreateOrUpdateResult` +- New struct `DiskEncryptionSetsClientDeletePoller` +- New struct `DiskEncryptionSetsClientDeletePollerResponse` +- New struct `DiskEncryptionSetsClientDeleteResponse` +- New struct `DiskEncryptionSetsClientGetOptions` +- New struct `DiskEncryptionSetsClientGetResponse` +- New struct `DiskEncryptionSetsClientGetResult` +- New struct `DiskEncryptionSetsClientListAssociatedResourcesOptions` +- New struct `DiskEncryptionSetsClientListAssociatedResourcesPager` +- New struct 
`DiskEncryptionSetsClientListAssociatedResourcesResponse` +- New struct `DiskEncryptionSetsClientListAssociatedResourcesResult` +- New struct `DiskEncryptionSetsClientListByResourceGroupOptions` +- New struct `DiskEncryptionSetsClientListByResourceGroupPager` +- New struct `DiskEncryptionSetsClientListByResourceGroupResponse` +- New struct `DiskEncryptionSetsClientListByResourceGroupResult` +- New struct `DiskEncryptionSetsClientListOptions` +- New struct `DiskEncryptionSetsClientListPager` +- New struct `DiskEncryptionSetsClientListResponse` +- New struct `DiskEncryptionSetsClientListResult` +- New struct `DiskEncryptionSetsClientUpdatePoller` +- New struct `DiskEncryptionSetsClientUpdatePollerResponse` +- New struct `DiskEncryptionSetsClientUpdateResponse` +- New struct `DiskEncryptionSetsClientUpdateResult` +- New struct `DiskRestorePointClientBeginGrantAccessOptions` +- New struct `DiskRestorePointClientBeginRevokeAccessOptions` +- New struct `DiskRestorePointClientGetOptions` +- New struct `DiskRestorePointClientGetResponse` +- New struct `DiskRestorePointClientGetResult` +- New struct `DiskRestorePointClientGrantAccessPoller` +- New struct `DiskRestorePointClientGrantAccessPollerResponse` +- New struct `DiskRestorePointClientGrantAccessResponse` +- New struct `DiskRestorePointClientGrantAccessResult` +- New struct `DiskRestorePointClientListByRestorePointOptions` +- New struct `DiskRestorePointClientListByRestorePointPager` +- New struct `DiskRestorePointClientListByRestorePointResponse` +- New struct `DiskRestorePointClientListByRestorePointResult` +- New struct `DiskRestorePointClientRevokeAccessPoller` +- New struct `DiskRestorePointClientRevokeAccessPollerResponse` +- New struct `DiskRestorePointClientRevokeAccessResponse` +- New struct `DisksClientBeginCreateOrUpdateOptions` +- New struct `DisksClientBeginDeleteOptions` +- New struct `DisksClientBeginGrantAccessOptions` +- New struct `DisksClientBeginRevokeAccessOptions` +- New struct `DisksClientBeginUpdateOptions` +- New struct `DisksClientCreateOrUpdatePoller` +- New struct `DisksClientCreateOrUpdatePollerResponse` +- New struct `DisksClientCreateOrUpdateResponse` +- New struct `DisksClientCreateOrUpdateResult` +- New struct `DisksClientDeletePoller` +- New struct `DisksClientDeletePollerResponse` +- New struct `DisksClientDeleteResponse` +- New struct `DisksClientGetOptions` +- New struct `DisksClientGetResponse` +- New struct `DisksClientGetResult` +- New struct `DisksClientGrantAccessPoller` +- New struct `DisksClientGrantAccessPollerResponse` +- New struct `DisksClientGrantAccessResponse` +- New struct `DisksClientGrantAccessResult` +- New struct `DisksClientListByResourceGroupOptions` +- New struct `DisksClientListByResourceGroupPager` +- New struct `DisksClientListByResourceGroupResponse` +- New struct `DisksClientListByResourceGroupResult` +- New struct `DisksClientListOptions` +- New struct `DisksClientListPager` +- New struct `DisksClientListResponse` +- New struct `DisksClientListResult` +- New struct `DisksClientRevokeAccessPoller` +- New struct `DisksClientRevokeAccessPollerResponse` +- New struct `DisksClientRevokeAccessResponse` +- New struct `DisksClientUpdatePoller` +- New struct `DisksClientUpdatePollerResponse` +- New struct `DisksClientUpdateResponse` +- New struct `DisksClientUpdateResult` +- New struct `GalleriesClientBeginCreateOrUpdateOptions` +- New struct `GalleriesClientBeginDeleteOptions` +- New struct `GalleriesClientBeginUpdateOptions` +- New struct `GalleriesClientCreateOrUpdatePoller` +- New 
struct `GalleriesClientCreateOrUpdatePollerResponse` +- New struct `GalleriesClientCreateOrUpdateResponse` +- New struct `GalleriesClientCreateOrUpdateResult` +- New struct `GalleriesClientDeletePoller` +- New struct `GalleriesClientDeletePollerResponse` +- New struct `GalleriesClientDeleteResponse` +- New struct `GalleriesClientGetOptions` +- New struct `GalleriesClientGetResponse` +- New struct `GalleriesClientGetResult` +- New struct `GalleriesClientListByResourceGroupOptions` +- New struct `GalleriesClientListByResourceGroupPager` +- New struct `GalleriesClientListByResourceGroupResponse` +- New struct `GalleriesClientListByResourceGroupResult` +- New struct `GalleriesClientListOptions` +- New struct `GalleriesClientListPager` +- New struct `GalleriesClientListResponse` +- New struct `GalleriesClientListResult` +- New struct `GalleriesClientUpdatePoller` +- New struct `GalleriesClientUpdatePollerResponse` +- New struct `GalleriesClientUpdateResponse` +- New struct `GalleriesClientUpdateResult` +- New struct `GalleryApplicationVersionsClientBeginCreateOrUpdateOptions` +- New struct `GalleryApplicationVersionsClientBeginDeleteOptions` +- New struct `GalleryApplicationVersionsClientBeginUpdateOptions` +- New struct `GalleryApplicationVersionsClientCreateOrUpdatePoller` +- New struct `GalleryApplicationVersionsClientCreateOrUpdatePollerResponse` +- New struct `GalleryApplicationVersionsClientCreateOrUpdateResponse` +- New struct `GalleryApplicationVersionsClientCreateOrUpdateResult` +- New struct `GalleryApplicationVersionsClientDeletePoller` +- New struct `GalleryApplicationVersionsClientDeletePollerResponse` +- New struct `GalleryApplicationVersionsClientDeleteResponse` +- New struct `GalleryApplicationVersionsClientGetOptions` +- New struct `GalleryApplicationVersionsClientGetResponse` +- New struct `GalleryApplicationVersionsClientGetResult` +- New struct `GalleryApplicationVersionsClientListByGalleryApplicationOptions` +- New struct `GalleryApplicationVersionsClientListByGalleryApplicationPager` +- New struct `GalleryApplicationVersionsClientListByGalleryApplicationResponse` +- New struct `GalleryApplicationVersionsClientListByGalleryApplicationResult` +- New struct `GalleryApplicationVersionsClientUpdatePoller` +- New struct `GalleryApplicationVersionsClientUpdatePollerResponse` +- New struct `GalleryApplicationVersionsClientUpdateResponse` +- New struct `GalleryApplicationVersionsClientUpdateResult` +- New struct `GalleryApplicationsClientBeginCreateOrUpdateOptions` +- New struct `GalleryApplicationsClientBeginDeleteOptions` +- New struct `GalleryApplicationsClientBeginUpdateOptions` +- New struct `GalleryApplicationsClientCreateOrUpdatePoller` +- New struct `GalleryApplicationsClientCreateOrUpdatePollerResponse` +- New struct `GalleryApplicationsClientCreateOrUpdateResponse` +- New struct `GalleryApplicationsClientCreateOrUpdateResult` +- New struct `GalleryApplicationsClientDeletePoller` +- New struct `GalleryApplicationsClientDeletePollerResponse` +- New struct `GalleryApplicationsClientDeleteResponse` +- New struct `GalleryApplicationsClientGetOptions` +- New struct `GalleryApplicationsClientGetResponse` +- New struct `GalleryApplicationsClientGetResult` +- New struct `GalleryApplicationsClientListByGalleryOptions` +- New struct `GalleryApplicationsClientListByGalleryPager` +- New struct `GalleryApplicationsClientListByGalleryResponse` +- New struct `GalleryApplicationsClientListByGalleryResult` +- New struct `GalleryApplicationsClientUpdatePoller` +- New struct 
`GalleryApplicationsClientUpdatePollerResponse` +- New struct `GalleryApplicationsClientUpdateResponse` +- New struct `GalleryApplicationsClientUpdateResult` +- New struct `GalleryImageVersionsClientBeginCreateOrUpdateOptions` +- New struct `GalleryImageVersionsClientBeginDeleteOptions` +- New struct `GalleryImageVersionsClientBeginUpdateOptions` +- New struct `GalleryImageVersionsClientCreateOrUpdatePoller` +- New struct `GalleryImageVersionsClientCreateOrUpdatePollerResponse` +- New struct `GalleryImageVersionsClientCreateOrUpdateResponse` +- New struct `GalleryImageVersionsClientCreateOrUpdateResult` +- New struct `GalleryImageVersionsClientDeletePoller` +- New struct `GalleryImageVersionsClientDeletePollerResponse` +- New struct `GalleryImageVersionsClientDeleteResponse` +- New struct `GalleryImageVersionsClientGetOptions` +- New struct `GalleryImageVersionsClientGetResponse` +- New struct `GalleryImageVersionsClientGetResult` +- New struct `GalleryImageVersionsClientListByGalleryImageOptions` +- New struct `GalleryImageVersionsClientListByGalleryImagePager` +- New struct `GalleryImageVersionsClientListByGalleryImageResponse` +- New struct `GalleryImageVersionsClientListByGalleryImageResult` +- New struct `GalleryImageVersionsClientUpdatePoller` +- New struct `GalleryImageVersionsClientUpdatePollerResponse` +- New struct `GalleryImageVersionsClientUpdateResponse` +- New struct `GalleryImageVersionsClientUpdateResult` +- New struct `GalleryImagesClientBeginCreateOrUpdateOptions` +- New struct `GalleryImagesClientBeginDeleteOptions` +- New struct `GalleryImagesClientBeginUpdateOptions` +- New struct `GalleryImagesClientCreateOrUpdatePoller` +- New struct `GalleryImagesClientCreateOrUpdatePollerResponse` +- New struct `GalleryImagesClientCreateOrUpdateResponse` +- New struct `GalleryImagesClientCreateOrUpdateResult` +- New struct `GalleryImagesClientDeletePoller` +- New struct `GalleryImagesClientDeletePollerResponse` +- New struct `GalleryImagesClientDeleteResponse` +- New struct `GalleryImagesClientGetOptions` +- New struct `GalleryImagesClientGetResponse` +- New struct `GalleryImagesClientGetResult` +- New struct `GalleryImagesClientListByGalleryOptions` +- New struct `GalleryImagesClientListByGalleryPager` +- New struct `GalleryImagesClientListByGalleryResponse` +- New struct `GalleryImagesClientListByGalleryResult` +- New struct `GalleryImagesClientUpdatePoller` +- New struct `GalleryImagesClientUpdatePollerResponse` +- New struct `GalleryImagesClientUpdateResponse` +- New struct `GalleryImagesClientUpdateResult` +- New struct `GallerySharingProfileClientBeginUpdateOptions` +- New struct `GallerySharingProfileClientUpdatePoller` +- New struct `GallerySharingProfileClientUpdatePollerResponse` +- New struct `GallerySharingProfileClientUpdateResponse` +- New struct `GallerySharingProfileClientUpdateResult` +- New struct `ImagesClientBeginCreateOrUpdateOptions` +- New struct `ImagesClientBeginDeleteOptions` +- New struct `ImagesClientBeginUpdateOptions` +- New struct `ImagesClientCreateOrUpdatePoller` +- New struct `ImagesClientCreateOrUpdatePollerResponse` +- New struct `ImagesClientCreateOrUpdateResponse` +- New struct `ImagesClientCreateOrUpdateResult` +- New struct `ImagesClientDeletePoller` +- New struct `ImagesClientDeletePollerResponse` +- New struct `ImagesClientDeleteResponse` +- New struct `ImagesClientGetOptions` +- New struct `ImagesClientGetResponse` +- New struct `ImagesClientGetResult` +- New struct `ImagesClientListByResourceGroupOptions` +- New struct 
`ImagesClientListByResourceGroupPager` +- New struct `ImagesClientListByResourceGroupResponse` +- New struct `ImagesClientListByResourceGroupResult` +- New struct `ImagesClientListOptions` +- New struct `ImagesClientListPager` +- New struct `ImagesClientListResponse` +- New struct `ImagesClientListResult` +- New struct `ImagesClientUpdatePoller` +- New struct `ImagesClientUpdatePollerResponse` +- New struct `ImagesClientUpdateResponse` +- New struct `ImagesClientUpdateResult` +- New struct `LogAnalyticsClientBeginExportRequestRateByIntervalOptions` +- New struct `LogAnalyticsClientBeginExportThrottledRequestsOptions` +- New struct `LogAnalyticsClientExportRequestRateByIntervalPoller` +- New struct `LogAnalyticsClientExportRequestRateByIntervalPollerResponse` +- New struct `LogAnalyticsClientExportRequestRateByIntervalResponse` +- New struct `LogAnalyticsClientExportRequestRateByIntervalResult` +- New struct `LogAnalyticsClientExportThrottledRequestsPoller` +- New struct `LogAnalyticsClientExportThrottledRequestsPollerResponse` +- New struct `LogAnalyticsClientExportThrottledRequestsResponse` +- New struct `LogAnalyticsClientExportThrottledRequestsResult` +- New struct `OperationListResult` +- New struct `OperationValue` +- New struct `OperationValueDisplay` +- New struct `OperationsClientListOptions` +- New struct `OperationsClientListResponse` +- New struct `OperationsClientListResult` +- New struct `ProximityPlacementGroupsClientCreateOrUpdateOptions` +- New struct `ProximityPlacementGroupsClientCreateOrUpdateResponse` +- New struct `ProximityPlacementGroupsClientCreateOrUpdateResult` +- New struct `ProximityPlacementGroupsClientDeleteOptions` +- New struct `ProximityPlacementGroupsClientDeleteResponse` +- New struct `ProximityPlacementGroupsClientGetOptions` +- New struct `ProximityPlacementGroupsClientGetResponse` +- New struct `ProximityPlacementGroupsClientGetResult` +- New struct `ProximityPlacementGroupsClientListByResourceGroupOptions` +- New struct `ProximityPlacementGroupsClientListByResourceGroupPager` +- New struct `ProximityPlacementGroupsClientListByResourceGroupResponse` +- New struct `ProximityPlacementGroupsClientListByResourceGroupResult` +- New struct `ProximityPlacementGroupsClientListBySubscriptionOptions` +- New struct `ProximityPlacementGroupsClientListBySubscriptionPager` +- New struct `ProximityPlacementGroupsClientListBySubscriptionResponse` +- New struct `ProximityPlacementGroupsClientListBySubscriptionResult` +- New struct `ProximityPlacementGroupsClientUpdateOptions` +- New struct `ProximityPlacementGroupsClientUpdateResponse` +- New struct `ProximityPlacementGroupsClientUpdateResult` +- New struct `ResourceSKUsClientListOptions` +- New struct `ResourceSKUsClientListPager` +- New struct `ResourceSKUsClientListResponse` +- New struct `ResourceSKUsClientListResult` +- New struct `RestorePointCollectionsClientBeginDeleteOptions` +- New struct `RestorePointCollectionsClientCreateOrUpdateOptions` +- New struct `RestorePointCollectionsClientCreateOrUpdateResponse` +- New struct `RestorePointCollectionsClientCreateOrUpdateResult` +- New struct `RestorePointCollectionsClientDeletePoller` +- New struct `RestorePointCollectionsClientDeletePollerResponse` +- New struct `RestorePointCollectionsClientDeleteResponse` +- New struct `RestorePointCollectionsClientGetOptions` +- New struct `RestorePointCollectionsClientGetResponse` +- New struct `RestorePointCollectionsClientGetResult` +- New struct `RestorePointCollectionsClientListAllOptions` +- New struct 
`RestorePointCollectionsClientListAllPager` +- New struct `RestorePointCollectionsClientListAllResponse` +- New struct `RestorePointCollectionsClientListAllResult` +- New struct `RestorePointCollectionsClientListOptions` +- New struct `RestorePointCollectionsClientListPager` +- New struct `RestorePointCollectionsClientListResponse` +- New struct `RestorePointCollectionsClientListResult` +- New struct `RestorePointCollectionsClientUpdateOptions` +- New struct `RestorePointCollectionsClientUpdateResponse` +- New struct `RestorePointCollectionsClientUpdateResult` +- New struct `RestorePointsClientBeginCreateOptions` +- New struct `RestorePointsClientBeginDeleteOptions` +- New struct `RestorePointsClientCreatePoller` +- New struct `RestorePointsClientCreatePollerResponse` +- New struct `RestorePointsClientCreateResponse` +- New struct `RestorePointsClientCreateResult` +- New struct `RestorePointsClientDeletePoller` +- New struct `RestorePointsClientDeletePollerResponse` +- New struct `RestorePointsClientDeleteResponse` +- New struct `RestorePointsClientGetOptions` +- New struct `RestorePointsClientGetResponse` +- New struct `RestorePointsClientGetResult` +- New struct `SSHPublicKeysClientCreateOptions` +- New struct `SSHPublicKeysClientCreateResponse` +- New struct `SSHPublicKeysClientCreateResult` +- New struct `SSHPublicKeysClientDeleteOptions` +- New struct `SSHPublicKeysClientDeleteResponse` +- New struct `SSHPublicKeysClientGenerateKeyPairOptions` +- New struct `SSHPublicKeysClientGenerateKeyPairResponse` +- New struct `SSHPublicKeysClientGenerateKeyPairResult` +- New struct `SSHPublicKeysClientGetOptions` +- New struct `SSHPublicKeysClientGetResponse` +- New struct `SSHPublicKeysClientGetResult` +- New struct `SSHPublicKeysClientListByResourceGroupOptions` +- New struct `SSHPublicKeysClientListByResourceGroupPager` +- New struct `SSHPublicKeysClientListByResourceGroupResponse` +- New struct `SSHPublicKeysClientListByResourceGroupResult` +- New struct `SSHPublicKeysClientListBySubscriptionOptions` +- New struct `SSHPublicKeysClientListBySubscriptionPager` +- New struct `SSHPublicKeysClientListBySubscriptionResponse` +- New struct `SSHPublicKeysClientListBySubscriptionResult` +- New struct `SSHPublicKeysClientUpdateOptions` +- New struct `SSHPublicKeysClientUpdateResponse` +- New struct `SSHPublicKeysClientUpdateResult` +- New struct `SharedGalleriesClientGetOptions` +- New struct `SharedGalleriesClientGetResponse` +- New struct `SharedGalleriesClientGetResult` +- New struct `SharedGalleriesClientListOptions` +- New struct `SharedGalleriesClientListPager` +- New struct `SharedGalleriesClientListResponse` +- New struct `SharedGalleriesClientListResult` +- New struct `SharedGalleryImageVersionsClientGetOptions` +- New struct `SharedGalleryImageVersionsClientGetResponse` +- New struct `SharedGalleryImageVersionsClientGetResult` +- New struct `SharedGalleryImageVersionsClientListOptions` +- New struct `SharedGalleryImageVersionsClientListPager` +- New struct `SharedGalleryImageVersionsClientListResponse` +- New struct `SharedGalleryImageVersionsClientListResult` +- New struct `SharedGalleryImagesClientGetOptions` +- New struct `SharedGalleryImagesClientGetResponse` +- New struct `SharedGalleryImagesClientGetResult` +- New struct `SharedGalleryImagesClientListOptions` +- New struct `SharedGalleryImagesClientListPager` +- New struct `SharedGalleryImagesClientListResponse` +- New struct `SharedGalleryImagesClientListResult` +- New struct `SnapshotsClientBeginCreateOrUpdateOptions` +- New struct 
`SnapshotsClientBeginDeleteOptions` +- New struct `SnapshotsClientBeginGrantAccessOptions` +- New struct `SnapshotsClientBeginRevokeAccessOptions` +- New struct `SnapshotsClientBeginUpdateOptions` +- New struct `SnapshotsClientCreateOrUpdatePoller` +- New struct `SnapshotsClientCreateOrUpdatePollerResponse` +- New struct `SnapshotsClientCreateOrUpdateResponse` +- New struct `SnapshotsClientCreateOrUpdateResult` +- New struct `SnapshotsClientDeletePoller` +- New struct `SnapshotsClientDeletePollerResponse` +- New struct `SnapshotsClientDeleteResponse` +- New struct `SnapshotsClientGetOptions` +- New struct `SnapshotsClientGetResponse` +- New struct `SnapshotsClientGetResult` +- New struct `SnapshotsClientGrantAccessPoller` +- New struct `SnapshotsClientGrantAccessPollerResponse` +- New struct `SnapshotsClientGrantAccessResponse` +- New struct `SnapshotsClientGrantAccessResult` +- New struct `SnapshotsClientListByResourceGroupOptions` +- New struct `SnapshotsClientListByResourceGroupPager` +- New struct `SnapshotsClientListByResourceGroupResponse` +- New struct `SnapshotsClientListByResourceGroupResult` +- New struct `SnapshotsClientListOptions` +- New struct `SnapshotsClientListPager` +- New struct `SnapshotsClientListResponse` +- New struct `SnapshotsClientListResult` +- New struct `SnapshotsClientRevokeAccessPoller` +- New struct `SnapshotsClientRevokeAccessPollerResponse` +- New struct `SnapshotsClientRevokeAccessResponse` +- New struct `SnapshotsClientUpdatePoller` +- New struct `SnapshotsClientUpdatePollerResponse` +- New struct `SnapshotsClientUpdateResponse` +- New struct `SnapshotsClientUpdateResult` +- New struct `UsageClientListOptions` +- New struct `UsageClientListPager` +- New struct `UsageClientListResponse` +- New struct `UsageClientListResult` +- New struct `VirtualMachineExtensionImagesClientGetOptions` +- New struct `VirtualMachineExtensionImagesClientGetResponse` +- New struct `VirtualMachineExtensionImagesClientGetResult` +- New struct `VirtualMachineExtensionImagesClientListTypesOptions` +- New struct `VirtualMachineExtensionImagesClientListTypesResponse` +- New struct `VirtualMachineExtensionImagesClientListTypesResult` +- New struct `VirtualMachineExtensionImagesClientListVersionsOptions` +- New struct `VirtualMachineExtensionImagesClientListVersionsResponse` +- New struct `VirtualMachineExtensionImagesClientListVersionsResult` +- New struct `VirtualMachineExtensionsClientBeginCreateOrUpdateOptions` +- New struct `VirtualMachineExtensionsClientBeginDeleteOptions` +- New struct `VirtualMachineExtensionsClientBeginUpdateOptions` +- New struct `VirtualMachineExtensionsClientCreateOrUpdatePoller` +- New struct `VirtualMachineExtensionsClientCreateOrUpdatePollerResponse` +- New struct `VirtualMachineExtensionsClientCreateOrUpdateResponse` +- New struct `VirtualMachineExtensionsClientCreateOrUpdateResult` +- New struct `VirtualMachineExtensionsClientDeletePoller` +- New struct `VirtualMachineExtensionsClientDeletePollerResponse` +- New struct `VirtualMachineExtensionsClientDeleteResponse` +- New struct `VirtualMachineExtensionsClientGetOptions` +- New struct `VirtualMachineExtensionsClientGetResponse` +- New struct `VirtualMachineExtensionsClientGetResult` +- New struct `VirtualMachineExtensionsClientListOptions` +- New struct `VirtualMachineExtensionsClientListResponse` +- New struct `VirtualMachineExtensionsClientListResult` +- New struct `VirtualMachineExtensionsClientUpdatePoller` +- New struct `VirtualMachineExtensionsClientUpdatePollerResponse` +- New struct 
`VirtualMachineExtensionsClientUpdateResponse` +- New struct `VirtualMachineExtensionsClientUpdateResult` +- New struct `VirtualMachineImagesClientGetOptions` +- New struct `VirtualMachineImagesClientGetResponse` +- New struct `VirtualMachineImagesClientGetResult` +- New struct `VirtualMachineImagesClientListOffersOptions` +- New struct `VirtualMachineImagesClientListOffersResponse` +- New struct `VirtualMachineImagesClientListOffersResult` +- New struct `VirtualMachineImagesClientListOptions` +- New struct `VirtualMachineImagesClientListPublishersOptions` +- New struct `VirtualMachineImagesClientListPublishersResponse` +- New struct `VirtualMachineImagesClientListPublishersResult` +- New struct `VirtualMachineImagesClientListResponse` +- New struct `VirtualMachineImagesClientListResult` +- New struct `VirtualMachineImagesClientListSKUsOptions` +- New struct `VirtualMachineImagesClientListSKUsResponse` +- New struct `VirtualMachineImagesClientListSKUsResult` +- New struct `VirtualMachineImagesEdgeZoneClientGetOptions` +- New struct `VirtualMachineImagesEdgeZoneClientGetResponse` +- New struct `VirtualMachineImagesEdgeZoneClientGetResult` +- New struct `VirtualMachineImagesEdgeZoneClientListOffersOptions` +- New struct `VirtualMachineImagesEdgeZoneClientListOffersResponse` +- New struct `VirtualMachineImagesEdgeZoneClientListOffersResult` +- New struct `VirtualMachineImagesEdgeZoneClientListOptions` +- New struct `VirtualMachineImagesEdgeZoneClientListPublishersOptions` +- New struct `VirtualMachineImagesEdgeZoneClientListPublishersResponse` +- New struct `VirtualMachineImagesEdgeZoneClientListPublishersResult` +- New struct `VirtualMachineImagesEdgeZoneClientListResponse` +- New struct `VirtualMachineImagesEdgeZoneClientListResult` +- New struct `VirtualMachineImagesEdgeZoneClientListSKUsOptions` +- New struct `VirtualMachineImagesEdgeZoneClientListSKUsResponse` +- New struct `VirtualMachineImagesEdgeZoneClientListSKUsResult` +- New struct `VirtualMachineRunCommandsClientBeginCreateOrUpdateOptions` +- New struct `VirtualMachineRunCommandsClientBeginDeleteOptions` +- New struct `VirtualMachineRunCommandsClientBeginUpdateOptions` +- New struct `VirtualMachineRunCommandsClientCreateOrUpdatePoller` +- New struct `VirtualMachineRunCommandsClientCreateOrUpdatePollerResponse` +- New struct `VirtualMachineRunCommandsClientCreateOrUpdateResponse` +- New struct `VirtualMachineRunCommandsClientCreateOrUpdateResult` +- New struct `VirtualMachineRunCommandsClientDeletePoller` +- New struct `VirtualMachineRunCommandsClientDeletePollerResponse` +- New struct `VirtualMachineRunCommandsClientDeleteResponse` +- New struct `VirtualMachineRunCommandsClientGetByVirtualMachineOptions` +- New struct `VirtualMachineRunCommandsClientGetByVirtualMachineResponse` +- New struct `VirtualMachineRunCommandsClientGetByVirtualMachineResult` +- New struct `VirtualMachineRunCommandsClientGetOptions` +- New struct `VirtualMachineRunCommandsClientGetResponse` +- New struct `VirtualMachineRunCommandsClientGetResult` +- New struct `VirtualMachineRunCommandsClientListByVirtualMachineOptions` +- New struct `VirtualMachineRunCommandsClientListByVirtualMachinePager` +- New struct `VirtualMachineRunCommandsClientListByVirtualMachineResponse` +- New struct `VirtualMachineRunCommandsClientListByVirtualMachineResult` +- New struct `VirtualMachineRunCommandsClientListOptions` +- New struct `VirtualMachineRunCommandsClientListPager` +- New struct `VirtualMachineRunCommandsClientListResponse` +- New struct 
`VirtualMachineRunCommandsClientListResult` +- New struct `VirtualMachineRunCommandsClientUpdatePoller` +- New struct `VirtualMachineRunCommandsClientUpdatePollerResponse` +- New struct `VirtualMachineRunCommandsClientUpdateResponse` +- New struct `VirtualMachineRunCommandsClientUpdateResult` +- New struct `VirtualMachineScaleSetExtensionsClientBeginCreateOrUpdateOptions` +- New struct `VirtualMachineScaleSetExtensionsClientBeginDeleteOptions` +- New struct `VirtualMachineScaleSetExtensionsClientBeginUpdateOptions` +- New struct `VirtualMachineScaleSetExtensionsClientCreateOrUpdatePoller` +- New struct `VirtualMachineScaleSetExtensionsClientCreateOrUpdatePollerResponse` +- New struct `VirtualMachineScaleSetExtensionsClientCreateOrUpdateResponse` +- New struct `VirtualMachineScaleSetExtensionsClientCreateOrUpdateResult` +- New struct `VirtualMachineScaleSetExtensionsClientDeletePoller` +- New struct `VirtualMachineScaleSetExtensionsClientDeletePollerResponse` +- New struct `VirtualMachineScaleSetExtensionsClientDeleteResponse` +- New struct `VirtualMachineScaleSetExtensionsClientGetOptions` +- New struct `VirtualMachineScaleSetExtensionsClientGetResponse` +- New struct `VirtualMachineScaleSetExtensionsClientGetResult` +- New struct `VirtualMachineScaleSetExtensionsClientListOptions` +- New struct `VirtualMachineScaleSetExtensionsClientListPager` +- New struct `VirtualMachineScaleSetExtensionsClientListResponse` +- New struct `VirtualMachineScaleSetExtensionsClientListResult` +- New struct `VirtualMachineScaleSetExtensionsClientUpdatePoller` +- New struct `VirtualMachineScaleSetExtensionsClientUpdatePollerResponse` +- New struct `VirtualMachineScaleSetExtensionsClientUpdateResponse` +- New struct `VirtualMachineScaleSetExtensionsClientUpdateResult` +- New struct `VirtualMachineScaleSetRollingUpgradesClientBeginCancelOptions` +- New struct `VirtualMachineScaleSetRollingUpgradesClientBeginStartExtensionUpgradeOptions` +- New struct `VirtualMachineScaleSetRollingUpgradesClientBeginStartOSUpgradeOptions` +- New struct `VirtualMachineScaleSetRollingUpgradesClientCancelPoller` +- New struct `VirtualMachineScaleSetRollingUpgradesClientCancelPollerResponse` +- New struct `VirtualMachineScaleSetRollingUpgradesClientCancelResponse` +- New struct `VirtualMachineScaleSetRollingUpgradesClientGetLatestOptions` +- New struct `VirtualMachineScaleSetRollingUpgradesClientGetLatestResponse` +- New struct `VirtualMachineScaleSetRollingUpgradesClientGetLatestResult` +- New struct `VirtualMachineScaleSetRollingUpgradesClientStartExtensionUpgradePoller` +- New struct `VirtualMachineScaleSetRollingUpgradesClientStartExtensionUpgradePollerResponse` +- New struct `VirtualMachineScaleSetRollingUpgradesClientStartExtensionUpgradeResponse` +- New struct `VirtualMachineScaleSetRollingUpgradesClientStartOSUpgradePoller` +- New struct `VirtualMachineScaleSetRollingUpgradesClientStartOSUpgradePollerResponse` +- New struct `VirtualMachineScaleSetRollingUpgradesClientStartOSUpgradeResponse` +- New struct `VirtualMachineScaleSetVMExtensionsClientBeginCreateOrUpdateOptions` +- New struct `VirtualMachineScaleSetVMExtensionsClientBeginDeleteOptions` +- New struct `VirtualMachineScaleSetVMExtensionsClientBeginUpdateOptions` +- New struct `VirtualMachineScaleSetVMExtensionsClientCreateOrUpdatePoller` +- New struct `VirtualMachineScaleSetVMExtensionsClientCreateOrUpdatePollerResponse` +- New struct `VirtualMachineScaleSetVMExtensionsClientCreateOrUpdateResponse` +- New struct 
`VirtualMachineScaleSetVMExtensionsClientCreateOrUpdateResult` +- New struct `VirtualMachineScaleSetVMExtensionsClientDeletePoller` +- New struct `VirtualMachineScaleSetVMExtensionsClientDeletePollerResponse` +- New struct `VirtualMachineScaleSetVMExtensionsClientDeleteResponse` +- New struct `VirtualMachineScaleSetVMExtensionsClientGetOptions` +- New struct `VirtualMachineScaleSetVMExtensionsClientGetResponse` +- New struct `VirtualMachineScaleSetVMExtensionsClientGetResult` +- New struct `VirtualMachineScaleSetVMExtensionsClientListOptions` +- New struct `VirtualMachineScaleSetVMExtensionsClientListResponse` +- New struct `VirtualMachineScaleSetVMExtensionsClientListResult` +- New struct `VirtualMachineScaleSetVMExtensionsClientUpdatePoller` +- New struct `VirtualMachineScaleSetVMExtensionsClientUpdatePollerResponse` +- New struct `VirtualMachineScaleSetVMExtensionsClientUpdateResponse` +- New struct `VirtualMachineScaleSetVMExtensionsClientUpdateResult` +- New struct `VirtualMachineScaleSetVMRunCommandsClientBeginCreateOrUpdateOptions` +- New struct `VirtualMachineScaleSetVMRunCommandsClientBeginDeleteOptions` +- New struct `VirtualMachineScaleSetVMRunCommandsClientBeginUpdateOptions` +- New struct `VirtualMachineScaleSetVMRunCommandsClientCreateOrUpdatePoller` +- New struct `VirtualMachineScaleSetVMRunCommandsClientCreateOrUpdatePollerResponse` +- New struct `VirtualMachineScaleSetVMRunCommandsClientCreateOrUpdateResponse` +- New struct `VirtualMachineScaleSetVMRunCommandsClientCreateOrUpdateResult` +- New struct `VirtualMachineScaleSetVMRunCommandsClientDeletePoller` +- New struct `VirtualMachineScaleSetVMRunCommandsClientDeletePollerResponse` +- New struct `VirtualMachineScaleSetVMRunCommandsClientDeleteResponse` +- New struct `VirtualMachineScaleSetVMRunCommandsClientGetOptions` +- New struct `VirtualMachineScaleSetVMRunCommandsClientGetResponse` +- New struct `VirtualMachineScaleSetVMRunCommandsClientGetResult` +- New struct `VirtualMachineScaleSetVMRunCommandsClientListOptions` +- New struct `VirtualMachineScaleSetVMRunCommandsClientListPager` +- New struct `VirtualMachineScaleSetVMRunCommandsClientListResponse` +- New struct `VirtualMachineScaleSetVMRunCommandsClientListResult` +- New struct `VirtualMachineScaleSetVMRunCommandsClientUpdatePoller` +- New struct `VirtualMachineScaleSetVMRunCommandsClientUpdatePollerResponse` +- New struct `VirtualMachineScaleSetVMRunCommandsClientUpdateResponse` +- New struct `VirtualMachineScaleSetVMRunCommandsClientUpdateResult` +- New struct `VirtualMachineScaleSetVMsClientBeginDeallocateOptions` +- New struct `VirtualMachineScaleSetVMsClientBeginDeleteOptions` +- New struct `VirtualMachineScaleSetVMsClientBeginPerformMaintenanceOptions` +- New struct `VirtualMachineScaleSetVMsClientBeginPowerOffOptions` +- New struct `VirtualMachineScaleSetVMsClientBeginRedeployOptions` +- New struct `VirtualMachineScaleSetVMsClientBeginReimageAllOptions` +- New struct `VirtualMachineScaleSetVMsClientBeginReimageOptions` +- New struct `VirtualMachineScaleSetVMsClientBeginRestartOptions` +- New struct `VirtualMachineScaleSetVMsClientBeginRunCommandOptions` +- New struct `VirtualMachineScaleSetVMsClientBeginStartOptions` +- New struct `VirtualMachineScaleSetVMsClientBeginUpdateOptions` +- New struct `VirtualMachineScaleSetVMsClientDeallocatePoller` +- New struct `VirtualMachineScaleSetVMsClientDeallocatePollerResponse` +- New struct `VirtualMachineScaleSetVMsClientDeallocateResponse` +- New struct `VirtualMachineScaleSetVMsClientDeletePoller` +- New struct 
`VirtualMachineScaleSetVMsClientDeletePollerResponse` +- New struct `VirtualMachineScaleSetVMsClientDeleteResponse` +- New struct `VirtualMachineScaleSetVMsClientGetInstanceViewOptions` +- New struct `VirtualMachineScaleSetVMsClientGetInstanceViewResponse` +- New struct `VirtualMachineScaleSetVMsClientGetInstanceViewResult` +- New struct `VirtualMachineScaleSetVMsClientGetOptions` +- New struct `VirtualMachineScaleSetVMsClientGetResponse` +- New struct `VirtualMachineScaleSetVMsClientGetResult` +- New struct `VirtualMachineScaleSetVMsClientListOptions` +- New struct `VirtualMachineScaleSetVMsClientListPager` +- New struct `VirtualMachineScaleSetVMsClientListResponse` +- New struct `VirtualMachineScaleSetVMsClientListResult` +- New struct `VirtualMachineScaleSetVMsClientPerformMaintenancePoller` +- New struct `VirtualMachineScaleSetVMsClientPerformMaintenancePollerResponse` +- New struct `VirtualMachineScaleSetVMsClientPerformMaintenanceResponse` +- New struct `VirtualMachineScaleSetVMsClientPowerOffPoller` +- New struct `VirtualMachineScaleSetVMsClientPowerOffPollerResponse` +- New struct `VirtualMachineScaleSetVMsClientPowerOffResponse` +- New struct `VirtualMachineScaleSetVMsClientRedeployPoller` +- New struct `VirtualMachineScaleSetVMsClientRedeployPollerResponse` +- New struct `VirtualMachineScaleSetVMsClientRedeployResponse` +- New struct `VirtualMachineScaleSetVMsClientReimageAllPoller` +- New struct `VirtualMachineScaleSetVMsClientReimageAllPollerResponse` +- New struct `VirtualMachineScaleSetVMsClientReimageAllResponse` +- New struct `VirtualMachineScaleSetVMsClientReimagePoller` +- New struct `VirtualMachineScaleSetVMsClientReimagePollerResponse` +- New struct `VirtualMachineScaleSetVMsClientReimageResponse` +- New struct `VirtualMachineScaleSetVMsClientRestartPoller` +- New struct `VirtualMachineScaleSetVMsClientRestartPollerResponse` +- New struct `VirtualMachineScaleSetVMsClientRestartResponse` +- New struct `VirtualMachineScaleSetVMsClientRetrieveBootDiagnosticsDataOptions` +- New struct `VirtualMachineScaleSetVMsClientRetrieveBootDiagnosticsDataResponse` +- New struct `VirtualMachineScaleSetVMsClientRetrieveBootDiagnosticsDataResult` +- New struct `VirtualMachineScaleSetVMsClientRunCommandPoller` +- New struct `VirtualMachineScaleSetVMsClientRunCommandPollerResponse` +- New struct `VirtualMachineScaleSetVMsClientRunCommandResponse` +- New struct `VirtualMachineScaleSetVMsClientRunCommandResult` +- New struct `VirtualMachineScaleSetVMsClientSimulateEvictionOptions` +- New struct `VirtualMachineScaleSetVMsClientSimulateEvictionResponse` +- New struct `VirtualMachineScaleSetVMsClientStartPoller` +- New struct `VirtualMachineScaleSetVMsClientStartPollerResponse` +- New struct `VirtualMachineScaleSetVMsClientStartResponse` +- New struct `VirtualMachineScaleSetVMsClientUpdatePoller` +- New struct `VirtualMachineScaleSetVMsClientUpdatePollerResponse` +- New struct `VirtualMachineScaleSetVMsClientUpdateResponse` +- New struct `VirtualMachineScaleSetVMsClientUpdateResult` +- New struct `VirtualMachineScaleSetsClientBeginCreateOrUpdateOptions` +- New struct `VirtualMachineScaleSetsClientBeginDeallocateOptions` +- New struct `VirtualMachineScaleSetsClientBeginDeleteInstancesOptions` +- New struct `VirtualMachineScaleSetsClientBeginDeleteOptions` +- New struct `VirtualMachineScaleSetsClientBeginPerformMaintenanceOptions` +- New struct `VirtualMachineScaleSetsClientBeginPowerOffOptions` +- New struct `VirtualMachineScaleSetsClientBeginRedeployOptions` +- New struct 
`VirtualMachineScaleSetsClientBeginReimageAllOptions` +- New struct `VirtualMachineScaleSetsClientBeginReimageOptions` +- New struct `VirtualMachineScaleSetsClientBeginRestartOptions` +- New struct `VirtualMachineScaleSetsClientBeginSetOrchestrationServiceStateOptions` +- New struct `VirtualMachineScaleSetsClientBeginStartOptions` +- New struct `VirtualMachineScaleSetsClientBeginUpdateInstancesOptions` +- New struct `VirtualMachineScaleSetsClientBeginUpdateOptions` +- New struct `VirtualMachineScaleSetsClientConvertToSinglePlacementGroupOptions` +- New struct `VirtualMachineScaleSetsClientConvertToSinglePlacementGroupResponse` +- New struct `VirtualMachineScaleSetsClientCreateOrUpdatePoller` +- New struct `VirtualMachineScaleSetsClientCreateOrUpdatePollerResponse` +- New struct `VirtualMachineScaleSetsClientCreateOrUpdateResponse` +- New struct `VirtualMachineScaleSetsClientCreateOrUpdateResult` +- New struct `VirtualMachineScaleSetsClientDeallocatePoller` +- New struct `VirtualMachineScaleSetsClientDeallocatePollerResponse` +- New struct `VirtualMachineScaleSetsClientDeallocateResponse` +- New struct `VirtualMachineScaleSetsClientDeleteInstancesPoller` +- New struct `VirtualMachineScaleSetsClientDeleteInstancesPollerResponse` +- New struct `VirtualMachineScaleSetsClientDeleteInstancesResponse` +- New struct `VirtualMachineScaleSetsClientDeletePoller` +- New struct `VirtualMachineScaleSetsClientDeletePollerResponse` +- New struct `VirtualMachineScaleSetsClientDeleteResponse` +- New struct `VirtualMachineScaleSetsClientForceRecoveryServiceFabricPlatformUpdateDomainWalkOptions` +- New struct `VirtualMachineScaleSetsClientForceRecoveryServiceFabricPlatformUpdateDomainWalkResponse` +- New struct `VirtualMachineScaleSetsClientForceRecoveryServiceFabricPlatformUpdateDomainWalkResult` +- New struct `VirtualMachineScaleSetsClientGetInstanceViewOptions` +- New struct `VirtualMachineScaleSetsClientGetInstanceViewResponse` +- New struct `VirtualMachineScaleSetsClientGetInstanceViewResult` +- New struct `VirtualMachineScaleSetsClientGetOSUpgradeHistoryOptions` +- New struct `VirtualMachineScaleSetsClientGetOSUpgradeHistoryPager` +- New struct `VirtualMachineScaleSetsClientGetOSUpgradeHistoryResponse` +- New struct `VirtualMachineScaleSetsClientGetOSUpgradeHistoryResult` +- New struct `VirtualMachineScaleSetsClientGetOptions` +- New struct `VirtualMachineScaleSetsClientGetResponse` +- New struct `VirtualMachineScaleSetsClientGetResult` +- New struct `VirtualMachineScaleSetsClientListAllOptions` +- New struct `VirtualMachineScaleSetsClientListAllPager` +- New struct `VirtualMachineScaleSetsClientListAllResponse` +- New struct `VirtualMachineScaleSetsClientListAllResult` +- New struct `VirtualMachineScaleSetsClientListByLocationOptions` +- New struct `VirtualMachineScaleSetsClientListByLocationPager` +- New struct `VirtualMachineScaleSetsClientListByLocationResponse` +- New struct `VirtualMachineScaleSetsClientListByLocationResult` +- New struct `VirtualMachineScaleSetsClientListOptions` +- New struct `VirtualMachineScaleSetsClientListPager` +- New struct `VirtualMachineScaleSetsClientListResponse` +- New struct `VirtualMachineScaleSetsClientListResult` +- New struct `VirtualMachineScaleSetsClientListSKUsOptions` +- New struct `VirtualMachineScaleSetsClientListSKUsPager` +- New struct `VirtualMachineScaleSetsClientListSKUsResponse` +- New struct `VirtualMachineScaleSetsClientListSKUsResult` +- New struct `VirtualMachineScaleSetsClientPerformMaintenancePoller` +- New struct 
`VirtualMachineScaleSetsClientPerformMaintenancePollerResponse` +- New struct `VirtualMachineScaleSetsClientPerformMaintenanceResponse` +- New struct `VirtualMachineScaleSetsClientPowerOffPoller` +- New struct `VirtualMachineScaleSetsClientPowerOffPollerResponse` +- New struct `VirtualMachineScaleSetsClientPowerOffResponse` +- New struct `VirtualMachineScaleSetsClientRedeployPoller` +- New struct `VirtualMachineScaleSetsClientRedeployPollerResponse` +- New struct `VirtualMachineScaleSetsClientRedeployResponse` +- New struct `VirtualMachineScaleSetsClientReimageAllPoller` +- New struct `VirtualMachineScaleSetsClientReimageAllPollerResponse` +- New struct `VirtualMachineScaleSetsClientReimageAllResponse` +- New struct `VirtualMachineScaleSetsClientReimagePoller` +- New struct `VirtualMachineScaleSetsClientReimagePollerResponse` +- New struct `VirtualMachineScaleSetsClientReimageResponse` +- New struct `VirtualMachineScaleSetsClientRestartPoller` +- New struct `VirtualMachineScaleSetsClientRestartPollerResponse` +- New struct `VirtualMachineScaleSetsClientRestartResponse` +- New struct `VirtualMachineScaleSetsClientSetOrchestrationServiceStatePoller` +- New struct `VirtualMachineScaleSetsClientSetOrchestrationServiceStatePollerResponse` +- New struct `VirtualMachineScaleSetsClientSetOrchestrationServiceStateResponse` +- New struct `VirtualMachineScaleSetsClientStartPoller` +- New struct `VirtualMachineScaleSetsClientStartPollerResponse` +- New struct `VirtualMachineScaleSetsClientStartResponse` +- New struct `VirtualMachineScaleSetsClientUpdateInstancesPoller` +- New struct `VirtualMachineScaleSetsClientUpdateInstancesPollerResponse` +- New struct `VirtualMachineScaleSetsClientUpdateInstancesResponse` +- New struct `VirtualMachineScaleSetsClientUpdatePoller` +- New struct `VirtualMachineScaleSetsClientUpdatePollerResponse` +- New struct `VirtualMachineScaleSetsClientUpdateResponse` +- New struct `VirtualMachineScaleSetsClientUpdateResult` +- New struct `VirtualMachineSizesClientListOptions` +- New struct `VirtualMachineSizesClientListResponse` +- New struct `VirtualMachineSizesClientListResult` +- New struct `VirtualMachinesClientAssessPatchesPoller` +- New struct `VirtualMachinesClientAssessPatchesPollerResponse` +- New struct `VirtualMachinesClientAssessPatchesResponse` +- New struct `VirtualMachinesClientAssessPatchesResult` +- New struct `VirtualMachinesClientBeginAssessPatchesOptions` +- New struct `VirtualMachinesClientBeginCaptureOptions` +- New struct `VirtualMachinesClientBeginConvertToManagedDisksOptions` +- New struct `VirtualMachinesClientBeginCreateOrUpdateOptions` +- New struct `VirtualMachinesClientBeginDeallocateOptions` +- New struct `VirtualMachinesClientBeginDeleteOptions` +- New struct `VirtualMachinesClientBeginInstallPatchesOptions` +- New struct `VirtualMachinesClientBeginPerformMaintenanceOptions` +- New struct `VirtualMachinesClientBeginPowerOffOptions` +- New struct `VirtualMachinesClientBeginReapplyOptions` +- New struct `VirtualMachinesClientBeginRedeployOptions` +- New struct `VirtualMachinesClientBeginReimageOptions` +- New struct `VirtualMachinesClientBeginRestartOptions` +- New struct `VirtualMachinesClientBeginRunCommandOptions` +- New struct `VirtualMachinesClientBeginStartOptions` +- New struct `VirtualMachinesClientBeginUpdateOptions` +- New struct `VirtualMachinesClientCapturePoller` +- New struct `VirtualMachinesClientCapturePollerResponse` +- New struct `VirtualMachinesClientCaptureResponse` +- New struct `VirtualMachinesClientCaptureResult` +- New 
struct `VirtualMachinesClientConvertToManagedDisksPoller` +- New struct `VirtualMachinesClientConvertToManagedDisksPollerResponse` +- New struct `VirtualMachinesClientConvertToManagedDisksResponse` +- New struct `VirtualMachinesClientCreateOrUpdatePoller` +- New struct `VirtualMachinesClientCreateOrUpdatePollerResponse` +- New struct `VirtualMachinesClientCreateOrUpdateResponse` +- New struct `VirtualMachinesClientCreateOrUpdateResult` +- New struct `VirtualMachinesClientDeallocatePoller` +- New struct `VirtualMachinesClientDeallocatePollerResponse` +- New struct `VirtualMachinesClientDeallocateResponse` +- New struct `VirtualMachinesClientDeletePoller` +- New struct `VirtualMachinesClientDeletePollerResponse` +- New struct `VirtualMachinesClientDeleteResponse` +- New struct `VirtualMachinesClientGeneralizeOptions` +- New struct `VirtualMachinesClientGeneralizeResponse` +- New struct `VirtualMachinesClientGetOptions` +- New struct `VirtualMachinesClientGetResponse` +- New struct `VirtualMachinesClientGetResult` +- New struct `VirtualMachinesClientInstallPatchesPoller` +- New struct `VirtualMachinesClientInstallPatchesPollerResponse` +- New struct `VirtualMachinesClientInstallPatchesResponse` +- New struct `VirtualMachinesClientInstallPatchesResult` +- New struct `VirtualMachinesClientInstanceViewOptions` +- New struct `VirtualMachinesClientInstanceViewResponse` +- New struct `VirtualMachinesClientInstanceViewResult` +- New struct `VirtualMachinesClientListAllOptions` +- New struct `VirtualMachinesClientListAllPager` +- New struct `VirtualMachinesClientListAllResponse` +- New struct `VirtualMachinesClientListAllResult` +- New struct `VirtualMachinesClientListAvailableSizesOptions` +- New struct `VirtualMachinesClientListAvailableSizesResponse` +- New struct `VirtualMachinesClientListAvailableSizesResult` +- New struct `VirtualMachinesClientListByLocationOptions` +- New struct `VirtualMachinesClientListByLocationPager` +- New struct `VirtualMachinesClientListByLocationResponse` +- New struct `VirtualMachinesClientListByLocationResult` +- New struct `VirtualMachinesClientListOptions` +- New struct `VirtualMachinesClientListPager` +- New struct `VirtualMachinesClientListResponse` +- New struct `VirtualMachinesClientListResult` +- New struct `VirtualMachinesClientPerformMaintenancePoller` +- New struct `VirtualMachinesClientPerformMaintenancePollerResponse` +- New struct `VirtualMachinesClientPerformMaintenanceResponse` +- New struct `VirtualMachinesClientPowerOffPoller` +- New struct `VirtualMachinesClientPowerOffPollerResponse` +- New struct `VirtualMachinesClientPowerOffResponse` +- New struct `VirtualMachinesClientReapplyPoller` +- New struct `VirtualMachinesClientReapplyPollerResponse` +- New struct `VirtualMachinesClientReapplyResponse` +- New struct `VirtualMachinesClientRedeployPoller` +- New struct `VirtualMachinesClientRedeployPollerResponse` +- New struct `VirtualMachinesClientRedeployResponse` +- New struct `VirtualMachinesClientReimagePoller` +- New struct `VirtualMachinesClientReimagePollerResponse` +- New struct `VirtualMachinesClientReimageResponse` +- New struct `VirtualMachinesClientRestartPoller` +- New struct `VirtualMachinesClientRestartPollerResponse` +- New struct `VirtualMachinesClientRestartResponse` +- New struct `VirtualMachinesClientRetrieveBootDiagnosticsDataOptions` +- New struct `VirtualMachinesClientRetrieveBootDiagnosticsDataResponse` +- New struct `VirtualMachinesClientRetrieveBootDiagnosticsDataResult` +- New struct `VirtualMachinesClientRunCommandPoller` +- 
New struct `VirtualMachinesClientRunCommandPollerResponse` +- New struct `VirtualMachinesClientRunCommandResponse` +- New struct `VirtualMachinesClientRunCommandResult` +- New struct `VirtualMachinesClientSimulateEvictionOptions` +- New struct `VirtualMachinesClientSimulateEvictionResponse` +- New struct `VirtualMachinesClientStartPoller` +- New struct `VirtualMachinesClientStartPollerResponse` +- New struct `VirtualMachinesClientStartResponse` +- New struct `VirtualMachinesClientUpdatePoller` +- New struct `VirtualMachinesClientUpdatePollerResponse` +- New struct `VirtualMachinesClientUpdateResponse` +- New struct `VirtualMachinesClientUpdateResult` +- New field `Tags` in struct `CapacityReservationUpdate` +- New field `SupportedCapabilities` in struct `SnapshotUpdateProperties` +- New field `Tags` in struct `AvailabilitySetUpdate` +- New field `Location` in struct `Image` +- New field `Tags` in struct `Image` +- New field `ID` in struct `Image` +- New field `Name` in struct `Image` +- New field `Type` in struct `Image` +- New field `Name` in struct `RestorePoint` +- New field `Type` in struct `RestorePoint` +- New field `ID` in struct `RestorePoint` +- New field `Location` in struct `SSHPublicKeyResource` +- New field `Tags` in struct `SSHPublicKeyResource` +- New field `ID` in struct `SSHPublicKeyResource` +- New field `Name` in struct `SSHPublicKeyResource` +- New field `Type` in struct `SSHPublicKeyResource` +- New field `Tags` in struct `SSHPublicKeyUpdateResource` +- New field `ID` in struct `VirtualMachineImageResource` +- New field `BlobContainerSasURI` in struct `RequestRateByIntervalInput` +- New field `FromTime` in struct `RequestRateByIntervalInput` +- New field `ToTime` in struct `RequestRateByIntervalInput` +- New field `GroupByOperationName` in struct `RequestRateByIntervalInput` +- New field `GroupByClientApplicationID` in struct `RequestRateByIntervalInput` +- New field `GroupByResourceName` in struct `RequestRateByIntervalInput` +- New field `GroupByThrottlePolicy` in struct `RequestRateByIntervalInput` +- New field `GroupByUserAgent` in struct `RequestRateByIntervalInput` +- New field `Tags` in struct `DedicatedHostGroupUpdate` +- New field `Name` in struct `VirtualMachineScaleSet` +- New field `Location` in struct `VirtualMachineScaleSet` +- New field `ID` in struct `VirtualMachineScaleSet` +- New field `Type` in struct `VirtualMachineScaleSet` +- New field `Tags` in struct `VirtualMachineScaleSet` +- New field `Location` in struct `Gallery` +- New field `Tags` in struct `Gallery` +- New field `ID` in struct `Gallery` +- New field `Name` in struct `Gallery` +- New field `Type` in struct `Gallery` +- New field `ID` in struct `SubResourceWithColocationStatus` +- New field `SecurityProfile` in struct `SnapshotProperties` +- New field `Tags` in struct `CapacityReservationGroupUpdate` +- New field `Name` in struct `CapacityReservationGroup` +- New field `Type` in struct `CapacityReservationGroup` +- New field `Location` in struct `CapacityReservationGroup` +- New field `Tags` in struct `CapacityReservationGroup` +- New field `ID` in struct `CapacityReservationGroup` +- New field `Tags` in struct `ProximityPlacementGroupUpdate` +- New field `Tags` in struct `VirtualMachineExtensionUpdate` +- New field `Tags` in struct `VirtualMachineRunCommandUpdate` +- New field `ID` in struct `VirtualMachineScaleSetExtension` +- New field `TempDisk` in struct `VirtualMachineScaleSetReimageParameters` +- New field `Type` in struct `RollingUpgradeStatusInfo` +- New field `Location` in struct 
`RollingUpgradeStatusInfo` +- New field `Tags` in struct `RollingUpgradeStatusInfo` +- New field `ID` in struct `RollingUpgradeStatusInfo` +- New field `Name` in struct `RollingUpgradeStatusInfo` +- New field `Tags` in struct `GalleryApplicationVersionUpdate` +- New field `ID` in struct `GalleryApplicationVersionUpdate` +- New field `Name` in struct `GalleryApplicationVersionUpdate` +- New field `Type` in struct `GalleryApplicationVersionUpdate` +- New field `SecurityDataURI` in struct `CreationData` +- New field `StorageAccountType` in struct `ImageOSDisk` +- New field `BlobURI` in struct `ImageOSDisk` +- New field `Caching` in struct `ImageOSDisk` +- New field `DiskEncryptionSet` in struct `ImageOSDisk` +- New field `DiskSizeGB` in struct `ImageOSDisk` +- New field `Snapshot` in struct `ImageOSDisk` +- New field `ManagedDisk` in struct `ImageOSDisk` +- New field `UtilizationInfo` in struct `CapacityReservationInstanceViewWithName` +- New field `Statuses` in struct `CapacityReservationInstanceViewWithName` +- New field `Location` in struct `Snapshot` +- New field `ID` in struct `Snapshot` +- New field `Type` in struct `Snapshot` +- New field `Tags` in struct `Snapshot` +- New field `Name` in struct `Snapshot` +- New field `HostCaching` in struct `GalleryDataDiskImage` +- New field `Source` in struct `GalleryDataDiskImage` +- New field `SizeInGB` in struct `GalleryDataDiskImage` +- New field `ID` in struct `GalleryImageVersion` +- New field `Name` in struct `GalleryImageVersion` +- New field `Type` in struct `GalleryImageVersion` +- New field `Location` in struct `GalleryImageVersion` +- New field `Tags` in struct `GalleryImageVersion` +- New field `Location` in struct `DedicatedHost` +- New field `Tags` in struct `DedicatedHost` +- New field `ID` in struct `DedicatedHost` +- New field `Name` in struct `DedicatedHost` +- New field `Type` in struct `DedicatedHost` +- New field `ID` in struct `VirtualMachineCaptureResult` +- New field `ID` in struct `GalleryApplicationUpdate` +- New field `Name` in struct `GalleryApplicationUpdate` +- New field `Type` in struct `GalleryApplicationUpdate` +- New field `Tags` in struct `GalleryApplicationUpdate` +- New field `ID` in struct `GalleryImageVersionUpdate` +- New field `Name` in struct `GalleryImageVersionUpdate` +- New field `Type` in struct `GalleryImageVersionUpdate` +- New field `Tags` in struct `GalleryImageVersionUpdate` +- New field `ID` in struct `VirtualMachineScaleSetVMExtensionUpdate` +- New field `ID` in struct `VirtualMachineScaleSetNetworkConfiguration` +- New field `Tags` in struct `RestorePointCollectionUpdate` +- New field `ID` in struct `DiskEncryptionSetParameters` +- New field `ID` in struct `VirtualMachineScaleSetVMExtension` +- New field `Type` in struct `VirtualMachine` +- New field `Tags` in struct `VirtualMachine` +- New field `Location` in struct `VirtualMachine` +- New field `ID` in struct `VirtualMachine` +- New field `Name` in struct `VirtualMachine` +- New field `ID` in struct `DiskEncryptionSet` +- New field `Name` in struct `DiskEncryptionSet` +- New field `Type` in struct `DiskEncryptionSet` +- New field `Location` in struct `DiskEncryptionSet` +- New field `Tags` in struct `DiskEncryptionSet` +- New field `Tags` in struct `ImageUpdate` +- New field `DiskEncryptionSetID` in struct `OSDiskImageEncryption` +- New field `Tags` in struct `GalleryUpdate` +- New field `ID` in struct `GalleryUpdate` +- New field `Name` in struct `GalleryUpdate` +- New field `Type` in struct `GalleryUpdate` +- New field `Name` in struct 
`CapacityReservation` +- New field `Type` in struct `CapacityReservation` +- New field `Location` in struct `CapacityReservation` +- New field `Tags` in struct `CapacityReservation` +- New field `ID` in struct `CapacityReservation` +- New field `Tags` in struct `VirtualMachineUpdate` +- New field `ID` in struct `ManagedDiskParameters` +- New field `Identifier` in struct `CommunityGallery` +- New field `Location` in struct `CommunityGallery` +- New field `Name` in struct `CommunityGallery` +- New field `Type` in struct `CommunityGallery` +- New field `DiskSizeGB` in struct `ImageDataDisk` +- New field `ManagedDisk` in struct `ImageDataDisk` +- New field `Snapshot` in struct `ImageDataDisk` +- New field `StorageAccountType` in struct `ImageDataDisk` +- New field `BlobURI` in struct `ImageDataDisk` +- New field `Caching` in struct `ImageDataDisk` +- New field `DiskEncryptionSet` in struct `ImageDataDisk` +- New field `HostCaching` in struct `GalleryOSDiskImage` +- New field `Source` in struct `GalleryOSDiskImage` +- New field `SizeInGB` in struct `GalleryOSDiskImage` +- New field `GetSecureVMGuestStateSAS` in struct `GrantAccessData` +- New field `ID` in struct `VirtualMachineScaleSetUpdateIPConfiguration` +- New field `Name` in struct `VirtualMachineExtensionImage` +- New field `Type` in struct `VirtualMachineExtensionImage` +- New field `Location` in struct `VirtualMachineExtensionImage` +- New field `Tags` in struct `VirtualMachineExtensionImage` +- New field `ID` in struct `VirtualMachineExtensionImage` +- New field `ID` in struct `VirtualMachineScaleSetIPConfiguration` +- New field `Location` in struct `PirSharedGalleryResource` +- New field `Name` in struct `PirSharedGalleryResource` +- New field `SecurityDataAccessSAS` in struct `AccessURI` +- New field `Tags` in struct `VirtualMachineExtension` +- New field `ID` in struct `VirtualMachineExtension` +- New field `Name` in struct `VirtualMachineExtension` +- New field `Type` in struct `VirtualMachineExtension` +- New field `Location` in struct `VirtualMachineExtension` +- New field `Name` in struct `DedicatedHostGroup` +- New field `Type` in struct `DedicatedHostGroup` +- New field `Location` in struct `DedicatedHostGroup` +- New field `Tags` in struct `DedicatedHostGroup` +- New field `ID` in struct `DedicatedHostGroup` +- New field `Tags` in struct `GalleryImageUpdate` +- New field `ID` in struct `GalleryImageUpdate` +- New field `Name` in struct `GalleryImageUpdate` +- New field `Type` in struct `GalleryImageUpdate` +- New field `Tags` in struct `VirtualMachineScaleSetVM` +- New field `ID` in struct `VirtualMachineScaleSetVM` +- New field `Location` in struct `VirtualMachineScaleSetVM` +- New field `Name` in struct `VirtualMachineScaleSetVM` +- New field `Type` in struct `VirtualMachineScaleSetVM` +- New field `ExtendedLocation` in struct `VirtualMachineImage` +- New field `ID` in struct `VirtualMachineImage` +- New field `Tags` in struct `VirtualMachineImage` +- New field `Location` in struct `VirtualMachineImage` +- New field `Name` in struct `VirtualMachineImage` +- New field `ID` in struct `ProximityPlacementGroup` +- New field `Name` in struct `ProximityPlacementGroup` +- New field `Type` in struct `ProximityPlacementGroup` +- New field `Location` in struct `ProximityPlacementGroup` +- New field `Tags` in struct `ProximityPlacementGroup` +- New field `Type` in struct `VirtualMachineRunCommand` +- New field `Location` in struct `VirtualMachineRunCommand` +- New field `Tags` in struct `VirtualMachineRunCommand` +- New field `ID` in 
struct `VirtualMachineRunCommand` +- New field `Name` in struct `VirtualMachineRunCommand` +- New field `ID` in struct `RunCommandDocument` +- New field `Label` in struct `RunCommandDocument` +- New field `OSType` in struct `RunCommandDocument` +- New field `Schema` in struct `RunCommandDocument` +- New field `Description` in struct `RunCommandDocument` +- New field `ReplicationMode` in struct `GalleryApplicationVersionPublishingProfile` +- New field `StorageAccountType` in struct `GalleryApplicationVersionPublishingProfile` +- New field `TargetRegions` in struct `GalleryApplicationVersionPublishingProfile` +- New field `ExcludeFromLatest` in struct `GalleryApplicationVersionPublishingProfile` +- New field `PublishedDate` in struct `GalleryApplicationVersionPublishingProfile` +- New field `EndOfLifeDate` in struct `GalleryApplicationVersionPublishingProfile` +- New field `ReplicaCount` in struct `GalleryApplicationVersionPublishingProfile` +- New field `ID` in struct `ImageReference` +- New field `Tags` in struct `DedicatedHostUpdate` +- New field `Location` in struct `AvailabilitySet` +- New field `Tags` in struct `AvailabilitySet` +- New field `ID` in struct `AvailabilitySet` +- New field `Name` in struct `AvailabilitySet` +- New field `Type` in struct `AvailabilitySet` +- New field `DiskEncryptionSetID` in struct `DataDiskImageEncryption` +- New field `Identifier` in struct `SharedGallery` +- New field `Location` in struct `SharedGallery` +- New field `Name` in struct `SharedGallery` +- New field `ID` in struct `DiskRestorePoint` +- New field `Name` in struct `DiskRestorePoint` +- New field `Type` in struct `DiskRestorePoint` +- New field `SourceResourceLocation` in struct `DiskRestorePointProperties` +- New field `ReplicationState` in struct `DiskRestorePointProperties` +- New field `Error` in struct `CloudError` +- New field `EndOfLifeDate` in struct `GalleryImageVersionPublishingProfile` +- New field `ExcludeFromLatest` in struct `GalleryImageVersionPublishingProfile` +- New field `ReplicaCount` in struct `GalleryImageVersionPublishingProfile` +- New field `ReplicationMode` in struct `GalleryImageVersionPublishingProfile` +- New field `StorageAccountType` in struct `GalleryImageVersionPublishingProfile` +- New field `TargetRegions` in struct `GalleryImageVersionPublishingProfile` +- New field `PublishedDate` in struct `GalleryImageVersionPublishingProfile` +- New field `ID` in struct `RestorePointCollection` +- New field `Name` in struct `RestorePointCollection` +- New field `Type` in struct `RestorePointCollection` +- New field `Location` in struct `RestorePointCollection` +- New field `Tags` in struct `RestorePointCollection` +- New field `SecureVMDiskEncryptionSetID` in struct `DiskSecurityProfile` +- New field `Tags` in struct `VirtualMachineScaleSetUpdate` +- New field `Identifier` in struct `SharedGalleryImage` +- New field `Location` in struct `SharedGalleryImage` +- New field `Name` in struct `SharedGalleryImage` +- New field `Location` in struct `GalleryApplication` +- New field `Tags` in struct `GalleryApplication` +- New field `ID` in struct `GalleryApplication` +- New field `Name` in struct `GalleryApplication` +- New field `Type` in struct `GalleryApplication` +- New field `Location` in struct `CommunityGalleryImage` +- New field `Name` in struct `CommunityGalleryImage` +- New field `Type` in struct `CommunityGalleryImage` +- New field `Identifier` in struct `CommunityGalleryImage` +- New field `ID` in struct `VirtualMachineScaleSetExtensionUpdate` +- New field 
`Location` in struct `DiskAccess` +- New field `Tags` in struct `DiskAccess` +- New field `ID` in struct `DiskAccess` +- New field `Name` in struct `DiskAccess` +- New field `Type` in struct `DiskAccess` +- New field `ToTime` in struct `ThrottledRequestsInput` +- New field `GroupByClientApplicationID` in struct `ThrottledRequestsInput` +- New field `GroupByOperationName` in struct `ThrottledRequestsInput` +- New field `GroupByResourceName` in struct `ThrottledRequestsInput` +- New field `GroupByThrottlePolicy` in struct `ThrottledRequestsInput` +- New field `GroupByUserAgent` in struct `ThrottledRequestsInput` +- New field `BlobContainerSasURI` in struct `ThrottledRequestsInput` +- New field `FromTime` in struct `ThrottledRequestsInput` +- New field `ID` in struct `NetworkInterfaceReference` +- New field `TempDisk` in struct `VirtualMachineScaleSetVMReimageParameters` +- New field `Identifier` in struct `SharedGalleryImageVersion` +- New field `Location` in struct `SharedGalleryImageVersion` +- New field `Name` in struct `SharedGalleryImageVersion` +- New field `AssetID` in struct `DedicatedHostInstanceViewWithName` +- New field `AvailableCapacity` in struct `DedicatedHostInstanceViewWithName` +- New field `Statuses` in struct `DedicatedHostInstanceViewWithName` +- New field `Tags` in struct `GalleryImage` +- New field `ID` in struct `GalleryImage` +- New field `Name` in struct `GalleryImage` +- New field `Type` in struct `GalleryImage` +- New field `Location` in struct `GalleryImage` +- New field `Type` in struct `GalleryApplicationVersion` +- New field `Location` in struct `GalleryApplicationVersion` +- New field `Tags` in struct `GalleryApplicationVersion` +- New field `ID` in struct `GalleryApplicationVersion` +- New field `Name` in struct `GalleryApplicationVersion` +- New field `Identifier` in struct `CommunityGalleryImageVersion` +- New field `Location` in struct `CommunityGalleryImageVersion` +- New field `Name` in struct `CommunityGalleryImageVersion` +- New field `Type` in struct `CommunityGalleryImageVersion` +- New field `Location` in struct `Disk` +- New field `ID` in struct `Disk` +- New field `Type` in struct `Disk` +- New field `Tags` in struct `Disk` +- New field `Name` in struct `Disk` +- New field `ID` in struct `VirtualMachineScaleSetUpdateNetworkConfiguration` + + +## 0.2.1 (2021-11-26) + +### Other Changes + +- Now use `github.com/Azure/azure-sdk-for-go/sdk/azidentity@v0.12.0` explicitly. + +## 0.2.0 (2021-10-29) + +### Breaking Changes + +- `arm.Connection` has been removed in `github.com/Azure/azure-sdk-for-go/sdk/azcore/v0.20.0` +- The parameters of `NewXXXClient` has been changed from `(con *arm.Connection, subscriptionID string)` to `(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions)` + +## 0.1.0 (2021-09-29) +- To better align with the Azure SDK guidelines (https://azure.github.io/azure-sdk/general_introduction.html), we have decided to change the module path to "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute". Therefore, we are deprecating the old module path (which is "github.com/Azure/azure-sdk-for-go/sdk/compute/armcompute") to avoid confusion. 
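The 0.2.0 entry above is the breaking change most relevant to this dependency bump: client constructors no longer take an `*arm.Connection`. A minimal migration sketch, assuming an `azidentity` credential and using the `AvailabilitySetsClient` whose generated source is vendored later in this patch (`<subscription ID>` is a placeholder, not a real value):

```go
package main

import (
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute"
)

func main() {
	// Credential construction is unchanged across the versions listed above.
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	// Pre-0.2.0 signature (per the changelog): NewAvailabilitySetsClient(con *arm.Connection, subscriptionID string).
	// Since 0.2.0 the credential is passed directly and options may be nil.
	client, err := armcompute.NewAvailabilitySetsClient("<subscription ID>", cred, nil)
	if err != nil {
		log.Fatal(err)
	}
	_ = client
}
```

Note that the import path itself also moved in 0.1.0, from `sdk/compute/armcompute` to `sdk/resourcemanager/compute/armcompute`, so the `go.mod` requirement and the import line change together.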
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/LICENSE.txt b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/LICENSE.txt new file mode 100644 index 000000000..dc0c2ffb3 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/LICENSE.txt @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) Microsoft Corporation. All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/README.md new file mode 100644 index 000000000..7ccd1f866 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/README.md @@ -0,0 +1,88 @@ +# Azure Compute Module for Go + +[![PkgGoDev](https://pkg.go.dev/badge/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute)](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute) + +The `armcompute` module provides operations for working with Azure Compute. + +[Source code](https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/resourcemanager/compute/armcompute) + +# Getting started + +## Prerequisites + +- an [Azure subscription](https://azure.microsoft.com/free/) +- Go 1.18 or above + +## Install the package + +This project uses [Go modules](https://github.com/golang/go/wiki/Modules) for versioning and dependency management. + +Install the Azure Compute module: + +```sh +go get github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute +``` + +## Authorization + +When creating a client, you will need to provide a credential for authenticating with Azure Compute. The `azidentity` module provides facilities for various ways of authenticating with Azure including client/secret, certificate, managed identity, and more. + +```go +cred, err := azidentity.NewDefaultAzureCredential(nil) +``` + +For more information on authentication, please see the documentation for `azidentity` at [pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity). + +## Clients + +Azure Compute modules consist of one or more clients. A client groups a set of related APIs, providing access to its functionality within the specified subscription. 
Create one or more clients to access the APIs you require using your credential. + +```go +client, err := armcompute.NewLogAnalyticsClient(<subscription ID>, cred, nil) +``` + +You can use `ClientOptions` in package `github.com/Azure/azure-sdk-for-go/sdk/azcore/arm` to set endpoint to connect with public and sovereign clouds as well as Azure Stack. For more information, please see the documentation for `azcore` at [pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore). + +```go +options := arm.ClientOptions { + ClientOptions: azcore.ClientOptions { + Cloud: cloud.AzureChina, + }, +} +client, err := armcompute.NewLogAnalyticsClient(<subscription ID>, cred, &options) +``` + +## More sample code + +- [Availability Set](https://aka.ms/azsdk/go/mgmt/samples?path=sdk/resourcemanager/compute/availabilityset) +- [Virtual Machine](https://aka.ms/azsdk/go/mgmt/samples?path=sdk/resourcemanager/compute/createVM) +- [Dedicated Host](https://aka.ms/azsdk/go/mgmt/samples?path=sdk/resourcemanager/compute/dedicated_host) +- [Disk](https://aka.ms/azsdk/go/mgmt/samples?path=sdk/resourcemanager/compute/disk) +- [Gallery](https://aka.ms/azsdk/go/mgmt/samples?path=sdk/resourcemanager/compute/gallery) +- [Proximity Placement Group](https://aka.ms/azsdk/go/mgmt/samples?path=sdk/resourcemanager/compute/proximity) +- [Snapshot](https://aka.ms/azsdk/go/mgmt/samples?path=sdk/resourcemanager/compute/snapshot) +- [Virtual Machine Scale Set](https://aka.ms/azsdk/go/mgmt/samples?path=sdk/resourcemanager/compute/vmscaleset) + +## Provide Feedback + +If you encounter bugs or have suggestions, please +[open an issue](https://github.com/Azure/azure-sdk-for-go/issues) and assign the `Compute` label. + +# Contributing + +This project welcomes contributions and suggestions. Most contributions require +you to agree to a Contributor License Agreement (CLA) declaring that you have +the right to, and actually do, grant us the rights to use your contribution. +For details, visit [https://cla.microsoft.com](https://cla.microsoft.com). + +When you submit a pull request, a CLA-bot will automatically determine whether +you need to provide a CLA and decorate the PR appropriately (e.g., label, +comment). Simply follow the instructions provided by the bot. You will only +need to do this once across all repos using our CLA. + +This project has adopted the +[Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). +For more information, see the +[Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) +or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any +additional questions or comments.
\ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/autorest.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/autorest.md new file mode 100644 index 000000000..6586bd0dd --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/autorest.md @@ -0,0 +1,12 @@ +### AutoRest Configuration + +> see https://aka.ms/autorest + +``` yaml +azure-arm: true +require: +- https://github.com/Azure/azure-rest-api-specs/blob/0cc5e2efd6ffccf30e80d1e150b488dd87198b94/specification/compute/resource-manager/readme.md +- https://github.com/Azure/azure-rest-api-specs/blob/0cc5e2efd6ffccf30e80d1e150b488dd87198b94/specification/compute/resource-manager/readme.go.md +license-header: MICROSOFT_MIT_NO_VERSION +module-version: 1.0.0 +``` \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/build.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/build.go new file mode 100644 index 000000000..78362d442 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/build.go @@ -0,0 +1,7 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +// This file enables 'go generate' to regenerate this specific SDK +//go:generate pwsh.exe ../../../../eng/scripts/build.ps1 -skipBuild -cleanGenerated -format -tidy -generate resourcemanager/compute/armcompute + +package armcompute diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/ci.yml new file mode 100644 index 000000000..084ed1b49 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/ci.yml @@ -0,0 +1,28 @@ +# NOTE: Please refer to https://aka.ms/azsdk/engsys/ci-yaml before editing this file. +trigger: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/resourcemanager/compute/armcompute/ + +pr: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/resourcemanager/compute/armcompute/ + +stages: +- template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml + parameters: + IncludeRelease: true + ServiceDirectory: 'resourcemanager/compute/armcompute' diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_availabilitysets_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_availabilitysets_client.go new file mode 100644 index 000000000..595afa853 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_availabilitysets_client.go @@ -0,0 +1,466 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +package armcompute + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + armruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strings" +) + +// AvailabilitySetsClient contains the methods for the AvailabilitySets group. +// Don't use this type directly, use NewAvailabilitySetsClient() instead. +type AvailabilitySetsClient struct { + host string + subscriptionID string + pl runtime.Pipeline +} + +// NewAvailabilitySetsClient creates a new instance of AvailabilitySetsClient with the specified values. +// subscriptionID - Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms +// part of the URI for every service call. +// credential - used to authorize requests. Usually a credential from azidentity. +// options - pass nil to accept the default values. +func NewAvailabilitySetsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*AvailabilitySetsClient, error) { + if options == nil { + options = &arm.ClientOptions{} + } + ep := cloud.AzurePublic.Services[cloud.ResourceManager].Endpoint + if c, ok := options.Cloud.Services[cloud.ResourceManager]; ok { + ep = c.Endpoint + } + pl, err := armruntime.NewPipeline(moduleName, moduleVersion, credential, runtime.PipelineOptions{}, options) + if err != nil { + return nil, err + } + client := &AvailabilitySetsClient{ + subscriptionID: subscriptionID, + host: ep, + pl: pl, + } + return client, nil +} + +// CreateOrUpdate - Create or update an availability set. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// availabilitySetName - The name of the availability set. +// parameters - Parameters supplied to the Create Availability Set operation. +// options - AvailabilitySetsClientCreateOrUpdateOptions contains the optional parameters for the AvailabilitySetsClient.CreateOrUpdate +// method. +func (client *AvailabilitySetsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, availabilitySetName string, parameters AvailabilitySet, options *AvailabilitySetsClientCreateOrUpdateOptions) (AvailabilitySetsClientCreateOrUpdateResponse, error) { + req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, availabilitySetName, parameters, options) + if err != nil { + return AvailabilitySetsClientCreateOrUpdateResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return AvailabilitySetsClientCreateOrUpdateResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return AvailabilitySetsClientCreateOrUpdateResponse{}, runtime.NewResponseError(resp) + } + return client.createOrUpdateHandleResponse(resp) +} + +// createOrUpdateCreateRequest creates the CreateOrUpdate request. 
+func (client *AvailabilitySetsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, availabilitySetName string, parameters AvailabilitySet, options *AvailabilitySetsClientCreateOrUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if availabilitySetName == "" { + return nil, errors.New("parameter availabilitySetName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{availabilitySetName}", url.PathEscape(availabilitySetName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, parameters) +} + +// createOrUpdateHandleResponse handles the CreateOrUpdate response. +func (client *AvailabilitySetsClient) createOrUpdateHandleResponse(resp *http.Response) (AvailabilitySetsClientCreateOrUpdateResponse, error) { + result := AvailabilitySetsClientCreateOrUpdateResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.AvailabilitySet); err != nil { + return AvailabilitySetsClientCreateOrUpdateResponse{}, err + } + return result, nil +} + +// Delete - Delete an availability set. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// availabilitySetName - The name of the availability set. +// options - AvailabilitySetsClientDeleteOptions contains the optional parameters for the AvailabilitySetsClient.Delete method. +func (client *AvailabilitySetsClient) Delete(ctx context.Context, resourceGroupName string, availabilitySetName string, options *AvailabilitySetsClientDeleteOptions) (AvailabilitySetsClientDeleteResponse, error) { + req, err := client.deleteCreateRequest(ctx, resourceGroupName, availabilitySetName, options) + if err != nil { + return AvailabilitySetsClientDeleteResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return AvailabilitySetsClientDeleteResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusNoContent) { + return AvailabilitySetsClientDeleteResponse{}, runtime.NewResponseError(resp) + } + return AvailabilitySetsClientDeleteResponse{}, nil +} + +// deleteCreateRequest creates the Delete request. 
+func (client *AvailabilitySetsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, availabilitySetName string, options *AvailabilitySetsClientDeleteOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if availabilitySetName == "" { + return nil, errors.New("parameter availabilitySetName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{availabilitySetName}", url.PathEscape(availabilitySetName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// Get - Retrieves information about an availability set. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// availabilitySetName - The name of the availability set. +// options - AvailabilitySetsClientGetOptions contains the optional parameters for the AvailabilitySetsClient.Get method. +func (client *AvailabilitySetsClient) Get(ctx context.Context, resourceGroupName string, availabilitySetName string, options *AvailabilitySetsClientGetOptions) (AvailabilitySetsClientGetResponse, error) { + req, err := client.getCreateRequest(ctx, resourceGroupName, availabilitySetName, options) + if err != nil { + return AvailabilitySetsClientGetResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return AvailabilitySetsClientGetResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return AvailabilitySetsClientGetResponse{}, runtime.NewResponseError(resp) + } + return client.getHandleResponse(resp) +} + +// getCreateRequest creates the Get request. 
+func (client *AvailabilitySetsClient) getCreateRequest(ctx context.Context, resourceGroupName string, availabilitySetName string, options *AvailabilitySetsClientGetOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if availabilitySetName == "" { + return nil, errors.New("parameter availabilitySetName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{availabilitySetName}", url.PathEscape(availabilitySetName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getHandleResponse handles the Get response. +func (client *AvailabilitySetsClient) getHandleResponse(resp *http.Response) (AvailabilitySetsClientGetResponse, error) { + result := AvailabilitySetsClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.AvailabilitySet); err != nil { + return AvailabilitySetsClientGetResponse{}, err + } + return result, nil +} + +// NewListPager - Lists all availability sets in a resource group. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// options - AvailabilitySetsClientListOptions contains the optional parameters for the AvailabilitySetsClient.List method. +func (client *AvailabilitySetsClient) NewListPager(resourceGroupName string, options *AvailabilitySetsClientListOptions) *runtime.Pager[AvailabilitySetsClientListResponse] { + return runtime.NewPager(runtime.PagingHandler[AvailabilitySetsClientListResponse]{ + More: func(page AvailabilitySetsClientListResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *AvailabilitySetsClientListResponse) (AvailabilitySetsClientListResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listCreateRequest(ctx, resourceGroupName, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return AvailabilitySetsClientListResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return AvailabilitySetsClientListResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return AvailabilitySetsClientListResponse{}, runtime.NewResponseError(resp) + } + return client.listHandleResponse(resp) + }, + }) +} + +// listCreateRequest creates the List request. 
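+//
+// Illustrative usage, not part of the generated client: the returned pager is
+// consumed with More and NextPage until exhausted; a minimal sketch with a
+// placeholder resource group name:
+//
+//    pager := client.NewListPager("<resource-group>", nil)
+//    for pager.More() {
+//        page, err := pager.NextPage(context.TODO())
+//        if err != nil {
+//            log.Fatal(err)
+//        }
+//        for _, set := range page.Value {
+//            fmt.Println(*set.Name)
+//        }
+//    }
+//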
+func (client *AvailabilitySetsClient) listCreateRequest(ctx context.Context, resourceGroupName string, options *AvailabilitySetsClientListOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listHandleResponse handles the List response. +func (client *AvailabilitySetsClient) listHandleResponse(resp *http.Response) (AvailabilitySetsClientListResponse, error) { + result := AvailabilitySetsClientListResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.AvailabilitySetListResult); err != nil { + return AvailabilitySetsClientListResponse{}, err + } + return result, nil +} + +// NewListAvailableSizesPager - Lists all available virtual machine sizes that can be used to create a new virtual machine +// in an existing availability set. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// availabilitySetName - The name of the availability set. +// options - AvailabilitySetsClientListAvailableSizesOptions contains the optional parameters for the AvailabilitySetsClient.ListAvailableSizes +// method. +func (client *AvailabilitySetsClient) NewListAvailableSizesPager(resourceGroupName string, availabilitySetName string, options *AvailabilitySetsClientListAvailableSizesOptions) *runtime.Pager[AvailabilitySetsClientListAvailableSizesResponse] { + return runtime.NewPager(runtime.PagingHandler[AvailabilitySetsClientListAvailableSizesResponse]{ + More: func(page AvailabilitySetsClientListAvailableSizesResponse) bool { + return false + }, + Fetcher: func(ctx context.Context, page *AvailabilitySetsClientListAvailableSizesResponse) (AvailabilitySetsClientListAvailableSizesResponse, error) { + req, err := client.listAvailableSizesCreateRequest(ctx, resourceGroupName, availabilitySetName, options) + if err != nil { + return AvailabilitySetsClientListAvailableSizesResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return AvailabilitySetsClientListAvailableSizesResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return AvailabilitySetsClientListAvailableSizesResponse{}, runtime.NewResponseError(resp) + } + return client.listAvailableSizesHandleResponse(resp) + }, + }) +} + +// listAvailableSizesCreateRequest creates the ListAvailableSizes request. 
+func (client *AvailabilitySetsClient) listAvailableSizesCreateRequest(ctx context.Context, resourceGroupName string, availabilitySetName string, options *AvailabilitySetsClientListAvailableSizesOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}/vmSizes" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if availabilitySetName == "" { + return nil, errors.New("parameter availabilitySetName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{availabilitySetName}", url.PathEscape(availabilitySetName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listAvailableSizesHandleResponse handles the ListAvailableSizes response. +func (client *AvailabilitySetsClient) listAvailableSizesHandleResponse(resp *http.Response) (AvailabilitySetsClientListAvailableSizesResponse, error) { + result := AvailabilitySetsClientListAvailableSizesResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.VirtualMachineSizeListResult); err != nil { + return AvailabilitySetsClientListAvailableSizesResponse{}, err + } + return result, nil +} + +// NewListBySubscriptionPager - Lists all availability sets in a subscription. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// options - AvailabilitySetsClientListBySubscriptionOptions contains the optional parameters for the AvailabilitySetsClient.ListBySubscription +// method. +func (client *AvailabilitySetsClient) NewListBySubscriptionPager(options *AvailabilitySetsClientListBySubscriptionOptions) *runtime.Pager[AvailabilitySetsClientListBySubscriptionResponse] { + return runtime.NewPager(runtime.PagingHandler[AvailabilitySetsClientListBySubscriptionResponse]{ + More: func(page AvailabilitySetsClientListBySubscriptionResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *AvailabilitySetsClientListBySubscriptionResponse) (AvailabilitySetsClientListBySubscriptionResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listBySubscriptionCreateRequest(ctx, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return AvailabilitySetsClientListBySubscriptionResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return AvailabilitySetsClientListBySubscriptionResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return AvailabilitySetsClientListBySubscriptionResponse{}, runtime.NewResponseError(resp) + } + return client.listBySubscriptionHandleResponse(resp) + }, + }) +} + +// listBySubscriptionCreateRequest creates the ListBySubscription request. 
+func (client *AvailabilitySetsClient) listBySubscriptionCreateRequest(ctx context.Context, options *AvailabilitySetsClientListBySubscriptionOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/availabilitySets" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + if options != nil && options.Expand != nil { + reqQP.Set("$expand", *options.Expand) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listBySubscriptionHandleResponse handles the ListBySubscription response. +func (client *AvailabilitySetsClient) listBySubscriptionHandleResponse(resp *http.Response) (AvailabilitySetsClientListBySubscriptionResponse, error) { + result := AvailabilitySetsClientListBySubscriptionResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.AvailabilitySetListResult); err != nil { + return AvailabilitySetsClientListBySubscriptionResponse{}, err + } + return result, nil +} + +// Update - Update an availability set. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// availabilitySetName - The name of the availability set. +// parameters - Parameters supplied to the Update Availability Set operation. +// options - AvailabilitySetsClientUpdateOptions contains the optional parameters for the AvailabilitySetsClient.Update method. +func (client *AvailabilitySetsClient) Update(ctx context.Context, resourceGroupName string, availabilitySetName string, parameters AvailabilitySetUpdate, options *AvailabilitySetsClientUpdateOptions) (AvailabilitySetsClientUpdateResponse, error) { + req, err := client.updateCreateRequest(ctx, resourceGroupName, availabilitySetName, parameters, options) + if err != nil { + return AvailabilitySetsClientUpdateResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return AvailabilitySetsClientUpdateResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return AvailabilitySetsClientUpdateResponse{}, runtime.NewResponseError(resp) + } + return client.updateHandleResponse(resp) +} + +// updateCreateRequest creates the Update request. 
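+//
+// Illustrative usage, not part of the generated client: Update issues a PATCH,
+// so typically only the fields set on AvailabilitySetUpdate are changed; a
+// minimal sketch that updates tags only, with placeholder names and the
+// azcore/to helper assumed:
+//
+//    resp, err := client.Update(context.TODO(), "<resource-group>", "<availability-set>",
+//        armcompute.AvailabilitySetUpdate{
+//            Tags: map[string]*string{"environment": to.Ptr("dev")},
+//        }, nil)
+//    if err != nil {
+//        log.Fatal(err)
+//    }
+//    fmt.Println(*resp.AvailabilitySet.ID)
+//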
+func (client *AvailabilitySetsClient) updateCreateRequest(ctx context.Context, resourceGroupName string, availabilitySetName string, parameters AvailabilitySetUpdate, options *AvailabilitySetsClientUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if availabilitySetName == "" { + return nil, errors.New("parameter availabilitySetName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{availabilitySetName}", url.PathEscape(availabilitySetName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, parameters) +} + +// updateHandleResponse handles the Update response. +func (client *AvailabilitySetsClient) updateHandleResponse(resp *http.Response) (AvailabilitySetsClientUpdateResponse, error) { + result := AvailabilitySetsClientUpdateResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.AvailabilitySet); err != nil { + return AvailabilitySetsClientUpdateResponse{}, err + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_capacityreservationgroups_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_capacityreservationgroups_client.go new file mode 100644 index 000000000..f698a3a55 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_capacityreservationgroups_client.go @@ -0,0 +1,418 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armcompute + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + armruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strings" +) + +// CapacityReservationGroupsClient contains the methods for the CapacityReservationGroups group. +// Don't use this type directly, use NewCapacityReservationGroupsClient() instead. +type CapacityReservationGroupsClient struct { + host string + subscriptionID string + pl runtime.Pipeline +} + +// NewCapacityReservationGroupsClient creates a new instance of CapacityReservationGroupsClient with the specified values. 
+// subscriptionID - Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms +// part of the URI for every service call. +// credential - used to authorize requests. Usually a credential from azidentity. +// options - pass nil to accept the default values. +func NewCapacityReservationGroupsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*CapacityReservationGroupsClient, error) { + if options == nil { + options = &arm.ClientOptions{} + } + ep := cloud.AzurePublic.Services[cloud.ResourceManager].Endpoint + if c, ok := options.Cloud.Services[cloud.ResourceManager]; ok { + ep = c.Endpoint + } + pl, err := armruntime.NewPipeline(moduleName, moduleVersion, credential, runtime.PipelineOptions{}, options) + if err != nil { + return nil, err + } + client := &CapacityReservationGroupsClient{ + subscriptionID: subscriptionID, + host: ep, + pl: pl, + } + return client, nil +} + +// CreateOrUpdate - The operation to create or update a capacity reservation group. When updating a capacity reservation group, +// only tags may be modified. Please refer to https://aka.ms/CapacityReservation for more +// details. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// capacityReservationGroupName - The name of the capacity reservation group. +// parameters - Parameters supplied to the Create capacity reservation Group. +// options - CapacityReservationGroupsClientCreateOrUpdateOptions contains the optional parameters for the CapacityReservationGroupsClient.CreateOrUpdate +// method. +func (client *CapacityReservationGroupsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, parameters CapacityReservationGroup, options *CapacityReservationGroupsClientCreateOrUpdateOptions) (CapacityReservationGroupsClientCreateOrUpdateResponse, error) { + req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, capacityReservationGroupName, parameters, options) + if err != nil { + return CapacityReservationGroupsClientCreateOrUpdateResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return CapacityReservationGroupsClientCreateOrUpdateResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) { + return CapacityReservationGroupsClientCreateOrUpdateResponse{}, runtime.NewResponseError(resp) + } + return client.createOrUpdateHandleResponse(resp) +} + +// createOrUpdateCreateRequest creates the CreateOrUpdate request. 
+func (client *CapacityReservationGroupsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, parameters CapacityReservationGroup, options *CapacityReservationGroupsClientCreateOrUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/capacityReservationGroups/{capacityReservationGroupName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if capacityReservationGroupName == "" { + return nil, errors.New("parameter capacityReservationGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{capacityReservationGroupName}", url.PathEscape(capacityReservationGroupName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, parameters) +} + +// createOrUpdateHandleResponse handles the CreateOrUpdate response. +func (client *CapacityReservationGroupsClient) createOrUpdateHandleResponse(resp *http.Response) (CapacityReservationGroupsClientCreateOrUpdateResponse, error) { + result := CapacityReservationGroupsClientCreateOrUpdateResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.CapacityReservationGroup); err != nil { + return CapacityReservationGroupsClientCreateOrUpdateResponse{}, err + } + return result, nil +} + +// Delete - The operation to delete a capacity reservation group. This operation is allowed only if all the associated resources +// are disassociated from the reservation group and all capacity reservations under +// the reservation group have also been deleted. Please refer to https://aka.ms/CapacityReservation for more details. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// capacityReservationGroupName - The name of the capacity reservation group. +// options - CapacityReservationGroupsClientDeleteOptions contains the optional parameters for the CapacityReservationGroupsClient.Delete +// method. +func (client *CapacityReservationGroupsClient) Delete(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, options *CapacityReservationGroupsClientDeleteOptions) (CapacityReservationGroupsClientDeleteResponse, error) { + req, err := client.deleteCreateRequest(ctx, resourceGroupName, capacityReservationGroupName, options) + if err != nil { + return CapacityReservationGroupsClientDeleteResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return CapacityReservationGroupsClientDeleteResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusNoContent) { + return CapacityReservationGroupsClientDeleteResponse{}, runtime.NewResponseError(resp) + } + return CapacityReservationGroupsClientDeleteResponse{}, nil +} + +// deleteCreateRequest creates the Delete request. 
+func (client *CapacityReservationGroupsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, options *CapacityReservationGroupsClientDeleteOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/capacityReservationGroups/{capacityReservationGroupName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if capacityReservationGroupName == "" { + return nil, errors.New("parameter capacityReservationGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{capacityReservationGroupName}", url.PathEscape(capacityReservationGroupName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// Get - The operation that retrieves information about a capacity reservation group. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// capacityReservationGroupName - The name of the capacity reservation group. +// options - CapacityReservationGroupsClientGetOptions contains the optional parameters for the CapacityReservationGroupsClient.Get +// method. +func (client *CapacityReservationGroupsClient) Get(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, options *CapacityReservationGroupsClientGetOptions) (CapacityReservationGroupsClientGetResponse, error) { + req, err := client.getCreateRequest(ctx, resourceGroupName, capacityReservationGroupName, options) + if err != nil { + return CapacityReservationGroupsClientGetResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return CapacityReservationGroupsClientGetResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return CapacityReservationGroupsClientGetResponse{}, runtime.NewResponseError(resp) + } + return client.getHandleResponse(resp) +} + +// getCreateRequest creates the Get request. 
+func (client *CapacityReservationGroupsClient) getCreateRequest(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, options *CapacityReservationGroupsClientGetOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/capacityReservationGroups/{capacityReservationGroupName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if capacityReservationGroupName == "" { + return nil, errors.New("parameter capacityReservationGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{capacityReservationGroupName}", url.PathEscape(capacityReservationGroupName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Expand != nil { + reqQP.Set("$expand", string(*options.Expand)) + } + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getHandleResponse handles the Get response. +func (client *CapacityReservationGroupsClient) getHandleResponse(resp *http.Response) (CapacityReservationGroupsClientGetResponse, error) { + result := CapacityReservationGroupsClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.CapacityReservationGroup); err != nil { + return CapacityReservationGroupsClientGetResponse{}, err + } + return result, nil +} + +// NewListByResourceGroupPager - Lists all of the capacity reservation groups in the specified resource group. Use the nextLink +// property in the response to get the next page of capacity reservation groups. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// options - CapacityReservationGroupsClientListByResourceGroupOptions contains the optional parameters for the CapacityReservationGroupsClient.ListByResourceGroup +// method. 
+func (client *CapacityReservationGroupsClient) NewListByResourceGroupPager(resourceGroupName string, options *CapacityReservationGroupsClientListByResourceGroupOptions) *runtime.Pager[CapacityReservationGroupsClientListByResourceGroupResponse] { + return runtime.NewPager(runtime.PagingHandler[CapacityReservationGroupsClientListByResourceGroupResponse]{ + More: func(page CapacityReservationGroupsClientListByResourceGroupResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *CapacityReservationGroupsClientListByResourceGroupResponse) (CapacityReservationGroupsClientListByResourceGroupResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listByResourceGroupCreateRequest(ctx, resourceGroupName, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return CapacityReservationGroupsClientListByResourceGroupResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return CapacityReservationGroupsClientListByResourceGroupResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return CapacityReservationGroupsClientListByResourceGroupResponse{}, runtime.NewResponseError(resp) + } + return client.listByResourceGroupHandleResponse(resp) + }, + }) +} + +// listByResourceGroupCreateRequest creates the ListByResourceGroup request. +func (client *CapacityReservationGroupsClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *CapacityReservationGroupsClientListByResourceGroupOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/capacityReservationGroups" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + if options != nil && options.Expand != nil { + reqQP.Set("$expand", string(*options.Expand)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listByResourceGroupHandleResponse handles the ListByResourceGroup response. +func (client *CapacityReservationGroupsClient) listByResourceGroupHandleResponse(resp *http.Response) (CapacityReservationGroupsClientListByResourceGroupResponse, error) { + result := CapacityReservationGroupsClientListByResourceGroupResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.CapacityReservationGroupListResult); err != nil { + return CapacityReservationGroupsClientListByResourceGroupResponse{}, err + } + return result, nil +} + +// NewListBySubscriptionPager - Lists all of the capacity reservation groups in the subscription. Use the nextLink property +// in the response to get the next page of capacity reservation groups. +// If the operation fails it returns an *azcore.ResponseError type. 
+// Generated from API version 2022-03-01 +// options - CapacityReservationGroupsClientListBySubscriptionOptions contains the optional parameters for the CapacityReservationGroupsClient.ListBySubscription +// method. +func (client *CapacityReservationGroupsClient) NewListBySubscriptionPager(options *CapacityReservationGroupsClientListBySubscriptionOptions) *runtime.Pager[CapacityReservationGroupsClientListBySubscriptionResponse] { + return runtime.NewPager(runtime.PagingHandler[CapacityReservationGroupsClientListBySubscriptionResponse]{ + More: func(page CapacityReservationGroupsClientListBySubscriptionResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *CapacityReservationGroupsClientListBySubscriptionResponse) (CapacityReservationGroupsClientListBySubscriptionResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listBySubscriptionCreateRequest(ctx, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return CapacityReservationGroupsClientListBySubscriptionResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return CapacityReservationGroupsClientListBySubscriptionResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return CapacityReservationGroupsClientListBySubscriptionResponse{}, runtime.NewResponseError(resp) + } + return client.listBySubscriptionHandleResponse(resp) + }, + }) +} + +// listBySubscriptionCreateRequest creates the ListBySubscription request. +func (client *CapacityReservationGroupsClient) listBySubscriptionCreateRequest(ctx context.Context, options *CapacityReservationGroupsClientListBySubscriptionOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/capacityReservationGroups" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + if options != nil && options.Expand != nil { + reqQP.Set("$expand", string(*options.Expand)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listBySubscriptionHandleResponse handles the ListBySubscription response. +func (client *CapacityReservationGroupsClient) listBySubscriptionHandleResponse(resp *http.Response) (CapacityReservationGroupsClientListBySubscriptionResponse, error) { + result := CapacityReservationGroupsClientListBySubscriptionResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.CapacityReservationGroupListResult); err != nil { + return CapacityReservationGroupsClientListBySubscriptionResponse{}, err + } + return result, nil +} + +// Update - The operation to update a capacity reservation group. When updating a capacity reservation group, only tags may +// be modified. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// capacityReservationGroupName - The name of the capacity reservation group. +// parameters - Parameters supplied to the Update capacity reservation Group operation. 
+// options - CapacityReservationGroupsClientUpdateOptions contains the optional parameters for the CapacityReservationGroupsClient.Update +// method. +func (client *CapacityReservationGroupsClient) Update(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, parameters CapacityReservationGroupUpdate, options *CapacityReservationGroupsClientUpdateOptions) (CapacityReservationGroupsClientUpdateResponse, error) { + req, err := client.updateCreateRequest(ctx, resourceGroupName, capacityReservationGroupName, parameters, options) + if err != nil { + return CapacityReservationGroupsClientUpdateResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return CapacityReservationGroupsClientUpdateResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return CapacityReservationGroupsClientUpdateResponse{}, runtime.NewResponseError(resp) + } + return client.updateHandleResponse(resp) +} + +// updateCreateRequest creates the Update request. +func (client *CapacityReservationGroupsClient) updateCreateRequest(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, parameters CapacityReservationGroupUpdate, options *CapacityReservationGroupsClientUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/capacityReservationGroups/{capacityReservationGroupName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if capacityReservationGroupName == "" { + return nil, errors.New("parameter capacityReservationGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{capacityReservationGroupName}", url.PathEscape(capacityReservationGroupName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, parameters) +} + +// updateHandleResponse handles the Update response. +func (client *CapacityReservationGroupsClient) updateHandleResponse(resp *http.Response) (CapacityReservationGroupsClientUpdateResponse, error) { + result := CapacityReservationGroupsClientUpdateResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.CapacityReservationGroup); err != nil { + return CapacityReservationGroupsClientUpdateResponse{}, err + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_capacityreservations_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_capacityreservations_client.go new file mode 100644 index 000000000..cc613b801 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_capacityreservations_client.go @@ -0,0 +1,406 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armcompute + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + armruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strings" +) + +// CapacityReservationsClient contains the methods for the CapacityReservations group. +// Don't use this type directly, use NewCapacityReservationsClient() instead. +type CapacityReservationsClient struct { + host string + subscriptionID string + pl runtime.Pipeline +} + +// NewCapacityReservationsClient creates a new instance of CapacityReservationsClient with the specified values. +// subscriptionID - Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms +// part of the URI for every service call. +// credential - used to authorize requests. Usually a credential from azidentity. +// options - pass nil to accept the default values. +func NewCapacityReservationsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*CapacityReservationsClient, error) { + if options == nil { + options = &arm.ClientOptions{} + } + ep := cloud.AzurePublic.Services[cloud.ResourceManager].Endpoint + if c, ok := options.Cloud.Services[cloud.ResourceManager]; ok { + ep = c.Endpoint + } + pl, err := armruntime.NewPipeline(moduleName, moduleVersion, credential, runtime.PipelineOptions{}, options) + if err != nil { + return nil, err + } + client := &CapacityReservationsClient{ + subscriptionID: subscriptionID, + host: ep, + pl: pl, + } + return client, nil +} + +// BeginCreateOrUpdate - The operation to create or update a capacity reservation. Please note some properties can be set +// only during capacity reservation creation. Please refer to https://aka.ms/CapacityReservation for more +// details. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// capacityReservationGroupName - The name of the capacity reservation group. +// capacityReservationName - The name of the capacity reservation. +// parameters - Parameters supplied to the Create capacity reservation. +// options - CapacityReservationsClientBeginCreateOrUpdateOptions contains the optional parameters for the CapacityReservationsClient.BeginCreateOrUpdate +// method. 
+func (client *CapacityReservationsClient) BeginCreateOrUpdate(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, capacityReservationName string, parameters CapacityReservation, options *CapacityReservationsClientBeginCreateOrUpdateOptions) (*runtime.Poller[CapacityReservationsClientCreateOrUpdateResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.createOrUpdate(ctx, resourceGroupName, capacityReservationGroupName, capacityReservationName, parameters, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[CapacityReservationsClientCreateOrUpdateResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[CapacityReservationsClientCreateOrUpdateResponse](options.ResumeToken, client.pl, nil) + } +} + +// CreateOrUpdate - The operation to create or update a capacity reservation. Please note some properties can be set only +// during capacity reservation creation. Please refer to https://aka.ms/CapacityReservation for more +// details. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +func (client *CapacityReservationsClient) createOrUpdate(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, capacityReservationName string, parameters CapacityReservation, options *CapacityReservationsClientBeginCreateOrUpdateOptions) (*http.Response, error) { + req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, capacityReservationGroupName, capacityReservationName, parameters, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// createOrUpdateCreateRequest creates the CreateOrUpdate request. 
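+//
+// Illustrative usage, not part of the generated client: the returned poller is
+// usually driven to completion with PollUntilDone; a minimal sketch with
+// placeholder names and an assumed, pre-built CapacityReservation value in
+// "parameters":
+//
+//    poller, err := client.BeginCreateOrUpdate(context.TODO(), "<resource-group>",
+//        "<reservation-group>", "<reservation>", parameters, nil)
+//    if err != nil {
+//        log.Fatal(err)
+//    }
+//    resp, err := poller.PollUntilDone(context.TODO(), nil)
+//    if err != nil {
+//        log.Fatal(err)
+//    }
+//    fmt.Println(*resp.CapacityReservation.ID)
+//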
+func (client *CapacityReservationsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, capacityReservationName string, parameters CapacityReservation, options *CapacityReservationsClientBeginCreateOrUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/capacityReservationGroups/{capacityReservationGroupName}/capacityReservations/{capacityReservationName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if capacityReservationGroupName == "" { + return nil, errors.New("parameter capacityReservationGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{capacityReservationGroupName}", url.PathEscape(capacityReservationGroupName)) + if capacityReservationName == "" { + return nil, errors.New("parameter capacityReservationName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{capacityReservationName}", url.PathEscape(capacityReservationName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, parameters) +} + +// BeginDelete - The operation to delete a capacity reservation. This operation is allowed only when all the associated resources +// are disassociated from the capacity reservation. Please refer to +// https://aka.ms/CapacityReservation for more details. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// capacityReservationGroupName - The name of the capacity reservation group. +// capacityReservationName - The name of the capacity reservation. +// options - CapacityReservationsClientBeginDeleteOptions contains the optional parameters for the CapacityReservationsClient.BeginDelete +// method. +func (client *CapacityReservationsClient) BeginDelete(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, capacityReservationName string, options *CapacityReservationsClientBeginDeleteOptions) (*runtime.Poller[CapacityReservationsClientDeleteResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.deleteOperation(ctx, resourceGroupName, capacityReservationGroupName, capacityReservationName, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[CapacityReservationsClientDeleteResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[CapacityReservationsClientDeleteResponse](options.ResumeToken, client.pl, nil) + } +} + +// Delete - The operation to delete a capacity reservation. This operation is allowed only when all the associated resources +// are disassociated from the capacity reservation. Please refer to +// https://aka.ms/CapacityReservation for more details. 
+// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +func (client *CapacityReservationsClient) deleteOperation(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, capacityReservationName string, options *CapacityReservationsClientBeginDeleteOptions) (*http.Response, error) { + req, err := client.deleteCreateRequest(ctx, resourceGroupName, capacityReservationGroupName, capacityReservationName, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted, http.StatusNoContent) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// deleteCreateRequest creates the Delete request. +func (client *CapacityReservationsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, capacityReservationName string, options *CapacityReservationsClientBeginDeleteOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/capacityReservationGroups/{capacityReservationGroupName}/capacityReservations/{capacityReservationName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if capacityReservationGroupName == "" { + return nil, errors.New("parameter capacityReservationGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{capacityReservationGroupName}", url.PathEscape(capacityReservationGroupName)) + if capacityReservationName == "" { + return nil, errors.New("parameter capacityReservationName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{capacityReservationName}", url.PathEscape(capacityReservationName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// Get - The operation that retrieves information about the capacity reservation. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// capacityReservationGroupName - The name of the capacity reservation group. +// capacityReservationName - The name of the capacity reservation. +// options - CapacityReservationsClientGetOptions contains the optional parameters for the CapacityReservationsClient.Get +// method. 
+func (client *CapacityReservationsClient) Get(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, capacityReservationName string, options *CapacityReservationsClientGetOptions) (CapacityReservationsClientGetResponse, error) { + req, err := client.getCreateRequest(ctx, resourceGroupName, capacityReservationGroupName, capacityReservationName, options) + if err != nil { + return CapacityReservationsClientGetResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return CapacityReservationsClientGetResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return CapacityReservationsClientGetResponse{}, runtime.NewResponseError(resp) + } + return client.getHandleResponse(resp) +} + +// getCreateRequest creates the Get request. +func (client *CapacityReservationsClient) getCreateRequest(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, capacityReservationName string, options *CapacityReservationsClientGetOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/capacityReservationGroups/{capacityReservationGroupName}/capacityReservations/{capacityReservationName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if capacityReservationGroupName == "" { + return nil, errors.New("parameter capacityReservationGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{capacityReservationGroupName}", url.PathEscape(capacityReservationGroupName)) + if capacityReservationName == "" { + return nil, errors.New("parameter capacityReservationName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{capacityReservationName}", url.PathEscape(capacityReservationName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Expand != nil { + reqQP.Set("$expand", string(*options.Expand)) + } + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getHandleResponse handles the Get response. +func (client *CapacityReservationsClient) getHandleResponse(resp *http.Response) (CapacityReservationsClientGetResponse, error) { + result := CapacityReservationsClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.CapacityReservation); err != nil { + return CapacityReservationsClientGetResponse{}, err + } + return result, nil +} + +// NewListByCapacityReservationGroupPager - Lists all of the capacity reservations in the specified capacity reservation group. +// Use the nextLink property in the response to get the next page of capacity reservations. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// capacityReservationGroupName - The name of the capacity reservation group. 
+// options - CapacityReservationsClientListByCapacityReservationGroupOptions contains the optional parameters for the CapacityReservationsClient.ListByCapacityReservationGroup +// method. +func (client *CapacityReservationsClient) NewListByCapacityReservationGroupPager(resourceGroupName string, capacityReservationGroupName string, options *CapacityReservationsClientListByCapacityReservationGroupOptions) *runtime.Pager[CapacityReservationsClientListByCapacityReservationGroupResponse] { + return runtime.NewPager(runtime.PagingHandler[CapacityReservationsClientListByCapacityReservationGroupResponse]{ + More: func(page CapacityReservationsClientListByCapacityReservationGroupResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *CapacityReservationsClientListByCapacityReservationGroupResponse) (CapacityReservationsClientListByCapacityReservationGroupResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listByCapacityReservationGroupCreateRequest(ctx, resourceGroupName, capacityReservationGroupName, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return CapacityReservationsClientListByCapacityReservationGroupResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return CapacityReservationsClientListByCapacityReservationGroupResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return CapacityReservationsClientListByCapacityReservationGroupResponse{}, runtime.NewResponseError(resp) + } + return client.listByCapacityReservationGroupHandleResponse(resp) + }, + }) +} + +// listByCapacityReservationGroupCreateRequest creates the ListByCapacityReservationGroup request. +func (client *CapacityReservationsClient) listByCapacityReservationGroupCreateRequest(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, options *CapacityReservationsClientListByCapacityReservationGroupOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/capacityReservationGroups/{capacityReservationGroupName}/capacityReservations" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if capacityReservationGroupName == "" { + return nil, errors.New("parameter capacityReservationGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{capacityReservationGroupName}", url.PathEscape(capacityReservationGroupName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listByCapacityReservationGroupHandleResponse handles the ListByCapacityReservationGroup response. 
+func (client *CapacityReservationsClient) listByCapacityReservationGroupHandleResponse(resp *http.Response) (CapacityReservationsClientListByCapacityReservationGroupResponse, error) { + result := CapacityReservationsClientListByCapacityReservationGroupResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.CapacityReservationListResult); err != nil { + return CapacityReservationsClientListByCapacityReservationGroupResponse{}, err + } + return result, nil +} + +// BeginUpdate - The operation to update a capacity reservation. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// capacityReservationGroupName - The name of the capacity reservation group. +// capacityReservationName - The name of the capacity reservation. +// parameters - Parameters supplied to the Update capacity reservation operation. +// options - CapacityReservationsClientBeginUpdateOptions contains the optional parameters for the CapacityReservationsClient.BeginUpdate +// method. +func (client *CapacityReservationsClient) BeginUpdate(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, capacityReservationName string, parameters CapacityReservationUpdate, options *CapacityReservationsClientBeginUpdateOptions) (*runtime.Poller[CapacityReservationsClientUpdateResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.update(ctx, resourceGroupName, capacityReservationGroupName, capacityReservationName, parameters, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[CapacityReservationsClientUpdateResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[CapacityReservationsClientUpdateResponse](options.ResumeToken, client.pl, nil) + } +} + +// Update - The operation to update a capacity reservation. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +func (client *CapacityReservationsClient) update(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, capacityReservationName string, parameters CapacityReservationUpdate, options *CapacityReservationsClientBeginUpdateOptions) (*http.Response, error) { + req, err := client.updateCreateRequest(ctx, resourceGroupName, capacityReservationGroupName, capacityReservationName, parameters, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// updateCreateRequest creates the Update request. 
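The BeginUpdate poller defined above is typically driven to completion with PollUntilDone. A sketch, assuming the client and ctx from the earlier example inside a function returning error; the names and the empty update payload are placeholders:

    poller, err := client.BeginUpdate(ctx, "<resource-group>", "<group-name>", "<reservation-name>",
        armcompute.CapacityReservationUpdate{
            // Tag or SKU capacity changes go here; left empty as a placeholder.
        }, nil)
    if err != nil {
        return err
    }
    // Blocks until the long-running operation reaches a terminal state,
    // polling at the interval suggested by the service.
    res, err := poller.PollUntilDone(ctx, nil)
    if err != nil {
        return err
    }
    log.Println("updated:", *res.CapacityReservation.Name)
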
+func (client *CapacityReservationsClient) updateCreateRequest(ctx context.Context, resourceGroupName string, capacityReservationGroupName string, capacityReservationName string, parameters CapacityReservationUpdate, options *CapacityReservationsClientBeginUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/capacityReservationGroups/{capacityReservationGroupName}/capacityReservations/{capacityReservationName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if capacityReservationGroupName == "" { + return nil, errors.New("parameter capacityReservationGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{capacityReservationGroupName}", url.PathEscape(capacityReservationGroupName)) + if capacityReservationName == "" { + return nil, errors.New("parameter capacityReservationName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{capacityReservationName}", url.PathEscape(capacityReservationName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, parameters) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_cloudserviceoperatingsystems_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_cloudserviceoperatingsystems_client.go new file mode 100644 index 000000000..16515cffb --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_cloudserviceoperatingsystems_client.go @@ -0,0 +1,306 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armcompute + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + armruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strings" +) + +// CloudServiceOperatingSystemsClient contains the methods for the CloudServiceOperatingSystems group. +// Don't use this type directly, use NewCloudServiceOperatingSystemsClient() instead. +type CloudServiceOperatingSystemsClient struct { + host string + subscriptionID string + pl runtime.Pipeline +} + +// NewCloudServiceOperatingSystemsClient creates a new instance of CloudServiceOperatingSystemsClient with the specified values. 
+// subscriptionID - Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms +// part of the URI for every service call. +// credential - used to authorize requests. Usually a credential from azidentity. +// options - pass nil to accept the default values. +func NewCloudServiceOperatingSystemsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*CloudServiceOperatingSystemsClient, error) { + if options == nil { + options = &arm.ClientOptions{} + } + ep := cloud.AzurePublic.Services[cloud.ResourceManager].Endpoint + if c, ok := options.Cloud.Services[cloud.ResourceManager]; ok { + ep = c.Endpoint + } + pl, err := armruntime.NewPipeline(moduleName, moduleVersion, credential, runtime.PipelineOptions{}, options) + if err != nil { + return nil, err + } + client := &CloudServiceOperatingSystemsClient{ + subscriptionID: subscriptionID, + host: ep, + pl: pl, + } + return client, nil +} + +// GetOSFamily - Gets properties of a guest operating system family that can be specified in the XML service configuration +// (.cscfg) for a cloud service. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-03-01 +// location - Name of the location that the OS family pertains to. +// osFamilyName - Name of the OS family. +// options - CloudServiceOperatingSystemsClientGetOSFamilyOptions contains the optional parameters for the CloudServiceOperatingSystemsClient.GetOSFamily +// method. +func (client *CloudServiceOperatingSystemsClient) GetOSFamily(ctx context.Context, location string, osFamilyName string, options *CloudServiceOperatingSystemsClientGetOSFamilyOptions) (CloudServiceOperatingSystemsClientGetOSFamilyResponse, error) { + req, err := client.getOSFamilyCreateRequest(ctx, location, osFamilyName, options) + if err != nil { + return CloudServiceOperatingSystemsClientGetOSFamilyResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return CloudServiceOperatingSystemsClientGetOSFamilyResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return CloudServiceOperatingSystemsClientGetOSFamilyResponse{}, runtime.NewResponseError(resp) + } + return client.getOSFamilyHandleResponse(resp) +} + +// getOSFamilyCreateRequest creates the GetOSFamily request. 
+func (client *CloudServiceOperatingSystemsClient) getOSFamilyCreateRequest(ctx context.Context, location string, osFamilyName string, options *CloudServiceOperatingSystemsClientGetOSFamilyOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/cloudServiceOsFamilies/{osFamilyName}" + if location == "" { + return nil, errors.New("parameter location cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{location}", url.PathEscape(location)) + if osFamilyName == "" { + return nil, errors.New("parameter osFamilyName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{osFamilyName}", url.PathEscape(osFamilyName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getOSFamilyHandleResponse handles the GetOSFamily response. +func (client *CloudServiceOperatingSystemsClient) getOSFamilyHandleResponse(resp *http.Response) (CloudServiceOperatingSystemsClientGetOSFamilyResponse, error) { + result := CloudServiceOperatingSystemsClientGetOSFamilyResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.OSFamily); err != nil { + return CloudServiceOperatingSystemsClientGetOSFamilyResponse{}, err + } + return result, nil +} + +// GetOSVersion - Gets properties of a guest operating system version that can be specified in the XML service configuration +// (.cscfg) for a cloud service. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-03-01 +// location - Name of the location that the OS version pertains to. +// osVersionName - Name of the OS version. +// options - CloudServiceOperatingSystemsClientGetOSVersionOptions contains the optional parameters for the CloudServiceOperatingSystemsClient.GetOSVersion +// method. +func (client *CloudServiceOperatingSystemsClient) GetOSVersion(ctx context.Context, location string, osVersionName string, options *CloudServiceOperatingSystemsClientGetOSVersionOptions) (CloudServiceOperatingSystemsClientGetOSVersionResponse, error) { + req, err := client.getOSVersionCreateRequest(ctx, location, osVersionName, options) + if err != nil { + return CloudServiceOperatingSystemsClientGetOSVersionResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return CloudServiceOperatingSystemsClientGetOSVersionResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return CloudServiceOperatingSystemsClientGetOSVersionResponse{}, runtime.NewResponseError(resp) + } + return client.getOSVersionHandleResponse(resp) +} + +// getOSVersionCreateRequest creates the GetOSVersion request. 
+func (client *CloudServiceOperatingSystemsClient) getOSVersionCreateRequest(ctx context.Context, location string, osVersionName string, options *CloudServiceOperatingSystemsClientGetOSVersionOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/cloudServiceOsVersions/{osVersionName}" + if location == "" { + return nil, errors.New("parameter location cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{location}", url.PathEscape(location)) + if osVersionName == "" { + return nil, errors.New("parameter osVersionName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{osVersionName}", url.PathEscape(osVersionName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getOSVersionHandleResponse handles the GetOSVersion response. +func (client *CloudServiceOperatingSystemsClient) getOSVersionHandleResponse(resp *http.Response) (CloudServiceOperatingSystemsClientGetOSVersionResponse, error) { + result := CloudServiceOperatingSystemsClientGetOSVersionResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.OSVersion); err != nil { + return CloudServiceOperatingSystemsClientGetOSVersionResponse{}, err + } + return result, nil +} + +// NewListOSFamiliesPager - Gets a list of all guest operating system families available to be specified in the XML service +// configuration (.cscfg) for a cloud service. Use nextLink property in the response to get the next page +// of OS Families. Do this till nextLink is null to fetch all the OS Families. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-03-01 +// location - Name of the location that the OS families pertain to. +// options - CloudServiceOperatingSystemsClientListOSFamiliesOptions contains the optional parameters for the CloudServiceOperatingSystemsClient.ListOSFamilies +// method. 
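The same runtime.Pager idiom applies to this method. A sketch of walking every OS family in a location, assuming a CloudServiceOperatingSystemsClient and ctx in scope and a placeholder location:

    pager := client.NewListOSFamiliesPager("<location>", nil)
    for pager.More() {
        page, err := pager.NextPage(ctx)
        if err != nil {
            return err
        }
        for _, family := range page.Value {
            // Each entry is an *OSFamily; Name is its resource name.
            log.Println(*family.Name)
        }
    }
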
+func (client *CloudServiceOperatingSystemsClient) NewListOSFamiliesPager(location string, options *CloudServiceOperatingSystemsClientListOSFamiliesOptions) *runtime.Pager[CloudServiceOperatingSystemsClientListOSFamiliesResponse] { + return runtime.NewPager(runtime.PagingHandler[CloudServiceOperatingSystemsClientListOSFamiliesResponse]{ + More: func(page CloudServiceOperatingSystemsClientListOSFamiliesResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *CloudServiceOperatingSystemsClientListOSFamiliesResponse) (CloudServiceOperatingSystemsClientListOSFamiliesResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listOSFamiliesCreateRequest(ctx, location, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return CloudServiceOperatingSystemsClientListOSFamiliesResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return CloudServiceOperatingSystemsClientListOSFamiliesResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return CloudServiceOperatingSystemsClientListOSFamiliesResponse{}, runtime.NewResponseError(resp) + } + return client.listOSFamiliesHandleResponse(resp) + }, + }) +} + +// listOSFamiliesCreateRequest creates the ListOSFamilies request. +func (client *CloudServiceOperatingSystemsClient) listOSFamiliesCreateRequest(ctx context.Context, location string, options *CloudServiceOperatingSystemsClientListOSFamiliesOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/cloudServiceOsFamilies" + if location == "" { + return nil, errors.New("parameter location cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{location}", url.PathEscape(location)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listOSFamiliesHandleResponse handles the ListOSFamilies response. +func (client *CloudServiceOperatingSystemsClient) listOSFamiliesHandleResponse(resp *http.Response) (CloudServiceOperatingSystemsClientListOSFamiliesResponse, error) { + result := CloudServiceOperatingSystemsClientListOSFamiliesResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.OSFamilyListResult); err != nil { + return CloudServiceOperatingSystemsClientListOSFamiliesResponse{}, err + } + return result, nil +} + +// NewListOSVersionsPager - Gets a list of all guest operating system versions available to be specified in the XML service +// configuration (.cscfg) for a cloud service. Use nextLink property in the response to get the next page +// of OS versions. Do this till nextLink is null to fetch all the OS versions. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-03-01 +// location - Name of the location that the OS versions pertain to. 
+// options - CloudServiceOperatingSystemsClientListOSVersionsOptions contains the optional parameters for the CloudServiceOperatingSystemsClient.ListOSVersions +// method. +func (client *CloudServiceOperatingSystemsClient) NewListOSVersionsPager(location string, options *CloudServiceOperatingSystemsClientListOSVersionsOptions) *runtime.Pager[CloudServiceOperatingSystemsClientListOSVersionsResponse] { + return runtime.NewPager(runtime.PagingHandler[CloudServiceOperatingSystemsClientListOSVersionsResponse]{ + More: func(page CloudServiceOperatingSystemsClientListOSVersionsResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *CloudServiceOperatingSystemsClientListOSVersionsResponse) (CloudServiceOperatingSystemsClientListOSVersionsResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listOSVersionsCreateRequest(ctx, location, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return CloudServiceOperatingSystemsClientListOSVersionsResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return CloudServiceOperatingSystemsClientListOSVersionsResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return CloudServiceOperatingSystemsClientListOSVersionsResponse{}, runtime.NewResponseError(resp) + } + return client.listOSVersionsHandleResponse(resp) + }, + }) +} + +// listOSVersionsCreateRequest creates the ListOSVersions request. +func (client *CloudServiceOperatingSystemsClient) listOSVersionsCreateRequest(ctx context.Context, location string, options *CloudServiceOperatingSystemsClientListOSVersionsOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/cloudServiceOsVersions" + if location == "" { + return nil, errors.New("parameter location cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{location}", url.PathEscape(location)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listOSVersionsHandleResponse handles the ListOSVersions response. 
+func (client *CloudServiceOperatingSystemsClient) listOSVersionsHandleResponse(resp *http.Response) (CloudServiceOperatingSystemsClientListOSVersionsResponse, error) { + result := CloudServiceOperatingSystemsClientListOSVersionsResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.OSVersionListResult); err != nil { + return CloudServiceOperatingSystemsClientListOSVersionsResponse{}, err + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_cloudserviceroleinstances_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_cloudserviceroleinstances_client.go new file mode 100644 index 000000000..a7b01de46 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_cloudserviceroleinstances_client.go @@ -0,0 +1,573 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armcompute + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + armruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strings" +) + +// CloudServiceRoleInstancesClient contains the methods for the CloudServiceRoleInstances group. +// Don't use this type directly, use NewCloudServiceRoleInstancesClient() instead. +type CloudServiceRoleInstancesClient struct { + host string + subscriptionID string + pl runtime.Pipeline +} + +// NewCloudServiceRoleInstancesClient creates a new instance of CloudServiceRoleInstancesClient with the specified values. +// subscriptionID - Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms +// part of the URI for every service call. +// credential - used to authorize requests. Usually a credential from azidentity. +// options - pass nil to accept the default values. +func NewCloudServiceRoleInstancesClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*CloudServiceRoleInstancesClient, error) { + if options == nil { + options = &arm.ClientOptions{} + } + ep := cloud.AzurePublic.Services[cloud.ResourceManager].Endpoint + if c, ok := options.Cloud.Services[cloud.ResourceManager]; ok { + ep = c.Endpoint + } + pl, err := armruntime.NewPipeline(moduleName, moduleVersion, credential, runtime.PipelineOptions{}, options) + if err != nil { + return nil, err + } + client := &CloudServiceRoleInstancesClient{ + subscriptionID: subscriptionID, + host: ep, + pl: pl, + } + return client, nil +} + +// BeginDelete - Deletes a role instance from a cloud service. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-03-01 +// roleInstanceName - Name of the role instance. +// options - CloudServiceRoleInstancesClientBeginDeleteOptions contains the optional parameters for the CloudServiceRoleInstancesClient.BeginDelete +// method. 
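A sketch of invoking this delete operation and waiting for it to finish, assuming a CloudServiceRoleInstancesClient and ctx in scope inside a function returning error; all names are placeholders:

    poller, err := client.BeginDelete(ctx, "<role-instance>", "<resource-group>", "<cloud-service>", nil)
    if err != nil {
        return err
    }
    // Delete returns no payload of interest, so the result is discarded.
    if _, err := poller.PollUntilDone(ctx, nil); err != nil {
        return err
    }
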
+func (client *CloudServiceRoleInstancesClient) BeginDelete(ctx context.Context, roleInstanceName string, resourceGroupName string, cloudServiceName string, options *CloudServiceRoleInstancesClientBeginDeleteOptions) (*runtime.Poller[CloudServiceRoleInstancesClientDeleteResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.deleteOperation(ctx, roleInstanceName, resourceGroupName, cloudServiceName, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[CloudServiceRoleInstancesClientDeleteResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[CloudServiceRoleInstancesClientDeleteResponse](options.ResumeToken, client.pl, nil) + } +} + +// Delete - Deletes a role instance from a cloud service. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-03-01 +func (client *CloudServiceRoleInstancesClient) deleteOperation(ctx context.Context, roleInstanceName string, resourceGroupName string, cloudServiceName string, options *CloudServiceRoleInstancesClientBeginDeleteOptions) (*http.Response, error) { + req, err := client.deleteCreateRequest(ctx, roleInstanceName, resourceGroupName, cloudServiceName, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted, http.StatusNoContent) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// deleteCreateRequest creates the Delete request. +func (client *CloudServiceRoleInstancesClient) deleteCreateRequest(ctx context.Context, roleInstanceName string, resourceGroupName string, cloudServiceName string, options *CloudServiceRoleInstancesClientBeginDeleteOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roleInstances/{roleInstanceName}" + if roleInstanceName == "" { + return nil, errors.New("parameter roleInstanceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{roleInstanceName}", url.PathEscape(roleInstanceName)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if cloudServiceName == "" { + return nil, errors.New("parameter cloudServiceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{cloudServiceName}", url.PathEscape(cloudServiceName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// Get - Gets a role instance from a cloud service. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-03-01 +// roleInstanceName - Name of the role instance. +// options - CloudServiceRoleInstancesClientGetOptions contains the optional parameters for the CloudServiceRoleInstancesClient.Get +// method. 
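Since failed calls surface an *azcore.ResponseError, callers can branch on the HTTP status. A sketch, assuming the errors, net/http, and azcore packages are imported and client and ctx are in scope; names are placeholders:

    res, err := client.Get(ctx, "<role-instance>", "<resource-group>", "<cloud-service>", nil)
    if err != nil {
        var respErr *azcore.ResponseError
        if errors.As(err, &respErr) && respErr.StatusCode == http.StatusNotFound {
            log.Println("role instance not found")
            return nil
        }
        return err
    }
    log.Println(*res.RoleInstance.Name)
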
+func (client *CloudServiceRoleInstancesClient) Get(ctx context.Context, roleInstanceName string, resourceGroupName string, cloudServiceName string, options *CloudServiceRoleInstancesClientGetOptions) (CloudServiceRoleInstancesClientGetResponse, error) { + req, err := client.getCreateRequest(ctx, roleInstanceName, resourceGroupName, cloudServiceName, options) + if err != nil { + return CloudServiceRoleInstancesClientGetResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return CloudServiceRoleInstancesClientGetResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return CloudServiceRoleInstancesClientGetResponse{}, runtime.NewResponseError(resp) + } + return client.getHandleResponse(resp) +} + +// getCreateRequest creates the Get request. +func (client *CloudServiceRoleInstancesClient) getCreateRequest(ctx context.Context, roleInstanceName string, resourceGroupName string, cloudServiceName string, options *CloudServiceRoleInstancesClientGetOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roleInstances/{roleInstanceName}" + if roleInstanceName == "" { + return nil, errors.New("parameter roleInstanceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{roleInstanceName}", url.PathEscape(roleInstanceName)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if cloudServiceName == "" { + return nil, errors.New("parameter cloudServiceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{cloudServiceName}", url.PathEscape(cloudServiceName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-03-01") + if options != nil && options.Expand != nil { + reqQP.Set("$expand", string(*options.Expand)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getHandleResponse handles the Get response. +func (client *CloudServiceRoleInstancesClient) getHandleResponse(resp *http.Response) (CloudServiceRoleInstancesClientGetResponse, error) { + result := CloudServiceRoleInstancesClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.RoleInstance); err != nil { + return CloudServiceRoleInstancesClientGetResponse{}, err + } + return result, nil +} + +// GetInstanceView - Retrieves information about the run-time state of a role instance in a cloud service. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-03-01 +// roleInstanceName - Name of the role instance. +// options - CloudServiceRoleInstancesClientGetInstanceViewOptions contains the optional parameters for the CloudServiceRoleInstancesClient.GetInstanceView +// method. 
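A sketch of reading the instance view, assuming client and ctx in scope and assuming the RoleInstanceView payload exposes Statuses entries with a Code field, as other armcompute instance views do:

    view, err := client.GetInstanceView(ctx, "<role-instance>", "<resource-group>", "<cloud-service>", nil)
    if err != nil {
        return err
    }
    for _, status := range view.RoleInstanceView.Statuses {
        log.Println(*status.Code)
    }
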
+func (client *CloudServiceRoleInstancesClient) GetInstanceView(ctx context.Context, roleInstanceName string, resourceGroupName string, cloudServiceName string, options *CloudServiceRoleInstancesClientGetInstanceViewOptions) (CloudServiceRoleInstancesClientGetInstanceViewResponse, error) { + req, err := client.getInstanceViewCreateRequest(ctx, roleInstanceName, resourceGroupName, cloudServiceName, options) + if err != nil { + return CloudServiceRoleInstancesClientGetInstanceViewResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return CloudServiceRoleInstancesClientGetInstanceViewResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return CloudServiceRoleInstancesClientGetInstanceViewResponse{}, runtime.NewResponseError(resp) + } + return client.getInstanceViewHandleResponse(resp) +} + +// getInstanceViewCreateRequest creates the GetInstanceView request. +func (client *CloudServiceRoleInstancesClient) getInstanceViewCreateRequest(ctx context.Context, roleInstanceName string, resourceGroupName string, cloudServiceName string, options *CloudServiceRoleInstancesClientGetInstanceViewOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roleInstances/{roleInstanceName}/instanceView" + if roleInstanceName == "" { + return nil, errors.New("parameter roleInstanceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{roleInstanceName}", url.PathEscape(roleInstanceName)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if cloudServiceName == "" { + return nil, errors.New("parameter cloudServiceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{cloudServiceName}", url.PathEscape(cloudServiceName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getInstanceViewHandleResponse handles the GetInstanceView response. +func (client *CloudServiceRoleInstancesClient) getInstanceViewHandleResponse(resp *http.Response) (CloudServiceRoleInstancesClientGetInstanceViewResponse, error) { + result := CloudServiceRoleInstancesClientGetInstanceViewResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.RoleInstanceView); err != nil { + return CloudServiceRoleInstancesClientGetInstanceViewResponse{}, err + } + return result, nil +} + +// GetRemoteDesktopFile - Gets a remote desktop file for a role instance in a cloud service. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-03-01 +// roleInstanceName - Name of the role instance. +// options - CloudServiceRoleInstancesClientGetRemoteDesktopFileOptions contains the optional parameters for the CloudServiceRoleInstancesClient.GetRemoteDesktopFile +// method. 
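Because the request below uses SkipBodyDownload, the response body is streamed and the caller owns draining and closing it. A sketch that saves the RDP file to disk, assuming io and os are imported and client and ctx are in scope; the output filename is a placeholder:

    res, err := client.GetRemoteDesktopFile(ctx, "<role-instance>", "<resource-group>", "<cloud-service>", nil)
    if err != nil {
        return err
    }
    defer res.Body.Close()
    data, err := io.ReadAll(res.Body)
    if err != nil {
        return err
    }
    if err := os.WriteFile("instance.rdp", data, 0o600); err != nil {
        return err
    }
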
+func (client *CloudServiceRoleInstancesClient) GetRemoteDesktopFile(ctx context.Context, roleInstanceName string, resourceGroupName string, cloudServiceName string, options *CloudServiceRoleInstancesClientGetRemoteDesktopFileOptions) (CloudServiceRoleInstancesClientGetRemoteDesktopFileResponse, error) { + req, err := client.getRemoteDesktopFileCreateRequest(ctx, roleInstanceName, resourceGroupName, cloudServiceName, options) + if err != nil { + return CloudServiceRoleInstancesClientGetRemoteDesktopFileResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return CloudServiceRoleInstancesClientGetRemoteDesktopFileResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return CloudServiceRoleInstancesClientGetRemoteDesktopFileResponse{}, runtime.NewResponseError(resp) + } + return CloudServiceRoleInstancesClientGetRemoteDesktopFileResponse{Body: resp.Body}, nil +} + +// getRemoteDesktopFileCreateRequest creates the GetRemoteDesktopFile request. +func (client *CloudServiceRoleInstancesClient) getRemoteDesktopFileCreateRequest(ctx context.Context, roleInstanceName string, resourceGroupName string, cloudServiceName string, options *CloudServiceRoleInstancesClientGetRemoteDesktopFileOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roleInstances/{roleInstanceName}/remoteDesktopFile" + if roleInstanceName == "" { + return nil, errors.New("parameter roleInstanceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{roleInstanceName}", url.PathEscape(roleInstanceName)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if cloudServiceName == "" { + return nil, errors.New("parameter cloudServiceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{cloudServiceName}", url.PathEscape(cloudServiceName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + runtime.SkipBodyDownload(req) + req.Raw().Header["Accept"] = []string{"application/x-rdp"} + return req, nil +} + +// NewListPager - Gets the list of all role instances in a cloud service. Use nextLink property in the response to get the +// next page of role instances. Do this till nextLink is null to fetch all the role instances. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-03-01 +// options - CloudServiceRoleInstancesClientListOptions contains the optional parameters for the CloudServiceRoleInstancesClient.List +// method. 
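A sketch that pages through this list and tallies the instances, assuming client and ctx in scope and placeholder names; passing nil options omits the optional $expand parameter handled below:

    pager := client.NewListPager("<resource-group>", "<cloud-service>", nil)
    total := 0
    for pager.More() {
        page, err := pager.NextPage(ctx)
        if err != nil {
            return err
        }
        total += len(page.Value)
    }
    log.Printf("cloud service has %d role instances", total)
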
+func (client *CloudServiceRoleInstancesClient) NewListPager(resourceGroupName string, cloudServiceName string, options *CloudServiceRoleInstancesClientListOptions) *runtime.Pager[CloudServiceRoleInstancesClientListResponse] { + return runtime.NewPager(runtime.PagingHandler[CloudServiceRoleInstancesClientListResponse]{ + More: func(page CloudServiceRoleInstancesClientListResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *CloudServiceRoleInstancesClientListResponse) (CloudServiceRoleInstancesClientListResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listCreateRequest(ctx, resourceGroupName, cloudServiceName, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return CloudServiceRoleInstancesClientListResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return CloudServiceRoleInstancesClientListResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return CloudServiceRoleInstancesClientListResponse{}, runtime.NewResponseError(resp) + } + return client.listHandleResponse(resp) + }, + }) +} + +// listCreateRequest creates the List request. +func (client *CloudServiceRoleInstancesClient) listCreateRequest(ctx context.Context, resourceGroupName string, cloudServiceName string, options *CloudServiceRoleInstancesClientListOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roleInstances" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if cloudServiceName == "" { + return nil, errors.New("parameter cloudServiceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{cloudServiceName}", url.PathEscape(cloudServiceName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-03-01") + if options != nil && options.Expand != nil { + reqQP.Set("$expand", string(*options.Expand)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listHandleResponse handles the List response. +func (client *CloudServiceRoleInstancesClient) listHandleResponse(resp *http.Response) (CloudServiceRoleInstancesClientListResponse, error) { + result := CloudServiceRoleInstancesClientListResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.RoleInstanceListResult); err != nil { + return CloudServiceRoleInstancesClientListResponse{}, err + } + return result, nil +} + +// BeginRebuild - The Rebuild Role Instance asynchronous operation reinstalls the operating system on instances of web roles +// or worker roles and initializes the storage resources that are used by them. If you do not +// want to initialize storage resources, you can use Reimage Role Instance. +// If the operation fails it returns an *azcore.ResponseError type. 
+// Generated from API version 2021-03-01 +// roleInstanceName - Name of the role instance. +// options - CloudServiceRoleInstancesClientBeginRebuildOptions contains the optional parameters for the CloudServiceRoleInstancesClient.BeginRebuild +// method. +func (client *CloudServiceRoleInstancesClient) BeginRebuild(ctx context.Context, roleInstanceName string, resourceGroupName string, cloudServiceName string, options *CloudServiceRoleInstancesClientBeginRebuildOptions) (*runtime.Poller[CloudServiceRoleInstancesClientRebuildResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.rebuild(ctx, roleInstanceName, resourceGroupName, cloudServiceName, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[CloudServiceRoleInstancesClientRebuildResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[CloudServiceRoleInstancesClientRebuildResponse](options.ResumeToken, client.pl, nil) + } +} + +// Rebuild - The Rebuild Role Instance asynchronous operation reinstalls the operating system on instances of web roles or +// worker roles and initializes the storage resources that are used by them. If you do not +// want to initialize storage resources, you can use Reimage Role Instance. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-03-01 +func (client *CloudServiceRoleInstancesClient) rebuild(ctx context.Context, roleInstanceName string, resourceGroupName string, cloudServiceName string, options *CloudServiceRoleInstancesClientBeginRebuildOptions) (*http.Response, error) { + req, err := client.rebuildCreateRequest(ctx, roleInstanceName, resourceGroupName, cloudServiceName, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// rebuildCreateRequest creates the Rebuild request. 
+func (client *CloudServiceRoleInstancesClient) rebuildCreateRequest(ctx context.Context, roleInstanceName string, resourceGroupName string, cloudServiceName string, options *CloudServiceRoleInstancesClientBeginRebuildOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roleInstances/{roleInstanceName}/rebuild" + if roleInstanceName == "" { + return nil, errors.New("parameter roleInstanceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{roleInstanceName}", url.PathEscape(roleInstanceName)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if cloudServiceName == "" { + return nil, errors.New("parameter cloudServiceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{cloudServiceName}", url.PathEscape(cloudServiceName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// BeginReimage - The Reimage Role Instance asynchronous operation reinstalls the operating system on instances of web roles +// or worker roles. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-03-01 +// roleInstanceName - Name of the role instance. +// options - CloudServiceRoleInstancesClientBeginReimageOptions contains the optional parameters for the CloudServiceRoleInstancesClient.BeginReimage +// method. +func (client *CloudServiceRoleInstancesClient) BeginReimage(ctx context.Context, roleInstanceName string, resourceGroupName string, cloudServiceName string, options *CloudServiceRoleInstancesClientBeginReimageOptions) (*runtime.Poller[CloudServiceRoleInstancesClientReimageResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.reimage(ctx, roleInstanceName, resourceGroupName, cloudServiceName, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[CloudServiceRoleInstancesClientReimageResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[CloudServiceRoleInstancesClientReimageResponse](options.ResumeToken, client.pl, nil) + } +} + +// Reimage - The Reimage Role Instance asynchronous operation reinstalls the operating system on instances of web roles or +// worker roles. +// If the operation fails it returns an *azcore.ResponseError type. 
+// Generated from API version 2021-03-01 +func (client *CloudServiceRoleInstancesClient) reimage(ctx context.Context, roleInstanceName string, resourceGroupName string, cloudServiceName string, options *CloudServiceRoleInstancesClientBeginReimageOptions) (*http.Response, error) { + req, err := client.reimageCreateRequest(ctx, roleInstanceName, resourceGroupName, cloudServiceName, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// reimageCreateRequest creates the Reimage request. +func (client *CloudServiceRoleInstancesClient) reimageCreateRequest(ctx context.Context, roleInstanceName string, resourceGroupName string, cloudServiceName string, options *CloudServiceRoleInstancesClientBeginReimageOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roleInstances/{roleInstanceName}/reimage" + if roleInstanceName == "" { + return nil, errors.New("parameter roleInstanceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{roleInstanceName}", url.PathEscape(roleInstanceName)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if cloudServiceName == "" { + return nil, errors.New("parameter cloudServiceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{cloudServiceName}", url.PathEscape(cloudServiceName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// BeginRestart - The Reboot Role Instance asynchronous operation requests a reboot of a role instance in the cloud service. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-03-01 +// roleInstanceName - Name of the role instance. +// options - CloudServiceRoleInstancesClientBeginRestartOptions contains the optional parameters for the CloudServiceRoleInstancesClient.BeginRestart +// method. 
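As the implementation below shows, a non-empty options.ResumeToken bypasses the initial request and rehydrates the poller via NewPollerFromResumeToken. A sketch of capturing and resuming such a token, assuming client and ctx in scope and placeholder names:

    poller, err := client.BeginRestart(ctx, "<role-instance>", "<resource-group>", "<cloud-service>", nil)
    if err != nil {
        return err
    }
    // Persist the token so a later process can pick the operation back up.
    token, err := poller.ResumeToken()
    if err != nil {
        return err
    }
    // Elsewhere, resume polling from the saved token.
    poller, err = client.BeginRestart(ctx, "<role-instance>", "<resource-group>", "<cloud-service>",
        &armcompute.CloudServiceRoleInstancesClientBeginRestartOptions{ResumeToken: token})
    if err != nil {
        return err
    }
    if _, err := poller.PollUntilDone(ctx, nil); err != nil {
        return err
    }
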
+func (client *CloudServiceRoleInstancesClient) BeginRestart(ctx context.Context, roleInstanceName string, resourceGroupName string, cloudServiceName string, options *CloudServiceRoleInstancesClientBeginRestartOptions) (*runtime.Poller[CloudServiceRoleInstancesClientRestartResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.restart(ctx, roleInstanceName, resourceGroupName, cloudServiceName, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[CloudServiceRoleInstancesClientRestartResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[CloudServiceRoleInstancesClientRestartResponse](options.ResumeToken, client.pl, nil) + } +} + +// Restart - The Reboot Role Instance asynchronous operation requests a reboot of a role instance in the cloud service. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-03-01 +func (client *CloudServiceRoleInstancesClient) restart(ctx context.Context, roleInstanceName string, resourceGroupName string, cloudServiceName string, options *CloudServiceRoleInstancesClientBeginRestartOptions) (*http.Response, error) { + req, err := client.restartCreateRequest(ctx, roleInstanceName, resourceGroupName, cloudServiceName, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// restartCreateRequest creates the Restart request. +func (client *CloudServiceRoleInstancesClient) restartCreateRequest(ctx context.Context, roleInstanceName string, resourceGroupName string, cloudServiceName string, options *CloudServiceRoleInstancesClientBeginRestartOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roleInstances/{roleInstanceName}/restart" + if roleInstanceName == "" { + return nil, errors.New("parameter roleInstanceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{roleInstanceName}", url.PathEscape(roleInstanceName)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if cloudServiceName == "" { + return nil, errors.New("parameter cloudServiceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{cloudServiceName}", url.PathEscape(cloudServiceName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_cloudserviceroles_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_cloudserviceroles_client.go new file mode 100644 index 000000000..19cec8958 --- /dev/null +++ 
b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_cloudserviceroles_client.go @@ -0,0 +1,183 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armcompute + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + armruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strings" +) + +// CloudServiceRolesClient contains the methods for the CloudServiceRoles group. +// Don't use this type directly, use NewCloudServiceRolesClient() instead. +type CloudServiceRolesClient struct { + host string + subscriptionID string + pl runtime.Pipeline +} + +// NewCloudServiceRolesClient creates a new instance of CloudServiceRolesClient with the specified values. +// subscriptionID - Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms +// part of the URI for every service call. +// credential - used to authorize requests. Usually a credential from azidentity. +// options - pass nil to accept the default values. +func NewCloudServiceRolesClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*CloudServiceRolesClient, error) { + if options == nil { + options = &arm.ClientOptions{} + } + ep := cloud.AzurePublic.Services[cloud.ResourceManager].Endpoint + if c, ok := options.Cloud.Services[cloud.ResourceManager]; ok { + ep = c.Endpoint + } + pl, err := armruntime.NewPipeline(moduleName, moduleVersion, credential, runtime.PipelineOptions{}, options) + if err != nil { + return nil, err + } + client := &CloudServiceRolesClient{ + subscriptionID: subscriptionID, + host: ep, + pl: pl, + } + return client, nil +} + +// Get - Gets a role from a cloud service. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-03-01 +// roleName - Name of the role. +// options - CloudServiceRolesClientGetOptions contains the optional parameters for the CloudServiceRolesClient.Get method. +func (client *CloudServiceRolesClient) Get(ctx context.Context, roleName string, resourceGroupName string, cloudServiceName string, options *CloudServiceRolesClientGetOptions) (CloudServiceRolesClientGetResponse, error) { + req, err := client.getCreateRequest(ctx, roleName, resourceGroupName, cloudServiceName, options) + if err != nil { + return CloudServiceRolesClientGetResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return CloudServiceRolesClientGetResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return CloudServiceRolesClientGetResponse{}, runtime.NewResponseError(resp) + } + return client.getHandleResponse(resp) +} + +// getCreateRequest creates the Get request. 
+func (client *CloudServiceRolesClient) getCreateRequest(ctx context.Context, roleName string, resourceGroupName string, cloudServiceName string, options *CloudServiceRolesClientGetOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roles/{roleName}" + if roleName == "" { + return nil, errors.New("parameter roleName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{roleName}", url.PathEscape(roleName)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if cloudServiceName == "" { + return nil, errors.New("parameter cloudServiceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{cloudServiceName}", url.PathEscape(cloudServiceName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getHandleResponse handles the Get response. +func (client *CloudServiceRolesClient) getHandleResponse(resp *http.Response) (CloudServiceRolesClientGetResponse, error) { + result := CloudServiceRolesClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.CloudServiceRole); err != nil { + return CloudServiceRolesClientGetResponse{}, err + } + return result, nil +} + +// NewListPager - Gets a list of all roles in a cloud service. Use nextLink property in the response to get the next page +// of roles. Do this till nextLink is null to fetch all the roles. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-03-01 +// options - CloudServiceRolesClientListOptions contains the optional parameters for the CloudServiceRolesClient.List method. +func (client *CloudServiceRolesClient) NewListPager(resourceGroupName string, cloudServiceName string, options *CloudServiceRolesClientListOptions) *runtime.Pager[CloudServiceRolesClientListResponse] { + return runtime.NewPager(runtime.PagingHandler[CloudServiceRolesClientListResponse]{ + More: func(page CloudServiceRolesClientListResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *CloudServiceRolesClientListResponse) (CloudServiceRolesClientListResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listCreateRequest(ctx, resourceGroupName, cloudServiceName, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return CloudServiceRolesClientListResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return CloudServiceRolesClientListResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return CloudServiceRolesClientListResponse{}, runtime.NewResponseError(resp) + } + return client.listHandleResponse(resp) + }, + }) +} + +// listCreateRequest creates the List request. 
+func (client *CloudServiceRolesClient) listCreateRequest(ctx context.Context, resourceGroupName string, cloudServiceName string, options *CloudServiceRolesClientListOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/roles" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if cloudServiceName == "" { + return nil, errors.New("parameter cloudServiceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{cloudServiceName}", url.PathEscape(cloudServiceName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listHandleResponse handles the List response. +func (client *CloudServiceRolesClient) listHandleResponse(resp *http.Response) (CloudServiceRolesClientListResponse, error) { + result := CloudServiceRolesClientListResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.CloudServiceRoleListResult); err != nil { + return CloudServiceRolesClientListResponse{}, err + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_cloudservices_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_cloudservices_client.go new file mode 100644 index 000000000..6923162c2 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_cloudservices_client.go @@ -0,0 +1,886 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armcompute + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + armruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strings" +) + +// CloudServicesClient contains the methods for the CloudServices group. +// Don't use this type directly, use NewCloudServicesClient() instead. +type CloudServicesClient struct { + host string + subscriptionID string + pl runtime.Pipeline +} + +// NewCloudServicesClient creates a new instance of CloudServicesClient with the specified values. +// subscriptionID - Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms +// part of the URI for every service call. +// credential - used to authorize requests. Usually a credential from azidentity. 
+// options - pass nil to accept the default values. +func NewCloudServicesClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*CloudServicesClient, error) { + if options == nil { + options = &arm.ClientOptions{} + } + ep := cloud.AzurePublic.Services[cloud.ResourceManager].Endpoint + if c, ok := options.Cloud.Services[cloud.ResourceManager]; ok { + ep = c.Endpoint + } + pl, err := armruntime.NewPipeline(moduleName, moduleVersion, credential, runtime.PipelineOptions{}, options) + if err != nil { + return nil, err + } + client := &CloudServicesClient{ + subscriptionID: subscriptionID, + host: ep, + pl: pl, + } + return client, nil +} + +// BeginCreateOrUpdate - Create or update a cloud service. Please note some properties can be set only during cloud service +// creation. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-03-01 +// resourceGroupName - Name of the resource group. +// cloudServiceName - Name of the cloud service. +// options - CloudServicesClientBeginCreateOrUpdateOptions contains the optional parameters for the CloudServicesClient.BeginCreateOrUpdate +// method. +func (client *CloudServicesClient) BeginCreateOrUpdate(ctx context.Context, resourceGroupName string, cloudServiceName string, options *CloudServicesClientBeginCreateOrUpdateOptions) (*runtime.Poller[CloudServicesClientCreateOrUpdateResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.createOrUpdate(ctx, resourceGroupName, cloudServiceName, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[CloudServicesClientCreateOrUpdateResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[CloudServicesClientCreateOrUpdateResponse](options.ResumeToken, client.pl, nil) + } +} + +// CreateOrUpdate - Create or update a cloud service. Please note some properties can be set only during cloud service creation. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-03-01 +func (client *CloudServicesClient) createOrUpdate(ctx context.Context, resourceGroupName string, cloudServiceName string, options *CloudServicesClientBeginCreateOrUpdateOptions) (*http.Response, error) { + req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, cloudServiceName, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// createOrUpdateCreateRequest creates the CreateOrUpdate request. 
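+//
+// For context, the long-running pattern above is typically consumed by
+// polling the returned poller to completion; a sketch with placeholder names
+// (options.Parameters, when set, carries the CloudService payload):
+//
+//	poller, err := client.BeginCreateOrUpdate(ctx, "<resource-group>", "<cloud-service>", nil)
+//	if err != nil {
+//		// handle error
+//	}
+//	resp, err := poller.PollUntilDone(ctx, nil)
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = resp // CloudServicesClientCreateOrUpdateResponse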
+func (client *CloudServicesClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, cloudServiceName string, options *CloudServicesClientBeginCreateOrUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if cloudServiceName == "" { + return nil, errors.New("parameter cloudServiceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{cloudServiceName}", url.PathEscape(cloudServiceName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + if options != nil && options.Parameters != nil { + return req, runtime.MarshalAsJSON(req, *options.Parameters) + } + return req, nil +} + +// BeginDelete - Deletes a cloud service. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-03-01 +// resourceGroupName - Name of the resource group. +// cloudServiceName - Name of the cloud service. +// options - CloudServicesClientBeginDeleteOptions contains the optional parameters for the CloudServicesClient.BeginDelete +// method. +func (client *CloudServicesClient) BeginDelete(ctx context.Context, resourceGroupName string, cloudServiceName string, options *CloudServicesClientBeginDeleteOptions) (*runtime.Poller[CloudServicesClientDeleteResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.deleteOperation(ctx, resourceGroupName, cloudServiceName, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[CloudServicesClientDeleteResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[CloudServicesClientDeleteResponse](options.ResumeToken, client.pl, nil) + } +} + +// Delete - Deletes a cloud service. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-03-01 +func (client *CloudServicesClient) deleteOperation(ctx context.Context, resourceGroupName string, cloudServiceName string, options *CloudServicesClientBeginDeleteOptions) (*http.Response, error) { + req, err := client.deleteCreateRequest(ctx, resourceGroupName, cloudServiceName, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted, http.StatusNoContent) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// deleteCreateRequest creates the Delete request. 
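+//
+// For context, pollers from BeginDelete (and the other Begin* methods) can be
+// suspended and rehydrated across processes via the ResumeToken plumbing shown
+// above; a sketch assuming the azcore poller's ResumeToken method and
+// placeholder names:
+//
+//	poller, err := client.BeginDelete(ctx, "<resource-group>", "<cloud-service>", nil)
+//	if err != nil {
+//		// handle error
+//	}
+//	token, err := poller.ResumeToken() // persist this string somewhere durable
+//	if err != nil {
+//		// handle error
+//	}
+//	// later, possibly in another process:
+//	poller, err = client.BeginDelete(ctx, "<resource-group>", "<cloud-service>",
+//		&armcompute.CloudServicesClientBeginDeleteOptions{ResumeToken: token})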
+func (client *CloudServicesClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, cloudServiceName string, options *CloudServicesClientBeginDeleteOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if cloudServiceName == "" { + return nil, errors.New("parameter cloudServiceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{cloudServiceName}", url.PathEscape(cloudServiceName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// BeginDeleteInstances - Deletes role instances in a cloud service. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-03-01 +// resourceGroupName - Name of the resource group. +// cloudServiceName - Name of the cloud service. +// options - CloudServicesClientBeginDeleteInstancesOptions contains the optional parameters for the CloudServicesClient.BeginDeleteInstances +// method. +func (client *CloudServicesClient) BeginDeleteInstances(ctx context.Context, resourceGroupName string, cloudServiceName string, options *CloudServicesClientBeginDeleteInstancesOptions) (*runtime.Poller[CloudServicesClientDeleteInstancesResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.deleteInstances(ctx, resourceGroupName, cloudServiceName, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[CloudServicesClientDeleteInstancesResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[CloudServicesClientDeleteInstancesResponse](options.ResumeToken, client.pl, nil) + } +} + +// DeleteInstances - Deletes role instances in a cloud service. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-03-01 +func (client *CloudServicesClient) deleteInstances(ctx context.Context, resourceGroupName string, cloudServiceName string, options *CloudServicesClientBeginDeleteInstancesOptions) (*http.Response, error) { + req, err := client.deleteInstancesCreateRequest(ctx, resourceGroupName, cloudServiceName, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// deleteInstancesCreateRequest creates the DeleteInstances request. 
+func (client *CloudServicesClient) deleteInstancesCreateRequest(ctx context.Context, resourceGroupName string, cloudServiceName string, options *CloudServicesClientBeginDeleteInstancesOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/delete" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if cloudServiceName == "" { + return nil, errors.New("parameter cloudServiceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{cloudServiceName}", url.PathEscape(cloudServiceName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + if options != nil && options.Parameters != nil { + return req, runtime.MarshalAsJSON(req, *options.Parameters) + } + return req, nil +} + +// Get - Display information about a cloud service. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-03-01 +// resourceGroupName - Name of the resource group. +// cloudServiceName - Name of the cloud service. +// options - CloudServicesClientGetOptions contains the optional parameters for the CloudServicesClient.Get method. +func (client *CloudServicesClient) Get(ctx context.Context, resourceGroupName string, cloudServiceName string, options *CloudServicesClientGetOptions) (CloudServicesClientGetResponse, error) { + req, err := client.getCreateRequest(ctx, resourceGroupName, cloudServiceName, options) + if err != nil { + return CloudServicesClientGetResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return CloudServicesClientGetResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return CloudServicesClientGetResponse{}, runtime.NewResponseError(resp) + } + return client.getHandleResponse(resp) +} + +// getCreateRequest creates the Get request. 
+func (client *CloudServicesClient) getCreateRequest(ctx context.Context, resourceGroupName string, cloudServiceName string, options *CloudServicesClientGetOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if cloudServiceName == "" { + return nil, errors.New("parameter cloudServiceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{cloudServiceName}", url.PathEscape(cloudServiceName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getHandleResponse handles the Get response. +func (client *CloudServicesClient) getHandleResponse(resp *http.Response) (CloudServicesClientGetResponse, error) { + result := CloudServicesClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.CloudService); err != nil { + return CloudServicesClientGetResponse{}, err + } + return result, nil +} + +// GetInstanceView - Gets the status of a cloud service. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-03-01 +// resourceGroupName - Name of the resource group. +// cloudServiceName - Name of the cloud service. +// options - CloudServicesClientGetInstanceViewOptions contains the optional parameters for the CloudServicesClient.GetInstanceView +// method. +func (client *CloudServicesClient) GetInstanceView(ctx context.Context, resourceGroupName string, cloudServiceName string, options *CloudServicesClientGetInstanceViewOptions) (CloudServicesClientGetInstanceViewResponse, error) { + req, err := client.getInstanceViewCreateRequest(ctx, resourceGroupName, cloudServiceName, options) + if err != nil { + return CloudServicesClientGetInstanceViewResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return CloudServicesClientGetInstanceViewResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return CloudServicesClientGetInstanceViewResponse{}, runtime.NewResponseError(resp) + } + return client.getInstanceViewHandleResponse(resp) +} + +// getInstanceViewCreateRequest creates the GetInstanceView request. 
+func (client *CloudServicesClient) getInstanceViewCreateRequest(ctx context.Context, resourceGroupName string, cloudServiceName string, options *CloudServicesClientGetInstanceViewOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/instanceView" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if cloudServiceName == "" { + return nil, errors.New("parameter cloudServiceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{cloudServiceName}", url.PathEscape(cloudServiceName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getInstanceViewHandleResponse handles the GetInstanceView response. +func (client *CloudServicesClient) getInstanceViewHandleResponse(resp *http.Response) (CloudServicesClientGetInstanceViewResponse, error) { + result := CloudServicesClientGetInstanceViewResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.CloudServiceInstanceView); err != nil { + return CloudServicesClientGetInstanceViewResponse{}, err + } + return result, nil +} + +// NewListPager - Gets a list of all cloud services under a resource group. Use nextLink property in the response to get the +// next page of Cloud Services. Do this till nextLink is null to fetch all the Cloud Services. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-03-01 +// resourceGroupName - Name of the resource group. +// options - CloudServicesClientListOptions contains the optional parameters for the CloudServicesClient.List method. +func (client *CloudServicesClient) NewListPager(resourceGroupName string, options *CloudServicesClientListOptions) *runtime.Pager[CloudServicesClientListResponse] { + return runtime.NewPager(runtime.PagingHandler[CloudServicesClientListResponse]{ + More: func(page CloudServicesClientListResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *CloudServicesClientListResponse) (CloudServicesClientListResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listCreateRequest(ctx, resourceGroupName, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return CloudServicesClientListResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return CloudServicesClientListResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return CloudServicesClientListResponse{}, runtime.NewResponseError(resp) + } + return client.listHandleResponse(resp) + }, + }) +} + +// listCreateRequest creates the List request. 
+func (client *CloudServicesClient) listCreateRequest(ctx context.Context, resourceGroupName string, options *CloudServicesClientListOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listHandleResponse handles the List response. +func (client *CloudServicesClient) listHandleResponse(resp *http.Response) (CloudServicesClientListResponse, error) { + result := CloudServicesClientListResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.CloudServiceListResult); err != nil { + return CloudServicesClientListResponse{}, err + } + return result, nil +} + +// NewListAllPager - Gets a list of all cloud services in the subscription, regardless of the associated resource group. Use +// nextLink property in the response to get the next page of Cloud Services. Do this till nextLink +// is null to fetch all the Cloud Services. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-03-01 +// options - CloudServicesClientListAllOptions contains the optional parameters for the CloudServicesClient.ListAll method. +func (client *CloudServicesClient) NewListAllPager(options *CloudServicesClientListAllOptions) *runtime.Pager[CloudServicesClientListAllResponse] { + return runtime.NewPager(runtime.PagingHandler[CloudServicesClientListAllResponse]{ + More: func(page CloudServicesClientListAllResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *CloudServicesClientListAllResponse) (CloudServicesClientListAllResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listAllCreateRequest(ctx, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return CloudServicesClientListAllResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return CloudServicesClientListAllResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return CloudServicesClientListAllResponse{}, runtime.NewResponseError(resp) + } + return client.listAllHandleResponse(resp) + }, + }) +} + +// listAllCreateRequest creates the ListAll request. 
+func (client *CloudServicesClient) listAllCreateRequest(ctx context.Context, options *CloudServicesClientListAllOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/cloudServices" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listAllHandleResponse handles the ListAll response. +func (client *CloudServicesClient) listAllHandleResponse(resp *http.Response) (CloudServicesClientListAllResponse, error) { + result := CloudServicesClientListAllResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.CloudServiceListResult); err != nil { + return CloudServicesClientListAllResponse{}, err + } + return result, nil +} + +// BeginPowerOff - Power off the cloud service. Note that resources are still attached and you are getting charged for the +// resources. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-03-01 +// resourceGroupName - Name of the resource group. +// cloudServiceName - Name of the cloud service. +// options - CloudServicesClientBeginPowerOffOptions contains the optional parameters for the CloudServicesClient.BeginPowerOff +// method. +func (client *CloudServicesClient) BeginPowerOff(ctx context.Context, resourceGroupName string, cloudServiceName string, options *CloudServicesClientBeginPowerOffOptions) (*runtime.Poller[CloudServicesClientPowerOffResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.powerOff(ctx, resourceGroupName, cloudServiceName, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[CloudServicesClientPowerOffResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[CloudServicesClientPowerOffResponse](options.ResumeToken, client.pl, nil) + } +} + +// PowerOff - Power off the cloud service. Note that resources are still attached and you are getting charged for the resources. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-03-01 +func (client *CloudServicesClient) powerOff(ctx context.Context, resourceGroupName string, cloudServiceName string, options *CloudServicesClientBeginPowerOffOptions) (*http.Response, error) { + req, err := client.powerOffCreateRequest(ctx, resourceGroupName, cloudServiceName, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// powerOffCreateRequest creates the PowerOff request. 
+func (client *CloudServicesClient) powerOffCreateRequest(ctx context.Context, resourceGroupName string, cloudServiceName string, options *CloudServicesClientBeginPowerOffOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/poweroff" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if cloudServiceName == "" { + return nil, errors.New("parameter cloudServiceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{cloudServiceName}", url.PathEscape(cloudServiceName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// BeginRebuild - Rebuild Role Instances reinstalls the operating system on instances of web roles or worker roles and initializes +// the storage resources that are used by them. If you do not want to initialize storage +// resources, you can use Reimage Role Instances. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-03-01 +// resourceGroupName - Name of the resource group. +// cloudServiceName - Name of the cloud service. +// options - CloudServicesClientBeginRebuildOptions contains the optional parameters for the CloudServicesClient.BeginRebuild +// method. +func (client *CloudServicesClient) BeginRebuild(ctx context.Context, resourceGroupName string, cloudServiceName string, options *CloudServicesClientBeginRebuildOptions) (*runtime.Poller[CloudServicesClientRebuildResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.rebuild(ctx, resourceGroupName, cloudServiceName, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[CloudServicesClientRebuildResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[CloudServicesClientRebuildResponse](options.ResumeToken, client.pl, nil) + } +} + +// Rebuild - Rebuild Role Instances reinstalls the operating system on instances of web roles or worker roles and initializes +// the storage resources that are used by them. If you do not want to initialize storage +// resources, you can use Reimage Role Instances. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-03-01 +func (client *CloudServicesClient) rebuild(ctx context.Context, resourceGroupName string, cloudServiceName string, options *CloudServicesClientBeginRebuildOptions) (*http.Response, error) { + req, err := client.rebuildCreateRequest(ctx, resourceGroupName, cloudServiceName, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// rebuildCreateRequest creates the Rebuild request. 
+func (client *CloudServicesClient) rebuildCreateRequest(ctx context.Context, resourceGroupName string, cloudServiceName string, options *CloudServicesClientBeginRebuildOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/rebuild" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if cloudServiceName == "" { + return nil, errors.New("parameter cloudServiceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{cloudServiceName}", url.PathEscape(cloudServiceName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + if options != nil && options.Parameters != nil { + return req, runtime.MarshalAsJSON(req, *options.Parameters) + } + return req, nil +} + +// BeginReimage - Reimage asynchronous operation reinstalls the operating system on instances of web roles or worker roles. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-03-01 +// resourceGroupName - Name of the resource group. +// cloudServiceName - Name of the cloud service. +// options - CloudServicesClientBeginReimageOptions contains the optional parameters for the CloudServicesClient.BeginReimage +// method. +func (client *CloudServicesClient) BeginReimage(ctx context.Context, resourceGroupName string, cloudServiceName string, options *CloudServicesClientBeginReimageOptions) (*runtime.Poller[CloudServicesClientReimageResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.reimage(ctx, resourceGroupName, cloudServiceName, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[CloudServicesClientReimageResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[CloudServicesClientReimageResponse](options.ResumeToken, client.pl, nil) + } +} + +// Reimage - Reimage asynchronous operation reinstalls the operating system on instances of web roles or worker roles. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-03-01 +func (client *CloudServicesClient) reimage(ctx context.Context, resourceGroupName string, cloudServiceName string, options *CloudServicesClientBeginReimageOptions) (*http.Response, error) { + req, err := client.reimageCreateRequest(ctx, resourceGroupName, cloudServiceName, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// reimageCreateRequest creates the Reimage request. 
+func (client *CloudServicesClient) reimageCreateRequest(ctx context.Context, resourceGroupName string, cloudServiceName string, options *CloudServicesClientBeginReimageOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/reimage" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if cloudServiceName == "" { + return nil, errors.New("parameter cloudServiceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{cloudServiceName}", url.PathEscape(cloudServiceName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + if options != nil && options.Parameters != nil { + return req, runtime.MarshalAsJSON(req, *options.Parameters) + } + return req, nil +} + +// BeginRestart - Restarts one or more role instances in a cloud service. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-03-01 +// resourceGroupName - Name of the resource group. +// cloudServiceName - Name of the cloud service. +// options - CloudServicesClientBeginRestartOptions contains the optional parameters for the CloudServicesClient.BeginRestart +// method. +func (client *CloudServicesClient) BeginRestart(ctx context.Context, resourceGroupName string, cloudServiceName string, options *CloudServicesClientBeginRestartOptions) (*runtime.Poller[CloudServicesClientRestartResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.restart(ctx, resourceGroupName, cloudServiceName, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[CloudServicesClientRestartResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[CloudServicesClientRestartResponse](options.ResumeToken, client.pl, nil) + } +} + +// Restart - Restarts one or more role instances in a cloud service. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-03-01 +func (client *CloudServicesClient) restart(ctx context.Context, resourceGroupName string, cloudServiceName string, options *CloudServicesClientBeginRestartOptions) (*http.Response, error) { + req, err := client.restartCreateRequest(ctx, resourceGroupName, cloudServiceName, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// restartCreateRequest creates the Restart request. 
+func (client *CloudServicesClient) restartCreateRequest(ctx context.Context, resourceGroupName string, cloudServiceName string, options *CloudServicesClientBeginRestartOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/restart" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if cloudServiceName == "" { + return nil, errors.New("parameter cloudServiceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{cloudServiceName}", url.PathEscape(cloudServiceName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + if options != nil && options.Parameters != nil { + return req, runtime.MarshalAsJSON(req, *options.Parameters) + } + return req, nil +} + +// BeginStart - Starts the cloud service. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-03-01 +// resourceGroupName - Name of the resource group. +// cloudServiceName - Name of the cloud service. +// options - CloudServicesClientBeginStartOptions contains the optional parameters for the CloudServicesClient.BeginStart +// method. +func (client *CloudServicesClient) BeginStart(ctx context.Context, resourceGroupName string, cloudServiceName string, options *CloudServicesClientBeginStartOptions) (*runtime.Poller[CloudServicesClientStartResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.start(ctx, resourceGroupName, cloudServiceName, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[CloudServicesClientStartResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[CloudServicesClientStartResponse](options.ResumeToken, client.pl, nil) + } +} + +// Start - Starts the cloud service. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-03-01 +func (client *CloudServicesClient) start(ctx context.Context, resourceGroupName string, cloudServiceName string, options *CloudServicesClientBeginStartOptions) (*http.Response, error) { + req, err := client.startCreateRequest(ctx, resourceGroupName, cloudServiceName, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// startCreateRequest creates the Start request. 
+func (client *CloudServicesClient) startCreateRequest(ctx context.Context, resourceGroupName string, cloudServiceName string, options *CloudServicesClientBeginStartOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/start" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if cloudServiceName == "" { + return nil, errors.New("parameter cloudServiceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{cloudServiceName}", url.PathEscape(cloudServiceName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// BeginUpdate - Update a cloud service. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-03-01 +// resourceGroupName - Name of the resource group. +// cloudServiceName - Name of the cloud service. +// options - CloudServicesClientBeginUpdateOptions contains the optional parameters for the CloudServicesClient.BeginUpdate +// method. +func (client *CloudServicesClient) BeginUpdate(ctx context.Context, resourceGroupName string, cloudServiceName string, options *CloudServicesClientBeginUpdateOptions) (*runtime.Poller[CloudServicesClientUpdateResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.update(ctx, resourceGroupName, cloudServiceName, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[CloudServicesClientUpdateResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[CloudServicesClientUpdateResponse](options.ResumeToken, client.pl, nil) + } +} + +// Update - Update a cloud service. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-03-01 +func (client *CloudServicesClient) update(ctx context.Context, resourceGroupName string, cloudServiceName string, options *CloudServicesClientBeginUpdateOptions) (*http.Response, error) { + req, err := client.updateCreateRequest(ctx, resourceGroupName, cloudServiceName, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// updateCreateRequest creates the Update request. 
+func (client *CloudServicesClient) updateCreateRequest(ctx context.Context, resourceGroupName string, cloudServiceName string, options *CloudServicesClientBeginUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if cloudServiceName == "" { + return nil, errors.New("parameter cloudServiceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{cloudServiceName}", url.PathEscape(cloudServiceName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + if options != nil && options.Parameters != nil { + return req, runtime.MarshalAsJSON(req, *options.Parameters) + } + return req, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_cloudservicesupdatedomain_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_cloudservicesupdatedomain_client.go new file mode 100644 index 000000000..85103704b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_cloudservicesupdatedomain_client.go @@ -0,0 +1,257 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armcompute + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + armruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strconv" + "strings" +) + +// CloudServicesUpdateDomainClient contains the methods for the CloudServicesUpdateDomain group. +// Don't use this type directly, use NewCloudServicesUpdateDomainClient() instead. +type CloudServicesUpdateDomainClient struct { + host string + subscriptionID string + pl runtime.Pipeline +} + +// NewCloudServicesUpdateDomainClient creates a new instance of CloudServicesUpdateDomainClient with the specified values. +// subscriptionID - Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms +// part of the URI for every service call. +// credential - used to authorize requests. Usually a credential from azidentity. +// options - pass nil to accept the default values. 
+func NewCloudServicesUpdateDomainClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*CloudServicesUpdateDomainClient, error) { + if options == nil { + options = &arm.ClientOptions{} + } + ep := cloud.AzurePublic.Services[cloud.ResourceManager].Endpoint + if c, ok := options.Cloud.Services[cloud.ResourceManager]; ok { + ep = c.Endpoint + } + pl, err := armruntime.NewPipeline(moduleName, moduleVersion, credential, runtime.PipelineOptions{}, options) + if err != nil { + return nil, err + } + client := &CloudServicesUpdateDomainClient{ + subscriptionID: subscriptionID, + host: ep, + pl: pl, + } + return client, nil +} + +// GetUpdateDomain - Gets the specified update domain of a cloud service. Use nextLink property in the response to get the +// next page of update domains. Do this till nextLink is null to fetch all the update domains. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-03-01 +// resourceGroupName - Name of the resource group. +// cloudServiceName - Name of the cloud service. +// updateDomain - Specifies an integer value that identifies the update domain. Update domains are identified with a zero-based +// index: the first update domain has an ID of 0, the second has an ID of 1, and so on. +// options - CloudServicesUpdateDomainClientGetUpdateDomainOptions contains the optional parameters for the CloudServicesUpdateDomainClient.GetUpdateDomain +// method. +func (client *CloudServicesUpdateDomainClient) GetUpdateDomain(ctx context.Context, resourceGroupName string, cloudServiceName string, updateDomain int32, options *CloudServicesUpdateDomainClientGetUpdateDomainOptions) (CloudServicesUpdateDomainClientGetUpdateDomainResponse, error) { + req, err := client.getUpdateDomainCreateRequest(ctx, resourceGroupName, cloudServiceName, updateDomain, options) + if err != nil { + return CloudServicesUpdateDomainClientGetUpdateDomainResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return CloudServicesUpdateDomainClientGetUpdateDomainResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return CloudServicesUpdateDomainClientGetUpdateDomainResponse{}, runtime.NewResponseError(resp) + } + return client.getUpdateDomainHandleResponse(resp) +} + +// getUpdateDomainCreateRequest creates the GetUpdateDomain request. 
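+//
+// For context, a sketch of fetching the first update domain (updateDomain is
+// the zero-based index described above; placeholder names):
+//
+//	resp, err := client.GetUpdateDomain(ctx, "<resource-group>", "<cloud-service>", 0, nil)
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = resp.UpdateDomain // the unmarshaled UpdateDomain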
+func (client *CloudServicesUpdateDomainClient) getUpdateDomainCreateRequest(ctx context.Context, resourceGroupName string, cloudServiceName string, updateDomain int32, options *CloudServicesUpdateDomainClientGetUpdateDomainOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/updateDomains/{updateDomain}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if cloudServiceName == "" { + return nil, errors.New("parameter cloudServiceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{cloudServiceName}", url.PathEscape(cloudServiceName)) + urlPath = strings.ReplaceAll(urlPath, "{updateDomain}", url.PathEscape(strconv.FormatInt(int64(updateDomain), 10))) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getUpdateDomainHandleResponse handles the GetUpdateDomain response. +func (client *CloudServicesUpdateDomainClient) getUpdateDomainHandleResponse(resp *http.Response) (CloudServicesUpdateDomainClientGetUpdateDomainResponse, error) { + result := CloudServicesUpdateDomainClientGetUpdateDomainResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.UpdateDomain); err != nil { + return CloudServicesUpdateDomainClientGetUpdateDomainResponse{}, err + } + return result, nil +} + +// NewListUpdateDomainsPager - Gets a list of all update domains in a cloud service. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-03-01 +// resourceGroupName - Name of the resource group. +// cloudServiceName - Name of the cloud service. +// options - CloudServicesUpdateDomainClientListUpdateDomainsOptions contains the optional parameters for the CloudServicesUpdateDomainClient.ListUpdateDomains +// method. 
+func (client *CloudServicesUpdateDomainClient) NewListUpdateDomainsPager(resourceGroupName string, cloudServiceName string, options *CloudServicesUpdateDomainClientListUpdateDomainsOptions) *runtime.Pager[CloudServicesUpdateDomainClientListUpdateDomainsResponse] { + return runtime.NewPager(runtime.PagingHandler[CloudServicesUpdateDomainClientListUpdateDomainsResponse]{ + More: func(page CloudServicesUpdateDomainClientListUpdateDomainsResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *CloudServicesUpdateDomainClientListUpdateDomainsResponse) (CloudServicesUpdateDomainClientListUpdateDomainsResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listUpdateDomainsCreateRequest(ctx, resourceGroupName, cloudServiceName, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return CloudServicesUpdateDomainClientListUpdateDomainsResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return CloudServicesUpdateDomainClientListUpdateDomainsResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return CloudServicesUpdateDomainClientListUpdateDomainsResponse{}, runtime.NewResponseError(resp) + } + return client.listUpdateDomainsHandleResponse(resp) + }, + }) +} + +// listUpdateDomainsCreateRequest creates the ListUpdateDomains request. +func (client *CloudServicesUpdateDomainClient) listUpdateDomainsCreateRequest(ctx context.Context, resourceGroupName string, cloudServiceName string, options *CloudServicesUpdateDomainClientListUpdateDomainsOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/updateDomains" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if cloudServiceName == "" { + return nil, errors.New("parameter cloudServiceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{cloudServiceName}", url.PathEscape(cloudServiceName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listUpdateDomainsHandleResponse handles the ListUpdateDomains response. +func (client *CloudServicesUpdateDomainClient) listUpdateDomainsHandleResponse(resp *http.Response) (CloudServicesUpdateDomainClientListUpdateDomainsResponse, error) { + result := CloudServicesUpdateDomainClientListUpdateDomainsResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.UpdateDomainListResult); err != nil { + return CloudServicesUpdateDomainClientListUpdateDomainsResponse{}, err + } + return result, nil +} + +// BeginWalkUpdateDomain - Updates the role instances in the specified update domain. +// If the operation fails it returns an *azcore.ResponseError type. 
+// Generated from API version 2021-03-01 +// resourceGroupName - Name of the resource group. +// cloudServiceName - Name of the cloud service. +// updateDomain - Specifies an integer value that identifies the update domain. Update domains are identified with a zero-based +// index: the first update domain has an ID of 0, the second has an ID of 1, and so on. +// options - CloudServicesUpdateDomainClientBeginWalkUpdateDomainOptions contains the optional parameters for the CloudServicesUpdateDomainClient.BeginWalkUpdateDomain +// method. +func (client *CloudServicesUpdateDomainClient) BeginWalkUpdateDomain(ctx context.Context, resourceGroupName string, cloudServiceName string, updateDomain int32, options *CloudServicesUpdateDomainClientBeginWalkUpdateDomainOptions) (*runtime.Poller[CloudServicesUpdateDomainClientWalkUpdateDomainResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.walkUpdateDomain(ctx, resourceGroupName, cloudServiceName, updateDomain, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[CloudServicesUpdateDomainClientWalkUpdateDomainResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[CloudServicesUpdateDomainClientWalkUpdateDomainResponse](options.ResumeToken, client.pl, nil) + } +} + +// WalkUpdateDomain - Updates the role instances in the specified update domain. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-03-01 +func (client *CloudServicesUpdateDomainClient) walkUpdateDomain(ctx context.Context, resourceGroupName string, cloudServiceName string, updateDomain int32, options *CloudServicesUpdateDomainClientBeginWalkUpdateDomainOptions) (*http.Response, error) { + req, err := client.walkUpdateDomainCreateRequest(ctx, resourceGroupName, cloudServiceName, updateDomain, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// walkUpdateDomainCreateRequest creates the WalkUpdateDomain request. 
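+//
+// For context, a sketch of walking update domain 0 to completion; passing nil
+// options sends no request body, and whether a Parameters payload is required
+// for a given service is an assumption left to the caller to verify:
+//
+//	poller, err := client.BeginWalkUpdateDomain(ctx, "<resource-group>", "<cloud-service>", 0, nil)
+//	if err != nil {
+//		// handle error
+//	}
+//	if _, err = poller.PollUntilDone(ctx, nil); err != nil {
+//		// handle error
+//	}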
+func (client *CloudServicesUpdateDomainClient) walkUpdateDomainCreateRequest(ctx context.Context, resourceGroupName string, cloudServiceName string, updateDomain int32, options *CloudServicesUpdateDomainClientBeginWalkUpdateDomainOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/cloudServices/{cloudServiceName}/updateDomains/{updateDomain}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if cloudServiceName == "" { + return nil, errors.New("parameter cloudServiceName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{cloudServiceName}", url.PathEscape(cloudServiceName)) + urlPath = strings.ReplaceAll(urlPath, "{updateDomain}", url.PathEscape(strconv.FormatInt(int64(updateDomain), 10))) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + if options != nil && options.Parameters != nil { + return req, runtime.MarshalAsJSON(req, *options.Parameters) + } + return req, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_communitygalleries_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_communitygalleries_client.go new file mode 100644 index 000000000..0c45e908b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_communitygalleries_client.go @@ -0,0 +1,112 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armcompute + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + armruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strings" +) + +// CommunityGalleriesClient contains the methods for the CommunityGalleries group. +// Don't use this type directly, use NewCommunityGalleriesClient() instead. +type CommunityGalleriesClient struct { + host string + subscriptionID string + pl runtime.Pipeline +} + +// NewCommunityGalleriesClient creates a new instance of CommunityGalleriesClient with the specified values. +// subscriptionID - Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms +// part of the URI for every service call. +// credential - used to authorize requests. Usually a credential from azidentity. 
+// options - pass nil to accept the default values. +func NewCommunityGalleriesClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*CommunityGalleriesClient, error) { + if options == nil { + options = &arm.ClientOptions{} + } + ep := cloud.AzurePublic.Services[cloud.ResourceManager].Endpoint + if c, ok := options.Cloud.Services[cloud.ResourceManager]; ok { + ep = c.Endpoint + } + pl, err := armruntime.NewPipeline(moduleName, moduleVersion, credential, runtime.PipelineOptions{}, options) + if err != nil { + return nil, err + } + client := &CommunityGalleriesClient{ + subscriptionID: subscriptionID, + host: ep, + pl: pl, + } + return client, nil +} + +// Get - Get a community gallery by gallery public name. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-07-01 +// location - Resource location. +// publicGalleryName - The public name of the community gallery. +// options - CommunityGalleriesClientGetOptions contains the optional parameters for the CommunityGalleriesClient.Get method. +func (client *CommunityGalleriesClient) Get(ctx context.Context, location string, publicGalleryName string, options *CommunityGalleriesClientGetOptions) (CommunityGalleriesClientGetResponse, error) { + req, err := client.getCreateRequest(ctx, location, publicGalleryName, options) + if err != nil { + return CommunityGalleriesClientGetResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return CommunityGalleriesClientGetResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return CommunityGalleriesClientGetResponse{}, runtime.NewResponseError(resp) + } + return client.getHandleResponse(resp) +} + +// getCreateRequest creates the Get request. +func (client *CommunityGalleriesClient) getCreateRequest(ctx context.Context, location string, publicGalleryName string, options *CommunityGalleriesClientGetOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/communityGalleries/{publicGalleryName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if location == "" { + return nil, errors.New("parameter location cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{location}", url.PathEscape(location)) + if publicGalleryName == "" { + return nil, errors.New("parameter publicGalleryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{publicGalleryName}", url.PathEscape(publicGalleryName)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-07-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getHandleResponse handles the Get response. 
+func (client *CommunityGalleriesClient) getHandleResponse(resp *http.Response) (CommunityGalleriesClientGetResponse, error) { + result := CommunityGalleriesClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.CommunityGallery); err != nil { + return CommunityGalleriesClientGetResponse{}, err + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_communitygalleryimages_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_communitygalleryimages_client.go new file mode 100644 index 000000000..cffc731a3 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_communitygalleryimages_client.go @@ -0,0 +1,118 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armcompute + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + armruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strings" +) + +// CommunityGalleryImagesClient contains the methods for the CommunityGalleryImages group. +// Don't use this type directly, use NewCommunityGalleryImagesClient() instead. +type CommunityGalleryImagesClient struct { + host string + subscriptionID string + pl runtime.Pipeline +} + +// NewCommunityGalleryImagesClient creates a new instance of CommunityGalleryImagesClient with the specified values. +// subscriptionID - Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms +// part of the URI for every service call. +// credential - used to authorize requests. Usually a credential from azidentity. +// options - pass nil to accept the default values. +func NewCommunityGalleryImagesClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*CommunityGalleryImagesClient, error) { + if options == nil { + options = &arm.ClientOptions{} + } + ep := cloud.AzurePublic.Services[cloud.ResourceManager].Endpoint + if c, ok := options.Cloud.Services[cloud.ResourceManager]; ok { + ep = c.Endpoint + } + pl, err := armruntime.NewPipeline(moduleName, moduleVersion, credential, runtime.PipelineOptions{}, options) + if err != nil { + return nil, err + } + client := &CommunityGalleryImagesClient{ + subscriptionID: subscriptionID, + host: ep, + pl: pl, + } + return client, nil +} + +// Get - Get a community gallery image. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-07-01 +// location - Resource location. +// publicGalleryName - The public name of the community gallery. +// galleryImageName - The name of the community gallery image definition. +// options - CommunityGalleryImagesClientGetOptions contains the optional parameters for the CommunityGalleryImagesClient.Get +// method. 
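A caller-side sketch of the Get operation documented above (not part of the vendored diff): it assumes the sibling azidentity module for credentials, and the subscription ID, location, and gallery/image names are placeholders:

	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	client, err := NewCommunityGalleryImagesClient("00000000-0000-0000-0000-000000000000", cred, nil)
	if err != nil {
		log.Fatal(err)
	}
	// Get issues the GET built by getCreateRequest and unmarshals the body.
	resp, err := client.Get(context.TODO(), "westus", "myPublicGalleryName", "myGalleryImageName", nil)
	if err != nil {
		log.Fatal(err)
	}
	if resp.CommunityGalleryImage.Name != nil {
		fmt.Println(*resp.CommunityGalleryImage.Name)
	}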
+func (client *CommunityGalleryImagesClient) Get(ctx context.Context, location string, publicGalleryName string, galleryImageName string, options *CommunityGalleryImagesClientGetOptions) (CommunityGalleryImagesClientGetResponse, error) { + req, err := client.getCreateRequest(ctx, location, publicGalleryName, galleryImageName, options) + if err != nil { + return CommunityGalleryImagesClientGetResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return CommunityGalleryImagesClientGetResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return CommunityGalleryImagesClientGetResponse{}, runtime.NewResponseError(resp) + } + return client.getHandleResponse(resp) +} + +// getCreateRequest creates the Get request. +func (client *CommunityGalleryImagesClient) getCreateRequest(ctx context.Context, location string, publicGalleryName string, galleryImageName string, options *CommunityGalleryImagesClientGetOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/communityGalleries/{publicGalleryName}/images/{galleryImageName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if location == "" { + return nil, errors.New("parameter location cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{location}", url.PathEscape(location)) + if publicGalleryName == "" { + return nil, errors.New("parameter publicGalleryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{publicGalleryName}", url.PathEscape(publicGalleryName)) + if galleryImageName == "" { + return nil, errors.New("parameter galleryImageName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{galleryImageName}", url.PathEscape(galleryImageName)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-07-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getHandleResponse handles the Get response. +func (client *CommunityGalleryImagesClient) getHandleResponse(resp *http.Response) (CommunityGalleryImagesClientGetResponse, error) { + result := CommunityGalleryImagesClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.CommunityGalleryImage); err != nil { + return CommunityGalleryImagesClientGetResponse{}, err + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_communitygalleryimageversions_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_communitygalleryimageversions_client.go new file mode 100644 index 000000000..d4287c90e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_communitygalleryimageversions_client.go @@ -0,0 +1,125 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+
+package armcompute
+
+import (
+	"context"
+	"errors"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
+	armruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
+	"net/http"
+	"net/url"
+	"strings"
+)
+
+// CommunityGalleryImageVersionsClient contains the methods for the CommunityGalleryImageVersions group.
+// Don't use this type directly, use NewCommunityGalleryImageVersionsClient() instead.
+type CommunityGalleryImageVersionsClient struct {
+	host           string
+	subscriptionID string
+	pl             runtime.Pipeline
+}
+
+// NewCommunityGalleryImageVersionsClient creates a new instance of CommunityGalleryImageVersionsClient with the specified values.
+// subscriptionID - Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms
+// part of the URI for every service call.
+// credential - used to authorize requests. Usually a credential from azidentity.
+// options - pass nil to accept the default values.
+func NewCommunityGalleryImageVersionsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*CommunityGalleryImageVersionsClient, error) {
+	if options == nil {
+		options = &arm.ClientOptions{}
+	}
+	ep := cloud.AzurePublic.Services[cloud.ResourceManager].Endpoint
+	if c, ok := options.Cloud.Services[cloud.ResourceManager]; ok {
+		ep = c.Endpoint
+	}
+	pl, err := armruntime.NewPipeline(moduleName, moduleVersion, credential, runtime.PipelineOptions{}, options)
+	if err != nil {
+		return nil, err
+	}
+	client := &CommunityGalleryImageVersionsClient{
+		subscriptionID: subscriptionID,
+		host:           ep,
+		pl:             pl,
+	}
+	return client, nil
+}
+
+// Get - Get a community gallery image version.
+// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2021-07-01
+// location - Resource location.
+// publicGalleryName - The public name of the community gallery.
+// galleryImageName - The name of the community gallery image definition.
+// galleryImageVersionName - The name of the community gallery image version. Needs to follow semantic version name pattern:
+// The allowed characters are digit and period. Digits must be within the range of a 32-bit integer.
+// Format: <MajorVersion>.<MinorVersion>.<Patch>
+// options - CommunityGalleryImageVersionsClientGetOptions contains the optional parameters for the CommunityGalleryImageVersionsClient.Get
+// method.
+func (client *CommunityGalleryImageVersionsClient) Get(ctx context.Context, location string, publicGalleryName string, galleryImageName string, galleryImageVersionName string, options *CommunityGalleryImageVersionsClientGetOptions) (CommunityGalleryImageVersionsClientGetResponse, error) {
+	req, err := client.getCreateRequest(ctx, location, publicGalleryName, galleryImageName, galleryImageVersionName, options)
+	if err != nil {
+		return CommunityGalleryImageVersionsClientGetResponse{}, err
+	}
+	resp, err := client.pl.Do(req)
+	if err != nil {
+		return CommunityGalleryImageVersionsClientGetResponse{}, err
+	}
+	if !runtime.HasStatusCode(resp, http.StatusOK) {
+		return CommunityGalleryImageVersionsClientGetResponse{}, runtime.NewResponseError(resp)
+	}
+	return client.getHandleResponse(resp)
+}
+
+// getCreateRequest creates the Get request.
+func (client *CommunityGalleryImageVersionsClient) getCreateRequest(ctx context.Context, location string, publicGalleryName string, galleryImageName string, galleryImageVersionName string, options *CommunityGalleryImageVersionsClientGetOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/communityGalleries/{publicGalleryName}/images/{galleryImageName}/versions/{galleryImageVersionName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if location == "" { + return nil, errors.New("parameter location cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{location}", url.PathEscape(location)) + if publicGalleryName == "" { + return nil, errors.New("parameter publicGalleryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{publicGalleryName}", url.PathEscape(publicGalleryName)) + if galleryImageName == "" { + return nil, errors.New("parameter galleryImageName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{galleryImageName}", url.PathEscape(galleryImageName)) + if galleryImageVersionName == "" { + return nil, errors.New("parameter galleryImageVersionName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{galleryImageVersionName}", url.PathEscape(galleryImageVersionName)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-07-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getHandleResponse handles the Get response. +func (client *CommunityGalleryImageVersionsClient) getHandleResponse(resp *http.Response) (CommunityGalleryImageVersionsClientGetResponse, error) { + result := CommunityGalleryImageVersionsClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.CommunityGalleryImageVersion); err != nil { + return CommunityGalleryImageVersionsClientGetResponse{}, err + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_constants.go new file mode 100644 index 000000000..024379ab7 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_constants.go @@ -0,0 +1,2495 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armcompute + +const ( + moduleName = "armcompute" + moduleVersion = "v1.0.0" +) + +type AccessLevel string + +const ( + AccessLevelNone AccessLevel = "None" + AccessLevelRead AccessLevel = "Read" + AccessLevelWrite AccessLevel = "Write" +) + +// PossibleAccessLevelValues returns the possible values for the AccessLevel const type. 
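Since these are open string enums, the Possible*Values helpers that this file repeats for every const type are what a caller has for client-side validation. A small sketch (the isValidAccessLevel helper is illustrative, not part of the SDK):

	// isValidAccessLevel reports whether s matches one of the known AccessLevel values.
	func isValidAccessLevel(s string) bool {
		for _, v := range PossibleAccessLevelValues() {
			if string(v) == s {
				return true
			}
		}
		return false
	}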
+func PossibleAccessLevelValues() []AccessLevel { + return []AccessLevel{ + AccessLevelNone, + AccessLevelRead, + AccessLevelWrite, + } +} + +// AggregatedReplicationState - This is the aggregated replication status based on all the regional replication status flags. +type AggregatedReplicationState string + +const ( + AggregatedReplicationStateCompleted AggregatedReplicationState = "Completed" + AggregatedReplicationStateFailed AggregatedReplicationState = "Failed" + AggregatedReplicationStateInProgress AggregatedReplicationState = "InProgress" + AggregatedReplicationStateUnknown AggregatedReplicationState = "Unknown" +) + +// PossibleAggregatedReplicationStateValues returns the possible values for the AggregatedReplicationState const type. +func PossibleAggregatedReplicationStateValues() []AggregatedReplicationState { + return []AggregatedReplicationState{ + AggregatedReplicationStateCompleted, + AggregatedReplicationStateFailed, + AggregatedReplicationStateInProgress, + AggregatedReplicationStateUnknown, + } +} + +// Architecture - The architecture of the image. Applicable to OS disks only. +type Architecture string + +const ( + ArchitectureArm64 Architecture = "Arm64" + ArchitectureX64 Architecture = "x64" +) + +// PossibleArchitectureValues returns the possible values for the Architecture const type. +func PossibleArchitectureValues() []Architecture { + return []Architecture{ + ArchitectureArm64, + ArchitectureX64, + } +} + +// ArchitectureTypes - Specifies the Architecture Type +type ArchitectureTypes string + +const ( + ArchitectureTypesArm64 ArchitectureTypes = "Arm64" + ArchitectureTypesX64 ArchitectureTypes = "x64" +) + +// PossibleArchitectureTypesValues returns the possible values for the ArchitectureTypes const type. +func PossibleArchitectureTypesValues() []ArchitectureTypes { + return []ArchitectureTypes{ + ArchitectureTypesArm64, + ArchitectureTypesX64, + } +} + +// AvailabilitySetSKUTypes - Specifies the sku of an Availability Set. Use 'Aligned' for virtual machines with managed disks +// and 'Classic' for virtual machines with unmanaged disks. Default value is 'Classic'. +type AvailabilitySetSKUTypes string + +const ( + AvailabilitySetSKUTypesAligned AvailabilitySetSKUTypes = "Aligned" + AvailabilitySetSKUTypesClassic AvailabilitySetSKUTypes = "Classic" +) + +// PossibleAvailabilitySetSKUTypesValues returns the possible values for the AvailabilitySetSKUTypes const type. +func PossibleAvailabilitySetSKUTypesValues() []AvailabilitySetSKUTypes { + return []AvailabilitySetSKUTypes{ + AvailabilitySetSKUTypesAligned, + AvailabilitySetSKUTypesClassic, + } +} + +// CachingTypes - Specifies the caching requirements. +// Possible values are: +// None +// ReadOnly +// ReadWrite +// Default: None for Standard storage. ReadOnly for Premium storage +type CachingTypes string + +const ( + CachingTypesNone CachingTypes = "None" + CachingTypesReadOnly CachingTypes = "ReadOnly" + CachingTypesReadWrite CachingTypes = "ReadWrite" +) + +// PossibleCachingTypesValues returns the possible values for the CachingTypes const type. 
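In request models these enum constants are taken by pointer. A sketch under the assumption that the module's DataDisk model carries Lun, CreateOption, Caching, and DiskSizeGB fields as in other armcompute releases, with to.Ptr from azcore's to package:

	// Sketch: populating a hypothetical data-disk configuration.
	disk := DataDisk{
		Lun:          to.Ptr[int32](0),
		CreateOption: to.Ptr(DiskCreateOptionTypesEmpty), // defined later in this file
		Caching:      to.Ptr(CachingTypesReadOnly),
		DiskSizeGB:   to.Ptr[int32](128),
	}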
+func PossibleCachingTypesValues() []CachingTypes { + return []CachingTypes{ + CachingTypesNone, + CachingTypesReadOnly, + CachingTypesReadWrite, + } +} + +type CapacityReservationGroupInstanceViewTypes string + +const ( + CapacityReservationGroupInstanceViewTypesInstanceView CapacityReservationGroupInstanceViewTypes = "instanceView" +) + +// PossibleCapacityReservationGroupInstanceViewTypesValues returns the possible values for the CapacityReservationGroupInstanceViewTypes const type. +func PossibleCapacityReservationGroupInstanceViewTypesValues() []CapacityReservationGroupInstanceViewTypes { + return []CapacityReservationGroupInstanceViewTypes{ + CapacityReservationGroupInstanceViewTypesInstanceView, + } +} + +type CapacityReservationInstanceViewTypes string + +const ( + CapacityReservationInstanceViewTypesInstanceView CapacityReservationInstanceViewTypes = "instanceView" +) + +// PossibleCapacityReservationInstanceViewTypesValues returns the possible values for the CapacityReservationInstanceViewTypes const type. +func PossibleCapacityReservationInstanceViewTypesValues() []CapacityReservationInstanceViewTypes { + return []CapacityReservationInstanceViewTypes{ + CapacityReservationInstanceViewTypesInstanceView, + } +} + +// CloudServiceUpgradeMode - Update mode for the cloud service. Role instances are allocated to update domains when the service +// is deployed. Updates can be initiated manually in each update domain or initiated automatically in +// all update domains. Possible Values are +// Auto +// Manual +// Simultaneous +// If not specified, the default value is Auto. If set to Manual, PUT UpdateDomain must be called to apply the update. If +// set to Auto, the update is automatically applied to each update domain in +// sequence. +type CloudServiceUpgradeMode string + +const ( + CloudServiceUpgradeModeAuto CloudServiceUpgradeMode = "Auto" + CloudServiceUpgradeModeManual CloudServiceUpgradeMode = "Manual" + CloudServiceUpgradeModeSimultaneous CloudServiceUpgradeMode = "Simultaneous" +) + +// PossibleCloudServiceUpgradeModeValues returns the possible values for the CloudServiceUpgradeMode const type. +func PossibleCloudServiceUpgradeModeValues() []CloudServiceUpgradeMode { + return []CloudServiceUpgradeMode{ + CloudServiceUpgradeModeAuto, + CloudServiceUpgradeModeManual, + CloudServiceUpgradeModeSimultaneous, + } +} + +// ConfidentialVMEncryptionType - confidential VM encryption types +type ConfidentialVMEncryptionType string + +const ( + ConfidentialVMEncryptionTypeEncryptedVMGuestStateOnlyWithPmk ConfidentialVMEncryptionType = "EncryptedVMGuestStateOnlyWithPmk" + ConfidentialVMEncryptionTypeEncryptedWithCmk ConfidentialVMEncryptionType = "EncryptedWithCmk" + ConfidentialVMEncryptionTypeEncryptedWithPmk ConfidentialVMEncryptionType = "EncryptedWithPmk" +) + +// PossibleConfidentialVMEncryptionTypeValues returns the possible values for the ConfidentialVMEncryptionType const type. +func PossibleConfidentialVMEncryptionTypeValues() []ConfidentialVMEncryptionType { + return []ConfidentialVMEncryptionType{ + ConfidentialVMEncryptionTypeEncryptedVMGuestStateOnlyWithPmk, + ConfidentialVMEncryptionTypeEncryptedWithCmk, + ConfidentialVMEncryptionTypeEncryptedWithPmk, + } +} + +// ConsistencyModeTypes - ConsistencyMode of the RestorePoint. Can be specified in the input while creating a restore point. +// For now, only CrashConsistent is accepted as a valid input. Please refer to +// https://aka.ms/RestorePoints for more details. 
+type ConsistencyModeTypes string + +const ( + ConsistencyModeTypesApplicationConsistent ConsistencyModeTypes = "ApplicationConsistent" + ConsistencyModeTypesCrashConsistent ConsistencyModeTypes = "CrashConsistent" + ConsistencyModeTypesFileSystemConsistent ConsistencyModeTypes = "FileSystemConsistent" +) + +// PossibleConsistencyModeTypesValues returns the possible values for the ConsistencyModeTypes const type. +func PossibleConsistencyModeTypesValues() []ConsistencyModeTypes { + return []ConsistencyModeTypes{ + ConsistencyModeTypesApplicationConsistent, + ConsistencyModeTypesCrashConsistent, + ConsistencyModeTypesFileSystemConsistent, + } +} + +// DataAccessAuthMode - Additional authentication requirements when exporting or uploading to a disk or snapshot. +type DataAccessAuthMode string + +const ( + // DataAccessAuthModeAzureActiveDirectory - When export/upload URL is used, the system checks if the user has an identity + // in Azure Active Directory and has necessary permissions to export/upload the data. Please refer to aka.ms/DisksAzureADAuth. + DataAccessAuthModeAzureActiveDirectory DataAccessAuthMode = "AzureActiveDirectory" + // DataAccessAuthModeNone - No additional authentication would be performed when accessing export/upload URL. + DataAccessAuthModeNone DataAccessAuthMode = "None" +) + +// PossibleDataAccessAuthModeValues returns the possible values for the DataAccessAuthMode const type. +func PossibleDataAccessAuthModeValues() []DataAccessAuthMode { + return []DataAccessAuthMode{ + DataAccessAuthModeAzureActiveDirectory, + DataAccessAuthModeNone, + } +} + +// DedicatedHostLicenseTypes - Specifies the software license type that will be applied to the VMs deployed on the dedicated +// host. +// Possible values are: +// None +// WindowsServerHybrid +// WindowsServerPerpetual +// Default: None +type DedicatedHostLicenseTypes string + +const ( + DedicatedHostLicenseTypesNone DedicatedHostLicenseTypes = "None" + DedicatedHostLicenseTypesWindowsServerHybrid DedicatedHostLicenseTypes = "Windows_Server_Hybrid" + DedicatedHostLicenseTypesWindowsServerPerpetual DedicatedHostLicenseTypes = "Windows_Server_Perpetual" +) + +// PossibleDedicatedHostLicenseTypesValues returns the possible values for the DedicatedHostLicenseTypes const type. +func PossibleDedicatedHostLicenseTypesValues() []DedicatedHostLicenseTypes { + return []DedicatedHostLicenseTypes{ + DedicatedHostLicenseTypesNone, + DedicatedHostLicenseTypesWindowsServerHybrid, + DedicatedHostLicenseTypesWindowsServerPerpetual, + } +} + +// DeleteOptions - Specify what happens to the network interface when the VM is deleted +type DeleteOptions string + +const ( + DeleteOptionsDelete DeleteOptions = "Delete" + DeleteOptionsDetach DeleteOptions = "Detach" +) + +// PossibleDeleteOptionsValues returns the possible values for the DeleteOptions const type. +func PossibleDeleteOptionsValues() []DeleteOptions { + return []DeleteOptions{ + DeleteOptionsDelete, + DeleteOptionsDetach, + } +} + +// DiffDiskOptions - Specifies the ephemeral disk option for operating system disk. +type DiffDiskOptions string + +const ( + DiffDiskOptionsLocal DiffDiskOptions = "Local" +) + +// PossibleDiffDiskOptionsValues returns the possible values for the DiffDiskOptions const type. +func PossibleDiffDiskOptionsValues() []DiffDiskOptions { + return []DiffDiskOptions{ + DiffDiskOptionsLocal, + } +} + +// DiffDiskPlacement - Specifies the ephemeral disk placement for operating system disk. 
This property can be used by the user
+// in the request to choose the location, i.e., cache disk or resource disk space, for Ephemeral OS disk
+// provisioning. For more information on Ephemeral OS disk size requirements, please refer to the Ephemeral OS disk size requirements
+// for Windows VMs at
+// https://docs.microsoft.com/azure/virtual-machines/windows/ephemeral-os-disks#size-requirements and for Linux VMs at
+// https://docs.microsoft.com/azure/virtual-machines/linux/ephemeral-os-disks#size-requirements
+type DiffDiskPlacement string
+
+const (
+	DiffDiskPlacementCacheDisk    DiffDiskPlacement = "CacheDisk"
+	DiffDiskPlacementResourceDisk DiffDiskPlacement = "ResourceDisk"
+)
+
+// PossibleDiffDiskPlacementValues returns the possible values for the DiffDiskPlacement const type.
+func PossibleDiffDiskPlacementValues() []DiffDiskPlacement {
+	return []DiffDiskPlacement{
+		DiffDiskPlacementCacheDisk,
+		DiffDiskPlacementResourceDisk,
+	}
+}
+
+// DiskCreateOption - This enumerates the possible sources of a disk's creation.
+type DiskCreateOption string
+
+const (
+	// DiskCreateOptionAttach - Disk will be attached to a VM.
+	DiskCreateOptionAttach DiskCreateOption = "Attach"
+	// DiskCreateOptionCopy - Create a new disk or snapshot by copying from a disk or snapshot specified by the given sourceResourceId.
+	DiskCreateOptionCopy DiskCreateOption = "Copy"
+	// DiskCreateOptionCopyStart - Create a new disk by using a deep copy process, where the resource creation is considered complete
+	// only after all data has been copied from the source.
+	DiskCreateOptionCopyStart DiskCreateOption = "CopyStart"
+	// DiskCreateOptionEmpty - Create an empty data disk of a size given by diskSizeGB.
+	DiskCreateOptionEmpty DiskCreateOption = "Empty"
+	// DiskCreateOptionFromImage - Create a new disk from a platform image specified by the given imageReference or galleryImageReference.
+	DiskCreateOptionFromImage DiskCreateOption = "FromImage"
+	// DiskCreateOptionImport - Create a disk by importing from a blob specified by a sourceUri in a storage account specified
+	// by storageAccountId.
+	DiskCreateOptionImport DiskCreateOption = "Import"
+	// DiskCreateOptionImportSecure - Similar to the Import create option. Create a new Trusted Launch VM or Confidential VM supported
+	// disk by importing an additional blob for VM guest state, specified by securityDataUri, in the storage account specified by storageAccountId.
+	DiskCreateOptionImportSecure DiskCreateOption = "ImportSecure"
+	// DiskCreateOptionRestore - Create a new disk by copying from a backup recovery point.
+	DiskCreateOptionRestore DiskCreateOption = "Restore"
+	// DiskCreateOptionUpload - Create a new disk by obtaining a write token and using it to directly upload the contents of the
+	// disk.
+	DiskCreateOptionUpload DiskCreateOption = "Upload"
+	// DiskCreateOptionUploadPreparedSecure - Similar to the Upload create option. Create a new Trusted Launch VM or Confidential
+	// VM supported disk and upload using a write token in both the disk and the VM guest state.
+	DiskCreateOptionUploadPreparedSecure DiskCreateOption = "UploadPreparedSecure"
+)
+
+// PossibleDiskCreateOptionValues returns the possible values for the DiskCreateOption const type.
+func PossibleDiskCreateOptionValues() []DiskCreateOption {
+	return []DiskCreateOption{
+		DiskCreateOptionAttach,
+		DiskCreateOptionCopy,
+		DiskCreateOptionCopyStart,
+		DiskCreateOptionEmpty,
+		DiskCreateOptionFromImage,
+		DiskCreateOptionImport,
+		DiskCreateOptionImportSecure,
+		DiskCreateOptionRestore,
+		DiskCreateOptionUpload,
+		DiskCreateOptionUploadPreparedSecure,
+	}
+}
+
+// DiskCreateOptionTypes - Specifies how the virtual machine should be created.
+// Possible values are:
+// Attach – This value is used when you are using a specialized disk to create the virtual machine.
+// FromImage – This value is used when you are using an image to create the virtual machine. If you are using a platform
+// image, you also use the imageReference element described above. If you are
+// using a marketplace image, you also use the plan element previously described.
+type DiskCreateOptionTypes string
+
+const (
+	DiskCreateOptionTypesAttach    DiskCreateOptionTypes = "Attach"
+	DiskCreateOptionTypesEmpty     DiskCreateOptionTypes = "Empty"
+	DiskCreateOptionTypesFromImage DiskCreateOptionTypes = "FromImage"
+)
+
+// PossibleDiskCreateOptionTypesValues returns the possible values for the DiskCreateOptionTypes const type.
+func PossibleDiskCreateOptionTypesValues() []DiskCreateOptionTypes {
+	return []DiskCreateOptionTypes{
+		DiskCreateOptionTypesAttach,
+		DiskCreateOptionTypesEmpty,
+		DiskCreateOptionTypesFromImage,
+	}
+}
+
+// DiskDeleteOptionTypes - Specifies the behavior of the managed disk when the VM gets deleted, i.e. whether the managed disk
+// is deleted or detached. Supported values:
+// Delete – If this value is used, the managed disk is deleted when the VM gets deleted.
+// Detach – If this value is used, the managed disk is retained after the VM gets deleted.
+// Minimum api-version: 2021-03-01
+type DiskDeleteOptionTypes string
+
+const (
+	DiskDeleteOptionTypesDelete DiskDeleteOptionTypes = "Delete"
+	DiskDeleteOptionTypesDetach DiskDeleteOptionTypes = "Detach"
+)
+
+// PossibleDiskDeleteOptionTypesValues returns the possible values for the DiskDeleteOptionTypes const type.
+func PossibleDiskDeleteOptionTypesValues() []DiskDeleteOptionTypes {
+	return []DiskDeleteOptionTypes{
+		DiskDeleteOptionTypesDelete,
+		DiskDeleteOptionTypesDetach,
+	}
+}
+
+// DiskDetachOptionTypes - Specifies the detach behavior to be used while detaching a disk, or for a disk which is already
+// in the process of detachment from the virtual machine. Supported values: ForceDetach.
+// detachOption: ForceDetach is applicable only for managed data disks. If a previous detachment attempt of the data disk
+// did not complete due to an unexpected failure from the virtual machine and the
+// disk is still not released, then use force-detach as a last resort option to detach the disk forcibly from the VM. All writes
+// might not have been flushed when using this detach behavior.
+// This feature is still in preview mode and is not supported for VirtualMachineScaleSet. To force-detach a data disk, update
+// toBeDetached to 'true' along with setting detachOption: 'ForceDetach'.
+type DiskDetachOptionTypes string
+
+const (
+	DiskDetachOptionTypesForceDetach DiskDetachOptionTypes = "ForceDetach"
+)
+
+// PossibleDiskDetachOptionTypesValues returns the possible values for the DiskDetachOptionTypes const type.
+func PossibleDiskDetachOptionTypesValues() []DiskDetachOptionTypes { + return []DiskDetachOptionTypes{ + DiskDetachOptionTypesForceDetach, + } +} + +// DiskEncryptionSetIdentityType - The type of Managed Identity used by the DiskEncryptionSet. Only SystemAssigned is supported +// for new creations. Disk Encryption Sets can be updated with Identity type None during migration of +// subscription to a new Azure Active Directory tenant; it will cause the encrypted resources to lose access to the keys. +type DiskEncryptionSetIdentityType string + +const ( + DiskEncryptionSetIdentityTypeNone DiskEncryptionSetIdentityType = "None" + DiskEncryptionSetIdentityTypeSystemAssigned DiskEncryptionSetIdentityType = "SystemAssigned" +) + +// PossibleDiskEncryptionSetIdentityTypeValues returns the possible values for the DiskEncryptionSetIdentityType const type. +func PossibleDiskEncryptionSetIdentityTypeValues() []DiskEncryptionSetIdentityType { + return []DiskEncryptionSetIdentityType{ + DiskEncryptionSetIdentityTypeNone, + DiskEncryptionSetIdentityTypeSystemAssigned, + } +} + +// DiskEncryptionSetType - The type of key used to encrypt the data of the disk. +type DiskEncryptionSetType string + +const ( + // DiskEncryptionSetTypeConfidentialVMEncryptedWithCustomerKey - Confidential VM supported disk and VM guest state would be + // encrypted with customer managed key. + DiskEncryptionSetTypeConfidentialVMEncryptedWithCustomerKey DiskEncryptionSetType = "ConfidentialVmEncryptedWithCustomerKey" + // DiskEncryptionSetTypeEncryptionAtRestWithCustomerKey - Resource using diskEncryptionSet would be encrypted at rest with + // Customer managed key that can be changed and revoked by a customer. + DiskEncryptionSetTypeEncryptionAtRestWithCustomerKey DiskEncryptionSetType = "EncryptionAtRestWithCustomerKey" + // DiskEncryptionSetTypeEncryptionAtRestWithPlatformAndCustomerKeys - Resource using diskEncryptionSet would be encrypted + // at rest with two layers of encryption. One of the keys is Customer managed and the other key is Platform managed. + DiskEncryptionSetTypeEncryptionAtRestWithPlatformAndCustomerKeys DiskEncryptionSetType = "EncryptionAtRestWithPlatformAndCustomerKeys" +) + +// PossibleDiskEncryptionSetTypeValues returns the possible values for the DiskEncryptionSetType const type. +func PossibleDiskEncryptionSetTypeValues() []DiskEncryptionSetType { + return []DiskEncryptionSetType{ + DiskEncryptionSetTypeConfidentialVMEncryptedWithCustomerKey, + DiskEncryptionSetTypeEncryptionAtRestWithCustomerKey, + DiskEncryptionSetTypeEncryptionAtRestWithPlatformAndCustomerKeys, + } +} + +// DiskSecurityTypes - Specifies the SecurityType of the VM. Applicable for OS disks only. 
+type DiskSecurityTypes string + +const ( + // DiskSecurityTypesConfidentialVMDiskEncryptedWithCustomerKey - Indicates Confidential VM disk with both OS disk and VM guest + // state encrypted with a customer managed key + DiskSecurityTypesConfidentialVMDiskEncryptedWithCustomerKey DiskSecurityTypes = "ConfidentialVM_DiskEncryptedWithCustomerKey" + // DiskSecurityTypesConfidentialVMDiskEncryptedWithPlatformKey - Indicates Confidential VM disk with both OS disk and VM guest + // state encrypted with a platform managed key + DiskSecurityTypesConfidentialVMDiskEncryptedWithPlatformKey DiskSecurityTypes = "ConfidentialVM_DiskEncryptedWithPlatformKey" + // DiskSecurityTypesConfidentialVMVmguestStateOnlyEncryptedWithPlatformKey - Indicates Confidential VM disk with only VM guest + // state encrypted + DiskSecurityTypesConfidentialVMVmguestStateOnlyEncryptedWithPlatformKey DiskSecurityTypes = "ConfidentialVM_VMGuestStateOnlyEncryptedWithPlatformKey" + // DiskSecurityTypesTrustedLaunch - Trusted Launch provides security features such as secure boot and virtual Trusted Platform + // Module (vTPM) + DiskSecurityTypesTrustedLaunch DiskSecurityTypes = "TrustedLaunch" +) + +// PossibleDiskSecurityTypesValues returns the possible values for the DiskSecurityTypes const type. +func PossibleDiskSecurityTypesValues() []DiskSecurityTypes { + return []DiskSecurityTypes{ + DiskSecurityTypesConfidentialVMDiskEncryptedWithCustomerKey, + DiskSecurityTypesConfidentialVMDiskEncryptedWithPlatformKey, + DiskSecurityTypesConfidentialVMVmguestStateOnlyEncryptedWithPlatformKey, + DiskSecurityTypesTrustedLaunch, + } +} + +// DiskState - This enumerates the possible state of the disk. +type DiskState string + +const ( + // DiskStateActiveSAS - The disk currently has an Active SAS Uri associated with it. + DiskStateActiveSAS DiskState = "ActiveSAS" + // DiskStateActiveSASFrozen - The disk is attached to a VM in hibernated state and has an active SAS URI associated with it. + DiskStateActiveSASFrozen DiskState = "ActiveSASFrozen" + // DiskStateActiveUpload - A disk is created for upload and a write token has been issued for uploading to it. + DiskStateActiveUpload DiskState = "ActiveUpload" + // DiskStateAttached - The disk is currently attached to a running VM. + DiskStateAttached DiskState = "Attached" + // DiskStateFrozen - The disk is attached to a VM which is in hibernated state. + DiskStateFrozen DiskState = "Frozen" + // DiskStateReadyToUpload - A disk is ready to be created by upload by requesting a write token. + DiskStateReadyToUpload DiskState = "ReadyToUpload" + // DiskStateReserved - The disk is attached to a stopped-deallocated VM. + DiskStateReserved DiskState = "Reserved" + // DiskStateUnattached - The disk is not being used and can be attached to a VM. + DiskStateUnattached DiskState = "Unattached" +) + +// PossibleDiskStateValues returns the possible values for the DiskState const type. +func PossibleDiskStateValues() []DiskState { + return []DiskState{ + DiskStateActiveSAS, + DiskStateActiveSASFrozen, + DiskStateActiveUpload, + DiskStateAttached, + DiskStateFrozen, + DiskStateReadyToUpload, + DiskStateReserved, + DiskStateUnattached, + } +} + +// DiskStorageAccountTypes - The sku name. +type DiskStorageAccountTypes string + +const ( + // DiskStorageAccountTypesPremiumLRS - Premium SSD locally redundant storage. Best for production and performance sensitive + // workloads. 
+ DiskStorageAccountTypesPremiumLRS DiskStorageAccountTypes = "Premium_LRS" + // DiskStorageAccountTypesPremiumZRS - Premium SSD zone redundant storage. Best for the production workloads that need storage + // resiliency against zone failures. + DiskStorageAccountTypesPremiumZRS DiskStorageAccountTypes = "Premium_ZRS" + // DiskStorageAccountTypesStandardLRS - Standard HDD locally redundant storage. Best for backup, non-critical, and infrequent + // access. + DiskStorageAccountTypesStandardLRS DiskStorageAccountTypes = "Standard_LRS" + // DiskStorageAccountTypesStandardSSDLRS - Standard SSD locally redundant storage. Best for web servers, lightly used enterprise + // applications and dev/test. + DiskStorageAccountTypesStandardSSDLRS DiskStorageAccountTypes = "StandardSSD_LRS" + // DiskStorageAccountTypesStandardSSDZRS - Standard SSD zone redundant storage. Best for web servers, lightly used enterprise + // applications and dev/test that need storage resiliency against zone failures. + DiskStorageAccountTypesStandardSSDZRS DiskStorageAccountTypes = "StandardSSD_ZRS" + // DiskStorageAccountTypesUltraSSDLRS - Ultra SSD locally redundant storage. Best for IO-intensive workloads such as SAP HANA, + // top tier databases (for example, SQL, Oracle), and other transaction-heavy workloads. + DiskStorageAccountTypesUltraSSDLRS DiskStorageAccountTypes = "UltraSSD_LRS" +) + +// PossibleDiskStorageAccountTypesValues returns the possible values for the DiskStorageAccountTypes const type. +func PossibleDiskStorageAccountTypesValues() []DiskStorageAccountTypes { + return []DiskStorageAccountTypes{ + DiskStorageAccountTypesPremiumLRS, + DiskStorageAccountTypesPremiumZRS, + DiskStorageAccountTypesStandardLRS, + DiskStorageAccountTypesStandardSSDLRS, + DiskStorageAccountTypesStandardSSDZRS, + DiskStorageAccountTypesUltraSSDLRS, + } +} + +// EncryptionType - The type of key used to encrypt the data of the disk. +type EncryptionType string + +const ( + // EncryptionTypeEncryptionAtRestWithCustomerKey - Disk is encrypted at rest with Customer managed key that can be changed + // and revoked by a customer. + EncryptionTypeEncryptionAtRestWithCustomerKey EncryptionType = "EncryptionAtRestWithCustomerKey" + // EncryptionTypeEncryptionAtRestWithPlatformAndCustomerKeys - Disk is encrypted at rest with 2 layers of encryption. One + // of the keys is Customer managed and the other key is Platform managed. + EncryptionTypeEncryptionAtRestWithPlatformAndCustomerKeys EncryptionType = "EncryptionAtRestWithPlatformAndCustomerKeys" + // EncryptionTypeEncryptionAtRestWithPlatformKey - Disk is encrypted at rest with Platform managed key. It is the default + // encryption type. This is not a valid encryption type for disk encryption sets. + EncryptionTypeEncryptionAtRestWithPlatformKey EncryptionType = "EncryptionAtRestWithPlatformKey" +) + +// PossibleEncryptionTypeValues returns the possible values for the EncryptionType const type. +func PossibleEncryptionTypeValues() []EncryptionType { + return []EncryptionType{ + EncryptionTypeEncryptionAtRestWithCustomerKey, + EncryptionTypeEncryptionAtRestWithPlatformAndCustomerKeys, + EncryptionTypeEncryptionAtRestWithPlatformKey, + } +} + +// ExecutionState - Script execution status. 
+type ExecutionState string + +const ( + ExecutionStateCanceled ExecutionState = "Canceled" + ExecutionStateFailed ExecutionState = "Failed" + ExecutionStatePending ExecutionState = "Pending" + ExecutionStateRunning ExecutionState = "Running" + ExecutionStateSucceeded ExecutionState = "Succeeded" + ExecutionStateTimedOut ExecutionState = "TimedOut" + ExecutionStateUnknown ExecutionState = "Unknown" +) + +// PossibleExecutionStateValues returns the possible values for the ExecutionState const type. +func PossibleExecutionStateValues() []ExecutionState { + return []ExecutionState{ + ExecutionStateCanceled, + ExecutionStateFailed, + ExecutionStatePending, + ExecutionStateRunning, + ExecutionStateSucceeded, + ExecutionStateTimedOut, + ExecutionStateUnknown, + } +} + +type ExpandTypesForGetCapacityReservationGroups string + +const ( + ExpandTypesForGetCapacityReservationGroupsVirtualMachineScaleSetVMsRef ExpandTypesForGetCapacityReservationGroups = "virtualMachineScaleSetVMs/$ref" + ExpandTypesForGetCapacityReservationGroupsVirtualMachinesRef ExpandTypesForGetCapacityReservationGroups = "virtualMachines/$ref" +) + +// PossibleExpandTypesForGetCapacityReservationGroupsValues returns the possible values for the ExpandTypesForGetCapacityReservationGroups const type. +func PossibleExpandTypesForGetCapacityReservationGroupsValues() []ExpandTypesForGetCapacityReservationGroups { + return []ExpandTypesForGetCapacityReservationGroups{ + ExpandTypesForGetCapacityReservationGroupsVirtualMachineScaleSetVMsRef, + ExpandTypesForGetCapacityReservationGroupsVirtualMachinesRef, + } +} + +type ExpandTypesForGetVMScaleSets string + +const ( + ExpandTypesForGetVMScaleSetsUserData ExpandTypesForGetVMScaleSets = "userData" +) + +// PossibleExpandTypesForGetVMScaleSetsValues returns the possible values for the ExpandTypesForGetVMScaleSets const type. +func PossibleExpandTypesForGetVMScaleSetsValues() []ExpandTypesForGetVMScaleSets { + return []ExpandTypesForGetVMScaleSets{ + ExpandTypesForGetVMScaleSetsUserData, + } +} + +// ExtendedLocationType - The type of the extended location. +type ExtendedLocationType string + +const ( + ExtendedLocationTypeEdgeZone ExtendedLocationType = "EdgeZone" +) + +// PossibleExtendedLocationTypeValues returns the possible values for the ExtendedLocationType const type. +func PossibleExtendedLocationTypeValues() []ExtendedLocationType { + return []ExtendedLocationType{ + ExtendedLocationTypeEdgeZone, + } +} + +// ExtendedLocationTypes - The type of extendedLocation. +type ExtendedLocationTypes string + +const ( + ExtendedLocationTypesEdgeZone ExtendedLocationTypes = "EdgeZone" +) + +// PossibleExtendedLocationTypesValues returns the possible values for the ExtendedLocationTypes const type. +func PossibleExtendedLocationTypesValues() []ExtendedLocationTypes { + return []ExtendedLocationTypes{ + ExtendedLocationTypesEdgeZone, + } +} + +// GalleryApplicationVersionPropertiesProvisioningState - The provisioning state, which only appears in the response. 
+type GalleryApplicationVersionPropertiesProvisioningState string
+
+const (
+	GalleryApplicationVersionPropertiesProvisioningStateCreating  GalleryApplicationVersionPropertiesProvisioningState = "Creating"
+	GalleryApplicationVersionPropertiesProvisioningStateDeleting  GalleryApplicationVersionPropertiesProvisioningState = "Deleting"
+	GalleryApplicationVersionPropertiesProvisioningStateFailed    GalleryApplicationVersionPropertiesProvisioningState = "Failed"
+	GalleryApplicationVersionPropertiesProvisioningStateMigrating GalleryApplicationVersionPropertiesProvisioningState = "Migrating"
+	GalleryApplicationVersionPropertiesProvisioningStateSucceeded GalleryApplicationVersionPropertiesProvisioningState = "Succeeded"
+	GalleryApplicationVersionPropertiesProvisioningStateUpdating  GalleryApplicationVersionPropertiesProvisioningState = "Updating"
+)
+
+// PossibleGalleryApplicationVersionPropertiesProvisioningStateValues returns the possible values for the GalleryApplicationVersionPropertiesProvisioningState const type.
+func PossibleGalleryApplicationVersionPropertiesProvisioningStateValues() []GalleryApplicationVersionPropertiesProvisioningState {
+	return []GalleryApplicationVersionPropertiesProvisioningState{
+		GalleryApplicationVersionPropertiesProvisioningStateCreating,
+		GalleryApplicationVersionPropertiesProvisioningStateDeleting,
+		GalleryApplicationVersionPropertiesProvisioningStateFailed,
+		GalleryApplicationVersionPropertiesProvisioningStateMigrating,
+		GalleryApplicationVersionPropertiesProvisioningStateSucceeded,
+		GalleryApplicationVersionPropertiesProvisioningStateUpdating,
+	}
+}
+
+type GalleryExpandParams string
+
+const (
+	GalleryExpandParamsSharingProfileGroups GalleryExpandParams = "SharingProfile/Groups"
+)
+
+// PossibleGalleryExpandParamsValues returns the possible values for the GalleryExpandParams const type.
+func PossibleGalleryExpandParamsValues() []GalleryExpandParams {
+	return []GalleryExpandParams{
+		GalleryExpandParamsSharingProfileGroups,
+	}
+}
+
+// GalleryExtendedLocationType - The type of the extended location.
+type GalleryExtendedLocationType string
+
+const (
+	GalleryExtendedLocationTypeEdgeZone GalleryExtendedLocationType = "EdgeZone"
+	GalleryExtendedLocationTypeUnknown  GalleryExtendedLocationType = "Unknown"
+)
+
+// PossibleGalleryExtendedLocationTypeValues returns the possible values for the GalleryExtendedLocationType const type.
+func PossibleGalleryExtendedLocationTypeValues() []GalleryExtendedLocationType {
+	return []GalleryExtendedLocationType{
+		GalleryExtendedLocationTypeEdgeZone,
+		GalleryExtendedLocationTypeUnknown,
+	}
+}
+
+// GalleryImagePropertiesProvisioningState - The provisioning state, which only appears in the response.
+type GalleryImagePropertiesProvisioningState string + +const ( + GalleryImagePropertiesProvisioningStateCreating GalleryImagePropertiesProvisioningState = "Creating" + GalleryImagePropertiesProvisioningStateDeleting GalleryImagePropertiesProvisioningState = "Deleting" + GalleryImagePropertiesProvisioningStateFailed GalleryImagePropertiesProvisioningState = "Failed" + GalleryImagePropertiesProvisioningStateMigrating GalleryImagePropertiesProvisioningState = "Migrating" + GalleryImagePropertiesProvisioningStateSucceeded GalleryImagePropertiesProvisioningState = "Succeeded" + GalleryImagePropertiesProvisioningStateUpdating GalleryImagePropertiesProvisioningState = "Updating" +) + +// PossibleGalleryImagePropertiesProvisioningStateValues returns the possible values for the GalleryImagePropertiesProvisioningState const type. +func PossibleGalleryImagePropertiesProvisioningStateValues() []GalleryImagePropertiesProvisioningState { + return []GalleryImagePropertiesProvisioningState{ + GalleryImagePropertiesProvisioningStateCreating, + GalleryImagePropertiesProvisioningStateDeleting, + GalleryImagePropertiesProvisioningStateFailed, + GalleryImagePropertiesProvisioningStateMigrating, + GalleryImagePropertiesProvisioningStateSucceeded, + GalleryImagePropertiesProvisioningStateUpdating, + } +} + +// GalleryImageVersionPropertiesProvisioningState - The provisioning state, which only appears in the response. +type GalleryImageVersionPropertiesProvisioningState string + +const ( + GalleryImageVersionPropertiesProvisioningStateCreating GalleryImageVersionPropertiesProvisioningState = "Creating" + GalleryImageVersionPropertiesProvisioningStateDeleting GalleryImageVersionPropertiesProvisioningState = "Deleting" + GalleryImageVersionPropertiesProvisioningStateFailed GalleryImageVersionPropertiesProvisioningState = "Failed" + GalleryImageVersionPropertiesProvisioningStateMigrating GalleryImageVersionPropertiesProvisioningState = "Migrating" + GalleryImageVersionPropertiesProvisioningStateSucceeded GalleryImageVersionPropertiesProvisioningState = "Succeeded" + GalleryImageVersionPropertiesProvisioningStateUpdating GalleryImageVersionPropertiesProvisioningState = "Updating" +) + +// PossibleGalleryImageVersionPropertiesProvisioningStateValues returns the possible values for the GalleryImageVersionPropertiesProvisioningState const type. +func PossibleGalleryImageVersionPropertiesProvisioningStateValues() []GalleryImageVersionPropertiesProvisioningState { + return []GalleryImageVersionPropertiesProvisioningState{ + GalleryImageVersionPropertiesProvisioningStateCreating, + GalleryImageVersionPropertiesProvisioningStateDeleting, + GalleryImageVersionPropertiesProvisioningStateFailed, + GalleryImageVersionPropertiesProvisioningStateMigrating, + GalleryImageVersionPropertiesProvisioningStateSucceeded, + GalleryImageVersionPropertiesProvisioningStateUpdating, + } +} + +// GalleryPropertiesProvisioningState - The provisioning state, which only appears in the response. 
+type GalleryPropertiesProvisioningState string
+
+const (
+	GalleryPropertiesProvisioningStateCreating  GalleryPropertiesProvisioningState = "Creating"
+	GalleryPropertiesProvisioningStateDeleting  GalleryPropertiesProvisioningState = "Deleting"
+	GalleryPropertiesProvisioningStateFailed    GalleryPropertiesProvisioningState = "Failed"
+	GalleryPropertiesProvisioningStateMigrating GalleryPropertiesProvisioningState = "Migrating"
+	GalleryPropertiesProvisioningStateSucceeded GalleryPropertiesProvisioningState = "Succeeded"
+	GalleryPropertiesProvisioningStateUpdating  GalleryPropertiesProvisioningState = "Updating"
+)
+
+// PossibleGalleryPropertiesProvisioningStateValues returns the possible values for the GalleryPropertiesProvisioningState const type.
+func PossibleGalleryPropertiesProvisioningStateValues() []GalleryPropertiesProvisioningState {
+	return []GalleryPropertiesProvisioningState{
+		GalleryPropertiesProvisioningStateCreating,
+		GalleryPropertiesProvisioningStateDeleting,
+		GalleryPropertiesProvisioningStateFailed,
+		GalleryPropertiesProvisioningStateMigrating,
+		GalleryPropertiesProvisioningStateSucceeded,
+		GalleryPropertiesProvisioningStateUpdating,
+	}
+}
+
+// GallerySharingPermissionTypes - This property allows you to specify the sharing permission of the gallery.
+// Possible values are:
+// Private
+// Groups
+type GallerySharingPermissionTypes string
+
+const (
+	GallerySharingPermissionTypesGroups  GallerySharingPermissionTypes = "Groups"
+	GallerySharingPermissionTypesPrivate GallerySharingPermissionTypes = "Private"
+)
+
+// PossibleGallerySharingPermissionTypesValues returns the possible values for the GallerySharingPermissionTypes const type.
+func PossibleGallerySharingPermissionTypesValues() []GallerySharingPermissionTypes {
+	return []GallerySharingPermissionTypes{
+		GallerySharingPermissionTypesGroups,
+		GallerySharingPermissionTypesPrivate,
+	}
+}
+
+// HostCaching - The host caching of the disk. Valid values are 'None', 'ReadOnly', and 'ReadWrite'.
+type HostCaching string
+
+const (
+	HostCachingNone      HostCaching = "None"
+	HostCachingReadOnly  HostCaching = "ReadOnly"
+	HostCachingReadWrite HostCaching = "ReadWrite"
+)
+
+// PossibleHostCachingValues returns the possible values for the HostCaching const type.
+func PossibleHostCachingValues() []HostCaching {
+	return []HostCaching{
+		HostCachingNone,
+		HostCachingReadOnly,
+		HostCachingReadWrite,
+	}
+}
+
+// HyperVGeneration - The hypervisor generation of the Virtual Machine. Applicable to OS disks only.
+type HyperVGeneration string
+
+const (
+	HyperVGenerationV1 HyperVGeneration = "V1"
+	HyperVGenerationV2 HyperVGeneration = "V2"
+)
+
+// PossibleHyperVGenerationValues returns the possible values for the HyperVGeneration const type.
+func PossibleHyperVGenerationValues() []HyperVGeneration {
+	return []HyperVGeneration{
+		HyperVGenerationV1,
+		HyperVGenerationV2,
+	}
+}
+
+// HyperVGenerationType - Specifies the HyperVGeneration Type associated with a resource.
+type HyperVGenerationType string
+
+const (
+	HyperVGenerationTypeV1 HyperVGenerationType = "V1"
+	HyperVGenerationTypeV2 HyperVGenerationType = "V2"
+)
+
+// PossibleHyperVGenerationTypeValues returns the possible values for the HyperVGenerationType const type.
+func PossibleHyperVGenerationTypeValues() []HyperVGenerationType { + return []HyperVGenerationType{ + HyperVGenerationTypeV1, + HyperVGenerationTypeV2, + } +} + +// HyperVGenerationTypes - Specifies the HyperVGeneration Type +type HyperVGenerationTypes string + +const ( + HyperVGenerationTypesV1 HyperVGenerationTypes = "V1" + HyperVGenerationTypesV2 HyperVGenerationTypes = "V2" +) + +// PossibleHyperVGenerationTypesValues returns the possible values for the HyperVGenerationTypes const type. +func PossibleHyperVGenerationTypesValues() []HyperVGenerationTypes { + return []HyperVGenerationTypes{ + HyperVGenerationTypesV1, + HyperVGenerationTypesV2, + } +} + +// IPVersion - Available from Api-Version 2017-03-30 onwards, it represents whether the specific ipconfiguration is IPv4 or +// IPv6. Default is taken as IPv4. Possible values are: 'IPv4' and 'IPv6'. +type IPVersion string + +const ( + IPVersionIPv4 IPVersion = "IPv4" + IPVersionIPv6 IPVersion = "IPv6" +) + +// PossibleIPVersionValues returns the possible values for the IPVersion const type. +func PossibleIPVersionValues() []IPVersion { + return []IPVersion{ + IPVersionIPv4, + IPVersionIPv6, + } +} + +// IPVersions - Available from Api-Version 2019-07-01 onwards, it represents whether the specific ipconfiguration is IPv4 +// or IPv6. Default is taken as IPv4. Possible values are: 'IPv4' and 'IPv6'. +type IPVersions string + +const ( + IPVersionsIPv4 IPVersions = "IPv4" + IPVersionsIPv6 IPVersions = "IPv6" +) + +// PossibleIPVersionsValues returns the possible values for the IPVersions const type. +func PossibleIPVersionsValues() []IPVersions { + return []IPVersions{ + IPVersionsIPv4, + IPVersionsIPv6, + } +} + +type InstanceViewTypes string + +const ( + InstanceViewTypesInstanceView InstanceViewTypes = "instanceView" + InstanceViewTypesUserData InstanceViewTypes = "userData" +) + +// PossibleInstanceViewTypesValues returns the possible values for the InstanceViewTypes const type. +func PossibleInstanceViewTypesValues() []InstanceViewTypes { + return []InstanceViewTypes{ + InstanceViewTypesInstanceView, + InstanceViewTypesUserData, + } +} + +// IntervalInMins - Interval value in minutes used to create LogAnalytics call rate logs. +type IntervalInMins string + +const ( + IntervalInMinsThreeMins IntervalInMins = "ThreeMins" + IntervalInMinsFiveMins IntervalInMins = "FiveMins" + IntervalInMinsThirtyMins IntervalInMins = "ThirtyMins" + IntervalInMinsSixtyMins IntervalInMins = "SixtyMins" +) + +// PossibleIntervalInMinsValues returns the possible values for the IntervalInMins const type. +func PossibleIntervalInMinsValues() []IntervalInMins { + return []IntervalInMins{ + IntervalInMinsThreeMins, + IntervalInMinsFiveMins, + IntervalInMinsThirtyMins, + IntervalInMinsSixtyMins, + } +} + +// LinuxPatchAssessmentMode - Specifies the mode of VM Guest Patch Assessment for the IaaS virtual machine. +// Possible values are: +// ImageDefault - You control the timing of patch assessments on a virtual machine. +// AutomaticByPlatform - The platform will trigger periodic patch assessments. The property provisionVMAgent must be true. +type LinuxPatchAssessmentMode string + +const ( + LinuxPatchAssessmentModeAutomaticByPlatform LinuxPatchAssessmentMode = "AutomaticByPlatform" + LinuxPatchAssessmentModeImageDefault LinuxPatchAssessmentMode = "ImageDefault" +) + +// PossibleLinuxPatchAssessmentModeValues returns the possible values for the LinuxPatchAssessmentMode const type. 
+func PossibleLinuxPatchAssessmentModeValues() []LinuxPatchAssessmentMode { + return []LinuxPatchAssessmentMode{ + LinuxPatchAssessmentModeAutomaticByPlatform, + LinuxPatchAssessmentModeImageDefault, + } +} + +// LinuxVMGuestPatchAutomaticByPlatformRebootSetting - Specifies the reboot setting for all AutomaticByPlatform patch installation +// operations. +type LinuxVMGuestPatchAutomaticByPlatformRebootSetting string + +const ( + LinuxVMGuestPatchAutomaticByPlatformRebootSettingAlways LinuxVMGuestPatchAutomaticByPlatformRebootSetting = "Always" + LinuxVMGuestPatchAutomaticByPlatformRebootSettingIfRequired LinuxVMGuestPatchAutomaticByPlatformRebootSetting = "IfRequired" + LinuxVMGuestPatchAutomaticByPlatformRebootSettingNever LinuxVMGuestPatchAutomaticByPlatformRebootSetting = "Never" + LinuxVMGuestPatchAutomaticByPlatformRebootSettingUnknown LinuxVMGuestPatchAutomaticByPlatformRebootSetting = "Unknown" +) + +// PossibleLinuxVMGuestPatchAutomaticByPlatformRebootSettingValues returns the possible values for the LinuxVMGuestPatchAutomaticByPlatformRebootSetting const type. +func PossibleLinuxVMGuestPatchAutomaticByPlatformRebootSettingValues() []LinuxVMGuestPatchAutomaticByPlatformRebootSetting { + return []LinuxVMGuestPatchAutomaticByPlatformRebootSetting{ + LinuxVMGuestPatchAutomaticByPlatformRebootSettingAlways, + LinuxVMGuestPatchAutomaticByPlatformRebootSettingIfRequired, + LinuxVMGuestPatchAutomaticByPlatformRebootSettingNever, + LinuxVMGuestPatchAutomaticByPlatformRebootSettingUnknown, + } +} + +// LinuxVMGuestPatchMode - Specifies the mode of VM Guest Patching to IaaS virtual machine or virtual machines associated +// to virtual machine scale set with OrchestrationMode as Flexible. +// Possible values are: +// ImageDefault - The virtual machine's default patching configuration is used. +// AutomaticByPlatform - The virtual machine will be automatically updated by the platform. The property provisionVMAgent +// must be true +type LinuxVMGuestPatchMode string + +const ( + LinuxVMGuestPatchModeAutomaticByPlatform LinuxVMGuestPatchMode = "AutomaticByPlatform" + LinuxVMGuestPatchModeImageDefault LinuxVMGuestPatchMode = "ImageDefault" +) + +// PossibleLinuxVMGuestPatchModeValues returns the possible values for the LinuxVMGuestPatchMode const type. +func PossibleLinuxVMGuestPatchModeValues() []LinuxVMGuestPatchMode { + return []LinuxVMGuestPatchMode{ + LinuxVMGuestPatchModeAutomaticByPlatform, + LinuxVMGuestPatchModeImageDefault, + } +} + +// MaintenanceOperationResultCodeTypes - The Last Maintenance Operation Result Code. +type MaintenanceOperationResultCodeTypes string + +const ( + MaintenanceOperationResultCodeTypesNone MaintenanceOperationResultCodeTypes = "None" + MaintenanceOperationResultCodeTypesRetryLater MaintenanceOperationResultCodeTypes = "RetryLater" + MaintenanceOperationResultCodeTypesMaintenanceAborted MaintenanceOperationResultCodeTypes = "MaintenanceAborted" + MaintenanceOperationResultCodeTypesMaintenanceCompleted MaintenanceOperationResultCodeTypes = "MaintenanceCompleted" +) + +// PossibleMaintenanceOperationResultCodeTypesValues returns the possible values for the MaintenanceOperationResultCodeTypes const type. 
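Taken together, LinuxVMGuestPatchMode and LinuxPatchAssessmentMode control who drives patching on a Linux VM. A minimal sketch of setting both to AutomaticByPlatform; the LinuxConfiguration and LinuxPatchSettings model structs are assumed from the armcompute package that ships these constants, and to.Ptr is the azcore helper. Per the doc comments above, AutomaticByPlatform requires provisionVMAgent to be true:

    import "github.com/Azure/azure-sdk-for-go/sdk/azcore/to"

    // exampleLinuxPatching builds an OS configuration in which the platform
    // both assesses and installs patches. ProvisionVMAgent must be true for
    // either AutomaticByPlatform setting to be accepted.
    func exampleLinuxPatching() *LinuxConfiguration {
        return &LinuxConfiguration{
            ProvisionVMAgent: to.Ptr(true),
            PatchSettings: &LinuxPatchSettings{
                PatchMode:      to.Ptr(LinuxVMGuestPatchModeAutomaticByPlatform),
                AssessmentMode: to.Ptr(LinuxPatchAssessmentModeAutomaticByPlatform),
            },
        }
    }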
+func PossibleMaintenanceOperationResultCodeTypesValues() []MaintenanceOperationResultCodeTypes { + return []MaintenanceOperationResultCodeTypes{ + MaintenanceOperationResultCodeTypesNone, + MaintenanceOperationResultCodeTypesRetryLater, + MaintenanceOperationResultCodeTypesMaintenanceAborted, + MaintenanceOperationResultCodeTypesMaintenanceCompleted, + } +} + +// NetworkAPIVersion - specifies the Microsoft.Network API version used when creating networking resources in the Network +// Interface Configurations +type NetworkAPIVersion string + +const ( + NetworkAPIVersionTwoThousandTwenty1101 NetworkAPIVersion = "2020-11-01" +) + +// PossibleNetworkAPIVersionValues returns the possible values for the NetworkAPIVersion const type. +func PossibleNetworkAPIVersionValues() []NetworkAPIVersion { + return []NetworkAPIVersion{ + NetworkAPIVersionTwoThousandTwenty1101, + } +} + +// NetworkAccessPolicy - Policy for accessing the disk via network. +type NetworkAccessPolicy string + +const ( + // NetworkAccessPolicyAllowAll - The disk can be exported or uploaded to from any network. + NetworkAccessPolicyAllowAll NetworkAccessPolicy = "AllowAll" + // NetworkAccessPolicyAllowPrivate - The disk can be exported or uploaded to using a DiskAccess resource's private endpoints. + NetworkAccessPolicyAllowPrivate NetworkAccessPolicy = "AllowPrivate" + // NetworkAccessPolicyDenyAll - The disk cannot be exported. + NetworkAccessPolicyDenyAll NetworkAccessPolicy = "DenyAll" +) + +// PossibleNetworkAccessPolicyValues returns the possible values for the NetworkAccessPolicy const type. +func PossibleNetworkAccessPolicyValues() []NetworkAccessPolicy { + return []NetworkAccessPolicy{ + NetworkAccessPolicyAllowAll, + NetworkAccessPolicyAllowPrivate, + NetworkAccessPolicyDenyAll, + } +} + +// OperatingSystemStateTypes - This property allows the user to specify whether the virtual machines created under this image +// are 'Generalized' or 'Specialized'. +type OperatingSystemStateTypes string + +const ( + OperatingSystemStateTypesGeneralized OperatingSystemStateTypes = "Generalized" + OperatingSystemStateTypesSpecialized OperatingSystemStateTypes = "Specialized" +) + +// PossibleOperatingSystemStateTypesValues returns the possible values for the OperatingSystemStateTypes const type. +func PossibleOperatingSystemStateTypesValues() []OperatingSystemStateTypes { + return []OperatingSystemStateTypes{ + OperatingSystemStateTypesGeneralized, + OperatingSystemStateTypesSpecialized, + } +} + +// OperatingSystemType - Gets the Operating System type. +type OperatingSystemType string + +const ( + OperatingSystemTypeLinux OperatingSystemType = "Linux" + OperatingSystemTypeWindows OperatingSystemType = "Windows" +) + +// PossibleOperatingSystemTypeValues returns the possible values for the OperatingSystemType const type. +func PossibleOperatingSystemTypeValues() []OperatingSystemType { + return []OperatingSystemType{ + OperatingSystemTypeLinux, + OperatingSystemTypeWindows, + } +} + +// OperatingSystemTypes - This property allows you to specify the type of the OS that is included in the disk when creating +// a VM from a managed image. +// Possible values are: +// Windows +// Linux +type OperatingSystemTypes string + +const ( + OperatingSystemTypesWindows OperatingSystemTypes = "Windows" + OperatingSystemTypesLinux OperatingSystemTypes = "Linux" +) + +// PossibleOperatingSystemTypesValues returns the possible values for the OperatingSystemTypes const type. 
+func PossibleOperatingSystemTypesValues() []OperatingSystemTypes { + return []OperatingSystemTypes{ + OperatingSystemTypesWindows, + OperatingSystemTypesLinux, + } +} + +// OrchestrationMode - Specifies the orchestration mode for the virtual machine scale set. +type OrchestrationMode string + +const ( + OrchestrationModeFlexible OrchestrationMode = "Flexible" + OrchestrationModeUniform OrchestrationMode = "Uniform" +) + +// PossibleOrchestrationModeValues returns the possible values for the OrchestrationMode const type. +func PossibleOrchestrationModeValues() []OrchestrationMode { + return []OrchestrationMode{ + OrchestrationModeFlexible, + OrchestrationModeUniform, + } +} + +// OrchestrationServiceNames - The name of the service. +type OrchestrationServiceNames string + +const ( + OrchestrationServiceNamesAutomaticRepairs OrchestrationServiceNames = "AutomaticRepairs" +) + +// PossibleOrchestrationServiceNamesValues returns the possible values for the OrchestrationServiceNames const type. +func PossibleOrchestrationServiceNamesValues() []OrchestrationServiceNames { + return []OrchestrationServiceNames{ + OrchestrationServiceNamesAutomaticRepairs, + } +} + +// OrchestrationServiceState - The current state of the service. +type OrchestrationServiceState string + +const ( + OrchestrationServiceStateNotRunning OrchestrationServiceState = "NotRunning" + OrchestrationServiceStateRunning OrchestrationServiceState = "Running" + OrchestrationServiceStateSuspended OrchestrationServiceState = "Suspended" +) + +// PossibleOrchestrationServiceStateValues returns the possible values for the OrchestrationServiceState const type. +func PossibleOrchestrationServiceStateValues() []OrchestrationServiceState { + return []OrchestrationServiceState{ + OrchestrationServiceStateNotRunning, + OrchestrationServiceStateRunning, + OrchestrationServiceStateSuspended, + } +} + +// OrchestrationServiceStateAction - The action to be performed. +type OrchestrationServiceStateAction string + +const ( + OrchestrationServiceStateActionResume OrchestrationServiceStateAction = "Resume" + OrchestrationServiceStateActionSuspend OrchestrationServiceStateAction = "Suspend" +) + +// PossibleOrchestrationServiceStateActionValues returns the possible values for the OrchestrationServiceStateAction const type. +func PossibleOrchestrationServiceStateActionValues() []OrchestrationServiceStateAction { + return []OrchestrationServiceStateAction{ + OrchestrationServiceStateActionResume, + OrchestrationServiceStateActionSuspend, + } +} + +// PatchAssessmentState - Describes the availability of a given patch. +type PatchAssessmentState string + +const ( + PatchAssessmentStateAvailable PatchAssessmentState = "Available" + PatchAssessmentStateUnknown PatchAssessmentState = "Unknown" +) + +// PossiblePatchAssessmentStateValues returns the possible values for the PatchAssessmentState const type. +func PossiblePatchAssessmentStateValues() []PatchAssessmentState { + return []PatchAssessmentState{ + PatchAssessmentStateAvailable, + PatchAssessmentStateUnknown, + } +} + +// PatchInstallationState - The state of the patch after the installation operation completed. 
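OrchestrationServiceNames, OrchestrationServiceState and OrchestrationServiceStateAction are used together to pause and resume a scale set's AutomaticRepairs service. A hedged sketch; OrchestrationServiceStateInput and its ServiceName/Action fields are assumptions taken from the armcompute models that accompany these constants, and to.Ptr is the azcore helper:

    // suspendRepairsInput builds the request body for suspending automatic
    // repairs; resuming is the same shape with OrchestrationServiceStateActionResume.
    func suspendRepairsInput() OrchestrationServiceStateInput {
        return OrchestrationServiceStateInput{
            ServiceName: to.Ptr(OrchestrationServiceNamesAutomaticRepairs),
            Action:      to.Ptr(OrchestrationServiceStateActionSuspend),
        }
    }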
+type PatchInstallationState string + +const ( + PatchInstallationStateExcluded PatchInstallationState = "Excluded" + PatchInstallationStateFailed PatchInstallationState = "Failed" + PatchInstallationStateInstalled PatchInstallationState = "Installed" + PatchInstallationStateNotSelected PatchInstallationState = "NotSelected" + PatchInstallationStatePending PatchInstallationState = "Pending" + PatchInstallationStateUnknown PatchInstallationState = "Unknown" +) + +// PossiblePatchInstallationStateValues returns the possible values for the PatchInstallationState const type. +func PossiblePatchInstallationStateValues() []PatchInstallationState { + return []PatchInstallationState{ + PatchInstallationStateExcluded, + PatchInstallationStateFailed, + PatchInstallationStateInstalled, + PatchInstallationStateNotSelected, + PatchInstallationStatePending, + PatchInstallationStateUnknown, + } +} + +// PatchOperationStatus - The overall success or failure status of the operation. It remains "InProgress" until the operation +// completes. At that point it will become "Unknown", "Failed", "Succeeded", or +// "CompletedWithWarnings." +type PatchOperationStatus string + +const ( + PatchOperationStatusCompletedWithWarnings PatchOperationStatus = "CompletedWithWarnings" + PatchOperationStatusFailed PatchOperationStatus = "Failed" + PatchOperationStatusInProgress PatchOperationStatus = "InProgress" + PatchOperationStatusSucceeded PatchOperationStatus = "Succeeded" + PatchOperationStatusUnknown PatchOperationStatus = "Unknown" +) + +// PossiblePatchOperationStatusValues returns the possible values for the PatchOperationStatus const type. +func PossiblePatchOperationStatusValues() []PatchOperationStatus { + return []PatchOperationStatus{ + PatchOperationStatusCompletedWithWarnings, + PatchOperationStatusFailed, + PatchOperationStatusInProgress, + PatchOperationStatusSucceeded, + PatchOperationStatusUnknown, + } +} + +// PrivateEndpointConnectionProvisioningState - The current provisioning state. +type PrivateEndpointConnectionProvisioningState string + +const ( + PrivateEndpointConnectionProvisioningStateCreating PrivateEndpointConnectionProvisioningState = "Creating" + PrivateEndpointConnectionProvisioningStateDeleting PrivateEndpointConnectionProvisioningState = "Deleting" + PrivateEndpointConnectionProvisioningStateFailed PrivateEndpointConnectionProvisioningState = "Failed" + PrivateEndpointConnectionProvisioningStateSucceeded PrivateEndpointConnectionProvisioningState = "Succeeded" +) + +// PossiblePrivateEndpointConnectionProvisioningStateValues returns the possible values for the PrivateEndpointConnectionProvisioningState const type. +func PossiblePrivateEndpointConnectionProvisioningStateValues() []PrivateEndpointConnectionProvisioningState { + return []PrivateEndpointConnectionProvisioningState{ + PrivateEndpointConnectionProvisioningStateCreating, + PrivateEndpointConnectionProvisioningStateDeleting, + PrivateEndpointConnectionProvisioningStateFailed, + PrivateEndpointConnectionProvisioningStateSucceeded, + } +} + +// PrivateEndpointServiceConnectionStatus - The private endpoint connection status. 
+type PrivateEndpointServiceConnectionStatus string + +const ( + PrivateEndpointServiceConnectionStatusApproved PrivateEndpointServiceConnectionStatus = "Approved" + PrivateEndpointServiceConnectionStatusPending PrivateEndpointServiceConnectionStatus = "Pending" + PrivateEndpointServiceConnectionStatusRejected PrivateEndpointServiceConnectionStatus = "Rejected" +) + +// PossiblePrivateEndpointServiceConnectionStatusValues returns the possible values for the PrivateEndpointServiceConnectionStatus const type. +func PossiblePrivateEndpointServiceConnectionStatusValues() []PrivateEndpointServiceConnectionStatus { + return []PrivateEndpointServiceConnectionStatus{ + PrivateEndpointServiceConnectionStatusApproved, + PrivateEndpointServiceConnectionStatusPending, + PrivateEndpointServiceConnectionStatusRejected, + } +} + +// ProtocolTypes - Specifies the protocol of WinRM listener. +// Possible values are: +// http +// https +type ProtocolTypes string + +const ( + ProtocolTypesHTTP ProtocolTypes = "Http" + ProtocolTypesHTTPS ProtocolTypes = "Https" +) + +// PossibleProtocolTypesValues returns the possible values for the ProtocolTypes const type. +func PossibleProtocolTypesValues() []ProtocolTypes { + return []ProtocolTypes{ + ProtocolTypesHTTP, + ProtocolTypesHTTPS, + } +} + +// ProximityPlacementGroupType - Specifies the type of the proximity placement group. +// Possible values are: +// Standard : Co-locate resources within an Azure region or Availability Zone. +// Ultra : For future use. +type ProximityPlacementGroupType string + +const ( + ProximityPlacementGroupTypeStandard ProximityPlacementGroupType = "Standard" + ProximityPlacementGroupTypeUltra ProximityPlacementGroupType = "Ultra" +) + +// PossibleProximityPlacementGroupTypeValues returns the possible values for the ProximityPlacementGroupType const type. +func PossibleProximityPlacementGroupTypeValues() []ProximityPlacementGroupType { + return []ProximityPlacementGroupType{ + ProximityPlacementGroupTypeStandard, + ProximityPlacementGroupTypeUltra, + } +} + +// PublicIPAddressSKUName - Specify public IP sku name +type PublicIPAddressSKUName string + +const ( + PublicIPAddressSKUNameBasic PublicIPAddressSKUName = "Basic" + PublicIPAddressSKUNameStandard PublicIPAddressSKUName = "Standard" +) + +// PossiblePublicIPAddressSKUNameValues returns the possible values for the PublicIPAddressSKUName const type. +func PossiblePublicIPAddressSKUNameValues() []PublicIPAddressSKUName { + return []PublicIPAddressSKUName{ + PublicIPAddressSKUNameBasic, + PublicIPAddressSKUNameStandard, + } +} + +// PublicIPAddressSKUTier - Specify public IP sku tier +type PublicIPAddressSKUTier string + +const ( + PublicIPAddressSKUTierGlobal PublicIPAddressSKUTier = "Global" + PublicIPAddressSKUTierRegional PublicIPAddressSKUTier = "Regional" +) + +// PossiblePublicIPAddressSKUTierValues returns the possible values for the PublicIPAddressSKUTier const type. +func PossiblePublicIPAddressSKUTierValues() []PublicIPAddressSKUTier { + return []PublicIPAddressSKUTier{ + PublicIPAddressSKUTierGlobal, + PublicIPAddressSKUTierRegional, + } +} + +// PublicIPAllocationMethod - Specify the public IP allocation type +type PublicIPAllocationMethod string + +const ( + PublicIPAllocationMethodDynamic PublicIPAllocationMethod = "Dynamic" + PublicIPAllocationMethodStatic PublicIPAllocationMethod = "Static" +) + +// PossiblePublicIPAllocationMethodValues returns the possible values for the PublicIPAllocationMethod const type. 
+func PossiblePublicIPAllocationMethodValues() []PublicIPAllocationMethod { + return []PublicIPAllocationMethod{ + PublicIPAllocationMethodDynamic, + PublicIPAllocationMethodStatic, + } +} + +// PublicNetworkAccess - Policy for controlling export on the disk. +type PublicNetworkAccess string + +const ( + // PublicNetworkAccessDisabled - You cannot access the underlying data of the disk publicly on the internet even when NetworkAccessPolicy + // is set to AllowAll. You can access the data via the SAS URI only from your trusted Azure VNET when NetworkAccessPolicy + // is set to AllowPrivate. + PublicNetworkAccessDisabled PublicNetworkAccess = "Disabled" + // PublicNetworkAccessEnabled - You can generate a SAS URI to access the underlying data of the disk publicly on the internet + // when NetworkAccessPolicy is set to AllowAll. You can access the data via the SAS URI only from your trusted Azure VNET + // when NetworkAccessPolicy is set to AllowPrivate. + PublicNetworkAccessEnabled PublicNetworkAccess = "Enabled" +) + +// PossiblePublicNetworkAccessValues returns the possible values for the PublicNetworkAccess const type. +func PossiblePublicNetworkAccessValues() []PublicNetworkAccess { + return []PublicNetworkAccess{ + PublicNetworkAccessDisabled, + PublicNetworkAccessEnabled, + } +} + +// RepairAction - Type of repair action (replace, restart, reimage) that will be used for repairing unhealthy virtual machines +// in the scale set. Default value is replace. +type RepairAction string + +const ( + RepairActionReimage RepairAction = "Reimage" + RepairActionReplace RepairAction = "Replace" + RepairActionRestart RepairAction = "Restart" +) + +// PossibleRepairActionValues returns the possible values for the RepairAction const type. +func PossibleRepairActionValues() []RepairAction { + return []RepairAction{ + RepairActionReimage, + RepairActionReplace, + RepairActionRestart, + } +} + +// ReplicationMode - Optional parameter which specifies the mode to be used for replication. This property is not updatable. +type ReplicationMode string + +const ( + ReplicationModeFull ReplicationMode = "Full" + ReplicationModeShallow ReplicationMode = "Shallow" +) + +// PossibleReplicationModeValues returns the possible values for the ReplicationMode const type. +func PossibleReplicationModeValues() []ReplicationMode { + return []ReplicationMode{ + ReplicationModeFull, + ReplicationModeShallow, + } +} + +// ReplicationState - This is the regional replication state. +type ReplicationState string + +const ( + ReplicationStateCompleted ReplicationState = "Completed" + ReplicationStateFailed ReplicationState = "Failed" + ReplicationStateReplicating ReplicationState = "Replicating" + ReplicationStateUnknown ReplicationState = "Unknown" +) + +// PossibleReplicationStateValues returns the possible values for the ReplicationState const type. +func PossibleReplicationStateValues() []ReplicationState { + return []ReplicationState{ + ReplicationStateCompleted, + ReplicationStateFailed, + ReplicationStateReplicating, + ReplicationStateUnknown, + } +} + +type ReplicationStatusTypes string + +const ( + ReplicationStatusTypesReplicationStatus ReplicationStatusTypes = "ReplicationStatus" +) + +// PossibleReplicationStatusTypesValues returns the possible values for the ReplicationStatusTypes const type. 
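NetworkAccessPolicy (earlier in this file) and PublicNetworkAccess are complementary disk controls: the first governs which networks may export or upload to the disk, the second whether a public SAS URI can be generated at all. A sketch of the most restrictive useful pairing, assuming the DiskProperties model struct from armcompute and the azcore to.Ptr helper; note AllowPrivate additionally requires linking a DiskAccess resource, omitted here:

    // privateOnlyDisk permits export solely through a DiskAccess private
    // endpoint and disables public SAS generation entirely.
    func privateOnlyDisk() DiskProperties {
        return DiskProperties{
            NetworkAccessPolicy: to.Ptr(NetworkAccessPolicyAllowPrivate),
            PublicNetworkAccess: to.Ptr(PublicNetworkAccessDisabled),
        }
    }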
+func PossibleReplicationStatusTypesValues() []ReplicationStatusTypes { + return []ReplicationStatusTypes{ + ReplicationStatusTypesReplicationStatus, + } +} + +// ResourceIdentityType - The type of identity used for the virtual machine scale set. The type 'SystemAssigned, UserAssigned' +// includes both an implicitly created identity and a set of user assigned identities. The type 'None' +// will remove any identities from the virtual machine scale set. +type ResourceIdentityType string + +const ( + ResourceIdentityTypeSystemAssigned ResourceIdentityType = "SystemAssigned" + ResourceIdentityTypeUserAssigned ResourceIdentityType = "UserAssigned" + ResourceIdentityTypeSystemAssignedUserAssigned ResourceIdentityType = "SystemAssigned, UserAssigned" + ResourceIdentityTypeNone ResourceIdentityType = "None" +) + +// PossibleResourceIdentityTypeValues returns the possible values for the ResourceIdentityType const type. +func PossibleResourceIdentityTypeValues() []ResourceIdentityType { + return []ResourceIdentityType{ + ResourceIdentityTypeSystemAssigned, + ResourceIdentityTypeUserAssigned, + ResourceIdentityTypeSystemAssignedUserAssigned, + ResourceIdentityTypeNone, + } +} + +// ResourceSKUCapacityScaleType - The scale type applicable to the sku. +type ResourceSKUCapacityScaleType string + +const ( + ResourceSKUCapacityScaleTypeAutomatic ResourceSKUCapacityScaleType = "Automatic" + ResourceSKUCapacityScaleTypeManual ResourceSKUCapacityScaleType = "Manual" + ResourceSKUCapacityScaleTypeNone ResourceSKUCapacityScaleType = "None" +) + +// PossibleResourceSKUCapacityScaleTypeValues returns the possible values for the ResourceSKUCapacityScaleType const type. +func PossibleResourceSKUCapacityScaleTypeValues() []ResourceSKUCapacityScaleType { + return []ResourceSKUCapacityScaleType{ + ResourceSKUCapacityScaleTypeAutomatic, + ResourceSKUCapacityScaleTypeManual, + ResourceSKUCapacityScaleTypeNone, + } +} + +// ResourceSKURestrictionsReasonCode - The reason for restriction. +type ResourceSKURestrictionsReasonCode string + +const ( + ResourceSKURestrictionsReasonCodeQuotaID ResourceSKURestrictionsReasonCode = "QuotaId" + ResourceSKURestrictionsReasonCodeNotAvailableForSubscription ResourceSKURestrictionsReasonCode = "NotAvailableForSubscription" +) + +// PossibleResourceSKURestrictionsReasonCodeValues returns the possible values for the ResourceSKURestrictionsReasonCode const type. +func PossibleResourceSKURestrictionsReasonCodeValues() []ResourceSKURestrictionsReasonCode { + return []ResourceSKURestrictionsReasonCode{ + ResourceSKURestrictionsReasonCodeQuotaID, + ResourceSKURestrictionsReasonCodeNotAvailableForSubscription, + } +} + +// ResourceSKURestrictionsType - The type of restrictions. +type ResourceSKURestrictionsType string + +const ( + ResourceSKURestrictionsTypeLocation ResourceSKURestrictionsType = "Location" + ResourceSKURestrictionsTypeZone ResourceSKURestrictionsType = "Zone" +) + +// PossibleResourceSKURestrictionsTypeValues returns the possible values for the ResourceSKURestrictionsType const type. 
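Note that ResourceIdentityTypeSystemAssignedUserAssigned is a single string value, "SystemAssigned, UserAssigned", so a plain equality check against ResourceIdentityTypeUserAssigned will not detect the combined case. A small sketch using only constants from this file; the helper name is hypothetical:

    // hasUserAssigned reports whether an identity type includes user-assigned
    // identities, covering both the standalone and the combined value.
    func hasUserAssigned(t ResourceIdentityType) bool {
        return t == ResourceIdentityTypeUserAssigned ||
            t == ResourceIdentityTypeSystemAssignedUserAssigned
    }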
+func PossibleResourceSKURestrictionsTypeValues() []ResourceSKURestrictionsType { + return []ResourceSKURestrictionsType{ + ResourceSKURestrictionsTypeLocation, + ResourceSKURestrictionsTypeZone, + } +} + +type RestorePointCollectionExpandOptions string + +const ( + RestorePointCollectionExpandOptionsRestorePoints RestorePointCollectionExpandOptions = "restorePoints" +) + +// PossibleRestorePointCollectionExpandOptionsValues returns the possible values for the RestorePointCollectionExpandOptions const type. +func PossibleRestorePointCollectionExpandOptionsValues() []RestorePointCollectionExpandOptions { + return []RestorePointCollectionExpandOptions{ + RestorePointCollectionExpandOptionsRestorePoints, + } +} + +type RestorePointExpandOptions string + +const ( + RestorePointExpandOptionsInstanceView RestorePointExpandOptions = "instanceView" +) + +// PossibleRestorePointExpandOptionsValues returns the possible values for the RestorePointExpandOptions const type. +func PossibleRestorePointExpandOptionsValues() []RestorePointExpandOptions { + return []RestorePointExpandOptions{ + RestorePointExpandOptionsInstanceView, + } +} + +// RollingUpgradeActionType - The last action performed on the rolling upgrade. +type RollingUpgradeActionType string + +const ( + RollingUpgradeActionTypeStart RollingUpgradeActionType = "Start" + RollingUpgradeActionTypeCancel RollingUpgradeActionType = "Cancel" +) + +// PossibleRollingUpgradeActionTypeValues returns the possible values for the RollingUpgradeActionType const type. +func PossibleRollingUpgradeActionTypeValues() []RollingUpgradeActionType { + return []RollingUpgradeActionType{ + RollingUpgradeActionTypeStart, + RollingUpgradeActionTypeCancel, + } +} + +// RollingUpgradeStatusCode - Code indicating the current status of the upgrade. +type RollingUpgradeStatusCode string + +const ( + RollingUpgradeStatusCodeRollingForward RollingUpgradeStatusCode = "RollingForward" + RollingUpgradeStatusCodeCancelled RollingUpgradeStatusCode = "Cancelled" + RollingUpgradeStatusCodeCompleted RollingUpgradeStatusCode = "Completed" + RollingUpgradeStatusCodeFaulted RollingUpgradeStatusCode = "Faulted" +) + +// PossibleRollingUpgradeStatusCodeValues returns the possible values for the RollingUpgradeStatusCode const type. +func PossibleRollingUpgradeStatusCodeValues() []RollingUpgradeStatusCode { + return []RollingUpgradeStatusCode{ + RollingUpgradeStatusCodeRollingForward, + RollingUpgradeStatusCodeCancelled, + RollingUpgradeStatusCodeCompleted, + RollingUpgradeStatusCodeFaulted, + } +} + +// SecurityEncryptionTypes - Specifies the EncryptionType of the managed disk. +// It is set to DiskWithVMGuestState for encryption of the managed disk along with VMGuestState blob, and VMGuestStateOnly +// for encryption of just the VMGuestState blob. +// NOTE: It can be set for only Confidential VMs. +type SecurityEncryptionTypes string + +const ( + SecurityEncryptionTypesDiskWithVMGuestState SecurityEncryptionTypes = "DiskWithVMGuestState" + SecurityEncryptionTypesVMGuestStateOnly SecurityEncryptionTypes = "VMGuestStateOnly" +) + +// PossibleSecurityEncryptionTypesValues returns the possible values for the SecurityEncryptionTypes const type. +func PossibleSecurityEncryptionTypesValues() []SecurityEncryptionTypes { + return []SecurityEncryptionTypes{ + SecurityEncryptionTypesDiskWithVMGuestState, + SecurityEncryptionTypesVMGuestStateOnly, + } +} + +// SecurityTypes - Specifies the SecurityType of the virtual machine. It has to be set to any specified value to enable UefiSettings. 
+// Default: UefiSettings will not be enabled unless this property is set. +type SecurityTypes string + +const ( + SecurityTypesConfidentialVM SecurityTypes = "ConfidentialVM" + SecurityTypesTrustedLaunch SecurityTypes = "TrustedLaunch" +) + +// PossibleSecurityTypesValues returns the possible values for the SecurityTypes const type. +func PossibleSecurityTypesValues() []SecurityTypes { + return []SecurityTypes{ + SecurityTypesConfidentialVM, + SecurityTypesTrustedLaunch, + } +} + +type SelectPermissions string + +const ( + SelectPermissionsPermissions SelectPermissions = "Permissions" +) + +// PossibleSelectPermissionsValues returns the possible values for the SelectPermissions const type. +func PossibleSelectPermissionsValues() []SelectPermissions { + return []SelectPermissions{ + SelectPermissionsPermissions, + } +} + +// SettingNames - Specifies the name of the setting to which the content applies. Possible values are: FirstLogonCommands +// and AutoLogon. +type SettingNames string + +const ( + SettingNamesAutoLogon SettingNames = "AutoLogon" + SettingNamesFirstLogonCommands SettingNames = "FirstLogonCommands" +) + +// PossibleSettingNamesValues returns the possible values for the SettingNames const type. +func PossibleSettingNamesValues() []SettingNames { + return []SettingNames{ + SettingNamesAutoLogon, + SettingNamesFirstLogonCommands, + } +} + +type SharedToValues string + +const ( + SharedToValuesTenant SharedToValues = "tenant" +) + +// PossibleSharedToValuesValues returns the possible values for the SharedToValues const type. +func PossibleSharedToValuesValues() []SharedToValues { + return []SharedToValues{ + SharedToValuesTenant, + } +} + +// SharingProfileGroupTypes - This property allows you to specify the type of sharing group. +// Possible values are: +// Subscriptions +// AADTenants +// Community +type SharingProfileGroupTypes string + +const ( + SharingProfileGroupTypesAADTenants SharingProfileGroupTypes = "AADTenants" + SharingProfileGroupTypesCommunity SharingProfileGroupTypes = "Community" + SharingProfileGroupTypesSubscriptions SharingProfileGroupTypes = "Subscriptions" +) + +// PossibleSharingProfileGroupTypesValues returns the possible values for the SharingProfileGroupTypes const type. +func PossibleSharingProfileGroupTypesValues() []SharingProfileGroupTypes { + return []SharingProfileGroupTypes{ + SharingProfileGroupTypesAADTenants, + SharingProfileGroupTypesCommunity, + SharingProfileGroupTypesSubscriptions, + } +} + +// SharingState - The sharing state of the gallery, which only appears in the response. +type SharingState string + +const ( + SharingStateFailed SharingState = "Failed" + SharingStateInProgress SharingState = "InProgress" + SharingStateSucceeded SharingState = "Succeeded" + SharingStateUnknown SharingState = "Unknown" +) + +// PossibleSharingStateValues returns the possible values for the SharingState const type. +func PossibleSharingStateValues() []SharingState { + return []SharingState{ + SharingStateFailed, + SharingStateInProgress, + SharingStateSucceeded, + SharingStateUnknown, + } +} + +// SharingUpdateOperationTypes - This property allows you to specify the operation type of gallery sharing update. 
+// Possible values are: +// Add +// Remove +// Reset +type SharingUpdateOperationTypes string + +const ( + SharingUpdateOperationTypesAdd SharingUpdateOperationTypes = "Add" + SharingUpdateOperationTypesEnableCommunity SharingUpdateOperationTypes = "EnableCommunity" + SharingUpdateOperationTypesRemove SharingUpdateOperationTypes = "Remove" + SharingUpdateOperationTypesReset SharingUpdateOperationTypes = "Reset" +) + +// PossibleSharingUpdateOperationTypesValues returns the possible values for the SharingUpdateOperationTypes const type. +func PossibleSharingUpdateOperationTypesValues() []SharingUpdateOperationTypes { + return []SharingUpdateOperationTypes{ + SharingUpdateOperationTypesAdd, + SharingUpdateOperationTypesEnableCommunity, + SharingUpdateOperationTypesRemove, + SharingUpdateOperationTypesReset, + } +} + +// SnapshotStorageAccountTypes - The sku name. +type SnapshotStorageAccountTypes string + +const ( + // SnapshotStorageAccountTypesPremiumLRS - Premium SSD locally redundant storage + SnapshotStorageAccountTypesPremiumLRS SnapshotStorageAccountTypes = "Premium_LRS" + // SnapshotStorageAccountTypesStandardLRS - Standard HDD locally redundant storage + SnapshotStorageAccountTypesStandardLRS SnapshotStorageAccountTypes = "Standard_LRS" + // SnapshotStorageAccountTypesStandardZRS - Standard zone redundant storage + SnapshotStorageAccountTypesStandardZRS SnapshotStorageAccountTypes = "Standard_ZRS" +) + +// PossibleSnapshotStorageAccountTypesValues returns the possible values for the SnapshotStorageAccountTypes const type. +func PossibleSnapshotStorageAccountTypesValues() []SnapshotStorageAccountTypes { + return []SnapshotStorageAccountTypes{ + SnapshotStorageAccountTypesPremiumLRS, + SnapshotStorageAccountTypesStandardLRS, + SnapshotStorageAccountTypesStandardZRS, + } +} + +// StatusLevelTypes - The level code. +type StatusLevelTypes string + +const ( + StatusLevelTypesInfo StatusLevelTypes = "Info" + StatusLevelTypesWarning StatusLevelTypes = "Warning" + StatusLevelTypesError StatusLevelTypes = "Error" +) + +// PossibleStatusLevelTypesValues returns the possible values for the StatusLevelTypes const type. +func PossibleStatusLevelTypesValues() []StatusLevelTypes { + return []StatusLevelTypes{ + StatusLevelTypesInfo, + StatusLevelTypesWarning, + StatusLevelTypesError, + } +} + +// StorageAccountType - Specifies the storage account type to be used to store the image. This property is not updatable. +type StorageAccountType string + +const ( + StorageAccountTypePremiumLRS StorageAccountType = "Premium_LRS" + StorageAccountTypeStandardLRS StorageAccountType = "Standard_LRS" + StorageAccountTypeStandardZRS StorageAccountType = "Standard_ZRS" +) + +// PossibleStorageAccountTypeValues returns the possible values for the StorageAccountType const type. +func PossibleStorageAccountTypeValues() []StorageAccountType { + return []StorageAccountType{ + StorageAccountTypePremiumLRS, + StorageAccountTypeStandardLRS, + StorageAccountTypeStandardZRS, + } +} + +// StorageAccountTypes - Specifies the storage account type for the managed disk. Managed OS disk storage account type can +// only be set when you create the scale set. NOTE: UltraSSDLRS can only be used with data disks. It +// cannot be used with OS Disk. StandardLRS uses Standard HDD. StandardSSDLRS uses Standard SSD. PremiumLRS uses Premium SSD. +// UltraSSDLRS uses Ultra disk. PremiumZRS uses Premium SSD zone redundant +// storage. StandardSSD_ZRS uses Standard SSD zone redundant storage. 
For more information regarding disks supported for Windows +// Virtual Machines, refer to +// https://docs.microsoft.com/azure/virtual-machines/windows/disks-types and, for Linux Virtual Machines, refer to https://docs.microsoft.com/azure/virtual-machines/linux/disks-types +type StorageAccountTypes string + +const ( + StorageAccountTypesPremiumLRS StorageAccountTypes = "Premium_LRS" + StorageAccountTypesPremiumV2LRS StorageAccountTypes = "PremiumV2_LRS" + StorageAccountTypesPremiumZRS StorageAccountTypes = "Premium_ZRS" + StorageAccountTypesStandardLRS StorageAccountTypes = "Standard_LRS" + StorageAccountTypesStandardSSDLRS StorageAccountTypes = "StandardSSD_LRS" + StorageAccountTypesStandardSSDZRS StorageAccountTypes = "StandardSSD_ZRS" + StorageAccountTypesUltraSSDLRS StorageAccountTypes = "UltraSSD_LRS" +) + +// PossibleStorageAccountTypesValues returns the possible values for the StorageAccountTypes const type. +func PossibleStorageAccountTypesValues() []StorageAccountTypes { + return []StorageAccountTypes{ + StorageAccountTypesPremiumLRS, + StorageAccountTypesPremiumV2LRS, + StorageAccountTypesPremiumZRS, + StorageAccountTypesStandardLRS, + StorageAccountTypesStandardSSDLRS, + StorageAccountTypesStandardSSDZRS, + StorageAccountTypesUltraSSDLRS, + } +} + +// UpgradeMode - Specifies the mode of an upgrade to virtual machines in the scale set. +// Possible values are: +// Manual - You control the application of updates to virtual machines in the scale set. You do this by using the manualUpgrade +// action. +// Automatic - All virtual machines in the scale set are automatically updated at the same time. +type UpgradeMode string + +const ( + UpgradeModeAutomatic UpgradeMode = "Automatic" + UpgradeModeManual UpgradeMode = "Manual" + UpgradeModeRolling UpgradeMode = "Rolling" +) + +// PossibleUpgradeModeValues returns the possible values for the UpgradeMode const type. +func PossibleUpgradeModeValues() []UpgradeMode { + return []UpgradeMode{ + UpgradeModeAutomatic, + UpgradeModeManual, + UpgradeModeRolling, + } +} + +// UpgradeOperationInvoker - Invoker of the Upgrade Operation +type UpgradeOperationInvoker string + +const ( + UpgradeOperationInvokerUnknown UpgradeOperationInvoker = "Unknown" + UpgradeOperationInvokerUser UpgradeOperationInvoker = "User" + UpgradeOperationInvokerPlatform UpgradeOperationInvoker = "Platform" +) + +// PossibleUpgradeOperationInvokerValues returns the possible values for the UpgradeOperationInvoker const type. +func PossibleUpgradeOperationInvokerValues() []UpgradeOperationInvoker { + return []UpgradeOperationInvoker{ + UpgradeOperationInvokerUnknown, + UpgradeOperationInvokerUser, + UpgradeOperationInvokerPlatform, + } +} + +// UpgradeState - Code indicating the current status of the upgrade. +type UpgradeState string + +const ( + UpgradeStateRollingForward UpgradeState = "RollingForward" + UpgradeStateCancelled UpgradeState = "Cancelled" + UpgradeStateCompleted UpgradeState = "Completed" + UpgradeStateFaulted UpgradeState = "Faulted" +) + +// PossibleUpgradeStateValues returns the possible values for the UpgradeState const type. +func PossibleUpgradeStateValues() []UpgradeState { + return []UpgradeState{ + UpgradeStateRollingForward, + UpgradeStateCancelled, + UpgradeStateCompleted, + UpgradeStateFaulted, + } +} + +// VMDiskTypes - VM disk types which are disallowed. 
+type VMDiskTypes string + +const ( + VMDiskTypesNone VMDiskTypes = "None" + VMDiskTypesUnmanaged VMDiskTypes = "Unmanaged" +) + +// PossibleVMDiskTypesValues returns the possible values for the VMDiskTypes const type. +func PossibleVMDiskTypesValues() []VMDiskTypes { + return []VMDiskTypes{ + VMDiskTypesNone, + VMDiskTypesUnmanaged, + } +} + +type VMGuestPatchClassificationLinux string + +const ( + VMGuestPatchClassificationLinuxCritical VMGuestPatchClassificationLinux = "Critical" + VMGuestPatchClassificationLinuxOther VMGuestPatchClassificationLinux = "Other" + VMGuestPatchClassificationLinuxSecurity VMGuestPatchClassificationLinux = "Security" +) + +// PossibleVMGuestPatchClassificationLinuxValues returns the possible values for the VMGuestPatchClassificationLinux const type. +func PossibleVMGuestPatchClassificationLinuxValues() []VMGuestPatchClassificationLinux { + return []VMGuestPatchClassificationLinux{ + VMGuestPatchClassificationLinuxCritical, + VMGuestPatchClassificationLinuxOther, + VMGuestPatchClassificationLinuxSecurity, + } +} + +type VMGuestPatchClassificationWindows string + +const ( + VMGuestPatchClassificationWindowsCritical VMGuestPatchClassificationWindows = "Critical" + VMGuestPatchClassificationWindowsDefinition VMGuestPatchClassificationWindows = "Definition" + VMGuestPatchClassificationWindowsFeaturePack VMGuestPatchClassificationWindows = "FeaturePack" + VMGuestPatchClassificationWindowsSecurity VMGuestPatchClassificationWindows = "Security" + VMGuestPatchClassificationWindowsServicePack VMGuestPatchClassificationWindows = "ServicePack" + VMGuestPatchClassificationWindowsTools VMGuestPatchClassificationWindows = "Tools" + VMGuestPatchClassificationWindowsUpdateRollUp VMGuestPatchClassificationWindows = "UpdateRollUp" + VMGuestPatchClassificationWindowsUpdates VMGuestPatchClassificationWindows = "Updates" +) + +// PossibleVMGuestPatchClassificationWindowsValues returns the possible values for the VMGuestPatchClassificationWindows const type. +func PossibleVMGuestPatchClassificationWindowsValues() []VMGuestPatchClassificationWindows { + return []VMGuestPatchClassificationWindows{ + VMGuestPatchClassificationWindowsCritical, + VMGuestPatchClassificationWindowsDefinition, + VMGuestPatchClassificationWindowsFeaturePack, + VMGuestPatchClassificationWindowsSecurity, + VMGuestPatchClassificationWindowsServicePack, + VMGuestPatchClassificationWindowsTools, + VMGuestPatchClassificationWindowsUpdateRollUp, + VMGuestPatchClassificationWindowsUpdates, + } +} + +// VMGuestPatchRebootBehavior - Describes the reboot requirements of the patch. +type VMGuestPatchRebootBehavior string + +const ( + VMGuestPatchRebootBehaviorAlwaysRequiresReboot VMGuestPatchRebootBehavior = "AlwaysRequiresReboot" + VMGuestPatchRebootBehaviorCanRequestReboot VMGuestPatchRebootBehavior = "CanRequestReboot" + VMGuestPatchRebootBehaviorNeverReboots VMGuestPatchRebootBehavior = "NeverReboots" + VMGuestPatchRebootBehaviorUnknown VMGuestPatchRebootBehavior = "Unknown" +) + +// PossibleVMGuestPatchRebootBehaviorValues returns the possible values for the VMGuestPatchRebootBehavior const type. +func PossibleVMGuestPatchRebootBehaviorValues() []VMGuestPatchRebootBehavior { + return []VMGuestPatchRebootBehavior{ + VMGuestPatchRebootBehaviorAlwaysRequiresReboot, + VMGuestPatchRebootBehaviorCanRequestReboot, + VMGuestPatchRebootBehaviorNeverReboots, + VMGuestPatchRebootBehaviorUnknown, + } +} + +// VMGuestPatchRebootSetting - Defines when it is acceptable to reboot a VM during a software update operation. 
+type VMGuestPatchRebootSetting string + +const ( + VMGuestPatchRebootSettingAlways VMGuestPatchRebootSetting = "Always" + VMGuestPatchRebootSettingIfRequired VMGuestPatchRebootSetting = "IfRequired" + VMGuestPatchRebootSettingNever VMGuestPatchRebootSetting = "Never" +) + +// PossibleVMGuestPatchRebootSettingValues returns the possible values for the VMGuestPatchRebootSetting const type. +func PossibleVMGuestPatchRebootSettingValues() []VMGuestPatchRebootSetting { + return []VMGuestPatchRebootSetting{ + VMGuestPatchRebootSettingAlways, + VMGuestPatchRebootSettingIfRequired, + VMGuestPatchRebootSettingNever, + } +} + +// VMGuestPatchRebootStatus - The reboot state of the VM following completion of the operation. +type VMGuestPatchRebootStatus string + +const ( + VMGuestPatchRebootStatusCompleted VMGuestPatchRebootStatus = "Completed" + VMGuestPatchRebootStatusFailed VMGuestPatchRebootStatus = "Failed" + VMGuestPatchRebootStatusNotNeeded VMGuestPatchRebootStatus = "NotNeeded" + VMGuestPatchRebootStatusRequired VMGuestPatchRebootStatus = "Required" + VMGuestPatchRebootStatusStarted VMGuestPatchRebootStatus = "Started" + VMGuestPatchRebootStatusUnknown VMGuestPatchRebootStatus = "Unknown" +) + +// PossibleVMGuestPatchRebootStatusValues returns the possible values for the VMGuestPatchRebootStatus const type. +func PossibleVMGuestPatchRebootStatusValues() []VMGuestPatchRebootStatus { + return []VMGuestPatchRebootStatus{ + VMGuestPatchRebootStatusCompleted, + VMGuestPatchRebootStatusFailed, + VMGuestPatchRebootStatusNotNeeded, + VMGuestPatchRebootStatusRequired, + VMGuestPatchRebootStatusStarted, + VMGuestPatchRebootStatusUnknown, + } +} + +// VirtualMachineEvictionPolicyTypes - Specifies the eviction policy for the Azure Spot VM/VMSS +type VirtualMachineEvictionPolicyTypes string + +const ( + VirtualMachineEvictionPolicyTypesDeallocate VirtualMachineEvictionPolicyTypes = "Deallocate" + VirtualMachineEvictionPolicyTypesDelete VirtualMachineEvictionPolicyTypes = "Delete" +) + +// PossibleVirtualMachineEvictionPolicyTypesValues returns the possible values for the VirtualMachineEvictionPolicyTypes const type. +func PossibleVirtualMachineEvictionPolicyTypesValues() []VirtualMachineEvictionPolicyTypes { + return []VirtualMachineEvictionPolicyTypes{ + VirtualMachineEvictionPolicyTypesDeallocate, + VirtualMachineEvictionPolicyTypesDelete, + } +} + +// VirtualMachinePriorityTypes - Specifies the priority for a standalone virtual machine or the virtual machines in the scale +// set. +// 'Low' enum will be deprecated in the future, please use 'Spot' as the enum to deploy Azure Spot VM/VMSS. +type VirtualMachinePriorityTypes string + +const ( + VirtualMachinePriorityTypesLow VirtualMachinePriorityTypes = "Low" + VirtualMachinePriorityTypesRegular VirtualMachinePriorityTypes = "Regular" + VirtualMachinePriorityTypesSpot VirtualMachinePriorityTypes = "Spot" +) + +// PossibleVirtualMachinePriorityTypesValues returns the possible values for the VirtualMachinePriorityTypes const type. +func PossibleVirtualMachinePriorityTypesValues() []VirtualMachinePriorityTypes { + return []VirtualMachinePriorityTypes{ + VirtualMachinePriorityTypesLow, + VirtualMachinePriorityTypesRegular, + VirtualMachinePriorityTypesSpot, + } +} + +// VirtualMachineScaleSetSKUScaleType - The scale type applicable to the sku. 
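VirtualMachinePriorityTypes and VirtualMachineEvictionPolicyTypes are set together when requesting Azure Spot capacity; as the doc comment notes, Spot supersedes the deprecated Low value. A sketch assuming the VirtualMachineProperties model struct from armcompute and the azcore to.Ptr helper:

    // spotWithDeallocate requests Spot pricing and keeps the disks by
    // deallocating (rather than deleting) the VM on eviction.
    func spotWithDeallocate() VirtualMachineProperties {
        return VirtualMachineProperties{
            Priority:       to.Ptr(VirtualMachinePriorityTypesSpot),
            EvictionPolicy: to.Ptr(VirtualMachineEvictionPolicyTypesDeallocate),
        }
    }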
+type VirtualMachineScaleSetSKUScaleType string + +const ( + VirtualMachineScaleSetSKUScaleTypeAutomatic VirtualMachineScaleSetSKUScaleType = "Automatic" + VirtualMachineScaleSetSKUScaleTypeNone VirtualMachineScaleSetSKUScaleType = "None" +) + +// PossibleVirtualMachineScaleSetSKUScaleTypeValues returns the possible values for the VirtualMachineScaleSetSKUScaleType const type. +func PossibleVirtualMachineScaleSetSKUScaleTypeValues() []VirtualMachineScaleSetSKUScaleType { + return []VirtualMachineScaleSetSKUScaleType{ + VirtualMachineScaleSetSKUScaleTypeAutomatic, + VirtualMachineScaleSetSKUScaleTypeNone, + } +} + +type VirtualMachineScaleSetScaleInRules string + +const ( + VirtualMachineScaleSetScaleInRulesDefault VirtualMachineScaleSetScaleInRules = "Default" + VirtualMachineScaleSetScaleInRulesNewestVM VirtualMachineScaleSetScaleInRules = "NewestVM" + VirtualMachineScaleSetScaleInRulesOldestVM VirtualMachineScaleSetScaleInRules = "OldestVM" +) + +// PossibleVirtualMachineScaleSetScaleInRulesValues returns the possible values for the VirtualMachineScaleSetScaleInRules const type. +func PossibleVirtualMachineScaleSetScaleInRulesValues() []VirtualMachineScaleSetScaleInRules { + return []VirtualMachineScaleSetScaleInRules{ + VirtualMachineScaleSetScaleInRulesDefault, + VirtualMachineScaleSetScaleInRulesNewestVM, + VirtualMachineScaleSetScaleInRulesOldestVM, + } +} + +// VirtualMachineSizeTypes - Specifies the size of the virtual machine. +// The enum data type is currently deprecated and will be removed by December 23rd 2023. +// Recommended way to get the list of available sizes is using these APIs: +// List all available virtual machine sizes in an availability set [https://docs.microsoft.com/rest/api/compute/availabilitysets/listavailablesizes] +// List all available virtual machine sizes in a region [https://docs.microsoft.com/rest/api/compute/resourceskus/list] +// List all available virtual machine sizes for resizing [https://docs.microsoft.com/rest/api/compute/virtualmachines/listavailablesizes]. +// For more information about virtual machine sizes, see Sizes for +// virtual machines [https://docs.microsoft.com/azure/virtual-machines/sizes]. +// The available VM sizes depend on region and availability set. 
+type VirtualMachineSizeTypes string + +const ( + VirtualMachineSizeTypesBasicA0 VirtualMachineSizeTypes = "Basic_A0" + VirtualMachineSizeTypesBasicA1 VirtualMachineSizeTypes = "Basic_A1" + VirtualMachineSizeTypesBasicA2 VirtualMachineSizeTypes = "Basic_A2" + VirtualMachineSizeTypesBasicA3 VirtualMachineSizeTypes = "Basic_A3" + VirtualMachineSizeTypesBasicA4 VirtualMachineSizeTypes = "Basic_A4" + VirtualMachineSizeTypesStandardA0 VirtualMachineSizeTypes = "Standard_A0" + VirtualMachineSizeTypesStandardA1 VirtualMachineSizeTypes = "Standard_A1" + VirtualMachineSizeTypesStandardA10 VirtualMachineSizeTypes = "Standard_A10" + VirtualMachineSizeTypesStandardA11 VirtualMachineSizeTypes = "Standard_A11" + VirtualMachineSizeTypesStandardA1V2 VirtualMachineSizeTypes = "Standard_A1_v2" + VirtualMachineSizeTypesStandardA2 VirtualMachineSizeTypes = "Standard_A2" + VirtualMachineSizeTypesStandardA2MV2 VirtualMachineSizeTypes = "Standard_A2m_v2" + VirtualMachineSizeTypesStandardA2V2 VirtualMachineSizeTypes = "Standard_A2_v2" + VirtualMachineSizeTypesStandardA3 VirtualMachineSizeTypes = "Standard_A3" + VirtualMachineSizeTypesStandardA4 VirtualMachineSizeTypes = "Standard_A4" + VirtualMachineSizeTypesStandardA4MV2 VirtualMachineSizeTypes = "Standard_A4m_v2" + VirtualMachineSizeTypesStandardA4V2 VirtualMachineSizeTypes = "Standard_A4_v2" + VirtualMachineSizeTypesStandardA5 VirtualMachineSizeTypes = "Standard_A5" + VirtualMachineSizeTypesStandardA6 VirtualMachineSizeTypes = "Standard_A6" + VirtualMachineSizeTypesStandardA7 VirtualMachineSizeTypes = "Standard_A7" + VirtualMachineSizeTypesStandardA8 VirtualMachineSizeTypes = "Standard_A8" + VirtualMachineSizeTypesStandardA8MV2 VirtualMachineSizeTypes = "Standard_A8m_v2" + VirtualMachineSizeTypesStandardA8V2 VirtualMachineSizeTypes = "Standard_A8_v2" + VirtualMachineSizeTypesStandardA9 VirtualMachineSizeTypes = "Standard_A9" + VirtualMachineSizeTypesStandardB1Ms VirtualMachineSizeTypes = "Standard_B1ms" + VirtualMachineSizeTypesStandardB1S VirtualMachineSizeTypes = "Standard_B1s" + VirtualMachineSizeTypesStandardB2Ms VirtualMachineSizeTypes = "Standard_B2ms" + VirtualMachineSizeTypesStandardB2S VirtualMachineSizeTypes = "Standard_B2s" + VirtualMachineSizeTypesStandardB4Ms VirtualMachineSizeTypes = "Standard_B4ms" + VirtualMachineSizeTypesStandardB8Ms VirtualMachineSizeTypes = "Standard_B8ms" + VirtualMachineSizeTypesStandardD1 VirtualMachineSizeTypes = "Standard_D1" + VirtualMachineSizeTypesStandardD11 VirtualMachineSizeTypes = "Standard_D11" + VirtualMachineSizeTypesStandardD11V2 VirtualMachineSizeTypes = "Standard_D11_v2" + VirtualMachineSizeTypesStandardD12 VirtualMachineSizeTypes = "Standard_D12" + VirtualMachineSizeTypesStandardD12V2 VirtualMachineSizeTypes = "Standard_D12_v2" + VirtualMachineSizeTypesStandardD13 VirtualMachineSizeTypes = "Standard_D13" + VirtualMachineSizeTypesStandardD13V2 VirtualMachineSizeTypes = "Standard_D13_v2" + VirtualMachineSizeTypesStandardD14 VirtualMachineSizeTypes = "Standard_D14" + VirtualMachineSizeTypesStandardD14V2 VirtualMachineSizeTypes = "Standard_D14_v2" + VirtualMachineSizeTypesStandardD15V2 VirtualMachineSizeTypes = "Standard_D15_v2" + VirtualMachineSizeTypesStandardD16SV3 VirtualMachineSizeTypes = "Standard_D16s_v3" + VirtualMachineSizeTypesStandardD16V3 VirtualMachineSizeTypes = "Standard_D16_v3" + VirtualMachineSizeTypesStandardD1V2 VirtualMachineSizeTypes = "Standard_D1_v2" + VirtualMachineSizeTypesStandardD2 VirtualMachineSizeTypes = "Standard_D2" + VirtualMachineSizeTypesStandardD2SV3 VirtualMachineSizeTypes 
= "Standard_D2s_v3" + VirtualMachineSizeTypesStandardD2V2 VirtualMachineSizeTypes = "Standard_D2_v2" + VirtualMachineSizeTypesStandardD2V3 VirtualMachineSizeTypes = "Standard_D2_v3" + VirtualMachineSizeTypesStandardD3 VirtualMachineSizeTypes = "Standard_D3" + VirtualMachineSizeTypesStandardD32SV3 VirtualMachineSizeTypes = "Standard_D32s_v3" + VirtualMachineSizeTypesStandardD32V3 VirtualMachineSizeTypes = "Standard_D32_v3" + VirtualMachineSizeTypesStandardD3V2 VirtualMachineSizeTypes = "Standard_D3_v2" + VirtualMachineSizeTypesStandardD4 VirtualMachineSizeTypes = "Standard_D4" + VirtualMachineSizeTypesStandardD4SV3 VirtualMachineSizeTypes = "Standard_D4s_v3" + VirtualMachineSizeTypesStandardD4V2 VirtualMachineSizeTypes = "Standard_D4_v2" + VirtualMachineSizeTypesStandardD4V3 VirtualMachineSizeTypes = "Standard_D4_v3" + VirtualMachineSizeTypesStandardD5V2 VirtualMachineSizeTypes = "Standard_D5_v2" + VirtualMachineSizeTypesStandardD64SV3 VirtualMachineSizeTypes = "Standard_D64s_v3" + VirtualMachineSizeTypesStandardD64V3 VirtualMachineSizeTypes = "Standard_D64_v3" + VirtualMachineSizeTypesStandardD8SV3 VirtualMachineSizeTypes = "Standard_D8s_v3" + VirtualMachineSizeTypesStandardD8V3 VirtualMachineSizeTypes = "Standard_D8_v3" + VirtualMachineSizeTypesStandardDS1 VirtualMachineSizeTypes = "Standard_DS1" + VirtualMachineSizeTypesStandardDS11 VirtualMachineSizeTypes = "Standard_DS11" + VirtualMachineSizeTypesStandardDS11V2 VirtualMachineSizeTypes = "Standard_DS11_v2" + VirtualMachineSizeTypesStandardDS12 VirtualMachineSizeTypes = "Standard_DS12" + VirtualMachineSizeTypesStandardDS12V2 VirtualMachineSizeTypes = "Standard_DS12_v2" + VirtualMachineSizeTypesStandardDS13 VirtualMachineSizeTypes = "Standard_DS13" + VirtualMachineSizeTypesStandardDS132V2 VirtualMachineSizeTypes = "Standard_DS13-2_v2" + VirtualMachineSizeTypesStandardDS134V2 VirtualMachineSizeTypes = "Standard_DS13-4_v2" + VirtualMachineSizeTypesStandardDS13V2 VirtualMachineSizeTypes = "Standard_DS13_v2" + VirtualMachineSizeTypesStandardDS14 VirtualMachineSizeTypes = "Standard_DS14" + VirtualMachineSizeTypesStandardDS144V2 VirtualMachineSizeTypes = "Standard_DS14-4_v2" + VirtualMachineSizeTypesStandardDS148V2 VirtualMachineSizeTypes = "Standard_DS14-8_v2" + VirtualMachineSizeTypesStandardDS14V2 VirtualMachineSizeTypes = "Standard_DS14_v2" + VirtualMachineSizeTypesStandardDS15V2 VirtualMachineSizeTypes = "Standard_DS15_v2" + VirtualMachineSizeTypesStandardDS1V2 VirtualMachineSizeTypes = "Standard_DS1_v2" + VirtualMachineSizeTypesStandardDS2 VirtualMachineSizeTypes = "Standard_DS2" + VirtualMachineSizeTypesStandardDS2V2 VirtualMachineSizeTypes = "Standard_DS2_v2" + VirtualMachineSizeTypesStandardDS3 VirtualMachineSizeTypes = "Standard_DS3" + VirtualMachineSizeTypesStandardDS3V2 VirtualMachineSizeTypes = "Standard_DS3_v2" + VirtualMachineSizeTypesStandardDS4 VirtualMachineSizeTypes = "Standard_DS4" + VirtualMachineSizeTypesStandardDS4V2 VirtualMachineSizeTypes = "Standard_DS4_v2" + VirtualMachineSizeTypesStandardDS5V2 VirtualMachineSizeTypes = "Standard_DS5_v2" + VirtualMachineSizeTypesStandardE16SV3 VirtualMachineSizeTypes = "Standard_E16s_v3" + VirtualMachineSizeTypesStandardE16V3 VirtualMachineSizeTypes = "Standard_E16_v3" + VirtualMachineSizeTypesStandardE2SV3 VirtualMachineSizeTypes = "Standard_E2s_v3" + VirtualMachineSizeTypesStandardE2V3 VirtualMachineSizeTypes = "Standard_E2_v3" + VirtualMachineSizeTypesStandardE3216V3 VirtualMachineSizeTypes = "Standard_E32-16_v3" + VirtualMachineSizeTypesStandardE328SV3 VirtualMachineSizeTypes = 
"Standard_E32-8s_v3" + VirtualMachineSizeTypesStandardE32SV3 VirtualMachineSizeTypes = "Standard_E32s_v3" + VirtualMachineSizeTypesStandardE32V3 VirtualMachineSizeTypes = "Standard_E32_v3" + VirtualMachineSizeTypesStandardE4SV3 VirtualMachineSizeTypes = "Standard_E4s_v3" + VirtualMachineSizeTypesStandardE4V3 VirtualMachineSizeTypes = "Standard_E4_v3" + VirtualMachineSizeTypesStandardE6416SV3 VirtualMachineSizeTypes = "Standard_E64-16s_v3" + VirtualMachineSizeTypesStandardE6432SV3 VirtualMachineSizeTypes = "Standard_E64-32s_v3" + VirtualMachineSizeTypesStandardE64SV3 VirtualMachineSizeTypes = "Standard_E64s_v3" + VirtualMachineSizeTypesStandardE64V3 VirtualMachineSizeTypes = "Standard_E64_v3" + VirtualMachineSizeTypesStandardE8SV3 VirtualMachineSizeTypes = "Standard_E8s_v3" + VirtualMachineSizeTypesStandardE8V3 VirtualMachineSizeTypes = "Standard_E8_v3" + VirtualMachineSizeTypesStandardF1 VirtualMachineSizeTypes = "Standard_F1" + VirtualMachineSizeTypesStandardF16 VirtualMachineSizeTypes = "Standard_F16" + VirtualMachineSizeTypesStandardF16S VirtualMachineSizeTypes = "Standard_F16s" + VirtualMachineSizeTypesStandardF16SV2 VirtualMachineSizeTypes = "Standard_F16s_v2" + VirtualMachineSizeTypesStandardF1S VirtualMachineSizeTypes = "Standard_F1s" + VirtualMachineSizeTypesStandardF2 VirtualMachineSizeTypes = "Standard_F2" + VirtualMachineSizeTypesStandardF2S VirtualMachineSizeTypes = "Standard_F2s" + VirtualMachineSizeTypesStandardF2SV2 VirtualMachineSizeTypes = "Standard_F2s_v2" + VirtualMachineSizeTypesStandardF32SV2 VirtualMachineSizeTypes = "Standard_F32s_v2" + VirtualMachineSizeTypesStandardF4 VirtualMachineSizeTypes = "Standard_F4" + VirtualMachineSizeTypesStandardF4S VirtualMachineSizeTypes = "Standard_F4s" + VirtualMachineSizeTypesStandardF4SV2 VirtualMachineSizeTypes = "Standard_F4s_v2" + VirtualMachineSizeTypesStandardF64SV2 VirtualMachineSizeTypes = "Standard_F64s_v2" + VirtualMachineSizeTypesStandardF72SV2 VirtualMachineSizeTypes = "Standard_F72s_v2" + VirtualMachineSizeTypesStandardF8 VirtualMachineSizeTypes = "Standard_F8" + VirtualMachineSizeTypesStandardF8S VirtualMachineSizeTypes = "Standard_F8s" + VirtualMachineSizeTypesStandardF8SV2 VirtualMachineSizeTypes = "Standard_F8s_v2" + VirtualMachineSizeTypesStandardG1 VirtualMachineSizeTypes = "Standard_G1" + VirtualMachineSizeTypesStandardG2 VirtualMachineSizeTypes = "Standard_G2" + VirtualMachineSizeTypesStandardG3 VirtualMachineSizeTypes = "Standard_G3" + VirtualMachineSizeTypesStandardG4 VirtualMachineSizeTypes = "Standard_G4" + VirtualMachineSizeTypesStandardG5 VirtualMachineSizeTypes = "Standard_G5" + VirtualMachineSizeTypesStandardGS1 VirtualMachineSizeTypes = "Standard_GS1" + VirtualMachineSizeTypesStandardGS2 VirtualMachineSizeTypes = "Standard_GS2" + VirtualMachineSizeTypesStandardGS3 VirtualMachineSizeTypes = "Standard_GS3" + VirtualMachineSizeTypesStandardGS4 VirtualMachineSizeTypes = "Standard_GS4" + VirtualMachineSizeTypesStandardGS44 VirtualMachineSizeTypes = "Standard_GS4-4" + VirtualMachineSizeTypesStandardGS48 VirtualMachineSizeTypes = "Standard_GS4-8" + VirtualMachineSizeTypesStandardGS5 VirtualMachineSizeTypes = "Standard_GS5" + VirtualMachineSizeTypesStandardGS516 VirtualMachineSizeTypes = "Standard_GS5-16" + VirtualMachineSizeTypesStandardGS58 VirtualMachineSizeTypes = "Standard_GS5-8" + VirtualMachineSizeTypesStandardH16 VirtualMachineSizeTypes = "Standard_H16" + VirtualMachineSizeTypesStandardH16M VirtualMachineSizeTypes = "Standard_H16m" + VirtualMachineSizeTypesStandardH16Mr VirtualMachineSizeTypes = 
"Standard_H16mr" + VirtualMachineSizeTypesStandardH16R VirtualMachineSizeTypes = "Standard_H16r" + VirtualMachineSizeTypesStandardH8 VirtualMachineSizeTypes = "Standard_H8" + VirtualMachineSizeTypesStandardH8M VirtualMachineSizeTypes = "Standard_H8m" + VirtualMachineSizeTypesStandardL16S VirtualMachineSizeTypes = "Standard_L16s" + VirtualMachineSizeTypesStandardL32S VirtualMachineSizeTypes = "Standard_L32s" + VirtualMachineSizeTypesStandardL4S VirtualMachineSizeTypes = "Standard_L4s" + VirtualMachineSizeTypesStandardL8S VirtualMachineSizeTypes = "Standard_L8s" + VirtualMachineSizeTypesStandardM12832Ms VirtualMachineSizeTypes = "Standard_M128-32ms" + VirtualMachineSizeTypesStandardM12864Ms VirtualMachineSizeTypes = "Standard_M128-64ms" + VirtualMachineSizeTypesStandardM128Ms VirtualMachineSizeTypes = "Standard_M128ms" + VirtualMachineSizeTypesStandardM128S VirtualMachineSizeTypes = "Standard_M128s" + VirtualMachineSizeTypesStandardM6416Ms VirtualMachineSizeTypes = "Standard_M64-16ms" + VirtualMachineSizeTypesStandardM6432Ms VirtualMachineSizeTypes = "Standard_M64-32ms" + VirtualMachineSizeTypesStandardM64Ms VirtualMachineSizeTypes = "Standard_M64ms" + VirtualMachineSizeTypesStandardM64S VirtualMachineSizeTypes = "Standard_M64s" + VirtualMachineSizeTypesStandardNC12 VirtualMachineSizeTypes = "Standard_NC12" + VirtualMachineSizeTypesStandardNC12SV2 VirtualMachineSizeTypes = "Standard_NC12s_v2" + VirtualMachineSizeTypesStandardNC12SV3 VirtualMachineSizeTypes = "Standard_NC12s_v3" + VirtualMachineSizeTypesStandardNC24 VirtualMachineSizeTypes = "Standard_NC24" + VirtualMachineSizeTypesStandardNC24R VirtualMachineSizeTypes = "Standard_NC24r" + VirtualMachineSizeTypesStandardNC24RsV2 VirtualMachineSizeTypes = "Standard_NC24rs_v2" + VirtualMachineSizeTypesStandardNC24RsV3 VirtualMachineSizeTypes = "Standard_NC24rs_v3" + VirtualMachineSizeTypesStandardNC24SV2 VirtualMachineSizeTypes = "Standard_NC24s_v2" + VirtualMachineSizeTypesStandardNC24SV3 VirtualMachineSizeTypes = "Standard_NC24s_v3" + VirtualMachineSizeTypesStandardNC6 VirtualMachineSizeTypes = "Standard_NC6" + VirtualMachineSizeTypesStandardNC6SV2 VirtualMachineSizeTypes = "Standard_NC6s_v2" + VirtualMachineSizeTypesStandardNC6SV3 VirtualMachineSizeTypes = "Standard_NC6s_v3" + VirtualMachineSizeTypesStandardND12S VirtualMachineSizeTypes = "Standard_ND12s" + VirtualMachineSizeTypesStandardND24Rs VirtualMachineSizeTypes = "Standard_ND24rs" + VirtualMachineSizeTypesStandardND24S VirtualMachineSizeTypes = "Standard_ND24s" + VirtualMachineSizeTypesStandardND6S VirtualMachineSizeTypes = "Standard_ND6s" + VirtualMachineSizeTypesStandardNV12 VirtualMachineSizeTypes = "Standard_NV12" + VirtualMachineSizeTypesStandardNV24 VirtualMachineSizeTypes = "Standard_NV24" + VirtualMachineSizeTypesStandardNV6 VirtualMachineSizeTypes = "Standard_NV6" +) + +// PossibleVirtualMachineSizeTypesValues returns the possible values for the VirtualMachineSizeTypes const type. 
+func PossibleVirtualMachineSizeTypesValues() []VirtualMachineSizeTypes { + return []VirtualMachineSizeTypes{ + VirtualMachineSizeTypesBasicA0, + VirtualMachineSizeTypesBasicA1, + VirtualMachineSizeTypesBasicA2, + VirtualMachineSizeTypesBasicA3, + VirtualMachineSizeTypesBasicA4, + VirtualMachineSizeTypesStandardA0, + VirtualMachineSizeTypesStandardA1, + VirtualMachineSizeTypesStandardA10, + VirtualMachineSizeTypesStandardA11, + VirtualMachineSizeTypesStandardA1V2, + VirtualMachineSizeTypesStandardA2, + VirtualMachineSizeTypesStandardA2MV2, + VirtualMachineSizeTypesStandardA2V2, + VirtualMachineSizeTypesStandardA3, + VirtualMachineSizeTypesStandardA4, + VirtualMachineSizeTypesStandardA4MV2, + VirtualMachineSizeTypesStandardA4V2, + VirtualMachineSizeTypesStandardA5, + VirtualMachineSizeTypesStandardA6, + VirtualMachineSizeTypesStandardA7, + VirtualMachineSizeTypesStandardA8, + VirtualMachineSizeTypesStandardA8MV2, + VirtualMachineSizeTypesStandardA8V2, + VirtualMachineSizeTypesStandardA9, + VirtualMachineSizeTypesStandardB1Ms, + VirtualMachineSizeTypesStandardB1S, + VirtualMachineSizeTypesStandardB2Ms, + VirtualMachineSizeTypesStandardB2S, + VirtualMachineSizeTypesStandardB4Ms, + VirtualMachineSizeTypesStandardB8Ms, + VirtualMachineSizeTypesStandardD1, + VirtualMachineSizeTypesStandardD11, + VirtualMachineSizeTypesStandardD11V2, + VirtualMachineSizeTypesStandardD12, + VirtualMachineSizeTypesStandardD12V2, + VirtualMachineSizeTypesStandardD13, + VirtualMachineSizeTypesStandardD13V2, + VirtualMachineSizeTypesStandardD14, + VirtualMachineSizeTypesStandardD14V2, + VirtualMachineSizeTypesStandardD15V2, + VirtualMachineSizeTypesStandardD16SV3, + VirtualMachineSizeTypesStandardD16V3, + VirtualMachineSizeTypesStandardD1V2, + VirtualMachineSizeTypesStandardD2, + VirtualMachineSizeTypesStandardD2SV3, + VirtualMachineSizeTypesStandardD2V2, + VirtualMachineSizeTypesStandardD2V3, + VirtualMachineSizeTypesStandardD3, + VirtualMachineSizeTypesStandardD32SV3, + VirtualMachineSizeTypesStandardD32V3, + VirtualMachineSizeTypesStandardD3V2, + VirtualMachineSizeTypesStandardD4, + VirtualMachineSizeTypesStandardD4SV3, + VirtualMachineSizeTypesStandardD4V2, + VirtualMachineSizeTypesStandardD4V3, + VirtualMachineSizeTypesStandardD5V2, + VirtualMachineSizeTypesStandardD64SV3, + VirtualMachineSizeTypesStandardD64V3, + VirtualMachineSizeTypesStandardD8SV3, + VirtualMachineSizeTypesStandardD8V3, + VirtualMachineSizeTypesStandardDS1, + VirtualMachineSizeTypesStandardDS11, + VirtualMachineSizeTypesStandardDS11V2, + VirtualMachineSizeTypesStandardDS12, + VirtualMachineSizeTypesStandardDS12V2, + VirtualMachineSizeTypesStandardDS13, + VirtualMachineSizeTypesStandardDS132V2, + VirtualMachineSizeTypesStandardDS134V2, + VirtualMachineSizeTypesStandardDS13V2, + VirtualMachineSizeTypesStandardDS14, + VirtualMachineSizeTypesStandardDS144V2, + VirtualMachineSizeTypesStandardDS148V2, + VirtualMachineSizeTypesStandardDS14V2, + VirtualMachineSizeTypesStandardDS15V2, + VirtualMachineSizeTypesStandardDS1V2, + VirtualMachineSizeTypesStandardDS2, + VirtualMachineSizeTypesStandardDS2V2, + VirtualMachineSizeTypesStandardDS3, + VirtualMachineSizeTypesStandardDS3V2, + VirtualMachineSizeTypesStandardDS4, + VirtualMachineSizeTypesStandardDS4V2, + VirtualMachineSizeTypesStandardDS5V2, + VirtualMachineSizeTypesStandardE16SV3, + VirtualMachineSizeTypesStandardE16V3, + VirtualMachineSizeTypesStandardE2SV3, + VirtualMachineSizeTypesStandardE2V3, + VirtualMachineSizeTypesStandardE3216V3, + VirtualMachineSizeTypesStandardE328SV3, + 
VirtualMachineSizeTypesStandardE32SV3, + VirtualMachineSizeTypesStandardE32V3, + VirtualMachineSizeTypesStandardE4SV3, + VirtualMachineSizeTypesStandardE4V3, + VirtualMachineSizeTypesStandardE6416SV3, + VirtualMachineSizeTypesStandardE6432SV3, + VirtualMachineSizeTypesStandardE64SV3, + VirtualMachineSizeTypesStandardE64V3, + VirtualMachineSizeTypesStandardE8SV3, + VirtualMachineSizeTypesStandardE8V3, + VirtualMachineSizeTypesStandardF1, + VirtualMachineSizeTypesStandardF16, + VirtualMachineSizeTypesStandardF16S, + VirtualMachineSizeTypesStandardF16SV2, + VirtualMachineSizeTypesStandardF1S, + VirtualMachineSizeTypesStandardF2, + VirtualMachineSizeTypesStandardF2S, + VirtualMachineSizeTypesStandardF2SV2, + VirtualMachineSizeTypesStandardF32SV2, + VirtualMachineSizeTypesStandardF4, + VirtualMachineSizeTypesStandardF4S, + VirtualMachineSizeTypesStandardF4SV2, + VirtualMachineSizeTypesStandardF64SV2, + VirtualMachineSizeTypesStandardF72SV2, + VirtualMachineSizeTypesStandardF8, + VirtualMachineSizeTypesStandardF8S, + VirtualMachineSizeTypesStandardF8SV2, + VirtualMachineSizeTypesStandardG1, + VirtualMachineSizeTypesStandardG2, + VirtualMachineSizeTypesStandardG3, + VirtualMachineSizeTypesStandardG4, + VirtualMachineSizeTypesStandardG5, + VirtualMachineSizeTypesStandardGS1, + VirtualMachineSizeTypesStandardGS2, + VirtualMachineSizeTypesStandardGS3, + VirtualMachineSizeTypesStandardGS4, + VirtualMachineSizeTypesStandardGS44, + VirtualMachineSizeTypesStandardGS48, + VirtualMachineSizeTypesStandardGS5, + VirtualMachineSizeTypesStandardGS516, + VirtualMachineSizeTypesStandardGS58, + VirtualMachineSizeTypesStandardH16, + VirtualMachineSizeTypesStandardH16M, + VirtualMachineSizeTypesStandardH16Mr, + VirtualMachineSizeTypesStandardH16R, + VirtualMachineSizeTypesStandardH8, + VirtualMachineSizeTypesStandardH8M, + VirtualMachineSizeTypesStandardL16S, + VirtualMachineSizeTypesStandardL32S, + VirtualMachineSizeTypesStandardL4S, + VirtualMachineSizeTypesStandardL8S, + VirtualMachineSizeTypesStandardM12832Ms, + VirtualMachineSizeTypesStandardM12864Ms, + VirtualMachineSizeTypesStandardM128Ms, + VirtualMachineSizeTypesStandardM128S, + VirtualMachineSizeTypesStandardM6416Ms, + VirtualMachineSizeTypesStandardM6432Ms, + VirtualMachineSizeTypesStandardM64Ms, + VirtualMachineSizeTypesStandardM64S, + VirtualMachineSizeTypesStandardNC12, + VirtualMachineSizeTypesStandardNC12SV2, + VirtualMachineSizeTypesStandardNC12SV3, + VirtualMachineSizeTypesStandardNC24, + VirtualMachineSizeTypesStandardNC24R, + VirtualMachineSizeTypesStandardNC24RsV2, + VirtualMachineSizeTypesStandardNC24RsV3, + VirtualMachineSizeTypesStandardNC24SV2, + VirtualMachineSizeTypesStandardNC24SV3, + VirtualMachineSizeTypesStandardNC6, + VirtualMachineSizeTypesStandardNC6SV2, + VirtualMachineSizeTypesStandardNC6SV3, + VirtualMachineSizeTypesStandardND12S, + VirtualMachineSizeTypesStandardND24Rs, + VirtualMachineSizeTypesStandardND24S, + VirtualMachineSizeTypesStandardND6S, + VirtualMachineSizeTypesStandardNV12, + VirtualMachineSizeTypesStandardNV24, + VirtualMachineSizeTypesStandardNV6, + } +} + +// WindowsPatchAssessmentMode - Specifies the mode of VM Guest patch assessment for the IaaS virtual machine. +// Possible values are: +// ImageDefault - You control the timing of patch assessments on a virtual machine. +// AutomaticByPlatform - The platform will trigger periodic patch assessments. The property provisionVMAgent must be true. 
+type WindowsPatchAssessmentMode string
+
+const (
+    WindowsPatchAssessmentModeAutomaticByPlatform WindowsPatchAssessmentMode = "AutomaticByPlatform"
+    WindowsPatchAssessmentModeImageDefault        WindowsPatchAssessmentMode = "ImageDefault"
+)
+
+// PossibleWindowsPatchAssessmentModeValues returns the possible values for the WindowsPatchAssessmentMode const type.
+func PossibleWindowsPatchAssessmentModeValues() []WindowsPatchAssessmentMode {
+    return []WindowsPatchAssessmentMode{
+        WindowsPatchAssessmentModeAutomaticByPlatform,
+        WindowsPatchAssessmentModeImageDefault,
+    }
+}
+
+// WindowsVMGuestPatchAutomaticByPlatformRebootSetting - Specifies the reboot setting for all AutomaticByPlatform patch installation
+// operations.
+type WindowsVMGuestPatchAutomaticByPlatformRebootSetting string
+
+const (
+    WindowsVMGuestPatchAutomaticByPlatformRebootSettingAlways     WindowsVMGuestPatchAutomaticByPlatformRebootSetting = "Always"
+    WindowsVMGuestPatchAutomaticByPlatformRebootSettingIfRequired WindowsVMGuestPatchAutomaticByPlatformRebootSetting = "IfRequired"
+    WindowsVMGuestPatchAutomaticByPlatformRebootSettingNever      WindowsVMGuestPatchAutomaticByPlatformRebootSetting = "Never"
+    WindowsVMGuestPatchAutomaticByPlatformRebootSettingUnknown    WindowsVMGuestPatchAutomaticByPlatformRebootSetting = "Unknown"
+)
+
+// PossibleWindowsVMGuestPatchAutomaticByPlatformRebootSettingValues returns the possible values for the WindowsVMGuestPatchAutomaticByPlatformRebootSetting const type.
+func PossibleWindowsVMGuestPatchAutomaticByPlatformRebootSettingValues() []WindowsVMGuestPatchAutomaticByPlatformRebootSetting {
+    return []WindowsVMGuestPatchAutomaticByPlatformRebootSetting{
+        WindowsVMGuestPatchAutomaticByPlatformRebootSettingAlways,
+        WindowsVMGuestPatchAutomaticByPlatformRebootSettingIfRequired,
+        WindowsVMGuestPatchAutomaticByPlatformRebootSettingNever,
+        WindowsVMGuestPatchAutomaticByPlatformRebootSettingUnknown,
+    }
+}
+
+// WindowsVMGuestPatchMode - Specifies the mode of VM Guest Patching for an IaaS virtual machine or for virtual machines associated
+// with a virtual machine scale set with OrchestrationMode as Flexible.
+// Possible values are:
+// Manual - You control the application of patches to a virtual machine. You do this by applying patches manually inside the
+// VM. In this mode, automatic updates are disabled; the property
+// WindowsConfiguration.enableAutomaticUpdates must be false.
+// AutomaticByOS - The virtual machine will automatically be updated by the OS. The property WindowsConfiguration.enableAutomaticUpdates
+// must be true.
+// AutomaticByPlatform - The virtual machine will automatically be updated by the platform. The properties provisionVMAgent and
+// WindowsConfiguration.enableAutomaticUpdates must be true.
+type WindowsVMGuestPatchMode string
+
+const (
+    WindowsVMGuestPatchModeAutomaticByOS       WindowsVMGuestPatchMode = "AutomaticByOS"
+    WindowsVMGuestPatchModeAutomaticByPlatform WindowsVMGuestPatchMode = "AutomaticByPlatform"
+    WindowsVMGuestPatchModeManual              WindowsVMGuestPatchMode = "Manual"
+)
+
+// PossibleWindowsVMGuestPatchModeValues returns the possible values for the WindowsVMGuestPatchMode const type.
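For context outside the generated code: all of these const types follow the same convention, a string-backed type plus a Possible*Values slice, which makes membership checks straightforward. A minimal sketch of that use (the isValidPatchMode helper is hypothetical, not part of the SDK):

package main

import (
    "fmt"

    "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute"
)

// isValidPatchMode reports whether m is one of the generated constants.
func isValidPatchMode(m armcompute.WindowsVMGuestPatchMode) bool {
    for _, v := range armcompute.PossibleWindowsVMGuestPatchModeValues() {
        if v == m {
            return true
        }
    }
    return false
}

func main() {
    fmt.Println(isValidPatchMode(armcompute.WindowsVMGuestPatchModeAutomaticByPlatform)) // true
    fmt.Println(isValidPatchMode(armcompute.WindowsVMGuestPatchMode("Bogus")))           // false
}

The same loop works for any of the const types above, including the large VirtualMachineSizeTypes set.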
+func PossibleWindowsVMGuestPatchModeValues() []WindowsVMGuestPatchMode { + return []WindowsVMGuestPatchMode{ + WindowsVMGuestPatchModeAutomaticByOS, + WindowsVMGuestPatchModeAutomaticByPlatform, + WindowsVMGuestPatchModeManual, + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_dedicatedhostgroups_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_dedicatedhostgroups_client.go new file mode 100644 index 000000000..0b8447c49 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_dedicatedhostgroups_client.go @@ -0,0 +1,407 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armcompute + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + armruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strings" +) + +// DedicatedHostGroupsClient contains the methods for the DedicatedHostGroups group. +// Don't use this type directly, use NewDedicatedHostGroupsClient() instead. +type DedicatedHostGroupsClient struct { + host string + subscriptionID string + pl runtime.Pipeline +} + +// NewDedicatedHostGroupsClient creates a new instance of DedicatedHostGroupsClient with the specified values. +// subscriptionID - Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms +// part of the URI for every service call. +// credential - used to authorize requests. Usually a credential from azidentity. +// options - pass nil to accept the default values. +func NewDedicatedHostGroupsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*DedicatedHostGroupsClient, error) { + if options == nil { + options = &arm.ClientOptions{} + } + ep := cloud.AzurePublic.Services[cloud.ResourceManager].Endpoint + if c, ok := options.Cloud.Services[cloud.ResourceManager]; ok { + ep = c.Endpoint + } + pl, err := armruntime.NewPipeline(moduleName, moduleVersion, credential, runtime.PipelineOptions{}, options) + if err != nil { + return nil, err + } + client := &DedicatedHostGroupsClient{ + subscriptionID: subscriptionID, + host: ep, + pl: pl, + } + return client, nil +} + +// CreateOrUpdate - Create or update a dedicated host group. For details of Dedicated Host and Dedicated Host Groups please +// see Dedicated Host Documentation [https://go.microsoft.com/fwlink/?linkid=2082596] +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// hostGroupName - The name of the dedicated host group. +// parameters - Parameters supplied to the Create Dedicated Host Group. +// options - DedicatedHostGroupsClientCreateOrUpdateOptions contains the optional parameters for the DedicatedHostGroupsClient.CreateOrUpdate +// method. 
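As a usage sketch for the client above (assuming the azidentity package for credentials; the subscription ID, resource names, and region are placeholders, and struct field names follow this SDK version):

package main

import (
    "context"
    "log"

    "github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
    "github.com/Azure/azure-sdk-for-go/sdk/azidentity"
    "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute"
)

func main() {
    cred, err := azidentity.NewDefaultAzureCredential(nil)
    if err != nil {
        log.Fatal(err)
    }
    client, err := armcompute.NewDedicatedHostGroupsClient("<subscription-id>", cred, nil)
    if err != nil {
        log.Fatal(err)
    }
    // Create a host group with two platform fault domains.
    resp, err := client.CreateOrUpdate(context.Background(), "myResourceGroup", "myHostGroup",
        armcompute.DedicatedHostGroup{
            Location: to.Ptr("eastus"),
            Properties: &armcompute.DedicatedHostGroupProperties{
                PlatformFaultDomainCount: to.Ptr[int32](2),
            },
        }, nil)
    if err != nil {
        log.Fatal(err)
    }
    log.Println(*resp.ID) // ARM resource ID of the created group
}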
+func (client *DedicatedHostGroupsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, hostGroupName string, parameters DedicatedHostGroup, options *DedicatedHostGroupsClientCreateOrUpdateOptions) (DedicatedHostGroupsClientCreateOrUpdateResponse, error) { + req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, hostGroupName, parameters, options) + if err != nil { + return DedicatedHostGroupsClientCreateOrUpdateResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return DedicatedHostGroupsClientCreateOrUpdateResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) { + return DedicatedHostGroupsClientCreateOrUpdateResponse{}, runtime.NewResponseError(resp) + } + return client.createOrUpdateHandleResponse(resp) +} + +// createOrUpdateCreateRequest creates the CreateOrUpdate request. +func (client *DedicatedHostGroupsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, hostGroupName string, parameters DedicatedHostGroup, options *DedicatedHostGroupsClientCreateOrUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if hostGroupName == "" { + return nil, errors.New("parameter hostGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{hostGroupName}", url.PathEscape(hostGroupName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, parameters) +} + +// createOrUpdateHandleResponse handles the CreateOrUpdate response. +func (client *DedicatedHostGroupsClient) createOrUpdateHandleResponse(resp *http.Response) (DedicatedHostGroupsClientCreateOrUpdateResponse, error) { + result := DedicatedHostGroupsClientCreateOrUpdateResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.DedicatedHostGroup); err != nil { + return DedicatedHostGroupsClientCreateOrUpdateResponse{}, err + } + return result, nil +} + +// Delete - Delete a dedicated host group. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// hostGroupName - The name of the dedicated host group. +// options - DedicatedHostGroupsClientDeleteOptions contains the optional parameters for the DedicatedHostGroupsClient.Delete +// method. 
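Because failures surface as *azcore.ResponseError, callers can branch on the HTTP status with errors.As; a sketch (the Exists wrapper is hypothetical):

package hostgroups // hypothetical helper package

import (
    "context"
    "errors"
    "net/http"

    "github.com/Azure/azure-sdk-for-go/sdk/azcore"
    "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute"
)

// Exists reports whether the named dedicated host group exists, treating
// HTTP 404 as "no" and any other failure as an error.
func Exists(ctx context.Context, client *armcompute.DedicatedHostGroupsClient, rg, name string) (bool, error) {
    _, err := client.Get(ctx, rg, name, nil)
    if err == nil {
        return true, nil
    }
    var respErr *azcore.ResponseError
    if errors.As(err, &respErr) && respErr.StatusCode == http.StatusNotFound {
        return false, nil
    }
    return false, err
}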
+func (client *DedicatedHostGroupsClient) Delete(ctx context.Context, resourceGroupName string, hostGroupName string, options *DedicatedHostGroupsClientDeleteOptions) (DedicatedHostGroupsClientDeleteResponse, error) { + req, err := client.deleteCreateRequest(ctx, resourceGroupName, hostGroupName, options) + if err != nil { + return DedicatedHostGroupsClientDeleteResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return DedicatedHostGroupsClientDeleteResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusNoContent) { + return DedicatedHostGroupsClientDeleteResponse{}, runtime.NewResponseError(resp) + } + return DedicatedHostGroupsClientDeleteResponse{}, nil +} + +// deleteCreateRequest creates the Delete request. +func (client *DedicatedHostGroupsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, hostGroupName string, options *DedicatedHostGroupsClientDeleteOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if hostGroupName == "" { + return nil, errors.New("parameter hostGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{hostGroupName}", url.PathEscape(hostGroupName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// Get - Retrieves information about a dedicated host group. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// hostGroupName - The name of the dedicated host group. +// options - DedicatedHostGroupsClientGetOptions contains the optional parameters for the DedicatedHostGroupsClient.Get method. +func (client *DedicatedHostGroupsClient) Get(ctx context.Context, resourceGroupName string, hostGroupName string, options *DedicatedHostGroupsClientGetOptions) (DedicatedHostGroupsClientGetResponse, error) { + req, err := client.getCreateRequest(ctx, resourceGroupName, hostGroupName, options) + if err != nil { + return DedicatedHostGroupsClientGetResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return DedicatedHostGroupsClientGetResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return DedicatedHostGroupsClientGetResponse{}, runtime.NewResponseError(resp) + } + return client.getHandleResponse(resp) +} + +// getCreateRequest creates the Get request. 
+func (client *DedicatedHostGroupsClient) getCreateRequest(ctx context.Context, resourceGroupName string, hostGroupName string, options *DedicatedHostGroupsClientGetOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if hostGroupName == "" { + return nil, errors.New("parameter hostGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{hostGroupName}", url.PathEscape(hostGroupName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Expand != nil { + reqQP.Set("$expand", string(*options.Expand)) + } + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getHandleResponse handles the Get response. +func (client *DedicatedHostGroupsClient) getHandleResponse(resp *http.Response) (DedicatedHostGroupsClientGetResponse, error) { + result := DedicatedHostGroupsClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.DedicatedHostGroup); err != nil { + return DedicatedHostGroupsClientGetResponse{}, err + } + return result, nil +} + +// NewListByResourceGroupPager - Lists all of the dedicated host groups in the specified resource group. Use the nextLink +// property in the response to get the next page of dedicated host groups. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// options - DedicatedHostGroupsClientListByResourceGroupOptions contains the optional parameters for the DedicatedHostGroupsClient.ListByResourceGroup +// method. 
+func (client *DedicatedHostGroupsClient) NewListByResourceGroupPager(resourceGroupName string, options *DedicatedHostGroupsClientListByResourceGroupOptions) *runtime.Pager[DedicatedHostGroupsClientListByResourceGroupResponse] { + return runtime.NewPager(runtime.PagingHandler[DedicatedHostGroupsClientListByResourceGroupResponse]{ + More: func(page DedicatedHostGroupsClientListByResourceGroupResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *DedicatedHostGroupsClientListByResourceGroupResponse) (DedicatedHostGroupsClientListByResourceGroupResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listByResourceGroupCreateRequest(ctx, resourceGroupName, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return DedicatedHostGroupsClientListByResourceGroupResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return DedicatedHostGroupsClientListByResourceGroupResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return DedicatedHostGroupsClientListByResourceGroupResponse{}, runtime.NewResponseError(resp) + } + return client.listByResourceGroupHandleResponse(resp) + }, + }) +} + +// listByResourceGroupCreateRequest creates the ListByResourceGroup request. +func (client *DedicatedHostGroupsClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *DedicatedHostGroupsClientListByResourceGroupOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listByResourceGroupHandleResponse handles the ListByResourceGroup response. +func (client *DedicatedHostGroupsClient) listByResourceGroupHandleResponse(resp *http.Response) (DedicatedHostGroupsClientListByResourceGroupResponse, error) { + result := DedicatedHostGroupsClientListByResourceGroupResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.DedicatedHostGroupListResult); err != nil { + return DedicatedHostGroupsClientListByResourceGroupResponse{}, err + } + return result, nil +} + +// NewListBySubscriptionPager - Lists all of the dedicated host groups in the subscription. Use the nextLink property in the +// response to get the next page of dedicated host groups. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// options - DedicatedHostGroupsClientListBySubscriptionOptions contains the optional parameters for the DedicatedHostGroupsClient.ListBySubscription +// method. 
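Pagers returned by these methods are driven with More and NextPage; a minimal sketch (subscription ID is a placeholder):

package main

import (
    "context"
    "log"

    "github.com/Azure/azure-sdk-for-go/sdk/azidentity"
    "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute"
)

func main() {
    cred, err := azidentity.NewDefaultAzureCredential(nil)
    if err != nil {
        log.Fatal(err)
    }
    client, err := armcompute.NewDedicatedHostGroupsClient("<subscription-id>", cred, nil)
    if err != nil {
        log.Fatal(err)
    }
    pager := client.NewListBySubscriptionPager(nil)
    for pager.More() {
        page, err := pager.NextPage(context.Background())
        if err != nil {
            log.Fatal(err)
        }
        for _, hg := range page.Value { // each entry is a *DedicatedHostGroup
            log.Println(*hg.Name)
        }
    }
}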
+func (client *DedicatedHostGroupsClient) NewListBySubscriptionPager(options *DedicatedHostGroupsClientListBySubscriptionOptions) *runtime.Pager[DedicatedHostGroupsClientListBySubscriptionResponse] {
+    return runtime.NewPager(runtime.PagingHandler[DedicatedHostGroupsClientListBySubscriptionResponse]{
+        More: func(page DedicatedHostGroupsClientListBySubscriptionResponse) bool {
+            return page.NextLink != nil && len(*page.NextLink) > 0
+        },
+        Fetcher: func(ctx context.Context, page *DedicatedHostGroupsClientListBySubscriptionResponse) (DedicatedHostGroupsClientListBySubscriptionResponse, error) {
+            var req *policy.Request
+            var err error
+            if page == nil {
+                req, err = client.listBySubscriptionCreateRequest(ctx, options)
+            } else {
+                req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink)
+            }
+            if err != nil {
+                return DedicatedHostGroupsClientListBySubscriptionResponse{}, err
+            }
+            resp, err := client.pl.Do(req)
+            if err != nil {
+                return DedicatedHostGroupsClientListBySubscriptionResponse{}, err
+            }
+            if !runtime.HasStatusCode(resp, http.StatusOK) {
+                return DedicatedHostGroupsClientListBySubscriptionResponse{}, runtime.NewResponseError(resp)
+            }
+            return client.listBySubscriptionHandleResponse(resp)
+        },
+    })
+}
+
+// listBySubscriptionCreateRequest creates the ListBySubscription request.
+func (client *DedicatedHostGroupsClient) listBySubscriptionCreateRequest(ctx context.Context, options *DedicatedHostGroupsClientListBySubscriptionOptions) (*policy.Request, error) {
+    urlPath := "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/hostGroups"
+    if client.subscriptionID == "" {
+        return nil, errors.New("parameter client.subscriptionID cannot be empty")
+    }
+    urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID))
+    req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))
+    if err != nil {
+        return nil, err
+    }
+    reqQP := req.Raw().URL.Query()
+    reqQP.Set("api-version", "2022-03-01")
+    req.Raw().URL.RawQuery = reqQP.Encode()
+    req.Raw().Header["Accept"] = []string{"application/json"}
+    return req, nil
+}
+
+// listBySubscriptionHandleResponse handles the ListBySubscription response.
+func (client *DedicatedHostGroupsClient) listBySubscriptionHandleResponse(resp *http.Response) (DedicatedHostGroupsClientListBySubscriptionResponse, error) {
+    result := DedicatedHostGroupsClientListBySubscriptionResponse{}
+    if err := runtime.UnmarshalAsJSON(resp, &result.DedicatedHostGroupListResult); err != nil {
+        return DedicatedHostGroupsClientListBySubscriptionResponse{}, err
+    }
+    return result, nil
+}
+
+// Update - Update a dedicated host group.
+// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2022-03-01
+// resourceGroupName - The name of the resource group.
+// hostGroupName - The name of the dedicated host group.
+// parameters - Parameters supplied to the Update Dedicated Host Group operation.
+// options - DedicatedHostGroupsClientUpdateOptions contains the optional parameters for the DedicatedHostGroupsClient.Update
+// method.
+func (client *DedicatedHostGroupsClient) Update(ctx context.Context, resourceGroupName string, hostGroupName string, parameters DedicatedHostGroupUpdate, options *DedicatedHostGroupsClientUpdateOptions) (DedicatedHostGroupsClientUpdateResponse, error) { + req, err := client.updateCreateRequest(ctx, resourceGroupName, hostGroupName, parameters, options) + if err != nil { + return DedicatedHostGroupsClientUpdateResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return DedicatedHostGroupsClientUpdateResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return DedicatedHostGroupsClientUpdateResponse{}, runtime.NewResponseError(resp) + } + return client.updateHandleResponse(resp) +} + +// updateCreateRequest creates the Update request. +func (client *DedicatedHostGroupsClient) updateCreateRequest(ctx context.Context, resourceGroupName string, hostGroupName string, parameters DedicatedHostGroupUpdate, options *DedicatedHostGroupsClientUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if hostGroupName == "" { + return nil, errors.New("parameter hostGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{hostGroupName}", url.PathEscape(hostGroupName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, parameters) +} + +// updateHandleResponse handles the Update response. +func (client *DedicatedHostGroupsClient) updateHandleResponse(resp *http.Response) (DedicatedHostGroupsClientUpdateResponse, error) { + result := DedicatedHostGroupsClientUpdateResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.DedicatedHostGroup); err != nil { + return DedicatedHostGroupsClientUpdateResponse{}, err + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_dedicatedhosts_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_dedicatedhosts_client.go new file mode 100644 index 000000000..25f0e2946 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_dedicatedhosts_client.go @@ -0,0 +1,471 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+
+package armcompute
+
+import (
+    "context"
+    "errors"
+    "github.com/Azure/azure-sdk-for-go/sdk/azcore"
+    "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
+    armruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime"
+    "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud"
+    "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+    "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
+    "net/http"
+    "net/url"
+    "strings"
+)
+
+// DedicatedHostsClient contains the methods for the DedicatedHosts group.
+// Don't use this type directly, use NewDedicatedHostsClient() instead.
+type DedicatedHostsClient struct {
+    host           string
+    subscriptionID string
+    pl             runtime.Pipeline
+}
+
+// NewDedicatedHostsClient creates a new instance of DedicatedHostsClient with the specified values.
+// subscriptionID - Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms
+// part of the URI for every service call.
+// credential - used to authorize requests. Usually a credential from azidentity.
+// options - pass nil to accept the default values.
+func NewDedicatedHostsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*DedicatedHostsClient, error) {
+    if options == nil {
+        options = &arm.ClientOptions{}
+    }
+    ep := cloud.AzurePublic.Services[cloud.ResourceManager].Endpoint
+    if c, ok := options.Cloud.Services[cloud.ResourceManager]; ok {
+        ep = c.Endpoint
+    }
+    pl, err := armruntime.NewPipeline(moduleName, moduleVersion, credential, runtime.PipelineOptions{}, options)
+    if err != nil {
+        return nil, err
+    }
+    client := &DedicatedHostsClient{
+        subscriptionID: subscriptionID,
+        host:           ep,
+        pl:             pl,
+    }
+    return client, nil
+}
+
+// BeginCreateOrUpdate - Create or update a dedicated host.
+// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2022-03-01
+// resourceGroupName - The name of the resource group.
+// hostGroupName - The name of the dedicated host group.
+// hostName - The name of the dedicated host.
+// parameters - Parameters supplied to the Create Dedicated Host.
+// options - DedicatedHostsClientBeginCreateOrUpdateOptions contains the optional parameters for the DedicatedHostsClient.BeginCreateOrUpdate
+// method.
+func (client *DedicatedHostsClient) BeginCreateOrUpdate(ctx context.Context, resourceGroupName string, hostGroupName string, hostName string, parameters DedicatedHost, options *DedicatedHostsClientBeginCreateOrUpdateOptions) (*runtime.Poller[DedicatedHostsClientCreateOrUpdateResponse], error) {
+    if options == nil || options.ResumeToken == "" {
+        resp, err := client.createOrUpdate(ctx, resourceGroupName, hostGroupName, hostName, parameters, options)
+        if err != nil {
+            return nil, err
+        }
+        return runtime.NewPoller[DedicatedHostsClientCreateOrUpdateResponse](resp, client.pl, nil)
+    } else {
+        return runtime.NewPollerFromResumeToken[DedicatedHostsClientCreateOrUpdateResponse](options.ResumeToken, client.pl, nil)
+    }
+}
+
+// CreateOrUpdate - Create or update a dedicated host.
+// If the operation fails it returns an *azcore.ResponseError type.
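The ResumeToken branch in BeginCreateOrUpdate above lets a long-running create be suspended and picked up later, even from another process; a hypothetical sketch of that round trip (resource names are placeholders, parameters elided):

package dedicatedhosts // hypothetical helper package

import (
    "context"

    "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute"
)

// createWithResume starts a create, captures a resume token, then finishes the
// operation from a second poller rebuilt from that token.
func createWithResume(ctx context.Context, client *armcompute.DedicatedHostsClient, params armcompute.DedicatedHost) error {
    poller, err := client.BeginCreateOrUpdate(ctx, "myResourceGroup", "myHostGroup", "myHost", params, nil)
    if err != nil {
        return err
    }
    tok, err := poller.ResumeToken() // only available while the operation is still in flight
    if err != nil {
        return err
    }
    // Later, rebuild a poller from the persisted token and wait for completion.
    poller, err = client.BeginCreateOrUpdate(ctx, "myResourceGroup", "myHostGroup", "myHost", params,
        &armcompute.DedicatedHostsClientBeginCreateOrUpdateOptions{ResumeToken: tok})
    if err != nil {
        return err
    }
    _, err = poller.PollUntilDone(ctx, nil)
    return err
}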
+// Generated from API version 2022-03-01 +func (client *DedicatedHostsClient) createOrUpdate(ctx context.Context, resourceGroupName string, hostGroupName string, hostName string, parameters DedicatedHost, options *DedicatedHostsClientBeginCreateOrUpdateOptions) (*http.Response, error) { + req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, hostGroupName, hostName, parameters, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// createOrUpdateCreateRequest creates the CreateOrUpdate request. +func (client *DedicatedHostsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, hostGroupName string, hostName string, parameters DedicatedHost, options *DedicatedHostsClientBeginCreateOrUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if hostGroupName == "" { + return nil, errors.New("parameter hostGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{hostGroupName}", url.PathEscape(hostGroupName)) + if hostName == "" { + return nil, errors.New("parameter hostName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{hostName}", url.PathEscape(hostName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, parameters) +} + +// BeginDelete - Delete a dedicated host. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// hostGroupName - The name of the dedicated host group. +// hostName - The name of the dedicated host. +// options - DedicatedHostsClientBeginDeleteOptions contains the optional parameters for the DedicatedHostsClient.BeginDelete +// method. +func (client *DedicatedHostsClient) BeginDelete(ctx context.Context, resourceGroupName string, hostGroupName string, hostName string, options *DedicatedHostsClientBeginDeleteOptions) (*runtime.Poller[DedicatedHostsClientDeleteResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.deleteOperation(ctx, resourceGroupName, hostGroupName, hostName, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[DedicatedHostsClientDeleteResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[DedicatedHostsClientDeleteResponse](options.ResumeToken, client.pl, nil) + } +} + +// Delete - Delete a dedicated host. +// If the operation fails it returns an *azcore.ResponseError type. 
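Callers usually block on the returned poller until the LRO reaches a terminal state; a small sketch (the deleteAndWait wrapper is hypothetical):

package dedicatedhosts // hypothetical helper package

import (
    "context"

    "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute"
)

// deleteAndWait starts the delete and blocks until it finishes or fails.
func deleteAndWait(ctx context.Context, client *armcompute.DedicatedHostsClient, rg, hostGroup, host string) error {
    poller, err := client.BeginDelete(ctx, rg, hostGroup, host, nil)
    if err != nil {
        return err
    }
    // nil options use the service-suggested polling interval.
    _, err = poller.PollUntilDone(ctx, nil)
    return err
}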
+// Generated from API version 2022-03-01 +func (client *DedicatedHostsClient) deleteOperation(ctx context.Context, resourceGroupName string, hostGroupName string, hostName string, options *DedicatedHostsClientBeginDeleteOptions) (*http.Response, error) { + req, err := client.deleteCreateRequest(ctx, resourceGroupName, hostGroupName, hostName, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted, http.StatusNoContent) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// deleteCreateRequest creates the Delete request. +func (client *DedicatedHostsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, hostGroupName string, hostName string, options *DedicatedHostsClientBeginDeleteOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if hostGroupName == "" { + return nil, errors.New("parameter hostGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{hostGroupName}", url.PathEscape(hostGroupName)) + if hostName == "" { + return nil, errors.New("parameter hostName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{hostName}", url.PathEscape(hostName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// Get - Retrieves information about a dedicated host. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// hostGroupName - The name of the dedicated host group. +// hostName - The name of the dedicated host. +// options - DedicatedHostsClientGetOptions contains the optional parameters for the DedicatedHostsClient.Get method. +func (client *DedicatedHostsClient) Get(ctx context.Context, resourceGroupName string, hostGroupName string, hostName string, options *DedicatedHostsClientGetOptions) (DedicatedHostsClientGetResponse, error) { + req, err := client.getCreateRequest(ctx, resourceGroupName, hostGroupName, hostName, options) + if err != nil { + return DedicatedHostsClientGetResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return DedicatedHostsClientGetResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return DedicatedHostsClientGetResponse{}, runtime.NewResponseError(resp) + } + return client.getHandleResponse(resp) +} + +// getCreateRequest creates the Get request. 
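Get accepts an optional $expand; a sketch requesting the instance view (assuming the package's InstanceViewTypes constants and the azcore to helpers):

package dedicatedhosts // hypothetical helper package

import (
    "context"
    "fmt"

    "github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
    "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute"
)

// getWithInstanceView fetches a host including its instance view via $expand.
func getWithInstanceView(ctx context.Context, client *armcompute.DedicatedHostsClient, rg, hostGroup, host string) error {
    resp, err := client.Get(ctx, rg, hostGroup, host, &armcompute.DedicatedHostsClientGetOptions{
        Expand: to.Ptr(armcompute.InstanceViewTypesInstanceView),
    })
    if err != nil {
        return err
    }
    fmt.Println(*resp.ID)
    return nil
}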
+func (client *DedicatedHostsClient) getCreateRequest(ctx context.Context, resourceGroupName string, hostGroupName string, hostName string, options *DedicatedHostsClientGetOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if hostGroupName == "" { + return nil, errors.New("parameter hostGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{hostGroupName}", url.PathEscape(hostGroupName)) + if hostName == "" { + return nil, errors.New("parameter hostName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{hostName}", url.PathEscape(hostName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Expand != nil { + reqQP.Set("$expand", string(*options.Expand)) + } + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getHandleResponse handles the Get response. +func (client *DedicatedHostsClient) getHandleResponse(resp *http.Response) (DedicatedHostsClientGetResponse, error) { + result := DedicatedHostsClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.DedicatedHost); err != nil { + return DedicatedHostsClientGetResponse{}, err + } + return result, nil +} + +// NewListByHostGroupPager - Lists all of the dedicated hosts in the specified dedicated host group. Use the nextLink property +// in the response to get the next page of dedicated hosts. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// hostGroupName - The name of the dedicated host group. +// options - DedicatedHostsClientListByHostGroupOptions contains the optional parameters for the DedicatedHostsClient.ListByHostGroup +// method. 
+func (client *DedicatedHostsClient) NewListByHostGroupPager(resourceGroupName string, hostGroupName string, options *DedicatedHostsClientListByHostGroupOptions) *runtime.Pager[DedicatedHostsClientListByHostGroupResponse] {
+    return runtime.NewPager(runtime.PagingHandler[DedicatedHostsClientListByHostGroupResponse]{
+        More: func(page DedicatedHostsClientListByHostGroupResponse) bool {
+            return page.NextLink != nil && len(*page.NextLink) > 0
+        },
+        Fetcher: func(ctx context.Context, page *DedicatedHostsClientListByHostGroupResponse) (DedicatedHostsClientListByHostGroupResponse, error) {
+            var req *policy.Request
+            var err error
+            if page == nil {
+                req, err = client.listByHostGroupCreateRequest(ctx, resourceGroupName, hostGroupName, options)
+            } else {
+                req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink)
+            }
+            if err != nil {
+                return DedicatedHostsClientListByHostGroupResponse{}, err
+            }
+            resp, err := client.pl.Do(req)
+            if err != nil {
+                return DedicatedHostsClientListByHostGroupResponse{}, err
+            }
+            if !runtime.HasStatusCode(resp, http.StatusOK) {
+                return DedicatedHostsClientListByHostGroupResponse{}, runtime.NewResponseError(resp)
+            }
+            return client.listByHostGroupHandleResponse(resp)
+        },
+    })
+}
+
+// listByHostGroupCreateRequest creates the ListByHostGroup request.
+func (client *DedicatedHostsClient) listByHostGroupCreateRequest(ctx context.Context, resourceGroupName string, hostGroupName string, options *DedicatedHostsClientListByHostGroupOptions) (*policy.Request, error) {
+    urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts"
+    if resourceGroupName == "" {
+        return nil, errors.New("parameter resourceGroupName cannot be empty")
+    }
+    urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName))
+    if hostGroupName == "" {
+        return nil, errors.New("parameter hostGroupName cannot be empty")
+    }
+    urlPath = strings.ReplaceAll(urlPath, "{hostGroupName}", url.PathEscape(hostGroupName))
+    if client.subscriptionID == "" {
+        return nil, errors.New("parameter client.subscriptionID cannot be empty")
+    }
+    urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID))
+    req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))
+    if err != nil {
+        return nil, err
+    }
+    reqQP := req.Raw().URL.Query()
+    reqQP.Set("api-version", "2022-03-01")
+    req.Raw().URL.RawQuery = reqQP.Encode()
+    req.Raw().Header["Accept"] = []string{"application/json"}
+    return req, nil
+}
+
+// listByHostGroupHandleResponse handles the ListByHostGroup response.
+func (client *DedicatedHostsClient) listByHostGroupHandleResponse(resp *http.Response) (DedicatedHostsClientListByHostGroupResponse, error) {
+    result := DedicatedHostsClientListByHostGroupResponse{}
+    if err := runtime.UnmarshalAsJSON(resp, &result.DedicatedHostListResult); err != nil {
+        return DedicatedHostsClientListByHostGroupResponse{}, err
+    }
+    return result, nil
+}
+
+// BeginRestart - Restart the dedicated host. The operation will complete successfully once the dedicated host has restarted
+// and is running. To determine the health of VMs deployed on the dedicated host after the
+// restart, check the Resource Health Center in the Azure Portal. Please refer to https://docs.microsoft.com/en-us/azure/service-health/resource-health-overview
+// for more details.
+// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2022-03-01
+// resourceGroupName - The name of the resource group.
+// hostGroupName - The name of the dedicated host group.
+// hostName - The name of the dedicated host.
+// options - DedicatedHostsClientBeginRestartOptions contains the optional parameters for the DedicatedHostsClient.BeginRestart
+// method.
+func (client *DedicatedHostsClient) BeginRestart(ctx context.Context, resourceGroupName string, hostGroupName string, hostName string, options *DedicatedHostsClientBeginRestartOptions) (*runtime.Poller[DedicatedHostsClientRestartResponse], error) {
+    if options == nil || options.ResumeToken == "" {
+        resp, err := client.restart(ctx, resourceGroupName, hostGroupName, hostName, options)
+        if err != nil {
+            return nil, err
+        }
+        return runtime.NewPoller[DedicatedHostsClientRestartResponse](resp, client.pl, nil)
+    } else {
+        return runtime.NewPollerFromResumeToken[DedicatedHostsClientRestartResponse](options.ResumeToken, client.pl, nil)
+    }
+}
+
+// Restart - Restart the dedicated host. The operation will complete successfully once the dedicated host has restarted and
+// is running. To determine the health of VMs deployed on the dedicated host after the
+// restart, check the Resource Health Center in the Azure Portal. Please refer to https://docs.microsoft.com/en-us/azure/service-health/resource-health-overview
+// for more details.
+// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2022-03-01
+func (client *DedicatedHostsClient) restart(ctx context.Context, resourceGroupName string, hostGroupName string, hostName string, options *DedicatedHostsClientBeginRestartOptions) (*http.Response, error) {
+    req, err := client.restartCreateRequest(ctx, resourceGroupName, hostGroupName, hostName, options)
+    if err != nil {
+        return nil, err
+    }
+    resp, err := client.pl.Do(req)
+    if err != nil {
+        return nil, err
+    }
+    if !runtime.HasStatusCode(resp, http.StatusOK) {
+        return nil, runtime.NewResponseError(resp)
+    }
+    return resp, nil
+}
+
+// restartCreateRequest creates the Restart request.
+func (client *DedicatedHostsClient) restartCreateRequest(ctx context.Context, resourceGroupName string, hostGroupName string, hostName string, options *DedicatedHostsClientBeginRestartOptions) (*policy.Request, error) {
+    urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}/restart"
+    if resourceGroupName == "" {
+        return nil, errors.New("parameter resourceGroupName cannot be empty")
+    }
+    urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName))
+    if hostGroupName == "" {
+        return nil, errors.New("parameter hostGroupName cannot be empty")
+    }
+    urlPath = strings.ReplaceAll(urlPath, "{hostGroupName}", url.PathEscape(hostGroupName))
+    if hostName == "" {
+        return nil, errors.New("parameter hostName cannot be empty")
+    }
+    urlPath = strings.ReplaceAll(urlPath, "{hostName}", url.PathEscape(hostName))
+    if client.subscriptionID == "" {
+        return nil, errors.New("parameter client.subscriptionID cannot be empty")
+    }
+    urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID))
+    req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath))
+    if err != nil {
+        return nil, err
+    }
+    reqQP := req.Raw().URL.Query()
+    reqQP.Set("api-version", "2022-03-01")
+    req.Raw().URL.RawQuery = reqQP.Encode()
+    req.Raw().Header["Accept"] = []string{"application/json"}
+    return req, nil
+}
+
+// BeginUpdate - Update a dedicated host.
+// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2022-03-01
+// resourceGroupName - The name of the resource group.
+// hostGroupName - The name of the dedicated host group.
+// hostName - The name of the dedicated host.
+// parameters - Parameters supplied to the Update Dedicated Host operation.
+// options - DedicatedHostsClientBeginUpdateOptions contains the optional parameters for the DedicatedHostsClient.BeginUpdate
+// method.
+func (client *DedicatedHostsClient) BeginUpdate(ctx context.Context, resourceGroupName string, hostGroupName string, hostName string, parameters DedicatedHostUpdate, options *DedicatedHostsClientBeginUpdateOptions) (*runtime.Poller[DedicatedHostsClientUpdateResponse], error) {
+    if options == nil || options.ResumeToken == "" {
+        resp, err := client.update(ctx, resourceGroupName, hostGroupName, hostName, parameters, options)
+        if err != nil {
+            return nil, err
+        }
+        return runtime.NewPoller[DedicatedHostsClientUpdateResponse](resp, client.pl, nil)
+    } else {
+        return runtime.NewPollerFromResumeToken[DedicatedHostsClientUpdateResponse](options.ResumeToken, client.pl, nil)
+    }
+}
+
+// Update - Update a dedicated host.
+// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2022-03-01
+func (client *DedicatedHostsClient) update(ctx context.Context, resourceGroupName string, hostGroupName string, hostName string, parameters DedicatedHostUpdate, options *DedicatedHostsClientBeginUpdateOptions) (*http.Response, error) {
+    req, err := client.updateCreateRequest(ctx, resourceGroupName, hostGroupName, hostName, parameters, options)
+    if err != nil {
+        return nil, err
+    }
+    resp, err := client.pl.Do(req)
+    if err != nil {
+        return nil, err
+    }
+    if !runtime.HasStatusCode(resp, http.StatusOK) {
+        return nil, runtime.NewResponseError(resp)
+    }
+    return resp, nil
+}
+
+// updateCreateRequest creates the Update request.
+func (client *DedicatedHostsClient) updateCreateRequest(ctx context.Context, resourceGroupName string, hostGroupName string, hostName string, parameters DedicatedHostUpdate, options *DedicatedHostsClientBeginUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if hostGroupName == "" { + return nil, errors.New("parameter hostGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{hostGroupName}", url.PathEscape(hostGroupName)) + if hostName == "" { + return nil, errors.New("parameter hostName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{hostName}", url.PathEscape(hostName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, parameters) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_diskaccesses_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_diskaccesses_client.go new file mode 100644 index 000000000..d0d85b5ee --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_diskaccesses_client.go @@ -0,0 +1,774 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armcompute + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + armruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strings" +) + +// DiskAccessesClient contains the methods for the DiskAccesses group. +// Don't use this type directly, use NewDiskAccessesClient() instead. +type DiskAccessesClient struct { + host string + subscriptionID string + pl runtime.Pipeline +} + +// NewDiskAccessesClient creates a new instance of DiskAccessesClient with the specified values. +// subscriptionID - Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms +// part of the URI for every service call. +// credential - used to authorize requests. Usually a credential from azidentity. +// options - pass nil to accept the default values. 
+func NewDiskAccessesClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*DiskAccessesClient, error) {
+ if options == nil {
+ options = &arm.ClientOptions{}
+ }
+ ep := cloud.AzurePublic.Services[cloud.ResourceManager].Endpoint
+ if c, ok := options.Cloud.Services[cloud.ResourceManager]; ok {
+ ep = c.Endpoint
+ }
+ pl, err := armruntime.NewPipeline(moduleName, moduleVersion, credential, runtime.PipelineOptions{}, options)
+ if err != nil {
+ return nil, err
+ }
+ client := &DiskAccessesClient{
+ subscriptionID: subscriptionID,
+ host: ep,
+ pl: pl,
+ }
+ return client, nil
+}
+
+// BeginCreateOrUpdate - Creates or updates a disk access resource.
+// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2021-12-01
+// resourceGroupName - The name of the resource group.
+// diskAccessName - The name of the disk access resource that is being created. The name can't be changed after the disk
+// access resource is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The
+// maximum name length is 80 characters.
+// diskAccess - disk access object supplied in the body of the Put disk access operation.
+// options - DiskAccessesClientBeginCreateOrUpdateOptions contains the optional parameters for the DiskAccessesClient.BeginCreateOrUpdate
+// method.
+func (client *DiskAccessesClient) BeginCreateOrUpdate(ctx context.Context, resourceGroupName string, diskAccessName string, diskAccess DiskAccess, options *DiskAccessesClientBeginCreateOrUpdateOptions) (*runtime.Poller[DiskAccessesClientCreateOrUpdateResponse], error) {
+ if options == nil || options.ResumeToken == "" {
+ resp, err := client.createOrUpdate(ctx, resourceGroupName, diskAccessName, diskAccess, options)
+ if err != nil {
+ return nil, err
+ }
+ return runtime.NewPoller[DiskAccessesClientCreateOrUpdateResponse](resp, client.pl, nil)
+ } else {
+ return runtime.NewPollerFromResumeToken[DiskAccessesClientCreateOrUpdateResponse](options.ResumeToken, client.pl, nil)
+ }
+}
+
+// CreateOrUpdate - Creates or updates a disk access resource.
+// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2021-12-01
+func (client *DiskAccessesClient) createOrUpdate(ctx context.Context, resourceGroupName string, diskAccessName string, diskAccess DiskAccess, options *DiskAccessesClientBeginCreateOrUpdateOptions) (*http.Response, error) {
+ req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, diskAccessName, diskAccess, options)
+ if err != nil {
+ return nil, err
+ }
+ resp, err := client.pl.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) {
+ return nil, runtime.NewResponseError(resp)
+ }
+ return resp, nil
+}
+
+// createOrUpdateCreateRequest creates the CreateOrUpdate request.
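An illustrative counterpart for BeginCreateOrUpdate above, placed before the generated createOrUpdateCreateRequest helper. A sketch only, reusing the imports and credential setup from the dedicated-host example; the names and region are placeholders, not values the diff prescribes.

```go
// Create (or overwrite) a disk access resource and block until the LRO
// completes; `cred` and `ctx` are set up as in the earlier sketch.
client, err := armcompute.NewDiskAccessesClient("<subscription-id>", cred, nil)
if err != nil {
	log.Fatal(err)
}
poller, err := client.BeginCreateOrUpdate(ctx, "<resource-group>", "<disk-access>",
	armcompute.DiskAccess{Location: to.Ptr("westus2")}, nil)
if err != nil {
	log.Fatal(err)
}
resp, err := poller.PollUntilDone(ctx, nil)
if err != nil {
	log.Fatal(err)
}
log.Printf("provisioned disk access: %s", *resp.DiskAccess.Name)
```

The initial PUT accepts 200 or 202, matching the HasStatusCode check in createOrUpdate above.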
+func (client *DiskAccessesClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, diskAccessName string, diskAccess DiskAccess, options *DiskAccessesClientBeginCreateOrUpdateOptions) (*policy.Request, error) {
+ urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}"
+ if client.subscriptionID == "" {
+ return nil, errors.New("parameter client.subscriptionID cannot be empty")
+ }
+ urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID))
+ if resourceGroupName == "" {
+ return nil, errors.New("parameter resourceGroupName cannot be empty")
+ }
+ urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName))
+ if diskAccessName == "" {
+ return nil, errors.New("parameter diskAccessName cannot be empty")
+ }
+ urlPath = strings.ReplaceAll(urlPath, "{diskAccessName}", url.PathEscape(diskAccessName))
+ req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.host, urlPath))
+ if err != nil {
+ return nil, err
+ }
+ reqQP := req.Raw().URL.Query()
+ reqQP.Set("api-version", "2021-12-01")
+ req.Raw().URL.RawQuery = reqQP.Encode()
+ req.Raw().Header["Accept"] = []string{"application/json"}
+ return req, runtime.MarshalAsJSON(req, diskAccess)
+}
+
+// BeginDelete - Deletes a disk access resource.
+// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2021-12-01
+// resourceGroupName - The name of the resource group.
+// diskAccessName - The name of the disk access resource that is being created. The name can't be changed after the disk
+// access resource is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The
+// maximum name length is 80 characters.
+// options - DiskAccessesClientBeginDeleteOptions contains the optional parameters for the DiskAccessesClient.BeginDelete
+// method.
+func (client *DiskAccessesClient) BeginDelete(ctx context.Context, resourceGroupName string, diskAccessName string, options *DiskAccessesClientBeginDeleteOptions) (*runtime.Poller[DiskAccessesClientDeleteResponse], error) {
+ if options == nil || options.ResumeToken == "" {
+ resp, err := client.deleteOperation(ctx, resourceGroupName, diskAccessName, options)
+ if err != nil {
+ return nil, err
+ }
+ return runtime.NewPoller[DiskAccessesClientDeleteResponse](resp, client.pl, nil)
+ } else {
+ return runtime.NewPollerFromResumeToken[DiskAccessesClientDeleteResponse](options.ResumeToken, client.pl, nil)
+ }
+}
+
+// Delete - Deletes a disk access resource.
+// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2021-12-01
+func (client *DiskAccessesClient) deleteOperation(ctx context.Context, resourceGroupName string, diskAccessName string, options *DiskAccessesClientBeginDeleteOptions) (*http.Response, error) {
+ req, err := client.deleteCreateRequest(ctx, resourceGroupName, diskAccessName, options)
+ if err != nil {
+ return nil, err
+ }
+ resp, err := client.pl.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted, http.StatusNoContent) {
+ return nil, runtime.NewResponseError(resp)
+ }
+ return resp, nil
+}
+
+// deleteCreateRequest creates the Delete request.
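One detail worth illustrating before the deleteCreateRequest helper: every Begin* method on this client supports resuming an in-flight operation through options.ResumeToken, as the first branch of BeginDelete above shows. A hedged sketch of that round trip, with placeholder names and setup as in the earlier examples:

```go
// Start the delete, capture a resume token, and rehydrate the poller later
// (for example, in a new process). When ResumeToken is set, BeginDelete
// skips the initial request entirely, so the name arguments are not used.
poller, err := client.BeginDelete(ctx, "<resource-group>", "<disk-access>", nil)
if err != nil {
	log.Fatal(err)
}
token, err := poller.ResumeToken() // persist somewhere durable
if err != nil {
	log.Fatal(err)
}
// ...later:
resumed, err := client.BeginDelete(ctx, "", "",
	&armcompute.DiskAccessesClientBeginDeleteOptions{ResumeToken: token})
if err != nil {
	log.Fatal(err)
}
if _, err := resumed.PollUntilDone(ctx, nil); err != nil {
	log.Fatal(err)
}
```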
+func (client *DiskAccessesClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, diskAccessName string, options *DiskAccessesClientBeginDeleteOptions) (*policy.Request, error) {
+ urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}"
+ if client.subscriptionID == "" {
+ return nil, errors.New("parameter client.subscriptionID cannot be empty")
+ }
+ urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID))
+ if resourceGroupName == "" {
+ return nil, errors.New("parameter resourceGroupName cannot be empty")
+ }
+ urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName))
+ if diskAccessName == "" {
+ return nil, errors.New("parameter diskAccessName cannot be empty")
+ }
+ urlPath = strings.ReplaceAll(urlPath, "{diskAccessName}", url.PathEscape(diskAccessName))
+ req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.host, urlPath))
+ if err != nil {
+ return nil, err
+ }
+ reqQP := req.Raw().URL.Query()
+ reqQP.Set("api-version", "2021-12-01")
+ req.Raw().URL.RawQuery = reqQP.Encode()
+ req.Raw().Header["Accept"] = []string{"application/json"}
+ return req, nil
+}
+
+// BeginDeleteAPrivateEndpointConnection - Deletes a private endpoint connection under a disk access resource.
+// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2021-12-01
+// resourceGroupName - The name of the resource group.
+// diskAccessName - The name of the disk access resource that is being created. The name can't be changed after the disk
+// access resource is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The
+// maximum name length is 80 characters.
+// privateEndpointConnectionName - The name of the private endpoint connection.
+// options - DiskAccessesClientBeginDeleteAPrivateEndpointConnectionOptions contains the optional parameters for the DiskAccessesClient.BeginDeleteAPrivateEndpointConnection
+// method.
+func (client *DiskAccessesClient) BeginDeleteAPrivateEndpointConnection(ctx context.Context, resourceGroupName string, diskAccessName string, privateEndpointConnectionName string, options *DiskAccessesClientBeginDeleteAPrivateEndpointConnectionOptions) (*runtime.Poller[DiskAccessesClientDeleteAPrivateEndpointConnectionResponse], error) {
+ if options == nil || options.ResumeToken == "" {
+ resp, err := client.deleteAPrivateEndpointConnection(ctx, resourceGroupName, diskAccessName, privateEndpointConnectionName, options)
+ if err != nil {
+ return nil, err
+ }
+ return runtime.NewPoller[DiskAccessesClientDeleteAPrivateEndpointConnectionResponse](resp, client.pl, nil)
+ } else {
+ return runtime.NewPollerFromResumeToken[DiskAccessesClientDeleteAPrivateEndpointConnectionResponse](options.ResumeToken, client.pl, nil)
+ }
+}
+
+// DeleteAPrivateEndpointConnection - Deletes a private endpoint connection under a disk access resource.
+// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2021-12-01
+func (client *DiskAccessesClient) deleteAPrivateEndpointConnection(ctx context.Context, resourceGroupName string, diskAccessName string, privateEndpointConnectionName string, options *DiskAccessesClientBeginDeleteAPrivateEndpointConnectionOptions) (*http.Response, error) {
+ req, err := client.deleteAPrivateEndpointConnectionCreateRequest(ctx, resourceGroupName, diskAccessName, privateEndpointConnectionName, options)
+ if err != nil {
+ return nil, err
+ }
+ resp, err := client.pl.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted, http.StatusNoContent) {
+ return nil, runtime.NewResponseError(resp)
+ }
+ return resp, nil
+}
+
+// deleteAPrivateEndpointConnectionCreateRequest creates the DeleteAPrivateEndpointConnection request.
+func (client *DiskAccessesClient) deleteAPrivateEndpointConnectionCreateRequest(ctx context.Context, resourceGroupName string, diskAccessName string, privateEndpointConnectionName string, options *DiskAccessesClientBeginDeleteAPrivateEndpointConnectionOptions) (*policy.Request, error) {
+ urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}/privateEndpointConnections/{privateEndpointConnectionName}"
+ if client.subscriptionID == "" {
+ return nil, errors.New("parameter client.subscriptionID cannot be empty")
+ }
+ urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID))
+ if resourceGroupName == "" {
+ return nil, errors.New("parameter resourceGroupName cannot be empty")
+ }
+ urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName))
+ if diskAccessName == "" {
+ return nil, errors.New("parameter diskAccessName cannot be empty")
+ }
+ urlPath = strings.ReplaceAll(urlPath, "{diskAccessName}", url.PathEscape(diskAccessName))
+ if privateEndpointConnectionName == "" {
+ return nil, errors.New("parameter privateEndpointConnectionName cannot be empty")
+ }
+ urlPath = strings.ReplaceAll(urlPath, "{privateEndpointConnectionName}", url.PathEscape(privateEndpointConnectionName))
+ req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.host, urlPath))
+ if err != nil {
+ return nil, err
+ }
+ reqQP := req.Raw().URL.Query()
+ reqQP.Set("api-version", "2021-12-01")
+ req.Raw().URL.RawQuery = reqQP.Encode()
+ req.Raw().Header["Accept"] = []string{"application/json"}
+ return req, nil
+}
+
+// Get - Gets information about a disk access resource.
+// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2021-12-01
+// resourceGroupName - The name of the resource group.
+// diskAccessName - The name of the disk access resource that is being created. The name can't be changed after the disk
+// access resource is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The
+// maximum name length is 80 characters.
+// options - DiskAccessesClientGetOptions contains the optional parameters for the DiskAccessesClient.Get method.
+func (client *DiskAccessesClient) Get(ctx context.Context, resourceGroupName string, diskAccessName string, options *DiskAccessesClientGetOptions) (DiskAccessesClientGetResponse, error) {
+ req, err := client.getCreateRequest(ctx, resourceGroupName, diskAccessName, options)
+ if err != nil {
+ return DiskAccessesClientGetResponse{}, err
+ }
+ resp, err := client.pl.Do(req)
+ if err != nil {
+ return DiskAccessesClientGetResponse{}, err
+ }
+ if !runtime.HasStatusCode(resp, http.StatusOK) {
+ return DiskAccessesClientGetResponse{}, runtime.NewResponseError(resp)
+ }
+ return client.getHandleResponse(resp)
+}
+
+// getCreateRequest creates the Get request.
+func (client *DiskAccessesClient) getCreateRequest(ctx context.Context, resourceGroupName string, diskAccessName string, options *DiskAccessesClientGetOptions) (*policy.Request, error) {
+ urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}"
+ if client.subscriptionID == "" {
+ return nil, errors.New("parameter client.subscriptionID cannot be empty")
+ }
+ urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID))
+ if resourceGroupName == "" {
+ return nil, errors.New("parameter resourceGroupName cannot be empty")
+ }
+ urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName))
+ if diskAccessName == "" {
+ return nil, errors.New("parameter diskAccessName cannot be empty")
+ }
+ urlPath = strings.ReplaceAll(urlPath, "{diskAccessName}", url.PathEscape(diskAccessName))
+ req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))
+ if err != nil {
+ return nil, err
+ }
+ reqQP := req.Raw().URL.Query()
+ reqQP.Set("api-version", "2021-12-01")
+ req.Raw().URL.RawQuery = reqQP.Encode()
+ req.Raw().Header["Accept"] = []string{"application/json"}
+ return req, nil
+}
+
+// getHandleResponse handles the Get response.
+func (client *DiskAccessesClient) getHandleResponse(resp *http.Response) (DiskAccessesClientGetResponse, error) {
+ result := DiskAccessesClientGetResponse{}
+ if err := runtime.UnmarshalAsJSON(resp, &result.DiskAccess); err != nil {
+ return DiskAccessesClientGetResponse{}, err
+ }
+ return result, nil
+}
+
+// GetAPrivateEndpointConnection - Gets information about a private endpoint connection under a disk access resource.
+// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2021-12-01
+// resourceGroupName - The name of the resource group.
+// diskAccessName - The name of the disk access resource that is being created. The name can't be changed after the disk
+// access resource is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The
+// maximum name length is 80 characters.
+// privateEndpointConnectionName - The name of the private endpoint connection.
+// options - DiskAccessesClientGetAPrivateEndpointConnectionOptions contains the optional parameters for the DiskAccessesClient.GetAPrivateEndpointConnection
+// method.
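Before GetAPrivateEndpointConnection, a note on the error path: as the comments say, failed calls surface as *azcore.ResponseError, so callers can branch on the HTTP status or ARM error code. A minimal sketch against the Get method above, assuming the errors, net/http, and azcore imports in addition to the earlier setup:

```go
// Synchronous read with typed error inspection; placeholder names as before.
resp, err := client.Get(ctx, "<resource-group>", "<disk-access>", nil)
if err != nil {
	var respErr *azcore.ResponseError
	if errors.As(err, &respErr) && respErr.StatusCode == http.StatusNotFound {
		log.Println("disk access resource not found")
		return
	}
	log.Fatal(err)
}
log.Printf("resource id: %s", *resp.DiskAccess.ID)
```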
+func (client *DiskAccessesClient) GetAPrivateEndpointConnection(ctx context.Context, resourceGroupName string, diskAccessName string, privateEndpointConnectionName string, options *DiskAccessesClientGetAPrivateEndpointConnectionOptions) (DiskAccessesClientGetAPrivateEndpointConnectionResponse, error) { + req, err := client.getAPrivateEndpointConnectionCreateRequest(ctx, resourceGroupName, diskAccessName, privateEndpointConnectionName, options) + if err != nil { + return DiskAccessesClientGetAPrivateEndpointConnectionResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return DiskAccessesClientGetAPrivateEndpointConnectionResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return DiskAccessesClientGetAPrivateEndpointConnectionResponse{}, runtime.NewResponseError(resp) + } + return client.getAPrivateEndpointConnectionHandleResponse(resp) +} + +// getAPrivateEndpointConnectionCreateRequest creates the GetAPrivateEndpointConnection request. +func (client *DiskAccessesClient) getAPrivateEndpointConnectionCreateRequest(ctx context.Context, resourceGroupName string, diskAccessName string, privateEndpointConnectionName string, options *DiskAccessesClientGetAPrivateEndpointConnectionOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}/privateEndpointConnections/{privateEndpointConnectionName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if diskAccessName == "" { + return nil, errors.New("parameter diskAccessName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{diskAccessName}", url.PathEscape(diskAccessName)) + if privateEndpointConnectionName == "" { + return nil, errors.New("parameter privateEndpointConnectionName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{privateEndpointConnectionName}", url.PathEscape(privateEndpointConnectionName)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-12-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getAPrivateEndpointConnectionHandleResponse handles the GetAPrivateEndpointConnection response. +func (client *DiskAccessesClient) getAPrivateEndpointConnectionHandleResponse(resp *http.Response) (DiskAccessesClientGetAPrivateEndpointConnectionResponse, error) { + result := DiskAccessesClientGetAPrivateEndpointConnectionResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.PrivateEndpointConnection); err != nil { + return DiskAccessesClientGetAPrivateEndpointConnectionResponse{}, err + } + return result, nil +} + +// GetPrivateLinkResources - Gets the private link resources possible under disk access resource +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-12-01 +// resourceGroupName - The name of the resource group. +// diskAccessName - The name of the disk access resource that is being created. 
The name can't be changed after the disk access
+// resource is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The
+// maximum name length is 80 characters.
+// options - DiskAccessesClientGetPrivateLinkResourcesOptions contains the optional parameters for the DiskAccessesClient.GetPrivateLinkResources
+// method.
+func (client *DiskAccessesClient) GetPrivateLinkResources(ctx context.Context, resourceGroupName string, diskAccessName string, options *DiskAccessesClientGetPrivateLinkResourcesOptions) (DiskAccessesClientGetPrivateLinkResourcesResponse, error) {
+ req, err := client.getPrivateLinkResourcesCreateRequest(ctx, resourceGroupName, diskAccessName, options)
+ if err != nil {
+ return DiskAccessesClientGetPrivateLinkResourcesResponse{}, err
+ }
+ resp, err := client.pl.Do(req)
+ if err != nil {
+ return DiskAccessesClientGetPrivateLinkResourcesResponse{}, err
+ }
+ if !runtime.HasStatusCode(resp, http.StatusOK) {
+ return DiskAccessesClientGetPrivateLinkResourcesResponse{}, runtime.NewResponseError(resp)
+ }
+ return client.getPrivateLinkResourcesHandleResponse(resp)
+}
+
+// getPrivateLinkResourcesCreateRequest creates the GetPrivateLinkResources request.
+func (client *DiskAccessesClient) getPrivateLinkResourcesCreateRequest(ctx context.Context, resourceGroupName string, diskAccessName string, options *DiskAccessesClientGetPrivateLinkResourcesOptions) (*policy.Request, error) {
+ urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}/privateLinkResources"
+ if client.subscriptionID == "" {
+ return nil, errors.New("parameter client.subscriptionID cannot be empty")
+ }
+ urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID))
+ if resourceGroupName == "" {
+ return nil, errors.New("parameter resourceGroupName cannot be empty")
+ }
+ urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName))
+ if diskAccessName == "" {
+ return nil, errors.New("parameter diskAccessName cannot be empty")
+ }
+ urlPath = strings.ReplaceAll(urlPath, "{diskAccessName}", url.PathEscape(diskAccessName))
+ req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))
+ if err != nil {
+ return nil, err
+ }
+ reqQP := req.Raw().URL.Query()
+ reqQP.Set("api-version", "2021-12-01")
+ req.Raw().URL.RawQuery = reqQP.Encode()
+ req.Raw().Header["Accept"] = []string{"application/json"}
+ return req, nil
+}
+
+// getPrivateLinkResourcesHandleResponse handles the GetPrivateLinkResources response.
+func (client *DiskAccessesClient) getPrivateLinkResourcesHandleResponse(resp *http.Response) (DiskAccessesClientGetPrivateLinkResourcesResponse, error) {
+ result := DiskAccessesClientGetPrivateLinkResourcesResponse{}
+ if err := runtime.UnmarshalAsJSON(resp, &result.PrivateLinkResourceListResult); err != nil {
+ return DiskAccessesClientGetPrivateLinkResourcesResponse{}, err
+ }
+ return result, nil
+}
+
+// NewListPager - Lists all the disk access resources under a subscription.
+// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2021-12-01
+// options - DiskAccessesClientListOptions contains the optional parameters for the DiskAccessesClient.List method.
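The NewListPager method defined next performs no I/O itself; it returns a runtime.Pager that callers drive. A typical iteration, sketched with the same assumed setup and placeholder names as the earlier examples:

```go
// Walk every disk access resource in the subscription. More reports whether
// another page exists (driven by NextLink); NextPage fetches that page.
pager := client.NewListPager(nil)
for pager.More() {
	page, err := pager.NextPage(ctx)
	if err != nil {
		log.Fatal(err)
	}
	for _, da := range page.Value {
		log.Printf("disk access: %s", *da.Name)
	}
}
```

This mirrors the PagingHandler wiring below: the Fetcher closure issues the first request via listCreateRequest and follows NextLink for every page after that.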
+func (client *DiskAccessesClient) NewListPager(options *DiskAccessesClientListOptions) *runtime.Pager[DiskAccessesClientListResponse] { + return runtime.NewPager(runtime.PagingHandler[DiskAccessesClientListResponse]{ + More: func(page DiskAccessesClientListResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *DiskAccessesClientListResponse) (DiskAccessesClientListResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listCreateRequest(ctx, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return DiskAccessesClientListResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return DiskAccessesClientListResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return DiskAccessesClientListResponse{}, runtime.NewResponseError(resp) + } + return client.listHandleResponse(resp) + }, + }) +} + +// listCreateRequest creates the List request. +func (client *DiskAccessesClient) listCreateRequest(ctx context.Context, options *DiskAccessesClientListOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/diskAccesses" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-12-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listHandleResponse handles the List response. +func (client *DiskAccessesClient) listHandleResponse(resp *http.Response) (DiskAccessesClientListResponse, error) { + result := DiskAccessesClientListResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.DiskAccessList); err != nil { + return DiskAccessesClientListResponse{}, err + } + return result, nil +} + +// NewListByResourceGroupPager - Lists all the disk access resources under a resource group. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-12-01 +// resourceGroupName - The name of the resource group. +// options - DiskAccessesClientListByResourceGroupOptions contains the optional parameters for the DiskAccessesClient.ListByResourceGroup +// method. 
+func (client *DiskAccessesClient) NewListByResourceGroupPager(resourceGroupName string, options *DiskAccessesClientListByResourceGroupOptions) *runtime.Pager[DiskAccessesClientListByResourceGroupResponse] {
+ return runtime.NewPager(runtime.PagingHandler[DiskAccessesClientListByResourceGroupResponse]{
+ More: func(page DiskAccessesClientListByResourceGroupResponse) bool {
+ return page.NextLink != nil && len(*page.NextLink) > 0
+ },
+ Fetcher: func(ctx context.Context, page *DiskAccessesClientListByResourceGroupResponse) (DiskAccessesClientListByResourceGroupResponse, error) {
+ var req *policy.Request
+ var err error
+ if page == nil {
+ req, err = client.listByResourceGroupCreateRequest(ctx, resourceGroupName, options)
+ } else {
+ req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink)
+ }
+ if err != nil {
+ return DiskAccessesClientListByResourceGroupResponse{}, err
+ }
+ resp, err := client.pl.Do(req)
+ if err != nil {
+ return DiskAccessesClientListByResourceGroupResponse{}, err
+ }
+ if !runtime.HasStatusCode(resp, http.StatusOK) {
+ return DiskAccessesClientListByResourceGroupResponse{}, runtime.NewResponseError(resp)
+ }
+ return client.listByResourceGroupHandleResponse(resp)
+ },
+ })
+}
+
+// listByResourceGroupCreateRequest creates the ListByResourceGroup request.
+func (client *DiskAccessesClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *DiskAccessesClientListByResourceGroupOptions) (*policy.Request, error) {
+ urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses"
+ if client.subscriptionID == "" {
+ return nil, errors.New("parameter client.subscriptionID cannot be empty")
+ }
+ urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID))
+ if resourceGroupName == "" {
+ return nil, errors.New("parameter resourceGroupName cannot be empty")
+ }
+ urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName))
+ req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))
+ if err != nil {
+ return nil, err
+ }
+ reqQP := req.Raw().URL.Query()
+ reqQP.Set("api-version", "2021-12-01")
+ req.Raw().URL.RawQuery = reqQP.Encode()
+ req.Raw().Header["Accept"] = []string{"application/json"}
+ return req, nil
+}
+
+// listByResourceGroupHandleResponse handles the ListByResourceGroup response.
+func (client *DiskAccessesClient) listByResourceGroupHandleResponse(resp *http.Response) (DiskAccessesClientListByResourceGroupResponse, error) {
+ result := DiskAccessesClientListByResourceGroupResponse{}
+ if err := runtime.UnmarshalAsJSON(resp, &result.DiskAccessList); err != nil {
+ return DiskAccessesClientListByResourceGroupResponse{}, err
+ }
+ return result, nil
+}
+
+// NewListPrivateEndpointConnectionsPager - Lists information about private endpoint connections under a disk access resource.
+// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2021-12-01
+// resourceGroupName - The name of the resource group.
+// diskAccessName - The name of the disk access resource that is being created. The name can't be changed after the disk
+// access resource is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The
+// maximum name length is 80 characters.
+// options - DiskAccessesClientListPrivateEndpointConnectionsOptions contains the optional parameters for the DiskAccessesClient.ListPrivateEndpointConnections +// method. +func (client *DiskAccessesClient) NewListPrivateEndpointConnectionsPager(resourceGroupName string, diskAccessName string, options *DiskAccessesClientListPrivateEndpointConnectionsOptions) *runtime.Pager[DiskAccessesClientListPrivateEndpointConnectionsResponse] { + return runtime.NewPager(runtime.PagingHandler[DiskAccessesClientListPrivateEndpointConnectionsResponse]{ + More: func(page DiskAccessesClientListPrivateEndpointConnectionsResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *DiskAccessesClientListPrivateEndpointConnectionsResponse) (DiskAccessesClientListPrivateEndpointConnectionsResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listPrivateEndpointConnectionsCreateRequest(ctx, resourceGroupName, diskAccessName, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return DiskAccessesClientListPrivateEndpointConnectionsResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return DiskAccessesClientListPrivateEndpointConnectionsResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return DiskAccessesClientListPrivateEndpointConnectionsResponse{}, runtime.NewResponseError(resp) + } + return client.listPrivateEndpointConnectionsHandleResponse(resp) + }, + }) +} + +// listPrivateEndpointConnectionsCreateRequest creates the ListPrivateEndpointConnections request. +func (client *DiskAccessesClient) listPrivateEndpointConnectionsCreateRequest(ctx context.Context, resourceGroupName string, diskAccessName string, options *DiskAccessesClientListPrivateEndpointConnectionsOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}/privateEndpointConnections" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if diskAccessName == "" { + return nil, errors.New("parameter diskAccessName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{diskAccessName}", url.PathEscape(diskAccessName)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-12-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listPrivateEndpointConnectionsHandleResponse handles the ListPrivateEndpointConnections response. 
+func (client *DiskAccessesClient) listPrivateEndpointConnectionsHandleResponse(resp *http.Response) (DiskAccessesClientListPrivateEndpointConnectionsResponse, error) {
+ result := DiskAccessesClientListPrivateEndpointConnectionsResponse{}
+ if err := runtime.UnmarshalAsJSON(resp, &result.PrivateEndpointConnectionListResult); err != nil {
+ return DiskAccessesClientListPrivateEndpointConnectionsResponse{}, err
+ }
+ return result, nil
+}
+
+// BeginUpdate - Updates (patches) a disk access resource.
+// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2021-12-01
+// resourceGroupName - The name of the resource group.
+// diskAccessName - The name of the disk access resource that is being created. The name can't be changed after the disk
+// access resource is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The
+// maximum name length is 80 characters.
+// diskAccess - disk access object supplied in the body of the Patch disk access operation.
+// options - DiskAccessesClientBeginUpdateOptions contains the optional parameters for the DiskAccessesClient.BeginUpdate
+// method.
+func (client *DiskAccessesClient) BeginUpdate(ctx context.Context, resourceGroupName string, diskAccessName string, diskAccess DiskAccessUpdate, options *DiskAccessesClientBeginUpdateOptions) (*runtime.Poller[DiskAccessesClientUpdateResponse], error) {
+ if options == nil || options.ResumeToken == "" {
+ resp, err := client.update(ctx, resourceGroupName, diskAccessName, diskAccess, options)
+ if err != nil {
+ return nil, err
+ }
+ return runtime.NewPoller[DiskAccessesClientUpdateResponse](resp, client.pl, nil)
+ } else {
+ return runtime.NewPollerFromResumeToken[DiskAccessesClientUpdateResponse](options.ResumeToken, client.pl, nil)
+ }
+}
+
+// Update - Updates (patches) a disk access resource.
+// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2021-12-01
+func (client *DiskAccessesClient) update(ctx context.Context, resourceGroupName string, diskAccessName string, diskAccess DiskAccessUpdate, options *DiskAccessesClientBeginUpdateOptions) (*http.Response, error) {
+ req, err := client.updateCreateRequest(ctx, resourceGroupName, diskAccessName, diskAccess, options)
+ if err != nil {
+ return nil, err
+ }
+ resp, err := client.pl.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) {
+ return nil, runtime.NewResponseError(resp)
+ }
+ return resp, nil
+}
+
+// updateCreateRequest creates the Update request.
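Before the updateCreateRequest helper: note that the PATCH path above takes the sparse DiskAccessUpdate model rather than a full DiskAccess, which makes it the natural way to change tags in place. A sketch, with placeholders and setup as in the earlier examples:

```go
// Patch only the tags of an existing disk access resource.
poller, err := client.BeginUpdate(ctx, "<resource-group>", "<disk-access>",
	armcompute.DiskAccessUpdate{
		Tags: map[string]*string{"team": to.Ptr("storage")},
	}, nil)
if err != nil {
	log.Fatal(err)
}
if _, err := poller.PollUntilDone(ctx, nil); err != nil {
	log.Fatal(err)
}
```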
+func (client *DiskAccessesClient) updateCreateRequest(ctx context.Context, resourceGroupName string, diskAccessName string, diskAccess DiskAccessUpdate, options *DiskAccessesClientBeginUpdateOptions) (*policy.Request, error) {
+ urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}"
+ if client.subscriptionID == "" {
+ return nil, errors.New("parameter client.subscriptionID cannot be empty")
+ }
+ urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID))
+ if resourceGroupName == "" {
+ return nil, errors.New("parameter resourceGroupName cannot be empty")
+ }
+ urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName))
+ if diskAccessName == "" {
+ return nil, errors.New("parameter diskAccessName cannot be empty")
+ }
+ urlPath = strings.ReplaceAll(urlPath, "{diskAccessName}", url.PathEscape(diskAccessName))
+ req, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.host, urlPath))
+ if err != nil {
+ return nil, err
+ }
+ reqQP := req.Raw().URL.Query()
+ reqQP.Set("api-version", "2021-12-01")
+ req.Raw().URL.RawQuery = reqQP.Encode()
+ req.Raw().Header["Accept"] = []string{"application/json"}
+ return req, runtime.MarshalAsJSON(req, diskAccess)
+}
+
+// BeginUpdateAPrivateEndpointConnection - Approve or reject a private endpoint connection under a disk access resource;
+// this can't be used to create a new private endpoint connection.
+// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2021-12-01
+// resourceGroupName - The name of the resource group.
+// diskAccessName - The name of the disk access resource that is being created. The name can't be changed after the disk
+// access resource is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The
+// maximum name length is 80 characters.
+// privateEndpointConnectionName - The name of the private endpoint connection.
+// privateEndpointConnection - private endpoint connection object supplied in the body of the Put private endpoint connection
+// operation.
+// options - DiskAccessesClientBeginUpdateAPrivateEndpointConnectionOptions contains the optional parameters for the DiskAccessesClient.BeginUpdateAPrivateEndpointConnection
+// method.
+func (client *DiskAccessesClient) BeginUpdateAPrivateEndpointConnection(ctx context.Context, resourceGroupName string, diskAccessName string, privateEndpointConnectionName string, privateEndpointConnection PrivateEndpointConnection, options *DiskAccessesClientBeginUpdateAPrivateEndpointConnectionOptions) (*runtime.Poller[DiskAccessesClientUpdateAPrivateEndpointConnectionResponse], error) {
+ if options == nil || options.ResumeToken == "" {
+ resp, err := client.updateAPrivateEndpointConnection(ctx, resourceGroupName, diskAccessName, privateEndpointConnectionName, privateEndpointConnection, options)
+ if err != nil {
+ return nil, err
+ }
+ return runtime.NewPoller[DiskAccessesClientUpdateAPrivateEndpointConnectionResponse](resp, client.pl, nil)
+ } else {
+ return runtime.NewPollerFromResumeToken[DiskAccessesClientUpdateAPrivateEndpointConnectionResponse](options.ResumeToken, client.pl, nil)
+ }
+}
+
+// UpdateAPrivateEndpointConnection - Approve or reject a private endpoint connection under a disk access resource; this
+// can't be used to create a new private endpoint connection.
+// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2021-12-01 +func (client *DiskAccessesClient) updateAPrivateEndpointConnection(ctx context.Context, resourceGroupName string, diskAccessName string, privateEndpointConnectionName string, privateEndpointConnection PrivateEndpointConnection, options *DiskAccessesClientBeginUpdateAPrivateEndpointConnectionOptions) (*http.Response, error) { + req, err := client.updateAPrivateEndpointConnectionCreateRequest(ctx, resourceGroupName, diskAccessName, privateEndpointConnectionName, privateEndpointConnection, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// updateAPrivateEndpointConnectionCreateRequest creates the UpdateAPrivateEndpointConnection request. +func (client *DiskAccessesClient) updateAPrivateEndpointConnectionCreateRequest(ctx context.Context, resourceGroupName string, diskAccessName string, privateEndpointConnectionName string, privateEndpointConnection PrivateEndpointConnection, options *DiskAccessesClientBeginUpdateAPrivateEndpointConnectionOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskAccesses/{diskAccessName}/privateEndpointConnections/{privateEndpointConnectionName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if diskAccessName == "" { + return nil, errors.New("parameter diskAccessName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{diskAccessName}", url.PathEscape(diskAccessName)) + if privateEndpointConnectionName == "" { + return nil, errors.New("parameter privateEndpointConnectionName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{privateEndpointConnectionName}", url.PathEscape(privateEndpointConnectionName)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-12-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, privateEndpointConnection) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_diskencryptionsets_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_diskencryptionsets_client.go new file mode 100644 index 000000000..77c4430f0 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_diskencryptionsets_client.go @@ -0,0 +1,507 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +package armcompute + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + armruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strings" +) + +// DiskEncryptionSetsClient contains the methods for the DiskEncryptionSets group. +// Don't use this type directly, use NewDiskEncryptionSetsClient() instead. +type DiskEncryptionSetsClient struct { + host string + subscriptionID string + pl runtime.Pipeline +} + +// NewDiskEncryptionSetsClient creates a new instance of DiskEncryptionSetsClient with the specified values. +// subscriptionID - Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms +// part of the URI for every service call. +// credential - used to authorize requests. Usually a credential from azidentity. +// options - pass nil to accept the default values. +func NewDiskEncryptionSetsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*DiskEncryptionSetsClient, error) { + if options == nil { + options = &arm.ClientOptions{} + } + ep := cloud.AzurePublic.Services[cloud.ResourceManager].Endpoint + if c, ok := options.Cloud.Services[cloud.ResourceManager]; ok { + ep = c.Endpoint + } + pl, err := armruntime.NewPipeline(moduleName, moduleVersion, credential, runtime.PipelineOptions{}, options) + if err != nil { + return nil, err + } + client := &DiskEncryptionSetsClient{ + subscriptionID: subscriptionID, + host: ep, + pl: pl, + } + return client, nil +} + +// BeginCreateOrUpdate - Creates or updates a disk encryption set +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-12-01 +// resourceGroupName - The name of the resource group. +// diskEncryptionSetName - The name of the disk encryption set that is being created. The name can't be changed after the +// disk encryption set is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The maximum +// name length is 80 characters. +// diskEncryptionSet - disk encryption set object supplied in the body of the Put disk encryption set operation. +// options - DiskEncryptionSetsClientBeginCreateOrUpdateOptions contains the optional parameters for the DiskEncryptionSetsClient.BeginCreateOrUpdate +// method. +func (client *DiskEncryptionSetsClient) BeginCreateOrUpdate(ctx context.Context, resourceGroupName string, diskEncryptionSetName string, diskEncryptionSet DiskEncryptionSet, options *DiskEncryptionSetsClientBeginCreateOrUpdateOptions) (*runtime.Poller[DiskEncryptionSetsClientCreateOrUpdateResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.createOrUpdate(ctx, resourceGroupName, diskEncryptionSetName, diskEncryptionSet, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[DiskEncryptionSetsClientCreateOrUpdateResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[DiskEncryptionSetsClientCreateOrUpdateResponse](options.ResumeToken, client.pl, nil) + } +} + +// CreateOrUpdate - Creates or updates a disk encryption set +// If the operation fails it returns an *azcore.ResponseError type. 
+// Generated from API version 2021-12-01 +func (client *DiskEncryptionSetsClient) createOrUpdate(ctx context.Context, resourceGroupName string, diskEncryptionSetName string, diskEncryptionSet DiskEncryptionSet, options *DiskEncryptionSetsClientBeginCreateOrUpdateOptions) (*http.Response, error) { + req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, diskEncryptionSetName, diskEncryptionSet, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// createOrUpdateCreateRequest creates the CreateOrUpdate request. +func (client *DiskEncryptionSetsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, diskEncryptionSetName string, diskEncryptionSet DiskEncryptionSet, options *DiskEncryptionSetsClientBeginCreateOrUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if diskEncryptionSetName == "" { + return nil, errors.New("parameter diskEncryptionSetName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{diskEncryptionSetName}", url.PathEscape(diskEncryptionSetName)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-12-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, diskEncryptionSet) +} + +// BeginDelete - Deletes a disk encryption set. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-12-01 +// resourceGroupName - The name of the resource group. +// diskEncryptionSetName - The name of the disk encryption set that is being created. The name can't be changed after the +// disk encryption set is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The maximum +// name length is 80 characters. +// options - DiskEncryptionSetsClientBeginDeleteOptions contains the optional parameters for the DiskEncryptionSetsClient.BeginDelete +// method. +func (client *DiskEncryptionSetsClient) BeginDelete(ctx context.Context, resourceGroupName string, diskEncryptionSetName string, options *DiskEncryptionSetsClientBeginDeleteOptions) (*runtime.Poller[DiskEncryptionSetsClientDeleteResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.deleteOperation(ctx, resourceGroupName, diskEncryptionSetName, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[DiskEncryptionSetsClientDeleteResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[DiskEncryptionSetsClientDeleteResponse](options.ResumeToken, client.pl, nil) + } +} + +// Delete - Deletes a disk encryption set. 
+// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-12-01 +func (client *DiskEncryptionSetsClient) deleteOperation(ctx context.Context, resourceGroupName string, diskEncryptionSetName string, options *DiskEncryptionSetsClientBeginDeleteOptions) (*http.Response, error) { + req, err := client.deleteCreateRequest(ctx, resourceGroupName, diskEncryptionSetName, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted, http.StatusNoContent) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// deleteCreateRequest creates the Delete request. +func (client *DiskEncryptionSetsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, diskEncryptionSetName string, options *DiskEncryptionSetsClientBeginDeleteOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if diskEncryptionSetName == "" { + return nil, errors.New("parameter diskEncryptionSetName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{diskEncryptionSetName}", url.PathEscape(diskEncryptionSetName)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-12-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// Get - Gets information about a disk encryption set. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-12-01 +// resourceGroupName - The name of the resource group. +// diskEncryptionSetName - The name of the disk encryption set that is being created. The name can't be changed after the +// disk encryption set is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The maximum +// name length is 80 characters. +// options - DiskEncryptionSetsClientGetOptions contains the optional parameters for the DiskEncryptionSetsClient.Get method. +func (client *DiskEncryptionSetsClient) Get(ctx context.Context, resourceGroupName string, diskEncryptionSetName string, options *DiskEncryptionSetsClientGetOptions) (DiskEncryptionSetsClientGetResponse, error) { + req, err := client.getCreateRequest(ctx, resourceGroupName, diskEncryptionSetName, options) + if err != nil { + return DiskEncryptionSetsClientGetResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return DiskEncryptionSetsClientGetResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return DiskEncryptionSetsClientGetResponse{}, runtime.NewResponseError(resp) + } + return client.getHandleResponse(resp) +} + +// getCreateRequest creates the Get request. 
+func (client *DiskEncryptionSetsClient) getCreateRequest(ctx context.Context, resourceGroupName string, diskEncryptionSetName string, options *DiskEncryptionSetsClientGetOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if diskEncryptionSetName == "" { + return nil, errors.New("parameter diskEncryptionSetName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{diskEncryptionSetName}", url.PathEscape(diskEncryptionSetName)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-12-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getHandleResponse handles the Get response. +func (client *DiskEncryptionSetsClient) getHandleResponse(resp *http.Response) (DiskEncryptionSetsClientGetResponse, error) { + result := DiskEncryptionSetsClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.DiskEncryptionSet); err != nil { + return DiskEncryptionSetsClientGetResponse{}, err + } + return result, nil +} + +// NewListPager - Lists all the disk encryption sets under a subscription. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-12-01 +// options - DiskEncryptionSetsClientListOptions contains the optional parameters for the DiskEncryptionSetsClient.List method. +func (client *DiskEncryptionSetsClient) NewListPager(options *DiskEncryptionSetsClientListOptions) *runtime.Pager[DiskEncryptionSetsClientListResponse] { + return runtime.NewPager(runtime.PagingHandler[DiskEncryptionSetsClientListResponse]{ + More: func(page DiskEncryptionSetsClientListResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *DiskEncryptionSetsClientListResponse) (DiskEncryptionSetsClientListResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listCreateRequest(ctx, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return DiskEncryptionSetsClientListResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return DiskEncryptionSetsClientListResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return DiskEncryptionSetsClientListResponse{}, runtime.NewResponseError(resp) + } + return client.listHandleResponse(resp) + }, + }) +} + +// listCreateRequest creates the List request. 
+func (client *DiskEncryptionSetsClient) listCreateRequest(ctx context.Context, options *DiskEncryptionSetsClientListOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/diskEncryptionSets" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-12-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listHandleResponse handles the List response. +func (client *DiskEncryptionSetsClient) listHandleResponse(resp *http.Response) (DiskEncryptionSetsClientListResponse, error) { + result := DiskEncryptionSetsClientListResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.DiskEncryptionSetList); err != nil { + return DiskEncryptionSetsClientListResponse{}, err + } + return result, nil +} + +// NewListAssociatedResourcesPager - Lists all resources that are encrypted with this disk encryption set. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-12-01 +// resourceGroupName - The name of the resource group. +// diskEncryptionSetName - The name of the disk encryption set that is being created. The name can't be changed after the +// disk encryption set is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The maximum +// name length is 80 characters. +// options - DiskEncryptionSetsClientListAssociatedResourcesOptions contains the optional parameters for the DiskEncryptionSetsClient.ListAssociatedResources +// method. +func (client *DiskEncryptionSetsClient) NewListAssociatedResourcesPager(resourceGroupName string, diskEncryptionSetName string, options *DiskEncryptionSetsClientListAssociatedResourcesOptions) *runtime.Pager[DiskEncryptionSetsClientListAssociatedResourcesResponse] { + return runtime.NewPager(runtime.PagingHandler[DiskEncryptionSetsClientListAssociatedResourcesResponse]{ + More: func(page DiskEncryptionSetsClientListAssociatedResourcesResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *DiskEncryptionSetsClientListAssociatedResourcesResponse) (DiskEncryptionSetsClientListAssociatedResourcesResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listAssociatedResourcesCreateRequest(ctx, resourceGroupName, diskEncryptionSetName, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return DiskEncryptionSetsClientListAssociatedResourcesResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return DiskEncryptionSetsClientListAssociatedResourcesResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return DiskEncryptionSetsClientListAssociatedResourcesResponse{}, runtime.NewResponseError(resp) + } + return client.listAssociatedResourcesHandleResponse(resp) + }, + }) +} + +// listAssociatedResourcesCreateRequest creates the ListAssociatedResources request. 
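A usage note for NewListAssociatedResourcesPager above (the listAssociatedResourcesCreateRequest helper it relies on follows): unlike the other pagers in this file, its pages carry bare ARM resource URIs (ResourceURIList) rather than typed objects. A sketch with placeholder names and the same assumed setup as the earlier examples:

```go
// Enumerate the IDs of resources encrypted with a disk encryption set.
desClient, err := armcompute.NewDiskEncryptionSetsClient("<subscription-id>", cred, nil)
if err != nil {
	log.Fatal(err)
}
pager := desClient.NewListAssociatedResourcesPager("<resource-group>", "<encryption-set>", nil)
for pager.More() {
	page, err := pager.NextPage(ctx)
	if err != nil {
		log.Fatal(err)
	}
	for _, uri := range page.Value {
		log.Printf("encrypted resource: %s", *uri)
	}
}
```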
+func (client *DiskEncryptionSetsClient) listAssociatedResourcesCreateRequest(ctx context.Context, resourceGroupName string, diskEncryptionSetName string, options *DiskEncryptionSetsClientListAssociatedResourcesOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}/associatedResources" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if diskEncryptionSetName == "" { + return nil, errors.New("parameter diskEncryptionSetName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{diskEncryptionSetName}", url.PathEscape(diskEncryptionSetName)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-12-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listAssociatedResourcesHandleResponse handles the ListAssociatedResources response. +func (client *DiskEncryptionSetsClient) listAssociatedResourcesHandleResponse(resp *http.Response) (DiskEncryptionSetsClientListAssociatedResourcesResponse, error) { + result := DiskEncryptionSetsClientListAssociatedResourcesResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.ResourceURIList); err != nil { + return DiskEncryptionSetsClientListAssociatedResourcesResponse{}, err + } + return result, nil +} + +// NewListByResourceGroupPager - Lists all the disk encryption sets under a resource group. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-12-01 +// resourceGroupName - The name of the resource group. +// options - DiskEncryptionSetsClientListByResourceGroupOptions contains the optional parameters for the DiskEncryptionSetsClient.ListByResourceGroup +// method. 
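+// Illustrative pager loop (a sketch, not generated code; "myRG" is a
+// placeholder and it assumes the list response's promoted Value slice):
+//
+//	pager := client.NewListByResourceGroupPager("myRG", nil)
+//	for pager.More() {
+//		page, err := pager.NextPage(context.TODO())
+//		if err != nil {
+//			log.Fatal(err)
+//		}
+//		for _, des := range page.Value {
+//			fmt.Println(*des.Name)
+//		}
+//	}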
+func (client *DiskEncryptionSetsClient) NewListByResourceGroupPager(resourceGroupName string, options *DiskEncryptionSetsClientListByResourceGroupOptions) *runtime.Pager[DiskEncryptionSetsClientListByResourceGroupResponse] { + return runtime.NewPager(runtime.PagingHandler[DiskEncryptionSetsClientListByResourceGroupResponse]{ + More: func(page DiskEncryptionSetsClientListByResourceGroupResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *DiskEncryptionSetsClientListByResourceGroupResponse) (DiskEncryptionSetsClientListByResourceGroupResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listByResourceGroupCreateRequest(ctx, resourceGroupName, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return DiskEncryptionSetsClientListByResourceGroupResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return DiskEncryptionSetsClientListByResourceGroupResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return DiskEncryptionSetsClientListByResourceGroupResponse{}, runtime.NewResponseError(resp) + } + return client.listByResourceGroupHandleResponse(resp) + }, + }) +} + +// listByResourceGroupCreateRequest creates the ListByResourceGroup request. +func (client *DiskEncryptionSetsClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *DiskEncryptionSetsClientListByResourceGroupOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-12-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listByResourceGroupHandleResponse handles the ListByResourceGroup response. +func (client *DiskEncryptionSetsClient) listByResourceGroupHandleResponse(resp *http.Response) (DiskEncryptionSetsClientListByResourceGroupResponse, error) { + result := DiskEncryptionSetsClientListByResourceGroupResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.DiskEncryptionSetList); err != nil { + return DiskEncryptionSetsClientListByResourceGroupResponse{}, err + } + return result, nil +} + +// BeginUpdate - Updates (patches) a disk encryption set. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-12-01 +// resourceGroupName - The name of the resource group. +// diskEncryptionSetName - The name of the disk encryption set that is being created. The name can't be changed after the +// disk encryption set is created. Supported characters for the name are a-z, A-Z, 0-9, _ and -. The maximum +// name length is 80 characters. 
+// diskEncryptionSet - disk encryption set object supplied in the body of the Patch disk encryption set operation. +// options - DiskEncryptionSetsClientBeginUpdateOptions contains the optional parameters for the DiskEncryptionSetsClient.BeginUpdate +// method. +func (client *DiskEncryptionSetsClient) BeginUpdate(ctx context.Context, resourceGroupName string, diskEncryptionSetName string, diskEncryptionSet DiskEncryptionSetUpdate, options *DiskEncryptionSetsClientBeginUpdateOptions) (*runtime.Poller[DiskEncryptionSetsClientUpdateResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.update(ctx, resourceGroupName, diskEncryptionSetName, diskEncryptionSet, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[DiskEncryptionSetsClientUpdateResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[DiskEncryptionSetsClientUpdateResponse](options.ResumeToken, client.pl, nil) + } +} + +// Update - Updates (patches) a disk encryption set. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-12-01 +func (client *DiskEncryptionSetsClient) update(ctx context.Context, resourceGroupName string, diskEncryptionSetName string, diskEncryptionSet DiskEncryptionSetUpdate, options *DiskEncryptionSetsClientBeginUpdateOptions) (*http.Response, error) { + req, err := client.updateCreateRequest(ctx, resourceGroupName, diskEncryptionSetName, diskEncryptionSet, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// updateCreateRequest creates the Update request. 
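+// Editorial sketch (not generated code): the ResumeToken branch of the
+// BeginUpdate above lets a poller survive process restarts; how the token is
+// persisted is application-defined.
+//
+//	poller, _ := client.BeginUpdate(ctx, "myRG", "myDES", update, nil)
+//	token, _ := poller.ResumeToken() // save while the operation is still in flight
+//	// later, possibly in another process:
+//	poller, _ = client.BeginUpdate(ctx, "myRG", "myDES", update,
+//		&DiskEncryptionSetsClientBeginUpdateOptions{ResumeToken: token})
+//	resp, _ := poller.PollUntilDone(ctx, nil)
+//	_ = resp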
+func (client *DiskEncryptionSetsClient) updateCreateRequest(ctx context.Context, resourceGroupName string, diskEncryptionSetName string, diskEncryptionSet DiskEncryptionSetUpdate, options *DiskEncryptionSetsClientBeginUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if diskEncryptionSetName == "" { + return nil, errors.New("parameter diskEncryptionSetName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{diskEncryptionSetName}", url.PathEscape(diskEncryptionSetName)) + req, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-12-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, diskEncryptionSet) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_diskrestorepoint_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_diskrestorepoint_client.go new file mode 100644 index 000000000..82b47b227 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_diskrestorepoint_client.go @@ -0,0 +1,348 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armcompute + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + armruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strings" +) + +// DiskRestorePointClient contains the methods for the DiskRestorePoint group. +// Don't use this type directly, use NewDiskRestorePointClient() instead. +type DiskRestorePointClient struct { + host string + subscriptionID string + pl runtime.Pipeline +} + +// NewDiskRestorePointClient creates a new instance of DiskRestorePointClient with the specified values. +// subscriptionID - Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms +// part of the URI for every service call. +// credential - used to authorize requests. Usually a credential from azidentity. +// options - pass nil to accept the default values. 
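+// Illustrative construction (a sketch, not generated code; assumes the
+// azidentity module for the credential):
+//
+//	cred, err := azidentity.NewDefaultAzureCredential(nil)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	client, err := armcompute.NewDiskRestorePointClient("<subscription-id>", cred, nil)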
+func NewDiskRestorePointClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*DiskRestorePointClient, error) {
+	if options == nil {
+		options = &arm.ClientOptions{}
+	}
+	ep := cloud.AzurePublic.Services[cloud.ResourceManager].Endpoint
+	if c, ok := options.Cloud.Services[cloud.ResourceManager]; ok {
+		ep = c.Endpoint
+	}
+	pl, err := armruntime.NewPipeline(moduleName, moduleVersion, credential, runtime.PipelineOptions{}, options)
+	if err != nil {
+		return nil, err
+	}
+	client := &DiskRestorePointClient{
+		subscriptionID: subscriptionID,
+		host:           ep,
+		pl:             pl,
+	}
+	return client, nil
+}
+
+// Get - Get disk restorePoint resource
+// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2021-12-01
+// resourceGroupName - The name of the resource group.
+// restorePointCollectionName - The name of the restore point collection that the disk restore point belongs to.
+// vmRestorePointName - The name of the vm restore point that the disk restore point belongs to.
+// diskRestorePointName - The name of the disk restore point created.
+// options - DiskRestorePointClientGetOptions contains the optional parameters for the DiskRestorePointClient.Get method.
+func (client *DiskRestorePointClient) Get(ctx context.Context, resourceGroupName string, restorePointCollectionName string, vmRestorePointName string, diskRestorePointName string, options *DiskRestorePointClientGetOptions) (DiskRestorePointClientGetResponse, error) {
+	req, err := client.getCreateRequest(ctx, resourceGroupName, restorePointCollectionName, vmRestorePointName, diskRestorePointName, options)
+	if err != nil {
+		return DiskRestorePointClientGetResponse{}, err
+	}
+	resp, err := client.pl.Do(req)
+	if err != nil {
+		return DiskRestorePointClientGetResponse{}, err
+	}
+	if !runtime.HasStatusCode(resp, http.StatusOK) {
+		return DiskRestorePointClientGetResponse{}, runtime.NewResponseError(resp)
+	}
+	return client.getHandleResponse(resp)
+}
+
+// getCreateRequest creates the Get request.
+func (client *DiskRestorePointClient) getCreateRequest(ctx context.Context, resourceGroupName string, restorePointCollectionName string, vmRestorePointName string, diskRestorePointName string, options *DiskRestorePointClientGetOptions) (*policy.Request, error) {
+	urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/restorePointCollections/{restorePointCollectionName}/restorePoints/{vmRestorePointName}/diskRestorePoints/{diskRestorePointName}"
+	if client.subscriptionID == "" {
+		return nil, errors.New("parameter client.subscriptionID cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID))
+	if resourceGroupName == "" {
+		return nil, errors.New("parameter resourceGroupName cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName))
+	if restorePointCollectionName == "" {
+		return nil, errors.New("parameter restorePointCollectionName cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{restorePointCollectionName}", url.PathEscape(restorePointCollectionName))
+	if vmRestorePointName == "" {
+		return nil, errors.New("parameter vmRestorePointName cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{vmRestorePointName}", url.PathEscape(vmRestorePointName))
+	if diskRestorePointName == "" {
+		return nil, errors.New("parameter diskRestorePointName cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{diskRestorePointName}", url.PathEscape(diskRestorePointName))
+	req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))
+	if err != nil {
+		return nil, err
+	}
+	reqQP := req.Raw().URL.Query()
+	reqQP.Set("api-version", "2021-12-01")
+	req.Raw().URL.RawQuery = reqQP.Encode()
+	req.Raw().Header["Accept"] = []string{"application/json"}
+	return req, nil
+}
+
+// getHandleResponse handles the Get response.
+func (client *DiskRestorePointClient) getHandleResponse(resp *http.Response) (DiskRestorePointClientGetResponse, error) {
+	result := DiskRestorePointClientGetResponse{}
+	if err := runtime.UnmarshalAsJSON(resp, &result.DiskRestorePoint); err != nil {
+		return DiskRestorePointClientGetResponse{}, err
+	}
+	return result, nil
+}
+
+// BeginGrantAccess - Grants access to a diskRestorePoint.
+// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2021-12-01
+// resourceGroupName - The name of the resource group.
+// restorePointCollectionName - The name of the restore point collection that the disk restore point belongs to.
+// vmRestorePointName - The name of the vm restore point that the disk restore point belongs to.
+// diskRestorePointName - The name of the disk restore point created.
+// grantAccessData - Access data object supplied in the body of the get disk access operation.
+// options - DiskRestorePointClientBeginGrantAccessOptions contains the optional parameters for the DiskRestorePointClient.BeginGrantAccess
+// method.
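+// Illustrative call (a sketch, not generated code; placeholder names, and it
+// assumes GrantAccessData's Access/DurationInSeconds fields and the response's
+// embedded AccessURI from this module's models):
+//
+//	data := armcompute.GrantAccessData{
+//		Access:            to.Ptr(armcompute.AccessLevelRead),
+//		DurationInSeconds: to.Ptr[int32](3600),
+//	}
+//	poller, err := client.BeginGrantAccess(ctx, "myRG", "myRPC", "myVMRP", "myDiskRP", data, nil)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	resp, err := poller.PollUntilDone(ctx, nil)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(*resp.AccessSAS) // short-lived SAS URI for the restore point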
+func (client *DiskRestorePointClient) BeginGrantAccess(ctx context.Context, resourceGroupName string, restorePointCollectionName string, vmRestorePointName string, diskRestorePointName string, grantAccessData GrantAccessData, options *DiskRestorePointClientBeginGrantAccessOptions) (*runtime.Poller[DiskRestorePointClientGrantAccessResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.grantAccess(ctx, resourceGroupName, restorePointCollectionName, vmRestorePointName, diskRestorePointName, grantAccessData, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.pl, &runtime.NewPollerOptions[DiskRestorePointClientGrantAccessResponse]{ + FinalStateVia: runtime.FinalStateViaLocation, + }) + } else { + return runtime.NewPollerFromResumeToken[DiskRestorePointClientGrantAccessResponse](options.ResumeToken, client.pl, nil) + } +} + +// GrantAccess - Grants access to a diskRestorePoint. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-12-01 +func (client *DiskRestorePointClient) grantAccess(ctx context.Context, resourceGroupName string, restorePointCollectionName string, vmRestorePointName string, diskRestorePointName string, grantAccessData GrantAccessData, options *DiskRestorePointClientBeginGrantAccessOptions) (*http.Response, error) { + req, err := client.grantAccessCreateRequest(ctx, resourceGroupName, restorePointCollectionName, vmRestorePointName, diskRestorePointName, grantAccessData, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// grantAccessCreateRequest creates the GrantAccess request. 
+func (client *DiskRestorePointClient) grantAccessCreateRequest(ctx context.Context, resourceGroupName string, restorePointCollectionName string, vmRestorePointName string, diskRestorePointName string, grantAccessData GrantAccessData, options *DiskRestorePointClientBeginGrantAccessOptions) (*policy.Request, error) {
+	urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/restorePointCollections/{restorePointCollectionName}/restorePoints/{vmRestorePointName}/diskRestorePoints/{diskRestorePointName}/beginGetAccess"
+	if client.subscriptionID == "" {
+		return nil, errors.New("parameter client.subscriptionID cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID))
+	if resourceGroupName == "" {
+		return nil, errors.New("parameter resourceGroupName cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName))
+	if restorePointCollectionName == "" {
+		return nil, errors.New("parameter restorePointCollectionName cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{restorePointCollectionName}", url.PathEscape(restorePointCollectionName))
+	if vmRestorePointName == "" {
+		return nil, errors.New("parameter vmRestorePointName cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{vmRestorePointName}", url.PathEscape(vmRestorePointName))
+	if diskRestorePointName == "" {
+		return nil, errors.New("parameter diskRestorePointName cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{diskRestorePointName}", url.PathEscape(diskRestorePointName))
+	req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath))
+	if err != nil {
+		return nil, err
+	}
+	reqQP := req.Raw().URL.Query()
+	reqQP.Set("api-version", "2021-12-01")
+	req.Raw().URL.RawQuery = reqQP.Encode()
+	req.Raw().Header["Accept"] = []string{"application/json"}
+	return req, runtime.MarshalAsJSON(req, grantAccessData)
+}
+
+// NewListByRestorePointPager - Lists diskRestorePoints under a vmRestorePoint.
+// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2021-12-01
+// resourceGroupName - The name of the resource group.
+// restorePointCollectionName - The name of the restore point collection that the disk restore point belongs to.
+// vmRestorePointName - The name of the vm restore point that the disk restore point belongs to.
+// options - DiskRestorePointClientListByRestorePointOptions contains the optional parameters for the DiskRestorePointClient.ListByRestorePoint
+// method.
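+// Note (editorial): the pager below separates the two halves of pagination.
+// More reports whether a NextLink remains, and Fetcher either issues the
+// initial List request or GETs the previous page's NextLink, so callers never
+// handle continuation links themselves.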
+func (client *DiskRestorePointClient) NewListByRestorePointPager(resourceGroupName string, restorePointCollectionName string, vmRestorePointName string, options *DiskRestorePointClientListByRestorePointOptions) *runtime.Pager[DiskRestorePointClientListByRestorePointResponse] { + return runtime.NewPager(runtime.PagingHandler[DiskRestorePointClientListByRestorePointResponse]{ + More: func(page DiskRestorePointClientListByRestorePointResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *DiskRestorePointClientListByRestorePointResponse) (DiskRestorePointClientListByRestorePointResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listByRestorePointCreateRequest(ctx, resourceGroupName, restorePointCollectionName, vmRestorePointName, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return DiskRestorePointClientListByRestorePointResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return DiskRestorePointClientListByRestorePointResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return DiskRestorePointClientListByRestorePointResponse{}, runtime.NewResponseError(resp) + } + return client.listByRestorePointHandleResponse(resp) + }, + }) +} + +// listByRestorePointCreateRequest creates the ListByRestorePoint request. +func (client *DiskRestorePointClient) listByRestorePointCreateRequest(ctx context.Context, resourceGroupName string, restorePointCollectionName string, vmRestorePointName string, options *DiskRestorePointClientListByRestorePointOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/restorePointCollections/{restorePointCollectionName}/restorePoints/{vmRestorePointName}/diskRestorePoints" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if restorePointCollectionName == "" { + return nil, errors.New("parameter restorePointCollectionName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{restorePointCollectionName}", url.PathEscape(restorePointCollectionName)) + if vmRestorePointName == "" { + return nil, errors.New("parameter vmRestorePointName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmRestorePointName}", url.PathEscape(vmRestorePointName)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-12-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listByRestorePointHandleResponse handles the ListByRestorePoint response. 
+func (client *DiskRestorePointClient) listByRestorePointHandleResponse(resp *http.Response) (DiskRestorePointClientListByRestorePointResponse, error) {
+	result := DiskRestorePointClientListByRestorePointResponse{}
+	if err := runtime.UnmarshalAsJSON(resp, &result.DiskRestorePointList); err != nil {
+		return DiskRestorePointClientListByRestorePointResponse{}, err
+	}
+	return result, nil
+}
+
+// BeginRevokeAccess - Revokes access to a diskRestorePoint.
+// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2021-12-01
+// resourceGroupName - The name of the resource group.
+// restorePointCollectionName - The name of the restore point collection that the disk restore point belongs to.
+// vmRestorePointName - The name of the vm restore point that the disk restore point belongs to.
+// diskRestorePointName - The name of the disk restore point created.
+// options - DiskRestorePointClientBeginRevokeAccessOptions contains the optional parameters for the DiskRestorePointClient.BeginRevokeAccess
+// method.
+func (client *DiskRestorePointClient) BeginRevokeAccess(ctx context.Context, resourceGroupName string, restorePointCollectionName string, vmRestorePointName string, diskRestorePointName string, options *DiskRestorePointClientBeginRevokeAccessOptions) (*runtime.Poller[DiskRestorePointClientRevokeAccessResponse], error) {
+	if options == nil || options.ResumeToken == "" {
+		resp, err := client.revokeAccess(ctx, resourceGroupName, restorePointCollectionName, vmRestorePointName, diskRestorePointName, options)
+		if err != nil {
+			return nil, err
+		}
+		return runtime.NewPoller(resp, client.pl, &runtime.NewPollerOptions[DiskRestorePointClientRevokeAccessResponse]{
+			FinalStateVia: runtime.FinalStateViaLocation,
+		})
+	} else {
+		return runtime.NewPollerFromResumeToken[DiskRestorePointClientRevokeAccessResponse](options.ResumeToken, client.pl, nil)
+	}
+}
+
+// RevokeAccess - Revokes access to a diskRestorePoint.
+// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2021-12-01
+func (client *DiskRestorePointClient) revokeAccess(ctx context.Context, resourceGroupName string, restorePointCollectionName string, vmRestorePointName string, diskRestorePointName string, options *DiskRestorePointClientBeginRevokeAccessOptions) (*http.Response, error) {
+	req, err := client.revokeAccessCreateRequest(ctx, resourceGroupName, restorePointCollectionName, vmRestorePointName, diskRestorePointName, options)
+	if err != nil {
+		return nil, err
+	}
+	resp, err := client.pl.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) {
+		return nil, runtime.NewResponseError(resp)
+	}
+	return resp, nil
+}
+
+// revokeAccessCreateRequest creates the RevokeAccess request.
+func (client *DiskRestorePointClient) revokeAccessCreateRequest(ctx context.Context, resourceGroupName string, restorePointCollectionName string, vmRestorePointName string, diskRestorePointName string, options *DiskRestorePointClientBeginRevokeAccessOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/restorePointCollections/{restorePointCollectionName}/restorePoints/{vmRestorePointName}/diskRestorePoints/{diskRestorePointName}/endGetAccess" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if restorePointCollectionName == "" { + return nil, errors.New("parameter restorePointCollectionName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{restorePointCollectionName}", url.PathEscape(restorePointCollectionName)) + if vmRestorePointName == "" { + return nil, errors.New("parameter vmRestorePointName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmRestorePointName}", url.PathEscape(vmRestorePointName)) + if diskRestorePointName == "" { + return nil, errors.New("parameter diskRestorePointName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{diskRestorePointName}", url.PathEscape(diskRestorePointName)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-12-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_disks_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_disks_client.go new file mode 100644 index 000000000..37965463b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_disks_client.go @@ -0,0 +1,564 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armcompute + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + armruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strings" +) + +// DisksClient contains the methods for the Disks group. +// Don't use this type directly, use NewDisksClient() instead. +type DisksClient struct { + host string + subscriptionID string + pl runtime.Pipeline +} + +// NewDisksClient creates a new instance of DisksClient with the specified values. 
+// subscriptionID - Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms +// part of the URI for every service call. +// credential - used to authorize requests. Usually a credential from azidentity. +// options - pass nil to accept the default values. +func NewDisksClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*DisksClient, error) { + if options == nil { + options = &arm.ClientOptions{} + } + ep := cloud.AzurePublic.Services[cloud.ResourceManager].Endpoint + if c, ok := options.Cloud.Services[cloud.ResourceManager]; ok { + ep = c.Endpoint + } + pl, err := armruntime.NewPipeline(moduleName, moduleVersion, credential, runtime.PipelineOptions{}, options) + if err != nil { + return nil, err + } + client := &DisksClient{ + subscriptionID: subscriptionID, + host: ep, + pl: pl, + } + return client, nil +} + +// BeginCreateOrUpdate - Creates or updates a disk. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-12-01 +// resourceGroupName - The name of the resource group. +// diskName - The name of the managed disk that is being created. The name can't be changed after the disk is created. Supported +// characters for the name are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 +// characters. +// disk - Disk object supplied in the body of the Put disk operation. +// options - DisksClientBeginCreateOrUpdateOptions contains the optional parameters for the DisksClient.BeginCreateOrUpdate +// method. +func (client *DisksClient) BeginCreateOrUpdate(ctx context.Context, resourceGroupName string, diskName string, disk Disk, options *DisksClientBeginCreateOrUpdateOptions) (*runtime.Poller[DisksClientCreateOrUpdateResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.createOrUpdate(ctx, resourceGroupName, diskName, disk, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[DisksClientCreateOrUpdateResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[DisksClientCreateOrUpdateResponse](options.ResumeToken, client.pl, nil) + } +} + +// CreateOrUpdate - Creates or updates a disk. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-12-01 +func (client *DisksClient) createOrUpdate(ctx context.Context, resourceGroupName string, diskName string, disk Disk, options *DisksClientBeginCreateOrUpdateOptions) (*http.Response, error) { + req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, diskName, disk, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// createOrUpdateCreateRequest creates the CreateOrUpdate request. 
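+// For illustration (editorial, with placeholder values): with subscription
+// "0000", resource group "myRG" and disk "myDisk", the helper below yields
+// roughly
+//
+//	PUT {host}/subscriptions/0000/resourceGroups/myRG/providers/Microsoft.Compute/disks/myDisk?api-version=2021-12-01
+//	Accept: application/json
+//
+// with the Disk payload JSON-marshaled into the request body.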
+func (client *DisksClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, diskName string, disk Disk, options *DisksClientBeginCreateOrUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if diskName == "" { + return nil, errors.New("parameter diskName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{diskName}", url.PathEscape(diskName)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-12-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, disk) +} + +// BeginDelete - Deletes a disk. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-12-01 +// resourceGroupName - The name of the resource group. +// diskName - The name of the managed disk that is being created. The name can't be changed after the disk is created. Supported +// characters for the name are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 +// characters. +// options - DisksClientBeginDeleteOptions contains the optional parameters for the DisksClient.BeginDelete method. +func (client *DisksClient) BeginDelete(ctx context.Context, resourceGroupName string, diskName string, options *DisksClientBeginDeleteOptions) (*runtime.Poller[DisksClientDeleteResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.deleteOperation(ctx, resourceGroupName, diskName, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[DisksClientDeleteResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[DisksClientDeleteResponse](options.ResumeToken, client.pl, nil) + } +} + +// Delete - Deletes a disk. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-12-01 +func (client *DisksClient) deleteOperation(ctx context.Context, resourceGroupName string, diskName string, options *DisksClientBeginDeleteOptions) (*http.Response, error) { + req, err := client.deleteCreateRequest(ctx, resourceGroupName, diskName, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted, http.StatusNoContent) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// deleteCreateRequest creates the Delete request. 
+func (client *DisksClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, diskName string, options *DisksClientBeginDeleteOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if diskName == "" { + return nil, errors.New("parameter diskName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{diskName}", url.PathEscape(diskName)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-12-01") + req.Raw().URL.RawQuery = reqQP.Encode() + return req, nil +} + +// Get - Gets information about a disk. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-12-01 +// resourceGroupName - The name of the resource group. +// diskName - The name of the managed disk that is being created. The name can't be changed after the disk is created. Supported +// characters for the name are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 +// characters. +// options - DisksClientGetOptions contains the optional parameters for the DisksClient.Get method. +func (client *DisksClient) Get(ctx context.Context, resourceGroupName string, diskName string, options *DisksClientGetOptions) (DisksClientGetResponse, error) { + req, err := client.getCreateRequest(ctx, resourceGroupName, diskName, options) + if err != nil { + return DisksClientGetResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return DisksClientGetResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return DisksClientGetResponse{}, runtime.NewResponseError(resp) + } + return client.getHandleResponse(resp) +} + +// getCreateRequest creates the Get request. 
+func (client *DisksClient) getCreateRequest(ctx context.Context, resourceGroupName string, diskName string, options *DisksClientGetOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if diskName == "" { + return nil, errors.New("parameter diskName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{diskName}", url.PathEscape(diskName)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-12-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getHandleResponse handles the Get response. +func (client *DisksClient) getHandleResponse(resp *http.Response) (DisksClientGetResponse, error) { + result := DisksClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.Disk); err != nil { + return DisksClientGetResponse{}, err + } + return result, nil +} + +// BeginGrantAccess - Grants access to a disk. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-12-01 +// resourceGroupName - The name of the resource group. +// diskName - The name of the managed disk that is being created. The name can't be changed after the disk is created. Supported +// characters for the name are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 +// characters. +// grantAccessData - Access data object supplied in the body of the get disk access operation. +// options - DisksClientBeginGrantAccessOptions contains the optional parameters for the DisksClient.BeginGrantAccess method. +func (client *DisksClient) BeginGrantAccess(ctx context.Context, resourceGroupName string, diskName string, grantAccessData GrantAccessData, options *DisksClientBeginGrantAccessOptions) (*runtime.Poller[DisksClientGrantAccessResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.grantAccess(ctx, resourceGroupName, diskName, grantAccessData, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.pl, &runtime.NewPollerOptions[DisksClientGrantAccessResponse]{ + FinalStateVia: runtime.FinalStateViaLocation, + }) + } else { + return runtime.NewPollerFromResumeToken[DisksClientGrantAccessResponse](options.ResumeToken, client.pl, nil) + } +} + +// GrantAccess - Grants access to a disk. +// If the operation fails it returns an *azcore.ResponseError type. 
+// Generated from API version 2021-12-01 +func (client *DisksClient) grantAccess(ctx context.Context, resourceGroupName string, diskName string, grantAccessData GrantAccessData, options *DisksClientBeginGrantAccessOptions) (*http.Response, error) { + req, err := client.grantAccessCreateRequest(ctx, resourceGroupName, diskName, grantAccessData, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// grantAccessCreateRequest creates the GrantAccess request. +func (client *DisksClient) grantAccessCreateRequest(ctx context.Context, resourceGroupName string, diskName string, grantAccessData GrantAccessData, options *DisksClientBeginGrantAccessOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}/beginGetAccess" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if diskName == "" { + return nil, errors.New("parameter diskName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{diskName}", url.PathEscape(diskName)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-12-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, grantAccessData) +} + +// NewListPager - Lists all the disks under a subscription. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-12-01 +// options - DisksClientListOptions contains the optional parameters for the DisksClient.List method. +func (client *DisksClient) NewListPager(options *DisksClientListOptions) *runtime.Pager[DisksClientListResponse] { + return runtime.NewPager(runtime.PagingHandler[DisksClientListResponse]{ + More: func(page DisksClientListResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *DisksClientListResponse) (DisksClientListResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listCreateRequest(ctx, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return DisksClientListResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return DisksClientListResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return DisksClientListResponse{}, runtime.NewResponseError(resp) + } + return client.listHandleResponse(resp) + }, + }) +} + +// listCreateRequest creates the List request. 
+func (client *DisksClient) listCreateRequest(ctx context.Context, options *DisksClientListOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/disks" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-12-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listHandleResponse handles the List response. +func (client *DisksClient) listHandleResponse(resp *http.Response) (DisksClientListResponse, error) { + result := DisksClientListResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.DiskList); err != nil { + return DisksClientListResponse{}, err + } + return result, nil +} + +// NewListByResourceGroupPager - Lists all the disks under a resource group. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-12-01 +// resourceGroupName - The name of the resource group. +// options - DisksClientListByResourceGroupOptions contains the optional parameters for the DisksClient.ListByResourceGroup +// method. +func (client *DisksClient) NewListByResourceGroupPager(resourceGroupName string, options *DisksClientListByResourceGroupOptions) *runtime.Pager[DisksClientListByResourceGroupResponse] { + return runtime.NewPager(runtime.PagingHandler[DisksClientListByResourceGroupResponse]{ + More: func(page DisksClientListByResourceGroupResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *DisksClientListByResourceGroupResponse) (DisksClientListByResourceGroupResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listByResourceGroupCreateRequest(ctx, resourceGroupName, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return DisksClientListByResourceGroupResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return DisksClientListByResourceGroupResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return DisksClientListByResourceGroupResponse{}, runtime.NewResponseError(resp) + } + return client.listByResourceGroupHandleResponse(resp) + }, + }) +} + +// listByResourceGroupCreateRequest creates the ListByResourceGroup request. 
+func (client *DisksClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *DisksClientListByResourceGroupOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-12-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listByResourceGroupHandleResponse handles the ListByResourceGroup response. +func (client *DisksClient) listByResourceGroupHandleResponse(resp *http.Response) (DisksClientListByResourceGroupResponse, error) { + result := DisksClientListByResourceGroupResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.DiskList); err != nil { + return DisksClientListByResourceGroupResponse{}, err + } + return result, nil +} + +// BeginRevokeAccess - Revokes access to a disk. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-12-01 +// resourceGroupName - The name of the resource group. +// diskName - The name of the managed disk that is being created. The name can't be changed after the disk is created. Supported +// characters for the name are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 +// characters. +// options - DisksClientBeginRevokeAccessOptions contains the optional parameters for the DisksClient.BeginRevokeAccess method. +func (client *DisksClient) BeginRevokeAccess(ctx context.Context, resourceGroupName string, diskName string, options *DisksClientBeginRevokeAccessOptions) (*runtime.Poller[DisksClientRevokeAccessResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.revokeAccess(ctx, resourceGroupName, diskName, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.pl, &runtime.NewPollerOptions[DisksClientRevokeAccessResponse]{ + FinalStateVia: runtime.FinalStateViaLocation, + }) + } else { + return runtime.NewPollerFromResumeToken[DisksClientRevokeAccessResponse](options.ResumeToken, client.pl, nil) + } +} + +// RevokeAccess - Revokes access to a disk. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-12-01 +func (client *DisksClient) revokeAccess(ctx context.Context, resourceGroupName string, diskName string, options *DisksClientBeginRevokeAccessOptions) (*http.Response, error) { + req, err := client.revokeAccessCreateRequest(ctx, resourceGroupName, diskName, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// revokeAccessCreateRequest creates the RevokeAccess request. 
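+// Note (editorial): grant and revoke are long-running POSTs; the
+// FinalStateViaLocation option in the pollers above instructs azcore to fetch
+// the terminal result from the Location header rather than from the initial
+// response.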
+func (client *DisksClient) revokeAccessCreateRequest(ctx context.Context, resourceGroupName string, diskName string, options *DisksClientBeginRevokeAccessOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}/endGetAccess" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if diskName == "" { + return nil, errors.New("parameter diskName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{diskName}", url.PathEscape(diskName)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-12-01") + req.Raw().URL.RawQuery = reqQP.Encode() + return req, nil +} + +// BeginUpdate - Updates (patches) a disk. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-12-01 +// resourceGroupName - The name of the resource group. +// diskName - The name of the managed disk that is being created. The name can't be changed after the disk is created. Supported +// characters for the name are a-z, A-Z, 0-9, _ and -. The maximum name length is 80 +// characters. +// disk - Disk object supplied in the body of the Patch disk operation. +// options - DisksClientBeginUpdateOptions contains the optional parameters for the DisksClient.BeginUpdate method. +func (client *DisksClient) BeginUpdate(ctx context.Context, resourceGroupName string, diskName string, disk DiskUpdate, options *DisksClientBeginUpdateOptions) (*runtime.Poller[DisksClientUpdateResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.update(ctx, resourceGroupName, diskName, disk, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[DisksClientUpdateResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[DisksClientUpdateResponse](options.ResumeToken, client.pl, nil) + } +} + +// Update - Updates (patches) a disk. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-12-01 +func (client *DisksClient) update(ctx context.Context, resourceGroupName string, diskName string, disk DiskUpdate, options *DisksClientBeginUpdateOptions) (*http.Response, error) { + req, err := client.updateCreateRequest(ctx, resourceGroupName, diskName, disk, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// updateCreateRequest creates the Update request. 
+func (client *DisksClient) updateCreateRequest(ctx context.Context, resourceGroupName string, diskName string, disk DiskUpdate, options *DisksClientBeginUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if diskName == "" { + return nil, errors.New("parameter diskName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{diskName}", url.PathEscape(diskName)) + req, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-12-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, disk) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_galleries_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_galleries_client.go new file mode 100644 index 000000000..f2c82893b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_galleries_client.go @@ -0,0 +1,433 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armcompute + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + armruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strings" +) + +// GalleriesClient contains the methods for the Galleries group. +// Don't use this type directly, use NewGalleriesClient() instead. +type GalleriesClient struct { + host string + subscriptionID string + pl runtime.Pipeline +} + +// NewGalleriesClient creates a new instance of GalleriesClient with the specified values. +// subscriptionID - Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms +// part of the URI for every service call. +// credential - used to authorize requests. Usually a credential from azidentity. +// options - pass nil to accept the default values. 
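+// Illustrative construction against a sovereign cloud (a sketch, not generated
+// code; arm.ClientOptions embeds azcore.ClientOptions, whose Cloud field feeds
+// the endpoint lookup below):
+//
+//	opts := &arm.ClientOptions{}
+//	opts.Cloud = cloud.AzureGovernment
+//	client, err := armcompute.NewGalleriesClient("<subscription-id>", cred, opts)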
+func NewGalleriesClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*GalleriesClient, error) { + if options == nil { + options = &arm.ClientOptions{} + } + ep := cloud.AzurePublic.Services[cloud.ResourceManager].Endpoint + if c, ok := options.Cloud.Services[cloud.ResourceManager]; ok { + ep = c.Endpoint + } + pl, err := armruntime.NewPipeline(moduleName, moduleVersion, credential, runtime.PipelineOptions{}, options) + if err != nil { + return nil, err + } + client := &GalleriesClient{ + subscriptionID: subscriptionID, + host: ep, + pl: pl, + } + return client, nil +} + +// BeginCreateOrUpdate - Create or update a Shared Image Gallery. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-10-01 +// resourceGroupName - The name of the resource group. +// galleryName - The name of the Shared Image Gallery. The allowed characters are alphabets and numbers with dots and periods +// allowed in the middle. The maximum length is 80 characters. +// gallery - Parameters supplied to the create or update Shared Image Gallery operation. +// options - GalleriesClientBeginCreateOrUpdateOptions contains the optional parameters for the GalleriesClient.BeginCreateOrUpdate +// method. +func (client *GalleriesClient) BeginCreateOrUpdate(ctx context.Context, resourceGroupName string, galleryName string, gallery Gallery, options *GalleriesClientBeginCreateOrUpdateOptions) (*runtime.Poller[GalleriesClientCreateOrUpdateResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.createOrUpdate(ctx, resourceGroupName, galleryName, gallery, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[GalleriesClientCreateOrUpdateResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[GalleriesClientCreateOrUpdateResponse](options.ResumeToken, client.pl, nil) + } +} + +// CreateOrUpdate - Create or update a Shared Image Gallery. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-10-01 +func (client *GalleriesClient) createOrUpdate(ctx context.Context, resourceGroupName string, galleryName string, gallery Gallery, options *GalleriesClientBeginCreateOrUpdateOptions) (*http.Response, error) { + req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, galleryName, gallery, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusCreated, http.StatusAccepted) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// createOrUpdateCreateRequest creates the CreateOrUpdate request. 
+func (client *GalleriesClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, galleryName string, gallery Gallery, options *GalleriesClientBeginCreateOrUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if galleryName == "" { + return nil, errors.New("parameter galleryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{galleryName}", url.PathEscape(galleryName)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-10-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, gallery) +} + +// BeginDelete - Delete a Shared Image Gallery. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-10-01 +// resourceGroupName - The name of the resource group. +// galleryName - The name of the Shared Image Gallery to be deleted. +// options - GalleriesClientBeginDeleteOptions contains the optional parameters for the GalleriesClient.BeginDelete method. +func (client *GalleriesClient) BeginDelete(ctx context.Context, resourceGroupName string, galleryName string, options *GalleriesClientBeginDeleteOptions) (*runtime.Poller[GalleriesClientDeleteResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.deleteOperation(ctx, resourceGroupName, galleryName, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[GalleriesClientDeleteResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[GalleriesClientDeleteResponse](options.ResumeToken, client.pl, nil) + } +} + +// Delete - Delete a Shared Image Gallery. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-10-01 +func (client *GalleriesClient) deleteOperation(ctx context.Context, resourceGroupName string, galleryName string, options *GalleriesClientBeginDeleteOptions) (*http.Response, error) { + req, err := client.deleteCreateRequest(ctx, resourceGroupName, galleryName, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted, http.StatusNoContent) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// deleteCreateRequest creates the Delete request. 
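+//
+// Illustrative sketch of the ResumeToken flow supported by the Begin*
+// methods above (hedged; how the token is persisted is up to the caller,
+// and the resource names are placeholders):
+//
+//	poller, err := client.BeginDelete(ctx, "rg", "myGallery", nil)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	tk, err := poller.ResumeToken() // persist tk to resume later
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	// later, possibly from another process:
+//	poller, err = client.BeginDelete(ctx, "rg", "myGallery",
+//		&armcompute.GalleriesClientBeginDeleteOptions{ResumeToken: tk})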
+func (client *GalleriesClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, galleryName string, options *GalleriesClientBeginDeleteOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if galleryName == "" { + return nil, errors.New("parameter galleryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{galleryName}", url.PathEscape(galleryName)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-10-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// Get - Retrieves information about a Shared Image Gallery. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-10-01 +// resourceGroupName - The name of the resource group. +// galleryName - The name of the Shared Image Gallery. +// options - GalleriesClientGetOptions contains the optional parameters for the GalleriesClient.Get method. +func (client *GalleriesClient) Get(ctx context.Context, resourceGroupName string, galleryName string, options *GalleriesClientGetOptions) (GalleriesClientGetResponse, error) { + req, err := client.getCreateRequest(ctx, resourceGroupName, galleryName, options) + if err != nil { + return GalleriesClientGetResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return GalleriesClientGetResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return GalleriesClientGetResponse{}, runtime.NewResponseError(resp) + } + return client.getHandleResponse(resp) +} + +// getCreateRequest creates the Get request. 
+func (client *GalleriesClient) getCreateRequest(ctx context.Context, resourceGroupName string, galleryName string, options *GalleriesClientGetOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if galleryName == "" { + return nil, errors.New("parameter galleryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{galleryName}", url.PathEscape(galleryName)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-10-01") + if options != nil && options.Select != nil { + reqQP.Set("$select", string(*options.Select)) + } + if options != nil && options.Expand != nil { + reqQP.Set("$expand", string(*options.Expand)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getHandleResponse handles the Get response. +func (client *GalleriesClient) getHandleResponse(resp *http.Response) (GalleriesClientGetResponse, error) { + result := GalleriesClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.Gallery); err != nil { + return GalleriesClientGetResponse{}, err + } + return result, nil +} + +// NewListPager - List galleries under a subscription. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-10-01 +// options - GalleriesClientListOptions contains the optional parameters for the GalleriesClient.List method. +func (client *GalleriesClient) NewListPager(options *GalleriesClientListOptions) *runtime.Pager[GalleriesClientListResponse] { + return runtime.NewPager(runtime.PagingHandler[GalleriesClientListResponse]{ + More: func(page GalleriesClientListResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *GalleriesClientListResponse) (GalleriesClientListResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listCreateRequest(ctx, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return GalleriesClientListResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return GalleriesClientListResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return GalleriesClientListResponse{}, runtime.NewResponseError(resp) + } + return client.listHandleResponse(resp) + }, + }) +} + +// listCreateRequest creates the List request. 
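+//
+// Illustrative sketch of consuming the pager returned by NewListPager above
+// (hedged; page.Value is the promoted GalleryList field):
+//
+//	pager := client.NewListPager(nil)
+//	for pager.More() {
+//		page, err := pager.NextPage(ctx)
+//		if err != nil {
+//			log.Fatal(err)
+//		}
+//		for _, gallery := range page.Value {
+//			_ = gallery // *armcompute.Gallery
+//		}
+//	}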
+func (client *GalleriesClient) listCreateRequest(ctx context.Context, options *GalleriesClientListOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/galleries" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-10-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listHandleResponse handles the List response. +func (client *GalleriesClient) listHandleResponse(resp *http.Response) (GalleriesClientListResponse, error) { + result := GalleriesClientListResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.GalleryList); err != nil { + return GalleriesClientListResponse{}, err + } + return result, nil +} + +// NewListByResourceGroupPager - List galleries under a resource group. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-10-01 +// resourceGroupName - The name of the resource group. +// options - GalleriesClientListByResourceGroupOptions contains the optional parameters for the GalleriesClient.ListByResourceGroup +// method. +func (client *GalleriesClient) NewListByResourceGroupPager(resourceGroupName string, options *GalleriesClientListByResourceGroupOptions) *runtime.Pager[GalleriesClientListByResourceGroupResponse] { + return runtime.NewPager(runtime.PagingHandler[GalleriesClientListByResourceGroupResponse]{ + More: func(page GalleriesClientListByResourceGroupResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *GalleriesClientListByResourceGroupResponse) (GalleriesClientListByResourceGroupResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listByResourceGroupCreateRequest(ctx, resourceGroupName, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return GalleriesClientListByResourceGroupResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return GalleriesClientListByResourceGroupResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return GalleriesClientListByResourceGroupResponse{}, runtime.NewResponseError(resp) + } + return client.listByResourceGroupHandleResponse(resp) + }, + }) +} + +// listByResourceGroupCreateRequest creates the ListByResourceGroup request. 
+func (client *GalleriesClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *GalleriesClientListByResourceGroupOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-10-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listByResourceGroupHandleResponse handles the ListByResourceGroup response. +func (client *GalleriesClient) listByResourceGroupHandleResponse(resp *http.Response) (GalleriesClientListByResourceGroupResponse, error) { + result := GalleriesClientListByResourceGroupResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.GalleryList); err != nil { + return GalleriesClientListByResourceGroupResponse{}, err + } + return result, nil +} + +// BeginUpdate - Update a Shared Image Gallery. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-10-01 +// resourceGroupName - The name of the resource group. +// galleryName - The name of the Shared Image Gallery. The allowed characters are alphabets and numbers with dots and periods +// allowed in the middle. The maximum length is 80 characters. +// gallery - Parameters supplied to the update Shared Image Gallery operation. +// options - GalleriesClientBeginUpdateOptions contains the optional parameters for the GalleriesClient.BeginUpdate method. +func (client *GalleriesClient) BeginUpdate(ctx context.Context, resourceGroupName string, galleryName string, gallery GalleryUpdate, options *GalleriesClientBeginUpdateOptions) (*runtime.Poller[GalleriesClientUpdateResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.update(ctx, resourceGroupName, galleryName, gallery, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[GalleriesClientUpdateResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[GalleriesClientUpdateResponse](options.ResumeToken, client.pl, nil) + } +} + +// Update - Update a Shared Image Gallery. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-10-01 +func (client *GalleriesClient) update(ctx context.Context, resourceGroupName string, galleryName string, gallery GalleryUpdate, options *GalleriesClientBeginUpdateOptions) (*http.Response, error) { + req, err := client.updateCreateRequest(ctx, resourceGroupName, galleryName, gallery, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// updateCreateRequest creates the Update request. 
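+//
+// Illustrative patch sketch (hedged; the tag key and value are placeholders,
+// and GalleryUpdate carries only the fields being changed):
+//
+//	env := "dev"
+//	poller, err := client.BeginUpdate(ctx, "rg", "myGallery",
+//		armcompute.GalleryUpdate{Tags: map[string]*string{"env": &env}}, nil)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	resp, err := poller.PollUntilDone(ctx, nil)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	_ = resp // resp.Gallery reflects the updated resource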
+func (client *GalleriesClient) updateCreateRequest(ctx context.Context, resourceGroupName string, galleryName string, gallery GalleryUpdate, options *GalleriesClientBeginUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if galleryName == "" { + return nil, errors.New("parameter galleryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{galleryName}", url.PathEscape(galleryName)) + req, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-10-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, gallery) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_galleryapplications_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_galleryapplications_client.go new file mode 100644 index 000000000..342e82b55 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_galleryapplications_client.go @@ -0,0 +1,397 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armcompute + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + armruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strings" +) + +// GalleryApplicationsClient contains the methods for the GalleryApplications group. +// Don't use this type directly, use NewGalleryApplicationsClient() instead. +type GalleryApplicationsClient struct { + host string + subscriptionID string + pl runtime.Pipeline +} + +// NewGalleryApplicationsClient creates a new instance of GalleryApplicationsClient with the specified values. +// subscriptionID - Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms +// part of the URI for every service call. +// credential - used to authorize requests. Usually a credential from azidentity. +// options - pass nil to accept the default values. 
+func NewGalleryApplicationsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*GalleryApplicationsClient, error) { + if options == nil { + options = &arm.ClientOptions{} + } + ep := cloud.AzurePublic.Services[cloud.ResourceManager].Endpoint + if c, ok := options.Cloud.Services[cloud.ResourceManager]; ok { + ep = c.Endpoint + } + pl, err := armruntime.NewPipeline(moduleName, moduleVersion, credential, runtime.PipelineOptions{}, options) + if err != nil { + return nil, err + } + client := &GalleryApplicationsClient{ + subscriptionID: subscriptionID, + host: ep, + pl: pl, + } + return client, nil +} + +// BeginCreateOrUpdate - Create or update a gallery Application Definition. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-10-01 +// resourceGroupName - The name of the resource group. +// galleryName - The name of the Shared Application Gallery in which the Application Definition is to be created. +// galleryApplicationName - The name of the gallery Application Definition to be created or updated. The allowed characters +// are alphabets and numbers with dots, dashes, and periods allowed in the middle. The maximum length is 80 +// characters. +// galleryApplication - Parameters supplied to the create or update gallery Application operation. +// options - GalleryApplicationsClientBeginCreateOrUpdateOptions contains the optional parameters for the GalleryApplicationsClient.BeginCreateOrUpdate +// method. +func (client *GalleryApplicationsClient) BeginCreateOrUpdate(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplication GalleryApplication, options *GalleryApplicationsClientBeginCreateOrUpdateOptions) (*runtime.Poller[GalleryApplicationsClientCreateOrUpdateResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.createOrUpdate(ctx, resourceGroupName, galleryName, galleryApplicationName, galleryApplication, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[GalleryApplicationsClientCreateOrUpdateResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[GalleryApplicationsClientCreateOrUpdateResponse](options.ResumeToken, client.pl, nil) + } +} + +// CreateOrUpdate - Create or update a gallery Application Definition. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-10-01 +func (client *GalleryApplicationsClient) createOrUpdate(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplication GalleryApplication, options *GalleryApplicationsClientBeginCreateOrUpdateOptions) (*http.Response, error) { + req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, galleryName, galleryApplicationName, galleryApplication, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusCreated, http.StatusAccepted) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// createOrUpdateCreateRequest creates the CreateOrUpdate request. 
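+//
+// Illustrative payload sketch for the create path above (hedged; the field
+// names follow this module's GalleryApplication model and the values are
+// placeholders):
+//
+//	osType := armcompute.OperatingSystemTypesLinux
+//	location := "eastus"
+//	app := armcompute.GalleryApplication{
+//		Location:   &location,
+//		Properties: &armcompute.GalleryApplicationProperties{SupportedOSType: &osType},
+//	}
+//	poller, err := client.BeginCreateOrUpdate(ctx, "rg", "myGallery", "myApp", app, nil)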
+func (client *GalleryApplicationsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplication GalleryApplication, options *GalleryApplicationsClientBeginCreateOrUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if galleryName == "" { + return nil, errors.New("parameter galleryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{galleryName}", url.PathEscape(galleryName)) + if galleryApplicationName == "" { + return nil, errors.New("parameter galleryApplicationName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{galleryApplicationName}", url.PathEscape(galleryApplicationName)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-10-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, galleryApplication) +} + +// BeginDelete - Delete a gallery Application. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-10-01 +// resourceGroupName - The name of the resource group. +// galleryName - The name of the Shared Application Gallery in which the Application Definition is to be deleted. +// galleryApplicationName - The name of the gallery Application Definition to be deleted. +// options - GalleryApplicationsClientBeginDeleteOptions contains the optional parameters for the GalleryApplicationsClient.BeginDelete +// method. +func (client *GalleryApplicationsClient) BeginDelete(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, options *GalleryApplicationsClientBeginDeleteOptions) (*runtime.Poller[GalleryApplicationsClientDeleteResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.deleteOperation(ctx, resourceGroupName, galleryName, galleryApplicationName, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[GalleryApplicationsClientDeleteResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[GalleryApplicationsClientDeleteResponse](options.ResumeToken, client.pl, nil) + } +} + +// Delete - Delete a gallery Application. +// If the operation fails it returns an *azcore.ResponseError type. 
+// Generated from API version 2021-10-01 +func (client *GalleryApplicationsClient) deleteOperation(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, options *GalleryApplicationsClientBeginDeleteOptions) (*http.Response, error) { + req, err := client.deleteCreateRequest(ctx, resourceGroupName, galleryName, galleryApplicationName, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted, http.StatusNoContent) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// deleteCreateRequest creates the Delete request. +func (client *GalleryApplicationsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, options *GalleryApplicationsClientBeginDeleteOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if galleryName == "" { + return nil, errors.New("parameter galleryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{galleryName}", url.PathEscape(galleryName)) + if galleryApplicationName == "" { + return nil, errors.New("parameter galleryApplicationName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{galleryApplicationName}", url.PathEscape(galleryApplicationName)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-10-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// Get - Retrieves information about a gallery Application Definition. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-10-01 +// resourceGroupName - The name of the resource group. +// galleryName - The name of the Shared Application Gallery from which the Application Definitions are to be retrieved. +// galleryApplicationName - The name of the gallery Application Definition to be retrieved. +// options - GalleryApplicationsClientGetOptions contains the optional parameters for the GalleryApplicationsClient.Get method. 
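+//
+// Illustrative sketch (hedged; resource names are placeholders, and the
+// response embeds the GalleryApplication model so its fields are promoted):
+//
+//	resp, err := client.Get(ctx, "rg", "myGallery", "myApp", nil)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	if resp.Name != nil {
+//		fmt.Println(*resp.Name)
+//	}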
+func (client *GalleryApplicationsClient) Get(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, options *GalleryApplicationsClientGetOptions) (GalleryApplicationsClientGetResponse, error) { + req, err := client.getCreateRequest(ctx, resourceGroupName, galleryName, galleryApplicationName, options) + if err != nil { + return GalleryApplicationsClientGetResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return GalleryApplicationsClientGetResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return GalleryApplicationsClientGetResponse{}, runtime.NewResponseError(resp) + } + return client.getHandleResponse(resp) +} + +// getCreateRequest creates the Get request. +func (client *GalleryApplicationsClient) getCreateRequest(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, options *GalleryApplicationsClientGetOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if galleryName == "" { + return nil, errors.New("parameter galleryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{galleryName}", url.PathEscape(galleryName)) + if galleryApplicationName == "" { + return nil, errors.New("parameter galleryApplicationName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{galleryApplicationName}", url.PathEscape(galleryApplicationName)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-10-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getHandleResponse handles the Get response. +func (client *GalleryApplicationsClient) getHandleResponse(resp *http.Response) (GalleryApplicationsClientGetResponse, error) { + result := GalleryApplicationsClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.GalleryApplication); err != nil { + return GalleryApplicationsClientGetResponse{}, err + } + return result, nil +} + +// NewListByGalleryPager - List gallery Application Definitions in a gallery. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-10-01 +// resourceGroupName - The name of the resource group. +// galleryName - The name of the Shared Application Gallery from which Application Definitions are to be listed. +// options - GalleryApplicationsClientListByGalleryOptions contains the optional parameters for the GalleryApplicationsClient.ListByGallery +// method. 
+func (client *GalleryApplicationsClient) NewListByGalleryPager(resourceGroupName string, galleryName string, options *GalleryApplicationsClientListByGalleryOptions) *runtime.Pager[GalleryApplicationsClientListByGalleryResponse] { + return runtime.NewPager(runtime.PagingHandler[GalleryApplicationsClientListByGalleryResponse]{ + More: func(page GalleryApplicationsClientListByGalleryResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *GalleryApplicationsClientListByGalleryResponse) (GalleryApplicationsClientListByGalleryResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listByGalleryCreateRequest(ctx, resourceGroupName, galleryName, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return GalleryApplicationsClientListByGalleryResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return GalleryApplicationsClientListByGalleryResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return GalleryApplicationsClientListByGalleryResponse{}, runtime.NewResponseError(resp) + } + return client.listByGalleryHandleResponse(resp) + }, + }) +} + +// listByGalleryCreateRequest creates the ListByGallery request. +func (client *GalleryApplicationsClient) listByGalleryCreateRequest(ctx context.Context, resourceGroupName string, galleryName string, options *GalleryApplicationsClientListByGalleryOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if galleryName == "" { + return nil, errors.New("parameter galleryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{galleryName}", url.PathEscape(galleryName)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-10-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listByGalleryHandleResponse handles the ListByGallery response. +func (client *GalleryApplicationsClient) listByGalleryHandleResponse(resp *http.Response) (GalleryApplicationsClientListByGalleryResponse, error) { + result := GalleryApplicationsClientListByGalleryResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.GalleryApplicationList); err != nil { + return GalleryApplicationsClientListByGalleryResponse{}, err + } + return result, nil +} + +// BeginUpdate - Update a gallery Application Definition. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-10-01 +// resourceGroupName - The name of the resource group. +// galleryName - The name of the Shared Application Gallery in which the Application Definition is to be updated. +// galleryApplicationName - The name of the gallery Application Definition to be updated. 
The allowed characters are alphabets +// and numbers with dots, dashes, and periods allowed in the middle. The maximum length is 80 +// characters. +// galleryApplication - Parameters supplied to the update gallery Application operation. +// options - GalleryApplicationsClientBeginUpdateOptions contains the optional parameters for the GalleryApplicationsClient.BeginUpdate +// method. +func (client *GalleryApplicationsClient) BeginUpdate(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplication GalleryApplicationUpdate, options *GalleryApplicationsClientBeginUpdateOptions) (*runtime.Poller[GalleryApplicationsClientUpdateResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.update(ctx, resourceGroupName, galleryName, galleryApplicationName, galleryApplication, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[GalleryApplicationsClientUpdateResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[GalleryApplicationsClientUpdateResponse](options.ResumeToken, client.pl, nil) + } +} + +// Update - Update a gallery Application Definition. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-10-01 +func (client *GalleryApplicationsClient) update(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplication GalleryApplicationUpdate, options *GalleryApplicationsClientBeginUpdateOptions) (*http.Response, error) { + req, err := client.updateCreateRequest(ctx, resourceGroupName, galleryName, galleryApplicationName, galleryApplication, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// updateCreateRequest creates the Update request. 
+func (client *GalleryApplicationsClient) updateCreateRequest(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplication GalleryApplicationUpdate, options *GalleryApplicationsClientBeginUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if galleryName == "" { + return nil, errors.New("parameter galleryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{galleryName}", url.PathEscape(galleryName)) + if galleryApplicationName == "" { + return nil, errors.New("parameter galleryApplicationName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{galleryApplicationName}", url.PathEscape(galleryApplicationName)) + req, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-10-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, galleryApplication) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_galleryapplicationversions_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_galleryapplicationversions_client.go new file mode 100644 index 000000000..373f659ce --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_galleryapplicationversions_client.go @@ -0,0 +1,427 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armcompute + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + armruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strings" +) + +// GalleryApplicationVersionsClient contains the methods for the GalleryApplicationVersions group. +// Don't use this type directly, use NewGalleryApplicationVersionsClient() instead. +type GalleryApplicationVersionsClient struct { + host string + subscriptionID string + pl runtime.Pipeline +} + +// NewGalleryApplicationVersionsClient creates a new instance of GalleryApplicationVersionsClient with the specified values. +// subscriptionID - Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms +// part of the URI for every service call. 
+// credential - used to authorize requests. Usually a credential from azidentity.
+// options - pass nil to accept the default values.
+func NewGalleryApplicationVersionsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*GalleryApplicationVersionsClient, error) {
+	if options == nil {
+		options = &arm.ClientOptions{}
+	}
+	ep := cloud.AzurePublic.Services[cloud.ResourceManager].Endpoint
+	if c, ok := options.Cloud.Services[cloud.ResourceManager]; ok {
+		ep = c.Endpoint
+	}
+	pl, err := armruntime.NewPipeline(moduleName, moduleVersion, credential, runtime.PipelineOptions{}, options)
+	if err != nil {
+		return nil, err
+	}
+	client := &GalleryApplicationVersionsClient{
+		subscriptionID: subscriptionID,
+		host:           ep,
+		pl:             pl,
+	}
+	return client, nil
+}
+
+// BeginCreateOrUpdate - Create or update a gallery Application Version.
+// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2021-10-01
+// resourceGroupName - The name of the resource group.
+// galleryName - The name of the Shared Application Gallery in which the Application Definition resides.
+// galleryApplicationName - The name of the gallery Application Definition in which the Application Version is to be created.
+// galleryApplicationVersionName - The name of the gallery Application Version to be created. Needs to follow semantic version
+// name pattern: The allowed characters are digit and period. Digits must be within the range of a 32-bit
+// integer. Format: <MajorVersion>.<MinorVersion>.<Patch>
+// galleryApplicationVersion - Parameters supplied to the create or update gallery Application Version operation.
+// options - GalleryApplicationVersionsClientBeginCreateOrUpdateOptions contains the optional parameters for the GalleryApplicationVersionsClient.BeginCreateOrUpdate
+// method.
+func (client *GalleryApplicationVersionsClient) BeginCreateOrUpdate(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplicationVersionName string, galleryApplicationVersion GalleryApplicationVersion, options *GalleryApplicationVersionsClientBeginCreateOrUpdateOptions) (*runtime.Poller[GalleryApplicationVersionsClientCreateOrUpdateResponse], error) {
+	if options == nil || options.ResumeToken == "" {
+		resp, err := client.createOrUpdate(ctx, resourceGroupName, galleryName, galleryApplicationName, galleryApplicationVersionName, galleryApplicationVersion, options)
+		if err != nil {
+			return nil, err
+		}
+		return runtime.NewPoller[GalleryApplicationVersionsClientCreateOrUpdateResponse](resp, client.pl, nil)
+	} else {
+		return runtime.NewPollerFromResumeToken[GalleryApplicationVersionsClientCreateOrUpdateResponse](options.ResumeToken, client.pl, nil)
+	}
+}
+
+// CreateOrUpdate - Create or update a gallery Application Version.
+// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2021-10-01 +func (client *GalleryApplicationVersionsClient) createOrUpdate(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplicationVersionName string, galleryApplicationVersion GalleryApplicationVersion, options *GalleryApplicationVersionsClientBeginCreateOrUpdateOptions) (*http.Response, error) { + req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, galleryName, galleryApplicationName, galleryApplicationVersionName, galleryApplicationVersion, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusCreated, http.StatusAccepted) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// createOrUpdateCreateRequest creates the CreateOrUpdate request. +func (client *GalleryApplicationVersionsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplicationVersionName string, galleryApplicationVersion GalleryApplicationVersion, options *GalleryApplicationVersionsClientBeginCreateOrUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}/versions/{galleryApplicationVersionName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if galleryName == "" { + return nil, errors.New("parameter galleryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{galleryName}", url.PathEscape(galleryName)) + if galleryApplicationName == "" { + return nil, errors.New("parameter galleryApplicationName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{galleryApplicationName}", url.PathEscape(galleryApplicationName)) + if galleryApplicationVersionName == "" { + return nil, errors.New("parameter galleryApplicationVersionName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{galleryApplicationVersionName}", url.PathEscape(galleryApplicationVersionName)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-10-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, galleryApplicationVersion) +} + +// BeginDelete - Delete a gallery Application Version. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-10-01 +// resourceGroupName - The name of the resource group. +// galleryName - The name of the Shared Application Gallery in which the Application Definition resides. +// galleryApplicationName - The name of the gallery Application Definition in which the Application Version resides. +// galleryApplicationVersionName - The name of the gallery Application Version to be deleted. 
+// options - GalleryApplicationVersionsClientBeginDeleteOptions contains the optional parameters for the GalleryApplicationVersionsClient.BeginDelete +// method. +func (client *GalleryApplicationVersionsClient) BeginDelete(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplicationVersionName string, options *GalleryApplicationVersionsClientBeginDeleteOptions) (*runtime.Poller[GalleryApplicationVersionsClientDeleteResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.deleteOperation(ctx, resourceGroupName, galleryName, galleryApplicationName, galleryApplicationVersionName, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[GalleryApplicationVersionsClientDeleteResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[GalleryApplicationVersionsClientDeleteResponse](options.ResumeToken, client.pl, nil) + } +} + +// Delete - Delete a gallery Application Version. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-10-01 +func (client *GalleryApplicationVersionsClient) deleteOperation(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplicationVersionName string, options *GalleryApplicationVersionsClientBeginDeleteOptions) (*http.Response, error) { + req, err := client.deleteCreateRequest(ctx, resourceGroupName, galleryName, galleryApplicationName, galleryApplicationVersionName, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted, http.StatusNoContent) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// deleteCreateRequest creates the Delete request. 
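+//
+// Illustrative sketch (hedged): the polling cadence of any Begin* operation
+// can be tuned through azcore's runtime.PollUntilDoneOptions; the 30-second
+// frequency and resource names below are placeholders:
+//
+//	poller, err := client.BeginDelete(ctx, "rg", "myGallery", "myApp", "1.0.0", nil)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	_, err = poller.PollUntilDone(ctx, &runtime.PollUntilDoneOptions{Frequency: 30 * time.Second})
+//	if err != nil {
+//		log.Fatal(err)
+//	}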
+func (client *GalleryApplicationVersionsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplicationVersionName string, options *GalleryApplicationVersionsClientBeginDeleteOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}/versions/{galleryApplicationVersionName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if galleryName == "" { + return nil, errors.New("parameter galleryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{galleryName}", url.PathEscape(galleryName)) + if galleryApplicationName == "" { + return nil, errors.New("parameter galleryApplicationName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{galleryApplicationName}", url.PathEscape(galleryApplicationName)) + if galleryApplicationVersionName == "" { + return nil, errors.New("parameter galleryApplicationVersionName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{galleryApplicationVersionName}", url.PathEscape(galleryApplicationVersionName)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-10-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// Get - Retrieves information about a gallery Application Version. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-10-01 +// resourceGroupName - The name of the resource group. +// galleryName - The name of the Shared Application Gallery in which the Application Definition resides. +// galleryApplicationName - The name of the gallery Application Definition in which the Application Version resides. +// galleryApplicationVersionName - The name of the gallery Application Version to be retrieved. +// options - GalleryApplicationVersionsClientGetOptions contains the optional parameters for the GalleryApplicationVersionsClient.Get +// method. +func (client *GalleryApplicationVersionsClient) Get(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplicationVersionName string, options *GalleryApplicationVersionsClientGetOptions) (GalleryApplicationVersionsClientGetResponse, error) { + req, err := client.getCreateRequest(ctx, resourceGroupName, galleryName, galleryApplicationName, galleryApplicationVersionName, options) + if err != nil { + return GalleryApplicationVersionsClientGetResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return GalleryApplicationVersionsClientGetResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return GalleryApplicationVersionsClientGetResponse{}, runtime.NewResponseError(resp) + } + return client.getHandleResponse(resp) +} + +// getCreateRequest creates the Get request. 
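+//
+// Illustrative sketch of the $expand option handled below (hedged; the
+// ReplicationStatusTypes enum name is assumed from this API version's
+// models):
+//
+//	expand := armcompute.ReplicationStatusTypesReplicationStatus
+//	resp, err := client.Get(ctx, "rg", "myGallery", "myApp", "1.0.0",
+//		&armcompute.GalleryApplicationVersionsClientGetOptions{Expand: &expand})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	_ = resp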
+func (client *GalleryApplicationVersionsClient) getCreateRequest(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplicationVersionName string, options *GalleryApplicationVersionsClientGetOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}/versions/{galleryApplicationVersionName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if galleryName == "" { + return nil, errors.New("parameter galleryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{galleryName}", url.PathEscape(galleryName)) + if galleryApplicationName == "" { + return nil, errors.New("parameter galleryApplicationName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{galleryApplicationName}", url.PathEscape(galleryApplicationName)) + if galleryApplicationVersionName == "" { + return nil, errors.New("parameter galleryApplicationVersionName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{galleryApplicationVersionName}", url.PathEscape(galleryApplicationVersionName)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Expand != nil { + reqQP.Set("$expand", string(*options.Expand)) + } + reqQP.Set("api-version", "2021-10-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getHandleResponse handles the Get response. +func (client *GalleryApplicationVersionsClient) getHandleResponse(resp *http.Response) (GalleryApplicationVersionsClientGetResponse, error) { + result := GalleryApplicationVersionsClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.GalleryApplicationVersion); err != nil { + return GalleryApplicationVersionsClientGetResponse{}, err + } + return result, nil +} + +// NewListByGalleryApplicationPager - List gallery Application Versions in a gallery Application Definition. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-10-01 +// resourceGroupName - The name of the resource group. +// galleryName - The name of the Shared Application Gallery in which the Application Definition resides. +// galleryApplicationName - The name of the Shared Application Gallery Application Definition from which the Application Versions +// are to be listed. +// options - GalleryApplicationVersionsClientListByGalleryApplicationOptions contains the optional parameters for the GalleryApplicationVersionsClient.ListByGalleryApplication +// method. 
+func (client *GalleryApplicationVersionsClient) NewListByGalleryApplicationPager(resourceGroupName string, galleryName string, galleryApplicationName string, options *GalleryApplicationVersionsClientListByGalleryApplicationOptions) *runtime.Pager[GalleryApplicationVersionsClientListByGalleryApplicationResponse] { + return runtime.NewPager(runtime.PagingHandler[GalleryApplicationVersionsClientListByGalleryApplicationResponse]{ + More: func(page GalleryApplicationVersionsClientListByGalleryApplicationResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *GalleryApplicationVersionsClientListByGalleryApplicationResponse) (GalleryApplicationVersionsClientListByGalleryApplicationResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listByGalleryApplicationCreateRequest(ctx, resourceGroupName, galleryName, galleryApplicationName, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return GalleryApplicationVersionsClientListByGalleryApplicationResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return GalleryApplicationVersionsClientListByGalleryApplicationResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return GalleryApplicationVersionsClientListByGalleryApplicationResponse{}, runtime.NewResponseError(resp) + } + return client.listByGalleryApplicationHandleResponse(resp) + }, + }) +} + +// listByGalleryApplicationCreateRequest creates the ListByGalleryApplication request. +func (client *GalleryApplicationVersionsClient) listByGalleryApplicationCreateRequest(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, options *GalleryApplicationVersionsClientListByGalleryApplicationOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}/versions" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if galleryName == "" { + return nil, errors.New("parameter galleryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{galleryName}", url.PathEscape(galleryName)) + if galleryApplicationName == "" { + return nil, errors.New("parameter galleryApplicationName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{galleryApplicationName}", url.PathEscape(galleryApplicationName)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-10-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listByGalleryApplicationHandleResponse handles the ListByGalleryApplication response. 
+func (client *GalleryApplicationVersionsClient) listByGalleryApplicationHandleResponse(resp *http.Response) (GalleryApplicationVersionsClientListByGalleryApplicationResponse, error) { + result := GalleryApplicationVersionsClientListByGalleryApplicationResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.GalleryApplicationVersionList); err != nil { + return GalleryApplicationVersionsClientListByGalleryApplicationResponse{}, err + } + return result, nil +} + +// BeginUpdate - Update a gallery Application Version. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-10-01 +// resourceGroupName - The name of the resource group. +// galleryName - The name of the Shared Application Gallery in which the Application Definition resides. +// galleryApplicationName - The name of the gallery Application Definition in which the Application Version is to be updated. +// galleryApplicationVersionName - The name of the gallery Application Version to be updated. Needs to follow semantic version +// name pattern: The allowed characters are digit and period. Digits must be within the range of a 32-bit +// integer. Format: <MajorVersion>.<MinorVersion>.<Patch> +// galleryApplicationVersion - Parameters supplied to the update gallery Application Version operation. +// options - GalleryApplicationVersionsClientBeginUpdateOptions contains the optional parameters for the GalleryApplicationVersionsClient.BeginUpdate +// method. +func (client *GalleryApplicationVersionsClient) BeginUpdate(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplicationVersionName string, galleryApplicationVersion GalleryApplicationVersionUpdate, options *GalleryApplicationVersionsClientBeginUpdateOptions) (*runtime.Poller[GalleryApplicationVersionsClientUpdateResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.update(ctx, resourceGroupName, galleryName, galleryApplicationName, galleryApplicationVersionName, galleryApplicationVersion, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[GalleryApplicationVersionsClientUpdateResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[GalleryApplicationVersionsClientUpdateResponse](options.ResumeToken, client.pl, nil) + } +} + +// Update - Update a gallery Application Version. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-10-01 +func (client *GalleryApplicationVersionsClient) update(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplicationVersionName string, galleryApplicationVersion GalleryApplicationVersionUpdate, options *GalleryApplicationVersionsClientBeginUpdateOptions) (*http.Response, error) { + req, err := client.updateCreateRequest(ctx, resourceGroupName, galleryName, galleryApplicationName, galleryApplicationVersionName, galleryApplicationVersion, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// updateCreateRequest creates the Update request.
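+//
+// Editor's sketch: BeginUpdate above can be resumed across process boundaries
+// via its resume token; the variable names here are placeholder assumptions:
+//
+//	poller, err := client.BeginUpdate(ctx, rg, gallery, app, version, update, nil)
+//	tok, err := poller.ResumeToken()
+//	// ...later, possibly elsewhere:
+//	poller, err = client.BeginUpdate(ctx, rg, gallery, app, version, update,
+//		&GalleryApplicationVersionsClientBeginUpdateOptions{ResumeToken: tok})
+//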
+func (client *GalleryApplicationVersionsClient) updateCreateRequest(ctx context.Context, resourceGroupName string, galleryName string, galleryApplicationName string, galleryApplicationVersionName string, galleryApplicationVersion GalleryApplicationVersionUpdate, options *GalleryApplicationVersionsClientBeginUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}/versions/{galleryApplicationVersionName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if galleryName == "" { + return nil, errors.New("parameter galleryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{galleryName}", url.PathEscape(galleryName)) + if galleryApplicationName == "" { + return nil, errors.New("parameter galleryApplicationName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{galleryApplicationName}", url.PathEscape(galleryApplicationName)) + if galleryApplicationVersionName == "" { + return nil, errors.New("parameter galleryApplicationVersionName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{galleryApplicationVersionName}", url.PathEscape(galleryApplicationVersionName)) + req, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-10-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, galleryApplicationVersion) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_galleryimages_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_galleryimages_client.go new file mode 100644 index 000000000..8427d5f4b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_galleryimages_client.go @@ -0,0 +1,396 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armcompute + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + armruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strings" +) + +// GalleryImagesClient contains the methods for the GalleryImages group. +// Don't use this type directly, use NewGalleryImagesClient() instead. 
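+//
+// Editor's sketch of constructing the client; the azidentity import and the
+// subscription ID are assumptions for the example:
+//
+//	cred, err := azidentity.NewDefaultAzureCredential(nil)
+//	if err != nil {
+//		// handle the error
+//	}
+//	client, err := NewGalleryImagesClient("<subscription-id>", cred, nil)
+//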
+type GalleryImagesClient struct { + host string + subscriptionID string + pl runtime.Pipeline +} + +// NewGalleryImagesClient creates a new instance of GalleryImagesClient with the specified values. +// subscriptionID - Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms +// part of the URI for every service call. +// credential - used to authorize requests. Usually a credential from azidentity. +// options - pass nil to accept the default values. +func NewGalleryImagesClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*GalleryImagesClient, error) { + if options == nil { + options = &arm.ClientOptions{} + } + ep := cloud.AzurePublic.Services[cloud.ResourceManager].Endpoint + if c, ok := options.Cloud.Services[cloud.ResourceManager]; ok { + ep = c.Endpoint + } + pl, err := armruntime.NewPipeline(moduleName, moduleVersion, credential, runtime.PipelineOptions{}, options) + if err != nil { + return nil, err + } + client := &GalleryImagesClient{ + subscriptionID: subscriptionID, + host: ep, + pl: pl, + } + return client, nil +} + +// BeginCreateOrUpdate - Create or update a gallery image definition. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-10-01 +// resourceGroupName - The name of the resource group. +// galleryName - The name of the Shared Image Gallery in which the Image Definition is to be created. +// galleryImageName - The name of the gallery image definition to be created or updated. The allowed characters are alphabets +// and numbers with dots, dashes, and periods allowed in the middle. The maximum length is 80 +// characters. +// galleryImage - Parameters supplied to the create or update gallery image operation. +// options - GalleryImagesClientBeginCreateOrUpdateOptions contains the optional parameters for the GalleryImagesClient.BeginCreateOrUpdate +// method. +func (client *GalleryImagesClient) BeginCreateOrUpdate(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImage GalleryImage, options *GalleryImagesClientBeginCreateOrUpdateOptions) (*runtime.Poller[GalleryImagesClientCreateOrUpdateResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.createOrUpdate(ctx, resourceGroupName, galleryName, galleryImageName, galleryImage, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[GalleryImagesClientCreateOrUpdateResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[GalleryImagesClientCreateOrUpdateResponse](options.ResumeToken, client.pl, nil) + } +} + +// CreateOrUpdate - Create or update a gallery image definition. +// If the operation fails it returns an *azcore.ResponseError type. 
+// Generated from API version 2021-10-01 +func (client *GalleryImagesClient) createOrUpdate(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImage GalleryImage, options *GalleryImagesClientBeginCreateOrUpdateOptions) (*http.Response, error) { + req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, galleryName, galleryImageName, galleryImage, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusCreated, http.StatusAccepted) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// createOrUpdateCreateRequest creates the CreateOrUpdate request. +func (client *GalleryImagesClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImage GalleryImage, options *GalleryImagesClientBeginCreateOrUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if galleryName == "" { + return nil, errors.New("parameter galleryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{galleryName}", url.PathEscape(galleryName)) + if galleryImageName == "" { + return nil, errors.New("parameter galleryImageName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{galleryImageName}", url.PathEscape(galleryImageName)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-10-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, galleryImage) +} + +// BeginDelete - Delete a gallery image. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-10-01 +// resourceGroupName - The name of the resource group. +// galleryName - The name of the Shared Image Gallery in which the Image Definition is to be deleted. +// galleryImageName - The name of the gallery image definition to be deleted. +// options - GalleryImagesClientBeginDeleteOptions contains the optional parameters for the GalleryImagesClient.BeginDelete +// method. 
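+//
+// Editor's sketch of driving the long-running delete to completion; resource
+// names are placeholder assumptions:
+//
+//	poller, err := client.BeginDelete(ctx, "myRG", "myGallery", "myImageDef", nil)
+//	if err != nil {
+//		// handle the error
+//	}
+//	_, err = poller.PollUntilDone(ctx, nil)
+//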
+func (client *GalleryImagesClient) BeginDelete(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, options *GalleryImagesClientBeginDeleteOptions) (*runtime.Poller[GalleryImagesClientDeleteResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.deleteOperation(ctx, resourceGroupName, galleryName, galleryImageName, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[GalleryImagesClientDeleteResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[GalleryImagesClientDeleteResponse](options.ResumeToken, client.pl, nil) + } +} + +// Delete - Delete a gallery image. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-10-01 +func (client *GalleryImagesClient) deleteOperation(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, options *GalleryImagesClientBeginDeleteOptions) (*http.Response, error) { + req, err := client.deleteCreateRequest(ctx, resourceGroupName, galleryName, galleryImageName, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted, http.StatusNoContent) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// deleteCreateRequest creates the Delete request. +func (client *GalleryImagesClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, options *GalleryImagesClientBeginDeleteOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if galleryName == "" { + return nil, errors.New("parameter galleryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{galleryName}", url.PathEscape(galleryName)) + if galleryImageName == "" { + return nil, errors.New("parameter galleryImageName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{galleryImageName}", url.PathEscape(galleryImageName)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-10-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// Get - Retrieves information about a gallery image definition. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-10-01 +// resourceGroupName - The name of the resource group. +// galleryName - The name of the Shared Image Gallery from which the Image Definitions are to be retrieved. +// galleryImageName - The name of the gallery image definition to be retrieved. +// options - GalleryImagesClientGetOptions contains the optional parameters for the GalleryImagesClient.Get method. 
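+//
+// Editor's sketch of a plain Get call; resource names are placeholder
+// assumptions:
+//
+//	resp, err := client.Get(ctx, "myRG", "myGallery", "myImageDef", nil)
+//	if err != nil {
+//		// handle the error
+//	}
+//	_ = resp.GalleryImage // the retrieved image definition
+//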
+func (client *GalleryImagesClient) Get(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, options *GalleryImagesClientGetOptions) (GalleryImagesClientGetResponse, error) { + req, err := client.getCreateRequest(ctx, resourceGroupName, galleryName, galleryImageName, options) + if err != nil { + return GalleryImagesClientGetResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return GalleryImagesClientGetResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return GalleryImagesClientGetResponse{}, runtime.NewResponseError(resp) + } + return client.getHandleResponse(resp) +} + +// getCreateRequest creates the Get request. +func (client *GalleryImagesClient) getCreateRequest(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, options *GalleryImagesClientGetOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if galleryName == "" { + return nil, errors.New("parameter galleryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{galleryName}", url.PathEscape(galleryName)) + if galleryImageName == "" { + return nil, errors.New("parameter galleryImageName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{galleryImageName}", url.PathEscape(galleryImageName)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-10-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getHandleResponse handles the Get response. +func (client *GalleryImagesClient) getHandleResponse(resp *http.Response) (GalleryImagesClientGetResponse, error) { + result := GalleryImagesClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.GalleryImage); err != nil { + return GalleryImagesClientGetResponse{}, err + } + return result, nil +} + +// NewListByGalleryPager - List gallery image definitions in a gallery. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-10-01 +// resourceGroupName - The name of the resource group. +// galleryName - The name of the Shared Image Gallery from which Image Definitions are to be listed. +// options - GalleryImagesClientListByGalleryOptions contains the optional parameters for the GalleryImagesClient.ListByGallery +// method. 
+func (client *GalleryImagesClient) NewListByGalleryPager(resourceGroupName string, galleryName string, options *GalleryImagesClientListByGalleryOptions) *runtime.Pager[GalleryImagesClientListByGalleryResponse] { + return runtime.NewPager(runtime.PagingHandler[GalleryImagesClientListByGalleryResponse]{ + More: func(page GalleryImagesClientListByGalleryResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *GalleryImagesClientListByGalleryResponse) (GalleryImagesClientListByGalleryResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listByGalleryCreateRequest(ctx, resourceGroupName, galleryName, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return GalleryImagesClientListByGalleryResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return GalleryImagesClientListByGalleryResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return GalleryImagesClientListByGalleryResponse{}, runtime.NewResponseError(resp) + } + return client.listByGalleryHandleResponse(resp) + }, + }) +} + +// listByGalleryCreateRequest creates the ListByGallery request. +func (client *GalleryImagesClient) listByGalleryCreateRequest(ctx context.Context, resourceGroupName string, galleryName string, options *GalleryImagesClientListByGalleryOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if galleryName == "" { + return nil, errors.New("parameter galleryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{galleryName}", url.PathEscape(galleryName)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-10-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listByGalleryHandleResponse handles the ListByGallery response. +func (client *GalleryImagesClient) listByGalleryHandleResponse(resp *http.Response) (GalleryImagesClientListByGalleryResponse, error) { + result := GalleryImagesClientListByGalleryResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.GalleryImageList); err != nil { + return GalleryImagesClientListByGalleryResponse{}, err + } + return result, nil +} + +// BeginUpdate - Update a gallery image definition. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-10-01 +// resourceGroupName - The name of the resource group. +// galleryName - The name of the Shared Image Gallery in which the Image Definition is to be updated. +// galleryImageName - The name of the gallery image definition to be updated. The allowed characters are alphabets and numbers +// with dots, dashes, and periods allowed in the middle. The maximum length is 80 characters. 
+// galleryImage - Parameters supplied to the update gallery image operation. +// options - GalleryImagesClientBeginUpdateOptions contains the optional parameters for the GalleryImagesClient.BeginUpdate +// method. +func (client *GalleryImagesClient) BeginUpdate(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImage GalleryImageUpdate, options *GalleryImagesClientBeginUpdateOptions) (*runtime.Poller[GalleryImagesClientUpdateResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.update(ctx, resourceGroupName, galleryName, galleryImageName, galleryImage, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[GalleryImagesClientUpdateResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[GalleryImagesClientUpdateResponse](options.ResumeToken, client.pl, nil) + } +} + +// Update - Update a gallery image definition. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-10-01 +func (client *GalleryImagesClient) update(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImage GalleryImageUpdate, options *GalleryImagesClientBeginUpdateOptions) (*http.Response, error) { + req, err := client.updateCreateRequest(ctx, resourceGroupName, galleryName, galleryImageName, galleryImage, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// updateCreateRequest creates the Update request. +func (client *GalleryImagesClient) updateCreateRequest(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImage GalleryImageUpdate, options *GalleryImagesClientBeginUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if galleryName == "" { + return nil, errors.New("parameter galleryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{galleryName}", url.PathEscape(galleryName)) + if galleryImageName == "" { + return nil, errors.New("parameter galleryImageName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{galleryImageName}", url.PathEscape(galleryImageName)) + req, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-10-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, galleryImage) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_galleryimageversions_client.go 
b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_galleryimageversions_client.go new file mode 100644 index 000000000..df24c4d2c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_galleryimageversions_client.go @@ -0,0 +1,426 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armcompute + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + armruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strings" +) + +// GalleryImageVersionsClient contains the methods for the GalleryImageVersions group. +// Don't use this type directly, use NewGalleryImageVersionsClient() instead. +type GalleryImageVersionsClient struct { + host string + subscriptionID string + pl runtime.Pipeline +} + +// NewGalleryImageVersionsClient creates a new instance of GalleryImageVersionsClient with the specified values. +// subscriptionID - Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms +// part of the URI for every service call. +// credential - used to authorize requests. Usually a credential from azidentity. +// options - pass nil to accept the default values. +func NewGalleryImageVersionsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*GalleryImageVersionsClient, error) { + if options == nil { + options = &arm.ClientOptions{} + } + ep := cloud.AzurePublic.Services[cloud.ResourceManager].Endpoint + if c, ok := options.Cloud.Services[cloud.ResourceManager]; ok { + ep = c.Endpoint + } + pl, err := armruntime.NewPipeline(moduleName, moduleVersion, credential, runtime.PipelineOptions{}, options) + if err != nil { + return nil, err + } + client := &GalleryImageVersionsClient{ + subscriptionID: subscriptionID, + host: ep, + pl: pl, + } + return client, nil +} + +// BeginCreateOrUpdate - Create or update a gallery image version. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-10-01 +// resourceGroupName - The name of the resource group. +// galleryName - The name of the Shared Image Gallery in which the Image Definition resides. +// galleryImageName - The name of the gallery image definition in which the Image Version is to be created. +// galleryImageVersionName - The name of the gallery image version to be created. Needs to follow semantic version name pattern: +// The allowed characters are digit and period. Digits must be within the range of a 32-bit integer. +// Format: <MajorVersion>.<MinorVersion>.<Patch> +// galleryImageVersion - Parameters supplied to the create or update gallery image version operation. +// options - GalleryImageVersionsClientBeginCreateOrUpdateOptions contains the optional parameters for the GalleryImageVersionsClient.BeginCreateOrUpdate +// method.
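+//
+// Editor's sketch of creating a version and waiting for the result; the
+// version name and payload are placeholder assumptions:
+//
+//	poller, err := client.BeginCreateOrUpdate(ctx, "myRG", "myGallery",
+//		"myImageDef", "1.0.0", galleryImageVersion, nil)
+//	if err != nil {
+//		// handle the error
+//	}
+//	resp, err := poller.PollUntilDone(ctx, nil)
+//	_ = resp.GalleryImageVersion
+//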
+func (client *GalleryImageVersionsClient) BeginCreateOrUpdate(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImageVersionName string, galleryImageVersion GalleryImageVersion, options *GalleryImageVersionsClientBeginCreateOrUpdateOptions) (*runtime.Poller[GalleryImageVersionsClientCreateOrUpdateResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.createOrUpdate(ctx, resourceGroupName, galleryName, galleryImageName, galleryImageVersionName, galleryImageVersion, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[GalleryImageVersionsClientCreateOrUpdateResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[GalleryImageVersionsClientCreateOrUpdateResponse](options.ResumeToken, client.pl, nil) + } +} + +// CreateOrUpdate - Create or update a gallery image version. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-10-01 +func (client *GalleryImageVersionsClient) createOrUpdate(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImageVersionName string, galleryImageVersion GalleryImageVersion, options *GalleryImageVersionsClientBeginCreateOrUpdateOptions) (*http.Response, error) { + req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, galleryName, galleryImageName, galleryImageVersionName, galleryImageVersion, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusCreated, http.StatusAccepted) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// createOrUpdateCreateRequest creates the CreateOrUpdate request. 
+func (client *GalleryImageVersionsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImageVersionName string, galleryImageVersion GalleryImageVersion, options *GalleryImageVersionsClientBeginCreateOrUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}/versions/{galleryImageVersionName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if galleryName == "" { + return nil, errors.New("parameter galleryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{galleryName}", url.PathEscape(galleryName)) + if galleryImageName == "" { + return nil, errors.New("parameter galleryImageName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{galleryImageName}", url.PathEscape(galleryImageName)) + if galleryImageVersionName == "" { + return nil, errors.New("parameter galleryImageVersionName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{galleryImageVersionName}", url.PathEscape(galleryImageVersionName)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-10-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, galleryImageVersion) +} + +// BeginDelete - Delete a gallery image version. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-10-01 +// resourceGroupName - The name of the resource group. +// galleryName - The name of the Shared Image Gallery in which the Image Definition resides. +// galleryImageName - The name of the gallery image definition in which the Image Version resides. +// galleryImageVersionName - The name of the gallery image version to be deleted. +// options - GalleryImageVersionsClientBeginDeleteOptions contains the optional parameters for the GalleryImageVersionsClient.BeginDelete +// method. +func (client *GalleryImageVersionsClient) BeginDelete(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImageVersionName string, options *GalleryImageVersionsClientBeginDeleteOptions) (*runtime.Poller[GalleryImageVersionsClientDeleteResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.deleteOperation(ctx, resourceGroupName, galleryName, galleryImageName, galleryImageVersionName, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[GalleryImageVersionsClientDeleteResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[GalleryImageVersionsClientDeleteResponse](options.ResumeToken, client.pl, nil) + } +} + +// Delete - Delete a gallery image version. +// If the operation fails it returns an *azcore.ResponseError type. 
+// Generated from API version 2021-10-01 +func (client *GalleryImageVersionsClient) deleteOperation(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImageVersionName string, options *GalleryImageVersionsClientBeginDeleteOptions) (*http.Response, error) { + req, err := client.deleteCreateRequest(ctx, resourceGroupName, galleryName, galleryImageName, galleryImageVersionName, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted, http.StatusNoContent) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// deleteCreateRequest creates the Delete request. +func (client *GalleryImageVersionsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImageVersionName string, options *GalleryImageVersionsClientBeginDeleteOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}/versions/{galleryImageVersionName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if galleryName == "" { + return nil, errors.New("parameter galleryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{galleryName}", url.PathEscape(galleryName)) + if galleryImageName == "" { + return nil, errors.New("parameter galleryImageName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{galleryImageName}", url.PathEscape(galleryImageName)) + if galleryImageVersionName == "" { + return nil, errors.New("parameter galleryImageVersionName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{galleryImageVersionName}", url.PathEscape(galleryImageVersionName)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-10-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// Get - Retrieves information about a gallery image version. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-10-01 +// resourceGroupName - The name of the resource group. +// galleryName - The name of the Shared Image Gallery in which the Image Definition resides. +// galleryImageName - The name of the gallery image definition in which the Image Version resides. +// galleryImageVersionName - The name of the gallery image version to be retrieved. +// options - GalleryImageVersionsClientGetOptions contains the optional parameters for the GalleryImageVersionsClient.Get +// method. 
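+//
+// Editor's sketch of requesting replication status via the optional $expand
+// parameter; the ReplicationStatusTypes constant is assumed to come from this
+// package's models:
+//
+//	expand := ReplicationStatusTypesReplicationStatus
+//	resp, err := client.Get(ctx, "myRG", "myGallery", "myImageDef", "1.0.0",
+//		&GalleryImageVersionsClientGetOptions{Expand: &expand})
+//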
+func (client *GalleryImageVersionsClient) Get(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImageVersionName string, options *GalleryImageVersionsClientGetOptions) (GalleryImageVersionsClientGetResponse, error) { + req, err := client.getCreateRequest(ctx, resourceGroupName, galleryName, galleryImageName, galleryImageVersionName, options) + if err != nil { + return GalleryImageVersionsClientGetResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return GalleryImageVersionsClientGetResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return GalleryImageVersionsClientGetResponse{}, runtime.NewResponseError(resp) + } + return client.getHandleResponse(resp) +} + +// getCreateRequest creates the Get request. +func (client *GalleryImageVersionsClient) getCreateRequest(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImageVersionName string, options *GalleryImageVersionsClientGetOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}/versions/{galleryImageVersionName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if galleryName == "" { + return nil, errors.New("parameter galleryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{galleryName}", url.PathEscape(galleryName)) + if galleryImageName == "" { + return nil, errors.New("parameter galleryImageName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{galleryImageName}", url.PathEscape(galleryImageName)) + if galleryImageVersionName == "" { + return nil, errors.New("parameter galleryImageVersionName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{galleryImageVersionName}", url.PathEscape(galleryImageVersionName)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Expand != nil { + reqQP.Set("$expand", string(*options.Expand)) + } + reqQP.Set("api-version", "2021-10-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getHandleResponse handles the Get response. +func (client *GalleryImageVersionsClient) getHandleResponse(resp *http.Response) (GalleryImageVersionsClientGetResponse, error) { + result := GalleryImageVersionsClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.GalleryImageVersion); err != nil { + return GalleryImageVersionsClientGetResponse{}, err + } + return result, nil +} + +// NewListByGalleryImagePager - List gallery image versions in a gallery image definition. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-10-01 +// resourceGroupName - The name of the resource group. +// galleryName - The name of the Shared Image Gallery in which the Image Definition resides. 
+// galleryImageName - The name of the Shared Image Gallery Image Definition from which the Image Versions are to be listed. +// options - GalleryImageVersionsClientListByGalleryImageOptions contains the optional parameters for the GalleryImageVersionsClient.ListByGalleryImage +// method. +func (client *GalleryImageVersionsClient) NewListByGalleryImagePager(resourceGroupName string, galleryName string, galleryImageName string, options *GalleryImageVersionsClientListByGalleryImageOptions) *runtime.Pager[GalleryImageVersionsClientListByGalleryImageResponse] { + return runtime.NewPager(runtime.PagingHandler[GalleryImageVersionsClientListByGalleryImageResponse]{ + More: func(page GalleryImageVersionsClientListByGalleryImageResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *GalleryImageVersionsClientListByGalleryImageResponse) (GalleryImageVersionsClientListByGalleryImageResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listByGalleryImageCreateRequest(ctx, resourceGroupName, galleryName, galleryImageName, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return GalleryImageVersionsClientListByGalleryImageResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return GalleryImageVersionsClientListByGalleryImageResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return GalleryImageVersionsClientListByGalleryImageResponse{}, runtime.NewResponseError(resp) + } + return client.listByGalleryImageHandleResponse(resp) + }, + }) +} + +// listByGalleryImageCreateRequest creates the ListByGalleryImage request. +func (client *GalleryImageVersionsClient) listByGalleryImageCreateRequest(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, options *GalleryImageVersionsClientListByGalleryImageOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}/versions" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if galleryName == "" { + return nil, errors.New("parameter galleryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{galleryName}", url.PathEscape(galleryName)) + if galleryImageName == "" { + return nil, errors.New("parameter galleryImageName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{galleryImageName}", url.PathEscape(galleryImageName)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-10-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listByGalleryImageHandleResponse handles the ListByGalleryImage response. 
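+//
+// Editor's note: the handler below unmarshals each page into the embedded
+// GalleryImageVersionList; its NextLink field is what the pager's More and
+// Fetcher callbacks above key off of.
+//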
+func (client *GalleryImageVersionsClient) listByGalleryImageHandleResponse(resp *http.Response) (GalleryImageVersionsClientListByGalleryImageResponse, error) { + result := GalleryImageVersionsClientListByGalleryImageResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.GalleryImageVersionList); err != nil { + return GalleryImageVersionsClientListByGalleryImageResponse{}, err + } + return result, nil +} + +// BeginUpdate - Update a gallery image version. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-10-01 +// resourceGroupName - The name of the resource group. +// galleryName - The name of the Shared Image Gallery in which the Image Definition resides. +// galleryImageName - The name of the gallery image definition in which the Image Version is to be updated. +// galleryImageVersionName - The name of the gallery image version to be updated. Needs to follow semantic version name pattern: +// The allowed characters are digit and period. Digits must be within the range of a 32-bit integer. +// Format: <MajorVersion>.<MinorVersion>.<Patch> +// galleryImageVersion - Parameters supplied to the update gallery image version operation. +// options - GalleryImageVersionsClientBeginUpdateOptions contains the optional parameters for the GalleryImageVersionsClient.BeginUpdate +// method. +func (client *GalleryImageVersionsClient) BeginUpdate(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImageVersionName string, galleryImageVersion GalleryImageVersionUpdate, options *GalleryImageVersionsClientBeginUpdateOptions) (*runtime.Poller[GalleryImageVersionsClientUpdateResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.update(ctx, resourceGroupName, galleryName, galleryImageName, galleryImageVersionName, galleryImageVersion, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[GalleryImageVersionsClientUpdateResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[GalleryImageVersionsClientUpdateResponse](options.ResumeToken, client.pl, nil) + } +} + +// Update - Update a gallery image version. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-10-01 +func (client *GalleryImageVersionsClient) update(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImageVersionName string, galleryImageVersion GalleryImageVersionUpdate, options *GalleryImageVersionsClientBeginUpdateOptions) (*http.Response, error) { + req, err := client.updateCreateRequest(ctx, resourceGroupName, galleryName, galleryImageName, galleryImageVersionName, galleryImageVersion, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// updateCreateRequest creates the Update request.
+func (client *GalleryImageVersionsClient) updateCreateRequest(ctx context.Context, resourceGroupName string, galleryName string, galleryImageName string, galleryImageVersionName string, galleryImageVersion GalleryImageVersionUpdate, options *GalleryImageVersionsClientBeginUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}/versions/{galleryImageVersionName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if galleryName == "" { + return nil, errors.New("parameter galleryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{galleryName}", url.PathEscape(galleryName)) + if galleryImageName == "" { + return nil, errors.New("parameter galleryImageName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{galleryImageName}", url.PathEscape(galleryImageName)) + if galleryImageVersionName == "" { + return nil, errors.New("parameter galleryImageVersionName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{galleryImageVersionName}", url.PathEscape(galleryImageVersionName)) + req, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-10-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, galleryImageVersion) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_gallerysharingprofile_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_gallerysharingprofile_client.go new file mode 100644 index 000000000..3ddc9614b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_gallerysharingprofile_client.go @@ -0,0 +1,120 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armcompute + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + armruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strings" +) + +// GallerySharingProfileClient contains the methods for the GallerySharingProfile group. +// Don't use this type directly, use NewGallerySharingProfileClient() instead. 
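+//
+// Editor's sketch of updating a gallery's sharing profile; the SharingUpdate
+// shape and the to.Ptr helper (from azcore's "to" package) are assumptions:
+//
+//	update := SharingUpdate{OperationType: to.Ptr(SharingUpdateOperationTypesAdd)}
+//	poller, err := client.BeginUpdate(ctx, "myRG", "myGallery", update, nil)
+//	if err != nil {
+//		// handle the error
+//	}
+//	_, err = poller.PollUntilDone(ctx, nil)
+//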
+type GallerySharingProfileClient struct { + host string + subscriptionID string + pl runtime.Pipeline +} + +// NewGallerySharingProfileClient creates a new instance of GallerySharingProfileClient with the specified values. +// subscriptionID - Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms +// part of the URI for every service call. +// credential - used to authorize requests. Usually a credential from azidentity. +// options - pass nil to accept the default values. +func NewGallerySharingProfileClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*GallerySharingProfileClient, error) { + if options == nil { + options = &arm.ClientOptions{} + } + ep := cloud.AzurePublic.Services[cloud.ResourceManager].Endpoint + if c, ok := options.Cloud.Services[cloud.ResourceManager]; ok { + ep = c.Endpoint + } + pl, err := armruntime.NewPipeline(moduleName, moduleVersion, credential, runtime.PipelineOptions{}, options) + if err != nil { + return nil, err + } + client := &GallerySharingProfileClient{ + subscriptionID: subscriptionID, + host: ep, + pl: pl, + } + return client, nil +} + +// BeginUpdate - Update sharing profile of a gallery. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-10-01 +// resourceGroupName - The name of the resource group. +// galleryName - The name of the Shared Image Gallery. +// sharingUpdate - Parameters supplied to the update gallery sharing profile. +// options - GallerySharingProfileClientBeginUpdateOptions contains the optional parameters for the GallerySharingProfileClient.BeginUpdate +// method. +func (client *GallerySharingProfileClient) BeginUpdate(ctx context.Context, resourceGroupName string, galleryName string, sharingUpdate SharingUpdate, options *GallerySharingProfileClientBeginUpdateOptions) (*runtime.Poller[GallerySharingProfileClientUpdateResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.update(ctx, resourceGroupName, galleryName, sharingUpdate, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[GallerySharingProfileClientUpdateResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[GallerySharingProfileClientUpdateResponse](options.ResumeToken, client.pl, nil) + } +} + +// Update - Update sharing profile of a gallery. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-10-01 +func (client *GallerySharingProfileClient) update(ctx context.Context, resourceGroupName string, galleryName string, sharingUpdate SharingUpdate, options *GallerySharingProfileClientBeginUpdateOptions) (*http.Response, error) { + req, err := client.updateCreateRequest(ctx, resourceGroupName, galleryName, sharingUpdate, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// updateCreateRequest creates the Update request. 
+func (client *GallerySharingProfileClient) updateCreateRequest(ctx context.Context, resourceGroupName string, galleryName string, sharingUpdate SharingUpdate, options *GallerySharingProfileClientBeginUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/share" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if galleryName == "" { + return nil, errors.New("parameter galleryName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{galleryName}", url.PathEscape(galleryName)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-10-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, sharingUpdate) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_images_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_images_client.go new file mode 100644 index 000000000..1d09de1ab --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_images_client.go @@ -0,0 +1,429 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armcompute + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + armruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strings" +) + +// ImagesClient contains the methods for the Images group. +// Don't use this type directly, use NewImagesClient() instead. +type ImagesClient struct { + host string + subscriptionID string + pl runtime.Pipeline +} + +// NewImagesClient creates a new instance of ImagesClient with the specified values. +// subscriptionID - Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms +// part of the URI for every service call. +// credential - used to authorize requests. Usually a credential from azidentity. +// options - pass nil to accept the default values. 
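+// A minimal construction sketch (illustrative only; assumes the azidentity
+// module is available to supply the credential):
+//
+//	cred, err := azidentity.NewDefaultAzureCredential(nil)
+//	if err != nil {
+//		// handle error
+//	}
+//	client, err := armcompute.NewImagesClient("<subscription-id>", cred, nil)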
+func NewImagesClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*ImagesClient, error) { + if options == nil { + options = &arm.ClientOptions{} + } + ep := cloud.AzurePublic.Services[cloud.ResourceManager].Endpoint + if c, ok := options.Cloud.Services[cloud.ResourceManager]; ok { + ep = c.Endpoint + } + pl, err := armruntime.NewPipeline(moduleName, moduleVersion, credential, runtime.PipelineOptions{}, options) + if err != nil { + return nil, err + } + client := &ImagesClient{ + subscriptionID: subscriptionID, + host: ep, + pl: pl, + } + return client, nil +} + +// BeginCreateOrUpdate - Create or update an image. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// imageName - The name of the image. +// parameters - Parameters supplied to the Create Image operation. +// options - ImagesClientBeginCreateOrUpdateOptions contains the optional parameters for the ImagesClient.BeginCreateOrUpdate +// method. +func (client *ImagesClient) BeginCreateOrUpdate(ctx context.Context, resourceGroupName string, imageName string, parameters Image, options *ImagesClientBeginCreateOrUpdateOptions) (*runtime.Poller[ImagesClientCreateOrUpdateResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.createOrUpdate(ctx, resourceGroupName, imageName, parameters, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[ImagesClientCreateOrUpdateResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[ImagesClientCreateOrUpdateResponse](options.ResumeToken, client.pl, nil) + } +} + +// CreateOrUpdate - Create or update an image. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +func (client *ImagesClient) createOrUpdate(ctx context.Context, resourceGroupName string, imageName string, parameters Image, options *ImagesClientBeginCreateOrUpdateOptions) (*http.Response, error) { + req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, imageName, parameters, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// createOrUpdateCreateRequest creates the CreateOrUpdate request. 
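+// For context, a caller normally drives the poller returned by BeginCreateOrUpdate
+// above to completion. A sketch, given an armcompute.Image value image and
+// assuming azcore's Poller.PollUntilDone is available in this module version:
+//
+//	poller, err := client.BeginCreateOrUpdate(ctx, "<resource-group>", "<image-name>", image, nil)
+//	if err != nil {
+//		// handle error
+//	}
+//	resp, err := poller.PollUntilDone(ctx, nil)
+//	_ = resp // ImagesClientCreateOrUpdateResponse on success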
+func (client *ImagesClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, imageName string, parameters Image, options *ImagesClientBeginCreateOrUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if imageName == "" { + return nil, errors.New("parameter imageName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{imageName}", url.PathEscape(imageName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, parameters) +} + +// BeginDelete - Deletes an Image. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// imageName - The name of the image. +// options - ImagesClientBeginDeleteOptions contains the optional parameters for the ImagesClient.BeginDelete method. +func (client *ImagesClient) BeginDelete(ctx context.Context, resourceGroupName string, imageName string, options *ImagesClientBeginDeleteOptions) (*runtime.Poller[ImagesClientDeleteResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.deleteOperation(ctx, resourceGroupName, imageName, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[ImagesClientDeleteResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[ImagesClientDeleteResponse](options.ResumeToken, client.pl, nil) + } +} + +// Delete - Deletes an Image. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +func (client *ImagesClient) deleteOperation(ctx context.Context, resourceGroupName string, imageName string, options *ImagesClientBeginDeleteOptions) (*http.Response, error) { + req, err := client.deleteCreateRequest(ctx, resourceGroupName, imageName, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted, http.StatusNoContent) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// deleteCreateRequest creates the Delete request. 
+func (client *ImagesClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, imageName string, options *ImagesClientBeginDeleteOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if imageName == "" { + return nil, errors.New("parameter imageName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{imageName}", url.PathEscape(imageName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// Get - Gets an image. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// imageName - The name of the image. +// options - ImagesClientGetOptions contains the optional parameters for the ImagesClient.Get method. +func (client *ImagesClient) Get(ctx context.Context, resourceGroupName string, imageName string, options *ImagesClientGetOptions) (ImagesClientGetResponse, error) { + req, err := client.getCreateRequest(ctx, resourceGroupName, imageName, options) + if err != nil { + return ImagesClientGetResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return ImagesClientGetResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ImagesClientGetResponse{}, runtime.NewResponseError(resp) + } + return client.getHandleResponse(resp) +} + +// getCreateRequest creates the Get request. 
+func (client *ImagesClient) getCreateRequest(ctx context.Context, resourceGroupName string, imageName string, options *ImagesClientGetOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if imageName == "" { + return nil, errors.New("parameter imageName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{imageName}", url.PathEscape(imageName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Expand != nil { + reqQP.Set("$expand", *options.Expand) + } + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getHandleResponse handles the Get response. +func (client *ImagesClient) getHandleResponse(resp *http.Response) (ImagesClientGetResponse, error) { + result := ImagesClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.Image); err != nil { + return ImagesClientGetResponse{}, err + } + return result, nil +} + +// NewListPager - Gets the list of Images in the subscription. Use nextLink property in the response to get the next page +// of Images. Do this till nextLink is null to fetch all the Images. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// options - ImagesClientListOptions contains the optional parameters for the ImagesClient.List method. +func (client *ImagesClient) NewListPager(options *ImagesClientListOptions) *runtime.Pager[ImagesClientListResponse] { + return runtime.NewPager(runtime.PagingHandler[ImagesClientListResponse]{ + More: func(page ImagesClientListResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *ImagesClientListResponse) (ImagesClientListResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listCreateRequest(ctx, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return ImagesClientListResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return ImagesClientListResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ImagesClientListResponse{}, runtime.NewResponseError(resp) + } + return client.listHandleResponse(resp) + }, + }) +} + +// listCreateRequest creates the List request. 
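+// Typical iteration over the pager returned by NewListPager above (a sketch;
+// assumes the response embeds ImageListResult, so each page exposes Value):
+//
+//	pager := client.NewListPager(nil)
+//	for pager.More() {
+//		page, err := pager.NextPage(ctx)
+//		if err != nil {
+//			// handle error
+//		}
+//		for _, img := range page.Value {
+//			_ = img // each entry is a *Image
+//		}
+//	}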
+func (client *ImagesClient) listCreateRequest(ctx context.Context, options *ImagesClientListOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/images" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listHandleResponse handles the List response. +func (client *ImagesClient) listHandleResponse(resp *http.Response) (ImagesClientListResponse, error) { + result := ImagesClientListResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.ImageListResult); err != nil { + return ImagesClientListResponse{}, err + } + return result, nil +} + +// NewListByResourceGroupPager - Gets the list of images under a resource group. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// options - ImagesClientListByResourceGroupOptions contains the optional parameters for the ImagesClient.ListByResourceGroup +// method. +func (client *ImagesClient) NewListByResourceGroupPager(resourceGroupName string, options *ImagesClientListByResourceGroupOptions) *runtime.Pager[ImagesClientListByResourceGroupResponse] { + return runtime.NewPager(runtime.PagingHandler[ImagesClientListByResourceGroupResponse]{ + More: func(page ImagesClientListByResourceGroupResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *ImagesClientListByResourceGroupResponse) (ImagesClientListByResourceGroupResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listByResourceGroupCreateRequest(ctx, resourceGroupName, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return ImagesClientListByResourceGroupResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return ImagesClientListByResourceGroupResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ImagesClientListByResourceGroupResponse{}, runtime.NewResponseError(resp) + } + return client.listByResourceGroupHandleResponse(resp) + }, + }) +} + +// listByResourceGroupCreateRequest creates the ListByResourceGroup request. 
+func (client *ImagesClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *ImagesClientListByResourceGroupOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listByResourceGroupHandleResponse handles the ListByResourceGroup response. +func (client *ImagesClient) listByResourceGroupHandleResponse(resp *http.Response) (ImagesClientListByResourceGroupResponse, error) { + result := ImagesClientListByResourceGroupResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.ImageListResult); err != nil { + return ImagesClientListByResourceGroupResponse{}, err + } + return result, nil +} + +// BeginUpdate - Update an image. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// imageName - The name of the image. +// parameters - Parameters supplied to the Update Image operation. +// options - ImagesClientBeginUpdateOptions contains the optional parameters for the ImagesClient.BeginUpdate method. +func (client *ImagesClient) BeginUpdate(ctx context.Context, resourceGroupName string, imageName string, parameters ImageUpdate, options *ImagesClientBeginUpdateOptions) (*runtime.Poller[ImagesClientUpdateResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.update(ctx, resourceGroupName, imageName, parameters, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[ImagesClientUpdateResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[ImagesClientUpdateResponse](options.ResumeToken, client.pl, nil) + } +} + +// Update - Update an image. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +func (client *ImagesClient) update(ctx context.Context, resourceGroupName string, imageName string, parameters ImageUpdate, options *ImagesClientBeginUpdateOptions) (*http.Response, error) { + req, err := client.updateCreateRequest(ctx, resourceGroupName, imageName, parameters, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// updateCreateRequest creates the Update request. 
+func (client *ImagesClient) updateCreateRequest(ctx context.Context, resourceGroupName string, imageName string, parameters ImageUpdate, options *ImagesClientBeginUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if imageName == "" { + return nil, errors.New("parameter imageName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{imageName}", url.PathEscape(imageName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, parameters) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_loganalytics_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_loganalytics_client.go new file mode 100644 index 000000000..b0dd5efe3 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_loganalytics_client.go @@ -0,0 +1,181 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armcompute + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + armruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strings" +) + +// LogAnalyticsClient contains the methods for the LogAnalytics group. +// Don't use this type directly, use NewLogAnalyticsClient() instead. +type LogAnalyticsClient struct { + host string + subscriptionID string + pl runtime.Pipeline +} + +// NewLogAnalyticsClient creates a new instance of LogAnalyticsClient with the specified values. +// subscriptionID - Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms +// part of the URI for every service call. +// credential - used to authorize requests. Usually a credential from azidentity. +// options - pass nil to accept the default values. 
+func NewLogAnalyticsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*LogAnalyticsClient, error) { + if options == nil { + options = &arm.ClientOptions{} + } + ep := cloud.AzurePublic.Services[cloud.ResourceManager].Endpoint + if c, ok := options.Cloud.Services[cloud.ResourceManager]; ok { + ep = c.Endpoint + } + pl, err := armruntime.NewPipeline(moduleName, moduleVersion, credential, runtime.PipelineOptions{}, options) + if err != nil { + return nil, err + } + client := &LogAnalyticsClient{ + subscriptionID: subscriptionID, + host: ep, + pl: pl, + } + return client, nil +} + +// BeginExportRequestRateByInterval - Export logs that show Api requests made by this subscription in the given time window +// to show throttling activities. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// location - The location upon which virtual-machine-sizes is queried. +// parameters - Parameters supplied to the LogAnalytics getRequestRateByInterval Api. +// options - LogAnalyticsClientBeginExportRequestRateByIntervalOptions contains the optional parameters for the LogAnalyticsClient.BeginExportRequestRateByInterval +// method. +func (client *LogAnalyticsClient) BeginExportRequestRateByInterval(ctx context.Context, location string, parameters RequestRateByIntervalInput, options *LogAnalyticsClientBeginExportRequestRateByIntervalOptions) (*runtime.Poller[LogAnalyticsClientExportRequestRateByIntervalResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.exportRequestRateByInterval(ctx, location, parameters, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.pl, &runtime.NewPollerOptions[LogAnalyticsClientExportRequestRateByIntervalResponse]{ + FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + }) + } else { + return runtime.NewPollerFromResumeToken[LogAnalyticsClientExportRequestRateByIntervalResponse](options.ResumeToken, client.pl, nil) + } +} + +// ExportRequestRateByInterval - Export logs that show Api requests made by this subscription in the given time window to +// show throttling activities. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +func (client *LogAnalyticsClient) exportRequestRateByInterval(ctx context.Context, location string, parameters RequestRateByIntervalInput, options *LogAnalyticsClientBeginExportRequestRateByIntervalOptions) (*http.Response, error) { + req, err := client.exportRequestRateByIntervalCreateRequest(ctx, location, parameters, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// exportRequestRateByIntervalCreateRequest creates the ExportRequestRateByInterval request. 
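+// The ResumeToken field on the Begin* options allows an in-flight LRO to be
+// rehydrated in another process. A sketch (assumes Poller.ResumeToken exists
+// in this azcore version):
+//
+//	poller, err := client.BeginExportRequestRateByInterval(ctx, location, input, nil)
+//	if err != nil {
+//		// handle error
+//	}
+//	tok, err := poller.ResumeToken()
+//	// later, resume polling from tok:
+//	//   client.BeginExportRequestRateByInterval(ctx, location, input,
+//	//       &LogAnalyticsClientBeginExportRequestRateByIntervalOptions{ResumeToken: tok})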
+func (client *LogAnalyticsClient) exportRequestRateByIntervalCreateRequest(ctx context.Context, location string, parameters RequestRateByIntervalInput, options *LogAnalyticsClientBeginExportRequestRateByIntervalOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/logAnalytics/apiAccess/getRequestRateByInterval" + if location == "" { + return nil, errors.New("parameter location cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{location}", url.PathEscape(location)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, parameters) +} + +// BeginExportThrottledRequests - Export logs that show total throttled Api requests for this subscription in the given time +// window. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// location - The location upon which virtual-machine-sizes is queried. +// parameters - Parameters supplied to the LogAnalytics getThrottledRequests Api. +// options - LogAnalyticsClientBeginExportThrottledRequestsOptions contains the optional parameters for the LogAnalyticsClient.BeginExportThrottledRequests +// method. +func (client *LogAnalyticsClient) BeginExportThrottledRequests(ctx context.Context, location string, parameters ThrottledRequestsInput, options *LogAnalyticsClientBeginExportThrottledRequestsOptions) (*runtime.Poller[LogAnalyticsClientExportThrottledRequestsResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.exportThrottledRequests(ctx, location, parameters, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.pl, &runtime.NewPollerOptions[LogAnalyticsClientExportThrottledRequestsResponse]{ + FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + }) + } else { + return runtime.NewPollerFromResumeToken[LogAnalyticsClientExportThrottledRequestsResponse](options.ResumeToken, client.pl, nil) + } +} + +// ExportThrottledRequests - Export logs that show total throttled Api requests for this subscription in the given time window. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +func (client *LogAnalyticsClient) exportThrottledRequests(ctx context.Context, location string, parameters ThrottledRequestsInput, options *LogAnalyticsClientBeginExportThrottledRequestsOptions) (*http.Response, error) { + req, err := client.exportThrottledRequestsCreateRequest(ctx, location, parameters, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// exportThrottledRequestsCreateRequest creates the ExportThrottledRequests request. 
+func (client *LogAnalyticsClient) exportThrottledRequestsCreateRequest(ctx context.Context, location string, parameters ThrottledRequestsInput, options *LogAnalyticsClientBeginExportThrottledRequestsOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/logAnalytics/apiAccess/getThrottledRequests" + if location == "" { + return nil, errors.New("parameter location cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{location}", url.PathEscape(location)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, parameters) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_models.go new file mode 100644 index 000000000..e74c02bae --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_models.go @@ -0,0 +1,9540 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armcompute + +import "time" + +// APIEntityReference - The API entity reference. +type APIEntityReference struct { + // The ARM resource id in the form of /subscriptions/{SubscriptionId}/resourceGroups/{ResourceGroupName}/… + ID *string `json:"id,omitempty"` +} + +// APIError - Api error. +type APIError struct { + // The error code. + Code *string `json:"code,omitempty"` + + // The Api error details + Details []*APIErrorBase `json:"details,omitempty"` + + // The Api inner error + Innererror *InnerError `json:"innererror,omitempty"` + + // The error message. + Message *string `json:"message,omitempty"` + + // The target of the particular error. + Target *string `json:"target,omitempty"` +} + +// APIErrorBase - Api error base. +type APIErrorBase struct { + // The error code. + Code *string `json:"code,omitempty"` + + // The error message. + Message *string `json:"message,omitempty"` + + // The target of the particular error. + Target *string `json:"target,omitempty"` +} + +// AccessURI - A disk access SAS uri. +type AccessURI struct { + // READ-ONLY; A SAS uri for accessing a disk. + AccessSAS *string `json:"accessSAS,omitempty" azure:"ro"` + + // READ-ONLY; A SAS uri for accessing a VM guest state. + SecurityDataAccessSAS *string `json:"securityDataAccessSAS,omitempty" azure:"ro"` +} + +// AdditionalCapabilities - Enables or disables a capability on the virtual machine or virtual machine scale set. +type AdditionalCapabilities struct { + // The flag that enables or disables hibernation capability on the VM. 
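+// (Model fields in this package are pointers; a common way to populate them is
+// the to.Ptr helper from sdk/azcore/to. A sketch, assuming that helper is
+// vendored: caps := armcompute.AdditionalCapabilities{HibernationEnabled: to.Ptr(true)}.)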
+ HibernationEnabled *bool `json:"hibernationEnabled,omitempty"` + + // The flag that enables or disables a capability to have one or more managed data disks with UltraSSDLRS storage account + // type on the VM or VMSS. Managed disks with storage account type UltraSSDLRS can + // be added to a virtual machine or virtual machine scale set only if this property is enabled. + UltraSSDEnabled *bool `json:"ultraSSDEnabled,omitempty"` +} + +// AdditionalUnattendContent - Specifies additional XML formatted information that can be included in the Unattend.xml file, +// which is used by Windows Setup. Contents are defined by setting name, component name, and the pass in +// which the content is applied. +type AdditionalUnattendContent struct { + // The component name. Currently, the only allowable value is Microsoft-Windows-Shell-Setup. + ComponentName *string `json:"componentName,omitempty"` + + // Specifies the XML formatted content that is added to the unattend.xml file for the specified path and component. The XML + // must be less than 4KB and must include the root element for the setting or + // feature that is being inserted. + Content *string `json:"content,omitempty"` + + // The pass name. Currently, the only allowable value is OobeSystem. + PassName *string `json:"passName,omitempty"` + + // Specifies the name of the setting to which the content applies. Possible values are: FirstLogonCommands and AutoLogon. + SettingName *SettingNames `json:"settingName,omitempty"` +} + +// ApplicationProfile - Contains the list of gallery applications that should be made available to the VM/VMSS +type ApplicationProfile struct { + // Specifies the gallery applications that should be made available to the VM/VMSS + GalleryApplications []*VMGalleryApplication `json:"galleryApplications,omitempty"` +} + +// AutomaticOSUpgradePolicy - The configuration parameters used for performing automatic OS upgrade. +type AutomaticOSUpgradePolicy struct { + // Whether the OS image rollback feature should be disabled. Default value is false. + DisableAutomaticRollback *bool `json:"disableAutomaticRollback,omitempty"` + + // Indicates whether OS upgrades should automatically be applied to scale set instances in a rolling fashion when a newer + // version of the OS image becomes available. Default value is false. + // If this is set to true for Windows-based scale sets, enableAutomaticUpdates + // [https://docs.microsoft.com/dotnet/api/microsoft.azure.management.compute.models.windowsconfiguration.enableautomaticupdates?view=azure-dotnet] + // is automatically set to false and cannot be set to true. + EnableAutomaticOSUpgrade *bool `json:"enableAutomaticOSUpgrade,omitempty"` + + // Indicates whether rolling upgrade policy should be used during Auto OS Upgrade. Default value is false. Auto OS Upgrade + // will fall back to the default policy if no policy is defined on the VMSS. + UseRollingUpgradePolicy *bool `json:"useRollingUpgradePolicy,omitempty"` +} + +// AutomaticOSUpgradeProperties - Describes automatic OS upgrade properties on the image. +type AutomaticOSUpgradeProperties struct { + // REQUIRED; Specifies whether automatic OS upgrade is supported on the image. + AutomaticOSUpgradeSupported *bool `json:"automaticOSUpgradeSupported,omitempty"` +} + +// AutomaticRepairsPolicy - Specifies the configuration parameters for automatic repairs on the virtual machine scale set. +type AutomaticRepairsPolicy struct { + // Specifies whether automatic repairs should be enabled on the virtual machine scale set.
The default value is false. + Enabled *bool `json:"enabled,omitempty"` + + // The amount of time for which automatic repairs are suspended due to a state change on the VM. The grace time starts after the + // state change has completed. This helps avoid premature or accidental repairs. + // The time duration should be specified in ISO 8601 format. The minimum allowed grace period is 10 minutes (PT10M), which + // is also the default value. The maximum allowed grace period is 90 minutes + // (PT90M). + GracePeriod *string `json:"gracePeriod,omitempty"` + + // Type of repair action (replace, restart, reimage) that will be used for repairing unhealthy virtual machines in the scale + // set. Default value is replace. + RepairAction *RepairAction `json:"repairAction,omitempty"` +} + +// AvailabilitySet - Specifies information about the availability set that the virtual machine should be assigned to. Virtual +// machines specified in the same availability set are allocated to different nodes to maximize +// availability. For more information about availability sets, see Availability sets overview [https://docs.microsoft.com/azure/virtual-machines/availability-set-overview]. +// For more information on Azure planned maintenance, see Maintenance and updates for Virtual Machines in Azure [https://docs.microsoft.com/azure/virtual-machines/maintenance-and-updates] +// Currently, a VM can only be added to an availability set at creation time. An existing VM cannot be added to an availability +// set. +type AvailabilitySet struct { + // REQUIRED; Resource location + Location *string `json:"location,omitempty"` + + // The instance view of a resource. + Properties *AvailabilitySetProperties `json:"properties,omitempty"` + + // Sku of the availability set; only the name is required to be set. See AvailabilitySetSkuTypes for possible set of values. Use + // 'Aligned' for virtual machines with managed disks and 'Classic' for virtual + // machines with unmanaged disks. Default value is 'Classic'. + SKU *SKU `json:"sku,omitempty"` + + // Resource tags + Tags map[string]*string `json:"tags,omitempty"` + + // READ-ONLY; Resource Id + ID *string `json:"id,omitempty" azure:"ro"` + + // READ-ONLY; Resource name + Name *string `json:"name,omitempty" azure:"ro"` + + // READ-ONLY; Resource type + Type *string `json:"type,omitempty" azure:"ro"` +} + +// AvailabilitySetListResult - The List Availability Set operation response. +type AvailabilitySetListResult struct { + // REQUIRED; The list of availability sets + Value []*AvailabilitySet `json:"value,omitempty"` + + // The URI to fetch the next page of AvailabilitySets. Call ListNext() with this URI to fetch the next page of AvailabilitySets. + NextLink *string `json:"nextLink,omitempty"` +} + +// AvailabilitySetProperties - The instance view of a resource. +type AvailabilitySetProperties struct { + // Fault Domain count. + PlatformFaultDomainCount *int32 `json:"platformFaultDomainCount,omitempty"` + + // Update Domain count. + PlatformUpdateDomainCount *int32 `json:"platformUpdateDomainCount,omitempty"` + + // Specifies information about the proximity placement group that the availability set should be assigned to. + // Minimum api-version: 2018-04-01. + ProximityPlacementGroup *SubResource `json:"proximityPlacementGroup,omitempty"` + + // A list of references to all virtual machines in the availability set. + VirtualMachines []*SubResource `json:"virtualMachines,omitempty"` + + // READ-ONLY; The resource status information.
+ Statuses []*InstanceViewStatus `json:"statuses,omitempty" azure:"ro"` +} + +// AvailabilitySetUpdate - Specifies information about the availability set that the virtual machine should be assigned to. +// Only tags may be updated. +type AvailabilitySetUpdate struct { + // The instance view of a resource. + Properties *AvailabilitySetProperties `json:"properties,omitempty"` + + // Sku of the availability set + SKU *SKU `json:"sku,omitempty"` + + // Resource tags + Tags map[string]*string `json:"tags,omitempty"` +} + +// AvailabilitySetsClientCreateOrUpdateOptions contains the optional parameters for the AvailabilitySetsClient.CreateOrUpdate +// method. +type AvailabilitySetsClientCreateOrUpdateOptions struct { + // placeholder for future optional parameters +} + +// AvailabilitySetsClientDeleteOptions contains the optional parameters for the AvailabilitySetsClient.Delete method. +type AvailabilitySetsClientDeleteOptions struct { + // placeholder for future optional parameters +} + +// AvailabilitySetsClientGetOptions contains the optional parameters for the AvailabilitySetsClient.Get method. +type AvailabilitySetsClientGetOptions struct { + // placeholder for future optional parameters +} + +// AvailabilitySetsClientListAvailableSizesOptions contains the optional parameters for the AvailabilitySetsClient.ListAvailableSizes +// method. +type AvailabilitySetsClientListAvailableSizesOptions struct { + // placeholder for future optional parameters +} + +// AvailabilitySetsClientListBySubscriptionOptions contains the optional parameters for the AvailabilitySetsClient.ListBySubscription +// method. +type AvailabilitySetsClientListBySubscriptionOptions struct { + // The expand expression to apply to the operation. Allowed values are 'instanceView'. + Expand *string +} + +// AvailabilitySetsClientListOptions contains the optional parameters for the AvailabilitySetsClient.List method. +type AvailabilitySetsClientListOptions struct { + // placeholder for future optional parameters +} + +// AvailabilitySetsClientUpdateOptions contains the optional parameters for the AvailabilitySetsClient.Update method. +type AvailabilitySetsClientUpdateOptions struct { + // placeholder for future optional parameters +} + +// AvailablePatchSummary - Describes the properties of a virtual machine instance view for available patch summary. +type AvailablePatchSummary struct { + // READ-ONLY; The activity ID of the operation that produced this result. It is used to correlate across CRP and extension + // logs. + AssessmentActivityID *string `json:"assessmentActivityId,omitempty" azure:"ro"` + + // READ-ONLY; The number of critical or security patches that have been detected as available and not yet installed. + CriticalAndSecurityPatchCount *int32 `json:"criticalAndSecurityPatchCount,omitempty" azure:"ro"` + + // READ-ONLY; The errors that were encountered during execution of the operation. The details array contains the list of them. + Error *APIError `json:"error,omitempty" azure:"ro"` + + // READ-ONLY; The UTC timestamp when the operation began. + LastModifiedTime *time.Time `json:"lastModifiedTime,omitempty" azure:"ro"` + + // READ-ONLY; The number of all available patches excluding critical and security. + OtherPatchCount *int32 `json:"otherPatchCount,omitempty" azure:"ro"` + + // READ-ONLY; The overall reboot status of the VM. It will be true when partially installed patches require a reboot to complete + // installation but the reboot has not yet occurred.
+ RebootPending *bool `json:"rebootPending,omitempty" azure:"ro"` + + // READ-ONLY; The UTC timestamp when the operation began. + StartTime *time.Time `json:"startTime,omitempty" azure:"ro"` + + // READ-ONLY; The overall success or failure status of the operation. It remains "InProgress" until the operation completes. + // At that point it will become "Unknown", "Failed", "Succeeded", or + // "CompletedWithWarnings." + Status *PatchOperationStatus `json:"status,omitempty" azure:"ro"` +} + +// BillingProfile - Specifies the billing-related details of an Azure Spot VM or VMSS. +// Minimum api-version: 2019-03-01. +type BillingProfile struct { + // Specifies the maximum price you are willing to pay for an Azure Spot VM/VMSS. This price is in US Dollars. + // This price will be compared with the current Azure Spot price for the VM size. Also, the prices are compared at the time + // of create/update of Azure Spot VM/VMSS and the operation will only succeed if + // the maxPrice is greater than the current Azure Spot price. + // The maxPrice will also be used for evicting an Azure Spot VM/VMSS if the current Azure Spot price goes beyond the maxPrice + // after creation of VM/VMSS. + // Possible values are: + // - Any decimal value greater than zero. Example: 0.01538 + // -1 – indicates that the default price is capped at the on-demand price. + // You can set the maxPrice to -1 to indicate that the Azure Spot VM/VMSS should not be evicted for price reasons. Also, the + // default max price is -1 if it is not provided by you. + // Minimum api-version: 2019-03-01. + MaxPrice *float64 `json:"maxPrice,omitempty"` +} + +// BootDiagnostics - Boot Diagnostics is a debugging feature which allows you to view Console Output and Screenshot to diagnose +// VM status. +// You can easily view the output of your console log. +// Azure also enables you to see a screenshot of the VM from the hypervisor. +type BootDiagnostics struct { + // Whether boot diagnostics should be enabled on the Virtual Machine. + Enabled *bool `json:"enabled,omitempty"` + + // Uri of the storage account to use for placing the console output and screenshot. + // If storageUri is not specified while enabling boot diagnostics, managed storage will be used. + StorageURI *string `json:"storageUri,omitempty"` +} + +// BootDiagnosticsInstanceView - The instance view of a virtual machine boot diagnostics. +type BootDiagnosticsInstanceView struct { + // READ-ONLY; The console screenshot blob URI. + // NOTE: This will not be set if boot diagnostics is currently enabled with managed storage. + ConsoleScreenshotBlobURI *string `json:"consoleScreenshotBlobUri,omitempty" azure:"ro"` + + // READ-ONLY; The serial console log blob Uri. + // NOTE: This will not be set if boot diagnostics is currently enabled with managed storage. + SerialConsoleLogBlobURI *string `json:"serialConsoleLogBlobUri,omitempty" azure:"ro"` + + // READ-ONLY; The boot diagnostics status information for the VM. + // NOTE: It will be set only if there are errors encountered in enabling boot diagnostics. + Status *InstanceViewStatus `json:"status,omitempty" azure:"ro"` +} + +// CapacityReservation - Specifies information about the capacity reservation. +type CapacityReservation struct { + // REQUIRED; Resource location + Location *string `json:"location,omitempty"` + + // REQUIRED; SKU of the resource for which capacity needs to be reserved. The SKU name and capacity are required to be set. Currently + // VM Skus with the capability called 'CapacityReservationSupported' set to true are + // supported.
Refer to List Microsoft.Compute SKUs in a region (https://docs.microsoft.com/rest/api/compute/resourceskus/list) + // for supported values. + SKU *SKU `json:"sku,omitempty"` + + // Properties of the Capacity reservation. + Properties *CapacityReservationProperties `json:"properties,omitempty"` + + // Resource tags + Tags map[string]*string `json:"tags,omitempty"` + + // Availability Zone to use for this capacity reservation. The zone has to be a single value and should be part of the + // list of zones specified during the capacity reservation group creation. The zone + // can be assigned only during creation. If not provided, the reservation supports only non-zonal deployments. If provided, + // enforces VM/VMSS using this capacity reservation to be in the same zone. + Zones []*string `json:"zones,omitempty"` + + // READ-ONLY; Resource Id + ID *string `json:"id,omitempty" azure:"ro"` + + // READ-ONLY; Resource name + Name *string `json:"name,omitempty" azure:"ro"` + + // READ-ONLY; Resource type + Type *string `json:"type,omitempty" azure:"ro"` +} + +// CapacityReservationGroup - Specifies information about the capacity reservation group that the capacity reservations should +// be assigned to. +// Currently, a capacity reservation can only be added to a capacity reservation group at creation time. An existing capacity +// reservation cannot be added or moved to another capacity reservation group. +type CapacityReservationGroup struct { + // REQUIRED; Resource location + Location *string `json:"location,omitempty"` + + // capacity reservation group Properties. + Properties *CapacityReservationGroupProperties `json:"properties,omitempty"` + + // Resource tags + Tags map[string]*string `json:"tags,omitempty"` + + // Availability Zones to use for this capacity reservation group. The zones can be assigned only during creation. If not provided, + // the group supports only regional resources in the region. If provided, + // enforces each capacity reservation in the group to be in one of the zones. + Zones []*string `json:"zones,omitempty"` + + // READ-ONLY; Resource Id + ID *string `json:"id,omitempty" azure:"ro"` + + // READ-ONLY; Resource name + Name *string `json:"name,omitempty" azure:"ro"` + + // READ-ONLY; Resource type + Type *string `json:"type,omitempty" azure:"ro"` +} + +type CapacityReservationGroupInstanceView struct { + // READ-ONLY; List of instance views of the capacity reservations under the capacity reservation group. + CapacityReservations []*CapacityReservationInstanceViewWithName `json:"capacityReservations,omitempty" azure:"ro"` +} + +// CapacityReservationGroupListResult - The List capacity reservation group with resource group response. +type CapacityReservationGroupListResult struct { + // REQUIRED; The list of capacity reservation groups + Value []*CapacityReservationGroup `json:"value,omitempty"` + + // The URI to fetch the next page of capacity reservation groups. Call ListNext() with this URI to fetch the next page of + // capacity reservation groups. + NextLink *string `json:"nextLink,omitempty"` +} + +// CapacityReservationGroupProperties - capacity reservation group Properties. +type CapacityReservationGroupProperties struct { + // READ-ONLY; A list of all capacity reservation resource ids that belong to the capacity reservation group.
+ CapacityReservations []*SubResourceReadOnly `json:"capacityReservations,omitempty" azure:"ro"` + + // READ-ONLY; The capacity reservation group instance view which has the list of instance views for all the capacity reservations + // that belong to the capacity reservation group. + InstanceView *CapacityReservationGroupInstanceView `json:"instanceView,omitempty" azure:"ro"` + + // READ-ONLY; A list of references to all virtual machines associated with the capacity reservation group. + VirtualMachinesAssociated []*SubResourceReadOnly `json:"virtualMachinesAssociated,omitempty" azure:"ro"` +} + +// CapacityReservationGroupUpdate - Specifies information about the capacity reservation group. Only tags can be updated. +type CapacityReservationGroupUpdate struct { + // capacity reservation group Properties. + Properties *CapacityReservationGroupProperties `json:"properties,omitempty"` + + // Resource tags + Tags map[string]*string `json:"tags,omitempty"` +} + +// CapacityReservationGroupsClientCreateOrUpdateOptions contains the optional parameters for the CapacityReservationGroupsClient.CreateOrUpdate +// method. +type CapacityReservationGroupsClientCreateOrUpdateOptions struct { + // placeholder for future optional parameters +} + +// CapacityReservationGroupsClientDeleteOptions contains the optional parameters for the CapacityReservationGroupsClient.Delete +// method. +type CapacityReservationGroupsClientDeleteOptions struct { + // placeholder for future optional parameters +} + +// CapacityReservationGroupsClientGetOptions contains the optional parameters for the CapacityReservationGroupsClient.Get +// method. +type CapacityReservationGroupsClientGetOptions struct { + // The expand expression to apply on the operation. 'InstanceView' will retrieve the list of instance views of the capacity + // reservations under the capacity reservation group which is a snapshot of the + // runtime properties of a capacity reservation that is managed by the platform and can change outside of control plane operations. + Expand *CapacityReservationGroupInstanceViewTypes +} + +// CapacityReservationGroupsClientListByResourceGroupOptions contains the optional parameters for the CapacityReservationGroupsClient.ListByResourceGroup +// method. +type CapacityReservationGroupsClientListByResourceGroupOptions struct { + // The expand expression to apply on the operation. Based on the expand param(s) specified we return Virtual Machine or ScaleSet + // VM Instance or both resource Ids which are associated with the capacity + // reservation group in the response. + Expand *ExpandTypesForGetCapacityReservationGroups +} + +// CapacityReservationGroupsClientListBySubscriptionOptions contains the optional parameters for the CapacityReservationGroupsClient.ListBySubscription +// method. +type CapacityReservationGroupsClientListBySubscriptionOptions struct { + // The expand expression to apply on the operation. Based on the expand param(s) specified we return Virtual Machine or ScaleSet + // VM Instance or both resource Ids which are associated with the capacity + // reservation group in the response. + Expand *ExpandTypesForGetCapacityReservationGroups +} + +// CapacityReservationGroupsClientUpdateOptions contains the optional parameters for the CapacityReservationGroupsClient.Update +// method.
+type CapacityReservationGroupsClientUpdateOptions struct { + // placeholder for future optional parameters +} + +// CapacityReservationInstanceView - The instance view of a capacity reservation that provides a snapshot of the runtime +// properties of the capacity reservation that is managed by the platform and can change outside of control plane +// operations. +type CapacityReservationInstanceView struct { + // The resource status information. + Statuses []*InstanceViewStatus `json:"statuses,omitempty"` + + // Unutilized capacity of the capacity reservation. + UtilizationInfo *CapacityReservationUtilization `json:"utilizationInfo,omitempty"` +} + +// CapacityReservationInstanceViewWithName - The instance view of a capacity reservation that includes the name of the capacity +// reservation. It is used for the response to the instance view of a capacity reservation group. +type CapacityReservationInstanceViewWithName struct { + // The resource status information. + Statuses []*InstanceViewStatus `json:"statuses,omitempty"` + + // Unutilized capacity of the capacity reservation. + UtilizationInfo *CapacityReservationUtilization `json:"utilizationInfo,omitempty"` + + // READ-ONLY; The name of the capacity reservation. + Name *string `json:"name,omitempty" azure:"ro"` +} + +// CapacityReservationListResult - The list capacity reservation operation response. +type CapacityReservationListResult struct { + // REQUIRED; The list of capacity reservations + Value []*CapacityReservation `json:"value,omitempty"` + + // The URI to fetch the next page of capacity reservations. Call ListNext() with this URI to fetch the next page of capacity + // reservations. + NextLink *string `json:"nextLink,omitempty"` +} + +// CapacityReservationProfile - The parameters of a capacity reservation Profile. +type CapacityReservationProfile struct { + // Specifies the capacity reservation group resource id that should be used for allocating the virtual machine or scaleset + // vm instances provided that enough capacity has been reserved. Please refer to + // https://aka.ms/CapacityReservation for more details. + CapacityReservationGroup *SubResource `json:"capacityReservationGroup,omitempty"` +} + +// CapacityReservationProperties - Properties of the Capacity reservation. +type CapacityReservationProperties struct { + // READ-ONLY; The Capacity reservation instance view. + InstanceView *CapacityReservationInstanceView `json:"instanceView,omitempty" azure:"ro"` + + // READ-ONLY; The provisioning state, which only appears in the response. + ProvisioningState *string `json:"provisioningState,omitempty" azure:"ro"` + + // READ-ONLY; The date time when the capacity reservation was last updated. + ProvisioningTime *time.Time `json:"provisioningTime,omitempty" azure:"ro"` + + // READ-ONLY; A unique id generated and assigned to the capacity reservation by the platform which does not change throughout + // the lifetime of the resource. + ReservationID *string `json:"reservationId,omitempty" azure:"ro"` + + // READ-ONLY; Specifies the time at which the Capacity Reservation resource was created. + // Minimum api-version: 2022-03-01. + TimeCreated *time.Time `json:"timeCreated,omitempty" azure:"ro"` + + // READ-ONLY; A list of all virtual machine resource ids that are associated with the capacity reservation. + VirtualMachinesAssociated []*SubResourceReadOnly `json:"virtualMachinesAssociated,omitempty" azure:"ro"` +} + +// CapacityReservationUpdate - Specifies information about the capacity reservation.
+// Only tags and sku.capacity can be updated.
+type CapacityReservationUpdate struct {
+	// Properties of the Capacity reservation.
+	Properties *CapacityReservationProperties `json:"properties,omitempty"`
+
+	// SKU of the resource for which capacity needs to be reserved. The SKU name and capacity are required to be set. Currently,
+	// VM SKUs with the capability called 'CapacityReservationSupported' set to true are
+	// supported. Refer to List Microsoft.Compute SKUs in a region (https://docs.microsoft.com/rest/api/compute/resourceskus/list)
+	// for supported values.
+	SKU *SKU `json:"sku,omitempty"`
+
+	// Resource tags
+	Tags map[string]*string `json:"tags,omitempty"`
+}
+
+// CapacityReservationUtilization - Represents the capacity reservation utilization in terms of resources allocated.
+type CapacityReservationUtilization struct {
+	// READ-ONLY; A list of all virtual machines resource ids allocated against the capacity reservation.
+	VirtualMachinesAllocated []*SubResourceReadOnly `json:"virtualMachinesAllocated,omitempty" azure:"ro"`
+}
+
+// CapacityReservationsClientBeginCreateOrUpdateOptions contains the optional parameters for the CapacityReservationsClient.BeginCreateOrUpdate
+// method.
+type CapacityReservationsClientBeginCreateOrUpdateOptions struct {
+	// Resumes the LRO from the provided token.
+	ResumeToken string
+}
+
+// CapacityReservationsClientBeginDeleteOptions contains the optional parameters for the CapacityReservationsClient.BeginDelete
+// method.
+type CapacityReservationsClientBeginDeleteOptions struct {
+	// Resumes the LRO from the provided token.
+	ResumeToken string
+}
+
+// CapacityReservationsClientBeginUpdateOptions contains the optional parameters for the CapacityReservationsClient.BeginUpdate
+// method.
+type CapacityReservationsClientBeginUpdateOptions struct {
+	// Resumes the LRO from the provided token.
+	ResumeToken string
+}
+
+// CapacityReservationsClientGetOptions contains the optional parameters for the CapacityReservationsClient.Get method.
+type CapacityReservationsClientGetOptions struct {
+	// The expand expression to apply on the operation. 'InstanceView' retrieves a snapshot of the runtime properties of the capacity
+	// reservation that is managed by the platform and can change outside of
+	// control plane operations.
+	Expand *CapacityReservationInstanceViewTypes
+}
+
+// CapacityReservationsClientListByCapacityReservationGroupOptions contains the optional parameters for the CapacityReservationsClient.ListByCapacityReservationGroup
+// method.
+type CapacityReservationsClientListByCapacityReservationGroupOptions struct {
+	// placeholder for future optional parameters
+}
+
+// CloudError - An error response from the Compute service.
+type CloudError struct {
+	// API error.
+	Error *APIError `json:"error,omitempty"`
+}
+
+// CloudService - Describes the cloud service.
+type CloudService struct {
+	// REQUIRED; Resource location.
+	Location *string `json:"location,omitempty"`
+
+	// Cloud service properties
+	Properties *CloudServiceProperties `json:"properties,omitempty"`
+
+	// Resource tags.
+	Tags map[string]*string `json:"tags,omitempty"`
+
+	// READ-ONLY; Resource Id.
+	ID *string `json:"id,omitempty" azure:"ro"`
+
+	// READ-ONLY; Resource name.
+	Name *string `json:"name,omitempty" azure:"ro"`
+
+	// READ-ONLY; Resource type.
+	Type *string `json:"type,omitempty" azure:"ro"`
+}
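+
+// A minimal sketch (illustrative only): a patch body that scales a capacity
+// reservation, since only tags and sku.capacity are mutable on update. The SKU
+// name shown and the to.Ptr helper are assumptions of this example.
+//
+//	update := CapacityReservationUpdate{
+//		SKU: &SKU{
+//			Name:     to.Ptr("Standard_DS1_v2"),
+//			Capacity: to.Ptr[int64](4),
+//		},
+//		Tags: map[string]*string{"env": to.Ptr("dev")},
+//	}
+
+// CloudServiceExtensionProfile - Describes a cloud service extension profile.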
+type CloudServiceExtensionProfile struct {
+	// List of extensions for the cloud service.
+	Extensions []*Extension `json:"extensions,omitempty"`
+}
+
+// CloudServiceExtensionProperties - Extension Properties.
+type CloudServiceExtensionProperties struct {
+	// Explicitly specify whether the platform can automatically upgrade typeHandlerVersion to higher minor versions when they become
+	// available.
+	AutoUpgradeMinorVersion *bool `json:"autoUpgradeMinorVersion,omitempty"`
+
+	// Tag to force apply the provided public and protected settings. Changing the tag value allows for re-running the extension
+	// without changing any of the public or protected settings. If forceUpdateTag is
+	// not changed, updates to public or protected settings would still be applied by the handler. If neither forceUpdateTag nor
+	// any of the public or protected settings change, the extension would flow to the role
+	// instance with the same sequence-number, and it is up to the handler implementation whether to re-run it or not.
+	ForceUpdateTag *string `json:"forceUpdateTag,omitempty"`
+
+	// Protected settings for the extension which are encrypted before being sent to the role instance.
+	ProtectedSettings             *string                              `json:"protectedSettings,omitempty"`
+	ProtectedSettingsFromKeyVault *CloudServiceVaultAndSecretReference `json:"protectedSettingsFromKeyVault,omitempty"`
+
+	// The name of the extension handler publisher.
+	Publisher *string `json:"publisher,omitempty"`
+
+	// Optional list of roles to apply this extension to. If the property is not specified or '*' is specified, the extension is applied
+	// to all roles in the cloud service.
+	RolesAppliedTo []*string `json:"rolesAppliedTo,omitempty"`
+
+	// Public settings for the extension. For JSON extensions, this is the JSON settings for the extension. For XML Extension
+	// (like RDP), this is the XML setting for the extension.
+	Settings *string `json:"settings,omitempty"`
+
+	// Specifies the type of the extension.
+	Type *string `json:"type,omitempty"`
+
+	// Specifies the version of the extension. If this element is not specified or an
+	// asterisk (*) is used as the value, the latest version of the extension is used.
+	// If the value is specified with a major version number and an asterisk as the minor version number (X.*), the latest minor
+	// version of the specified major version is selected. If a major version number
+	// and a minor version number are specified (X.Y), the specific extension version is selected. If a version is specified,
+	// an auto-upgrade is performed on the role instance.
+	TypeHandlerVersion *string `json:"typeHandlerVersion,omitempty"`
+
+	// READ-ONLY; The provisioning state, which only appears in the response.
+	ProvisioningState *string `json:"provisioningState,omitempty" azure:"ro"`
+}
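+
+// A minimal sketch (illustrative only): declaring an extension with a pinned
+// major version and automatic minor-version upgrades, applied to all roles.
+// Publisher, type, and version values are placeholders assumed for this
+// example; to.Ptr is the usual azure-sdk-for-go pointer helper.
+//
+//	ext := CloudServiceExtensionProperties{
+//		Publisher:               to.Ptr("<publisher>"),
+//		Type:                    to.Ptr("<extension-type>"),
+//		TypeHandlerVersion:      to.Ptr("1.*"),
+//		AutoUpgradeMinorVersion: to.Ptr(true),
+//		RolesAppliedTo:          []*string{to.Ptr("*")},
+//	}
+
+// CloudServiceInstanceView - InstanceView of CloudService as a whole
+type CloudServiceInstanceView struct {
+	// Instance view statuses.
+	RoleInstance *InstanceViewStatusesSummary `json:"roleInstance,omitempty"`
+
+	// READ-ONLY; Specifies a list of unique identifiers generated internally for the cloud service.
+	// NOTE: If you are using Azure Diagnostics extension, this property can be used as 'DeploymentId' for querying details.
+	PrivateIDs []*string `json:"privateIds,omitempty" azure:"ro"`
+
+	// READ-ONLY; The version of the SDK that was used to generate the package for the cloud service.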
+	SdkVersion *string `json:"sdkVersion,omitempty" azure:"ro"`
+
+	// READ-ONLY
+	Statuses []*ResourceInstanceViewStatus `json:"statuses,omitempty" azure:"ro"`
+}
+
+type CloudServiceListResult struct {
+	// REQUIRED
+	Value    []*CloudService `json:"value,omitempty"`
+	NextLink *string         `json:"nextLink,omitempty"`
+}
+
+// CloudServiceNetworkProfile - Network Profile for the cloud service.
+type CloudServiceNetworkProfile struct {
+	// List of Load balancer configurations. A cloud service can have up to two load balancer configurations, corresponding to a
+	// Public Load Balancer and an Internal Load Balancer.
+	LoadBalancerConfigurations []*LoadBalancerConfiguration `json:"loadBalancerConfigurations,omitempty"`
+
+	// The id reference of the cloud service containing the target IP with which the subject cloud service can perform a swap.
+	// This property cannot be updated once it is set. The swappable cloud service
+	// referred to by this id must be present, otherwise an error will be thrown.
+	SwappableCloudService *SubResource `json:"swappableCloudService,omitempty"`
+}
+
+// CloudServiceOperatingSystemsClientGetOSFamilyOptions contains the optional parameters for the CloudServiceOperatingSystemsClient.GetOSFamily
+// method.
+type CloudServiceOperatingSystemsClientGetOSFamilyOptions struct {
+	// placeholder for future optional parameters
+}
+
+// CloudServiceOperatingSystemsClientGetOSVersionOptions contains the optional parameters for the CloudServiceOperatingSystemsClient.GetOSVersion
+// method.
+type CloudServiceOperatingSystemsClientGetOSVersionOptions struct {
+	// placeholder for future optional parameters
+}
+
+// CloudServiceOperatingSystemsClientListOSFamiliesOptions contains the optional parameters for the CloudServiceOperatingSystemsClient.ListOSFamilies
+// method.
+type CloudServiceOperatingSystemsClientListOSFamiliesOptions struct {
+	// placeholder for future optional parameters
+}
+
+// CloudServiceOperatingSystemsClientListOSVersionsOptions contains the optional parameters for the CloudServiceOperatingSystemsClient.ListOSVersions
+// method.
+type CloudServiceOperatingSystemsClientListOSVersionsOptions struct {
+	// placeholder for future optional parameters
+}
+
+// CloudServiceOsProfile - Describes the OS profile for the cloud service.
+type CloudServiceOsProfile struct {
+	// Specifies the set of certificates that should be installed onto the role instances.
+	Secrets []*CloudServiceVaultSecretGroup `json:"secrets,omitempty"`
+}
+
+// CloudServiceProperties - Cloud service properties
+type CloudServiceProperties struct {
+	// (Optional) Indicates whether the role sku properties (roleProfile.roles.sku) specified in the model/template should override
+	// the role instance count and vm size specified in the .cscfg and .csdef
+	// respectively. The default value is false.
+	AllowModelOverride *bool `json:"allowModelOverride,omitempty"`
+
+	// Specifies the XML service configuration (.cscfg) for the cloud service.
+	Configuration *string `json:"configuration,omitempty"`
+
+	// Specifies a URL that refers to the location of the service configuration in the Blob service. The service package URL can
+	// be a Shared Access Signature (SAS) URI from any storage account. This is a
+	// write-only property and is not returned in GET calls.
+	ConfigurationURL *string `json:"configurationUrl,omitempty"`
+
+	// Describes a cloud service extension profile.
+	ExtensionProfile *CloudServiceExtensionProfile `json:"extensionProfile,omitempty"`
+
+	// Network Profile for the cloud service.
+	NetworkProfile *CloudServiceNetworkProfile `json:"networkProfile,omitempty"`
+
+	// Describes the OS profile for the cloud service.
+	OSProfile *CloudServiceOsProfile `json:"osProfile,omitempty"`
+
+	// Specifies a URL that refers to the location of the service package in the Blob service. The service package URL can be a
+	// Shared Access Signature (SAS) URI from any storage account. This is a write-only
+	// property and is not returned in GET calls.
+	PackageURL *string `json:"packageUrl,omitempty"`
+
+	// Describes the role profile for the cloud service.
+	RoleProfile *CloudServiceRoleProfile `json:"roleProfile,omitempty"`
+
+	// (Optional) Indicates whether to start the cloud service immediately after it is created. The default value is true. If
+	// false, the service model is still deployed, but the code is not run immediately.
+	// Instead, the service is PoweredOff until you call Start, at which time the service will be started. A deployed service
+	// still incurs charges, even if it is powered off.
+	StartCloudService *bool `json:"startCloudService,omitempty"`
+
+	// Update mode for the cloud service. Role instances are allocated to update domains when the service is deployed. Updates
+	// can be initiated manually in each update domain or initiated automatically in
+	// all update domains. Possible Values are
+	// Auto
+	// Manual
+	// Simultaneous
+	// If not specified, the default value is Auto. If set to Manual, PUT UpdateDomain must be called to apply the update. If
+	// set to Auto, the update is automatically applied to each update domain in
+	// sequence.
+	UpgradeMode *CloudServiceUpgradeMode `json:"upgradeMode,omitempty"`
+
+	// READ-ONLY; The provisioning state, which only appears in the response.
+	ProvisioningState *string `json:"provisioningState,omitempty" azure:"ro"`
+
+	// READ-ONLY; The unique identifier for the cloud service.
+	UniqueID *string `json:"uniqueId,omitempty" azure:"ro"`
+}
+
+// CloudServiceRole - Describes a role of the cloud service.
+type CloudServiceRole struct {
+	Properties *CloudServiceRoleProperties `json:"properties,omitempty"`
+
+	// Describes the cloud service role sku.
+	SKU *CloudServiceRoleSKU `json:"sku,omitempty"`
+
+	// READ-ONLY; Resource id
+	ID *string `json:"id,omitempty" azure:"ro"`
+
+	// READ-ONLY; Resource location
+	Location *string `json:"location,omitempty" azure:"ro"`
+
+	// READ-ONLY; Resource name
+	Name *string `json:"name,omitempty" azure:"ro"`
+
+	// READ-ONLY; Resource type
+	Type *string `json:"type,omitempty" azure:"ro"`
+}
+
+// CloudServiceRoleInstancesClientBeginDeleteOptions contains the optional parameters for the CloudServiceRoleInstancesClient.BeginDelete
+// method.
+type CloudServiceRoleInstancesClientBeginDeleteOptions struct {
+	// Resumes the LRO from the provided token.
+	ResumeToken string
+}
+
+// CloudServiceRoleInstancesClientBeginRebuildOptions contains the optional parameters for the CloudServiceRoleInstancesClient.BeginRebuild
+// method.
+type CloudServiceRoleInstancesClientBeginRebuildOptions struct {
+	// Resumes the LRO from the provided token.
+	ResumeToken string
+}
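+
+// A minimal sketch (illustrative only): cloud service properties that deploy a
+// package without starting it and require manual update-domain walks. The SAS
+// URI placeholders and the CloudServiceUpgradeModeManual constant name follow
+// the usual generated-enum conventions and are assumptions of this example.
+//
+//	props := CloudServiceProperties{
+//		PackageURL:        to.Ptr("<sas-uri-to-package>"),
+//		ConfigurationURL:  to.Ptr("<sas-uri-to-cscfg>"),
+//		StartCloudService: to.Ptr(false),
+//		UpgradeMode:       to.Ptr(CloudServiceUpgradeModeManual),
+//	}
+
+// CloudServiceRoleInstancesClientBeginReimageOptions contains the optional parameters for the CloudServiceRoleInstancesClient.BeginReimage
+// method.
+type CloudServiceRoleInstancesClientBeginReimageOptions struct {
+	// Resumes the LRO from the provided token.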
+ ResumeToken string +} + +// CloudServiceRoleInstancesClientBeginRestartOptions contains the optional parameters for the CloudServiceRoleInstancesClient.BeginRestart +// method. +type CloudServiceRoleInstancesClientBeginRestartOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// CloudServiceRoleInstancesClientGetInstanceViewOptions contains the optional parameters for the CloudServiceRoleInstancesClient.GetInstanceView +// method. +type CloudServiceRoleInstancesClientGetInstanceViewOptions struct { + // placeholder for future optional parameters +} + +// CloudServiceRoleInstancesClientGetOptions contains the optional parameters for the CloudServiceRoleInstancesClient.Get +// method. +type CloudServiceRoleInstancesClientGetOptions struct { + // The expand expression to apply to the operation. 'UserData' is not supported for cloud services. + Expand *InstanceViewTypes +} + +// CloudServiceRoleInstancesClientGetRemoteDesktopFileOptions contains the optional parameters for the CloudServiceRoleInstancesClient.GetRemoteDesktopFile +// method. +type CloudServiceRoleInstancesClientGetRemoteDesktopFileOptions struct { + // placeholder for future optional parameters +} + +// CloudServiceRoleInstancesClientListOptions contains the optional parameters for the CloudServiceRoleInstancesClient.List +// method. +type CloudServiceRoleInstancesClientListOptions struct { + // The expand expression to apply to the operation. 'UserData' is not supported for cloud services. + Expand *InstanceViewTypes +} + +type CloudServiceRoleListResult struct { + // REQUIRED + Value []*CloudServiceRole `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// CloudServiceRoleProfile - Describes the role profile for the cloud service. +type CloudServiceRoleProfile struct { + // List of roles for the cloud service. + Roles []*CloudServiceRoleProfileProperties `json:"roles,omitempty"` +} + +// CloudServiceRoleProfileProperties - Describes the role properties. +type CloudServiceRoleProfileProperties struct { + // Resource name. + Name *string `json:"name,omitempty"` + + // Describes the cloud service role sku. + SKU *CloudServiceRoleSKU `json:"sku,omitempty"` +} + +type CloudServiceRoleProperties struct { + // READ-ONLY; Specifies the ID which uniquely identifies a cloud service role. + UniqueID *string `json:"uniqueId,omitempty" azure:"ro"` +} + +// CloudServiceRoleSKU - Describes the cloud service role sku. +type CloudServiceRoleSKU struct { + // Specifies the number of role instances in the cloud service. + Capacity *int64 `json:"capacity,omitempty"` + + // The sku name. NOTE: If the new SKU is not supported on the hardware the cloud service is currently on, you need to delete + // and recreate the cloud service or move back to the old sku. + Name *string `json:"name,omitempty"` + + // Specifies the tier of the cloud service. Possible Values are + // Standard + // Basic + Tier *string `json:"tier,omitempty"` +} + +// CloudServiceRolesClientGetOptions contains the optional parameters for the CloudServiceRolesClient.Get method. +type CloudServiceRolesClientGetOptions struct { + // placeholder for future optional parameters +} + +// CloudServiceRolesClientListOptions contains the optional parameters for the CloudServiceRolesClient.List method. 
+type CloudServiceRolesClientListOptions struct { + // placeholder for future optional parameters +} + +type CloudServiceUpdate struct { + // Resource tags + Tags map[string]*string `json:"tags,omitempty"` +} + +type CloudServiceVaultAndSecretReference struct { + SecretURL *string `json:"secretUrl,omitempty"` + SourceVault *SubResource `json:"sourceVault,omitempty"` +} + +// CloudServiceVaultCertificate - Describes a single certificate reference in a Key Vault, and where the certificate should +// reside on the role instance. +type CloudServiceVaultCertificate struct { + // This is the URL of a certificate that has been uploaded to Key Vault as a secret. + CertificateURL *string `json:"certificateUrl,omitempty"` +} + +// CloudServiceVaultSecretGroup - Describes a set of certificates which are all in the same Key Vault. +type CloudServiceVaultSecretGroup struct { + // The relative URL of the Key Vault containing all of the certificates in VaultCertificates. + SourceVault *SubResource `json:"sourceVault,omitempty"` + + // The list of key vault references in SourceVault which contain certificates. + VaultCertificates []*CloudServiceVaultCertificate `json:"vaultCertificates,omitempty"` +} + +// CloudServicesClientBeginCreateOrUpdateOptions contains the optional parameters for the CloudServicesClient.BeginCreateOrUpdate +// method. +type CloudServicesClientBeginCreateOrUpdateOptions struct { + // The cloud service object. + Parameters *CloudService + // Resumes the LRO from the provided token. + ResumeToken string +} + +// CloudServicesClientBeginDeleteInstancesOptions contains the optional parameters for the CloudServicesClient.BeginDeleteInstances +// method. +type CloudServicesClientBeginDeleteInstancesOptions struct { + // List of cloud service role instance names. + Parameters *RoleInstances + // Resumes the LRO from the provided token. + ResumeToken string +} + +// CloudServicesClientBeginDeleteOptions contains the optional parameters for the CloudServicesClient.BeginDelete method. +type CloudServicesClientBeginDeleteOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// CloudServicesClientBeginPowerOffOptions contains the optional parameters for the CloudServicesClient.BeginPowerOff method. +type CloudServicesClientBeginPowerOffOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// CloudServicesClientBeginRebuildOptions contains the optional parameters for the CloudServicesClient.BeginRebuild method. +type CloudServicesClientBeginRebuildOptions struct { + // List of cloud service role instance names. + Parameters *RoleInstances + // Resumes the LRO from the provided token. + ResumeToken string +} + +// CloudServicesClientBeginReimageOptions contains the optional parameters for the CloudServicesClient.BeginReimage method. +type CloudServicesClientBeginReimageOptions struct { + // List of cloud service role instance names. + Parameters *RoleInstances + // Resumes the LRO from the provided token. + ResumeToken string +} + +// CloudServicesClientBeginRestartOptions contains the optional parameters for the CloudServicesClient.BeginRestart method. +type CloudServicesClientBeginRestartOptions struct { + // List of cloud service role instance names. + Parameters *RoleInstances + // Resumes the LRO from the provided token. + ResumeToken string +} + +// CloudServicesClientBeginStartOptions contains the optional parameters for the CloudServicesClient.BeginStart method. 
+type CloudServicesClientBeginStartOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// CloudServicesClientBeginUpdateOptions contains the optional parameters for the CloudServicesClient.BeginUpdate method. +type CloudServicesClientBeginUpdateOptions struct { + // The cloud service object. + Parameters *CloudServiceUpdate + // Resumes the LRO from the provided token. + ResumeToken string +} + +// CloudServicesClientGetInstanceViewOptions contains the optional parameters for the CloudServicesClient.GetInstanceView +// method. +type CloudServicesClientGetInstanceViewOptions struct { + // placeholder for future optional parameters +} + +// CloudServicesClientGetOptions contains the optional parameters for the CloudServicesClient.Get method. +type CloudServicesClientGetOptions struct { + // placeholder for future optional parameters +} + +// CloudServicesClientListAllOptions contains the optional parameters for the CloudServicesClient.ListAll method. +type CloudServicesClientListAllOptions struct { + // placeholder for future optional parameters +} + +// CloudServicesClientListOptions contains the optional parameters for the CloudServicesClient.List method. +type CloudServicesClientListOptions struct { + // placeholder for future optional parameters +} + +// CloudServicesUpdateDomainClientBeginWalkUpdateDomainOptions contains the optional parameters for the CloudServicesUpdateDomainClient.BeginWalkUpdateDomain +// method. +type CloudServicesUpdateDomainClientBeginWalkUpdateDomainOptions struct { + // The update domain object. + Parameters *UpdateDomain + // Resumes the LRO from the provided token. + ResumeToken string +} + +// CloudServicesUpdateDomainClientGetUpdateDomainOptions contains the optional parameters for the CloudServicesUpdateDomainClient.GetUpdateDomain +// method. +type CloudServicesUpdateDomainClientGetUpdateDomainOptions struct { + // placeholder for future optional parameters +} + +// CloudServicesUpdateDomainClientListUpdateDomainsOptions contains the optional parameters for the CloudServicesUpdateDomainClient.ListUpdateDomains +// method. +type CloudServicesUpdateDomainClientListUpdateDomainsOptions struct { + // placeholder for future optional parameters +} + +// CommunityGalleriesClientGetOptions contains the optional parameters for the CommunityGalleriesClient.Get method. +type CommunityGalleriesClientGetOptions struct { + // placeholder for future optional parameters +} + +// CommunityGallery - Specifies information about the Community Gallery that you want to create or update. +type CommunityGallery struct { + // The identifier information of community gallery. + Identifier *CommunityGalleryIdentifier `json:"identifier,omitempty"` + + // READ-ONLY; Resource location + Location *string `json:"location,omitempty" azure:"ro"` + + // READ-ONLY; Resource name + Name *string `json:"name,omitempty" azure:"ro"` + + // READ-ONLY; Resource type + Type *string `json:"type,omitempty" azure:"ro"` +} + +// CommunityGalleryIdentifier - The identifier information of community gallery. +type CommunityGalleryIdentifier struct { + // The unique id of this community gallery. + UniqueID *string `json:"uniqueId,omitempty"` +} + +// CommunityGalleryImage - Specifies information about the gallery image definition that you want to create or update. +type CommunityGalleryImage struct { + // The identifier information of community gallery. 
+ Identifier *CommunityGalleryIdentifier `json:"identifier,omitempty"` + + // Describes the properties of a gallery image definition. + Properties *CommunityGalleryImageProperties `json:"properties,omitempty"` + + // READ-ONLY; Resource location + Location *string `json:"location,omitempty" azure:"ro"` + + // READ-ONLY; Resource name + Name *string `json:"name,omitempty" azure:"ro"` + + // READ-ONLY; Resource type + Type *string `json:"type,omitempty" azure:"ro"` +} + +// CommunityGalleryImageProperties - Describes the properties of a gallery image definition. +type CommunityGalleryImageProperties struct { + // REQUIRED; This is the gallery image definition identifier. + Identifier *GalleryImageIdentifier `json:"identifier,omitempty"` + + // REQUIRED; This property allows the user to specify whether the virtual machines created under this image are 'Generalized' + // or 'Specialized'. + OSState *OperatingSystemStateTypes `json:"osState,omitempty"` + + // REQUIRED; This property allows you to specify the type of the OS that is included in the disk when creating a VM from a + // managed image. + // Possible values are: + // Windows + // Linux + OSType *OperatingSystemTypes `json:"osType,omitempty"` + + // Describes the disallowed disk types. + Disallowed *Disallowed `json:"disallowed,omitempty"` + + // The end of life date of the gallery image definition. This property can be used for decommissioning purposes. This property + // is updatable. + EndOfLifeDate *time.Time `json:"endOfLifeDate,omitempty"` + + // A list of gallery image features. + Features []*GalleryImageFeature `json:"features,omitempty"` + + // The hypervisor generation of the Virtual Machine. Applicable to OS disks only. + HyperVGeneration *HyperVGeneration `json:"hyperVGeneration,omitempty"` + + // Describes the gallery image definition purchase plan. This is used by marketplace images. + PurchasePlan *ImagePurchasePlan `json:"purchasePlan,omitempty"` + + // The properties describe the recommended machine configuration for this Image Definition. These properties are updatable. + Recommended *RecommendedMachineConfiguration `json:"recommended,omitempty"` +} + +// CommunityGalleryImageVersion - Specifies information about the gallery image version that you want to create or update. +type CommunityGalleryImageVersion struct { + // The identifier information of community gallery. + Identifier *CommunityGalleryIdentifier `json:"identifier,omitempty"` + + // Describes the properties of a gallery image version. + Properties *CommunityGalleryImageVersionProperties `json:"properties,omitempty"` + + // READ-ONLY; Resource location + Location *string `json:"location,omitempty" azure:"ro"` + + // READ-ONLY; Resource name + Name *string `json:"name,omitempty" azure:"ro"` + + // READ-ONLY; Resource type + Type *string `json:"type,omitempty" azure:"ro"` +} + +// CommunityGalleryImageVersionProperties - Describes the properties of a gallery image version. +type CommunityGalleryImageVersionProperties struct { + // The end of life date of the gallery image version Definition. This property can be used for decommissioning purposes. This + // property is updatable. + EndOfLifeDate *time.Time `json:"endOfLifeDate,omitempty"` + + // The published date of the gallery image version Definition. This property can be used for decommissioning purposes. This + // property is updatable. 
+	PublishedDate *time.Time `json:"publishedDate,omitempty"`
+}
+
+// CommunityGalleryImageVersionsClientGetOptions contains the optional parameters for the CommunityGalleryImageVersionsClient.Get
+// method.
+type CommunityGalleryImageVersionsClientGetOptions struct {
+	// placeholder for future optional parameters
+}
+
+// CommunityGalleryImagesClientGetOptions contains the optional parameters for the CommunityGalleryImagesClient.Get method.
+type CommunityGalleryImagesClientGetOptions struct {
+	// placeholder for future optional parameters
+}
+
+// CommunityGalleryInfo - Information about the community gallery if the current gallery is shared to the community.
+type CommunityGalleryInfo struct {
+	// Community gallery publisher eula
+	Eula *string `json:"eula,omitempty"`
+
+	// Community gallery public name prefix
+	PublicNamePrefix *string `json:"publicNamePrefix,omitempty"`
+
+	// Community gallery publisher contact email
+	PublisherContact *string `json:"publisherContact,omitempty"`
+
+	// Community gallery publisher uri
+	PublisherURI *string `json:"publisherUri,omitempty"`
+
+	// READ-ONLY; Contains info about whether community gallery sharing is enabled.
+	CommunityGalleryEnabled *bool `json:"communityGalleryEnabled,omitempty" azure:"ro"`
+
+	// READ-ONLY; Community gallery public name list.
+	PublicNames []*string `json:"publicNames,omitempty" azure:"ro"`
+}
+
+// CreationData - Data used when creating a disk.
+type CreationData struct {
+	// REQUIRED; This enumerates the possible sources of a disk's creation.
+	CreateOption *DiskCreateOption `json:"createOption,omitempty"`
+
+	// Required if creating from a Gallery Image. The id of the ImageDiskReference will be the ARM id of the shared gallery image
+	// version from which to create a disk.
+	GalleryImageReference *ImageDiskReference `json:"galleryImageReference,omitempty"`
+
+	// Disk source information.
+	ImageReference *ImageDiskReference `json:"imageReference,omitempty"`
+
+	// Logical sector size in bytes for Ultra disks. Supported values are 512 and 4096. 4096 is the default.
+	LogicalSectorSize *int32 `json:"logicalSectorSize,omitempty"`
+
+	// If createOption is ImportSecure, this is the URI of a blob to be imported into VM guest state.
+	SecurityDataURI *string `json:"securityDataUri,omitempty"`
+
+	// If createOption is Copy, this is the ARM id of the source snapshot or disk.
+	SourceResourceID *string `json:"sourceResourceId,omitempty"`
+
+	// If createOption is Import, this is the URI of a blob to be imported into a managed disk.
+	SourceURI *string `json:"sourceUri,omitempty"`
+
+	// Required if createOption is Import. The Azure Resource Manager identifier of the storage account containing the blob to
+	// import as a disk.
+	StorageAccountID *string `json:"storageAccountId,omitempty"`
+
+	// If createOption is Upload, this is the size of the contents of the upload including the VHD footer. This value should be
+	// between 20972032 (20 MiB + 512 bytes for the VHD footer) and 35183298347520
+	// bytes (32 TiB + 512 bytes for the VHD footer).
+	UploadSizeBytes *int64 `json:"uploadSizeBytes,omitempty"`
+
+	// READ-ONLY; If this field is set, this is the unique id identifying the source of this resource.
+	SourceUniqueID *string `json:"sourceUniqueId,omitempty" azure:"ro"`
+}
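+
+// A minimal sketch (illustrative only): creation data for a managed disk copied
+// from an existing snapshot or disk. The DiskCreateOptionCopy constant name and
+// the to.Ptr helper follow the usual generated conventions and are assumptions
+// of this example.
+//
+//	cd := CreationData{
+//		CreateOption:     to.Ptr(DiskCreateOptionCopy),
+//		SourceResourceID: to.Ptr("<arm-id-of-source-snapshot-or-disk>"),
+//	}
+
+// DataDisk - Describes a data disk.
+type DataDisk struct {
+	// REQUIRED; Specifies how the virtual machine should be created.
+	// Possible values are:
+	// Attach – This value is used when you are using a specialized disk to create the virtual machine.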
+	// FromImage – This value is used when you are using an image to create the virtual machine. If you are using a platform
+	// image, you also use the imageReference element described above. If you are
+	// using a marketplace image, you also use the plan element previously described.
+	CreateOption *DiskCreateOptionTypes `json:"createOption,omitempty"`
+
+	// REQUIRED; Specifies the logical unit number of the data disk. This value is used to identify data disks within the VM and
+	// therefore must be unique for each data disk attached to a VM.
+	Lun *int32 `json:"lun,omitempty"`
+
+	// Specifies the caching requirements.
+	// Possible values are:
+	// None
+	// ReadOnly
+	// ReadWrite
+	// Default: None for Standard storage. ReadOnly for Premium storage
+	Caching *CachingTypes `json:"caching,omitempty"`
+
+	// Specifies whether the data disk should be deleted or detached upon VM deletion.
+	// Possible values:
+	// Delete If this value is used, the data disk is deleted when the VM is deleted.
+	// Detach If this value is used, the data disk is retained after the VM is deleted.
+	// The default value is set to Detach.
+	DeleteOption *DiskDeleteOptionTypes `json:"deleteOption,omitempty"`
+
+	// Specifies the detach behavior to be used while detaching a disk, or for a disk which is already in the process of detachment
+	// from the virtual machine. Supported values: ForceDetach.
+	// detachOption: ForceDetach is applicable only for managed data disks. If a previous detachment attempt of the data disk
+	// did not complete due to an unexpected failure from the virtual machine and the
+	// disk is still not released, then use force-detach as a last resort option to detach the disk forcibly from the VM. All writes
+	// might not have been flushed when using this detach behavior.
+	// This feature is still in preview mode and is not supported for VirtualMachineScaleSet. To force-detach a data disk, update
+	// toBeDetached to 'true' along with setting detachOption: 'ForceDetach'.
+	DetachOption *DiskDetachOptionTypes `json:"detachOption,omitempty"`
+
+	// Specifies the size of an empty data disk in gigabytes. This element can be used to overwrite the size of the disk in a
+	// virtual machine image.
+	// This value cannot be larger than 1023 GB.
+	DiskSizeGB *int32 `json:"diskSizeGB,omitempty"`
+
+	// The source user image virtual hard disk. The virtual hard disk will be copied before being attached to the virtual machine.
+	// If SourceImage is provided, the destination virtual hard drive must not
+	// exist.
+	Image *VirtualHardDisk `json:"image,omitempty"`
+
+	// The managed disk parameters.
+	ManagedDisk *ManagedDiskParameters `json:"managedDisk,omitempty"`
+
+	// The disk name.
+	Name *string `json:"name,omitempty"`
+
+	// Specifies whether the data disk is in the process of detachment from the VirtualMachine/VirtualMachineScaleset.
+	ToBeDetached *bool `json:"toBeDetached,omitempty"`
+
+	// The virtual hard disk.
+	Vhd *VirtualHardDisk `json:"vhd,omitempty"`
+
+	// Specifies whether writeAccelerator should be enabled or disabled on the disk.
+	WriteAcceleratorEnabled *bool `json:"writeAcceleratorEnabled,omitempty"`
+
+	// READ-ONLY; Specifies the Read-Write IOPS for the managed disk when StorageAccountType is UltraSSD_LRS. Returned only for
+	// VirtualMachine ScaleSet VM disks. Can be updated only via updates to the VirtualMachine
+	// Scale Set.
+ DiskIOPSReadWrite *int64 `json:"diskIOPSReadWrite,omitempty" azure:"ro"` + + // READ-ONLY; Specifies the bandwidth in MB per second for the managed disk when StorageAccountType is UltraSSD_LRS. Returned + // only for VirtualMachine ScaleSet VM disks. Can be updated only via updates to the + // VirtualMachine Scale Set. + DiskMBpsReadWrite *int64 `json:"diskMBpsReadWrite,omitempty" azure:"ro"` +} + +// DataDiskImage - Contains the data disk images information. +type DataDiskImage struct { + // READ-ONLY; Specifies the logical unit number of the data disk. This value is used to identify data disks within the VM + // and therefore must be unique for each data disk attached to a VM. + Lun *int32 `json:"lun,omitempty" azure:"ro"` +} + +// DataDiskImageEncryption - Contains encryption settings for a data disk image. +type DataDiskImageEncryption struct { + // REQUIRED; This property specifies the logical unit number of the data disk. This value is used to identify data disks within + // the Virtual Machine and therefore must be unique for each data disk attached to the + // Virtual Machine. + Lun *int32 `json:"lun,omitempty"` + + // A relative URI containing the resource ID of the disk encryption set. + DiskEncryptionSetID *string `json:"diskEncryptionSetId,omitempty"` +} + +// DedicatedHost - Specifies information about the Dedicated host. +type DedicatedHost struct { + // REQUIRED; Resource location + Location *string `json:"location,omitempty"` + + // REQUIRED; SKU of the dedicated host for Hardware Generation and VM family. Only name is required to be set. List Microsoft.Compute + // SKUs for a list of possible values. + SKU *SKU `json:"sku,omitempty"` + + // Properties of the dedicated host. + Properties *DedicatedHostProperties `json:"properties,omitempty"` + + // Resource tags + Tags map[string]*string `json:"tags,omitempty"` + + // READ-ONLY; Resource Id + ID *string `json:"id,omitempty" azure:"ro"` + + // READ-ONLY; Resource name + Name *string `json:"name,omitempty" azure:"ro"` + + // READ-ONLY; Resource type + Type *string `json:"type,omitempty" azure:"ro"` +} + +// DedicatedHostAllocatableVM - Represents the dedicated host unutilized capacity in terms of a specific VM size. +type DedicatedHostAllocatableVM struct { + // Maximum number of VMs of size vmSize that can fit in the dedicated host's remaining capacity. + Count *float64 `json:"count,omitempty"` + + // VM size in terms of which the unutilized capacity is represented. + VMSize *string `json:"vmSize,omitempty"` +} + +// DedicatedHostAvailableCapacity - Dedicated host unutilized capacity. +type DedicatedHostAvailableCapacity struct { + // The unutilized capacity of the dedicated host represented in terms of each VM size that is allowed to be deployed to the + // dedicated host. + AllocatableVMs []*DedicatedHostAllocatableVM `json:"allocatableVMs,omitempty"` +} + +// DedicatedHostGroup - Specifies information about the dedicated host group that the dedicated hosts should be assigned to. +// Currently, a dedicated host can only be added to a dedicated host group at creation time. An existing dedicated host cannot +// be added to another dedicated host group. +type DedicatedHostGroup struct { + // REQUIRED; Resource location + Location *string `json:"location,omitempty"` + + // Dedicated Host Group Properties. + Properties *DedicatedHostGroupProperties `json:"properties,omitempty"` + + // Resource tags + Tags map[string]*string `json:"tags,omitempty"` + + // Availability Zone to use for this host group. 
+	// Only a single zone is supported. The zone can be assigned only during creation.
+	// If not provided, the group supports all zones in the region. If provided,
+	// enforces each host in the group to be in the same zone.
+	Zones []*string `json:"zones,omitempty"`
+
+	// READ-ONLY; Resource Id
+	ID *string `json:"id,omitempty" azure:"ro"`
+
+	// READ-ONLY; Resource name
+	Name *string `json:"name,omitempty" azure:"ro"`
+
+	// READ-ONLY; Resource type
+	Type *string `json:"type,omitempty" azure:"ro"`
+}
+
+type DedicatedHostGroupInstanceView struct {
+	// List of instance view of the dedicated hosts under the dedicated host group.
+	Hosts []*DedicatedHostInstanceViewWithName `json:"hosts,omitempty"`
+}
+
+// DedicatedHostGroupListResult - The List Dedicated Host Group with resource group response.
+type DedicatedHostGroupListResult struct {
+	// REQUIRED; The list of dedicated host groups
+	Value []*DedicatedHostGroup `json:"value,omitempty"`
+
+	// The URI to fetch the next page of Dedicated Host Groups. Call ListNext() with this URI to fetch the next page of Dedicated
+	// Host Groups.
+	NextLink *string `json:"nextLink,omitempty"`
+}
+
+// DedicatedHostGroupProperties - Dedicated Host Group Properties.
+type DedicatedHostGroupProperties struct {
+	// REQUIRED; Number of fault domains that the host group can span.
+	PlatformFaultDomainCount *int32 `json:"platformFaultDomainCount,omitempty"`
+
+	// Enables or disables a capability on the dedicated host group.
+	// Minimum api-version: 2022-03-01.
+	AdditionalCapabilities *DedicatedHostGroupPropertiesAdditionalCapabilities `json:"additionalCapabilities,omitempty"`
+
+	// Specifies whether virtual machines or virtual machine scale sets can be placed automatically on the dedicated host group.
+	// Automatic placement means resources are allocated on dedicated hosts that are
+	// chosen by Azure, under the dedicated host group. The value is defaulted to 'false' when not provided.
+	// Minimum api-version: 2020-06-01.
+	SupportAutomaticPlacement *bool `json:"supportAutomaticPlacement,omitempty"`
+
+	// READ-ONLY; A list of references to all dedicated hosts in the dedicated host group.
+	Hosts []*SubResourceReadOnly `json:"hosts,omitempty" azure:"ro"`
+
+	// READ-ONLY; The dedicated host group instance view, which has the list of instance view of the dedicated hosts under the
+	// dedicated host group.
+	InstanceView *DedicatedHostGroupInstanceView `json:"instanceView,omitempty" azure:"ro"`
+}
+
+// DedicatedHostGroupPropertiesAdditionalCapabilities - Enables or disables a capability on the dedicated host group.
+// Minimum api-version: 2022-03-01.
+type DedicatedHostGroupPropertiesAdditionalCapabilities struct {
+	// The flag that enables or disables a capability to have UltraSSD Enabled Virtual Machines on Dedicated Hosts of the Dedicated
+	// Host Group. For the Virtual Machines to be UltraSSD Enabled, the
+	// UltraSSDEnabled flag for the resource needs to be set to true as well. The value is defaulted to 'false' when not provided.
+	// Please refer to
+	// https://docs.microsoft.com/en-us/azure/virtual-machines/disks-enable-ultra-ssd for more details on the Ultra SSD feature.
+	// NOTE: The ultraSSDEnabled setting can only be enabled for Host Groups that are created as zonal.
+	// Minimum api-version: 2022-03-01.
+	UltraSSDEnabled *bool `json:"ultraSSDEnabled,omitempty"`
+}
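+
+// A minimal sketch (illustrative only): a zonal dedicated host group spanning
+// two fault domains with automatic placement enabled. The region and zone
+// values and the to.Ptr helper are assumptions of this example.
+//
+//	group := DedicatedHostGroup{
+//		Location: to.Ptr("<region>"),
+//		Zones:    []*string{to.Ptr("1")},
+//		Properties: &DedicatedHostGroupProperties{
+//			PlatformFaultDomainCount:  to.Ptr[int32](2),
+//			SupportAutomaticPlacement: to.Ptr(true),
+//		},
+//	}
+
+// DedicatedHostGroupUpdate - Specifies information about the dedicated host group that the dedicated host should be assigned
+// to. Only tags may be updated.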
+type DedicatedHostGroupUpdate struct { + // Dedicated Host Group Properties. + Properties *DedicatedHostGroupProperties `json:"properties,omitempty"` + + // Resource tags + Tags map[string]*string `json:"tags,omitempty"` + + // Availability Zone to use for this host group. Only single zone is supported. The zone can be assigned only during creation. + // If not provided, the group supports all zones in the region. If provided, + // enforces each host in the group to be in the same zone. + Zones []*string `json:"zones,omitempty"` +} + +// DedicatedHostGroupsClientCreateOrUpdateOptions contains the optional parameters for the DedicatedHostGroupsClient.CreateOrUpdate +// method. +type DedicatedHostGroupsClientCreateOrUpdateOptions struct { + // placeholder for future optional parameters +} + +// DedicatedHostGroupsClientDeleteOptions contains the optional parameters for the DedicatedHostGroupsClient.Delete method. +type DedicatedHostGroupsClientDeleteOptions struct { + // placeholder for future optional parameters +} + +// DedicatedHostGroupsClientGetOptions contains the optional parameters for the DedicatedHostGroupsClient.Get method. +type DedicatedHostGroupsClientGetOptions struct { + // The expand expression to apply on the operation. 'InstanceView' will retrieve the list of instance views of the dedicated + // hosts under the dedicated host group. 'UserData' is not supported for + // dedicated host group. + Expand *InstanceViewTypes +} + +// DedicatedHostGroupsClientListByResourceGroupOptions contains the optional parameters for the DedicatedHostGroupsClient.ListByResourceGroup +// method. +type DedicatedHostGroupsClientListByResourceGroupOptions struct { + // placeholder for future optional parameters +} + +// DedicatedHostGroupsClientListBySubscriptionOptions contains the optional parameters for the DedicatedHostGroupsClient.ListBySubscription +// method. +type DedicatedHostGroupsClientListBySubscriptionOptions struct { + // placeholder for future optional parameters +} + +// DedicatedHostGroupsClientUpdateOptions contains the optional parameters for the DedicatedHostGroupsClient.Update method. +type DedicatedHostGroupsClientUpdateOptions struct { + // placeholder for future optional parameters +} + +// DedicatedHostInstanceView - The instance view of a dedicated host. +type DedicatedHostInstanceView struct { + // Unutilized capacity of the dedicated host. + AvailableCapacity *DedicatedHostAvailableCapacity `json:"availableCapacity,omitempty"` + + // The resource status information. + Statuses []*InstanceViewStatus `json:"statuses,omitempty"` + + // READ-ONLY; Specifies the unique id of the dedicated physical machine on which the dedicated host resides. + AssetID *string `json:"assetId,omitempty" azure:"ro"` +} + +// DedicatedHostInstanceViewWithName - The instance view of a dedicated host that includes the name of the dedicated host. +// It is used for the response to the instance view of a dedicated host group. +type DedicatedHostInstanceViewWithName struct { + // Unutilized capacity of the dedicated host. + AvailableCapacity *DedicatedHostAvailableCapacity `json:"availableCapacity,omitempty"` + + // The resource status information. + Statuses []*InstanceViewStatus `json:"statuses,omitempty"` + + // READ-ONLY; Specifies the unique id of the dedicated physical machine on which the dedicated host resides. + AssetID *string `json:"assetId,omitempty" azure:"ro"` + + // READ-ONLY; The name of the dedicated host. 
+ Name *string `json:"name,omitempty" azure:"ro"` +} + +// DedicatedHostListResult - The list dedicated host operation response. +type DedicatedHostListResult struct { + // REQUIRED; The list of dedicated hosts + Value []*DedicatedHost `json:"value,omitempty"` + + // The URI to fetch the next page of dedicated hosts. Call ListNext() with this URI to fetch the next page of dedicated hosts. + NextLink *string `json:"nextLink,omitempty"` +} + +// DedicatedHostProperties - Properties of the dedicated host. +type DedicatedHostProperties struct { + // Specifies whether the dedicated host should be replaced automatically in case of a failure. The value is defaulted to 'true' + // when not provided. + AutoReplaceOnFailure *bool `json:"autoReplaceOnFailure,omitempty"` + + // Specifies the software license type that will be applied to the VMs deployed on the dedicated host. + // Possible values are: + // None + // WindowsServerHybrid + // WindowsServerPerpetual + // Default: None + LicenseType *DedicatedHostLicenseTypes `json:"licenseType,omitempty"` + + // Fault domain of the dedicated host within a dedicated host group. + PlatformFaultDomain *int32 `json:"platformFaultDomain,omitempty"` + + // READ-ONLY; A unique id generated and assigned to the dedicated host by the platform. + // Does not change throughout the lifetime of the host. + HostID *string `json:"hostId,omitempty" azure:"ro"` + + // READ-ONLY; The dedicated host instance view. + InstanceView *DedicatedHostInstanceView `json:"instanceView,omitempty" azure:"ro"` + + // READ-ONLY; The provisioning state, which only appears in the response. + ProvisioningState *string `json:"provisioningState,omitempty" azure:"ro"` + + // READ-ONLY; The date when the host was first provisioned. + ProvisioningTime *time.Time `json:"provisioningTime,omitempty" azure:"ro"` + + // READ-ONLY; Specifies the time at which the Dedicated Host resource was created. + // Minimum api-version: 2022-03-01. + TimeCreated *time.Time `json:"timeCreated,omitempty" azure:"ro"` + + // READ-ONLY; A list of references to all virtual machines in the Dedicated Host. + VirtualMachines []*SubResourceReadOnly `json:"virtualMachines,omitempty" azure:"ro"` +} + +// DedicatedHostUpdate - Specifies information about the dedicated host. Only tags, autoReplaceOnFailure and licenseType may +// be updated. +type DedicatedHostUpdate struct { + // Properties of the dedicated host. + Properties *DedicatedHostProperties `json:"properties,omitempty"` + + // Resource tags + Tags map[string]*string `json:"tags,omitempty"` +} + +// DedicatedHostsClientBeginCreateOrUpdateOptions contains the optional parameters for the DedicatedHostsClient.BeginCreateOrUpdate +// method. +type DedicatedHostsClientBeginCreateOrUpdateOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// DedicatedHostsClientBeginDeleteOptions contains the optional parameters for the DedicatedHostsClient.BeginDelete method. +type DedicatedHostsClientBeginDeleteOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// DedicatedHostsClientBeginRestartOptions contains the optional parameters for the DedicatedHostsClient.BeginRestart method. +type DedicatedHostsClientBeginRestartOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// DedicatedHostsClientBeginUpdateOptions contains the optional parameters for the DedicatedHostsClient.BeginUpdate method. 
+type DedicatedHostsClientBeginUpdateOptions struct {
+	// Resumes the LRO from the provided token.
+	ResumeToken string
+}
+
+// DedicatedHostsClientGetOptions contains the optional parameters for the DedicatedHostsClient.Get method.
+type DedicatedHostsClientGetOptions struct {
+	// The expand expression to apply on the operation. 'InstanceView' will retrieve the list of instance views of the dedicated
+	// host. 'UserData' is not supported for dedicated host.
+	Expand *InstanceViewTypes
+}
+
+// DedicatedHostsClientListByHostGroupOptions contains the optional parameters for the DedicatedHostsClient.ListByHostGroup
+// method.
+type DedicatedHostsClientListByHostGroupOptions struct {
+	// placeholder for future optional parameters
+}
+
+// DiagnosticsProfile - Specifies the boot diagnostic settings state.
+// Minimum api-version: 2015-06-15.
+type DiagnosticsProfile struct {
+	// Boot Diagnostics is a debugging feature which allows you to view Console Output and Screenshot to diagnose VM status.
+	// NOTE: If storageUri is being specified then ensure that the storage account is in the same region and subscription as the
+	// VM.
+	// You can easily view the output of your console log.
+	// Azure also enables you to see a screenshot of the VM from the hypervisor.
+	BootDiagnostics *BootDiagnostics `json:"bootDiagnostics,omitempty"`
+}
+
+// DiffDiskSettings - Describes the parameters of ephemeral disk settings that can be specified for operating system disk.
+// NOTE: The ephemeral disk settings can only be specified for managed disk.
+type DiffDiskSettings struct {
+	// Specifies the ephemeral disk settings for operating system disk.
+	Option *DiffDiskOptions `json:"option,omitempty"`
+
+	// Specifies the ephemeral disk placement for operating system disk.
+	// Possible values are:
+	// CacheDisk
+	// ResourceDisk
+	// Default: CacheDisk if one is configured for the VM size, otherwise ResourceDisk is used.
+	// Refer to VM size documentation for Windows VM at https://docs.microsoft.com/azure/virtual-machines/windows/sizes and Linux
+	// VM at https://docs.microsoft.com/azure/virtual-machines/linux/sizes to check
+	// which VM sizes expose a cache disk.
+	Placement *DiffDiskPlacement `json:"placement,omitempty"`
+}
+
+// Disallowed - Describes the disallowed disk types.
+type Disallowed struct {
+	// A list of disk types.
+	DiskTypes []*string `json:"diskTypes,omitempty"`
+}
+
+// DisallowedConfiguration - Specifies the disallowed configuration for a virtual machine image.
+type DisallowedConfiguration struct {
+	// VM disk types which are disallowed.
+	VMDiskType *VMDiskTypes `json:"vmDiskType,omitempty"`
+}
+
+// Disk resource.
+type Disk struct {
+	// REQUIRED; Resource location
+	Location *string `json:"location,omitempty"`
+
+	// The extended location where the disk will be created. Extended location cannot be changed.
+	ExtendedLocation *ExtendedLocation `json:"extendedLocation,omitempty"`
+
+	// Disk resource properties.
+	Properties *DiskProperties `json:"properties,omitempty"`
+
+	// The disk's sku name. Can be StandardLRS, PremiumLRS, StandardSSDLRS, UltraSSDLRS, PremiumZRS, or StandardSSDZRS.
+	SKU *DiskSKU `json:"sku,omitempty"`
+
+	// Resource tags
+	Tags map[string]*string `json:"tags,omitempty"`
+
+	// The Logical zone list for Disk.
+	Zones []*string `json:"zones,omitempty"`
+
+	// READ-ONLY; Resource Id
+	ID *string `json:"id,omitempty" azure:"ro"`
+
+	// READ-ONLY; A relative URI containing the ID of the VM that has the disk attached.
+ ManagedBy *string `json:"managedBy,omitempty" azure:"ro"` + + // READ-ONLY; List of relative URIs containing the IDs of the VMs that have the disk attached. maxShares should be set to + // a value greater than one for disks to allow attaching them to multiple VMs. + ManagedByExtended []*string `json:"managedByExtended,omitempty" azure:"ro"` + + // READ-ONLY; Resource name + Name *string `json:"name,omitempty" azure:"ro"` + + // READ-ONLY; Resource type + Type *string `json:"type,omitempty" azure:"ro"` +} + +// DiskAccess - disk access resource. +type DiskAccess struct { + // REQUIRED; Resource location + Location *string `json:"location,omitempty"` + + // The extended location where the disk access will be created. Extended location cannot be changed. + ExtendedLocation *ExtendedLocation `json:"extendedLocation,omitempty"` + Properties *DiskAccessProperties `json:"properties,omitempty"` + + // Resource tags + Tags map[string]*string `json:"tags,omitempty"` + + // READ-ONLY; Resource Id + ID *string `json:"id,omitempty" azure:"ro"` + + // READ-ONLY; Resource name + Name *string `json:"name,omitempty" azure:"ro"` + + // READ-ONLY; Resource type + Type *string `json:"type,omitempty" azure:"ro"` +} + +// DiskAccessList - The List disk access operation response. +type DiskAccessList struct { + // REQUIRED; A list of disk access resources. + Value []*DiskAccess `json:"value,omitempty"` + + // The uri to fetch the next page of disk access resources. Call ListNext() with this to fetch the next page of disk access + // resources. + NextLink *string `json:"nextLink,omitempty"` +} + +type DiskAccessProperties struct { + // READ-ONLY; A readonly collection of private endpoint connections created on the disk. Currently only one endpoint connection + // is supported. + PrivateEndpointConnections []*PrivateEndpointConnection `json:"privateEndpointConnections,omitempty" azure:"ro"` + + // READ-ONLY; The disk access resource provisioning state. + ProvisioningState *string `json:"provisioningState,omitempty" azure:"ro"` + + // READ-ONLY; The time when the disk access was created. + TimeCreated *time.Time `json:"timeCreated,omitempty" azure:"ro"` +} + +// DiskAccessUpdate - Used for updating a disk access resource. +type DiskAccessUpdate struct { + // Resource tags + Tags map[string]*string `json:"tags,omitempty"` +} + +// DiskAccessesClientBeginCreateOrUpdateOptions contains the optional parameters for the DiskAccessesClient.BeginCreateOrUpdate +// method. +type DiskAccessesClientBeginCreateOrUpdateOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// DiskAccessesClientBeginDeleteAPrivateEndpointConnectionOptions contains the optional parameters for the DiskAccessesClient.BeginDeleteAPrivateEndpointConnection +// method. +type DiskAccessesClientBeginDeleteAPrivateEndpointConnectionOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// DiskAccessesClientBeginDeleteOptions contains the optional parameters for the DiskAccessesClient.BeginDelete method. +type DiskAccessesClientBeginDeleteOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// DiskAccessesClientBeginUpdateAPrivateEndpointConnectionOptions contains the optional parameters for the DiskAccessesClient.BeginUpdateAPrivateEndpointConnection +// method. +type DiskAccessesClientBeginUpdateAPrivateEndpointConnectionOptions struct { + // Resumes the LRO from the provided token. 
+	ResumeToken string
+}
+
+// DiskAccessesClientBeginUpdateOptions contains the optional parameters for the DiskAccessesClient.BeginUpdate method.
+type DiskAccessesClientBeginUpdateOptions struct {
+	// Resumes the LRO from the provided token.
+	ResumeToken string
+}
+
+// DiskAccessesClientGetAPrivateEndpointConnectionOptions contains the optional parameters for the DiskAccessesClient.GetAPrivateEndpointConnection
+// method.
+type DiskAccessesClientGetAPrivateEndpointConnectionOptions struct {
+	// placeholder for future optional parameters
+}
+
+// DiskAccessesClientGetOptions contains the optional parameters for the DiskAccessesClient.Get method.
+type DiskAccessesClientGetOptions struct {
+	// placeholder for future optional parameters
+}
+
+// DiskAccessesClientGetPrivateLinkResourcesOptions contains the optional parameters for the DiskAccessesClient.GetPrivateLinkResources
+// method.
+type DiskAccessesClientGetPrivateLinkResourcesOptions struct {
+	// placeholder for future optional parameters
+}
+
+// DiskAccessesClientListByResourceGroupOptions contains the optional parameters for the DiskAccessesClient.ListByResourceGroup
+// method.
+type DiskAccessesClientListByResourceGroupOptions struct {
+	// placeholder for future optional parameters
+}
+
+// DiskAccessesClientListOptions contains the optional parameters for the DiskAccessesClient.List method.
+type DiskAccessesClientListOptions struct {
+	// placeholder for future optional parameters
+}
+
+// DiskAccessesClientListPrivateEndpointConnectionsOptions contains the optional parameters for the DiskAccessesClient.ListPrivateEndpointConnections
+// method.
+type DiskAccessesClientListPrivateEndpointConnectionsOptions struct {
+	// placeholder for future optional parameters
+}
+
+// DiskEncryptionSet - disk encryption set resource.
+type DiskEncryptionSet struct {
+	// REQUIRED; Resource location
+	Location *string `json:"location,omitempty"`
+
+	// The managed identity for the disk encryption set. It should be given permission on the key vault before it can be used
+	// to encrypt disks.
+	Identity   *EncryptionSetIdentity   `json:"identity,omitempty"`
+	Properties *EncryptionSetProperties `json:"properties,omitempty"`
+
+	// Resource tags
+	Tags map[string]*string `json:"tags,omitempty"`
+
+	// READ-ONLY; Resource Id
+	ID *string `json:"id,omitempty" azure:"ro"`
+
+	// READ-ONLY; Resource name
+	Name *string `json:"name,omitempty" azure:"ro"`
+
+	// READ-ONLY; Resource type
+	Type *string `json:"type,omitempty" azure:"ro"`
+}
+
+// DiskEncryptionSetList - The List disk encryption set operation response.
+type DiskEncryptionSetList struct {
+	// REQUIRED; A list of disk encryption sets.
+	Value []*DiskEncryptionSet `json:"value,omitempty"`
+
+	// The uri to fetch the next page of disk encryption sets. Call ListNext() with this to fetch the next page of disk encryption
+	// sets.
+	NextLink *string `json:"nextLink,omitempty"`
+}
+
+// DiskEncryptionSetParameters - Describes the parameter of customer managed disk encryption set resource id that can be specified
+// for disk.
+// NOTE: The disk encryption set resource id can only be specified for managed disk. Please refer to https://aka.ms/mdssewithcmkoverview
+// for more details.
+type DiskEncryptionSetParameters struct {
+	// Resource Id
+	ID *string `json:"id,omitempty"`
+}
+
+// DiskEncryptionSetUpdate - disk encryption set update resource.
+type DiskEncryptionSetUpdate struct {
+	// The managed identity for the disk encryption set. It should be given permission on the key vault before it can be used
It should be given permission on the key vault before it can be used
+ // to encrypt disks.
+ Identity *EncryptionSetIdentity `json:"identity,omitempty"`
+
+ // disk encryption set resource update properties.
+ Properties *DiskEncryptionSetUpdateProperties `json:"properties,omitempty"`
+
+ // Resource tags
+ Tags map[string]*string `json:"tags,omitempty"`
+}
+
+// DiskEncryptionSetUpdateProperties - disk encryption set resource update properties.
+type DiskEncryptionSetUpdateProperties struct {
+ // Key Vault Key Url to be used for server side encryption of Managed Disks and Snapshots
+ ActiveKey *KeyForDiskEncryptionSet `json:"activeKey,omitempty"`
+
+ // The type of key used to encrypt the data of the disk.
+ EncryptionType *DiskEncryptionSetType `json:"encryptionType,omitempty"`
+
+ // Set this flag to true to enable auto-updating of this disk encryption set to the latest key version.
+ RotationToLatestKeyVersionEnabled *bool `json:"rotationToLatestKeyVersionEnabled,omitempty"`
+}
+
+// DiskEncryptionSetsClientBeginCreateOrUpdateOptions contains the optional parameters for the DiskEncryptionSetsClient.BeginCreateOrUpdate
+// method.
+type DiskEncryptionSetsClientBeginCreateOrUpdateOptions struct {
+ // Resumes the LRO from the provided token.
+ ResumeToken string
+}
+
+// DiskEncryptionSetsClientBeginDeleteOptions contains the optional parameters for the DiskEncryptionSetsClient.BeginDelete
+// method.
+type DiskEncryptionSetsClientBeginDeleteOptions struct {
+ // Resumes the LRO from the provided token.
+ ResumeToken string
+}
+
+// DiskEncryptionSetsClientBeginUpdateOptions contains the optional parameters for the DiskEncryptionSetsClient.BeginUpdate
+// method.
+type DiskEncryptionSetsClientBeginUpdateOptions struct {
+ // Resumes the LRO from the provided token.
+ ResumeToken string
+}
+
+// DiskEncryptionSetsClientGetOptions contains the optional parameters for the DiskEncryptionSetsClient.Get method.
+type DiskEncryptionSetsClientGetOptions struct {
+ // placeholder for future optional parameters
+}
+
+// DiskEncryptionSetsClientListAssociatedResourcesOptions contains the optional parameters for the DiskEncryptionSetsClient.ListAssociatedResources
+// method.
+type DiskEncryptionSetsClientListAssociatedResourcesOptions struct {
+ // placeholder for future optional parameters
+}
+
+// DiskEncryptionSetsClientListByResourceGroupOptions contains the optional parameters for the DiskEncryptionSetsClient.ListByResourceGroup
+// method.
+type DiskEncryptionSetsClientListByResourceGroupOptions struct {
+ // placeholder for future optional parameters
+}
+
+// DiskEncryptionSetsClientListOptions contains the optional parameters for the DiskEncryptionSetsClient.List method.
+type DiskEncryptionSetsClientListOptions struct {
+ // placeholder for future optional parameters
+}
+
+// DiskEncryptionSettings - Describes the Encryption Settings for a Disk
+type DiskEncryptionSettings struct {
+ // Specifies the location of the disk encryption key, which is a Key Vault Secret.
+ DiskEncryptionKey *KeyVaultSecretReference `json:"diskEncryptionKey,omitempty"`
+
+ // Specifies whether disk encryption should be enabled on the virtual machine.
+ Enabled *bool `json:"enabled,omitempty"`
+
+ // Specifies the location of the key encryption key in Key Vault.
+ KeyEncryptionKey *KeyVaultKeyReference `json:"keyEncryptionKey,omitempty"`
+}
+
+// DiskImageEncryption - This is the disk image encryption base class. 
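+// Illustrative sketch (resource ID invented): encrypting a gallery disk image with a
+// customer managed key means pointing this base class at a disk encryption set:
+//
+//    enc := DiskImageEncryption{
+//        DiskEncryptionSetID: to.Ptr("/subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.Compute/diskEncryptionSets/myDES"),
+//    }
+//
+// to.Ptr is the pointer helper from the azcore "to" package; OSDiskImageEncryption and
+// DataDiskImageEncryption carry this same field.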
+type DiskImageEncryption struct { + // A relative URI containing the resource ID of the disk encryption set. + DiskEncryptionSetID *string `json:"diskEncryptionSetId,omitempty"` +} + +// DiskInstanceView - The instance view of the disk. +type DiskInstanceView struct { + // Specifies the encryption settings for the OS Disk. + // Minimum api-version: 2015-06-15 + EncryptionSettings []*DiskEncryptionSettings `json:"encryptionSettings,omitempty"` + + // The disk name. + Name *string `json:"name,omitempty"` + + // The resource status information. + Statuses []*InstanceViewStatus `json:"statuses,omitempty"` +} + +// DiskList - The List Disks operation response. +type DiskList struct { + // REQUIRED; A list of disks. + Value []*Disk `json:"value,omitempty"` + + // The uri to fetch the next page of disks. Call ListNext() with this to fetch the next page of disks. + NextLink *string `json:"nextLink,omitempty"` +} + +// DiskProperties - Disk resource properties. +type DiskProperties struct { + // REQUIRED; Disk source information. CreationData information cannot be changed after the disk has been created. + CreationData *CreationData `json:"creationData,omitempty"` + + // Set to true to enable bursting beyond the provisioned performance target of the disk. Bursting is disabled by default. + // Does not apply to Ultra disks. + BurstingEnabled *bool `json:"burstingEnabled,omitempty"` + + // Percentage complete for the background copy when a resource is created via the CopyStart operation. + CompletionPercent *float32 `json:"completionPercent,omitempty"` + + // Additional authentication requirements when exporting or uploading to a disk or snapshot. + DataAccessAuthMode *DataAccessAuthMode `json:"dataAccessAuthMode,omitempty"` + + // ARM id of the DiskAccess resource for using private endpoints on disks. + DiskAccessID *string `json:"diskAccessId,omitempty"` + + // The total number of IOPS that will be allowed across all VMs mounting the shared disk as ReadOnly. One operation can transfer + // between 4k and 256k bytes. + DiskIOPSReadOnly *int64 `json:"diskIOPSReadOnly,omitempty"` + + // The number of IOPS allowed for this disk; only settable for UltraSSD disks. One operation can transfer between 4k and 256k + // bytes. + DiskIOPSReadWrite *int64 `json:"diskIOPSReadWrite,omitempty"` + + // The total throughput (MBps) that will be allowed across all VMs mounting the shared disk as ReadOnly. MBps means millions + // of bytes per second - MB here uses the ISO notation, of powers of 10. + DiskMBpsReadOnly *int64 `json:"diskMBpsReadOnly,omitempty"` + + // The bandwidth allowed for this disk; only settable for UltraSSD disks. MBps means millions of bytes per second - MB here + // uses the ISO notation, of powers of 10. + DiskMBpsReadWrite *int64 `json:"diskMBpsReadWrite,omitempty"` + + // If creationData.createOption is Empty, this field is mandatory and it indicates the size of the disk to create. If this + // field is present for updates or creation with other options, it indicates a + // resize. Resizes are only allowed if the disk is not attached to a running VM, and can only increase the disk's size. + DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` + + // Encryption property can be used to encrypt data at rest with customer managed keys or platform managed keys. + Encryption *Encryption `json:"encryption,omitempty"` + + // Encryption settings collection used for Azure Disk Encryption, can contain multiple encryption settings per disk or snapshot. 
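+ //
+ // As a broader illustration of DiskProperties (a sketch with invented values): an empty
+ // 100 GiB disk needs only CreationData plus a size; everything else here is optional:
+ //
+ //    props := DiskProperties{
+ //        CreationData: &CreationData{CreateOption: to.Ptr(DiskCreateOptionEmpty)},
+ //        DiskSizeGB:   to.Ptr[int32](100),
+ //    }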
+ EncryptionSettingsCollection *EncryptionSettingsCollection `json:"encryptionSettingsCollection,omitempty"`
+
+ // The hypervisor generation of the Virtual Machine. Applicable to OS disks only.
+ HyperVGeneration *HyperVGeneration `json:"hyperVGeneration,omitempty"`
+
+ // The maximum number of VMs that can attach to the disk at the same time. Value greater than one indicates a disk that can
+ // be mounted on multiple VMs at the same time.
+ MaxShares *int32 `json:"maxShares,omitempty"`
+
+ // Policy for accessing the disk via network.
+ NetworkAccessPolicy *NetworkAccessPolicy `json:"networkAccessPolicy,omitempty"`
+
+ // The Operating System type.
+ OSType *OperatingSystemTypes `json:"osType,omitempty"`
+
+ // Policy for controlling export on the disk.
+ PublicNetworkAccess *PublicNetworkAccess `json:"publicNetworkAccess,omitempty"`
+
+ // Purchase plan information for the image from which the OS disk was created. E.g. - {name: 2019-Datacenter, publisher:
+ // MicrosoftWindowsServer, product: WindowsServer}
+ PurchasePlan *DiskPurchasePlan `json:"purchasePlan,omitempty"`
+
+ // Contains the security related information for the resource.
+ SecurityProfile *DiskSecurityProfile `json:"securityProfile,omitempty"`
+
+ // List of supported capabilities for the image from which the OS disk was created.
+ SupportedCapabilities *SupportedCapabilities `json:"supportedCapabilities,omitempty"`
+
+ // Indicates the OS on a disk supports hibernation.
+ SupportsHibernation *bool `json:"supportsHibernation,omitempty"`
+
+ // Performance tier of the disk (e.g., P4, S10) as described here: https://azure.microsoft.com/en-us/pricing/details/managed-disks/.
+ // Does not apply to Ultra disks.
+ Tier *string `json:"tier,omitempty"`
+
+ // READ-ONLY; The size of the disk in bytes. This field is read only.
+ DiskSizeBytes *int64 `json:"diskSizeBytes,omitempty" azure:"ro"`
+
+ // READ-ONLY; The state of the disk.
+ DiskState *DiskState `json:"diskState,omitempty" azure:"ro"`
+
+ // READ-ONLY; Properties of the disk for which update is pending.
+ PropertyUpdatesInProgress *PropertyUpdatesInProgress `json:"propertyUpdatesInProgress,omitempty" azure:"ro"`
+
+ // READ-ONLY; The disk provisioning state.
+ ProvisioningState *string `json:"provisioningState,omitempty" azure:"ro"`
+
+ // READ-ONLY; Details of the list of all VMs that have the disk attached. maxShares should be set to a value greater than
+ // one for disks to allow attaching them to multiple VMs.
+ ShareInfo []*ShareInfoElement `json:"shareInfo,omitempty" azure:"ro"`
+
+ // READ-ONLY; The time when the disk was created.
+ TimeCreated *time.Time `json:"timeCreated,omitempty" azure:"ro"`
+
+ // READ-ONLY; Unique Guid identifying the resource.
+ UniqueID *string `json:"uniqueId,omitempty" azure:"ro"`
+}
+
+// DiskPurchasePlan - Used for establishing the purchase context of any 3rd Party artifact through MarketPlace.
+type DiskPurchasePlan struct {
+ // REQUIRED; The plan ID.
+ Name *string `json:"name,omitempty"`
+
+ // REQUIRED; Specifies the product of the image from the marketplace. This is the same value as Offer under the imageReference
+ // element.
+ Product *string `json:"product,omitempty"`
+
+ // REQUIRED; The publisher ID.
+ Publisher *string `json:"publisher,omitempty"`
+
+ // The Offer Promotion Code. 
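+ //
+ // Assembled as a literal, mirroring the E.g. given for purchasePlan above (an
+ // illustrative sketch):
+ //
+ //    plan := DiskPurchasePlan{
+ //        Name:      to.Ptr("2019-Datacenter"),
+ //        Publisher: to.Ptr("MicrosoftWindowsServer"),
+ //        Product:   to.Ptr("WindowsServer"),
+ //    }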
+ PromotionCode *string `json:"promotionCode,omitempty"`
+}
+
+// DiskRestorePoint - Properties of disk restore point
+type DiskRestorePoint struct {
+ // Properties of an incremental disk restore point
+ Properties *DiskRestorePointProperties `json:"properties,omitempty"`
+
+ // READ-ONLY; Resource Id
+ ID *string `json:"id,omitempty" azure:"ro"`
+
+ // READ-ONLY; Resource name
+ Name *string `json:"name,omitempty" azure:"ro"`
+
+ // READ-ONLY; Resource type
+ Type *string `json:"type,omitempty" azure:"ro"`
+}
+
+// DiskRestorePointClientBeginGrantAccessOptions contains the optional parameters for the DiskRestorePointClient.BeginGrantAccess
+// method.
+type DiskRestorePointClientBeginGrantAccessOptions struct {
+ // Resumes the LRO from the provided token.
+ ResumeToken string
+}
+
+// DiskRestorePointClientBeginRevokeAccessOptions contains the optional parameters for the DiskRestorePointClient.BeginRevokeAccess
+// method.
+type DiskRestorePointClientBeginRevokeAccessOptions struct {
+ // Resumes the LRO from the provided token.
+ ResumeToken string
+}
+
+// DiskRestorePointClientGetOptions contains the optional parameters for the DiskRestorePointClient.Get method.
+type DiskRestorePointClientGetOptions struct {
+ // placeholder for future optional parameters
+}
+
+// DiskRestorePointClientListByRestorePointOptions contains the optional parameters for the DiskRestorePointClient.ListByRestorePoint
+// method.
+type DiskRestorePointClientListByRestorePointOptions struct {
+ // placeholder for future optional parameters
+}
+
+// DiskRestorePointInstanceView - The instance view of a disk restore point.
+type DiskRestorePointInstanceView struct {
+ // Disk restore point Id.
+ ID *string `json:"id,omitempty"`
+
+ // The disk restore point replication status information.
+ ReplicationStatus *DiskRestorePointReplicationStatus `json:"replicationStatus,omitempty"`
+}
+
+// DiskRestorePointList - The List Disk Restore Points operation response.
+type DiskRestorePointList struct {
+ // REQUIRED; A list of disk restore points.
+ Value []*DiskRestorePoint `json:"value,omitempty"`
+
+ // The uri to fetch the next page of disk restore points. Call ListNext() with this to fetch the next page of disk restore
+ // points.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// DiskRestorePointProperties - Properties of an incremental disk restore point
+type DiskRestorePointProperties struct {
+ // Percentage complete for the background copy of disk restore point when source resource is from a different region.
+ CompletionPercent *float32 `json:"completionPercent,omitempty"`
+
+ // ARM id of the DiskAccess resource for using private endpoints on disks.
+ DiskAccessID *string `json:"diskAccessId,omitempty"`
+
+ // The hypervisor generation of the Virtual Machine. Applicable to OS disks only.
+ HyperVGeneration *HyperVGeneration `json:"hyperVGeneration,omitempty"`
+
+ // Policy for accessing the disk via network.
+ NetworkAccessPolicy *NetworkAccessPolicy `json:"networkAccessPolicy,omitempty"`
+
+ // Policy for controlling export on the disk.
+ PublicNetworkAccess *PublicNetworkAccess `json:"publicNetworkAccess,omitempty"`
+
+ // Purchase plan information for the image from which the OS disk was created.
+ PurchasePlan *DiskPurchasePlan `json:"purchasePlan,omitempty"`
+
+ // List of supported capabilities for the image from which the OS disk was created.
+ SupportedCapabilities *SupportedCapabilities `json:"supportedCapabilities,omitempty"`
+
+ // Indicates the OS on a disk supports hibernation. 
+ SupportsHibernation *bool `json:"supportsHibernation,omitempty"` + + // READ-ONLY; Encryption property can be used to encrypt data at rest with customer managed keys or platform managed keys. + Encryption *Encryption `json:"encryption,omitempty" azure:"ro"` + + // READ-ONLY; id of the backing snapshot's MIS family + FamilyID *string `json:"familyId,omitempty" azure:"ro"` + + // READ-ONLY; The Operating System type. + OSType *OperatingSystemTypes `json:"osType,omitempty" azure:"ro"` + + // READ-ONLY; Replication state of disk restore point when source resource is from a different region. + ReplicationState *string `json:"replicationState,omitempty" azure:"ro"` + + // READ-ONLY; arm id of source disk or source disk restore point. + SourceResourceID *string `json:"sourceResourceId,omitempty" azure:"ro"` + + // READ-ONLY; Location of source disk or source disk restore point when source resource is from a different region. + SourceResourceLocation *string `json:"sourceResourceLocation,omitempty" azure:"ro"` + + // READ-ONLY; unique incarnation id of the source disk + SourceUniqueID *string `json:"sourceUniqueId,omitempty" azure:"ro"` + + // READ-ONLY; The timestamp of restorePoint creation + TimeCreated *time.Time `json:"timeCreated,omitempty" azure:"ro"` +} + +// DiskRestorePointReplicationStatus - The instance view of a disk restore point. +type DiskRestorePointReplicationStatus struct { + // Replication completion percentage. + CompletionPercent *int32 `json:"completionPercent,omitempty"` + + // The resource status information. + Status *InstanceViewStatus `json:"status,omitempty"` +} + +// DiskSKU - The disks sku name. Can be StandardLRS, PremiumLRS, StandardSSDLRS, UltraSSDLRS, PremiumZRS, or StandardSSDZRS. +type DiskSKU struct { + // The sku name. + Name *DiskStorageAccountTypes `json:"name,omitempty"` + + // READ-ONLY; The sku tier. + Tier *string `json:"tier,omitempty" azure:"ro"` +} + +// DiskSecurityProfile - Contains the security related information for the resource. +type DiskSecurityProfile struct { + // ResourceId of the disk encryption set associated to Confidential VM supported disk encrypted with customer managed key + SecureVMDiskEncryptionSetID *string `json:"secureVMDiskEncryptionSetId,omitempty"` + + // Specifies the SecurityType of the VM. Applicable for OS disks only. + SecurityType *DiskSecurityTypes `json:"securityType,omitempty"` +} + +// DiskUpdate - Disk update resource. +type DiskUpdate struct { + // Disk resource update properties. + Properties *DiskUpdateProperties `json:"properties,omitempty"` + + // The disks sku name. Can be StandardLRS, PremiumLRS, StandardSSDLRS, UltraSSDLRS, PremiumZRS, or StandardSSDZRS. + SKU *DiskSKU `json:"sku,omitempty"` + + // Resource tags + Tags map[string]*string `json:"tags,omitempty"` +} + +// DiskUpdateProperties - Disk resource update properties. +type DiskUpdateProperties struct { + // Set to true to enable bursting beyond the provisioned performance target of the disk. Bursting is disabled by default. + // Does not apply to Ultra disks. + BurstingEnabled *bool `json:"burstingEnabled,omitempty"` + + // Additional authentication requirements when exporting or uploading to a disk or snapshot. + DataAccessAuthMode *DataAccessAuthMode `json:"dataAccessAuthMode,omitempty"` + + // ARM id of the DiskAccess resource for using private endpoints on disks. + DiskAccessID *string `json:"diskAccessId,omitempty"` + + // The total number of IOPS that will be allowed across all VMs mounting the shared disk as ReadOnly. 
One operation can transfer
+ // between 4k and 256k bytes.
+ DiskIOPSReadOnly *int64 `json:"diskIOPSReadOnly,omitempty"`
+
+ // The number of IOPS allowed for this disk; only settable for UltraSSD disks. One operation can transfer between 4k and 256k
+ // bytes.
+ DiskIOPSReadWrite *int64 `json:"diskIOPSReadWrite,omitempty"`
+
+ // The total throughput (MBps) that will be allowed across all VMs mounting the shared disk as ReadOnly. MBps means millions
+ // of bytes per second - MB here uses the ISO notation, of powers of 10.
+ DiskMBpsReadOnly *int64 `json:"diskMBpsReadOnly,omitempty"`
+
+ // The bandwidth allowed for this disk; only settable for UltraSSD disks. MBps means millions of bytes per second - MB here
+ // uses the ISO notation, of powers of 10.
+ DiskMBpsReadWrite *int64 `json:"diskMBpsReadWrite,omitempty"`
+
+ // If creationData.createOption is Empty, this field is mandatory and it indicates the size of the disk to create. If this
+ // field is present for updates or creation with other options, it indicates a
+ // resize. Resizes are only allowed if the disk is not attached to a running VM, and can only increase the disk's size.
+ DiskSizeGB *int32 `json:"diskSizeGB,omitempty"`
+
+ // Encryption property can be used to encrypt data at rest with customer managed keys or platform managed keys.
+ Encryption *Encryption `json:"encryption,omitempty"`
+
+ // Encryption settings collection used by Azure Disk Encryption, can contain multiple encryption settings per disk or snapshot.
+ EncryptionSettingsCollection *EncryptionSettingsCollection `json:"encryptionSettingsCollection,omitempty"`
+
+ // The maximum number of VMs that can attach to the disk at the same time. Value greater than one indicates a disk that can
+ // be mounted on multiple VMs at the same time.
+ MaxShares *int32 `json:"maxShares,omitempty"`
+
+ // Policy for accessing the disk via network.
+ NetworkAccessPolicy *NetworkAccessPolicy `json:"networkAccessPolicy,omitempty"`
+
+ // The Operating System type.
+ OSType *OperatingSystemTypes `json:"osType,omitempty"`
+
+ // Policy for controlling export on the disk.
+ PublicNetworkAccess *PublicNetworkAccess `json:"publicNetworkAccess,omitempty"`
+
+ // Purchase plan information to be added on the OS disk
+ PurchasePlan *DiskPurchasePlan `json:"purchasePlan,omitempty"`
+
+ // List of supported capabilities to be added on the OS disk.
+ SupportedCapabilities *SupportedCapabilities `json:"supportedCapabilities,omitempty"`
+
+ // Indicates the OS on a disk supports hibernation.
+ SupportsHibernation *bool `json:"supportsHibernation,omitempty"`
+
+ // Performance tier of the disk (e.g., P4, S10) as described here: https://azure.microsoft.com/en-us/pricing/details/managed-disks/.
+ // Does not apply to Ultra disks.
+ Tier *string `json:"tier,omitempty"`
+
+ // READ-ONLY; Properties of the disk for which update is pending.
+ PropertyUpdatesInProgress *PropertyUpdatesInProgress `json:"propertyUpdatesInProgress,omitempty" azure:"ro"`
+}
+
+// DisksClientBeginCreateOrUpdateOptions contains the optional parameters for the DisksClient.BeginCreateOrUpdate method.
+type DisksClientBeginCreateOrUpdateOptions struct {
+ // Resumes the LRO from the provided token.
+ ResumeToken string
+}
+
+// DisksClientBeginDeleteOptions contains the optional parameters for the DisksClient.BeginDelete method.
+type DisksClientBeginDeleteOptions struct {
+ // Resumes the LRO from the provided token. 
+ ResumeToken string +} + +// DisksClientBeginGrantAccessOptions contains the optional parameters for the DisksClient.BeginGrantAccess method. +type DisksClientBeginGrantAccessOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// DisksClientBeginRevokeAccessOptions contains the optional parameters for the DisksClient.BeginRevokeAccess method. +type DisksClientBeginRevokeAccessOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// DisksClientBeginUpdateOptions contains the optional parameters for the DisksClient.BeginUpdate method. +type DisksClientBeginUpdateOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// DisksClientGetOptions contains the optional parameters for the DisksClient.Get method. +type DisksClientGetOptions struct { + // placeholder for future optional parameters +} + +// DisksClientListByResourceGroupOptions contains the optional parameters for the DisksClient.ListByResourceGroup method. +type DisksClientListByResourceGroupOptions struct { + // placeholder for future optional parameters +} + +// DisksClientListOptions contains the optional parameters for the DisksClient.List method. +type DisksClientListOptions struct { + // placeholder for future optional parameters +} + +// Encryption at rest settings for disk or snapshot +type Encryption struct { + // ResourceId of the disk encryption set to use for enabling encryption at rest. + DiskEncryptionSetID *string `json:"diskEncryptionSetId,omitempty"` + + // The type of key used to encrypt the data of the disk. + Type *EncryptionType `json:"type,omitempty"` +} + +// EncryptionImages - Optional. Allows users to provide customer managed keys for encrypting the OS and data disks in the +// gallery artifact. +type EncryptionImages struct { + // A list of encryption specifications for data disk images. + DataDiskImages []*DataDiskImageEncryption `json:"dataDiskImages,omitempty"` + + // Contains encryption settings for an OS disk image. + OSDiskImage *OSDiskImageEncryption `json:"osDiskImage,omitempty"` +} + +// EncryptionSetIdentity - The managed identity for the disk encryption set. It should be given permission on the key vault +// before it can be used to encrypt disks. +type EncryptionSetIdentity struct { + // The type of Managed Identity used by the DiskEncryptionSet. Only SystemAssigned is supported for new creations. Disk Encryption + // Sets can be updated with Identity type None during migration of + // subscription to a new Azure Active Directory tenant; it will cause the encrypted resources to lose access to the keys. + Type *DiskEncryptionSetIdentityType `json:"type,omitempty"` + + // READ-ONLY; The object id of the Managed Identity Resource. This will be sent to the RP from ARM via the x-ms-identity-principal-id + // header in the PUT request if the resource has a systemAssigned(implicit) + // identity + PrincipalID *string `json:"principalId,omitempty" azure:"ro"` + + // READ-ONLY; The tenant id of the Managed Identity Resource. This will be sent to the RP from ARM via the x-ms-client-tenant-id + // header in the PUT request if the resource has a systemAssigned(implicit) identity + TenantID *string `json:"tenantId,omitempty" azure:"ro"` +} + +type EncryptionSetProperties struct { + // The key vault key which is currently used by this disk encryption set. + ActiveKey *KeyForDiskEncryptionSet `json:"activeKey,omitempty"` + + // The type of key used to encrypt the data of the disk. 
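+ //
+ // For context (an illustrative sketch, IDs invented): the per-disk Encryption struct
+ // defined earlier in this file references a set of this kind by resource ID:
+ //
+ //    disk.Properties.Encryption = &Encryption{
+ //        DiskEncryptionSetID: to.Ptr("/subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.Compute/diskEncryptionSets/myDES"),
+ //        Type:                to.Ptr(EncryptionTypeEncryptionAtRestWithCustomerKey),
+ //    }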
+ EncryptionType *DiskEncryptionSetType `json:"encryptionType,omitempty"` + + // Set this flag to true to enable auto-updating of this disk encryption set to the latest key version. + RotationToLatestKeyVersionEnabled *bool `json:"rotationToLatestKeyVersionEnabled,omitempty"` + + // READ-ONLY; The error that was encountered during auto-key rotation. If an error is present, then auto-key rotation will + // not be attempted until the error on this disk encryption set is fixed. + AutoKeyRotationError *APIError `json:"autoKeyRotationError,omitempty" azure:"ro"` + + // READ-ONLY; The time when the active key of this disk encryption set was updated. + LastKeyRotationTimestamp *time.Time `json:"lastKeyRotationTimestamp,omitempty" azure:"ro"` + + // READ-ONLY; A readonly collection of key vault keys previously used by this disk encryption set while a key rotation is + // in progress. It will be empty if there is no ongoing key rotation. + PreviousKeys []*KeyForDiskEncryptionSet `json:"previousKeys,omitempty" azure:"ro"` + + // READ-ONLY; The disk encryption set provisioning state. + ProvisioningState *string `json:"provisioningState,omitempty" azure:"ro"` +} + +// EncryptionSettingsCollection - Encryption settings for disk or snapshot +type EncryptionSettingsCollection struct { + // REQUIRED; Set this flag to true and provide DiskEncryptionKey and optional KeyEncryptionKey to enable encryption. Set this + // flag to false and remove DiskEncryptionKey and KeyEncryptionKey to disable encryption. + // If EncryptionSettings is null in the request object, the existing settings remain unchanged. + Enabled *bool `json:"enabled,omitempty"` + + // A collection of encryption settings, one for each disk volume. + EncryptionSettings []*EncryptionSettingsElement `json:"encryptionSettings,omitempty"` + + // Describes what type of encryption is used for the disks. Once this field is set, it cannot be overwritten. '1.0' corresponds + // to Azure Disk Encryption with AAD app.'1.1' corresponds to Azure Disk + // Encryption. + EncryptionSettingsVersion *string `json:"encryptionSettingsVersion,omitempty"` +} + +// EncryptionSettingsElement - Encryption settings for one disk volume. +type EncryptionSettingsElement struct { + // Key Vault Secret Url and vault id of the disk encryption key + DiskEncryptionKey *KeyVaultAndSecretReference `json:"diskEncryptionKey,omitempty"` + + // Key Vault Key Url and vault id of the key encryption key. KeyEncryptionKey is optional and when provided is used to unwrap + // the disk encryption key. + KeyEncryptionKey *KeyVaultAndKeyReference `json:"keyEncryptionKey,omitempty"` +} + +// ExtendedLocation - The complex type of the extended location. +type ExtendedLocation struct { + // The name of the extended location. + Name *string `json:"name,omitempty"` + + // The type of the extended location. + Type *ExtendedLocationTypes `json:"type,omitempty"` +} + +// Extension - Describes a cloud service Extension. +type Extension struct { + // The name of the extension. + Name *string `json:"name,omitempty"` + + // Extension Properties. + Properties *CloudServiceExtensionProperties `json:"properties,omitempty"` +} + +// GalleriesClientBeginCreateOrUpdateOptions contains the optional parameters for the GalleriesClient.BeginCreateOrUpdate +// method. +type GalleriesClientBeginCreateOrUpdateOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// GalleriesClientBeginDeleteOptions contains the optional parameters for the GalleriesClient.BeginDelete method. 
+type GalleriesClientBeginDeleteOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// GalleriesClientBeginUpdateOptions contains the optional parameters for the GalleriesClient.BeginUpdate method. +type GalleriesClientBeginUpdateOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// GalleriesClientGetOptions contains the optional parameters for the GalleriesClient.Get method. +type GalleriesClientGetOptions struct { + // The expand query option to apply on the operation. + Expand *GalleryExpandParams + // The select expression to apply on the operation. + Select *SelectPermissions +} + +// GalleriesClientListByResourceGroupOptions contains the optional parameters for the GalleriesClient.ListByResourceGroup +// method. +type GalleriesClientListByResourceGroupOptions struct { + // placeholder for future optional parameters +} + +// GalleriesClientListOptions contains the optional parameters for the GalleriesClient.List method. +type GalleriesClientListOptions struct { + // placeholder for future optional parameters +} + +// Gallery - Specifies information about the Shared Image Gallery that you want to create or update. +type Gallery struct { + // REQUIRED; Resource location + Location *string `json:"location,omitempty"` + + // Describes the properties of a Shared Image Gallery. + Properties *GalleryProperties `json:"properties,omitempty"` + + // Resource tags + Tags map[string]*string `json:"tags,omitempty"` + + // READ-ONLY; Resource Id + ID *string `json:"id,omitempty" azure:"ro"` + + // READ-ONLY; Resource name + Name *string `json:"name,omitempty" azure:"ro"` + + // READ-ONLY; Resource type + Type *string `json:"type,omitempty" azure:"ro"` +} + +// GalleryApplication - Specifies information about the gallery Application Definition that you want to create or update. +type GalleryApplication struct { + // REQUIRED; Resource location + Location *string `json:"location,omitempty"` + + // Describes the properties of a gallery Application Definition. + Properties *GalleryApplicationProperties `json:"properties,omitempty"` + + // Resource tags + Tags map[string]*string `json:"tags,omitempty"` + + // READ-ONLY; Resource Id + ID *string `json:"id,omitempty" azure:"ro"` + + // READ-ONLY; Resource name + Name *string `json:"name,omitempty" azure:"ro"` + + // READ-ONLY; Resource type + Type *string `json:"type,omitempty" azure:"ro"` +} + +// GalleryApplicationList - The List Gallery Applications operation response. +type GalleryApplicationList struct { + // REQUIRED; A list of Gallery Applications. + Value []*GalleryApplication `json:"value,omitempty"` + + // The uri to fetch the next page of Application Definitions in the Application Gallery. Call ListNext() with this to fetch + // the next page of gallery Application Definitions. + NextLink *string `json:"nextLink,omitempty"` +} + +// GalleryApplicationProperties - Describes the properties of a gallery Application Definition. +type GalleryApplicationProperties struct { + // REQUIRED; This property allows you to specify the supported type of the OS that application is built for. + // Possible values are: + // Windows + // Linux + SupportedOSType *OperatingSystemTypes `json:"supportedOSType,omitempty"` + + // The description of this gallery Application Definition resource. This property is updatable. + Description *string `json:"description,omitempty"` + + // The end of life date of the gallery Application Definition. This property can be used for decommissioning purposes. 
This + // property is updatable. + EndOfLifeDate *time.Time `json:"endOfLifeDate,omitempty"` + + // The Eula agreement for the gallery Application Definition. + Eula *string `json:"eula,omitempty"` + + // The privacy statement uri. + PrivacyStatementURI *string `json:"privacyStatementUri,omitempty"` + + // The release note uri. + ReleaseNoteURI *string `json:"releaseNoteUri,omitempty"` +} + +// GalleryApplicationUpdate - Specifies information about the gallery Application Definition that you want to update. +type GalleryApplicationUpdate struct { + // Describes the properties of a gallery Application Definition. + Properties *GalleryApplicationProperties `json:"properties,omitempty"` + + // Resource tags + Tags map[string]*string `json:"tags,omitempty"` + + // READ-ONLY; Resource Id + ID *string `json:"id,omitempty" azure:"ro"` + + // READ-ONLY; Resource name + Name *string `json:"name,omitempty" azure:"ro"` + + // READ-ONLY; Resource type + Type *string `json:"type,omitempty" azure:"ro"` +} + +// GalleryApplicationVersion - Specifies information about the gallery Application Version that you want to create or update. +type GalleryApplicationVersion struct { + // REQUIRED; Resource location + Location *string `json:"location,omitempty"` + + // Describes the properties of a gallery image version. + Properties *GalleryApplicationVersionProperties `json:"properties,omitempty"` + + // Resource tags + Tags map[string]*string `json:"tags,omitempty"` + + // READ-ONLY; Resource Id + ID *string `json:"id,omitempty" azure:"ro"` + + // READ-ONLY; Resource name + Name *string `json:"name,omitempty" azure:"ro"` + + // READ-ONLY; Resource type + Type *string `json:"type,omitempty" azure:"ro"` +} + +// GalleryApplicationVersionList - The List Gallery Application version operation response. +type GalleryApplicationVersionList struct { + // REQUIRED; A list of gallery Application Versions. + Value []*GalleryApplicationVersion `json:"value,omitempty"` + + // The uri to fetch the next page of gallery Application Versions. Call ListNext() with this to fetch the next page of gallery + // Application Versions. + NextLink *string `json:"nextLink,omitempty"` +} + +// GalleryApplicationVersionProperties - Describes the properties of a gallery image version. +type GalleryApplicationVersionProperties struct { + // REQUIRED; The publishing profile of a gallery image version. + PublishingProfile *GalleryApplicationVersionPublishingProfile `json:"publishingProfile,omitempty"` + + // READ-ONLY; The provisioning state, which only appears in the response. + ProvisioningState *GalleryApplicationVersionPropertiesProvisioningState `json:"provisioningState,omitempty" azure:"ro"` + + // READ-ONLY; This is the replication status of the gallery image version. + ReplicationStatus *ReplicationStatus `json:"replicationStatus,omitempty" azure:"ro"` +} + +// GalleryApplicationVersionPublishingProfile - The publishing profile of a gallery image version. +type GalleryApplicationVersionPublishingProfile struct { + // REQUIRED; The source image from which the Image Version is going to be created. + Source *UserArtifactSource `json:"source,omitempty"` + + // Optional. Whether or not this application reports health. + EnableHealthCheck *bool `json:"enableHealthCheck,omitempty"` + + // The end of life date of the gallery image version. This property can be used for decommissioning purposes. This property + // is updatable. 
+ EndOfLifeDate *time.Time `json:"endOfLifeDate,omitempty"` + + // If set to true, Virtual Machines deployed from the latest version of the Image Definition won't use this Image Version. + ExcludeFromLatest *bool `json:"excludeFromLatest,omitempty"` + ManageActions *UserArtifactManage `json:"manageActions,omitempty"` + + // The number of replicas of the Image Version to be created per region. This property would take effect for a region when + // regionalReplicaCount is not specified. This property is updatable. + ReplicaCount *int32 `json:"replicaCount,omitempty"` + + // Optional parameter which specifies the mode to be used for replication. This property is not updatable. + ReplicationMode *ReplicationMode `json:"replicationMode,omitempty"` + + // Specifies the storage account type to be used to store the image. This property is not updatable. + StorageAccountType *StorageAccountType `json:"storageAccountType,omitempty"` + + // The target extended locations where the Image Version is going to be replicated to. This property is updatable. + TargetExtendedLocations []*GalleryTargetExtendedLocation `json:"targetExtendedLocations,omitempty"` + + // The target regions where the Image Version is going to be replicated to. This property is updatable. + TargetRegions []*TargetRegion `json:"targetRegions,omitempty"` + + // READ-ONLY; The timestamp for when the gallery image version is published. + PublishedDate *time.Time `json:"publishedDate,omitempty" azure:"ro"` +} + +// GalleryApplicationVersionUpdate - Specifies information about the gallery Application Version that you want to update. +type GalleryApplicationVersionUpdate struct { + // Describes the properties of a gallery image version. + Properties *GalleryApplicationVersionProperties `json:"properties,omitempty"` + + // Resource tags + Tags map[string]*string `json:"tags,omitempty"` + + // READ-ONLY; Resource Id + ID *string `json:"id,omitempty" azure:"ro"` + + // READ-ONLY; Resource name + Name *string `json:"name,omitempty" azure:"ro"` + + // READ-ONLY; Resource type + Type *string `json:"type,omitempty" azure:"ro"` +} + +// GalleryApplicationVersionsClientBeginCreateOrUpdateOptions contains the optional parameters for the GalleryApplicationVersionsClient.BeginCreateOrUpdate +// method. +type GalleryApplicationVersionsClientBeginCreateOrUpdateOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// GalleryApplicationVersionsClientBeginDeleteOptions contains the optional parameters for the GalleryApplicationVersionsClient.BeginDelete +// method. +type GalleryApplicationVersionsClientBeginDeleteOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// GalleryApplicationVersionsClientBeginUpdateOptions contains the optional parameters for the GalleryApplicationVersionsClient.BeginUpdate +// method. +type GalleryApplicationVersionsClientBeginUpdateOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// GalleryApplicationVersionsClientGetOptions contains the optional parameters for the GalleryApplicationVersionsClient.Get +// method. +type GalleryApplicationVersionsClientGetOptions struct { + // The expand expression to apply on the operation. + Expand *ReplicationStatusTypes +} + +// GalleryApplicationVersionsClientListByGalleryApplicationOptions contains the optional parameters for the GalleryApplicationVersionsClient.ListByGalleryApplication +// method. 
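+//
+// Illustrative sketch (names hypothetical; assumes the runtime pager pattern used by this
+// SDK generation): pages are walked with More/NextPage rather than by following NextLink
+// by hand:
+//
+//    pager := client.NewListByGalleryApplicationPager("rg", "myGallery", "myApp", nil)
+//    for pager.More() {
+//        page, err := pager.NextPage(ctx)
+//        if err != nil {
+//            break // handle the error in real code
+//        }
+//        for _, version := range page.Value {
+//            _ = version // each entry is a *GalleryApplicationVersion
+//        }
+//    }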
+type GalleryApplicationVersionsClientListByGalleryApplicationOptions struct { + // placeholder for future optional parameters +} + +// GalleryApplicationsClientBeginCreateOrUpdateOptions contains the optional parameters for the GalleryApplicationsClient.BeginCreateOrUpdate +// method. +type GalleryApplicationsClientBeginCreateOrUpdateOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// GalleryApplicationsClientBeginDeleteOptions contains the optional parameters for the GalleryApplicationsClient.BeginDelete +// method. +type GalleryApplicationsClientBeginDeleteOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// GalleryApplicationsClientBeginUpdateOptions contains the optional parameters for the GalleryApplicationsClient.BeginUpdate +// method. +type GalleryApplicationsClientBeginUpdateOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// GalleryApplicationsClientGetOptions contains the optional parameters for the GalleryApplicationsClient.Get method. +type GalleryApplicationsClientGetOptions struct { + // placeholder for future optional parameters +} + +// GalleryApplicationsClientListByGalleryOptions contains the optional parameters for the GalleryApplicationsClient.ListByGallery +// method. +type GalleryApplicationsClientListByGalleryOptions struct { + // placeholder for future optional parameters +} + +// GalleryArtifactPublishingProfileBase - Describes the basic gallery artifact publishing profile. +type GalleryArtifactPublishingProfileBase struct { + // The end of life date of the gallery image version. This property can be used for decommissioning purposes. This property + // is updatable. + EndOfLifeDate *time.Time `json:"endOfLifeDate,omitempty"` + + // If set to true, Virtual Machines deployed from the latest version of the Image Definition won't use this Image Version. + ExcludeFromLatest *bool `json:"excludeFromLatest,omitempty"` + + // The number of replicas of the Image Version to be created per region. This property would take effect for a region when + // regionalReplicaCount is not specified. This property is updatable. + ReplicaCount *int32 `json:"replicaCount,omitempty"` + + // Optional parameter which specifies the mode to be used for replication. This property is not updatable. + ReplicationMode *ReplicationMode `json:"replicationMode,omitempty"` + + // Specifies the storage account type to be used to store the image. This property is not updatable. + StorageAccountType *StorageAccountType `json:"storageAccountType,omitempty"` + + // The target extended locations where the Image Version is going to be replicated to. This property is updatable. + TargetExtendedLocations []*GalleryTargetExtendedLocation `json:"targetExtendedLocations,omitempty"` + + // The target regions where the Image Version is going to be replicated to. This property is updatable. + TargetRegions []*TargetRegion `json:"targetRegions,omitempty"` + + // READ-ONLY; The timestamp for when the gallery image version is published. + PublishedDate *time.Time `json:"publishedDate,omitempty" azure:"ro"` +} + +// GalleryArtifactSource - The source image from which the Image Version is going to be created. +type GalleryArtifactSource struct { + // REQUIRED; The managed artifact. + ManagedImage *ManagedArtifact `json:"managedImage,omitempty"` +} + +// GalleryArtifactVersionSource - The gallery artifact version source. 
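+//
+// Illustrative sketch (resource ID invented): sourcing an image version from a managed image:
+//
+//    src := GalleryArtifactVersionSource{
+//        ID: to.Ptr("/subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.Compute/images/myManagedImage"),
+//    }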
+type GalleryArtifactVersionSource struct {
+ // The id of the gallery artifact version source. Can specify a disk uri, snapshot uri, user image or storage account resource.
+ ID *string `json:"id,omitempty"`
+
+ // The uri of the gallery artifact version source. Currently used to specify vhd/blob source.
+ URI *string `json:"uri,omitempty"`
+}
+
+// GalleryDataDiskImage - This is the data disk image.
+type GalleryDataDiskImage struct {
+ // REQUIRED; This property specifies the logical unit number of the data disk. This value is used to identify data disks within
+ // the Virtual Machine and therefore must be unique for each data disk attached to the
+ // Virtual Machine.
+ Lun *int32 `json:"lun,omitempty"`
+
+ // The host caching of the disk. Valid values are 'None', 'ReadOnly', and 'ReadWrite'
+ HostCaching *HostCaching `json:"hostCaching,omitempty"`
+
+ // The gallery artifact version source.
+ Source *GalleryArtifactVersionSource `json:"source,omitempty"`
+
+ // READ-ONLY; This property indicates the size of the VHD to be created.
+ SizeInGB *int32 `json:"sizeInGB,omitempty" azure:"ro"`
+}
+
+// GalleryDiskImage - This is the disk image base class.
+type GalleryDiskImage struct {
+ // The host caching of the disk. Valid values are 'None', 'ReadOnly', and 'ReadWrite'
+ HostCaching *HostCaching `json:"hostCaching,omitempty"`
+
+ // The gallery artifact version source.
+ Source *GalleryArtifactVersionSource `json:"source,omitempty"`
+
+ // READ-ONLY; This property indicates the size of the VHD to be created.
+ SizeInGB *int32 `json:"sizeInGB,omitempty" azure:"ro"`
+}
+
+// GalleryExtendedLocation - The name of the extended location.
+type GalleryExtendedLocation struct {
+ Name *string `json:"name,omitempty"`
+
+ // The type of the extended location.
+ Type *GalleryExtendedLocationType `json:"type,omitempty"`
+}
+
+// GalleryIdentifier - Describes the gallery unique name.
+type GalleryIdentifier struct {
+ // READ-ONLY; The unique name of the Shared Image Gallery. This name is generated automatically by Azure.
+ UniqueName *string `json:"uniqueName,omitempty" azure:"ro"`
+}
+
+// GalleryImage - Specifies information about the gallery image definition that you want to create or update.
+type GalleryImage struct {
+ // REQUIRED; Resource location
+ Location *string `json:"location,omitempty"`
+
+ // Describes the properties of a gallery image definition.
+ Properties *GalleryImageProperties `json:"properties,omitempty"`
+
+ // Resource tags
+ Tags map[string]*string `json:"tags,omitempty"`
+
+ // READ-ONLY; Resource Id
+ ID *string `json:"id,omitempty" azure:"ro"`
+
+ // READ-ONLY; Resource name
+ Name *string `json:"name,omitempty" azure:"ro"`
+
+ // READ-ONLY; Resource type
+ Type *string `json:"type,omitempty" azure:"ro"`
+}
+
+// GalleryImageFeature - A feature for gallery image.
+type GalleryImageFeature struct {
+ // The name of the gallery image feature.
+ Name *string `json:"name,omitempty"`
+
+ // The value of the gallery image feature.
+ Value *string `json:"value,omitempty"`
+}
+
+// GalleryImageIdentifier - This is the gallery image definition identifier.
+type GalleryImageIdentifier struct {
+ // REQUIRED; The name of the gallery image definition offer.
+ Offer *string `json:"offer,omitempty"`
+
+ // REQUIRED; The name of the gallery image definition publisher.
+ Publisher *string `json:"publisher,omitempty"`
+
+ // REQUIRED; The name of the gallery image definition SKU. 
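+ //
+ // A complete identifier as a literal (an illustrative sketch, values invented):
+ //
+ //    ident := GalleryImageIdentifier{
+ //        Publisher: to.Ptr("myPublisher"),
+ //        Offer:     to.Ptr("myOffer"),
+ //        SKU:       to.Ptr("mySKU"),
+ //    }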
+ SKU *string `json:"sku,omitempty"` +} + +// GalleryImageList - The List Gallery Images operation response. +type GalleryImageList struct { + // REQUIRED; A list of Shared Image Gallery images. + Value []*GalleryImage `json:"value,omitempty"` + + // The uri to fetch the next page of Image Definitions in the Shared Image Gallery. Call ListNext() with this to fetch the + // next page of gallery image definitions. + NextLink *string `json:"nextLink,omitempty"` +} + +// GalleryImageProperties - Describes the properties of a gallery image definition. +type GalleryImageProperties struct { + // REQUIRED; This is the gallery image definition identifier. + Identifier *GalleryImageIdentifier `json:"identifier,omitempty"` + + // REQUIRED; This property allows the user to specify whether the virtual machines created under this image are 'Generalized' + // or 'Specialized'. + OSState *OperatingSystemStateTypes `json:"osState,omitempty"` + + // REQUIRED; This property allows you to specify the type of the OS that is included in the disk when creating a VM from a + // managed image. + // Possible values are: + // Windows + // Linux + OSType *OperatingSystemTypes `json:"osType,omitempty"` + + // The architecture of the image. Applicable to OS disks only. + Architecture *Architecture `json:"architecture,omitempty"` + + // The description of this gallery image definition resource. This property is updatable. + Description *string `json:"description,omitempty"` + + // Describes the disallowed disk types. + Disallowed *Disallowed `json:"disallowed,omitempty"` + + // The end of life date of the gallery image definition. This property can be used for decommissioning purposes. This property + // is updatable. + EndOfLifeDate *time.Time `json:"endOfLifeDate,omitempty"` + + // The Eula agreement for the gallery image definition. + Eula *string `json:"eula,omitempty"` + + // A list of gallery image features. + Features []*GalleryImageFeature `json:"features,omitempty"` + + // The hypervisor generation of the Virtual Machine. Applicable to OS disks only. + HyperVGeneration *HyperVGeneration `json:"hyperVGeneration,omitempty"` + + // The privacy statement uri. + PrivacyStatementURI *string `json:"privacyStatementUri,omitempty"` + + // Describes the gallery image definition purchase plan. This is used by marketplace images. + PurchasePlan *ImagePurchasePlan `json:"purchasePlan,omitempty"` + + // The properties describe the recommended machine configuration for this Image Definition. These properties are updatable. + Recommended *RecommendedMachineConfiguration `json:"recommended,omitempty"` + + // The release note uri. + ReleaseNoteURI *string `json:"releaseNoteUri,omitempty"` + + // READ-ONLY; The provisioning state, which only appears in the response. + ProvisioningState *GalleryImagePropertiesProvisioningState `json:"provisioningState,omitempty" azure:"ro"` +} + +// GalleryImageUpdate - Specifies information about the gallery image definition that you want to update. +type GalleryImageUpdate struct { + // Describes the properties of a gallery image definition. 
+ Properties *GalleryImageProperties `json:"properties,omitempty"` + + // Resource tags + Tags map[string]*string `json:"tags,omitempty"` + + // READ-ONLY; Resource Id + ID *string `json:"id,omitempty" azure:"ro"` + + // READ-ONLY; Resource name + Name *string `json:"name,omitempty" azure:"ro"` + + // READ-ONLY; Resource type + Type *string `json:"type,omitempty" azure:"ro"` +} + +// GalleryImageVersion - Specifies information about the gallery image version that you want to create or update. +type GalleryImageVersion struct { + // REQUIRED; Resource location + Location *string `json:"location,omitempty"` + + // Describes the properties of a gallery image version. + Properties *GalleryImageVersionProperties `json:"properties,omitempty"` + + // Resource tags + Tags map[string]*string `json:"tags,omitempty"` + + // READ-ONLY; Resource Id + ID *string `json:"id,omitempty" azure:"ro"` + + // READ-ONLY; Resource name + Name *string `json:"name,omitempty" azure:"ro"` + + // READ-ONLY; Resource type + Type *string `json:"type,omitempty" azure:"ro"` +} + +// GalleryImageVersionList - The List Gallery Image version operation response. +type GalleryImageVersionList struct { + // REQUIRED; A list of gallery image versions. + Value []*GalleryImageVersion `json:"value,omitempty"` + + // The uri to fetch the next page of gallery image versions. Call ListNext() with this to fetch the next page of gallery image + // versions. + NextLink *string `json:"nextLink,omitempty"` +} + +// GalleryImageVersionProperties - Describes the properties of a gallery image version. +type GalleryImageVersionProperties struct { + // REQUIRED; This is the storage profile of a Gallery Image Version. + StorageProfile *GalleryImageVersionStorageProfile `json:"storageProfile,omitempty"` + + // The publishing profile of a gallery image Version. + PublishingProfile *GalleryImageVersionPublishingProfile `json:"publishingProfile,omitempty"` + + // READ-ONLY; The provisioning state, which only appears in the response. + ProvisioningState *GalleryImageVersionPropertiesProvisioningState `json:"provisioningState,omitempty" azure:"ro"` + + // READ-ONLY; This is the replication status of the gallery image version. + ReplicationStatus *ReplicationStatus `json:"replicationStatus,omitempty" azure:"ro"` +} + +// GalleryImageVersionPublishingProfile - The publishing profile of a gallery image Version. +type GalleryImageVersionPublishingProfile struct { + // The end of life date of the gallery image version. This property can be used for decommissioning purposes. This property + // is updatable. + EndOfLifeDate *time.Time `json:"endOfLifeDate,omitempty"` + + // If set to true, Virtual Machines deployed from the latest version of the Image Definition won't use this Image Version. + ExcludeFromLatest *bool `json:"excludeFromLatest,omitempty"` + + // The number of replicas of the Image Version to be created per region. This property would take effect for a region when + // regionalReplicaCount is not specified. This property is updatable. + ReplicaCount *int32 `json:"replicaCount,omitempty"` + + // Optional parameter which specifies the mode to be used for replication. This property is not updatable. + ReplicationMode *ReplicationMode `json:"replicationMode,omitempty"` + + // Specifies the storage account type to be used to store the image. This property is not updatable. + StorageAccountType *StorageAccountType `json:"storageAccountType,omitempty"` + + // The target extended locations where the Image Version is going to be replicated to. 
This property is updatable. + TargetExtendedLocations []*GalleryTargetExtendedLocation `json:"targetExtendedLocations,omitempty"` + + // The target regions where the Image Version is going to be replicated to. This property is updatable. + TargetRegions []*TargetRegion `json:"targetRegions,omitempty"` + + // READ-ONLY; The timestamp for when the gallery image version is published. + PublishedDate *time.Time `json:"publishedDate,omitempty" azure:"ro"` +} + +// GalleryImageVersionStorageProfile - This is the storage profile of a Gallery Image Version. +type GalleryImageVersionStorageProfile struct { + // A list of data disk images. + DataDiskImages []*GalleryDataDiskImage `json:"dataDiskImages,omitempty"` + + // This is the OS disk image. + OSDiskImage *GalleryOSDiskImage `json:"osDiskImage,omitempty"` + + // The gallery artifact version source. + Source *GalleryArtifactVersionSource `json:"source,omitempty"` +} + +// GalleryImageVersionUpdate - Specifies information about the gallery image version that you want to update. +type GalleryImageVersionUpdate struct { + // Describes the properties of a gallery image version. + Properties *GalleryImageVersionProperties `json:"properties,omitempty"` + + // Resource tags + Tags map[string]*string `json:"tags,omitempty"` + + // READ-ONLY; Resource Id + ID *string `json:"id,omitempty" azure:"ro"` + + // READ-ONLY; Resource name + Name *string `json:"name,omitempty" azure:"ro"` + + // READ-ONLY; Resource type + Type *string `json:"type,omitempty" azure:"ro"` +} + +// GalleryImageVersionsClientBeginCreateOrUpdateOptions contains the optional parameters for the GalleryImageVersionsClient.BeginCreateOrUpdate +// method. +type GalleryImageVersionsClientBeginCreateOrUpdateOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// GalleryImageVersionsClientBeginDeleteOptions contains the optional parameters for the GalleryImageVersionsClient.BeginDelete +// method. +type GalleryImageVersionsClientBeginDeleteOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// GalleryImageVersionsClientBeginUpdateOptions contains the optional parameters for the GalleryImageVersionsClient.BeginUpdate +// method. +type GalleryImageVersionsClientBeginUpdateOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// GalleryImageVersionsClientGetOptions contains the optional parameters for the GalleryImageVersionsClient.Get method. +type GalleryImageVersionsClientGetOptions struct { + // The expand expression to apply on the operation. + Expand *ReplicationStatusTypes +} + +// GalleryImageVersionsClientListByGalleryImageOptions contains the optional parameters for the GalleryImageVersionsClient.ListByGalleryImage +// method. +type GalleryImageVersionsClientListByGalleryImageOptions struct { + // placeholder for future optional parameters +} + +// GalleryImagesClientBeginCreateOrUpdateOptions contains the optional parameters for the GalleryImagesClient.BeginCreateOrUpdate +// method. +type GalleryImagesClientBeginCreateOrUpdateOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// GalleryImagesClientBeginDeleteOptions contains the optional parameters for the GalleryImagesClient.BeginDelete method. +type GalleryImagesClientBeginDeleteOptions struct { + // Resumes the LRO from the provided token. 
+ ResumeToken string +} + +// GalleryImagesClientBeginUpdateOptions contains the optional parameters for the GalleryImagesClient.BeginUpdate method. +type GalleryImagesClientBeginUpdateOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// GalleryImagesClientGetOptions contains the optional parameters for the GalleryImagesClient.Get method. +type GalleryImagesClientGetOptions struct { + // placeholder for future optional parameters +} + +// GalleryImagesClientListByGalleryOptions contains the optional parameters for the GalleryImagesClient.ListByGallery method. +type GalleryImagesClientListByGalleryOptions struct { + // placeholder for future optional parameters +} + +// GalleryList - The List Galleries operation response. +type GalleryList struct { + // REQUIRED; A list of galleries. + Value []*Gallery `json:"value,omitempty"` + + // The uri to fetch the next page of galleries. Call ListNext() with this to fetch the next page of galleries. + NextLink *string `json:"nextLink,omitempty"` +} + +// GalleryOSDiskImage - This is the OS disk image. +type GalleryOSDiskImage struct { + // The host caching of the disk. Valid values are 'None', 'ReadOnly', and 'ReadWrite' + HostCaching *HostCaching `json:"hostCaching,omitempty"` + + // The gallery artifact version source. + Source *GalleryArtifactVersionSource `json:"source,omitempty"` + + // READ-ONLY; This property indicates the size of the VHD to be created. + SizeInGB *int32 `json:"sizeInGB,omitempty" azure:"ro"` +} + +// GalleryProperties - Describes the properties of a Shared Image Gallery. +type GalleryProperties struct { + // The description of this Shared Image Gallery resource. This property is updatable. + Description *string `json:"description,omitempty"` + + // Describes the gallery unique name. + Identifier *GalleryIdentifier `json:"identifier,omitempty"` + + // Profile for gallery sharing to subscription or tenant + SharingProfile *SharingProfile `json:"sharingProfile,omitempty"` + + // Contains information about the soft deletion policy of the gallery. + SoftDeletePolicy *SoftDeletePolicy `json:"softDeletePolicy,omitempty"` + + // READ-ONLY; The provisioning state, which only appears in the response. + ProvisioningState *GalleryPropertiesProvisioningState `json:"provisioningState,omitempty" azure:"ro"` + + // READ-ONLY; Sharing status of current gallery. + SharingStatus *SharingStatus `json:"sharingStatus,omitempty" azure:"ro"` +} + +// GallerySharingProfileClientBeginUpdateOptions contains the optional parameters for the GallerySharingProfileClient.BeginUpdate +// method. +type GallerySharingProfileClientBeginUpdateOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +type GalleryTargetExtendedLocation struct { + // Optional. Allows users to provide customer managed keys for encrypting the OS and data disks in the gallery artifact. + Encryption *EncryptionImages `json:"encryption,omitempty"` + + // The name of the extended location. + ExtendedLocation *GalleryExtendedLocation `json:"extendedLocation,omitempty"` + + // The number of replicas of the Image Version to be created per extended location. This property is updatable. + ExtendedLocationReplicaCount *int32 `json:"extendedLocationReplicaCount,omitempty"` + + // The name of the region. + Name *string `json:"name,omitempty"` + + // Specifies the storage account type to be used to store the image. This property is not updatable. 
+ StorageAccountType *StorageAccountType `json:"storageAccountType,omitempty"` +} + +// GalleryUpdate - Specifies information about the Shared Image Gallery that you want to update. +type GalleryUpdate struct { + // Describes the properties of a Shared Image Gallery. + Properties *GalleryProperties `json:"properties,omitempty"` + + // Resource tags + Tags map[string]*string `json:"tags,omitempty"` + + // READ-ONLY; Resource Id + ID *string `json:"id,omitempty" azure:"ro"` + + // READ-ONLY; Resource name + Name *string `json:"name,omitempty" azure:"ro"` + + // READ-ONLY; Resource type + Type *string `json:"type,omitempty" azure:"ro"` +} + +// GrantAccessData - Data used for requesting a SAS. +type GrantAccessData struct { + // REQUIRED + Access *AccessLevel `json:"access,omitempty"` + + // REQUIRED; Time duration in seconds until the SAS access expires. + DurationInSeconds *int32 `json:"durationInSeconds,omitempty"` + + // Set this flag to true to get additional SAS for VM guest state + GetSecureVMGuestStateSAS *bool `json:"getSecureVMGuestStateSAS,omitempty"` +} + +// HardwareProfile - Specifies the hardware settings for the virtual machine. +type HardwareProfile struct { + // Specifies the size of the virtual machine. + // The enum data type is currently deprecated and will be removed by December 23rd 2023. + // Recommended way to get the list of available sizes is using these APIs: + // List all available virtual machine sizes in an availability set [https://docs.microsoft.com/rest/api/compute/availabilitysets/listavailablesizes] + // List all available virtual machine sizes in a region [https://docs.microsoft.com/rest/api/compute/resourceskus/list] + // List all available virtual machine sizes for resizing [https://docs.microsoft.com/rest/api/compute/virtualmachines/listavailablesizes]. + // For more information about virtual machine sizes, see Sizes for + // virtual machines [https://docs.microsoft.com/azure/virtual-machines/sizes]. + // The available VM sizes depend on region and availability set. + VMSize *VirtualMachineSizeTypes `json:"vmSize,omitempty"` + + // Specifies the properties for customizing the size of the virtual machine. Minimum api-version: 2021-07-01. + // This feature is still in preview mode and is not supported for VirtualMachineScaleSet. + // Please follow the instructions in VM Customization [https://aka.ms/vmcustomization] for more details. + VMSizeProperties *VMSizeProperties `json:"vmSizeProperties,omitempty"` +} + +// Image - The source user image virtual hard disk. The virtual hard disk will be copied before being attached to the virtual +// machine. If SourceImage is provided, the destination virtual hard drive must not +// exist. +type Image struct { + // REQUIRED; Resource location + Location *string `json:"location,omitempty"` + + // The extended location of the Image. + ExtendedLocation *ExtendedLocation `json:"extendedLocation,omitempty"` + + // Describes the properties of an Image. + Properties *ImageProperties `json:"properties,omitempty"` + + // Resource tags + Tags map[string]*string `json:"tags,omitempty"` + + // READ-ONLY; Resource Id + ID *string `json:"id,omitempty" azure:"ro"` + + // READ-ONLY; Resource name + Name *string `json:"name,omitempty" azure:"ro"` + + // READ-ONLY; Resource type + Type *string `json:"type,omitempty" azure:"ro"` +} + +// ImageDataDisk - Describes a data disk. +type ImageDataDisk struct { + // REQUIRED; Specifies the logical unit number of the data disk. 
This value is used to identify data disks within the VM and
+ // therefore must be unique for each data disk attached to a VM.
+ Lun *int32 `json:"lun,omitempty"`
+
+ // The Virtual Hard Disk.
+ BlobURI *string `json:"blobUri,omitempty"`
+
+ // Specifies the caching requirements.
+ // Possible values are:
+ // None
+ // ReadOnly
+ // ReadWrite
+ // Default: None for Standard storage. ReadOnly for Premium storage
+ Caching *CachingTypes `json:"caching,omitempty"`
+
+ // Specifies the customer managed disk encryption set resource id for the managed image disk.
+ DiskEncryptionSet *DiskEncryptionSetParameters `json:"diskEncryptionSet,omitempty"`
+
+ // Specifies the size of empty data disks in gigabytes. This element can be used to overwrite the name of the disk in a virtual
+ // machine image.
+ // This value cannot be larger than 1023 GB
+ DiskSizeGB *int32 `json:"diskSizeGB,omitempty"`
+
+ // The managedDisk.
+ ManagedDisk *SubResource `json:"managedDisk,omitempty"`
+
+ // The snapshot.
+ Snapshot *SubResource `json:"snapshot,omitempty"`
+
+ // Specifies the storage account type for the managed disk. NOTE: UltraSSD_LRS can only be used with data disks, it cannot
+ // be used with OS Disk.
+ StorageAccountType *StorageAccountTypes `json:"storageAccountType,omitempty"`
+}
+
+// ImageDisk - Describes an image disk.
+type ImageDisk struct {
+ // The Virtual Hard Disk.
+ BlobURI *string `json:"blobUri,omitempty"`
+
+ // Specifies the caching requirements.
+ // Possible values are:
+ // None
+ // ReadOnly
+ // ReadWrite
+ // Default: None for Standard storage. ReadOnly for Premium storage
+ Caching *CachingTypes `json:"caching,omitempty"`
+
+ // Specifies the customer managed disk encryption set resource id for the managed image disk.
+ DiskEncryptionSet *DiskEncryptionSetParameters `json:"diskEncryptionSet,omitempty"`
+
+ // Specifies the size of empty data disks in gigabytes. This element can be used to overwrite the name of the disk in a virtual
+ // machine image.
+ // This value cannot be larger than 1023 GB
+ DiskSizeGB *int32 `json:"diskSizeGB,omitempty"`
+
+ // The managedDisk.
+ ManagedDisk *SubResource `json:"managedDisk,omitempty"`
+
+ // The snapshot.
+ Snapshot *SubResource `json:"snapshot,omitempty"`
+
+ // Specifies the storage account type for the managed disk. NOTE: UltraSSD_LRS can only be used with data disks, it cannot
+ // be used with OS Disk.
+ StorageAccountType *StorageAccountTypes `json:"storageAccountType,omitempty"`
+}
+
+// ImageDiskReference - The source image used for creating the disk.
+type ImageDiskReference struct {
+ // REQUIRED; A relative uri containing either a Platform Image Repository or user image reference.
+ ID *string `json:"id,omitempty"`
+
+ // If the disk is created from an image's data disk, this is an index that indicates which of the data disks in the image
+ // to use. For OS disks, this field is null.
+ Lun *int32 `json:"lun,omitempty"`
+}
+
+// ImageListResult - The List Image operation response.
+type ImageListResult struct {
+ // REQUIRED; The list of Images.
+ Value []*Image `json:"value,omitempty"`
+
+ // The uri to fetch the next page of Images. Call ListNext() with this to fetch the next page of Images.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// ImageOSDisk - Describes an Operating System disk.
+type ImageOSDisk struct {
+ // REQUIRED; The OS State.
+ OSState *OperatingSystemStateTypes `json:"osState,omitempty"`
+
+ // REQUIRED; This property allows you to specify the type of the OS that is included in the disk if creating a VM from a custom
+ // image.
+ // Possible values are:
+ // Windows
+ // Linux
+ OSType *OperatingSystemTypes `json:"osType,omitempty"`
+
+ // The Virtual Hard Disk.
+ BlobURI *string `json:"blobUri,omitempty"`
+
+ // Specifies the caching requirements.
+ // Possible values are:
+ // None
+ // ReadOnly
+ // ReadWrite
+ // Default: None for Standard storage. ReadOnly for Premium storage
+ Caching *CachingTypes `json:"caching,omitempty"`
+
+ // Specifies the customer managed disk encryption set resource id for the managed image disk.
+ DiskEncryptionSet *DiskEncryptionSetParameters `json:"diskEncryptionSet,omitempty"`
+
+ // Specifies the size of empty data disks in gigabytes. This element can be used to overwrite the name of the disk in a virtual
+ // machine image.
+ // This value cannot be larger than 1023 GB
+ DiskSizeGB *int32 `json:"diskSizeGB,omitempty"`
+
+ // The managedDisk.
+ ManagedDisk *SubResource `json:"managedDisk,omitempty"`
+
+ // The snapshot.
+ Snapshot *SubResource `json:"snapshot,omitempty"`
+
+ // Specifies the storage account type for the managed disk. NOTE: UltraSSD_LRS can only be used with data disks, it cannot
+ // be used with OS Disk.
+ StorageAccountType *StorageAccountTypes `json:"storageAccountType,omitempty"`
+}
+
+// ImageProperties - Describes the properties of an Image.
+type ImageProperties struct {
+ // Specifies the HyperVGenerationType of the VirtualMachine created from the image. From API Version 2019-03-01 if the image
+ // source is a blob, then we need the user to specify the value; if the source is a
+ // managed resource like a disk or snapshot, we may require the user to specify the property if we cannot deduce it from the
+ // source managed resource.
+ HyperVGeneration *HyperVGenerationTypes `json:"hyperVGeneration,omitempty"`
+
+ // The source virtual machine from which Image is created.
+ SourceVirtualMachine *SubResource `json:"sourceVirtualMachine,omitempty"`
+
+ // Specifies the storage settings for the virtual machine disks.
+ StorageProfile *ImageStorageProfile `json:"storageProfile,omitempty"`
+
+ // READ-ONLY; The provisioning state.
+ ProvisioningState *string `json:"provisioningState,omitempty" azure:"ro"`
+}
+
+// ImagePurchasePlan - Describes the gallery image definition purchase plan. This is used by marketplace images.
+type ImagePurchasePlan struct {
+ // The plan ID.
+ Name *string `json:"name,omitempty"`
+
+ // The product ID.
+ Product *string `json:"product,omitempty"`
+
+ // The publisher ID.
+ Publisher *string `json:"publisher,omitempty"`
+}
+
+// ImageReference - Specifies information about the image to use. You can specify information about platform images, marketplace
+// images, or virtual machine images. This element is required when you want to use a platform
+// image, marketplace image, or virtual machine image, but is not used in other creation operations. NOTE: Image reference
+// publisher and offer can only be set when you create the scale set.
+type ImageReference struct {
+ // Specifies the community gallery image unique id for vm deployment. This can be fetched from community gallery image GET
+ // call.
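+ // (For illustration only: such an id is assumed to look like
+ // /CommunityGalleries/{publicGalleryName}/Images/{imageName}; the authoritative value is
+ // whatever the GET call above returns.)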
+ CommunityGalleryImageID *string `json:"communityGalleryImageId,omitempty"`
+
+ // Resource Id
+ ID *string `json:"id,omitempty"`
+
+ // Specifies the offer of the platform image or marketplace image used to create the virtual machine.
+ Offer *string `json:"offer,omitempty"`
+
+ // The image publisher.
+ Publisher *string `json:"publisher,omitempty"`
+
+ // The image SKU.
+ SKU *string `json:"sku,omitempty"`
+
+ // Specifies the shared gallery image unique id for vm deployment. This can be fetched from shared gallery image GET call.
+ SharedGalleryImageID *string `json:"sharedGalleryImageId,omitempty"`
+
+ // Specifies the version of the platform image or marketplace image used to create the virtual machine. The allowed formats
+ // are Major.Minor.Build or 'latest'. Major, Minor, and Build are decimal numbers.
+ // Specify 'latest' to use the latest version of an image available at deploy time. Even if you use 'latest', the VM image
+ // will not automatically update after deploy time even if a new version becomes
+ // available. Please do not use field 'version' for gallery image deployment; gallery images should always use the 'id' field
+ // for deployment. To use the 'latest' version of a gallery image, just set
+ // '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{imageName}'
+ // in the 'id' field without version input.
+ Version *string `json:"version,omitempty"`
+
+ // READ-ONLY; Specifies, in decimal numbers, the version of the platform image or marketplace image used to create the virtual
+ // machine. This read-only field differs from 'version' only if the value specified in
+ // the 'version' field is 'latest'.
+ ExactVersion *string `json:"exactVersion,omitempty" azure:"ro"`
+}
+
+// ImageStorageProfile - Describes a storage profile.
+type ImageStorageProfile struct {
+ // Specifies the parameters that are used to add a data disk to a virtual machine.
+ // For more information about disks, see About disks and VHDs for Azure virtual machines [https://docs.microsoft.com/azure/virtual-machines/managed-disks-overview].
+ DataDisks []*ImageDataDisk `json:"dataDisks,omitempty"`
+
+ // Specifies information about the operating system disk used by the virtual machine.
+ // For more information about disks, see About disks and VHDs for Azure virtual machines [https://docs.microsoft.com/azure/virtual-machines/managed-disks-overview].
+ OSDisk *ImageOSDisk `json:"osDisk,omitempty"`
+
+ // Specifies whether an image is zone resilient or not. Default is false. Zone resilient images can be created only in regions
+ // that provide Zone Redundant Storage (ZRS).
+ ZoneResilient *bool `json:"zoneResilient,omitempty"`
+}
+
+// ImageUpdate - The source user image virtual hard disk. Only tags may be updated.
+type ImageUpdate struct {
+ // Describes the properties of an Image.
+ Properties *ImageProperties `json:"properties,omitempty"`
+
+ // Resource tags
+ Tags map[string]*string `json:"tags,omitempty"`
+}
+
+// ImagesClientBeginCreateOrUpdateOptions contains the optional parameters for the ImagesClient.BeginCreateOrUpdate method.
+type ImagesClientBeginCreateOrUpdateOptions struct {
+ // Resumes the LRO from the provided token.
+ ResumeToken string
+}
+
+// ImagesClientBeginDeleteOptions contains the optional parameters for the ImagesClient.BeginDelete method.
+type ImagesClientBeginDeleteOptions struct {
+ // Resumes the LRO from the provided token.
+ ResumeToken string +} + +// ImagesClientBeginUpdateOptions contains the optional parameters for the ImagesClient.BeginUpdate method. +type ImagesClientBeginUpdateOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// ImagesClientGetOptions contains the optional parameters for the ImagesClient.Get method. +type ImagesClientGetOptions struct { + // The expand expression to apply on the operation. + Expand *string +} + +// ImagesClientListByResourceGroupOptions contains the optional parameters for the ImagesClient.ListByResourceGroup method. +type ImagesClientListByResourceGroupOptions struct { + // placeholder for future optional parameters +} + +// ImagesClientListOptions contains the optional parameters for the ImagesClient.List method. +type ImagesClientListOptions struct { + // placeholder for future optional parameters +} + +// InnerError - Inner error details. +type InnerError struct { + // The internal error message or exception dump. + Errordetail *string `json:"errordetail,omitempty"` + + // The exception type. + Exceptiontype *string `json:"exceptiontype,omitempty"` +} + +type InstanceSKU struct { + // READ-ONLY; The sku name. + Name *string `json:"name,omitempty" azure:"ro"` + + // READ-ONLY; The tier of the cloud service role instance. + Tier *string `json:"tier,omitempty" azure:"ro"` +} + +// InstanceViewStatus - Instance view status. +type InstanceViewStatus struct { + // The status code. + Code *string `json:"code,omitempty"` + + // The short localizable label for the status. + DisplayStatus *string `json:"displayStatus,omitempty"` + + // The level code. + Level *StatusLevelTypes `json:"level,omitempty"` + + // The detailed status message, including for alerts and error messages. + Message *string `json:"message,omitempty"` + + // The time of the status. + Time *time.Time `json:"time,omitempty"` +} + +// InstanceViewStatusesSummary - Instance view statuses. +type InstanceViewStatusesSummary struct { + // READ-ONLY + StatusesSummary []*StatusCodeCount `json:"statusesSummary,omitempty" azure:"ro"` +} + +// KeyForDiskEncryptionSet - Key Vault Key Url to be used for server side encryption of Managed Disks and Snapshots +type KeyForDiskEncryptionSet struct { + // REQUIRED; Fully versioned Key Url pointing to a key in KeyVault. Version segment of the Url is required regardless of rotationToLatestKeyVersionEnabled + // value. + KeyURL *string `json:"keyUrl,omitempty"` + + // Resource id of the KeyVault containing the key or secret. This property is optional and cannot be used if the KeyVault + // subscription is not the same as the Disk Encryption Set subscription. 
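+ // (For the KeyURL above, a fully versioned key URL is assumed to take the form
+ // https://{vaultName}.vault.azure.net/keys/{keyName}/{keyVersion} - placeholder names for illustration.)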
+ SourceVault *SourceVault `json:"sourceVault,omitempty"`
+}
+
+// KeyVaultAndKeyReference - Key Vault Key Url and vault id of the KeK; the KeK is optional and, when provided, is used to
+// unwrap the encryptionKey
+type KeyVaultAndKeyReference struct {
+ // REQUIRED; Url pointing to a key or secret in KeyVault
+ KeyURL *string `json:"keyUrl,omitempty"`
+
+ // REQUIRED; Resource id of the KeyVault containing the key or secret
+ SourceVault *SourceVault `json:"sourceVault,omitempty"`
+}
+
+// KeyVaultAndSecretReference - Key Vault Secret Url and vault id of the encryption key
+type KeyVaultAndSecretReference struct {
+ // REQUIRED; Url pointing to a key or secret in KeyVault
+ SecretURL *string `json:"secretUrl,omitempty"`
+
+ // REQUIRED; Resource id of the KeyVault containing the key or secret
+ SourceVault *SourceVault `json:"sourceVault,omitempty"`
+}
+
+// KeyVaultKeyReference - Describes a reference to Key Vault Key
+type KeyVaultKeyReference struct {
+ // REQUIRED; The URL referencing a key encryption key in Key Vault.
+ KeyURL *string `json:"keyUrl,omitempty"`
+
+ // REQUIRED; The relative URL of the Key Vault containing the key.
+ SourceVault *SubResource `json:"sourceVault,omitempty"`
+}
+
+// KeyVaultSecretReference - Describes a reference to Key Vault Secret
+type KeyVaultSecretReference struct {
+ // REQUIRED; The URL referencing a secret in a Key Vault.
+ SecretURL *string `json:"secretUrl,omitempty"`
+
+ // REQUIRED; The relative URL of the Key Vault containing the secret.
+ SourceVault *SubResource `json:"sourceVault,omitempty"`
+}
+
+// LastPatchInstallationSummary - Describes the properties of the last installed patch summary.
+type LastPatchInstallationSummary struct {
+ // READ-ONLY; The errors that were encountered during execution of the operation. The details array contains the list of them.
+ Error *APIError `json:"error,omitempty" azure:"ro"`
+
+ // READ-ONLY; The number of available patches that were explicitly excluded by a customer-specified exclusion list match.
+ ExcludedPatchCount *int32 `json:"excludedPatchCount,omitempty" azure:"ro"`
+
+ // READ-ONLY; The count of patches that failed installation.
+ FailedPatchCount *int32 `json:"failedPatchCount,omitempty" azure:"ro"`
+
+ // READ-ONLY; The activity ID of the operation that produced this result. It is used to correlate across CRP and extension
+ // logs.
+ InstallationActivityID *string `json:"installationActivityId,omitempty" azure:"ro"`
+
+ // READ-ONLY; The count of patches that successfully installed.
+ InstalledPatchCount *int32 `json:"installedPatchCount,omitempty" azure:"ro"`
+
+ // READ-ONLY; The UTC timestamp when the operation was last modified.
+ LastModifiedTime *time.Time `json:"lastModifiedTime,omitempty" azure:"ro"`
+
+ // READ-ONLY; Describes whether the operation ran out of time before it completed all its intended actions
+ MaintenanceWindowExceeded *bool `json:"maintenanceWindowExceeded,omitempty" azure:"ro"`
+
+ // READ-ONLY; The number of available patches that will not be installed because they didn't match a classification or
+ // inclusion list entry.
+ NotSelectedPatchCount *int32 `json:"notSelectedPatchCount,omitempty" azure:"ro"`
+
+ // READ-ONLY; The number of all available patches expected to be installed over the course of the patch installation operation.
+ PendingPatchCount *int32 `json:"pendingPatchCount,omitempty" azure:"ro"`
+
+ // READ-ONLY; The UTC timestamp when the operation began.
+ StartTime *time.Time `json:"startTime,omitempty" azure:"ro"` + + // READ-ONLY; The overall success or failure status of the operation. It remains "InProgress" until the operation completes. + // At that point it will become "Unknown", "Failed", "Succeeded", or + // "CompletedWithWarnings." + Status *PatchOperationStatus `json:"status,omitempty" azure:"ro"` +} + +// LinuxConfiguration - Specifies the Linux operating system settings on the virtual machine. +// For a list of supported Linux distributions, see Linux on Azure-Endorsed Distributions [https://docs.microsoft.com/azure/virtual-machines/linux/endorsed-distros]. +type LinuxConfiguration struct { + // Specifies whether password authentication should be disabled. + DisablePasswordAuthentication *bool `json:"disablePasswordAuthentication,omitempty"` + + // [Preview Feature] Specifies settings related to VM Guest Patching on Linux. + PatchSettings *LinuxPatchSettings `json:"patchSettings,omitempty"` + + // Indicates whether virtual machine agent should be provisioned on the virtual machine. + // When this property is not specified in the request body, default behavior is to set it to true. This will ensure that VM + // Agent is installed on the VM so that extensions can be added to the VM later. + ProvisionVMAgent *bool `json:"provisionVMAgent,omitempty"` + + // Specifies the ssh key configuration for a Linux OS. + SSH *SSHConfiguration `json:"ssh,omitempty"` +} + +// LinuxParameters - Input for InstallPatches on a Linux VM, as directly received by the API +type LinuxParameters struct { + // The update classifications to select when installing patches for Linux. + ClassificationsToInclude []*VMGuestPatchClassificationLinux `json:"classificationsToInclude,omitempty"` + + // This is used as a maintenance run identifier for Auto VM Guest Patching in Linux. + MaintenanceRunID *string `json:"maintenanceRunId,omitempty"` + + // packages to exclude in the patch operation. Format: packageName_packageVersion + PackageNameMasksToExclude []*string `json:"packageNameMasksToExclude,omitempty"` + + // packages to include in the patch operation. Format: packageName_packageVersion + PackageNameMasksToInclude []*string `json:"packageNameMasksToInclude,omitempty"` +} + +// LinuxPatchSettings - Specifies settings related to VM Guest Patching on Linux. +type LinuxPatchSettings struct { + // Specifies the mode of VM Guest Patch Assessment for the IaaS virtual machine. + // Possible values are: + // ImageDefault - You control the timing of patch assessments on a virtual machine. + // AutomaticByPlatform - The platform will trigger periodic patch assessments. The property provisionVMAgent must be true. + AssessmentMode *LinuxPatchAssessmentMode `json:"assessmentMode,omitempty"` + + // Specifies additional settings for patch mode AutomaticByPlatform in VM Guest Patching on Linux. + AutomaticByPlatformSettings *LinuxVMGuestPatchAutomaticByPlatformSettings `json:"automaticByPlatformSettings,omitempty"` + + // Specifies the mode of VM Guest Patching to IaaS virtual machine or virtual machines associated to virtual machine scale + // set with OrchestrationMode as Flexible. + // Possible values are: + // ImageDefault - The virtual machine's default patching configuration is used. + // AutomaticByPlatform - The virtual machine will be automatically updated by the platform. 
The property provisionVMAgent
+ // must be true.
+ PatchMode *LinuxVMGuestPatchMode `json:"patchMode,omitempty"`
+}
+
+// LinuxVMGuestPatchAutomaticByPlatformSettings - Specifies additional settings to be applied when patch mode AutomaticByPlatform
+// is selected in Linux patch settings.
+type LinuxVMGuestPatchAutomaticByPlatformSettings struct {
+ // Specifies the reboot setting for all AutomaticByPlatform patch installation operations.
+ RebootSetting *LinuxVMGuestPatchAutomaticByPlatformRebootSetting `json:"rebootSetting,omitempty"`
+}
+
+// ListUsagesResult - The List Usages operation response.
+type ListUsagesResult struct {
+ // REQUIRED; The list of compute resource usages.
+ Value []*Usage `json:"value,omitempty"`
+
+ // The URI to fetch the next page of compute resource usage information. Call ListNext() with this to fetch the next page
+ // of compute resource usage information.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// LoadBalancerConfiguration - Describes the load balancer configuration.
+type LoadBalancerConfiguration struct {
+ // REQUIRED; The name of the load balancer.
+ Name *string `json:"name,omitempty"`
+
+ // REQUIRED; Properties of the load balancer configuration.
+ Properties *LoadBalancerConfigurationProperties `json:"properties,omitempty"`
+
+ // Resource Id
+ ID *string `json:"id,omitempty"`
+}
+
+type LoadBalancerConfigurationProperties struct {
+ // REQUIRED; Specifies the frontend IP to be used for the load balancer. Only IPv4 frontend IP address is supported. Each
+ // load balancer configuration must have exactly one frontend IP configuration.
+ FrontendIPConfigurations []*LoadBalancerFrontendIPConfiguration `json:"frontendIPConfigurations,omitempty"`
+}
+
+type LoadBalancerFrontendIPConfiguration struct {
+ // REQUIRED; The name of the resource that is unique within the set of frontend IP configurations used by the load balancer.
+ // This name can be used to access the resource.
+ Name *string `json:"name,omitempty"`
+
+ // REQUIRED; Properties of load balancer frontend ip configuration.
+ Properties *LoadBalancerFrontendIPConfigurationProperties `json:"properties,omitempty"`
+}
+
+// LoadBalancerFrontendIPConfigurationProperties - Describes a cloud service IP configuration.
+type LoadBalancerFrontendIPConfigurationProperties struct {
+ // The virtual network private IP address of the IP configuration.
+ PrivateIPAddress *string `json:"privateIPAddress,omitempty"`
+
+ // The reference to the public ip address resource.
+ PublicIPAddress *SubResource `json:"publicIPAddress,omitempty"`
+
+ // The reference to the virtual network subnet resource.
+ Subnet *SubResource `json:"subnet,omitempty"`
+}
+
+// LogAnalyticsClientBeginExportRequestRateByIntervalOptions contains the optional parameters for the LogAnalyticsClient.BeginExportRequestRateByInterval
+// method.
+type LogAnalyticsClientBeginExportRequestRateByIntervalOptions struct {
+ // Resumes the LRO from the provided token.
+ ResumeToken string
+}
+
+// LogAnalyticsClientBeginExportThrottledRequestsOptions contains the optional parameters for the LogAnalyticsClient.BeginExportThrottledRequests
+// method.
+type LogAnalyticsClientBeginExportThrottledRequestsOptions struct {
+ // Resumes the LRO from the provided token.
+ ResumeToken string
+}
+
+// LogAnalyticsInputBase - Api input base class for LogAnalytics Api.
+type LogAnalyticsInputBase struct {
+ // REQUIRED; SAS Uri of the logging blob container to which LogAnalytics Api writes output logs.
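+ // (For illustration, a container SAS is assumed to look like
+ // https://{account}.blob.core.windows.net/{container}?{sasToken} - placeholder values only.)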
+ BlobContainerSasURI *string `json:"blobContainerSasUri,omitempty"` + + // REQUIRED; From time of the query + FromTime *time.Time `json:"fromTime,omitempty"` + + // REQUIRED; To time of the query + ToTime *time.Time `json:"toTime,omitempty"` + + // Group query result by Client Application ID. + GroupByClientApplicationID *bool `json:"groupByClientApplicationId,omitempty"` + + // Group query result by Operation Name. + GroupByOperationName *bool `json:"groupByOperationName,omitempty"` + + // Group query result by Resource Name. + GroupByResourceName *bool `json:"groupByResourceName,omitempty"` + + // Group query result by Throttle Policy applied. + GroupByThrottlePolicy *bool `json:"groupByThrottlePolicy,omitempty"` + + // Group query result by User Agent. + GroupByUserAgent *bool `json:"groupByUserAgent,omitempty"` +} + +// LogAnalyticsOperationResult - LogAnalytics operation status response +type LogAnalyticsOperationResult struct { + // READ-ONLY; LogAnalyticsOutput + Properties *LogAnalyticsOutput `json:"properties,omitempty" azure:"ro"` +} + +// LogAnalyticsOutput - LogAnalytics output properties +type LogAnalyticsOutput struct { + // READ-ONLY; Output file Uri path to blob container. + Output *string `json:"output,omitempty" azure:"ro"` +} + +// MaintenanceRedeployStatus - Maintenance Operation Status. +type MaintenanceRedeployStatus struct { + // True, if customer is allowed to perform Maintenance. + IsCustomerInitiatedMaintenanceAllowed *bool `json:"isCustomerInitiatedMaintenanceAllowed,omitempty"` + + // Message returned for the last Maintenance Operation. + LastOperationMessage *string `json:"lastOperationMessage,omitempty"` + + // The Last Maintenance Operation Result Code. + LastOperationResultCode *MaintenanceOperationResultCodeTypes `json:"lastOperationResultCode,omitempty"` + + // End Time for the Maintenance Window. + MaintenanceWindowEndTime *time.Time `json:"maintenanceWindowEndTime,omitempty"` + + // Start Time for the Maintenance Window. + MaintenanceWindowStartTime *time.Time `json:"maintenanceWindowStartTime,omitempty"` + + // End Time for the Pre Maintenance Window. + PreMaintenanceWindowEndTime *time.Time `json:"preMaintenanceWindowEndTime,omitempty"` + + // Start Time for the Pre Maintenance Window. + PreMaintenanceWindowStartTime *time.Time `json:"preMaintenanceWindowStartTime,omitempty"` +} + +// ManagedArtifact - The managed artifact. +type ManagedArtifact struct { + // REQUIRED; The managed artifact id. + ID *string `json:"id,omitempty"` +} + +// ManagedDiskParameters - The parameters of a managed disk. +type ManagedDiskParameters struct { + // Specifies the customer managed disk encryption set resource id for the managed disk. + DiskEncryptionSet *DiskEncryptionSetParameters `json:"diskEncryptionSet,omitempty"` + + // Resource Id + ID *string `json:"id,omitempty"` + + // Specifies the security profile for the managed disk. + SecurityProfile *VMDiskSecurityProfile `json:"securityProfile,omitempty"` + + // Specifies the storage account type for the managed disk. NOTE: UltraSSD_LRS can only be used with data disks, it cannot + // be used with OS Disk. + StorageAccountType *StorageAccountTypes `json:"storageAccountType,omitempty"` +} + +// NetworkInterfaceReference - Describes a network interface reference. +type NetworkInterfaceReference struct { + // Resource Id + ID *string `json:"id,omitempty"` + + // Describes a network interface reference properties. 
+ Properties *NetworkInterfaceReferenceProperties `json:"properties,omitempty"`
+}
+
+// NetworkInterfaceReferenceProperties - Describes a network interface reference properties.
+type NetworkInterfaceReferenceProperties struct {
+ // Specify what happens to the network interface when the VM is deleted
+ DeleteOption *DeleteOptions `json:"deleteOption,omitempty"`
+
+ // Specifies the primary network interface in case the virtual machine has more than 1 network interface.
+ Primary *bool `json:"primary,omitempty"`
+}
+
+// NetworkProfile - Specifies the network interfaces or the networking configuration of the virtual machine.
+type NetworkProfile struct {
+ // Specifies the Microsoft.Network API version used when creating networking resources in the Network Interface Configurations.
+ NetworkAPIVersion *NetworkAPIVersion `json:"networkApiVersion,omitempty"`
+
+ // Specifies the networking configurations that will be used to create the virtual machine networking resources.
+ NetworkInterfaceConfigurations []*VirtualMachineNetworkInterfaceConfiguration `json:"networkInterfaceConfigurations,omitempty"`
+
+ // Specifies the list of resource Ids for the network interfaces associated with the virtual machine.
+ NetworkInterfaces []*NetworkInterfaceReference `json:"networkInterfaces,omitempty"`
+}
+
+// OSDisk - Specifies information about the operating system disk used by the virtual machine.
+// For more information about disks, see About disks and VHDs for Azure virtual machines [https://docs.microsoft.com/azure/virtual-machines/managed-disks-overview].
+type OSDisk struct {
+ // REQUIRED; Specifies how the virtual machine should be created.
+ // Possible values are:
+ // Attach - This value is used when you are using a specialized disk to create the virtual machine.
+ // FromImage - This value is used when you are using an image to create the virtual machine. If you are using a platform
+ // image, you also use the imageReference element described above. If you are
+ // using a marketplace image, you also use the plan element previously described.
+ CreateOption *DiskCreateOptionTypes `json:"createOption,omitempty"`
+
+ // Specifies the caching requirements.
+ // Possible values are:
+ // None
+ // ReadOnly
+ // ReadWrite
+ // Default: None for Standard storage. ReadOnly for Premium storage.
+ Caching *CachingTypes `json:"caching,omitempty"`
+
+ // Specifies whether OS Disk should be deleted or detached upon VM deletion.
+ // Possible values:
+ // Delete If this value is used, the OS disk is deleted when VM is deleted.
+ // Detach If this value is used, the OS disk is retained after VM is deleted.
+ // The default value is set to detach. For an ephemeral OS Disk, the default value is set to Delete. User cannot change the
+ // delete option for ephemeral OS Disk.
+ DeleteOption *DiskDeleteOptionTypes `json:"deleteOption,omitempty"`
+
+ // Specifies the ephemeral Disk Settings for the operating system disk used by the virtual machine.
+ DiffDiskSettings *DiffDiskSettings `json:"diffDiskSettings,omitempty"`
+
+ // Specifies the size of an empty data disk in gigabytes. This element can be used to overwrite the size of the disk in a
+ // virtual machine image.
+ // This value cannot be larger than 1023 GB
+ DiskSizeGB *int32 `json:"diskSizeGB,omitempty"`
+
+ // Specifies the encryption settings for the OS Disk.
+ // Minimum api-version: 2015-06-15
+ EncryptionSettings *DiskEncryptionSettings `json:"encryptionSettings,omitempty"`
+
+ // The source user image virtual hard disk.
The virtual hard disk will be copied before being attached to the virtual machine. + // If SourceImage is provided, the destination virtual hard drive must not + // exist. + Image *VirtualHardDisk `json:"image,omitempty"` + + // The managed disk parameters. + ManagedDisk *ManagedDiskParameters `json:"managedDisk,omitempty"` + + // The disk name. + Name *string `json:"name,omitempty"` + + // This property allows you to specify the type of the OS that is included in the disk if creating a VM from user-image or + // a specialized VHD. + // Possible values are: + // Windows + // Linux + OSType *OperatingSystemTypes `json:"osType,omitempty"` + + // The virtual hard disk. + Vhd *VirtualHardDisk `json:"vhd,omitempty"` + + // Specifies whether writeAccelerator should be enabled or disabled on the disk. + WriteAcceleratorEnabled *bool `json:"writeAcceleratorEnabled,omitempty"` +} + +// OSDiskImage - Contains the os disk image information. +type OSDiskImage struct { + // REQUIRED; The operating system of the osDiskImage. + OperatingSystem *OperatingSystemTypes `json:"operatingSystem,omitempty"` +} + +// OSDiskImageEncryption - Contains encryption settings for an OS disk image. +type OSDiskImageEncryption struct { + // A relative URI containing the resource ID of the disk encryption set. + DiskEncryptionSetID *string `json:"diskEncryptionSetId,omitempty"` + + // This property specifies the security profile of an OS disk image. + SecurityProfile *OSDiskImageSecurityProfile `json:"securityProfile,omitempty"` +} + +// OSDiskImageSecurityProfile - Contains security profile for an OS disk image. +type OSDiskImageSecurityProfile struct { + // confidential VM encryption types + ConfidentialVMEncryptionType *ConfidentialVMEncryptionType `json:"confidentialVMEncryptionType,omitempty"` + + // secure VM disk encryption set id + SecureVMDiskEncryptionSetID *string `json:"secureVMDiskEncryptionSetId,omitempty"` +} + +// OSFamily - Describes a cloud service OS family. +type OSFamily struct { + // OS family properties. + Properties *OSFamilyProperties `json:"properties,omitempty"` + + // READ-ONLY; Resource Id. + ID *string `json:"id,omitempty" azure:"ro"` + + // READ-ONLY; Resource location. + Location *string `json:"location,omitempty" azure:"ro"` + + // READ-ONLY; Resource name. + Name *string `json:"name,omitempty" azure:"ro"` + + // READ-ONLY; Resource type. + Type *string `json:"type,omitempty" azure:"ro"` +} + +type OSFamilyListResult struct { + // REQUIRED + Value []*OSFamily `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// OSFamilyProperties - OS family properties. +type OSFamilyProperties struct { + // READ-ONLY; The OS family label. + Label *string `json:"label,omitempty" azure:"ro"` + + // READ-ONLY; The OS family name. + Name *string `json:"name,omitempty" azure:"ro"` + + // READ-ONLY; List of OS versions belonging to this family. + Versions []*OSVersionPropertiesBase `json:"versions,omitempty" azure:"ro"` +} + +// OSProfile - Specifies the operating system settings for the virtual machine. Some of the settings cannot be changed once +// VM is provisioned. +type OSProfile struct { + // Specifies the password of the administrator account. 
+ // Minimum-length (Windows): 8 characters + // Minimum-length (Linux): 6 characters + // Max-length (Windows): 123 characters + // Max-length (Linux): 72 characters + // Complexity requirements: 3 out of 4 conditions below need to be fulfilled + // Has lower characters + // Has upper characters + // Has a digit + // Has a special character (Regex match [\W_]) + // Disallowed values: "abc@123", "P@$$w0rd", "P@ssw0rd", "P@ssword123", "Pa$$word", "pass@word1", "Password!", "Password1", + // "Password22", "iloveyou!" + // For resetting the password, see How to reset the Remote Desktop service or its login password in a Windows VM [https://docs.microsoft.com/troubleshoot/azure/virtual-machines/reset-rdp] + // For resetting root password, see Manage users, SSH, and check or repair disks on Azure Linux VMs using the VMAccess Extension + // [https://docs.microsoft.com/troubleshoot/azure/virtual-machines/troubleshoot-ssh-connection] + AdminPassword *string `json:"adminPassword,omitempty"` + + // Specifies the name of the administrator account. + // This property cannot be updated after the VM is created. + // Windows-only restriction: Cannot end in "." + // Disallowed values: "administrator", "admin", "user", "user1", "test", "user2", "test1", "user3", "admin1", "1", "123", + // "a", "actuser", "adm", "admin2", "aspnet", "backup", "console", "david", "guest", + // "john", "owner", "root", "server", "sql", "support", "support_388945a0", "sys", "test2", "test3", "user4", "user5". + // Minimum-length (Linux): 1 character + // Max-length (Linux): 64 characters + // Max-length (Windows): 20 characters. + AdminUsername *string `json:"adminUsername,omitempty"` + + // Specifies whether extension operations should be allowed on the virtual machine. + // This may only be set to False when no extensions are present on the virtual machine. + AllowExtensionOperations *bool `json:"allowExtensionOperations,omitempty"` + + // Specifies the host OS name of the virtual machine. + // This name cannot be updated after the VM is created. + // Max-length (Windows): 15 characters + // Max-length (Linux): 64 characters. + // For naming conventions and restrictions see Azure infrastructure services implementation guidelines [https://docs.microsoft.com/azure/azure-resource-manager/management/resource-name-rules]. + ComputerName *string `json:"computerName,omitempty"` + + // Specifies a base-64 encoded string of custom data. The base-64 encoded string is decoded to a binary array that is saved + // as a file on the Virtual Machine. The maximum length of the binary array is + // 65535 bytes. + // Note: Do not pass any secrets or passwords in customData property + // This property cannot be updated after the VM is created. + // customData is passed to the VM to be saved as a file, for more information see Custom Data on Azure VMs [https://azure.microsoft.com/blog/custom-data-and-cloud-init-on-windows-azure/] + // For using cloud-init for your Linux VM, see Using cloud-init to customize a Linux VM during creation [https://docs.microsoft.com/azure/virtual-machines/linux/using-cloud-init] + CustomData *string `json:"customData,omitempty"` + + // Specifies the Linux operating system settings on the virtual machine. + // For a list of supported Linux distributions, see Linux on Azure-Endorsed Distributions [https://docs.microsoft.com/azure/virtual-machines/linux/endorsed-distros]. + LinuxConfiguration *LinuxConfiguration `json:"linuxConfiguration,omitempty"` + + // Optional property which must either be set to True or omitted. 
+ RequireGuestProvisionSignal *bool `json:"requireGuestProvisionSignal,omitempty"` + + // Specifies set of certificates that should be installed onto the virtual machine. To install certificates on a virtual machine + // it is recommended to use the Azure Key Vault virtual machine extension for + // Linux [https://docs.microsoft.com/azure/virtual-machines/extensions/key-vault-linux] or the Azure Key Vault virtual machine + // extension for Windows + // [https://docs.microsoft.com/azure/virtual-machines/extensions/key-vault-windows]. + Secrets []*VaultSecretGroup `json:"secrets,omitempty"` + + // Specifies Windows operating system settings on the virtual machine. + WindowsConfiguration *WindowsConfiguration `json:"windowsConfiguration,omitempty"` +} + +// OSVersion - Describes a cloud service OS version. +type OSVersion struct { + // OS version properties. + Properties *OSVersionProperties `json:"properties,omitempty"` + + // READ-ONLY; Resource Id. + ID *string `json:"id,omitempty" azure:"ro"` + + // READ-ONLY; Resource location. + Location *string `json:"location,omitempty" azure:"ro"` + + // READ-ONLY; Resource name. + Name *string `json:"name,omitempty" azure:"ro"` + + // READ-ONLY; Resource type. + Type *string `json:"type,omitempty" azure:"ro"` +} + +type OSVersionListResult struct { + // REQUIRED + Value []*OSVersion `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// OSVersionProperties - OS version properties. +type OSVersionProperties struct { + // READ-ONLY; The family of this OS version. + Family *string `json:"family,omitempty" azure:"ro"` + + // READ-ONLY; The family label of this OS version. + FamilyLabel *string `json:"familyLabel,omitempty" azure:"ro"` + + // READ-ONLY; Specifies whether this OS version is active. + IsActive *bool `json:"isActive,omitempty" azure:"ro"` + + // READ-ONLY; Specifies whether this is the default OS version for its family. + IsDefault *bool `json:"isDefault,omitempty" azure:"ro"` + + // READ-ONLY; The OS version label. + Label *string `json:"label,omitempty" azure:"ro"` + + // READ-ONLY; The OS version. + Version *string `json:"version,omitempty" azure:"ro"` +} + +// OSVersionPropertiesBase - Configuration view of an OS version. +type OSVersionPropertiesBase struct { + // READ-ONLY; Specifies whether this OS version is active. + IsActive *bool `json:"isActive,omitempty" azure:"ro"` + + // READ-ONLY; Specifies whether this is the default OS version for its family. + IsDefault *bool `json:"isDefault,omitempty" azure:"ro"` + + // READ-ONLY; The OS version label. + Label *string `json:"label,omitempty" azure:"ro"` + + // READ-ONLY; The OS version. + Version *string `json:"version,omitempty" azure:"ro"` +} + +// OperationListResult - The List Compute Operation operation response. +type OperationListResult struct { + // READ-ONLY; The list of compute operations + Value []*OperationValue `json:"value,omitempty" azure:"ro"` +} + +// OperationValue - Describes the properties of a Compute Operation value. +type OperationValue struct { + // Describes the properties of a Compute Operation Value Display. + Display *OperationValueDisplay `json:"display,omitempty"` + + // READ-ONLY; The name of the compute operation. + Name *string `json:"name,omitempty" azure:"ro"` + + // READ-ONLY; The origin of the compute operation. + Origin *string `json:"origin,omitempty" azure:"ro"` +} + +// OperationValueDisplay - Describes the properties of a Compute Operation Value Display. 
+type OperationValueDisplay struct { + // READ-ONLY; The description of the operation. + Description *string `json:"description,omitempty" azure:"ro"` + + // READ-ONLY; The display name of the compute operation. + Operation *string `json:"operation,omitempty" azure:"ro"` + + // READ-ONLY; The resource provider for the operation. + Provider *string `json:"provider,omitempty" azure:"ro"` + + // READ-ONLY; The display name of the resource the operation applies to. + Resource *string `json:"resource,omitempty" azure:"ro"` +} + +// OperationsClientListOptions contains the optional parameters for the OperationsClient.List method. +type OperationsClientListOptions struct { + // placeholder for future optional parameters +} + +// OrchestrationServiceStateInput - The input for OrchestrationServiceState +type OrchestrationServiceStateInput struct { + // REQUIRED; The action to be performed. + Action *OrchestrationServiceStateAction `json:"action,omitempty"` + + // REQUIRED; The name of the service. + ServiceName *OrchestrationServiceNames `json:"serviceName,omitempty"` +} + +// OrchestrationServiceSummary - Summary for an orchestration service of a virtual machine scale set. +type OrchestrationServiceSummary struct { + // READ-ONLY; The name of the service. + ServiceName *OrchestrationServiceNames `json:"serviceName,omitempty" azure:"ro"` + + // READ-ONLY; The current state of the service. + ServiceState *OrchestrationServiceState `json:"serviceState,omitempty" azure:"ro"` +} + +// PatchInstallationDetail - Information about a specific patch that was encountered during an installation action. +type PatchInstallationDetail struct { + // READ-ONLY; The classification(s) of the patch as provided by the patch publisher. + Classifications []*string `json:"classifications,omitempty" azure:"ro"` + + // READ-ONLY; The state of the patch after the installation operation completed. + InstallationState *PatchInstallationState `json:"installationState,omitempty" azure:"ro"` + + // READ-ONLY; The KBID of the patch. Only applies to Windows patches. + KbID *string `json:"kbId,omitempty" azure:"ro"` + + // READ-ONLY; The friendly name of the patch. + Name *string `json:"name,omitempty" azure:"ro"` + + // READ-ONLY; A unique identifier for the patch. + PatchID *string `json:"patchId,omitempty" azure:"ro"` + + // READ-ONLY; The version string of the package. It may conform to Semantic Versioning. Only applies to Linux. + Version *string `json:"version,omitempty" azure:"ro"` +} + +// PatchSettings - Specifies settings related to VM Guest Patching on Windows. +type PatchSettings struct { + // Specifies the mode of VM Guest patch assessment for the IaaS virtual machine. + // Possible values are: + // ImageDefault - You control the timing of patch assessments on a virtual machine. + // AutomaticByPlatform - The platform will trigger periodic patch assessments. The property provisionVMAgent must be true. + AssessmentMode *WindowsPatchAssessmentMode `json:"assessmentMode,omitempty"` + + // Specifies additional settings for patch mode AutomaticByPlatform in VM Guest Patching on Windows. + AutomaticByPlatformSettings *WindowsVMGuestPatchAutomaticByPlatformSettings `json:"automaticByPlatformSettings,omitempty"` + + // Enables customers to patch their Azure VMs without requiring a reboot. For enableHotpatching, the 'provisionVMAgent' must + // be set to true and 'patchMode' must be set to 'AutomaticByPlatform'. 
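+ //
+ // A minimal sketch of a hotpatch-enabled configuration (illustrative values; to.Ptr is
+ // assumed from github.com/Azure/azure-sdk-for-go/sdk/azcore/to):
+ //
+ //     settings := PatchSettings{
+ //         PatchMode:         to.Ptr(WindowsVMGuestPatchModeAutomaticByPlatform),
+ //         EnableHotpatching: to.Ptr(true),
+ //     }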
+ EnableHotpatching *bool `json:"enableHotpatching,omitempty"`
+
+ // Specifies the mode of VM Guest Patching to IaaS virtual machine or virtual machines associated to virtual machine scale
+ // set with OrchestrationMode as Flexible.
+ // Possible values are:
+ // Manual - You control the application of patches to a virtual machine. You do this by applying patches manually inside the
+ // VM. In this mode, automatic updates are disabled; the property
+ // WindowsConfiguration.enableAutomaticUpdates must be false.
+ // AutomaticByOS - The virtual machine will automatically be updated by the OS. The property WindowsConfiguration.enableAutomaticUpdates
+ // must be true.
+ // AutomaticByPlatform - The virtual machine will automatically be updated by the platform. The properties provisionVMAgent and
+ // WindowsConfiguration.enableAutomaticUpdates must be true.
+ PatchMode *WindowsVMGuestPatchMode `json:"patchMode,omitempty"`
+}
+
+// PirCommunityGalleryResource - Base information about the community gallery resource in pir.
+type PirCommunityGalleryResource struct {
+ // The identifier information of community gallery.
+ Identifier *CommunityGalleryIdentifier `json:"identifier,omitempty"`
+
+ // READ-ONLY; Resource location
+ Location *string `json:"location,omitempty" azure:"ro"`
+
+ // READ-ONLY; Resource name
+ Name *string `json:"name,omitempty" azure:"ro"`
+
+ // READ-ONLY; Resource type
+ Type *string `json:"type,omitempty" azure:"ro"`
+}
+
+// PirResource - The Resource model definition.
+type PirResource struct {
+ // READ-ONLY; Resource location
+ Location *string `json:"location,omitempty" azure:"ro"`
+
+ // READ-ONLY; Resource name
+ Name *string `json:"name,omitempty" azure:"ro"`
+}
+
+// PirSharedGalleryResource - Base information about the shared gallery resource in pir.
+type PirSharedGalleryResource struct {
+ // The identifier information of shared gallery.
+ Identifier *SharedGalleryIdentifier `json:"identifier,omitempty"`
+
+ // READ-ONLY; Resource location
+ Location *string `json:"location,omitempty" azure:"ro"`
+
+ // READ-ONLY; Resource name
+ Name *string `json:"name,omitempty" azure:"ro"`
+}
+
+// Plan - Specifies information about the marketplace image used to create the virtual machine. This element is only used
+// for marketplace images. Before you can use a marketplace image from an API, you must
+// enable the image for programmatic use. In the Azure portal, find the marketplace image that you want to use and then click
+// Want to deploy programmatically, Get Started ->. Enter any required
+// information and then click Save.
+type Plan struct {
+ // The plan ID.
+ Name *string `json:"name,omitempty"`
+
+ // Specifies the product of the image from the marketplace. This is the same value as Offer under the imageReference element.
+ Product *string `json:"product,omitempty"`
+
+ // The promotion code.
+ PromotionCode *string `json:"promotionCode,omitempty"`
+
+ // The publisher ID.
+ Publisher *string `json:"publisher,omitempty"`
+}
+
+// PrivateEndpoint - The Private Endpoint resource.
+type PrivateEndpoint struct {
+ // READ-ONLY; The ARM identifier for Private Endpoint
+ ID *string `json:"id,omitempty" azure:"ro"`
+}
+
+// PrivateEndpointConnection - The Private Endpoint Connection resource.
+type PrivateEndpointConnection struct {
+ // Resource properties.
+ Properties *PrivateEndpointConnectionProperties `json:"properties,omitempty"`
+
+ // READ-ONLY; private endpoint connection Id
+ ID *string `json:"id,omitempty" azure:"ro"`
+
+ // READ-ONLY; private endpoint connection name
+ Name *string `json:"name,omitempty" azure:"ro"`
+
+ // READ-ONLY; private endpoint connection type
+ Type *string `json:"type,omitempty" azure:"ro"`
+}
+
+// PrivateEndpointConnectionListResult - A list of private link resources
+type PrivateEndpointConnectionListResult struct {
+ // The uri to fetch the next page of private endpoint connections. Call ListNext() with this to fetch the next page of private
+ // endpoint connections.
+ NextLink *string `json:"nextLink,omitempty"`
+
+ // Array of private endpoint connections
+ Value []*PrivateEndpointConnection `json:"value,omitempty"`
+}
+
+// PrivateEndpointConnectionProperties - Properties of the private endpoint connection.
+type PrivateEndpointConnectionProperties struct {
+ // REQUIRED; A collection of information about the state of the connection between DiskAccess and Virtual Network.
+ PrivateLinkServiceConnectionState *PrivateLinkServiceConnectionState `json:"privateLinkServiceConnectionState,omitempty"`
+
+ // READ-ONLY; The resource of private endpoint.
+ PrivateEndpoint *PrivateEndpoint `json:"privateEndpoint,omitempty" azure:"ro"`
+
+ // READ-ONLY; The provisioning state of the private endpoint connection resource.
+ ProvisioningState *PrivateEndpointConnectionProvisioningState `json:"provisioningState,omitempty" azure:"ro"`
+}
+
+// PrivateLinkResource - A private link resource
+type PrivateLinkResource struct {
+ // Resource properties.
+ Properties *PrivateLinkResourceProperties `json:"properties,omitempty"`
+
+ // READ-ONLY; private link resource Id
+ ID *string `json:"id,omitempty" azure:"ro"`
+
+ // READ-ONLY; private link resource name
+ Name *string `json:"name,omitempty" azure:"ro"`
+
+ // READ-ONLY; private link resource type
+ Type *string `json:"type,omitempty" azure:"ro"`
+}
+
+// PrivateLinkResourceListResult - A list of private link resources
+type PrivateLinkResourceListResult struct {
+ // Array of private link resources
+ Value []*PrivateLinkResource `json:"value,omitempty"`
+}
+
+// PrivateLinkResourceProperties - Properties of a private link resource.
+type PrivateLinkResourceProperties struct {
+ // The private link resource DNS zone name.
+ RequiredZoneNames []*string `json:"requiredZoneNames,omitempty"`
+
+ // READ-ONLY; The private link resource group id.
+ GroupID *string `json:"groupId,omitempty" azure:"ro"`
+
+ // READ-ONLY; The private link resource required member names.
+ RequiredMembers []*string `json:"requiredMembers,omitempty" azure:"ro"`
+}
+
+// PrivateLinkServiceConnectionState - A collection of information about the state of the connection between service consumer
+// and provider.
+type PrivateLinkServiceConnectionState struct {
+ // A message indicating if changes on the service provider require any updates on the consumer.
+ ActionsRequired *string `json:"actionsRequired,omitempty"`
+
+ // The reason for approval/rejection of the connection.
+ Description *string `json:"description,omitempty"`
+
+ // Indicates whether the connection has been Approved/Rejected/Removed by the owner of the service.
+ Status *PrivateEndpointServiceConnectionStatus `json:"status,omitempty"`
+}
+
+// PropertyUpdatesInProgress - Properties of the disk for which update is pending.
+type PropertyUpdatesInProgress struct {
+ // The target performance tier of the disk if a tier change operation is in progress.
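+ // (e.g. "P30" - an illustrative managed-disk performance tier name.)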
+ TargetTier *string `json:"targetTier,omitempty"` +} + +// ProximityPlacementGroup - Specifies information about the proximity placement group. +type ProximityPlacementGroup struct { + // REQUIRED; Resource location + Location *string `json:"location,omitempty"` + + // Describes the properties of a Proximity Placement Group. + Properties *ProximityPlacementGroupProperties `json:"properties,omitempty"` + + // Resource tags + Tags map[string]*string `json:"tags,omitempty"` + + // Specifies the Availability Zone where virtual machine, virtual machine scale set or availability set associated with the + // proximity placement group can be created. + Zones []*string `json:"zones,omitempty"` + + // READ-ONLY; Resource Id + ID *string `json:"id,omitempty" azure:"ro"` + + // READ-ONLY; Resource name + Name *string `json:"name,omitempty" azure:"ro"` + + // READ-ONLY; Resource type + Type *string `json:"type,omitempty" azure:"ro"` +} + +// ProximityPlacementGroupListResult - The List Proximity Placement Group operation response. +type ProximityPlacementGroupListResult struct { + // REQUIRED; The list of proximity placement groups + Value []*ProximityPlacementGroup `json:"value,omitempty"` + + // The URI to fetch the next page of proximity placement groups. + NextLink *string `json:"nextLink,omitempty"` +} + +// ProximityPlacementGroupProperties - Describes the properties of a Proximity Placement Group. +type ProximityPlacementGroupProperties struct { + // Describes colocation status of the Proximity Placement Group. + ColocationStatus *InstanceViewStatus `json:"colocationStatus,omitempty"` + + // Specifies the user intent of the proximity placement group. + Intent *ProximityPlacementGroupPropertiesIntent `json:"intent,omitempty"` + + // Specifies the type of the proximity placement group. + // Possible values are: + // Standard : Co-locate resources within an Azure region or Availability Zone. + // Ultra : For future use. + ProximityPlacementGroupType *ProximityPlacementGroupType `json:"proximityPlacementGroupType,omitempty"` + + // READ-ONLY; A list of references to all availability sets in the proximity placement group. + AvailabilitySets []*SubResourceWithColocationStatus `json:"availabilitySets,omitempty" azure:"ro"` + + // READ-ONLY; A list of references to all virtual machine scale sets in the proximity placement group. + VirtualMachineScaleSets []*SubResourceWithColocationStatus `json:"virtualMachineScaleSets,omitempty" azure:"ro"` + + // READ-ONLY; A list of references to all virtual machines in the proximity placement group. + VirtualMachines []*SubResourceWithColocationStatus `json:"virtualMachines,omitempty" azure:"ro"` +} + +// ProximityPlacementGroupPropertiesIntent - Specifies the user intent of the proximity placement group. +type ProximityPlacementGroupPropertiesIntent struct { + // Specifies possible sizes of virtual machines that can be created in the proximity placement group. + VMSizes []*string `json:"vmSizes,omitempty"` +} + +// ProximityPlacementGroupUpdate - Specifies information about the proximity placement group. +type ProximityPlacementGroupUpdate struct { + // Resource tags + Tags map[string]*string `json:"tags,omitempty"` +} + +// ProximityPlacementGroupsClientCreateOrUpdateOptions contains the optional parameters for the ProximityPlacementGroupsClient.CreateOrUpdate +// method. 
+type ProximityPlacementGroupsClientCreateOrUpdateOptions struct { + // placeholder for future optional parameters +} + +// ProximityPlacementGroupsClientDeleteOptions contains the optional parameters for the ProximityPlacementGroupsClient.Delete +// method. +type ProximityPlacementGroupsClientDeleteOptions struct { + // placeholder for future optional parameters +} + +// ProximityPlacementGroupsClientGetOptions contains the optional parameters for the ProximityPlacementGroupsClient.Get method. +type ProximityPlacementGroupsClientGetOptions struct { + // includeColocationStatus=true enables fetching the colocation status of all the resources in the proximity placement group. + IncludeColocationStatus *string +} + +// ProximityPlacementGroupsClientListByResourceGroupOptions contains the optional parameters for the ProximityPlacementGroupsClient.ListByResourceGroup +// method. +type ProximityPlacementGroupsClientListByResourceGroupOptions struct { + // placeholder for future optional parameters +} + +// ProximityPlacementGroupsClientListBySubscriptionOptions contains the optional parameters for the ProximityPlacementGroupsClient.ListBySubscription +// method. +type ProximityPlacementGroupsClientListBySubscriptionOptions struct { + // placeholder for future optional parameters +} + +// ProximityPlacementGroupsClientUpdateOptions contains the optional parameters for the ProximityPlacementGroupsClient.Update +// method. +type ProximityPlacementGroupsClientUpdateOptions struct { + // placeholder for future optional parameters +} + +// ProxyOnlyResource - The ProxyOnly Resource model definition. +type ProxyOnlyResource struct { + // READ-ONLY; Resource Id + ID *string `json:"id,omitempty" azure:"ro"` + + // READ-ONLY; Resource name + Name *string `json:"name,omitempty" azure:"ro"` + + // READ-ONLY; Resource type + Type *string `json:"type,omitempty" azure:"ro"` +} + +// ProxyResource - The resource model definition for an Azure Resource Manager proxy resource. It will not have tags and a +// location +type ProxyResource struct { + // READ-ONLY; Resource Id + ID *string `json:"id,omitempty" azure:"ro"` + + // READ-ONLY; Resource name + Name *string `json:"name,omitempty" azure:"ro"` + + // READ-ONLY; Resource type + Type *string `json:"type,omitempty" azure:"ro"` +} + +// PublicIPAddressSKU - Describes the public IP Sku. It can only be set with OrchestrationMode as Flexible. +type PublicIPAddressSKU struct { + // Specify public IP sku name + Name *PublicIPAddressSKUName `json:"name,omitempty"` + + // Specify public IP sku tier + Tier *PublicIPAddressSKUTier `json:"tier,omitempty"` +} + +// PurchasePlan - Used for establishing the purchase context of any 3rd Party artifact through MarketPlace. +type PurchasePlan struct { + // REQUIRED; The plan ID. + Name *string `json:"name,omitempty"` + + // REQUIRED; Specifies the product of the image from the marketplace. This is the same value as Offer under the imageReference + // element. + Product *string `json:"product,omitempty"` + + // REQUIRED; The publisher ID. + Publisher *string `json:"publisher,omitempty"` +} + +// RecommendedMachineConfiguration - The properties describe the recommended machine configuration for this Image Definition. +// These properties are updatable. +type RecommendedMachineConfiguration struct { + // Describes the resource range. + Memory *ResourceRange `json:"memory,omitempty"` + + // Describes the resource range. 
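+ // Illustrative values (an assumption, not from the API spec): an image recommending 2 to 16 vCPUs would carry a range with Min=2 and Max=16 here.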
+ VCPUs *ResourceRange `json:"vCPUs,omitempty"`
+}
+
+// RecoveryWalkResponse - Response after calling a manual recovery walk
+type RecoveryWalkResponse struct {
+ // READ-ONLY; The next update domain that needs to be walked. Null means walk spanning all update domains has been completed
+ NextPlatformUpdateDomain *int32 `json:"nextPlatformUpdateDomain,omitempty" azure:"ro"`
+
+ // READ-ONLY; Whether the recovery walk was performed
+ WalkPerformed *bool `json:"walkPerformed,omitempty" azure:"ro"`
+}
+
+// RegionalReplicationStatus - This is the regional replication status.
+type RegionalReplicationStatus struct {
+ // READ-ONLY; The details of the replication status.
+ Details *string `json:"details,omitempty" azure:"ro"`
+
+ // READ-ONLY; It indicates progress of the replication job.
+ Progress *int32 `json:"progress,omitempty" azure:"ro"`
+
+ // READ-ONLY; The region to which the gallery image version is being replicated.
+ Region *string `json:"region,omitempty" azure:"ro"`
+
+ // READ-ONLY; This is the regional replication state.
+ State *ReplicationState `json:"state,omitempty" azure:"ro"`
+}
+
+// RegionalSharingStatus - Gallery regional sharing status
+type RegionalSharingStatus struct {
+ // Details of gallery regional sharing failure.
+ Details *string `json:"details,omitempty"`
+
+ // Region name
+ Region *string `json:"region,omitempty"`
+
+ // READ-ONLY; Gallery sharing state in current region
+ State *SharingState `json:"state,omitempty" azure:"ro"`
+}
+
+// ReplicationStatus - This is the replication status of the gallery image version.
+type ReplicationStatus struct {
+ // READ-ONLY; This is the aggregated replication status based on all the regional replication status flags.
+ AggregatedState *AggregatedReplicationState `json:"aggregatedState,omitempty" azure:"ro"`
+
+ // READ-ONLY; This is a summary of replication status for each region.
+ Summary []*RegionalReplicationStatus `json:"summary,omitempty" azure:"ro"`
+}
+
+// RequestRateByIntervalInput - Api request input for LogAnalytics getRequestRateByInterval Api.
+type RequestRateByIntervalInput struct {
+ // REQUIRED; SAS Uri of the logging blob container to which LogAnalytics Api writes output logs.
+ BlobContainerSasURI *string `json:"blobContainerSasUri,omitempty"`
+
+ // REQUIRED; From time of the query
+ FromTime *time.Time `json:"fromTime,omitempty"`
+
+ // REQUIRED; Interval value in minutes used to create LogAnalytics call rate logs.
+ IntervalLength *IntervalInMins `json:"intervalLength,omitempty"`
+
+ // REQUIRED; To time of the query
+ ToTime *time.Time `json:"toTime,omitempty"`
+
+ // Group query result by Client Application ID.
+ GroupByClientApplicationID *bool `json:"groupByClientApplicationId,omitempty"`
+
+ // Group query result by Operation Name.
+ GroupByOperationName *bool `json:"groupByOperationName,omitempty"`
+
+ // Group query result by Resource Name.
+ GroupByResourceName *bool `json:"groupByResourceName,omitempty"`
+
+ // Group query result by Throttle Policy applied.
+ GroupByThrottlePolicy *bool `json:"groupByThrottlePolicy,omitempty"`
+
+ // Group query result by User Agent.
+ GroupByUserAgent *bool `json:"groupByUserAgent,omitempty"`
+}
+
+// Resource - The Resource model definition.
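+// A minimal construction sketch (hypothetical values; assumes the azcore to.Ptr helper):
+//
+//	res := Resource{
+//		Location: to.Ptr("westus"),
+//		Tags:     map[string]*string{"env": to.Ptr("dev")},
+//	}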
+type Resource struct {
+ // REQUIRED; Resource location
+ Location *string `json:"location,omitempty"`
+
+ // Resource tags
+ Tags map[string]*string `json:"tags,omitempty"`
+
+ // READ-ONLY; Resource Id
+ ID *string `json:"id,omitempty" azure:"ro"`
+
+ // READ-ONLY; Resource name
+ Name *string `json:"name,omitempty" azure:"ro"`
+
+ // READ-ONLY; Resource type
+ Type *string `json:"type,omitempty" azure:"ro"`
+}
+
+// ResourceInstanceViewStatus - Instance view status.
+type ResourceInstanceViewStatus struct {
+ // The level code.
+ Level *StatusLevelTypes `json:"level,omitempty"`
+
+ // READ-ONLY; The status code.
+ Code *string `json:"code,omitempty" azure:"ro"`
+
+ // READ-ONLY; The short localizable label for the status.
+ DisplayStatus *string `json:"displayStatus,omitempty" azure:"ro"`
+
+ // READ-ONLY; The detailed status message, including for alerts and error messages.
+ Message *string `json:"message,omitempty" azure:"ro"`
+
+ // READ-ONLY; The time of the status.
+ Time *time.Time `json:"time,omitempty" azure:"ro"`
+}
+
+// ResourceRange - Describes the resource range.
+type ResourceRange struct {
+ // The maximum number of the resource.
+ Max *int32 `json:"max,omitempty"`
+
+ // The minimum number of the resource.
+ Min *int32 `json:"min,omitempty"`
+}
+
+// ResourceSKU - Describes an available Compute SKU.
+type ResourceSKU struct {
+ // READ-ONLY; The api versions that support this SKU.
+ APIVersions []*string `json:"apiVersions,omitempty" azure:"ro"`
+
+ // READ-ONLY; A name value pair to describe the capability.
+ Capabilities []*ResourceSKUCapabilities `json:"capabilities,omitempty" azure:"ro"`
+
+ // READ-ONLY; Specifies the number of virtual machines in the scale set.
+ Capacity *ResourceSKUCapacity `json:"capacity,omitempty" azure:"ro"`
+
+ // READ-ONLY; Metadata for retrieving price info.
+ Costs []*ResourceSKUCosts `json:"costs,omitempty" azure:"ro"`
+
+ // READ-ONLY; The Family of this particular SKU.
+ Family *string `json:"family,omitempty" azure:"ro"`
+
+ // READ-ONLY; The Kind of resources that are supported in this SKU.
+ Kind *string `json:"kind,omitempty" azure:"ro"`
+
+ // READ-ONLY; A list of locations and availability zones in those locations where the SKU is available.
+ LocationInfo []*ResourceSKULocationInfo `json:"locationInfo,omitempty" azure:"ro"`
+
+ // READ-ONLY; The set of locations that the SKU is available in.
+ Locations []*string `json:"locations,omitempty" azure:"ro"`
+
+ // READ-ONLY; The name of SKU.
+ Name *string `json:"name,omitempty" azure:"ro"`
+
+ // READ-ONLY; The type of resource the SKU applies to.
+ ResourceType *string `json:"resourceType,omitempty" azure:"ro"`
+
+ // READ-ONLY; The restrictions because of which SKU cannot be used. This is empty if there are no restrictions.
+ Restrictions []*ResourceSKURestrictions `json:"restrictions,omitempty" azure:"ro"`
+
+ // READ-ONLY; The Size of the SKU.
+ Size *string `json:"size,omitempty" azure:"ro"`
+
+ // READ-ONLY; Specifies the tier of virtual machines in a scale set.
+ // Possible Values:
+ // Standard
+ // Basic
+ Tier *string `json:"tier,omitempty" azure:"ro"`
+}
+
+// ResourceSKUCapabilities - Describes the SKU capabilities object.
+type ResourceSKUCapabilities struct {
+ // READ-ONLY; An invariant to describe the feature.
+ Name *string `json:"name,omitempty" azure:"ro"`
+
+ // READ-ONLY; An invariant if the feature is measured by quantity.
+ Value *string `json:"value,omitempty" azure:"ro"`
+}
+
+// ResourceSKUCapacity - Describes scaling information of a SKU.
+type ResourceSKUCapacity struct {
+ // READ-ONLY; The default capacity.
+ Default *int64 `json:"default,omitempty" azure:"ro"`
+
+ // READ-ONLY; The maximum capacity that can be set.
+ Maximum *int64 `json:"maximum,omitempty" azure:"ro"`
+
+ // READ-ONLY; The minimum capacity.
+ Minimum *int64 `json:"minimum,omitempty" azure:"ro"`
+
+ // READ-ONLY; The scale type applicable to the sku.
+ ScaleType *ResourceSKUCapacityScaleType `json:"scaleType,omitempty" azure:"ro"`
+}
+
+// ResourceSKUCosts - Describes metadata for retrieving price info.
+type ResourceSKUCosts struct {
+ // READ-ONLY; An invariant to show the extended unit.
+ ExtendedUnit *string `json:"extendedUnit,omitempty" azure:"ro"`
+
+ // READ-ONLY; Used for querying price from commerce.
+ MeterID *string `json:"meterID,omitempty" azure:"ro"`
+
+ // READ-ONLY; The multiplier is needed to extend the base metered cost.
+ Quantity *int64 `json:"quantity,omitempty" azure:"ro"`
+}
+
+// ResourceSKULocationInfo - Describes an available Compute SKU Location Information.
+type ResourceSKULocationInfo struct {
+ // READ-ONLY; The names of extended locations.
+ ExtendedLocations []*string `json:"extendedLocations,omitempty" azure:"ro"`
+
+ // READ-ONLY; Location of the SKU
+ Location *string `json:"location,omitempty" azure:"ro"`
+
+ // READ-ONLY; The type of the extended location.
+ Type *ExtendedLocationType `json:"type,omitempty" azure:"ro"`
+
+ // READ-ONLY; Details of capabilities available to a SKU in specific zones.
+ ZoneDetails []*ResourceSKUZoneDetails `json:"zoneDetails,omitempty" azure:"ro"`
+
+ // READ-ONLY; List of availability zones where the SKU is supported.
+ Zones []*string `json:"zones,omitempty" azure:"ro"`
+}
+
+// ResourceSKURestrictionInfo - Describes an available Compute SKU Restriction Information.
+type ResourceSKURestrictionInfo struct {
+ // READ-ONLY; Locations where the SKU is restricted
+ Locations []*string `json:"locations,omitempty" azure:"ro"`
+
+ // READ-ONLY; List of availability zones where the SKU is restricted.
+ Zones []*string `json:"zones,omitempty" azure:"ro"`
+}
+
+// ResourceSKURestrictions - Describes restrictions of a SKU.
+type ResourceSKURestrictions struct {
+ // READ-ONLY; The reason for restriction.
+ ReasonCode *ResourceSKURestrictionsReasonCode `json:"reasonCode,omitempty" azure:"ro"`
+
+ // READ-ONLY; The information about the restriction where the SKU cannot be used.
+ RestrictionInfo *ResourceSKURestrictionInfo `json:"restrictionInfo,omitempty" azure:"ro"`
+
+ // READ-ONLY; The type of restrictions.
+ Type *ResourceSKURestrictionsType `json:"type,omitempty" azure:"ro"`
+
+ // READ-ONLY; The value of restrictions. If the restriction type is set to location, this would be the different locations
+ // where the SKU is restricted.
+ Values []*string `json:"values,omitempty" azure:"ro"`
+}
+
+// ResourceSKUZoneDetails - Describes the zonal capabilities of a SKU.
+type ResourceSKUZoneDetails struct {
+ // READ-ONLY; A list of capabilities that are available for the SKU in the specified list of zones.
+ Capabilities []*ResourceSKUCapabilities `json:"capabilities,omitempty" azure:"ro"`
+
+ // READ-ONLY; The set of zones that the SKU is available in with the specified capabilities.
+ Name []*string `json:"name,omitempty" azure:"ro"`
+}
+
+// ResourceSKUsClientListOptions contains the optional parameters for the ResourceSKUsClient.List method.
+type ResourceSKUsClientListOptions struct {
+ // The filter to apply on the operation. Only location filter is supported currently.
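+ // Illustrative filter expression (an assumption, not from the API spec): "location eq 'westus'".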
+ Filter *string
+ // Whether to include extended location information in the response.
+ IncludeExtendedLocations *string
+}
+
+// ResourceSKUsResult - The List Resource Skus operation response.
+type ResourceSKUsResult struct {
+ // REQUIRED; The list of skus available for the subscription.
+ Value []*ResourceSKU `json:"value,omitempty"`
+
+ // The URI to fetch the next page of Resource Skus. Call ListNext() with this URI to fetch the next page of Resource Skus.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// ResourceURIList - The list of resources which are encrypted with the disk encryption set.
+type ResourceURIList struct {
+ // REQUIRED; A list of IDs or Owner IDs of resources which are encrypted with the disk encryption set.
+ Value []*string `json:"value,omitempty"`
+
+ // The uri to fetch the next page of encrypted resources. Call ListNext() with this to fetch the next page of encrypted resources.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// ResourceWithOptionalLocation - The Resource model definition with location property as optional.
+type ResourceWithOptionalLocation struct {
+ // Resource location
+ Location *string `json:"location,omitempty"`
+
+ // Resource tags
+ Tags map[string]*string `json:"tags,omitempty"`
+
+ // READ-ONLY; Resource Id
+ ID *string `json:"id,omitempty" azure:"ro"`
+
+ // READ-ONLY; Resource name
+ Name *string `json:"name,omitempty" azure:"ro"`
+
+ // READ-ONLY; Resource type
+ Type *string `json:"type,omitempty" azure:"ro"`
+}
+
+// RestorePoint - Restore Point details.
+type RestorePoint struct {
+ // The restore point properties.
+ Properties *RestorePointProperties `json:"properties,omitempty"`
+
+ // READ-ONLY; Resource Id
+ ID *string `json:"id,omitempty" azure:"ro"`
+
+ // READ-ONLY; Resource name
+ Name *string `json:"name,omitempty" azure:"ro"`
+
+ // READ-ONLY; Resource type
+ Type *string `json:"type,omitempty" azure:"ro"`
+}
+
+// RestorePointCollection - Create or update Restore Point collection parameters.
+type RestorePointCollection struct {
+ // REQUIRED; Resource location
+ Location *string `json:"location,omitempty"`
+
+ // The restore point collection properties.
+ Properties *RestorePointCollectionProperties `json:"properties,omitempty"`
+
+ // Resource tags
+ Tags map[string]*string `json:"tags,omitempty"`
+
+ // READ-ONLY; Resource Id
+ ID *string `json:"id,omitempty" azure:"ro"`
+
+ // READ-ONLY; Resource name
+ Name *string `json:"name,omitempty" azure:"ro"`
+
+ // READ-ONLY; Resource type
+ Type *string `json:"type,omitempty" azure:"ro"`
+}
+
+// RestorePointCollectionListResult - The List restore point collection operation response.
+type RestorePointCollectionListResult struct {
+ // The uri to fetch the next page of RestorePointCollections. Call ListNext() with this to fetch the next page of RestorePointCollections.
+ NextLink *string `json:"nextLink,omitempty"`
+
+ // Gets the list of restore point collections.
+ Value []*RestorePointCollection `json:"value,omitempty"`
+}
+
+// RestorePointCollectionProperties - The restore point collection properties.
+type RestorePointCollectionProperties struct {
+ // The properties of the source resource that this restore point collection is created from.
+ Source *RestorePointCollectionSourceProperties `json:"source,omitempty"`
+
+ // READ-ONLY; The provisioning state of the restore point collection.
+ ProvisioningState *string `json:"provisioningState,omitempty" azure:"ro"`
+
+ // READ-ONLY; The unique id of the restore point collection.
+ RestorePointCollectionID *string `json:"restorePointCollectionId,omitempty" azure:"ro"` + + // READ-ONLY; A list containing all restore points created under this restore point collection. + RestorePoints []*RestorePoint `json:"restorePoints,omitempty" azure:"ro"` +} + +// RestorePointCollectionSourceProperties - The properties of the source resource that this restore point collection is created +// from. +type RestorePointCollectionSourceProperties struct { + // Resource Id of the source resource used to create this restore point collection + ID *string `json:"id,omitempty"` + + // READ-ONLY; Location of the source resource used to create this restore point collection. + Location *string `json:"location,omitempty" azure:"ro"` +} + +// RestorePointCollectionUpdate - Update Restore Point collection parameters. +type RestorePointCollectionUpdate struct { + // The restore point collection properties. + Properties *RestorePointCollectionProperties `json:"properties,omitempty"` + + // Resource tags + Tags map[string]*string `json:"tags,omitempty"` +} + +// RestorePointCollectionsClientBeginDeleteOptions contains the optional parameters for the RestorePointCollectionsClient.BeginDelete +// method. +type RestorePointCollectionsClientBeginDeleteOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// RestorePointCollectionsClientCreateOrUpdateOptions contains the optional parameters for the RestorePointCollectionsClient.CreateOrUpdate +// method. +type RestorePointCollectionsClientCreateOrUpdateOptions struct { + // placeholder for future optional parameters +} + +// RestorePointCollectionsClientGetOptions contains the optional parameters for the RestorePointCollectionsClient.Get method. +type RestorePointCollectionsClientGetOptions struct { + // The expand expression to apply on the operation. If expand=restorePoints, server will return all contained restore points + // in the restorePointCollection. + Expand *RestorePointCollectionExpandOptions +} + +// RestorePointCollectionsClientListAllOptions contains the optional parameters for the RestorePointCollectionsClient.ListAll +// method. +type RestorePointCollectionsClientListAllOptions struct { + // placeholder for future optional parameters +} + +// RestorePointCollectionsClientListOptions contains the optional parameters for the RestorePointCollectionsClient.List method. +type RestorePointCollectionsClientListOptions struct { + // placeholder for future optional parameters +} + +// RestorePointCollectionsClientUpdateOptions contains the optional parameters for the RestorePointCollectionsClient.Update +// method. +type RestorePointCollectionsClientUpdateOptions struct { + // placeholder for future optional parameters +} + +// RestorePointInstanceView - The instance view of a restore point. +type RestorePointInstanceView struct { + // The disk restore points information. + DiskRestorePoints []*DiskRestorePointInstanceView `json:"diskRestorePoints,omitempty"` + + // The resource status information. + Statuses []*InstanceViewStatus `json:"statuses,omitempty"` +} + +// RestorePointProperties - The restore point properties. +type RestorePointProperties struct { + // ConsistencyMode of the RestorePoint. Can be specified in the input while creating a restore point. For now, only CrashConsistent + // is accepted as a valid input. Please refer to + // https://aka.ms/RestorePoints for more details. 
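+ // Illustrative value (hypothetical constant name, assuming the azcore to.Ptr helper): to.Ptr(ConsistencyModeTypesCrashConsistent).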
+ ConsistencyMode *ConsistencyModeTypes `json:"consistencyMode,omitempty"` + + // List of disk resource ids that the customer wishes to exclude from the restore point. If no disks are specified, all disks + // will be included. + ExcludeDisks []*APIEntityReference `json:"excludeDisks,omitempty"` + + // Resource Id of the source restore point from which a copy needs to be created. + SourceRestorePoint *APIEntityReference `json:"sourceRestorePoint,omitempty"` + + // Gets the creation time of the restore point. + TimeCreated *time.Time `json:"timeCreated,omitempty"` + + // READ-ONLY; The restore point instance view. + InstanceView *RestorePointInstanceView `json:"instanceView,omitempty" azure:"ro"` + + // READ-ONLY; Gets the provisioning state of the restore point. + ProvisioningState *string `json:"provisioningState,omitempty" azure:"ro"` + + // READ-ONLY; Gets the details of the VM captured at the time of the restore point creation. + SourceMetadata *RestorePointSourceMetadata `json:"sourceMetadata,omitempty" azure:"ro"` +} + +// RestorePointSourceMetadata - Describes the properties of the Virtual Machine for which the restore point was created. The +// properties provided are a subset and the snapshot of the overall Virtual Machine properties captured at the +// time of the restore point creation. +type RestorePointSourceMetadata struct { + // Gets the diagnostics profile. + DiagnosticsProfile *DiagnosticsProfile `json:"diagnosticsProfile,omitempty"` + + // Gets the hardware profile. + HardwareProfile *HardwareProfile `json:"hardwareProfile,omitempty"` + + // Gets the license type, which is for bring your own license scenario. + LicenseType *string `json:"licenseType,omitempty"` + + // Location of the VM from which the restore point was created. + Location *string `json:"location,omitempty"` + + // Gets the OS profile. + OSProfile *OSProfile `json:"osProfile,omitempty"` + + // Gets the security profile. + SecurityProfile *SecurityProfile `json:"securityProfile,omitempty"` + + // Gets the storage profile. + StorageProfile *RestorePointSourceVMStorageProfile `json:"storageProfile,omitempty"` + + // Gets the virtual machine unique id. + VMID *string `json:"vmId,omitempty"` +} + +// RestorePointSourceVMDataDisk - Describes a data disk. +type RestorePointSourceVMDataDisk struct { + // Gets the caching type. + Caching *CachingTypes `json:"caching,omitempty"` + + // Gets the disk restore point Id. + DiskRestorePoint *APIEntityReference `json:"diskRestorePoint,omitempty"` + + // Gets the initial disk size in GB for blank data disks, and the new desired size for existing OS and Data disks. + DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` + + // Gets the logical unit number. + Lun *int32 `json:"lun,omitempty"` + + // Gets the managed disk details + ManagedDisk *ManagedDiskParameters `json:"managedDisk,omitempty"` + + // Gets the disk name. + Name *string `json:"name,omitempty"` +} + +// RestorePointSourceVMOSDisk - Describes an Operating System disk. +type RestorePointSourceVMOSDisk struct { + // Gets the caching type. + Caching *CachingTypes `json:"caching,omitempty"` + + // Gets the disk restore point Id. + DiskRestorePoint *APIEntityReference `json:"diskRestorePoint,omitempty"` + + // Gets the disk size in GB. + DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` + + // Gets the disk encryption settings. 
+ EncryptionSettings *DiskEncryptionSettings `json:"encryptionSettings,omitempty"` + + // Gets the managed disk details + ManagedDisk *ManagedDiskParameters `json:"managedDisk,omitempty"` + + // Gets the disk name. + Name *string `json:"name,omitempty"` + + // Gets the Operating System type. + OSType *OperatingSystemType `json:"osType,omitempty"` +} + +// RestorePointSourceVMStorageProfile - Describes the storage profile. +type RestorePointSourceVMStorageProfile struct { + // Gets the data disks of the VM captured at the time of the restore point creation. + DataDisks []*RestorePointSourceVMDataDisk `json:"dataDisks,omitempty"` + + // Gets the OS disk of the VM captured at the time of the restore point creation. + OSDisk *RestorePointSourceVMOSDisk `json:"osDisk,omitempty"` +} + +// RestorePointsClientBeginCreateOptions contains the optional parameters for the RestorePointsClient.BeginCreate method. +type RestorePointsClientBeginCreateOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// RestorePointsClientBeginDeleteOptions contains the optional parameters for the RestorePointsClient.BeginDelete method. +type RestorePointsClientBeginDeleteOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// RestorePointsClientGetOptions contains the optional parameters for the RestorePointsClient.Get method. +type RestorePointsClientGetOptions struct { + // The expand expression to apply on the operation. 'InstanceView' retrieves information about the run-time state of a restore + // point. + Expand *RestorePointExpandOptions +} + +// RetrieveBootDiagnosticsDataResult - The SAS URIs of the console screenshot and serial log blobs. +type RetrieveBootDiagnosticsDataResult struct { + // READ-ONLY; The console screenshot blob URI + ConsoleScreenshotBlobURI *string `json:"consoleScreenshotBlobUri,omitempty" azure:"ro"` + + // READ-ONLY; The serial console log blob URI. + SerialConsoleLogBlobURI *string `json:"serialConsoleLogBlobUri,omitempty" azure:"ro"` +} + +type RoleInstance struct { + Properties *RoleInstanceProperties `json:"properties,omitempty"` + SKU *InstanceSKU `json:"sku,omitempty"` + + // READ-ONLY; Resource Id + ID *string `json:"id,omitempty" azure:"ro"` + + // READ-ONLY; Resource Location. + Location *string `json:"location,omitempty" azure:"ro"` + + // READ-ONLY; Resource Name. + Name *string `json:"name,omitempty" azure:"ro"` + + // READ-ONLY; Resource tags. + Tags map[string]*string `json:"tags,omitempty" azure:"ro"` + + // READ-ONLY; Resource Type. + Type *string `json:"type,omitempty" azure:"ro"` +} + +type RoleInstanceListResult struct { + // REQUIRED + Value []*RoleInstance `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// RoleInstanceNetworkProfile - Describes the network profile for the role instance. +type RoleInstanceNetworkProfile struct { + // READ-ONLY; Specifies the list of resource Ids for the network interfaces associated with the role instance. + NetworkInterfaces []*SubResource `json:"networkInterfaces,omitempty" azure:"ro"` +} + +type RoleInstanceProperties struct { + // The instance view of the role instance. + InstanceView *RoleInstanceView `json:"instanceView,omitempty"` + + // Describes the network profile for the role instance. + NetworkProfile *RoleInstanceNetworkProfile `json:"networkProfile,omitempty"` +} + +// RoleInstanceView - The instance view of the role instance. +type RoleInstanceView struct { + // READ-ONLY; The Fault Domain. 
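+ // Illustrative value (an assumption, not from the API spec): 0, 1, or 2 for a role spread across three fault domains.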
+ PlatformFaultDomain *int32 `json:"platformFaultDomain,omitempty" azure:"ro"`
+
+ // READ-ONLY; The Update Domain.
+ PlatformUpdateDomain *int32 `json:"platformUpdateDomain,omitempty" azure:"ro"`
+
+ // READ-ONLY; Specifies a unique identifier generated internally for the cloud service associated with this role instance.
+ // NOTE: If you are using Azure Diagnostics extension, this property can be used as 'DeploymentId' for querying details.
+ PrivateID *string `json:"privateId,omitempty" azure:"ro"`
+
+ // READ-ONLY
+ Statuses []*ResourceInstanceViewStatus `json:"statuses,omitempty" azure:"ro"`
+}
+
+// RoleInstances - Specifies a list of role instances from the cloud service.
+type RoleInstances struct {
+ // REQUIRED; List of cloud service role instance names. Value of '*' will signify all role instances of the cloud service.
+ RoleInstances []*string `json:"roleInstances,omitempty"`
+}
+
+// RollbackStatusInfo - Information about rollback on failed VM instances after an OS Upgrade operation.
+type RollbackStatusInfo struct {
+ // READ-ONLY; The number of instances which failed to roll back.
+ FailedRolledbackInstanceCount *int32 `json:"failedRolledbackInstanceCount,omitempty" azure:"ro"`
+
+ // READ-ONLY; Error details if OS rollback failed.
+ RollbackError *APIError `json:"rollbackError,omitempty" azure:"ro"`
+
+ // READ-ONLY; The number of instances which have been successfully rolled back.
+ SuccessfullyRolledbackInstanceCount *int32 `json:"successfullyRolledbackInstanceCount,omitempty" azure:"ro"`
+}
+
+// RollingUpgradePolicy - The configuration parameters used while performing a rolling upgrade.
+type RollingUpgradePolicy struct {
+ // Allow VMSS to ignore AZ boundaries when constructing upgrade batches. Take into consideration the Update Domain and maxBatchInstancePercent
+ // to determine the batch size.
+ EnableCrossZoneUpgrade *bool `json:"enableCrossZoneUpgrade,omitempty"`
+
+ // The maximum percent of total virtual machine instances that will be upgraded simultaneously by the rolling upgrade in one
+ // batch. As this is a maximum, unhealthy instances in previous or future batches
+ // can cause the percentage of instances in a batch to decrease to ensure higher reliability. The default value for this parameter
+ // is 20%.
+ MaxBatchInstancePercent *int32 `json:"maxBatchInstancePercent,omitempty"`
+
+ // The maximum percentage of the total virtual machine instances in the scale set that can be simultaneously unhealthy, either
+ // as a result of being upgraded, or by being found in an unhealthy state by
+ // the virtual machine health checks before the rolling upgrade aborts. This constraint will be checked prior to starting
+ // any batch. The default value for this parameter is 20%.
+ MaxUnhealthyInstancePercent *int32 `json:"maxUnhealthyInstancePercent,omitempty"`
+
+ // The maximum percentage of upgraded virtual machine instances that can be found to be in an unhealthy state. This check
+ // will happen after each batch is upgraded. If this percentage is ever exceeded,
+ // the rolling update aborts. The default value for this parameter is 20%.
+ MaxUnhealthyUpgradedInstancePercent *int32 `json:"maxUnhealthyUpgradedInstancePercent,omitempty"`
+
+ // The wait time between completing the update for all virtual machines in one batch and starting the next batch. The time
+ // duration should be specified in ISO 8601 format. The default value is 0 seconds
+ // (PT0S).
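+ // Illustrative value (an assumption, not from the API spec): "PT30S" pauses for 30 seconds between batches.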
+ PauseTimeBetweenBatches *string `json:"pauseTimeBetweenBatches,omitempty"` + + // Upgrade all unhealthy instances in a scale set before any healthy instances. + PrioritizeUnhealthyInstances *bool `json:"prioritizeUnhealthyInstances,omitempty"` +} + +// RollingUpgradeProgressInfo - Information about the number of virtual machine instances in each upgrade state. +type RollingUpgradeProgressInfo struct { + // READ-ONLY; The number of instances that have failed to be upgraded successfully. + FailedInstanceCount *int32 `json:"failedInstanceCount,omitempty" azure:"ro"` + + // READ-ONLY; The number of instances that are currently being upgraded. + InProgressInstanceCount *int32 `json:"inProgressInstanceCount,omitempty" azure:"ro"` + + // READ-ONLY; The number of instances that have not yet begun to be upgraded. + PendingInstanceCount *int32 `json:"pendingInstanceCount,omitempty" azure:"ro"` + + // READ-ONLY; The number of instances that have been successfully upgraded. + SuccessfulInstanceCount *int32 `json:"successfulInstanceCount,omitempty" azure:"ro"` +} + +// RollingUpgradeRunningStatus - Information about the current running state of the overall upgrade. +type RollingUpgradeRunningStatus struct { + // READ-ONLY; Code indicating the current status of the upgrade. + Code *RollingUpgradeStatusCode `json:"code,omitempty" azure:"ro"` + + // READ-ONLY; The last action performed on the rolling upgrade. + LastAction *RollingUpgradeActionType `json:"lastAction,omitempty" azure:"ro"` + + // READ-ONLY; Last action time of the upgrade. + LastActionTime *time.Time `json:"lastActionTime,omitempty" azure:"ro"` + + // READ-ONLY; Start time of the upgrade. + StartTime *time.Time `json:"startTime,omitempty" azure:"ro"` +} + +// RollingUpgradeStatusInfo - The status of the latest virtual machine scale set rolling upgrade. +type RollingUpgradeStatusInfo struct { + // REQUIRED; Resource location + Location *string `json:"location,omitempty"` + + // The status of the latest virtual machine scale set rolling upgrade. + Properties *RollingUpgradeStatusInfoProperties `json:"properties,omitempty"` + + // Resource tags + Tags map[string]*string `json:"tags,omitempty"` + + // READ-ONLY; Resource Id + ID *string `json:"id,omitempty" azure:"ro"` + + // READ-ONLY; Resource name + Name *string `json:"name,omitempty" azure:"ro"` + + // READ-ONLY; Resource type + Type *string `json:"type,omitempty" azure:"ro"` +} + +// RollingUpgradeStatusInfoProperties - The status of the latest virtual machine scale set rolling upgrade. +type RollingUpgradeStatusInfoProperties struct { + // READ-ONLY; Error details for this upgrade, if there are any. + Error *APIError `json:"error,omitempty" azure:"ro"` + + // READ-ONLY; The rolling upgrade policies applied for this upgrade. + Policy *RollingUpgradePolicy `json:"policy,omitempty" azure:"ro"` + + // READ-ONLY; Information about the number of virtual machine instances in each upgrade state. + Progress *RollingUpgradeProgressInfo `json:"progress,omitempty" azure:"ro"` + + // READ-ONLY; Information about the current running state of the overall upgrade. + RunningStatus *RollingUpgradeRunningStatus `json:"runningStatus,omitempty" azure:"ro"` +} + +// RunCommandDocument - Describes the properties of a Run Command. +type RunCommandDocument struct { + // REQUIRED; The VM run command description. + Description *string `json:"description,omitempty"` + + // REQUIRED; The VM run command id. + ID *string `json:"id,omitempty"` + + // REQUIRED; The VM run command label. 
+ Label *string `json:"label,omitempty"`
+
+ // REQUIRED; The Operating System type.
+ OSType *OperatingSystemTypes `json:"osType,omitempty"`
+
+ // REQUIRED; The VM run command schema.
+ Schema *string `json:"$schema,omitempty"`
+
+ // REQUIRED; The script to be executed.
+ Script []*string `json:"script,omitempty"`
+
+ // The parameters used by the script.
+ Parameters []*RunCommandParameterDefinition `json:"parameters,omitempty"`
+}
+
+// RunCommandDocumentBase - Describes the properties of a Run Command's metadata.
+type RunCommandDocumentBase struct {
+ // REQUIRED; The VM run command description.
+ Description *string `json:"description,omitempty"`
+
+ // REQUIRED; The VM run command id.
+ ID *string `json:"id,omitempty"`
+
+ // REQUIRED; The VM run command label.
+ Label *string `json:"label,omitempty"`
+
+ // REQUIRED; The Operating System type.
+ OSType *OperatingSystemTypes `json:"osType,omitempty"`
+
+ // REQUIRED; The VM run command schema.
+ Schema *string `json:"$schema,omitempty"`
+}
+
+// RunCommandInput - Run command input parameters.
+type RunCommandInput struct {
+ // REQUIRED; The run command id.
+ CommandID *string `json:"commandId,omitempty"`
+
+ // The run command parameters.
+ Parameters []*RunCommandInputParameter `json:"parameters,omitempty"`
+
+ // Optional. The script to be executed. When this value is given, the given script will override the default script of the
+ // command.
+ Script []*string `json:"script,omitempty"`
+}
+
+// RunCommandInputParameter - Describes the properties of a run command parameter.
+type RunCommandInputParameter struct {
+ // REQUIRED; The run command parameter name.
+ Name *string `json:"name,omitempty"`
+
+ // REQUIRED; The run command parameter value.
+ Value *string `json:"value,omitempty"`
+}
+
+// RunCommandListResult - The List Virtual Machine run commands operation response.
+type RunCommandListResult struct {
+ // REQUIRED; The list of virtual machine run commands.
+ Value []*RunCommandDocumentBase `json:"value,omitempty"`
+
+ // The uri to fetch the next page of run commands. Call ListNext() with this to fetch the next page of run commands.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// RunCommandParameterDefinition - Describes the properties of a run command parameter.
+type RunCommandParameterDefinition struct {
+ // REQUIRED; The run command parameter name.
+ Name *string `json:"name,omitempty"`
+
+ // REQUIRED; The run command parameter type.
+ Type *string `json:"type,omitempty"`
+
+ // The run command parameter default value.
+ DefaultValue *string `json:"defaultValue,omitempty"`
+
+ // Whether the run command parameter is required.
+ Required *bool `json:"required,omitempty"`
+}
+
+type RunCommandResult struct {
+ // Run command operation response.
+ Value []*InstanceViewStatus `json:"value,omitempty"`
+}
+
+// SKU - Describes a virtual machine scale set sku. NOTE: If the new VM SKU is not supported on the hardware the scale set
+// is currently on, you need to deallocate the VMs in the scale set before you modify the
+// SKU name.
+type SKU struct {
+ // Specifies the number of virtual machines in the scale set.
+ Capacity *int64 `json:"capacity,omitempty"`
+
+ // The sku name.
+ Name *string `json:"name,omitempty"`
+
+ // Specifies the tier of virtual machines in a scale set.
+ // Possible Values:
+ // Standard
+ // Basic
+ Tier *string `json:"tier,omitempty"`
+}
+
+// SSHConfiguration - SSH configuration for Linux based VMs running on Azure
+type SSHConfiguration struct {
+ // The list of SSH public keys used to authenticate with linux based VMs.
+ PublicKeys []*SSHPublicKey `json:"publicKeys,omitempty"`
+}
+
+// SSHPublicKey - Contains information about SSH certificate public key and the path on the Linux VM where the public key
+// is placed.
+type SSHPublicKey struct {
+ // SSH public key certificate used to authenticate with the VM through ssh. The key needs to be at least 2048-bit and in ssh-rsa
+ // format.
+ // For creating ssh keys, see [Create SSH keys on Linux and Mac for Linux VMs in Azure](https://docs.microsoft.com/azure/virtual-machines/linux/create-ssh-keys-detailed).
+ KeyData *string `json:"keyData,omitempty"`
+
+ // Specifies the full path on the created VM where ssh public key is stored. If the file already exists, the specified key
+ // is appended to the file. Example: /home/user/.ssh/authorized_keys
+ Path *string `json:"path,omitempty"`
+}
+
+// SSHPublicKeyGenerateKeyPairResult - Response from generation of an SSH key pair.
+type SSHPublicKeyGenerateKeyPairResult struct {
+ // REQUIRED; The ARM resource id in the form of /subscriptions/{SubscriptionId}/resourceGroups/{ResourceGroupName}/providers/Microsoft.Compute/sshPublicKeys/{SshPublicKeyName}
+ ID *string `json:"id,omitempty"`
+
+ // REQUIRED; Private key portion of the key pair used to authenticate to a virtual machine through ssh. The private key is
+ // returned in RFC3447 format and should be treated as a secret.
+ PrivateKey *string `json:"privateKey,omitempty"`
+
+ // REQUIRED; Public key portion of the key pair used to authenticate to a virtual machine through ssh. The public key is in
+ // ssh-rsa format.
+ PublicKey *string `json:"publicKey,omitempty"`
+}
+
+// SSHPublicKeyResource - Specifies information about the SSH public key.
+type SSHPublicKeyResource struct {
+ // REQUIRED; Resource location
+ Location *string `json:"location,omitempty"`
+
+ // Properties of the SSH public key.
+ Properties *SSHPublicKeyResourceProperties `json:"properties,omitempty"`
+
+ // Resource tags
+ Tags map[string]*string `json:"tags,omitempty"`
+
+ // READ-ONLY; Resource Id
+ ID *string `json:"id,omitempty" azure:"ro"`
+
+ // READ-ONLY; Resource name
+ Name *string `json:"name,omitempty" azure:"ro"`
+
+ // READ-ONLY; Resource type
+ Type *string `json:"type,omitempty" azure:"ro"`
+}
+
+// SSHPublicKeyResourceProperties - Properties of the SSH public key.
+type SSHPublicKeyResourceProperties struct {
+ // SSH public key used to authenticate to a virtual machine through ssh. If this property is not initially provided when the
+ // resource is created, the publicKey property will be populated when
+ // generateKeyPair is called. If the public key is provided upon resource creation, the provided public key needs to be at
+ // least 2048-bit and in ssh-rsa format.
+ PublicKey *string `json:"publicKey,omitempty"`
+}
+
+// SSHPublicKeyUpdateResource - Specifies information about the SSH public key.
+type SSHPublicKeyUpdateResource struct {
+ // Properties of the SSH public key.
+ Properties *SSHPublicKeyResourceProperties `json:"properties,omitempty"`
+
+ // Resource tags
+ Tags map[string]*string `json:"tags,omitempty"`
+}
+
+// SSHPublicKeysClientCreateOptions contains the optional parameters for the SSHPublicKeysClient.Create method.
+type SSHPublicKeysClientCreateOptions struct {
+ // placeholder for future optional parameters
+}
+
+// SSHPublicKeysClientDeleteOptions contains the optional parameters for the SSHPublicKeysClient.Delete method.
+type SSHPublicKeysClientDeleteOptions struct {
+ // placeholder for future optional parameters
+}
+
+// SSHPublicKeysClientGenerateKeyPairOptions contains the optional parameters for the SSHPublicKeysClient.GenerateKeyPair
+// method.
+type SSHPublicKeysClientGenerateKeyPairOptions struct {
+ // placeholder for future optional parameters
+}
+
+// SSHPublicKeysClientGetOptions contains the optional parameters for the SSHPublicKeysClient.Get method.
+type SSHPublicKeysClientGetOptions struct {
+ // placeholder for future optional parameters
+}
+
+// SSHPublicKeysClientListByResourceGroupOptions contains the optional parameters for the SSHPublicKeysClient.ListByResourceGroup
+// method.
+type SSHPublicKeysClientListByResourceGroupOptions struct {
+ // placeholder for future optional parameters
+}
+
+// SSHPublicKeysClientListBySubscriptionOptions contains the optional parameters for the SSHPublicKeysClient.ListBySubscription
+// method.
+type SSHPublicKeysClientListBySubscriptionOptions struct {
+ // placeholder for future optional parameters
+}
+
+// SSHPublicKeysClientUpdateOptions contains the optional parameters for the SSHPublicKeysClient.Update method.
+type SSHPublicKeysClientUpdateOptions struct {
+ // placeholder for future optional parameters
+}
+
+// SSHPublicKeysGroupListResult - The list SSH public keys operation response.
+type SSHPublicKeysGroupListResult struct {
+ // REQUIRED; The list of SSH public keys
+ Value []*SSHPublicKeyResource `json:"value,omitempty"`
+
+ // The URI to fetch the next page of SSH public keys. Call ListNext() with this URI to fetch the next page of SSH public keys.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// ScaleInPolicy - Describes a scale-in policy for a virtual machine scale set.
+type ScaleInPolicy struct {
+ // This property allows you to specify if virtual machines chosen for removal have to be force deleted when a virtual machine
+ // scale set is being scaled in. (Feature in Preview)
+ ForceDeletion *bool `json:"forceDeletion,omitempty"`
+
+ // The rules to be followed when scaling in a virtual machine scale set.
+ // Possible values are:
+ // Default When a virtual machine scale set is scaled in, the scale set will first be balanced across zones if it is a zonal
+ // scale set. Then, it will be balanced across Fault Domains as far as possible.
+ // Within each Fault Domain, the virtual machines chosen for removal will be the newest ones that are not protected from scale-in.
+ // OldestVM When a virtual machine scale set is being scaled in, the oldest virtual machines that are not protected from scale-in
+ // will be chosen for removal. For zonal virtual machine scale sets, the
+ // scale set will first be balanced across zones. Within each zone, the oldest virtual machines that are not protected will
+ // be chosen for removal.
+ // NewestVM When a virtual machine scale set is being scaled in, the newest virtual machines that are not protected from scale-in
+ // will be chosen for removal. For zonal virtual machine scale sets, the
+ // scale set will first be balanced across zones. Within each zone, the newest virtual machines that are not protected will
+ // be chosen for removal.
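+ // Illustrative value (hypothetical constant name, assuming the azcore to.Ptr helper):
+ //	[]*VirtualMachineScaleSetScaleInRules{to.Ptr(VirtualMachineScaleSetScaleInRulesOldestVM)}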
+ Rules []*VirtualMachineScaleSetScaleInRules `json:"rules,omitempty"` +} + +type ScheduledEventsProfile struct { + // Specifies Terminate Scheduled Event related configurations. + TerminateNotificationProfile *TerminateNotificationProfile `json:"terminateNotificationProfile,omitempty"` +} + +// SecurityProfile - Specifies the Security profile settings for the virtual machine or virtual machine scale set. +type SecurityProfile struct { + // This property can be used by user in the request to enable or disable the Host Encryption for the virtual machine or virtual + // machine scale set. This will enable the encryption for all the disks + // including Resource/Temp disk at host itself. + // Default: The Encryption at host will be disabled unless this property is set to true for the resource. + EncryptionAtHost *bool `json:"encryptionAtHost,omitempty"` + + // Specifies the SecurityType of the virtual machine. It has to be set to any specified value to enable UefiSettings. + // Default: UefiSettings will not be enabled unless this property is set. + SecurityType *SecurityTypes `json:"securityType,omitempty"` + + // Specifies the security settings like secure boot and vTPM used while creating the virtual machine. + // Minimum api-version: 2020-12-01 + UefiSettings *UefiSettings `json:"uefiSettings,omitempty"` +} + +type ShareInfoElement struct { + // READ-ONLY; A relative URI containing the ID of the VM that has the disk attached. + VMURI *string `json:"vmUri,omitempty" azure:"ro"` +} + +// SharedGalleriesClientGetOptions contains the optional parameters for the SharedGalleriesClient.Get method. +type SharedGalleriesClientGetOptions struct { + // placeholder for future optional parameters +} + +// SharedGalleriesClientListOptions contains the optional parameters for the SharedGalleriesClient.List method. +type SharedGalleriesClientListOptions struct { + // The query parameter to decide what shared galleries to fetch when doing listing operations. + SharedTo *SharedToValues +} + +// SharedGallery - Specifies information about the Shared Gallery that you want to create or update. +type SharedGallery struct { + // The identifier information of shared gallery. + Identifier *SharedGalleryIdentifier `json:"identifier,omitempty"` + + // READ-ONLY; Resource location + Location *string `json:"location,omitempty" azure:"ro"` + + // READ-ONLY; Resource name + Name *string `json:"name,omitempty" azure:"ro"` +} + +// SharedGalleryIdentifier - The identifier information of shared gallery. +type SharedGalleryIdentifier struct { + // The unique id of this shared gallery. + UniqueID *string `json:"uniqueId,omitempty"` +} + +// SharedGalleryImage - Specifies information about the gallery image definition that you want to create or update. +type SharedGalleryImage struct { + // The identifier information of shared gallery. + Identifier *SharedGalleryIdentifier `json:"identifier,omitempty"` + + // Describes the properties of a gallery image definition. + Properties *SharedGalleryImageProperties `json:"properties,omitempty"` + + // READ-ONLY; Resource location + Location *string `json:"location,omitempty" azure:"ro"` + + // READ-ONLY; Resource name + Name *string `json:"name,omitempty" azure:"ro"` +} + +// SharedGalleryImageList - The List Shared Gallery Images operation response. +type SharedGalleryImageList struct { + // REQUIRED; A list of shared gallery images. + Value []*SharedGalleryImage `json:"value,omitempty"` + + // The uri to fetch the next page of shared gallery images. 
Call ListNext() with this to fetch the next page of shared gallery + // images. + NextLink *string `json:"nextLink,omitempty"` +} + +// SharedGalleryImageProperties - Describes the properties of a gallery image definition. +type SharedGalleryImageProperties struct { + // REQUIRED; This is the gallery image definition identifier. + Identifier *GalleryImageIdentifier `json:"identifier,omitempty"` + + // REQUIRED; This property allows the user to specify whether the virtual machines created under this image are 'Generalized' + // or 'Specialized'. + OSState *OperatingSystemStateTypes `json:"osState,omitempty"` + + // REQUIRED; This property allows you to specify the type of the OS that is included in the disk when creating a VM from a + // managed image. + // Possible values are: + // Windows + // Linux + OSType *OperatingSystemTypes `json:"osType,omitempty"` + + // Describes the disallowed disk types. + Disallowed *Disallowed `json:"disallowed,omitempty"` + + // The end of life date of the gallery image definition. This property can be used for decommissioning purposes. This property + // is updatable. + EndOfLifeDate *time.Time `json:"endOfLifeDate,omitempty"` + + // A list of gallery image features. + Features []*GalleryImageFeature `json:"features,omitempty"` + + // The hypervisor generation of the Virtual Machine. Applicable to OS disks only. + HyperVGeneration *HyperVGeneration `json:"hyperVGeneration,omitempty"` + + // Describes the gallery image definition purchase plan. This is used by marketplace images. + PurchasePlan *ImagePurchasePlan `json:"purchasePlan,omitempty"` + + // The properties describe the recommended machine configuration for this Image Definition. These properties are updatable. + Recommended *RecommendedMachineConfiguration `json:"recommended,omitempty"` +} + +// SharedGalleryImageVersion - Specifies information about the gallery image version that you want to create or update. +type SharedGalleryImageVersion struct { + // The identifier information of shared gallery. + Identifier *SharedGalleryIdentifier `json:"identifier,omitempty"` + + // Describes the properties of a gallery image version. + Properties *SharedGalleryImageVersionProperties `json:"properties,omitempty"` + + // READ-ONLY; Resource location + Location *string `json:"location,omitempty" azure:"ro"` + + // READ-ONLY; Resource name + Name *string `json:"name,omitempty" azure:"ro"` +} + +// SharedGalleryImageVersionList - The List Shared Gallery Image versions operation response. +type SharedGalleryImageVersionList struct { + // REQUIRED; A list of shared gallery images versions. + Value []*SharedGalleryImageVersion `json:"value,omitempty"` + + // The uri to fetch the next page of shared gallery image versions. Call ListNext() with this to fetch the next page of shared + // gallery image versions. + NextLink *string `json:"nextLink,omitempty"` +} + +// SharedGalleryImageVersionProperties - Describes the properties of a gallery image version. +type SharedGalleryImageVersionProperties struct { + // The end of life date of the gallery image version Definition. This property can be used for decommissioning purposes. This + // property is updatable. + EndOfLifeDate *time.Time `json:"endOfLifeDate,omitempty"` + + // The published date of the gallery image version Definition. This property can be used for decommissioning purposes. This + // property is updatable. 
+ PublishedDate *time.Time `json:"publishedDate,omitempty"` +} + +// SharedGalleryImageVersionsClientGetOptions contains the optional parameters for the SharedGalleryImageVersionsClient.Get +// method. +type SharedGalleryImageVersionsClientGetOptions struct { + // placeholder for future optional parameters +} + +// SharedGalleryImageVersionsClientListOptions contains the optional parameters for the SharedGalleryImageVersionsClient.List +// method. +type SharedGalleryImageVersionsClientListOptions struct { + // The query parameter to decide what shared galleries to fetch when doing listing operations. + SharedTo *SharedToValues +} + +// SharedGalleryImagesClientGetOptions contains the optional parameters for the SharedGalleryImagesClient.Get method. +type SharedGalleryImagesClientGetOptions struct { + // placeholder for future optional parameters +} + +// SharedGalleryImagesClientListOptions contains the optional parameters for the SharedGalleryImagesClient.List method. +type SharedGalleryImagesClientListOptions struct { + // The query parameter to decide what shared galleries to fetch when doing listing operations. + SharedTo *SharedToValues +} + +// SharedGalleryList - The List Shared Galleries operation response. +type SharedGalleryList struct { + // REQUIRED; A list of shared galleries. + Value []*SharedGallery `json:"value,omitempty"` + + // The uri to fetch the next page of shared galleries. Call ListNext() with this to fetch the next page of shared galleries. + NextLink *string `json:"nextLink,omitempty"` +} + +// SharingProfile - Profile for gallery sharing to subscription or tenant +type SharingProfile struct { + // Information of community gallery if current gallery is shared to community. + CommunityGalleryInfo interface{} `json:"communityGalleryInfo,omitempty"` + + // This property allows you to specify the permission of sharing gallery. + // Possible values are: + // Private + // Groups + Permissions *GallerySharingPermissionTypes `json:"permissions,omitempty"` + + // READ-ONLY; A list of sharing profile groups. + Groups []*SharingProfileGroup `json:"groups,omitempty" azure:"ro"` +} + +// SharingProfileGroup - Group of the gallery sharing profile +type SharingProfileGroup struct { + // A list of subscription/tenant ids the gallery is aimed to be shared to. + IDs []*string `json:"ids,omitempty"` + + // This property allows you to specify the type of sharing group. + // Possible values are: + // Subscriptions + // AADTenants + // Community + Type *SharingProfileGroupTypes `json:"type,omitempty"` +} + +// SharingStatus - Sharing status of current gallery. +type SharingStatus struct { + // Summary of all regional sharing status. + Summary []*RegionalSharingStatus `json:"summary,omitempty"` + + // READ-ONLY; Aggregated sharing state of current gallery. + AggregatedState *SharingState `json:"aggregatedState,omitempty" azure:"ro"` +} + +// SharingUpdate - Specifies information about the gallery sharing profile update. +type SharingUpdate struct { + // REQUIRED; This property allows you to specify the operation type of gallery sharing update. + // Possible values are: + // Add + // Remove + // Reset + OperationType *SharingUpdateOperationTypes `json:"operationType,omitempty"` + + // A list of sharing profile groups. + Groups []*SharingProfileGroup `json:"groups,omitempty"` +} + +// Snapshot resource. +type Snapshot struct { + // REQUIRED; Resource location + Location *string `json:"location,omitempty"` + + // The extended location where the snapshot will be created. 
Extended location cannot be changed.
+ ExtendedLocation *ExtendedLocation `json:"extendedLocation,omitempty"`
+
+ // Snapshot resource properties.
+ Properties *SnapshotProperties `json:"properties,omitempty"`
+
+ // The snapshots sku name. Can be StandardLRS, PremiumLRS, or Standard_ZRS. This is an optional parameter for incremental
+ // snapshot and the default behavior is the SKU will be set to the same sku as the
+ // previous snapshot.
+ SKU *SnapshotSKU `json:"sku,omitempty"`
+
+ // Resource tags
+ Tags map[string]*string `json:"tags,omitempty"`
+
+ // READ-ONLY; Resource Id
+ ID *string `json:"id,omitempty" azure:"ro"`
+
+ // READ-ONLY; Unused. Always Null.
+ ManagedBy *string `json:"managedBy,omitempty" azure:"ro"`
+
+ // READ-ONLY; Resource name
+ Name *string `json:"name,omitempty" azure:"ro"`
+
+ // READ-ONLY; Resource type
+ Type *string `json:"type,omitempty" azure:"ro"`
+}
+
+// SnapshotList - The List Snapshots operation response.
+type SnapshotList struct {
+ // REQUIRED; A list of snapshots.
+ Value []*Snapshot `json:"value,omitempty"`
+
+ // The uri to fetch the next page of snapshots. Call ListNext() with this to fetch the next page of snapshots.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// SnapshotProperties - Snapshot resource properties.
+type SnapshotProperties struct {
+ // REQUIRED; Disk source information. CreationData information cannot be changed after the disk has been created.
+ CreationData *CreationData `json:"creationData,omitempty"`
+
+ // Percentage complete for the background copy when a resource is created via the CopyStart operation.
+ CompletionPercent *float32 `json:"completionPercent,omitempty"`
+
+ // Additional authentication requirements when exporting or uploading to a disk or snapshot.
+ DataAccessAuthMode *DataAccessAuthMode `json:"dataAccessAuthMode,omitempty"`
+
+ // ARM id of the DiskAccess resource for using private endpoints on disks.
+ DiskAccessID *string `json:"diskAccessId,omitempty"`
+
+ // If creationData.createOption is Empty, this field is mandatory and it indicates the size of the disk to create. If this
+ // field is present for updates or creation with other options, it indicates a
+ // resize. Resizes are only allowed if the disk is not attached to a running VM, and can only increase the disk's size.
+ DiskSizeGB *int32 `json:"diskSizeGB,omitempty"`
+
+ // Encryption property can be used to encrypt data at rest with customer managed keys or platform managed keys.
+ Encryption *Encryption `json:"encryption,omitempty"`
+
+ // Encryption settings collection used by Azure Disk Encryption, can contain multiple encryption settings per disk or snapshot.
+ EncryptionSettingsCollection *EncryptionSettingsCollection `json:"encryptionSettingsCollection,omitempty"`
+
+ // The hypervisor generation of the Virtual Machine. Applicable to OS disks only.
+ HyperVGeneration *HyperVGeneration `json:"hyperVGeneration,omitempty"`
+
+ // Whether a snapshot is incremental. Incremental snapshots on the same disk occupy less space than full snapshots and can
+ // be diffed.
+ Incremental *bool `json:"incremental,omitempty"`
+
+ // Policy for accessing the disk via network.
+ NetworkAccessPolicy *NetworkAccessPolicy `json:"networkAccessPolicy,omitempty"`
+
+ // The Operating System type.
+ OSType *OperatingSystemTypes `json:"osType,omitempty"`
+
+ // Policy for controlling export on the disk.
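+ // Illustrative value (hypothetical constant name): to.Ptr(PublicNetworkAccessDisabled) blocks export over the public endpoint.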
+ PublicNetworkAccess *PublicNetworkAccess `json:"publicNetworkAccess,omitempty"`
+
+ // Purchase plan information for the image from which the source disk for the snapshot was originally created.
+ PurchasePlan *DiskPurchasePlan `json:"purchasePlan,omitempty"`
+
+ // Contains the security related information for the resource.
+ SecurityProfile *DiskSecurityProfile `json:"securityProfile,omitempty"`
+
+ // List of supported capabilities for the image from which the source disk for the snapshot was originally created.
+ SupportedCapabilities *SupportedCapabilities `json:"supportedCapabilities,omitempty"`
+
+ // Indicates whether the OS on a snapshot supports hibernation.
+ SupportsHibernation *bool `json:"supportsHibernation,omitempty"`
+
+ // READ-ONLY; The size of the disk in bytes. This field is read only.
+ DiskSizeBytes *int64 `json:"diskSizeBytes,omitempty" azure:"ro"`
+
+ // READ-ONLY; The state of the snapshot.
+ DiskState *DiskState `json:"diskState,omitempty" azure:"ro"`
+
+ // READ-ONLY; The disk provisioning state.
+ ProvisioningState *string `json:"provisioningState,omitempty" azure:"ro"`
+
+ // READ-ONLY; The time when the snapshot was created.
+ TimeCreated *time.Time `json:"timeCreated,omitempty" azure:"ro"`
+
+ // READ-ONLY; Unique GUID identifying the resource.
+ UniqueID *string `json:"uniqueId,omitempty" azure:"ro"`
+}
+
+// SnapshotSKU - The snapshots sku name. Can be Standard_LRS, Premium_LRS, or Standard_ZRS. This is an optional parameter for
+// incremental snapshot and the default behavior is that the SKU will be set to the same SKU as the
+// previous snapshot.
+type SnapshotSKU struct {
+ // The sku name.
+ Name *SnapshotStorageAccountTypes `json:"name,omitempty"`
+
+ // READ-ONLY; The sku tier.
+ Tier *string `json:"tier,omitempty" azure:"ro"`
+}
+
+// SnapshotUpdate - Snapshot update resource.
+type SnapshotUpdate struct {
+ // Snapshot resource update properties.
+ Properties *SnapshotUpdateProperties `json:"properties,omitempty"`
+
+ // The snapshots sku name. Can be Standard_LRS, Premium_LRS, or Standard_ZRS. This is an optional parameter for incremental
+ // snapshot and the default behavior is that the SKU will be set to the same SKU as the
+ // previous snapshot.
+ SKU *SnapshotSKU `json:"sku,omitempty"`
+
+ // Resource tags
+ Tags map[string]*string `json:"tags,omitempty"`
+}
+
+// SnapshotUpdateProperties - Snapshot resource update properties.
+type SnapshotUpdateProperties struct {
+ // Additional authentication requirements when exporting or uploading to a disk or snapshot.
+ DataAccessAuthMode *DataAccessAuthMode `json:"dataAccessAuthMode,omitempty"`
+
+ // ARM id of the DiskAccess resource for using private endpoints on disks.
+ DiskAccessID *string `json:"diskAccessId,omitempty"`
+
+ // If creationData.createOption is Empty, this field is mandatory and it indicates the size of the disk to create. If this
+ // field is present for updates or creation with other options, it indicates a
+ // resize. Resizes are only allowed if the disk is not attached to a running VM, and can only increase the disk's size.
+ DiskSizeGB *int32 `json:"diskSizeGB,omitempty"`
+
+ // Encryption property can be used to encrypt data at rest with customer managed keys or platform managed keys.
+ Encryption *Encryption `json:"encryption,omitempty"`
+
+ // Encryption settings collection used by Azure Disk Encryption; it can contain multiple encryption settings per disk or snapshot.
+ EncryptionSettingsCollection *EncryptionSettingsCollection `json:"encryptionSettingsCollection,omitempty"`
+
+ // Policy for accessing the disk via network.
+ NetworkAccessPolicy *NetworkAccessPolicy `json:"networkAccessPolicy,omitempty"`
+
+ // The Operating System type.
+ OSType *OperatingSystemTypes `json:"osType,omitempty"`
+
+ // Policy for controlling export on the disk.
+ PublicNetworkAccess *PublicNetworkAccess `json:"publicNetworkAccess,omitempty"`
+
+ // List of supported capabilities for the image from which the OS disk was created.
+ SupportedCapabilities *SupportedCapabilities `json:"supportedCapabilities,omitempty"`
+
+ // Indicates whether the OS on a snapshot supports hibernation.
+ SupportsHibernation *bool `json:"supportsHibernation,omitempty"`
+}
+
+// SnapshotsClientBeginCreateOrUpdateOptions contains the optional parameters for the SnapshotsClient.BeginCreateOrUpdate
+// method.
+type SnapshotsClientBeginCreateOrUpdateOptions struct {
+ // Resumes the LRO from the provided token.
+ ResumeToken string
+}
+
+// SnapshotsClientBeginDeleteOptions contains the optional parameters for the SnapshotsClient.BeginDelete method.
+type SnapshotsClientBeginDeleteOptions struct {
+ // Resumes the LRO from the provided token.
+ ResumeToken string
+}
+
+// SnapshotsClientBeginGrantAccessOptions contains the optional parameters for the SnapshotsClient.BeginGrantAccess method.
+type SnapshotsClientBeginGrantAccessOptions struct {
+ // Resumes the LRO from the provided token.
+ ResumeToken string
+}
+
+// SnapshotsClientBeginRevokeAccessOptions contains the optional parameters for the SnapshotsClient.BeginRevokeAccess method.
+type SnapshotsClientBeginRevokeAccessOptions struct {
+ // Resumes the LRO from the provided token.
+ ResumeToken string
+}
+
+// SnapshotsClientBeginUpdateOptions contains the optional parameters for the SnapshotsClient.BeginUpdate method.
+type SnapshotsClientBeginUpdateOptions struct {
+ // Resumes the LRO from the provided token.
+ ResumeToken string
+}
+
+// SnapshotsClientGetOptions contains the optional parameters for the SnapshotsClient.Get method.
+type SnapshotsClientGetOptions struct {
+ // placeholder for future optional parameters
+}
+
+// SnapshotsClientListByResourceGroupOptions contains the optional parameters for the SnapshotsClient.ListByResourceGroup
+// method.
+type SnapshotsClientListByResourceGroupOptions struct {
+ // placeholder for future optional parameters
+}
+
+// SnapshotsClientListOptions contains the optional parameters for the SnapshotsClient.List method.
+type SnapshotsClientListOptions struct {
+ // placeholder for future optional parameters
+}
+
+// SoftDeletePolicy - Contains information about the soft deletion policy of the gallery.
+type SoftDeletePolicy struct {
+ // Enables soft-deletion for resources in this gallery, allowing them to be recovered within the retention time.
+ IsSoftDeleteEnabled *bool `json:"isSoftDeleteEnabled,omitempty"`
+}
+
+// SourceVault - The vault id is an Azure Resource Manager Resource id in the form /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/vaults/{vaultName}
+type SourceVault struct {
+ // Resource Id
+ ID *string `json:"id,omitempty"`
+}
+
+// SpotRestorePolicy - Specifies the Spot-Try-Restore properties for the virtual machine scale set.
+// With this property the customer can enable or disable automatic restore of evicted Spot VMSS VM instances opportunistically
+// based on capacity availability and pricing constraints.
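+//
+// Illustrative sketch (not part of the generated code): assuming this package is
+// imported as armcompute and a generic pointer helper such as azcore's to.Ptr is
+// available, a policy that retries restoration for up to an hour might look like:
+//
+//	policy := armcompute.SpotRestorePolicy{
+//		Enabled:        to.Ptr(true),
+//		RestoreTimeout: to.Ptr("PT1H"), // ISO 8601 duration
+//	}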
+type SpotRestorePolicy struct {
+ // Enables the Spot-Try-Restore feature, in which the platform opportunistically tries to restore evicted VMSS Spot instances
+ // based on capacity availability and pricing constraints
+ Enabled *bool `json:"enabled,omitempty"`
+
+ // Timeout value expressed as an ISO 8601 time duration after which the platform will not try to restore the VMSS Spot instances
+ RestoreTimeout *string `json:"restoreTimeout,omitempty"`
+}
+
+type StatusCodeCount struct {
+ // READ-ONLY; The instance view status code
+ Code *string `json:"code,omitempty" azure:"ro"`
+
+ // READ-ONLY; Number of instances having this status code
+ Count *int32 `json:"count,omitempty" azure:"ro"`
+}
+
+// StorageProfile - Specifies the storage settings for the virtual machine disks.
+type StorageProfile struct {
+ // Specifies the parameters that are used to add a data disk to a virtual machine.
+ // For more information about disks, see About disks and VHDs for Azure virtual machines [https://docs.microsoft.com/azure/virtual-machines/managed-disks-overview].
+ DataDisks []*DataDisk `json:"dataDisks,omitempty"`
+
+ // Specifies information about the image to use. You can specify information about platform images, marketplace images, or
+ // virtual machine images. This element is required when you want to use a platform
+ // image, marketplace image, or virtual machine image, but is not used in other creation operations.
+ ImageReference *ImageReference `json:"imageReference,omitempty"`
+
+ // Specifies information about the operating system disk used by the virtual machine.
+ // For more information about disks, see About disks and VHDs for Azure virtual machines [https://docs.microsoft.com/azure/virtual-machines/managed-disks-overview].
+ OSDisk *OSDisk `json:"osDisk,omitempty"`
+}
+
+type SubResource struct {
+ // Resource Id
+ ID *string `json:"id,omitempty"`
+}
+
+type SubResourceReadOnly struct {
+ // READ-ONLY; Resource Id
+ ID *string `json:"id,omitempty" azure:"ro"`
+}
+
+type SubResourceWithColocationStatus struct {
+ // Describes colocation status of a resource in the Proximity Placement Group.
+ ColocationStatus *InstanceViewStatus `json:"colocationStatus,omitempty"`
+
+ // Resource Id
+ ID *string `json:"id,omitempty"`
+}
+
+// SupportedCapabilities - List of supported capabilities persisted on the disk resource for VM use.
+type SupportedCapabilities struct {
+ // True if the image from which the OS disk is created supports accelerated networking.
+ AcceleratedNetwork *bool `json:"acceleratedNetwork,omitempty"`
+
+ // CPU architecture supported by an OS disk.
+ Architecture *Architecture `json:"architecture,omitempty"`
+}
+
+// TargetRegion - Describes the target region information.
+type TargetRegion struct {
+ // REQUIRED; The name of the region.
+ Name *string `json:"name,omitempty"`
+
+ // Optional. Allows users to provide customer managed keys for encrypting the OS and data disks in the gallery artifact.
+ Encryption *EncryptionImages `json:"encryption,omitempty"`
+
+ // The number of replicas of the Image Version to be created per region. This property is updatable.
+ RegionalReplicaCount *int32 `json:"regionalReplicaCount,omitempty"`
+
+ // Specifies the storage account type to be used to store the image. This property is not updatable.
+ StorageAccountType *StorageAccountType `json:"storageAccountType,omitempty"`
+}
+
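+// Illustrative sketch (not part of the generated code): assuming the azcore "to"
+// pointer helper, a profile that enables the Terminate Scheduled Event with a
+// 10-minute approval window could be constructed as:
+//
+//	profile := armcompute.TerminateNotificationProfile{
+//		Enable:           to.Ptr(true),
+//		NotBeforeTimeout: to.Ptr("PT10M"), // ISO 8601 duration
+//	}
+type TerminateNotificationProfile struct {
+ // Specifies whether the Terminate Scheduled event is enabled or disabled.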
+ Enable *bool `json:"enable,omitempty"`
+
+ // Configurable length of time a Virtual Machine being deleted will have to potentially approve the Terminate Scheduled Event
+ // before the event is auto-approved (timed out). The configuration must be
+ // specified in ISO 8601 format; the default value is 5 minutes (PT5M)
+ NotBeforeTimeout *string `json:"notBeforeTimeout,omitempty"`
+}
+
+// ThrottledRequestsInput - API request input for the LogAnalytics getThrottledRequests API.
+type ThrottledRequestsInput struct {
+ // REQUIRED; SAS URI of the logging blob container to which the LogAnalytics API writes output logs.
+ BlobContainerSasURI *string `json:"blobContainerSasUri,omitempty"`
+
+ // REQUIRED; From time of the query
+ FromTime *time.Time `json:"fromTime,omitempty"`
+
+ // REQUIRED; To time of the query
+ ToTime *time.Time `json:"toTime,omitempty"`
+
+ // Group query result by Client Application ID.
+ GroupByClientApplicationID *bool `json:"groupByClientApplicationId,omitempty"`
+
+ // Group query result by Operation Name.
+ GroupByOperationName *bool `json:"groupByOperationName,omitempty"`
+
+ // Group query result by Resource Name.
+ GroupByResourceName *bool `json:"groupByResourceName,omitempty"`
+
+ // Group query result by Throttle Policy applied.
+ GroupByThrottlePolicy *bool `json:"groupByThrottlePolicy,omitempty"`
+
+ // Group query result by User Agent.
+ GroupByUserAgent *bool `json:"groupByUserAgent,omitempty"`
+}
+
+// UefiSettings - Specifies the security settings like secure boot and vTPM used while creating the virtual machine.
+// Minimum api-version: 2020-12-01
+type UefiSettings struct {
+ // Specifies whether secure boot should be enabled on the virtual machine.
+ // Minimum api-version: 2020-12-01
+ SecureBootEnabled *bool `json:"secureBootEnabled,omitempty"`
+
+ // Specifies whether vTPM should be enabled on the virtual machine.
+ // Minimum api-version: 2020-12-01
+ VTpmEnabled *bool `json:"vTpmEnabled,omitempty"`
+}
+
+// UpdateDomain - Defines an update domain for the cloud service.
+type UpdateDomain struct {
+ // READ-ONLY; Resource Id
+ ID *string `json:"id,omitempty" azure:"ro"`
+
+ // READ-ONLY; Resource Name
+ Name *string `json:"name,omitempty" azure:"ro"`
+}
+
+type UpdateDomainListResult struct {
+ // REQUIRED; The list of update domains.
+ Value []*UpdateDomain `json:"value,omitempty"`
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// UpdateResource - The Update Resource model definition.
+type UpdateResource struct {
+ // Resource tags
+ Tags map[string]*string `json:"tags,omitempty"`
+}
+
+// UpdateResourceDefinition - The Update Resource model definition.
+type UpdateResourceDefinition struct {
+ // Resource tags
+ Tags map[string]*string `json:"tags,omitempty"`
+
+ // READ-ONLY; Resource Id
+ ID *string `json:"id,omitempty" azure:"ro"`
+
+ // READ-ONLY; Resource name
+ Name *string `json:"name,omitempty" azure:"ro"`
+
+ // READ-ONLY; Resource type
+ Type *string `json:"type,omitempty" azure:"ro"`
+}
+
+// UpgradeOperationHistoricalStatusInfo - Virtual Machine Scale Set OS Upgrade History operation response.
+type UpgradeOperationHistoricalStatusInfo struct {
+ // READ-ONLY; Resource location
+ Location *string `json:"location,omitempty" azure:"ro"`
+
+ // READ-ONLY; Information about the properties of the upgrade operation.
+ Properties *UpgradeOperationHistoricalStatusInfoProperties `json:"properties,omitempty" azure:"ro"` + + // READ-ONLY; Resource type + Type *string `json:"type,omitempty" azure:"ro"` +} + +// UpgradeOperationHistoricalStatusInfoProperties - Describes each OS upgrade on the Virtual Machine Scale Set. +type UpgradeOperationHistoricalStatusInfoProperties struct { + // READ-ONLY; Error Details for this upgrade if there are any. + Error *APIError `json:"error,omitempty" azure:"ro"` + + // READ-ONLY; Counts of the VMs in each state. + Progress *RollingUpgradeProgressInfo `json:"progress,omitempty" azure:"ro"` + + // READ-ONLY; Information about OS rollback if performed + RollbackInfo *RollbackStatusInfo `json:"rollbackInfo,omitempty" azure:"ro"` + + // READ-ONLY; Information about the overall status of the upgrade operation. + RunningStatus *UpgradeOperationHistoryStatus `json:"runningStatus,omitempty" azure:"ro"` + + // READ-ONLY; Invoker of the Upgrade Operation + StartedBy *UpgradeOperationInvoker `json:"startedBy,omitempty" azure:"ro"` + + // READ-ONLY; Image Reference details + TargetImageReference *ImageReference `json:"targetImageReference,omitempty" azure:"ro"` +} + +// UpgradeOperationHistoryStatus - Information about the current running state of the overall upgrade. +type UpgradeOperationHistoryStatus struct { + // READ-ONLY; Code indicating the current status of the upgrade. + Code *UpgradeState `json:"code,omitempty" azure:"ro"` + + // READ-ONLY; End time of the upgrade. + EndTime *time.Time `json:"endTime,omitempty" azure:"ro"` + + // READ-ONLY; Start time of the upgrade. + StartTime *time.Time `json:"startTime,omitempty" azure:"ro"` +} + +// UpgradePolicy - Describes an upgrade policy - automatic, manual, or rolling. +type UpgradePolicy struct { + // Configuration parameters used for performing automatic OS Upgrade. + AutomaticOSUpgradePolicy *AutomaticOSUpgradePolicy `json:"automaticOSUpgradePolicy,omitempty"` + + // Specifies the mode of an upgrade to virtual machines in the scale set. + // Possible values are: + // Manual - You control the application of updates to virtual machines in the scale set. You do this by using the manualUpgrade + // action. + // Automatic - All virtual machines in the scale set are automatically updated at the same time. + Mode *UpgradeMode `json:"mode,omitempty"` + + // The configuration parameters used while performing a rolling upgrade. + RollingUpgradePolicy *RollingUpgradePolicy `json:"rollingUpgradePolicy,omitempty"` +} + +// Usage - Describes Compute Resource Usage. +type Usage struct { + // REQUIRED; The current usage of the resource. + CurrentValue *int32 `json:"currentValue,omitempty"` + + // REQUIRED; The maximum permitted usage of the resource. + Limit *int64 `json:"limit,omitempty"` + + // REQUIRED; The name of the type of usage. + Name *UsageName `json:"name,omitempty"` + + // REQUIRED; An enum describing the unit of usage measurement. + Unit *string `json:"unit,omitempty"` +} + +// UsageClientListOptions contains the optional parameters for the UsageClient.List method. +type UsageClientListOptions struct { + // placeholder for future optional parameters +} + +// UsageName - The Usage Names. +type UsageName struct { + // The localized name of the resource. + LocalizedValue *string `json:"localizedValue,omitempty"` + + // The name of the resource. + Value *string `json:"value,omitempty"` +} + +type UserArtifactManage struct { + // REQUIRED; Required. The path and arguments to install the gallery application. 
This is limited to 4096 characters.
+ Install *string `json:"install,omitempty"`
+
+ // REQUIRED; Required. The path and arguments to remove the gallery application. This is limited to 4096 characters.
+ Remove *string `json:"remove,omitempty"`
+
+ // Optional. The path and arguments to update the gallery application. If not present, the update operation will invoke the
+ // remove command on the previous version and the install command on the current version
+ // of the gallery application. This is limited to 4096 characters.
+ Update *string `json:"update,omitempty"`
+}
+
+// UserArtifactSource - The source image from which the Image Version is going to be created.
+type UserArtifactSource struct {
+ // REQUIRED; Required. The mediaLink of the artifact; it must be a readable storage page blob.
+ MediaLink *string `json:"mediaLink,omitempty"`
+
+ // Optional. The defaultConfigurationLink of the artifact; it must be a readable storage page blob.
+ DefaultConfigurationLink *string `json:"defaultConfigurationLink,omitempty"`
+}
+
+type UserAssignedIdentitiesValue struct {
+ // READ-ONLY; The client id of user assigned identity.
+ ClientID *string `json:"clientId,omitempty" azure:"ro"`
+
+ // READ-ONLY; The principal id of user assigned identity.
+ PrincipalID *string `json:"principalId,omitempty" azure:"ro"`
+}
+
+// VMDiskSecurityProfile - Specifies the security profile settings for the managed disk.
+// NOTE: It can only be set for Confidential VMs
+type VMDiskSecurityProfile struct {
+ // Specifies the customer managed disk encryption set resource id for the managed disk that is used for Customer Managed Key
+ // encrypted ConfidentialVM OS Disk and VMGuest blob.
+ DiskEncryptionSet *DiskEncryptionSetParameters `json:"diskEncryptionSet,omitempty"`
+
+ // Specifies the EncryptionType of the managed disk.
+ // It is set to DiskWithVMGuestState for encryption of the managed disk along with VMGuestState blob, and VMGuestStateOnly
+ // for encryption of just the VMGuestState blob.
+ // NOTE: It can be set for only Confidential VMs.
+ SecurityEncryptionType *SecurityEncryptionTypes `json:"securityEncryptionType,omitempty"`
+}
+
+// VMGalleryApplication - Specifies the required information to reference a compute gallery application version
+type VMGalleryApplication struct {
+ // REQUIRED; Specifies the GalleryApplicationVersion resource id in the form of
+ // /subscriptions/{SubscriptionId}/resourceGroups/{ResourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{application}/versions/{version}
+ PackageReferenceID *string `json:"packageReferenceId,omitempty"`
+
+ // Optional. Specifies the URI to an Azure blob that will replace the default configuration for the package, if provided
+ ConfigurationReference *string `json:"configurationReference,omitempty"`
+
+ // If set to true, when a new Gallery Application version is available in PIR/SIG, it will be automatically updated for the
+ // VM/VMSS
+ EnableAutomaticUpgrade *bool `json:"enableAutomaticUpgrade,omitempty"`
+
+ // Optional. Specifies the order in which the packages have to be installed
+ Order *int32 `json:"order,omitempty"`
+
+ // Optional. Specifies a passthrough value for more generic context.
+ Tags *string `json:"tags,omitempty"`
+
+ // Optional. If true, any failure for any operation in the VmApplication will fail the deployment
+ TreatFailureAsDeploymentFailure *bool `json:"treatFailureAsDeploymentFailure,omitempty"`
+}
+
+type VMScaleSetConvertToSinglePlacementGroupInput struct {
+ // Id of the placement group in which you want future virtual machine instances to be placed. To query placement group Id,
+ // please use Virtual Machine Scale Set VMs - Get API. If not provided, the
+ // platform will choose the one with the maximum number of virtual machine instances.
+ ActivePlacementGroupID *string `json:"activePlacementGroupId,omitempty"`
+}
+
+// VMSizeProperties - Specifies VM Size Property settings on the virtual machine.
+type VMSizeProperties struct {
+ // Specifies the number of vCPUs available for the VM.
+ // When this property is not specified in the request body, the default behavior is to set it to the value of vCPUs available
+ // for that VM size exposed in the API response of List all available virtual machine
+ // sizes in a region [https://docs.microsoft.com/en-us/rest/api/compute/resource-skus/list] .
+ VCPUsAvailable *int32 `json:"vCPUsAvailable,omitempty"`
+
+ // Specifies the vCPU to physical core ratio.
+ // When this property is not specified in the request body, the default behavior is set to the value of vCPUsPerCore for the
+ // VM Size exposed in the API response of List all available virtual machine sizes in
+ // a region [https://docs.microsoft.com/en-us/rest/api/compute/resource-skus/list]
+ // Setting this property to 1 also means that hyper-threading is disabled.
+ VCPUsPerCore *int32 `json:"vCPUsPerCore,omitempty"`
+}
+
+// VaultCertificate - Describes a single certificate reference in a Key Vault, and where the certificate should reside on
+// the VM.
+type VaultCertificate struct {
+ // For Windows VMs, specifies the certificate store on the Virtual Machine to which the certificate should be added. The specified
+ // certificate store is implicitly in the LocalMachine account.
+ // For Linux VMs, the certificate file is placed under the /var/lib/waagent directory, with the file name .crt
+ // for the X509 certificate file and .prv for the private
+ // key. Both of these files are .pem formatted.
+ CertificateStore *string `json:"certificateStore,omitempty"`
+
+ // This is the URL of a certificate that has been uploaded to Key Vault as a secret. For adding a secret to the Key Vault,
+ // see Add a key or secret to the key vault
+ // [https://docs.microsoft.com/azure/key-vault/key-vault-get-started/#add]. In this case, your certificate needs to be the
+ // Base64 encoding of the following JSON Object, encoded in UTF-8:
+ // {
+ // "data":"",
+ // "dataType":"pfx",
+ // "password":""
+ // }
+ // To install certificates on a virtual machine it is recommended to use the Azure Key Vault virtual machine extension for
+ // Linux
+ // [https://docs.microsoft.com/azure/virtual-machines/extensions/key-vault-linux] or the Azure Key Vault virtual machine extension
+ // for Windows
+ // [https://docs.microsoft.com/azure/virtual-machines/extensions/key-vault-windows].
+ CertificateURL *string `json:"certificateUrl,omitempty"`
+}
+
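+// VaultSecretGroup - Describes a set of certificates which are all in the same Key Vault.
+//
+// Illustrative sketch (not part of the generated code): assuming the azcore "to"
+// pointer helper and hypothetical vault and certificate names, a group referencing
+// a single certificate might be assembled as:
+//
+//	group := armcompute.VaultSecretGroup{
+//		SourceVault: &armcompute.SubResource{ID: to.Ptr("/subscriptions/.../vaults/myVault")},
+//		VaultCertificates: []*armcompute.VaultCertificate{
+//			{CertificateURL: to.Ptr("https://myvault.vault.azure.net/secrets/myCert")},
+//		},
+//	}
+type VaultSecretGroup struct {
+ // The relative URL of the Key Vault containing all of the certificates in VaultCertificates.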
+ SourceVault *SubResource `json:"sourceVault,omitempty"` + + // The list of key vault references in SourceVault which contain certificates. + VaultCertificates []*VaultCertificate `json:"vaultCertificates,omitempty"` +} + +// VirtualHardDisk - Describes the uri of a disk. +type VirtualHardDisk struct { + // Specifies the virtual hard disk's uri. + URI *string `json:"uri,omitempty"` +} + +// VirtualMachine - Describes a Virtual Machine. +type VirtualMachine struct { + // REQUIRED; Resource location + Location *string `json:"location,omitempty"` + + // The extended location of the Virtual Machine. + ExtendedLocation *ExtendedLocation `json:"extendedLocation,omitempty"` + + // The identity of the virtual machine, if configured. + Identity *VirtualMachineIdentity `json:"identity,omitempty"` + + // Specifies information about the marketplace image used to create the virtual machine. This element is only used for marketplace + // images. Before you can use a marketplace image from an API, you must + // enable the image for programmatic use. In the Azure portal, find the marketplace image that you want to use and then click + // Want to deploy programmatically, Get Started ->. Enter any required + // information and then click Save. + Plan *Plan `json:"plan,omitempty"` + + // Describes the properties of a Virtual Machine. + Properties *VirtualMachineProperties `json:"properties,omitempty"` + + // Resource tags + Tags map[string]*string `json:"tags,omitempty"` + + // The virtual machine zones. + Zones []*string `json:"zones,omitempty"` + + // READ-ONLY; Resource Id + ID *string `json:"id,omitempty" azure:"ro"` + + // READ-ONLY; Resource name + Name *string `json:"name,omitempty" azure:"ro"` + + // READ-ONLY; The virtual machine child extension resources. + Resources []*VirtualMachineExtension `json:"resources,omitempty" azure:"ro"` + + // READ-ONLY; Resource type + Type *string `json:"type,omitempty" azure:"ro"` +} + +// VirtualMachineAgentInstanceView - The instance view of the VM Agent running on the virtual machine. +type VirtualMachineAgentInstanceView struct { + // The virtual machine extension handler instance view. + ExtensionHandlers []*VirtualMachineExtensionHandlerInstanceView `json:"extensionHandlers,omitempty"` + + // The resource status information. + Statuses []*InstanceViewStatus `json:"statuses,omitempty"` + + // The VM Agent full version. + VMAgentVersion *string `json:"vmAgentVersion,omitempty"` +} + +// VirtualMachineAssessPatchesResult - Describes the properties of an AssessPatches result. +type VirtualMachineAssessPatchesResult struct { + // READ-ONLY; The activity ID of the operation that produced this result. It is used to correlate across CRP and extension + // logs. + AssessmentActivityID *string `json:"assessmentActivityId,omitempty" azure:"ro"` + + // READ-ONLY; The list of patches that have been detected as available for installation. + AvailablePatches []*VirtualMachineSoftwarePatchProperties `json:"availablePatches,omitempty" azure:"ro"` + + // READ-ONLY; The number of critical or security patches that have been detected as available and not yet installed. + CriticalAndSecurityPatchCount *int32 `json:"criticalAndSecurityPatchCount,omitempty" azure:"ro"` + + // READ-ONLY; The errors that were encountered during execution of the operation. The details array contains the list of them. + Error *APIError `json:"error,omitempty" azure:"ro"` + + // READ-ONLY; The number of all available patches excluding critical and security. 
+ OtherPatchCount *int32 `json:"otherPatchCount,omitempty" azure:"ro"` + + // READ-ONLY; The overall reboot status of the VM. It will be true when partially installed patches require a reboot to complete + // installation but the reboot has not yet occurred. + RebootPending *bool `json:"rebootPending,omitempty" azure:"ro"` + + // READ-ONLY; The UTC timestamp when the operation began. + StartDateTime *time.Time `json:"startDateTime,omitempty" azure:"ro"` + + // READ-ONLY; The overall success or failure status of the operation. It remains "InProgress" until the operation completes. + // At that point it will become "Unknown", "Failed", "Succeeded", or + // "CompletedWithWarnings." + Status *PatchOperationStatus `json:"status,omitempty" azure:"ro"` +} + +// VirtualMachineCaptureParameters - Capture Virtual Machine parameters. +type VirtualMachineCaptureParameters struct { + // REQUIRED; The destination container name. + DestinationContainerName *string `json:"destinationContainerName,omitempty"` + + // REQUIRED; Specifies whether to overwrite the destination virtual hard disk, in case of conflict. + OverwriteVhds *bool `json:"overwriteVhds,omitempty"` + + // REQUIRED; The captured virtual hard disk's name prefix. + VhdPrefix *string `json:"vhdPrefix,omitempty"` +} + +// VirtualMachineCaptureResult - Output of virtual machine capture operation. +type VirtualMachineCaptureResult struct { + // Resource Id + ID *string `json:"id,omitempty"` + + // READ-ONLY; the version of the content + ContentVersion *string `json:"contentVersion,omitempty" azure:"ro"` + + // READ-ONLY; parameters of the captured virtual machine + Parameters interface{} `json:"parameters,omitempty" azure:"ro"` + + // READ-ONLY; a list of resource items of the captured virtual machine + Resources []interface{} `json:"resources,omitempty" azure:"ro"` + + // READ-ONLY; the schema of the captured virtual machine + Schema *string `json:"$schema,omitempty" azure:"ro"` +} + +// VirtualMachineExtension - Describes a Virtual Machine Extension. +type VirtualMachineExtension struct { + // Resource location + Location *string `json:"location,omitempty"` + + // Describes the properties of a Virtual Machine Extension. + Properties *VirtualMachineExtensionProperties `json:"properties,omitempty"` + + // Resource tags + Tags map[string]*string `json:"tags,omitempty"` + + // READ-ONLY; Resource Id + ID *string `json:"id,omitempty" azure:"ro"` + + // READ-ONLY; Resource name + Name *string `json:"name,omitempty" azure:"ro"` + + // READ-ONLY; Resource type + Type *string `json:"type,omitempty" azure:"ro"` +} + +// VirtualMachineExtensionHandlerInstanceView - The instance view of a virtual machine extension handler. +type VirtualMachineExtensionHandlerInstanceView struct { + // The extension handler status. + Status *InstanceViewStatus `json:"status,omitempty"` + + // Specifies the type of the extension; an example is "CustomScriptExtension". + Type *string `json:"type,omitempty"` + + // Specifies the version of the script handler. + TypeHandlerVersion *string `json:"typeHandlerVersion,omitempty"` +} + +// VirtualMachineExtensionImage - Describes a Virtual Machine Extension Image. +type VirtualMachineExtensionImage struct { + // REQUIRED; Resource location + Location *string `json:"location,omitempty"` + + // Describes the properties of a Virtual Machine Extension Image. 
+ Properties *VirtualMachineExtensionImageProperties `json:"properties,omitempty"` + + // Resource tags + Tags map[string]*string `json:"tags,omitempty"` + + // READ-ONLY; Resource Id + ID *string `json:"id,omitempty" azure:"ro"` + + // READ-ONLY; Resource name + Name *string `json:"name,omitempty" azure:"ro"` + + // READ-ONLY; Resource type + Type *string `json:"type,omitempty" azure:"ro"` +} + +// VirtualMachineExtensionImageProperties - Describes the properties of a Virtual Machine Extension Image. +type VirtualMachineExtensionImageProperties struct { + // REQUIRED; The type of role (IaaS or PaaS) this extension supports. + ComputeRole *string `json:"computeRole,omitempty"` + + // REQUIRED; The schema defined by publisher, where extension consumers should provide settings in a matching schema. + HandlerSchema *string `json:"handlerSchema,omitempty"` + + // REQUIRED; The operating system this extension supports. + OperatingSystem *string `json:"operatingSystem,omitempty"` + + // Whether the handler can support multiple extensions. + SupportsMultipleExtensions *bool `json:"supportsMultipleExtensions,omitempty"` + + // Whether the extension can be used on xRP VMScaleSets. By default existing extensions are usable on scalesets, but there + // might be cases where a publisher wants to explicitly indicate the extension is + // only enabled for CRP VMs but not VMSS. + VMScaleSetEnabled *bool `json:"vmScaleSetEnabled,omitempty"` +} + +// VirtualMachineExtensionImagesClientGetOptions contains the optional parameters for the VirtualMachineExtensionImagesClient.Get +// method. +type VirtualMachineExtensionImagesClientGetOptions struct { + // placeholder for future optional parameters +} + +// VirtualMachineExtensionImagesClientListTypesOptions contains the optional parameters for the VirtualMachineExtensionImagesClient.ListTypes +// method. +type VirtualMachineExtensionImagesClientListTypesOptions struct { + // placeholder for future optional parameters +} + +// VirtualMachineExtensionImagesClientListVersionsOptions contains the optional parameters for the VirtualMachineExtensionImagesClient.ListVersions +// method. +type VirtualMachineExtensionImagesClientListVersionsOptions struct { + // The filter to apply on the operation. + Filter *string + Orderby *string + Top *int32 +} + +// VirtualMachineExtensionInstanceView - The instance view of a virtual machine extension. +type VirtualMachineExtensionInstanceView struct { + // The virtual machine extension name. + Name *string `json:"name,omitempty"` + + // The resource status information. + Statuses []*InstanceViewStatus `json:"statuses,omitempty"` + + // The resource status information. + Substatuses []*InstanceViewStatus `json:"substatuses,omitempty"` + + // Specifies the type of the extension; an example is "CustomScriptExtension". + Type *string `json:"type,omitempty"` + + // Specifies the version of the script handler. + TypeHandlerVersion *string `json:"typeHandlerVersion,omitempty"` +} + +// VirtualMachineExtensionProperties - Describes the properties of a Virtual Machine Extension. +type VirtualMachineExtensionProperties struct { + // Indicates whether the extension should use a newer minor version if one is available at deployment time. Once deployed, + // however, the extension will not upgrade minor versions unless redeployed, even + // with this property set to true. 
+ AutoUpgradeMinorVersion *bool `json:"autoUpgradeMinorVersion,omitempty"`
+
+ // Indicates whether the extension should be automatically upgraded by the platform if there is a newer version of the extension
+ // available.
+ EnableAutomaticUpgrade *bool `json:"enableAutomaticUpgrade,omitempty"`
+
+ // How the extension handler should be forced to update even if the extension configuration has not changed.
+ ForceUpdateTag *string `json:"forceUpdateTag,omitempty"`
+
+ // The virtual machine extension instance view.
+ InstanceView *VirtualMachineExtensionInstanceView `json:"instanceView,omitempty"`
+
+ // The extension can contain either protectedSettings or protectedSettingsFromKeyVault or no protected settings at all.
+ ProtectedSettings interface{} `json:"protectedSettings,omitempty"`
+
+ // The extension's protected settings that are passed by reference and consumed from key vault
+ ProtectedSettingsFromKeyVault interface{} `json:"protectedSettingsFromKeyVault,omitempty"`
+
+ // The name of the extension handler publisher.
+ Publisher *string `json:"publisher,omitempty"`
+
+ // JSON-formatted public settings for the extension.
+ Settings interface{} `json:"settings,omitempty"`
+
+ // Indicates whether failures stemming from the extension will be suppressed (Operational failures such as not connecting
+ // to the VM will not be suppressed regardless of this value). The default is false.
+ SuppressFailures *bool `json:"suppressFailures,omitempty"`
+
+ // Specifies the type of the extension; an example is "CustomScriptExtension".
+ Type *string `json:"type,omitempty"`
+
+ // Specifies the version of the script handler.
+ TypeHandlerVersion *string `json:"typeHandlerVersion,omitempty"`
+
+ // READ-ONLY; The provisioning state, which only appears in the response.
+ ProvisioningState *string `json:"provisioningState,omitempty" azure:"ro"`
+}
+
+// VirtualMachineExtensionUpdate - Describes a Virtual Machine Extension.
+type VirtualMachineExtensionUpdate struct {
+ // Describes the properties of a Virtual Machine Extension.
+ Properties *VirtualMachineExtensionUpdateProperties `json:"properties,omitempty"`
+
+ // Resource tags
+ Tags map[string]*string `json:"tags,omitempty"`
+}
+
+// VirtualMachineExtensionUpdateProperties - Describes the properties of a Virtual Machine Extension.
+type VirtualMachineExtensionUpdateProperties struct {
+ // Indicates whether the extension should use a newer minor version if one is available at deployment time. Once deployed,
+ // however, the extension will not upgrade minor versions unless redeployed, even
+ // with this property set to true.
+ AutoUpgradeMinorVersion *bool `json:"autoUpgradeMinorVersion,omitempty"`
+
+ // Indicates whether the extension should be automatically upgraded by the platform if there is a newer version of the extension
+ // available.
+ EnableAutomaticUpgrade *bool `json:"enableAutomaticUpgrade,omitempty"`
+
+ // How the extension handler should be forced to update even if the extension configuration has not changed.
+ ForceUpdateTag *string `json:"forceUpdateTag,omitempty"`
+
+ // The extension can contain either protectedSettings or protectedSettingsFromKeyVault or no protected settings at all.
+ ProtectedSettings interface{} `json:"protectedSettings,omitempty"`
+
+ // The extension's protected settings that are passed by reference and consumed from key vault
+ ProtectedSettingsFromKeyVault interface{} `json:"protectedSettingsFromKeyVault,omitempty"`
+
+ // The name of the extension handler publisher.
+ Publisher *string `json:"publisher,omitempty"`
+
+ // JSON-formatted public settings for the extension.
+ Settings interface{} `json:"settings,omitempty"`
+
+ // Indicates whether failures stemming from the extension will be suppressed (Operational failures such as not connecting
+ // to the VM will not be suppressed regardless of this value). The default is false.
+ SuppressFailures *bool `json:"suppressFailures,omitempty"`
+
+ // Specifies the type of the extension; an example is "CustomScriptExtension".
+ Type *string `json:"type,omitempty"`
+
+ // Specifies the version of the script handler.
+ TypeHandlerVersion *string `json:"typeHandlerVersion,omitempty"`
+}
+
+// VirtualMachineExtensionsClientBeginCreateOrUpdateOptions contains the optional parameters for the VirtualMachineExtensionsClient.BeginCreateOrUpdate
+// method.
+type VirtualMachineExtensionsClientBeginCreateOrUpdateOptions struct {
+ // Resumes the LRO from the provided token.
+ ResumeToken string
+}
+
+// VirtualMachineExtensionsClientBeginDeleteOptions contains the optional parameters for the VirtualMachineExtensionsClient.BeginDelete
+// method.
+type VirtualMachineExtensionsClientBeginDeleteOptions struct {
+ // Resumes the LRO from the provided token.
+ ResumeToken string
+}
+
+// VirtualMachineExtensionsClientBeginUpdateOptions contains the optional parameters for the VirtualMachineExtensionsClient.BeginUpdate
+// method.
+type VirtualMachineExtensionsClientBeginUpdateOptions struct {
+ // Resumes the LRO from the provided token.
+ ResumeToken string
+}
+
+// VirtualMachineExtensionsClientGetOptions contains the optional parameters for the VirtualMachineExtensionsClient.Get method.
+type VirtualMachineExtensionsClientGetOptions struct {
+ // The expand expression to apply on the operation.
+ Expand *string
+}
+
+// VirtualMachineExtensionsClientListOptions contains the optional parameters for the VirtualMachineExtensionsClient.List
+// method.
+type VirtualMachineExtensionsClientListOptions struct {
+ // The expand expression to apply on the operation.
+ Expand *string
+}
+
+// VirtualMachineExtensionsListResult - The List Extension operation response
+type VirtualMachineExtensionsListResult struct {
+ // The list of extensions
+ Value []*VirtualMachineExtension `json:"value,omitempty"`
+}
+
+// VirtualMachineHealthStatus - The health status of the VM.
+type VirtualMachineHealthStatus struct {
+ // READ-ONLY; The health status information for the VM.
+ Status *InstanceViewStatus `json:"status,omitempty" azure:"ro"`
+}
+
+// VirtualMachineIPTag - Contains the IP tag associated with the public IP address.
+type VirtualMachineIPTag struct {
+ // IP tag type. Example: FirstPartyUsage.
+ IPTagType *string `json:"ipTagType,omitempty"`
+
+ // IP tag associated with the public IP. Example: SQL, Storage etc.
+ Tag *string `json:"tag,omitempty"`
+}
+
+// VirtualMachineIdentity - Identity for the virtual machine.
+type VirtualMachineIdentity struct {
+ // The type of identity used for the virtual machine. The type 'SystemAssigned, UserAssigned' includes both an implicitly
+ // created identity and a set of user assigned identities. The type 'None' will
+ // remove any identities from the virtual machine.
+ Type *ResourceIdentityType `json:"type,omitempty"`
+
+ // The list of user identities associated with the Virtual Machine.
The user identity dictionary key references will be ARM + // resource ids in the form: + // '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. + UserAssignedIdentities map[string]*UserAssignedIdentitiesValue `json:"userAssignedIdentities,omitempty"` + + // READ-ONLY; The principal id of virtual machine identity. This property will only be provided for a system assigned identity. + PrincipalID *string `json:"principalId,omitempty" azure:"ro"` + + // READ-ONLY; The tenant id associated with the virtual machine. This property will only be provided for a system assigned + // identity. + TenantID *string `json:"tenantId,omitempty" azure:"ro"` +} + +// VirtualMachineImage - Describes a Virtual Machine Image. +type VirtualMachineImage struct { + // REQUIRED; The supported Azure location of the resource. + Location *string `json:"location,omitempty"` + + // REQUIRED; The name of the resource. + Name *string `json:"name,omitempty"` + + // The extended location of the Virtual Machine. + ExtendedLocation *ExtendedLocation `json:"extendedLocation,omitempty"` + + // Resource Id + ID *string `json:"id,omitempty"` + + // Describes the properties of a Virtual Machine Image. + Properties *VirtualMachineImageProperties `json:"properties,omitempty"` + + // Specifies the tags that are assigned to the virtual machine. For more information about using tags, see Using tags to organize + // your Azure resources + // [https://docs.microsoft.com/azure/azure-resource-manager/resource-group-using-tags.md]. + Tags map[string]*string `json:"tags,omitempty"` +} + +// VirtualMachineImageFeature - Specifies additional capabilities supported by the image +type VirtualMachineImageFeature struct { + // The name of the feature. + Name *string `json:"name,omitempty"` + + // The corresponding value for the feature. + Value *string `json:"value,omitempty"` +} + +// VirtualMachineImageProperties - Describes the properties of a Virtual Machine Image. +type VirtualMachineImageProperties struct { + // Specifies the Architecture Type + Architecture *ArchitectureTypes `json:"architecture,omitempty"` + + // Describes automatic OS upgrade properties on the image. + AutomaticOSUpgradeProperties *AutomaticOSUpgradeProperties `json:"automaticOSUpgradeProperties,omitempty"` + DataDiskImages []*DataDiskImage `json:"dataDiskImages,omitempty"` + + // Specifies disallowed configuration for the VirtualMachine created from the image + Disallowed *DisallowedConfiguration `json:"disallowed,omitempty"` + Features []*VirtualMachineImageFeature `json:"features,omitempty"` + + // Specifies the HyperVGeneration Type + HyperVGeneration *HyperVGenerationTypes `json:"hyperVGeneration,omitempty"` + + // Contains the os disk image information. + OSDiskImage *OSDiskImage `json:"osDiskImage,omitempty"` + + // Used for establishing the purchase context of any 3rd Party artifact through MarketPlace. + Plan *PurchasePlan `json:"plan,omitempty"` +} + +// VirtualMachineImageResource - Virtual machine image resource information. +type VirtualMachineImageResource struct { + // REQUIRED; The supported Azure location of the resource. + Location *string `json:"location,omitempty"` + + // REQUIRED; The name of the resource. + Name *string `json:"name,omitempty"` + + // The extended location of the Virtual Machine. 
+ ExtendedLocation *ExtendedLocation `json:"extendedLocation,omitempty"` + + // Resource Id + ID *string `json:"id,omitempty"` + + // Specifies the tags that are assigned to the virtual machine. For more information about using tags, see Using tags to organize + // your Azure resources + // [https://docs.microsoft.com/azure/azure-resource-manager/resource-group-using-tags.md]. + Tags map[string]*string `json:"tags,omitempty"` +} + +// VirtualMachineImagesClientGetOptions contains the optional parameters for the VirtualMachineImagesClient.Get method. +type VirtualMachineImagesClientGetOptions struct { + // placeholder for future optional parameters +} + +// VirtualMachineImagesClientListOffersOptions contains the optional parameters for the VirtualMachineImagesClient.ListOffers +// method. +type VirtualMachineImagesClientListOffersOptions struct { + // placeholder for future optional parameters +} + +// VirtualMachineImagesClientListOptions contains the optional parameters for the VirtualMachineImagesClient.List method. +type VirtualMachineImagesClientListOptions struct { + // The expand expression to apply on the operation. + Expand *string + Orderby *string + Top *int32 +} + +// VirtualMachineImagesClientListPublishersOptions contains the optional parameters for the VirtualMachineImagesClient.ListPublishers +// method. +type VirtualMachineImagesClientListPublishersOptions struct { + // placeholder for future optional parameters +} + +// VirtualMachineImagesClientListSKUsOptions contains the optional parameters for the VirtualMachineImagesClient.ListSKUs +// method. +type VirtualMachineImagesClientListSKUsOptions struct { + // placeholder for future optional parameters +} + +// VirtualMachineImagesEdgeZoneClientGetOptions contains the optional parameters for the VirtualMachineImagesEdgeZoneClient.Get +// method. +type VirtualMachineImagesEdgeZoneClientGetOptions struct { + // placeholder for future optional parameters +} + +// VirtualMachineImagesEdgeZoneClientListOffersOptions contains the optional parameters for the VirtualMachineImagesEdgeZoneClient.ListOffers +// method. +type VirtualMachineImagesEdgeZoneClientListOffersOptions struct { + // placeholder for future optional parameters +} + +// VirtualMachineImagesEdgeZoneClientListOptions contains the optional parameters for the VirtualMachineImagesEdgeZoneClient.List +// method. +type VirtualMachineImagesEdgeZoneClientListOptions struct { + // The expand expression to apply on the operation. + Expand *string + // Specifies the order of the results returned. Formatted as an OData query. + Orderby *string + // An integer value specifying the number of images to return that matches supplied values. + Top *int32 +} + +// VirtualMachineImagesEdgeZoneClientListPublishersOptions contains the optional parameters for the VirtualMachineImagesEdgeZoneClient.ListPublishers +// method. +type VirtualMachineImagesEdgeZoneClientListPublishersOptions struct { + // placeholder for future optional parameters +} + +// VirtualMachineImagesEdgeZoneClientListSKUsOptions contains the optional parameters for the VirtualMachineImagesEdgeZoneClient.ListSKUs +// method. +type VirtualMachineImagesEdgeZoneClientListSKUsOptions struct { + // placeholder for future optional parameters +} + +// VirtualMachineInstallPatchesParameters - Input for InstallPatches as directly received by the API +type VirtualMachineInstallPatchesParameters struct { + // REQUIRED; Defines when it is acceptable to reboot a VM during a software update operation. 
+ RebootSetting *VMGuestPatchRebootSetting `json:"rebootSetting,omitempty"` + + // Input for InstallPatches on a Linux VM, as directly received by the API + LinuxParameters *LinuxParameters `json:"linuxParameters,omitempty"` + + // Specifies the maximum amount of time that the operation will run. It must be an ISO 8601-compliant duration string such + // as PT4H (4 hours) + MaximumDuration *string `json:"maximumDuration,omitempty"` + + // Input for InstallPatches on a Windows VM, as directly received by the API + WindowsParameters *WindowsParameters `json:"windowsParameters,omitempty"` +} + +// VirtualMachineInstallPatchesResult - The result summary of an installation operation. +type VirtualMachineInstallPatchesResult struct { + // READ-ONLY; The errors that were encountered during execution of the operation. The details array contains the list of them. + Error *APIError `json:"error,omitempty" azure:"ro"` + + // READ-ONLY; The number of patches that were not installed due to the user blocking their installation. + ExcludedPatchCount *int32 `json:"excludedPatchCount,omitempty" azure:"ro"` + + // READ-ONLY; The number of patches that could not be installed due to some issue. See errors for details. + FailedPatchCount *int32 `json:"failedPatchCount,omitempty" azure:"ro"` + + // READ-ONLY; The activity ID of the operation that produced this result. It is used to correlate across CRP and extension + // logs. + InstallationActivityID *string `json:"installationActivityId,omitempty" azure:"ro"` + + // READ-ONLY; The number of patches successfully installed. + InstalledPatchCount *int32 `json:"installedPatchCount,omitempty" azure:"ro"` + + // READ-ONLY; Whether the operation ran out of time before it completed all its intended actions. + MaintenanceWindowExceeded *bool `json:"maintenanceWindowExceeded,omitempty" azure:"ro"` + + // READ-ONLY; The number of patches that were detected as available for install, but did not meet the operation's criteria. + NotSelectedPatchCount *int32 `json:"notSelectedPatchCount,omitempty" azure:"ro"` + + // READ-ONLY; The patches that were installed during the operation. + Patches []*PatchInstallationDetail `json:"patches,omitempty" azure:"ro"` + + // READ-ONLY; The number of patches that were identified as meeting the installation criteria, but were not able to be installed. + // Typically this happens when maintenanceWindowExceeded == true. + PendingPatchCount *int32 `json:"pendingPatchCount,omitempty" azure:"ro"` + + // READ-ONLY; The reboot state of the VM following completion of the operation. + RebootStatus *VMGuestPatchRebootStatus `json:"rebootStatus,omitempty" azure:"ro"` + + // READ-ONLY; The UTC timestamp when the operation began. + StartDateTime *time.Time `json:"startDateTime,omitempty" azure:"ro"` + + // READ-ONLY; The overall success or failure status of the operation. It remains "InProgress" until the operation completes. + // At that point it will become "Failed", "Succeeded", "Unknown" or "CompletedWithWarnings." + Status *PatchOperationStatus `json:"status,omitempty" azure:"ro"` +} + +// VirtualMachineInstanceView - The instance view of a virtual machine. +type VirtualMachineInstanceView struct { + // Boot Diagnostics is a debugging feature which allows you to view Console Output and Screenshot to diagnose VM status. + // You can easily view the output of your console log. + // Azure also enables you to see a screenshot of the VM from the hypervisor. 
+ BootDiagnostics *BootDiagnosticsInstanceView `json:"bootDiagnostics,omitempty"`
+
+ // The computer name assigned to the virtual machine.
+ ComputerName *string `json:"computerName,omitempty"`
+
+ // The virtual machine disk information.
+ Disks []*DiskInstanceView `json:"disks,omitempty"`
+
+ // The extensions information.
+ Extensions []*VirtualMachineExtensionInstanceView `json:"extensions,omitempty"`
+
+ // Specifies the HyperVGeneration Type associated with a resource
+ HyperVGeneration *HyperVGenerationType `json:"hyperVGeneration,omitempty"`
+
+ // The Maintenance Operation status on the virtual machine.
+ MaintenanceRedeployStatus *MaintenanceRedeployStatus `json:"maintenanceRedeployStatus,omitempty"`
+
+ // The Operating System running on the virtual machine.
+ OSName *string `json:"osName,omitempty"`
+
+ // The version of Operating System running on the virtual machine.
+ OSVersion *string `json:"osVersion,omitempty"`
+
+ // [Preview Feature] The status of virtual machine patch operations.
+ PatchStatus *VirtualMachinePatchStatus `json:"patchStatus,omitempty"`
+
+ // Specifies the fault domain of the virtual machine.
+ PlatformFaultDomain *int32 `json:"platformFaultDomain,omitempty"`
+
+ // Specifies the update domain of the virtual machine.
+ PlatformUpdateDomain *int32 `json:"platformUpdateDomain,omitempty"`
+
+ // The Remote desktop certificate thumbprint.
+ RdpThumbPrint *string `json:"rdpThumbPrint,omitempty"`
+
+ // The resource status information.
+ Statuses []*InstanceViewStatus `json:"statuses,omitempty"`
+
+ // The VM Agent running on the virtual machine.
+ VMAgent *VirtualMachineAgentInstanceView `json:"vmAgent,omitempty"`
+
+ // READ-ONLY; Resource id of the dedicated host, on which the virtual machine is allocated through automatic placement, when
+ // the virtual machine is associated with a dedicated host group that has automatic
+ // placement enabled.
+ // Minimum api-version: 2020-06-01.
+ AssignedHost *string `json:"assignedHost,omitempty" azure:"ro"`
+
+ // READ-ONLY; The health status for the VM.
+ VMHealth *VirtualMachineHealthStatus `json:"vmHealth,omitempty" azure:"ro"`
+}
+
+// VirtualMachineListResult - The List Virtual Machine operation response.
+type VirtualMachineListResult struct {
+ // REQUIRED; The list of virtual machines.
+ Value []*VirtualMachine `json:"value,omitempty"`
+
+ // The URI to fetch the next page of VMs. Call ListNext() with this URI to fetch the next page of Virtual Machines.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// VirtualMachineNetworkInterfaceConfiguration - Describes a virtual machine network interface configuration.
+type VirtualMachineNetworkInterfaceConfiguration struct {
+ // REQUIRED; The network interface configuration name.
+ Name *string `json:"name,omitempty"`
+
+ // Describes a virtual machine network profile's IP configuration.
+ Properties *VirtualMachineNetworkInterfaceConfigurationProperties `json:"properties,omitempty"`
+}
+
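+// VirtualMachineNetworkInterfaceConfigurationProperties - Describes a virtual machine network profile's IP configuration.
+//
+// Illustrative sketch (not part of the generated code): assuming the azcore "to"
+// pointer helper, a minimal NIC configuration with a single IP configuration
+// might look like:
+//
+//	nic := armcompute.VirtualMachineNetworkInterfaceConfiguration{
+//		Name: to.Ptr("nic0"),
+//		Properties: &armcompute.VirtualMachineNetworkInterfaceConfigurationProperties{
+//			Primary: to.Ptr(true),
+//			IPConfigurations: []*armcompute.VirtualMachineNetworkInterfaceIPConfiguration{
+//				{Name: to.Ptr("ipconfig0")},
+//			},
+//		},
+//	}
+type VirtualMachineNetworkInterfaceConfigurationProperties struct {
+ // REQUIRED; Specifies the IP configurations of the network interface.
+ IPConfigurations []*VirtualMachineNetworkInterfaceIPConfiguration `json:"ipConfigurations,omitempty"`
+
+ // The DNS settings to be applied on the network interfaces.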
+ DNSSettings *VirtualMachineNetworkInterfaceDNSSettingsConfiguration `json:"dnsSettings,omitempty"`
+
+ // Specify what happens to the network interface when the VM is deleted
+ DeleteOption *DeleteOptions `json:"deleteOption,omitempty"`
+ DscpConfiguration *SubResource `json:"dscpConfiguration,omitempty"`
+
+ // Specifies whether the network interface is accelerated networking-enabled.
+ EnableAcceleratedNetworking *bool `json:"enableAcceleratedNetworking,omitempty"`
+
+ // Specifies whether the network interface is FPGA networking-enabled.
+ EnableFpga *bool `json:"enableFpga,omitempty"`
+
+ // Whether IP forwarding is enabled on this NIC.
+ EnableIPForwarding *bool `json:"enableIPForwarding,omitempty"`
+
+ // The network security group.
+ NetworkSecurityGroup *SubResource `json:"networkSecurityGroup,omitempty"`
+
+ // Specifies the primary network interface in case the virtual machine has more than 1 network interface.
+ Primary *bool `json:"primary,omitempty"`
+}
+
+// VirtualMachineNetworkInterfaceDNSSettingsConfiguration - Describes a virtual machine's network configuration's DNS settings.
+type VirtualMachineNetworkInterfaceDNSSettingsConfiguration struct {
+ // List of DNS server IP addresses
+ DNSServers []*string `json:"dnsServers,omitempty"`
+}
+
+// VirtualMachineNetworkInterfaceIPConfiguration - Describes a virtual machine network profile's IP configuration.
+type VirtualMachineNetworkInterfaceIPConfiguration struct {
+ // REQUIRED; The IP configuration name.
+ Name *string `json:"name,omitempty"`
+
+ // Describes the properties of a virtual machine network interface IP configuration.
+ Properties *VirtualMachineNetworkInterfaceIPConfigurationProperties `json:"properties,omitempty"`
+}
+
+// VirtualMachineNetworkInterfaceIPConfigurationProperties - Describes the properties of a virtual machine network interface
+// IP configuration.
+type VirtualMachineNetworkInterfaceIPConfigurationProperties struct {
+ // Specifies an array of references to backend address pools of application gateways. A virtual machine can reference backend
+ // address pools of multiple application gateways. Multiple virtual machines
+ // cannot use the same application gateway.
+ ApplicationGatewayBackendAddressPools []*SubResource `json:"applicationGatewayBackendAddressPools,omitempty"`
+
+ // Specifies an array of references to application security group.
+ ApplicationSecurityGroups []*SubResource `json:"applicationSecurityGroups,omitempty"`
+
+ // Specifies an array of references to backend address pools of load balancers. A virtual machine can reference backend address
+ // pools of one public and one internal load balancer. [Multiple virtual
+ // machines cannot use the same basic sku load balancer].
+ LoadBalancerBackendAddressPools []*SubResource `json:"loadBalancerBackendAddressPools,omitempty"`
+
+ // Specifies the primary network interface in case the virtual machine has more than 1 network interface.
+ Primary *bool `json:"primary,omitempty"`
+
+ // Available from Api-Version 2017-03-30 onwards, it represents whether the specific ipconfiguration is IPv4 or IPv6. Default
+ // is taken as IPv4. Possible values are: 'IPv4' and 'IPv6'.
+ PrivateIPAddressVersion *IPVersions `json:"privateIPAddressVersion,omitempty"`
+
+ // The publicIPAddressConfiguration.
+ PublicIPAddressConfiguration *VirtualMachinePublicIPAddressConfiguration `json:"publicIPAddressConfiguration,omitempty"`
+
+ // Specifies the identifier of the subnet.
+ Subnet *SubResource `json:"subnet,omitempty"`
+}
+
+// VirtualMachinePatchStatus - The status of virtual machine patch operations.
+type VirtualMachinePatchStatus struct {
+ // The available patch summary of the latest assessment operation for the virtual machine.
+ AvailablePatchSummary *AvailablePatchSummary `json:"availablePatchSummary,omitempty"`
+
+ // The installation summary of the latest installation operation for the virtual machine.
+ LastPatchInstallationSummary *LastPatchInstallationSummary `json:"lastPatchInstallationSummary,omitempty"`
+
+ // READ-ONLY; The enablement status of the specified patchMode
+ ConfigurationStatuses []*InstanceViewStatus `json:"configurationStatuses,omitempty" azure:"ro"`
+}
+
+// VirtualMachineProperties - Describes the properties of a Virtual Machine.
+type VirtualMachineProperties struct {
+ // Specifies additional capabilities enabled or disabled on the virtual machine.
+ AdditionalCapabilities *AdditionalCapabilities `json:"additionalCapabilities,omitempty"`
+
+ // Specifies the gallery applications that should be made available to the VM/VMSS
+ ApplicationProfile *ApplicationProfile `json:"applicationProfile,omitempty"`
+
+ // Specifies information about the availability set that the virtual machine should be assigned to. Virtual machines specified
+ // in the same availability set are allocated to different nodes to maximize
+ // availability. For more information about availability sets, see Availability sets overview [https://docs.microsoft.com/azure/virtual-machines/availability-set-overview].
+ // For more information on Azure planned maintenance, see Maintenance and updates for Virtual Machines in Azure [https://docs.microsoft.com/azure/virtual-machines/maintenance-and-updates]
+ // Currently, a VM can only be added to an availability set at creation time. The availability set to which the VM is being added
+ // should be under the same resource group as the availability set resource. An
+ // existing VM cannot be added to an availability set.
+ // This property cannot exist along with a non-null properties.virtualMachineScaleSet reference.
+ AvailabilitySet *SubResource `json:"availabilitySet,omitempty"`
+
+ // Specifies the billing related details of an Azure Spot virtual machine.
+ // Minimum api-version: 2019-03-01.
+ BillingProfile *BillingProfile `json:"billingProfile,omitempty"`
+
+ // Specifies information about the capacity reservation that is used to allocate the virtual machine.
+ // Minimum api-version: 2021-04-01.
+ CapacityReservation *CapacityReservationProfile `json:"capacityReservation,omitempty"`
+
+ // Specifies the boot diagnostic settings state.
+ // Minimum api-version: 2015-06-15.
+ DiagnosticsProfile *DiagnosticsProfile `json:"diagnosticsProfile,omitempty"`
+
+ // Specifies the eviction policy for the Azure Spot virtual machine and Azure Spot scale set.
+ // For Azure Spot virtual machines, both 'Deallocate' and 'Delete' are supported and the minimum api-version is 2019-03-01.
+ // For Azure Spot scale sets, both 'Deallocate' and 'Delete' are supported and the minimum api-version is 2017-10-30-preview.
+ EvictionPolicy *VirtualMachineEvictionPolicyTypes `json:"evictionPolicy,omitempty"`
+
+ // Specifies the time allotted for all extensions to start. The time duration should be between 15 minutes and 120 minutes
+ // (inclusive) and should be specified in ISO 8601 format. The default value is 90
+ // minutes (PT1H30M).
+ // Minimum api-version: 2020-06-01
+ ExtensionsTimeBudget *string `json:"extensionsTimeBudget,omitempty"`
+
+ // Specifies the hardware settings for the virtual machine.
+ HardwareProfile *HardwareProfile `json:"hardwareProfile,omitempty"`
+
+ // Specifies information about the dedicated host that the virtual machine resides in.
+ // Minimum api-version: 2018-10-01.
+ Host *SubResource `json:"host,omitempty"`
+
+ // Specifies information about the dedicated host group that the virtual machine resides in.
+ // Minimum api-version: 2020-06-01.
+ // NOTE: User cannot specify both host and hostGroup properties.
+ HostGroup *SubResource `json:"hostGroup,omitempty"`
+
+ // Specifies that the image or disk that is being used was licensed on-premises.
+ // Possible values for Windows Server operating system are:
+ // WindowsClient
+ // WindowsServer
+ // Possible values for Linux Server operating system are:
+ // RHELBYOS (for RHEL)
+ // SLESBYOS (for SUSE)
+ // For more information, see Azure Hybrid Use Benefit for Windows Server [https://docs.microsoft.com/azure/virtual-machines/windows/hybrid-use-benefit-licensing]
+ // Azure Hybrid Use Benefit for Linux Server [https://docs.microsoft.com/azure/virtual-machines/linux/azure-hybrid-benefit-linux]
+ // Minimum api-version: 2015-06-15
+ LicenseType *string `json:"licenseType,omitempty"`
+
+ // Specifies the network interfaces of the virtual machine.
+ NetworkProfile *NetworkProfile `json:"networkProfile,omitempty"`
+
+ // Specifies the operating system settings used while creating the virtual machine. Some of the settings cannot be changed
+ // once the VM is provisioned.
+ OSProfile *OSProfile `json:"osProfile,omitempty"`
+
+ // Specifies the scale set logical fault domain into which the Virtual Machine will be created. By default, the Virtual Machine
+ // will be automatically assigned to a fault domain that best maintains
+ // balance across available fault domains.
+ // This is applicable only if the 'virtualMachineScaleSet' property of this Virtual Machine is set. The Virtual Machine Scale
+ // Set that is referenced must have 'platformFaultDomainCount' > 1. This property
+ // cannot be updated once the Virtual Machine is created. Fault domain assignment can be viewed in the Virtual Machine Instance
+ // View.
+ // Minimum api-version: 2020-12-01
+ PlatformFaultDomain *int32 `json:"platformFaultDomain,omitempty"`
+
+ // Specifies the priority for the virtual machine.
+ // Minimum api-version: 2019-03-01
+ Priority *VirtualMachinePriorityTypes `json:"priority,omitempty"`
+
+ // Specifies information about the proximity placement group that the virtual machine should be assigned to.
+ // Minimum api-version: 2018-04-01.
+ ProximityPlacementGroup *SubResource `json:"proximityPlacementGroup,omitempty"`
+
+ // Specifies Scheduled Event related configurations.
+ ScheduledEventsProfile *ScheduledEventsProfile `json:"scheduledEventsProfile,omitempty"`
+
+ // Specifies the Security related profile settings for the virtual machine.
+ SecurityProfile *SecurityProfile `json:"securityProfile,omitempty"`
+
+ // Specifies the storage settings for the virtual machine disks.
+ StorageProfile *StorageProfile `json:"storageProfile,omitempty"`
+
+ // UserData for the VM, which must be base-64 encoded. Customer should not pass any secrets in here.
+ // Minimum api-version: 2021-03-01
+ UserData *string `json:"userData,omitempty"`
+
+ // Specifies information about the virtual machine scale set that the virtual machine should be assigned to.
Virtual machines
+ // specified in the same virtual machine scale set are allocated to different
+ // nodes to maximize availability. Currently, a VM can only be added to a virtual machine scale set at creation time. An existing
+ // VM cannot be added to a virtual machine scale set.
+ // This property cannot exist along with a non-null properties.availabilitySet reference.
+ // Minimum api-version: 2019-03-01
+ VirtualMachineScaleSet *SubResource `json:"virtualMachineScaleSet,omitempty"`
+
+ // READ-ONLY; The virtual machine instance view.
+ InstanceView *VirtualMachineInstanceView `json:"instanceView,omitempty" azure:"ro"`
+
+ // READ-ONLY; The provisioning state, which only appears in the response.
+ ProvisioningState *string `json:"provisioningState,omitempty" azure:"ro"`
+
+ // READ-ONLY; Specifies the time at which the Virtual Machine resource was created.
+ // Minimum api-version: 2022-03-01.
+ TimeCreated *time.Time `json:"timeCreated,omitempty" azure:"ro"`
+
+ // READ-ONLY; Specifies the VM unique ID which is a 128-bit identifier that is encoded and stored in all Azure IaaS VMs SMBIOS
+ // and can be read using platform BIOS commands.
+ VMID *string `json:"vmId,omitempty" azure:"ro"`
+}
+
+// VirtualMachinePublicIPAddressConfiguration - Describes a virtual machine's IP Configuration's PublicIPAddress configuration
+type VirtualMachinePublicIPAddressConfiguration struct {
+ // REQUIRED; The publicIP address configuration name.
+ Name *string `json:"name,omitempty"`
+
+ // Describes a virtual machine's IP Configuration's PublicIPAddress configuration
+ Properties *VirtualMachinePublicIPAddressConfigurationProperties `json:"properties,omitempty"`
+
+ // Describes the public IP Sku. It can only be set with OrchestrationMode as Flexible.
+ SKU *PublicIPAddressSKU `json:"sku,omitempty"`
+}
+
+// VirtualMachinePublicIPAddressConfigurationProperties - Describes a virtual machine's IP Configuration's PublicIPAddress
+// configuration
+type VirtualMachinePublicIPAddressConfigurationProperties struct {
+ // The dns settings to be applied on the publicIP addresses.
+ DNSSettings *VirtualMachinePublicIPAddressDNSSettingsConfiguration `json:"dnsSettings,omitempty"`
+
+ // Specify what happens to the public IP address when the VM is deleted
+ DeleteOption *DeleteOptions `json:"deleteOption,omitempty"`
+
+ // The list of IP tags associated with the public IP address.
+ IPTags []*VirtualMachineIPTag `json:"ipTags,omitempty"`
+
+ // The idle timeout of the public IP address.
+ IdleTimeoutInMinutes *int32 `json:"idleTimeoutInMinutes,omitempty"`
+
+ // Available from Api-Version 2019-07-01 onwards, it represents whether the specific ipconfiguration is IPv4 or IPv6. Default
+ // is taken as IPv4. Possible values are: 'IPv4' and 'IPv6'.
+ PublicIPAddressVersion *IPVersions `json:"publicIPAddressVersion,omitempty"`
+
+ // Specify the public IP allocation type
+ PublicIPAllocationMethod *PublicIPAllocationMethod `json:"publicIPAllocationMethod,omitempty"`
+
+ // The PublicIPPrefix from which to allocate publicIP addresses.
+ PublicIPPrefix *SubResource `json:"publicIPPrefix,omitempty"`
+}
+
+// VirtualMachinePublicIPAddressDNSSettingsConfiguration - Describes a virtual machine's network configuration's DNS settings.
+type VirtualMachinePublicIPAddressDNSSettingsConfiguration struct {
+ // REQUIRED; The Domain name label prefix of the PublicIPAddress resources that will be created. The generated name label
+ // is the concatenation of the domain name label and vm network profile unique ID.
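+ // For example, a domainNameLabel of 'myvm' typically yields a fully qualified name of
+ // the form 'myvm-{uniqueId}.{region}.cloudapp.azure.com' (pattern illustrative).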
+ DomainNameLabel *string `json:"domainNameLabel,omitempty"` +} + +// VirtualMachineReimageParameters - Parameters for Reimaging Virtual Machine. NOTE: Virtual Machine OS disk will always be +// reimaged +type VirtualMachineReimageParameters struct { + // Specifies whether to reimage temp disk. Default value: false. Note: This temp disk reimage parameter is only supported + // for VM/VMSS with Ephemeral OS disk. + TempDisk *bool `json:"tempDisk,omitempty"` +} + +// VirtualMachineRunCommand - Describes a Virtual Machine run command. +type VirtualMachineRunCommand struct { + // REQUIRED; Resource location + Location *string `json:"location,omitempty"` + + // Describes the properties of a Virtual Machine run command. + Properties *VirtualMachineRunCommandProperties `json:"properties,omitempty"` + + // Resource tags + Tags map[string]*string `json:"tags,omitempty"` + + // READ-ONLY; Resource Id + ID *string `json:"id,omitempty" azure:"ro"` + + // READ-ONLY; Resource name + Name *string `json:"name,omitempty" azure:"ro"` + + // READ-ONLY; Resource type + Type *string `json:"type,omitempty" azure:"ro"` +} + +// VirtualMachineRunCommandInstanceView - The instance view of a virtual machine run command. +type VirtualMachineRunCommandInstanceView struct { + // Script end time. + EndTime *time.Time `json:"endTime,omitempty"` + + // Script error stream. + Error *string `json:"error,omitempty"` + + // Communicate script configuration errors or execution messages. + ExecutionMessage *string `json:"executionMessage,omitempty"` + + // Script execution status. + ExecutionState *ExecutionState `json:"executionState,omitempty"` + + // Exit code returned from script execution. + ExitCode *int32 `json:"exitCode,omitempty"` + + // Script output stream. + Output *string `json:"output,omitempty"` + + // Script start time. + StartTime *time.Time `json:"startTime,omitempty"` + + // The resource status information. + Statuses []*InstanceViewStatus `json:"statuses,omitempty"` +} + +// VirtualMachineRunCommandProperties - Describes the properties of a Virtual Machine run command. +type VirtualMachineRunCommandProperties struct { + // Optional. If set to true, provisioning will complete as soon as the script starts and will not wait for script to complete. + AsyncExecution *bool `json:"asyncExecution,omitempty"` + + // Specifies the Azure storage blob where script error stream will be uploaded. + ErrorBlobURI *string `json:"errorBlobUri,omitempty"` + + // Specifies the Azure storage blob where script output stream will be uploaded. + OutputBlobURI *string `json:"outputBlobUri,omitempty"` + + // The parameters used by the script. + Parameters []*RunCommandInputParameter `json:"parameters,omitempty"` + + // The parameters used by the script. + ProtectedParameters []*RunCommandInputParameter `json:"protectedParameters,omitempty"` + + // Specifies the user account password on the VM when executing the run command. + RunAsPassword *string `json:"runAsPassword,omitempty"` + + // Specifies the user account on the VM when executing the run command. + RunAsUser *string `json:"runAsUser,omitempty"` + + // The source of the run command script. + Source *VirtualMachineRunCommandScriptSource `json:"source,omitempty"` + + // The timeout in seconds to execute the run command. + TimeoutInSeconds *int32 `json:"timeoutInSeconds,omitempty"` + + // READ-ONLY; The virtual machine run command instance view. 
+ InstanceView *VirtualMachineRunCommandInstanceView `json:"instanceView,omitempty" azure:"ro"` + + // READ-ONLY; The provisioning state, which only appears in the response. + ProvisioningState *string `json:"provisioningState,omitempty" azure:"ro"` +} + +// VirtualMachineRunCommandScriptSource - Describes the script sources for run command. +type VirtualMachineRunCommandScriptSource struct { + // Specifies a commandId of predefined built-in script. + CommandID *string `json:"commandId,omitempty"` + + // Specifies the script content to be executed on the VM. + Script *string `json:"script,omitempty"` + + // Specifies the script download location. + ScriptURI *string `json:"scriptUri,omitempty"` +} + +// VirtualMachineRunCommandUpdate - Describes a Virtual Machine run command. +type VirtualMachineRunCommandUpdate struct { + // Describes the properties of a Virtual Machine run command. + Properties *VirtualMachineRunCommandProperties `json:"properties,omitempty"` + + // Resource tags + Tags map[string]*string `json:"tags,omitempty"` +} + +// VirtualMachineRunCommandsClientBeginCreateOrUpdateOptions contains the optional parameters for the VirtualMachineRunCommandsClient.BeginCreateOrUpdate +// method. +type VirtualMachineRunCommandsClientBeginCreateOrUpdateOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// VirtualMachineRunCommandsClientBeginDeleteOptions contains the optional parameters for the VirtualMachineRunCommandsClient.BeginDelete +// method. +type VirtualMachineRunCommandsClientBeginDeleteOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// VirtualMachineRunCommandsClientBeginUpdateOptions contains the optional parameters for the VirtualMachineRunCommandsClient.BeginUpdate +// method. +type VirtualMachineRunCommandsClientBeginUpdateOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// VirtualMachineRunCommandsClientGetByVirtualMachineOptions contains the optional parameters for the VirtualMachineRunCommandsClient.GetByVirtualMachine +// method. +type VirtualMachineRunCommandsClientGetByVirtualMachineOptions struct { + // The expand expression to apply on the operation. + Expand *string +} + +// VirtualMachineRunCommandsClientGetOptions contains the optional parameters for the VirtualMachineRunCommandsClient.Get +// method. +type VirtualMachineRunCommandsClientGetOptions struct { + // placeholder for future optional parameters +} + +// VirtualMachineRunCommandsClientListByVirtualMachineOptions contains the optional parameters for the VirtualMachineRunCommandsClient.ListByVirtualMachine +// method. +type VirtualMachineRunCommandsClientListByVirtualMachineOptions struct { + // The expand expression to apply on the operation. + Expand *string +} + +// VirtualMachineRunCommandsClientListOptions contains the optional parameters for the VirtualMachineRunCommandsClient.List +// method. +type VirtualMachineRunCommandsClientListOptions struct { + // placeholder for future optional parameters +} + +// VirtualMachineRunCommandsListResult - The List run command operation response +type VirtualMachineRunCommandsListResult struct { + // REQUIRED; The list of run commands + Value []*VirtualMachineRunCommand `json:"value,omitempty"` + + // The uri to fetch the next page of run commands. + NextLink *string `json:"nextLink,omitempty"` +} + +// VirtualMachineScaleSet - Describes a Virtual Machine Scale Set. 
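+//
+// A minimal construction sketch (illustrative values; assumes the generic pointer
+// helper to.Ptr from github.com/Azure/azure-sdk-for-go/sdk/azcore/to):
+//
+//	vmss := VirtualMachineScaleSet{
+//		Location: to.Ptr("eastus"),
+//		SKU:      &SKU{Name: to.Ptr("Standard_D2s_v3"), Capacity: to.Ptr[int64](3)},
+//		Properties: &VirtualMachineScaleSetProperties{
+//			Overprovision: to.Ptr(true),
+//		},
+//	}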
+type VirtualMachineScaleSet struct { + // REQUIRED; Resource location + Location *string `json:"location,omitempty"` + + // The extended location of the Virtual Machine Scale Set. + ExtendedLocation *ExtendedLocation `json:"extendedLocation,omitempty"` + + // The identity of the virtual machine scale set, if configured. + Identity *VirtualMachineScaleSetIdentity `json:"identity,omitempty"` + + // Specifies information about the marketplace image used to create the virtual machine. This element is only used for marketplace + // images. Before you can use a marketplace image from an API, you must + // enable the image for programmatic use. In the Azure portal, find the marketplace image that you want to use and then click + // Want to deploy programmatically, Get Started ->. Enter any required + // information and then click Save. + Plan *Plan `json:"plan,omitempty"` + + // Describes the properties of a Virtual Machine Scale Set. + Properties *VirtualMachineScaleSetProperties `json:"properties,omitempty"` + + // The virtual machine scale set sku. + SKU *SKU `json:"sku,omitempty"` + + // Resource tags + Tags map[string]*string `json:"tags,omitempty"` + + // The virtual machine scale set zones. NOTE: Availability zones can only be set when you create the scale set + Zones []*string `json:"zones,omitempty"` + + // READ-ONLY; Resource Id + ID *string `json:"id,omitempty" azure:"ro"` + + // READ-ONLY; Resource name + Name *string `json:"name,omitempty" azure:"ro"` + + // READ-ONLY; Resource type + Type *string `json:"type,omitempty" azure:"ro"` +} + +// VirtualMachineScaleSetDataDisk - Describes a virtual machine scale set data disk. +type VirtualMachineScaleSetDataDisk struct { + // REQUIRED; The create option. + CreateOption *DiskCreateOptionTypes `json:"createOption,omitempty"` + + // REQUIRED; Specifies the logical unit number of the data disk. This value is used to identify data disks within the VM and + // therefore must be unique for each data disk attached to a VM. + Lun *int32 `json:"lun,omitempty"` + + // Specifies the caching requirements. + // Possible values are: + // None + // ReadOnly + // ReadWrite + // Default: None for Standard storage. ReadOnly for Premium storage + Caching *CachingTypes `json:"caching,omitempty"` + + // Specifies whether data disk should be deleted or detached upon VMSS Flex deletion (This feature is available for VMSS with + // Flexible OrchestrationMode only). + // Possible values: + // Delete If this value is used, the data disk is deleted when the VMSS Flex VM is deleted. + // Detach If this value is used, the data disk is retained after VMSS Flex VM is deleted. + // The default value is set to Delete. + DeleteOption *DiskDeleteOptionTypes `json:"deleteOption,omitempty"` + + // Specifies the Read-Write IOPS for the managed disk. Should be used only when StorageAccountType is UltraSSD_LRS. If not + // specified, a default value would be assigned based on diskSizeGB. + DiskIOPSReadWrite *int64 `json:"diskIOPSReadWrite,omitempty"` + + // Specifies the bandwidth in MB per second for the managed disk. Should be used only when StorageAccountType is UltraSSD_LRS. + // If not specified, a default value would be assigned based on diskSizeGB. + DiskMBpsReadWrite *int64 `json:"diskMBpsReadWrite,omitempty"` + + // Specifies the size of an empty data disk in gigabytes. This element can be used to overwrite the size of the disk in a + // virtual machine image. 
+ // This value cannot be larger than 1023 GB
+ DiskSizeGB *int32 `json:"diskSizeGB,omitempty"`
+
+ // The managed disk parameters.
+ ManagedDisk *VirtualMachineScaleSetManagedDiskParameters `json:"managedDisk,omitempty"`
+
+ // The disk name.
+ Name *string `json:"name,omitempty"`
+
+ // Specifies whether writeAccelerator should be enabled or disabled on the disk.
+ WriteAcceleratorEnabled *bool `json:"writeAcceleratorEnabled,omitempty"`
+}
+
+// VirtualMachineScaleSetExtension - Describes a Virtual Machine Scale Set Extension.
+type VirtualMachineScaleSetExtension struct {
+ // The name of the extension.
+ Name *string `json:"name,omitempty"`
+
+ // Describes the properties of a Virtual Machine Scale Set Extension.
+ Properties *VirtualMachineScaleSetExtensionProperties `json:"properties,omitempty"`
+
+ // READ-ONLY; Resource Id
+ ID *string `json:"id,omitempty" azure:"ro"`
+
+ // READ-ONLY; Resource type
+ Type *string `json:"type,omitempty" azure:"ro"`
+}
+
+// VirtualMachineScaleSetExtensionListResult - The List VM scale set extension operation response.
+type VirtualMachineScaleSetExtensionListResult struct {
+ // REQUIRED; The list of VM scale set extensions.
+ Value []*VirtualMachineScaleSetExtension `json:"value,omitempty"`
+
+ // The uri to fetch the next page of VM scale set extensions. Call ListNext() with this to fetch the next page of VM scale
+ // set extensions.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// VirtualMachineScaleSetExtensionProfile - Describes a virtual machine scale set extension profile.
+type VirtualMachineScaleSetExtensionProfile struct {
+ // The virtual machine scale set child extension resources.
+ Extensions []*VirtualMachineScaleSetExtension `json:"extensions,omitempty"`
+
+ // Specifies the time allotted for all extensions to start. The time duration should be between 15 minutes and 120 minutes
+ // (inclusive) and should be specified in ISO 8601 format. The default value is 90
+ // minutes (PT1H30M).
+ // Minimum api-version: 2020-06-01
+ ExtensionsTimeBudget *string `json:"extensionsTimeBudget,omitempty"`
+}
+
+// VirtualMachineScaleSetExtensionProperties - Describes the properties of a Virtual Machine Scale Set Extension.
+type VirtualMachineScaleSetExtensionProperties struct {
+ // Indicates whether the extension should use a newer minor version if one is available at deployment time. Once deployed,
+ // however, the extension will not upgrade minor versions unless redeployed, even
+ // with this property set to true.
+ AutoUpgradeMinorVersion *bool `json:"autoUpgradeMinorVersion,omitempty"`
+
+ // Indicates whether the extension should be automatically upgraded by the platform if there is a newer version of the extension
+ // available.
+ EnableAutomaticUpgrade *bool `json:"enableAutomaticUpgrade,omitempty"`
+
+ // If a value is provided and is different from the previous value, the extension handler will be forced to update even if
+ // the extension configuration has not changed.
+ ForceUpdateTag *string `json:"forceUpdateTag,omitempty"`
+
+ // The extension can contain either protectedSettings or protectedSettingsFromKeyVault or no protected settings at all.
+ ProtectedSettings interface{} `json:"protectedSettings,omitempty"`
+
+ // The extension's protected settings that are passed by reference and consumed from key vault
+ ProtectedSettingsFromKeyVault interface{} `json:"protectedSettingsFromKeyVault,omitempty"`
+
+ // Collection of extension names after which this extension needs to be provisioned.
+ ProvisionAfterExtensions []*string `json:"provisionAfterExtensions,omitempty"` + + // The name of the extension handler publisher. + Publisher *string `json:"publisher,omitempty"` + + // Json formatted public settings for the extension. + Settings interface{} `json:"settings,omitempty"` + + // Indicates whether failures stemming from the extension will be suppressed (Operational failures such as not connecting + // to the VM will not be suppressed regardless of this value). The default is false. + SuppressFailures *bool `json:"suppressFailures,omitempty"` + + // Specifies the type of the extension; an example is "CustomScriptExtension". + Type *string `json:"type,omitempty"` + + // Specifies the version of the script handler. + TypeHandlerVersion *string `json:"typeHandlerVersion,omitempty"` + + // READ-ONLY; The provisioning state, which only appears in the response. + ProvisioningState *string `json:"provisioningState,omitempty" azure:"ro"` +} + +// VirtualMachineScaleSetExtensionUpdate - Describes a Virtual Machine Scale Set Extension. +type VirtualMachineScaleSetExtensionUpdate struct { + // Describes the properties of a Virtual Machine Scale Set Extension. + Properties *VirtualMachineScaleSetExtensionProperties `json:"properties,omitempty"` + + // READ-ONLY; Resource Id + ID *string `json:"id,omitempty" azure:"ro"` + + // READ-ONLY; The name of the extension. + Name *string `json:"name,omitempty" azure:"ro"` + + // READ-ONLY; Resource type + Type *string `json:"type,omitempty" azure:"ro"` +} + +// VirtualMachineScaleSetExtensionsClientBeginCreateOrUpdateOptions contains the optional parameters for the VirtualMachineScaleSetExtensionsClient.BeginCreateOrUpdate +// method. +type VirtualMachineScaleSetExtensionsClientBeginCreateOrUpdateOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// VirtualMachineScaleSetExtensionsClientBeginDeleteOptions contains the optional parameters for the VirtualMachineScaleSetExtensionsClient.BeginDelete +// method. +type VirtualMachineScaleSetExtensionsClientBeginDeleteOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// VirtualMachineScaleSetExtensionsClientBeginUpdateOptions contains the optional parameters for the VirtualMachineScaleSetExtensionsClient.BeginUpdate +// method. +type VirtualMachineScaleSetExtensionsClientBeginUpdateOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// VirtualMachineScaleSetExtensionsClientGetOptions contains the optional parameters for the VirtualMachineScaleSetExtensionsClient.Get +// method. +type VirtualMachineScaleSetExtensionsClientGetOptions struct { + // The expand expression to apply on the operation. + Expand *string +} + +// VirtualMachineScaleSetExtensionsClientListOptions contains the optional parameters for the VirtualMachineScaleSetExtensionsClient.List +// method. +type VirtualMachineScaleSetExtensionsClientListOptions struct { + // placeholder for future optional parameters +} + +// VirtualMachineScaleSetHardwareProfile - Specifies the hardware settings for the virtual machine scale set. +type VirtualMachineScaleSetHardwareProfile struct { + // Specifies the properties for customizing the size of the virtual machine. Minimum api-version: 2022-03-01. + // Please follow the instructions in VM Customization [https://aka.ms/vmcustomization] for more details. 
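+ // A minimal sketch, assuming VMSizeProperties exposes the VCPUsAvailable and
+ // VCPUsPerCore fields and using the to.Ptr helper from sdk/azcore/to (values illustrative):
+ //
+ //	hw := VirtualMachineScaleSetHardwareProfile{
+ //		VMSizeProperties: &VMSizeProperties{
+ //			VCPUsAvailable: to.Ptr[int32](2),
+ //			VCPUsPerCore:   to.Ptr[int32](1),
+ //		},
+ //	}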
+ VMSizeProperties *VMSizeProperties `json:"vmSizeProperties,omitempty"` +} + +// VirtualMachineScaleSetIPConfiguration - Describes a virtual machine scale set network profile's IP configuration. +type VirtualMachineScaleSetIPConfiguration struct { + // REQUIRED; The IP configuration name. + Name *string `json:"name,omitempty"` + + // Resource Id + ID *string `json:"id,omitempty"` + + // Describes a virtual machine scale set network profile's IP configuration properties. + Properties *VirtualMachineScaleSetIPConfigurationProperties `json:"properties,omitempty"` +} + +// VirtualMachineScaleSetIPConfigurationProperties - Describes a virtual machine scale set network profile's IP configuration +// properties. +type VirtualMachineScaleSetIPConfigurationProperties struct { + // Specifies an array of references to backend address pools of application gateways. A scale set can reference backend address + // pools of multiple application gateways. Multiple scale sets cannot use the + // same application gateway. + ApplicationGatewayBackendAddressPools []*SubResource `json:"applicationGatewayBackendAddressPools,omitempty"` + + // Specifies an array of references to application security group. + ApplicationSecurityGroups []*SubResource `json:"applicationSecurityGroups,omitempty"` + + // Specifies an array of references to backend address pools of load balancers. A scale set can reference backend address + // pools of one public and one internal load balancer. Multiple scale sets cannot + // use the same basic sku load balancer. + LoadBalancerBackendAddressPools []*SubResource `json:"loadBalancerBackendAddressPools,omitempty"` + + // Specifies an array of references to inbound Nat pools of the load balancers. A scale set can reference inbound nat pools + // of one public and one internal load balancer. Multiple scale sets cannot use + // the same basic sku load balancer. + LoadBalancerInboundNatPools []*SubResource `json:"loadBalancerInboundNatPools,omitempty"` + + // Specifies the primary network interface in case the virtual machine has more than 1 network interface. + Primary *bool `json:"primary,omitempty"` + + // Available from Api-Version 2017-03-30 onwards, it represents whether the specific ipconfiguration is IPv4 or IPv6. Default + // is taken as IPv4. Possible values are: 'IPv4' and 'IPv6'. + PrivateIPAddressVersion *IPVersion `json:"privateIPAddressVersion,omitempty"` + + // The publicIPAddressConfiguration. + PublicIPAddressConfiguration *VirtualMachineScaleSetPublicIPAddressConfiguration `json:"publicIPAddressConfiguration,omitempty"` + + // Specifies the identifier of the subnet. + Subnet *APIEntityReference `json:"subnet,omitempty"` +} + +// VirtualMachineScaleSetIPTag - Contains the IP tag associated with the public IP address. +type VirtualMachineScaleSetIPTag struct { + // IP tag type. Example: FirstPartyUsage. + IPTagType *string `json:"ipTagType,omitempty"` + + // IP tag associated with the public IP. Example: SQL, Storage etc. + Tag *string `json:"tag,omitempty"` +} + +// VirtualMachineScaleSetIdentity - Identity for the virtual machine scale set. +type VirtualMachineScaleSetIdentity struct { + // The type of identity used for the virtual machine scale set. The type 'SystemAssigned, UserAssigned' includes both an implicitly + // created identity and a set of user assigned identities. The type 'None' + // will remove any identities from the virtual machine scale set. 
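+ // A sketch of assigning a single user-assigned identity (to.Ptr is the pointer helper
+ // from sdk/azcore/to; the map key is a placeholder ARM resource id):
+ //
+ //	identity := VirtualMachineScaleSetIdentity{
+ //		Type: to.Ptr(ResourceIdentityTypeUserAssigned),
+ //		UserAssignedIdentities: map[string]*VirtualMachineScaleSetIdentityUserAssignedIdentitiesValue{
+ //			"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}": {},
+ //		},
+ //	}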
+ Type *ResourceIdentityType `json:"type,omitempty"` + + // The list of user identities associated with the virtual machine scale set. The user identity dictionary key references + // will be ARM resource ids in the form: + // '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. + UserAssignedIdentities map[string]*VirtualMachineScaleSetIdentityUserAssignedIdentitiesValue `json:"userAssignedIdentities,omitempty"` + + // READ-ONLY; The principal id of virtual machine scale set identity. This property will only be provided for a system assigned + // identity. + PrincipalID *string `json:"principalId,omitempty" azure:"ro"` + + // READ-ONLY; The tenant id associated with the virtual machine scale set. This property will only be provided for a system + // assigned identity. + TenantID *string `json:"tenantId,omitempty" azure:"ro"` +} + +type VirtualMachineScaleSetIdentityUserAssignedIdentitiesValue struct { + // READ-ONLY; The client id of user assigned identity. + ClientID *string `json:"clientId,omitempty" azure:"ro"` + + // READ-ONLY; The principal id of user assigned identity. + PrincipalID *string `json:"principalId,omitempty" azure:"ro"` +} + +// VirtualMachineScaleSetInstanceView - The instance view of a virtual machine scale set. +type VirtualMachineScaleSetInstanceView struct { + // The resource status information. + Statuses []*InstanceViewStatus `json:"statuses,omitempty"` + + // READ-ONLY; The extensions information. + Extensions []*VirtualMachineScaleSetVMExtensionsSummary `json:"extensions,omitempty" azure:"ro"` + + // READ-ONLY; The orchestration services information. + OrchestrationServices []*OrchestrationServiceSummary `json:"orchestrationServices,omitempty" azure:"ro"` + + // READ-ONLY; The instance view status summary for the virtual machine scale set. + VirtualMachine *VirtualMachineScaleSetInstanceViewStatusesSummary `json:"virtualMachine,omitempty" azure:"ro"` +} + +// VirtualMachineScaleSetInstanceViewStatusesSummary - Instance view statuses summary for virtual machines of a virtual machine +// scale set. +type VirtualMachineScaleSetInstanceViewStatusesSummary struct { + // READ-ONLY; The extensions information. + StatusesSummary []*VirtualMachineStatusCodeCount `json:"statusesSummary,omitempty" azure:"ro"` +} + +// VirtualMachineScaleSetListOSUpgradeHistory - List of Virtual Machine Scale Set OS Upgrade History operation response. +type VirtualMachineScaleSetListOSUpgradeHistory struct { + // REQUIRED; The list of OS upgrades performed on the virtual machine scale set. + Value []*UpgradeOperationHistoricalStatusInfo `json:"value,omitempty"` + + // The uri to fetch the next page of OS Upgrade History. Call ListNext() with this to fetch the next page of history of upgrades. + NextLink *string `json:"nextLink,omitempty"` +} + +// VirtualMachineScaleSetListResult - The List Virtual Machine operation response. +type VirtualMachineScaleSetListResult struct { + // REQUIRED; The list of virtual machine scale sets. + Value []*VirtualMachineScaleSet `json:"value,omitempty"` + + // The uri to fetch the next page of Virtual Machine Scale Sets. Call ListNext() with this to fetch the next page of VMSS. + NextLink *string `json:"nextLink,omitempty"` +} + +// VirtualMachineScaleSetListSKUsResult - The Virtual Machine Scale Set List Skus operation response. +type VirtualMachineScaleSetListSKUsResult struct { + // REQUIRED; The list of skus available for the virtual machine scale set. 
+ Value []*VirtualMachineScaleSetSKU `json:"value,omitempty"`
+
+ // The uri to fetch the next page of Virtual Machine Scale Set Skus. Call ListNext() with this to fetch the next page of VMSS
+ // Skus.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// VirtualMachineScaleSetListWithLinkResult - The List Virtual Machine operation response.
+type VirtualMachineScaleSetListWithLinkResult struct {
+ // REQUIRED; The list of virtual machine scale sets.
+ Value []*VirtualMachineScaleSet `json:"value,omitempty"`
+
+ // The uri to fetch the next page of Virtual Machine Scale Sets. Call ListNext() with this to fetch the next page of Virtual
+ // Machine Scale Sets.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// VirtualMachineScaleSetManagedDiskParameters - Describes the parameters of a ScaleSet managed disk.
+type VirtualMachineScaleSetManagedDiskParameters struct {
+ // Specifies the customer managed disk encryption set resource id for the managed disk.
+ DiskEncryptionSet *DiskEncryptionSetParameters `json:"diskEncryptionSet,omitempty"`
+
+ // Specifies the security profile for the managed disk.
+ SecurityProfile *VMDiskSecurityProfile `json:"securityProfile,omitempty"`
+
+ // Specifies the storage account type for the managed disk. NOTE: UltraSSD_LRS can only be used with data disks, it cannot
+ // be used with OS Disk.
+ StorageAccountType *StorageAccountTypes `json:"storageAccountType,omitempty"`
+}
+
+// VirtualMachineScaleSetNetworkConfiguration - Describes a virtual machine scale set network profile's network configurations.
+type VirtualMachineScaleSetNetworkConfiguration struct {
+ // REQUIRED; The network configuration name.
+ Name *string `json:"name,omitempty"`
+
+ // Resource Id
+ ID *string `json:"id,omitempty"`
+
+ // Describes a virtual machine scale set network profile's IP configuration.
+ Properties *VirtualMachineScaleSetNetworkConfigurationProperties `json:"properties,omitempty"`
+}
+
+// VirtualMachineScaleSetNetworkConfigurationDNSSettings - Describes a virtual machine scale set's network configuration's
+// DNS settings.
+type VirtualMachineScaleSetNetworkConfigurationDNSSettings struct {
+ // List of DNS servers IP addresses
+ DNSServers []*string `json:"dnsServers,omitempty"`
+}
+
+// VirtualMachineScaleSetNetworkConfigurationProperties - Describes a virtual machine scale set network profile's IP configuration.
+type VirtualMachineScaleSetNetworkConfigurationProperties struct {
+ // REQUIRED; Specifies the IP configurations of the network interface.
+ IPConfigurations []*VirtualMachineScaleSetIPConfiguration `json:"ipConfigurations,omitempty"`
+
+ // The dns settings to be applied on the network interfaces.
+ DNSSettings *VirtualMachineScaleSetNetworkConfigurationDNSSettings `json:"dnsSettings,omitempty"`
+
+ // Specify what happens to the network interface when the VM is deleted
+ DeleteOption *DeleteOptions `json:"deleteOption,omitempty"`
+
+ // Specifies whether the network interface is accelerated networking-enabled.
+ EnableAcceleratedNetworking *bool `json:"enableAcceleratedNetworking,omitempty"`
+
+ // Specifies whether the network interface is FPGA networking-enabled.
+ EnableFpga *bool `json:"enableFpga,omitempty"`
+
+ // Whether IP forwarding is enabled on this NIC.
+ EnableIPForwarding *bool `json:"enableIPForwarding,omitempty"`
+
+ // The network security group.
+ NetworkSecurityGroup *SubResource `json:"networkSecurityGroup,omitempty"`
+
+ // Specifies the primary network interface in case the virtual machine has more than 1 network interface.
+ Primary *bool `json:"primary,omitempty"`
+}
+
+// VirtualMachineScaleSetNetworkProfile - Describes a virtual machine scale set network profile.
+type VirtualMachineScaleSetNetworkProfile struct {
+ // A reference to a load balancer probe used to determine the health of an instance in the virtual machine scale set. The
+ // reference will be in the form:
+ // '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/probes/{probeName}'.
+ HealthProbe *APIEntityReference `json:"healthProbe,omitempty"`
+
+ // Specifies the Microsoft.Network API version used when creating networking resources in the Network Interface Configurations
+ // for Virtual Machine Scale Set with orchestration mode 'Flexible'
+ NetworkAPIVersion *NetworkAPIVersion `json:"networkApiVersion,omitempty"`
+
+ // The list of network configurations.
+ NetworkInterfaceConfigurations []*VirtualMachineScaleSetNetworkConfiguration `json:"networkInterfaceConfigurations,omitempty"`
+}
+
+// VirtualMachineScaleSetOSDisk - Describes a virtual machine scale set operating system disk.
+type VirtualMachineScaleSetOSDisk struct {
+ // REQUIRED; Specifies how the virtual machines in the scale set should be created.
+ // The only allowed value is: FromImage - This value is used when you are using an image to create the virtual machine.
+ // If you are using a platform image, you also use the imageReference element
+ // described above. If you are using a marketplace image, you also use the plan element previously described.
+ CreateOption *DiskCreateOptionTypes `json:"createOption,omitempty"`
+
+ // Specifies the caching requirements.
+ // Possible values are:
+ // None
+ // ReadOnly
+ // ReadWrite
+ // Default: None for Standard storage. ReadOnly for Premium storage
+ Caching *CachingTypes `json:"caching,omitempty"`
+
+ // Specifies whether OS Disk should be deleted or detached upon VMSS Flex deletion (This feature is available for VMSS with
+ // Flexible OrchestrationMode only).
+ // Possible values:
+ // Delete If this value is used, the OS disk is deleted when VMSS Flex VM is deleted.
+ // Detach If this value is used, the OS disk is retained after VMSS Flex VM is deleted.
+ // The default value is set to Delete. For an Ephemeral OS Disk, the default value is set to Delete. User cannot change the
+ // delete option for Ephemeral OS Disk.
+ DeleteOption *DiskDeleteOptionTypes `json:"deleteOption,omitempty"`
+
+ // Specifies the ephemeral disk settings for the operating system disk used by the virtual machine scale set.
+ DiffDiskSettings *DiffDiskSettings `json:"diffDiskSettings,omitempty"`
+
+ // Specifies the size of the operating system disk in gigabytes. This element can be used to overwrite the size of the disk
+ // in a virtual machine image.
+ // This value cannot be larger than 1023 GB
+ DiskSizeGB *int32 `json:"diskSizeGB,omitempty"`
+
+ // Specifies information about the unmanaged user image to base the scale set on.
+ Image *VirtualHardDisk `json:"image,omitempty"`
+
+ // The managed disk parameters.
+ ManagedDisk *VirtualMachineScaleSetManagedDiskParameters `json:"managedDisk,omitempty"`
+
+ // The disk name.
+ Name *string `json:"name,omitempty"`
+
+ // This property allows you to specify the type of the OS that is included in the disk if creating a VM from user-image or
+ // a specialized VHD.
+ // Possible values are: + // Windows + // Linux + OSType *OperatingSystemTypes `json:"osType,omitempty"` + + // Specifies the container urls that are used to store operating system disks for the scale set. + VhdContainers []*string `json:"vhdContainers,omitempty"` + + // Specifies whether writeAccelerator should be enabled or disabled on the disk. + WriteAcceleratorEnabled *bool `json:"writeAcceleratorEnabled,omitempty"` +} + +// VirtualMachineScaleSetOSProfile - Describes a virtual machine scale set OS profile. +type VirtualMachineScaleSetOSProfile struct { + // Specifies the password of the administrator account. + // Minimum-length (Windows): 8 characters + // Minimum-length (Linux): 6 characters + // Max-length (Windows): 123 characters + // Max-length (Linux): 72 characters + // Complexity requirements: 3 out of 4 conditions below need to be fulfilled + // Has lower characters + // Has upper characters + // Has a digit + // Has a special character (Regex match [\W_]) + // Disallowed values: "abc@123", "P@$$w0rd", "P@ssw0rd", "P@ssword123", "Pa$$word", "pass@word1", "Password!", "Password1", + // "Password22", "iloveyou!" + // For resetting the password, see How to reset the Remote Desktop service or its login password in a Windows VM [https://docs.microsoft.com/troubleshoot/azure/virtual-machines/reset-rdp] + // For resetting root password, see Manage users, SSH, and check or repair disks on Azure Linux VMs using the VMAccess Extension + // [https://docs.microsoft.com/troubleshoot/azure/virtual-machines/troubleshoot-ssh-connection] + AdminPassword *string `json:"adminPassword,omitempty"` + + // Specifies the name of the administrator account. + // Windows-only restriction: Cannot end in "." + // Disallowed values: "administrator", "admin", "user", "user1", "test", "user2", "test1", "user3", "admin1", "1", "123", + // "a", "actuser", "adm", "admin2", "aspnet", "backup", "console", "david", "guest", + // "john", "owner", "root", "server", "sql", "support", "support_388945a0", "sys", "test2", "test3", "user4", "user5". + // Minimum-length (Linux): 1 character + // Max-length (Linux): 64 characters + // Max-length (Windows): 20 characters + AdminUsername *string `json:"adminUsername,omitempty"` + + // Specifies whether extension operations should be allowed on the virtual machine scale set. + // This may only be set to False when no extensions are present on the virtual machine scale set. + AllowExtensionOperations *bool `json:"allowExtensionOperations,omitempty"` + + // Specifies the computer name prefix for all of the virtual machines in the scale set. Computer name prefixes must be 1 to + // 15 characters long. + ComputerNamePrefix *string `json:"computerNamePrefix,omitempty"` + + // Specifies a base-64 encoded string of custom data. The base-64 encoded string is decoded to a binary array that is saved + // as a file on the Virtual Machine. The maximum length of the binary array is + // 65535 bytes. + // For using cloud-init for your VM, see Using cloud-init to customize a Linux VM during creation [https://docs.microsoft.com/azure/virtual-machines/linux/using-cloud-init] + CustomData *string `json:"customData,omitempty"` + + // Specifies the Linux operating system settings on the virtual machine. + // For a list of supported Linux distributions, see Linux on Azure-Endorsed Distributions [https://docs.microsoft.com/azure/virtual-machines/linux/endorsed-distros]. 
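+ // A key-based authentication sketch using the SSH types referenced by this configuration
+ // (to.Ptr from sdk/azcore/to; the key data is a placeholder):
+ //
+ //	linux := LinuxConfiguration{
+ //		DisablePasswordAuthentication: to.Ptr(true),
+ //		SSH: &SSHConfiguration{PublicKeys: []*SSHPublicKey{{
+ //			Path:    to.Ptr("/home/azureuser/.ssh/authorized_keys"),
+ //			KeyData: to.Ptr("ssh-rsa AAAA...placeholder..."),
+ //		}}},
+ //	}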
+ LinuxConfiguration *LinuxConfiguration `json:"linuxConfiguration,omitempty"` + + // Specifies set of certificates that should be installed onto the virtual machines in the scale set. To install certificates + // on a virtual machine it is recommended to use the Azure Key Vault virtual + // machine extension for Linux [https://docs.microsoft.com/azure/virtual-machines/extensions/key-vault-linux] or the Azure + // Key Vault virtual machine extension for Windows + // [https://docs.microsoft.com/azure/virtual-machines/extensions/key-vault-windows]. + Secrets []*VaultSecretGroup `json:"secrets,omitempty"` + + // Specifies Windows operating system settings on the virtual machine. + WindowsConfiguration *WindowsConfiguration `json:"windowsConfiguration,omitempty"` +} + +// VirtualMachineScaleSetProperties - Describes the properties of a Virtual Machine Scale Set. +type VirtualMachineScaleSetProperties struct { + // Specifies additional capabilities enabled or disabled on the Virtual Machines in the Virtual Machine Scale Set. For instance: + // whether the Virtual Machines have the capability to support attaching + // managed data disks with UltraSSD_LRS storage account type. + AdditionalCapabilities *AdditionalCapabilities `json:"additionalCapabilities,omitempty"` + + // Policy for automatic repairs. + AutomaticRepairsPolicy *AutomaticRepairsPolicy `json:"automaticRepairsPolicy,omitempty"` + + // When Overprovision is enabled, extensions are launched only on the requested number of VMs which are finally kept. This + // property will hence ensure that the extensions do not run on the extra + // overprovisioned VMs. + DoNotRunExtensionsOnOverprovisionedVMs *bool `json:"doNotRunExtensionsOnOverprovisionedVMs,omitempty"` + + // Specifies information about the dedicated host group that the virtual machine scale set resides in. + // Minimum api-version: 2020-06-01. + HostGroup *SubResource `json:"hostGroup,omitempty"` + + // Specifies the orchestration mode for the virtual machine scale set. + OrchestrationMode *OrchestrationMode `json:"orchestrationMode,omitempty"` + + // Specifies whether the Virtual Machine Scale Set should be overprovisioned. + Overprovision *bool `json:"overprovision,omitempty"` + + // Fault Domain count for each placement group. + PlatformFaultDomainCount *int32 `json:"platformFaultDomainCount,omitempty"` + + // Specifies information about the proximity placement group that the virtual machine scale set should be assigned to. + // Minimum api-version: 2018-04-01. + ProximityPlacementGroup *SubResource `json:"proximityPlacementGroup,omitempty"` + + // Specifies the policies applied when scaling in Virtual Machines in the Virtual Machine Scale Set. + ScaleInPolicy *ScaleInPolicy `json:"scaleInPolicy,omitempty"` + + // When true this limits the scale set to a single placement group, of max size 100 virtual machines. NOTE: If singlePlacementGroup + // is true, it may be modified to false. However, if singlePlacementGroup + // is false, it may not be modified to true. + SinglePlacementGroup *bool `json:"singlePlacementGroup,omitempty"` + + // Specifies the Spot Restore properties for the virtual machine scale set. + SpotRestorePolicy *SpotRestorePolicy `json:"spotRestorePolicy,omitempty"` + + // The upgrade policy. + UpgradePolicy *UpgradePolicy `json:"upgradePolicy,omitempty"` + + // The virtual machine profile. 
+ VirtualMachineProfile *VirtualMachineScaleSetVMProfile `json:"virtualMachineProfile,omitempty"`
+
+ // Whether to force strictly even Virtual Machine distribution across x-zones in case there is a zone outage. The zoneBalance
+ // property can only be set if the zones property of the scale set contains more than
+ // one zone. If there are no zones or only one zone specified, then zoneBalance property should not be set.
+ ZoneBalance *bool `json:"zoneBalance,omitempty"`
+
+ // READ-ONLY; The provisioning state, which only appears in the response.
+ ProvisioningState *string `json:"provisioningState,omitempty" azure:"ro"`
+
+ // READ-ONLY; Specifies the time at which the Virtual Machine Scale Set resource was created.
+ // Minimum api-version: 2022-03-01.
+ TimeCreated *time.Time `json:"timeCreated,omitempty" azure:"ro"`
+
+ // READ-ONLY; Specifies the ID which uniquely identifies a Virtual Machine Scale Set.
+ UniqueID *string `json:"uniqueId,omitempty" azure:"ro"`
+}
+
+// VirtualMachineScaleSetPublicIPAddressConfiguration - Describes a virtual machine scale set's IP Configuration's PublicIPAddress
+// configuration
+type VirtualMachineScaleSetPublicIPAddressConfiguration struct {
+ // REQUIRED; The publicIP address configuration name.
+ Name *string `json:"name,omitempty"`
+
+ // Describes a virtual machine scale set's IP Configuration's PublicIPAddress configuration
+ Properties *VirtualMachineScaleSetPublicIPAddressConfigurationProperties `json:"properties,omitempty"`
+
+ // Describes the public IP Sku. It can only be set with OrchestrationMode as Flexible.
+ SKU *PublicIPAddressSKU `json:"sku,omitempty"`
+}
+
+// VirtualMachineScaleSetPublicIPAddressConfigurationDNSSettings - Describes a virtual machine scale set's network configuration's
+// DNS settings.
+type VirtualMachineScaleSetPublicIPAddressConfigurationDNSSettings struct {
+ // REQUIRED; The Domain name label. The concatenation of the domain name label and vm index will be the domain name labels
+ // of the PublicIPAddress resources that will be created
+ DomainNameLabel *string `json:"domainNameLabel,omitempty"`
+}
+
+// VirtualMachineScaleSetPublicIPAddressConfigurationProperties - Describes a virtual machine scale set's IP Configuration's
+// PublicIPAddress configuration
+type VirtualMachineScaleSetPublicIPAddressConfigurationProperties struct {
+ // The dns settings to be applied on the publicIP addresses.
+ DNSSettings *VirtualMachineScaleSetPublicIPAddressConfigurationDNSSettings `json:"dnsSettings,omitempty"`
+
+ // Specify what happens to the public IP when the VM is deleted
+ DeleteOption *DeleteOptions `json:"deleteOption,omitempty"`
+
+ // The list of IP tags associated with the public IP address.
+ IPTags []*VirtualMachineScaleSetIPTag `json:"ipTags,omitempty"`
+
+ // The idle timeout of the public IP address.
+ IdleTimeoutInMinutes *int32 `json:"idleTimeoutInMinutes,omitempty"`
+
+ // Available from Api-Version 2019-07-01 onwards, it represents whether the specific ipconfiguration is IPv4 or IPv6. Default
+ // is taken as IPv4. Possible values are: 'IPv4' and 'IPv6'.
+ PublicIPAddressVersion *IPVersion `json:"publicIPAddressVersion,omitempty"`
+
+ // The PublicIPPrefix from which to allocate publicIP addresses.
+ PublicIPPrefix *SubResource `json:"publicIPPrefix,omitempty"`
+}
+
+// VirtualMachineScaleSetReimageParameters - Describes a Virtual Machine Scale Set VM Reimage Parameters.
+type VirtualMachineScaleSetReimageParameters struct {
+ // The virtual machine scale set instance ids.
Omitting the virtual machine scale set instance ids will result in the operation + // being performed on all virtual machines in the virtual machine scale set. + InstanceIDs []*string `json:"instanceIds,omitempty"` + + // Specifies whether to reimage temp disk. Default value: false. Note: This temp disk reimage parameter is only supported + // for VM/VMSS with Ephemeral OS disk. + TempDisk *bool `json:"tempDisk,omitempty"` +} + +// VirtualMachineScaleSetRollingUpgradesClientBeginCancelOptions contains the optional parameters for the VirtualMachineScaleSetRollingUpgradesClient.BeginCancel +// method. +type VirtualMachineScaleSetRollingUpgradesClientBeginCancelOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// VirtualMachineScaleSetRollingUpgradesClientBeginStartExtensionUpgradeOptions contains the optional parameters for the VirtualMachineScaleSetRollingUpgradesClient.BeginStartExtensionUpgrade +// method. +type VirtualMachineScaleSetRollingUpgradesClientBeginStartExtensionUpgradeOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// VirtualMachineScaleSetRollingUpgradesClientBeginStartOSUpgradeOptions contains the optional parameters for the VirtualMachineScaleSetRollingUpgradesClient.BeginStartOSUpgrade +// method. +type VirtualMachineScaleSetRollingUpgradesClientBeginStartOSUpgradeOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// VirtualMachineScaleSetRollingUpgradesClientGetLatestOptions contains the optional parameters for the VirtualMachineScaleSetRollingUpgradesClient.GetLatest +// method. +type VirtualMachineScaleSetRollingUpgradesClientGetLatestOptions struct { + // placeholder for future optional parameters +} + +// VirtualMachineScaleSetSKU - Describes an available virtual machine scale set sku. +type VirtualMachineScaleSetSKU struct { + // READ-ONLY; Specifies the number of virtual machines in the scale set. + Capacity *VirtualMachineScaleSetSKUCapacity `json:"capacity,omitempty" azure:"ro"` + + // READ-ONLY; The type of resource the sku applies to. + ResourceType *string `json:"resourceType,omitempty" azure:"ro"` + + // READ-ONLY; The Sku. + SKU *SKU `json:"sku,omitempty" azure:"ro"` +} + +// VirtualMachineScaleSetSKUCapacity - Describes scaling information of a sku. +type VirtualMachineScaleSetSKUCapacity struct { + // READ-ONLY; The default capacity. + DefaultCapacity *int64 `json:"defaultCapacity,omitempty" azure:"ro"` + + // READ-ONLY; The maximum capacity that can be set. + Maximum *int64 `json:"maximum,omitempty" azure:"ro"` + + // READ-ONLY; The minimum capacity. + Minimum *int64 `json:"minimum,omitempty" azure:"ro"` + + // READ-ONLY; The scale type applicable to the sku. + ScaleType *VirtualMachineScaleSetSKUScaleType `json:"scaleType,omitempty" azure:"ro"` +} + +// VirtualMachineScaleSetStorageProfile - Describes a virtual machine scale set storage profile. +type VirtualMachineScaleSetStorageProfile struct { + // Specifies the parameters that are used to add data disks to the virtual machines in the scale set. + // For more information about disks, see About disks and VHDs for Azure virtual machines [https://docs.microsoft.com/azure/virtual-machines/managed-disks-overview]. + DataDisks []*VirtualMachineScaleSetDataDisk `json:"dataDisks,omitempty"` + + // Specifies information about the image to use. You can specify information about platform images, marketplace images, or + // virtual machine images. 
This element is required when you want to use a platform + // image, marketplace image, or virtual machine image, but is not used in other creation operations. + ImageReference *ImageReference `json:"imageReference,omitempty"` + + // Specifies information about the operating system disk used by the virtual machines in the scale set. + // For more information about disks, see About disks and VHDs for Azure virtual machines [https://docs.microsoft.com/azure/virtual-machines/managed-disks-overview]. + OSDisk *VirtualMachineScaleSetOSDisk `json:"osDisk,omitempty"` +} + +// VirtualMachineScaleSetUpdate - Describes a Virtual Machine Scale Set. +type VirtualMachineScaleSetUpdate struct { + // The identity of the virtual machine scale set, if configured. + Identity *VirtualMachineScaleSetIdentity `json:"identity,omitempty"` + + // The purchase plan when deploying a virtual machine scale set from VM Marketplace images. + Plan *Plan `json:"plan,omitempty"` + + // Describes the properties of a Virtual Machine Scale Set. + Properties *VirtualMachineScaleSetUpdateProperties `json:"properties,omitempty"` + + // The virtual machine scale set sku. + SKU *SKU `json:"sku,omitempty"` + + // Resource tags + Tags map[string]*string `json:"tags,omitempty"` +} + +// VirtualMachineScaleSetUpdateIPConfiguration - Describes a virtual machine scale set network profile's IP configuration. +// NOTE: The subnet of a scale set may be modified as long as the original subnet and the new subnet are in the same virtual +// network +type VirtualMachineScaleSetUpdateIPConfiguration struct { + // Resource Id + ID *string `json:"id,omitempty"` + + // The IP configuration name. + Name *string `json:"name,omitempty"` + + // Describes a virtual machine scale set network profile's IP configuration properties. + Properties *VirtualMachineScaleSetUpdateIPConfigurationProperties `json:"properties,omitempty"` +} + +// VirtualMachineScaleSetUpdateIPConfigurationProperties - Describes a virtual machine scale set network profile's IP configuration +// properties. +type VirtualMachineScaleSetUpdateIPConfigurationProperties struct { + // The application gateway backend address pools. + ApplicationGatewayBackendAddressPools []*SubResource `json:"applicationGatewayBackendAddressPools,omitempty"` + + // Specifies an array of references to application security group. + ApplicationSecurityGroups []*SubResource `json:"applicationSecurityGroups,omitempty"` + + // The load balancer backend address pools. + LoadBalancerBackendAddressPools []*SubResource `json:"loadBalancerBackendAddressPools,omitempty"` + + // The load balancer inbound nat pools. + LoadBalancerInboundNatPools []*SubResource `json:"loadBalancerInboundNatPools,omitempty"` + + // Specifies the primary IP Configuration in case the network interface has more than one IP Configuration. + Primary *bool `json:"primary,omitempty"` + + // Available from Api-Version 2017-03-30 onwards, it represents whether the specific ipconfiguration is IPv4 or IPv6. Default + // is taken as IPv4. Possible values are: 'IPv4' and 'IPv6'. + PrivateIPAddressVersion *IPVersion `json:"privateIPAddressVersion,omitempty"` + + // The publicIPAddressConfiguration. + PublicIPAddressConfiguration *VirtualMachineScaleSetUpdatePublicIPAddressConfiguration `json:"publicIPAddressConfiguration,omitempty"` + + // The subnet. + Subnet *APIEntityReference `json:"subnet,omitempty"` +} + +// VirtualMachineScaleSetUpdateNetworkConfiguration - Describes a virtual machine scale set network profile's network configurations. 
+type VirtualMachineScaleSetUpdateNetworkConfiguration struct { + // Resource Id + ID *string `json:"id,omitempty"` + + // The network configuration name. + Name *string `json:"name,omitempty"` + + // Describes a virtual machine scale set updatable network profile's IP configuration. Use this object for updating network + // profile's IP Configuration. + Properties *VirtualMachineScaleSetUpdateNetworkConfigurationProperties `json:"properties,omitempty"` +} + +// VirtualMachineScaleSetUpdateNetworkConfigurationProperties - Describes a virtual machine scale set updatable network profile's +// IP configuration. Use this object for updating network profile's IP Configuration. +type VirtualMachineScaleSetUpdateNetworkConfigurationProperties struct { + // The dns settings to be applied on the network interfaces. + DNSSettings *VirtualMachineScaleSetNetworkConfigurationDNSSettings `json:"dnsSettings,omitempty"` + + // Specify what happens to the network interface when the VM is deleted. + DeleteOption *DeleteOptions `json:"deleteOption,omitempty"` + + // Specifies whether the network interface is accelerated networking-enabled. + EnableAcceleratedNetworking *bool `json:"enableAcceleratedNetworking,omitempty"` + + // Specifies whether the network interface is FPGA networking-enabled. + EnableFpga *bool `json:"enableFpga,omitempty"` + + // Whether IP forwarding is enabled on this NIC. + EnableIPForwarding *bool `json:"enableIPForwarding,omitempty"` + + // The virtual machine scale set IP Configuration. + IPConfigurations []*VirtualMachineScaleSetUpdateIPConfiguration `json:"ipConfigurations,omitempty"` + + // The network security group. + NetworkSecurityGroup *SubResource `json:"networkSecurityGroup,omitempty"` + + // Whether this is a primary NIC on a virtual machine. + Primary *bool `json:"primary,omitempty"` +} + +// VirtualMachineScaleSetUpdateNetworkProfile - Describes a virtual machine scale set network profile. +type VirtualMachineScaleSetUpdateNetworkProfile struct { + // A reference to a load balancer probe used to determine the health of an instance in the virtual machine scale set. The + // reference will be in the form: + // '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/probes/{probeName}'. + HealthProbe *APIEntityReference `json:"healthProbe,omitempty"` + + // Specifies the Microsoft.Network API version used when creating networking resources in the Network Interface Configurations + // for Virtual Machine Scale Set with orchestration mode 'Flexible'. + NetworkAPIVersion *NetworkAPIVersion `json:"networkApiVersion,omitempty"` + + // The list of network configurations. + NetworkInterfaceConfigurations []*VirtualMachineScaleSetUpdateNetworkConfiguration `json:"networkInterfaceConfigurations,omitempty"` +} + +// VirtualMachineScaleSetUpdateOSDisk - Describes virtual machine scale set operating system disk Update Object. This should +// be used for Updating VMSS OS Disk. +type VirtualMachineScaleSetUpdateOSDisk struct { + // The caching type. + Caching *CachingTypes `json:"caching,omitempty"` + + // Specifies whether OS Disk should be deleted or detached upon VMSS Flex deletion (This feature is available for VMSS with + // Flexible OrchestrationMode only). + // Possible values: + // Delete If this value is used, the OS disk is deleted when VMSS Flex VM is deleted. + // Detach If this value is used, the OS disk is retained after VMSS Flex VM is deleted. + // The default value is set to Delete.
For an Ephemeral OS Disk, the default value is set to Delete. The user cannot change the + // delete option for an Ephemeral OS Disk. + DeleteOption *DiskDeleteOptionTypes `json:"deleteOption,omitempty"` + + // Specifies the size of the operating system disk in gigabytes. This element can be used to overwrite the size of the disk + // in a virtual machine image. + // This value cannot be larger than 1023 GB. + DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` + + // The Source User Image VirtualHardDisk. This VirtualHardDisk will be copied before being attached to the Virtual Machine. + // If SourceImage is provided, the destination VirtualHardDisk should not + // exist. + Image *VirtualHardDisk `json:"image,omitempty"` + + // The managed disk parameters. + ManagedDisk *VirtualMachineScaleSetManagedDiskParameters `json:"managedDisk,omitempty"` + + // The list of virtual hard disk container uris. + VhdContainers []*string `json:"vhdContainers,omitempty"` + + // Specifies whether writeAccelerator should be enabled or disabled on the disk. + WriteAcceleratorEnabled *bool `json:"writeAcceleratorEnabled,omitempty"` +} + +// VirtualMachineScaleSetUpdateOSProfile - Describes a virtual machine scale set OS profile. +type VirtualMachineScaleSetUpdateOSProfile struct { + // A base-64 encoded string of custom data. + CustomData *string `json:"customData,omitempty"` + + // The Linux Configuration of the OS profile. + LinuxConfiguration *LinuxConfiguration `json:"linuxConfiguration,omitempty"` + + // The List of certificates for addition to the VM. + Secrets []*VaultSecretGroup `json:"secrets,omitempty"` + + // The Windows Configuration of the OS profile. + WindowsConfiguration *WindowsConfiguration `json:"windowsConfiguration,omitempty"` +} + +// VirtualMachineScaleSetUpdateProperties - Describes the properties of a Virtual Machine Scale Set. +type VirtualMachineScaleSetUpdateProperties struct { + // Specifies additional capabilities enabled or disabled on the Virtual Machines in the Virtual Machine Scale Set. For instance: + // whether the Virtual Machines have the capability to support attaching + // managed data disks with UltraSSD_LRS storage account type. + AdditionalCapabilities *AdditionalCapabilities `json:"additionalCapabilities,omitempty"` + + // Policy for automatic repairs. + AutomaticRepairsPolicy *AutomaticRepairsPolicy `json:"automaticRepairsPolicy,omitempty"` + + // When Overprovision is enabled, extensions are launched only on the requested number of VMs that are ultimately kept. This + // property therefore ensures that the extensions do not run on the extra + // overprovisioned VMs. + DoNotRunExtensionsOnOverprovisionedVMs *bool `json:"doNotRunExtensionsOnOverprovisionedVMs,omitempty"` + + // Specifies whether the Virtual Machine Scale Set should be overprovisioned. + Overprovision *bool `json:"overprovision,omitempty"` + + // Specifies information about the proximity placement group that the virtual machine scale set should be assigned to. + // Minimum api-version: 2018-04-01. + ProximityPlacementGroup *SubResource `json:"proximityPlacementGroup,omitempty"` + + // Specifies the policies applied when scaling in Virtual Machines in the Virtual Machine Scale Set. + ScaleInPolicy *ScaleInPolicy `json:"scaleInPolicy,omitempty"` + + // When true, this limits the scale set to a single placement group, of max size 100 virtual machines. NOTE: If singlePlacementGroup + // is true, it may be modified to false. However, if singlePlacementGroup + // is false, it may not be modified to true.
+ SinglePlacementGroup *bool `json:"singlePlacementGroup,omitempty"` + + // The upgrade policy. + UpgradePolicy *UpgradePolicy `json:"upgradePolicy,omitempty"` + + // The virtual machine profile. + VirtualMachineProfile *VirtualMachineScaleSetUpdateVMProfile `json:"virtualMachineProfile,omitempty"` +} + +// VirtualMachineScaleSetUpdatePublicIPAddressConfiguration - Describes a virtual machine scale set IP Configuration's PublicIPAddress +// configuration. +type VirtualMachineScaleSetUpdatePublicIPAddressConfiguration struct { + // The publicIP address configuration name. + Name *string `json:"name,omitempty"` + + // Describes a virtual machine scale set IP Configuration's PublicIPAddress configuration. + Properties *VirtualMachineScaleSetUpdatePublicIPAddressConfigurationProperties `json:"properties,omitempty"` +} + +// VirtualMachineScaleSetUpdatePublicIPAddressConfigurationProperties - Describes a virtual machine scale set IP Configuration's +// PublicIPAddress configuration. +type VirtualMachineScaleSetUpdatePublicIPAddressConfigurationProperties struct { + // The dns settings to be applied on the publicIP addresses. + DNSSettings *VirtualMachineScaleSetPublicIPAddressConfigurationDNSSettings `json:"dnsSettings,omitempty"` + + // Specify what happens to the public IP when the VM is deleted. + DeleteOption *DeleteOptions `json:"deleteOption,omitempty"` + + // The idle timeout of the public IP address. + IdleTimeoutInMinutes *int32 `json:"idleTimeoutInMinutes,omitempty"` + + // The PublicIPPrefix from which to allocate publicIP addresses. + PublicIPPrefix *SubResource `json:"publicIPPrefix,omitempty"` +} + +// VirtualMachineScaleSetUpdateStorageProfile - Describes a virtual machine scale set storage profile. +type VirtualMachineScaleSetUpdateStorageProfile struct { + // The data disks. + DataDisks []*VirtualMachineScaleSetDataDisk `json:"dataDisks,omitempty"` + + // The image reference. + ImageReference *ImageReference `json:"imageReference,omitempty"` + + // The OS disk. + OSDisk *VirtualMachineScaleSetUpdateOSDisk `json:"osDisk,omitempty"` +} + +// VirtualMachineScaleSetUpdateVMProfile - Describes a virtual machine scale set virtual machine profile. +type VirtualMachineScaleSetUpdateVMProfile struct { + // Specifies the billing related details of an Azure Spot VMSS. + // Minimum api-version: 2019-03-01. + BillingProfile *BillingProfile `json:"billingProfile,omitempty"` + + // The virtual machine scale set diagnostics profile. + DiagnosticsProfile *DiagnosticsProfile `json:"diagnosticsProfile,omitempty"` + + // The virtual machine scale set extension profile. + ExtensionProfile *VirtualMachineScaleSetExtensionProfile `json:"extensionProfile,omitempty"` + + // The license type, which is for bring your own license scenario. + LicenseType *string `json:"licenseType,omitempty"` + + // The virtual machine scale set network profile. + NetworkProfile *VirtualMachineScaleSetUpdateNetworkProfile `json:"networkProfile,omitempty"` + + // The virtual machine scale set OS profile. + OSProfile *VirtualMachineScaleSetUpdateOSProfile `json:"osProfile,omitempty"` + + // Specifies Scheduled Event related configurations. + ScheduledEventsProfile *ScheduledEventsProfile `json:"scheduledEventsProfile,omitempty"` + + // The virtual machine scale set Security profile. + SecurityProfile *SecurityProfile `json:"securityProfile,omitempty"` + + // The virtual machine scale set storage profile.
+ StorageProfile *VirtualMachineScaleSetUpdateStorageProfile `json:"storageProfile,omitempty"` + + // UserData for the VM, which must be base-64 encoded. Customer should not pass any secrets in here. + // Minimum api-version: 2021-03-01 + UserData *string `json:"userData,omitempty"` +} + +// VirtualMachineScaleSetVM - Describes a virtual machine scale set virtual machine. +type VirtualMachineScaleSetVM struct { + // REQUIRED; Resource location + Location *string `json:"location,omitempty"` + + // The identity of the virtual machine, if configured. + Identity *VirtualMachineIdentity `json:"identity,omitempty"` + + // Specifies information about the marketplace image used to create the virtual machine. This element is only used for marketplace + // images. Before you can use a marketplace image from an API, you must + // enable the image for programmatic use. In the Azure portal, find the marketplace image that you want to use and then click + // Want to deploy programmatically, Get Started ->. Enter any required + // information and then click Save. + Plan *Plan `json:"plan,omitempty"` + + // Describes the properties of a virtual machine scale set virtual machine. + Properties *VirtualMachineScaleSetVMProperties `json:"properties,omitempty"` + + // Resource tags + Tags map[string]*string `json:"tags,omitempty"` + + // READ-ONLY; Resource Id + ID *string `json:"id,omitempty" azure:"ro"` + + // READ-ONLY; The virtual machine instance ID. + InstanceID *string `json:"instanceId,omitempty" azure:"ro"` + + // READ-ONLY; Resource name + Name *string `json:"name,omitempty" azure:"ro"` + + // READ-ONLY; The virtual machine child extension resources. + Resources []*VirtualMachineExtension `json:"resources,omitempty" azure:"ro"` + + // READ-ONLY; The virtual machine SKU. + SKU *SKU `json:"sku,omitempty" azure:"ro"` + + // READ-ONLY; Resource type + Type *string `json:"type,omitempty" azure:"ro"` + + // READ-ONLY; The virtual machine zones. + Zones []*string `json:"zones,omitempty" azure:"ro"` +} + +// VirtualMachineScaleSetVMExtension - Describes a VMSS VM Extension. +type VirtualMachineScaleSetVMExtension struct { + // Describes the properties of a Virtual Machine Extension. + Properties *VirtualMachineExtensionProperties `json:"properties,omitempty"` + + // READ-ONLY; Resource Id + ID *string `json:"id,omitempty" azure:"ro"` + + // READ-ONLY; The name of the extension. + Name *string `json:"name,omitempty" azure:"ro"` + + // READ-ONLY; Resource type + Type *string `json:"type,omitempty" azure:"ro"` +} + +// VirtualMachineScaleSetVMExtensionUpdate - Describes a VMSS VM Extension. +type VirtualMachineScaleSetVMExtensionUpdate struct { + // Describes the properties of a Virtual Machine Extension. + Properties *VirtualMachineExtensionUpdateProperties `json:"properties,omitempty"` + + // READ-ONLY; Resource Id + ID *string `json:"id,omitempty" azure:"ro"` + + // READ-ONLY; The name of the extension. + Name *string `json:"name,omitempty" azure:"ro"` + + // READ-ONLY; Resource type + Type *string `json:"type,omitempty" azure:"ro"` +} + +// VirtualMachineScaleSetVMExtensionsClientBeginCreateOrUpdateOptions contains the optional parameters for the VirtualMachineScaleSetVMExtensionsClient.BeginCreateOrUpdate +// method. +type VirtualMachineScaleSetVMExtensionsClientBeginCreateOrUpdateOptions struct { + // Resumes the LRO from the provided token. 
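+ // Editorial note, not generated code: ResumeToken supports resuming a long-running
+ // operation across process restarts. A minimal sketch, assuming an azcore-style
+ // poller surface and hypothetical variable names (exact signatures vary by SDK version):
+ //
+ //   poller, _ := client.BeginCreateOrUpdate(ctx, rg, vmssName, instanceID, extName, ext, nil)
+ //   token, _ := poller.ResumeToken() // persist this string durably
+ //   // later, in a new process, resume instead of starting a new operation:
+ //   opts := &VirtualMachineScaleSetVMExtensionsClientBeginCreateOrUpdateOptions{ResumeToken: token}
+ //   poller, _ = client.BeginCreateOrUpdate(ctx, rg, vmssName, instanceID, extName, ext, opts)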
+ ResumeToken string +} + +// VirtualMachineScaleSetVMExtensionsClientBeginDeleteOptions contains the optional parameters for the VirtualMachineScaleSetVMExtensionsClient.BeginDelete +// method. +type VirtualMachineScaleSetVMExtensionsClientBeginDeleteOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// VirtualMachineScaleSetVMExtensionsClientBeginUpdateOptions contains the optional parameters for the VirtualMachineScaleSetVMExtensionsClient.BeginUpdate +// method. +type VirtualMachineScaleSetVMExtensionsClientBeginUpdateOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// VirtualMachineScaleSetVMExtensionsClientGetOptions contains the optional parameters for the VirtualMachineScaleSetVMExtensionsClient.Get +// method. +type VirtualMachineScaleSetVMExtensionsClientGetOptions struct { + // The expand expression to apply on the operation. + Expand *string +} + +// VirtualMachineScaleSetVMExtensionsClientListOptions contains the optional parameters for the VirtualMachineScaleSetVMExtensionsClient.List +// method. +type VirtualMachineScaleSetVMExtensionsClientListOptions struct { + // The expand expression to apply on the operation. + Expand *string +} + +// VirtualMachineScaleSetVMExtensionsListResult - The List VMSS VM Extension operation response +type VirtualMachineScaleSetVMExtensionsListResult struct { + // The list of VMSS VM extensions + Value []*VirtualMachineScaleSetVMExtension `json:"value,omitempty"` +} + +// VirtualMachineScaleSetVMExtensionsSummary - Extensions summary for virtual machines of a virtual machine scale set. +type VirtualMachineScaleSetVMExtensionsSummary struct { + // READ-ONLY; The extension name. + Name *string `json:"name,omitempty" azure:"ro"` + + // READ-ONLY; The extensions information. + StatusesSummary []*VirtualMachineStatusCodeCount `json:"statusesSummary,omitempty" azure:"ro"` +} + +// VirtualMachineScaleSetVMInstanceIDs - Specifies a list of virtual machine instance IDs from the VM scale set. +type VirtualMachineScaleSetVMInstanceIDs struct { + // The virtual machine scale set instance ids. Omitting the virtual machine scale set instance ids will result in the operation + // being performed on all virtual machines in the virtual machine scale set. + InstanceIDs []*string `json:"instanceIds,omitempty"` +} + +// VirtualMachineScaleSetVMInstanceRequiredIDs - Specifies a list of virtual machine instance IDs from the VM scale set. +type VirtualMachineScaleSetVMInstanceRequiredIDs struct { + // REQUIRED; The virtual machine scale set instance ids. + InstanceIDs []*string `json:"instanceIds,omitempty"` +} + +// VirtualMachineScaleSetVMInstanceView - The instance view of a virtual machine scale set VM. +type VirtualMachineScaleSetVMInstanceView struct { + // Boot Diagnostics is a debugging feature which allows you to view Console Output and Screenshot to diagnose VM status. + // You can easily view the output of your console log. + // Azure also enables you to see a screenshot of the VM from the hypervisor. + BootDiagnostics *BootDiagnosticsInstanceView `json:"bootDiagnostics,omitempty"` + + // The disks information. + Disks []*DiskInstanceView `json:"disks,omitempty"` + + // The extensions information. + Extensions []*VirtualMachineExtensionInstanceView `json:"extensions,omitempty"` + + // The Maintenance Operation status on the virtual machine. 
+ MaintenanceRedeployStatus *MaintenanceRedeployStatus `json:"maintenanceRedeployStatus,omitempty"` + + // The placement group in which the VM is running. If the VM is deallocated, it will not have a placementGroupId. + PlacementGroupID *string `json:"placementGroupId,omitempty"` + + // The Fault Domain count. + PlatformFaultDomain *int32 `json:"platformFaultDomain,omitempty"` + + // The Update Domain count. + PlatformUpdateDomain *int32 `json:"platformUpdateDomain,omitempty"` + + // The Remote desktop certificate thumbprint. + RdpThumbPrint *string `json:"rdpThumbPrint,omitempty"` + + // The resource status information. + Statuses []*InstanceViewStatus `json:"statuses,omitempty"` + + // The VM Agent running on the virtual machine. + VMAgent *VirtualMachineAgentInstanceView `json:"vmAgent,omitempty"` + + // READ-ONLY; Resource id of the dedicated host, on which the virtual machine is allocated through automatic placement, when + // the virtual machine is associated with a dedicated host group that has automatic + // placement enabled. + // Minimum api-version: 2020-06-01. + AssignedHost *string `json:"assignedHost,omitempty" azure:"ro"` + + // READ-ONLY; The health status for the VM. + VMHealth *VirtualMachineHealthStatus `json:"vmHealth,omitempty" azure:"ro"` +} + +// VirtualMachineScaleSetVMListResult - The List Virtual Machine Scale Set VMs operation response. +type VirtualMachineScaleSetVMListResult struct { + // REQUIRED; The list of virtual machine scale set VMs. + Value []*VirtualMachineScaleSetVM `json:"value,omitempty"` + + // The uri to fetch the next page of Virtual Machine Scale Set VMs. Call ListNext() with this to fetch the next page of VMSS + // VMs. + NextLink *string `json:"nextLink,omitempty"` +} + +// VirtualMachineScaleSetVMNetworkProfileConfiguration - Describes a virtual machine scale set VM network profile. +type VirtualMachineScaleSetVMNetworkProfileConfiguration struct { + // The list of network configurations. + NetworkInterfaceConfigurations []*VirtualMachineScaleSetNetworkConfiguration `json:"networkInterfaceConfigurations,omitempty"` +} + +// VirtualMachineScaleSetVMProfile - Describes a virtual machine scale set virtual machine profile. +type VirtualMachineScaleSetVMProfile struct { + // Specifies the gallery applications that should be made available to the VM/VMSS. + ApplicationProfile *ApplicationProfile `json:"applicationProfile,omitempty"` + + // Specifies the billing related details of an Azure Spot VMSS. + // Minimum api-version: 2019-03-01. + BillingProfile *BillingProfile `json:"billingProfile,omitempty"` + + // Specifies the capacity reservation related details of a scale set. + // Minimum api-version: 2021-04-01. + CapacityReservation *CapacityReservationProfile `json:"capacityReservation,omitempty"` + + // Specifies the boot diagnostic settings state. + // Minimum api-version: 2015-06-15. + DiagnosticsProfile *DiagnosticsProfile `json:"diagnosticsProfile,omitempty"` + + // Specifies the eviction policy for the Azure Spot virtual machine and Azure Spot scale set. + // For Azure Spot virtual machines, both 'Deallocate' and 'Delete' are supported and the minimum api-version is 2019-03-01. + // For Azure Spot scale sets, both 'Deallocate' and 'Delete' are supported and the minimum api-version is 2017-10-30-preview. + EvictionPolicy *VirtualMachineEvictionPolicyTypes `json:"evictionPolicy,omitempty"` + + // Specifies a collection of settings for extensions installed on virtual machines in the scale set.
+ ExtensionProfile *VirtualMachineScaleSetExtensionProfile `json:"extensionProfile,omitempty"` + + // Specifies the hardware profile related details of a scale set. + // Minimum api-version: 2022-03-01. + HardwareProfile *VirtualMachineScaleSetHardwareProfile `json:"hardwareProfile,omitempty"` + + // Specifies that the image or disk that is being used was licensed on-premises. + // Possible values for Windows Server operating system are: + // WindowsClient + // WindowsServer + // Possible values for Linux Server operating system are: + // RHELBYOS (for RHEL) + // SLESBYOS (for SUSE) + // For more information, see Azure Hybrid Use Benefit for Windows Server [https://docs.microsoft.com/azure/virtual-machines/windows/hybrid-use-benefit-licensing] + // Azure Hybrid Use Benefit for Linux Server [https://docs.microsoft.com/azure/virtual-machines/linux/azure-hybrid-benefit-linux] + // Minimum api-version: 2015-06-15 + LicenseType *string `json:"licenseType,omitempty"` + + // Specifies properties of the network interfaces of the virtual machines in the scale set. + NetworkProfile *VirtualMachineScaleSetNetworkProfile `json:"networkProfile,omitempty"` + + // Specifies the operating system settings for the virtual machines in the scale set. + OSProfile *VirtualMachineScaleSetOSProfile `json:"osProfile,omitempty"` + + // Specifies the priority for the virtual machines in the scale set. + // Minimum api-version: 2017-10-30-preview + Priority *VirtualMachinePriorityTypes `json:"priority,omitempty"` + + // Specifies Scheduled Event related configurations. + ScheduledEventsProfile *ScheduledEventsProfile `json:"scheduledEventsProfile,omitempty"` + + // Specifies the Security related profile settings for the virtual machines in the scale set. + SecurityProfile *SecurityProfile `json:"securityProfile,omitempty"` + + // Specifies the storage settings for the virtual machine disks. + StorageProfile *VirtualMachineScaleSetStorageProfile `json:"storageProfile,omitempty"` + + // UserData for the virtual machines in the scale set, which must be base-64 encoded. Customer should not pass any secrets + // in here. + // Minimum api-version: 2021-03-01 + UserData *string `json:"userData,omitempty"` +} + +// VirtualMachineScaleSetVMProperties - Describes the properties of a virtual machine scale set virtual machine. +type VirtualMachineScaleSetVMProperties struct { + // Specifies additional capabilities enabled or disabled on the virtual machine in the scale set. For instance: whether the + // virtual machine has the capability to support attaching managed data disks with + // UltraSSD_LRS storage account type. + AdditionalCapabilities *AdditionalCapabilities `json:"additionalCapabilities,omitempty"` + + // Specifies information about the availability set that the virtual machine should be assigned to. Virtual machines specified + // in the same availability set are allocated to different nodes to maximize + // availability. For more information about availability sets, see Availability sets overview [https://docs.microsoft.com/azure/virtual-machines/availability-set-overview]. + // For more information on Azure planned maintenance, see Maintenance and updates for Virtual Machines in Azure [https://docs.microsoft.com/azure/virtual-machines/maintenance-and-updates] + // Currently, a VM can only be added to availability set at creation time. An existing VM cannot be added to an availability + // set. + AvailabilitySet *SubResource `json:"availabilitySet,omitempty"` + + // Specifies the boot diagnostic settings state. 
+ // Minimum api-version: 2015-06-15. + DiagnosticsProfile *DiagnosticsProfile `json:"diagnosticsProfile,omitempty"` + + // Specifies the hardware settings for the virtual machine. + HardwareProfile *HardwareProfile `json:"hardwareProfile,omitempty"` + + // Specifies that the image or disk that is being used was licensed on-premises. + // Possible values for Windows Server operating system are: + // WindowsClient + // WindowsServer + // Possible values for Linux Server operating system are: + // RHELBYOS (for RHEL) + // SLESBYOS (for SUSE) + // For more information, see Azure Hybrid Use Benefit for Windows Server [https://docs.microsoft.com/azure/virtual-machines/windows/hybrid-use-benefit-licensing] + // Azure Hybrid Use Benefit for Linux Server [https://docs.microsoft.com/azure/virtual-machines/linux/azure-hybrid-benefit-linux] + // Minimum api-version: 2015-06-15 + LicenseType *string `json:"licenseType,omitempty"` + + // Specifies the network interfaces of the virtual machine. + NetworkProfile *NetworkProfile `json:"networkProfile,omitempty"` + + // Specifies the network profile configuration of the virtual machine. + NetworkProfileConfiguration *VirtualMachineScaleSetVMNetworkProfileConfiguration `json:"networkProfileConfiguration,omitempty"` + + // Specifies the operating system settings for the virtual machine. + OSProfile *OSProfile `json:"osProfile,omitempty"` + + // Specifies the protection policy of the virtual machine. + ProtectionPolicy *VirtualMachineScaleSetVMProtectionPolicy `json:"protectionPolicy,omitempty"` + + // Specifies the Security related profile settings for the virtual machine. + SecurityProfile *SecurityProfile `json:"securityProfile,omitempty"` + + // Specifies the storage settings for the virtual machine disks. + StorageProfile *StorageProfile `json:"storageProfile,omitempty"` + + // UserData for the VM, which must be base-64 encoded. Customer should not pass any secrets in here. + // Minimum api-version: 2021-03-01 + UserData *string `json:"userData,omitempty"` + + // READ-ONLY; The virtual machine instance view. + InstanceView *VirtualMachineScaleSetVMInstanceView `json:"instanceView,omitempty" azure:"ro"` + + // READ-ONLY; Specifies whether the latest model has been applied to the virtual machine. + LatestModelApplied *bool `json:"latestModelApplied,omitempty" azure:"ro"` + + // READ-ONLY; Specifies whether the model applied to the virtual machine is the model of the virtual machine scale set or + // the customized model for the virtual machine. + ModelDefinitionApplied *string `json:"modelDefinitionApplied,omitempty" azure:"ro"` + + // READ-ONLY; The provisioning state, which only appears in the response. + ProvisioningState *string `json:"provisioningState,omitempty" azure:"ro"` + + // READ-ONLY; Azure VM unique ID. + VMID *string `json:"vmId,omitempty" azure:"ro"` +} + +// VirtualMachineScaleSetVMProtectionPolicy - The protection policy of a virtual machine scale set VM. +type VirtualMachineScaleSetVMProtectionPolicy struct { + // Indicates that the virtual machine scale set VM shouldn't be considered for deletion during a scale-in operation. + ProtectFromScaleIn *bool `json:"protectFromScaleIn,omitempty"` + + // Indicates that model updates or actions (including scale-in) initiated on the virtual machine scale set should not be applied + // to the virtual machine scale set VM. 
+ ProtectFromScaleSetActions *bool `json:"protectFromScaleSetActions,omitempty"` +} + +// VirtualMachineScaleSetVMReimageParameters - Describes a Virtual Machine Scale Set VM Reimage Parameters. +type VirtualMachineScaleSetVMReimageParameters struct { + // Specifies whether to reimage temp disk. Default value: false. Note: This temp disk reimage parameter is only supported + // for VM/VMSS with Ephemeral OS disk. + TempDisk *bool `json:"tempDisk,omitempty"` +} + +// VirtualMachineScaleSetVMRunCommandsClientBeginCreateOrUpdateOptions contains the optional parameters for the VirtualMachineScaleSetVMRunCommandsClient.BeginCreateOrUpdate +// method. +type VirtualMachineScaleSetVMRunCommandsClientBeginCreateOrUpdateOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// VirtualMachineScaleSetVMRunCommandsClientBeginDeleteOptions contains the optional parameters for the VirtualMachineScaleSetVMRunCommandsClient.BeginDelete +// method. +type VirtualMachineScaleSetVMRunCommandsClientBeginDeleteOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// VirtualMachineScaleSetVMRunCommandsClientBeginUpdateOptions contains the optional parameters for the VirtualMachineScaleSetVMRunCommandsClient.BeginUpdate +// method. +type VirtualMachineScaleSetVMRunCommandsClientBeginUpdateOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// VirtualMachineScaleSetVMRunCommandsClientGetOptions contains the optional parameters for the VirtualMachineScaleSetVMRunCommandsClient.Get +// method. +type VirtualMachineScaleSetVMRunCommandsClientGetOptions struct { + // The expand expression to apply on the operation. + Expand *string +} + +// VirtualMachineScaleSetVMRunCommandsClientListOptions contains the optional parameters for the VirtualMachineScaleSetVMRunCommandsClient.List +// method. +type VirtualMachineScaleSetVMRunCommandsClientListOptions struct { + // The expand expression to apply on the operation. + Expand *string +} + +// VirtualMachineScaleSetVMsClientBeginDeallocateOptions contains the optional parameters for the VirtualMachineScaleSetVMsClient.BeginDeallocate +// method. +type VirtualMachineScaleSetVMsClientBeginDeallocateOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// VirtualMachineScaleSetVMsClientBeginDeleteOptions contains the optional parameters for the VirtualMachineScaleSetVMsClient.BeginDelete +// method. +type VirtualMachineScaleSetVMsClientBeginDeleteOptions struct { + // Optional parameter to force delete a virtual machine from a VM scale set. (Feature in Preview) + ForceDeletion *bool + // Resumes the LRO from the provided token. + ResumeToken string +} + +// VirtualMachineScaleSetVMsClientBeginPerformMaintenanceOptions contains the optional parameters for the VirtualMachineScaleSetVMsClient.BeginPerformMaintenance +// method. +type VirtualMachineScaleSetVMsClientBeginPerformMaintenanceOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// VirtualMachineScaleSetVMsClientBeginPowerOffOptions contains the optional parameters for the VirtualMachineScaleSetVMsClient.BeginPowerOff +// method. +type VirtualMachineScaleSetVMsClientBeginPowerOffOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string + // The parameter to request non-graceful VM shutdown. True value for this flag indicates non-graceful shutdown whereas false + // indicates otherwise. 
The default value for this flag is false if not specified. + SkipShutdown *bool +} + +// VirtualMachineScaleSetVMsClientBeginRedeployOptions contains the optional parameters for the VirtualMachineScaleSetVMsClient.BeginRedeploy +// method. +type VirtualMachineScaleSetVMsClientBeginRedeployOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// VirtualMachineScaleSetVMsClientBeginReimageAllOptions contains the optional parameters for the VirtualMachineScaleSetVMsClient.BeginReimageAll +// method. +type VirtualMachineScaleSetVMsClientBeginReimageAllOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// VirtualMachineScaleSetVMsClientBeginReimageOptions contains the optional parameters for the VirtualMachineScaleSetVMsClient.BeginReimage +// method. +type VirtualMachineScaleSetVMsClientBeginReimageOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string + // Parameters for reimaging the virtual machine in the scale set. + VMScaleSetVMReimageInput *VirtualMachineScaleSetVMReimageParameters +} + +// VirtualMachineScaleSetVMsClientBeginRestartOptions contains the optional parameters for the VirtualMachineScaleSetVMsClient.BeginRestart +// method. +type VirtualMachineScaleSetVMsClientBeginRestartOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// VirtualMachineScaleSetVMsClientBeginRunCommandOptions contains the optional parameters for the VirtualMachineScaleSetVMsClient.BeginRunCommand +// method. +type VirtualMachineScaleSetVMsClientBeginRunCommandOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// VirtualMachineScaleSetVMsClientBeginStartOptions contains the optional parameters for the VirtualMachineScaleSetVMsClient.BeginStart +// method. +type VirtualMachineScaleSetVMsClientBeginStartOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// VirtualMachineScaleSetVMsClientBeginUpdateOptions contains the optional parameters for the VirtualMachineScaleSetVMsClient.BeginUpdate +// method. +type VirtualMachineScaleSetVMsClientBeginUpdateOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// VirtualMachineScaleSetVMsClientGetInstanceViewOptions contains the optional parameters for the VirtualMachineScaleSetVMsClient.GetInstanceView +// method. +type VirtualMachineScaleSetVMsClientGetInstanceViewOptions struct { + // placeholder for future optional parameters +} + +// VirtualMachineScaleSetVMsClientGetOptions contains the optional parameters for the VirtualMachineScaleSetVMsClient.Get +// method. +type VirtualMachineScaleSetVMsClientGetOptions struct { + // The expand expression to apply on the operation. 'InstanceView' will retrieve the instance view of the virtual machine. + // 'UserData' will retrieve the UserData of the virtual machine. + Expand *InstanceViewTypes +} + +// VirtualMachineScaleSetVMsClientListOptions contains the optional parameters for the VirtualMachineScaleSetVMsClient.List +// method. +type VirtualMachineScaleSetVMsClientListOptions struct { + // The expand expression to apply to the operation. Allowed values are 'instanceView'. + Expand *string + // The filter to apply to the operation. Allowed values are 'startswith(instanceView/statuses/code, 'PowerState') eq true', + // 'properties/latestModelApplied eq true', 'properties/latestModelApplied eq + // false'. + Filter *string + // The list parameters.
Allowed values are 'instanceView', 'instanceView/statuses'. + Select *string +} + +// VirtualMachineScaleSetVMsClientRetrieveBootDiagnosticsDataOptions contains the optional parameters for the VirtualMachineScaleSetVMsClient.RetrieveBootDiagnosticsData +// method. +type VirtualMachineScaleSetVMsClientRetrieveBootDiagnosticsDataOptions struct { + // Expiration duration in minutes for the SAS URIs with a value between 1 and 1440 minutes. + // NOTE: If not specified, SAS URIs will be generated with a default expiration duration of 120 minutes. + SasURIExpirationTimeInMinutes *int32 +} + +// VirtualMachineScaleSetVMsClientSimulateEvictionOptions contains the optional parameters for the VirtualMachineScaleSetVMsClient.SimulateEviction +// method. +type VirtualMachineScaleSetVMsClientSimulateEvictionOptions struct { + // placeholder for future optional parameters +} + +// VirtualMachineScaleSetsClientBeginCreateOrUpdateOptions contains the optional parameters for the VirtualMachineScaleSetsClient.BeginCreateOrUpdate +// method. +type VirtualMachineScaleSetsClientBeginCreateOrUpdateOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// VirtualMachineScaleSetsClientBeginDeallocateOptions contains the optional parameters for the VirtualMachineScaleSetsClient.BeginDeallocate +// method. +type VirtualMachineScaleSetsClientBeginDeallocateOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string + // A list of virtual machine instance IDs from the VM scale set. + VMInstanceIDs *VirtualMachineScaleSetVMInstanceIDs +} + +// VirtualMachineScaleSetsClientBeginDeleteInstancesOptions contains the optional parameters for the VirtualMachineScaleSetsClient.BeginDeleteInstances +// method. +type VirtualMachineScaleSetsClientBeginDeleteInstancesOptions struct { + // Optional parameter to force delete virtual machines from the VM scale set. (Feature in Preview) + ForceDeletion *bool + // Resumes the LRO from the provided token. + ResumeToken string +} + +// VirtualMachineScaleSetsClientBeginDeleteOptions contains the optional parameters for the VirtualMachineScaleSetsClient.BeginDelete +// method. +type VirtualMachineScaleSetsClientBeginDeleteOptions struct { + // Optional parameter to force delete a VM scale set. (Feature in Preview) + ForceDeletion *bool + // Resumes the LRO from the provided token. + ResumeToken string +} + +// VirtualMachineScaleSetsClientBeginPerformMaintenanceOptions contains the optional parameters for the VirtualMachineScaleSetsClient.BeginPerformMaintenance +// method. +type VirtualMachineScaleSetsClientBeginPerformMaintenanceOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string + // A list of virtual machine instance IDs from the VM scale set. + VMInstanceIDs *VirtualMachineScaleSetVMInstanceIDs +} + +// VirtualMachineScaleSetsClientBeginPowerOffOptions contains the optional parameters for the VirtualMachineScaleSetsClient.BeginPowerOff +// method. +type VirtualMachineScaleSetsClientBeginPowerOffOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string + // The parameter to request non-graceful VM shutdown. True value for this flag indicates non-graceful shutdown whereas false + // indicates otherwise. The default value for this flag is false if not specified. + SkipShutdown *bool + // A list of virtual machine instance IDs from the VM scale set.
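+ // Editorial note, not generated code: leaving VMInstanceIDs nil applies the power-off
+ // to every VM in the scale set (see VirtualMachineScaleSetVMInstanceIDs above), while
+ // an explicit list narrows the operation. A minimal sketch, assuming the azcore
+ // to.Ptr helper is available:
+ //
+ //   opts := &VirtualMachineScaleSetsClientBeginPowerOffOptions{
+ //       SkipShutdown: to.Ptr(true), // request a non-graceful stop
+ //       VMInstanceIDs: &VirtualMachineScaleSetVMInstanceIDs{
+ //           InstanceIDs: []*string{to.Ptr("0"), to.Ptr("1")},
+ //       },
+ //   }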
+ VMInstanceIDs *VirtualMachineScaleSetVMInstanceIDs +} + +// VirtualMachineScaleSetsClientBeginRedeployOptions contains the optional parameters for the VirtualMachineScaleSetsClient.BeginRedeploy +// method. +type VirtualMachineScaleSetsClientBeginRedeployOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string + // A list of virtual machine instance IDs from the VM scale set. + VMInstanceIDs *VirtualMachineScaleSetVMInstanceIDs +} + +// VirtualMachineScaleSetsClientBeginReimageAllOptions contains the optional parameters for the VirtualMachineScaleSetsClient.BeginReimageAll +// method. +type VirtualMachineScaleSetsClientBeginReimageAllOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string + // A list of virtual machine instance IDs from the VM scale set. + VMInstanceIDs *VirtualMachineScaleSetVMInstanceIDs +} + +// VirtualMachineScaleSetsClientBeginReimageOptions contains the optional parameters for the VirtualMachineScaleSetsClient.BeginReimage +// method. +type VirtualMachineScaleSetsClientBeginReimageOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string + // Parameters for reimaging the VM scale set. + VMScaleSetReimageInput *VirtualMachineScaleSetReimageParameters +} + +// VirtualMachineScaleSetsClientBeginRestartOptions contains the optional parameters for the VirtualMachineScaleSetsClient.BeginRestart +// method. +type VirtualMachineScaleSetsClientBeginRestartOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string + // A list of virtual machine instance IDs from the VM scale set. + VMInstanceIDs *VirtualMachineScaleSetVMInstanceIDs +} + +// VirtualMachineScaleSetsClientBeginSetOrchestrationServiceStateOptions contains the optional parameters for the VirtualMachineScaleSetsClient.BeginSetOrchestrationServiceState +// method. +type VirtualMachineScaleSetsClientBeginSetOrchestrationServiceStateOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// VirtualMachineScaleSetsClientBeginStartOptions contains the optional parameters for the VirtualMachineScaleSetsClient.BeginStart +// method. +type VirtualMachineScaleSetsClientBeginStartOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string + // A list of virtual machine instance IDs from the VM scale set. + VMInstanceIDs *VirtualMachineScaleSetVMInstanceIDs +} + +// VirtualMachineScaleSetsClientBeginUpdateInstancesOptions contains the optional parameters for the VirtualMachineScaleSetsClient.BeginUpdateInstances +// method. +type VirtualMachineScaleSetsClientBeginUpdateInstancesOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// VirtualMachineScaleSetsClientBeginUpdateOptions contains the optional parameters for the VirtualMachineScaleSetsClient.BeginUpdate +// method. +type VirtualMachineScaleSetsClientBeginUpdateOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// VirtualMachineScaleSetsClientConvertToSinglePlacementGroupOptions contains the optional parameters for the VirtualMachineScaleSetsClient.ConvertToSinglePlacementGroup +// method.
+type VirtualMachineScaleSetsClientConvertToSinglePlacementGroupOptions struct { + // placeholder for future optional parameters +} + +// VirtualMachineScaleSetsClientForceRecoveryServiceFabricPlatformUpdateDomainWalkOptions contains the optional parameters +// for the VirtualMachineScaleSetsClient.ForceRecoveryServiceFabricPlatformUpdateDomainWalk method. +type VirtualMachineScaleSetsClientForceRecoveryServiceFabricPlatformUpdateDomainWalkOptions struct { + // The placement group id for which the manual recovery walk is requested. + PlacementGroupID *string + // The zone in which the manual recovery walk is requested for a cross-zone virtual machine scale set. + Zone *string +} + +// VirtualMachineScaleSetsClientGetInstanceViewOptions contains the optional parameters for the VirtualMachineScaleSetsClient.GetInstanceView +// method. +type VirtualMachineScaleSetsClientGetInstanceViewOptions struct { + // placeholder for future optional parameters +} + +// VirtualMachineScaleSetsClientGetOSUpgradeHistoryOptions contains the optional parameters for the VirtualMachineScaleSetsClient.GetOSUpgradeHistory +// method. +type VirtualMachineScaleSetsClientGetOSUpgradeHistoryOptions struct { + // placeholder for future optional parameters +} + +// VirtualMachineScaleSetsClientGetOptions contains the optional parameters for the VirtualMachineScaleSetsClient.Get method. +type VirtualMachineScaleSetsClientGetOptions struct { + // The expand expression to apply on the operation. 'UserData' retrieves the UserData property of the VM scale set that was + // provided by the user during the VM scale set Create/Update operation. + Expand *ExpandTypesForGetVMScaleSets +} + +// VirtualMachineScaleSetsClientListAllOptions contains the optional parameters for the VirtualMachineScaleSetsClient.ListAll +// method. +type VirtualMachineScaleSetsClientListAllOptions struct { + // placeholder for future optional parameters +} + +// VirtualMachineScaleSetsClientListByLocationOptions contains the optional parameters for the VirtualMachineScaleSetsClient.ListByLocation +// method. +type VirtualMachineScaleSetsClientListByLocationOptions struct { + // placeholder for future optional parameters +} + +// VirtualMachineScaleSetsClientListOptions contains the optional parameters for the VirtualMachineScaleSetsClient.List method. +type VirtualMachineScaleSetsClientListOptions struct { + // placeholder for future optional parameters +} + +// VirtualMachineScaleSetsClientListSKUsOptions contains the optional parameters for the VirtualMachineScaleSetsClient.ListSKUs +// method. +type VirtualMachineScaleSetsClientListSKUsOptions struct { + // placeholder for future optional parameters +} + +// VirtualMachineSize - Describes the properties of a VM size. +type VirtualMachineSize struct { + // The maximum number of data disks that can be attached to the virtual machine size. + MaxDataDiskCount *int32 `json:"maxDataDiskCount,omitempty"` + + // The amount of memory, in MB, supported by the virtual machine size. + MemoryInMB *int32 `json:"memoryInMB,omitempty"` + + // The name of the virtual machine size. + Name *string `json:"name,omitempty"` + + // The number of cores supported by the virtual machine size. For Constrained vCPU capable VM sizes, this number represents + // the total vCPUs of quota that the VM uses.
For accurate vCPU count, please + // refer to https://docs.microsoft.com/azure/virtual-machines/constrained-vcpu or https://docs.microsoft.com/rest/api/compute/resourceskus/list + NumberOfCores *int32 `json:"numberOfCores,omitempty"` + + // The OS disk size, in MB, allowed by the virtual machine size. + OSDiskSizeInMB *int32 `json:"osDiskSizeInMB,omitempty"` + + // The resource disk size, in MB, allowed by the virtual machine size. + ResourceDiskSizeInMB *int32 `json:"resourceDiskSizeInMB,omitempty"` +} + +// VirtualMachineSizeListResult - The List Virtual Machine Sizes operation response. +type VirtualMachineSizeListResult struct { + // The list of virtual machine sizes. + Value []*VirtualMachineSize `json:"value,omitempty"` +} + +// VirtualMachineSizesClientListOptions contains the optional parameters for the VirtualMachineSizesClient.List method. +type VirtualMachineSizesClientListOptions struct { + // placeholder for future optional parameters +} + +// VirtualMachineSoftwarePatchProperties - Describes the properties of a Virtual Machine software patch. +type VirtualMachineSoftwarePatchProperties struct { + // READ-ONLY; The activity ID of the operation that produced this result. It is used to correlate across CRP and extension + // logs. + ActivityID *string `json:"activityId,omitempty" azure:"ro"` + + // READ-ONLY; Describes the availability of a given patch. + AssessmentState *PatchAssessmentState `json:"assessmentState,omitempty" azure:"ro"` + + // READ-ONLY; The classification(s) of the patch as provided by the patch publisher. + Classifications []*string `json:"classifications,omitempty" azure:"ro"` + + // READ-ONLY; The KBID of the patch. Only applies to Windows patches. + KbID *string `json:"kbId,omitempty" azure:"ro"` + + // READ-ONLY; The UTC timestamp of the last update to this patch record. + LastModifiedDateTime *time.Time `json:"lastModifiedDateTime,omitempty" azure:"ro"` + + // READ-ONLY; The friendly name of the patch. + Name *string `json:"name,omitempty" azure:"ro"` + + // READ-ONLY; A unique identifier for the patch. + PatchID *string `json:"patchId,omitempty" azure:"ro"` + + // READ-ONLY; The UTC timestamp when the repository published this patch. + PublishedDate *time.Time `json:"publishedDate,omitempty" azure:"ro"` + + // READ-ONLY; Describes the reboot requirements of the patch. + RebootBehavior *VMGuestPatchRebootBehavior `json:"rebootBehavior,omitempty" azure:"ro"` + + // READ-ONLY; The version number of the patch. This property applies only to Linux patches. + Version *string `json:"version,omitempty" azure:"ro"` +} + +// VirtualMachineStatusCodeCount - The status code and count of the virtual machine scale set instance view status summary. +type VirtualMachineStatusCodeCount struct { + // READ-ONLY; The instance view status code. + Code *string `json:"code,omitempty" azure:"ro"` + + // READ-ONLY; The number of instances having a particular status code. + Count *int32 `json:"count,omitempty" azure:"ro"` +} + +// VirtualMachineUpdate - Describes a Virtual Machine Update. +type VirtualMachineUpdate struct { + // The identity of the virtual machine, if configured. + Identity *VirtualMachineIdentity `json:"identity,omitempty"` + + // Specifies information about the marketplace image used to create the virtual machine. This element is only used for marketplace + // images. Before you can use a marketplace image from an API, you must + // enable the image for programmatic use.
In the Azure portal, find the marketplace image that you want to use and then click + // Want to deploy programmatically, Get Started ->. Enter any required + // information and then click Save. + Plan *Plan `json:"plan,omitempty"` + + // Describes the properties of a Virtual Machine. + Properties *VirtualMachineProperties `json:"properties,omitempty"` + + // Resource tags + Tags map[string]*string `json:"tags,omitempty"` + + // The virtual machine zones. + Zones []*string `json:"zones,omitempty"` +} + +// VirtualMachinesClientBeginAssessPatchesOptions contains the optional parameters for the VirtualMachinesClient.BeginAssessPatches +// method. +type VirtualMachinesClientBeginAssessPatchesOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// VirtualMachinesClientBeginCaptureOptions contains the optional parameters for the VirtualMachinesClient.BeginCapture method. +type VirtualMachinesClientBeginCaptureOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// VirtualMachinesClientBeginConvertToManagedDisksOptions contains the optional parameters for the VirtualMachinesClient.BeginConvertToManagedDisks +// method. +type VirtualMachinesClientBeginConvertToManagedDisksOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// VirtualMachinesClientBeginCreateOrUpdateOptions contains the optional parameters for the VirtualMachinesClient.BeginCreateOrUpdate +// method. +type VirtualMachinesClientBeginCreateOrUpdateOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// VirtualMachinesClientBeginDeallocateOptions contains the optional parameters for the VirtualMachinesClient.BeginDeallocate +// method. +type VirtualMachinesClientBeginDeallocateOptions struct { + // Optional parameter to hibernate a virtual machine. (Feature in Preview) + Hibernate *bool + // Resumes the LRO from the provided token. + ResumeToken string +} + +// VirtualMachinesClientBeginDeleteOptions contains the optional parameters for the VirtualMachinesClient.BeginDelete method. +type VirtualMachinesClientBeginDeleteOptions struct { + // Optional parameter to force delete virtual machines. + ForceDeletion *bool + // Resumes the LRO from the provided token. + ResumeToken string +} + +// VirtualMachinesClientBeginInstallPatchesOptions contains the optional parameters for the VirtualMachinesClient.BeginInstallPatches +// method. +type VirtualMachinesClientBeginInstallPatchesOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// VirtualMachinesClientBeginPerformMaintenanceOptions contains the optional parameters for the VirtualMachinesClient.BeginPerformMaintenance +// method. +type VirtualMachinesClientBeginPerformMaintenanceOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// VirtualMachinesClientBeginPowerOffOptions contains the optional parameters for the VirtualMachinesClient.BeginPowerOff +// method. +type VirtualMachinesClientBeginPowerOffOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string + // The parameter to request non-graceful VM shutdown. True value for this flag indicates non-graceful shutdown whereas false + // indicates otherwise. The default value for this flag is false if not specified. + SkipShutdown *bool +} + +// VirtualMachinesClientBeginReapplyOptions contains the optional parameters for the VirtualMachinesClient.BeginReapply method.
+type VirtualMachinesClientBeginReapplyOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// VirtualMachinesClientBeginRedeployOptions contains the optional parameters for the VirtualMachinesClient.BeginRedeploy +// method. +type VirtualMachinesClientBeginRedeployOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// VirtualMachinesClientBeginReimageOptions contains the optional parameters for the VirtualMachinesClient.BeginReimage method. +type VirtualMachinesClientBeginReimageOptions struct { + // Parameters supplied to the Reimage Virtual Machine operation. + Parameters *VirtualMachineReimageParameters + // Resumes the LRO from the provided token. + ResumeToken string +} + +// VirtualMachinesClientBeginRestartOptions contains the optional parameters for the VirtualMachinesClient.BeginRestart method. +type VirtualMachinesClientBeginRestartOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// VirtualMachinesClientBeginRunCommandOptions contains the optional parameters for the VirtualMachinesClient.BeginRunCommand +// method. +type VirtualMachinesClientBeginRunCommandOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// VirtualMachinesClientBeginStartOptions contains the optional parameters for the VirtualMachinesClient.BeginStart method. +type VirtualMachinesClientBeginStartOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// VirtualMachinesClientBeginUpdateOptions contains the optional parameters for the VirtualMachinesClient.BeginUpdate method. +type VirtualMachinesClientBeginUpdateOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// VirtualMachinesClientGeneralizeOptions contains the optional parameters for the VirtualMachinesClient.Generalize method. +type VirtualMachinesClientGeneralizeOptions struct { + // placeholder for future optional parameters +} + +// VirtualMachinesClientGetOptions contains the optional parameters for the VirtualMachinesClient.Get method. +type VirtualMachinesClientGetOptions struct { + // The expand expression to apply on the operation. 'InstanceView' retrieves a snapshot of the runtime properties of the virtual + // machine that is managed by the platform and can change outside of control + // plane operations. 'UserData' retrieves the UserData property as part of the VM model view that was provided by the user + // during the VM Create/Update operation. + Expand *InstanceViewTypes +} + +// VirtualMachinesClientInstanceViewOptions contains the optional parameters for the VirtualMachinesClient.InstanceView method. +type VirtualMachinesClientInstanceViewOptions struct { + // placeholder for future optional parameters +} + +// VirtualMachinesClientListAllOptions contains the optional parameters for the VirtualMachinesClient.ListAll method. +type VirtualMachinesClientListAllOptions struct { + // The system query option to filter VMs returned in the response. Allowed value is 'virtualMachineScaleSet/id' eq + // '/subscriptions/{subId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmssName}' + Filter *string + // statusOnly=true enables fetching runtime status of all Virtual Machines in the subscription. + StatusOnly *string +} + +// VirtualMachinesClientListAvailableSizesOptions contains the optional parameters for the VirtualMachinesClient.ListAvailableSizes +// method.
+type VirtualMachinesClientListAvailableSizesOptions struct { + // placeholder for future optional parameters +} + +// VirtualMachinesClientListByLocationOptions contains the optional parameters for the VirtualMachinesClient.ListByLocation +// method. +type VirtualMachinesClientListByLocationOptions struct { + // placeholder for future optional parameters +} + +// VirtualMachinesClientListOptions contains the optional parameters for the VirtualMachinesClient.List method. +type VirtualMachinesClientListOptions struct { + // The system query option to filter VMs returned in the response. Allowed value is 'virtualMachineScaleSet/id' eq + // '/subscriptions/{subId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmssName}' + Filter *string +} + +// VirtualMachinesClientRetrieveBootDiagnosticsDataOptions contains the optional parameters for the VirtualMachinesClient.RetrieveBootDiagnosticsData +// method. +type VirtualMachinesClientRetrieveBootDiagnosticsDataOptions struct { + // Expiration duration in minutes for the SAS URIs with a value between 1 and 1440 minutes. + // NOTE: If not specified, SAS URIs will be generated with a default expiration duration of 120 minutes. + SasURIExpirationTimeInMinutes *int32 +} + +// VirtualMachinesClientSimulateEvictionOptions contains the optional parameters for the VirtualMachinesClient.SimulateEviction +// method. +type VirtualMachinesClientSimulateEvictionOptions struct { + // placeholder for future optional parameters +} + +// WinRMConfiguration - Describes Windows Remote Management configuration of the VM. +type WinRMConfiguration struct { + // The list of Windows Remote Management listeners. + Listeners []*WinRMListener `json:"listeners,omitempty"` +} + +// WinRMListener - Describes Protocol and thumbprint of Windows Remote Management listener. +type WinRMListener struct { + // This is the URL of a certificate that has been uploaded to Key Vault as a secret. For adding a secret to the Key Vault, + // see Add a key or secret to the key vault + // [https://docs.microsoft.com/azure/key-vault/key-vault-get-started/#add]. In this case, your certificate needs to be the + // Base64 encoding of the following JSON Object which is encoded in UTF-8: + // { + // "data":"<Base64-encoded-certificate>", + // "dataType":"pfx", + // "password":"<pfx-file-password>" + // } + // To install certificates on a virtual machine it is recommended to use the Azure Key Vault virtual machine extension for + // Linux + // [https://docs.microsoft.com/azure/virtual-machines/extensions/key-vault-linux] or the Azure Key Vault virtual machine extension + // for Windows + // [https://docs.microsoft.com/azure/virtual-machines/extensions/key-vault-windows]. + CertificateURL *string `json:"certificateUrl,omitempty"` + + // Specifies the protocol of WinRM listener. + // Possible values are: + // http + // https + Protocol *ProtocolTypes `json:"protocol,omitempty"` +} + +// WindowsConfiguration - Specifies Windows operating system settings on the virtual machine. +type WindowsConfiguration struct { + // Specifies additional base-64 encoded XML formatted information that can be included in the Unattend.xml file, which is + // used by Windows Setup. + AdditionalUnattendContent []*AdditionalUnattendContent `json:"additionalUnattendContent,omitempty"` + + // Indicates whether Automatic Updates is enabled for the Windows virtual machine. Default value is true. + // For virtual machine scale sets, this property can be updated and updates will take effect on OS reprovisioning.
+	EnableAutomaticUpdates *bool `json:"enableAutomaticUpdates,omitempty"`
+
+	// [Preview Feature] Specifies settings related to VM Guest Patching on Windows.
+	PatchSettings *PatchSettings `json:"patchSettings,omitempty"`
+
+	// Indicates whether the virtual machine agent should be provisioned on the virtual machine.
+	// When this property is not specified in the request body, the default behavior is to set it to true. This ensures that the
+	// VM Agent is installed on the VM so that extensions can be added to the VM later.
+	ProvisionVMAgent *bool `json:"provisionVMAgent,omitempty"`
+
+	// Specifies the time zone of the virtual machine, e.g. "Pacific Standard Time".
+	// Possible values can be any TimeZoneInfo.Id [https://docs.microsoft.com/dotnet/api/system.timezoneinfo.id?#System_TimeZoneInfo_Id]
+	// value from the time zones returned by TimeZoneInfo.GetSystemTimeZones
+	// [https://docs.microsoft.com/dotnet/api/system.timezoneinfo.getsystemtimezones].
+	TimeZone *string `json:"timeZone,omitempty"`
+
+	// Specifies the Windows Remote Management listeners. This enables remote Windows PowerShell.
+	WinRM *WinRMConfiguration `json:"winRM,omitempty"`
+}
+
+// WindowsParameters - Input for InstallPatches on a Windows VM, as directly received by the API.
+type WindowsParameters struct {
+	// The update classifications to select when installing patches for Windows.
+	ClassificationsToInclude []*VMGuestPatchClassificationWindows `json:"classificationsToInclude,omitempty"`
+
+	// Filters out KBs that don't have an InstallationRebootBehavior of 'NeverReboots' when this is set to true.
+	ExcludeKbsRequiringReboot *bool `json:"excludeKbsRequiringReboot,omitempty"`
+
+	// KBs to exclude in the patch operation.
+	KbNumbersToExclude []*string `json:"kbNumbersToExclude,omitempty"`
+
+	// KBs to include in the patch operation.
+	KbNumbersToInclude []*string `json:"kbNumbersToInclude,omitempty"`
+
+	// This is used to install patches that were published on or before this given max published date.
+	MaxPatchPublishDate *time.Time `json:"maxPatchPublishDate,omitempty"`
+}
+
+// WindowsVMGuestPatchAutomaticByPlatformSettings - Specifies additional settings to be applied when patch mode AutomaticByPlatform
+// is selected in Windows patch settings.
+type WindowsVMGuestPatchAutomaticByPlatformSettings struct {
+	// Specifies the reboot setting for all AutomaticByPlatform patch installation operations.
+	RebootSetting *WindowsVMGuestPatchAutomaticByPlatformRebootSetting `json:"rebootSetting,omitempty"`
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_models_serde.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_models_serde.go
new file mode 100644
index 000000000..3de398566
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_models_serde.go
@@ -0,0 +1,3416 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+package armcompute
+
+import (
+	"encoding/json"
+	"fmt"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
+	"reflect"
+)
+
+// MarshalJSON implements the json.Marshaller interface for type APIError.
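+//
+// Editorial sketch (hypothetical values, not generated code): callers normally never
+// invoke this method directly; encoding/json dispatches to it automatically:
+//
+//	code, msg := "OverconstrainedAllocationRequest", "allocation failed"
+//	b, err := json.Marshal(APIError{Code: &code, Message: &msg})
+//	// b == {"code":"OverconstrainedAllocationRequest","message":"allocation failed"}
+//
+// The unexported populate helper (defined elsewhere in this file) skips nil fields,
+// which is why the unset details, innererror, and target keys do not appear in the
+// output; the azcore and reflect imports above support that nil/null handling.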
+func (a APIError) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "code", a.Code) + populate(objectMap, "details", a.Details) + populate(objectMap, "innererror", a.Innererror) + populate(objectMap, "message", a.Message) + populate(objectMap, "target", a.Target) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type ApplicationProfile. +func (a ApplicationProfile) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "galleryApplications", a.GalleryApplications) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type AvailabilitySet. +func (a AvailabilitySet) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "id", a.ID) + populate(objectMap, "location", a.Location) + populate(objectMap, "name", a.Name) + populate(objectMap, "properties", a.Properties) + populate(objectMap, "sku", a.SKU) + populate(objectMap, "tags", a.Tags) + populate(objectMap, "type", a.Type) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type AvailabilitySetProperties. +func (a AvailabilitySetProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "platformFaultDomainCount", a.PlatformFaultDomainCount) + populate(objectMap, "platformUpdateDomainCount", a.PlatformUpdateDomainCount) + populate(objectMap, "proximityPlacementGroup", a.ProximityPlacementGroup) + populate(objectMap, "statuses", a.Statuses) + populate(objectMap, "virtualMachines", a.VirtualMachines) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type AvailabilitySetUpdate. +func (a AvailabilitySetUpdate) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "properties", a.Properties) + populate(objectMap, "sku", a.SKU) + populate(objectMap, "tags", a.Tags) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type AvailablePatchSummary. +func (a AvailablePatchSummary) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "assessmentActivityId", a.AssessmentActivityID) + populate(objectMap, "criticalAndSecurityPatchCount", a.CriticalAndSecurityPatchCount) + populate(objectMap, "error", a.Error) + populateTimeRFC3339(objectMap, "lastModifiedTime", a.LastModifiedTime) + populate(objectMap, "otherPatchCount", a.OtherPatchCount) + populate(objectMap, "rebootPending", a.RebootPending) + populateTimeRFC3339(objectMap, "startTime", a.StartTime) + populate(objectMap, "status", a.Status) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type AvailablePatchSummary. 
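+//
+// A hedged decoding sketch: because this method is declared on the pointer receiver,
+// json.Unmarshal must be handed a pointer for it to be used:
+//
+//	data := []byte(`{"status":"Succeeded","rebootPending":false}`)
+//	var s AvailablePatchSummary
+//	if err := json.Unmarshal(data, &s); err != nil {
+//		// handle the decode error
+//	}
+//
+// Keys the model does not recognize are simply ignored by the switch below, so
+// additive service-side changes do not break older clients.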
+func (a *AvailablePatchSummary) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "assessmentActivityId": + err = unpopulate(val, "AssessmentActivityID", &a.AssessmentActivityID) + delete(rawMsg, key) + case "criticalAndSecurityPatchCount": + err = unpopulate(val, "CriticalAndSecurityPatchCount", &a.CriticalAndSecurityPatchCount) + delete(rawMsg, key) + case "error": + err = unpopulate(val, "Error", &a.Error) + delete(rawMsg, key) + case "lastModifiedTime": + err = unpopulateTimeRFC3339(val, "LastModifiedTime", &a.LastModifiedTime) + delete(rawMsg, key) + case "otherPatchCount": + err = unpopulate(val, "OtherPatchCount", &a.OtherPatchCount) + delete(rawMsg, key) + case "rebootPending": + err = unpopulate(val, "RebootPending", &a.RebootPending) + delete(rawMsg, key) + case "startTime": + err = unpopulateTimeRFC3339(val, "StartTime", &a.StartTime) + delete(rawMsg, key) + case "status": + err = unpopulate(val, "Status", &a.Status) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type CapacityReservation. +func (c CapacityReservation) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "id", c.ID) + populate(objectMap, "location", c.Location) + populate(objectMap, "name", c.Name) + populate(objectMap, "properties", c.Properties) + populate(objectMap, "sku", c.SKU) + populate(objectMap, "tags", c.Tags) + populate(objectMap, "type", c.Type) + populate(objectMap, "zones", c.Zones) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type CapacityReservationGroup. +func (c CapacityReservationGroup) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "id", c.ID) + populate(objectMap, "location", c.Location) + populate(objectMap, "name", c.Name) + populate(objectMap, "properties", c.Properties) + populate(objectMap, "tags", c.Tags) + populate(objectMap, "type", c.Type) + populate(objectMap, "zones", c.Zones) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type CapacityReservationGroupInstanceView. +func (c CapacityReservationGroupInstanceView) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "capacityReservations", c.CapacityReservations) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type CapacityReservationGroupProperties. +func (c CapacityReservationGroupProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "capacityReservations", c.CapacityReservations) + populate(objectMap, "instanceView", c.InstanceView) + populate(objectMap, "virtualMachinesAssociated", c.VirtualMachinesAssociated) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type CapacityReservationGroupUpdate. 
+func (c CapacityReservationGroupUpdate) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "properties", c.Properties) + populate(objectMap, "tags", c.Tags) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type CapacityReservationInstanceView. +func (c CapacityReservationInstanceView) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "statuses", c.Statuses) + populate(objectMap, "utilizationInfo", c.UtilizationInfo) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type CapacityReservationInstanceViewWithName. +func (c CapacityReservationInstanceViewWithName) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "name", c.Name) + populate(objectMap, "statuses", c.Statuses) + populate(objectMap, "utilizationInfo", c.UtilizationInfo) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type CapacityReservationProperties. +func (c CapacityReservationProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "instanceView", c.InstanceView) + populate(objectMap, "provisioningState", c.ProvisioningState) + populateTimeRFC3339(objectMap, "provisioningTime", c.ProvisioningTime) + populate(objectMap, "reservationId", c.ReservationID) + populateTimeRFC3339(objectMap, "timeCreated", c.TimeCreated) + populate(objectMap, "virtualMachinesAssociated", c.VirtualMachinesAssociated) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type CapacityReservationProperties. +func (c *CapacityReservationProperties) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "instanceView": + err = unpopulate(val, "InstanceView", &c.InstanceView) + delete(rawMsg, key) + case "provisioningState": + err = unpopulate(val, "ProvisioningState", &c.ProvisioningState) + delete(rawMsg, key) + case "provisioningTime": + err = unpopulateTimeRFC3339(val, "ProvisioningTime", &c.ProvisioningTime) + delete(rawMsg, key) + case "reservationId": + err = unpopulate(val, "ReservationID", &c.ReservationID) + delete(rawMsg, key) + case "timeCreated": + err = unpopulateTimeRFC3339(val, "TimeCreated", &c.TimeCreated) + delete(rawMsg, key) + case "virtualMachinesAssociated": + err = unpopulate(val, "VirtualMachinesAssociated", &c.VirtualMachinesAssociated) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type CapacityReservationUpdate. +func (c CapacityReservationUpdate) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "properties", c.Properties) + populate(objectMap, "sku", c.SKU) + populate(objectMap, "tags", c.Tags) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type CapacityReservationUtilization. 
+func (c CapacityReservationUtilization) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "virtualMachinesAllocated", c.VirtualMachinesAllocated) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type CloudService. +func (c CloudService) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "id", c.ID) + populate(objectMap, "location", c.Location) + populate(objectMap, "name", c.Name) + populate(objectMap, "properties", c.Properties) + populate(objectMap, "tags", c.Tags) + populate(objectMap, "type", c.Type) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type CloudServiceExtensionProfile. +func (c CloudServiceExtensionProfile) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "extensions", c.Extensions) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type CloudServiceExtensionProperties. +func (c CloudServiceExtensionProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "autoUpgradeMinorVersion", c.AutoUpgradeMinorVersion) + populate(objectMap, "forceUpdateTag", c.ForceUpdateTag) + populate(objectMap, "protectedSettings", c.ProtectedSettings) + populate(objectMap, "protectedSettingsFromKeyVault", c.ProtectedSettingsFromKeyVault) + populate(objectMap, "provisioningState", c.ProvisioningState) + populate(objectMap, "publisher", c.Publisher) + populate(objectMap, "rolesAppliedTo", c.RolesAppliedTo) + populate(objectMap, "settings", c.Settings) + populate(objectMap, "type", c.Type) + populate(objectMap, "typeHandlerVersion", c.TypeHandlerVersion) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type CloudServiceNetworkProfile. +func (c CloudServiceNetworkProfile) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "loadBalancerConfigurations", c.LoadBalancerConfigurations) + populate(objectMap, "swappableCloudService", c.SwappableCloudService) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type CloudServiceOsProfile. +func (c CloudServiceOsProfile) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "secrets", c.Secrets) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type CloudServiceRoleProfile. +func (c CloudServiceRoleProfile) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "roles", c.Roles) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type CloudServiceUpdate. +func (c CloudServiceUpdate) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "tags", c.Tags) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type CloudServiceVaultSecretGroup. +func (c CloudServiceVaultSecretGroup) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "sourceVault", c.SourceVault) + populate(objectMap, "vaultCertificates", c.VaultCertificates) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type CommunityGalleryImageProperties. 
+func (c *CommunityGalleryImageProperties) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "disallowed": + err = unpopulate(val, "Disallowed", &c.Disallowed) + delete(rawMsg, key) + case "endOfLifeDate": + err = unpopulateTimeRFC3339(val, "EndOfLifeDate", &c.EndOfLifeDate) + delete(rawMsg, key) + case "features": + err = unpopulate(val, "Features", &c.Features) + delete(rawMsg, key) + case "hyperVGeneration": + err = unpopulate(val, "HyperVGeneration", &c.HyperVGeneration) + delete(rawMsg, key) + case "identifier": + err = unpopulate(val, "Identifier", &c.Identifier) + delete(rawMsg, key) + case "osState": + err = unpopulate(val, "OSState", &c.OSState) + delete(rawMsg, key) + case "osType": + err = unpopulate(val, "OSType", &c.OSType) + delete(rawMsg, key) + case "purchasePlan": + err = unpopulate(val, "PurchasePlan", &c.PurchasePlan) + delete(rawMsg, key) + case "recommended": + err = unpopulate(val, "Recommended", &c.Recommended) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type CommunityGalleryImageVersionProperties. +func (c *CommunityGalleryImageVersionProperties) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "endOfLifeDate": + err = unpopulateTimeRFC3339(val, "EndOfLifeDate", &c.EndOfLifeDate) + delete(rawMsg, key) + case "publishedDate": + err = unpopulateTimeRFC3339(val, "PublishedDate", &c.PublishedDate) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type DedicatedHost. +func (d DedicatedHost) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "id", d.ID) + populate(objectMap, "location", d.Location) + populate(objectMap, "name", d.Name) + populate(objectMap, "properties", d.Properties) + populate(objectMap, "sku", d.SKU) + populate(objectMap, "tags", d.Tags) + populate(objectMap, "type", d.Type) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type DedicatedHostAvailableCapacity. +func (d DedicatedHostAvailableCapacity) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "allocatableVMs", d.AllocatableVMs) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type DedicatedHostGroup. +func (d DedicatedHostGroup) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "id", d.ID) + populate(objectMap, "location", d.Location) + populate(objectMap, "name", d.Name) + populate(objectMap, "properties", d.Properties) + populate(objectMap, "tags", d.Tags) + populate(objectMap, "type", d.Type) + populate(objectMap, "zones", d.Zones) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type DedicatedHostGroupInstanceView. 
+func (d DedicatedHostGroupInstanceView) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "hosts", d.Hosts) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type DedicatedHostGroupProperties. +func (d DedicatedHostGroupProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "additionalCapabilities", d.AdditionalCapabilities) + populate(objectMap, "hosts", d.Hosts) + populate(objectMap, "instanceView", d.InstanceView) + populate(objectMap, "platformFaultDomainCount", d.PlatformFaultDomainCount) + populate(objectMap, "supportAutomaticPlacement", d.SupportAutomaticPlacement) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type DedicatedHostGroupUpdate. +func (d DedicatedHostGroupUpdate) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "properties", d.Properties) + populate(objectMap, "tags", d.Tags) + populate(objectMap, "zones", d.Zones) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type DedicatedHostInstanceView. +func (d DedicatedHostInstanceView) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "assetId", d.AssetID) + populate(objectMap, "availableCapacity", d.AvailableCapacity) + populate(objectMap, "statuses", d.Statuses) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type DedicatedHostInstanceViewWithName. +func (d DedicatedHostInstanceViewWithName) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "assetId", d.AssetID) + populate(objectMap, "availableCapacity", d.AvailableCapacity) + populate(objectMap, "name", d.Name) + populate(objectMap, "statuses", d.Statuses) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type DedicatedHostProperties. +func (d DedicatedHostProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "autoReplaceOnFailure", d.AutoReplaceOnFailure) + populate(objectMap, "hostId", d.HostID) + populate(objectMap, "instanceView", d.InstanceView) + populate(objectMap, "licenseType", d.LicenseType) + populate(objectMap, "platformFaultDomain", d.PlatformFaultDomain) + populate(objectMap, "provisioningState", d.ProvisioningState) + populateTimeRFC3339(objectMap, "provisioningTime", d.ProvisioningTime) + populateTimeRFC3339(objectMap, "timeCreated", d.TimeCreated) + populate(objectMap, "virtualMachines", d.VirtualMachines) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type DedicatedHostProperties. 
+func (d *DedicatedHostProperties) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "autoReplaceOnFailure": + err = unpopulate(val, "AutoReplaceOnFailure", &d.AutoReplaceOnFailure) + delete(rawMsg, key) + case "hostId": + err = unpopulate(val, "HostID", &d.HostID) + delete(rawMsg, key) + case "instanceView": + err = unpopulate(val, "InstanceView", &d.InstanceView) + delete(rawMsg, key) + case "licenseType": + err = unpopulate(val, "LicenseType", &d.LicenseType) + delete(rawMsg, key) + case "platformFaultDomain": + err = unpopulate(val, "PlatformFaultDomain", &d.PlatformFaultDomain) + delete(rawMsg, key) + case "provisioningState": + err = unpopulate(val, "ProvisioningState", &d.ProvisioningState) + delete(rawMsg, key) + case "provisioningTime": + err = unpopulateTimeRFC3339(val, "ProvisioningTime", &d.ProvisioningTime) + delete(rawMsg, key) + case "timeCreated": + err = unpopulateTimeRFC3339(val, "TimeCreated", &d.TimeCreated) + delete(rawMsg, key) + case "virtualMachines": + err = unpopulate(val, "VirtualMachines", &d.VirtualMachines) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type DedicatedHostUpdate. +func (d DedicatedHostUpdate) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "properties", d.Properties) + populate(objectMap, "tags", d.Tags) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type Disallowed. +func (d Disallowed) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "diskTypes", d.DiskTypes) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type Disk. +func (d Disk) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "extendedLocation", d.ExtendedLocation) + populate(objectMap, "id", d.ID) + populate(objectMap, "location", d.Location) + populate(objectMap, "managedBy", d.ManagedBy) + populate(objectMap, "managedByExtended", d.ManagedByExtended) + populate(objectMap, "name", d.Name) + populate(objectMap, "properties", d.Properties) + populate(objectMap, "sku", d.SKU) + populate(objectMap, "tags", d.Tags) + populate(objectMap, "type", d.Type) + populate(objectMap, "zones", d.Zones) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type DiskAccess. +func (d DiskAccess) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "extendedLocation", d.ExtendedLocation) + populate(objectMap, "id", d.ID) + populate(objectMap, "location", d.Location) + populate(objectMap, "name", d.Name) + populate(objectMap, "properties", d.Properties) + populate(objectMap, "tags", d.Tags) + populate(objectMap, "type", d.Type) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type DiskAccessProperties. 
+func (d DiskAccessProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "privateEndpointConnections", d.PrivateEndpointConnections) + populate(objectMap, "provisioningState", d.ProvisioningState) + populateTimeRFC3339(objectMap, "timeCreated", d.TimeCreated) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type DiskAccessProperties. +func (d *DiskAccessProperties) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "privateEndpointConnections": + err = unpopulate(val, "PrivateEndpointConnections", &d.PrivateEndpointConnections) + delete(rawMsg, key) + case "provisioningState": + err = unpopulate(val, "ProvisioningState", &d.ProvisioningState) + delete(rawMsg, key) + case "timeCreated": + err = unpopulateTimeRFC3339(val, "TimeCreated", &d.TimeCreated) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type DiskAccessUpdate. +func (d DiskAccessUpdate) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "tags", d.Tags) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type DiskEncryptionSet. +func (d DiskEncryptionSet) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "id", d.ID) + populate(objectMap, "identity", d.Identity) + populate(objectMap, "location", d.Location) + populate(objectMap, "name", d.Name) + populate(objectMap, "properties", d.Properties) + populate(objectMap, "tags", d.Tags) + populate(objectMap, "type", d.Type) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type DiskEncryptionSetUpdate. +func (d DiskEncryptionSetUpdate) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "identity", d.Identity) + populate(objectMap, "properties", d.Properties) + populate(objectMap, "tags", d.Tags) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type DiskInstanceView. +func (d DiskInstanceView) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "encryptionSettings", d.EncryptionSettings) + populate(objectMap, "name", d.Name) + populate(objectMap, "statuses", d.Statuses) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type DiskProperties. 
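+//
+// A hedged illustration of the time handling: timeCreated is routed through
+// populateTimeRFC3339 (defined elsewhere in this file), which pins the wire format
+// to RFC 3339 rather than relying on time.Time's default encoding:
+//
+//	created := time.Date(2025, 3, 21, 9, 38, 15, 0, time.UTC) // illustrative value
+//	b, err := json.Marshal(DiskProperties{TimeCreated: &created})
+//	// b contains "timeCreated":"2025-03-21T09:38:15Z"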
+func (d DiskProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "burstingEnabled", d.BurstingEnabled) + populate(objectMap, "completionPercent", d.CompletionPercent) + populate(objectMap, "creationData", d.CreationData) + populate(objectMap, "dataAccessAuthMode", d.DataAccessAuthMode) + populate(objectMap, "diskAccessId", d.DiskAccessID) + populate(objectMap, "diskIOPSReadOnly", d.DiskIOPSReadOnly) + populate(objectMap, "diskIOPSReadWrite", d.DiskIOPSReadWrite) + populate(objectMap, "diskMBpsReadOnly", d.DiskMBpsReadOnly) + populate(objectMap, "diskMBpsReadWrite", d.DiskMBpsReadWrite) + populate(objectMap, "diskSizeBytes", d.DiskSizeBytes) + populate(objectMap, "diskSizeGB", d.DiskSizeGB) + populate(objectMap, "diskState", d.DiskState) + populate(objectMap, "encryption", d.Encryption) + populate(objectMap, "encryptionSettingsCollection", d.EncryptionSettingsCollection) + populate(objectMap, "hyperVGeneration", d.HyperVGeneration) + populate(objectMap, "maxShares", d.MaxShares) + populate(objectMap, "networkAccessPolicy", d.NetworkAccessPolicy) + populate(objectMap, "osType", d.OSType) + populate(objectMap, "propertyUpdatesInProgress", d.PropertyUpdatesInProgress) + populate(objectMap, "provisioningState", d.ProvisioningState) + populate(objectMap, "publicNetworkAccess", d.PublicNetworkAccess) + populate(objectMap, "purchasePlan", d.PurchasePlan) + populate(objectMap, "securityProfile", d.SecurityProfile) + populate(objectMap, "shareInfo", d.ShareInfo) + populate(objectMap, "supportedCapabilities", d.SupportedCapabilities) + populate(objectMap, "supportsHibernation", d.SupportsHibernation) + populate(objectMap, "tier", d.Tier) + populateTimeRFC3339(objectMap, "timeCreated", d.TimeCreated) + populate(objectMap, "uniqueId", d.UniqueID) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type DiskProperties. 
+func (d *DiskProperties) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "burstingEnabled": + err = unpopulate(val, "BurstingEnabled", &d.BurstingEnabled) + delete(rawMsg, key) + case "completionPercent": + err = unpopulate(val, "CompletionPercent", &d.CompletionPercent) + delete(rawMsg, key) + case "creationData": + err = unpopulate(val, "CreationData", &d.CreationData) + delete(rawMsg, key) + case "dataAccessAuthMode": + err = unpopulate(val, "DataAccessAuthMode", &d.DataAccessAuthMode) + delete(rawMsg, key) + case "diskAccessId": + err = unpopulate(val, "DiskAccessID", &d.DiskAccessID) + delete(rawMsg, key) + case "diskIOPSReadOnly": + err = unpopulate(val, "DiskIOPSReadOnly", &d.DiskIOPSReadOnly) + delete(rawMsg, key) + case "diskIOPSReadWrite": + err = unpopulate(val, "DiskIOPSReadWrite", &d.DiskIOPSReadWrite) + delete(rawMsg, key) + case "diskMBpsReadOnly": + err = unpopulate(val, "DiskMBpsReadOnly", &d.DiskMBpsReadOnly) + delete(rawMsg, key) + case "diskMBpsReadWrite": + err = unpopulate(val, "DiskMBpsReadWrite", &d.DiskMBpsReadWrite) + delete(rawMsg, key) + case "diskSizeBytes": + err = unpopulate(val, "DiskSizeBytes", &d.DiskSizeBytes) + delete(rawMsg, key) + case "diskSizeGB": + err = unpopulate(val, "DiskSizeGB", &d.DiskSizeGB) + delete(rawMsg, key) + case "diskState": + err = unpopulate(val, "DiskState", &d.DiskState) + delete(rawMsg, key) + case "encryption": + err = unpopulate(val, "Encryption", &d.Encryption) + delete(rawMsg, key) + case "encryptionSettingsCollection": + err = unpopulate(val, "EncryptionSettingsCollection", &d.EncryptionSettingsCollection) + delete(rawMsg, key) + case "hyperVGeneration": + err = unpopulate(val, "HyperVGeneration", &d.HyperVGeneration) + delete(rawMsg, key) + case "maxShares": + err = unpopulate(val, "MaxShares", &d.MaxShares) + delete(rawMsg, key) + case "networkAccessPolicy": + err = unpopulate(val, "NetworkAccessPolicy", &d.NetworkAccessPolicy) + delete(rawMsg, key) + case "osType": + err = unpopulate(val, "OSType", &d.OSType) + delete(rawMsg, key) + case "propertyUpdatesInProgress": + err = unpopulate(val, "PropertyUpdatesInProgress", &d.PropertyUpdatesInProgress) + delete(rawMsg, key) + case "provisioningState": + err = unpopulate(val, "ProvisioningState", &d.ProvisioningState) + delete(rawMsg, key) + case "publicNetworkAccess": + err = unpopulate(val, "PublicNetworkAccess", &d.PublicNetworkAccess) + delete(rawMsg, key) + case "purchasePlan": + err = unpopulate(val, "PurchasePlan", &d.PurchasePlan) + delete(rawMsg, key) + case "securityProfile": + err = unpopulate(val, "SecurityProfile", &d.SecurityProfile) + delete(rawMsg, key) + case "shareInfo": + err = unpopulate(val, "ShareInfo", &d.ShareInfo) + delete(rawMsg, key) + case "supportedCapabilities": + err = unpopulate(val, "SupportedCapabilities", &d.SupportedCapabilities) + delete(rawMsg, key) + case "supportsHibernation": + err = unpopulate(val, "SupportsHibernation", &d.SupportsHibernation) + delete(rawMsg, key) + case "tier": + err = unpopulate(val, "Tier", &d.Tier) + delete(rawMsg, key) + case "timeCreated": + err = unpopulateTimeRFC3339(val, "TimeCreated", &d.TimeCreated) + delete(rawMsg, key) + case "uniqueId": + err = unpopulate(val, "UniqueID", &d.UniqueID) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + } 
+ return nil +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type DiskRestorePointProperties. +func (d *DiskRestorePointProperties) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "completionPercent": + err = unpopulate(val, "CompletionPercent", &d.CompletionPercent) + delete(rawMsg, key) + case "diskAccessId": + err = unpopulate(val, "DiskAccessID", &d.DiskAccessID) + delete(rawMsg, key) + case "encryption": + err = unpopulate(val, "Encryption", &d.Encryption) + delete(rawMsg, key) + case "familyId": + err = unpopulate(val, "FamilyID", &d.FamilyID) + delete(rawMsg, key) + case "hyperVGeneration": + err = unpopulate(val, "HyperVGeneration", &d.HyperVGeneration) + delete(rawMsg, key) + case "networkAccessPolicy": + err = unpopulate(val, "NetworkAccessPolicy", &d.NetworkAccessPolicy) + delete(rawMsg, key) + case "osType": + err = unpopulate(val, "OSType", &d.OSType) + delete(rawMsg, key) + case "publicNetworkAccess": + err = unpopulate(val, "PublicNetworkAccess", &d.PublicNetworkAccess) + delete(rawMsg, key) + case "purchasePlan": + err = unpopulate(val, "PurchasePlan", &d.PurchasePlan) + delete(rawMsg, key) + case "replicationState": + err = unpopulate(val, "ReplicationState", &d.ReplicationState) + delete(rawMsg, key) + case "sourceResourceId": + err = unpopulate(val, "SourceResourceID", &d.SourceResourceID) + delete(rawMsg, key) + case "sourceResourceLocation": + err = unpopulate(val, "SourceResourceLocation", &d.SourceResourceLocation) + delete(rawMsg, key) + case "sourceUniqueId": + err = unpopulate(val, "SourceUniqueID", &d.SourceUniqueID) + delete(rawMsg, key) + case "supportedCapabilities": + err = unpopulate(val, "SupportedCapabilities", &d.SupportedCapabilities) + delete(rawMsg, key) + case "supportsHibernation": + err = unpopulate(val, "SupportsHibernation", &d.SupportsHibernation) + delete(rawMsg, key) + case "timeCreated": + err = unpopulateTimeRFC3339(val, "TimeCreated", &d.TimeCreated) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type DiskUpdate. +func (d DiskUpdate) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "properties", d.Properties) + populate(objectMap, "sku", d.SKU) + populate(objectMap, "tags", d.Tags) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type EncryptionImages. +func (e EncryptionImages) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "dataDiskImages", e.DataDiskImages) + populate(objectMap, "osDiskImage", e.OSDiskImage) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type EncryptionSetProperties. 
+func (e EncryptionSetProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "activeKey", e.ActiveKey) + populate(objectMap, "autoKeyRotationError", e.AutoKeyRotationError) + populate(objectMap, "encryptionType", e.EncryptionType) + populateTimeRFC3339(objectMap, "lastKeyRotationTimestamp", e.LastKeyRotationTimestamp) + populate(objectMap, "previousKeys", e.PreviousKeys) + populate(objectMap, "provisioningState", e.ProvisioningState) + populate(objectMap, "rotationToLatestKeyVersionEnabled", e.RotationToLatestKeyVersionEnabled) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type EncryptionSetProperties. +func (e *EncryptionSetProperties) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", e, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "activeKey": + err = unpopulate(val, "ActiveKey", &e.ActiveKey) + delete(rawMsg, key) + case "autoKeyRotationError": + err = unpopulate(val, "AutoKeyRotationError", &e.AutoKeyRotationError) + delete(rawMsg, key) + case "encryptionType": + err = unpopulate(val, "EncryptionType", &e.EncryptionType) + delete(rawMsg, key) + case "lastKeyRotationTimestamp": + err = unpopulateTimeRFC3339(val, "LastKeyRotationTimestamp", &e.LastKeyRotationTimestamp) + delete(rawMsg, key) + case "previousKeys": + err = unpopulate(val, "PreviousKeys", &e.PreviousKeys) + delete(rawMsg, key) + case "provisioningState": + err = unpopulate(val, "ProvisioningState", &e.ProvisioningState) + delete(rawMsg, key) + case "rotationToLatestKeyVersionEnabled": + err = unpopulate(val, "RotationToLatestKeyVersionEnabled", &e.RotationToLatestKeyVersionEnabled) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", e, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type EncryptionSettingsCollection. +func (e EncryptionSettingsCollection) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "enabled", e.Enabled) + populate(objectMap, "encryptionSettings", e.EncryptionSettings) + populate(objectMap, "encryptionSettingsVersion", e.EncryptionSettingsVersion) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type Gallery. +func (g Gallery) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "id", g.ID) + populate(objectMap, "location", g.Location) + populate(objectMap, "name", g.Name) + populate(objectMap, "properties", g.Properties) + populate(objectMap, "tags", g.Tags) + populate(objectMap, "type", g.Type) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type GalleryApplication. +func (g GalleryApplication) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "id", g.ID) + populate(objectMap, "location", g.Location) + populate(objectMap, "name", g.Name) + populate(objectMap, "properties", g.Properties) + populate(objectMap, "tags", g.Tags) + populate(objectMap, "type", g.Type) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type GalleryApplicationProperties. 
+func (g GalleryApplicationProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "description", g.Description) + populateTimeRFC3339(objectMap, "endOfLifeDate", g.EndOfLifeDate) + populate(objectMap, "eula", g.Eula) + populate(objectMap, "privacyStatementUri", g.PrivacyStatementURI) + populate(objectMap, "releaseNoteUri", g.ReleaseNoteURI) + populate(objectMap, "supportedOSType", g.SupportedOSType) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type GalleryApplicationProperties. +func (g *GalleryApplicationProperties) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", g, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "description": + err = unpopulate(val, "Description", &g.Description) + delete(rawMsg, key) + case "endOfLifeDate": + err = unpopulateTimeRFC3339(val, "EndOfLifeDate", &g.EndOfLifeDate) + delete(rawMsg, key) + case "eula": + err = unpopulate(val, "Eula", &g.Eula) + delete(rawMsg, key) + case "privacyStatementUri": + err = unpopulate(val, "PrivacyStatementURI", &g.PrivacyStatementURI) + delete(rawMsg, key) + case "releaseNoteUri": + err = unpopulate(val, "ReleaseNoteURI", &g.ReleaseNoteURI) + delete(rawMsg, key) + case "supportedOSType": + err = unpopulate(val, "SupportedOSType", &g.SupportedOSType) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", g, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type GalleryApplicationUpdate. +func (g GalleryApplicationUpdate) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "id", g.ID) + populate(objectMap, "name", g.Name) + populate(objectMap, "properties", g.Properties) + populate(objectMap, "tags", g.Tags) + populate(objectMap, "type", g.Type) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type GalleryApplicationVersion. +func (g GalleryApplicationVersion) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "id", g.ID) + populate(objectMap, "location", g.Location) + populate(objectMap, "name", g.Name) + populate(objectMap, "properties", g.Properties) + populate(objectMap, "tags", g.Tags) + populate(objectMap, "type", g.Type) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type GalleryApplicationVersionPublishingProfile. 
+func (g GalleryApplicationVersionPublishingProfile) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "enableHealthCheck", g.EnableHealthCheck) + populateTimeRFC3339(objectMap, "endOfLifeDate", g.EndOfLifeDate) + populate(objectMap, "excludeFromLatest", g.ExcludeFromLatest) + populate(objectMap, "manageActions", g.ManageActions) + populateTimeRFC3339(objectMap, "publishedDate", g.PublishedDate) + populate(objectMap, "replicaCount", g.ReplicaCount) + populate(objectMap, "replicationMode", g.ReplicationMode) + populate(objectMap, "source", g.Source) + populate(objectMap, "storageAccountType", g.StorageAccountType) + populate(objectMap, "targetExtendedLocations", g.TargetExtendedLocations) + populate(objectMap, "targetRegions", g.TargetRegions) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type GalleryApplicationVersionPublishingProfile. +func (g *GalleryApplicationVersionPublishingProfile) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", g, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "enableHealthCheck": + err = unpopulate(val, "EnableHealthCheck", &g.EnableHealthCheck) + delete(rawMsg, key) + case "endOfLifeDate": + err = unpopulateTimeRFC3339(val, "EndOfLifeDate", &g.EndOfLifeDate) + delete(rawMsg, key) + case "excludeFromLatest": + err = unpopulate(val, "ExcludeFromLatest", &g.ExcludeFromLatest) + delete(rawMsg, key) + case "manageActions": + err = unpopulate(val, "ManageActions", &g.ManageActions) + delete(rawMsg, key) + case "publishedDate": + err = unpopulateTimeRFC3339(val, "PublishedDate", &g.PublishedDate) + delete(rawMsg, key) + case "replicaCount": + err = unpopulate(val, "ReplicaCount", &g.ReplicaCount) + delete(rawMsg, key) + case "replicationMode": + err = unpopulate(val, "ReplicationMode", &g.ReplicationMode) + delete(rawMsg, key) + case "source": + err = unpopulate(val, "Source", &g.Source) + delete(rawMsg, key) + case "storageAccountType": + err = unpopulate(val, "StorageAccountType", &g.StorageAccountType) + delete(rawMsg, key) + case "targetExtendedLocations": + err = unpopulate(val, "TargetExtendedLocations", &g.TargetExtendedLocations) + delete(rawMsg, key) + case "targetRegions": + err = unpopulate(val, "TargetRegions", &g.TargetRegions) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", g, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type GalleryApplicationVersionUpdate. +func (g GalleryApplicationVersionUpdate) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "id", g.ID) + populate(objectMap, "name", g.Name) + populate(objectMap, "properties", g.Properties) + populate(objectMap, "tags", g.Tags) + populate(objectMap, "type", g.Type) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type GalleryArtifactPublishingProfileBase. 
+func (g GalleryArtifactPublishingProfileBase) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populateTimeRFC3339(objectMap, "endOfLifeDate", g.EndOfLifeDate) + populate(objectMap, "excludeFromLatest", g.ExcludeFromLatest) + populateTimeRFC3339(objectMap, "publishedDate", g.PublishedDate) + populate(objectMap, "replicaCount", g.ReplicaCount) + populate(objectMap, "replicationMode", g.ReplicationMode) + populate(objectMap, "storageAccountType", g.StorageAccountType) + populate(objectMap, "targetExtendedLocations", g.TargetExtendedLocations) + populate(objectMap, "targetRegions", g.TargetRegions) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type GalleryArtifactPublishingProfileBase. +func (g *GalleryArtifactPublishingProfileBase) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", g, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "endOfLifeDate": + err = unpopulateTimeRFC3339(val, "EndOfLifeDate", &g.EndOfLifeDate) + delete(rawMsg, key) + case "excludeFromLatest": + err = unpopulate(val, "ExcludeFromLatest", &g.ExcludeFromLatest) + delete(rawMsg, key) + case "publishedDate": + err = unpopulateTimeRFC3339(val, "PublishedDate", &g.PublishedDate) + delete(rawMsg, key) + case "replicaCount": + err = unpopulate(val, "ReplicaCount", &g.ReplicaCount) + delete(rawMsg, key) + case "replicationMode": + err = unpopulate(val, "ReplicationMode", &g.ReplicationMode) + delete(rawMsg, key) + case "storageAccountType": + err = unpopulate(val, "StorageAccountType", &g.StorageAccountType) + delete(rawMsg, key) + case "targetExtendedLocations": + err = unpopulate(val, "TargetExtendedLocations", &g.TargetExtendedLocations) + delete(rawMsg, key) + case "targetRegions": + err = unpopulate(val, "TargetRegions", &g.TargetRegions) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", g, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type GalleryImage. +func (g GalleryImage) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "id", g.ID) + populate(objectMap, "location", g.Location) + populate(objectMap, "name", g.Name) + populate(objectMap, "properties", g.Properties) + populate(objectMap, "tags", g.Tags) + populate(objectMap, "type", g.Type) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type GalleryImageProperties. 
+func (g GalleryImageProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "architecture", g.Architecture) + populate(objectMap, "description", g.Description) + populate(objectMap, "disallowed", g.Disallowed) + populateTimeRFC3339(objectMap, "endOfLifeDate", g.EndOfLifeDate) + populate(objectMap, "eula", g.Eula) + populate(objectMap, "features", g.Features) + populate(objectMap, "hyperVGeneration", g.HyperVGeneration) + populate(objectMap, "identifier", g.Identifier) + populate(objectMap, "osState", g.OSState) + populate(objectMap, "osType", g.OSType) + populate(objectMap, "privacyStatementUri", g.PrivacyStatementURI) + populate(objectMap, "provisioningState", g.ProvisioningState) + populate(objectMap, "purchasePlan", g.PurchasePlan) + populate(objectMap, "recommended", g.Recommended) + populate(objectMap, "releaseNoteUri", g.ReleaseNoteURI) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type GalleryImageProperties. +func (g *GalleryImageProperties) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", g, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "architecture": + err = unpopulate(val, "Architecture", &g.Architecture) + delete(rawMsg, key) + case "description": + err = unpopulate(val, "Description", &g.Description) + delete(rawMsg, key) + case "disallowed": + err = unpopulate(val, "Disallowed", &g.Disallowed) + delete(rawMsg, key) + case "endOfLifeDate": + err = unpopulateTimeRFC3339(val, "EndOfLifeDate", &g.EndOfLifeDate) + delete(rawMsg, key) + case "eula": + err = unpopulate(val, "Eula", &g.Eula) + delete(rawMsg, key) + case "features": + err = unpopulate(val, "Features", &g.Features) + delete(rawMsg, key) + case "hyperVGeneration": + err = unpopulate(val, "HyperVGeneration", &g.HyperVGeneration) + delete(rawMsg, key) + case "identifier": + err = unpopulate(val, "Identifier", &g.Identifier) + delete(rawMsg, key) + case "osState": + err = unpopulate(val, "OSState", &g.OSState) + delete(rawMsg, key) + case "osType": + err = unpopulate(val, "OSType", &g.OSType) + delete(rawMsg, key) + case "privacyStatementUri": + err = unpopulate(val, "PrivacyStatementURI", &g.PrivacyStatementURI) + delete(rawMsg, key) + case "provisioningState": + err = unpopulate(val, "ProvisioningState", &g.ProvisioningState) + delete(rawMsg, key) + case "purchasePlan": + err = unpopulate(val, "PurchasePlan", &g.PurchasePlan) + delete(rawMsg, key) + case "recommended": + err = unpopulate(val, "Recommended", &g.Recommended) + delete(rawMsg, key) + case "releaseNoteUri": + err = unpopulate(val, "ReleaseNoteURI", &g.ReleaseNoteURI) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", g, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type GalleryImageUpdate. +func (g GalleryImageUpdate) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "id", g.ID) + populate(objectMap, "name", g.Name) + populate(objectMap, "properties", g.Properties) + populate(objectMap, "tags", g.Tags) + populate(objectMap, "type", g.Type) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type GalleryImageVersion. 
+func (g GalleryImageVersion) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "id", g.ID) + populate(objectMap, "location", g.Location) + populate(objectMap, "name", g.Name) + populate(objectMap, "properties", g.Properties) + populate(objectMap, "tags", g.Tags) + populate(objectMap, "type", g.Type) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type GalleryImageVersionPublishingProfile. +func (g GalleryImageVersionPublishingProfile) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populateTimeRFC3339(objectMap, "endOfLifeDate", g.EndOfLifeDate) + populate(objectMap, "excludeFromLatest", g.ExcludeFromLatest) + populateTimeRFC3339(objectMap, "publishedDate", g.PublishedDate) + populate(objectMap, "replicaCount", g.ReplicaCount) + populate(objectMap, "replicationMode", g.ReplicationMode) + populate(objectMap, "storageAccountType", g.StorageAccountType) + populate(objectMap, "targetExtendedLocations", g.TargetExtendedLocations) + populate(objectMap, "targetRegions", g.TargetRegions) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type GalleryImageVersionPublishingProfile. +func (g *GalleryImageVersionPublishingProfile) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", g, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "endOfLifeDate": + err = unpopulateTimeRFC3339(val, "EndOfLifeDate", &g.EndOfLifeDate) + delete(rawMsg, key) + case "excludeFromLatest": + err = unpopulate(val, "ExcludeFromLatest", &g.ExcludeFromLatest) + delete(rawMsg, key) + case "publishedDate": + err = unpopulateTimeRFC3339(val, "PublishedDate", &g.PublishedDate) + delete(rawMsg, key) + case "replicaCount": + err = unpopulate(val, "ReplicaCount", &g.ReplicaCount) + delete(rawMsg, key) + case "replicationMode": + err = unpopulate(val, "ReplicationMode", &g.ReplicationMode) + delete(rawMsg, key) + case "storageAccountType": + err = unpopulate(val, "StorageAccountType", &g.StorageAccountType) + delete(rawMsg, key) + case "targetExtendedLocations": + err = unpopulate(val, "TargetExtendedLocations", &g.TargetExtendedLocations) + delete(rawMsg, key) + case "targetRegions": + err = unpopulate(val, "TargetRegions", &g.TargetRegions) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", g, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type GalleryImageVersionStorageProfile. +func (g GalleryImageVersionStorageProfile) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "dataDiskImages", g.DataDiskImages) + populate(objectMap, "osDiskImage", g.OSDiskImage) + populate(objectMap, "source", g.Source) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type GalleryImageVersionUpdate. +func (g GalleryImageVersionUpdate) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "id", g.ID) + populate(objectMap, "name", g.Name) + populate(objectMap, "properties", g.Properties) + populate(objectMap, "tags", g.Tags) + populate(objectMap, "type", g.Type) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type GalleryUpdate. 
+func (g GalleryUpdate) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "id", g.ID) + populate(objectMap, "name", g.Name) + populate(objectMap, "properties", g.Properties) + populate(objectMap, "tags", g.Tags) + populate(objectMap, "type", g.Type) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type Image. +func (i Image) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "extendedLocation", i.ExtendedLocation) + populate(objectMap, "id", i.ID) + populate(objectMap, "location", i.Location) + populate(objectMap, "name", i.Name) + populate(objectMap, "properties", i.Properties) + populate(objectMap, "tags", i.Tags) + populate(objectMap, "type", i.Type) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type ImageStorageProfile. +func (i ImageStorageProfile) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "dataDisks", i.DataDisks) + populate(objectMap, "osDisk", i.OSDisk) + populate(objectMap, "zoneResilient", i.ZoneResilient) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type ImageUpdate. +func (i ImageUpdate) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "properties", i.Properties) + populate(objectMap, "tags", i.Tags) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type InstanceViewStatus. +func (i InstanceViewStatus) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "code", i.Code) + populate(objectMap, "displayStatus", i.DisplayStatus) + populate(objectMap, "level", i.Level) + populate(objectMap, "message", i.Message) + populateTimeRFC3339(objectMap, "time", i.Time) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type InstanceViewStatus. +func (i *InstanceViewStatus) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "code": + err = unpopulate(val, "Code", &i.Code) + delete(rawMsg, key) + case "displayStatus": + err = unpopulate(val, "DisplayStatus", &i.DisplayStatus) + delete(rawMsg, key) + case "level": + err = unpopulate(val, "Level", &i.Level) + delete(rawMsg, key) + case "message": + err = unpopulate(val, "Message", &i.Message) + delete(rawMsg, key) + case "time": + err = unpopulateTimeRFC3339(val, "Time", &i.Time) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type LastPatchInstallationSummary. 
+func (l LastPatchInstallationSummary) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	populate(objectMap, "error", l.Error)
+	populate(objectMap, "excludedPatchCount", l.ExcludedPatchCount)
+	populate(objectMap, "failedPatchCount", l.FailedPatchCount)
+	populate(objectMap, "installationActivityId", l.InstallationActivityID)
+	populate(objectMap, "installedPatchCount", l.InstalledPatchCount)
+	populateTimeRFC3339(objectMap, "lastModifiedTime", l.LastModifiedTime)
+	populate(objectMap, "maintenanceWindowExceeded", l.MaintenanceWindowExceeded)
+	populate(objectMap, "notSelectedPatchCount", l.NotSelectedPatchCount)
+	populate(objectMap, "pendingPatchCount", l.PendingPatchCount)
+	populateTimeRFC3339(objectMap, "startTime", l.StartTime)
+	populate(objectMap, "status", l.Status)
+	return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON implements the json.Unmarshaller interface for type LastPatchInstallationSummary.
+func (l *LastPatchInstallationSummary) UnmarshalJSON(data []byte) error {
+	var rawMsg map[string]json.RawMessage
+	if err := json.Unmarshal(data, &rawMsg); err != nil {
+		return fmt.Errorf("unmarshalling type %T: %v", l, err)
+	}
+	for key, val := range rawMsg {
+		var err error
+		switch key {
+		case "error":
+			err = unpopulate(val, "Error", &l.Error)
+			delete(rawMsg, key)
+		case "excludedPatchCount":
+			err = unpopulate(val, "ExcludedPatchCount", &l.ExcludedPatchCount)
+			delete(rawMsg, key)
+		case "failedPatchCount":
+			err = unpopulate(val, "FailedPatchCount", &l.FailedPatchCount)
+			delete(rawMsg, key)
+		case "installationActivityId":
+			err = unpopulate(val, "InstallationActivityID", &l.InstallationActivityID)
+			delete(rawMsg, key)
+		case "installedPatchCount":
+			err = unpopulate(val, "InstalledPatchCount", &l.InstalledPatchCount)
+			delete(rawMsg, key)
+		case "lastModifiedTime":
+			err = unpopulateTimeRFC3339(val, "LastModifiedTime", &l.LastModifiedTime)
+			delete(rawMsg, key)
+		case "maintenanceWindowExceeded":
+			err = unpopulate(val, "MaintenanceWindowExceeded", &l.MaintenanceWindowExceeded)
+			delete(rawMsg, key)
+		case "notSelectedPatchCount":
+			err = unpopulate(val, "NotSelectedPatchCount", &l.NotSelectedPatchCount)
+			delete(rawMsg, key)
+		case "pendingPatchCount":
+			err = unpopulate(val, "PendingPatchCount", &l.PendingPatchCount)
+			delete(rawMsg, key)
+		case "startTime":
+			err = unpopulateTimeRFC3339(val, "StartTime", &l.StartTime)
+			delete(rawMsg, key)
+		case "status":
+			err = unpopulate(val, "Status", &l.Status)
+			delete(rawMsg, key)
+		}
+		if err != nil {
+			return fmt.Errorf("unmarshalling type %T: %v", l, err)
+		}
+	}
+	return nil
+}
+
+// MarshalJSON implements the json.Marshaller interface for type LinuxParameters.
+func (l LinuxParameters) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	populate(objectMap, "classificationsToInclude", l.ClassificationsToInclude)
+	populate(objectMap, "maintenanceRunId", l.MaintenanceRunID)
+	populate(objectMap, "packageNameMasksToExclude", l.PackageNameMasksToExclude)
+	populate(objectMap, "packageNameMasksToInclude", l.PackageNameMasksToInclude)
+	return json.Marshal(objectMap)
+}
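+
+// Editorial note, not part of the generated file: each case in the unmarshaller
+// above removes its key with delete(rawMsg, key). Deleting inside a range over
+// a map is safe in Go, and draining the map this way leaves rawMsg holding only
+// members the struct does not model; strict callers could inspect the leftovers.
+// A hedged sketch of the lenient default behavior:
+//
+//	var l LastPatchInstallationSummary
+//	_ = json.Unmarshal([]byte(`{"failedPatchCount":0,"unknownKey":1}`), &l)
+//	// "failedPatchCount" is consumed; "unknownKey" matches no case and is
+//	// silently ignored rather than treated as an error.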
+
+// MarshalJSON implements the json.Marshaller interface for type LoadBalancerConfigurationProperties.
+func (l LoadBalancerConfigurationProperties) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	populate(objectMap, "frontendIPConfigurations", l.FrontendIPConfigurations)
+	return json.Marshal(objectMap)
+}
+
+// MarshalJSON implements the json.Marshaller interface for type LogAnalyticsInputBase.
+func (l LogAnalyticsInputBase) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	populate(objectMap, "blobContainerSasUri", l.BlobContainerSasURI)
+	populateTimeRFC3339(objectMap, "fromTime", l.FromTime)
+	populate(objectMap, "groupByClientApplicationId", l.GroupByClientApplicationID)
+	populate(objectMap, "groupByOperationName", l.GroupByOperationName)
+	populate(objectMap, "groupByResourceName", l.GroupByResourceName)
+	populate(objectMap, "groupByThrottlePolicy", l.GroupByThrottlePolicy)
+	populate(objectMap, "groupByUserAgent", l.GroupByUserAgent)
+	populateTimeRFC3339(objectMap, "toTime", l.ToTime)
+	return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON implements the json.Unmarshaller interface for type LogAnalyticsInputBase.
+func (l *LogAnalyticsInputBase) UnmarshalJSON(data []byte) error {
+	var rawMsg map[string]json.RawMessage
+	if err := json.Unmarshal(data, &rawMsg); err != nil {
+		return fmt.Errorf("unmarshalling type %T: %v", l, err)
+	}
+	for key, val := range rawMsg {
+		var err error
+		switch key {
+		case "blobContainerSasUri":
+			err = unpopulate(val, "BlobContainerSasURI", &l.BlobContainerSasURI)
+			delete(rawMsg, key)
+		case "fromTime":
+			err = unpopulateTimeRFC3339(val, "FromTime", &l.FromTime)
+			delete(rawMsg, key)
+		case "groupByClientApplicationId":
+			err = unpopulate(val, "GroupByClientApplicationID", &l.GroupByClientApplicationID)
+			delete(rawMsg, key)
+		case "groupByOperationName":
+			err = unpopulate(val, "GroupByOperationName", &l.GroupByOperationName)
+			delete(rawMsg, key)
+		case "groupByResourceName":
+			err = unpopulate(val, "GroupByResourceName", &l.GroupByResourceName)
+			delete(rawMsg, key)
+		case "groupByThrottlePolicy":
+			err = unpopulate(val, "GroupByThrottlePolicy", &l.GroupByThrottlePolicy)
+			delete(rawMsg, key)
+		case "groupByUserAgent":
+			err = unpopulate(val, "GroupByUserAgent", &l.GroupByUserAgent)
+			delete(rawMsg, key)
+		case "toTime":
+			err = unpopulateTimeRFC3339(val, "ToTime", &l.ToTime)
+			delete(rawMsg, key)
+		}
+		if err != nil {
+			return fmt.Errorf("unmarshalling type %T: %v", l, err)
+		}
+	}
+	return nil
+}
+
+// MarshalJSON implements the json.Marshaller interface for type MaintenanceRedeployStatus.
+func (m MaintenanceRedeployStatus) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	populate(objectMap, "isCustomerInitiatedMaintenanceAllowed", m.IsCustomerInitiatedMaintenanceAllowed)
+	populate(objectMap, "lastOperationMessage", m.LastOperationMessage)
+	populate(objectMap, "lastOperationResultCode", m.LastOperationResultCode)
+	populateTimeRFC3339(objectMap, "maintenanceWindowEndTime", m.MaintenanceWindowEndTime)
+	populateTimeRFC3339(objectMap, "maintenanceWindowStartTime", m.MaintenanceWindowStartTime)
+	populateTimeRFC3339(objectMap, "preMaintenanceWindowEndTime", m.PreMaintenanceWindowEndTime)
+	populateTimeRFC3339(objectMap, "preMaintenanceWindowStartTime", m.PreMaintenanceWindowStartTime)
+	return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON implements the json.Unmarshaller interface for type MaintenanceRedeployStatus.
+func (m *MaintenanceRedeployStatus) UnmarshalJSON(data []byte) error {
+	var rawMsg map[string]json.RawMessage
+	if err := json.Unmarshal(data, &rawMsg); err != nil {
+		return fmt.Errorf("unmarshalling type %T: %v", m, err)
+	}
+	for key, val := range rawMsg {
+		var err error
+		switch key {
+		case "isCustomerInitiatedMaintenanceAllowed":
+			err = unpopulate(val, "IsCustomerInitiatedMaintenanceAllowed", &m.IsCustomerInitiatedMaintenanceAllowed)
+			delete(rawMsg, key)
+		case "lastOperationMessage":
+			err = unpopulate(val, "LastOperationMessage", &m.LastOperationMessage)
+			delete(rawMsg, key)
+		case "lastOperationResultCode":
+			err = unpopulate(val, "LastOperationResultCode", &m.LastOperationResultCode)
+			delete(rawMsg, key)
+		case "maintenanceWindowEndTime":
+			err = unpopulateTimeRFC3339(val, "MaintenanceWindowEndTime", &m.MaintenanceWindowEndTime)
+			delete(rawMsg, key)
+		case "maintenanceWindowStartTime":
+			err = unpopulateTimeRFC3339(val, "MaintenanceWindowStartTime", &m.MaintenanceWindowStartTime)
+			delete(rawMsg, key)
+		case "preMaintenanceWindowEndTime":
+			err = unpopulateTimeRFC3339(val, "PreMaintenanceWindowEndTime", &m.PreMaintenanceWindowEndTime)
+			delete(rawMsg, key)
+		case "preMaintenanceWindowStartTime":
+			err = unpopulateTimeRFC3339(val, "PreMaintenanceWindowStartTime", &m.PreMaintenanceWindowStartTime)
+			delete(rawMsg, key)
+		}
+		if err != nil {
+			return fmt.Errorf("unmarshalling type %T: %v", m, err)
+		}
+	}
+	return nil
+}
+
+// MarshalJSON implements the json.Marshaller interface for type NetworkProfile.
+func (n NetworkProfile) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	populate(objectMap, "networkApiVersion", n.NetworkAPIVersion)
+	populate(objectMap, "networkInterfaceConfigurations", n.NetworkInterfaceConfigurations)
+	populate(objectMap, "networkInterfaces", n.NetworkInterfaces)
+	return json.Marshal(objectMap)
+}
+
+// MarshalJSON implements the json.Marshaller interface for type OSProfile.
+func (o OSProfile) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	populate(objectMap, "adminPassword", o.AdminPassword)
+	populate(objectMap, "adminUsername", o.AdminUsername)
+	populate(objectMap, "allowExtensionOperations", o.AllowExtensionOperations)
+	populate(objectMap, "computerName", o.ComputerName)
+	populate(objectMap, "customData", o.CustomData)
+	populate(objectMap, "linuxConfiguration", o.LinuxConfiguration)
+	populate(objectMap, "requireGuestProvisionSignal", o.RequireGuestProvisionSignal)
+	populate(objectMap, "secrets", o.Secrets)
+	populate(objectMap, "windowsConfiguration", o.WindowsConfiguration)
+	return json.Marshal(objectMap)
+}
+
+// MarshalJSON implements the json.Marshaller interface for type ProximityPlacementGroup.
+func (p ProximityPlacementGroup) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	populate(objectMap, "id", p.ID)
+	populate(objectMap, "location", p.Location)
+	populate(objectMap, "name", p.Name)
+	populate(objectMap, "properties", p.Properties)
+	populate(objectMap, "tags", p.Tags)
+	populate(objectMap, "type", p.Type)
+	populate(objectMap, "zones", p.Zones)
+	return json.Marshal(objectMap)
+}
+
+// MarshalJSON implements the json.Marshaller interface for type ProximityPlacementGroupProperties.
+func (p ProximityPlacementGroupProperties) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	populate(objectMap, "availabilitySets", p.AvailabilitySets)
+	populate(objectMap, "colocationStatus", p.ColocationStatus)
+	populate(objectMap, "intent", p.Intent)
+	populate(objectMap, "proximityPlacementGroupType", p.ProximityPlacementGroupType)
+	populate(objectMap, "virtualMachineScaleSets", p.VirtualMachineScaleSets)
+	populate(objectMap, "virtualMachines", p.VirtualMachines)
+	return json.Marshal(objectMap)
+}
+
+// MarshalJSON implements the json.Marshaller interface for type ProximityPlacementGroupPropertiesIntent.
+func (p ProximityPlacementGroupPropertiesIntent) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	populate(objectMap, "vmSizes", p.VMSizes)
+	return json.Marshal(objectMap)
+}
+
+// MarshalJSON implements the json.Marshaller interface for type ProximityPlacementGroupUpdate.
+func (p ProximityPlacementGroupUpdate) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	populate(objectMap, "tags", p.Tags)
+	return json.Marshal(objectMap)
+}
+
+// MarshalJSON implements the json.Marshaller interface for type ReplicationStatus.
+func (r ReplicationStatus) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	populate(objectMap, "aggregatedState", r.AggregatedState)
+	populate(objectMap, "summary", r.Summary)
+	return json.Marshal(objectMap)
+}
+
+// MarshalJSON implements the json.Marshaller interface for type RequestRateByIntervalInput.
+func (r RequestRateByIntervalInput) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	populate(objectMap, "blobContainerSasUri", r.BlobContainerSasURI)
+	populateTimeRFC3339(objectMap, "fromTime", r.FromTime)
+	populate(objectMap, "groupByClientApplicationId", r.GroupByClientApplicationID)
+	populate(objectMap, "groupByOperationName", r.GroupByOperationName)
+	populate(objectMap, "groupByResourceName", r.GroupByResourceName)
+	populate(objectMap, "groupByThrottlePolicy", r.GroupByThrottlePolicy)
+	populate(objectMap, "groupByUserAgent", r.GroupByUserAgent)
+	populate(objectMap, "intervalLength", r.IntervalLength)
+	populateTimeRFC3339(objectMap, "toTime", r.ToTime)
+	return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON implements the json.Unmarshaller interface for type RequestRateByIntervalInput.
+func (r *RequestRateByIntervalInput) UnmarshalJSON(data []byte) error {
+	var rawMsg map[string]json.RawMessage
+	if err := json.Unmarshal(data, &rawMsg); err != nil {
+		return fmt.Errorf("unmarshalling type %T: %v", r, err)
+	}
+	for key, val := range rawMsg {
+		var err error
+		switch key {
+		case "blobContainerSasUri":
+			err = unpopulate(val, "BlobContainerSasURI", &r.BlobContainerSasURI)
+			delete(rawMsg, key)
+		case "fromTime":
+			err = unpopulateTimeRFC3339(val, "FromTime", &r.FromTime)
+			delete(rawMsg, key)
+		case "groupByClientApplicationId":
+			err = unpopulate(val, "GroupByClientApplicationID", &r.GroupByClientApplicationID)
+			delete(rawMsg, key)
+		case "groupByOperationName":
+			err = unpopulate(val, "GroupByOperationName", &r.GroupByOperationName)
+			delete(rawMsg, key)
+		case "groupByResourceName":
+			err = unpopulate(val, "GroupByResourceName", &r.GroupByResourceName)
+			delete(rawMsg, key)
+		case "groupByThrottlePolicy":
+			err = unpopulate(val, "GroupByThrottlePolicy", &r.GroupByThrottlePolicy)
+			delete(rawMsg, key)
+		case "groupByUserAgent":
+			err = unpopulate(val, "GroupByUserAgent", &r.GroupByUserAgent)
+			delete(rawMsg, key)
+		case "intervalLength":
+			err = unpopulate(val, "IntervalLength", &r.IntervalLength)
+			delete(rawMsg, key)
+		case "toTime":
+			err = unpopulateTimeRFC3339(val, "ToTime", &r.ToTime)
+			delete(rawMsg, key)
+		}
+		if err != nil {
+			return fmt.Errorf("unmarshalling type %T: %v", r, err)
+		}
+	}
+	return nil
+}
+
+// MarshalJSON implements the json.Marshaller interface for type Resource.
+func (r Resource) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	populate(objectMap, "id", r.ID)
+	populate(objectMap, "location", r.Location)
+	populate(objectMap, "name", r.Name)
+	populate(objectMap, "tags", r.Tags)
+	populate(objectMap, "type", r.Type)
+	return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON implements the json.Unmarshaller interface for type ResourceInstanceViewStatus.
+func (r *ResourceInstanceViewStatus) UnmarshalJSON(data []byte) error {
+	var rawMsg map[string]json.RawMessage
+	if err := json.Unmarshal(data, &rawMsg); err != nil {
+		return fmt.Errorf("unmarshalling type %T: %v", r, err)
+	}
+	for key, val := range rawMsg {
+		var err error
+		switch key {
+		case "code":
+			err = unpopulate(val, "Code", &r.Code)
+			delete(rawMsg, key)
+		case "displayStatus":
+			err = unpopulate(val, "DisplayStatus", &r.DisplayStatus)
+			delete(rawMsg, key)
+		case "level":
+			err = unpopulate(val, "Level", &r.Level)
+			delete(rawMsg, key)
+		case "message":
+			err = unpopulate(val, "Message", &r.Message)
+			delete(rawMsg, key)
+		case "time":
+			err = unpopulateTimeRFC3339(val, "Time", &r.Time)
+			delete(rawMsg, key)
+		}
+		if err != nil {
+			return fmt.Errorf("unmarshalling type %T: %v", r, err)
+		}
+	}
+	return nil
+}
+
+// MarshalJSON implements the json.Marshaller interface for type ResourceWithOptionalLocation.
+func (r ResourceWithOptionalLocation) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	populate(objectMap, "id", r.ID)
+	populate(objectMap, "location", r.Location)
+	populate(objectMap, "name", r.Name)
+	populate(objectMap, "tags", r.Tags)
+	populate(objectMap, "type", r.Type)
+	return json.Marshal(objectMap)
+}
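+
+// Editorial note, not part of the generated file: Resource.MarshalJSON above
+// builds objectMap through this package's populate helper which, in the
+// generated helpers shipped alongside these models, skips nil pointers, so a
+// zero-value struct marshals to "{}" instead of emitting null members. A
+// hedged sketch, assuming azcore/to's Ptr helper:
+//
+//	r := Resource{Location: to.Ptr("westeurope")}
+//	b, _ := json.Marshal(r) // {"location":"westeurope"}; nil ID/Name/Tags/Type omitted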
+
+// MarshalJSON implements the json.Marshaller interface for type RestorePointCollection.
+func (r RestorePointCollection) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	populate(objectMap, "id", r.ID)
+	populate(objectMap, "location", r.Location)
+	populate(objectMap, "name", r.Name)
+	populate(objectMap, "properties", r.Properties)
+	populate(objectMap, "tags", r.Tags)
+	populate(objectMap, "type", r.Type)
+	return json.Marshal(objectMap)
+}
+
+// MarshalJSON implements the json.Marshaller interface for type RestorePointCollectionProperties.
+func (r RestorePointCollectionProperties) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	populate(objectMap, "provisioningState", r.ProvisioningState)
+	populate(objectMap, "restorePointCollectionId", r.RestorePointCollectionID)
+	populate(objectMap, "restorePoints", r.RestorePoints)
+	populate(objectMap, "source", r.Source)
+	return json.Marshal(objectMap)
+}
+
+// MarshalJSON implements the json.Marshaller interface for type RestorePointCollectionUpdate.
+func (r RestorePointCollectionUpdate) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	populate(objectMap, "properties", r.Properties)
+	populate(objectMap, "tags", r.Tags)
+	return json.Marshal(objectMap)
+}
+
+// MarshalJSON implements the json.Marshaller interface for type RestorePointInstanceView.
+func (r RestorePointInstanceView) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	populate(objectMap, "diskRestorePoints", r.DiskRestorePoints)
+	populate(objectMap, "statuses", r.Statuses)
+	return json.Marshal(objectMap)
+}
+
+// MarshalJSON implements the json.Marshaller interface for type RestorePointProperties.
+func (r RestorePointProperties) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	populate(objectMap, "consistencyMode", r.ConsistencyMode)
+	populate(objectMap, "excludeDisks", r.ExcludeDisks)
+	populate(objectMap, "instanceView", r.InstanceView)
+	populate(objectMap, "provisioningState", r.ProvisioningState)
+	populate(objectMap, "sourceMetadata", r.SourceMetadata)
+	populate(objectMap, "sourceRestorePoint", r.SourceRestorePoint)
+	populateTimeRFC3339(objectMap, "timeCreated", r.TimeCreated)
+	return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON implements the json.Unmarshaller interface for type RestorePointProperties.
+func (r *RestorePointProperties) UnmarshalJSON(data []byte) error {
+	var rawMsg map[string]json.RawMessage
+	if err := json.Unmarshal(data, &rawMsg); err != nil {
+		return fmt.Errorf("unmarshalling type %T: %v", r, err)
+	}
+	for key, val := range rawMsg {
+		var err error
+		switch key {
+		case "consistencyMode":
+			err = unpopulate(val, "ConsistencyMode", &r.ConsistencyMode)
+			delete(rawMsg, key)
+		case "excludeDisks":
+			err = unpopulate(val, "ExcludeDisks", &r.ExcludeDisks)
+			delete(rawMsg, key)
+		case "instanceView":
+			err = unpopulate(val, "InstanceView", &r.InstanceView)
+			delete(rawMsg, key)
+		case "provisioningState":
+			err = unpopulate(val, "ProvisioningState", &r.ProvisioningState)
+			delete(rawMsg, key)
+		case "sourceMetadata":
+			err = unpopulate(val, "SourceMetadata", &r.SourceMetadata)
+			delete(rawMsg, key)
+		case "sourceRestorePoint":
+			err = unpopulate(val, "SourceRestorePoint", &r.SourceRestorePoint)
+			delete(rawMsg, key)
+		case "timeCreated":
+			err = unpopulateTimeRFC3339(val, "TimeCreated", &r.TimeCreated)
+			delete(rawMsg, key)
+		}
+		if err != nil {
+			return fmt.Errorf("unmarshalling type %T: %v", r, err)
+		}
+	}
+	return nil
+}
+
+// MarshalJSON implements the json.Marshaller interface for type RestorePointSourceVMStorageProfile.
+func (r RestorePointSourceVMStorageProfile) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	populate(objectMap, "dataDisks", r.DataDisks)
+	populate(objectMap, "osDisk", r.OSDisk)
+	return json.Marshal(objectMap)
+}
+
+// MarshalJSON implements the json.Marshaller interface for type RoleInstances.
+func (r RoleInstances) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	populate(objectMap, "roleInstances", r.RoleInstances)
+	return json.Marshal(objectMap)
+}
+
+// MarshalJSON implements the json.Marshaller interface for type RollingUpgradeRunningStatus.
+func (r RollingUpgradeRunningStatus) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	populate(objectMap, "code", r.Code)
+	populate(objectMap, "lastAction", r.LastAction)
+	populateTimeRFC3339(objectMap, "lastActionTime", r.LastActionTime)
+	populateTimeRFC3339(objectMap, "startTime", r.StartTime)
+	return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON implements the json.Unmarshaller interface for type RollingUpgradeRunningStatus.
+func (r *RollingUpgradeRunningStatus) UnmarshalJSON(data []byte) error {
+	var rawMsg map[string]json.RawMessage
+	if err := json.Unmarshal(data, &rawMsg); err != nil {
+		return fmt.Errorf("unmarshalling type %T: %v", r, err)
+	}
+	for key, val := range rawMsg {
+		var err error
+		switch key {
+		case "code":
+			err = unpopulate(val, "Code", &r.Code)
+			delete(rawMsg, key)
+		case "lastAction":
+			err = unpopulate(val, "LastAction", &r.LastAction)
+			delete(rawMsg, key)
+		case "lastActionTime":
+			err = unpopulateTimeRFC3339(val, "LastActionTime", &r.LastActionTime)
+			delete(rawMsg, key)
+		case "startTime":
+			err = unpopulateTimeRFC3339(val, "StartTime", &r.StartTime)
+			delete(rawMsg, key)
+		}
+		if err != nil {
+			return fmt.Errorf("unmarshalling type %T: %v", r, err)
+		}
+	}
+	return nil
+}
+
+// MarshalJSON implements the json.Marshaller interface for type RollingUpgradeStatusInfo.
+func (r RollingUpgradeStatusInfo) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	populate(objectMap, "id", r.ID)
+	populate(objectMap, "location", r.Location)
+	populate(objectMap, "name", r.Name)
+	populate(objectMap, "properties", r.Properties)
+	populate(objectMap, "tags", r.Tags)
+	populate(objectMap, "type", r.Type)
+	return json.Marshal(objectMap)
+}
+
+// MarshalJSON implements the json.Marshaller interface for type RunCommandInput.
+func (r RunCommandInput) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	populate(objectMap, "commandId", r.CommandID)
+	populate(objectMap, "parameters", r.Parameters)
+	populate(objectMap, "script", r.Script)
+	return json.Marshal(objectMap)
+}
+
+// MarshalJSON implements the json.Marshaller interface for type SSHConfiguration.
+func (s SSHConfiguration) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	populate(objectMap, "publicKeys", s.PublicKeys)
+	return json.Marshal(objectMap)
+}
+
+// MarshalJSON implements the json.Marshaller interface for type SSHPublicKeyResource.
+func (s SSHPublicKeyResource) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	populate(objectMap, "id", s.ID)
+	populate(objectMap, "location", s.Location)
+	populate(objectMap, "name", s.Name)
+	populate(objectMap, "properties", s.Properties)
+	populate(objectMap, "tags", s.Tags)
+	populate(objectMap, "type", s.Type)
+	return json.Marshal(objectMap)
+}
+
+// MarshalJSON implements the json.Marshaller interface for type SSHPublicKeyUpdateResource.
+func (s SSHPublicKeyUpdateResource) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	populate(objectMap, "properties", s.Properties)
+	populate(objectMap, "tags", s.Tags)
+	return json.Marshal(objectMap)
+}
+
+// MarshalJSON implements the json.Marshaller interface for type ScaleInPolicy.
+func (s ScaleInPolicy) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	populate(objectMap, "forceDeletion", s.ForceDeletion)
+	populate(objectMap, "rules", s.Rules)
+	return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON implements the json.Unmarshaller interface for type SharedGalleryImageProperties.
+func (s *SharedGalleryImageProperties) UnmarshalJSON(data []byte) error {
+	var rawMsg map[string]json.RawMessage
+	if err := json.Unmarshal(data, &rawMsg); err != nil {
+		return fmt.Errorf("unmarshalling type %T: %v", s, err)
+	}
+	for key, val := range rawMsg {
+		var err error
+		switch key {
+		case "disallowed":
+			err = unpopulate(val, "Disallowed", &s.Disallowed)
+			delete(rawMsg, key)
+		case "endOfLifeDate":
+			err = unpopulateTimeRFC3339(val, "EndOfLifeDate", &s.EndOfLifeDate)
+			delete(rawMsg, key)
+		case "features":
+			err = unpopulate(val, "Features", &s.Features)
+			delete(rawMsg, key)
+		case "hyperVGeneration":
+			err = unpopulate(val, "HyperVGeneration", &s.HyperVGeneration)
+			delete(rawMsg, key)
+		case "identifier":
+			err = unpopulate(val, "Identifier", &s.Identifier)
+			delete(rawMsg, key)
+		case "osState":
+			err = unpopulate(val, "OSState", &s.OSState)
+			delete(rawMsg, key)
+		case "osType":
+			err = unpopulate(val, "OSType", &s.OSType)
+			delete(rawMsg, key)
+		case "purchasePlan":
+			err = unpopulate(val, "PurchasePlan", &s.PurchasePlan)
+			delete(rawMsg, key)
+		case "recommended":
+			err = unpopulate(val, "Recommended", &s.Recommended)
+			delete(rawMsg, key)
+		}
+		if err != nil {
+			return fmt.Errorf("unmarshalling type %T: %v", s, err)
+		}
+	}
+	return nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaller interface for type SharedGalleryImageVersionProperties.
+func (s *SharedGalleryImageVersionProperties) UnmarshalJSON(data []byte) error {
+	var rawMsg map[string]json.RawMessage
+	if err := json.Unmarshal(data, &rawMsg); err != nil {
+		return fmt.Errorf("unmarshalling type %T: %v", s, err)
+	}
+	for key, val := range rawMsg {
+		var err error
+		switch key {
+		case "endOfLifeDate":
+			err = unpopulateTimeRFC3339(val, "EndOfLifeDate", &s.EndOfLifeDate)
+			delete(rawMsg, key)
+		case "publishedDate":
+			err = unpopulateTimeRFC3339(val, "PublishedDate", &s.PublishedDate)
+			delete(rawMsg, key)
+		}
+		if err != nil {
+			return fmt.Errorf("unmarshalling type %T: %v", s, err)
+		}
+	}
+	return nil
+}
+
+// MarshalJSON implements the json.Marshaller interface for type SharingProfile.
+func (s SharingProfile) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	populate(objectMap, "communityGalleryInfo", &s.CommunityGalleryInfo)
+	populate(objectMap, "groups", s.Groups)
+	populate(objectMap, "permissions", s.Permissions)
+	return json.Marshal(objectMap)
+}
+
+// MarshalJSON implements the json.Marshaller interface for type SharingProfileGroup.
+func (s SharingProfileGroup) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	populate(objectMap, "ids", s.IDs)
+	populate(objectMap, "type", s.Type)
+	return json.Marshal(objectMap)
+}
+
+// MarshalJSON implements the json.Marshaller interface for type SharingStatus.
+func (s SharingStatus) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	populate(objectMap, "aggregatedState", s.AggregatedState)
+	populate(objectMap, "summary", s.Summary)
+	return json.Marshal(objectMap)
+}
+
+// MarshalJSON implements the json.Marshaller interface for type SharingUpdate.
+func (s SharingUpdate) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	populate(objectMap, "groups", s.Groups)
+	populate(objectMap, "operationType", s.OperationType)
+	return json.Marshal(objectMap)
+}
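+
+// Editorial note, not part of the generated file: SharingProfile passes
+// &s.CommunityGalleryInfo rather than the value because the field is typed
+// interface{}. The populate helper used by these models checks nil-ness via
+// reflection, which is only safe on nilable kinds such as pointers, so
+// interface-typed fields are handed over by address; the same pattern appears
+// later for Settings/ProtectedSettings-style fields on the extension types.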
+
+// MarshalJSON implements the json.Marshaller interface for type Snapshot.
+func (s Snapshot) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	populate(objectMap, "extendedLocation", s.ExtendedLocation)
+	populate(objectMap, "id", s.ID)
+	populate(objectMap, "location", s.Location)
+	populate(objectMap, "managedBy", s.ManagedBy)
+	populate(objectMap, "name", s.Name)
+	populate(objectMap, "properties", s.Properties)
+	populate(objectMap, "sku", s.SKU)
+	populate(objectMap, "tags", s.Tags)
+	populate(objectMap, "type", s.Type)
+	return json.Marshal(objectMap)
+}
+
+// MarshalJSON implements the json.Marshaller interface for type SnapshotProperties.
+func (s SnapshotProperties) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	populate(objectMap, "completionPercent", s.CompletionPercent)
+	populate(objectMap, "creationData", s.CreationData)
+	populate(objectMap, "dataAccessAuthMode", s.DataAccessAuthMode)
+	populate(objectMap, "diskAccessId", s.DiskAccessID)
+	populate(objectMap, "diskSizeBytes", s.DiskSizeBytes)
+	populate(objectMap, "diskSizeGB", s.DiskSizeGB)
+	populate(objectMap, "diskState", s.DiskState)
+	populate(objectMap, "encryption", s.Encryption)
+	populate(objectMap, "encryptionSettingsCollection", s.EncryptionSettingsCollection)
+	populate(objectMap, "hyperVGeneration", s.HyperVGeneration)
+	populate(objectMap, "incremental", s.Incremental)
+	populate(objectMap, "networkAccessPolicy", s.NetworkAccessPolicy)
+	populate(objectMap, "osType", s.OSType)
+	populate(objectMap, "provisioningState", s.ProvisioningState)
+	populate(objectMap, "publicNetworkAccess", s.PublicNetworkAccess)
+	populate(objectMap, "purchasePlan", s.PurchasePlan)
+	populate(objectMap, "securityProfile", s.SecurityProfile)
+	populate(objectMap, "supportedCapabilities", s.SupportedCapabilities)
+	populate(objectMap, "supportsHibernation", s.SupportsHibernation)
+	populateTimeRFC3339(objectMap, "timeCreated", s.TimeCreated)
+	populate(objectMap, "uniqueId", s.UniqueID)
+	return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON implements the json.Unmarshaller interface for type SnapshotProperties.
+func (s *SnapshotProperties) UnmarshalJSON(data []byte) error {
+	var rawMsg map[string]json.RawMessage
+	if err := json.Unmarshal(data, &rawMsg); err != nil {
+		return fmt.Errorf("unmarshalling type %T: %v", s, err)
+	}
+	for key, val := range rawMsg {
+		var err error
+		switch key {
+		case "completionPercent":
+			err = unpopulate(val, "CompletionPercent", &s.CompletionPercent)
+			delete(rawMsg, key)
+		case "creationData":
+			err = unpopulate(val, "CreationData", &s.CreationData)
+			delete(rawMsg, key)
+		case "dataAccessAuthMode":
+			err = unpopulate(val, "DataAccessAuthMode", &s.DataAccessAuthMode)
+			delete(rawMsg, key)
+		case "diskAccessId":
+			err = unpopulate(val, "DiskAccessID", &s.DiskAccessID)
+			delete(rawMsg, key)
+		case "diskSizeBytes":
+			err = unpopulate(val, "DiskSizeBytes", &s.DiskSizeBytes)
+			delete(rawMsg, key)
+		case "diskSizeGB":
+			err = unpopulate(val, "DiskSizeGB", &s.DiskSizeGB)
+			delete(rawMsg, key)
+		case "diskState":
+			err = unpopulate(val, "DiskState", &s.DiskState)
+			delete(rawMsg, key)
+		case "encryption":
+			err = unpopulate(val, "Encryption", &s.Encryption)
+			delete(rawMsg, key)
+		case "encryptionSettingsCollection":
+			err = unpopulate(val, "EncryptionSettingsCollection", &s.EncryptionSettingsCollection)
+			delete(rawMsg, key)
+		case "hyperVGeneration":
+			err = unpopulate(val, "HyperVGeneration", &s.HyperVGeneration)
+			delete(rawMsg, key)
+		case "incremental":
+			err = unpopulate(val, "Incremental", &s.Incremental)
+			delete(rawMsg, key)
+		case "networkAccessPolicy":
+			err = unpopulate(val, "NetworkAccessPolicy", &s.NetworkAccessPolicy)
+			delete(rawMsg, key)
+		case "osType":
+			err = unpopulate(val, "OSType", &s.OSType)
+			delete(rawMsg, key)
+		case "provisioningState":
+			err = unpopulate(val, "ProvisioningState", &s.ProvisioningState)
+			delete(rawMsg, key)
+		case "publicNetworkAccess":
+			err = unpopulate(val, "PublicNetworkAccess", &s.PublicNetworkAccess)
+			delete(rawMsg, key)
+		case "purchasePlan":
+			err = unpopulate(val, "PurchasePlan", &s.PurchasePlan)
+			delete(rawMsg, key)
+		case "securityProfile":
+			err = unpopulate(val, "SecurityProfile", &s.SecurityProfile)
+			delete(rawMsg, key)
+		case "supportedCapabilities":
+			err = unpopulate(val, "SupportedCapabilities", &s.SupportedCapabilities)
+			delete(rawMsg, key)
+		case "supportsHibernation":
+			err = unpopulate(val, "SupportsHibernation", &s.SupportsHibernation)
+			delete(rawMsg, key)
+		case "timeCreated":
+			err = unpopulateTimeRFC3339(val, "TimeCreated", &s.TimeCreated)
+			delete(rawMsg, key)
+		case "uniqueId":
+			err = unpopulate(val, "UniqueID", &s.UniqueID)
+			delete(rawMsg, key)
+		}
+		if err != nil {
+			return fmt.Errorf("unmarshalling type %T: %v", s, err)
+		}
+	}
+	return nil
+}
+
+// MarshalJSON implements the json.Marshaller interface for type SnapshotUpdate.
+func (s SnapshotUpdate) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	populate(objectMap, "properties", s.Properties)
+	populate(objectMap, "sku", s.SKU)
+	populate(objectMap, "tags", s.Tags)
+	return json.Marshal(objectMap)
+}
+
+// MarshalJSON implements the json.Marshaller interface for type StorageProfile.
+func (s StorageProfile) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	populate(objectMap, "dataDisks", s.DataDisks)
+	populate(objectMap, "imageReference", s.ImageReference)
+	populate(objectMap, "osDisk", s.OSDisk)
+	return json.Marshal(objectMap)
+}
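+
+// Editorial note, not part of the generated file: a round trip over the
+// SnapshotProperties pair above, with the RFC3339 timestamp handled by the
+// time helpers. A minimal sketch:
+//
+//	src := `{"diskSizeGB":128,"incremental":true,"timeCreated":"2022-03-01T10:00:00Z"}`
+//	var sp SnapshotProperties
+//	_ = json.Unmarshal([]byte(src), &sp) // DiskSizeGB, Incremental, TimeCreated populated
+//	out, _ := json.Marshal(sp)           // re-emits the same three members; nil fields omitted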
+
+// MarshalJSON implements the json.Marshaller interface for type ThrottledRequestsInput.
+func (t ThrottledRequestsInput) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	populate(objectMap, "blobContainerSasUri", t.BlobContainerSasURI)
+	populateTimeRFC3339(objectMap, "fromTime", t.FromTime)
+	populate(objectMap, "groupByClientApplicationId", t.GroupByClientApplicationID)
+	populate(objectMap, "groupByOperationName", t.GroupByOperationName)
+	populate(objectMap, "groupByResourceName", t.GroupByResourceName)
+	populate(objectMap, "groupByThrottlePolicy", t.GroupByThrottlePolicy)
+	populate(objectMap, "groupByUserAgent", t.GroupByUserAgent)
+	populateTimeRFC3339(objectMap, "toTime", t.ToTime)
+	return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON implements the json.Unmarshaller interface for type ThrottledRequestsInput.
+func (t *ThrottledRequestsInput) UnmarshalJSON(data []byte) error {
+	var rawMsg map[string]json.RawMessage
+	if err := json.Unmarshal(data, &rawMsg); err != nil {
+		return fmt.Errorf("unmarshalling type %T: %v", t, err)
+	}
+	for key, val := range rawMsg {
+		var err error
+		switch key {
+		case "blobContainerSasUri":
+			err = unpopulate(val, "BlobContainerSasURI", &t.BlobContainerSasURI)
+			delete(rawMsg, key)
+		case "fromTime":
+			err = unpopulateTimeRFC3339(val, "FromTime", &t.FromTime)
+			delete(rawMsg, key)
+		case "groupByClientApplicationId":
+			err = unpopulate(val, "GroupByClientApplicationID", &t.GroupByClientApplicationID)
+			delete(rawMsg, key)
+		case "groupByOperationName":
+			err = unpopulate(val, "GroupByOperationName", &t.GroupByOperationName)
+			delete(rawMsg, key)
+		case "groupByResourceName":
+			err = unpopulate(val, "GroupByResourceName", &t.GroupByResourceName)
+			delete(rawMsg, key)
+		case "groupByThrottlePolicy":
+			err = unpopulate(val, "GroupByThrottlePolicy", &t.GroupByThrottlePolicy)
+			delete(rawMsg, key)
+		case "groupByUserAgent":
+			err = unpopulate(val, "GroupByUserAgent", &t.GroupByUserAgent)
+			delete(rawMsg, key)
+		case "toTime":
+			err = unpopulateTimeRFC3339(val, "ToTime", &t.ToTime)
+			delete(rawMsg, key)
+		}
+		if err != nil {
+			return fmt.Errorf("unmarshalling type %T: %v", t, err)
+		}
+	}
+	return nil
+}
+
+// MarshalJSON implements the json.Marshaller interface for type UpdateResource.
+func (u UpdateResource) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	populate(objectMap, "tags", u.Tags)
+	return json.Marshal(objectMap)
+}
+
+// MarshalJSON implements the json.Marshaller interface for type UpdateResourceDefinition.
+func (u UpdateResourceDefinition) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	populate(objectMap, "id", u.ID)
+	populate(objectMap, "name", u.Name)
+	populate(objectMap, "tags", u.Tags)
+	populate(objectMap, "type", u.Type)
+	return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON implements the json.Unmarshaller interface for type UpgradeOperationHistoryStatus.
+func (u *UpgradeOperationHistoryStatus) UnmarshalJSON(data []byte) error {
+	var rawMsg map[string]json.RawMessage
+	if err := json.Unmarshal(data, &rawMsg); err != nil {
+		return fmt.Errorf("unmarshalling type %T: %v", u, err)
+	}
+	for key, val := range rawMsg {
+		var err error
+		switch key {
+		case "code":
+			err = unpopulate(val, "Code", &u.Code)
+			delete(rawMsg, key)
+		case "endTime":
+			err = unpopulateTimeRFC3339(val, "EndTime", &u.EndTime)
+			delete(rawMsg, key)
+		case "startTime":
+			err = unpopulateTimeRFC3339(val, "StartTime", &u.StartTime)
+			delete(rawMsg, key)
+		}
+		if err != nil {
+			return fmt.Errorf("unmarshalling type %T: %v", u, err)
+		}
+	}
+	return nil
+}
+
+// MarshalJSON implements the json.Marshaller interface for type VaultSecretGroup.
+func (v VaultSecretGroup) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	populate(objectMap, "sourceVault", v.SourceVault)
+	populate(objectMap, "vaultCertificates", v.VaultCertificates)
+	return json.Marshal(objectMap)
+}
+
+// MarshalJSON implements the json.Marshaller interface for type VirtualMachine.
+func (v VirtualMachine) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	populate(objectMap, "extendedLocation", v.ExtendedLocation)
+	populate(objectMap, "id", v.ID)
+	populate(objectMap, "identity", v.Identity)
+	populate(objectMap, "location", v.Location)
+	populate(objectMap, "name", v.Name)
+	populate(objectMap, "plan", v.Plan)
+	populate(objectMap, "properties", v.Properties)
+	populate(objectMap, "resources", v.Resources)
+	populate(objectMap, "tags", v.Tags)
+	populate(objectMap, "type", v.Type)
+	populate(objectMap, "zones", v.Zones)
+	return json.Marshal(objectMap)
+}
+
+// MarshalJSON implements the json.Marshaller interface for type VirtualMachineAgentInstanceView.
+func (v VirtualMachineAgentInstanceView) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	populate(objectMap, "extensionHandlers", v.ExtensionHandlers)
+	populate(objectMap, "statuses", v.Statuses)
+	populate(objectMap, "vmAgentVersion", v.VMAgentVersion)
+	return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON implements the json.Unmarshaller interface for type VirtualMachineAssessPatchesResult.
+func (v *VirtualMachineAssessPatchesResult) UnmarshalJSON(data []byte) error {
+	var rawMsg map[string]json.RawMessage
+	if err := json.Unmarshal(data, &rawMsg); err != nil {
+		return fmt.Errorf("unmarshalling type %T: %v", v, err)
+	}
+	for key, val := range rawMsg {
+		var err error
+		switch key {
+		case "assessmentActivityId":
+			err = unpopulate(val, "AssessmentActivityID", &v.AssessmentActivityID)
+			delete(rawMsg, key)
+		case "availablePatches":
+			err = unpopulate(val, "AvailablePatches", &v.AvailablePatches)
+			delete(rawMsg, key)
+		case "criticalAndSecurityPatchCount":
+			err = unpopulate(val, "CriticalAndSecurityPatchCount", &v.CriticalAndSecurityPatchCount)
+			delete(rawMsg, key)
+		case "error":
+			err = unpopulate(val, "Error", &v.Error)
+			delete(rawMsg, key)
+		case "otherPatchCount":
+			err = unpopulate(val, "OtherPatchCount", &v.OtherPatchCount)
+			delete(rawMsg, key)
+		case "rebootPending":
+			err = unpopulate(val, "RebootPending", &v.RebootPending)
+			delete(rawMsg, key)
+		case "startDateTime":
+			err = unpopulateTimeRFC3339(val, "StartDateTime", &v.StartDateTime)
+			delete(rawMsg, key)
+		case "status":
+			err = unpopulate(val, "Status", &v.Status)
+			delete(rawMsg, key)
+		}
+		if err != nil {
+			return fmt.Errorf("unmarshalling type %T: %v", v, err)
+		}
+	}
+	return nil
+}
+
+// MarshalJSON implements the json.Marshaller interface for type VirtualMachineCaptureResult.
+func (v VirtualMachineCaptureResult) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	populate(objectMap, "contentVersion", v.ContentVersion)
+	populate(objectMap, "id", v.ID)
+	populate(objectMap, "parameters", &v.Parameters)
+	populate(objectMap, "resources", v.Resources)
+	populate(objectMap, "$schema", v.Schema)
+	return json.Marshal(objectMap)
+}
+
+// MarshalJSON implements the json.Marshaller interface for type VirtualMachineExtension.
+func (v VirtualMachineExtension) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	populate(objectMap, "id", v.ID)
+	populate(objectMap, "location", v.Location)
+	populate(objectMap, "name", v.Name)
+	populate(objectMap, "properties", v.Properties)
+	populate(objectMap, "tags", v.Tags)
+	populate(objectMap, "type", v.Type)
+	return json.Marshal(objectMap)
+}
+
+// MarshalJSON implements the json.Marshaller interface for type VirtualMachineExtensionImage.
+func (v VirtualMachineExtensionImage) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	populate(objectMap, "id", v.ID)
+	populate(objectMap, "location", v.Location)
+	populate(objectMap, "name", v.Name)
+	populate(objectMap, "properties", v.Properties)
+	populate(objectMap, "tags", v.Tags)
+	populate(objectMap, "type", v.Type)
+	return json.Marshal(objectMap)
+}
+
+// MarshalJSON implements the json.Marshaller interface for type VirtualMachineExtensionInstanceView.
+func (v VirtualMachineExtensionInstanceView) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	populate(objectMap, "name", v.Name)
+	populate(objectMap, "statuses", v.Statuses)
+	populate(objectMap, "substatuses", v.Substatuses)
+	populate(objectMap, "type", v.Type)
+	populate(objectMap, "typeHandlerVersion", v.TypeHandlerVersion)
+	return json.Marshal(objectMap)
+}
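+
+// Editorial note, not part of the generated file: VirtualMachineCaptureResult
+// above maps its Schema field to the literal "$schema" member (the capture API
+// returns an ARM-template-shaped envelope, so the JSON key is not a plain
+// identifier), and it passes &v.Parameters because Parameters is
+// interface-typed, per the note on SharingProfile earlier in this file.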
+
+// MarshalJSON implements the json.Marshaller interface for type VirtualMachineExtensionUpdate.
+func (v VirtualMachineExtensionUpdate) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	populate(objectMap, "properties", v.Properties)
+	populate(objectMap, "tags", v.Tags)
+	return json.Marshal(objectMap)
+}
+
+// MarshalJSON implements the json.Marshaller interface for type VirtualMachineIdentity.
+func (v VirtualMachineIdentity) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	populate(objectMap, "principalId", v.PrincipalID)
+	populate(objectMap, "tenantId", v.TenantID)
+	populate(objectMap, "type", v.Type)
+	populate(objectMap, "userAssignedIdentities", v.UserAssignedIdentities)
+	return json.Marshal(objectMap)
+}
+
+// MarshalJSON implements the json.Marshaller interface for type VirtualMachineImage.
+func (v VirtualMachineImage) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	populate(objectMap, "extendedLocation", v.ExtendedLocation)
+	populate(objectMap, "id", v.ID)
+	populate(objectMap, "location", v.Location)
+	populate(objectMap, "name", v.Name)
+	populate(objectMap, "properties", v.Properties)
+	populate(objectMap, "tags", v.Tags)
+	return json.Marshal(objectMap)
+}
+
+// MarshalJSON implements the json.Marshaller interface for type VirtualMachineImageProperties.
+func (v VirtualMachineImageProperties) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	populate(objectMap, "architecture", v.Architecture)
+	populate(objectMap, "automaticOSUpgradeProperties", v.AutomaticOSUpgradeProperties)
+	populate(objectMap, "dataDiskImages", v.DataDiskImages)
+	populate(objectMap, "disallowed", v.Disallowed)
+	populate(objectMap, "features", v.Features)
+	populate(objectMap, "hyperVGeneration", v.HyperVGeneration)
+	populate(objectMap, "osDiskImage", v.OSDiskImage)
+	populate(objectMap, "plan", v.Plan)
+	return json.Marshal(objectMap)
+}
+
+// MarshalJSON implements the json.Marshaller interface for type VirtualMachineImageResource.
+func (v VirtualMachineImageResource) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	populate(objectMap, "extendedLocation", v.ExtendedLocation)
+	populate(objectMap, "id", v.ID)
+	populate(objectMap, "location", v.Location)
+	populate(objectMap, "name", v.Name)
+	populate(objectMap, "tags", v.Tags)
+	return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON implements the json.Unmarshaller interface for type VirtualMachineInstallPatchesResult.
+func (v *VirtualMachineInstallPatchesResult) UnmarshalJSON(data []byte) error {
+	var rawMsg map[string]json.RawMessage
+	if err := json.Unmarshal(data, &rawMsg); err != nil {
+		return fmt.Errorf("unmarshalling type %T: %v", v, err)
+	}
+	for key, val := range rawMsg {
+		var err error
+		switch key {
+		case "error":
+			err = unpopulate(val, "Error", &v.Error)
+			delete(rawMsg, key)
+		case "excludedPatchCount":
+			err = unpopulate(val, "ExcludedPatchCount", &v.ExcludedPatchCount)
+			delete(rawMsg, key)
+		case "failedPatchCount":
+			err = unpopulate(val, "FailedPatchCount", &v.FailedPatchCount)
+			delete(rawMsg, key)
+		case "installationActivityId":
+			err = unpopulate(val, "InstallationActivityID", &v.InstallationActivityID)
+			delete(rawMsg, key)
+		case "installedPatchCount":
+			err = unpopulate(val, "InstalledPatchCount", &v.InstalledPatchCount)
+			delete(rawMsg, key)
+		case "maintenanceWindowExceeded":
+			err = unpopulate(val, "MaintenanceWindowExceeded", &v.MaintenanceWindowExceeded)
+			delete(rawMsg, key)
+		case "notSelectedPatchCount":
+			err = unpopulate(val, "NotSelectedPatchCount", &v.NotSelectedPatchCount)
+			delete(rawMsg, key)
+		case "patches":
+			err = unpopulate(val, "Patches", &v.Patches)
+			delete(rawMsg, key)
+		case "pendingPatchCount":
+			err = unpopulate(val, "PendingPatchCount", &v.PendingPatchCount)
+			delete(rawMsg, key)
+		case "rebootStatus":
+			err = unpopulate(val, "RebootStatus", &v.RebootStatus)
+			delete(rawMsg, key)
+		case "startDateTime":
+			err = unpopulateTimeRFC3339(val, "StartDateTime", &v.StartDateTime)
+			delete(rawMsg, key)
+		case "status":
+			err = unpopulate(val, "Status", &v.Status)
+			delete(rawMsg, key)
+		}
+		if err != nil {
+			return fmt.Errorf("unmarshalling type %T: %v", v, err)
+		}
+	}
+	return nil
+}
+
+// MarshalJSON implements the json.Marshaller interface for type VirtualMachineInstanceView.
+func (v VirtualMachineInstanceView) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	populate(objectMap, "assignedHost", v.AssignedHost)
+	populate(objectMap, "bootDiagnostics", v.BootDiagnostics)
+	populate(objectMap, "computerName", v.ComputerName)
+	populate(objectMap, "disks", v.Disks)
+	populate(objectMap, "extensions", v.Extensions)
+	populate(objectMap, "hyperVGeneration", v.HyperVGeneration)
+	populate(objectMap, "maintenanceRedeployStatus", v.MaintenanceRedeployStatus)
+	populate(objectMap, "osName", v.OSName)
+	populate(objectMap, "osVersion", v.OSVersion)
+	populate(objectMap, "patchStatus", v.PatchStatus)
+	populate(objectMap, "platformFaultDomain", v.PlatformFaultDomain)
+	populate(objectMap, "platformUpdateDomain", v.PlatformUpdateDomain)
+	populate(objectMap, "rdpThumbPrint", v.RdpThumbPrint)
+	populate(objectMap, "statuses", v.Statuses)
+	populate(objectMap, "vmAgent", v.VMAgent)
+	populate(objectMap, "vmHealth", v.VMHealth)
+	return json.Marshal(objectMap)
+}
+
+// MarshalJSON implements the json.Marshaller interface for type VirtualMachineNetworkInterfaceConfigurationProperties.
+func (v VirtualMachineNetworkInterfaceConfigurationProperties) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	populate(objectMap, "dnsSettings", v.DNSSettings)
+	populate(objectMap, "deleteOption", v.DeleteOption)
+	populate(objectMap, "dscpConfiguration", v.DscpConfiguration)
+	populate(objectMap, "enableAcceleratedNetworking", v.EnableAcceleratedNetworking)
+	populate(objectMap, "enableFpga", v.EnableFpga)
+	populate(objectMap, "enableIPForwarding", v.EnableIPForwarding)
+	populate(objectMap, "ipConfigurations", v.IPConfigurations)
+	populate(objectMap, "networkSecurityGroup", v.NetworkSecurityGroup)
+	populate(objectMap, "primary", v.Primary)
+	return json.Marshal(objectMap)
+}
+
+// MarshalJSON implements the json.Marshaller interface for type VirtualMachineNetworkInterfaceDNSSettingsConfiguration.
+func (v VirtualMachineNetworkInterfaceDNSSettingsConfiguration) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	populate(objectMap, "dnsServers", v.DNSServers)
+	return json.Marshal(objectMap)
+}
+
+// MarshalJSON implements the json.Marshaller interface for type VirtualMachineNetworkInterfaceIPConfigurationProperties.
+func (v VirtualMachineNetworkInterfaceIPConfigurationProperties) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	populate(objectMap, "applicationGatewayBackendAddressPools", v.ApplicationGatewayBackendAddressPools)
+	populate(objectMap, "applicationSecurityGroups", v.ApplicationSecurityGroups)
+	populate(objectMap, "loadBalancerBackendAddressPools", v.LoadBalancerBackendAddressPools)
+	populate(objectMap, "primary", v.Primary)
+	populate(objectMap, "privateIPAddressVersion", v.PrivateIPAddressVersion)
+	populate(objectMap, "publicIPAddressConfiguration", v.PublicIPAddressConfiguration)
+	populate(objectMap, "subnet", v.Subnet)
+	return json.Marshal(objectMap)
+}
+
+// MarshalJSON implements the json.Marshaller interface for type VirtualMachinePatchStatus.
+func (v VirtualMachinePatchStatus) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	populate(objectMap, "availablePatchSummary", v.AvailablePatchSummary)
+	populate(objectMap, "configurationStatuses", v.ConfigurationStatuses)
+	populate(objectMap, "lastPatchInstallationSummary", v.LastPatchInstallationSummary)
+	return json.Marshal(objectMap)
+}
+
+// MarshalJSON implements the json.Marshaller interface for type VirtualMachineProperties.
+func (v VirtualMachineProperties) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	populate(objectMap, "additionalCapabilities", v.AdditionalCapabilities)
+	populate(objectMap, "applicationProfile", v.ApplicationProfile)
+	populate(objectMap, "availabilitySet", v.AvailabilitySet)
+	populate(objectMap, "billingProfile", v.BillingProfile)
+	populate(objectMap, "capacityReservation", v.CapacityReservation)
+	populate(objectMap, "diagnosticsProfile", v.DiagnosticsProfile)
+	populate(objectMap, "evictionPolicy", v.EvictionPolicy)
+	populate(objectMap, "extensionsTimeBudget", v.ExtensionsTimeBudget)
+	populate(objectMap, "hardwareProfile", v.HardwareProfile)
+	populate(objectMap, "host", v.Host)
+	populate(objectMap, "hostGroup", v.HostGroup)
+	populate(objectMap, "instanceView", v.InstanceView)
+	populate(objectMap, "licenseType", v.LicenseType)
+	populate(objectMap, "networkProfile", v.NetworkProfile)
+	populate(objectMap, "osProfile", v.OSProfile)
+	populate(objectMap, "platformFaultDomain", v.PlatformFaultDomain)
+	populate(objectMap, "priority", v.Priority)
+	populate(objectMap, "provisioningState", v.ProvisioningState)
+	populate(objectMap, "proximityPlacementGroup", v.ProximityPlacementGroup)
+	populate(objectMap, "scheduledEventsProfile", v.ScheduledEventsProfile)
+	populate(objectMap, "securityProfile", v.SecurityProfile)
+	populate(objectMap, "storageProfile", v.StorageProfile)
+	populateTimeRFC3339(objectMap, "timeCreated", v.TimeCreated)
+	populate(objectMap, "userData", v.UserData)
+	populate(objectMap, "vmId", v.VMID)
+	populate(objectMap, "virtualMachineScaleSet", v.VirtualMachineScaleSet)
+	return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON implements the json.Unmarshaller interface for type VirtualMachineProperties.
+func (v *VirtualMachineProperties) UnmarshalJSON(data []byte) error {
+	var rawMsg map[string]json.RawMessage
+	if err := json.Unmarshal(data, &rawMsg); err != nil {
+		return fmt.Errorf("unmarshalling type %T: %v", v, err)
+	}
+	for key, val := range rawMsg {
+		var err error
+		switch key {
+		case "additionalCapabilities":
+			err = unpopulate(val, "AdditionalCapabilities", &v.AdditionalCapabilities)
+			delete(rawMsg, key)
+		case "applicationProfile":
+			err = unpopulate(val, "ApplicationProfile", &v.ApplicationProfile)
+			delete(rawMsg, key)
+		case "availabilitySet":
+			err = unpopulate(val, "AvailabilitySet", &v.AvailabilitySet)
+			delete(rawMsg, key)
+		case "billingProfile":
+			err = unpopulate(val, "BillingProfile", &v.BillingProfile)
+			delete(rawMsg, key)
+		case "capacityReservation":
+			err = unpopulate(val, "CapacityReservation", &v.CapacityReservation)
+			delete(rawMsg, key)
+		case "diagnosticsProfile":
+			err = unpopulate(val, "DiagnosticsProfile", &v.DiagnosticsProfile)
+			delete(rawMsg, key)
+		case "evictionPolicy":
+			err = unpopulate(val, "EvictionPolicy", &v.EvictionPolicy)
+			delete(rawMsg, key)
+		case "extensionsTimeBudget":
+			err = unpopulate(val, "ExtensionsTimeBudget", &v.ExtensionsTimeBudget)
+			delete(rawMsg, key)
+		case "hardwareProfile":
+			err = unpopulate(val, "HardwareProfile", &v.HardwareProfile)
+			delete(rawMsg, key)
+		case "host":
+			err = unpopulate(val, "Host", &v.Host)
+			delete(rawMsg, key)
+		case "hostGroup":
+			err = unpopulate(val, "HostGroup", &v.HostGroup)
+			delete(rawMsg, key)
+		case "instanceView":
+			err = unpopulate(val, "InstanceView", &v.InstanceView)
+			delete(rawMsg, key)
+		case "licenseType":
+			err = unpopulate(val, "LicenseType", &v.LicenseType)
+			delete(rawMsg, key)
+		case "networkProfile":
+			err = unpopulate(val, "NetworkProfile", &v.NetworkProfile)
+			delete(rawMsg, key)
+		case "osProfile":
+			err = unpopulate(val, "OSProfile", &v.OSProfile)
+			delete(rawMsg, key)
+		case "platformFaultDomain":
+			err = unpopulate(val, "PlatformFaultDomain", &v.PlatformFaultDomain)
+			delete(rawMsg, key)
+		case "priority":
+			err = unpopulate(val, "Priority", &v.Priority)
+			delete(rawMsg, key)
+		case "provisioningState":
+			err = unpopulate(val, "ProvisioningState", &v.ProvisioningState)
+			delete(rawMsg, key)
+		case "proximityPlacementGroup":
+			err = unpopulate(val, "ProximityPlacementGroup", &v.ProximityPlacementGroup)
+			delete(rawMsg, key)
+		case "scheduledEventsProfile":
+			err = unpopulate(val, "ScheduledEventsProfile", &v.ScheduledEventsProfile)
+			delete(rawMsg, key)
+		case "securityProfile":
+			err = unpopulate(val, "SecurityProfile", &v.SecurityProfile)
+			delete(rawMsg, key)
+		case "storageProfile":
+			err = unpopulate(val, "StorageProfile", &v.StorageProfile)
+			delete(rawMsg, key)
+		case "timeCreated":
+			err = unpopulateTimeRFC3339(val, "TimeCreated", &v.TimeCreated)
+			delete(rawMsg, key)
+		case "userData":
+			err = unpopulate(val, "UserData", &v.UserData)
+			delete(rawMsg, key)
+		case "vmId":
+			err = unpopulate(val, "VMID", &v.VMID)
+			delete(rawMsg, key)
+		case "virtualMachineScaleSet":
+			err = unpopulate(val, "VirtualMachineScaleSet", &v.VirtualMachineScaleSet)
+			delete(rawMsg, key)
+		}
+		if err != nil {
+			return fmt.Errorf("unmarshalling type %T: %v", v, err)
+		}
+	}
+	return nil
+}
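+
+// Editorial note, not part of the generated file: the VirtualMachineProperties
+// pair above shows the full shape of these generated serializers: every JSON
+// member is routed by an explicit case, unknown members fall through untouched,
+// and the first failing unpopulate aborts with a %T-wrapped error. A hedged
+// sketch of the failure path:
+//
+//	var vp VirtualMachineProperties
+//	err := json.Unmarshal([]byte(`{"timeCreated":"not-a-timestamp"}`), &vp)
+//	// err != nil: "unmarshalling type *<pkg>.VirtualMachineProperties: ..."
+//	// from the malformed RFC3339 value; keys processed earlier (map order is
+//	// unspecified) may already have been applied to vp.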
+
+// MarshalJSON implements the json.Marshaller interface for type VirtualMachinePublicIPAddressConfigurationProperties.
+func (v VirtualMachinePublicIPAddressConfigurationProperties) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	populate(objectMap, "dnsSettings", v.DNSSettings)
+	populate(objectMap, "deleteOption", v.DeleteOption)
+	populate(objectMap, "ipTags", v.IPTags)
+	populate(objectMap, "idleTimeoutInMinutes", v.IdleTimeoutInMinutes)
+	populate(objectMap, "publicIPAddressVersion", v.PublicIPAddressVersion)
+	populate(objectMap, "publicIPAllocationMethod", v.PublicIPAllocationMethod)
+	populate(objectMap, "publicIPPrefix", v.PublicIPPrefix)
+	return json.Marshal(objectMap)
+}
+
+// MarshalJSON implements the json.Marshaller interface for type VirtualMachineRunCommand.
+func (v VirtualMachineRunCommand) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	populate(objectMap, "id", v.ID)
+	populate(objectMap, "location", v.Location)
+	populate(objectMap, "name", v.Name)
+	populate(objectMap, "properties", v.Properties)
+	populate(objectMap, "tags", v.Tags)
+	populate(objectMap, "type", v.Type)
+	return json.Marshal(objectMap)
+}
+
+// MarshalJSON implements the json.Marshaller interface for type VirtualMachineRunCommandInstanceView.
+func (v VirtualMachineRunCommandInstanceView) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	populateTimeRFC3339(objectMap, "endTime", v.EndTime)
+	populate(objectMap, "error", v.Error)
+	populate(objectMap, "executionMessage", v.ExecutionMessage)
+	populate(objectMap, "executionState", v.ExecutionState)
+	populate(objectMap, "exitCode", v.ExitCode)
+	populate(objectMap, "output", v.Output)
+	populateTimeRFC3339(objectMap, "startTime", v.StartTime)
+	populate(objectMap, "statuses", v.Statuses)
+	return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON implements the json.Unmarshaller interface for type VirtualMachineRunCommandInstanceView.
+func (v *VirtualMachineRunCommandInstanceView) UnmarshalJSON(data []byte) error {
+	var rawMsg map[string]json.RawMessage
+	if err := json.Unmarshal(data, &rawMsg); err != nil {
+		return fmt.Errorf("unmarshalling type %T: %v", v, err)
+	}
+	for key, val := range rawMsg {
+		var err error
+		switch key {
+		case "endTime":
+			err = unpopulateTimeRFC3339(val, "EndTime", &v.EndTime)
+			delete(rawMsg, key)
+		case "error":
+			err = unpopulate(val, "Error", &v.Error)
+			delete(rawMsg, key)
+		case "executionMessage":
+			err = unpopulate(val, "ExecutionMessage", &v.ExecutionMessage)
+			delete(rawMsg, key)
+		case "executionState":
+			err = unpopulate(val, "ExecutionState", &v.ExecutionState)
+			delete(rawMsg, key)
+		case "exitCode":
+			err = unpopulate(val, "ExitCode", &v.ExitCode)
+			delete(rawMsg, key)
+		case "output":
+			err = unpopulate(val, "Output", &v.Output)
+			delete(rawMsg, key)
+		case "startTime":
+			err = unpopulateTimeRFC3339(val, "StartTime", &v.StartTime)
+			delete(rawMsg, key)
+		case "statuses":
+			err = unpopulate(val, "Statuses", &v.Statuses)
+			delete(rawMsg, key)
+		}
+		if err != nil {
+			return fmt.Errorf("unmarshalling type %T: %v", v, err)
+		}
+	}
+	return nil
+}
+
+// MarshalJSON implements the json.Marshaller interface for type VirtualMachineRunCommandProperties.
+func (v VirtualMachineRunCommandProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "asyncExecution", v.AsyncExecution) + populate(objectMap, "errorBlobUri", v.ErrorBlobURI) + populate(objectMap, "instanceView", v.InstanceView) + populate(objectMap, "outputBlobUri", v.OutputBlobURI) + populate(objectMap, "parameters", v.Parameters) + populate(objectMap, "protectedParameters", v.ProtectedParameters) + populate(objectMap, "provisioningState", v.ProvisioningState) + populate(objectMap, "runAsPassword", v.RunAsPassword) + populate(objectMap, "runAsUser", v.RunAsUser) + populate(objectMap, "source", v.Source) + populate(objectMap, "timeoutInSeconds", v.TimeoutInSeconds) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type VirtualMachineRunCommandUpdate. +func (v VirtualMachineRunCommandUpdate) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "properties", v.Properties) + populate(objectMap, "tags", v.Tags) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type VirtualMachineScaleSet. +func (v VirtualMachineScaleSet) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "extendedLocation", v.ExtendedLocation) + populate(objectMap, "id", v.ID) + populate(objectMap, "identity", v.Identity) + populate(objectMap, "location", v.Location) + populate(objectMap, "name", v.Name) + populate(objectMap, "plan", v.Plan) + populate(objectMap, "properties", v.Properties) + populate(objectMap, "sku", v.SKU) + populate(objectMap, "tags", v.Tags) + populate(objectMap, "type", v.Type) + populate(objectMap, "zones", v.Zones) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type VirtualMachineScaleSetExtensionProfile. +func (v VirtualMachineScaleSetExtensionProfile) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "extensions", v.Extensions) + populate(objectMap, "extensionsTimeBudget", v.ExtensionsTimeBudget) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type VirtualMachineScaleSetExtensionProperties. +func (v VirtualMachineScaleSetExtensionProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "autoUpgradeMinorVersion", v.AutoUpgradeMinorVersion) + populate(objectMap, "enableAutomaticUpgrade", v.EnableAutomaticUpgrade) + populate(objectMap, "forceUpdateTag", v.ForceUpdateTag) + populate(objectMap, "protectedSettings", &v.ProtectedSettings) + populate(objectMap, "protectedSettingsFromKeyVault", &v.ProtectedSettingsFromKeyVault) + populate(objectMap, "provisionAfterExtensions", v.ProvisionAfterExtensions) + populate(objectMap, "provisioningState", v.ProvisioningState) + populate(objectMap, "publisher", v.Publisher) + populate(objectMap, "settings", &v.Settings) + populate(objectMap, "suppressFailures", v.SuppressFailures) + populate(objectMap, "type", v.Type) + populate(objectMap, "typeHandlerVersion", v.TypeHandlerVersion) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type VirtualMachineScaleSetExtensionUpdate. 
+func (v VirtualMachineScaleSetExtensionUpdate) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "id", v.ID) + populate(objectMap, "name", v.Name) + populate(objectMap, "properties", v.Properties) + populate(objectMap, "type", v.Type) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type VirtualMachineScaleSetIPConfigurationProperties. +func (v VirtualMachineScaleSetIPConfigurationProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "applicationGatewayBackendAddressPools", v.ApplicationGatewayBackendAddressPools) + populate(objectMap, "applicationSecurityGroups", v.ApplicationSecurityGroups) + populate(objectMap, "loadBalancerBackendAddressPools", v.LoadBalancerBackendAddressPools) + populate(objectMap, "loadBalancerInboundNatPools", v.LoadBalancerInboundNatPools) + populate(objectMap, "primary", v.Primary) + populate(objectMap, "privateIPAddressVersion", v.PrivateIPAddressVersion) + populate(objectMap, "publicIPAddressConfiguration", v.PublicIPAddressConfiguration) + populate(objectMap, "subnet", v.Subnet) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type VirtualMachineScaleSetIdentity. +func (v VirtualMachineScaleSetIdentity) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "principalId", v.PrincipalID) + populate(objectMap, "tenantId", v.TenantID) + populate(objectMap, "type", v.Type) + populate(objectMap, "userAssignedIdentities", v.UserAssignedIdentities) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type VirtualMachineScaleSetNetworkConfigurationDNSSettings. +func (v VirtualMachineScaleSetNetworkConfigurationDNSSettings) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "dnsServers", v.DNSServers) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type VirtualMachineScaleSetNetworkConfigurationProperties. +func (v VirtualMachineScaleSetNetworkConfigurationProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "dnsSettings", v.DNSSettings) + populate(objectMap, "deleteOption", v.DeleteOption) + populate(objectMap, "enableAcceleratedNetworking", v.EnableAcceleratedNetworking) + populate(objectMap, "enableFpga", v.EnableFpga) + populate(objectMap, "enableIPForwarding", v.EnableIPForwarding) + populate(objectMap, "ipConfigurations", v.IPConfigurations) + populate(objectMap, "networkSecurityGroup", v.NetworkSecurityGroup) + populate(objectMap, "primary", v.Primary) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type VirtualMachineScaleSetNetworkProfile. +func (v VirtualMachineScaleSetNetworkProfile) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "healthProbe", v.HealthProbe) + populate(objectMap, "networkApiVersion", v.NetworkAPIVersion) + populate(objectMap, "networkInterfaceConfigurations", v.NetworkInterfaceConfigurations) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type VirtualMachineScaleSetOSDisk. 
+func (v VirtualMachineScaleSetOSDisk) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "caching", v.Caching) + populate(objectMap, "createOption", v.CreateOption) + populate(objectMap, "deleteOption", v.DeleteOption) + populate(objectMap, "diffDiskSettings", v.DiffDiskSettings) + populate(objectMap, "diskSizeGB", v.DiskSizeGB) + populate(objectMap, "image", v.Image) + populate(objectMap, "managedDisk", v.ManagedDisk) + populate(objectMap, "name", v.Name) + populate(objectMap, "osType", v.OSType) + populate(objectMap, "vhdContainers", v.VhdContainers) + populate(objectMap, "writeAcceleratorEnabled", v.WriteAcceleratorEnabled) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type VirtualMachineScaleSetOSProfile. +func (v VirtualMachineScaleSetOSProfile) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "adminPassword", v.AdminPassword) + populate(objectMap, "adminUsername", v.AdminUsername) + populate(objectMap, "allowExtensionOperations", v.AllowExtensionOperations) + populate(objectMap, "computerNamePrefix", v.ComputerNamePrefix) + populate(objectMap, "customData", v.CustomData) + populate(objectMap, "linuxConfiguration", v.LinuxConfiguration) + populate(objectMap, "secrets", v.Secrets) + populate(objectMap, "windowsConfiguration", v.WindowsConfiguration) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type VirtualMachineScaleSetProperties. +func (v VirtualMachineScaleSetProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "additionalCapabilities", v.AdditionalCapabilities) + populate(objectMap, "automaticRepairsPolicy", v.AutomaticRepairsPolicy) + populate(objectMap, "doNotRunExtensionsOnOverprovisionedVMs", v.DoNotRunExtensionsOnOverprovisionedVMs) + populate(objectMap, "hostGroup", v.HostGroup) + populate(objectMap, "orchestrationMode", v.OrchestrationMode) + populate(objectMap, "overprovision", v.Overprovision) + populate(objectMap, "platformFaultDomainCount", v.PlatformFaultDomainCount) + populate(objectMap, "provisioningState", v.ProvisioningState) + populate(objectMap, "proximityPlacementGroup", v.ProximityPlacementGroup) + populate(objectMap, "scaleInPolicy", v.ScaleInPolicy) + populate(objectMap, "singlePlacementGroup", v.SinglePlacementGroup) + populate(objectMap, "spotRestorePolicy", v.SpotRestorePolicy) + populateTimeRFC3339(objectMap, "timeCreated", v.TimeCreated) + populate(objectMap, "uniqueId", v.UniqueID) + populate(objectMap, "upgradePolicy", v.UpgradePolicy) + populate(objectMap, "virtualMachineProfile", v.VirtualMachineProfile) + populate(objectMap, "zoneBalance", v.ZoneBalance) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type VirtualMachineScaleSetProperties. 
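+//
+// Like the other unmarshallers in this file, it first decodes into a
+// map[string]json.RawMessage, dispatches each known key through unpopulate
+// (which reports the Go field name on failure), and deletes handled keys
+// from the raw map; unrecognized keys are simply left behind and ignored.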
+func (v *VirtualMachineScaleSetProperties) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", v, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "additionalCapabilities": + err = unpopulate(val, "AdditionalCapabilities", &v.AdditionalCapabilities) + delete(rawMsg, key) + case "automaticRepairsPolicy": + err = unpopulate(val, "AutomaticRepairsPolicy", &v.AutomaticRepairsPolicy) + delete(rawMsg, key) + case "doNotRunExtensionsOnOverprovisionedVMs": + err = unpopulate(val, "DoNotRunExtensionsOnOverprovisionedVMs", &v.DoNotRunExtensionsOnOverprovisionedVMs) + delete(rawMsg, key) + case "hostGroup": + err = unpopulate(val, "HostGroup", &v.HostGroup) + delete(rawMsg, key) + case "orchestrationMode": + err = unpopulate(val, "OrchestrationMode", &v.OrchestrationMode) + delete(rawMsg, key) + case "overprovision": + err = unpopulate(val, "Overprovision", &v.Overprovision) + delete(rawMsg, key) + case "platformFaultDomainCount": + err = unpopulate(val, "PlatformFaultDomainCount", &v.PlatformFaultDomainCount) + delete(rawMsg, key) + case "provisioningState": + err = unpopulate(val, "ProvisioningState", &v.ProvisioningState) + delete(rawMsg, key) + case "proximityPlacementGroup": + err = unpopulate(val, "ProximityPlacementGroup", &v.ProximityPlacementGroup) + delete(rawMsg, key) + case "scaleInPolicy": + err = unpopulate(val, "ScaleInPolicy", &v.ScaleInPolicy) + delete(rawMsg, key) + case "singlePlacementGroup": + err = unpopulate(val, "SinglePlacementGroup", &v.SinglePlacementGroup) + delete(rawMsg, key) + case "spotRestorePolicy": + err = unpopulate(val, "SpotRestorePolicy", &v.SpotRestorePolicy) + delete(rawMsg, key) + case "timeCreated": + err = unpopulateTimeRFC3339(val, "TimeCreated", &v.TimeCreated) + delete(rawMsg, key) + case "uniqueId": + err = unpopulate(val, "UniqueID", &v.UniqueID) + delete(rawMsg, key) + case "upgradePolicy": + err = unpopulate(val, "UpgradePolicy", &v.UpgradePolicy) + delete(rawMsg, key) + case "virtualMachineProfile": + err = unpopulate(val, "VirtualMachineProfile", &v.VirtualMachineProfile) + delete(rawMsg, key) + case "zoneBalance": + err = unpopulate(val, "ZoneBalance", &v.ZoneBalance) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", v, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type VirtualMachineScaleSetPublicIPAddressConfigurationProperties. +func (v VirtualMachineScaleSetPublicIPAddressConfigurationProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "dnsSettings", v.DNSSettings) + populate(objectMap, "deleteOption", v.DeleteOption) + populate(objectMap, "ipTags", v.IPTags) + populate(objectMap, "idleTimeoutInMinutes", v.IdleTimeoutInMinutes) + populate(objectMap, "publicIPAddressVersion", v.PublicIPAddressVersion) + populate(objectMap, "publicIPPrefix", v.PublicIPPrefix) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type VirtualMachineScaleSetReimageParameters. 
+func (v VirtualMachineScaleSetReimageParameters) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "instanceIds", v.InstanceIDs) + populate(objectMap, "tempDisk", v.TempDisk) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type VirtualMachineScaleSetStorageProfile. +func (v VirtualMachineScaleSetStorageProfile) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "dataDisks", v.DataDisks) + populate(objectMap, "imageReference", v.ImageReference) + populate(objectMap, "osDisk", v.OSDisk) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type VirtualMachineScaleSetUpdate. +func (v VirtualMachineScaleSetUpdate) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "identity", v.Identity) + populate(objectMap, "plan", v.Plan) + populate(objectMap, "properties", v.Properties) + populate(objectMap, "sku", v.SKU) + populate(objectMap, "tags", v.Tags) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type VirtualMachineScaleSetUpdateIPConfigurationProperties. +func (v VirtualMachineScaleSetUpdateIPConfigurationProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "applicationGatewayBackendAddressPools", v.ApplicationGatewayBackendAddressPools) + populate(objectMap, "applicationSecurityGroups", v.ApplicationSecurityGroups) + populate(objectMap, "loadBalancerBackendAddressPools", v.LoadBalancerBackendAddressPools) + populate(objectMap, "loadBalancerInboundNatPools", v.LoadBalancerInboundNatPools) + populate(objectMap, "primary", v.Primary) + populate(objectMap, "privateIPAddressVersion", v.PrivateIPAddressVersion) + populate(objectMap, "publicIPAddressConfiguration", v.PublicIPAddressConfiguration) + populate(objectMap, "subnet", v.Subnet) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type VirtualMachineScaleSetUpdateNetworkConfigurationProperties. +func (v VirtualMachineScaleSetUpdateNetworkConfigurationProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "dnsSettings", v.DNSSettings) + populate(objectMap, "deleteOption", v.DeleteOption) + populate(objectMap, "enableAcceleratedNetworking", v.EnableAcceleratedNetworking) + populate(objectMap, "enableFpga", v.EnableFpga) + populate(objectMap, "enableIPForwarding", v.EnableIPForwarding) + populate(objectMap, "ipConfigurations", v.IPConfigurations) + populate(objectMap, "networkSecurityGroup", v.NetworkSecurityGroup) + populate(objectMap, "primary", v.Primary) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type VirtualMachineScaleSetUpdateNetworkProfile. +func (v VirtualMachineScaleSetUpdateNetworkProfile) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "healthProbe", v.HealthProbe) + populate(objectMap, "networkApiVersion", v.NetworkAPIVersion) + populate(objectMap, "networkInterfaceConfigurations", v.NetworkInterfaceConfigurations) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type VirtualMachineScaleSetUpdateOSDisk. 
+func (v VirtualMachineScaleSetUpdateOSDisk) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "caching", v.Caching) + populate(objectMap, "deleteOption", v.DeleteOption) + populate(objectMap, "diskSizeGB", v.DiskSizeGB) + populate(objectMap, "image", v.Image) + populate(objectMap, "managedDisk", v.ManagedDisk) + populate(objectMap, "vhdContainers", v.VhdContainers) + populate(objectMap, "writeAcceleratorEnabled", v.WriteAcceleratorEnabled) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type VirtualMachineScaleSetUpdateOSProfile. +func (v VirtualMachineScaleSetUpdateOSProfile) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "customData", v.CustomData) + populate(objectMap, "linuxConfiguration", v.LinuxConfiguration) + populate(objectMap, "secrets", v.Secrets) + populate(objectMap, "windowsConfiguration", v.WindowsConfiguration) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type VirtualMachineScaleSetUpdateStorageProfile. +func (v VirtualMachineScaleSetUpdateStorageProfile) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "dataDisks", v.DataDisks) + populate(objectMap, "imageReference", v.ImageReference) + populate(objectMap, "osDisk", v.OSDisk) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type VirtualMachineScaleSetVM. +func (v VirtualMachineScaleSetVM) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "id", v.ID) + populate(objectMap, "identity", v.Identity) + populate(objectMap, "instanceId", v.InstanceID) + populate(objectMap, "location", v.Location) + populate(objectMap, "name", v.Name) + populate(objectMap, "plan", v.Plan) + populate(objectMap, "properties", v.Properties) + populate(objectMap, "resources", v.Resources) + populate(objectMap, "sku", v.SKU) + populate(objectMap, "tags", v.Tags) + populate(objectMap, "type", v.Type) + populate(objectMap, "zones", v.Zones) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type VirtualMachineScaleSetVMExtensionUpdate. +func (v VirtualMachineScaleSetVMExtensionUpdate) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "id", v.ID) + populate(objectMap, "name", v.Name) + populate(objectMap, "properties", v.Properties) + populate(objectMap, "type", v.Type) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type VirtualMachineScaleSetVMInstanceIDs. +func (v VirtualMachineScaleSetVMInstanceIDs) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "instanceIds", v.InstanceIDs) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type VirtualMachineScaleSetVMInstanceRequiredIDs. +func (v VirtualMachineScaleSetVMInstanceRequiredIDs) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "instanceIds", v.InstanceIDs) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type VirtualMachineScaleSetVMInstanceView. 
+func (v VirtualMachineScaleSetVMInstanceView) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "assignedHost", v.AssignedHost) + populate(objectMap, "bootDiagnostics", v.BootDiagnostics) + populate(objectMap, "disks", v.Disks) + populate(objectMap, "extensions", v.Extensions) + populate(objectMap, "maintenanceRedeployStatus", v.MaintenanceRedeployStatus) + populate(objectMap, "placementGroupId", v.PlacementGroupID) + populate(objectMap, "platformFaultDomain", v.PlatformFaultDomain) + populate(objectMap, "platformUpdateDomain", v.PlatformUpdateDomain) + populate(objectMap, "rdpThumbPrint", v.RdpThumbPrint) + populate(objectMap, "statuses", v.Statuses) + populate(objectMap, "vmAgent", v.VMAgent) + populate(objectMap, "vmHealth", v.VMHealth) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type VirtualMachineScaleSetVMNetworkProfileConfiguration. +func (v VirtualMachineScaleSetVMNetworkProfileConfiguration) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "networkInterfaceConfigurations", v.NetworkInterfaceConfigurations) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type VirtualMachineSoftwarePatchProperties. +func (v *VirtualMachineSoftwarePatchProperties) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", v, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "activityId": + err = unpopulate(val, "ActivityID", &v.ActivityID) + delete(rawMsg, key) + case "assessmentState": + err = unpopulate(val, "AssessmentState", &v.AssessmentState) + delete(rawMsg, key) + case "classifications": + err = unpopulate(val, "Classifications", &v.Classifications) + delete(rawMsg, key) + case "kbId": + err = unpopulate(val, "KbID", &v.KbID) + delete(rawMsg, key) + case "lastModifiedDateTime": + err = unpopulateTimeRFC3339(val, "LastModifiedDateTime", &v.LastModifiedDateTime) + delete(rawMsg, key) + case "name": + err = unpopulate(val, "Name", &v.Name) + delete(rawMsg, key) + case "patchId": + err = unpopulate(val, "PatchID", &v.PatchID) + delete(rawMsg, key) + case "publishedDate": + err = unpopulateTimeRFC3339(val, "PublishedDate", &v.PublishedDate) + delete(rawMsg, key) + case "rebootBehavior": + err = unpopulate(val, "RebootBehavior", &v.RebootBehavior) + delete(rawMsg, key) + case "version": + err = unpopulate(val, "Version", &v.Version) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", v, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type VirtualMachineUpdate. +func (v VirtualMachineUpdate) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "identity", v.Identity) + populate(objectMap, "plan", v.Plan) + populate(objectMap, "properties", v.Properties) + populate(objectMap, "tags", v.Tags) + populate(objectMap, "zones", v.Zones) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type WinRMConfiguration. 
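+//
+// As with every marshaller in this file, the populate helper defined at the
+// bottom of the file does the per-field work: it skips nil members, and it
+// writes an explicit JSON null for any field the caller has marked as null
+// (the azcore.IsNullValue check).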
+func (w WinRMConfiguration) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "listeners", w.Listeners) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type WindowsConfiguration. +func (w WindowsConfiguration) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "additionalUnattendContent", w.AdditionalUnattendContent) + populate(objectMap, "enableAutomaticUpdates", w.EnableAutomaticUpdates) + populate(objectMap, "patchSettings", w.PatchSettings) + populate(objectMap, "provisionVMAgent", w.ProvisionVMAgent) + populate(objectMap, "timeZone", w.TimeZone) + populate(objectMap, "winRM", w.WinRM) + return json.Marshal(objectMap) +} + +// MarshalJSON implements the json.Marshaller interface for type WindowsParameters. +func (w WindowsParameters) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + populate(objectMap, "classificationsToInclude", w.ClassificationsToInclude) + populate(objectMap, "excludeKbsRequiringReboot", w.ExcludeKbsRequiringReboot) + populate(objectMap, "kbNumbersToExclude", w.KbNumbersToExclude) + populate(objectMap, "kbNumbersToInclude", w.KbNumbersToInclude) + populateTimeRFC3339(objectMap, "maxPatchPublishDate", w.MaxPatchPublishDate) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type WindowsParameters. +func (w *WindowsParameters) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", w, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "classificationsToInclude": + err = unpopulate(val, "ClassificationsToInclude", &w.ClassificationsToInclude) + delete(rawMsg, key) + case "excludeKbsRequiringReboot": + err = unpopulate(val, "ExcludeKbsRequiringReboot", &w.ExcludeKbsRequiringReboot) + delete(rawMsg, key) + case "kbNumbersToExclude": + err = unpopulate(val, "KbNumbersToExclude", &w.KbNumbersToExclude) + delete(rawMsg, key) + case "kbNumbersToInclude": + err = unpopulate(val, "KbNumbersToInclude", &w.KbNumbersToInclude) + delete(rawMsg, key) + case "maxPatchPublishDate": + err = unpopulateTimeRFC3339(val, "MaxPatchPublishDate", &w.MaxPatchPublishDate) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", w, err) + } + } + return nil +} + +func populate(m map[string]interface{}, k string, v interface{}) { + if v == nil { + return + } else if azcore.IsNullValue(v) { + m[k] = nil + } else if !reflect.ValueOf(v).IsNil() { + m[k] = v + } +} + +func unpopulate(data json.RawMessage, fn string, v interface{}) error { + if data == nil { + return nil + } + if err := json.Unmarshal(data, v); err != nil { + return fmt.Errorf("struct field %s: %v", fn, err) + } + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_operations_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_operations_client.go new file mode 100644 index 000000000..4bc146400 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_operations_client.go @@ -0,0 +1,98 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armcompute + +import ( + "context" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + armruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" +) + +// OperationsClient contains the methods for the Operations group. +// Don't use this type directly, use NewOperationsClient() instead. +type OperationsClient struct { + host string + pl runtime.Pipeline +} + +// NewOperationsClient creates a new instance of OperationsClient with the specified values. +// credential - used to authorize requests. Usually a credential from azidentity. +// options - pass nil to accept the default values. +func NewOperationsClient(credential azcore.TokenCredential, options *arm.ClientOptions) (*OperationsClient, error) { + if options == nil { + options = &arm.ClientOptions{} + } + ep := cloud.AzurePublic.Services[cloud.ResourceManager].Endpoint + if c, ok := options.Cloud.Services[cloud.ResourceManager]; ok { + ep = c.Endpoint + } + pl, err := armruntime.NewPipeline(moduleName, moduleVersion, credential, runtime.PipelineOptions{}, options) + if err != nil { + return nil, err + } + client := &OperationsClient{ + host: ep, + pl: pl, + } + return client, nil +} + +// NewListPager - Gets a list of compute operations. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// options - OperationsClientListOptions contains the optional parameters for the OperationsClient.List method. +func (client *OperationsClient) NewListPager(options *OperationsClientListOptions) *runtime.Pager[OperationsClientListResponse] { + return runtime.NewPager(runtime.PagingHandler[OperationsClientListResponse]{ + More: func(page OperationsClientListResponse) bool { + return false + }, + Fetcher: func(ctx context.Context, page *OperationsClientListResponse) (OperationsClientListResponse, error) { + req, err := client.listCreateRequest(ctx, options) + if err != nil { + return OperationsClientListResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return OperationsClientListResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return OperationsClientListResponse{}, runtime.NewResponseError(resp) + } + return client.listHandleResponse(resp) + }, + }) +} + +// listCreateRequest creates the List request. +func (client *OperationsClient) listCreateRequest(ctx context.Context, options *OperationsClientListOptions) (*policy.Request, error) { + urlPath := "/providers/Microsoft.Compute/operations" + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listHandleResponse handles the List response. 
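+// It unmarshals the JSON payload into the OperationListResult embedded in
+// OperationsClientListResponse. Note that the pager above always reports
+// More == false: the compute operations listing is returned as a single page.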
+func (client *OperationsClient) listHandleResponse(resp *http.Response) (OperationsClientListResponse, error) { + result := OperationsClientListResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.OperationListResult); err != nil { + return OperationsClientListResponse{}, err + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_proximityplacementgroups_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_proximityplacementgroups_client.go new file mode 100644 index 000000000..4c1fa00c3 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_proximityplacementgroups_client.go @@ -0,0 +1,405 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armcompute + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + armruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strings" +) + +// ProximityPlacementGroupsClient contains the methods for the ProximityPlacementGroups group. +// Don't use this type directly, use NewProximityPlacementGroupsClient() instead. +type ProximityPlacementGroupsClient struct { + host string + subscriptionID string + pl runtime.Pipeline +} + +// NewProximityPlacementGroupsClient creates a new instance of ProximityPlacementGroupsClient with the specified values. +// subscriptionID - Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms +// part of the URI for every service call. +// credential - used to authorize requests. Usually a credential from azidentity. +// options - pass nil to accept the default values. +func NewProximityPlacementGroupsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*ProximityPlacementGroupsClient, error) { + if options == nil { + options = &arm.ClientOptions{} + } + ep := cloud.AzurePublic.Services[cloud.ResourceManager].Endpoint + if c, ok := options.Cloud.Services[cloud.ResourceManager]; ok { + ep = c.Endpoint + } + pl, err := armruntime.NewPipeline(moduleName, moduleVersion, credential, runtime.PipelineOptions{}, options) + if err != nil { + return nil, err + } + client := &ProximityPlacementGroupsClient{ + subscriptionID: subscriptionID, + host: ep, + pl: pl, + } + return client, nil +} + +// CreateOrUpdate - Create or update a proximity placement group. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// proximityPlacementGroupName - The name of the proximity placement group. +// parameters - Parameters supplied to the Create Proximity Placement Group operation. +// options - ProximityPlacementGroupsClientCreateOrUpdateOptions contains the optional parameters for the ProximityPlacementGroupsClient.CreateOrUpdate +// method. 
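+//
+// A hypothetical usage sketch; ctx, cred (a TokenCredential, typically from
+// azidentity), and the literal names are illustrative, and to.Ptr is the
+// generic pointer helper from sdk/azcore/to:
+//
+//	client, err := NewProximityPlacementGroupsClient("<subscription-id>", cred, nil)
+//	if err != nil {
+//		// handle error
+//	}
+//	resp, err := client.CreateOrUpdate(ctx, "myResourceGroup", "myPPG",
+//		ProximityPlacementGroup{Location: to.Ptr("westus")}, nil)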
+func (client *ProximityPlacementGroupsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, proximityPlacementGroupName string, parameters ProximityPlacementGroup, options *ProximityPlacementGroupsClientCreateOrUpdateOptions) (ProximityPlacementGroupsClientCreateOrUpdateResponse, error) { + req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, proximityPlacementGroupName, parameters, options) + if err != nil { + return ProximityPlacementGroupsClientCreateOrUpdateResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return ProximityPlacementGroupsClientCreateOrUpdateResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) { + return ProximityPlacementGroupsClientCreateOrUpdateResponse{}, runtime.NewResponseError(resp) + } + return client.createOrUpdateHandleResponse(resp) +} + +// createOrUpdateCreateRequest creates the CreateOrUpdate request. +func (client *ProximityPlacementGroupsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, proximityPlacementGroupName string, parameters ProximityPlacementGroup, options *ProximityPlacementGroupsClientCreateOrUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/proximityPlacementGroups/{proximityPlacementGroupName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if proximityPlacementGroupName == "" { + return nil, errors.New("parameter proximityPlacementGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{proximityPlacementGroupName}", url.PathEscape(proximityPlacementGroupName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, parameters) +} + +// createOrUpdateHandleResponse handles the CreateOrUpdate response. +func (client *ProximityPlacementGroupsClient) createOrUpdateHandleResponse(resp *http.Response) (ProximityPlacementGroupsClientCreateOrUpdateResponse, error) { + result := ProximityPlacementGroupsClientCreateOrUpdateResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.ProximityPlacementGroup); err != nil { + return ProximityPlacementGroupsClientCreateOrUpdateResponse{}, err + } + return result, nil +} + +// Delete - Delete a proximity placement group. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// proximityPlacementGroupName - The name of the proximity placement group. +// options - ProximityPlacementGroupsClientDeleteOptions contains the optional parameters for the ProximityPlacementGroupsClient.Delete +// method. 
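+//
+// A minimal sketch with illustrative names:
+//
+//	if _, err := client.Delete(ctx, "myResourceGroup", "myPPG", nil); err != nil {
+//		// handle error
+//	}
+//
+// The returned struct is currently empty; per the response types file it is
+// a placeholder for future response values.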
+func (client *ProximityPlacementGroupsClient) Delete(ctx context.Context, resourceGroupName string, proximityPlacementGroupName string, options *ProximityPlacementGroupsClientDeleteOptions) (ProximityPlacementGroupsClientDeleteResponse, error) { + req, err := client.deleteCreateRequest(ctx, resourceGroupName, proximityPlacementGroupName, options) + if err != nil { + return ProximityPlacementGroupsClientDeleteResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return ProximityPlacementGroupsClientDeleteResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ProximityPlacementGroupsClientDeleteResponse{}, runtime.NewResponseError(resp) + } + return ProximityPlacementGroupsClientDeleteResponse{}, nil +} + +// deleteCreateRequest creates the Delete request. +func (client *ProximityPlacementGroupsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, proximityPlacementGroupName string, options *ProximityPlacementGroupsClientDeleteOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/proximityPlacementGroups/{proximityPlacementGroupName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if proximityPlacementGroupName == "" { + return nil, errors.New("parameter proximityPlacementGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{proximityPlacementGroupName}", url.PathEscape(proximityPlacementGroupName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// Get - Retrieves information about a proximity placement group. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// proximityPlacementGroupName - The name of the proximity placement group. +// options - ProximityPlacementGroupsClientGetOptions contains the optional parameters for the ProximityPlacementGroupsClient.Get +// method. +func (client *ProximityPlacementGroupsClient) Get(ctx context.Context, resourceGroupName string, proximityPlacementGroupName string, options *ProximityPlacementGroupsClientGetOptions) (ProximityPlacementGroupsClientGetResponse, error) { + req, err := client.getCreateRequest(ctx, resourceGroupName, proximityPlacementGroupName, options) + if err != nil { + return ProximityPlacementGroupsClientGetResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return ProximityPlacementGroupsClientGetResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ProximityPlacementGroupsClientGetResponse{}, runtime.NewResponseError(resp) + } + return client.getHandleResponse(resp) +} + +// getCreateRequest creates the Get request.
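+// Unlike the plain builders above, it also forwards the optional
+// IncludeColocationStatus value as the includeColocationStatus query
+// parameter when the caller sets it.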
+func (client *ProximityPlacementGroupsClient) getCreateRequest(ctx context.Context, resourceGroupName string, proximityPlacementGroupName string, options *ProximityPlacementGroupsClientGetOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/proximityPlacementGroups/{proximityPlacementGroupName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if proximityPlacementGroupName == "" { + return nil, errors.New("parameter proximityPlacementGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{proximityPlacementGroupName}", url.PathEscape(proximityPlacementGroupName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.IncludeColocationStatus != nil { + reqQP.Set("includeColocationStatus", *options.IncludeColocationStatus) + } + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getHandleResponse handles the Get response. +func (client *ProximityPlacementGroupsClient) getHandleResponse(resp *http.Response) (ProximityPlacementGroupsClientGetResponse, error) { + result := ProximityPlacementGroupsClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.ProximityPlacementGroup); err != nil { + return ProximityPlacementGroupsClientGetResponse{}, err + } + return result, nil +} + +// NewListByResourceGroupPager - Lists all proximity placement groups in a resource group. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// options - ProximityPlacementGroupsClientListByResourceGroupOptions contains the optional parameters for the ProximityPlacementGroupsClient.ListByResourceGroup +// method. 
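+//
+// A hypothetical drain loop (ctx and the group name are illustrative; More
+// and NextPage are the standard azcore runtime.Pager methods):
+//
+//	pager := client.NewListByResourceGroupPager("myResourceGroup", nil)
+//	for pager.More() {
+//		page, err := pager.NextPage(ctx)
+//		if err != nil {
+//			// handle error
+//		}
+//		for _, ppg := range page.Value {
+//			_ = ppg // each entry is a *ProximityPlacementGroup
+//		}
+//	}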
+func (client *ProximityPlacementGroupsClient) NewListByResourceGroupPager(resourceGroupName string, options *ProximityPlacementGroupsClientListByResourceGroupOptions) *runtime.Pager[ProximityPlacementGroupsClientListByResourceGroupResponse] { + return runtime.NewPager(runtime.PagingHandler[ProximityPlacementGroupsClientListByResourceGroupResponse]{ + More: func(page ProximityPlacementGroupsClientListByResourceGroupResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *ProximityPlacementGroupsClientListByResourceGroupResponse) (ProximityPlacementGroupsClientListByResourceGroupResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listByResourceGroupCreateRequest(ctx, resourceGroupName, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return ProximityPlacementGroupsClientListByResourceGroupResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return ProximityPlacementGroupsClientListByResourceGroupResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ProximityPlacementGroupsClientListByResourceGroupResponse{}, runtime.NewResponseError(resp) + } + return client.listByResourceGroupHandleResponse(resp) + }, + }) +} + +// listByResourceGroupCreateRequest creates the ListByResourceGroup request. +func (client *ProximityPlacementGroupsClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *ProximityPlacementGroupsClientListByResourceGroupOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/proximityPlacementGroups" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listByResourceGroupHandleResponse handles the ListByResourceGroup response. +func (client *ProximityPlacementGroupsClient) listByResourceGroupHandleResponse(resp *http.Response) (ProximityPlacementGroupsClientListByResourceGroupResponse, error) { + result := ProximityPlacementGroupsClientListByResourceGroupResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.ProximityPlacementGroupListResult); err != nil { + return ProximityPlacementGroupsClientListByResourceGroupResponse{}, err + } + return result, nil +} + +// NewListBySubscriptionPager - Lists all proximity placement groups in a subscription. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// options - ProximityPlacementGroupsClientListBySubscriptionOptions contains the optional parameters for the ProximityPlacementGroupsClient.ListBySubscription +// method. 
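+//
+// Paging works exactly as in NewListByResourceGroupPager above: the first
+// page is built by listBySubscriptionCreateRequest, and each subsequent page
+// is fetched with a plain GET against the NextLink returned by the previous
+// page.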
+func (client *ProximityPlacementGroupsClient) NewListBySubscriptionPager(options *ProximityPlacementGroupsClientListBySubscriptionOptions) *runtime.Pager[ProximityPlacementGroupsClientListBySubscriptionResponse] { + return runtime.NewPager(runtime.PagingHandler[ProximityPlacementGroupsClientListBySubscriptionResponse]{ + More: func(page ProximityPlacementGroupsClientListBySubscriptionResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *ProximityPlacementGroupsClientListBySubscriptionResponse) (ProximityPlacementGroupsClientListBySubscriptionResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listBySubscriptionCreateRequest(ctx, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return ProximityPlacementGroupsClientListBySubscriptionResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return ProximityPlacementGroupsClientListBySubscriptionResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ProximityPlacementGroupsClientListBySubscriptionResponse{}, runtime.NewResponseError(resp) + } + return client.listBySubscriptionHandleResponse(resp) + }, + }) +} + +// listBySubscriptionCreateRequest creates the ListBySubscription request. +func (client *ProximityPlacementGroupsClient) listBySubscriptionCreateRequest(ctx context.Context, options *ProximityPlacementGroupsClientListBySubscriptionOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/proximityPlacementGroups" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listBySubscriptionHandleResponse handles the ListBySubscription response. +func (client *ProximityPlacementGroupsClient) listBySubscriptionHandleResponse(resp *http.Response) (ProximityPlacementGroupsClientListBySubscriptionResponse, error) { + result := ProximityPlacementGroupsClientListBySubscriptionResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.ProximityPlacementGroupListResult); err != nil { + return ProximityPlacementGroupsClientListBySubscriptionResponse{}, err + } + return result, nil +} + +// Update - Update a proximity placement group. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// proximityPlacementGroupName - The name of the proximity placement group. +// parameters - Parameters supplied to the Update Proximity Placement Group operation. +// options - ProximityPlacementGroupsClientUpdateOptions contains the optional parameters for the ProximityPlacementGroupsClient.Update +// method. 
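+//
+// A hypothetical tags-only patch (names are illustrative; to.Ptr is the
+// generic pointer helper from sdk/azcore/to):
+//
+//	resp, err := client.Update(ctx, "myResourceGroup", "myPPG",
+//		ProximityPlacementGroupUpdate{Tags: map[string]*string{"env": to.Ptr("dev")}}, nil)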
+func (client *ProximityPlacementGroupsClient) Update(ctx context.Context, resourceGroupName string, proximityPlacementGroupName string, parameters ProximityPlacementGroupUpdate, options *ProximityPlacementGroupsClientUpdateOptions) (ProximityPlacementGroupsClientUpdateResponse, error) { + req, err := client.updateCreateRequest(ctx, resourceGroupName, proximityPlacementGroupName, parameters, options) + if err != nil { + return ProximityPlacementGroupsClientUpdateResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return ProximityPlacementGroupsClientUpdateResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ProximityPlacementGroupsClientUpdateResponse{}, runtime.NewResponseError(resp) + } + return client.updateHandleResponse(resp) +} + +// updateCreateRequest creates the Update request. +func (client *ProximityPlacementGroupsClient) updateCreateRequest(ctx context.Context, resourceGroupName string, proximityPlacementGroupName string, parameters ProximityPlacementGroupUpdate, options *ProximityPlacementGroupsClientUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/proximityPlacementGroups/{proximityPlacementGroupName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if proximityPlacementGroupName == "" { + return nil, errors.New("parameter proximityPlacementGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{proximityPlacementGroupName}", url.PathEscape(proximityPlacementGroupName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, parameters) +} + +// updateHandleResponse handles the Update response. +func (client *ProximityPlacementGroupsClient) updateHandleResponse(resp *http.Response) (ProximityPlacementGroupsClientUpdateResponse, error) { + result := ProximityPlacementGroupsClientUpdateResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.ProximityPlacementGroup); err != nil { + return ProximityPlacementGroupsClientUpdateResponse{}, err + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_resourceskus_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_resourceskus_client.go new file mode 100644 index 000000000..b064dbdd5 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_resourceskus_client.go @@ -0,0 +1,121 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +package armcompute + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + armruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strings" +) + +// ResourceSKUsClient contains the methods for the ResourceSKUs group. +// Don't use this type directly, use NewResourceSKUsClient() instead. +type ResourceSKUsClient struct { + host string + subscriptionID string + pl runtime.Pipeline +} + +// NewResourceSKUsClient creates a new instance of ResourceSKUsClient with the specified values. +// subscriptionID - Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms +// part of the URI for every service call. +// credential - used to authorize requests. Usually a credential from azidentity. +// options - pass nil to accept the default values. +func NewResourceSKUsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*ResourceSKUsClient, error) { + if options == nil { + options = &arm.ClientOptions{} + } + ep := cloud.AzurePublic.Services[cloud.ResourceManager].Endpoint + if c, ok := options.Cloud.Services[cloud.ResourceManager]; ok { + ep = c.Endpoint + } + pl, err := armruntime.NewPipeline(moduleName, moduleVersion, credential, runtime.PipelineOptions{}, options) + if err != nil { + return nil, err + } + client := &ResourceSKUsClient{ + subscriptionID: subscriptionID, + host: ep, + pl: pl, + } + return client, nil +} + +// NewListPager - Gets the list of Microsoft.Compute SKUs available for your Subscription. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-07-01 +// options - ResourceSKUsClientListOptions contains the optional parameters for the ResourceSKUsClient.List method. +func (client *ResourceSKUsClient) NewListPager(options *ResourceSKUsClientListOptions) *runtime.Pager[ResourceSKUsClientListResponse] { + return runtime.NewPager(runtime.PagingHandler[ResourceSKUsClientListResponse]{ + More: func(page ResourceSKUsClientListResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *ResourceSKUsClientListResponse) (ResourceSKUsClientListResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listCreateRequest(ctx, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return ResourceSKUsClientListResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return ResourceSKUsClientListResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ResourceSKUsClientListResponse{}, runtime.NewResponseError(resp) + } + return client.listHandleResponse(resp) + }, + }) +} + +// listCreateRequest creates the List request. 
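+// Besides the api-version, it conditionally adds the OData $filter and the
+// includeExtendedLocations query parameters when the corresponding options
+// are set by the caller.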
+func (client *ResourceSKUsClient) listCreateRequest(ctx context.Context, options *ResourceSKUsClientListOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/skus" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-07-01") + if options != nil && options.Filter != nil { + reqQP.Set("$filter", *options.Filter) + } + if options != nil && options.IncludeExtendedLocations != nil { + reqQP.Set("includeExtendedLocations", *options.IncludeExtendedLocations) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listHandleResponse handles the List response. +func (client *ResourceSKUsClient) listHandleResponse(resp *http.Response) (ResourceSKUsClientListResponse, error) { + result := ResourceSKUsClientListResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.ResourceSKUsResult); err != nil { + return ResourceSKUsClientListResponse{}, err + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_response_types.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_response_types.go new file mode 100644 index 000000000..4b95829db --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_response_types.go @@ -0,0 +1,1403 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armcompute + +import "io" + +// AvailabilitySetsClientCreateOrUpdateResponse contains the response from method AvailabilitySetsClient.CreateOrUpdate. +type AvailabilitySetsClientCreateOrUpdateResponse struct { + AvailabilitySet +} + +// AvailabilitySetsClientDeleteResponse contains the response from method AvailabilitySetsClient.Delete. +type AvailabilitySetsClientDeleteResponse struct { + // placeholder for future response values +} + +// AvailabilitySetsClientGetResponse contains the response from method AvailabilitySetsClient.Get. +type AvailabilitySetsClientGetResponse struct { + AvailabilitySet +} + +// AvailabilitySetsClientListAvailableSizesResponse contains the response from method AvailabilitySetsClient.ListAvailableSizes. +type AvailabilitySetsClientListAvailableSizesResponse struct { + VirtualMachineSizeListResult +} + +// AvailabilitySetsClientListBySubscriptionResponse contains the response from method AvailabilitySetsClient.ListBySubscription. +type AvailabilitySetsClientListBySubscriptionResponse struct { + AvailabilitySetListResult +} + +// AvailabilitySetsClientListResponse contains the response from method AvailabilitySetsClient.List. +type AvailabilitySetsClientListResponse struct { + AvailabilitySetListResult +} + +// AvailabilitySetsClientUpdateResponse contains the response from method AvailabilitySetsClient.Update. 
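+// The result model is embedded in each of these response wrappers, so its
+// fields are promoted: callers can read, for example, resp.ID directly
+// rather than going through resp.AvailabilitySet.ID.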
+type AvailabilitySetsClientUpdateResponse struct { + AvailabilitySet +} + +// CapacityReservationGroupsClientCreateOrUpdateResponse contains the response from method CapacityReservationGroupsClient.CreateOrUpdate. +type CapacityReservationGroupsClientCreateOrUpdateResponse struct { + CapacityReservationGroup +} + +// CapacityReservationGroupsClientDeleteResponse contains the response from method CapacityReservationGroupsClient.Delete. +type CapacityReservationGroupsClientDeleteResponse struct { + // placeholder for future response values +} + +// CapacityReservationGroupsClientGetResponse contains the response from method CapacityReservationGroupsClient.Get. +type CapacityReservationGroupsClientGetResponse struct { + CapacityReservationGroup +} + +// CapacityReservationGroupsClientListByResourceGroupResponse contains the response from method CapacityReservationGroupsClient.ListByResourceGroup. +type CapacityReservationGroupsClientListByResourceGroupResponse struct { + CapacityReservationGroupListResult +} + +// CapacityReservationGroupsClientListBySubscriptionResponse contains the response from method CapacityReservationGroupsClient.ListBySubscription. +type CapacityReservationGroupsClientListBySubscriptionResponse struct { + CapacityReservationGroupListResult +} + +// CapacityReservationGroupsClientUpdateResponse contains the response from method CapacityReservationGroupsClient.Update. +type CapacityReservationGroupsClientUpdateResponse struct { + CapacityReservationGroup +} + +// CapacityReservationsClientCreateOrUpdateResponse contains the response from method CapacityReservationsClient.CreateOrUpdate. +type CapacityReservationsClientCreateOrUpdateResponse struct { + CapacityReservation +} + +// CapacityReservationsClientDeleteResponse contains the response from method CapacityReservationsClient.Delete. +type CapacityReservationsClientDeleteResponse struct { + // placeholder for future response values +} + +// CapacityReservationsClientGetResponse contains the response from method CapacityReservationsClient.Get. +type CapacityReservationsClientGetResponse struct { + CapacityReservation +} + +// CapacityReservationsClientListByCapacityReservationGroupResponse contains the response from method CapacityReservationsClient.ListByCapacityReservationGroup. +type CapacityReservationsClientListByCapacityReservationGroupResponse struct { + CapacityReservationListResult +} + +// CapacityReservationsClientUpdateResponse contains the response from method CapacityReservationsClient.Update. +type CapacityReservationsClientUpdateResponse struct { + CapacityReservation +} + +// CloudServiceOperatingSystemsClientGetOSFamilyResponse contains the response from method CloudServiceOperatingSystemsClient.GetOSFamily. +type CloudServiceOperatingSystemsClientGetOSFamilyResponse struct { + OSFamily +} + +// CloudServiceOperatingSystemsClientGetOSVersionResponse contains the response from method CloudServiceOperatingSystemsClient.GetOSVersion. +type CloudServiceOperatingSystemsClientGetOSVersionResponse struct { + OSVersion +} + +// CloudServiceOperatingSystemsClientListOSFamiliesResponse contains the response from method CloudServiceOperatingSystemsClient.ListOSFamilies. +type CloudServiceOperatingSystemsClientListOSFamiliesResponse struct { + OSFamilyListResult +} + +// CloudServiceOperatingSystemsClientListOSVersionsResponse contains the response from method CloudServiceOperatingSystemsClient.ListOSVersions. 
+type CloudServiceOperatingSystemsClientListOSVersionsResponse struct { + OSVersionListResult +} + +// CloudServiceRoleInstancesClientDeleteResponse contains the response from method CloudServiceRoleInstancesClient.Delete. +type CloudServiceRoleInstancesClientDeleteResponse struct { + // placeholder for future response values +} + +// CloudServiceRoleInstancesClientGetInstanceViewResponse contains the response from method CloudServiceRoleInstancesClient.GetInstanceView. +type CloudServiceRoleInstancesClientGetInstanceViewResponse struct { + RoleInstanceView +} + +// CloudServiceRoleInstancesClientGetRemoteDesktopFileResponse contains the response from method CloudServiceRoleInstancesClient.GetRemoteDesktopFile. +type CloudServiceRoleInstancesClientGetRemoteDesktopFileResponse struct { + // Body contains the streaming response. + Body io.ReadCloser +} + +// CloudServiceRoleInstancesClientGetResponse contains the response from method CloudServiceRoleInstancesClient.Get. +type CloudServiceRoleInstancesClientGetResponse struct { + RoleInstance +} + +// CloudServiceRoleInstancesClientListResponse contains the response from method CloudServiceRoleInstancesClient.List. +type CloudServiceRoleInstancesClientListResponse struct { + RoleInstanceListResult +} + +// CloudServiceRoleInstancesClientRebuildResponse contains the response from method CloudServiceRoleInstancesClient.Rebuild. +type CloudServiceRoleInstancesClientRebuildResponse struct { + // placeholder for future response values +} + +// CloudServiceRoleInstancesClientReimageResponse contains the response from method CloudServiceRoleInstancesClient.Reimage. +type CloudServiceRoleInstancesClientReimageResponse struct { + // placeholder for future response values +} + +// CloudServiceRoleInstancesClientRestartResponse contains the response from method CloudServiceRoleInstancesClient.Restart. +type CloudServiceRoleInstancesClientRestartResponse struct { + // placeholder for future response values +} + +// CloudServiceRolesClientGetResponse contains the response from method CloudServiceRolesClient.Get. +type CloudServiceRolesClientGetResponse struct { + CloudServiceRole +} + +// CloudServiceRolesClientListResponse contains the response from method CloudServiceRolesClient.List. +type CloudServiceRolesClientListResponse struct { + CloudServiceRoleListResult +} + +// CloudServicesClientCreateOrUpdateResponse contains the response from method CloudServicesClient.CreateOrUpdate. +type CloudServicesClientCreateOrUpdateResponse struct { + CloudService +} + +// CloudServicesClientDeleteInstancesResponse contains the response from method CloudServicesClient.DeleteInstances. +type CloudServicesClientDeleteInstancesResponse struct { + // placeholder for future response values +} + +// CloudServicesClientDeleteResponse contains the response from method CloudServicesClient.Delete. +type CloudServicesClientDeleteResponse struct { + // placeholder for future response values +} + +// CloudServicesClientGetInstanceViewResponse contains the response from method CloudServicesClient.GetInstanceView. +type CloudServicesClientGetInstanceViewResponse struct { + CloudServiceInstanceView +} + +// CloudServicesClientGetResponse contains the response from method CloudServicesClient.Get. +type CloudServicesClientGetResponse struct { + CloudService +} + +// CloudServicesClientListAllResponse contains the response from method CloudServicesClient.ListAll. 
+type CloudServicesClientListAllResponse struct { + CloudServiceListResult +} + +// CloudServicesClientListResponse contains the response from method CloudServicesClient.List. +type CloudServicesClientListResponse struct { + CloudServiceListResult +} + +// CloudServicesClientPowerOffResponse contains the response from method CloudServicesClient.PowerOff. +type CloudServicesClientPowerOffResponse struct { + // placeholder for future response values +} + +// CloudServicesClientRebuildResponse contains the response from method CloudServicesClient.Rebuild. +type CloudServicesClientRebuildResponse struct { + // placeholder for future response values +} + +// CloudServicesClientReimageResponse contains the response from method CloudServicesClient.Reimage. +type CloudServicesClientReimageResponse struct { + // placeholder for future response values +} + +// CloudServicesClientRestartResponse contains the response from method CloudServicesClient.Restart. +type CloudServicesClientRestartResponse struct { + // placeholder for future response values +} + +// CloudServicesClientStartResponse contains the response from method CloudServicesClient.Start. +type CloudServicesClientStartResponse struct { + // placeholder for future response values +} + +// CloudServicesClientUpdateResponse contains the response from method CloudServicesClient.Update. +type CloudServicesClientUpdateResponse struct { + CloudService +} + +// CloudServicesUpdateDomainClientGetUpdateDomainResponse contains the response from method CloudServicesUpdateDomainClient.GetUpdateDomain. +type CloudServicesUpdateDomainClientGetUpdateDomainResponse struct { + UpdateDomain +} + +// CloudServicesUpdateDomainClientListUpdateDomainsResponse contains the response from method CloudServicesUpdateDomainClient.ListUpdateDomains. +type CloudServicesUpdateDomainClientListUpdateDomainsResponse struct { + UpdateDomainListResult +} + +// CloudServicesUpdateDomainClientWalkUpdateDomainResponse contains the response from method CloudServicesUpdateDomainClient.WalkUpdateDomain. +type CloudServicesUpdateDomainClientWalkUpdateDomainResponse struct { + // placeholder for future response values +} + +// CommunityGalleriesClientGetResponse contains the response from method CommunityGalleriesClient.Get. +type CommunityGalleriesClientGetResponse struct { + CommunityGallery +} + +// CommunityGalleryImageVersionsClientGetResponse contains the response from method CommunityGalleryImageVersionsClient.Get. +type CommunityGalleryImageVersionsClientGetResponse struct { + CommunityGalleryImageVersion +} + +// CommunityGalleryImagesClientGetResponse contains the response from method CommunityGalleryImagesClient.Get. +type CommunityGalleryImagesClientGetResponse struct { + CommunityGalleryImage +} + +// DedicatedHostGroupsClientCreateOrUpdateResponse contains the response from method DedicatedHostGroupsClient.CreateOrUpdate. +type DedicatedHostGroupsClientCreateOrUpdateResponse struct { + DedicatedHostGroup +} + +// DedicatedHostGroupsClientDeleteResponse contains the response from method DedicatedHostGroupsClient.Delete. +type DedicatedHostGroupsClientDeleteResponse struct { + // placeholder for future response values +} + +// DedicatedHostGroupsClientGetResponse contains the response from method DedicatedHostGroupsClient.Get. +type DedicatedHostGroupsClientGetResponse struct { + DedicatedHostGroup +} + +// DedicatedHostGroupsClientListByResourceGroupResponse contains the response from method DedicatedHostGroupsClient.ListByResourceGroup. 
+type DedicatedHostGroupsClientListByResourceGroupResponse struct { + DedicatedHostGroupListResult +} + +// DedicatedHostGroupsClientListBySubscriptionResponse contains the response from method DedicatedHostGroupsClient.ListBySubscription. +type DedicatedHostGroupsClientListBySubscriptionResponse struct { + DedicatedHostGroupListResult +} + +// DedicatedHostGroupsClientUpdateResponse contains the response from method DedicatedHostGroupsClient.Update. +type DedicatedHostGroupsClientUpdateResponse struct { + DedicatedHostGroup +} + +// DedicatedHostsClientCreateOrUpdateResponse contains the response from method DedicatedHostsClient.CreateOrUpdate. +type DedicatedHostsClientCreateOrUpdateResponse struct { + DedicatedHost +} + +// DedicatedHostsClientDeleteResponse contains the response from method DedicatedHostsClient.Delete. +type DedicatedHostsClientDeleteResponse struct { + // placeholder for future response values +} + +// DedicatedHostsClientGetResponse contains the response from method DedicatedHostsClient.Get. +type DedicatedHostsClientGetResponse struct { + DedicatedHost +} + +// DedicatedHostsClientListByHostGroupResponse contains the response from method DedicatedHostsClient.ListByHostGroup. +type DedicatedHostsClientListByHostGroupResponse struct { + DedicatedHostListResult +} + +// DedicatedHostsClientRestartResponse contains the response from method DedicatedHostsClient.Restart. +type DedicatedHostsClientRestartResponse struct { + // placeholder for future response values +} + +// DedicatedHostsClientUpdateResponse contains the response from method DedicatedHostsClient.Update. +type DedicatedHostsClientUpdateResponse struct { + DedicatedHost +} + +// DiskAccessesClientCreateOrUpdateResponse contains the response from method DiskAccessesClient.CreateOrUpdate. +type DiskAccessesClientCreateOrUpdateResponse struct { + DiskAccess +} + +// DiskAccessesClientDeleteAPrivateEndpointConnectionResponse contains the response from method DiskAccessesClient.DeleteAPrivateEndpointConnection. +type DiskAccessesClientDeleteAPrivateEndpointConnectionResponse struct { + // placeholder for future response values +} + +// DiskAccessesClientDeleteResponse contains the response from method DiskAccessesClient.Delete. +type DiskAccessesClientDeleteResponse struct { + // placeholder for future response values +} + +// DiskAccessesClientGetAPrivateEndpointConnectionResponse contains the response from method DiskAccessesClient.GetAPrivateEndpointConnection. +type DiskAccessesClientGetAPrivateEndpointConnectionResponse struct { + PrivateEndpointConnection +} + +// DiskAccessesClientGetPrivateLinkResourcesResponse contains the response from method DiskAccessesClient.GetPrivateLinkResources. +type DiskAccessesClientGetPrivateLinkResourcesResponse struct { + PrivateLinkResourceListResult +} + +// DiskAccessesClientGetResponse contains the response from method DiskAccessesClient.Get. +type DiskAccessesClientGetResponse struct { + DiskAccess +} + +// DiskAccessesClientListByResourceGroupResponse contains the response from method DiskAccessesClient.ListByResourceGroup. +type DiskAccessesClientListByResourceGroupResponse struct { + DiskAccessList +} + +// DiskAccessesClientListPrivateEndpointConnectionsResponse contains the response from method DiskAccessesClient.ListPrivateEndpointConnections. +type DiskAccessesClientListPrivateEndpointConnectionsResponse struct { + PrivateEndpointConnectionListResult +} + +// DiskAccessesClientListResponse contains the response from method DiskAccessesClient.List. 
+type DiskAccessesClientListResponse struct { + DiskAccessList +} + +// DiskAccessesClientUpdateAPrivateEndpointConnectionResponse contains the response from method DiskAccessesClient.UpdateAPrivateEndpointConnection. +type DiskAccessesClientUpdateAPrivateEndpointConnectionResponse struct { + PrivateEndpointConnection +} + +// DiskAccessesClientUpdateResponse contains the response from method DiskAccessesClient.Update. +type DiskAccessesClientUpdateResponse struct { + DiskAccess +} + +// DiskEncryptionSetsClientCreateOrUpdateResponse contains the response from method DiskEncryptionSetsClient.CreateOrUpdate. +type DiskEncryptionSetsClientCreateOrUpdateResponse struct { + DiskEncryptionSet +} + +// DiskEncryptionSetsClientDeleteResponse contains the response from method DiskEncryptionSetsClient.Delete. +type DiskEncryptionSetsClientDeleteResponse struct { + // placeholder for future response values +} + +// DiskEncryptionSetsClientGetResponse contains the response from method DiskEncryptionSetsClient.Get. +type DiskEncryptionSetsClientGetResponse struct { + DiskEncryptionSet +} + +// DiskEncryptionSetsClientListAssociatedResourcesResponse contains the response from method DiskEncryptionSetsClient.ListAssociatedResources. +type DiskEncryptionSetsClientListAssociatedResourcesResponse struct { + ResourceURIList +} + +// DiskEncryptionSetsClientListByResourceGroupResponse contains the response from method DiskEncryptionSetsClient.ListByResourceGroup. +type DiskEncryptionSetsClientListByResourceGroupResponse struct { + DiskEncryptionSetList +} + +// DiskEncryptionSetsClientListResponse contains the response from method DiskEncryptionSetsClient.List. +type DiskEncryptionSetsClientListResponse struct { + DiskEncryptionSetList +} + +// DiskEncryptionSetsClientUpdateResponse contains the response from method DiskEncryptionSetsClient.Update. +type DiskEncryptionSetsClientUpdateResponse struct { + DiskEncryptionSet +} + +// DiskRestorePointClientGetResponse contains the response from method DiskRestorePointClient.Get. +type DiskRestorePointClientGetResponse struct { + DiskRestorePoint +} + +// DiskRestorePointClientGrantAccessResponse contains the response from method DiskRestorePointClient.GrantAccess. +type DiskRestorePointClientGrantAccessResponse struct { + AccessURI +} + +// DiskRestorePointClientListByRestorePointResponse contains the response from method DiskRestorePointClient.ListByRestorePoint. +type DiskRestorePointClientListByRestorePointResponse struct { + DiskRestorePointList +} + +// DiskRestorePointClientRevokeAccessResponse contains the response from method DiskRestorePointClient.RevokeAccess. +type DiskRestorePointClientRevokeAccessResponse struct { + // placeholder for future response values +} + +// DisksClientCreateOrUpdateResponse contains the response from method DisksClient.CreateOrUpdate. +type DisksClientCreateOrUpdateResponse struct { + Disk +} + +// DisksClientDeleteResponse contains the response from method DisksClient.Delete. +type DisksClientDeleteResponse struct { + // placeholder for future response values +} + +// DisksClientGetResponse contains the response from method DisksClient.Get. +type DisksClientGetResponse struct { + Disk +} + +// DisksClientGrantAccessResponse contains the response from method DisksClient.GrantAccess. +type DisksClientGrantAccessResponse struct { + AccessURI +} + +// DisksClientListByResourceGroupResponse contains the response from method DisksClient.ListByResourceGroup. 
+type DisksClientListByResourceGroupResponse struct { + DiskList +} + +// DisksClientListResponse contains the response from method DisksClient.List. +type DisksClientListResponse struct { + DiskList +} + +// DisksClientRevokeAccessResponse contains the response from method DisksClient.RevokeAccess. +type DisksClientRevokeAccessResponse struct { + // placeholder for future response values +} + +// DisksClientUpdateResponse contains the response from method DisksClient.Update. +type DisksClientUpdateResponse struct { + Disk +} + +// GalleriesClientCreateOrUpdateResponse contains the response from method GalleriesClient.CreateOrUpdate. +type GalleriesClientCreateOrUpdateResponse struct { + Gallery +} + +// GalleriesClientDeleteResponse contains the response from method GalleriesClient.Delete. +type GalleriesClientDeleteResponse struct { + // placeholder for future response values +} + +// GalleriesClientGetResponse contains the response from method GalleriesClient.Get. +type GalleriesClientGetResponse struct { + Gallery +} + +// GalleriesClientListByResourceGroupResponse contains the response from method GalleriesClient.ListByResourceGroup. +type GalleriesClientListByResourceGroupResponse struct { + GalleryList +} + +// GalleriesClientListResponse contains the response from method GalleriesClient.List. +type GalleriesClientListResponse struct { + GalleryList +} + +// GalleriesClientUpdateResponse contains the response from method GalleriesClient.Update. +type GalleriesClientUpdateResponse struct { + Gallery +} + +// GalleryApplicationVersionsClientCreateOrUpdateResponse contains the response from method GalleryApplicationVersionsClient.CreateOrUpdate. +type GalleryApplicationVersionsClientCreateOrUpdateResponse struct { + GalleryApplicationVersion +} + +// GalleryApplicationVersionsClientDeleteResponse contains the response from method GalleryApplicationVersionsClient.Delete. +type GalleryApplicationVersionsClientDeleteResponse struct { + // placeholder for future response values +} + +// GalleryApplicationVersionsClientGetResponse contains the response from method GalleryApplicationVersionsClient.Get. +type GalleryApplicationVersionsClientGetResponse struct { + GalleryApplicationVersion +} + +// GalleryApplicationVersionsClientListByGalleryApplicationResponse contains the response from method GalleryApplicationVersionsClient.ListByGalleryApplication. +type GalleryApplicationVersionsClientListByGalleryApplicationResponse struct { + GalleryApplicationVersionList +} + +// GalleryApplicationVersionsClientUpdateResponse contains the response from method GalleryApplicationVersionsClient.Update. +type GalleryApplicationVersionsClientUpdateResponse struct { + GalleryApplicationVersion +} + +// GalleryApplicationsClientCreateOrUpdateResponse contains the response from method GalleryApplicationsClient.CreateOrUpdate. +type GalleryApplicationsClientCreateOrUpdateResponse struct { + GalleryApplication +} + +// GalleryApplicationsClientDeleteResponse contains the response from method GalleryApplicationsClient.Delete. +type GalleryApplicationsClientDeleteResponse struct { + // placeholder for future response values +} + +// GalleryApplicationsClientGetResponse contains the response from method GalleryApplicationsClient.Get. +type GalleryApplicationsClientGetResponse struct { + GalleryApplication +} + +// GalleryApplicationsClientListByGalleryResponse contains the response from method GalleryApplicationsClient.ListByGallery. 
+type GalleryApplicationsClientListByGalleryResponse struct { + GalleryApplicationList +} + +// GalleryApplicationsClientUpdateResponse contains the response from method GalleryApplicationsClient.Update. +type GalleryApplicationsClientUpdateResponse struct { + GalleryApplication +} + +// GalleryImageVersionsClientCreateOrUpdateResponse contains the response from method GalleryImageVersionsClient.CreateOrUpdate. +type GalleryImageVersionsClientCreateOrUpdateResponse struct { + GalleryImageVersion +} + +// GalleryImageVersionsClientDeleteResponse contains the response from method GalleryImageVersionsClient.Delete. +type GalleryImageVersionsClientDeleteResponse struct { + // placeholder for future response values +} + +// GalleryImageVersionsClientGetResponse contains the response from method GalleryImageVersionsClient.Get. +type GalleryImageVersionsClientGetResponse struct { + GalleryImageVersion +} + +// GalleryImageVersionsClientListByGalleryImageResponse contains the response from method GalleryImageVersionsClient.ListByGalleryImage. +type GalleryImageVersionsClientListByGalleryImageResponse struct { + GalleryImageVersionList +} + +// GalleryImageVersionsClientUpdateResponse contains the response from method GalleryImageVersionsClient.Update. +type GalleryImageVersionsClientUpdateResponse struct { + GalleryImageVersion +} + +// GalleryImagesClientCreateOrUpdateResponse contains the response from method GalleryImagesClient.CreateOrUpdate. +type GalleryImagesClientCreateOrUpdateResponse struct { + GalleryImage +} + +// GalleryImagesClientDeleteResponse contains the response from method GalleryImagesClient.Delete. +type GalleryImagesClientDeleteResponse struct { + // placeholder for future response values +} + +// GalleryImagesClientGetResponse contains the response from method GalleryImagesClient.Get. +type GalleryImagesClientGetResponse struct { + GalleryImage +} + +// GalleryImagesClientListByGalleryResponse contains the response from method GalleryImagesClient.ListByGallery. +type GalleryImagesClientListByGalleryResponse struct { + GalleryImageList +} + +// GalleryImagesClientUpdateResponse contains the response from method GalleryImagesClient.Update. +type GalleryImagesClientUpdateResponse struct { + GalleryImage +} + +// GallerySharingProfileClientUpdateResponse contains the response from method GallerySharingProfileClient.Update. +type GallerySharingProfileClientUpdateResponse struct { + SharingUpdate +} + +// ImagesClientCreateOrUpdateResponse contains the response from method ImagesClient.CreateOrUpdate. +type ImagesClientCreateOrUpdateResponse struct { + Image +} + +// ImagesClientDeleteResponse contains the response from method ImagesClient.Delete. +type ImagesClientDeleteResponse struct { + // placeholder for future response values +} + +// ImagesClientGetResponse contains the response from method ImagesClient.Get. +type ImagesClientGetResponse struct { + Image +} + +// ImagesClientListByResourceGroupResponse contains the response from method ImagesClient.ListByResourceGroup. +type ImagesClientListByResourceGroupResponse struct { + ImageListResult +} + +// ImagesClientListResponse contains the response from method ImagesClient.List. +type ImagesClientListResponse struct { + ImageListResult +} + +// ImagesClientUpdateResponse contains the response from method ImagesClient.Update. +type ImagesClientUpdateResponse struct { + Image +} + +// LogAnalyticsClientExportRequestRateByIntervalResponse contains the response from method LogAnalyticsClient.ExportRequestRateByInterval. 
+type LogAnalyticsClientExportRequestRateByIntervalResponse struct { + LogAnalyticsOperationResult +} + +// LogAnalyticsClientExportThrottledRequestsResponse contains the response from method LogAnalyticsClient.ExportThrottledRequests. +type LogAnalyticsClientExportThrottledRequestsResponse struct { + LogAnalyticsOperationResult +} + +// OperationsClientListResponse contains the response from method OperationsClient.List. +type OperationsClientListResponse struct { + OperationListResult +} + +// ProximityPlacementGroupsClientCreateOrUpdateResponse contains the response from method ProximityPlacementGroupsClient.CreateOrUpdate. +type ProximityPlacementGroupsClientCreateOrUpdateResponse struct { + ProximityPlacementGroup +} + +// ProximityPlacementGroupsClientDeleteResponse contains the response from method ProximityPlacementGroupsClient.Delete. +type ProximityPlacementGroupsClientDeleteResponse struct { + // placeholder for future response values +} + +// ProximityPlacementGroupsClientGetResponse contains the response from method ProximityPlacementGroupsClient.Get. +type ProximityPlacementGroupsClientGetResponse struct { + ProximityPlacementGroup +} + +// ProximityPlacementGroupsClientListByResourceGroupResponse contains the response from method ProximityPlacementGroupsClient.ListByResourceGroup. +type ProximityPlacementGroupsClientListByResourceGroupResponse struct { + ProximityPlacementGroupListResult +} + +// ProximityPlacementGroupsClientListBySubscriptionResponse contains the response from method ProximityPlacementGroupsClient.ListBySubscription. +type ProximityPlacementGroupsClientListBySubscriptionResponse struct { + ProximityPlacementGroupListResult +} + +// ProximityPlacementGroupsClientUpdateResponse contains the response from method ProximityPlacementGroupsClient.Update. +type ProximityPlacementGroupsClientUpdateResponse struct { + ProximityPlacementGroup +} + +// ResourceSKUsClientListResponse contains the response from method ResourceSKUsClient.List. +type ResourceSKUsClientListResponse struct { + ResourceSKUsResult +} + +// RestorePointCollectionsClientCreateOrUpdateResponse contains the response from method RestorePointCollectionsClient.CreateOrUpdate. +type RestorePointCollectionsClientCreateOrUpdateResponse struct { + RestorePointCollection +} + +// RestorePointCollectionsClientDeleteResponse contains the response from method RestorePointCollectionsClient.Delete. +type RestorePointCollectionsClientDeleteResponse struct { + // placeholder for future response values +} + +// RestorePointCollectionsClientGetResponse contains the response from method RestorePointCollectionsClient.Get. +type RestorePointCollectionsClientGetResponse struct { + RestorePointCollection +} + +// RestorePointCollectionsClientListAllResponse contains the response from method RestorePointCollectionsClient.ListAll. +type RestorePointCollectionsClientListAllResponse struct { + RestorePointCollectionListResult +} + +// RestorePointCollectionsClientListResponse contains the response from method RestorePointCollectionsClient.List. +type RestorePointCollectionsClientListResponse struct { + RestorePointCollectionListResult +} + +// RestorePointCollectionsClientUpdateResponse contains the response from method RestorePointCollectionsClient.Update. +type RestorePointCollectionsClientUpdateResponse struct { + RestorePointCollection +} + +// RestorePointsClientCreateResponse contains the response from method RestorePointsClient.Create. 
+type RestorePointsClientCreateResponse struct { + RestorePoint +} + +// RestorePointsClientDeleteResponse contains the response from method RestorePointsClient.Delete. +type RestorePointsClientDeleteResponse struct { + // placeholder for future response values +} + +// RestorePointsClientGetResponse contains the response from method RestorePointsClient.Get. +type RestorePointsClientGetResponse struct { + RestorePoint +} + +// SSHPublicKeysClientCreateResponse contains the response from method SSHPublicKeysClient.Create. +type SSHPublicKeysClientCreateResponse struct { + SSHPublicKeyResource +} + +// SSHPublicKeysClientDeleteResponse contains the response from method SSHPublicKeysClient.Delete. +type SSHPublicKeysClientDeleteResponse struct { + // placeholder for future response values +} + +// SSHPublicKeysClientGenerateKeyPairResponse contains the response from method SSHPublicKeysClient.GenerateKeyPair. +type SSHPublicKeysClientGenerateKeyPairResponse struct { + SSHPublicKeyGenerateKeyPairResult +} + +// SSHPublicKeysClientGetResponse contains the response from method SSHPublicKeysClient.Get. +type SSHPublicKeysClientGetResponse struct { + SSHPublicKeyResource +} + +// SSHPublicKeysClientListByResourceGroupResponse contains the response from method SSHPublicKeysClient.ListByResourceGroup. +type SSHPublicKeysClientListByResourceGroupResponse struct { + SSHPublicKeysGroupListResult +} + +// SSHPublicKeysClientListBySubscriptionResponse contains the response from method SSHPublicKeysClient.ListBySubscription. +type SSHPublicKeysClientListBySubscriptionResponse struct { + SSHPublicKeysGroupListResult +} + +// SSHPublicKeysClientUpdateResponse contains the response from method SSHPublicKeysClient.Update. +type SSHPublicKeysClientUpdateResponse struct { + SSHPublicKeyResource +} + +// SharedGalleriesClientGetResponse contains the response from method SharedGalleriesClient.Get. +type SharedGalleriesClientGetResponse struct { + SharedGallery +} + +// SharedGalleriesClientListResponse contains the response from method SharedGalleriesClient.List. +type SharedGalleriesClientListResponse struct { + SharedGalleryList +} + +// SharedGalleryImageVersionsClientGetResponse contains the response from method SharedGalleryImageVersionsClient.Get. +type SharedGalleryImageVersionsClientGetResponse struct { + SharedGalleryImageVersion +} + +// SharedGalleryImageVersionsClientListResponse contains the response from method SharedGalleryImageVersionsClient.List. +type SharedGalleryImageVersionsClientListResponse struct { + SharedGalleryImageVersionList +} + +// SharedGalleryImagesClientGetResponse contains the response from method SharedGalleryImagesClient.Get. +type SharedGalleryImagesClientGetResponse struct { + SharedGalleryImage +} + +// SharedGalleryImagesClientListResponse contains the response from method SharedGalleryImagesClient.List. +type SharedGalleryImagesClientListResponse struct { + SharedGalleryImageList +} + +// SnapshotsClientCreateOrUpdateResponse contains the response from method SnapshotsClient.CreateOrUpdate. +type SnapshotsClientCreateOrUpdateResponse struct { + Snapshot +} + +// SnapshotsClientDeleteResponse contains the response from method SnapshotsClient.Delete. +type SnapshotsClientDeleteResponse struct { + // placeholder for future response values +} + +// SnapshotsClientGetResponse contains the response from method SnapshotsClient.Get. 
+type SnapshotsClientGetResponse struct { + Snapshot +} + +// SnapshotsClientGrantAccessResponse contains the response from method SnapshotsClient.GrantAccess. +type SnapshotsClientGrantAccessResponse struct { + AccessURI +} + +// SnapshotsClientListByResourceGroupResponse contains the response from method SnapshotsClient.ListByResourceGroup. +type SnapshotsClientListByResourceGroupResponse struct { + SnapshotList +} + +// SnapshotsClientListResponse contains the response from method SnapshotsClient.List. +type SnapshotsClientListResponse struct { + SnapshotList +} + +// SnapshotsClientRevokeAccessResponse contains the response from method SnapshotsClient.RevokeAccess. +type SnapshotsClientRevokeAccessResponse struct { + // placeholder for future response values +} + +// SnapshotsClientUpdateResponse contains the response from method SnapshotsClient.Update. +type SnapshotsClientUpdateResponse struct { + Snapshot +} + +// UsageClientListResponse contains the response from method UsageClient.List. +type UsageClientListResponse struct { + ListUsagesResult +} + +// VirtualMachineExtensionImagesClientGetResponse contains the response from method VirtualMachineExtensionImagesClient.Get. +type VirtualMachineExtensionImagesClientGetResponse struct { + VirtualMachineExtensionImage +} + +// VirtualMachineExtensionImagesClientListTypesResponse contains the response from method VirtualMachineExtensionImagesClient.ListTypes. +type VirtualMachineExtensionImagesClientListTypesResponse struct { + // Array of VirtualMachineExtensionImage + VirtualMachineExtensionImageArray []*VirtualMachineExtensionImage +} + +// VirtualMachineExtensionImagesClientListVersionsResponse contains the response from method VirtualMachineExtensionImagesClient.ListVersions. +type VirtualMachineExtensionImagesClientListVersionsResponse struct { + // Array of VirtualMachineExtensionImage + VirtualMachineExtensionImageArray []*VirtualMachineExtensionImage +} + +// VirtualMachineExtensionsClientCreateOrUpdateResponse contains the response from method VirtualMachineExtensionsClient.CreateOrUpdate. +type VirtualMachineExtensionsClientCreateOrUpdateResponse struct { + VirtualMachineExtension +} + +// VirtualMachineExtensionsClientDeleteResponse contains the response from method VirtualMachineExtensionsClient.Delete. +type VirtualMachineExtensionsClientDeleteResponse struct { + // placeholder for future response values +} + +// VirtualMachineExtensionsClientGetResponse contains the response from method VirtualMachineExtensionsClient.Get. +type VirtualMachineExtensionsClientGetResponse struct { + VirtualMachineExtension +} + +// VirtualMachineExtensionsClientListResponse contains the response from method VirtualMachineExtensionsClient.List. +type VirtualMachineExtensionsClientListResponse struct { + VirtualMachineExtensionsListResult +} + +// VirtualMachineExtensionsClientUpdateResponse contains the response from method VirtualMachineExtensionsClient.Update. +type VirtualMachineExtensionsClientUpdateResponse struct { + VirtualMachineExtension +} + +// VirtualMachineImagesClientGetResponse contains the response from method VirtualMachineImagesClient.Get. +type VirtualMachineImagesClientGetResponse struct { + VirtualMachineImage +} + +// VirtualMachineImagesClientListOffersResponse contains the response from method VirtualMachineImagesClient.ListOffers. 
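+// This operation returns a bare JSON array rather than a pageable list-result
+// model, so the result is surfaced as an explicit slice field.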
+type VirtualMachineImagesClientListOffersResponse struct { + // Array of VirtualMachineImageResource + VirtualMachineImageResourceArray []*VirtualMachineImageResource +} + +// VirtualMachineImagesClientListPublishersResponse contains the response from method VirtualMachineImagesClient.ListPublishers. +type VirtualMachineImagesClientListPublishersResponse struct { + // Array of VirtualMachineImageResource + VirtualMachineImageResourceArray []*VirtualMachineImageResource +} + +// VirtualMachineImagesClientListResponse contains the response from method VirtualMachineImagesClient.List. +type VirtualMachineImagesClientListResponse struct { + // Array of VirtualMachineImageResource + VirtualMachineImageResourceArray []*VirtualMachineImageResource +} + +// VirtualMachineImagesClientListSKUsResponse contains the response from method VirtualMachineImagesClient.ListSKUs. +type VirtualMachineImagesClientListSKUsResponse struct { + // Array of VirtualMachineImageResource + VirtualMachineImageResourceArray []*VirtualMachineImageResource +} + +// VirtualMachineImagesEdgeZoneClientGetResponse contains the response from method VirtualMachineImagesEdgeZoneClient.Get. +type VirtualMachineImagesEdgeZoneClientGetResponse struct { + VirtualMachineImage +} + +// VirtualMachineImagesEdgeZoneClientListOffersResponse contains the response from method VirtualMachineImagesEdgeZoneClient.ListOffers. +type VirtualMachineImagesEdgeZoneClientListOffersResponse struct { + // Array of VirtualMachineImageResource + VirtualMachineImageResourceArray []*VirtualMachineImageResource +} + +// VirtualMachineImagesEdgeZoneClientListPublishersResponse contains the response from method VirtualMachineImagesEdgeZoneClient.ListPublishers. +type VirtualMachineImagesEdgeZoneClientListPublishersResponse struct { + // Array of VirtualMachineImageResource + VirtualMachineImageResourceArray []*VirtualMachineImageResource +} + +// VirtualMachineImagesEdgeZoneClientListResponse contains the response from method VirtualMachineImagesEdgeZoneClient.List. +type VirtualMachineImagesEdgeZoneClientListResponse struct { + // Array of VirtualMachineImageResource + VirtualMachineImageResourceArray []*VirtualMachineImageResource +} + +// VirtualMachineImagesEdgeZoneClientListSKUsResponse contains the response from method VirtualMachineImagesEdgeZoneClient.ListSKUs. +type VirtualMachineImagesEdgeZoneClientListSKUsResponse struct { + // Array of VirtualMachineImageResource + VirtualMachineImageResourceArray []*VirtualMachineImageResource +} + +// VirtualMachineRunCommandsClientCreateOrUpdateResponse contains the response from method VirtualMachineRunCommandsClient.CreateOrUpdate. +type VirtualMachineRunCommandsClientCreateOrUpdateResponse struct { + VirtualMachineRunCommand +} + +// VirtualMachineRunCommandsClientDeleteResponse contains the response from method VirtualMachineRunCommandsClient.Delete. +type VirtualMachineRunCommandsClientDeleteResponse struct { + // placeholder for future response values +} + +// VirtualMachineRunCommandsClientGetByVirtualMachineResponse contains the response from method VirtualMachineRunCommandsClient.GetByVirtualMachine. +type VirtualMachineRunCommandsClientGetByVirtualMachineResponse struct { + VirtualMachineRunCommand +} + +// VirtualMachineRunCommandsClientGetResponse contains the response from method VirtualMachineRunCommandsClient.Get. 
+type VirtualMachineRunCommandsClientGetResponse struct { + RunCommandDocument +} + +// VirtualMachineRunCommandsClientListByVirtualMachineResponse contains the response from method VirtualMachineRunCommandsClient.ListByVirtualMachine. +type VirtualMachineRunCommandsClientListByVirtualMachineResponse struct { + VirtualMachineRunCommandsListResult +} + +// VirtualMachineRunCommandsClientListResponse contains the response from method VirtualMachineRunCommandsClient.List. +type VirtualMachineRunCommandsClientListResponse struct { + RunCommandListResult +} + +// VirtualMachineRunCommandsClientUpdateResponse contains the response from method VirtualMachineRunCommandsClient.Update. +type VirtualMachineRunCommandsClientUpdateResponse struct { + VirtualMachineRunCommand +} + +// VirtualMachineScaleSetExtensionsClientCreateOrUpdateResponse contains the response from method VirtualMachineScaleSetExtensionsClient.CreateOrUpdate. +type VirtualMachineScaleSetExtensionsClientCreateOrUpdateResponse struct { + VirtualMachineScaleSetExtension +} + +// VirtualMachineScaleSetExtensionsClientDeleteResponse contains the response from method VirtualMachineScaleSetExtensionsClient.Delete. +type VirtualMachineScaleSetExtensionsClientDeleteResponse struct { + // placeholder for future response values +} + +// VirtualMachineScaleSetExtensionsClientGetResponse contains the response from method VirtualMachineScaleSetExtensionsClient.Get. +type VirtualMachineScaleSetExtensionsClientGetResponse struct { + VirtualMachineScaleSetExtension +} + +// VirtualMachineScaleSetExtensionsClientListResponse contains the response from method VirtualMachineScaleSetExtensionsClient.List. +type VirtualMachineScaleSetExtensionsClientListResponse struct { + VirtualMachineScaleSetExtensionListResult +} + +// VirtualMachineScaleSetExtensionsClientUpdateResponse contains the response from method VirtualMachineScaleSetExtensionsClient.Update. +type VirtualMachineScaleSetExtensionsClientUpdateResponse struct { + VirtualMachineScaleSetExtension +} + +// VirtualMachineScaleSetRollingUpgradesClientCancelResponse contains the response from method VirtualMachineScaleSetRollingUpgradesClient.Cancel. +type VirtualMachineScaleSetRollingUpgradesClientCancelResponse struct { + // placeholder for future response values +} + +// VirtualMachineScaleSetRollingUpgradesClientGetLatestResponse contains the response from method VirtualMachineScaleSetRollingUpgradesClient.GetLatest. +type VirtualMachineScaleSetRollingUpgradesClientGetLatestResponse struct { + RollingUpgradeStatusInfo +} + +// VirtualMachineScaleSetRollingUpgradesClientStartExtensionUpgradeResponse contains the response from method VirtualMachineScaleSetRollingUpgradesClient.StartExtensionUpgrade. +type VirtualMachineScaleSetRollingUpgradesClientStartExtensionUpgradeResponse struct { + // placeholder for future response values +} + +// VirtualMachineScaleSetRollingUpgradesClientStartOSUpgradeResponse contains the response from method VirtualMachineScaleSetRollingUpgradesClient.StartOSUpgrade. +type VirtualMachineScaleSetRollingUpgradesClientStartOSUpgradeResponse struct { + // placeholder for future response values +} + +// VirtualMachineScaleSetVMExtensionsClientCreateOrUpdateResponse contains the response from method VirtualMachineScaleSetVMExtensionsClient.CreateOrUpdate. 
+type VirtualMachineScaleSetVMExtensionsClientCreateOrUpdateResponse struct { + VirtualMachineScaleSetVMExtension +} + +// VirtualMachineScaleSetVMExtensionsClientDeleteResponse contains the response from method VirtualMachineScaleSetVMExtensionsClient.Delete. +type VirtualMachineScaleSetVMExtensionsClientDeleteResponse struct { + // placeholder for future response values +} + +// VirtualMachineScaleSetVMExtensionsClientGetResponse contains the response from method VirtualMachineScaleSetVMExtensionsClient.Get. +type VirtualMachineScaleSetVMExtensionsClientGetResponse struct { + VirtualMachineScaleSetVMExtension +} + +// VirtualMachineScaleSetVMExtensionsClientListResponse contains the response from method VirtualMachineScaleSetVMExtensionsClient.List. +type VirtualMachineScaleSetVMExtensionsClientListResponse struct { + VirtualMachineScaleSetVMExtensionsListResult +} + +// VirtualMachineScaleSetVMExtensionsClientUpdateResponse contains the response from method VirtualMachineScaleSetVMExtensionsClient.Update. +type VirtualMachineScaleSetVMExtensionsClientUpdateResponse struct { + VirtualMachineScaleSetVMExtension +} + +// VirtualMachineScaleSetVMRunCommandsClientCreateOrUpdateResponse contains the response from method VirtualMachineScaleSetVMRunCommandsClient.CreateOrUpdate. +type VirtualMachineScaleSetVMRunCommandsClientCreateOrUpdateResponse struct { + VirtualMachineRunCommand +} + +// VirtualMachineScaleSetVMRunCommandsClientDeleteResponse contains the response from method VirtualMachineScaleSetVMRunCommandsClient.Delete. +type VirtualMachineScaleSetVMRunCommandsClientDeleteResponse struct { + // placeholder for future response values +} + +// VirtualMachineScaleSetVMRunCommandsClientGetResponse contains the response from method VirtualMachineScaleSetVMRunCommandsClient.Get. +type VirtualMachineScaleSetVMRunCommandsClientGetResponse struct { + VirtualMachineRunCommand +} + +// VirtualMachineScaleSetVMRunCommandsClientListResponse contains the response from method VirtualMachineScaleSetVMRunCommandsClient.List. +type VirtualMachineScaleSetVMRunCommandsClientListResponse struct { + VirtualMachineRunCommandsListResult +} + +// VirtualMachineScaleSetVMRunCommandsClientUpdateResponse contains the response from method VirtualMachineScaleSetVMRunCommandsClient.Update. +type VirtualMachineScaleSetVMRunCommandsClientUpdateResponse struct { + VirtualMachineRunCommand +} + +// VirtualMachineScaleSetVMsClientDeallocateResponse contains the response from method VirtualMachineScaleSetVMsClient.Deallocate. +type VirtualMachineScaleSetVMsClientDeallocateResponse struct { + // placeholder for future response values +} + +// VirtualMachineScaleSetVMsClientDeleteResponse contains the response from method VirtualMachineScaleSetVMsClient.Delete. +type VirtualMachineScaleSetVMsClientDeleteResponse struct { + // placeholder for future response values +} + +// VirtualMachineScaleSetVMsClientGetInstanceViewResponse contains the response from method VirtualMachineScaleSetVMsClient.GetInstanceView. +type VirtualMachineScaleSetVMsClientGetInstanceViewResponse struct { + VirtualMachineScaleSetVMInstanceView +} + +// VirtualMachineScaleSetVMsClientGetResponse contains the response from method VirtualMachineScaleSetVMsClient.Get. +type VirtualMachineScaleSetVMsClientGetResponse struct { + VirtualMachineScaleSetVM +} + +// VirtualMachineScaleSetVMsClientListResponse contains the response from method VirtualMachineScaleSetVMsClient.List. 
+type VirtualMachineScaleSetVMsClientListResponse struct { + VirtualMachineScaleSetVMListResult +} + +// VirtualMachineScaleSetVMsClientPerformMaintenanceResponse contains the response from method VirtualMachineScaleSetVMsClient.PerformMaintenance. +type VirtualMachineScaleSetVMsClientPerformMaintenanceResponse struct { + // placeholder for future response values +} + +// VirtualMachineScaleSetVMsClientPowerOffResponse contains the response from method VirtualMachineScaleSetVMsClient.PowerOff. +type VirtualMachineScaleSetVMsClientPowerOffResponse struct { + // placeholder for future response values +} + +// VirtualMachineScaleSetVMsClientRedeployResponse contains the response from method VirtualMachineScaleSetVMsClient.Redeploy. +type VirtualMachineScaleSetVMsClientRedeployResponse struct { + // placeholder for future response values +} + +// VirtualMachineScaleSetVMsClientReimageAllResponse contains the response from method VirtualMachineScaleSetVMsClient.ReimageAll. +type VirtualMachineScaleSetVMsClientReimageAllResponse struct { + // placeholder for future response values +} + +// VirtualMachineScaleSetVMsClientReimageResponse contains the response from method VirtualMachineScaleSetVMsClient.Reimage. +type VirtualMachineScaleSetVMsClientReimageResponse struct { + // placeholder for future response values +} + +// VirtualMachineScaleSetVMsClientRestartResponse contains the response from method VirtualMachineScaleSetVMsClient.Restart. +type VirtualMachineScaleSetVMsClientRestartResponse struct { + // placeholder for future response values +} + +// VirtualMachineScaleSetVMsClientRetrieveBootDiagnosticsDataResponse contains the response from method VirtualMachineScaleSetVMsClient.RetrieveBootDiagnosticsData. +type VirtualMachineScaleSetVMsClientRetrieveBootDiagnosticsDataResponse struct { + RetrieveBootDiagnosticsDataResult +} + +// VirtualMachineScaleSetVMsClientRunCommandResponse contains the response from method VirtualMachineScaleSetVMsClient.RunCommand. +type VirtualMachineScaleSetVMsClientRunCommandResponse struct { + RunCommandResult +} + +// VirtualMachineScaleSetVMsClientSimulateEvictionResponse contains the response from method VirtualMachineScaleSetVMsClient.SimulateEviction. +type VirtualMachineScaleSetVMsClientSimulateEvictionResponse struct { + // placeholder for future response values +} + +// VirtualMachineScaleSetVMsClientStartResponse contains the response from method VirtualMachineScaleSetVMsClient.Start. +type VirtualMachineScaleSetVMsClientStartResponse struct { + // placeholder for future response values +} + +// VirtualMachineScaleSetVMsClientUpdateResponse contains the response from method VirtualMachineScaleSetVMsClient.Update. +type VirtualMachineScaleSetVMsClientUpdateResponse struct { + VirtualMachineScaleSetVM +} + +// VirtualMachineScaleSetsClientConvertToSinglePlacementGroupResponse contains the response from method VirtualMachineScaleSetsClient.ConvertToSinglePlacementGroup. +type VirtualMachineScaleSetsClientConvertToSinglePlacementGroupResponse struct { + // placeholder for future response values +} + +// VirtualMachineScaleSetsClientCreateOrUpdateResponse contains the response from method VirtualMachineScaleSetsClient.CreateOrUpdate. +type VirtualMachineScaleSetsClientCreateOrUpdateResponse struct { + VirtualMachineScaleSet +} + +// VirtualMachineScaleSetsClientDeallocateResponse contains the response from method VirtualMachineScaleSetsClient.Deallocate. 
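+// The struct is intentionally empty: the service returns no response body
+// today, and the placeholder keeps the method signature stable if fields are
+// added in a later API version.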
+type VirtualMachineScaleSetsClientDeallocateResponse struct { + // placeholder for future response values +} + +// VirtualMachineScaleSetsClientDeleteInstancesResponse contains the response from method VirtualMachineScaleSetsClient.DeleteInstances. +type VirtualMachineScaleSetsClientDeleteInstancesResponse struct { + // placeholder for future response values +} + +// VirtualMachineScaleSetsClientDeleteResponse contains the response from method VirtualMachineScaleSetsClient.Delete. +type VirtualMachineScaleSetsClientDeleteResponse struct { + // placeholder for future response values +} + +// VirtualMachineScaleSetsClientForceRecoveryServiceFabricPlatformUpdateDomainWalkResponse contains the response from method +// VirtualMachineScaleSetsClient.ForceRecoveryServiceFabricPlatformUpdateDomainWalk. +type VirtualMachineScaleSetsClientForceRecoveryServiceFabricPlatformUpdateDomainWalkResponse struct { + RecoveryWalkResponse +} + +// VirtualMachineScaleSetsClientGetInstanceViewResponse contains the response from method VirtualMachineScaleSetsClient.GetInstanceView. +type VirtualMachineScaleSetsClientGetInstanceViewResponse struct { + VirtualMachineScaleSetInstanceView +} + +// VirtualMachineScaleSetsClientGetOSUpgradeHistoryResponse contains the response from method VirtualMachineScaleSetsClient.GetOSUpgradeHistory. +type VirtualMachineScaleSetsClientGetOSUpgradeHistoryResponse struct { + VirtualMachineScaleSetListOSUpgradeHistory +} + +// VirtualMachineScaleSetsClientGetResponse contains the response from method VirtualMachineScaleSetsClient.Get. +type VirtualMachineScaleSetsClientGetResponse struct { + VirtualMachineScaleSet +} + +// VirtualMachineScaleSetsClientListAllResponse contains the response from method VirtualMachineScaleSetsClient.ListAll. +type VirtualMachineScaleSetsClientListAllResponse struct { + VirtualMachineScaleSetListWithLinkResult +} + +// VirtualMachineScaleSetsClientListByLocationResponse contains the response from method VirtualMachineScaleSetsClient.ListByLocation. +type VirtualMachineScaleSetsClientListByLocationResponse struct { + VirtualMachineScaleSetListResult +} + +// VirtualMachineScaleSetsClientListResponse contains the response from method VirtualMachineScaleSetsClient.List. +type VirtualMachineScaleSetsClientListResponse struct { + VirtualMachineScaleSetListResult +} + +// VirtualMachineScaleSetsClientListSKUsResponse contains the response from method VirtualMachineScaleSetsClient.ListSKUs. +type VirtualMachineScaleSetsClientListSKUsResponse struct { + VirtualMachineScaleSetListSKUsResult +} + +// VirtualMachineScaleSetsClientPerformMaintenanceResponse contains the response from method VirtualMachineScaleSetsClient.PerformMaintenance. +type VirtualMachineScaleSetsClientPerformMaintenanceResponse struct { + // placeholder for future response values +} + +// VirtualMachineScaleSetsClientPowerOffResponse contains the response from method VirtualMachineScaleSetsClient.PowerOff. +type VirtualMachineScaleSetsClientPowerOffResponse struct { + // placeholder for future response values +} + +// VirtualMachineScaleSetsClientRedeployResponse contains the response from method VirtualMachineScaleSetsClient.Redeploy. +type VirtualMachineScaleSetsClientRedeployResponse struct { + // placeholder for future response values +} + +// VirtualMachineScaleSetsClientReimageAllResponse contains the response from method VirtualMachineScaleSetsClient.ReimageAll. 
+type VirtualMachineScaleSetsClientReimageAllResponse struct { + // placeholder for future response values +} + +// VirtualMachineScaleSetsClientReimageResponse contains the response from method VirtualMachineScaleSetsClient.Reimage. +type VirtualMachineScaleSetsClientReimageResponse struct { + // placeholder for future response values +} + +// VirtualMachineScaleSetsClientRestartResponse contains the response from method VirtualMachineScaleSetsClient.Restart. +type VirtualMachineScaleSetsClientRestartResponse struct { + // placeholder for future response values +} + +// VirtualMachineScaleSetsClientSetOrchestrationServiceStateResponse contains the response from method VirtualMachineScaleSetsClient.SetOrchestrationServiceState. +type VirtualMachineScaleSetsClientSetOrchestrationServiceStateResponse struct { + // placeholder for future response values +} + +// VirtualMachineScaleSetsClientStartResponse contains the response from method VirtualMachineScaleSetsClient.Start. +type VirtualMachineScaleSetsClientStartResponse struct { + // placeholder for future response values +} + +// VirtualMachineScaleSetsClientUpdateInstancesResponse contains the response from method VirtualMachineScaleSetsClient.UpdateInstances. +type VirtualMachineScaleSetsClientUpdateInstancesResponse struct { + // placeholder for future response values +} + +// VirtualMachineScaleSetsClientUpdateResponse contains the response from method VirtualMachineScaleSetsClient.Update. +type VirtualMachineScaleSetsClientUpdateResponse struct { + VirtualMachineScaleSet +} + +// VirtualMachineSizesClientListResponse contains the response from method VirtualMachineSizesClient.List. +type VirtualMachineSizesClientListResponse struct { + VirtualMachineSizeListResult +} + +// VirtualMachinesClientAssessPatchesResponse contains the response from method VirtualMachinesClient.AssessPatches. +type VirtualMachinesClientAssessPatchesResponse struct { + VirtualMachineAssessPatchesResult +} + +// VirtualMachinesClientCaptureResponse contains the response from method VirtualMachinesClient.Capture. +type VirtualMachinesClientCaptureResponse struct { + VirtualMachineCaptureResult +} + +// VirtualMachinesClientConvertToManagedDisksResponse contains the response from method VirtualMachinesClient.ConvertToManagedDisks. +type VirtualMachinesClientConvertToManagedDisksResponse struct { + // placeholder for future response values +} + +// VirtualMachinesClientCreateOrUpdateResponse contains the response from method VirtualMachinesClient.CreateOrUpdate. +type VirtualMachinesClientCreateOrUpdateResponse struct { + VirtualMachine +} + +// VirtualMachinesClientDeallocateResponse contains the response from method VirtualMachinesClient.Deallocate. +type VirtualMachinesClientDeallocateResponse struct { + // placeholder for future response values +} + +// VirtualMachinesClientDeleteResponse contains the response from method VirtualMachinesClient.Delete. +type VirtualMachinesClientDeleteResponse struct { + // placeholder for future response values +} + +// VirtualMachinesClientGeneralizeResponse contains the response from method VirtualMachinesClient.Generalize. +type VirtualMachinesClientGeneralizeResponse struct { + // placeholder for future response values +} + +// VirtualMachinesClientGetResponse contains the response from method VirtualMachinesClient.Get. +type VirtualMachinesClientGetResponse struct { + VirtualMachine +} + +// VirtualMachinesClientInstallPatchesResponse contains the response from method VirtualMachinesClient.InstallPatches. 
+type VirtualMachinesClientInstallPatchesResponse struct { + VirtualMachineInstallPatchesResult +} + +// VirtualMachinesClientInstanceViewResponse contains the response from method VirtualMachinesClient.InstanceView. +type VirtualMachinesClientInstanceViewResponse struct { + VirtualMachineInstanceView +} + +// VirtualMachinesClientListAllResponse contains the response from method VirtualMachinesClient.ListAll. +type VirtualMachinesClientListAllResponse struct { + VirtualMachineListResult +} + +// VirtualMachinesClientListAvailableSizesResponse contains the response from method VirtualMachinesClient.ListAvailableSizes. +type VirtualMachinesClientListAvailableSizesResponse struct { + VirtualMachineSizeListResult +} + +// VirtualMachinesClientListByLocationResponse contains the response from method VirtualMachinesClient.ListByLocation. +type VirtualMachinesClientListByLocationResponse struct { + VirtualMachineListResult +} + +// VirtualMachinesClientListResponse contains the response from method VirtualMachinesClient.List. +type VirtualMachinesClientListResponse struct { + VirtualMachineListResult +} + +// VirtualMachinesClientPerformMaintenanceResponse contains the response from method VirtualMachinesClient.PerformMaintenance. +type VirtualMachinesClientPerformMaintenanceResponse struct { + // placeholder for future response values +} + +// VirtualMachinesClientPowerOffResponse contains the response from method VirtualMachinesClient.PowerOff. +type VirtualMachinesClientPowerOffResponse struct { + // placeholder for future response values +} + +// VirtualMachinesClientReapplyResponse contains the response from method VirtualMachinesClient.Reapply. +type VirtualMachinesClientReapplyResponse struct { + // placeholder for future response values +} + +// VirtualMachinesClientRedeployResponse contains the response from method VirtualMachinesClient.Redeploy. +type VirtualMachinesClientRedeployResponse struct { + // placeholder for future response values +} + +// VirtualMachinesClientReimageResponse contains the response from method VirtualMachinesClient.Reimage. +type VirtualMachinesClientReimageResponse struct { + // placeholder for future response values +} + +// VirtualMachinesClientRestartResponse contains the response from method VirtualMachinesClient.Restart. +type VirtualMachinesClientRestartResponse struct { + // placeholder for future response values +} + +// VirtualMachinesClientRetrieveBootDiagnosticsDataResponse contains the response from method VirtualMachinesClient.RetrieveBootDiagnosticsData. +type VirtualMachinesClientRetrieveBootDiagnosticsDataResponse struct { + RetrieveBootDiagnosticsDataResult +} + +// VirtualMachinesClientRunCommandResponse contains the response from method VirtualMachinesClient.RunCommand. +type VirtualMachinesClientRunCommandResponse struct { + RunCommandResult +} + +// VirtualMachinesClientSimulateEvictionResponse contains the response from method VirtualMachinesClient.SimulateEviction. +type VirtualMachinesClientSimulateEvictionResponse struct { + // placeholder for future response values +} + +// VirtualMachinesClientStartResponse contains the response from method VirtualMachinesClient.Start. +type VirtualMachinesClientStartResponse struct { + // placeholder for future response values +} + +// VirtualMachinesClientUpdateResponse contains the response from method VirtualMachinesClient.Update. 
+type VirtualMachinesClientUpdateResponse struct { + VirtualMachine +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_restorepointcollections_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_restorepointcollections_client.go new file mode 100644 index 000000000..b706d70fb --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_restorepointcollections_client.go @@ -0,0 +1,425 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armcompute + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + armruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strings" +) + +// RestorePointCollectionsClient contains the methods for the RestorePointCollections group. +// Don't use this type directly, use NewRestorePointCollectionsClient() instead. +type RestorePointCollectionsClient struct { + host string + subscriptionID string + pl runtime.Pipeline +} + +// NewRestorePointCollectionsClient creates a new instance of RestorePointCollectionsClient with the specified values. +// subscriptionID - Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms +// part of the URI for every service call. +// credential - used to authorize requests. Usually a credential from azidentity. +// options - pass nil to accept the default values. +func NewRestorePointCollectionsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*RestorePointCollectionsClient, error) { + if options == nil { + options = &arm.ClientOptions{} + } + ep := cloud.AzurePublic.Services[cloud.ResourceManager].Endpoint + if c, ok := options.Cloud.Services[cloud.ResourceManager]; ok { + ep = c.Endpoint + } + pl, err := armruntime.NewPipeline(moduleName, moduleVersion, credential, runtime.PipelineOptions{}, options) + if err != nil { + return nil, err + } + client := &RestorePointCollectionsClient{ + subscriptionID: subscriptionID, + host: ep, + pl: pl, + } + return client, nil +} + +// CreateOrUpdate - The operation to create or update the restore point collection. Please refer to https://aka.ms/RestorePoints +// for more details. When updating a restore point collection, only tags may be modified. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// restorePointCollectionName - The name of the restore point collection. +// parameters - Parameters supplied to the Create or Update restore point collection operation. +// options - RestorePointCollectionsClientCreateOrUpdateOptions contains the optional parameters for the RestorePointCollectionsClient.CreateOrUpdate +// method. 
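+//
+// A minimal sketch of calling this method, assuming an already-constructed
+// client; the resource group, collection name, location, and source VM ID are
+// placeholders, and to.Ptr comes from sdk/azcore/to:
+//
+//	rpc := RestorePointCollection{
+//		Location: to.Ptr("westus"),
+//		Properties: &RestorePointCollectionProperties{
+//			Source: &RestorePointCollectionSourceProperties{ID: to.Ptr("<source VM resource ID>")},
+//		},
+//	}
+//	resp, err := client.CreateOrUpdate(ctx, "myResourceGroup", "myCollection", rpc, nil)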
+func (client *RestorePointCollectionsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, restorePointCollectionName string, parameters RestorePointCollection, options *RestorePointCollectionsClientCreateOrUpdateOptions) (RestorePointCollectionsClientCreateOrUpdateResponse, error) { + req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, restorePointCollectionName, parameters, options) + if err != nil { + return RestorePointCollectionsClientCreateOrUpdateResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return RestorePointCollectionsClientCreateOrUpdateResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) { + return RestorePointCollectionsClientCreateOrUpdateResponse{}, runtime.NewResponseError(resp) + } + return client.createOrUpdateHandleResponse(resp) +} + +// createOrUpdateCreateRequest creates the CreateOrUpdate request. +func (client *RestorePointCollectionsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, restorePointCollectionName string, parameters RestorePointCollection, options *RestorePointCollectionsClientCreateOrUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/restorePointCollections/{restorePointCollectionName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if restorePointCollectionName == "" { + return nil, errors.New("parameter restorePointCollectionName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{restorePointCollectionName}", url.PathEscape(restorePointCollectionName)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, parameters) +} + +// createOrUpdateHandleResponse handles the CreateOrUpdate response. +func (client *RestorePointCollectionsClient) createOrUpdateHandleResponse(resp *http.Response) (RestorePointCollectionsClientCreateOrUpdateResponse, error) { + result := RestorePointCollectionsClientCreateOrUpdateResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.RestorePointCollection); err != nil { + return RestorePointCollectionsClientCreateOrUpdateResponse{}, err + } + return result, nil +} + +// BeginDelete - The operation to delete the restore point collection. This operation will also delete all the contained restore +// points. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// restorePointCollectionName - The name of the Restore Point Collection. +// options - RestorePointCollectionsClientBeginDeleteOptions contains the optional parameters for the RestorePointCollectionsClient.BeginDelete +// method. 
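+//
+// A minimal sketch of driving the returned poller to completion (the resource
+// group and collection names are placeholders):
+//
+//	poller, err := client.BeginDelete(ctx, "myResourceGroup", "myCollection", nil)
+//	if err == nil {
+//		_, err = poller.PollUntilDone(ctx, nil)
+//	}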
+func (client *RestorePointCollectionsClient) BeginDelete(ctx context.Context, resourceGroupName string, restorePointCollectionName string, options *RestorePointCollectionsClientBeginDeleteOptions) (*runtime.Poller[RestorePointCollectionsClientDeleteResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.deleteOperation(ctx, resourceGroupName, restorePointCollectionName, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[RestorePointCollectionsClientDeleteResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[RestorePointCollectionsClientDeleteResponse](options.ResumeToken, client.pl, nil) + } +} + +// Delete - The operation to delete the restore point collection. This operation will also delete all the contained restore +// points. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +func (client *RestorePointCollectionsClient) deleteOperation(ctx context.Context, resourceGroupName string, restorePointCollectionName string, options *RestorePointCollectionsClientBeginDeleteOptions) (*http.Response, error) { + req, err := client.deleteCreateRequest(ctx, resourceGroupName, restorePointCollectionName, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted, http.StatusNoContent) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// deleteCreateRequest creates the Delete request. +func (client *RestorePointCollectionsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, restorePointCollectionName string, options *RestorePointCollectionsClientBeginDeleteOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/restorePointCollections/{restorePointCollectionName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if restorePointCollectionName == "" { + return nil, errors.New("parameter restorePointCollectionName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{restorePointCollectionName}", url.PathEscape(restorePointCollectionName)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// Get - The operation to get the restore point collection. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// restorePointCollectionName - The name of the restore point collection. +// options - RestorePointCollectionsClientGetOptions contains the optional parameters for the RestorePointCollectionsClient.Get +// method. 
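+//
+// A minimal sketch of also requesting the contained restore points, assuming
+// the expand enum defined elsewhere in this package (names are placeholders):
+//
+//	expand := RestorePointCollectionExpandOptionsRestorePoints
+//	resp, err := client.Get(ctx, "myResourceGroup", "myCollection",
+//		&RestorePointCollectionsClientGetOptions{Expand: &expand})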
+func (client *RestorePointCollectionsClient) Get(ctx context.Context, resourceGroupName string, restorePointCollectionName string, options *RestorePointCollectionsClientGetOptions) (RestorePointCollectionsClientGetResponse, error) { + req, err := client.getCreateRequest(ctx, resourceGroupName, restorePointCollectionName, options) + if err != nil { + return RestorePointCollectionsClientGetResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return RestorePointCollectionsClientGetResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return RestorePointCollectionsClientGetResponse{}, runtime.NewResponseError(resp) + } + return client.getHandleResponse(resp) +} + +// getCreateRequest creates the Get request. +func (client *RestorePointCollectionsClient) getCreateRequest(ctx context.Context, resourceGroupName string, restorePointCollectionName string, options *RestorePointCollectionsClientGetOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/restorePointCollections/{restorePointCollectionName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if restorePointCollectionName == "" { + return nil, errors.New("parameter restorePointCollectionName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{restorePointCollectionName}", url.PathEscape(restorePointCollectionName)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Expand != nil { + reqQP.Set("$expand", string(*options.Expand)) + } + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getHandleResponse handles the Get response. +func (client *RestorePointCollectionsClient) getHandleResponse(resp *http.Response) (RestorePointCollectionsClientGetResponse, error) { + result := RestorePointCollectionsClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.RestorePointCollection); err != nil { + return RestorePointCollectionsClientGetResponse{}, err + } + return result, nil +} + +// NewListPager - Gets the list of restore point collections in a resource group. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// options - RestorePointCollectionsClientListOptions contains the optional parameters for the RestorePointCollectionsClient.List +// method. 
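+//
+// A minimal sketch of consuming the pager; each page's Value slice holds
+// *RestorePointCollection items (the resource group name is a placeholder):
+//
+//	pager := client.NewListPager("myResourceGroup", nil)
+//	for pager.More() {
+//		page, err := pager.NextPage(ctx)
+//		if err != nil {
+//			break // handle the error in real code
+//		}
+//		for _, rpc := range page.Value {
+//			_ = rpc
+//		}
+//	}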
+func (client *RestorePointCollectionsClient) NewListPager(resourceGroupName string, options *RestorePointCollectionsClientListOptions) *runtime.Pager[RestorePointCollectionsClientListResponse] {
+	return runtime.NewPager(runtime.PagingHandler[RestorePointCollectionsClientListResponse]{
+		More: func(page RestorePointCollectionsClientListResponse) bool {
+			return page.NextLink != nil && len(*page.NextLink) > 0
+		},
+		Fetcher: func(ctx context.Context, page *RestorePointCollectionsClientListResponse) (RestorePointCollectionsClientListResponse, error) {
+			var req *policy.Request
+			var err error
+			if page == nil {
+				req, err = client.listCreateRequest(ctx, resourceGroupName, options)
+			} else {
+				req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink)
+			}
+			if err != nil {
+				return RestorePointCollectionsClientListResponse{}, err
+			}
+			resp, err := client.pl.Do(req)
+			if err != nil {
+				return RestorePointCollectionsClientListResponse{}, err
+			}
+			if !runtime.HasStatusCode(resp, http.StatusOK) {
+				return RestorePointCollectionsClientListResponse{}, runtime.NewResponseError(resp)
+			}
+			return client.listHandleResponse(resp)
+		},
+	})
+}
+
+// listCreateRequest creates the List request.
+func (client *RestorePointCollectionsClient) listCreateRequest(ctx context.Context, resourceGroupName string, options *RestorePointCollectionsClientListOptions) (*policy.Request, error) {
+	urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/restorePointCollections"
+	if client.subscriptionID == "" {
+		return nil, errors.New("parameter client.subscriptionID cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID))
+	if resourceGroupName == "" {
+		return nil, errors.New("parameter resourceGroupName cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName))
+	req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))
+	if err != nil {
+		return nil, err
+	}
+	reqQP := req.Raw().URL.Query()
+	reqQP.Set("api-version", "2022-03-01")
+	req.Raw().URL.RawQuery = reqQP.Encode()
+	req.Raw().Header["Accept"] = []string{"application/json"}
+	return req, nil
+}
+
+// listHandleResponse handles the List response.
+func (client *RestorePointCollectionsClient) listHandleResponse(resp *http.Response) (RestorePointCollectionsClientListResponse, error) {
+	result := RestorePointCollectionsClientListResponse{}
+	if err := runtime.UnmarshalAsJSON(resp, &result.RestorePointCollectionListResult); err != nil {
+		return RestorePointCollectionsClientListResponse{}, err
+	}
+	return result, nil
+}
+
+// NewListAllPager - Gets the list of restore point collections in the subscription. Use nextLink property in the response
+// to get the next page of restore point collections. Do this while nextLink is not null to fetch all
+// the restore point collections.
+// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2022-03-01
+// options - RestorePointCollectionsClientListAllOptions contains the optional parameters for the RestorePointCollectionsClient.ListAll
+// method.
+func (client *RestorePointCollectionsClient) NewListAllPager(options *RestorePointCollectionsClientListAllOptions) *runtime.Pager[RestorePointCollectionsClientListAllResponse] { + return runtime.NewPager(runtime.PagingHandler[RestorePointCollectionsClientListAllResponse]{ + More: func(page RestorePointCollectionsClientListAllResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *RestorePointCollectionsClientListAllResponse) (RestorePointCollectionsClientListAllResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listAllCreateRequest(ctx, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return RestorePointCollectionsClientListAllResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return RestorePointCollectionsClientListAllResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return RestorePointCollectionsClientListAllResponse{}, runtime.NewResponseError(resp) + } + return client.listAllHandleResponse(resp) + }, + }) +} + +// listAllCreateRequest creates the ListAll request. +func (client *RestorePointCollectionsClient) listAllCreateRequest(ctx context.Context, options *RestorePointCollectionsClientListAllOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/restorePointCollections" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listAllHandleResponse handles the ListAll response. +func (client *RestorePointCollectionsClient) listAllHandleResponse(resp *http.Response) (RestorePointCollectionsClientListAllResponse, error) { + result := RestorePointCollectionsClientListAllResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.RestorePointCollectionListResult); err != nil { + return RestorePointCollectionsClientListAllResponse{}, err + } + return result, nil +} + +// Update - The operation to update the restore point collection. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// restorePointCollectionName - The name of the restore point collection. +// parameters - Parameters supplied to the Update restore point collection operation. +// options - RestorePointCollectionsClientUpdateOptions contains the optional parameters for the RestorePointCollectionsClient.Update +// method. 
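+//
+// Per the CreateOrUpdate documentation above, only tags may be modified on an
+// existing collection, so a typical call patches just the Tags map. A sketch
+// (names and tag values are placeholders, to.Ptr comes from sdk/azcore/to):
+//
+//	resp, err := client.Update(ctx, "myResourceGroup", "myCollection",
+//		RestorePointCollectionUpdate{Tags: map[string]*string{"env": to.Ptr("dev")}}, nil)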
+func (client *RestorePointCollectionsClient) Update(ctx context.Context, resourceGroupName string, restorePointCollectionName string, parameters RestorePointCollectionUpdate, options *RestorePointCollectionsClientUpdateOptions) (RestorePointCollectionsClientUpdateResponse, error) { + req, err := client.updateCreateRequest(ctx, resourceGroupName, restorePointCollectionName, parameters, options) + if err != nil { + return RestorePointCollectionsClientUpdateResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return RestorePointCollectionsClientUpdateResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return RestorePointCollectionsClientUpdateResponse{}, runtime.NewResponseError(resp) + } + return client.updateHandleResponse(resp) +} + +// updateCreateRequest creates the Update request. +func (client *RestorePointCollectionsClient) updateCreateRequest(ctx context.Context, resourceGroupName string, restorePointCollectionName string, parameters RestorePointCollectionUpdate, options *RestorePointCollectionsClientUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/restorePointCollections/{restorePointCollectionName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if restorePointCollectionName == "" { + return nil, errors.New("parameter restorePointCollectionName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{restorePointCollectionName}", url.PathEscape(restorePointCollectionName)) + req, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, parameters) +} + +// updateHandleResponse handles the Update response. +func (client *RestorePointCollectionsClient) updateHandleResponse(resp *http.Response) (RestorePointCollectionsClientUpdateResponse, error) { + result := RestorePointCollectionsClientUpdateResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.RestorePointCollection); err != nil { + return RestorePointCollectionsClientUpdateResponse{}, err + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_restorepoints_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_restorepoints_client.go new file mode 100644 index 000000000..11ec43cf7 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_restorepoints_client.go @@ -0,0 +1,257 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
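+//
+// The constructors in this package default to the Azure public cloud; a
+// different cloud can be selected through the client options. A minimal
+// sketch, assuming a credential from azidentity (subID is a placeholder):
+//
+//	opts := &arm.ClientOptions{ClientOptions: azcore.ClientOptions{Cloud: cloud.AzureGovernment}}
+//	client, err := NewRestorePointsClient(subID, cred, opts)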
+ +package armcompute + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + armruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strings" +) + +// RestorePointsClient contains the methods for the RestorePoints group. +// Don't use this type directly, use NewRestorePointsClient() instead. +type RestorePointsClient struct { + host string + subscriptionID string + pl runtime.Pipeline +} + +// NewRestorePointsClient creates a new instance of RestorePointsClient with the specified values. +// subscriptionID - Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms +// part of the URI for every service call. +// credential - used to authorize requests. Usually a credential from azidentity. +// options - pass nil to accept the default values. +func NewRestorePointsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*RestorePointsClient, error) { + if options == nil { + options = &arm.ClientOptions{} + } + ep := cloud.AzurePublic.Services[cloud.ResourceManager].Endpoint + if c, ok := options.Cloud.Services[cloud.ResourceManager]; ok { + ep = c.Endpoint + } + pl, err := armruntime.NewPipeline(moduleName, moduleVersion, credential, runtime.PipelineOptions{}, options) + if err != nil { + return nil, err + } + client := &RestorePointsClient{ + subscriptionID: subscriptionID, + host: ep, + pl: pl, + } + return client, nil +} + +// BeginCreate - The operation to create the restore point. Updating properties of an existing restore point is not allowed +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// restorePointCollectionName - The name of the restore point collection. +// restorePointName - The name of the restore point. +// parameters - Parameters supplied to the Create restore point operation. +// options - RestorePointsClientBeginCreateOptions contains the optional parameters for the RestorePointsClient.BeginCreate +// method. +func (client *RestorePointsClient) BeginCreate(ctx context.Context, resourceGroupName string, restorePointCollectionName string, restorePointName string, parameters RestorePoint, options *RestorePointsClientBeginCreateOptions) (*runtime.Poller[RestorePointsClientCreateResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.create(ctx, resourceGroupName, restorePointCollectionName, restorePointName, parameters, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[RestorePointsClientCreateResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[RestorePointsClientCreateResponse](options.ResumeToken, client.pl, nil) + } +} + +// Create - The operation to create the restore point. Updating properties of an existing restore point is not allowed +// If the operation fails it returns an *azcore.ResponseError type. 
+// Generated from API version 2022-03-01 +func (client *RestorePointsClient) create(ctx context.Context, resourceGroupName string, restorePointCollectionName string, restorePointName string, parameters RestorePoint, options *RestorePointsClientBeginCreateOptions) (*http.Response, error) { + req, err := client.createCreateRequest(ctx, resourceGroupName, restorePointCollectionName, restorePointName, parameters, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// createCreateRequest creates the Create request. +func (client *RestorePointsClient) createCreateRequest(ctx context.Context, resourceGroupName string, restorePointCollectionName string, restorePointName string, parameters RestorePoint, options *RestorePointsClientBeginCreateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/restorePointCollections/{restorePointCollectionName}/restorePoints/{restorePointName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if restorePointCollectionName == "" { + return nil, errors.New("parameter restorePointCollectionName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{restorePointCollectionName}", url.PathEscape(restorePointCollectionName)) + if restorePointName == "" { + return nil, errors.New("parameter restorePointName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{restorePointName}", url.PathEscape(restorePointName)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, parameters) +} + +// BeginDelete - The operation to delete the restore point. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// restorePointCollectionName - The name of the Restore Point Collection. +// restorePointName - The name of the restore point. +// options - RestorePointsClientBeginDeleteOptions contains the optional parameters for the RestorePointsClient.BeginDelete +// method. 
+func (client *RestorePointsClient) BeginDelete(ctx context.Context, resourceGroupName string, restorePointCollectionName string, restorePointName string, options *RestorePointsClientBeginDeleteOptions) (*runtime.Poller[RestorePointsClientDeleteResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.deleteOperation(ctx, resourceGroupName, restorePointCollectionName, restorePointName, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[RestorePointsClientDeleteResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[RestorePointsClientDeleteResponse](options.ResumeToken, client.pl, nil) + } +} + +// Delete - The operation to delete the restore point. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +func (client *RestorePointsClient) deleteOperation(ctx context.Context, resourceGroupName string, restorePointCollectionName string, restorePointName string, options *RestorePointsClientBeginDeleteOptions) (*http.Response, error) { + req, err := client.deleteCreateRequest(ctx, resourceGroupName, restorePointCollectionName, restorePointName, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted, http.StatusNoContent) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// deleteCreateRequest creates the Delete request. +func (client *RestorePointsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, restorePointCollectionName string, restorePointName string, options *RestorePointsClientBeginDeleteOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/restorePointCollections/{restorePointCollectionName}/restorePoints/{restorePointName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if restorePointCollectionName == "" { + return nil, errors.New("parameter restorePointCollectionName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{restorePointCollectionName}", url.PathEscape(restorePointCollectionName)) + if restorePointName == "" { + return nil, errors.New("parameter restorePointName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{restorePointName}", url.PathEscape(restorePointName)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// Get - The operation to get the restore point. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// restorePointCollectionName - The name of the restore point collection. +// restorePointName - The name of the restore point. 
+// options - RestorePointsClientGetOptions contains the optional parameters for the RestorePointsClient.Get method. +func (client *RestorePointsClient) Get(ctx context.Context, resourceGroupName string, restorePointCollectionName string, restorePointName string, options *RestorePointsClientGetOptions) (RestorePointsClientGetResponse, error) { + req, err := client.getCreateRequest(ctx, resourceGroupName, restorePointCollectionName, restorePointName, options) + if err != nil { + return RestorePointsClientGetResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return RestorePointsClientGetResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return RestorePointsClientGetResponse{}, runtime.NewResponseError(resp) + } + return client.getHandleResponse(resp) +} + +// getCreateRequest creates the Get request. +func (client *RestorePointsClient) getCreateRequest(ctx context.Context, resourceGroupName string, restorePointCollectionName string, restorePointName string, options *RestorePointsClientGetOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/restorePointCollections/{restorePointCollectionName}/restorePoints/{restorePointName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if restorePointCollectionName == "" { + return nil, errors.New("parameter restorePointCollectionName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{restorePointCollectionName}", url.PathEscape(restorePointCollectionName)) + if restorePointName == "" { + return nil, errors.New("parameter restorePointName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{restorePointName}", url.PathEscape(restorePointName)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Expand != nil { + reqQP.Set("$expand", string(*options.Expand)) + } + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getHandleResponse handles the Get response. +func (client *RestorePointsClient) getHandleResponse(resp *http.Response) (RestorePointsClientGetResponse, error) { + result := RestorePointsClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.RestorePoint); err != nil { + return RestorePointsClientGetResponse{}, err + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_sharedgalleries_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_sharedgalleries_client.go new file mode 100644 index 000000000..9f73e7295 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_sharedgalleries_client.go @@ -0,0 +1,179 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armcompute + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + armruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strings" +) + +// SharedGalleriesClient contains the methods for the SharedGalleries group. +// Don't use this type directly, use NewSharedGalleriesClient() instead. +type SharedGalleriesClient struct { + host string + subscriptionID string + pl runtime.Pipeline +} + +// NewSharedGalleriesClient creates a new instance of SharedGalleriesClient with the specified values. +// subscriptionID - Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms +// part of the URI for every service call. +// credential - used to authorize requests. Usually a credential from azidentity. +// options - pass nil to accept the default values. +func NewSharedGalleriesClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*SharedGalleriesClient, error) { + if options == nil { + options = &arm.ClientOptions{} + } + ep := cloud.AzurePublic.Services[cloud.ResourceManager].Endpoint + if c, ok := options.Cloud.Services[cloud.ResourceManager]; ok { + ep = c.Endpoint + } + pl, err := armruntime.NewPipeline(moduleName, moduleVersion, credential, runtime.PipelineOptions{}, options) + if err != nil { + return nil, err + } + client := &SharedGalleriesClient{ + subscriptionID: subscriptionID, + host: ep, + pl: pl, + } + return client, nil +} + +// Get - Get a shared gallery by subscription id or tenant id. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-07-01 +// location - Resource location. +// galleryUniqueName - The unique name of the Shared Gallery. +// options - SharedGalleriesClientGetOptions contains the optional parameters for the SharedGalleriesClient.Get method. +func (client *SharedGalleriesClient) Get(ctx context.Context, location string, galleryUniqueName string, options *SharedGalleriesClientGetOptions) (SharedGalleriesClientGetResponse, error) { + req, err := client.getCreateRequest(ctx, location, galleryUniqueName, options) + if err != nil { + return SharedGalleriesClientGetResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return SharedGalleriesClientGetResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return SharedGalleriesClientGetResponse{}, runtime.NewResponseError(resp) + } + return client.getHandleResponse(resp) +} + +// getCreateRequest creates the Get request. 
+func (client *SharedGalleriesClient) getCreateRequest(ctx context.Context, location string, galleryUniqueName string, options *SharedGalleriesClientGetOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/sharedGalleries/{galleryUniqueName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if location == "" { + return nil, errors.New("parameter location cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{location}", url.PathEscape(location)) + if galleryUniqueName == "" { + return nil, errors.New("parameter galleryUniqueName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{galleryUniqueName}", url.PathEscape(galleryUniqueName)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-07-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getHandleResponse handles the Get response. +func (client *SharedGalleriesClient) getHandleResponse(resp *http.Response) (SharedGalleriesClientGetResponse, error) { + result := SharedGalleriesClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.SharedGallery); err != nil { + return SharedGalleriesClientGetResponse{}, err + } + return result, nil +} + +// NewListPager - List shared galleries by subscription id or tenant id. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-07-01 +// location - Resource location. +// options - SharedGalleriesClientListOptions contains the optional parameters for the SharedGalleriesClient.List method. +func (client *SharedGalleriesClient) NewListPager(location string, options *SharedGalleriesClientListOptions) *runtime.Pager[SharedGalleriesClientListResponse] { + return runtime.NewPager(runtime.PagingHandler[SharedGalleriesClientListResponse]{ + More: func(page SharedGalleriesClientListResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *SharedGalleriesClientListResponse) (SharedGalleriesClientListResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listCreateRequest(ctx, location, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return SharedGalleriesClientListResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return SharedGalleriesClientListResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return SharedGalleriesClientListResponse{}, runtime.NewResponseError(resp) + } + return client.listHandleResponse(resp) + }, + }) +} + +// listCreateRequest creates the List request. 
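+// The sharedTo query parameter is appended only when options.SharedTo is set.
+// A minimal sketch of listing galleries shared with the caller's tenant,
+// assuming the SharedToValues enum defined elsewhere in this package (the
+// location is a placeholder):
+//
+//	sharedTo := SharedToValuesTenant
+//	pager := client.NewListPager("westus", &SharedGalleriesClientListOptions{SharedTo: &sharedTo})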
+func (client *SharedGalleriesClient) listCreateRequest(ctx context.Context, location string, options *SharedGalleriesClientListOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/sharedGalleries" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if location == "" { + return nil, errors.New("parameter location cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{location}", url.PathEscape(location)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-07-01") + if options != nil && options.SharedTo != nil { + reqQP.Set("sharedTo", string(*options.SharedTo)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listHandleResponse handles the List response. +func (client *SharedGalleriesClient) listHandleResponse(resp *http.Response) (SharedGalleriesClientListResponse, error) { + result := SharedGalleriesClientListResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.SharedGalleryList); err != nil { + return SharedGalleriesClientListResponse{}, err + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_sharedgalleryimages_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_sharedgalleryimages_client.go new file mode 100644 index 000000000..fca3e47af --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_sharedgalleryimages_client.go @@ -0,0 +1,190 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armcompute + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + armruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strings" +) + +// SharedGalleryImagesClient contains the methods for the SharedGalleryImages group. +// Don't use this type directly, use NewSharedGalleryImagesClient() instead. +type SharedGalleryImagesClient struct { + host string + subscriptionID string + pl runtime.Pipeline +} + +// NewSharedGalleryImagesClient creates a new instance of SharedGalleryImagesClient with the specified values. +// subscriptionID - Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms +// part of the URI for every service call. +// credential - used to authorize requests. Usually a credential from azidentity. +// options - pass nil to accept the default values. 
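+//
+// A minimal construction sketch, assuming a credential from the azidentity
+// module (the subscription ID is a placeholder):
+//
+//	cred, err := azidentity.NewDefaultAzureCredential(nil)
+//	if err != nil {
+//		// handle the error
+//	}
+//	client, err := NewSharedGalleryImagesClient("<subscription ID>", cred, nil)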
+func NewSharedGalleryImagesClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*SharedGalleryImagesClient, error) { + if options == nil { + options = &arm.ClientOptions{} + } + ep := cloud.AzurePublic.Services[cloud.ResourceManager].Endpoint + if c, ok := options.Cloud.Services[cloud.ResourceManager]; ok { + ep = c.Endpoint + } + pl, err := armruntime.NewPipeline(moduleName, moduleVersion, credential, runtime.PipelineOptions{}, options) + if err != nil { + return nil, err + } + client := &SharedGalleryImagesClient{ + subscriptionID: subscriptionID, + host: ep, + pl: pl, + } + return client, nil +} + +// Get - Get a shared gallery image by subscription id or tenant id. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-07-01 +// location - Resource location. +// galleryUniqueName - The unique name of the Shared Gallery. +// galleryImageName - The name of the Shared Gallery Image Definition from which the Image Versions are to be listed. +// options - SharedGalleryImagesClientGetOptions contains the optional parameters for the SharedGalleryImagesClient.Get method. +func (client *SharedGalleryImagesClient) Get(ctx context.Context, location string, galleryUniqueName string, galleryImageName string, options *SharedGalleryImagesClientGetOptions) (SharedGalleryImagesClientGetResponse, error) { + req, err := client.getCreateRequest(ctx, location, galleryUniqueName, galleryImageName, options) + if err != nil { + return SharedGalleryImagesClientGetResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return SharedGalleryImagesClientGetResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return SharedGalleryImagesClientGetResponse{}, runtime.NewResponseError(resp) + } + return client.getHandleResponse(resp) +} + +// getCreateRequest creates the Get request. +func (client *SharedGalleryImagesClient) getCreateRequest(ctx context.Context, location string, galleryUniqueName string, galleryImageName string, options *SharedGalleryImagesClientGetOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/sharedGalleries/{galleryUniqueName}/images/{galleryImageName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if location == "" { + return nil, errors.New("parameter location cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{location}", url.PathEscape(location)) + if galleryUniqueName == "" { + return nil, errors.New("parameter galleryUniqueName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{galleryUniqueName}", url.PathEscape(galleryUniqueName)) + if galleryImageName == "" { + return nil, errors.New("parameter galleryImageName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{galleryImageName}", url.PathEscape(galleryImageName)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-07-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getHandleResponse handles the Get response. 
+func (client *SharedGalleryImagesClient) getHandleResponse(resp *http.Response) (SharedGalleryImagesClientGetResponse, error) { + result := SharedGalleryImagesClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.SharedGalleryImage); err != nil { + return SharedGalleryImagesClientGetResponse{}, err + } + return result, nil +} + +// NewListPager - List shared gallery images by subscription id or tenant id. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-07-01 +// location - Resource location. +// galleryUniqueName - The unique name of the Shared Gallery. +// options - SharedGalleryImagesClientListOptions contains the optional parameters for the SharedGalleryImagesClient.List +// method. +func (client *SharedGalleryImagesClient) NewListPager(location string, galleryUniqueName string, options *SharedGalleryImagesClientListOptions) *runtime.Pager[SharedGalleryImagesClientListResponse] { + return runtime.NewPager(runtime.PagingHandler[SharedGalleryImagesClientListResponse]{ + More: func(page SharedGalleryImagesClientListResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *SharedGalleryImagesClientListResponse) (SharedGalleryImagesClientListResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listCreateRequest(ctx, location, galleryUniqueName, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return SharedGalleryImagesClientListResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return SharedGalleryImagesClientListResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return SharedGalleryImagesClientListResponse{}, runtime.NewResponseError(resp) + } + return client.listHandleResponse(resp) + }, + }) +} + +// listCreateRequest creates the List request. +func (client *SharedGalleryImagesClient) listCreateRequest(ctx context.Context, location string, galleryUniqueName string, options *SharedGalleryImagesClientListOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/sharedGalleries/{galleryUniqueName}/images" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if location == "" { + return nil, errors.New("parameter location cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{location}", url.PathEscape(location)) + if galleryUniqueName == "" { + return nil, errors.New("parameter galleryUniqueName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{galleryUniqueName}", url.PathEscape(galleryUniqueName)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-07-01") + if options != nil && options.SharedTo != nil { + reqQP.Set("sharedTo", string(*options.SharedTo)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listHandleResponse handles the List response. 
+func (client *SharedGalleryImagesClient) listHandleResponse(resp *http.Response) (SharedGalleryImagesClientListResponse, error) { + result := SharedGalleryImagesClientListResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.SharedGalleryImageList); err != nil { + return SharedGalleryImagesClientListResponse{}, err + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_sharedgalleryimageversions_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_sharedgalleryimageversions_client.go new file mode 100644 index 000000000..d2257bf32 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_sharedgalleryimageversions_client.go @@ -0,0 +1,203 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armcompute + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + armruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strings" +) + +// SharedGalleryImageVersionsClient contains the methods for the SharedGalleryImageVersions group. +// Don't use this type directly, use NewSharedGalleryImageVersionsClient() instead. +type SharedGalleryImageVersionsClient struct { + host string + subscriptionID string + pl runtime.Pipeline +} + +// NewSharedGalleryImageVersionsClient creates a new instance of SharedGalleryImageVersionsClient with the specified values. +// subscriptionID - Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms +// part of the URI for every service call. +// credential - used to authorize requests. Usually a credential from azidentity. +// options - pass nil to accept the default values. +func NewSharedGalleryImageVersionsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*SharedGalleryImageVersionsClient, error) { + if options == nil { + options = &arm.ClientOptions{} + } + ep := cloud.AzurePublic.Services[cloud.ResourceManager].Endpoint + if c, ok := options.Cloud.Services[cloud.ResourceManager]; ok { + ep = c.Endpoint + } + pl, err := armruntime.NewPipeline(moduleName, moduleVersion, credential, runtime.PipelineOptions{}, options) + if err != nil { + return nil, err + } + client := &SharedGalleryImageVersionsClient{ + subscriptionID: subscriptionID, + host: ep, + pl: pl, + } + return client, nil +} + +// Get - Get a shared gallery image version by subscription id or tenant id. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-07-01 +// location - Resource location. +// galleryUniqueName - The unique name of the Shared Gallery. +// galleryImageName - The name of the Shared Gallery Image Definition from which the Image Versions are to be listed. +// galleryImageVersionName - The name of the gallery image version to be created. 
Needs to follow semantic version name pattern:
+// The allowed characters are digit and period. Digits must be within the range of a 32-bit integer.
+// Format: <MajorVersion>.<MinorVersion>.<Patch>
+// options - SharedGalleryImageVersionsClientGetOptions contains the optional parameters for the SharedGalleryImageVersionsClient.Get
+// method.
+func (client *SharedGalleryImageVersionsClient) Get(ctx context.Context, location string, galleryUniqueName string, galleryImageName string, galleryImageVersionName string, options *SharedGalleryImageVersionsClientGetOptions) (SharedGalleryImageVersionsClientGetResponse, error) {
+	req, err := client.getCreateRequest(ctx, location, galleryUniqueName, galleryImageName, galleryImageVersionName, options)
+	if err != nil {
+		return SharedGalleryImageVersionsClientGetResponse{}, err
+	}
+	resp, err := client.pl.Do(req)
+	if err != nil {
+		return SharedGalleryImageVersionsClientGetResponse{}, err
+	}
+	if !runtime.HasStatusCode(resp, http.StatusOK) {
+		return SharedGalleryImageVersionsClientGetResponse{}, runtime.NewResponseError(resp)
+	}
+	return client.getHandleResponse(resp)
+}
+
+// getCreateRequest creates the Get request.
+func (client *SharedGalleryImageVersionsClient) getCreateRequest(ctx context.Context, location string, galleryUniqueName string, galleryImageName string, galleryImageVersionName string, options *SharedGalleryImageVersionsClientGetOptions) (*policy.Request, error) {
+	urlPath := "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/sharedGalleries/{galleryUniqueName}/images/{galleryImageName}/versions/{galleryImageVersionName}"
+	if client.subscriptionID == "" {
+		return nil, errors.New("parameter client.subscriptionID cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID))
+	if location == "" {
+		return nil, errors.New("parameter location cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{location}", url.PathEscape(location))
+	if galleryUniqueName == "" {
+		return nil, errors.New("parameter galleryUniqueName cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{galleryUniqueName}", url.PathEscape(galleryUniqueName))
+	if galleryImageName == "" {
+		return nil, errors.New("parameter galleryImageName cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{galleryImageName}", url.PathEscape(galleryImageName))
+	if galleryImageVersionName == "" {
+		return nil, errors.New("parameter galleryImageVersionName cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{galleryImageVersionName}", url.PathEscape(galleryImageVersionName))
+	req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))
+	if err != nil {
+		return nil, err
+	}
+	reqQP := req.Raw().URL.Query()
+	reqQP.Set("api-version", "2021-07-01")
+	req.Raw().URL.RawQuery = reqQP.Encode()
+	req.Raw().Header["Accept"] = []string{"application/json"}
+	return req, nil
+}
+
+// getHandleResponse handles the Get response.
+func (client *SharedGalleryImageVersionsClient) getHandleResponse(resp *http.Response) (SharedGalleryImageVersionsClientGetResponse, error) {
+	result := SharedGalleryImageVersionsClientGetResponse{}
+	if err := runtime.UnmarshalAsJSON(resp, &result.SharedGalleryImageVersion); err != nil {
+		return SharedGalleryImageVersionsClientGetResponse{}, err
+	}
+	return result, nil
+}
+
+// NewListPager - List shared gallery image versions by subscription id or tenant id.
+// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2021-07-01 +// location - Resource location. +// galleryUniqueName - The unique name of the Shared Gallery. +// galleryImageName - The name of the Shared Gallery Image Definition from which the Image Versions are to be listed. +// options - SharedGalleryImageVersionsClientListOptions contains the optional parameters for the SharedGalleryImageVersionsClient.List +// method. +func (client *SharedGalleryImageVersionsClient) NewListPager(location string, galleryUniqueName string, galleryImageName string, options *SharedGalleryImageVersionsClientListOptions) *runtime.Pager[SharedGalleryImageVersionsClientListResponse] { + return runtime.NewPager(runtime.PagingHandler[SharedGalleryImageVersionsClientListResponse]{ + More: func(page SharedGalleryImageVersionsClientListResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *SharedGalleryImageVersionsClientListResponse) (SharedGalleryImageVersionsClientListResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listCreateRequest(ctx, location, galleryUniqueName, galleryImageName, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return SharedGalleryImageVersionsClientListResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return SharedGalleryImageVersionsClientListResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return SharedGalleryImageVersionsClientListResponse{}, runtime.NewResponseError(resp) + } + return client.listHandleResponse(resp) + }, + }) +} + +// listCreateRequest creates the List request. +func (client *SharedGalleryImageVersionsClient) listCreateRequest(ctx context.Context, location string, galleryUniqueName string, galleryImageName string, options *SharedGalleryImageVersionsClientListOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/sharedGalleries/{galleryUniqueName}/images/{galleryImageName}/versions" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if location == "" { + return nil, errors.New("parameter location cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{location}", url.PathEscape(location)) + if galleryUniqueName == "" { + return nil, errors.New("parameter galleryUniqueName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{galleryUniqueName}", url.PathEscape(galleryUniqueName)) + if galleryImageName == "" { + return nil, errors.New("parameter galleryImageName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{galleryImageName}", url.PathEscape(galleryImageName)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-07-01") + if options != nil && options.SharedTo != nil { + reqQP.Set("sharedTo", string(*options.SharedTo)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listHandleResponse handles the List response. 
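The runtime.Pager returned by NewListPager is consumed with the More/NextPage pair from azcore; a sketch, reusing the client from the previous example and assuming ctx is a context.Context (names are placeholders):

pager := client.NewListPager("westus2", "myGalleryUniqueName", "myImage", nil)
for pager.More() {
	page, err := pager.NextPage(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// page.Value holds the current page of SharedGalleryImageVersion results.
	for _, v := range page.Value {
		if v.Name != nil {
			fmt.Println(*v.Name)
		}
	}
}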
+func (client *SharedGalleryImageVersionsClient) listHandleResponse(resp *http.Response) (SharedGalleryImageVersionsClientListResponse, error) { + result := SharedGalleryImageVersionsClientListResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.SharedGalleryImageVersionList); err != nil { + return SharedGalleryImageVersionsClientListResponse{}, err + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_snapshots_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_snapshots_client.go new file mode 100644 index 000000000..31ee66076 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_snapshots_client.go @@ -0,0 +1,566 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armcompute + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + armruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strings" +) + +// SnapshotsClient contains the methods for the Snapshots group. +// Don't use this type directly, use NewSnapshotsClient() instead. +type SnapshotsClient struct { + host string + subscriptionID string + pl runtime.Pipeline +} + +// NewSnapshotsClient creates a new instance of SnapshotsClient with the specified values. +// subscriptionID - Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms +// part of the URI for every service call. +// credential - used to authorize requests. Usually a credential from azidentity. +// options - pass nil to accept the default values. +func NewSnapshotsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*SnapshotsClient, error) { + if options == nil { + options = &arm.ClientOptions{} + } + ep := cloud.AzurePublic.Services[cloud.ResourceManager].Endpoint + if c, ok := options.Cloud.Services[cloud.ResourceManager]; ok { + ep = c.Endpoint + } + pl, err := armruntime.NewPipeline(moduleName, moduleVersion, credential, runtime.PipelineOptions{}, options) + if err != nil { + return nil, err + } + client := &SnapshotsClient{ + subscriptionID: subscriptionID, + host: ep, + pl: pl, + } + return client, nil +} + +// BeginCreateOrUpdate - Creates or updates a snapshot. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-12-01 +// resourceGroupName - The name of the resource group. +// snapshotName - The name of the snapshot that is being created. The name can't be changed after the snapshot is created. +// Supported characters for the name are a-z, A-Z, 0-9, _ and -. The max name length is 80 +// characters. +// snapshot - Snapshot object supplied in the body of the Put disk operation. +// options - SnapshotsClientBeginCreateOrUpdateOptions contains the optional parameters for the SnapshotsClient.BeginCreateOrUpdate +// method. 
+func (client *SnapshotsClient) BeginCreateOrUpdate(ctx context.Context, resourceGroupName string, snapshotName string, snapshot Snapshot, options *SnapshotsClientBeginCreateOrUpdateOptions) (*runtime.Poller[SnapshotsClientCreateOrUpdateResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.createOrUpdate(ctx, resourceGroupName, snapshotName, snapshot, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[SnapshotsClientCreateOrUpdateResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[SnapshotsClientCreateOrUpdateResponse](options.ResumeToken, client.pl, nil) + } +} + +// CreateOrUpdate - Creates or updates a snapshot. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-12-01 +func (client *SnapshotsClient) createOrUpdate(ctx context.Context, resourceGroupName string, snapshotName string, snapshot Snapshot, options *SnapshotsClientBeginCreateOrUpdateOptions) (*http.Response, error) { + req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, snapshotName, snapshot, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// createOrUpdateCreateRequest creates the CreateOrUpdate request. +func (client *SnapshotsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, snapshotName string, snapshot Snapshot, options *SnapshotsClientBeginCreateOrUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if snapshotName == "" { + return nil, errors.New("parameter snapshotName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{snapshotName}", url.PathEscape(snapshotName)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-12-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, snapshot) +} + +// BeginDelete - Deletes a snapshot. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-12-01 +// resourceGroupName - The name of the resource group. +// snapshotName - The name of the snapshot that is being created. The name can't be changed after the snapshot is created. +// Supported characters for the name are a-z, A-Z, 0-9, _ and -. The max name length is 80 +// characters. +// options - SnapshotsClientBeginDeleteOptions contains the optional parameters for the SnapshotsClient.BeginDelete method. 
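The Begin* methods return a generic runtime.Poller; the usual pattern is to block with PollUntilDone. A sketch against a SnapshotsClient, creating a snapshot from an existing managed disk; all IDs and names are placeholders, and `to` is github.com/Azure/azure-sdk-for-go/sdk/azcore/to.

snap := armcompute.Snapshot{
	Location: to.Ptr("eastus"),
	Properties: &armcompute.SnapshotProperties{
		CreationData: &armcompute.CreationData{
			CreateOption:     to.Ptr(armcompute.DiskCreateOptionCopy),
			SourceResourceID: to.Ptr("/subscriptions/<sub>/resourceGroups/myResourceGroup/providers/Microsoft.Compute/disks/myDisk"),
		},
	},
}
poller, err := client.BeginCreateOrUpdate(ctx, "myResourceGroup", "mySnapshot", snap, nil)
if err != nil {
	log.Fatal(err)
}
resp, err := poller.PollUntilDone(ctx, nil) // blocks until the LRO reaches a terminal state
if err != nil {
	log.Fatal(err)
}
fmt.Println(*resp.ID)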
+func (client *SnapshotsClient) BeginDelete(ctx context.Context, resourceGroupName string, snapshotName string, options *SnapshotsClientBeginDeleteOptions) (*runtime.Poller[SnapshotsClientDeleteResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.deleteOperation(ctx, resourceGroupName, snapshotName, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[SnapshotsClientDeleteResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[SnapshotsClientDeleteResponse](options.ResumeToken, client.pl, nil) + } +} + +// Delete - Deletes a snapshot. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-12-01 +func (client *SnapshotsClient) deleteOperation(ctx context.Context, resourceGroupName string, snapshotName string, options *SnapshotsClientBeginDeleteOptions) (*http.Response, error) { + req, err := client.deleteCreateRequest(ctx, resourceGroupName, snapshotName, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted, http.StatusNoContent) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// deleteCreateRequest creates the Delete request. +func (client *SnapshotsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, snapshotName string, options *SnapshotsClientBeginDeleteOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if snapshotName == "" { + return nil, errors.New("parameter snapshotName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{snapshotName}", url.PathEscape(snapshotName)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-12-01") + req.Raw().URL.RawQuery = reqQP.Encode() + return req, nil +} + +// Get - Gets information about a snapshot. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-12-01 +// resourceGroupName - The name of the resource group. +// snapshotName - The name of the snapshot that is being created. The name can't be changed after the snapshot is created. +// Supported characters for the name are a-z, A-Z, 0-9, _ and -. The max name length is 80 +// characters. +// options - SnapshotsClientGetOptions contains the optional parameters for the SnapshotsClient.Get method. 
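The ResumeToken branch in the Begin* methods exists so a long-running operation can be rehydrated later, possibly in another process; a sketch of that round trip (resource names are placeholders):

poller, err := client.BeginDelete(ctx, "myResourceGroup", "mySnapshot", nil)
if err != nil {
	log.Fatal(err)
}
token, err := poller.ResumeToken() // persist this string before the process exits
if err != nil {
	log.Fatal(err)
}
// ...later, possibly in a different process:
resumed, err := client.BeginDelete(ctx, "myResourceGroup", "mySnapshot",
	&armcompute.SnapshotsClientBeginDeleteOptions{ResumeToken: token})
if err != nil {
	log.Fatal(err)
}
if _, err = resumed.PollUntilDone(ctx, nil); err != nil {
	log.Fatal(err)
}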
+func (client *SnapshotsClient) Get(ctx context.Context, resourceGroupName string, snapshotName string, options *SnapshotsClientGetOptions) (SnapshotsClientGetResponse, error) { + req, err := client.getCreateRequest(ctx, resourceGroupName, snapshotName, options) + if err != nil { + return SnapshotsClientGetResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return SnapshotsClientGetResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return SnapshotsClientGetResponse{}, runtime.NewResponseError(resp) + } + return client.getHandleResponse(resp) +} + +// getCreateRequest creates the Get request. +func (client *SnapshotsClient) getCreateRequest(ctx context.Context, resourceGroupName string, snapshotName string, options *SnapshotsClientGetOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if snapshotName == "" { + return nil, errors.New("parameter snapshotName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{snapshotName}", url.PathEscape(snapshotName)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-12-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getHandleResponse handles the Get response. +func (client *SnapshotsClient) getHandleResponse(resp *http.Response) (SnapshotsClientGetResponse, error) { + result := SnapshotsClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.Snapshot); err != nil { + return SnapshotsClientGetResponse{}, err + } + return result, nil +} + +// BeginGrantAccess - Grants access to a snapshot. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-12-01 +// resourceGroupName - The name of the resource group. +// snapshotName - The name of the snapshot that is being created. The name can't be changed after the snapshot is created. +// Supported characters for the name are a-z, A-Z, 0-9, _ and -. The max name length is 80 +// characters. +// grantAccessData - Access data object supplied in the body of the get snapshot access operation. +// options - SnapshotsClientBeginGrantAccessOptions contains the optional parameters for the SnapshotsClient.BeginGrantAccess +// method. 
+func (client *SnapshotsClient) BeginGrantAccess(ctx context.Context, resourceGroupName string, snapshotName string, grantAccessData GrantAccessData, options *SnapshotsClientBeginGrantAccessOptions) (*runtime.Poller[SnapshotsClientGrantAccessResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.grantAccess(ctx, resourceGroupName, snapshotName, grantAccessData, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.pl, &runtime.NewPollerOptions[SnapshotsClientGrantAccessResponse]{ + FinalStateVia: runtime.FinalStateViaLocation, + }) + } else { + return runtime.NewPollerFromResumeToken[SnapshotsClientGrantAccessResponse](options.ResumeToken, client.pl, nil) + } +} + +// GrantAccess - Grants access to a snapshot. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-12-01 +func (client *SnapshotsClient) grantAccess(ctx context.Context, resourceGroupName string, snapshotName string, grantAccessData GrantAccessData, options *SnapshotsClientBeginGrantAccessOptions) (*http.Response, error) { + req, err := client.grantAccessCreateRequest(ctx, resourceGroupName, snapshotName, grantAccessData, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// grantAccessCreateRequest creates the GrantAccess request. +func (client *SnapshotsClient) grantAccessCreateRequest(ctx context.Context, resourceGroupName string, snapshotName string, grantAccessData GrantAccessData, options *SnapshotsClientBeginGrantAccessOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}/beginGetAccess" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if snapshotName == "" { + return nil, errors.New("parameter snapshotName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{snapshotName}", url.PathEscape(snapshotName)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-12-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, grantAccessData) +} + +// NewListPager - Lists snapshots under a subscription. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-12-01 +// options - SnapshotsClientListOptions contains the optional parameters for the SnapshotsClient.List method. 
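GrantAccess is an LRO whose terminal response carries a time-limited SAS URL, surfaced as AccessURI.AccessSAS; a sketch that requests one hour of read access, using constants and field names as generated in this module (names are placeholders):

poller, err := client.BeginGrantAccess(ctx, "myResourceGroup", "mySnapshot", armcompute.GrantAccessData{
	Access:            to.Ptr(armcompute.AccessLevelRead),
	DurationInSeconds: to.Ptr[int32](3600),
}, nil)
if err != nil {
	log.Fatal(err)
}
resp, err := poller.PollUntilDone(ctx, nil)
if err != nil {
	log.Fatal(err)
}
fmt.Println(*resp.AccessSAS) // time-limited read URL; pair with BeginRevokeAccess when finished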
+func (client *SnapshotsClient) NewListPager(options *SnapshotsClientListOptions) *runtime.Pager[SnapshotsClientListResponse] { + return runtime.NewPager(runtime.PagingHandler[SnapshotsClientListResponse]{ + More: func(page SnapshotsClientListResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *SnapshotsClientListResponse) (SnapshotsClientListResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listCreateRequest(ctx, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return SnapshotsClientListResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return SnapshotsClientListResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return SnapshotsClientListResponse{}, runtime.NewResponseError(resp) + } + return client.listHandleResponse(resp) + }, + }) +} + +// listCreateRequest creates the List request. +func (client *SnapshotsClient) listCreateRequest(ctx context.Context, options *SnapshotsClientListOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/snapshots" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-12-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listHandleResponse handles the List response. +func (client *SnapshotsClient) listHandleResponse(resp *http.Response) (SnapshotsClientListResponse, error) { + result := SnapshotsClientListResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.SnapshotList); err != nil { + return SnapshotsClientListResponse{}, err + } + return result, nil +} + +// NewListByResourceGroupPager - Lists snapshots under a resource group. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-12-01 +// resourceGroupName - The name of the resource group. +// options - SnapshotsClientListByResourceGroupOptions contains the optional parameters for the SnapshotsClient.ListByResourceGroup +// method. 
+func (client *SnapshotsClient) NewListByResourceGroupPager(resourceGroupName string, options *SnapshotsClientListByResourceGroupOptions) *runtime.Pager[SnapshotsClientListByResourceGroupResponse] { + return runtime.NewPager(runtime.PagingHandler[SnapshotsClientListByResourceGroupResponse]{ + More: func(page SnapshotsClientListByResourceGroupResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *SnapshotsClientListByResourceGroupResponse) (SnapshotsClientListByResourceGroupResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listByResourceGroupCreateRequest(ctx, resourceGroupName, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return SnapshotsClientListByResourceGroupResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return SnapshotsClientListByResourceGroupResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return SnapshotsClientListByResourceGroupResponse{}, runtime.NewResponseError(resp) + } + return client.listByResourceGroupHandleResponse(resp) + }, + }) +} + +// listByResourceGroupCreateRequest creates the ListByResourceGroup request. +func (client *SnapshotsClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *SnapshotsClientListByResourceGroupOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-12-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listByResourceGroupHandleResponse handles the ListByResourceGroup response. +func (client *SnapshotsClient) listByResourceGroupHandleResponse(resp *http.Response) (SnapshotsClientListByResourceGroupResponse, error) { + result := SnapshotsClientListByResourceGroupResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.SnapshotList); err != nil { + return SnapshotsClientListByResourceGroupResponse{}, err + } + return result, nil +} + +// BeginRevokeAccess - Revokes access to a snapshot. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-12-01 +// resourceGroupName - The name of the resource group. +// snapshotName - The name of the snapshot that is being created. The name can't be changed after the snapshot is created. +// Supported characters for the name are a-z, A-Z, 0-9, _ and -. The max name length is 80 +// characters. +// options - SnapshotsClientBeginRevokeAccessOptions contains the optional parameters for the SnapshotsClient.BeginRevokeAccess +// method. 
+func (client *SnapshotsClient) BeginRevokeAccess(ctx context.Context, resourceGroupName string, snapshotName string, options *SnapshotsClientBeginRevokeAccessOptions) (*runtime.Poller[SnapshotsClientRevokeAccessResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.revokeAccess(ctx, resourceGroupName, snapshotName, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.pl, &runtime.NewPollerOptions[SnapshotsClientRevokeAccessResponse]{ + FinalStateVia: runtime.FinalStateViaLocation, + }) + } else { + return runtime.NewPollerFromResumeToken[SnapshotsClientRevokeAccessResponse](options.ResumeToken, client.pl, nil) + } +} + +// RevokeAccess - Revokes access to a snapshot. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-12-01 +func (client *SnapshotsClient) revokeAccess(ctx context.Context, resourceGroupName string, snapshotName string, options *SnapshotsClientBeginRevokeAccessOptions) (*http.Response, error) { + req, err := client.revokeAccessCreateRequest(ctx, resourceGroupName, snapshotName, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// revokeAccessCreateRequest creates the RevokeAccess request. +func (client *SnapshotsClient) revokeAccessCreateRequest(ctx context.Context, resourceGroupName string, snapshotName string, options *SnapshotsClientBeginRevokeAccessOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}/endGetAccess" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if snapshotName == "" { + return nil, errors.New("parameter snapshotName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{snapshotName}", url.PathEscape(snapshotName)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-12-01") + req.Raw().URL.RawQuery = reqQP.Encode() + return req, nil +} + +// BeginUpdate - Updates (patches) a snapshot. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-12-01 +// resourceGroupName - The name of the resource group. +// snapshotName - The name of the snapshot that is being created. The name can't be changed after the snapshot is created. +// Supported characters for the name are a-z, A-Z, 0-9, _ and -. The max name length is 80 +// characters. +// snapshot - Snapshot object supplied in the body of the Patch snapshot operation. +// options - SnapshotsClientBeginUpdateOptions contains the optional parameters for the SnapshotsClient.BeginUpdate method. 
+func (client *SnapshotsClient) BeginUpdate(ctx context.Context, resourceGroupName string, snapshotName string, snapshot SnapshotUpdate, options *SnapshotsClientBeginUpdateOptions) (*runtime.Poller[SnapshotsClientUpdateResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.update(ctx, resourceGroupName, snapshotName, snapshot, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[SnapshotsClientUpdateResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[SnapshotsClientUpdateResponse](options.ResumeToken, client.pl, nil) + } +} + +// Update - Updates (patches) a snapshot. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2021-12-01 +func (client *SnapshotsClient) update(ctx context.Context, resourceGroupName string, snapshotName string, snapshot SnapshotUpdate, options *SnapshotsClientBeginUpdateOptions) (*http.Response, error) { + req, err := client.updateCreateRequest(ctx, resourceGroupName, snapshotName, snapshot, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// updateCreateRequest creates the Update request. +func (client *SnapshotsClient) updateCreateRequest(ctx context.Context, resourceGroupName string, snapshotName string, snapshot SnapshotUpdate, options *SnapshotsClientBeginUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if snapshotName == "" { + return nil, errors.New("parameter snapshotName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{snapshotName}", url.PathEscape(snapshotName)) + req, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2021-12-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, snapshot) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_sshpublickeys_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_sshpublickeys_client.go new file mode 100644 index 000000000..c9e729857 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_sshpublickeys_client.go @@ -0,0 +1,459 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +package armcompute + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + armruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strings" +) + +// SSHPublicKeysClient contains the methods for the SSHPublicKeys group. +// Don't use this type directly, use NewSSHPublicKeysClient() instead. +type SSHPublicKeysClient struct { + host string + subscriptionID string + pl runtime.Pipeline +} + +// NewSSHPublicKeysClient creates a new instance of SSHPublicKeysClient with the specified values. +// subscriptionID - Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms +// part of the URI for every service call. +// credential - used to authorize requests. Usually a credential from azidentity. +// options - pass nil to accept the default values. +func NewSSHPublicKeysClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*SSHPublicKeysClient, error) { + if options == nil { + options = &arm.ClientOptions{} + } + ep := cloud.AzurePublic.Services[cloud.ResourceManager].Endpoint + if c, ok := options.Cloud.Services[cloud.ResourceManager]; ok { + ep = c.Endpoint + } + pl, err := armruntime.NewPipeline(moduleName, moduleVersion, credential, runtime.PipelineOptions{}, options) + if err != nil { + return nil, err + } + client := &SSHPublicKeysClient{ + subscriptionID: subscriptionID, + host: ep, + pl: pl, + } + return client, nil +} + +// Create - Creates a new SSH public key resource. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// sshPublicKeyName - The name of the SSH public key. +// parameters - Parameters supplied to create the SSH public key. +// options - SSHPublicKeysClientCreateOptions contains the optional parameters for the SSHPublicKeysClient.Create method. +func (client *SSHPublicKeysClient) Create(ctx context.Context, resourceGroupName string, sshPublicKeyName string, parameters SSHPublicKeyResource, options *SSHPublicKeysClientCreateOptions) (SSHPublicKeysClientCreateResponse, error) { + req, err := client.createCreateRequest(ctx, resourceGroupName, sshPublicKeyName, parameters, options) + if err != nil { + return SSHPublicKeysClientCreateResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return SSHPublicKeysClientCreateResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) { + return SSHPublicKeysClientCreateResponse{}, runtime.NewResponseError(resp) + } + return client.createHandleResponse(resp) +} + +// createCreateRequest creates the Create request. 
+func (client *SSHPublicKeysClient) createCreateRequest(ctx context.Context, resourceGroupName string, sshPublicKeyName string, parameters SSHPublicKeyResource, options *SSHPublicKeysClientCreateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/sshPublicKeys/{sshPublicKeyName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if sshPublicKeyName == "" { + return nil, errors.New("parameter sshPublicKeyName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{sshPublicKeyName}", url.PathEscape(sshPublicKeyName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, parameters) +} + +// createHandleResponse handles the Create response. +func (client *SSHPublicKeysClient) createHandleResponse(resp *http.Response) (SSHPublicKeysClientCreateResponse, error) { + result := SSHPublicKeysClientCreateResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.SSHPublicKeyResource); err != nil { + return SSHPublicKeysClientCreateResponse{}, err + } + return result, nil +} + +// Delete - Delete an SSH public key. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// sshPublicKeyName - The name of the SSH public key. +// options - SSHPublicKeysClientDeleteOptions contains the optional parameters for the SSHPublicKeysClient.Delete method. +func (client *SSHPublicKeysClient) Delete(ctx context.Context, resourceGroupName string, sshPublicKeyName string, options *SSHPublicKeysClientDeleteOptions) (SSHPublicKeysClientDeleteResponse, error) { + req, err := client.deleteCreateRequest(ctx, resourceGroupName, sshPublicKeyName, options) + if err != nil { + return SSHPublicKeysClientDeleteResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return SSHPublicKeysClientDeleteResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusNoContent) { + return SSHPublicKeysClientDeleteResponse{}, runtime.NewResponseError(resp) + } + return SSHPublicKeysClientDeleteResponse{}, nil +} + +// deleteCreateRequest creates the Delete request. 
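Unlike the snapshot operations, Create here is a plain synchronous PUT accepting 200 or 201; a sketch that registers an existing public key (key material and names are placeholders):

key := armcompute.SSHPublicKeyResource{
	Location: to.Ptr("eastus"),
	Properties: &armcompute.SSHPublicKeyResourceProperties{
		PublicKey: to.Ptr("ssh-rsa AAAAB3... user@host"),
	},
}
resp, err := client.Create(ctx, "myResourceGroup", "myKey", key, nil)
if err != nil {
	log.Fatal(err)
}
fmt.Println(*resp.ID)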
+func (client *SSHPublicKeysClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, sshPublicKeyName string, options *SSHPublicKeysClientDeleteOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/sshPublicKeys/{sshPublicKeyName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if sshPublicKeyName == "" { + return nil, errors.New("parameter sshPublicKeyName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{sshPublicKeyName}", url.PathEscape(sshPublicKeyName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// GenerateKeyPair - Generates and returns a public/private key pair and populates the SSH public key resource with the public +// key. The length of the key will be 3072 bits. This operation can only be performed once per +// SSH public key resource. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// sshPublicKeyName - The name of the SSH public key. +// options - SSHPublicKeysClientGenerateKeyPairOptions contains the optional parameters for the SSHPublicKeysClient.GenerateKeyPair +// method. +func (client *SSHPublicKeysClient) GenerateKeyPair(ctx context.Context, resourceGroupName string, sshPublicKeyName string, options *SSHPublicKeysClientGenerateKeyPairOptions) (SSHPublicKeysClientGenerateKeyPairResponse, error) { + req, err := client.generateKeyPairCreateRequest(ctx, resourceGroupName, sshPublicKeyName, options) + if err != nil { + return SSHPublicKeysClientGenerateKeyPairResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return SSHPublicKeysClientGenerateKeyPairResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return SSHPublicKeysClientGenerateKeyPairResponse{}, runtime.NewResponseError(resp) + } + return client.generateKeyPairHandleResponse(resp) +} + +// generateKeyPairCreateRequest creates the GenerateKeyPair request. 
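As the doc comment above notes, GenerateKeyPair can run only once per resource, and it is the only call that ever returns the private key; a sketch (names are placeholders):

resp, err := client.GenerateKeyPair(ctx, "myResourceGroup", "myKey", nil)
if err != nil {
	log.Fatal(err)
}
// resp.PrivateKey is not retrievable again after this call; store it securely immediately.
fmt.Println(*resp.PublicKey)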
+func (client *SSHPublicKeysClient) generateKeyPairCreateRequest(ctx context.Context, resourceGroupName string, sshPublicKeyName string, options *SSHPublicKeysClientGenerateKeyPairOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/sshPublicKeys/{sshPublicKeyName}/generateKeyPair" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if sshPublicKeyName == "" { + return nil, errors.New("parameter sshPublicKeyName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{sshPublicKeyName}", url.PathEscape(sshPublicKeyName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// generateKeyPairHandleResponse handles the GenerateKeyPair response. +func (client *SSHPublicKeysClient) generateKeyPairHandleResponse(resp *http.Response) (SSHPublicKeysClientGenerateKeyPairResponse, error) { + result := SSHPublicKeysClientGenerateKeyPairResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.SSHPublicKeyGenerateKeyPairResult); err != nil { + return SSHPublicKeysClientGenerateKeyPairResponse{}, err + } + return result, nil +} + +// Get - Retrieves information about an SSH public key. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// sshPublicKeyName - The name of the SSH public key. +// options - SSHPublicKeysClientGetOptions contains the optional parameters for the SSHPublicKeysClient.Get method. +func (client *SSHPublicKeysClient) Get(ctx context.Context, resourceGroupName string, sshPublicKeyName string, options *SSHPublicKeysClientGetOptions) (SSHPublicKeysClientGetResponse, error) { + req, err := client.getCreateRequest(ctx, resourceGroupName, sshPublicKeyName, options) + if err != nil { + return SSHPublicKeysClientGetResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return SSHPublicKeysClientGetResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return SSHPublicKeysClientGetResponse{}, runtime.NewResponseError(resp) + } + return client.getHandleResponse(resp) +} + +// getCreateRequest creates the Get request. 
+func (client *SSHPublicKeysClient) getCreateRequest(ctx context.Context, resourceGroupName string, sshPublicKeyName string, options *SSHPublicKeysClientGetOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/sshPublicKeys/{sshPublicKeyName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if sshPublicKeyName == "" { + return nil, errors.New("parameter sshPublicKeyName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{sshPublicKeyName}", url.PathEscape(sshPublicKeyName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getHandleResponse handles the Get response. +func (client *SSHPublicKeysClient) getHandleResponse(resp *http.Response) (SSHPublicKeysClientGetResponse, error) { + result := SSHPublicKeysClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.SSHPublicKeyResource); err != nil { + return SSHPublicKeysClientGetResponse{}, err + } + return result, nil +} + +// NewListByResourceGroupPager - Lists all of the SSH public keys in the specified resource group. Use the nextLink property +// in the response to get the next page of SSH public keys. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// options - SSHPublicKeysClientListByResourceGroupOptions contains the optional parameters for the SSHPublicKeysClient.ListByResourceGroup +// method. +func (client *SSHPublicKeysClient) NewListByResourceGroupPager(resourceGroupName string, options *SSHPublicKeysClientListByResourceGroupOptions) *runtime.Pager[SSHPublicKeysClientListByResourceGroupResponse] { + return runtime.NewPager(runtime.PagingHandler[SSHPublicKeysClientListByResourceGroupResponse]{ + More: func(page SSHPublicKeysClientListByResourceGroupResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *SSHPublicKeysClientListByResourceGroupResponse) (SSHPublicKeysClientListByResourceGroupResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listByResourceGroupCreateRequest(ctx, resourceGroupName, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return SSHPublicKeysClientListByResourceGroupResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return SSHPublicKeysClientListByResourceGroupResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return SSHPublicKeysClientListByResourceGroupResponse{}, runtime.NewResponseError(resp) + } + return client.listByResourceGroupHandleResponse(resp) + }, + }) +} + +// listByResourceGroupCreateRequest creates the ListByResourceGroup request. 
+func (client *SSHPublicKeysClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *SSHPublicKeysClientListByResourceGroupOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/sshPublicKeys" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listByResourceGroupHandleResponse handles the ListByResourceGroup response. +func (client *SSHPublicKeysClient) listByResourceGroupHandleResponse(resp *http.Response) (SSHPublicKeysClientListByResourceGroupResponse, error) { + result := SSHPublicKeysClientListByResourceGroupResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.SSHPublicKeysGroupListResult); err != nil { + return SSHPublicKeysClientListByResourceGroupResponse{}, err + } + return result, nil +} + +// NewListBySubscriptionPager - Lists all of the SSH public keys in the subscription. Use the nextLink property in the response +// to get the next page of SSH public keys. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// options - SSHPublicKeysClientListBySubscriptionOptions contains the optional parameters for the SSHPublicKeysClient.ListBySubscription +// method. +func (client *SSHPublicKeysClient) NewListBySubscriptionPager(options *SSHPublicKeysClientListBySubscriptionOptions) *runtime.Pager[SSHPublicKeysClientListBySubscriptionResponse] { + return runtime.NewPager(runtime.PagingHandler[SSHPublicKeysClientListBySubscriptionResponse]{ + More: func(page SSHPublicKeysClientListBySubscriptionResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *SSHPublicKeysClientListBySubscriptionResponse) (SSHPublicKeysClientListBySubscriptionResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listBySubscriptionCreateRequest(ctx, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return SSHPublicKeysClientListBySubscriptionResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return SSHPublicKeysClientListBySubscriptionResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return SSHPublicKeysClientListBySubscriptionResponse{}, runtime.NewResponseError(resp) + } + return client.listBySubscriptionHandleResponse(resp) + }, + }) +} + +// listBySubscriptionCreateRequest creates the ListBySubscription request. 
+func (client *SSHPublicKeysClient) listBySubscriptionCreateRequest(ctx context.Context, options *SSHPublicKeysClientListBySubscriptionOptions) (*policy.Request, error) {
+	urlPath := "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/sshPublicKeys"
+	if client.subscriptionID == "" {
+		return nil, errors.New("parameter client.subscriptionID cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID))
+	req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))
+	if err != nil {
+		return nil, err
+	}
+	reqQP := req.Raw().URL.Query()
+	reqQP.Set("api-version", "2022-03-01")
+	req.Raw().URL.RawQuery = reqQP.Encode()
+	req.Raw().Header["Accept"] = []string{"application/json"}
+	return req, nil
+}
+
+// listBySubscriptionHandleResponse handles the ListBySubscription response.
+func (client *SSHPublicKeysClient) listBySubscriptionHandleResponse(resp *http.Response) (SSHPublicKeysClientListBySubscriptionResponse, error) {
+	result := SSHPublicKeysClientListBySubscriptionResponse{}
+	if err := runtime.UnmarshalAsJSON(resp, &result.SSHPublicKeysGroupListResult); err != nil {
+		return SSHPublicKeysClientListBySubscriptionResponse{}, err
+	}
+	return result, nil
+}
+
+// Update - Updates an SSH public key resource.
+// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2022-03-01
+// resourceGroupName - The name of the resource group.
+// sshPublicKeyName - The name of the SSH public key.
+// parameters - Parameters supplied to update the SSH public key.
+// options - SSHPublicKeysClientUpdateOptions contains the optional parameters for the SSHPublicKeysClient.Update method.
+func (client *SSHPublicKeysClient) Update(ctx context.Context, resourceGroupName string, sshPublicKeyName string, parameters SSHPublicKeyUpdateResource, options *SSHPublicKeysClientUpdateOptions) (SSHPublicKeysClientUpdateResponse, error) {
+	req, err := client.updateCreateRequest(ctx, resourceGroupName, sshPublicKeyName, parameters, options)
+	if err != nil {
+		return SSHPublicKeysClientUpdateResponse{}, err
+	}
+	resp, err := client.pl.Do(req)
+	if err != nil {
+		return SSHPublicKeysClientUpdateResponse{}, err
+	}
+	if !runtime.HasStatusCode(resp, http.StatusOK) {
+		return SSHPublicKeysClientUpdateResponse{}, runtime.NewResponseError(resp)
+	}
+	return client.updateHandleResponse(resp)
+}
+
+// updateCreateRequest creates the Update request.
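Update issues a PATCH and is typically used to adjust tags on the key resource; a sketch (tag name and value are placeholders):

resp, err := client.Update(ctx, "myResourceGroup", "myKey", armcompute.SSHPublicKeyUpdateResource{
	Tags: map[string]*string{"env": to.Ptr("dev")},
}, nil)
if err != nil {
	log.Fatal(err)
}
fmt.Println(*resp.ID)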
+func (client *SSHPublicKeysClient) updateCreateRequest(ctx context.Context, resourceGroupName string, sshPublicKeyName string, parameters SSHPublicKeyUpdateResource, options *SSHPublicKeysClientUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/sshPublicKeys/{sshPublicKeyName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if sshPublicKeyName == "" { + return nil, errors.New("parameter sshPublicKeyName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{sshPublicKeyName}", url.PathEscape(sshPublicKeyName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, parameters) +} + +// updateHandleResponse handles the Update response. +func (client *SSHPublicKeysClient) updateHandleResponse(resp *http.Response) (SSHPublicKeysClientUpdateResponse, error) { + result := SSHPublicKeysClientUpdateResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.SSHPublicKeyResource); err != nil { + return SSHPublicKeysClientUpdateResponse{}, err + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_time_rfc3339.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_time_rfc3339.go new file mode 100644 index 000000000..068195875 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_time_rfc3339.go @@ -0,0 +1,86 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armcompute + +import ( + "encoding/json" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "reflect" + "regexp" + "strings" + "time" +) + +const ( + utcLayoutJSON = `"2006-01-02T15:04:05.999999999"` + utcLayout = "2006-01-02T15:04:05.999999999" + rfc3339JSON = `"` + time.RFC3339Nano + `"` +) + +// Azure reports time in UTC but it doesn't include the 'Z' time zone suffix in some cases. 
+var tzOffsetRegex = regexp.MustCompile(`(Z|z|\+|-)(\d+:\d+)*"*$`) + +type timeRFC3339 time.Time + +func (t timeRFC3339) MarshalJSON() (json []byte, err error) { + tt := time.Time(t) + return tt.MarshalJSON() +} + +func (t timeRFC3339) MarshalText() (text []byte, err error) { + tt := time.Time(t) + return tt.MarshalText() +} + +func (t *timeRFC3339) UnmarshalJSON(data []byte) error { + layout := utcLayoutJSON + if tzOffsetRegex.Match(data) { + layout = rfc3339JSON + } + return t.Parse(layout, string(data)) +} + +func (t *timeRFC3339) UnmarshalText(data []byte) (err error) { + layout := utcLayout + if tzOffsetRegex.Match(data) { + layout = time.RFC3339Nano + } + return t.Parse(layout, string(data)) +} + +func (t *timeRFC3339) Parse(layout, value string) error { + p, err := time.Parse(layout, strings.ToUpper(value)) + *t = timeRFC3339(p) + return err +} + +func populateTimeRFC3339(m map[string]interface{}, k string, t *time.Time) { + if t == nil { + return + } else if azcore.IsNullValue(t) { + m[k] = nil + return + } else if reflect.ValueOf(t).IsNil() { + return + } + m[k] = (*timeRFC3339)(t) +} + +func unpopulateTimeRFC3339(data json.RawMessage, fn string, t **time.Time) error { + if data == nil || strings.EqualFold(string(data), "null") { + return nil + } + var aux timeRFC3339 + if err := json.Unmarshal(data, &aux); err != nil { + return fmt.Errorf("struct field %s: %v", fn, err) + } + *t = (*time.Time)(&aux) + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_usage_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_usage_client.go new file mode 100644 index 000000000..039ae8997 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_usage_client.go @@ -0,0 +1,121 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armcompute + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + armruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strings" +) + +// UsageClient contains the methods for the Usage group. +// Don't use this type directly, use NewUsageClient() instead. +type UsageClient struct { + host string + subscriptionID string + pl runtime.Pipeline +} + +// NewUsageClient creates a new instance of UsageClient with the specified values. +// subscriptionID - Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms +// part of the URI for every service call. +// credential - used to authorize requests. Usually a credential from azidentity. +// options - pass nil to accept the default values. 
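The timeRFC3339 helpers above sniff the payload for a zone suffix and fall back to a UTC layout without 'Z', since Azure sometimes omits it. They are package-private, so here is a standalone illustration of the same two layouts; it uses a simplified suffix check where the vendored code's tzOffsetRegex also matches ±hh:mm offsets.

const utcLayout = "2006-01-02T15:04:05.999999999"
for _, s := range []string{"2021-12-01T10:00:00.5", "2021-12-01T10:00:00.5Z"} {
	layout := utcLayout
	if strings.HasSuffix(s, "Z") { // the generated code uses tzOffsetRegex here
		layout = time.RFC3339Nano
	}
	t, err := time.Parse(layout, s)
	fmt.Println(t, err) // both inputs parse, via different layouts
}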
+func NewUsageClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*UsageClient, error) { + if options == nil { + options = &arm.ClientOptions{} + } + ep := cloud.AzurePublic.Services[cloud.ResourceManager].Endpoint + if c, ok := options.Cloud.Services[cloud.ResourceManager]; ok { + ep = c.Endpoint + } + pl, err := armruntime.NewPipeline(moduleName, moduleVersion, credential, runtime.PipelineOptions{}, options) + if err != nil { + return nil, err + } + client := &UsageClient{ + subscriptionID: subscriptionID, + host: ep, + pl: pl, + } + return client, nil +} + +// NewListPager - Gets, for the specified location, the current compute resource usage information as well as the limits for +// compute resources under the subscription. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// location - The location for which resource usage is queried. +// options - UsageClientListOptions contains the optional parameters for the UsageClient.List method. +func (client *UsageClient) NewListPager(location string, options *UsageClientListOptions) *runtime.Pager[UsageClientListResponse] { + return runtime.NewPager(runtime.PagingHandler[UsageClientListResponse]{ + More: func(page UsageClientListResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *UsageClientListResponse) (UsageClientListResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listCreateRequest(ctx, location, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return UsageClientListResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return UsageClientListResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return UsageClientListResponse{}, runtime.NewResponseError(resp) + } + return client.listHandleResponse(resp) + }, + }) +} + +// listCreateRequest creates the List request. +func (client *UsageClient) listCreateRequest(ctx context.Context, location string, options *UsageClientListOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/usages" + if location == "" { + return nil, errors.New("parameter location cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{location}", url.PathEscape(location)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listHandleResponse handles the List response. 
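+// A sketch of consuming the pager built by NewListPager above (illustrative):
+//
+//	pager := client.NewListPager("westus2", nil)
+//	for pager.More() {
+//		page, err := pager.NextPage(context.Background())
+//		if err != nil {
+//			break // handle the error in real code
+//		}
+//		for _, u := range page.Value {
+//			_ = u // each entry reports a compute quota and its current usage
+//		}
+//	}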
+func (client *UsageClient) listHandleResponse(resp *http.Response) (UsageClientListResponse, error) { + result := UsageClientListResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.ListUsagesResult); err != nil { + return UsageClientListResponse{}, err + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_virtualmachineextensionimages_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_virtualmachineextensionimages_client.go new file mode 100644 index 000000000..6b17f3e68 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_virtualmachineextensionimages_client.go @@ -0,0 +1,246 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armcompute + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + armruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strconv" + "strings" +) + +// VirtualMachineExtensionImagesClient contains the methods for the VirtualMachineExtensionImages group. +// Don't use this type directly, use NewVirtualMachineExtensionImagesClient() instead. +type VirtualMachineExtensionImagesClient struct { + host string + subscriptionID string + pl runtime.Pipeline +} + +// NewVirtualMachineExtensionImagesClient creates a new instance of VirtualMachineExtensionImagesClient with the specified values. +// subscriptionID - Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms +// part of the URI for every service call. +// credential - used to authorize requests. Usually a credential from azidentity. +// options - pass nil to accept the default values. +func NewVirtualMachineExtensionImagesClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*VirtualMachineExtensionImagesClient, error) { + if options == nil { + options = &arm.ClientOptions{} + } + ep := cloud.AzurePublic.Services[cloud.ResourceManager].Endpoint + if c, ok := options.Cloud.Services[cloud.ResourceManager]; ok { + ep = c.Endpoint + } + pl, err := armruntime.NewPipeline(moduleName, moduleVersion, credential, runtime.PipelineOptions{}, options) + if err != nil { + return nil, err + } + client := &VirtualMachineExtensionImagesClient{ + subscriptionID: subscriptionID, + host: ep, + pl: pl, + } + return client, nil +} + +// Get - Gets a virtual machine extension image. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// location - The name of a supported Azure region. +// options - VirtualMachineExtensionImagesClientGetOptions contains the optional parameters for the VirtualMachineExtensionImagesClient.Get +// method. 
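+//
+// A minimal call sketch (publisher, type, and version values are illustrative):
+//
+//	img, err := client.Get(ctx, "westus2", "Microsoft.Azure.Extensions", "CustomScript", "2.1.10", nil)
+//	if err == nil {
+//		fmt.Println(*img.Name)
+//	}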
+func (client *VirtualMachineExtensionImagesClient) Get(ctx context.Context, location string, publisherName string, typeParam string, version string, options *VirtualMachineExtensionImagesClientGetOptions) (VirtualMachineExtensionImagesClientGetResponse, error) { + req, err := client.getCreateRequest(ctx, location, publisherName, typeParam, version, options) + if err != nil { + return VirtualMachineExtensionImagesClientGetResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return VirtualMachineExtensionImagesClientGetResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return VirtualMachineExtensionImagesClientGetResponse{}, runtime.NewResponseError(resp) + } + return client.getHandleResponse(resp) +} + +// getCreateRequest creates the Get request. +func (client *VirtualMachineExtensionImagesClient) getCreateRequest(ctx context.Context, location string, publisherName string, typeParam string, version string, options *VirtualMachineExtensionImagesClientGetOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmextension/types/{type}/versions/{version}" + if location == "" { + return nil, errors.New("parameter location cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{location}", url.PathEscape(location)) + if publisherName == "" { + return nil, errors.New("parameter publisherName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{publisherName}", url.PathEscape(publisherName)) + if typeParam == "" { + return nil, errors.New("parameter typeParam cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{type}", url.PathEscape(typeParam)) + if version == "" { + return nil, errors.New("parameter version cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{version}", url.PathEscape(version)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getHandleResponse handles the Get response. +func (client *VirtualMachineExtensionImagesClient) getHandleResponse(resp *http.Response) (VirtualMachineExtensionImagesClientGetResponse, error) { + result := VirtualMachineExtensionImagesClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.VirtualMachineExtensionImage); err != nil { + return VirtualMachineExtensionImagesClientGetResponse{}, err + } + return result, nil +} + +// ListTypes - Gets a list of virtual machine extension image types. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// location - The name of a supported Azure region. +// options - VirtualMachineExtensionImagesClientListTypesOptions contains the optional parameters for the VirtualMachineExtensionImagesClient.ListTypes +// method. 
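+//
+// ListTypes is not paged; the full result arrives in a single response. A sketch
+// (publisher value illustrative):
+//
+//	res, err := client.ListTypes(ctx, "westus2", "Microsoft.Azure.Extensions", nil)
+//	// on success, res.VirtualMachineExtensionImageArray holds the image types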
+func (client *VirtualMachineExtensionImagesClient) ListTypes(ctx context.Context, location string, publisherName string, options *VirtualMachineExtensionImagesClientListTypesOptions) (VirtualMachineExtensionImagesClientListTypesResponse, error) { + req, err := client.listTypesCreateRequest(ctx, location, publisherName, options) + if err != nil { + return VirtualMachineExtensionImagesClientListTypesResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return VirtualMachineExtensionImagesClientListTypesResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return VirtualMachineExtensionImagesClientListTypesResponse{}, runtime.NewResponseError(resp) + } + return client.listTypesHandleResponse(resp) +} + +// listTypesCreateRequest creates the ListTypes request. +func (client *VirtualMachineExtensionImagesClient) listTypesCreateRequest(ctx context.Context, location string, publisherName string, options *VirtualMachineExtensionImagesClientListTypesOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmextension/types" + if location == "" { + return nil, errors.New("parameter location cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{location}", url.PathEscape(location)) + if publisherName == "" { + return nil, errors.New("parameter publisherName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{publisherName}", url.PathEscape(publisherName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listTypesHandleResponse handles the ListTypes response. +func (client *VirtualMachineExtensionImagesClient) listTypesHandleResponse(resp *http.Response) (VirtualMachineExtensionImagesClientListTypesResponse, error) { + result := VirtualMachineExtensionImagesClientListTypesResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.VirtualMachineExtensionImageArray); err != nil { + return VirtualMachineExtensionImagesClientListTypesResponse{}, err + } + return result, nil +} + +// ListVersions - Gets a list of virtual machine extension image versions. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// location - The name of a supported Azure region. +// options - VirtualMachineExtensionImagesClientListVersionsOptions contains the optional parameters for the VirtualMachineExtensionImagesClient.ListVersions +// method. 
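+//
+// A sketch passing the OData options that listVersionsCreateRequest wires into the
+// query string (values illustrative; Top is assumed to be *int32 here):
+//
+//	top := int32(5)
+//	res, err := client.ListVersions(ctx, "westus2", "Microsoft.Azure.Extensions", "CustomScript",
+//		&VirtualMachineExtensionImagesClientListVersionsOptions{Top: &top})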
+func (client *VirtualMachineExtensionImagesClient) ListVersions(ctx context.Context, location string, publisherName string, typeParam string, options *VirtualMachineExtensionImagesClientListVersionsOptions) (VirtualMachineExtensionImagesClientListVersionsResponse, error) { + req, err := client.listVersionsCreateRequest(ctx, location, publisherName, typeParam, options) + if err != nil { + return VirtualMachineExtensionImagesClientListVersionsResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return VirtualMachineExtensionImagesClientListVersionsResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return VirtualMachineExtensionImagesClientListVersionsResponse{}, runtime.NewResponseError(resp) + } + return client.listVersionsHandleResponse(resp) +} + +// listVersionsCreateRequest creates the ListVersions request. +func (client *VirtualMachineExtensionImagesClient) listVersionsCreateRequest(ctx context.Context, location string, publisherName string, typeParam string, options *VirtualMachineExtensionImagesClientListVersionsOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmextension/types/{type}/versions" + if location == "" { + return nil, errors.New("parameter location cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{location}", url.PathEscape(location)) + if publisherName == "" { + return nil, errors.New("parameter publisherName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{publisherName}", url.PathEscape(publisherName)) + if typeParam == "" { + return nil, errors.New("parameter typeParam cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{type}", url.PathEscape(typeParam)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Filter != nil { + reqQP.Set("$filter", *options.Filter) + } + if options != nil && options.Top != nil { + reqQP.Set("$top", strconv.FormatInt(int64(*options.Top), 10)) + } + if options != nil && options.Orderby != nil { + reqQP.Set("$orderby", *options.Orderby) + } + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listVersionsHandleResponse handles the ListVersions response. 
+func (client *VirtualMachineExtensionImagesClient) listVersionsHandleResponse(resp *http.Response) (VirtualMachineExtensionImagesClientListVersionsResponse, error) { + result := VirtualMachineExtensionImagesClientListVersionsResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.VirtualMachineExtensionImageArray); err != nil { + return VirtualMachineExtensionImagesClientListVersionsResponse{}, err + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_virtualmachineextensions_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_virtualmachineextensions_client.go new file mode 100644 index 000000000..63b1c5b33 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_virtualmachineextensions_client.go @@ -0,0 +1,387 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armcompute + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + armruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strings" +) + +// VirtualMachineExtensionsClient contains the methods for the VirtualMachineExtensions group. +// Don't use this type directly, use NewVirtualMachineExtensionsClient() instead. +type VirtualMachineExtensionsClient struct { + host string + subscriptionID string + pl runtime.Pipeline +} + +// NewVirtualMachineExtensionsClient creates a new instance of VirtualMachineExtensionsClient with the specified values. +// subscriptionID - Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms +// part of the URI for every service call. +// credential - used to authorize requests. Usually a credential from azidentity. +// options - pass nil to accept the default values. +func NewVirtualMachineExtensionsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*VirtualMachineExtensionsClient, error) { + if options == nil { + options = &arm.ClientOptions{} + } + ep := cloud.AzurePublic.Services[cloud.ResourceManager].Endpoint + if c, ok := options.Cloud.Services[cloud.ResourceManager]; ok { + ep = c.Endpoint + } + pl, err := armruntime.NewPipeline(moduleName, moduleVersion, credential, runtime.PipelineOptions{}, options) + if err != nil { + return nil, err + } + client := &VirtualMachineExtensionsClient{ + subscriptionID: subscriptionID, + host: ep, + pl: pl, + } + return client, nil +} + +// BeginCreateOrUpdate - The operation to create or update the extension. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// vmName - The name of the virtual machine where the extension should be created or updated. +// vmExtensionName - The name of the virtual machine extension. 
+// extensionParameters - Parameters supplied to the Create Virtual Machine Extension operation. +// options - VirtualMachineExtensionsClientBeginCreateOrUpdateOptions contains the optional parameters for the VirtualMachineExtensionsClient.BeginCreateOrUpdate +// method. +func (client *VirtualMachineExtensionsClient) BeginCreateOrUpdate(ctx context.Context, resourceGroupName string, vmName string, vmExtensionName string, extensionParameters VirtualMachineExtension, options *VirtualMachineExtensionsClientBeginCreateOrUpdateOptions) (*runtime.Poller[VirtualMachineExtensionsClientCreateOrUpdateResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.createOrUpdate(ctx, resourceGroupName, vmName, vmExtensionName, extensionParameters, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[VirtualMachineExtensionsClientCreateOrUpdateResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[VirtualMachineExtensionsClientCreateOrUpdateResponse](options.ResumeToken, client.pl, nil) + } +} + +// CreateOrUpdate - The operation to create or update the extension. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +func (client *VirtualMachineExtensionsClient) createOrUpdate(ctx context.Context, resourceGroupName string, vmName string, vmExtensionName string, extensionParameters VirtualMachineExtension, options *VirtualMachineExtensionsClientBeginCreateOrUpdateOptions) (*http.Response, error) { + req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, vmName, vmExtensionName, extensionParameters, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// createOrUpdateCreateRequest creates the CreateOrUpdate request. 
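+// A sketch of driving the long-running operation started by BeginCreateOrUpdate
+// above to completion (all values illustrative):
+//
+//	ext := VirtualMachineExtension{Location: to.Ptr("westus2")}
+//	poller, err := client.BeginCreateOrUpdate(ctx, "my-rg", "my-vm", "my-ext", ext, nil)
+//	if err == nil {
+//		resp, err := poller.PollUntilDone(ctx, nil)
+//		_, _ = resp, err // resp is a VirtualMachineExtensionsClientCreateOrUpdateResponse
+//	}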
+func (client *VirtualMachineExtensionsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, vmName string, vmExtensionName string, extensionParameters VirtualMachineExtension, options *VirtualMachineExtensionsClientBeginCreateOrUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions/{vmExtensionName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if vmName == "" { + return nil, errors.New("parameter vmName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmName}", url.PathEscape(vmName)) + if vmExtensionName == "" { + return nil, errors.New("parameter vmExtensionName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmExtensionName}", url.PathEscape(vmExtensionName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, extensionParameters) +} + +// BeginDelete - The operation to delete the extension. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// vmName - The name of the virtual machine where the extension should be deleted. +// vmExtensionName - The name of the virtual machine extension. +// options - VirtualMachineExtensionsClientBeginDeleteOptions contains the optional parameters for the VirtualMachineExtensionsClient.BeginDelete +// method. +func (client *VirtualMachineExtensionsClient) BeginDelete(ctx context.Context, resourceGroupName string, vmName string, vmExtensionName string, options *VirtualMachineExtensionsClientBeginDeleteOptions) (*runtime.Poller[VirtualMachineExtensionsClientDeleteResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.deleteOperation(ctx, resourceGroupName, vmName, vmExtensionName, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[VirtualMachineExtensionsClientDeleteResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[VirtualMachineExtensionsClientDeleteResponse](options.ResumeToken, client.pl, nil) + } +} + +// Delete - The operation to delete the extension. +// If the operation fails it returns an *azcore.ResponseError type. 
+// Generated from API version 2022-03-01 +func (client *VirtualMachineExtensionsClient) deleteOperation(ctx context.Context, resourceGroupName string, vmName string, vmExtensionName string, options *VirtualMachineExtensionsClientBeginDeleteOptions) (*http.Response, error) { + req, err := client.deleteCreateRequest(ctx, resourceGroupName, vmName, vmExtensionName, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted, http.StatusNoContent) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// deleteCreateRequest creates the Delete request. +func (client *VirtualMachineExtensionsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, vmName string, vmExtensionName string, options *VirtualMachineExtensionsClientBeginDeleteOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions/{vmExtensionName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if vmName == "" { + return nil, errors.New("parameter vmName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmName}", url.PathEscape(vmName)) + if vmExtensionName == "" { + return nil, errors.New("parameter vmExtensionName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmExtensionName}", url.PathEscape(vmExtensionName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// Get - The operation to get the extension. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// vmName - The name of the virtual machine containing the extension. +// vmExtensionName - The name of the virtual machine extension. +// options - VirtualMachineExtensionsClientGetOptions contains the optional parameters for the VirtualMachineExtensionsClient.Get +// method. +func (client *VirtualMachineExtensionsClient) Get(ctx context.Context, resourceGroupName string, vmName string, vmExtensionName string, options *VirtualMachineExtensionsClientGetOptions) (VirtualMachineExtensionsClientGetResponse, error) { + req, err := client.getCreateRequest(ctx, resourceGroupName, vmName, vmExtensionName, options) + if err != nil { + return VirtualMachineExtensionsClientGetResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return VirtualMachineExtensionsClientGetResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return VirtualMachineExtensionsClientGetResponse{}, runtime.NewResponseError(resp) + } + return client.getHandleResponse(resp) +} + +// getCreateRequest creates the Get request. 
+func (client *VirtualMachineExtensionsClient) getCreateRequest(ctx context.Context, resourceGroupName string, vmName string, vmExtensionName string, options *VirtualMachineExtensionsClientGetOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions/{vmExtensionName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if vmName == "" { + return nil, errors.New("parameter vmName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmName}", url.PathEscape(vmName)) + if vmExtensionName == "" { + return nil, errors.New("parameter vmExtensionName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmExtensionName}", url.PathEscape(vmExtensionName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Expand != nil { + reqQP.Set("$expand", *options.Expand) + } + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getHandleResponse handles the Get response. +func (client *VirtualMachineExtensionsClient) getHandleResponse(resp *http.Response) (VirtualMachineExtensionsClientGetResponse, error) { + result := VirtualMachineExtensionsClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.VirtualMachineExtension); err != nil { + return VirtualMachineExtensionsClientGetResponse{}, err + } + return result, nil +} + +// List - The operation to get all extensions of a Virtual Machine. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// vmName - The name of the virtual machine containing the extension. +// options - VirtualMachineExtensionsClientListOptions contains the optional parameters for the VirtualMachineExtensionsClient.List +// method. +func (client *VirtualMachineExtensionsClient) List(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachineExtensionsClientListOptions) (VirtualMachineExtensionsClientListResponse, error) { + req, err := client.listCreateRequest(ctx, resourceGroupName, vmName, options) + if err != nil { + return VirtualMachineExtensionsClientListResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return VirtualMachineExtensionsClientListResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return VirtualMachineExtensionsClientListResponse{}, runtime.NewResponseError(resp) + } + return client.listHandleResponse(resp) +} + +// listCreateRequest creates the List request. 
+func (client *VirtualMachineExtensionsClient) listCreateRequest(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachineExtensionsClientListOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if vmName == "" { + return nil, errors.New("parameter vmName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmName}", url.PathEscape(vmName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Expand != nil { + reqQP.Set("$expand", *options.Expand) + } + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listHandleResponse handles the List response. +func (client *VirtualMachineExtensionsClient) listHandleResponse(resp *http.Response) (VirtualMachineExtensionsClientListResponse, error) { + result := VirtualMachineExtensionsClientListResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.VirtualMachineExtensionsListResult); err != nil { + return VirtualMachineExtensionsClientListResponse{}, err + } + return result, nil +} + +// BeginUpdate - The operation to update the extension. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// vmName - The name of the virtual machine where the extension should be updated. +// vmExtensionName - The name of the virtual machine extension. +// extensionParameters - Parameters supplied to the Update Virtual Machine Extension operation. +// options - VirtualMachineExtensionsClientBeginUpdateOptions contains the optional parameters for the VirtualMachineExtensionsClient.BeginUpdate +// method. +func (client *VirtualMachineExtensionsClient) BeginUpdate(ctx context.Context, resourceGroupName string, vmName string, vmExtensionName string, extensionParameters VirtualMachineExtensionUpdate, options *VirtualMachineExtensionsClientBeginUpdateOptions) (*runtime.Poller[VirtualMachineExtensionsClientUpdateResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.update(ctx, resourceGroupName, vmName, vmExtensionName, extensionParameters, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[VirtualMachineExtensionsClientUpdateResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[VirtualMachineExtensionsClientUpdateResponse](options.ResumeToken, client.pl, nil) + } +} + +// Update - The operation to update the extension. +// If the operation fails it returns an *azcore.ResponseError type. 
+// Generated from API version 2022-03-01 +func (client *VirtualMachineExtensionsClient) update(ctx context.Context, resourceGroupName string, vmName string, vmExtensionName string, extensionParameters VirtualMachineExtensionUpdate, options *VirtualMachineExtensionsClientBeginUpdateOptions) (*http.Response, error) { + req, err := client.updateCreateRequest(ctx, resourceGroupName, vmName, vmExtensionName, extensionParameters, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// updateCreateRequest creates the Update request. +func (client *VirtualMachineExtensionsClient) updateCreateRequest(ctx context.Context, resourceGroupName string, vmName string, vmExtensionName string, extensionParameters VirtualMachineExtensionUpdate, options *VirtualMachineExtensionsClientBeginUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions/{vmExtensionName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if vmName == "" { + return nil, errors.New("parameter vmName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmName}", url.PathEscape(vmName)) + if vmExtensionName == "" { + return nil, errors.New("parameter vmExtensionName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmExtensionName}", url.PathEscape(vmExtensionName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, extensionParameters) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_virtualmachineimages_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_virtualmachineimages_client.go new file mode 100644 index 000000000..1a6000e71 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_virtualmachineimages_client.go @@ -0,0 +1,376 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
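+//
+// The VirtualMachineImagesClient in this file resolves marketplace images along the
+// publisher -> offer -> SKU -> version hierarchy (ListPublishers, ListOffers,
+// ListSKUs, List, Get). A minimal construction sketch (illustrative):
+//
+//	cred, err := azidentity.NewDefaultAzureCredential(nil)
+//	client, err := armcompute.NewVirtualMachineImagesClient("<subscription-id>", cred, nil)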
+ +package armcompute + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + armruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strconv" + "strings" +) + +// VirtualMachineImagesClient contains the methods for the VirtualMachineImages group. +// Don't use this type directly, use NewVirtualMachineImagesClient() instead. +type VirtualMachineImagesClient struct { + host string + subscriptionID string + pl runtime.Pipeline +} + +// NewVirtualMachineImagesClient creates a new instance of VirtualMachineImagesClient with the specified values. +// subscriptionID - Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms +// part of the URI for every service call. +// credential - used to authorize requests. Usually a credential from azidentity. +// options - pass nil to accept the default values. +func NewVirtualMachineImagesClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*VirtualMachineImagesClient, error) { + if options == nil { + options = &arm.ClientOptions{} + } + ep := cloud.AzurePublic.Services[cloud.ResourceManager].Endpoint + if c, ok := options.Cloud.Services[cloud.ResourceManager]; ok { + ep = c.Endpoint + } + pl, err := armruntime.NewPipeline(moduleName, moduleVersion, credential, runtime.PipelineOptions{}, options) + if err != nil { + return nil, err + } + client := &VirtualMachineImagesClient{ + subscriptionID: subscriptionID, + host: ep, + pl: pl, + } + return client, nil +} + +// Get - Gets a virtual machine image. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// location - The name of a supported Azure region. +// publisherName - A valid image publisher. +// offer - A valid image publisher offer. +// skus - A valid image SKU. +// version - A valid image SKU version. +// options - VirtualMachineImagesClientGetOptions contains the optional parameters for the VirtualMachineImagesClient.Get +// method. +func (client *VirtualMachineImagesClient) Get(ctx context.Context, location string, publisherName string, offer string, skus string, version string, options *VirtualMachineImagesClientGetOptions) (VirtualMachineImagesClientGetResponse, error) { + req, err := client.getCreateRequest(ctx, location, publisherName, offer, skus, version, options) + if err != nil { + return VirtualMachineImagesClientGetResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return VirtualMachineImagesClientGetResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return VirtualMachineImagesClientGetResponse{}, runtime.NewResponseError(resp) + } + return client.getHandleResponse(resp) +} + +// getCreateRequest creates the Get request. 
+func (client *VirtualMachineImagesClient) getCreateRequest(ctx context.Context, location string, publisherName string, offer string, skus string, version string, options *VirtualMachineImagesClientGetOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus/{skus}/versions/{version}" + if location == "" { + return nil, errors.New("parameter location cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{location}", url.PathEscape(location)) + if publisherName == "" { + return nil, errors.New("parameter publisherName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{publisherName}", url.PathEscape(publisherName)) + if offer == "" { + return nil, errors.New("parameter offer cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{offer}", url.PathEscape(offer)) + if skus == "" { + return nil, errors.New("parameter skus cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{skus}", url.PathEscape(skus)) + if version == "" { + return nil, errors.New("parameter version cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{version}", url.PathEscape(version)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getHandleResponse handles the Get response. +func (client *VirtualMachineImagesClient) getHandleResponse(resp *http.Response) (VirtualMachineImagesClientGetResponse, error) { + result := VirtualMachineImagesClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.VirtualMachineImage); err != nil { + return VirtualMachineImagesClientGetResponse{}, err + } + return result, nil +} + +// List - Gets a list of all virtual machine image versions for the specified location, publisher, offer, and SKU. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// location - The name of a supported Azure region. +// publisherName - A valid image publisher. +// offer - A valid image publisher offer. +// skus - A valid image SKU. +// options - VirtualMachineImagesClientListOptions contains the optional parameters for the VirtualMachineImagesClient.List +// method. +func (client *VirtualMachineImagesClient) List(ctx context.Context, location string, publisherName string, offer string, skus string, options *VirtualMachineImagesClientListOptions) (VirtualMachineImagesClientListResponse, error) { + req, err := client.listCreateRequest(ctx, location, publisherName, offer, skus, options) + if err != nil { + return VirtualMachineImagesClientListResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return VirtualMachineImagesClientListResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return VirtualMachineImagesClientListResponse{}, runtime.NewResponseError(resp) + } + return client.listHandleResponse(resp) +} + +// listCreateRequest creates the List request. 
+func (client *VirtualMachineImagesClient) listCreateRequest(ctx context.Context, location string, publisherName string, offer string, skus string, options *VirtualMachineImagesClientListOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus/{skus}/versions" + if location == "" { + return nil, errors.New("parameter location cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{location}", url.PathEscape(location)) + if publisherName == "" { + return nil, errors.New("parameter publisherName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{publisherName}", url.PathEscape(publisherName)) + if offer == "" { + return nil, errors.New("parameter offer cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{offer}", url.PathEscape(offer)) + if skus == "" { + return nil, errors.New("parameter skus cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{skus}", url.PathEscape(skus)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Expand != nil { + reqQP.Set("$expand", *options.Expand) + } + if options != nil && options.Top != nil { + reqQP.Set("$top", strconv.FormatInt(int64(*options.Top), 10)) + } + if options != nil && options.Orderby != nil { + reqQP.Set("$orderby", *options.Orderby) + } + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listHandleResponse handles the List response. +func (client *VirtualMachineImagesClient) listHandleResponse(resp *http.Response) (VirtualMachineImagesClientListResponse, error) { + result := VirtualMachineImagesClientListResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.VirtualMachineImageResourceArray); err != nil { + return VirtualMachineImagesClientListResponse{}, err + } + return result, nil +} + +// ListOffers - Gets a list of virtual machine image offers for the specified location and publisher. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// location - The name of a supported Azure region. +// publisherName - A valid image publisher. +// options - VirtualMachineImagesClientListOffersOptions contains the optional parameters for the VirtualMachineImagesClient.ListOffers +// method. 
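+//
+// A sketch walking one level of the image hierarchy (publisher value illustrative):
+//
+//	res, err := client.ListOffers(ctx, "westus2", "Canonical", nil)
+//	// on success, res.VirtualMachineImageResourceArray lists the publisher's offers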
+func (client *VirtualMachineImagesClient) ListOffers(ctx context.Context, location string, publisherName string, options *VirtualMachineImagesClientListOffersOptions) (VirtualMachineImagesClientListOffersResponse, error) { + req, err := client.listOffersCreateRequest(ctx, location, publisherName, options) + if err != nil { + return VirtualMachineImagesClientListOffersResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return VirtualMachineImagesClientListOffersResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return VirtualMachineImagesClientListOffersResponse{}, runtime.NewResponseError(resp) + } + return client.listOffersHandleResponse(resp) +} + +// listOffersCreateRequest creates the ListOffers request. +func (client *VirtualMachineImagesClient) listOffersCreateRequest(ctx context.Context, location string, publisherName string, options *VirtualMachineImagesClientListOffersOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers" + if location == "" { + return nil, errors.New("parameter location cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{location}", url.PathEscape(location)) + if publisherName == "" { + return nil, errors.New("parameter publisherName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{publisherName}", url.PathEscape(publisherName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listOffersHandleResponse handles the ListOffers response. +func (client *VirtualMachineImagesClient) listOffersHandleResponse(resp *http.Response) (VirtualMachineImagesClientListOffersResponse, error) { + result := VirtualMachineImagesClientListOffersResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.VirtualMachineImageResourceArray); err != nil { + return VirtualMachineImagesClientListOffersResponse{}, err + } + return result, nil +} + +// ListPublishers - Gets a list of virtual machine image publishers for the specified Azure location. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// location - The name of a supported Azure region. +// options - VirtualMachineImagesClientListPublishersOptions contains the optional parameters for the VirtualMachineImagesClient.ListPublishers +// method. 
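+//
+// A sketch (region value illustrative):
+//
+//	res, err := client.ListPublishers(ctx, "westus2", nil)
+//	for _, p := range res.VirtualMachineImageResourceArray {
+//		fmt.Println(*p.Name)
+//	}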
+func (client *VirtualMachineImagesClient) ListPublishers(ctx context.Context, location string, options *VirtualMachineImagesClientListPublishersOptions) (VirtualMachineImagesClientListPublishersResponse, error) { + req, err := client.listPublishersCreateRequest(ctx, location, options) + if err != nil { + return VirtualMachineImagesClientListPublishersResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return VirtualMachineImagesClientListPublishersResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return VirtualMachineImagesClientListPublishersResponse{}, runtime.NewResponseError(resp) + } + return client.listPublishersHandleResponse(resp) +} + +// listPublishersCreateRequest creates the ListPublishers request. +func (client *VirtualMachineImagesClient) listPublishersCreateRequest(ctx context.Context, location string, options *VirtualMachineImagesClientListPublishersOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers" + if location == "" { + return nil, errors.New("parameter location cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{location}", url.PathEscape(location)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listPublishersHandleResponse handles the ListPublishers response. +func (client *VirtualMachineImagesClient) listPublishersHandleResponse(resp *http.Response) (VirtualMachineImagesClientListPublishersResponse, error) { + result := VirtualMachineImagesClientListPublishersResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.VirtualMachineImageResourceArray); err != nil { + return VirtualMachineImagesClientListPublishersResponse{}, err + } + return result, nil +} + +// ListSKUs - Gets a list of virtual machine image SKUs for the specified location, publisher, and offer. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// location - The name of a supported Azure region. +// publisherName - A valid image publisher. +// offer - A valid image publisher offer. +// options - VirtualMachineImagesClientListSKUsOptions contains the optional parameters for the VirtualMachineImagesClient.ListSKUs +// method. +func (client *VirtualMachineImagesClient) ListSKUs(ctx context.Context, location string, publisherName string, offer string, options *VirtualMachineImagesClientListSKUsOptions) (VirtualMachineImagesClientListSKUsResponse, error) { + req, err := client.listSKUsCreateRequest(ctx, location, publisherName, offer, options) + if err != nil { + return VirtualMachineImagesClientListSKUsResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return VirtualMachineImagesClientListSKUsResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return VirtualMachineImagesClientListSKUsResponse{}, runtime.NewResponseError(resp) + } + return client.listSKUsHandleResponse(resp) +} + +// listSKUsCreateRequest creates the ListSKUs request. 
+func (client *VirtualMachineImagesClient) listSKUsCreateRequest(ctx context.Context, location string, publisherName string, offer string, options *VirtualMachineImagesClientListSKUsOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus" + if location == "" { + return nil, errors.New("parameter location cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{location}", url.PathEscape(location)) + if publisherName == "" { + return nil, errors.New("parameter publisherName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{publisherName}", url.PathEscape(publisherName)) + if offer == "" { + return nil, errors.New("parameter offer cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{offer}", url.PathEscape(offer)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listSKUsHandleResponse handles the ListSKUs response. +func (client *VirtualMachineImagesClient) listSKUsHandleResponse(resp *http.Response) (VirtualMachineImagesClientListSKUsResponse, error) { + result := VirtualMachineImagesClientListSKUsResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.VirtualMachineImageResourceArray); err != nil { + return VirtualMachineImagesClientListSKUsResponse{}, err + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_virtualmachineimagesedgezone_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_virtualmachineimagesedgezone_client.go new file mode 100644 index 000000000..303bb09e7 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_virtualmachineimagesedgezone_client.go @@ -0,0 +1,401 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armcompute + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + armruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strconv" + "strings" +) + +// VirtualMachineImagesEdgeZoneClient contains the methods for the VirtualMachineImagesEdgeZone group. +// Don't use this type directly, use NewVirtualMachineImagesEdgeZoneClient() instead. 
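+// It mirrors VirtualMachineImagesClient, with every request additionally scoped to
+// an edge zone via an extra edgeZones/{edgeZone} path segment.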
+type VirtualMachineImagesEdgeZoneClient struct { + host string + subscriptionID string + pl runtime.Pipeline +} + +// NewVirtualMachineImagesEdgeZoneClient creates a new instance of VirtualMachineImagesEdgeZoneClient with the specified values. +// subscriptionID - Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms +// part of the URI for every service call. +// credential - used to authorize requests. Usually a credential from azidentity. +// options - pass nil to accept the default values. +func NewVirtualMachineImagesEdgeZoneClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*VirtualMachineImagesEdgeZoneClient, error) { + if options == nil { + options = &arm.ClientOptions{} + } + ep := cloud.AzurePublic.Services[cloud.ResourceManager].Endpoint + if c, ok := options.Cloud.Services[cloud.ResourceManager]; ok { + ep = c.Endpoint + } + pl, err := armruntime.NewPipeline(moduleName, moduleVersion, credential, runtime.PipelineOptions{}, options) + if err != nil { + return nil, err + } + client := &VirtualMachineImagesEdgeZoneClient{ + subscriptionID: subscriptionID, + host: ep, + pl: pl, + } + return client, nil +} + +// Get - Gets a virtual machine image in an edge zone. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// location - The name of a supported Azure region. +// edgeZone - The name of the edge zone. +// publisherName - A valid image publisher. +// offer - A valid image publisher offer. +// skus - A valid image SKU. +// version - A valid image SKU version. +// options - VirtualMachineImagesEdgeZoneClientGetOptions contains the optional parameters for the VirtualMachineImagesEdgeZoneClient.Get +// method. +func (client *VirtualMachineImagesEdgeZoneClient) Get(ctx context.Context, location string, edgeZone string, publisherName string, offer string, skus string, version string, options *VirtualMachineImagesEdgeZoneClientGetOptions) (VirtualMachineImagesEdgeZoneClientGetResponse, error) { + req, err := client.getCreateRequest(ctx, location, edgeZone, publisherName, offer, skus, version, options) + if err != nil { + return VirtualMachineImagesEdgeZoneClientGetResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return VirtualMachineImagesEdgeZoneClientGetResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return VirtualMachineImagesEdgeZoneClientGetResponse{}, runtime.NewResponseError(resp) + } + return client.getHandleResponse(resp) +} + +// getCreateRequest creates the Get request. 
+func (client *VirtualMachineImagesEdgeZoneClient) getCreateRequest(ctx context.Context, location string, edgeZone string, publisherName string, offer string, skus string, version string, options *VirtualMachineImagesEdgeZoneClientGetOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/edgeZones/{edgeZone}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus/{skus}/versions/{version}" + if location == "" { + return nil, errors.New("parameter location cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{location}", url.PathEscape(location)) + if edgeZone == "" { + return nil, errors.New("parameter edgeZone cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{edgeZone}", url.PathEscape(edgeZone)) + if publisherName == "" { + return nil, errors.New("parameter publisherName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{publisherName}", url.PathEscape(publisherName)) + if offer == "" { + return nil, errors.New("parameter offer cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{offer}", url.PathEscape(offer)) + if skus == "" { + return nil, errors.New("parameter skus cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{skus}", url.PathEscape(skus)) + if version == "" { + return nil, errors.New("parameter version cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{version}", url.PathEscape(version)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getHandleResponse handles the Get response. +func (client *VirtualMachineImagesEdgeZoneClient) getHandleResponse(resp *http.Response) (VirtualMachineImagesEdgeZoneClientGetResponse, error) { + result := VirtualMachineImagesEdgeZoneClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.VirtualMachineImage); err != nil { + return VirtualMachineImagesEdgeZoneClientGetResponse{}, err + } + return result, nil +} + +// List - Gets a list of all virtual machine image versions for the specified location, edge zone, publisher, offer, and SKU. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// location - The name of a supported Azure region. +// edgeZone - The name of the edge zone. +// publisherName - A valid image publisher. +// offer - A valid image publisher offer. +// skus - A valid image SKU. +// options - VirtualMachineImagesEdgeZoneClientListOptions contains the optional parameters for the VirtualMachineImagesEdgeZoneClient.List +// method. 
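+//
+// Illustrative sketch, not part of the generated file: listing image versions
+// with a cap on results. All argument values are placeholders, and the client
+// is assumed to come from NewVirtualMachineImagesEdgeZoneClient above.
+//
+//	top := int32(10)
+//	resp, err := client.List(ctx, "<location>", "<edge-zone>", "<publisher>", "<offer>", "<sku>",
+//		&VirtualMachineImagesEdgeZoneClientListOptions{Top: &top})
+//	if err != nil {
+//		// failed service calls surface as *azcore.ResponseError
+//	}
+//	for _, v := range resp.VirtualMachineImageResourceArray {
+//		_ = v // one entry per image version
+//	}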
+func (client *VirtualMachineImagesEdgeZoneClient) List(ctx context.Context, location string, edgeZone string, publisherName string, offer string, skus string, options *VirtualMachineImagesEdgeZoneClientListOptions) (VirtualMachineImagesEdgeZoneClientListResponse, error) { + req, err := client.listCreateRequest(ctx, location, edgeZone, publisherName, offer, skus, options) + if err != nil { + return VirtualMachineImagesEdgeZoneClientListResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return VirtualMachineImagesEdgeZoneClientListResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return VirtualMachineImagesEdgeZoneClientListResponse{}, runtime.NewResponseError(resp) + } + return client.listHandleResponse(resp) +} + +// listCreateRequest creates the List request. +func (client *VirtualMachineImagesEdgeZoneClient) listCreateRequest(ctx context.Context, location string, edgeZone string, publisherName string, offer string, skus string, options *VirtualMachineImagesEdgeZoneClientListOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/edgeZones/{edgeZone}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus/{skus}/versions" + if location == "" { + return nil, errors.New("parameter location cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{location}", url.PathEscape(location)) + if edgeZone == "" { + return nil, errors.New("parameter edgeZone cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{edgeZone}", url.PathEscape(edgeZone)) + if publisherName == "" { + return nil, errors.New("parameter publisherName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{publisherName}", url.PathEscape(publisherName)) + if offer == "" { + return nil, errors.New("parameter offer cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{offer}", url.PathEscape(offer)) + if skus == "" { + return nil, errors.New("parameter skus cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{skus}", url.PathEscape(skus)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Expand != nil { + reqQP.Set("$expand", *options.Expand) + } + if options != nil && options.Top != nil { + reqQP.Set("$top", strconv.FormatInt(int64(*options.Top), 10)) + } + if options != nil && options.Orderby != nil { + reqQP.Set("$orderby", *options.Orderby) + } + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listHandleResponse handles the List response. +func (client *VirtualMachineImagesEdgeZoneClient) listHandleResponse(resp *http.Response) (VirtualMachineImagesEdgeZoneClientListResponse, error) { + result := VirtualMachineImagesEdgeZoneClientListResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.VirtualMachineImageResourceArray); err != nil { + return VirtualMachineImagesEdgeZoneClientListResponse{}, err + } + return result, nil +} + +// ListOffers - Gets a list of virtual machine image offers for the specified location, edge zone and publisher. 
+// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// location - The name of a supported Azure region. +// edgeZone - The name of the edge zone. +// publisherName - A valid image publisher. +// options - VirtualMachineImagesEdgeZoneClientListOffersOptions contains the optional parameters for the VirtualMachineImagesEdgeZoneClient.ListOffers +// method. +func (client *VirtualMachineImagesEdgeZoneClient) ListOffers(ctx context.Context, location string, edgeZone string, publisherName string, options *VirtualMachineImagesEdgeZoneClientListOffersOptions) (VirtualMachineImagesEdgeZoneClientListOffersResponse, error) { + req, err := client.listOffersCreateRequest(ctx, location, edgeZone, publisherName, options) + if err != nil { + return VirtualMachineImagesEdgeZoneClientListOffersResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return VirtualMachineImagesEdgeZoneClientListOffersResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return VirtualMachineImagesEdgeZoneClientListOffersResponse{}, runtime.NewResponseError(resp) + } + return client.listOffersHandleResponse(resp) +} + +// listOffersCreateRequest creates the ListOffers request. +func (client *VirtualMachineImagesEdgeZoneClient) listOffersCreateRequest(ctx context.Context, location string, edgeZone string, publisherName string, options *VirtualMachineImagesEdgeZoneClientListOffersOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/edgeZones/{edgeZone}/publishers/{publisherName}/artifacttypes/vmimage/offers" + if location == "" { + return nil, errors.New("parameter location cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{location}", url.PathEscape(location)) + if edgeZone == "" { + return nil, errors.New("parameter edgeZone cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{edgeZone}", url.PathEscape(edgeZone)) + if publisherName == "" { + return nil, errors.New("parameter publisherName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{publisherName}", url.PathEscape(publisherName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listOffersHandleResponse handles the ListOffers response. +func (client *VirtualMachineImagesEdgeZoneClient) listOffersHandleResponse(resp *http.Response) (VirtualMachineImagesEdgeZoneClientListOffersResponse, error) { + result := VirtualMachineImagesEdgeZoneClientListOffersResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.VirtualMachineImageResourceArray); err != nil { + return VirtualMachineImagesEdgeZoneClientListOffersResponse{}, err + } + return result, nil +} + +// ListPublishers - Gets a list of virtual machine image publishers for the specified Azure location and edge zone. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// location - The name of a supported Azure region. +// edgeZone - The name of the edge zone. 
+// options - VirtualMachineImagesEdgeZoneClientListPublishersOptions contains the optional parameters for the VirtualMachineImagesEdgeZoneClient.ListPublishers +// method. +func (client *VirtualMachineImagesEdgeZoneClient) ListPublishers(ctx context.Context, location string, edgeZone string, options *VirtualMachineImagesEdgeZoneClientListPublishersOptions) (VirtualMachineImagesEdgeZoneClientListPublishersResponse, error) { + req, err := client.listPublishersCreateRequest(ctx, location, edgeZone, options) + if err != nil { + return VirtualMachineImagesEdgeZoneClientListPublishersResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return VirtualMachineImagesEdgeZoneClientListPublishersResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return VirtualMachineImagesEdgeZoneClientListPublishersResponse{}, runtime.NewResponseError(resp) + } + return client.listPublishersHandleResponse(resp) +} + +// listPublishersCreateRequest creates the ListPublishers request. +func (client *VirtualMachineImagesEdgeZoneClient) listPublishersCreateRequest(ctx context.Context, location string, edgeZone string, options *VirtualMachineImagesEdgeZoneClientListPublishersOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/edgeZones/{edgeZone}/publishers" + if location == "" { + return nil, errors.New("parameter location cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{location}", url.PathEscape(location)) + if edgeZone == "" { + return nil, errors.New("parameter edgeZone cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{edgeZone}", url.PathEscape(edgeZone)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listPublishersHandleResponse handles the ListPublishers response. +func (client *VirtualMachineImagesEdgeZoneClient) listPublishersHandleResponse(resp *http.Response) (VirtualMachineImagesEdgeZoneClientListPublishersResponse, error) { + result := VirtualMachineImagesEdgeZoneClientListPublishersResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.VirtualMachineImageResourceArray); err != nil { + return VirtualMachineImagesEdgeZoneClientListPublishersResponse{}, err + } + return result, nil +} + +// ListSKUs - Gets a list of virtual machine image SKUs for the specified location, edge zone, publisher, and offer. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// location - The name of a supported Azure region. +// edgeZone - The name of the edge zone. +// publisherName - A valid image publisher. +// offer - A valid image publisher offer. +// options - VirtualMachineImagesEdgeZoneClientListSKUsOptions contains the optional parameters for the VirtualMachineImagesEdgeZoneClient.ListSKUs +// method. 
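+//
+// Illustrative sketch, not part of the generated file: SKUs come back as the
+// same flat VirtualMachineImageResourceArray used by the other list calls in
+// this client, so iteration is identical; argument values are placeholders.
+//
+//	resp, err := client.ListSKUs(ctx, "<location>", "<edge-zone>", "<publisher>", "<offer>", nil)
+//	if err == nil {
+//		for _, sku := range resp.VirtualMachineImageResourceArray {
+//			_ = sku
+//		}
+//	}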
+func (client *VirtualMachineImagesEdgeZoneClient) ListSKUs(ctx context.Context, location string, edgeZone string, publisherName string, offer string, options *VirtualMachineImagesEdgeZoneClientListSKUsOptions) (VirtualMachineImagesEdgeZoneClientListSKUsResponse, error) { + req, err := client.listSKUsCreateRequest(ctx, location, edgeZone, publisherName, offer, options) + if err != nil { + return VirtualMachineImagesEdgeZoneClientListSKUsResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return VirtualMachineImagesEdgeZoneClientListSKUsResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return VirtualMachineImagesEdgeZoneClientListSKUsResponse{}, runtime.NewResponseError(resp) + } + return client.listSKUsHandleResponse(resp) +} + +// listSKUsCreateRequest creates the ListSKUs request. +func (client *VirtualMachineImagesEdgeZoneClient) listSKUsCreateRequest(ctx context.Context, location string, edgeZone string, publisherName string, offer string, options *VirtualMachineImagesEdgeZoneClientListSKUsOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/edgeZones/{edgeZone}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus" + if location == "" { + return nil, errors.New("parameter location cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{location}", url.PathEscape(location)) + if edgeZone == "" { + return nil, errors.New("parameter edgeZone cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{edgeZone}", url.PathEscape(edgeZone)) + if publisherName == "" { + return nil, errors.New("parameter publisherName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{publisherName}", url.PathEscape(publisherName)) + if offer == "" { + return nil, errors.New("parameter offer cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{offer}", url.PathEscape(offer)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listSKUsHandleResponse handles the ListSKUs response. +func (client *VirtualMachineImagesEdgeZoneClient) listSKUsHandleResponse(resp *http.Response) (VirtualMachineImagesEdgeZoneClientListSKUsResponse, error) { + result := VirtualMachineImagesEdgeZoneClientListSKUsResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.VirtualMachineImageResourceArray); err != nil { + return VirtualMachineImagesEdgeZoneClientListSKUsResponse{}, err + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_virtualmachineruncommands_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_virtualmachineruncommands_client.go new file mode 100644 index 000000000..eb3408ad2 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_virtualmachineruncommands_client.go @@ -0,0 +1,522 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. 
All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armcompute + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + armruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strings" +) + +// VirtualMachineRunCommandsClient contains the methods for the VirtualMachineRunCommands group. +// Don't use this type directly, use NewVirtualMachineRunCommandsClient() instead. +type VirtualMachineRunCommandsClient struct { + host string + subscriptionID string + pl runtime.Pipeline +} + +// NewVirtualMachineRunCommandsClient creates a new instance of VirtualMachineRunCommandsClient with the specified values. +// subscriptionID - Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms +// part of the URI for every service call. +// credential - used to authorize requests. Usually a credential from azidentity. +// options - pass nil to accept the default values. +func NewVirtualMachineRunCommandsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*VirtualMachineRunCommandsClient, error) { + if options == nil { + options = &arm.ClientOptions{} + } + ep := cloud.AzurePublic.Services[cloud.ResourceManager].Endpoint + if c, ok := options.Cloud.Services[cloud.ResourceManager]; ok { + ep = c.Endpoint + } + pl, err := armruntime.NewPipeline(moduleName, moduleVersion, credential, runtime.PipelineOptions{}, options) + if err != nil { + return nil, err + } + client := &VirtualMachineRunCommandsClient{ + subscriptionID: subscriptionID, + host: ep, + pl: pl, + } + return client, nil +} + +// BeginCreateOrUpdate - The operation to create or update the run command. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// vmName - The name of the virtual machine where the run command should be created or updated. +// runCommandName - The name of the virtual machine run command. +// runCommand - Parameters supplied to the Create Virtual Machine RunCommand operation. +// options - VirtualMachineRunCommandsClientBeginCreateOrUpdateOptions contains the optional parameters for the VirtualMachineRunCommandsClient.BeginCreateOrUpdate +// method. 
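+//
+// Illustrative sketch, not part of the generated file: the returned poller is
+// usually driven to completion with PollUntilDone. runCommand is a placeholder
+// VirtualMachineRunCommand value.
+//
+//	poller, err := client.BeginCreateOrUpdate(ctx, "<resource-group>", "<vm>", "<run-command>", runCommand, nil)
+//	if err != nil {
+//		// handle error
+//	}
+//	resp, err := poller.PollUntilDone(ctx, nil)
+//	_ = resp // VirtualMachineRunCommandsClientCreateOrUpdateResponse on success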
+func (client *VirtualMachineRunCommandsClient) BeginCreateOrUpdate(ctx context.Context, resourceGroupName string, vmName string, runCommandName string, runCommand VirtualMachineRunCommand, options *VirtualMachineRunCommandsClientBeginCreateOrUpdateOptions) (*runtime.Poller[VirtualMachineRunCommandsClientCreateOrUpdateResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.createOrUpdate(ctx, resourceGroupName, vmName, runCommandName, runCommand, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[VirtualMachineRunCommandsClientCreateOrUpdateResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[VirtualMachineRunCommandsClientCreateOrUpdateResponse](options.ResumeToken, client.pl, nil) + } +} + +// CreateOrUpdate - The operation to create or update the run command. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +func (client *VirtualMachineRunCommandsClient) createOrUpdate(ctx context.Context, resourceGroupName string, vmName string, runCommandName string, runCommand VirtualMachineRunCommand, options *VirtualMachineRunCommandsClientBeginCreateOrUpdateOptions) (*http.Response, error) { + req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, vmName, runCommandName, runCommand, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// createOrUpdateCreateRequest creates the CreateOrUpdate request. +func (client *VirtualMachineRunCommandsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, vmName string, runCommandName string, runCommand VirtualMachineRunCommand, options *VirtualMachineRunCommandsClientBeginCreateOrUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/runCommands/{runCommandName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if vmName == "" { + return nil, errors.New("parameter vmName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmName}", url.PathEscape(vmName)) + if runCommandName == "" { + return nil, errors.New("parameter runCommandName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{runCommandName}", url.PathEscape(runCommandName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json, text/json"} + return req, runtime.MarshalAsJSON(req, runCommand) +} + +// BeginDelete - The operation to delete the run command. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. 
+// vmName - The name of the virtual machine where the run command should be deleted. +// runCommandName - The name of the virtual machine run command. +// options - VirtualMachineRunCommandsClientBeginDeleteOptions contains the optional parameters for the VirtualMachineRunCommandsClient.BeginDelete +// method. +func (client *VirtualMachineRunCommandsClient) BeginDelete(ctx context.Context, resourceGroupName string, vmName string, runCommandName string, options *VirtualMachineRunCommandsClientBeginDeleteOptions) (*runtime.Poller[VirtualMachineRunCommandsClientDeleteResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.deleteOperation(ctx, resourceGroupName, vmName, runCommandName, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[VirtualMachineRunCommandsClientDeleteResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[VirtualMachineRunCommandsClientDeleteResponse](options.ResumeToken, client.pl, nil) + } +} + +// Delete - The operation to delete the run command. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +func (client *VirtualMachineRunCommandsClient) deleteOperation(ctx context.Context, resourceGroupName string, vmName string, runCommandName string, options *VirtualMachineRunCommandsClientBeginDeleteOptions) (*http.Response, error) { + req, err := client.deleteCreateRequest(ctx, resourceGroupName, vmName, runCommandName, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted, http.StatusNoContent) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// deleteCreateRequest creates the Delete request. +func (client *VirtualMachineRunCommandsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, vmName string, runCommandName string, options *VirtualMachineRunCommandsClientBeginDeleteOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/runCommands/{runCommandName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if vmName == "" { + return nil, errors.New("parameter vmName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmName}", url.PathEscape(vmName)) + if runCommandName == "" { + return nil, errors.New("parameter runCommandName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{runCommandName}", url.PathEscape(runCommandName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json, text/json"} + return req, nil +} + +// Get - Gets specific run command for a subscription in a location. +// If the operation fails it returns an *azcore.ResponseError type. 
+// Generated from API version 2022-03-01 +// location - The location upon which run commands is queried. +// commandID - The command id. +// options - VirtualMachineRunCommandsClientGetOptions contains the optional parameters for the VirtualMachineRunCommandsClient.Get +// method. +func (client *VirtualMachineRunCommandsClient) Get(ctx context.Context, location string, commandID string, options *VirtualMachineRunCommandsClientGetOptions) (VirtualMachineRunCommandsClientGetResponse, error) { + req, err := client.getCreateRequest(ctx, location, commandID, options) + if err != nil { + return VirtualMachineRunCommandsClientGetResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return VirtualMachineRunCommandsClientGetResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return VirtualMachineRunCommandsClientGetResponse{}, runtime.NewResponseError(resp) + } + return client.getHandleResponse(resp) +} + +// getCreateRequest creates the Get request. +func (client *VirtualMachineRunCommandsClient) getCreateRequest(ctx context.Context, location string, commandID string, options *VirtualMachineRunCommandsClientGetOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/runCommands/{commandId}" + if location == "" { + return nil, errors.New("parameter location cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{location}", url.PathEscape(location)) + if commandID == "" { + return nil, errors.New("parameter commandID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{commandId}", url.PathEscape(commandID)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json, text/json"} + return req, nil +} + +// getHandleResponse handles the Get response. +func (client *VirtualMachineRunCommandsClient) getHandleResponse(resp *http.Response) (VirtualMachineRunCommandsClientGetResponse, error) { + result := VirtualMachineRunCommandsClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.RunCommandDocument); err != nil { + return VirtualMachineRunCommandsClientGetResponse{}, err + } + return result, nil +} + +// GetByVirtualMachine - The operation to get the run command. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// vmName - The name of the virtual machine containing the run command. +// runCommandName - The name of the virtual machine run command. +// options - VirtualMachineRunCommandsClientGetByVirtualMachineOptions contains the optional parameters for the VirtualMachineRunCommandsClient.GetByVirtualMachine +// method. 
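+//
+// Illustrative sketch, not part of the generated file: Expand is the only
+// per-request option and surfaces as the $expand query parameter built below.
+// The value "instanceView" is an assumption about what the service accepts,
+// not something this file defines.
+//
+//	expand := "instanceView"
+//	resp, err := client.GetByVirtualMachine(ctx, "<resource-group>", "<vm>", "<run-command>",
+//		&VirtualMachineRunCommandsClientGetByVirtualMachineOptions{Expand: &expand})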
+func (client *VirtualMachineRunCommandsClient) GetByVirtualMachine(ctx context.Context, resourceGroupName string, vmName string, runCommandName string, options *VirtualMachineRunCommandsClientGetByVirtualMachineOptions) (VirtualMachineRunCommandsClientGetByVirtualMachineResponse, error) { + req, err := client.getByVirtualMachineCreateRequest(ctx, resourceGroupName, vmName, runCommandName, options) + if err != nil { + return VirtualMachineRunCommandsClientGetByVirtualMachineResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return VirtualMachineRunCommandsClientGetByVirtualMachineResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return VirtualMachineRunCommandsClientGetByVirtualMachineResponse{}, runtime.NewResponseError(resp) + } + return client.getByVirtualMachineHandleResponse(resp) +} + +// getByVirtualMachineCreateRequest creates the GetByVirtualMachine request. +func (client *VirtualMachineRunCommandsClient) getByVirtualMachineCreateRequest(ctx context.Context, resourceGroupName string, vmName string, runCommandName string, options *VirtualMachineRunCommandsClientGetByVirtualMachineOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/runCommands/{runCommandName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if vmName == "" { + return nil, errors.New("parameter vmName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmName}", url.PathEscape(vmName)) + if runCommandName == "" { + return nil, errors.New("parameter runCommandName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{runCommandName}", url.PathEscape(runCommandName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Expand != nil { + reqQP.Set("$expand", *options.Expand) + } + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json, text/json"} + return req, nil +} + +// getByVirtualMachineHandleResponse handles the GetByVirtualMachine response. +func (client *VirtualMachineRunCommandsClient) getByVirtualMachineHandleResponse(resp *http.Response) (VirtualMachineRunCommandsClientGetByVirtualMachineResponse, error) { + result := VirtualMachineRunCommandsClientGetByVirtualMachineResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.VirtualMachineRunCommand); err != nil { + return VirtualMachineRunCommandsClientGetByVirtualMachineResponse{}, err + } + return result, nil +} + +// NewListPager - Lists all available run commands for a subscription in a location. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// location - The location upon which run commands is queried. +// options - VirtualMachineRunCommandsClientListOptions contains the optional parameters for the VirtualMachineRunCommandsClient.List +// method. 
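+//
+// Illustrative sketch, not part of the generated file: pagers from this module
+// are consumed with More/NextPage; fetching stops once NextLink is empty, per
+// the More func below.
+//
+//	pager := client.NewListPager("<location>", nil)
+//	for pager.More() {
+//		page, err := pager.NextPage(ctx)
+//		if err != nil {
+//			// handle error and stop iterating
+//		}
+//		_ = page.RunCommandListResult // the documents for this page
+//	}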
+func (client *VirtualMachineRunCommandsClient) NewListPager(location string, options *VirtualMachineRunCommandsClientListOptions) *runtime.Pager[VirtualMachineRunCommandsClientListResponse] { + return runtime.NewPager(runtime.PagingHandler[VirtualMachineRunCommandsClientListResponse]{ + More: func(page VirtualMachineRunCommandsClientListResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *VirtualMachineRunCommandsClientListResponse) (VirtualMachineRunCommandsClientListResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listCreateRequest(ctx, location, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return VirtualMachineRunCommandsClientListResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return VirtualMachineRunCommandsClientListResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return VirtualMachineRunCommandsClientListResponse{}, runtime.NewResponseError(resp) + } + return client.listHandleResponse(resp) + }, + }) +} + +// listCreateRequest creates the List request. +func (client *VirtualMachineRunCommandsClient) listCreateRequest(ctx context.Context, location string, options *VirtualMachineRunCommandsClientListOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/runCommands" + if location == "" { + return nil, errors.New("parameter location cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{location}", url.PathEscape(location)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json, text/json"} + return req, nil +} + +// listHandleResponse handles the List response. +func (client *VirtualMachineRunCommandsClient) listHandleResponse(resp *http.Response) (VirtualMachineRunCommandsClientListResponse, error) { + result := VirtualMachineRunCommandsClientListResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.RunCommandListResult); err != nil { + return VirtualMachineRunCommandsClientListResponse{}, err + } + return result, nil +} + +// NewListByVirtualMachinePager - The operation to get all run commands of a Virtual Machine. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// vmName - The name of the virtual machine containing the run command. +// options - VirtualMachineRunCommandsClientListByVirtualMachineOptions contains the optional parameters for the VirtualMachineRunCommandsClient.ListByVirtualMachine +// method. 
+func (client *VirtualMachineRunCommandsClient) NewListByVirtualMachinePager(resourceGroupName string, vmName string, options *VirtualMachineRunCommandsClientListByVirtualMachineOptions) *runtime.Pager[VirtualMachineRunCommandsClientListByVirtualMachineResponse] { + return runtime.NewPager(runtime.PagingHandler[VirtualMachineRunCommandsClientListByVirtualMachineResponse]{ + More: func(page VirtualMachineRunCommandsClientListByVirtualMachineResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *VirtualMachineRunCommandsClientListByVirtualMachineResponse) (VirtualMachineRunCommandsClientListByVirtualMachineResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listByVirtualMachineCreateRequest(ctx, resourceGroupName, vmName, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return VirtualMachineRunCommandsClientListByVirtualMachineResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return VirtualMachineRunCommandsClientListByVirtualMachineResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return VirtualMachineRunCommandsClientListByVirtualMachineResponse{}, runtime.NewResponseError(resp) + } + return client.listByVirtualMachineHandleResponse(resp) + }, + }) +} + +// listByVirtualMachineCreateRequest creates the ListByVirtualMachine request. +func (client *VirtualMachineRunCommandsClient) listByVirtualMachineCreateRequest(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachineRunCommandsClientListByVirtualMachineOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/runCommands" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if vmName == "" { + return nil, errors.New("parameter vmName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmName}", url.PathEscape(vmName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Expand != nil { + reqQP.Set("$expand", *options.Expand) + } + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json, text/json"} + return req, nil +} + +// listByVirtualMachineHandleResponse handles the ListByVirtualMachine response. +func (client *VirtualMachineRunCommandsClient) listByVirtualMachineHandleResponse(resp *http.Response) (VirtualMachineRunCommandsClientListByVirtualMachineResponse, error) { + result := VirtualMachineRunCommandsClientListByVirtualMachineResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.VirtualMachineRunCommandsListResult); err != nil { + return VirtualMachineRunCommandsClientListByVirtualMachineResponse{}, err + } + return result, nil +} + +// BeginUpdate - The operation to update the run command. +// If the operation fails it returns an *azcore.ResponseError type. 
+// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// vmName - The name of the virtual machine where the run command should be updated. +// runCommandName - The name of the virtual machine run command. +// runCommand - Parameters supplied to the Update Virtual Machine RunCommand operation. +// options - VirtualMachineRunCommandsClientBeginUpdateOptions contains the optional parameters for the VirtualMachineRunCommandsClient.BeginUpdate +// method. +func (client *VirtualMachineRunCommandsClient) BeginUpdate(ctx context.Context, resourceGroupName string, vmName string, runCommandName string, runCommand VirtualMachineRunCommandUpdate, options *VirtualMachineRunCommandsClientBeginUpdateOptions) (*runtime.Poller[VirtualMachineRunCommandsClientUpdateResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.update(ctx, resourceGroupName, vmName, runCommandName, runCommand, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[VirtualMachineRunCommandsClientUpdateResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[VirtualMachineRunCommandsClientUpdateResponse](options.ResumeToken, client.pl, nil) + } +} + +// Update - The operation to update the run command. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +func (client *VirtualMachineRunCommandsClient) update(ctx context.Context, resourceGroupName string, vmName string, runCommandName string, runCommand VirtualMachineRunCommandUpdate, options *VirtualMachineRunCommandsClientBeginUpdateOptions) (*http.Response, error) { + req, err := client.updateCreateRequest(ctx, resourceGroupName, vmName, runCommandName, runCommand, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// updateCreateRequest creates the Update request. 
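+//
+// Editorial note: unlike createOrUpdateCreateRequest, the builder below issues
+// an HTTP PATCH and serializes the narrower VirtualMachineRunCommandUpdate
+// type as the JSON body, i.e. a partial update rather than a full replace.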
+func (client *VirtualMachineRunCommandsClient) updateCreateRequest(ctx context.Context, resourceGroupName string, vmName string, runCommandName string, runCommand VirtualMachineRunCommandUpdate, options *VirtualMachineRunCommandsClientBeginUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/runCommands/{runCommandName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if vmName == "" { + return nil, errors.New("parameter vmName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmName}", url.PathEscape(vmName)) + if runCommandName == "" { + return nil, errors.New("parameter runCommandName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{runCommandName}", url.PathEscape(runCommandName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json, text/json"} + return req, runtime.MarshalAsJSON(req, runCommand) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_virtualmachines_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_virtualmachines_client.go new file mode 100644 index 000000000..504deb60b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_virtualmachines_client.go @@ -0,0 +1,1639 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armcompute + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + armruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strconv" + "strings" +) + +// VirtualMachinesClient contains the methods for the VirtualMachines group. +// Don't use this type directly, use NewVirtualMachinesClient() instead. +type VirtualMachinesClient struct { + host string + subscriptionID string + pl runtime.Pipeline +} + +// NewVirtualMachinesClient creates a new instance of VirtualMachinesClient with the specified values. +// subscriptionID - Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms +// part of the URI for every service call. +// credential - used to authorize requests. Usually a credential from azidentity. +// options - pass nil to accept the default values. 
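+//
+// Illustrative sketch, not part of the generated file: a common construction
+// path pairs this client with a credential from the azidentity module, as the
+// comment above suggests.
+//
+//	cred, err := azidentity.NewDefaultAzureCredential(nil)
+//	if err != nil {
+//		// handle error
+//	}
+//	client, err := NewVirtualMachinesClient("<subscription-id>", cred, nil)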
+func NewVirtualMachinesClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*VirtualMachinesClient, error) { + if options == nil { + options = &arm.ClientOptions{} + } + ep := cloud.AzurePublic.Services[cloud.ResourceManager].Endpoint + if c, ok := options.Cloud.Services[cloud.ResourceManager]; ok { + ep = c.Endpoint + } + pl, err := armruntime.NewPipeline(moduleName, moduleVersion, credential, runtime.PipelineOptions{}, options) + if err != nil { + return nil, err + } + client := &VirtualMachinesClient{ + subscriptionID: subscriptionID, + host: ep, + pl: pl, + } + return client, nil +} + +// BeginAssessPatches - Assess patches on the VM. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// vmName - The name of the virtual machine. +// options - VirtualMachinesClientBeginAssessPatchesOptions contains the optional parameters for the VirtualMachinesClient.BeginAssessPatches +// method. +func (client *VirtualMachinesClient) BeginAssessPatches(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientBeginAssessPatchesOptions) (*runtime.Poller[VirtualMachinesClientAssessPatchesResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.assessPatches(ctx, resourceGroupName, vmName, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.pl, &runtime.NewPollerOptions[VirtualMachinesClientAssessPatchesResponse]{ + FinalStateVia: runtime.FinalStateViaLocation, + }) + } else { + return runtime.NewPollerFromResumeToken[VirtualMachinesClientAssessPatchesResponse](options.ResumeToken, client.pl, nil) + } +} + +// AssessPatches - Assess patches on the VM. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +func (client *VirtualMachinesClient) assessPatches(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientBeginAssessPatchesOptions) (*http.Response, error) { + req, err := client.assessPatchesCreateRequest(ctx, resourceGroupName, vmName, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// assessPatchesCreateRequest creates the AssessPatches request. 
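+//
+// Editorial note: the request built below is a body-less POST to the VM's
+// assessPatches action; the operation completes asynchronously, which is why
+// BeginAssessPatches above polls with FinalStateViaLocation.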
+func (client *VirtualMachinesClient) assessPatchesCreateRequest(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientBeginAssessPatchesOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/assessPatches" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if vmName == "" { + return nil, errors.New("parameter vmName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmName}", url.PathEscape(vmName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// BeginCapture - Captures the VM by copying virtual hard disks of the VM and outputs a template that can be used to create +// similar VMs. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// vmName - The name of the virtual machine. +// parameters - Parameters supplied to the Capture Virtual Machine operation. +// options - VirtualMachinesClientBeginCaptureOptions contains the optional parameters for the VirtualMachinesClient.BeginCapture +// method. +func (client *VirtualMachinesClient) BeginCapture(ctx context.Context, resourceGroupName string, vmName string, parameters VirtualMachineCaptureParameters, options *VirtualMachinesClientBeginCaptureOptions) (*runtime.Poller[VirtualMachinesClientCaptureResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.capture(ctx, resourceGroupName, vmName, parameters, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.pl, &runtime.NewPollerOptions[VirtualMachinesClientCaptureResponse]{ + FinalStateVia: runtime.FinalStateViaLocation, + }) + } else { + return runtime.NewPollerFromResumeToken[VirtualMachinesClientCaptureResponse](options.ResumeToken, client.pl, nil) + } +} + +// Capture - Captures the VM by copying virtual hard disks of the VM and outputs a template that can be used to create similar +// VMs. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +func (client *VirtualMachinesClient) capture(ctx context.Context, resourceGroupName string, vmName string, parameters VirtualMachineCaptureParameters, options *VirtualMachinesClientBeginCaptureOptions) (*http.Response, error) { + req, err := client.captureCreateRequest(ctx, resourceGroupName, vmName, parameters, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// captureCreateRequest creates the Capture request. 
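+//
+// Editorial note: unlike the body-less VM actions in this file, the Capture
+// request built below serializes VirtualMachineCaptureParameters as its JSON
+// body via runtime.MarshalAsJSON.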
+func (client *VirtualMachinesClient) captureCreateRequest(ctx context.Context, resourceGroupName string, vmName string, parameters VirtualMachineCaptureParameters, options *VirtualMachinesClientBeginCaptureOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/capture" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if vmName == "" { + return nil, errors.New("parameter vmName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmName}", url.PathEscape(vmName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, parameters) +} + +// BeginConvertToManagedDisks - Converts virtual machine disks from blob-based to managed disks. Virtual machine must be stop-deallocated +// before invoking this operation. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// vmName - The name of the virtual machine. +// options - VirtualMachinesClientBeginConvertToManagedDisksOptions contains the optional parameters for the VirtualMachinesClient.BeginConvertToManagedDisks +// method. +func (client *VirtualMachinesClient) BeginConvertToManagedDisks(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientBeginConvertToManagedDisksOptions) (*runtime.Poller[VirtualMachinesClientConvertToManagedDisksResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.convertToManagedDisks(ctx, resourceGroupName, vmName, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[VirtualMachinesClientConvertToManagedDisksResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[VirtualMachinesClientConvertToManagedDisksResponse](options.ResumeToken, client.pl, nil) + } +} + +// ConvertToManagedDisks - Converts virtual machine disks from blob-based to managed disks. Virtual machine must be stop-deallocated +// before invoking this operation. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +func (client *VirtualMachinesClient) convertToManagedDisks(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientBeginConvertToManagedDisksOptions) (*http.Response, error) { + req, err := client.convertToManagedDisksCreateRequest(ctx, resourceGroupName, vmName, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// convertToManagedDisksCreateRequest creates the ConvertToManagedDisks request. 
+func (client *VirtualMachinesClient) convertToManagedDisksCreateRequest(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientBeginConvertToManagedDisksOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/convertToManagedDisks" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if vmName == "" { + return nil, errors.New("parameter vmName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmName}", url.PathEscape(vmName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// BeginCreateOrUpdate - The operation to create or update a virtual machine. Please note some properties can be set only +// during virtual machine creation. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// vmName - The name of the virtual machine. +// parameters - Parameters supplied to the Create Virtual Machine operation. +// options - VirtualMachinesClientBeginCreateOrUpdateOptions contains the optional parameters for the VirtualMachinesClient.BeginCreateOrUpdate +// method. +func (client *VirtualMachinesClient) BeginCreateOrUpdate(ctx context.Context, resourceGroupName string, vmName string, parameters VirtualMachine, options *VirtualMachinesClientBeginCreateOrUpdateOptions) (*runtime.Poller[VirtualMachinesClientCreateOrUpdateResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.createOrUpdate(ctx, resourceGroupName, vmName, parameters, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[VirtualMachinesClientCreateOrUpdateResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[VirtualMachinesClientCreateOrUpdateResponse](options.ResumeToken, client.pl, nil) + } +} + +// CreateOrUpdate - The operation to create or update a virtual machine. Please note some properties can be set only during +// virtual machine creation. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +func (client *VirtualMachinesClient) createOrUpdate(ctx context.Context, resourceGroupName string, vmName string, parameters VirtualMachine, options *VirtualMachinesClientBeginCreateOrUpdateOptions) (*http.Response, error) { + req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, vmName, parameters, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// createOrUpdateCreateRequest creates the CreateOrUpdate request. 
+func (client *VirtualMachinesClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, vmName string, parameters VirtualMachine, options *VirtualMachinesClientBeginCreateOrUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if vmName == "" { + return nil, errors.New("parameter vmName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmName}", url.PathEscape(vmName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, parameters) +} + +// BeginDeallocate - Shuts down the virtual machine and releases the compute resources. You are not billed for the compute +// resources that this virtual machine uses. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// vmName - The name of the virtual machine. +// options - VirtualMachinesClientBeginDeallocateOptions contains the optional parameters for the VirtualMachinesClient.BeginDeallocate +// method. +func (client *VirtualMachinesClient) BeginDeallocate(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientBeginDeallocateOptions) (*runtime.Poller[VirtualMachinesClientDeallocateResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.deallocate(ctx, resourceGroupName, vmName, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[VirtualMachinesClientDeallocateResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[VirtualMachinesClientDeallocateResponse](options.ResumeToken, client.pl, nil) + } +} + +// Deallocate - Shuts down the virtual machine and releases the compute resources. You are not billed for the compute resources +// that this virtual machine uses. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +func (client *VirtualMachinesClient) deallocate(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientBeginDeallocateOptions) (*http.Response, error) { + req, err := client.deallocateCreateRequest(ctx, resourceGroupName, vmName, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// deallocateCreateRequest creates the Deallocate request. 
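+//
+// The ResumeToken branch above lets a poller be rehydrated in a later process;
+// a hedged sketch (persisting the token is the caller's concern, and the names
+// are hypothetical):
+//
+//	tk, err := poller.ResumeToken() // save tk somewhere durable
+//	// ...later, possibly in another process...
+//	poller, err = client.BeginDeallocate(ctx, "rg", "vm",
+//		&VirtualMachinesClientBeginDeallocateOptions{ResumeToken: tk})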
+func (client *VirtualMachinesClient) deallocateCreateRequest(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientBeginDeallocateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/deallocate" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if vmName == "" { + return nil, errors.New("parameter vmName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmName}", url.PathEscape(vmName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Hibernate != nil { + reqQP.Set("hibernate", strconv.FormatBool(*options.Hibernate)) + } + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// BeginDelete - The operation to delete a virtual machine. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// vmName - The name of the virtual machine. +// options - VirtualMachinesClientBeginDeleteOptions contains the optional parameters for the VirtualMachinesClient.BeginDelete +// method. +func (client *VirtualMachinesClient) BeginDelete(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientBeginDeleteOptions) (*runtime.Poller[VirtualMachinesClientDeleteResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.deleteOperation(ctx, resourceGroupName, vmName, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[VirtualMachinesClientDeleteResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[VirtualMachinesClientDeleteResponse](options.ResumeToken, client.pl, nil) + } +} + +// Delete - The operation to delete a virtual machine. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +func (client *VirtualMachinesClient) deleteOperation(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientBeginDeleteOptions) (*http.Response, error) { + req, err := client.deleteCreateRequest(ctx, resourceGroupName, vmName, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted, http.StatusNoContent) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// deleteCreateRequest creates the Delete request. 
+func (client *VirtualMachinesClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientBeginDeleteOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if vmName == "" { + return nil, errors.New("parameter vmName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmName}", url.PathEscape(vmName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.ForceDeletion != nil { + reqQP.Set("forceDeletion", strconv.FormatBool(*options.ForceDeletion)) + } + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// Generalize - Sets the OS state of the virtual machine to generalized. It is recommended to sysprep the virtual machine +// before performing this operation. For Windows, please refer to Create a managed image of a +// generalized VM in Azure [https://docs.microsoft.com/azure/virtual-machines/windows/capture-image-resource]. For Linux, +// please refer to How to create an image of a virtual machine or VHD +// [https://docs.microsoft.com/azure/virtual-machines/linux/capture-image]. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// vmName - The name of the virtual machine. +// options - VirtualMachinesClientGeneralizeOptions contains the optional parameters for the VirtualMachinesClient.Generalize +// method. +func (client *VirtualMachinesClient) Generalize(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientGeneralizeOptions) (VirtualMachinesClientGeneralizeResponse, error) { + req, err := client.generalizeCreateRequest(ctx, resourceGroupName, vmName, options) + if err != nil { + return VirtualMachinesClientGeneralizeResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return VirtualMachinesClientGeneralizeResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return VirtualMachinesClientGeneralizeResponse{}, runtime.NewResponseError(resp) + } + return VirtualMachinesClientGeneralizeResponse{}, nil +} + +// generalizeCreateRequest creates the Generalize request. 
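+//
+// Generalize above is synchronous and its success response carries no body, so
+// callers typically check only the returned error (a sketch; names are
+// hypothetical):
+//
+//	if _, err := client.Generalize(ctx, "rg", "vm", nil); err != nil {
+//		return err
+//	}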
+func (client *VirtualMachinesClient) generalizeCreateRequest(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientGeneralizeOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/generalize" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if vmName == "" { + return nil, errors.New("parameter vmName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmName}", url.PathEscape(vmName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// Get - Retrieves information about the model view or the instance view of a virtual machine. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// vmName - The name of the virtual machine. +// options - VirtualMachinesClientGetOptions contains the optional parameters for the VirtualMachinesClient.Get method. +func (client *VirtualMachinesClient) Get(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientGetOptions) (VirtualMachinesClientGetResponse, error) { + req, err := client.getCreateRequest(ctx, resourceGroupName, vmName, options) + if err != nil { + return VirtualMachinesClientGetResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return VirtualMachinesClientGetResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return VirtualMachinesClientGetResponse{}, runtime.NewResponseError(resp) + } + return client.getHandleResponse(resp) +} + +// getCreateRequest creates the Get request. 
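+//
+// The builder below appends $expand only when options.Expand is set; a hedged
+// sketch requesting the instance view inline with the model (assumes this
+// package's InstanceViewTypesInstanceView constant and the azcore to.Ptr
+// helper):
+//
+//	res, err := client.Get(ctx, "rg", "vm",
+//		&VirtualMachinesClientGetOptions{Expand: to.Ptr(InstanceViewTypesInstanceView)})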
+func (client *VirtualMachinesClient) getCreateRequest(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientGetOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if vmName == "" { + return nil, errors.New("parameter vmName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmName}", url.PathEscape(vmName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Expand != nil { + reqQP.Set("$expand", string(*options.Expand)) + } + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getHandleResponse handles the Get response. +func (client *VirtualMachinesClient) getHandleResponse(resp *http.Response) (VirtualMachinesClientGetResponse, error) { + result := VirtualMachinesClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.VirtualMachine); err != nil { + return VirtualMachinesClientGetResponse{}, err + } + return result, nil +} + +// BeginInstallPatches - Installs patches on the VM. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// vmName - The name of the virtual machine. +// installPatchesInput - Input for InstallPatches as directly received by the API +// options - VirtualMachinesClientBeginInstallPatchesOptions contains the optional parameters for the VirtualMachinesClient.BeginInstallPatches +// method. +func (client *VirtualMachinesClient) BeginInstallPatches(ctx context.Context, resourceGroupName string, vmName string, installPatchesInput VirtualMachineInstallPatchesParameters, options *VirtualMachinesClientBeginInstallPatchesOptions) (*runtime.Poller[VirtualMachinesClientInstallPatchesResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.installPatches(ctx, resourceGroupName, vmName, installPatchesInput, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.pl, &runtime.NewPollerOptions[VirtualMachinesClientInstallPatchesResponse]{ + FinalStateVia: runtime.FinalStateViaLocation, + }) + } else { + return runtime.NewPollerFromResumeToken[VirtualMachinesClientInstallPatchesResponse](options.ResumeToken, client.pl, nil) + } +} + +// InstallPatches - Installs patches on the VM. +// If the operation fails it returns an *azcore.ResponseError type. 
+// Generated from API version 2022-03-01 +func (client *VirtualMachinesClient) installPatches(ctx context.Context, resourceGroupName string, vmName string, installPatchesInput VirtualMachineInstallPatchesParameters, options *VirtualMachinesClientBeginInstallPatchesOptions) (*http.Response, error) { + req, err := client.installPatchesCreateRequest(ctx, resourceGroupName, vmName, installPatchesInput, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// installPatchesCreateRequest creates the InstallPatches request. +func (client *VirtualMachinesClient) installPatchesCreateRequest(ctx context.Context, resourceGroupName string, vmName string, installPatchesInput VirtualMachineInstallPatchesParameters, options *VirtualMachinesClientBeginInstallPatchesOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/installPatches" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if vmName == "" { + return nil, errors.New("parameter vmName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmName}", url.PathEscape(vmName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, installPatchesInput) +} + +// InstanceView - Retrieves information about the run-time state of a virtual machine. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// vmName - The name of the virtual machine. +// options - VirtualMachinesClientInstanceViewOptions contains the optional parameters for the VirtualMachinesClient.InstanceView +// method. +func (client *VirtualMachinesClient) InstanceView(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientInstanceViewOptions) (VirtualMachinesClientInstanceViewResponse, error) { + req, err := client.instanceViewCreateRequest(ctx, resourceGroupName, vmName, options) + if err != nil { + return VirtualMachinesClientInstanceViewResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return VirtualMachinesClientInstanceViewResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return VirtualMachinesClientInstanceViewResponse{}, runtime.NewResponseError(resp) + } + return client.instanceViewHandleResponse(resp) +} + +// instanceViewCreateRequest creates the InstanceView request. 
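+//
+// A hedged sketch of reading run-time state from InstanceView above (assumes
+// the instance view model exposes Statuses entries with a Code field):
+//
+//	res, err := client.InstanceView(ctx, "rg", "vm", nil)
+//	if err != nil {
+//		return err
+//	}
+//	for _, s := range res.VirtualMachineInstanceView.Statuses {
+//		fmt.Println(*s.Code) // e.g. "PowerState/running"
+//	}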
+func (client *VirtualMachinesClient) instanceViewCreateRequest(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientInstanceViewOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/instanceView" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if vmName == "" { + return nil, errors.New("parameter vmName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmName}", url.PathEscape(vmName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// instanceViewHandleResponse handles the InstanceView response. +func (client *VirtualMachinesClient) instanceViewHandleResponse(resp *http.Response) (VirtualMachinesClientInstanceViewResponse, error) { + result := VirtualMachinesClientInstanceViewResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.VirtualMachineInstanceView); err != nil { + return VirtualMachinesClientInstanceViewResponse{}, err + } + return result, nil +} + +// NewListPager - Lists all of the virtual machines in the specified resource group. Use the nextLink property in the response +// to get the next page of virtual machines. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// options - VirtualMachinesClientListOptions contains the optional parameters for the VirtualMachinesClient.List method. +func (client *VirtualMachinesClient) NewListPager(resourceGroupName string, options *VirtualMachinesClientListOptions) *runtime.Pager[VirtualMachinesClientListResponse] { + return runtime.NewPager(runtime.PagingHandler[VirtualMachinesClientListResponse]{ + More: func(page VirtualMachinesClientListResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *VirtualMachinesClientListResponse) (VirtualMachinesClientListResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listCreateRequest(ctx, resourceGroupName, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return VirtualMachinesClientListResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return VirtualMachinesClientListResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return VirtualMachinesClientListResponse{}, runtime.NewResponseError(resp) + } + return client.listHandleResponse(resp) + }, + }) +} + +// listCreateRequest creates the List request. 
+func (client *VirtualMachinesClient) listCreateRequest(ctx context.Context, resourceGroupName string, options *VirtualMachinesClientListOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Filter != nil { + reqQP.Set("$filter", *options.Filter) + } + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listHandleResponse handles the List response. +func (client *VirtualMachinesClient) listHandleResponse(resp *http.Response) (VirtualMachinesClientListResponse, error) { + result := VirtualMachinesClientListResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.VirtualMachineListResult); err != nil { + return VirtualMachinesClientListResponse{}, err + } + return result, nil +} + +// NewListAllPager - Lists all of the virtual machines in the specified subscription. Use the nextLink property in the response +// to get the next page of virtual machines. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// options - VirtualMachinesClientListAllOptions contains the optional parameters for the VirtualMachinesClient.ListAll method. +func (client *VirtualMachinesClient) NewListAllPager(options *VirtualMachinesClientListAllOptions) *runtime.Pager[VirtualMachinesClientListAllResponse] { + return runtime.NewPager(runtime.PagingHandler[VirtualMachinesClientListAllResponse]{ + More: func(page VirtualMachinesClientListAllResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *VirtualMachinesClientListAllResponse) (VirtualMachinesClientListAllResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listAllCreateRequest(ctx, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return VirtualMachinesClientListAllResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return VirtualMachinesClientListAllResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return VirtualMachinesClientListAllResponse{}, runtime.NewResponseError(resp) + } + return client.listAllHandleResponse(resp) + }, + }) +} + +// listAllCreateRequest creates the ListAll request. 
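+//
+// Both pagers defined here follow the standard azcore iteration pattern; a
+// sketch for the resource-group pager (assumes the embedded list result
+// exposes a Value slice; names are hypothetical):
+//
+//	pager := client.NewListPager("rg", nil)
+//	for pager.More() {
+//		page, err := pager.NextPage(ctx)
+//		if err != nil {
+//			return err
+//		}
+//		for _, vm := range page.Value {
+//			_ = vm // process each *VirtualMachine
+//		}
+//	}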
+func (client *VirtualMachinesClient) listAllCreateRequest(ctx context.Context, options *VirtualMachinesClientListAllOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/virtualMachines" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + if options != nil && options.StatusOnly != nil { + reqQP.Set("statusOnly", *options.StatusOnly) + } + if options != nil && options.Filter != nil { + reqQP.Set("$filter", *options.Filter) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listAllHandleResponse handles the ListAll response. +func (client *VirtualMachinesClient) listAllHandleResponse(resp *http.Response) (VirtualMachinesClientListAllResponse, error) { + result := VirtualMachinesClientListAllResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.VirtualMachineListResult); err != nil { + return VirtualMachinesClientListAllResponse{}, err + } + return result, nil +} + +// NewListAvailableSizesPager - Lists all available virtual machine sizes to which the specified virtual machine can be resized. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// vmName - The name of the virtual machine. +// options - VirtualMachinesClientListAvailableSizesOptions contains the optional parameters for the VirtualMachinesClient.ListAvailableSizes +// method. +func (client *VirtualMachinesClient) NewListAvailableSizesPager(resourceGroupName string, vmName string, options *VirtualMachinesClientListAvailableSizesOptions) *runtime.Pager[VirtualMachinesClientListAvailableSizesResponse] { + return runtime.NewPager(runtime.PagingHandler[VirtualMachinesClientListAvailableSizesResponse]{ + More: func(page VirtualMachinesClientListAvailableSizesResponse) bool { + return false + }, + Fetcher: func(ctx context.Context, page *VirtualMachinesClientListAvailableSizesResponse) (VirtualMachinesClientListAvailableSizesResponse, error) { + req, err := client.listAvailableSizesCreateRequest(ctx, resourceGroupName, vmName, options) + if err != nil { + return VirtualMachinesClientListAvailableSizesResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return VirtualMachinesClientListAvailableSizesResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return VirtualMachinesClientListAvailableSizesResponse{}, runtime.NewResponseError(resp) + } + return client.listAvailableSizesHandleResponse(resp) + }, + }) +} + +// listAvailableSizesCreateRequest creates the ListAvailableSizes request. 
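+//
+// Note that the available-sizes pager above reports More as always false, so
+// the service returns every size in a single page and one NextPage call drains
+// the pager (a sketch; names are hypothetical):
+//
+//	page, err := client.NewListAvailableSizesPager("rg", "vm", nil).NextPage(ctx)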
+func (client *VirtualMachinesClient) listAvailableSizesCreateRequest(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientListAvailableSizesOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/vmSizes" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if vmName == "" { + return nil, errors.New("parameter vmName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmName}", url.PathEscape(vmName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listAvailableSizesHandleResponse handles the ListAvailableSizes response. +func (client *VirtualMachinesClient) listAvailableSizesHandleResponse(resp *http.Response) (VirtualMachinesClientListAvailableSizesResponse, error) { + result := VirtualMachinesClientListAvailableSizesResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.VirtualMachineSizeListResult); err != nil { + return VirtualMachinesClientListAvailableSizesResponse{}, err + } + return result, nil +} + +// NewListByLocationPager - Gets all the virtual machines under the specified subscription for the specified location. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// location - The location for which virtual machines under the subscription are queried. +// options - VirtualMachinesClientListByLocationOptions contains the optional parameters for the VirtualMachinesClient.ListByLocation +// method. +func (client *VirtualMachinesClient) NewListByLocationPager(location string, options *VirtualMachinesClientListByLocationOptions) *runtime.Pager[VirtualMachinesClientListByLocationResponse] { + return runtime.NewPager(runtime.PagingHandler[VirtualMachinesClientListByLocationResponse]{ + More: func(page VirtualMachinesClientListByLocationResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *VirtualMachinesClientListByLocationResponse) (VirtualMachinesClientListByLocationResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listByLocationCreateRequest(ctx, location, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return VirtualMachinesClientListByLocationResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return VirtualMachinesClientListByLocationResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return VirtualMachinesClientListByLocationResponse{}, runtime.NewResponseError(resp) + } + return client.listByLocationHandleResponse(resp) + }, + }) +} + +// listByLocationCreateRequest creates the ListByLocation request. 
+func (client *VirtualMachinesClient) listByLocationCreateRequest(ctx context.Context, location string, options *VirtualMachinesClientListByLocationOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/virtualMachines" + if location == "" { + return nil, errors.New("parameter location cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{location}", url.PathEscape(location)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listByLocationHandleResponse handles the ListByLocation response. +func (client *VirtualMachinesClient) listByLocationHandleResponse(resp *http.Response) (VirtualMachinesClientListByLocationResponse, error) { + result := VirtualMachinesClientListByLocationResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.VirtualMachineListResult); err != nil { + return VirtualMachinesClientListByLocationResponse{}, err + } + return result, nil +} + +// BeginPerformMaintenance - The operation to perform maintenance on a virtual machine. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// vmName - The name of the virtual machine. +// options - VirtualMachinesClientBeginPerformMaintenanceOptions contains the optional parameters for the VirtualMachinesClient.BeginPerformMaintenance +// method. +func (client *VirtualMachinesClient) BeginPerformMaintenance(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientBeginPerformMaintenanceOptions) (*runtime.Poller[VirtualMachinesClientPerformMaintenanceResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.performMaintenance(ctx, resourceGroupName, vmName, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[VirtualMachinesClientPerformMaintenanceResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[VirtualMachinesClientPerformMaintenanceResponse](options.ResumeToken, client.pl, nil) + } +} + +// PerformMaintenance - The operation to perform maintenance on a virtual machine. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +func (client *VirtualMachinesClient) performMaintenance(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientBeginPerformMaintenanceOptions) (*http.Response, error) { + req, err := client.performMaintenanceCreateRequest(ctx, resourceGroupName, vmName, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// performMaintenanceCreateRequest creates the PerformMaintenance request. 
+func (client *VirtualMachinesClient) performMaintenanceCreateRequest(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientBeginPerformMaintenanceOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/performMaintenance" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if vmName == "" { + return nil, errors.New("parameter vmName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmName}", url.PathEscape(vmName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// BeginPowerOff - The operation to power off (stop) a virtual machine. The virtual machine can be restarted with the same +// provisioned resources. You are still charged for this virtual machine. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// vmName - The name of the virtual machine. +// options - VirtualMachinesClientBeginPowerOffOptions contains the optional parameters for the VirtualMachinesClient.BeginPowerOff +// method. +func (client *VirtualMachinesClient) BeginPowerOff(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientBeginPowerOffOptions) (*runtime.Poller[VirtualMachinesClientPowerOffResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.powerOff(ctx, resourceGroupName, vmName, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[VirtualMachinesClientPowerOffResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[VirtualMachinesClientPowerOffResponse](options.ResumeToken, client.pl, nil) + } +} + +// PowerOff - The operation to power off (stop) a virtual machine. The virtual machine can be restarted with the same provisioned +// resources. You are still charged for this virtual machine. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +func (client *VirtualMachinesClient) powerOff(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientBeginPowerOffOptions) (*http.Response, error) { + req, err := client.powerOffCreateRequest(ctx, resourceGroupName, vmName, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// powerOffCreateRequest creates the PowerOff request. 
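+//
+// The builder below forwards options.SkipShutdown as the skipShutdown query
+// parameter, which requests a non-graceful stop; a hedged sketch (assumes the
+// azcore to.Ptr helper; names are hypothetical):
+//
+//	poller, err := client.BeginPowerOff(ctx, "rg", "vm",
+//		&VirtualMachinesClientBeginPowerOffOptions{SkipShutdown: to.Ptr(true)})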
+func (client *VirtualMachinesClient) powerOffCreateRequest(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientBeginPowerOffOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/powerOff" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if vmName == "" { + return nil, errors.New("parameter vmName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmName}", url.PathEscape(vmName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.SkipShutdown != nil { + reqQP.Set("skipShutdown", strconv.FormatBool(*options.SkipShutdown)) + } + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// BeginReapply - The operation to reapply a virtual machine's state. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// vmName - The name of the virtual machine. +// options - VirtualMachinesClientBeginReapplyOptions contains the optional parameters for the VirtualMachinesClient.BeginReapply +// method. +func (client *VirtualMachinesClient) BeginReapply(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientBeginReapplyOptions) (*runtime.Poller[VirtualMachinesClientReapplyResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.reapply(ctx, resourceGroupName, vmName, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[VirtualMachinesClientReapplyResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[VirtualMachinesClientReapplyResponse](options.ResumeToken, client.pl, nil) + } +} + +// Reapply - The operation to reapply a virtual machine's state. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +func (client *VirtualMachinesClient) reapply(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientBeginReapplyOptions) (*http.Response, error) { + req, err := client.reapplyCreateRequest(ctx, resourceGroupName, vmName, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// reapplyCreateRequest creates the Reapply request. 
+func (client *VirtualMachinesClient) reapplyCreateRequest(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientBeginReapplyOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/reapply" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if vmName == "" { + return nil, errors.New("parameter vmName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmName}", url.PathEscape(vmName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// BeginRedeploy - Shuts down the virtual machine, moves it to a new node, and powers it back on. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// vmName - The name of the virtual machine. +// options - VirtualMachinesClientBeginRedeployOptions contains the optional parameters for the VirtualMachinesClient.BeginRedeploy +// method. +func (client *VirtualMachinesClient) BeginRedeploy(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientBeginRedeployOptions) (*runtime.Poller[VirtualMachinesClientRedeployResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.redeploy(ctx, resourceGroupName, vmName, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[VirtualMachinesClientRedeployResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[VirtualMachinesClientRedeployResponse](options.ResumeToken, client.pl, nil) + } +} + +// Redeploy - Shuts down the virtual machine, moves it to a new node, and powers it back on. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +func (client *VirtualMachinesClient) redeploy(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientBeginRedeployOptions) (*http.Response, error) { + req, err := client.redeployCreateRequest(ctx, resourceGroupName, vmName, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// redeployCreateRequest creates the Redeploy request. 
+func (client *VirtualMachinesClient) redeployCreateRequest(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientBeginRedeployOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/redeploy" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if vmName == "" { + return nil, errors.New("parameter vmName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmName}", url.PathEscape(vmName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// BeginReimage - Reimages the virtual machine which has an ephemeral OS disk back to its initial state. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// vmName - The name of the virtual machine. +// options - VirtualMachinesClientBeginReimageOptions contains the optional parameters for the VirtualMachinesClient.BeginReimage +// method. +func (client *VirtualMachinesClient) BeginReimage(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientBeginReimageOptions) (*runtime.Poller[VirtualMachinesClientReimageResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.reimage(ctx, resourceGroupName, vmName, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[VirtualMachinesClientReimageResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[VirtualMachinesClientReimageResponse](options.ResumeToken, client.pl, nil) + } +} + +// Reimage - Reimages the virtual machine which has an ephemeral OS disk back to its initial state. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +func (client *VirtualMachinesClient) reimage(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientBeginReimageOptions) (*http.Response, error) { + req, err := client.reimageCreateRequest(ctx, resourceGroupName, vmName, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// reimageCreateRequest creates the Reimage request. 
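+//
+// The builder below marshals options.Parameters only when it is set; a hedged
+// sketch that also reimages the temp disk (assumes this package's
+// VirtualMachineReimageParameters model and the azcore to.Ptr helper):
+//
+//	poller, err := client.BeginReimage(ctx, "rg", "vm",
+//		&VirtualMachinesClientBeginReimageOptions{
+//			Parameters: &VirtualMachineReimageParameters{TempDisk: to.Ptr(true)},
+//		})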
+func (client *VirtualMachinesClient) reimageCreateRequest(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientBeginReimageOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/reimage" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if vmName == "" { + return nil, errors.New("parameter vmName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmName}", url.PathEscape(vmName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + if options != nil && options.Parameters != nil { + return req, runtime.MarshalAsJSON(req, *options.Parameters) + } + return req, nil +} + +// BeginRestart - The operation to restart a virtual machine. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// vmName - The name of the virtual machine. +// options - VirtualMachinesClientBeginRestartOptions contains the optional parameters for the VirtualMachinesClient.BeginRestart +// method. +func (client *VirtualMachinesClient) BeginRestart(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientBeginRestartOptions) (*runtime.Poller[VirtualMachinesClientRestartResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.restart(ctx, resourceGroupName, vmName, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[VirtualMachinesClientRestartResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[VirtualMachinesClientRestartResponse](options.ResumeToken, client.pl, nil) + } +} + +// Restart - The operation to restart a virtual machine. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +func (client *VirtualMachinesClient) restart(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientBeginRestartOptions) (*http.Response, error) { + req, err := client.restartCreateRequest(ctx, resourceGroupName, vmName, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// restartCreateRequest creates the Restart request. 
+func (client *VirtualMachinesClient) restartCreateRequest(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientBeginRestartOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/restart" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if vmName == "" { + return nil, errors.New("parameter vmName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmName}", url.PathEscape(vmName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// RetrieveBootDiagnosticsData - The operation to retrieve SAS URIs for a virtual machine's boot diagnostic logs. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// vmName - The name of the virtual machine. +// options - VirtualMachinesClientRetrieveBootDiagnosticsDataOptions contains the optional parameters for the VirtualMachinesClient.RetrieveBootDiagnosticsData +// method. +func (client *VirtualMachinesClient) RetrieveBootDiagnosticsData(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientRetrieveBootDiagnosticsDataOptions) (VirtualMachinesClientRetrieveBootDiagnosticsDataResponse, error) { + req, err := client.retrieveBootDiagnosticsDataCreateRequest(ctx, resourceGroupName, vmName, options) + if err != nil { + return VirtualMachinesClientRetrieveBootDiagnosticsDataResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return VirtualMachinesClientRetrieveBootDiagnosticsDataResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return VirtualMachinesClientRetrieveBootDiagnosticsDataResponse{}, runtime.NewResponseError(resp) + } + return client.retrieveBootDiagnosticsDataHandleResponse(resp) +} + +// retrieveBootDiagnosticsDataCreateRequest creates the RetrieveBootDiagnosticsData request. 
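+//
+// The builder below forwards options.SasURIExpirationTimeInMinutes as the
+// sasUriExpirationTimeInMinutes query parameter; a hedged sketch requesting
+// SAS URIs valid for two hours (assumes the azcore to.Ptr helper; names are
+// hypothetical):
+//
+//	res, err := client.RetrieveBootDiagnosticsData(ctx, "rg", "vm",
+//		&VirtualMachinesClientRetrieveBootDiagnosticsDataOptions{
+//			SasURIExpirationTimeInMinutes: to.Ptr[int32](120),
+//		})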
+func (client *VirtualMachinesClient) retrieveBootDiagnosticsDataCreateRequest(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientRetrieveBootDiagnosticsDataOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/retrieveBootDiagnosticsData" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if vmName == "" { + return nil, errors.New("parameter vmName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmName}", url.PathEscape(vmName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.SasURIExpirationTimeInMinutes != nil { + reqQP.Set("sasUriExpirationTimeInMinutes", strconv.FormatInt(int64(*options.SasURIExpirationTimeInMinutes), 10)) + } + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// retrieveBootDiagnosticsDataHandleResponse handles the RetrieveBootDiagnosticsData response. +func (client *VirtualMachinesClient) retrieveBootDiagnosticsDataHandleResponse(resp *http.Response) (VirtualMachinesClientRetrieveBootDiagnosticsDataResponse, error) { + result := VirtualMachinesClientRetrieveBootDiagnosticsDataResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.RetrieveBootDiagnosticsDataResult); err != nil { + return VirtualMachinesClientRetrieveBootDiagnosticsDataResponse{}, err + } + return result, nil +} + +// BeginRunCommand - Run command on the VM. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// vmName - The name of the virtual machine. +// parameters - Parameters supplied to the Run command operation. +// options - VirtualMachinesClientBeginRunCommandOptions contains the optional parameters for the VirtualMachinesClient.BeginRunCommand +// method. +func (client *VirtualMachinesClient) BeginRunCommand(ctx context.Context, resourceGroupName string, vmName string, parameters RunCommandInput, options *VirtualMachinesClientBeginRunCommandOptions) (*runtime.Poller[VirtualMachinesClientRunCommandResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.runCommand(ctx, resourceGroupName, vmName, parameters, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.pl, &runtime.NewPollerOptions[VirtualMachinesClientRunCommandResponse]{ + FinalStateVia: runtime.FinalStateViaLocation, + }) + } else { + return runtime.NewPollerFromResumeToken[VirtualMachinesClientRunCommandResponse](options.ResumeToken, client.pl, nil) + } +} + +// RunCommand - Run command on the VM. +// If the operation fails it returns an *azcore.ResponseError type. 
+// Generated from API version 2022-03-01
+func (client *VirtualMachinesClient) runCommand(ctx context.Context, resourceGroupName string, vmName string, parameters RunCommandInput, options *VirtualMachinesClientBeginRunCommandOptions) (*http.Response, error) {
+	req, err := client.runCommandCreateRequest(ctx, resourceGroupName, vmName, parameters, options)
+	if err != nil {
+		return nil, err
+	}
+	resp, err := client.pl.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) {
+		return nil, runtime.NewResponseError(resp)
+	}
+	return resp, nil
+}
+
+// runCommandCreateRequest creates the RunCommand request.
+func (client *VirtualMachinesClient) runCommandCreateRequest(ctx context.Context, resourceGroupName string, vmName string, parameters RunCommandInput, options *VirtualMachinesClientBeginRunCommandOptions) (*policy.Request, error) {
+	urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/runCommand"
+	if resourceGroupName == "" {
+		return nil, errors.New("parameter resourceGroupName cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName))
+	if vmName == "" {
+		return nil, errors.New("parameter vmName cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{vmName}", url.PathEscape(vmName))
+	if client.subscriptionID == "" {
+		return nil, errors.New("parameter client.subscriptionID cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID))
+	req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath))
+	if err != nil {
+		return nil, err
+	}
+	reqQP := req.Raw().URL.Query()
+	reqQP.Set("api-version", "2022-03-01")
+	req.Raw().URL.RawQuery = reqQP.Encode()
+	req.Raw().Header["Accept"] = []string{"application/json, text/json"}
+	return req, runtime.MarshalAsJSON(req, parameters)
+}
+
+// SimulateEviction - The operation to simulate the eviction of a spot virtual machine.
+// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2022-03-01
+// resourceGroupName - The name of the resource group.
+// vmName - The name of the virtual machine.
+// options - VirtualMachinesClientSimulateEvictionOptions contains the optional parameters for the VirtualMachinesClient.SimulateEviction
+// method.
+func (client *VirtualMachinesClient) SimulateEviction(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientSimulateEvictionOptions) (VirtualMachinesClientSimulateEvictionResponse, error) {
+	req, err := client.simulateEvictionCreateRequest(ctx, resourceGroupName, vmName, options)
+	if err != nil {
+		return VirtualMachinesClientSimulateEvictionResponse{}, err
+	}
+	resp, err := client.pl.Do(req)
+	if err != nil {
+		return VirtualMachinesClientSimulateEvictionResponse{}, err
+	}
+	if !runtime.HasStatusCode(resp, http.StatusNoContent) {
+		return VirtualMachinesClientSimulateEvictionResponse{}, runtime.NewResponseError(resp)
+	}
+	return VirtualMachinesClientSimulateEvictionResponse{}, nil
+}
+
+// simulateEvictionCreateRequest creates the SimulateEviction request.
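+//
+// SimulateEviction above succeeds with 204 No Content and an empty response
+// struct, so only the returned error is meaningful (a sketch; the spot VM name
+// is hypothetical):
+//
+//	if _, err := client.SimulateEviction(ctx, "rg", "spotVM", nil); err != nil {
+//		return err
+//	}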
+func (client *VirtualMachinesClient) simulateEvictionCreateRequest(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientSimulateEvictionOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/simulateEviction" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if vmName == "" { + return nil, errors.New("parameter vmName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmName}", url.PathEscape(vmName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// BeginStart - The operation to start a virtual machine. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// vmName - The name of the virtual machine. +// options - VirtualMachinesClientBeginStartOptions contains the optional parameters for the VirtualMachinesClient.BeginStart +// method. +func (client *VirtualMachinesClient) BeginStart(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientBeginStartOptions) (*runtime.Poller[VirtualMachinesClientStartResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.start(ctx, resourceGroupName, vmName, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[VirtualMachinesClientStartResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[VirtualMachinesClientStartResponse](options.ResumeToken, client.pl, nil) + } +} + +// Start - The operation to start a virtual machine. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +func (client *VirtualMachinesClient) start(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientBeginStartOptions) (*http.Response, error) { + req, err := client.startCreateRequest(ctx, resourceGroupName, vmName, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// startCreateRequest creates the Start request. 
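
Start follows the same poller pattern but carries no payload in either direction; a sketch with placeholder names:

func startVM(ctx context.Context, client *armcompute.VirtualMachinesClient) error {
	poller, err := client.BeginStart(ctx, "my-rg", "my-vm", nil)
	if err != nil {
		return err
	}
	// The response body is empty; polling to completion surfaces any failure.
	_, err = poller.PollUntilDone(ctx, nil)
	return err
}
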
+func (client *VirtualMachinesClient) startCreateRequest(ctx context.Context, resourceGroupName string, vmName string, options *VirtualMachinesClientBeginStartOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/start" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if vmName == "" { + return nil, errors.New("parameter vmName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmName}", url.PathEscape(vmName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// BeginUpdate - The operation to update a virtual machine. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// vmName - The name of the virtual machine. +// parameters - Parameters supplied to the Update Virtual Machine operation. +// options - VirtualMachinesClientBeginUpdateOptions contains the optional parameters for the VirtualMachinesClient.BeginUpdate +// method. +func (client *VirtualMachinesClient) BeginUpdate(ctx context.Context, resourceGroupName string, vmName string, parameters VirtualMachineUpdate, options *VirtualMachinesClientBeginUpdateOptions) (*runtime.Poller[VirtualMachinesClientUpdateResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.update(ctx, resourceGroupName, vmName, parameters, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[VirtualMachinesClientUpdateResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[VirtualMachinesClientUpdateResponse](options.ResumeToken, client.pl, nil) + } +} + +// Update - The operation to update a virtual machine. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +func (client *VirtualMachinesClient) update(ctx context.Context, resourceGroupName string, vmName string, parameters VirtualMachineUpdate, options *VirtualMachinesClientBeginUpdateOptions) (*http.Response, error) { + req, err := client.updateCreateRequest(ctx, resourceGroupName, vmName, parameters, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// updateCreateRequest creates the Update request. 
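
Because Update is issued as a PATCH, a sparse VirtualMachineUpdate payload changes only what it names and leaves the VM's other properties untouched. A tags-only sketch (placeholder names again):

func tagVM(ctx context.Context, client *armcompute.VirtualMachinesClient) error {
	poller, err := client.BeginUpdate(ctx, "my-rg", "my-vm", armcompute.VirtualMachineUpdate{
		Tags: map[string]*string{"env": to.Ptr("dev")}, // only the tags are sent
	}, nil)
	if err != nil {
		return err
	}
	_, err = poller.PollUntilDone(ctx, nil)
	return err
}
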
+func (client *VirtualMachinesClient) updateCreateRequest(ctx context.Context, resourceGroupName string, vmName string, parameters VirtualMachineUpdate, options *VirtualMachinesClientBeginUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if vmName == "" { + return nil, errors.New("parameter vmName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmName}", url.PathEscape(vmName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, parameters) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_virtualmachinescalesetextensions_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_virtualmachinescalesetextensions_client.go new file mode 100644 index 000000000..06b808c2d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_virtualmachinescalesetextensions_client.go @@ -0,0 +1,397 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armcompute + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + armruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strings" +) + +// VirtualMachineScaleSetExtensionsClient contains the methods for the VirtualMachineScaleSetExtensions group. +// Don't use this type directly, use NewVirtualMachineScaleSetExtensionsClient() instead. +type VirtualMachineScaleSetExtensionsClient struct { + host string + subscriptionID string + pl runtime.Pipeline +} + +// NewVirtualMachineScaleSetExtensionsClient creates a new instance of VirtualMachineScaleSetExtensionsClient with the specified values. +// subscriptionID - Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms +// part of the URI for every service call. +// credential - used to authorize requests. Usually a credential from azidentity. +// options - pass nil to accept the default values. 
+func NewVirtualMachineScaleSetExtensionsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*VirtualMachineScaleSetExtensionsClient, error) {
+	if options == nil {
+		options = &arm.ClientOptions{}
+	}
+	ep := cloud.AzurePublic.Services[cloud.ResourceManager].Endpoint
+	if c, ok := options.Cloud.Services[cloud.ResourceManager]; ok {
+		ep = c.Endpoint
+	}
+	pl, err := armruntime.NewPipeline(moduleName, moduleVersion, credential, runtime.PipelineOptions{}, options)
+	if err != nil {
+		return nil, err
+	}
+	client := &VirtualMachineScaleSetExtensionsClient{
+		subscriptionID: subscriptionID,
+		host:           ep,
+		pl:             pl,
+	}
+	return client, nil
+}
+
+// BeginCreateOrUpdate - The operation to create or update an extension.
+// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2022-03-01
+// resourceGroupName - The name of the resource group.
+// vmScaleSetName - The name of the VM scale set where the extension should be created or updated.
+// vmssExtensionName - The name of the VM scale set extension.
+// extensionParameters - Parameters supplied to the Create VM scale set Extension operation.
+// options - VirtualMachineScaleSetExtensionsClientBeginCreateOrUpdateOptions contains the optional parameters for the VirtualMachineScaleSetExtensionsClient.BeginCreateOrUpdate
+// method.
+func (client *VirtualMachineScaleSetExtensionsClient) BeginCreateOrUpdate(ctx context.Context, resourceGroupName string, vmScaleSetName string, vmssExtensionName string, extensionParameters VirtualMachineScaleSetExtension, options *VirtualMachineScaleSetExtensionsClientBeginCreateOrUpdateOptions) (*runtime.Poller[VirtualMachineScaleSetExtensionsClientCreateOrUpdateResponse], error) {
+	if options == nil || options.ResumeToken == "" {
+		resp, err := client.createOrUpdate(ctx, resourceGroupName, vmScaleSetName, vmssExtensionName, extensionParameters, options)
+		if err != nil {
+			return nil, err
+		}
+		return runtime.NewPoller[VirtualMachineScaleSetExtensionsClientCreateOrUpdateResponse](resp, client.pl, nil)
+	} else {
+		return runtime.NewPollerFromResumeToken[VirtualMachineScaleSetExtensionsClientCreateOrUpdateResponse](options.ResumeToken, client.pl, nil)
+	}
+}
+
+// CreateOrUpdate - The operation to create or update an extension.
+// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2022-03-01
+func (client *VirtualMachineScaleSetExtensionsClient) createOrUpdate(ctx context.Context, resourceGroupName string, vmScaleSetName string, vmssExtensionName string, extensionParameters VirtualMachineScaleSetExtension, options *VirtualMachineScaleSetExtensionsClientBeginCreateOrUpdateOptions) (*http.Response, error) {
+	req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, vmScaleSetName, vmssExtensionName, extensionParameters, options)
+	if err != nil {
+		return nil, err
+	}
+	resp, err := client.pl.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) {
+		return nil, runtime.NewResponseError(resp)
+	}
+	return resp, nil
+}
+
+// createOrUpdateCreateRequest creates the CreateOrUpdate request.
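
A sketch of installing an extension on a scale set with this client. The publisher/type/version name the common Linux custom-script extension but are illustrative, as are the resource names; the setup from the first sketch is assumed:

func installVMSSExtension(ctx context.Context, client *armcompute.VirtualMachineScaleSetExtensionsClient) error {
	poller, err := client.BeginCreateOrUpdate(ctx, "my-rg", "my-vmss", "customScript",
		armcompute.VirtualMachineScaleSetExtension{
			Properties: &armcompute.VirtualMachineScaleSetExtensionProperties{
				Publisher:          to.Ptr("Microsoft.Azure.Extensions"),
				Type:               to.Ptr("CustomScript"),
				TypeHandlerVersion: to.Ptr("2.1"),
			},
		}, nil)
	if err != nil {
		return err
	}
	// The PUT returns 200 or 201; the poller tracks provisioning to completion.
	_, err = poller.PollUntilDone(ctx, nil)
	return err
}
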
+func (client *VirtualMachineScaleSetExtensionsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, vmScaleSetName string, vmssExtensionName string, extensionParameters VirtualMachineScaleSetExtension, options *VirtualMachineScaleSetExtensionsClientBeginCreateOrUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensions/{vmssExtensionName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if vmScaleSetName == "" { + return nil, errors.New("parameter vmScaleSetName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmScaleSetName}", url.PathEscape(vmScaleSetName)) + if vmssExtensionName == "" { + return nil, errors.New("parameter vmssExtensionName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmssExtensionName}", url.PathEscape(vmssExtensionName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, extensionParameters) +} + +// BeginDelete - The operation to delete the extension. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// vmScaleSetName - The name of the VM scale set where the extension should be deleted. +// vmssExtensionName - The name of the VM scale set extension. +// options - VirtualMachineScaleSetExtensionsClientBeginDeleteOptions contains the optional parameters for the VirtualMachineScaleSetExtensionsClient.BeginDelete +// method. +func (client *VirtualMachineScaleSetExtensionsClient) BeginDelete(ctx context.Context, resourceGroupName string, vmScaleSetName string, vmssExtensionName string, options *VirtualMachineScaleSetExtensionsClientBeginDeleteOptions) (*runtime.Poller[VirtualMachineScaleSetExtensionsClientDeleteResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.deleteOperation(ctx, resourceGroupName, vmScaleSetName, vmssExtensionName, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[VirtualMachineScaleSetExtensionsClientDeleteResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[VirtualMachineScaleSetExtensionsClientDeleteResponse](options.ResumeToken, client.pl, nil) + } +} + +// Delete - The operation to delete the extension. +// If the operation fails it returns an *azcore.ResponseError type. 
+// Generated from API version 2022-03-01 +func (client *VirtualMachineScaleSetExtensionsClient) deleteOperation(ctx context.Context, resourceGroupName string, vmScaleSetName string, vmssExtensionName string, options *VirtualMachineScaleSetExtensionsClientBeginDeleteOptions) (*http.Response, error) { + req, err := client.deleteCreateRequest(ctx, resourceGroupName, vmScaleSetName, vmssExtensionName, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted, http.StatusNoContent) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// deleteCreateRequest creates the Delete request. +func (client *VirtualMachineScaleSetExtensionsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, vmScaleSetName string, vmssExtensionName string, options *VirtualMachineScaleSetExtensionsClientBeginDeleteOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensions/{vmssExtensionName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if vmScaleSetName == "" { + return nil, errors.New("parameter vmScaleSetName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmScaleSetName}", url.PathEscape(vmScaleSetName)) + if vmssExtensionName == "" { + return nil, errors.New("parameter vmssExtensionName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmssExtensionName}", url.PathEscape(vmssExtensionName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// Get - The operation to get the extension. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// vmScaleSetName - The name of the VM scale set containing the extension. +// vmssExtensionName - The name of the VM scale set extension. +// options - VirtualMachineScaleSetExtensionsClientGetOptions contains the optional parameters for the VirtualMachineScaleSetExtensionsClient.Get +// method. 
+func (client *VirtualMachineScaleSetExtensionsClient) Get(ctx context.Context, resourceGroupName string, vmScaleSetName string, vmssExtensionName string, options *VirtualMachineScaleSetExtensionsClientGetOptions) (VirtualMachineScaleSetExtensionsClientGetResponse, error) { + req, err := client.getCreateRequest(ctx, resourceGroupName, vmScaleSetName, vmssExtensionName, options) + if err != nil { + return VirtualMachineScaleSetExtensionsClientGetResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return VirtualMachineScaleSetExtensionsClientGetResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return VirtualMachineScaleSetExtensionsClientGetResponse{}, runtime.NewResponseError(resp) + } + return client.getHandleResponse(resp) +} + +// getCreateRequest creates the Get request. +func (client *VirtualMachineScaleSetExtensionsClient) getCreateRequest(ctx context.Context, resourceGroupName string, vmScaleSetName string, vmssExtensionName string, options *VirtualMachineScaleSetExtensionsClientGetOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensions/{vmssExtensionName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if vmScaleSetName == "" { + return nil, errors.New("parameter vmScaleSetName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmScaleSetName}", url.PathEscape(vmScaleSetName)) + if vmssExtensionName == "" { + return nil, errors.New("parameter vmssExtensionName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmssExtensionName}", url.PathEscape(vmssExtensionName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Expand != nil { + reqQP.Set("$expand", *options.Expand) + } + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getHandleResponse handles the Get response. +func (client *VirtualMachineScaleSetExtensionsClient) getHandleResponse(resp *http.Response) (VirtualMachineScaleSetExtensionsClientGetResponse, error) { + result := VirtualMachineScaleSetExtensionsClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.VirtualMachineScaleSetExtension); err != nil { + return VirtualMachineScaleSetExtensionsClientGetResponse{}, err + } + return result, nil +} + +// NewListPager - Gets a list of all extensions in a VM scale set. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// vmScaleSetName - The name of the VM scale set containing the extension. +// options - VirtualMachineScaleSetExtensionsClientListOptions contains the optional parameters for the VirtualMachineScaleSetExtensionsClient.List +// method. 
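
Get is a plain synchronous read whose decoded payload is embedded in the response struct; a sketch with placeholder names:

func getVMSSExtension(ctx context.Context, client *armcompute.VirtualMachineScaleSetExtensionsClient) error {
	res, err := client.Get(ctx, "my-rg", "my-vmss", "customScript", nil)
	if err != nil {
		return err
	}
	// Read-only fields may be nil, so guard before dereferencing.
	if res.Properties != nil && res.Properties.ProvisioningState != nil {
		fmt.Println("provisioning state:", *res.Properties.ProvisioningState)
	}
	return nil
}
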
+func (client *VirtualMachineScaleSetExtensionsClient) NewListPager(resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetExtensionsClientListOptions) *runtime.Pager[VirtualMachineScaleSetExtensionsClientListResponse] { + return runtime.NewPager(runtime.PagingHandler[VirtualMachineScaleSetExtensionsClientListResponse]{ + More: func(page VirtualMachineScaleSetExtensionsClientListResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *VirtualMachineScaleSetExtensionsClientListResponse) (VirtualMachineScaleSetExtensionsClientListResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listCreateRequest(ctx, resourceGroupName, vmScaleSetName, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return VirtualMachineScaleSetExtensionsClientListResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return VirtualMachineScaleSetExtensionsClientListResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return VirtualMachineScaleSetExtensionsClientListResponse{}, runtime.NewResponseError(resp) + } + return client.listHandleResponse(resp) + }, + }) +} + +// listCreateRequest creates the List request. +func (client *VirtualMachineScaleSetExtensionsClient) listCreateRequest(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetExtensionsClientListOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensions" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if vmScaleSetName == "" { + return nil, errors.New("parameter vmScaleSetName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmScaleSetName}", url.PathEscape(vmScaleSetName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listHandleResponse handles the List response. +func (client *VirtualMachineScaleSetExtensionsClient) listHandleResponse(resp *http.Response) (VirtualMachineScaleSetExtensionsClientListResponse, error) { + result := VirtualMachineScaleSetExtensionsClientListResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.VirtualMachineScaleSetExtensionListResult); err != nil { + return VirtualMachineScaleSetExtensionsClientListResponse{}, err + } + return result, nil +} + +// BeginUpdate - The operation to update an extension. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// vmScaleSetName - The name of the VM scale set where the extension should be updated. +// vmssExtensionName - The name of the VM scale set extension. 
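
The pager above keeps fetching while a NextLink is present; caller-side iteration looks like this sketch (placeholder names):

func listVMSSExtensions(ctx context.Context, client *armcompute.VirtualMachineScaleSetExtensionsClient) error {
	pager := client.NewListPager("my-rg", "my-vmss", nil)
	for pager.More() {
		page, err := pager.NextPage(ctx) // first call hits the list URL, later calls follow NextLink
		if err != nil {
			return err
		}
		for _, ext := range page.Value {
			fmt.Println(*ext.Name)
		}
	}
	return nil
}
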
+// extensionParameters - Parameters supplied to the Update VM scale set Extension operation. +// options - VirtualMachineScaleSetExtensionsClientBeginUpdateOptions contains the optional parameters for the VirtualMachineScaleSetExtensionsClient.BeginUpdate +// method. +func (client *VirtualMachineScaleSetExtensionsClient) BeginUpdate(ctx context.Context, resourceGroupName string, vmScaleSetName string, vmssExtensionName string, extensionParameters VirtualMachineScaleSetExtensionUpdate, options *VirtualMachineScaleSetExtensionsClientBeginUpdateOptions) (*runtime.Poller[VirtualMachineScaleSetExtensionsClientUpdateResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.update(ctx, resourceGroupName, vmScaleSetName, vmssExtensionName, extensionParameters, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[VirtualMachineScaleSetExtensionsClientUpdateResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[VirtualMachineScaleSetExtensionsClientUpdateResponse](options.ResumeToken, client.pl, nil) + } +} + +// Update - The operation to update an extension. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +func (client *VirtualMachineScaleSetExtensionsClient) update(ctx context.Context, resourceGroupName string, vmScaleSetName string, vmssExtensionName string, extensionParameters VirtualMachineScaleSetExtensionUpdate, options *VirtualMachineScaleSetExtensionsClientBeginUpdateOptions) (*http.Response, error) { + req, err := client.updateCreateRequest(ctx, resourceGroupName, vmScaleSetName, vmssExtensionName, extensionParameters, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// updateCreateRequest creates the Update request. 
+func (client *VirtualMachineScaleSetExtensionsClient) updateCreateRequest(ctx context.Context, resourceGroupName string, vmScaleSetName string, vmssExtensionName string, extensionParameters VirtualMachineScaleSetExtensionUpdate, options *VirtualMachineScaleSetExtensionsClientBeginUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensions/{vmssExtensionName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if vmScaleSetName == "" { + return nil, errors.New("parameter vmScaleSetName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmScaleSetName}", url.PathEscape(vmScaleSetName)) + if vmssExtensionName == "" { + return nil, errors.New("parameter vmssExtensionName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmssExtensionName}", url.PathEscape(vmssExtensionName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, extensionParameters) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_virtualmachinescalesetrollingupgrades_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_virtualmachinescalesetrollingupgrades_client.go new file mode 100644 index 000000000..d86eeec8a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_virtualmachinescalesetrollingupgrades_client.go @@ -0,0 +1,310 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armcompute + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + armruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strings" +) + +// VirtualMachineScaleSetRollingUpgradesClient contains the methods for the VirtualMachineScaleSetRollingUpgrades group. +// Don't use this type directly, use NewVirtualMachineScaleSetRollingUpgradesClient() instead. +type VirtualMachineScaleSetRollingUpgradesClient struct { + host string + subscriptionID string + pl runtime.Pipeline +} + +// NewVirtualMachineScaleSetRollingUpgradesClient creates a new instance of VirtualMachineScaleSetRollingUpgradesClient with the specified values. 
+// subscriptionID - Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms +// part of the URI for every service call. +// credential - used to authorize requests. Usually a credential from azidentity. +// options - pass nil to accept the default values. +func NewVirtualMachineScaleSetRollingUpgradesClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*VirtualMachineScaleSetRollingUpgradesClient, error) { + if options == nil { + options = &arm.ClientOptions{} + } + ep := cloud.AzurePublic.Services[cloud.ResourceManager].Endpoint + if c, ok := options.Cloud.Services[cloud.ResourceManager]; ok { + ep = c.Endpoint + } + pl, err := armruntime.NewPipeline(moduleName, moduleVersion, credential, runtime.PipelineOptions{}, options) + if err != nil { + return nil, err + } + client := &VirtualMachineScaleSetRollingUpgradesClient{ + subscriptionID: subscriptionID, + host: ep, + pl: pl, + } + return client, nil +} + +// BeginCancel - Cancels the current virtual machine scale set rolling upgrade. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// vmScaleSetName - The name of the VM scale set. +// options - VirtualMachineScaleSetRollingUpgradesClientBeginCancelOptions contains the optional parameters for the VirtualMachineScaleSetRollingUpgradesClient.BeginCancel +// method. +func (client *VirtualMachineScaleSetRollingUpgradesClient) BeginCancel(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetRollingUpgradesClientBeginCancelOptions) (*runtime.Poller[VirtualMachineScaleSetRollingUpgradesClientCancelResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.cancel(ctx, resourceGroupName, vmScaleSetName, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[VirtualMachineScaleSetRollingUpgradesClientCancelResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[VirtualMachineScaleSetRollingUpgradesClientCancelResponse](options.ResumeToken, client.pl, nil) + } +} + +// Cancel - Cancels the current virtual machine scale set rolling upgrade. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +func (client *VirtualMachineScaleSetRollingUpgradesClient) cancel(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetRollingUpgradesClientBeginCancelOptions) (*http.Response, error) { + req, err := client.cancelCreateRequest(ctx, resourceGroupName, vmScaleSetName, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// cancelCreateRequest creates the Cancel request. 
+func (client *VirtualMachineScaleSetRollingUpgradesClient) cancelCreateRequest(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetRollingUpgradesClientBeginCancelOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/rollingUpgrades/cancel" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if vmScaleSetName == "" { + return nil, errors.New("parameter vmScaleSetName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmScaleSetName}", url.PathEscape(vmScaleSetName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// GetLatest - Gets the status of the latest virtual machine scale set rolling upgrade. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// vmScaleSetName - The name of the VM scale set. +// options - VirtualMachineScaleSetRollingUpgradesClientGetLatestOptions contains the optional parameters for the VirtualMachineScaleSetRollingUpgradesClient.GetLatest +// method. +func (client *VirtualMachineScaleSetRollingUpgradesClient) GetLatest(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetRollingUpgradesClientGetLatestOptions) (VirtualMachineScaleSetRollingUpgradesClientGetLatestResponse, error) { + req, err := client.getLatestCreateRequest(ctx, resourceGroupName, vmScaleSetName, options) + if err != nil { + return VirtualMachineScaleSetRollingUpgradesClientGetLatestResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return VirtualMachineScaleSetRollingUpgradesClientGetLatestResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return VirtualMachineScaleSetRollingUpgradesClientGetLatestResponse{}, runtime.NewResponseError(resp) + } + return client.getLatestHandleResponse(resp) +} + +// getLatestCreateRequest creates the GetLatest request. 
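
A sketch of reading the latest rolling-upgrade status; the nested fields are optional in the wire format, hence the nil guards (names are placeholders):

func latestRollingUpgrade(ctx context.Context, client *armcompute.VirtualMachineScaleSetRollingUpgradesClient) error {
	res, err := client.GetLatest(ctx, "my-rg", "my-vmss", nil)
	if err != nil {
		return err
	}
	if res.Properties != nil && res.Properties.RunningStatus != nil && res.Properties.RunningStatus.Code != nil {
		fmt.Println("upgrade status:", *res.Properties.RunningStatus.Code)
	}
	return nil
}
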
+func (client *VirtualMachineScaleSetRollingUpgradesClient) getLatestCreateRequest(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetRollingUpgradesClientGetLatestOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/rollingUpgrades/latest" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if vmScaleSetName == "" { + return nil, errors.New("parameter vmScaleSetName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmScaleSetName}", url.PathEscape(vmScaleSetName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getLatestHandleResponse handles the GetLatest response. +func (client *VirtualMachineScaleSetRollingUpgradesClient) getLatestHandleResponse(resp *http.Response) (VirtualMachineScaleSetRollingUpgradesClientGetLatestResponse, error) { + result := VirtualMachineScaleSetRollingUpgradesClientGetLatestResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.RollingUpgradeStatusInfo); err != nil { + return VirtualMachineScaleSetRollingUpgradesClientGetLatestResponse{}, err + } + return result, nil +} + +// BeginStartExtensionUpgrade - Starts a rolling upgrade to move all extensions for all virtual machine scale set instances +// to the latest available extension version. Instances which are already running the latest extension versions +// are not affected. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// vmScaleSetName - The name of the VM scale set. +// options - VirtualMachineScaleSetRollingUpgradesClientBeginStartExtensionUpgradeOptions contains the optional parameters +// for the VirtualMachineScaleSetRollingUpgradesClient.BeginStartExtensionUpgrade method. 
+func (client *VirtualMachineScaleSetRollingUpgradesClient) BeginStartExtensionUpgrade(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetRollingUpgradesClientBeginStartExtensionUpgradeOptions) (*runtime.Poller[VirtualMachineScaleSetRollingUpgradesClientStartExtensionUpgradeResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.startExtensionUpgrade(ctx, resourceGroupName, vmScaleSetName, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[VirtualMachineScaleSetRollingUpgradesClientStartExtensionUpgradeResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[VirtualMachineScaleSetRollingUpgradesClientStartExtensionUpgradeResponse](options.ResumeToken, client.pl, nil) + } +} + +// StartExtensionUpgrade - Starts a rolling upgrade to move all extensions for all virtual machine scale set instances to +// the latest available extension version. Instances which are already running the latest extension versions +// are not affected. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +func (client *VirtualMachineScaleSetRollingUpgradesClient) startExtensionUpgrade(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetRollingUpgradesClientBeginStartExtensionUpgradeOptions) (*http.Response, error) { + req, err := client.startExtensionUpgradeCreateRequest(ctx, resourceGroupName, vmScaleSetName, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// startExtensionUpgradeCreateRequest creates the StartExtensionUpgrade request. +func (client *VirtualMachineScaleSetRollingUpgradesClient) startExtensionUpgradeCreateRequest(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetRollingUpgradesClientBeginStartExtensionUpgradeOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensionRollingUpgrade" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if vmScaleSetName == "" { + return nil, errors.New("parameter vmScaleSetName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmScaleSetName}", url.PathEscape(vmScaleSetName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// BeginStartOSUpgrade - Starts a rolling upgrade to move all virtual machine scale set instances to the latest available +// Platform Image OS version. Instances which are already running the latest available OS version are not +// affected. 
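
Kicking off the extension rolling upgrade is standard poller boilerplate; a sketch with placeholder names:

func upgradeVMSSExtensions(ctx context.Context, client *armcompute.VirtualMachineScaleSetRollingUpgradesClient) error {
	poller, err := client.BeginStartExtensionUpgrade(ctx, "my-rg", "my-vmss", nil)
	if err != nil {
		return err
	}
	_, err = poller.PollUntilDone(ctx, nil)
	return err
}
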
+// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// vmScaleSetName - The name of the VM scale set. +// options - VirtualMachineScaleSetRollingUpgradesClientBeginStartOSUpgradeOptions contains the optional parameters for the +// VirtualMachineScaleSetRollingUpgradesClient.BeginStartOSUpgrade method. +func (client *VirtualMachineScaleSetRollingUpgradesClient) BeginStartOSUpgrade(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetRollingUpgradesClientBeginStartOSUpgradeOptions) (*runtime.Poller[VirtualMachineScaleSetRollingUpgradesClientStartOSUpgradeResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.startOSUpgrade(ctx, resourceGroupName, vmScaleSetName, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[VirtualMachineScaleSetRollingUpgradesClientStartOSUpgradeResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[VirtualMachineScaleSetRollingUpgradesClientStartOSUpgradeResponse](options.ResumeToken, client.pl, nil) + } +} + +// StartOSUpgrade - Starts a rolling upgrade to move all virtual machine scale set instances to the latest available Platform +// Image OS version. Instances which are already running the latest available OS version are not +// affected. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +func (client *VirtualMachineScaleSetRollingUpgradesClient) startOSUpgrade(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetRollingUpgradesClientBeginStartOSUpgradeOptions) (*http.Response, error) { + req, err := client.startOSUpgradeCreateRequest(ctx, resourceGroupName, vmScaleSetName, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// startOSUpgradeCreateRequest creates the StartOSUpgrade request. 
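
The OS-image rolling upgrade is driven the same way, and a stuck run could be stopped with BeginCancel on the same client. A sketch (placeholder names):

func startVMSSOSUpgrade(ctx context.Context, client *armcompute.VirtualMachineScaleSetRollingUpgradesClient) error {
	poller, err := client.BeginStartOSUpgrade(ctx, "my-rg", "my-vmss", nil)
	if err != nil {
		return err
	}
	_, err = poller.PollUntilDone(ctx, nil)
	return err
}
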
+func (client *VirtualMachineScaleSetRollingUpgradesClient) startOSUpgradeCreateRequest(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetRollingUpgradesClientBeginStartOSUpgradeOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/osRollingUpgrade" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if vmScaleSetName == "" { + return nil, errors.New("parameter vmScaleSetName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmScaleSetName}", url.PathEscape(vmScaleSetName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_virtualmachinescalesets_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_virtualmachinescalesets_client.go new file mode 100644 index 000000000..5a5d8a7e0 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_virtualmachinescalesets_client.go @@ -0,0 +1,1562 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armcompute + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + armruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strconv" + "strings" +) + +// VirtualMachineScaleSetsClient contains the methods for the VirtualMachineScaleSets group. +// Don't use this type directly, use NewVirtualMachineScaleSetsClient() instead. +type VirtualMachineScaleSetsClient struct { + host string + subscriptionID string + pl runtime.Pipeline +} + +// NewVirtualMachineScaleSetsClient creates a new instance of VirtualMachineScaleSetsClient with the specified values. +// subscriptionID - Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms +// part of the URI for every service call. +// credential - used to authorize requests. Usually a credential from azidentity. +// options - pass nil to accept the default values. 
+func NewVirtualMachineScaleSetsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*VirtualMachineScaleSetsClient, error) {
+	if options == nil {
+		options = &arm.ClientOptions{}
+	}
+	ep := cloud.AzurePublic.Services[cloud.ResourceManager].Endpoint
+	if c, ok := options.Cloud.Services[cloud.ResourceManager]; ok {
+		ep = c.Endpoint
+	}
+	pl, err := armruntime.NewPipeline(moduleName, moduleVersion, credential, runtime.PipelineOptions{}, options)
+	if err != nil {
+		return nil, err
+	}
+	client := &VirtualMachineScaleSetsClient{
+		subscriptionID: subscriptionID,
+		host:           ep,
+		pl:             pl,
+	}
+	return client, nil
+}
+
+// ConvertToSinglePlacementGroup - Converts SinglePlacementGroup property to false for an existing virtual machine scale set.
+// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2022-03-01
+// resourceGroupName - The name of the resource group.
+// vmScaleSetName - The name of the virtual machine scale set to create or update.
+// parameters - The input object for ConvertToSinglePlacementGroup API.
+// options - VirtualMachineScaleSetsClientConvertToSinglePlacementGroupOptions contains the optional parameters for the VirtualMachineScaleSetsClient.ConvertToSinglePlacementGroup
+// method.
+func (client *VirtualMachineScaleSetsClient) ConvertToSinglePlacementGroup(ctx context.Context, resourceGroupName string, vmScaleSetName string, parameters VMScaleSetConvertToSinglePlacementGroupInput, options *VirtualMachineScaleSetsClientConvertToSinglePlacementGroupOptions) (VirtualMachineScaleSetsClientConvertToSinglePlacementGroupResponse, error) {
+	req, err := client.convertToSinglePlacementGroupCreateRequest(ctx, resourceGroupName, vmScaleSetName, parameters, options)
+	if err != nil {
+		return VirtualMachineScaleSetsClientConvertToSinglePlacementGroupResponse{}, err
+	}
+	resp, err := client.pl.Do(req)
+	if err != nil {
+		return VirtualMachineScaleSetsClientConvertToSinglePlacementGroupResponse{}, err
+	}
+	if !runtime.HasStatusCode(resp, http.StatusOK) {
+		return VirtualMachineScaleSetsClientConvertToSinglePlacementGroupResponse{}, runtime.NewResponseError(resp)
+	}
+	return VirtualMachineScaleSetsClientConvertToSinglePlacementGroupResponse{}, nil
+}
+
+// convertToSinglePlacementGroupCreateRequest creates the ConvertToSinglePlacementGroup request.
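
ConvertToSinglePlacementGroup is synchronous and returns an empty response on 200. ActivePlacementGroupID is optional, so a zero-value input is assumed to be acceptable in this sketch; all names are placeholders:

func convertToSinglePG(ctx context.Context, client *armcompute.VirtualMachineScaleSetsClient) error {
	_, err := client.ConvertToSinglePlacementGroup(ctx, "my-rg", "my-vmss",
		armcompute.VMScaleSetConvertToSinglePlacementGroupInput{}, nil)
	return err
}
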
+func (client *VirtualMachineScaleSetsClient) convertToSinglePlacementGroupCreateRequest(ctx context.Context, resourceGroupName string, vmScaleSetName string, parameters VMScaleSetConvertToSinglePlacementGroupInput, options *VirtualMachineScaleSetsClientConvertToSinglePlacementGroupOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/convertToSinglePlacementGroup" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if vmScaleSetName == "" { + return nil, errors.New("parameter vmScaleSetName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmScaleSetName}", url.PathEscape(vmScaleSetName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, parameters) +} + +// BeginCreateOrUpdate - Create or update a VM scale set. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// vmScaleSetName - The name of the VM scale set to create or update. +// parameters - The scale set object. +// options - VirtualMachineScaleSetsClientBeginCreateOrUpdateOptions contains the optional parameters for the VirtualMachineScaleSetsClient.BeginCreateOrUpdate +// method. +func (client *VirtualMachineScaleSetsClient) BeginCreateOrUpdate(ctx context.Context, resourceGroupName string, vmScaleSetName string, parameters VirtualMachineScaleSet, options *VirtualMachineScaleSetsClientBeginCreateOrUpdateOptions) (*runtime.Poller[VirtualMachineScaleSetsClientCreateOrUpdateResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.createOrUpdate(ctx, resourceGroupName, vmScaleSetName, parameters, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[VirtualMachineScaleSetsClientCreateOrUpdateResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[VirtualMachineScaleSetsClientCreateOrUpdateResponse](options.ResumeToken, client.pl, nil) + } +} + +// CreateOrUpdate - Create or update a VM scale set. +// If the operation fails it returns an *azcore.ResponseError type. 
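
A deliberately abbreviated sketch of the create/update call shape. A real scale-set create also needs a VirtualMachineProfile, upgrade policy, and network configuration; the location and SKU values here are placeholders:

func createOrUpdateVMSS(ctx context.Context, client *armcompute.VirtualMachineScaleSetsClient) error {
	poller, err := client.BeginCreateOrUpdate(ctx, "my-rg", "my-vmss", armcompute.VirtualMachineScaleSet{
		Location: to.Ptr("eastus"),
		SKU: &armcompute.SKU{
			Name:     to.Ptr("Standard_DS1_v2"),
			Capacity: to.Ptr[int64](3),
		},
	}, nil)
	if err != nil {
		return err
	}
	_, err = poller.PollUntilDone(ctx, nil)
	return err
}
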
+// Generated from API version 2022-03-01 +func (client *VirtualMachineScaleSetsClient) createOrUpdate(ctx context.Context, resourceGroupName string, vmScaleSetName string, parameters VirtualMachineScaleSet, options *VirtualMachineScaleSetsClientBeginCreateOrUpdateOptions) (*http.Response, error) { + req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, vmScaleSetName, parameters, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// createOrUpdateCreateRequest creates the CreateOrUpdate request. +func (client *VirtualMachineScaleSetsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, vmScaleSetName string, parameters VirtualMachineScaleSet, options *VirtualMachineScaleSetsClientBeginCreateOrUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if vmScaleSetName == "" { + return nil, errors.New("parameter vmScaleSetName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmScaleSetName}", url.PathEscape(vmScaleSetName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, parameters) +} + +// BeginDeallocate - Deallocates specific virtual machines in a VM scale set. Shuts down the virtual machines and releases +// the compute resources. You are not billed for the compute resources that this virtual machine +// scale set deallocates. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// vmScaleSetName - The name of the VM scale set. +// options - VirtualMachineScaleSetsClientBeginDeallocateOptions contains the optional parameters for the VirtualMachineScaleSetsClient.BeginDeallocate +// method. +func (client *VirtualMachineScaleSetsClient) BeginDeallocate(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsClientBeginDeallocateOptions) (*runtime.Poller[VirtualMachineScaleSetsClientDeallocateResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.deallocate(ctx, resourceGroupName, vmScaleSetName, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[VirtualMachineScaleSetsClientDeallocateResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[VirtualMachineScaleSetsClientDeallocateResponse](options.ResumeToken, client.pl, nil) + } +} + +// Deallocate - Deallocates specific virtual machines in a VM scale set. 
Shuts down the virtual machines and releases the +// compute resources. You are not billed for the compute resources that this virtual machine +// scale set deallocates. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +func (client *VirtualMachineScaleSetsClient) deallocate(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsClientBeginDeallocateOptions) (*http.Response, error) { + req, err := client.deallocateCreateRequest(ctx, resourceGroupName, vmScaleSetName, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// deallocateCreateRequest creates the Deallocate request. +func (client *VirtualMachineScaleSetsClient) deallocateCreateRequest(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsClientBeginDeallocateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/deallocate" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if vmScaleSetName == "" { + return nil, errors.New("parameter vmScaleSetName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmScaleSetName}", url.PathEscape(vmScaleSetName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + if options != nil && options.VMInstanceIDs != nil { + return req, runtime.MarshalAsJSON(req, *options.VMInstanceIDs) + } + return req, nil +} + +// BeginDelete - Deletes a VM scale set. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// vmScaleSetName - The name of the VM scale set. +// options - VirtualMachineScaleSetsClientBeginDeleteOptions contains the optional parameters for the VirtualMachineScaleSetsClient.BeginDelete +// method. +func (client *VirtualMachineScaleSetsClient) BeginDelete(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsClientBeginDeleteOptions) (*runtime.Poller[VirtualMachineScaleSetsClientDeleteResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.deleteOperation(ctx, resourceGroupName, vmScaleSetName, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[VirtualMachineScaleSetsClientDeleteResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[VirtualMachineScaleSetsClientDeleteResponse](options.ResumeToken, client.pl, nil) + } +} + +// Delete - Deletes a VM scale set. 
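+//
+// A hypothetical usage sketch (not part of the generated client; placeholder
+// names, and to.Ptr assumed from azcore's to package): callers reach this
+// method through BeginDelete, whose only request-specific option is ForceDeletion:
+//
+//	poller, err := client.BeginDelete(ctx, "myResourceGroup", "myScaleSet",
+//		&VirtualMachineScaleSetsClientBeginDeleteOptions{ForceDeletion: to.Ptr(true)})
+//	if err != nil {
+//		// handle error
+//	}
+//	_, err = poller.PollUntilDone(ctx, nil)
+//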
+// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +func (client *VirtualMachineScaleSetsClient) deleteOperation(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsClientBeginDeleteOptions) (*http.Response, error) { + req, err := client.deleteCreateRequest(ctx, resourceGroupName, vmScaleSetName, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted, http.StatusNoContent) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// deleteCreateRequest creates the Delete request. +func (client *VirtualMachineScaleSetsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsClientBeginDeleteOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if vmScaleSetName == "" { + return nil, errors.New("parameter vmScaleSetName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmScaleSetName}", url.PathEscape(vmScaleSetName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.ForceDeletion != nil { + reqQP.Set("forceDeletion", strconv.FormatBool(*options.ForceDeletion)) + } + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// BeginDeleteInstances - Deletes virtual machines in a VM scale set. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// vmScaleSetName - The name of the VM scale set. +// vmInstanceIDs - A list of virtual machine instance IDs from the VM scale set. +// options - VirtualMachineScaleSetsClientBeginDeleteInstancesOptions contains the optional parameters for the VirtualMachineScaleSetsClient.BeginDeleteInstances +// method. 
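+//
+// A hypothetical usage sketch (not part of the generated client; the
+// InstanceIDs field name is assumed from the armcompute models, and to.Ptr
+// from azcore's to package):
+//
+//	ids := VirtualMachineScaleSetVMInstanceRequiredIDs{InstanceIDs: []*string{to.Ptr("0"), to.Ptr("1")}}
+//	poller, err := client.BeginDeleteInstances(ctx, "myResourceGroup", "myScaleSet", ids, nil)
+//	if err != nil {
+//		// handle error
+//	}
+//	_, err = poller.PollUntilDone(ctx, nil)
+//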
+func (client *VirtualMachineScaleSetsClient) BeginDeleteInstances(ctx context.Context, resourceGroupName string, vmScaleSetName string, vmInstanceIDs VirtualMachineScaleSetVMInstanceRequiredIDs, options *VirtualMachineScaleSetsClientBeginDeleteInstancesOptions) (*runtime.Poller[VirtualMachineScaleSetsClientDeleteInstancesResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.deleteInstances(ctx, resourceGroupName, vmScaleSetName, vmInstanceIDs, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[VirtualMachineScaleSetsClientDeleteInstancesResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[VirtualMachineScaleSetsClientDeleteInstancesResponse](options.ResumeToken, client.pl, nil) + } +} + +// DeleteInstances - Deletes virtual machines in a VM scale set. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +func (client *VirtualMachineScaleSetsClient) deleteInstances(ctx context.Context, resourceGroupName string, vmScaleSetName string, vmInstanceIDs VirtualMachineScaleSetVMInstanceRequiredIDs, options *VirtualMachineScaleSetsClientBeginDeleteInstancesOptions) (*http.Response, error) { + req, err := client.deleteInstancesCreateRequest(ctx, resourceGroupName, vmScaleSetName, vmInstanceIDs, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// deleteInstancesCreateRequest creates the DeleteInstances request. +func (client *VirtualMachineScaleSetsClient) deleteInstancesCreateRequest(ctx context.Context, resourceGroupName string, vmScaleSetName string, vmInstanceIDs VirtualMachineScaleSetVMInstanceRequiredIDs, options *VirtualMachineScaleSetsClientBeginDeleteInstancesOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/delete" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if vmScaleSetName == "" { + return nil, errors.New("parameter vmScaleSetName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmScaleSetName}", url.PathEscape(vmScaleSetName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.ForceDeletion != nil { + reqQP.Set("forceDeletion", strconv.FormatBool(*options.ForceDeletion)) + } + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, vmInstanceIDs) +} + +// ForceRecoveryServiceFabricPlatformUpdateDomainWalk - Manual platform update domain walk to update virtual machines in a +// service fabric virtual machine scale set. +// If the operation fails it returns an *azcore.ResponseError type. 
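+//
+// A hypothetical usage sketch (not part of the generated client; placeholder
+// names, and the RecoveryWalkResponse field is assumed from the armcompute
+// models): this is a synchronous call, so the typed response is returned
+// directly rather than through a poller:
+//
+//	resp, err := client.ForceRecoveryServiceFabricPlatformUpdateDomainWalk(ctx, "myResourceGroup", "myScaleSet", 0, nil)
+//	if err != nil {
+//		// handle error
+//	}
+//	walked := resp.RecoveryWalkResponse.WalkPerformed // *bool; field name assumed
+//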
+// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// vmScaleSetName - The name of the VM scale set. +// platformUpdateDomain - The platform update domain for which a manual recovery walk is requested +// options - VirtualMachineScaleSetsClientForceRecoveryServiceFabricPlatformUpdateDomainWalkOptions contains the optional +// parameters for the VirtualMachineScaleSetsClient.ForceRecoveryServiceFabricPlatformUpdateDomainWalk method. +func (client *VirtualMachineScaleSetsClient) ForceRecoveryServiceFabricPlatformUpdateDomainWalk(ctx context.Context, resourceGroupName string, vmScaleSetName string, platformUpdateDomain int32, options *VirtualMachineScaleSetsClientForceRecoveryServiceFabricPlatformUpdateDomainWalkOptions) (VirtualMachineScaleSetsClientForceRecoveryServiceFabricPlatformUpdateDomainWalkResponse, error) { + req, err := client.forceRecoveryServiceFabricPlatformUpdateDomainWalkCreateRequest(ctx, resourceGroupName, vmScaleSetName, platformUpdateDomain, options) + if err != nil { + return VirtualMachineScaleSetsClientForceRecoveryServiceFabricPlatformUpdateDomainWalkResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return VirtualMachineScaleSetsClientForceRecoveryServiceFabricPlatformUpdateDomainWalkResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return VirtualMachineScaleSetsClientForceRecoveryServiceFabricPlatformUpdateDomainWalkResponse{}, runtime.NewResponseError(resp) + } + return client.forceRecoveryServiceFabricPlatformUpdateDomainWalkHandleResponse(resp) +} + +// forceRecoveryServiceFabricPlatformUpdateDomainWalkCreateRequest creates the ForceRecoveryServiceFabricPlatformUpdateDomainWalk request. +func (client *VirtualMachineScaleSetsClient) forceRecoveryServiceFabricPlatformUpdateDomainWalkCreateRequest(ctx context.Context, resourceGroupName string, vmScaleSetName string, platformUpdateDomain int32, options *VirtualMachineScaleSetsClientForceRecoveryServiceFabricPlatformUpdateDomainWalkOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/forceRecoveryServiceFabricPlatformUpdateDomainWalk" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if vmScaleSetName == "" { + return nil, errors.New("parameter vmScaleSetName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmScaleSetName}", url.PathEscape(vmScaleSetName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + reqQP.Set("platformUpdateDomain", strconv.FormatInt(int64(platformUpdateDomain), 10)) + if options != nil && options.Zone != nil { + reqQP.Set("zone", *options.Zone) + } + if options != nil && options.PlacementGroupID != nil { + reqQP.Set("placementGroupId", *options.PlacementGroupID) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// 
forceRecoveryServiceFabricPlatformUpdateDomainWalkHandleResponse handles the ForceRecoveryServiceFabricPlatformUpdateDomainWalk response. +func (client *VirtualMachineScaleSetsClient) forceRecoveryServiceFabricPlatformUpdateDomainWalkHandleResponse(resp *http.Response) (VirtualMachineScaleSetsClientForceRecoveryServiceFabricPlatformUpdateDomainWalkResponse, error) { + result := VirtualMachineScaleSetsClientForceRecoveryServiceFabricPlatformUpdateDomainWalkResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.RecoveryWalkResponse); err != nil { + return VirtualMachineScaleSetsClientForceRecoveryServiceFabricPlatformUpdateDomainWalkResponse{}, err + } + return result, nil +} + +// Get - Display information about a virtual machine scale set. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// vmScaleSetName - The name of the VM scale set. +// options - VirtualMachineScaleSetsClientGetOptions contains the optional parameters for the VirtualMachineScaleSetsClient.Get +// method. +func (client *VirtualMachineScaleSetsClient) Get(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsClientGetOptions) (VirtualMachineScaleSetsClientGetResponse, error) { + req, err := client.getCreateRequest(ctx, resourceGroupName, vmScaleSetName, options) + if err != nil { + return VirtualMachineScaleSetsClientGetResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return VirtualMachineScaleSetsClientGetResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return VirtualMachineScaleSetsClientGetResponse{}, runtime.NewResponseError(resp) + } + return client.getHandleResponse(resp) +} + +// getCreateRequest creates the Get request. +func (client *VirtualMachineScaleSetsClient) getCreateRequest(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsClientGetOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if vmScaleSetName == "" { + return nil, errors.New("parameter vmScaleSetName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmScaleSetName}", url.PathEscape(vmScaleSetName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + if options != nil && options.Expand != nil { + reqQP.Set("$expand", string(*options.Expand)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getHandleResponse handles the Get response. 
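+//
+// A hypothetical usage sketch for the exported Get method (not part of the
+// generated client; placeholder names assumed). getHandleResponse below
+// unmarshals the body into the embedded VirtualMachineScaleSet, so a caller
+// reads the typed model straight off the response:
+//
+//	resp, err := client.Get(ctx, "myResourceGroup", "myScaleSet", nil)
+//	if err != nil {
+//		// handle error
+//	}
+//	scaleSet := resp.VirtualMachineScaleSet // populated by getHandleResponse
+//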
+func (client *VirtualMachineScaleSetsClient) getHandleResponse(resp *http.Response) (VirtualMachineScaleSetsClientGetResponse, error) { + result := VirtualMachineScaleSetsClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.VirtualMachineScaleSet); err != nil { + return VirtualMachineScaleSetsClientGetResponse{}, err + } + return result, nil +} + +// GetInstanceView - Gets the status of a VM scale set instance. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// vmScaleSetName - The name of the VM scale set. +// options - VirtualMachineScaleSetsClientGetInstanceViewOptions contains the optional parameters for the VirtualMachineScaleSetsClient.GetInstanceView +// method. +func (client *VirtualMachineScaleSetsClient) GetInstanceView(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsClientGetInstanceViewOptions) (VirtualMachineScaleSetsClientGetInstanceViewResponse, error) { + req, err := client.getInstanceViewCreateRequest(ctx, resourceGroupName, vmScaleSetName, options) + if err != nil { + return VirtualMachineScaleSetsClientGetInstanceViewResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return VirtualMachineScaleSetsClientGetInstanceViewResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return VirtualMachineScaleSetsClientGetInstanceViewResponse{}, runtime.NewResponseError(resp) + } + return client.getInstanceViewHandleResponse(resp) +} + +// getInstanceViewCreateRequest creates the GetInstanceView request. +func (client *VirtualMachineScaleSetsClient) getInstanceViewCreateRequest(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsClientGetInstanceViewOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/instanceView" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if vmScaleSetName == "" { + return nil, errors.New("parameter vmScaleSetName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmScaleSetName}", url.PathEscape(vmScaleSetName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getInstanceViewHandleResponse handles the GetInstanceView response. 
+func (client *VirtualMachineScaleSetsClient) getInstanceViewHandleResponse(resp *http.Response) (VirtualMachineScaleSetsClientGetInstanceViewResponse, error) {
+	result := VirtualMachineScaleSetsClientGetInstanceViewResponse{}
+	if err := runtime.UnmarshalAsJSON(resp, &result.VirtualMachineScaleSetInstanceView); err != nil {
+		return VirtualMachineScaleSetsClientGetInstanceViewResponse{}, err
+	}
+	return result, nil
+}
+
+// NewGetOSUpgradeHistoryPager - Gets the list of OS upgrades on a VM scale set instance.
+// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2022-03-01
+// resourceGroupName - The name of the resource group.
+// vmScaleSetName - The name of the VM scale set.
+// options - VirtualMachineScaleSetsClientGetOSUpgradeHistoryOptions contains the optional parameters for the VirtualMachineScaleSetsClient.GetOSUpgradeHistory
+// method.
+func (client *VirtualMachineScaleSetsClient) NewGetOSUpgradeHistoryPager(resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsClientGetOSUpgradeHistoryOptions) *runtime.Pager[VirtualMachineScaleSetsClientGetOSUpgradeHistoryResponse] {
+	return runtime.NewPager(runtime.PagingHandler[VirtualMachineScaleSetsClientGetOSUpgradeHistoryResponse]{
+		More: func(page VirtualMachineScaleSetsClientGetOSUpgradeHistoryResponse) bool {
+			return page.NextLink != nil && len(*page.NextLink) > 0
+		},
+		Fetcher: func(ctx context.Context, page *VirtualMachineScaleSetsClientGetOSUpgradeHistoryResponse) (VirtualMachineScaleSetsClientGetOSUpgradeHistoryResponse, error) {
+			var req *policy.Request
+			var err error
+			if page == nil {
+				req, err = client.getOSUpgradeHistoryCreateRequest(ctx, resourceGroupName, vmScaleSetName, options)
+			} else {
+				req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink)
+			}
+			if err != nil {
+				return VirtualMachineScaleSetsClientGetOSUpgradeHistoryResponse{}, err
+			}
+			resp, err := client.pl.Do(req)
+			if err != nil {
+				return VirtualMachineScaleSetsClientGetOSUpgradeHistoryResponse{}, err
+			}
+			if !runtime.HasStatusCode(resp, http.StatusOK) {
+				return VirtualMachineScaleSetsClientGetOSUpgradeHistoryResponse{}, runtime.NewResponseError(resp)
+			}
+			return client.getOSUpgradeHistoryHandleResponse(resp)
+		},
+	})
+}
+
+// getOSUpgradeHistoryCreateRequest creates the GetOSUpgradeHistory request.
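+//
+// A hypothetical iteration sketch for NewGetOSUpgradeHistoryPager above (not
+// part of the generated client; More and NextPage are the standard azcore
+// runtime.Pager methods, and ctx is a placeholder):
+//
+//	pager := client.NewGetOSUpgradeHistoryPager("myResourceGroup", "myScaleSet", nil)
+//	for pager.More() {
+//		page, err := pager.NextPage(ctx)
+//		if err != nil {
+//			// handle error
+//		}
+//		_ = page // each page embeds VirtualMachineScaleSetListOSUpgradeHistory
+//	}
+//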
+func (client *VirtualMachineScaleSetsClient) getOSUpgradeHistoryCreateRequest(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsClientGetOSUpgradeHistoryOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/osUpgradeHistory" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if vmScaleSetName == "" { + return nil, errors.New("parameter vmScaleSetName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmScaleSetName}", url.PathEscape(vmScaleSetName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getOSUpgradeHistoryHandleResponse handles the GetOSUpgradeHistory response. +func (client *VirtualMachineScaleSetsClient) getOSUpgradeHistoryHandleResponse(resp *http.Response) (VirtualMachineScaleSetsClientGetOSUpgradeHistoryResponse, error) { + result := VirtualMachineScaleSetsClientGetOSUpgradeHistoryResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.VirtualMachineScaleSetListOSUpgradeHistory); err != nil { + return VirtualMachineScaleSetsClientGetOSUpgradeHistoryResponse{}, err + } + return result, nil +} + +// NewListPager - Gets a list of all VM scale sets under a resource group. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// options - VirtualMachineScaleSetsClientListOptions contains the optional parameters for the VirtualMachineScaleSetsClient.List +// method. +func (client *VirtualMachineScaleSetsClient) NewListPager(resourceGroupName string, options *VirtualMachineScaleSetsClientListOptions) *runtime.Pager[VirtualMachineScaleSetsClientListResponse] { + return runtime.NewPager(runtime.PagingHandler[VirtualMachineScaleSetsClientListResponse]{ + More: func(page VirtualMachineScaleSetsClientListResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *VirtualMachineScaleSetsClientListResponse) (VirtualMachineScaleSetsClientListResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listCreateRequest(ctx, resourceGroupName, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return VirtualMachineScaleSetsClientListResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return VirtualMachineScaleSetsClientListResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return VirtualMachineScaleSetsClientListResponse{}, runtime.NewResponseError(resp) + } + return client.listHandleResponse(resp) + }, + }) +} + +// listCreateRequest creates the List request. 
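+//
+// A hypothetical iteration sketch for NewListPager above (not part of the
+// generated client; the Value slice on VirtualMachineScaleSetListResult is
+// assumed from the armcompute models):
+//
+//	pager := client.NewListPager("myResourceGroup", nil)
+//	for pager.More() {
+//		page, err := pager.NextPage(ctx)
+//		if err != nil {
+//			// handle error
+//		}
+//		for _, vmss := range page.Value { // field name assumed
+//			_ = vmss
+//		}
+//	}
+//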
+func (client *VirtualMachineScaleSetsClient) listCreateRequest(ctx context.Context, resourceGroupName string, options *VirtualMachineScaleSetsClientListOptions) (*policy.Request, error) {
+	urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets"
+	if resourceGroupName == "" {
+		return nil, errors.New("parameter resourceGroupName cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName))
+	if client.subscriptionID == "" {
+		return nil, errors.New("parameter client.subscriptionID cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID))
+	req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))
+	if err != nil {
+		return nil, err
+	}
+	reqQP := req.Raw().URL.Query()
+	reqQP.Set("api-version", "2022-03-01")
+	req.Raw().URL.RawQuery = reqQP.Encode()
+	req.Raw().Header["Accept"] = []string{"application/json"}
+	return req, nil
+}
+
+// listHandleResponse handles the List response.
+func (client *VirtualMachineScaleSetsClient) listHandleResponse(resp *http.Response) (VirtualMachineScaleSetsClientListResponse, error) {
+	result := VirtualMachineScaleSetsClientListResponse{}
+	if err := runtime.UnmarshalAsJSON(resp, &result.VirtualMachineScaleSetListResult); err != nil {
+		return VirtualMachineScaleSetsClientListResponse{}, err
+	}
+	return result, nil
+}
+
+// NewListAllPager - Gets a list of all VM Scale Sets in the subscription, regardless of the associated resource group. Use
+// the nextLink property in the response to get the next page of VM Scale Sets. Do this until nextLink is
+// null to fetch all the VM Scale Sets.
+// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2022-03-01
+// options - VirtualMachineScaleSetsClientListAllOptions contains the optional parameters for the VirtualMachineScaleSetsClient.ListAll
+// method.
+func (client *VirtualMachineScaleSetsClient) NewListAllPager(options *VirtualMachineScaleSetsClientListAllOptions) *runtime.Pager[VirtualMachineScaleSetsClientListAllResponse] {
+	return runtime.NewPager(runtime.PagingHandler[VirtualMachineScaleSetsClientListAllResponse]{
+		More: func(page VirtualMachineScaleSetsClientListAllResponse) bool {
+			return page.NextLink != nil && len(*page.NextLink) > 0
+		},
+		Fetcher: func(ctx context.Context, page *VirtualMachineScaleSetsClientListAllResponse) (VirtualMachineScaleSetsClientListAllResponse, error) {
+			var req *policy.Request
+			var err error
+			if page == nil {
+				req, err = client.listAllCreateRequest(ctx, options)
+			} else {
+				req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink)
+			}
+			if err != nil {
+				return VirtualMachineScaleSetsClientListAllResponse{}, err
+			}
+			resp, err := client.pl.Do(req)
+			if err != nil {
+				return VirtualMachineScaleSetsClientListAllResponse{}, err
+			}
+			if !runtime.HasStatusCode(resp, http.StatusOK) {
+				return VirtualMachineScaleSetsClientListAllResponse{}, runtime.NewResponseError(resp)
+			}
+			return client.listAllHandleResponse(resp)
+		},
+	})
+}
+
+// listAllCreateRequest creates the ListAll request.
+func (client *VirtualMachineScaleSetsClient) listAllCreateRequest(ctx context.Context, options *VirtualMachineScaleSetsClientListAllOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/virtualMachineScaleSets" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listAllHandleResponse handles the ListAll response. +func (client *VirtualMachineScaleSetsClient) listAllHandleResponse(resp *http.Response) (VirtualMachineScaleSetsClientListAllResponse, error) { + result := VirtualMachineScaleSetsClientListAllResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.VirtualMachineScaleSetListWithLinkResult); err != nil { + return VirtualMachineScaleSetsClientListAllResponse{}, err + } + return result, nil +} + +// NewListByLocationPager - Gets all the VM scale sets under the specified subscription for the specified location. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// location - The location for which VM scale sets under the subscription are queried. +// options - VirtualMachineScaleSetsClientListByLocationOptions contains the optional parameters for the VirtualMachineScaleSetsClient.ListByLocation +// method. +func (client *VirtualMachineScaleSetsClient) NewListByLocationPager(location string, options *VirtualMachineScaleSetsClientListByLocationOptions) *runtime.Pager[VirtualMachineScaleSetsClientListByLocationResponse] { + return runtime.NewPager(runtime.PagingHandler[VirtualMachineScaleSetsClientListByLocationResponse]{ + More: func(page VirtualMachineScaleSetsClientListByLocationResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *VirtualMachineScaleSetsClientListByLocationResponse) (VirtualMachineScaleSetsClientListByLocationResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listByLocationCreateRequest(ctx, location, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return VirtualMachineScaleSetsClientListByLocationResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return VirtualMachineScaleSetsClientListByLocationResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return VirtualMachineScaleSetsClientListByLocationResponse{}, runtime.NewResponseError(resp) + } + return client.listByLocationHandleResponse(resp) + }, + }) +} + +// listByLocationCreateRequest creates the ListByLocation request. 
+func (client *VirtualMachineScaleSetsClient) listByLocationCreateRequest(ctx context.Context, location string, options *VirtualMachineScaleSetsClientListByLocationOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/virtualMachineScaleSets" + if location == "" { + return nil, errors.New("parameter location cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{location}", url.PathEscape(location)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listByLocationHandleResponse handles the ListByLocation response. +func (client *VirtualMachineScaleSetsClient) listByLocationHandleResponse(resp *http.Response) (VirtualMachineScaleSetsClientListByLocationResponse, error) { + result := VirtualMachineScaleSetsClientListByLocationResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.VirtualMachineScaleSetListResult); err != nil { + return VirtualMachineScaleSetsClientListByLocationResponse{}, err + } + return result, nil +} + +// NewListSKUsPager - Gets a list of SKUs available for your VM scale set, including the minimum and maximum VM instances +// allowed for each SKU. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// vmScaleSetName - The name of the VM scale set. +// options - VirtualMachineScaleSetsClientListSKUsOptions contains the optional parameters for the VirtualMachineScaleSetsClient.ListSKUs +// method. +func (client *VirtualMachineScaleSetsClient) NewListSKUsPager(resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsClientListSKUsOptions) *runtime.Pager[VirtualMachineScaleSetsClientListSKUsResponse] { + return runtime.NewPager(runtime.PagingHandler[VirtualMachineScaleSetsClientListSKUsResponse]{ + More: func(page VirtualMachineScaleSetsClientListSKUsResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *VirtualMachineScaleSetsClientListSKUsResponse) (VirtualMachineScaleSetsClientListSKUsResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listSKUsCreateRequest(ctx, resourceGroupName, vmScaleSetName, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return VirtualMachineScaleSetsClientListSKUsResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return VirtualMachineScaleSetsClientListSKUsResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return VirtualMachineScaleSetsClientListSKUsResponse{}, runtime.NewResponseError(resp) + } + return client.listSKUsHandleResponse(resp) + }, + }) +} + +// listSKUsCreateRequest creates the ListSKUs request. 
+func (client *VirtualMachineScaleSetsClient) listSKUsCreateRequest(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsClientListSKUsOptions) (*policy.Request, error) {
+	urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/skus"
+	if resourceGroupName == "" {
+		return nil, errors.New("parameter resourceGroupName cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName))
+	if vmScaleSetName == "" {
+		return nil, errors.New("parameter vmScaleSetName cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{vmScaleSetName}", url.PathEscape(vmScaleSetName))
+	if client.subscriptionID == "" {
+		return nil, errors.New("parameter client.subscriptionID cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID))
+	req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))
+	if err != nil {
+		return nil, err
+	}
+	reqQP := req.Raw().URL.Query()
+	reqQP.Set("api-version", "2022-03-01")
+	req.Raw().URL.RawQuery = reqQP.Encode()
+	req.Raw().Header["Accept"] = []string{"application/json"}
+	return req, nil
+}
+
+// listSKUsHandleResponse handles the ListSKUs response.
+func (client *VirtualMachineScaleSetsClient) listSKUsHandleResponse(resp *http.Response) (VirtualMachineScaleSetsClientListSKUsResponse, error) {
+	result := VirtualMachineScaleSetsClientListSKUsResponse{}
+	if err := runtime.UnmarshalAsJSON(resp, &result.VirtualMachineScaleSetListSKUsResult); err != nil {
+		return VirtualMachineScaleSetsClientListSKUsResponse{}, err
+	}
+	return result, nil
+}
+
+// BeginPerformMaintenance - Perform maintenance on one or more virtual machines in a VM scale set. The operation fails
+// on instances that are not eligible for perform maintenance. Please refer to best practices for more
+// details: https://docs.microsoft.com/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-maintenance-notifications
+// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2022-03-01
+// resourceGroupName - The name of the resource group.
+// vmScaleSetName - The name of the VM scale set.
+// options - VirtualMachineScaleSetsClientBeginPerformMaintenanceOptions contains the optional parameters for the VirtualMachineScaleSetsClient.BeginPerformMaintenance
+// method.
+func (client *VirtualMachineScaleSetsClient) BeginPerformMaintenance(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsClientBeginPerformMaintenanceOptions) (*runtime.Poller[VirtualMachineScaleSetsClientPerformMaintenanceResponse], error) {
+	if options == nil || options.ResumeToken == "" {
+		resp, err := client.performMaintenance(ctx, resourceGroupName, vmScaleSetName, options)
+		if err != nil {
+			return nil, err
+		}
+		return runtime.NewPoller[VirtualMachineScaleSetsClientPerformMaintenanceResponse](resp, client.pl, nil)
+	} else {
+		return runtime.NewPollerFromResumeToken[VirtualMachineScaleSetsClientPerformMaintenanceResponse](options.ResumeToken, client.pl, nil)
+	}
+}
+
+// PerformMaintenance - Perform maintenance on one or more virtual machines in a VM scale set. The operation fails on
+// instances that are not eligible for perform maintenance.
Please refer to best practices for more +// details: https://docs.microsoft.com/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-maintenance-notifications +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +func (client *VirtualMachineScaleSetsClient) performMaintenance(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsClientBeginPerformMaintenanceOptions) (*http.Response, error) { + req, err := client.performMaintenanceCreateRequest(ctx, resourceGroupName, vmScaleSetName, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// performMaintenanceCreateRequest creates the PerformMaintenance request. +func (client *VirtualMachineScaleSetsClient) performMaintenanceCreateRequest(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsClientBeginPerformMaintenanceOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/performMaintenance" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if vmScaleSetName == "" { + return nil, errors.New("parameter vmScaleSetName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmScaleSetName}", url.PathEscape(vmScaleSetName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + if options != nil && options.VMInstanceIDs != nil { + return req, runtime.MarshalAsJSON(req, *options.VMInstanceIDs) + } + return req, nil +} + +// BeginPowerOff - Power off (stop) one or more virtual machines in a VM scale set. Note that resources are still attached +// and you are getting charged for the resources. Instead, use deallocate to release resources and +// avoid charges. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// vmScaleSetName - The name of the VM scale set. +// options - VirtualMachineScaleSetsClientBeginPowerOffOptions contains the optional parameters for the VirtualMachineScaleSetsClient.BeginPowerOff +// method. 
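+//
+// A hypothetical usage sketch (not part of the generated client; placeholder
+// names, and to.Ptr assumed from azcore's to package): SkipShutdown asks the
+// service to skip the graceful shutdown and power off immediately:
+//
+//	poller, err := client.BeginPowerOff(ctx, "myResourceGroup", "myScaleSet",
+//		&VirtualMachineScaleSetsClientBeginPowerOffOptions{SkipShutdown: to.Ptr(true)})
+//	if err != nil {
+//		// handle error
+//	}
+//	_, err = poller.PollUntilDone(ctx, nil)
+//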
+func (client *VirtualMachineScaleSetsClient) BeginPowerOff(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsClientBeginPowerOffOptions) (*runtime.Poller[VirtualMachineScaleSetsClientPowerOffResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.powerOff(ctx, resourceGroupName, vmScaleSetName, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[VirtualMachineScaleSetsClientPowerOffResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[VirtualMachineScaleSetsClientPowerOffResponse](options.ResumeToken, client.pl, nil) + } +} + +// PowerOff - Power off (stop) one or more virtual machines in a VM scale set. Note that resources are still attached and +// you are getting charged for the resources. Instead, use deallocate to release resources and +// avoid charges. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +func (client *VirtualMachineScaleSetsClient) powerOff(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsClientBeginPowerOffOptions) (*http.Response, error) { + req, err := client.powerOffCreateRequest(ctx, resourceGroupName, vmScaleSetName, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// powerOffCreateRequest creates the PowerOff request. +func (client *VirtualMachineScaleSetsClient) powerOffCreateRequest(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsClientBeginPowerOffOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/poweroff" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if vmScaleSetName == "" { + return nil, errors.New("parameter vmScaleSetName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmScaleSetName}", url.PathEscape(vmScaleSetName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.SkipShutdown != nil { + reqQP.Set("skipShutdown", strconv.FormatBool(*options.SkipShutdown)) + } + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + if options != nil && options.VMInstanceIDs != nil { + return req, runtime.MarshalAsJSON(req, *options.VMInstanceIDs) + } + return req, nil +} + +// BeginRedeploy - Shuts down all the virtual machines in the virtual machine scale set, moves them to a new node, and powers +// them back on. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. 
+// vmScaleSetName - The name of the VM scale set.
+// options - VirtualMachineScaleSetsClientBeginRedeployOptions contains the optional parameters for the VirtualMachineScaleSetsClient.BeginRedeploy
+// method.
+func (client *VirtualMachineScaleSetsClient) BeginRedeploy(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsClientBeginRedeployOptions) (*runtime.Poller[VirtualMachineScaleSetsClientRedeployResponse], error) {
+	if options == nil || options.ResumeToken == "" {
+		resp, err := client.redeploy(ctx, resourceGroupName, vmScaleSetName, options)
+		if err != nil {
+			return nil, err
+		}
+		return runtime.NewPoller[VirtualMachineScaleSetsClientRedeployResponse](resp, client.pl, nil)
+	} else {
+		return runtime.NewPollerFromResumeToken[VirtualMachineScaleSetsClientRedeployResponse](options.ResumeToken, client.pl, nil)
+	}
+}
+
+// Redeploy - Shuts down all the virtual machines in the virtual machine scale set, moves them to a new node, and powers them
+// back on.
+// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2022-03-01
+func (client *VirtualMachineScaleSetsClient) redeploy(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsClientBeginRedeployOptions) (*http.Response, error) {
+	req, err := client.redeployCreateRequest(ctx, resourceGroupName, vmScaleSetName, options)
+	if err != nil {
+		return nil, err
+	}
+	resp, err := client.pl.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) {
+		return nil, runtime.NewResponseError(resp)
+	}
+	return resp, nil
+}
+
+// redeployCreateRequest creates the Redeploy request.
+func (client *VirtualMachineScaleSetsClient) redeployCreateRequest(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsClientBeginRedeployOptions) (*policy.Request, error) {
+	urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/redeploy"
+	if resourceGroupName == "" {
+		return nil, errors.New("parameter resourceGroupName cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName))
+	if vmScaleSetName == "" {
+		return nil, errors.New("parameter vmScaleSetName cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{vmScaleSetName}", url.PathEscape(vmScaleSetName))
+	if client.subscriptionID == "" {
+		return nil, errors.New("parameter client.subscriptionID cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID))
+	req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath))
+	if err != nil {
+		return nil, err
+	}
+	reqQP := req.Raw().URL.Query()
+	reqQP.Set("api-version", "2022-03-01")
+	req.Raw().URL.RawQuery = reqQP.Encode()
+	req.Raw().Header["Accept"] = []string{"application/json"}
+	if options != nil && options.VMInstanceIDs != nil {
+		return req, runtime.MarshalAsJSON(req, *options.VMInstanceIDs)
+	}
+	return req, nil
+}
+
+// BeginReimage - Reimages (upgrades the operating system on) one or more virtual machines in a VM scale set that don't have
+// an ephemeral OS disk; for virtual machines that have an ephemeral OS disk, the virtual machine is
+// reset to its initial state.
+// If the operation fails it returns an *azcore.ResponseError type.
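+//
+// A hypothetical usage sketch (not part of the generated client; placeholder
+// names assumed): passing nil options sends no request body, while the optional
+// VMScaleSetReimageInput (see reimageCreateRequest below) can scope the
+// operation to specific instances:
+//
+//	poller, err := client.BeginReimage(ctx, "myResourceGroup", "myScaleSet", nil)
+//	if err != nil {
+//		// handle error
+//	}
+//	_, err = poller.PollUntilDone(ctx, nil)
+//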
+// Generated from API version 2022-03-01
+// resourceGroupName - The name of the resource group.
+// vmScaleSetName - The name of the VM scale set.
+// options - VirtualMachineScaleSetsClientBeginReimageOptions contains the optional parameters for the VirtualMachineScaleSetsClient.BeginReimage
+// method.
+func (client *VirtualMachineScaleSetsClient) BeginReimage(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsClientBeginReimageOptions) (*runtime.Poller[VirtualMachineScaleSetsClientReimageResponse], error) {
+	if options == nil || options.ResumeToken == "" {
+		resp, err := client.reimage(ctx, resourceGroupName, vmScaleSetName, options)
+		if err != nil {
+			return nil, err
+		}
+		return runtime.NewPoller[VirtualMachineScaleSetsClientReimageResponse](resp, client.pl, nil)
+	} else {
+		return runtime.NewPollerFromResumeToken[VirtualMachineScaleSetsClientReimageResponse](options.ResumeToken, client.pl, nil)
+	}
+}
+
+// Reimage - Reimages (upgrades the operating system on) one or more virtual machines in a VM scale set that don't have an ephemeral
+// OS disk; for virtual machines that have an ephemeral OS disk, the virtual machine is
+// reset to its initial state.
+// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2022-03-01
+func (client *VirtualMachineScaleSetsClient) reimage(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsClientBeginReimageOptions) (*http.Response, error) {
+	req, err := client.reimageCreateRequest(ctx, resourceGroupName, vmScaleSetName, options)
+	if err != nil {
+		return nil, err
+	}
+	resp, err := client.pl.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) {
+		return nil, runtime.NewResponseError(resp)
+	}
+	return resp, nil
+}
+
+// reimageCreateRequest creates the Reimage request.
+func (client *VirtualMachineScaleSetsClient) reimageCreateRequest(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsClientBeginReimageOptions) (*policy.Request, error) {
+	urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/reimage"
+	if resourceGroupName == "" {
+		return nil, errors.New("parameter resourceGroupName cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName))
+	if vmScaleSetName == "" {
+		return nil, errors.New("parameter vmScaleSetName cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{vmScaleSetName}", url.PathEscape(vmScaleSetName))
+	if client.subscriptionID == "" {
+		return nil, errors.New("parameter client.subscriptionID cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID))
+	req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath))
+	if err != nil {
+		return nil, err
+	}
+	reqQP := req.Raw().URL.Query()
+	reqQP.Set("api-version", "2022-03-01")
+	req.Raw().URL.RawQuery = reqQP.Encode()
+	req.Raw().Header["Accept"] = []string{"application/json"}
+	if options != nil && options.VMScaleSetReimageInput != nil {
+		return req, runtime.MarshalAsJSON(req, *options.VMScaleSetReimageInput)
+	}
+	return req, nil
+}
+
+// BeginReimageAll - Reimages all the disks (including data disks) in the virtual machines in a VM scale set. This operation
+// is only supported for managed disks.
+// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2022-03-01
+// resourceGroupName - The name of the resource group.
+// vmScaleSetName - The name of the VM scale set.
+// options - VirtualMachineScaleSetsClientBeginReimageAllOptions contains the optional parameters for the VirtualMachineScaleSetsClient.BeginReimageAll
+// method.
+func (client *VirtualMachineScaleSetsClient) BeginReimageAll(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsClientBeginReimageAllOptions) (*runtime.Poller[VirtualMachineScaleSetsClientReimageAllResponse], error) {
+	if options == nil || options.ResumeToken == "" {
+		resp, err := client.reimageAll(ctx, resourceGroupName, vmScaleSetName, options)
+		if err != nil {
+			return nil, err
+		}
+		return runtime.NewPoller[VirtualMachineScaleSetsClientReimageAllResponse](resp, client.pl, nil)
+	} else {
+		return runtime.NewPollerFromResumeToken[VirtualMachineScaleSetsClientReimageAllResponse](options.ResumeToken, client.pl, nil)
+	}
+}
+
+// ReimageAll - Reimages all the disks (including data disks) in the virtual machines in a VM scale set. This operation
+// is only supported for managed disks.
+// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2022-03-01
+func (client *VirtualMachineScaleSetsClient) reimageAll(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsClientBeginReimageAllOptions) (*http.Response, error) {
+	req, err := client.reimageAllCreateRequest(ctx, resourceGroupName, vmScaleSetName, options)
+	if err != nil {
+		return nil, err
+	}
+	resp, err := client.pl.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) {
+		return nil, runtime.NewResponseError(resp)
+	}
+	return resp, nil
+}
+
+// reimageAllCreateRequest creates the ReimageAll request.
+func (client *VirtualMachineScaleSetsClient) reimageAllCreateRequest(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsClientBeginReimageAllOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/reimageall" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if vmScaleSetName == "" { + return nil, errors.New("parameter vmScaleSetName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmScaleSetName}", url.PathEscape(vmScaleSetName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + if options != nil && options.VMInstanceIDs != nil { + return req, runtime.MarshalAsJSON(req, *options.VMInstanceIDs) + } + return req, nil +} + +// BeginRestart - Restarts one or more virtual machines in a VM scale set. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// vmScaleSetName - The name of the VM scale set. +// options - VirtualMachineScaleSetsClientBeginRestartOptions contains the optional parameters for the VirtualMachineScaleSetsClient.BeginRestart +// method. +func (client *VirtualMachineScaleSetsClient) BeginRestart(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsClientBeginRestartOptions) (*runtime.Poller[VirtualMachineScaleSetsClientRestartResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.restart(ctx, resourceGroupName, vmScaleSetName, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[VirtualMachineScaleSetsClientRestartResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[VirtualMachineScaleSetsClientRestartResponse](options.ResumeToken, client.pl, nil) + } +} + +// Restart - Restarts one or more virtual machines in a VM scale set. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +func (client *VirtualMachineScaleSetsClient) restart(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsClientBeginRestartOptions) (*http.Response, error) { + req, err := client.restartCreateRequest(ctx, resourceGroupName, vmScaleSetName, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// restartCreateRequest creates the Restart request. 
+func (client *VirtualMachineScaleSetsClient) restartCreateRequest(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsClientBeginRestartOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/restart" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if vmScaleSetName == "" { + return nil, errors.New("parameter vmScaleSetName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmScaleSetName}", url.PathEscape(vmScaleSetName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + if options != nil && options.VMInstanceIDs != nil { + return req, runtime.MarshalAsJSON(req, *options.VMInstanceIDs) + } + return req, nil +} + +// BeginSetOrchestrationServiceState - Changes the ServiceState property for a given service. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// vmScaleSetName - The name of the virtual machine scale set. +// parameters - The input object for SetOrchestrationServiceState API. +// options - VirtualMachineScaleSetsClientBeginSetOrchestrationServiceStateOptions contains the optional parameters for the +// VirtualMachineScaleSetsClient.BeginSetOrchestrationServiceState method. +func (client *VirtualMachineScaleSetsClient) BeginSetOrchestrationServiceState(ctx context.Context, resourceGroupName string, vmScaleSetName string, parameters OrchestrationServiceStateInput, options *VirtualMachineScaleSetsClientBeginSetOrchestrationServiceStateOptions) (*runtime.Poller[VirtualMachineScaleSetsClientSetOrchestrationServiceStateResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.setOrchestrationServiceState(ctx, resourceGroupName, vmScaleSetName, parameters, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[VirtualMachineScaleSetsClientSetOrchestrationServiceStateResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[VirtualMachineScaleSetsClientSetOrchestrationServiceStateResponse](options.ResumeToken, client.pl, nil) + } +} + +// SetOrchestrationServiceState - Changes the ServiceState property for a given service. +// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2022-03-01 +func (client *VirtualMachineScaleSetsClient) setOrchestrationServiceState(ctx context.Context, resourceGroupName string, vmScaleSetName string, parameters OrchestrationServiceStateInput, options *VirtualMachineScaleSetsClientBeginSetOrchestrationServiceStateOptions) (*http.Response, error) { + req, err := client.setOrchestrationServiceStateCreateRequest(ctx, resourceGroupName, vmScaleSetName, parameters, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// setOrchestrationServiceStateCreateRequest creates the SetOrchestrationServiceState request. +func (client *VirtualMachineScaleSetsClient) setOrchestrationServiceStateCreateRequest(ctx context.Context, resourceGroupName string, vmScaleSetName string, parameters OrchestrationServiceStateInput, options *VirtualMachineScaleSetsClientBeginSetOrchestrationServiceStateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/setOrchestrationServiceState" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if vmScaleSetName == "" { + return nil, errors.New("parameter vmScaleSetName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmScaleSetName}", url.PathEscape(vmScaleSetName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, parameters) +} + +// BeginStart - Starts one or more virtual machines in a VM scale set. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// vmScaleSetName - The name of the VM scale set. +// options - VirtualMachineScaleSetsClientBeginStartOptions contains the optional parameters for the VirtualMachineScaleSetsClient.BeginStart +// method. +func (client *VirtualMachineScaleSetsClient) BeginStart(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsClientBeginStartOptions) (*runtime.Poller[VirtualMachineScaleSetsClientStartResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.start(ctx, resourceGroupName, vmScaleSetName, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[VirtualMachineScaleSetsClientStartResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[VirtualMachineScaleSetsClientStartResponse](options.ResumeToken, client.pl, nil) + } +} + +// Start - Starts one or more virtual machines in a VM scale set. +// If the operation fails it returns an *azcore.ResponseError type. 
+// Generated from API version 2022-03-01 +func (client *VirtualMachineScaleSetsClient) start(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsClientBeginStartOptions) (*http.Response, error) { + req, err := client.startCreateRequest(ctx, resourceGroupName, vmScaleSetName, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// startCreateRequest creates the Start request. +func (client *VirtualMachineScaleSetsClient) startCreateRequest(ctx context.Context, resourceGroupName string, vmScaleSetName string, options *VirtualMachineScaleSetsClientBeginStartOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/start" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if vmScaleSetName == "" { + return nil, errors.New("parameter vmScaleSetName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmScaleSetName}", url.PathEscape(vmScaleSetName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + if options != nil && options.VMInstanceIDs != nil { + return req, runtime.MarshalAsJSON(req, *options.VMInstanceIDs) + } + return req, nil +} + +// BeginUpdate - Update a VM scale set. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// vmScaleSetName - The name of the VM scale set to create or update. +// parameters - The scale set object. +// options - VirtualMachineScaleSetsClientBeginUpdateOptions contains the optional parameters for the VirtualMachineScaleSetsClient.BeginUpdate +// method. +func (client *VirtualMachineScaleSetsClient) BeginUpdate(ctx context.Context, resourceGroupName string, vmScaleSetName string, parameters VirtualMachineScaleSetUpdate, options *VirtualMachineScaleSetsClientBeginUpdateOptions) (*runtime.Poller[VirtualMachineScaleSetsClientUpdateResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.update(ctx, resourceGroupName, vmScaleSetName, parameters, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[VirtualMachineScaleSetsClientUpdateResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[VirtualMachineScaleSetsClientUpdateResponse](options.ResumeToken, client.pl, nil) + } +} + +// Update - Update a VM scale set. +// If the operation fails it returns an *azcore.ResponseError type. 
+// Generated from API version 2022-03-01 +func (client *VirtualMachineScaleSetsClient) update(ctx context.Context, resourceGroupName string, vmScaleSetName string, parameters VirtualMachineScaleSetUpdate, options *VirtualMachineScaleSetsClientBeginUpdateOptions) (*http.Response, error) { + req, err := client.updateCreateRequest(ctx, resourceGroupName, vmScaleSetName, parameters, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// updateCreateRequest creates the Update request. +func (client *VirtualMachineScaleSetsClient) updateCreateRequest(ctx context.Context, resourceGroupName string, vmScaleSetName string, parameters VirtualMachineScaleSetUpdate, options *VirtualMachineScaleSetsClientBeginUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if vmScaleSetName == "" { + return nil, errors.New("parameter vmScaleSetName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmScaleSetName}", url.PathEscape(vmScaleSetName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, parameters) +} + +// BeginUpdateInstances - Upgrades one or more virtual machines to the latest SKU set in the VM scale set model. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// vmScaleSetName - The name of the VM scale set. +// vmInstanceIDs - A list of virtual machine instance IDs from the VM scale set. +// options - VirtualMachineScaleSetsClientBeginUpdateInstancesOptions contains the optional parameters for the VirtualMachineScaleSetsClient.BeginUpdateInstances +// method. 
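Every Begin* method in this file branches on options.ResumeToken: an empty token starts a new operation, while a saved token rehydrates a poller for one already in flight. A hedged sketch of that round trip for the BeginUpdateInstances method documented above (the resource names are placeholders, and persisting the token is left abstract):

import (
    "context"

    "github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
    "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute"
)

func updateInstancesWithResume(ctx context.Context, client *armcompute.VirtualMachineScaleSetsClient) error {
    ids := armcompute.VirtualMachineScaleSetVMInstanceRequiredIDs{
        InstanceIDs: []*string{to.Ptr("0")},
    }
    poller, err := client.BeginUpdateInstances(ctx, "myResourceGroup", "myScaleSet", ids, nil)
    if err != nil {
        return err
    }
    // Save the token somewhere durable so another process can take over.
    token, err := poller.ResumeToken()
    if err != nil {
        return err
    }
    // Later, possibly after a restart: rebuild the poller from the saved token.
    resumed, err := client.BeginUpdateInstances(ctx, "myResourceGroup", "myScaleSet", ids,
        &armcompute.VirtualMachineScaleSetsClientBeginUpdateInstancesOptions{ResumeToken: token})
    if err != nil {
        return err
    }
    _, err = resumed.PollUntilDone(ctx, nil)
    return err
}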
+func (client *VirtualMachineScaleSetsClient) BeginUpdateInstances(ctx context.Context, resourceGroupName string, vmScaleSetName string, vmInstanceIDs VirtualMachineScaleSetVMInstanceRequiredIDs, options *VirtualMachineScaleSetsClientBeginUpdateInstancesOptions) (*runtime.Poller[VirtualMachineScaleSetsClientUpdateInstancesResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.updateInstances(ctx, resourceGroupName, vmScaleSetName, vmInstanceIDs, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[VirtualMachineScaleSetsClientUpdateInstancesResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[VirtualMachineScaleSetsClientUpdateInstancesResponse](options.ResumeToken, client.pl, nil) + } +} + +// UpdateInstances - Upgrades one or more virtual machines to the latest SKU set in the VM scale set model. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +func (client *VirtualMachineScaleSetsClient) updateInstances(ctx context.Context, resourceGroupName string, vmScaleSetName string, vmInstanceIDs VirtualMachineScaleSetVMInstanceRequiredIDs, options *VirtualMachineScaleSetsClientBeginUpdateInstancesOptions) (*http.Response, error) { + req, err := client.updateInstancesCreateRequest(ctx, resourceGroupName, vmScaleSetName, vmInstanceIDs, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// updateInstancesCreateRequest creates the UpdateInstances request. +func (client *VirtualMachineScaleSetsClient) updateInstancesCreateRequest(ctx context.Context, resourceGroupName string, vmScaleSetName string, vmInstanceIDs VirtualMachineScaleSetVMInstanceRequiredIDs, options *VirtualMachineScaleSetsClientBeginUpdateInstancesOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/manualupgrade" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if vmScaleSetName == "" { + return nil, errors.New("parameter vmScaleSetName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmScaleSetName}", url.PathEscape(vmScaleSetName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, vmInstanceIDs) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_virtualmachinescalesetvmextensions_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_virtualmachinescalesetvmextensions_client.go new file mode 100644 index 000000000..eb3d4c839 --- /dev/null +++ 
b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_virtualmachinescalesetvmextensions_client.go @@ -0,0 +1,412 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armcompute + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + armruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strings" +) + +// VirtualMachineScaleSetVMExtensionsClient contains the methods for the VirtualMachineScaleSetVMExtensions group. +// Don't use this type directly, use NewVirtualMachineScaleSetVMExtensionsClient() instead. +type VirtualMachineScaleSetVMExtensionsClient struct { + host string + subscriptionID string + pl runtime.Pipeline +} + +// NewVirtualMachineScaleSetVMExtensionsClient creates a new instance of VirtualMachineScaleSetVMExtensionsClient with the specified values. +// subscriptionID - Subscription credentials which uniquely identify the Microsoft Azure subscription. The subscription ID forms +// part of the URI for every service call. +// credential - used to authorize requests. Usually a credential from azidentity. +// options - pass nil to accept the default values. +func NewVirtualMachineScaleSetVMExtensionsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*VirtualMachineScaleSetVMExtensionsClient, error) { + if options == nil { + options = &arm.ClientOptions{} + } + ep := cloud.AzurePublic.Services[cloud.ResourceManager].Endpoint + if c, ok := options.Cloud.Services[cloud.ResourceManager]; ok { + ep = c.Endpoint + } + pl, err := armruntime.NewPipeline(moduleName, moduleVersion, credential, runtime.PipelineOptions{}, options) + if err != nil { + return nil, err + } + client := &VirtualMachineScaleSetVMExtensionsClient{ + subscriptionID: subscriptionID, + host: ep, + pl: pl, + } + return client, nil +} + +// BeginCreateOrUpdate - The operation to create or update the VMSS VM extension. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// vmScaleSetName - The name of the VM scale set. +// instanceID - The instance ID of the virtual machine. +// vmExtensionName - The name of the virtual machine extension. +// extensionParameters - Parameters supplied to the Create Virtual Machine Extension operation. +// options - VirtualMachineScaleSetVMExtensionsClientBeginCreateOrUpdateOptions contains the optional parameters for the VirtualMachineScaleSetVMExtensionsClient.BeginCreateOrUpdate +// method.
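The constructor above resolves its endpoint from options.Cloud and falls back to the public cloud, so pointing the client at a sovereign cloud is a matter of passing a cloud configuration through arm.ClientOptions. A sketch under the assumption that cloud.AzureGovernment is the right configuration for the target environment and that azidentity is available:

import (
    "github.com/Azure/azure-sdk-for-go/sdk/azcore"
    "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
    "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud"
    "github.com/Azure/azure-sdk-for-go/sdk/azidentity"
    "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute"
)

func newGovCloudExtensionsClient() (*armcompute.VirtualMachineScaleSetVMExtensionsClient, error) {
    cred, err := azidentity.NewDefaultAzureCredential(nil)
    if err != nil {
        return nil, err
    }
    opts := &arm.ClientOptions{
        ClientOptions: azcore.ClientOptions{Cloud: cloud.AzureGovernment},
    }
    // The client resolves its host from opts.Cloud instead of cloud.AzurePublic.
    return armcompute.NewVirtualMachineScaleSetVMExtensionsClient("<subscription-id>", cred, opts)
}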
+func (client *VirtualMachineScaleSetVMExtensionsClient) BeginCreateOrUpdate(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, vmExtensionName string, extensionParameters VirtualMachineScaleSetVMExtension, options *VirtualMachineScaleSetVMExtensionsClientBeginCreateOrUpdateOptions) (*runtime.Poller[VirtualMachineScaleSetVMExtensionsClientCreateOrUpdateResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.createOrUpdate(ctx, resourceGroupName, vmScaleSetName, instanceID, vmExtensionName, extensionParameters, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[VirtualMachineScaleSetVMExtensionsClientCreateOrUpdateResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[VirtualMachineScaleSetVMExtensionsClientCreateOrUpdateResponse](options.ResumeToken, client.pl, nil) + } +} + +// CreateOrUpdate - The operation to create or update the VMSS VM extension. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +func (client *VirtualMachineScaleSetVMExtensionsClient) createOrUpdate(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, vmExtensionName string, extensionParameters VirtualMachineScaleSetVMExtension, options *VirtualMachineScaleSetVMExtensionsClientBeginCreateOrUpdateOptions) (*http.Response, error) { + req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, vmScaleSetName, instanceID, vmExtensionName, extensionParameters, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// createOrUpdateCreateRequest creates the CreateOrUpdate request. 
+func (client *VirtualMachineScaleSetVMExtensionsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, vmExtensionName string, extensionParameters VirtualMachineScaleSetVMExtension, options *VirtualMachineScaleSetVMExtensionsClientBeginCreateOrUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualMachines/{instanceId}/extensions/{vmExtensionName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if vmScaleSetName == "" { + return nil, errors.New("parameter vmScaleSetName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmScaleSetName}", url.PathEscape(vmScaleSetName)) + if instanceID == "" { + return nil, errors.New("parameter instanceID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{instanceId}", url.PathEscape(instanceID)) + if vmExtensionName == "" { + return nil, errors.New("parameter vmExtensionName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmExtensionName}", url.PathEscape(vmExtensionName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, extensionParameters) +} + +// BeginDelete - The operation to delete the VMSS VM extension. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// vmScaleSetName - The name of the VM scale set. +// instanceID - The instance ID of the virtual machine. +// vmExtensionName - The name of the virtual machine extension. +// options - VirtualMachineScaleSetVMExtensionsClientBeginDeleteOptions contains the optional parameters for the VirtualMachineScaleSetVMExtensionsClient.BeginDelete +// method. +func (client *VirtualMachineScaleSetVMExtensionsClient) BeginDelete(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, vmExtensionName string, options *VirtualMachineScaleSetVMExtensionsClientBeginDeleteOptions) (*runtime.Poller[VirtualMachineScaleSetVMExtensionsClientDeleteResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.deleteOperation(ctx, resourceGroupName, vmScaleSetName, instanceID, vmExtensionName, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[VirtualMachineScaleSetVMExtensionsClientDeleteResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[VirtualMachineScaleSetVMExtensionsClientDeleteResponse](options.ResumeToken, client.pl, nil) + } +} + +// Delete - The operation to delete the VMSS VM extension. +// If the operation fails it returns an *azcore.ResponseError type. 
+// Generated from API version 2022-03-01 +func (client *VirtualMachineScaleSetVMExtensionsClient) deleteOperation(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, vmExtensionName string, options *VirtualMachineScaleSetVMExtensionsClientBeginDeleteOptions) (*http.Response, error) { + req, err := client.deleteCreateRequest(ctx, resourceGroupName, vmScaleSetName, instanceID, vmExtensionName, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted, http.StatusNoContent) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// deleteCreateRequest creates the Delete request. +func (client *VirtualMachineScaleSetVMExtensionsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, vmExtensionName string, options *VirtualMachineScaleSetVMExtensionsClientBeginDeleteOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualMachines/{instanceId}/extensions/{vmExtensionName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if vmScaleSetName == "" { + return nil, errors.New("parameter vmScaleSetName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmScaleSetName}", url.PathEscape(vmScaleSetName)) + if instanceID == "" { + return nil, errors.New("parameter instanceID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{instanceId}", url.PathEscape(instanceID)) + if vmExtensionName == "" { + return nil, errors.New("parameter vmExtensionName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmExtensionName}", url.PathEscape(vmExtensionName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// Get - The operation to get the VMSS VM extension. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// vmScaleSetName - The name of the VM scale set. +// instanceID - The instance ID of the virtual machine. +// vmExtensionName - The name of the virtual machine extension. +// options - VirtualMachineScaleSetVMExtensionsClientGetOptions contains the optional parameters for the VirtualMachineScaleSetVMExtensionsClient.Get +// method. 
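Get only adds the $expand query parameter when options.Expand is set, as the request builder further below shows. A minimal sketch of asking for an expanded view ("instanceView" as the expand value is an assumption, as are the resource names):

import (
    "context"
    "log"

    "github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
    "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute"
)

func getExtensionExpanded(ctx context.Context, client *armcompute.VirtualMachineScaleSetVMExtensionsClient) error {
    resp, err := client.Get(ctx, "myResourceGroup", "myScaleSet", "0", "myExtension",
        &armcompute.VirtualMachineScaleSetVMExtensionsClientGetOptions{
            Expand: to.Ptr("instanceView"), // hypothetical expand value
        })
    if err != nil {
        return err
    }
    if resp.Name != nil {
        log.Printf("got extension %s", *resp.Name)
    }
    return nil
}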
+func (client *VirtualMachineScaleSetVMExtensionsClient) Get(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, vmExtensionName string, options *VirtualMachineScaleSetVMExtensionsClientGetOptions) (VirtualMachineScaleSetVMExtensionsClientGetResponse, error) { + req, err := client.getCreateRequest(ctx, resourceGroupName, vmScaleSetName, instanceID, vmExtensionName, options) + if err != nil { + return VirtualMachineScaleSetVMExtensionsClientGetResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return VirtualMachineScaleSetVMExtensionsClientGetResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return VirtualMachineScaleSetVMExtensionsClientGetResponse{}, runtime.NewResponseError(resp) + } + return client.getHandleResponse(resp) +} + +// getCreateRequest creates the Get request. +func (client *VirtualMachineScaleSetVMExtensionsClient) getCreateRequest(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, vmExtensionName string, options *VirtualMachineScaleSetVMExtensionsClientGetOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualMachines/{instanceId}/extensions/{vmExtensionName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if vmScaleSetName == "" { + return nil, errors.New("parameter vmScaleSetName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmScaleSetName}", url.PathEscape(vmScaleSetName)) + if instanceID == "" { + return nil, errors.New("parameter instanceID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{instanceId}", url.PathEscape(instanceID)) + if vmExtensionName == "" { + return nil, errors.New("parameter vmExtensionName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmExtensionName}", url.PathEscape(vmExtensionName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Expand != nil { + reqQP.Set("$expand", *options.Expand) + } + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getHandleResponse handles the Get response. +func (client *VirtualMachineScaleSetVMExtensionsClient) getHandleResponse(resp *http.Response) (VirtualMachineScaleSetVMExtensionsClientGetResponse, error) { + result := VirtualMachineScaleSetVMExtensionsClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.VirtualMachineScaleSetVMExtension); err != nil { + return VirtualMachineScaleSetVMExtensionsClientGetResponse{}, err + } + return result, nil +} + +// List - The operation to get all extensions of an instance in a virtual machine scale set. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// vmScaleSetName - The name of the VM scale set.
+// instanceID - The instance ID of the virtual machine. +// options - VirtualMachineScaleSetVMExtensionsClientListOptions contains the optional parameters for the VirtualMachineScaleSetVMExtensionsClient.List +// method. +func (client *VirtualMachineScaleSetVMExtensionsClient) List(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *VirtualMachineScaleSetVMExtensionsClientListOptions) (VirtualMachineScaleSetVMExtensionsClientListResponse, error) { + req, err := client.listCreateRequest(ctx, resourceGroupName, vmScaleSetName, instanceID, options) + if err != nil { + return VirtualMachineScaleSetVMExtensionsClientListResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return VirtualMachineScaleSetVMExtensionsClientListResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return VirtualMachineScaleSetVMExtensionsClientListResponse{}, runtime.NewResponseError(resp) + } + return client.listHandleResponse(resp) +} + +// listCreateRequest creates the List request. +func (client *VirtualMachineScaleSetVMExtensionsClient) listCreateRequest(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *VirtualMachineScaleSetVMExtensionsClientListOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualMachines/{instanceId}/extensions" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if vmScaleSetName == "" { + return nil, errors.New("parameter vmScaleSetName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmScaleSetName}", url.PathEscape(vmScaleSetName)) + if instanceID == "" { + return nil, errors.New("parameter instanceID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{instanceId}", url.PathEscape(instanceID)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Expand != nil { + reqQP.Set("$expand", *options.Expand) + } + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listHandleResponse handles the List response. +func (client *VirtualMachineScaleSetVMExtensionsClient) listHandleResponse(resp *http.Response) (VirtualMachineScaleSetVMExtensionsClientListResponse, error) { + result := VirtualMachineScaleSetVMExtensionsClientListResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.VirtualMachineScaleSetVMExtensionsListResult); err != nil { + return VirtualMachineScaleSetVMExtensionsClientListResponse{}, err + } + return result, nil +} + +// BeginUpdate - The operation to update the VMSS VM extension. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// vmScaleSetName - The name of the VM scale set. +// instanceID - The instance ID of the virtual machine. 
+// vmExtensionName - The name of the virtual machine extension. +// extensionParameters - Parameters supplied to the Update Virtual Machine Extension operation. +// options - VirtualMachineScaleSetVMExtensionsClientBeginUpdateOptions contains the optional parameters for the VirtualMachineScaleSetVMExtensionsClient.BeginUpdate +// method. +func (client *VirtualMachineScaleSetVMExtensionsClient) BeginUpdate(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, vmExtensionName string, extensionParameters VirtualMachineScaleSetVMExtensionUpdate, options *VirtualMachineScaleSetVMExtensionsClientBeginUpdateOptions) (*runtime.Poller[VirtualMachineScaleSetVMExtensionsClientUpdateResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.update(ctx, resourceGroupName, vmScaleSetName, instanceID, vmExtensionName, extensionParameters, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[VirtualMachineScaleSetVMExtensionsClientUpdateResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[VirtualMachineScaleSetVMExtensionsClientUpdateResponse](options.ResumeToken, client.pl, nil) + } +} + +// Update - The operation to update the VMSS VM extension. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +func (client *VirtualMachineScaleSetVMExtensionsClient) update(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, vmExtensionName string, extensionParameters VirtualMachineScaleSetVMExtensionUpdate, options *VirtualMachineScaleSetVMExtensionsClientBeginUpdateOptions) (*http.Response, error) { + req, err := client.updateCreateRequest(ctx, resourceGroupName, vmScaleSetName, instanceID, vmExtensionName, extensionParameters, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// updateCreateRequest creates the Update request. 
+func (client *VirtualMachineScaleSetVMExtensionsClient) updateCreateRequest(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, vmExtensionName string, extensionParameters VirtualMachineScaleSetVMExtensionUpdate, options *VirtualMachineScaleSetVMExtensionsClientBeginUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualMachines/{instanceId}/extensions/{vmExtensionName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if vmScaleSetName == "" { + return nil, errors.New("parameter vmScaleSetName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmScaleSetName}", url.PathEscape(vmScaleSetName)) + if instanceID == "" { + return nil, errors.New("parameter instanceID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{instanceId}", url.PathEscape(instanceID)) + if vmExtensionName == "" { + return nil, errors.New("parameter vmExtensionName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmExtensionName}", url.PathEscape(vmExtensionName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, extensionParameters) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_virtualmachinescalesetvmruncommands_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_virtualmachinescalesetvmruncommands_client.go new file mode 100644 index 000000000..0476b3573 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_virtualmachinescalesetvmruncommands_client.go @@ -0,0 +1,425 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armcompute + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + armruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strings" +) + +// VirtualMachineScaleSetVMRunCommandsClient contains the methods for the VirtualMachineScaleSetVMRunCommands group. +// Don't use this type directly, use NewVirtualMachineScaleSetVMRunCommandsClient() instead. 
+type VirtualMachineScaleSetVMRunCommandsClient struct { + host string + subscriptionID string + pl runtime.Pipeline +} + +// NewVirtualMachineScaleSetVMRunCommandsClient creates a new instance of VirtualMachineScaleSetVMRunCommandsClient with the specified values. +// subscriptionID - Subscription credentials which uniquely identify the Microsoft Azure subscription. The subscription ID forms +// part of the URI for every service call. +// credential - used to authorize requests. Usually a credential from azidentity. +// options - pass nil to accept the default values. +func NewVirtualMachineScaleSetVMRunCommandsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*VirtualMachineScaleSetVMRunCommandsClient, error) { + if options == nil { + options = &arm.ClientOptions{} + } + ep := cloud.AzurePublic.Services[cloud.ResourceManager].Endpoint + if c, ok := options.Cloud.Services[cloud.ResourceManager]; ok { + ep = c.Endpoint + } + pl, err := armruntime.NewPipeline(moduleName, moduleVersion, credential, runtime.PipelineOptions{}, options) + if err != nil { + return nil, err + } + client := &VirtualMachineScaleSetVMRunCommandsClient{ + subscriptionID: subscriptionID, + host: ep, + pl: pl, + } + return client, nil +} + +// BeginCreateOrUpdate - The operation to create or update the VMSS VM run command. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// vmScaleSetName - The name of the VM scale set. +// instanceID - The instance ID of the virtual machine. +// runCommandName - The name of the virtual machine run command. +// runCommand - Parameters supplied to the Create Virtual Machine RunCommand operation. +// options - VirtualMachineScaleSetVMRunCommandsClientBeginCreateOrUpdateOptions contains the optional parameters for the +// VirtualMachineScaleSetVMRunCommandsClient.BeginCreateOrUpdate method. +func (client *VirtualMachineScaleSetVMRunCommandsClient) BeginCreateOrUpdate(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, runCommandName string, runCommand VirtualMachineRunCommand, options *VirtualMachineScaleSetVMRunCommandsClientBeginCreateOrUpdateOptions) (*runtime.Poller[VirtualMachineScaleSetVMRunCommandsClientCreateOrUpdateResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.createOrUpdate(ctx, resourceGroupName, vmScaleSetName, instanceID, runCommandName, runCommand, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[VirtualMachineScaleSetVMRunCommandsClientCreateOrUpdateResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[VirtualMachineScaleSetVMRunCommandsClientCreateOrUpdateResponse](options.ResumeToken, client.pl, nil) + } +} + +// CreateOrUpdate - The operation to create or update the VMSS VM run command. +// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2022-03-01 +func (client *VirtualMachineScaleSetVMRunCommandsClient) createOrUpdate(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, runCommandName string, runCommand VirtualMachineRunCommand, options *VirtualMachineScaleSetVMRunCommandsClientBeginCreateOrUpdateOptions) (*http.Response, error) { + req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, vmScaleSetName, instanceID, runCommandName, runCommand, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// createOrUpdateCreateRequest creates the CreateOrUpdate request. +func (client *VirtualMachineScaleSetVMRunCommandsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, runCommandName string, runCommand VirtualMachineRunCommand, options *VirtualMachineScaleSetVMRunCommandsClientBeginCreateOrUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualMachines/{instanceId}/runCommands/{runCommandName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if vmScaleSetName == "" { + return nil, errors.New("parameter vmScaleSetName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmScaleSetName}", url.PathEscape(vmScaleSetName)) + if instanceID == "" { + return nil, errors.New("parameter instanceID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{instanceId}", url.PathEscape(instanceID)) + if runCommandName == "" { + return nil, errors.New("parameter runCommandName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{runCommandName}", url.PathEscape(runCommandName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json, text/json"} + return req, runtime.MarshalAsJSON(req, runCommand) +} + +// BeginDelete - The operation to delete the VMSS VM run command. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// vmScaleSetName - The name of the VM scale set. +// instanceID - The instance ID of the virtual machine. +// runCommandName - The name of the virtual machine run command. +// options - VirtualMachineScaleSetVMRunCommandsClientBeginDeleteOptions contains the optional parameters for the VirtualMachineScaleSetVMRunCommandsClient.BeginDelete +// method. 
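BeginDelete treats 200, 202, and 204 as success and, like the other long-running operations here, hands back a poller that can be driven at a custom cadence. A sketch (the ten-second frequency and the resource names are arbitrary choices):

import (
    "context"
    "time"

    "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
    "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute"
)

func deleteRunCommand(ctx context.Context, client *armcompute.VirtualMachineScaleSetVMRunCommandsClient) error {
    poller, err := client.BeginDelete(ctx, "myResourceGroup", "myScaleSet", "0", "myRunCommand", nil)
    if err != nil {
        return err
    }
    // Poll every ten seconds rather than at the service-suggested interval.
    _, err = poller.PollUntilDone(ctx, &runtime.PollUntilDoneOptions{Frequency: 10 * time.Second})
    return err
}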
+func (client *VirtualMachineScaleSetVMRunCommandsClient) BeginDelete(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, runCommandName string, options *VirtualMachineScaleSetVMRunCommandsClientBeginDeleteOptions) (*runtime.Poller[VirtualMachineScaleSetVMRunCommandsClientDeleteResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.deleteOperation(ctx, resourceGroupName, vmScaleSetName, instanceID, runCommandName, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[VirtualMachineScaleSetVMRunCommandsClientDeleteResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[VirtualMachineScaleSetVMRunCommandsClientDeleteResponse](options.ResumeToken, client.pl, nil) + } +} + +// Delete - The operation to delete the VMSS VM run command. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +func (client *VirtualMachineScaleSetVMRunCommandsClient) deleteOperation(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, runCommandName string, options *VirtualMachineScaleSetVMRunCommandsClientBeginDeleteOptions) (*http.Response, error) { + req, err := client.deleteCreateRequest(ctx, resourceGroupName, vmScaleSetName, instanceID, runCommandName, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted, http.StatusNoContent) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// deleteCreateRequest creates the Delete request. +func (client *VirtualMachineScaleSetVMRunCommandsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, runCommandName string, options *VirtualMachineScaleSetVMRunCommandsClientBeginDeleteOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualMachines/{instanceId}/runCommands/{runCommandName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if vmScaleSetName == "" { + return nil, errors.New("parameter vmScaleSetName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmScaleSetName}", url.PathEscape(vmScaleSetName)) + if instanceID == "" { + return nil, errors.New("parameter instanceID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{instanceId}", url.PathEscape(instanceID)) + if runCommandName == "" { + return nil, errors.New("parameter runCommandName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{runCommandName}", url.PathEscape(runCommandName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json, text/json"} + return req, nil +} + +// Get - The operation to get the VMSS VM run command.
+// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// vmScaleSetName - The name of the VM scale set. +// instanceID - The instance ID of the virtual machine. +// runCommandName - The name of the virtual machine run command. +// options - VirtualMachineScaleSetVMRunCommandsClientGetOptions contains the optional parameters for the VirtualMachineScaleSetVMRunCommandsClient.Get +// method. +func (client *VirtualMachineScaleSetVMRunCommandsClient) Get(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, runCommandName string, options *VirtualMachineScaleSetVMRunCommandsClientGetOptions) (VirtualMachineScaleSetVMRunCommandsClientGetResponse, error) { + req, err := client.getCreateRequest(ctx, resourceGroupName, vmScaleSetName, instanceID, runCommandName, options) + if err != nil { + return VirtualMachineScaleSetVMRunCommandsClientGetResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return VirtualMachineScaleSetVMRunCommandsClientGetResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return VirtualMachineScaleSetVMRunCommandsClientGetResponse{}, runtime.NewResponseError(resp) + } + return client.getHandleResponse(resp) +} + +// getCreateRequest creates the Get request. +func (client *VirtualMachineScaleSetVMRunCommandsClient) getCreateRequest(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, runCommandName string, options *VirtualMachineScaleSetVMRunCommandsClientGetOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualMachines/{instanceId}/runCommands/{runCommandName}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if vmScaleSetName == "" { + return nil, errors.New("parameter vmScaleSetName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmScaleSetName}", url.PathEscape(vmScaleSetName)) + if instanceID == "" { + return nil, errors.New("parameter instanceID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{instanceId}", url.PathEscape(instanceID)) + if runCommandName == "" { + return nil, errors.New("parameter runCommandName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{runCommandName}", url.PathEscape(runCommandName)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Expand != nil { + reqQP.Set("$expand", *options.Expand) + } + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json, text/json"} + return req, nil +} + +// getHandleResponse handles the Get response.
+func (client *VirtualMachineScaleSetVMRunCommandsClient) getHandleResponse(resp *http.Response) (VirtualMachineScaleSetVMRunCommandsClientGetResponse, error) { + result := VirtualMachineScaleSetVMRunCommandsClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.VirtualMachineRunCommand); err != nil { + return VirtualMachineScaleSetVMRunCommandsClientGetResponse{}, err + } + return result, nil +} + +// NewListPager - The operation to get all run commands of an instance in a virtual machine scale set. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// vmScaleSetName - The name of the VM scale set. +// instanceID - The instance ID of the virtual machine. +// options - VirtualMachineScaleSetVMRunCommandsClientListOptions contains the optional parameters for the VirtualMachineScaleSetVMRunCommandsClient.NewListPager +// method. +func (client *VirtualMachineScaleSetVMRunCommandsClient) NewListPager(resourceGroupName string, vmScaleSetName string, instanceID string, options *VirtualMachineScaleSetVMRunCommandsClientListOptions) *runtime.Pager[VirtualMachineScaleSetVMRunCommandsClientListResponse] { + return runtime.NewPager(runtime.PagingHandler[VirtualMachineScaleSetVMRunCommandsClientListResponse]{ + More: func(page VirtualMachineScaleSetVMRunCommandsClientListResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *VirtualMachineScaleSetVMRunCommandsClientListResponse) (VirtualMachineScaleSetVMRunCommandsClientListResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listCreateRequest(ctx, resourceGroupName, vmScaleSetName, instanceID, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return VirtualMachineScaleSetVMRunCommandsClientListResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return VirtualMachineScaleSetVMRunCommandsClientListResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return VirtualMachineScaleSetVMRunCommandsClientListResponse{}, runtime.NewResponseError(resp) + } + return client.listHandleResponse(resp) + }, + }) +} + +// listCreateRequest creates the List request.
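NewListPager above wires its More and Fetcher callbacks into a runtime.Pager, so callers consume it as a More/NextPage loop; the pager follows NextLink until the service stops returning one. A minimal sketch with placeholder names:

import (
    "context"
    "log"

    "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute"
)

func listRunCommands(ctx context.Context, client *armcompute.VirtualMachineScaleSetVMRunCommandsClient) error {
    pager := client.NewListPager("myResourceGroup", "myScaleSet", "0", nil)
    for pager.More() {
        page, err := pager.NextPage(ctx)
        if err != nil {
            return err
        }
        for _, rc := range page.Value {
            if rc.Name != nil {
                log.Println(*rc.Name)
            }
        }
    }
    return nil
}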
+func (client *VirtualMachineScaleSetVMRunCommandsClient) listCreateRequest(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *VirtualMachineScaleSetVMRunCommandsClientListOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualMachines/{instanceId}/runCommands" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if vmScaleSetName == "" { + return nil, errors.New("parameter vmScaleSetName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmScaleSetName}", url.PathEscape(vmScaleSetName)) + if instanceID == "" { + return nil, errors.New("parameter instanceID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{instanceId}", url.PathEscape(instanceID)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Expand != nil { + reqQP.Set("$expand", *options.Expand) + } + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json, text/json"} + return req, nil +} + +// listHandleResponse handles the List response. +func (client *VirtualMachineScaleSetVMRunCommandsClient) listHandleResponse(resp *http.Response) (VirtualMachineScaleSetVMRunCommandsClientListResponse, error) { + result := VirtualMachineScaleSetVMRunCommandsClientListResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.VirtualMachineRunCommandsListResult); err != nil { + return VirtualMachineScaleSetVMRunCommandsClientListResponse{}, err + } + return result, nil +} + +// BeginUpdate - The operation to update the VMSS VM run command. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// vmScaleSetName - The name of the VM scale set. +// instanceID - The instance ID of the virtual machine. +// runCommandName - The name of the virtual machine run command. +// runCommand - Parameters supplied to the Update Virtual Machine RunCommand operation. +// options - VirtualMachineScaleSetVMRunCommandsClientBeginUpdateOptions contains the optional parameters for the VirtualMachineScaleSetVMRunCommandsClient.BeginUpdate +// method. 
+func (client *VirtualMachineScaleSetVMRunCommandsClient) BeginUpdate(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, runCommandName string, runCommand VirtualMachineRunCommandUpdate, options *VirtualMachineScaleSetVMRunCommandsClientBeginUpdateOptions) (*runtime.Poller[VirtualMachineScaleSetVMRunCommandsClientUpdateResponse], error) {
+	if options == nil || options.ResumeToken == "" {
+		resp, err := client.update(ctx, resourceGroupName, vmScaleSetName, instanceID, runCommandName, runCommand, options)
+		if err != nil {
+			return nil, err
+		}
+		return runtime.NewPoller[VirtualMachineScaleSetVMRunCommandsClientUpdateResponse](resp, client.pl, nil)
+	} else {
+		return runtime.NewPollerFromResumeToken[VirtualMachineScaleSetVMRunCommandsClientUpdateResponse](options.ResumeToken, client.pl, nil)
+	}
+}
+
+// Update - The operation to update the VMSS VM run command.
+// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2022-03-01
+func (client *VirtualMachineScaleSetVMRunCommandsClient) update(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, runCommandName string, runCommand VirtualMachineRunCommandUpdate, options *VirtualMachineScaleSetVMRunCommandsClientBeginUpdateOptions) (*http.Response, error) {
+	req, err := client.updateCreateRequest(ctx, resourceGroupName, vmScaleSetName, instanceID, runCommandName, runCommand, options)
+	if err != nil {
+		return nil, err
+	}
+	resp, err := client.pl.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	if !runtime.HasStatusCode(resp, http.StatusOK) {
+		return nil, runtime.NewResponseError(resp)
+	}
+	return resp, nil
+}
+
+// updateCreateRequest creates the Update request.
+func (client *VirtualMachineScaleSetVMRunCommandsClient) updateCreateRequest(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, runCommandName string, runCommand VirtualMachineRunCommandUpdate, options *VirtualMachineScaleSetVMRunCommandsClientBeginUpdateOptions) (*policy.Request, error) {
+	urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualMachines/{instanceId}/runCommands/{runCommandName}"
+	if resourceGroupName == "" {
+		return nil, errors.New("parameter resourceGroupName cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName))
+	if vmScaleSetName == "" {
+		return nil, errors.New("parameter vmScaleSetName cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{vmScaleSetName}", url.PathEscape(vmScaleSetName))
+	if instanceID == "" {
+		return nil, errors.New("parameter instanceID cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{instanceId}", url.PathEscape(instanceID))
+	if runCommandName == "" {
+		return nil, errors.New("parameter runCommandName cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{runCommandName}", url.PathEscape(runCommandName))
+	if client.subscriptionID == "" {
+		return nil, errors.New("parameter client.subscriptionID cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID))
+	req, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.host, urlPath))
+	if err != nil {
+		return nil, err
+	}
+	reqQP := req.Raw().URL.Query()
+	reqQP.Set("api-version", "2022-03-01")
+	req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header["Accept"] = []string{"application/json, text/json"} + return req, runtime.MarshalAsJSON(req, runCommand) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_virtualmachinescalesetvms_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_virtualmachinescalesetvms_client.go new file mode 100644 index 000000000..32b2b3a23 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_virtualmachinescalesetvms_client.go @@ -0,0 +1,1155 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armcompute + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + armruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strconv" + "strings" +) + +// VirtualMachineScaleSetVMsClient contains the methods for the VirtualMachineScaleSetVMs group. +// Don't use this type directly, use NewVirtualMachineScaleSetVMsClient() instead. +type VirtualMachineScaleSetVMsClient struct { + host string + subscriptionID string + pl runtime.Pipeline +} + +// NewVirtualMachineScaleSetVMsClient creates a new instance of VirtualMachineScaleSetVMsClient with the specified values. +// subscriptionID - Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms +// part of the URI for every service call. +// credential - used to authorize requests. Usually a credential from azidentity. +// options - pass nil to accept the default values. +func NewVirtualMachineScaleSetVMsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*VirtualMachineScaleSetVMsClient, error) { + if options == nil { + options = &arm.ClientOptions{} + } + ep := cloud.AzurePublic.Services[cloud.ResourceManager].Endpoint + if c, ok := options.Cloud.Services[cloud.ResourceManager]; ok { + ep = c.Endpoint + } + pl, err := armruntime.NewPipeline(moduleName, moduleVersion, credential, runtime.PipelineOptions{}, options) + if err != nil { + return nil, err + } + client := &VirtualMachineScaleSetVMsClient{ + subscriptionID: subscriptionID, + host: ep, + pl: pl, + } + return client, nil +} + +// BeginDeallocate - Deallocates a specific virtual machine in a VM scale set. Shuts down the virtual machine and releases +// the compute resources it uses. You are not billed for the compute resources of this virtual +// machine once it is deallocated. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// vmScaleSetName - The name of the VM scale set. +// instanceID - The instance ID of the virtual machine. +// options - VirtualMachineScaleSetVMsClientBeginDeallocateOptions contains the optional parameters for the VirtualMachineScaleSetVMsClient.BeginDeallocate +// method. 
+func (client *VirtualMachineScaleSetVMsClient) BeginDeallocate(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *VirtualMachineScaleSetVMsClientBeginDeallocateOptions) (*runtime.Poller[VirtualMachineScaleSetVMsClientDeallocateResponse], error) {
+	if options == nil || options.ResumeToken == "" {
+		resp, err := client.deallocate(ctx, resourceGroupName, vmScaleSetName, instanceID, options)
+		if err != nil {
+			return nil, err
+		}
+		return runtime.NewPoller[VirtualMachineScaleSetVMsClientDeallocateResponse](resp, client.pl, nil)
+	} else {
+		return runtime.NewPollerFromResumeToken[VirtualMachineScaleSetVMsClientDeallocateResponse](options.ResumeToken, client.pl, nil)
+	}
+}
+
+// Deallocate - Deallocates a specific virtual machine in a VM scale set. Shuts down the virtual machine and releases the
+// compute resources it uses. You are not billed for the compute resources of this virtual
+// machine once it is deallocated.
+// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2022-03-01
+func (client *VirtualMachineScaleSetVMsClient) deallocate(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *VirtualMachineScaleSetVMsClientBeginDeallocateOptions) (*http.Response, error) {
+	req, err := client.deallocateCreateRequest(ctx, resourceGroupName, vmScaleSetName, instanceID, options)
+	if err != nil {
+		return nil, err
+	}
+	resp, err := client.pl.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) {
+		return nil, runtime.NewResponseError(resp)
+	}
+	return resp, nil
+}
+
+// deallocateCreateRequest creates the Deallocate request.
+func (client *VirtualMachineScaleSetVMsClient) deallocateCreateRequest(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *VirtualMachineScaleSetVMsClientBeginDeallocateOptions) (*policy.Request, error) {
+	urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualMachines/{instanceId}/deallocate"
+	if resourceGroupName == "" {
+		return nil, errors.New("parameter resourceGroupName cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName))
+	if vmScaleSetName == "" {
+		return nil, errors.New("parameter vmScaleSetName cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{vmScaleSetName}", url.PathEscape(vmScaleSetName))
+	if instanceID == "" {
+		return nil, errors.New("parameter instanceID cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{instanceId}", url.PathEscape(instanceID))
+	if client.subscriptionID == "" {
+		return nil, errors.New("parameter client.subscriptionID cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID))
+	req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath))
+	if err != nil {
+		return nil, err
+	}
+	reqQP := req.Raw().URL.Query()
+	reqQP.Set("api-version", "2022-03-01")
+	req.Raw().URL.RawQuery = reqQP.Encode()
+	req.Raw().Header["Accept"] = []string{"application/json"}
+	return req, nil
+}
+
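Every Begin* method on this client returns a *runtime.Poller[T]. A hedged sketch of deallocating one instance and blocking until the long-running operation finishes, reusing the ctx and vmsClient placeholders from the earlier sketch:

	poller, err := vmsClient.BeginDeallocate(ctx, "my-rg", "my-vmss", "0", nil)
	if err != nil {
		log.Fatal(err)
	}
	// PollUntilDone polls the service until the operation reaches a terminal state.
	if _, err = poller.PollUntilDone(ctx, nil); err != nil {
		log.Fatal(err)
	}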
+// BeginDelete - Deletes a virtual machine from a VM scale set.
+// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2022-03-01
+// resourceGroupName - The name of the resource group.
+// vmScaleSetName - The name of the VM scale set.
+// instanceID - The instance ID of the virtual machine.
+// options - VirtualMachineScaleSetVMsClientBeginDeleteOptions contains the optional parameters for the VirtualMachineScaleSetVMsClient.BeginDelete
+// method.
+func (client *VirtualMachineScaleSetVMsClient) BeginDelete(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *VirtualMachineScaleSetVMsClientBeginDeleteOptions) (*runtime.Poller[VirtualMachineScaleSetVMsClientDeleteResponse], error) {
+	if options == nil || options.ResumeToken == "" {
+		resp, err := client.deleteOperation(ctx, resourceGroupName, vmScaleSetName, instanceID, options)
+		if err != nil {
+			return nil, err
+		}
+		return runtime.NewPoller[VirtualMachineScaleSetVMsClientDeleteResponse](resp, client.pl, nil)
+	} else {
+		return runtime.NewPollerFromResumeToken[VirtualMachineScaleSetVMsClientDeleteResponse](options.ResumeToken, client.pl, nil)
+	}
+}
+
+// Delete - Deletes a virtual machine from a VM scale set.
+// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2022-03-01
+func (client *VirtualMachineScaleSetVMsClient) deleteOperation(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *VirtualMachineScaleSetVMsClientBeginDeleteOptions) (*http.Response, error) {
+	req, err := client.deleteCreateRequest(ctx, resourceGroupName, vmScaleSetName, instanceID, options)
+	if err != nil {
+		return nil, err
+	}
+	resp, err := client.pl.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted, http.StatusNoContent) {
+		return nil, runtime.NewResponseError(resp)
+	}
+	return resp, nil
+}
+
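The ResumeToken branch above lets a poll loop survive a process restart. A hedged sketch of that round trip (token persistence is elided and the names are placeholders):

	poller, err := vmsClient.BeginDelete(ctx, "my-rg", "my-vmss", "0", nil)
	if err != nil {
		log.Fatal(err)
	}
	tok, err := poller.ResumeToken() // persist this string somewhere durable
	if err != nil {
		log.Fatal(err)
	}
	// Later, possibly in a different process, rehydrate the poller:
	resumed, err := vmsClient.BeginDelete(ctx, "my-rg", "my-vmss", "0",
		&armcompute.VirtualMachineScaleSetVMsClientBeginDeleteOptions{ResumeToken: tok})
	if err != nil {
		log.Fatal(err)
	}
	_, err = resumed.PollUntilDone(ctx, nil)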
+// deleteCreateRequest creates the Delete request.
+func (client *VirtualMachineScaleSetVMsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *VirtualMachineScaleSetVMsClientBeginDeleteOptions) (*policy.Request, error) {
+	urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualMachines/{instanceId}"
+	if resourceGroupName == "" {
+		return nil, errors.New("parameter resourceGroupName cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName))
+	if vmScaleSetName == "" {
+		return nil, errors.New("parameter vmScaleSetName cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{vmScaleSetName}", url.PathEscape(vmScaleSetName))
+	if instanceID == "" {
+		return nil, errors.New("parameter instanceID cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{instanceId}", url.PathEscape(instanceID))
+	if client.subscriptionID == "" {
+		return nil, errors.New("parameter client.subscriptionID cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID))
+	req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.host, urlPath))
+	if err != nil {
+		return nil, err
+	}
+	reqQP := req.Raw().URL.Query()
+	if options != nil && options.ForceDeletion != nil {
+		reqQP.Set("forceDeletion", strconv.FormatBool(*options.ForceDeletion))
+	}
+	reqQP.Set("api-version", "2022-03-01")
+	req.Raw().URL.RawQuery = reqQP.Encode()
+	req.Raw().Header["Accept"] = []string{"application/json"}
+	return req, nil
+}
+
+// Get - Gets a virtual machine from a VM scale set.
+// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2022-03-01
+// resourceGroupName - The name of the resource group.
+// vmScaleSetName - The name of the VM scale set.
+// instanceID - The instance ID of the virtual machine.
+// options - VirtualMachineScaleSetVMsClientGetOptions contains the optional parameters for the VirtualMachineScaleSetVMsClient.Get
+// method.
+func (client *VirtualMachineScaleSetVMsClient) Get(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *VirtualMachineScaleSetVMsClientGetOptions) (VirtualMachineScaleSetVMsClientGetResponse, error) {
+	req, err := client.getCreateRequest(ctx, resourceGroupName, vmScaleSetName, instanceID, options)
+	if err != nil {
+		return VirtualMachineScaleSetVMsClientGetResponse{}, err
+	}
+	resp, err := client.pl.Do(req)
+	if err != nil {
+		return VirtualMachineScaleSetVMsClientGetResponse{}, err
+	}
+	if !runtime.HasStatusCode(resp, http.StatusOK) {
+		return VirtualMachineScaleSetVMsClientGetResponse{}, runtime.NewResponseError(resp)
+	}
+	return client.getHandleResponse(resp)
+}
+
+// getCreateRequest creates the Get request.
+func (client *VirtualMachineScaleSetVMsClient) getCreateRequest(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *VirtualMachineScaleSetVMsClientGetOptions) (*policy.Request, error) {
+	urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualMachines/{instanceId}"
+	if resourceGroupName == "" {
+		return nil, errors.New("parameter resourceGroupName cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName))
+	if vmScaleSetName == "" {
+		return nil, errors.New("parameter vmScaleSetName cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{vmScaleSetName}", url.PathEscape(vmScaleSetName))
+	if instanceID == "" {
+		return nil, errors.New("parameter instanceID cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{instanceId}", url.PathEscape(instanceID))
+	if client.subscriptionID == "" {
+		return nil, errors.New("parameter client.subscriptionID cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID))
+	req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))
+	if err != nil {
+		return nil, err
+	}
+	reqQP := req.Raw().URL.Query()
+	if options != nil && options.Expand != nil {
+		reqQP.Set("$expand", string(*options.Expand))
+	}
+	reqQP.Set("api-version", "2022-03-01")
+	req.Raw().URL.RawQuery = reqQP.Encode()
+	req.Raw().Header["Accept"] = []string{"application/json"}
+	return req, nil
+}
+
+// getHandleResponse handles the Get response.
+func (client *VirtualMachineScaleSetVMsClient) getHandleResponse(resp *http.Response) (VirtualMachineScaleSetVMsClientGetResponse, error) {
+	result := VirtualMachineScaleSetVMsClientGetResponse{}
+	if err := runtime.UnmarshalAsJSON(resp, &result.VirtualMachineScaleSetVM); err != nil {
+		return VirtualMachineScaleSetVMsClientGetResponse{}, err
+	}
+	return result, nil
+}
+
+// GetInstanceView - Gets the status of a virtual machine from a VM scale set.
+// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2022-03-01
+// resourceGroupName - The name of the resource group.
+// vmScaleSetName - The name of the VM scale set.
+// instanceID - The instance ID of the virtual machine.
+// options - VirtualMachineScaleSetVMsClientGetInstanceViewOptions contains the optional parameters for the VirtualMachineScaleSetVMsClient.GetInstanceView
+// method.
+func (client *VirtualMachineScaleSetVMsClient) GetInstanceView(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *VirtualMachineScaleSetVMsClientGetInstanceViewOptions) (VirtualMachineScaleSetVMsClientGetInstanceViewResponse, error) {
+	req, err := client.getInstanceViewCreateRequest(ctx, resourceGroupName, vmScaleSetName, instanceID, options)
+	if err != nil {
+		return VirtualMachineScaleSetVMsClientGetInstanceViewResponse{}, err
+	}
+	resp, err := client.pl.Do(req)
+	if err != nil {
+		return VirtualMachineScaleSetVMsClientGetInstanceViewResponse{}, err
+	}
+	if !runtime.HasStatusCode(resp, http.StatusOK) {
+		return VirtualMachineScaleSetVMsClientGetInstanceViewResponse{}, runtime.NewResponseError(resp)
+	}
+	return client.getInstanceViewHandleResponse(resp)
+}
+
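Note that getCreateRequest converts options.Expand with string(...), so here Expand is an enum rather than the free-form string the run-commands client uses. A hedged sketch of requesting the instance view inline with Get (InstanceViewTypesInstanceView is assumed from this package's constants):

	expand := armcompute.InstanceViewTypesInstanceView
	vm, err := vmsClient.Get(ctx, "my-rg", "my-vmss", "0",
		&armcompute.VirtualMachineScaleSetVMsClientGetOptions{Expand: &expand})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(*vm.ID)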
+// getInstanceViewCreateRequest creates the GetInstanceView request.
+func (client *VirtualMachineScaleSetVMsClient) getInstanceViewCreateRequest(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *VirtualMachineScaleSetVMsClientGetInstanceViewOptions) (*policy.Request, error) {
+	urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualMachines/{instanceId}/instanceView"
+	if resourceGroupName == "" {
+		return nil, errors.New("parameter resourceGroupName cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName))
+	if vmScaleSetName == "" {
+		return nil, errors.New("parameter vmScaleSetName cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{vmScaleSetName}", url.PathEscape(vmScaleSetName))
+	if instanceID == "" {
+		return nil, errors.New("parameter instanceID cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{instanceId}", url.PathEscape(instanceID))
+	if client.subscriptionID == "" {
+		return nil, errors.New("parameter client.subscriptionID cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID))
+	req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))
+	if err != nil {
+		return nil, err
+	}
+	reqQP := req.Raw().URL.Query()
+	reqQP.Set("api-version", "2022-03-01")
+	req.Raw().URL.RawQuery = reqQP.Encode()
+	req.Raw().Header["Accept"] = []string{"application/json"}
+	return req, nil
+}
+
+// getInstanceViewHandleResponse handles the GetInstanceView response.
+func (client *VirtualMachineScaleSetVMsClient) getInstanceViewHandleResponse(resp *http.Response) (VirtualMachineScaleSetVMsClientGetInstanceViewResponse, error) {
+	result := VirtualMachineScaleSetVMsClientGetInstanceViewResponse{}
+	if err := runtime.UnmarshalAsJSON(resp, &result.VirtualMachineScaleSetVMInstanceView); err != nil {
+		return VirtualMachineScaleSetVMsClientGetInstanceViewResponse{}, err
+	}
+	return result, nil
+}
+
+// NewListPager - Gets a list of all virtual machines in a VM scale set.
+// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2022-03-01
+// resourceGroupName - The name of the resource group.
+// virtualMachineScaleSetName - The name of the VM scale set.
+// options - VirtualMachineScaleSetVMsClientListOptions contains the optional parameters for the VirtualMachineScaleSetVMsClient.List
+// method.
+func (client *VirtualMachineScaleSetVMsClient) NewListPager(resourceGroupName string, virtualMachineScaleSetName string, options *VirtualMachineScaleSetVMsClientListOptions) *runtime.Pager[VirtualMachineScaleSetVMsClientListResponse] {
+	return runtime.NewPager(runtime.PagingHandler[VirtualMachineScaleSetVMsClientListResponse]{
+		More: func(page VirtualMachineScaleSetVMsClientListResponse) bool {
+			return page.NextLink != nil && len(*page.NextLink) > 0
+		},
+		Fetcher: func(ctx context.Context, page *VirtualMachineScaleSetVMsClientListResponse) (VirtualMachineScaleSetVMsClientListResponse, error) {
+			var req *policy.Request
+			var err error
+			if page == nil {
+				req, err = client.listCreateRequest(ctx, resourceGroupName, virtualMachineScaleSetName, options)
+			} else {
+				req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink)
+			}
+			if err != nil {
+				return VirtualMachineScaleSetVMsClientListResponse{}, err
+			}
+			resp, err := client.pl.Do(req)
+			if err != nil {
+				return VirtualMachineScaleSetVMsClientListResponse{}, err
+			}
+			if !runtime.HasStatusCode(resp, http.StatusOK) {
+				return VirtualMachineScaleSetVMsClientListResponse{}, runtime.NewResponseError(resp)
+			}
+			return client.listHandleResponse(resp)
+		},
+	})
+}
+
+// listCreateRequest creates the List request.
+func (client *VirtualMachineScaleSetVMsClient) listCreateRequest(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string, options *VirtualMachineScaleSetVMsClientListOptions) (*policy.Request, error) {
+	urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines"
+	if resourceGroupName == "" {
+		return nil, errors.New("parameter resourceGroupName cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName))
+	if virtualMachineScaleSetName == "" {
+		return nil, errors.New("parameter virtualMachineScaleSetName cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{virtualMachineScaleSetName}", url.PathEscape(virtualMachineScaleSetName))
+	if client.subscriptionID == "" {
+		return nil, errors.New("parameter client.subscriptionID cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID))
+	req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))
+	if err != nil {
+		return nil, err
+	}
+	reqQP := req.Raw().URL.Query()
+	if options != nil && options.Filter != nil {
+		reqQP.Set("$filter", *options.Filter)
+	}
+	if options != nil && options.Select != nil {
+		reqQP.Set("$select", *options.Select)
+	}
+	if options != nil && options.Expand != nil {
+		reqQP.Set("$expand", *options.Expand)
+	}
+	reqQP.Set("api-version", "2022-03-01")
+	req.Raw().URL.RawQuery = reqQP.Encode()
+	req.Raw().Header["Accept"] = []string{"application/json"}
+	return req, nil
+}
+
+// listHandleResponse handles the List response.
+func (client *VirtualMachineScaleSetVMsClient) listHandleResponse(resp *http.Response) (VirtualMachineScaleSetVMsClientListResponse, error) {
+	result := VirtualMachineScaleSetVMsClientListResponse{}
+	if err := runtime.UnmarshalAsJSON(resp, &result.VirtualMachineScaleSetVMListResult); err != nil {
+		return VirtualMachineScaleSetVMsClientListResponse{}, err
+	}
+	return result, nil
+}
+
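The $filter, $select, and $expand handling above maps one-to-one onto the List options struct. A hedged sketch (to.Ptr is the sdk/azcore/to helper; the OData filter string is illustrative only):

	pager := vmsClient.NewListPager("my-rg", "my-vmss",
		&armcompute.VirtualMachineScaleSetVMsClientListOptions{
			Filter: to.Ptr("properties/latestModelApplied eq true"),
		})
	for pager.More() {
		page, err := pager.NextPage(ctx)
		if err != nil {
			log.Fatal(err)
		}
		for _, vm := range page.Value {
			fmt.Println(*vm.InstanceID)
		}
	}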
+// BeginPerformMaintenance - Performs maintenance on a virtual machine in a VM scale set.
+// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2022-03-01
+// resourceGroupName - The name of the resource group.
+// vmScaleSetName - The name of the VM scale set.
+// instanceID - The instance ID of the virtual machine.
+// options - VirtualMachineScaleSetVMsClientBeginPerformMaintenanceOptions contains the optional parameters for the VirtualMachineScaleSetVMsClient.BeginPerformMaintenance
+// method.
+func (client *VirtualMachineScaleSetVMsClient) BeginPerformMaintenance(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *VirtualMachineScaleSetVMsClientBeginPerformMaintenanceOptions) (*runtime.Poller[VirtualMachineScaleSetVMsClientPerformMaintenanceResponse], error) {
+	if options == nil || options.ResumeToken == "" {
+		resp, err := client.performMaintenance(ctx, resourceGroupName, vmScaleSetName, instanceID, options)
+		if err != nil {
+			return nil, err
+		}
+		return runtime.NewPoller[VirtualMachineScaleSetVMsClientPerformMaintenanceResponse](resp, client.pl, nil)
+	} else {
+		return runtime.NewPollerFromResumeToken[VirtualMachineScaleSetVMsClientPerformMaintenanceResponse](options.ResumeToken, client.pl, nil)
+	}
+}
+
+// PerformMaintenance - Performs maintenance on a virtual machine in a VM scale set.
+// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2022-03-01
+func (client *VirtualMachineScaleSetVMsClient) performMaintenance(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *VirtualMachineScaleSetVMsClientBeginPerformMaintenanceOptions) (*http.Response, error) {
+	req, err := client.performMaintenanceCreateRequest(ctx, resourceGroupName, vmScaleSetName, instanceID, options)
+	if err != nil {
+		return nil, err
+	}
+	resp, err := client.pl.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) {
+		return nil, runtime.NewResponseError(resp)
+	}
+	return resp, nil
+}
+
+// performMaintenanceCreateRequest creates the PerformMaintenance request.
+func (client *VirtualMachineScaleSetVMsClient) performMaintenanceCreateRequest(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *VirtualMachineScaleSetVMsClientBeginPerformMaintenanceOptions) (*policy.Request, error) {
+	urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/performMaintenance"
+	if resourceGroupName == "" {
+		return nil, errors.New("parameter resourceGroupName cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName))
+	if vmScaleSetName == "" {
+		return nil, errors.New("parameter vmScaleSetName cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{vmScaleSetName}", url.PathEscape(vmScaleSetName))
+	if instanceID == "" {
+		return nil, errors.New("parameter instanceID cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{instanceId}", url.PathEscape(instanceID))
+	if client.subscriptionID == "" {
+		return nil, errors.New("parameter client.subscriptionID cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID))
+	req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath))
+	if err != nil {
+		return nil, err
+	}
+	reqQP := req.Raw().URL.Query()
+	reqQP.Set("api-version", "2022-03-01")
+	req.Raw().URL.RawQuery = reqQP.Encode()
+	req.Raw().Header["Accept"] = []string{"application/json"}
+	return req, nil
+}
+
+// BeginPowerOff - Power off (stop) a virtual machine in a VM scale set. Note that resources are still attached and you are
+// getting charged for the resources. Instead, use deallocate to release resources and avoid
+// charges.
+// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2022-03-01
+// resourceGroupName - The name of the resource group.
+// vmScaleSetName - The name of the VM scale set.
+// instanceID - The instance ID of the virtual machine.
+// options - VirtualMachineScaleSetVMsClientBeginPowerOffOptions contains the optional parameters for the VirtualMachineScaleSetVMsClient.BeginPowerOff
+// method.
+func (client *VirtualMachineScaleSetVMsClient) BeginPowerOff(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *VirtualMachineScaleSetVMsClientBeginPowerOffOptions) (*runtime.Poller[VirtualMachineScaleSetVMsClientPowerOffResponse], error) {
+	if options == nil || options.ResumeToken == "" {
+		resp, err := client.powerOff(ctx, resourceGroupName, vmScaleSetName, instanceID, options)
+		if err != nil {
+			return nil, err
+		}
+		return runtime.NewPoller[VirtualMachineScaleSetVMsClientPowerOffResponse](resp, client.pl, nil)
+	} else {
+		return runtime.NewPollerFromResumeToken[VirtualMachineScaleSetVMsClientPowerOffResponse](options.ResumeToken, client.pl, nil)
+	}
+}
+
+// PowerOff - Power off (stop) a virtual machine in a VM scale set. Note that resources are still attached and you are getting
+// charged for the resources. Instead, use deallocate to release resources and avoid
+// charges.
+// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2022-03-01
+func (client *VirtualMachineScaleSetVMsClient) powerOff(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *VirtualMachineScaleSetVMsClientBeginPowerOffOptions) (*http.Response, error) {
+	req, err := client.powerOffCreateRequest(ctx, resourceGroupName, vmScaleSetName, instanceID, options)
+	if err != nil {
+		return nil, err
+	}
+	resp, err := client.pl.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) {
+		return nil, runtime.NewResponseError(resp)
+	}
+	return resp, nil
+}
+
+// powerOffCreateRequest creates the PowerOff request.
+func (client *VirtualMachineScaleSetVMsClient) powerOffCreateRequest(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *VirtualMachineScaleSetVMsClientBeginPowerOffOptions) (*policy.Request, error) {
+	urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/poweroff"
+	if resourceGroupName == "" {
+		return nil, errors.New("parameter resourceGroupName cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName))
+	if vmScaleSetName == "" {
+		return nil, errors.New("parameter vmScaleSetName cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{vmScaleSetName}", url.PathEscape(vmScaleSetName))
+	if instanceID == "" {
+		return nil, errors.New("parameter instanceID cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{instanceId}", url.PathEscape(instanceID))
+	if client.subscriptionID == "" {
+		return nil, errors.New("parameter client.subscriptionID cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID))
+	req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath))
+	if err != nil {
+		return nil, err
+	}
+	reqQP := req.Raw().URL.Query()
+	if options != nil && options.SkipShutdown != nil {
+		reqQP.Set("skipShutdown", strconv.FormatBool(*options.SkipShutdown))
+	}
+	reqQP.Set("api-version", "2022-03-01")
+	req.Raw().URL.RawQuery = reqQP.Encode()
+	req.Raw().Header["Accept"] = []string{"application/json"}
+	return req, nil
+}
+
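The skipShutdown query parameter above controls whether the guest OS gets a graceful shutdown first. A hedged sketch of a hard power-off:

	poller, err := vmsClient.BeginPowerOff(ctx, "my-rg", "my-vmss", "0",
		&armcompute.VirtualMachineScaleSetVMsClientBeginPowerOffOptions{
			SkipShutdown: to.Ptr(true), // true skips the graceful guest shutdown
		})
	if err != nil {
		log.Fatal(err)
	}
	_, err = poller.PollUntilDone(ctx, nil)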
+// BeginRedeploy - Shuts down the virtual machine in the virtual machine scale set, moves it to a new node, and powers it
+// back on.
+// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2022-03-01
+// resourceGroupName - The name of the resource group.
+// vmScaleSetName - The name of the VM scale set.
+// instanceID - The instance ID of the virtual machine.
+// options - VirtualMachineScaleSetVMsClientBeginRedeployOptions contains the optional parameters for the VirtualMachineScaleSetVMsClient.BeginRedeploy
+// method.
+func (client *VirtualMachineScaleSetVMsClient) BeginRedeploy(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *VirtualMachineScaleSetVMsClientBeginRedeployOptions) (*runtime.Poller[VirtualMachineScaleSetVMsClientRedeployResponse], error) {
+	if options == nil || options.ResumeToken == "" {
+		resp, err := client.redeploy(ctx, resourceGroupName, vmScaleSetName, instanceID, options)
+		if err != nil {
+			return nil, err
+		}
+		return runtime.NewPoller[VirtualMachineScaleSetVMsClientRedeployResponse](resp, client.pl, nil)
+	} else {
+		return runtime.NewPollerFromResumeToken[VirtualMachineScaleSetVMsClientRedeployResponse](options.ResumeToken, client.pl, nil)
+	}
+}
+
+// Redeploy - Shuts down the virtual machine in the virtual machine scale set, moves it to a new node, and powers it back
+// on.
+// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2022-03-01
+func (client *VirtualMachineScaleSetVMsClient) redeploy(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *VirtualMachineScaleSetVMsClientBeginRedeployOptions) (*http.Response, error) {
+	req, err := client.redeployCreateRequest(ctx, resourceGroupName, vmScaleSetName, instanceID, options)
+	if err != nil {
+		return nil, err
+	}
+	resp, err := client.pl.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) {
+		return nil, runtime.NewResponseError(resp)
+	}
+	return resp, nil
+}
+
+// redeployCreateRequest creates the Redeploy request.
+func (client *VirtualMachineScaleSetVMsClient) redeployCreateRequest(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *VirtualMachineScaleSetVMsClientBeginRedeployOptions) (*policy.Request, error) {
+	urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/redeploy"
+	if resourceGroupName == "" {
+		return nil, errors.New("parameter resourceGroupName cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName))
+	if vmScaleSetName == "" {
+		return nil, errors.New("parameter vmScaleSetName cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{vmScaleSetName}", url.PathEscape(vmScaleSetName))
+	if instanceID == "" {
+		return nil, errors.New("parameter instanceID cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{instanceId}", url.PathEscape(instanceID))
+	if client.subscriptionID == "" {
+		return nil, errors.New("parameter client.subscriptionID cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID))
+	req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath))
+	if err != nil {
+		return nil, err
+	}
+	reqQP := req.Raw().URL.Query()
+	reqQP.Set("api-version", "2022-03-01")
+	req.Raw().URL.RawQuery = reqQP.Encode()
+	req.Raw().Header["Accept"] = []string{"application/json"}
+	return req, nil
+}
+
+// BeginReimage - Reimages (upgrade the operating system) a specific virtual machine in a VM scale set.
+// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2022-03-01
+// resourceGroupName - The name of the resource group.
+// vmScaleSetName - The name of the VM scale set.
+// instanceID - The instance ID of the virtual machine.
+// options - VirtualMachineScaleSetVMsClientBeginReimageOptions contains the optional parameters for the VirtualMachineScaleSetVMsClient.BeginReimage
+// method.
+func (client *VirtualMachineScaleSetVMsClient) BeginReimage(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *VirtualMachineScaleSetVMsClientBeginReimageOptions) (*runtime.Poller[VirtualMachineScaleSetVMsClientReimageResponse], error) {
+	if options == nil || options.ResumeToken == "" {
+		resp, err := client.reimage(ctx, resourceGroupName, vmScaleSetName, instanceID, options)
+		if err != nil {
+			return nil, err
+		}
+		return runtime.NewPoller[VirtualMachineScaleSetVMsClientReimageResponse](resp, client.pl, nil)
+	} else {
+		return runtime.NewPollerFromResumeToken[VirtualMachineScaleSetVMsClientReimageResponse](options.ResumeToken, client.pl, nil)
+	}
+}
+
+// Reimage - Reimages (upgrade the operating system) a specific virtual machine in a VM scale set.
+// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2022-03-01
+func (client *VirtualMachineScaleSetVMsClient) reimage(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *VirtualMachineScaleSetVMsClientBeginReimageOptions) (*http.Response, error) {
+	req, err := client.reimageCreateRequest(ctx, resourceGroupName, vmScaleSetName, instanceID, options)
+	if err != nil {
+		return nil, err
+	}
+	resp, err := client.pl.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) {
+		return nil, runtime.NewResponseError(resp)
+	}
+	return resp, nil
+}
+
+// reimageCreateRequest creates the Reimage request.
+func (client *VirtualMachineScaleSetVMsClient) reimageCreateRequest(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *VirtualMachineScaleSetVMsClientBeginReimageOptions) (*policy.Request, error) {
+	urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualMachines/{instanceId}/reimage"
+	if resourceGroupName == "" {
+		return nil, errors.New("parameter resourceGroupName cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName))
+	if vmScaleSetName == "" {
+		return nil, errors.New("parameter vmScaleSetName cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{vmScaleSetName}", url.PathEscape(vmScaleSetName))
+	if instanceID == "" {
+		return nil, errors.New("parameter instanceID cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{instanceId}", url.PathEscape(instanceID))
+	if client.subscriptionID == "" {
+		return nil, errors.New("parameter client.subscriptionID cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID))
+	req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath))
+	if err != nil {
+		return nil, err
+	}
+	reqQP := req.Raw().URL.Query()
+	reqQP.Set("api-version", "2022-03-01")
+	req.Raw().URL.RawQuery = reqQP.Encode()
+	req.Raw().Header["Accept"] = []string{"application/json"}
+	if options != nil && options.VMScaleSetVMReimageInput != nil {
+		return req, runtime.MarshalAsJSON(req, *options.VMScaleSetVMReimageInput)
+	}
+	return req, nil
+}
+
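reimageCreateRequest only sends a request body when options.VMScaleSetVMReimageInput is set. A hedged sketch (the TempDisk field on VirtualMachineScaleSetVMReimageParameters is an assumption from this package's models):

	poller, err := vmsClient.BeginReimage(ctx, "my-rg", "my-vmss", "0",
		&armcompute.VirtualMachineScaleSetVMsClientBeginReimageOptions{
			VMScaleSetVMReimageInput: &armcompute.VirtualMachineScaleSetVMReimageParameters{
				TempDisk: to.Ptr(true), // request a temp-disk reimage where the VM size supports it
			},
		})
	if err != nil {
		log.Fatal(err)
	}
	_, err = poller.PollUntilDone(ctx, nil)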
+// BeginReimageAll - Allows you to re-image all the disks (including data disks) in a VM scale set instance. This operation
+// is only supported for managed disks.
+// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2022-03-01
+// resourceGroupName - The name of the resource group.
+// vmScaleSetName - The name of the VM scale set.
+// instanceID - The instance ID of the virtual machine.
+// options - VirtualMachineScaleSetVMsClientBeginReimageAllOptions contains the optional parameters for the VirtualMachineScaleSetVMsClient.BeginReimageAll
+// method.
+func (client *VirtualMachineScaleSetVMsClient) BeginReimageAll(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *VirtualMachineScaleSetVMsClientBeginReimageAllOptions) (*runtime.Poller[VirtualMachineScaleSetVMsClientReimageAllResponse], error) {
+	if options == nil || options.ResumeToken == "" {
+		resp, err := client.reimageAll(ctx, resourceGroupName, vmScaleSetName, instanceID, options)
+		if err != nil {
+			return nil, err
+		}
+		return runtime.NewPoller[VirtualMachineScaleSetVMsClientReimageAllResponse](resp, client.pl, nil)
+	} else {
+		return runtime.NewPollerFromResumeToken[VirtualMachineScaleSetVMsClientReimageAllResponse](options.ResumeToken, client.pl, nil)
+	}
+}
+
+// ReimageAll - Allows you to re-image all the disks (including data disks) in a VM scale set instance. This operation
+// is only supported for managed disks.
+// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2022-03-01
+func (client *VirtualMachineScaleSetVMsClient) reimageAll(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *VirtualMachineScaleSetVMsClientBeginReimageAllOptions) (*http.Response, error) {
+	req, err := client.reimageAllCreateRequest(ctx, resourceGroupName, vmScaleSetName, instanceID, options)
+	if err != nil {
+		return nil, err
+	}
+	resp, err := client.pl.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) {
+		return nil, runtime.NewResponseError(resp)
+	}
+	return resp, nil
+}
+
+// reimageAllCreateRequest creates the ReimageAll request.
+func (client *VirtualMachineScaleSetVMsClient) reimageAllCreateRequest(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *VirtualMachineScaleSetVMsClientBeginReimageAllOptions) (*policy.Request, error) {
+	urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualMachines/{instanceId}/reimageall"
+	if resourceGroupName == "" {
+		return nil, errors.New("parameter resourceGroupName cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName))
+	if vmScaleSetName == "" {
+		return nil, errors.New("parameter vmScaleSetName cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{vmScaleSetName}", url.PathEscape(vmScaleSetName))
+	if instanceID == "" {
+		return nil, errors.New("parameter instanceID cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{instanceId}", url.PathEscape(instanceID))
+	if client.subscriptionID == "" {
+		return nil, errors.New("parameter client.subscriptionID cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID))
+	req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath))
+	if err != nil {
+		return nil, err
+	}
+	reqQP := req.Raw().URL.Query()
+	reqQP.Set("api-version", "2022-03-01")
+	req.Raw().URL.RawQuery = reqQP.Encode()
+	req.Raw().Header["Accept"] = []string{"application/json"}
+	return req, nil
+}
+
+// BeginRestart - Restarts a virtual machine in a VM scale set.
+// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2022-03-01
+// resourceGroupName - The name of the resource group.
+// vmScaleSetName - The name of the VM scale set.
+// instanceID - The instance ID of the virtual machine.
+// options - VirtualMachineScaleSetVMsClientBeginRestartOptions contains the optional parameters for the VirtualMachineScaleSetVMsClient.BeginRestart
+// method.
+func (client *VirtualMachineScaleSetVMsClient) BeginRestart(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *VirtualMachineScaleSetVMsClientBeginRestartOptions) (*runtime.Poller[VirtualMachineScaleSetVMsClientRestartResponse], error) {
+	if options == nil || options.ResumeToken == "" {
+		resp, err := client.restart(ctx, resourceGroupName, vmScaleSetName, instanceID, options)
+		if err != nil {
+			return nil, err
+		}
+		return runtime.NewPoller[VirtualMachineScaleSetVMsClientRestartResponse](resp, client.pl, nil)
+	} else {
+		return runtime.NewPollerFromResumeToken[VirtualMachineScaleSetVMsClientRestartResponse](options.ResumeToken, client.pl, nil)
+	}
+}
+
+// Restart - Restarts a virtual machine in a VM scale set.
+// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2022-03-01
+func (client *VirtualMachineScaleSetVMsClient) restart(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *VirtualMachineScaleSetVMsClientBeginRestartOptions) (*http.Response, error) {
+	req, err := client.restartCreateRequest(ctx, resourceGroupName, vmScaleSetName, instanceID, options)
+	if err != nil {
+		return nil, err
+	}
+	resp, err := client.pl.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) {
+		return nil, runtime.NewResponseError(resp)
+	}
+	return resp, nil
+}
+
+// restartCreateRequest creates the Restart request.
+func (client *VirtualMachineScaleSetVMsClient) restartCreateRequest(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *VirtualMachineScaleSetVMsClientBeginRestartOptions) (*policy.Request, error) {
+	urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/restart"
+	if resourceGroupName == "" {
+		return nil, errors.New("parameter resourceGroupName cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName))
+	if vmScaleSetName == "" {
+		return nil, errors.New("parameter vmScaleSetName cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{vmScaleSetName}", url.PathEscape(vmScaleSetName))
+	if instanceID == "" {
+		return nil, errors.New("parameter instanceID cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{instanceId}", url.PathEscape(instanceID))
+	if client.subscriptionID == "" {
+		return nil, errors.New("parameter client.subscriptionID cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID))
+	req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath))
+	if err != nil {
+		return nil, err
+	}
+	reqQP := req.Raw().URL.Query()
+	reqQP.Set("api-version", "2022-03-01")
+	req.Raw().URL.RawQuery = reqQP.Encode()
+	req.Raw().Header["Accept"] = []string{"application/json"}
+	return req, nil
+}
+
+// RetrieveBootDiagnosticsData - The operation to retrieve SAS URIs of boot diagnostic logs for a virtual machine in a VM
+// scale set.
+// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2022-03-01
+// resourceGroupName - The name of the resource group.
+// vmScaleSetName - The name of the VM scale set.
+// instanceID - The instance ID of the virtual machine.
+// options - VirtualMachineScaleSetVMsClientRetrieveBootDiagnosticsDataOptions contains the optional parameters for the VirtualMachineScaleSetVMsClient.RetrieveBootDiagnosticsData
+// method.
+func (client *VirtualMachineScaleSetVMsClient) RetrieveBootDiagnosticsData(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *VirtualMachineScaleSetVMsClientRetrieveBootDiagnosticsDataOptions) (VirtualMachineScaleSetVMsClientRetrieveBootDiagnosticsDataResponse, error) {
+	req, err := client.retrieveBootDiagnosticsDataCreateRequest(ctx, resourceGroupName, vmScaleSetName, instanceID, options)
+	if err != nil {
+		return VirtualMachineScaleSetVMsClientRetrieveBootDiagnosticsDataResponse{}, err
+	}
+	resp, err := client.pl.Do(req)
+	if err != nil {
+		return VirtualMachineScaleSetVMsClientRetrieveBootDiagnosticsDataResponse{}, err
+	}
+	if !runtime.HasStatusCode(resp, http.StatusOK) {
+		return VirtualMachineScaleSetVMsClientRetrieveBootDiagnosticsDataResponse{}, runtime.NewResponseError(resp)
+	}
+	return client.retrieveBootDiagnosticsDataHandleResponse(resp)
+}
+
+// retrieveBootDiagnosticsDataCreateRequest creates the RetrieveBootDiagnosticsData request.
+func (client *VirtualMachineScaleSetVMsClient) retrieveBootDiagnosticsDataCreateRequest(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *VirtualMachineScaleSetVMsClientRetrieveBootDiagnosticsDataOptions) (*policy.Request, error) {
+	urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/retrieveBootDiagnosticsData"
+	if resourceGroupName == "" {
+		return nil, errors.New("parameter resourceGroupName cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName))
+	if vmScaleSetName == "" {
+		return nil, errors.New("parameter vmScaleSetName cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{vmScaleSetName}", url.PathEscape(vmScaleSetName))
+	if instanceID == "" {
+		return nil, errors.New("parameter instanceID cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{instanceId}", url.PathEscape(instanceID))
+	if client.subscriptionID == "" {
+		return nil, errors.New("parameter client.subscriptionID cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID))
+	req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath))
+	if err != nil {
+		return nil, err
+	}
+	reqQP := req.Raw().URL.Query()
+	if options != nil && options.SasURIExpirationTimeInMinutes != nil {
+		reqQP.Set("sasUriExpirationTimeInMinutes", strconv.FormatInt(int64(*options.SasURIExpirationTimeInMinutes), 10))
+	}
+	reqQP.Set("api-version", "2022-03-01")
+	req.Raw().URL.RawQuery = reqQP.Encode()
+	req.Raw().Header["Accept"] = []string{"application/json"}
+	return req, nil
+}
+
+// retrieveBootDiagnosticsDataHandleResponse handles the RetrieveBootDiagnosticsData response.
+func (client *VirtualMachineScaleSetVMsClient) retrieveBootDiagnosticsDataHandleResponse(resp *http.Response) (VirtualMachineScaleSetVMsClientRetrieveBootDiagnosticsDataResponse, error) {
+	result := VirtualMachineScaleSetVMsClientRetrieveBootDiagnosticsDataResponse{}
+	if err := runtime.UnmarshalAsJSON(resp, &result.RetrieveBootDiagnosticsDataResult); err != nil {
+		return VirtualMachineScaleSetVMsClientRetrieveBootDiagnosticsDataResponse{}, err
+	}
+	return result, nil
+}
+
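A hedged sketch of fetching boot-diagnostics SAS URIs with a 30-minute expiry; the response field name is an assumption from this package's RetrieveBootDiagnosticsDataResult model:

	data, err := vmsClient.RetrieveBootDiagnosticsData(ctx, "my-rg", "my-vmss", "0",
		&armcompute.VirtualMachineScaleSetVMsClientRetrieveBootDiagnosticsDataOptions{
			SasURIExpirationTimeInMinutes: to.Ptr[int32](30),
		})
	if err != nil {
		log.Fatal(err)
	}
	if data.ConsoleScreenshotBlobURI != nil {
		fmt.Println(*data.ConsoleScreenshotBlobURI)
	}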
+// BeginRunCommand - Run command on a virtual machine in a VM scale set.
+// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2022-03-01
+// resourceGroupName - The name of the resource group.
+// vmScaleSetName - The name of the VM scale set.
+// instanceID - The instance ID of the virtual machine.
+// parameters - Parameters supplied to the Run command operation.
+// options - VirtualMachineScaleSetVMsClientBeginRunCommandOptions contains the optional parameters for the VirtualMachineScaleSetVMsClient.BeginRunCommand
+// method.
+func (client *VirtualMachineScaleSetVMsClient) BeginRunCommand(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, parameters RunCommandInput, options *VirtualMachineScaleSetVMsClientBeginRunCommandOptions) (*runtime.Poller[VirtualMachineScaleSetVMsClientRunCommandResponse], error) {
+	if options == nil || options.ResumeToken == "" {
+		resp, err := client.runCommand(ctx, resourceGroupName, vmScaleSetName, instanceID, parameters, options)
+		if err != nil {
+			return nil, err
+		}
+		return runtime.NewPoller(resp, client.pl, &runtime.NewPollerOptions[VirtualMachineScaleSetVMsClientRunCommandResponse]{
+			FinalStateVia: runtime.FinalStateViaLocation,
+		})
+	} else {
+		return runtime.NewPollerFromResumeToken[VirtualMachineScaleSetVMsClientRunCommandResponse](options.ResumeToken, client.pl, nil)
+	}
+}
+
+// RunCommand - Run command on a virtual machine in a VM scale set.
+// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2022-03-01
+func (client *VirtualMachineScaleSetVMsClient) runCommand(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, parameters RunCommandInput, options *VirtualMachineScaleSetVMsClientBeginRunCommandOptions) (*http.Response, error) {
+	req, err := client.runCommandCreateRequest(ctx, resourceGroupName, vmScaleSetName, instanceID, parameters, options)
+	if err != nil {
+		return nil, err
+	}
+	resp, err := client.pl.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) {
+		return nil, runtime.NewResponseError(resp)
+	}
+	return resp, nil
+}
+
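Unlike the other long-running operations here, BeginRunCommand resolves its final state via the Location header (the FinalStateVia option above). A hedged sketch of running a script and printing the status messages (RunCommandInput fields as assumed from this package; RunShellScript is the stock Linux command ID):

	poller, err := vmsClient.BeginRunCommand(ctx, "my-rg", "my-vmss", "0",
		armcompute.RunCommandInput{
			CommandID: to.Ptr("RunShellScript"),
			Script:    []*string{to.Ptr("echo hello from $(hostname)")},
		}, nil)
	if err != nil {
		log.Fatal(err)
	}
	res, err := poller.PollUntilDone(ctx, nil)
	if err != nil {
		log.Fatal(err)
	}
	for _, s := range res.Value { // instance-view status entries
		fmt.Println(*s.Message)
	}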
+// runCommandCreateRequest creates the RunCommand request.
+func (client *VirtualMachineScaleSetVMsClient) runCommandCreateRequest(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, parameters RunCommandInput, options *VirtualMachineScaleSetVMsClientBeginRunCommandOptions) (*policy.Request, error) {
+	urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/runCommand"
+	if resourceGroupName == "" {
+		return nil, errors.New("parameter resourceGroupName cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName))
+	if vmScaleSetName == "" {
+		return nil, errors.New("parameter vmScaleSetName cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{vmScaleSetName}", url.PathEscape(vmScaleSetName))
+	if instanceID == "" {
+		return nil, errors.New("parameter instanceID cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{instanceId}", url.PathEscape(instanceID))
+	if client.subscriptionID == "" {
+		return nil, errors.New("parameter client.subscriptionID cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID))
+	req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath))
+	if err != nil {
+		return nil, err
+	}
+	reqQP := req.Raw().URL.Query()
+	reqQP.Set("api-version", "2022-03-01")
+	req.Raw().URL.RawQuery = reqQP.Encode()
+	req.Raw().Header["Accept"] = []string{"application/json, text/json"}
+	return req, runtime.MarshalAsJSON(req, parameters)
+}
+
+// SimulateEviction - The operation to simulate the eviction of a spot virtual machine in a VM scale set.
+// If the operation fails it returns an *azcore.ResponseError type.
+// Generated from API version 2022-03-01
+// resourceGroupName - The name of the resource group.
+// vmScaleSetName - The name of the VM scale set.
+// instanceID - The instance ID of the virtual machine.
+// options - VirtualMachineScaleSetVMsClientSimulateEvictionOptions contains the optional parameters for the VirtualMachineScaleSetVMsClient.SimulateEviction
+// method.
+func (client *VirtualMachineScaleSetVMsClient) SimulateEviction(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *VirtualMachineScaleSetVMsClientSimulateEvictionOptions) (VirtualMachineScaleSetVMsClientSimulateEvictionResponse, error) {
+	req, err := client.simulateEvictionCreateRequest(ctx, resourceGroupName, vmScaleSetName, instanceID, options)
+	if err != nil {
+		return VirtualMachineScaleSetVMsClientSimulateEvictionResponse{}, err
+	}
+	resp, err := client.pl.Do(req)
+	if err != nil {
+		return VirtualMachineScaleSetVMsClientSimulateEvictionResponse{}, err
+	}
+	if !runtime.HasStatusCode(resp, http.StatusNoContent) {
+		return VirtualMachineScaleSetVMsClientSimulateEvictionResponse{}, runtime.NewResponseError(resp)
+	}
+	return VirtualMachineScaleSetVMsClientSimulateEvictionResponse{}, nil
+}
+
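SimulateEviction is synchronous and succeeds with 204 No Content, so there is no poller and the response carries no body. A hedged sketch:

	if _, err := vmsClient.SimulateEviction(ctx, "my-rg", "my-vmss", "0", nil); err != nil {
		log.Fatal(err)
	}
	// The eviction itself is delivered to the spot instance asynchronously.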
+func (client *VirtualMachineScaleSetVMsClient) simulateEvictionCreateRequest(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *VirtualMachineScaleSetVMsClientSimulateEvictionOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualMachines/{instanceId}/simulateEviction" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if vmScaleSetName == "" { + return nil, errors.New("parameter vmScaleSetName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmScaleSetName}", url.PathEscape(vmScaleSetName)) + if instanceID == "" { + return nil, errors.New("parameter instanceID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{instanceId}", url.PathEscape(instanceID)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// BeginStart - Starts a virtual machine in a VM scale set. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// vmScaleSetName - The name of the VM scale set. +// instanceID - The instance ID of the virtual machine. +// options - VirtualMachineScaleSetVMsClientBeginStartOptions contains the optional parameters for the VirtualMachineScaleSetVMsClient.BeginStart +// method. +func (client *VirtualMachineScaleSetVMsClient) BeginStart(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *VirtualMachineScaleSetVMsClientBeginStartOptions) (*runtime.Poller[VirtualMachineScaleSetVMsClientStartResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.start(ctx, resourceGroupName, vmScaleSetName, instanceID, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[VirtualMachineScaleSetVMsClientStartResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[VirtualMachineScaleSetVMsClientStartResponse](options.ResumeToken, client.pl, nil) + } +} + +// Start - Starts a virtual machine in a VM scale set. +// If the operation fails it returns an *azcore.ResponseError type. 
+// Generated from API version 2022-03-01 +func (client *VirtualMachineScaleSetVMsClient) start(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *VirtualMachineScaleSetVMsClientBeginStartOptions) (*http.Response, error) { + req, err := client.startCreateRequest(ctx, resourceGroupName, vmScaleSetName, instanceID, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// startCreateRequest creates the Start request. +func (client *VirtualMachineScaleSetVMsClient) startCreateRequest(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, options *VirtualMachineScaleSetVMsClientBeginStartOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/start" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if vmScaleSetName == "" { + return nil, errors.New("parameter vmScaleSetName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmScaleSetName}", url.PathEscape(vmScaleSetName)) + if instanceID == "" { + return nil, errors.New("parameter instanceID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{instanceId}", url.PathEscape(instanceID)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// BeginUpdate - Updates a virtual machine of a VM scale set. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// resourceGroupName - The name of the resource group. +// vmScaleSetName - The name of the VM scale set where the extension should be create or updated. +// instanceID - The instance ID of the virtual machine. +// parameters - Parameters supplied to the Update Virtual Machine Scale Sets VM operation. +// options - VirtualMachineScaleSetVMsClientBeginUpdateOptions contains the optional parameters for the VirtualMachineScaleSetVMsClient.BeginUpdate +// method. 
+func (client *VirtualMachineScaleSetVMsClient) BeginUpdate(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, parameters VirtualMachineScaleSetVM, options *VirtualMachineScaleSetVMsClientBeginUpdateOptions) (*runtime.Poller[VirtualMachineScaleSetVMsClientUpdateResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.update(ctx, resourceGroupName, vmScaleSetName, instanceID, parameters, options) + if err != nil { + return nil, err + } + return runtime.NewPoller[VirtualMachineScaleSetVMsClientUpdateResponse](resp, client.pl, nil) + } else { + return runtime.NewPollerFromResumeToken[VirtualMachineScaleSetVMsClientUpdateResponse](options.ResumeToken, client.pl, nil) + } +} + +// Update - Updates a virtual machine of a VM scale set. +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +func (client *VirtualMachineScaleSetVMsClient) update(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, parameters VirtualMachineScaleSetVM, options *VirtualMachineScaleSetVMsClientBeginUpdateOptions) (*http.Response, error) { + req, err := client.updateCreateRequest(ctx, resourceGroupName, vmScaleSetName, instanceID, parameters, options) + if err != nil { + return nil, err + } + resp, err := client.pl.Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// updateCreateRequest creates the Update request. +func (client *VirtualMachineScaleSetVMsClient) updateCreateRequest(ctx context.Context, resourceGroupName string, vmScaleSetName string, instanceID string, parameters VirtualMachineScaleSetVM, options *VirtualMachineScaleSetVMsClientBeginUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualMachines/{instanceId}" + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if vmScaleSetName == "" { + return nil, errors.New("parameter vmScaleSetName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{vmScaleSetName}", url.PathEscape(vmScaleSetName)) + if instanceID == "" { + return nil, errors.New("parameter instanceID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{instanceId}", url.PathEscape(instanceID)) + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.host, urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-03-01") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, parameters) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_virtualmachinesizes_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_virtualmachinesizes_client.go new file mode 100644 index 000000000..8add0cc32 --- /dev/null 
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/zz_generated_virtualmachinesizes_client.go @@ -0,0 +1,115 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armcompute + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + armruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strings" +) + +// VirtualMachineSizesClient contains the methods for the VirtualMachineSizes group. +// Don't use this type directly, use NewVirtualMachineSizesClient() instead. +type VirtualMachineSizesClient struct { + host string + subscriptionID string + pl runtime.Pipeline +} + +// NewVirtualMachineSizesClient creates a new instance of VirtualMachineSizesClient with the specified values. +// subscriptionID - Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms +// part of the URI for every service call. +// credential - used to authorize requests. Usually a credential from azidentity. +// options - pass nil to accept the default values. +func NewVirtualMachineSizesClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*VirtualMachineSizesClient, error) { + if options == nil { + options = &arm.ClientOptions{} + } + ep := cloud.AzurePublic.Services[cloud.ResourceManager].Endpoint + if c, ok := options.Cloud.Services[cloud.ResourceManager]; ok { + ep = c.Endpoint + } + pl, err := armruntime.NewPipeline(moduleName, moduleVersion, credential, runtime.PipelineOptions{}, options) + if err != nil { + return nil, err + } + client := &VirtualMachineSizesClient{ + subscriptionID: subscriptionID, + host: ep, + pl: pl, + } + return client, nil +} + +// NewListPager - This API is deprecated. Use Resources Skus [https://docs.microsoft.com/rest/api/compute/resourceskus/list] +// If the operation fails it returns an *azcore.ResponseError type. +// Generated from API version 2022-03-01 +// location - The location upon which virtual-machine-sizes is queried. +// options - VirtualMachineSizesClientListOptions contains the optional parameters for the VirtualMachineSizesClient.List +// method. 
+func (client *VirtualMachineSizesClient) NewListPager(location string, options *VirtualMachineSizesClientListOptions) *runtime.Pager[VirtualMachineSizesClientListResponse] {
+	return runtime.NewPager(runtime.PagingHandler[VirtualMachineSizesClientListResponse]{
+		More: func(page VirtualMachineSizesClientListResponse) bool {
+			return false
+		},
+		Fetcher: func(ctx context.Context, page *VirtualMachineSizesClientListResponse) (VirtualMachineSizesClientListResponse, error) {
+			req, err := client.listCreateRequest(ctx, location, options)
+			if err != nil {
+				return VirtualMachineSizesClientListResponse{}, err
+			}
+			resp, err := client.pl.Do(req)
+			if err != nil {
+				return VirtualMachineSizesClientListResponse{}, err
+			}
+			if !runtime.HasStatusCode(resp, http.StatusOK) {
+				return VirtualMachineSizesClientListResponse{}, runtime.NewResponseError(resp)
+			}
+			return client.listHandleResponse(resp)
+		},
+	})
+}
+
+// listCreateRequest creates the List request.
+func (client *VirtualMachineSizesClient) listCreateRequest(ctx context.Context, location string, options *VirtualMachineSizesClientListOptions) (*policy.Request, error) {
+	urlPath := "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/vmSizes"
+	if location == "" {
+		return nil, errors.New("parameter location cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{location}", url.PathEscape(location))
+	if client.subscriptionID == "" {
+		return nil, errors.New("parameter client.subscriptionID cannot be empty")
+	}
+	urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID))
+	req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.host, urlPath))
+	if err != nil {
+		return nil, err
+	}
+	reqQP := req.Raw().URL.Query()
+	reqQP.Set("api-version", "2022-03-01")
+	req.Raw().URL.RawQuery = reqQP.Encode()
+	req.Raw().Header["Accept"] = []string{"application/json"}
+	return req, nil
+}
+
+// listHandleResponse handles the List response.
+func (client *VirtualMachineSizesClient) listHandleResponse(resp *http.Response) (VirtualMachineSizesClientListResponse, error) {
+	result := VirtualMachineSizesClientListResponse{}
+	if err := runtime.UnmarshalAsJSON(resp, &result.VirtualMachineSizeListResult); err != nil {
+		return VirtualMachineSizesClientListResponse{}, err
+	}
+	return result, nil
+}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index e7142ab34..7ebe257f4 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -37,6 +37,9 @@ github.com/Azure/azure-sdk-for-go/sdk/internal/log
 github.com/Azure/azure-sdk-for-go/sdk/internal/poller
 github.com/Azure/azure-sdk-for-go/sdk/internal/temporal
 github.com/Azure/azure-sdk-for-go/sdk/internal/uuid
+# github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute v1.0.0
+## explicit; go 1.18
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute
 # github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v6 v6.3.0
 ## explicit; go 1.18
 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v6

From 044db07ff8f133fe6747438d7276f2330dd0e269 Mon Sep 17 00:00:00 2001
From: Adrian Riobo
Date: Fri, 21 Mar 2025 08:23:15 +0100
Subject: [PATCH 07/11] fix: integrations snippet sanitize

With eaf24bec441c3f8751e1e2b436730b005bf29703 the code to get the
snippet based on the integration system was refactored, and that
introduced a regression when no integration params are set on the
execution. In that case we got a panic.
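A condensed sketch of the failure mode (toy types, simplified from
pkg/integrations; the real GetIntegrationSnippet also returns an error
and renders a template): a typed-nil *GithubRunnerArgs stored in the
IntegrationConfig interface does not compare equal to nil, so the old
"intCfg == nil" guard passed and the later field access on the nil
receiver panicked. Checking the returned user data values instead
avoids it:

package main

import "fmt"

type UserDataValues struct{ Name, User string }

type IntegrationConfig interface {
	GetUserDataValues() *UserDataValues
}

type GithubRunnerArgs struct{ Name string }

func (args *GithubRunnerArgs) GetUserDataValues() *UserDataValues {
	if args == nil { // guard added by this fix: nil receiver, no params
		return nil
	}
	return &UserDataValues{Name: args.Name}
}

func GetIntegrationSnippet(intCfg IntegrationConfig, username string) *string {
	// An interface holding a typed nil pointer is itself non-nil, so a
	// nil check on intCfg is not enough; check what it returns instead.
	userDataValues := intCfg.GetUserDataValues()
	if userDataValues == nil {
		noSnippet := ""
		return &noSnippet
	}
	userDataValues.User = username
	snippet := "snippet for " + userDataValues.User
	return &snippet
}

func main() {
	var args *GithubRunnerArgs // no integration params set on the execution
	fmt.Printf("%q\n", *GetIntegrationSnippet(args, "mapt")) // prints ""
}

With the receiver guard plus the returned-value check, both
integrations degrade to the empty snippet instead of panicking.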
This commit will check the content and return noSnippet as expected if no params are set. Fix #434 Signed-off-by: Adrian Riobo --- pkg/integrations/cirrus/persistentworker.go | 3 +++ pkg/integrations/github/ghrunner.go | 3 +++ pkg/integrations/integrations.go | 8 ++++---- 3 files changed, 10 insertions(+), 4 deletions(-) diff --git a/pkg/integrations/cirrus/persistentworker.go b/pkg/integrations/cirrus/persistentworker.go index 787bbc6dd..42b3d458b 100644 --- a/pkg/integrations/cirrus/persistentworker.go +++ b/pkg/integrations/cirrus/persistentworker.go @@ -36,6 +36,9 @@ func Init(args *PersistentWorkerArgs) { } func (args *PersistentWorkerArgs) GetUserDataValues() *integrations.UserDataValues { + if args == nil { + return nil + } return &integrations.UserDataValues{ CliURL: downloadURL(), Name: pwa.Name, diff --git a/pkg/integrations/github/ghrunner.go b/pkg/integrations/github/ghrunner.go index 594ee77ce..ecb772b03 100644 --- a/pkg/integrations/github/ghrunner.go +++ b/pkg/integrations/github/ghrunner.go @@ -36,6 +36,9 @@ func Init(args *GithubRunnerArgs) { } func (args *GithubRunnerArgs) GetUserDataValues() *integrations.UserDataValues { + if args == nil { + return nil + } return &integrations.UserDataValues{ Name: args.Name, Token: args.Token, diff --git a/pkg/integrations/integrations.go b/pkg/integrations/integrations.go index f7bc70706..960ce8dec 100644 --- a/pkg/integrations/integrations.go +++ b/pkg/integrations/integrations.go @@ -23,13 +23,13 @@ type IntegrationConfig interface { } func GetIntegrationSnippet(intCfg IntegrationConfig, username string) (*string, error) { - if intCfg == nil { + userDataValues := intCfg.GetUserDataValues() + if userDataValues == nil { noSnippet := "" return &noSnippet, nil } - userDatatValues := intCfg.GetUserDataValues() - userDatatValues.User = username - snippet, err := file.Template(userDatatValues, intCfg.GetSetupScriptTemplate()) + userDataValues.User = username + snippet, err := file.Template(userDataValues, intCfg.GetSetupScriptTemplate()) return &snippet, err } From da820ae32879d69423a6a8a13cc14bba2d13634f Mon Sep 17 00:00:00 2001 From: Adrian Riobo Date: Fri, 21 Mar 2025 08:24:53 +0100 Subject: [PATCH 08/11] fix: spotprice example dependencies update Signed-off-by: Adrian Riobo --- examples/spotprice/.gitignore | 1 + examples/spotprice/go.mod | 80 ++++++++------ examples/spotprice/go.sum | 203 +++++++++++++++++----------------- 3 files changed, 145 insertions(+), 139 deletions(-) create mode 100644 examples/spotprice/.gitignore diff --git a/examples/spotprice/.gitignore b/examples/spotprice/.gitignore new file mode 100644 index 000000000..28ad6d1dc --- /dev/null +++ b/examples/spotprice/.gitignore @@ -0,0 +1 @@ +spotprice \ No newline at end of file diff --git a/examples/spotprice/go.mod b/examples/spotprice/go.mod index fbea107bd..37fc1466d 100644 --- a/examples/spotprice/go.mod +++ b/examples/spotprice/go.mod @@ -1,41 +1,52 @@ module spotprice -go 1.21 +go 1.22.0 + +toolchain go1.23.7 require github.com/redhat-developer/mapt v0.6.9 require ( - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.2 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v6 v6.0.0 // indirect + 
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v6 v6.3.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resourcegraph/armresourcegraph v0.9.0 // indirect - github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.4.1 // indirect github.com/atotto/clipboard v0.1.4 // indirect github.com/aws/amazon-ec2-instance-selector/v2 v2.4.2-0.20231006140257-d989c5d76f0e // indirect - github.com/aws/aws-sdk-go v1.45.6 // indirect - github.com/aws/aws-sdk-go-v2 v1.27.2 // indirect - github.com/aws/aws-sdk-go-v2/config v1.27.18 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.17.18 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.5 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.9 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.9 // indirect - github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 // indirect - github.com/aws/aws-sdk-go-v2/service/ec2 v1.142.0 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.11 // indirect - github.com/aws/aws-sdk-go-v2/service/pricing v1.21.6 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.20.11 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.24.5 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.28.12 // indirect - github.com/aws/smithy-go v1.20.2 // indirect + github.com/aws/aws-sdk-go v1.50.36 // indirect + github.com/aws/aws-sdk-go-v2 v1.36.3 // indirect + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 // indirect + github.com/aws/aws-sdk-go-v2/config v1.29.8 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.17.61 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34 // indirect + github.com/aws/aws-sdk-go-v2/service/ec2 v1.203.1 // indirect + github.com/aws/aws-sdk-go-v2/service/ecs v1.53.0 // indirect + github.com/aws/aws-sdk-go-v2/service/iam v1.39.2 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.7 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.5 // indirect + github.com/aws/aws-sdk-go-v2/service/pricing v1.32.17 // indirect + github.com/aws/aws-sdk-go-v2/service/s3 v1.53.1 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.25.0 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.29.0 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.33.16 // indirect + github.com/aws/smithy-go v1.22.3 // indirect github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect github.com/blang/semver/v4 v4.0.0 // indirect - github.com/charmbracelet/bubbles v0.16.1 // indirect - github.com/charmbracelet/bubbletea v0.25.0 // indirect - github.com/charmbracelet/lipgloss v0.7.1 // indirect - github.com/containerd/console v1.0.4 // indirect - github.com/evertras/bubble-table v0.15.2 // indirect + github.com/charmbracelet/bubbles v0.20.0 // indirect + github.com/charmbracelet/bubbletea v1.1.0 // indirect + 
github.com/charmbracelet/lipgloss v0.13.0 // indirect + github.com/charmbracelet/x/ansi v0.2.3 // indirect + github.com/charmbracelet/x/term v0.2.1 // indirect + github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f // indirect + github.com/evertras/bubble-table v0.17.1 // indirect github.com/golang-jwt/jwt/v5 v5.2.1 // indirect github.com/google/uuid v1.6.0 // indirect github.com/imdario/mergo v0.3.16 // indirect @@ -44,7 +55,7 @@ require ( github.com/lucasb-eyer/go-colorful v1.2.0 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/mattn/go-localereader v0.0.1 // indirect - github.com/mattn/go-runewidth v0.0.15 // indirect + github.com/mattn/go-runewidth v0.0.16 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 // indirect github.com/muesli/cancelreader v0.2.2 // indirect @@ -54,16 +65,15 @@ require ( github.com/patrickmn/go-cache v2.1.0+incompatible // indirect github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/rivo/uniseg v0.4.7 // indirect - github.com/sahilm/fuzzy v0.1.0 // indirect + github.com/sahilm/fuzzy v0.1.1 // indirect github.com/sirupsen/logrus v1.9.3 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/crypto v0.25.0 // indirect - golang.org/x/exp v0.0.0-20240604190554-fc45aab8b7f8 // indirect - golang.org/x/net v0.27.0 // indirect - golang.org/x/sync v0.7.0 // indirect - golang.org/x/sys v0.22.0 // indirect - golang.org/x/term v0.22.0 // indirect - golang.org/x/text v0.16.0 // indirect + golang.org/x/crypto v0.33.0 // indirect + golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect + golang.org/x/net v0.35.0 // indirect + golang.org/x/sync v0.11.0 // indirect + golang.org/x/sys v0.30.0 // indirect + golang.org/x/text v0.22.0 // indirect ) replace github.com/redhat-developer/mapt => ../../ diff --git a/examples/spotprice/go.sum b/examples/spotprice/go.sum index 62822118a..d2e87f296 100644 --- a/examples/spotprice/go.sum +++ b/examples/spotprice/go.sum @@ -1,81 +1,100 @@ -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0 h1:GJHeeA2N7xrG3q30L2UXDyuWRzDM900/65j70wcM4Ww= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0/go.mod h1:l38EPgmsp71HHLq9j7De57JcKOWPyhrsW1Awm1JS6K0= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 h1:tfLQ34V6F7tVSwoTf/4lH5sE0o6eCJuNDTmH09nDpbc= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0 h1:g0EZJwz7xkXQiZAI5xi9f3WWFYBlX1CPTrR+NDToRkQ= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0/go.mod h1:XCW7KnZet0Opnr7HccfUw1PLc4CjHqpcaxW8DHklNkQ= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.2 h1:F0gBpfdPLGsw+nsgk6aqqkZS1jiixa5WwFe3fk/T3Ys= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.2/go.mod h1:SqINnQ9lVVdRlyC8cd1lCI0SdX4n2paeABd2K8ggfnE= +github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2 h1:yz1bePFlP5Vws5+8ez6T3HWXPmwOK7Yvq8QxDBD3SKY= +github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2/go.mod h1:Pa9ZNPuoNu/GztvBSKk9J1cDJW6vk/n0zLtV4mgd8N8= github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY= github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v6 v6.0.0 h1:ubTqH0Sqcc7KgjHGKstw446zi7SurSXESKgd4hJea7w= 
-github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v6 v6.0.0/go.mod h1:zflC9v4VfViJrSvcvplqws/yGXVbUEMZi/iHpZdSPWA= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v6 v6.3.0 h1:Dc9miZr1Mhaqbb3cmJCRokkG16uk8JKkqOADf084zy4= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v6 v6.3.0/go.mod h1:CHo9QYhWEvrKVeXsEMJSl2bpmYYNu6aG12JsSaFBXlY= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v3 v3.1.0 h1:2qsIIvxVT+uE6yrNldntJKlLRgxGbZ85kgtz5SNBhMw= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v3 v3.1.0/go.mod h1:AW8VEadnhw9xox+VaVd9sP7NjzOAnaZBLRH6Tq3cJ38= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resourcegraph/armresourcegraph v0.9.0 h1:zLzoX5+W2l95UJoVwiyNS4dX8vHyQ6x2xRLoBBL9wMk= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resourcegraph/armresourcegraph v0.9.0/go.mod h1:wVEOJfGTj0oPAUGA1JuRAvz/lxXQsWW16axmHPP47Bk= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0 h1:Dd+RhdJn0OTtVGaeDLZpcumkIVCtA/3/Fo42+eoYvVM= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0/go.mod h1:5kakwfW5CjC9KK+Q4wjXAg+ShuIm2mBMua0ZFj2C8PE= -github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU= -github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= +github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM= +github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE= +github.com/AzureAD/microsoft-authentication-library-for-go v1.4.1 h1:8BKxhZZLX/WosEeoCvWysmKUscfa9v8LIPEEU0JjE2o= +github.com/AzureAD/microsoft-authentication-library-for-go v1.4.1/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/atotto/clipboard v0.1.4 h1:EH0zSVneZPSuFR11BlR9YppQTVDbh5+16AmcJi4g1z4= github.com/atotto/clipboard v0.1.4/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI= github.com/aws/amazon-ec2-instance-selector/v2 v2.4.2-0.20231006140257-d989c5d76f0e h1:WWOuG4hx+k8KjLO0tHGrYUwY95nKJMMU9A/IGeIEKnU= github.com/aws/amazon-ec2-instance-selector/v2 v2.4.2-0.20231006140257-d989c5d76f0e/go.mod h1:xN+IXvhDWtzz9rQrNVLK+wwXvNie/bc6gv9gSP1cxlg= -github.com/aws/aws-sdk-go v1.45.6 h1:Y2isQQBZsnO15dzUQo9YQRThtHgrV200XCH05BRHVJI= -github.com/aws/aws-sdk-go v1.45.6/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= -github.com/aws/aws-sdk-go-v2 v1.21.0/go.mod h1:/RfNgGmRxI+iFOB1OeJUyxiU+9s88k3pfHvDagGEp0M= -github.com/aws/aws-sdk-go-v2 v1.27.2 h1:pLsTXqX93rimAOZG2FIYraDQstZaaGVVN4tNw65v0h8= -github.com/aws/aws-sdk-go-v2 v1.27.2/go.mod h1:ffIFB97e2yNsv4aTSGkqtHnppsIJzw7G7BReUZ3jCXM= -github.com/aws/aws-sdk-go-v2/config v1.27.18 h1:wFvAnwOKKe7QAyIxziwSKjmer9JBMH1vzIL6W+fYuKk= -github.com/aws/aws-sdk-go-v2/config v1.27.18/go.mod h1:0xz6cgdX55+kmppvPm2IaKzIXOheGJhAufacPJaXZ7c= -github.com/aws/aws-sdk-go-v2/credentials v1.17.18 h1:D/ALDWqK4JdY3OFgA2thcPO1c9aYTT5STS/CvnkqY1c= -github.com/aws/aws-sdk-go-v2/credentials v1.17.18/go.mod h1:JuitCWq+F5QGUrmMPsk945rop6bB57jdscu+Glozdnc= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.5 h1:dDgptDO9dxeFkXy+tEgVkzSClHZje/6JkPW5aZyEvrQ= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.5/go.mod h1:gjvE2KBUgUQhcv89jqxrIxH9GaKs1JbZzWejj/DaHGA= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.41/go.mod 
h1:CrObHAuPneJBlfEJ5T3szXOUkLEThaGfvnhTf33buas= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.9 h1:cy8ahBJuhtM8GTTSyOkfy6WVPV1IE+SS5/wfXUYuulw= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.9/go.mod h1:CZBXGLaJnEZI6EVNcPd7a6B5IC5cA/GkRWtu9fp3S6Y= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.35/go.mod h1:SJC1nEVVva1g3pHAIdCp7QsRIkMmLAgoDquQ9Rr8kYw= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.9 h1:A4SYk07ef04+vxZToz9LWvAXl9LW0NClpPpMsi31cz0= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.9/go.mod h1:5jJcHuwDagxN+ErjQ3PU3ocf6Ylc/p9x+BLO/+X4iXw= -github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU= -github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY= -github.com/aws/aws-sdk-go-v2/service/ec2 v1.142.0 h1:VrFC1uEZjX4ghkm/et8ATVGb1mT75Iv8aPKPjUE+F8A= -github.com/aws/aws-sdk-go-v2/service/ec2 v1.142.0/go.mod h1:qjhtI9zjpUHRc6khtrIM9fb48+ii6+UikL3/b+MKYn0= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 h1:Ji0DY1xUsUr3I8cHps0G+XM3WWU16lP6yG8qu1GAZAs= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2/go.mod h1:5CsjAbs3NlGQyZNFACh+zztPDI7fU6eW9QsxjfnuBKg= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.11 h1:o4T+fKxA3gTMcluBNZZXE9DNaMkJuUL1O3mffCUjoJo= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.11/go.mod h1:84oZdJ+VjuJKs9v1UTC9NaodRZRseOXCTgku+vQJWR8= -github.com/aws/aws-sdk-go-v2/service/pricing v1.21.6 h1:k/f3T13s7wx/By6aKovlVsjdNkRVT0QRR2RlZEvaTGg= -github.com/aws/aws-sdk-go-v2/service/pricing v1.21.6/go.mod h1:9n3tkRCngy3+Iw/8vK3C69iXh22SCGsy3yn16nTxH+s= -github.com/aws/aws-sdk-go-v2/service/sso v1.20.11 h1:gEYM2GSpr4YNWc6hCd5nod4+d4kd9vWIAWrmGuLdlMw= -github.com/aws/aws-sdk-go-v2/service/sso v1.20.11/go.mod h1:gVvwPdPNYehHSP9Rs7q27U1EU+3Or2ZpXvzAYJNh63w= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.24.5 h1:iXjh3uaH3vsVcnyZX7MqCoCfcyxIrVE9iOQruRaWPrQ= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.24.5/go.mod h1:5ZXesEuy/QcO0WUnt+4sDkxhdXRHTu2yG0uCSH8B6os= -github.com/aws/aws-sdk-go-v2/service/sts v1.28.12 h1:M/1u4HBpwLuMtjlxuI2y6HoVLzF5e2mfxHCg7ZVMYmk= -github.com/aws/aws-sdk-go-v2/service/sts v1.28.12/go.mod h1:kcfd+eTdEi/40FIbLq4Hif3XMXnl5b/+t/KTfLt9xIk= -github.com/aws/smithy-go v1.14.2/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= -github.com/aws/smithy-go v1.20.2 h1:tbp628ireGtzcHDDmLT/6ADHidqnwgF57XOXZe6tp4Q= -github.com/aws/smithy-go v1.20.2/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E= +github.com/aws/aws-sdk-go v1.50.36 h1:PjWXHwZPuTLMR1NIb8nEjLucZBMzmf84TLoLbD8BZqk= +github.com/aws/aws-sdk-go v1.50.36/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= +github.com/aws/aws-sdk-go-v2 v1.36.3 h1:mJoei2CxPutQVxaATCzDUjcZEjVRdpsiiXi2o38yqWM= +github.com/aws/aws-sdk-go-v2 v1.36.3/go.mod h1:LLXuLpgzEbD766Z5ECcRmi8AzSwfZItDtmABVkRLGzg= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 h1:zAybnyUQXIZ5mok5Jqwlf58/TFE7uvd3IAsa1aF9cXs= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10/go.mod h1:qqvMj6gHLR/EXWZw4ZbqlPbQUyenf4h82UQUlKc+l14= +github.com/aws/aws-sdk-go-v2/config v1.29.8 h1:RpwAfYcV2lr/yRc4lWhUM9JRPQqKgKWmou3LV7UfWP4= +github.com/aws/aws-sdk-go-v2/config v1.29.8/go.mod h1:t+G7Fq1OcO8cXTPPXzxQSnj/5Xzdc9jAAD3Xrn9/Mgo= +github.com/aws/aws-sdk-go-v2/credentials v1.17.61 h1:Hd/uX6Wo2iUW1JWII+rmyCD7MMhOe7ALwQXN6sKDd1o= +github.com/aws/aws-sdk-go-v2/credentials v1.17.61/go.mod 
h1:L7vaLkwHY1qgW0gG1zG0z/X0sQ5tpIY5iI13+j3qI80= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 h1:x793wxmUWVDhshP8WW2mlnXuFrO4cOd3HLBroh1paFw= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30/go.mod h1:Jpne2tDnYiFascUEs2AWHJL9Yp7A5ZVy3TNyxaAjD6M= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 h1:ZK5jHhnrioRkUNOc+hOgQKlUL5JeC3S6JgLxtQ+Rm0Q= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34/go.mod h1:p4VfIceZokChbA9FzMbRGz5OV+lekcVtHlPKEO0gSZY= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 h1:SZwFm17ZUNNg5Np0ioo/gq8Mn6u9w19Mri8DnJ15Jf0= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34/go.mod h1:dFZsC0BLo346mvKQLWmoJxT+Sjp+qcVR1tRVHQGOH9Q= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34 h1:ZNTqv4nIdE/DiBfUUfXcLZ/Spcuz+RjeziUtNJackkM= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34/go.mod h1:zf7Vcd1ViW7cPqYWEHLHJkS50X0JS2IKz9Cgaj6ugrs= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.203.1 h1:ZgY9zeVAe+54Qa7o1GXKRNTez79lffCeJSSinhl+qec= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.203.1/go.mod h1:0naMk66LtdeTmE+1CWQTKwtzOQ2t8mavOhMhR0Pv1m0= +github.com/aws/aws-sdk-go-v2/service/ecs v1.53.0 h1:TCQZX4ztlcWXAcZouKh9qJMcVaH/qTidFTfsvJwUI30= +github.com/aws/aws-sdk-go-v2/service/ecs v1.53.0/go.mod h1:Ghi1OWUv4+VMEULWiHsKH2gNA3KAcMoLWsvU0eRXvIA= +github.com/aws/aws-sdk-go-v2/service/iam v1.39.2 h1:2JLLGua711n8vn773xw2iwGh0zxLJJ3UDWQ2L7fy0wY= +github.com/aws/aws-sdk-go-v2/service/iam v1.39.2/go.mod h1:ZpAQJqd/i2bgRVa4vTa1ZX96sWgd3MZ/dxkABRXqvyI= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 h1:eAh2A4b5IzM/lum78bZ590jy36+d/aFLgKF/4Vd1xPE= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3/go.mod h1:0yKJC/kb8sAnmlYa6Zs3QVYqaC8ug2AbnNChv5Ox3uA= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.7 h1:ZMeFZ5yk+Ek+jNr1+uwCd2tG89t6oTS5yVWpa6yy2es= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.7/go.mod h1:mxV05U+4JiHqIpGqqYXOHLPKUC6bDXC44bsUhNjOEwY= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 h1:dM9/92u2F1JbDaGooxTq18wmmFzbJRfXfVfy96/1CXM= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15/go.mod h1:SwFBy2vjtA0vZbjjaFtfN045boopadnoVPhu4Fv66vY= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.5 h1:f9RyWNtS8oH7cZlbn+/JNPpjUk5+5fLd5lM9M0i49Ys= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.5/go.mod h1:h5CoMZV2VF297/VLhRhO1WF+XYWOzXo+4HsObA4HjBQ= +github.com/aws/aws-sdk-go-v2/service/pricing v1.32.17 h1:EtZFyL/uhaXlHjIwHW0KSJvppg+Ie1fzQ3wEXLEUj0I= +github.com/aws/aws-sdk-go-v2/service/pricing v1.32.17/go.mod h1:l7bufyRvU+8mY0Z1BNWbWvjr59dlj9YrLKmeiz5CJ30= +github.com/aws/aws-sdk-go-v2/service/s3 v1.53.1 h1:6cnno47Me9bRykw9AEv9zkXE+5or7jz8TsskTTccbgc= +github.com/aws/aws-sdk-go-v2/service/s3 v1.53.1/go.mod h1:qmdkIIAC+GCLASF7R2whgNrJADz0QZPX+Seiw/i4S3o= +github.com/aws/aws-sdk-go-v2/service/sso v1.25.0 h1:2U9sF8nKy7UgyEeLiZTRg6ShBS22z8UnYpV6aRFL0is= +github.com/aws/aws-sdk-go-v2/service/sso v1.25.0/go.mod h1:qs4a9T5EMLl/Cajiw2TcbNt2UNo/Hqlyp+GiuG4CFDI= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.29.0 h1:wjAdc85cXdQR5uLx5FwWvGIHm4OPJhTyzUHU8craXtE= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.29.0/go.mod h1:MlYRNmYu/fGPoxBQVvBYr9nyr948aY/WLUvwBMBJubs= +github.com/aws/aws-sdk-go-v2/service/sts 
v1.33.16 h1:BHEK2Q/7CMRMCb3nySi/w8UbIcPhKvYP5s1xf8/izn0= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.16/go.mod h1:cQnB8CUnxbMU82JvlqjKR2HBOm3fe9pWorWBza6MBJ4= +github.com/aws/smithy-go v1.22.3 h1:Z//5NuZCSW6R4PhQ93hShNbyBbn8BWCmCVCt+Q8Io5k= +github.com/aws/smithy-go v1.22.3/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI= github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k= github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= -github.com/charmbracelet/bubbles v0.16.1 h1:6uzpAAaT9ZqKssntbvZMlksWHruQLNxg49H5WdeuYSY= -github.com/charmbracelet/bubbles v0.16.1/go.mod h1:2QCp9LFlEsBQMvIYERr7Ww2H2bA7xen1idUDIzm/+Xc= -github.com/charmbracelet/bubbletea v0.25.0 h1:bAfwk7jRz7FKFl9RzlIULPkStffg5k6pNt5dywy4TcM= -github.com/charmbracelet/bubbletea v0.25.0/go.mod h1:EN3QDR1T5ZdWmdfDzYcqOCAps45+QIJbLOBxmVNWNNg= -github.com/charmbracelet/lipgloss v0.7.1 h1:17WMwi7N1b1rVWOjMT+rCh7sQkvDU75B2hbZpc5Kc1E= -github.com/charmbracelet/lipgloss v0.7.1/go.mod h1:yG0k3giv8Qj8edTCbbg6AlQ5e8KNWpFujkNawKNhE2c= -github.com/containerd/console v1.0.4 h1:F2g4+oChYvBTsASRTz8NP6iIAi97J3TtSAsLbIFn4ro= -github.com/containerd/console v1.0.4/go.mod h1:YynlIjWYF8myEu6sdkwKIvGQq+cOckRm6So2avqoYAk= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/charmbracelet/bubbles v0.20.0 h1:jSZu6qD8cRQ6k9OMfR1WlM+ruM8fkPWkHvQWD9LIutE= +github.com/charmbracelet/bubbles v0.20.0/go.mod h1:39slydyswPy+uVOHZ5x/GjwVAFkCsV8IIVy+4MhzwwU= +github.com/charmbracelet/bubbletea v1.1.0 h1:FjAl9eAL3HBCHenhz/ZPjkKdScmaS5SK69JAK2YJK9c= +github.com/charmbracelet/bubbletea v1.1.0/go.mod h1:9Ogk0HrdbHolIKHdjfFpyXJmiCzGwy+FesYkZr7hYU4= +github.com/charmbracelet/lipgloss v0.13.0 h1:4X3PPeoWEDCMvzDvGmTajSyYPcZM4+y8sCA/SsA3cjw= +github.com/charmbracelet/lipgloss v0.13.0/go.mod h1:nw4zy0SBX/F/eAO1cWdcvy6qnkDUxr8Lw7dvFrAIbbY= +github.com/charmbracelet/x/ansi v0.2.3 h1:VfFN0NUpcjBRd4DnKfRaIRo53KRgey/nhOoEqosGDEY= +github.com/charmbracelet/x/ansi v0.2.3/go.mod h1:dk73KoMTT5AX5BsX0KrqhsTqAnhZZoCBjs7dGWp4Ktw= +github.com/charmbracelet/x/term v0.2.1 h1:AQeHeLZ1OqSXhrAWpYUtZyX1T3zVxfpZuEQMIQaGIAQ= +github.com/charmbracelet/x/term v0.2.1/go.mod h1:oQ4enTYFV7QN4m0i9mzHrViD7TQKvNEEkHUMCmsxdUg= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/evertras/bubble-table v0.15.2 h1:hVj27V9tk5TD5p6mVv0RK/KJu2sHq0U+mBMux/HptkU= -github.com/evertras/bubble-table v0.15.2/go.mod h1:SPOZKbIpyYWPHBNki3fyNpiPBQkvkULAtOT7NTD5fKY= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= +github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f h1:Y/CXytFA4m6baUTXGLOoWe4PQhGxaX0KpnayAqC48p4= +github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f/go.mod h1:vw97MGsxSvLiUE2X8qFplwetxpGLQrlU1Q9AUEIzCaM= +github.com/evertras/bubble-table v0.17.1 
h1:HJwq3iQrZulXDE93ZcqJNiUVQCBbN4IJ2CkB/IxO3kk= +github.com/evertras/bubble-table v0.17.1/go.mod h1:ifHujS1YxwnYSOgcR2+m3GnJ84f7CVU/4kUOxUCjEbQ= github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= -github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= @@ -84,6 +103,8 @@ github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9Y github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6 h1:IsMZxCuZqKuao2vNdfD82fjjgPLfyHLpR41Z88viRWs= +github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6/go.mod h1:3VeWNIJaW+O5xpRQbPp0Ybqu1vJd/pm7s2F473HRrkw= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY= @@ -93,8 +114,8 @@ github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D github.com/mattn/go-localereader v0.0.1 h1:ygSAOl7ZXTx4RdPYinUpg6W99U8jWvWi9Ye2JC/oIi4= github.com/mattn/go-localereader v0.0.1/go.mod h1:8fBrzywKY7BI3czFoHkuzRoWE9C+EiG4R1k4Cjx5p88= github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= -github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= -github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= +github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 h1:ZK8zHtRHOkbHy6Mmr5D264iyp3TiX5OmNcI5cIARiQI= @@ -111,66 +132,40 @@ github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaR github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/redis/go-redis/v9 v9.7.0 h1:HhLSs+B6O021gwzl+locl0zEDnyNkxMtf/Z3NNBMa9E= 
+github.com/redis/go-redis/v9 v9.7.0/go.mod h1:f6zhXITC7JUJIlPEiBOTXxJgPLdZcA93GewI7inzyWw= github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= -github.com/sahilm/fuzzy v0.1.0 h1:FzWGaw2Opqyu+794ZQ9SYifWv2EIXpwP4q8dY1kDAwI= -github.com/sahilm/fuzzy v0.1.0/go.mod h1:VFvziUEIMCrT6A6tw2RFIXPXXmzXbOsSHF0DOI8ZK9Y= +github.com/sahilm/fuzzy v0.1.1 h1:ceu5RHF8DGgoi+/dR5PsECjCDH1BE3Fnmpo7aVXOdRA= +github.com/sahilm/fuzzy v0.1.1/go.mod h1:VFvziUEIMCrT6A6tw2RFIXPXXmzXbOsSHF0DOI8ZK9Y= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= -golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= -golang.org/x/exp v0.0.0-20240604190554-fc45aab8b7f8 h1:LoYXNGAShUG3m/ehNk4iFctuhGX/+R1ZpfJ4/ia80JM= -golang.org/x/exp v0.0.0-20240604190554-fc45aab8b7f8/go.mod h1:jj3sYF3dwk5D+ghuXyeI3r5MFf+NT2An6/9dOA95KSI= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= -golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= -golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/crypto v0.33.0 h1:IOBPskki6Lysi0lo9qQvbxiQ+FvsCC/YWOecCHAixus= +golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= +golang.org/x/net v0.35.0 h1:T5GQRQb2y08kTAByq9L4/bz8cipCdA8FbRTXewonqY8= +golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk= +golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w= +golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= -golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk= -golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= -golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc= +golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM= +golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= From 32f503abd118817049c709f38dad585cbcb102dc Mon 
Sep 17 00:00:00 2001
From: Adrian Riobo
Date: Fri, 21 Mar 2025 08:58:14 +0100
Subject: [PATCH 09/11] chore: manage init function per provider

Per comment
https://github.com/redhat-developer/mapt/pull/435#discussion_r2005170252
we may be missing a required initialization on the azure provider. To
ensure all initializations happen on each provider, we added an
interface which each provider should implement and which will contain
all required initialization specific to the provider. That
initialization function is called from context initialization.
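To make the new contract concrete, a minimal sketch of its shape (toy
types and prints, not mapt's real packages): each action passes its
cloud's Provider into context initialization, and the context calls
back into it for any provider-specific setup:

package main

import "fmt"

// Provider mirrors the interface added to the context package.
type Provider interface {
	Init(backedURL string) error
}

type AWS struct{}

func (a *AWS) Init(backedURL string) error {
	// e.g. resolve the bucket region when the backed URL is a remote S3 path
	fmt.Println("aws init for", backedURL)
	return nil
}

type Azure struct{}

func (a *Azure) Init(backedURL string) error {
	// e.g. set up the credentials the azure SDK clients need
	fmt.Println("azure init for", backedURL)
	return nil
}

// InitContext stands in for maptContext.Init: provider setup now runs as
// part of context initialization instead of being hardcoded to one cloud.
func InitContext(backedURL string, provider Provider) error {
	return provider.Init(backedURL)
}

func main() {
	if err := InitContext("s3://mapt-state", &AWS{}); err != nil {
		fmt.Println(err)
	}
}

With this indirection the AWS-only remote state handling moves behind
the AWS implementation of Init, and azure gets a hook for its own setup
at the same point.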
-func manageRemoteState(backedURL string) error { - if data.ValidateS3Path(backedURL) { - awsRegion, err := data.GetBucketLocationFromS3Path(backedURL) - if err != nil { - return err - } - if err := os.Setenv("AWS_DEFAULT_REGION", *awsRegion); err != nil { - return err - } - if err := os.Setenv("AWS_REGION", *awsRegion); err != nil { - return err - } - return nil - } - return nil -} - func manageIntegration(ca *ContextArgs) error { if ca.GHRunnerArgs != nil { ca.GHRunnerArgs.Name = RunID() diff --git a/pkg/provider/aws/action/fedora/fedora.go b/pkg/provider/aws/action/fedora/fedora.go index d9c547978..94f80d2f1 100644 --- a/pkg/provider/aws/action/fedora/fedora.go +++ b/pkg/provider/aws/action/fedora/fedora.go @@ -70,7 +70,7 @@ var CloudConfigBase []byte // Then it will run the stack for windows dedicated host func Create(ctx *maptContext.ContextArgs, r *Request) error { // Create mapt Context - if err := maptContext.Init(ctx); err != nil { + if err := maptContext.Init(ctx, aws.Provider()); err != nil { return err } @@ -125,7 +125,7 @@ func Create(ctx *maptContext.ContextArgs, r *Request) error { func Destroy(ctx *maptContext.ContextArgs) (err error) { logging.Debug("Run fedora destroy") // Create mapt Context - if err := maptContext.Init(ctx); err != nil { + if err := maptContext.Init(ctx, aws.Provider()); err != nil { return err } diff --git a/pkg/provider/aws/action/mac-pool/mac-pool.go b/pkg/provider/aws/action/mac-pool/mac-pool.go index 8447786ff..a557b42da 100644 --- a/pkg/provider/aws/action/mac-pool/mac-pool.go +++ b/pkg/provider/aws/action/mac-pool/mac-pool.go @@ -26,7 +26,7 @@ import ( // Even if we want to destroy the pool we will set params to max size 0 func Create(ctx *maptContext.ContextArgs, r *MacPoolRequestArgs) error { // Create mapt Context - if err := maptContext.Init(ctx); err != nil { + if err := maptContext.Init(ctx, aws.Provider()); err != nil { return err } if err := r.addMachinesToPool(r.OfferedCapacity); err != nil { @@ -41,7 +41,7 @@ func Create(ctx *maptContext.ContextArgs, r *MacPoolRequestArgs) error { // TODO decide how to destroy machines in the pool as they may need to wait to reach 24 hours func Destroy(ctx *maptContext.ContextArgs) error { // Create mapt Context - if err := maptContext.Init(ctx); err != nil { + if err := maptContext.Init(ctx, aws.Provider()); err != nil { return err } if err := iam.Destroy(); err != nil { @@ -56,7 +56,7 @@ func Destroy(ctx *maptContext.ContextArgs) error { func HouseKeeper(ctx *maptContext.ContextArgs, r *MacPoolRequestArgs) error { // Create mapt Context, this is a special case where we need change the context // based on the operation - if err := maptContext.Init(ctx); err != nil { + if err := maptContext.Init(ctx, aws.Provider()); err != nil { return err } @@ -104,7 +104,7 @@ func Request(ctx *maptContext.ContextArgs, r *RequestMachineArgs) error { // Create mapt Context ctx.ProjectName = *hi.ProjectName ctx.BackedURL = *hi.BackedURL - if err := maptContext.Init(ctx); err != nil { + if err := maptContext.Init(ctx, aws.Provider()); err != nil { return err } diff --git a/pkg/provider/aws/action/mac/mac.go b/pkg/provider/aws/action/mac/mac.go index 42b74d2a6..7796d692b 100644 --- a/pkg/provider/aws/action/mac/mac.go +++ b/pkg/provider/aws/action/mac/mac.go @@ -29,7 +29,7 @@ import ( // ... 
diff --git a/pkg/provider/aws/action/mac/mac.go b/pkg/provider/aws/action/mac/mac.go
index 42b74d2a6..7796d692b 100644
--- a/pkg/provider/aws/action/mac/mac.go
+++ b/pkg/provider/aws/action/mac/mac.go
@@ -29,7 +29,7 @@ import (
 // ...
 func Request(ctx *maptContext.ContextArgs, r *MacRequestArgs) error {
 	// Create mapt Context
-	if err := maptContext.Init(ctx); err != nil {
+	if err := maptContext.Init(ctx, aws.Provider()); err != nil {
 		return err
 	}
 
@@ -83,7 +83,7 @@ func Release(ctx *maptContext.ContextArgs, hostID string) error {
 	// Create mapt Context
 	ctx.ProjectName = *hi.ProjectName
 	ctx.BackedURL = *hi.BackedURL
-	if err := maptContext.Init(ctx); err != nil {
+	if err := maptContext.Init(ctx, aws.Provider()); err != nil {
 		return err
 	}
 	// replace machine
@@ -102,7 +102,7 @@ func Destroy(ctx *maptContext.ContextArgs, hostID string) error {
 	// Create mapt Context
 	ctx.ProjectName = *hi.ProjectName
 	ctx.BackedURL = *hi.BackedURL
-	if err := maptContext.Init(ctx); err != nil {
+	if err := maptContext.Init(ctx, aws.Provider()); err != nil {
 		return err
 	}
 	// Dedicated host is not on a valid state to be deleted
diff --git a/pkg/provider/aws/action/rhel/rhel.go b/pkg/provider/aws/action/rhel/rhel.go
index e6befd71e..64881879d 100644
--- a/pkg/provider/aws/action/rhel/rhel.go
+++ b/pkg/provider/aws/action/rhel/rhel.go
@@ -64,7 +64,7 @@ type Request struct {
 // Then it will run the stack for windows dedicated host
 func Create(ctx *maptContext.ContextArgs, r *Request) error {
 	// Create mapt Context
-	if err := maptContext.Init(ctx); err != nil {
+	if err := maptContext.Init(ctx, aws.Provider()); err != nil {
 		return err
 	}
 
@@ -119,7 +119,7 @@ func Create(ctx *maptContext.ContextArgs, r *Request) error {
 func Destroy(ctx *maptContext.ContextArgs) error {
 	logging.Debug("Run rhel destroy")
 	// Create mapt Context
-	if err := maptContext.Init(ctx); err != nil {
+	if err := maptContext.Init(ctx, aws.Provider()); err != nil {
 		return err
 	}
 
diff --git a/pkg/provider/aws/action/windows/windows.go b/pkg/provider/aws/action/windows/windows.go
index 24af7fd8e..dbf0261ad 100644
--- a/pkg/provider/aws/action/windows/windows.go
+++ b/pkg/provider/aws/action/windows/windows.go
@@ -81,7 +81,7 @@ var BootstrapScript []byte
 // Then it will run the stack for windows dedicated host
 func Create(ctx *maptContext.ContextArgs, r *Request) error {
 	// Create mapt Context
-	if err := maptContext.Init(ctx); err != nil {
+	if err := maptContext.Init(ctx, aws.Provider()); err != nil {
 		return err
 	}
 
@@ -152,7 +152,7 @@ func Create(ctx *maptContext.ContextArgs, r *Request) error {
 func Destroy(ctx *maptContext.ContextArgs) (err error) {
 	logging.Debug("Run windows destroy")
 	// Create mapt Context
-	if err := maptContext.Init(ctx); err != nil {
+	if err := maptContext.Init(ctx, aws.Provider()); err != nil {
 		return err
 	}
 
diff --git a/pkg/provider/aws/aws.go b/pkg/provider/aws/aws.go
index dc9e975e8..52f9c0be5 100644
--- a/pkg/provider/aws/aws.go
+++ b/pkg/provider/aws/aws.go
@@ -15,11 +15,47 @@ import (
 	maptContext "github.com/redhat-developer/mapt/pkg/manager/context"
 	"github.com/redhat-developer/mapt/pkg/manager/credentials"
 	awsConstants "github.com/redhat-developer/mapt/pkg/provider/aws/constants"
+	"github.com/redhat-developer/mapt/pkg/provider/aws/data"
 	"github.com/redhat-developer/mapt/pkg/util"
 	"github.com/redhat-developer/mapt/pkg/util/logging"
 	"github.com/redhat-developer/mapt/pkg/util/maps"
 )
 
+type AWS struct{}
+
+func (a *AWS) Init(backedURL string) error {
+	// Manage remote state requirements: if backedURL
+	// is in a different region, we need to switch to that region
+	// in order to interact with the state
+	return manageRemoteState(backedURL)
+}
+
+func Provider() *AWS {
+	return &AWS{}
+}
+
+// Under some circumstances it is possible we need to update Location initial configuration
+// due to usage of remote backed url. i.e. https://github.com/redhat-developer/mapt/issues/392
+
+// This function will check if backed url is remote and if so change initial values to be able to
+// use it.
+func manageRemoteState(backedURL string) error {
+	if data.ValidateS3Path(backedURL) {
+		awsRegion, err := data.GetBucketLocationFromS3Path(backedURL)
+		if err != nil {
+			return err
+		}
+		if err := os.Setenv("AWS_DEFAULT_REGION", *awsRegion); err != nil {
+			return err
+		}
+		if err := os.Setenv("AWS_REGION", *awsRegion); err != nil {
+			return err
+		}
+		return nil
+	}
+	return nil
+}
+
 // pulumi config key : aws env credential
 var envCredentials = map[string]string{
 	awsConstants.CONFIG_AWS_REGION: "AWS_DEFAULT_REGION",
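The interesting piece above is manageRemoteState: when the Pulumi backend URL points at an S3 bucket, mapt resolves the bucket's region and exports it through AWS_REGION and AWS_DEFAULT_REGION before anything else runs. A sketch of what such a lookup can look like with aws-sdk-go-v2 follows; the bucketRegion helper and the bucket name are illustrative only, and mapt's actual data.GetBucketLocationFromS3Path may be implemented differently.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// bucketRegion asks S3 where a bucket lives so the caller can point
// AWS_REGION/AWS_DEFAULT_REGION at the right endpoint for the state.
func bucketRegion(ctx context.Context, bucket string) (string, error) {
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		return "", err
	}
	out, err := s3.NewFromConfig(cfg).GetBucketLocation(ctx,
		&s3.GetBucketLocationInput{Bucket: aws.String(bucket)})
	if err != nil {
		return "", err
	}
	// GetBucketLocation returns an empty constraint for us-east-1.
	if out.LocationConstraint == "" {
		return "us-east-1", nil
	}
	return string(out.LocationConstraint), nil
}

func main() {
	region, err := bucketRegion(context.Background(), "mapt-state-bucket")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("state bucket lives in", region)
}

Note the us-east-1 special case: GetBucketLocation reports an empty LocationConstraint for buckets in that region, so any lookup of this kind has to account for it.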
diff --git a/pkg/provider/aws/modules/mac/util/util.go b/pkg/provider/aws/modules/mac/util/util.go
index 8aa5c4be3..d8a6e178f 100644
--- a/pkg/provider/aws/modules/mac/util/util.go
+++ b/pkg/provider/aws/modules/mac/util/util.go
@@ -83,7 +83,7 @@ func Release(ctx *maptContext.ContextArgs, hostID string) error {
 	// Create mapt Context
 	ctx.ProjectName = *hi.ProjectName
 	ctx.BackedURL = *hi.BackedURL
-	if err := maptContext.Init(ctx); err != nil {
+	if err := maptContext.Init(ctx, aws.Provider()); err != nil {
 		return err
 	}
 	// replace machine
diff --git a/pkg/provider/azure/action/aks/aks.go b/pkg/provider/azure/action/aks/aks.go
index e8f41f036..f51c96d32 100644
--- a/pkg/provider/azure/action/aks/aks.go
+++ b/pkg/provider/azure/action/aks/aks.go
@@ -36,7 +36,7 @@ type AKSRequest struct {
 func Create(ctx *maptContext.ContextArgs, r *AKSRequest) (err error) {
 	// Create mapt Context
 	logging.Debug("Creating AKS")
-	if err := maptContext.Init(ctx); err != nil {
+	if err := maptContext.Init(ctx, azure.Provider()); err != nil {
 		return err
 	}
 	cs := manager.Stack{
@@ -53,7 +53,7 @@ func Create(ctx *maptContext.ContextArgs, r *AKSRequest) (err error) {
 func Destroy(ctx *maptContext.ContextArgs) error {
 	// Create mapt Context
 	logging.Debug("Destroy AKS")
-	if err := maptContext.Init(ctx); err != nil {
+	if err := maptContext.Init(ctx, azure.Provider()); err != nil {
 		return err
 	}
 	return azure.Destroy(
diff --git a/pkg/provider/azure/action/linux/linux.go b/pkg/provider/azure/action/linux/linux.go
index 3c478598f..c9a252155 100644
--- a/pkg/provider/azure/action/linux/linux.go
+++ b/pkg/provider/azure/action/linux/linux.go
@@ -52,7 +52,7 @@ type LinuxRequest struct {
 
 func Create(ctx *maptContext.ContextArgs, r *LinuxRequest) (err error) {
 	// Create mapt Context
-	if err := maptContext.Init(ctx); err != nil {
+	if err := maptContext.Init(ctx, azure.Provider()); err != nil {
 		return err
 	}
 
@@ -79,7 +79,7 @@ func Create(ctx *maptContext.ContextArgs, r *LinuxRequest) (err error) {
 
 func Destroy(ctx *maptContext.ContextArgs) error {
 	// Create mapt Context
-	if err := maptContext.Init(ctx); err != nil {
+	if err := maptContext.Init(ctx, azure.Provider()); err != nil {
 		return err
 	}
 	// destroy
diff --git a/pkg/provider/azure/action/windows/windows.go b/pkg/provider/azure/action/windows/windows.go
index 662a62af9..8c79d92b2 100644
--- a/pkg/provider/azure/action/windows/windows.go
+++ b/pkg/provider/azure/action/windows/windows.go
@@ -58,7 +58,7 @@ type ghActionsRunnerData struct {
 
 func Create(ctx *maptContext.ContextArgs, r *WindowsRequest) (err error) {
 	// Create mapt Context
-	if err := maptContext.Init(ctx); err != nil {
+	if err := maptContext.Init(ctx, azure.Provider()); err != nil {
 		return err
 	}
 
@@ -84,7 +84,7 @@ func Create(ctx *maptContext.ContextArgs, r *WindowsRequest) (err error) {
 
 func Destroy(ctx *maptContext.ContextArgs) error {
 	// Create mapt Context
-	if err := maptContext.Init(ctx); err != nil {
+	if err := maptContext.Init(ctx, azure.Provider()); err != nil {
 		return err
 	}
 	// destroy
diff --git a/pkg/provider/azure/azure.go b/pkg/provider/azure/azure.go
index aa711d3c6..7afc8218b 100644
--- a/pkg/provider/azure/azure.go
+++ b/pkg/provider/azure/azure.go
@@ -1,12 +1,43 @@
 package azure
 
 import (
+	"os"
 	"slices"
+	"strings"
 
 	"github.com/redhat-developer/mapt/pkg/manager"
 	"github.com/redhat-developer/mapt/pkg/manager/credentials"
 )
 
+var azIdentityEnvs = []string{
+	"AZURE_TENANT_ID",
+	"AZURE_SUBSCRIPTION_ID",
+	"AZURE_CLIENT_ID",
+	"AZURE_CLIENT_SECRET",
+}
+
+type Azure struct{}
+
+func Provider() *Azure {
+	return &Azure{}
+}
+
+func (a *Azure) Init(backedURL string) error {
+	setAZIdentityEnvs()
+	return nil
+}
+
+// Envs required for auth with the go sdk
+// https://learn.microsoft.com/es-es/azure/developer/go/azure-sdk-authentication?tabs=bash#service-principal-with-a-secret
+// do not match the standard envs pulumi uses for auth with the native sdk
+// https://www.pulumi.com/registry/packages/azure-native/installation-configuration/#set-configuration-using-environment-variables
+func setAZIdentityEnvs() {
+	for _, e := range azIdentityEnvs {
+		os.Setenv(e,
+			os.Getenv(strings.ReplaceAll(e, "AZURE", "ARM")))
+	}
+}
+
 func GetClouProviderCredentials(fixedCredentials map[string]string) credentials.ProviderCredentials {
 	return credentials.ProviderCredentials{
 		SetCredentialFunc: nil,
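Worth calling out: Pulumi's azure-native provider authenticates via ARM_* variables, while the Azure SDK's DefaultAzureCredential chain reads AZURE_* ones, so azure.Provider().Init bridges the two by copying values across. A tiny self-contained illustration of that mapping follows; the UUID value is made up for the example.

package main

import (
	"fmt"
	"os"
	"strings"
)

func main() {
	// Pretend the process was started with Pulumi-style credentials.
	os.Setenv("ARM_TENANT_ID", "00000000-0000-0000-0000-000000000000")

	// Same transformation as setAZIdentityEnvs: AZURE_X gets the ARM_X value.
	for _, e := range []string{"AZURE_TENANT_ID", "AZURE_CLIENT_ID"} {
		os.Setenv(e, os.Getenv(strings.ReplaceAll(e, "AZURE", "ARM")))
	}

	// DefaultAzureCredential would now find AZURE_TENANT_ID populated.
	fmt.Println(os.Getenv("AZURE_TENANT_ID"))
}

With the mapping done once at provider init, the per-call SetAZIdentityEnvs invocations scattered through the Azure data and spot packages become unnecessary, which is why the diffs below delete them along with the identity module itself.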
"github.com/Azure/azure-sdk-for-go/sdk/azidentity" armcompute "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v6" - maptAzIdenity "github.com/redhat-developer/mapt/pkg/provider/azure/module/identity" ) func getAzureVMSKUs(cpus, memory int32, arch arch, nestedVirt bool) ([]string, error) { - maptAzIdenity.SetAZIdentityEnvs() + cred, err := azidentity.NewDefaultAzureCredential(nil) if err != nil { return nil, err diff --git a/pkg/spot/azure/spot.go b/pkg/spot/azure/spot.go index 25d4a5269..d5d4af8e4 100644 --- a/pkg/spot/azure/spot.go +++ b/pkg/spot/azure/spot.go @@ -14,7 +14,6 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azidentity" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resourcegraph/armresourcegraph" "github.com/redhat-developer/mapt/pkg/provider/azure/data" - maptAzIdentity "github.com/redhat-developer/mapt/pkg/provider/azure/module/identity" "github.com/redhat-developer/mapt/pkg/util" "github.com/redhat-developer/mapt/pkg/util/logging" "golang.org/x/exp/maps" @@ -113,8 +112,6 @@ func GetBestSpotChoice(r BestSpotChoiceRequest) (*BestSpotChoiceResponse, error) } func getGraphClient() (*armresourcegraph.Client, error) { - // Auth identity - maptAzIdentity.SetAZIdentityEnvs() cred, err := azidentity.NewDefaultAzureCredential(nil) if err != nil { return nil, fmt.Errorf("error getting the best spot price choice: %v", err) From df3f7c906e782947f8ac453e8c43dab29bda7280 Mon Sep 17 00:00:00 2001 From: Adrian Riobo Date: Fri, 21 Mar 2025 09:00:36 +0100 Subject: [PATCH 10/11] fix: azure pick only vmsizes available for current subscription the function to get vmsizes according to the required params on azure was returning some types with restricction within the subscription being used. This commit will remove those vmsizes with any type of restriction. Fix #433. Signed-off-by: Adrian Riobo --- pkg/provider/util/instancetypes/azure.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pkg/provider/util/instancetypes/azure.go b/pkg/provider/util/instancetypes/azure.go index 016e9a770..396404b9a 100644 --- a/pkg/provider/util/instancetypes/azure.go +++ b/pkg/provider/util/instancetypes/azure.go @@ -101,6 +101,10 @@ func resourceSKUToVirtualMachine(res *armcompute.ResourceSKU) *virtualMachine { if res.ResourceType != nil && *res.ResourceType != "virtualMachines" { return nil } + // If Machine type has any type of restriccions discard + if len(res.Restrictions) > 0 { + return nil + } vm := &virtualMachine{ Name: *res.Name, Family: *res.Family, From 178f7ad56b64f9907f5f75e8e2d2f55a92b362e6 Mon Sep 17 00:00:00 2001 From: Adrian Riobo Date: Fri, 21 Mar 2025 13:26:27 +0100 Subject: [PATCH 11/11] fix: Containerfile wrong shasum for builder image. This commit fixed a regression introduced by renovate update PR. Now the builder image is set to fedora 41 Signed-off-by: Adrian Riobo --- oci/Containerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/oci/Containerfile b/oci/Containerfile index 1ad05d195..f168870be 100644 --- a/oci/Containerfile +++ b/oci/Containerfile @@ -1,5 +1,5 @@ # Moved to Fedora 41 and install go tools until ubi9 go tools support go 1.23 -FROM quay.io/fedora/fedora:43@sha256:cd46550fc71e9401db4ad6e2b992df3278f6874a16d0abf1050e332f376802a5 as builder +FROM quay.io/fedora/fedora:41 as builder RUN dnf install -y make go ARG TARGETARCH USER root